id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
3207230 | <reponame>Sajadrahimi/arvan-client
import logging
from arvan_client.arvan.api import Arvan
from arvan_client.dns.domain import Domain
class DNS(Arvan):
    """Client for the Arvan CDN domains (DNS) API."""

    get_domains_url = 'https://napi.arvancloud.com/cdn/4.0/domains'

    def get_domains(self, query_params: dict = None):
        """Return all domains of the account as ``Domain`` objects.

        :param query_params: optional query-string parameters forwarded to
            the API request.
        """
        r = self._send_request('GET', self.get_domains_url, query_params)
        # Fix: this is a normal, successful response — log it at debug level,
        # not as an error (the original used logging.error unconditionally).
        logging.debug(r)
        return [Domain(**x) for x in r['data']]
| StarcoderdataPython |
1766510 | import unittest
import numpy as np
from bio_rtd.chromatography import bt_curve, bt_load
from bio_rtd.core import ChromatographyLoadBreakthrough
from bio_rtd_test.aux_bio_rtd_test import TestLogger
class DummyChromatographyLoadBreakthrough(ChromatographyLoadBreakthrough):
    """Minimal concrete subclass used to test the abstract base class.

    Fixes the unbound-to-load ratio at 0.7 so that `calc_c_bound` results
    are fully predictable in tests.
    """
    POSSIBLE_KEY_GROUPS = []
    OPTIONAL_KEYS = []

    def _calc_unbound_to_load_ratio(self, loaded_material: np.ndarray) -> np.ndarray:
        # Constant ratio: 70 % of loaded material is treated as unbound.
        return np.ones_like(loaded_material) * 0.7

    def _update_btc_parameters(self, **kwargs) -> None:  # pragma: no cover
        pass

    def get_total_bc(self) -> float:  # pragma: no cover
        pass
class TestChromatographyLoadBreakthrough(unittest.TestCase):
    """Tests for the shared logic in `ChromatographyLoadBreakthrough`."""

    def test_calc_c_bound(self):
        # Bound concentration should be c * (1 - unbound_ratio); the dummy
        # subclass pins unbound_ratio at 0.7.
        t = np.linspace(0, 10, 100)
        f = np.ones_like(t) * 0.2
        c = np.ones([1, t.size])
        bt = DummyChromatographyLoadBreakthrough(t[1])
        # zero concentration in -> zero bound concentration out
        self.assertTrue(np.all(bt.calc_c_bound(f, c * 0) == 0))
        # normal case: expect c * (1 - 0.7)
        np.testing.assert_array_equal(
            c * (1 - 0.7),
            bt.calc_c_bound(f, c)
        )
class TestConstantPatternSolution(unittest.TestCase):
    """Tests for the `bt_load.ConstantPatternSolution` breakthrough model."""

    def btc_init(self, dbc_100, k):
        # Helper: build a fresh model and sanity-check its initial state.
        dt = 0.4
        self.btc = bt_load.ConstantPatternSolution(dt, dbc_100, k)
        self.btc.set_logger_from_parent("id", TestLogger())
        self.assertEqual(self.btc.k, k)
        self.assertEqual(self.btc.dbc_100, dbc_100)
        # _cv == -1 marks "column volume not yet set".
        self.assertEqual(self.btc._cv, -1)

    def test_update_btc_parameters(self):
        self.btc_init(120, 0.2)
        cv = 14.5
        # An unknown keyword must be rejected and must not touch _cv.
        with self.assertRaises(KeyError):
            self.btc.update_btc_parameters(cv_not_right=cv)
        self.assertEqual(self.btc._cv, -1)
        self.btc.update_btc_parameters(cv=cv)
        self.assertEqual(self.btc._cv, cv)

    def test_calc_unbound_to_load_ratio(self):
        def run_test(cv, dbc_100, k):
            # Compare against the reference curve implementation in bt_curve.
            m = np.array([0, dbc_100 * cv, dbc_100 * cv * 2, 0.1, dbc_100 * cv * 1.1, dbc_100 * cv * 3])
            r_target = bt_curve.btc_constant_pattern_solution(m, dbc_100, k, cv, None)
            self.btc_init(dbc_100, k)
            self.btc.update_btc_parameters(cv=cv)
            r = self.btc._calc_unbound_to_load_ratio(m)
            np.testing.assert_array_almost_equal(r_target, r)
        self.btc_init(120, 0.2)
        with self.assertRaises(AssertionError):  # update_btc_parameters must be called
            self.btc._calc_unbound_to_load_ratio(np.array([]))
        run_test(14.5, 120, 0.2)
        with self.assertWarns(Warning):
            run_test(4.5, 2, 1.2)
        run_test(21.5, 20, 0.5)
        with self.assertWarns(Warning):
            run_test(4.5, 2, 1.2)

    def test_get_total_bc(self):
        self.btc_init(120, 0.2)
        with self.assertRaises(AssertionError):  # cv is undefined
            self.btc.get_total_bc()
        self.btc._cv = 23.3
        self.assertEqual(self.btc._cv * self.btc.dbc_100,
                         self.btc.get_total_bc())
| StarcoderdataPython |
112219 | <reponame>aadhityasw/Competitive-Programs
def leaders(A, N):
    """Return the leaders of A: elements >= every element to their right.

    The last element is always a leader.  Scans right-to-left tracking the
    running maximum, then reverses to restore left-to-right order (O(n)).

    :param A: list of numbers.
    :param N: length of A (kept for the call-site contract; unused here).
    :return: iterable of the leaders in left-to-right order.
    """
    if not A:
        # Guard: A[-1] would raise IndexError on an empty list.
        return []
    lead = []
    ma = A[-1]
    for num in reversed(A):
        if num >= ma:
            ma = num
            lead.append(ma)
    return reversed(lead)
    # NOTE: prepending with `lead = [ma] + lead` inside the loop yields the
    # same answer but is O(n^2), since each list concatenation copies the
    # whole list; append + reversed keeps the scan linear.
import math
def main():
    """Read T test cases; for each, print the leaders of the input array."""
    for _ in range(int(input())):
        n = int(input())
        arr = [int(tok) for tok in input().strip().split()]
        for value in leaders(arr, n):
            print(value, end=" ")
        print()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
15231 | <gh_stars>0
import serial

# Open COM7 (Windows port name) at 115200 baud; timeout=1 means readline()
# returns after at most ~1 s even if no newline arrives (yielding b'').
ser = serial.Serial('COM7',115200, timeout=1)
# Echo every incoming line forever; interrupt (Ctrl-C) to stop.
# NOTE(review): the port is never closed explicitly — acceptable for a
# throwaway script, but consider a try/finally with ser.close().
while True:
    print("R: ", ser.readline())
1610919 | <reponame>Petro-Viron/django-notification
from __future__ import print_function

import base64
import logging

import pynliner
from django.apps import apps
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import EmailMultiAlternatives
from django.db import IntegrityError, models
from django.db.models.query import QuerySet
from django.template import engines
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import activate, get_language
from django.utils.translation import ugettext as _
from postmark import PMMail
from twilio.rest import Client as TwilioRestClient

from .signals import email_sent, sms_sent
try:
import pickle as pickle
except ImportError:
import pickle
notifications_logger = logging.getLogger("pivot.notifications")
QUEUE_ALL = getattr(settings, "NOTIFICATION_QUEUE_ALL", False)
TWILIO_ACCOUNT_SID = getattr(settings, "TWILIO_ACCOUNT_SID", False)
TWILIO_ACCOUNT_TOKEN = getattr(settings, "TWILIO_ACCOUNT_TOKEN", False)
TWILIO_CALLER_ID = getattr(settings, "TWILIO_CALLER_ID", False)
# Per-object notification settings need django-guardian's object-level
# permissions; fall back to global settings only when it is not installed.
if 'guardian' in settings.INSTALLED_APPS:
    enable_object_notifications = True

    def custom_permission_check(perm, obj, user):
        """Return True if *user* holds the object-level permission *perm* on *obj*."""
        # Imported lazily so the module still imports without guardian.
        from guardian.models import UserObjectPermission
        return UserObjectPermission.objects.filter(user=user, permission__codename=perm,
                                                   object_pk=obj.pk,
                                                   content_type=ContentType.objects.get_for_model(obj)).exists()
else:
    enable_object_notifications = False
class LanguageStoreNotAvailable(Exception):
    """Raised when no per-user notification-language store is configured."""
    pass
class NoticeType(models.Model):
    """A category of notification, identified by its `label`.

    `default` is a spam-sensitivity threshold: a medium delivers this type
    by default only when its NOTICE_MEDIA_DEFAULTS value is <= `default`.
    """
    label = models.CharField(_('label'), max_length=40)
    display = models.CharField(_('display'), max_length=50)
    description = models.CharField(_('description'), max_length=100)
    # by default only on for media with sensitivity less than or equal to this number
    default = models.IntegerField(_('default'))

    def __str__(self):
        return self.label

    class Meta:
        verbose_name = _("notice type")
        verbose_name_plural = _("notice types")
# if this gets updated, the create() method below needs to be as well...
NOTICE_MEDIA = (
("1", _("Email")),
("2", _("Display")),
("3", _("SMS")),
)
def notice_medium_as_text(medium):
    """Return the human-readable name for a NOTICE_MEDIA key (e.g. "1" -> Email)."""
    for key, label in NOTICE_MEDIA:
        if key == medium:
            return label
    # Same failure mode as a dict lookup miss.
    raise KeyError(medium)
# how spam-sensitive is the medium
NOTICE_MEDIA_DEFAULTS = {
"1": 2, # email
"2": 3,
"3": 3,
}
class NoticeSetting(models.Model):
    """
    Indicates, for a given user, whether to send notifications
    of a given type to a given medium.

    One row per (user, notice_type, medium) — enforced by unique_together.
    """
    user = models.ForeignKey(User, verbose_name=_('user'), on_delete=models.CASCADE)
    notice_type = models.ForeignKey(NoticeType, verbose_name=_('notice type'), on_delete=models.CASCADE)
    # Key into NOTICE_MEDIA ("1" email, "2" display, "3" SMS).
    medium = models.CharField(_('medium'), max_length=1, choices=NOTICE_MEDIA)
    send = models.BooleanField(_('send'), default=False)

    class Meta:
        verbose_name = _("notice setting")
        verbose_name_plural = _("notice settings")
        unique_together = ("user", "notice_type", "medium")
def get_notification_setting(user, notice_type, medium):
    """Return the NoticeSetting for (user, notice_type, medium), creating it on demand.

    The default ``send`` value is on iff the medium's spam sensitivity is at
    most the notice type's default threshold.
    """
    try:
        return NoticeSetting.objects.get(user=user, notice_type=notice_type, medium=medium)
    except NoticeSetting.DoesNotExist:
        default = (NOTICE_MEDIA_DEFAULTS[medium] <= notice_type.default)
        try:
            setting = NoticeSetting(user=user, notice_type=notice_type, medium=medium, send=default)
            setting.save()
        except IntegrityError:
            # Two requests can race to create the same setting; the
            # unique_together constraint makes the loser raise IntegrityError,
            # so fetch the row the winner created.  (Fix: IntegrityError was
            # previously unimported, turning this handler into a NameError.)
            setting = NoticeSetting.objects.get(user=user, notice_type=notice_type, medium=medium)
        return setting
def get_all_notification_settings(user):
    """Return every NoticeSetting row belonging to *user*."""
    return NoticeSetting.objects.filter(user=user)
def create_notification_setting(user, notice_type, medium):
    """Create and save a NoticeSetting with the medium's default send flag.

    The default is on iff the medium's spam sensitivity is at most the
    notice type's `default` threshold.
    """
    default = (NOTICE_MEDIA_DEFAULTS[medium] <= notice_type.default)
    setting = NoticeSetting(user=user, notice_type=notice_type, medium=medium, send=default)
    setting.save()
    return setting
def should_send(user, notice_type, medium, obj_instance=None):
    """Return True if *user* should receive *notice_type* over *medium*.

    When guardian is installed and the user has opted into custom per-object
    settings for *obj_instance*, an object-level permission named
    "<medium text>-<label>" decides; otherwise the global NoticeSetting does.
    """
    if enable_object_notifications and obj_instance:
        has_custom_settings = custom_permission_check('custom_notification_settings', obj_instance, user)
        if has_custom_settings:
            medium_text = notice_medium_as_text(medium)
            perm_string = "%s-%s" % (medium_text, notice_type.label)
            return custom_permission_check(perm_string, obj_instance, user)
    return get_notification_setting(user, notice_type, medium).send
class NoticeManager(models.Manager):
    """Manager providing convenience queries over a user's notices."""

    def notices_for(self, user, archived=False, unseen=None, on_site=None, sent=False):
        """
        returns Notice objects for the given user.

        If archived=False, it only include notices not archived.
        If archived=True, it returns all notices for that user.

        If unseen=None, it includes all notices.
        If unseen=True, return only unseen notices.
        If unseen=False, return only seen notices.
        """
        if sent:
            lookup_kwargs = {"sender": user}
        else:
            lookup_kwargs = {"recipient": user}
        qs = self.filter(**lookup_kwargs)
        if not archived:
            # Fix: the original called self.filter(...) and discarded the
            # result, so archived notices were never actually excluded.
            qs = qs.filter(archived=archived)
        if unseen is not None:
            qs = qs.filter(unseen=unseen)
        if on_site is not None:
            qs = qs.filter(on_site=on_site)
        return qs

    def unseen_count_for(self, recipient, **kwargs):
        """
        returns the number of unseen notices for the given user but does not
        mark them seen
        """
        return self.notices_for(recipient, unseen=True, **kwargs).count()

    def received(self, recipient, **kwargs):
        """
        returns notices the given recipient has received.
        """
        kwargs["sent"] = False
        return self.notices_for(recipient, **kwargs)

    def sent(self, sender, **kwargs):
        """
        returns notices the given sender has sent
        """
        kwargs["sent"] = True
        return self.notices_for(sender, **kwargs)
class Notice(models.Model):
    """A single delivered notification shown in a user's notice list."""
    recipient = models.ForeignKey(User, on_delete=models.CASCADE, related_name='recieved_notices',
                                  verbose_name=_('recipient'))
    sender = models.ForeignKey(User, on_delete=models.CASCADE, null=True, related_name='sent_notices',
                               verbose_name=_('sender'))
    message = models.TextField(_('message'))
    notice_type = models.ForeignKey(NoticeType, on_delete=models.CASCADE, verbose_name=_('notice type'))
    added = models.DateTimeField(_('added'), default=timezone.now, db_index=True)
    unseen = models.BooleanField(_('unseen'), default=True)
    archived = models.BooleanField(_('archived'), default=False)
    on_site = models.BooleanField(_('on site'), default=False)
    objects = NoticeManager()

    def __str__(self):
        return self.message

    def archive(self):
        """Mark this notice archived and persist the change."""
        self.archived = True
        self.save()

    def is_unseen(self):
        """
        returns value of self.unseen but also changes it to false.

        Use this in a template to mark an unseen notice differently the first
        time it is shown.
        """
        unseen = self.unseen
        if unseen:
            self.unseen = False
            self.save()
        return unseen

    class Meta:
        ordering = ["-added"]
        verbose_name = _("notice")
        verbose_name_plural = _("notices")

    def get_absolute_url(self):
        """URL of this notice's detail view."""
        return reverse("notification_notice", args=[str(self.pk)])
class NoticeQueueBatch(models.Model):
    """
    A queued notice.

    Denormalized data for a notice: a base64-encoded pickle of
    (user_pk, label, extra_context, on_site, sender) tuples, written by
    ``queue()`` and consumed by an out-of-band worker.
    """
    pickled_data = models.TextField()
def create_notice_type(label, display, description, default=2, verbosity=1):
    """
    Creates a new NoticeType, or updates an existing one in place.

    This is intended to be used by other apps as a post_syncdb management step.
    """
    try:
        notice_type = NoticeType.objects.get(label=label)
    except NoticeType.DoesNotExist:
        NoticeType(label=label, display=display, description=description, default=default).save()
        if verbosity > 1:
            print("Created %s NoticeType" % label)
        return
    # Apply any changed fields; hit the database only when something differs.
    updated = False
    for attr, value in (("display", display), ("description", description), ("default", default)):
        if getattr(notice_type, attr) != value:
            setattr(notice_type, attr, value)
            updated = True
    if updated:
        notice_type.save()
        if verbosity > 1:
            print("Updated %s NoticeType" % label)
def get_notification_language(user):
    """
    Returns site-specific notification language for this user. Raises
    LanguageStoreNotAvailable if this site does not use translated
    notifications.
    """
    if getattr(settings, 'NOTIFICATION_LANGUAGE_MODULE', False):
        try:
            app_label, model_name = settings.NOTIFICATION_LANGUAGE_MODULE.split('.')
            try:
                return getattr(user, model_name.lower()).language
            except AttributeError:
                pass
            model = apps.get_model(app_label=app_label, model_name=model_name)
        except (ImportError, ImproperlyConfigured):
            # Fix: the original also listed model.DoesNotExist in this tuple,
            # but `model` is unbound if get_model itself raised, turning the
            # intended exception into a NameError.
            raise LanguageStoreNotAvailable
        try:
            language_model = model._default_manager.get(user__id__exact=user.id)
        except model.DoesNotExist:
            raise LanguageStoreNotAvailable
        if hasattr(language_model, 'language'):
            return language_model.language
    raise LanguageStoreNotAvailable
def get_formatted_messages(formats, label, context):
    """
    Returns a dictionary with the format identifier as the key. The values
    are fully rendered templates with the given context.
    """
    format_templates = {}
    for format in formats:
        # conditionally turn off autoescaping for .txt extensions in format
        # (only when a dedicated 'notification.txt' template engine exists)
        engine_names = [e.name for e in engines.all()]
        if format.endswith(".txt") and "notification.txt" in engine_names:
            engine = 'notification.txt'
        else:
            engine = None
        # Try the type-specific template first, falling back to the generic one.
        format_templates[format] = render_to_string((
            'notification/%s/%s' % (label, format),
            'notification/%s' % format), context=context, using=engine)
    return format_templates
def send_now(users, label, extra_context=None, on_site=True, sender=None,
             attachments=None, obj_instance=None, force_send=False):
    """
    Creates a new notice and delivers it immediately.

    This is intended to be how other apps create new notices.

    notification.send(user, 'friends_invite_sent', {
        'spam': 'eggs',
        'foo': 'bar',
    )

    You can pass in on_site=False to prevent the notice emitted from being
    displayed on the site.

    For each active recipient this renders the notice templates, stores a
    Notice row, then delivers by email and/or SMS according to the user's
    notification settings (force_send=True bypasses the email setting for
    users that have an email address).
    """
    if extra_context is None:
        extra_context = {}
    if attachments is None:
        # Fix: the original used a mutable [] default argument, which is
        # shared between calls.
        attachments = []
    notice_type = NoticeType.objects.get(label=label)
    protocol = getattr(settings, "DEFAULT_HTTP_PROTOCOL", "http")
    current_site = Site.objects.get_current()
    current_language = get_language()
    # Template suffixes rendered for every notice.
    formats = (
        'short.txt',
        'full.txt',
        'notice.html',
        'full.html',
        'sms.txt',
    )
    for user in users:
        # NOTE(review): `and` binds tighter than `or`, so this evaluates as
        # (user.email and force_send) or should_send(...). Preserved as-is;
        # confirm the intended grouping.
        should_send_email = user.is_active and (
            user.email and force_send or should_send(user, notice_type, "1", obj_instance))
        should_send_sms = user.userprofile.sms and user.is_active and should_send(user, notice_type, "3", obj_instance)
        # disabled check for on_site for now since we are not using it
        # on_site = should_send(user, notice_type, "2", obj_instance) #On-site display
        on_site = False
        if not (should_send_email or should_send_sms or on_site):
            continue
        recipients = []
        # get user language for user from language store defined in
        # NOTIFICATION_LANGUAGE_MODULE setting
        try:
            language = get_notification_language(user)
        except LanguageStoreNotAvailable:
            language = None
        if language is not None:
            # activate the user's language
            activate(language)
        # update context with user specific translations
        context = {
            "recipient": user,
            "sender": sender,
            "notice": _(notice_type.display),
            "notices_url": "",
            "current_site": current_site,
        }
        context.update(extra_context)
        # get prerendered format messages
        messages = get_formatted_messages(formats, label, context)
        context['message'] = messages['short.txt']
        # Strip newlines from subject
        subject = ''.join(render_to_string('notification/email_subject.txt', context).splitlines())
        context['message'] = messages['full.txt']
        body = render_to_string('notification/email_body.txt', context)
        # Inline the CSS so the HTML renders consistently in mail clients.
        body = pynliner.fromString(body)
        notice = Notice.objects.create(recipient=user, message=messages['notice.html'],
                                       notice_type=notice_type, on_site=on_site, sender=sender)
        if should_send_email:  # Email
            recipients.append(user.email)
            # send empty "plain text" data
            msg = EmailMultiAlternatives(subject, "", settings.DEFAULT_FROM_EMAIL, recipients)
            # attach html data as alternative
            msg.attach_alternative(body, "text/html")
            for attachment in attachments:
                msg.attach(attachment)
            try:
                msg.send()
                email_sent.send(sender=Notice, user=user, notice_type=notice_type, obj=obj_instance)
                notifications_logger.info(
                    "SUCCESS:EMAIL:%s: data=(notice_type=%s, subject=%s)" % (user, notice_type, subject))
            except Exception:
                # Fix: narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit are no longer swallowed.
                notifications_logger.exception(
                    "ERROR:EMAIL:%s: data=(notice_type=%s, subject=%s)" % (user, notice_type, subject))
        if should_send_sms:
            try:
                rc = TwilioRestClient(TWILIO_ACCOUNT_SID, TWILIO_ACCOUNT_TOKEN)
                rc.api.v2010.messages.create(
                    to=user.userprofile.sms,
                    from_=TWILIO_CALLER_ID,
                    body=messages['sms.txt'],
                )
                sms_sent.send(sender=Notice, user=user, notice_type=notice_type, obj=obj_instance)
                notifications_logger.info(
                    "SUCCESS:SMS:%s: data=(notice_type=%s, msg=%s)" % (user, notice_type, messages['sms.txt']))
            except Exception:
                notifications_logger.exception(
                    "ERROR:SMS:%s: data=(notice_type=%s, msg=%s)" % (user, notice_type, messages['sms.txt']))
    # reset environment to original language
    activate(current_language)
def send(*args, **kwargs):
    """
    Dispatch a notification either immediately or via the queue.

    Honors the global NOTIFICATION_QUEUE_ALL flag; a per-call ``queue=True``
    or ``now=True`` keyword overrides the global behavior.  Passing both is
    a programming error.
    """
    queue_flag = kwargs.pop("queue", False)
    now_flag = kwargs.pop("now", False)
    assert not (queue_flag and now_flag), "'queue' and 'now' cannot both be True."
    if queue_flag:
        return queue(*args, **kwargs)
    if now_flag:
        return send_now(*args, **kwargs)
    return queue(*args, **kwargs) if QUEUE_ALL else send_now(*args, **kwargs)
def queue(users, label, extra_context=None, on_site=True, sender=None):
    """
    Queue the notification in NoticeQueueBatch. This allows for large amounts
    of user notifications to be deferred to a separate process running outside
    the webserver.
    """
    if extra_context is None:
        extra_context = {}
    if isinstance(users, QuerySet):
        users = [row["pk"] for row in users.values("pk")]
    else:
        users = [user.pk for user in users]
    notices = [(user, label, extra_context, on_site, sender) for user in users]
    # Fix: pickle.dumps returns bytes, and bytes have no .encode("base64")
    # (the "base64" codec was a Python 2 feature) — encode explicitly and
    # store ASCII text in the TextField. Consumers must base64-decode before
    # unpickling, which matches the original Python 2 on-disk format.
    pickled = base64.b64encode(pickle.dumps(notices)).decode("ascii")
    NoticeQueueBatch(pickled_data=pickled).save()
class ObservedItemManager(models.Manager):
    """Lookups for ObservedItem rows by observed object and signal."""

    def all_for(self, observed, signal):
        """
        Returns all ObservedItems for an observed object,
        to be sent when a signal is emited.
        """
        content_type = ContentType.objects.get_for_model(observed)
        observed_items = self.filter(content_type=content_type, object_id=observed.id, signal=signal)
        return observed_items

    def get_for(self, observed, observer, signal):
        # Raises ObservedItem.DoesNotExist / MultipleObjectsReturned like
        # any Manager.get.
        content_type = ContentType.objects.get_for_model(observed)
        observed_item = self.get(content_type=content_type, object_id=observed.id, user=observer, signal=signal)
        return observed_item
class ObservedItem(models.Model):
    """Registration of a user observing an arbitrary object for a signal."""
    user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name=_('user'))
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    observed_object = GenericForeignKey('content_type', 'object_id')
    notice_type = models.ForeignKey(NoticeType, on_delete=models.CASCADE, verbose_name=_('notice type'))
    added = models.DateTimeField(_('added'), default=timezone.now)
    # the signal that will be listened to send the notice
    signal = models.TextField(verbose_name=_('signal'))
    objects = ObservedItemManager()

    class Meta:
        ordering = ['-added']
        verbose_name = _('observed item')
        verbose_name_plural = _('observed items')

    def send_notice(self, extra_context=None):
        """Send this item's notice type to its observer, exposing the
        observed object to templates as ``observed``."""
        if extra_context is None:
            extra_context = {}
        extra_context.update({'observed': self.observed_object})
        send([self.user], self.notice_type.label, extra_context)
def observe(observed, observer, notice_type_label, signal='post_save'):
    """
    Create a new ObservedItem.

    To be used by applications to register a user as an observer for some object.

    Raises NoticeType.DoesNotExist when the label is unknown.
    """
    notice_type = NoticeType.objects.get(label=notice_type_label)
    observed_item = ObservedItem(user=observer, observed_object=observed,
                                 notice_type=notice_type, signal=signal)
    observed_item.save()
    return observed_item
def stop_observing(observed, observer, signal='post_save'):
    """
    Remove an observed item.

    Raises ObservedItem.DoesNotExist when no such registration exists.
    """
    observed_item = ObservedItem.objects.get_for(observed, observer, signal)
    observed_item.delete()
def send_observation_notices_for(observed, signal='post_save', extra_context=None):
    """
    Send a notice for each registered user about an observed object.

    Returns the queryset of ObservedItems that were notified.
    """
    if extra_context is None:
        extra_context = {}
    observed_items = ObservedItem.objects.all_for(observed, signal)
    for observed_item in observed_items:
        observed_item.send_notice(extra_context)
    return observed_items
def is_observing(observed, observer, signal='post_save'):
    """Return True if *observer* observes *observed* for *signal*.

    Anonymous users never observe anything; duplicate registrations still
    count as observing.
    """
    if isinstance(observer, AnonymousUser):
        return False
    try:
        # Only existence matters (the fetched row was previously assigned to
        # an unused local).
        ObservedItem.objects.get_for(observed, observer, signal)
        return True
    except ObservedItem.DoesNotExist:
        return False
    except ObservedItem.MultipleObjectsReturned:
        return True
def handle_observations(sender, instance, *args, **kw):
    """Signal handler: notify all observers of *instance* (post_save wiring)."""
    send_observation_notices_for(instance)
| StarcoderdataPython |
3370976 | <reponame>idiot-ag/youtubepy<gh_stars>1-10
"""
"""
from multiprocessing import Pool
def init_driver():
"""
:return:
"""
if __name__ == "__main__":
P = Pool(cnt)
P.map(self.init_driver, zip(drivers, url_passing)) | StarcoderdataPython |
4814507 | import urllib.request
import json
def paper_titles_for_id(person_id):
    """Return the display names ('DN') of all papers attributed to the given
    author id by the Microsoft Academic Knowledge 'evaluate' endpoint.

    NOTE(review): the subscription key is hard-coded into the URL; it should
    be moved to configuration/environment rather than committed in source.
    """
    url = f'https://api.labs.cognitive.microsoft.com/academic/v1.0/evaluate?expr=Composite(AA.AuId={person_id})&attributes=DN&subscription-key=a6f4e6b22dd9422cb40d0e8c2ef8eb3c&count=99999999'
    output = json.load(urllib.request.urlopen(url))
    titles = [x['DN'] for x in output['entities']]
    return titles
# print(paper_titles_for_id(2105886198))
3290735 | <filename>docs/autogen.py
import os
import pathlib
import six
# From https://github.com/keras-team/keras/blob/0a0ac3fa5462cf4a72636ca4498a0a82ac91fc32/docs/autogen.py
def get_module_docstring(filepath):
    """Extract the module docstring.

    Also finds the line at which the docstring ends.

    Returns a ``(docstring, first_line_number)`` tuple; the docstring is the
    empty string when the module has none.
    """
    # Fix: use a context manager so the file handle is closed
    # deterministically (the original left the handle to the GC).
    with open(filepath, encoding='utf-8') as source_file:
        co = compile(source_file.read(), filepath, 'exec')
    # On Python 3 a docstring constant is simply a str (the original tested
    # six.string_types, which the rest of this py3 file does not need).
    if co.co_consts and isinstance(co.co_consts[0], str):
        docstring = co.co_consts[0]
    else:
        print('Could not get the docstring from ' + filepath)
        docstring = ''
    return docstring, co.co_firstlineno
def copy_examples(examples_dir, destination_dir):
    """Copy the examples directory in the documentation.

    Prettify files by extracting the docstrings written in Markdown.

    Each ``*.py`` file becomes a same-named ``*.md`` file: the module
    docstring first, then the remaining source wrapped in a fenced python
    code block.
    """
    pathlib.Path(destination_dir).mkdir(exist_ok=True)
    for file in os.listdir(examples_dir):
        if not file.endswith('.py'):
            continue
        module_path = os.path.join(examples_dir, file)
        docstring, starting_line = get_module_docstring(module_path)
        # file[:-2] keeps the trailing dot, so '.py' -> '.md'.
        destination_file = os.path.join(destination_dir, file[:-2] + 'md')
        with open(destination_file, 'w+', encoding='utf-8') as f_out, \
                open(os.path.join(examples_dir, file),
                     'r+', encoding='utf-8') as f_in:
            f_out.write(docstring + '\n\n')
            # skip docstring
            for _ in range(starting_line):
                next(f_in)
            f_out.write('```python\n')
            # next line might be empty.
            line = next(f_in)
            if line != '\n':
                f_out.write(line)
            # copy the rest of the file.
            for line in f_in:
                f_out.write(line)
            f_out.write('```')
if __name__ == "__main__":
print(os.getcwd())
copy_examples("./docs/examples", "./_build/pydocmd/examples")
| StarcoderdataPython |
3323272 | <gh_stars>0
# Generated by Django 2.2.12 on 2020-05-28 01:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional `apikey` CharField to the Website model."""

    dependencies = [
        ('websites', '0002_website_status'),
    ]
    operations = [
        migrations.AddField(
            model_name='website',
            name='apikey',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Website apikey'),
        ),
    ]
| StarcoderdataPython |
3295762 | import os
import subprocess
from avatar2 import *
filename = "a.out"
GDB_PORT = 1234
# This is a bare minimum elf-file, gracefully compiled from
# https://github.com/abraithwaite/teensy
tiny_elf = (
b"\x7f\x45\x4c\x46\x02\x01\x01\x00\xb3\x2a\x31\xc0\xff\xc0\xcd\x80"
b"\x02\x00\x3e\x00\x01\x00\x00\x00\x08\x00\x40\x00\x00\x00\x00\x00"
b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x40\x00\x38\x00\x01\x00\x00\x00\x00\x00\x00\x00"
b"\x01\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00"
b"\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x20\x00\x00\x00\x00\x00"
)
# Hello world shellcode
shellcode = (
b"\x68\x72\x6c\x64\x21\x48\xb8\x48\x65\x6c\x6c\x6f\x20\x57\x6f\x50"
b"\x48\x89\xef\x48\x89\xe6\x6a\x0c\x5a\x6a\x01\x58\x0f\x05"
)
# Save our executable to disk
with open(filename, "wb") as f:
f.write(tiny_elf)
os.chmod(filename, 0o744)
# Create the avatar instance and specify the architecture for this analysis
avatar = Avatar(arch=archs.x86.X86_64)
# Load the gdb memory map loader
avatar.load_plugin("gdb_memory_map_loader")
# Create the endpoint: a gdbserver connected to our tiny ELF file
gdbserver = subprocess.Popen(
"gdbserver --once 127.0.0.1:%d a.out" % GDB_PORT, shell=True
)
# Create the corresponding target, using the GDBTarget backend
target = avatar.add_target(GDBTarget, gdb_port=GDB_PORT)
# Initialize the target.
# This usually connects the target to the endpoint
target.init()
# Load the memory maps from the target without update.
mem_ranges = target.load_memory_mappings(update=False)
assert len(avatar.memory_ranges) == 0
assert (mem_ranges)
# Load the memory maps from the target with update (default).
target.load_memory_mappings()
assert (mem_ranges.boundary_table == avatar.memory_ranges.boundary_table)
# Now it is possible to interact with the target.
# For example, we can insert our shellcode at the current point of execution
target.write_memory(target.read_register("pc"), len(shellcode), shellcode, raw=True)
# We can now resume the execution in our target
# You should see hello world printed on your screen! :)
target.cont()
# Clean up!
os.remove(filename)
avatar.shutdown()
| StarcoderdataPython |
1691627 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Implements key derivation routines. These are mostly meant to be used as
modifiers for multibin expressions that can be passed as key arguments to
modules in `refinery.units.crypto.cipher`.
"""
import importlib
from refinery.units import arg, Unit
from refinery.lib.argformats import number
from refinery.lib.types import ByteStr
from enum import Enum
from typing import Callable
__all__ = ['arg', 'HASH', 'KeyDerivation']
class HASH(str, Enum):
    """Hash algorithm choices for key derivation.

    Each value is the module name under ``Crypto.Hash`` (note that SHA1
    maps to the legacy module name ``SHA``).
    """
    MD2 = 'MD2'
    MD4 = 'MD4'
    MD5 = 'MD5'
    SHA1 = 'SHA'
    SHA256 = 'SHA256'
    SHA512 = 'SHA512'
    SHA224 = 'SHA224'
    SHA384 = 'SHA384'
def multidecode(data: ByteStr, function: Callable[[str], ByteStr]) -> ByteStr:
    """Decode *data* with the first codec that works (utf8, latin1, cp1252)
    and apply *function* to the result; as a last resort, map every byte
    value directly to the code point of the same ordinal."""
    for codec in ('utf8', 'latin1', 'cp1252'):
        try:
            return function(data.decode(codec))
        except UnicodeError:
            continue
    return function(''.join(map(chr, data)))
class KeyDerivation(Unit, abstract=True):
    """Abstract base for key-derivation units.

    Subclasses receive the requested output ``size`` in bytes, a ``salt``,
    an optional hash algorithm (resolved here to the matching module under
    ``Crypto.Hash``), and an optional iteration count.
    """

    def __init__(
        self,
        size: arg(help='The number of bytes to generate.', type=number),
        salt: arg(help='Salt for the derivation.'),
        hash: arg.option(choices=HASH, metavar='hash',
            help='Specify one of these algorithms (default is {default}): {choices}') = None,
        iter: arg.number(metavar='iter', help='Number of iterations; default is {default}.') = None,
        **kw
    ):
        if hash is not None:
            # Resolve the HASH enum value to the PyCryptodome hash module.
            name = arg.as_option(hash, HASH)
            hash = importlib.import_module(F'Crypto.Hash.{name}')
        return super().__init__(salt=salt, size=size, iter=iter, hash=hash, **kw)

    @property
    def hash(self):
        """The resolved ``Crypto.Hash`` module (or None when unspecified)."""
        return self.args.hash
| StarcoderdataPython |
# NOTE(review): `dt_true` is assumed to be a pandas DataFrame defined earlier
# (outside this fragment) with at least the columns 'ratio' and 'income' —
# confirm against the surrounding notebook/script.
dt_alt = dt_true.copy()
print(f'before: {dt_true.shape[0]} observations, {dt_true.shape[1]} variables')
# In-place variant: drop the 'ratio' column, rows with income > 1.5, and the
# rows labeled 0..5 (loc[:5] is label-based and end-inclusive: 6 rows).
dt_true.drop('ratio',axis=1,inplace=True)
I = dt_true['income'] > 1.5
dt_true.drop(dt_true[I].index,inplace=True)
dt_true.drop(dt_true.loc[:5].index,inplace=True)
print(f'after: {dt_true.shape[0]} observations, {dt_true.shape[1]} variables')
# alternative: keep where I is false
# NOTE(review): iloc[5:] drops the first 5 rows by POSITION, while the
# in-place path above drops 6 rows by LABEL — the two variants can differ.
del dt_alt['ratio']
I = dt_alt['income'] > 1.5
dt_alt = dt_alt[~I]
dt_alt = dt_alt.iloc[5:,:]
print(f'after (alt): {dt_alt.shape[0]} observations, {dt_alt.shape[1]} variables')
191405 | <filename>utils/pool.py<gh_stars>0
from multiprocessing import Process, Queue
from time import sleep
from typing import Callable, Iterable
def __worker(work, time_sleep, task_queue, done_queue):
    """
    Worker loop: pull tasks until the 'STOP' sentinel, run `work` on each,
    and push (result, ok, task) tuples onto `done_queue`.

    :param work: callable mapping a task to (ok, result).
    :type work: Callable[[object], (bool, object)]
    :param time_sleep: seconds to pause between tasks (crude rate limiting).
    :type time_sleep: float
    :param task_queue: queue of pending tasks; terminated by 'STOP'.
    :type task_queue: Queue
    :param done_queue: queue receiving (result, ok, task) tuples.
    :type done_queue: Queue
    """
    for task in iter(task_queue.get, 'STOP'):
        ok, result = work(task)
        done_queue.put((result, ok, task))
        if ok:
            print("OK {}".format(task))
        else:
            print("FAIL {}".format(task))
        sleep(time_sleep)
def distribute_work(task_generator, func_work, time_sleep, pools=4):
    """
    Fan tasks out to `pools` worker processes and collect their results.

    Each failed task is retried exactly once; tasks that fail twice are
    reported at the end and dropped.

    :param task_generator: zero-argument callable producing the task list.
    :type task_generator: Callable[[], List]
    :param func_work: callable mapping a task to (ok, result).
    :type func_work: Callable[[object], (bool, object)]
    :param time_sleep: per-task pause inside each worker, in seconds.
    :type time_sleep: float
    :param pools: number of worker processes.
    :type pools: int
    :return: list of non-None results, in completion (unordered) order.
    :rtype: Iterable
    """
    # https://docs.python.org/ko/3/library/multiprocessing.html#multiprocessing-examples
    # Distribute pages to crawl
    tasks = task_generator()
    # Create queues
    queue_task = Queue()
    queue_done = Queue()
    # Submit tasks
    for task in tasks:
        queue_task.put(task)
    # Start worker process
    for _ in range(pools):
        Process(target=__worker, args=(func_work, time_sleep, queue_task, queue_done)).start()
    print("Started!")
    # Collect unordered results
    result_list = []
    success_once = set()
    failed_once = set()
    # NOTE(review): a task that fails once and then succeeds on retry ends up
    # in BOTH sets, so len(success)+len(failed) can overshoot len(tasks) and
    # this loop may then block forever on queue_done.get() — confirm and fix.
    # Tasks must also be hashable and unique for this bookkeeping to work.
    while len(success_once) + len(failed_once) != len(tasks) or not queue_task.empty():
        try:
            result, ok, task_id = queue_done.get()
        except:
            # NOTE(review): bare except retries on ANY error, including
            # KeyboardInterrupt — consider narrowing.
            continue
        if ok:
            success_once.add(task_id)
            if result is not None:
                result_list.append(result)
        else:
            # Retry once
            if task_id not in failed_once:
                failed_once.add(task_id)
                queue_task.put(task_id)
    # Stop
    for _ in range(pools):
        queue_task.put('STOP')
    # Print failed ones
    for task_fail in failed_once:
        print("Failed: {}".format(task_fail))
    print("Stopped all!")
    return result_list
| StarcoderdataPython |
3219100 | <filename>airmap_airspace_example.py
#!/usr/bin/env python3
from airmap import AirMapAPI
token = input("Enter your API Token for airmap: ")
airmap_api = AirMapAPI(token)
# geo map coordinates for ASU Tempe Campus
# GeoJSON FeatureCollection containing one rectangular polygon; coordinates
# are [longitude, latitude] pairs with the first vertex repeated to close
# the ring, per the GeoJSON spec.
geo_json = {
  "type": "FeatureCollection",
  "features": [
    {
      "type": "Feature",
      "properties": {},
      "geometry": {
        "type": "Polygon",
        "coordinates": [
          [
            [
              -111.94244384765625,
              33.41209918127008
            ],
            [
              -111.91300392150879,
              33.41209918127008
            ],
            [
              -111.91300392150879,
              33.431083186694096
            ],
            [
              -111.94244384765625,
              33.431083186694096
            ],
            [
              -111.94244384765625,
              33.41209918127008
            ]
          ]
        ]
      }
    }
  ]
}
# Query airspace information for the polygon and print the raw response.
print(airmap_api.search_airspace(geo_json))
4805968 | __author__ = '<NAME>'
__version__ = '0.1.0'
__license__ = 'MIT'
import csv, sys
class Json:
    """Flatten a (possibly nested) JSON structure into CSV rows.

    Each leaf value produces one row consisting of the chain of dictionary
    keys leading to it, followed by the value itself.
    """

    def __init__(self, json_data):
        self.json_data = json_data
        self.rows_list = []  # accumulated rows: [key, key, ..., leaf_value]

    def recursive_json(self, data, store_list=None, level=0, key=None):
        """Walk *data* depth-first, recording one row per leaf value.

        :param data: current node (dict, list, or leaf value).
        :param store_list: key-path accumulator (fresh per top-level walk).
        :param level: current nesting depth into ``store_list``.
        :param key: name of the key that led here (informational only).
        """
        # Fix: None instead of a mutable [] default — the original shared one
        # accumulator list across every call of the method.
        if store_list is None:
            store_list = []
        if isinstance(data, dict):
            for key in sorted(data.keys()):
                # Fix: the original compared against level + 1, which APPENDED
                # a sibling key instead of overwriting the stale entry at this
                # level, corrupting the key path for flat structures.
                if len(store_list) <= level:
                    store_list.append(key)
                else:
                    store_list[level] = key
                self.recursive_json(data[key], store_list, level + 1, key)
        elif isinstance(data, list):
            # Lists do not contribute a key: recurse at the same level.
            for value in data:
                self.recursive_json(value, store_list, level, key)
        else:
            # Leaf: record the key path up to this level plus the value.
            try:
                self.rows_list.append(store_list[:level] + [data])
            except Exception:
                print("Error occured at:", data)

    def convert_to_csv(self, filename="jsonto.csv", delimiter=","):
        """Flatten ``self.json_data`` and write the rows to *filename*."""
        self.recursive_json(self.json_data)
        # Fix: csv.writer needs a text-mode file with newline='' on Python 3;
        # the original opened the file in binary mode, which raises TypeError.
        with open(filename, "w", newline="") as csvfile:
            spamwriter = csv.writer(csvfile, delimiter=delimiter)
            for row in self.rows_list:
                try:
                    spamwriter.writerow(row)
                except Exception:
                    print("Error occured at:", row)
| StarcoderdataPython |
1798279 | <filename>application/routers/projects.py
"""VBR Project routes"""
from typing import Dict
from fastapi import APIRouter, Body, Depends, HTTPException
from vbr.api import VBR_Api
from vbr.utils.barcode import generate_barcode_string, sanitize_identifier_string
from ..dependencies import *
from .models import Project, transform
from .utils import parameters_to_query
# FastAPI router for the /projects endpoints. Every route is wrapped in
# LoggingRoute so request/response pairs are logged.
router = APIRouter(
    prefix="/projects",
    tags=["projects"],
    responses={404: {"description": "Not found"}},
    route_class=LoggingRoute,
)
@router.get("/", dependencies=[Depends(vbr_read_public)], response_model=List[Project])
def list_projects(
    # See views/projects_public.sql for possible filter names
    project_id: Optional[str] = None,
    name: Optional[str] = None,
    abbreviation: Optional[str] = None,
    description: Optional[str] = None,
    client: VBR_Api = Depends(vbr_admin_client),
    common=Depends(limit_offset),
):
    """List Projects.
    Refine results using filter parameters.
    Requires: **VBR_READ_PUBLIC**"""
    # Translate the optional query-string filters into a view query dict.
    filters = parameters_to_query(
        project_id=project_id,
        name=name,
        abbreviation=abbreviation,
        description=description,
    )
    # Page through the public projects view and map each raw row onto the
    # Project response model.
    raw_rows = client.vbr_client.query_view_rows(
        view_name="projects_public",
        query=filters,
        limit=common["limit"],
        offset=common["offset"],
    )
    return [transform(row) for row in raw_rows]
@router.get(
    "/{project_id}", dependencies=[Depends(vbr_read_public)], response_model=Project
)
def get_project_by_id(
    project_id: str,
    client: VBR_Api = Depends(vbr_admin_client),
):
    """Get a Project by ID.
    Requires: **VBR_READ_PUBLIC**"""
    # Exact-match lookup against the public projects view; only one row is
    # requested since project_id is unique.
    query = {"project_id": {"operator": "eq", "value": project_id}}
    matches = client.vbr_client.query_view_rows(
        view_name="projects_public", query=query, limit=1, offset=0
    )
    return transform(matches[0])
# TODO
# PUT /{project_id} - update project
# POST / - create new project
| StarcoderdataPython |
1716468 | <reponame>wnzhang/rtb-unbiased-learning
#!/usr/bin/python
import sys
from collections import defaultdict
# Build a Kaplan-Meier (product-limit) estimator of the bid-winning
# probability from censored win/lose observations.
# NOTE(review): this script uses Python 2 print statements throughout.
print 'Begin to build Kaplan Meier estimator based on', ((sys.argv[1]).split('/'))[-1], 'and', ((sys.argv[3]).split('/'))[-1], 'for', ((sys.argv[2]).split('/'))[-1]
#build zb dictionary
# bo_dict: bid price -> list of win indicators observed at that bid.
bo_dict = defaultdict(list)
#add smooth data
# Laplace-style smoothing: seed every bid 0..300 with one artificial win
# so no bid level has an empty observation list.
upper = 301
for i in range(0, upper):
    bo_dict[i].append(1)
fi = open(sys.argv[1], 'r')
# size counts total observations (smoothing rows included).
size = upper
for line in fi:
    s = line.strip().split()
    #b = int(s[0]) #boolean value
    b = int(s[0]) #bid price
    # Remaining columns are the win/lose outcomes recorded at this bid.
    for i in range(1, len(s)):
        o = int(s[i])
        bo_dict[b].append(o)
        size += 1
fi.close()
size0 = size - 1
#build bdn list
# bdns rows are [b, d, n]: bid level, number of wins at that level, and
# the number of observations still "at risk" at that level.
bdns = []
wins = 0
for z in bo_dict:
    wins = sum(bo_dict[z])
    b = z
    d = wins
    n = size0
    bdn = [b, d, n]
    bdns.append(bdn)
    # Observations at this bid leave the risk set for higher bids.
    size0 -= len(bo_dict[z]) # len
#build new winning probability
# zw_dict: bid -> estimated winning probability (1 - survival).
zw_dict = {}
min_p_w = 0
bdns_length = len(bdns)
count = 0
p_l_tmp = (size - 1.0) / size
for bdn in bdns:
    count += 1
    b = float(bdn[0])
    d = float(bdn[1])
    n = float(bdn[2])
    p_l = p_l_tmp
    p_w = max(1.0 - p_l, min_p_w)
    zw_dict[int(b)] = p_w
    # Kaplan-Meier product-limit update: S <- S * (n - d) / n.
    if count < bdns_length:
        p_l_tmp = (n - d) / n * p_l_tmp
def win_prob(bid):
    """Winning probability for *bid* from the Kaplan-Meier estimate.

    Exact bid levels hit ``zw_dict`` directly; otherwise the probability of
    the greatest estimated bid below *bid* is returned, and bids above every
    estimated level win with probability 1.
    """
    if bid in zw_dict:
        return zw_dict[bid]
    last_key = -1
    # Bug fix: iterate the bid levels in explicit ascending order. The
    # original iterated the raw dict, silently depending on insertion
    # order (and on arbitrary order under older Pythons).
    for key in sorted(zw_dict):
        if last_key == -1:
            last_key = key
        if bid <= key:
            # First level >= bid reached: return the previous level's value.
            return zw_dict[last_key]
        last_key = key
    # bid exceeds every estimated level: certain win.
    return 1.
#read wyzx.imp to build wyzx.uimp.km
# Rewrite column 0 of each impression row with the KM winning probability
# of the bid found in column 2 of the paired file.
fi1 = open(sys.argv[2], 'r')
fi2 = open(sys.argv[3], 'r')
fo = open(sys.argv[4], 'w')
for line1 in fi1:
    # Files are read in lock-step: line i of fi1 pairs with line i of fi2.
    line2 = fi2.readline()
    s1 = line1.strip().split()
    s2 = line2.strip().split()
    z = int(s2[2])
    s1[0] = str(win_prob(z))
    fo.write('\t'.join(s1) + '\n')
fi1.close()
fi2.close()
fo.close()
print 'Finished creating file:', ((sys.argv[4]).split('/'))[-1]
print '-------------------'
# output win prob
# Infer the advertiser id from the input path (iPinYou campaign ids).
adv = 'null'
for a in ['1458', '2259', '2261', '2821', '2997', '3358', '3386', '3427', '3476', 'all']:
    if a in sys.argv[3]:
        adv = a
        break
win_prob_file = '../results/win-prob/{}.kimp.winprob.txt'.format(adv)
print 'output win prob to ' + win_prob_file
# Dump the full bid -> probability curve for bids 0..301.
fof = open(win_prob_file, 'w')
for bid in range(302):
    fof.write('%d\t%.8f\n' % (bid, win_prob(bid)))
fof.close()
'''
a = {}
for i in bo_dict:
a[i] = sum(bo_dict[i])
print a
''' | StarcoderdataPython |
1745044 | # coding:utf-8
"""
A corpus parser for preparing data for a tensorflow chatbot
"""
import os
import random
from ast import literal_eval
from tqdm import tqdm
DELIM = ' +++$+++ '
"""分隔符"""
SIZE = 3000
movie_lines_filepath = 'data/cornell-movie-dialogs-corpus/movie_lines.txt'
movie_conversations = 'data/cornell-movie-dialogs-corpus/movie_conversations.txt'
def punctuation_processing(line):
    """Normalize a raw movie-script line.

    Markup/noise tokens are blanked out, kept punctuation is separated
    from words with spaces, quotes are dropped, the text is lower-cased,
    whitespace is collapsed, and a trailing newline is appended.
    """
    # Tokens removed outright (".." is listed first on purpose: after it
    # runs, no "..." sequences remain).
    noise_tokens = ("..", "...", "-", "[", "]", "<i>", "</i>", "<u>", "</u>",
                    "<b>", "</b>", "<U>", "</U>", "<", ">", "{", "}")
    for token in noise_tokens:
        line = line.replace(token, " ")
    # Punctuation that is kept but padded so it tokenizes separately;
    # double quotes are deleted entirely.
    for old, new in ((".", " . "), (",", " , "), ("!", " !"),
                     ("?", " ?"), ('"', '')):
        line = line.replace(old, new)
    # Lower-case and collapse all runs of whitespace to single spaces.
    return " ".join(line.lower().split()) + "\n"
def get_id2line():
    """Read 'movie_lines.txt' and map each line id to its normalized text.

    Rows that do not split into exactly five DELIM-separated fields are
    skipped.

    :return: (dict) {line_id: normalized text, ...}
    """
    ID_COL = 0
    TEXT_COL = 4
    mapping = {}
    with open(movie_lines_filepath, 'r', encoding='utf-8') as handle:
        for raw in handle:
            fields = raw.split(DELIM)
            if len(fields) != 5:
                continue
            mapping[fields[ID_COL]] = punctuation_processing(fields[TEXT_COL])
    return mapping
def get_conversations():
    """Read 'movie_conversations.txt' and return the conversations.

    Each conversation is the literal-eval'd list of line ids stored in the
    last DELIM-separated field of a row.

    :return: list of [line_id, ...] lists
    """
    conversations = []
    with open(movie_conversations, 'r') as handle:
        for raw in handle:
            ids_field = raw.split(DELIM)[-1]
            # The field is a Python list literal, e.g. "['L1', 'L2']".
            conversations.append(literal_eval(ids_field))
    return conversations
def count_linestokens(line):
    """Number of space-separated tokens in *line* (punctuation included).

    Splits on single spaces, so consecutive spaces produce empty tokens
    and the empty string still counts as one token.
    """
    return len(line.split(' '))
def judge(line1, line2):
    """Return True when either line of a Q/A pair is effectively blank
    (fewer than two characters), meaning the pair should be dropped."""
    return len(line1) < 2 or len(line2) < 2
def generate_double(id2line, conversations, output_directory='tmp', test_set_size=3000):
    """Generate question/answer (2-tuple) dialog files.

    :param id2line: (dict) mapping of line ids to actual line text
    :param conversations: (list) collections of line ids, one per conversation
    :param output_directory: (str) subdirectory of 'dataset/cornell' to write to
    :param test_set_size: (int) number of samples held out for the test set
                          (default 3000)
    :return: train_enc_filepath, train_dec_filepath, test_enc_filepath,
             test_dec_filepath
    """
    questions = []
    answers = []
    for conversation in conversations:
        # Drop the last utterance of odd-length conversations so that the
        # remaining lines pair up as (question, answer).
        if len(conversation) % 2 != 0:
            conversation = conversation[:-1]
        for idx, line_id in enumerate(conversation):
            if idx % 2 == 0:
                questions.append(id2line[line_id])
            else:
                answers.append(id2line[line_id])
    # Drop pairs where either side is (near-)blank.
    print('Processing replace blank line')
    kept_pairs = [(q, a) for q, a in zip(questions, answers) if not judge(q, a)]
    questions = [q for q, _ in kept_pairs]
    answers = [a for _, a in kept_pairs]
    # Create the output directory.
    # Bug fix: os.mkdir() cannot create missing parent directories (the
    # 'dataset/cornell' prefix); os.makedirs() creates the whole chain.
    output_directory = 'dataset/cornell/' + output_directory
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
        print('Created directory successfully ', output_directory)
    else:
        print('the directory:', '//', output_directory, 'has already exited!')
    train_enc_filepath = os.path.join(output_directory, 'train.enc')
    train_dec_filepath = os.path.join(output_directory, 'train.dec')
    test_enc_filepath = os.path.join(output_directory, 'test.enc')
    test_dec_filepath = os.path.join(output_directory, 'test.dec')
    # Use a set: the original list made every membership test O(test_set_size).
    test_ids = set(random.sample(range(len(questions)), test_set_size))
    print('Outputting train/test enc/dec files...')
    # 'with' guarantees the four files are closed even if a write fails.
    with open(train_enc_filepath, 'w', encoding='utf-8') as train_enc, \
         open(train_dec_filepath, 'w', encoding='utf-8') as train_dec, \
         open(test_enc_filepath, 'w', encoding='utf-8') as test_enc, \
         open(test_dec_filepath, 'w', encoding='utf-8') as test_dec:
        for i in tqdm(range(len(questions))):
            if i in test_ids:
                test_enc.write(questions[i])
                test_dec.write(answers[i])
            else:
                train_enc.write(questions[i])
                train_dec.write(answers[i])
    return train_enc_filepath, train_dec_filepath, test_enc_filepath, test_dec_filepath
def generate_triple(id2line, conversations, output_directory='triple', test_set_size=3000):
    """Generate (context1, context2, reply) 3-tuple dialog files.

    Every conversation of length >= 3 is expanded into overlapping triples
    of consecutive utterances.

    :param id2line: (dict) mapping of line ids to actual line text
    :param conversations: (list) collections of line ids, one per conversation
    :param output_directory: (str) directory to write files to
    :param test_set_size: (int) number of samples held out for the test set
                          (default 3000)
    :return: train_enc1_filepath, train_enc2_filepath, train_dec_filepath,
             test_enc1_filepath, test_enc2_filepath, test_dec_filepath
    """
    first = []
    second = []
    third = []
    # Position-based expansion: the first utterance only opens triples, the
    # last only closes them, interior utterances participate in up to three
    # roles depending on their distance from each end.
    for conversation in conversations:
        ConversationLength = len(conversation)
        if ConversationLength >= 3:
            for idx, line_id in enumerate(conversation):
                if idx == 0:
                    first.append(id2line[line_id])
                elif idx == ConversationLength - 1:
                    third.append(id2line[line_id])
                elif ConversationLength == 3:
                    second.append(id2line[line_id])
                elif idx == 1:
                    first.append(id2line[line_id])
                    second.append(id2line[line_id])
                elif idx == ConversationLength - 2:
                    second.append(id2line[line_id])
                    third.append(id2line[line_id])
                else:
                    first.append(id2line[line_id])
                    second.append(id2line[line_id])
                    third.append(id2line[line_id])
    # Bug fix: the original called the non-existent os.mkdirs(), which
    # raises AttributeError; os.makedirs() also creates missing parents.
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
        print('Created directory successfully: ', '//', output_directory)
    else:
        print('the directory:', '//', output_directory, 'has already exited!')
    train_enc1_filepath = os.path.join(output_directory, 'train.enc1')
    train_enc2_filepath = os.path.join(output_directory, 'train.enc2')
    train_dec_filepath = os.path.join(output_directory, 'train.dec')
    test_enc1_filepath = os.path.join(output_directory, 'test.enc1')
    test_enc2_filepath = os.path.join(output_directory, 'test.enc2')
    test_dec_filepath = os.path.join(output_directory, 'test.dec')
    # Set membership is O(1); the original list was O(test_set_size) per test.
    test_ids = set(random.sample(range(len(first)), test_set_size))
    print('Outputting train/test enc/dec files...')
    # 'with' guarantees all six files are closed even if a write fails.
    with open(train_enc1_filepath, 'w', encoding='utf-8') as train_enc1, \
         open(train_enc2_filepath, 'w', encoding='utf-8') as train_enc2, \
         open(train_dec_filepath, 'w', encoding='utf-8') as train_dec, \
         open(test_enc1_filepath, 'w', encoding='utf-8') as test_enc1, \
         open(test_enc2_filepath, 'w', encoding='utf-8') as test_enc2, \
         open(test_dec_filepath, 'w', encoding='utf-8') as test_dec:
        for i in tqdm(range(len(first))):
            if i in test_ids:
                test_enc1.write(first[i])
                test_enc2.write(second[i])
                test_dec.write(third[i])
            else:
                train_enc1.write(first[i])
                train_enc2.write(second[i])
                train_dec.write(third[i])
    return train_enc1_filepath, train_enc2_filepath, train_dec_filepath, \
           test_enc1_filepath, test_enc2_filepath, test_dec_filepath
if __name__ == '__main__':
    # NOTE(review): argparse is imported but never used -- the parser code
    # below was disabled by turning it into a bare string literal (dead
    # code kept for reference).
    import argparse
    """
    parser = argparse.ArgumentParser(description=__doc__)
    DEFAULT_OUTPUT_DIRECTORY = 'cornell'
    parser.add_argument('-l', '--lines',
                        default='data/cornell-movie-dialogs-corpus/movie_lines.txt',
                        help='Path to Cornell Corpus, "movie_lines.txt"')
    parser.add_argument('-c', '--conversations',
                        default='data/cornell-movie-dialogs-corpus/movie_conversations.txt',
                        help='Path to Cornell Corpus, "movie_conversations.txt"')
    parser.add_argument('-o', '--output_directory',
                        dest='output_directory',
                        default=DEFAULT_OUTPUT_DIRECTORY,
                        help='Output directory for train/test data [DEFAULT={}]'.format(DEFAULT_OUTPUT_DIRECTORY))
    parser.add_argument('-s', '--size',
                        default=3000,
                        help='Size of test set')
    args = parser.parse_args()
    """
    # Pipeline: line-id -> text map, conversation id lists, then write the
    # question/answer train/test files under dataset/cornell/v1.1.
    print('Collection line-ids...')
    id2lines = get_id2line()
    print('Collection conversations...')
    conversations = get_conversations()
    result_filepaths = generate_double(id2lines, conversations, 'v1.1', SIZE)
    print('Done')
| StarcoderdataPython |
1650888 | <filename>official/cv/c3d/export.py
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore import dtype as mstype
from mindspore import context, Tensor, export
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from src.c3d_model import C3D
from src.model_utils.config import config
from src.model_utils.moxing_adapter import moxing_wrapper
@moxing_wrapper()
def export_model(ckpt_path):
    """Build a C3D classifier, load weights from *ckpt_path*, and export
    the graph in the configured serialization format (e.g. MINDIR/AIR)."""
    network = C3D(num_classes=config.num_classes)
    # Inference mode: freezes dropout/batch-norm behaviour before export.
    network.set_train(False)
    param_dict = load_checkpoint(ckpt_path)
    load_param_into_net(network, param_dict)
    # Dummy zero input that fixes the exported graph's input signature:
    # (batch, channels=3, clip_length, *final_shape).
    image_shape = [config.batch_size, 3, config.clip_length] + config.final_shape
    window_image = Tensor(np.zeros(image_shape), mstype.float32)
    export(network, window_image, file_name=config.mindir_file_name, file_format=config.file_format)
if __name__ == '__main__':
    # Configure graph-mode execution on the target device before exporting.
    context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target,
                        save_graphs=False, device_id=config.device_id)
    export_model(ckpt_path=config.ckpt_file)
| StarcoderdataPython |
1688321 | <filename>kingbird/drivers/openstack/sdk.py<gh_stars>0
# Copyright 2016 Ericsson AB
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
OpenStack Driver
'''
import collections
from oslo_log import log
from oslo_utils import timeutils
from kingbird.common import consts
from kingbird.common import endpoint_cache
from kingbird.common import exceptions
from kingbird.common.i18n import _
from kingbird.drivers.openstack.cinder_v2 import CinderClient
from kingbird.drivers.openstack.keystone_v3 import KeystoneClient
from kingbird.drivers.openstack.neutron_v2 import NeutronClient
from kingbird.drivers.openstack.nova_v2 import NovaClient
# Gap, in seconds, to determine whether the given token is about to expire
STALE_TOKEN_DURATION = 30
LOG = log.getLogger(__name__)
class OpenStackDriver(object):
    """Facade over the per-region OpenStack service clients.

    Keystone/Nova/Cinder/Neutron client objects are cached in the
    class-level ``os_clients_dict`` and reused across instances while the
    cached keystone token is still valid (see ``_is_token_valid``).
    """

    # Shared cache: {'keystone': KeystoneClient,
    #                <region_name>: {'nova'|'cinder'|'neutron': client}}
    os_clients_dict = collections.defaultdict(dict)

    def __init__(self, region_name=None):
        # Check if objects are cached and try to use those
        self.region_name = region_name
        if 'keystone' in OpenStackDriver.os_clients_dict and \
                self._is_token_valid():
            self.keystone_client = OpenStackDriver.os_clients_dict['keystone']
        else:
            self.keystone_client = KeystoneClient()
            OpenStackDriver.os_clients_dict['keystone'] = self.keystone_client
        # Quota fields not applicable in this region (service disabled).
        self.disabled_quotas = self._get_disabled_quotas(region_name)
        if region_name in OpenStackDriver.os_clients_dict and \
                self._is_token_valid():
            LOG.info('Using cached OS client objects')
            self.nova_client = OpenStackDriver.os_clients_dict[
                region_name]['nova']
            self.cinder_client = OpenStackDriver.os_clients_dict[
                region_name]['cinder']
            self.neutron_client = OpenStackDriver.os_clients_dict[
                region_name]['neutron']
        else:
            # Create new objects and cache them
            LOG.info(_("Creating fresh OS Clients objects"))
            self.neutron_client = NeutronClient(region_name,
                                                self.disabled_quotas,
                                                self.keystone_client.session)
            self.nova_client = NovaClient(region_name,
                                          self.keystone_client.session,
                                          self.disabled_quotas)
            self.cinder_client = CinderClient(region_name,
                                              self.disabled_quotas,
                                              self.keystone_client.session)
            OpenStackDriver.os_clients_dict[
                region_name] = collections.defaultdict(dict)
            OpenStackDriver.os_clients_dict[region_name][
                'nova'] = self.nova_client
            OpenStackDriver.os_clients_dict[region_name][
                'cinder'] = self.cinder_client
            OpenStackDriver.os_clients_dict[region_name][
                'neutron'] = self.neutron_client

    def get_enabled_projects(self):
        """List enabled keystone projects; logs and returns None on error."""
        try:
            return self.keystone_client.get_enabled_projects()
        except Exception as exception:
            # NOTE(review): '.message' exists only on Python 2 exceptions;
            # under Python 3 this logging line itself would raise
            # AttributeError -- confirm the target runtime.
            LOG.error('Error Occurred: %s', exception.message)

    def get_enabled_users(self):
        """List enabled keystone users; logs and returns None on error."""
        try:
            return self.keystone_client.get_enabled_users()
        except Exception as exception:
            LOG.error('Error Occurred : %s', exception.message)

    def get_resource_usages(self, project_id):
        """Return (nova, neutron, cinder) usage dicts for *project_id*.

        On auth/connectivity failures the region's cached clients are
        dropped; in every error path None is returned implicitly.
        """
        try:
            nova_usages = self.nova_client.get_resource_usages(project_id)
            neutron_usages = self.neutron_client.get_resource_usages(
                project_id)
            cinder_usages = self.cinder_client.get_resource_usages(project_id)
            return nova_usages, neutron_usages, cinder_usages
        except (exceptions.ConnectionRefused, exceptions.NotAuthorized,
                exceptions.TimeOut):
            # Delete the cached objects for that region
            del OpenStackDriver.os_clients_dict[self.region_name]
        except Exception as exception:
            LOG.error('Error Occurred: %s', exception.message)

    def write_quota_limits(self, project_id, limits_to_write):
        """Push new quota limits for *project_id* to nova/cinder/neutron.

        ``limits_to_write`` must have 'nova', 'cinder' and 'neutron' keys.
        """
        try:
            self.nova_client.update_quota_limits(project_id,
                                                 **limits_to_write['nova'])
            self.cinder_client.update_quota_limits(project_id,
                                                   **limits_to_write['cinder'])
            self.neutron_client.update_quota_limits(project_id,
                                                    limits_to_write['neutron'])
        except (exceptions.ConnectionRefused, exceptions.NotAuthorized,
                exceptions.TimeOut):
            # Delete the cached objects for that region
            del OpenStackDriver.os_clients_dict[self.region_name]
        except Exception as exception:
            LOG.error('Error Occurred: %s', exception.message)

    def delete_quota_limits(self, project_id):
        """Reset *project_id*'s quota limits on all three services."""
        try:
            self.nova_client.delete_quota_limits(project_id)
            self.neutron_client.delete_quota_limits(project_id)
            self.cinder_client.delete_quota_limits(project_id)
        except (exceptions.ConnectionRefused, exceptions.NotAuthorized,
                exceptions.TimeOut):
            # Delete the cached objects for that region
            del OpenStackDriver.os_clients_dict[self.region_name]
        except Exception as exception:
            LOG.error('Error Occurred: %s', exception.message)

    def _get_disabled_quotas(self, region):
        """Quota field names to skip because the owning service is absent."""
        disabled_quotas = []
        # Cinder quotas apply only when a volume service is registered.
        if not self.keystone_client.is_service_enabled('volume') and \
                not self.keystone_client.is_service_enabled('volumev2'):
            disabled_quotas.extend(consts.CINDER_QUOTA_FIELDS)
        # Neutron
        if not self.keystone_client.is_service_enabled('network'):
            disabled_quotas.extend(consts.NEUTRON_QUOTA_FIELDS)
        else:
            # With neutron present these nova-network/nova fields are
            # managed by neutron instead, so skip them on the nova side.
            disabled_quotas.extend(['floating_ips', 'fixed_ips'])
            disabled_quotas.extend(['security_groups',
                                    'security_group_rules'])
        return disabled_quotas

    def get_all_regions_for_project(self, project_id):
        """Regions visible to *project_id* (endpoint-filtered, else all)."""
        try:
            # Retrieve regions based on endpoint filter for the project.
            region_lists = self._get_filtered_regions(project_id)
            if not region_lists:
                # If endpoint filter is not used for the project, then
                # return all regions
                region_lists = endpoint_cache.EndpointCache().get_all_regions()
            return region_lists
        except Exception as exception:
            LOG.error('Error Occurred: %s', exception.message)
            raise

    def _get_filtered_regions(self, project_id):
        """Regions allowed for *project_id* via keystone endpoint filters."""
        return self.keystone_client.get_filtered_region(project_id)

    def _is_token_valid(self):
        """True if the cached keystone token is valid and not about to expire.

        Any validation failure (or imminent expiry within
        STALE_TOKEN_DURATION seconds) resets the whole client cache.
        """
        keystone = self.os_clients_dict['keystone'].keystone_client
        try:
            token = keystone.tokens.validate(keystone.session.get_token())
        except Exception as exception:
            LOG.info('Exception Occurred: %s', exception.message)
            # Reset the cached dictionary
            OpenStackDriver.os_clients_dict = collections.defaultdict(dict)
            return False
        expiry_time = timeutils.normalize_time(timeutils.parse_isotime(
            token['expires_at']))
        if timeutils.is_soon(expiry_time, STALE_TOKEN_DURATION):
            LOG.info('The cached keystone token will expire soon')
            # Reset the cached dictionary
            OpenStackDriver.os_clients_dict = collections.defaultdict(dict)
            return False
        else:
            return True
| StarcoderdataPython |
81116 | <filename>code/tests/test_prepare/test_pmi.py<gh_stars>1-10
import os.path
from unittest import TestCase
from code.cli import PARAMS_DIR, TESTS_DIR
from code.prepare.base import load_data
from code.prepare.lexstat import set_schema
from code.prepare.pmi import *
from code.prepare.params import load_params
FIXTURE_DATASET = os.path.join(TESTS_DIR, 'fixtures/GER.tsv')
class PMITestCase(TestCase):
    """Tests for the PMI preparation helpers, driven by the bundled
    Germanic (GER.tsv) fixture dataset; expected counts and scores are
    specific to that fixture."""

    def setUp(self):
        # Fresh params and fixture data for every test.
        self.params = load_params(PARAMS_DIR)
        self.data = load_data(FIXTURE_DATASET)

    def test_get_pairs(self):
        # get_pairs returns (synonymous, non-synonymous) word pairs for the
        # language pair; synonym keys look like "<gloss>/<langs>/<indices>".
        syn, non_syn = get_pairs('English', 'German', self.data)
        self.assertEqual(len(syn), 117)
        self.assertIn('962/English,German/1,1', syn)
        self.assertIn('962/English,German/1,2', syn)
        self.assertEqual(len(non_syn), 12763)
        self.assertIn(('ɔːl', 'jaːr'), non_syn)
        self.assertIn(('jɪər', 'al'), non_syn)

    def test_get_asjp_data(self):
        # {language: {gloss_id: [ASJP transcriptions]}} with 814 words total.
        data = get_asjp_data(self.data, self.params)
        count = sum([len(j) for i in data.values() for j in i.values()])
        self.assertEqual(count, 814)
        self.assertEqual(data['English']['98'], ['ol'])
        self.assertEqual(data['Norwegian']['1226'], ['or'])
        self.assertEqual(data['German']['962'], ['frau', 'vaip'])

    def test_get_asjp_data_in_lexstat_asjp_mode(self):
        # Switching LexStat to its 'asjp' schema must not change the output.
        set_schema('asjp')
        data = get_asjp_data(self.data, self.params)
        count = sum([len(j) for i in data.values() for j in i.values()])
        self.assertEqual(count, 814)
        self.assertEqual(data['English']['98'], ['ol'])
        self.assertEqual(data['Norwegian']['1226'], ['or'])
        self.assertEqual(data['German']['962'], ['frau', 'vaip'])

    def test_calc_pmi(self):
        # Exact PMI score for a known cognate pair under the fixture params.
        self.assertEqual(calc_pmi('ol', 'al', self.params), 2.960483758607)

    def test_prepare_lang_pair(self):
        # Each sample is a 5-tuple: (PMI, ..., ...) -- the last two elements
        # are identical across samples (checked in the loop below).
        asjp_data = get_asjp_data(self.data, self.params)
        s = prepare_lang_pair('English', 'German', asjp_data, self.params)
        self.assertEqual(len(s), 117)
        self.assertEqual(s['962/English,German/1,1'][0], -7.005012217116)
        self.assertAlmostEqual(s['962/English,German/1,1'][1], 0.4219680350987151, 1)
        self.assertAlmostEqual(s['962/English,German/1,1'][2], 0.8628257140265899, 1)
        self.assertEqual(s['962/English,German/1,2'][0], -7.557346819036999)
        self.assertAlmostEqual(s['962/English,German/1,2'][1], 0.477984957693513, 1)
        self.assertAlmostEqual(s['962/English,German/1,2'][2], 0.7381760162462817, 1)
        for sample in s.values():
            self.assertEqual(len(sample), 5)
            self.assertAlmostEqual(sample[3], 3.63223180795, 0)
            self.assertAlmostEqual(sample[4], 1.289847282477176, 1)
| StarcoderdataPython |
1675540 | <reponame>Kh4L/gluon-cv<filename>gluoncv/model_zoo/action_recognition/c3d.py
"""C3D, implemented in Gluon. https://arxiv.org/abs/1412.0767"""
# pylint: disable=arguments-differ,unused-argument
__all__ = ['C3D', 'c3d_kinetics400']
from mxnet import init
from mxnet.context import cpu
from mxnet.gluon.block import HybridBlock
from mxnet.gluon import nn
class C3D(HybridBlock):
    r"""
    The Convolutional 3D network (C3D).
    Learning Spatiotemporal Features with 3D Convolutional Networks.
    ICCV, 2015. https://arxiv.org/abs/1412.0767

    Parameters
    ----------
    nclass : int
        Number of classes in the training dataset.
    num_segments : int, default is 1.
        Number of segments used to evenly divide a video.
    num_crop : int, default is 1.
        Number of crops used during evaluation, choices are 1, 3 or 10.
    feat_ext : bool.
        Whether to extract features before dense classification layer or
        do a complete forward pass.
    dropout_ratio : float
        Dropout value used in the dropout layers after dense layers to avoid overfitting.
    init_std : float
        Default standard deviation value for initializing dense layers.
    ctx : str
        Context, default CPU. The context in which to load the pretrained weights.
    """
    def __init__(self, nclass, dropout_ratio=0.5,
                 num_segments=1, num_crop=1, feat_ext=False,
                 init_std=0.001, ctx=None, **kwargs):
        super(C3D, self).__init__()
        self.num_segments = num_segments
        self.num_crop = num_crop
        self.feat_ext = feat_ext
        # Flattened size of the last pooling output feeding fc6.
        self.feat_dim = 8192

        with self.name_scope():
            # All convolutions are 3x3x3 with padding 1 (shape-preserving).
            self.conv1 = nn.Conv3D(in_channels=3, channels=64,
                                   kernel_size=(3, 3, 3), padding=(1, 1, 1))
            # pool1 downsamples only spatially, keeping early temporal detail.
            self.pool1 = nn.MaxPool3D(pool_size=(1, 2, 2), strides=(1, 2, 2))
            self.conv2 = nn.Conv3D(in_channels=64, channels=128,
                                   kernel_size=(3, 3, 3), padding=(1, 1, 1))
            self.pool2 = nn.MaxPool3D(pool_size=(2, 2, 2), strides=(2, 2, 2))
            self.conv3a = nn.Conv3D(in_channels=128, channels=256,
                                    kernel_size=(3, 3, 3), padding=(1, 1, 1))
            self.conv3b = nn.Conv3D(in_channels=256, channels=256,
                                    kernel_size=(3, 3, 3), padding=(1, 1, 1))
            self.pool3 = nn.MaxPool3D(pool_size=(2, 2, 2), strides=(2, 2, 2))
            self.conv4a = nn.Conv3D(in_channels=256, channels=512,
                                    kernel_size=(3, 3, 3), padding=(1, 1, 1))
            self.conv4b = nn.Conv3D(in_channels=512, channels=512,
                                    kernel_size=(3, 3, 3), padding=(1, 1, 1))
            self.pool4 = nn.MaxPool3D(pool_size=(2, 2, 2), strides=(2, 2, 2))
            self.conv5a = nn.Conv3D(in_channels=512, channels=512,
                                    kernel_size=(3, 3, 3), padding=(1, 1, 1))
            self.conv5b = nn.Conv3D(in_channels=512, channels=512,
                                    kernel_size=(3, 3, 3), padding=(1, 1, 1))
            # Spatial padding here yields the 8192-dim flattened feature.
            self.pool5 = nn.MaxPool3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding=(0, 1, 1))

            self.fc6 = nn.Dense(in_units=8192, units=4096,
                                weight_initializer=init.Normal(sigma=init_std))
            self.fc7 = nn.Dense(in_units=4096, units=4096,
                                weight_initializer=init.Normal(sigma=init_std))
            self.fc8 = nn.Dense(in_units=4096, units=nclass,
                                weight_initializer=init.Normal(sigma=init_std))
            self.dropout = nn.Dropout(rate=dropout_ratio)
            self.relu = nn.Activation('relu')

    def hybrid_forward(self, F, x):
        """Hybrid forward of C3D net"""
        x = self.relu(self.conv1(x))
        x = self.pool1(x)

        x = self.relu(self.conv2(x))
        x = self.pool2(x)

        x = self.relu(self.conv3a(x))
        x = self.relu(self.conv3b(x))
        x = self.pool3(x)

        x = self.relu(self.conv4a(x))
        x = self.relu(self.conv4b(x))
        x = self.pool4(x)

        x = self.relu(self.conv5a(x))
        x = self.relu(self.conv5b(x))
        x = self.pool5(x)

        # segmental consensus
        # Average the per-segment/per-crop features of each video clip
        # before classification.
        x = F.reshape(x, shape=(-1, self.num_segments * self.num_crop, self.feat_dim))
        x = F.mean(x, axis=1)

        x = self.relu(self.fc6(x))
        x = self.dropout(x)

        # Optionally stop here and return the 4096-dim fc6 features.
        if self.feat_ext:
            return x

        x = self.relu(self.fc7(x))
        x = self.dropout(x)
        x = self.fc8(x)

        return x
return x
def c3d_kinetics400(nclass=400, pretrained=False, ctx=cpu(),
                    root='~/.mxnet/models', num_segments=1, num_crop=1,
                    feat_ext=False, **kwargs):
    r"""The Convolutional 3D network (C3D) trained on Kinetics400 dataset.
    Learning Spatiotemporal Features with 3D Convolutional Networks.
    ICCV, 2015. https://arxiv.org/abs/1412.0767

    Parameters
    ----------
    nclass : int.
        Number of categories in the dataset.
    pretrained : bool or str.
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    ctx : Context, default CPU.
        The context in which to load the pretrained weights.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.
    num_segments : int, default is 1.
        Number of segments used to evenly divide a video.
    num_crop : int, default is 1.
        Number of crops used during evaluation, choices are 1, 3 or 10.
    feat_ext : bool.
        Whether to extract features before dense classification layer or
        do a complete forward pass.
    """
    model = C3D(nclass=nclass, ctx=ctx, num_segments=num_segments,
                num_crop=num_crop, feat_ext=feat_ext, **kwargs)
    # Random initialization first; overwritten below when pretrained.
    model.initialize(init.MSRAPrelu(), ctx=ctx)

    if pretrained:
        from ..model_store import get_model_file
        model.load_parameters(get_model_file('c3d_kinetics400',
                                             tag=pretrained, root=root), ctx=ctx)
        # Attach the human-readable Kinetics400 class labels.
        from ...data import Kinetics400Attr
        attrib = Kinetics400Attr()
        model.classes = attrib.classes
        model.collect_params().reset_ctx(ctx)

    return model
| StarcoderdataPython |
83312 | <reponame>sirca/bdkd_datastore<gh_stars>1-10
# Copyright 2015 Nicta
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import boto.s3.connection
import io
import errno
import hashlib
import json
import logging
import os, stat, sys, time, getpass
from datetime import datetime
import shutil
import urlparse, urllib2
import yaml
import re
import warnings
import copy
import tarfile
import posixpath
import logging
logging.getLogger('boto').setLevel(logging.CRITICAL)
_config_global_file = '/etc/bdkd/Current/datastore.conf'
_config_user_file = os.path.expanduser(os.environ.get('BDKD_DATASTORE_CONFIG', '~/.bdkd_datastore.conf'))
_settings = None
_hosts = None
_repositories = None
TIME_FORMAT = '%a, %d %b %Y %H:%M:%S %Z'
ISO_8601_UTC_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
BDKD_FILE_SUFFIX = '.bdkd'
logger = logging.getLogger(__name__)
def get_uid():
    """
    Get a unique user identifier in a cross-platform manner.

    On Unix type systems this is os.getuid(); elsewhere (e.g. Windows,
    where os.getuid does not exist) it falls back to getpass.getuser().
    """
    getuid = getattr(os, 'getuid', None)
    if getuid is None:
        return getpass.getuser()
    return getuid()
def checksum(local_path):
    """Return the hex MD5 digest of *local_path*'s contents, or None when
    the file does not exist."""
    if not os.path.exists(local_path):
        return None
    digest = hashlib.md5()
    with open(local_path, 'rb') as handle:
        # Stream in 1 MiB chunks so large files don't load into memory.
        for chunk in iter(lambda: handle.read(1048576), b''):
            digest.update(chunk)
    return digest.hexdigest()
def mkdir_p(dest_dir):
    """Make a directory, including all parent directories.

    Silently succeeds when the directory already exists; every other
    OSError is re-raised.
    """
    try:
        os.makedirs(dest_dir)
    except OSError as e:
        # Bug fix: 'except OSError, e' is Python-2-only syntax; the 'as'
        # form works on Python 2.6+ and Python 3.
        if e.errno != errno.EEXIST:
            raise
def common_directory(paths):
    """
    Find the directory common to a set of paths.

    This function differs from os.path.commonprefix() which has no concept
    of directories (it works a character at a time). This function breaks
    each path up into directory components for comparison.

    Returns '' when there is no common component (or *paths* is empty).
    NOTE(review): components are split on os.sep but re-joined with
    posixpath.join -- identical on POSIX, inconsistent on Windows; kept
    as-is to preserve behaviour.
    """
    common_parts = []
    shortest_path = None
    for path in paths:
        parts = path.split(os.sep)
        if shortest_path is None or len(parts) < shortest_path:
            shortest_path = len(parts)
        for i, part in enumerate(parts):
            if i >= len(common_parts):
                common_parts.append(part)
            elif part != common_parts[i]:
                # Mark mismatching positions; they terminate the common run.
                common_parts[i] = None
    # No path can have fewer components than the shortest path.
    common_parts = common_parts[:shortest_path]
    common_count = 0
    for part in common_parts:
        if part is None:
            break
        common_count += 1
    common_parts = common_parts[:common_count]
    if common_count:
        # Preserve a leading separator when the first path is absolute.
        leading = os.sep if paths[0] and paths[0][0] == os.sep else ''
        return leading + posixpath.join(*common_parts)
    return ''
def touch(fname, times=None):
    """Update the timestamps of a local file, creating it when absent.

    *times* is forwarded to os.utime (an (atime, mtime) pair, or None for
    'now').
    """
    # Bug fix: the original used the 'file()' builtin, which does not
    # exist on Python 3; open() behaves identically here on both.
    with open(fname, 'a'):
        os.utime(fname, times)
class Host(object):
    """
    A host that provides a S3-compatible service.
    """
    def __init__( self, access_key=None, secret_key=None,
            host='s3.amazonaws.com', port=None,
            secure=True):
        # boto S3 connection to the (possibly non-AWS) endpoint.
        self.connection = boto.s3.connection.S3Connection(
                aws_access_key_id=access_key,
                aws_secret_access_key=secret_key,
                host=host, port=port,
                is_secure=secure)
        # "host:port" network location identifying this endpoint.
        # NOTE(review): when port is None this renders as "host:None" --
        # confirm that is intended by downstream consumers.
        self.netloc = '{0}:{1}'.format(host,port)
class Repository(object):
    """
    Storage container for a Resource and its Files.

    The Repository may be backed by a S3 host, in which case operations may
    involve coordinating reads and writes between a remote object store and a
    local filesystem cache.
    """
    # Bucket layout: resource metadata documents, file payloads and bundled
    # (tar.gz) payloads live under these key prefixes respectively.
    resources_prefix = 'resources'
    files_prefix = 'files'
    bundle_prefix = 'bundle'

    def __init__(self, host, name, cache_path=None, stale_time=60):
        """
        Create a "connection" to a Repository.

        :param host: Host providing S3 access, or None for cache-only use
        :param name: bucket name of the repository
        :param cache_path: root of the local cache (defaults to the
            configured 'cache_root' setting)
        :param stale_time: seconds for which a cached file is considered
            fresh and not re-checked against the object store
        """
        self.host = host
        self.name = name
        # Cache is namespaced per user and per repository so multiple users
        # on one machine do not collide.
        self.local_cache = posixpath.join(
                (cache_path or settings()['cache_root']),
                str(get_uid()),
                name)
        self.bucket = None
        self.stale_time = stale_time

    def get_bucket(self):
        """
        Get the S3 bucket for this Repository (or None if no host is configured).

        The bucket handle is fetched lazily and cached on the instance.
        """
        if self.host and not self.bucket:
            try:
                self.bucket = self.host.connection.get_bucket(self.name)
            except: # I want to narrow this down, but the docs are not clear on what can be raised...
                # Bare except is tolerable here only because we log and
                # immediately re-raise.
                print >>sys.stderr, 'Error accessing repository "{0}"'.format(self.name)
                raise
        return self.bucket

    def __resource_name_key(self, name):
        # For the given Resource name, return the S3 key string.
        return posixpath.join(type(self).resources_prefix, name)

    def __resource_name_cache_path(self, name):
        # For the given Resource name, return the path that would be used
        # for a local cache file.
        return posixpath.join(self.local_cache, type(self).resources_prefix, name)

    def __file_keyname(self, resource_file):
        # For the given ResourceFile, return the S3 key string.
        return resource_file.location()

    def __file_cache_path(self, resource_file):
        # For the given ResourceFile, return the path that would be used
        # for a local cache file.
        return os.path.expanduser(posixpath.join(self.local_cache,
                resource_file.location_or_remote()))

    def _rebuild_required(self, resource, obj_list):
        # For the given resource, check if a file list rebuild is necessary
        # by comparing the timestamp of the most recently modified file to
        # that of the metadata file.
        # NOTE(review): assumes the resource metadata key exists; an empty
        # get_all_keys() result would raise IndexError here -- confirm.
        resource_keyname = self.__resource_name_key(resource.name)
        resource_key = self.get_bucket().get_all_keys(prefix=resource_keyname)[0]
        resource_timestamp = datetime.strptime(resource_key.last_modified, ISO_8601_UTC_FORMAT)
        rebuild_required = False
        for obj in obj_list:
            obj_timestamp = datetime.strptime(obj.last_modified, ISO_8601_UTC_FORMAT)
            if obj_timestamp > resource_timestamp: # i.e. if object is newer than resource metadata
                rebuild_required = True
                break
        return rebuild_required

    def file_path(self, resource_file):
        """Return the local cache path for *resource_file*."""
        return self.__file_cache_path(resource_file)

    def rebuild_file_list(self, resource):
        """
        Re-scan object storage for files belonging to *resource* and update
        the Resource's file list from what is actually stored.

        Returns True when a rebuild was performed, False when storage is
        unavailable or nothing changed.
        """
        bucket = self.get_bucket()
        if not bucket:
            return False
        prefix = Repository.files_prefix + '/' + resource.name + '/'
        obj_list = bucket.get_all_keys(prefix=prefix)
        if not self._rebuild_required(resource, obj_list):
            logger.debug("Rebuild not required")
            return False
        new_files = {}
        for obj in obj_list:
            if obj.key == prefix: # if "directory" name appears, skip
                continue
            # Was obj.key[:-5]: derive the slice from the suffix constant
            # instead of a magic number.
            if obj.key.endswith(BDKD_FILE_SUFFIX) and obj.key[:-len(BDKD_FILE_SUFFIX)] in new_files:
                # If this is a .bdkd file, delete (since S3 always returns
                # values in alphabetical order, we can assume the main
                # file is already in the list)
                obj.delete()
                continue
            md5file = bucket.get_all_keys(prefix=obj.key + BDKD_FILE_SUFFIX)
            if len(md5file) == 1: # if md5 file exists, this is a newly found file
                obj_md5 = md5file[0].get_contents_as_string().strip()
                new_files[obj.key] = obj.size, obj.last_modified, obj_md5
        resource.add_files_from_storage_paths(new_files)
        return True

    def __download(self, key_name, dest_path):
        # Ensure that a file on the local system is up-to-date with respect
        # to an object in the S3 repository, downloading it if required.
        # Returns True if the remote object was downloaded.
        bucket = self.get_bucket()
        if not bucket:
            return False
        local_exists = os.path.exists(dest_path)
        # Skip the round-trip entirely while the cached copy is "fresh".
        if local_exists and self.stale_time and (time.time() - os.stat(dest_path)[stat.ST_MTIME]) < self.stale_time:
            logger.debug("Not refreshing %s: not stale", dest_path)
            return False
        key = bucket.get_key(key_name)
        if key:
            logger.debug("Key %s exists", key_name)
            if local_exists:
                if key.etag.strip('"') == checksum(dest_path):
                    logger.debug("Checksum match -- no need to refresh")
                    # Touch the cache file so the stale timer restarts; cope
                    # with read-only cache files by temporarily widening the
                    # mode, then restoring it.
                    try:
                        touch(dest_path)
                    except IOError as e:
                        # Fixed py2-only "except IOError, e" spelling.
                        if e.errno != errno.EACCES:
                            raise
                        else:
                            mode = os.stat(dest_path).st_mode
                            os.chmod(dest_path, stat.S_IRWXU|stat.S_IRWXG)
                            touch(dest_path)
                            os.chmod(dest_path, mode)
                    return False
                else:
                    logger.debug("Removing destination file %s before overwriting", dest_path)
                    os.remove(dest_path)
            else:
                mkdir_p(os.path.dirname(dest_path))
            with open(dest_path, 'wb') as fh:
                logger.debug("Retrieving repository data to %s", dest_path)
                key.get_contents_to_file(fh)
            return True
        else:
            logger.debug("Key %s does not exist in repository, not refreshing", key_name)
            return False

    def __upload(self, key_name, src_path, write_bdkd_file=False, md5sum=None):
        # Ensure that an object in the S3 repository is up-to-date with
        # respect to a file on the local system, uploading it if required.
        # Returns True if the local file was uploaded.  When
        # write_bdkd_file is set and an md5sum is supplied, a sidecar
        # "<key>.bdkd" object holding the checksum is written too.
        bucket = self.get_bucket()
        if not bucket:
            return False
        do_upload = True
        file_key = bucket.get_key(key_name)
        if file_key:
            logger.debug("Existing key %s", key_name)
            if file_key.etag.strip('"') == checksum(src_path):
                logger.debug("Local file %s unchanged", src_path)
                do_upload = False
        else:
            logger.debug("New key %s", key_name)
            file_key = boto.s3.key.Key(bucket, key_name)
        if do_upload:
            logger.debug("Uploading to %s from %s", key_name, src_path)
            file_key.set_contents_from_filename(src_path)
            if write_bdkd_file and md5sum:
                bdkd_file_key = boto.s3.key.Key(bucket, key_name + BDKD_FILE_SUFFIX)
                bdkd_file_key.set_contents_from_string(md5sum)
        return do_upload

    def __delete(self, key_name):
        # Delete the object identified by the key name from the S3 repository.
        bucket = self.get_bucket()
        if bucket:
            key = boto.s3.key.Key(bucket, key_name)
            bucket.delete_key(key)

    def __refresh_remote(self, url, local_path, etag=None, mod=stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH):
        # Mirror a (non-S3) remote URL to local_path, honouring ETag and
        # Last-Modified headers to avoid needless downloads.  Returns True
        # when the file was (re)downloaded.
        remote = urllib2.urlopen(urllib2.Request(url))
        if remote.info().has_key('etag'):
            if remote.info().getheader('etag') == etag:
                return False
        if remote.info().has_key('last-modified') and os.path.exists(local_path):
            local_mtime = os.stat(local_path).st_mtime
            last_modified = time.mktime(time.strptime(
                    remote.info().getheader('last-modified'),
                    TIME_FORMAT))
            if last_modified < local_mtime:
                return False
        # Need to download file
        if os.path.exists(local_path):
            os.remove(local_path)
        mkdir_p(os.path.dirname(local_path))
        with open(local_path, 'wb') as fh:
            shutil.copyfileobj(remote, fh)
        remote.close()
        os.chmod(local_path, mod)
        return True

    def __delete_resource_file(self, resource_file):
        # Remove a single ResourceFile from both storage and the local cache.
        key_name = self.__file_keyname(resource_file)
        bucket = self.get_bucket()
        if bucket and key_name:
            self.__delete(key_name)
        cache_path = self.__file_cache_path(resource_file)
        if os.path.exists(cache_path):
            os.remove(cache_path)

    def __delete_resource(self, resource):
        # Remove a Resource: all its files (and bundle, if any), then its
        # metadata document, from both storage and the local cache.
        for resource_file in (resource.files + [resource.bundle]):
            if resource_file:
                self.__delete_resource_file(resource_file)
        key_name = self.__resource_name_key(resource.name)
        bucket = self.get_bucket()
        if bucket and key_name:
            self.__delete(key_name)
        cache_path = self.__resource_name_cache_path(resource.name)
        if os.path.exists(cache_path):
            os.remove(cache_path)

    def __resource_name_conflict(self, resource_name):
        """
        Check whether a Resource name conflicts with some existing Resource.

        Because names form key paths, "a/b" conflicts with both a longer
        name "a/b/c" and a shorter name "a".  Returns the name(s) of any
        conflicting Resources or None if no conflict found.
        """
        resources_prefix = type(self).resources_prefix
        bucket = self.get_bucket()
        if bucket:
            # Check for conflict with a longer path name
            key_prefix = self.__resource_name_key(resource_name) + '/'
            resource_names = []
            for key in bucket.list(key_prefix):
                resource_names.append(key.name[(len(type(self).resources_prefix) + 1):])
            if len(resource_names) > 0:
                # There are other Resources whose names start with this
                # Resource name
                return resource_names
            # Check for conflict with a shorter name
            name_parts = resource_name.split('/')[0:-1]
            while len(name_parts):
                key_name = self.__resource_name_key('/'.join(name_parts))
                key = bucket.get_key(key_name)
                if key:
                    return [ key_name ]
                name_parts = name_parts[0:-1]
        return None

    def _resource_file_dest_path(self, resource_file):
        # Where a refreshed copy of resource_file should land locally.
        dest_path = self.__file_cache_path(resource_file)
        logger.debug("Cache path for resource file is %s", dest_path)
        return dest_path

    def _refresh_resource_file(self, resource_file):
        # Refresh one ResourceFile into the local cache -- from S3 when it
        # has a repository location, otherwise from its remote URL.
        # Bundled files are skipped (their payload lives in the bundle).
        dest_path = self._resource_file_dest_path(resource_file)
        bucket = self.get_bucket()
        if bucket and not resource_file.is_bundled():
            location = resource_file.location()
            if location:
                if self.__download(location, dest_path):
                    logger.debug("Refreshed resource file from %s to %s", location, dest_path)
                else:
                    logger.debug("Not refreshing resource file %s to %s", location, dest_path)
            else:
                self.__refresh_remote(resource_file.remote(), dest_path, resource_file.meta('ETag'))
        resource_file.path = dest_path
        return dest_path

    def refresh_resource(self, resource, refresh_all=False):
        """
        Synchronise a locally-cached Resource with the Repository's remote host
        (if applicable).

        This method ensures that the local Resource is up-to-date with respect
        to the S3 object store.  However if there is no Host for this
        Repository then no action needs to be performed.
        """
        bucket = self.get_bucket()
        if not bucket:
            return
        cache_path = self.__resource_name_cache_path(resource.name)
        resource_key = self.__resource_name_key(resource.name)
        if self.__download(resource_key, cache_path) or refresh_all:
            if os.path.exists(cache_path):
                resource.reload(cache_path)
            for resource_file in resource.files:
                self._refresh_resource_file(resource_file)
                logger.debug("Refreshed resource file with path %s", resource_file.path)

    def __save_resource_file(self, resource_file, write_bdkd_file=False):
        # Relocate the file into the local cache (if needed) and upload it.
        file_cache_path = self.__file_cache_path(resource_file)
        if resource_file.path and os.path.exists(resource_file.path) and resource_file.location():
            if resource_file.path != file_cache_path:
                resource_file.relocate(file_cache_path)
            bucket = self.get_bucket()
            if bucket:
                file_keyname = self.__file_keyname(resource_file)
                md5sum = None
                if 'md5sum' in resource_file.metadata:
                    md5sum = resource_file.metadata['md5sum']
                self.__upload(file_keyname, resource_file.path, write_bdkd_file=write_bdkd_file,
                        md5sum=md5sum)

    def save(self, resource, overwrite=False, update_bundle=True, skip_resource_file=False):
        """
        Save a Resource to the Repository.

        :param overwrite: allow replacing an existing Resource of the same name
        :param update_bundle: rebuild and upload the bundle (when bundled)
        :param skip_resource_file: do not write/upload the metadata document
            (used when only the data files changed)
        :raises ValueError: on name conflict, or when the Resource exists
            and *overwrite* is False
        """
        conflicting_names = self.__resource_name_conflict(resource.name)
        if conflicting_names:
            raise ValueError("The Resource name '" + resource.name +
                    "' conflicts with other Resource names including: " +
                    ', '.join(conflicting_names))
        resource_cache_path = self.__resource_name_cache_path(resource.name)
        if not skip_resource_file:
            resource.write(resource_cache_path)
            resource.path = resource_cache_path
        if resource.repository != self:
            logger.debug("Setting the repository for the resource")
            resource.repository = self
        if resource.bundle:
            if update_bundle:
                resource.update_bundle()
                self.__save_resource_file(resource.bundle)
            else:
                # Bundle untouched: only process pending file deletions.
                if resource.files_to_be_deleted:
                    for resource_file in resource.files_to_be_deleted:
                        self.__delete_resource_file(resource_file)
                    resource.files_to_be_deleted = []
        else:
            for resource_file in resource.files:
                self.__save_resource_file(resource_file, write_bdkd_file=skip_resource_file)
        bucket = self.get_bucket()
        if bucket:
            resource_keyname = self.__resource_name_key(resource.name)
            resource_key = bucket.get_key(resource_keyname)
            if resource_key:
                if not overwrite:
                    raise ValueError("Resource already exists!")
            else:
                resource_key = boto.s3.key.Key(bucket, resource_keyname)
            if not skip_resource_file:
                logger.debug("Uploading resource from %s to key %s", resource_cache_path, resource_keyname)
                resource_key.set_contents_from_filename(resource_cache_path)

    def move(self, from_resource, to_name):
        """
        Move *from_resource* to *to_name*: copy, then delete the original.
        """
        try:
            self.copy(from_resource, to_name)
            from_resource.reload(from_resource.path)
            from_resource.delete()
        except Exception as e:
            # NOTE(review): e.message is deprecated even in late Python 2.
            print >>sys.stderr, e.message
            raise

    def copy(self, from_resource, to_name):
        """
        Copy resource from its original position to the given name in this repository.

        :raises ValueError: if either repository lacks storage, the new
            name conflicts, or the new name is already in use
        """
        # Get destination bucket (needs to exist)
        to_bucket = self.get_bucket()
        if not to_bucket:
            raise ValueError("Can only rename into a storage-backed repository")
        # Check that from_resource has a bucket
        from_bucket = None
        if from_resource.repository:
            from_bucket = from_resource.repository.get_bucket()
        if not from_bucket:
            raise ValueError("Can only rename a Resource into a storage-backed repository")
        # Check that to_name has no conflicts with existing resources
        conflicting_names = self.__resource_name_conflict(to_name)
        if conflicting_names:
            raise ValueError("The Resource name '" + to_name +
                    "' conflicts with other Resource names including: " +
                    ', '.join(conflicting_names))
        # Check that name is not already in use
        to_keyname = self.__resource_name_key(to_name)
        to_key = to_bucket.get_key(to_keyname)
        if to_key:
            raise ValueError("Cannot rename: name in use")
        # Create unsaved destination resource (also checks name)
        to_resource = Resource(to_name, files=[],
                metadata=copy.copy(from_resource.metadata))
        # Copy files to to_resource and save
        try:
            from_prefix = posixpath.join(Repository.files_prefix, from_resource.name, '')
            for from_file in from_resource.files:
                to_file = copy.copy(from_file)
                # Do S3 copy if in S3 (i.e. has 'location')
                if 'location' in from_file.metadata:
                    to_location = posixpath.join(Repository.files_prefix,
                            to_resource.name,
                            from_file.metadata['location'][len(from_prefix):])
                    if not from_file.is_bundled():
                        to_bucket.copy_key(to_location, from_bucket.name,
                                from_file.metadata['location'])
                    to_file.metadata['location'] = to_location
                # Add file to to_resource
                to_resource.files.append(to_file)
            if from_resource.bundle:
                to_resource.bundle = copy.copy(from_resource.bundle)
                to_resource.bundle.resource = to_resource
                from_location = from_resource.bundle.metadata['location']
                to_location = posixpath.join(Repository.files_prefix,
                        to_resource.name,
                        from_location[len(from_prefix):])
                to_bucket.copy_key(to_location, from_bucket.name,
                        from_location)
                to_resource.bundle.metadata['location'] = to_location
            # Save destination resource
            self.save(to_resource, update_bundle=False)
        except Exception as e:
            print >>sys.stderr, e.message
            # Undo: delete all to-files if save failed
            for to_file in to_resource.files:
                if 'location' in to_file.metadata:
                    self.__delete_resource_file(to_file)
            to_resource.delete()
            raise

    def list(self, prefix=''):
        """
        List all Resource names available in the Repository.

        If 'prefix' is provided then a subset of resources with that leading
        path will be returned.  Without a storage host, the local cache is
        walked instead.
        """
        resource_names = []
        resources_prefix = type(self).resources_prefix
        if prefix:
            resources_prefix = posixpath.join(resources_prefix, prefix)
        bucket = self.get_bucket()
        if bucket:
            for key in bucket.list(resources_prefix):
                resource_names.append(key.name[(len(type(self).resources_prefix) + 1):])
        else:
            resource_path = posixpath.join(self.local_cache,
                    type(self).resources_prefix)
            for (dirpath, dirnames, filenames) in os.walk(resource_path):
                if len(filenames):
                    for filename in filenames:
                        resource_names.append(posixpath.join(
                            dirpath[(len(resource_path) + 1):], filename))
        return resource_names

    def get(self, name):
        """
        Acquire a Resource by name.

        Returns the named resource, or None if no such resource exists in the
        Repository.
        """
        keyname = self.__resource_name_key(name)
        cache_path = self.__resource_name_cache_path(name)
        self.__download(keyname, cache_path)
        if os.path.exists(cache_path):
            resource = Resource.load(cache_path)
            resource.repository = self
            return resource
        else:
            return None

    def delete(self, resource_or_name, force_delete_published=False):
        """
        Delete a Resource -- either directly or by name.

        :raises ValueError: when the Resource is published and
            *force_delete_published* is not set
        """
        resource = None
        if isinstance(resource_or_name, Resource):
            resource = resource_or_name
            self.refresh_resource(resource)
        else:
            resource = self.get(resource_or_name)
        if not resource:
            return
        if resource.published and not force_delete_published:
            raise ValueError("Cannot delete a published resource without override")
        self.__delete_resource(resource)
        resource.repository = None

    def get_resource_key(self, name, key_attr=None):
        """
        Acquire the key for a resource in the object storage.

        :param name: name of the resource
        :param key_attr: the attribute of the key of interest (error purposes)
        :returns: the boto key for the resource
        :raises ValueError: when no bucket is available or the key is absent
        """
        bucket = self.get_bucket()
        if not bucket:
            if key_attr is None:
                key_attr = 'the bucket'
            # BUG FIX: was "'...' %s (key_attr)" -- the stray 's' parsed as
            # a call to an undefined name 's', raising NameError instead of
            # the intended ValueError message.
            raise ValueError('Cannot get %s for this repository.' % (key_attr))
        key_name = self.__resource_name_key(name)
        key = bucket.get_key(key_name)
        if not key:
            raise ValueError('Key %s does not exist in the repository' % (key_name))
        return key

    def get_resource_last_modified(self, name):
        """
        Acquire the last modified time of the resource (only look at the resource
        meta data rather than interrogate every single file under that resource).

        :param name: name of the resource
        :returns: the last modified date/time parsed from the S3 timestamp
        """
        key = self.get_resource_key(name, key_attr='last modified date')
        return boto.utils.parse_ts(key.last_modified)
class Asset(object):
    """
    Superclass of things that can be stored within a Repository. This includes
    Resource and ResourceFile objects.

    :ivar path:
        The local filesystem path of the Asset
    :ivar metadata:
        Dictionary of meta-data key/value pairs
    """
    def __init__(self):
        # Subclasses populate these; until then the Asset has no local
        # file, no metadata and no file list.
        self.path = None
        self.metadata = None
        self.files = None

    def relocate(self, dest_path, mod=stat.S_IRWXU,
            move=False):
        """
        Relocate this Asset's file to *dest_path*, apply *mod* permissions,
        and update ``self.path``.  Copies by default; moves when *move* is
        True.  A no-op when the Asset has no path.
        """
        if not self.path:
            return
        if os.path.exists(dest_path):
            os.remove(dest_path)
        else:
            mkdir_p(os.path.dirname(dest_path))
        transfer = shutil.move if move else shutil.copy2
        transfer(self.path, dest_path)
        os.chmod(dest_path, mod)
        self.path = dest_path

    def meta(self, keyname):
        """
        Return the meta-data value for *keyname*, or None when the key is
        absent or no metadata dictionary is set at all.
        """
        return self.metadata.get(keyname, None) if self.metadata else None
class MetadataException(Exception):
    """Raised when mandatory Resource metadata fields are missing."""
    def __init__(self, missing_fields):
        # Keep the offending field names for the caller to report.
        self.missing_fields = missing_fields
class AddFilesException(Exception):
    """Raised when added files conflict with files already on a Resource."""
    def __init__(self, conflicting_files):
        # Keep the conflicting file names for the caller to report.
        self.conflicting_files = conflicting_files
class DeleteFilesException(Exception):
    """Raised when files requested for deletion do not exist on a Resource."""
    def __init__(self, non_existent_files):
        # Keep the unknown file names for the caller to report.
        self.non_existent_files = non_existent_files
class Resource(Asset):
"""
A source of data consisting of one or more files plus associated meta-data.
:ivar repository:
The Repository to which this Resource belongs (if applicable)
:ivar name:
The name of the Resource (uniquely identifying it within its
Repository)
:ivar files:
A list of the ResourceFile instances associated with this Resource
"""
class ResourceJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, Resource):
resource = dict(name=o.name)
if o.metadata:
resource['metadata'] = o.metadata
file_data = []
if o.files:
for resource_file in o.files:
file_data.append(resource_file.metadata)
resource['files'] = file_data
if o.bundle:
resource['bundle'] = o.bundle.metadata
resource['published'] = o.published
return resource
else:
return json.JSONEncoder.default(self, o)
mandatory_metadata_fields = ['description', 'author', 'author_email']
@classmethod
def validate_name(cls, name):
if isinstance(name, basestring):
if ( len(name) and
name[0] != '/' and
name[-1] != '/' ):
return True
return False
@classmethod
def bundle_temp_path(cls, name):
return posixpath.join(settings()['cache_root'], str(get_uid()), name)
def __init__(self, name, files=None, bundle=None, metadata=None, publish=True):
"""
Constructor for a Resource given a name, file data and any meta-data.
"""
if name and not type(self).validate_name(name):
raise ValueError("Invalid resource name!")
super(Resource, self).__init__()
self.repository = None
self.name = name
if metadata and not isinstance(metadata, dict):
raise ValueError("Meta-data must be a dictionary")
self.metadata = metadata or dict()
self.bundle = bundle
self.files = files
self.files_to_be_deleted = []
self.published = publish
@classmethod
def __normalise_file_data(cls, raw_data):
files_data = [] ; common_prefix = ''
location_paths = []
if not isinstance(raw_data, list):
raw_data = [ raw_data ]
for file_data in raw_data:
path = None
location = None
meta = None
if isinstance(file_data, dict):
meta = file_data
path = meta.pop('path', None)
else:
# String form: either a repository or remote location
meta = {}
url = urlparse.urlparse(file_data)
if url.netloc:
meta['remote'] = file_data
else:
path = file_data
if not 'remote' in meta:
location = os.path.expanduser(path)
location_paths.append(os.path.dirname(location))
files_data.append(dict(path=path, location=location, meta=meta))
# Get the common prefix of all local paths
if len(location_paths):
if len(location_paths) > 1:
common_prefix = common_directory(location_paths)
else:
common_prefix = location_paths[0]
# Strip common prefix from all files with a location
for file_data in files_data:
if file_data['location'] != None:
file_data['location'] = file_data['location'][len(common_prefix):]
if file_data['location'][0] == '/':
file_data['location'] = file_data['location'][1:]
return files_data
@classmethod
def new(cls, name, files_data=None, metadata=None, do_bundle=False, publish=True):
"""
A convenience factory method that creates a new, unsaved Resource of
the given name, using file information and metadata.
The file data can be a single string filename or a dictionary of file
metadata. The filename can either be a local path ('path') or a
remote URL ('remote') that is either HTTP or FTP. For more than one
file provide an array of these.
The rest of the keyword arguments are used as Resource meta-data.
The Resource and all its ResourceFile objects ready to be saved to a Repository.
"""
resource_files = []
bundle = None
if files_data:
if do_bundle:
bundle_path = cls.bundle_temp_path(name)
bundle = ResourceFile(bundle_path, resource=None, metadata={
'location': posixpath.join(Repository.files_prefix, name,
'.bundle', 'bundle.tar.gz')})
mkdir_p(os.path.dirname(bundle_path))
bundle.files = []
bundle_archive = tarfile.open(name=bundle_path, mode='w:gz')
# resource_files.append(bundle)
files_data = cls.__normalise_file_data(files_data)
for file_data in files_data:
path = file_data.pop('path', None)
location = file_data.pop('location', None)
meta = file_data.pop('meta', None)
if 'remote' in meta:
remote_url = urllib2.urlopen(urllib2.Request(meta['remote']))
keyset = set(k.lower() for k in meta)
for header_name in [ 'etag', 'last-modified', 'content-length', 'content-type' ]:
if not header_name in keyset and remote_url.info().has_key(header_name):
meta[header_name] = remote_url.info().getheader(header_name)
else:
if do_bundle:
meta['bundled'] = True
meta['location'] = posixpath.join(Repository.files_prefix, name, location)
if path:
path = os.path.expanduser(path)
if not 'md5sum' in meta:
meta['md5sum'] = checksum(path)
if not 'last-modified' in meta:
meta['last-modified'] = time.strftime(TIME_FORMAT,
time.gmtime(os.path.getmtime(path)))
if not 'content-length' in meta:
meta['content-length'] = os.stat(path).st_size
if bundle:
bundle_archive.add(name=path, arcname=location)
else:
raise ValueError("For Resource files, either a path to a local file or a remote URL is required")
resource_file = ResourceFile(path, resource=None, metadata=meta)
resource_files.append(resource_file)
if do_bundle:
bundle_archive.close()
resource = cls(name, files=resource_files, metadata=metadata, publish=publish)
if publish:
missing_fields = resource.validate_mandatory_metadata()
if missing_fields:
raise MetadataException(missing_fields)
if do_bundle:
resource.bundle = bundle
for resource_file in resource_files:
resource_file.resource = resource
return resource
@classmethod
def load(cls, local_resource_filename):
"""
Load a Resource from a local JSON file containing Resource meta-data.
"""
resource = cls(None, None)
resource.reload(local_resource_filename)
resource.path = local_resource_filename
return resource
def _process_files(self, files_data, resource_name=None, bundle_archive=None):
"""
Processes normalised file data
"""
if not resource_name:
name = self.name
else:
name = resource_name
resource_files = []
for file_data in files_data:
path = file_data.pop('path', None)
location = file_data.pop('location', None)
meta = file_data.pop('meta', None)
if 'remote' in meta:
remote_url = urllib2.urlopen(urllib2.Request(meta['remote']))
keyset = set(k.lower() for k in meta)
for header_name in [ 'etag', 'last-modified', 'content-length', 'content-type' ]:
if not header_name in keyset and remote_url.info().has_key(header_name):
meta[header_name] = remote_url.info().getheader(header_name)
else:
if bundle_archive:
meta['bundled'] = True
meta['location'] = posixpath.join(Repository.files_prefix, name, location)
if path:
path = os.path.expanduser(path)
if not 'md5sum' in meta:
meta['md5sum'] = checksum(path)
if not 'last-modified' in meta:
meta['last-modified'] = time.strftime(TIME_FORMAT,
time.gmtime(os.path.getmtime(path)))
if not 'content-length' in meta:
meta['content-length'] = os.stat(path).st_size
if bundle_archive:
bundle_archive.add(name=path, arcname=location)
else:
raise ValueError("For Resource files, either a path to a local file or a remote URL is required")
resource_file = ResourceFile(path, resource=None, metadata=meta)
resource_files.append(resource_file)
return resource_files
def add_files(self, files=None, add_to_published=False, overwrite=False):
if self.published and add_to_published == False:
raise ValueError("Cannot add files to a published Resource unless override is specified.")
if files:
files = self.__normalise_file_data(files)
resource_files = self._process_files(files)
# Check if any of the files already exist
conflicting_file_names = []
conflicting_files = []
for existing_file in self.files:
for resource_file in resource_files:
if existing_file.metadata['location'] == resource_file.metadata['location']:
conflicting_file_names.append(resource_file.path)
conflicting_files.append(resource_file.metadata['location'])
if conflicting_file_names and not overwrite:
raise AddFilesException(conflicting_file_names)
if overwrite:
non_conflicting_files = []
for existing_file in self.files:
if existing_file.metadata['location'] not in conflicting_files:
non_conflicting_files.append(existing_file)
self.files = non_conflicting_files + resource_files
else:
self.files += resource_files
return True
else:
return False
def delete_files_from_remote(self, filenames, delete_from_published=False):
if self.published and delete_from_published == False:
raise ValueError("Cannot delete files from a published Resource unless override is specified.")
matching_files = []
files_not_found = []
for filename in filenames:
found = False
for existing_file in self.files:
if filename == existing_file.storage_location():
matching_files.append(existing_file)
found = True
break
if not found:
files_not_found.append(filename)
if files_not_found:
raise DeleteFilesException(non_existent_files=files_not_found)
# Delete from files list
for file in matching_files:
self.files.remove(file)
self.files_to_be_deleted = matching_files
return True
def validate_mandatory_metadata(self):
"""
Checks if mandatory fields are present, and the values are not None.
Returns list of fields that are not found.
"""
fields_not_found = []
for field in Resource.mandatory_metadata_fields:
if not field in self.metadata or self.metadata[field] is None:
fields_not_found.append(field)
return fields_not_found
def add_files_from_storage_paths(self, file_paths):
for path, (size, last_modified, md5sum) in file_paths.iteritems():
meta = {}
meta['location'] = path
meta['content-length'] = size
dt = datetime.strptime(last_modified, ISO_8601_UTC_FORMAT)
meta['last-modified'] = datetime.strftime(dt, TIME_FORMAT) + " UTC"
meta['md5sum'] = md5sum
resource_file = ResourceFile(path, resource=None, metadata=meta)
# Check if resource of same name already exists (in case this is an overwrite)
already_exists = False
for i in range(len(self.files)):
if self.files[i].location() == resource_file.location():
self.files[i] = resource_file
already_exists = True
break
# Otherwise assume this is a new file
if not already_exists:
self.files.append(resource_file)
def reload(self, local_resource_filename):
"""
Reload a Resource from a Resource metadata file (local).
"""
if local_resource_filename and os.path.exists(local_resource_filename):
resource_files = []
with io.open(local_resource_filename, encoding='UTF-8') as fh:
data = json.load(fh)
files_data = data.pop('files', [])
for file_data in files_data:
resource_files.append(ResourceFile(None, resource=self,
metadata=file_data))
bundle_data = data.pop('bundle', None)
if bundle_data:
self.bundle = ResourceFile(None, resource=self,
metadata=bundle_data)
self.name = data.pop('name', None)
self.path = local_resource_filename
self.metadata = data.get('metadata', dict())
self.files = resource_files
self.published = data.pop('published', None)
def to_json(self, **kwargs):
"""
Create a JSON string representation of the Resource: its files and
meta-data.
"""
return Resource.ResourceJSONEncoder(ensure_ascii=False,
encoding='UTF-8', **kwargs).encode(self)
def write(self, dest_path, mod=stat.S_IRWXU):
"""
Write the JSON file representation of a Resource to a destination file.
"""
if os.path.exists(dest_path):
os.remove(dest_path)
else:
mkdir_p(os.path.dirname(dest_path))
with io.open(dest_path, encoding='UTF-8', mode='w') as fh:
logger.debug("Writing JSON serialised resource to %s", dest_path)
fh.write(unicode(self.to_json()))
os.chmod(dest_path, mod)
self.path = dest_path
def local_paths(self):
"""
Get a list of local filenames for all the File data associated with
this Resource.
(Note that this method will trigger a refresh of the Resource, ensuring that all
locally-stored data is relatively up-to-date.)
"""
if self.repository:
self.repository.refresh_resource(self, True)
paths = []
do_refresh = True
if self.bundle:
self.bundle.unpack_bundle(do_refresh=True)
for resource_file in self.files:
paths.append(resource_file.local_path())
return paths
def files_matching(self, pattern):
"""
Return a list of ResourceFile objects where the location or remote
matches a given pattern.
If no files match an empty array is returned.
"""
matches = []
for resource_file in self.files:
if re.search(pattern, resource_file.location_or_remote()):
matches.append(resource_file)
return matches
def file_ending(self, suffix):
"""
Returns the first ResourceFile ending with the given suffix.
If no ResourceFiles match, None is returned.
"""
match = None
for resource_file in self.files:
if resource_file.location_or_remote().endswith(suffix):
if match:
warnings.warn("Found multiple files: also '" +
match.location_or_remote() + "'", RuntimeWarning)
match = resource_file
return match
def update_bundle(self):
"""
Update the bundle with any local file changes.
"""
if not self.bundle:
return # no-op
bundle_file = tarfile.open(self.bundle.local_path(), mode='w:gz')
for resource_file in self.files:
if resource_file.path and resource_file.location():
storage_location = resource_file.storage_location()
bundle_file.add(resource_file.path,
resource_file.storage_location())
bundle_file.close()
def save(self):
"""
Helper method that saves the resource back to the repository that
it was loaded from. Can only save if the resource was loaded from a
repository, otherwise it throws.
"""
if not self.repository:
raise ValueError("Cannot save a resource that is not loaded from a repository")
# Always overwrite the existing one since it was loaded from the repository anyway.
self.repository.save(self, overwrite=True)
def delete(self):
if not self.repository:
raise ValueError("Cannot delete a resource that is not loaded from a repository")
self.repository.delete(self)
def is_bundled(self):
return self.bundle != None
    def publish(self):
        """
        Flag this Resource as published and persist the change.

        Validates mandatory metadata first (raises ValueError if any field
        is missing), rebuilds the repository file list, saves, and refreshes
        the local cache. Returns True on success, False when the Resource
        was already published.
        """
        if self.published:
            print "Nothing to do, resource is already 'Published'"
            return False
        missing_fields = self.validate_mandatory_metadata()
        if missing_fields:
            raise ValueError("Missing mandatory fields: '{0}'".format(missing_fields))
        self.repository.rebuild_file_list(self)
        self.published=True
        self.repository.save(self, overwrite=True)
        # Refresh so the locally cached state matches the stored resource.
        self.repository.refresh_resource(resource=self, refresh_all=True)
        return True
    def unpublish(self):
        """
        Clear the published flag and persist the change.

        Returns True on success, False when the Resource was already
        unpublished.
        """
        if not self.published:
            print "Nothing to do, resource is already 'Unpublished'"
            return False
        self.published=False
        self.repository.save(self, overwrite=True)
        # Refresh so the locally cached state matches the stored resource.
        self.repository.refresh_resource(resource=self, refresh_all=True)
        return True
    def is_published(self):
        # True when this Resource is currently flagged as published.
        return self.published
class ResourceFile(Asset):
    """
    A file component of a Resource, including any file-specific meta-data
    fields.

    Note that a ResourceFile may point to a repository object ("location") or
    some other file stored on the Internet ("remote").
    """
    def __init__(self, path, resource=None, metadata=None):
        """
        Constructor for a Resource file given a local filesystem path, the
        Resource that owns the ResourceFile, and any other meta-data.
        """
        super(ResourceFile, self).__init__()
        self.metadata = metadata
        self.resource = resource
        self.path = path
    def is_bundled(self):
        # Truthy when metadata exists and carries a 'bundled' flag.
        # (Returns the metadata-and-membership expression, not a strict bool.)
        return (self.metadata and 'bundled' in self.metadata)
    def bundle_dirname(self):
        """
        Get the directory where bundled files are to be unpacked.

        Only defined for bundled '.tar.gz' files; otherwise None.
        """
        if self.is_bundled() and self.path and self.path.endswith('.tar.gz'):
            return self.path.split('.tar.gz')[0]
        else:
            return None
    def unpack_bundle(self, do_refresh=True):
        """
        If this ResourceFile is bundled, unpack its contents to the bundle path.
        """
        if not self.resource or not self.resource.repository:
            return
        unpack_path = posixpath.join(self.resource.repository.local_cache,
                Repository.files_prefix, self.resource.name)
        if not self.path:
            # NOTE(review): do_refresh is set but never read afterwards;
            # the refresh actually happens implicitly via local_path().
            do_refresh = True
        resource_filename = self.local_path()
        if not os.path.exists(unpack_path):
            mkdir_p(unpack_path)
        bundle_file = tarfile.open(resource_filename)
        bundle_file.extractall(path=unpack_path)
        bundle_file.close()
    def local_path(self):
        """
        Get the local filename for this File's data.

        (Note that this method will trigger a refresh of this File, ensuring
        that all locally-stored data is relatively up-to-date. Only this File
        is refreshed: not the Resource, nor the Resource's other File objects.)
        """
        if (self.resource and self.resource.repository):
            if self.is_bundled():
                self.path = self.resource.repository._resource_file_dest_path(self)
                if not os.path.exists(self.path):
                    self.resource.local_paths()  # Trigger refresh
            else:
                if self.resource.meta('unified'):
                    self.resource.repository.refresh_resource(self.resource, True)
                else:
                    self.resource.repository._refresh_resource_file(self)
        # Python-2 quirk: fall back to the unicode path when it cannot be
        # encoded as a byte string.
        try:
            return str(self.path)
        except UnicodeEncodeError:
            return self.path
    def location(self):
        """
        Get the meta-data "location" of the ResourceFile (if it is stored in
        the Repository) or None.
        """
        return self.meta('location')
    def storage_location(self):
        """
        The path of a ResourceFile within its Resource directory.
        """
        if self.resource and self.location():
            # Strip the '<files_prefix>/<resource name>/' leading segment.
            return self.location()[(len(posixpath.join(Repository.files_prefix,
                    self.resource.name)) + 1):]
        else:
            return None
    def remote(self):
        """
        Get the "remote" URL of the ResourceFile (if it is stored elsewhere on
        the Internet) or None.
        """
        return self.meta('remote')
    def location_or_remote(self):
        """
        Get either the "location" or "remote" -- whichever is applicable.
        """
        return self.location() or self.remote()
def __load_config():
    """
    Populate the module-level _settings/_hosts/_repositories dictionaries
    from the global and per-user YAML config files (later files override
    earlier ones).
    """
    global _settings, _hosts, _repositories
    _settings = {}
    _hosts = {}
    _repositories = {}
    for file_name in [_config_global_file, _config_user_file]:
        if os.path.exists(file_name):
            with open(file_name) as f:
                # NOTE(review): yaml.load() without an explicit Loader is
                # unsafe on untrusted files -- confirm config files are trusted.
                config = yaml.load(f)
            # Update settings
            if 'settings' in config and config['settings']:
                _settings.update(config['settings'])
            # Update hosts
            if 'hosts' in config and config['hosts']:
                for host_name, host_config in config['hosts'].iteritems():
                    # create host from whichever keys are present
                    params = {}
                    if 'host' in host_config:
                        params['host'] = host_config['host']
                    if 'port' in host_config:
                        params['port'] = host_config['port']
                    if 'secure' in host_config:
                        params['secure'] = host_config['secure']
                    if 'access_key' in host_config:
                        params['access_key'] = host_config['access_key']
                    if 'secret_key' in host_config:
                        params['secret_key'] = host_config['secret_key']
                    host = Host(**params)
                    _hosts[host_name] = host
            # Update repositories
            if 'repositories' in config and config['repositories']:
                for repo_name, repo_config in config['repositories'].iteritems():
                    if 'host' in repo_config:
                        host = _hosts[repo_config['host']]
                    else:
                        host = None
                    # NOTE(review): the default is posixpath.join() of a single
                    # element -- presumably just _settings['cache_root']; confirm.
                    cache_path = os.path.expanduser(
                            repo_config.get('cache_path',
                                posixpath.join(_settings['cache_root'])))
                    stale_time = repo_config.get('stale_time', 60)
                    repo = Repository(host, repo_name, cache_path, stale_time)
                    _repositories[repo_name] = repo
def settings():
    """
    Get a dictionary of the configured settings for BDKD Datastore.

    These settings may originate from the system-wide configuration (in /etc)
    or user-specific configuration. Configuration is loaded lazily on first
    access.
    """
    global _settings
    if _settings:
        return _settings
    __load_config()
    return _settings
def hosts():
    """
    Get a dictionary of the configured hosts for BDKD Datastore,
    loading the configuration lazily on first access.
    """
    global _hosts
    if _hosts:
        return _hosts
    __load_config()
    return _hosts
def repositories():
    """
    Get a dictionary of all configured Repositories, by name,
    loading the configuration lazily on first access.
    """
    global _repositories
    if _repositories:
        return _repositories
    __load_config()
    return _repositories
def repository(name):
    """
    Get a configured Repository by name, or None if no such Repository was
    configured.
    """
    return repositories().get(name)
| StarcoderdataPython |
155835 | <gh_stars>0
import warnings
import random
import string
from collections import OrderedDict
from decimal import Decimal
from ..generic.general_methods import aedt_exception_handler, generate_unique_name
from ..modeler.Object3d import EdgePrimitive, FacePrimitive, VertexPrimitive
try:
import clr
clr.AddReference("System.Collections")
from System.Collections.Generic import List
clr.AddReference("System")
from System import Double, Array
except ImportError:
warnings.warn("Pythonnet is needed to run pyaedt")
@aedt_exception_handler
def tuple2dict(t, d):
    """Recursively fold an AEDT ``(key, value, ...)`` tuple into dict *d*.

    Parameters
    ----------
    t :
        Tuple whose first element is the key and second the value; extra
        trailing elements mark a plain list payload.
    d :
        Mapping updated in place.
    """
    key, value = t[0], t[1]
    value_is_list = type(value) is list
    if value_is_list and len(t) > 2:
        d[key] = value
    elif value_is_list and len(t) == 2 and not value:
        # An empty list payload maps to None.
        d[key] = None
    elif value_is_list and len(t) == 2 and type(value[0]) is tuple:
        # Nested (key, value) tuples: recurse into a new ordered mapping.
        # (len check avoids expanding lists carrying a 3rd element=None.)
        d[key] = OrderedDict()
        for nested_tuple in value:
            tuple2dict(nested_tuple, d[key])
    else:
        d[key] = value
@aedt_exception_handler
def dict2arg(d, arg_out):
    """Serialize dict *d* into the AEDT ``["NAME:...", ...]`` list *arg_out*.

    Parameters
    ----------
    d :
        Mapping to serialize.
    arg_out :
        List extended in place with the AEDT argument representation.
    """
    for key, value in d.items():
        name_tag = "NAME:" + key
        if type(value) in (OrderedDict, dict):
            nested = [name_tag]
            dict2arg(value, nested)
            arg_out.append(nested)
        elif value is None:
            arg_out.append([name_tag])
        elif type(value) is list and len(value) > 0 and type(value[0]) in (OrderedDict, dict):
            # A list of mappings expands into one named group per element.
            for element in value:
                nested = [name_tag]
                dict2arg(element, nested)
                arg_out.append(nested)
        else:
            arg_out.append(key + ":=")
            # Geometry primitives are passed to AEDT by their numeric id.
            if type(value) in (EdgePrimitive, FacePrimitive, VertexPrimitive):
                arg_out.append(value.id)
            else:
                arg_out.append(value)
@aedt_exception_handler
def arg2dict(arg, dict_out):
    """Parse an AEDT ``["NAME:...", ...]`` argument list back into *dict_out*.

    Parameters
    ----------
    arg :
        AEDT argument list whose first element is a ``NAME:`` tag.
    dict_out :
        Mapping updated in place.

    Raises
    ------
    ValueError
        When the list does not follow the AEDT argument format.
    """
    if arg[0] in ("NAME:DimUnits", "NAME:Points"):
        # These two sections are flat value lists, not key/value pairs.
        dict_out[arg[0][5:]] = list(arg[1:])
    elif arg[0][:5] == 'NAME:':
        section = arg[0][5:]
        nested = OrderedDict()
        pos = 1
        while pos < len(arg):
            item = arg[pos]
            if (type(item) is list or type(item) is tuple) and item[0][:5] == 'NAME:':
                arg2dict(item, nested)
                pos += 1
            elif item[-2:] == ':=':
                # 'key:=' is always followed by its value.
                nested[item[:-2]] = arg[pos + 1]
                pos += 2
            else:
                raise ValueError('Incorrect data argument format')
        dict_out[section] = nested
    else:
        raise ValueError('Incorrect data argument format')
@aedt_exception_handler
def create_list_for_csharp(input_list, return_strings=False):
    """
    Copy a Python iterable into a .NET ``List`` for pythonnet interop.

    Parameters
    ----------
    input_list :
        Iterable of values to copy.
    return_strings :
        When True, build ``List[str]`` and stringify each element;
        otherwise build ``List[Double]``. (Default value = False)

    Returns
    -------
    ``System.Collections.Generic.List``
    """
    if return_strings:
        col=List[str]()
    else:
        col=List[Double]()
    for el in input_list:
        if return_strings:
            col.Add(str(el))
        else:
            col.Add(el)
    return col
@aedt_exception_handler
def create_table_for_csharp(input_list_of_list, return_strings=True):
    """
    Copy a Python list of lists into a nested .NET ``List[List[str]]``.

    Parameters
    ----------
    input_list_of_list :
        Sequence of rows, each row an iterable of values.
    return_strings :
        Forwarded to create_list_for_csharp for each row.
        (Default value = True)

    Returns
    -------
    ``System.Collections.Generic.List`` of ``List[str]``
    """
    new_table = List[List[str]]()
    for col in input_list_of_list:
        newcol=create_list_for_csharp(col, return_strings)
        new_table.Add(newcol)
    return new_table
@aedt_exception_handler
def format_decimals(el):
    """Format a numeric value for display.

    Values above 1000 render with thousands separators and no decimals,
    values above 1 with three decimals, anything else in scientific
    notation with three significant decimals.

    Parameters
    ----------
    el :
        Value convertible to float/Decimal.

    Returns
    -------
    str
    """
    magnitude = float(el)
    if magnitude > 1000:
        return "{:,.0f}".format(Decimal(el))
    if magnitude > 1:
        return "{:,.3f}".format(Decimal(el))
    return "{:.3E}".format(Decimal(el))
@aedt_exception_handler
def random_string(length=6):
    """Generate a random string of uppercase letters and digits.

    Parameters
    ----------
    length :
        Number of characters to draw (without replacement).
        (Default value = 6)

    Returns
    -------
    type
        random string
    """
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.sample(alphabet, int(length)))
| StarcoderdataPython |
1788190 | import datetime
class Employee:
    """A basic employee record with raise bookkeeping helpers."""

    # Multiplier applied when raises are granted (class-wide default).
    raise_amount = 1.04

    def __init__(self, first_name: str, last_name: str, age: int):
        self.first_name = first_name
        self.last_name = last_name
        self.age = age

    def full_name(self) -> str:
        """Return the employee's full display name."""
        return f'{self.first_name} {self.last_name}'

    @classmethod
    def set_raise_amount(cls, amount):
        """Change the raise multiplier for the class (and its subclasses)."""
        cls.raise_amount = amount

    @classmethod
    def from_string(cls, string: str):
        """Alternate constructor parsing a 'First-Last-Age' string.

        Fixed: construct via cls(...) so subclasses build their own type,
        and convert the parsed age to int to honour the declared annotation.
        """
        first_name, last_name, age = string.split('-')
        return cls(first_name, last_name, int(age))

    @staticmethod
    def is_work_day(day):
        """Return False for Saturday/Sunday, True for weekdays."""
        return day.weekday() not in (5, 6)
class Developer(Employee):
    """An Employee who additionally records a primary programming language."""

    def __init__(self, first_name, last_name, age, language):
        # Reuse the base initialiser, then attach the extra attribute.
        super().__init__(first_name, last_name, age)
        self.language = language
# Demo: build two developers and print an instance attribute.
anish = Developer(first_name='Anish', last_name='Sachdeva', age=22, language='Java')
ada = Developer(first_name='Ada', last_name='Lovelace', age=30, language='Python')
print(ada.language)
| StarcoderdataPython |
3327816 | <filename>setup.py
from setuptools import setup
from setuptools import find_packages
# Packaging metadata for the BugSwarm command line client distribution.
setup(
    name='bugswarm-client',
    version='0.1.6',
    url='https://github.com/BugSwarm/client',
    author='BugSwarm',
    author_email='<EMAIL>',
    description='The official command line client for the BugSwarm artifact dataset',
    long_description='The official command line client for the BugSwarm artifact dataset',
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: BSD License',
    ],
    zip_safe=False,
    packages=find_packages(),
    # 'bugswarm' is a namespace shared with other BugSwarm distributions.
    namespace_packages=[
        'bugswarm',
    ],
    install_requires=[
        'Click==6.7',
        'requests>=2.20.0',
        'bugswarm-common==0.1.13',
    ],
    # Installs the `bugswarm` console command pointing at the Click CLI.
    entry_points={
        'console_scripts': [
            'bugswarm = bugswarm.client.bugswarm:cli',
        ],
    },
)
| StarcoderdataPython |
class OdbJobTime:
    """The OdbJobTime object stores the analysis time of a job.

    Attributes
    ----------
    systemTime: str
        A float specifying the systemtime for the analysis. This attribute is read-only.
    userTime: str
        A float specifying the usertime for the analysis. This attribute is read-only.
    wallclockTime: str
        A float specifying the wallclocktime for the analysis. This attribute is read-only.

    Notes
    -----
    This object can be accessed by:

    .. code-block:: python

        import visualization
        session.odbData[name].diagnosticData.jobTime

    """
    # NOTE(review): the docs describe these as floats but the stub annotates
    # them as str with '' defaults -- confirm against the Abaqus ODB API.

    # A float specifying the systemtime for the analysis. This attribute is read-only.
    systemTime: str = ''

    # A float specifying the usertime for the analysis. This attribute is read-only.
    userTime: str = ''

    # A float specifying the wallclocktime for the analysis. This attribute is read-only.
    wallclockTime: str = ''
| StarcoderdataPython |
3397605 | <filename>rubikenv/rubikgym.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 21 15:51:06 2018
@author: adrien
"""
import numpy as np
import pandas as pd
import gym
from gym import spaces
class rubik_cube:
    """
    A Rubik's cube simulator.

    Attributes:
    - state : a 3x3x6 int array; state[:, :, f] holds the sticker values
      (0..5) of face f. A solved cube has each face filled with its own
      index. (Fixed: the old docstring described this as a "9x6 array".)
    - init_state : a copy of the state the cube was constructed/reset with.

    Fixed: the ~90 lines of rotation logic previously duplicated between
    move() and move_cube() now live only in move_cube(); move() delegates.
    """
    number_of_face = 6
    number_of_element_in_sideface = 3

    def __init__(self, init_state=None):
        """
        Initialise the cube, either from a caller supplied state or as a
        solved ("perfect") cube.
        """
        if init_state is not None:
            init_state = init_state.astype(int)
            self.state = init_state
            self.init_state = np.copy(init_state)
        else:
            # perfect cube: same construction as a reset
            self.setInit()

    def setInit(self):
        """Reset the cube (state and init_state) to the solved configuration."""
        side = self.number_of_element_in_sideface
        solved = np.zeros((side, side, self.number_of_face))
        for face in range(self.number_of_face):
            solved[:, :, face] = face
        solved = solved.astype(int)
        self.state = solved
        self.init_state = np.copy(solved)

    def move(self, index_move):
        """
        Apply one of the 12 possible moves to the cube state (in place).

        Move index encoding (shared with move_cube):
        [X Y Z] with
            X : 0 1 2 3
            Y : 4 5 6 7
            Z : 8 9 10 11
        Within each axis group, the first two indices turn the two movable
        layers and the last two are the inverses of those moves; X, Y and Z
        select the rotation axis.
        """
        # The actual rotation logic is shared with move_cube.
        self.state = self.move_cube(index_move, self.state)

    def move_cube(self, index_move, state):
        """
        Apply one of the 12 possible moves to *state* (mutated in place)
        and return it. See move() for the index encoding.
        """
        layer = index_move % 2            # which layer of the face turns (0 or 1)
        axis = index_move // 4            # rotation axis: 0=X, 1=Y, 2=Z
        inverse = (index_move % 4) // 2   # 0 = direct move, 1 = inverse move
        if layer == 1:
            layer = 2  # map the second layer onto the far slice index
        if axis == 0:
            # X axis: cycle strips across faces 5 -> 1 -> 4 -> 3 (or back),
            # then turn the adjacent face (0 or 2) a quarter turn.
            if inverse == 0:
                state[:, layer, [5, 1, 4, 3]] = state[:, layer, [1, 4, 3, 5]]
                if layer == 0:
                    state[:, :, 0] = np.rot90(state[:, :, 0], k=3)
                else:
                    state[:, :, 2] = np.rot90(state[:, :, 2])
            else:
                state[:, layer, [5, 1, 4, 3]] = state[:, layer, [3, 5, 1, 4]]
                if layer == 0:
                    state[:, :, 0] = np.rot90(state[:, :, 0])
                else:
                    state[:, :, 2] = np.rot90(state[:, :, 2], k=3)
        elif axis == 1:
            # Y axis: cycle strips across faces 5 -> 0 -> 4 -> 2 (or back),
            # then turn the adjacent face (1 or 3).
            if inverse == 0:
                state[:, layer, [5, 0, 4, 2]] = state[:, layer, [0, 4, 2, 5]]
                if layer == 0:
                    state[:, :, 1] = np.rot90(state[:, :, 1], k=3)
                else:
                    state[:, :, 3] = np.rot90(state[:, :, 3])
            else:
                state[:, layer, [5, 0, 4, 2]] = state[:, layer, [2, 5, 0, 4]]
                if layer == 0:
                    state[:, :, 1] = np.rot90(state[:, :, 1])
                else:
                    state[:, :, 3] = np.rot90(state[:, :, 3], k=3)
        elif axis == 2:
            # Z axis: the cycle mixes rows and columns (with reversals), so
            # read from a snapshot while writing into state.
            tmp_state = np.copy(state)
            if inverse == 0:
                state[:, layer, 0] = tmp_state[layer, :, 1][::-1]
                state[2 - layer, :, 3] = tmp_state[:, layer, 0]
                state[:, 2 - layer, 2] = tmp_state[2 - layer, :, 3][::-1]
                state[layer, :, 1] = tmp_state[:, 2 - layer, 2]
                if layer == 0:
                    state[:, :, 4] = np.rot90(state[:, :, 4], k=3)
                else:
                    state[:, :, 5] = np.rot90(state[:, :, 5])
            else:
                state[layer, :, 1] = tmp_state[:, layer, 0][::-1]
                state[:, layer, 0] = tmp_state[2 - layer, :, 3]
                state[2 - layer, :, 3] = tmp_state[:, 2 - layer, 2][::-1]
                state[:, 2 - layer, 2] = tmp_state[layer, :, 1]
                if layer == 0:
                    state[:, :, 4] = np.rot90(state[:, :, 4])
                else:
                    state[:, :, 5] = np.rot90(state[:, :, 5], k=3)
        return state
class rubikgym(gym.Env, rubik_cube):
    """Gym environment wrapper around the rubik_cube simulator.

    Observation: the 3x3x6 sticker array (values 0..5, flattened for the
    declared observation_space). Action: one of the 12 quarter-turn moves.
    """
    reward_range = (-1, 1)
    spec = None

    # 12 possible quarter-turn moves (see rubik_cube.move for the encoding).
    action_space = spaces.Discrete(12)
    # flatten discret space: one discrete value (0..5) per sticker
    observation_space = spaces.MultiDiscrete([6 for _ in range(3 * 3 * 6)])

    def __init__(self):
        gym.Env.__init__(self)
        rubik_cube.__init__(self)

    def step(self, action):
        """Apply one move.

        NOTE(review): reward/done/info are placeholder zeros.
        """
        self.move(action)
        return self.state, 0, 0, 0

    def reset(self):
        """Reset to the solved cube and return the initial observation.

        Fixed: the previous body was ``self.setInit(), 0`` -- a stray tuple
        expression that discarded the observation and returned None,
        breaking the gym reset contract.
        """
        self.setInit()
        return self.state

    def render(self, mode='human'):
        print(self.state)

    def set_init(self, state):
        # Install a caller supplied state as both current and reset state.
        self.init_state = state
        self.state = state
| StarcoderdataPython |
3352340 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by <NAME> and contributors.
:license: BSD, see LICENSE for more details.
"""
import json
import logging
from typing import Optional, List, Dict, Any
from ..common.flag import flags
from ..http.exception import HttpRequestRejected
from ..http.parser import HttpParser
from ..http.codes import httpStatusCodes
from ..http.proxy import HttpProxyBasePlugin
from ..common.utils import text_
import re
logger = logging.getLogger(__name__)

# See adblock.json file in repository for sample example config
flags.add_argument(
    '--filtered-url-regex-config',
    type=str,
    default='',
    # Fixed: the previous help text ("Comma separated list of IPv4 and IPv6
    # addresses.") was copy-pasted from an unrelated flag; this option is a
    # path to a JSON file of regex filter entries.
    help='Default: No config. Path to a JSON file containing a list of '
         'filter entries (a regex per entry, matched against request URLs).'
)
class FilterByURLRegexPlugin(HttpProxyBasePlugin):
    """Drops traffic by inspecting request URL and checking
    against a list of regular expressions. Example, default
    filter list below can be used as a starting point for
    filtering ads.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Filter entries loaded from the JSON config file; each entry is
        # expected to carry at least a 'regex' key -- TODO confirm schema.
        self.filters: List[Dict[str, Any]] = []
        if self.flags.filtered_url_regex_config != '':
            with open(self.flags.filtered_url_regex_config, 'rb') as f:
                self.filters = json.load(f)

    def before_upstream_connection(
            self, request: HttpParser) -> Optional[HttpParser]:
        # No decision at connect time; filtering happens per request below.
        return request

    def handle_client_request(
            self, request: HttpParser) -> Optional[HttpParser]:
        """Reject the request with 404 when its URL matches any configured regex."""
        # determine host
        request_host = None
        if request.host:
            request_host = request.host
        else:
            if b'host' in request.headers:
                request_host = request.header(b'host')
        if not request_host:
            # Without a host we cannot build a URL; let the request through.
            logger.error("Cannot determine host")
            return request
        # build URL
        url = b'%s%s' % (
            request_host,
            request.path
        )
        # check URL against list
        rule_number = 1
        for blocked_entry in self.filters:
            # if regex matches on URL
            if re.search(text_(blocked_entry['regex']), text_(url)):
                # log that the request has been filtered
                # NOTE(review): the message is %-formatted eagerly; lazy
                # logger args would avoid the work when INFO is disabled.
                logger.info("Blocked: %r with status_code '%r' by rule number '%r'" % (
                    text_(url),
                    httpStatusCodes.NOT_FOUND,
                    rule_number,
                ))
                # close the connection with the status code from the filter
                # list
                raise HttpRequestRejected(
                    status_code=httpStatusCodes.NOT_FOUND,
                    headers={b'Connection': b'close'},
                    reason=b'Blocked',
                )
                # stop looping through filter list
                break
            # increment rule number
            rule_number += 1
        return request

    def handle_upstream_chunk(self, chunk: memoryview) -> memoryview:
        # Response data passes through unmodified.
        return chunk

    def on_upstream_connection_close(self) -> None:
        pass
| StarcoderdataPython |
1705737 | import sys
from typeguard.importhook import install_import_hook
def pytest_addoption(parser):
    """Register the --typeguard-packages option in the 'typeguard' group."""
    parser.getgroup("typeguard").addoption(
        "--typeguard-packages",
        action="store",
        help="comma separated name list of packages and modules to instrument for "
        "type checking",
    )
def pytest_configure(config):
    """Install the typeguard import hook for the configured package list."""
    value = config.getoption("typeguard_packages")
    if not value:
        return
    packages = [pkg.strip() for pkg in value.split(",")]
    # The hook cannot instrument modules that were imported before it runs.
    already_imported_packages = sorted(
        pkg for pkg in packages if pkg in sys.modules
    )
    if already_imported_packages:
        raise RuntimeError(
            "typeguard cannot check these packages because they "
            "are already imported: {}".format(", ".join(already_imported_packages))
        )
    install_import_hook(packages=packages)
| StarcoderdataPython |
3318621 | <filename>Pyrado/scripts/training/omo_a2c.py
import torch as to
import pyrado
from pyrado.algorithms.step_based.a2c import A2C
from pyrado.algorithms.step_based.gae import GAE
from pyrado.environments.pysim.one_mass_oscillator import OneMassOscillatorSim
from pyrado.logger.experiment import setup_experiment
from pyrado.policies.feed_back.fnn import FNNPolicy
from pyrado.spaces import ValueFunctionSpace
from pyrado.utils.data_types import EnvSpec
if __name__ == "__main__":
dt = 1e-3
env = OneMassOscillatorSim(dt, 5000)
ex_dir = setup_experiment(OneMassOscillatorSim.name, A2C.name)
hparam = {
"particle_hparam": {
"actor": {"hidden_sizes": [32, 24], "hidden_nonlin": to.relu},
"vfcn": {"hidden_sizes": [32, 24], "hidden_nonlin": to.relu},
"critic": {},
},
"max_iter": 100,
"min_steps": 10000,
}
particle_param = hparam.pop("particle_hparam")
actor = FNNPolicy(spec=env.spec, **particle_param["actor"])
vfcn = FNNPolicy(spec=EnvSpec(env.obs_space, ValueFunctionSpace), **particle_param["vfcn"])
critic = GAE(vfcn, **particle_param["critic"])
algo = A2C(ex_dir, env, actor, critic, **hparam)
algo.train()
| StarcoderdataPython |
24246 |
""" config.py
Microsimulation config for mulit-LAD MPI simulation
"""
import numpy as np
import glob
import neworder
# define some global variables describing where the starting population and the parameters of the dynamics come from
# One starting-population CSV per MSOA area (2011 baseline).
initial_populations = glob.glob("examples/people_multi/data/ssm_*_MSOA11_ppp_2011.csv")
# age/sex/ethnicity-specific fertility rates
asfr = "examples/shared/NewETHPOP_fertility.csv"
# age/sex/ethnicity-specific mortality rates
asmr = "examples/shared/NewETHPOP_mortality.csv"
# internal in-migration
asir = "examples/shared/NewETHPOP_inmig.csv"
# internal out-migration
asor = "examples/shared/NewETHPOP_outmig.csv"
# immigration
ascr = "examples/shared/NewETHPOP_immig.csv"
# emigration
asxr = "examples/shared/NewETHPOP_emig.csv"
# MPI split initial population files over threads
def partition(arr, count):
    # Deal the items round-robin into `count` interleaved sublists.
    return [arr[offset::count] for offset in range(count)]
# Each MPI rank gets its own subset of the input files.
initial_populations = partition(initial_populations, neworder.mpi.size())

# running/debug options
neworder.log_level = 1

# initialisation: each rank builds a Population from its file subset.
neworder.initialisations = {
  "people": { "module": "population", "class_": "Population", "args": (initial_populations[neworder.mpi.rank()], asfr, asmr, asir, asor, ascr, asxr) }
}

# define the evolution: 39 annual steps from mid-2011 to mid-2050.
neworder.timeline = neworder.Timeline(2011.25, 2050.25, [39])

# timestep must be defined in neworder
neworder.dataframe.transitions = {
  "fertility": "people.births(timestep)",
  "mortality": "people.deaths(timestep)",
  "migration": "people.migrations(timestep)",
  "age": "people.age(timestep)"
}

# checks to perform after each timestep. Assumed to return a boolean
neworder.do_checks = True # Faith
# assumed to be methods of class_ returning True if checks pass
neworder.checks = {
  "check": "people.check()"
}

# Generate output at each checkpoint
neworder.checkpoints = {
  #"check_data" : "people.check()",
  "write_table" : "people.write_table()"
}
| StarcoderdataPython |
1777467 | <filename>text-classification/a05_Seq2seqWithAttention/a1_seq2seq_attention_predict.py
# -*- coding: utf-8 -*-
#prediction using model.
#process--->1.load data(X:list of lint,y:int). 2.create session. 3.feed data. 4.predict
import sys
# Python 2 idiom: force the process default string encoding to UTF-8
# (reload(sys) restores the setdefaultencoding attribute removed at startup).
reload(sys)
sys.setdefaultencoding('utf8')
import tensorflow as tf
import numpy as np
#from p5_fastTextB_model import fastTextB as fastText
from data_util_zhihu import load_data_predict,load_final_test_data,create_voabulary,create_voabulary_label
from keras.preprocessing.sequence import pad_sequences #to_categorical
import os
import codecs
from a1_seq2seq_attention_model import seq2seq_attention_model
#configuration
# Command line configuration (TF flags) for the seq2seq-with-attention predictor.
FLAGS=tf.flags.FLAGS
tf.flags.DEFINE_integer("num_classes",1999+3,"number of label") #3 ADDITIONAL TOKEN: _GO,_END,_PAD
tf.flags.DEFINE_float("learning_rate",0.01,"learning rate")
tf.flags.DEFINE_integer("batch_size", 1, "Batch size for training/evaluating.") #批处理的大小 32-->128
tf.flags.DEFINE_integer("decay_steps", 6000, "how many steps before decay learning rate.") #6000批处理的大小 32-->128
tf.flags.DEFINE_float("decay_rate", 1.0, "Rate of decay for learning rate.") #0.87一次衰减多少
tf.flags.DEFINE_string("ckpt_dir","checkpoint_seq2seq_attention/seq2seq_attention1/","checkpoint location for the model")
tf.flags.DEFINE_integer("sequence_length",100,"max sentence length")
tf.flags.DEFINE_integer("embed_size",100,"embedding size")
tf.flags.DEFINE_boolean("is_training",False,"is traning.true:tranining,false:testing/inference")
tf.flags.DEFINE_integer("num_epochs",10,"number of epochs to run.")
tf.flags.DEFINE_integer("validate_every", 1, "Validate every validate_every epochs.") #每10轮做一次验证
tf.flags.DEFINE_integer("validate_step", 1000, "how many step to validate.") #1500做一次检验
tf.flags.DEFINE_boolean("use_embedding",True,"whether to use embedding or not.")
#tf.flags.DEFINE_string("cache_path","text_cnn_checkpoint/data_cache.pik","checkpoint location for the model")
#train-zhihu4-only-title-all.txt
tf.flags.DEFINE_string("traning_data_path","train-zhihu4-only-title-all.txt","path of traning data.") #O.K.train-zhihu4-only-title-all.txt-->training-data/test-zhihu4-only-title.txt--->'training-data/train-zhihu5-only-title-multilabel.txt'
tf.flags.DEFINE_string("word2vec_model_path","zhihu-word2vec-title-desc.bin-100","word2vec's vocabulary and vectors") #zhihu-word2vec.bin-100-->zhihu-word2vec-multilabel-minicount15.bin-100
tf.flags.DEFINE_boolean("multi_label_flag",True,"use multi label or single label.") #set this false. becase we are using it is a sequence of token here.
tf.flags.DEFINE_integer("num_sentences", 4, "number of sentences in the document") #每10轮做一次验证
tf.flags.DEFINE_integer("hidden_size",100,"hidden size")
tf.flags.DEFINE_float("l2_lambda", 0.0001, "l2 regularization")
tf.flags.DEFINE_string("predict_target_file","checkpoint_seq2seq_attention/seq2seq_attention1//zhihu_result_seq2seq_attention.csv","target file path for final prediction")
tf.flags.DEFINE_string("predict_source_file",'test-zhihu-forpredict-title-desc-v6.txt',"target file path for final prediction") #test-zhihu-forpredict-v4only-title.txt
tf.flags.DEFINE_integer("decoder_sent_length",6,"length of decoder inputs")
#1.load data(X:list of lint,y:int). 2.create session. 3.feed data. 4.training (5.validation) ,(6.prediction)
# 1.load data with vocabulary of words and labels
# Special seq2seq vocabulary tokens: sequence start, sequence end, padding.
_GO="_GO"
_END="_END"
_PAD="_PAD"
def main(_):
    """Restore the trained seq2seq-attention model and write per-question
    label predictions to the CSV file named by FLAGS.predict_target_file."""
    # 1.load data with vocabulary of words and labels
    vocabulary_word2index, vocabulary_index2word = create_voabulary(word2vec_model_path=FLAGS.word2vec_model_path,name_scope="seq2seq_attention") # simple='simple'
    vocab_size = len(vocabulary_word2index)
    print("seq2seq_attention.vocab_size:", vocab_size)
    vocabulary_word2index_label, vocabulary_index2word_label = create_voabulary_label(name_scope="seq2seq_attention",use_seq2seq=True)
    questionid_question_lists=load_final_test_data(FLAGS.predict_source_file)
    test= load_data_predict(vocabulary_word2index,vocabulary_word2index_label,questionid_question_lists)
    testX=[]
    question_id_list=[]
    for tuple in test:
        question_id,question_string_list=tuple
        question_id_list.append(question_id)
        testX.append(question_string_list)
    # 2.Data preprocessing: Sequence padding
    print("start padding....")
    testX2 = pad_sequences(testX, maxlen=FLAGS.sequence_length, value=0.)  # padding to max length
    print("end padding...")
    # 3.create session.
    config=tf.ConfigProto()
    config.gpu_options.allow_growth=True
    with tf.Session(config=config) as sess:
        # 4.Instantiate Model
        model=seq2seq_attention_model(FLAGS.num_classes, FLAGS.learning_rate, FLAGS.batch_size, FLAGS.decay_steps, FLAGS.decay_rate, FLAGS.sequence_length,
                  vocab_size, FLAGS.embed_size,FLAGS.hidden_size, FLAGS.is_training,decoder_sent_length=FLAGS.decoder_sent_length,l2_lambda=FLAGS.l2_lambda)
        saver=tf.train.Saver()
        if os.path.exists(FLAGS.ckpt_dir+"checkpoint"):
            print("Restoring Variables from Checkpoint")
            saver.restore(sess,tf.train.latest_checkpoint(FLAGS.ckpt_dir))
        else:
            # Prediction without a trained checkpoint makes no sense; abort.
            print("Can't find the checkpoint.going to stop")
            return
        # 5.feed data, to get logits
        number_of_training_data=len(testX2);print("number_of_training_data:",number_of_training_data)
        index=0
        predict_target_file_f = codecs.open(FLAGS.predict_target_file, 'a', 'utf8')
        # Decoder input: _GO followed by _PAD tokens seeds the decoder.
        decoder_input=np.reshape(np.array([vocabulary_word2index_label[_GO]]+[vocabulary_word2index_label[_PAD]]*(FLAGS.decoder_sent_length-1)),[-1,FLAGS.decoder_sent_length])
        for start, end in zip(range(0, number_of_training_data, FLAGS.batch_size),range(FLAGS.batch_size, number_of_training_data+1, FLAGS.batch_size)):
            predictions,logits=sess.run([model.predictions,model.logits],feed_dict={model.input_x:testX2[start:end],model.decoder_input:decoder_input,model.dropout_keep_prob:1}) #'shape of logits:', ( 1, 1999)
            # 6. get lable using logtis
            predicted_labels=get_label_using_logits(logits[0],predictions,vocabulary_index2word_label,vocabulary_word2index_label)
            # 7. write question id and labels to file system.
            write_question_id_with_labels(question_id_list[index],predicted_labels,predict_target_file_f)
            index=index+1
        predict_target_file_f.close()
def get_label_using_logits(logits, predictions,vocabulary_index2word_label,vocabulary_word2index_label, top_number=5):
    """Decode one label per decoder step from the per-step logits.

    The final step is skipped (it is normally the <END> token); previously
    chosen labels are excluded so each step yields a distinct label.
    """
    result_list=[]
    last_position = len(logits) - 1
    for position, row in enumerate(logits):
        if position == last_position:
            # not include result from last column, which usually is <END>.
            continue
        result_list.append(process_each_row_get_lable(
            row, vocabulary_index2word_label, vocabulary_word2index_label, result_list))
    return result_list
def process_each_row_get_lable(row,vocabulary_index2word_label,vocabulary_word2index_label,result_list):
    """
    Pick the best label for one decoder step.

    :param row: it is a list; length is the number of labels (e.g. 2002)
    :param vocabulary_index2word_label: index -> label word mapping
    :param vocabulary_word2index_label: label word -> index mapping
    :param result_list: labels already chosen at earlier steps (excluded)
    :return: the highest-scoring label that is neither _PAD/_END nor already
             chosen, or None when no candidate qualifies
    """
    # Indices sorted by descending score.
    ranked = list(np.argsort(row))[::-1]
    banned = (vocabulary_word2index_label[_PAD], vocabulary_word2index_label[_END])
    for index in ranked:
        if index in banned:
            continue
        word = vocabulary_index2word_label[index]
        if word not in result_list:
            return word
def get_label_using_logitsO(pred_list, vocabulary_index2word_label, vocabulary_word2index_label, top_number=5):
    """Map every predicted index in the first row of *pred_list* to its label word.

    Debug-prints each step, mirroring the original trace output.
    """
    print("pred_list[0]:", pred_list[0])  # e.g. array([ 310, 1541, 75, 1, 1, 1])
    first_row = pred_list.tolist()[0]
    print("pred_list_:", first_row)
    words = []
    for idx in first_row:
        print("index:", idx)
        label_word = vocabulary_index2word_label[idx]
        print("word:", label_word)
        words.append(label_word)
    return words
# write question id and labels to file system.
def write_question_id_with_labels(question_id, labels_list, f):
    """Append one CSV-style line "<question_id>,<label1>,<label2>,..." to open file *f*."""
    joined = ",".join(labels_list)
    f.write("%s,%s\n" % (question_id, joined))
if __name__ == "__main__":
    tf.app.run()  # TensorFlow 1.x entry point: parses FLAGS, then dispatches to main()
3350521 | <reponame>jaggernod/bedtime-stories<filename>etl/src/data_classes.py
from dataclasses import dataclass
import datetime
@dataclass
class Model:
    """Plain data holder for one batch of readings.

    Field semantics are inferred from the names only — confirm against the
    code that constructs this object.
    """
    current_temperature: float
    num_tea_boils: int
    wake_up_time: datetime.datetime
    outside_temperature: dict  # NOTE(review): value shape not visible here — verify with producer
| StarcoderdataPython |
1640654 | '''
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
'''
# Write your code here
# Read the array once, then answer each query: the answer is the smallest
# 1-based prefix length k with arr[0] + ... + arr[k-1] >= t, or -1.
n = int(input())
arr = [int(i) for i in input().split()]
q = int(input())
from itertools import accumulate
# Precompute all prefix sums once. The original re-summed arr[:k] for every
# candidate k of every query, which is O(n^2) work per query.
prefix = list(accumulate(arr))
for _ in range(q):
    t = int(input())
    answer = -1
    # Linear scan keeps correctness even for negative elements (the prefix
    # sums are not guaranteed monotonic, so binary search would be unsafe).
    for k, total in enumerate(prefix, start=1):
        if total >= t:
            answer = k
            break
    print(answer)
| StarcoderdataPython |
19433 | <filename>python/tests/test_tree_intersection.py
from challenges.tree_intersection.tree_intersection import find_intersection
from challenges.tree.tree import BinarySearchTree
def test_find_intersection():
    """Intersection of two populated trees is exactly the shared values."""
    left = BinarySearchTree()
    for value in (1, 2, 3, 4, 5, 6, 7, 8):
        left.add(value)
    right = BinarySearchTree()
    for value in (12, 12, 13, 14, 15, 16, 7, 8):
        right.add(value)
    assert find_intersection(left, right) == [7, 8]
def test_empty_binary_tree():
    """An empty second tree yields an empty intersection."""
    populated = BinarySearchTree()
    for value in (1, 2, 3, 4, 5, 6, 7, 8):
        populated.add(value)
    assert find_intersection(populated, BinarySearchTree()) == []
def test_first_empty_binary_tree():
    """An empty first tree yields an empty intersection."""
    populated = BinarySearchTree()
    for value in (1, 2, 3, 4, 5, 6, 7, 8):
        populated.add(value)
    assert find_intersection(BinarySearchTree(), populated) == []
def test_same_tree():
    """Two identically-built trees intersect in every value."""
    values = (1, 2, 3, 4, 5, 6, 7, 8)
    first = BinarySearchTree()
    second = BinarySearchTree()
    for value in values:
        first.add(value)
        second.add(value)
    assert find_intersection(first, second) == [1, 2, 3, 4, 5, 6, 7, 8]
| StarcoderdataPython |
3288666 | <filename>sdk/python/pulumi_azure_nextgen/documentdb/_enums.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
# Public re-export surface of this generated module (see header warning).
__all__ = [
    'BackupPolicyType',
    'CompositePathSortOrder',
    'ConflictResolutionMode',
    'ConnectorOffer',
    'DataType',
    'DatabaseAccountKind',
    'DatabaseAccountOfferType',
    'DefaultConsistencyLevel',
    'IndexKind',
    'IndexingMode',
    'NetworkAclBypass',
    'PartitionKind',
    'ResourceIdentityType',
    'RoleDefinitionType',
    'ServerVersion',
    'SpatialType',
    'TriggerOperation',
    'TriggerType',
]
class BackupPolicyType(str, Enum):
    """
    Describes the mode of backups.
    """
    # Members are exact wire strings; file is auto-generated (see header).
    PERIODIC = "Periodic"
    CONTINUOUS = "Continuous"
class CompositePathSortOrder(str, Enum):
    """
    Sort order for composite paths.
    """
    # Members are exact wire strings; file is auto-generated (see header).
    ASCENDING = "ascending"
    DESCENDING = "descending"
class ConflictResolutionMode(str, Enum):
    """
    Indicates the conflict resolution mode.
    """
    # Members are exact wire strings; file is auto-generated (see header).
    LAST_WRITER_WINS = "LastWriterWins"
    CUSTOM = "Custom"
class ConnectorOffer(str, Enum):
    """
    The cassandra connector offer type for the Cosmos DB database C* account.
    """
    # Only one offer size is exposed by this API version.
    SMALL = "Small"
class DataType(str, Enum):
    """
    The datatype for which the indexing behavior is applied to.
    """
    # Members are exact wire strings; file is auto-generated (see header).
    STRING = "String"
    NUMBER = "Number"
    POINT = "Point"
    POLYGON = "Polygon"
    LINE_STRING = "LineString"
    MULTI_POLYGON = "MultiPolygon"
class DatabaseAccountKind(str, Enum):
    """
    Indicates the type of database account. This can only be set at database account creation.
    """
    # Members are exact wire strings; file is auto-generated (see header).
    GLOBAL_DOCUMENT_DB = "GlobalDocumentDB"
    MONGO_DB = "MongoDB"
    PARSE = "Parse"
class DatabaseAccountOfferType(str, Enum):
    """
    The offer type for the database.
    """
    # Only one offer type is exposed by this API version.
    STANDARD = "Standard"
class DefaultConsistencyLevel(str, Enum):
    """
    The default consistency level and configuration settings of the Cosmos DB account.
    """
    # Members are exact wire strings; file is auto-generated (see header).
    EVENTUAL = "Eventual"
    SESSION = "Session"
    BOUNDED_STALENESS = "BoundedStaleness"
    STRONG = "Strong"
    CONSISTENT_PREFIX = "ConsistentPrefix"
class IndexKind(str, Enum):
    """
    Indicates the type of index.
    """
    # Members are exact wire strings; file is auto-generated (see header).
    HASH = "Hash"
    RANGE = "Range"
    SPATIAL = "Spatial"
class IndexingMode(str, Enum):
    """
    Indicates the indexing mode.
    """
    # Note: these wire values are lowercase, unlike most enums in this module.
    CONSISTENT = "consistent"
    LAZY = "lazy"
    NONE = "none"
class NetworkAclBypass(str, Enum):
    """
    Indicates what services are allowed to bypass firewall checks.
    """
    # Members are exact wire strings; file is auto-generated (see header).
    NONE = "None"
    AZURE_SERVICES = "AzureServices"
class PartitionKind(str, Enum):
    """
    Indicates the kind of algorithm used for partitioning. For MultiHash, multiple partition keys (up to three maximum) are supported for container creation.
    """
    # Members are exact wire strings; file is auto-generated (see header).
    HASH = "Hash"
    RANGE = "Range"
    MULTI_HASH = "MultiHash"
class ResourceIdentityType(str, Enum):
    """
    The type of identity used for the resource. The type 'SystemAssigned,UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the service.
    """
    # Members are exact wire strings; file is auto-generated (see header).
    SYSTEM_ASSIGNED = "SystemAssigned"
    USER_ASSIGNED = "UserAssigned"
    SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned,UserAssigned"
    NONE = "None"
class RoleDefinitionType(str, Enum):
    """
    Indicates whether the Role Definition was built-in or user created.
    """
    # Members are exact wire strings; file is auto-generated (see header).
    BUILT_IN_ROLE = "BuiltInRole"
    CUSTOM_ROLE = "CustomRole"
class ServerVersion(str, Enum):
    """
    Describes the ServerVersion of a MongoDB account.
    """
    # Members are exact wire strings; file is auto-generated (see header).
    SERVER_VERSION_3_2 = "3.2"
    SERVER_VERSION_3_6 = "3.6"
    SERVER_VERSION_4_0 = "4.0"
class SpatialType(str, Enum):
    """
    Indicates the spatial type of index.
    """
    # Members are exact wire strings; file is auto-generated (see header).
    POINT = "Point"
    LINE_STRING = "LineString"
    POLYGON = "Polygon"
    MULTI_POLYGON = "MultiPolygon"
class TriggerOperation(str, Enum):
    """
    The operation the trigger is associated with.
    """
    # Members are exact wire strings; file is auto-generated (see header).
    ALL = "All"
    CREATE = "Create"
    UPDATE = "Update"
    DELETE = "Delete"
    REPLACE = "Replace"
class TriggerType(str, Enum):
    """
    Type of the Trigger.
    """
    # Pre/Post refer to execution relative to the triggering operation.
    PRE = "Pre"
    POST = "Post"
| StarcoderdataPython |
1702525 | <gh_stars>0
class CompanyStaffDefault:
    """Serializer-field default resolving to the requesting user's companystaff.

    requires_context tells DRF (3.11+) to pass the serializer field into
    __call__ so the request can be read from its context; see
    https://www.django-rest-framework.org/community/3.11-announcement/#validator-default-context
    """
    requires_context = True

    def __call__(self, serializer_field):
        request = serializer_field.context['request']
        return request.user.companystaff
| StarcoderdataPython |
1669005 | """
This script creates individual 3D views for each room in the active view. The active view must be a 3D view.
"""
# pylint: disable=import-error,invalid-name,broad-except
import clr
# Import RevitAPI
clr.AddReference("RevitAPI")
import Autodesk
from Autodesk.Revit.DB import *
from pyrevit import revit
from pyrevit import script
from pyrevit import forms
__title__ = " 3D Room Views by Active 3D View"
__author__ = "{{author}}"
# __context__ = "active-3d-view"
# pyRevit helpers for logging and panel progress reporting.
logger = script.get_logger()
output = script.get_output()
# The active view must be a 3D view; exit the script otherwise.
threeD_view = revit.doc.ActiveView
forms.check_viewtype(threeD_view, ViewType.ThreeD, exitscript=True)
# Collect all placed rooms (Area != 0 filters out unplaced rooms).
rooms = []
collector = (
    FilteredElementCollector(revit.doc)
    .OfCategory(BuiltInCategory.OST_Rooms)
    .ToElements()
)
for c in collector:
    if c.Area != 0:
        rooms.append(c)
# Names of all existing (non-template) 3D views, used to skip duplicates.
views = []
col2 = FilteredElementCollector(revit.doc).OfClass(View3D).ToElements()
for view in col2:
    if view.IsTemplate == False:
        views.append(view.ViewName)
total_work = len(rooms)
for idx, room in enumerate(rooms):
    roomName = room.LookupParameter("Name").AsString()
    roomNumber = room.LookupParameter("Number").AsString()
    newName = "3D - " + roomName + " " + roomNumber
    # Reuse the active view's view family type for the new isometric view.
    # Get View Family Type of Plan
    viewTypeId = threeD_view.GetTypeId()
    # NOTE(review): 'level' is assigned but never used below.
    level = room.LevelId
    with revit.Transaction("Create 3D Views by Room"):
        if newName not in views:
            # Get Room Bounding Box and Create New
            # Section box = room bounding box grown by 1 on every side
            # (Revit internal units — presumably feet; confirm).
            roomBB = room.get_BoundingBox(threeD_view)
            rMax = roomBB.Max
            rMin = roomBB.Min
            newMaxP = XYZ(rMax.X + 1, rMax.Y + 1, rMax.Z + 1)
            newMinP = XYZ(rMin.X - 1, rMin.Y - 1, rMin.Z - 1)
            newBB = BoundingBoxXYZ()
            newBB.Max = newMaxP
            newBB.Min = newMinP
            threeD = View3D.CreateIsometric(revit.doc, viewTypeId)
            # box = app.Create.NewBoundingBoxXYZ()
            # box.Min = Min[count]
            # box.Max = Max[count]
            # bbox.append(box)
            a = View3D.SetSectionBox(threeD, newBB)
            threeD.ViewName = newName
            print("Creating 3D View: %s" % threeD.ViewName)
        else:
            message = 'View "%s" already exists' % newName
            logger.warning(message)
    output.update_progress(idx + 1, total_work)
print("Completed\n")
| StarcoderdataPython |
3311339 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: <NAME> <<EMAIL>>
#
import functools
import threading
import time
NUM=0
def locked(lock_name = '_lock'):
    """Method decorator factory: run the wrapped method while holding the
    lock stored on the instance under attribute *lock_name*.

    Usage::

        class C(object):
            def __init__(self):
                self._lock = threading.Lock()
            @locked()
            def method(self): ...

    Improvements over the original: the lock is held via a ``with`` block
    (equivalent to the old try/finally but idiomatic), metadata is copied
    with ``functools.wraps`` (preserves ``__module__``/``__qualname__``/
    ``__wrapped__`` in addition to name and docstring), and the dead
    commented-out NUM tracing has been removed.
    """
    def locked_with_name(func):
        @functools.wraps(func)
        def wrapped_locked(self, *args, **kargs):
            # Look the lock up per call so the decorator also works when
            # the attribute is assigned after class creation.
            the_lock = getattr(self, lock_name)
            with the_lock:
                return func(self, *args, **kargs)
        return wrapped_locked
    return locked_with_name
class UnfairLock(object):
    """Lock that polls instead of blocking in the kernel.

    acquire() spins on a non-blocking try-acquire, sleeping SLICE seconds
    between attempts, so waiters wake in no particular order (hence
    "unfair").
    """
    # This is far better than 0.01 or 0.0001, but I haven't tried other values.
    # Of course, this value depends much on the computer.
    SLICE = 0.001

    def __init__(self):
        self._lock = threading.Lock()

    def acquire(self):
        # Poll until the underlying lock becomes free.
        while not self._lock.acquire(False):
            time.sleep(self.SLICE)

    def release(self):
        self._lock.release()
class _InternalReadLock(object):
    """Reader-side facade over an RWLock: acquire()/release() delegate to
    the owning lock's shared (read) protocol."""
    def __init__(self, rwlock):
        self.rwlock = rwlock
    def acquire(self):
        self.rwlock._acquire_reading()
    def release(self):
        self.rwlock._release_reading()
class _InternalWriteLock(object):
    """Writer-side facade over an RWLock: acquire()/release() delegate to
    the owning lock's exclusive (write) protocol."""
    def __init__(self, rwlock):
        self.rwlock = rwlock
    def acquire(self):
        self.rwlock._acquire_writing()
    def release(self):
        self.rwlock._release_writing()
class RWLock(object):
    """Readers/writer lock built on a single Condition.

    Any number of threads may read concurrently; writing is exclusive.
    The write side is reentrant for the thread that already holds it
    (self._writing tracks [owner_thread, reentry_count]). The two halves
    are handed out via read_lock()/write_lock(), which return small
    facade objects exposing acquire()/release().
    """
    # NOTE(review): _SHORT_TIME is never referenced in this class — looks
    # like a leftover constant.
    _SHORT_TIME = 0.05
    def __init__(self):
        # _lock guards the counters below (used via the @locked decorator).
        self._lock = threading.RLock()
        self._read_lock = _InternalReadLock(self)
        self._write_lock = _InternalWriteLock(self)
        # Readers and writers wait on this condition for state changes.
        self._condition = threading.Condition()
        self._reading = 0        # number of threads currently reading
        self._writing = None     # None, or [owner_thread, reentry_count]
    @locked()
    def _get_reading(self):
        # Snapshot of the active-reader count.
        return self._reading
    @locked()
    def _increment_reading(self):
        self._reading += 1
    @locked()
    def _decrement_reading(self):
        self._reading -= 1
    @locked()
    def _set_writing(self):
        # Record the current thread as writer with one level of reentry.
        self._writing = [threading.currentThread(), 1]
    @locked()
    def _decrement_writing(self):
        # Drop one reentry level; clear the writer slot on the last release.
        self._writing[1] = self._writing[1] - 1
        if self._writing[1] == 0:
            self._writing = None
    @locked()
    def _is_writing(self):
        return self._writing != None
    @locked()
    def _someone_else_is_writing(self):
        # True when a *different* thread currently owns the write side.
        return self._writing != None and self._writing[0] != threading.currentThread()
    @locked()
    def _am_i_writing(self):
        # Reentrant check: if this thread already holds the write side,
        # bump the reentry count as a side effect (callers rely on this).
        am_i = self._writing != None and self._writing[0] == threading.currentThread()
        if am_i:
            self._writing[1] = self._writing[1] + 1
        return am_i
    def _acquire_reading(self):
        self._condition.acquire()
        try:
            # Readers wait only while some other thread is writing.
            while self._someone_else_is_writing():
                self._condition.wait()
            self._increment_reading()
            self._condition.notifyAll()
        finally:
            self._condition.release()
    def _acquire_writing(self):
        self._condition.acquire()
        try:
            if not self._am_i_writing():
                # Wait for all readers and any other writer to finish.
                while self._get_reading() > 0 or self._is_writing():
                    self._condition.wait()
                self._set_writing()
                self._condition.notifyAll()
        finally:
            self._condition.release()
    def _release_reading(self):
        self._condition.acquire()
        try:
            self._decrement_reading()
            self._condition.notifyAll()
        finally:
            self._condition.release()
    def _release_writing(self):
        self._condition.acquire()
        try:
            self._decrement_writing()
            self._condition.notifyAll()
        finally:
            self._condition.release()
    def read_lock(self):
        # Shared (reader) half of the lock.
        return self._read_lock
    def write_lock(self):
        # Exclusive (writer) half of the lock.
        return self._write_lock
| StarcoderdataPython |
3231115 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn.functional as F
import math
import torch.nn as nn
def js_div(p_output, q_output, get_softmax=True):
    """
    Function that measures JS divergence between target and output logits:
    """
    kl = nn.KLDivLoss(reduction='batchmean')
    if get_softmax:
        p_output = F.softmax(p_output)
        q_output = F.softmax(q_output)
    # JS(p, q) = 0.5 * KL(p || m) + 0.5 * KL(q || m), m = (p + q) / 2.
    mixture_log = ((p_output + q_output) / 2).log()
    return 0.5 * (kl(mixture_log, p_output) + kl(mixture_log, q_output))
def consis_loss(logps, temp=0.5, lam=1.0):
    """Consistency regularization loss across several predictions.

    Each element of *logps* is a tensor of per-class log-probabilities
    (batch x classes). The predictions are averaged, sharpened with
    temperature *temp*, and every individual prediction is pulled toward
    the (detached) sharpened average with a squared-error penalty scaled
    by *lam*.

    Bug fix: the original returned ``args.lam * loss``, referencing a
    global ``args`` that this module never defines (NameError outside the
    authors' training script); the ``lam`` parameter is used instead.
    """
    probs = [torch.exp(logp) for logp in logps]
    avg_p = sum(probs) / len(probs)
    # Temperature sharpening; detach so gradients flow only through the
    # individual predictions, never through the target distribution.
    powered = torch.pow(avg_p, 1. / temp)
    sharp_p = (powered / torch.sum(powered, dim=1, keepdim=True)).detach()
    loss = 0.
    for p in probs:
        loss = loss + torch.mean((p - sharp_p).pow(2).sum(1))
    loss = loss / len(probs)
    return lam * loss
def flag_bounded(model_forward, perturb_shape, y, optimizer, device, criterion, m=3, step_size=1e-3, mag=1e-3, mask=None):
    """One adversarial-augmentation training step (FLAG-style — confirm).

    Performs *m* gradient-ascent steps on an input perturbation while
    accumulating the (averaged) task loss plus a JS-divergence consistency
    term against the unperturbed output, then applies one optimizer step.

    NOTE(review): `model.manual_backward` suggests a PyTorch Lightning
    module with automatic optimization disabled — confirm against the
    training loop. When mag > 0 the perturbation is clamped to L2 norm
    `mag` along the last dimension.
    Returns (final loss tensor, final masked output).
    """
    model, forward = model_forward
    model.train()
    optimizer.zero_grad()
    # Reference (clean) output, gradient-free, for the consistency term.
    with torch.no_grad():
        perturb = None
        out_ori = forward(perturb).view(-1)
        if mask is not None:
            out_ori = out_ori[mask]
    # Initialize the perturbation: norm-scaled uniform ball when mag > 0,
    # otherwise a uniform box of half-width step_size.
    if mag > 0:
        perturb = torch.FloatTensor(*perturb_shape).uniform_(-1, 1).to(device)
        perturb = perturb * mag / math.sqrt(perturb_shape[-1])
    else:
        perturb = torch.FloatTensor(
            *perturb_shape).uniform_(-step_size, step_size).to(device)
    perturb.requires_grad_()
    out = forward(perturb).view(-1)
    if mask is not None:
        out = out[mask]
    # Loss is divided by m because gradients from all m steps accumulate.
    loss = criterion(out, y) +js_div(out_ori, out, get_softmax=True)
    loss /= m
    for _ in range(m-1):
        # loss.backward()
        model.manual_backward(loss)
        # Ascent step on the perturbation (sign of its gradient).
        perturb_data = perturb.detach() + step_size * torch.sign(perturb.grad.detach())
        if mag > 0:
            # Project back onto the L2 ball of radius mag (last dim).
            perturb_data_norm = torch.norm(perturb_data, dim=-1).detach()
            exceed_mask = (perturb_data_norm > mag).to(perturb_data)
            reweights = (mag / perturb_data_norm * exceed_mask +
                         (1-exceed_mask)).unsqueeze(-1)
            perturb_data = (perturb_data * reweights).detach()
        perturb.data = perturb_data.data
        perturb.grad[:] = 0
        out = forward(perturb).view(-1)
        if mask is not None:
            out = out[mask]
        loss = criterion(out, y) +js_div(out_ori, out, get_softmax=True)
        loss /= m
    # Final backward for the last ascent step, then one parameter update.
    # loss.backward()
    model.manual_backward(loss)
    optimizer.step()
    return loss, out
| StarcoderdataPython |
131367 | import requests
import re
import time
import datetime
import numpy as np
"""
Prerequisite: Create access tokens
You need private access token to have full access to Github search
API.
Generate your access token in [here](https://github.com/settings/tokens)
you don't need to tick on any access permission because you are not
modifying your private repositories.
"""
# input your token here
# SECURITY: a personal access token is hardcoded and committed to source.
# Revoke this token and load the value from an environment variable instead.
token = "e6a9b0b2c3598c64aa84add48e13ab94c43c978c"
def extract_repo(result):
    """Yield the repository URL of every item in a GitHub search response."""
    items = result.json()["items"]
    return (item["url"] for item in items)
def query_backoff(*args, **argv):
    """requests.get with retries and exponential backoff.

    Tries up to *max_tries* times, doubling the wait between attempts.
    Returns the successful Response, or None once every attempt has
    failed — callers must check for None.

    Fix vs. original: no sleep after the final failed attempt (the old
    code slept up to 1920 s before giving up anyway).
    """
    max_tries = 5
    wait = 120
    for attempt in range(max_tries):
        r = requests.get(*args, **argv)
        if r.status_code == 200:
            return r
        if attempt == max_tries - 1:
            break  # exhausted — pointless to sleep again
        print("Query failed. Wait %d secs and try again: %s" % (wait, r.content))
        time.sleep(wait)
        wait *= 2
    return None
def retrieve_matched_repo(query, num, from_year, to_year=None, n_per_query=5):
    """Collect roughly *num* repository API URLs matching *query*.

    Samples num // n_per_query random one-week creation windows between
    Jan 1 of *from_year* and *to_year* (today when omitted), queries the
    GitHub search API per window (20 s pause between requests, since
    GitHub throttles similar queries), and keeps at most *n_per_query*
    results from each response.
    """
    headers = {'Authorization': 'token %s' % token}
    base_url = 'https://api.github.com/'
    start = datetime.date(from_year, 1, 1)
    end = datetime.date.today() if to_year is None else datetime.date(to_year, 1, 1)
    span_days = (end - start).days
    offsets = np.random.choice(span_days, size=(num // n_per_query, ))
    sample_dates = [start + datetime.timedelta(days=d) for d in offsets]
    repos = []
    for day in sample_dates:
        window_start = day - datetime.timedelta(days=7)
        payload = {
            'q': query +
            " sort:updated" +
            " created:%d-%02d-%02d..%d-%02d-%02d" % (
                window_start.year, window_start.month, window_start.day,
                day.year, day.month, day.day)}
        # github block similar queries, so take extra intervals
        time.sleep(20)
        response = requests.get(base_url + 'search/repositories', params=payload, headers=headers)
        repos.extend(list(extract_repo(response))[:n_per_query])
    return repos
# Script body: harvest repository API URLs and dump them one per line.
# Each call makes 200 // 5 = 40 search requests with a 20 s pause apiece,
# so expect ~13+ minutes of wall time per list.
result = retrieve_matched_repo('tensorflow language:python', 200, 2015)
with open("found_all_tensorflow.txt", 'w') as fout:
    for r in result:
        fout.write(r + '\n')
result = retrieve_matched_repo('pytorch language:python', 200, 2017)
with open("found_all_pytorch.txt", 'w') as fout:
    for r in result:
        fout.write(r + '\n')
| StarcoderdataPython |
1725137 | <filename>basic/016_3Sum_Closest.py
class Solution:
    def threeSumClosest(self, nums: list[int], target: int) -> int:
        """Return the sum of three elements of *nums* closest to *target*.

        Sort, then for each anchor run a two-pointer sweep: O(n^2) time,
        O(1) extra space. Fixes vs. original: the annotation used
        ``List[int]`` without importing ``typing.List`` (NameError when
        the module runs standalone) — replaced by the PEP 585 builtin
        generic ``list[int]`` — and the closest-diff bookkeeping is
        unified via abs() instead of being duplicated in both branches.
        """
        nums.sort()
        n = len(nums)
        best = 0                    # closest sum seen so far (0 if fewer than 3 elements)
        best_diff = float('inf')
        for i in range(n - 2):
            # Skip duplicate anchors: same value yields the same sums.
            if i > 0 and nums[i] == nums[i - 1]:
                continue
            lo, hi = i + 1, n - 1
            while lo < hi:
                s = nums[i] + nums[lo] + nums[hi]
                if s == target:
                    return target
                diff = abs(s - target)
                if diff < best_diff:
                    best, best_diff = s, diff
                # Move the pointer that shrinks the gap toward target.
                if s > target:
                    hi -= 1
                else:
                    lo += 1
        return best
3362016 | # coding=utf-8
import re
import random
import base64
import itertools
import concurrent.futures
import difflib
import logging
from urllib import parse
from lib.sqldb import Sqldb
from lib.settings import *
from lib.cli_output import *
from lib.Requests import Requests
class Cartesian(object):
def __init__(self):
self._data_list = []
# 添加生成笛卡尔积的数据列表
def add_data(self, data=[]):
self._data_list.append(data)
# 计算笛卡尔积
def build(self):
urls = []
for item in itertools.product(*self._data_list):
urls.append(item[0] + item[1])
return urls
class DirScan():
def __init__(self, dbname):
self.notstr = ''
self.notlen = ''
self.goto = ''
self.title = ''
self.dbname = dbname
self.ext = 'asp,php'
self.outjson = []
self.req = Requests()
def get_urls(self, domain):
domain = domain.replace('http://', '').replace('https://', '').rstrip('/')
ext = self.ext.split(',')
ext = list(map(lambda x: '.' + x, ext))
path = [
"/robots.txt", "/README.md", "/crossdomain.xml", "/.git/config",
"/.hg"
"/.git/index", "/.svn/entries", "/.svn/wc.db", "/.DS_Store",
"/CVS/Root", "/CVS/Entries", "/.idea/workspace.xml",
"/nginx_status", "/.mysql_history", "/login/", "/phpMyAdmin",
"/pma/", "/pmd/", "/SiteServer", "/admin/", "/Admin/", "/manage",
"/manager/", "/manage/html", "/resin-admin", "/resin-doc",
"/axis2-admin", "/admin-console", "/system", "/wp-admin",
"/uc_server", "/debug", "/Conf", "/webmail", "/service",
"/memadmin", "/owa", "/harbor", "/master", "/root", "/xmlrpc.php",
"/phpinfo.php", "/zabbix", "/api", "/backup", "/inc",
"/web.config", "/httpd.conf", "/local.conf", "/sitemap.xml",
"/app.config", "/.bash_history", "/.rediscli_history", "/.bashrc",
"/.history", "/nohup.out", "/.mysql_history", "/server-status",
"/solr/", "/examples/",
"/examples/servlets/servlet/SessionExample", "/manager/html",
"/login.do", "/config/database.yml", "/database.yml", "/db.conf",
"/db.ini", "/jmx-console/HtmlAdaptor", "/cacti/",
"/jenkins/script", "/memadmin/index.php", "/pma/index.php",
"/phpMyAdmin/index.php", "/.git/HEAD", "/.gitignore",
"/.ssh/known_hosts", "/.ssh/id_rsa", "/id_rsa",
"/.ssh/authorized_keys", "/app.cfg", "/.mysql.php.swp",
"/.db.php.swp", "/.database.php.swp", "/.settings.php.swp",
"/.config.php.swp", "/config/.config.php.swp",
"/.config.inc.php.swp", "/config.inc.php.bak", "/php.ini",
"/sftp-config.json", "/WEB-INF/web.xml",
"/WEB-INF/web.xml.bak", "/WEB-INF/config.xml",
"/WEB-INF/struts-config.xml", "/server.xml",
"/config/database.yml", "/WEB-INF/database.properties",
"/WEB-INF/log4j.properties", "/WEB-INF/config/dbconfig",
"/fckeditor/_samples/default.html", "/ckeditor/samples/",
"/ueditor/ueditor.config.js",
"/javax.faces.resource...%2fWEB-INF/web.xml.jsf", "/wp-config.php",
"/configuration.php", "/sites/default/settings.php", "/config.php",
"/config.inc.php", "/data/config.php", "/data/config.inc.php",
"/data/common.inc.php", "/include/config.inc.php",
"/WEB-INF/classes/", "/WEB-INF/lib/", "/WEB-INF/src/", "/.bzr",
"/SearchPublicRegistries.jsp", "/.bash_logout",
"/resin-doc/resource/tutorial/jndi-appconfig/test?inputFile=/etc/profile",
"/test2.html", "/conf.ini", "/index.tar.tz", "/index.cgi.bak",
"/WEB-INF/classes/struts.xml", "/package.rar",
"/WEB-INF/applicationContext.xml", "/mysql.php", "/apc.php",
"/zabbix/", "/script", "/editor/ckeditor/samples/", "/upfile.php",
"/conf.tar.gz",
"/WEB-INF/classes/conf/spring/applicationContext-datasource.xml",
"/output.tar.gz", "/.vimrc", "/INSTALL.TXT", "/pool.sh",
"/database.sql.gz", "/o.tar.gz", "/upload.sh",
"/WEB-INF/classes/dataBase.properties", "/b.php", "/setup.sh",
"/db.php.bak", "/WEB-INF/classes/conf/jdbc.properties",
"/WEB-INF/spring.xml", "/.htaccess",
"/resin-doc/viewfile/?contextpath=/&servletpath=&file=index.jsp",
"/.htpasswd", "/id_dsa", "/WEB-INF/conf/activemq.xml",
"/config/config.php", "/.idea/modules.xml",
"/WEB-INF/spring-cfg/applicationContext.xml", "/test2.txt",
"/WEB-INF/classes/applicationContext.xml",
"/WEB-INF/conf/database_config.properties",
"/WEB-INF/classes/rabbitmq.xml",
"/ckeditor/samples/sample_posteddata.php", "/proxy.pac",
"/sql.php", "/test2.php", "/build.tar.gz",
"/WEB-INF/classes/config/applicationContext.xml",
"/WEB-INF/dwr.xml", "/readme", "/phpmyadmin/index.php",
"/WEB-INF/web.properties", "/readme.html", "/key"
]
leaks = Cartesian()
leaks.add_data([
'/www', '/1', '/2016', '/2017', '/2018', '/2019', '/wwwroot',
'/backup', '/index', '/web', '/test', '/tmp', '/default', '/temp',
'/extra', '/file', '/qq', '/up', '/config', '/' + domain
])
leaks.add_data([
'.tar.gz', '.zip', '.rar', '.sql', '.7z', '.bak', '.tar', '.txt',
'.log', '.tmp', '.gz', '.bak~', '.sh'
])
path.extend(leaks.build())
index = Cartesian()
index.add_data([
'/1', '/l', '/info', '/index', '/admin', '/login', '/qq', '/q',
'/shell', '/p', '/a', '/userinfo', '/api', '/common', '/web',
'/manage', '/loading', '/left', '/zzzz', '/welcome', '/ma', '/66'
])
index.add_data(ext)
path.extend(index.build())
return set(path)
def diff(self, text):
result = difflib.SequenceMatcher(None, self.notstr, text).quick_ratio()
return result
def _verify(self, r, goto, title):
result = True
if r.status_code in BLOCK_CODE:
result = False
if r.headers['Content-Type'] in BLOCK_CONTYPE:
result = False
if len(r.text) == self.notlen:
result = False
if goto == self.goto:
result = False
for i in PAGE_404:
if i in r.text:
result = False
break
if title == self.title and title != 'None':
result = False
return result
def check404(self, url):
# 访问一个随机的页面记录404页面的长度与内容
key = str(random.random() * 100)
random_url = base64.b64encode(key.encode('utf-8'))
url = url + '/' + random_url.decode(
'utf-8') + '.html'
try:
r = self.req.get(url)
self.notstr = r.text[:10000]
self.notlen = len(r.text)
if r.is_redirect:
self.goto = r.headers['Location']
except Exception as e:
logging.exception(e)
def scan(self, host):
try:
r = self.req.get(host)
if r.is_redirect:
goto = r.headers['Location']
else:
goto = 'test'
if r.headers['Content-Type']:
contype = re.sub('\w+/', '', str(r.headers['Content-Type']))
contype = re.sub(';.*', '', contype)
else:
contype = 'None'
text = r.text[:10000]
title = re.search('(?<=<title>).*(?=</title>)', text)
if self._verify(r, goto, title):
if contype == 'html':
result = self.diff(text)
else:
result = 0
if result < 0.8:
if title == None:
title = 'None'
else:
title = title.group()
title = re.sub(r'\n|\t', '', title)
urlresult = parse.urlparse(host)
sys.stdout.write(bcolors.OKGREEN + '[+] {}{:^12}{:^14}\t{:^18}\t{:^8}\n'.format(
r.status_code, len(r.text), title, contype, str(r.url)) + bcolors.ENDC)
data = {
urlresult.netloc: {
"rsp_code": r.status_code,
"rsp_len": len(r.text),
"title": title,
"contype": contype,
"url": urlresult.path
}
}
self.outjson.append(data)
except Exception as e:
pass
return 'OK'
def save(self, urls):
Sqldb(self.dbname).get_urls(urls)
def run(self, task):
print(bcolors.RED + 'URLS:' + bcolors.ENDC)
with concurrent.futures.ThreadPoolExecutor(
max_workers=THREADS) as executor:
futures = [executor.submit(self.scan, i) for i in task]
for future in concurrent.futures.as_completed(futures):
future.result()
self.save(self.outjson)
# 创建启动任务
def pool(self, host):
self.check404(host)
task = []
urls = self.get_urls(host)
for url in urls:
task.append(host + url)
self.run(task)
| StarcoderdataPython |
1715121 | """Unfriendly.
A tool for unfollowing inactive friends on Twitter.
"""
import unfriendly.app
__author__ = '<NAME>'
__version__ = '1.0.0'
APP_NAME = 'Unfriendly'
def main():
"""Application entry point."""
unfriendly.app.main(APP_NAME, __version__)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1697193 | <reponame>Valmarelox/auto_struct
from .base_type import BaseType
from .base_single_value_type import BaseSingleValueType
from .base_struct import BaseStruct
| StarcoderdataPython |
1607543 | <reponame>neilschark/ba-website<filename>performance_tests/pyplot_files/database_string_25_rps.py
import matplotlib.pyplot as plt
import sys
from ExtractDataCsv import extract_data_csv
from TableDataGenerator import write_table_data
import copy
timestamp = sys.argv[1]
data_type = sys.argv[2]
test_type = "database_string_25"
deployments = ["classic", "docker", "orchestration"]
save_filename = f"{timestamp}__{test_type}_rps"
average_data = {}
# Get data from csv
data = extract_data_csv(deployments, timestamp, test_type, x_position=0, y_position=4, calc_average=False)
data_table_data = extract_data_csv(deployments, timestamp, test_type, x_position=1, y_position=4, calc_average=False)
cut_front = 11
cut_back = 4
write_table_data(data_table_data, save_filename, cut_front, cut_back)
# delete entries that are too early:
for deployment, deployment_data in data.items():
for i in range(0, cut_front):
del(deployment_data["x"][0])
del(deployment_data["y"][0])
# delete entries that are too late:
for deployment, deployment_data in data.items():
for i in range(0, cut_back):
del(deployment_data["x"][-1])
del(deployment_data["y"][-1])
#calculate average
data_copy = copy.deepcopy(data)
for deployment, deployment_data in data_copy.items():
for _ in range(0, 4):
del deployment_data["y"][0]
del deployment_data["y"][-1]
average_data[deployment] = round(sum(deployment_data["y"])/len(deployment_data["y"]), 2)
# get first timestamp
start_timestamps = {}
for deployment in deployments:
start_timestamps[deployment] = data[deployment]["x"][0]
# Calc timestamps starting from 0
for deployment, deployment_data in data.items():
for iterator, timestamp in enumerate(deployment_data["x"]):
deployment_data["x"][iterator] = timestamp - start_timestamps[deployment]
plt.plot(data["classic"]["x"], data["classic"]["y"], color="tab:blue", marker=".")
plt.plot(data["docker"]["x"], data["docker"]["y"], color="tab:green", marker=".")
plt.plot(data["orchestration"]["x"], data["orchestration"]["y"], color="tab:orange", marker=".")
plt.title("Datenbank (String): Anfragen pro Sekunde 25 Nutzer")
plt.xlabel("Zeit in Sekunden")
plt.ylabel("Anfragen pro Sekunde")
plt.legend([f'Klassisch (Ø: {average_data["classic"]})', f'Docker (Ø: {average_data["docker"]})', f'K3s (Ø: {average_data["orchestration"]})'], loc=0)
#plt.legend(['Klassisch', 'Docker', 'K3s'], loc=0)
# Limit graph scale:
#plt.xlim(0)
plt.ylim(10, 12)
plt.savefig(f"../graphs/{save_filename}.{data_type}")
| StarcoderdataPython |
1794136 | <filename>contenttype/tree/TestParsonalTree_BlackBox.py<gh_stars>0
import unittest
from SubTypeTree import SubTypeTreeFactory
from SubTypeTree import VenderTreeFactory
from SubTypeTree import SubTypeTree
from SubTypeTree import VenderTree
from SubTypeTree import GitHubVenderTree
from SubTypeTree import StandardTree
from SubTypeTree import ParsonalTree
from SubTypeTree import UnregisteredTree
class TestParsonalTree_BlackBox(unittest.TestCase):
def test_Values(self):
tree_list = ['tree1']
tree = ParsonalTree(tree_list)
self.assertEqual('prs', tree.GetFacet())
self.assertEqual(tree_list, tree.TreeList)
| StarcoderdataPython |
3376680 | <reponame>radiosmersh/gsquery<filename>gsquery/colorizer.py
OUTPUT_TEXT = 0
OUTPUT_HTML = 1
OUTPUT_ANSI = 2
class none:
def __init__(self):
self.mode = OUTPUT_TEXT
def parse(self, string):
return string
class q3(none):
COLORS_HTML = {
"0": "000000",
"1": "DA0120",
"2": "00B906",
"3": "E8FF19",
"4": "170BDB",
"5": "23C2C6",
"6": "E201DB",
"7": "FFFFFF",
"8": "CA7C27",
"9": "757575",
"a": "EB9F53",
"b": "106F59",
"c": "5A134F",
"d": "035AFF",
"e": "681EA7",
"f": "5097C1",
"g": "BEDAC4",
"h": "024D2C",
"i": "7D081B",
"j": "90243E",
"k": "743313",
"l": "A7905E",
"m": "555C26",
"n": "AEAC97",
"o": "C0BF7F",
"p": "000000",
"q": "DA0120",
"r": "00B906",
"s": "E8FF19",
"t": "170BDB",
"u": "23C2C6",
"v": "E201DB",
"w": "FFFFFF",
"x": "CA7C27",
"y": "757575",
"z": "CC8034",
"/": "DBDF70",
"*": "BBBBBB",
"-": "747228",
"+": "993400",
"?": "670504",
"@": "623307",
}
COLORS_ANSI = {
"0": "0;30m",
"1": "0;31m",
"2": "0;32m",
"3": "1;33m",
"4": "0;34m",
"5": "1;34m",
"6": "1;35m",
"7": "1;37m",
"8": "0;33m",
"9": "1;30m",
}
def parse(self, string):
currentcode = None
output = ""
while len(string) > 0:
if string[0] == "^":
output += self.parsecode(currentcode, string[1])
currentcode = string[1]
string = string[2:]
else:
output += string[0]
string = string[1:]
output += self.parsecode(currentcode)
return output
def parsecode(self, currentcode = None, newcode = None):
output = ""
if currentcode == newcode:
pass
elif self.mode == OUTPUT_TEXT:
pass
elif self.mode == OUTPUT_HTML:
if not currentcode == None:
output += "</span>"
if newcode in self.COLORS_HTML:
output += "<span style=\"color:#%s;\">" % (self.COLORS_HTML[newcode],)
elif self.mode == OUTPUT_ANSI:
if currentcode == None or newcode == None:
output += "\033[0;0m"
if newcode in self.COLORS_ANSI:
output += "\033[%s" % (self.COLORS_ANSI[newcode],)
return output
class unreal(none):
def parse(self, string):
currentcode = None
output = ""
while len(string) > 0:
if ord(string[0]) == 27:
output += self.parsecode(currentcode, string[1:4])
currentcode = string[1:4]
string = string[4:]
else:
output += string[0]
string = string[1:]
output += self.parsecode(currentcode)
return output
def parsecode(self, currentcode = None, newcode = None):
output = ""
if currentcode == newcode:
pass
elif self.mode == OUTPUT_TEXT:
pass
elif self.mode == OUTPUT_HTML:
if not currentcode == None:
output += "</span>"
if not newcode == None:
output += "<span style=\"color:rgb(%d,%d,%d);\">" % (ord(newcode[0]),ord(newcode[1]),ord(newcode[2]))
elif self.mode == OUTPUT_ANSI:
if currentcode == None or newcode == None:
output += "\033[0;0m"
return output
| StarcoderdataPython |
121851 | <gh_stars>0
import pytest
from eth_keys import keys
from cytoolz import (
merge,
)
from evm.exceptions import ValidationError
from evm.auxiliary.user_account_contract.transaction import (
UserAccountTransaction,
UnsignedUserAccountTransaction
)
# A fully valid keyword set for (Unsigned)UserAccountTransaction; tests
# override single fields with entries from INVALID_PARAMS below.
VALID_PARAMS = {
    "chain_id": 1,
    "shard_id": 2,
    "to": b"\xaa" * 20,
    "gas": 300000,
    "access_list": [[b"\xaa" * 20, b"\x00"]],
    "destination": b"\xbb" * 20,
    "value": 4,
    "nonce": 5,
    "min_block": 6,
    "max_block": 7,
    "gas_price": 8,
    "msg_data": b"\xcc" * 123,
}
# Type-invalid counterpart for every field above (e.g. bytes where an int
# is required, hex string instead of canonical address bytes).
INVALID_PARAMS = {
    "chain_id": b"\x01",
    "shard_id": b"\x02",
    "to": "0x" + "aa" * 20,
    "gas": b"\x03",
    "access_list": [[b"\xaa" * 20, 0]],
    "destination": "0x" + "bb" * 20,
    "value": b"\x04",
    "nonce": b"\x05",
    "min_block": b"\x06",
    "max_block": b"\x07",
    "gas_price": b"\x08",
    "msg_data": 123,
}
@pytest.fixture
def unsigned_transaction():
    # Fresh unsigned transaction built from the known-good parameter set.
    return UnsignedUserAccountTransaction(**VALID_PARAMS)
def test_signing(unsigned_transaction):
    # Signing must allow recovering the signer's canonical address.
    private_key = keys.PrivateKey(b"\x22" * 32)
    signed_transaction = unsigned_transaction.as_signed_transaction(private_key)
    assert signed_transaction.get_sender() == private_key.public_key.to_canonical_address()
def test_data(unsigned_transaction):
    # Encoded data still ends with msg_data after signing, and signing adds
    # exactly 96 bytes (presumably the v/r/s signature words — see last assert).
    private_key = keys.PrivateKey(b"\x22" * 32)
    signed_transaction = unsigned_transaction.as_signed_transaction(private_key)
    assert len(signed_transaction.data) > 10 * 32
    assert signed_transaction.data.endswith(signed_transaction.msg_data)
    assert signed_transaction.data.endswith(unsigned_transaction.msg_data)
    assert len(signed_transaction.data) == len(unsigned_transaction.data) + 96
@pytest.mark.parametrize("key,value", INVALID_PARAMS.items())
def test_validation(key, value):
    # construct object with valid parameters, apply invalid values afterwards
    # this ensures object creation succeeds
    tx = UnsignedUserAccountTransaction(**VALID_PARAMS)
    with pytest.raises(ValidationError):
        setattr(tx, key, value)
        tx.validate()
    # Same check for the signed variant (dummy v/r/s signature values).
    tx = UserAccountTransaction(**merge(VALID_PARAMS, {"v": 27, "r": 1, "s": 1}))
    with pytest.raises(ValidationError):
        setattr(tx, key, value)
        tx.validate()
| StarcoderdataPython |
3350940 | from .BaseProvisioner import BaseProvisioner
from .DiggLpMetaFarmProvisioner import DiggLpMetaFarmProvisioner
from .SushiDiggWbtcLpOptimizerProvisioner import SushiDiggWbtcLpOptimizerProvisioner
from .DiggRewardsProvisioner import DiggRewardsProvisioner
from .BadgerRewardsProvisioner import BadgerRewardsProvisioner
from .HarvestMetaFarmProvisioner import HarvestMetaFarmProvisioner
from .SushiBadgerWbtcProvisioner import SushiBadgerWbtcProvisioner
from .SushiLpOptimizerProvisioner import SushiLpOptimizerProvisioner
from .BadgerLpMetaFarmProvisioner import BadgerLpMetaFarmProvisioner
from .CurveGaugeProvisioner import CurveGaugeProvisioner
from .SushiClawUSDCProvisioner import SushiClawUSDCProvisioner
from .PancakeBnbBtcbProvisioner import PancakeBnbBtcbProvisioner
from .SushiWbtcIbBtcLpOptimizerProvisioner import SushiWbtcIbBtcLpOptimizerProvisioner
| StarcoderdataPython |
3398989 | <filename>gaia-sdk-python/gaia_sdk/http/tests/test_hmactokenbuilder.py
import unittest
from gaia_sdk.api.GaiaCredentials import HMACCredentials
from gaia_sdk.api.client_options import ClientOptions
from gaia_sdk.http.HMACTokenBuilder import HMACTokenBuilder
class TestHMACTokenBuilder(unittest.TestCase):
    """Regression test: fixed payload/nonce/timestamp and credentials must
    always yield the same HMAC token."""

    def test_generate_token(self):
        options = ClientOptions(HMACCredentials("apiKey", "secret"))
        timestamp = int(1592924470)  # fixed so the HMAC input is deterministic
        payload = "hi"
        nonce = "353823db-c12b-44b2-b0dc-c4d813c74b24"
        # NOTE(review): expected value was redacted upstream ("<KEY>") — restore
        # the real token before relying on this test.
        expected_token = "<KEY>"
        token = HMACTokenBuilder().with_payload(payload) \
            .with_nonce(nonce) \
            .with_timestamp(timestamp) \
            .with_client_options(options).build()
        self.assertEqual(token, expected_token)
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main()
| StarcoderdataPython |
1683899 | <reponame>jambondeau1967/BackupMacOS
'''
depends on lib libLogger
'''
import os
import sys
import logging
import glob
import json
logger = logging.getLogger('libConfig')
def folder_exists(folder):
    '''
    Return True when *folder* is an existing directory, else False.
    Logs the outcome at DEBUG level.
    '''
    # Simplification: os.path.isdir already yields the boolean we need.
    present = os.path.isdir(folder)
    if present:
        logging.debug(f'folder present: {folder}')
    else:
        logging.debug(f'folder not present: {folder}')
    return present
def folder_create(folder):
    '''
    Create *folder* (including parent directories) when missing.

    Returns True on success (or when the folder already exists),
    False when creation failed.
    '''
    # Same named logger the module uses; inlined so the function is
    # self-contained.
    log = logging.getLogger('libConfig')
    res = False
    try:
        if not os.path.exists(folder):
            # Bug fix: os.makefolders() does not exist — the original always
            # raised AttributeError and fell into the except clause.
            os.makedirs(folder)
            log.info(f'folder created: {folder}')
        else:
            # Bug fix: message previously claimed "folder created" here too.
            log.info(f'folder already exists: {folder}')
        res = True
    except OSError:
        # Narrowed from a bare except: only filesystem errors are expected.
        log.critical(
            f'folder {folder} NOT created..?? exception {sys.exc_info()[0]}')
    return res
def folder_listfiles(folder, mask='*'):
    '''
    Return (count, files) for entries in *folder* matching *mask*,
    or (0, None) when the folder does not exist.
    '''
    if not os.path.exists(folder):
        # Same named logger the module binds at import time.
        logging.getLogger('libConfig').info(f'folder does not exists: {folder}')
        return 0, None
    matches = glob.glob(os.path.join(folder, mask))
    return len(matches), matches
def file_exists(file, notexists_is_error=False):
    '''
    Return True when *file* is an existing regular file.

    A missing file is logged at ERROR level when notexists_is_error is
    True, otherwise at INFO level.
    '''
    if os.path.isfile(file):
        logging.debug(f'file present: {file}')
        return True
    emit = logging.error if notexists_is_error else logging.info
    emit(f'file NOT present: {file}')
    return False
def file_getcontent(file):
    '''
    Read all lines of *file*.

    Returns (result, lines, count): result is True when the file could be
    read, lines is the list of lines (or None), count is len(lines).
    '''
    lines = None
    result = False
    count = 0
    try:
        if file_exists(file):
            with open(file) as openfile:
                lines = openfile.readlines()
            result = True
    except FileNotFoundError as fnf_error:
        lines = None
        logger.error(f'FileNotFoundError {fnf_error}')
    if result:
        # Idiom fix: compare to None with "is", not "==".
        if lines is None:
            logger.info(f'file {file} contents is None')
        else:
            count = len(lines)
            logger.info(f'file {file} contains {count} lines.')
    return result, lines, count
def file_json_getcontent(file):
    '''Parse *file* as JSON and return the resulting object.'''
    with open(file, "r") as handle:
        return json.load(handle)
| StarcoderdataPython |
3353727 | '''
title : app.py
description : Streamlit app that compares Messi and Ronaldo's stats and shows their positions on the pitch.
author : <NAME>
date_created : 20200521
date_modified : 20200613
version : 1.0
usage : streamlit run app.py
python_version : 3.7.6
'''
import datetime
import unicodedata
import markdown
import json
import numpy as np
import pandas as pd
import bokeh
import streamlit as st
from plots import *
@st.cache(allow_output_mutation=True)
def get_data(foot):
    """Load the pickled event and match-date frames and keep only events
    played with the requested foot ('Left', 'Right', or both)."""
    def _unescape(text):
        # Labels were double-escaped when pickled; decode them back.
        return bytes(text, encoding='utf-8').decode('unicode-escape')

    messi_events_data_df = pd.read_pickle("./data/messi_events_data_df.pkl")
    ronaldo_events_data_df = pd.read_pickle("./data/ronaldo_events_data_df.pkl")
    messi_events_data_df['label'] = messi_events_data_df['label'].apply(_unescape)
    ronaldo_events_data_df['label'] = ronaldo_events_data_df['label'].apply(_unescape)

    if foot == 'Left':
        messi_events_data_df = messi_events_data_df[messi_events_data_df['left_foot']]
        ronaldo_events_data_df = ronaldo_events_data_df[ronaldo_events_data_df['left_foot']]
    if foot == 'Right':
        messi_events_data_df = messi_events_data_df[messi_events_data_df['right_foot']]
        ronaldo_events_data_df = ronaldo_events_data_df[ronaldo_events_data_df['right_foot']]

    barca_matches_dates_df = pd.read_pickle("./data/barca_matches_dates_df.pkl")
    real_matches_dates_df = pd.read_pickle("./data/real_matches_dates_df.pkl")
    barca_matches_dates_df['match'] = barca_matches_dates_df['match'].apply(_unescape)
    real_matches_dates_df['match'] = real_matches_dates_df['match'].apply(_unescape)
    return messi_events_data_df, ronaldo_events_data_df, barca_matches_dates_df, real_matches_dates_df
def plot_goals(messi_events_data_df, ronaldo_events_data_df, barca_matches_dates_df, real_matches_dates_df):
    """Build the 'Goals' tab: goal maps for both players plus per-match
    goal counts joined with the match dates."""
    def _table(events, dates):
        per_match = events.groupby(['label']).sum()['goal'].astype(int)
        df = pd.DataFrame(data=zip(per_match.index, per_match), columns=['match', '#goals'])
        df = pd.merge(df, dates, on='match', copy=False, how="left")
        return df[['date', 'match', '#goals']]

    pitch_messi = plot_events(messi_events_data_df[messi_events_data_df['goal'] == True]['positions'], 'Goals', 'red')
    pitch_ronaldo = plot_events(ronaldo_events_data_df[ronaldo_events_data_df['goal'] == True]['positions'], 'Goals', 'blue')
    layout = bokeh.layouts.grid(
        children=[
            [pitch_messi, pitch_ronaldo],
            [print_table(_table(messi_events_data_df, barca_matches_dates_df)),
             print_table(_table(ronaldo_events_data_df, real_matches_dates_df))],
        ],
        sizing_mode="stretch_width",
    )
    return bokeh.models.Panel(child=layout, title="Goals")
def plot_assists(messi_events_data_df, ronaldo_events_data_df, barca_matches_dates_df, real_matches_dates_df):
    """Build the 'Assists' tab: assist maps plus per-match assist counts."""
    def _table(events, dates):
        per_match = events.groupby(['label']).sum()['assist'].astype(int)
        df = pd.DataFrame(data=zip(per_match.index, per_match), columns=['match', '#assists'])
        df = pd.merge(df, dates, on='match', copy=False, how="left")
        return df[['date', 'match', '#assists']]

    pitch_messi = plot_events(messi_events_data_df[messi_events_data_df['assist'] == True]['positions'], 'Assists', 'red')
    pitch_ronaldo = plot_events(ronaldo_events_data_df[ronaldo_events_data_df['assist'] == True]['positions'], 'Assists', 'blue')
    layout = bokeh.layouts.grid(
        children=[
            [pitch_messi, pitch_ronaldo],
            [print_table(_table(messi_events_data_df, barca_matches_dates_df)),
             print_table(_table(ronaldo_events_data_df, real_matches_dates_df))],
        ],
        sizing_mode="stretch_width",
    )
    return bokeh.models.Panel(child=layout, title="Assists")
def plot_shots(messi_events_data_df, ronaldo_events_data_df, barca_matches_dates_df, real_matches_dates_df):
    """Build the 'Shots' tab: shot location maps plus per-match shot counts."""
    def _table(events, dates):
        counts = events.groupby(['label', 'eventName']).count()['eventId']
        df = pd.DataFrame(data=zip(counts[:, 'Shot'].index, counts[:, 'Shot']), columns=['match', '#shots'])
        df = pd.merge(df, dates, on='match', copy=False, how="left")
        return df[['date', 'match', '#shots']]

    pitch_messi = plot_events(messi_events_data_df[messi_events_data_df['eventName'] == 'Shot']['positions'], 'Shots', 'red')
    pitch_ronaldo = plot_events(ronaldo_events_data_df[ronaldo_events_data_df['eventName'] == 'Shot']['positions'], 'Shots', 'blue')
    layout = bokeh.layouts.grid(
        children=[
            [pitch_messi, pitch_ronaldo],
            [print_table(_table(messi_events_data_df, barca_matches_dates_df)),
             print_table(_table(ronaldo_events_data_df, real_matches_dates_df))],
        ],
        sizing_mode="stretch_width",
    )
    return bokeh.models.Panel(child=layout, title="Shots")
def plot_free_kicks(messi_events_data_df, ronaldo_events_data_df, barca_matches_dates_df, real_matches_dates_df):
    """Build the 'Free Kicks' tab: free-kick shot maps plus per-match counts.

    The pitch uses subEventName == 'Free kick shot'; the table counts the
    'Free Kick' event group and falls back to an empty table when a player
    has none in the selection.
    """
    def _table(events, dates):
        try:
            counts = events.groupby(['label', 'eventName']).count()['eventId']
            df = pd.DataFrame(data=zip(counts[:, 'Free Kick'].index, counts[:, 'Free Kick']), columns=['match', '#free kicks'])
        except:
            # No 'Free Kick' events for this selection.
            df = pd.DataFrame(columns=['match', '#free kicks'])
        df = pd.merge(df, dates, on='match', copy=False, how="left")
        return df[['date', 'match', '#free kicks']]

    pitch_messi = plot_events(messi_events_data_df[messi_events_data_df['subEventName'] == 'Free kick shot']['positions'], 'Free Kicks', 'red')
    pitch_ronaldo = plot_events(ronaldo_events_data_df[ronaldo_events_data_df['subEventName'] == 'Free kick shot']['positions'], 'Free Kicks', 'blue')
    layout = bokeh.layouts.grid(
        children=[
            [pitch_messi, pitch_ronaldo],
            [print_table(_table(messi_events_data_df, barca_matches_dates_df)),
             print_table(_table(ronaldo_events_data_df, real_matches_dates_df))],
        ],
        sizing_mode="stretch_width",
    )
    return bokeh.models.Panel(child=layout, title="Free Kicks")
def plot_passes(messi_events_data_df, ronaldo_events_data_df, barca_matches_dates_df, real_matches_dates_df):
    """Build the 'Passes' tab: pass maps plus per-match pass counts."""
    def _table(events, dates):
        counts = events.groupby(['label', 'eventName']).count()['eventId']
        df = pd.DataFrame(data=zip(counts[:, 'Pass'].index, counts[:, 'Pass']), columns=['match', '#passes'])
        df = pd.merge(df, dates, on='match', copy=False, how="left")
        return df[['date', 'match', '#passes']]

    pitch_messi = plot_events(messi_events_data_df[messi_events_data_df['eventName'] == 'Pass']['positions'], 'Passes', 'red')
    pitch_ronaldo = plot_events(ronaldo_events_data_df[ronaldo_events_data_df['eventName'] == 'Pass']['positions'], 'Passes', 'blue')
    layout = bokeh.layouts.grid(
        children=[
            [pitch_messi, pitch_ronaldo],
            [print_table(_table(messi_events_data_df, barca_matches_dates_df)),
             print_table(_table(ronaldo_events_data_df, real_matches_dates_df))],
        ],
        sizing_mode="stretch_width",
    )
    return bokeh.models.Panel(child=layout, title="Passes")
if __name__ == '__main__':
    # CSS to display content correctly (widen the main block to 95%).
    st.markdown(
        f"""
        <style>
        .reportview-container .main .block-container{{
            max-width: 95%;
        }}
        </style>
        """,
        unsafe_allow_html=True,
    )
    foot = st.sidebar.radio("Foot", ('Either Left or Right', 'Left', 'Right'))
    messi_events_data_df, ronaldo_events_data_df, barca_matches_dates_df, real_matches_dates_df = get_data(foot)
    # Calculate Stats of both players and structure them in a Pandas DataFrame
    goals = [messi_events_data_df['goal'].sum(), ronaldo_events_data_df['goal'].sum()]
    assists = [messi_events_data_df['assist'].sum(), ronaldo_events_data_df['assist'].sum()]
    shots = [messi_events_data_df[messi_events_data_df['eventName'] == 'Shot'].count()['eventName'],
             ronaldo_events_data_df[ronaldo_events_data_df['eventName'] == 'Shot'].count()['eventName']]
    free_kicks = [messi_events_data_df[messi_events_data_df['subEventName'] == 'Free kick shot'].count()['subEventName'],
                  ronaldo_events_data_df[ronaldo_events_data_df['subEventName'] == 'Free kick shot'].count()['subEventName']]
    passes = [messi_events_data_df[messi_events_data_df['eventName'] == 'Pass'].count()['eventName'],
              ronaldo_events_data_df[ronaldo_events_data_df['eventName'] == 'Pass'].count()['eventName']]
    # Summary table shown in the sidebar (rows = stat, columns = player).
    stats_df = pd.DataFrame([goals, assists, shots, free_kicks, passes],
                            columns=['Messi', 'Ronaldo'],
                            index=['Goals', 'Assists', 'Shots', 'Free Kicks', 'Passes'])
    st.sidebar.markdown(""" ### Stats """)
    st.sidebar.dataframe(stats_df)
    st.image('./messi_ronaldo.png', use_column_width=True, format='PNG')
    # One bokeh tab per stat category.
    tabs = bokeh.models.Tabs(
        tabs=[
            plot_goals(messi_events_data_df, ronaldo_events_data_df, barca_matches_dates_df, real_matches_dates_df),
            plot_assists(messi_events_data_df, ronaldo_events_data_df, barca_matches_dates_df, real_matches_dates_df),
            plot_shots(messi_events_data_df, ronaldo_events_data_df, barca_matches_dates_df, real_matches_dates_df),
            plot_free_kicks(messi_events_data_df, ronaldo_events_data_df, barca_matches_dates_df, real_matches_dates_df),
            plot_passes(messi_events_data_df, ronaldo_events_data_df, barca_matches_dates_df, real_matches_dates_df),
        ]
    )
    st.bokeh_chart(tabs)
4811365 | # -*- coding: UTF-8 -*-
# File: __init__.py
# Author: <NAME> <<EMAIL>>
from pkgutil import walk_packages
import os
"""
Common utils.
These utils should be irrelevant to tensorflow.
"""
def _global_import(name):
    """Import sibling module *name* and hoist its public names into this
    package's namespace (honouring __all__ when defined), then drop the
    module object itself from the namespace."""
    module = __import__(name, globals(), None, level=1)
    public = module.__all__ if '__all__' in dir(module) else dir(module)
    del globals()[name]
    for symbol in public:
        globals()[symbol] = module.__dict__[symbol]
# Re-export the public API of these submodules at package level.
_global_import('naming')
_global_import('utils')
_global_import('gpu')
| StarcoderdataPython |
1658000 | from django.conf import settings
from django.utils import timezone
class TimezoneMiddleware(object):
    """Activate the per-user time zone for each authenticated request."""

    def process_request(self, request):
        if not request.user.is_authenticated():
            # Anonymous requests fall back to the default time zone.
            timezone.deactivate()
            return
        # Session override wins; then the profile value; then settings.
        tz = request.session.get('django_timezone',
                                 default=request.user.profile.time_zone) or settings.TIME_ZONE
        timezone.activate(tz)
| StarcoderdataPython |
3379336 | <filename>tests/trainers/lightning/test_loop_conditions.py<gh_stars>1-10
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from unittest.mock import patch
from tests.trainers.test_utils import get_lightning_trainer
class TestLightningTrainer(unittest.TestCase):
    """Checks how max_steps/max_epochs limits translate into the number of
    trainer updates (0.04 epochs corresponds to 4 updates here)."""

    def test_epoch_over_updates(self):
        # When both limits are set, _max_updates follows max_epochs (4), not
        # max_steps (2).
        with patch("mmf.trainers.lightning_trainer.get_mmf_env", return_value=None):
            trainer = get_lightning_trainer(max_steps=2, max_epochs=0.04)
            self.assertEqual(trainer._max_updates, 4)
            self._check_values(trainer, 0, 0)
            trainer.trainer.fit(trainer.model, trainer.data_module.train_loader)
            self._check_values(trainer, 4, 0)

    def test_fractional_epoch(self):
        # max_epochs alone: fractional epochs are converted to update counts.
        with patch("mmf.trainers.lightning_trainer.get_mmf_env", return_value=None):
            trainer = get_lightning_trainer(max_steps=None, max_epochs=0.04)
            self.assertEqual(trainer._max_updates, 4)
            self._check_values(trainer, 0, 0)
            trainer.trainer.fit(trainer.model, trainer.data_module.train_loader)
            self._check_values(trainer, 4, 0)

    def test_updates(self):
        # max_steps alone directly sets the update budget.
        with patch("mmf.trainers.lightning_trainer.get_mmf_env", return_value=None):
            trainer = get_lightning_trainer(max_steps=2, max_epochs=None)
            self.assertEqual(trainer._max_updates, 2)
            self._check_values(trainer, 0, 0)
            trainer.trainer.fit(trainer.model, trainer.data_module.train_loader)
            self._check_values(trainer, 2, 0)

    def _check_values(self, trainer, current_iteration, current_epoch):
        # Helper: assert global step and epoch counters on the inner trainer.
        self.assertEqual(trainer.trainer.global_step, current_iteration)
        self.assertEqual(trainer.trainer.current_epoch, current_epoch)
| StarcoderdataPython |
41809 | #!/usr/bin/env python
# coding: utf-8
import os
import unittest
from yalign.wordpairscore import WordPairScore
class TestWordPairScore(unittest.TestCase):
    """Checks that WordPairScore loads the same translation tables from a
    plain CSV fixture and from its gzipped variant."""

    def setUp(self):
        self.word_pair_score = self._create_word_pair_score('test_word_scores.csv')

    def _create_word_pair_score(self, filename):
        # Build a WordPairScore from a fixture file under tests/data/.
        base_path = os.path.dirname(os.path.abspath(__file__))
        translations = os.path.join(base_path, "data", filename)
        return WordPairScore(translations)

    def test_load_translations_in_gz_format(self):
        word_pair_score = self._create_word_pair_score('test_word_scores.csv.gz')
        translations = word_pair_score.translations
        self.check_translations(translations)

    def test_translations(self):
        translations = self.word_pair_score.translations
        self.check_translations(translations)

    def check_translations(self, translations):
        # Expected fixture contents: word -> {translation: score}.
        self.assertEqual(3, len(translations))
        self.assertEqual(translations[u'house'], {u'casa': 1.0})
        self.assertEqual(translations[u'you'], {u'ustedes': 0.625,
                                                u'vosotros': 0.375,
                                                u'vos': 0.75})
        self.assertEqual(translations[u'yourselves'], {u'vosotros': 0.75})
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    unittest.main()
| StarcoderdataPython |
20412 | #!/usr/bin/env python3
"""awspfx
Usage:
awspfx.py <profile>
awspfx.py [(-c | --current) | (-l | --list) | (-s | --swap)]
awspfx.py token [(-p | --profile) <profile>]
awspfx.py sso [(login | token)] [(-p | --profile) <profile>]
awspfx.py -h | --help
awspfx.py --version
Examples:
awspfx.py default # Change profile to 'default'
awspfx.py token # Token from current profile, default from SSO
awspfx.py token -p default # Token from profile 'default'
awspfx.py (-c | -l | -s)
SubCommands:
token Generate credentials
-p --profile Select profile
Options:
-c --current Change the profile
-l --list List profiles
-s --swap Swap previous the profile
-h --help Show this screen.
--version Show version.
WIP:
sso Option to login
sts Option to assume-role
"""
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
from configparser import ConfigParser as cfgParser
import boto3
from colorlog import ColoredFormatter
from docopt import docopt
from iterfzf import iterfzf
def setup_logging():
    """Configure and return the app logger ("pythonConfig") with a
    colourised "LEVEL => message" stream format at INFO level."""
    log_level = logging.INFO
    log_format = "\n%(log_color)s%(levelname)s%(reset)s => %(log_color)s%(message)s%(reset)s"
    logging.root.setLevel(log_level)
    formatter = ColoredFormatter(log_format)
    stream_ = logging.StreamHandler()
    stream_.setLevel(log_level)
    stream_.setFormatter(formatter)
    log_ = logging.getLogger("pythonConfig")
    log_.setLevel(log_level)
    log_.addHandler(stream_)
    return log_
def exit_err(msg):
    """Log *msg* as an error and abort the program with a failure status."""
    log.error(msg)
    # Bug fix: sys.exit() exits with status 0; an error path must signal
    # failure to the calling shell.
    sys.exit(1)
def has_which(command, err=True):
    """Return *command* when it is available on PATH.

    When missing: abort via exit_err() if err is True, else return False.
    """
    if shutil.which(command) is not None:
        return command
    if err:
        exit_err(f"Command not installed: {command}")
    return False
def has_file(file, create=False):
    """Return *file* if it exists.

    When missing: create an empty file first if *create* is True,
    otherwise abort via exit_err().
    """
    if not os.path.isfile(file):
        if create:
            open(file, "w+").close()
        else:
            exit_err(f"File not exist: {file}")
    return file
def run_cmd(command):
    """Run *command* through the shell; return its output, or abort via
    exit_err() on a non-zero exit status."""
    status, output = subprocess.getstatusoutput(command)
    if status != 0:
        exit_err(("Occurred: ", output))
    return output
def fzf(data: list, current: str = None):
    # Interactive fuzzy picker over *data*; iterfzf needs the external fzf
    # binary, so fall back to printing the list and aborting when missing.
    # NOTE(review): *current* is accepted but unused — confirm intent.
    cmd = has_which("fzf", err=False)
    if not cmd:
        print(*data, sep="\n")
        exit_err("Not installed 'fzf'")
    return iterfzf(data) or exit_err("you did not choose any of the options")
def sed_inplace(filename, pattern, repl):
    """Regex-replace *pattern* with *repl* inside *filename* in place.

    When the file contains no AWS_PROFILE line yet, the original content is
    kept and "export <repl>" is appended instead.
    """
    matcher = re.compile(pattern, re.MULTILINE)
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as tmp_file:
        with open(filename, "r") as source:
            text = source.read()
        if "AWS_PROFILE" in text:
            tmp_file.write(matcher.sub(repl, text))
        else:
            print("No exist profile")
            tmp_file.write(text)
            tmp_file.write(f"export {repl}")
    # Preserve permissions/timestamps, then atomically swap the files.
    shutil.copystat(filename, tmp_file.name)
    shutil.move(tmp_file.name, filename)
def setup_aws(ctx: str = None):
    """Create a boto3 session, bound to profile *ctx* when given; abort on
    any session-creation failure."""
    try:
        if ctx is None:
            # if aws_profile_env is None:
            #     del os.environ['AWS_PROFILE']
            aws_session = boto3.session.Session()
        else:
            aws_session = boto3.session.Session(profile_name=ctx)
        return aws_session
    except Exception as e:
        exit_err(e)
def current_profile(err=True):
    # Profile name of the module-level boto3 session; abort when it is
    # empty and err=True, otherwise return it as-is (may be falsy).
    ctx = aws.profile_name
    if err:
        return ctx or exit_err("Getting current profile")
    return ctx
def get_profiles(err=True):
    """Return the configured AWS profile names sorted in reverse order."""
    try:
        ctx_ls = aws.available_profiles
        ctx = sorted(ctx_ls, reverse=True)
        if err:
            return ctx or exit_err("Getting profile list")
        return ctx
    except Exception as e:
        # NOTE(review): on failure this logs and implicitly returns None;
        # callers such as list_profiles() do not guard against that.
        log.error(e)
def list_profiles(lst=False):
    # lst=True: print the profiles (original config order); otherwise open
    # the interactive fzf picker and return the chosen profile.
    ctx_current = current_profile(err=False)
    ctx_list = get_profiles()
    if lst:
        ctx = reversed(ctx_list)
        print(*ctx, sep="\n")
    else:
        p = fzf(data=ctx_list, current=ctx_current)
        return p
def read_profile():
    # Previously active profile remembered in the awspfx cache file.
    with open(awspfx_cache, 'r') as file:
        r = file.read()
    return r
def save_profile(ctx_current):
    # Persist the current profile (empty string when none) so that a later
    # --swap can restore it.
    ctx = ctx_current if ctx_current else ""
    with open(awspfx_cache, "w") as file:
        file.write(ctx)
def switch_profile(ctx, ctx_current):
    # Rewrite the AWS_PROFILE export in ~/.envrc, remember the previous
    # profile for --swap, then have direnv pick up the change.
    ctx_old = f'AWS_PROFILE="{ctx_current}"'
    ctx_repl = f'AWS_PROFILE="{ctx}"'
    sed_inplace(envrc_file, ctx_old, ctx_repl)
    save_profile(ctx_current)
    run_cmd("direnv allow && direnv reload")
def set_profile(ctx, ctx_current=None, sms=None):
    # Switch to *ctx* unless it is already active; *sms* overrides the
    # success message (used by swap_profile).
    if not ctx_current:
        ctx_current = current_profile(err=False)
    if ctx == ctx_current:
        log.warning(f"The profile is not changed: {ctx_current}")
    else:
        switch_profile(ctx, ctx_current)
        sms_text = sms or f"Switched to profile: {ctx}"
        log.info(sms_text)
def swap_profile():
    # Restore the previously active profile remembered in the cache file.
    ctx = read_profile()
    if ctx:
        sms_text = f"Switched to previous profile: {ctx}"
        set_profile(ctx=ctx, sms=sms_text)
def exist_profile(ctx):
    # True when *ctx* is a configured profile; abort otherwise.
    if ctx in get_profiles():
        return True
    else:
        exit_err(f"Profile does not exist: {ctx}")
def sso(account_id, role_name):
    """Fetch role credentials through AWS SSO using a cached access token
    from ~/.aws/sso/cache; abort with status 2 when the session expired."""
    client = aws.client("sso", region_name="us-east-1")
    aws_sso_cache = os.path.expanduser("~/.aws/sso/cache")
    json_files = [
        pos_json for pos_json in os.listdir(
            aws_sso_cache
        ) if pos_json.endswith(
            ".json"
        )
    ]
    for json_file in json_files:
        path = f"{aws_sso_cache}/{json_file}"
        with open(path) as file:
            data = json.load(file)
        if "accessToken" in data:
            access_token = data['accessToken']
            try:
                cred = client.get_role_credentials(
                    accountId=account_id,
                    roleName=role_name,
                    accessToken=access_token
                )
                return cred
            except Exception as e:
                log.error(e)
                # NOTE(review): indentation reconstructed — confirm that the
                # expiry warning/exit belongs to this except path.
                log.warning("The SSO session associated with this profile has expired "
                            "or is otherwise invalid. To refresh this SSO session run "
                            "aws sso login with the corresponding profile.")
                sys.exit(2)
def sts(account_id, role, region):
    """Assume *role* in *account_id* via STS and return the raw
    assume_role response."""
    role_info = {
        "RoleArn": f"arn:aws:iam::{account_id}:role/{role}",
        "RoleSessionName": "session01"
    }
    client = aws.client("sts", region_name=region)
    cred = client.assume_role(**role_info)
    return cred
def get_token(ctx, sso_=True, sts_=False):
    """Obtain temporary credentials for profile *ctx* (SSO by default, STS
    assume-role when sts_=True) and write them to ~/.aws/credentials.

    Account id/role/region come from the environment when set, otherwise
    from the profile's section in the credentials file.
    """
    aws_cred = cfgParser()
    aws_cred.read(creds_file)
    act_id = os.getenv("AWS_ACCOUNT_ID") or aws_cred.get(ctx, "account_id")
    act_role = os.getenv("AWS_ROLE_NAME") or aws_cred.get(ctx, "role_name")
    act_region = os.getenv("AWS_REGION") or aws_cred.get(ctx, "region")
    if sso_:
        cred = sso(account_id=act_id, role_name=act_role)
    elif sts_:
        # NOTE(review): sts() returns an AssumeRole response keyed
        # 'Credentials', not 'roleCredentials' — verify this code path.
        cred = sts(account_id=act_id, role=act_role, region=act_region)
    else:
        # NOTE(review): exit_err() terminates, so this assignment is dead code.
        cred = {}
        exit_err("Not select option from token")
    aws_access_key_id = cred['roleCredentials']['accessKeyId']
    aws_secret_access_key = cred['roleCredentials']['secretAccessKey']
    aws_session_token = cred['roleCredentials']['sessionToken']
    # print('Save Credentials in ~/.aws/credentials ...')
    aws_cred.set(ctx, "aws_access_key_id", aws_access_key_id)
    aws_cred.set(ctx, "aws_secret_access_key", aws_secret_access_key)
    aws_cred.set(ctx, "aws_session_token", aws_session_token)
    with open(creds_file, "w") as f:
        aws_cred.write(f)
def main(argv):
    """Dispatch on the docopt argument dict: token/sso subcommands, the
    --current/--list/--swap flags, or a plain profile switch."""
    ctx = argv['<profile>']
    if ctx == "token" or argv['token']:
        if argv['--profile']:
            # Explicit -p <profile>: validate it before generating a token.
            if exist_profile(ctx):
                get_token(ctx)
                log.info(f"Generate token to: {ctx}")
        else:
            # No profile given: use the currently active one.
            ctx = current_profile()
            get_token(ctx)
            log.info(f"Generate token to: {ctx}")
        sys.exit()
    if ctx == "sso" or argv['sso']:
        # WIP subcommand (see module docstring).
        print("sso")
        sys.exit()
    if argv['--current']:
        log.info(f"The current profile is: '{current_profile()}'")
        sys.exit()
    if argv['--list']:
        list_profiles(lst=True)
        sys.exit()
    if argv['--swap']:
        swap_profile()
        sys.exit()
    if ctx or ctx is None:
        # No flags: switch profiles — interactively when none was named.
        if ctx is None:
            ctx_profile = list_profiles()
        else:
            ctx_profile = ctx if exist_profile(ctx) else sys.exit()
        set_profile(ctx_profile)
        sys.exit()
if __name__ == "__main__":
    # Module-level wiring the functions above rely on: logger, boto3
    # session, and the dotfile paths (all created/validated up front).
    log = setup_logging()
    home_path = os.getenv('HOME') or exit_err("Home directory does not exist?")
    # aws_profile_env = os.getenv("AWS_PROFILE")
    aws = setup_aws()
    awspfx_cache = has_file(f"{home_path}/.aws/awspfx", create=True)
    direnv = has_which("direnv")
    envrc_file = has_file(f"{home_path}/.envrc")
    creds_file = has_file(f"{home_path}/.aws/credentials")
    arguments = docopt(__doc__, version=f'awspfx 0.1.6 - python {sys.version}')
    main(arguments)
| StarcoderdataPython |
3358283 | <reponame>YuOZW/pyGSM
from .conjugate_gradient import conjugate_gradient
from .eigenvector_follow import eigenvector_follow
from .lbfgs import lbfgs
from ._linesearch import backtrack,NoLineSearch
from .beales_cg import beales_cg
| StarcoderdataPython |
86440 | <reponame>Jay4C/Python-Macros-For_FreeCAD
import FreeCAD, Part, Drawing, math, Mesh, importDXF
# Active FreeCAD document (created below when none is open yet).
DOC = FreeCAD.activeDocument()
DOC_NAME = "part_rotor"
def clear_doc():
    # Clear the active document deleting all the objects
    for obj in DOC.Objects:
        DOC.removeObject(obj.Name)
def setview():
    # Rearrange View: fit the model and switch to an axonometric view.
    FreeCAD.Gui.SendMsgToActiveView("ViewFit")
    FreeCAD.Gui.activeDocument().activeView().viewAxometric()
if DOC is None:
    FreeCAD.newDocument(DOC_NAME)
    FreeCAD.setActiveDocument(DOC_NAME)
    DOC = FreeCAD.activeDocument()
else:
    clear_doc()
# EPS= tolerance to use to cut the parts
EPS = 0.10
EPS_C = EPS * (-0.5)
maximal_diameter = 100
maximal_heigth = 15
# part_rotor: base disc of the rotor
part_rotor = Part.makeCylinder(maximal_diameter/2, maximal_heigth)
# part_rotor cut by cylinder_1 (central shaft bore, radius 5)
cylinder_1 = Part.makeCylinder(5, maximal_heigth)
part_rotor = part_rotor.cut(cylinder_1)
# holes for fixing the magnets: 12 pockets every 30 degrees near the rim
degre = 30
for i in range(int(360/degre)):
    radius = maximal_diameter/2 - 12.5
    alpha=(i*degre*math.pi)/180
    hole_vector = FreeCAD.Vector(radius*math.cos(alpha), radius*math.sin(alpha), 0)
    hole = Part.makeCylinder(7.5, maximal_heigth)
    hole.translate(hole_vector)
    part_rotor = part_rotor.cut(hole)
# holes for the cooling: 4 holes every 90 degrees on an inner circle
degre = 90
for i in range(int(360/degre)):
    radius = 5 + 12.5
    alpha=(i*degre*math.pi)/180
    hole_vector = FreeCAD.Vector(radius*math.cos(alpha), radius*math.sin(alpha), 0)
    hole = Part.makeCylinder(7.5, maximal_heigth)
    hole.translate(hole_vector)
    part_rotor = part_rotor.cut(hole)
# holes for the cooling: 4 more on the diagonals (45/135/225/315 degrees)
degres = [1*45, 3*45, 5*45, 7*45]
for degre in degres:
    radius = math.sqrt(2*math.pow(16.25,2))
    alpha=(degre*math.pi)/180
    hole_vector = FreeCAD.Vector(radius*math.cos(alpha), radius*math.sin(alpha), 0)
    hole = Part.makeCylinder(7.5, maximal_heigth)
    hole.translate(hole_vector)
    part_rotor = part_rotor.cut(hole)
# Cut the holes for the screws fixing the magnets (radial bores at mid height)
for i in range(12):
    axe_y = FreeCAD.Vector(0, 1, 0)
    axe_z = FreeCAD.Vector(0, 0, 1)
    radius_screw = maximal_diameter/2 - 10
    alpha = (i*2*math.pi)/12
    hole_vector = FreeCAD.Vector(radius_screw*math.cos(alpha), radius_screw*math.sin(alpha), 7.5)
    hole = Part.makeCylinder(2.5, maximal_diameter/2 - 10, hole_vector, axe_y)
    hole.rotate(hole_vector, axe_z, alpha*(360/(2*math.pi)) - 90)
    part_rotor = part_rotor.cut(hole)
Part.show(part_rotor)
DOC.recompute()
# Export the resulting shape as STL (mesh) and DXF (drawing).
__objs__ = []
__objs__.append(FreeCAD.getDocument("part_rotor").getObject("Shape"))
stl_file = u"part_rotor.stl"
Mesh.export(__objs__, stl_file)
dxf_file = u"part_rotor.dxf"
importDXF.export(__objs__, dxf_file)
setview()
| StarcoderdataPython |
1784409 | <reponame>cds-snc/url-shortener
"""create short_urls table
Revision ID: cd33c5fd06d2
Revises: 9<PASSWORD>
Create Date: 2022-01-20 04:46:42.628093
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
# NOTE(review): the revision id was anonymised upstream ("<KEY>") — restore
# the real hash before running migrations.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the short_urls table (UUID pk, original/short URL, created)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('short_urls',
        sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('original_url', sa.String(), nullable=False),
        sa.Column('short_url', sa.String(), nullable=False),
        sa.Column('created', sa.DateTime(), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the short_urls table created by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('short_urls')
    # ### end Alembic commands ###
| StarcoderdataPython |
4801162 | import os
import unittest
from datetime import datetime
from intuitquickbooks.auth import Oauth1SessionManager
from intuitquickbooks.client import QuickBooks
from intuitquickbooks.objects.term import Term
class TermTest(unittest.TestCase):
    """Integration tests for QuickBooks ``Term`` create/update.

    Talks to the live QuickBooks sandbox; requires CONSUMER_KEY,
    CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET and COMPANY_ID
    in the environment.
    """

    def setUp(self):
        # OAuth1 session against the sandbox, credentials from the environment.
        self.session_manager = Oauth1SessionManager(
            sandbox=True,
            consumer_key=os.environ.get('CONSUMER_KEY'),
            consumer_secret=os.environ.get('CONSUMER_SECRET'),
            access_token=os.environ.get('ACCESS_TOKEN'),
            access_token_secret=os.environ.get('ACCESS_TOKEN_SECRET'),
        )

        self.qb_client = QuickBooks(
            session_manager=self.session_manager,
            sandbox=True,
            company_id=os.environ.get('COMPANY_ID')
        )

        # Time-stamped name so repeated runs do not collide on the server.
        self.name = "Term {0}".format(datetime.now().strftime('%d%H%M'))

    def test_create(self):
        term = Term()
        term.Name = self.name
        term.DueDays = 10
        term.save(qb=self.qb_client)

        query_term = Term.get(term.Id, qb=self.qb_client)
        # Fix: use assertEqual — assertEquals is a deprecated alias that was
        # removed from unittest in Python 3.12.
        self.assertEqual(query_term.Id, term.Id)
        self.assertEqual(query_term.Name, self.name)
        self.assertEqual(query_term.DueDays, 10)

    def test_update(self):
        term = Term.all(max_results=1, qb=self.qb_client)[0]
        term.DueDays = 60
        term.save(qb=self.qb_client)

        query_term = Term.get(term.Id, qb=self.qb_client)
        self.assertEqual(query_term.Id, term.Id)
        self.assertEqual(query_term.DueDays, 60)
| StarcoderdataPython |
1623173 | <filename>meshpy/tools/test_stable_pose.py
"""
Regressive test for stable poses. Qualitative only.
Author: <NAME>
"""
import IPython
import numpy as np
import os
import random
import sys
from autolab_core import Point, RigidTransform
from meshpy import ObjFile, Mesh3D
from visualization import Visualizer3D as vis
if __name__ == '__main__':
    # Qualitative regression script: load a mesh, compute its resting pose
    # from an initial transform, and visualize both side by side.
    mesh_name = sys.argv[1]
    #np.random.seed(111)
    #random.seed(111)

    # read mesh
    mesh = ObjFile(mesh_name).read()
    # Override geometry with a pre-saved lego fixture from dex-net.
    mesh.vertices_ = np.load('../dex-net/data/meshes/lego_vertices.npy')
    mesh.center_of_mass = np.load('../dex-net/data/meshes/lego_com.npy')

    #T_obj_table = RigidTransform(rotation=[0.92275663, 0.13768089, 0.35600924, -0.05311874],
    #                             from_frame='obj', to_frame='table')
    T_obj_table = RigidTransform(rotation=[-0.1335021, 0.87671711, 0.41438141, 0.20452958],
                                 from_frame='obj', to_frame='table')
    stable_pose = mesh.resting_pose(T_obj_table)
    #print stable_pose.r

    table_dim = 0.3
    T_obj_table_plot = mesh.get_T_surface_obj(T_obj_table)
    # Shift the wireframe copy sideways so both renderings are visible.
    T_obj_table_plot.translation[0] += 0.1

    vis.figure()
    vis.mesh(mesh, T_obj_table_plot,
             color=(1,0,0), style='wireframe')
    vis.points(Point(mesh.center_of_mass, 'obj'), T_obj_table_plot,
               color=(1,0,1), scale=0.01)
    vis.pose(T_obj_table_plot, alpha=0.1)
    vis.mesh_stable_pose(mesh, stable_pose, dim=table_dim,
                         color=(0,1,0), style='surface')
    vis.pose(stable_pose.T_obj_table, alpha=0.1)
    vis.show()
    exit(0)

    # compute stable poses
    # NOTE(review): everything below is unreachable after exit(0) — appears to
    # be kept as a manually-toggled second experiment.
    vis.figure()
    vis.mesh(mesh, color=(1,1,0), style='surface')
    vis.mesh(mesh.convex_hull(), color=(1,0,0))
    stable_poses = mesh.stable_poses()
    vis.show()
| StarcoderdataPython |
3238027 | # -*- coding: utf-8 -*-
# @Author: chandan
# @Date: 2016-12-11 08:41:58
# @Last Modified by: chandan
# @Last Modified time: 2016-12-11 08:41:58
| StarcoderdataPython |
134247 | <reponame>theapricot/oppapp2<gh_stars>0
from models import *
connected = {}
# FUNCTION DEFINITIONS #
def allcansignup():
    """Return True iff every user's ``cansignup`` flag is set."""
    # returns true if property "cansignup" is true for all users
    # (used for sign-ups open/closed switch on editor page)
    return all([a[0] for a in Users.query.with_entities(Users.cansignup).all()])
def changesetting(usr, name, val):
    """Set a single setting ``name`` to ``val`` on ``usr`` and commit."""
    # settings are stored as JSON/python dict encoded as a string in users database table
    # this method allows easy setting of these settings
    sets = ast.literal_eval(usr.settings)
    sets[name] = val
    usr.settings = str(sets)
    db.session.commit()
def stream_template(template_name, **context):
    """Render a Jinja template as a stream instead of one big string."""
    app.update_template_context(context)
    t = app.jinja_env.get_template(template_name)
    rv = t.stream(context)
    #rv.enable_buffering(5)
    return rv
def update_online():
    """Broadcast the module-level ``connected`` map to all socket.io clients."""
    #print(connected)
    sio.emit('update_online', connected)
def constructEvData(events):
    """Build the template data structure: a list of day buckets, each holding
    the events that fall on that day.

    Assumes ``events`` is already ordered by date, since consecutive events
    are grouped whenever their day-of-month matches the previous one.
    NOTE(review): grouping compares only ``date.day``, so two events on the
    same day-of-month in different months would be merged — confirm callers
    always pass a single month/ordered range.
    """
    data = []
    #print("loading events...")
    alldates = [-1]          # alldates[i-1] is the previous event's day-of-month
    i = 1                    # index into alldates (1 ahead of the sentinel)
    j = 0                    # number of day buckets created so far
    for event in events:
        alldates.append(event.date.day)
        evdata = {"name":event.name,
                  "date":event.date.strftime("%B %d"),
                  "id":event.id,
                  "time":event.date.strftime("%-I:%M%p"),
                  "endtime":event.enddate.strftime("%-I:%M%p"),
                  "info":event.info,
                  "location":event.desc,
                  "mystatus":0,
                  "attendees":[]}
        # mystatus: 0 = not attending, 1 = attending, 2 = preferred.
        if current_user in event.users:
            evdata['mystatus'] = 1
        if current_user in event.usersPreferred:
            evdata['mystatus'] = 2
        for dude in event.users + event.usersPreferred:
            evdata['attendees'].append(dude.fname + ' ' + dude.lname)
        if event.date.day == alldates[i-1]:
            # append to events
            #print("appended to old!")
            data[j-1]['events'].append(evdata)
        else:
            # make new weekday
            #print("new weekday!")
            data.append({"weekday":event.date.strftime("%a"),"date":event.date.strftime("%b %d"),"events":[evdata]})
            j += 1
        i += 1
    return data
| StarcoderdataPython |
1638779 | # -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.colorimetry.photometry` module.
"""
import unittest
from colour.colorimetry import (SDS_ILLUMINANTS, SDS_LIGHT_SOURCES,
luminous_flux, luminous_efficiency,
luminous_efficacy, sd_zeros)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'TestLuminousFlux', 'TestLuminousEfficiency', 'TestLuminousEfficacy'
]
class TestLuminousFlux(unittest.TestCase):
    """
    Defines :func:`colour.colorimetry.photometry.luminous_flux` definition unit
    tests methods.
    """

    def test_luminous_flux(self):
        """
        Tests :func:`colour.colorimetry.photometry.luminous_flux` definition.
        """

        # Regression values pinned to 7 decimal places for three reference
        # spectral distributions.
        self.assertAlmostEqual(
            luminous_flux(SDS_ILLUMINANTS['FL2'].copy().normalise()),
            28588.73612977,
            places=7)

        self.assertAlmostEqual(
            luminous_flux(SDS_LIGHT_SOURCES['Neodimium Incandescent']),
            23807.65552737,
            places=7)

        self.assertAlmostEqual(
            luminous_flux(SDS_LIGHT_SOURCES['F32T8/TL841 (Triphosphor)']),
            13090.06759053,
            places=7)
class TestLuminousEfficiency(unittest.TestCase):
    """
    Defines :func:`colour.colorimetry.photometry.luminous_efficiency`
    definition unit tests methods.
    """

    def test_luminous_efficiency(self):
        """
        Tests :func:`colour.colorimetry.photometry.luminous_efficiency`
        definition.
        """

        # Regression values (dimensionless ratios in [0, 1]).
        self.assertAlmostEqual(
            luminous_efficiency(SDS_ILLUMINANTS['FL2'].copy().normalise()),
            0.49317624,
            places=7)

        self.assertAlmostEqual(
            luminous_efficiency(SDS_LIGHT_SOURCES['Neodimium Incandescent']),
            0.19943936,
            places=7)

        self.assertAlmostEqual(
            luminous_efficiency(
                SDS_LIGHT_SOURCES['F32T8/TL841 (Triphosphor)']),
            0.51080919,
            places=7)
class TestLuminousEfficacy(unittest.TestCase):
    """
    Defines :func:`colour.colorimetry.photometry.luminous_efficacy`
    definition unit tests methods.
    """

    def test_luminous_efficacy(self):
        """
        Tests :func:`colour.colorimetry.photometry.luminous_efficacy`
        definition.
        """

        # Regression values in lm/W.
        self.assertAlmostEqual(
            luminous_efficacy(SDS_ILLUMINANTS['FL2'].copy().normalise()),
            336.83937176,
            places=7)

        self.assertAlmostEqual(
            luminous_efficacy(SDS_LIGHT_SOURCES['Neodimium Incandescent']),
            136.21708032,
            places=7)

        self.assertAlmostEqual(
            luminous_efficacy(SDS_LIGHT_SOURCES['F32T8/TL841 (Triphosphor)']),
            348.88267549,
            places=7)

        # A monochromatic 555nm source must hit the theoretical maximum
        # efficacy of 683 lm/W.
        sd = sd_zeros()
        sd[555] = 1
        self.assertAlmostEqual(luminous_efficacy(sd), 683.00000000, places=7)


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1647110 | from django.conf.urls import url
from sic_financeiro.usuarios import views as usuario_view
# URL routes for the usuarios (users) app: login, logout, sign-up, and the
# terms-of-use page.
urlpatterns = [
    url(r'^login/$', usuario_view.do_login, name='usuario_login'),
    url(r'^logout/$', usuario_view.do_logout, name='usuario_logout'),
    url(r'^signup/$', usuario_view.signup, name='usuario_signup'),
    url(r'^Termo_de_Uso/$', usuario_view.termo, name='usuario_termo'),
]
| StarcoderdataPython |
3276327 | import random
from typing import Callable
from turnable import Game, HookType, build_game
from turnable.chars import PlayableEntity, Entity
from turnable.state import States
from turnable.streams import CommandResponse
from turnable.helpers.text import clear_terminal
class PoisonousCharacter(PlayableEntity):
    """
    Custom Character
    Can poison enemies, poison stacks increasing duration. Has 10% base
    dodge change.
    """
    # Names of the attributes injected into poisoned enemy objects.
    POISON_TURNS_LEFT_ATT = '_MyGame_poison_left'
    POISON_HOOK_ATT = '_MyGame_poison_hook'
    # Turns added per application of poison.
    POISON_DURATION = 2

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Probability of dodging incoming damage (name kept as-is; likely
        # intended as "dodge_chance").
        self.dodge_change = 0.1

    def take_damage(self, damage: int):
        """ Has a chance to not take damage. """
        if random.random() >= self.dodge_change:
            super().take_damage(damage)

    def available_actions(self):
        """Base actions plus the POI (poison) command while in a fight."""
        actions = []
        command = self.COMMAND_CLASS
        if self.game.state == States.IN_FIGHT:
            actions.append(command('POI', 'Poisons enemies, they take 3 damage a turn.', 'handle_poison', self))
        actions.extend(super().available_actions())
        return actions

    def handle_poison(self, resp: CommandResponse):
        """ Poisons all enemies """
        # The name of the attribute that will be injected on the enemy object
        for enemy in self.game.room.enemies:
            self._infect(enemy)

    def _infect(self, enemy: Entity):
        """Apply (or stack) poison on one enemy via a per-enemy game hook."""
        turns_left = getattr(enemy, self.POISON_TURNS_LEFT_ATT, self.POISON_DURATION)
        hook = getattr(enemy, self.POISON_HOOK_ATT, None)
        if hook:
            # Already poisoned: stacking extends the remaining duration.
            turns_left = turns_left + self.POISON_DURATION
        else:
            # First application: register a tick hook that fires at the start
            # of every enemy turn, dealing 3 damage.
            hook = build_poison_hook(self.POISON_HOOK_ATT, self.POISON_TURNS_LEFT_ATT, enemy, 3)
            self.game.add_hook(HookType.ENEMY_TURN_START, hook)
            setattr(enemy, self.POISON_HOOK_ATT, hook)
        setattr(enemy, self.POISON_TURNS_LEFT_ATT, turns_left)
def build_poison_hook(hook_attr: str, turns_left_attr: str, enemy: Entity, damage: int) -> Callable:
    """
    Build the hook callable that applies poison damage each enemy turn.

    The builder pattern lets the returned hook keep references to the
    poisoned *enemy*, the per-turn *damage*, and the names of the two
    attributes (*hook_attr*, *turns_left_attr*) that track the poison
    state on the enemy object.
    """
    def poison_hook(game: Game, hook_type: HookType, hook_id: str):
        """
        Hook registered via ``game.add_hook``: deals damage while turns
        remain, then unregisters itself and clears the enemy's hook slot.
        """
        if not enemy or not enemy.is_alive():
            return

        remaining = getattr(enemy, turns_left_attr)
        if remaining > 0:
            enemy.take_damage(damage)
            setattr(enemy, turns_left_attr, remaining - 1)
        else:
            # Poison has worn off: remove the hook and reset the marker.
            game.remove_hook(hook_type, hook_id)
            setattr(enemy, hook_attr, None)

    return poison_hook
def start_screen(game: Game, hook_type: HookType, hook_id: str):
    """ This is a hook that shows a welcome screen at the start of the game
    and waits for a key press before continuing. """
    clear_terminal()
    print(f"All set.")
    print(f"Welcome, {game.player.name}, to {game.name}.")
    print(f"Press any key to start")
    input()  # block until the player acknowledges
def main():
    """Prompt for game/player names, wire the start screen hook, and run."""
    print("""Set up your game. """)
    game_name = input("Choose game name:")
    player_name = input("Choose player name:")

    g = build_game(game_name, player_name)
    g.add_hook(HookType.GAME_START, start_screen)
    g.start()


if __name__ == '__main__':
    main()
3237983 | <reponame>Katsutoshii/adversarial-epidemics
'''
File: __init__.py
Project: src
File Created: Monday, 10th February 2020 4:26:45 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Thursday, 27th February 2020 4:34:33 pm
Modified By: <NAME> (<EMAIL>)
'''
| StarcoderdataPython |
3326335 | from boxme_user.models import AbstractBoxmeUser
class MyCustomBoxmeUser(AbstractBoxmeUser):
    """Concrete user model based on :class:`AbstractBoxmeUser`; only
    overrides the admin display names."""

    class Meta(AbstractBoxmeUser.Meta):
        verbose_name = "MyCustomBoxmeUserVerboseName"
        verbose_name_plural = "MyCustomBoxmeUserVerboseNamePlural"
1753769 | import collections
import unittest
from unittest.mock import call, MagicMock
from .. import slurm_engine
class BaseTestCase(unittest.TestCase):
    """Shared fixture: a SlurmEngine wired to a fully mocked process runner."""

    def setUp(self):
        self.process_runner = MagicMock()
        # Let engine code reference a concrete exception type on the mock.
        self.process_runner.CalledProcessError = Exception
        self.engine = self.generate_engine()

    def generate_engine(self, **kwargs):
        """Build a SlurmEngine, allowing per-test constructor overrides."""
        default_kwargs = {'process_runner': self.process_runner}
        return slurm_engine.SlurmEngine(**{**default_kwargs, **kwargs})

    def mockify_engine_attrs(self, attrs=None):
        """Replace the named engine attributes with MagicMocks."""
        for attr in attrs:
            setattr(self.engine, attr, MagicMock())
class SubmitJobTestCase(BaseTestCase):
    """Tests for SlurmEngine.submit_job (entrypoint write + sbatch call)."""

    def setUp(self):
        super().setUp()
        self.mockify_engine_attrs(['_write_engine_entrypoint'])
        self.job_spec = collections.defaultdict(MagicMock, **{
            'dir': 'some_dir',
            'entrypoint': 'some_entrypoint'
        })
        self.job = collections.defaultdict(MagicMock,
                                           **{'job_spec': self.job_spec})
        self.extra_cfgs = MagicMock()
        self.process_runner.run_process.return_value = (
            self.generate_successful_sbatch_proc())

    def _submit_job(self):
        return self.engine.submit_job(job=self.job, extra_cfgs=self.extra_cfgs)

    def test_writes_engine_entrypoint(self):
        self._submit_job()
        self.assertEqual(self.engine._write_engine_entrypoint.call_args,
                         call(job=self.job, extra_cfgs=self.extra_cfgs))

    def test_calls_sbatch(self):
        self._submit_job()
        entrypoint_path = self.engine._write_engine_entrypoint.return_value
        workdir = self.job['job_spec']['dir']
        expected_cmd = ['sbatch', '--workdir=%s' % workdir, entrypoint_path]
        self.assertEqual(self.process_runner.run_process.call_args,
                         call(cmd=expected_cmd, check=True))

    def generate_successful_sbatch_proc(self, job_id='12345'):
        """Fake a successful sbatch process result carrying *job_id*."""
        proc = MagicMock()
        proc.returncode = 0
        proc.stdout = 'Submitted batch job %s' % job_id
        return proc

    def test_returns_engine_meta_for_successful_submission(self):
        # The job id is parsed out of sbatch's stdout.
        job_id = '12345'
        self.process_runner.run_process.return_value = (
            self.generate_successful_sbatch_proc(job_id=job_id)
        )
        engine_meta = self._submit_job()
        expected_engine_meta = {'job_id': job_id}
        self.assertEqual(engine_meta, expected_engine_meta)

    def test_handles_failed_submission(self):
        # A CalledProcessError from the runner must surface as SubmissionError.
        class MockError(Exception):
            stdout = 'some_stdout'
            stderr = 'some_stderr'
        self.process_runner.CalledProcessError = MockError

        def simulate_failed_proc(cmd, *args, **kwargs):
            proc = MagicMock()
            proc.returncode = 1
            proc.stderr = 'some error'
            raise self.process_runner.CalledProcessError(
                proc.returncode, cmd)
        self.process_runner.run_process.side_effect = simulate_failed_proc
        with self.assertRaises(self.engine.SubmissionError):
            self._submit_job()
class GetKeyedStatesTestCase(BaseTestCase):
    """Tests for SlurmEngine.get_keyed_states (meta -> state mapping)."""

    def setUp(self):
        super().setUp()
        self.mockify_engine_attrs(attrs=['get_slurm_jobs_by_id',
                                         'slurm_job_to_engine_state'])
        self.keyed_metas = {i: MagicMock() for i in range(3)}
        self.expected_job_ids = [
            engine_meta['job_id']
            for engine_meta in self.keyed_metas.values()
        ]
        self.mock_slurm_jobs_by_id = {
            job_id: MagicMock() for job_id in self.expected_job_ids
        }
        self.engine.get_slurm_jobs_by_id.return_value = (
            self.mock_slurm_jobs_by_id)

    def _get(self):
        return self.engine.get_keyed_states(
            keyed_metas=self.keyed_metas)

    def test_gets_slurm_jobs_by_id(self):
        self._get()
        self.assertEqual(self.engine.get_slurm_jobs_by_id.call_args,
                         call(job_ids=self.expected_job_ids))

    def test_gets_engine_states(self):
        self._get()
        # Compare call sets independent of ordering by sorting on the
        # identity of the slurm_job kwarg.
        self.assertEqual(
            self._get_sorted_slurm_job_calls(
                calls=self.engine.slurm_job_to_engine_state.call_args_list),
            self._get_sorted_slurm_job_calls(
                calls=[call(slurm_job=slurm_job)
                       for slurm_job in self.mock_slurm_jobs_by_id.values()])
        )

    def _get_sorted_slurm_job_calls(self, calls=None):
        """Sort mock call objects by the id of their slurm_job kwarg."""
        def _key_fn(call):
            # call objects unpack as (name, args, kwargs) or (args, kwargs).
            if len(call) == 3:
                return id(call[2]['slurm_job'])
            elif len(call) == 2:
                return id(call[1]['slurm_job'])
        return sorted(calls, key=_key_fn)

    def test_returns_keyed_states(self):
        result = self._get()
        expected_result = {
            key: self.engine.slurm_job_to_engine_state.return_value
            for key in self.keyed_metas
        }
        self.assertEqual(result, expected_result)
class GetSlurmJobsByIdTestCase(BaseTestCase):
    # TODO: no tests yet for SlurmEngine.get_slurm_jobs_by_id.
    pass
class GetSlurmJobsViaSacctTestCase(BaseTestCase):
    """Tests for SlurmEngine.get_slurm_jobs_via_sacct (sacct invocation)."""

    def setUp(self):
        super().setUp()
        self.job_ids = ["job_id_%s" % i for i in range(3)]
        self.mock_slurm_jobs = [MagicMock() for job_id in self.job_ids]
        # Stub the stdout parser so only the process call is under test.
        self.engine.parse_sacct_stdout = (
            MagicMock(return_value={'records': self.mock_slurm_jobs})
        )

    def _get(self):
        return self.engine.get_slurm_jobs_via_sacct(job_ids=self.job_ids)

    def test_makes_expected_process_call(self):
        self._get()
        csv_job_ids = ",".join(self.job_ids)
        expected_cmd = ['sacct', '--jobs=%s' % csv_job_ids, '--long',
                        '--noconvert', '--parsable2', '--allocations']
        self.assertEqual(self.engine.process_runner.run_process.call_args,
                         call(cmd=expected_cmd, check=True))

    def test_returns_parsed_jobs(self):
        self.assertEqual(self._get(), self.mock_slurm_jobs)
class ParseSacctOutputTestCase(BaseTestCase):
    """Tests for SlurmEngine.parse_sacct_stdout ('|'-separated table parse)."""

    def setUp(self):
        super().setUp()
        # Fix: the original built the header as ["field_%s" for i in range(5)]
        # (missing the '% i'), yielding five identical column names. Records
        # then collapsed onto a single dict key, making the assertion vacuous.
        self.fields = ["field_%s" % i for i in range(5)]
        self.records = [
            ["record_%s__%s_value" % (i, field) for field in self.fields]
            for i in range(3)
        ]
        # sacct-style output: header line followed by one row per record.
        self.sacct_stdout = "\n".join([
            "|".join(self.fields),
            *["|".join(record) for record in self.records]
        ])

    def test_returns_fields_and_records(self):
        result = self.engine.parse_sacct_stdout(sacct_stdout=self.sacct_stdout)
        expected_result = {
            'fields': self.fields,
            'records': [
                {field: record[i] for i, field in enumerate(self.fields)}
                for record in self.records
            ]
        }
        self.assertEqual(result, expected_result)
class SlurmJobToJobStateTestCase(BaseTestCase):
    """Tests for SlurmEngine.slurm_job_to_engine_state."""

    def test_generates_expected_engine_state_for_non_null_slurm_job(self):
        self.engine.slurm_job_to_status = MagicMock()
        slurm_job = MagicMock()
        result = self.engine.slurm_job_to_engine_state(slurm_job=slurm_job)
        expected_result = {
            'engine_job_state': slurm_job,
            'status': self.engine.slurm_job_to_status.return_value
        }
        self.assertEqual(result, expected_result)

    def test_generates_expected_engine_state_for_null_slurm_job(self):
        # A missing slurm job yields a state with no status key.
        result = self.engine.slurm_job_to_engine_state(slurm_job=None)
        expected_result = {'engine_job_state': None}
        self.assertEqual(result, expected_result)
class SlurmJobToStatusTestCase(BaseTestCase):
    """Tests for SlurmEngine.slurm_job_to_status (JobState mapping)."""

    def test_handles_known_statuses(self):
        # Derive the expected mapping straight from the engine's own table so
        # the test covers every declared slurm state.
        slurm_jobs = {}
        expected_mappings = {}
        for engine_job_status, slurm_states \
                in self.engine.SLURM_STATES_TO_ENGINE_JOB_STATUSES.items():
            for slurm_state in slurm_states:
                expected_mappings[slurm_state] = engine_job_status
                slurm_jobs[slurm_state] = {'JobState': slurm_state}
        actual_mappings = {
            slurm_state: self.engine.slurm_job_to_status(slurm_job=slurm_job)
            for slurm_state, slurm_job in slurm_jobs.items()
        }
        self.assertEqual(expected_mappings, actual_mappings)

    def test_handles_unknown_status(self):
        slurm_job = {'JobState': 'some_crazy_JobState'}
        self.assertEqual(self.engine.slurm_job_to_status(slurm_job=slurm_job),
                         self.engine.JOB_STATUSES.UNKNOWN)


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
67772 | <reponame>fakhrirofi/twit-webhook<gh_stars>1-10
from dotenv import load_dotenv
from pprint import pprint
from webhook import Event
import os
import json
from threading import Thread
from pyngrok import ngrok
from time import sleep
from twitivity import Activity
load_dotenv(".env")

# Expose the local webhook server publicly through an ngrok HTTPS tunnel.
http = ngrok.connect(8080, bind_tls=True)
URL = http.public_url


# function that will be called when webhook receives data
def process_data(data: dict):
    """Handle one Account Activity event delivered to the webhook.

    :param data: Ref: https://developer.twitter.com/en/docs/twitter-api/enterprise/account-activity-api/guides/account-activity-data-objects
    """
    # your code here...
    pprint(data)


# Map of webhook name -> consumer secret used for CRC validation.
webhook = {
    "name_of_the_webhook": os.environ["CONSUMER_SECRET"],
}
callback_route = "callback"
server = Event(callback_route, webhook, process_data)
app = server.get_wsgi()
# Run the WSGI app in a background thread and give it a moment to boot
# before registering the webhook URL with Twitter.
Thread(target=app.run, kwargs={"port": 8080}).start()
sleep(3)

reg = Activity(
    os.environ["CONSUMER_KEY"],
    os.environ["CONSUMER_SECRET"],
    os.environ["ACCESS_TOKEN"],
    os.environ["ACCESS_TOKEN_SECRET"],
    os.environ["ENV_NAME"],
)
# because we use ngrok (dynamic url) to run the webhook, we should delete all
# webhooks before registering a new webhook url
reg.delete_all()
reg.register_webhook(f"{URL}/{callback_route}/name_of_the_webhook")
reg.subscribe()
| StarcoderdataPython |
115542 | <filename>ebcli/operations/platform_version_ops.py
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from datetime import datetime
import os
import sys
import tempfile
from shutil import copyfile, move
import threading
import yaml
from semantic_version import Version
from termcolor import colored
from ebcli.core import io, fileoperations
from ebcli.core.ebglobals import Constants
from ebcli.lib import elasticbeanstalk, heuristics, s3
from ebcli.objects import api_filters
from ebcli.objects.exceptions import (
InvalidPlatformVersionError,
NotFoundError,
PlatformWorkspaceEmptyError,
ValidationError,
)
from ebcli.objects.platform import PlatformBranch, PlatformVersion
from ebcli.objects.sourcecontrol import SourceControl
from ebcli.operations import commonops, logsops
from ebcli.operations.tagops import tagops
from ebcli.resources.statics import namespaces, option_names
from ebcli.resources.strings import alerts, strings, prompts
from ebcli.resources.regex import PackerRegExpressions, PlatformRegExpressions
class PackerStreamMessage(object):
    """Wraps one raw Packer log event and extracts its displayable payload."""

    def __init__(self, event):
        # Raw event payload; may be str or bytes.
        self.event = event

    def raw_message(self):
        """Return the log-message portion of the event, or None if absent."""
        event = self.event
        if isinstance(event, bytes):
            event = event.decode('utf-8')
        matches = PackerRegExpressions.LOG_MESSAGE_REGEX.search(event)
        return matches.groups(0)[0] if matches else None

    def message_severity(self):
        """Return the message's severity token, or None if not present."""
        matches = PackerRegExpressions.LOG_MESSAGE_SEVERITY_REGEX.search(self.event)
        return matches.groups(0)[0] if matches else None

    def format(self):
        """Return the best human-readable rendering of the message.

        Preference order: Packer UI message, then other Packer message
        (prefixed with its target), then any other message; returns None
        when nothing matches.
        """
        ui_message = self.ui_message()
        if ui_message:
            return ui_message

        other_packer_message = self.other_packer_message()
        if other_packer_message:
            # Python 2 compatibility: join as bytes, then decode back.
            if sys.version_info < (3, 0):
                other_packer_message = other_packer_message.encode('utf-8')
            other_packer_message_target = self.other_packer_message_target()
            formatted_other_message = '{}:{}'.format(
                other_packer_message_target,
                other_packer_message
            )
            if sys.version_info < (3, 0):
                formatted_other_message = formatted_other_message.decode('utf-8')
            return formatted_other_message

        other_message = self.other_message()
        if other_message:
            return other_message

    def ui_message(self):
        return self.__return_match(PackerRegExpressions.PACKER_UI_MESSAGE_FORMAT_REGEX)

    def other_packer_message(self):
        return self.__return_match(PackerRegExpressions.PACKER_OTHER_MESSAGE_DATA_REGEX)

    def other_packer_message_target(self):
        return self.__return_match(PackerRegExpressions.PACKER_OTHER_MESSAGE_TARGET_REGEX)

    def other_message(self):
        return self.__return_match(PackerRegExpressions.OTHER_FORMAT_REGEX)

    def __return_match(self, regex):
        """Search *regex* in the raw message; return the stripped first group."""
        raw_message = self.raw_message()

        if not raw_message:
            return

        if isinstance(raw_message, bytes):
            raw_message = raw_message.decode('utf-8')
        matches = regex.search(raw_message)
        return matches.groups(0)[0].strip() if matches else None
class PackerStreamFormatter(object):
    """Formats raw log events from a Packer build for display."""

    def format(self, message, stream_name=None):
        """Return a human-readable line for *message*.

        Recognized Packer log events are reduced to their payload; anything
        else is rendered as '<stream_name> <message>'.
        """
        parsed = PackerStreamMessage(message)
        if not parsed.raw_message():
            return '{0} {1}'.format(stream_name, message)
        return parsed.format()
def create_platform_version(
        version,
        major_increment,
        minor_increment,
        patch_increment,
        instance_type,
        vpc=None,
        staged=False,
        timeout=None,
        tags=None,
):
    """Package the current workspace and create a new custom platform version.

    :param version: explicit version string, or None to derive one from the
        latest version plus the requested major/minor/patch increment
    :param staged: zip the staged (git index) files instead of HEAD
    :param timeout: minutes to wait for the builder environment (default 30)
    """
    # Validate the workspace before doing any remote work.
    _raise_if_directory_is_empty()
    _raise_if_platform_definition_file_is_missing()
    version and _raise_if_version_format_is_invalid(version)
    platform_name = fileoperations.get_platform_name()
    instance_profile = fileoperations.get_instance_profile(None)
    key_name = commonops.get_default_keyname()
    version = version or _resolve_version_number(
        platform_name,
        major_increment,
        minor_increment,
        patch_increment
    )
    tags = tagops.get_and_validate_tags(tags)
    source_control = SourceControl.get_source_control()
    io.log_warning(strings['sc.unstagedchanges']) if source_control.untracked_changes_exist() else None
    version_label = _resolve_version_label(source_control, staged)
    # Zip + upload the workspace only if it is not already on S3.
    bucket, key, file_path = _resolve_s3_bucket_and_key(platform_name, version_label, source_control, staged)
    _upload_platform_version_to_s3_if_necessary(bucket, key, file_path)
    io.log_info('Creating Platform Version ' + version_label)
    response = elasticbeanstalk.create_platform_version(
        platform_name, version, bucket, key, instance_profile, key_name, instance_type, tags, vpc)

    # The build runs inside a dedicated Packer builder environment.
    environment_name = 'eb-custom-platform-builder-packer'

    io.echo(colored(
        strings['platformbuildercreation.info'].format(environment_name), attrs=['reverse']))

    fileoperations.update_platform_version(version)
    commonops.set_environment_for_current_branch(environment_name)

    # Stream builder logs until the create operation finishes or times out.
    stream_platform_logs(response, platform_name, version, timeout)
def delete_platform_version(platform_version, force=False):
    """Delete a custom platform version after confirmation and safety checks.

    Raises ValidationError if any environment still uses the version.
    """
    arn = version_to_arn(platform_version)

    if not force:
        io.echo(prompts['platformdelete.confirm'].replace('{platform-arn}', arn))
        io.validate_action(prompts['platformdelete.validate'], arn)

    # Refuse to delete while environments are still running on this version.
    environments = []
    try:
        environments = [env for env in elasticbeanstalk.get_environments() if env.platform.version == arn]
    except NotFoundError:
        pass

    if len(environments) > 0:
        _, platform_name, platform_version = PlatformVersion.arn_to_platform(arn)
        raise ValidationError(strings['platformdeletevalidation.error'].format(
            platform_name,
            platform_version,
            '\n '.join([env.name for env in environments])
        ))

    response = elasticbeanstalk.delete_platform(arn)
    request_id = response['ResponseMetadata']['RequestId']
    timeout = 10

    commonops.wait_for_success_events(request_id, timeout_in_minutes=timeout, platform_arn=arn)
def describe_custom_platform_version(
        owner=None,
        platform_arn=None,
        platform_name=None,
        platform_version=None,
        status=None
):
    """Describe one custom platform version.

    When no ARN is given, the first match from
    :func:`list_custom_platform_versions` is used.
    NOTE(review): the ``owner`` parameter is accepted but never used here.
    """
    if not platform_arn:
        platforms = list_custom_platform_versions(
            platform_name=platform_name,
            platform_version=platform_version,
            status=status
        )
        platform_arn = platforms[0]

    return elasticbeanstalk.describe_platform_version(platform_arn)
def find_custom_platform_version_from_string(solution_string):
    """Match *solution_string* against available custom platforms.

    Tries a full-ARN match first, then a platform-name match; returns the
    first hit or None.
    """
    available_custom_platforms = list_custom_platform_versions()

    for custom_platform_matcher in [
        PlatformVersion.match_with_complete_arn,
        PlatformVersion.match_with_platform_name,
    ]:
        matched_custom_platform = custom_platform_matcher(available_custom_platforms, solution_string)

        if matched_custom_platform:
            return matched_custom_platform
def get_latest_custom_platform_version(platform):
    """
    :param platform: A custom platform ARN or a custom platform name
    :return: A PlatformVersion object representing the latest version of `platform`,
        or None if the ARN has no account id or there is no Ready version
    """
    account_id, platform_name, platform_version = PlatformVersion.arn_to_platform(platform)

    # An account id marks a custom (self-owned) platform ARN.
    if account_id:
        matching_platforms = list_custom_platform_versions(
            platform_name=platform_name,
            status='Ready'
        )
        if matching_platforms:
            return PlatformVersion(matching_platforms[0])
def get_latest_eb_managed_platform(platform_arn):
    """Return the latest Ready EB-managed version of *platform_arn*, or None.

    EB-managed platform ARNs carry no account id (counterpart of
    :func:`get_latest_custom_platform_version`).
    """
    account_id, platform_name, platform_version = PlatformVersion.arn_to_platform(platform_arn)

    if not account_id:
        matching_platforms = list_eb_managed_platform_versions(
            platform_name=platform_name,
            status='Ready'
        )
        if matching_platforms:
            return PlatformVersion(matching_platforms[0])
def get_latest_platform_version(platform_name=None, owner=None, ignored_states=None):
    """Return the latest version string for *platform_name*, or None."""
    if ignored_states is None:
        ignored_states = ['Deleting', 'Failed']

    platforms = get_platforms(
        platform_name=platform_name,
        ignored_states=ignored_states,
        owner=owner,
        platform_version="latest"
    )

    try:
        return platforms[platform_name]
    except KeyError:
        return None
def get_platforms(platform_name=None, ignored_states=None, owner=None, platform_version=None):
    """Map platform name -> version for this account's custom platforms.

    Platforms whose status is in *ignored_states* are skipped.
    NOTE(review): the ``owner`` parameter is accepted but not used in the body.
    """
    platform_list = list_custom_platform_versions(
        platform_name=platform_name,
        platform_version=platform_version
    )
    platforms = dict()

    for platform in platform_list:
        if ignored_states and platform['PlatformStatus'] in ignored_states:
            continue

        _, platform_name, platform_version = PlatformVersion.arn_to_platform(platform)
        platforms[platform_name] = platform_version

    return platforms
def get_platform_arn(platform_name, platform_version, owner=None):
    """Return the ARN of the given custom platform name/version, or None."""
    platform = describe_custom_platform_version(
        platform_name=platform_name,
        platform_version=platform_version,
        owner=owner
    )
    if platform:
        return platform['PlatformArn']
def get_platform_versions_for_branch(branch_name, recommended_only=False):
    """List PlatformVersion objects belonging to a platform branch.

    :param recommended_only: additionally filter on the 'Recommended'
        lifecycle state
    """
    filters = [
        {
            'Type': 'PlatformBranchName',
            'Operator': '=',
            'Values': [branch_name],
        }
    ]

    if recommended_only:
        filters.append({
            'Type': 'PlatformLifecycleState',
            'Operator': '=',
            'Values': ['Recommended'],
        })

    platform_version_summaries = elasticbeanstalk.list_platform_versions(
        filters=filters)

    return [
        PlatformVersion.from_platform_version_summary(summary)
        for summary in platform_version_summaries]
def get_preferred_platform_version_for_branch(branch_name):
    """
    Gets the latest recommended platform version for a platform branch. If no
    platform versions are recommended it retrieves the latest.

    Raises NotFoundError when the branch has no versions at all.
    """
    matched_versions = get_platform_versions_for_branch(branch_name)
    # Newest first, using the version's sortable representation.
    matched_versions = list(sorted(
        matched_versions,
        key=lambda x: x.sortable_version,
        reverse=True))

    recommended_versions = [
        version for version in matched_versions if version.is_recommended]

    if len(recommended_versions) > 0:
        return recommended_versions[0]
    elif len(matched_versions) > 0:
        return matched_versions[0]
    else:
        raise NotFoundError(alerts['platform.invalidstring'].format(
            branch_name))
def list_custom_platform_versions(
        platform_name=None,
        platform_version=None,
        show_status=False,
        status=None
):
    """List platform versions owned by this account (PlatformOwner = self)."""
    filters = [api_filters.PlatformOwnerFilter(values=[Constants.OWNED_BY_SELF]).json()]

    return list_platform_versions(filters, platform_name, platform_version, show_status, status)
def list_eb_managed_platform_versions(
        platform_name=None,
        platform_version=None,
        show_status=False,
        status=None
):
    """List platform versions owned by AWS Elastic Beanstalk itself."""
    filters = [api_filters.PlatformOwnerFilter(values=['AWSElasticBeanstalk']).json()]

    return list_platform_versions(filters, platform_name, platform_version, show_status, status)
def list_platform_versions(
        filters,
        platform_name=None,
        platform_version=None,
        show_status=False,
        status=None
):
    """List platform version descriptions matching *filters*.

    Optional name/version/status arguments are appended as extra API
    filters. NOTE(review): *filters* is mutated in place by the appends.
    """
    if platform_name:
        filters.append(
            api_filters.PlatformNameFilter(values=[platform_name]).json()
        )

    if platform_version:
        filters.append(
            api_filters.PlatformVersionFilter(values=[platform_version]).json()
        )

    if status:
        filters.append(
            api_filters.PlatformStatusFilter(values=[status]).json()
        )

    platforms_list = elasticbeanstalk.list_platform_versions(filters=filters)

    return __formatted_platform_descriptions(platforms_list, show_status)
def stream_platform_logs(response, platform_name, version, timeout):
    """Stream Packer builder logs while waiting for the platform operation.

    A daemon thread tails the platform's logs through the shared streamer;
    the main thread blocks on the operation's success events.
    """
    arn = response['PlatformSummary']['PlatformArn']
    request_id = response['ResponseMetadata']['RequestId']

    streamer = io.get_event_streamer()

    builder_events = threading.Thread(
        target=logsops.stream_platform_logs,
        args=(platform_name, version, streamer, 5, None, PackerStreamFormatter()))
    builder_events.daemon = True

    builder_events.start()
    commonops.wait_for_success_events(
        request_id,
        platform_arn=arn,
        streamer=streamer,
        timeout_in_minutes=timeout or 30
    )
def version_to_arn(platform_version):
    """Resolve a user-supplied platform version string to a full ARN.

    Accepts a bare version ('1.0.0'), a complete ARN, or the short
    'name/version' form; raises InvalidPlatformVersionError otherwise.
    """
    platform_name = fileoperations.get_platform_name()

    arn = None
    if PlatformRegExpressions.VALID_PLATFORM_VERSION_FORMAT.match(platform_version):
        # Bare version: combine with the workspace's platform name.
        arn = get_platform_arn(platform_name, platform_version, owner=Constants.OWNED_BY_SELF)
    elif PlatformVersion.is_valid_arn(platform_version):
        arn = platform_version
    elif PlatformRegExpressions.VALID_PLATFORM_SHORT_FORMAT.match(platform_version):
        match = PlatformRegExpressions.VALID_PLATFORM_SHORT_FORMAT.match(platform_version)
        platform_name, platform_version = match.group(1, 2)
        arn = get_platform_arn(platform_name, platform_version, owner=Constants.OWNED_BY_SELF)

    if not arn:
        raise InvalidPlatformVersionError(strings['exit.nosuchplatformversion'])

    return arn
def _create_app_version_zip_if_not_present_on_s3(
        platform_name,
        version_label,
        source_control,
        staged
):
    """Return (s3_bucket, s3_key, file_path) for the app version zip.

    file_path is None when the version already exists on S3 (nothing to
    upload); otherwise a fresh zip of the project is created locally.
    """
    s3_bucket, s3_key = commonops.get_app_version_s3_location(platform_name, version_label)

    file_name, file_path = None, None
    if s3_bucket is None and s3_key is None:
        file_name, file_path = commonops._zip_up_project(version_label, source_control, staged=staged)
        s3_bucket = elasticbeanstalk.get_storage_location()
        s3_key = platform_name + '/' + file_name

    return s3_bucket, s3_key, file_path
def _datetime_now():
return datetime.now()
def _enable_healthd():
    """Inject enhanced-health option settings into the workspace's platform.yaml.

    Ensures the platform definition requests enhanced health reporting and the
    default Elastic Beanstalk service role, without duplicating options the
    platform author already set.
    """
    option_settings = [
        {
            'namespace': namespaces.HEALTH_SYSTEM,
            'option_name': option_names.SYSTEM_TYPE,
            'value': 'enhanced'
        },
        {
            'namespace': namespaces.ENVIRONMENT,
            'option_name': option_names.SERVICE_ROLE,
            'value': 'aws-elasticbeanstalk-service-role'
        },
    ]

    fileoperations.ProjectRoot.traverse()

    with open('platform.yaml', 'r') as stream:
        platform_yaml = yaml.safe_load(stream)

    try:
        platform_options = platform_yaml['option_settings']
    except KeyError:
        platform_options = []

    options_to_inject = []
    for option in option_settings:
        found_option = False
        for platform_option in platform_options:
            # Bug fix: a misplaced parenthesis made the original compare
            # `namespace` against the boolean of `(other_namespace and
            # option_name == other_option_name)`, so existing options were
            # never detected and always got re-injected. Compare both fields
            # directly instead.
            if (option['namespace'] == platform_option['namespace']
                    and option['option_name'] == platform_option['option_name']):
                found_option = True
                break
        if not found_option:
            options_to_inject.append(option)

    platform_options.extend(options_to_inject)
    platform_yaml['option_settings'] = list(platform_options)

    with open('platform.yaml', 'w') as stream:
        stream.write(yaml.dump(platform_yaml, default_flow_style=False))
def _generate_platform_yaml_copy():
file_descriptor, original_platform_yaml = tempfile.mkstemp()
os.close(file_descriptor)
copyfile('platform.yaml', original_platform_yaml)
return original_platform_yaml
def _raise_if_directory_is_empty():
    """Fail fast when the platform workspace has no files to package.

    Changes into the project root for the check and always restores the
    caller's working directory.
    """
    original_cwd = os.getcwd()
    fileoperations.ProjectRoot.traverse()
    try:
        if heuristics.directory_is_empty():
            raise PlatformWorkspaceEmptyError(strings['exit.platformworkspaceempty'])
    finally:
        os.chdir(original_cwd)
def _raise_if_platform_definition_file_is_missing():
    """Fail fast when the workspace lacks a platform definition file."""
    if heuristics.has_platform_definition_file():
        return
    raise PlatformWorkspaceEmptyError(strings['exit.no_pdf_file'])
def _raise_if_version_format_is_invalid(version):
    """Reject version strings that are not in the expected X.Y.Z form."""
    if PlatformRegExpressions.VALID_PLATFORM_VERSION_FORMAT.match(version):
        return
    raise InvalidPlatformVersionError(strings['exit.invalidversion'])
def _resolve_s3_bucket_and_key(
        platform_name,
        version_label,
        source_control,
        staged
):
    """Prepare the platform source bundle and report where it lives on S3.

    platform.yaml is modified (health settings injected) only for the bundle;
    the user's original file is restored afterwards no matter what happens.
    """
    pristine_platform_yaml = _generate_platform_yaml_copy()
    try:
        _enable_healthd()
        bucket, key, local_zip = _create_app_version_zip_if_not_present_on_s3(
            platform_name,
            version_label,
            source_control,
            staged)
    finally:
        # Put the untouched platform.yaml back in place.
        move(pristine_platform_yaml, 'platform.yaml')
    return bucket, key, local_zip
def _resolve_version_label(source_control, staged):
version_label = source_control.get_version_label()
if staged:
timestamp = _datetime_now().strftime("%y%m%d_%H%M%S")
version_label = version_label + '-stage-' + timestamp
return version_label
def _resolve_version_number(
        platform_name,
        major_increment,
        minor_increment,
        patch_increment
):
    """Compute the next semantic version for a custom platform.

    Starts at 1.0.0 when the platform has no versions yet; otherwise bumps
    the requested components (defaulting to a patch bump when no flag is set).
    """
    latest = get_latest_platform_version(
        platform_name=platform_name,
        owner=Constants.OWNED_BY_SELF,
        ignored_states=[])

    if latest is None:
        return '1.0.0'

    major, minor, patch = latest.split('.', 3)
    if major_increment:
        major, minor, patch = str(int(major) + 1), '0', '0'
    if minor_increment:
        minor, patch = str(int(minor) + 1), '0'
    if patch_increment or not (major_increment or minor_increment):
        patch = str(int(patch) + 1)
    return "%s.%s.%s" % (major, minor, patch)
def __formatted_platform_descriptions(platforms_list, show_status):
    """Sort platform summaries newest-first and render them as strings."""
    summaries = [
        {
            'PlatformArn': platform['PlatformArn'],
            'PlatformStatus': platform['PlatformStatus']
        }
        for platform in platforms_list
    ]

    # Highest name/version combination first.
    summaries.sort(
        key=lambda summary: (
            PlatformVersion.get_platform_name(summary['PlatformArn']),
            Version(PlatformVersion.get_platform_version(summary['PlatformArn']))
        ),
        reverse=True)

    descriptions = []
    for summary in summaries:
        if show_status:
            descriptions.append(
                '{platform_arn} Status: {platform_status}'.format(
                    platform_arn=summary['PlatformArn'],
                    platform_status=summary['PlatformStatus']))
        else:
            descriptions.append(summary['PlatformArn'])
    return descriptions
def _upload_platform_version_to_s3_if_necessary(bucket, key, file_path):
    """Upload the platform bundle to S3 unless the object already exists,
    then call fileoperations.delete_app_versions() to clean up afterwards."""
    try:
        s3.get_object_info(bucket, key)
        io.log_info('S3 Object already exists. Skipping upload.')
    except NotFoundError:
        # Object is missing: push the local archive up.
        io.log_info('Uploading archive to s3 location: ' + key)
        s3.upload_platform_version(bucket, key, file_path)
    fileoperations.delete_app_versions()
| StarcoderdataPython |
30097 | # Generated by Django 3.2.5 on 2021-07-26 18:46
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the ``occupants_count`` field from the ``Neighborhood`` model."""

    # Must run after the migration that added Business.created_on.
    dependencies = [
        ('myhood', '0009_business_created_on'),
    ]

    # Schema change: remove the occupant counter column.
    operations = [
        migrations.RemoveField(
            model_name='neighborhood',
            name='occupants_count',
        ),
    ]
| StarcoderdataPython |
1793544 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
import tensorflow as tf
# TF1-style eager mode so the .numpy() calls in main() work.
tf.enable_eager_execution()
def get_type_id(tgt_len, tgt_idx, type_val):
    """Spread per-token type values across a length-``tgt_len`` sequence.

    ``tgt_idx``/``type_val`` give the target position and type id of each
    kept token; the difference of two exclusive cumulative sums fills every
    position between consecutive tokens with the appropriate type id.
    """
    # Scatter each type id one token early (shifted variant)...
    shifted = tf.scatter_nd(
        shape=[tgt_len],
        indices=tgt_idx[:-1][:, None],
        updates=type_val[1:]
    )
    shifted = tf.concat([type_val[:1], shifted], axis=0)
    shifted = tf.math.cumsum(shifted, exclusive=True)[1:]

    # ...and at its own slot (unshifted variant).
    unshifted = tf.scatter_nd(
        shape=[tgt_len],
        indices=tgt_idx[:, None],
        updates=type_val
    )
    unshifted = tf.math.cumsum(unshifted, exclusive=True)

    # The running difference reproduces the per-position type ids.
    return shifted - unshifted
def main(argv):
    """Toy demo: derive encoder/decoder tensors for random text edits.

    Simulates delete/insert/replace operations on a synthetic sequence and
    prints the resulting encoder input, decoder input/target, per-position
    edit labels and segment type ids.
    """
    if len(argv) > 1:
        raise app.UsageError("Too many command-line arguments.")

    seq_len = 16
    inputs = tf.range(1, seq_len + 1, 1, dtype=tf.int32)
    type_id = tf.range(1, seq_len + 1, 1, dtype=tf.int32)

    # Edit ratios and sentinel labels (negative so they cannot collide with
    # the positive token ids above).
    del_ratio = 0.1
    add_ratio = 0.1
    rep_ratio = 0.2
    rep_label = -1
    add_label = -2
    del_label = -3

    # Sample which positions get deleted.
    del_rand = tf.random.uniform(shape=[seq_len], minval=0, maxval=1)
    del_mask = del_rand < del_ratio
    non_del_mask = tf.logical_not(del_mask)
    right_shift_del_mask = tf.concat(
        [tf.constant(False, shape=[1]), del_mask[:-1]], axis=0)
    # Disallow insertions at a deleted position or right after one.
    non_add_mask = tf.logical_or(del_mask, right_shift_del_mask)

    # Sample how many insertions happen and at which allowed slots.
    add_rand = tf.random.uniform(shape=[seq_len], minval=0, maxval=1)
    add_num = tf.reduce_sum(tf.cast(add_rand < add_ratio, tf.int32))
    add_uniform = tf.random.uniform(shape=[add_num, seq_len], minval=0, maxval=1)
    add_uniform -= 1e5 * tf.cast(non_add_mask, tf.float32)  # mask via large penalty
    add_idx = tf.argmax(add_uniform, axis=1)
    add_cnt = tf.reduce_sum(tf.one_hot(add_idx, seq_len, dtype=tf.int32), 0)

    # Sample replacements among positions not deleted and not insertion targets.
    rep_rand = tf.random.uniform(shape=[seq_len], minval=0, maxval=1)
    rep_mask = tf.logical_and(tf.equal(add_cnt, 0), tf.logical_not(non_add_mask))
    rep_mask = tf.logical_and(
        rep_rand < (rep_ratio / (1 - 2 * del_ratio - add_ratio)), rep_mask)
    rep_input = tf.where(
        rep_mask,
        tf.constant(rep_label, shape=[seq_len]),
        inputs)

    tgt_len_encoder = tgt_len_decoder = seq_len

    print("rep", tf.cast(rep_mask, tf.int32).numpy().tolist())
    print("add", add_cnt.numpy().tolist())
    print("del", tf.cast(del_mask, tf.int32).numpy().tolist())

    ori_idx = tf.range(seq_len)

    #### encoder input
    # Each surviving token shifts right by the insertions before it and left
    # by the deletions before it.
    shift_val = add_cnt - tf.cast(del_mask, tf.int32)
    shift_val = tf.cumsum(shift_val)
    shift_idx = ori_idx + shift_val

    tgt_len = tgt_len_encoder
    valid_tgt = shift_idx < tgt_len

    # remove deleted token
    tgt_idx = tf.boolean_mask(shift_idx, tf.logical_and(non_del_mask, valid_tgt))
    tgt_val = tf.boolean_mask(rep_input, tf.logical_and(non_del_mask, valid_tgt))
    type_val = tf.boolean_mask(type_id, tf.logical_and(non_del_mask, valid_tgt))
    max_len = tf.math.reduce_max(tgt_idx) + 1

    enc_type_id = get_type_id(tgt_len, tgt_idx, type_val)
    # Fill every slot up to max_len with the add sentinel, then overwrite
    # the kept tokens at their shifted positions.
    enc_seq = tf.scatter_nd(
        shape=[tgt_len],
        indices=tf.range(0, max_len)[:, None],
        updates=tf.zeros(shape=[max_len], dtype=tf.int32) + add_label
    )
    enc_seq = tf.tensor_scatter_nd_update(
        enc_seq,
        indices=tgt_idx[:, None],
        updates=tgt_val)

    print("encoder input")
    print(enc_seq.numpy().tolist())
    print(enc_type_id.numpy().tolist())

    #### decoder
    # The decoder keeps deleted tokens, so positions shift only by insertions.
    shift_val = tf.cumsum(add_cnt)
    shift_idx = ori_idx + shift_val

    tgt_len = tgt_len_decoder
    valid_tgt = shift_idx < tgt_len
    tgt_idx = tf.boolean_mask(shift_idx, valid_tgt)
    tgt_val = tf.boolean_mask(inputs, valid_tgt)
    type_val = tf.boolean_mask(type_id, valid_tgt)
    max_len = tf.math.reduce_max(tgt_idx) + 1

    # Special vocabulary ids for padding, end-of-sequence and insertion slots.
    pad_id = 100
    eos_id = 101
    add_id = 102

    dec_type_id = get_type_id(tgt_len, tgt_idx, type_val)
    dec_seq = tf.concat(
        [tf.zeros(shape=[max_len], dtype=tf.int32) + add_id,
         tf.zeros(shape=[tgt_len - max_len], dtype=tf.int32) + pad_id], 0)
    dec_seq = tf.tensor_scatter_nd_update(
        dec_seq,
        indices=tgt_idx[:, None],
        updates=tgt_val)

    # decoder input: the target shifted right with an EOS prepended
    dec_inp = tf.concat([tf.constant(eos_id, shape=[1]), dec_seq[:-1]], 0)

    # edit type label
    dec_add_mask = tf.equal(dec_seq, add_id)
    dec_rep_mask = tf.scatter_nd(
        shape=[tgt_len],
        indices=tgt_idx[:, None],
        updates=tf.boolean_mask(rep_mask, valid_tgt)
    )
    dec_del_mask = tf.scatter_nd(
        shape=[tgt_len],
        indices=tgt_idx[:, None],
        updates=tf.boolean_mask(del_mask, valid_tgt)
    )
    edit_label = tf.cast(dec_add_mask, tf.int32) * add_label
    edit_label += tf.cast(dec_rep_mask, tf.int32) * rep_label
    edit_label += tf.cast(dec_del_mask, tf.int32) * del_label

    print("decoder")
    print("inputs", dec_inp.numpy().tolist())
    print("target", dec_seq.numpy().tolist())
    print("labels", edit_label.numpy().tolist())
    print("type_id", dec_type_id.numpy().tolist())
if __name__ == "__main__":
    # absl entry point: parses --flags and forwards positional argv to main().
    app.run(main)
| StarcoderdataPython |
1688744 | <reponame>foxsi/foxsi-smex<filename>pyfoxsi/scripts/convolve.py
from pyfoxsi.psf import convolve
from sunpy.map import Map

import matplotlib.pyplot as plt

# NOTE(review): hard-coded, user-specific path — parameterize before reuse.
f = '/Users/schriste/Google Drive/Work/FOXSI SMEX/Data/hsi_image_20050513_164526to164626_pixon_3arcsecx64_25to50kev_d1to9.fits'

# Load a RHESSI image and convolve it with the FOXSI point spread function.
hsi = Map(f)
foxsi_map = convolve(hsi)

# Show the original and convolved maps side by side.
plt.figure()
plt.subplot(1, 2, 1)
hsi.plot()
plt.subplot(1, 2, 2)
foxsi_map.plot()
plt.show()
| StarcoderdataPython |
3336391 | import sys
from rimp.installer import install_repl, update_cache, already_installed
from rimp.fetcher import collect_files
def load_repl(name: str, project: str, force_reinstall: bool = False, verbose: bool = True):
    """Fetch and install a repl's files, then make its packages importable.

    Adds the rimp site-packages directory to ``sys.path``, skips the download
    when the repl is already cached (unless ``force_reinstall``), and records
    the install in the cache afterwards.

    :param name: repl owner/user name
    :param project: repl project name
    :param force_reinstall: re-download even if the cache says it is installed
    :param verbose: forwarded to the installer for progress output
    """
    if sys.platform == "win32":
        site_packages = ".rimp/Lib/site-packages"
    else:
        # Bug fix: the original used sys.version[:3], which yields "3.1" on
        # Python 3.10+ and therefore pointed at the wrong directory. Build
        # the "pythonX.Y" name from sys.version_info instead.
        py = "python{}.{}".format(sys.version_info.major, sys.version_info.minor)
        site_packages = ".rimp/lib/" + py + "/site-packages"
    if site_packages not in sys.path:
        sys.path.append(site_packages)

    if not force_reinstall and already_installed(name, project):
        return

    # Download every file of the repl, then install them as one batch.
    repl_total = {}
    for file_path, contents in collect_files(name, project):
        repl_total[file_path] = contents
    install_repl(repl_total, verbose)
    update_cache(name, project)
| StarcoderdataPython |
1692433 | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2021, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import month_diff, getdate
from frappe.model.document import Document
from one_fm.one_fm.doctype.indemnity_allocation.indemnity_allocation import get_total_indemnity
from dateutil.relativedelta import relativedelta
from datetime import date
class EmployeeIndemnity(Document):
    """DocType controller for Employee Indemnity; the calculations live in
    the module-level whitelisted helpers below."""
    pass
@frappe.whitelist()
def get_indemnity_for_employee(employee, exit_status, doj, exit_date):
    """Calculate an employee's indemnity for a given exit status and exit date.

    Args:
        employee (str): Employee ID.
        exit_status (str): "Resignation", "Termination" or "End of Service".
        doj (str): Employee's date of joining.
        exit_date (str): Employee's date of exit.

    Returns:
        dict with the employee's Indemnity Allocation name, the Indemnity
        Policy name matching the exit status, the indemnity percentage for
        the employee's years of service, and the total indemnity allowed
        (allocation until exit date, scaled by the percentage);
        False when no active allocation or matching policy exists.
    """
    # Active (non-expired) Indemnity Allocation for this employee.
    allocation = frappe.get_doc('Indemnity Allocation', {'employee': employee, 'expired': ['!=', 1]})
    if allocation:
        # Indemnity Policy configured for the given exit status.
        policy = frappe.get_doc('Indemnity Policy', {'exit_status': exit_status})
        if policy:
            # NOTE(review): years of service are measured up to *today*,
            # while the allocation below is measured up to exit_date —
            # confirm this asymmetry is intended.
            total_working_year = relativedelta(date.today(), getdate(doj)).years
            # Indemnity allocated from joining until the exit date.
            total_indemnity_allocated = get_total_indemnity(getdate(doj), getdate(exit_date))
            indemnity_percentage = 0
            allocation_policy = sorted(policy.indemnity_policy_details, key=lambda x: x.threshold_year)
            # Pick the percentage of the highest threshold the employee reached
            # (rows are sorted ascending, so later matches overwrite earlier ones).
            for alloc_policy in allocation_policy:
                if alloc_policy.threshold_year <= total_working_year:
                    indemnity_percentage = alloc_policy.indemnity_percentage
            # Scale the allocation by the applicable percentage.
            total_indemnity_allowed = total_indemnity_allocated * (indemnity_percentage/100)
            return {'allocation': allocation.name, 'policy': policy.name,
                    'indemnity_percentage': indemnity_percentage, 'total_indemnity_allowed': total_indemnity_allowed}
    return False
@frappe.whitelist()
def get_salary_for_employee(employee):
    """Return the indemnity amount from the employee's Salary Structure
    Assignment, or None when no employee is given."""
    if not employee:
        return None
    return frappe.get_value(
        "Salary Structure Assignment",
        {"employee": employee},
        ["indemnity_amount"])
1715899 | import sqlite3
import logging
import os
class DBEngine(object):
    """Thin wrapper owning a sqlite3 connection for a given database path."""

    def __init__(self, connectstring):
        # Path to the database file, or ":memory:" for an in-memory database.
        self.connectstring = connectstring
        self.connection = None

    def open(self):
        """Open the connection if needed; return True on success."""
        if not self.connection:
            try:
                self.connection = sqlite3.connect(self.connectstring)
            except sqlite3.Error:
                # Narrowed from a bare except: only sqlite failures are
                # expected here; anything else is a programming error.
                return False
        return True

    def close(self):
        """Close and forget the connection (safe when already closed)."""
        if self.connection:
            self.connection.close()
            self.connection = None

    def _destruct(self):
        """Close the connection and delete the on-disk database file.

        Bug fix: this read ``self.connectionstring`` (a misspelling of
        ``connectstring``), so every call raised AttributeError.
        """
        self.close()
        if self.connectstring == ":memory:":
            return
        os.remove(self.connectstring)

    def engine(self):
        """Identify the backing engine."""
        return "sqlite"
class LogDB(object):
    """Persistence layer for Zeek-style conn/smtp/http log records.

    Writes are batched: the underlying connection is committed once every
    ``commit_limit`` record operations, and once more on close().
    """

    def __init__(self, dbengine):
        # dbengine must expose a live `.connection` (see DBEngine).
        self.dbengine = dbengine
        self.version = "1.0"
        self.commit_count = 0
        self.commit_limit = 1000

    def _getCursor(self):
        """Return a cursor, creating and caching it on first use."""
        if hasattr(self, '_cursor'):
            return getattr(self, '_cursor')
        setattr(self, '_cursor', self.dbengine.connection.cursor())
        return getattr(self, '_cursor')

    def close(self):
        """Flush pending writes and close the underlying connection."""
        self.dbengine.connection.commit()
        self.dbengine.connection.close()
        self.dbengine.connection = None

    def _commit(self):
        """Commit once every `commit_limit` calls to amortize fsync cost."""
        self.commit_count += 1
        if self.commit_count >= self.commit_limit:
            self.dbengine.connection.commit()
            self.commit_count = 0

    def _exists(self, tablename):
        """Return True if `tablename` exists in the database."""
        cursor = self._getCursor()
        cursor.execute("select count(type) from sqlite_master where tbl_name=?;", (tablename,))
        result = int(cursor.fetchone()[0])
        return True if result else False

    def _create_properties(self):
        """Create the metadata table and record the schema version."""
        cursor = self._getCursor()
        cursor.execute("create table if not exists properties (label TEXT UNIQUE,value TEXT);")
        cursor.execute("INSERT INTO properties (label,value) VALUES (?,?)", ("VERSION", self.version))
        self._commit()

    def _checkVersion(self):
        """Return True when the stored schema version is compatible."""
        cursor = self._getCursor()
        cursor.execute("select value from properties where label=?", ("VERSION",))
        result = cursor.fetchone()
        # Bug fix: fetchone() returns None when the row is missing; the old
        # len(result) check raised TypeError instead of reporting corruption.
        if result is None or len(result) != 1:
            logging.critical("Database corruption. Cannot determine version number.")
            return False
        if int(float(result[0])) != int(float(self.version)):
            logging.info("Database version mismatch. Database version=" + result[0] + ", API version=" + self.version)
            return False
        if float(result[0]) < float(self.version):
            logging.info(
                "Database version mismatch. Database version " + result[
                    0] + " less than API version " + self.version)
            return False
        return True

    def instantiate(self):
        """Create the schema, or validate the version of an existing database."""
        if not self._exists("properties"):
            self._create_properties()
        else:
            if not self._checkVersion():
                return False
        cursor = self._getCursor()
        cursor.execute(
            "create table if not exists connlog (sourceip not null, destip not null, destport INTEGER not null, "
            " numconnections INTEGER, firstconnectdate, PRIMARY KEY(sourceip,destip,destport))"
        )
        # NOTE(review): connerr is created but never written to by any method
        # below — presumably failed connections were meant to land here.
        cursor.execute(
            "create table if not exists connerr (sourceip not null, destip not null, destport INTEGER not null, "
            " numconnections INTEGER, firstconnectdate, PRIMARY KEY(sourceip,destip,destport))"
        )
        cursor.execute(
            "create table if not exists smtplog (source not null, destination not null, numconnections integer, "
            "firstconnectdate, PRIMARY KEY(source,destination))"
        )
        cursor.execute(
            "create table if not exists httplog (host not null, numconnections integer, firstconnectdate, "
            "PRIMARY KEY(host))"
        )
        self._commit()
        return True

    def destruct(self):
        """Drop the whole database via the owning engine."""
        self.dbengine._destruct()

    def add_conn_record(self, data):
        """Upsert a connection tuple and bump its counter in connlog.

        The original had two byte-identical branches on conn_state; they are
        collapsed here (behavior unchanged). See the connerr note in
        instantiate() about the likely original intent.
        """
        cursor = self._getCursor()
        cursor.execute(
            "insert or ignore into connlog (sourceip,destip,destport,numconnections,firstconnectdate) "
            "values (?,?,?,0,?)",
            (data["id.orig_h"], data["id.resp_h"], data["id.resp_p"], data["ts"])
        )
        cursor.execute(
            "update connlog set numconnections=numconnections+1 where "
            "sourceip=? and destip=? and destport=?",
            (data["id.orig_h"], data["id.resp_h"], data["id.resp_p"])
        )
        self._commit()
        return

    def add_smtp_record(self, data):
        """Upsert an SMTP from/to pair and bump its counter.

        Bug fix: the existence check read data["source"]/data["destination"],
        keys this method never receives (the insert used "mailfrom"/"rcptto"),
        so every call raised KeyError. Use mailfrom/rcptto throughout.
        """
        cursor = self._getCursor()
        cursor.execute("select count(*) from smtplog where source=? and destination=?",
                       (data["mailfrom"], data["rcptto"]))
        result = int(cursor.fetchone()[0])
        if result == 0:
            cursor.execute(
                "insert into smtplog (source,destination,numconnections,firstconnectdate) "
                "values (?,?,1,?)",
                (data["mailfrom"], data["rcptto"], data["ts"])
            )
        else:
            cursor.execute(
                "update smtplog set numconnections=numconnections+1 where source=? and destination=?",
                (data["mailfrom"], data["rcptto"])
            )
        self._commit()
        return

    def add_http_record(self, data):
        """Upsert an HTTP host and bump its counter.

        Bug fix: the row used to be inserted with numconnections=1 and then
        immediately incremented, so the first sighting counted as 2. Insert 0
        (as the conn table does) so every call adds exactly one.
        """
        try:
            cursor = self._getCursor()
            cursor.execute(
                "insert or ignore into httplog (host,numconnections,firstconnectdate) "
                "values (?,?,?)",
                (data["host"], 0, data["ts"])
            )
            cursor.execute(
                "update httplog set numconnections=numconnections+1 where "
                "host=?",
                (data["host"], )
            )
            self._commit()
        except Exception:
            # Narrowed from a bare except; the message previously said
            # "MYSQLDB" although this module is sqlite-backed.
            logging.error("LogDB: Error processing: " + repr(data))
        return
| StarcoderdataPython |
3367215 | import csv
import requests
import os
csv.field_size_limit(2147483647)  # some OFF rows exceed the default field limit

# Accumulates the unique ingredient strings found while parsing.
possible_ingredients = set()

BASE_URL = "https://world.openfoodfacts.org"
INPUT_FILE = 'en.openfoodfacts.org.products.csv'
OUTPUT_FILE = 'raw_ingredients.txt'

# Only keep products sold in these markets.
COUNTRIES = ['United Kingdom',
             'United States',
             'United-states-of-america',
             'European Union',
             'Canada']
""" Open a data stream to download the massive data file"""
def get_data_stream(url):
return requests.get(BASE_URL + url, stream=True)
"""Extract the ingredients from the CSV file and save it to disk"""
def get_source_data():
print(" >> Downloading data...")
off_data = get_data_stream("/data/en.openfoodfacts.org.products.csv")
f = open(INPUT_FILE, 'wb')
# chunk_size = 100MB chunks
for data in off_data.iter_content(chunk_size=10240 * 10240):
f.write(data)
f.flush()
f.close()
print(" >> done.\n")
print(" >> Processing data...")
if not os.path.exists(INPUT_FILE) or not os.path.isfile(INPUT_FILE):
# Raise an exception here instead
return False
"""Read CSV file"""
with open(INPUT_FILE, 'rt', encoding="utf-8") as csvfile:
"""Iterate through the rows in the CSV file"""
filereader = csv.DictReader(
csvfile, delimiter='\t', quoting=csv.QUOTE_NONE)
for product in filereader:
"""get the list of ingredients from the product row"""
ingredients_text = product['ingredients_text']
country = product['countries_en']
"""Only save ingredients for the countries specified"""
if ingredients_text is not None and country in COUNTRIES:
possible_ingredients.add(ingredients_text)
"""Save the data to disk"""
with open(OUTPUT_FILE, 'wt', encoding="utf-8") as outputfile:
for ingredient in possible_ingredients:
outputfile.write(ingredient + "\n")
"""Delete the CSV file"""
os.remove(INPUT_FILE)
print(" >> Writing to", OUTPUT_FILE, "\n")
return True
# print or return stats? # [(# of ingredients, # of products, # words removed), etc...]
# Script entry point: run the full download-and-extract pipeline.
if __name__ == '__main__':
    get_source_data()
| StarcoderdataPython |
# Read a point's coordinates and print which quadrant it lies in.
# (Assumes nonzero inputs per the problem statement — TODO confirm.)
x = int(input())
y = int(input())
if x * y > 0:
    # Same signs: quadrant I (both positive) or III (both negative).
    print(3 if x < 0 else 1)
else:
    # Different signs: quadrant IV (x positive) or II.
    print(4 if x > 0 else 2)
| StarcoderdataPython |
1705011 | <filename>checking/utils/test_checking.py<gh_stars>0
import os
import shutil
import tempfile
import unittest
from checking.utils import resolve_config, strip_config
class TestChecking(unittest.TestCase):
    """Exercise resolve_config/strip_config against throwaway fixture scripts."""

    def _write_script(self, name, contents):
        # Create a fixture file under the temporary home and return its path.
        path = os.path.join(self.home, name)
        with open(path, 'w') as handle:
            handle.write(contents)
        return path

    def setUp(self):
        self.home = tempfile.mkdtemp()
        self.no_file = os.path.join(self.home, 'no_file_here')
        self.script_storage = self._write_script(
            'script_storage', '# comment 1\nSTORAGE=holographic')
        self.script_empty = self._write_script('empty', '')
        self.script_error = self._write_script('error', '#!/bin/bash\n()=Bozo')
        self.script_all_comments = self._write_script(
            'all_comments', '#!/bin/bash\n# line 1\n # line 2\n')
        self.script_comments = self._write_script(
            'comments', '#!/bin/bash\nline 1\nline 2\n')

    def tearDown(self):
        shutil.rmtree(self.home)

    def test_resolve_storage(self):
        self.assertEqual(resolve_config(self.script_storage)['STORAGE'], 'holographic')

    def test_resolve_no_file(self):
        self.assertDictEqual(resolve_config(self.no_file), {})

    def test_resolve_empty(self):
        self.assertDictEqual(resolve_config(self.script_empty), {})

    def test_resolve_error(self):
        self.assertDictEqual(resolve_config(self.script_error), {})

    def test_strip_storage(self):
        self.assertListEqual(strip_config(self.script_storage), ['STORAGE=holographic'])

    def test_strip_no_file(self):
        self.assertListEqual(strip_config(self.no_file), [])

    def test_strip_empty(self):
        self.assertListEqual(strip_config(self.script_empty), [])

    def test_strip_error(self):
        self.assertListEqual(strip_config(self.script_error), ['()=Bozo'])
| StarcoderdataPython |
1693151 | """Retrieves text posts from reddit using the reddit API, and saves them in a csv format.
Authors: <NAME>, <NAME>, <NAME>, <NAME>
"""
import datetime
import argparse
import requests
import pathlib
import csv
TODAY = datetime.datetime.utcnow()
ONE_MONTH_DELTA = datetime.timedelta(days=30)
# NOTE(review): none of these three module-level dates are referenced in the
# functions below — candidates for removal.
MINUS_ONE_MONTH = TODAY - ONE_MONTH_DELTA
def submissionFilter(submission):
    """Keep only text posts whose body survived moderation.

    Params:
    - submission (dict): The submission to be filtered

    Returns:
    - contains_text (bool): True if the submission has usable body text
    """
    if not submission['is_self']:
        return False
    body = submission.get('selftext')
    if not body:
        # Missing key, None, or empty string.
        return False
    return body not in ('[deleted]', '[removed]')
def api_request(subreddit: str, from_date: int, until_date: int) -> list:
    """Fetch one page (up to 500) of text submissions from pushshift.

    Params:
    - subreddit (str): The subreddit on which to run the request
    - from_date (int): UTC timestamp to start from
    - until_date (int): UTC timestamp to stop at (exclusive)

    Returns:
    - submissions (list<dict>): filtered text submissions, oldest first
    """
    print("from: {} to: {}".format(
        datetime.datetime.fromtimestamp(from_date), datetime.datetime.fromtimestamp(until_date)))
    url = "https://api.pushshift.io/reddit/search/submission/?subreddit={}&after={}&before={}&size=500&sort=asc".format(
        subreddit, from_date, until_date - 1)
    payload = requests.get(url).json()
    # Keep only submissions with usable body text.
    return [entry for entry in payload['data'] if submissionFilter(entry)]
def get_submissions(subreddit: str, from_date: datetime.datetime, until_date: datetime.datetime) -> list:
    """Page through pushshift between two timestamps, appending each page to CSV.

    Each request resumes from the newest timestamp seen so far, so the whole
    interval gets covered despite the 500-post page limit.
    """
    start_display = datetime.datetime.fromtimestamp(from_date)
    total = 0
    batch = api_request(subreddit, from_date, until_date)
    while batch:
        total += len(batch)
        newest_ts = batch[-1]['created_utc']
        print('Got {} submissions from {} until {}'.format(
            total,
            start_display,
            datetime.datetime.fromtimestamp(newest_ts)))
        submissions_to_csv(subreddit, batch)
        batch = api_request(subreddit, newest_ts, until_date)
    print("Got {:d} submissions!".format(total))
def submissions_to_csv(subreddit: str, submissions: list):
    """Append the submissions to data/<subreddit>/submissions.csv.

    Creates the directory and a header row on first use.

    Params:
    - subreddit (str): The subreddit for which the submissions were obtained
    - submissions (list<dict>): The submissions returned from the pushshift API
    """
    directory_path = pathlib.Path("./data/{}".format(subreddit))
    directory_path.mkdir(parents=True, exist_ok=True)
    file_path = directory_path / 'submissions.csv'
    new_file = not file_path.is_file()  # pylint: disable=E1101
    # newline='' is required by the csv module (bug fix): without it every
    # row ends in \r\r\n on Windows.
    with file_path.open('a', encoding="utf-8", newline='') as csv_file:  # pylint: disable=E1101
        csv_writer = csv.writer(csv_file)
        if new_file:  # Write headings
            csv_writer.writerow(
                ['title', 'score', 'num_comments', 'over_18', 'created_utc', 'selftext'])
        for submission in submissions:
            csv_writer.writerow([submission['title'], submission['score'],
                                 submission['num_comments'], submission['over_18'],
                                 datetime.datetime.fromtimestamp(submission['created_utc']),
                                 submission['selftext'].replace('\n', "\\n")])
def date_type(date_arg: str) -> datetime.datetime:
    """Parse YYYY[-MM[-DD]] into a datetime; missing parts default to 1.

    Params:
    - date_arg (str): The date argument passed in by the user

    Returns:
    - date (datetime.datetime): The parsed date
    """
    for date_format in ("%Y-%m-%d", "%Y-%m"):
        try:
            return datetime.datetime.strptime(date_arg, date_format)
        except ValueError:
            pass
    # Year-only form; for bad input let strptime's ValueError propagate,
    # matching the original nested try/except behavior.
    return datetime.datetime.strptime(date_arg, "%Y")
def parse_arguments() -> argparse.Namespace:
    """Parse the scraper's command-line arguments.

    Returns:
    - args (argparse.Namespace): parsed subreddit and date range
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s', '--subreddit', type=str, default='askreddit',
        help='The subreddit from which to get data')
    parser.add_argument(
        '-f', '--from_date', type=date_type,
        default=datetime.datetime(2018, 2, 1),
        help='The date from which to start getting data (format = YYYY-MM-DD)')
    parser.add_argument(
        '-u', '--until_date', type=date_type,
        default=datetime.datetime(2018, 3, 1),
        help='The date until which to get data (format = YYYY-MM-DD)')
    parsed = parser.parse_args()
    print(parsed)
    return parsed
def to_utc(date: datetime.datetime) -> int:
    """Convert a naive local datetime to an int epoch timestamp for pushshift.

    NOTE(review): this applies the local/UTC offset twice (timestamp(), then
    utcfromtimestamp().timestamp()); preserved as-is because the calling
    queries were written against this behavior.
    """
    local_epoch = date.timestamp()
    shifted = datetime.datetime.utcfromtimestamp(local_epoch)
    return int(shifted.timestamp())
if __name__ == '__main__':
    args = parse_arguments()
    # pushshift expects UTC epoch seconds for both interval endpoints.
    utc_from_date = to_utc(args.from_date)
    utc_until_date = to_utc(args.until_date)
    get_submissions(args.subreddit, utc_from_date, utc_until_date)
| StarcoderdataPython |
def nearest_mid(input_list, lower_bound_index, upper_bound_index, search_value):
    """Interpolation-search probe: estimate where search_value should sit.

    Linearly interpolates between the values at the two bound indices using
    integer division; raises ZeroDivisionError when both bound values are
    equal, so callers must guard that case.
    """
    span = upper_bound_index - lower_bound_index
    value_range = input_list[upper_bound_index] - input_list[lower_bound_index]
    offset = search_value - input_list[lower_bound_index]
    return lower_bound_index + (span // value_range) * offset
def interpolation_search(ordered_list, term):
    """Search a sorted list of ints for ``term``; return its index or None.

    Bug fix: the original probe divided by
    ``ordered_list[high] - ordered_list[low]``, which is zero whenever the
    remaining slice holds a single distinct value (duplicates, or when the
    bounds converge because ``term`` exceeds the maximum), raising
    ZeroDivisionError. Such a flat slice is now resolved directly.
    """
    low = 0
    high = len(ordered_list) - 1
    while low <= high:
        low_value = ordered_list[low]
        high_value = ordered_list[high]
        if low_value == high_value:
            # Flat slice: term is either this value or absent.
            return low if low_value == term else None
        # Linear interpolation of term's expected position.
        mid_point = low + (
            (high - low) // (high_value - low_value)
        ) * (term - low_value)
        if mid_point > high or mid_point < low:
            return None
        if ordered_list[mid_point] == term:
            return mid_point
        if term > ordered_list[mid_point]:
            low = mid_point + 1
        else:
            high = mid_point - 1
    return None
# Demo: search the sample list and report where 2 was found.
store = [2, 4, 5, 12, 43, 54, 60, 77]
a = interpolation_search(store, 2)
print("Index position of value 2 is ", a)
| StarcoderdataPython |
3238880 | from ridewithgps import APIClient
class RideWithGPS(APIClient):
    """API client bound to the Ride with GPS service endpoint."""

    BASE_URL = 'https://ridewithgps.com/'
3291041 | # -*- coding: utf-8 -*-
from collections import defaultdict
class Annotation(object):
    """This class represents a single (possibly discontinuous) annotation."""

    def __init__(self, id, representation, spans, labels=()):
        """
        Create an annotation object.

        :param id: (string) The id of the current annotation.
        :param representation: (string) The string representation of the
        annotation. Doesn't take into account the fact that annotations may be
        discontinuous.
        :param spans: (list of list of ints) A list of list of ints
        representing the starting and ending points, in characters, for any
        words in the annotation.
        :param labels: (list of strings) a list of initial labels for the
        annotation object. These never get an initial value.
        :return: None
        """
        self.id = id
        # Maps a link type to the annotations it points at.
        self.links = defaultdict(list)
        # Maps a label name to its (initially empty) payload list.
        self.labels = defaultdict(list)
        for label in labels:
            self.labels[label] = []
        self.repr = representation
        self.spans = spans
        # Overall extent: start of the first span to end of the last one.
        self.realspan = (spans[0][0], spans[-1][1])
        self.words = []

    def __repr__(self):
        """Representation of the annotation.

        Bug fix: the Python 2 era ``.encode("utf-8")`` produced
        "Annotation: b'...'" on Python 3; format the text directly.
        """
        return "Annotation: {0}".format(self.repr)
| StarcoderdataPython |
1776950 | <reponame>triplejberger/Queue
import datetime
def get_readable_time_from_ingame_timestamp(ingame_time):
    """Render an in-game duration given in milliseconds as H:MM:SS[.ffffff]."""
    elapsed = datetime.timedelta(milliseconds=ingame_time)
    return str(elapsed)
1688585 | <reponame>computationalmodelling/fidimag<gh_stars>10-100
from fidimag.extensions.common_clib import normalise
from fidimag.extensions.common_clib import init_scalar
from fidimag.extensions.common_clib import init_vector
from fidimag.extensions.common_clib import init_vector_func_fast
import fidimag.extensions.clib as clib
import numpy as np
def extract_data(mesh, npys, pos, comp='x'):
    """Collect one spin component at chosen mesh sites from saved .npy files.

    npys:
        the file names of the saved arrays
    pos:
        mesh coordinates such as [(1, 0, 0), ..., (2, 3, 4)]
    comp:
        which component to read: 'x', 'y' or 'z'
    """
    component_offsets = {'x': 0, 'y': 1, 'z': 2}
    if comp not in component_offsets:
        raise Exception('Seems given component is wrong!!!')
    # Flat indices of the requested sites, shifted into the requested
    # component's section of the field array (sections are mesh.n long).
    site_ids = np.array([mesh.index(p[0], p[1], p[2]) for p in pos])
    site_ids += component_offsets[comp] * mesh.n
    return np.array([np.load(name)[site_ids] for name in npys])
def compute_RxRy(mesh, spin, nx_start=0, nx_stop=-1, ny_start=0, ny_stop=-1):
    """Delegate the Rx/Ry computation to the compiled fidimag clib routine
    over the selected cell window (semantics of -1 bounds are defined by
    the C extension)."""
    return clib.compute_RxRy(
        spin, mesh.nx, mesh.ny, mesh.nz,
        nx_start, nx_stop, ny_start, ny_stop)
| StarcoderdataPython |
3280989 | <gh_stars>0
# https://www.acmicpc.net/problem/2753
# Gregorian leap-year rule: divisible by 4, except century years,
# unless also divisible by 400. Prints "1" for a leap year, "0" otherwise.
year = int(input())
if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
    print("1")
else:
    print("0")
| StarcoderdataPython |
48753 | <gh_stars>1-10
import logging
from dataclasses import dataclass
from telliot_core.dtypes.value_type import ValueType
from telliot_core.queries.abi_query import AbiQuery
# Module logger (not referenced elsewhere in this file).
logger = logging.getLogger(__name__)
@dataclass
class Snapshot(AbiQuery):
    """Query for the vote tallies of a single Snapshot proposal.

    The answer is an array of unsigned integers, one per vote option, giving
    the amount of votes cast for that option.

    Attributes:
        proposal_id:
            Identifier of a valid proposal on Snapshot
            (see https://docs.snapshot.org/graphql-api for reference).
    """

    proposal_id: str

    #: ABI used for encoding/decoding parameters
    abi = [{"name": "proposal_id", "type": "string"}]

    @property
    def value_type(self) -> ValueType:
        """Data type returned for a Snapshot query.

        - `uint256[]`: variable-length array of 256-bit values with 18 decimals of precision
        - `packed`: false
        """
        return ValueType(abi_type="uint256[]", packed=False)
| StarcoderdataPython |
4800841 | <reponame>sages-pl/2022-01-pythonsqlalchemy-aptiv<gh_stars>0
"""
* Assignment: OOP Relations HasPosition
* Complexity: medium
* Lines of code: 18 lines
* Time: 8 min
English:
1. Define class `Point`
2. Class `Point` has attributes `x: int = 0` and `y: int = 0`
3. Define class `HasPosition`
4. In `HasPosition` define method `get_position(self) -> Point`
5. In `HasPosition` define method `set_position(self, x: int, y: int) -> None`
6. In `HasPosition` define method `change_position(self, left: int = 0, right: int = 0, up: int = 0, down: int = 0) -> None`
7. Assume left-top screen corner as a initial coordinates position:
a. going right add to `x`
b. going left subtract from `x`
c. going up subtract from `y`
d. going down add to `y`
8. Run doctests - all must succeed
Polish:
1. Zdefiniuj klasę `Point`
2. Klasa `Point` ma atrybuty `x: int = 0` oraz `y: int = 0`
3. Zdefiniuj klasę `HasPosition`
4. W `HasPosition` zdefiniuj metodę `get_position(self) -> Point`
5. W `HasPosition` zdefiniuj metodę `set_position(self, x: int, y: int) -> None`
6. W `HasPosition` zdefiniuj metodę `change_position(self, left: int = 0, right: int = 0, up: int = 0, down: int = 0) -> None`
7. Przyjmij górny lewy róg ekranu za punkt początkowy:
a. idąc w prawo dodajesz `x`
b. idąc w lewo odejmujesz `x`
c. idąc w górę odejmujesz `y`
d. idąc w dół dodajesz `y`
8. Uruchom doctesty - wszystkie muszą się powieść
Tests:
>>> import sys; sys.tracebacklimit = 0
>>> from inspect import isclass, ismethod
>>> assert isclass(Point)
>>> assert isclass(HasPosition)
>>> assert hasattr(Point, 'x')
>>> assert hasattr(Point, 'y')
>>> assert hasattr(HasPosition, 'get_position')
>>> assert hasattr(HasPosition, 'set_position')
>>> assert hasattr(HasPosition, 'change_position')
>>> assert ismethod(HasPosition().get_position)
>>> assert ismethod(HasPosition().set_position)
>>> assert ismethod(HasPosition().change_position)
>>> class Astronaut(HasPosition):
... pass
>>> astro = Astronaut()
>>> astro.set_position(x=1, y=2)
>>> astro.get_position()
Point(x=1, y=2)
>>> astro.set_position(x=1, y=1)
>>> astro.change_position(right=1)
>>> astro.get_position()
Point(x=2, y=1)
>>> astro.set_position(x=1, y=1)
>>> astro.change_position(left=1)
>>> astro.get_position()
Point(x=0, y=1)
>>> astro.set_position(x=1, y=1)
>>> astro.change_position(down=1)
>>> astro.get_position()
Point(x=1, y=2)
>>> astro.set_position(x=1, y=1)
>>> astro.change_position(up=1)
>>> astro.get_position()
Point(x=1, y=0)
"""
from dataclasses import dataclass
| StarcoderdataPython |
1769651 | import string
from kivy.app import App
from task_widgets.task_base.intro_hint import IntroHint
from utils import import_kv
from .find_in_table_dynamic import FindInTableDynamic
import_kv(__file__)
class IntroHintFindLetterDynamic(IntroHint):
    # Marker subclass with no extra behavior; presumably styled by rules for
    # this class name in the .kv file loaded via import_kv above — confirm.
    pass
class FindLetterDynamic(FindInTableDynamic):
    """Find-the-letter task: a 5x5 dynamic table filled with letters."""
    SIZE = 5
    TASK_KEY = "find_letter_dynamic"
    INTRO_HINT_CLASS = IntroHintFindLetterDynamic

    def generate_alphabet(self):
        """Return the uppercase alphabet for the current app language."""
        lang = App.get_running_app().lang
        # BUG FIX: the original used the Python 2-only builtin ``unichr``,
        # which raises NameError on Python 3; ``chr`` is the equivalent.
        # Code points 1040-1071 are the Cyrillic uppercase letters А-Я.
        return string.ascii_uppercase if lang != 'ru' else [chr(i) for i in range(1040, 1072)]
| StarcoderdataPython |
164424 | from socketio.namespace import BaseNamespace
from socketio.mixins import RoomsMixin, BroadcastMixin
from socketio.sdjango import namespace
from collections import defaultdict
import redis
from gevent import Greenlet
def home_redis_worker():
    """Greenlet worker: forward every message published on the Redis 'tacos'
    channel to all sockets connected to the /home namespace."""
    r = redis.StrictRedis(host='localhost', port=6379, db=0)
    tacosub = r.pubsub()
    tacosub.subscribe('tacos')
    # listen() blocks forever; this runs inside its own greenlet (spawned below).
    for item in tacosub.listen():
        if item['type'] == "message":
            for socket in HomeNamespace.sockets:
                socket.emit('taco', item['data'])

# Start the broadcast worker as soon as the module is imported.
home_greenlet = Greenlet.spawn(home_redis_worker)
@namespace('/home')
class HomeNamespace(BaseNamespace):
    """Socket.IO namespace that tracks all connected /home clients."""
    # Class-level registry of live sockets, read by home_redis_worker above.
    sockets = set([])

    def initialize(self):
        # Called on connect: register this socket for broadcasts.
        HomeNamespace.sockets.add(self)

    def recv_disconnect(self):
        # Deregister and tear down the connection.
        HomeNamespace.sockets.discard(self)
        self.disconnect(silent=True)
###########################
def user_redis_worker():
    """Greenlet worker: forward messages on 'user_<name>' Redis channels to
    the sockets registered for that user in the /user namespace."""
    r = redis.StrictRedis(host='localhost', port=6379, db=0)
    usersub = r.pubsub()
    usersub.psubscribe('user_*')
    for item in usersub.listen():
        if item['type'] == "pmessage":
            # Strip the 'user_' prefix from the channel name to get the username.
            user = item['channel'][5:]
            for socket in UserNamespace.sockets[user]:
                socket.emit('taco', item['data'])

# Start the per-user broadcast worker at import time.
user_greenlet = Greenlet.spawn(user_redis_worker)
@namespace('/user')
class UserNamespace(BaseNamespace):
    """Socket.IO namespace with a per-user socket registry."""
    # username -> set of sockets registered for that user
    sockets = defaultdict(set)

    def on_set_user(self, username):
        """Register this socket under ``username`` (event sent by the client)."""
        self.username = username
        UserNamespace.sockets[username].add(self)

    def recv_disconnect(self):
        """Deregister the socket; tolerates disconnects before 'set_user'."""
        # FIX: ``self.username`` only exists after on_set_user ran; guard so
        # an early disconnect no longer raises AttributeError.
        username = getattr(self, 'username', None)
        if username is not None:
            UserNamespace.sockets[username].discard(self)
        self.disconnect(silent=True)
3380336 | # -*- coding: utf-8 -*-
# @Author: gunjianpan
# @Date: 2019-03-26 10:21:05
# @Last Modified by: gunjianpan
# @Last Modified time: 2019-08-10 16:56:44
import aiohttp
import asyncio
import codecs
import json
import logging
import os
import re
import shutil
import struct
import sys
import time
from collections import namedtuple
from configparser import ConfigParser
from enum import IntEnum
from ssl import _create_unverified_context
sys.path.append(os.getcwd())
from proxy.getproxy import GetFreeProxy
from util.util import basic_req, can_retry, echo, mkdir, time_str
logger = logging.getLogger(__name__)
proxy_req = GetFreeProxy().proxy_req
data_dir = 'bilibili/data/'
websocket_dir = '%swebsocket/' % data_dir
assign_path = 'bilibili/assign_up.ini'
one_day = 86400
"""
* bilibili @websocket
* www.bilibili.com/video/av{av_id}
* wss://broadcast.chat.bilibili.com:7823/sub
"""
class Operation(IntEnum):
    """Bilibili broadcast protocol opcodes (packet 'operation' field)."""
    SEND_HEARTBEAT = 2   # client -> server keepalive (sent every 30s)
    ONLINE = 3           # server -> client online-count JSON payload
    COMMAND = 5          # server -> client command JSON (e.g. 'DM' danmaku)
    AUTH = 7             # client -> server room authentication
    RECV = 8             # server acknowledgement of AUTH
    NESTED = 9           # container packet; payload holds further packets
    DANMAKU = 1000       # danmaku tail packet (printed raw by the parser)
class BWebsocketClient:
    ''' bilibili websocket client

    Connects to bilibili's broadcast websocket for one video (av id),
    authenticates for the video's comment room, keeps the connection alive
    with heartbeats, and dispatches online-count / danmaku events to the
    ``_on_get_*`` hooks (overridden in subclasses).
    '''
    ROOM_INIT_URL = 'https://www.bilibili.com/video/av%d'
    WEBSOCKET_URL = 'wss://broadcast.chat.bilibili.com:7823/sub'
    HEARTBEAT_BODY = '[object Object]'
    # Packet header layout (big-endian): total_len, header_len, proto_ver,
    # operation, sequence counter, zero.
    HEADER_STRUCT = struct.Struct('>I2H2IH')
    HeaderTuple = namedtuple(
        'HeaderTuple', ('total_len', 'header_len', 'proto_ver', 'operation', 'time', 'zero'))
    # cmd name -> coroutine handler.
    # NOTE(review): info[1] is passed as ``content`` and info[0] as
    # ``user_name`` — confirm the field order against the protocol.
    _COMMAND_HANDLERS = {
        'DM': lambda client, command: client._on_get_danmaku(command['info'][1], command['info'][0])
    }

    def __init__(self, av_id: int, types=0, p: int = -1):
        ''' init class

        av_id: bilibili video id; p: 1-based page (part) number, -1 = first.
        types: nonzero appears to limit the run to one day (see
        _heartbeat_loop) — confirm.
        '''
        self._av_id = av_id
        self._room_id = None
        self._count = 1            # packet sequence counter
        self._types = types
        self._begin_time = int(time.time())
        self._loop = asyncio.get_event_loop()
        self._session = aiohttp.ClientSession(loop=self._loop)
        self._is_running = False
        self._websocket = None
        self._p = p if p > 0 else 1
        self._getroom_id()

    async def close(self):
        """Close the underlying aiohttp session."""
        await self._session.close()

    def run(self):
        ''' Schedule the message loop on the event loop; returns the task. '''
        if self._is_running:
            raise RuntimeError('This client is already running')
        self._is_running = True
        return asyncio.ensure_future(self._message_loop(), loop=self._loop)

    def _getroom_id(self, proxy: bool = True):
        ''' Scrape the video page to find the cid (room id) of page self._p. '''
        url = self.ROOM_INIT_URL % self._av_id
        text = proxy_req(url, 3) if proxy else basic_req(url, 3)
        pages = re.findall('"pages":\[(.*?)\],', text)
        if not len(pages):
            # NOTE(review): the retry result is not returned and execution
            # falls through to pages[0] below — IndexError if retries fail.
            if can_retry(url, 5):
                self._getroom_id(proxy=proxy)
        cid = re.findall('"cid":(.*?),', pages[0])
        assert len(cid) >= self._p, 'Actual Page len: {} <=> Need Pages Num: {}'.format(
            len(cid), self._p)
        self._room_id = int(cid[self._p - 1])
        echo(3, 'Room_id:', self._room_id)

    def parse_struct(self, data: dict, operation: int):
        ''' Build one protocol packet: packed header + body for *operation*. '''
        # Hard stop after 7 days of runtime.
        assert int(time.time()) < self._begin_time + \
            7 * one_day, 'Excess Max RunTime!!!'
        if operation == 7:
            # AUTH packets carry the JSON payload (spaces stripped);
            # everything else uses the fixed heartbeat body.
            body = json.dumps(data).replace(" ", '').encode('utf-8')
        else:
            body = self.HEARTBEAT_BODY.encode('utf-8')
        header = self.HEADER_STRUCT.pack(
            self.HEADER_STRUCT.size + len(body),
            self.HEADER_STRUCT.size,
            1,
            operation,
            self._count,
            0
        )
        self._count += 1
        return header + body

    async def _send_auth(self):
        ''' Send the room authentication packet for this video/page. '''
        auth_params = {
            'room_id': 'video://%d/%d' % (self._av_id, self._room_id),
            "platform": "web",
            "accepts": [1000]
        }
        await self._websocket.send_bytes(self.parse_struct(auth_params, Operation.AUTH))

    async def _message_loop(self):
        ''' Connect, authenticate and consume packets until cancelled;
        reconnects with a 5s backoff on connection errors. '''
        if self._room_id is None:
            self._getroom_id()
        while True:
            heartbeat_con = None
            try:
                async with self._session.ws_connect(self.WEBSOCKET_URL) as websocket:
                    self._websocket = websocket
                    await self._send_auth()
                    # Keepalive runs concurrently with the receive loop below.
                    heartbeat_con = asyncio.ensure_future(
                        self._heartbeat_loop(), loop=self._loop)
                    async for message in websocket:
                        if message.type == aiohttp.WSMsgType.BINARY:
                            await self._handle_message(message.data, 0)
                        else:
                            logger.warning(
                                'Unknown Message type = %s %s', message.type, message.data)
            except asyncio.CancelledError:
                break
            except aiohttp.ClientConnectorError:
                # Connection failed: back off 5 seconds, then reconnect.
                logger.warning('Retrying */*/*/*/---')
                try:
                    await asyncio.sleep(5)
                except asyncio.CancelledError:
                    break
            finally:
                # Always stop the heartbeat task before reconnecting/exiting.
                if heartbeat_con is not None:
                    heartbeat_con.cancel()
                    try:
                        await heartbeat_con
                    except asyncio.CancelledError:
                        break
        self._websocket = None
        self._is_running = False

    async def _heartbeat_loop(self):
        ''' Send a heartbeat packet every 30s (bounded to 7 days of beats). '''
        # NOTE(review): close() is a coroutine but is not awaited here, so
        # this call only creates (and discards) the coroutine object.
        if self._types and int(time.time()) > self._begin_time + one_day:
            self.close()
        for _ in range(int(one_day * 7 / 30)):
            try:
                await self._websocket.send_bytes(self.parse_struct({}, Operation.SEND_HEARTBEAT))
                await asyncio.sleep(30)
            except (asyncio.CancelledError, aiohttp.ClientConnectorError):
                break

    async def _handle_message(self, message: str, offset: int = 0):
        ''' Walk the (possibly concatenated / nested) packets in one frame. '''
        while offset < len(message):
            try:
                header = self.HeaderTuple(
                    *self.HEADER_STRUCT.unpack_from(message, offset))
                body = message[offset +
                               self.HEADER_STRUCT.size: offset + header.total_len]
                if header.operation == Operation.ONLINE or header.operation == Operation.COMMAND:
                    body = json.loads(body.decode('utf-8'))
                    if header.operation == Operation.ONLINE:
                        await self._on_get_online(body)
                    else:
                        await self._handle_command(body)
                elif header.operation == Operation.RECV:
                    print('Connect Build!!!')
                elif header.operation == Operation.NESTED:
                    # Container packet: step past its header and keep parsing
                    # the embedded packets in place.
                    offset += self.HEADER_STRUCT.size
                    continue
                elif header.operation == Operation.DANMAKU:
                    body = json.loads(body.decode('utf-8'))
                    print(body)
                    print('>>>>DANMAKU tail socket>>>>')
                else:
                    logger.warning('Unknown operation = %d %s %s',
                                   header.operation, header, body)
                offset += header.total_len
            except:
                # NOTE(review): bare except silently drops malformed packets;
                # since offset is not advanced on failure this can spin
                # forever on a bad frame — confirm.
                pass

    async def _handle_command(self, command):
        ''' Dispatch a COMMAND payload (or a list of them) to its handler. '''
        if isinstance(command, list):
            for one_command in command:
                await self._handle_command(one_command)
            return
        cmd = command['cmd']
        if cmd in self._COMMAND_HANDLERS:
            handler = self._COMMAND_HANDLERS[cmd]
            if handler is not None:
                await handler(self, command)
        else:
            logger.warning('Unknown Command = %s %s', cmd, command)

    async def _on_get_online(self, online):
        ''' get online num (hook; overridden by subclasses) '''
        pass

    async def _on_get_danmaku(self, content, user_name):
        ''' get danmaku (hook; overridden by subclasses) '''
        pass
class OneBWebsocketClient(BWebsocketClient):
    ''' get one bilibili websocket client

    Concrete client that appends online counts and danmaku lines to CSV
    files under the websocket data directory.
    '''

    async def _on_get_online(self, online):
        # Payload shape used here: {'data': {'room': {'online': <count>}}}.
        online = online['data']['room']['online']
        with codecs.open(self.get_path('online'), 'a', encoding='utf-8') as f:
            f.write(self.get_data([online]))
        print('Online:', online)

    async def _on_get_danmaku(self, content, user_name):
        with codecs.open(self.get_path('danmaku'), 'a', encoding='utf-8') as f:
            f.write(self.get_data([content, user_name]))
        print(content, user_name)

    def get_data(self, origin_data: list) -> str:
        ''' Format one CSV line: "<timestamp>,<field>,..." + newline. '''
        return ','.join(str(ii) for ii in [time_str(), *origin_data]) + '\n'

    def get_path(self, types: str) -> str:
        ''' Build the CSV path for this av id / metric ("online"/"danmaku"). '''
        # NOTE(review): __init__ forces self._p >= 1, so the -1 check never
        # triggers and the _p suffix is always appended — confirm intended.
        p_path = '_p%d' % self._p if self._p != -1 else ''
        return '%s%d_%s%s.csv' % (websocket_dir, self._av_id, types, p_path)
async def async_main(av_id: int, types: int, p: int):
    """Run a OneBWebsocketClient until its message loop finishes, then clean up."""
    ws_client = OneBWebsocketClient(av_id, types, p=p)
    loop_task = ws_client.run()
    try:
        await loop_task
    finally:
        await ws_client.close()
def BSocket(av_id: int, types: int = 0, p: int = -1):
    """Blocking entry point: drive the websocket client on an event loop."""
    event_loop = asyncio.get_event_loop()
    try:
        event_loop.run_until_complete(async_main(av_id, types, p))
    finally:
        event_loop.close()
if __name__ == '__main__':
    mkdir(data_dir)
    mkdir(websocket_dir)
    # Seed the assign config from the bundled template on first run.
    if not os.path.exists(assign_path):
        shutil.copy(assign_path + '.tmp', assign_path)
    ''' Test for San Diego demon '''
    ''' PS: the thread of BSocket have to be currentThread in its processing. '''
    # av id and page come from argv when given, otherwise from the ini file.
    if len(sys.argv) == 3:
        av_id = int(sys.argv[1])
        p = int(sys.argv[2])
    else:
        cfg = ConfigParser()
        cfg.read(assign_path, 'utf-8')
        av_id = cfg.getint('basic', 'basic_av_id')
        # Empty basic_av_p means "use the first page" (-1).
        p = cfg.getint('basic', 'basic_av_p') if len(
            cfg['basic']['basic_av_p']) else -1
    BSocket(av_id, p=p)
| StarcoderdataPython |
3227735 | <gh_stars>10-100
# class PersonMentionInlineBlock:
# pass
#
#
# class PageMentionInlineBlock:
# pass
#
#
# class DateInlineBlock:
# pass
#
#
# class EmojiInlineBlock:
# pass
#
#
# class EquationInlineBlock:
# pass
#
#
## TODO: smelly? why here? why so much code?
# class NotionDateBlock:
#
# start = None
# end = None
# timezone = None
# reminder = None
#
# def __init__(self, start, end=None, timezone=None, reminder=None):
# self.start = start
# self.end = end
# self.timezone = timezone
# self.reminder = reminder
#
# @classmethod
# def from_notion(cls, obj):
# if isinstance(obj, dict):
# data = obj
# elif isinstance(obj, list):
# data = obj[0][1][0][1]
# else:
# return None
# start = cls._parse_datetime(data.get("start_date"), data.get("start_time"))
# end = cls._parse_datetime(data.get("end_date"), data.get("end_time"))
# timezone = data.get("timezone")
# reminder = data.get("reminder")
# return cls(start, end=end, timezone=timezone, reminder=reminder)
#
# @classmethod
# def _parse_datetime(cls, date_str, time_str):
# if not date_str:
# return None
# if time_str:
# return datetime.strptime(date_str + " " + time_str, "%Y-%m-%d %H:%M")
# else:
# return datetime.strptime(date_str, "%Y-%m-%d").date()
#
# def _format_datetime(self, date_or_datetime):
# if not date_or_datetime:
# return None, None
# if isinstance(date_or_datetime, datetime):
# return (
# date_or_datetime.strftime("%Y-%m-%d"),
# date_or_datetime.strftime("%H:%M"),
# )
# else:
# return date_or_datetime.strftime("%Y-%m-%d"), None
#
# def type(self):
# name = "date"
# if isinstance(self.start, datetime):
# name += "time"
# if self.end:
# name += "range"
# return name
#
# def to_notion(self):
#
# if self.end:
# self.start, self.end = sorted([self.start, self.end])
#
# start_date, start_time = self._format_datetime(self.start)
# end_date, end_time = self._format_datetime(self.end)
#
# if not start_date:
# return []
#
# data = {"type": self.type(), "start_date": start_date}
#
# if end_date:
# data["end_date"] = end_date
#
# if "time" in data["type"]:
# data["time_zone"] = str(self.timezone or get_localzone())
# data["start_time"] = start_time or "00:00"
# if end_date:
# data["end_time"] = end_time or "00:00"
#
# if self.reminder:
# data["reminder"] = self.reminder
#
# return [["‣", [["d", data]]]]
#
| StarcoderdataPython |
3310394 | import cherrypy
import json
class Freeboard(object):
    """CherryPy resource serving the freeboard dashboard index page."""
    exposed = True

    def GET(self, *uri, **params):
        """Return the contents of the freeboard index.html page."""
        # FIX: use a context manager so the file handle is closed instead of
        # being leaked on every request.
        with open("./freeboard/index.html", "r") as f:
            return f.read()
class Save(object):
    """CherryPy resource that persists the dashboard JSON posted by freeboard."""
    exposed = True

    def POST(self, *uri, **params):
        """Parse the 'json_string' form field and write it to the dashboard file."""
        data = json.loads(params['json_string'])
        with open("./freeboard/dashboard/dashboard.json", "w") as f:
            # FIX: the original called json.dump(dash, f) where ``dash`` was
            # never defined (NameError at runtime); write the parsed payload.
            json.dump(data, f)
if __name__ == '__main__':
    # Method-dispatched REST config; static assets are served relative to the
    # hard-coded project root below (machine-specific path — adjust locally).
    conf = {
        '/': {
            'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
            'tools.sessions.on': True,
            'tools.staticdir.root': "C:/Users/Edoardo/Documents/PycharmProjects/Programming_for_IoT_applications/lab2/freeboard"
        },
        '/css': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': "css"
        },
        '/dashboard': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': "dashboard"
        },
        '/js': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': "js"
        },
        '/img': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': "img"
        },
        '/plugins': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': "plugins"
        },
    }
    # Mount the index page at / and the save endpoint at /save, listen on
    # all interfaces, port 8080, and block until shutdown.
    cherrypy.tree.mount(Freeboard(), '/', conf)
    cherrypy.tree.mount(Save(), '/save', conf)
    cherrypy.config.update({'server.socket_host': '0.0.0.0'})
    cherrypy.config.update({'server.socket_port': 8080})
    cherrypy.engine.start()
    cherrypy.engine.block()
| StarcoderdataPython |
3282754 | <reponame>yogesh-kamble/mozillians
from django.db.models import signals
from django.dispatch import receiver
from django.conf import settings
from django.contrib.auth.models import User
from multidb.pinning import use_master
from mozillians.users.models import ExternalAccount, UserProfile, Vouch
from mozillians.users.tasks import subscribe_user_to_basket, unsubscribe_from_basket_task
# Signal to create a UserProfile.
@receiver(signals.post_save, sender=User, dispatch_uid='create_user_profile_sig')
def create_user_profile(sender, instance, created, raw, **kwargs):
    """Ensure every User has a UserProfile; re-fire post_save for existing ones."""
    if not raw:  # skip fixture/deserialization saves
        up, created = UserProfile.objects.get_or_create(user=instance)
        if not created:
            # Profile already existed: forward the save signal so
            # UserProfile listeners still run for this User save.
            signals.post_save.send(sender=UserProfile, instance=up, created=created, raw=raw)
# Signal to remove the User object when a profile is deleted
@receiver(signals.post_delete, sender=UserProfile, dispatch_uid='delete_user_obj_sig')
def delete_user_obj_sig(sender, instance, **kwargs):
    """Remove the auth User object when its profile is deleted."""
    if instance.user:
        instance.user.delete()
# Basket User signals
@receiver(signals.post_save, sender=UserProfile, dispatch_uid='update_basket_sig')
def update_basket(sender, instance, **kwargs):
    """Keep the vouched-newsletter subscription in sync with the vouch state."""
    newsletters = [settings.BASKET_VOUCHED_NEWSLETTER]
    # Both calls are celery tasks (.delay) — fire-and-forget.
    if instance.is_vouched:
        subscribe_user_to_basket.delay(instance.id, newsletters)
    else:
        unsubscribe_from_basket_task.delay(instance.email, newsletters)
@receiver(signals.pre_delete, sender=UserProfile, dispatch_uid='unsubscribe_from_basket_sig')
def unsubscribe_from_basket(sender, instance, **kwargs):
    """Unsubscribe a profile from all managed newsletters before deletion."""
    newsletters = [settings.BASKET_VOUCHED_NEWSLETTER, settings.BASKET_NDA_NEWSLETTER]
    unsubscribe_from_basket_task.delay(instance.email, newsletters)
# Signals related to vouching.
@receiver(signals.post_delete, sender=Vouch, dispatch_uid='update_vouch_flags_delete_sig')
@receiver(signals.post_save, sender=Vouch, dispatch_uid='update_vouch_flags_save_sig')
def update_vouch_flags(sender, instance, **kwargs):
    """Recompute is_vouched / can_vouch on the vouchee after any vouch change."""
    if kwargs.get('raw'):  # fixture/deserialization saves
        return
    try:
        profile = instance.vouchee
    except UserProfile.DoesNotExist:
        # In this case we delete not only the vouches but the
        # UserProfile as well. Do nothing.
        return
    # Count on the master DB so we don't read a stale replica right after
    # the vouch write.
    with use_master:
        vouches_qs = Vouch.objects.filter(vouchee=profile)
        vouches = vouches_qs.count()
        profile.is_vouched = vouches > 0
        profile.can_vouch = vouches >= settings.CAN_VOUCH_THRESHOLD
        # autovouch=False prevents the save from creating further vouches.
        profile.save(**{'autovouch': False})
@receiver(signals.post_save, sender=ExternalAccount, dispatch_uid='add_employee_vouch_sig')
def add_employee_vouch(sender, instance, **kwargs):
    """Add a vouch if an alternate email address is a mozilla* address."""
    # Only react to real saves of email-type accounts.
    if kwargs.get('raw') or not instance.type == ExternalAccount.TYPE_EMAIL:
        return
    instance.user.auto_vouch()
| StarcoderdataPython |
1757250 | <reponame>firefalcon73/Color_Detection
import cv2
import pandas as pd
import numpy as np
img = cv2.imread("colorpic.jpg")
imgWidth = img.shape[1] - 40  # right edge of the info rectangle (40 px margin)
index = ['color', 'color_name', 'hex', 'R', 'G', 'B']
df = pd.read_csv("colors.csv", header=None, names=index)
# Last-clicked pixel colour and position (updated by the mouse callback).
r = g = b = xpos = ypos = 0
def getRGBvalue(event, x, y, flags, param):
    """Mouse callback: record the colour of the pixel under the cursor."""
    # NOTE(review): ``clicked`` is declared global but never assigned or
    # used anywhere in this file — likely leftover.
    global b, g, r, xpos, ypos, clicked
    xpos = x
    ypos = y
    b, g, r = img[y, x]  # OpenCV images are indexed [row, col] and store BGR
    b = int(b)
    g = int(g)
    r = int(r)
def colorname(B, G, R):
    """Return the name (+hex) of the colour in ``df`` closest to (B, G, R).

    Distance is the Manhattan distance over the R, G, B channels.
    """
    minimum = 10000
    # FIX: initialise cname so the function cannot raise UnboundLocalError
    # when df is empty or no entry is within the starting threshold.
    cname = ""
    for i in range(len(df)):
        d = abs(B - int(df.loc[i, "B"])) + abs(G - int(df.loc[i, "G"])) + abs(R - int(df.loc[i, "R"]))
        if d <= minimum:
            minimum = d
            cname = df.loc[i, "color_name"] + "Hex" + df.loc[i, "hex"]
    return cname
cv2.namedWindow("Image")
cv2.setMouseCallback("Image", getRGBvalue)

while True:
    cv2.imshow("Image", img)
    # Draw a swatch of the picked colour and label it with name + RGB values.
    cv2.rectangle(img, (20, 20), (imgWidth, 60), (b, g, r), -1)
    text = colorname(b, g, r) + ' R=' + str(r) + ' G=' + str(g) + ' B=' + str(b)
    cv2.putText(img, text, (50, 50), 2, 0.8, (255, 255, 255), 2, cv2.LINE_AA)
    # Redraw in black on very light colours so the label stays readable.
    if (r + g + b >= 600):
        cv2.putText(img, text, (50, 50), 2, 0.8, (0, 0, 0), 2, cv2.LINE_AA)
    # Exit on ESC.
    if cv2.waitKey(20) & 0xFF == 27:
        break

cv2.destroyAllWindows()
| StarcoderdataPython |
95755 | import os
# data processing
def get_basic_config(feature_type, label_type, SPLITS, HOLDOUT):
    """Build the data-processing config: feature/label selection, CV split
    settings, all feature/tool paths, then per-feature-type extras."""
    config = {}
    config['feature_type'] = feature_type  # input_type to feature_type
    config['label_type'] = label_type
    config['SPLITS'] = SPLITS
    config['HOLDOUT'] = HOLDOUT
    config['DATA_PATH'] = '.' + os.sep + 'data'
    config['FEATURE_PATH'] = '.' + os.sep + 'features'
    config['FEATURE_PATH_COMPARE'] = config['FEATURE_PATH'] + os.sep + 'compare/'
    config['FEATURE_PATH_LLD'] = config['FEATURE_PATH'] + os.sep + 'lld' + os.sep
    config['FEATURE_PATH_Spectro'] = config['FEATURE_PATH'] + os.sep + 'spectrogram' + os.sep
    config['FEATURE_PATH_BoAW'] = config['FEATURE_PATH'] + os.sep + 'boaw' + os.sep
    config['FEATURE_PATH_eGemaps'] = config['FEATURE_PATH'] + os.sep + 'egemaps' + os.sep
    config['FEATURE_PATH_PKLS'] = config['FEATURE_PATH'] + os.sep + 'pkls' + os.sep + config['feature_type'] + os.sep
    config['FEATURE_PATH_DS'] = config['FEATURE_PATH'] + os.sep + 'deepspectrum' + os.sep
    config['GENDER'] = ['women', 'men']
    # Modify openSMILE paths here:
    config['SMILEexe'] = '~/opensmile-2.3.0/bin/linux_x64_standalone_static/SMILExtract'
    config['SMILEconf'] = '~/opensmile-2.3.0/config/ComParE_2016.conf'
    config['egemapsconf'] = '~/opensmile-2.3.0/config/gemaps/eGeMAPSv01a.conf'
    config['openXBOW'] = './tools/openXBOW.jar'
    # Deep Spectrum configuration
    config['ds'] = 'deepspectrum features'
    config['dsconf'] = ' -nm 256 -nl -en vgg16 -fl fc2 -m mel '
    # Per-feature extras; note 'boaw_*' pulls in LLD settings as well.
    if config['feature_type'] == 'mfcc':
        config = get_mfcc_config(config)
    if config['feature_type'] == 'lld':
        config = get_lld_config(config)
    if 'boaw' in config['feature_type']:
        # modify BoAW settings here
        config = get_lld_config(config)
        config = get_bow_config(config)
    if config['feature_type'] == 'raw':
        config = get_raw_audio_config(config)
    return config
def get_mfcc_config(config):
    """Attach MFCC extraction settings to *config* and return it."""
    mfcc_settings = {
        'num_components': 40,       # number of MFCCs to return
        'num_timesteps_mfcc': 44,   # frames per sample
        'num_channels': 1,
    }
    config.update(mfcc_settings)
    config['mfcc_max_pad_len'] = config['num_timesteps_mfcc']
    return config
def get_lld_config(config):
    # modify LLDs settings here
    # NOTE(review): this definition is immediately shadowed by the identical
    # function redefined just below (which uses 224 timesteps); this version
    # is dead code and never called.
    config['num_timesteps_lld'] = 100  # frames
    config['lld_max_pad_len'] = config['num_timesteps_lld']
    return config
def get_lld_config(config):
    """Attach LLD framing settings (overrides the duplicate definition above)."""
    # modify LLDs settings here
    config['num_timesteps_lld'] = 224  # frames
    config['lld_max_pad_len'] = config['num_timesteps_lld']
    return config
def get_bow_config(config):
    """Derive the BoAW codebook size from the feature-type suffix (e.g. 'boaw_500')."""
    codebook_size = int(config['feature_type'].split('_')[-1])
    config['csize'] = codebook_size
    if codebook_size not in (125, 250, 500, 1000, 2000):
        print("csize not in [125, 250, 500, 1000, 2000] ")
        exit()
    return config
def get_raw_audio_config(config):
    """Attach raw-audio framing settings to *config* and return it."""
    config.update({
        'frame_rate': 40,       # 40 ms 1 sec
        'audio_length': 1000,
        'sample_rate': 16000,
    })
    return config
# experiments
# overall config
def get_basic_evaluation(model_type, experiment_name, feature_type):
    """Build the common evaluation config: ids plus experiment/feature paths."""
    sep = os.sep
    config = {
        'feature_type': feature_type,
        'model_type': model_type,
        'experiment_name': experiment_name,
    }
    config['FEATURE_PATH'] = '.' + sep + 'features'
    config['FEATURE_PATH_PKLS'] = config['FEATURE_PATH'] + sep + 'pkls' + sep + config['feature_type'] + sep
    config['EXPERIMENT_PATH'] = '.' + sep + 'experiments' + sep + experiment_name + sep
    config['PLOT_PATH'] = config['EXPERIMENT_PATH'] + 'plots' + sep
    config['MODEL_PATH'] = config['EXPERIMENT_PATH'] + 'model' + sep
    config['RESULT_PATH'] = config['EXPERIMENT_PATH'] + 'results' + sep
    config['num_labels'] = 2   # binary classification throughout
    return config
def get_lstm_config(model_type, experiment_name, feature_type):
    """Build the LSTM experiment config for the given feature type.

    NOTE(review): get_mfcc_config() is applied for every feature type, and
    all branches use the same training parameters — only the LSTM layer
    widths differ ('mfcc' vs everything else). Confirm this is intended.
    """
    config = get_basic_evaluation(model_type, experiment_name, feature_type)
    config = get_mfcc_config(config)
    if config['feature_type'] == 'mfcc':
        config['lstm1_n'] = 40
        config['lstm2_n'] = 40 // 2
        config['parameter_list'] = [{
            'num_epochs': 200,
            'num_batch_size': 32,
            'learning_rate': 0.0001
        },
        ]
    elif config['feature_type'] == 'lld':
        config['lstm1_n'] = 50
        config['lstm2_n'] = 30
        config['parameter_list'] = [{
            'num_epochs': 200,
            'num_batch_size': 32,
            'learning_rate': 0.0001
        },
        ]
    elif config['feature_type'] == 'raw':
        config['lstm1_n'] = 50
        config['lstm2_n'] = 30
        config['parameter_list'] = [{
            'num_epochs': 200,
            'num_batch_size': 32,
            'learning_rate': 0.0001
        },
        ]
    else:
        # Fallback for any other feature type.
        config['lstm1_n'] = 50
        config['lstm2_n'] = 30
        config['parameter_list'] = [{
            'num_epochs': 200,
            'num_batch_size': 32,
            'learning_rate': 0.0001
        },
        ]
    # experiment parameter identified as best working parameters:
    return config
# cnn specific config
def get_cnn_config(model_type, experiment_name, feature_type):
    """Build the CNN experiment config (basic evaluation + MFCC settings)."""
    config = get_basic_evaluation(model_type, experiment_name, feature_type)
    config = get_mfcc_config(config)
    # Parameter grid identified as best working in earlier experiments.
    config['parameter_list'] = [
        {'num_epochs': 100, 'num_batch_size': 10, 'learning_rate': 0.0001},
        {'num_epochs': 100, 'num_batch_size': 64, 'learning_rate': 0.00001},
    ]
    return config
# cnn specific config
def get_crnn_config(model_type, experiment_name, feature_type):
    """Build the CRNN experiment config for the given feature type.

    Sets the convolution filter/kernel sizes and LSTM layer widths per
    feature type, then attaches the hyper-parameter optimisation grid.
    """
    config = get_basic_evaluation(model_type, experiment_name, feature_type)
    config = get_mfcc_config(config)
    if config['feature_type'] == 'mfcc':
        config['Conv1_filters'] = 10
        config['Conv1_kernel_size'] = 6
        config['Conv2_filters'] = 20
        config['Conv2_kernel_size'] = 8
        config['Conv3_filters'] = 40
        config['Conv3_kernel_size'] = 10
        config['lstm1_n'] = 40
        config['lstm2_n'] = 40 // 2
        config['parameter_list'] = get_parameter_optimisation()
    elif config['feature_type'] == 'lld':
        config['Conv1_filters'] = 30
        config['Conv1_kernel_size'] = 10
        config['Conv2_filters'] = 30
        config['Conv2_kernel_size'] = 8
        config['Conv3_filters'] = 40
        config['Conv3_kernel_size'] = 10
        config['lstm1_n'] = 50
        config['lstm2_n'] = 30
        config['parameter_list'] = get_parameter_optimisation()
    elif config['feature_type'] == 'raw':
        # BUG FIX: this branch was a fresh ``if`` in the original, so its
        # ``else`` clause also ran for 'lld' (and 'mfcc') and silently
        # overwrote those settings with the mfcc defaults. Made it an
        # ``elif`` so each feature type keeps its own configuration.
        config['Conv1_filters'] = 40
        config['Conv1_kernel_size'] = 2
        config['Conv2_filters'] = 50
        config['Conv2_kernel_size'] = 4
        config['Conv3_filters'] = 0   # third conv layer disabled for raw audio
        config['Conv3_kernel_size'] = 8
        config['lstm1_n'] = 50
        config['lstm2_n'] = 40 // 2
        config['parameter_list'] = get_parameter_optimisation()
    else:
        # Fallback: mfcc-style defaults for unknown feature types.
        config['Conv1_filters'] = 10
        config['Conv1_kernel_size'] = 6
        config['Conv2_filters'] = 20
        config['Conv2_kernel_size'] = 8
        config['Conv3_filters'] = 40
        config['Conv3_kernel_size'] = 10
        config['lstm1_n'] = 40
        config['lstm2_n'] = 40 // 2
        config['parameter_list'] = get_parameter_optimisation()
    return config
def get_parameter_optimisation():
    """Return the hyper-parameter grid used for optimisation runs.

    NOTE: the original function contained several large list literals
    *after* the ``return`` statement; they were unreachable dead code
    (bare expressions, never evaluated or returned) and have been removed.
    The returned value is unchanged.
    """
    return [
        {'num_epochs': 3000,
         'num_batch_size': 64,
         'learning_rate': 1e-05},
    ]
# svm specific config
def get_svm_config(model_type, experiment_name, feature_type):
    """Build the linear-SVM experiment config with a C regularisation sweep."""
    config = get_basic_evaluation(model_type, experiment_name, feature_type)
    config['svm_seq_agg'] = 'middle'  # mean or middle
    # Regularisation sweep identified as best working parameters.
    config['parameter_list'] = [
        {'C': c, 'max_iter': 10000}
        for c in (1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 10)
    ]
    return config
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.