id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
9672359 | <gh_stars>1-10
import keras
import tensorflow as tf
from keras.applications.vgg19 import VGG19
from keras.preprocessing import image
from keras.applications.vgg19 import preprocess_input
from keras.models import Model, Sequential, load_model
from keras.optimizers import Adam
from keras.layers import Dense
from keras import backend as K
import sys
import numpy as np
import pickle
import time
from get_train_test_data import preprocDataset
# Configure a single-threaded CPU-only TensorFlow session.
# NOTE(review): the original comment said "To use the GPU", but this config
# actually pins execution to one CPU device with one intra/inter-op thread.
config = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1, \
allow_soft_placement=True, device_count = {'CPU': 1})
session = tf.Session(config=config)
K.set_session(session)
# Wrapper function for deprecated sigmoid_cross_entropy_with_logits usage
def wrap(f):
    """Adapt legacy positional calls of `f` to the keyword-only form.

    Newer TensorFlow requires `sigmoid_cross_entropy_with_logits` to be
    called with `logits=`/`labels=` keywords; old code passes them
    positionally. The returned wrapper maps the first two positional
    arguments onto those keywords and drops everything else.
    """
    def wrapper(*args, **kwargs):
        # Any keywords the caller supplied are intentionally discarded,
        # matching the original adapter's behavior.
        return f(logits=args[0], labels=args[1])
    return wrapper
# Patch TF so legacy positional calls to sigmoid_cross_entropy_with_logits work.
tf.nn.sigmoid_cross_entropy_with_logits = wrap(tf.nn.sigmoid_cross_entropy_with_logits)
# Get filename of image to classify (first CLI argument)
filename = sys.argv[1]
# Preprocess the image (first sample of the first returned split)
preprocd_image = preprocDataset([filename], '', 0)[0][0]
# Shape of the feature tensor extracted from the block4_pool layer
EXTRACTED_FEATURE_SHAPE = (1, 32, 32, 512)
# Pre-trained VGG19 truncated at block4_pool for feature extraction.
# NOTE(review): `input=`/`output=` is the old Keras 1 keyword spelling;
# modern Keras uses `inputs=`/`outputs=`.
pt_model = VGG19(weights='imagenet', include_top=False, input_shape=(512, 512, 3))
model = Model(input=pt_model.input, output=pt_model.get_layer('block4_pool').output)
# Extract block4_pool features for the input image
# (the original comment said block5_pool and the variable name `b5p_features`
# is historical; the layer actually used above is block4_pool)
b5p_features = model.predict(preprocd_image)
b5p_features = b5p_features.reshape(EXTRACTED_FEATURE_SHAPE)
# Load our saved model (best conv model)
saved_model = load_model('v650-b120-conv-bestmodel.h5')
# Get prediction from saved model
pred = saved_model.predict(b5p_features)
# Binarize the sigmoid output at the 0.5 decision threshold
if pred >= 0.5:
    output = 1
else:
    output = 0
# Python 2 print statement (this script targets TF 1.x / Python 2)
print 'Output:', output
| StarcoderdataPython |
3230461 | <gh_stars>10-100
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from StringIO import StringIO
except ImportError:
# Python 3
from io import BytesIO as StringIO
from mstranslator import AccessToken, AccessError, Translator, ArgumentOutOfRangeException
import requests
SUBSCRIPTION_KEY = os.environ['TEST_MSTRANSLATOR_SUBSCRIPTION_KEY']
class TranslatorMock(Translator):
    """
    Translator test double: `.make_request()` returns the prepared request
    URL instead of performing a real HTTP round-trip.
    """

    def make_request(self, action, params=None):
        # Build (but never send) the GET request so tests can inspect the URL.
        request = requests.Request('GET',
                                   url=self.make_url(action),
                                   params=params)
        return request.prepare().url
class AccessTokenTestCase(unittest.TestCase):
    """Exercises token acquisition against the live translator endpoint."""

    def test_access(self):
        # A valid subscription key must yield a non-empty token.
        token_holder = AccessToken(SUBSCRIPTION_KEY)
        assert token_holder.token

    def test_access_denied(self):
        # An invalid key must raise AccessError when requesting a token.
        token_holder = AccessToken("AN_INVALID_SUBSCRIPTION_KEY")
        self.assertRaises(AccessError, token_holder.request_token)
class TranslatorTestCase(unittest.TestCase):
    """Integration tests for Translator against the live Microsoft API.

    NOTE(review): expected translations and alignments are pinned to the
    service's current output and may drift as the translation model changes.
    """

    def setUp(self):
        self.translator = Translator(SUBSCRIPTION_KEY)
        # Mock variant returns the prepared request URL instead of calling out.
        self.translator_mock = TranslatorMock(SUBSCRIPTION_KEY)

    def test_translate(self):
        t = self.translator.translate('world', 'en', 'ru')
        self.assertEqual('мир', t)

    def test_translate_exception(self):
        # 'asdf' is not a valid language code.
        self.assertRaises(ArgumentOutOfRangeException, self.translator.translate, 'world', 'en', 'asdf')

    def test_translate_array(self):
        ts = self.translator.translate_array(['hello', 'world'], 'en', 'ru')
        translations = [t['TranslatedText'] for t in ts]
        self.assertEqual(['Привет', 'мир'], translations)

    def test_translate_array2(self):
        ts = self.translator.translate_array2(['hello', 'world', 'Hello. How are you?'], 'en', 'ru')
        translations = [t['TranslatedText'] for t in ts]
        self.assertEqual(['Привет', 'мир', 'Привет. Как ваши дела?'], translations)
        # translate_array2 additionally returns word-alignment information.
        alignments = [t['Alignment'] for t in ts]
        self.assertEqual(['0:4-0:5', '0:4-0:2', '0:5-0:6 7:18-8:21'], alignments)

    def test_get_translations(self):
        t = self.translator.get_translations('world', 'en', 'ru')
        self.assertIsInstance(t, dict)
        self.assertIn('Translations', t)

    def test_break_sentences(self):
        t = self.translator.break_sentences('Hello. How are you?', 'en')
        self.assertEqual(['Hello. ', 'How are you?'], t)

    def test_add_translation(self):
        # Uses the mock, so only the constructed URL is inspected.
        url = self.translator_mock.add_translation('orig', 'trans', 'en', 'ru', user='test')
        self.assertIn('originalText=orig', url)
        self.assertIn('translatedText=trans', url)

    def test_get_langs(self):
        langs = self.translator.get_langs()
        self.assertIsInstance(langs, list)
        self.assertIn('en', langs)

    def test_get_lang_names(self):
        lang_names = self.translator.get_lang_names(['ru', 'en'], 'en')
        self.assertEqual(['Russian', 'English'], lang_names)

    def test_get_speackable_langs(self):
        # (sic) method name kept as-is; checks the speakable-language listing.
        langs = self.translator.get_langs(speakable=True)
        self.assertIsInstance(langs, list)
        self.assertIn('en-us', langs)

    def test_detect_lang(self):
        self.assertEqual('en', self.translator.detect_lang('Hello'))

    def test_detect_langs(self):
        self.assertEqual(['en', 'ru'], self.translator.detect_langs(['Hello', 'Привет']))

    def test_speak(self):
        self.assertIsNotNone(self.translator.speak('Hello', 'en'))

    def test_speak_to_file(self):
        s = StringIO()
        self.translator.speak_to_file(s, 'Hello', 'en')
        s.seek(0)
        # Some audio bytes must have been written.
        self.assertTrue(len(s.read()) > 0)
| StarcoderdataPython |
1667859 | # Generated by Django 2.1.1 on 2018-12-01 19:16
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the model TodoModel -> Todo in the 'todo' app.

    NOTE: auto-generated migration; do not edit applied migrations.
    """

    dependencies = [
        ('todo', '0001_initial'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='TodoModel',
            new_name='Todo',
        ),
    ]
| StarcoderdataPython |
# Importing views/forms registers the app's routes and forms as a side effect.
from app import views, forms, app
# Start the development server (blocking call).
app.run()
| StarcoderdataPython |
3333965 | #!/usr/bin/env python2.7
#
# Copyright 2016 Cluster Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import signal
import time
import django
from backend.lk.logic import appstore_review_ingestion
# This sets up logging.
django.setup()
SHUTDOWN = False
def handle_shutdown_signal(signum, frame):
    """Signal handler: record that a shutdown was requested.

    Sets the module-level SHUTDOWN flag so the ingest loop can stop at
    the next safe point rather than being killed mid-ingest.
    """
    global SHUTDOWN
    SHUTDOWN = True
    logging.info('Received signal %s, shutting down...', signum)
def main():
    """Poll for (app, country) pairs and ingest their App Store reviews.

    Loops until a shutdown signal flips the module-level SHUTDOWN flag;
    sleeps briefly when there is little work so the loop does not spin.
    """
    logging.info('Looking for reviews to ingest...')
    ingested_count = 0
    while not SHUTDOWN:
        # Fetch up to 30 (app, country) pairs with reviews pending.
        for app, country in appstore_review_ingestion.apps_countries_to_ingest(30):
            appstore_review_ingestion.ingest_app(app, country)
            ingested_count += 1
            # Honor a shutdown request between apps, not only between batches.
            if SHUTDOWN:
                break
        if ingested_count >= 25:
            # Periodic progress log; reset the counter afterwards.
            logging.info('Ingested reviews for %d app(s)...', ingested_count)
            ingested_count = 0
        else:
            # If the queue is empty, chill out.
            time.sleep(1.0)
if __name__ == '__main__':
    # Install shutdown handlers so the worker exits cleanly on abort,
    # interrupt, or terminate signals, then run the ingest loop.
    signal.signal(signal.SIGABRT, handle_shutdown_signal)
    signal.signal(signal.SIGINT, handle_shutdown_signal)
    signal.signal(signal.SIGTERM, handle_shutdown_signal)
    main()
| StarcoderdataPython |
6613586 | import os
import sys
from numpy.distutils.exec_command import exec_command
def installing():
    """Build the conda package locally and install the resulting artifact."""
    # Build without uploading, ask conda for the built package path,
    # install that file from the local channel, then purge build dirs.
    exec_command('conda build . --no-anaconda-upload')
    status, output = exec_command('conda build . --output')
    exec_command('conda install --use-local ' + output)
    exec_command('conda build purge')
def remove():
    """Uninstall the locally installed kinnetmt package without prompting."""
    exec_command('conda remove kinnetmt --yes')
def update():
    """Reinstall: drop the current package, then build and install afresh."""
    remove()
    installing()
# Simple CLI: exactly one of --install / --remove / --update is honored.
if '--install' in sys.argv[1:]:
    print('Building and installing local dev version via conda')
    installing()
elif '--remove' in sys.argv[1:]:
    print('Removing local dev package')
    remove()
elif '--update' in sys.argv[1:]:
    print('Updating local dev package')
    update()
| StarcoderdataPython |
11224591 | <gh_stars>1-10
# coding: utf-8
import pprint
import re
import six
class ListTemplatesRequest:
    """Request model for the list-templates API (auto-generated SDK model).

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    # Attributes whose values are masked as "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'x_language': 'str',
        'template_type': 'str',
        'is_build_in': 'str',
        'offset': 'int',
        'limit': 'int',
        'name': 'str',
        'sort': 'str',
        'asc': 'str'
    }

    attribute_map = {
        'x_language': 'X-Language',
        'template_type': 'template_type',
        'is_build_in': 'is_build_in',
        'offset': 'offset',
        'limit': 'limit',
        'name': 'name',
        'sort': 'sort',
        'asc': 'asc'
    }

    def __init__(self, x_language=None, template_type=None, is_build_in=None, offset=None, limit=None, name=None, sort=None, asc=None):
        """ListTemplatesRequest - a model defined in huaweicloud sdk"""
        self._x_language = None
        self._template_type = None
        self._is_build_in = None
        self._offset = None
        self._limit = None
        self._name = None
        self._sort = None
        self._asc = None
        self.discriminator = None
        # Optional fields are only assigned when provided; template_type and
        # is_build_in are assigned unconditionally (required parameters).
        if x_language is not None:
            self.x_language = x_language
        self.template_type = template_type
        self.is_build_in = is_build_in
        if offset is not None:
            self.offset = offset
        if limit is not None:
            self.limit = limit
        if name is not None:
            self.name = name
        if sort is not None:
            self.sort = sort
        if asc is not None:
            self.asc = asc

    @property
    def x_language(self):
        """Gets the x_language of this ListTemplatesRequest.

        Language type: zh-cn for Chinese, en-us for English; defaults to en-us.

        :return: The x_language of this ListTemplatesRequest.
        :rtype: str
        """
        return self._x_language

    @x_language.setter
    def x_language(self, x_language):
        """Sets the x_language of this ListTemplatesRequest.

        Language type: zh-cn for Chinese, en-us for English; defaults to en-us.

        :param x_language: The x_language of this ListTemplatesRequest.
        :type: str
        """
        self._x_language = x_language

    @property
    def template_type(self):
        """Gets the template_type of this ListTemplatesRequest.

        Template type.

        :return: The template_type of this ListTemplatesRequest.
        :rtype: str
        """
        return self._template_type

    @template_type.setter
    def template_type(self, template_type):
        """Sets the template_type of this ListTemplatesRequest.

        Template type.

        :param template_type: The template_type of this ListTemplatesRequest.
        :type: str
        """
        self._template_type = template_type

    @property
    def is_build_in(self):
        """Gets the is_build_in of this ListTemplatesRequest.

        Whether the template is built-in.

        :return: The is_build_in of this ListTemplatesRequest.
        :rtype: str
        """
        return self._is_build_in

    @is_build_in.setter
    def is_build_in(self, is_build_in):
        """Sets the is_build_in of this ListTemplatesRequest.

        Whether the template is built-in.

        :param is_build_in: The is_build_in of this ListTemplatesRequest.
        :type: str
        """
        self._is_build_in = is_build_in

    @property
    def offset(self):
        """Gets the offset of this ListTemplatesRequest.

        Offset from which the query starts; must be >= 0.

        :return: The offset of this ListTemplatesRequest.
        :rtype: int
        """
        return self._offset

    @offset.setter
    def offset(self, offset):
        """Sets the offset of this ListTemplatesRequest.

        Offset from which the query starts; must be >= 0.

        :param offset: The offset of this ListTemplatesRequest.
        :type: int
        """
        self._offset = offset

    @property
    def limit(self):
        """Gets the limit of this ListTemplatesRequest.

        Number of entries shown per page.

        :return: The limit of this ListTemplatesRequest.
        :rtype: int
        """
        return self._limit

    @limit.setter
    def limit(self, limit):
        """Sets the limit of this ListTemplatesRequest.

        Number of entries shown per page.

        :param limit: The limit of this ListTemplatesRequest.
        :type: int
        """
        self._limit = limit

    @property
    def name(self):
        """Gets the name of this ListTemplatesRequest.

        Template name; matching is fuzzy (substring) rather than exact.

        :return: The name of this ListTemplatesRequest.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this ListTemplatesRequest.

        Template name; matching is fuzzy (substring) rather than exact.

        :param name: The name of this ListTemplatesRequest.
        :type: str
        """
        self._name = name

    @property
    def sort(self):
        """Gets the sort of this ListTemplatesRequest.

        Field to sort the results by.

        :return: The sort of this ListTemplatesRequest.
        :rtype: str
        """
        return self._sort

    @sort.setter
    def sort(self, sort):
        """Sets the sort of this ListTemplatesRequest.

        Field to sort the results by.

        :param sort: The sort of this ListTemplatesRequest.
        :type: str
        """
        self._sort = sort

    @property
    def asc(self):
        """Gets the asc of this ListTemplatesRequest.

        Whether to sort in ascending order.

        :return: The asc of this ListTemplatesRequest.
        :rtype: str
        """
        return self._asc

    @asc.setter
    def asc(self, asc):
        """Sets the asc of this ListTemplatesRequest.

        Whether to sort in ascending order.

        :param asc: The asc of this ListTemplatesRequest.
        :type: str
        """
        self._asc = asc

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes instead of exposing raw values.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListTemplatesRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| StarcoderdataPython |
214183 | <filename>SVM/ExtractFeatureData/test.py<gh_stars>10-100
import os
import datetime
import pandas as pd
from CommonFunction.extract_color_data import extract_color_data
from CommonFunction.extract_SURF_data import extract_SURF_data
from CommonFunction.extract_ELA_data import extract_ELA_data
# Source directory of cropped CelebA face images to featurize.
inputpath = 'G:/SVM/Celeba_face/devel/'
inputfiles = os.listdir(inputpath)
# Destination directory for the per-feature Excel workbooks.
outputpath = 'G:/SVM/Celeba_feature_data/'
def color():
    """Extract color features for every input image into one Excel workbook."""
    writer = pd.ExcelWriter(outputpath + 'devel_color.xlsx')
    for image_name in inputfiles:
        # Each image contributes its own sheet/rows via the extractor.
        extract_color_data(inputpath + image_name, writer)
    writer.save()
    writer.close()
def SURF():
    """Extract SURF keypoint features for every input image into one workbook."""
    writer = pd.ExcelWriter(outputpath + 'devel_SURF.xlsx')
    for image_name in inputfiles:
        extract_SURF_data(inputpath + image_name, writer)
    writer.save()
    writer.close()
def ELA():
    """Extract Error Level Analysis features for every image into one workbook."""
    writer = pd.ExcelWriter(outputpath + 'devel_ELA.xlsx')
    for image_name in inputfiles:
        # ELA extraction also writes intermediate images under outputpath.
        extract_ELA_data(inputpath + image_name, outputpath, writer)
    writer.save()
    writer.close()
if __name__ == '__main__':
    # Run the three extraction passes back to back, timing each one.
    color_startTime = datetime.datetime.now()
    print('Color startTime: {}'.format(color_startTime))
    color()
    color_endTime = datetime.datetime.now()
    print('Color endTime: {}'.format(color_endTime))
    print('Color running time: {}'.format(color_endTime - color_startTime))
    SURF_startTime = datetime.datetime.now()
    print('SURF startTime: {}'.format(SURF_startTime))
    SURF()
    SURF_endTime = datetime.datetime.now()
    print('SURF endTime: {}'.format(SURF_endTime))
    print('SURF running time: {}'.format(SURF_endTime - SURF_startTime))
    ELA_startTime = datetime.datetime.now()
    print('ELA startTime: {}'.format(ELA_startTime))
    ELA()
    ELA_endTime = datetime.datetime.now()
    print('ELA endTime: {}'.format(ELA_endTime))
    print('ELA running time: {}'.format(ELA_endTime - ELA_startTime))
# Color startTime: 2020-04-22 12:52:05.791388
# Color endTime: 2020-04-22 12:53:29.395234
# Color running time: 0:01:23.603846
# SURF startTime: 2020-04-22 12:53:29.396202
# SURF endTime: 2020-04-22 12:53:30.652361
# SURF running time: 0:00:01.256159
# ELA startTime: 2020-04-22 12:53:30.653393
# ELA endTime: 2020-04-22 12:53:57.133904
# ELA running time: 0:00:26.480511
| StarcoderdataPython |
6666972 | <reponame>agooding-netizen/GildedRose-Refactoring-Kata<gh_stars>0
# -*- coding: utf-8 -*-
import re
class GildedRose(object):
    """Applies the daily quality/sell-in update rules to a list of items."""

    def __init__(self, items):
        self.items = items

    @staticmethod
    def upgrade(item, rate):
        """Raise quality by `rate`, capped at 50."""
        if item.quality < 50 - rate:
            item.quality += rate
        else:
            item.quality = 50

    @staticmethod
    def downgrade(item, rate):
        """Lower quality by `rate`, floored at 0."""
        if item.quality > 0 + rate:
            item.quality -= rate
        else:
            item.quality = 0

    @staticmethod
    def sulfuras(item):
        # Legendary item: quality is fixed at 80; sell_in is never changed.
        item.quality = 80

    def standard(self, item):
        # Normal items degrade by 1, twice as fast once past the sell date.
        if item.sell_in > 0:
            self.downgrade(item, 1)
        elif item.sell_in <= 0:
            self.downgrade(item, 2)
        item.sell_in -= 1

    def brie(self, item):
        # Aged Brie improves with age, twice as fast after the sell date.
        if item.sell_in > 0:
            self.upgrade(item, 1)
        elif item.sell_in <= 0:
            self.upgrade(item, 2)
        item.sell_in -= 1

    def concert(self, item):
        # Passes appreciate faster as the concert nears, then drop to zero.
        if item.sell_in > 10:
            self.upgrade(item, 1)
        elif item.sell_in > 5:
            self.upgrade(item, 2)
        elif item.sell_in > 0:
            self.upgrade(item, 3)
        elif item.sell_in <= 0:
            item.quality = 0
        item.sell_in -= 1

    def conjured(self, item):
        # Conjured items degrade twice as fast as standard ones.
        if item.sell_in > 0:
            self.downgrade(item, 2)
        if item.sell_in <= 0:
            self.downgrade(item, 4)
        item.sell_in -= 1

    def name_switch(self, item_name, item):
        """Dispatch to the update rule for `item_name`.

        BUG FIX / generalization: the original dict lookup raised KeyError
        for any item name outside a hard-coded whitelist; unknown names now
        fall back to the standard degradation rule, which is what the
        explicit "Elixir of the Mongoose" / "+5 Dexterity Vest" entries did.
        """
        handlers = {
            "Aged Brie": self.brie,
            "Backstage passes to a TAFKAL80ETC concert": self.concert,
            "Sulfuras, Hand of Ragnaros": self.sulfuras,
            "Conjured": self.conjured,
        }
        handlers.get(item_name, self.standard)(item)

    @staticmethod
    def conjure_check(item):
        """Return 'Conjured' when the item name mentions conjured, else the name."""
        if re.search(r'[Cc]onjured', item.name):
            return "Conjured"
        return item.name

    def update_quality(self):
        """Advance every tracked item by one day."""
        for item in self.items:
            self.name_switch(self.conjure_check(item), item)
class Item:
    """Plain record for a store item: name, days left to sell, and quality."""

    def __init__(self, name, sell_in, quality):
        self.name = name
        self.sell_in = sell_in
        self.quality = quality

    def __repr__(self):
        # Output format intentionally identical to the original "%s, %s, %s".
        return "{0}, {1}, {2}".format(self.name, self.sell_in, self.quality)
| StarcoderdataPython |
1776046 | <reponame>mesarcik/NLN
def print_epoch(model_type, epoch, time, losses, AUC):
    """Log a training progress summary for one epoch.

    Args:
        model_type (str): name of the model being trained
        epoch (int): the current epoch
        time (int): seconds elapsed for this epoch
        losses (dict): the model's loss values
        AUC (float): AUROC score of the model
    """
    separator = '__________________'
    summary = 'Epoch {} at {} sec \n{} losses: {} \nAUC = {}'.format(
        epoch, time, model_type, losses, AUC)
    print(separator)
    print(summary)
| StarcoderdataPython |
249958 | <filename>Latest/venv/Lib/site-packages/apptools/naming/object_factory.py
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought naming package component>
#------------------------------------------------------------------------------
""" The base class for all object factories. """
# Enthought library imports.
from traits.api import HasTraits
class ObjectFactory(HasTraits):
    """Abstract base class for object factories.

    A factory receives state describing how to create an object (such as a
    reference) and returns an instance of that object, or None when it does
    not recognise the state passed to it.
    """

    def get_object_instance(self, state, name, context):
        """Create an object from the given state; subclasses must override."""
        raise NotImplementedError
### EOF #######################################################################
| StarcoderdataPython |
322671 | <filename>lms/tests/test_exercise_unit_tests.py
import os
import pytest # type: ignore
from lms.lmsdb import models
from lms.lmstests.public.unittests import import_tests
from lms.lmstests.public.unittests import executers
from lms.lmstests.public.unittests import tasks
from lms.models import notifications
from lms.tests import conftest
STUDENT_CODE = """
def foo(bar=None):
return 'bar' if bar == 'bar' else 'foo'
"""
EXERCISE_TESTS = os.path.join(conftest.SAMPLES_DIR, 'student_test_code.py')
INVALID_EXERCISE_TESTS = os.path.join(
conftest.SAMPLES_DIR, 'not_working_test_code.py')
UNITTEST_NOTIFICATION = notifications.NotificationKind.UNITTEST_ERROR.value
class TestUTForExercise:
    """End-to-end checks for the exercise unit-test runner."""

    def test_check_solution_with_exercise_process_stub(
        self, solution: models.Solution,
    ):
        # Happy path: valid tests produce auto comments and a notification.
        self._initialize_solution(solution, EXERCISE_TESTS)
        self._run_unit_tests(solution.id)
        self._verify_comments()
        self._verify_notifications(solution)

    def test_check_solution_with_invalid_exercise(
        self, solution: models.Solution,
    ):
        # Broken test module: expect a single "fatal" execution record.
        self._initialize_solution(solution, INVALID_EXERCISE_TESTS)
        self._run_unit_tests(solution.id)
        auto_comments = tuple(models.SolutionExerciseTestExecution.select())
        assert len(auto_comments) == 1
        comment = auto_comments[0]
        expected_name = models.ExerciseTestName.FATAL_TEST_NAME
        assert comment.exercise_test_name.test_name == expected_name
        expected_name = models.ExerciseTestName.FATAL_TEST_PRETTY_TEST_NAME
        assert comment.exercise_test_name.pretty_test_name == expected_name
        all_notifications = list(notifications.get(user=solution.solver))
        assert len(all_notifications) == 1
        assert all_notifications[0].kind == UNITTEST_NOTIFICATION

    @pytest.mark.skip('Should run with docker system access')
    def test_check_solution_with_exercise_ut_full_docker(
        self, solution: models.Solution,
    ):
        # Same happy path, but through the Docker executor (needs docker).
        self._initialize_solution(solution, EXERCISE_TESTS)
        self._run_unit_tests(
            solution.id, executers.DockerExecutor.executor_name(),
        )
        self._verify_comments()

    @staticmethod
    def _verify_comments():
        # Two executions are expected; inspect the first failure's messages.
        auto_comments = tuple(models.SolutionExerciseTestExecution.select())
        assert len(auto_comments) == 2
        first = auto_comments[0]
        assert first.exercise_test_name.test_name == 'test_check_bar_bar'
        assert first.exercise_test_name.pretty_test_name == 'שם כזה מגניב 2'
        expected = ('AssertionError: איזה ברברון'
                    "assert 'bar' == 'barbaron' - barbaron + bar")
        assert expected == first.user_message
        assert "foo('bar') == 'barbaron'" in first.staff_message

    @staticmethod
    def _verify_notifications(solution):
        # Exactly one unit-test-error notification for the solver.
        all_notifications = notifications.get(user=solution.solver)
        assert len(all_notifications) == 1
        assert all_notifications[0].kind == UNITTEST_NOTIFICATION

    @staticmethod
    def _initialize_solution(solution: models.Solution, module_name: str):
        # Overwrite the stored solution code, then load the exercise tests.
        solution_file = solution.solution_files.get()
        solution_file.code = STUDENT_CODE
        solution_file.save()
        import_tests.load_test_from_module(module_name)

    @staticmethod
    def _run_unit_tests(solution_id, executor_name=None):
        # Default to the in-process executor unless one is supplied.
        if executor_name is None:
            executor_name = executers.SameProcessExecutor.executor_name()
        tasks.run_tests_for_solution(
            solution_id=solution_id,
            executor_name=executor_name,
        )
| StarcoderdataPython |
5006576 | from transformers import AutoTokenizer
import torch
# One object label per line; labels may tokenize into multiple word pieces.
lines=open('object_vocab.txt').readlines()
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
vs=tokenizer.vocab_size
# Multi-hot matrix: row i marks which BERT token ids occur in label i.
sv = torch.zeros(len(lines), vs)
for i, l in enumerate(lines):
    # Strip the [CLS]/[SEP] special tokens added by the tokenizer.
    v=(tokenizer(l)['input_ids'][1:-1])
    for vv in v:
        sv[i][vv] = 1
torch.save(sv, 'object_vocab_bertbaseuncased.pt')
| StarcoderdataPython |
1798114 | <reponame>malywonsz/txtai
"""
Factory module
"""
from .sqlite import SQLite
class DatabaseFactory:
    """
    Methods to create document databases.
    """

    @staticmethod
    def create(config):
        """
        Create a Database.

        Args:
            config: database configuration parameters

        Returns:
            Database instance, or None when content storage is disabled
        """
        content = config.get("content")
        # `content: True` is shorthand for the default sqlite backend.
        if content is True:
            content = "sqlite"
        database = SQLite(config) if content == "sqlite" else None
        # Persist the normalized backend name back into the config.
        config["content"] = content
        return database
| StarcoderdataPython |
3389428 | <reponame>PinkRoccade-Local-Government-OSS/PinkWave<gh_stars>1-10
#!/usr/bin/python
"""
The Macro class can be used to load macro scripts written in Python
"""
import os
from os.path import isfile
import importlib
from time import sleep
import sys,os
from os.path import dirname,abspath
from os import walk
# Importing PinkWave extensions
sys.path.append(dirname(dirname(abspath(__file__))))
from extensions.Util import Util
class Macro:
    """Loads and runs a macro script written in Python.

    NOTE(review): this file targets Python 2 (print statement below).
    """

    def __init__(self, macroPath):
        # Absolute path to the macro file; must exist at construction time.
        self.macroPath = macroPath
        if not isfile(self.macroPath):
            raise Exception("Macro not found in path: %s" % macroPath)

    def start(self):
        """Import the macro as a module and invoke its start() function."""
        # Convert the absolute file path into a dotted module path
        # relative to the application directory, then import it.
        macroName = self.macroPath.replace(Util.getAppDir(), "")
        macroName = macroName.strip("/")
        macroName = macroName.replace(".py", "")
        macroName = macroName.replace("/", ".")
        mod = importlib.import_module(macroName)
        try:
            mod.start()
        except AttributeError:
            # Report the missing entry point, then re-raise for the caller.
            print "Macro %s has no 'start()' function" % macroName
            raise
        # Grace period after the macro finishes.
        sleep(3)
| StarcoderdataPython |
5090742 | <gh_stars>1-10
import os
# Prepare dataset: crop ImageNet originals into 96x96 patches (stride 48)
# for SRGAN training, using 10 worker processes.
os.system("python ./prepare_dataset.py --images_dir ../data/ImageNet/original --output_dir ../data/ImageNet/SRGAN/train --image_size 96 --step 48 --num_workers 10")
# Split train and valid: hold out 5% of the patches as the validation split.
os.system("python ./split_train_valid_dataset.py --train_images_dir ../data/ImageNet/SRGAN/train --valid_images_dir ../data/ImageNet/SRGAN/valid --valid_samples_ratio 0.05")
8003271 | <filename>gamePlayer.py
from gameElements import *
import json
import time
import os
import matplotlib.pyplot as plt
import progressbar
class GamePlay:
    """Drives training and evaluation of the Q-learning snake agent."""

    def __init__(self, GameRunner):
        self.game = GameRunner
        # Board sizes used for training and for evaluation games.
        self.boardsizeTrain = 3
        self.boardsizeTest = 3
        # Games per training run / number of evaluation trials.
        self.num_iterations_train = 100
        self.num_iterations_test = 30

    def train(self):
        """Train the QLearn agent for num_iterations_train games.

        Returns the number of states in the learned Q-table.
        """
        self.qL = QLearn(self.boardsizeTrain)
        s = Snake()
        board = Board(self.boardsizeTrain, s, False, True)
        while (board.gamesCompleted < self.num_iterations_train):
            # Pick an action, apply it, then perform the learning update.
            x = self.qL.algorithm1(board.gamestate.body, board.gamestate.apple, board.n)
            board.changeHeading(x)
            board.updateBoard()
            self.qL.algorithm2(board.gameOver, board.gameWon, board.gamestate.body, board.gamestate.apple, board.n)
            if board.gameOver or board.gameWon:
                board.resetAll()
        self.qL.saveQTable()
        return (len(self.qL.qTable))

    def play(self, verbose=True):
        """Play one greedy game with the trained agent.

        Returns [game score, current Q-table size].
        """
        s = Snake()
        board = Board(self.boardsizeTest, s, verbose=verbose, replayOn=True)
        if verbose:
            board.draw()
        score = 0
        while (True):
            state = self.qL.stateNow(board.gamestate.body, board.gamestate.apple, board.n)
            x = self.qL.bestAction(state)
            if verbose:
                print(x)
            board.changeHeading(x)
            board.updateBoard()
            if board.gameOver or board.gameWon:
                break
            if verbose:
                board.draw()
            #time.sleep(.1)
        score = board.gameScore
        if verbose:
            print("score: ", board.gameScore)
            print(len(self.qL.qTable))
        return [board.gameScore, len(self.qL.qTable)]

    def trainTestResults(self):
        """Repeated train/evaluate cycles with scatter plots of the results."""
        # Start from a blank Q-table so results are reproducible.
        self.qL.reset()
        scores = []
        qTableSizes = []
        trials = []
        print("Training and testing...")
        bar = progressbar.ProgressBar()
        for i in bar(range(self.num_iterations_test)):
            self.train()
            trials.append(i + 1)
            res = self.play(verbose=False)
            scores.append(res[0])
            qTableSizes.append(res[1])
        # Score vs. trial number.
        plt.scatter(trials, scores)
        plt.xlabel("Trials")
        plt.ylabel("Scores")
        plt.ylim([0, self.boardsizeTest**2 + 1])
        plt.show()
        # Score vs. Q-table size.
        plt.scatter(qTableSizes, scores)
        plt.xlabel("Table Size")
        plt.ylabel("Scores")
        plt.ylim([0, self.boardsizeTest**2 + 1])
        plt.show()
class QLearn:
    """Tabular Q-learning agent for snake on an n x n board."""

    def __init__(self, n):
        """Create an agent; loads a previously saved Q-table if one exists."""
        self.qTable = {}
        self.learningRate = 0.5
        self.discountFactor = 0.9
        # Epsilon for the epsilon-greedy policy.
        self.randomize = 0.05
        self.availableActions = ['w', 'a', 's', 'd']
        self.score = 0
        self.missed = 0
        self.n = n
        # Q-tables are persisted per board size, e.g. qTables/3x3.json.
        self.filename = "qTables/" + str(self.n) + 'x' + str(self.n) + '.json'
        if (os.path.exists(self.filename)):
            with open(self.filename, 'r') as fp:
                self.qTable = json.load(fp)

    def printQTable(self):
        """Pretty-print the full Q-table as indented JSON."""
        print(json.dumps(self.qTable, indent=1))

    def reset(self):
        """Delete the persisted Q-table file for this board size, if any."""
        if (os.path.exists(self.filename)):
            os.remove(self.filename)

    def saveQTable(self):
        """Persist the in-memory Q-table to its JSON file."""
        with open(self.filename, 'w') as fp:
            json.dump(self.qTable, fp)

    def stateNow(self, body, apple, n):
        """Encode the board as a string state key.

        The key is the apple position relative to the snake's head (wrapped
        onto the n x n torus), followed by each tail segment's relative
        position when the snake has more than one tail segment.
        """
        playerHeadx = body[0][0]
        playerHeady = body[0][1]
        fruitx = apple[0]
        fruity = apple[1]
        trail = body[1:]
        trailRelativePose = [0] * len(trail)
        # Wrap the relative apple x-offset into [0, n].
        fruitRelativePosex = fruitx - playerHeadx
        while (fruitRelativePosex < 0):
            fruitRelativePosex += n
        while (fruitRelativePosex > n):
            fruitRelativePosex -= n
        # Wrap the relative apple y-offset into [0, n].
        fruitRelativePosey = fruity - playerHeady
        while (fruitRelativePosey < 0):
            fruitRelativePosey += n
        while (fruitRelativePosey > n):
            fruitRelativePosey -= n
        stateName = str(fruitRelativePosex) + "," + str(fruitRelativePosey)
        l = len(trail)
        if len(trail) > 1:
            for i in range(l):
                # Lazily turn the 0 placeholder into an [x, y] pair.
                if (trailRelativePose[i] == 0):
                    trailRelativePose[i] = [0, 0]
                trailRelativePose[i][0] = trail[i][0] - playerHeadx;
                while (trailRelativePose[i][0] < 0):
                    trailRelativePose[i][0] += n
                while (trailRelativePose[i][0] > n):
                    trailRelativePose[i][0] -= n
                trailRelativePose[i][1] = trail[i][1] - playerHeady
                while (trailRelativePose[i][1] < 0):
                    trailRelativePose[i][1] += n
                while (trailRelativePose[i][1] > n):
                    trailRelativePose[i][1] -= n
                stateName += ',' + str(trailRelativePose[i][0]) + ',' + str(trailRelativePose[i][1]);
        return stateName

    def tableRow(self, s):
        """Return the action-value row for state `s`, creating it if absent."""
        if s not in self.qTable:
            self.qTable[s] = {'w': 0, 'a': 0, 's': 0, 'd': 0}
        return self.qTable[s]

    def updateTable(self, s0, s1, reward, action):
        """Standard Q-learning update for taking `action` in s0, reaching s1."""
        q0 = self.tableRow(s0)
        q1 = self.tableRow(s1)
        # TD error: reward + gamma * max_a' Q(s1, a') - Q(s0, action)
        newValue = reward + self.discountFactor * max([q1['w'], q1['a'], q1['s'], q1['d']]) - q0[action]
        self.qTable[s0][action] = q0[action] + self.learningRate * newValue

    def bestAction(self, s):
        """Epsilon-greedy action for state `s`.

        With probability `randomize` picks a random action; otherwise the
        highest-valued action, breaking an all-zero maximum randomly among
        the zero-valued (unexplored) actions.
        """
        q = self.tableRow(s)
        if random.random() < self.randomize:
            r = random.randint(0, 3)
            return self.availableActions[r]
        maxValue = q[self.availableActions[0]]
        choseAction = self.availableActions[0]
        actionsZero = []
        for i in range(len(self.availableActions)):
            if (q[self.availableActions[i]] == 0):
                actionsZero.insert(0, self.availableActions[i])
            if (q[self.availableActions[i]] > maxValue):
                maxValue = q[self.availableActions[i]]
                choseAction = self.availableActions[i]
        if (maxValue == 0):
            # All best values are zero: pick randomly among unexplored moves.
            r2 = random.randint(0, len(actionsZero) - 1)
            choseAction = actionsZero[r2]
        return choseAction

    def algorithm1(self, body, apple, n):
        """Choose the next action for the current board state.

        With a single-segment snake there is no relative state yet, so a
        random action is taken; otherwise the epsilon-greedy policy is used.
        The chosen (state, action) pair is remembered for the update step.
        """
        if (len(body) == 1):
            s = "nobody"
            r = random.randint(0, 3)
            a = self.availableActions[r]
        else:
            s = self.stateNow(body, apple, n)
            a = self.bestAction(s)
        self.curState = s
        self.curAction = a
        return a

    def algorithm2(self, gameOver, gameWon, body, apple, n):
        """Q-learning update after the environment step.

        Reward: +1 for a win or for landing on the apple, -1 for a loss,
        -0.1 per ordinary step. Updates the Q-table for the remembered
        (state, action) pair and tracks cumulative score/missed totals.
        """
        if gameWon == True:
            r = 1
            nextState = self.stateNow(body, apple, n)
            self.updateTable(self.curState, nextState, r, self.curAction)
            if (r > 0):
                self.score += r
            if (r < 0):
                self.missed += r
        elif gameOver == True:
            r = -1
            nextState = self.stateNow(body, apple, n)
            self.updateTable(self.curState, nextState, r, self.curAction)
            if (r > 0):
                self.score += r
            if (r < 0):
                self.missed += r
        else:
            playerHeadx = body[0][0]
            playerHeady = body[0][1]
            fruitx = apple[0]
            fruity = apple[1]
            r = 0
            # Head on the apple -> positive reward; otherwise small step cost.
            if (playerHeadx - fruitx == 0 and playerHeady - fruity == 0):
                r = 1
            else:
                r = -0.1
            nextState = self.stateNow(body, apple, n)
            self.updateTable(self.curState, nextState, r, self.curAction)
            if (r > 0):
                self.score += r
            if (r < 0):
                self.missed += r
if __name__ == "__main__":
game = GameRunner()
player = GamePlay(game)
player.train()
player.trainTestResults()
| StarcoderdataPython |
6484889 | #!/usr/bin/evn python
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
from skimage.feature import peak_local_max
import copy
def corner_score(img):
    """Harris corner response map plus the coordinates of strong corners.

    Returns (score_map, corners): score_map keeps only responses above
    0.05% of the peak value; corners is an (N, 2) array of (x, y) pixel
    locations with a surviving response.
    """
    blurred = cv2.GaussianBlur(img, (3, 3), 0)
    gray = np.float32(cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY))
    response = cv2.cornerHarris(gray, 5, 3, 0.04)
    # Zero out weak responses below 0.05% of the maximum.
    cutoff = 0.0005 * response.max()
    score_map = np.where(response <= cutoff, 0, response)
    rows, cols = np.where(score_map > 0)
    corners = np.hstack((cols.reshape(-1, 1), rows.reshape(-1, 1)))
    return score_map, corners
def applyANMS(scoreMap, corners, image, numBest=150):
    """Adaptive Non-Maximal Suppression: keep the numBest corners that
    are both strong and evenly spread over the image.

    Parameters
    ----------
    scoreMap : ndarray
        Harris response map (see corner_score).
    corners : ndarray
        Unused here; kept for interface compatibility with callers.
    image : ndarray
        Image on which the kept corners are drawn (a copy is modified).
    numBest : int, optional
        Maximum number of corners to keep.

    Returns
    -------
    corners : ndarray of shape (numBest, 2)
        (col, row) coordinates of the kept corners.
    img : ndarray
        Copy of *image* with the kept corners drawn.

    Fix: previously, when fewer than numBest local maxima existed, the
    code only printed a warning and then indexed ``ind[i]`` for
    ``i in range(numBest)`` -- an IndexError. numBest is now clamped.
    """
    img = image.copy()
    C = scoreMap.copy()
    locmax = peak_local_max(C, min_distance=10)
    nStrong = locmax.shape[0]
    if nStrong < numBest:
        print("Not enough strong corners\n")
        print(nStrong)
        # Clamp so the argsort slice and the drawing loop stay in bounds.
        numBest = nStrong
    # r[i]: squared distance from corner i to the nearest strictly
    # stronger corner (its suppression radius).
    r = [np.Infinity for i in range(nStrong)]
    x = np.zeros((nStrong, 1))
    y = np.zeros((nStrong, 1))
    ed = 0
    for i in range(nStrong):
        for j in range(nStrong):
            if C[locmax[j][0], locmax[j][1]] > C[locmax[i][0], locmax[i][1]]:
                ed = (locmax[j][0] - locmax[i][0]) ** 2 + \
                     (locmax[j][1] - locmax[i][1]) ** 2
            # NOTE(review): ed is not reset when j is not stronger, so a
            # stale distance from a previous j can shrink r[i] -- looks
            # like a latent bug in the original ANMS; preserved as-is to
            # avoid changing corner selection. TODO confirm intent.
            if ed < r[i]:
                r[i] = ed
        x[i] = locmax[i][0]
        y[i] = locmax[i][1]
    # Keep the numBest corners with the LARGEST suppression radii.
    ind = np.argsort(r)
    ind = ind[-numBest:]
    x_best = np.zeros((numBest, 1))
    y_best = np.zeros((numBest, 1))
    for i in range(numBest):
        x_best[i] = np.int0(x[ind[i]])
        y_best[i] = np.int0(y[ind[i]])
        # Cast to plain ints: cv2.circle rejects 1-element ndarrays.
        cv2.circle(img, (int(y_best[i]), int(x_best[i])), 3, 255, -1)
    corners = np.hstack((y_best.reshape(-1, 1), x_best.reshape(-1, 1)))
    return corners, img
def ransac(pts1, pts2, threshold=5):
    """Estimate a homography mapping pts1 -> pts2 with RANSAC.

    Repeatedly fits an exact homography to 4 random correspondences and
    keeps the candidate with the most inliers (reprojection error below
    *threshold*). Stops early once more than 40 inliers are found.

    Fix: the degenerate-projection branch previously evaluated
    ``newPt/1e-8`` and discarded the result (a no-op), leaving the point
    unnormalised; the epsilon normalisation is now actually applied.
    """
    Hfinal = np.zeros((3, 3))
    maxInliers = 0
    for iters in range(100):
        # Minimal sample: 4 random correspondences (duplicates possible).
        ndxs = [np.random.randint(0, len(pts1)) for i in range(4)]
        p1 = pts1[ndxs]
        p2 = pts2[ndxs]
        H = cv2.getPerspectiveTransform(np.float32(p1), np.float32(p2))
        numInliers = 0
        for pt1, pt2 in zip(pts1, pts2):
            fromPt = np.array(pt1)
            toPt = np.array(pt2)
            # Project pt1 through H in homogeneous coordinates.
            newPt = np.dot(H, np.array([fromPt[0], fromPt[1], 1]))
            if newPt[2] != 0:
                newPt /= newPt[2]
            else:
                # w == 0: point at infinity; normalise with a tiny epsilon
                # so the huge resulting coordinates fail the inlier test.
                newPt = newPt / 1e-8
            diff = np.linalg.norm(toPt - newPt[:2])
            if diff < threshold:
                numInliers += 1
        if maxInliers < numInliers:
            maxInliers = numInliers
            Hfinal = H
        if maxInliers > 40:
            break
    return Hfinal
def combine(img2, img1, H):
    """Warp img2 by homography H onto a canvas large enough to hold both
    images, then paste img1 on top (img1's pixels win in the overlap).

    Returns the stitched canvas.
    """
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    # Corner points of each image as Nx1x2 float32 arrays (the shape
    # cv2.perspectiveTransform expects).
    pts1 = np.float32([[0, 0], [0, h1], [w1, h1], [w1, 0]]).reshape(-1, 1, 2)
    pts2 = np.float32([[0, 0], [0, h2], [w2, h2], [w2, 0]]).reshape(-1, 1, 2)
    # Where img2's corners land after applying H.
    warpdPts = cv2.perspectiveTransform(pts2, H)
    # Bounding box of all corners determines the output canvas size.
    pts = np.vstack((pts1, warpdPts)).reshape(-1, 2)
    xmin, ymin = np.int32(pts.min(axis=0))
    xmax, ymax = np.int32(pts.max(axis=0))
    # Translation shifting everything into non-negative coordinates,
    # composed with H so the warp lands inside the canvas.
    t = [-xmin, -ymin]
    Ht = np.array([[1, 0, t[0]], [0, 1, t[1]], [0, 0, 1]])
    result = cv2.warpPerspective(img2, Ht.dot(H), (xmax - xmin, ymax - ymin))
    # cv2.imshow("Warped img2", result)
    # cv2.waitKey(0)
    # Overwrite the (translated) img1 region with the original pixels.
    result[t[1]:h1 + t[1], t[0]:w1 + t[0]] = img1
    return result
def stitch(img1, img2):
    """Stitch img2 onto img1 and return the combined panorama.

    Pipeline: Harris corner response -> ANMS corner selection -> ORB
    descriptors at the selected corners -> brute-force Hamming matching
    -> RANSAC homography -> warp-and-paste via combine().
    """
    imgs = [img1, img2]
    # Stage 1: corner response maps and raw corner lists for both images.
    scoreMapList = []
    cornersList = []
    for img in imgs:
        tempScoreMap, tempcorners = corner_score(img)
        scoreMapList.append(tempScoreMap)
        cornersList.append(tempcorners)
    # Visualisation only: draw the raw corners on copies of the images.
    for corners, img in zip(cornersList, copy.deepcopy(imgs)):
        for corner in corners:
            cv2.circle(img, tuple(corner), 2, (255, 0, 0), -1)
        # cv2.imshow("Detect Corners", img)
        # cv2.waitKey(0)
    # Stage 2: ANMS keeps 150 well-distributed corners per image.
    anmsCornersList = []
    for scoreMap, corners, img in zip(scoreMapList, cornersList,
                                      copy.deepcopy(imgs)):
        newCorners, img = applyANMS(scoreMap, corners, img, 150)
        anmsCornersList.append(newCorners)
        # cv2.imshow("Apply ANMS", img)
        # cv2.waitKey(0)
    # Stage 3: ORB descriptors computed at the ANMS corner locations
    # (detection is skipped; keypoints are built from our own corners).
    detector = cv2.ORB_create(nfeatures=1500)
    descriptorsList = []
    kpList = []
    for corners, img in zip(anmsCornersList, copy.deepcopy(imgs)):
        img = cv2.GaussianBlur(img, (3, 3), 0)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        kp = [cv2.KeyPoint(corner[0], corner[1], 5) for corner in corners]
        # kp = detector.detect(gray,None)
        kp, des = detector.compute(gray, kp)
        descriptorsList.append(des)
        kpList.append(kp)
    # create BFMatcher object (Hamming distance suits ORB's binary
    # descriptors; crossCheck keeps only mutual best matches)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # Match descriptors.
    matches = bf.match(descriptorsList[0], descriptorsList[1])
    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)
    # Draw first 10 matches.
    img3 = cv2.drawMatches(img1, kpList[0], img2, kpList[1], matches[:10], None)
    plt.imshow(img3)
    plt.show()
    # Stage 4: matched point coordinates feed the RANSAC homography.
    pts1 = np.float32([kpList[0][m.queryIdx].pt for m in matches])
    pts2 = np.float32([kpList[1][m.trainIdx].pt for m in matches])
    H = ransac(pts1, pts2)
    print(H)
    # Debug warp of img1 alone (result unused beyond optional display).
    test = cv2.warpPerspective(img1, H, (img1.shape[1], img1.shape[0]))
    # cv2.imshow("Warp", test)
    # cv2.waitKey(0)
    # Stage 5: composite both images onto one canvas.
    combImg = combine(img1, img2, H)
    # cv2.imshow("Final", combImg)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    return combImg
def main():
    """Load every image in ../data/train/set2 and stitch them in order
    into a single panorama, then display the (resized) result."""
    images = [cv2.imread(path) for path in glob.glob("../data/train/set2/*.jpg")]
    pano = images[0]
    # Fold each remaining image into the running panorama, left to right.
    for nxt in images[1:]:
        pano = stitch(pano, nxt)
    pano = cv2.resize(pano, (1000, 1000))
    cv2.imshow("Pano", pano)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
# Expected parsed structure for a "show track" style device-output test:
# keyed by track object id, each entry records the tracked object's type,
# state, change history, and (where applicable) the clients tracking it or
# the members of a track list.
# Fix: the dataset-extraction junk ("205562 | ") fused onto the first line
# has been removed so the assignment is valid Python again.
expected_output = {
    "track": {
        "1": {
            "type": "Interface",
            "instance": "Ethernet1/4",
            "subtrack": "IP Routing",
            "state": "DOWN",
            "change_count": 1,
            "last_change": "3w5d",
            "tracked_by": {
                1: {
                    "name": "HSRP",
                    "interface": "Vlan2",
                    "id": "2"
                },
                2: {
                    "name": "HSRP",
                    "interface": "Ethernet1/1",
                    "id": "1"
                },
                3: {
                    "name": "VRRPV3",
                    "interface": "Vlan2",
                    "id": "2"
                },
                4: {
                    "name": "TrackList",
                    "id": "10"
                },
                5: {
                    "name": "TrackList",
                    "id": "11"
                },
                6: {
                    "name": "TrackList",
                    "id": "12"
                },
                7: {
                    "name": "Route Map Configuration"
                }
            },
            "delay_up_secs": 20.0
        },
        "12": {
            "type": "List",
            "sublist": "weight",
            "state": "DOWN",
            "change_count": 1,
            "last_change": "3w3d",
            "threshold_down": "10",
            "threshold_up": "20",
            "track_list_members": {
                1: {
                    "object_id": "10",
                    "weight": "10",
                    "obj_state": "UP"},
                2: {
                    "object_id": "1",
                    "weight": "100",
                    "obj_state": "DOWN"
                }
            }
        },
        "13": {
            "type": "Interface",
            "instance": "loopback1",
            "subtrack": "Line Protocol",
            "state": "DOWN",
            "change_count": 2,
            "last_change": "3w3d",
            "delay_up_secs": 23.0,
            "delay_down_secs": 24.0
        },
    }
}
| StarcoderdataPython |
"""Shows a basic text dialog.
"""
import sys

# Temporarily put the parent directory on the import path so the agpy
# package can be imported when this example is run from its own folder;
# the entry is removed again immediately after the imports.
sys.path.append("..")
from agpy.ui import *
from agpy.window import *
from agpy.utils import *
sys.path.remove("..")
def main():
    """Open a test window, show two dialogs in sequence, then quit."""
    show_window("Dialog test.")
    for message in ("Hi!", "Test again!"):
        dlg(message)
    quit()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3511099 | <gh_stars>10-100
"""This module is used to scrape the all of the APIs from a given source file
and return their name and kind. These include classes, structs, functions,
and certain variable types. It is not used to actually describe these elements.
That is the job of the autodescriber.
This module is available as an xdress plugin by the name ``xdress.autoall``.
Including this plugin enables the ``classes``, ``functions``, and ``variables``
run control parameters to have an asterisk ('*') in the name position (index 0).
For example, rather than writing::
classes = [
('People', 'people'),
('JoanOfArc', 'people'),
('JEdgaHoover', 'people'),
('Leslie', 'people'),
('HuaMulan', 'people'),
]
we can instead simply write::
classes = [('*', 'people')]
Isn't this grand?!
:author: <NAME> <<EMAIL>>
Automatic Finder API
====================
"""
from __future__ import print_function
import os
import io
import re
import sys
from hashlib import md5
from pprint import pprint, pformat
from warnings import warn
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import pycparser
except ImportError:
pycparser = None
try:
from . import clang
from .clang.cindex import CursorKind
except ImportError:
clang = None
from . import utils
from . import astparsers
from .utils import find_source, FORBIDDEN_NAMES, NotSpecified, RunControl, apiname, \
ensure_apiname
if os.name == 'nt':
import ntpath
import posixpath
if sys.version_info[0] >= 3:
basestring = str
class GccxmlFinder(object):
    """Class used for discovering APIs using an etree representation of
    the GCC-XML AST."""

    def __init__(self, root=None, onlyin=None, verbose=False):
        """Parameters
        -------------
        root : element tree node, optional
            The root element node of the AST.
        onlyin : str, optional
            Filename to search, prevents finding APIs coming from other libraries.
        verbose : bool, optional
            Flag to display extra information while visiting the file.
        """
        self.verbose = verbose
        self._root = root
        origonlyin = onlyin
        # Normalize onlyin to a set of filenames, then translate each
        # filename into GCC-XML's internal file id (the 'id' attribute of
        # the matching <File> element) -- AST nodes refer to files by id.
        onlyin = [onlyin] if isinstance(onlyin, basestring) else onlyin
        onlyin = set() if onlyin is None else set(onlyin)
        onlyin = [root.find("File[@name='{0}']".format(oi)) for oi in onlyin]
        self.onlyin = set([oi.attrib['id'] for oi in onlyin if oi is not None])
        if 0 == len(self.onlyin):
            # None of the requested files appear in the AST: every later
            # file-id check will fail, so warn the user now.
            msg = ("None of these files are present: {0!r}; "
                   "autodescribing will probably fail.")
            msg = msg.format(origonlyin)
            warn(msg, RuntimeWarning)
        # Accumulated API names, filled in by visit().
        self.variables = []
        self.functions = []
        self.classes = []

    def __str__(self):
        return ("vars = " + pformat(self.variables) + "\n" +
                "funcs = " + pformat(self.functions) + "\n" +
                "classes = " + pformat(self.classes) + "\n")

    def _pprint(self, node):
        # Debug helper: report each discovered element when verbose.
        if self.verbose:
            print("Auto-Found: {0} {1} {2}".format(node.tag,
                                                   node.attrib.get('id', ''),
                                                   node.attrib.get('name', None)))

    def visit(self, node=None):
        """Visits the node and all sub-nodes, filling the API names
        as it goes.

        Parameters
        ----------
        node : element tree node, optional
            The element tree node to start from.  If this is None, then the
            top-level node is found and visited.
        """
        node = node or self._root
        # Enumerations are exposed as variables; classes and structs are
        # collected together into the classes list.
        self.variables += self.visit_kinds(node, "Enumeration")
        self.functions += self.visit_kinds(node, "Function")
        self.classes += self.visit_kinds(node, ["Class", "Struct"])

    def visit_kinds(self, node, kinds):
        """Visits the node and all sub-nodes, finding instances of the kinds
        and recording the names as it goes.

        Parameters
        ----------
        node : element tree node
            The element tree node to start from.
        kinds : str or sequence of str
            The API elements to find.

        Returns
        -------
        names : list of str
            Names of the API elements in this file that match the kinds provided.
        """
        if not isinstance(kinds, basestring):
            # Multiple kinds: recurse per kind, then drop forbidden names.
            names = []
            for k in kinds:
                names += self.visit_kinds(node, k)
            names = [n for n in names if n not in FORBIDDEN_NAMES]
            return names
        names = set()
        for child in node.iterfind(".//" + kinds):
            # Skip elements declared in files other than the target ones.
            if child.attrib.get('file', None) not in self.onlyin:
                continue
            name = child.attrib.get('name', '_')
            # Underscore-prefixed names are treated as private; skip them.
            if name.startswith('_'):
                continue
            if name in FORBIDDEN_NAMES:
                continue
            names.add(utils.parse_template(name))
            self._pprint(child)
        return sorted(names)
def gccxml_findall(filename, includes=(), defines=('XDRESS',), undefines=(),
                   extra_parser_args=(), verbose=False, debug=False,
                   builddir='build', language='c++', clang_includes=()):
    """Discover every wrappable API element in *filename* via GCC-XML.

    Parameters
    ----------
    filename : str
        Path of the source file to scan.
    includes : list of str, optional
        Extra include directories for header resolution.
    defines : list of str, optional
        Extra macro definitions to apply.
    undefines : list of str, optional
        Extra macro undefinitions to apply.
    extra_parser_args : list of str, optional
        Additional command line arguments for the parser.
    verbose : bool, optional
        Emit extra information while scanning.
    debug : bool, optional
        Enable debug mode.
    builddir : str, optional
        Directory for (often temporary) build files.
    language : str
        Valid language flag.
    clang_includes : ignored

    Returns
    -------
    variables : list of strings
        Variable names found in the file.
    functions : list of strings
        Function names found in the file.
    classes : list of strings
        Class names found in the file.
    """
    if os.name == 'nt':
        # GCC-XML and/or Cygwin wants posix paths on Windows.
        filename = posixpath.join(*ntpath.split(filename))
    ast = astparsers.gccxml_parse(filename, includes=includes, defines=defines,
                                  undefines=undefines,
                                  extra_parser_args=extra_parser_args,
                                  verbose=verbose, debug=debug,
                                  builddir=builddir)
    # Restrict discovery to this file and its sibling h* headers.
    stem = filename.rsplit('.', 1)[0]
    headers = [stem + '.' + ext for ext in utils._hdr_exts if ext.startswith('h')]
    finder = GccxmlFinder(ast, onlyin=set([filename] + headers), verbose=verbose)
    finder.visit()
    return finder.variables, finder.functions, finder.classes
def clang_findall(filename, includes=(), defines=('XDRESS',), undefines=(),
                  extra_parser_args=(), verbose=False, debug=False, builddir='build',
                  language='c++', clang_includes=()):
    """Automatically finds all API elements in a file via clang.

    Parameters
    ----------
    filename : str
        The path to the file
    includes : list of str, optional
        The list of extra include directories to search for header files.
    defines : list of str, optional
        The list of extra macro definitions to apply.
    undefines : list of str, optional
        The list of extra macro undefinitions to apply.
    extra_parser_args : list of str, optional
        Further command line arguments to pass to the parser.
    language : str
        Valid language flag.
    verbose : Ignored
    debug : Ignored
    builddir : Ignored
    clang_includes : list of str, optional
        clang-specific include paths.

    Returns
    -------
    variables : list of strings
        A list of variable names to wrap from the file.
    functions : list of strings
        A list of function names to wrap from the file.
    classes : list of strings
        A list of class names to wrap from the file.
    """
    tu = astparsers.clang_parse(filename, includes=includes, defines=defines,
                                undefines=undefines,
                                extra_parser_args=extra_parser_args, verbose=verbose,
                                debug=debug, language=language,
                                clang_includes=clang_includes)
    # Restrict discovery to this file and its sibling h* headers.
    basename = filename.rsplit('.', 1)[0]
    onlyin = frozenset([filename] +
                       [basename + '.' + h for h in utils._hdr_exts if h.startswith('h')])
    variables, functions, classes = [], [], []

    def visit(node):
        # Recursive cursor walk: descend into namespaces, record enum
        # declarations as variables, and collect free functions and
        # class/struct declarations by their spelling.
        kind = node.kind
        if kind == CursorKind.NAMESPACE:
            for kid in node.get_children():
                visit(kid)
        elif kind == CursorKind.ENUM_DECL:
            variables.append(node.spelling)
        elif kind == CursorKind.FUNCTION_DECL:
            functions.append(node.spelling)
        elif kind in (CursorKind.CLASS_DECL, CursorKind.STRUCT_DECL):
            classes.append(node.spelling)

    for node in tu.cursor.get_children():
        # Only visit top-level cursors whose extent starts in a target file.
        file = node.extent.start.file
        if file and file.name in onlyin:
            visit(node)
    return variables, functions, classes
class PycparserFinder(astparsers.PycparserNodeVisitor):
    """Class used for discovering APIs using the pycparser AST."""

    def __init__(self, root=None, onlyin=None, verbose=False):
        """Parameters
        -------------
        root : element tree node, optional
            The root element node of the AST.
        onlyin : str, optional
            Filename to search, prevents finding APIs coming from other libraries.
        verbose : bool, optional
            Flag to display extra information while visiting the file.
        """
        super(PycparserFinder, self).__init__()
        self.verbose = verbose
        self._root = root
        self.onlyin = onlyin
        # Accumulated API names, filled in by the visit_* handlers.
        self.variables = []
        self.functions = []
        self.classes = []

    def __str__(self):
        return ("vars = " + pformat(self.variables) + "\n" +
                "funcs = " + pformat(self.functions) + "\n" +
                "classes = " + pformat(self.classes) + "\n")

    def _pprint(self, node):
        # Debug helper: dump the AST node when verbose.
        if self.verbose:
            node.show()

    def visit(self, node=None):
        """Visits the node and all sub-nodes, filling the API names
        as it goes.

        Parameters
        ----------
        node : element tree node, optional
            The element tree node to start from.  If this is None, then the
            top-level node is found and visited.
        """
        node = node or self._root
        super(PycparserFinder, self).visit(node)

    def visit_Enum(self, node):
        # Enums declared in the target file(s) are exposed as variables.
        if node.coord.file not in self.onlyin:
            return
        name = node.name
        if name.startswith('_'):
            # Underscore-prefixed names are treated as private; skip.
            return
        if name in FORBIDDEN_NAMES:
            return
        self._pprint(node)
        self.variables.append(name)

    def visit_FuncDecl(self, node):
        if node.coord.file not in self.onlyin:
            return
        # For pointer-returning functions the declname sits one level
        # deeper, inside the PtrDecl's inner type node.
        if isinstance(node.type, pycparser.c_ast.PtrDecl):
            name = node.type.type.declname
        else:
            name = node.type.declname
        if name is None or name.startswith('_'):
            return
        if name in FORBIDDEN_NAMES:
            return
        self._pprint(node)
        self.functions.append(name)

    def visit_Struct(self, node):
        if node.coord.file not in self.onlyin:
            return
        name = node.name
        if name is None:
            # Anonymous struct: signal an enclosing visit_Typedef (via the
            # _status sentinel) that the typedef's own name should be used.
            self._status = "<name-not-found>"
            return
        if name.startswith('_'):
            return
        if name in FORBIDDEN_NAMES:
            return
        self._pprint(node)
        self.classes.append(name)

    def visit_Typedef(self, node):
        if node.coord.file not in self.onlyin:
            return
        self._pprint(node)
        # Visit the underlying type; visit_Struct communicates back
        # through self._status when it encounters an anonymous struct.
        self._status = None
        self.visit(node.type)
        stat = self._status
        self._status = None
        if stat is None:
            return
        if stat == "<name-not-found>":
            # typedef of an anonymous struct: expose it under the typedef
            # name, subject to the usual privacy/forbidden-name filters.
            name = node.name
            if name is None or name.startswith('_'):
                return
            if name in FORBIDDEN_NAMES:
                return
            self.classes.append(name)
def pycparser_findall(filename, includes=(), defines=('XDRESS',), undefines=(),
                      extra_parser_args=(), verbose=False, debug=False,
                      builddir='build', language='c', clang_includes=()):
    """Automatically finds all API elements in a file via pycparser.

    (Documentation fix: the original docstring said "via GCC-XML", but this
    function parses with pycparser.)

    Parameters
    ----------
    filename : str
        The path to the file
    includes : list of str, optional
        The list of extra include directories to search for header files.
    defines : list of str, optional
        The list of extra macro definitions to apply.
    undefines : list of str, optional
        The list of extra macro undefinitions to apply.
    extra_parser_args : list of str, optional
        Further command line arguments to pass to the parser.
    verbose : bool, optional
        Flag to diplay extra information while describing the class.
    debug : bool, optional
        Flag to enable/disable debug mode.
    builddir : str, optional
        Location of -- often temporary -- build files.
    language : str
        Valid language flag.
    clang_includes : ignored

    Returns
    -------
    variables : list of strings
        A list of variable names to wrap from the file.
    functions : list of strings
        A list of function names to wrap from the file.
    classes : list of strings
        A list of class names to wrap from the file.
    """
    root = astparsers.pycparser_parse(filename, includes=includes, defines=defines,
                                      undefines=undefines,
                                      extra_parser_args=extra_parser_args,
                                      verbose=verbose, debug=debug,
                                      builddir=builddir)
    basename = filename.rsplit('.', 1)[0]
    # pycparser only handles C, so the sole sibling header is basename.h.
    onlyin = set([filename, basename + '.h'])
    finder = PycparserFinder(root, onlyin=onlyin, verbose=verbose)
    finder.visit()
    return finder.variables, finder.functions, finder.classes
#
# Top-level function
#
# Dispatch table mapping a parser name (as chosen by
# astparsers.pick_parser) to the corresponding findall implementation.
_finders = {
    'clang': clang_findall,
    'gccxml': gccxml_findall,
    'pycparser': pycparser_findall,
}
def findall(filename, includes=(), defines=('XDRESS',), undefines=(),
            extra_parser_args=(), parsers='gccxml', verbose=False, debug=False,
            builddir='build', language='c++', clang_includes=()):
    """Automatically find all API elements in a file.  Main entry point:
    picks a concrete parser for *language* and delegates to the matching
    backend in ``_finders``.

    Parameters
    ----------
    filename : str
        The path to the file.
    includes : list of str, optional
        Extra include directories to search for header files.
    defines : list of str, optional
        Extra macro definitions to apply.
    undefines : list of str, optional
        Extra macro undefinitions to apply.
    extra_parser_args : list of str, optional
        Further command line arguments to pass to the parser.
    parsers : str, list, or dict, optional
        The parser / AST to use for the file.  Currently 'clang', 'gccxml',
        and 'pycparser' are supported.  A string names a single parser; a
        list gives a preference order based on availability; a dict maps a
        language to such an order, e.g.
        ``{'c': ['pycparser', 'gccxml'], 'c++': ['gccxml', 'pycparser']}``.
    verbose : bool, optional
        Display extra information while describing the class.
    debug : bool, optional
        Enable/disable debug mode.
    builddir : str, optional
        Location of -- often temporary -- build files.
    language : str
        Valid language flag.
    clang_includes : list of str, optional
        clang-specific include paths.

    Returns
    -------
    variables : list of strings
        Variable names to wrap from the file.
    functions : list of strings
        Function names to wrap from the file.
    classes : list of strings
        Class names to wrap from the file.
    """
    chosen = astparsers.pick_parser(language, parsers)
    backend = _finders[chosen]
    return backend(filename, includes=includes, defines=defines,
                   undefines=undefines, extra_parser_args=extra_parser_args,
                   verbose=verbose, debug=debug, builddir=builddir,
                   language=language, clang_includes=clang_includes)
#
# Persisted Cache for great speed up
#
class AutoNameCache(object):
    """A quick persistent cache for name lists automatically found in files.

    Keys are source filenames.  Values are ``(md5-hash-of-the-file,
    finder-results)`` tuples, so a cached result is only reused while the
    file's contents are unchanged.  (The original docstring incorrectly
    described the keys as (classname, filename, kind) tuples.)
    """

    def __init__(self, cachefile=os.path.join('build', 'autoname.cache')):
        """Parameters
        -------------
        cachefile : str, optional
            Path to description cachefile.
        """
        self.cachefile = cachefile
        if os.path.isfile(cachefile):
            with io.open(cachefile, 'rb') as f:
                self.cache = pickle.load(f)
        else:
            self.cache = {}

    @staticmethod
    def _filehash(filename):
        """Return the md5 hex digest of *filename*'s current contents.

        Extracted helper: this hashing was previously duplicated in both
        isvalid() and __setitem__().
        """
        with io.open(filename, 'rb') as f:
            return md5(f.read()).hexdigest()

    def isvalid(self, filename):
        """Boolean on whether the cache value for a filename matches the
        state of the file on the system."""
        if filename not in self.cache:
            return False
        return self.cache[filename][0] == self._filehash(filename)

    def __getitem__(self, key):
        return self.cache[key][1]  # return the results of the finder only

    def __setitem__(self, key, value):
        # Store the hash of the file's current contents alongside the value
        # so isvalid() can detect later modifications.
        self.cache[key] = (self._filehash(key), value)

    def __delitem__(self, key):
        del self.cache[key]

    def dump(self):
        """Writes the cache out to the filesystem, creating the parent
        directory first if needed."""
        if not os.path.exists(self.cachefile):
            pardir = os.path.split(self.cachefile)[0]
            # Guard against a bare filename: os.makedirs('') raises.
            if pardir and not os.path.exists(pardir):
                os.makedirs(pardir)
        with io.open(self.cachefile, 'wb') as f:
            pickle.dump(self.cache, f, pickle.HIGHEST_PROTOCOL)

    def __str__(self):
        return pformat(self.cache)
#
# Plugin
#
class XDressPlugin(astparsers.ParserPlugin):
    """This plugin resolves the '*' syntax in wrapper types by parsing the
    source files prior to describing them.
    """

    # State computed in setup_basic(): the mapping of source files to scan
    # and whether each API category (variables/functions/classes) contained
    # a '*' entry.
    allsrc = varhasstar = fnchasstar = clshasstar = None

    def defaultrc(self):
        # No extra run control options beyond the base parser plugin's.
        rc = RunControl()
        rc._update(super(XDressPlugin, self).defaultrc)
        return rc

    def report_debug(self, rc):
        # Append this plugin's internal state to the base debug report.
        msg = super(XDressPlugin, self).report_debug(rc)
        msg += "Autoall:\n\n"
        msg += "allsrc = {0}\n\n".format(pformat(self.allsrc))
        msg += "varhasstar = {0}\n\n".format(pformat(self.varhasstar))
        msg += "fnchasstar = {0}\n\n".format(pformat(self.fnchasstar))
        msg += "clshasstar = {0}\n\n".format(pformat(self.clshasstar))
        return msg

    def setup(self, rc):
        """Expands variables, functions, and classes in the rc based on
        copying src filenames to tar filename and the special '*' all syntax."""
        super(XDressPlugin, self).setup(rc)
        self.setup_basic(rc)
        self.setup_heavy(rc)

    def execute(self, rc):
        # dummy
        pass

    # Helper methods

    def setup_basic(self, rc):
        """Does the easy part of setting up an autodescribe environment:
        normalizes every apiname in rc and records which source files must
        be scanned because a '*' was used."""
        # first pass -- gather and expand target
        allsrc = {}  # maps source file -> language
        varhasstar = False
        for i, var in enumerate(rc.variables):
            rc.variables[i] = var = ensure_apiname(var)
            if var.srcname == '*':
                allsrc.update(zip(var.srcfiles, [var.language] * len(var.srcfiles)))
                varhasstar = True
        fnchasstar = False
        for i, fnc in enumerate(rc.functions):
            rc.functions[i] = fnc = ensure_apiname(fnc)
            if fnc.srcname == '*':
                allsrc.update(zip(fnc.srcfiles, [fnc.language] * len(fnc.srcfiles)))
                # allsrc.update(fnc.srcfiles)
                fnchasstar = True
        clshasstar = False
        for i, cls in enumerate(rc.classes):
            rc.classes[i] = cls = ensure_apiname(cls)
            if cls.srcname == '*':
                allsrc.update(zip(cls.srcfiles, [cls.language] * len(cls.srcfiles)))
                # allsrc.update(cls.srcfiles)
                clshasstar = True
        self.allsrc = allsrc
        self.varhasstar = varhasstar
        self.fnchasstar = fnchasstar
        self.clshasstar = clshasstar

    def setup_heavy(self, rc):
        """Does the hard work of actually searching the source files."""
        print("autoall: discovering API names")
        if not self.varhasstar and not self.fnchasstar and not self.clshasstar:
            print("autoall: no API names to discover!")
            return
        allsrc = self.allsrc
        kinds = ['variables', 'functions', 'classes']
        # second pass -- find all
        allfiles = {}  # maps source file -> (variables, functions, classes)
        cachefile = os.path.join(rc.builddir, 'autoname.cache')
        autonamecache = AutoNameCache(cachefile=cachefile)
        for i, (srcfile, lang) in enumerate(allsrc.items()):
            print("autoall: searching {0}".format(srcfile))
            if autonamecache.isvalid(srcfile):
                # File unchanged since the cached run: reuse stored names.
                found = autonamecache[srcfile]
            else:
                found = findall(srcfile, includes=rc.includes, defines=rc.defines,
                                undefines=rc.undefines,
                                extra_parser_args=rc.extra_parser_args,
                                parsers=rc.parsers, verbose=rc.verbose,
                                debug=rc.debug, builddir=rc.builddir, language=lang,
                                clang_includes=rc.clang_includes)
                autonamecache[srcfile] = found
                # Persist immediately so a crash does not lose this result.
                autonamecache.dump()
            allfiles[srcfile] = found
            for k, kind in enumerate(kinds):
                if 0 < len(found[k]):
                    fstr = ", ".join([str(_) for _ in found[k]])
                    print("autoall: found {0}: {1}".format(kind, fstr))
            # Periodically clear the parser memoization cache (memory bound).
            if 0 == i % rc.clear_parser_cache_period:
                astparsers.clearmemo()
        # third pass -- replace *s: each '*' entry is expanded into one
        # apiname per discovered name, preserving its other fields and
        # de-duplicating; non-'*' entries pass through untouched.
        if self.varhasstar:
            newvars = []
            for var in rc.variables:
                if var.srcname == '*':
                    for srcfile in var.srcfiles:
                        for x in allfiles[srcfile][0]:
                            newvar = var._replace(srcname=x, tarname=x)
                            if newvar not in newvars:
                                newvars.append(newvar)
                else:
                    newvars.append(var)
            rc.variables = newvars
        if self.fnchasstar:
            newfncs = []
            for fnc in rc.functions:
                if fnc.srcname == '*':
                    for srcfile in fnc.srcfiles:
                        for x in allfiles[srcfile][1]:
                            newfnc = fnc._replace(srcname=x, tarname=x)
                            if newfnc not in newfncs:
                                newfncs.append(newfnc)
                else:
                    newfncs.append(fnc)
            rc.functions = newfncs
        if self.clshasstar:
            newclss = []
            for cls in rc.classes:
                if cls.srcname == '*':
                    for srcfile in cls.srcfiles:
                        for x in allfiles[srcfile][2]:
                            newcls = cls._replace(srcname=x, tarname=x)
                            if newcls not in newclss:
                                newclss.append(newcls)
                else:
                    newclss.append(cls)
            rc.classes = newclss
| StarcoderdataPython |
# encoding: utf-8
import pytest
import six
from bs4 import BeautifulSoup
import ckan.model as model
import ckan.tests.factories as factories
import ckan.tests.helpers as helpers
from ckan.common import config
from ckan.lib.helpers import url_for
from ckan.model.system_info import get_system_info
@pytest.fixture
def sysadmin_env():
    """Extra-environ dict authenticating requests as a fresh sysadmin."""
    sysadmin = factories.Sysadmin()
    return {"REMOTE_USER": six.ensure_str(sysadmin["name"])}
def _reset_config(app):
    """Reset config via action"""
    sysadmin = factories.Sysadmin()
    extra_environ = {"REMOTE_USER": six.ensure_str(sysadmin["name"])}
    app.post(url=url_for("admin.reset_config"), extra_environ=extra_environ)
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestConfig(object):
    """View tests to go along with 'Customizing look and feel' docs.

    Each test drives the sysadmin config form at ``admin.config``, checks
    the effect on a rendered page, and (where needed) restores defaults
    via ``_reset_config``.
    """

    def test_site_title(self, app, sysadmin_env):
        """Configure the site title"""
        # current site title
        index_response = app.get("/")
        assert "Welcome - CKAN" in index_response
        url = url_for(u"admin.config")
        # change site title
        form = {"ckan.site_title": "Test Site Title", "save": ""}
        resp = app.post(url, data=form, environ_overrides=sysadmin_env)
        # new site title
        new_index_response = app.get("/")
        assert "Welcome - Test Site Title" in new_index_response
        # reset config value
        _reset_config(app)
        reset_index_response = app.get("/")
        assert "Welcome - CKAN" in reset_index_response

    def test_main_css_list(self, app, sysadmin_env):
        """Style list contains pre-configured styles"""
        STYLE_NAMES = ["Default", "Red", "Green", "Maroon", "Fuchsia"]
        url = url_for(u"admin.config")
        config_response = app.get(url, environ_overrides=sysadmin_env)
        config_response_html = BeautifulSoup(config_response.body)
        # every <option> of the style selector must be a known style name
        style_select_options = config_response_html.select(
            "#field-ckan-main-css option"
        )
        for option in style_select_options:
            assert option.string in STYLE_NAMES

    def test_main_css(self, app, sysadmin_env):
        """Select a colour style"""
        # current style
        index_response = app.get("/")
        assert "main.css" in index_response or "main.min.css" in index_response
        url = url_for(u"admin.config")
        # set new style css
        form = {"ckan.main_css": "/base/css/red.css", "save": ""}
        resp = app.post(url, data=form, environ_overrides=sysadmin_env)
        # the POST response already renders with the new style applied
        assert "red.css" in resp or "red.min.css" in resp
        assert not helpers.body_contains(resp, "main.min.css")

    def test_tag_line(self, app, sysadmin_env):
        """Add a tag line (only when no logo)"""
        # current tagline
        index_response = app.get("/")
        assert "Special Tagline" not in index_response
        url = url_for(u"admin.config")
        # set new tagline css
        form = {"ckan.site_description": "Special Tagline", "save": ""}
        resp = app.post(url, data=form, environ_overrides=sysadmin_env)
        # new tagline not visible yet (the logo still takes its place)
        new_index_response = app.get("/")
        assert "Special Tagline" not in new_index_response
        url = url_for(u"admin.config")
        # remove logo
        form = {"ckan.site_logo": "", "save": ""}
        resp = app.post(url, data=form, environ_overrides=sysadmin_env)
        # new tagline
        new_index_response = app.get("/")
        assert "Special Tagline" in new_index_response
        # reset config value
        _reset_config(app)
        reset_index_response = app.get("/")
        assert "Special Tagline" not in reset_index_response

    def test_about(self, app, sysadmin_env):
        """Add some About tag text"""
        # current about
        about_response = app.get("/about")
        assert "My special about text" not in about_response
        # set new about
        url = url_for(u"admin.config")
        form = {"ckan.site_about": "My special about text", "save": ""}
        resp = app.post(url, data=form, environ_overrides=sysadmin_env)
        # new about
        new_about_response = app.get("/about")
        assert "My special about text" in new_about_response
        # reset config value
        _reset_config(app)
        reset_about_response = app.get("/about")
        assert "My special about text" not in reset_about_response

    def test_intro(self, app, sysadmin_env):
        """Add some Intro tag text"""
        # current intro
        intro_response = app.get("/")
        assert "My special intro text" not in intro_response
        # set new intro
        url = url_for(u"admin.config")
        form = {"ckan.site_intro_text": "My special intro text", "save": ""}
        resp = app.post(url, data=form, environ_overrides=sysadmin_env)
        # new intro
        new_intro_response = app.get("/")
        assert "My special intro text" in new_intro_response
        # reset config value
        _reset_config(app)
        reset_intro_response = app.get("/")
        assert "My special intro text" not in reset_intro_response

    def test_custom_css(self, app, sysadmin_env):
        """Add some custom css to the head element"""
        # current tagline: no <style> tag is rendered by default
        intro_response_html = BeautifulSoup(app.get("/").body)
        style_tag = intro_response_html.select("head style")
        assert len(style_tag) == 0
        # set new tagline css
        url = url_for(u"admin.config")
        form = {
            "ckan.site_custom_css": "body {background-color:red}",
            "save": "",
        }
        resp = app.post(url, data=form, environ_overrides=sysadmin_env)
        # the custom css is now emitted in exactly one <head><style> block
        new_intro_response_html = BeautifulSoup(app.get("/").body)
        style_tag = new_intro_response_html.select("head style")
        assert len(style_tag) == 1
        assert style_tag[0].text.strip() == "body {background-color:red}"
        # reset config value
        _reset_config(app)
        reset_intro_response_html = BeautifulSoup(app.get("/").body)
        style_tag = reset_intro_response_html.select("head style")
        assert len(style_tag) == 0

    # debug mode makes the templates emit snippet-boundary comments,
    # which this test uses to detect the selected homepage layout.
    @pytest.mark.ckan_config("debug", True)
    def test_homepage_style(self, app, sysadmin_env):
        """Select a homepage style"""
        # current style
        index_response = app.get("/")
        assert "<!-- Snippet home/layout1.html start -->" in index_response
        # set new style css
        url = url_for(u"admin.config")
        form = {"ckan.homepage_style": "2", "save": ""}
        resp = app.post(url, data=form, environ_overrides=sysadmin_env)
        # new style
        new_index_response = app.get("/")
        assert (
            "<!-- Snippet home/layout1.html start -->"
            not in new_index_response
        )
        assert "<!-- Snippet home/layout2.html start -->" in new_index_response
        # reset config value
        _reset_config(app)
        reset_index_response = app.get("/")
        assert (
            "<!-- Snippet home/layout1.html start -->" in reset_index_response
        )
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestTrashView(object):
    """View tests for permanently deleting datasets with Admin Trash."""

    def test_trash_view_anon_user(self, app):
        """An anon user shouldn't be able to access trash view."""
        trash_url = url_for("admin.trash")
        trash_response = app.get(trash_url)
        assert trash_response.status_code == 403

    def test_trash_view_normal_user(self, app):
        """A normal logged in user shouldn't be able to access trash view."""
        user = factories.User()
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        trash_url = url_for(controller="admin", action="trash")
        trash_response = app.get(trash_url, extra_environ=env, status=403)
        assert (
            "Need to be system administrator to administer" in trash_response
        )

    def test_trash_view_sysadmin(self, app):
        """A sysadmin should be able to access trash view."""
        user = factories.Sysadmin()
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        trash_url = url_for(controller="admin", action="trash")
        trash_response = app.get(trash_url, extra_environ=env, status=200)
        # On the purge page
        assert "form-purge-packages" in trash_response

    def test_trash_no_datasets(self, app):
        """Getting the trash view with no 'deleted' datasets should list no
        datasets."""
        factories.Dataset()  # active dataset -- must not show in the trash
        user = factories.Sysadmin()
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        trash_url = url_for(controller="admin", action="trash")
        trash_response = app.get(trash_url, extra_environ=env, status=200)
        trash_response_html = BeautifulSoup(trash_response.body)
        # it's called a 'user list' for some reason
        trash_pkg_list = trash_response_html.select("ul.user-list li")
        # no packages available to purge
        assert len(trash_pkg_list) == 0

    def test_trash_with_deleted_datasets(self, app):
        """Getting the trash view with 'deleted' datasets should list the
        datasets."""
        user = factories.Sysadmin()
        factories.Dataset(state="deleted")
        factories.Dataset(state="deleted")
        factories.Dataset()  # active dataset -- must not be listed
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        trash_url = url_for(controller="admin", action="trash")
        trash_response = app.get(trash_url, extra_environ=env, status=200)
        trash_response_html = BeautifulSoup(trash_response.body)
        # it's called a 'user list' for some reason
        trash_pkg_list = trash_response_html.select("ul.user-list li")
        # Two packages in the list to purge
        assert len(trash_pkg_list) == 2

    def test_trash_purge_deleted_datasets(self, app, sysadmin_env):
        """Posting the trash view with 'deleted' datasets, purges the
        datasets."""
        factories.Dataset(state="deleted")
        factories.Dataset(state="deleted")
        factories.Dataset()
        # how many datasets before purge
        pkgs_before_purge = model.Session.query(model.Package).count()
        assert pkgs_before_purge == 3
        trash_url = url_for("admin.trash")
        resp = app.post(
            trash_url,
            data={"purge-packages": ""},
            environ_overrides=sysadmin_env,
            status=200,
        )
        # how many datasets after purge: only the active dataset remains
        pkgs_after_purge = model.Session.query(model.Package).count()
        assert pkgs_after_purge == 1
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestAdminConfigUpdate(object):
    """Config options changed in the admin UI propagate to the API, the
    system_info store, and rendered templates."""

    def _update_config_option(self, app):
        # Helper: set ckan.site_title through the admin config form.
        sysadmin = factories.Sysadmin()
        env = {"REMOTE_USER": six.ensure_str(sysadmin["name"])}
        url = url_for(u"admin.config")
        form = {"ckan.site_title": "My Updated Site Title", "save": ""}
        return app.post(url, data=form, environ_overrides=env)

    def test_admin_config_update(self, app):
        """Changing a config option using the admin interface appropriately
        updates value returned by config_option_show,
        system_info.get_system_info and in the title tag in templates."""
        # test value before update
        # config_option_show returns default value
        before_update = helpers.call_action(
            "config_option_show", key="ckan.site_title"
        )
        assert before_update == "CKAN"
        # system_info.get_system_info returns None (nothing stored in DB yet)
        before_update = get_system_info("ckan.site_title")
        assert before_update is None
        # test value before update with default fallback
        before_update_default = get_system_info(
            "ckan.site_title", config["ckan.site_title"]
        )
        assert before_update_default == "CKAN"
        # title tag contains default value
        home_page_before = app.get("/", status=200)
        assert "Welcome - CKAN" in home_page_before
        # update the option
        self._update_config_option(app)
        # test config_option_show returns new value after update
        after_update = helpers.call_action(
            "config_option_show", key="ckan.site_title"
        )
        assert after_update == "My Updated Site Title"
        # system_info.get_system_info returns new value
        after_update = get_system_info("ckan.site_title")
        assert after_update == "My Updated Site Title"
        # stored value now wins over the supplied default
        after_update_default = get_system_info(
            "ckan.site_title", config["ckan.site_title"]
        )
        assert after_update_default == "My Updated Site Title"
        # title tag contains new value
        home_page_after = app.get("/", status=200)
        assert "Welcome - My Updated Site Title" in home_page_after
| StarcoderdataPython |
3411626 | <filename>tests/category/collection_test.py
from dataclasses import asdict, dataclass, field
from typing import Generic, TypeVar
from category import Vector
T = TypeVar("T")
def test_vector_init():
    """Vector construction from list/tuple/nothing and equality semantics."""
    assert [] == Vector() == []
    assert [] == Vector([]) == []
    assert [] == Vector(()) == []
    # assert [] == Vector({}) == []  # Syntax Error
    vector = Vector([0, 1, 2])
    assert Vector is type(vector)
    assert 0 == vector[0] == 0
    # Equal to a list with the same elements, but not to tuple/set.
    assert [0, 1, 2] == vector == [0, 1, 2]
    assert (0, 1, 2) != vector != (0, 1, 2)
    assert {0, 1, 2} != vector != {0, 1, 2}
    # Vector is iterable, so it works in comprehensions.
    assert [0, 2, 4] == Vector([element * 2 for element in vector]) == [0, 2, 4]


def test_vector_add():
    """``+`` returns a new Vector; both operands stay untouched."""
    # immutable + mutable
    vector = Vector([0, 1, 2])
    im_vector = vector + [3]
    assert im_vector is not vector
    assert Vector is type(im_vector)
    assert [0, 1, 2, 3] == im_vector == [0, 1, 2, 3]
    assert [0, 1, 2] == vector == [0, 1, 2]
    assert im_vector != vector
    # immutable + immutable
    other_vector = Vector([3, 4, 5])
    ii_vector = vector + other_vector
    assert ii_vector is not vector
    assert ii_vector is not other_vector
    assert Vector is type(ii_vector)
    assert [0, 1, 2, 3, 4, 5] == ii_vector == [0, 1, 2, 3, 4, 5]
    assert [0, 1, 2] == vector == [0, 1, 2]
    assert [3, 4, 5] == other_vector == [3, 4, 5]
    assert ii_vector != vector
    assert ii_vector != other_vector


def test_vector_add_warning_case():
    """list + Vector uses list.__add__, so the result degrades to list."""
    # mutable + immutable
    vector = Vector([0, 1, 2])
    mi_vector = [3] + vector
    assert mi_vector is not vector
    assert list is type(mi_vector)
    assert Vector is not type(mi_vector)
    assert [3, 0, 1, 2] == mi_vector == [3, 0, 1, 2]
    assert [0, 1, 2] == vector == [0, 1, 2]
    assert mi_vector != [3]
    assert mi_vector != vector
# Syntax Error
# def test_immutable_vector_setitem():
# vector = Vector([0, 1, 2])
# try:
# vector[1] = 9
# assert False
# except TypeError:
# assert True
# Syntax Error
# def test_immutable_vector_delitem():
# vector = Vector([0, 1, 2])
# try:
# del vector[1]
# assert False
# except TypeError:
# assert True
def test_vector_append():
    """append() returns a new Vector instead of mutating in place."""
    vector = Vector([0, 1, 2])
    appended_vector = vector.append(3)
    assert appended_vector is not vector
    assert Vector is type(vector)
    assert Vector is type(appended_vector)
    assert [0, 1, 2, 3] == appended_vector == [0, 1, 2, 3]


def test_vector_extend():
    """extend() returns a new Vector; neither input is mutated."""
    vector = Vector([0, 1, 2])
    extend_vector = [3, 4, 5]
    extended_vector = vector.extend(extend_vector)
    assert extended_vector is not vector
    assert extended_vector is not extend_vector
    assert Vector is type(vector)
    assert Vector is type(extended_vector)
    assert [0, 1, 2, 3, 4, 5] == extended_vector == [0, 1, 2, 3, 4, 5]


def test_vector_insert():
    """insert() returns a new Vector with the element spliced in."""
    vector = Vector([0, 1, 2])
    inserted_vector = vector.insert(1, 9)
    assert inserted_vector is not vector
    assert Vector is type(vector)
    assert Vector is type(inserted_vector)
    assert [0, 9, 1, 2] == inserted_vector == [0, 9, 1, 2]
    assert [0, 1, 2] == vector == [0, 1, 2]


def test_vector_remove():
    """remove() returns a new Vector without the first matching value."""
    vector = Vector([0, 1, 2])
    removed_vector = vector.remove(1)
    assert removed_vector is not vector
    assert Vector is type(vector)
    assert Vector is type(removed_vector)
    assert [0, 2] == removed_vector == [0, 2]
    assert [0, 1, 2] == vector == [0, 1, 2]


def test_vector_pop():
    """pop() returns the remaining Vector (not the popped element)."""
    vector = Vector([0, 1, 2])
    poped_vector = vector.pop(1)
    assert poped_vector is not vector
    assert Vector is type(vector)
    assert Vector is type(poped_vector)
    assert [0, 2] == poped_vector == [0, 2]
    assert [0, 1, 2] == vector == [0, 1, 2]


def test_vector_index():
    """index() behaves like list.index with start/stop bounds."""
    vector = Vector([0, 1, 2])
    index = vector.index(1, 0, 2)
    assert Vector is type(vector)
    assert 1 == index
    assert [0, 1, 2] == vector == [0, 1, 2]


def test_vector_clear():
    """clear() is disallowed on the immutable Vector (raises TypeError)."""
    vector = Vector([0, 1, 2])
    try:
        vector.clear()
        assert False
    except TypeError:
        assert True
def test_vector_count():
    """count() reports occurrences and does not mutate the vector."""
    vector = Vector([0, 1, 2])
    assert 1 == vector.count(0)
    # BUG FIX: the original `assert type(vector)` is always truthy (a type
    # object is truthy); assert the concrete type as the sibling tests do.
    assert Vector is type(vector)
    assert [0, 1, 2] == vector == [0, 1, 2]
def test_vector_sort():
    """sort() returns a new sorted Vector; the original is untouched."""
    vector = Vector([0, 1, 2])
    sorted_vector = vector.sort(reverse=True)
    assert sorted_vector is not vector
    assert Vector is type(vector)
    assert Vector is type(sorted_vector)
    assert [2, 1, 0] == sorted_vector == [2, 1, 0]
    assert [0, 1, 2] == vector == [0, 1, 2]


def test_vector_reverse():
    """reverse() returns a new reversed Vector; the original is untouched."""
    vector = Vector([0, 1, 2])
    reversed_vector = vector.reverse()
    assert reversed_vector is not vector
    assert Vector is type(vector)
    assert Vector is type(reversed_vector)
    assert [2, 1, 0] == reversed_vector == [2, 1, 0]
    assert [0, 1, 2] == vector == [0, 1, 2]


def test_vector_copy():
    """copy() returns an equal but distinct Vector."""
    vector = Vector([0, 1, 2])
    copied_vector = vector.copy()
    assert copied_vector is not vector
    assert Vector is type(vector)
    assert Vector is type(copied_vector)
    assert copied_vector == vector == copied_vector
    assert [0, 1, 2] == copied_vector == [0, 1, 2]
    assert [0, 1, 2] == vector == [0, 1, 2]


def test_vector_empty():
    """is_empty() is False for a populated vector."""
    vector = Vector([0, 1, 2])
    assert False is vector.is_empty()


def test_vector_non_empty():
    """non_empty() is True for a populated vector."""
    vector = Vector([0, 1, 2])
    assert True is vector.non_empty()


def test_immutable_vector_size():
    """size() mirrors len()."""
    vector = Vector([0, 1, 2])
    assert 3 == vector.size()


def test_immutable_vector_map():
    """map() returns a new mapped collection."""
    vector = Vector([0, 1, 2])
    mapped_vector = vector.map(lambda x: x * 2)
    assert mapped_vector is not vector
    assert [0, 2, 4] == mapped_vector


def test_immutable_vector_redece():
    """reduce() folds left-to-right.

    NOTE(review): 'redece' is a typo for 'reduce' (renaming would change the
    collected test id), and reducing [0, 1, 2] with multiplication always
    yields 0, so this test is weak -- any zero-producing fold passes.
    """
    vector = Vector([0, 1, 2])
    reduced_vector = vector.reduce(lambda left, right: left * right)
    assert reduced_vector is not vector
    assert 0 == reduced_vector


def test_immutable_vector_dataclass():
    """Vector round-trips through dataclasses.asdict / reconstruction."""
    @dataclass(frozen=True)
    class SeqEntity(Generic[T]):
        value: Vector[T] = field(default_factory=Vector[T])
    entity = SeqEntity[T](value=Vector())
    dict_entity = asdict(entity)
    entity_from_dict = SeqEntity[T](**dict_entity)
    assert {"value": []} == dict_entity == {"value": []}
    assert entity_from_dict == entity == entity_from_dict
    number_entity = SeqEntity[T](value=Vector([0, 1, 2]))
    dict_number_entity = asdict(number_entity)
    number_entity_from_dict = SeqEntity[T](**dict_number_entity)
    assert {"value": [0, 1, 2]} == dict_number_entity == {"value": [0, 1, 2]}
    assert number_entity_from_dict == number_entity == number_entity_from_dict
| StarcoderdataPython |
5032084 | <filename>cmdline/EncripionV2.py
alphabet = 'abcdefghijklmnopqrstuvwxyz'
keys = [3, 1, 4, 1, 5]


def encrypt(message, key_seq=(3, 1, 4, 1, 5)):
    """Encrypt *message* with a rotating-key Caesar cipher.

    Quirks preserved from the original script (for ciphertext compatibility):
      - the key index advances for EVERY input character, including ones
        outside a-z (which are copied through unchanged);
      - the index wraps with ``% 4``, so only ``key_seq[0:4]`` are ever
        used -- the fifth key is dead;
      - only lowercase a-z are shifted; uppercase passes through.
    """
    i = 0
    out = []
    for character in message:
        i = (i + 1) % 4  # NOTE: % 4, not % len(key_seq) -- see docstring
        if character in alphabet:
            position = alphabet.find(character)
            out.append(alphabet[(position + key_seq[i]) % 26])
        else:
            out.append(character)
    return ''.join(out)


if __name__ == "__main__":
    # Original behavior: prompt, encrypt, print with the same wording.
    message = input("Enter A Message:")
    print("Your Message is ", encrypt(message, keys))
| StarcoderdataPython |
163791 | #!/usr/bin/env python3
"""
User management script for the wireguard server
"""
import os
import io
import stat
import shutil
import sys
import platform
import argparse
import hashlib
import crypt
import logging
import secrets
import string
import subprocess
import pathlib
class OperationError(Exception):
    """Raised when a management operation cannot be completed
    (e.g. an empty/invalid list, or no users defined in the config)."""
class Settings(object):
    """Parse and hold the command-line options for the management script.

    The parsed ``argparse.Namespace`` is exposed as ``self.arg`` (and also
    returned by :meth:`get_arg`); every subclass reads options through it.
    """

    def __init__(self):
        self.arg = self.get_arg()

    def get_arg(self):
        """Build the argument parser, parse ``sys.argv`` and return the
        resulting namespace (also stored on ``self.arg``)."""
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument('-c', '--config_path', type=str, default='./config.yaml', required=False, help='Specify config path')
        self.parser.add_argument('-crd', '--credentials_path', type=str, default='./credentials.txt', required=False, help='Specify credentials path')
        self.parser.add_argument('-lg', '--log_path', type=str, default='./mgt_users.log', required=False, help='Specify log path')
        self.parser.add_argument('-bk', '--config_backup', type=str, default='./config.yaml.bk', required=False, help='Specify config backup path')
        self.parser.add_argument('-cbk', '--credentials_backup', type=str, required=False, help='Specify credentials backup path')
        self.parser.add_argument('-v', '--verbose', action='store_true', default=False, required=False, help='Enable verbose output')
        self.parser.add_argument('-f', '--force', action='store_true', default=False, required=False, help='Force a command without checks')
        self.parser.add_argument('-e', '--encrypt', action='store_true', default=False, required=False, help='Encrypt credentials file')
        self.parser.add_argument('-d', '--decrypt', action='store_true', default=False, required=False, help='Decrypt credentials file')
        self.parser.add_argument('-b', '--backup', action='store_true', default=False, required=False, help='Backup configurations and credentials files')
        self.parser.add_argument('-l', '--list_users', action='store_true', default=False, required=False, help='List users')
        self.parser.add_argument('-s', '--show_password', nargs='?', const='', required=False, help='Show user password, if empty shows for all users')
        self.parser.add_argument('-a', '--add_user', type=str, required=False, help='Add user')
        self.parser.add_argument('-r', '--rm_user', action='append', type=str, required=False, help='Remove user, specify multiple times to remove multimple users')
        self.parser.add_argument('-p', '--change_password', type=str, required=False, help='Change user password')
        self.parser.add_argument('-x', '--random_password', action='store_true', default=False, required=False, help='Set random password')
        self.parser.add_argument('-n', '--num_chars_password', type=int, default=24, required=False, help='Specify number of charecters for auto-generated passwords')
        self.parser.add_argument('-t', '--type_password', type=str, choices=(
            'Alpha',
            'Num',
            'Sym',
            'AlphaNum',
            'AlphaSym',
            'NumSym',
            'AlphaNumSym',
        ), default='AlphaNum', nargs='?', const='AlphaNum', required=False, help='Specify type of password to generate')
        # FIX: removed a redundant parse_known_args() call whose result was
        # discarded; parse_args() below re-parses sys.argv (and still exits
        # on unknown arguments, as before).
        self.arg = self.parser.parse_args()
        return self.arg
class Basics(Settings):
    """Shared helpers: interactive prompts, file reading and validation.

    NOTE(review): several methods log via ``self.log`` / ``self.logger``,
    which are only created by the ``Logging`` subclass -- ``Basics`` is not
    meant to be instantiated directly.
    """

    def __init__(self):
        Settings.__init__(self)

    def continue_input(self, prompt=True):
        """Ask a yes/no question until a valid answer is given.

        Returns True for true/yes/y, False for false/no/n.
        """
        yes = False
        while not yes:
            if prompt:
                self.log.info("continue?")
            answer = input("(y/n): ")
            self.log.info(answer)
            if answer.lower() in ["true", "yes", "y"]:
                yes = True
            elif answer.lower() in ["false", "no", "n"]:
                yes = False
                break
            else:
                self.logger.error(f"invalid response, try again: '{answer}'")
        return yes

    def read_file(self, p):
        """Return the lines of *p*, or None when the file is missing/invalid."""
        if self.valid_file(p, logger=True):
            with open(p) as f:
                lines = f.readlines()
            return lines

    def print_list(self, lst):
        """Print one element per line; raise OperationError for a falsy list.

        FIX: removed the dead ``elif len(lst) == 0`` branch -- an empty list
        is falsy, so it was unreachable.
        """
        if not lst:
            raise OperationError("invalid list")
        for e in lst:
            print(e)

    def is_type_path(self, entry):
        """Heuristic: True when *entry* looks like a filesystem path
        (contains a separator or carries a file suffix)."""
        spl = entry.split(os.sep)
        s = pathlib.PurePath(entry).suffix
        return len(spl) > 1 or bool(s)

    def valid_file(self, path, logger=False):
        """Return True iff *path* exists and is a regular file.

        Returns False for missing/wrong type, None when the existence check
        itself fails (callers treat None as falsy).
        """
        try:
            exists = os.path.exists(path)
        except PermissionError:
            self.logger.error(f"permission denied: '{path}'")
            return
        except Exception:
            # FIX: was a bare ``except`` logging a copy-pasted
            # "failed to copy" message.
            self.logger.error(f"failed to access: '{path}'")
            return
        if exists:
            if os.path.isfile(path):
                return True
            else:
                if logger:
                    # BUG FIX: previously logged the undefined name
                    # ``source``, raising NameError on this path.
                    self.logger.error(f"not a file: '{path}'")
                return False
        else:
            if logger:
                self.logger.error(f"does not exist: '{path}'")
            return False

    def valid_credentials_file(self, path):
        """True when *path* exists and every non-blank line is exactly two
        whitespace-separated fields (``<user> <password>``)."""
        if self.valid_file(path):
            try:
                lines = self.read_file(path)
                valid_lines = list()
                for l in lines:
                    if l.isspace():
                        continue
                    l = l.rstrip('\r\n')
                    l = l.split()
                    if len(l) == 2:
                        valid_lines.append(True)
                    else:
                        return False
                # NOTE: an empty file yields all([]) == True, as before.
                if all(valid_lines):
                    return True
            except ValueError:
                return False
        else:
            return False
class Logging(Basics):
    """Configure console + file logging; exposes the logger as
    ``self.logger`` (and ``self.log``)."""

    def __init__(self):
        Basics.__init__(self)
        # --verbose raises the logger level to DEBUG.
        self.log_level = "DEBUG" if self.arg.verbose else "INFO"
        self.logger = self.setup()
        # The log file is created next to the config file.
        self.work_dir = pathlib.Path(self.arg.config_path).parent
        self.save()

    def setup(self):
        """Create the stdout logger used throughout the script."""
        logging.basicConfig(
            format='[%(levelname)s] %(message)s',
            level=logging.INFO,
            stream=sys.stdout)
        self.log = logging.getLogger(__name__)
        self.log.setLevel(self.log_level)
        return self.log

    def color(self):
        """Switch console output to colored logs.

        NOTE(review): ``coloredlogs`` (third-party) is not imported in the
        visible file header; calling this raises NameError unless it is
        imported elsewhere in the file -- confirm.
        """
        coloredlogs.install(fmt='[%(levelname)s] %(message)s', level=self.log_level, logger=self.log)

    def save(self):
        """Attach a timestamped file handler writing to --log_path."""
        log_file = self.work_dir.joinpath(pathlib.Path(self.arg.log_path))
        formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
        fh = logging.FileHandler(log_file)
        fh.setLevel(self.log_level)
        fh.setFormatter(formatter)
        self.log.addHandler(fh)
        return self.log
class Setup(Logging):
    """Environment checks and best-effort pip installation of the script's
    runtime dependencies."""

    def __init__(self):
        Logging.__init__(self)
        # Probe command used to detect a pip3 installation.
        self.pip = ['pip3', '--version']

    def execute(self, cmd):
        """Spawn *cmd* with merged stdout/stderr; returns None when the
        binary is missing."""
        try:
            return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
        except FileNotFoundError:
            self.logger.error("command not found: '{c}'".format(c=" ".join(cmd)))

    def exit_code(self, cmd, logger=None):
        """Run *cmd* and return its exit status (-1 when it could not spawn).

        ``logger`` is unused but kept for call-site compatibility.
        """
        sp = self.execute(cmd)
        if sp is None:
            return -1
        status = sp.wait()
        out, err = sp.communicate()
        self.logger.debug("console: {o}".format(o=out.rstrip('\r\n')))
        # stderr is merged into stdout above, so ``err`` is always None here.
        self.logger.debug("error: {e}".format(e=err))
        return status

    def install(self, required_pkgs):
        """Install any of *required_pkgs* that are missing, prompting first
        unless --force is given."""
        if not self.arg.force:
            self.check_linux()
        local_bin = os.path.join(os.getenv("HOME"), ".local/bin")

        def is_installed(requirement):
            # Local import: pkg_resources is only needed here.
            import pkg_resources
            try:
                pkg_resources.require(requirement)
            except pkg_resources.ResolutionError:
                return False
            else:
                return True

        def is_local_path_set():
            # True when ~/.local/bin is already on PATH.
            for p in os.getenv("PATH").split(":"):
                if p == local_bin:
                    return True
            return False

        if not is_local_path_set():
            os.environ["PATH"] += os.pathsep + local_bin
        for pkg in required_pkgs:
            if not is_installed(pkg):
                if not self.arg.force:
                    self.check_pip()
                p = pkg.split(">=")
                self.logger.info(f"installing: '{p[0]}'")
                if not self.arg.force:
                    if self.continue_input():
                        subprocess.run(['pip3', 'install', pkg])
                    else:
                        sys.exit()
                else:
                    subprocess.run(['pip3', 'install', pkg])

    def check_pip(self):
        """Exit the script unless pip3 is available."""
        if self.exit_code(self.pip) == 0:
            self.logger.debug("Pip installation found")
        else:
            self.logger.error("Pip is not installed!")
            sys.exit()

    def check_linux(self):
        """Warn (and optionally abort) when not running on Linux."""
        sys_arch = platform.system()
        if sys_arch == "Linux":
            self.logger.debug(f"Running on: '{sys_arch}'")
            # FIX: removed the unused local ``is_linux``.
            return True
        else:
            self.logger.warning(f"This script has not been tested on '{sys_arch}'")
            if not self.continue_input():
                sys.exit()
class Operations(Logging):
    """Load/update the YAML proxy config and provide hashing/copy helpers.

    ``self.hashes`` is the list of ``user:bcrypt-hash`` strings under
    ``auth.basic.users``; ``self.users`` is the parsed username list.
    """

    def __init__(self):
        Logging.__init__(self)
        # ruamel.yaml round-trip loader so comments/ordering in the config
        # survive a rewrite.
        self.yaml = YAML(typ="rt", pure=True)
        self.yaml.default_flow_style = False
        self.yaml.allow_duplicate_keys = True
        self.yaml.allow_unicode = True
        self.yaml.indent(mapping=2, sequence=2, offset=2)
        self.yaml.width = 1000
        self.config = self.load_config()
        # BUG FIX: the None check must run BEFORE the config is dereferenced;
        # previously a failed load crashed with AttributeError on .get().
        if self.config is None:
            self.logger.error("Invalid configuration")
            sys.exit()
        if self.config.get("auth").get("basic").get("users") is None:
            self.config["auth"]["basic"]["users"] = []
        self.hashes = self.config.get("auth").get("basic").get("users")
        try:
            self.users = self.get_users()
        except OperationError:
            self.users = []

    def load_config(self):
        """Read the YAML config, creating a skeleton file first if absent.

        Returns the parsed document, or None when the file is unreadable.
        """
        if not self.valid_file(self.arg.config_path, logger=False):
            # FIX: the log message was missing its closing quote.
            self.logger.info(f"creating config file: '{self.arg.config_path}'")
            self.yaml.dump({
                'auth': {
                    'basic': {
                        'users': None,
                    }
                }},
                self.work_dir.joinpath(pathlib.Path(self.arg.config_path)))
        try:
            with open(self.arg.config_path, "r") as f:
                loaded_config = self.yaml.load(f)
        except PermissionError:
            self.logger.error(f"permission denied: '{self.arg.config_path}'")
            return
        except Exception:
            # FIX: message corrected from the copy-pasted "failed to copy".
            self.logger.error(f"failed to read: '{self.arg.config_path}'")
            return
        return loaded_config

    def update_config(self):
        """Write ``self.config`` back to disk; tell the operator to restart
        the server when the file content actually changed."""
        out = pathlib.Path(self.arg.config_path)
        try:
            in_hash = self.get_file_hash(self.arg.config_path, algo="md5")
            self.yaml.dump(self.config, out)
            out_hash = self.get_file_hash(self.arg.config_path, algo="md5")
            if not self.is_hash_same(in_hash, out_hash) and in_hash is not None:
                self.logger.info("Config Updated. You must restart the Wireguard Access Server for changes to take effect")
        except PermissionError:
            self.logger.error(f"permission denied: '{out}'")
            return
        except Exception:
            self.logger.error(f"failed to write: '{out}'")
            return

    def get_users(self):
        """Return usernames parsed from the ``user:hash`` entries; raise
        OperationError when none are defined."""
        if self.hashes:
            return [u.split(":")[0] for u in self.hashes]
        raise OperationError("No users defined!")

    def copy_file(self, source, target):
        """Copy *source* to *target* (metadata included) and verify the copy
        via md5; True on success, False/None otherwise."""
        success = False
        if self.valid_file(source):
            try:
                shutil.copy2(source, target)
            except PermissionError:
                self.logger.error(f"permission denied: '{source}'")
                return
            except Exception:
                self.logger.error(f"failed to copy: '{source}'")
                return
            # FIX: removed the unused ``st = os.stat(source)``.
            if self.is_hash_same(self.get_file_hash(source, algo="md5"), self.get_file_hash(target, algo="md5"), logger=True):
                success = True
                self.logger.debug("backup of '{s}' saved at '{t}' with hash '{h}'".format(s=source, t=target, h=self.get_file_hash(source, algo="md5")))
            else:
                self.logger.error(f"error backing up: '{source}'")
        return success

    def get_file_hash(self, path, algo="blake"):
        """Return the hex digest of *path* using blake2b or md5.

        Returns None when the file is missing/unreadable.  Raises ValueError
        for an unknown *algo* (FIX: previously an undefined-name NameError).
        """
        if algo == "blake":
            file_hash = hashlib.blake2b()
        elif algo == "md5":
            file_hash = hashlib.md5()
        else:
            raise ValueError(f"unknown hash algorithm: '{algo}'")
        if self.valid_file(path):
            try:
                with open(path, "rb") as f:
                    chunk = f.read(8192)
                    while chunk:
                        file_hash.update(chunk)
                        chunk = f.read(8192)
            except PermissionError:
                self.logger.error(f"permission denied: '{path}'")
                return
            except Exception:
                self.logger.error(f"failed to read: '{path}'")
                return
            return file_hash.hexdigest()

    def is_hash_same(self, source_hash, target_file_hash, logger=False):
        """True when the two digests are equal (both may be None)."""
        if source_hash == target_file_hash:
            self.logger.debug(f"hash match: '{source_hash}'")
            return True
        if logger:
            self.logger.warning(f"hash '{source_hash}' does not match '{target_file_hash}'")
        return False
class Crypt(Operations):
    def __init__(self):
        """Work out which on-disk form of the credentials file is current.

        Probes, in order: ``<path>.aes`` next to the given path, a leftover
        ``<basename>.plain`` scratch file, the given path itself as an AES
        container, and finally the given path as plaintext.  Sets:
          - ``self.credentials``   -- the file operations read from,
          - ``self.encrypted_path`` / ``self.unencrypted_path`` -- both forms,
          - ``self.backup_source`` -- what :meth:`run_backup` copies.
        If nothing exists, offers to create an empty encrypted store.
        """
        Operations.__init__(self)
        self.cred_pwd = str()      # cached credentials-file password
        self.pwd_entered = False   # True once the user has been prompted
        encrypted_name = "{b}.aes".format(b=os.path.basename(self.arg.credentials_path))
        encrypted_path = os.path.join(os.path.dirname(self.arg.credentials_path), encrypted_name)
        # NOTE(review): unlike encrypted_path, this drops the directory
        # component, so the .plain scratch file lands in the CWD -- confirm
        # that is intended.
        unencrypted_path = "{b}.plain".format(b=os.path.basename(self.arg.credentials_path))
        if self.is_aes(encrypted_path, logger=True):
            # <path>.aes exists: it is authoritative.
            self.encrypted_path = encrypted_path
            self.credentials = self.encrypted_path
            self.unencrypted_path = self.arg.credentials_path
            self.backup_source = encrypted_path
        elif not self.is_aes(unencrypted_path, logger=False) and self.valid_file(unencrypted_path, logger=False):
            # A plaintext scratch file was left behind (e.g. after --decrypt).
            self.encrypted_path = self.arg.credentials_path
            self.credentials = self.encrypted_path
            self.unencrypted_path = unencrypted_path
            self.backup_source = unencrypted_path
        elif self.is_aes(self.arg.credentials_path, logger=False):
            # The given path itself is an AES container.
            self.encrypted_path = self.arg.credentials_path
            self.credentials = self.encrypted_path
            self.unencrypted_path = unencrypted_path
            self.backup_source = self.arg.credentials_path
        elif self.valid_file(self.arg.credentials_path, logger=False):
            # The given path is a plaintext credentials file.
            self.encrypted_path = encrypted_path
            self.credentials = self.arg.credentials_path
            self.unencrypted_path = self.credentials
            self.backup_source = self.arg.credentials_path
        else:
            # Nothing found: offer to create an empty encrypted store.
            self.logger.error(f"credentials file not found, create?")
            if self.continue_input(prompt=False):
                self.encrypted_path = encrypted_path
                self.credentials = self.encrypted_path
                self.unencrypted_path = self.arg.credentials_path
                self.backup_source = encrypted_path
                try:
                    with open(self.unencrypted_path, "w") as f:
                        f.write(str())
                    self.encrypt_file(self.encrypted_path, self.unencrypted_path)
                except:
                    self.logger.error(f"failed to create credentials file: '{self.unencrypted_path}'")
                    sys.exit()
            else:
                sys.exit()
def run_backup(self):
config_default = "{b}.bk".format(b=os.path.basename(self.arg.config_path))
config_backup_path = self.arg.config_backup if self.arg.config_backup else config_default
credentials_default = "{b}.bk".format(b=os.path.basename(self.backup_source))
credentials_backup_path = self.arg.credentials_backup if self.arg.credentials_backup else credentials_default
if self.valid_file(self.arg.config_path, logger=True):
conf_copied = self.copy_file(self.arg.config_path, config_backup_path)
config_hash = self.get_file_hash(self.arg.config_path, algo="md5")
config_backup_hash = self.get_file_hash(config_backup_path, algo="md5")
if config_hash == None:
self.logger.error(f"backup failed: {self.arg.config_path}")
elif self.is_hash_same(config_hash, config_backup_hash):
self.logger.info(f"Succesfuly backed up '{self.arg.config_path}' to '{config_backup_path}'")
else:
self.logger.error(f"backup failed: {self.arg.config_path}")
if self.valid_file(self.backup_source, logger=True):
cred_copied = self.copy_file(self.backup_source, credentials_backup_path)
cred_hash = self.get_file_hash(self.backup_source, algo="md5")
cred_backup_hash = self.get_file_hash(credentials_backup_path, algo="md5")
if cred_hash == None:
self.logger.error(f"backup failed: {self.backup_source}")
elif self.is_hash_same(cred_hash, cred_backup_hash):
self.logger.info(f"Succesfuly backed up '{self.backup_source}' to '{credentials_backup_path}'")
else:
self.logger.error(f"backup failed: {self.backup_source}")
def is_aes(self, file_path, logger=False):
AESBlockSize = 16
bufferSize = 64 * 1024
# validate bufferSize
if bufferSize % AESBlockSize != 0:
if logger:
self.logger.error("Buffer size must be a multiple of AES block size")
return False
if self.valid_file(file_path):
inputLength = os.stat(file_path).st_size
try:
fIn = io.open(file_path, "rb")
except PermissionError:
self.logger.error(f"permission denied: '{file_path}'")
return
except:
self.logger.error(f"failed to copy: '{file_path}'")
return
fdata = fIn.read(3)
# check if file is in AES Crypt format (also min length check)
if (fdata != bytes("AES", "utf8") or inputLength < 136):
if logger:
self.logger.error(f"File is corrupted or not an AES Crypt: '{file_path}'")
return False
# check if file is in AES Crypt format, version 2
fdata = fIn.read(1)
if len(fdata) != 1:
if logger:
self.logger.error(f"File is corrupted: '{file_path}'")
return False
if fdata != b"\x02":
if logger:
self.logger.error(f"Incompatible AES Crypt format, must be version 2: '{file_path}'")
return False
# skip reserved byte
fIn.read(1)
# skip all the extensions
while True:
fdata = fIn.read(2)
if len(fdata) != 2:
if logger:
self.logger.error(f"File is corrupted: '{file_path}'")
return False
break
if fdata == b"\x00\x00":
break
fIn.read(int.from_bytes(fdata, byteorder="big"))
# read external iv
iv1 = fIn.read(16)
fIn.close()
if len(iv1) != 16:
if logger:
self.logger.error(f"File is corrupted: '{file_path}'")
return False
else:
return False
if logger:
self.logger.debug(f"Valid AES file: '{file_path}'")
return True
    def passprompt(self, prompt, out = sys.stdout):
        """Prompt for a password on the terminal, echoing ``*`` per keystroke.

        Reads unbuffered characters via the third-party ``readchar`` module;
        Enter (CR) finishes, backspace (^H or DEL) edits in place.
        """
        out.write(prompt); out.flush()
        password = ""
        while True:
            ch = readchar.readchar()
            if ch == '\r':
                print('')
                break
            # Account for backspacing: erase the echoed '*' and drop a char.
            elif ch == '\b' or ch == '\x7f':
                out.write('\b \b')
                password = password[0:len(password)-1]
                out.flush()
            else:
                password += ch
                out.write('*')
                out.flush()
        return password
def prompt_credentials(self):
if not self.pwd_entered:
self.cred_pwd = self.passprompt("Credentials file password: ")
self.pwd_entered = True
def gen_password(self):
if self.arg.type_password == "Alpha":
password = ''.join((secrets.choice(string.ascii_letters) for i in range(self.arg.num_chars_password)))
elif self.arg.type_password == "<PASSWORD>":
password = ''.join((secrets.choice(string.digits) for i in range(self.arg.num_chars_password)))
elif self.arg.type_password == "Sym":
password = ''.join((secrets.choice(string.punctuation) for i in range(self.arg.num_chars_password)))
elif self.arg.type_password == "<PASSWORD>":
password = ''.join((secrets.choice(string.ascii_letters + string.digits) for i in range(self.arg.num_chars_password)))
elif self.arg.type_password == "Alpha<PASSWORD>":
password = ''.join((secrets.choice(string.ascii_letters + string.punctuation) for i in range(self.arg.num_chars_password)))
elif self.arg.type_password == "<PASSWORD>":
password = ''.join((secrets.choice(string.digits + string.punctuation) for i in range(self.arg.num_chars_password)))
elif self.arg.type_password == "<PASSWORD>":
password = ''.join((secrets.choice(string.ascii_letters + string.digits + string.punctuation) for i in range(self.arg.num_chars_password)))
else:
self.logger.error(f"unknown password type: '{self.arg.type_password}'")
sys.exit()
return password
    def gen_pass_hash(self, pwd):
        """Hash *pwd* with passlib's bcrypt: cost factor 14, "$2y$" ident
        (presumably for compatibility with htpasswd-style consumers --
        confirm against the proxy's verifier)."""
        hashed = bcrypt.using(rounds=14, ident="2y").hash(pwd)
        return hashed
def verify_pwd(self, pwd, pwd_hash):
match = False
while not match:
match = bcrypt.verify(pwd, pwd_hash)
if not match:
pwd = self.passprompt("incorrect password, try again: ")
else:
match = True
return match
    def encrypt_file(self, crypt_file, plain_file):
        """Encrypt *plain_file* into *crypt_file* with pyAesCrypt, then
        delete the plaintext.  Prompts for the password if not yet cached."""
        bufferSize = 64 * 1024
        if self.valid_file(plain_file, logger=True):
            self.logger.debug(f"encrypting: '{plain_file}'")
            self.prompt_credentials()
            try:
                pyAesCrypt.encryptFile(plain_file, crypt_file, self.cred_pwd, bufferSize)
            except ValueError:
                self.logger.error(f"Failed to encrypt: '{plain_file}'")
                return
            if self.valid_file(plain_file):
                # Plaintext must not linger on disk once encryption succeeded.
                self.logger.debug(f"deleting: '{plain_file}'")
                os.remove(plain_file)
            else:
                self.logger.error(f"encryption failed: '{plain_file}'")
    def decrypt_file(self, crypt_file, plain_file):
        """Decrypt *crypt_file* into *plain_file* with pyAesCrypt, then
        delete the encrypted container.  Prompts for the password if needed."""
        bufferSize = 64 * 1024
        if self.is_aes(crypt_file):
            self.logger.debug(f"decrypting: '{crypt_file}'")
            self.prompt_credentials()
            try:
                pyAesCrypt.decryptFile(crypt_file, plain_file, self.cred_pwd, bufferSize)
            except ValueError:
                # pyAesCrypt raises ValueError for both wrong password and
                # corrupted input.
                self.logger.error("Invalid password, or file is corrupted")
                return
            if self.valid_file(crypt_file):
                self.logger.debug(f"deleting: '{crypt_file}'")
                os.remove(crypt_file)
            else:
                self.logger.error(f"decryption failed: '{crypt_file}'")
def open_encrypted_path(self):
bufferSize = 64 * 1024
fDec = io.BytesIO()
if self.is_aes(self.credentials):
encFileSize = os.stat(self.credentials).st_size
self.logger.debug(f"opening: '{self.credentials}'")
self.prompt_credentials()
with open(self.credentials, "rb") as fIn:
try:
pyAesCrypt.decryptStream(fIn, fDec, self.cred_pwd, bufferSize, encFileSize)
except ValueError:
self.logger.error(f"failed to decrypt file: '{self.credentials}'")
sys.exit()
content = fDec.getvalue().decode('UTF-8')
fDec.close()
lines = [s + ('\n') for s in content.split('\n')]
for l in lines:
if l.isspace(): lines.remove(l)
return lines
else:
sys.exit()
    def write_credentials(self, content, action, pwd=None):
        """Write *content* to the plaintext credentials file.

        action == "append": add a single ``user password`` line;
        action == "create": rewrite the whole file from a list of lines.
        Transparently decrypts first / re-encrypts afterwards when the store
        is currently an AES container.  Returns True when the file content
        changed, False otherwise (including on any write error).

        NOTE(review): ``pwd`` is accepted but never used -- confirm callers.
        """
        crypt = False
        if self.is_aes(self.credentials):
            self.decrypt_file(self.encrypted_path, self.unencrypted_path)
            crypt = True
        if action == "append":
            self.logger.debug(f"appending to: '{self.unencrypted_path}'")
            try:
                # md5 before/after tells us whether the append changed anything.
                in_hash = self.get_file_hash(self.unencrypted_path, algo="md5")
                with open(self.unencrypted_path, "a") as f:
                    f.write(content + "\n")
                out_hash = self.get_file_hash(self.unencrypted_path, algo="md5")
                if not self.is_hash_same(in_hash, out_hash) and not in_hash == None:
                    changed = True
                else:
                    changed = False
                if crypt:
                    self.encrypt_file(self.encrypted_path, self.unencrypted_path)
                return changed
            except:
                return False
        elif action == "create":
            self.logger.debug(f"creating: '{self.unencrypted_path}'")
            try:
                in_hash = self.get_file_hash(self.unencrypted_path, algo="md5")
                with open(self.unencrypted_path, "w") as f:
                    for c in content:
                        # Blank lines are dropped on rewrite.
                        if c.isspace():
                            continue
                        f.write(c)
                out_hash = self.get_file_hash(self.unencrypted_path, algo="md5")
                if not self.is_hash_same(in_hash, out_hash) and not in_hash == None:
                    changed = True
                else:
                    changed = False
                if crypt:
                    self.encrypt_file(self.encrypted_path, self.unencrypted_path)
                return changed
            except:
                return False
        else:
            self.logger.error(f"invalid action: '{action}'")
            return False
    def remove_credentials(self, usr):
        """Remove *usr*'s line from the credentials file.

        Decrypts first / re-encrypts afterwards when the store is an AES
        container.  Returns True when the file was actually updated.
        """
        updated = False
        usr_crd = str()
        crds = list()
        crypt = False
        if self.is_aes(self.credentials):
            self.decrypt_file(self.encrypted_path, self.unencrypted_path)
            crypt = True
        usr_creds = self.read_file(self.unencrypted_path)
        if usr_creds:
            for c in usr_creds:
                if c.isspace():
                    continue
                l = c.rstrip('\r\n')
                u = l.split()[0]
                crds.append(u)
                if u == usr:
                    try:
                        # Safe despite mutating during iteration: we break
                        # immediately after the removal.
                        usr_creds.remove(c)
                        written = self.write_credentials(usr_creds, "create")
                        if written:
                            updated = True
                            self.logger.debug(f"credentials removed: '{u}'")
                        else:
                            self.logger.error(f"credentials removal failed: '{u}'")
                        break
                    except ValueError:
                        self.logger.error(f"Failed to remove credentials: '{u}'")
        if not usr in crds:
            self.logger.warning(f"no credentials found for: '{usr}'")
        if crypt:
            self.encrypt_file(self.encrypted_path, self.unencrypted_path)
        return updated
class Commands(Crypt):
    """Dispatch parsed CLI arguments to the matching credential operation.

    Inherits the file/crypto helpers (encrypt_file, decrypt_file,
    write_credentials, remove_credentials, ...) and the parsed arguments
    (self.arg) from Crypt.
    """

    def __init__(self):
        Crypt.__init__(self)
        if self.arg.backup:
            self.run_backup()
        elif self.arg.encrypt:
            self.encrypt_file(self.encrypted_path, self.unencrypted_path)
        elif self.arg.decrypt:
            self.decrypt_file(self.encrypted_path, self.unencrypted_path)
        elif self.arg.show_password:
            self.user_password_show(self.arg.show_password)
        elif self.arg.show_password == '':
            # empty string means "show all"; it is falsy, so the branch
            # above cannot catch it
            self.user_password_show()
        elif self.arg.add_user:
            if self.valid_credentials_file(self.arg.add_user):
                self.iterate_file(self.arg.add_user, "add")
            elif self.is_type_path(self.arg.add_user):
                self.logger.warning(f"This looks like a path '{self.arg.add_user}', add this user? ")
                if not self.continue_input():
                    sys.exit()
                self.user_add(self.arg.add_user, skip_prompt=self.arg.force, logger=True)
            else:
                self.user_add(self.arg.add_user, skip_prompt=self.arg.force, logger=True)
        elif self.arg.rm_user:
            for ru in self.arg.rm_user:
                self.user_remove(ru, skip_prompt=self.arg.force)
        elif self.arg.change_password:
            if self.valid_credentials_file(self.arg.change_password):
                self.iterate_file(self.arg.change_password, "change")
            elif self.is_type_path(self.arg.change_password):
                self.logger.warning(f"This looks like a path '{self.arg.change_password}', change passwords? ")
                if not self.continue_input():
                    sys.exit()
                # BUGFIX: previously called
                # user_add(self.arg.add_user, ...) — a copy-paste from the
                # add-user branch — which added a user named after the
                # (unset) --add-user argument instead of changing the
                # password.
                self.user_password_change(self.arg.change_password)
            else:
                self.user_password_change(self.arg.change_password)

    def user_exists(self, usr, logger=False):
        """Return True if `usr` is a known user; optionally log the outcome."""
        if usr in self.users:
            if logger:
                self.logger.debug(f"user exists: '{usr}'")
            return True
        else:
            if logger:
                self.logger.error(f"user does not exist: '{usr}'")
            return False

    def user_password_show(self, usr=None):
        """Print 'user  password' pairs — all users, or only `usr`'s password."""
        def display(uc):
            # Column width of the longest user name.  Blank lines are
            # filtered here too; previously s.split()[0] raised IndexError
            # on whitespace-only lines even though the loop below skips
            # them.
            num_element_chars = [len(s.split()[0]) for s in uc if s.split()]
            if len(num_element_chars) != 0:
                max_chars = max(num_element_chars)
            else:
                self.logger.warning("no passwords found")
            for c in uc:
                c = c.rstrip('\r\n')
                if c.isspace() or not c:
                    continue
                u = c.split()[0]
                p = c.split()[1]
                if usr is None:
                    # pad names so the password column lines up
                    num_spaces = max_chars - len(u) + 2
                    output = f"{u}{' ' * num_spaces}{p}"
                    print(output)
                elif usr == u:
                    print(p)
        if self.is_aes(self.credentials):
            usr_creds = self.open_encrypted_path()
        elif self.valid_file(self.unencrypted_path):
            usr_creds = self.read_file(self.unencrypted_path)
        else:
            return
        if usr is None:
            display(usr_creds)
        elif self.user_exists(usr, logger=True):
            display(usr_creds)

    def user_password_change(self, usr, passwd=None):
        """Replace `usr`'s password, verifying the old one unless --force."""
        if self.user_exists(usr, logger=True):
            if self.arg.force:
                # --force skips verification.  The original also had an
                # `elif self.arg.force and passwd:` branch which was
                # unreachable (this branch always matches first) — removed.
                self.user_remove(usr, skip_prompt=True)
                self.user_add(usr, passwd, skip_prompt=True)
            else:
                for u in self.hashes:
                    name = u.split(":")[0]
                    old_hash = u.split(":")[1]
                    if name == usr:
                        old_pwd = self.passprompt(f"Enter previous password for '{usr}': ")
                        if self.verify_pwd(old_pwd, old_hash):
                            self.user_remove(usr, skip_prompt=True)
                            self.user_add(usr, skip_prompt=True)
                            break

    def user_add(self, usr, passwd=None, skip_prompt=False, logger=False):
        """Add a new user.

        The password is taken from `passwd`, randomly generated when
        --random-password (or the sentinel "<gen>") is used, or prompted
        for otherwise.  (Default restored to None; the source carried a
        syntactically invalid anonymisation placeholder here.)
        """
        if self.user_exists(usr):
            self.logger.error(f"user already exists: '{usr}'")
            return
        if self.arg.random_password or passwd == "<gen>":
            if skip_prompt:
                self.logger.info(f"generating random password for: '{usr}'")
                pwd = self.gen_password()
            else:
                self.logger.info(f"Generate random password for: '{usr}'?")
                if self.continue_input():
                    pwd = self.gen_password()
                    print(pwd)
                else:
                    return
        elif passwd == "<prompt>":
            pwd = self.passprompt(f"Enter new password for: '{usr}': ")
        elif passwd:
            pwd = passwd
        else:
            pwd = self.passprompt(f"Enter new password for: '{usr}': ")
        hashed_pwd = self.gen_pass_hash(pwd)
        usr_hash = f"{usr}:{hashed_pwd}"
        usr_auth = f"{usr} {pwd}"
        # hash goes into the config; the plain credentials line is appended
        # to the credentials file
        self.config.get("auth").get("basic").get("users").append(usr_hash)
        self.update_config()
        written = self.write_credentials(usr_auth, "append")
        if written:
            if logger:
                self.logger.info(f"user added: '{usr}'")
        else:
            self.logger.error(f"failed to add user: '{usr}'")

    def user_remove(self, usr, skip_prompt=False):
        """Remove a user's hash from the config and their credentials line."""
        if not self.user_exists(usr):
            self.logger.error(f"user does not exists: '{usr}'")
            return
        if skip_prompt:
            check = True
        else:
            self.logger.warning(f"remove user '{usr}'?")
            check = self.continue_input(prompt=False)
        if check:
            if self.user_exists(usr, logger=True):
                for h in self.hashes:
                    u = h.split(":")[0]
                    if u == usr:
                        removed = self.remove_credentials(u)
                        if removed:
                            self.logger.debug(f"credentials removed: '{u}'")
                        else:
                            self.logger.error(f"failed to remove credentials: '{u}'")
                        try:
                            self.config.get("auth").get("basic").get("users").remove(h)
                            self.users.remove(u)
                            self.update_config()
                            self.logger.debug(f"user removed: '{u}'")
                        except ValueError:
                            self.logger.error(f"Failed to remove user: '{u}'")
                        break
        else:
            return

    def iterate_file(self, path, kind):
        """Apply `kind` ("add" or "change") to every 'user [password]' line."""
        entries = self.read_file(path)
        for e in entries:
            e = e.rstrip('\r\n')
            p = None
            if len(e.split()) == 1:
                u = e.split()[0]
            elif len(e.split()) == 2:
                u = e.split()[0]
                p = e.split()[1]
            else:
                self.logger.error(f"invalid entry: '{e}'")
                # BUGFIX: without this `continue`, an invalid entry fell
                # through and re-used the previous line's user name (or
                # raised NameError on the first line).
                continue
            if kind == "add":
                self.user_add(u, p, skip_prompt=self.arg.force, logger=True)
            elif kind == "change":
                self.user_password_change(u, p)
def main():
    """CLI entry point: build settings/logging helpers, then dispatch.

    --list-users is handled here directly; every other credential-related
    flag is handed to Commands.__init__, which performs the operation
    itself.
    """
    settings = Settings()
    basics = Basics()  # NOTE(review): unused below — presumably constructed for side effects; confirm
    arg = settings.arg
    logging = Logging()  # NOTE: shadows the stdlib `logging` name within this function
    logging.color()
    logger = logging.logger
    op = Operations()
    if arg.list_users:
        try:
            op.print_list(op.get_users())
        except OperationError as err:
            logger.error(err)
        sys.exit()
    if any([
        arg.backup,
        arg.encrypt,
        arg.decrypt,
        arg.show_password,
        arg.show_password == '',  # empty string = "show all" (falsy)
        arg.add_user,
        arg.rm_user,
        arg.change_password,
    ]):
        # Commands' constructor runs the requested operation.
        cmd = Commands()
if __name__ == "__main__":
    # Bootstrap: self-install the third-party requirements, then import
    # them.  The imports are deliberately placed *after* Setup.install()
    # so they succeed on a fresh machine.
    setup = Setup()
    setup.install([
        'coloredlogs>=15.0',
        'ruyaml>=0.20.0',
        'bcrypt>=3.2.0',
        'passlib>=1.7.4',
        'pyAesCrypt>=5.0.0',
        'readchar>=3.0.4',
    ])
    import coloredlogs
    import pyAesCrypt
    import readchar
    from ruyaml import YAML
    from passlib.hash import bcrypt
main() | StarcoderdataPython |
12822887 | <reponame>vlukes/io3d
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Upload the file named on the command line to a hard-coded SFTP server.

NOTE(review): the original docstring ("Module for readin 3D dicom data")
appears to have been copied from another file; this script only performs
an SFTP upload of sys.argv[1].
"""
# import of functions from another directory (translated from Czech)
import sys
import os.path
# path_to_script = os.path.dirname(os.path.abspath(__file__))
# sys.path.append(os.path.join(path_to_script, "../extern/pyseg_base/src"))
# sys.path.append(os.path.join(path_to_script,
# "../extern/py3DSeedEditor/"))
# ys.path.append(os.path.join(path_to_script, "../extern/"))
# mport featurevector
from loguru import logger  # NOTE(review): unused in this script
import argparse  # NOTE(review): unused in this script
__author__ = "mjirik"
import paramiko
# SECURITY NOTE(review): host, user and password are hard-coded (the
# password value was anonymised upstream); move them to configuration or
# an interactive prompt before real use.
host = "172.16.17.32" # hard-coded
port = 22
transport = paramiko.Transport((host, port))
username = "lisa_sftp" # hard-coded; immediately overwritten by the next line
username = "lisa_normal" # hard-coded
password = "<PASSWORD>" # hard-coded
# username = "mjirik" #hard-coded
# password = "<PASSWORD>" #hard-coded
transport.connect(username=username, password=password)
sftp = paramiko.SFTPClient.from_transport(transport)
import sys  # NOTE(review): duplicate of the import at the top of the file
path = "./" + sys.argv[1] # hard-coded remote target: same file name under './'
localpath = sys.argv[1]
sftp.put(localpath, path)
sftp.close()
transport.close()
print("Upload done.")
| StarcoderdataPython |
from keras.datasets import cifar10
from PIL import Image
import numpy as np
import os

# Export up to `max_num_datas` CIFAR-10 training images for each of the
# first `num_classes` class labels as JPEG files "<label>_<index>.jpg".
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
max_num_datas = 1000
num_classes = 4
num_datas_list = np.zeros(num_classes)  # per-class count of images saved so far
img_dir = "../data"
img_id = 0  # renamed from `id`, which shadowed the builtin
for x, y in zip(x_train, y_train):
    # Stop once every tracked class reached its quota.  The original used
    # `>`, which could never fire (counters are capped below), so the loop
    # always scanned the entire training set.
    if np.sum(num_datas_list) >= max_num_datas * len(num_datas_list):
        break
    label = y[0]
    if label >= num_classes:
        continue
    if num_datas_list[label] >= max_num_datas:
        continue
    num_datas_list[label] += 1
    img_path = os.path.join(img_dir, "{}_{}.jpg".format(label, img_id))
    img_id += 1
    img = Image.fromarray(x)
    img.save(img_path)
| StarcoderdataPython |
# Coding exercise: read an IP-address-like string from the keyboard and
# print how many dot-separated segments it contains and the length of each.
# Invalid "addresses" are accepted on purpose; the exercise's own test
# inputs include the empty string, so empty input must not crash.


def segment_lengths(ip_address):
    """Return a list with the character count of every dot-separated segment.

    Uses only a for loop and if/else, per the exercise constraints
    (no str.split).
    """
    # Append a trailing dot so the loop below also emits the final segment.
    # str.endswith handles the empty string, whereas the original
    # `ip_address[-1]` raised IndexError on ''.
    if not ip_address.endswith('.'):
        ip_address += '.'
    lengths = []
    segment_length = 0
    for character in ip_address:
        if character == '.':
            lengths.append(segment_length)
            segment_length = 0
        else:
            segment_length += 1
    return lengths


if __name__ == "__main__":
    input_prompt = ("Please enter an IP address. An IP address consists of 4 numbers, "
                    "separated from each other with a full stop: ")
    ip_address = input(input_prompt)
    for segment, length in enumerate(segment_lengths(ip_address), start=1):
        print("segment {} contains {} characters".format(segment, length))
| StarcoderdataPython |
9662822 | <gh_stars>0
from psutil import virtual_memory
def check_memory():
    """Return current system RAM usage as a percentage (psutil's `percent`).

    Fix: the original return line was corrupted with dataset-separator
    residue appended after `mem.percent`, making it a syntax error.
    """
    mem = virtual_memory()
    return mem.percent
# Add the following inside MyFirstController's __init__ method
# (translated from the Japanese original).
self.jointIndex = 0    # presumably the index of the joint being driven — confirm against MyFirstController
self.goingLeft = True  # presumably the current sweep-direction flag — confirm against MyFirstController
| StarcoderdataPython |
3234507 | import mock
import os
import unittest
import shutil
from rf_runner.fetcher import AbstractFetcher, LocalFetcher, ZipFetcher
from rf_runner.fetcher_factory import FetcherFactory
# Fetcher configurations shared by the tests below.
lf_config = {'src': 'testcases'}  # LocalFetcher: source directory
zf_config = {'url': 'https://github.com/devopsspiral/rf-service/archive/master.zip'}  # ZipFetcher: whole archive
# ZipFetcher restricted to one sub-path inside the archive:
zfp_config = {'url': 'https://github.com/devopsspiral/rf-service/archive/master.zip',
              'path': 'rf-service-master/test/resources/testcases'}
class TestFetcher(unittest.TestCase):
    """Unit tests for AbstractFetcher, LocalFetcher, ZipFetcher and their
    factory.

    NOTE: the ZipFetcher tests download a real archive from GitHub and
    therefore require network access.
    """

    def test_abstract_fetcher_inits(self):
        f = AbstractFetcher()
        self.assertFalse(os.path.exists(f.context))
        self.assertTrue(f.context.startswith('/tmp/rf-runner/'))
        self.assertFalse(f.context.endswith('rf-runner/'))
        # every fetcher gets its own unique working directory
        another = AbstractFetcher()
        self.assertNotEqual(f.context, another.context)

    def test_abstract_fetcher_gets_context(self):
        f = AbstractFetcher()
        # BUGFIX: was assertTrue(f.context, f.get_context()), which treats
        # the second argument as the failure *message* and asserts nothing
        # about get_context().
        self.assertEqual(f.context, f.get_context())

    # Decorators apply bottom-up, so the innermost patch ('clean') maps to
    # the first mock argument.
    @mock.patch('rf_runner.fetcher.AbstractFetcher.fetch')
    @mock.patch('rf_runner.fetcher.AbstractFetcher.clean')
    def test_abstract_fetcher_update_runs_clean_and_fetch(self, mock_clean, mock_fetch):
        f = AbstractFetcher()
        f.update()
        mock_fetch.assert_called_once()
        mock_clean.assert_called_once()

    def test_abstract_fetcher_creates_and_cleans_context(self):
        # the context directory exists only inside the `with` block
        with AbstractFetcher() as f:
            self.assertTrue(os.path.exists(f.context))
        self.assertFalse(os.path.exists(f.context))

    def test_fetcher_factory_creates_localfetcher(self):
        data = {'type': 'LocalFetcher', 'src': 'testcases'}
        f = FetcherFactory.get(data)
        self.assertTrue(isinstance(f, LocalFetcher))
        self.assertEqual('testcases', f.src)

    def test_fetcher_factory_creates_zipfetcher(self):
        data = {'type': 'ZipFetcher', 'url': 'http://someurl'}
        f = FetcherFactory.get(data)
        self.assertTrue(isinstance(f, ZipFetcher))
        self.assertEqual('http://someurl', f.url)
        data = {'type': 'ZipFetcher', 'url': 'http://someurl', 'path': 'somepath'}
        f = FetcherFactory.get(data)
        self.assertEqual('http://someurl', f.url)
        self.assertEqual('somepath', f.path)

    def test_local_fetcher_inits(self):
        f = LocalFetcher(lf_config)
        self.assertFalse(os.path.exists(f.context))
        self.assertTrue(f.context.startswith('/tmp/rf-runner/'))

    def test_local_fetcher_gets_files(self):
        with LocalFetcher(lf_config) as f:
            f.fetch()
            self.assertEqual(1, len(os.listdir(f.get_context())))
        self.assertFalse(os.path.exists(f.context))

    def test_local_fetcher_cleans_context(self):
        with LocalFetcher(lf_config) as f:
            f.fetch()
            self.assertEqual(1, len(os.listdir(f.get_context())))
            f.clean()
            self.assertEqual(0, len(os.listdir(f.get_context())))

    def test_local_fetcher_removes_before_fetch(self):
        # update() must wipe stale files (the *_bu copy) before re-fetching
        with LocalFetcher(lf_config) as f:
            f.update()
            file_path = os.path.join(f.get_context(),
                                     os.listdir(f.get_context())[0])
            shutil.copy(file_path, file_path + '_bu')
            f.update()
            self.assertEqual(1, len(os.listdir(f.get_context())))

    def test_zip_fetcher_inits(self):
        f = ZipFetcher(zf_config)
        self.assertFalse(os.path.exists(f.context))
        self.assertTrue(f.context.startswith('/tmp/rf-runner/'))

    def test_zip_fetcher_gets_all_files(self):
        # network test: downloads the archive from GitHub
        with ZipFetcher(zf_config) as f:
            f.fetch()
            self.assertEqual('rf-service-master', os.listdir(f.get_context())[0])
        self.assertFalse(os.path.exists(f.context))

    def test_zip_fetcher_gets_specific_dir(self):
        # network test: only the configured sub-path should be extracted
        with ZipFetcher(zfp_config) as f:
            f.fetch()
            existing_files = []
            for r, _, files in os.walk(f.get_context()):
                for filename in files:
                    existing_files.append(os.path.join(r, filename))
            self.assertEqual(2, len(existing_files))
| StarcoderdataPython |
9601753 | import os
import re
import subprocess
import asyncio
import shlex
import magic
from alot.buffers import EnvelopeBuffer, SearchBuffer
from alot.commands import CommandCanceled
from alot.helper import mailto_to_envelope
from alot.settings.const import settings
from alot.settings.errors import NoMatchingAccount
from notmuch2 import Thread
def pre_buffer_focus(ui, dbm, buf):
    """Alot hook: rebuild a search buffer whenever it is about to gain focus."""
    if not isinstance(buf, SearchBuffer):
        return
    buf.rebuild()
# TODO: async
# mark current message read at github
def github_mark_read(ui, msg=None):
    """Fetch the GitHub notification 'beacon' GIF embedded in a
    notification mail; fetching it marks the notification read on
    github.com.  Falls back to the currently selected message when `msg`
    is None.
    """
    if msg is None:
        msg = ui.current_buffer.get_selected_message()
    msg = msg.get_email()
    # Flatten the (possibly multipart) payload into a single bytes blob
    # so the beacon URL can be searched for with one regex.
    if msg.is_multipart():
        msgtext = b""
        for msgpart in msg.get_payload():
            msgtext += msgpart.get_payload(decode=True)
    else:
        msgtext = msg.get_payload(decode=True)
    r = b'src="(https://github.com/notifications/beacon/.*.gif)"'
    beacons = re.findall(r, msgtext)
    if beacons:
        # Fire-and-forget curl.  NOTE(review): the devnull file handle
        # opened inline here is never closed explicitly.
        subprocess.Popen(['curl', '-s', beacons[0]], stdout=open(os.devnull, 'w'))
        ui.notify('removed from github notifications:\n %s' % beacons[0])
    else:
        ui.notify('no beacon found')
# automatically mark github notifications as read
async def post_search_select(ui, cmd, dbm):
    """Alot hook: after opening a thread from search results, trigger the
    GitHub read-beacon of the thread's newest message when the thread is
    an unread notification.

    NOTE(review): the author address compared against is an anonymised
    placeholder ('<EMAIL>') in this copy — restore the real
    notifications@github.com-style address before use.
    """
    current_msg = ui.current_buffer.get_selected_message()
    if current_msg.get_author()[1] == '<EMAIL>':
        last_msg = list(ui.current_buffer.messagetrees())[-1]._message
        if 'unread' in last_msg.get_tags():
            github_mark_read(ui, list(ui.current_buffer.messagetrees())[-1]._message)
# command to manually fetch mails
def getmails(ui):
    """Ask the running maildaemon (via SIGUSR1) to fetch mail now.

    NOTE(review): 'fetchinig' is a typo, but it sits in a user-visible
    runtime string, so it is left untouched by this documentation edit.
    """
    ui.notify("fetchinig email..")
    subprocess.call(['pkill', '-u', os.environ['USER'], '-USR1', 'maildaemon'])
def exit():
    """Run the external `notmuch-backup` command (presumably alot's exit
    hook — confirm).  NOTE(review): shadows the `exit` builtin at module
    scope.
    """
    subprocess.call(['notmuch-backup'])
def _sorted_func(func, key):
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
return sorted(result, key=key)
return wrapper
async def post_thread_save(ui, dbm, cmd):
    """Alot hook: after saving a single attachment, offer to normalise it
    in place — CRLF text via dos2unix, ISO-8859/latin1 text via iconv."""
    # we are only concerned when we saved a single focused attachment
    if cmd.all or not cmd.path:
        return
    if magic.from_file(cmd.path).endswith(' text, with CRLF line terminators'):
        if (await ui.choice("convert Windows text file?", select='yes', cancel='no')) == 'no':
            return
        process = await asyncio.create_subprocess_exec('dos2unix', cmd.path)
        await process.wait()
    if magic.from_file(cmd.path).startswith('ISO-8859 text'):
        if (await ui.choice("convert ISO-8859 text file?", select='yes', cancel='no')) == 'no':
            return
        # iconv cannot rewrite a file in place, so the shell pipeline
        # buffers the output through sponge; the path is shell-quoted.
        process = await asyncio.create_subprocess_shell('iconv -f latin1 -t utf8 {0} | sponge {0}'.format(shlex.quote(cmd.path)))
        await process.wait()
def unsubscribe_list(ui):
    """
    Unsubscribe from a mailing list.

    This hook reads the 'List-Unsubscribe' header of a mail in thread mode,
    constructs a unsubscribe mail according to any mailto-url it finds
    and opens the new mail in an envelope buffer.
    """
    msg = ui.current_buffer.get_selected_message()
    e = msg.get_email()
    uheader = e['List-Unsubscribe']
    dtheader = e.get('Delivered-To', None)
    if uheader is not None:
        mailto_match = re.search(r'<(mailto:\S*)>', uheader)
        if mailto_match is not None:
            envelope = mailto_to_envelope(mailto_match.group(1))
            if dtheader is not None:
                # Send from the address the list delivered to, using the
                # configured account that matches it; abort cleanly when
                # no such account exists.
                envelope['From'] = dtheader
                try:
                    envelope.account = settings.account_matching_address(dtheader)
                except NoMatchingAccount:
                    msg = 'Cannot compose mail - ' \
                        'no account found for `%s`' % dtheader
                    ui.notify(msg, priority='error')
                    raise CommandCanceled()
            ui.buffer_open(EnvelopeBuffer(ui, envelope))
    else:
        ui.notify('focussed mail contains no \'List-Unsubscribe\' header',
                  'error')
def _thread_destroy(self):
    # Replacement destructor that only drops the underlying pointer.
    self._thread_p = None


# Monkeypatch the notmuch2 Thread destructor with the no-op above —
# presumably a workaround for a crash/double-free during teardown in the
# notmuch2 bindings; confirm against the bindings version in use.
Thread._destroy = _thread_destroy
| StarcoderdataPython |
1991373 | import time
import json
import paho.mqtt.client as mqtt
def spiral(client, boule, temps):
    """Animate a rainbow 'spiral' on Laumio `boule` for `temps` seconds.

    One colour command and one off ('reset') command is precomputed per
    LED; the loop then walks the hard-coded LED order, publishing over
    MQTT to topic 'laumio/<boule>/json', lighting one LED at a time.
    """
    cmds = []
    resets = []
    # LED visiting order on the ball (hand-picked sequence).
    order = [10, 2, 7, 4, 12, 0, 6, 5, 11, 1, 3, 8, 9]
    for i in order:
        # Map LED index 0..12 onto a 0..768 hue-like ramp (floats).
        # NOTE(review): R reaches 256 for i == 12 (center - 512), which is
        # outside the usual 0..255 byte range, and the RGB values are
        # floats — confirm the firmware accepts both.
        center = i / 12 * 3 * 256
        R = max([0, 255 - center, center - 2*256])
        G = abs(255 - center) if center < 2*256-1 else 0
        B = abs(2*256 - center) if center > 256 else 0
        cmd = {
            'command': 'set_pixel',
            'led': i,
            'rgb': [R, G, B]
        }
        cmds.append(cmd)
        reset = {
            'command': 'set_pixel',
            'led': i,
            'rgb': [0, 0, 0]
        }
        resets.append(reset)
    t = time.time()
    i = 0
    print("coucou", t, temps, time.time())  # NOTE(review): leftover debug output
    while t + temps > time.time():
        print("test")  # NOTE(review): leftover debug output
        # Turn the current LED off, then light the next one in the cycle.
        client.publish("laumio/{}/json".format(boule), json.dumps(resets[i]))
        i = (i+1) % 13
        client.publish("laumio/{}/json".format(boule), json.dumps(cmds[i]))
        time.sleep(0.1)
def pluie(client, boule, dt):
    """'Rain' animation: for `dt` seconds, light the Laumio's rings blue
    from top to bottom, switching the previously lit ring off, one step
    every 0.5 s.

    Fix: the original final line carried dataset-separator residue after
    `time.sleep(0.5)`, making it a syntax error.
    """
    cmds = []
    cmds1 = []
    rings = [2, 1, 0]
    for i in rings:
        cmd = {
            'command': 'set_ring',
            'ring': i,
            'rgb': [0, 0, 255]
        }
        # companion command clearing the ring lit on the previous step
        cmd1 = {
            'command': 'set_ring',
            'ring': (i+1) % 3,
            'rgb': [0, 0, 0]
        }
        cmds.append(cmd)
        cmds1.append(cmd1)
    t = time.time()
    i = 0
    while t + dt > time.time():
        client.publish("laumio/{}/json".format(boule), json.dumps(cmds[i]))
        client.publish("laumio/{}/json".format(boule), json.dumps(cmds1[i]))
        i = (i+1) % 3
        time.sleep(0.5)
3386158 | import numpy as np
import copy
from sklearn.linear_model import LogisticRegression, SGDClassifier
class ClassifierChain():
    '''
    Classifier Chain
    ----------------
    One base classifier per label; classifier j is trained on the input
    features plus labels 0..j-1 (in chain order) as extra inputs.

    TODO: much of this can be shared with Regressor Chains, and thus
    probably we should use some kind of base class to inherit here.

    Note: ScikitLearn also includes 'ClassifierChain'. A difference is
    probabilistic extensions are included here.
    '''

    h = None  # list of per-label fitted models (populated by fit)
    L = -1    # number of labels (set by fit)

    def __init__(self, h=LogisticRegression(), order=None):
        ''' init

        Parameters
        ----------
        h : sklearn model
            The base classifier
        order : str
            None to use default order, 'random' for random order.

        NOTE(review): the default LogisticRegression() is a single instance
        created at import time; fit() deepcopies it per label, so sharing
        it is safe here, but the mutable default is unconventional.
        '''
        self.base_model = h
        self.order = order

    def fit(self, X, Y):
        ''' fit

        Train one deep-copied base model per label on X augmented with all
        preceding (chain-ordered) true labels; returns self.
        '''
        N, self.L = Y.shape
        L = self.L
        N, D = X.shape
        self.chain = np.arange(L)
        if self.order == 'random':
            np.random.shuffle(self.chain)
        # Set the chain order
        Y = Y[:,self.chain]
        # Train
        self.h = [ copy.deepcopy(self.base_model) for j in range(L)]
        XY = np.zeros((N, D + L-1))
        XY[:,0:D] = X
        XY[:,D:] = Y[:,0:L-1]
        for j in range(self.L):
            # model j sees the D features plus the first j labels
            self.h[j].fit(XY[:,0:D+j], Y[:,j])
        return self

    def partial_fit(self, X, Y):
        ''' partial_fit

        N.B. Assume that fit has already been called
        (i.e., this is more of an 'update')
        '''
        N, self.L = Y.shape
        L = self.L
        N, D = X.shape
        # Set the chain order (self.chain was fixed by fit)
        Y = Y[:,self.chain]
        XY = np.zeros((N, D + L-1))
        XY[:,0:D] = X
        XY[:,D:] = Y[:,0:L-1]
        for j in range(L):
            self.h[j].partial_fit(XY[:,0:D+j], Y[:,j])
        return self

    def predict(self, X):
        ''' predict

        Returns predictions for X (greedy chain inference: each model's
        hard prediction is appended as a feature for the next model).
        '''
        N,D = X.shape
        Y = np.zeros((N,self.L))
        for j in range(self.L):
            if j>0:
                # append the previous label's prediction as a new column
                X = np.column_stack([X, Y[:,j-1]])
            Y[:,j] = self.h[j].predict(X)
        # Unset the chain order (back to default)
        return Y[:,np.argsort(self.chain)]

    def predict_proba(self, X):
        ''' predict_proba

        Returns marginals [P(y_1=1|x),...,P(y_L=1|x,y_1,...,y_{L-1})]

        i.e., confidence predictionss given inputs, for each instance.

        N.B. This function suitable for multi-label (binary) data
        only at the moment (may give index-out-of-bounds error if
        uni- or multi-target (of > 2 values) data is used in training).

        NOTE(review): unlike predict(), the chain permutation is not
        inverted before returning, and probabilities (not hard labels)
        are fed down the chain — confirm both are intended.
        '''
        N,D = X.shape
        Y = np.zeros((N,self.L))
        for j in range(self.L):
            if j>0:
                X = np.column_stack([X, Y[:,j-1]])
            Y[:,j] = self.h[j].predict_proba(X)[:,1]
        return Y
def P(y, x, cc, payoff=np.prod):
    ''' Payoff function, P(Y=y|X=x)

    What payoff do we get for predicting y | x, under model cc.
    With the default payoff (np.prod) this is the chain-rule product of
    the per-label conditional probabilities.

    Parameters
    ----------
    x: input instance
    y: its true labels
    cc: a classifier chain
    payoff: payoff function

    Returns
    -------
    A single number; the payoff of predicting y | x.
    '''
    D = len(x)
    L = len(y)
    p = np.zeros(L)
    # xy holds the instance followed by the label values, filled in one by
    # one as we walk down the chain (unfilled slots are 0).
    xy = np.zeros(D + L)
    xy[0:D] = x.copy()
    for j in range(L):
        P_j = cc.h[j].predict_proba(xy[0:D+j].reshape(1,-1))[0] # e.g., [0.9, 0.1] wrt 0, 1
        xy[D+j] = y[j] # e.g., 1
        p[j] = P_j[y[j]] # e.g., 0.1
        # or, y[j] = 0 is predicted with probability p[j] = 0.9
    return payoff(p)
class ProbabilisticClassifierChain(ClassifierChain):
    '''
    Probabilistic Classifier Chains (PCC)

    Exhaustive (Bayes-optimal) inference over a fitted chain: evaluates
    every one of the 2^L label combinations, so only practical for small L.
    '''

    def predict(self, X):
        ''' Predict

        Explores all possible branches of the probability tree.
        (i.e., all possible 2^L label combinations).

        Returns
        -------
        Predictions Y.
        '''
        N,D = X.shape
        Yp = np.zeros((N,self.L))
        # for each instance
        for n in range(N):
            w_max = 0.
            # for each and every possible label combination
            for b in range(2**self.L):
                # put together a label vector
                y_ = np.array(list(map(int, np.binary_repr(b,width=self.L))))
                # ... and gauge a probability for it (given x)
                w_ = P(y_,X[n],self)
                # if it performs well, keep it, and record the max
                if w_ > w_max:
                    Yp[n,:] = y_[:].copy()
                    w_max = w_
        return Yp
class MCC(ProbabilisticClassifierChain):
    ''' Monte Carlo Sampling Classifier Chains

    PCC, using Monte Carlo sampling, published as 'MCC'.

    M samples are taken from the posterior distribution. Therefore we need
    a probabilistic interpretation of the output, and thus, this is a
    particular variety of ProbabilisticClassifierChain.

    N.B. Multi-label (binary) only at this moment.
    '''

    M = 10  # default number of sampling iterations

    def __init__(self, h=LogisticRegression(), M=10):
        ''' Do M iterations, unless overridded by M at predict()tion time '''
        ClassifierChain.__init__(self,h)
        self.M = M

    def sample(self, x):
        '''
        Sample y ~ P(y|x)

        Walks the chain, drawing each label from the model's predicted
        distribution and feeding the draw to the next model.

        Returns
        -------
        y: a sampled label vector
        p: the associated probabilities, i.e., p(y_j=1)=p_j
        '''
        D = len(x)
        p = np.zeros(self.L)
        y = np.zeros(self.L)
        xy = np.zeros(D + self.L)
        xy[0:D] = x.copy()
        for j in range(self.L):
            P_j = self.h[j].predict_proba(xy[0:D+j].reshape(1,-1))[0]
            y_j = np.random.choice(2,1,p=P_j)
            xy[D+j] = y_j
            y[j] = y_j
            p[j] = P_j[y_j]
        return y, p

    def predict(self, X, M = 'default'):
        ''' Predict

        Parameters
        ----------
        X: Input matrix, (an Numpy.ndarray of shape (n_samples, n_features)
        M: Number of sampling iterations
        -------

        NB: quite similar to PCC's predict function.
        Depending on the implementation, y_max, w_max may be initially set to 0,
        if we wish to rely solely on the sampling. Setting the w_max based on a naive CC prediction
        gives a good baseline to work from.

        return predictions for X
        '''
        N,D = X.shape
        Yp = np.zeros((N,self.L))
        if M == 'default':
            M = self.M
        # for each instance
        for n in range(N):
            # start from the greedy CC prediction as a baseline candidate
            Yp[n,:] = ClassifierChain.predict(self, X[n].reshape(1,-1))
            w_max = P(Yp[n,:].astype(int),X[n],self)
            # for M times
            for m in range(M):
                y_, p_ = self.sample(X[n]) # N.B. in fact, the calcualtion p_ is done again in P.
                w_ = P(y_.astype(int),X[n],self)
                # if it performs well, keep it, and record the max
                if w_ > w_max:
                    Yp[n,:] = y_[:].copy()
                    w_max = w_
        return Yp
def demo():
    """Smoke-test CC, random-order CC, MCC and PCC on the toy 'logical'
    dataset; prints the true labels and each model's predictions.

    Requires scikit-multiflow (imported lazily below) for the data.
    """
    import sys
    sys.path.append( '../data' )
    from skmultiflow.data.synth import make_logical
    X,Y = make_logical()
    N,L = Y.shape
    print("TRUE: ")
    print(Y)
    print("vs")
    print("CC")
    cc = ClassifierChain(SGDClassifier(max_iter=100,loss='log'))
    cc.fit(X, Y)
    print(cc.predict(X))
    print("RCC")
    cc = ClassifierChain(SGDClassifier(max_iter=100,loss='log'),order='random')
    cc.fit(X, Y)
    print(cc.predict(X))
    print("MCC")
    mcc = MCC(SGDClassifier(max_iter=100,loss='log'),M=1000)
    mcc.fit(X, Y)
    Yp = mcc.predict(X, M=50)
    print("with 50 iterations ...")
    print(Yp)
    Yp = mcc.predict(X, 'default')
    print("with default (%d) iterations ..." % 1000)
    print(Yp)
    print("PCC")
    pcc = ProbabilisticClassifierChain(SGDClassifier(max_iter=100,loss='log'))
    pcc.fit(X, Y)
    print(pcc.predict(X))


if __name__ == '__main__':
    demo()
| StarcoderdataPython |
5024764 | <reponame>Dheeraj8383/pcb-tools<gh_stars>100-1000
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# copyright 2014 <NAME> <<EMAIL>>
# copyright 2014 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This module provides RS-274-X AM macro modifiers parsing.
"""
from .am_eval import OpCode, eval_macro
import string
class Token:
    """Lexical tokens of the RS-274X AM macro expression language."""
    ADD = "+"
    SUB = "-"
    # compatibility, as many gerber writers use the non-compliant upper-case X
    MULT = ("x", "X")
    DIV = "/"
    OPERATORS = (ADD, SUB, MULT[0], MULT[1], DIV)
    LEFT_PARENS = "("
    RIGHT_PARENS = ")"
    EQUALS = "="
    EOF = "EOF"  # sentinel returned by Scanner.peek() at end of input
def token_to_opcode(token):
    """Translate an arithmetic token into its OpCode; None for non-operators."""
    if token in Token.MULT:
        return OpCode.MUL
    simple = {
        Token.ADD: OpCode.ADD,
        Token.SUB: OpCode.SUB,
        Token.DIV: OpCode.DIV,
    }
    return simple.get(token)
def precedence(token):
    """Operator precedence: mul/div bind tighter (2) than add/sub (1);
    anything else (e.g. parentheses) is 0."""
    if token in Token.MULT or token == Token.DIV:
        return 2
    if token == Token.ADD or token == Token.SUB:
        return 1
    return 0
def is_op(token):
    """True when `token` is one of the arithmetic operator characters."""
    return token in Token.OPERATORS
class Scanner:
    """Sequential character scanner over one macro block string."""

    def __init__(self, s):
        self.buff = s
        self.n = 0

    def eof(self):
        """True once the whole buffer has been consumed."""
        return self.n >= len(self.buff)

    def peek(self):
        """Next character without consuming it; Token.EOF at end of input."""
        if self.eof():
            return Token.EOF
        return self.buff[self.n]

    def ungetc(self):
        """Step back one character (no-op at the start of the buffer)."""
        self.n = max(0, self.n - 1)

    def getc(self):
        """Consume and return the next character ('' at end of input)."""
        if self.eof():
            return ""
        ch = self.buff[self.n]
        self.n += 1
        return ch

    def readint(self):
        """Consume a run of decimal digits and return it as an int."""
        digits = []
        while not self.eof() and self.peek() in string.digits:
            digits.append(self.getc())
        return int("".join(digits))

    def readfloat(self):
        """Consume digits and dots, returning the float value.

        A bare '.' yields 0: some writers omit the zero in a trailing
        modifier such as ',0.'.
        """
        chars = []
        while not self.eof() and (self.peek() in string.digits or self.peek() == "."):
            chars.append(self.getc())
        number = "".join(chars)
        if number == ".":
            return 0
        return float(number)

    def readstr(self, end="*"):
        """Consume characters up to (not including) `end`; return them stripped."""
        collected = []
        while not self.eof() and self.peek() != end:
            collected.append(self.getc())
        return "".join(collected).strip()
def print_instructions(instructions):
    """Debug helper: dump an (opcode, argument) list, one instruction per line."""
    for opcode, argument in instructions:
        print("%s %s" % (OpCode.str(opcode),
                         str(argument) if argument is not None else ""))
def read_macro(macro):
    """Parse an RS-274X aperture-macro body into a flat (OpCode, arg) list.

    The macro is split on '*' into blocks.  A block is either a primitive
    (numeric code followed by comma-separated modifiers, or a '0 ...'
    comment primitive) or an equation '$n = expr'.  Arithmetic expressions
    are converted to postfix with a classic shunting-yard operator stack.
    """
    instructions = []
    for block in macro.split("*"):
        # per-block parser state
        is_primitive = False
        is_equation = False
        found_equation_left_side = False
        found_primitive_code = False
        equation_left_side = 0
        primitive_code = 0
        unary_minus_allowed = False  # only directly after a ',' separator
        unary_minus = False
        if Token.EQUALS in block:
            is_equation = True
        else:
            is_primitive = True
        scanner = Scanner(block)
        # operator stack for shunting-yard, inlined here for compactness
        # and convenience
        op_stack = []
        def pop():
            return op_stack.pop()
        def push(op):
            op_stack.append(op)
        def top():
            return op_stack[-1]
        def empty():
            return len(op_stack) == 0
        while not scanner.eof():
            c = scanner.getc()
            if c == ",":
                found_primitive_code = True
                # add all instructions on the stack to finish last modifier
                while not empty():
                    instructions.append((token_to_opcode(pop()), None))
                unary_minus_allowed = True
            elif c in Token.OPERATORS:
                if c == Token.SUB and unary_minus_allowed:
                    # leading '-' right after ',' negates the next number
                    unary_minus = True
                    unary_minus_allowed = False
                    continue
                # flush higher/equal-precedence operators before pushing
                while not empty() and is_op(top()) and precedence(top()) >= precedence(c):
                    instructions.append((token_to_opcode(pop()), None))
                push(c)
            elif c == Token.LEFT_PARENS:
                push(c)
            elif c == Token.RIGHT_PARENS:
                while not empty() and top() != Token.LEFT_PARENS:
                    instructions.append((token_to_opcode(pop()), None))
                if empty():
                    raise ValueError("unbalanced parentheses")
                # discard "("
                pop()
            elif c.startswith("$"):
                # macro variable reference: '$n'
                n = scanner.readint()
                if is_equation and not found_equation_left_side:
                    equation_left_side = n
                else:
                    instructions.append((OpCode.LOAD, n))
            elif c == Token.EQUALS:
                found_equation_left_side = True
            elif c == "0":
                if is_primitive and not found_primitive_code:
                    # primitive code 0 is a comment: push the raw text
                    instructions.append((OpCode.PUSH, scanner.readstr("*")))
                    found_primitive_code = True
                else:
                    # decimal or integer disambiguation
                    if scanner.peek() not in '.' or scanner.peek() == Token.EOF:
                        instructions.append((OpCode.PUSH, 0))
            elif c in "123456789.":
                scanner.ungetc()
                if is_primitive and not found_primitive_code:
                    primitive_code = scanner.readint()
                else:
                    n = scanner.readfloat()
                    if unary_minus:
                        unary_minus = False
                        n *= -1
                    instructions.append((OpCode.PUSH, n))
            else:
                # whitespace or unknown char
                pass
        # add all instructions on the stack to finish last modifier (if any)
        while not empty():
            instructions.append((token_to_opcode(pop()), None))
        # at end, we either have a primitive or an equation
        if is_primitive and found_primitive_code:
            instructions.append((OpCode.PRIM, primitive_code))
        if is_equation:
            instructions.append((OpCode.STORE, equation_left_side))
    return instructions
if __name__ == '__main__':
    # Ad-hoc CLI: parse the macro passed as argv[1], dump the instruction
    # list, then the evaluated primitives.
    import sys
    instructions = read_macro(sys.argv[1])
    print("insructions:")  # typo lives in a runtime string; left as-is by this doc edit
    print_instructions(instructions)
    print("eval:")
    for primitive in eval_macro(instructions):
        print(primitive)
| StarcoderdataPython |
9783714 | <gh_stars>0
import datetime
import functools
# import logging
import numpy as np
import pandas as pd
from . import smoothing
from . import study_constants as constants
# Number of 3-minute samples in a 24 h day (480); a (trace, date) pair with
# exactly this many samples counts as a complete day.
LENGTH_OF_COMPLETE_DAY = 24 * 60 / 3
ONE_MONTH_IN_DAYS = 365.0 / 12
# Per-sample measurement columns present in the raw data.
BASE_FEATURES = [
    'AllMeters', 'BodyMass', 'Food', 'KCal_hr', 'PedMeters', 'PedSpeed',
    'RQ', 'VCO2', 'VH2O', 'VO2', 'Water', 'WheelMeters', 'WheelSpeed',
    'XBreak', 'YBreak', 'ZBreak']
# Columns that accumulate over time (distances, intake, beam breaks) —
# presumably; confirm against the acquisition pipeline.
CUMULATIVE_FEATURES = [
    'AllMeters', 'Food', 'PedMeters', 'Water', 'WheelMeters', 'XBreak',
    'YBreak', 'ZBreak']
# Feature subset presumably used by the HMM state model — confirm.
HMM_FEATURES = [
    'VO2', 'VCO2', 'VH2O', 'KCal_hr', 'RQ', 'Food', 'Water', 'PedMeters',
    'WheelMeters', 'XBreak', 'YBreak', 'ZBreak']
# One-hot column names produced from the 6-valued 'states' column.
STATE_FEATURES = ['state-%d' % x for x in range(6)]
AGGREGATE_FEATURES = BASE_FEATURES + STATE_FEATURES
# Preferred display ordering of features and states.
FEATURE_ORDER = [
    'VO2', 'VCO2', 'RQ', 'KCal_hr', 'VH2O', 'Food', 'Water',
    'WheelMeters', 'AllMeters', 'PedMeters',
    'XBreak', 'YBreak', 'ZBreak']
STATE_ORDER = ['SLEEP', 'REST', 'ACTIVE', 'RUN', 'EAT', 'EAT&DRINK']
STATE2NAME = constants.STATE2DESCRIPTOR
def make_age_group(age_in_months, interval=3, max_age=33):
    """Bucket an age in months into a labelled bin, e.g. '03-06 months'.

    Ages of `max_age` and above collapse into a single open-ended bin.
    """
    if age_in_months >= max_age:
        return '%02d months or older' % max_age
    start = age_in_months - age_in_months % interval
    return '%02d-%02d months' % (start, start + interval)
def make_trace_metadata(complete_df, mouse_df):
    """Compute per-trace metadata such as age at run etc.

    Builds one row per (mouse_id, trace_id) with: the trace date (max
    'date' seen), run number parsed from the trace id, age in days and
    months at the trace, an age-group label, and lifespan-relative fields.
    Traces recorded within 7 days of death are dropped.  Returns a frame
    indexed by trace_id.
    """
    def _get_run_number(trace_id):
        # trace ids end with '_<run-number>'
        run_number = int(trace_id.split('_')[-1])
        return run_number
    trace_meta_df = (
        complete_df.groupby(['mouse_id', 'trace_id'])['date']
        .max().reset_index().copy())
    trace_meta_df = trace_meta_df.sort_values(['mouse_id', 'date'])
    # Add run number
    run_numbers = trace_meta_df['trace_id'].apply(_get_run_number)
    trace_meta_df.loc[:, 'run_number'] = run_numbers.values
    # Add age fields (age in days = trace date - date of birth)
    trace_meta_df.loc[:, 'age'] = pd.Series(
        trace_meta_df['date'].values
        - mouse_df.loc[trace_meta_df['mouse_id']]['date_of_birth'].values
    ).dt.days.values
    trace_meta_df.loc[:, 'age_in_months'] = (
        trace_meta_df['age'] / (365 / 12.0)
    ).astype(int)
    trace_meta_df.loc[:, 'age_group'] = trace_meta_df['age_in_months'].apply(
        make_age_group)
    # Add lifespan information; mouse_df['age_at_death'] is assumed to be
    # in days, matching 'age' — confirm upstream.
    days_to_death = (
        mouse_df.loc[trace_meta_df['mouse_id']]['age_at_death'].values
        - trace_meta_df['age'])
    fraction_of_lifespan = trace_meta_df['age'].astype(float).div(
        mouse_df.loc[trace_meta_df['mouse_id']]['age_at_death'].values,
        fill_value=np.nan)
    trace_meta_df.loc[:, 'fraction_of_lifespan'] = fraction_of_lifespan
    trace_meta_df.loc[:, 'days_to_death'] = days_to_death
    # Remove traces that have deaths within a week of met cage measurement
    # (~ keeps NaN days_to_death rows, since NaN <= 7 is False)
    mask = ~(days_to_death <= 7).values
    trace_meta_df = trace_meta_df[mask]
    trace_meta_df = trace_meta_df.rename({'date': 'trace_date'}, axis=1)
    trace_meta_df = trace_meta_df.set_index('trace_id')
    return trace_meta_df
def add_trace_meta(df, trace_meta_df):
    """Copy every trace-metadata column onto `df`, aligned via its
    'trace_id' column; mutates and returns `df`."""
    aligned = trace_meta_df.loc[df['trace_id']]
    for column in trace_meta_df.columns:
        df.loc[:, column] = aligned[column].values
    return df
def add_derived_features(complete_df, trace_meta_df):
    """ Make base features from 3-minute resolution level data.

    One-hot encodes the behavioral state column, keeps only traces that
    survived the metadata filtering, and flags rows that belong to a
    fully-measured day (LENGTH_OF_COMPLETE_DAY timepoints).
    """
    # One column per state value, named 'state-<code>'.
    state_dummies = pd.get_dummies(
        complete_df['states'], prefix='state-', prefix_sep='')
    complete_df = pd.concat([complete_df, state_dummies], axis=1)
    # Drop rows of traces that were filtered out of the metadata
    # (e.g. recorded too close to death).
    complete_df = complete_df[
        (complete_df['trace_id'].isin(trace_meta_df.index)).values
    ]
    # Add flag for complete day to time_df - exclude incomplete days
    timepoints_by_trace_date = complete_df.groupby(['trace_id', 'date']).size()
    timepoints_by_trace_date = timepoints_by_trace_date[
        timepoints_by_trace_date == LENGTH_OF_COMPLETE_DAY]
    complete_trace_dates = timepoints_by_trace_date.reset_index().copy()
    complete_trace_dates = complete_trace_dates[['trace_id', 'date']]
    complete_trace_dates.loc[:, 'part_of_complete_day'] = True
    # Left merge: rows of incomplete days get NaN, filled to False below.
    complete_df = complete_df.merge(complete_trace_dates,
                                    on=['trace_id', 'date'], how='left')
    complete_df['part_of_complete_day'] = (
        complete_df['part_of_complete_day'].fillna(False))
    return complete_df
def make_hourly_average_df(complete_df):
    """ Make hourly-average features from 3-minute resolution level data."""
    hourly_df = complete_df.copy()
    def _round_dwn_to_hour(t):
        # NOTE(review): despite the name, this maps a timedelta to
        # (hour + 1) mod 24, i.e. it shifts the bucket by one hour —
        # confirm the off-by-one shift is intentional.
        hour = (int(t.total_seconds() // 3600) + 1) % 24
        return datetime.time(hour=hour)
    hourly_df['time_hour'] = hourly_df['time_period'].apply(_round_dwn_to_hour)
    # Mean of each aggregate feature within every (trace, hour-of-day) bucket.
    hourly_average_df = hourly_df.groupby(['trace_id', 'time_hour'])
    hourly_average_df = hourly_average_df[AGGREGATE_FEATURES].agg(np.mean)
    hourly_average_df = hourly_average_df.reset_index()
    return hourly_average_df
def make_time_window_and_lights_on_off_ratio_dfs(complete_df):
    """ Make time window features from 3-minute resolution level data.
    These features include:
    - Averages per 4 hour time bin
    - Ratio of averages of 4 hour bins before / after lights on / off

    Returns:
        (time_window_df, lights_on_ratio_df, lights_off_ratio_df), each
        one row per trace_id.
    """
    time_window_df = complete_df.copy()
    def _subset_time_window(time_window_df, time_window):
        # Select one named window from the (trace_id, time_window) MultiIndex
        # and drop the window level so frames can be divided elementwise.
        mask = time_window_df.index.get_level_values(1) == time_window
        df = time_window_df[mask]
        df.index = df.index.droplevel(1)
        return df
    def _make_ratio_df(pre_df, post_df, suffix):
        # Elementwise pre/post ratio, clipped to [0, 100] to tame blow-ups
        # from near-zero denominators.
        ratio_df = pre_df / post_df
        # mask = post_df < 1e-3
        # ratio_df[mask] = None
        ratio_df = ratio_df.clip(0, 1e2)
        ratio_df.columns = ['%s(%s)' % (col, suffix)
                            for col in ratio_df.columns]
        return ratio_df.reset_index()
    # Partition timepoint into 4 hour time windows around light transitions
    # (the -1s / +1s fudges make the boundary bins inclusive at both ends).
    time_boundaries = [
        datetime.timedelta(hours=0, seconds=-1), datetime.timedelta(hours=3),
        datetime.timedelta(hours=7), datetime.timedelta(hours=11),
        datetime.timedelta(hours=15), datetime.timedelta(hours=19),
        datetime.timedelta(hours=23), datetime.timedelta(hours=24, seconds=1)]
    time_window = pd.cut(time_window_df.time_period, bins=time_boundaries,
                         labels=['dark', 'late-dark', 'early-light',
                                 'light', 'late-light', 'early-dark',
                                 'dark2'])
    # The day wraps around: the final bin is the same 'dark' window.
    time_window[time_window == 'dark2'] = 'dark'
    time_window = time_window.cat.remove_unused_categories()
    time_window_df['time_window'] = time_window.astype(str)
    time_window_df = time_window_df.groupby(['trace_id', 'time_window'])
    time_window_df = time_window_df[AGGREGATE_FEATURES].agg(np.mean)
    # Make lights on/off ratio features
    pre_lights_on_df = _subset_time_window(time_window_df, 'late-dark')
    post_lights_on_df = _subset_time_window(time_window_df, 'early-light')
    lights_on_ratio_df = _make_ratio_df(
        pre_lights_on_df, post_lights_on_df, 'lights-on-ratio')
    pre_lights_off_df = _subset_time_window(time_window_df, 'late-light')
    post_lights_off_df = _subset_time_window(time_window_df, 'early-dark')
    lights_off_ratio_df = _make_ratio_df(
        pre_lights_off_df, post_lights_off_df, 'lights-off-ratio')
    # Pivot windows into columns named '<feature>(<window>)'.
    time_window_df = time_window_df.unstack()
    time_window_df.columns = ['%s(%s)' % (var, time_window)
                              for var, time_window in time_window_df.columns]
    time_window_df = time_window_df.reset_index()
    return time_window_df, lights_on_ratio_df, lights_off_ratio_df
def make_trace_total_and_average_dfs(complete_df):
    """ Make per-trace average features from 3-minute resolution level data.

    Returns:
        (trace_daily_total_df, trace_daily_average_df): per-trace means of
        the per-complete-day sums and per-complete-day means respectively.
    """
    trace_average_df = complete_df.copy()
    def _aggregate_by_day(df, agg_features, reduce_fn):
        # NOTE(review): `agg_features` is accepted but never used — the
        # groupby aggregates every column. Confirm whether the features
        # were meant to be selected here.
        agg_df = df.copy()
        agg_df = agg_df.groupby(['trace_id', 'date']).agg(reduce_fn)
        return agg_df.reset_index()
    # Only keep complete days
    mask = trace_average_df['part_of_complete_day']
    trace_average_df = trace_average_df[mask]
    # Sums of features of complete days
    total_by_date_df = _aggregate_by_day(
        trace_average_df, AGGREGATE_FEATURES, 'sum')
    # Averages of features of complete days
    average_by_date_df = _aggregate_by_day(
        trace_average_df, AGGREGATE_FEATURES, 'mean')
    # Aggregate by trace now to get the average per-day sum or
    # mean across the run
    agg_fns = {col: 'mean' for col in AGGREGATE_FEATURES}
    trace_daily_total_df = (total_by_date_df.groupby('trace_id')
                            .agg(agg_fns).reset_index())
    trace_daily_average_df = (average_by_date_df.groupby('trace_id')
                              .agg(agg_fns).reset_index())
    return trace_daily_total_df, trace_daily_average_df
def make_state_means_df(complete_df):
    """ Make per-state average features from 3-minute resolution level data.

    Produces one row per trace with columns named
    '<feature>(<state descriptor>)' via STATE2NAME.
    """
    state_means_df = complete_df.copy()
    state_means_df = (state_means_df.groupby(['trace_id', 'states'])
                      [BASE_FEATURES].mean())
    # Pivot states into columns; MultiIndex columns are (feature, state).
    state_means_df = state_means_df.unstack()
    state_means_df.columns = [x + '(' + STATE2NAME[y] + ')'
                              for x, y in state_means_df.columns]
    state_means_df = state_means_df.reset_index()
    return state_means_df
def make_circadian_means_and_ratio_dfs(complete_df):
    """ Make circadian agg features from 3-minute resolution level data.
    These features include:
    - Means for each circadian period (light / dark)
    - Ratio of circadian means
    """
    circadian_means_df = complete_df.copy()
    circadian_means_df = (circadian_means_df
                          .groupby(['day_night', 'trace_id'])[BASE_FEATURES]
                          .mean())
    circadian_ratio_df = (circadian_means_df.loc['night'] /
                          circadian_means_df.loc['day'])
    # Invalidate ratios with a near-zero daytime denominator.
    mask = circadian_means_df.loc['day'] < 1e-3
    circadian_ratio_df[mask] = None
    circadian_ratio_df = circadian_ratio_df.reset_index()
    return circadian_means_df, circadian_ratio_df
def compute_window_stats(data, indicator_fn, name):
    """ Computes activity window features from indicator function.
    Indicator function should be a function that takes in a row
    representing a 3 minute window of data and returns a boolean
    determining if that time period belongs to an activity period
    (e.g., feeding, sleeping, high activity).
    This function then computes:
    - The number of activity windows
    - Median length and interval between activity windows
    - (Robust) max length and interval between activity windows
    """
    index = ['num period', 'median period', 'median interval', 'max period',
             'max interval']
    index = [name + ' ' + col for col in index]
    missing = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan], index=index)
    # Bail out with NaNs when more than 30% of values are missing or the
    # day is not fully measured.
    if isinstance(data, pd.Series):
        if ((~np.isnan(data)).mean() < 0.7 or
                len(data) < LENGTH_OF_COMPLETE_DAY):
            return missing
    else:
        for col in data.columns:
            if ((~np.isnan(data[col])).mean() < 0.7 or
                    len(data[col]) < LENGTH_OF_COMPLETE_DAY):
                return missing
    indicators = indicator_fn(data)
    # Run-length encode: each maximal run of equal indicator values gets a
    # distinct label, so value_counts() yields run lengths.
    labels = indicators.ne(indicators.shift()).cumsum()
    period_lengths = labels[indicators].value_counts()
    interval_lengths = labels[~indicators].value_counts()
    if len(period_lengths) == 0:
        num_periods = np.nan
        median_period = np.nan
        max_period = np.nan
    else:
        num_periods = len(period_lengths)
        # Divide by 20: presumably converts 3-minute-window counts to hours
        # (20 windows per hour) — confirm. 95th percentile is the robust max.
        median_period = period_lengths.quantile(0.5) / 20
        max_period = period_lengths.quantile(0.95) / 20
    if len(interval_lengths) == 0:
        median_interval = np.nan
        max_interval = np.nan
    else:
        median_interval = interval_lengths.quantile(0.5) / 20
        max_interval = interval_lengths.quantile(0.95) / 20
    return pd.Series([num_periods, median_period, median_interval,
                      max_period, max_interval], index=index)
def make_window_stats_df(complete_df, columns, indicator_fn, name):
    """ Computes activity window features per day in the met cage.

    Applies compute_window_stats to each (trace, complete day) and then
    averages the per-day statistics per trace.
    """
    map_fn = functools.partial(
        compute_window_stats, indicator_fn=indicator_fn, name=name)
    complete_df = complete_df[complete_df['part_of_complete_day']]
    window_stats_df = (complete_df.groupby(['trace_id', 'date'])[columns]
                       .apply(map_fn).reset_index())
    window_stats_df = window_stats_df.drop('date', axis=1)
    window_stats_df = window_stats_df.groupby('trace_id').mean().reset_index()
    return window_stats_df
def make_feeding_window_stats_df(complete_df):
    """Window statistics for feeding bouts (food intake or EAT states 4/5)."""
    def _is_feeding(data):
        food_intake = data['Food'] > 1e-3
        eating_state = data['states'].isin([4, 5])
        return food_intake | eating_state
    return make_window_stats_df(
        complete_df, ['Food', 'states'], _is_feeding, 'feeding')
def make_sleeping_window_stats_df(complete_df):
    """Window statistics for sleep bouts (state code 1)."""
    def _is_sleeping(data):
        return data['states'].isin([1])
    return make_window_stats_df(
        complete_df, ['states'], _is_sleeping, 'sleeping')
def make_exercising_window_stats_df(complete_df):
    """Window statistics for exercise bouts (state codes 2 and 3)."""
    def _is_exercising(data):
        return data['states'].isin([2, 3])
    return make_window_stats_df(
        complete_df, ['states'], _is_exercising, 'exercising')
def generate_all_feature_dfs(complete_df, mouse_df):
    """ Generates all features and smoothes per-trace features.

    Returns:
        dict mapping a feature-set name to its DataFrame (per-trace frames
        are indexed by trace_id with metadata columns attached).
    """
    trace_meta_df = make_trace_metadata(complete_df, mouse_df)
    complete_df = add_derived_features(complete_df, trace_meta_df)
    # Generate feature sets
    hourly_average_df = make_hourly_average_df(complete_df)
    t_window_dfs = make_time_window_and_lights_on_off_ratio_dfs(complete_df)
    time_window_df, lights_on_ratio_df, lights_off_ratio_df = t_window_dfs
    circadian_features_dfs = make_circadian_means_and_ratio_dfs(complete_df)
    circadian_means_df, circadian_ratio_df = circadian_features_dfs
    daily_totals_and_avgs_dfs = make_trace_total_and_average_dfs(complete_df)
    trace_daily_total_df, trace_daily_average_df = daily_totals_and_avgs_dfs
    state_means_df = make_state_means_df(complete_df)
    feeding_window_stats_df = make_feeding_window_stats_df(complete_df)
    sleeping_window_stats_df = make_sleeping_window_stats_df(complete_df)
    exercising_window_stats_df = make_exercising_window_stats_df(complete_df)
    def _stack_features(feature_dfs):
        # Horizontally concatenate per-trace frames, aligning on trace_id.
        feature_subsets = [df.copy().set_index('trace_id')
                           for df in feature_dfs]
        return pd.concat(feature_subsets, axis=1, sort=True).reset_index()
    all_trace_features_df = _stack_features([
        trace_daily_average_df, state_means_df,
        sleeping_window_stats_df, exercising_window_stats_df,
        feeding_window_stats_df, time_window_df,
        lights_on_ratio_df, lights_off_ratio_df])
    trace_ids = all_trace_features_df['trace_id']
    trace_dates = trace_meta_df.loc[trace_ids]['trace_date']
    all_trace_features_df['trace_date'] = trace_dates
    mouse_ids = trace_meta_df.loc[trace_ids]['mouse_id']
    # Smooth every feature along time, per mouse, with an L1 trend filter.
    smoothed_trace_features_df = (
        all_trace_features_df.set_index('trace_id')
        .sort_values('trace_date')
        .groupby(mouse_ids)
        .apply(smoothing.l1_trend_filter_agg,
               'trace_date', lmbda=40))
    smoothed_trace_features_df.index.name = 'trace_id'
    smoothed_trace_features_df = smoothed_trace_features_df.reset_index()
    trace_feature_sets = {
        'trace average': trace_daily_average_df,
        'trace sum': trace_daily_total_df,
        'state means': state_means_df,
        'sleeping window': sleeping_window_stats_df,
        'exercise window': exercising_window_stats_df,
        'feeding window': feeding_window_stats_df,
        'hourly means': hourly_average_df,
        'time window means': time_window_df,
        'circadian ratio': circadian_ratio_df,
        'lights on ratio': lights_on_ratio_df,
        'lights off ratio': lights_off_ratio_df,
        'all features': all_trace_features_df,
        'smoothed all features': smoothed_trace_features_df
    }
    # Attach per-trace metadata columns to every feature set.
    for key, df in trace_feature_sets.items():
        trace_feature_sets[key] = (
            add_trace_meta(df, trace_meta_df).set_index('trace_id'))
    # NOTE(review): this assumes complete_df already carries an
    # 'age_in_months' column — it is not added in the code shown; confirm.
    complete_df.loc[:, 'age_group'] = complete_df['age_in_months'].apply(
        make_age_group)
    trace_feature_sets.update({
        'complete data': complete_df,
        'trace metadata': trace_meta_df,
        'mouse metadata': mouse_df})
    return trace_feature_sets
| StarcoderdataPython |
5174866 | <filename>mspray/apps/trials/migrations/0003_sample_bgeom.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-08-10 15:18
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations
class Migration(migrations.Migration):
    # Adds a nullable 'bgeom' polygon field (SRID 4326 / WGS84) to Sample.
    dependencies = [
        ('trials', '0002_sample_submission_id'),
    ]
    operations = [
        migrations.AddField(
            model_name='sample',
            name='bgeom',
            field=django.contrib.gis.db.models.fields.PolygonField(blank=True, null=True, srid=4326),
        ),
    ]
| StarcoderdataPython |
6662584 | <gh_stars>0
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient.v1.events import EventManager
from mock import MagicMock
from mock import patch
from mox3 import mox
import testtools
class EventManagerTest(testtools.TestCase):
    """Tests that EventManager builds correct (URL-escaped) request URLs."""
    def setUp(self):
        super(EventManagerTest, self).setUp()
        # mox record/replay mock framework; stubs undone after each test.
        self.m = mox.Mox()
        self.addCleanup(self.m.UnsetStubs)
        self.addCleanup(self.m.ResetAll)
    def test_list_event(self):
        # NOTE(review): the trailing comma makes stack_id a 1-tuple
        # ('teststack',), not a string — same in the other tests; confirm
        # this is intentional.
        stack_id = 'teststack',
        resource_name = 'testresource'
        manager = EventManager(None)
        self.m.StubOutWithMock(manager, '_resolve_stack_id')
        manager._resolve_stack_id(stack_id).AndReturn('teststack/abcd1234')
        self.m.ReplayAll()
        manager._list = MagicMock()
        manager.list(stack_id, resource_name)
        # Make sure url is correct.
        manager._list.assert_called_once_with('/stacks/teststack%2Fabcd1234/'
                                              'resources/testresource/events',
                                              "events")
    def test_list_event_with_unicode_resource_name(self):
        stack_id = 'teststack',
        resource_name = u'\u5de5\u4f5c'
        manager = EventManager(None)
        self.m.StubOutWithMock(manager, '_resolve_stack_id')
        manager._resolve_stack_id(stack_id).AndReturn('teststack/abcd1234')
        self.m.ReplayAll()
        manager._list = MagicMock()
        manager.list(stack_id, resource_name)
        # Make sure url is correct.
        manager._list.assert_called_once_with('/stacks/teststack%2Fabcd1234/'
                                              'resources/%E5%B7%A5%E4%BD%9C/'
                                              'events', "events")
    def test_list_event_with_none_resource_name(self):
        stack_id = 'teststack',
        manager = EventManager(None)
        manager._list = MagicMock()
        manager.list(stack_id)
        # Make sure url is correct.
        manager._list.assert_called_once_with('/stacks/teststack/'
                                              'events', "events")
    def test_get_event(self):
        fields = {'stack_id': 'teststack',
                  'resource_name': 'testresource',
                  'event_id': '1'}
        class FakeAPI(object):
            """Fake API and ensure request url is correct."""
            def json_request(self, *args, **kwargs):
                expect = ('GET',
                          '/stacks/teststack%2Fabcd1234/resources'
                          '/testresource/events/1')
                assert args == expect
                return {}, {'event': []}
        manager = EventManager(FakeAPI())
        # Patch Event so no real object is constructed from the fake payload.
        with patch('heatclient.v1.events.Event'):
            self.m.StubOutWithMock(manager, '_resolve_stack_id')
            manager._resolve_stack_id('teststack').AndReturn(
                'teststack/abcd1234')
            self.m.ReplayAll()
            manager.get(**fields)
    def test_get_event_with_unicode_resource_name(self):
        fields = {'stack_id': 'teststack',
                  'resource_name': u'\u5de5\u4f5c',
                  'event_id': '1'}
        class FakeAPI(object):
            """Fake API and ensure request url is correct."""
            def json_request(self, *args, **kwargs):
                expect = ('GET',
                          '/stacks/teststack%2Fabcd1234/resources'
                          '/%E5%B7%A5%E4%BD%9C/events/1')
                assert args == expect
                return {}, {'event': []}
        manager = EventManager(FakeAPI())
        with patch('heatclient.v1.events.Event'):
            self.m.StubOutWithMock(manager, '_resolve_stack_id')
            manager._resolve_stack_id('teststack').AndReturn(
                'teststack/abcd1234')
            self.m.ReplayAll()
            manager.get(**fields)
1778994 | <reponame>tiboun/python-bigquery-test-kit
# Copyright (c) 2020 <NAME>
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
# C0114 disabled because this module contains only one class
# pylint: disable=C0114
from copy import deepcopy
from typing import Optional
from google.cloud.bigquery.client import Client
from google.cloud.bigquery.job import LoadJobConfig, SourceFormat
from bq_test_kit.bq_dsl.bq_resources.data_loaders.base_data_loader import \
BaseDataLoader
from bq_test_kit.bq_dsl.bq_resources.data_loaders.mixins.raw_file_loader_mixin import \
RawFileLoaderMixin
from bq_test_kit.resource_loaders.package_file_loader import PackageFileLoader
class DsvDataLoader(BaseDataLoader, RawFileLoaderMixin):
    """Loader of Delimiter-Separated Value data. By default, it's CSV.

    Immutable builder: every `with_*` / `allow_*` method deep-copies the
    loader and returns the copy with one load-job option changed.
    """
    def __init__(self,
                 *, table, partition: Optional[str] = None, from_: PackageFileLoader,
                 bq_client: Client, load_job_config: LoadJobConfig = LoadJobConfig()):
        """Constructor of DsvDataLoader.
        Args:
            table (Table): table to load data into.
            from_ (PackageFileLoader): specifies where data is.
            bq_client (Client): instance of bigquery client to use across the DSL.
            partition (Optional[str], optional): if you plan to load into a specific partition. Used as a decorator.
                Defaults to None.
            load_job_config (LoadJobConfig, optional): Big Query load job config.
                This is the object updated by this DSL. Defaults to LoadJobConfig().
        """
        # Copy the config so the caller's LoadJobConfig is never mutated,
        # and force the CSV source format.
        _load_job_config = deepcopy(load_job_config)
        _load_job_config.source_format = SourceFormat.CSV
        super().__init__(table=table, partition=partition, from_=from_,
                         bq_client=bq_client, load_job_config=_load_job_config)
    def allow_jagged_rows(self, allow: bool = True):
        """Allow missing trailing optional columns.
        Args:
            allow (bool, optional): allow or not jagged rows. Defaults to True.
        Returns:
            DsvDataLoader: new instance of DsvDataLoader with allow_jagged_rows set to 'allow'.
        """
        data_loader = deepcopy(self)
        data_loader.load_job_config.allow_jagged_rows = allow
        return data_loader
    def allow_quoted_newlines(self, allow: bool = True):
        """Allow quoted data containing newline characters.
        Args:
            allow (bool, optional): allow or not quoted newlines. Defaults to True.
        Returns:
            DsvDataLoader: new instance of DsvDataLoader with allow_quoted_newlines set to 'allow'.
        """
        data_loader = deepcopy(self)
        data_loader.load_job_config.allow_quoted_newlines = allow
        return data_loader
    def with_field_delimiter(self, delimiter: str):
        """The field's separator.
        Args:
            delimiter (str): delimiter to use.
        Returns:
            DsvDataLoader: new instance of DsvDataLoader with updated field_delimiter.
        """
        data_loader = deepcopy(self)
        data_loader.load_job_config.field_delimiter = delimiter
        return data_loader
    def with_null_marker(self, marker: str):
        """Specifies what represents a null value.
        Args:
            marker (str): null value marker.
        Returns:
            DsvDataLoader: new instance of DsvDataLoader with updated null marker.
        """
        data_loader = deepcopy(self)
        data_loader.load_job_config.null_marker = marker
        return data_loader
    def with_quote_character(self, char: str):
        """Character used to quote data sections.
        Args:
            char (str): a character.
        Returns:
            DsvDataLoader: new instance of DsvDataLoader with updated quote character.
        """
        data_loader = deepcopy(self)
        data_loader.load_job_config.quote_character = char
        return data_loader
    def skip_leading_rows(self, nb_lines: int):
        """Number of rows to skip from the beginning of the file.
        Args:
            nb_lines (int): number of lines
        Returns:
            DsvDataLoader: new instance of DsvDataLoader with updated leading rows to skip.
        """
        data_loader = deepcopy(self)
        data_loader.load_job_config.skip_leading_rows = nb_lines
        return data_loader
    def __deepcopy__(self, memo):
        # Delegate to the shared base-class deep-copy helper.
        return self._deepcopy_base_data_loader(DsvDataLoader, memo)
| StarcoderdataPython |
3261230 | <filename>modeling/blender/test_util.py
"""
Tests for blender utilities.
"""
# Copryight (c) 2020 <NAME>. All rights reserved.
from unittest.mock import Mock
from modeling.blender import util
def test_flatten():
    """flatten_nodes should recursively concatenate nested node lists."""
    a, b, c = 'a', 'b', 'c'
    # Already-flat input is returned unchanged.
    assert util.flatten_nodes([a, b, c]) == [a, b, c]
    # One level of nesting is spliced in place.
    assert util.flatten_nodes([[a, b, c], b, c]) == [a, b, c, b, c]
    # Arbitrary nesting depth is fully flattened.
    assert util.flatten_nodes([[[a, b], a, b], a, b]) == [a, b, a, b, a, b]
def test_configure_cycles():
    """configure_cycles should set the expected Cycles render settings."""
    scene = Mock()
    util.configure_cycles(scene, 16, 32, True)
    # not the best test but whatever
    assert scene.render.engine == 'CYCLES'
    expected_cycles = {
        'feature_set': 'EXPERIMENTAL',
        'samples': 16,
        'preview_samples': 32,
        'device': 'GPU',
    }
    for attr, value in expected_cycles.items():
        assert getattr(scene.cycles, attr) == value
| StarcoderdataPython |
# Read a first and last name, then print them last-name-first,
# each spelled backwards.
first_name = input('Input Here First Name: ')
last_name = input('Input Here Last Name ')
reversed_full_name = '%s %s' % (last_name[::-1], first_name[::-1])
print(reversed_full_name)
6587397 | <filename>zwiz/_utils.py<gh_stars>0
"""This module contains utility classes related to scraping HS3 website"""
import re
# pylint: disable=C0103 # Non-snake variable names
# pylint: disable=R0902 # Many instances
# pylint: disable=R0903 # Few public methods
class Node:
    """Convenience wrapper objectifying a single z-wave node.

    Only ``node_id`` is set here; scraper functions attach further
    attributes (name, manufacturer, neighbors, ...) directly to instances.
    Should the Node object grow in importance, pre-declaring those
    attributes and handling them more structurally would be worthwhile.
    """

    def __init__(self, node_id):
        """Store this node's ID.

        Arguments:
            node_id (int): The ID of the node corresponding to the ID
                in the z-wave network.
        """
        self.node_id = node_id
class Edge:
    """Convenience wrapper for a z-wave edge.

    An edge is the relationship between two nodes; objectifying it makes
    the edges easier to work with later.
    """

    def __init__(self, source: Node, target: Node, edgetype, weight):
        """Initialize the Edge from its endpoint Node objects."""
        self.source = source
        self.target = target
        self.type = edgetype
        self.weight = weight
        # Stable identifier derived from the two endpoint IDs.
        self.id = f"{source.node_id}__{target.node_id}"
class Scrapers:
    """This is a utility class containing functions that scrape HTML"""
    # pylint: disable=C0103 # allow short non-snake variables
    @staticmethod
    def _get_main_tables(soup):
        """
        Identify the header_table and the node_table from the scraped
        HTML. Identify the header table by looking for "Current Z-Wave Networks"
        and the nodes table by looking for "Node Information".
        Arguments:
            soup (bs4.BeautifulSoup): Parsed HTML as a bs4 object.
        Returns:
            header_table, nodes_table (tuple): The identified bs4 table tags.
        Raises:
            ValueError: If either header or nodes table is not found in html.
        """
        tables = soup.find_all('table')
        header_table = None
        nodes_table = None
        # Identify each table by the marker text in its first cell.
        for table in tables:
            first_td = str(table.find_all('td')[0])
            if "Current Z-Wave Networks" in first_td:
                header_table = table
            if "Node Information" in first_td:
                nodes_table = table
        if not header_table:
            raise ValueError('Header table not found on HS3 page')
        if not nodes_table:
            raise ValueError('Nodes table not found on HS3 page')
        return header_table, nodes_table
    @staticmethod
    def _get_header(header_table):
        """
        Scrape the header table, and return the header attributes.
        Arguments:
            header_table (str): The HTML containing the header table.
        Returns:
            header (dict): Dictionary containing the header attributes.
        """
        # Positional parsing: the third row holds the values, in fixed order.
        tds = header_table.find_all('tr')[2].find_all('td')
        header = {
            'Network Friendly Name': tds[0].contents[0].strip(),
            'HomeID': tds[1].contents[0].strip(),
            'Number of Nodes': int(tds[2].contents[0].strip()),
            'Interface Name': tds[3].contents[0].strip(),
            'Interface Model': tds[4].contents[0].strip(),
            'NodeID': int(tds[5].contents[0].strip()),
        }
        return header
    @staticmethod
    def _get_nodes(nodes_table):
        """
        Scrape the nodes table, and return the individual nodes.
        Identify key trs in the table by looking for identifying strings.
        Implicitly, this means that this will fail if key strings occur multiple times.
        Arguments:
            nodes_table (str): The piece of HMTL containing the nodes table
        Returns:
            nodes (dict of node_id:Node): Dictionary with node_id as key, Node object as value
                representing the nodes found in the Z-wave network.
        """
        #from ._hs3data import Node # pylint: disable=C0415 # import outside top-level
        node_trs = nodes_table.find_all('tr')
        nodes = {}
        # `node_id` set in the "Full Name" branch is reused by the later
        # branches — the rows of one node are assumed to appear in order.
        for tr in node_trs:
            # Identify the first tr by looking for "Full Name".
            # Get the node_id and other info.
            # Keep node_id until next time.
            if "<b>Full Name" in str(tr):
                # this is first tr in a node. This is where the NodeID should be
                # so using this to initialize a new Node.
                node_id = Scrapers.find_node_id(str(tr))
                nodes[node_id] = Node(node_id=node_id)
                nodes[node_id].name = Scrapers.find_pair_value(tr, "Full Name")
            # Find the second TR in a node, which contains various key:value pairs
            # Technically not necessary for the specific use case (edges), but
            # in the nice-to-have bucket... This approach can be used for other
            # contents of the node later as well.
            if "<b>Manufacturer" in str(tr):
                nodes[node_id].manufacturer = Scrapers.find_pair_value(tr, "Manufacturer")
                nodes[node_id].type = Scrapers.find_pair_value(tr, "Type")
                nodes[node_id].listens = Scrapers.find_pair_value(tr, "Listens")
                nodes[node_id].version = Scrapers.find_pair_value(tr, "Version")
                nodes[node_id].firmware = Scrapers.find_pair_value(tr, "Firmware")
                nodes[node_id].speed = Scrapers.find_pair_value(tr, "Speed")
            # Find the tr in the html containing neighbors
            if "<b>Neighbors" in str(tr):
                neighbors = Scrapers.find_pair_value(tr, "Neighbors")
                if neighbors is None:
                    neighbors = []
                else:
                    neighbors = [int(n.strip()) for n in neighbors.split()]
                nodes[node_id].neighbors = neighbors
            # Find the tr in the node html containing the last working route
            if "<b>Last Working Route" in str(tr):
                last_working_route = Scrapers.get_last_working_route(str(tr))
                nodes[node_id].last_working_route = last_working_route
        return nodes
    @staticmethod
    def get_last_working_route(tr):
        """
        Parse the Last Working Route from the tr.
        This assumes that the tr has been pre-identified.
        The Last Working Route is the list of node_id's representing
        the route last used by the device to reach the central node.
        It is formatted like this: <node_id> -> <node_id>, where the
        ">" is represented as "&gt;".
        Returns:
            list of int: node IDs along the route; [] if none, [1] if direct.
        """
        # first find the correct pair from the html
        last_working_route = Scrapers.find_pair_value(tr, "Last Working Route")
        if last_working_route.startswith('None'):
            # This means that there was no last working route
            # Retuning empty list
            return []
        if last_working_route.startswith('Direct'):
            # This means that the last working route was directly to the central_node
            return [1]
        last_working_route = last_working_route.split()[0]
        sep = '-&gt;'
        if sep in last_working_route:
            # The route has more than one node, so split it and return the list
            return [int(l) for l in last_working_route.split(sep)]
        # the route has only one node, so return a list containing that only
        return [int(last_working_route)]
    @staticmethod
    def find_node_id(html):
        """
        Extract the node_id from the html. It will be the only integer between
        two html tags, so it can be identified by regexing for that.
        Arguments:
            html (str): The html containing the node_id
        Returns:
            node_id (int): The node_id
        """
        # Match the first run of digits enclosed directly by '>' and '<'.
        node_id = int(re.findall(r'>\d+<', html)[0].replace('<','').replace('>', ''))
        return node_id
    @staticmethod
    def find_pair_value(html, key): # pylint: disable=R1710 # inconsistent return statements
        """
        Search the html, find the contents, assume that the resulting
        matches are key/values from pairs. Search for the key, then
        return the next - which then will be the value.
        Arguments:
            html (str): The HTML string
            key (str): The key to search for
        Returns:
            value (str): The value corresponding to the given key,
                or None if the key is the last match (no value follows).
        """
        # clean the html
        html = str(html).replace('\n', '')
        # regex to find all text content
        # The challenge is to find only text, not any html-tags
        # Supposedly, BeautifulSoup can do this, but I could not figure out how...
        matches = re.findall(r">.[^<>]+<", str(html))
        # Remove known noise
        removings = ['>', '<', ':', '&nbsp;', ',']
        for r in removings:
            matches = [m.replace(r, '') for m in matches]
        matches = [m.strip() for m in matches]
        matches = [m for m in matches if m]
        # This feels like something for iter and next(), but...
        for m, match in enumerate(matches):
            if match == key:
                try:
                    return matches[m+1]
                except IndexError:
                    return None
1603479 | import networkx as nx
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer, TfidfTransformer, CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sumy.utils import get_stop_words
import re
import math
import warnings
warnings.simplefilter("ignore", UserWarning)
class SentenceFeature():
    def __init__(self, parser) -> None:
        # Raw sentences of the document, in order.
        self.sents = parser.sents
        self.sents_i = list(range(len(self.sents))) # list contains index of each sentence
        # self.chunked_sentences = parser.chunked_sentences()
        # self.entities_name = self.ner(self.chunked_sentences)
        # Bag-of-words counts over all sentences, English stop words removed.
        self.vectorizer = CountVectorizer(stop_words=get_stop_words("english"))
        self.X = self.vectorizer.fit_transform(self.sents)
        # Per-sentence token lists from the parser; judging by their use in
        # _get_stopwords_ratio, processed_words is presumably the
        # stop-word-filtered subset of unprocessed_words — confirm in parser.
        self.processed_words = parser.processed_words
        self.unprocessed_words = parser.unprocessed_words
    def _get_name_entity(self, sent_i):
        # Number of named entities found in the sentence.
        # NOTE(review): self.entities_name is commented out in __init__, so
        # calling this as shown would raise AttributeError — confirm.
        return len(self.entities_name[sent_i])
def _get_position(self, sent_i):
count = self.sents_i[-1]
position = 1
if count != 0:
position = sent_i / count
return position
    def sentence_position(self, sent_i):
        """
        :param sent_i: int
            Index of a sentence
        :return: float
        """
        # NOTE(review): operator precedence makes this
        # len(sents) - (sent_i / len(sents)). If a 0..1 position score was
        # intended, it should read (len(sents) - sent_i) / len(sents) — confirm.
        return len(self.sents) - sent_i / len(self.sents)
def get_noun_adj(self, sent_i):
words_num = len(self.unprocessed_words[sent_i])
if words_num != 0:
return len(self.processed_words[sent_i]) / words_num
return len(self.processed_words[sent_i])
    def numerical_data(self, sent_i):
        """
        :param sent_i: int
            Index of a sentence
        :return: float
            Fraction of the sentence's tokens that are numeric matches.
        """
        # findall returns the captured (optional fractional-part) groups, but
        # its length still equals the number of matches, so the count is right.
        # NOTE(review): the '.' before the group's digits is unescaped, so any
        # character separates the parts (e.g. '3x5' also matches) — confirm.
        word_len = len(self.unprocessed_words[sent_i])
        if word_len != 0:
            return len(re.findall(r'\d+(.\d+)?', self.sents[sent_i])) / word_len
        return 0
def sentence_length(self, sent_i):
return len(self.unprocessed_words[sent_i]) / np.max(len(self.unprocessed_words))
def max_leng_sent(self):
return np.max(len(self.unprocessed_words))
def _get_doc_first(self, sent_i):
"""
:param sent_i: int
Index of a sentence
:return: int
1, input sentence is the first sentence of a document.
0, input sentence is not the first sentence of a document.
"""
# return int(sent_i == 0)
doc_first = int(sent_i == 0)
if doc_first == 0:
doc_first = 0
return doc_first
def _get_length(self, sent_i):
"""
:param sent_i: int
Index of a sentence
:return: int
The number of words in a sentence
"""
return len(self.unprocessed_words[sent_i])
    def get_surface_features(self, sents_i=None):
        """
        Surface features are based on structure of documents or sentences.
        :param sents_i: list or int, optional
            list contains multiple sentence indices
            int indicate a single sentence index
            None (default) uses every sentence of the document.
        :return: numpy.ndarray
            1-dimensional array of [position, doc_first, length] for an
            int sents_i parameter
            2-dimensional array (one such row per index) for a list
            sents_i parameter
        """
        # solely get surface features for unlabeled data
        if sents_i is None:
            sents_i = self.sents_i
        def get_features(sent_i):
            position = self._get_position(sent_i) # get 1/sentence no
            doc_first = self._get_doc_first(sent_i)
            length = self._get_length(sent_i)
            return [position, doc_first, length]
        surface_features = []
        if type(sents_i) is list:
            # get surface features for multiple samples for labeled data
            for sent_i in sents_i:
                surface_feature = get_features(sent_i)
                surface_features.append(surface_feature)
            # self.features_name = ["position", "doc_first", "para_first", "length", "quote"]
        else:
            # get surface features for single sample for labeled data
            surface_features = get_features(sents_i)
        surface_features = np.asarray(surface_features)
        return surface_features
def _get_stopwords_ratio(self, sent_i):
"""
:param sent_i: int
Index of a sentence
:return: float, in between [0, 1]
Stop words ratio of s
"""
words_num = len(self.unprocessed_words[sent_i])
if words_num != 0:
non_stopwords_num = len(self.processed_words[sent_i])
stopwords_ratio = (words_num - non_stopwords_num) / words_num
else:
stopwords_ratio = 1
return stopwords_ratio
def _get_tf_idf(self, sent_i):
a = self._get_avg_doc_freq(sent_i)
if a <= 0:
return 0
return self._get_avg_term_freq(sent_i) * math.log(a)
def _get_all_tf_idf(self):
score = []
for idx, val in enumerate(self.sents):
a = self._get_avg_doc_freq(idx)
if a <= 0:
b = 0
else:
b = (self._get_avg_term_freq(idx) * math.log(a))
score.append(b)
return score
def _get_centroid_similarity(self, sent_i):
tfidfScore = self._get_all_tf_idf()
centroidIndex = tfidfScore.index(max(tfidfScore))
return self._cal_cosine_similarity([self.sents[sent_i], self.sents[centroidIndex]])
def _get_avg_term_freq(self, sent_i):
"""
:param sent_i: int
Index of a sentence
:param vectorizer: sklearn.feature_extraction.text.CountVectorizer
:param X: array, [n_samples, n_features]
Document-term matrix.
:return: float
Average Term Frequency
"""
GTF = np.ravel(self.X.sum(axis=0)) # sum each columns to get total counts for each word
unprocessed_words = self.unprocessed_words[sent_i]
total_TF = 0
count = 0
for w in unprocessed_words:
w_i_in_array = self.vectorizer.vocabulary_.get(w) # map from word to column index
if w_i_in_array:
total_TF += GTF[w_i_in_array]
count += 1
if count != 0:
avg_TF = total_TF / count
else:
avg_TF = 0
return avg_TF
def _get_avg_doc_freq(self, sent_i):
"""
:param sent_i: int
Index of a sentence
:param vectorizer: sklearn.feature_extraction.text.CountVectorizer
:param X: array, [n_samples, n_features]
Document-term matrix.
:return: float
Average Document Frequency
"""
unprocessed_words = self.unprocessed_words[sent_i]
total_DF = 0
count = 0
for w in unprocessed_words:
w_i_in_array = self.vectorizer.vocabulary_.get(w) # map from word to column index
if w_i_in_array:
total_DF += len(self.X[:, w_i_in_array].nonzero()[0])
count += 1
if count != 0:
avg_DF = total_DF / count
else:
avg_DF = 0
return avg_DF
def get_content_features(self, sents_i):
# solely get content features for unlabeled data
if sents_i is None:
sents_i = self.sents_i
def get_features(sent_i):
stop = self._get_stopwords_ratio(sent_i)
TF = self._get_avg_term_freq(sent_i)
DF = self._get_avg_doc_freq(sent_i)
# Emb = self._get_emb(sent_i, word_vectors)
# core_rank_score = self._get_avg_core_rank_score(sent_i)
return [stop, TF, DF]
content_features = []
if type(sents_i) is list:
# get surface features for multiple samples for labeled data
for sent_i in sents_i:
content_feature = get_features(sent_i)
content_features.append(content_feature)
# self.features_name = ["stop", "TF", "DF", "core_rank_score"]
else:
# get surface features for single sample for labeled data
content_features = get_features(sents_i)
content_features = np.asarray(content_features)
return content_features
def _cal_cosine_similarity(self, documents):
"""
:param documents: list
:return: float, in between [0, 1]
"""
tfidf_vectorizer = TfidfVectorizer()
try:
tfidf_matrix = tfidf_vectorizer.fit_transform(documents)
similarity = cosine_similarity(tfidf_matrix[0, :], tfidf_matrix[1, :])[0][0]
except ValueError:
if documents[0] == documents[1]:
similarity = 1.0
else:
similarity = 0.0
return similarity
def _get_first_rel_doc(self, sent_i):
"""
:param sent_i: int
Index of a sentence
:return: float
Similarity with the first sentence in the document
"""
first_sent_doc = self.sents[0]
sent = self.sents[sent_i]
relevance = self._cal_cosine_similarity([first_sent_doc, sent])
return relevance
    def page_rank_rel(self, thres=0.1):
        """
        PageRank value of the sentence based on the sentence map
        :param thres: int
            Every two sentences are regarded relevant if their similarity is above a threshold.
        :return: dict
            Dictionary of index nodes with PageRank as value.
        """
        G = nx.Graph()
        # Build a sentence map.
        # Every two sentences are regarded relevant if their similarity is above a threshold.
        # Every two relevant sentences are connected with a unidirectional link.
        # NOTE(review): the outer slice [:-2] never starts a pair from the
        # second-to-last index, so the pair (n-2, n-1) is never compared —
        # looks like an off-by-one ([:-1] would cover it); confirm intent.
        # NOTE(review): the inner slice assumes self.sents_i equals
        # range(len(self.sents)), i.e. index values match positions — verify.
        for i in self.sents_i[:-2]:
            for j in self.sents_i[i + 1:]:
                sim = self._cal_cosine_similarity([self.sents[i], self.sents[j]])
                if sim > thres:
                    G.add_edge(i, j)
        pr = nx.pagerank(G)
        return pr
def get_relevance_features(self, sents_i):
"""
Relevance features are incorporated to exploit inter-sentence relationships.
:param sents_i: list or int, optional
list contains multiple sentence indices
int indicate a single sentence index
:return: list
1-dimensional list consists of first_rel_doc, first_rel_para and page_rank_rel features for int sents_i parameter
2-dimensional list consists of first_rel_doc, first_rel_para and page_rank_rel features for list sents_i parameter
"""
# solely get relevance features for unlabeled data
if sents_i is None:
sents_i = self.sents_i
try:
self.pr
except AttributeError:
self.pr = self.page_rank_rel()
# global_avg_word_emb = self._get_global_avg_word_emb(word_vectors)
def get_features(sent_i):
first_rel_doc = self._get_first_rel_doc(sent_i)
page_rank_rel = self.pr.get(sent_i, 0)
# Emb_cos = self._get_emb_cos(sent_i, word_vectors, global_avg_word_emb)
return [first_rel_doc, page_rank_rel]
relevance_features = []
if type(sents_i) is list:
# get surface features for multiple samples for labeled data
for sent_i in sents_i:
relevance_feature = get_features(sent_i)
relevance_features.append(relevance_feature)
# self.features_name = ["first_rel_doc", "first_rel_para", "page_rank_rel"]
else:
# get surface features for single sample for labeled data
relevance_features = get_features(sents_i)
relevance_features = np.asarray(relevance_features)
return relevance_features
def get_all_features(self, sent_i=None):
"""
Concatenate sub-features together.
:param vectorizer: sklearn.feature_extraction.text.CountVectorizer
:param X: Document-term matrix
:param word_vectors: optional
:param sent_i: index of sent
:return: numpy array
"""
surface_features = self.get_surface_features(sent_i)
content_features = self.get_content_features(sent_i)
relevance_features = self.get_relevance_features(sent_i)
all_feature = np.concatenate((surface_features, content_features, relevance_features), axis=0)
# self.features_name = ["position", "doc_first", "para_first", "length", "quote", "stop", "TF", "DF",
# "core_rank_score", "first_rel_doc", "first_rel_para", "page_rank_rel"]
return all_feature
def get_all_features_of_sent(self, vectorizer, X, word_vectors=None, sents_i=None):
"""
Concatenate sub-features together.
:param vectorizer: sklearn.feature_extraction.text.CountVectorizer
:param X: Document-term matrix
:param word_vectors: optional
:param sents_i: list
:return: numpy array
"""
# get all feature for unlabeled data
if sents_i is None:
sents_i = self.sents_i
all_features = []
for sent_i in sents_i:
surface_features = self.get_surface_features(sent_i)
content_features = self.get_content_features(sent_i, vectorizer, X, word_vectors)
relevance_features = self.get_relevance_features(sent_i)
all_feature = np.concatenate((surface_features, content_features, relevance_features), axis=0)
all_features.append(all_feature)
# self.features_name = ["position", "doc_first", "para_first", "length", "quote", "stop", "TF", "DF",
# "core_rank_score", "first_rel_doc", "first_rel_para", "page_rank_rel"]
return all_features
@staticmethod
def get_global_term_freq(parsers):
"""
:param parsers: newssum.parser.StoryParser
:return: tuple, (vectorizer, X)
vectorizer, sklearn.feature_extraction.text.CountVectorizer.
X, Document-term matrix.
"""
vectorizer = CountVectorizer(stop_words=get_stop_words("english"))
if type(parsers) is list:
corpus = [parser.body for parser in parsers]
else:
corpus = [parsers.body]
X = vectorizer.fit_transform(corpus)
return vectorizer, X
def extract_entity_names(self, t):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == 'NE':
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(self.extract_entity_names(child))
return entity_names
def ner(self, chunked_sentences):
entity_names = []
for tree in chunked_sentences:
# print(self.extract_entity_names(tree))
entity_names.append(self.extract_entity_names(tree))
return entity_names
| StarcoderdataPython |
8130240 | from typing import Type, Any, List, Dict, Set, Tuple, Union, Optional, Iterator, Iterable
from Helpers.Graph import Pps2DGraph, PpsHyperGraph, PpsLogHyperGraph
from Helpers.Torches import *
from Helpers.GlobalSettings import Gs, Gsv
from Models.CommonLayers import FeatureInteractor
from Dataset import GraphDataset
class GCNLayer(nn.Module):
    """Single GCN propagation layer: D^{-1/2} A D^{-1/2} X W.

    The linear transform W is applied on whichever side of the propagation
    leaves features smaller, to cut the cost of the sparse matmul.
    """

    def __init__(self,
                 device: th.device,
                 dataset: GraphDataset,
                 input_dimension: int,
                 output_dimension: int):
        super().__init__()
        self.device = device
        self.dataset = dataset
        self.input_dimension = input_dimension
        self.output_dimension = output_dimension
        # Precomputed sparse adjacency A and D_v^{-1/2} normalization vector.
        self.adjacency = SparseTensor.from_torch_sparse_coo_tensor(dataset.graph2d.Adjacency).coalesce()
        self.Dv_neg_1_slash_2 = dataset.graph2d.VertexDegrees.pow(-0.5)
        self.feature_transform = nn.Linear(input_dimension, output_dimension)

    def forward(self, input_features: Tensor):
        # DADX(W)
        # FIX: local renamed from `all` to `features` — the old name shadowed
        # the `all` builtin.
        features: Tensor = input_features
        # To reduce computation, apply the feature transform first when the
        # output dimension is the smaller one.
        if self.input_dimension >= self.output_dimension:
            features = self.feature_transform(features)
            features = self.Dv_neg_1_slash_2 * features
            features = thsp.matmul(self.adjacency, features)
            features = self.Dv_neg_1_slash_2 * features
        else:
            features = self.Dv_neg_1_slash_2 * features
            features = thsp.matmul(self.adjacency, features)
            features = self.Dv_neg_1_slash_2 * features
            features = self.feature_transform(features)
        return features
class GATLayer(nn.Module):
    """Graph attention layer over the dataset's 2D graph.

    A per-edge attention score is computed from the transformed endpoint
    features — either their concatenation or their element-wise product,
    selected by ``Gs.Gnn.gat_head`` — then softmax-normalized per
    destination node with dgl and used for a weighted neighbor sum.
    """

    def __init__(self,
                 device: th.device,
                 dataset: GraphDataset,
                 input_dimension: int,
                 output_dimension: int):
        super().__init__()
        self.device = device
        self.dataset = dataset
        self.input_dimension = input_dimension
        self.output_dimension = output_dimension

        # Edge list of the sparse adjacency; after .t(), row k holds the
        # (src, dst) node pair of edge k.
        indices = dataset.graph2d.Adjacency.indices()
        self.node_feature_selector = indices.t()

        if Gs.Gnn.gat_head == Gsv.concat:
            # Attention head scores the concatenation [h_src ; h_dst].
            feature_aggregate = nn.Linear(2 * output_dimension, 1)
            init.xavier_uniform_(
                feature_aggregate.weight,
                gain=init.calculate_gain(Gs.Gnn.gat_activation[1])
            )
            self.feature_aggregate = nn.Sequential(
                feature_aggregate,
                Gs.Gnn.gat_activation[0]()
            )
        elif Gs.Gnn.gat_head == Gsv.product:
            # Attention head scores the element-wise product h_src * h_dst.
            feature_aggregate = nn.Linear(output_dimension, 1)
            init.xavier_uniform_(
                feature_aggregate.weight,
                gain=init.calculate_gain(Gs.Gnn.gat_activation[1])
            )
            self.feature_aggregate = nn.Sequential(
                feature_aggregate,
                Gs.Gnn.gat_activation[0](),
            )
        else:
            raise ValueError()

        # dgl graph built from the same edge list, used for the softmax and
        # the message-passing reduction in forward().
        self.dgl_graph = dgl.graph(
            data=(indices[0], indices[1]),
            num_nodes=dataset.node_count,
            device=device)
        self.feature_transform = nn.Linear(input_dimension, output_dimension)

    def forward(self, input_features: Tensor):
        input_features = self.feature_transform(input_features)
        # Gather both endpoint features of every edge.
        # (edge_count, 2, feature_dimension)
        selected_node_feature = input_features[self.node_feature_selector]
        if Gs.Gnn.gat_head == Gsv.concat:
            # (edge_count, 2 * feature_dimension)
            selected_node_feature = selected_node_feature.reshape(-1, 2 * self.output_dimension)
        elif Gs.Gnn.gat_head == Gsv.product:
            # (edge_count, feature_dimension)
            selected_node_feature = selected_node_feature[:, 0, :] * selected_node_feature[:, 1, :]
            # selected_node_feature = F.normalize(selected_node_feature)
        # Raw attention logits, one per edge.
        # (edge_count)
        edge_importance = self.feature_aggregate(selected_node_feature).squeeze_()
        # Softmax over the incoming edges of each destination node.
        edge_importance = dgl.ops.edge_softmax(self.dgl_graph, edge_importance.float())
        # Weighted sum of source-node features into each destination node.
        output_features = dgl.ops.u_mul_e_sum(self.dgl_graph, input_features.float(), edge_importance)
        return output_features
class HGCNLayer(nn.Module):
    """Hypergraph convolution layer.

    Propagates node features through hyperedges as
    D_v^{-1/2} H D_e^{-1} H^T D_v^{-1/2} X W, where H is the node-edge
    incidence matrix.
    """

    def __init__(self,
                 device: torch.device,
                 dataset: GraphDataset,
                 input_dimension: int,
                 output_dimension: int):
        super().__init__()
        self.device = device
        self.dataset = dataset
        hypergraph = dataset.graph
        incidence_matrix = hypergraph.Adjacency
        # Degree normalizations: D_v^{-1/2} for vertices, D_e^{-1} for edges.
        self.Dv_neg_1_slash_2 = hypergraph.VertexDegrees.pow(-0.5)
        self.De_neg_1 = hypergraph.EdgeDegrees.pow(-1)
        # Sparse incidence matrix H and its transpose H^T.
        self.incidence = SparseTensor.from_torch_sparse_coo_tensor(incidence_matrix).coalesce()
        self.incidence_t = SparseTensor.from_torch_sparse_coo_tensor(incidence_matrix.t()).coalesce()
        self.feature_transform = nn.Linear(input_dimension, output_dimension)

    def forward(self, input_features: Tensor):
        # HyperGCN: Dv W(h) H De Ht W(h)(t) Dv X W(x)
        node_features = self.feature_transform(input_features)
        node_features = self.Dv_neg_1_slash_2 * node_features
        # Pool node features into hyperedge features, normalized by edge degree.
        edge_features = thsp.matmul(self.incidence_t, node_features)
        edge_features = self.De_neg_1 * edge_features
        # Scatter edge features back to nodes, normalized by vertex degree.
        node_features = thsp.matmul(self.incidence, edge_features)
        node_features = self.Dv_neg_1_slash_2 * node_features
        return node_features
class IHGNNLayer(nn.Module):
    """IHGNN layer: hypergraph propagation with feature interaction.

    Edge features are produced by a FeatureInteractor (high-order feature
    interaction up to ``feature_interaction_order``); they are combined back
    into node features either by an attention pass over an edge->node
    bipartite graph (``phase2_attention=True``) or by degree-normalized
    incidence aggregation.
    """

    class _FakeGraph:
        """Minimal graph stand-in exposing only the adjacency tensor."""

        def __init__(self, adjacency: Tensor) -> None:
            self.Adjacency = adjacency

    class _FakeDataset:
        """Minimal dataset stand-in fed to the internal GATLayer."""

        def __init__(self, adjacency: Tensor, node_count: int) -> None:
            # BUG FIX: GATLayer reads `dataset.graph2d.Adjacency` and
            # `dataset.node_count`; the old attribute names (`Graph`,
            # `NodeCount`) raised AttributeError when phase2_attention was on.
            self.graph2d = IHGNNLayer._FakeGraph(adjacency)
            self.node_count = node_count

    def __init__(self,
                 device: torch.device,
                 dataset: GraphDataset,
                 input_dimension: int,
                 output_dimension: int,
                 feature_interaction_order: int,
                 phase2_attention: bool):
        super().__init__()
        self.device = device
        self.dataset = dataset
        self.feature_interaction_order = feature_interaction_order
        self.attention_phase2 = phase2_attention
        graph = dataset.hypergraph
        assert feature_interaction_order in [1, 2, 3], '特征交互阶数只能为 1 2 或 3'
        incidence = graph.Adjacency
        node_indices = incidence.indices()[0]
        edge_indices = incidence.indices()[1]
        #self.Dv_neg_1_slash_2 = graph.VertexDegrees.pow(-0.5)
        self.Dv_neg_1 = graph.VertexDegrees.pow(-1)
        #self.De_neg_1 = graph.EdgeDegrees.pow(-1)
        self.incidence = SparseTensor.from_torch_sparse_coo_tensor(incidence).coalesce()
        # Edge-feature aggregation / high-order feature interaction layer.
        self.feature_interactor = FeatureInteractor(
            dataset=dataset,
            max_order=feature_interaction_order,
            node_feature_dimension=input_dimension,
            output_dimension=input_dimension
        )
        if phase2_attention:
            # Build the phase-2 bipartite graph with edges pointing from
            # hyperedge nodes (offset by node_count) to their member nodes.
            fake_adj = torch.sparse_coo_tensor(
                indices=torch.stack([edge_indices + dataset.node_count, node_indices]),
                values=torch.ones(len(edge_indices), dtype=torch.long, device=device),
                size=[dataset.node_count + graph.EdgeCount] * 2,
                dtype=torch.float,
            ).coalesce()
            self.fake_gat = GATLayer(
                device=device,
                dataset=IHGNNLayer._FakeDataset(
                    fake_adj,
                    dataset.node_count + graph.EdgeCount
                ),
                input_dimension=output_dimension,
                output_dimension=output_dimension
            )
        self.feature_transform = nn.Linear(input_dimension, output_dimension)

    def forward(self, input_features: Tensor):
        # HyperGCN: Dv W(h) H De Ht W(h)(t) Dv X W(x)
        input_features = self.feature_transform(input_features)
        edge_features = self.feature_interactor(input_features)
        if self.attention_phase2:
            # Phase-2 attention: run GAT over [nodes ; edges], then keep only
            # the node part of the output.
            output_features = self.fake_gat(torch.cat([input_features, edge_features]))
            output_features = output_features[:input_features.size(0)]
        else:
            # High-order feature interaction: aggregate edge features back to
            # nodes via the incidence matrix, normalized by vertex degree.
            output_features = thsp.matmul(self.incidence, edge_features)
            output_features = self.Dv_neg_1 * output_features
        return output_features
| StarcoderdataPython |
63745 | # Based on https://github.com/eklitzke/utxodump
from typing import Tuple
import binascii
import leveldb
import config
import json
import os
def decode_varint(val: bytearray) -> Tuple[int, int]:
    """Decode a Bitcoin Core chainstate varint.

    Each byte contributes its low 7 bits; the high bit marks continuation,
    with a +1 adjustment per continued byte (Bitcoin's MSB-base-128 scheme).

    :param val: buffer whose first bytes hold an encoded varint
    :return: (decoded value, number of bytes consumed)
    :raises ValueError: if the buffer ends before the varint terminates
    """
    n = 0
    for i, c in enumerate(val):
        n = (n << 7) | (c & 0x7f)
        if c & 0x80:
            n += 1
        else:
            return n, i + 1
    # FIX: was `assert False`, which is stripped under `python -O` and would
    # then silently fall through and return None on truncated input.
    raise ValueError('truncated varint')
def decode_height(val: bytearray) -> int:
    """Extract the block height from a coin's serialized code varint.

    The code's low bit appears to be a flag (it is shifted away); the height
    occupies the remaining bits.
    """
    code, _ = decode_varint(val)
    return code >> 1
def decode_txid(key: bytearray) -> Tuple[str, int]:
    """Decode a chainstate 'C'-prefixed key into (txid, vout).

    Layout: one prefix byte (0x43, 'C'), 32 little-endian txid bytes, then a
    varint-compressed vout that must consume the rest of the key.

    FIX: the return annotation previously said ``str`` although the function
    returns a (txid, vout) tuple.
    """
    assert key[0] == 67  # b'C' prefix of UTXO records
    # The txid is stored little-endian; reverse before hex-encoding.
    txid = binascii.hexlify(key[1:33][::-1]).decode('utf8')
    compressed_vout = key[33:]
    vout, declen = decode_varint(compressed_vout)
    assert declen == len(compressed_vout)
    return txid, vout
def locate_db(path: str) -> str:
    """Return the chainstate LevelDB directory under a (possibly ~) data dir."""
    return os.path.join(os.path.expanduser(path), 'chainstate')
def get_obfuscate_key(conn: leveldb.LevelDB) -> bytearray:
    """Fetch the 8-byte XOR obfuscation key stored in the chainstate DB.

    The value is length-prefixed: one length byte (must be 8) followed by
    the 8 key bytes.
    """
    record = conn.Get(bytearray(b'\x0e\x00obfuscate_key'))
    assert record[0] == 8 and len(record) == 9
    return record[1:]
def decrypt(ciphertext: bytearray, key: bytearray):
    """XOR-deobfuscate `ciphertext` in place with the repeating `key`."""
    key_len = len(key)
    for i in range(len(ciphertext)):
        ciphertext[i] ^= key[i % key_len]
def get_unspent(path: str, snapshot_start: str, snapshot_end: str):
    """Collect unspent outputs per txid for blocks inside a height window.

    :param path: bitcoin data directory (must contain `chainstate`)
    :param snapshot_start: lower block-height bound (compared with ``>``)
    :param snapshot_end: upper block-height bound (compared with ``<``)
    :return: dict mapping txid (hex str) -> list of vout indices
    """
    conn = leveldb.LevelDB(locate_db(path))
    secret = get_obfuscate_key(conn)
    result = {}
    # UTXO records live under keys prefixed with b'C'; RangeIter scans them.
    for k, v in conn.RangeIter(b'C', b'D', include_value=True):
        # Values are XOR-obfuscated on disk; deobfuscate in place.
        decrypt(v, secret)
        txid, vout = decode_txid(k)
        height = decode_height(v)
        # NOTE(review): both bounds are exclusive — confirm that blocks at
        # exactly snapshot_start / snapshot_end should be skipped. Also the
        # annotations say str but heights are ints; verify config types.
        if height > snapshot_start and height < snapshot_end:
            if txid not in result:
                result[txid] = [vout]
            else:
                result[txid].append(vout)
    return result
# Dump the UTXO set for the configured snapshot window to a JSON file.
data = get_unspent(config.BLOCKCHAIN_DIR, config.SNAPSHOT_START, config.SNAPSHOT_END)
with open('{}/unspent.json'.format(config.SNAPSHOT_DIR), 'w') as file:
    json.dump(data, file)
| StarcoderdataPython |
9623387 | <filename>esmvalcore/preprocessor/_derive/rtnt.py
"""Derivation of variable `rtnt`."""
from iris import Constraint
from ._baseclass import DerivedVariableBase
class DerivedVariable(DerivedVariableBase):
    """Derivation of variable `rtnt` (TOA net downward total radiation)."""

    @staticmethod
    def required(project):
        """Declare the variables needed for derivation."""
        return [
            {'short_name': 'rsdt'},
            {'short_name': 'rsut'},
            {'short_name': 'rlut'},
        ]

    @staticmethod
    def calculate(cubes):
        """Compute toa net downward total radiation.

        rtnt = incoming shortwave - outgoing shortwave - outgoing longwave.
        """
        incoming_sw = cubes.extract_strict(
            Constraint(name='toa_incoming_shortwave_flux'))
        outgoing_sw = cubes.extract_strict(
            Constraint(name='toa_outgoing_shortwave_flux'))
        outgoing_lw = cubes.extract_strict(
            Constraint(name='toa_outgoing_longwave_flux'))
        return incoming_sw - outgoing_sw - outgoing_lw
| StarcoderdataPython |
11214172 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 30 17:48:44 2021
@author: HP
"""
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import pandas
import time
from bs4 import BeautifulSoup
from selenium.common.exceptions import ElementClickInterceptedException, StaleElementReferenceException
from datetime import datetime, timedelta
import os
import requests
# Resolve the directory this script lives in; output goes to a subfolder.
dir_path = os.path.dirname(os.path.realpath(__file__))
# NOTE(review): backslash separators are Windows-only; os.path.join would be portable.
output_directory = dir_path+"\\cryptoslam_main_pages" # Data will be printed out here
if not os.path.exists(output_directory): # create the folder if not exists already
    os.mkdir(output_directory)
def contains(list_1, list_2):
    """Return True if every element of `list_1` is present in `list_2`.

    :param list_1: iterable of candidate elements
    :param list_2: container to test membership against
    :return: bool
    """
    # Idiomatic short-circuiting form: the old loop kept scanning every
    # remaining element even after the first miss.
    return all(item in list_2 for item in list_1)
# https://cryptoslam.io/cryptopunks/marketplace buraya da bak
# product sayfasından top market buyers ı al
def obtain_series_links(series_names):
    """Build (slug, url) pairs for each NFT series name.

    :param series_names: sequence whose first element is an iterable of
        series display names (e.g. "Crypto Punks")
    :return: list of (slug, "https://cryptoslam.io/<slug>") tuples
    """
    links = []
    for name in series_names[0]:
        # Lowercase and hyphenate the display name to form the URL slug.
        slug = "-".join(name.lower().split())
        links.append((slug, "https://cryptoslam.io/" + slug))
    return links
# Load the series names collected by an earlier step and build their URLs.
series_names = pandas.read_pickle("series_names.pkl") # Get series names (cryptopunks, art blocks etc.)
series_main_pages = obtain_series_links(series_names)

options = webdriver.FirefoxOptions()
# options.headless = True
options.add_argument("--start-maximized")
browser = webdriver.Firefox(options=options)

for page in series_main_pages:
    series_name = page[0]
    # Skip series that were already exported in a previous run.
    if os.path.exists(output_directory+"\\cryptoslam_"+page[0]+".xlsx"):
        continue
    urlpage = page[1]
    browser.get(urlpage)
    browser.maximize_window()
    time.sleep(10)
    # Dismiss the overlay/consent dialog if it is present.
    # NOTE(review): bare `except:` swallows every error (incl. KeyboardInterrupt);
    # catching selenium's NoSuchElementException would be safer.
    try:
        browser.find_element_by_xpath("/html/body/div[3]/div[1]/div[1]").click()
    except :
        pass
    time.sleep(10)
    # Parse all HTML tables on the page into DataFrames.
    soup = BeautifulSoup(browser.page_source)
    soup_table = soup.find_all("table")
    tables = pandas.read_html(str(soup_table))
    # NOTE(review): if no table matches, top_buyers_table / top_sellers_table
    # stay unbound (or keep the previous iteration's value) — the later
    # try/except only catches ValueError, not NameError; confirm.
    for table in tables:
        if contains(["Buyer","Amount","USD"],table.columns) :
            top_buyers_table = table
        elif contains(["Seller","Amount","USD"],table.columns):
            top_sellers_table = table
    # Get the tables of top buyers and sellers
    result_buyers =browser.find_elements_by_xpath("/html/body/div[2]/div/div[5]/div/div[2]/div[2]/div[3]/div[1]/div/div[3]/div/div[2]/div/table/tbody/tr/td[1]/a")
    result_sellers =browser.find_elements_by_xpath("/html/body/div[2]/div/div[5]/div/div[2]/div[2]/div[3]/div[2]/div/div[3]/div/div[2]/div/table/tbody/tr/td[1]/a")
    # Sometimes the page loads only partially and the top buyer/seller data
    # cannot be gathered; in that case reload the page and retry, giving up
    # after at most 5 refreshes.
    try_refreshing = 0
    while len(result_buyers) == 0:
        browser.refresh()
        time.sleep(5)
        result_buyers =browser.find_elements_by_xpath("/html/body/div[2]/div/div[5]/div/div[2]/div[2]/div[3]/div[1]/div/div[3]/div/div[2]/div/table/tbody/tr/td[1]/a")
        result_sellers =browser.find_elements_by_xpath("/html/body/div[2]/div/div[5]/div/div[2]/div[2]/div[3]/div[2]/div/div[3]/div/div[2]/div/table/tbody/tr/td[1]/a")
        try_refreshing +=1
        if try_refreshing == 5:
            break
    buyer_data= list()
    seller_data = list()
    # Gather buyer and seller addresses
    for result in result_buyers:
        address = result.get_attribute("data-original-title")
        buyer_data.append(address)
    for result in result_sellers:
        address = result.get_attribute("data-original-title")
        seller_data.append(address)
    # Attach the wallet addresses to the tables; a length mismatch raises
    # ValueError, in which case this series is skipped.
    try:
        top_buyers_table["Buyer"] = buyer_data
        top_sellers_table["Seller"] = seller_data
        print(series_name,"gathered")
    except ValueError:
        print(series_name,"failed to gather")
        continue
    if len(top_buyers_table) == 0:
        print(series_name)
    # Export one workbook per series for buyers and sellers.
    top_buyers_table.to_excel(output_directory+"\\cryptoslam_"+page[0]+"_top_buyers.xlsx")
    top_sellers_table.to_excel(output_directory+"\\cryptoslam_"+page[0]+"_top_sellers.xlsx")
browser.quit() # Kill the browser
3509468 | """Common statistical modelling code."""
import numpy as np
def multivariate_normal_pdf(x, mean, cov):
    """Unnormalized multivariate normal density (no (2*pi)^(-k/2) factor).

    Masked components of ``x - mean`` contribute zero deviation; when the
    deviation is fully masked the density is identically 1.
    """
    deviation = np.asanyarray(x) - np.asanyarray(mean)
    cov = np.asarray(cov)
    if isinstance(deviation, np.ma.MaskedArray):
        mask = np.ma.getmaskarray(deviation)
        if np.all(mask):
            # Nothing observed: return 1 for every batch element.
            return np.ones(deviation.shape[:-1])
        # Zero out masked entries so they drop out of the quadratic form.
        deviation = np.ma.getdata(deviation) * ~mask
    # Prepend singleton axes so cov broadcasts over deviation's batch dims.
    if cov.ndim <= deviation.ndim:
        pad = 1 + deviation.ndim - cov.ndim
        cov = np.broadcast_to(cov, (1,) * pad + cov.shape)
    quad = np.einsum('...i,...i', deviation, np.linalg.solve(cov, deviation))
    return np.exp(-0.5 * quad) / np.sqrt(np.linalg.det(cov))
def multivariate_normal_rvs(mean, cov):
    """Draw samples from N(mean, cov) via an SVD square root of cov.

    :param mean: array-like batch of means
    :param cov: covariance matrix (or batch of matrices)
    :return: samples with the same shape as ``mean``
    """
    std = svd_sqrt(cov)
    # BUG FIX: `std` is an ndarray, whose `.size` is a plain int and cannot
    # be indexed; the noise dimension is std.shape[-1].
    nw = std.shape[-1]
    w = np.random.randn(*np.shape(mean), nw)
    return mean + np.einsum('...ij,...j', std, w)
def svd_sqrt(a):
    """Return a matrix square root L of a symmetric PSD matrix: L @ L.T == a.

    Uses the SVD a = U S V^H; for symmetric PSD `a`, U S U^T == a, so
    L = U * sqrt(S) satisfies L L^T = a.
    """
    u, s, vh = np.linalg.svd(a)
    # BUG FIX: the singular values must be square-rooted — `u * s` gives
    # U S, and (U S)(U S)^T = U S^2 U^T = a^2 for PSD a, not a square root.
    return u * np.sqrt(s)
| StarcoderdataPython |
1686389 | from typing import List, Callable, Dict, Any, Tuple, NamedTuple
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as tf_layers
from tensorflow.python.ops import template as template_ops
from tqdm import tqdm
from glow import flow_layers as fl
from glow import tf_ops
from glow import tf_ops as ops
K = tf.keras.backend
keras = tf.keras
class OpenAITemplate(NamedTuple):
    """
    A shallow neural network used by GLOW paper:
    * https://github.com/openai/glow

    activation_fn: activation function used after each conv layer
    width: number of filters in the shallow network
    """
    activation_fn: Callable[[tf.Tensor], tf.Tensor] = tf.nn.relu
    width: int = 32

    def create_template_fn(
            self,
            name: str,
    ) -> Callable[[tf.Tensor], Tuple[tf.Tensor, tf.Tensor]]:
        """
        Creates simple shallow network. Note that this function will return a
        tensorflow template, so the variables it creates are shared between
        calls under the same scope name.
        Args:
            name: a scope name of the network
        Returns:
            a template function
        """

        def _shift_and_log_scale_fn(x: tf.Tensor, y_label: tf.Tensor = None):
            """NN is a shallow, 3 convolutions with 512 units: 3x3, 1x1, 3x3, the last one returns shift and logscale
            """
            shape = K.int_shape(x)
            num_channels = shape[3]
            with tf.variable_scope("BlockNN"):
                h = x
                # Concatenate conditioning labels with x.
                # Just in the shift and log scale fn should be fine...
                h = ops.conv2d("l_1", h, self.width)
                depth = K.int_shape(h)[-1]
                # Project the conditioning label to the conv channel depth
                # and add it to every spatial position.
                # NOTE(review): y_label=None will fail in K.int_shape below —
                # the label appears to be mandatory in practice; confirm.
                label_size = K.int_shape(y_label)[-1]
                dense_w = tf.get_variable(
                    "dense_w",
                    shape=(label_size, depth),
                    initializer=tf.contrib.layers.xavier_initializer())
                dense_b = tf.get_variable(
                    "dense_b",
                    shape=(depth, ),
                    initializer=tf.contrib.layers.xavier_initializer())
                conditioning_y = tf.nn.xw_plus_b(y_label, dense_w, dense_b)
                h = h + conditioning_y[:, None, None, :]
                h = self.activation_fn(h)  # 3x3 filter
                h = self.activation_fn(
                    ops.conv2d("l_2", h, self.width, filter_size=[1, 1]))
                # create shift and log_scale with zero initialization, so the
                # coupling layer starts as the identity transform
                shift_log_scale = ops.conv2d_zeros("l_last", h,
                                                   2 * num_channels)
                # Even channels are the shift, odd channels the log-scale.
                shift = shift_log_scale[:, :, :, 0::2]
                log_scale = shift_log_scale[:, :, :, 1::2]
                # Clip for numerical stability of exp(log_scale).
                log_scale = tf.clip_by_value(log_scale, -15.0, 15.0)
                return shift, log_scale

        return template_ops.make_template(name, _shift_and_log_scale_fn)
def step_flow(name: str,
              shift_and_log_scale_fn: Callable[[tf.Tensor], tf.Tensor]
              ) -> Tuple[fl.ChainLayer, fl.ActnormLayer]:
    """Create a single Glow step: actnorm -> invertible 1x1 conv -> coupling.

    Returns:
        step_layer: a flow layer chaining the three operations
        actnorm: a reference to the actnorm layer of the step, usable for
            data-dependent initialization
    """
    actnorm = fl.ActnormLayer()
    step_layer = fl.ChainLayer(
        [
            actnorm,
            fl.InvertibleConv1x1Layer(),
            fl.AffineCouplingLayer(shift_and_log_scale_fn=shift_and_log_scale_fn),
        ],
        name=name,
    )
    return step_layer, actnorm
def initialize_actnorms(
        sess: tf.Session(),
        feed_dict_fn: Callable[[], Dict[tf.Tensor, np.ndarray]],
        actnorm_layers: List[fl.ActnormLayer],
        num_steps: int = 100,
        num_init_iterations: int = 10,
) -> None:
    """Initialize actnorm layers with data-dependent initialization.

    Args:
        sess: an instance of tf.Session
        feed_dict_fn: callable returning the feed_dict for sess.run
        actnorm_layers: actnorm layers to initialize
        num_steps: number of batches used per layer
        num_init_iterations: forwarded to get_ddi_init_ops
    """
    for layer in tqdm(actnorm_layers):
        ddi_op = layer.get_ddi_init_ops(num_init_iterations)
        for _ in range(num_steps):
            sess.run(ddi_op, feed_dict=feed_dict_fn())
def create_simple_flow(num_steps: int = 1,
                       num_scales: int = 3,
                       num_bits: int = 5,
                       template_fn: Any = OpenAITemplate()
                       ) -> Tuple[List[fl.FlowLayer], List[fl.ActnormLayer]]:
    """Create a Glow model (K steps per scale, L scales, FactorOut last).

    Args:
        num_steps: steps per scale (K in the paper)
        num_scales: number of scales (L in the paper); each halves the
            spatial dimensions
        num_bits: input image quantization bits
        template_fn: template factory used by the coupling layers

    Returns:
        layers: the list of flow layers defining the normalizing flow
        actnorm_layers: actnorm layers for data-dependent initialization
            (see initialize_actnorms())
    """
    layers = [fl.QuantizeImage(num_bits=num_bits)]
    actnorm_layers = []
    for scale in range(num_scales):
        scale_name = f"Scale{scale+1}"
        scale_steps = []
        for s in range(num_steps):
            name = f"Step{s+1}"
            step_layer, actnorm_layer = step_flow(
                name=name,
                shift_and_log_scale_fn=template_fn.create_template_fn(name))
            scale_steps.append(step_layer)
            actnorm_layers.append(actnorm_layer)
        # Each scale squeezes space into channels, runs its steps, then
        # factors out half of the dimensions.
        layers.append(fl.SqueezingLayer(name=scale_name))
        layers.append(fl.ChainLayer(scale_steps, name=scale_name))
        layers.append(fl.FactorOutLayer(name=scale_name))
    return layers, actnorm_layers
| StarcoderdataPython |
11210029 | <gh_stars>0
import pandas as pd
import datetime
import seaborn as sns
import matplotlib.pyplot as plt
### Utils ###
def scores2file(score_map, output_file, sep=" "):
    """Write (node_id, score) pairs to a delimited text file, no header/index."""
    frame = pd.DataFrame(score_map, columns=["node_id", "score"])
    frame.to_csv(output_file, sep=sep, header=False, index=False)
def epoch2date(epoch, tz_info=None):
    """Convert a Unix epoch to a 'YYYY-MM-DD' string.

    If `tz_info` is None the local timezone is used (fromtimestamp default),
    otherwise the supplied timezone.
    """
    dt = datetime.datetime.fromtimestamp(epoch, tz=tz_info)
    return "%i-%.2i-%.2i" % (dt.year, dt.month, dt.day)
### Tennis player information ###
def update_match_counts(df, true_matches):
    """Add found/missing player columns (lists, counts, fraction) to `df` in place.

    :param df: DataFrame with a "players" column holding lists of player names
    :param true_matches: dict keyed by the player names that have an account
    """
    known = set(true_matches.keys())
    df["found_players"] = df["players"].apply(
        lambda names: [n for n in names if n in true_matches])
    df["missing_players"] = df["players"].apply(
        lambda names: list(set(names) - known))
    df["num_players"] = df["players"].apply(len)
    df["num_found_players"] = df["found_players"].apply(len)
    df["num_missing_players"] = df["missing_players"].apply(len)
    df["frac_missing_players"] = df["num_missing_players"] / df["num_players"]
def extract_daily_players(schedule_df, player_accounts, category_filter_func=None):
    """Return daily tennis players in dictionary and dataframe based on the schedule and the found player-account assigments."""
    true_matches = player_accounts
    # creating dataframe
    # Optionally keep only rows whose match category passes the filter.
    if category_filter_func == None:
        schedule_df_tmp = schedule_df
    else:
        schedule_df_tmp = schedule_df[schedule_df["matchHeader"].apply(category_filter_func)]
    daily_players = {}
    # Map each date to {player_name: match_id} for winners and losers alike.
    for index, row in schedule_df_tmp.iterrows():
        date, winner, loser = row["date"], row["playerName active"], row["playerName opponent"]
        header, court, match = row["matchHeader"], row["courtName"], row["orderNumber"]
        match_id = "%s_%s_%i" % (header, court, match)
        if not date in daily_players:
            daily_players[date] = {}
        daily_players[date][winner] = match_id
        daily_players[date][loser] = match_id
    # daily players grouped
    # Collapse to (date, set-of-players) rows and annotate with match counts.
    daily_players_grouped = [(key, set(daily_players[key].keys())) for key in daily_players]
    daily_players_df = pd.DataFrame(daily_players_grouped, columns=["date", "players"])
    update_match_counts(daily_players_df, true_matches)
    daily_players_df = daily_players_df.sort_values("date").reset_index(drop=True)
    return daily_players, daily_players_df
### Labeling nodes ###
def set_label_value(label_value_dict, user, date_idx, collected_dates, screen_name_to_player, daily_found_player_dict):
    """Label a user for a given day based on when the mapped player competes.

    Returns label_value_dict["current" / "previous" / "next"] when the user's
    player plays on the given day, the day before, or the day after,
    respectively; 0.0 otherwise (including users with no mapped player).
    """
    if user not in screen_name_to_player:
        return 0.0
    player = screen_name_to_player[user]
    if player in daily_found_player_dict[collected_dates[date_idx]]:
        return label_value_dict["current"]
    if date_idx > 0 and player in daily_found_player_dict[collected_dates[date_idx - 1]]:
        return label_value_dict["previous"]
    if date_idx < len(collected_dates) - 1:
        next_date = collected_dates[date_idx + 1]
        if next_date in daily_found_player_dict and player in daily_found_player_dict[next_date]:
            return label_value_dict["next"]
    return 0.0
def get_daily_label_dicts(label_value_dict, collected_dates, mentions_df, mapper_dicts, verbose=False):
    """Label every known user for every collected date.

    :param mapper_dicts: (screen_name_to_player, user_dict, daily_found_player_dict)
    :return: dict mapping date -> {user_id: label}
    """
    screen_name_to_player, user_dict, daily_found_player_dict = mapper_dicts
    if verbose:
        print(len(screen_name_to_player), len(user_dict), len(daily_found_player_dict), len(mentions_df))
        print("Labeling users STARTED")
    daily_label_dicts = {}
    for date_idx, date in enumerate(collected_dates):
        daily_label_dicts[date] = {
            user_dict[user]: set_label_value(label_value_dict, user, date_idx, collected_dates,
                                             screen_name_to_player, daily_found_player_dict)
            for user in user_dict
        }
    if verbose:
        print("Labeling users FINISHED")
    return daily_label_dicts
### Visualization ###
def visu_players(handler, figsize=(15,10)):
    """Horizontal bar chart of total vs account-matched players per date.

    :param handler: object exposing `daily_p_df` (with num_players /
        num_found_players columns) and the `dates` to plot
    :return: matplotlib Figure
    """
    df = handler.daily_p_df
    # Restrict to the tournament dates tracked by the handler.
    df = df[df["date"].isin(handler.dates)]
    # Initialize the matplotlib figure
    fig, ax = plt.subplots(figsize=figsize)
    # Total player bars, with the matched-player bars overlaid on top.
    sns.set_color_codes("muted")
    sns.barplot(x="num_players", y="date", data=df,
                label="Total number of players", color="b")
    sns.set_color_codes("muted")
    sns.barplot(x="num_found_players", y="date", data=df,
                label="Number of players with\n assigned Twitter account", color="#21456e")
    ax.legend( loc="lower right", frameon=True)
    ax.set(xlabel="Number of tennis players",ylabel="")
    plt.xlim((0,220))
    sns.despine(left=True, bottom=True)
    # NOTE(review): this second legend call overrides the ax.legend above.
    plt.legend(loc='upper right')
    return fig
def visu_graph(handler, figsize=(12, 8)):
    """Plot daily edge (mention) and node (account) counts for a handler."""
    mention_counts = handler.mentions["date"].value_counts()
    node_counts = {day: get_num_nodes(handler.mentions, day) for day in handler.dates}
    return visu_mention_count(handler.dates, mention_counts, node_counts, figsize)
def visu_mention_count(tournament_dates, num_of_mentions, num_of_nodes, figsize=(12,8)):
    """Line chart of daily edge (mention) and node (account) counts."""
    x = range(len(tournament_dates))
    # Drops the first five characters of each date for the tick labels
    # (assumes "YYYY-MM-DD" strings — TODO confirm).
    x_ticks = [d[5:] for d in tournament_dates]
    y_ticks = [10000*i for i in range(1,5)]
    edges = [num_of_mentions[d] for d in tournament_dates]
    nodes = [num_of_nodes[d] for d in tournament_dates]
    fig = plt.figure(figsize=figsize)
    # Leave room at the bottom for the 90-degree-rotated tick labels.
    plt.gcf().subplots_adjust(bottom=0.2)
    plt.plot(x,edges, "-", linewidth=5.0,label="Number of edges (mentions)", c="#21456e")
    plt.plot(x,nodes, "--",linewidth=5.0,label="Number of nodes (accounts)", c="#e84d3d")
    plt.xticks(x, x_ticks, rotation=90)
    plt.yticks(y_ticks,y_ticks)
    plt.legend()
    return fig
def get_num_nodes(df, date):
    """Number of distinct accounts appearing as source or target on the given day."""
    day_rows = df[df["date"] == date]
    return len(set(day_rows["src"]) | set(day_rows["trg"]))
| StarcoderdataPython |
1873482 | <gh_stars>100-1000
# coding=utf-8
import logging
import time
import numpy as np
import sys
import copy
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from experiments.config import setup_lcsts
from emolga.utils.generic_utils import *
from emolga.models.covc_encdec import NRM
from emolga.models.encdec import NRM as NRM0
from emolga.dataset.build_dataset import deserialize_from_file
from collections import OrderedDict
from fuel import datasets
from fuel import transformers
from fuel import schemes
setup = setup_lcsts
def init_logging(logfile):
    """Attach a timestamped file handler for `logfile` to the root logger.

    Sets the root logger to INFO and returns the `logging` module itself,
    so callers can use the return value like a logger namespace.
    """
    fmt = logging.Formatter('%(asctime)s [%(levelname)s] %(module)s: %(message)s',
                            datefmt='%m/%d/%Y %H:%M:%S')
    file_handler = logging.FileHandler(logfile)
    file_handler.setFormatter(fmt)
    root = logging.getLogger()
    root.addHandler(file_handler)
    root.setLevel(logging.INFO)
    return logging
# prepare logging.
# NOTE: this file is Python 2 (print statements, dict-zip indexing below).
tmark = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))
config = setup()  # load settings.
for w in config:
    print '{0}={1}'.format(w, config[w])
logger = init_logging(config['path_log'] + '/experiments.CopyLCSTS.id={}.log'.format(tmark))
# Seed numpy and Theano RNG streams from the configured seed for reproducibility.
n_rng = np.random.RandomState(config['seed'])
np.random.seed(config['seed'])
rng = RandomStreams(n_rng.randint(2 ** 30))
logger.info('Start!')
train_set, test_set, idx2word, word2idx = deserialize_from_file(config['dataset'])
if config['voc_size'] == -1:  # not use unk
    # Full vocabulary: size is the largest word id + 1.
    config['enc_voc_size'] = max(zip(*word2idx.items())[1]) + 1
    config['dec_voc_size'] = config['enc_voc_size']
else:
    config['enc_voc_size'] = config['voc_size']
    config['dec_voc_size'] = config['enc_voc_size']
samples = len(train_set['source'])
logger.info('build dataset done. ' +
            'dataset size: {} ||'.format(samples) +
            'vocabulary size = {0}/ batch size = {1}'.format(
                config['dec_voc_size'], config['batch_size']))
def unk_filter(data):
    """Map word ids outside the vocabulary to 1 (presumably the <unk> id).

    When config['voc_size'] is -1 the full vocabulary is in use and the data
    is returned unchanged (as a copy).
    """
    if config['voc_size'] == -1:
        return copy.copy(data)
    in_vocab = (np.less(data, config['voc_size'])).astype(dtype='int32')
    # Keep in-vocabulary ids, replace the rest with 1.
    return copy.copy(data * in_vocab + (1 - in_vocab))
# One hand-picked sample: a news snippet (source) and its reference headline (target).
source = '临近 岁末 , 新 基金 发行 步入 旺季 , 11 月份 以来 单周 新基 ' + \
         '发行 数 始终保持 35 只 以上 的 高位 , 仅 11 月 25 日 一天 , ' + \
         '就 有 12 只 基金 同时 发售 。 国内 首只 公募 对冲 混合型 基金 — 嘉实 绝对 收益 策略 ' + \
         '定期 混合 基金 自 发行 首日 便 备受 各界 青睐 , 每日 认购 均 能 达到 上 亿'
target = '首只 公募 对冲 基金 每日 吸金 上 亿'
# Tokens are whitespace-separated; map each to its vocabulary id (Python 2 str.decode).
test_s = [word2idx[w.decode('utf-8')] for w in source.split()]
test_t = [word2idx[w.decode('utf-8')] for w in target.split()]
logger.info('load the data ok.')
logger.info('Evaluate CopyNet')
# Checkpoint to evaluate: epoch number and run timestamp.
echo = 9
tmark = '20160226-164053' # '20160221-025049' # copy-net model [no unk]
config['copynet'] = True
agent = NRM(config, n_rng, rng, mode=config['mode'],
            use_attention=True, copynet=config['copynet'], identity=config['identity'])
agent.build_()
agent.compile_('display')
agent.load(config['path_h5'] + '/experiments.CopyLCSTS.id={0}.epoch={1}.pkl'.format(tmark, echo))
logger.info('generating [testing set] samples')
v = agent.evaluate_(np.asarray(test_s, dtype='int32'),
                    np.asarray(test_t, dtype='int32'),
                    idx2word, np.asarray(unk_filter(test_s), dtype='int32'))
logger.info('Complete!')
11328281 | # -*- coding: utf-8 -*-
"""
*Google search* Plugin
----------------------
Searches Google
Usage::
.g Nyan cat
"""
import itertools
import utils.plugin
from google import search
def gsearch_internal(query):
    # Thin wrapper over the `google` package's search(); stop=3 asks the
    # library to stop fetching after three results.
    return search(query, stop=3)
def gsearch(server=None, channel=None, nick=None, text=None, **kwargs):
    """Handle a ``.g <query>`` / ``.google <query>`` command.

    Replies with up to three result URLs — in the channel when invoked
    publicly, otherwise via private message to the requesting nick.
    """
    _command, query = text.split(" ", 1)
    # Idiomatic membership test instead of channel.find("#") > -1; the reply
    # target is loop-invariant, so compute it once.
    target = channel if "#" in channel else nick
    for url in itertools.islice(gsearch_internal(query), 0, 3):
        server.privmsg(target, u"» %s" % url)
# Plugin registration: trigger on public and private messages whose text
# starts with ".g" or ".google", in all channels, for all users.
gsearch.settings = {
    'events': utils.plugin.EVENTS.PUBMSG + utils.plugin.EVENTS.PRIVMSG,
    'text': r'\.g(oogle)? .*',
    'channels': utils.plugin.CHANNELS.ALL,
    'users': utils.plugin.USERS.ALL
}
144373 | import numpy as np
from wrappa import WrappaObject, WrappaImage
class DSModel:
    """Demo model: rotates every input image by 90 or 180 degrees.

    Inputs are lists of WrappaObjects; each response wraps the rotated image
    with the original file extension preserved.
    """

    def __init__(self, **kwargs):
        # No weights or state are needed for this demo model.
        pass

    @staticmethod
    def _rotate_all(data, quarter_turns):
        """Rotate each object's image counter-clockwise by quarter_turns * 90 degrees."""
        responses = []
        for obj in data:
            rotated = np.rot90(obj.image.as_ndarray, quarter_turns)
            responses.append(WrappaObject(WrappaImage.init_from_ndarray(
                payload=rotated,
                ext=obj.image.ext,
            )))
        return responses

    def predict(self, data, **kwargs):
        """Rotate every input image by 90 degrees; extra kwargs are ignored."""
        _ = kwargs
        return self._rotate_all(data, 1)

    def predict_180(self, data, **kwargs):
        """Rotate every input image by 180 degrees (single np.rot90 call instead
        of two successive 90-degree rotations)."""
        _ = kwargs
        return self._rotate_all(data, 2)
232107 | <filename>baidu.py
import os
from selenium import webdriver
from selenium.common.exceptions import (NoAlertPresentException,
NoSuchElementException)
from selenium.webdriver import FirefoxOptions as FFO
from selenium.webdriver.chrome import options
# Windows-only: reuse the local Chrome profile so existing logins are kept.
profile_dir = os.getenv('localappdata') + '\\Google\\Chrome\\User Data'
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument(
    'blink-settings=imagesEnabled=false')  # do not load images, to speed things up
# Run the browser without a visible window. On Linux, startup fails without
# this option if the system has no display.
#chrome_options.add_argument('--headless')
chrome_options.add_argument("user-data-dir=" + profile_dir)
# NOTE(review): the chrome_options= keyword and find_element_by_css_selector
# are removed in Selenium 4 — confirm the pinned selenium version.
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.implicitly_wait(30)
url = 'https://wenku.baidu.com/task/browse/daily'
driver.get(url)
driver.find_element_by_css_selector(
    '#signin > div.bd > div.clearfix.new-sidebar > span').click()
"""
#kw
#su
"""
# NOTE(review): this bare list is dead code — it is built and immediately
# discarded; presumably scratch notes of candidate URLs/selectors.
[
    'http://xueshu.baidu.com/u/paperhelp',
    'http://xueshu.baidu.com/usercenter/?tab=collect',
    'http://xueshu.baidu.com/usercenter', ##wrapper > p > span.typeitem.sel
]
driver.get('http://xueshu.baidu.com/usercenter')
driver.find_element_by_css_selector('#wrapper > p > span.typeitem.sel').click()
11309133 | import logging
from .ava_eval import do_ava_evaluation
def ava_evaluation(dataset, predictions, output_folder, **_):
    """Run AVA evaluation on `predictions` and return its result.

    Extra keyword arguments are accepted and ignored so this entry point can
    share a calling convention with other evaluators.
    """
    logger = logging.getLogger("alphaction.inference")
    logger.info("performing ava evaluation.")
    eval_kwargs = dict(
        dataset=dataset,
        predictions=predictions,
        output_folder=output_folder,
        logger=logger,
    )
    return do_ava_evaluation(**eval_kwargs)
| StarcoderdataPython |
103183 | <filename>Scripts/HighLayer/GenePrioritization/src/FormatFileQTLR.py
#!/usr/bin/python2
import fileinput
import sys
# Format Genotyped file (GeneNetwork Format) into a text file for QTL/R
# Require a phenotype file (tab-delimited) with a header and row name in 1st column (example BXD_trait.txt)
# Usage: ./FormatFileQTLR.py <genotyped file> <Phenotype file> <output>
# Python 2 script: converts a GeneNetwork genotype file plus a tab-delimited
# phenotype file into CSV files for QTL analysis in R.
genof = sys.argv[1] # From GeneNetwork
phenof = sys.argv[2]
output = sys.argv[3]
# Parse phenotypes: header row gives trait names, remaining rows map a
# mouse ID (column 1) to its trait values.
pheno = {}
l = 0
for line in fileinput.input(phenof):
    ls = line.split()
    if l == 0:
        header_pheno = ls[1:]
    else:
        # Format Mouse_ID, change BXD089 into BXD89
        #if ls[0][3] == '0':
        #    ls[0]=ls[0][0:3]+ls[0][4:]
        #    if ls[0] == 'BXD05':
        #        ls[0] ='BXD5'
        #if ls[0] == 'C57BL6':
        #    ls[0] = 'C57BL/6J'
        #if ls[0] == 'DBA2':
        #    ls[0] = 'DBA/2J'
        pheno[ls[0]]=[]
        #print ls[0]
        for i in ls[1:]:
            pheno[ls[0]].append(i)
    l += 1
# Read Genotype file
# Lines starting with '@' are metadata; the first data line is the header.
geno = {}
l = 0
ID = 0
passed = {} # use to remove duplicates
# NOTE(review): l counts non-'@' lines and ID only advances for new loci
# (ls[1] assumed to be the locus name) — confirm against the original file.
for line in fileinput.input(genof):
    #print line
    if line[0] != '@':
        ls = line.split()
        if l == 0:
            header_geno = ls
        else:
            if ls[1] not in passed:
                passed[ls[1]] = True
                geno[ID] = {}
                for i in range(0,len(header_geno)):
                    geno[ID][header_geno[i]] = ls[i]
                ID += 1
        l += 1
out = open(output,'w')
out2=open(output+'.MBDistance.QTLR','w')
# Write Header:
# Row 1: trait names + sex + mouse ID + locus names.
LocusList=[geno[key]['Locus'] for key in sorted(geno.keys())]
out.write(','.join(header_pheno)+','+'sex,Mouse_ID,'+','.join(LocusList)+'\n')
out2.write(','.join(header_pheno)+','+'sex,Mouse_ID,'+','.join(LocusList)+'\n')
# Row 2: chromosome per locus (phenotype columns left empty).
ChromoList=[geno[key]['Chr'] for key in sorted(geno.keys())]
out.write(','*(len(header_pheno)+2)+','.join(ChromoList)+'\n')
out2.write(','*(len(header_pheno)+2)+','.join(ChromoList)+'\n')
# Row 3: cM positions in `out`, Mb positions in `out2`.
cMList=[geno[key]['cM'] for key in sorted(geno.keys())]
out.write(','*(len(header_pheno)+2)+','.join(cMList)+'\n')
cMList=[str((float(geno[key]['Mb']))) for key in sorted(geno.keys())]
#out.write(','*(len(header_pheno)+2)+','.join(cMList)+'\n')
out2.write(','*(len(header_pheno)+2)+','.join(cMList)+'\n')
# Write Data
for mouse_ID in pheno:
    to_write = []
    # Add Pheno Data
    for phenoData in pheno[mouse_ID]:
        to_write.append(phenoData)
    # Add Sex
    to_write.append('1')
    # Add ID
    to_write.append(mouse_ID)
    # Add Genotype
    if mouse_ID in geno[geno.keys()[0]]:
        for key in sorted(geno.keys()):
            # Expand single-letter genotype codes to the two-allele form.
            if geno[key][mouse_ID] == 'B':
                to_write.append('BB')
            elif geno[key][mouse_ID] == 'D':
                to_write.append('DD')
            elif geno[key][mouse_ID] == 'H':
                to_write.append('BD')
            elif geno[key][mouse_ID] == 'E':
                to_write.append('EE')
            elif geno[key][mouse_ID] == 'F':
                to_write.append('FF')
            elif geno[key][mouse_ID] == 'U':
                to_write.append('-')
            else:
                print geno[key][mouse_ID]
        out.write(','.join(to_write)+'\n')
        out2.write(','.join(to_write)+'\n')
    else:
        print "mouse:",mouse_ID,"Not in genotype Data"
1752792 | <filename>creational/singleton/monostate.py<gh_stars>0
class CEO:
    """Monostate (Borg) singleton: every instance shares one attribute dict."""

    _shared = {
        'name': 'Steve',
        'age': 55,
    }

    def __init__(self):
        # Alias this instance's __dict__ to the class-level dict, so any
        # attribute written on one instance is visible through all others.
        self.__dict__ = self._shared

    def __str__(self):
        return f'{self.name} is {self.age} years old'
if __name__ == '__main__':
    ceo1 = CEO()
    print(ceo1)
    ceo2 = CEO()
    # Mutating one instance is visible through the other: state is shared.
    ceo2.age = 77
    print(ceo2)
1982639 | <gh_stars>1-10
from __future__ import unicode_literals

# Calendar-style version string (YYYY.MM.DD of the release).
__version__ = '2018.03.20'
| StarcoderdataPython |
3309559 | <filename>L1TriggerConfig/GMTConfigProducers/python/L1MuGMTRSKeysOnline_cfi.py
import FWCore.ParameterSet.Config as cms
# ESProducer that, per its name, fetches the L1 muon GMT run-settings keys
# from the online (OMDS) Oracle database.
L1MuGMTRSKeysOnline = cms.ESProducer("L1MuGMTRSKeysOnlineProd",
    # Path to the DB authentication file; '.' means the current directory.
    onlineAuthentication = cms.string('.'),
    subsystemLabel = cms.string('L1MuGMT'),
    onlineDB = cms.string('oracle://CMS_OMDS_LB/CMS_TRG_R'),
    enableL1MuGMTChannelMask = cms.bool( True )
)
| StarcoderdataPython |
3200448 | <gh_stars>0
#venv/bin/python
# -*- coding:utf-8 -*-
from random import randint
from random import choice
from logbook import Logger, TimedRotatingFileHandler
import os
import sys
# BASE_DIR = os.path.abspath(os.path.join(os.getcwd(), ".."))
# sys.path.append(BASE_DIR)
# Time-rotated log file; push_application() installs it process-wide.
handler = TimedRotatingFileHandler('../logs/lottery_generator.log')
handler.push_application()
logger = Logger(name='Lottery Generator', level='info')
def random_int_generator(num_list):
    """Yield a single element drawn uniformly at random from num_list."""
    yield choice(num_list)
def shuangseqiu_lottery_generator():
    """
    Double color lottery generator.

    Yields one ticket as a 7-element list: six distinct, ascending "red"
    numbers from 1-33 followed by one "blue" number from 1-16.

    Fixes vs. the original: the 1-33 pool was named `blue_nums` and the
    1-16 pool `red_nums` — the reverse of the actual shuangseqiu rules —
    and each draw went through a needless one-shot sub-generator.
    """
    red_pool = list(range(1, 34))
    reds = []
    for _ in range(6):
        # Draw without replacement: remove each pick from the pool.
        pick = choice(red_pool)
        reds.append(pick)
        red_pool.remove(pick)
    ticket = sorted(reds)
    ticket.append(randint(1, 16))
    yield ticket
def random_shuangse(n):
    """
    Return a list of n independently generated double color lottery tickets.
    """
    return [next(shuangseqiu_lottery_generator()) for _ in range(n)]
if __name__ == '__main__':
    # Ask (in Chinese) how many tickets to generate, then print each one.
    lottery_num = int(input('请输入需要随机的双色球彩票数:'))
    shuangse_lotteries = random_shuangse(lottery_num)
    print('随机生成的彩票投注信息如下:')
    for shuangse_lottery in shuangse_lotteries:
        print(shuangse_lottery)
| StarcoderdataPython |
11266762 | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# a backend
# Copyright 2007, <NAME> <<EMAIL>>
# Copyright 2008, <NAME> <<EMAIL>>
import re
from coherence.backend import BackendItem, Container, AbstractBackendStore
from coherence.upnp.core import DIDLLite
from coherence.upnp.core.DIDLLite import classChooser, Resource
from coherence.upnp.core.utils import getPage
class PlaylistItem(BackendItem):
    """One playable stream (channel) exposed over UPnP.

    Lazily builds and caches its DIDL-Lite item on first get_item() call.
    """
    logCategory = 'playlist_store'

    def __init__(self, title, stream_url, mimetype, **kwargs):
        BackendItem.__init__(self)
        self.name = title
        self.stream_url = stream_url
        self.mimetype = mimetype
        self.url = stream_url
        self.item = kwargs.get('item', None)
        self.parent = kwargs.get('parent', None)
        self.update_id = kwargs.get('update_id', 0)

    def get_id(self):
        # NOTE(review): storage_id is not assigned in this class — presumably
        # the Coherence framework (e.g. Container.add_child) sets it; confirm.
        return self.storage_id

    def get_item(self):
        if self.item is None:
            upnp_id = self.get_id()
            upnp_parent_id = self.parent.get_id()
            # Audio vs. video DIDL class chosen from the stream's mimetype.
            if self.mimetype.startswith('video/'):
                item = DIDLLite.VideoItem(upnp_id, upnp_parent_id, self.name)
            else:
                item = DIDLLite.AudioItem(upnp_id, upnp_parent_id, self.name)
            # what to do with MMS:// feeds?
            protocol = 'http-get'
            if self.stream_url.startswith('rtsp://'):
                protocol = 'rtsp-rtp-udp'
            res = Resource(self.stream_url,
                           f'{protocol}:*:{self.mimetype}:*')
            res.size = None
            item.res.append(res)
            self.item = item
        return self.item

    def get_url(self):
        return self.url
class PlaylistStore(AbstractBackendStore):
    '''
    MediaServer backend that exposes the entries of an M3U playlist
    (one PlaylistItem per channel) under a single root container.

    .. versionchanged:: 0.9.0
        Migrated from louie/dispatcher to EventDispatcher
    '''
    logCategory = 'playlist_store'
    implements = ['MediaServer']
    wmc_mapping = {'16': 1000}

    description = (
        'Playlist',
        'exposes the list of video/audio streams from a m3u playlist (e.g. '
        'web TV listings published by french ISPs such as Free, SFR...).',
        None)

    options = [
        {'option': 'name', 'text': 'Server Name:', 'type': 'string',
         'default': 'my media',
         'help': 'the name under this MediaServer '
                 'shall show up with on other UPnP clients'},
        {'option': 'version', 'text': 'UPnP Version:', 'type': 'int',
         'default': 2, 'enum': (2, 1),
         'help': 'the highest UPnP version this MediaServer shall support',
         'level': 'advance'},
        {'option': 'uuid', 'text': 'UUID Identifier:', 'type': 'string',
         'help': 'the unique (UPnP) identifier for this MediaServer, '
                 'usually automatically set',
         'level': 'advance'},
        {'option': 'playlist_url', 'text': 'Playlist file URL:',
         'type': 'string', 'help': 'URL to the playlist file (M3U).'},
    ]

    playlist_url = None

    def __init__(self, server, **kwargs):
        AbstractBackendStore.__init__(self, server, **kwargs)
        # Default playlist: the Freebox web-TV listing.
        self.playlist_url = self.config.get(
            'playlist_url',
            'https://mafreebox.freebox.fr/freeboxtv/playlist.m3u'
        )
        self.name = self.config.get('name', 'playlist')
        self.init_completed = True

    def __repr__(self):
        return self.__class__.__name__

    def append(self, obj, parent):
        # NOTE(review): PlaylistItem's signature is (title, stream_url,
        # mimetype) — here the numeric id becomes the title and obj the
        # stream_url; this looks copied from another backend. Verify.
        if isinstance(obj, str):
            mimetype = 'directory'
        else:
            mimetype = obj['mimetype']
        UPnPClass = classChooser(mimetype)
        id = self.getnextID()
        update = False
        if hasattr(self, 'update_id'):
            update = True
        item = PlaylistItem(
            id, obj, mimetype,
            parent=parent,
            storageid=parent,
            upnpclass=UPnPClass,
            update=update)
        self.store[id] = item
        self.store[id].store = self
        # Propagate UPnP update counters so clients notice the change.
        if hasattr(self, 'update_id'):
            self.update_id += 1
            if self.server:
                self.server.content_directory_server.set_variable(
                    0, 'SystemUpdateID', self.update_id)
            if parent:
                value = (parent.get_id(), parent.get_update_id())
                if self.server:
                    self.server.content_directory_server.set_variable(
                        0, 'ContainerUpdateIDs', value)
        if mimetype == 'directory':
            return self.store[id]
        return None

    def upnp_init(self):
        # Advertise the stream protocols we can serve, then populate the
        # root container from the playlist (returns a Deferred).
        self.current_connection_id = None
        if self.server:
            self.server.connection_manager_server.set_variable(
                0,
                'SourceProtocolInfo',
                ['rtsp-rtp-udp:*:video/mpeg:*',
                 'http-get:*:video/mpeg:*',
                 'rtsp-rtp-udp:*:audio/mpeg:*',
                 'http-get:*:audio/mpeg:*'],
                default=True)
        rootItem = Container(None, self.name)
        self.set_root_item(rootItem)
        return self.retrievePlaylistItems(self.playlist_url, rootItem)

    def retrievePlaylistItems(self, url, parent_item):
        """Fetch the M3U at `url` and add one PlaylistItem per channel
        to `parent_item`. Returns the twisted Deferred of the fetch."""
        def gotPlaylist(playlist):
            self.info('got playlist')
            items = []
            if playlist:
                content, header = playlist
                if isinstance(content, bytes):
                    content = content.decode('utf-8')
                lines = content.splitlines().__iter__()
                line = next(lines)
                while line is not None:
                    self.debug(line)
                    if re.search('#EXTINF', line):
                        # Channel name is the text after the comma.
                        channel = re.match('#EXTINF:.*,(.*)', line).group(1)
                        mimetype = 'video/mpeg'
                        self.info('\t- channel found: [%r] => %r' % (
                            mimetype, channel))
                        line = next(lines)
                        # VLC options may precede the URL; 'no-video'
                        # marks an audio-only stream.
                        while re.search('#EXTVLCOPT', line):
                            option = re.match('#EXTVLCOPT:(.*)', line).group(1)
                            if option == 'no-video':
                                mimetype = 'audio/mpeg'
                            line = next(lines)
                        url = line
                        item = PlaylistItem(channel, url, mimetype)
                        parent_item.add_child(item)
                        items.append(item)
                    try:
                        line = next(lines)
                    except StopIteration:
                        line = None
            return items

        def gotError(error):
            self.warning(f'Unable to retrieve playlist: {url}')
            print(f'Error: {error}')
            return None

        d = getPage(url)
        d.addCallback(gotPlaylist)
        d.addErrback(gotError)
        return d
| StarcoderdataPython |
9778322 | import os
import sys
import subprocess
from pathlib import Path
import Utils
from io import BytesIO
from urllib.request import urlopen
class VulkanConfiguration:
    """Checks for (and, on Windows, offers to install) the required Vulkan SDK
    and its debug libraries for the engine build."""

    # NOTE(review): "172.16.58.3" does not look like a real Vulkan SDK
    # version (it reads like an anonymized IP) — confirm the intended version.
    requiredVulkanVersion = "172.16.58.3"
    vulkanDirectory = "./Corby/vendor/VulkanSDK"

    @classmethod
    def Validate(cls):
        # Entry point: verify the SDK, then its debug libs.
        if (not cls.CheckVulkanSDK()):
            print("Vulkan SDK not installed correctly.")
            return
        if (not cls.CheckVulkanSDKDebugLibs()):
            print("Vulkan SDK debug libs not found.")

    @classmethod
    def CheckVulkanSDK(cls):
        # The official installer sets VULKAN_SDK; absence means not installed.
        vulkanSDK = os.environ.get("VULKAN_SDK")
        if (vulkanSDK is None):
            print("\nYou don't have the Vulkan SDK installed!")
            cls.__InstallVulkanSDK()
            return False
        else:
            print(f"\nLocated Vulkan SDK at {vulkanSDK}")
            # Version check by substring of the install path.
            if (cls.requiredVulkanVersion not in vulkanSDK):
                print(f"You don't have the correct Vulkan SDK version! (Engine requires {cls.requiredVulkanVersion})")
                cls.__InstallVulkanSDK()
                return False
        print(f"Correct Vulkan SDK located at {vulkanSDK}")
        return True

    @classmethod
    def __InstallVulkanSDK(cls):
        # Ask for consent, download the Windows installer, launch it, and
        # exit — the script must be re-run after installation.
        permissionGranted = False
        while not permissionGranted:
            reply = str(input("Would you like to install VulkanSDK {0:s}? [Y/N]: ".format(cls.requiredVulkanVersion))).lower().strip()[:1]
            if reply == 'n':
                return
            permissionGranted = (reply == 'y')
        vulkanInstallURL = f"https://sdk.lunarg.com/sdk/download/{cls.requiredVulkanVersion}/windows/VulkanSDK-{cls.requiredVulkanVersion}-Installer.exe"
        vulkanPath = f"{cls.vulkanDirectory}/VulkanSDK-{cls.requiredVulkanVersion}-Installer.exe"
        print("Downloading {0:s} to {1:s}".format(vulkanInstallURL, vulkanPath))
        Utils.DownloadFile(vulkanInstallURL, vulkanPath)
        print("Running Vulkan SDK installer...")
        # os.startfile is Windows-only.
        os.startfile(os.path.abspath(vulkanPath))
        print("Re-run this script after installation!")
        quit()

    @classmethod
    def CheckVulkanSDKDebugLibs(cls):
        # Presence of shaderc_sharedd.lib is used as the marker for the
        # whole debug-libs bundle; when missing, download and unzip it.
        shadercdLib = Path(f"{cls.vulkanDirectory}/Lib/shaderc_sharedd.lib")
        VulkanSDKDebugLibsURLlist = [
            f"https://sdk.lunarg.com/sdk/download/{cls.requiredVulkanVersion}/windows/VulkanSDK-{cls.requiredVulkanVersion}-DebugLibs.zip",
            f"https://files.lunarg.com/SDK-{cls.requiredVulkanVersion}/VulkanSDK-{cls.requiredVulkanVersion}-DebugLibs.zip"
        ]
        if not shadercdLib.exists():
            print(f"\nNo Vulkan SDK debug libs found. (Checked {shadercdLib})")
            vulkanPath = f"{cls.vulkanDirectory}/VulkanSDK-{cls.requiredVulkanVersion}-DebugLibs.zip"
            Utils.DownloadFile(VulkanSDKDebugLibsURLlist, vulkanPath)
            print("Extracting", vulkanPath)
            Utils.UnzipFile(vulkanPath, deleteZipFile=False)
            print(f"Vulkan SDK debug libs installed at {os.path.abspath(cls.vulkanDirectory)}")
        else:
            print(f"\nVulkan SDK debug libs located at {os.path.abspath(cls.vulkanDirectory)}")
        # Always reports success once the download path has run.
        return True
if __name__ == "__main__":
    # Run the SDK validation when executed as a script.
    VulkanConfiguration.Validate()
| StarcoderdataPython |
11233885 | <filename>partition_data.py
import glob
import sys
import os
import random
import pdb
from PIL import Image, ImageOps
import cPickle as pickle
import numpy as np
from scipy import misc
from constants import *
# Create the output directory tree for train/test images and labels.
if not os.path.isdir(BLOB_TRAIN_IMAGE_DIR):
    os.makedirs(BLOB_TRAIN_IMAGE_DIR)
if not os.path.isdir(BLOB_TRAIN_LABELS_DIR):
    os.makedirs(BLOB_TRAIN_LABELS_DIR)
if not os.path.isdir(BLOB_TEST_IMAGE_DIR):
    os.makedirs(BLOB_TEST_IMAGE_DIR)
if not os.path.isdir(BLOB_TEST_LABELS_DIR):
    os.makedirs(BLOB_TEST_LABELS_DIR)
files = glob.glob(DATA_LABELS_DIR)
all_files = []
print(DATA_LABELS_DIR)
# read files and add into list
for filename in glob.glob(DATA_LABELS_DIR + '*.png'):
    label_name = filename.replace(DATA_LABELS_DIR,"")
    image_name = label_name[-13:-4] # frame....jpg
    image_name = image_name + ".jpg"
    print(image_name)
    # Frame number parsed from "frameNNNN" for ordering.
    number = int(image_name[5:9])
    temp = [number, image_name, label_name]
    all_files.append(temp)
# sort
sorted_all_files = sorted(all_files)
num_files= len(all_files)
#num_train = np.ceil(num_files*PERCENTAGE_TRAIN)
# Hard-coded split: first 35 frames go to training, the rest to test.
num_train = 35
train_info = {}
train_txt = open(BLOB_FILE_WITH_TRAIN_INDICES, 'w')
test_txt = open(BLOB_FILE_WITH_TEST_INDICES, 'w')
#count_source_txt = open(COUNT_SOURCE_FILE_INDICES, 'w')
count = 0
# Running per-channel sums for the training-set mean color.
r_sum = 0
g_sum = 0
b_sum = 0
for it in range(num_files):
    temp = sorted_all_files[it]
    # Read image
    im = Image.open('{}{}'.format(DATA_IMAGE_DIR, temp[1]))
    label = misc.imread(glob.glob("{}{}".format(DATA_LABELS_DIR, temp[2]))[0])
    # Binarize the label mask.
    label[label>0] = 1
    image_label = Image.fromarray(np.uint8(label))
    # Skip frames whose mask is entirely empty.
    if np.max(label) == 0:
        continue
    # Save image
    if it<num_train:
        im_array = np.asarray(im)
        r_sum += np.mean(im_array[:,:,0])
        g_sum += np.mean(im_array[:,:,1])
        b_sum += np.mean(im_array[:,:,2])
        # Sample NUM_TRAIN_PER_IMAGE random crops that contain foreground.
        for itt in range(NUM_TRAIN_PER_IMAGE):
            save_str = 'frame' + str(count)
            nc, nr = im.size
            valid = False
            # Rejection-sample a crop until its label mask is non-empty.
            while not valid:
                start_row = random.randint(0, nr - CROP_HEIGHT)
                start_col = random.randint(0, nc - CROP_WIDTH)
                new_im = im.crop((start_col, start_row, start_col + CROP_WIDTH, start_row + CROP_HEIGHT))
                #new_label = label.crop((start_col, start_row, start_col + CROP_WIDTH, start_row + CROP_HEIGHT))
                new_label_array = label[start_row:start_row + CROP_HEIGHT, start_col:start_col + CROP_WIDTH]
                if np.max(new_label_array) != 0:
                    valid = True
            new_label = Image.fromarray(np.uint8(new_label_array))
            # 50% chance of horizontal-mirror augmentation.
            rotate = random.randint(0, 1)
            if rotate == 1:
                new_im = ImageOps.mirror(new_im)
                new_label = ImageOps.mirror(new_label)
            train_info[save_str] = [temp[1], temp[2], start_col, start_row, start_col + CROP_WIDTH, start_row + CROP_HEIGHT]
            new_im.save(BLOB_TRAIN_IMAGE_DIR + save_str + '.png')
            new_label.save(BLOB_TRAIN_LABELS_DIR + save_str + '.png')
            train_txt.write(save_str + '\n')
            count += 1
    else:
        # Test frames are saved uncropped.
        image_label = Image.fromarray(np.uint8(label))
        im.save(BLOB_TEST_IMAGE_DIR + temp[1])
        image_label.save(BLOB_TEST_LABELS_DIR + temp[2])
        test_txt.write(temp[1] + '\n')
r_mean = r_sum / num_train
g_mean = g_sum / num_train
b_mean = b_sum / num_train
print("R: " + str(r_mean) + " G: " + str(g_mean) + " B: " + str(b_mean))
# Python 2 file (cPickle import above); pickle the crop metadata.
pickle.dump(train_info, open(BLOB_TRAIN_INFO, 'w'))
train_txt.close()
test_txt.close()
4838826 | <gh_stars>1-10
#!/usr/bin/env python
import networkx as nx
from collections import defaultdict
def neighbors(grid, p):
    """Return the 4-neighbours of p = (x, y), in up/down/left/right order.

    Points on the outer border of the grid get no neighbours, which keeps
    every returned coordinate safely inside the grid.
    """
    x, y = p
    height = len(grid)
    width = len(grid[0])
    on_border = x == 0 or y == 0 or x == width - 1 or y == height - 1
    if on_border:
        return []
    return [(x, y - 1), (x, y + 1), (x - 1, y), (x + 1, y)]
def solve(inp):
    """Build a graph of the donut maze (AoC 2019 day 20) and print the
    shortest path length from portal AA to portal ZZ."""
    start = None
    end = None
    # Portal label -> list of the '.' tiles adjacent to that label.
    portals = defaultdict(list)
    maze = nx.Graph()
    for y, line in enumerate(inp):
        for x, c in enumerate(line):
            p = (x, y)
            if c == '.':
                # Open tile: connect to adjacent open tiles.
                maze.add_node(p)
                for n in neighbors(inp, p):
                    if inp[n[1]][n[0]] == '.':
                        maze.add_edge(p, n)
            if 'A' <= c <= 'Z':
                # Portal letter: look for the adjacent '.' tile it labels and
                # the second letter of its name; sorting makes the two
                # letter orders (horizontal/vertical) agree.
                portal_pos = None
                portal = None
                for n in neighbors(inp, p):
                    nc = inp[n[1]][n[0]]
                    if nc == '.':
                        portal_pos = n
                    if 'A' <= nc <= 'Z':
                        portal = ''.join(sorted(c+nc))
                if portal != None and portal_pos != None:
                    if portal == 'AA':
                        start = portal_pos
                    if portal == 'ZZ':
                        end = portal_pos
                    portals[portal].append(portal_pos)
                    # Once both endpoints of a portal are known, link them.
                    # (The second add_edge is redundant on an undirected Graph.)
                    if len(portals[portal]) == 2:
                        maze.add_edge(portals[portal][0], portals[portal][1])
                        maze.add_edge(portals[portal][1], portals[portal][0])
    print(nx.shortest_path_length(maze, start, end))
# with open('test.txt', 'r') as f:
# input = f.read().splitlines()
# solve(input)
# Read the maze and print the part-1 answer.
# NOTE(review): `input` shadows the builtin here.
with open('input.txt', 'r') as f:
    input = f.read().splitlines()
solve(input)
| StarcoderdataPython |
1665085 | """This is an example of how to use the simple sqlfluff api."""
import sqlfluff
# -------- LINTING ----------
# A deliberately messy query: mixed keyword case, bare select-star, odd alias case.
my_bad_query = "SeLEct *, 1, blah as fOO from myTable"

# Lint the given string and return an array of violations in JSON representation.
lint_result = sqlfluff.lint(my_bad_query, dialect="bigquery")
# lint_result =
# [
#     {"code": "L010", "line_no": 1, "line_pos": 1, "description": "Keywords must be consistently upper case."}
#     ...
# ]

# -------- FIXING ----------
# Fix the given string and get a string back which has been fixed.
fix_result_1 = sqlfluff.fix(my_bad_query, dialect="bigquery")
# fix_result_1 = 'SELECT *, 1, blah AS foo FROM mytable\n'

# We can also fix just specific rules.
fix_result_2 = sqlfluff.fix(my_bad_query, rules=["L010"])
# fix_result_2 = 'SELECT *, 1, blah AS fOO FROM myTable'

# Or a subset of rules...
fix_result_3 = sqlfluff.fix(my_bad_query, rules=["L010", "L014"])
# fix_result_3 = 'SELECT *, 1, blah AS fOO FROM mytable'

# -------- PARSING ----------
# Parse the given string and return a JSON representation of the parsed tree.
parse_result = sqlfluff.parse(my_bad_query)
# parse_result = {'file': {'statement': {...}, 'newline': '\n'}}
| StarcoderdataPython |
1937963 | from azure.storage.fileshare import ShareServiceClient
def main():
    """Demonstrate the Azure file-share lifecycle: create a share, then delete it.

    The placeholder account name/key must be filled in before this runs.
    """
    # Renamed from `share_client`: this is a *service*-level client, which
    # operates on shares by name rather than representing one share.
    service_client = ShareServiceClient.from_connection_string(
        "DefaultEndpointsProtocol=https;AccountName=<account_name>;AccountKey=<account_key>;EndpointSuffix=core.windows.net")
    # Create a file share
    service_client.create_share("<share_name>")
    # Delete the file share
    service_client.delete_share("<share_name>")
333387 | <filename>tests/test_client.py
from jina.clients import py_client
from jina.clients.python import PyClient
from jina.flow import Flow
from jina.proto.jina_pb2 import Document
from tests import JinaTestCase
class MyTestCase(JinaTestCase):
    def test_client(self):
        # Spin up a single-pod forwarding Flow and push one raw request
        # through the Python client in index mode.
        f = Flow().add(yaml_path='_forward')
        with f:
            print(py_client(port_grpc=f.port_grpc).call_unary(b'a1234', mode='index'))

    def test_check_input(self):
        # Accepted: a homogeneous iterator of bytes...
        input_fn = iter([b'1234', b'45467'])
        PyClient.check_input(input_fn)
        # ...or of Documents when in_proto=True is passed.
        input_fn = iter([Document(), Document()])
        PyClient.check_input(input_fn, in_proto=True)
        # Rejected: mixed bytes/str input.
        bad_input_fn = iter([b'1234', '45467'])
        self.assertRaises(TypeError, PyClient.check_input, bad_input_fn)
        # Rejected: Documents without in_proto=True.
        bad_input_fn = iter([Document()])
        self.assertRaises(TypeError, PyClient.check_input, bad_input_fn)
| StarcoderdataPython |
3367999 | <filename>day11/script2.py<gh_stars>0
import numpy as np
# TODO : this works but takes forever
# TODO : Would be smarter to calculate all blocks starting at a given position
# TODO : to re-use the previous calculation each time
SIZE = 300 # fuel matrix size
def power(x, y, serial):
    """Fuel-cell power level of grid cell (x, y) for the given serial number.

    Power is the hundreds digit of ((x + 10) * y + serial) * (x + 10),
    minus 5 (AoC 2018 day 11 rules).
    """
    rack_id = x + 10
    # Integer `// 100 % 10` extracts the hundreds digit exactly, replacing
    # the original's float round-trip int((p / 100) % 10).
    return (rack_id * y + serial) * rack_id // 100 % 10 - 5
def power_mx(serial):
    """Return the SIZE x SIZE power grid; mx[x, y] holds cell (x+1, y+1).

    Cells are 1-based in the puzzle, hence the +1 shift on both axes.
    """
    return np.array(
        [[power(x + 1, y + 1, serial) for y in range(SIZE)]
         for x in range(SIZE)],
        dtype=float,
    )
def square_power(mx, x, y, square_size):
    """Total power of the square_size x square_size block with top-left (x, y)."""
    return mx[x:x + square_size, y:y + square_size].sum()
def power_mx_square(mx, s):
    """Matrix of total powers for every s x s square that fits inside mx."""
    span = SIZE - s + 1
    return np.array(
        [[square_power(mx, x, y, s) for y in range(span)]
         for x in range(span)]
    )
def process(serial, size=300):
    """Find the square (of any side length) with the largest total power.

    Returns [x, y, s]: the 1-based top-left coordinate and side length of
    the best square, matching the original return format.

    Implements the TODO noted at the top of the file: a summed-area table
    lets every square of side s be evaluated with four array lookups, so
    each side length costs O(size^2) vectorised work instead of
    O(size^2 * s^2) Python loops.

    `size` defaults to 300, mirroring the module-level SIZE constant, and is
    exposed as a parameter so smaller grids can be searched too.
    """
    xs = np.arange(1, size + 1).reshape(-1, 1)  # cell x along axis 0
    ys = np.arange(1, size + 1).reshape(1, -1)  # cell y along axis 1
    rack_id = xs + 10
    # Same per-cell formula as power(): hundreds digit minus 5.
    grid = (rack_id * ys + serial) * rack_id // 100 % 10 - 5
    # Summed-area table with a zero border: sat[i, j] = grid[:i, :j].sum().
    sat = np.zeros((size + 1, size + 1), dtype=np.int64)
    sat[1:, 1:] = grid.cumsum(axis=0).cumsum(axis=1)
    best_power = None
    best = None
    for s in range(1, size + 1):
        # Total of every s x s square via four SAT lookups.
        totals = sat[s:, s:] - sat[:-s, s:] - sat[s:, :-s] + sat[:-s, :-s]
        flat_idx = int(totals.argmax())  # first maximum in row-major order
        cur = int(totals.flat[flat_idx])
        # Strict '>' keeps the smallest side length on ties, like the original.
        if best_power is None or cur > best_power:
            best_power = cur
            x, y = divmod(flat_idx, totals.shape[1])
            best = [x + 1, y + 1, s]
            print('Step ', s, 'new best = ', best, ' (max = ', best_power, ')')
    return best
def compute(file_name):
    """Read the puzzle serial number (first line of file_name) and run the search."""
    with open(file_name, "r") as fh:
        serial = int(fh.readline())
        return process(serial)
if __name__ == '__main__':
    # NOTE(review): the label says "3x3" but this script (part 2) searches
    # squares of every size; the message is misleading.
    print("Max power 3x3 cell = ", compute("data.txt"))
| StarcoderdataPython |
6409612 | <filename>examples/plot_neurosynth_implementation.py
# -*- coding: utf-8 -*-
r"""
NeuroLang Example based Implementing a NeuroSynth Query
====================================================
"""
# %%
import warnings
warnings.filterwarnings("ignore")
from pathlib import Path
from typing import Iterable
import nibabel
import nilearn.datasets
import nilearn.image
import nilearn.plotting
import numpy as np
import pandas as pd
from neurolang import ExplicitVBROverlay, NeurolangPDL
from neurolang.frontend.neurosynth_utils import get_ns_mni_peaks_reported
###############################################################################
# Data preparation
# ----------------

data_dir = Path.home() / "neurolang_data"

###############################################################################
# Load the MNI atlas and resample it to 4mm voxels

mni_t1 = nibabel.load(
    nilearn.datasets.fetch_icbm152_2009(data_dir=str(data_dir / "icbm"))["t1"]
)
# Resample to an isotropic 4mm grid (affine = 4 * identity).
mni_t1_4mm = nilearn.image.resample_img(mni_t1, np.eye(3) * 4)

###############################################################################
# Probabilistic Logic Programming in NeuroLang
# --------------------------------------------

nl = NeurolangPDL()
###############################################################################
# Adding new aggregation function to build a region overlay
@nl.add_symbol
def agg_create_region_overlay(
    i: Iterable, j: Iterable, k: Iterable, p: Iterable
) -> ExplicitVBROverlay:
    """Aggregate voxel coordinates (i, j, k) and probabilities p into a
    single overlay image in the 4mm MNI space (registered as a NeuroLang
    symbol via the decorator)."""
    mni_coords = np.c_[i, j, k]
    return ExplicitVBROverlay(
        mni_coords, mni_t1_4mm.affine, p, image_dim=mni_t1_4mm.shape
    )
###############################################################################
# Load the NeuroSynth database
peak_data = get_ns_mni_peaks_reported(data_dir)
# Convert the reported MNI (x, y, z) peaks to voxel indices of the 4mm
# grid by applying the inverse affine and rounding.
ijk_positions = np.round(
    nibabel.affines.apply_affine(
        np.linalg.inv(mni_t1_4mm.affine),
        peak_data[["x", "y", "z"]].values.astype(float),
    )
).astype(int)
peak_data["i"] = ijk_positions[:, 0]
peak_data["j"] = ijk_positions[:, 1]
peak_data["k"] = ijk_positions[:, 2]
peak_data = peak_data[["i", "j", "k", "id"]]

nl.add_tuple_set(peak_data, name="PeakReported")
# One uniformly-weighted probabilistic choice over studies models drawing
# a study at random.
study_ids = nl.load_neurosynth_study_ids(data_dir, "Study")
nl.add_uniform_probabilistic_choice_over_set(
    study_ids.value, name="SelectedStudy"
)
nl.load_neurosynth_term_study_associations(
    data_dir, "TermInStudyTFIDF", tfidf_threshold=1e-3
)
# %%
###############################################################################
# Probabilistic program and querying
with nl.scope as e:
    # A voxel is "active" when some selected study reports a peak there.
    e.Activation[e.i, e.j, e.k] = e.PeakReported(
        e.i, e.j, e.k, e.s
    ) & e.SelectedStudy(e.s)
    # A term is "associated" when the selected study mentions it (TFIDF set).
    e.TermAssociation[e.t] = e.SelectedStudy(e.s) & e.TermInStudyTFIDF(
        e.s, e.t, ...
    )
    # P(activation | the study mentions "auditory"), per voxel.
    e.ActivationGivenTerm[e.i, e.j, e.k, e.PROB[e.i, e.j, e.k]] = e.Activation(
        e.i, e.j, e.k
    ) // e.TermAssociation("auditory")
    # Aggregate the voxel-wise probabilities into one overlay image.
    e.ActivationGivenTermImage[
        agg_create_region_overlay(e.i, e.j, e.k, e.p)
    ] = e.ActivationGivenTerm(e.i, e.j, e.k, e.p)

    img_query = nl.query((e.x,), e.ActivationGivenTermImage(e.x))
# %%
###############################################################################
# Plotting results
# --------------------------------------------
result_image = img_query.fetch_one()[0].spatial_image()
img = result_image.get_fdata()
# Threshold at the 95th percentile of the non-zero probabilities.
plot = nilearn.plotting.plot_stat_map(
    result_image, threshold=np.percentile(img[img > 0], 95)
)
nilearn.plotting.show()
| StarcoderdataPython |
4843571 | # Generated by Django 2.1.2 on 2018-10-03 23:58
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Initial migration: creates the custom ``User`` model.

    Auto-generated by Django 2.1.2. Applied migrations should not be rewritten
    in place; schema changes belong in a new migration.
    """
    initial = True
    dependencies = [
        # Needs the stock auth app for the Group/Permission M2M targets below.
        ('auth', '0009_alter_user_last_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                # UUID primary key instead of the default auto-increment integer.
                ('id', models.UUIDField(default=uuid.uuid4, help_text='A unique identifier for the user.', primary_key=True, serialize=False, unique=True, verbose_name='ID')),
                ('is_active', models.BooleanField(default=True, help_text='A boolean indicating if the user account is active. Inactive accounts cannot perform actions on the site.', verbose_name='is active')),
                ('is_staff', models.BooleanField(default=False, help_text='A boolean indicating if the user is allowed to access the admin site.', verbose_name='is staff')),
                ('is_superuser', models.BooleanField(default=False, help_text='A boolean indicating if the user has all permissions without them being explicitly granted.', verbose_name='is superuser')),
                ('name', models.CharField(help_text="The user's name.", max_length=100, verbose_name='full name')),
                ('username', models.CharField(help_text='The name the user logs in as.', max_length=100, unique=True, verbose_name='username')),
                ('time_created', models.DateTimeField(auto_now_add=True, help_text='The time the user was created.', verbose_name='time created')),
                ('time_updated', models.DateTimeField(auto_now=True, help_text='The time the user was last updated.', verbose_name='time updated')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'ordering': ('time_created',),
            },
        ),
    ]
| StarcoderdataPython |
# filename: View/telaAbrirProjeto.py
from tkinter import *
from Controller import controleBanco
# Tela que mostra os projetos existentes
def TelaAbrirProjeto(tela):
# Cria a tela
telaAbrir = Toplevel(tela)
telaAbrir.title('ABRIR PROJETO')
telaAbrir.geometry('300x250+620+120')
telaAbrir['bg'] = 'gray'
telaAbrir.resizable(False,False)
telaAbrir.focus_force()
telaAbrir.grab_set()
# Busca projetos existentes no banco
dadosFormat = controleBanco.ControleMostraExistente()
# Cria a caixa de prijetos existentes
caixaLista = Listbox(telaAbrir)
caixaLista.place(x=25,y=25,width=245,height=150)
# Preenche a caixa com os projetos existentes
for item in dadosFormat:
caixaLista.insert(END,item)
barraDeRoalgem = Scrollbar(telaAbrir,orient='vertical',command=caixaLista.yview)
barraDeRoalgem.place(x=260,y=25,width=15,height=150)
caixaLista.configure(yscrollcommand=barraDeRoalgem.set)
def AbrirProjeto():
controleBanco.ControleAbreProjeto(caixaLista.get(ACTIVE))
telaAbrir.destroy()
# Botão abrir projeto
btnAbrir = Button(telaAbrir,text='ABRIR',command=AbrirProjeto,foreground='white',bg='black')
btnAbrir.place(x=232,y=200) | StarcoderdataPython |
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchbearer
from torchbearer import cite
IMAGE = torchbearer.state_key('image')
""" State key under which to hold the image being ascended on """
_stanley2007compositional = """
@article{stanley2007compositional,
title={Compositional pattern producing networks: A novel abstraction of development},
author={<NAME>},
journal={Genetic programming and evolvable machines},
volume={8},
number={2},
pages={131--162},
year={2007},
publisher={Springer}
}
"""
def _correlate_color(image, correlation, max_norm):
if image.size(0) == 4:
alpha = image[-1].unsqueeze(0)
image = image[:-1]
else:
alpha = None
shape = image.shape
image = image.view(3, -1).permute(1, 0)
color_correlation_normalized = correlation / max_norm
image = image.matmul(color_correlation_normalized.to(image.device).t())
image = image.permute(1, 0).contiguous().view(shape)
if alpha is not None:
image = torch.cat((image, alpha), dim=0)
return image
def _inverse_correlate_color(image, correlation, max_norm):
if image.size(0) == 4:
alpha = image[-1].unsqueeze(0)
image = image[:-1]
else:
alpha = None
shape = image.shape
image = image.view(3, -1).permute(1, 0)
color_correlation_normalized = correlation / max_norm
image = image.matmul(color_correlation_normalized.to('cpu').t().inverse().to(image.device))
image = image.permute(1, 0).contiguous().view(shape)
if alpha is not None:
image = torch.cat((image, alpha), dim=0)
return image
def _inverse_sigmoid(x, eps=1e-4):
x.clamp_(0.01, 0.99)
return ((x / ((1 - x) + eps)) + eps).log()
def _inverse_clamp(x, color_mean, correlate):
if correlate:
if x.dim() > 3:
x[:3] = x[:3] - color_mean
else:
x = x - color_mean
return x
def image(shape, transform=None, correlate=True, fft=True, sigmoid=True, sd=0.01, decay_power=1, requires_grad=True):
    """Factory helper that builds an :class:`Image` with the given parameters.

    Args:
        shape (tuple[int]): Shape of the final image.
        transform: Transforms to apply to the image
        correlate (bool): If True, correlate colour channels of the image when loaded.
        fft (bool): If True, the image is parameterised in the Fourier domain
        sigmoid (bool): If True, a sigmoid activation maps the image into (0, 1); otherwise clamp is used
        sd (float): Standard deviation of the random initialisation
        decay_power (int / float): Rate of decay on the normalising constant in the FFT image
        requires_grad (bool): If True, the image tensor requires gradient.

    Returns:
        An :class:`FFTImage` or :class:`TensorImage` with the chosen activation.
    """
    if fft:
        result = FFTImage(shape, sd=sd, decay_power=decay_power, transform=transform,
                          correlate=correlate, requires_grad=requires_grad)
    else:
        # Gaussian init when a sigmoid will squash the range, uniform otherwise.
        init = torch.randn(shape) if sigmoid else torch.rand(shape)
        result = TensorImage(init, transform=transform, correlate=correlate,
                             requires_grad=requires_grad)
    return result.sigmoid() if sigmoid else result.clamp()
class Image(nn.Module, torchbearer.callbacks.imaging.ImagingCallback):
    """ Base image class which wraps an image tensor with transforms and allow de/correlating colour channels

    Subclasses provide the raw parameterisation via the ``image`` property;
    this class handles colour (de)correlation, the (0, 1) activation, and
    loading images from files/tensors back into the parameterisation.

    Args:
        transform: Transforms to apply to the image
        correlate (bool): If True, correlate colour channels of the image when loaded.
    """
    def on_batch(self, state):
        # ImagingCallback hook: emit the current valid (post-activation) image.
        return self.get_valid_image()

    def __init__(self, transform=None, correlate=True):
        super(Image, self).__init__()
        # Colour-correlation matrix; frozen parameter so it follows .to(device).
        self.color_correlation_svd_sqrt = nn.Parameter(
            torch.tensor([[0.26, 0.09, 0.02],
                          [0.27, 0.00, -0.05],
                          [0.27, -0.09, 0.03]], dtype=torch.float32),
            requires_grad=False)
        self.max_norm_svd_sqrt = self.color_correlation_svd_sqrt.norm(dim=0).max()
        # Per-channel colour mean used by the clamp activation / its inverse.
        self.color_mean = nn.Parameter(torch.tensor([0.48, 0.46, 0.41], dtype=torch.float32), requires_grad=False)
        self.transform = transform if transform is not None else lambda x: x
        # Identity activation until with_activation / sigmoid / clamp is called.
        self.activation = lambda x: x
        self.correlate = correlate
        self.correction = (lambda x: _correlate_color(x, self.color_correlation_svd_sqrt,
                           self.max_norm_svd_sqrt)) if correlate else (lambda x: x)

    def with_handler(self, handler, index=None):
        """Run an imaging handler over the image(s); `index` may be an int,
        an iterable of ints, or None for all images."""
        img = self.get_valid_image()
        if img.dim() == 3:
            img = img.unsqueeze(0)
        rng = range(img.size(0)) if index is None else index
        state = {torchbearer.EPOCH: 0}  # Hack, should do this in a better way
        try:
            for i in rng:
                handler(img[i], i, state)
        except TypeError:
            # `rng` was a single (non-iterable) index.
            handler(img[rng], rng, state)
        return self

    @property
    def image(self):
        """
        Class property that returns an un-normalised, parameterised image.

        Returns:
            `torch.Tensor`: Image (channels, height, width) in real space
        """
        raise NotImplementedError

    def get_valid_image(self):
        """
        Return a valid (0, 1) representation of this image, following activation function and colour correction.

        Returns:
            `torch.Tensor`: Image (channels, height, width) in real space
        """
        return self.activation(self.correction(self.image))

    def forward(self, _, state):
        # The model input is ignored; the image itself is the "input".
        image = self.get_valid_image()
        state[IMAGE] = image
        x = self.transform(image).unsqueeze(0)
        state[torchbearer.INPUT] = x
        return x

    def with_activation(self, function):
        """Set the activation mapping the raw image into valid pixel space."""
        self.activation = function
        return self

    def sigmoid(self):
        """Use a sigmoid activation to map the image into (0, 1)."""
        return self.with_activation(torch.sigmoid)

    def clamp(self, floor=0., ceil=1.):
        """Use a tanh-based soft clamp mapping the image into (floor, ceil)."""
        scale = ceil - floor
        def clamp(x):
            return ((x.tanh() + 1.) / 2.) * scale + floor
        if self.correlate:
            def activation(x):
                # Re-add the colour mean (batched inputs shift in place).
                if x.dim() > 3:
                    x[:3] = x[:3] + self.color_mean
                else:
                    x = x + self.color_mean
                return x
            # NOTE(review): this installs a lambda, so load_tensor's
            # `'clamp' in self.activation.__name__` check below will not match
            # when correlate=True -- confirm whether that branch is intended.
            return self.with_activation(lambda x: clamp(activation(x)))
        else:
            return self.with_activation(clamp)

    def load_file(self, file):
        """Load this Image with the contents of the given file.

        Args:
            file (str): The image file to load
        """
        from PIL import Image
        im = Image.open(file)
        tensor = torch.from_numpy(np.array(im)).float().permute(2, 0, 1) / 255.
        return self.load_tensor(tensor)

    def load_tensor(self, tensor):
        """Load this Image with the contents of the given tensor.

        The activation and colour correction are inverted so that the loaded
        tensor round-trips through `get_valid_image`.

        Args:
            tensor: The tensor to load
        """
        # Dispatch on the activation's function name to pick the right inverse.
        if 'sigmoid' in self.activation.__name__:
            tensor = _inverse_sigmoid(tensor)
        elif 'clamp' in self.activation.__name__:
            tensor = _inverse_clamp(tensor, self.color_mean, self.correlate)
        if self.correlate:
            tensor = _inverse_correlate_color(tensor, self.color_correlation_svd_sqrt, self.max_norm_svd_sqrt)
        return self._load_inverse(tensor)

    def _load_inverse(self, tensor):
        # Subclass hook: map a real-space tensor back into the parameterisation.
        raise NotImplementedError
class TensorImage(Image):
    """Image parameterised directly by a pixel-space tensor.

    Args:
        tensor (`torch.Tensor`): Image tensor (channels, height, width)
        transform: Transforms to apply to the image
        correlate (bool): If True, correlate colour channels of the image when loaded.
        requires_grad (bool): If True, the tensor requires gradient.
    """
    def __init__(self, tensor, transform=None, correlate=True, requires_grad=True):
        super(TensorImage, self).__init__(transform=transform, correlate=correlate)
        self.tensor = nn.Parameter(tensor, requires_grad=requires_grad)

    @property
    def image(self):
        """Return the raw parameter tensor (channels, height, width)."""
        return self.tensor

    def _load_inverse(self, tensor):
        # Replace the parameter, preserving device and grad configuration.
        device = self.tensor.device
        needs_grad = self.tensor.requires_grad
        self.tensor = nn.Parameter(tensor.to(device), requires_grad=needs_grad)
        return self
def fftfreq2d(w, h):
    """Build the 2D radial frequency grid used by :class:`FFTImage`.

    Fix: removed the redundant function-local ``import numpy as np`` that
    shadowed the module-level import.

    Args:
        w (int): image width in pixels
        h (int): image height in pixels

    Returns:
        `torch.Tensor`: float32 grid of shape (h, w // 2 + 1) for even ``w``
        and (h, w // 2 + 2) for odd ``w``, holding the radial frequency
        magnitude at each rfft bin.
    """
    fy = np.fft.fftfreq(h)[:, None]
    # Keep one extra column for odd widths (the odd case is padded to even
    # before the rfft in FFTImage._load_inverse).
    if w % 2 == 1:
        fx = np.fft.fftfreq(w)[: w // 2 + 2]
    else:
        fx = np.fft.fftfreq(w)[: w // 2 + 1]
    return torch.from_numpy(np.sqrt(fx * fx + fy * fy)).float()
class FFTImage(Image):
    """ Wrapper for Image which creates a random image parameterised in the Fourier domain.

    NOTE(review): uses ``torch.rfft`` / ``torch.irfft``, which were removed in
    torch >= 1.8 -- this class requires an older torch, or migration to the
    ``torch.fft`` module. Confirm the pinned torch version before upgrading.

    Args:
        shape (tuple[int]): Shape of the final image.
        sd (float): Standard deviation of random initialisation of the image
        decay_power (int / float): Rate of decay on the normalising constant in FFT image
        transform: Transforms to apply to the image
        correlate (bool): If True, correlate colour channels of the image when loaded.
        requires_grad (bool): If True, Image tensor requires gradient.
    """
    def __init__(self, shape, sd=0.01, decay_power=1, transform=None, correlate=True, requires_grad=True):
        super(FFTImage, self).__init__(transform=transform, correlate=correlate)
        self.decay_power = decay_power
        freqs = fftfreq2d(shape[2], shape[1])
        # Frequency-dependent scaling: low frequencies are amplified so the
        # optimisation is not dominated by high-frequency noise.
        self.scale = FFTImage._scale(shape, freqs, decay_power)
        # One complex (last dim = 2) coefficient per channel per rfft bin.
        param_size = [shape[0]] + list(freqs.shape) + [2]
        param = torch.randn(param_size) * sd
        self.param = nn.Parameter(param, requires_grad=requires_grad)
        self._shape = shape

    @staticmethod
    def _scale(shape, freqs, decay_power):
        # Inverse-frequency weights, floored at 1/max(H, W) to avoid division
        # by zero at the DC component; stored as a frozen parameter.
        scale = torch.ones(1) / torch.max(freqs, torch.tensor([1. / max(shape[2], shape[1])], dtype=torch.float32)).pow(decay_power)
        return nn.Parameter(scale * math.sqrt(shape[2] * shape[1]), requires_grad=False)

    @property
    def image(self):
        """ Class property that returns the image in the real domain

        Returns:
            `torch.Tensor`: Image (channels, height, width) in real space
        """
        ch, h, w = self._shape
        spectrum = self.scale.unsqueeze(0).unsqueeze(3) * self.param
        image = torch.irfft(spectrum, 2)
        # Crop to the requested size (odd widths come back padded) and
        # compensate the amplitude factor applied in _load_inverse.
        image = image[:ch, :h, :w] / 4.0
        return image

    def _load_inverse(self, tensor):
        # Rebuild the scale for the (possibly different) incoming shape.
        self._shape = list(tensor.shape)
        self.scale = FFTImage._scale(self._shape, fftfreq2d(self._shape[2], self._shape[1]), self.decay_power)
        self.scale.data = self.scale.data.to(self.param.device)
        # Pad odd widths to even so the rfft bin count matches fftfreq2d.
        if self._shape[2] % 2 == 1:
            tensor = torch.cat((tensor, torch.zeros(self._shape[:2] + [1], device=tensor.device)), dim=2)
        tensor = torch.rfft(tensor.to(self.param.device), 2)
        tensor = tensor * 4 / self.scale.unsqueeze(0).unsqueeze(3)
        self.param = nn.Parameter(tensor.to(self.param.device).data, requires_grad=self.param.requires_grad)
        return self
@cite(_stanley2007compositional)
class CPPNImage(Image):
    """Implements a simple Compositional Pattern Producing Network (CPPN), based on the lucid tutorial
    `xy2rgb <https://colab.research.google.com/github/tensorflow/lucid/blob/master/notebooks/differentiable-parameterizations/xy2rgb.ipynb>`_.

    This is a convolutional network which is given a coordinate system and outputs an image. The size of the input grid
    can then be changed to produce outputs at arbitrary resolutions.

    Args:
        shape (tuple[int]): Shape (channels, height, width) of the final image.
        hidden_channels (int): The number of channels in hidden layers.
        layers (int): The number of convolutional layers.
        activation: The activation function to use (defaults to CPPNImage.Composite).
        normalise (bool): If True (default), add instance norm to each layer.
        correlate (bool): If True, correlate colour channels of the image when loaded.
        transform: Transforms to apply to the image.
    """
    class Composite(nn.Module):
        """Normalised concatenation of atan(x) and atan^2(x) defined in
        `xy2rgb <https://colab.research.google.com/github/tensorflow/lucid/blob/master/notebooks/differentiable-parameterizations/xy2rgb.ipynb>`_.
        """
        def forward(self, x):
            x = torch.atan(x)
            # Doubles the channel count: (atan, atan^2), each rescaled.
            return torch.cat((x / 0.67, x.pow(2) / 0.6), 1)

    class UnbiasedComposite(nn.Module):
        """Unbiased normalised concatenation of atan(x) and atan^2(x) defined in
        `xy2rgb <https://colab.research.google.com/github/tensorflow/lucid/blob/master/notebooks/differentiable-parameterizations/xy2rgb.ipynb>`_.
        """
        def forward(self, x):
            x = torch.atan(x)
            return torch.cat((x / 0.67, (x.pow(2) - 0.45) / 0.396), 1)

    class NormalisedReLU(nn.Module):
        """Normalised ReLU function defined in
        `xy2rgb <https://colab.research.google.com/github/tensorflow/lucid/blob/master/notebooks/differentiable-parameterizations/xy2rgb.ipynb>`_.
        """
        def forward(self, x):
            x = x.relu()
            return (x - 0.4) / 0.58

    class NormalisedLeakyReLU(nn.LeakyReLU):
        """Normalised leaky ReLU function. See
        `torch.nn.LeakyReLU <https://pytorch.org/docs/stable/nn.html#torch.nn.LeakyReLU>`_ for details.

        Args:
            negative_slope (float): Controls the angle of the negative slope.
        """
        def __init__(self, negative_slope=0.01):
            super(CPPNImage.NormalisedLeakyReLU, self).__init__(negative_slope=negative_slope)
            # Estimate the post-activation mean/std empirically from a
            # standard-normal sample so the output is roughly standardised.
            a = np.random.normal(0.0, 1.0, 10**4)
            a = np.maximum(a, 0.0) + negative_slope * np.minimum(a, 0.0)
            self.mean = a.mean()
            self.std = a.std()

        def forward(self, x):
            x = super(CPPNImage.NormalisedLeakyReLU, self).forward(x)
            return (x - self.mean) / self.std

    @staticmethod
    def _make_grid(height, width):
        # Coordinates span [-sqrt(3), sqrt(3)] -- presumably so a uniform grid
        # has unit variance; TODO confirm against the lucid reference.
        r = 3. ** 0.5
        x_coord_range = torch.linspace(-r, r, steps=width)
        y_coord_range = torch.linspace(-r, r, steps=height)
        # NOTE(review): torch.meshgrid without an explicit `indexing=` argument
        # warns/changes default in newer torch -- confirm the intended indexing.
        x, y = torch.meshgrid(y_coord_range, x_coord_range)
        # Frozen (1, 2, H, W) coordinate tensor fed to the network.
        return nn.Parameter(torch.stack((x, y), dim=0).unsqueeze(0), requires_grad=False)

    def __init__(self, shape, hidden_channels=24, layers=8, activation=None, normalise=False, correlate=True, transform=None):
        super(CPPNImage, self).__init__(transform=transform, correlate=correlate)
        activation = CPPNImage.Composite() if activation is None else activation
        (self.channels, self.height, self.width) = shape
        self.loc = CPPNImage._make_grid(self.height, self.width)
        convs = []
        # Probe the activation with a dummy input to discover how many
        # channels it emits per hidden channel (Composite doubles them).
        act_ch = hidden_channels * activation(torch.zeros(1, 1, 1, 1)).size(1)
        for i in range(layers):
            in_ch = 2 if i == 0 else act_ch
            c = nn.Conv2d(in_ch, hidden_channels, 1)
            c.weight.data.normal_(0, np.sqrt(1.0 / in_ch))
            c.bias.data.zero_()
            convs.append(c)
            if normalise:
                convs.append(nn.InstanceNorm2d(hidden_channels))
            convs.append(activation)
        # Final 1x1 conv to the output channels, zero-initialised.
        c = nn.Conv2d(act_ch, self.channels, 1)
        c.weight.data.zero_()
        c.bias.data.zero_()
        convs.append(c)
        self.convs = nn.Sequential(*convs)

    @property
    def image(self):
        img = self.convs(self.loc).squeeze(0)
        return img

    def resize(self, height, width):
        """Return a new version of this CPPNImage that outputs images at a different resolution. The underlying
        convolutional network will be shared across both objects.

        Args:
            height (int): The height (pixels) of the new image.
            width (int): The width (pixels) of the new image.

        Returns:
            A new CPPNImage with the given size.
        """
        import copy
        res = copy.copy(self)  # Shallow copy, just replace the loc tensor
        res.height = height
        res.width = width
        res.loc = CPPNImage._make_grid(res.height, res.width)
        return res.to(self.loc.device)

    def _load_inverse(self, tensor):
        # A CPPN has no closed-form inverse: pixels cannot be mapped back to
        # network weights.
        raise NotImplementedError
| StarcoderdataPython |
import os
import webapp2
import jinja2
from google.appengine.api import users
import logging
# Jinja environment: templates are resolved relative to the process working
# directory. NOTE(review): os.getcwd() makes template lookup depend on the
# launch directory -- confirm this matches the deployment layout.
template_env = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.getcwd()))
from main import Account,Settings,PostalAddress,PhysicalAddress,Drive,ProjectArt,Wallet
import time
class NavigationHandler(webapp2.RequestHandler):
    """Single webapp2 handler that routes all /nav/* requests.

    GET requests dispatch on the last URL path segment; POST requests
    dispatch on the ``route`` form field. Responses are rendered Jinja
    templates or plain status strings.
    """
    def SettingsRouter(self):
        """Read or save the Settings entity for the posted ``userid``.

        ``useraction == "save"`` persists the posted notification/delivery
        options; any other action renders the current settings. A Settings
        entity is created on first access.
        """
        userid = self.request.get('userid')
        useraction = self.request.get('useraction')
        if useraction == "save":
            logging.warning("saving settings")
            deliveryoptions = self.request.get('deliveryoptions')
            paymentoptions = self.request.get('paymentoptions')
            # Checkbox fields arrive as the string "true" when set.
            method_email = self.request.get('methodemail')
            method_sms = self.request.get('methodsms')
            method_phone = self.request.get('methodphone')
            subject_projects = self.request.get('subjectprojects')
            subject_delivery = self.request.get('subjectdelivery')
            subject_payments = self.request.get('subjectpayments')
            subject_promotions = self.request.get('subjectpromotions')
            settings_query = Settings.query(Settings.userid == userid)
            settings_list = settings_query.fetch()
            if len(settings_list) > 0:
                this_settings = settings_list[0]
                this_settings.write_delivery_option(option=deliveryoptions)
                this_settings.write_payment_option(payment_option=paymentoptions)
                this_settings.set_notification_method_email(set=(method_email == "true"))
                this_settings.set_notification_method_sms(set=(method_sms == "true"))
                this_settings.set_notification_method_phone(set=(method_phone == "true"))
                this_settings.set_notification_subject_projects(set=(subject_projects == "true"))
                this_settings.set_notification_subject_deliveries(set=(subject_delivery == "true"))
                this_settings.set_notification_subject_payments(set=(subject_payments == "true"))
                this_settings.set_notification_subject_promotions(set=(subject_promotions == "true"))
                this_settings.put()
            else:
                # No settings yet: create a default entity (posted values are
                # not applied on this first save).
                this_settings = Settings()
                this_settings.write_userid(userid=userid)
                this_settings.put()
            template = template_env.get_template('templates/pages/settings.html')
            context = {'this_settings': this_settings}
            self.response.write(template.render(context))
        else:
            logging.warning("reading settings")
            settings_query = Settings.query(Settings.userid == userid)
            settings_list = settings_query.fetch()
            if len(settings_list) > 0:
                this_settings = settings_list[0]
            else:
                this_settings = Settings()
                this_settings.write_userid(userid=userid)
                this_settings.put()
            template = template_env.get_template('templates/pages/settings.html')
            context = {'this_settings': this_settings}
            self.response.write(template.render(context))

    def RouteProfile(self):
        """Read or save the profile (Account, PostalAddress, PhysicalAddress,
        Wallet) for the posted ``userid``.

        ``useraction == "save"`` writes the posted fields; otherwise the
        profile page is rendered. Missing entities are created on demand.
        """
        userid = self.request.get('userid')
        useraction = self.request.get('useraction')
        if useraction == "save":
            names = self.request.get('names')
            cell = self.request.get('cell')
            email = self.request.get('email')
            website = self.request.get('website')
            photourl = self.request.get('photourl')
            box = self.request.get('box')
            postalcity = self.request.get('city')
            postalprovince = self.request.get('province')
            postalcode = self.request.get('postalcode')
            stand = self.request.get('stand')
            street = self.request.get('street')
            physicalcity = self.request.get('physicalcity')
            physicalprovince = self.request.get('physicalprovince')
            physicalcode = self.request.get('physicalcode')
            account_request = Account.query(Account.userid == userid)
            this_account_list = account_request.fetch()
            postal_request = PostalAddress.query(PostalAddress.userid == userid)
            this_postal_list = postal_request.fetch()
            physical_request = PhysicalAddress.query(PhysicalAddress.userid == userid)
            this_physical_list = physical_request.fetch()
            wallet_request = Wallet.query(Wallet.userid == userid)
            this_wallet_list = wallet_request.fetch()
            # Saving is a no-op unless an Account already exists.
            if len(this_account_list) > 0:
                this_account = this_account_list[0]
                if len(this_postal_list) > 0:
                    this_postal = this_postal_list[0]
                else:
                    this_postal = PostalAddress()
                    this_postal.write_userid(userid=userid)
                if len(this_physical_list) > 0:
                    this_physical = this_physical_list[0]
                else:
                    this_physical = PhysicalAddress()
                    this_physical.write_userid(userid=userid)
                if len(this_wallet_list) > 0:
                    this_wallet = this_wallet_list[0]
                else:
                    this_wallet = Wallet()
                    this_wallet.write_userid(userid=userid)
                    this_wallet.put()
                this_account.write_names(names=names)
                this_account.write_cell(cell=cell)
                this_account.write_email(email=email)
                this_account.write_website(website=website)
                this_account.write_photourl(photourl=photourl)
                this_account.put()
                this_postal.write_box(box=box)
                this_postal.write_city(city=postalcity)
                this_postal.write_province(province=postalprovince)
                this_postal.write_postalcode(postalcode=postalcode)
                this_postal.put()
                this_physical.write_stand(stand=stand)
                this_physical.write_streetname(streetname=street)
                this_physical.write_city(city=physicalcity)
                this_physical.write_province(province=physicalprovince)
                this_physical.write_postalcode(postalcode=physicalcode)
                this_physical.put()
                self.response.write("successfully updated profile information")
        else:
            # Read path: fetch (or create) all profile entities and render.
            account_request = Account.query(Account.userid == userid)
            this_account_list = account_request.fetch()
            postal_request = PostalAddress.query(PostalAddress.userid == userid)
            this_postal_list = postal_request.fetch()
            physical_request = PhysicalAddress.query(PhysicalAddress.userid == userid)
            this_physical_list = physical_request.fetch()
            wallet_request = Wallet.query(Wallet.userid == userid)
            this_wallet_list = wallet_request.fetch()
            if len(this_account_list) > 0:
                this_account = this_account_list[0]
                if len(this_postal_list) > 0:
                    this_postal = this_postal_list[0]
                else:
                    this_postal = PostalAddress()
                    this_postal.write_userid(userid=userid)
                    this_postal.put()
                if len(this_physical_list) > 0:
                    this_physical = this_physical_list[0]
                else:
                    this_physical = PhysicalAddress()
                    this_physical.write_userid(userid=userid)
                    this_physical.put()
                if len(this_wallet_list) > 0:
                    this_wallet = this_wallet_list[0]
                else:
                    this_wallet = Wallet()
                    this_wallet.write_userid(userid=userid)
                    this_wallet.put()
                template = template_env.get_template('templates/pages/profile.html')
                context = {'this_account': this_account, 'this_postal': this_postal, 'this_physical': this_physical,'this_wallet':this_wallet}
                self.response.write(template.render(context))
            else:
                # First visit: create empty entities for every profile part.
                this_account = Account()
                this_account.write_userid(userid=userid)
                this_account.put()
                this_postal = PostalAddress()
                this_postal.write_userid(userid=userid)
                this_postal.put()
                this_physical = PhysicalAddress()
                this_physical.write_userid(userid=userid)
                this_physical.put()
                this_wallet = Wallet()
                this_wallet.write_userid(userid=userid)
                this_wallet.put()
                template = template_env.get_template('templates/pages/profile.html')
                context = {'this_account': this_account, 'this_postal': this_postal, 'this_physical': this_physical,
                           'this_wallet': this_wallet}
                self.response.write(template.render(context))

    def get(self):
        """Render public pages, dispatching on the last URL path segment."""
        url = str(self.request.url)
        router = url.split("/")
        router = router[len(router) - 1]
        if router == "home":
            from main import Products
            products_query = Products.query()
            products_list = products_query.fetch()
            template = template_env.get_template('templates/pages/home.html')
            context = {'products_list':products_list}
            self.response.write(template.render(context))
        elif router == "about":
            template = template_env.get_template('templates/pages/about.html')
            context = {}
            self.response.write(template.render(context))
        elif router == "contact":
            template = template_env.get_template('templates/pages/contact.html')
            context = {}
            self.response.write(template.render(context))
        elif router == "services":
            template = template_env.get_template('templates/pages/services.html')
            context = {}
            self.response.write(template.render(context))
        elif router == "createaccount":
            template = template_env.get_template('templates/pages/createaccount.html')
            context = {}
            self.response.write(template.render(context))
        elif router == "login":
            template = template_env.get_template('templates/pages/login.html')
            context = {}
            self.response.write(template.render(context))
        elif router == "header":
            template = template_env.get_template('templates/nav/header.html')
            context = {}
            self.response.write(template.render(context))
        elif router == "sidebar":
            adminloginlink = users.create_login_url(dest_url="/")
            template = template_env.get_template('templates/nav/sidebar.html')
            context = {'adminloginlink':adminloginlink}
            self.response.write(template.render(context))

    def post(self):
        """Handle authenticated/member pages, dispatching on the ``route``
        form field."""
        router = self.request.get('route')
        logging.info(router)
        if router == "login":
            template = template_env.get_template('templates/pages/login.html')
            context = {}
            self.response.write(template.render(context))
        elif router == "new-user":
            names = self.request.get('names')
            email = self.request.get('email')
            password = self.request.get('password')
            userid = self.request.get('userid')
            account_request = Account.query(Account.userid == userid)
            account_list = account_request.fetch()
            if len(account_list) == 0:
                account = Account()
                account.write_names(names=names)
                account.write_email(email=email)
                account.write_userid(userid=userid)
                # NOTE(review): the password is passed straight through --
                # cannot tell from here whether write_password hashes it;
                # confirm it is not stored in plaintext.
                account.write_password(password=password)
                account.put()
                self.response.write('successfully created a new user account')
            else:
                self.response.write('you already have an account please login')
        elif router == "settings":
            self.SettingsRouter()
        elif router == "profile":
            self.RouteProfile()
        elif router == "drive":
            userid = self.request.get('userid')
            drive_request = Drive.query(Drive.userid == userid)
            this_drive_list = drive_request.fetch()
            if len(this_drive_list) > 0:
                this_drive = this_drive_list[0]
            else:
                # NOTE(review): a new Drive is created without calling
                # write_userid, so it will never match this user's query on
                # later requests -- confirm whether that is intended.
                this_drive = Drive()
                this_drive.write_driveid(driveid=this_drive.create_driveid())
                this_drive.put()
            art_request = ProjectArt.query(ProjectArt.driveid == this_drive.driveid)
            this_art_files = art_request.fetch()
            template = template_env.get_template('templates/pages/drive.html')
            context = {'this_drive':this_drive,'this_art_files':this_art_files}
            self.response.write(template.render(context))
        elif router == "addfunds":
            template = template_env.get_template('templates/pages/addfunds.html')
            context = {}
            self.response.write(template.render(context))
        elif router == "projects":
            template = template_env.get_template('templates/pages/projects.html')
            context = {}
            self.response.write(template.render(context))
        elif router == "chat":
            template = template_env.get_template('templates/pages/chat.html')
            context = {}
            self.response.write(template.render(context))
        elif router == "dashboard":
            #TODO- note that the admin can login through the dashboard link ... /dashboard
            if users.is_current_user_admin():
                template = template_env.get_template('templates/pages/dashboard.html')
                context = {}
                self.response.write(template.render(context))
            else:
                # Non-admins get the error page rather than a redirect.
                template = template_env.get_template('templates/pages/500.html')
                context = {}
                self.response.write(template.render(context))
        elif router == "inbox":
            # NOTE(review): 'inbox.htm' -- every other template ends in
            # '.html'; confirm the file name on disk.
            template = template_env.get_template('templates/pages/inbox.htm')
            context = {}
            self.response.write(template.render(context))
        elif router == "logout":
            template = template_env.get_template('templates/pages/logout.html')
            context = {}
            self.response.write(template.render(context))
# WSGI routing table: every request under /nav/ is dispatched to
# NavigationHandler. Fix: removed the dataset artifact fused onto the closing
# line, which made the statement a syntax error.
app = webapp2.WSGIApplication([
    ('/nav/.*', NavigationHandler)
], debug=True)
# filename: src/models/wisenet_base/test.py
import torch
import torch.nn as nn
from torch.utils import data
import numpy as np
import pickle
import cv2
from torch.autograd import Variable
import torch.optim as optim
import scipy.misc
import sys
import os
import os.path as osp
import datetime
import random
import timeit, tqdm
import misc as ms
import pandas as pd
from pydoc import locate
start = timeit.default_timer()
import datetime as dt
import time
import glob
# from losses import losses
from skimage.segmentation import find_boundaries
from sklearn.metrics import confusion_matrix
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
import skimage.io as io
from pycocotools import mask as maskUtils
from datasets import helpers as d_helpers
# from core import proposals as prp
def create_dataset(main_dict):
    """Export the test split to COCO annotation format.

    Writes ``instances_val2012.json`` via ``pascal2cocoformat`` and reloads it
    as a sanity check (the loaded data is otherwise unused).
    """
    test_split = ms.load_test(main_dict)
    ann_file = "{}/instances_val2012.json".format(
        "/mnt/datasets/public/issam/VOCdevkit/annotations/")
    d_helpers.pascal2cocoformat(ann_file, test_split)
    data = ms.load_json(ann_file)
def create_voc2007(main_dict):
    """Export the Pascal2007 test split to COCO annotation format.

    Side effect: mutates ``main_dict`` by overwriting ``dataset_name``.
    The intermediate ``get_batch`` call and the final reload act as sanity
    checks only.
    """
    main_dict["dataset_name"] = "Pascal2007"
    test_split = ms.load_test(main_dict)
    ms.get_batch(test_split, [1])
    ann_file = "{}/instances_val2012.json".format(
        "/mnt/datasets/public/issam/VOCdevkit/annotations/")
    d_helpers.pascal2cocoformat(ann_file, test_split)
    data = ms.load_json(ann_file)
data = ms.load_json("{}/instances_val2012.json".format(path_base))
def test_list(model, cocoGt, val_set, indices, predict_proposal):
    """Predict on a few validation images, score them, and visualise GT vs
    predicted masks (rendered via ms.images, one window per index)."""
    annList = []
    for i in indices:
        batch = ms.get_batch(val_set, [i])
        annList += predict_proposal(model, batch, "annList")
    cocoEval, cocoDt = d_helpers.evaluateAnnList(annList)
    # probs = F.softmax(self(batch["images"].cuda()),dim=1).data
    # blobs = bu.get_blobs(probs)
    for i in indices:
        batch = ms.get_batch(val_set, [i])
        # NOTE(review): assumes batch["name"][0] parses as the COCO image id.
        image_id = int(batch["name"][0])
        annList = cocoGt.imgToAnns[image_id]
        mask = d_helpers.annList2mask(annList)
        dt_mask = d_helpers.annList2mask(cocoDt.imgToAnns[image_id])
        # Side-by-side windows: ground truth and prediction overlays.
        ms.images(batch["images"], mask, denorm=1, win=str(i))
        ms.images(batch["images"], dt_mask, denorm=1, win=str(i)+"_pred")
def test_COCOmap(main_dict):
    """Evaluate the best saved model on PASCAL VOC val2012 with COCO mAP.

    Predictions are generated once and cached to a results JSON; subsequent
    calls reuse the cache. Fixes: removed the interactive ``ipdb.set_trace()``
    breakpoints left in the execution path, the ``or 1`` / ``if 1:`` debug
    leftovers (``or 1`` forced regeneration on every call and defeated the
    cache check), and the except-clause that swallowed prediction errors into
    the debugger -- errors now propagate.

    Returns:
        str: summary of mAP at IoU 0.25 and 0.75 (``cocoEval.stats[1]/[2]``).
    """
    model = ms.load_best_model(main_dict)
    _, val_set = ms.load_trainval(main_dict)
    path_base = "/mnt/datasets/public/issam/VOCdevkit/annotations/"
    cocoGt = COCO("{}/instances_val2012.json".format(path_base))
    # Cached prediction file, keyed by experiment and proposal type.
    fname = (path_base + "/results/" + main_dict["exp_name"]
             + "_" + str(main_dict["model_options"]["predict_proposal"]) + ".json")
    if not os.path.exists(fname):
        annList = []
        for i in range(len(val_set)):
            batch = ms.get_batch(val_set, [i])
            annList += model.predict(batch, "annList")
            if (i % 100) == 0:
                # Periodic checkpoint: report intermediate mAP and persist
                # the partial annotation list.
                cocoEval, _ = d_helpers.evaluateAnnList(annList)
                # NOTE(review): the replacement yields "...inter.json" with no
                # separator; "_inter.json" may have been intended -- kept as-is
                # for compatibility with existing files.
                ms.save_json(fname.replace(".json","inter.json"), annList)
            print("{}/{}".format(i, len(val_set)))
        ms.save_json(fname, annList)
    # Score the cached predictions against the ground truth.
    cocoDt = cocoGt.loadRes(fname)
    cocoEval = COCOeval(cocoGt, cocoDt, "segm")
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    print("Images:", len(cocoEval.params.imgIds))
    print("Model: {}, Loss: {}, Pred: {}".format(main_dict["model_name"],
                                                 main_dict["loss_name"],
                                                 main_dict["model_options"]["predict_proposal"]))
    return "mAP25: {:.2f} - mAP75:{:.2f}".format(cocoEval.stats[1], cocoEval.stats[2])
def evaluateAnnList(annList):
    """Score *annList* (COCO-format segmentation results) against the
    VOC2012 validation ground truth and print the summary metrics.

    Returns the populated COCOeval object.
    """
    path_base = "/mnt/datasets/public/issam/VOCdevkit/annotations/"
    ground_truth = COCO("{}/instances_val2012.json".format(path_base))
    # COCO can only load results from disk, so round-trip via a temp file.
    ms.save_json("tmp.json", annList)
    detections = ground_truth.loadRes("tmp.json")
    evaluator = COCOeval(ground_truth, detections, "segm")
    # Restrict evaluation to images that actually have predictions.
    evaluator.params.imgIds = list({ann["image_id"] for ann in detections.anns.values()})
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()
    print("# images:", len(evaluator.params.imgIds))
    return evaluator
def test_run(main_dict, metric_name, save,
             reset, predict_proposal=None):
    """Run (or load a cached) test evaluation for the best recorded epoch.

    The score is cached per (predict_proposal, metric, best_epoch) in a JSON
    file under main_dict["path_save"]; pass reset == "reset" to recompute.
    NOTE(review): the ``save`` parameter is unused — confirm before removing.
    """
    if predict_proposal is None:
        predict_proposal = ""
    history = ms.load_history(main_dict)
    if history is None:
        best_epoch = 0
    else:
        best_epoch = history["best_model"]["epoch"]
    fname = main_dict["path_save"] + "/test_{}{}_{}.json".format(predict_proposal, metric_name, best_epoch)
    print("Testing: {} - {} - {} - {} - best epoch: {}".format(main_dict["dataset_name"],
                                                               main_dict["config_name"],
                                                               main_dict["loss_name"],
                                                               metric_name,
                                                               best_epoch))
    if not os.path.exists(fname) or reset == "reset":
        # Inference only — disable autograd for speed/memory.
        with torch.no_grad():
            score = ms.val_test(main_dict, metric_name=metric_name, n_workers=1)
        ms.save_json(fname, score)
    else:
        score = ms.load_json(fname)
    return score[metric_name]
def test_load(main_dict, metric_name, predict_proposal=None):
    """Load the most recent cached test result for *metric_name*.

    Scans the save directory for cached result files, picks the one with the
    highest epoch, and formats the score. A '*' marks a result whose epoch no
    longer matches the current best epoch in the training history.
    """
    if predict_proposal is None:
        predict_proposal = ""
    results = glob.glob(main_dict["path_save"] +
                        "/test_{}{}_[0-9]*.json".format(predict_proposal,
                                                        metric_name))
    results_dict = {}
    for r in results:
        # Key: epoch number parsed from the trailing "_<epoch>.json" suffix.
        results_dict[int(os.path.basename(r).replace(".json","").split("_")[-1])] = r
    if len(results_dict) != 0:
        best = max(results_dict.keys())
        fname = results_dict[best]
        result = ms.load_json(fname)
        #ms.save_json(fname.replace("None", main_dict["metric_name"]), result)
        history = ms.load_history(main_dict)
        if history is None:
            return "{:.2f}".format(result[metric_name])
        best_epoch = history["best_model"]["epoch"]
        # NOTE(review): both format strings below receive three arguments but
        # contain only two placeholders, so ``predict_proposal`` is silently
        # ignored — confirm whether a third "{}" was intended.
        if best_epoch == best:
            return "{:.2f} - ({})".format(result[metric_name], best, predict_proposal)
        else:
            return "{:.2f}* - ({})".format(result[metric_name], best, predict_proposal)
    else:
        return "empty"
| StarcoderdataPython |
11252433 | <reponame>JacksonCrawford/relational_contracts
import foundation
import requests
from bs4 import BeautifulSoup
import json
import time
import random
year = input("Enter a year: ")
startTime = time.time()
# Uses requests to navigate to the Wired sitemap and grab all data under the specified class
page = requests.get("https://www.wired.com/sitemap/")
soupLinks = BeautifulSoup(page.text, "lxml")
links = soupLinks.find(class_="sitemap__section-archive")
# Finds all links with specified "year" in them
def masterLinker():
    """Return all sitemap archive links whose URL contains the chosen year.

    Reads the module-level ``links`` (parsed sitemap section) and ``year``
    (user input). Returns a list of URL strings (anchor text contents).
    """
    # Cleanup: the original carried a dead ``resume`` flag that was
    # initialised True and never changed (its mutation was commented out),
    # so the condition reduced to the year check alone.
    linkList = list()
    for link in links.find_all("a"):
        url = link.contents[0]
        if year in str(url):
            linkList.append(url)
    return linkList
# Finds all links within the class of the site specified above
def linker(url):
    """Fetch one weekly sitemap page and return the article links it lists.

    On a failed request, sleeps one hour and retries once — presumably to
    wait out rate limiting (TODO confirm).
    """
    linkList = list()
    try:
        site = requests.get(url)
    except:
        t = time.localtime()
        currentTime = time.strftime("%H:%M:%S", t)
        print("Sleeping for 1 hour (3600 sec), beginning at:", currentTime)
        time.sleep(3600)
        site = requests.get(url)
    soupy = BeautifulSoup(site.text, "lxml")
    # Article links live under the same sitemap archive section class.
    linkz = soupy.find(class_="sitemap__section-archive")
    aTags = linkz.find_all("a")
    for link in aTags:
        url = link.contents[0]
        linkList.append(url)
    return linkList
# Main function that is run, uses a loop to evaluate data with scraper()
# in all links returned by linker()
def main():
    """Crawl every weekly sitemap for the chosen year, scrape each article
    to a per-article JSON file, then write run statistics to stats.txt."""
    bigLinks = masterLinker()
    linkNum = 0
    for bigLink in bigLinks:
        print(str(bigLink) + "----------------")
        lilLinks = linker(bigLink)
        for link in lilLinks:
            data = foundation.scraper(link)
            timestamp = data["timestamp"]
            # Date part of the timestamp (everything before the first space).
            fileDate = timestamp[:timestamp.find(" "):]
            articleNum = f"{linkNum:04}"
            with open("wired_article_" + fileDate + "_" + articleNum + ".json", "w") as j:
                json.dump(data, j)
            print(str(link) + " -- #" + str(articleNum))
            linkNum += 1
            # Polite randomized delay between article fetches.
            time.sleep(random.randint(8, 35))
    with open("stats.txt", "w") as file:
        file.write("Year: " + str(year) + "\nTime to complete: " + str(time.time() - startTime))
        file.write("\n# of files: " + str(linkNum) + "\n\nJackson's Notes: ")
main()
| StarcoderdataPython |
8055073 | from sklearn.linear_model import LogisticRegression
import numpy as np
def train_and_predict(X_train, X_valid, y_train, y_valid, X_test, params,
                      fold_ind, scoring):
    """Train a logistic-regression model, score it on the validation split
    and predict on the test split.

    Parameters
    ----------
    X_train, X_valid, X_test : array-like
        Features for the train / validation / test splits.
    y_train, y_valid : array-like
        Ground-truth targets for the train / validation splits.
    params : dict
        Keyword arguments forwarded to ``LogisticRegression``.
    fold_ind : int
        Fold id (kept for interface compatibility; unused here).
    scoring : callable
        Metric function taking (y_true, y_pred).

    Returns
    -------
    dict
        Keys: 'model', 'score', 'y_val_pred' (validation class
        probabilities), 'y_test_pred' (test class predictions).
    """
    classifier = LogisticRegression(**params)
    classifier.fit(X_train, y_train)

    # Validation: score hard labels derived from the probability matrix.
    val_probabilities = classifier.predict_proba(X_valid)
    val_score = scoring(y_valid, np.argmax(val_probabilities, axis=1))

    test_predictions = classifier.predict(X_test)

    return {
        'model': classifier,
        'score': val_score,
        'y_val_pred': val_probabilities,
        'y_test_pred': test_predictions,
    }
| StarcoderdataPython |
3391070 | <reponame>tomchuk/meetup_20160428<gh_stars>0
from django.conf.urls import url, include
from rest_framework import routers
from todo import views as todo_views
# DRF router: exposes the Todo viewset under /api/todos/.
router = routers.DefaultRouter()
router.register(r'todos', todo_views.TodoViewSet, base_name='todo')
# URL map: index page, login/logout views, and the REST API mount point.
urlpatterns = [
    url(r'^$', todo_views.index, name='index'),
    url(r'^login/$', todo_views.login, name='login'),
    url(r'^logout/$', todo_views.logout, name='logout'),
    url(r'^api/', include(router.urls)),
]
| StarcoderdataPython |
6619103 | #<NAME>
#ITP_449, Spring 2020
#HW02
#Question 3
import re
def main():
ask = input("Please enter your password:")
while True:
if (len(ask) < 8):
print(":( Try Again")
ask = input("Please enter your password:")
elif not re.search("[a-z]", ask):
print(":( Try Again")
ask = input("Please enter your password:")
elif not re.search("[A-Z]", ask):
print(":( Try Again")
ask = input("Please enter your password:")
elif not re.search("[0-9]", ask):
print(":( Try Again")
ask = input("Please enter your password:")
elif not re.search("[-!@#$]", ask):
print(":( Try Again")
ask = input("Please enter your password:")
elif re.search("\s", ask):
print(":( Try Again")
ask = input("Please enter your password:")
else:
print("Access Granted")
break
main() | StarcoderdataPython |
79938 | import yaml
import numpy as np
from os import path
from absl import flags
from pysc2.env import sc2_env
from pysc2.lib import features
from pysc2.lib import actions
# Resolve ../configs/sc2_config.yml relative to this file and load it once
# at import time into the module-level sc2_cfg dict.
sc2_f_path = path.abspath(path.join(path.dirname(__file__), "..", "configs", "sc2_config.yml"))
with open(sc2_f_path, 'r') as ymlfile:
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input — consider yaml.safe_load.
    sc2_cfg = yaml.load(ymlfile)
# TODO: update README.md for adding random seed for game env
def create_sc2_minigame_env(map_name, mode, visualize=False):
    """Create sc2 game env with available actions printer.

    Screen and minimap use the same square resolution (from sc2_cfg) for
    simplicity. *mode* selects a config profile: 'full', 'lite' or 'test'.
    """
    assert mode in ['full', 'lite', 'test']
    # workaround for pysc2 flags
    FLAGS = flags.FLAGS
    FLAGS([__file__])
    # Fixed seed in test mode for reproducible runs; random otherwise.
    env_seed = 3 if mode == 'test' else None
    env = sc2_env.SC2Env(
        map_name=map_name,
        step_mul=sc2_cfg[mode]['step_mul'],
        screen_size_px=(sc2_cfg[mode]['resl'],) * 2,
        minimap_size_px=(sc2_cfg[mode]['resl'],) * 2,
        visualize=visualize,
        random_seed=env_seed)
    return env
# TODO: move preprocess to neuro net embed layer
# TODO: move post process into sc2_env extension
class GameInterfaceHandler(object):
    """Provide game interface info.

    Transform observed game image and available actions into CNN input
    tensors.
    - Special categorical 2d image (player_id, unit_type):
        single layer, log-compressed (no same-category overlapping)
    - Categorical 2d image:
        expanded to one one-hot layer per category
    - Scalar 2d image:
        single layer, log-compressed
    NOTE: This class can potentially be a decorator to wrap sc2_env.
    """

    def __init__(self, mode):
        assert mode in ['full', 'lite', 'test']
        self.dtype = np.float32
        self.minimap_player_id = features.MINIMAP_FEATURES.player_id.index
        self.screen_player_id = features.SCREEN_FEATURES.player_id.index
        self.screen_unit_type = features.SCREEN_FEATURES.unit_type.index
        self.screen_resolution = sc2_cfg[mode]['resl']
        self.minimap_resolution = sc2_cfg[mode]['resl']
        # Mapping between the configured action subset and pysc2's full
        # action-function id space (see _get_action_mappings).
        (self.sub_to_full_acts, self.full_to_sub_acts) = self._get_action_mappings(
            sc2_cfg[mode]['action_list'])
        self.num_action = len(self.sub_to_full_acts)
        self.non_spatial_actions = self._get_non_spatial_actions()
        # Optional whitelists of feature-layer indices; empty means "use all".
        self.screen_imgs = sc2_cfg[mode]['screen_imgs']
        self.minimap_imgs = sc2_cfg[mode]['minimap_imgs']

    @property
    def screen_channels(self):
        """Return number of channels for preprocessed screen image"""
        channels = 0
        for i, screen_feature in enumerate(features.SCREEN_FEATURES):
            if len(self.screen_imgs) > 0 and i not in self.screen_imgs:
                continue
            if i == self.screen_player_id or i == self.screen_unit_type:
                channels += 1
            elif screen_feature.type == features.FeatureType.SCALAR:
                channels += 1
            else:
                channels += screen_feature.scale
        return channels

    def _preprocess_screen(self, screen):
        """Transform screen image into expanded tensor
        Args:
            screen: obs.observation['screen']
        Returns:
            ndarray, shape (len(SCREEN_FEATURES), screen_size_px.y, screen_size_px.x)
        """
        screen = np.array(screen, dtype=self.dtype)
        layers = []
        assert screen.shape[0] == len(features.SCREEN_FEATURES)
        for i, screen_feature in enumerate(features.SCREEN_FEATURES):
            if len(self.screen_imgs) > 0 and i not in self.screen_imgs:
                continue
            if i == self.screen_player_id or i == self.screen_unit_type:
                # Log-compress high-cardinality id layers into one channel.
                layers.append(np.log(screen[i:i + 1] + 1.))
            elif screen_feature.type == features.FeatureType.SCALAR:
                layers.append(np.log(screen[i:i + 1] + 1.))
            else:
                # One-hot expand categorical layers into `scale` channels.
                layer = np.zeros(
                    (screen_feature.scale, screen.shape[1], screen.shape[2]),
                    dtype=self.dtype)
                for j in range(screen_feature.scale):
                    indy, indx = (screen[i] == j).nonzero()
                    layer[j, indy, indx] = 1
                layers.append(layer)
        return np.concatenate(layers, axis=0)

    def get_screen(self, observation):
        """Extract screen variable from observation['minimap']
        Args:
            observation: Timestep.obervation
        Returns:
            screen: ndarray, shape (1, len(SCREEN_FEATURES), screen_size_px.y, screen_size_px.x)
        """
        screen = self._preprocess_screen(observation['screen'])
        return np.expand_dims(screen, 0)

    @property
    def minimap_channels(self):
        """Return number of channels for preprocessed minimap image"""
        channels = 0
        for i, minimap_feature in enumerate(features.MINIMAP_FEATURES):
            if len(self.minimap_imgs) > 0 and i not in self.minimap_imgs:
                continue
            if i == self.minimap_player_id:
                channels += 1
            elif minimap_feature.type == features.FeatureType.SCALAR:
                channels += 1
            else:
                channels += minimap_feature.scale
        return channels

    def _preprocess_minimap(self, minimap):
        """Transform minimap image into expanded tensor
        Args:
            minimap: obs.observation['minimap']
        Returns:
            ndarray, shape (len(MINIMAP_FEATURES), minimap_size_px.y, minimap_size_px.x)
        """
        minimap = np.array(minimap, dtype=self.dtype)
        layers = []
        assert minimap.shape[0] == len(features.MINIMAP_FEATURES)
        for i, minimap_feature in enumerate(features.MINIMAP_FEATURES):
            if len(self.minimap_imgs) > 0 and i not in self.minimap_imgs:
                continue
            if i == self.minimap_player_id:
                layers.append(np.log(minimap[i:i + 1] + 1.))
            elif minimap_feature.type == features.FeatureType.SCALAR:
                layers.append(np.log(minimap[i:i + 1] + 1.))
            else:
                layer = np.zeros(
                    (minimap_feature.scale, minimap.shape[1], minimap.shape[2]),
                    dtype=self.dtype)
                for j in range(minimap_feature.scale):
                    indy, indx = (minimap[i] == j).nonzero()
                    layer[j, indy, indx] = 1
                layers.append(layer)
        return np.concatenate(layers, axis=0)

    def get_minimap(self, observation):
        """Extract minimap variable from observation['minimap']
        Args:
            observation: Timestep.observation
        Returns:
            minimap: ndarray, shape (1, len(MINIMAP_FEATURES), minimap_size_px.y, minimap_size_px.x)
        """
        minimap = self._preprocess_minimap(observation['minimap'])
        return np.expand_dims(minimap, 0)

    def _preprocess_available_actions(self, available_actions):
        """Returns ndarray of available_actions from observed['available_actions']
        shape (num_actions)
        """
        # Keep only actions present in the configured action subset.
        available_actions = np.intersect1d(available_actions, self.sub_to_full_acts)
        a_actions = np.zeros((self.num_action), dtype=self.dtype)
        a_actions[self.full_to_sub_acts[available_actions]] = 1.
        return a_actions

    def get_available_actions(self, observation):
        """
        Args:
            observation: Timestep.observation
        Returns:
            available_action: ndarray, shape(num_actions)
        """
        return self._preprocess_available_actions(
            observation['available_actions'])

    def get_info(self, observation):
        """Extract available actions as info from observation['available_actions']
        Args:
            observation: Timestep.observation
        Returns:
            info: ndarray, shape (num_actions)
        """
        return self.get_available_actions(observation)

    def postprocess_action(self, non_spatial_action, spatial_action):
        """Transform selected non_spatial and spatial actions into pysc2 FunctionCall
        Args:
            non_spatial_action: ndarray, shape (1, 1)
            spatial_action: ndarray, shape (1, 1)
        Returns:
            FunctionCall as action for pysc2_env
        """
        act_id = self.sub_to_full_acts[non_spatial_action[0][0]]
        target = spatial_action[0][0]
        # Decode the flat screen index into pixel coordinates.
        target_point = [
            int(target % self.screen_resolution),
            int(target // self.screen_resolution)
        ]  # (x, y)
        act_args = []
        for arg in actions.FUNCTIONS[act_id].args:
            if arg.name in ('screen', 'minimap', 'screen2'):
                act_args.append(target_point)
            else:
                # Non-spatial arguments default to their first option.
                act_args.append([0])
        return actions.FunctionCall(act_id, act_args)

    def _get_non_spatial_actions(self):
        """Return a bool list (len num_action): True where the subset action
        takes no screen/minimap coordinate argument."""
        non_spatial_actions = [True] * self.num_action
        # NOTE(review): for func_ids outside the configured subset,
        # full_to_sub_acts is -1, which aliases the last subset entry here
        # — confirm this is harmless for the configured action lists.
        for func_id, func in enumerate(actions.FUNCTIONS):
            for arg in func.args:
                if arg.name in ('screen', 'minimap', 'screen2'):
                    non_spatial_actions[self.full_to_sub_acts[func_id]] = False
                    break
        return non_spatial_actions

    def is_non_spatial_action(self, action_id):
        return self.non_spatial_actions[self.full_to_sub_acts[action_id]]

    def _get_action_mappings(self, action_list):
        """Fill action list if it's empty
        Args:
            action_list: list
        Returns:
            sub_to_full_acts: ndarray
            full_to_sub_acts: ndarray (-1 for full ids not in the subset)
        """
        if len(action_list) == 0:
            action_list = [i for i in range(len(actions.FUNCTIONS))]
        sub_to_full_acts = action_list
        full_to_sub_acts = [-1] * len(actions.FUNCTIONS)
        for idx, val in enumerate(sub_to_full_acts):
            full_to_sub_acts[val] = idx
        return (np.asarray(sub_to_full_acts, dtype=np.int32), np.asarray(full_to_sub_acts, dtype=np.int32))
| StarcoderdataPython |
1998505 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014-18 <NAME> and contributors
# See LICENSE.rst for details.
# PYTHON_ARGCOMPLETE_OK
"""
Scrolling artist + song and play/pause indicator
"""
import os
import time
from PIL import ImageFont, Image, ImageDraw
from demo_opts import get_device
from luma.core.render import canvas
from luma.core.image_composition import ImageComposition, ComposableImage
titles = [
("Bridge over troubled water", "Simon & Garfunkel"),
("Up", "R.E.M."),
("Wild Child", "Lou Reed & The Velvet Underground"),
("(Shake Shake Shake) Shake your body", "KC & The Sunshine Band"),
]
class TextImage():
    """Render *text* with *font* into a standalone image sized to fit it."""
    def __init__(self, device, text, font):
        # Use a throwaway canvas only to measure the rendered text size.
        with canvas(device) as draw:
            w, h = draw.textsize(text, font)
        self.image = Image.new(device.mode, (w, h))
        draw = ImageDraw.Draw(self.image)
        draw.text((0, 0), text, font=font, fill="white")
        del draw
        self.width = w
        self.height = h
class Synchroniser():
    """Track the busy/ready state of a set of cooperating tasks.

    Tasks are keyed by ``id(task)``: each registers itself as busy, later
    as ready, and ``is_synchronised`` reports whether every registered
    task is ready (vacuously True when nothing is registered).
    """

    def __init__(self):
        # Maps id(task) -> True once the task has signalled it is ready.
        self.synchronised = {}

    def busy(self, task):
        """Mark *task* as not yet ready."""
        self.synchronised[id(task)] = False

    def ready(self, task):
        """Mark *task* as ready."""
        self.synchronised[id(task)] = True

    def is_synchronised(self):
        """Return True when every registered task is ready."""
        # Idiom fix: replaces the original manual loop over items() with
        # an ``is False`` comparison by a single all() over the values.
        return all(self.synchronised.values())
class Scroller():
    """Scroll a composed image horizontally inside an ImageComposition.

    Implements a small state machine that repeats:
    wait - scroll - wait - rewind -> sync with the other scrollers -> wait.
    Images narrower than the composition never actually move.
    """
    WAIT_SCROLL = 1
    SCROLLING = 2
    WAIT_REWIND = 3
    WAIT_SYNC = 4

    def __init__(self, image_composition, rendered_image, scroll_delay, synchroniser):
        self.image_composition = image_composition
        self.speed = 1
        self.image_x_pos = 0
        self.rendered_image = rendered_image
        self.image_composition.add_image(rendered_image)
        # Maximum scroll offset; <= 0 means the image already fits.
        self.max_pos = rendered_image.width - image_composition().width
        self.delay = scroll_delay
        self.ticks = 0
        self.state = self.WAIT_SCROLL
        self.synchroniser = synchroniser
        self.render()
        self.synchroniser.busy(self)
        self.cycles = 0
        self.must_scroll = self.max_pos > 0

    def __del__(self):
        # Detach from the composition when garbage-collected.
        self.image_composition.remove_image(self.rendered_image)

    def tick(self):
        # Repeats the following sequence:
        # wait - scroll - wait - rewind -> sync with other scrollers -> wait
        if self.state == self.WAIT_SCROLL:
            if not self.is_waiting():
                self.cycles += 1
                self.state = self.SCROLLING
                self.synchroniser.busy(self)
        elif self.state == self.WAIT_REWIND:
            if not self.is_waiting():
                self.synchroniser.ready(self)
                self.state = self.WAIT_SYNC
        elif self.state == self.WAIT_SYNC:
            if self.synchroniser.is_synchronised():
                if self.must_scroll:
                    self.image_x_pos = 0
                    self.render()
                self.state = self.WAIT_SCROLL
        elif self.state == self.SCROLLING:
            if self.image_x_pos < self.max_pos:
                if self.must_scroll:
                    self.render()
                    self.image_x_pos += self.speed
            else:
                self.state = self.WAIT_REWIND

    def render(self):
        # Shift the composed image to the current scroll offset.
        self.rendered_image.offset = (self.image_x_pos, 0)

    def is_waiting(self):
        """Count down the configured delay; False once it has elapsed."""
        self.ticks += 1
        if self.ticks > self.delay:
            self.ticks = 0
            return False
        return True

    def get_cycles(self):
        # Number of completed wait->scroll transitions so far.
        return self.cycles
def make_font(name, size):
    """Load a TrueType font named *name* from the local ``fonts`` directory
    (next to this file) at the given point *size*."""
    fonts_dir = os.path.join(os.path.dirname(__file__), 'fonts')
    return ImageFont.truetype(os.path.abspath(os.path.join(fonts_dir, name)), size)
# ------- main
device = get_device()
# Pick a font appropriate for the display height.
if device.height >= 16:
    font = make_font("code2000.ttf", 12)
else:
    font = make_font("pixelmix.ttf", 8)
image_composition = ImageComposition(device)
try:
    while True:
        for title in titles:
            # Fresh synchroniser per title so song/artist rewind together.
            synchroniser = Synchroniser()
            ci_song = ComposableImage(TextImage(device, title[0], font).image, position=(0, 1))
            ci_artist = ComposableImage(TextImage(device, title[1], font).image, position=(0, 30))
            song = Scroller(image_composition, ci_song, 100, synchroniser)
            artist = Scroller(image_composition, ci_artist, 100, synchroniser)
            cycles = 0
            # Animate until the song line has completed three scroll cycles.
            while cycles < 3:
                artist.tick()
                song.tick()
                time.sleep(0.025)
                cycles = song.get_cycles()
                with canvas(device, background=image_composition()) as draw:
                    image_composition.refresh()
                    draw.rectangle(device.bounding_box, outline="white")
            # Drop the scrollers so __del__ removes them from the composition.
            del artist
            del song
except KeyboardInterrupt:
    pass
9672298 | <filename>b0mb3r/app/routers/attack.py<gh_stars>0
import asyncio
import re
import uuid
import phonenumbers
from fastapi import APIRouter, HTTPException
from loguru import logger
from b0mb3r.app.models import AttackModel, StatusModel
from b0mb3r.app.status import status
from b0mb3r.main import perform_attack
router = APIRouter()
@logger.catch
@router.post("/start")
async def start_attack(attack: AttackModel):
only_digits_phone = re.sub("[^0-9]", "", attack.phone)
country_code = phonenumbers.parse(f"+{only_digits_phone}").country_code
# May seem confusing, but phone is actually a full international phone: 79001234567
attack_id = uuid.uuid4().hex
status[attack_id] = {"started_at": None, "currently_at": None, "end_at": None}
asyncio.create_task(
perform_attack(attack_id, attack.number_of_cycles, country_code, only_digits_phone)
)
return {"success": True, "id": attack_id}
@logger.catch
@router.get("/{attack_id}/status", response_model=StatusModel)
def get_attack_status(attack_id: str):
if attack_id not in status:
raise HTTPException(status_code=404)
return StatusModel(**status[attack_id])
| StarcoderdataPython |
5115039 | <reponame>Needoliprane/ThePhantomOfTheOpera
from GameClass.Player import Player
class Joseph(Player):
    """Player character whose action is toggling the light in his room."""

    def actions(self, room, otherPerson, otherPersons):
        """Toggle the light of ``self.room``: switch it off if it is on,
        and on otherwise.

        NOTE(review): the ``room`` / ``otherPerson`` / ``otherPersons``
        parameters are unused — the toggle always applies to ``self.room``;
        confirm this is intended.
        """
        # Idiom fix: the original compared the result to True explicitly
        # (``if self.room.isOn() == True``).
        if self.room.isOn():
            self.room.switchOffTheLight()
        else:
            self.room.switchOnTheLight()
8178391 | <reponame>AltimateAI/pyconcrete<filename>test/test_exe_testcases.py
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Create on : 2019/07/13
from __future__ import unicode_literals
import os
from os.path import join
from test import base
from test.utility import ImportedTestCase, ImportedTestCaseError
class TestExe(base.TestPyConcreteBase):
    """Discover and run the on-disk exe test cases in test/exe_testcases."""

    def discover(self):
        """Return the available ImportedTestCase instances, sorted by module name."""
        test_cases = []
        test_case_dir = join(base.ROOT_DIR, 'test', 'exe_testcases')
        for d in os.listdir(test_case_dir):
            test_case_path = join(test_case_dir, d)
            itc = ImportedTestCase(self._pyconcrete_exe, test_case_path)
            if itc.is_available_test_case():
                test_cases.append(itc)
            else:
                print('test_exe_testcases - {itc.module_name} (skip)'.format(itc=itc))
        return sorted(test_cases, key=lambda x: x.module_name)

    def test_auto_loading(self):
        """Run every discovered case; fail if any case raised a validate error."""
        test_cases = self.discover()
        error = False
        for tc in test_cases:
            print('test_exe_testcases - {tc.module_name} ... '.format(tc=tc),
                  end='')
            try:
                res = tc.run()
                if res:
                    self.assertTrue(res, "{tc.module_name} validate failed".format(tc=tc))
                    print('Success')
                else:
                    # NOTE(review): a falsy result prints 'Fail' but does not
                    # set ``error`` — confirm whether it should fail the test.
                    print('Fail')
            except ImportedTestCaseError as e:
                print('Validate Exception')
                # BUG FIX: these delimiters previously used '{{' / '}}'
                # (str.format escapes) on plain strings without .format(),
                # printing literal double braces.
                print('{')
                print(' {tc.module_name} tmp_dir=`{tc.tmp_dir}`'.format(tc=tc))
                print(' return code = {return_code}'.format(return_code=e.return_code))
                print(' ======= output lines ======')
                print(' ' + '\n '.join(e.output_lines))
                print(' ======= validate_errors ======')
                print(' ' + e.validate_errors)
                print('}')
                error = True
            except Exception as e:
                print('Exception')
                print(str(e))
                print('{')
                print(' {tc.module_name} tmp_dir=`{tc.tmp_dir}`'.format(tc=tc))
                print('}')
                raise
            tc.close()
        assert error is False
| StarcoderdataPython |
6530106 |
import smart_imports
smart_imports.all()
########################################
# processors
########################################
class EmissaryProcessor(utils_views.ArgumentProcessor):
    """Parse an emissary-id request argument into a loaded emissary object."""
    CONTEXT_NAME = 'current_emissary'
    DEFAULT_VALUE = None
    ERROR_MESSAGE = 'Неверный идентификатор эмиссара'

    def parse(self, context, raw_value):
        # Non-integer ids are a format error; unknown ids are a value error.
        try:
            emissary_id = int(raw_value)
        except ValueError:
            self.raise_wrong_format()
        emissary = logic.load_emissary(emissary_id=emissary_id)
        if emissary is None:
            self.raise_wrong_value()
        return emissary
class EmissaryClanRightsProcessor(utils_views.BaseViewProcessor):
    """Attach the initiator's clan-operation rights (for the emissary's
    clan) to the view context under the configured attribute name."""
    ARG_EMISSARY_ATTRIBUTE = utils_views.ProcessorArgument(default='current_emissary')
    ARG_EMISSARY_CLAN_RIGHTS_ATTRIBUTE = utils_views.ProcessorArgument(default='emissary_clan_rights')

    def preprocess(self, context):
        emissary = getattr(context, self.emissary_attribute, None)
        if emissary is None:
            # BUG FIX: the original called setattr(context, '', None),
            # creating an attribute with an empty name; the rights attribute
            # itself was never set, so later reads of it raised
            # AttributeError when no emissary was present.
            setattr(context, self.emissary_clan_rights_attribute, None)
            return
        clan = clans_logic.load_clan(emissary.clan_id)
        setattr(context,
                self.emissary_clan_rights_attribute,
                clans_logic.operations_rights(initiator=context.account,
                                              clan=clan,
                                              is_moderator=False))
class EmissaryInGameProcessor(utils_views.BaseViewProcessor):
    """Reject requests that target an emissary no longer in the game."""
    ARG_EMISSARY_ATTRIBUTE = utils_views.ProcessorArgument(default='current_emissary')

    def preprocess(self, context):
        if getattr(context, self.emissary_attribute).state.is_IN_GAME:
            return
        raise utils_views.ViewError(code='emissary.emissary_is_not_in_game',
                                    message='Эмиссар уже не участвует в игре (убит или уволен)')
class EventTypeProcessor(utils_views.ArgumentProcessor):
    """Parse an integer request argument into an EVENT_TYPE relation record."""
    CONTEXT_NAME = 'event_type'
    DEFAULT_VALUE = NotImplemented
    ERROR_MESSAGE = 'Неверный идентификатор мероприятия'

    def parse(self, context, raw_value):
        try:
            event_value = int(raw_value)
        except ValueError:
            self.raise_wrong_format()
        # NOTE(review): an integer that is not a valid EVENT_TYPE value will
        # raise from the enum lookup instead of calling raise_wrong_value()
        # — confirm whether that is the intended behaviour.
        return relations.EVENT_TYPE(event_value)
class EventIdProcessor(utils_views.ArgumentProcessor):
    """Parse an integer event id; existence is checked by later processors."""
    CONTEXT_NAME = 'current_event_id'
    DEFAULT_VALUE = NotImplemented
    ERROR_MESSAGE = 'Неверный идентификатор события'

    def parse(self, context, raw_value):
        try:
            event_id = int(raw_value)
        except ValueError:
            self.raise_wrong_format()
        return event_id
class EventPermissionProcessor(utils_views.AccessProcessor):
    """Check that the requester's clan rights allow the targeted event type."""
    ERROR_CODE = 'emissaries.no_rights'
    ERROR_MESSAGE = 'Вы не можете проводить эту операцию'
    ARG_PERMISSIONS_ATTRIBUTE = utils_views.ProcessorArgument(default='emissary_clan_rights')

    def check(self, context):
        rights = getattr(context, self.permissions_attribute)
        if rights is None:
            return False
        # The event type comes either from the request argument or from an
        # already-created event that is being operated on.
        if hasattr(context, 'event_type'):
            event_type = context.event_type
        else:
            event_type = storage.events.get_or_load(context.current_event_id).concrete_event.TYPE
        if event_type is None:
            return False
        # Each event type names the clan permission required to manage it.
        return getattr(rights, event_type.clan_permission)()
########################################
# resource and global processors
########################################
resource = utils_views.Resource(name='emissaries')
# Processors shared by every view in this resource (order matters): account
# context, clan membership/rights, then the emissary and its clan rights.
resource.add_processor(accounts_views.CurrentAccountProcessor())
resource.add_processor(utils_views.FakeResourceProcessor())
resource.add_processor(clans_views.AccountClanProcessor(account_attribute='account', clan_attribute='clan'))
resource.add_processor(clans_views.ClanRightsProcessor(clan_attribute='clan'))
resource.add_processor(EmissaryProcessor(url_name='emissary'))
resource.add_processor(EmissaryClanRightsProcessor())
@resource('#emissary', name='show')
def show(context):
    """Render an emissary's page: chronicles, power and event lists."""
    if context.current_emissary is None:
        raise utils_views.ViewError(code='emissaries.not_found',
                                    message='Эмиссар не найден.')
    # The clan chronicle is only fetched for viewers who belong to a clan.
    clan_events = None
    if context.clan:
        number = conf.settings.CLAN_CHRONICLE_RECORDS_ON_EMISSARY_PAGE
        total_events, clan_events = clans_tt_services.chronicle.cmd_get_last_events(clan=context.clan,
                                                                                   tags=(context.current_emissary.meta_object().tag,),
                                                                                   number=number)
        tt_api_events_log.fill_events_wtih_meta_objects(clan_events)
    total_events, game_events = chronicle_tt_services.chronicle.cmd_get_last_events(tags=(context.current_emissary.meta_object().tag,),
                                                                                    number=conf.settings.GAME_CHRONICLE_RECORDS_ON_EMISSARY_PAGE)
    tt_api_events_log.fill_events_wtih_meta_objects(game_events)
    emissary_power = politic_power_logic.get_emissaries_power([context.current_emissary.id])[context.current_emissary.id]
    active_emissary_events_types = [event.concrete_event.TYPE for event in context.current_emissary.active_events()]
    # All known event classes, sorted by their display text.
    all_emissary_events = [events.TYPES[event] for event in sorted(relations.EVENT_TYPE.records, key=lambda e: e.text)]
    return utils_views.Page('emissaries/show.html',
                            content={'resource': context.resource,
                                     'emissary': context.current_emissary,
                                     'clan_events': clan_events,
                                     'game_events': game_events,
                                     'clan': clans_logic.load_clan(clan_id=context.current_emissary.clan_id),
                                     'emissary_clan_rights': context.emissary_clan_rights,
                                     'emissary_power': emissary_power,
                                     'active_emissary_events_types': active_emissary_events_types,
                                     'all_emissary_events': all_emissary_events})
def check_clan_restrictions(clan_id):
    """Raise a ViewError if the clan is not allowed to hire another emissary
    (combat personnel over the limit, or emissary slots exhausted)."""
    clan_attributes = clans_logic.load_attributes(clan_id)
    if clan_attributes.fighters_maximum < clans_logic.get_combat_personnel(clan_id):
        raise utils_views.ViewError(code='emissaries.maximum_fighters',
                                    message='Боевой состав вашей гильдии превышает максимально допустимый..')
    if not logic.has_clan_space_for_emissary(clan_id, clan_attributes):
        raise utils_views.ViewError(code='emissaries.maximum_emissaries',
                                    message='Ваша гильдия уже наняла максимально возможное количество эмиссаров.')
@accounts_views.LoginRequiredProcessor()
@accounts_views.BanGameProcessor()
@clans_views.ClanStaticOperationAccessProcessor(permissions_attribute='clan_rights', permission='can_emissaries_relocation')
@resource('create-dialog')
def create_dialog(context):
    """Render the dialog for hiring a new emissary."""
    # Fail fast before showing the form if the clan cannot hire anyway.
    check_clan_restrictions(context.clan.id)
    return utils_views.Page('emissaries/create_dialog.html',
                            content={'clan': context.clan,
                                     'form': forms.EmissaryForm(),
                                     'tt_clans_constants': tt_clans_constants,
                                     'resource': context.resource})
@accounts_views.LoginRequiredProcessor()
@accounts_views.BanGameProcessor()
@clans_views.ClanStaticOperationAccessProcessor(permissions_attribute='clan_rights', permission='can_emissaries_relocation')
@utils_views.FormProcessor(form_class=forms.EmissaryForm)
@resource('create', method='POST')
def create(context):
    """Hire a new emissary for the clan (spends clan action points)."""
    with django_transaction.atomic():
        # Lock the clan row so concurrent hires cannot exceed the limits.
        clans_logic.lock_clan_for_update(context.clan.id)
        check_clan_restrictions(context.clan.id)
        # points_banker charges the clan and refunds on failure.
        with clans_views.points_banker(account_id=context.clan.id,
                                       type='create_emissary',
                                       amount=-tt_clans_constants.PRICE_CREATE_EMISSARY):
            emissary = logic.create_emissary(initiator=context.account,
                                             clan=context.clan,
                                             place_id=context.form.c.place,
                                             gender=context.form.c.gender,
                                             race=context.form.c.race,
                                             utg_name=game_names.generator().get_name(context.form.c.race,
                                                                                      context.form.c.gender))
    return utils_views.AjaxOk(content={'next_url': utils_urls.url('game:emissaries:show', emissary.id)})
@accounts_views.LoginRequiredProcessor()
@accounts_views.BanGameProcessor()
@EventTypeProcessor(get_name='event_type')
@EventPermissionProcessor()
@resource('#emissary', 'start-event-dialog')
def start_event_dialog(context):
    """Render the dialog for starting an emissary event of the given type."""
    if context.event_type is None:
        raise utils_views.ViewError(code='common.argument_required', message='Не указан тип мероприятия')
    event_class = events.TYPES[context.event_type]
    # Warn the player when starting the longest event would nearly drain
    # the emissary's current influence.
    max_power_to_spend = event_class.power_cost(context.current_emissary, days=event_class.max_event_length())
    current_power = politic_power_logic.get_emissaries_power(emissaries_ids=[context.current_emissary.id])[context.current_emissary.id]
    show_power_warning = (current_power <= max_power_to_spend + conf.settings.SHOW_START_EVENT_WARNING_BARRIER)
    return utils_views.Page('emissaries/start_event_dialog.html',
                            content={'emissary': context.current_emissary,
                                     'form': event_class.form(emissary=context.current_emissary),
                                     'event_class': event_class,
                                     'resource': context.resource,
                                     'current_power': current_power,
                                     'show_power_warning': show_power_warning})
def _check_emissaries_events(emissary, event_class):
    """Validate that *emissary* may start an event of *event_class*.

    Raises ViewError for: too many simultaneously active events, an already
    active event of the same type, or an event type currently unavailable.
    """
    if emissary.attrs.maximum_simultaneously_events <= len(emissary.active_events()):
        raise utils_views.ViewError(code='emissaies.maximum_simultaneously_events',
                                    message='У эмиссара слишком много активных мероприятий. Дождитесь их завершения или отмените одно.')
    if event_class.TYPE in {event.concrete_event.TYPE for event in emissary.active_events()}:
        raise utils_views.ViewError(code='emissaies.dublicate_event',
                                    message='Нельзя запустить два мероприятия одного типа.')
    if not event_class.is_available(emissary=emissary, active_events={event.concrete_event.TYPE for event in emissary.active_events()}):
        raise utils_views.ViewError(code='emissaries.event_not_available', message='Нельзя начать это мероприятие.')
@accounts_views.LoginRequiredProcessor()
@accounts_views.BanGameProcessor()
@EventTypeProcessor(get_name='event_type')
@EventPermissionProcessor()
@resource('#emissary', 'start-event', method='POST')
def start_event(context):
    """Start an emissary event: validate the form, charge action points and
    power, create the event, and return the redirect URL on success."""
    emissary = context.current_emissary
    event_class = events.TYPES[context.event_type]
    form = event_class.form(emissary=emissary,
                            post=context.django_request.POST)
    if not form.is_valid():
        raise utils_views.ViewError(code='form_errors', message=form.errors)
    with django_transaction.atomic():
        # Lock the emissary row to serialize concurrent event starts.
        if not logic.lock_emissary_for_update(emissary.id):
            raise utils_views.ViewError(code='emissaies.no_emissary_found', message='Активный эмиссар не найден')
        _check_emissaries_events(emissary, event_class)
        # Charge the clan's action points; the context manager refunds them
        # if the enclosed block raises.
        with clans_views.points_banker(account_id=context.clan.id,
                                       type='start_event',
                                       amount=-event_class.action_points_cost(emissary)):
            current_power = politic_power_logic.get_emissaries_power(emissaries_ids=[emissary.id])[emissary.id]
            days = form.c.period
            required_power = event_class.power_cost(emissary, days)
            if current_power < required_power:
                raise utils_views.ViewError(code='emissaies.no_enough_power', message='У эмиссара недостаточно влияния')
            concrete_event = event_class.construct_by_form(emissary, form)
            try:
                with concrete_event.on_create(context.current_emissary):
                    event = logic.create_event(initiator=context.account,
                                               emissary=context.current_emissary,
                                               concrete_event=concrete_event,
                                               days=days)
                    concrete_event.after_create(event)
                    logic.save_event(event)
            except exceptions.OnEventCreateError:
                raise utils_views.ViewError(code='emissaries.on_create_error',
                                            message='Не выполнено одно из специфичных для мероприятия условий')
            # Power is deducted only after the event has been created
            # successfully, because creation may raise errors that must not
            # trigger a deduction: the power deduction is not transactional
            # and would not be refunded on failure.
            impact = game_tt_services.PowerImpact(type=game_tt_services.IMPACT_TYPE.EMISSARY_POWER,
                                                  actor_type=tt_api_impacts.OBJECT_TYPE.ACCOUNT,
                                                  actor_id=context.account.id,
                                                  target_type=tt_api_impacts.OBJECT_TYPE.EMISSARY,
                                                  target_id=emissary.id,
                                                  amount=-required_power)
            politic_power_logic.add_power_impacts([impact])
    return utils_views.AjaxOk(content={'next_url': utils_urls.url('game:emissaries:show', emissary.id)})
@accounts_views.LoginRequiredProcessor()
@accounts_views.BanGameProcessor()
@EventIdProcessor(get_name='event')
@EventPermissionProcessor()
@resource('#emissary', 'stop-event', method='POST')
def stop_event(context):
    """Cancel an emissary's active event; idempotent for already-stopped events."""
    event = storage.events.get_or_load(context.current_event_id)
    emissary = context.current_emissary
    # Already stopped: succeed without doing anything (idempotent cancel).
    if event.state.is_STOPPED:
        return utils_views.AjaxOk(content={'next_url': utils_urls.url('game:emissaries:show', emissary.id)})
    if event.emissary_id != emissary.id:
        raise utils_views.ViewError(code='emissaries.wrong_emissary', message='Эмиссар не проводит это мероприятие')
    logic.cancel_event(initiator=context.account, event=event)
    return utils_views.AjaxOk(content={'next_url': utils_urls.url('game:emissaries:show', emissary.id)})
| StarcoderdataPython |
12802642 | # MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Centralized A2C policy
"""
from collections import OrderedDict
import numpy as np
from gym import spaces
from ray.rllib.agents.a3c.a3c_tf_policy import A3CLoss
from ray.rllib.agents.trainer import with_common_config
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.evaluation.postprocessing import Postprocessing, compute_advantages
from ray.rllib.models import ModelCatalog
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.preprocessors import Preprocessor
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.utils import try_import_tf
from ray.rllib.utils.tf_ops import explained_variance, make_tf_callable
from baselines.marl_benchmark.networks import CentralizedActorCriticModel
tf1, tf, tfv = try_import_tf()
class CentralizedValueMixin:
    # Mixin that wraps the model's centralized value function in a TF callable
    # so it can be invoked eagerly (outside the graph) while postprocessing
    # sampled trajectories.
    def __init__(self: TFPolicy):
        # dynamic_shape=True: batches of arbitrary length may be fed in.
        self.compute_central_vf = make_tf_callable(
            self.get_session(), dynamic_shape=True
        )(self.model.central_value_function)
def build_cac_model(
    policy: TFPolicy, obs_space: spaces.Space, action_space: spaces.Space, config
) -> ModelV2:
    """Build the centralized actor-critic model and attach it to the policy."""
    policy.model = ModelCatalog.get_model_v2(
        obs_space=obs_space,
        action_space=action_space,
        # Discrete spaces emit one logit per action; continuous (Box) spaces
        # emit one output per action dimension.
        num_outputs=action_space.n
        if isinstance(action_space, spaces.Discrete)
        else np.product(action_space.shape),
        model_config=config["model"],
        framework="tf",
        default_model=CentralizedActorCriticModel,
        name="cac",
    )
    return policy.model
def get_action_buffer(
    action_space: spaces.Space,
    action_preprocessor: Preprocessor,
    batch: SampleBatch,
    copy_length: int,
):
    """Return the first `copy_length` actions of `batch`, encoded for the critic.

    Discrete actions are one-hot encoded to `action_preprocessor.size` columns;
    continuous (Box) actions are passed through unchanged.
    """
    if isinstance(action_space, spaces.Discrete):
        indices = batch[SampleBatch.ACTIONS][:copy_length]
        # One-hot encode the discrete action indices.
        return np.eye(action_preprocessor.size)[indices]
    if isinstance(action_space, spaces.Box):
        return batch[SampleBatch.ACTIONS][:copy_length]
    raise NotImplementedError(
        f"Do not support such an action space yet: {action_space}"
    )
def postprocess_trajectory(
    policy: TFPolicy, sample_batch: SampleBatch, other_agent_batches=None, episode=None
):
    """Augment a sampled trajectory with the centralized critic's inputs.

    Builds a per-timestep "critic observation" by concatenating this agent's
    (observation, action) features with every other agent's, fills
    SampleBatch.VF_PREDS from the centralized value function, then computes
    advantages / value targets.
    """
    last_r = 0.0  # bootstrap value; trajectories are processed at episode end
    batch_length = len(sample_batch[SampleBatch.CUR_OBS])
    critic_preprocessor = policy.model.critic_preprocessor
    action_preprocessor = policy.model.act_preprocessor
    obs_preprocessor = policy.model.obs_preprocessor
    critic_obs_array = np.zeros((batch_length,) + critic_preprocessor.shape)
    # Width of a single agent's (obs, action) slot inside the critic input.
    offset_slot = action_preprocessor.size + obs_preprocessor.size
    if policy.loss_initialized():
        # ordered by agent keys, so each agent occupies a stable slot
        other_agent_batches = OrderedDict(other_agent_batches)
        for i, (other_id, (other_policy, batch)) in enumerate(
            other_agent_batches.items()
        ):
            offset = (i + 1) * offset_slot
            # Other agents' batches may be shorter than ours; copy what exists
            # and leave the remainder zero-padded.
            copy_length = min(batch_length, batch[SampleBatch.CUR_OBS].shape[0])
            # TODO(ming): check the action type
            buffer_action = get_action_buffer(
                policy.action_space, action_preprocessor, batch, copy_length
            )
            oppo_features = np.concatenate(
                [batch[SampleBatch.CUR_OBS][:copy_length], buffer_action], axis=-1
            )
            assert oppo_features.shape[-1] == offset_slot
            critic_obs_array[
                :copy_length, offset : offset + offset_slot
            ] = oppo_features
        # fill my features into slot 0 of critic_obs_array
        buffer_action = get_action_buffer(
            policy.action_space, action_preprocessor, sample_batch, batch_length
        )
        critic_obs_array[:batch_length, 0:offset_slot] = np.concatenate(
            [sample_batch[SampleBatch.CUR_OBS], buffer_action], axis=-1
        )
        sample_batch[CentralizedActorCriticModel.CRITIC_OBS] = critic_obs_array
        sample_batch[SampleBatch.VF_PREDS] = policy.compute_central_vf(
            sample_batch[CentralizedActorCriticModel.CRITIC_OBS]
        )
    else:
        # Dummy-batch pass before loss initialization: emit zeroed critic
        # inputs and value predictions.
        sample_batch[CentralizedActorCriticModel.CRITIC_OBS] = critic_obs_array
        # BUG FIX: the original used np.zeros_like((batch_length,), ...),
        # which treats the *tuple* as an array-like and returns a single zero
        # (shape (1,)) regardless of the batch length. np.zeros builds the
        # intended (batch_length,) vector of zeros.
        sample_batch[SampleBatch.VF_PREDS] = np.zeros(
            (batch_length,), dtype=np.float32
        )
    train_batch = compute_advantages(
        sample_batch,
        last_r,
        policy.config["gamma"],
        policy.config["lambda"],
        policy.config["use_gae"],
    )
    return train_batch
def ac_loss_func(policy, model, dist_class, train_batch):
    """Predefined actor-critic loss reuse.

    Reuses RLlib's A3CLoss but feeds the *centralized* value function
    (conditioned on all agents' observations/actions) as the critic term.
    """
    logits, _ = policy.model.from_batch(train_batch)
    action_dist = dist_class(logits, policy.model)
    # The loss object is stored on the policy so stats_fn can report its parts.
    policy.loss = A3CLoss(
        action_dist,
        train_batch[SampleBatch.ACTIONS],
        train_batch[Postprocessing.ADVANTAGES],
        train_batch[Postprocessing.VALUE_TARGETS],
        policy.model.central_value_function(
            train_batch[CentralizedActorCriticModel.CRITIC_OBS]
        ),
        policy.config["vf_loss_coeff"],
        policy.config["entropy_coeff"],
    )
    return policy.loss.total_loss
def setup_mixins(policy, obs_space, action_space, config):
    # Runs before loss initialization; wires the centralized value function
    # callable onto the policy.
    CentralizedValueMixin.__init__(policy)
def stats(policy, train_batch):
    """Report the actor-critic loss components for the last computed loss."""
    loss = policy.loss
    return {
        "policy_loss": loss.pi_loss,
        "policy_entropy": loss.entropy,
        "vf_loss": loss.vf_loss,
    }
def central_vf_stats(policy, train_batch, grads):
    # Report the explained variance of the central value function.
    # NOTE(review): this reads policy.model.value_function() (the model's
    # default value head), not the centralized value output used in the loss
    # -- confirm which head is intended here.
    return {
        "grad_gnorm": tf.linalg.global_norm(grads),
        "vf_explained_var": explained_variance(
            train_batch[Postprocessing.VALUE_TARGETS], policy.model.value_function()
        ),
    }
# Centralized-A2C hyperparameters layered over RLlib's common trainer config.
DEFAULT_CONFIG = with_common_config(
    {
        "gamma": 0.95,
        "lambda": 1.0,  # GAE lambda; only used when use_gae is True.
        "use_gae": False,
        "vf_loss_coeff": 0.5,
        "entropy_coeff": 0.01,
        "truncate_episodes": True,
        "use_critic": True,
        "grad_clip": 40.0,
        "lr": 0.0001,
        "min_iter_time_s": 5,
        "sample_async": True,
        "lr_schedule": None,
    }
)
# Policy assembled from the pieces above: centralized critic model, trajectory
# postprocessing that injects other agents' observations/actions, and the
# shared A3C loss.
CA2CTFPolicy = build_tf_policy(
    name="CA2CTFPolicy",
    stats_fn=stats,
    grad_stats_fn=central_vf_stats,
    loss_fn=ac_loss_func,
    postprocess_fn=postprocess_trajectory,
    before_loss_init=setup_mixins,
    make_model=build_cac_model,
    mixins=[CentralizedValueMixin],
    get_default_config=lambda: DEFAULT_CONFIG,
)
# Trainer entry point for the centralized A2C algorithm.
CA2CTrainer = build_trainer(
    name="CA2C", default_policy=CA2CTFPolicy, default_config=DEFAULT_CONFIG
)
| StarcoderdataPython |
5082457 | from .data_utils import * | StarcoderdataPython |
3434407 | #!/usr/bin/python
###############################################################
# Copyright (c) 2017 ZTE Corporation
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import json
import numbers
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
from asq.initiators import query
import humanfriendly
from numpy import mean
import yaml
from qtip.util.export_to import export_to_file
display = Display()
class ActionModule(ActionBase):
    # Ansible action plugin: loads a QPI spec, the collected per-metric
    # results, and system info, then computes the QPI score (optionally
    # against a stored baseline).
    def run(self, tmp=None, task_vars=None):
        if task_vars is None:
            task_vars = dict()
        result = super(ActionModule, self).run(tmp, task_vars)
        if result.get('skipped', False):
            return result
        # Task args reference files on disk written by earlier plays.
        with open(self._task.args.get('spec')) as stream:
            spec = yaml.safe_load(stream)
        metrics_files = self._task.args.get('metrics')
        metrics = {}
        for metric, filename in metrics_files.items():
            with open(filename) as f:
                metrics[metric] = json.load(f)
        with open(self._task.args.get('sysinfo')) as f:
            data = json.load(f)
        # Keep only the first reported value for each declared system_info key.
        sysinfo = dict([(k['name'], data[k['name']][0]) for k in spec['system_info']])
        dest = self._task.args.get('dest')
        baseline_file = self._task.args.get('baseline')
        if baseline_file is not None:
            with open(baseline_file) as f:
                baseline = json.load(f)
            return calc_qpi(spec, metrics, sysinfo, baseline, dest=dest)
        else:
            return calc_qpi(spec, metrics, sysinfo, None, dest=dest)
@export_to_file
def calc_qpi(qpi_spec, metrics, sysinfo, qpi_baseline):
    """Compute the overall QPI result from the spec, metrics and system info.

    With a baseline, each section is scored against the matching baseline
    section and the overall score is the mean section score scaled by the
    baseline score. Without a baseline, scores stay 0 (baseline generation).
    """
    display.vv("calculate QPI {}".format(qpi_spec['name']))
    display.vvv("spec: {}".format(qpi_spec))
    display.vvv("metrics: {}".format(metrics))
    display.vvv("baseline: {}".format(qpi_baseline))
    section_results = []
    qpi_score = 0
    if qpi_baseline:
        for s in qpi_spec['sections']:
            # Pair each spec section with its baseline counterpart by name.
            s_baseline = query(qpi_baseline['sections']).first(
                lambda section: section['name'] == s['name'])
            section_results.append(calc_section(s, metrics, s_baseline))
        # TODO(yujunz): use formula in spec
        qpi_score = int(
            mean([r['score'] for r in section_results]) * qpi_baseline['score'])
    else:
        for s in qpi_spec['sections']:
            section_results.append(calc_section(s, metrics))
    results = {
        'score': qpi_score,
        'name': qpi_spec['name'],
        'description': qpi_spec['description'],
        'system_info': sysinfo,
        'sections': section_results,
        # NOTE(review): these links are hard-coded to the *compute* QPI even
        # though this function is generic -- confirm whether they should come
        # from the spec instead.
        'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
        'baseline': "https://git.opnfv.org/qtip/tree/resources/baselines/compute.json"
    }
    return results
def calc_section(section_spec, metrics, section_baseline=None):
    """Score one QPI section: the mean of its metric scores (0 w/o baseline)."""
    display.vv("calculate section {}".format(section_spec['name']))
    display.vvv("spec: {}".format(section_spec))
    display.vvv("metrics: {}".format(metrics))
    display.vvv("baseline: {}".format(section_baseline))
    metric_results = []
    section_score = 0
    if section_baseline:
        for m in section_spec['metrics']:
            # Pair each metric spec with its baseline counterpart by name.
            m_baseline = query(section_baseline['metrics']).first(
                lambda metric: metric['name'] == m['name'])
            metric_results.append(calc_metric(m, metrics[m['name']], m_baseline))
        section_score = mean([r['score'] for r in metric_results])
    else:
        for m in section_spec['metrics']:
            metric_results.append(calc_metric(m, metrics[m['name']]))
    # TODO(yujunz): use formula in spec
    return {
        'score': section_score,
        'name': section_spec['name'],
        'description': section_spec.get('description', 'section'),
        'metrics': metric_results
    }
def calc_metric(metric_spec, metrics, metric_baseline=None):
    """Score one metric as the mean of its workload scores.

    Without a baseline, each workload entry instead carries a 'baseline'
    value (the first measured result), i.e. this branch produces the data
    used to *create* a baseline rather than to score against one.
    """
    display.vv("calculate metric {}".format(metric_spec['name']))
    display.vvv("spec: {}".format(metric_spec))
    display.vvv("metrics: {}".format(metrics))
    display.vvv("baseline: {}".format(metric_baseline))
    # TODO(yujunz): use formula in spec
    workload_results = []
    metric_score = 0
    if metric_baseline:
        for w in metric_spec['workloads']:
            w_baseline = query(metric_baseline['workloads']).first(
                lambda workload: workload['name'] == w['name'])
            workload_results.append({
                'name': w['name'],
                'description': 'workload',
                'score': calc_score(metrics[w['name']], w_baseline['baseline']),
                'result': metrics[w['name']][0]
            })
        metric_score = mean([r['score'] for r in workload_results])
    else:
        for w in metric_spec['workloads']:
            workload_results.append({
                'name': w['name'],
                'baseline': metrics[w['name']][0]
            })
    return {
        'score': metric_score,
        'name': metric_spec['name'],
        'description': metric_spec.get('description', 'metric'),
        'workloads': workload_results
    }
def calc_score(metrics, baseline):
    """Return the mean of `metrics` normalized by `baseline`.

    Values may be plain numbers or human-friendly size strings
    (e.g. "1.5 GB"), which are parsed before averaging.
    """
    def to_number(value):
        if isinstance(value, numbers.Number):
            return value
        return humanfriendly.parse_size(value)

    return mean([to_number(m) for m in metrics]) / to_number(baseline)
| StarcoderdataPython |
9675810 | <gh_stars>10-100
# Delete the API client whose name is 'my_api_client'.
client.delete_api_clients(names=['my_api_client'])
# Other valid filter fields: ids
# See section "Common Fields" for examples
| StarcoderdataPython |
69518 | <reponame>scottsilverlabs/raspberrystem-hw-base
from bs4 import BeautifulSoup
import sys
try:
prog, name = sys.argv
except:
print "Usage: eagle-hflip.py <file> <scale_factor> "
sys.exit()
with file(name) as f:
soup = BeautifulSoup(f)
for tag in soup.plain.find_all(["vertex", "polygon", "wire"]):
for attr in ["x", "x1", "x2", "curve"]:
if attr in tag.attrs:
tag[attr] = float(tag[attr]) * -1
with file(name, "w") as f:
f.write(str(soup))
| StarcoderdataPython |
6571323 | <reponame>restinya/Barkeep<filename>coggers/reward.py
import discord
import asyncio
import requests
import re
from discord.utils import get
from discord.ext import commands
from math import floor
from configs.settings import command_prefix
from utils import accessDB, point_buy, alpha_emojis, db, VerboseMDStringifier, traceBack, checkForChar
class Reward(commands.Cog):
    # Cog grouping DM-only commands that hand out player rewards.
    def __init__ (self, bot):
        self.bot = bot
    @commands.group(aliases=['r'], case_insensitive=True)
    async def reward(self, ctx):
        # Command-group container; concrete behavior lives in subcommands.
        pass
    async def cog_command_error(self, ctx, error):
        # NOTE(review): this error handler only initializes `msg` and handles
        # nothing -- it looks unfinished; confirm the intended behavior.
        msg = None
    @commands.cooldown(1, float('inf'), type=commands.BucketType.user)
    @commands.has_role('DM')
    @commands.command()
    async def encounter(self, ctx, user_list, level: int, renown: int):
        # DM command: reward the mentioned players for an encounter.
        channel = ctx.channel
        author = ctx.author
        user = author.display_name
        user_name = author.name
        # The roster is the invoking DM plus every mentioned player.
        player_roster = [author] + ctx.message.mentions
        reward_format = f'Please follow this format:\n```yaml\n{command_prefix}reward encounter "@player1 @player2 [...]" ENCOUNTERLVL RENOWN```'
        if '"' not in ctx.message.content:
            await channel.send(f"Make sure you put quotes **`\"`** around your list of players and retry the command!\n\n{reward_format}")
            # NOTE(review): `self.timer` is never assigned in this cog (only
            # `self.bot` is set in __init__) and 'prep' is not a command here
            # -- this looks copied from another cog; confirm.
            self.timer.get_command('prep').reset_cooldown(ctx)
            return
        if author in ctx.message.mentions:
            # inform the user of the proper command syntax
            await channel.send(f"You cannot reward players with yourself in the player list! {reward_format}")
            self.timer.get_command('prep').reset_cooldown(ctx)
            return
        # NOTE(review): `user_list`, `level` and `renown` are parsed but never
        # used below -- the embed construction appears unfinished.
        reward_embed = discord.Embed()
@commands.cooldown(1, float('inf'), type=commands.BucketType.user)
@commands.has_role('DM')
@commands.command()
async def levelup(self, ctx, user, char):
channel = ctx.channel
author = ctx.author
user = author.display_name
user_name = author.name | StarcoderdataPython |
294013 | import pandas as pd
def get_county_data(state_name: str = "Colorado"):
    """Load the plotly minority-majority county dataset for one state.

    Downloads the CSV, adds a ServiceArea column initialized to False, and
    returns only the rows whose STNAME matches `state_name`.
    """
    source_url = "https://raw.githubusercontent.com/plotly/datasets/master/minoritymajority.csv"
    counties = pd.read_csv(source_url)
    counties.loc[:, "ServiceArea"] = False
    return counties[counties["STNAME"] == state_name]
| StarcoderdataPython |
8036729 | <filename>setup.py
"""setuptools packaging."""
import setuptools
setuptools.setup(
    name="docker_kafka_reconciliation",
    version="0.0.1",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Submits Kafka reconciliation queries to Athena and uploads the results to S3",
    entry_points={
        "console_scripts": [
            # Installs a `query` executable calling kafka_reconciliation.main().
            # NOTE(review): with package_dir mapping "" -> kafka_reconciliation/,
            # verify that a module named "kafka_reconciliation" exposing main()
            # is importable after install.
            "query=kafka_reconciliation:main"
        ]
    },
    # Package sources live under the kafka_reconciliation/ directory.
    package_dir={"": "kafka_reconciliation"},
    packages=setuptools.find_packages("kafka_reconciliation"),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| StarcoderdataPython |
1945244 | <filename>modules/sbot/robot.py<gh_stars>0
from __future__ import annotations
import math
import random
from os import path, environ
from typing import Optional
from threading import Lock
from sbot import motor, radio, magnet, arduino, compass, encoder
# Webots specific library
from controller import Robot as WebotsRobot
from shared_utils import RobotType
class Robot:
    """
    Primary API for access to robot parts.
    This robot requires that the consumer manage the progression of time manually
    by calling the `sleep` method.
    """
    def __init__(self, quiet: bool = False, init: bool = True) -> None:
        self._initialised = False
        self._quiet = quiet
        self.webot = WebotsRobot()
        # returns a float, but should always actually be an integer value
        self._timestep = int(self.webot.getBasicTimeStep())
        # Configuration supplied by the competition launcher via environment.
        self.mode = environ.get("SR_ROBOT_MODE", "dev")
        self.zone = int(environ.get("SR_ROBOT_ZONE", 0))
        self.type = RobotType(environ.get("SR_ROBOT_TYPE", "forklift"))
        self.arena = "A"
        # Directory containing the competitor's code (parent of the entry file).
        self.usbkey = path.normpath(path.join(environ["SR_ROBOT_FILE"], "../"))
        # Lock used to guard access to Webot's time stepping machinery, allowing
        # us to safely advance simulation time from *either* the competitor's
        # code (in the form of our `sleep` method) or from our background
        # thread, but not both.
        self._step_lock = Lock()
        if init:
            self.init()
            self.wait_start()
    @classmethod
    def setup(cls) -> Robot:
        # Construct a Robot without initializing devices or waiting for the
        # start signal; the caller must run `init` (and wait) manually.
        return cls(init=False)
    def init(self) -> None:
        """Initialize all devices and report robot metadata."""
        self._init_devs()
        self._initialised = True
        self.display_info()
    def _get_user_code_info(self) -> Optional[str]:
        # Read the competitor-code revision marker from the "USB key", if any.
        user_version_path = path.join(self.usbkey, '.user-rev')
        if path.exists(user_version_path):
            with open(user_version_path) as f:
                return f.read().strip()
        return None
    def display_info(self) -> None:
        """Print a one-line summary of the robot's configuration."""
        user_code_version = self._get_user_code_info()
        parts = [
            f"Zone: {self.zone}",
            f"Mode: {self.mode}",
            f"Type: {self.type.value}",
        ]
        if user_code_version:
            parts.append(f"User code: {user_code_version}")
        print("Robot Initialized. {}.".format(", ".join(parts)))  # noqa:T001
    def webots_step_and_should_continue(self, duration_ms: int) -> bool:
        """
        Run a webots step of the given duration in milliseconds.
        Returns whether or not the simulation should continue (based on
        Webots telling us whether or not the simulation is about to end).
        """
        if duration_ms <= 0:
            raise ValueError(
                f"Duration must be greater than zero, not {duration_ms!r}",
            )
        with self._step_lock:
            # We use Webots in synchronous mode (specifically
            # `synchronization` is left at its default value of `TRUE`). In
            # that mode, Webots returns -1 from step to indicate that the
            # simulation is terminating, or 0 otherwise.
            result = self.webot.step(duration_ms)
            return result != -1
    def wait_start(self) -> None:
        "Wait for the start signal to happen"
        if self.mode not in ["comp", "dev", "remote-dev"]:
            # BUG FIX: the message previously read "'dev or 'remote-dev'",
            # with a misplaced quote around 'dev'.
            raise Exception(
                f"mode of '{self.mode}' is not supported -- must be 'comp', "
                "'dev' or 'remote-dev'",
            )
        if self.zone < 0 or self.zone > 3:
            raise Exception(
                f"zone must be in range 0-3 inclusive -- value of {self.zone} is invalid",
            )
        if self.arena not in ["A", "B"]:
            raise Exception("arena must be A or B")
        print("Waiting for start signal.")  # noqa:T001
        # Always advance time by a little bit. This simulates the real-world
        # condition that the wait-start mechanism would always wait for the
        # start button.
        self.webots_step_and_should_continue(
            self._timestep * random.randint(8, 20),
        )
        if self.mode in ['comp', 'remote-dev']:
            # Interact with the supervisor "robot" to wait for the start of the match.
            self.webot.setCustomData('ready')
            while (
                self.webot.getCustomData() != 'start' and
                self.webots_step_and_should_continue(self._timestep)
            ):
                pass
        print("Starting")  # noqa:T001
    def _init_devs(self) -> None:
        "Initialise the attributes for accessing devices"
        # Motor boards
        self._init_motors()
        # Ruggeduinos
        self._init_arduino()
        # No camera for SR2021
        # Radio
        self._init_radio()
        # Compass
        self._init_compass()
        # Crane Magnet
        self._init_magnet()
        # Position encoders
        self._init_encoders()
    def _init_motors(self) -> None:
        self.motor_boards = motor.init_motor_array(self.webot, self.type)
    def _init_arduino(self) -> None:
        self.arduino = arduino.init_arduino(self.webot, self.type)
    def _init_radio(self) -> None:
        self.radio = radio.Radio(self.webot, self.zone, self._step_lock)
    def _init_compass(self) -> None:
        if self.type != RobotType.CRANE:  # The crane lacks a compass
            self.compass = compass.Compass(self.webot)
    def _init_magnet(self) -> None:
        if self.type == RobotType.CRANE:
            self.magnet = magnet.Magnet(self.webot)
    def _init_encoders(self) -> None:
        self.encoders = encoder.init_encoder_array(self.webot, self.type)
    def time(self) -> float:
        """
        Roughly equivalent to `time.time` but for simulation time.
        """
        return self.webot.getTime()
    def sleep(self, secs: float) -> None:
        """
        Roughly equivalent to `time.sleep` but accounting for simulation time.
        """
        # Checks that secs is positive or zero
        if secs < 0:
            raise ValueError('sleep length must be non-negative')
        # Ensure the time delay is a valid step increment, while also ensuring
        # that small values remain nonzero.
        # NOTE(review): sleep(0) yields duration_ms == 0 and
        # webots_step_and_should_continue will raise -- confirm whether
        # sleep(0) should instead be a no-op.
        n_steps = math.ceil((secs * 1000) / self._timestep)
        duration_ms = n_steps * self._timestep
        # Assume that we're in the main thread here, so we don't really need to
        # do any cleanup if Webots tells us the simulation is terminating. When
        # webots kills the process all the proper tidyup will happen anyway.
        self.webots_step_and_should_continue(duration_ms)
    @property
    def is_competition(self) -> bool:
        # True when running under competition conditions (vs. development).
        return self.mode == 'comp'
| StarcoderdataPython |
5195230 | import cv2
try:
    image = cv2.imread('image/lego.jpg')
    # BUG FIX: cv2.imread does not raise on a missing/unreadable file -- it
    # returns None, which previously crashed with AttributeError on .shape
    # instead of being caught by the IOError handler below.
    if image is None:
        raise IOError('could not read image/lego.jpg')
    (height, width) = image.shape[:2]
    # Shrink to half size in each dimension with bicubic interpolation.
    res = cv2.resize(image, (int(width / 2), int(height / 2)), interpolation=cv2.INTER_CUBIC)
    cv2.imshow('Image Edge Detection', res)
    k = cv2.waitKey(0) & 0xFF
    if k == 27:  # Esc: close without saving
        cv2.destroyAllWindows()
    elif k == ord('s'):  # 's': save a copy, then close
        cv2.imwrite('lego_copy.png', res)
        cv2.destroyAllWindows()
        print('Photo saved!')
except IOError:
    print('Error while reading files !!!')
9653486 | # Copyright 2018 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Classes that provide the interface for writing genomics data.
`GenomicsWriter` defines the core API supported by writers, and is subclassed
directly or indirectly (via `DispatchingGenomicsWriter`) for all concrete
implementations.
`TFRecordWriter` is an implementation of the `GenomicsWriter` API for reading
`TFRecord` files. This is usable for all data types when writing data as
serialized protocol buffers.
`DispatchingGenomicsWriter` is an abstract class defined for convenience on top
of `GenomicsWriter` that supports writing to either the native file format or to
`TFRecord` files of the corresponding protocol buffer used to encode data of
that file type. The output format chosen is dependent upon the filename to which
the data are being written.
Concrete implementations for individual file types (e.g. BED, SAM, VCF, etc.)
reside in type-specific modules in this package. A general example of the write
functionality is shown below.
```python
# options is a writer-specific set of options.
options = ...
# records is an iterable of protocol buffers of the specific data type.
records = ...
with GenomicsWriterSubClass(output_path, options) as writer:
for proto in records:
writer.write(proto)
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import errno
from absl import logging
from third_party.nucleus.io.python import tfrecord_writer
class GenomicsWriter(object):
    """Abstract base class for writing genomics data.
    A GenomicsWriter only has one method, write, which writes a single
    protocol buffer to a file.
    """
    # NOTE(review): `__metaclass__` is Python-2 syntax; under Python 3 this
    # assignment has no effect, so @abstractmethod is not actually enforced.
    # Consider subclassing abc.ABC once Python 2 support is dropped.
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def write(self, proto):
        """Writes proto to the file.
        Args:
          proto: A protocol buffer.
        """
    def __enter__(self):
        """Enter a `with` block."""
        return self
    @abc.abstractmethod
    def __exit__(self, unused_type, unused_value, unused_traceback):
        """Exit a `with` block. Typically, this will close the file."""
class TFRecordWriter(GenomicsWriter):
    """A GenomicsWriter that writes to a TFRecord file.
    Example usage:
      writer = TFRecordWriter('/tmp/my_output.tfrecord.gz')
      for record in records:
        writer.write(record)
    Note that TFRecord files do not need to be wrapped in a "with" block.
    """
    def __init__(self, output_path, header=None, compression_type=None):
        """Initializer.
        Args:
          output_path: str. The output path to which the records are written.
          header: An optional header for the particular data type. This can be
            useful for file types that have logical headers where some operations
            depend on that header information (e.g. VCF using its headers to
            determine type information of annotation fields).
          compression_type: Either 'ZLIB', 'GZIP', '' (uncompressed), or
            None. If None, __init__ will guess the compression type based on
            the input_path's suffix.
        Raises:
          IOError: if there was any problem opening output_path for writing.
        """
        super(TFRecordWriter, self).__init__()
        # The header is stored for callers' introspection; the TFRecord
        # container itself has no header section.
        self.header = header
        if compression_type is None:
            # A '.gz' suffix implies GZIP compression; anything else is
            # written uncompressed.
            compression_type = 'GZIP' if output_path.endswith('.gz') else ''
        self._writer = tfrecord_writer.TFRecordWriter.from_file(
            output_path, compression_type)
        if self._writer is None:
            raise IOError(errno.EIO, 'Error opening %s for writing' % output_path)
    def write(self, proto):
        """Writes the proto to the TFRecord file."""
        self._writer.write(proto.SerializeToString())
    def __exit__(self, exit_type, exit_value, exit_traceback):
        self._writer.close()
class DispatchingGenomicsWriter(GenomicsWriter):
    """A GenomicsWriter that dispatches based on the file extension.
    If '.tfrecord' is present in the filename, a TFRecordWriter is used.
    Otherwise, a native writer is.
    Sub-classes of DispatchingGenomicsWriter must define a _native_writer()
    method.
    """
    def __init__(self, output_path, **kwargs):
        """Initializer.
        Args:
          output_path: str. The output path to which the records are written.
          **kwargs: k=v named args. Keyword arguments used to instantiate the native
            writer, if applicable.
        """
        super(DispatchingGenomicsWriter, self).__init__()
        self.header = kwargs.get('header', None)
        # Substring match: e.g. 'out.tfrecord.gz' also selects TFRecord output.
        if '.tfrecord' in output_path:
            self._writer = TFRecordWriter(output_path, header=self.header)
        else:
            self._writer = self._native_writer(output_path, **kwargs)
        logging.info('Writing %s with %s',
                     output_path, self._writer.__class__.__name__)
        self._post_init_hook()
    @abc.abstractmethod
    def _native_writer(self, output_path, **kwargs):
        """Returns a GenomicsWriter for writing the records `natively`.
        Args:
          output_path: The path to write the records to.
          **kwargs: Zero or more keyword arguments.
        Returns:
          A GenomicsWriter.
        """
    def write(self, proto):
        # Delegate to whichever concrete writer was selected at init time.
        self._writer.write(proto)
    def __exit__(self, exit_type, exit_value, exit_traceback):
        self._writer.__exit__(exit_type, exit_value, exit_traceback)
    def _post_init_hook(self):
        """Hook for subclasses to run code at the end of __init__."""
| StarcoderdataPython |
# Response status code, response headers, and response body.
# 1. Issue a request with requests.[method](url), which returns a response object.
# 2. resp.status_code -- response status code
#    resp.headers -- response headers
#    resp.content -- body as bytes
#    resp.text -- body as str
import requests
resp = requests.get('http://www.baidu.com')
status_code = resp.status_code
print(f'响应状态码:{status_code}')
headers = resp.headers # dict-like mapping of response headers
print('响应头')
print(headers)
bytes_body = resp.content # raw response body as bytes
print('字节码数据')
print(bytes_body)
text = resp.text # body decoded to str; equivalent to resp.content.decode('utf-8')
print(text)
| StarcoderdataPython |
3413954 | <filename>Chapter03/ch03_ex1.py<gh_stars>10-100
#!/usr/bin/env python3
"""Functional Python Programming
Chapter 3, Example Set 1
"""
from typing import Callable
class Mersenne1:
    """Callable computing Mersenne numbers via a pluggable **Strategy**.

    The injected `algorithm` computes 2**b; calling the instance with `b`
    yields the Mersenne number 2**b - 1.
    """
    def __init__(self, algorithm: Callable[[int], int]) -> None:
        # Stored publicly so callers can introspect it (e.g. pow2.__name__).
        self.pow2 = algorithm
    def __call__(self, arg: int) -> int:
        power = self.pow2(arg)
        return power - 1
def shifty(b: int) -> int:
    """Compute 2**b with a single left bit-shift.

    >>> shifty(17) - 1
    131071
    """
    return 1 << b
def multy(b: int) -> int:
    """Compute 2**b by naive linear recursion.

    >>> multy(17) - 1
    131071
    """
    # Base case b == 0; otherwise double the next-smaller power.
    return 1 if b == 0 else 2 * multy(b - 1)
def faster(b: int) -> int:
    """Compute 2**b by divide-and-conquer (square-and-multiply) recursion.

    >>> faster(17) - 1
    131071
    """
    if b == 0:
        return 1
    if b % 2:
        # Odd exponent: peel off one factor of two.
        return 2 * faster(b - 1)
    half = faster(b // 2)
    return half * half
# Implementations of Mersenne with strategy objects plugged in properly.
# Each instance computes 2**b - 1 using a different power-of-two strategy.
m1s = Mersenne1(shifty)
m1m = Mersenne1(multy)
m1f = Mersenne1(faster)
# Alternative Mersenne using class-level configuration.
# The syntax is awkward.
class Mersenne2:
    # Strategy configured per subclass rather than per instance.
    pow2: Callable[[int], int] = None
    def __call__(self, arg: int) -> int:
        # Look the strategy up via the class __dict__ so the plain function is
        # retrieved unbound: ordinary attribute access would turn it into a
        # bound method and implicitly pass `self` as the first argument.
        pow2 = self.__class__.__dict__['pow2']
        return pow2(arg)-1
class ShiftyMersenne(Mersenne2):
    pow2 = shifty
class MultyMersenee(Mersenne2):
    # NOTE(review): class name is misspelled ("Mersenee"); kept as-is because
    # renaming would break external references.
    pow2 = multy
class FasterMersenne(Mersenne2):
    pow2 = faster
# One instance per class-configured strategy.
m2s = ShiftyMersenne()
m2m = MultyMersenee()
m2f = FasterMersenne()
test_mersenne = """
>>> m1s(17)
131071
>>> m1m(17)
131071
>>> m1f(17)
131071
>>> m2s(17)
131071
>>> m2m(17)
131071
>>> m2f(17)
131071
>>> m1s(89)
618970019642690137449562111
>>> m1m(89)
618970019642690137449562111
>>> m1f(89)
618970019642690137449562111
"""
test_pure = """
>>> def m(n):
... return 2**n-1
>>> m(89)
618970019642690137449562111
"""
__test__ = {
'test_mersenne': test_mersenne,
'test_pure': test_pure
}
def test():
    """Run this module's doctests (docstrings plus __test__) verbosely."""
    import doctest
    doctest.testmod(verbose=2)
def performance():
    """Time each 2**b strategy with timeit (default 1,000,000 calls each).

    NOTE(review): the setup strings import from package ``Chapter_3`` while
    this file appears to live under ``Chapter03`` -- confirm the path matches
    the actual package layout before relying on this benchmark.
    """
    import timeit
    print(m1s.pow2.__name__,
          timeit.timeit(
              """m1s(17)""",
              """from Chapter_3.ch03_ex1 import m1s"""))
    print(m1m.pow2.__name__,
          timeit.timeit(
              """m1m(17)""",
              """from Chapter_3.ch03_ex1 import m1m"""))
    print(m1f.pow2.__name__,
          timeit.timeit(
              """m1f(17)""",
              """from Chapter_3.ch03_ex1 import m1f"""))
if __name__ == "__main__":
    import sys
    # Print the interpreter version so doctest output can be tied to a build.
    print(sys.version)
    test()
    # import timeit
    # performance()
| StarcoderdataPython |
3363190 | <reponame>tor-councilmatic/scrapers-ca
from __future__ import unicode_literals
from utils import CanadianScraper, CanadianPerson as Person
COUNCIL_PAGE = 'http://www.gov.mb.ca/legislature/members/mla_list_alphabetical.html'
def get_party(abbreviation):
    """Expand a party abbreviation from the MLA listing to its full name.

    Raises KeyError for an unrecognized abbreviation.
    """
    full_names = {
        'NDP': 'New Democratic Party of Manitoba',
        'PC': 'Progressive Conservative Party of Manitoba',
        'L': 'Manitoba Liberal Party',
        'Liberal': 'Manitoba Liberal Party',  # needed for a formatting error
        'IND': 'Independent',
    }
    return full_names[abbreviation]
class ManitobaPersonScraper(CanadianScraper):
    """Scrapes Manitoba MLAs from the legislature's alphabetical member list."""

    def scrape(self):
        """Yield one Person per sitting MLA listed on COUNCIL_PAGE."""
        member_page = self.lxmlize(COUNCIL_PAGE)
        # Assumes the first table on the page is the member table -- TODO confirm.
        table = member_page.xpath('//table')[0]
        rows = table.cssselect('tr')[1:]  # skip the header row
        for row in rows:
            (namecell, constitcell, partycell) = row.cssselect('td')
            full_name = namecell.text_content().strip()
            # Vacant seats appear as a row labelled "vacant": nothing to emit.
            if full_name.lower() == 'vacant':
                continue
            # Names are listed "Last, First"; drop the honorific and reorder.
            (last, first) = full_name.split(',')
            name = first.replace('Hon.', '').strip() + ' ' + last.title().strip()
            # Collapse internal whitespace in the constituency name.
            district = ' '.join(constitcell.text_content().split())
            party = get_party(partycell.text)
            url = namecell.cssselect('a')[0].get('href')
            photo, email = self.get_details(url)
            p = Person(primary_org='legislature', name=name, district=district, role='MLA',
                       party=party, image=photo)
            p.add_source(COUNCIL_PAGE)
            p.add_source(url)
            p.add_contact('email', email)
            yield p

    def get_details(self, url):
        """Return (photo URL, email address) scraped from one MLA's page."""
        page = self.lxmlize(url)
        photo = page.xpath('//img[@class="page_graphic"]/@src')[0]
        email = self.get_email(page)
        return photo, email
| StarcoderdataPython |
9620698 | import ip_publica as publica
from subprocess import Popen, PIPE, STDOUT
import os
#prueba con los colores#
from colorama import Fore, init, Back, Style
BIENVENIDA = '''
______ _ _ _
(____ \(_) (_) | |
____) )_ ____ ____ _ _ ____ ____ _ _ | | ___
| __ (| |/ _ ) _ \ | | / _ ) _ \| |/ || |/ _ \
| |__) ) ( (/ /| | | \ V ( (/ /| | | | ( (_| | |_| |
|______/|_|\____)_| |_|\_/ \____)_| |_|_|\____|\___/
'''
def comando(orden):
    """Run *orden* through the shell and return combined stdout+stderr as text.

    The original left the Popen object open, leaking its pipe file
    descriptors; the context manager guarantees they are closed.

    NOTE(review): shell=True executes the string via the shell -- pass only
    trusted, program-built command strings (never user input).
    """
    with Popen(orden, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT) as proceso:
        salida, _ = proceso.communicate()
    return salida.decode('utf-8')
def salida_datos(ip_publica, ip_privada):
    """Print both IP addresses: label in green, value in white (colorama)."""
    print(f'{Fore.GREEN}Tu direccion ip publica>> {Fore.WHITE}{ip_publica}')
    print(f'{Fore.GREEN}Tu direccion ip privada>> {Fore.WHITE}{ip_privada}')
if __name__ == "__main__":
    print(BIENVENIDA)
    # First show the main info before the user starts working in a terminal:
    # public IP (external service) and private IP (parsed from `ip add`).
    print('obteniendo tus direcciones ip')
    print(Fore.YELLOW+'Buscando..')
    ip_privada = comando('ip add | egrep -o "[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\/24"')
    salida_datos(publica.obtener_ip_publica(), ip_privada)
| StarcoderdataPython |
4919422 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: <NAME> (<EMAIL>)
# @Date: 2020-07-29
# @Filename: test_configuration.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
import inspect
import io
import os
import unittest.mock
import pytest
from sdsstools import Configuration, get_config
from sdsstools.configuration import DEFAULT_PATHS, read_yaml_file
BASE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "etc/test.yml")
BASE = """
cat1:
key1: base_value
cat2:
key2: 1
"""
EXTENDABLE = """
#
#!extends {base_path}
cat1:
# test
key1: value1
"""
@pytest.fixture(autouse=True)
def cleanup():
    """Ensure the TEST_CONFIG_PATH override never leaks between tests."""
    yield

    if "TEST_CONFIG_PATH" in os.environ:
        del os.environ["TEST_CONFIG_PATH"]


@pytest.fixture
def config_file(tmp_path):
    # Writes a throwaway user config that overrides cat1.key1.
    content = """
    cat1:
        key1: another_value
    """

    tmp_file = tmp_path / "test_config.yml"
    tmp_file.write_text(content)

    yield tmp_file

    tmp_file.unlink()


@pytest.fixture
def update_default_paths(config_file):
    """Temporarily point sdsstools' DEFAULT_PATHS at the tmp user config."""
    orig_paths = DEFAULT_PATHS.copy()
    DEFAULT_PATHS[:] = [config_file.parent / "{name}_config"]

    yield

    # Restore in place so other modules holding the list see the originals.
    DEFAULT_PATHS[:] = orig_paths


@pytest.fixture
def set_envvar():
    # Exposes A_TEST_VARIABLE for the envvar-substitution tests.
    os.environ["A_TEST_VARIABLE"] = "blah"

    yield

    del os.environ["A_TEST_VARIABLE"]


@pytest.fixture
def extendable(tmp_path):
    # Yields (stream containing an !extends directive, path of the base file).
    base_path = tmp_path / "base.yaml"
    base_path.write_text(BASE)

    yield io.StringIO(EXTENDABLE.format(base_path=str(base_path))), base_path
def test_configuration(config_file):
    # An empty Configuration behaves like an empty dict.
    config = Configuration()
    assert config == {}

    # The same file can be loaded as the user config or as the base config.
    config = Configuration(config_file)
    assert config["cat1"]["key1"] == "another_value"

    config = Configuration(base_config=config_file)
    assert config["cat1"]["key1"] == "another_value"


def test_configuration_user(config_file):
    # User values override the base; untouched base keys survive the merge.
    config = Configuration(config_file, base_config=BASE_CONFIG_FILE)
    assert config["cat1"]["key1"] == "another_value"
    assert config["cat1"]["key2"] == 1


def test_configuration_envvar(set_envvar):
    # Env-var references in the YAML resolve from the environment.
    config = Configuration(BASE_CONFIG_FILE)
    assert config["cat2"]["key4"] == "blah"


def test_configuration_envvar_defaults():
    # With the variable unset, the supplied default is used instead.
    config = Configuration(BASE_CONFIG_FILE, default_envvars={"A_TEST_VARIABLE": "foo"})
    assert config["cat2"]["key4"] == "foo"


def test_configurations_bad_value():
    # A non-path, non-dict source is rejected.
    with pytest.raises(ValueError):
        Configuration(1)  # type: ignore


def test_configuration_dict():
    config = {"cat1": {"key1": 1}}
    conf = Configuration(base_config=config)
    # A dict base is stored directly; no file paths are recorded.
    assert conf._BASE == config
    assert conf._BASE_CONFIG_FILE is None
    assert conf.CONFIG_FILE is None


def test_configuration_user_dict():
    config = Configuration(base_config=BASE_CONFIG_FILE)
    assert config._BASE_CONFIG_FILE == BASE_CONFIG_FILE
    assert config.CONFIG_FILE == BASE_CONFIG_FILE
    assert config.keys() != {}

    # Reloading with use_base=False drops the base values entirely.
    config.load({}, use_base=False)
    assert list(config.keys()) == []
def test_get_config_etc():
    # get_config finds the module-relative etc/test.yml whether or not a
    # user config is allowed.
    config = get_config("test", allow_user=False)
    assert isinstance(config, Configuration)
    assert config["cat1"]["key1"] == "value"

    config = get_config("test", allow_user=True)
    assert isinstance(config, Configuration)
    assert config["cat1"]["key1"] == "value"


def test_get_config_etc_with_user(config_file):
    # An explicit user_path (as a Path) overrides the etc values.
    config = get_config("test", allow_user=True, user_path=config_file)
    assert isinstance(config, Configuration)
    assert config["cat1"]["key1"] == "another_value"


def test_get_config_etc_with_user_str(config_file):
    # user_path may also be given as a plain string.
    config = get_config("test", allow_user=True, user_path=str(config_file))
    assert isinstance(config, Configuration)
    assert config["cat1"]["key1"] == "another_value"


def test_get_config_default_path(update_default_paths):
    # The patched DEFAULT_PATHS is searched when no explicit path is given.
    config = get_config("test")
    assert config["cat1"]["key1"] == "another_value"


def test_get_config_envvar_path(config_file):
    # TEST_CONFIG_PATH takes effect over the default search paths.
    os.environ["TEST_CONFIG_PATH"] = str(config_file)
    config = get_config("test")
    assert config["cat1"]["key1"] == "another_value"


def test_get_config_no_update(config_file):
    # merge_mode="replace" discards base sections absent from the user file.
    config = get_config(
        "test",
        config_file=BASE_CONFIG_FILE,
        user_path=config_file,
        merge_mode="replace",
    )
    assert config["cat1"]["key1"] == "another_value"
    assert "cat2" not in config


@unittest.mock.patch.object(inspect, "stack", side_effect=AttributeError)
def test_get_config_bad_module(mock_func):
    # When the caller's module cannot be inspected, get_config degrades to {}.
    config = get_config("test")
    assert config == {}
def test_extends(extendable):
    stream, __ = extendable
    data = read_yaml_file(stream)
    # With the !extends directive honoured, the base file's values win and
    # its extra sections are merged in.
    assert data["cat1"]["key1"] == "base_value"
    assert "cat2" in data


def test_extends_file_not_found(extendable):
    stream, base_path = extendable
    base_path.unlink()
    # A dangling !extends target raises (FileExistsError by API design).
    with pytest.raises(FileExistsError):
        read_yaml_file(stream)


def test_dont_extend(extendable):
    stream, __ = extendable
    data = read_yaml_file(stream, use_extends=False)
    # Directive ignored: the stream's own values stand and the base is absent.
    assert data["cat1"]["key1"] == "value1"
    assert "cat2" not in data


def test_extends_from_file(tmp_path):
    # The !extends path may be relative to the extending file's directory.
    base_path = tmp_path / "subdir" / "base.yaml"
    (tmp_path / "subdir").mkdir()
    base_path.touch()
    base_path.write_text(BASE)

    extendable_path = tmp_path / "extendable.yaml"
    extendable_relative = EXTENDABLE.format(base_path="subdir/base.yaml")
    extendable_path.write_text(extendable_relative)

    data = read_yaml_file(extendable_path)
    assert data["cat1"]["key1"] == "base_value"
    assert "cat2" in data


def test_read_empty_yaml(tmp_path):
    # An empty file parses to an empty mapping, not None.
    path = tmp_path / "base.yaml"
    path.touch()

    data = read_yaml_file(path)
    assert data == {}
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.