hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fcc9e01939ec837964e1db345f0c9a95613dca04 | 2,057 | py | Python | _backend_api/views.py | Amechi101/indieapp | 606c1346f65c343eb2cc8f7fba9d555b8c30a7fa | [
"MIT"
] | null | null | null | _backend_api/views.py | Amechi101/indieapp | 606c1346f65c343eb2cc8f7fba9d555b8c30a7fa | [
"MIT"
] | null | null | null | _backend_api/views.py | Amechi101/indieapp | 606c1346f65c343eb2cc8f7fba9d555b8c30a7fa | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from django.contrib import auth, messages
from django.views.generic import ListView
from django.views.generic.detail import SingleObjectMixin
from _backend_api.models import Product, Brand, Location
from subscription.models import Subscription
from subscription.managers import SubscriptionManager
class BrandDetailView(SingleObjectMixin, ListView):
    """Brand detail page: the brand itself plus its products and locations."""
    template_name = 'brands/_brandguide.html'
    def get(self, request, *args, **kwargs):
        # Resolve the Brand instance before ListView.get() builds the context.
        self.object = self.get_object(queryset=Brand.objects.all())
        return super(BrandDetailView, self).get(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        ctx = super(BrandDetailView, self).get_context_data(**kwargs)
        ctx['brand'] = self.object
        # Number of Subscription rows linking the current user to this brand
        # (0 or 1 in practice) — presumably used as a truthy "already
        # following" flag in the template; confirm against the template.
        ctx['is_followed'] = Subscription.objects.filter(brand=self.object, user=self.request.user.id).count()
        ctx['product_list'] = Product.objects.filter(brand=self.object)
        ctx['address_list'] = Location.objects.filter(brand=self.object)
        return ctx
    def get_queryset(self, **kwargs):
        # NOTE(review): returns the single Brand instance rather than a
        # queryset; ListView tolerates this here but it is unconventional —
        # confirm before refactoring.
        return self.object
class BrandCollectionView(SingleObjectMixin, ListView):
    """Display the product collection that belongs to a single brand."""
    template_name = 'brands/_brandcollection.html'
    context_object_name = 'brand_collection'

    def get(self, request, *args, **kwargs):
        # Resolve the Brand instance before delegating to ListView.get().
        self.object = self.get_object(queryset=Brand.objects.all())
        return super(BrandCollectionView, self).get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(BrandCollectionView, self).get_context_data(**kwargs)
        context.update({
            'brand': self.object,
            'product_list': Product.objects.filter(brand=self.object),
        })
        return context

    def get_queryset(self, **kwargs):
        # Same convention as the other brand views: the resolved object
        # doubles as the "queryset".
        return self.object
class BrandArchiveView(ListView):
    """List every active brand, ordered alphabetically by name."""
    template_name = 'brands/_brandarchive.html'
    model = Brand

    def get_context_data(self, **kwargs):
        context = super(BrandArchiveView, self).get_context_data(**kwargs)
        active_brands = Brand.objects.filter(brand_state=True).order_by('brand_name')
        context['brand_list_archive'] = active_brands
        return context
| 26.371795 | 104 | 0.7579 |
a071ac8c516edd4f2ca5b67cffe5e58b7db49419 | 3,958 | py | Python | glvm/auto_encoder.py | zhangdongkun98/generative-latent-variable-models | 11e54ff33e6b86e2b87213b8620676a4e19ad24f | [
"MIT"
] | null | null | null | glvm/auto_encoder.py | zhangdongkun98/generative-latent-variable-models | 11e54ff33e6b86e2b87213b8620676a4e19ad24f | [
"MIT"
] | null | null | null | glvm/auto_encoder.py | zhangdongkun98/generative-latent-variable-models | 11e54ff33e6b86e2b87213b8620676a4e19ad24f | [
"MIT"
] | null | null | null | import rllib
from typing import List
import torch
import torch.nn as nn
from torch.optim import Adam
class AutoEncoder(rllib.template.MethodSingleAgent):
    # Hyperparameters (class-level constants).
    lr_model = 0.0003        # Adam learning rate for the autoencoder
    buffer_size = 10000      # replay buffer capacity
    batch_size = 144         # samples per gradient step
    weight = batch_size / buffer_size
    start_timesteps = 10000  # minimum buffer fill before training starts
    save_model_interval = 200
    def __init__(self, config: rllib.basic.YamlConfig, writer: rllib.basic.Writer):
        '''Build the autoencoder model, its optimizer, MSE reconstruction
        loss and the replay buffer (buffer class overridable via config).
        '''
        super().__init__(config, writer)
        self.model = Model(config).to(self.device)
        self.models_to_save = [self.model]
        self.optimizer = Adam(self.model.parameters(), lr=self.lr_model)
        self.model_loss = nn.MSELoss()
        self._memory = config.get('buffer', ReplayBuffer)(self.buffer_size, self.batch_size, self.device)
        return
    def update_parameters(self):
        # One reconstruction-loss gradient step; no-op until the buffer holds
        # at least start_timesteps samples.
        if len(self._memory) < self.start_timesteps:
            return
        super().update_parameters()
        '''load data batch'''
        experience = self._memory.sample()
        input: torch.Tensor = experience.input
        output = self.model(input)
        # Target is the (detached) input itself: plain reconstruction loss.
        loss = self.model_loss(output, input.detach())
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.writer.add_scalar('loss/loss', loss.detach().item(), self.step_update)
        if self.step_update % self.save_model_interval == 0: self._save_model()
        return
    @torch.no_grad()
    def select_action(self, _):
        # Actions are irrelevant for autoencoder training: return a random
        # uniform action in [-1, 1] (dim_action presumably set by the rllib
        # base class from config — confirm).
        super().select_action()
        action = torch.Tensor(1,self.dim_action).uniform_(-1,1)
        return action
class Model(rllib.template.Model):
    """Convolutional autoencoder: five stride-2 conv blocks down to a Tanh
    latent bottleneck of size config.dim_latent, then five stride-2
    transposed-conv blocks back up.

    The 512*4 flattened size implies a 2x2 spatial map after encoding,
    i.e. 64x64 inputs — TODO confirm against the data pipeline.
    """
    def __init__(self, config):
        super(Model, self).__init__(config)
        # Width of the latent code vector.
        self.dim_latent = config.dim_latent
        # Encoder: each block halves the spatial resolution (stride 2).
        self.encoder = nn.Sequential(
            nn.Conv2d(config.in_channels, 32, kernel_size=3, stride=2, padding=1), nn.BatchNorm2d(32), nn.LeakyReLU(),
            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1), nn.BatchNorm2d(64), nn.LeakyReLU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1), nn.BatchNorm2d(128), nn.LeakyReLU(),
            nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1), nn.BatchNorm2d(256), nn.LeakyReLU(),
            nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1), nn.BatchNorm2d(512), nn.LeakyReLU(),
        )
        # Project the flattened feature map to the latent space; Tanh bounds
        # the code to [-1, 1].
        self.encoder_output = nn.Sequential(
            nn.Linear(512 *4, config.dim_latent), nn.Tanh()
        )
        self.decoder_input = nn.Linear(config.dim_latent, 512 *4)
        # Decoder mirrors the encoder; final Tanh keeps outputs in [-1, 1].
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, output_padding=1), nn.BatchNorm2d(256), nn.LeakyReLU(),
            nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, output_padding=1), nn.BatchNorm2d(128), nn.LeakyReLU(),
            nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, output_padding=1), nn.BatchNorm2d(64), nn.LeakyReLU(),
            nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, output_padding=1), nn.BatchNorm2d(32), nn.LeakyReLU(),
            nn.ConvTranspose2d(32, config.in_channels, kernel_size=3, stride=2, padding=1, output_padding=1), nn.Tanh(),
        )
        self.apply(rllib.utils.init_weights)
        return
    def forward(self, input: torch.Tensor):
        """Reconstruct *input* by encoding to latent space and decoding."""
        z = self.encode(input)
        output = self.decode(z)
        return output
    def encode(self, input: torch.Tensor):
        """Map an image batch to latent codes of shape (batch, dim_latent)."""
        x = self.encoder(input)
        x = torch.flatten(x, start_dim=1)
        return self.encoder_output(x)
    def decode(self, z: torch.Tensor):
        """Map latent codes back to image space."""
        x = self.decoder_input(z)
        # Un-flatten into the (512, 2, 2) feature map the decoder expects.
        x = x.view(z.shape[0], 512, 2, 2)
        x = self.decoder(x)
        return x
class ReplayBuffer(rllib.buffer.ReplayBuffer):
    """Replay buffer whose samples are raw image tensors."""
    def _batch_stack(self, batch):
        # Concatenate the individual image tensors along the batch dimension
        # and wrap them in an Experience with a single 'input' field.
        image = torch.cat(list(batch), dim=0)
        experience = rllib.template.Experience(input=image)
        return experience
| 33.260504 | 132 | 0.638201 |
0e51ec28fcabe2694449f367a6dd949a1f5e8a53 | 28 | py | Python | logger/__init__.py | ducminhkhoi/Video-Attention | 84aea8a14bd375ca9765f76a6e5b6ad3f8a951c7 | [
"MIT"
] | null | null | null | logger/__init__.py | ducminhkhoi/Video-Attention | 84aea8a14bd375ca9765f76a6e5b6ad3f8a951c7 | [
"MIT"
] | null | null | null | logger/__init__.py | ducminhkhoi/Video-Attention | 84aea8a14bd375ca9765f76a6e5b6ad3f8a951c7 | [
"MIT"
] | 2 | 2020-02-29T02:41:50.000Z | 2021-10-02T16:54:34.000Z | from logger.logger import *
| 14 | 27 | 0.785714 |
d2624356d7fdcb1e5a73c54b384b575c16e48ef1 | 319 | py | Python | src/kong/__init__.py | SpazioDati/python-kong | fbfc0a357a5ea0471c2dc138bcbe521349ded965 | [
"BSD-2-Clause"
] | null | null | null | src/kong/__init__.py | SpazioDati/python-kong | fbfc0a357a5ea0471c2dc138bcbe521349ded965 | [
"BSD-2-Clause"
] | null | null | null | src/kong/__init__.py | SpazioDati/python-kong | fbfc0a357a5ea0471c2dc138bcbe521349ded965 | [
"BSD-2-Clause"
] | 1 | 2020-01-17T06:55:00.000Z | 2020-01-17T06:55:00.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
# Opportunistically patch urllib3 to use pyOpenSSL (SNI support on older
# Pythons); silently skipped when pyOpenSSL is not installed.
try: # pragma: no cover
    # https://urllib3.readthedocs.org/en/latest/security.html#pyopenssl
    import urllib3.contrib.pyopenssl
    urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
    pass

# Single-source package version string.
__version__ = '0.3.1'
| 26.583333 | 71 | 0.742947 |
e26f4e1f9ad50b625a802011a462d50cbf49defa | 8,079 | py | Python | contrib/devtools/update-translations.py | Saci-de-bani/onecommacoin | bff7fd5cab50d4be5fb685185c54df035adb95ce | [
"MIT"
] | null | null | null | contrib/devtools/update-translations.py | Saci-de-bani/onecommacoin | bff7fd5cab50d4be5fb685185c54df035adb95ce | [
"MIT"
] | null | null | null | contrib/devtools/update-translations.py | Saci-de-bani/onecommacoin | bff7fd5cab50d4be5fb685185c54df035adb95ce | [
"MIT"
] | 1 | 2021-08-06T05:44:52.000Z | 2021-08-06T05:44:52.000Z | #!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'onecommacoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
    """Abort with exit status 1 unless the current working directory is the
    repository root, detected by the presence of a '.git' entry."""
    if not os.path.exists('.git'):
        # Both diagnostics go to stderr (the original sent the first line to
        # stdout), keeping stdout clean for tooling.
        print('No .git directory found', file=sys.stderr)
        print('Execute this script at the root of the repository', file=sys.stderr)
        sys.exit(1)
def fetch_all_translations():
    """Pull every translation from Transifex via the tx tool; exit(1) on failure."""
    status = subprocess.call([TX, 'pull', '-f', '-a'])
    if status:
        print('Error while fetching translations', file=sys.stderr)
        exit(1)
def find_format_specifiers(s):
    '''Find all format specifiers in a string.'''
    specifiers = []
    idx = s.find('%')
    while idx >= 0:
        # A lone trailing '%' raises IndexError here on purpose; the caller
        # (check_format_specifiers) reports it as a parse error.
        specifiers.append(s[idx + 1])
        idx = s.find('%', idx + 2)
    return specifiers
def split_format_specifiers(specifiers):
    '''Split format specifiers between numeric (Qt) and others (strprintf)'''
    qt_numeric = []
    printf_other = []
    for spec in specifiers:
        if spec in {'1','2','3','4','5','6','7','8','9'}:
            qt_numeric.append(spec)
        else:
            printf_other.append(spec)
    # A message mixing numeric and non-numeric specifiers is assumed to be
    # Qt-formatted (see https://doc.qt.io/qt-5/qstring.html#arg): Qt only
    # substitutes the numbered arguments, so leftovers such as '%)' are not
    # real printf specifiers and are dropped.
    if qt_numeric:
        printf_other = []
    # Qt placeholders may appear in any order (hence a set); strprintf
    # arguments are positional, so order is preserved in a list.
    return set(qt_numeric), printf_other
def sanitize_string(s):
    '''Sanitize string for printing'''
    # Newlines would break the one-error-per-line report format.
    return ' '.join(s.split('\n'))
def check_format_specifiers(source, translation, errors, numerus):
    """Verify that *translation* uses the same format specifiers as *source*.

    Appends human-readable problems to *errors* (mutated in place) and
    returns False when the translation is invalid.  *numerus* marks plural
    (numerusform) messages, which get one relaxation (see below).
    """
    source_f = split_format_specifiers(find_format_specifiers(source))
    # assert that no source messages contain both Qt and strprintf format specifiers
    # if this fails, go change the source as this is hacky and confusing!
    assert(not(source_f[0] and source_f[1]))
    try:
        translation_f = split_format_specifiers(find_format_specifiers(translation))
    except IndexError:
        # A lone trailing '%' makes find_format_specifiers index past the
        # end of the string.
        errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
        return False
    else:
        if source_f != translation_f:
            if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
                # Allow numerus translations to omit %n specifier (usually when it only has one possible value)
                return True
            errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
            return False
    return True
def all_ts_files(suffix=''):
    """Yield (filename, filepath) for every translation file in LOCALE_DIR.

    Skips the source language file.  When *suffix* is given, only files
    ending in '.ts<suffix>' match, and the suffix is stripped from the
    yielded name/path again.
    """
    for entry in os.listdir(LOCALE_DIR):
        # process only language files, never the source language itself
        if not entry.endswith('.ts' + suffix) or entry == SOURCE_LANG + suffix:
            continue
        if suffix:
            entry = entry[0:-len(suffix)]
        yield (entry, os.path.join(LOCALE_DIR, entry))
# Matches ASCII control bytes other than \n (\x0a) and \r (\x0d).
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')

def remove_invalid_characters(s):
    '''Remove invalid characters from translation string'''
    # These control bytes are not representable in XML 1.0 and would make
    # the parser fail.
    return re.sub(FIX_RE, b'', s)
# Override cdata escape function to make our output match Qt's (optional,
# only used for cleaner diffs, enabled via reduce_diff_hacks).
# Holds the original ElementTree escape function once
# postprocess_translations installs the override; None until then.
_orig_escape_cdata = None
def escape_cdata(text):
    # Escape like ElementTree, then additionally encode quotes the way Qt's
    # lupdate does, so regenerated files diff cleanly against Qt output.
    text = _orig_escape_cdata(text)
    text = text.replace("'", '&apos;')
    text = text.replace('"', '&quot;')
    return text
def postprocess_translations(reduce_diff_hacks=False):
    """Validate and clean up all fetched .ts files in place.

    Each file is renamed to '<name>.orig', parsed (after stripping control
    bytes), checked for format-specifier mismatches, stripped of location
    tags and unfinished messages, and written back — unless it ends up with
    fewer than MIN_NUM_MESSAGES messages, in which case it is dropped.
    With reduce_diff_hacks, output is tweaked to match Qt's own formatting.
    Returns True when any invalid translation was found.
    """
    print('Checking and postprocessing...')
    if reduce_diff_hacks:
        # Install the Qt-style cdata escaping (see escape_cdata above).
        global _orig_escape_cdata
        _orig_escape_cdata = ET._escape_cdata
        ET._escape_cdata = escape_cdata
    # Move every language file aside; the cleaned version is rewritten under
    # the original name below (files that are dropped simply stay '.orig').
    for (filename,filepath) in all_ts_files():
        os.rename(filepath, filepath+'.orig')
    have_errors = False
    for (filename,filepath) in all_ts_files('.orig'):
        # pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
        with open(filepath + '.orig', 'rb') as f:
            data = f.read()
        # remove control characters; this must be done over the entire file otherwise the XML parser will fail
        data = remove_invalid_characters(data)
        tree = ET.parse(io.BytesIO(data), parser=parser)
        # iterate over all messages in file
        root = tree.getroot()
        for context in root.findall('context'):
            for message in context.findall('message'):
                numerus = message.get('numerus') == 'yes'
                source = message.find('source').text
                translation_node = message.find('translation')
                # pick all numerusforms
                if numerus:
                    translations = [i.text for i in translation_node.findall('numerusform')]
                else:
                    translations = [translation_node.text]
                for translation in translations:
                    if translation is None:
                        continue
                    errors = []
                    valid = check_format_specifiers(source, translation, errors, numerus)
                    for error in errors:
                        print('%s: %s' % (filename, error))
                    if not valid: # set type to unfinished and clear string if invalid
                        translation_node.clear()
                        translation_node.set('type', 'unfinished')
                        have_errors = True
                # Remove location tags (makes diffs less noisy)
                for location in message.findall('location'):
                    message.remove(location)
                # Remove entire message if it is an unfinished translation
                if translation_node.get('type') == 'unfinished':
                    context.remove(message)
        # check if document is (virtually) empty, and remove it if so
        num_messages = 0
        for context in root.findall('context'):
            for message in context.findall('message'):
                num_messages += 1
        if num_messages < MIN_NUM_MESSAGES:
            print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
            continue
        # write fixed-up tree
        # if diff reduction requested, replace some XML to 'sanitize' to qt formatting
        if reduce_diff_hacks:
            out = io.BytesIO()
            tree.write(out, encoding='utf-8')
            out = out.getvalue()
            out = out.replace(b' />', b'/>')
            with open(filepath, 'wb') as f:
                f.write(out)
        else:
            tree.write(filepath, encoding='utf-8')
    return have_errors
if __name__ == '__main__':
    check_at_repository_root()
    fetch_all_translations()
    # NOTE(review): the have_errors return value is discarded, so the exit
    # status never reflects translation problems — consider exiting non-zero
    # on a truthy result.
    postprocess_translations()
| 38.841346 | 124 | 0.636094 |
58f8a62fdf55d9bd7fda59b23544b47740dc1abd | 780 | py | Python | assignements/Session1/test_S3.py | charlottedrb/BachelorDIM-Lectures-Algorithms-2020 | f34cb432cd2b34b548cbc44539084831fd45695e | [
"MIT"
] | null | null | null | assignements/Session1/test_S3.py | charlottedrb/BachelorDIM-Lectures-Algorithms-2020 | f34cb432cd2b34b548cbc44539084831fd45695e | [
"MIT"
] | null | null | null | assignements/Session1/test_S3.py | charlottedrb/BachelorDIM-Lectures-Algorithms-2020 | f34cb432cd2b34b548cbc44539084831fd45695e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 29 08:46:18 2020
@author: derbaghc
"""
import cv2
import numpy as np
import S3_imgproc_tools as main
import pytest
# =============================================================================
# Tests pour la fonction de déclenchements d'erreurs
# =============================================================================
def test_type_errors_tuNone():
    # Make sure the expected exception is raised for a None input.
    # (Original comment said ValueError; the code asserts AttributeError.)
    with pytest.raises(AttributeError):
        main.type_errors(None)
def test_type_errors_tuArray():
    # A plain int is not an ndarray: expect AttributeError.
    with pytest.raises(AttributeError):
        main.type_errors(1)
def test_type_errors_tuuint8():
    # A float32 array triggers TypeError — presumably uint8 is required by
    # type_errors; confirm against S3_imgproc_tools.
    with pytest.raises(TypeError):
        main.type_errors(np.zeros((2, 2), dtype=np.float32))
| 25.16129 | 79 | 0.553846 |
1f15aacfca79687c062fc340db09411d67e1fc21 | 727 | py | Python | HLTrigger/Configuration/python/HLT_75e33/modules/hltRechitInRegionsHGCAL_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:24:46.000Z | 2021-11-30T16:24:46.000Z | HLTrigger/Configuration/python/HLT_75e33/modules/hltRechitInRegionsHGCAL_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 4 | 2021-11-29T13:57:56.000Z | 2022-03-29T06:28:36.000Z | HLTrigger/Configuration/python/HLT_75e33/modules/hltRechitInRegionsHGCAL_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:16:05.000Z | 2021-11-30T16:16:05.000Z | import FWCore.ParameterSet.Config as cms
hltRechitInRegionsHGCAL = cms.EDProducer("HLTHGCalRecHitsInRegionsProducer",
etaPhiRegions = cms.VPSet(cms.PSet(
inputColl = cms.InputTag("hltL1TEGammaHGCFilteredCollectionProducer"),
maxDEta = cms.double(0.0),
maxDPhi = cms.double(0.0),
maxDeltaR = cms.double(0.35),
maxEt = cms.double(999999.0),
minEt = cms.double(5.0),
type = cms.string('L1EGamma')
)),
inputCollTags = cms.VInputTag("HGCalRecHitL1Seeded:HGCEERecHits", "HGCalRecHitL1Seeded:HGCHEBRecHits", "HGCalRecHitL1Seeded:HGCHEFRecHits"),
outputProductNames = cms.vstring(
'HGCEERecHits',
'HGCHEBRecHits',
'HGCHEFRecHits'
)
)
| 36.35 | 144 | 0.678129 |
a4157da2608fdbeaad524a08bd7d21477c304128 | 1,369 | py | Python | tests/test_top_level_editable.py | bendikro/pip-tools | 62a5b4d74b889a9265fc0f1ca91ca2b92a5801c0 | [
"BSD-3-Clause"
] | 1 | 2018-12-03T01:55:55.000Z | 2018-12-03T01:55:55.000Z | tests/test_top_level_editable.py | DalavanCloud/pip-tools | c1b4c67c44e97efcb16f7f88d7f3062db1b8c5d0 | [
"BSD-3-Clause"
] | 1 | 2022-02-09T11:29:10.000Z | 2022-02-09T11:29:10.000Z | tests/test_top_level_editable.py | DalavanCloud/pip-tools | c1b4c67c44e97efcb16f7f88d7f3062db1b8c5d0 | [
"BSD-3-Clause"
] | 2 | 2021-11-11T00:36:27.000Z | 2022-02-09T09:47:35.000Z | import os
import pytest
from piptools.repositories import PyPIRepository
from piptools.scripts.compile import get_pip_command
class MockedPyPIRepository(PyPIRepository):
    """PyPIRepository that only resolves editable requirements for real."""
    def get_dependencies(self, ireq):
        # "mock" everything but editable reqs to avoid disk and network I/O
        # when possible
        if not ireq.editable:
            return set()
        return super(MockedPyPIRepository, self).get_dependencies(ireq)
def _get_repository():
    """Build a MockedPyPIRepository from a default (empty) pip command line."""
    command = get_pip_command()
    options, _ = command.parse_args([])
    session = command._build_session(options)
    return MockedPyPIRepository(options, session)
@pytest.mark.parametrize(
    ('input', 'expected'),
    ((tup) for tup in [
        ([os.path.join(os.path.dirname(__file__), 'test_data', 'small_fake_package')],
         ['six']),
    ])
)
def test_editable_top_level_deps_preserved(base_resolver, repository, from_editable, input, expected):
    """Resolving an editable requirement must keep its declared deps."""
    input = [from_editable(line) for line in input]
    # Swap in the mocked repository so only the editable package does I/O.
    repository = _get_repository()
    output = base_resolver(input, prereleases=False, repository=repository).resolve()
    output = {p.name for p in output}
    # sanity check that we're expecting something
    assert output != set()
    for package_name in expected:
        assert package_name in output
| 29.12766 | 102 | 0.710007 |
d38afec42d8537ed2d5b313ecfdabace98fb421f | 400 | py | Python | papermerge/core/serializers/automate.py | papermerge/papermerge-core | e377441a8e5ddb748fa3861f686fa22c00d407fe | [
"Apache-2.0"
] | 45 | 2021-01-10T14:33:52.000Z | 2022-03-27T14:02:56.000Z | papermerge/core/serializers/automate.py | papermerge/papermerge-core | e377441a8e5ddb748fa3861f686fa22c00d407fe | [
"Apache-2.0"
] | 8 | 2021-01-08T20:02:21.000Z | 2022-02-01T13:12:24.000Z | papermerge/core/serializers/automate.py | papermerge/papermerge-core | e377441a8e5ddb748fa3861f686fa22c00d407fe | [
"Apache-2.0"
] | 12 | 2021-01-07T20:03:07.000Z | 2022-03-24T04:07:51.000Z | from rest_framework import serializers
from papermerge.core.models import Automate
class AutomateSerializer(serializers.ModelSerializer):
    """REST serializer exposing Automate rules (matching settings and the
    destination folder)."""
    class Meta:
        model = Automate
        # Resource type name used by the API renderer.
        resource_name = 'automates'
        fields = (
            'id',
            'name',
            'match',
            'matching_algorithm',
            'is_case_sensitive',
            'dst_folder',
        )
| 21.052632 | 54 | 0.57 |
6be0e401c8f92d7d21e743ed8d667d7755e8b595 | 17,859 | py | Python | TFGeneration/GenerateTFRecord.py | bradleyfowler123/TIES_DataGeneration | 28f29b0f292b4652cfcd2d68674b50aea792f3ac | [
"MIT"
] | null | null | null | TFGeneration/GenerateTFRecord.py | bradleyfowler123/TIES_DataGeneration | 28f29b0f292b4652cfcd2d68674b50aea792f3ac | [
"MIT"
] | null | null | null | TFGeneration/GenerateTFRecord.py | bradleyfowler123/TIES_DataGeneration | 28f29b0f292b4652cfcd2d68674b50aea792f3ac | [
"MIT"
] | 1 | 2021-07-12T07:34:10.000Z | 2021-07-12T07:34:10.000Z | import warnings
warnings.filterwarnings("ignore")
import tensorflow as tf
import numpy as np
import traceback
import cv2
import os
import string
import pickle
from multiprocessing import Process,Lock
from TableGeneration.Table import Table
from multiprocessing import Process,Pool,cpu_count
import random
import argparse
from TableGeneration.tools import *
import numpy as np
from selenium.webdriver import Firefox
from selenium.webdriver import PhantomJS
import warnings
from TableGeneration.Transformation import *
def warn(*args, **kwargs):
    """No-op warning sink: accepts any arguments and does nothing."""
    return None
class Logger:
    """Minimal append-only file logger.

    Each write() call opens 'logfile.txt' (current directory) in append
    mode, writes the text verbatim and closes the file again, so messages
    are persisted between calls.
    """
    def __init__(self):
        pass
    def write(self, txt):
        """Append *txt* to 'logfile.txt'."""
        # Context manager guarantees the handle is closed even if the
        # underlying write raises (the original leaked it in that case).
        with open('logfile.txt', 'a+') as log_file:
            log_file.write(txt)
class GenerateTFRecord:
    def __init__(self, outpath,filesize,unlvimagespath,unlvocrpath,unlvtablepath,visualizeimgs,visualizebboxes,distributionfilepath):
        """Configure paths, table-size limits, transform ranges and the
        per-tfrecord category distribution used by the generator."""
        self.outtfpath = outpath #directory to store tfrecords
        self.filesize=filesize #number of images in each tfrecord
        self.unlvocrpath=unlvocrpath #unlv ocr ground truth files
        self.unlvimagespath=unlvimagespath #unlv images
        self.unlvtablepath=unlvtablepath #unlv ground truth of tables
        self.visualizeimgs=visualizeimgs #whether to store images separately or not
        self.distributionfile=distributionfilepath #pickle file containing UNLV distribution
        self.logger=Logger() #if we want to use logger and store output to file
        #self.logdir = 'logdir/'
        #self.create_dir(self.logdir)
        #logging.basicConfig(filename=os.path.join(self.logdir,'Log.log'), filemode='a+', format='%(name)s - %(levelname)s - %(message)s')
        self.num_of_max_vertices=900 #number of vertices (maximum number of words in any table)
        self.max_length_of_word=30 #max possible length of each word
        self.row_min=3 #minimum number of rows in a table (includes headers)
        self.row_max=15 #maximum number of rows in a table
        self.col_min=3 #minimum number of columns in a table
        self.col_max=9 #maximum number of columns in a table
        self.minshearval=-0.1 #minimum value of shear to apply to images
        self.maxshearval=0.1 #maximum value of shear to apply to images
        self.minrotval=-0.01 #minimum rotation applied to images
        self.maxrotval=0.01 #maximum rotation applied to images
        self.num_data_dims=5 #data dimensions to store in tfrecord
        self.max_height=768 #max image height
        self.max_width=1366 #max image width
        # How many images of each of the 4 table categories go in one record.
        self.tables_cat_dist = self.get_category_distribution(self.filesize)
        self.visualizebboxes=visualizebboxes
def get_category_distribution(self,filesize):
tables_cat_dist=[0,0,0,0]
firstdiv=filesize//2
tables_cat_dist[0]=firstdiv//2
tables_cat_dist[1]=firstdiv-tables_cat_dist[0]
seconddiv=filesize-firstdiv
tables_cat_dist[2]=seconddiv//2
tables_cat_dist[3]=seconddiv-tables_cat_dist[2]
return tables_cat_dist
def create_dir(self,fpath): #creates directory fpath if it does not exist
if(not os.path.exists(fpath)):
os.mkdir(fpath)
def str_to_int(self,str): #converts each character in a word to equivalent int
intsarr=np.array([ord(chr) for chr in str])
padded_arr=np.zeros(shape=(self.max_length_of_word),dtype=np.int64)
padded_arr[:len(intsarr)]=intsarr
return padded_arr
def convert_to_int(self, arr): #simply converts array to a string
return [int(val) for val in arr]
def pad_with_zeros(self,arr,shape): #will pad the input array with zeros to make it equal to 'shape'
dummy=np.zeros(shape,dtype=np.int64)
dummy[:arr.shape[0],:arr.shape[1]]=arr
return dummy
    def generate_tf_record(self, im, cellmatrix, rowmatrix, colmatrix, arr,tablecategory,imgindex,output_file_name):
        '''Pack one table image plus its ground truth (cell/row/col adjacency
        matrices, per-word features and text) into a tf.train.Example.

        arr columns are assumed to be [length, word, x0, y0, x1, y1] — the
        slicing below relies on that layout; confirm against the caller.
        '''
        # Pad every adjacency matrix to the fixed vertex count.
        cellmatrix=self.pad_with_zeros(cellmatrix,(self.num_of_max_vertices,self.num_of_max_vertices))
        colmatrix = self.pad_with_zeros(colmatrix, (self.num_of_max_vertices, self.num_of_max_vertices))
        rowmatrix = self.pad_with_zeros(rowmatrix, (self.num_of_max_vertices, self.num_of_max_vertices))
        #im = np.array(cv2.imread(img_path, 0),dtype=np.int64)
        im=im.astype(np.int64)
        img_height, img_width=im.shape
        words_arr = arr[:, 1].tolist()
        no_of_words = len(words_arr)
        lengths_arr = self.convert_to_int(arr[:, 0])
        # Per-word features: bounding box coordinates plus word length,
        # zero-padded up to num_of_max_vertices rows.
        vertex_features=np.zeros(shape=(self.num_of_max_vertices,self.num_data_dims),dtype=np.int64)
        lengths_arr=np.array(lengths_arr).reshape(len(lengths_arr),-1)
        sample_out=np.array(np.concatenate((arr[:,2:],lengths_arr),axis=1))
        vertex_features[:no_of_words,:]=sample_out
        if(self.visualizebboxes):
            self.draw_matrices(im,arr,[rowmatrix,colmatrix,cellmatrix],imgindex,output_file_name)
        # Words encoded as fixed-length rows of character ordinals.
        vertex_text = np.zeros((self.num_of_max_vertices,self.max_length_of_word), dtype=np.int64)
        vertex_text[:no_of_words]=np.array(list(map(self.str_to_int,words_arr)))
        feature = dict()
        feature['image'] = tf.train.Feature(float_list=tf.train.FloatList(value=im.astype(np.float32).flatten()))
        feature['global_features'] = tf.train.Feature(float_list=tf.train.FloatList(value=np.array([img_height, img_width,no_of_words,tablecategory]).astype(np.float32).flatten()))
        feature['vertex_features'] = tf.train.Feature(float_list=tf.train.FloatList(value=vertex_features.astype(np.float32).flatten()))
        feature['adjacency_matrix_cells'] = tf.train.Feature(int64_list=tf.train.Int64List(value=cellmatrix.astype(np.int64).flatten()))
        feature['adjacency_matrix_cols'] = tf.train.Feature(int64_list=tf.train.Int64List(value=colmatrix.astype(np.int64).flatten()))
        feature['adjacency_matrix_rows'] = tf.train.Feature(int64_list=tf.train.Int64List(value=rowmatrix.astype(np.int64).flatten()))
        feature['vertex_text'] = tf.train.Feature(int64_list=tf.train.Int64List(value=vertex_text.astype(np.int64).flatten()))
        all_features = tf.train.Features(feature=feature)
        seq_ex = tf.train.Example(features=all_features)
        return seq_ex
def generate_tables(self,driver,N_imgs,output_file_name):
row_col_min=[self.row_min,self.col_min] #to randomly select number of rows
row_col_max=[self.row_max,self.col_max] #to randomly select number of columns
rc_arr = np.random.uniform(low=row_col_min, high=row_col_max, size=(N_imgs, 2)) #random row and col selection for N images
all_table_categories=[0,0,0,0] #These 4 values will count the number of images for each of the category
rc_arr[:,0]=rc_arr[:,0]+2 #increasing the number of rows by a fix 2. (We can comment out this line. Does not affect much)
data_arr=[]
exceptioncount=0
rc_count=0 #for iterating through row and col array
for assigned_category,cat_count in enumerate(self.tables_cat_dist):
for _ in range(cat_count):
rows = int(round(rc_arr[rc_count][0]))
cols = int(round(rc_arr[rc_count][1]))
exceptcount=0
while(True):
#This loop is to repeat and retry generating image if some an exception is encountered.
try:
#initialize table class
table = Table(rows,cols,self.unlvimagespath,self.unlvocrpath,self.unlvtablepath,assigned_category+1,self.distributionfile)
#get table of rows and cols based on unlv distribution and get features of this table
#(same row, col and cell matrices, total unique ids, html conversion of table and its category)
same_cell_matrix,same_col_matrix,same_row_matrix, id_count, html_content,tablecategory= table.create()
#convert this html code to image using selenium webdriver. Get equivalent bounding boxes
#for each word in the table. This will generate ground truth for our problem
im,bboxes = html_to_img(driver, html_content, id_count)
# apply_shear: bool - True: Apply Transformation, False: No Transformation | probability weight for shearing to be 25%
#apply_shear = random.choices([True, False],weights=[0.25,0.75])[0]
#if(apply_shear==True):
if(assigned_category+1==4):
#randomly select shear and rotation levels
while(True):
shearval = np.random.uniform(self.minshearval, self.maxshearval)
rotval = np.random.uniform(self.minrotval, self.maxrotval)
if(shearval!=0.0 or rotval!=0.0):
break
#If the image is transformed, then its categorycategory is 4
#transform image and bounding boxes of the words
im, bboxes = Transform(im, bboxes, shearval, rotval, self.max_width, self.max_height)
tablecategory=4
if(self.visualizeimgs):
#if the image and equivalent html is need to be stored
dirname=os.path.join('visualizeimgs/','category'+str(tablecategory))
f=open(os.path.join(dirname,'html',str(rc_count)+output_file_name.replace('.tfrecord','.html')),'w')
f.write(html_content)
f.close()
im.save(os.path.join(dirname,'img',str(rc_count)+output_file_name.replace('.tfrecord','.png')), dpi=(600, 600))
# driver.quit()
# 0/0
data_arr.append([[same_row_matrix, same_col_matrix, same_cell_matrix, bboxes,[tablecategory]],[im]])
all_table_categories[tablecategory-1]+=1
#print('Assigned category: ',assigned_category+1,', generated category: ',tablecategory)
break
except Exception as e:
#traceback.print_exc()
exceptcount+=1
if(exceptioncount>10):
print('More than 10 exceptions occured for file: ',output_file_name)
#if there are more than 10 exceptions, then return None
return None
#traceback.print_exc()
#print('\nException No.', exceptioncount, ' File: ', str(output_file_name))
#logging.error("Exception Occured "+str(output_file_name),exc_info=True)
rc_count+=1
if(len(data_arr)!=N_imgs):
#If total number of images are not generated, then return None.
print('Images not equal to the required size.')
return None
return data_arr,all_table_categories
def draw_matrices(self,img,arr,matrices,imgindex,output_file_name):
'''Call this fucntion to draw visualizations of a matrix on image'''
no_of_words=len(arr)
colors = np.random.randint(0, 255, (no_of_words, 3))
arr = arr[:, 2:]
img=img.astype(np.uint8)
img=np.dstack((img,img,img))
mat_names=['row','col','cell']
output_file_name=output_file_name.replace('.tfrecord','')
for matname,matrix in zip(mat_names,matrices):
im=img.copy()
x=1
indices = np.argwhere(matrix[x] == 1)
for index in indices:
cv2.rectangle(im, (int(arr[index, 0])-3, int(arr[index, 1])-3),
(int(arr[index, 2])+3, int(arr[index, 3])+3),
(0,255,0), 1)
x = 4
indices = np.argwhere(matrix[x] == 1)
for index in indices:
cv2.rectangle(im, (int(arr[index, 0])-3, int(arr[index, 1])-3),
(int(arr[index, 2])+3, int(arr[index, 3])+3),
(0, 0, 255), 1)
img_name=os.path.join('bboxes/',output_file_name+'_'+str(imgindex)+'_'+matname+'.jpg')
cv2.imwrite(img_name,im)
    def write_tf(self,filesize,threadnum):
        '''This function writes tfrecords. Input parameters are: filesize (number of images in one tfrecord), threadnum(thread id)'''
        # GZIP-compressed tfrecord writer options (TF1 compatibility API)
        options = tf.compat.v1.io.TFRecordOptions(tf.compat.v1.io.TFRecordCompressionType.GZIP)
        # headless Firefox renders the generated HTML tables to images
        opts = Options()
        opts.set_headless()
        assert opts.headless
        #driver=PhantomJS()
        driver = Firefox(options=opts)
        # NOTE(review): this loop has no break -- the worker keeps producing
        # tfrecord files until the process is terminated externally, so the
        # driver.stop_client()/driver.quit() calls at the bottom look unreachable.
        while(True):
            starttime = time.time()
            #randomly select a name of length=20 for tfrecords file.
            output_file_name = ''.join(random.choices(string.ascii_uppercase + string.digits, k=20)) + '.tfrecord'
            print('\nThread: ',threadnum,' Started:', output_file_name)
            #data_arr contains the images of generated tables and all_table_categories contains the table category of each of the table
            data_arr,all_table_categories = self.generate_tables(driver, filesize, output_file_name)
            # generate_tables returns None on repeated failures; only write a
            # tfrecord when a complete batch of `filesize` images came back
            if(data_arr is not None):
                if(len(data_arr)==filesize):
                    with tf.io.TFRecordWriter(os.path.join(self.outtfpath,output_file_name+".gz"),options=options) as writer:
                        try:
                            for imgindex,subarr in enumerate(data_arr):
                                # subarr layout (see generate_tables):
                                # [[row-matrix, col-matrix, cell-matrix, bboxes, [category]], [image]]
                                arr=subarr[0]
                                # first channel of the rendered image, as int64
                                img=np.asarray(subarr[1][0],np.int64)[:,:,0]
                                colmatrix = np.array(arr[1],dtype=np.int64)
                                cellmatrix = np.array(arr[2],dtype=np.int64)
                                rowmatrix = np.array(arr[0],dtype=np.int64)
                                bboxes = np.array(arr[3])
                                tablecategory=arr[4][0]
                                seq_ex = self.generate_tf_record(img, cellmatrix, rowmatrix, colmatrix, bboxes,tablecategory,imgindex,output_file_name)
                                writer.write(seq_ex.SerializeToString())
                            print('\nThread :',threadnum,' Completed in ',time.time()-starttime,' ' ,output_file_name,'with len:',(len(data_arr)))
                            print('category 1: ',all_table_categories[0],', category 2: ',all_table_categories[1],', category 3: ',all_table_categories[2],', category 4: ',all_table_categories[3])
                        except Exception as e:
                            # log and continue with the next file; the partially
                            # written .gz is deliberately left on disk (see the
                            # commented-out os.remove below)
                            print('Exception occurred in write_tf function for file: ',output_file_name)
                            traceback.print_exc()
                            self.logger.write(traceback.format_exc())
                            # print('Thread :',threadnum,' Removing',output_file_name)
                            # os.remove(os.path.join(self.outtfpath,output_file_name))
        driver.stop_client()
        driver.quit()
def write_to_tf(self,max_threads):
'''This function starts tfrecords generation with number of threads = max_threads with each thread
working on a single tfrecord'''
if(not os.path.exists(self.distributionfile)):
if((not os.path.exists(self.unlvtablepath)) or (not os.path.exists(self.unlvimagespath)) or (not os.path.exists(self.unlvocrpath))):
print('UNLV dataset folders do not exist.')
return
#create all directories here
if(self.visualizeimgs):
self.create_dir('visualizeimgs')
for tablecategory in range(1,5):
dirname=os.path.join('visualizeimgs/','category'+str(tablecategory))
self.create_dir(dirname)
self.create_dir(os.path.join(dirname,'html'))
self.create_dir(os.path.join(dirname, 'img'))
if(self.visualizebboxes):
self.create_dir('bboxes')
self.create_dir(self.outtfpath) #create output directory if it does not exist
starttime=time.time()
threads=[]
for threadnum in range(max_threads):
proc = Process(target=self.write_tf, args=(self.filesize, threadnum,))
proc.start()
threads.append(proc)
for proc in threads:
proc.join()
print(time.time()-starttime)
| 53.47006 | 196 | 0.590739 |
75e4cee3a2b209dfb20e3ce7be3a738acc0c864c | 1,397 | py | Python | ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/package/scripts/status_params.py | zyclove/ambari | 1032f0f54cb7b312b9a3b37570cd840f4e1e89d4 | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/package/scripts/status_params.py | zyclove/ambari | 1032f0f54cb7b312b9a3b37570cd840f4e1e89d4 | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/package/scripts/status_params.py | zyclove/ambari | 1032f0f54cb7b312b9a3b37570cd840f4e1e89d4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python2
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
# Storm status/PID parameters shared by the service scripts.
# NOTE(review): `format()` below is presumably resource_management's
# config-aware format (it is shadowed by the star import above) and substitutes
# {pid_dir} from the local scope -- confirm against the resource_management API;
# the Python builtin format() would not do this.
# Fix: removed stray dataset-statistics residue that was fused onto the final
# line and broke the module's syntax.
config = Script.get_config()

# base directory holding one PID file per Storm daemon
pid_dir = config['configurations']['storm-env']['storm_pid_dir']

pid_nimbus = format("{pid_dir}/nimbus.pid")
pid_supervisor = format("{pid_dir}/supervisor.pid")
pid_drpc = format("{pid_dir}/drpc.pid")
pid_ui = format("{pid_dir}/ui.pid")
pid_logviewer = format("{pid_dir}/logviewer.pid")
pid_rest_api = format("{pid_dir}/restapi.pid")

# component name -> PID file, used by the status checks
pid_files = {"logviewer": pid_logviewer,
             "ui": pid_ui,
             "nimbus": pid_nimbus,
             "supervisor": pid_supervisor,
             "drpc": pid_drpc,
             "rest_api": pid_rest_api}
6948f49e37bafa003372ad0b74b648a8341f9fad | 5,306 | py | Python | sociation2vec/utils.py | ur001/sociation_corpus | 050e6bc35f7e4261a46e509a05d484937b30e8b6 | [
"MIT"
] | 3 | 2017-11-05T06:39:25.000Z | 2017-11-16T13:44:54.000Z | sociation2vec/utils.py | ur001/sociation_corpus | 050e6bc35f7e4261a46e509a05d484937b30e8b6 | [
"MIT"
] | null | null | null | sociation2vec/utils.py | ur001/sociation_corpus | 050e6bc35f7e4261a46e509a05d484937b30e8b6 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Создание исходного корпуса Sociation.org
Выполняется в консоли. Код для ознакомления
"""
import os
from collections import Counter
from operator import itemgetter
from random import randint
import numpy as np
from gensim import corpora, models, similarities
class DictEncoder(object):
    """
    Helper for building/storing vocabularies: maps each new word to the next
    free integer id and keeps the reverse mapping as a list.

    >>> my_dict = DictEncoder()
    >>> my_dict['word']
    0
    >>> my_dict['another word']
    1
    >>> my_dict.decode[0]
    'word'
    """
    def __init__(self):
        self.encode = {}   # word -> integer id
        self.decode = []   # integer id -> word

    def add(self, item):
        """Return the id of *item*, assigning the next free id on first sight."""
        if item in self.encode:
            return self.encode[item]
        index = len(self.decode)
        self.decode.append(item)
        self.encode[item] = index
        return index

    def save(self, file_name):
        """Write one word per line in id order.

        UTF-8 is forced so the non-ASCII words handled by this module survive
        regardless of the platform's default encoding.
        """
        with open(file_name, 'w', encoding='utf-8') as out_file:
            for item in self.decode:
                out_file.write(item + '\n')

    @classmethod
    def load(cls, file_name):
        """Rebuild an encoder from a file produced by save()."""
        encoder = cls()
        with open(file_name, 'r', encoding='utf-8') as in_file:
            for item in in_file:
                encoder.add(item.strip())
        return encoder

    def __getitem__(self, item):
        # indexing is a shorthand for add(): encoder['word'] -> id
        return self.add(item)

    def __contains__(self, item):
        return item in self.encode

    def __len__(self):
        return len(self.decode)
class WordsComparator(object):
    """Callable that scores two feature weights normalised by their maxima.

    __call__(f1, f2) returns (diff1, diff2, common): diff1/diff2 measure how
    specific the feature is to the first/second word, common is the damped
    (similarity_degree-th power) product measuring shared weight.
    """
    def __init__(self, feature1_max, feature2_max, similarity_degree=1/3):
        self.feature1_max = feature1_max
        self.feature2_max = feature2_max
        self.prod_max = feature1_max * feature2_max
        self.similarity_degree = similarity_degree

    def __call__(self, feature1, feature2):
        complement2 = self.feature2_max - feature2
        complement1 = self.feature1_max - feature1
        diff1_value = feature1 * complement2 / self.prod_max
        diff2_value = feature2 * complement1 / self.prod_max
        common_value = (feature1 * feature2 / self.prod_max) ** self.similarity_degree
        return diff1_value, diff2_value, common_value
def bow2nparray_vec(vec):
    """Convert a bag-of-weights list [(id, value), ...] into a dense value array."""
    return np.array([value for _, value in vec])
def nparray2bow_vec(vec):
    """Inverse of bow2nparray_vec: keep only non-zero entries as (index, value) pairs."""
    return list(filter(itemgetter(1), enumerate(vec)))
def compare_words(
    word1_features,
    word2_features,
    count=10,
    exclude=frozenset(),
    similarity_degree=0.5,
    separate=False,
    min_feature_value=0.3
):
    """
    Compare two words through their feature weights (e.g. lists of similar words).
    Returns three lists of (feature, score) pairs, best first: features
    characteristic of the first word, of the second word, and common to both.

    Fix: the default for *exclude* was the mutable ``set()``; a ``frozenset()``
    default is immutable and behaves identically for membership tests.

    :param dict[int, float] word1_features: {feature: value} of the first word
    :param dict[int, float] word2_features: {feature: value} of the second word
    :param int count: number of entries kept in each result list
    :param exclude: features to skip entirely
    :param float similarity_degree: 0..1 exponent damping the common score
        (1 = full separation, 0 = maximal similarity emphasis)
    :param bool separate: strict separation -- each feature may only enter the
        list where its score is maximal
    :param float min_feature_value: features below this value in both words
        are ignored
    """
    diff1, diff2, common = {}, {}, {}  # specific to word 1, to word 2, shared
    features = set(word1_features.keys()).union(word2_features.keys())
    for feature in features:
        if feature in exclude:
            continue
        feature1 = word1_features.get(feature, 0)
        feature2 = word2_features.get(feature, 0)
        if feature1 < min_feature_value and feature2 < min_feature_value:
            continue
        diff1_value = feature1 * (1 - feature2)
        diff2_value = feature2 * (1 - feature1)
        common_value = (feature1 * feature2) ** similarity_degree
        max_value = max(diff1_value, diff2_value, common_value)
        # without `separate`, a feature may appear in several result lists
        if diff1_value == max_value or not separate:
            diff1[feature] = diff1_value
        if diff2_value == max_value or not separate:
            diff2[feature] = diff2_value
        if common_value == max_value or not separate:
            common[feature] = common_value
    return (
        sorted(diff1.items(), key=itemgetter(1), reverse=True)[:count],
        sorted(diff2.items(), key=itemgetter(1), reverse=True)[:count],
        sorted(common.items(), key=itemgetter(1), reverse=True)[:count],
    )
def nparray2str(value, binary=False):
    """Serialize a 1-d numpy array as raw bytes (binary=True) or space-separated text.

    Fix: ndarray.tostring() was a deprecated alias removed in NumPy >= 1.23;
    tobytes() is the byte-identical replacement.
    """
    if binary:
        return value.tobytes()
    else:
        return ' '.join(map(str, value))
def save_word2vec(file_name, corpus, dictionary, binary=False):
    """Write (corpus, dictionary) in word2vec text format.

    First line is the header "<n_words> <dim>", then one "<word> <vector>" line
    per word; spaces inside words become underscores as word2vec requires.
    UTF-8 is forced so non-ASCII words survive on any platform.
    NOTE(review): binary=True makes nparray2str return bytes, which cannot be
    written to this text-mode file -- confirm whether binary output is used.
    """
    with open(file_name, 'w', encoding='utf-8') as file:
        file.write('{} {}\n'.format(len(dictionary), len(corpus[0])))
        for word_idx, vector in enumerate(corpus):
            word = dictionary[word_idx].replace(' ', '_')
            file.write('{} {}\n'.format(word, nparray2str(vector, binary)))
def read_word2vec(path):
    """Read a text-format word2vec file.

    Returns (matrix, words_dict): a stacked float32 matrix of the word vectors
    and a DictEncoder mapping words to row indices. The numeric header line
    ("<n_words> <dim>") is skipped via the isdigit() check.

    Fixes: removed stray dataset residue fused onto the return line (syntax
    error); replaced the deprecated np.fromstring(..., sep=" ") with an
    equivalent split()+np.array; read with explicit UTF-8 encoding.
    """
    words_dict = DictEncoder()
    corpus = []
    with open(path, 'r', encoding='utf-8') as file:
        for line in file:
            part1, part2 = line.strip().split(' ', 1)
            if not part1.isdigit():
                word_name = part1
                word_vector = np.array(part2.split(), dtype="float32")
                words_dict.add(word_name)
                corpus.append(word_vector)
    return np.vstack(corpus), words_dict
a5186465f09e98330d03cddec3197fe3bddb1359 | 1,616 | py | Python | editDistance/editDistance.py | alexZrAl/Simple-Algorithms | d420b5efea08719315b27908267a8b7fd7dffc50 | [
"MIT"
] | null | null | null | editDistance/editDistance.py | alexZrAl/Simple-Algorithms | d420b5efea08719315b27908267a8b7fd7dffc50 | [
"MIT"
] | null | null | null | editDistance/editDistance.py | alexZrAl/Simple-Algorithms | d420b5efea08719315b27908267a8b7fd7dffc50 | [
"MIT"
] | null | null | null | import numpy as np
def getEditDist(src, target):
    '''
    Compute the Levenshtein edit distance between *src* and *target* with a
    dynamic-programming table, in O(len(src) * len(target)) time.
    @param src: The "sample" string
    @param target: The string we want to compare with the sample
    '''
    n_rows = len(src) + 1
    n_cols = len(target) + 1
    # chart[i][j] = distance between the first i chars of src and first j of target
    chart = np.zeros((n_rows, n_cols))
    chart[:, 0] = np.arange(n_rows)  # deleting i characters costs i
    chart[0, :] = np.arange(n_cols)  # inserting j characters costs j
    for i in range(1, n_rows):
        for j in range(1, n_cols):
            substitution = 0 if src[i - 1] == target[j - 1] else 1
            chart[i][j] = min(chart[i - 1][j] + 1,
                              chart[i][j - 1] + 1,
                              chart[i - 1][j - 1] + substitution)
    return chart[len(src)][len(target)]
def constructWordMap(inputStr, filename="example.txt"):
    '''
    Maps each sample word (first token of every line in *filename*) to its
    edit distance from the input word and returns the mapping.
    Fixes: the word file was iterated from a bare open() and never closed
    (now a context manager); the hard-coded filename is now an optional
    parameter whose default preserves the old behaviour.
    @param inputStr: input word provided by user
    @param filename: word-list file, one word per line (first token used)
    @returns dict mapping word -> edit distance to inputStr
    '''
    with open(filename, "r") as word_file:
        lstWords = [line.split()[0] for line in word_file]
    return {word: getEditDist(word, inputStr) for word in lstWords}
if __name__ == "__main__":
    # Showcase with a given example of English words: rank the sample words by
    # edit distance to the word the user types in.
    query = input("Enter a word. ")
    distances = constructWordMap(query)
    ranked = sorted(distances.items(), key=lambda entry: entry[1])
    print("Results from shortest to longest edit distance: ")
    for word, dist in ranked:
        print("Word: {} \t {}".format(word, dist))
| 28.857143 | 91 | 0.587252 |
4451a99d6a9228f5644d8c5f02d048a61e04a276 | 1,043 | py | Python | sublime_helper/__init__.py | derickc/Fountainhead | b9b6bdae7a62aff17c8a277bf384ba13a0c49ad1 | [
"MIT"
] | 66 | 2015-01-25T17:36:33.000Z | 2021-09-16T11:21:22.000Z | sublime_helper/__init__.py | chukhran/Fountainhead | 2db9da646e4cfcdfe89b8b21e6ba08b11d7e39e6 | [
"MIT"
] | 24 | 2015-01-21T12:42:50.000Z | 2020-06-15T14:36:56.000Z | sublime_helper/__init__.py | chukhran/Fountainhead | 2db9da646e4cfcdfe89b8b21e6ba08b11d7e39e6 | [
"MIT"
] | 13 | 2015-01-23T12:55:31.000Z | 2022-03-27T01:17:20.000Z | class SublimeHelper:
def cursor_scope(self, view, offset=1):
'''
Gives the scope based on cursor position.
'''
return view.scope_name(view.sel()[0].end() - offset)
def line_scope(self, view, offset=1):
'''
Gives the scope for a given line based on cursor position. Defaults to the previous line.
'''
return view.scope_name(view.text_point(view.rowcol(view.sel()[0].end())[0] - offset, 0))
def line_string(self, view, offset=1):
'''
Gives the string of text for a given line. Defaults to the previous line.
'''
return view.substr(view.line(view.text_point(view.rowcol(view.sel()[0].end())[0] - offset, 0)))
def scope_list(self, view, scope='text.fountain '):
'''
Gives a list of all strings for a given scope.
'''
regions = []
scopes = []
regions = view.find_by_selector(scope)
for region in regions:
scopes.append(view.substr(region))
return scopes
| 33.645161 | 103 | 0.587728 |
a4375a05632edda459c2524d107e4b471f567340 | 3,011 | py | Python | args/starting_party.py | HansGR/WorldsCollide | af227be553e120ee004b130598360c61daf7df59 | [
"MIT"
] | 7 | 2022-01-15T02:53:53.000Z | 2022-02-17T00:51:32.000Z | args/starting_party.py | HansGR/WorldsCollide | af227be553e120ee004b130598360c61daf7df59 | [
"MIT"
] | 8 | 2022-01-16T02:45:24.000Z | 2022-03-21T02:08:27.000Z | args/starting_party.py | HansGR/WorldsCollide | af227be553e120ee004b130598360c61daf7df59 | [
"MIT"
] | 5 | 2022-01-15T02:53:38.000Z | 2022-01-19T17:42:10.000Z | def name():
return "Starting Party"
def parse(parser):
    """Register the four optional starting-character options on *parser*."""
    starting_party = parser.add_argument_group("Starting Party")
    from data.characters import Characters
    character_options = [default_name.lower() for default_name in Characters.DEFAULT_NAME]
    character_options += ["random", "randomngu"]
    # the four slots are identical apart from their index
    for slot in range(1, 5):
        starting_party.add_argument(f"-sc{slot}", f"--start-char{slot}", default = "", type = str.lower,
                                    choices = character_options,
                                    help = "Starting party member")
def process(args):
    """Collect the non-empty start-char arguments into args.start_chars."""
    args.start_chars = [char for char in (args.start_char1, args.start_char2,
                                          args.start_char3, args.start_char4) if char]
    if not args.start_chars:
        # no starting characters specified, pick one random starting character
        args.start_chars = ["random"]
    else:
        # at most 4 starting characters, no duplicates (except the random picks)
        assert len(args.start_chars) <= 4
        seen = set()
        for char in args.start_chars:
            assert (char == "random" or char == "randomngu" or char not in seen)
            seen.add(char)
def flags(args):
    """Render the chosen start characters back into command-line flag form."""
    flags = ""
    for slot in (1, 2, 3, 4):
        value = getattr(args, f"start_char{slot}")
        if value:
            flags += f" -sc{slot} {value}"
    return flags
def options(args):
    """Return ("Start Character", display-value) pairs for all four slots."""
    result = []
    for start_char in (args.start_char1, args.start_char2,
                       args.start_char3, args.start_char4):
        if start_char == "randomngu":
            value = "Random (No Gogo/Umaro)"
        elif start_char:
            value = start_char.capitalize()
        else:
            value = "None"
        result.append(("Start Character", value))
    return result
def menu(args):
    """Return (section name, entries) with each entry reduced to (value, "")."""
    entries = [(entry[1], "") for entry in options(args)]
    return (name(), entries)
def log(args):
    """Return the section name followed by one formatted line per option."""
    from log import format_option
    lines = [name()]  # renamed from `log`, which shadowed this function's name
    for entry in options(args):
        lines.append(format_option(*entry))
    return lines
| 35.011628 | 117 | 0.63268 |
aae4af6bf88ead0d714c092e22fad305d8877644 | 13,648 | py | Python | codes/2_run_model_cequeau.py | aviolinist/EEE | 032e2029815229875048cc92dd7da24ff3f71e93 | [
"MIT"
] | 6 | 2019-09-27T15:38:37.000Z | 2021-02-03T13:58:01.000Z | codes/2_run_model_cequeau.py | aviolinist/EEE | 032e2029815229875048cc92dd7da24ff3f71e93 | [
"MIT"
] | null | null | null | codes/2_run_model_cequeau.py | aviolinist/EEE | 032e2029815229875048cc92dd7da24ff3f71e93 | [
"MIT"
] | 5 | 2019-09-27T15:38:52.000Z | 2022-03-22T17:24:37.000Z | #!/usr/bin/env python
from __future__ import print_function
# Copyright 2019 Juliane Mai - juliane.mai(at)uwaterloo.ca
#
# License
# This file is part of the EEE code library for "Computationally inexpensive identification
# of noninformative model parameters by sequential screening: Efficient Elementary Effects (EEE)".
#
# The EEE code library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The MVA code library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with The EEE code library.
# If not, see <https://github.com/julemai/EEE/blob/master/LICENSE>.
#
# If you use this method in a publication please cite:
#
# M Cuntz & J Mai et al. (2015).
# Computationally inexpensive identification of noninformative model parameters by sequential screening.
# Water Resources Research, 51, 6417-6441.
# https://doi.org/10.1002/2015WR016907.
#
# An example calling sequence to derive model outputs for previously sampled parameter sets stored
# in an ASCII file (option -i) where some lines might be skipped (option -s). The final model outputs
# are stored in a pickle file (option -o). The model outputs are stored as dictionaries. Multiple
# model outputs are possible.
#
# python 2_run_model_cequeau.py \
# -i parameter_sets_1_scaled_para21_M.dat \
# -s XXX
# -o model_output.pkl
"""
Runs a model for a bunch of parameter sets and stores model outputs in a pickle file.
History
-------
Written, JM, Mar 2019
"""
# -------------------------------------------------------------------------
# Command line arguments - if script
#
# Comment|Uncomment - Begin
#if __name__ == '__main__':
# -----------------------
# add subolder scripts/lib to search path
# -----------------------
import sys
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.abspath(dir_path+'/lib'))
sys.path.append(os.path.abspath(dir_path+'/../examples/cequeau-nc/model'))
import argparse
import numpy as np
import scipy.stats as stats
import copy
import pickle
from pathlib2 import Path
import subprocess
import shutil
import datetime
from cequeau_templates import EXECUTION_XML, PARAMETRES_XML, BASSINVERSANT_XML # in examples/cequeau-nc/model/
from cequeau_common import writeString, makeDirectories, get_discharge # in examples/cequeau-nc/model/
from fread import fread # in lib/
# -----------------------
# Defaults and command-line parsing; the CLI values below overwrite the
# defaults. Note that argparse returns `skip` as a string (no type= given);
# it is converted to an integer later, just before use.
# -----------------------
infile = 'example_cequeau-nc/parameter_sets_1_scaled_para9_M.dat' # name of file containing sampled parameter sets to run the model
outfile = 'example_cequeau-nc/model_output.pkl' # name of file used to save (scalar) model outputs
skip = None # number of lines to skip in input file
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
              description='''An example calling sequence to derive model outputs for previously sampled parameter sets stored in an ASCII file (option -i) where some lines might be skipped (option -s). The final model outputs are stored in a pickle file (option -o). The model outputs are stored as dictionaries. Multiple model outputs are possible..''')
parser.add_argument('-i', '--infile', action='store',
                    default=infile, dest='infile', metavar='infile',
                    help="Name of file containing sampled SCALED parameter sets to run the model (default: 'parameter_sets.out').")
parser.add_argument('-s', '--skip', action='store',
                    default=skip, dest='skip', metavar='skip',
                    help="Number of lines to skip in input file (default: None).")
parser.add_argument('-o', '--outfile', action='store',
                    default=outfile, dest='outfile', metavar='outfile',
                    help="Name of file used to save (scalar) model outputs in a pickle file (default: 'model_output.pkl').")
args = parser.parse_args()
infile = args.infile
outfile = args.outfile
skip = args.skip
# only the three plain variables are used below; drop the parser objects
del parser, args
def model_function(paras, run_id=None):
    """Run CEQUEAU once for one scaled parameter set and return its outputs.

    input:
        paras  ... list of model parameters scaled to their range;
                   values for all N model parameters have to be provided,
                   e.g. [ x1, x2, x3, x4, .... ]
        run_id ... optional name of this run (to, e.g., print or store in a
                   file), e.g. run_aset_001
    output:
        dictionary of model outputs; here model['Q'] holds the simulated
        discharge series extracted from resultats.nc

    Fixes applied: missing ``.format(ii)`` for the 100..999-parameter key case
    (all keys were the literal string "v{:03d}"); ``np.int`` (removed in
    NumPy >= 1.24) replaced by ``int``; setup file read via a context manager;
    setup lines without '=' are skipped instead of raising IndexError; the
    CEQUEAU child process is wait()ed so it does not linger as a zombie.
    """
    if not(run_id is None):
        print("Run ID: ",run_id)

    # ---------------
    # derive some parameters
    # ---------------
    dict_dparas = {}
    # dict_dparas['sum_x05_x06'] = paras[4]+paras[5]    # MAX_MELT_FACTOR > MIN_MELT_FACTOR
    # dict_dparas['sum_x09_x10'] = paras[8]+paras[9]    # SNOW_SWI_MAX > SNOW_SWI_MIN
    # dict_dparas['half_x20'] = paras[19] * 0.5 * 1000  # half the value but in [mm] not [m]
    # dict_dparas['half_x21'] = paras[20] * 0.5 * 1000  # half the value but in [mm] not [m]

    # ---------------
    # paste all paras into template files
    # ---------------
    # ex.: string = "parameter v01 = {par[v01]} and another parameter v02 = {par[v02]}"
    #      keys   = ['v01','v02']
    #      vals   = [1.0,3.0]
    #      string.format(par=dict(zip(keys,vals)))
    #
    #      --> 'parameter v01 = 1.0 and another parameter v02 = 3.0'
    #
    # to replace patterns: {par[v01]} by parameter value paras[0]
    #                      {par[v02]} by parameter value paras[1]
    #                      ...
    if len(paras) > 9 and len(paras) < 100:
        keys_paras = ["v{:02d}".format(ii) for ii in range(1,len(paras)+1) ]
    elif len(paras) > 99 and len(paras) < 1000:
        # BUGFIX: ".format(ii)" was missing, producing the literal "v{:03d}"
        # for every key and therefore a dict with a single colliding entry
        keys_paras = ["v{:03d}".format(ii) for ii in range(1,len(paras)+1) ]
    elif len(paras) <= 9:
        keys_paras = ["v"+str(ii) for ii in range(1,len(paras)+1) ]
    else:
        raise ValueError("More than 999 parameters are not implemented yet!")
    vals_paras = paras
    dict_paras = dict(zip(keys_paras,vals_paras))

    # fill in to templates
    # templates need to have patterns:
    #         {par[v01]},   {par[v02]},                 ... for parameters
    #         {dpar[something]},  {dpar[somethingelse]} ... for derived parameters

    # ---------------
    # create a run folder
    # ---------------
    tmp_folder = "/tmp/eee-analysis/"+str(run_id) # "/tmp/juletest" # TODO a generic folder name in /tmp
    cequeau_exe_name = os.path.abspath(dir_path+"/../"+"examples/cequeau-nc/model/cequeau")
    cequeau_obs_folder = os.path.abspath(dir_path+"/../"+"examples/cequeau-nc/model/data_obs")
    cequeau_run_details = os.path.abspath(os.path.dirname(infile)+"/../model/"+"cequeau-setup.dat")
    # NOTE(review): the assignment below overrides the path above (kept from
    # the original); the setup file is resolved relative to the cwd -- confirm
    # which of the two paths is intended
    cequeau_run_details = os.path.abspath("../model/"+"cequeau-setup.dat")
    print(">>> cequeau_run_details read from: ",cequeau_run_details)

    # read details from cequeau-setup.dat to be fed into CEQUEAU run files
    with open(cequeau_run_details, "r") as ff:  # context manager: no leaked handle
        lines = ff.readlines()
    dict_setup = {}
    dict_setup['tmp_folder'] = tmp_folder
    dict_setup['basin_id'] = None
    dict_setup['start_day'] = None
    dict_setup['end_day'] = None
    for ii in lines:
        if (len(ii) > 0):
            if (not(ii.startswith('#'))):
                ii = ii.split('#')[0].strip()
                if '=' not in ii:
                    continue  # skip blank/malformed lines instead of crashing
                ii_key = ii.split('=')[0].strip()
                ii_val = ii.split('=')[1].strip()
                if ii_key == 'basin_id':
                    dict_setup['basin_id'] = int(ii_val)  # np.int removed in numpy >= 1.24
                if ii_key == 'start_day':
                    dict_setup['start_day'] = ii_val
                if ii_key == 'end_day':
                    dict_setup['end_day'] = ii_val
    if ( dict_setup['basin_id'] is None ) or ( dict_setup['start_day'] is None ) or ( dict_setup['end_day'] is None ):
        print("basin_id: ",dict_setup['basin_id'])
        print("start_day: ",dict_setup['start_day'])
        print("end_day: ",dict_setup['end_day'])
        raise ValueError('CEQUEAU setup file has missing key values!')

    # cleanup before run
    if os.path.exists(tmp_folder):
        shutil.rmtree(tmp_folder)

    # print setups
    print("dict_setup: ",dict_setup)
    print("dict_paras: ",dict_paras)

    # all CEQUEAU setup files
    writeString( Path(tmp_folder,"execution.xml"), EXECUTION_XML.format(setup=dict_setup,par=dict_paras,dpar=dict_dparas) )
    writeString( Path(tmp_folder,"parametres.xml"), PARAMETRES_XML.format(setup=dict_setup,par=dict_paras,dpar=dict_dparas) )
    writeString( Path(tmp_folder,"bassinVersant.xml"), BASSINVERSANT_XML.format(setup=dict_setup,par=dict_paras,dpar=dict_dparas) )

    # link executable
    if not(os.path.exists(str(Path(tmp_folder,os.path.basename(cequeau_exe_name))))):
        print("from: ",os.path.realpath(cequeau_exe_name))
        print("to: ",str(Path(tmp_folder,os.path.basename(cequeau_exe_name))))
        os.symlink(os.path.realpath(cequeau_exe_name), str(Path(tmp_folder,os.path.basename(cequeau_exe_name))))

    # link observations folder
    if not(os.path.exists(str(Path(tmp_folder,os.path.basename(cequeau_obs_folder))))):
        os.symlink(os.path.realpath(cequeau_obs_folder), str(Path(tmp_folder,os.path.basename(cequeau_obs_folder))))

    # create ouput folder
    out_folder = str(Path(tmp_folder,"output"))
    os.makedirs(out_folder)

    # ---------------
    # run the model with these input rv* files
    # ---------------
    cmd = [str(Path(tmp_folder,os.path.basename(cequeau_exe_name))),str(Path(tmp_folder,"execution.xml"))]
    print("run cmd: ",' '.join(cmd))
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    print("")
    print("Cequeau standard output:")
    for line in process.stdout:
        print(">>> ",line.rstrip()) # rstrip removes trailing \n
    process.wait()  # reap the child so it does not linger as a zombie

    if not(os.path.exists(str(Path(tmp_folder,"output","resultats.nc")))):
        print("")
        print("ERROR: No resultats.nc produced")
        print("")
        print("Cequeau error file content:")
        # ff = open(str(Path(tmp_folder,"output","Raven_errors.txt")), "r")
        # lines = ff.readlines()
        # ff.close()
        # for line in lines:
        #     print(">>> ",line.rstrip()) # rstrip removes trailing \n
        raise ValueError("ERROR: No resultats.nc produced (scroll up to see content of error file)")

    model = {}

    # # ---------------
    # # extract model output: Diagnostics: NSE
    # # ---------------
    # model['nse'] = 0.0
    # ff = open(str(Path(tmp_folder,"output","Diagnostics.csv")), "r")
    # lines = ff.readlines()
    # ff.close()
    # nse = np.float(lines[-1].strip().split(',')[2])
    # print("NSE: ",nse)
    # model['nse'] = nse
    # print("")

    # ---------------
    # extract model output: Hydrographs: simulated Q
    # ---------------
    model['Q'] = 0.0
    back_days = 0
    # 1 PD     Passes Dangereuses
    # 2 PERIB  Peribonka
    # 3 LM     Lac Manouane
    # 4 MBLANC Montagnes Blanches
    basin_id = dict_setup['basin_id'] # starts with 1
    start_day = dict_setup['start_day']
    start_day = datetime.datetime(int(start_day[0:4]),int(start_day[5:7]),int(start_day[8:10]),0,0)
    end_day = dict_setup['end_day']
    end_day = datetime.datetime(int(end_day[0:4]),int(end_day[5:7]),int(end_day[8:10]),0,0)
    ntime = (end_day - start_day).days + 1 + 1 # second +1 because CEQUEAU always has last day as end_day+1
    qsim_file = str(Path(tmp_folder,"output","resultats.nc"))
    qsim = get_discharge(start_day, qsim_file, ntime, var="debitExutoire", ibasin=basin_id-1, group="etatsCP", ilag=0)
    model['Q'] = qsim
    print("Q: ",model['Q'][0:4],"...",model['Q'][-4:])
    print("Q_range: [",np.min(model['Q']),",",np.max(model['Q']),"]")
    print("shape Q: ",np.shape(model['Q']))
    print("")

    # ---------------
    # cleanup
    # ---------------
    #if os.path.exists(tmp_folder):
    #    shutil.rmtree(tmp_folder)

    return model
# read parameter sets (context manager instead of a leaked file handle)
with open(infile, "r") as ff:
    parasets = ff.readlines()

# number of header lines to skip: either given on the command line or read
# from the "...: <n>" header of the input file itself
if skip is None:
    skip = int(parasets[0].strip().split(':')[1])  # np.int removed in numpy >= 1.24
else:
    skip = int(skip)
parasets = parasets[skip:]

model_output = {}

# this loop could be easily parallized and modified such that it
# actually submits multiple tasks to a HPC
for iparaset,paraset in enumerate(parasets):
    paraset = list(map(float,paraset.strip().split()))
    model = model_function(paraset,run_id='run_set_'+str(iparaset))
    if iparaset == 0:
        # first run defines which outputs exist; start one list per output key
        for ikey in model.keys():
            model_output[ikey] = []
    for ikey in model.keys():
        model_output[ikey].append(model[ikey])

# write all collected outputs; `with` closes the handle the original leaked
with open(outfile, "wb") as out_ff:
    pickle.dump(model_output, out_ff)
print("wrote: '"+outfile+"'")
| 40.984985 | 374 | 0.606609 |
442f875f7dd039b48b782ee185925322f7ab356d | 1,461 | py | Python | pyStatus/plugins/Memory.py | fleaz/pyStatus | cc9f89d1173897da929bbb85e435a912bf1dc5c3 | [
"MIT"
] | null | null | null | pyStatus/plugins/Memory.py | fleaz/pyStatus | cc9f89d1173897da929bbb85e435a912bf1dc5c3 | [
"MIT"
] | null | null | null | pyStatus/plugins/Memory.py | fleaz/pyStatus | cc9f89d1173897da929bbb85e435a912bf1dc5c3 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
import psutil
from ..BarItem import BarItem
class Memory(BarItem):
    """Bar item showing application-used vs total RAM in MB."""
    def __init__(self):
        BarItem.__init__(self, "Memory")
        self.output['name'] = "Memory"
        self.update()

    def update(self):
        mem = psutil.virtual_memory()
        # "used" minus buffers/cache: memory applications actually occupy.
        # Named attributes replace the fragile tuple indices (mem[3]/mem[7]/
        # mem[8]); buffers/cached are Linux-specific fields.
        mem_used = mem.used - mem.buffers - mem.cached
        # BUGFIX: 1 MiB = 1048576 bytes; the original divided by 1048578
        mem_total = mem.total / 1048576
        mem_used = mem_used / 1048576
        self.output['full_text'] = "RAM: {0:.0f} MB / {1:.0f} MB".format(mem_used, mem_total)
class FreeMemory(BarItem):
    """Bar item showing free RAM, coloured red below 10% and yellow below 25%."""
    def __init__(self):
        BarItem.__init__(self, "MemoryFree")
        self.output['name'] = "MemoryFree"
        self.update()

    def update(self):
        mem = psutil.virtual_memory()
        # memory applications actually occupy (Linux-specific buffers/cached);
        # named attributes replace the fragile tuple indices of the original
        mem_used = mem.used - mem.buffers - mem.cached
        # BUGFIX: 1 MiB = 1048576 bytes; the original divided by 1048578
        total_memory = mem.total / 1048576
        free_memory = total_memory - (mem_used / 1048576)
        self.output['full_text'] = "RAM free: {0:.0f}MB".format(free_memory)
        percent_free = free_memory / total_memory * 100
        if percent_free < 10:
            self.output['color'] = '#FF0000'
        elif percent_free < 25:
            self.output['color'] = '#FFFF00'
        else:
            self.output['color'] = '#FFFFFF'
| 25.631579 | 93 | 0.579055 |
9c2cf0ebcc1df2aeed838a6395df29469a37697b | 1,884 | py | Python | tests/test_forms.py | jcass77/django-yearlessdate | 19ed3ecb16efe33eea6f02138bb4365447cb2ea7 | [
"BSD-3-Clause"
] | null | null | null | tests/test_forms.py | jcass77/django-yearlessdate | 19ed3ecb16efe33eea6f02138bb4365447cb2ea7 | [
"BSD-3-Clause"
] | null | null | null | tests/test_forms.py | jcass77/django-yearlessdate | 19ed3ecb16efe33eea6f02138bb4365447cb2ea7 | [
"BSD-3-Clause"
] | null | null | null | from djangoyearlessdate.helpers import YearlessDate
from djangoyearlessdate.forms import YearlessDateSelect
from .testapp.forms import YearForm, YearlessDateForm
class TestYearlessDateWidget:
    """Unit tests for YearlessDateSelect.decompress."""
    def test_decompress_with_value(self):
        widget = YearlessDateSelect()
        assert widget.decompress(YearlessDate(22, 6)) == [22, 6]

    def test_decompress_without_value(self):
        widget = YearlessDateSelect()
        assert widget.decompress(None) == [None, None]
class TestYearlessDateField:
    """Validation behaviour of the yearless-date form field."""

    def _bound_form(self, day, month):
        # Bind the form exactly as the MultiWidget submits it (two sub-fields).
        return YearlessDateForm(
            data={'yearless_date_0': day, 'yearless_date_1': month})

    def test_valid_yearless_date(self):
        form = self._bound_form('1', '9')
        assert form.is_valid()
        assert form.cleaned_data['yearless_date'] == YearlessDate(1, 9)

    def test_invalid_yearless_date(self):
        form = self._bound_form('31', '9')
        assert not form.is_valid()
        assert form.errors['yearless_date'] == [u'Invalid date.']

    def test_yearless_date_not_supplied(self):
        form = self._bound_form('', '')
        assert not form.is_valid()
        assert form.errors['yearless_date'] == [u'This field is required.']
class TestYearField:
    """Boundary checks for the year field (valid range is 1900-2200 inclusive)."""

    def _bound_form(self, year):
        return YearForm(data={'year': year})

    def test_1900_is_valid(self):
        form = self._bound_form('1900')
        assert form.is_valid()
        assert form.cleaned_data['year'] == 1900

    def test_2200_is_valid(self):
        form = self._bound_form('2200')
        assert form.is_valid()
        assert form.cleaned_data['year'] == 2200

    def test_pre_1900_year_is_invalid(self):
        form = self._bound_form('1899')
        assert not form.is_valid()
        assert form.errors['year'] == [u'Invalid year.']

    def test_post_2200_year_is_invalid(self):
        form = self._bound_form('2201')
        assert not form.is_valid()
        assert form.errors['year'] == [u'Invalid year.']
121b35795cff2acccc3d2bbf37cd67f4f44c5322 | 354 | py | Python | python/ray/tune/automl/__init__.py | firebolt55439/ray | 215300b070628c06f0106906fc6c03bd70ebf140 | [
"Apache-2.0"
] | 21,382 | 2016-09-26T23:12:52.000Z | 2022-03-31T21:47:45.000Z | python/ray/tune/automl/__init__.py | firebolt55439/ray | 215300b070628c06f0106906fc6c03bd70ebf140 | [
"Apache-2.0"
] | 19,689 | 2016-09-17T08:21:25.000Z | 2022-03-31T23:59:30.000Z | python/ray/tune/automl/__init__.py | firebolt55439/ray | 215300b070628c06f0106906fc6c03bd70ebf140 | [
"Apache-2.0"
] | 4,114 | 2016-09-23T18:54:01.000Z | 2022-03-31T15:07:32.000Z | from ray.tune.automl.genetic_searcher import GeneticSearch
from ray.tune.automl.search_policy import GridSearch, RandomSearch
from ray.tune.automl.search_space import SearchSpace, \
ContinuousSpace, DiscreteSpace
# Public API of the ray.tune.automl subpackage.
__all__ = [
    "ContinuousSpace",
    "DiscreteSpace",
    "SearchSpace",
    "GridSearch",
    "RandomSearch",
    "GeneticSearch",
]
| 25.285714 | 66 | 0.748588 |
2d958ee9de18d8957b296a98c2054d1ac6433781 | 536 | py | Python | exercicios_python/Exercicio_042.py | GabsOrtega/logica-python | 6f4e752d0796c9bf70be8f7108bc3bd49d877709 | [
"MIT"
] | null | null | null | exercicios_python/Exercicio_042.py | GabsOrtega/logica-python | 6f4e752d0796c9bf70be8f7108bc3bd49d877709 | [
"MIT"
] | null | null | null | exercicios_python/Exercicio_042.py | GabsOrtega/logica-python | 6f4e752d0796c9bf70be8f7108bc3bd49d877709 | [
"MIT"
] | null | null | null | r1 = float(input('Primeiro segmento: '))
r2 = float(input('Segundo segmento: '))
r3 = float(input('Terceiro segmento: '))
# A triangle exists iff every side is strictly shorter than the sum of the
# other two (triangle inequality, checked for all three sides).
eh_triangulo = r1 + r2 > r3 and r2 + r3 > r1 and r3 + r1 > r2
if not eh_triangulo:
    print('NÃO É POSSÍVEL FORMAR UM TRIÂNGULO!')
else:
    print('É POSSÍVEL FORMAR UM TRIÂNGULO COM ESTES SEGMENTOS DE RETA')
    # Classify by the number of distinct side lengths:
    # 1 -> equilateral, 2 -> isosceles, 3 -> scalene.
    lados_distintos = len({r1, r2, r3})
    if lados_distintos == 1:
        print('TRIÂNGULO EQUILÁTERO')
    elif lados_distintos == 2:
        print('TRIÂNGULO ISÓSCELES')
    else:
        print('TRIÂNGULO ESCALENO')
91015941bab84753c1d866b89ba10edafc51f6f3 | 9,929 | py | Python | tests/parallel_test.py | alexdavies/jax | 812df27a2d8f61bfac95ff2867d1284cac81837f | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-05-12T20:19:20.000Z | 2020-05-12T20:19:20.000Z | tests/parallel_test.py | alexdavies/jax | 812df27a2d8f61bfac95ff2867d1284cac81837f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/parallel_test.py | alexdavies/jax | 812df27a2d8f61bfac95ff2867d1284cac81837f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import itertools
import unittest
from unittest import SkipTest, skip
import numpy as np
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
from jax import test_util as jtu
from jax import lax
from jax.api import _papply, _parallelize, soft_pmap, jit, make_jaxpr
from jax.linear_util import wrap_init
from jax.util import prod
from jax.config import config
config.parse_flags_with_absl()

# Decorator factory that silences the "soft_pmap is an experimental..."
# warning emitted by the soft_pmap calls exercised in the tests below.
ignore_soft_pmap_warning = functools.partial(
    jtu.ignore_warning, message="soft_pmap is an experimental.*")
class PapplyTest(jtu.JaxTestCase):
    """Tests for the experimental `_papply` transform, which rewrites a
    per-array function into a per-shard function over a named mapped axis."""

    def testIdentity(self):
        pfun, axis_name = _papply(lambda x: x)
        ans = pfun(np.arange(3))
        expected = np.arange(3)
        self.assertAllClose(ans, expected, check_dtypes=False)

    def testMap(self):
        pfun, axis_name = _papply(jnp.sin)
        ans = pfun(np.arange(3.))
        expected = np.sin(np.arange(3.))
        self.assertAllClose(ans, expected, check_dtypes=False)

    @ignore_soft_pmap_warning()
    def testSum(self):
        # A sum over the mapped axis should lower to a psum collective;
        # compare jaxprs textually to verify that.
        pfun, axis_name = _papply(lambda x: jnp.sum(x, axis=0))

        jaxpr = make_jaxpr(pfun)(np.ones(3))
        expected_jaxpr = make_jaxpr(
            lambda x: lax.psum(x, axis_name))(np.zeros((5, 3)))
        assert repr(jaxpr) == repr(expected_jaxpr)

        arg = np.arange(15.).reshape((5, 3))
        ans = soft_pmap(pfun, axis_name)(arg)[0]
        expected = np.sum(arg, axis=0)
        self.assertAllClose(ans, expected, check_dtypes=False)

    @ignore_soft_pmap_warning()
    def testMax(self):
        # Analogous to testSum: max over the mapped axis becomes pmax.
        pfun, axis_name = _papply(lambda x: jnp.max(x, axis=0))

        jaxpr = make_jaxpr(pfun)(np.ones(3))
        expected_jaxpr = make_jaxpr(
            lambda x: lax.pmax(x, axis_name))(np.zeros((5, 3)))
        assert repr(jaxpr) == repr(expected_jaxpr)

        arg = np.arange(15.).reshape((5, 3))
        ans = soft_pmap(pfun, axis_name)(arg)[0]
        expected = np.max(arg, axis=0)
        self.assertAllClose(ans, expected, check_dtypes=False)

    @ignore_soft_pmap_warning()
    def testSelect(self):
        # Predicate and on-false operand are closed over; only `t` is mapped.
        p = np.arange(15).reshape((5, 3)) % 4 == 1
        f = np.zeros((5, 3))

        def fun(t):
            return lax.select(p, t, f)

        t = np.ones((5, 3))
        ans = soft_pmap(*_papply(fun))(t)
        expected = fun(t)
        self.assertAllClose(ans, expected, check_dtypes=True)

    def testLogSoftmax(self):
        raise SkipTest("test doesn't pass yet")  # TODO(frostig)

        def fun(x):
            return x - jnp.log(jnp.sum(jnp.exp(x)))

        pfun, axis_name = _papply(fun)

        jaxpr = make_jaxpr(pfun)(np.zeros(5))
        expected_jaxpr = make_jaxpr(
            lambda x: x - jnp.log(lax.psum(jnp.exp(x), axis_name)))(np.zeros(5))
        assert repr(jaxpr) == repr(expected_jaxpr)

        ans = soft_pmap(pfun, axis_name)(np.arange(1., 5.))
        expected = fun(np.arange(1., 5.))
        self.assertAllClose(ans, expected, check_dtypes=False)

    @ignore_soft_pmap_warning()
    def testAdd(self):
        x = np.array([[1, 2, 3], [4, 5, 6]])
        expected = x + x

        pfun, axis_name = _papply(jnp.add)
        ans = soft_pmap(pfun, axis_name)(x, x)
        self.assertAllClose(ans, expected, check_dtypes=True)

    def testAddBroadcasting(self):
        raise SkipTest("test doesn't pass yet")  # TODO(frostig)

        def fun(x):
            return x + 3

        x = np.array([[1, 2], [3, 4]])
        expected = x + 3

        pfun, axis_name = _papply(fun)
        ans = soft_pmap(pfun, axis_name)(x)
        self.assertAllClose(ans, expected, check_dtypes=True)

    def testMakeJaxprPapplyComposition(self):
        raise SkipTest(  # TODO(mattjj)
            "fails because select's papply rule calls an SPMD primitive")
        x = b = np.ones(3)
        pfun, axis_name = _papply(lambda a: jnp.where(x, a, b))
        make_jaxpr(pfun)(np.ones(3))  # doesn't crash
@skip("causing trace state errors that affect other tests")
class ParallelizeTest(jtu.JaxTestCase):
def dedup(self, arr, expected_rank):
if arr.ndim == expected_rank + 1:
for i in range(arr.shape[0] - 1):
self.assertAllClose(arr[i], arr[i + 1], check_dtypes=True)
return arr[0]
else:
assert arr.ndim == expected_rank
return arr
def testNormalize(self):
def f(x):
return x / x.sum(0)
x = np.arange(4.)
expected = f(x)
ans = _parallelize(f)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jaxpr = make_jaxpr(_parallelize(f))(x)
self.assertIn('psum', repr(jaxpr))
def testAdd(self):
x = np.arange(10)
y = 2 * np.arange(10)
def f(x): return x + y
expected = f(x)
ans = _parallelize(f)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testAdd2(self):
x = np.arange(10)
y = 2 * np.arange(10)
def f(y): return x + y
expected = f(y)
ans = _parallelize(f)(y)
self.assertAllClose(ans, expected, check_dtypes=False)
def testAdd3(self):
x = np.arange(10)
y = 2 * np.arange(10)
def f(x, y):
return x + y
expected = f(x, y)
ans = _parallelize(f)(x, y)
self.assertAllClose(ans, expected, check_dtypes=False)
@unittest.skip("Missing cases in gather papply rule")
def testOuter(self):
x = np.arange(10)
y = 2 * np.arange(10)
def f(x): return x[:, None] * y
expected = f(x)
ans = _parallelize(f)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testOuter2(self):
x = np.arange(10)
y = 2 * np.arange(10)
def f(y): return x[:, None] * y
expected = f(y)
ans = _parallelize(f)(y)
self.assertAllClose(ans, expected, check_dtypes=False)
@unittest.skip("Missing cases in gather papply rule")
def testOuter3(self):
x = np.arange(10)
y = 2 * np.arange(10)
def f(x, y): return x[:, None] * y
expected = f(x, y)
ans = _parallelize(f)(x, y)
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "testTranspose_shape={}_perm={}"
.format(shape, perm),
"shape": shape, "perm": perm}
for shape in [
(2, 2),
(3, 3),
(2, 2, 2),
(2, 3, 4),
(2, 3, 2)
]
for perm in itertools.permutations(list(range(len(shape))))
))
def testTranspose(self, shape, perm):
def fun(x):
return lax.transpose(x, perm)
x = np.arange(prod(shape)).reshape(shape)
expected = fun(x)
ans = _parallelize(fun)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testTransposeAndAddRank2(self):
def fun(x):
return x + x.T
x = np.reshape(np.arange(4., dtype=np.float32), (2, 2))
expected = fun(x)
ans = _parallelize(fun)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testTransposeAndAddRank3(self):
def fun(x):
return x + x.T
x = np.reshape(np.arange(8., dtype=np.float32), (2, 2, 2))
expected = fun(x)
ans = _parallelize(fun)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testDot(self):
raise SkipTest("known failure") # TODO(frostig)
x = np.reshape(np.arange(4., dtype=np.float32), (2, 2))
def fun(x, y):
return lax.dot(x, y)
expected = fun(x, x)
pfun, axis_name = _papply(fun)
ans = soft_pmap(pfun, axis_name)(x, x)
ans = self.dedup(ans, expected.ndim)
self.assertAllClose(ans, expected, check_dtypes=False)
# Test lax.dot_general on two rank-3 arguments, generating a test method call
# for every matching of dimensions, and each matched pair of dimensions being
# {batch, contracting, neither}. In combination with that, split the first
# dimension of the LHS, that of the RHS, and that of both.
@parameterized.named_parameters(
{"testcase_name": "_dimMatch={}_matchTypes={}_split={}".format(
matching, coloring, split),
"matching": matching, "coloring": coloring, "split": split}
for matching in itertools.permutations(range(3))
for coloring in itertools.product(range(3), range(3), range(3))
for split in range(3))
def testDotGeneral(self, matching, coloring, split):
BATCH, CONTRACT, _ = range(3)
SPLIT_LHS, SPLIT_RHS, SPLIT_BOTH = range(3)
x = np.reshape(np.arange(8.), (2, 2, 2))
y = np.reshape(np.arange(8.), (2, 2, 2)) + 4.
cdims = [(i, matching[i]) for i in range(3) if coloring[i] == CONTRACT]
bdims = [(i, matching[i]) for i in range(3) if coloring[i] == BATCH]
dimension_numbers = [
list(zip(*cdims)) or [(), ()],
list(zip(*bdims)) or [(), ()]
]
def f(x, y):
return lax.dot_general(x, y, dimension_numbers)
if split == SPLIT_LHS:
fun = lambda x: f(x, y)
elif split == SPLIT_RHS:
fun = lambda y: f(x, y)
else:
fun = f
try:
if split != SPLIT_BOTH:
expected = fun(x)
pfun, axis_name = _papply(fun)
ans = soft_pmap(pfun, axis_name)(x)
else:
expected = fun(x, y)
pfun, axis_name = _papply(fun)
ans = soft_pmap(pfun, axis_name)(x, y)
except (NotImplementedError, TypeError) as e:
raise SkipTest(str(e)) from e
ans = self.dedup(ans, expected.ndim)
self.assertAllClose(ans, expected, check_dtypes=False)
def testCall(self):
@jit
def fun(x):
return x
x = np.reshape(np.arange(8., dtype=np.float32), (2, 2, 2))
expected = fun(x)
ans = _parallelize(fun)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
# Standard absl test entry point.
if __name__ == '__main__':
    absltest.main()
| 29.638806 | 79 | 0.642461 |
7d634bd4f14e9df3035785f9707c9f77f6b51f4d | 690 | py | Python | ecs/docstash/views.py | programmierfabrik/ecs | 2389a19453e21b2ea4e40b272552bcbd42b926a9 | [
"Apache-2.0"
] | 9 | 2017-02-13T18:17:13.000Z | 2020-11-21T20:15:54.000Z | ecs/docstash/views.py | programmierfabrik/ecs | 2389a19453e21b2ea4e40b272552bcbd42b926a9 | [
"Apache-2.0"
] | 2 | 2021-05-20T14:26:47.000Z | 2021-05-20T14:26:48.000Z | ecs/docstash/views.py | programmierfabrik/ecs | 2389a19453e21b2ea4e40b272552bcbd42b926a9 | [
"Apache-2.0"
] | 4 | 2017-04-02T18:48:59.000Z | 2021-11-23T15:40:35.000Z | from django.http import Http404
from django.shortcuts import get_object_or_404
from ecs.docstash.models import DocStash
from ecs.documents.models import Document
from ecs.documents.views import handle_download
def download_document(request, docstash_key=None, document_pk=None, view=False):
    """Serve a document from the requesting user's docstash entry.

    404s when the stash does not exist, is not owned by the user, or does
    not reference the requested document.
    """
    stash = get_object_or_404(DocStash, key=docstash_key, owner=request.user)
    if int(document_pk) not in stash.value['document_pks']:
        raise Http404()
    document = Document.objects.get(pk=document_pk)
    return handle_download(request, document, view=view)
def view_document(request, docstash_key=None, document_pk=None):
    """Serve the document inline (view mode) instead of as an attachment."""
    return download_document(
        request, docstash_key=docstash_key, document_pk=document_pk, view=True)
| 38.333333 | 84 | 0.801449 |
350341e48ba6df997263ad2c86e6c32b023ea177 | 10,263 | py | Python | Kai/python/tools/fileloader.py | NJManganelli/FourTopNAOD | 9743d5b49bdbad27a74abb7b2d5b7295f678a0e3 | [
"Apache-2.0"
] | 1 | 2022-01-17T17:29:38.000Z | 2022-01-17T17:29:38.000Z | Kai/python/tools/fileloader.py | NJManganelli/FourTopNAOD | 9743d5b49bdbad27a74abb7b2d5b7295f678a0e3 | [
"Apache-2.0"
] | null | null | null | Kai/python/tools/fileloader.py | NJManganelli/FourTopNAOD | 9743d5b49bdbad27a74abb7b2d5b7295f678a0e3 | [
"Apache-2.0"
] | 1 | 2021-12-15T10:56:50.000Z | 2021-12-15T10:56:50.000Z | import json
from pprint import pprint
class FileLoader:
"""FileLoader takes as input an evenSet, era, and channel; it returns a list of files, a json, and a configuration dictionary.
eventSet: "TTTT", "TT", "DY", etc.
era: 2016, 2017, 2018. This corresponds to both MC tunes and Data run sets, as well as the default configuration parameters.
channel: SL or DL to return the appropriate configuration parameters that is loaded. Overriden by configName option.
configName: (Optional) When specified, returns the requested configuration instead of the standard one for that era and channel.
serverPrefix: the default prefix for files is root://cms-xrd-global.cern.ch/. Pass any other as a string in the same format. """
def __init__(self, eventSet=None, era=None, channel=None, configName=None, jsonName=None, filePrefix="root://cms-xrd-global.cern.ch/"):
#internal variables grabbed, except jsonName and configName
self._eventSet = eventSet
self._era = era
self._channel = channel
self._filePrefix = filePrefix
#Make these all named options, but still require the first three to always be passed
if (self._eventSet == None or self._era == None or self._channel == None):
raise RuntimeError("FileLoader requires an eventSet, an era(2016, 2017, 2018), and an analysis channel(SL, DL)")
#########################################
### USER DEFINED DATASETS and PATHS ###
### Use convenient name and era ###
### and finally indicate path inside ###
### self._filePath defined first ###
### Data must begin with "Run" ###
### Monte Carlo must NOT ###
#########################################
#Ensure JSON files are formatted properly using tool like https://jsonformatter.curiousconcept.com/
self._jsonPath = "../jsons/"
self._configPath = "../config/"
self._filePath = "../filelists/"
self._eventDict = {
"TTTT-PSweights" : { "2016" : "NOT IMPLEMENTED",
"2017" : "TTTT_TuneCP5_PSweights_13TeV-amcatnlo-pythia8.txt",
"2018" : "NOT IMPLEMENTED" },
"TTTT" : { "2016" : "NOT IMPLEMENTED",
"2017" : "TTTT_TuneCP5_13TeV-amcatnlo-pythia8.txt",
"2018" : "NOT IMPLEMENTED" },
"TTJetsSL" : { "2016" : "NOT IMPLEMENTED",
"2017" : "TTJets_SingleLeptFromT_TuneCP5_13TeV-madgraphMLM-pythia8.txt",
"2018" : "NOT IMPLEMENTED" },
"WJetsToLNu" : { "2016" : "NOT IMPLEMENTED",
"2017" : "WJetsToLNu_TuneCP5_13TeV-madgraphMLM-pythia8.txt",
"2018" : "NOT IMPLEMENTED" },
"DoubleMuonRunB" : { "2016" : "NOT IMPLEMENTED",
"2017" : "DoubleMuon_Run2017B-Nano14Dec2018-v1",
"2018" : "NOT IMPLEMENTED" },
"DoubleMuonRunC" : { "2016" : "NOT IMPLEMENTED",
"2017" : "DoubleMuon_Run2017C-Nano14Dec2018-v1",
"2018" : "NOT IMPLEMENTED" },
"DoubleMuonRunD" : { "2016" : "NOT IMPLEMENTED",
"2017" : "DoubleMuon_Run2017D-Nano14Dec2018-v1",
"2018" : "NOT IMPLEMENTED" },
"DoubleMuonRunE" : { "2016" : "NOT IMPLEMENTED",
"2017" : "DoubleMuon_Run2017E-Nano14Dec2018-v1",
"2018" : "NOT IMPLEMENTED" },
"DoubleMuonRunF" : { "2016" : "NOT IMPLEMENTED",
"2017" : "DoubleMuon_Run2017F-Nano14Dec2018-v1",
"2018" : "NOT IMPLEMENTED" },
"DoubleEGRunB" : { "2016" : "NOT IMPLEMENTED",
"2017" : "DoubleEG_Run2017B-Nano14Dec2018-v1",
"2018" : "NOT IMPLEMENTED" },
"DoubleEGRunC" : { "2016" : "NOT IMPLEMENTED",
"2017" : "DoubleEG_Run2017C-Nano14Dec2018-v1",
"2018" : "NOT IMPLEMENTED" },
"DoubleEGRunD" : { "2016" : "NOT IMPLEMENTED",
"2017" : "DoubleEG_Run2017D-Nano14Dec2018-v1",
"2018" : "NOT IMPLEMENTED" },
"DoubleEGRunE" : { "2016" : "NOT IMPLEMENTED",
"2017" : "DoubleEG_Run2017E-Nano14Dec2018-v1",
"2018" : "NOT IMPLEMENTED" },
"DoubleEGRunF" : { "2016" : "NOT IMPLEMENTED",
"2017" : "DoubleEG_Run2017F-Nano14Dec2018-v1",
"2018" : "NOT IMPLEMENTED" },
"MuonEGRunB" : { "2016" : "NOT IMPLEMENTED",
"2017" : "MuonEG_Run2017B-Nano14Dec2018-v1",
"2018" : "NOT IMPLEMENTED" },
"MuonEGRunC" : { "2016" : "NOT IMPLEMENTED",
"2017" : "MuonEG_Run2017C-Nano14Dec2018-v1",
"2018" : "NOT IMPLEMENTED" },
"MuonEGRunD" : { "2016" : "NOT IMPLEMENTED",
"2017" : "MuonEG_Run2017D-Nano14Dec2018-v1",
"2018" : "NOT IMPLEMENTED" },
"MuonEGRunE" : { "2016" : "NOT IMPLEMENTED",
"2017" : "MuonEG_Run2017E-Nano14Dec2018-v1",
"2018" : "NOT IMPLEMENTED" },
"MuonEGRunF" : { "2016" : "NOT IMPLEMENTED",
"2017" : "MuonEG_Run2017F-Nano14Dec2018-v1",
"2018" : "NOT IMPLEMENTED" },
}
self._jsonDict = {"2016" : { "Golden" : "NOT IMPLEMENTED",
"ReReco" : "NOT IMPLEMENTED"
},
"2017" : { "Golden" : "NOT IMPLEMENTED",
"ReReco" : "Cert_294927-306462_13TeV_EOY2017ReReco_Collisions17_JSON.txt"
},
"2018" : { "Golden" : "NOT IMPLEMENTED",
"ReReco" : "NOT IMPLEMENTED"
}
}
######################################################
### Name all necessary inputs for convenience here ###
######################################################
#Name configuration, using method that makes it convenient to add many without modifying this function
self._configName = self._configPath + channel + "_" + era + "_"
if configName is None:
self._configName += "default.json"
else:
self._configName += configName
if self._configName[-5:] != ".json":
self._configName += ".json"
#Name filelist input
self._filelistName = self._filePath + self._eventDict[eventSet][era]
#Grab jsonName from input
self._jsonName = jsonName
#################################
### Set up ToReturn variables ###
#################################
#name event JSON if Data and no jsonName was passed in
# if self._eventSet[:3] is "Run":
if "Run" in self._eventSet:
if self._jsonName is None:
self._jsonToReturn = self._jsonPath + self._jsonDict[era]["ReReco"]
print(self._jsonToReturn)
else:
self.jsonToReturn = self._jsonPath + self._jsonName
else:
self._jsonToReturn = None
#Empty filelistToReturn
self._filelistToReturn = []
#Empty config file
self._configToReturn = {}
#############################################
### hasLoaded boolean for invoking load() ###
#############################################
self._hasLoaded = False
def __load__(self):
#Open config file in read-only mode, then load the json
with open(self._configName, "r") as inConfig:
self._configToReturn = json.load(inConfig)
#Test that the JSON file can be opened, then do just close it
if self._jsonToReturn is not None:
try:
f = open(self._jsonToReturn, "r")
except IOError:
print("IOError: The Requested JSON ({0:s})file does not exist in the absolute/relative path specified by {1:s}".format(self._jsonName, self._jsonPath))
finally:
f.close()
with open(self._filelistName, "r") as inFiles:
for line in inFiles:
self._filelistToReturn.append(self._filePrefix + str(line).replace('\n',''))
self._hasLoaded = True
def printEventSets(self):
pprint(self._eventDict)
def printConfig(self):
if self._hasLoaded:
pprint(self._configToReturn)
else:
print("A configuration has yet to be loaded. Invoke getConfig(), getFiles(), or getJSONPath() first.")
def getFiles(self, indexOfFile=-1):
if not self._hasLoaded:
self.__load__()
if indexOfFile < 0:
return self._filelistToReturn
elif indexOfFile > len(self._filelistToReturn):
raise RuntimeError("You've requested a file that is beyond the index range available, which is 0 to {0:s}"
.format(self._filelistToReturn))
else:
return [self._filelistToReturn[indexOfFile]]
def getConfig(self):
if not self._hasLoaded:
self.__load__()
return self._configToReturn
def getJSONPath(self):
if not self._hasLoaded:
self.__load__()
return self._jsonToReturn
def getSet(self):
return self._eventSet
    # TODO: add helper methods that list the available eventSets, and cache the
    # returned values (file list, JSON path) so they stay available through submethods.
| 51.833333 | 186 | 0.507357 |
7ef7a96cc18d760e65822d4582fffe830753dcc8 | 1,284 | py | Python | fHDHR_web/api/__init__.py | alexmerm/fHDHR | 586ffe5540a69da1430bec3dbbdcc8e86232fd03 | [
"WTFPL"
] | 39 | 2021-01-23T22:04:59.000Z | 2022-03-13T11:30:00.000Z | fHDHR_web/api/__init__.py | alexmerm/fHDHR | 586ffe5540a69da1430bec3dbbdcc8e86232fd03 | [
"WTFPL"
] | 124 | 2021-02-01T14:41:35.000Z | 2022-03-19T04:06:57.000Z | fHDHR_web/api/__init__.py | alexmerm/fHDHR | 586ffe5540a69da1430bec3dbbdcc8e86232fd03 | [
"WTFPL"
] | 12 | 2021-01-22T15:06:08.000Z | 2022-02-18T21:54:36.000Z |
from .root_url import Root_URL
from .startup_tasks import Startup_Tasks
from .settings import Settings
from .logs import Logs
from .versions import Versions
from .channels import Channels
from .origins import Origins
from .xmltv import xmlTV
from .m3u import M3U
from .w3u import W3U
from .epg import EPG
from .tuners import Tuners
from .debug import Debug_JSON
from .plugins import Plugins
from .ssdp import SSDP_API
from .scheduler import Scheduler_API
from .route_list import Route_List
from .images import Images
class fHDHR_API():
    """Aggregate of every HTTP API endpoint handler, each constructed with the
    shared `fhdhr` application context."""

    def __init__(self, fhdhr):
        # Shared application context, also handed to each endpoint handler.
        self.fhdhr = fhdhr

        self.root_url = Root_URL(fhdhr)
        self.startup_tasks = Startup_Tasks(fhdhr)

        self.settings = Settings(fhdhr)
        self.logs = Logs(fhdhr)
        self.versions = Versions(fhdhr)
        self.channels = Channels(fhdhr)
        self.origins = Origins(fhdhr)
        self.xmltv = xmlTV(fhdhr)
        self.m3u = M3U(fhdhr)
        self.w3u = W3U(fhdhr)
        self.epg = EPG(fhdhr)
        self.tuners = Tuners(fhdhr)
        self.debug = Debug_JSON(fhdhr)
        self.plugins = Plugins(fhdhr)
        self.ssdp = SSDP_API(fhdhr)
        self.scheduler = Scheduler_API(fhdhr)
        self.route_list = Route_List(fhdhr)
        self.images = Images(fhdhr)
| 25.176471 | 49 | 0.688474 |
15506c9d3b917a6a1bc46dffb7f880578de51951 | 5,177 | py | Python | static_compress/mixin.py | RentFreeMedia/django-static-compress | b56940b9246714401bdd0b24c2f9595419dc6671 | [
"MIT"
] | 8 | 2017-10-23T07:32:43.000Z | 2019-12-16T16:25:02.000Z | static_compress/mixin.py | RentFreeMedia/django-static-compress | b56940b9246714401bdd0b24c2f9595419dc6671 | [
"MIT"
] | 90 | 2018-06-02T07:37:29.000Z | 2022-03-31T13:01:24.000Z | static_compress/mixin.py | RentFreeMedia/django-static-compress | b56940b9246714401bdd0b24c2f9595419dc6671 | [
"MIT"
] | 8 | 2018-07-25T13:56:40.000Z | 2022-02-11T17:18:17.000Z | import os
from os.path import getatime, getctime, getmtime
import errno
from django.core.exceptions import ImproperlyConfigured
from . import compressors
__all__ = ["CompressMixin"]
DEFAULT_METHODS = ["gz", "br"]
METHOD_MAPPING = {
"gz": compressors.ZopfliCompressor,
"br": compressors.BrotliCompressor,
"gz+zlib": compressors.ZlibCompressor,
# gz+zlib and gz cannot be used at the same time, because they produce the same file extension.
}
class CompressMixin:
    """Django storage mixin that writes pre-compressed companions (e.g. .gz,
    .br) next to eligible static files during collectstatic post-processing."""

    # Class-level defaults; overwritten from Django settings in __init__.
    allowed_extensions = []
    compress_methods = []
    keep_original = True
    compressors = []
    minimum_kb = 0

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # We access Django settings lately here, to allow our app to be imported without
        # defining DJANGO_SETTINGS_MODULE.
        from django.conf import settings

        self.allowed_extensions = getattr(settings, "STATIC_COMPRESS_FILE_EXTS", ["js", "css", "svg"])
        self.compress_methods = getattr(settings, "STATIC_COMPRESS_METHODS", DEFAULT_METHODS)
        self.keep_original = getattr(settings, "STATIC_COMPRESS_KEEP_ORIGINAL", True)
        self.minimum_kb = getattr(settings, "STATIC_COMPRESS_MIN_SIZE_KB", 30)

        valid = [i for i in self.compress_methods if i in METHOD_MAPPING]
        if not valid:
            raise ImproperlyConfigured("No valid method is defined in STATIC_COMPRESS_METHODS setting.")
        if "gz" in valid and "gz+zlib" in valid:
            raise ImproperlyConfigured("STATIC_COMPRESS_METHODS: gz and gz+zlib cannot be used at the same time.")
        self.compressors = [METHOD_MAPPING[k]() for k in valid]

    def get_alternate_compressed_path(self, name):
        """Return the path of an existing compressed companion of `name`,
        or raise FileNotFoundError if none of the configured variants exist."""
        for compressor in self.compressors:
            ext = compressor.extension
            if name.endswith(".{}".format(ext)):
                path = self.path(name)
            else:
                path = self.path("{}.{}".format(name, ext))
            if os.path.exists(path):
                return path
        raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), path)

    # When the original is discarded, timestamp queries fall back to the
    # compressed companion.
    def get_accessed_time(self, name):
        if self.keep_original:
            return super().get_accessed_time(name)
        return self._datetime_from_timestamp(getatime(self.get_alternate_compressed_path(name)))

    def get_created_time(self, name):
        if self.keep_original:
            return super().get_created_time(name)
        return self._datetime_from_timestamp(getctime(self.get_alternate_compressed_path(name)))

    def get_modified_time(self, name):
        if self.keep_original:
            return super().get_modified_time(name)
        alt = self.get_alternate_compressed_path(name)
        return self._datetime_from_timestamp(getmtime(alt))

    def post_process(self, paths, dry_run=False, **options):
        """collectstatic hook: yield (original, compressed, processed) tuples
        for every eligible file, skipping up-to-date compressed outputs."""
        if hasattr(super(), "post_process"):
            yield from super().post_process(paths, dry_run, **options)

        if dry_run:
            return

        for name in paths.keys():
            if not self._is_file_allowed(name):
                continue

            source_storage, path = paths[name]

            # Process if file is big enough
            if os.path.getsize(self.path(path)) < self.minimum_kb * 1024:
                continue

            src_mtime = source_storage.get_modified_time(path)
            dest_path = self._get_dest_path(path)
            with self._open(dest_path) as file:
                for compressor in self.compressors:
                    dest_compressor_path = "{}.{}".format(dest_path, compressor.extension)
                    # Check if the original file has been changed.
                    # If not, no need to compress again.
                    full_compressed_path = self.path(dest_compressor_path)
                    try:
                        dest_mtime = self._datetime_from_timestamp(getmtime(full_compressed_path))
                        file_is_unmodified = dest_mtime.replace(microsecond=0) >= src_mtime.replace(microsecond=0)
                    except FileNotFoundError:
                        file_is_unmodified = False
                    if file_is_unmodified:
                        continue

                    # Delete old gzip file, or Nginx will pick the old file to serve.
                    # Note: Django won't overwrite the file, so we have to delete it ourselves.
                    if self.exists(dest_compressor_path):
                        self.delete(dest_compressor_path)

                    out = compressor.compress(path, file)

                    if out:
                        self._save(dest_compressor_path, out)
                        if not self.keep_original:
                            self.delete(name)
                        yield dest_path, dest_compressor_path, True
                    # Rewind so the next compressor re-reads the whole file.
                    file.seek(0)

    def _get_dest_path(self, path):
        # Hashed-storage backends expose hashed_name; compress the hashed file.
        if hasattr(self, "hashed_name"):
            return self.hashed_name(path)

        return path

    def _is_file_allowed(self, file):
        # Only compress files whose extension is in the configured allow-list.
        for extension in self.allowed_extensions:
            if file.endswith("." + extension):
                return True

        return False
| 40.131783 | 114 | 0.618119 |
3e8f653c47424631c337e80540016f32eaf3e5a2 | 14,378 | py | Python | env/lib/python3.7/site-packages/pandas/tests/series/test_repr.py | MarcoMancha/BreastCancerDetector | be0dfdcebd1ae66da6d0cf48e2525c24942ae877 | [
"Apache-2.0"
] | 5 | 2019-07-26T15:22:41.000Z | 2021-09-28T09:22:17.000Z | env/lib/python3.7/site-packages/pandas/tests/series/test_repr.py | MarcoMancha/BreastCancerDetector | be0dfdcebd1ae66da6d0cf48e2525c24942ae877 | [
"Apache-2.0"
] | 16 | 2021-03-19T09:44:52.000Z | 2022-03-12T00:22:14.000Z | env/lib/python3.7/site-packages/pandas/tests/series/test_repr.py | MarcoMancha/BreastCancerDetector | be0dfdcebd1ae66da6d0cf48e2525c24942ae877 | [
"Apache-2.0"
] | 9 | 2020-02-05T10:24:12.000Z | 2020-02-10T13:08:50.000Z | from datetime import datetime, timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
date_range,
option_context,
period_range,
timedelta_range,
)
from pandas.core.base import StringMixin
from pandas.core.index import MultiIndex
import pandas.util.testing as tm
from .common import TestData
class TestSeriesRepr(TestData):
    """Smoke and regression tests for Series repr output (names, unicode,
    MultiIndex alignment, latex repr, and edge cases)."""

    def test_multilevel_name_print(self):
        index = MultiIndex(
            levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
            codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
            names=["first", "second"],
        )
        s = Series(range(len(index)), index=index, name="sth")
        expected = [
            "first second",
            "foo one 0",
            " two 1",
            " three 2",
            "bar one 3",
            " two 4",
            "baz two 5",
            " three 6",
            "qux one 7",
            " two 8",
            " three 9",
            "Name: sth, dtype: int64",
        ]
        expected = "\n".join(expected)
        assert repr(s) == expected

    def test_name_printing(self):
        # Test small Series.
        s = Series([0, 1, 2])
        s.name = "test"
        assert "Name: test" in repr(s)
        s.name = None
        assert "Name:" not in repr(s)

        # Test big Series (diff code path).
        s = Series(range(1000))
        s.name = "test"
        assert "Name: test" in repr(s)
        s.name = None
        assert "Name:" not in repr(s)

        s = Series(index=date_range("20010101", "20020101"), name="test")
        assert "Name: test" in repr(s)

    def test_repr(self):
        # Broad smoke test: repr must not raise for many Series shapes.
        str(self.ts)
        str(self.series)
        str(self.series.astype(int))
        str(self.objSeries)

        str(Series(tm.randn(1000), index=np.arange(1000)))
        str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))

        # empty
        str(self.empty)

        # with NaNs
        self.series[5:7] = np.NaN
        str(self.series)

        # with Nones
        ots = self.ts.astype("O")
        ots[::2] = None
        repr(ots)

        # various names, including unicode, tuples, and long strings
        for name in [
            "",
            1,
            1.2,
            "foo",
            "\u03B1\u03B2\u03B3",
            "loooooooooooooooooooooooooooooooooooooooooooooooooooong",
            ("foo", "bar", "baz"),
            (1, 2),
            ("foo", 1, 2.3),
            ("\u03B1", "\u03B2", "\u03B3"),
            ("\u03B1", "bar"),
        ]:
            self.series.name = name
            repr(self.series)

        biggie = Series(
            tm.randn(1000), index=np.arange(1000), name=("foo", "bar", "baz")
        )
        repr(biggie)

        # 0 as name
        ser = Series(np.random.randn(100), name=0)
        rep_str = repr(ser)
        assert "Name: 0" in rep_str

        # tidy repr
        ser = Series(np.random.randn(1001), name=0)
        rep_str = repr(ser)
        assert "Name: 0" in rep_str

        # control characters in name/index must not leak into the repr
        ser = Series(["a\n\r\tb"], name="a\n\r\td", index=["a\n\r\tf"])
        assert "\t" not in repr(ser)
        assert "\r" not in repr(ser)
        assert "a\n" not in repr(ser)

        # with empty series (#4651)
        s = Series([], dtype=np.int64, name="foo")
        assert repr(s) == "Series([], Name: foo, dtype: int64)"

        s = Series([], dtype=np.int64, name=None)
        assert repr(s) == "Series([], dtype: int64)"

    def test_tidy_repr(self):
        a = Series(["\u05d0"] * 1000)
        a.name = "title1"
        repr(a)  # should not raise exception

    def test_repr_bool_fails(self, capsys):
        # Series of DataFrames: repr must not emit anything to stderr.
        s = Series([DataFrame(np.random.randn(2, 2)) for i in range(5)])

        # It works (with no Cython exception barf)!
        repr(s)

        captured = capsys.readouterr()
        assert captured.err == ""

    def test_repr_name_iterable_indexable(self):
        s = Series([1, 2, 3], name=np.int64(3))

        # it works!
        repr(s)

        s.name = ("\u05d0",) * 2
        repr(s)

    def test_repr_should_return_str(self):
        # https://docs.python.org/3/reference/datamodel.html#object.__repr__
        # ...The return value must be a string object.
        # (str on py2.x, str (unicode) on py3)

        data = [8, 5, 3, 5]
        index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
        df = Series(data, index=index1)
        assert type(df.__repr__() == str)  # both py2 / 3

    def test_repr_max_rows(self):
        # GH 6863
        with pd.option_context("max_rows", None):
            str(Series(range(1001)))  # should not raise exception

    def test_unicode_string_with_unicode(self):
        df = Series(["\u05d0"], name="\u05d1")
        str(df)

    def test_str_to_bytes_raises(self):
        # GH 26447
        df = Series(["abc"], name="abc")
        msg = "^'str' object cannot be interpreted as an integer$"
        with pytest.raises(TypeError, match=msg):
            bytes(df)

    def test_timeseries_repr_object_dtype(self):
        index = Index(
            [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)], dtype=object
        )
        ts = Series(np.random.randn(len(index)), index)
        repr(ts)

        ts = tm.makeTimeSeries(1000)
        assert repr(ts).splitlines()[-1].startswith("Freq:")

        ts2 = ts.iloc[np.random.randint(0, len(ts) - 1, 400)]
        repr(ts2).splitlines()[-1]

    def test_latex_repr(self):
        result = r"""\begin{tabular}{ll}
\toprule
{} & 0 \\
\midrule
0 & $\alpha$ \\
1 & b \\
2 & c \\
\bottomrule
\end{tabular}
"""
        with option_context("display.latex.escape", False, "display.latex.repr", True):
            s = Series([r"$\alpha$", "b", "c"])
            assert result == s._repr_latex_()

        # Outside the option context, _repr_latex_ is disabled again.
        assert s._repr_latex_() is None

    def test_index_repr_in_frame_with_nan(self):
        # see gh-25061
        i = Index([1, np.nan])
        s = Series([1, 2], index=i)
        exp = """1.0 1\nNaN 2\ndtype: int64"""

        assert repr(s) == exp
class TestCategoricalRepr:
    """repr/str tests for Series backed by categorical dtype: plain and
    ordered categories of ints, strings, datetimes, periods and timedeltas.
    Each test pins the exact rendered string, including truncation."""
    def test_categorical_repr_unicode(self):
        """Custom objects with a non-ASCII __str__ must repr without raising."""
        # see gh-21002
        class County(StringMixin):
            name = "San Sebastián"
            state = "PR"
            def __str__(self):
                return self.name + ", " + self.state
        cat = pd.Categorical([County() for _ in range(61)])
        idx = pd.Index(cat)
        ser = idx.to_series()
        repr(ser)
        str(ser)
    def test_categorical_repr(self):
        """__str__/repr of categorical Series, incl. truncation and ordering."""
        a = Series(Categorical([1, 2, 3, 4]))
        exp = (
            "0    1\n1    2\n2    3\n3    4\n"
            + "dtype: category\nCategories (4, int64): [1, 2, 3, 4]"
        )
        assert exp == a.__str__()
        # Long Series truncates under display.max_rows and appends Length.
        a = Series(Categorical(["a", "b"] * 25))
        exp = (
            "0     a\n1     b\n"
            + "      ..\n"
            + "48    a\n49    b\n"
            + "Length: 50, dtype: category\nCategories (2, object): [a, b]"
        )
        with option_context("display.max_rows", 5):
            assert exp == repr(a)
        # Ordered categories render with '<' and truncate the category list.
        levs = list("abcdefghijklmnopqrstuvwxyz")
        a = Series(Categorical(["a", "b"], categories=levs, ordered=True))
        exp = (
            "0    a\n1    b\n" + "dtype: category\n"
            "Categories (26, object): [a < b < c < d ... w < x < y < z]"
        )
        assert exp == a.__str__()
    def test_categorical_series_repr(self):
        """Exact repr of short and truncated-category int Series."""
        s = Series(Categorical([1, 2, 3]))
        exp = """0    1
1    2
2    3
dtype: category
Categories (3, int64): [1, 2, 3]"""
        assert repr(s) == exp
        s = Series(Categorical(np.arange(10)))
        exp = """0    0
1    1
2    2
3    3
4    4
5    5
6    6
7    7
8    8
9    9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
        assert repr(s) == exp
    def test_categorical_series_repr_ordered(self):
        """Ordered int categories join the category list with '<'."""
        s = Series(Categorical([1, 2, 3], ordered=True))
        exp = """0    1
1    2
2    3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
        assert repr(s) == exp
        s = Series(Categorical(np.arange(10), ordered=True))
        exp = """0    0
1    1
2    2
3    3
4    4
5    5
6    6
7    7
8    8
9    9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
        assert repr(s) == exp
    def test_categorical_series_repr_datetime(self):
        """Datetime categories, naive and tz-aware, wrap the category list."""
        idx = date_range("2011-01-01 09:00", freq="H", periods=5)
        s = Series(Categorical(idx))
        exp = """0   2011-01-01 09:00:00
1   2011-01-01 10:00:00
2   2011-01-01 11:00:00
3   2011-01-01 12:00:00
4   2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
                                 2011-01-01 12:00:00, 2011-01-01 13:00:00]"""  # noqa
        assert repr(s) == exp
        idx = date_range("2011-01-01 09:00", freq="H", periods=5, tz="US/Eastern")
        s = Series(Categorical(idx))
        exp = """0   2011-01-01 09:00:00-05:00
1   2011-01-01 10:00:00-05:00
2   2011-01-01 11:00:00-05:00
3   2011-01-01 12:00:00-05:00
4   2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
                                             2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
                                             2011-01-01 13:00:00-05:00]"""  # noqa
        assert repr(s) == exp
    def test_categorical_series_repr_datetime_ordered(self):
        """Ordered datetime categories use '<' separators in the wrap."""
        idx = date_range("2011-01-01 09:00", freq="H", periods=5)
        s = Series(Categorical(idx, ordered=True))
        exp = """0   2011-01-01 09:00:00
1   2011-01-01 10:00:00
2   2011-01-01 11:00:00
3   2011-01-01 12:00:00
4   2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
                                 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""  # noqa
        assert repr(s) == exp
        idx = date_range("2011-01-01 09:00", freq="H", periods=5, tz="US/Eastern")
        s = Series(Categorical(idx, ordered=True))
        exp = """0   2011-01-01 09:00:00-05:00
1   2011-01-01 10:00:00-05:00
2   2011-01-01 11:00:00-05:00
3   2011-01-01 12:00:00-05:00
4   2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
                                             2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
                                             2011-01-01 13:00:00-05:00]"""  # noqa
        assert repr(s) == exp
    def test_categorical_series_repr_period(self):
        """Period categories (hourly and monthly frequencies)."""
        idx = period_range("2011-01-01 09:00", freq="H", periods=5)
        s = Series(Categorical(idx))
        exp = """0    2011-01-01 09:00
1    2011-01-01 10:00
2    2011-01-01 11:00
3    2011-01-01 12:00
4    2011-01-01 13:00
dtype: category
Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
                            2011-01-01 13:00]"""  # noqa
        assert repr(s) == exp
        idx = period_range("2011-01", freq="M", periods=5)
        s = Series(Categorical(idx))
        exp = """0    2011-01
1    2011-02
2    2011-03
3    2011-04
4    2011-05
dtype: category
Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
        assert repr(s) == exp
    def test_categorical_series_repr_period_ordered(self):
        """Ordered period categories use '<' separators."""
        idx = period_range("2011-01-01 09:00", freq="H", periods=5)
        s = Series(Categorical(idx, ordered=True))
        exp = """0    2011-01-01 09:00
1    2011-01-01 10:00
2    2011-01-01 11:00
3    2011-01-01 12:00
4    2011-01-01 13:00
dtype: category
Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
                            2011-01-01 13:00]"""  # noqa
        assert repr(s) == exp
        idx = period_range("2011-01", freq="M", periods=5)
        s = Series(Categorical(idx, ordered=True))
        exp = """0    2011-01
1    2011-02
2    2011-03
3    2011-04
4    2011-05
dtype: category
Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
        assert repr(s) == exp
    def test_categorical_series_repr_timedelta(self):
        """Timedelta categories (days-only and mixed day/time values)."""
        idx = timedelta_range("1 days", periods=5)
        s = Series(Categorical(idx))
        exp = """0   1 days
1   2 days
2   3 days
3   4 days
4   5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
        assert repr(s) == exp
        idx = timedelta_range("1 hours", periods=10)
        s = Series(Categorical(idx))
        exp = """0   0 days 01:00:00
1   1 days 01:00:00
2   2 days 01:00:00
3   3 days 01:00:00
4   4 days 01:00:00
5   5 days 01:00:00
6   6 days 01:00:00
7   7 days 01:00:00
8   8 days 01:00:00
9   9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
                                   3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
                                   8 days 01:00:00, 9 days 01:00:00]"""  # noqa
        assert repr(s) == exp
    def test_categorical_series_repr_timedelta_ordered(self):
        """Ordered timedelta categories use '<' separators."""
        idx = timedelta_range("1 days", periods=5)
        s = Series(Categorical(idx, ordered=True))
        exp = """0   1 days
1   2 days
2   3 days
3   4 days
4   5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""  # noqa
        assert repr(s) == exp
        idx = timedelta_range("1 hours", periods=10)
        s = Series(Categorical(idx, ordered=True))
        exp = """0   0 days 01:00:00
1   1 days 01:00:00
2   2 days 01:00:00
3   3 days 01:00:00
4   4 days 01:00:00
5   5 days 01:00:00
6   6 days 01:00:00
7   7 days 01:00:00
8   8 days 01:00:00
9   9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
                                   3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
                                   8 days 01:00:00 < 9 days 01:00:00]"""  # noqa
        assert repr(s) == exp
| 29.1643 | 103 | 0.525247 |
003e089e217978c3810712fb5e5aedd36ac27c54 | 7,584 | py | Python | tests/xbmcextra.py | dobo90/script.speedtester | 58052583bfc2ca624a489ad71df23e9038588d32 | [
"Apache-2.0"
] | 3 | 2020-09-26T08:12:00.000Z | 2021-05-01T22:12:26.000Z | tests/xbmcextra.py | dobo90/script.speedtester | 58052583bfc2ca624a489ad71df23e9038588d32 | [
"Apache-2.0"
] | 3 | 2020-10-04T11:54:28.000Z | 2022-01-26T01:59:16.000Z | tests/xbmcextra.py | dobo90/script.speedtester | 58052583bfc2ca624a489ad71df23e9038588d32 | [
"Apache-2.0"
] | 4 | 2020-09-26T09:28:23.000Z | 2021-04-27T13:05:48.000Z | # -*- coding: utf-8 -*-
# Copyright: (c) 2019, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Extra functions for testing"""
# pylint: disable=invalid-name
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import xml.etree.ElementTree as ET
import polib
def kodi_to_ansi(string):
    """Convert Kodi format tags to ANSI codes"""
    if string is None:
        return None
    # Each Kodi markup tag maps to exactly one ANSI escape sequence;
    # apply the substitutions one after the other.
    replacements = (
        ('[BR]', '\n'),
        ('[B]', '\033[1m'),
        ('[/B]', '\033[21m'),
        ('[I]', '\033[3m'),
        ('[/I]', '\033[23m'),
        ('[COLOR=gray]', '\033[30;1m'),
        ('[COLOR=red]', '\033[31m'),
        ('[COLOR=green]', '\033[32m'),
        ('[COLOR=yellow]', '\033[33m'),
        ('[COLOR=blue]', '\033[34m'),
        ('[COLOR=purple]', '\033[35m'),
        ('[COLOR=cyan]', '\033[36m'),
        ('[COLOR=white]', '\033[37m'),
        ('[/COLOR]', '\033[39;0m'),
    )
    for tag, ansi in replacements:
        string = string.replace(tag, ansi)
    return string
def uri_to_path(uri):
    """Shorten a plugin URI to just the path"""
    if uri is None:
        return None
    # Drop the plugin://<addon id> prefix and colorize the remaining path.
    path = uri.replace('plugin://' + ADDON_ID, '')
    return ' \033[33m→ \033[34m%s\033[39;0m' % path
def read_addon_xml(path):
    """Parse the addon.xml and return an info dictionary"""
    # Defaults that are not stored in addon.xml itself.
    info = dict(
        path='./',  # '/storage/.kodi/addons/plugin.video.vrt.nu',
        profile='special://userdata',  # 'special://profile/addon_data/plugin.video.vrt.nu/',
        type='xbmc.python.pluginsource',
    )
    root = ET.parse(path).getroot()
    info.update(root.attrib)  # Add 'id', 'name' and 'version'
    info['author'] = info.pop('provider-name')
    for extension in root:
        # Only the metadata extension point carries the fields we want.
        if extension.attrib.get('point') != 'xbmc.addon.metadata':
            continue
        for element in extension:
            if element.tag == 'assets':
                # Assets are nested one level deeper.
                for asset in element:
                    info[asset.tag] = asset.text
            elif element.attrib.get('lang', 'en_GB') == 'en_GB':
                # Keep English metadata only; drop other translations.
                info[element.tag] = element.text
    return {info['name']: info}
def global_settings():
    """Use the global_settings file

    Falls back to sane defaults when the fixture is missing, then overlays
    proxy configuration taken from environment variables.
    """
    import json
    try:
        with open('tests/userdata/global_settings.json') as fdesc:
            settings = json.load(fdesc)
    except OSError as e:
        print("Error: Cannot use 'tests/userdata/global_settings.json' : %s" % e)
        settings = {
            'locale.language': 'resource.language.en_gb',
            'network.bandwidth': 0,
        }
    if 'PROXY_SERVER' in os.environ:
        settings['network.usehttpproxy'] = True
        settings['network.httpproxytype'] = 0
        print('Using proxy server from environment variable PROXY_SERVER')
        settings['network.httpproxyserver'] = os.environ.get('PROXY_SERVER')
        # The remaining proxy parameters are optional and follow one pattern.
        for env_var, setting_key in (
                ('PROXY_PORT', 'network.httpproxyport'),
                ('PROXY_USERNAME', 'network.httpproxyusername'),
                ('PROXY_PASSWORD', 'network.httpproxypassword')):
            if env_var in os.environ:
                print('Using proxy server from environment variable ' + env_var)
                settings[setting_key] = os.environ.get(env_var)
    return settings
def addon_settings(addon_id=None):
    """Use the addon_settings file

    :param addon_id: if given, return only that add-on's settings dict;
        otherwise return the whole settings mapping
    """
    import json
    try:
        with open('tests/userdata/addon_settings.json') as fdesc:
            settings = json.load(fdesc)
    except OSError as e:
        print("Error: Cannot use 'tests/userdata/addon_settings.json' : %s" % e)
        settings = {}
    # Robustness fix: make sure the add-on entry exists so the credential
    # injection below cannot raise a KeyError when the settings file was
    # missing (fallback {}) or lacked this add-on's section.
    settings.setdefault(ADDON_ID, {})
    # Read credentials from environment or credentials.json
    if 'ADDON_USERNAME' in os.environ and 'ADDON_PASSWORD' in os.environ:
        # print('Using credentials from the environment variables ADDON_USERNAME and ADDON_PASSWORD')
        settings[ADDON_ID]['username'] = os.environ.get('ADDON_USERNAME')
        settings[ADDON_ID]['password'] = os.environ.get('ADDON_PASSWORD')
    elif os.path.exists('tests/userdata/credentials.json'):
        # print('Using credentials from tests/userdata/credentials.json')
        with open('tests/userdata/credentials.json') as fdesc:
            credentials = json.load(fdesc)
        settings[ADDON_ID].update(credentials)
    else:
        print("Error: Cannot use 'tests/userdata/credentials.json'")
    if addon_id:
        return settings[addon_id]
    return settings
def import_language(language):
    """Process the language.po file

    Load the add-on's translation catalog for ``language`` (falling back to
    en_gb) and extend it with Kodi's built-in date/time labels so lookups of
    those ids succeed in tests. Kodi id ranges: 11-17 weekday names,
    21-32 month names, 41-47 weekday abbreviations, 51-62 month abbreviations.
    """
    try:
        podb = polib.pofile('resources/language/{language}/strings.po'.format(language=language))
    except IOError:
        podb = polib.pofile('resources/language/resource.language.en_gb/strings.po')
    podb.extend([
        # WEEKDAY_LONG (Kodi ids 11-17)
        polib.POEntry(msgctxt='#11', msgstr='Monday'),
        polib.POEntry(msgctxt='#12', msgstr='Tuesday'),
        polib.POEntry(msgctxt='#13', msgstr='Wednesday'),
        polib.POEntry(msgctxt='#14', msgstr='Thursday'),
        polib.POEntry(msgctxt='#15', msgstr='Friday'),
        polib.POEntry(msgctxt='#16', msgstr='Saturday'),
        polib.POEntry(msgctxt='#17', msgstr='Sunday'),
        # MONTH_LONG (Kodi ids 21-32)
        polib.POEntry(msgctxt='#21', msgstr='January'),
        polib.POEntry(msgctxt='#22', msgstr='February'),
        polib.POEntry(msgctxt='#23', msgstr='March'),
        polib.POEntry(msgctxt='#24', msgstr='April'),
        polib.POEntry(msgctxt='#25', msgstr='May'),
        polib.POEntry(msgctxt='#26', msgstr='June'),
        polib.POEntry(msgctxt='#27', msgstr='July'),
        polib.POEntry(msgctxt='#28', msgstr='August'),
        polib.POEntry(msgctxt='#29', msgstr='September'),
        polib.POEntry(msgctxt='#30', msgstr='October'),
        polib.POEntry(msgctxt='#31', msgstr='November'),
        polib.POEntry(msgctxt='#32', msgstr='December'),
        # WEEKDAY_SHORT (Kodi ids 41-47)
        polib.POEntry(msgctxt='#41', msgstr='Mon'),
        polib.POEntry(msgctxt='#42', msgstr='Tue'),
        polib.POEntry(msgctxt='#43', msgstr='Wed'),
        polib.POEntry(msgctxt='#44', msgstr='Thu'),
        polib.POEntry(msgctxt='#45', msgstr='Fri'),
        polib.POEntry(msgctxt='#46', msgstr='Sat'),
        polib.POEntry(msgctxt='#47', msgstr='Sun'),
        # MONTH_SHORT (Kodi ids 51-62) -- comment fixed: previously said MONTH_LONG
        polib.POEntry(msgctxt='#51', msgstr='Jan'),
        polib.POEntry(msgctxt='#52', msgstr='Feb'),
        polib.POEntry(msgctxt='#53', msgstr='Mar'),
        polib.POEntry(msgctxt='#54', msgstr='Apr'),
        polib.POEntry(msgctxt='#55', msgstr='May'),
        polib.POEntry(msgctxt='#56', msgstr='Jun'),
        polib.POEntry(msgctxt='#57', msgstr='Jul'),
        polib.POEntry(msgctxt='#58', msgstr='Aug'),
        polib.POEntry(msgctxt='#59', msgstr='Sep'),
        # Bug fix: Oct/Nov/Dec were registered under #50/#51/#52, which
        # duplicates the Jan/Feb ids; Kodi's abbreviated months run 51-62.
        polib.POEntry(msgctxt='#60', msgstr='Oct'),
        polib.POEntry(msgctxt='#61', msgstr='Nov'),
        polib.POEntry(msgctxt='#62', msgstr='Dec'),
    ])
    return podb
# Parsed add-on manifest, keyed by add-on name (computed once at import time).
ADDON_INFO = read_addon_xml('addon.xml')
# The 'id' attribute of the (single) add-on described by addon.xml.
ADDON_ID = next(iter(list(ADDON_INFO.values()))).get('id')
| 39.915789 | 101 | 0.622495 |
8dfa6cc321dc935e54c3169d399774a302a81613 | 4,332 | py | Python | rustici_software_cloud_v2/models/xapi_credentials_list_schema.py | RusticiSoftware/scormcloud-api-v2-client-python | 04e2cce304a336caf492c3330c706840815c4abe | [
"Apache-2.0"
] | 2 | 2020-07-21T10:33:39.000Z | 2021-08-17T21:40:13.000Z | rustici_software_cloud_v2/models/xapi_credentials_list_schema.py | RusticiSoftware/scormcloud-api-v2-client-python | 04e2cce304a336caf492c3330c706840815c4abe | [
"Apache-2.0"
] | 2 | 2020-10-22T20:58:19.000Z | 2020-10-27T17:25:28.000Z | rustici_software_cloud_v2/models/xapi_credentials_list_schema.py | RusticiSoftware/scormcloud-api-v2-client-python | 04e2cce304a336caf492c3330c706840815c4abe | [
"Apache-2.0"
] | 1 | 2020-10-15T17:11:15.000Z | 2020-10-15T17:11:15.000Z | # coding: utf-8
"""
SCORM Cloud Rest API
REST API used for SCORM Cloud integrations. # noqa: E501
OpenAPI spec version: 2.0
Contact: systems@rusticisoftware.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class XapiCredentialsListSchema(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared swagger type for each model attribute.
    swagger_types = {
        'xapi_credentials': 'list[XapiCredentialSchema]',
        'more': 'str'
    }
    # Python attribute name -> JSON property name.
    attribute_map = {
        'xapi_credentials': 'xapiCredentials',
        'more': 'more'
    }
    def __init__(self, xapi_credentials=None, more=None):  # noqa: E501
        """XapiCredentialsListSchema - a model defined in Swagger"""  # noqa: E501
        self._xapi_credentials = None
        self._more = None
        self.discriminator = None
        # xapi_credentials is a required field; its setter rejects None.
        self.xapi_credentials = xapi_credentials
        if more is not None:
            self.more = more
    @property
    def xapi_credentials(self):
        """Gets the xapi_credentials of this XapiCredentialsListSchema.  # noqa: E501
        :return: The xapi_credentials of this XapiCredentialsListSchema.  # noqa: E501
        :rtype: list[XapiCredentialSchema]
        """
        return self._xapi_credentials
    @xapi_credentials.setter
    def xapi_credentials(self, xapi_credentials):
        """Sets the xapi_credentials of this XapiCredentialsListSchema.
        :param xapi_credentials: The xapi_credentials of this XapiCredentialsListSchema.  # noqa: E501
        :type: list[XapiCredentialSchema]
        """
        if xapi_credentials is None:
            raise ValueError("Invalid value for `xapi_credentials`, must not be `None`")  # noqa: E501
        self._xapi_credentials = xapi_credentials
    @property
    def more(self):
        """Gets the more of this XapiCredentialsListSchema.  # noqa: E501
        Token for getting the next set of results, from the prior set of results.  # noqa: E501
        :return: The more of this XapiCredentialsListSchema.  # noqa: E501
        :rtype: str
        """
        return self._more
    @more.setter
    def more(self, more):
        """Sets the more of this XapiCredentialsListSchema.
        Token for getting the next set of results, from the prior set of results.  # noqa: E501
        :param more: The more of this XapiCredentialsListSchema.  # noqa: E501
        :type: str
        """
        self._more = more
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Support subclasses that are also dicts (swagger codegen pattern).
        if issubclass(XapiCredentialsListSchema, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, XapiCredentialsListSchema):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 30.293706 | 102 | 0.597184 |
a34af95358640a2d759a673d63dd286be9a75c01 | 7,621 | py | Python | sdks/python/apache_beam/testing/test_pipeline.py | davidtime/beam | f2d19fdf7118a08d222f0028753a58347e6352fd | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/testing/test_pipeline.py | davidtime/beam | f2d19fdf7118a08d222f0028753a58347e6352fd | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/testing/test_pipeline.py | davidtime/beam | f2d19fdf7118a08d222f0028753a58347e6352fd | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test Pipeline, a wrapper of Pipeline for test purpose"""
# pytype: skip-file
from __future__ import absolute_import
import argparse
import shlex
from nose.plugins.skip import SkipTest
from apache_beam.internal import pickler
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.pipeline import Pipeline
from apache_beam.runners.runner import PipelineState
__all__ = [
'TestPipeline',
]
class TestPipeline(Pipeline):
  """:class:`TestPipeline` class is used inside of Beam tests that can be
  configured to run against pipeline runner.

  It has a functionality to parse arguments from command line and build pipeline
  options for tests who runs against a pipeline runner and utilizes resources
  of the pipeline runner. Those test functions are recommended to be tagged by
  ``@attr("ValidatesRunner")`` annotation.

  In order to configure the test with customized pipeline options from command
  line, system argument ``--test-pipeline-options`` can be used to obtains a
  list of pipeline options. If no options specified, default value will be used.

  For example, use following command line to execute all ValidatesRunner tests::

    python setup.py nosetests -a ValidatesRunner \\
        --test-pipeline-options="--runner=DirectRunner \\
                                 --job_name=myJobName \\
                                 --num_workers=1"

  For example, use assert_that for test validation::

    pipeline = TestPipeline()
    pcoll = ...
    assert_that(pcoll, equal_to(...))
    pipeline.run()
  """

  def __init__(self,
               runner=None,
               options=None,
               argv=None,
               is_integration_test=False,
               blocking=True,
               additional_pipeline_args=None):
    """Initialize a pipeline object for test.

    Args:
      runner (~apache_beam.runners.runner.PipelineRunner): An object of type
        :class:`~apache_beam.runners.runner.PipelineRunner` that will be used
        to execute the pipeline. For registered runners, the runner name can be
        specified, otherwise a runner object must be supplied.
      options (~apache_beam.options.pipeline_options.PipelineOptions):
        A configured
        :class:`~apache_beam.options.pipeline_options.PipelineOptions`
        object containing arguments that should be used for running the
        pipeline job.
      argv (List[str]): A list of arguments (such as :data:`sys.argv`) to be
        used for building a
        :class:`~apache_beam.options.pipeline_options.PipelineOptions` object.
        This will only be used if argument **options** is :data:`None`.
      is_integration_test (bool): :data:`True` if the test is an integration
        test, :data:`False` otherwise.
      blocking (bool): Run method will wait until pipeline execution is
        completed.
      additional_pipeline_args (List[str]): additional pipeline arguments to be
        included when construction the pipeline options object.

    Raises:
      ~exceptions.ValueError: if either the runner or options argument is not
        of the expected type.
    """
    self.is_integration_test = is_integration_test
    self.not_use_test_runner_api = False
    additional_pipeline_args = additional_pipeline_args or []
    self.options_list = (
        self._parse_test_option_args(argv) + additional_pipeline_args)
    self.blocking = blocking
    if options is None:
      options = PipelineOptions(self.options_list)
    super(TestPipeline, self).__init__(runner, options)

  def run(self, test_runner_api=True):
    """Execute the pipeline; when blocking, wait for a terminal state."""
    result = super(TestPipeline, self).run(
        test_runner_api=(False if self.not_use_test_runner_api
                         else test_runner_api))
    if self.blocking:
      state = result.wait_until_finish()
      assert state in (PipelineState.DONE, PipelineState.CANCELLED), \
          "Pipeline execution failed."
    return result

  def get_pipeline_options(self):
    """Return the PipelineOptions this pipeline was constructed with."""
    return self._options

  def _parse_test_option_args(self, argv):
    """Parse value of command line argument: --test-pipeline-options to get
    pipeline options.

    Args:
      argv: An iterable of command line arguments to be used. If not specified
        then sys.argv will be used as input for parsing arguments.

    Returns:
      An argument list of options that can be parsed by argparser or directly
      build a pipeline option.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--test-pipeline-options',
                        type=str,
                        action='store',
                        help='only run tests providing service options')
    parser.add_argument('--not-use-test-runner-api',
                        action='store_true',
                        default=False,
                        help='whether not to use test-runner-api')
    known, unused_argv = parser.parse_known_args(argv)
    if self.is_integration_test and not known.test_pipeline_options:
      # Skip integration test when argument '--test-pipeline-options' is not
      # specified since nose calls integration tests when runs unit test by
      # 'setup.py test'.
      raise SkipTest('IT is skipped because --test-pipeline-options '
                     'is not specified')
    self.not_use_test_runner_api = known.not_use_test_runner_api
    return shlex.split(known.test_pipeline_options) \
        if known.test_pipeline_options else []

  def get_full_options_as_args(self, **extra_opts):
    """Get full pipeline options as an argument list.

    Append extra pipeline options to existing option list if provided.
    Test verifier (if contains in extra options) should be pickled before
    appending, and will be unpickled later in the TestRunner.
    """
    options = list(self.options_list)
    for k, v in extra_opts.items():
      if not v:
        continue
      elif isinstance(v, bool) and v:
        options.append('--%s' % k)
      elif 'matcher' in k:
        # Matchers are callables; serialize them so they survive the trip
        # through the command-line option list.
        options.append('--%s=%s' % (k, pickler.dumps(v).decode()))
      else:
        options.append('--%s=%s' % (k, v))
    return options

  def get_option(self, opt_name):
    """Get a pipeline option value by name

    Args:
      opt_name: The name of the pipeline option.

    Returns:
      None if option is not found in existing option list which is generated
      by parsing value of argument `test-pipeline-options`.
    """
    parser = argparse.ArgumentParser()
    # Strip a leading '--' (if any) so it can be re-added uniformly below.
    # Bug fix: this previously read ``opt_name[:2]``, which *kept only* the
    # dashes ('--foo' became '--'), so lookups of dash-prefixed names
    # always failed.
    opt_name = opt_name[2:] if opt_name[:2] == '--' else opt_name
    # Option name should start with '--' when it's used for parsing.
    parser.add_argument('--' + opt_name,
                        type=str,
                        action='store')
    known, _ = parser.parse_known_args(self.options_list)
    return getattr(known, opt_name) if hasattr(known, opt_name) else None
| 38.882653 | 80 | 0.684949 |
832f610f7857940f434164dc364cf6041afc89e9 | 16,585 | py | Python | mobile_insight/analyzer/profile.py | unknownhandX/mobileinsight-core | 5efc3d877072d0a176c5e628064c6e981f2a0342 | [
"Apache-2.0"
] | null | null | null | mobile_insight/analyzer/profile.py | unknownhandX/mobileinsight-core | 5efc3d877072d0a176c5e628064c6e981f2a0342 | [
"Apache-2.0"
] | null | null | null | mobile_insight/analyzer/profile.py | unknownhandX/mobileinsight-core | 5efc3d877072d0a176c5e628064c6e981f2a0342 | [
"Apache-2.0"
] | 1 | 2018-11-15T07:57:55.000Z | 2018-11-15T07:57:55.000Z | #!/usr/bin/python
# Filename: profile.py
"""
Profile abstraction
Author: Yuanjie Li
"""
is_android = False
try:
from jnius import autoclass # For Android
is_android = True
except Exception as e:
import sqlite3 # Laptop version
is_android = False
import ast
import os
__all__ = ["ProfileHierarchy", "Profile"]
'''
To support per-level ID, we update the following hierarchy
LteRrc:87/Sib/Inter_freq:5780/ThreshXHigh
This is similar to HTTP, except that every level can define another ID for query (list)
The hierarchy ID is mandatory for root, but optional for non-root nodes
'''
class ProfileNode(object):
    '''
    A single node in a ProfileHierarchy tree.
    '''

    def __init__(self, name, id_required):
        '''
        Initialize a profile node.

        :param name: the name of this hierarchy level
        :type name: string
        :param id_required: True if this node must carry a key (id),
            e.g., Inter_freq:5780
        :type id_required: boolean
        '''
        self.name = name
        self.id_required = id_required
        # Maps a child's name to its ProfileNode.
        self.children = {}

    def add(self, child, id_required):
        '''
        Create a child node and attach it to this node.

        :param child: the child node's name
        :type child: string
        :param id_required: True if the child must carry a key (id)
        :type id_required: boolean
        :returns: the newly created child node
        '''
        node = ProfileNode(child, id_required)
        self.children[child] = node
        return node

    def is_leaf(self):
        '''
        Check whether this node has no children.

        :returns: True if this node is a leaf, False otherwise
        '''
        return not self.children
class ProfileHierarchy(object):
    '''An abstraction for analyzers to declare the profile they maintain.

    Given this hierarchy, the analyzer automatically builds the underlying
    database and enforces queries with hierarchical names
    (e.g., LTERrcAnalyzer.Reconfig.Drx.long_drx).

    Example: consider the following RRC profile hierarchy

        LteRrc
          - Sib
            - Inter_freq (id_required, e.g., Inter_freq:5780)
              - ThreshXHigh
              - ThreshXLow
          - Reconfig
            - Drx
              - Short_drx
              - Long_drx

    The following code constructs such a profile hierarchy::

        LteRrcProfile = ProfileHierarchy('LteRrc')
        root = LteRrcProfile.get_root()
        sib = root.add('Sib', False)
        inter_freq = sib.add('Inter_freq', True)  # ID required
        reconfig = root.add('Reconfig', False)
        measconfig = reconfig.add('MeasConfig', False)
        drx = reconfig.add('Drx', False)
        drx.add('Drx_short', False)
        drx.add('Drx_long', False)
    '''

    def __init__(self, root):
        '''
        Initialize the profile hierarchy.

        :param root: the root profile table name
        :type root: string
        '''
        # The root node always requires a unique ID (e.g., LteRrc:87).
        self.__root = ProfileNode(root, True)

    def get_root(self):
        '''
        Return the root node of the hierarchy.
        '''
        return self.__root

    def get_node(self, name):
        '''
        Resolve a hierarchical name to its node.

        :param name: a hierarchical name separated by '.'
            (e.g., LteRrc:87.Sib); a level's optional/mandatory ID is
            attached with ':' (e.g., Inter_freq:5780)
        :type name: string
        :returns: the matching node, or None if it does not exist
        '''
        levels = name.split('.')
        current = self.__root
        # The root level must match by name AND carry its mandatory ID.
        root_token = levels[0].split(':')
        if root_token[0] != current.name or len(root_token) == 1:
            return None
        # Walk down the tree, one level per remaining name component.
        for level in levels[1:]:
            token = level.split(':')
            child = current.children.get(token[0])
            if child is None:
                # No child matches this level's name.
                return None
            if child.id_required and len(token) == 1:
                # The mandatory ID is unavailable.
                return None
            current = child
        return current
class Profile(object):
'''
Profile abstraction
Given the profile hierarchy, this abstraction achieves
- Automatically create Tables for profile
- Enforce query with hierarchical name (e.g., LTERrcAnalyzer.Reconfig.Drx.long_drx)
- Update profile values
'''
    def __init__(self, profile_hierarchy):
        '''
        Initialization of the Profile
        :param profile_hierarchy: the profile hierarchy tree declared by the
            analyzer (a ProfileHierarchy), or None if no profile is maintained
        '''
        self.__profile_hierarchy = profile_hierarchy
        self.__db = None  # backing-store handle; populated by __build_db
        self.__build_db()
    def __create_table(self, node):
        '''
        Create SQL tables for the node
        :param node: a node in the profile hierarchy
        :type node: Node
        '''
        # NOTE(review): the table name is interpolated directly into the SQL
        # statement; node.name is assumed to come only from analyzer-declared
        # hierarchies, never user input -- confirm before reusing elsewhere.
        sql_cmd = 'CREATE TABLE IF NOT EXISTS ' + \
            node.name + "(id,profile,primary key(id))"
        if is_android:
            # Android SQLiteDatabase API: execSQL runs DDL directly.
            self.__db.execSQL(sql_cmd)
        else:
            # sqlite3 cursor API: execute on the cursor, commit on the connection.
            self.__db.execute(sql_cmd)
            self.__conn.commit()
    def __get_root_name(self):
        '''Return the name of the profile hierarchy's root node (used as the
        database/table name).'''
        return self.__profile_hierarchy.get_root().name
def __build_db(self):
'''
Build the internal DBs for the profile
'''
if self.__profile_hierarchy is None:
self.__db = None
else:
# setup internal database
root = self.__profile_hierarchy.get_root()
if is_android:
Environment = autoclass("android.os.Environment")
state = Environment.getExternalStorageState()
if not Environment.MEDIA_MOUNTED == state:
self.__db = None
return
sdcard_path = Environment.getExternalStorageDirectory().toString()
DB_PATH = os.path.join(sdcard_path, "mobileinsight/dbs")
activity = autoclass('org.kivy.android.PythonActivity')
if activity.mActivity:
self.__db = activity.mActivity.openOrCreateDatabase(
os.path.join(DB_PATH, root.name + '.db'), 0, None)
else:
service = autoclass('org.kivy.android.PythonService')
self.__db = service.mService.openOrCreateDatabase(
os.path.join(DB_PATH, root.name + '.db'), 0, None)
else:
self.__conn = sqlite3.connect(root.name + '.db')
self.__db = self.__conn.cursor()
self.__create_table(root)
def query(self, profile_name):
'''
Query the profile value with a hierarchical name.
Example: self.query('cell_id=87','LteRrc:87.Reconfig.Drx.Short_drx')
:param profile_name: a hierarcical name separated by '.'. If id is required, it's separated by ":"
e.g., "LteRrc:87.Sib.Inter_freq:5780.ThreshXHigh"
:type profile_name: string
:returns: value list that satisfies the query, or None if no such field (id not exist, incomplete record, etc.)
'''
try:
# Step 1: check if the name conforms to the hierarchy
if self.__profile_hierarchy is None: # no profile defined
return None
# Check if the field to query is valid
profile_node = self.__profile_hierarchy.get_node(profile_name)
if profile_node is None:
return None
profile_nodes = profile_name.split('.')
# Step 2: extract the raw profile
# NOTE: root profile MUST have a id
sql_cmd = "select profile from " + self.__get_root_name() + " where id=\"" + \
profile_nodes[0].split(":")[1] + "\""
if is_android:
sql_res = self.__db.rawQuery(sql_cmd, None)
else:
sql_res = self.__db.execute(sql_cmd).fetchall()
# if sql_res.getCount()==0: #the id does not exist
if (is_android and sql_res.getCount() == 0) or (
not is_android and len(sql_res) == 0):
return None
if is_android:
sql_res.moveToFirst()
# convert string to dictionary
res = ast.literal_eval(sql_res.getString(0))
else:
res = ast.literal_eval(sql_res[0][0])
# Step 3: extract the result from raw profile
for i in range(1, len(profile_nodes)):
if res is None: # no profile
break
profile_node_split = profile_nodes[i].split(":")
res = res[profile_node_split[0]]
if len(profile_node_split) > 1:
res = res[profile_node_split[1]]
return res
except BaseException: # TODO: raise warnings
return False
def update(self, profile_name, value_dict):
'''
Update a profile value
Example 1: self.update('LteRrc:87.Reconfig.Drx',{Drx_short:1,Drx_long:5})
Example 2: self.update('LteRrc:87.Sib.Inter_freq:5780',{ThreshXHigh:1,ThreshXLow:2})
If the id does not exist, create a new item in the root, with specified values and all other fields as "null"
Otherwise, update the specified field values, and keep the ramaining values unchanged.
The update operation is atomic. No partial update would be performed
:param profile_name: a hierarcical name separated by '.' (e.g., LteRrc.Reconfig.Drx)
:type profile_name: string
:param value: a field_name->value dictionary of the specified updated values.
All the field names should appear in the profile_name.
:type value: string->string dictionary
:returns: True if the update succeeds, False otherwise
'''
try:
# Step 1: check if the name conforms to the hierarchy
if not self.__profile_hierarchy: # no profile defined
raise Exception('No profile defined')
return False
# Check if the field to update is valid
test_node = self.__profile_hierarchy.get_node(profile_name)
if not test_node:
raise Exception('Invalid update: ' + profile_name)
return False
# Check the value fileds to update are indeed included based on
# hierarchy
for field_name in value_dict:
if field_name not in test_node.children:
# Invalid node
raise Exception('Invalid update field: ' + str(value_dict))
return False
profile_nodes = profile_name.split('.')
# Step 2: check if the id exists or not
sql_cmd = "select profile from " + self.__get_root_name() + " where id=\"" + \
profile_nodes[0].split(":")[1] + "\""
if is_android:
sql_res = self.__db.rawQuery(sql_cmd, None)
else:
sql_res = self.__db.execute(sql_cmd).fetchall()
# if not query_res:
# if sql_res.getCount()==0:
if (is_android and sql_res and sql_res.getCount() == 0) or (
not is_android and len(sql_res) == 0):
# The id does not exist. Create a new record
query_res = {}
res = query_res
profile_node = self.__profile_hierarchy.get_root()
# Init: all root's children are not initialized
for child in profile_node.children:
res[child] = None
# Go along hierarchy, init the remaining children
for i in range(1, len(profile_nodes)):
profile_node_split = profile_nodes[i].split(":")
profile_node = profile_node.children[profile_node_split[0]]
res[profile_node_split[0]] = {}
res = res[profile_node_split[0]]
if profile_node.id_required:
res[profile_node_split[1]] = {}
res = res[profile_node_split[1]]
for child in profile_node.children:
res[child] = None
for item in value_dict:
res[item] = value_dict[item]
# Insert the new record into table
sql_cmd = "insert into " + self.__get_root_name() + "(id,profile) values(\"" + \
profile_nodes[0].split(":")[1] + "\"," + "\"" + str(query_res) + "\")"
if is_android:
# print "Yuanjie: execSQL"
self.__db.execSQL(sql_cmd)
else:
self.__db.execute(sql_cmd)
self.__conn.commit()
return True
else:
if is_android:
sql_res.moveToFirst()
query_res = ast.literal_eval(
sql_res.getString(0)) # convert string to dictionary
else:
query_res = ast.literal_eval(sql_res[0][0])
# The id exists. Update the record
res = query_res
profile_node = self.__profile_hierarchy.get_root()
for i in range(1, len(profile_nodes)):
profile_node_split = profile_nodes[i].split(":")
if res[profile_node_split[0]] is not None:
res = res[profile_node_split[0]]
if len(profile_node_split) > 1:
if profile_node_split[1] not in res:
res[profile_node_split[1]] = {}
res = res[profile_node_split[1]]
else:
res[profile_node_split[0]] = {}
res = res[profile_node_split[0]]
if len(profile_node_split) > 1:
if profile_node_split[1] not in res:
res[profile_node_split[1]] = {}
res = res[profile_node_split[1]]
for child in profile_node.children:
res[child] = None
for item in value_dict:
res[item] = value_dict[item]
sql_cmd = "update " + self.__get_root_name() + " set profile=\"" + str(query_res) + \
"\" where id=\"" + profile_nodes[0].split(":")[1] + "\""
if is_android:
self.__db.execSQL(sql_cmd)
else:
self.__db.execute(sql_cmd)
self.__conn.commit()
return True
except BaseException: # TODO: raise warnings
return False
if __name__ == "__main__":
    # Demo / smoke test (Python 2 syntax: note the print statements below).
    # Create a profile hierarchy: LteRrc root with Sib/Inter_freq (id
    # required) and Reconfig/Drx subtrees.
    profile_hierarchy = ProfileHierarchy('LteRrc')
    root = profile_hierarchy.get_root()
    root.add('Root_leaf', False)
    sib = root.add('Sib', False)
    inter_freq = sib.add('Inter_freq', True)
    inter_freq.add('ThreshXHigh', False)
    inter_freq.add('ThreshXLow', False)
    reconfig = root.add('Reconfig', False)
    drx = reconfig.add('Drx', False)
    drx.add('Drx_short', False)
    drx.add('Drx_long', False)
    profile = Profile(profile_hierarchy)
    # Create record id 87, then update a single field in place.
    res = profile.update(
        'LteRrc:87.Reconfig.Drx', {
            'Drx_short': '1', 'Drx_long': '5'})
    print profile.query('LteRrc:87.Reconfig.Drx')
    res = profile.update('LteRrc:87.Reconfig.Drx', {'Drx_long': '6'})
    print profile.query('LteRrc:87.Reconfig.Drx')
    print profile.query('LteRrc:87')
    # Two Inter_freq entries under id 86, keyed by their own ids.
    res = profile.update(
        'LteRrc:86.Sib.Inter_freq:5780', {
            'ThreshXHigh': '1', 'ThreshXLow': '5'})
    res = profile.update(
        'LteRrc:86.Sib.Inter_freq:1975', {
            'ThreshXHigh': '2', 'ThreshXLow': '8'})
    print profile.query('LteRrc:86.Sib')
    profile.update('LteRrc:87', {'Root_leaf': 10})
    print profile.query('LteRrc:87')
| 35.063425 | 119 | 0.554175 |
569b8b3f3be6b4d89d628d7803ff5b9449c627ee | 1,986 | py | Python | tbx/core/migrations/0006_auto_20150326_1023.py | elviva404/wagtail-torchbox | 718d9e2c4337073f010296932d369c726a01dbd3 | [
"MIT"
] | 103 | 2015-02-24T17:58:21.000Z | 2022-03-23T08:08:58.000Z | tbx/core/migrations/0006_auto_20150326_1023.py | elviva404/wagtail-torchbox | 718d9e2c4337073f010296932d369c726a01dbd3 | [
"MIT"
] | 145 | 2015-01-13T17:13:43.000Z | 2022-03-29T12:56:20.000Z | tbx/core/migrations/0006_auto_20150326_1023.py | elviva404/wagtail-torchbox | 718d9e2c4337073f010296932d369c726a01dbd3 | [
"MIT"
] | 57 | 2015-01-03T12:00:37.000Z | 2022-02-09T13:11:30.000Z | # -*- coding: utf-8 -*-
from django.db import models, migrations
import wagtail.core.fields
class Migration(migrations.Migration):
    """Relabel the deprecated rich-text fields on blog/standard/work pages."""

    dependencies = [
        ("torchbox", "0005_auto_20150325_1631"),
    ]

    # One AlterField per (model_name, field_name, RichTextField kwargs);
    # every field keeps preserve_default=True.
    operations = [
        migrations.AlterField(
            model_name=model_name,
            name=field_name,
            field=wagtail.core.fields.RichTextField(**field_kwargs),
            preserve_default=True,
        )
        for model_name, field_name, field_kwargs in [
            ("blogpage", "body",
             {"verbose_name": "body (deprecated. Use streamfield instead)"}),
            ("blogpage", "intro",
             {"verbose_name": "Intro (deprecated. Use streamfield instead)",
              "blank": True}),
            ("standardpage", "body",
             {"verbose_name": "Body (deprecated. Use streamfield instead)",
              "blank": True}),
            ("standardpage", "intro",
             {"verbose_name": "Intro (deprecated. Use streamfield instead)",
              "blank": True}),
            ("workpage", "body",
             {"verbose_name": "Body (deprecated. Use streamfield instead)",
              "blank": True}),
            ("workpage", "intro",
             {"verbose_name": "Intro (deprecated. Use streamfield instead)",
              "blank": True}),
        ]
    ]
| 31.03125 | 86 | 0.553877 |
8608cc3343ab9fae1e85b195a0e190974b8f03ed | 2,483 | py | Python | dashbot/google.py | leonardocustodio/dashbotpy | e5ea48965d025704ec45ecbde6b49882a0def429 | [
"MIT"
] | 1 | 2020-07-08T15:03:50.000Z | 2020-07-08T15:03:50.000Z | dashbot/google.py | leonardocustodio/dashbotpy | e5ea48965d025704ec45ecbde6b49882a0def429 | [
"MIT"
] | null | null | null | dashbot/google.py | leonardocustodio/dashbotpy | e5ea48965d025704ec45ecbde6b49882a0def429 | [
"MIT"
] | 5 | 2018-09-04T15:59:05.000Z | 2020-09-22T21:21:13.000Z | import os
import datetime
import time
import json
from .version import __version__
from . import generic
class google(generic.generic):
    """Dashbot event tracker for the Google Assistant platform.

    Sends incoming/outgoing message events to Dashbot's ``/track`` endpoint.
    The tracker root defaults to ``https://tracker.dashbot.io`` and can be
    overridden through the ``DASHBOT_SERVER_ROOT`` environment variable.
    """

    def __init__(self, apiKey, debug=False, printErrors=False):
        """
        :param apiKey: Dashbot API key used to authenticate tracking calls.
        :param debug: when True, print each tracking URL and payload.
        :param printErrors: flag stored for the ``generic`` base class.
        """
        serverRoot = os.environ.get('DASHBOT_SERVER_ROOT',
                                    'https://tracker.dashbot.io')
        self.urlRoot = serverRoot + '/track'
        self.apiKey = apiKey
        self.debug = debug
        self.printErrors = printErrors
        self.platform = 'google'
        self.version = __version__
        self.source = 'pip'

    def _build_url(self, direction):
        """Return the tracking URL for ``direction`` ('incoming'/'outgoing')."""
        return (self.urlRoot + '?apiKey=' + self.apiKey +
                '&type=' + direction + '&platform=' + self.platform +
                '&v=' + self.version + '-' + self.source)

    def _timestamp_ms(self):
        """Current wall-clock time in whole milliseconds (local time base)."""
        now = datetime.datetime.now()
        return int(1000 * (time.mktime(now.timetuple()) + now.microsecond * 1e-6))

    def _parse_json(self, payload):
        """Best-effort ``json.loads``; returns ``payload`` unchanged on failure."""
        try:
            return json.loads(payload)
        except Exception as e:
            if self.debug:
                print(e)
            return payload

    def logIncoming(self, event):
        """Report a message received from the user to Dashbot."""
        url = self._build_url('incoming')
        data = {
            'dashbot_timestamp': self._timestamp_ms(),
            'request_body': self._parse_json(event),
        }
        if self.debug:
            print('Dashbot Incoming:' + url)
            print(json.dumps(data))
        self.makeRequest(url, 'POST', data)

    def logOutgoing(self, event, response, metadata=None):
        """Report a message sent back to the user (plus optional metadata)."""
        url = self._build_url('outgoing')
        data = {
            'dashbot_timestamp': self._timestamp_ms(),
            'request_body': self._parse_json(event),
            'message': self._parse_json(response),
        }
        if metadata is not None:
            data['metadata'] = metadata
        if self.debug:
            print('Dashbot Outgoing:' + url)
            print(json.dumps(data))
        self.makeRequest(url, 'POST', data)
b2c0bc75aa567f1fa4a6d0ec2f6c00d2b1445ed2 | 549 | py | Python | src/solutions/solution016.py | samtcwong/daily-coding-problems | bd7da8e64a74a04f69a8b5b930100754e99b63f7 | [
"MIT"
] | null | null | null | src/solutions/solution016.py | samtcwong/daily-coding-problems | bd7da8e64a74a04f69a8b5b930100754e99b63f7 | [
"MIT"
] | null | null | null | src/solutions/solution016.py | samtcwong/daily-coding-problems | bd7da8e64a74a04f69a8b5b930100754e99b63f7 | [
"MIT"
] | null | null | null | from typing import List
# Problem #16 [Easy]
# Good morning! Here's your coding interview problem for today.
# This problem was asked by Twitter.
# You run an e-commerce website and want to record the last N order ids in a log.
# Implement a data structure to accomplish this, with the following API:
# record(order_id): adds the order_id to the log
# get_last(i): gets the ith last element from the log. i is guaranteed to be smaller than or equal to N.
# You should be as efficient with time and space as possible.
# Placeholders read by the problem runner; apparently no solution has been
# recorded for this prompt yet -- TODO confirm against the runner's contract.
tests = None
solver = None
| 32.294118 | 104 | 0.750455 |
419d792a98470a1ef5fbf1867f740e47aa916dcc | 535 | py | Python | src/deploy/builder/stacks/migrations/0008_auto_20190529_1523.py | werelaxe/drapo | 5f78da735819200f0e7efa6a5e6b3b45ba6e0d4b | [
"MIT"
] | null | null | null | src/deploy/builder/stacks/migrations/0008_auto_20190529_1523.py | werelaxe/drapo | 5f78da735819200f0e7efa6a5e6b3b45ba6e0d4b | [
"MIT"
] | null | null | null | src/deploy/builder/stacks/migrations/0008_auto_20190529_1523.py | werelaxe/drapo | 5f78da735819200f0e7efa6a5e6b3b45ba6e0d4b | [
"MIT"
] | null | null | null | # Generated by Django 2.1 on 2019-05-29 15:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redefine the ``choices`` on ``Stack.status``."""
    dependencies = [
        ('stacks', '0007_auto_20190529_1520'),
    ]
    operations = [
        migrations.AlterField(
            model_name='stack',
            name='status',
            # NOTE(review): max_length=2 is shorter than every choice value
            # (e.g. "Enqueued"); Django only enforces length via full_clean,
            # so stored values may be rejected/truncated -- confirm intended.
            field=models.CharField(choices=[('Error', 'Error'), ('Enqueued', 'Enqueued'), ('Successfully built and pushed', 'Successfully built and pushed'), ('Processing', 'Processing')], max_length=2),
        ),
    ]
| 28.157895 | 203 | 0.620561 |
a792ee0ac89e30b6ef18c7c40f68ad6bc9e5bfd6 | 2,847 | py | Python | unit-tests/test_importer.py | sk-keeper/Commander | 951317ee44439302520035171baf1d098be98cc6 | [
"MIT"
] | null | null | null | unit-tests/test_importer.py | sk-keeper/Commander | 951317ee44439302520035171baf1d098be98cc6 | [
"MIT"
] | null | null | null | unit-tests/test_importer.py | sk-keeper/Commander | 951317ee44439302520035171baf1d098be98cc6 | [
"MIT"
] | null | null | null | from unittest import TestCase, mock
import pytest
from data_vault import get_synced_params, get_connected_params
from helper import KeeperApiHelper
from keepercommander.importer import importer, commands
class TestImporterUtils(TestCase):
    """Unit tests for keepercommander's import/export helpers.
    The Keeper server is never contacted: ``keepercommander.api.communicate``
    is patched in ``setUp`` and answered by ``KeeperApiHelper``.
    """
    def setUp(self):
        # Replace the network-facing API call with the scripted test helper.
        self.communicate_mock = mock.patch('keepercommander.api.communicate').start()
        self.communicate_mock.side_effect = KeeperApiHelper.communicate_command
    def tearDown(self):
        # Undo every patch started in setUp (and inside individual tests).
        mock.patch.stopall()
    def test_load_importer_format(self):
        """Each supported format name resolves to importer/exporter classes."""
        for fmt in ['csv', 'json']:
            self.assertTrue(issubclass(importer.importer_for_format(fmt), importer.BaseImporter))
            self.assertTrue(issubclass(importer.exporter_for_format(fmt), importer.BaseExporter))
    def test_path_components(self):
        """Paths split on '\\'; a doubled backslash escapes a literal one,
        and empty leading/trailing components are dropped."""
        comps = list(importer.path_components('wwww\\wwww'))
        self.assertEqual(len(comps), 2)
        self.assertEqual(comps[0], 'wwww')
        self.assertEqual(comps[1], 'wwww')
        comps = list(importer.path_components('ww\\\\ww\\wwww'))
        self.assertEqual(len(comps), 2)
        self.assertEqual(comps[0], 'ww\\ww')
        self.assertEqual(comps[1], 'wwww')
        comps = list(importer.path_components('\\wwww\\'))
        self.assertEqual(len(comps), 1)
        self.assertEqual(comps[0], 'wwww')
        comps = list(importer.path_components('wwww'))
        self.assertEqual(len(comps), 1)
        self.assertEqual(comps[0], 'wwww')
    @pytest.mark.xfail(reason="TODO: This test needs investigation")
    def test_export_import(self):
        """Round-trip: export records to JSON, then import the same text.
        File I/O is faked by patching ``open``; the JSON travels through the
        ``json_text`` closure shared by ``mock_write``/``mock_read``.
        """
        params_export = get_synced_params()
        cmd_export = commands.RecordExportCommand()
        param_import = get_connected_params()
        cmd_import = commands.RecordImportCommand()
        json_text = ''
        def mock_write(text):
            nonlocal json_text
            json_text += text
        def mock_read():
            nonlocal json_text
            return json_text
        with mock.patch('keepercommander.api.sync_down'), mock.patch('builtins.open', mock.mock_open()) as m_open:
            m_open.return_value.write = mock_write
            cmd_export.execute(params_export, format='json', name='json')
        with mock.patch('keepercommander.api.sync_down'), mock.patch('builtins.open', mock.mock_open()) as m_open, \
                mock.patch('keepercommander.importer.imp_exp.execute_import_folder_record', return_value=([],[])):
            m_open.return_value.read = mock_read
            self.communicate_mock.side_effect = None
            # Every API call issued during the import simply succeeds.
            self.communicate_mock.return_value = {
                'result': 'success',
                'result_code': '',
                'message': ''
            }
            with mock.patch('os.path.isfile', return_value=True):
                cmd_import.execute(param_import, format='json', name='json')
| 36.974026 | 116 | 0.650861 |
989777feb5db0b1661da61572aa4b2053cdb1db3 | 874 | py | Python | survey_app/tests/test_util.py | acrellin/survey_classifier_app | c54fa9faa9ce02e5c5fdf54c38194710496a5025 | [
"BSD-3-Clause"
] | null | null | null | survey_app/tests/test_util.py | acrellin/survey_classifier_app | c54fa9faa9ce02e5c5fdf54c38194710496a5025 | [
"BSD-3-Clause"
] | 9 | 2016-12-03T17:42:28.000Z | 2016-12-06T22:13:55.000Z | survey_app/tests/test_util.py | acrellin/survey_classifier_app | c54fa9faa9ce02e5c5fdf54c38194710496a5025 | [
"BSD-3-Clause"
] | null | null | null | from survey_app import util
import numpy.testing as npt
import pytest
import os
from operator import itemgetter
from collections import OrderedDict
def test_robust_literal_eval():
    """util.robust_literal_eval parses Python literals and passes plain
    strings through unchanged."""
    raw_params = {
        "n_estimators": "1000",
        "max_features": "auto",
        "min_weight_fraction_leaf": "0.34",
        "bootstrap": "True",
        "class_weight": "{'a': 0.2, 'b': 0.8}",
        "max_features2": "[150.3, 20, 'auto']",
    }
    expected = {
        "n_estimators": 1000,
        "max_features": "auto",
        "min_weight_fraction_leaf": 0.34,
        "bootstrap": True,
        "class_weight": {"a": 0.2, "b": 0.8},
        "max_features2": [150.3, 20, "auto"],
    }
    evaluated = {key: util.robust_literal_eval(value)
                 for key, value in raw_params.items()}
    npt.assert_equal(evaluated, expected)
| 34.96 | 72 | 0.570938 |
8bd7fd4603f6055ed81c597509ecd4535e93ee0d | 1,252 | py | Python | mmcls/datasets/pipelines/__init__.py | ChaseMonsterAway/mmclassification | 85d26b8eb2fc799599c42ca33831c40707311bd7 | [
"Apache-2.0"
] | null | null | null | mmcls/datasets/pipelines/__init__.py | ChaseMonsterAway/mmclassification | 85d26b8eb2fc799599c42ca33831c40707311bd7 | [
"Apache-2.0"
] | null | null | null | mmcls/datasets/pipelines/__init__.py | ChaseMonsterAway/mmclassification | 85d26b8eb2fc799599c42ca33831c40707311bd7 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, AutoContrast, Brightness,
ColorTransform, Contrast, Cutout, Equalize, Invert,
Posterize, RandAugment, Rotate, Sharpness, Shear,
Solarize, SolarizeAdd, Translate)
from .compose import Compose
from .formating import (Collect, ImageToTensor, ToNumpy, ToPIL, ToTensor,
Transpose, to_tensor)
from .loading import LoadImageFromFile
from .transforms import (CenterCrop, ColorJitter, Lighting, RandomCrop,
RandomErasing, RandomFlip, RandomGrayscale,
RandomResizedCrop, Resize, Filter)
# Public names re-exported by this package.
# NOTE: 'Normalize' used to be listed here but is not imported above, which
# made ``from mmcls.datasets.pipelines import *`` raise AttributeError; it
# has been removed so the list matches the actual imports.
__all__ = [
    'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToPIL', 'ToNumpy',
    'Transpose', 'Collect', 'LoadImageFromFile', 'Resize', 'CenterCrop',
    'RandomFlip', 'RandomCrop', 'RandomResizedCrop',
    'RandomGrayscale', 'Shear', 'Translate', 'Rotate', 'Invert',
    'ColorTransform', 'Solarize', 'Posterize', 'AutoContrast', 'Equalize',
    'Contrast', 'Brightness', 'Sharpness', 'AutoAugment', 'SolarizeAdd',
    'Cutout', 'RandAugment', 'Lighting', 'ColorJitter', 'RandomErasing',
    'Filter',
]
| 52.166667 | 78 | 0.646166 |
fd024f38255e47821a5f879746d2197c188fd5bf | 1,458 | py | Python | tests/test_decorator.py | WilliamMayor/flask-clacks | 491e22e34f06cdd25532ab7952cf1fa90f6bae1e | [
"MIT"
] | 1 | 2019-03-21T06:18:47.000Z | 2019-03-21T06:18:47.000Z | tests/test_decorator.py | WilliamMayor/flask-clacks | 491e22e34f06cdd25532ab7952cf1fa90f6bae1e | [
"MIT"
] | null | null | null | tests/test_decorator.py | WilliamMayor/flask-clacks | 491e22e34f06cdd25532ab7952cf1fa90f6bae1e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from flask import Flask
from flask_clacks import clacks
# Minimal Flask app used as the fixture; every test exercises each HTTP verb.
app = Flask(__name__)
methods = ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'HEAD', 'PATCH']
@app.route('/foo/', methods=methods)
@clacks
def foo():
    """Route decorated with bare ``@clacks`` (default name only)."""
    return 'bar'
@app.route('/foo2/', methods=methods)
@clacks(names=('John Dearheart', ))
def foo2():
    """Route with an extra clacks name in addition to the default."""
    return 'bar2'
@app.route('/foo3/', methods=methods)
def foo3():
    """Undecorated control route: must not gain the header."""
    return 'bar3'
@pytest.mark.parametrize('verb', [m.lower() for m in methods])
def test_has_default_overhead(verb):
    """Bare ``@clacks`` adds exactly one default GNU Terry Pratchett header."""
    with app.test_client() as client:
        response = getattr(client, verb)('/foo/')
        assert 'X-Clacks-Overhead' in response.headers
        assert len(response.headers.getlist('X-Clacks-Overhead')) == 1
        assert response.headers.get('X-Clacks-Overhead') == 'GNU Terry Pratchett'
@pytest.mark.parametrize('verb', [m.lower() for m in methods])
def test_has_extra_overhead(verb):
    """``@clacks(names=...)`` sends the default name plus the extra one."""
    with app.test_client() as client:
        response = getattr(client, verb)('/foo2/')
        assert 'X-Clacks-Overhead' in response.headers
        header_values = response.headers.getlist('X-Clacks-Overhead')
        assert len(header_values) == 2
        assert 'GNU Terry Pratchett' in header_values
        assert 'GNU John Dearheart' in header_values
@pytest.mark.parametrize('verb', [m.lower() for m in methods])
def test_has_no_overhead(verb):
    """Routes without the decorator must not emit the clacks header."""
    with app.test_client() as client:
        response = getattr(client, verb)('/foo3/')
        assert 'X-Clacks-Overhead' not in response.headers
| 27 | 77 | 0.650206 |
09c6f89a72e4ce72c854fb53e408adfa52c05164 | 2,404 | py | Python | src/utils/mapper.py | gchhablani/financial-sentiment-analysis | b18e9072f8edb9f09d0fef697892f2462d6d44e9 | [
"MIT"
] | 2 | 2021-10-03T14:24:52.000Z | 2021-11-17T14:55:53.000Z | src/utils/mapper.py | gchhablani/financial-sentiment-analysis | b18e9072f8edb9f09d0fef697892f2462d6d44e9 | [
"MIT"
] | null | null | null | src/utils/mapper.py | gchhablani/financial-sentiment-analysis | b18e9072f8edb9f09d0fef697892f2462d6d44e9 | [
"MIT"
] | 1 | 2021-10-03T14:25:36.000Z | 2021-10-03T14:25:36.000Z | """ Implement the ConfigMapper class.
See class docs for usage."""
class ConfigMapper:
    """Registry mapping ``(key, name)`` pairs to arbitrary library objects.

    Objects (classes, functions, instances) are registered under a category
    key ("datasets", "models", ...) and a name -- either directly or via the
    ``map`` decorator -- and retrieved later with ``get``.

    Examples:
        configmapper = ConfigMapper()

        from torch.optim import Adam
        configmapper.map("optimizers", "adam")(Adam)
        adam = configmapper.get("optimizers", "adam")(...)

        @configmapper.map("datasets", "squad")
        class Squad:
            ...
        # Retrieved later via configmapper.get("datasets", "squad")

    Note: only a few categories are pre-seeded; ``map`` creates new
    categories on demand.
    """

    # Registry storage shared by all instances (class attribute).
    dicts = {
        "datasets": {},
        "models": {},
        "schedulers": {},
    }

    @classmethod
    def map(cls, key, name):
        """Return a decorator that registers its target under ``(key, name)``.

        Args:
            key (str): Registry category (e.g. "datasets").
            name (str): Name the object will be registered under.
        """

        def wrap(obj):
            # setdefault replaces the previous duplicated
            # "create bucket if missing" branches with one expression.
            cls.dicts.setdefault(key, {})[name] = obj
            return obj

        return wrap

    @classmethod
    def get(cls, key, name):
        """Look up the object registered under ``(key, name)``.

        Args:
            key (str): Registry category.
            name (str): Registered name within that category.

        Raises:
            NotImplementedError: If the key or the name is not registered.

        Returns:
            object: The object stored for that key/name pair.
        """
        try:
            return cls.dicts[key][name]
        except KeyError as error:
            # Chain the original KeyError so tracebacks show the real cause.
            if key in cls.dicts:
                raise NotImplementedError(
                    "Key:{name} Undefined in Key:{key}".format(name=name, key=key)
                ) from error
            raise NotImplementedError(
                "Key:{key} Undefined".format(key=key)
            ) from error
# Shared module-level registry instance used across the code base.
configmapper = ConfigMapper()
| 25.574468 | 93 | 0.557404 |
84ed24770870472015d7806c6962b9421ca24440 | 5,492 | py | Python | celiagg/__init__.py | celiagg/celia | 2eff50ab901c297e6f6e8491ddba997e325ffcb9 | [
"MIT"
] | 18 | 2016-12-11T16:44:23.000Z | 2021-10-11T11:17:40.000Z | celiagg/__init__.py | celiagg/celia | 2eff50ab901c297e6f6e8491ddba997e325ffcb9 | [
"MIT"
] | 36 | 2016-11-26T01:59:38.000Z | 2021-04-15T07:31:34.000Z | celiagg/__init__.py | pyagg/pyagg | 2eff50ab901c297e6f6e8491ddba997e325ffcb9 | [
"MIT"
] | 4 | 2016-12-22T12:53:10.000Z | 2018-10-07T17:57:50.000Z | # The MIT License (MIT)
#
# Copyright (c) 2014-2016 WUSTL ZPLAB
# Copyright (c) 2016-2021 Celiagg Contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors: Erik Hvatum <ice.rikh@gmail.com>
# John Wiggins
import sys
from . import _celiagg
from ._celiagg import (
AggError, BSpline, BlendMode, DrawingMode, FontCache, FontWeight,
FreeTypeFont, GradientSpread, GradientUnits, GraphicsState, Image,
InterpolationMode, InnerJoin, LineCap, LineJoin, LinearGradientPaint,
Path, PatternPaint, PatternStyle, PixelFormat, RadialGradientPaint, Rect,
ShapeAtPoints, SolidPaint, TextDrawingMode, Transform, Win32Font,
)
# Query the extension module once at import time; True when the native
# library was built with text-rendering support.
HAS_TEXT = _celiagg.has_text_rendering()
def example_font():
    """Return a font usable for testing.

    On Windows this is the *name* of a font family that ships with the OS
    (GDI selects fonts by name, not file path); elsewhere it is the path to
    the TTF bundled with the package.
    """
    import pkg_resources

    if sys.platform in ('win32', 'cygwin'):
        # Our bundled font could be registered via AddFontResourceEx, but
        # that is beyond the scope of this helper.
        return 'Segoe UI'
    return pkg_resources.resource_filename(
        'celiagg', 'data/Montserrat-Regular.ttf'
    )
# Be explicit about the public API.  'Font' and the Canvas* names are
# assigned further down in this module (platform selection / factories).
__all__ = [
    'HAS_TEXT', 'example_font',
    'AggError', 'BlendMode', 'BSpline', 'DrawingMode', 'Font', 'FontCache',
    'FontWeight', 'FreeTypeFont', 'GradientSpread', 'GradientUnits',
    'GraphicsState', 'Image', 'InterpolationMode', 'InnerJoin',
    'LinearGradientPaint', 'LineCap', 'LineJoin', 'RadialGradientPaint',
    'Path', 'PatternPaint', 'PatternStyle', 'PixelFormat', 'Rect',
    'ShapeAtPoints', 'SolidPaint', 'TextDrawingMode', 'Transform', 'Win32Font',
    'CanvasG8', 'CanvasGA16', 'CanvasRGB24', 'CanvasRGBA32', 'CanvasBGRA32',
    'CanvasRGBA128',
]
# Select the correct font class for the platform: GDI-backed on Windows,
# FreeType everywhere else.
if sys.platform in ('win32', 'cygwin'):
    Font = Win32Font
else:
    Font = FreeTypeFont
# Keep a font cache for callers that don't want to mess with it.
# Created lazily by _use_global_cache() below.
__global_font_cache = None
# Template filled in by _build_canvas_factory() to produce each canvas
# factory's __doc__ (this string is runtime data, not a module docstring).
_canvas_doc_string = """{klass_name}(array, bottom_up=False, font_cache=None)
Provides AGG (Anti-Grain Geometry) drawing routines that render to the
numpy array passed as the constructor argument. Because this array is
modified in place, it must be of type ``{array_type}``, must be
C-contiguous, and must be {channel_desc}.
:param array: A ``{array_type}`` array with shape {array_shape}.
:param bottom_up: If True, the origin is the bottom left, instead of top-left
:param font_cache: A ``FontCache`` instance. Defaults to a global instance.
"""
def _use_global_cache():
    """Return the shared module-level ``FontCache``, creating it lazily."""
    global __global_font_cache

    cache = __global_font_cache
    if cache is None:
        cache = FontCache()
        __global_font_cache = cache
    return cache
def _build_canvas_factory(klass_name, array_type, array_shape, channel_desc):
    """Build a factory for the named extension canvas class.

    Preserves the v1.0.0 constructor interface: the ``font_cache`` argument
    stays optional and falls back to the shared global cache.
    """
    klass = getattr(_celiagg, klass_name)

    def factory(array, bottom_up=False, font_cache=None):
        if font_cache is None:
            font_cache = _use_global_cache()
        return klass(array, font_cache, bottom_up=bottom_up)

    factory.__doc__ = _canvas_doc_string.format(
        klass_name=klass_name,
        array_type=array_type,
        array_shape=array_shape,
        channel_desc=channel_desc,
    )
    factory.__name__ = klass_name
    return factory
# Generate the canvas classes.  Each call produces a factory function whose
# docstring is filled in from ``_canvas_doc_string`` using the arguments
# below (dtype, expected shape, channel description).
CanvasG8 = _build_canvas_factory(
    'CanvasG8',
    'numpy.uint8',
    '(H, W)',
    'MxN (1 channel: intensity)',
)
CanvasGA16 = _build_canvas_factory(
    'CanvasGA16',
    'numpy.uint8',
    '(H, W, 2)',
    'MxNx2 (2 channels: intensity and alpha)',
)
CanvasRGB24 = _build_canvas_factory(
    'CanvasRGB24',
    'numpy.uint8',
    '(H, W, 3)',  # fixed: was '(H, W, 3' (missing ')') in the generated docs
    'MxNx3 (3 channels: red, green, and blue)',
)
CanvasRGBA32 = _build_canvas_factory(
    'CanvasRGBA32',
    'numpy.uint8',
    '(H, W, 4)',
    'MxNx4 (4 channels: red, green, blue, and alpha)',
)
CanvasBGRA32 = _build_canvas_factory(
    'CanvasBGRA32',
    'numpy.uint8',
    '(H, W, 4)',
    'MxNx4 (4 channels: blue, green, red, and alpha)',
)
CanvasRGBA128 = _build_canvas_factory(
    'CanvasRGBA128',
    'numpy.float32',
    '(H, W, 4)',
    'MxNx4 (4 channels: red, green, blue, and alpha)',  # fixed: said "2 channels"
)
| 33.901235 | 79 | 0.712673 |
b8bf0c85fc38af24aa8afdf2fd3a22774e88e1f7 | 409 | py | Python | misc/defaultdict.py | gingerkirsch/playing-with-python | 5c958b22e13207e65bcaa94a982d71e2fe024e22 | [
"MIT"
] | null | null | null | misc/defaultdict.py | gingerkirsch/playing-with-python | 5c958b22e13207e65bcaa94a982d71e2fe024e22 | [
"MIT"
] | null | null | null | misc/defaultdict.py | gingerkirsch/playing-with-python | 5c958b22e13207e65bcaa94a982d71e2fe024e22 | [
"MIT"
] | null | null | null | import pprint
nodes = [
('a','b'),
('a', 'c'),
('b', 'a'),
('b', 'd'),
('c', 'a'),
('d', 'a'),
('d', 'b'),
('d', 'c')
]
graph = dict()
for from_, to in nodes:
if from_ not in graph:
graph[from_] = []
graph[from_].append(to)
pprint.pprint(graph)
import collections
graph = collections.defaultdict(list)
for from_, to in nodes:
graph[from_].append(to)
pprint.pprint(graph) | 13.633333 | 37 | 0.540342 |
d28bf82e172690c8effbd2639168c898115f2a79 | 11,924 | py | Python | docs/conf.py | sti320a/Adafruit_CircuitPython_CCS811 | 9b04575891b741530f76e84549f697e867adf7e3 | [
"MIT"
] | 29 | 2018-06-07T14:44:56.000Z | 2022-01-27T23:50:34.000Z | docs/conf.py | sti320a/Adafruit_CircuitPython_CCS811 | 9b04575891b741530f76e84549f697e867adf7e3 | [
"MIT"
] | 23 | 2017-08-10T00:23:00.000Z | 2022-02-05T02:24:21.000Z | docs/conf.py | sti320a/Adafruit_CircuitPython_CCS811 | 9b04575891b741530f76e84549f697e867adf7e3 | [
"MIT"
] | 31 | 2017-08-10T02:16:49.000Z | 2022-02-17T10:57:38.000Z | # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
#
# SPDX-License-Identifier: MIT
# Adafruit's CCS811 Library documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 11 21:37:36 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx.ext.intersphinx",
]
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
# autodoc_mock_imports = ["micropython", "adafruit_bus_device", "adafruit_register"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3.4", None),
"BusDevice": (
"https://circuitpython.readthedocs.io/projects/busdevice/en/latest/",
None,
),
"CircuitPython": ("https://circuitpython.readthedocs.io/en/latest/", None),
}
# Mock out micropython ourselves.
# import imp
# m = imp.new_module("micropython")
# m.const = lambda x: x
# sys.modules["micropython"] = m
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Adafruit's CCS811 Library"
copyright = "2016, Dean Miller, Scott Shawcroft"
author = "Dean Miller, Scott Shawcroft"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".env", "CODE_OF_CONDUCT.md"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Detect whether the build is running on Read the Docs, which injects its own
# theme configuration; only choose a theme ourselves for local builds.
on_rtd = os.environ.get("READTHEDOCS", None) == "True"

if not on_rtd:  # only import and set the theme if we're building docs locally
    try:
        import sphinx_rtd_theme

        html_theme = "sphinx_rtd_theme"
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
    except ImportError:
        # Narrowed from a bare `except:` (which also swallowed SystemExit and
        # KeyboardInterrupt); fall back to the builtin theme when
        # sphinx_rtd_theme is not installed.
        html_theme = "default"
        html_theme_path = ["."]
else:
    html_theme_path = ["."]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Adafruit\'s CCS811 Library v1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "_static/favicon.ico"
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
# htmlhelp_basename = 'AdafruitsCCS811Librarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"AdafruitsCCS811Library.tex",
"Adafruit's CCS811 Library Documentation",
"Dean Miller, Scott Shawcroft",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"AdafruitsCCS811Library23library",
"Adafruit's CCS811 Library Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"AdafruitsCCS811Library",
"Adafruit's CCS811 Library Documentation",
author,
"AdafruitsCCS811Library",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# NOTE(review): this assignment shadows the intersphinx_mapping defined near
# the top of the file; the BusDevice entry is kept here so those
# cross-references continue to resolve.
intersphinx_mapping = {
    "python": ("https://docs.python.org/3.4", None),
    "BusDevice": (
        "https://circuitpython.readthedocs.io/projects/busdevice/en/latest/",
        None,
    ),
    "CircuitPython": ("https://circuitpython.readthedocs.io/en/latest/", None),
}
| 29.22549 | 85 | 0.697165 |
bd366a96e96128cf749d2c8361794f20f4807d8b | 3,738 | py | Python | MeanShift/ML_42_MeanShift_Dynamic_bandwidth.py | vaibhav2408/machineLearningSentdex | b1b7fa75c7796cbf008877b44c3204cd5bc61960 | [
"Apache-2.0"
] | null | null | null | MeanShift/ML_42_MeanShift_Dynamic_bandwidth.py | vaibhav2408/machineLearningSentdex | b1b7fa75c7796cbf008877b44c3204cd5bc61960 | [
"Apache-2.0"
] | null | null | null | MeanShift/ML_42_MeanShift_Dynamic_bandwidth.py | vaibhav2408/machineLearningSentdex | b1b7fa75c7796cbf008877b44c3204cd5bc61960 | [
"Apache-2.0"
] | null | null | null | '''
In this machine learning tutorial, we cover the idea of a
dynamically weighted bandwidth with our Mean Shift
clustering algorithm
'''
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
import random
# Synthetic 2-D dataset: 50 points drawn around 3 blob centers.
X, y = make_blobs(n_samples=50, centers=3, n_features=2)
# X = np.array([[1, 2],
#               [1.5, 1.8],
#               [5, 8 ],
#               [8, 8],
#               [1, 0.6],
#               [9,11],
#               [8,2],
#               [10,2],
#               [9,3]])
# Repeat the colour codes so there are enough entries for many clusters.
colors = 10*["g","r","b","c","y","k"]
class Mean_Shift:
    """Mean shift clustering with a dynamically weighted bandwidth.

    Instead of a single hard-cutoff radius, every data point contributes to a
    centroid update with a weight that decreases with distance, quantized in
    ``radius_norm_step`` steps of size ``radius``.
    """

    def __init__(self, radius=None, radius_norm_step=100):
        # radius: size of one weighting step; estimated from the data in
        # fit() when not supplied.
        self.radius = radius
        self.radius_norm_step = radius_norm_step

    def fit(self, data):
        """Cluster ``data`` (array of feature vectors).

        Sets ``self.centroids`` (dict index -> centroid array) and
        ``self.classifications`` (dict index -> list of member points).
        """
        if self.radius is None:
            # Estimate a step size from the magnitude of the overall data
            # centroid: one step is 1/radius_norm_step of its norm.
            all_data_centroid = np.average(data, axis=0)
            all_data_norm = np.linalg.norm(all_data_centroid)
            self.radius = all_data_norm / self.radius_norm_step

        # Start with every data point as its own centroid.
        centroids = {i: data[i] for i in range(len(data))}

        # weights[0] is the largest weight (closest step), decreasing to 0.
        weights = list(range(self.radius_norm_step - 1, -1, -1))

        while True:
            new_centroids = []
            for i in centroids:
                in_bandwidth = []
                centroid = centroids[i]

                for featureset in data:
                    distance = np.linalg.norm(featureset - centroid)
                    # Avoid a zero distance when a point is compared to itself.
                    if distance == 0:
                        distance = 0.0000001

                    weight_index = int(distance / self.radius)
                    if weight_index > self.radius_norm_step - 1:
                        weight_index = self.radius_norm_step - 1

                    # Repeat the point (weight**2) times so the plain
                    # np.average below acts as a weighted mean.
                    in_bandwidth += (weights[weight_index] ** 2) * [featureset]

                new_centroid = np.average(in_bandwidth, axis=0)
                # Tuples are hashable, so duplicates can be removed via set().
                new_centroids.append(tuple(new_centroid))

            uniques = sorted(set(new_centroids))

            # Merge centroids that converged to within one radius of another.
            to_pop = []
            for i in uniques:
                for ii in uniques:
                    if i == ii:
                        continue
                    if np.linalg.norm(np.array(i) - np.array(ii)) <= self.radius and ii not in to_pop:
                        to_pop.append(ii)
                        break
            for dead in to_pop:
                # Each entry was appended at most once, but guard anyway
                # (replaces the original bare try/except around remove()).
                if dead in uniques:
                    uniques.remove(dead)

            # Copy so that rebuilding `centroids` leaves the snapshot intact.
            prev_centroids = dict(centroids)

            centroids = {i: np.array(u) for i, u in enumerate(uniques)}

            # Converged once no centroid moved (weights are quantized, so the
            # update map reaches an exact fixed point).
            optimized = True
            for i in centroids:
                if not np.array_equal(centroids[i], prev_centroids[i]):
                    optimized = False
                    break
            if optimized:
                break

        self.centroids = centroids

        self.classifications = {i: [] for i in range(len(self.centroids))}
        for featureset in data:
            # BUG FIX: the original computed np.linalg.norm(featureset -
            # centroid) while iterating the dict, subtracting the integer
            # *key* instead of the centroid value (compare predict() below).
            distances = [
                np.linalg.norm(featureset - self.centroids[centroid])
                for centroid in self.centroids
            ]
            classification = distances.index(min(distances))
            self.classifications[classification].append(featureset)

    def predict(self, data):
        """Return the index of the learned centroid nearest to ``data``."""
        distances = [
            np.linalg.norm(data - self.centroids[centroid])
            for centroid in self.centroids
        ]
        return distances.index(min(distances))
# Fit the clusterer on the synthetic blobs and visualise the result.
clf = Mean_Shift()
clf.fit(X)
# cluster = clf.predict([6,6])
# print(cluster)
centroids = clf.centroids
# Plot each point in the colour of its assigned cluster.
for classification in clf.classifications:
    color = colors[classification]
    for featureset in clf.classifications[classification]:
        plt.scatter(featureset[0], featureset[1], marker='x', color= color, s=150, linewidths = 5)
# Mark the learned centroids with black stars.
for c in centroids:
    plt.scatter(centroids[c][0], centroids[c][1], color='k', marker='*', s=150)
plt.show()
| 21.482759 | 94 | 0.650348 |
8408bb08b94ecc84b2c393738e36710c4850b1fb | 4,142 | py | Python | webempresa/settings.py | jhonattanrgc21/web-empresarial | 5c382874383497aa2f7a91edf56959ad2fc1ceca | [
"MIT"
] | null | null | null | webempresa/settings.py | jhonattanrgc21/web-empresarial | 5c382874383497aa2f7a91edf56959ad2fc1ceca | [
"MIT"
] | null | null | null | webempresa/settings.py | jhonattanrgc21/web-empresarial | 5c382874383497aa2f7a91edf56959ad2fc1ceca | [
"MIT"
] | null | null | null | """
Django settings for webempresa project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY', cast = str)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default = False, cast = bool)
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'ckeditor',
'contact',
'core',
'services.apps.ServicesConfig',
'social.apps.SocialConfig',
'pages.apps.PagesConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'webempresa.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social.processors.ctx_dict'
],
},
},
]
WSGI_APPLICATION = 'webempresa.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DATABASE', cast = str),
'USER': config('USER_DATABASE', cast = str),
'PASSWORD': config('PASSWORD_DATABASE', cast = str),
'HOST': config('HOST', cast = str),
'PORT': config('PORT_DATABASE', cast = int)
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'es-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Media files (user uploads).
MEDIA_URL = '/media/'
# Fixed typo: Django reads MEDIA_ROOT, so the misspelled MEDI_ROOT setting
# was silently ignored and uploads would not be stored under BASE_DIR/media.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Ckeditor
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'Custom',
'toolbar_Custom': [
['Bold', 'Italic', 'Underline'],
['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock'],
['Link', 'Unlink'],
]
}
} | 26.896104 | 140 | 0.67407 |
08092c3f35a41d046b46693b744906f423caea19 | 1,623 | py | Python | app/errors.py | kajusK/HiddenPlaces | aa976f611a419bc33f8a65f0314956ec09fe2bfd | [
"MIT"
] | null | null | null | app/errors.py | kajusK/HiddenPlaces | aa976f611a419bc33f8a65f0314956ec09fe2bfd | [
"MIT"
] | null | null | null | app/errors.py | kajusK/HiddenPlaces | aa976f611a419bc33f8a65f0314956ec09fe2bfd | [
"MIT"
] | null | null | null | """ Flask error handlers."""
from flask import render_template, request
from flask import current_app as app
from flask_login import current_user
from app.database import db
from app.models.event import EventLog, UnauthorizedEvent
def error_403(error: Exception):
    """Shows error page for user with insufficient privileges.

    Args:
        error: An exception that was raised.

    Returns:
        A page content and error code.
    """
    # current_user may be anonymous (flask_login's anonymous user has no
    # `email` attribute), so fall back instead of raising inside the handler.
    email = getattr(current_user, 'email', 'anonymous')
    app.logger.error(f'403: {request.path} by {email}:'
                     f'{str(error)}')
    # Record the unauthorized access attempt in the audit log.
    EventLog.log(current_user, UnauthorizedEvent(request.path))
    db.session.commit()
    return render_template('403.html'), 403
def error_404(error: Exception):
    """Render the Not Found (404) error page.

    Args:
        error: An exception that was raised.

    Returns:
        A page content and error code.
    """
    message = f'404: {request.path}: {str(error)}'
    app.logger.error(message)
    page = render_template('404.html')
    return page, 404
def error_500(error: Exception):
    """Render the Internal Server Error (500) page.

    Args:
        error: An exception that was raised.

    Returns:
        A page content and error code.
    """
    message = f'500: {request.path}: {str(error)}'
    app.logger.error(message)
    page = render_template('500.html')
    return page, 500
def unhandled_exception(error: Exception):
    """Render the generic error page for any uncaught exception.

    Args:
        error: An exception that was raised.

    Returns:
        A page content and error code.
    """
    message = f'Unhandled Exception: {request.path}: {str(error)}'
    # logger.exception also records the active traceback.
    app.logger.exception(message)
    page = render_template('500.html')
    return page, 500
| 27.508475 | 78 | 0.669747 |
b54b499d685f300254e670e35b315cf985d8a181 | 3,971 | py | Python | django/contrib/gis/gdal/feature.py | peteralexandercharles/django | 61c7350f41f2534daf3888709f3c987b7d779a29 | [
"BSD-3-Clause",
"0BSD"
] | null | null | null | django/contrib/gis/gdal/feature.py | peteralexandercharles/django | 61c7350f41f2534daf3888709f3c987b7d779a29 | [
"BSD-3-Clause",
"0BSD"
] | null | null | null | django/contrib/gis/gdal/feature.py | peteralexandercharles/django | 61c7350f41f2534daf3888709f3c987b7d779a29 | [
"BSD-3-Clause",
"0BSD"
] | null | null | null | from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.field import Field
from django.contrib.gis.gdal.geometries import OGRGeometry, OGRGeomType
from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api
from django.utils.encoding import force_bytes, force_str
# For more information, see the OGR C API source code:
# https://gdal.org/api/vector_c_api.html
#
# The OGR_F_* routines are relevant here.
class Feature(GDALBase):
    """
    This class wraps an OGR Feature and needs to be instantiated
    from a Layer object.
    """
    # ctypes routine used to free the wrapped OGR feature pointer.
    destructor = capi.destroy_feature
    def __init__(self, feat, layer):
        """
        Initialize Feature from a pointer and its Layer object.
        """
        if not feat:
            raise GDALException("Cannot create OGR Feature, invalid pointer given.")
        self.ptr = feat
        # Keep a reference to the owning layer; its definition pointer and
        # data source encoding are consulted by the properties below.
        self._layer = layer
    def __getitem__(self, index):
        """
        Get the Field object at the specified index, which may be either
        an integer or the Field's string label. Note that the Field object
        is not the field's _value_ -- use the `get` method instead to
        retrieve the value (e.g. an integer) instead of a Field instance.
        """
        if isinstance(index, str):
            # String label: resolve it to the numeric field index first.
            i = self.index(index)
        elif 0 <= index < self.num_fields:
            i = index
        else:
            raise IndexError(
                "Index out of range when accessing field in a feature: %s." % index
            )
        return Field(self, i)
    def __len__(self):
        "Return the count of fields in this feature."
        return self.num_fields
    def __str__(self):
        "The string name of the feature."
        return "Feature FID %d in Layer<%s>" % (self.fid, self.layer_name)
    def __eq__(self, other):
        "Do equivalence testing on the features."
        # NOTE: __eq__ is defined without __hash__, so Feature instances are
        # unhashable (Python sets __hash__ to None in this case).
        return bool(capi.feature_equal(self.ptr, other._ptr))
    # #### Feature Properties ####
    @property
    def encoding(self):
        "Return the encoding of the layer's data source."
        return self._layer._ds.encoding
    @property
    def fid(self):
        "Return the feature identifier."
        return capi.get_fid(self.ptr)
    @property
    def layer_name(self):
        "Return the name of the layer for the feature."
        name = capi.get_feat_name(self._layer._ldefn)
        return force_str(name, self.encoding, strings_only=True)
    @property
    def num_fields(self):
        "Return the number of fields in the Feature."
        return capi.get_feat_field_count(self.ptr)
    @property
    def fields(self):
        "Return a list of fields in the Feature."
        return [
            force_str(
                capi.get_field_name(capi.get_field_defn(self._layer._ldefn, i)),
                self.encoding,
                strings_only=True,
            )
            for i in range(self.num_fields)
        ]
    @property
    def geom(self):
        "Return the OGR Geometry for this Feature."
        # Retrieving the geometry pointer for the feature; clone it so the
        # returned OGRGeometry owns an independent copy.
        geom_ptr = capi.get_feat_geom_ref(self.ptr)
        return OGRGeometry(geom_api.clone_geom(geom_ptr))
    @property
    def geom_type(self):
        "Return the OGR Geometry Type for this Feature."
        return OGRGeomType(capi.get_fd_geom_type(self._layer._ldefn))
    # #### Feature Methods ####
    def get(self, field):
        """
        Return the value of the field, instead of an instance of the Field
        object. May take a string of the field name or a Field object as
        parameters.
        """
        # Accept either a Field instance (use its .name) or a plain string.
        field_name = getattr(field, "name", field)
        return self[field_name].value
    def index(self, field_name):
        "Return the index of the given field name."
        i = capi.get_field_index(self.ptr, force_bytes(field_name))
        if i < 0:
            raise IndexError("Invalid OFT field name given: %s." % field_name)
        return i
| 33.091667 | 84 | 0.631327 |
d6c2073d4c496f928bb189834018bcf5cb1ac9df | 3,131 | py | Python | experiments/mnist_cINN/cond_net.py | jlmaccal/FrEIA | 64a04cb784e19bdff69546657f602fd31835c21f | [
"MIT"
] | null | null | null | experiments/mnist_cINN/cond_net.py | jlmaccal/FrEIA | 64a04cb784e19bdff69546657f602fd31835c21f | [
"MIT"
] | null | null | null | experiments/mnist_cINN/cond_net.py | jlmaccal/FrEIA | 64a04cb784e19bdff69546657f602fd31835c21f | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import config as c
import data as color_data
class Net(nn.Module):
    """Small CNN MNIST classifier whose penultimate layer doubles as the
    conditioning network for the cINN.

    ``forward`` returns per-class log-softmax scores; ``features`` returns
    the ``c.cond_width``-dimensional representation before the final layer.
    """

    def __init__(self):
        super(Net, self).__init__()

        # Two conv/pool stages: 28x28x1 -> flattened 1024 features.
        self.conv = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3),
            nn.Conv2d(32, 64, kernel_size=3),
            nn.MaxPool2d(2),
            nn.Conv2d(64, 64, kernel_size=3),
            nn.Conv2d(64, 64, kernel_size=3),
            nn.MaxPool2d(2),
        )
        self.linear = nn.Sequential(
            nn.Dropout(),
            nn.Linear(1024, 512),
            nn.Dropout(),
            nn.Linear(512, 512),
            nn.Dropout(),
            nn.Linear(512, c.cond_width),
        )
        self.fc_final = nn.Linear(c.cond_width, 10)

    def forward(self, x):
        x = self.conv(x)
        # Flatten per sample using the actual batch dimension instead of the
        # configured c.batch_size, so partial batches also work.
        x = x.view(x.size(0), -1)
        x = self.linear(x)
        x = self.fc_final(x)
        return F.log_softmax(x, dim=1)

    def features(self, x):
        """Return the cond_width-dimensional feature vector for ``x``."""
        x = self.conv(x)
        x = x.view(x.size(0), -1)
        return self.linear(x)
# Global model instance on the GPU, plus how often train() prints progress.
model = Net().cuda()
log_interval = 25
def train():
    """Run one epoch of supervised training over the colored-MNIST loader."""
    model.train()
    loader = color_data.train_loader
    for batch_idx, (color, target, data) in enumerate(loader):
        data = data.cuda()
        target = target.long().cuda()

        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()

        if batch_idx % log_interval == 0:
            seen = batch_idx * len(data)
            total = len(loader.dataset)
            percent = 100. * batch_idx / len(loader)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, seen, total, percent, loss.item()))
# Plain (uncolored) MNIST test split, used by test() below.
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./mnist_data', train=False, transform=transforms.ToTensor()),
    batch_size=c.batch_size, shuffle=True, drop_last=True)
def test():
    """Evaluate the classifier on the MNIST test set and print loss/accuracy."""
    # Bug fix: evaluation must disable dropout. The original called
    # model.train() here, which kept the Dropout layers active and skewed
    # the reported test loss and accuracy.
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.cuda(), target.cuda()
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)

    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
if __name__ == '__main__':
    # Run as a script: train the conditioning network and save its weights.
    optimizer = optim.SGD(model.parameters(), lr=0.03, momentum=0.5)
    for epoch in range(6):
        train()
        test()
    torch.save(model.state_dict(), c.cond_net_file)
else:
    # Imported as a module: load pretrained weights when configured.
    model.train()
    if c.cond_net_file:
        model.load_state_dict(torch.load(c.cond_net_file))
| 31.94898 | 95 | 0.561482 |
b9f348fdfc79ebc5c9736bc58f9042855d47b0ee | 58,479 | py | Python | test/azure/version-tolerant/Expected/AcceptanceTests/StorageManagementClientVersionTolerant/storageversiontolerant/aio/operations/_operations.py | changlong-liu/autorest.python | 1f03e4c6a11934d385fab050dc44041f1e91e9ff | [
"MIT"
] | null | null | null | test/azure/version-tolerant/Expected/AcceptanceTests/StorageManagementClientVersionTolerant/storageversiontolerant/aio/operations/_operations.py | changlong-liu/autorest.python | 1f03e4c6a11934d385fab050dc44041f1e91e9ff | [
"MIT"
] | null | null | null | test/azure/version-tolerant/Expected/AcceptanceTests/StorageManagementClientVersionTolerant/storageversiontolerant/aio/operations/_operations.py | changlong-liu/autorest.python | 1f03e4c6a11934d385fab050dc44041f1e91e9ff | [
"MIT"
] | null | null | null | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ...operations._operations import (
build_storage_accounts_check_name_availability_request,
build_storage_accounts_create_request_initial,
build_storage_accounts_delete_request,
build_storage_accounts_get_properties_request,
build_storage_accounts_list_by_resource_group_request,
build_storage_accounts_list_keys_request,
build_storage_accounts_list_request,
build_storage_accounts_regenerate_key_request,
build_storage_accounts_update_request,
build_usage_list_request,
)
T = TypeVar("T")
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class StorageAccountsOperations:
"""StorageAccountsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client used to send HTTP requests.
        self._client = client
        # msrest serializer/deserializer pair for request and response bodies.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service client configuration (provides subscription_id, etc.).
        self._config = config
@distributed_trace_async
async def check_name_availability(self, account_name: JSONType, **kwargs: Any) -> JSONType:
"""Checks that account name is valid and is not in use.
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: JSONType
:return: JSON object
:rtype: JSONType
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
account_name = {
"name": "str", # Required.
"type": "Microsoft.Storage/storageAccounts" # Optional. Default value is
"Microsoft.Storage/storageAccounts".
}
# response body for status code(s): 200
response.json() == {
"message": "str", # Optional. Gets an error message explaining the Reason
value in more detail.
"nameAvailable": bool, # Optional. Gets a boolean value that indicates
whether the name is available for you to use. If true, the name is available. If
false, the name has already been taken or invalid and cannot be used.
"reason": "str" # Optional. Gets the reason that a storage account name
could not be used. The Reason element is only returned if NameAvailable is false.
Possible values include: "AccountNameInvalid", "AlreadyExists".
}
"""
cls = kwargs.pop("cls", None) # type: ClsType[JSONType]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
api_version = kwargs.pop("api_version", "2015-05-01-preview") # type: str
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = account_name
request = build_storage_accounts_check_name_availability_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
async def _create_initial(
self, resource_group_name: str, account_name: str, parameters: JSONType, **kwargs: Any
) -> Optional[JSONType]:
cls = kwargs.pop("cls", None) # type: ClsType[Optional[JSONType]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
api_version = kwargs.pop("api_version", "2015-05-01-preview") # type: str
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = parameters
request = build_storage_accounts_create_request_initial(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
@distributed_trace_async
async def begin_create(
self, resource_group_name: str, account_name: str, parameters: JSONType, **kwargs: Any
) -> AsyncLROPoller[JSONType]:
"""Asynchronously creates a new storage account with the specified parameters. Existing accounts
cannot be updated with this API and should instead use the Update Storage Account API. If an
account is already created and subsequent PUT request is issued with exact same set of
properties, then HTTP 200 would be returned.
:param resource_group_name: The name of the resource group within the user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide for the created account.
:type parameters: JSONType
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns JSON object
:rtype: ~azure.core.polling.AsyncLROPoller[JSONType]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
parameters = {
"id": "str", # Optional. Resource Id.
"location": "str", # Required. Resource location.
"name": "str", # Optional. Resource name.
"properties": {
"accountType": "str" # Optional. Gets or sets the account type.
Possible values include: "Standard_LRS", "Standard_ZRS", "Standard_GRS",
"Standard_RAGRS", "Premium_LRS".
},
"tags": {
"str": "str" # Optional. A set of tags. Resource tags.
},
"type": "str" # Optional. Resource type.
}
# response body for status code(s): 200
response.json() == {
"id": "str", # Optional. Resource Id.
"location": "str", # Required. Resource location.
"name": "str", # Optional. Resource name.
"properties": {
"accountType": "str", # Optional. Gets the type of the storage
account. Possible values include: "Standard_LRS", "Standard_ZRS",
"Standard_GRS", "Standard_RAGRS", "Premium_LRS".
"creationTime": "2020-02-20 00:00:00", # Optional. Gets the creation
date and time of the storage account in UTC.
"customDomain": {
"name": "str", # Optional. Gets or sets the custom domain
name. Name is the CNAME source.
"useSubDomain": bool # Optional. Indicates whether indirect
CName validation is enabled. Default value is false. This should only be
set on updates.
},
"lastGeoFailoverTime": "2020-02-20 00:00:00", # Optional. Gets the
timestamp of the most recent instance of a failover to the secondary
location. Only the most recent timestamp is retained. This element is not
returned if there has never been a failover instance. Only available if the
accountType is StandardGRS or StandardRAGRS.
"primaryEndpoints": {
"FooPoint": {
"Bar.Point": {
"RecursivePoint": ...
}
},
"blob": "str", # Optional. Gets the blob endpoint.
"dummyEndPoint": ...,
"queue": "str", # Optional. Gets the queue endpoint.
"table": "str" # Optional. Gets the table endpoint.
},
"primaryLocation": "str", # Optional. Gets the location of the
primary for the storage account.
"provisioningState": "str", # Optional. Gets the status of the
storage account at the time the operation was called. Possible values
include: "Creating", "ResolvingDNS", "Succeeded".
"secondaryEndpoints": {
"FooPoint": {
"Bar.Point": {
"RecursivePoint": ...
}
},
"blob": "str", # Optional. Gets the blob endpoint.
"dummyEndPoint": ...,
"queue": "str", # Optional. Gets the queue endpoint.
"table": "str" # Optional. Gets the table endpoint.
},
"secondaryLocation": "str", # Optional. Gets the location of the geo
replicated secondary for the storage account. Only available if the
accountType is StandardGRS or StandardRAGRS.
"statusOfPrimary": "str", # Optional. Gets the status indicating
whether the primary location of the storage account is available or
unavailable. Possible values include: "Available", "Unavailable".
"statusOfSecondary": "str" # Optional. Gets the status indicating
whether the secondary location of the storage account is available or
unavailable. Only available if the accountType is StandardGRS or
StandardRAGRS. Possible values include: "Available", "Unavailable".
},
"tags": {
"str": "str" # Optional. A set of tags. Resource tags.
},
"type": "str" # Optional. Resource type.
}
"""
api_version = kwargs.pop("api_version", "2015-05-01-preview") # type: str
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop("cls", None) # type: ClsType[JSONType]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
account_name=account_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False:
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
@distributed_trace_async
async def delete(self, resource_group_name: str, account_name: str, **kwargs: Any) -> None:
"""Deletes a storage account in Microsoft Azure.
:param resource_group_name: The name of the resource group within the user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
api_version = kwargs.pop("api_version", "2015-05-01-preview") # type: str
request = build_storage_accounts_delete_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
@distributed_trace_async
async def get_properties(self, resource_group_name: str, account_name: str, **kwargs: Any) -> JSONType:
"""Returns the properties for the specified storage account including but not limited to name,
account type, location, and account status. The ListKeys operation should be used to retrieve
storage keys.
:param resource_group_name: The name of the resource group within the user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:return: JSON object
:rtype: JSONType
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == {
"id": "str", # Optional. Resource Id.
"location": "str", # Required. Resource location.
"name": "str", # Optional. Resource name.
"properties": {
"accountType": "str", # Optional. Gets the type of the storage
account. Possible values include: "Standard_LRS", "Standard_ZRS",
"Standard_GRS", "Standard_RAGRS", "Premium_LRS".
"creationTime": "2020-02-20 00:00:00", # Optional. Gets the creation
date and time of the storage account in UTC.
"customDomain": {
"name": "str", # Optional. Gets or sets the custom domain
name. Name is the CNAME source.
"useSubDomain": bool # Optional. Indicates whether indirect
CName validation is enabled. Default value is false. This should only be
set on updates.
},
"lastGeoFailoverTime": "2020-02-20 00:00:00", # Optional. Gets the
timestamp of the most recent instance of a failover to the secondary
location. Only the most recent timestamp is retained. This element is not
returned if there has never been a failover instance. Only available if the
accountType is StandardGRS or StandardRAGRS.
"primaryEndpoints": {
"FooPoint": {
"Bar.Point": {
"RecursivePoint": ...
}
},
"blob": "str", # Optional. Gets the blob endpoint.
"dummyEndPoint": ...,
"queue": "str", # Optional. Gets the queue endpoint.
"table": "str" # Optional. Gets the table endpoint.
},
"primaryLocation": "str", # Optional. Gets the location of the
primary for the storage account.
"provisioningState": "str", # Optional. Gets the status of the
storage account at the time the operation was called. Possible values
include: "Creating", "ResolvingDNS", "Succeeded".
"secondaryEndpoints": {
"FooPoint": {
"Bar.Point": {
"RecursivePoint": ...
}
},
"blob": "str", # Optional. Gets the blob endpoint.
"dummyEndPoint": ...,
"queue": "str", # Optional. Gets the queue endpoint.
"table": "str" # Optional. Gets the table endpoint.
},
"secondaryLocation": "str", # Optional. Gets the location of the geo
replicated secondary for the storage account. Only available if the
accountType is StandardGRS or StandardRAGRS.
"statusOfPrimary": "str", # Optional. Gets the status indicating
whether the primary location of the storage account is available or
unavailable. Possible values include: "Available", "Unavailable".
"statusOfSecondary": "str" # Optional. Gets the status indicating
whether the secondary location of the storage account is available or
unavailable. Only available if the accountType is StandardGRS or
StandardRAGRS. Possible values include: "Available", "Unavailable".
},
"tags": {
"str": "str" # Optional. A set of tags. Resource tags.
},
"type": "str" # Optional. Resource type.
}
"""
cls = kwargs.pop("cls", None) # type: ClsType[JSONType]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
api_version = kwargs.pop("api_version", "2015-05-01-preview") # type: str
request = build_storage_accounts_get_properties_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
@distributed_trace_async
async def update(
self, resource_group_name: str, account_name: str, parameters: JSONType, **kwargs: Any
) -> JSONType:
"""Updates the account type or tags for a storage account. It can also be used to add a custom
domain (note that custom domains cannot be added via the Create operation). Only one custom
domain is supported per storage account. This API can only be used to update one of tags,
accountType, or customDomain per call. To update multiple of these properties, call the API
multiple times with one change per call. This call does not change the storage keys for the
account. If you want to change storage account keys, use the RegenerateKey operation. The
location and name of the storage account cannot be changed after creation.
:param resource_group_name: The name of the resource group within the user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to update on the account. Note that only one property can be
changed at a time using this API.
:type parameters: JSONType
:return: JSON object
:rtype: JSONType
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
parameters = {
"id": "str", # Optional. Resource Id.
"location": "str", # Required. Resource location.
"name": "str", # Optional. Resource name.
"properties": {
"accountType": "str", # Optional. Gets or sets the account type.
Note that StandardZRS and PremiumLRS accounts cannot be changed to other
account types, and other account types cannot be changed to StandardZRS or
PremiumLRS. Possible values include: "Standard_LRS", "Standard_ZRS",
"Standard_GRS", "Standard_RAGRS", "Premium_LRS".
"customDomain": {
"name": "str", # Optional. Gets or sets the custom domain
name. Name is the CNAME source.
"useSubDomain": bool # Optional. Indicates whether indirect
CName validation is enabled. Default value is false. This should only be
set on updates.
}
},
"tags": {
"str": "str" # Optional. A set of tags. Resource tags.
},
"type": "str" # Optional. Resource type.
}
# response body for status code(s): 200
response.json() == {
"id": "str", # Optional. Resource Id.
"location": "str", # Required. Resource location.
"name": "str", # Optional. Resource name.
"properties": {
"accountType": "str", # Optional. Gets the type of the storage
account. Possible values include: "Standard_LRS", "Standard_ZRS",
"Standard_GRS", "Standard_RAGRS", "Premium_LRS".
"creationTime": "2020-02-20 00:00:00", # Optional. Gets the creation
date and time of the storage account in UTC.
"customDomain": {
"name": "str", # Optional. Gets or sets the custom domain
name. Name is the CNAME source.
"useSubDomain": bool # Optional. Indicates whether indirect
CName validation is enabled. Default value is false. This should only be
set on updates.
},
"lastGeoFailoverTime": "2020-02-20 00:00:00", # Optional. Gets the
timestamp of the most recent instance of a failover to the secondary
location. Only the most recent timestamp is retained. This element is not
returned if there has never been a failover instance. Only available if the
accountType is StandardGRS or StandardRAGRS.
"primaryEndpoints": {
"FooPoint": {
"Bar.Point": {
"RecursivePoint": ...
}
},
"blob": "str", # Optional. Gets the blob endpoint.
"dummyEndPoint": ...,
"queue": "str", # Optional. Gets the queue endpoint.
"table": "str" # Optional. Gets the table endpoint.
},
"primaryLocation": "str", # Optional. Gets the location of the
primary for the storage account.
"provisioningState": "str", # Optional. Gets the status of the
storage account at the time the operation was called. Possible values
include: "Creating", "ResolvingDNS", "Succeeded".
"secondaryEndpoints": {
"FooPoint": {
"Bar.Point": {
"RecursivePoint": ...
}
},
"blob": "str", # Optional. Gets the blob endpoint.
"dummyEndPoint": ...,
"queue": "str", # Optional. Gets the queue endpoint.
"table": "str" # Optional. Gets the table endpoint.
},
"secondaryLocation": "str", # Optional. Gets the location of the geo
replicated secondary for the storage account. Only available if the
accountType is StandardGRS or StandardRAGRS.
"statusOfPrimary": "str", # Optional. Gets the status indicating
whether the primary location of the storage account is available or
unavailable. Possible values include: "Available", "Unavailable".
"statusOfSecondary": "str" # Optional. Gets the status indicating
whether the secondary location of the storage account is available or
unavailable. Only available if the accountType is StandardGRS or
StandardRAGRS. Possible values include: "Available", "Unavailable".
},
"tags": {
"str": "str" # Optional. A set of tags. Resource tags.
},
"type": "str" # Optional. Resource type.
}
"""
cls = kwargs.pop("cls", None) # type: ClsType[JSONType]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
api_version = kwargs.pop("api_version", "2015-05-01-preview") # type: str
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = parameters
request = build_storage_accounts_update_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
@distributed_trace_async
async def list_keys(self, resource_group_name: str, account_name: str, **kwargs: Any) -> JSONType:
"""Lists the access keys for the specified storage account.
:param resource_group_name: The name of the resource group within the user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account.
:type account_name: str
:return: JSON object
:rtype: JSONType
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == {
"key1": "str", # Optional. Gets the value of key 1.
"key2": "str" # Optional. Gets the value of key 2.
}
"""
cls = kwargs.pop("cls", None) # type: ClsType[JSONType]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
api_version = kwargs.pop("api_version", "2015-05-01-preview") # type: str
request = build_storage_accounts_list_keys_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
@distributed_trace
def list(self, **kwargs: Any) -> AsyncIterable[JSONType]:
"""Lists all the storage accounts available under the subscription. Note that storage keys are not
returned; use the ListKeys operation for this.
:return: An iterator like instance of JSON object
:rtype: ~azure.core.async_paging.AsyncItemPaged[JSONType]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == {
"nextLink": "str", # Optional. Gets the link to the next set of results.
Currently this will always be empty as the API does not support pagination.
"value": [
{
"id": "str", # Optional. Resource Id.
"location": "str", # Required. Resource location.
"name": "str", # Optional. Resource name.
"properties": {
"accountType": "str", # Optional. Gets the type of
the storage account. Possible values include: "Standard_LRS",
"Standard_ZRS", "Standard_GRS", "Standard_RAGRS", "Premium_LRS".
"creationTime": "2020-02-20 00:00:00", # Optional.
Gets the creation date and time of the storage account in UTC.
"customDomain": {
"name": "str", # Optional. Gets or sets the
custom domain name. Name is the CNAME source.
"useSubDomain": bool # Optional. Indicates
whether indirect CName validation is enabled. Default value is
false. This should only be set on updates.
},
"lastGeoFailoverTime": "2020-02-20 00:00:00", #
Optional. Gets the timestamp of the most recent instance of a
failover to the secondary location. Only the most recent timestamp is
retained. This element is not returned if there has never been a
failover instance. Only available if the accountType is StandardGRS
or StandardRAGRS.
"primaryEndpoints": {
"FooPoint": {
"Bar.Point": {
"RecursivePoint": ...
}
},
"blob": "str", # Optional. Gets the blob
endpoint.
"dummyEndPoint": ...,
"queue": "str", # Optional. Gets the queue
endpoint.
"table": "str" # Optional. Gets the table
endpoint.
},
"primaryLocation": "str", # Optional. Gets the
location of the primary for the storage account.
"provisioningState": "str", # Optional. Gets the
status of the storage account at the time the operation was called.
Possible values include: "Creating", "ResolvingDNS", "Succeeded".
"secondaryEndpoints": {
"FooPoint": {
"Bar.Point": {
"RecursivePoint": ...
}
},
"blob": "str", # Optional. Gets the blob
endpoint.
"dummyEndPoint": ...,
"queue": "str", # Optional. Gets the queue
endpoint.
"table": "str" # Optional. Gets the table
endpoint.
},
"secondaryLocation": "str", # Optional. Gets the
location of the geo replicated secondary for the storage account.
Only available if the accountType is StandardGRS or StandardRAGRS.
"statusOfPrimary": "str", # Optional. Gets the
status indicating whether the primary location of the storage account
is available or unavailable. Possible values include: "Available",
"Unavailable".
"statusOfSecondary": "str" # Optional. Gets the
status indicating whether the secondary location of the storage
account is available or unavailable. Only available if the
accountType is StandardGRS or StandardRAGRS. Possible values include:
"Available", "Unavailable".
},
"tags": {
"str": "str" # Optional. A set of tags. Resource
tags.
},
"type": "str" # Optional. Resource type.
}
]
}
"""
api_version = kwargs.pop("api_version", "2015-05-01-preview") # type: str
cls = kwargs.pop("cls", None) # type: ClsType[JSONType]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
def prepare_request(next_link=None):
if not next_link:
request = build_storage_accounts_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
)
request.url = self._client.format_url(request.url)
else:
request = build_storage_accounts_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
)
request.url = self._client.format_url(next_link)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = pipeline_response.http_response.json()
list_of_elem = deserialized["value"]
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
@distributed_trace
def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> AsyncIterable[JSONType]:
"""Lists all the storage accounts available under the given resource group. Note that storage keys
are not returned; use the ListKeys operation for this.
:param resource_group_name: The name of the resource group within the user’s subscription.
:type resource_group_name: str
:return: An iterator like instance of JSON object
:rtype: ~azure.core.async_paging.AsyncItemPaged[JSONType]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == {
"nextLink": "str", # Optional. Gets the link to the next set of results.
Currently this will always be empty as the API does not support pagination.
"value": [
{
"id": "str", # Optional. Resource Id.
"location": "str", # Required. Resource location.
"name": "str", # Optional. Resource name.
"properties": {
"accountType": "str", # Optional. Gets the type of
the storage account. Possible values include: "Standard_LRS",
"Standard_ZRS", "Standard_GRS", "Standard_RAGRS", "Premium_LRS".
"creationTime": "2020-02-20 00:00:00", # Optional.
Gets the creation date and time of the storage account in UTC.
"customDomain": {
"name": "str", # Optional. Gets or sets the
custom domain name. Name is the CNAME source.
"useSubDomain": bool # Optional. Indicates
whether indirect CName validation is enabled. Default value is
false. This should only be set on updates.
},
"lastGeoFailoverTime": "2020-02-20 00:00:00", #
Optional. Gets the timestamp of the most recent instance of a
failover to the secondary location. Only the most recent timestamp is
retained. This element is not returned if there has never been a
failover instance. Only available if the accountType is StandardGRS
or StandardRAGRS.
"primaryEndpoints": {
"FooPoint": {
"Bar.Point": {
"RecursivePoint": ...
}
},
"blob": "str", # Optional. Gets the blob
endpoint.
"dummyEndPoint": ...,
"queue": "str", # Optional. Gets the queue
endpoint.
"table": "str" # Optional. Gets the table
endpoint.
},
"primaryLocation": "str", # Optional. Gets the
location of the primary for the storage account.
"provisioningState": "str", # Optional. Gets the
status of the storage account at the time the operation was called.
Possible values include: "Creating", "ResolvingDNS", "Succeeded".
"secondaryEndpoints": {
"FooPoint": {
"Bar.Point": {
"RecursivePoint": ...
}
},
"blob": "str", # Optional. Gets the blob
endpoint.
"dummyEndPoint": ...,
"queue": "str", # Optional. Gets the queue
endpoint.
"table": "str" # Optional. Gets the table
endpoint.
},
"secondaryLocation": "str", # Optional. Gets the
location of the geo replicated secondary for the storage account.
Only available if the accountType is StandardGRS or StandardRAGRS.
"statusOfPrimary": "str", # Optional. Gets the
status indicating whether the primary location of the storage account
is available or unavailable. Possible values include: "Available",
"Unavailable".
"statusOfSecondary": "str" # Optional. Gets the
status indicating whether the secondary location of the storage
account is available or unavailable. Only available if the
accountType is StandardGRS or StandardRAGRS. Possible values include:
"Available", "Unavailable".
},
"tags": {
"str": "str" # Optional. A set of tags. Resource
tags.
},
"type": "str" # Optional. Resource type.
}
]
}
"""
api_version = kwargs.pop("api_version", "2015-05-01-preview") # type: str
cls = kwargs.pop("cls", None) # type: ClsType[JSONType]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
def prepare_request(next_link=None):
if not next_link:
request = build_storage_accounts_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
)
request.url = self._client.format_url(request.url)
else:
request = build_storage_accounts_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
)
request.url = self._client.format_url(next_link)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = pipeline_response.http_response.json()
list_of_elem = deserialized["value"]
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
@distributed_trace_async
async def regenerate_key(
self, resource_group_name: str, account_name: str, regenerate_key: JSONType, **kwargs: Any
) -> JSONType:
"""Regenerates the access keys for the specified storage account.
:param resource_group_name: The name of the resource group within the user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param regenerate_key: Specifies name of the key which should be regenerated.
:type regenerate_key: JSONType
:return: JSON object
:rtype: JSONType
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
regenerate_key = {
"keyName": "str" # Optional. Possible values include: "key1", "key2".
}
# response body for status code(s): 200
response.json() == {
"key1": "str", # Optional. Gets the value of key 1.
"key2": "str" # Optional. Gets the value of key 2.
}
"""
cls = kwargs.pop("cls", None) # type: ClsType[JSONType]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
api_version = kwargs.pop("api_version", "2015-05-01-preview") # type: str
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = regenerate_key
request = build_storage_accounts_regenerate_key_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
class UsageOperations:
    """UsageOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    def __init__(self, client, config, serializer, deserializer) -> None:
        # Hold references to the shared pipeline client, its configuration and
        # the (de)serializers; all request work is delegated to them.
        self._client = client
        self._config = config
        self._serialize = serializer
        self._deserialize = deserializer

    @distributed_trace_async
    async def list(self, **kwargs: Any) -> JSONType:
        """Gets the current usage count and the limit for the resources under the subscription.

        The response JSON carries a ``"value"`` list whose items contain
        ``"currentValue"``, ``"limit"``, ``"name"`` (localized and raw) and
        ``"unit"`` fields.

        :return: JSON object
        :rtype: JSONType
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Optional per-call deserialization callback and error-map overrides.
        cls = kwargs.pop("cls", None)  # type: ClsType[JSONType]
        api_version = kwargs.pop("api_version", "2015-05-01-preview")  # type: str
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))

        request = build_usage_list_request(
            subscription_id=self._config.subscription_id,
            api_version=api_version,
        )
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code != 200:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = response.json() if response.content else None
        return cls(pipeline_response, deserialized, {}) if cls else deserialized
| 51.028796 | 107 | 0.541323 |
aea67febcbf29a80db4e4c7be98990aa5feb77cd | 631 | py | Python | live_poll/migrations/0008_auto_20201217_0045.py | HiroshiFuu/cs-balloting | 565eb3ee88769d88b27705828c10c7b5be964ef5 | [
"MIT"
] | null | null | null | live_poll/migrations/0008_auto_20201217_0045.py | HiroshiFuu/cs-balloting | 565eb3ee88769d88b27705828c10c7b5be964ef5 | [
"MIT"
] | null | null | null | live_poll/migrations/0008_auto_20201217_0045.py | HiroshiFuu/cs-balloting | 565eb3ee88769d88b27705828c10c7b5be964ef5 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.16 on 2020-12-17 00:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django (2.2.16) migration for the ``live_poll`` app."""
    dependencies = [
        # Must be applied after the previous live_poll migration.
        ('live_poll', '0007_auto_20201217_0018'),
    ]
    operations = [
        # Model meta options only (ordering / display names).
        migrations.AlterModelOptions(
            name='livepollitem',
            options={'managed': True, 'ordering': ['poll__company', 'order'], 'verbose_name': 'Live Poll Item', 'verbose_name_plural': 'Live Poll Items'},
        ),
        # Redefines LivePollItem.text with max_length=511.
        migrations.AlterField(
            model_name='livepollitem',
            name='text',
            field=models.TextField(max_length=511),
        ),
    ]
| 27.434783 | 154 | 0.605388 |
884f5f2af5f4a49679145b95e38e70dc0d85d744 | 773 | py | Python | setup.py | eturpin/api_python | fac15d06ef2510972ed3c812bb16a675d4e30e3c | [
"MIT"
] | 6 | 2018-12-16T19:53:57.000Z | 2020-11-22T12:36:57.000Z | setup.py | eturpin/api_python | fac15d06ef2510972ed3c812bb16a675d4e30e3c | [
"MIT"
] | 6 | 2019-02-01T13:51:59.000Z | 2020-11-23T22:42:57.000Z | setup.py | eturpin/api_python | fac15d06ef2510972ed3c812bb16a675d4e30e3c | [
"MIT"
] | 8 | 2018-12-16T19:53:48.000Z | 2021-11-24T17:08:04.000Z | from distutils.core import setup
from setuptools import find_packages
# NOTE(review): this list is never passed to setup() below -- packaging is
# driven by find_packages() instead, so `packages` appears to be dead code.
packages = [
    'tsheets',
    'tsheets.repos',
    'tsheets.models',
]
# Distribution metadata for the TSheets.com API helper library.
setup(
    name='tsheets',
    version='0.3',
    description='API library helper for TSheets.com',
    long_description='Allows to use the TSheets.com API to manage the timesheets and all other related data',
    author='Kannan Ponnusamy',
    author_email ='kannan@endpoint.com',
    license='MIT',
    # Auto-discover packages, excluding the test suite from the distribution.
    packages=find_packages(exclude=['tests']),
    url='https://github.com/tsheets/api_python',
    download_url='https://github.com/tsheets/api_python/tarball/0.3',
    keywords=['api', 'rest', 'tsheets'],
    # Runtime dependencies (pinned as originally released).
    install_requires=[
        'requests>=2.7.0',
        'python-dateutil==2.4.2',
        'pytz==2015.7'
    ]
)
| 26.655172 | 109 | 0.654592 |
224e315109a7e7516f43c3945362fb59e9a0b1bd | 4,695 | py | Python | Cogs/moderation.py | Punit-Choudhary/Garuda | 85c17353c8dc7993bd4c43c8924dd532331c70df | [
"MIT"
] | 1 | 2021-11-15T14:40:10.000Z | 2021-11-15T14:40:10.000Z | Cogs/moderation.py | Punit-Choudhary/Garuda | 85c17353c8dc7993bd4c43c8924dd532331c70df | [
"MIT"
] | null | null | null | Cogs/moderation.py | Punit-Choudhary/Garuda | 85c17353c8dc7993bd4c43c8924dd532331c70df | [
"MIT"
] | 1 | 2022-01-17T07:20:38.000Z | 2022-01-17T07:20:38.000Z | import discord
import logging
from discord.ext import commands
from rich.logging import RichHandler
# setting up logging
# Message-only format: RichHandler is expected to render time/level itself.
FORMAT = "%(message)s"
logging.basicConfig(
    level="INFO", format=FORMAT, datefmt="[%x]", handlers=[RichHandler()]
)
# Module-level logger used by the cog's error paths below.
log = logging.getLogger("rich")
class ModerationCog(commands.Cog, name="Moderation Commands"):
    """Server moderation commands: kick, ban and bulk message deletion.

    Every command is gated by ``commands.has_permissions`` so it can only be
    invoked by members holding the matching Discord permission.
    """
    def __init__(self, bot):
        # The bot instance this cog is attached to.
        self.bot = bot

    @staticmethod
    async def _send_dm(member, embed):
        """Best-effort DM to ``member``.

        DMs routinely fail (closed DMs, bot blocked); such a failure must not
        prevent the moderation action itself, so it is only logged.
        """
        try:
            await member.send(embed=embed)
        except discord.HTTPException as dm_error:
            log.warning("Could not DM %s: %s", member, dm_error)

    @commands.command(name="kick", usage="<@member> [reason : optional]")
    @commands.has_permissions(kick_members=True)
    async def kick(self, ctx, member: discord.Member, *, reason=None):
        """
        Kick out a member from server
        """
        if ctx.author.id == member.id:
            # Joke response when a moderator tries to kick themselves.
            kick_fun_embed = discord.Embed(
                title="**Are you OK bro??**",
                description=f"🦅: {ctx.author.mention}, Looks like you need to visit a psychiatrist\n\
                Here is the [contact info](https://www.youtube.com/watch?v=dQw4w9WgXcQ)",
                color=0x7716f5  # Purple
            )
            await ctx.channel.send(embed=kick_fun_embed)
            return
        try:
            kick_dm_embed = discord.Embed(
                title="**You Have Been Kicked Out**",
                description=f"🦅: You have been kicked out of the **{ctx.guild.name}\n**\
                {'Because: ' + str(reason) if reason is not None else ''}",
                color=0xFF0000  # Red
            )
            # DM first (impossible once the member is gone), but never let a
            # failed DM abort the kick itself (fixes closed-DM Forbidden
            # cancelling the whole action).
            await self._send_dm(member, kick_dm_embed)
            await member.kick(reason=reason)
            kick_embed = discord.Embed(
                title=f"**Kicked {member.display_name}**",
                color=0x00FF00  # Green
            )
            kick_embed.add_field(name="Kicked Member 👢", value=member.display_name)
            kick_embed.add_field(name="Kicked By", value=ctx.author.mention)
            kick_embed.set_footer(text=f"Reason: {reason}")
            await ctx.send(embed=kick_embed)
        except Exception as e:
            # Typically discord.Forbidden: missing permission or target has a
            # higher role. Report back instead of silently printing.
            log.warning(e)
            kick_exception_embed = discord.Embed(
                title="Forbidden ⛔",
                description=f"🦅: Either I don't have permissions to kick or {member.mention} has higher role.",
                color=0xFF0000
            )
            await ctx.send(embed=kick_exception_embed)

    @commands.command(name="ban", usage="<@member> [reason : optional]")
    @commands.has_permissions(ban_members=True)
    async def ban(self, ctx, member: discord.Member, *, reason=None):
        """
        Ban a member from server
        """
        if ctx.author.id == member.id:
            # Joke response when a moderator tries to ban themselves.
            ban_fun_embed = discord.Embed(
                title="**Are you OK bro??**",
                description=f"🦅: {ctx.author.mention}, Looks like you need to visit a psychiatrist\n\
                Here is the [contact info](https://www.youtube.com/watch?v=dQw4w9WgXcQ)",
                color=0x7716f5  # Purple
            )
            await ctx.channel.send(embed=ban_fun_embed)
            return
        try:
            ban_dm_embed = discord.Embed(
                title="**You Have Been Banned**",
                description=f"🦅: You have been Banned from **{ctx.guild.name}\n**\
                {'Because: ' + str(reason) if reason is not None else ''}",
                color=0xFF0000  # Red
            )
            # Best-effort notification; a failed DM must not block the ban.
            await self._send_dm(member, ban_dm_embed)
            await member.ban(reason=reason)
            ban_embed = discord.Embed(
                title=f"**Banned {member.display_name}**",
                color=0x00FF00  # Green
            )
            ban_embed.add_field(name="Banned Member 🚫", value=member.display_name)
            ban_embed.add_field(name="Banned By", value=ctx.author.mention)
            ban_embed.set_footer(text=f"Reason: {reason}")
            ban_embed.set_thumbnail(url="https://media.tenor.co/videos/16d1dd77408db03a6c78210391957fc5/mp4")
            await ctx.send(embed=ban_embed)
        except Exception as e:
            # log.warn is a deprecated alias of log.warning.
            log.warning(e)
            # Fixed message: this is the *ban* handler ("permissons to kick"
            # was both a typo and the wrong verb).
            ban_exception_embed = discord.Embed(
                title="Forbidden ⛔",
                description=f"🦅: Either I don't have permissions to ban or {member.mention} has higher role.",
                color=0xFF0000
            )
            await ctx.send(embed=ban_exception_embed)

    @commands.command(name="clear", usage="<number of messages> | default 100")
    @commands.has_permissions(manage_messages=True)
    async def clear(self, ctx, amount=100):
        """
        Purge requested no. of messages from channel.
        """
        # +1 so the invoking command message itself is removed as well.
        await ctx.channel.purge(limit=int(amount) + 1)
        clear_embed = discord.Embed(
            title="Message deleted!",
            description=f"🦅: I've purged {amount} messages | **{ctx.author.name}**",
            color=0x00FF00  # Green
        )
        await ctx.channel.send(embed=clear_embed, delete_after=10)
# Setup
def setup(bot):
    # Extension entry point (discord.py convention, presumably invoked via
    # bot.load_extension): registers this cog on the bot.
    bot.add_cog(ModerationCog(bot)) | 35.839695 | 110 | 0.569755 |
0654602ebc81da97b1d70799a1974c3c10707d6f | 1,470 | py | Python | scripts/artifacts/installedappsVending.py | mastenp/ALEAPP | b8ac061b82d5e6df7953331cf7bb7165f7552d84 | [
"MIT"
] | 1 | 2020-10-06T20:28:16.000Z | 2020-10-06T20:28:16.000Z | scripts/artifacts/installedappsVending.py | mastenp/ALEAPP | b8ac061b82d5e6df7953331cf7bb7165f7552d84 | [
"MIT"
] | null | null | null | scripts/artifacts/installedappsVending.py | mastenp/ALEAPP | b8ac061b82d5e6df7953331cf7bb7165f7552d84 | [
"MIT"
] | null | null | null | import sqlite3
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv
def get_installedappsVending(files_found, report_folder, seeker):
    """Report installed apps from the Play Store (Vending) appstate database.

    files_found: matched file paths; only the first is used and is expected
    to be a SQLite database containing an ``appstate`` table.
    report_folder: destination folder for the generated HTML and TSV output.
    seeker: unused in this artifact (kept for the common plugin signature).
    """
    file_found = str(files_found[0])
    db = sqlite3.connect(file_found)
    cursor = db.cursor()
    # Rows with first_download_ms == 0 are reported as the literal "0";
    # otherwise the millisecond timestamp is converted to a datetime string.
    # NOTE(review): the double-quoted "0"/"unixepoch" rely on SQLite's
    # fallback of treating unknown double-quoted identifiers as strings.
    cursor.execute('''
    SELECT
    package_name,
    title,
    CASE
        first_download_ms
        WHEN
            "0"
        THEN
            "0"
        ELSE
            datetime(first_download_ms / 1000, "unixepoch")
    END AS "fdl",
    install_reason,
    auto_update
    FROM appstate
    ''')
    all_rows = cursor.fetchall()
    usageentries = len(all_rows)
    if usageentries > 0:
        # Emit the HTML artifact report...
        report = ArtifactHtmlReport('Installed Apps (Vending)')
        report.start_artifact_report(report_folder, 'Installed Apps (Vending)')
        report.add_script()
        data_headers = ('Package Name', 'Title', 'First Download', 'Install Reason', 'Auto Update?')
        data_list = []
        for row in all_rows:
            data_list.append((row[0], row[1], row[2], row[3], row[4]))
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()
        # ...plus a TSV export of the same table.
        tsvname = f'installed apps vending'
        tsv(report_folder, data_headers, data_list, tsvname)
    else:
        logfunc('No Installed Apps data available')
    db.close()
    return | 30 | 100 | 0.605442 |
67766c78681dcb3f3c0e441ff447667d25102c3b | 222 | py | Python | webium/webium/controls/webelement.py | WandyYing/vscode-remote-webium | c6291cc6f72cbc9b323221a0ef4bbba3134f7f60 | [
"Apache-2.0"
] | null | null | null | webium/webium/controls/webelement.py | WandyYing/vscode-remote-webium | c6291cc6f72cbc9b323221a0ef4bbba3134f7f60 | [
"Apache-2.0"
] | null | null | null | webium/webium/controls/webelement.py | WandyYing/vscode-remote-webium | c6291cc6f72cbc9b323221a0ef4bbba3134f7f60 | [
"Apache-2.0"
] | null | null | null | from selenium.webdriver.remote.webelement import WebElement as selenium_WebElement
class WebElement(selenium_WebElement):
    """Project-level element type extending Selenium's remote WebElement.

    Adds no behaviour of its own; exists so webium controls can subclass a
    single local base type.
    """
    def __init__(self, *args, **kwargs):
        # Delegate construction unchanged to the Selenium base class.
        super(WebElement, self).__init__(*args, **kwargs) | 37 | 82 | 0.765766 |
e0b07e55487bcd52b1f1673ee0e9f57980762a95 | 22,016 | py | Python | train.py | Dan-hbd/NMTGMinor | 84e59ac8391ee78852d7c71afc60c3c8b8e3d44d | [
"MIT"
] | 1 | 2021-06-28T06:26:36.000Z | 2021-06-28T06:26:36.000Z | train.py | Dan-hbd/NMTGMinor | 84e59ac8391ee78852d7c71afc60c3c8b8e3d44d | [
"MIT"
] | null | null | null | train.py | Dan-hbd/NMTGMinor | 84e59ac8391ee78852d7c71afc60c3c8b8e3d44d | [
"MIT"
] | null | null | null | from __future__ import division
import onmt
import onmt.markdown
import onmt.modules
import argparse
import torch
import time, datetime
from onmt.train_utils.trainer import XETrainer
from onmt.modules.loss import NMTLossFunc, NMTAndCTCLossFunc
from onmt.model_factory import build_model, optimize_model
from onmt.bayesian_factory import build_model as build_bayesian_model
from onmt.constants import add_tokenidx
from options import make_parser
from collections import defaultdict
import os
import numpy as np
# Command-line options are parsed once at import time into the module-level
# `opt`, which main() below reads.
parser = argparse.ArgumentParser(description='train.py')
onmt.markdown.add_md_help_argument(parser)
# Please look at the options file to see the options regarding models and data
parser = make_parser(parser)
opt = parser.parse_args()
# An ugly hack to have weight norm on / off
onmt.constants.weight_norm = opt.weight_norm
onmt.constants.checkpointing = opt.checkpointing
onmt.constants.max_position_length = opt.max_position_length
# Use static dropout if checkpointing > 0
if opt.checkpointing > 0:
    onmt.constants.static = True
if torch.cuda.is_available() and not opt.gpus:
    print("WARNING: You have a CUDA device, should run with -gpus 0")
# Seed the (CPU) RNG for reproducibility.
torch.manual_seed(opt.seed)
def numpy_to_torch(tensor_list):
    """Convert every numpy array in *tensor_list* into a torch tensor.

    Entries that are not ``np.ndarray`` (e.g. already-converted tensors or
    None placeholders) are passed through unchanged; order is preserved and
    a new list is returned.
    """
    return [torch.from_numpy(item) if isinstance(item, np.ndarray) else item
            for item in tensor_list]
def main():
    """Load data, build the model and loss function, and run single-GPU training.

    Two data layouts are supported:
      * single dataset (``not opt.multi_dataset``): either a pickled
        ``.train.pt`` file ('bin'/'raw' formats) or memory-mapped / scp
        indexed files ('mmem', 'scp', 'scpmem').
      * multiple datasets (``opt.multi_dataset``): every ``train.N`` /
        ``valid.N`` sub-directory next to ``opt.data`` is loaded into a list
        of datasets.
    Relies on the module-level ``opt`` produced by the argument parser above.
    Raises NotImplementedError for unsupported format/streaming/multi-GPU
    combinations.
    """
    if not opt.multi_dataset:
        if opt.data_format in ['bin', 'raw']:
            start = time.time()
            if opt.data.endswith(".train.pt"):
                print("Loading data from '%s'" % opt.data)
                dataset = torch.load(opt.data)
            else:
                print("Loading data from %s" % opt.data + ".train.pt")
                dataset = torch.load(opt.data + ".train.pt")
            elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
            print("Done after %s" % elapse)
            dicts = dataset['dicts']

            # For backward compatibility: older data files may miss keys such
            # as 'src_lang', so wrap in defaultdicts returning None.
            train_dict = defaultdict(lambda: None, dataset['train'])
            valid_dict = defaultdict(lambda: None, dataset['valid'])

            if train_dict['src_lang'] is not None:
                assert 'langs' in dicts
                train_src_langs = train_dict['src_lang']
                train_tgt_langs = train_dict['tgt_lang']
            else:
                # allocate new languages (bilingual case: one language id each)
                dicts['langs'] = {'src': 0, 'tgt': 1}
                train_src_langs = list()
                train_tgt_langs = list()
                train_src_langs.append(torch.Tensor([dicts['langs']['src']]))
                train_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))

            if not opt.streaming:
                train_data = onmt.Dataset(numpy_to_torch(train_dict['src']), numpy_to_torch(train_dict['tgt']),
                                          train_dict['src_sizes'], train_dict['tgt_sizes'],
                                          train_src_langs, train_tgt_langs,
                                          batch_size_words=opt.batch_size_words,
                                          data_type=dataset.get("type", "text"), sorting=True,
                                          batch_size_sents=opt.batch_size_sents,
                                          multiplier=opt.batch_size_multiplier,
                                          augment=opt.augment_speech, sa_f=opt.sa_f, sa_t=opt.sa_t,
                                          upsampling=opt.upsampling,
                                          num_split=len(opt.gpus),
                                          cleaning=True)
            else:
                train_data = onmt.StreamDataset(train_dict['src'], train_dict['tgt'],
                                                train_src_langs, train_tgt_langs,
                                                batch_size_words=opt.batch_size_words,
                                                data_type=dataset.get("type", "text"), sorting=True,
                                                batch_size_sents=opt.batch_size_sents,
                                                multiplier=opt.batch_size_multiplier,
                                                augment=opt.augment_speech,
                                                upsampling=opt.upsampling)

            if valid_dict['src_lang'] is not None:
                assert 'langs' in dicts
                valid_src_langs = valid_dict['src_lang']
                valid_tgt_langs = valid_dict['tgt_lang']
            else:
                # allocate new languages (bilingual case)
                valid_src_langs = list()
                valid_tgt_langs = list()
                valid_src_langs.append(torch.Tensor([dicts['langs']['src']]))
                valid_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))

            if not opt.streaming:
                valid_data = onmt.Dataset(numpy_to_torch(valid_dict['src']), numpy_to_torch(valid_dict['tgt']),
                                          valid_dict['src_sizes'], valid_dict['tgt_sizes'],
                                          valid_src_langs, valid_tgt_langs,
                                          batch_size_words=opt.batch_size_words,
                                          data_type=dataset.get("type", "text"), sorting=True,
                                          batch_size_sents=opt.batch_size_sents,
                                          upsampling=opt.upsampling,
                                          cleaning=True)
            else:
                valid_data = onmt.StreamDataset(numpy_to_torch(valid_dict['src']), numpy_to_torch(valid_dict['tgt']),
                                                valid_src_langs, valid_tgt_langs,
                                                batch_size_words=opt.batch_size_words,
                                                data_type=dataset.get("type", "text"), sorting=True,
                                                batch_size_sents=opt.batch_size_sents,
                                                upsampling=opt.upsampling)

            print(' * number of training sentences. %d' % len(dataset['train']['src']))
            print(' * maximum batch size (words per batch). %d' % opt.batch_size_words)

        elif opt.data_format in ['scp', 'scpmem', 'mmem']:
            print("Loading memory mapped data files ....")
            start = time.time()
            from onmt.data.mmap_indexed_dataset import MMapIndexedDataset
            from onmt.data.scp_dataset import SCPIndexDataset

            dicts = torch.load(opt.data + ".dict.pt")
            if opt.data_format in ['scp', 'scpmem']:
                audio_data = torch.load(opt.data + ".scp_path.pt")

            # allocate languages if not present in the dictionaries
            if 'langs' not in dicts:
                dicts['langs'] = {'src': 0, 'tgt': 1}
            else:
                print(dicts['langs'])

            train_path = opt.data + '.train'
            if opt.data_format in ['scp', 'scpmem']:
                train_src = SCPIndexDataset(audio_data['train'], concat=opt.concat)
            else:
                train_src = MMapIndexedDataset(train_path + '.src')
            train_tgt = MMapIndexedDataset(train_path + '.tgt')

            # check the lang files if they exist (in the case of multi-lingual models)
            if os.path.exists(train_path + '.src_lang.bin'):
                assert 'langs' in dicts
                train_src_langs = MMapIndexedDataset(train_path + '.src_lang')
                train_tgt_langs = MMapIndexedDataset(train_path + '.tgt_lang')
            else:
                train_src_langs = list()
                train_tgt_langs = list()
                # Allocate a Tensor(1) for the bilingual case
                train_src_langs.append(torch.Tensor([dicts['langs']['src']]))
                train_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))

            # check the length files if they exist
            if os.path.exists(train_path + '.src_sizes.npy'):
                train_src_sizes = np.load(train_path + '.src_sizes.npy')
                train_tgt_sizes = np.load(train_path + '.tgt_sizes.npy')
            else:
                train_src_sizes, train_tgt_sizes = None, None

            if opt.encoder_type == 'audio':
                data_type = 'audio'
            else:
                data_type = 'text'

            if not opt.streaming:
                train_data = onmt.Dataset(train_src,
                                          train_tgt,
                                          train_src_sizes, train_tgt_sizes,
                                          train_src_langs, train_tgt_langs,
                                          batch_size_words=opt.batch_size_words,
                                          data_type=data_type, sorting=True,
                                          batch_size_sents=opt.batch_size_sents,
                                          multiplier=opt.batch_size_multiplier,
                                          src_align_right=opt.src_align_right,
                                          augment=opt.augment_speech, sa_f=opt.sa_f, sa_t=opt.sa_t,
                                          upsampling=opt.upsampling,
                                          cleaning=True, verbose=True)
            else:
                # note: streaming training data is deliberately unsorted
                train_data = onmt.StreamDataset(train_src,
                                                train_tgt,
                                                train_src_langs, train_tgt_langs,
                                                batch_size_words=opt.batch_size_words,
                                                data_type=data_type, sorting=False,
                                                batch_size_sents=opt.batch_size_sents,
                                                multiplier=opt.batch_size_multiplier,
                                                upsampling=opt.upsampling)

            valid_path = opt.data + '.valid'
            if opt.data_format in ['scp', 'scpmem']:
                valid_src = SCPIndexDataset(audio_data['valid'], concat=opt.concat)
            else:
                valid_src = MMapIndexedDataset(valid_path + '.src')
            valid_tgt = MMapIndexedDataset(valid_path + '.tgt')

            if os.path.exists(valid_path + '.src_lang.bin'):
                assert 'langs' in dicts
                valid_src_langs = MMapIndexedDataset(valid_path + '.src_lang')
                valid_tgt_langs = MMapIndexedDataset(valid_path + '.tgt_lang')
            else:
                valid_src_langs = list()
                valid_tgt_langs = list()
                # Allocation one for the bilingual case
                valid_src_langs.append(torch.Tensor([dicts['langs']['src']]))
                valid_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))

            # check the length files if they exist
            if os.path.exists(valid_path + '.src_sizes.npy'):
                valid_src_sizes = np.load(valid_path + '.src_sizes.npy')
                valid_tgt_sizes = np.load(valid_path + '.tgt_sizes.npy')
            else:
                valid_src_sizes, valid_tgt_sizes = None, None

            if not opt.streaming:
                valid_data = onmt.Dataset(valid_src, valid_tgt,
                                          valid_src_sizes, valid_tgt_sizes,
                                          valid_src_langs, valid_tgt_langs,
                                          batch_size_words=opt.batch_size_words,
                                          data_type=data_type, sorting=True,
                                          batch_size_sents=opt.batch_size_sents,
                                          src_align_right=opt.src_align_right,
                                          cleaning=True, verbose=True, debug=True,
                                          num_split=len(opt.gpus))
            else:
                # for validation data, we have to go through sentences (very slow but to ensure correctness)
                valid_data = onmt.StreamDataset(valid_src, valid_tgt,
                                                valid_src_langs, valid_tgt_langs,
                                                batch_size_words=opt.batch_size_words,
                                                data_type=data_type, sorting=True,
                                                batch_size_sents=opt.batch_size_sents)

            elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
            print("Done after %s" % elapse)
        else:
            raise NotImplementedError

        print(' * number of sentences in training data: %d' % train_data.size())
        print(' * number of sentences in validation data: %d' % valid_data.size())

    else:
        print("[INFO] Reading multiple dataset ...")
        dicts = torch.load(opt.data + ".dict.pt")

        root_dir = os.path.dirname(opt.data)

        print("Loading training data ...")

        train_dirs, valid_dirs = dict(), dict()

        # scan the data directory to find the training data:
        # sub-directories named train.<idx> / valid.<idx>
        for dir_ in os.listdir(root_dir):
            if os.path.isdir(os.path.join(root_dir, dir_)):
                if str(dir_).startswith("train"):
                    idx = int(dir_.split(".")[1])
                    train_dirs[idx] = dir_
                if dir_.startswith("valid"):
                    idx = int(dir_.split(".")[1])
                    valid_dirs[idx] = dir_

        train_sets, valid_sets = list(), list()

        for (idx_, dir_) in sorted(train_dirs.items()):
            data_dir = os.path.join(root_dir, dir_)
            print("[INFO] Loading training data %i from %s" % (idx_, dir_))

            if opt.data_format in ['bin', 'raw']:
                raise NotImplementedError

            elif opt.data_format in ['scp', 'scpmem', 'mmem']:
                from onmt.data.mmap_indexed_dataset import MMapIndexedDataset
                from onmt.data.scp_dataset import SCPIndexDataset

                if opt.data_format in ['scp', 'scpmem']:
                    audio_data = torch.load(os.path.join(data_dir, "data.scp_path.pt"))
                    src_data = SCPIndexDataset(audio_data, concat=opt.concat)
                else:
                    src_data = MMapIndexedDataset(os.path.join(data_dir, "data.src"))
                tgt_data = MMapIndexedDataset(os.path.join(data_dir, "data.tgt"))

                src_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.src_lang'))
                tgt_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.tgt_lang'))

                if os.path.exists(os.path.join(data_dir, 'data.src_sizes.npy')):
                    src_sizes = np.load(os.path.join(data_dir, 'data.src_sizes.npy'))
                    tgt_sizes = np.load(os.path.join(data_dir, 'data.tgt_sizes.npy'))
                else:
                    # BUGFIX: was "src_sizes, sizes = None, None", which left
                    # tgt_sizes undefined and raised NameError in the
                    # onmt.Dataset call below whenever the .npy files are absent.
                    src_sizes, tgt_sizes = None, None

                if opt.encoder_type == 'audio':
                    data_type = 'audio'
                else:
                    data_type = 'text'

                if not opt.streaming:
                    train_data = onmt.Dataset(src_data,
                                              tgt_data,
                                              src_sizes, tgt_sizes,
                                              src_lang_data, tgt_lang_data,
                                              batch_size_words=opt.batch_size_words,
                                              data_type=data_type, sorting=True,
                                              batch_size_sents=opt.batch_size_sents,
                                              multiplier=opt.batch_size_multiplier,
                                              src_align_right=opt.src_align_right,
                                              augment=opt.augment_speech, sa_f=opt.sa_f, sa_t=opt.sa_t,
                                              upsampling=opt.upsampling,
                                              cleaning=True, verbose=True,
                                              num_split=len(opt.gpus))
                    train_sets.append(train_data)
                else:
                    print("Multi-dataset not implemented for Streaming tasks.")
                    raise NotImplementedError

        for (idx_, dir_) in sorted(valid_dirs.items()):
            data_dir = os.path.join(root_dir, dir_)
            print("[INFO] Loading validation data %i from %s" % (idx_, dir_))

            if opt.data_format in ['bin', 'raw']:
                raise NotImplementedError

            elif opt.data_format in ['scp', 'scpmem', 'mmem']:
                if opt.data_format in ['scp', 'scpmem']:
                    audio_data = torch.load(os.path.join(data_dir, "data.scp_path.pt"))
                    src_data = SCPIndexDataset(audio_data, concat=opt.concat)
                else:
                    src_data = MMapIndexedDataset(os.path.join(data_dir, "data.src"))
                tgt_data = MMapIndexedDataset(os.path.join(data_dir, "data.tgt"))

                src_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.src_lang'))
                tgt_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.tgt_lang'))

                if os.path.exists(os.path.join(data_dir, 'data.src_sizes.npy')):
                    src_sizes = np.load(os.path.join(data_dir, 'data.src_sizes.npy'))
                    tgt_sizes = np.load(os.path.join(data_dir, 'data.tgt_sizes.npy'))
                else:
                    # BUGFIX: same "src_sizes, sizes" NameError as in the
                    # training loop above.
                    src_sizes, tgt_sizes = None, None

                if opt.encoder_type == 'audio':
                    data_type = 'audio'
                else:
                    data_type = 'text'

                if not opt.streaming:
                    valid_data = onmt.Dataset(src_data, tgt_data,
                                              src_sizes, tgt_sizes,
                                              src_lang_data, tgt_lang_data,
                                              batch_size_words=opt.batch_size_words,
                                              data_type=data_type, sorting=True,
                                              batch_size_sents=opt.batch_size_sents,
                                              src_align_right=opt.src_align_right,
                                              cleaning=True, verbose=True, debug=True,
                                              num_split=len(opt.gpus))
                    valid_sets.append(valid_data)
                else:
                    raise NotImplementedError

        train_data = train_sets
        valid_data = valid_sets

    if opt.load_from:
        # Resume: reuse the dictionaries stored inside the checkpoint.
        checkpoint = torch.load(opt.load_from, map_location=lambda storage, loc: storage)
        print("* Loading dictionaries from the checkpoint")
        dicts = checkpoint['dicts']
    else:
        dicts['tgt'].patch(opt.patch_vocab_multiplier)
        checkpoint = None

    # Put the vocab mask from dicts to the datasets
    for data in [train_data, valid_data]:
        if isinstance(data, list):
            for i, data_ in enumerate(data):
                data_.set_mask(dicts['tgt'].vocab_mask)
                data[i] = data_
        else:
            data.set_mask(dicts['tgt'].vocab_mask)

    if "src" in dicts:
        print(' * vocabulary size. source = %d; target = %d' %
              (dicts['src'].size(), dicts['tgt'].size()))
    else:
        print('[INFO] vocabulary size. target = %d' %
              (dicts['tgt'].size()))

    print('* Building model...')

    # update special tokens
    onmt.constants = add_tokenidx(opt, onmt.constants, dicts)

    if not opt.fusion:
        if opt.bayes_by_backprop:
            model = build_bayesian_model(opt, dicts)
        else:
            model = build_model(opt, dicts)

        # Building the loss function: NCE or the standard (label-smoothed) XE.
        if opt.nce:
            from onmt.modules.nce.nce_loss import NCELoss
            loss_function = NCELoss(opt.model_size, dicts['tgt'].size(), noise_ratio=opt.nce_noise,
                                    logz=9, label_smoothing=opt.label_smoothing)
        else:
            loss_function = NMTLossFunc(opt.model_size, dicts['tgt'].size(),
                                        label_smoothing=opt.label_smoothing,
                                        mirror=opt.mirror_loss,
                                        fast_xentropy=opt.fast_xentropy)

        # This function replaces modules with the more optimized counterparts so that it can run faster
        # Currently exp with LayerNorm
        if not opt.memory_profiling:
            optimize_model(model, fp16=opt.fp16)
    else:
        from onmt.model_factory import build_fusion
        from onmt.modules.loss import FusionLoss
        model = build_fusion(opt, dicts)
        loss_function = FusionLoss(dicts['tgt'].size(), label_smoothing=opt.label_smoothing)

    n_params = sum(p.nelement() for p in model.parameters())
    print('* number of parameters: %d' % n_params)

    if not opt.debugging and len(opt.gpus) == 1:
        if opt.bayes_by_backprop:
            from onmt.train_utils.bayes_by_backprop_trainer import BayesianTrainer
            trainer = BayesianTrainer(model, loss_function, train_data, valid_data, dicts, opt)
        else:
            trainer = XETrainer(model, loss_function, train_data, valid_data, dicts, opt)
    else:
        print("MultiGPU is not supported by this train.py. Use train_distributed.py with the same arguments "
              "for MultiGPU training")
        raise NotImplementedError

    trainer.run(checkpoint=checkpoint)
if __name__ == "__main__":
    # Entry point when invoked as a script: python train.py <options>
    main()
| 45.487603 | 117 | 0.518895 |
3762758b1e8bd039851d1e19f4523ff05622462c | 1,215 | py | Python | scripts/filter_design/fir_comparison_example.py | CyrilCadoux/dsp-labs | 8ef53fccb87ad842051d9032d127a86c1172155f | [
"MIT"
] | 18 | 2019-08-19T13:00:36.000Z | 2022-01-14T02:32:15.000Z | scripts/filter_design/fir_comparison_example.py | CyrilCadoux/dsp-labs | 8ef53fccb87ad842051d9032d127a86c1172155f | [
"MIT"
] | 2 | 2018-12-25T18:01:03.000Z | 2018-12-26T19:13:47.000Z | scripts/filter_design/fir_comparison_example.py | CyrilCadoux/dsp-labs | 8ef53fccb87ad842051d9032d127a86c1172155f | [
"MIT"
] | 10 | 2018-12-05T07:18:48.000Z | 2021-08-12T13:46:08.000Z | import numpy as np
from scipy.io import wavfile
import os
import matplotlib.pyplot as plt
from scipy.signal import firwin, lfilter
from utils import add_offset
def apply_fir(audio, b):
    """Filter *audio* with FIR taps *b*, write the result to WAV, and plot it.

    Relies on module-level globals defined below: ``fs`` (sample rate),
    ``data_type`` (original sample dtype), ``time_vec`` and ``ALPHA``.
    The output file name encodes the number of taps.
    """
    y_fir = lfilter(b, a=1, x=audio)
    # Cast back to the original sample dtype before writing to disk.
    wavfile.write(filename="audio_hpf_{}.wav".format(len(b)),
                  rate=fs, data=y_fir.astype(data_type))
    # Overlay original and filtered signals for visual comparison.
    plt.figure()
    plt.plot(time_vec, audio, 'tab:blue', label="original", alpha=ALPHA)
    plt.plot(time_vec, y_fir, 'tab:orange', label="{}-tap".format(len(b)), alpha=ALPHA)
    plt.xlabel("Time [seconds]")
    plt.grid()
    f = plt.gca()
    # Hide amplitude tick labels, keeping only the zero line.
    f.axes.get_yaxis().set_ticks([0])
    plt.legend()
# Path to the bundled test speech sample, relative to this script.
fp = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "_templates", "speech.wav")
OFFSET = 5000
ALPHA = 0.75  # transparency for plot
# load signal
fs, audio = wavfile.read(fp)
data_type = audio.dtype
time_vec = np.arange(len(audio)) / fs
# add artificial offset
audio_off = add_offset(audio, OFFSET)
# apply high-pass FIR filters of two different lengths for comparison
fir_order = [40, 320]
cutoff = 100.
nyq = 0.5 * fs
# firwin expects the cutoff normalized to the Nyquist frequency.
fc_norm = cutoff / nyq
for order in fir_order:
    # pass_zero=False gives a high-pass design; numtaps = order + 1.
    b = firwin(numtaps=order+1, cutoff=fc_norm, window="hanning", pass_zero=False)
    apply_fir(audio_off, b)
plt.show()
| 25.851064 | 96 | 0.680658 |
42074a70bf97d48524155928441d4c65e4da609b | 76,932 | py | Python | components/espcoredump/espcoredump.py | HallLabs/esp-idf | 826b9f63805ab9b955cd2129673117080c6d29ba | [
"Apache-2.0"
] | 1 | 2020-10-25T13:14:10.000Z | 2020-10-25T13:14:10.000Z | components/espcoredump/espcoredump.py | HallLabs/esp-idf | 826b9f63805ab9b955cd2129673117080c6d29ba | [
"Apache-2.0"
] | 1 | 2020-03-04T02:32:52.000Z | 2020-03-04T02:36:39.000Z | components/espcoredump/espcoredump.py | HallLabs/esp-idf | 826b9f63805ab9b955cd2129673117080c6d29ba | [
"Apache-2.0"
] | 1 | 2020-04-28T00:28:23.000Z | 2020-04-28T00:28:23.000Z | #!/usr/bin/env python
#
# ESP32 core dump Utility
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from hashlib import sha256
import sys
try:
from builtins import zip
from builtins import str
from builtins import range
from past.utils import old_div
from builtins import object
except ImportError:
print('Import has failed probably because of the missing "future" package. Please install all the packages for '
'interpreter {} from the $IDF_PATH/requirements.txt file.'.format(sys.executable))
sys.exit(1)
import os
import argparse
import subprocess
import tempfile
import struct
import errno
import base64
import binascii
import logging
import re
idf_path = os.getenv('IDF_PATH')
if idf_path:
sys.path.insert(0, os.path.join(idf_path, 'components', 'esptool_py', 'esptool'))
try:
import esptool
except ImportError:
print("esptool is not found! Set proper $IDF_PATH in environment.")
sys.exit(2)
__version__ = "0.4-dev"
if os.name == 'nt':
CLOSE_FDS = False
else:
CLOSE_FDS = True
# Sentinel stored when a valid EXCCAUSE value cannot be recovered
# (e.g. the crashed task's frame is corrupted).
INVALID_CAUSE_VALUE = 0xFFFF
# Exception cause dictionary to get translation of exccause register
# From 4.4.1.5 table 4-64 Exception Causes of Xtensa
# Instruction Set Architecture (ISA) Reference Manual
# Maps EXCCAUSE value -> (mnemonic, human-readable description).
xtensa_exception_cause_dict = {
    0: ("IllegalInstructionCause", "Illegal instruction"),
    1: ("SyscallCause", "SYSCALL instruction"),
    2: ("InstructionFetchErrorCause", "Processor internal physical address or data error during instruction fetch. (See EXCVADDR for more information)"),
    3: ("LoadStoreErrorCause", "Processor internal physical address or data error during load or store. (See EXCVADDR for more information)"),
    4: ("Level1InterruptCause", "Level-1 interrupt as indicated by set level-1 bits in the INTERRUPT register"),
    5: ("AllocaCause", "MOVSP instruction, if caller`s registers are not in the register file"),
    6: ("IntegerDivideByZeroCause", "QUOS: QUOU, REMS: or REMU divisor operand is zero"),
    8: ("PrivilegedCause", "Attempt to execute a privileged operation when CRING ? 0"),
    9: ("LoadStoreAlignmentCause", "Load or store to an unaligned address. (See EXCVADDR for more information)"),
    12: ("InstrPIFDataErrorCause", "PIF data error during instruction fetch. (See EXCVADDR for more information)"),
    13: ("LoadStorePIFDataErrorCause", "Synchronous PIF data error during LoadStore access. (See EXCVADDR for more information)"),
    14: ("InstrPIFAddrErrorCause", "PIF address error during instruction fetch. (See EXCVADDR for more information)"),
    15: ("LoadStorePIFAddrErrorCause", "Synchronous PIF address error during LoadStore access. (See EXCVADDR for more information)"),
    16: ("InstTLBMissCause", "Error during Instruction TLB refill. (See EXCVADDR for more information)"),
    17: ("InstTLBMultiHitCause", "Multiple instruction TLB entries matched. (See EXCVADDR for more information)"),
    18: ("InstFetchPrivilegeCause", "An instruction fetch referenced a virtual address at a ring level less than CRING. (See EXCVADDR for more information)"),
    20: ("InstFetchProhibitedCause", "An instruction fetch referenced a page mapped with an attribute that does not permit instruction fetch (EXCVADDR)."),
    24: ("LoadStoreTLBMissCause", "Error during TLB refill for a load or store. (See EXCVADDR for more information)"),
    25: ("LoadStoreTLBMultiHitCause", "Multiple TLB entries matched for a load or store. (See EXCVADDR for more information)"),
    26: ("LoadStorePrivilegeCause", "A load or store referenced a virtual address at a ring level less than CRING. (See EXCVADDR for more information)"),
    28: ("LoadProhibitedCause", "A load referenced a page mapped with an attribute that does not permit loads. (See EXCVADDR for more information)"),
    29: ("StoreProhibitedCause", "A store referenced a page mapped with an attribute that does not permit stores [Region Protection Option or MMU Option]."),
    32: ("Coprocessor0Disabled", "Coprocessor 0 instruction when cp0 disabled"),
    33: ("Coprocessor1Disabled", "Coprocessor 1 instruction when cp1 disabled"),
    34: ("Coprocessor2Disabled", "Coprocessor 2 instruction when cp2 disabled"),
    35: ("Coprocessor3Disabled", "Coprocessor 3 instruction when cp3 disabled"),
    36: ("Coprocessor4Disabled", "Coprocessor 4 instruction when cp4 disabled"),
    37: ("Coprocessor5Disabled", "Coprocessor 5 instruction when cp5 disabled"),
    38: ("Coprocessor6Disabled", "Coprocessor 6 instruction when cp6 disabled"),
    39: ("Coprocessor7Disabled", "Coprocessor 7 instruction when cp7 disabled"),
    INVALID_CAUSE_VALUE: ("InvalidCauseRegister", "Invalid EXCCAUSE register value or current task is broken and was skipped")}
class ESPCoreDumpError(RuntimeError):
    """Base exception for all core dump processing errors.

    The original explicit ``__init__`` only forwarded its single ``message``
    argument to ``RuntimeError``; the inherited constructor already does
    exactly that, so the override is removed.
    """
    pass
class BinStruct(object):
    """Base class mapping a fixed binary layout onto named attributes.

    Subclasses declare the layout via two class attributes:

    * ``fields`` -- tuple of attribute names, in packing order
    * ``format`` -- the matching ``struct`` format string

    Example: a little-endian structure with a 2-byte ``f1`` and 4-byte ``f2``::

        class SomeStruct(BinStruct):
            fields = ("f1", "f2")
            format = "<HL"

        s = SomeStruct()
        s.f1 = 1
        s.f2 = 10
        open('some_struct.bin', 'wb').write(s.dump())
    """
    def __init__(self, buf=None):
        """Unpack fields from *buf*; a zero-filled buffer is used when omitted."""
        cls = self.__class__
        if buf is None:
            buf = b'\0' * self.sizeof()
        values = struct.unpack(cls.format, buf[:self.sizeof()])
        for attr, value in zip(cls.fields, values):
            setattr(self, attr, value)
    def sizeof(self):
        """Return the packed byte size of this structure."""
        return struct.calcsize(self.__class__.format)
    def dump(self):
        """Pack the current field values into their binary representation."""
        cls = self.__class__
        return struct.pack(cls.format, *(getattr(self, attr) for attr in cls.fields))
class Elf32FileHeader(BinStruct):
    """ELF32 file header (Elf32_Ehdr), little-endian layout."""
    fields = ("e_ident",
              "e_type",
              "e_machine",
              "e_version",
              "e_entry",
              "e_phoff",
              "e_shoff",
              "e_flags",
              "e_ehsize",
              "e_phentsize",
              "e_phnum",
              "e_shentsize",
              "e_shnum",
              "e_shstrndx")
    format = "<16sHHLLLLLHHHHHH"
    def __init__(self, buf=None):
        """Parse *buf*, or build a sane default LSB 32-bit ELF header."""
        fresh = buf is None
        super(Elf32FileHeader, self).__init__(buf)
        if fresh:
            # Pre-populate identification bytes for a 32-bit little-endian ELF
            self.e_ident = b"\x7fELF\1\1\1\0\0\0\0\0\0\0\0\0"
            self.e_version = ESPCoreDumpElfFile.EV_CURRENT
            self.e_ehsize = self.sizeof()
class Elf32ProgramHeader(BinStruct):
    """ELF32 program header (Elf32_Phdr), little-endian layout.

    Field order follows the ELF specification; packing/unpacking is handled
    by the BinStruct machinery via ``format``.
    """
    fields = ("p_type",      # segment type (PT_* constants)
              "p_offset",    # file offset of the segment data
              "p_vaddr",     # virtual address of segment in memory
              "p_paddr",     # physical address (unused here; mirrors p_vaddr)
              "p_filesz",    # size of segment data in the file
              "p_memsz",     # size of segment in memory
              "p_flags",     # permission flags (PF_R / PF_W / PF_X)
              "p_align")     # required alignment
    format = "<LLLLLLLL"
class Elf32NoteDesc(object):
    """One ELF32 note entry: (namesz, descsz, type) header followed by the
    NUL-terminated name and the descriptor payload, each padded to 4 bytes.
    """
    def __init__(self, name, type, desc):
        """Store the note name, numeric type and descriptor payload."""
        self.name = name
        self.type = type
        self.desc = desc
    def dump(self):
        """Serialize this note to bytes, applying 4-byte padding rules."""
        raw_name = bytearray(self.name, encoding='ascii') + b'\0'
        header = struct.pack("<LLL", len(raw_name), len(self.desc), self.type)
        padded_name = raw_name + ((4 - len(raw_name)) % 4) * b'\0'
        padded_desc = self.desc + ((4 - len(self.desc)) % 4) * b'\0'
        return header + padded_name + padded_desc
    def read(self, data):
        """Parse one note from *data* into self; return bytes consumed."""
        hdr_sz = struct.calcsize("<LLL")
        name_len, desc_len, self.type = struct.unpack("<LLL", data[:hdr_sz])
        name_len_padded = name_len + ((4 - name_len) % 4)
        # name_len counts the trailing NUL, which is not part of the string
        self.name = struct.unpack("<%ds" % (name_len - 1), data[hdr_sz:hdr_sz + name_len - 1])[0].decode('ascii')
        self.desc = data[hdr_sz + name_len_padded:hdr_sz + name_len_padded + desc_len]
        desc_len_padded = desc_len + ((4 - desc_len) % 4)
        return hdr_sz + name_len_padded + desc_len_padded
class XtensaPrStatus(BinStruct):
    """Xtensa program status structure"""
    # Layout mirrors the elf_prstatus structure consumed by GDB from
    # PT_NOTE "CORE" entries (signal info + process ids + timing fields).
    # The general-purpose register dump is appended separately after this
    # structure when the note is built.
    fields = ("si_signo", "si_code", "si_errno",  # signal that stopped the task
              "pr_cursig",    # current signal
              "pr_pad0",
              "pr_sigpend",   # pending signals mask
              "pr_sighold",   # held signals mask
              "pr_pid",       # task id (TCB address is used as a pseudo-pid)
              "pr_ppid",
              "pr_pgrp",
              "pr_sid",
              "pr_utime",     # user/system time fields (unused, zero)
              "pr_stime",
              "pr_cutime",
              "pr_cstime")
    format = "<3LHHLLLLLLQQQQ"
class EspCoreDumpTaskStatus(BinStruct):
    """Core dump status structure"""
    # task status flags for note
    TASK_STATUS_CORRECT = 0x00           # TCB and stack look valid
    TASK_STATUS_TCB_CORRUPTED = 0x01     # TCB address is in a fake/invalid area
    TASK_STATUS_STACK_CORRUPTED = 0x02   # stack base is in a fake/invalid area
    fields = ("task_index",        # ordinal of the task in the dump
              "task_flags",        # TASK_STATUS_* bitmask
              "task_tcb_addr",     # address of the task control block
              "task_stack_start",  # base address of the task stack
              "task_stack_len",    # stack length (4-byte aligned)
              "task_name")         # fixed 16-byte task name buffer
    format = "<LLLLL16s"
class ESPCoreDumpSegment(esptool.ImageSegment):
    """ Wrapper class for a program segment in core ELF file, has a segment
        type and flags as well as the common properties of an ImageSegment.
    """
    # segment flags (ELF p_flags bits)
    PF_X = 0x1  # Execute
    PF_W = 0x2  # Write
    PF_R = 0x4  # Read

    def __init__(self, addr, data, type, flags):
        """Create a program segment at *addr* with payload *data*.

        *type* and *flags* are the ELF p_type / p_flags values.
        """
        super(ESPCoreDumpSegment, self).__init__(addr, data)
        self.flags = flags
        self.type = type

    def __repr__(self):
        """Returns string representation of program segment"""
        return "%s %s %s" % (self.type, self.attr_str(), super(ESPCoreDumpSegment, self).__repr__())

    def attr_str(self):
        """Return an 'RWX'-style permission string; unset flags render
        as spaces so columns stay aligned in listings.

        Rewritten as a flag table to avoid shadowing the builtin ``str``.
        """
        return "".join(ch if self.flags & bit else ' '
                       for bit, ch in ((self.PF_R, 'R'),
                                       (self.PF_W, 'W'),
                                       (self.PF_X, 'X')))
class ESPCoreDumpSection(esptool.ELFSection):
    """ Wrapper class for a section in core ELF file, has a section
        flags as well as the common properties of an esptool.ELFSection.
    """
    # section flags (ELF sh_flags bits)
    SHF_WRITE = 0x1
    SHF_ALLOC = 0x2
    SHF_EXECINSTR = 0x4

    def __init__(self, name, addr, data, flags):
        """Create a section *name* at *addr* with payload *data* and
        ELF sh_flags *flags*.
        """
        super(ESPCoreDumpSection, self).__init__(name, addr, data)
        self.flags = flags

    def __repr__(self):
        """Returns string representation of section"""
        return "%s %s" % (super(ESPCoreDumpSection, self).__repr__(), self.attr_str())

    def attr_str(self):
        """Return the section attribute string: an unconditional leading 'R'
        followed by W/X/A flag characters (space when unset), matching the
        original output format.

        Rewritten as a flag table to avoid shadowing the builtin ``str``.
        """
        return "R" + "".join(ch if self.flags & bit else ' '
                             for bit, ch in ((self.SHF_WRITE, 'W'),
                                             (self.SHF_EXECINSTR, 'X'),
                                             (self.SHF_ALLOC, 'A')))
class ESPCoreDumpElfFile(esptool.ELFFile):
    """ Wrapper class for core dump ELF file

    Parses an existing ELF core file (sections + program segments) and can
    also be built up in memory (add_program_segment/add_aux_segment) and
    written back out with dump().
    """
    # extra regs IDs used in EXTRA_INFO note
    REG_EXCCAUSE_IDX = 0
    REG_EXCVADDR_IDX = 1
    REG_EPS2_IDX = 2
    REG_EPS3_IDX = 3
    REG_EPS4_IDX = 4
    REG_EPS5_IDX = 5
    REG_EPS6_IDX = 6
    REG_EPS7_IDX = 7
    REG_EPC1_IDX = 8
    REG_EPC2_IDX = 9
    REG_EPC3_IDX = 10
    REG_EPC4_IDX = 11
    REG_EPC5_IDX = 12
    REG_EPC6_IDX = 13
    REG_EPC7_IDX = 14
    # ELF file type
    ET_NONE = 0x0  # No file type
    ET_REL = 0x1  # Relocatable file
    ET_EXEC = 0x2  # Executable file
    ET_DYN = 0x3  # Shared object file
    ET_CORE = 0x4  # Core file
    # ELF file version
    EV_NONE = 0x0
    EV_CURRENT = 0x1
    # ELF file machine type
    EM_NONE = 0x0
    EM_XTENSA = 0x5E
    # section types
    SEC_TYPE_PROGBITS = 0x01
    SEC_TYPE_STRTAB = 0x03
    # special section index
    SHN_UNDEF = 0x0
    # program segment types
    PT_NULL = 0x0
    PT_LOAD = 0x1
    PT_DYNAMIC = 0x2
    PT_INTERP = 0x3
    PT_NOTE = 0x4
    PT_SHLIB = 0x5
    PT_PHDR = 0x6

    def __init__(self, name=None):
        """Constructor for core dump ELF file

        With *name* the file is parsed immediately (via the esptool.ELFFile
        constructor, which calls back into _read_elf_file); without it an
        empty in-memory core file is created.
        """
        if name:
            super(ESPCoreDumpElfFile, self).__init__(name)
        else:
            self.sections = []
            self.program_segments = []
            self.aux_segments = []
            self.e_type = self.ET_NONE
            self.e_machine = self.EM_NONE

    def _read_elf_file(self, f):
        """Reads core dump from ELF file

        Validates the magic and machine type, then loads section and
        program-header tables. Raises ESPCoreDumpError on malformed input.
        """
        # read the ELF file header
        LEN_FILE_HEADER = 0x34
        try:
            header = f.read(LEN_FILE_HEADER)
            (ident,type,machine,_version,
             self.entrypoint,phoff,shoff,_flags,
             _ehsize, phentsize,phnum,_shentsize,
             shnum,shstrndx) = struct.unpack("<16sHHLLLLLHHHHHH", header)
        except struct.error as e:
            raise ESPCoreDumpError("Failed to read a valid ELF header from %s: %s" % (f.name, e))
        # bytearray([...]) makes the magic check work on both py2 and py3
        if bytearray([ident[0]]) != b'\x7f' or ident[1:4] != b'ELF':
            raise ESPCoreDumpError("%s has invalid ELF magic header" % f.name)
        if machine != self.EM_XTENSA:
            raise ESPCoreDumpError("%s does not appear to be an Xtensa ELF file. e_machine=%04x" % (f.name, machine))
        self.e_type = type
        self.e_machine = machine
        self.sections = []
        self.program_segments = []
        self.aux_segments = []
        if shnum > 0:
            self._read_sections(f, shoff, shstrndx)
        if phnum > 0:
            self._read_program_segments(f, phoff, phentsize, phnum)

    def _read_sections(self, f, section_header_offs, shstrndx):
        """Reads core dump sections from ELF file

        Only PROGBITS sections with a non-zero load address are kept;
        their names are resolved via the string table at index *shstrndx*.
        """
        f.seek(section_header_offs)
        section_header = f.read()
        LEN_SEC_HEADER = 0x28
        if len(section_header) == 0:
            raise ESPCoreDumpError("No section header found at offset %04x in ELF file." % section_header_offs)
        if len(section_header) % LEN_SEC_HEADER != 0:
            logging.warning('Unexpected ELF section header length %04x is not mod-%02x' % (len(section_header),LEN_SEC_HEADER))
        # walk through the section header and extract all sections
        section_header_offsets = range(0, len(section_header), LEN_SEC_HEADER)
        def read_section_header(offs):
            name_offs,sec_type,flags,lma,sec_offs,size = struct.unpack_from("<LLLLLL", section_header[offs:])
            return (name_offs, sec_type, flags, lma, size, sec_offs)
        all_sections = [read_section_header(offs) for offs in section_header_offsets]
        prog_sections = [s for s in all_sections if s[1] == esptool.ELFFile.SEC_TYPE_PROGBITS]
        # search for the string table section
        if not shstrndx * LEN_SEC_HEADER in section_header_offsets:
            raise ESPCoreDumpError("ELF file has no STRTAB section at shstrndx %d" % shstrndx)
        _,sec_type,_,_,sec_size,sec_offs = read_section_header(shstrndx * LEN_SEC_HEADER)
        if sec_type != esptool.ELFFile.SEC_TYPE_STRTAB:
            logging.warning('ELF file has incorrect STRTAB section type 0x%02x' % sec_type)
        f.seek(sec_offs)
        string_table = f.read(sec_size)
        # build the real list of ELFSections by reading the actual section names from the
        # string table section, and actual data for each section from the ELF file itself
        def lookup_string(offs):
            raw = string_table[offs:]
            return raw[:raw.index(b'\x00')]
        def read_data(offs,size):
            f.seek(offs)
            return f.read(size)
        prog_sections = [ESPCoreDumpSection(lookup_string(n_offs), lma, read_data(offs, size), flags)
                         for (n_offs, _type, flags, lma, size, offs) in prog_sections if lma != 0]
        self.sections = prog_sections

    def _read_program_segments(self, f, seg_table_offs, entsz, num):
        """Reads core dump program segments from ELF file

        Loadable segments (non-zero vaddr) go to self.program_segments;
        PT_NOTE segments with vaddr 0 go to self.aux_segments.
        """
        f.seek(seg_table_offs)
        seg_table = f.read(entsz * num)
        LEN_SEG_HEADER = 0x20
        if len(seg_table) == 0:
            raise ESPCoreDumpError("No program header table found at offset %04x in ELF file." % seg_table_offs)
        if len(seg_table) % LEN_SEG_HEADER != 0:
            logging.warning('Unexpected ELF program header table length %04x is not mod-%02x' % (len(seg_table),LEN_SEG_HEADER))
        # walk through the program segment table and extract all segments
        seg_table_offs = range(0, len(seg_table), LEN_SEG_HEADER)
        def read_program_header(offs):
            type,offset,vaddr,_paddr,filesz,_memsz,flags,_align = struct.unpack_from("<LLLLLLLL", seg_table[offs:])
            return (type,offset,vaddr,filesz,flags)
        prog_segments = [read_program_header(offs) for offs in seg_table_offs]
        # build the real list of ImageSegment by reading actual data for each segment from the ELF file itself
        def read_data(offs,size):
            f.seek(offs)
            return f.read(size)
        # read loadable segments
        self.program_segments = [ESPCoreDumpSegment(vaddr, read_data(offset, filesz), type, flags)
                                 for (type, offset, vaddr, filesz,flags) in prog_segments if vaddr != 0]
        self.aux_segments = [ESPCoreDumpSegment(vaddr, read_data(offset, filesz), type, flags)
                             for (type, offset, vaddr, filesz, flags) in prog_segments if type == ESPCoreDumpElfFile.PT_NOTE and vaddr == 0]

    def add_program_segment(self, addr, data, type, flags):
        """Adds new program segment

        Raises ESPCoreDumpError when the new [addr, addr+len) region overlaps
        an existing segment at either boundary.
        NOTE(review): a new region that fully encloses an existing one slips
        past both checks -- confirm whether that case can occur upstream.
        """
        # TODO: currently merging with existing segments is not supported
        data_sz = len(data)
        # check for overlapping and merge if needed
        if addr != 0 and data_sz != 0:
            for ps in self.program_segments:
                seg_len = len(ps.data)
                if addr >= ps.addr and addr < (ps.addr + seg_len):
                    raise ESPCoreDumpError("Can not add overlapping region [%x..%x] to ELF file. Conflict with existing [%x..%x]." %
                                           (addr, addr + data_sz - 1, ps.addr, ps.addr + seg_len - 1))
                if (addr + data_sz) > ps.addr and (addr + data_sz) <= (ps.addr + seg_len):
                    raise ESPCoreDumpError("Can not add overlapping region [%x..%x] to ELF file. Conflict with existing [%x..%x]." %
                                           (addr, addr + data_sz - 1, ps.addr, ps.addr + seg_len - 1))
        # append
        self.program_segments.append(ESPCoreDumpSegment(addr, data, type, flags))

    def add_aux_segment(self, data, type, flags):
        """Adds new note segment

        Auxiliary (note) segments carry no load address, hence addr=0.
        """
        self.aux_segments.append(ESPCoreDumpSegment(0, data, type, flags))

    def write_program_headers(self, f, off, segs):
        """Write a program header for each segment in *segs* to *f*.

        *off* is the file offset where the first segment's data will live;
        returns the offset just past the last segment's data.
        """
        for seg in segs:
            phdr = Elf32ProgramHeader()
            phdr.p_type = seg.type
            phdr.p_offset = off
            phdr.p_vaddr = seg.addr
            phdr.p_paddr = phdr.p_vaddr  # TODO
            phdr.p_filesz = len(seg.data)
            phdr.p_memsz = phdr.p_filesz  # TODO
            phdr.p_flags = seg.flags
            phdr.p_align = 0  # TODO
            f.write(phdr.dump())
            off += phdr.p_filesz
        return off

    def dump(self, f):
        """Write core dump contents to file

        Layout: ELF header, program header table (loadable then aux
        segments), then the segment payloads in the same order.
        """
        # TODO: currently dumps only program segments.
        # dumping sections is not supported yet
        # write ELF header
        ehdr = Elf32FileHeader()
        ehdr.e_type = self.e_type
        ehdr.e_machine = self.e_machine
        ehdr.e_entry = 0
        ehdr.e_phoff = ehdr.sizeof()
        ehdr.e_shoff = 0
        ehdr.e_flags = 0
        ehdr.e_phentsize = Elf32ProgramHeader().sizeof()
        ehdr.e_phnum = len(self.program_segments) + len(self.aux_segments)
        ehdr.e_shentsize = 0
        ehdr.e_shnum = 0
        ehdr.e_shstrndx = self.SHN_UNDEF
        f.write(ehdr.dump())
        # write program header table
        cur_off = ehdr.e_ehsize + ehdr.e_phnum * ehdr.e_phentsize
        cur_off = self.write_program_headers(f, cur_off, self.program_segments)
        cur_off = self.write_program_headers(f, cur_off, self.aux_segments)
        # write program segments
        for segment in self.program_segments:
            f.write(segment.data)
        # write aux program segments
        for segment in self.aux_segments:
            f.write(segment.data)
class ESPCoreDumpLoaderError(ESPCoreDumpError):
    """Raised for errors while loading or parsing a raw core dump image.

    The explicit ``__init__`` of the original implementation only forwarded
    its ``message`` argument to the base class; the inherited constructor
    already does that, so the override is removed.
    """
    pass
class ESPCoreDumpLoader(object):
    """Core dump loader base class

    Subclasses supply ``self.fcore`` (a readable file object over the raw
    dump) and ``self.fcore_name`` (temp-file path, or None). This class
    converts the raw binary dump format into an ELF core file that GDB
    understands.
    """
    # known core dump image format versions
    ESP32_COREDUMP_VERSION_BIN = 1
    ESP32_COREDUMP_VERSION_ELF_CRC32 = 2
    ESP32_COREDUMP_VERSION_ELF_SHA256 = 3
    # note type ids used in generated PT_NOTE segments
    ESP_CORE_DUMP_INFO_TYPE = 8266
    ESP_CORE_DUMP_TASK_INFO_TYPE = 678
    ESP_CORE_DUMP_EXTRA_INFO_TYPE = 677
    ESP_COREDUMP_CURR_TASK_MARKER = 0xdeadbeef
    # binary layout of the raw dump: global header, per-task header,
    # memory segment header, note header, and trailing checksum
    ESP32_COREDUMP_HDR_FMT = '<5L'
    ESP32_COREDUMP_HDR_SZ = struct.calcsize(ESP32_COREDUMP_HDR_FMT)
    ESP32_COREDUMP_TSK_HDR_FMT = '<3L'
    ESP32_COREDUMP_TSK_HDR_SZ = struct.calcsize(ESP32_COREDUMP_TSK_HDR_FMT)
    ESP32_COREDUMP_MEM_SEG_HDR_FMT = '<2L'
    ESP32_COREDUMP_MEM_SEG_HDR_SZ = struct.calcsize(ESP32_COREDUMP_MEM_SEG_HDR_FMT)
    ESP32_COREDUMP_NOTE_HDR_FMT = '<3L'
    ESP32_COREDUMP_NOTE_HDR_SZ = struct.calcsize(ESP32_COREDUMP_NOTE_HDR_FMT)
    ESP32_COREDUMP_CRC_FMT = '<L'
    ESP32_COREDUMP_CRC_SZ = struct.calcsize(ESP32_COREDUMP_CRC_FMT)
    ESP32_COREDUMP_SHA256_FMT = '32c'
    ESP32_COREDUMP_SHA256_SZ = struct.calcsize(ESP32_COREDUMP_SHA256_FMT)

    def __init__(self):
        """Base constructor for core dump loader
        """
        self.fcore = None

    def _get_registers_from_stack(self, data, grows_down):
        """Returns list of registers (in GDB format) from xtensa stack frame

        *data* is the raw task stack; the saved frame sits at its start.
        Returns (regs, extra_regs) where regs is the GDB register array and
        extra_regs maps REG_*_IDX ids to values for the EXTRA_INFO note.
        """
        # from "gdb/xtensa-tdep.h"
        # typedef struct
        # {
        # 0 xtensa_elf_greg_t pc;
        # 1 xtensa_elf_greg_t ps;
        # 2 xtensa_elf_greg_t lbeg;
        # 3 xtensa_elf_greg_t lend;
        # 4 xtensa_elf_greg_t lcount;
        # 5 xtensa_elf_greg_t sar;
        # 6 xtensa_elf_greg_t windowstart;
        # 7 xtensa_elf_greg_t windowbase;
        # 8..63 xtensa_elf_greg_t reserved[8+48];
        # 64 xtensa_elf_greg_t ar[64];
        # } xtensa_elf_gregset_t;
        REG_PC_IDX = 0
        REG_PS_IDX = 1
        REG_LB_IDX = 2
        REG_LE_IDX = 3
        REG_LC_IDX = 4
        REG_SAR_IDX = 5
        # REG_WS_IDX = 6
        # REG_WB_IDX = 7
        REG_AR_START_IDX = 64
        # REG_AR_NUM = 64
        # FIXME: acc to xtensa_elf_gregset_t number of regs must be 128,
        # but gdb complanis when it less then 129
        REG_NUM = 129
        # offsets (in 32-bit words) into the solicited (XT_SOL_*) and
        # exception (XT_STK_*) stack frame layouts used by FreeRTOS
        # XT_SOL_EXIT = 0
        XT_SOL_PC = 1
        XT_SOL_PS = 2
        # XT_SOL_NEXT = 3
        XT_SOL_AR_START = 4
        XT_SOL_AR_NUM = 4
        # XT_SOL_FRMSZ = 8
        XT_STK_EXIT = 0
        XT_STK_PC = 1
        XT_STK_PS = 2
        XT_STK_AR_START = 3
        XT_STK_AR_NUM = 16
        XT_STK_SAR = 19
        XT_STK_EXCCAUSE = 20
        XT_STK_EXCVADDR = 21
        XT_STK_LBEG = 22
        XT_STK_LEND = 23
        XT_STK_LCOUNT = 24
        XT_STK_FRMSZ = 25
        extra_regs = {ESPCoreDumpElfFile.REG_EPS2_IDX: 0, ESPCoreDumpElfFile.REG_EPS3_IDX: 0,
                      ESPCoreDumpElfFile.REG_EPS4_IDX: 0, ESPCoreDumpElfFile.REG_EPS5_IDX: 0,
                      ESPCoreDumpElfFile.REG_EPS6_IDX: 0, ESPCoreDumpElfFile.REG_EPS7_IDX: 0,
                      ESPCoreDumpElfFile.REG_EPC1_IDX: 0, ESPCoreDumpElfFile.REG_EPC2_IDX: 0,
                      ESPCoreDumpElfFile.REG_EPC3_IDX: 0, ESPCoreDumpElfFile.REG_EPC4_IDX: 0,
                      ESPCoreDumpElfFile.REG_EPC5_IDX: 0, ESPCoreDumpElfFile.REG_EPC6_IDX: 0,
                      ESPCoreDumpElfFile.REG_EPC7_IDX: 0}
        regs = [0] * REG_NUM
        # TODO: support for growing up stacks
        if not grows_down:
            raise ESPCoreDumpLoaderError("Growing up stacks are not supported for now!")
        ex_struct = "<%dL" % XT_STK_FRMSZ
        if len(data) < struct.calcsize(ex_struct):
            raise ESPCoreDumpLoaderError("Too small stack to keep frame: %d bytes!" % len(data))
        stack = struct.unpack(ex_struct, data[:struct.calcsize(ex_struct)])
        # Stack frame type indicator is always the first item
        rc = stack[XT_STK_EXIT]
        if rc != 0:
            # exception frame: full register set was saved
            regs[REG_PC_IDX] = stack[XT_STK_PC]
            regs[REG_PS_IDX] = stack[XT_STK_PS]
            for i in range(XT_STK_AR_NUM):
                regs[REG_AR_START_IDX + i] = stack[XT_STK_AR_START + i]
            regs[REG_SAR_IDX] = stack[XT_STK_SAR]
            regs[REG_LB_IDX] = stack[XT_STK_LBEG]
            regs[REG_LE_IDX] = stack[XT_STK_LEND]
            regs[REG_LC_IDX] = stack[XT_STK_LCOUNT]
            # FIXME: crashed and some running tasks (e.g. prvIdleTask) have EXCM bit set
            # and GDB can not unwind callstack properly (it implies not windowed call0)
            if regs[REG_PS_IDX] & (1 << 5):
                regs[REG_PS_IDX] &= ~(1 << 4)
            if stack[XT_STK_EXCCAUSE] in xtensa_exception_cause_dict:
                extra_regs[ESPCoreDumpElfFile.REG_EXCCAUSE_IDX] = stack[XT_STK_EXCCAUSE]
            else:
                extra_regs[ESPCoreDumpElfFile.REG_EXCCAUSE_IDX] = INVALID_CAUSE_VALUE
            extra_regs[ESPCoreDumpElfFile.REG_EXCVADDR_IDX] = stack[XT_STK_EXCVADDR]
        else:
            # solicited frame: only PC/PS and 4 address registers saved
            regs[REG_PC_IDX] = stack[XT_SOL_PC]
            regs[REG_PS_IDX] = stack[XT_SOL_PS]
            for i in range(XT_SOL_AR_NUM):
                regs[REG_AR_START_IDX + i] = stack[XT_SOL_AR_START + i]
            # nxt = stack[XT_SOL_NEXT]
        return regs,extra_regs

    def tcb_is_sane(self, tcb_addr, tcb_size):
        """Check tcb address if it is correct

        True when the TCB lies entirely inside the valid ESP32 DRAM range.
        """
        return not (tcb_addr < 0x3ffae000 or (tcb_addr + tcb_size) > 0x40000000)

    def stack_is_sane(self, sp):
        """Check stack address if it is correct
        """
        return not(sp < 0x3ffae010 or sp > 0x3fffffff)

    def addr_is_fake(self, addr):
        """Check if address is in fake area

        Addresses outside any real ESP32 memory region indicate corruption.
        """
        return ((addr < 0x3f3fffff and addr >= 0x20000000) or addr >= 0x80000000)

    def remove_tmp_file(self, fname):
        """Silently removes temporary file
        """
        try:
            os.remove(fname)
        except OSError as e:
            if e.errno != errno.ENOENT:
                logging.warning("Failed to remove temp file '%s' (%d)!" % (fname, e.errno))

    def cleanup(self):
        """Cleans up loader resources

        NOTE(review): assumes subclasses always set ``self.fcore_name`` --
        confirm for all loader subclasses.
        """
        if self.fcore:
            self.fcore.close()
        if self.fcore_name:
            self.remove_tmp_file(self.fcore_name)

    def extract_elf_corefile(self, core_fname=None, exe_name=None, off=0):
        """ Reads the ELF formatted core dump image and parse it

        Writes the embedded ELF image (between the dump header and trailing
        checksum) into *core_fname* and, when *exe_name* is given, verifies
        the app SHA256 recorded in the ESP_CORE_DUMP_INFO note.
        """
        core_off = off
        data = self.read_data(core_off, self.ESP32_COREDUMP_HDR_SZ)
        tot_len,coredump_ver,task_num,tcbsz,segs_num = struct.unpack_from(self.ESP32_COREDUMP_HDR_FMT, data)
        if coredump_ver == self.ESP32_COREDUMP_VERSION_ELF_CRC32:
            checksum_len = self.ESP32_COREDUMP_CRC_SZ
        elif coredump_ver == self.ESP32_COREDUMP_VERSION_ELF_SHA256:
            checksum_len = self.ESP32_COREDUMP_SHA256_SZ
        else:
            raise ESPCoreDumpLoaderError("Core dump version '%d' is not supported!" % coredump_ver)
        core_off += self.ESP32_COREDUMP_HDR_SZ
        core_elf = ESPCoreDumpElfFile()
        data = self.read_data(core_off, tot_len - checksum_len - self.ESP32_COREDUMP_HDR_SZ)
        with open(core_fname, 'w+b') as fce:
            try:
                fce.write(data)
                fce.flush()
                fce.seek(0)
                core_elf._read_elf_file(fce)
                if exe_name:
                    exe_elf = ESPCoreDumpElfFile(exe_name)
                # Read note segments from core file which are belong to tasks (TCB or stack)
                for ns in core_elf.aux_segments:
                    if ns.type != ESPCoreDumpElfFile.PT_NOTE:
                        continue
                    note_read = 0
                    while note_read < len(ns.data):
                        note = Elf32NoteDesc("", 0, None)
                        note_read += note.read(ns.data[note_read:])
                        # Check for version info note
                        if 'ESP_CORE_DUMP_INFO' == note.name and note.type == self.ESP_CORE_DUMP_INFO_TYPE and exe_name:
                            app_sha256 = binascii.hexlify(exe_elf.sha256())
                            n_ver_len = struct.calcsize("<L")
                            n_sha256_len = self.ESP32_COREDUMP_SHA256_SZ * 2  # SHA256 as hex string
                            n_ver,coredump_sha256 = struct.unpack("<L%ds" % (n_sha256_len), note.desc[:n_ver_len + n_sha256_len])
                            if coredump_sha256 != app_sha256 or n_ver != coredump_ver:
                                raise ESPCoreDumpError("Invalid application image for coredump: app_SHA256(%s) != coredump_SHA256(%s)." %
                                                       (app_sha256, coredump_sha256))
            except ESPCoreDumpError as e:
                logging.warning("Failed to extract ELF core dump image into file %s. (Reason: %s)" % (core_fname, e))
        return core_fname

    def create_corefile(self, core_fname=None, exe_name=None, rom_elf=None, off=0):
        """Creates core dump ELF file

        For ELF-format dumps this delegates to extract_elf_corefile(); for
        the legacy binary format (version 1) it synthesizes an ELF core file
        from the per-task TCB/stack records and memory segments.
        Returns the core file path, or None on failure.
        """
        core_off = off
        data = self.read_data(core_off, self.ESP32_COREDUMP_HDR_SZ)
        tot_len,coredump_ver,task_num,tcbsz,segs_num = struct.unpack_from(self.ESP32_COREDUMP_HDR_FMT, data)
        if not core_fname:
            fce = tempfile.NamedTemporaryFile(mode='w+b', delete=False)
            core_fname = fce.name
        if coredump_ver == self.ESP32_COREDUMP_VERSION_ELF_CRC32 or coredump_ver == self.ESP32_COREDUMP_VERSION_ELF_SHA256:
            return self.extract_elf_corefile(core_fname, exe_name)
        elif coredump_ver > self.ESP32_COREDUMP_VERSION_ELF_SHA256:
            raise ESPCoreDumpLoaderError("Core dump version '%d' is not supported! Should be up to '%d'." %
                                         (coredump_ver, self.ESP32_COREDUMP_VERSION_ELF_SHA256))
        with open(core_fname, 'w+b') as fce:
            tcbsz_aligned = tcbsz
            if tcbsz_aligned % 4:
                tcbsz_aligned = 4 * (old_div(tcbsz_aligned,4) + 1)
            # The version of core dump is ESP32_COREDUMP_VERSION_BIN
            core_off += self.ESP32_COREDUMP_HDR_SZ
            core_elf = ESPCoreDumpElfFile()
            notes = b''
            core_dump_info_notes = b''
            task_info_notes = b''
            task_status = EspCoreDumpTaskStatus()
            for i in range(task_num):
                task_status.task_index = i
                task_status.task_flags = EspCoreDumpTaskStatus.TASK_STATUS_CORRECT
                data = self.read_data(core_off, self.ESP32_COREDUMP_TSK_HDR_SZ)
                tcb_addr,stack_top,stack_end = struct.unpack_from(self.ESP32_COREDUMP_TSK_HDR_FMT, data)
                if stack_end > stack_top:
                    stack_len = stack_end - stack_top
                    stack_base = stack_top
                else:
                    stack_len = stack_top - stack_end
                    stack_base = stack_end
                stack_len_aligned = stack_len
                if stack_len_aligned % 4:
                    stack_len_aligned = 4 * (old_div(stack_len_aligned,4) + 1)
                core_off += self.ESP32_COREDUMP_TSK_HDR_SZ
                logging.debug("Read TCB %d bytes @ 0x%x" % (tcbsz_aligned, tcb_addr))
                data = self.read_data(core_off, tcbsz_aligned)
                task_status.task_tcb_addr = tcb_addr
                try:
                    if self.tcb_is_sane(tcb_addr, tcbsz_aligned):
                        if tcbsz != tcbsz_aligned:
                            # negative slice trims the alignment padding
                            core_elf.add_program_segment(tcb_addr, data[:tcbsz - tcbsz_aligned],
                                                         ESPCoreDumpElfFile.PT_LOAD, ESPCoreDumpSegment.PF_R | ESPCoreDumpSegment.PF_W)
                        else:
                            core_elf.add_program_segment(tcb_addr, data, ESPCoreDumpElfFile.PT_LOAD, ESPCoreDumpSegment.PF_R | ESPCoreDumpSegment.PF_W)
                        # task_status.task_name = bytearray("%s\0" % task_name_str, encoding='ascii')
                    elif tcb_addr and self.addr_is_fake(tcb_addr):
                        task_status.task_flags |= EspCoreDumpTaskStatus.TASK_STATUS_TCB_CORRUPTED
                except ESPCoreDumpError as e:
                    logging.warning("Skip TCB %d bytes @ 0x%x. (Reason: %s)" % (tcbsz_aligned, tcb_addr, e))
                core_off += tcbsz_aligned
                logging.debug("Read stack %d bytes @ 0x%x" % (stack_len_aligned, stack_base))
                data = self.read_data(core_off, stack_len_aligned)
                if stack_len != stack_len_aligned:
                    data = data[:stack_len - stack_len_aligned]
                task_status.task_stack_start = stack_base
                task_status.task_stack_len = stack_len_aligned
                try:
                    if self.stack_is_sane(stack_base):
                        core_elf.add_program_segment(stack_base, data, ESPCoreDumpElfFile.PT_LOAD, ESPCoreDumpSegment.PF_R | ESPCoreDumpSegment.PF_W)
                    elif stack_base and self.addr_is_fake(stack_base):
                        task_status.task_flags |= EspCoreDumpTaskStatus.TASK_STATUS_STACK_CORRUPTED
                        core_elf.add_program_segment(stack_base, data, ESPCoreDumpElfFile.PT_LOAD, ESPCoreDumpSegment.PF_R | ESPCoreDumpSegment.PF_W)
                except ESPCoreDumpError as e:
                    logging.warning("Skip task's (%x) stack %d bytes @ 0x%x. (Reason: %s)" % (tcb_addr, stack_len_aligned, stack_base, e))
                core_off += stack_len_aligned
                try:
                    logging.debug("Stack start_end: 0x%x @ 0x%x" % (stack_top, stack_end))
                    task_regs,extra_regs = self._get_registers_from_stack(data, stack_end > stack_top)
                except Exception as e:
                    logging.error(e)
                    return None
                task_info_notes += Elf32NoteDesc("TASK_INFO", self.ESP_CORE_DUMP_TASK_INFO_TYPE, task_status.dump()).dump()
                prstatus = XtensaPrStatus()
                prstatus.pr_cursig = 0  # TODO: set sig only for current/failed task
                prstatus.pr_pid = tcb_addr
                note = Elf32NoteDesc("CORE", 1, prstatus.dump() + struct.pack("<%dL" % len(task_regs), *task_regs)).dump()
                notes += note
                if ESPCoreDumpElfFile.REG_EXCCAUSE_IDX in extra_regs and len(core_dump_info_notes) == 0:
                    # actually there will be only one such note - for crashed task
                    core_dump_info_notes += Elf32NoteDesc("ESP_CORE_DUMP_INFO", self.ESP_CORE_DUMP_INFO_TYPE, struct.pack("<L", coredump_ver)).dump()
                    exc_regs = []
                    for reg_id in extra_regs:
                        exc_regs.extend([reg_id, extra_regs[reg_id]])
                    core_dump_info_notes += Elf32NoteDesc("EXTRA_INFO", self.ESP_CORE_DUMP_EXTRA_INFO_TYPE,
                                                          struct.pack("<%dL" % (1 + len(exc_regs)), tcb_addr, *exc_regs)).dump()
            for i in range(segs_num):
                data = self.read_data(core_off, self.ESP32_COREDUMP_MEM_SEG_HDR_SZ)
                core_off += self.ESP32_COREDUMP_MEM_SEG_HDR_SZ
                mem_start,mem_sz = struct.unpack_from(self.ESP32_COREDUMP_MEM_SEG_HDR_FMT, data)
                logging.debug("Read memory segment %d bytes @ 0x%x" % (mem_sz, mem_start))
                # BUGFIX: read mem_sz bytes for this memory segment; the original
                # code read stack_len_aligned (stale value from the task loop),
                # producing a segment of the wrong size while still advancing
                # the offset by mem_sz.
                data = self.read_data(core_off, mem_sz)
                core_elf.add_program_segment(mem_start, data, ESPCoreDumpElfFile.PT_LOAD, ESPCoreDumpSegment.PF_R | ESPCoreDumpSegment.PF_W)
                core_off += mem_sz
            # add notes
            try:
                core_elf.add_aux_segment(notes, ESPCoreDumpElfFile.PT_NOTE, 0)
            except ESPCoreDumpError as e:
                logging.warning("Skip NOTES segment %d bytes @ 0x%x. (Reason: %s)" % (len(notes), 0, e))
            # add core dump info notes
            try:
                core_elf.add_aux_segment(core_dump_info_notes, ESPCoreDumpElfFile.PT_NOTE, 0)
            except ESPCoreDumpError as e:
                logging.warning("Skip core dump info NOTES segment %d bytes @ 0x%x. (Reason: %s)" % (len(core_dump_info_notes), 0, e))
            try:
                core_elf.add_aux_segment(task_info_notes, ESPCoreDumpElfFile.PT_NOTE, 0)
            except ESPCoreDumpError as e:
                logging.warning("Skip failed tasks info NOTES segment %d bytes @ 0x%x. (Reason: %s)" % (len(task_info_notes), 0, e))
            # add ROM text sections
            if rom_elf:
                for ps in rom_elf.program_segments:
                    if ps.flags & ESPCoreDumpSegment.PF_X:
                        try:
                            core_elf.add_program_segment(ps.addr, ps.data, ESPCoreDumpElfFile.PT_LOAD, ps.flags)
                        except ESPCoreDumpError as e:
                            logging.warning("Skip ROM segment %d bytes @ 0x%x. (Reason: %s)" % (len(ps.data), ps.addr, e))
            core_elf.e_type = ESPCoreDumpElfFile.ET_CORE
            core_elf.e_machine = ESPCoreDumpElfFile.EM_XTENSA
            core_elf.dump(fce)
        return core_fname

    def read_data(self, off, sz):
        """Reads data from raw core dump got from flash or UART
        """
        self.fcore.seek(off)
        data = self.fcore.read(sz)
        return data
class ESPCoreDumpFileLoader(ESPCoreDumpLoader):
    """Core dump file loader class

    Loads a core dump image from a local file, either raw binary or
    base64-encoded (as printed to the UART monitor).
    """
    def __init__(self, path, b64=False):
        """Constructor for core dump file loader

        path -- file containing the dump; b64 -- True when the file holds
        base64-encoded lines rather than raw binary.
        """
        super(ESPCoreDumpFileLoader, self).__init__()
        self.fcore = self._load_coredump(path, b64)
    def _load_coredump(self, path, b64):
        """Loads core dump from (raw binary or base64-encoded) file

        For base64 input the decoded bytes are written to a temp file
        (self.fcore_name), which cleanup() removes later. Returns an open
        binary file object positioned at the start of the raw dump.
        """
        logging.debug("Load core dump from '%s'", path)
        self.fcore_name = None
        if b64:
            fhnd,self.fcore_name = tempfile.mkstemp()
            fcore = os.fdopen(fhnd, 'wb')
            fb64 = open(path, 'rb')
            try:
                # decode line by line; stop at EOF
                while True:
                    line = fb64.readline()
                    if len(line) == 0:
                        break
                    data = base64.standard_b64decode(line.rstrip(b'\r\n'))
                    fcore.write(data)
                fcore.close()
                # reopen the decoded temp file for reading
                fcore = open(self.fcore_name, 'rb')
            except Exception as e:
                if self.fcore_name:
                    self.remove_tmp_file(self.fcore_name)
                    raise e
            finally:
                fb64.close()
        else:
            fcore = open(path, 'rb')
        return fcore
class ESPCoreDumpFlashLoader(ESPCoreDumpLoader):
    """Core dump flash loader class

    Reads the core dump image out of the chip's flash, either via
    parttool.py (coredump partition lookup) or esptool.py (explicit flash
    offset), and verifies its integrity before building the core file.
    """
    # format/size of the little-endian 32-bit length word that prefixes the
    # core dump image in flash
    ESP32_COREDUMP_FLASH_LEN_FMT = '<L'
    ESP32_COREDUMP_FLASH_LEN_SZ = struct.calcsize(ESP32_COREDUMP_FLASH_LEN_FMT)
    # default flash offset of the partition table
    ESP32_COREDUMP_PART_TABLE_OFF = 0x8000
    def __init__(self, off, tool_path=None, chip='esp32', port=None, baud=None):
        """Constructor for core dump flash loader

        off  -- flash offset of the core dump image, or None/0 to locate the
                coredump partition through the partition table
        chip/port/baud -- esptool connection parameters
        """
        super(ESPCoreDumpFlashLoader, self).__init__()
        self.port = port
        self.baud = baud
        self.chip = chip
        self.dump_sz = 0
        self.fcore = self._load_coredump(off)
    def get_tool_path(self, use_esptool=None):
        """Return the IDF component directory holding esptool.py or parttool.py."""
        if use_esptool:
            tool_path = os.path.join(idf_path, 'components', 'esptool_py', 'esptool') + os.path.sep
        else:
            tool_path = os.path.join(idf_path, 'components', 'partition_table') + os.path.sep
        return tool_path
    def get_core_dump_partition_info(self, part_off=None, tool_path=None):
        """Get core dump partition info using parttool

        Returns (offset, size) of the coredump data partition, both parsed
        from parttool's hex output.  Raises CalledProcessError when the
        partition cannot be found.
        """
        logging.info("Retrieving core dump partition offset and size...")
        if not tool_path:
            tool_path = self.get_tool_path(use_esptool=False)
        if not part_off:
            part_off = self.ESP32_COREDUMP_PART_TABLE_OFF
        size = None
        offset = None
        try:
            tool_args = [sys.executable, tool_path + 'parttool.py', "-q", "--partition-table-offset", str(part_off)]
            if self.port:
                tool_args.extend(['--port', self.port])
            invoke_args = tool_args + ["get_partition_info", "--partition-type", "data", "--partition-subtype", "coredump", "--info", "offset", "size"]
            # parttool prints "<offset> <size>" as two hex numbers
            (offset_str, size_str) = subprocess.check_output(invoke_args).strip().split(b" ")
            size = int(size_str, 16)
            offset = int(offset_str, 16)
            logging.info("Core dump partition offset=%d, size=%d", offset, size)
        except subprocess.CalledProcessError as e:
            logging.error("parttool get partition info failed with err %d" % e.returncode)
            logging.debug("Command ran: '%s'" % e.cmd)
            logging.debug("Command out:")
            logging.debug(e.output)
            logging.error("Check if the coredump partition exists in partition table.")
            raise e
        return (offset, size)
    def invoke_parttool(self, tool_path=None):
        """Loads core dump from flash using parttool

        Reads the whole coredump partition into a temporary file, then
        truncates it to the actual dump length (first 4 bytes of the image).
        Returns the open temp file; its name is kept in self.fcore_name.
        """
        part_tool_args = [sys.executable, tool_path + 'parttool.py']
        if self.port:
            part_tool_args.extend(['--port', self.port])
        part_tool_args.extend(['read_partition', '--partition-type', 'data', '--partition-subtype', 'coredump', '--output'])
        self.fcore_name = None
        f = tempfile.NamedTemporaryFile(mode='w+b', delete=False)
        try:
            part_tool_args.append(f.name)
            self.fcore_name = f.name
            # read core dump partition
            et_out = subprocess.check_output(part_tool_args)
            if len(et_out):
                logging.info(et_out.decode('utf-8'))
            self.dump_sz = self._read_core_dump_length(f)
            f.seek(self.dump_sz)
            # cut free space of the partition
            f.truncate()
            f.seek(0)
        except subprocess.CalledProcessError as e:
            logging.error("parttool script execution failed with err %d" % e.returncode)
            logging.debug("Command ran: '%s'" % e.cmd)
            logging.debug("Command out:")
            logging.debug(e.output)
            if self.fcore_name:
                f.close()
                self.remove_tmp_file(self.fcore_name)
            raise e
        return f
    def invoke_esptool(self, tool_path=None, off=None):
        """Loads core dump from flash using elftool

        Two-pass read via esptool.py: first the 4-byte length word at *off*,
        then the full image of that length.  Returns the open temp file;
        its name is kept in self.fcore_name.
        """
        tool_args = [sys.executable, tool_path + 'esptool.py', '-c', self.chip]
        if self.port:
            tool_args.extend(['-p', self.port])
        if self.baud:
            tool_args.extend(['-b', str(self.baud)])
        f = tempfile.NamedTemporaryFile(mode='w+b', delete=False)
        self.fcore_name = None
        try:
            (part_offset, part_size) = self.get_core_dump_partition_info(tool_path='')
            if not off:
                off = part_offset # set default offset if not specified
                logging.warning("The core dump image offset is not specified. Use partition offset: %d.", part_offset)
            if part_offset != off:
                logging.warning("Predefined image offset: %d does not match core dump partition offset: %d", off, part_offset)
            tool_args.extend(['read_flash', str(off), str(self.ESP32_COREDUMP_FLASH_LEN_SZ)])
            tool_args.append(f.name)
            self.fcore_name = f.name
            # read core dump length
            et_out = subprocess.check_output(tool_args)
            if len(et_out):
                logging.info(et_out.decode('utf-8'))
            self.dump_sz = self._read_core_dump_length(f)
            if self.dump_sz == 0 or self.dump_sz > part_size:
                # implausible length word: fall back to reading the whole partition
                logging.error("Incorrect size of core dump image: %d, use partition size instead: %d", self.dump_sz, part_size)
                self.dump_sz = part_size
            # set actual size of core dump image and read it from flash
            tool_args[-2] = str(self.dump_sz)
            et_out = subprocess.check_output(tool_args)
            if len(et_out):
                logging.info(et_out.decode('utf-8'))
        except subprocess.CalledProcessError as e:
            logging.error("esptool script execution failed with err %d" % e.returncode)
            logging.debug("Command ran: '%s'" % e.cmd)
            logging.debug("Command out:")
            logging.debug(e.output)
            if self.fcore_name:
                f.close()
                self.remove_tmp_file(self.fcore_name)
            raise e
        return f
    def _load_coredump(self, off=None):
        """Loads core dump from flash using parttool or elftool (if offset is set)

        First attempt relies on the tools being reachable via an empty path
        (i.e. alongside the script / on sys.path); on CalledProcessError the
        predefined IDF component paths are tried instead.
        """
        tool_path = None
        try:
            if off:
                tool_path = ''
                logging.info("Invoke esptool to read image.")
                f = self.invoke_esptool(tool_path=tool_path, off=off)
            else:
                tool_path = ''
                logging.info("Invoke parttool to read image.")
                f = self.invoke_parttool(tool_path=tool_path)
        except subprocess.CalledProcessError as e:
            if len(e.output):
                logging.info(e.output)
            logging.warning("System path is not set. Try to use predefined path.")
            if off:
                tool_path = self.get_tool_path(use_esptool=True)
                f = self.invoke_esptool(tool_path=tool_path, off=off)
            else:
                tool_path = self.get_tool_path(use_esptool=False)
                f = self.invoke_parttool(tool_path=tool_path)
        return f
    def _read_core_dump_length(self, f):
        """Reads core dump length

        The image starts with a little-endian 32-bit total-length word;
        *f* must be positioned at the start of the image.
        """
        data = f.read(self.ESP32_COREDUMP_FLASH_LEN_SZ)
        tot_len, = struct.unpack_from(self.ESP32_COREDUMP_FLASH_LEN_FMT, data)
        return tot_len
    def create_corefile(self, core_fname=None, exe_name=None, rom_elf=None):
        """Checks flash coredump data integrity and creates ELF file

        Verifies the trailing CRC32 (binary / ELF-CRC32 formats) or SHA256
        (ELF-SHA256 format) before delegating to the base implementation.
        NOTE(review): the rom_elf parameter is accepted but not forwarded to
        the base call — confirm whether that is intentional.
        """
        data = self.read_data(0, self.ESP32_COREDUMP_HDR_SZ)
        self.checksum_len = 0
        _,coredump_ver,_,_,_ = struct.unpack_from(self.ESP32_COREDUMP_HDR_FMT, data)
        if coredump_ver == self.ESP32_COREDUMP_VERSION_ELF_CRC32 or coredump_ver == self.ESP32_COREDUMP_VERSION_BIN:
            logging.debug("Dump size = %d, crc off = 0x%x", self.dump_sz, self.dump_sz - self.ESP32_COREDUMP_CRC_SZ)
            data = self.read_data(self.dump_sz - self.ESP32_COREDUMP_CRC_SZ, self.ESP32_COREDUMP_CRC_SZ)
            dump_crc, = struct.unpack_from(self.ESP32_COREDUMP_CRC_FMT, data)
            data = self.read_data(0, self.dump_sz - self.ESP32_COREDUMP_CRC_SZ)
            # mask to 32 bits: binascii.crc32 may be signed on Python 2
            data_crc = binascii.crc32(data) & 0xffffffff
            if dump_crc != data_crc:
                raise ESPCoreDumpLoaderError("Invalid core dump CRC %x, should be %x" % (data_crc, dump_crc))
        elif coredump_ver == self.ESP32_COREDUMP_VERSION_ELF_SHA256:
            dump_sha256 = self.read_data(self.dump_sz - self.ESP32_COREDUMP_SHA256_SZ, self.ESP32_COREDUMP_SHA256_SZ)
            data = self.read_data(0, self.dump_sz - self.ESP32_COREDUMP_SHA256_SZ)
            data_sha256 = sha256(data)
            data_sha256_str = data_sha256.hexdigest()
            dump_sha256_str = binascii.hexlify(dump_sha256).decode('ascii')
            if dump_sha256_str != data_sha256_str:
                raise ESPCoreDumpLoaderError("Invalid core dump SHA256 '%s', should be '%s'" % (dump_sha256_str, data_sha256_str))
        return super(ESPCoreDumpFlashLoader, self).create_corefile(core_fname, exe_name)
class GDBMIOutRecordHandler(object):
    """Base class for GDB/MI output record handlers.

    Subclasses set TAG to the one-character record prefix they react to
    ('^', '~', ...) and extend execute() with the actual parsing.
    """
    TAG = ''
    def __init__(self, f, verbose=False):
        """Remember verbosity; *f* is unused here and kept for subclass compatibility."""
        self.verbose = verbose
    def execute(self, ln):
        """Optionally trace the raw GDB/MI line; subclasses extend this."""
        if not self.verbose:
            return
        logging.debug("%s.execute: [[%s]]", self.__class__.__name__, ln)
class GDBMIOutStreamHandler(GDBMIOutRecordHandler):
    """Handler for GDB/MI stream records.

    Strips the tag character and the quoting from each record and forwards
    the unescaped payload to a user-supplied callback.
    """
    def __init__(self, f, verbose=False):
        """Store callback *f*, invoked with every unescaped payload line."""
        super(GDBMIOutStreamHandler, self).__init__(None, verbose)
        self.func = f
    def execute(self, ln):
        """Unescape the stream record payload and hand it to the callback."""
        GDBMIOutRecordHandler.execute(self, ln)
        if not self.func:
            return
        # drop the leading tag and the surrounding quotes, then turn the
        # C-style escapes back into real newline/tab characters
        payload = ln[1:].strip('"').replace('\\n', '\n').replace('\\t', '\t')
        self.func(payload)
class GDBMIResultHandler(GDBMIOutRecordHandler):
    """Handler for GDB/MI result records ('^<class>[,<payload>]').

    After execute(), self.result_class holds one of the RC_* constants and
    self.result_str holds the payload text (without the leading comma).
    """
    TAG = '^'
    RC_DONE = 'done'
    RC_RUNNING = 'running'
    RC_CONNECTED = 'connected'
    RC_ERROR = 'error'
    RC_EXIT = 'exit'
    def __init__(self, verbose=False):
        """Start with an empty result class and payload."""
        super(GDBMIResultHandler, self).__init__(None, verbose)
        self.result_class = ''
        self.result_str = ''
    def _parse_rc(self, ln, rc):
        """Try to match *ln* against result class *rc*.

        On a match, store the class and the payload that follows the comma
        and return True; return False when the prefix does not match.
        """
        prefix = "{0}{1}".format(self.TAG, rc)
        if not ln.startswith(prefix):
            return False
        self.result_class = rc
        rest = ln[len(prefix):]
        if rest:
            self.result_str = rest
            if self.result_str.startswith(','):
                self.result_str = self.result_str[1:]
            else:
                # a payload must be comma-separated from the result class
                logging.error("Invalid result format: '%s'" % ln)
        else:
            self.result_str = ''
        return True
    def execute(self, ln):
        """Classify the record into one of the known result classes."""
        GDBMIOutRecordHandler.execute(self, ln)
        for rc in (self.RC_DONE, self.RC_RUNNING, self.RC_CONNECTED,
                   self.RC_ERROR, self.RC_EXIT):
            if self._parse_rc(ln, rc):
                return
        logging.error("Unknown GDB/MI result: '%s'" % ln)
class GDBMIThreadListIdsHandler(GDBMIResultHandler):
    """Parses the reply of the '-thread-list-ids' GDB/MI command."""
    def __init__(self, verbose=False):
        """Start with an empty thread list and no current thread."""
        super(GDBMIThreadListIdsHandler, self).__init__(verbose)
        self.threads = []
        self.current_thread = ''
    def execute(self, ln):
        """Collect every thread id (and the current one) from a 'done' record."""
        GDBMIResultHandler.execute(self, ln)
        if self.result_class != self.RC_DONE:
            return
        # simple parsing: pull the thread-ids={...} group, then each id in it
        ids_group = re.search(r'thread-ids\s*=\s*\{([^\{\}]*)\}', self.result_str)
        if ids_group:
            for m in re.finditer(r'thread-id="(\d+)"', ids_group.group(1)):
                self.threads.append(m.group(1))
        cur = re.search(r'current-thread-id="(\d+)"', self.result_str)
        if cur:
            self.current_thread = cur.group(1)
class GDBMIThreadSelectHandler(GDBMIResultHandler):
    """Handles the reply of the '-thread-select' GDB/MI command.

    The reply carries no payload this tool needs, so the handler only lets
    the base class record the result class.
    """
    def execute(self, ln):
        """Record the result class; nothing further to parse on success."""
        GDBMIResultHandler.execute(self, ln)
        if self.result_class != self.RC_DONE:
            return
class GDBMIThreadInfoHandler(GDBMIResultHandler):
    """Parses the reply of the '-thread-info' GDB/MI command."""
    def __init__(self, verbose=False):
        """Initialise every parsed field to its empty value."""
        super(GDBMIThreadInfoHandler, self).__init__(verbose)
        self.current = False
        self.id = ''
        self.target_id = ''
        self.details = ''
        self.name = ''
        self.frame = ''
        self.state = ''
        self.core = ''
    def execute(self, ln):
        """Extract id / current flag / target-id from a 'done' record.

        Only the fields this tool actually uses are parsed; the remaining
        attributes stay at their defaults.
        """
        GDBMIResultHandler.execute(self, ln)
        if self.result_class != self.RC_DONE:
            return
        id_match = re.search(r'id="(\d+)"', self.result_str)
        if id_match:
            self.id = id_match.group(1)
        if re.search(r'current="\*"', self.result_str):
            self.current = True
        target_match = re.search(r'target-id="([^"]+)"', self.result_str)
        if target_match:
            self.target_id = target_match.group(1)
class GDBMIDataEvalHandler(GDBMIResultHandler):
    """GDB/MI data-evaluate-expression handler class

    Parses the reply of '-data-evaluate-expression' and stores the
    evaluated expression text in self.value ('' when nothing was found).
    """
    def __init__(self, verbose=False):
        """Constructor for GDB/MI result handler"""
        super(GDBMIDataEvalHandler, self).__init__(verbose)
        self.value = ''
    def execute(self, ln):
        """Executes GDB/MI data-evaluate-expression handler function

        First scans the payload for a '<name>=<text>,' pair named 'value';
        if that scan finds nothing, falls back to the quoted
        'value="..."' form in the remainder of the payload.
        """
        GDBMIResultHandler.execute(self, ln)
        if self.result_class != self.RC_DONE:
            return
        # simple parsing method
        if self.verbose:
            logging.debug("GDBMIDataEvalHandler: result '%s'", self.result_str)
        pos = 0
        r = re.compile(r'([a-zA-Z_]+)=(.+)\,')
        while True:
            m = r.search(self.result_str, pos=pos)
            if not m:
                break
            if m.group(1) == 'value':
                if self.verbose:
                    logging.debug("GDBMIDataEvalHandler: found value = '%s'", m.group(2))
                # bug fix: the matched value lives in this match object 'm';
                # the original read 'self.result.group(1)', but no attribute
                # 'self.result' exists, so this branch always raised
                # AttributeError instead of storing the value
                self.value = m.group(2)
                return
            pos = m.end(2) + 1
        res_str = self.result_str[pos:]
        res_str = res_str.replace(r'\"', '\'')
        m = re.search(r'value="([^"]+)"', res_str)
        if m:
            if self.verbose:
                logging.debug("GDBMIDataEvalHandler: found value = '%s'", m.group(1))
            self.value = m.group(1)
class GDBMIStreamConsoleHandler(GDBMIOutStreamHandler):
    """Stream handler bound to GDB/MI console-stream records ('~...')."""
    TAG = '~'
def load_aux_elf(elf_path):
    """Load an auxiliary ELF (e.g. the ROM image) for symbol resolution.

    Returns a tuple (elf, sym_cmd): the parsed ELF object (None when the
    file does not exist) and the GDB 'add-symbol-file' command that maps
    its .text section at the correct address ('' when unavailable).
    """
    elf = None
    sym_cmd = ''
    if os.path.exists(elf_path):
        elf = ESPCoreDumpElfFile(elf_path)
        # GDB needs the .text load address to place the symbols correctly
        text_sections = [s for s in elf.sections if s.name == '.text']
        if text_sections:
            sym_cmd = 'add-symbol-file %s 0x%x' % (elf_path, text_sections[-1].addr)
    return (elf, sym_cmd)
def dbg_corefile(args):
    """ Command to load core dump from file or flash and run GDB debug session with it

    Builds (or reuses) an ELF core file from args.core / flash, then starts
    an interactive GDB attached to it.  Temporary core files are removed
    afterwards unless --save-core was given.
    """
    global CLOSE_FDS
    loader = None
    rom_elf,rom_sym_cmd = load_aux_elf(args.rom_elf)
    if not args.core:
        # no core file given: pull the dump out of the chip's flash
        loader = ESPCoreDumpFlashLoader(args.off, port=args.port, baud=args.baud)
        core_fname = loader.create_corefile(args.save_core, exe_name=args.prog, rom_elf=rom_elf)
        if not core_fname:
            logging.error("Failed to create corefile!")
            loader.cleanup()
            return
    else:
        core_fname = args.core
        if args.core_format and args.core_format != 'elf':
            # raw/b64 input must be converted to an ELF core file first
            loader = ESPCoreDumpFileLoader(core_fname, args.core_format == 'b64')
            core_fname = loader.create_corefile(args.save_core, exe_name=args.prog, rom_elf=rom_elf)
            if not core_fname:
                logging.error("Failed to create corefile!")
                loader.cleanup()
                return
    # run an interactive (blocking) GDB session on the core file
    p = subprocess.Popen(bufsize=0,
                         args=[args.gdb,
                               '--nw', # ignore .gdbinit
                               '--core=%s' % core_fname, # core file,
                               '-ex', rom_sym_cmd,
                               args.prog
                               ],
                         stdin=None, stdout=None, stderr=None,
                         close_fds=CLOSE_FDS
                         )
    p.wait()
    if loader:
        # drop the temporary core file unless the user asked to keep it
        if not args.core and not args.save_core:
            loader.remove_tmp_file(core_fname)
        loader.cleanup()
    print('Done!')
def info_corefile(args):
    """ Command to load core dump from file or flash and print it's data in user friendly form

    Drives a non-interactive GDB/MI session over the reconstructed core
    file and prints: the crashed task, its exception registers and
    backtrace, every other thread's backtrace, the memory region map and
    (with --print-mem) the raw contents of the dumped memory.
    """
    global CLOSE_FDS
    def gdbmi_console_stream_handler(ln):
        # echo GDB console output straight to the user
        sys.stdout.write(ln)
        sys.stdout.flush()
    def gdbmi_read2prompt(f, out_handlers=None):
        # consume GDB/MI output up to the next '(gdb)' prompt, dispatching
        # each record to the handler whose TAG matches its first character
        while True:
            ln = f.readline().decode('utf-8').rstrip(' \r\n')
            if ln == '(gdb)':
                break
            elif len(ln) == 0:
                break
            elif out_handlers:
                for h in out_handlers:
                    if ln.startswith(out_handlers[h].TAG):
                        out_handlers[h].execute(ln)
                        break
    def gdbmi_start(handlers, gdb_cmds):
        # spawn GDB in MI mode on the core file and wait for the first prompt
        gdb_args = [args.gdb,
                    '--quiet', # inhibit dumping info at start-up
                    '--nx', # inhibit window interface
                    '--nw', # ignore .gdbinit
                    '--interpreter=mi2', # use GDB/MI v2
                    '--core=%s' % core_fname] # core file
        for c in gdb_cmds:
            gdb_args += ['-ex', c]
        gdb_args.append(args.prog)
        p = subprocess.Popen(bufsize=0,
                             args=gdb_args,
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                             close_fds=CLOSE_FDS)
        gdbmi_read2prompt(p.stdout, handlers)
        return p
    def gdbmi_cmd_exec(p, handlers, gdbmi_cmd):
        # run one GDB/MI command; if GDB died, restart it and return the
        # new process object (hence callers must rebind 'p')
        for t in handlers:
            handlers[t].result_class = None
        p.stdin.write(bytearray("%s\n" % gdbmi_cmd, encoding='utf-8'))
        gdbmi_read2prompt(p.stdout, handlers)
        if not handlers[GDBMIResultHandler.TAG].result_class or handlers[GDBMIResultHandler.TAG].result_class == GDBMIResultHandler.RC_EXIT:
            logging.error("GDB exited (%s / %s)!" % (handlers[GDBMIResultHandler.TAG].result_class, handlers[GDBMIResultHandler.TAG].result_str))
            p.wait()
            logging.error("Problem occured! GDB exited, restart it.")
            p = gdbmi_start(handlers, [])
        elif handlers[GDBMIResultHandler.TAG].result_class != GDBMIResultHandler.RC_DONE:
            logging.error("GDB/MI command failed (%s / %s)!" % (handlers[GDBMIResultHandler.TAG].result_class, handlers[GDBMIResultHandler.TAG].result_str))
        return p
    def gdbmi_getinfo(p, handlers, gdb_cmd):
        # run a console command ('info ...', 'bt', ...) through the MI wrapper
        return gdbmi_cmd_exec(p, handlers, "-interpreter-exec console \"%s\"" % gdb_cmd)
    def gdbmi_get_thread_ids(p):
        # returns (process, list of thread ids, current thread id)
        handlers = {}
        result = GDBMIThreadListIdsHandler(verbose=False)
        handlers[GDBMIResultHandler.TAG] = result
        handlers[GDBMIStreamConsoleHandler.TAG] = GDBMIStreamConsoleHandler(None, verbose=False)
        p = gdbmi_cmd_exec(p, handlers, "-thread-list-ids")
        return p,result.threads,result.current_thread
    def gdbmi_switch_thread(p, thr_id):
        # make thr_id the current GDB thread
        handlers = {}
        result = GDBMIThreadSelectHandler(verbose=False)
        handlers[GDBMIResultHandler.TAG] = result
        handlers[GDBMIStreamConsoleHandler.TAG] = GDBMIStreamConsoleHandler(None, verbose=False)
        return gdbmi_cmd_exec(p, handlers, "-thread-select %s" % thr_id)
    def gdbmi_get_thread_info(p, thr_id):
        # query info for one thread (or all threads when thr_id is falsy)
        handlers = {}
        result = GDBMIThreadInfoHandler(verbose=False)
        handlers[GDBMIResultHandler.TAG] = result
        handlers[GDBMIStreamConsoleHandler.TAG] = GDBMIStreamConsoleHandler(None, verbose=False)
        if thr_id:
            cmd = "-thread-info %s" % thr_id
        else:
            cmd = "-thread-info"
        p = gdbmi_cmd_exec(p, handlers, cmd)
        return p,result
    def gdbmi_data_evaluate_expression(p, expr):
        # evaluate a C expression in the context of the core dump
        handlers = {}
        result = GDBMIDataEvalHandler(verbose=False)
        handlers[GDBMIResultHandler.TAG] = result
        handlers[GDBMIStreamConsoleHandler.TAG] = GDBMIStreamConsoleHandler(None, verbose=False)
        p = gdbmi_cmd_exec(p, handlers, "-data-evaluate-expression \"%s\"" % expr)
        return p,result
    def gdbmi_freertos_get_task_name(p, tcb_addr):
        # read pcTaskName out of the FreeRTOS TCB at tcb_addr; '' on failure
        p,res = gdbmi_data_evaluate_expression(p, "(char*)((TCB_t *)0x%x)->pcTaskName" % tcb_addr)
        result = re.match('0x[a-fA-F0-9]+[ \t]*\'([^\']*)\'', res.value)
        if result:
            return p,result.group(1)
        return p,''
    def gdb2freertos_thread_id(gdb_thread_id):
        # GDB reports core dump threads as 'process <TCB address>'
        return int(gdb_thread_id.replace("process ", ""), 0)
    loader = None
    rom_elf,rom_sym_cmd = load_aux_elf(args.rom_elf)
    if not args.core:
        # no core file given: pull the dump out of the chip's flash
        loader = ESPCoreDumpFlashLoader(args.off, port=args.port, baud=args.baud)
        core_fname = loader.create_corefile(args.save_core, exe_name=args.prog, rom_elf=rom_elf)
        if not core_fname:
            logging.error("Failed to create corefile!")
            loader.cleanup()
            return
    else:
        core_fname = args.core
        if args.core_format and args.core_format != 'elf':
            # raw/b64 input must be converted to an ELF core file first
            loader = ESPCoreDumpFileLoader(core_fname, args.core_format == 'b64')
            core_fname = loader.create_corefile(args.save_core, exe_name=args.prog, rom_elf=rom_elf)
            if not core_fname:
                logging.error("Failed to create corefile!")
                loader.cleanup()
                return
    exe_elf = ESPCoreDumpElfFile(args.prog)
    core_elf = ESPCoreDumpElfFile(core_fname)
    merged_segs = []
    core_segs = core_elf.program_segments
    # merge program ELF sections with overlapping core dump segments so the
    # memory-region table below shows each address range only once
    for s in exe_elf.sections:
        merged = False
        for ps in core_segs:
            if ps.addr <= s.addr and ps.addr + len(ps.data) >= s.addr:
                # sec:       |XXXXXXXXXX|
                # seg: |...XXX.............|
                seg_addr = ps.addr
                if ps.addr + len(ps.data) <= s.addr + len(s.data):
                    # sec:       |XXXXXXXXXX|
                    # seg:    |XXXXXXXXXXX...|
                    # merged: |XXXXXXXXXXXXXX|
                    seg_len = len(s.data) + (s.addr - ps.addr)
                else:
                    # sec:       |XXXXXXXXXX|
                    # seg:    |XXXXXXXXXXXXXXXXX|
                    # merged: |XXXXXXXXXXXXXXXXX|
                    seg_len = len(ps.data)
                merged_segs.append((s.name, seg_addr, seg_len, s.attr_str(), True))
                core_segs.remove(ps)
                merged = True
            elif ps.addr >= s.addr and ps.addr <= s.addr + len(s.data):
                # sec:  |XXXXXXXXXX|
                # seg:  |...XXX.............|
                seg_addr = s.addr
                if (ps.addr + len(ps.data)) >= (s.addr + len(s.data)):
                    # sec:    |XXXXXXXXXX|
                    # seg:      |..XXXXXXXXXXX|
                    # merged: |XXXXXXXXXXXXX|
                    seg_len = len(s.data) + (ps.addr + len(ps.data)) - (s.addr + len(s.data))
                else:
                    # sec:    |XXXXXXXXXX|
                    # seg:      |XXXXXX|
                    # merged: |XXXXXXXXXX|
                    seg_len = len(s.data)
                merged_segs.append((s.name, seg_addr, seg_len, s.attr_str(), True))
                core_segs.remove(ps)
                merged = True
        if not merged:
            merged_segs.append((s.name, s.addr, len(s.data), s.attr_str(), False))
    handlers = {}
    handlers[GDBMIResultHandler.TAG] = GDBMIResultHandler(verbose=False)
    handlers[GDBMIStreamConsoleHandler.TAG] = GDBMIStreamConsoleHandler(None, verbose=False)
    p = gdbmi_start(handlers, [rom_sym_cmd])
    extra_note = None
    task_info = []
    # scan the PT_NOTE segments for the EXTRA_INFO (crashed task / exception
    # registers) and TASK_INFO (per-task status) notes written by the loader
    for seg in core_elf.aux_segments:
        if seg.type != ESPCoreDumpElfFile.PT_NOTE:
            continue
        note_read = 0
        while note_read < len(seg.data):
            note = Elf32NoteDesc("", 0, None)
            note_read += note.read(seg.data[note_read:])
            if note.type == ESPCoreDumpLoader.ESP_CORE_DUMP_EXTRA_INFO_TYPE and 'EXTRA_INFO' in note.name:
                extra_note = note
            if note.type == ESPCoreDumpLoader.ESP_CORE_DUMP_TASK_INFO_TYPE and 'TASK_INFO' in note.name:
                task_info_struct = EspCoreDumpTaskStatus(buf=note.desc)
                task_info.append(task_info_struct)
    print("===============================================================")
    print("==================== ESP32 CORE DUMP START ====================")
    handlers[GDBMIResultHandler.TAG].result_class = None
    handlers[GDBMIStreamConsoleHandler.TAG].func = gdbmi_console_stream_handler
    if extra_note:
        # extra_info[0] holds the crashed task's TCB address (or a marker
        # meaning the task was skipped); remaining words appear to be
        # (reg_id, reg_value) pairs — layout defined by the firmware side
        extra_info = struct.unpack("<%dL" % (len(extra_note.desc) / struct.calcsize("<L")), extra_note.desc)
        if extra_info[0] == ESPCoreDumpLoader.ESP_COREDUMP_CURR_TASK_MARKER:
            print("\nCrashed task has been skipped.")
        else:
            p,task_name = gdbmi_freertos_get_task_name(p, extra_info[0])
            print("\nCrashed task handle: 0x%x, name: '%s', GDB name: 'process %d'" % (extra_info[0], task_name, extra_info[0]))
    print("\n================== CURRENT THREAD REGISTERS ===================")
    if extra_note:
        exccause = extra_info[1 + 2 * ESPCoreDumpElfFile.REG_EXCCAUSE_IDX + 1]
        exccause_str = xtensa_exception_cause_dict.get(exccause)
        if not exccause_str:
            exccause_str = ("Invalid EXCCAUSE code", "Invalid EXCAUSE description or not found.")
        print("exccause 0x%x (%s)" % (exccause, exccause_str[0]))
        print("excvaddr 0x%x" % extra_info[1 + 2 * ESPCoreDumpElfFile.REG_EXCVADDR_IDX + 1])
        print("epc1 0x%x" % extra_info[1 + 2 * ESPCoreDumpElfFile.REG_EPC1_IDX + 1])
        print("epc2 0x%x" % extra_info[1 + 2 * ESPCoreDumpElfFile.REG_EPC2_IDX + 1])
        print("epc3 0x%x" % extra_info[1 + 2 * ESPCoreDumpElfFile.REG_EPC3_IDX + 1])
        print("epc4 0x%x" % extra_info[1 + 2 * ESPCoreDumpElfFile.REG_EPC4_IDX + 1])
        print("epc5 0x%x" % extra_info[1 + 2 * ESPCoreDumpElfFile.REG_EPC5_IDX + 1])
        print("epc6 0x%x" % extra_info[1 + 2 * ESPCoreDumpElfFile.REG_EPC6_IDX + 1])
        print("epc7 0x%x" % extra_info[1 + 2 * ESPCoreDumpElfFile.REG_EPC7_IDX + 1])
        print("eps2 0x%x" % extra_info[1 + 2 * ESPCoreDumpElfFile.REG_EPS2_IDX + 1])
        print("eps3 0x%x" % extra_info[1 + 2 * ESPCoreDumpElfFile.REG_EPS3_IDX + 1])
        print("eps4 0x%x" % extra_info[1 + 2 * ESPCoreDumpElfFile.REG_EPS4_IDX + 1])
        print("eps5 0x%x" % extra_info[1 + 2 * ESPCoreDumpElfFile.REG_EPS5_IDX + 1])
        print("eps6 0x%x" % extra_info[1 + 2 * ESPCoreDumpElfFile.REG_EPS6_IDX + 1])
        print("eps7 0x%x" % extra_info[1 + 2 * ESPCoreDumpElfFile.REG_EPS7_IDX + 1])
    else:
        print("Exception registers have not been found!")
    p = gdbmi_getinfo(p, handlers, "info registers")
    print("\n==================== CURRENT THREAD STACK =====================")
    p = gdbmi_getinfo(p, handlers, "bt")
    if task_info and task_info[0].task_flags != EspCoreDumpTaskStatus.TASK_STATUS_CORRECT:
        print("The current crashed task is corrupted.")
        print("Task #%d info: flags, tcb, stack (%x, %x, %x)." % (task_info[0].task_index,
                                                                  task_info[0].task_flags,
                                                                  task_info[0].task_tcb_addr,
                                                                  task_info[0].task_stack_start))
    print("\n======================== THREADS INFO =========================")
    p = gdbmi_getinfo(p, handlers, "info threads")
    # THREADS STACKS
    p,threads,cur_thread = gdbmi_get_thread_ids(p)
    for thr_id in threads:
        # GDB thread ids are 1-based; task_info entries are 0-based
        task_index = int(thr_id) - 1
        if thr_id == cur_thread:
            continue
        p = gdbmi_switch_thread(p, thr_id)
        p,thr_info_res = gdbmi_get_thread_info(p, thr_id)
        tcb_addr = gdb2freertos_thread_id(thr_info_res.target_id)
        p,task_name = gdbmi_freertos_get_task_name(p, tcb_addr)
        print("\n==================== THREAD %s (TCB: 0x%x, name: '%s') =====================" % (thr_id, tcb_addr, task_name))
        p = gdbmi_getinfo(p, handlers, "bt")
        if task_info and task_info[task_index].task_flags != EspCoreDumpTaskStatus.TASK_STATUS_CORRECT:
            print("The task '%s' is corrupted." % thr_id)
            print("Task #%d info: flags, tcb, stack (%x, %x, %x)." % (task_info[task_index].task_index,
                                                                      task_info[task_index].task_flags,
                                                                      task_info[task_index].task_tcb_addr,
                                                                      task_info[task_index].task_stack_start))
    print("\n======================= ALL MEMORY REGIONS ========================")
    print("Name Address Size Attrs")
    for ms in merged_segs:
        print("%s 0x%x 0x%x %s" % (ms[0], ms[1], ms[2], ms[3]))
    for cs in core_segs:
        # core dump exec segments are from ROM, other are belong to tasks (TCB or stack)
        if cs.flags & ESPCoreDumpSegment.PF_X:
            seg_name = 'rom.text'
        else:
            seg_name = 'tasks.data'
        print(".coredump.%s 0x%x 0x%x %s" % (seg_name, cs.addr, len(cs.data), cs.attr_str()))
    if args.print_mem:
        print("\n====================== CORE DUMP MEMORY CONTENTS ========================")
        for cs in core_elf.program_segments:
            # core dump exec segments are from ROM, other are belong to tasks (TCB or stack)
            if cs.flags & ESPCoreDumpSegment.PF_X:
                seg_name = 'rom.text'
            else:
                seg_name = 'tasks.data'
            print(".coredump.%s 0x%x 0x%x %s" % (seg_name, cs.addr, len(cs.data), cs.attr_str()))
            # hex-dump the segment one 32-bit word at a time via GDB
            p = gdbmi_getinfo(p, handlers, "x/%dx 0x%x" % (old_div(len(cs.data),4), cs.addr))
    print("\n===================== ESP32 CORE DUMP END =====================")
    print("===============================================================")
    p.stdin.write(b'q\n')
    p.wait()
    p.stdin.close()
    p.stdout.close()
    if loader:
        # drop the temporary core file unless the user asked to keep it
        if not args.core and not args.save_core:
            loader.remove_tmp_file(core_fname)
        loader.cleanup()
    print('Done!')
def main():
    """Command-line entry point.

    Builds the argument parser for the 'dbg_corefile' and 'info_corefile'
    sub-commands, configures logging from --debug and dispatches to the
    module-level function named after the chosen sub-command.
    """
    parser = argparse.ArgumentParser(description='espcoredump.py v%s - ESP32 Core Dump Utility' % __version__, prog='espcoredump')
    parser.add_argument('--chip', '-c',
                        help='Target chip type',
                        choices=['auto', 'esp32'],
                        default=os.environ.get('ESPTOOL_CHIP', 'auto'))
    parser.add_argument(
        '--port', '-p',
        help='Serial port device',
        default=os.environ.get('ESPTOOL_PORT', esptool.ESPLoader.DEFAULT_PORT))
    parser.add_argument(
        '--baud', '-b',
        help='Serial port baud rate used when flashing/reading',
        type=int,
        default=os.environ.get('ESPTOOL_BAUD', esptool.ESPLoader.ESP_ROM_BAUD))
    subparsers = parser.add_subparsers(
        dest='operation',
        help='Run coredumper {command} -h for additional help')
    parser_debug_coredump = subparsers.add_parser(
        'dbg_corefile',
        help='Starts GDB debugging session with specified corefile')
    parser_debug_coredump.add_argument('--debug', '-d', help='Log level (0..3)', type=int, default=3)
    parser_debug_coredump.add_argument('--gdb', '-g', help='Path to gdb', default='xtensa-esp32-elf-gdb')
    parser_debug_coredump.add_argument('--core', '-c', help='Path to core dump file (if skipped core dump will be read from flash)', type=str)
    parser_debug_coredump.add_argument('--core-format', '-t', help='(elf, raw or b64). File specified with "-c" is an ELF ("elf"), '
                                                                   'raw (raw) or base64-encoded (b64) binary', type=str, default='elf')
    parser_debug_coredump.add_argument('--off', '-o', help='Ofsset of coredump partition in flash '
                                                           '(type "make partition_table" to see).', type=int, default=None)
    parser_debug_coredump.add_argument('--save-core', '-s', help='Save core to file. Othwerwise temporary core file will be deleted. '
                                                                 'Ignored with "-c"', type=str)
    parser_debug_coredump.add_argument('--rom-elf', '-r', help='Path to ROM ELF file.', type=str, default='esp32_rom.elf')
    parser_debug_coredump.add_argument('prog', help='Path to program\'s ELF binary', type=str)
    parser_info_coredump = subparsers.add_parser(
        'info_corefile',
        help='Print core dump info from file')
    parser_info_coredump.add_argument('--debug', '-d', help='Log level (0..3)', type=int, default=3)
    parser_info_coredump.add_argument('--gdb', '-g', help='Path to gdb', default='xtensa-esp32-elf-gdb')
    parser_info_coredump.add_argument('--core', '-c', help='Path to core dump file (if skipped core dump will be read from flash)', type=str)
    parser_info_coredump.add_argument('--core-format', '-t', help='(elf, raw or b64). File specified with "-c" is an ELF ("elf"), '
                                                                  'raw (raw) or base64-encoded (b64) binary', type=str, default='elf')
    parser_info_coredump.add_argument('--off', '-o', help='Offset of coredump partition in flash (type '
                                                          '"make partition_table" to see).', type=int, default=None)
    parser_info_coredump.add_argument('--save-core', '-s', help='Save core to file. Othwerwise temporary core file will be deleted. '
                                                                'Does not work with "-c"', type=str)
    parser_info_coredump.add_argument('--rom-elf', '-r', help='Path to ROM ELF file.', type=str, default='esp32_rom.elf')
    parser_info_coredump.add_argument('--print-mem', '-m', help='Print memory dump', action='store_true')
    parser_info_coredump.add_argument('prog', help='Path to program\'s ELF binary', type=str)
    # internal sanity check - every operation matches a module function of the same name
    for operation in subparsers.choices:
        assert operation in globals(), "%s should be a module function" % operation
    args = parser.parse_args()
    # map --debug 0..3 (anything higher means full DEBUG output)
    log_level = logging.CRITICAL
    if args.debug == 0:
        log_level = logging.CRITICAL
    elif args.debug == 1:
        log_level = logging.ERROR
    elif args.debug == 2:
        log_level = logging.WARNING
    elif args.debug == 3:
        log_level = logging.INFO
    else:
        log_level = logging.DEBUG
    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
    print('espcoredump.py v%s' % __version__)
    # dispatch to the module-level function named after the sub-command
    operation_func = globals()[args.operation]
    operation_func(args)
if __name__ == '__main__':
    try:
        main()
    except ESPCoreDumpError as e:
        # report tool-level failures to the user and exit with a non-zero status
        print('\nA fatal error occurred: %s' % e)
        sys.exit(2)
| 43.986278 | 158 | 0.59485 |
6855aa30c92d56e22044785e2ef1b10780dfa637 | 58,992 | py | Python | BMCoGAN-coupDis-sepReg.py | tasfia/BMCoGAN | 0d400c2c71dbfb69af422afc487f65afb98de8af | [
"MIT"
] | null | null | null | BMCoGAN-coupDis-sepReg.py | tasfia/BMCoGAN | 0d400c2c71dbfb69af422afc487f65afb98de8af | [
"MIT"
] | null | null | null | BMCoGAN-coupDis-sepReg.py | tasfia/BMCoGAN | 0d400c2c71dbfb69af422afc487f65afb98de8af | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 4 17:13:09 2020
@author: tshermin
"""
from __future__ import print_function
from easydict import EasyDict
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.autograd as autograd
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torch.autograd import Variable
import math
import util
import classifier
from center_loss import TripCenterLoss_min_margin,TripCenterLoss_margin
import classifier_latent
import sys
import model
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
#import torch
#######################################################################
#Computing PMIs
import sys
def jointProb(x_s, x_t):
    """Joint probability table between target and source feature rows.

    Each entry is proportional to the inner product of one target feature
    with one source feature; the whole table is normalised to sum to one.
    Returns a (x_t rows) x (x_s rows) tensor.
    """
    scores = torch.matmul(x_t, x_s.t())
    return scores / scores.sum()
def PMI(x_s, x_t, EPS=sys.float_info.epsilon):
    """Negative pointwise-mutual-information matrix between target features
    *x_t* (rows) and source features *x_s* (columns).

    x_s -- (sDim, kDim) source feature matrix
    x_t -- (tDim, kDim) target feature matrix
    EPS -- floor applied to probabilities before taking logs, so that
           log(0) never produces -inf/NaN

    Returns a (tDim, sDim) tensor with entries -log(Pij / (Pi * Pj)).
    """
    sDim, kDim = x_s.size()
    tDim, kDim = x_t.size()
    # joint probability table (same computation as jointProb, inlined):
    # similarity scores normalised so the whole table sums to one
    Pij = torch.matmul(x_t, x_s.t())
    Pij = Pij / Pij.sum()
    assert (Pij.size() == (tDim, sDim))
    # marginals broadcast back to the full table shape
    Pi = Pij.sum(dim=1).view(tDim, 1).expand(tDim, sDim)
    Pj = Pij.sum(dim=0).view(1, sDim).expand(tDim, sDim)
    # bug fix: the original wrote through a boolean mask into the
    # expand()-ed views Pi/Pj; in-place writes to broadcast views are
    # rejected by PyTorch (multiple output elements share one memory
    # location). clamp() returns fresh tensors and is numerically identical.
    Pij = Pij.clamp(min=EPS)
    Pi = Pi.clamp(min=EPS)
    Pj = Pj.clamp(min=EPS)
    pmi = -(torch.log(Pij) - torch.log(Pj) - torch.log(Pi))
    return pmi
##############################################################################
#import h5py
import numpy as np
import scipy.io as sio
import torch
from sklearn import preprocessing
import sys
def weights_init(m):
    """DCGAN-style initialisation hook for ``Module.apply``.

    Linear layers get N(0, 0.02) weights and zero bias; BatchNorm layers
    get N(1, 0.02) scale and zero shift.  Other module types are untouched.
    """
    layer_type = m.__class__.__name__
    if 'Linear' in layer_type:
        m.weight.data.normal_(0.0, 0.02)
        m.bias.data.fill_(0)
    elif 'BatchNorm' in layer_type:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
def map_label(label, classes):
    """Remap original dataset labels to contiguous ids 0..len(classes)-1.

    Every element of *label* equal to classes[i] becomes i in the result.
    Labels not present in *classes* are left uninitialised, exactly as in
    the original behaviour.
    """
    mapped = torch.LongTensor(label.size())
    for new_id, cls in enumerate(classes):
        mapped[label == cls] = new_id
    return mapped
class Logger(object):
    """Minimal append-only text logger backed by '<filename>.log'."""
    def __init__(self, filename):
        """Remember the base name and touch the log file so it exists."""
        self.filename = filename
        with open(self.filename + '.log', "a"):
            pass
    def write(self, message):
        """Append *message* to the log file, flushing by closing each time."""
        with open(self.filename + '.log', "a") as f:
            f.write(message)
class DATA_LOADER(object):
    """Loads a GZSL dataset from MATLAB .mat files and serves mini-batches.

    Exposes per-split features/labels (train, test_seen, test_unseen), the
    class-attribute matrix, and a family of next_batch_* samplers used by
    the training loop below.  The constants 50 (unseen classes), 30 (cached
    samples per class), 2048 (ResNet feature dim) and 312 (attribute dim)
    are hard-coded in several places, so this loader effectively assumes
    the CUB split configured in the EasyDict further down the file.
    """
    def __init__(self, opt):
        # Only the .mat path is exercised by the config below.
        # NOTE(review): the 'imageNet1K' branch calls read_matimagenet,
        # which is not defined in this file — confirm it exists elsewhere.
        if opt.matdataset:
            if opt.dataset == 'imageNet1K':
                self.read_matimagenet(opt)
            else:
                self.read_matdataset(opt)
        self.index_in_epoch = 0
        self.epochs_completed = 0
    def read_matdataset(self, opt):
        """Read features/labels/attributes from .mat files and precompute,
        for every unseen class, its 4 highest-PMI seen classes plus a cache
        of up to 30 training features of those neighbours."""
        matcontent = sio.loadmat(opt.dataroot + "/" + opt.dataset + "/" + opt.image_embedding + ".mat")
        feature = matcontent['features'].T
        label = matcontent['labels'].astype(int).squeeze() - 1
        matcontent = sio.loadmat(opt.dataroot + "/" + opt.dataset + "/" + opt.class_embedding + "_splits.mat")
        # numpy array index starts from 0, matlab starts from 1
        trainval_loc = matcontent['trainval_loc'].squeeze() - 1
        train_loc = matcontent['train_loc'].squeeze() - 1
        val_unseen_loc = matcontent['val_loc'].squeeze() - 1
        test_seen_loc = matcontent['test_seen_loc'].squeeze() - 1
        test_unseen_loc = matcontent['test_unseen_loc'].squeeze() - 1
        self.attribute = torch.from_numpy(matcontent['att'].T).float()
        if not opt.validation:
            if opt.preprocessing:
                # Scale features, then renormalise every split by the max of
                # the (scaled) training features.
                if opt.standardization:
                    print('standardization...')
                    scaler = preprocessing.StandardScaler()
                else:
                    scaler = preprocessing.MinMaxScaler()
                _train_feature = scaler.fit_transform(feature[trainval_loc])
                _test_seen_feature = scaler.transform(feature[test_seen_loc])
                _test_unseen_feature = scaler.transform(feature[test_unseen_loc])
                self.train_feature = torch.from_numpy(_train_feature).float()
                mx = self.train_feature.max()
                self.train_feature.mul_(1/mx)
                self.train_label = torch.from_numpy(label[trainval_loc]).long()
                self.test_unseen_feature = torch.from_numpy(_test_unseen_feature).float()
                self.test_unseen_feature.mul_(1/mx)
                self.test_unseen_label = torch.from_numpy(label[test_unseen_loc]).long()
                self.test_seen_feature = torch.from_numpy(_test_seen_feature).float()
                self.test_seen_feature.mul_(1/mx)
                self.test_seen_label = torch.from_numpy(label[test_seen_loc]).long()
            else:
                # Raw, unscaled features.
                self.train_feature = torch.from_numpy(feature[trainval_loc]).float()
                self.train_label = torch.from_numpy(label[trainval_loc]).long()
                self.test_unseen_feature = torch.from_numpy(feature[test_unseen_loc]).float()
                self.test_unseen_label = torch.from_numpy(label[test_unseen_loc]).long()
                self.test_seen_feature = torch.from_numpy(feature[test_seen_loc]).float()
                self.test_seen_label = torch.from_numpy(label[test_seen_loc]).long()
        else:
            # Validation split: unseen validation classes play the test role.
            # NOTE(review): this branch never sets test_seen_*, so the
            # np.unique(self.test_seen_label...) call below would fail here.
            self.train_feature = torch.from_numpy(feature[train_loc]).float()
            self.train_label = torch.from_numpy(label[train_loc]).long()
            self.test_unseen_feature = torch.from_numpy(feature[val_unseen_loc]).float()
            self.test_unseen_label = torch.from_numpy(label[val_unseen_loc]).long()
        self.seenclasses = torch.from_numpy(np.unique(self.train_label.numpy()))
        self.unseenclasses = torch.from_numpy(np.unique(self.test_unseen_label.numpy()))
        self.seenTestclasses = torch.from_numpy(np.unique(self.test_seen_label.numpy()))
        self.attribute_unseen=self.attribute[self.unseenclasses]
        self.attribute_seen = self.attribute[self.seenclasses]
        self.attribute_seen_test = self.attribute[self.seenTestclasses]
        self.ntrain = self.train_feature.size()[0]
        self.ntestSeen = self.test_seen_feature.size()[0]
        self.ntrain_u = self.test_unseen_feature.size()[0]
        self.ntrain_class = self.seenclasses.size(0)
        self.ntest_class = self.unseenclasses.size(0)
        self.ntest_Seenclass = self.seenTestclasses.size(0)
        self.train_class = self.seenclasses.clone()
        self.allclasses = torch.arange(0, self.ntrain_class+self.ntest_class).long()
        self.train_mapped_label = map_label(self.train_label, self.seenclasses)
        self.all_feature = torch.cat((self.train_feature, self.test_unseen_feature,self.test_seen_feature))
        self.both_feature = torch.cat((self.train_feature, self.test_unseen_feature))
        # PMI between softmax-normalised unseen and seen attribute vectors:
        # for every unseen class, record its 4 highest-scoring seen classes.
        soft = nn.Softmax(dim=-1)
        ft = soft(self.attribute_unseen)
        fs = soft(self.attribute_seen)
        loss_pMI = PMI(fs, ft)
        t = loss_pMI.unsqueeze(1)
        # self.x row layout: [unseen_class, 1st..4th PMI-nearest seen class]
        self.x = torch.zeros([50, 5], dtype=torch.long)
        for i in range(50):
            self.x[i][0] = self.unseenclasses[i]
            d = torch.topk(t[i],4,dim = 1)[1][0:4]
            seen_label = torch.index_select(self.seenclasses, 0, d.view(4))
            self.x[i][1] = seen_label[0]
            self.x[i][2] = seen_label[1]
            self.x[i][3] = seen_label[2]
            self.x[i][4] = seen_label[3]
        # Cache up to 30 training features of each unseen class's top-1 PMI
        # neighbour.  NOTE(review): the copy loop below iterates
        # range(count) over the *first* `count` training samples rather
        # than over the indices that matched self.x[k][1] — verify this
        # actually selects the intended rows.
        self.trainfeature_unseen = torch.zeros(50,30,2048)
        for k in range(50):
            count = 0
            for i in range(self.ntrain):
                if self.train_label[i] == self.x[k][1]:
                    count = count + 1
            self.trainfeature_unseen_tmp = torch.zeros(count, 2048)
            for i in range(count):
                if self.train_label[i] == self.x[k][1]:
                    self.trainfeature_unseen_tmp[i][:] = self.train_feature[i][:]
            self.trainfeature_unseen[k][:] = self.trainfeature_unseen_tmp[0:30]
        # Same cache for the 2nd-ranked PMI neighbour.
        self.trainfeature_unseen2 = torch.zeros(50,30,2048)
        for k in range(50):
            count = 0
            for i in range(self.ntrain):
                if self.train_label[i] == self.x[k][2]:
                    count = count + 1
            self.trainfeature_unseen_tmp = torch.zeros(count, 2048)
            for i in range(count):
                if self.train_label[i] == self.x[k][2]:
                    self.trainfeature_unseen_tmp[i][:] = self.train_feature[i][:]
            self.trainfeature_unseen2[k][:] = self.trainfeature_unseen_tmp[0:30]
    def next_batch(self, batch_size):
        """Random seen batch plus cycled unseen attributes/labels and a
        surrogate unseen feature drawn from the PMI-neighbour cache.

        Returns (feature, label, att, unseen_att, unseen_label,
        unseen_feature); the unseen entries cycle through the 50 unseen
        classes in order, wrapping as needed to fill the batch.
        """
        idx = torch.randperm(self.ntrain)[0:batch_size]
        batch_feature = self.train_feature[idx]
        batch_label = self.train_label[idx]
        batch_att = self.attribute[batch_label]
        batch_unseen_att = torch.zeros(batch_size, 312)
        batch_unseen_label = torch.zeros(batch_size, dtype=torch.long)
        # Cycle through the 50 unseen classes to fill the batch.
        j=0
        for i in range(batch_size):
            if j == 50:
                j=0
            batch_unseen_att[i] = self.attribute[self.unseenclasses[j]]
            batch_unseen_label[i] = self.unseenclasses[j]
            j=j+1
        batch_feature_unseen = torch.zeros(batch_size, 2048)
        for i in range(batch_size):
            for j in range(50):
                if batch_unseen_label[i] == int(self.x[j][0]):
                    # pick one of the 30 cached neighbour features at random
                    r = torch.randperm(30)
                    batch_feature_unseen[i][:] = self.trainfeature_unseen[j][r[0]][:]
        return batch_feature, batch_label, batch_att,batch_unseen_att,batch_unseen_label,batch_feature_unseen
    def next_batch_trainM(self, batch_size):
        """Random (label, attribute) pairs from the training split."""
        idx = torch.randperm(self.ntrain)[0:batch_size]
        batch_label = self.train_label[idx]
        batch_att = self.attribute[batch_label]
        return batch_label, batch_att
    def next_batch_TestM(self, batch_size):
        """Random (label, attribute) pairs from the seen-class test split."""
        idx = torch.randperm(self.ntestSeen)[0:batch_size]
        batch_label = self.test_seen_label[idx]
        batch_att = self.attribute[batch_label]
        return batch_label, batch_att
    def next_batch_M(self, batch_size):
        """Random seen-class indices plus cycled unseen attributes/labels.

        Allocates CUDA tensors unconditionally; the prints are debug output
        left in by the authors.
        """
        print("ok")
        idx_seen = torch.randperm(self.ntrain_class)[0:batch_size]
        idx_unseen = torch.zeros(batch_size, 1, dtype=torch.int32).cuda()
        batch_unseen_att = torch.zeros(batch_size, 312).cuda()
        batch_unseen_label = torch.zeros(batch_size, dtype=torch.int32).cuda()
        j=0
        for i in range(batch_size):
            if j == 50:
                j=0
            idx_unseen[i] = self.unseenclasses[j]
            batch_unseen_att[i] = self.attribute[self.unseenclasses[j]]
            batch_unseen_label[i] = self.unseenclasses[j]
            j=j+1
        print(batch_unseen_label.size())
        print(batch_unseen_att.size())
        batch_seen_att = self.attribute_seen[idx_seen]
        batch_seen_label = self.seenclasses[idx_seen]
        return idx_seen, idx_unseen, batch_seen_att, batch_unseen_att, batch_seen_label, batch_unseen_label
    def next_batch_one_class(self, batch_size):
        """Batch drawn from a single class, advancing one class per call
        and wrapping after all seen classes have been visited."""
        if self.index_in_epoch == self.ntrain_class:
            self.index_in_epoch = 0
            perm = torch.randperm(self.ntrain_class)
            # NOTE(review): this permuted assignment is a no-op
            # (train_class[perm] = train_class[perm]); a shuffle such as
            # train_class = train_class[perm] was probably intended.
            self.train_class[perm] = self.train_class[perm]
        iclass = self.train_class[self.index_in_epoch]
        idx = self.train_label.eq(iclass).nonzero().squeeze()
        perm = torch.randperm(idx.size(0))
        idx = idx[perm]
        iclass_feature = self.train_feature[idx]
        iclass_label = self.train_label[idx]
        self.index_in_epoch += 1
        return iclass_feature[0:batch_size], iclass_label[0:batch_size], self.attribute[iclass_label[0:batch_size]]
    def next_batch_transductive(self, batch_size):
        """Seen batch plus a batch mixed from train/unseen/seen-test
        features paired with uniformly random class attributes."""
        idx = torch.randperm(self.ntrain)[0:batch_size]
        batch_seen_feature = self.train_feature[idx]
        batch_seen_label = self.train_label[idx]
        batch_seen_att = self.attribute[batch_seen_label]
        idx = torch.randperm(self.all_feature.shape[0])[0:batch_size]
        batch_both_feature = self.all_feature[idx]
        idx_both_att = torch.randint(0, self.attribute.shape[0], (batch_size,))
        batch_both_att = self.attribute[idx_both_att]
        return batch_seen_feature, batch_seen_label, batch_seen_att, batch_both_feature, batch_both_att
    def next_batch_transductive_both(self, batch_size):
        """Like next_batch_transductive, but the mixed pool excludes the
        seen-class test features (train + test_unseen only)."""
        idx = torch.randperm(self.ntrain)[0:batch_size]
        batch_seen_feature = self.train_feature[idx]
        batch_seen_label = self.train_label[idx]
        batch_seen_att = self.attribute[batch_seen_label]
        idx = torch.randperm(self.both_feature.shape[0])[0:batch_size]
        batch_both_feature = self.both_feature[idx]
        idx_both_att = torch.randint(0, self.attribute.shape[0], (batch_size,))
        batch_both_att = self.attribute[idx_both_att]
        return batch_seen_feature, batch_seen_label, batch_seen_att, batch_both_feature, batch_both_att
    def next_batch_MMD(self, batch_size):
        """Up-to-batch_size features from each of two distinct random seen
        classes (for an MMD-style two-sample comparison)."""
        index = torch.randint(self.seenclasses.shape[0], (2,))
        while index[0]==index[1]:
            index = torch.randint(self.seenclasses.shape[0], (2,))
        select_labels=self.seenclasses[index]
        X_features=self.train_feature[self.train_label==select_labels[0]]
        Y_features = self.train_feature[self.train_label == select_labels[1]]
        idx_X = torch.randperm(X_features.shape[0])[0:batch_size]
        X_features = X_features[idx_X]
        idx_Y = torch.randperm(Y_features.shape[0])[0:batch_size]
        Y_features = Y_features[idx_Y]
        return X_features,Y_features
    def next_batch_MMD_all(self):
        """All features of two distinct random seen classes."""
        index = torch.randint(self.seenclasses.shape[0], (2,))
        while index[0]==index[1]:
            index = torch.randint(self.seenclasses.shape[0], (2,))
        select_labels=self.seenclasses[index]
        X_features=self.train_feature[self.train_label==select_labels[0]]
        Y_features = self.train_feature[self.train_label == select_labels[1]]
        return X_features,Y_features
    def next_batch_unseenatt(self, batch_size,unseen_batch_size):
        """Seen batch plus randomly drawn unseen labels and attributes."""
        idx = torch.randperm(self.ntrain)[0:batch_size]
        batch_feature = self.train_feature[idx]
        batch_label = self.train_label[idx]
        batch_att = self.attribute[batch_label]
        idx_unseen =torch.randint(0, self.unseenclasses.shape[0], (unseen_batch_size,))
        unseen_label=self.unseenclasses[idx_unseen]
        unseen_att=self.attribute[unseen_label]
        return batch_feature, batch_label, batch_att,unseen_label,unseen_att
    # select batch samples by randomly drawing batch_size classes
    def next_batch_uniform_class(self, batch_size):
        """One random sample from each of batch_size randomly drawn seen
        classes (classes may repeat across the batch)."""
        batch_class = torch.LongTensor(batch_size)
        for i in range(batch_size):
            idx = torch.randperm(self.ntrain_class)[0]
            batch_class[i] = self.train_class[idx]
        batch_feature = torch.FloatTensor(batch_size, self.train_feature.size(1))
        batch_label = torch.LongTensor(batch_size)
        batch_att = torch.FloatTensor(batch_size, self.attribute.size(1))
        for i in range(batch_size):
            iclass = batch_class[i]
            idx_iclass = self.train_label.eq(iclass).nonzero().squeeze()
            idx_in_iclass = torch.randperm(idx_iclass.size(0))[0]
            idx_file = idx_iclass[idx_in_iclass]
            batch_feature[i] = self.train_feature[idx_file]
            batch_label[i] = self.train_label[idx_file]
            batch_att[i] = self.attribute[batch_label[i]]
        return batch_feature, batch_label, batch_att
#################################################################################
# ---------------------------------------------------------------------------
# Configuration.  argparse only swallows unknown CLI arguments; the real
# options come from the EasyDict below.  (argparse, EasyDict, random and
# cudnn are presumably imported earlier in the file — TODO confirm.)
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='ADA')
args = parser.parse_known_args()[0]
args = EasyDict({
    "dataset": 'CUB',
    "dataroot": 'C:/PHD/data/RRF-GZSL/data',
    "matdataset": True,
    "image_embedding":'res101',
    "class_embedding":'att',
    "syn_num":400,
    "gzsl":True,
    "preprocessing": False,
    "standardization":False,
    "validation":False,
    "workers":2,
    "batch_size":512,
    "batch_size_M":512,
    "resSize":2048,
    "attSize":312,
    "nz":312,
    "ngh":4096,
    "latenSize":1024,
    "nepoch":5000,
    "critic_iter":5,
    "i_c":0.1,
    "lambda1":10,
    "cls_weight":0.2,
    "lr":0.0001,
    "classifier_lr":0.001,
    "beta1":0.5,
    "cuda":True,
    "ngpu":1,
    "manualSeed":3483,
    "nclass_all":200,
    "nclass_seen":150,
    "lr_dec":False,
    "lr_dec_ep":1,
    "lr_dec_rate":0.95,
    "final_classifier":'knn',
    "k":1,
    "n_power":1,
    "radius":3.5,
    "center_margin":190,
    "center_marginu":200,
    "center_weight":0.1,
    "save_path":'C:/PHD/data/RRF-GZSL/'
})
# Override the config flag with the actual hardware capability.
args.cuda = torch.cuda.is_available()
opt = args
# Seed everything for reproducibility.
if opt.manualSeed is None:
    opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
    torch.cuda.manual_seed_all(opt.manualSeed)
    cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")
# load data
data = DATA_LOADER(opt)
####################################################################
def weights_init(m):
    """DCGAN-style initialiser for nn.Module.apply().

    NOTE(review): this is a byte-for-byte duplicate of the weights_init
    defined earlier in the file; re-binding the same name is harmless but
    redundant.  Linear: weight ~ N(0, 0.02); BatchNorm: weight ~ N(1, 0.02);
    biases zeroed in both cases.
    """
    layer_kind = m.__class__.__name__
    if layer_kind.find('Linear') != -1:
        m.weight.data.normal_(0.0, 0.02)
        m.bias.data.fill_(0)
    elif layer_kind.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
class MLP_g(nn.Module):
    """Conditional generator: (noise, attribute) -> synthetic visual feature.

    forward() concatenates noise and attribute, passes them through a
    LeakyReLU hidden layer (ngh units) and a ReLU output layer (resSize
    units).  F() is an auxiliary fusion head over two feature batches.
    """
    def __init__(self, opt):
        super(MLP_g, self).__init__()
        # Layer creation order is kept so RNG-based initialisation matches.
        self.fc1 = nn.Linear(opt.attSize + opt.nz, opt.ngh)
        self.fc = nn.Linear(opt.resSize*2, opt.resSize)
        self.fc2 = nn.Linear(opt.ngh, opt.resSize)
        self.lrelu = nn.LeakyReLU(0.2, True)
        self.relu = nn.ReLU(True)
        self.apply(weights_init)
    def F(self, s, u):
        """Fuse two resSize-dim feature batches into one via a ReLU layer."""
        fused = torch.cat((s, u), 1)
        return self.relu(self.fc(fused))
    def forward(self, noise, att):
        hidden = torch.cat((noise, att), 1)
        hidden = self.lrelu(self.fc1(hidden))
        return self.relu(self.fc2(hidden))
class Mapping(nn.Module):
    """Embeds a visual feature into a latent code and scores it.

    forward() returns (seen_half, unseen_half, discriminator_score,
    preds, predu, latent).  The classifier heads exist but are bypassed:
    preds and predu are simply the latent code itself.
    """
    def __init__(self, opt):
        super(Mapping, self).__init__()
        self.latensize=opt.latenSize
        # Layer creation order is kept so RNG-based initialisation matches.
        self.encoder_linear = nn.Linear(opt.resSize, opt.latenSize)
        self.discriminator = nn.Linear(opt.latenSize, 1)
        self.classifier = nn.Linear(opt.latenSize, opt.nclass_seen)
        self.classifier2 = nn.Linear(opt.latenSize, 50)
        self.lrelu = nn.LeakyReLU(0.2, True)
        self.sigmoid = nn.Sigmoid()
        self.logic = nn.LogSoftmax(dim=1)
        self.apply(weights_init)
    def forward(self, x):
        laten = self.lrelu(self.encoder_linear(x))
        # NOTE(review): split is hard-coded at 512, i.e. assumes
        # latenSize == 1024 — confirm if latenSize ever changes.
        seen, unseen = laten[:, :512], laten[:, 512:]
        dis_out = self.discriminator(laten)
        # Classifier heads bypassed: both "predictions" are the raw latent.
        preds = laten
        predu = laten
        return seen, unseen, dis_out, preds, predu, laten
class Regressor(nn.Module):
    """Maps a visual feature (resSize) back to attribute space (attSize).

    The second head (fc2) and both discriminator layers are created so the
    RNG-based initialisation matches the original, but forward() only uses
    the shared trunk plus fc1.
    """
    def __init__(self,opt):
        super(Regressor, self).__init__()
        # Layer creation order is kept so RNG-based initialisation matches.
        self.Sharedfc = nn.Linear(opt.resSize, 1024)
        self.fc1 = nn.Linear(1024, opt.attSize)
        self.fc2 = nn.Linear(1024, opt.attSize)
        self.relu = nn.ReLU(True)
        self.lrelu = nn.LeakyReLU(0.2, True)
        self.discriminatorS = nn.Linear(opt.attSize, 1)
        self.discriminatorU = nn.Linear(opt.attSize, 1)
        self.apply(weights_init)
    def forward(self, x):
        hidden = self.lrelu(self.Sharedfc(x))
        return self.relu(self.fc1(hidden))
class Discriminator(nn.Module):
    """Joint discriminator over a (seen, unseen) attribute pair.

    Each attribute passes through its own 256-unit LeakyReLU branch; the
    concatenated branches are scored by a 2-way linear head.  Returns
    (joint_score, seen_branch, unseen_branch).
    """
    def __init__(self, opt):
        super(Discriminator, self).__init__()
        # Layer creation order is kept so RNG-based initialisation matches.
        self.discriminatorS = nn.Linear(opt.attSize, 256)
        self.discriminatorU = nn.Linear(opt.attSize, 256)
        self.fc = nn.Linear(256*2, 2)
        self.lrelu = nn.LeakyReLU(0.2, True)
        self.sigmoid = nn.Sigmoid()
        self.logic = nn.LogSoftmax(dim=1)
        self.apply(weights_init)
    def forward(self, s, u):
        branch_s = self.lrelu(self.discriminatorS(s))
        branch_u = self.lrelu(self.discriminatorU(u))
        joint = torch.cat((branch_s, branch_u), 1)
        return self.fc(joint).squeeze(), branch_s, branch_u
##########################################################################
class ConditionalEntropyLoss(torch.nn.Module):
    """Weighted conditional-entropy of a batch of logits.

    forward(x, weight) computes -mean_i sum_c p_ic * log p_ic * weight,
    i.e. the (weighted) Shannon entropy of the softmax distribution,
    averaged over the batch.
    """
    def __init__(self,model):
        # `model` is accepted only for signature compatibility; it is unused.
        super(ConditionalEntropyLoss, self).__init__()
    def forward(self, x, weight):
        probs = F.softmax(x, dim=1)
        log_probs = F.log_softmax(x, dim=1)
        per_sample = (probs * log_probs * weight).sum(dim=1)
        return -1.0 * per_sample.mean(dim=0)
class VirtualAversarialTraining(nn.Module):
    """Virtual Adversarial Training (VAT) loss.

    Finds a small perturbation r_vadv via power iteration that maximally
    changes self.model's prediction, then penalises the weighted KL
    divergence between the clean and perturbed predictions.  Reads the
    module-level `args` (n_power, radius) and hard-codes device='cuda'
    for the probe noise, so it only runs on a GPU.
    """
    def __init__(self, model):
        super(VirtualAversarialTraining, self).__init__()
        self.n_power = args.n_power  # power-iteration steps
        self.XI = 1e-6               # scale of the probe perturbation
        # NOTE(review): model is assumed to return a (logits, ...) tuple —
        # both call sites below unpack two values; confirm against the
        # network passed in.
        self.model = model
        self.eps = args.radius       # radius of the final perturbation
    def forward(self, X, logit, weight):
        vat_loss = self.virtualAdvLoss(X, logit, weight)
        return vat_loss
    def getNormalizedVector(self, d):
        # L2-normalise each sample's flattened perturbation.
        return F.normalize(d.view(d.size(0), -1), p=2, dim=1).reshape(d.size())
    def klDivLogit_withWeight(self, logit_q, logit_p, weight):
        # KL(q || p) between softmax distributions, with `weight` folded
        # into q.
        q = F.softmax(logit_q, dim=1) * weight
        qlogq = torch.mean(torch.sum(q * F.log_softmax(logit_q, dim=1), dim=1))
        qlogp = torch.mean(torch.sum(q * F.log_softmax(logit_p, dim=1), dim=1))
        return qlogq - qlogp
    def klDivLogit(self, logit_q, logit_p):
        # Unweighted KL(q || p) between two logit batches.
        q = F.softmax(logit_q, dim=1)
        qlogq = torch.mean(torch.sum(q * F.log_softmax(logit_q, dim=1), dim=1))
        qlogp = torch.mean(torch.sum(q * F.log_softmax(logit_p, dim=1), dim=1))
        return qlogq - qlogp
    def generateVirtualAdversarialPerturbation(self, x, logit):
        # Power iteration: start from random noise, repeatedly replace it
        # with the gradient of the KL divergence w.r.t. the probe.
        d = torch.randn_like(x, device='cuda')
        for _ in range(self.n_power):
            d = self.XI * self.getNormalizedVector(d).requires_grad_()
            logit_m,_= self.model(x + d)
            distance = self.klDivLogit(logit, logit_m)
            grad = torch.autograd.grad(distance, [d])[0]
            d = grad.detach()
        return self.eps * self.getNormalizedVector(d)
    def virtualAdvLoss(self, x, logit, weight):
        r_vadv = self.generateVirtualAdversarialPerturbation(x, logit)
        # Clean logits are detached: only the perturbed branch gets gradient.
        logit_m1 = logit.detach()
        logit_m2,_= self.model(x + r_vadv)
        loss = self.klDivLogit_withWeight(logit_m1, logit_m2, weight)
        return loss
############################################################################
class CLASSIFIER:
    """GZSL evaluation head: trains a LINEAR_LOGSOFTMAX classifier on the
    latent codes produced by a frozen `map` network.

    Training runs inside __init__; results are exposed as self.acc_seen,
    self.acc_unseen and self.H (best harmonic mean over epochs).
    Depends on module-level util, LINEAR_LOGSOFTMAX, optim and Variable.
    """
    def __init__(self, map,latenSize, _train_X, _train_Y, data_loader, _nclass, _cuda, _lr=0.001, _beta1=0.5,
                 _nepoch=20, _batch_size=100, val=True):
        self.train_X = _train_X
        self.train_Y = _train_Y
        self.test_seen_feature = data_loader.test_seen_feature
        self.test_seen_label = data_loader.test_seen_label
        self.test_unseen_feature = data_loader.test_unseen_feature
        self.test_unseen_label = data_loader.test_unseen_label
        self.seenclasses = data_loader.seenclasses
        self.unseenclasses = data_loader.unseenclasses
        self.batch_size = _batch_size
        self.nepoch = _nepoch
        self.nclass = _nclass
        self.input_dim = _train_X.size(1)
        self.latent_dim = latenSize
        self.cuda = _cuda
        # The classifier operates on the latent code, not the raw feature.
        self.model = LINEAR_LOGSOFTMAX(self.latent_dim, self.nclass)
        self.model.apply(util.weights_init)
        self.criterion = nn.NLLLoss()
        # Freeze the mapping network: it is evaluated but never updated here.
        self.map = map
        for p in self.map.parameters():  # reset requires_grad
            p.requires_grad = False
        self.input = torch.FloatTensor(_batch_size, self.input_dim)
        self.label = torch.LongTensor(_batch_size)
        self.lr = _lr
        self.beta1 = _beta1
        # setup optimizer
        self.optimizer = optim.Adam(self.model.parameters(), lr=_lr, betas=(_beta1, 0.999))
        if self.cuda:
            self.model.cuda()
            self.criterion.cuda()
            self.input = self.input.cuda()
            self.label = self.label.cuda()
        self.index_in_epoch = 0
        self.epochs_completed = 0
        self.ntrain = self.train_X.size()[0]
        # GZSL training/evaluation runs immediately on construction.
        self.acc_seen, self.acc_unseen, self.H = self.fit()
    def fit(self):
        """Train for self.nepoch epochs; return the (seen, unseen, H)
        triple of the epoch with the best harmonic mean.

        NOTE(review): H divides by (acc_seen + acc_unseen) with no zero
        guard — both accuracies being 0 would raise ZeroDivisionError.
        """
        best_H = 0
        best_seen = 0
        best_unseen = 0
        for epoch in range(self.nepoch):
            for i in range(0, self.ntrain, self.batch_size):
                self.model.zero_grad()
                batch_input, batch_label = self.next_batch(self.batch_size)
                self.input.copy_(batch_input)
                self.label.copy_(batch_label)
                inputv = Variable(self.input)
                labelv = Variable(self.label)
                # classify on the frozen map's latent code
                seen,unseen,dis_out,preds,predu,laten = self.map(inputv)
                output = self.model(laten)
                loss = self.criterion(output, labelv)
                loss.backward()
                self.optimizer.step()
            acc_seen = self.val_gzsl(self.test_seen_feature, self.test_seen_label, self.seenclasses)
            acc_unseen = self.val_gzsl(self.test_unseen_feature, self.test_unseen_label, self.unseenclasses)
            H = 2 * acc_seen * acc_unseen / (acc_seen + acc_unseen)
            if H > best_H:
                best_seen = acc_seen
                best_unseen = acc_unseen
                best_H = H
        return best_seen, best_unseen, best_H
    def next_batch(self, batch_size):
        """Sequential mini-batch over (train_X, train_Y), reshuffling at
        every epoch boundary and stitching the epoch tail to the next
        epoch's head when the data size is not a multiple of batch_size."""
        start = self.index_in_epoch
        # shuffle the data at the first epoch
        if self.epochs_completed == 0 and start == 0:
            perm = torch.randperm(self.ntrain)
            self.train_X = self.train_X[perm]
            self.train_Y = self.train_Y[perm]
        # the last batch
        if start + batch_size > self.ntrain:
            self.epochs_completed += 1
            rest_num_examples = self.ntrain - start
            if rest_num_examples > 0:
                X_rest_part = self.train_X[start:self.ntrain]
                Y_rest_part = self.train_Y[start:self.ntrain]
            # shuffle the data
            perm = torch.randperm(self.ntrain)
            self.train_X = self.train_X[perm]
            self.train_Y = self.train_Y[perm]
            # start next epoch
            start = 0
            self.index_in_epoch = batch_size - rest_num_examples
            end = self.index_in_epoch
            X_new_part = self.train_X[start:end]
            Y_new_part = self.train_Y[start:end]
            if rest_num_examples > 0:
                return torch.cat((X_rest_part, X_new_part), 0), torch.cat((Y_rest_part, Y_new_part), 0)
            else:
                return X_new_part, Y_new_part
        else:
            self.index_in_epoch += batch_size
            end = self.index_in_epoch
            return self.train_X[start:end], self.train_Y[start:end]
    def val_gzsl(self, test_X, test_label, target_classes):
        """Per-class mean accuracy on absolute labels (GZSL protocol)."""
        start = 0
        ntest = test_X.size()[0]
        predicted_label = torch.LongTensor(test_label.size())
        for i in range(0, ntest, self.batch_size):
            end = min(ntest, start + self.batch_size)
            if self.cuda:
                mus,stds,dis_out,pred,encoder_out,laten = self.map(test_X[start:end].cuda())
                output = self.model(laten)
            else:
                mus,stds,dis_out,pred,encoder_out,laten = self.map(test_X[start:end])
                output = self.model(laten)
            _, predicted_label[start:end] = torch.max(output.data, 1)
            start = end
        acc = self.compute_per_class_acc_gzsl(test_label, predicted_label, target_classes)
        return acc
    def compute_per_class_acc_gzsl(self, test_label, predicted_label, target_classes):
        """Average of per-class accuracies over target_classes.

        NOTE(review): unlike the module-level helper of the same name,
        there is no guard for a class with zero test samples (division
        by zero).
        """
        acc_per_class = 0
        for i in target_classes:
            idx = (test_label == i)
            acc_per_class += float(torch.sum(test_label[idx] == predicted_label[idx])) / float(torch.sum(idx))
        acc_per_class /= float(target_classes.size(0))
        return acc_per_class
    def val(self, test_X, test_label, target_classes):
        """Per-class mean accuracy on labels remapped to [0, n) (ZSL)."""
        start = 0
        ntest = test_X.size()[0]
        predicted_label = torch.LongTensor(test_label.size())
        for i in range(0, ntest, self.batch_size):
            end = min(ntest, start + self.batch_size)
            if self.cuda:
                mus,stds,dis_out,pred,encoder_out,laten = self.map(test_X[start:end].cuda())
                output = self.model(laten)
            else:
                mus,stds,dis_out,pred,encoder_out,laten = self.map(test_X[start:end])
                output = self.model(laten)
            _, predicted_label[start:end] = torch.max(output.data, 1)
            start = end
        acc = self.compute_per_class_acc(util.map_label(test_label, target_classes), predicted_label,
                                         target_classes.size(0))
        return acc
    def compute_per_class_acc(self, test_label, predicted_label, nclass):
        """Mean per-class accuracy for contiguous labels 0..nclass-1."""
        acc_per_class = torch.FloatTensor(nclass).fill_(0)
        for i in range(nclass):
            idx = (test_label == i)
            acc_per_class[i] = float(torch.sum(test_label[idx] == predicted_label[idx])) / float(torch.sum(idx))
        return acc_per_class.mean()
class LINEAR_LOGSOFTMAX(nn.Module):
    """Single linear layer followed by a row-wise log-softmax.

    Maps an input of size `input_dim` to per-class log-probabilities over
    `nclass` classes; pairs with nn.NLLLoss during training.
    """
    def __init__(self, input_dim, nclass):
        super(LINEAR_LOGSOFTMAX, self).__init__()
        self.fc = nn.Linear(input_dim, nclass)
        self.logic = nn.LogSoftmax(dim=1)
    def forward(self, x):
        return self.logic(self.fc(x))
#############################################################################
############################################################################
# ---------------------------------------------------------------------------
# Model construction, loss criteria and pre-allocated training buffers.
# ---------------------------------------------------------------------------
netG = MLP_g(opt)
mapping= Mapping(opt)
RegS = Regressor(opt)   # attribute regressor for seen features
RegU = Regressor(opt)   # attribute regressor for unseen (surrogate) features
Dis = Discriminator(opt)
criterion = nn.CrossEntropyLoss()
mse = nn.MSELoss()
triplet_loss = nn.TripletMarginLoss(margin=1).cuda()
cls_criterion = nn.NLLLoss()
# Center losses: the margin variant for CUB/SUN, the min-margin variant for
# AWA1/FLO.  NOTE(review): center_criterion2 is only created in the CUB/SUN
# branch; the optimizer built from it below would fail for AWA1/FLO.
if opt.dataset in ['CUB','SUN']:
    center_criterion = TripCenterLoss_margin(num_classes=opt.nclass_seen, feat_dim=opt.latenSize, use_gpu=opt.cuda)
    center_criterion2 = TripCenterLoss_margin(num_classes=50, feat_dim=opt.latenSize, use_gpu=opt.cuda)
elif opt.dataset in ['AWA1','FLO']:
    center_criterion = TripCenterLoss_min_margin(num_classes=opt.nclass_seen, feat_dim=opt.latenSize, use_gpu=opt.cuda)
else:
    raise ValueError('Dataset %s is not supported'%(opt.dataset))
# Pre-allocated mini-batch buffers, filled in place by sample() below.
input_res = torch.FloatTensor(opt.batch_size, opt.resSize)
input_att_s = torch.FloatTensor(opt.batch_size, opt.attSize)
noise_s = torch.FloatTensor(opt.batch_size, opt.nz)
noise_u = torch.FloatTensor(opt.batch_size, opt.nz)
input_label_s = torch.LongTensor(opt.batch_size)
input_att_u = torch.FloatTensor(opt.batch_size, opt.attSize)
input_label_u = torch.LongTensor(opt.batch_size)
input_res_u = torch.FloatTensor(opt.batch_size, opt.resSize)
input_res_u2 = torch.FloatTensor(opt.batch_size, opt.resSize)
beta=0  # information-bottleneck multiplier, updated by optimize_beta()
# Move everything to the GPU when available.
if opt.cuda:
    mapping.cuda()
    netG.cuda()
    RegS.cuda()
    RegU.cuda()
    Dis.cuda()
    criterion.cuda()
    mse.cuda()
    input_res = input_res.cuda()
    input_res_u = input_res_u.cuda()
    input_res_u2 = input_res_u2.cuda()
    noise_s, input_att_s = noise_s.cuda(), input_att_s.cuda()
    noise_u, input_att_u = noise_u.cuda(), input_att_u.cuda()
    cls_criterion.cuda()
    input_label_s = input_label_s.cuda()
    input_label_u = input_label_u.cuda()
def sample():
    """Draw one training batch and copy it into the pre-allocated global
    buffers (input_res, input_att_s, input_label_s, input_att_u,
    input_label_u, input_res_u) in place."""
    batch_feature, batch_label, batch_att,batch_unseen_att,batch_unseen_label,batch_feature_u = data.next_batch(opt.batch_size)
    input_res.copy_(batch_feature)
    input_att_s.copy_(batch_att)
    # labels are remapped to contiguous [0, n_class) indices
    input_label_s.copy_(util.map_label(batch_label, data.seenclasses))
    input_att_u.copy_(batch_unseen_att)
    input_label_u.copy_(util.map_label(batch_unseen_label, data.unseenclasses))
    input_res_u.copy_(batch_feature_u)
def generate_syn_feature(netG, classes, attribute, num):
    """Synthesize `num` visual features for every class in `classes`.

    Runs the generator under no_grad on N(0, 1) noise conditioned on each
    class attribute.  Returns (features, labels) on the CPU with
    features.shape == (len(classes) * num, opt.resSize) and labels holding
    the absolute class id of each row.  Reads the module-level `opt`.
    """
    nclass = classes.size(0)
    syn_feature = torch.FloatTensor(nclass*num, opt.resSize)
    syn_label = torch.LongTensor(nclass*num)
    syn_att = torch.FloatTensor(num, opt.attSize)
    syn_noise = torch.FloatTensor(num, opt.nz)
    if opt.cuda:
        syn_att = syn_att.cuda()
        syn_noise = syn_noise.cuda()
    with torch.no_grad():
        for row, iclass in enumerate(classes):
            # one attribute vector repeated `num` times, fresh noise each class
            syn_att.copy_(attribute[iclass].repeat(num, 1))
            syn_noise.normal_(0, 1)
            fake = netG(syn_noise, syn_att)
            syn_feature.narrow(0, row*num, num).copy_(fake.data.cpu())
            syn_label.narrow(0, row*num, num).fill_(iclass)
    return syn_feature, syn_label
# Optional checkpoint resume (disabled).
#netG.load_state_dict(torch.load('Gen3.pt'))
#mapping.load_state_dict(torch.load('map3.pt'))
#RegS.load_state_dict(torch.load('RegS3.pt'))
#RegU.load_state_dict(torch.load('RegU3.pt'))
#Dis.load_state_dict(torch.load('Dis3.pt'))
# setup optimizer
optimizerD = optim.Adam(mapping.parameters(), lr=opt.lr,betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,betas=(opt.beta1, 0.999))
optimizerRs = optim.Adam(RegS.parameters(), lr=.0002,betas=(opt.beta1, 0.999))
optimizerRu = optim.Adam(RegU.parameters(), lr=.0002,betas=(opt.beta1, 0.999))
optimizerDis = optim.Adam(Dis.parameters(), lr=0.0002,betas=(opt.beta1, 0.999))
optimizer_center=optim.Adam(center_criterion.parameters(), lr=opt.lr,betas=(opt.beta1, 0.999))
# NOTE(review): center_criterion2 only exists for CUB/SUN; this line raises
# NameError when opt.dataset is AWA1/FLO.
optimizer_center2=optim.Adam(center_criterion2.parameters(), lr=.00001,betas=(opt.beta1, 0.999))
def compute_per_class_acc_gzsl(test_label, predicted_label, target_classes):
    """Mean per-class accuracy over `target_classes` (absolute label ids).

    Classes with no samples in `test_label` contribute 0 to the average
    instead of raising a division-by-zero error.
    """
    total = 0.0
    for cls in target_classes:
        mask = (test_label == cls)
        n_samples = float(torch.sum(mask))
        if n_samples != 0:
            total += float(torch.sum(test_label[mask] == predicted_label[mask])) / n_samples
    return total / float(target_classes.size(0))
def calc_gradient_penalty(netD, real_data, fake_data):
    """WGAN-GP gradient penalty on random interpolates of real and fake data.

    Samples a per-row mixing coefficient, forms the interpolated batch,
    scores it with netD's discriminator head (third element of its output
    tuple) and penalises deviations of the gradient 2-norm from 1, scaled
    by opt.lambda1.  Reads the module-level `opt`.
    """
    mix = torch.rand(opt.batch_size, 1)
    mix = mix.expand(real_data.size())
    if opt.cuda:
        mix = mix.cuda()
    interpolates = mix * real_data + ((1 - mix) * fake_data)
    if opt.cuda:
        interpolates = interpolates.cuda()
    interpolates = Variable(interpolates, requires_grad=True)
    _,_,disc_interpolates,_ ,_,_= netD(interpolates)
    grad_outputs = torch.ones(disc_interpolates.size())
    if opt.cuda:
        grad_outputs = grad_outputs.cuda()
    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                              grad_outputs=grad_outputs,
                              create_graph=True, retain_graph=True, only_inputs=True)[0]
    return ((gradients.norm(2, dim=1) - 1) ** 2).mean() * opt.lambda1
def MI_loss(mus, sigmas, i_c, alpha=1e-8):
    """Information-bottleneck surplus: mean KL(N(mu, sigma^2) || N(0, I)) - i_c.

    `alpha` guards the log against sigma == 0.  Positive values mean the
    latent carries more information than the capacity budget i_c allows.
    """
    kl_per_sample = 0.5 * torch.sum(
        (mus ** 2) + (sigmas ** 2) - torch.log((sigmas ** 2) + alpha) - 1, dim=1)
    return torch.mean(kl_per_sample) - i_c
def optimize_beta(beta, MI_loss, alpha2=1e-6):
    """One dual-ascent step on the information-bottleneck multiplier.

    Moves beta along the constraint violation MI_loss with step alpha2,
    clipping at zero so the multiplier stays non-negative.
    """
    # return the updated beta value:
    return max(0, beta + alpha2 * MI_loss)
# train a classifier on seen classes, obtain \theta of Equation (4)
# (classifier.CLASSIFIER comes from the imported `classifier` module, not
# the CLASSIFIER class defined above — note the different signature.)
pretrain_cls = classifier.CLASSIFIER(data.train_feature, map_label(data.train_label, data.seenclasses),
                                     data.seenclasses.size(0), opt.resSize, opt.cuda, 0.001, 0.5, 50, 100)
# Freeze the pretrained classifier for use as a fixed scoring function.
for p in pretrain_cls.model.parameters():  # set requires_grad to False
    p.requires_grad = False
# Book-keeping for the training loop below.
G_losses = []
D_losses = []
d = 0
bestaccS = 0
bestaccU = 0
for epoch in range(2000):
    # NOTE(review): epoch count is hard-coded; opt.nepoch is only used in the
    # progress print below — confirm they are meant to agree.
    FP = 0
    mean_lossD = 0
    mean_lossG = 0
    for i in range(0, data.ntrain, opt.batch_size):
        #if i % 2 == 0:
        #    d = 0
        #else:
        #    d = 1
        ############################
        # (1) Update the critic (`mapping`): WGAN loss + gradient penalty +
        # triplet separation + centre loss, trained opt.critic_iter times per
        # generator step.
        ############################
        for p in mapping.parameters(): # reset requires_grad
            p.requires_grad = True # they are set to False below in netG update
        for iter_d in range(opt.critic_iter):
            sample()
            #print(input_label)
            #y_onehot = torch.zeros((opt.batch_size, opt.nclass_all)).cuda()
            #y_onehot.scatter_(1, input_label.unsqueeze(1), 1)
            #y_onehot.requires_grad_(False)
            mapping.zero_grad()
            # Real visual features / attributes for seen (s) and unseen (u) classes.
            input_resv = Variable(input_res)
            input_attv = Variable(input_att_s)
            #print(input_attv)
            #if d == 0:
            input_resv_u = Variable(input_res_u)
            #else:
            #    input_resv_u = Variable(input_res_u2)
            input_attv_u = Variable(input_att_u)
            # Critic scores are the 3rd output of `mapping`; the 6th is a latent code.
            _,_,criticD_real1,_,_,latens = mapping(input_resv)
            #print(criticD_real)
            _,_,criticD_real_u,_,_,latenu = mapping(input_resv_u)
            #print(criticD_real_u)
            #latents_loss=cls_criterion(latens, input_label_s)
            #latentu_loss=cls_criterion(latenu, input_label_u)
            criticD_real = criticD_real1.mean()
            criticD_real_u = criticD_real_u.mean()
            ## criticD_real.backward()
            ## train with fakeG
            # Fake seen-class features, detached so the generator gets no
            # gradient during the critic update.
            noise_s.normal_(0, 1)
            noisev = Variable(noise_s)
            #print(noisev)
            fake = netG(noisev, input_attv)
            seen,_,criticD_fake1,_,_,latens = mapping(fake.detach())
            criticD_fake = criticD_fake1.mean()
            # Fake unseen-class features.
            noise_u.normal_(0, 1)
            noisev_u = Variable(noise_u)
            fake_u = netG(noisev_u, input_attv_u)
            _,unseen,criticD_fake_u1,_,_,latenu = mapping(fake_u.detach())
            criticD_fake_u = criticD_fake_u1.mean()
            # NOTE(review): wrapping a computed loss in Variable(...,
            # requires_grad=True) detaches it from the autograd graph, and
            # these two values are unused below — dead code kept as-is.
            diff_lossSD = Variable(mse(criticD_real1, criticD_fake),requires_grad=True)
            diff_lossUD = Variable(mse(criticD_real1, criticD_fake_u),requires_grad=True)
            #diff_lossD = diff_lossSD - diff_lossUD
            # Triplet term over critic scores: anchor=real-seen,
            # positive=fake-seen, negative=fake-unseen.
            diff_lossD = triplet_loss(criticD_real1, criticD_fake, criticD_fake_u)
            #com_feature = netG.F(fake,fake_u)
            #_,_,criticD_com,_,_,_ = mapping(com_feature.detach())
            #criticD_com = criticD_com.mean()
            ## criticD_fake.backward(one)
            ## gradient penalty
            gradient_penalty = calc_gradient_penalty(mapping, input_resv, fake.data)
            #gradient_penalty_com = calc_gradient_penalty(mapping, input_resv, com_feature.data)
            #gradient_penalty_u = calc_gradient_penalty(mapping, input_resv_u, fake_u.data)
            #mi_loss=MI_loss(torch.cat((muR, muF), dim=0),torch.cat((varR, varF), dim=0), opt.i_c)
            center_loss=center_criterion(latens, input_label_s,margin=opt.center_margin)
            #center_loss_u=center_criterion2(latenu, input_label_u,margin=opt.center_marginu)
            Wasserstein_D = criticD_real - criticD_fake #+ criticD_real_u - criticD_fake_u
            #D_cost = criticD_fake - criticD_real + 1 * gradient_penalty + 0.001*criticD_real**2 + .01 * (criticD_fake_u - criticD_real_u + gradient_penalty_u + 0.001*criticD_real_u**2)
            # Full critic loss: triplet + WGAN loss + gradient penalty + drift
            # regulariser (0.001 * real^2) + weighted centre loss.
            D_cost = diff_lossD + criticD_fake - criticD_real + 1 * gradient_penalty + 0.001*criticD_real**2+center_loss*opt.center_weight#\
            #+ .001 * (criticD_fake_u - criticD_real_u + gradient_penalty_u + 0.001*criticD_real_u**2 + center_loss_u*0.1)#\+.1*latents_loss + .01*latentu_loss
            #+ (criticD_com - criticD_real + 1 * gradient_penalty_com)
            #center_loss_u*0.1 +
            D_cost.backward()
            optimizerD.step()
            #beta=optimize_beta(beta,mi_loss.item())
            ## for param in center_criterion.parameters():
            ##     param.grad.data *= (1. / args.weight_cent)
            optimizer_center.step()
            #optimizer_center2.step()
        ############################
        # (2) Update REG DIS network: optimize WGAN-GP objective, Equation (2)
        # Train the attribute regressors (RegS/RegU) plus the attribute
        # discriminator (Dis) on freshly generated features.
        ###########################
        RegS.zero_grad()
        RegU.zero_grad()
        Dis.zero_grad()
        input_resv = Variable(input_res)
        input_attv = Variable(input_att_s)
        #input_resv_u = Variable(input_res_u)
        input_attv_u = Variable(input_att_u)
        noise_s.normal_(0, 1)
        noisev = Variable(noise_s)
        fakeS = netG(noisev, input_attv)
        # NOTE(review): the seen-sized noise buffer `noise_s` is re-used for
        # the unseen fake here (unlike block (1), which uses noise_u) —
        # confirm the shapes are meant to match.
        noise_s.normal_(0, 1)
        noisev = Variable(noise_s)
        fakeU = netG(noisev, input_attv_u)
        #com_feature = netG.F(fakeS,fakeU)
        #R_att_S, R_att_U = Reg(fakeS.detach(), fakeU.detach())
        R_att_S = RegS(fakeS.detach())
        R_att_U = RegU(fakeU.detach())
        #R_att_S = RegS(com_feature.detach())
        #R_att_U = RegU(com_feature.detach())
        #dis_fake_S, dis_fake_U = Reg(fakeS.detach(), fakeU.detach())
        #dis_loss = (mse(R_att_S, input_att_s) + mse(R_att_U, input_att_u))#/2
        # Discriminator scores for regressed (fake) vs ground-truth attributes.
        df,dis_fake_S, dis_fake_U = Dis(R_att_S, R_att_U)
        dr,dis_real_S, dis_real_U = Dis(input_attv, input_attv_u)
        Reg_loss = (mse(R_att_S, input_attv) + mse(R_att_U, input_attv_u))#/2
        # NOTE(review): np.int is deprecated/removed in recent NumPy releases;
        # use int (or np.int64) when dependencies are updated.
        true_labels = Variable(torch.LongTensor(np.ones(opt.batch_size, dtype=np.int))).cuda()
        fake_labels = Variable(torch.LongTensor(np.zeros(opt.batch_size, dtype=np.int))).cuda()
        true_loss_S = nn.functional.cross_entropy(dr, true_labels)
        #print(true_loss_S)
        #true_loss_U = nn.functional.cross_entropy(dis_real_U, true_labels)
        fake_loss = nn.functional.cross_entropy(df, fake_labels)
        #print(fake_loss)
        #fake_loss_U = nn.functional.cross_entropy(dis_fake_U, fake_labels)
        #dummy_tensor = Variable(
        #torch.zeros(dis_fake_S.size(0), dis_fake_S.size(1))).cuda()
        #mseloss = mse(dis_fake_S - dis_fake_U, dummy_tensor) * dis_fake_S.size(
        #1)
        #com_loss = dis_loss + Reg_loss
        #diff_loss = Variable(mse(fakeS.detach(), fakeU.detach()),requires_grad=True)
        # Joint loss: real-vs-fake cross entropy + attribute-regression MSE.
        com_loss = true_loss_S + fake_loss + Reg_loss #+ diff_loss
        com_loss.backward()
        #optimizerR.step()
        optimizerRu.step()
        optimizerRs.step()
        optimizerDis.step()
        ############################
        # (2) Update G network: optimize WGAN-GP objective, Equation (2)
        # Critic frozen; the generator is pushed to (a) raise critic scores,
        # (b) fool the attribute discriminator via the frozen regressors,
        # (c) classify correctly under the frozen pretrained classifier and
        # (d) satisfy the triplet separation term.
        ###########################
        for p in mapping.parameters(): # reset requires_grad
            p.requires_grad = False # avoid computation
        netG.zero_grad()
        #cls.zero_grad()
        input_resv = Variable(input_res)
        input_attv = Variable(input_att_s)
        input_attv_u = Variable(input_att_u)
        noise_s.normal_(0, 1)
        noisev = Variable(noise_s)
        fake = netG(noisev, input_attv)
        _,_,criticG_fake1,_,_,_= mapping(fake)
        criticG_fake = criticG_fake1.mean()
        G_cost = -criticG_fake
        noise_u.normal_(0, 1)
        noisev_u = Variable(noise_u)
        fake_u = netG(noisev_u, input_attv_u)
        _,_,criticG_fake_u1,_,_,_= mapping(fake_u)
        criticG_fake_u = criticG_fake_u1.mean()
        G_cost_u = -criticG_fake_u
        #diff_lossS = Variable(mse(input_resv, fake),requires_grad=True)
        #diff_lossU = Variable(mse(input_resv, fake_u),requires_grad=True)
        #diff_loss = diff_lossS - diff_lossU
        _,_,criticG_real,_,_,_= mapping(input_resv)
        #criticG_real = criticG_real.mean()
        # NOTE(review): as in block (1), these Variable(..., requires_grad=True)
        # wrappers detach the MSE terms; they are only printed, never trained on.
        diff_lossS = Variable(mse(criticG_real, criticG_fake),requires_grad=True)
        diff_lossU = Variable(mse(criticG_real, criticG_fake_u),requires_grad=True)
        #diff_loss = diff_lossS - diff_lossU
        #anchor = torch.randn(100, 128, requires_grad=True)
        #positive = torch.randn(100, 128, requires_grad=True)
        #negative = torch.randn(100, 128, requires_grad=True)
        diff_loss = triplet_loss(criticG_real, criticG_fake, criticG_fake_u)
        #>>> output.backward()
        #print(diff_loss)
        #com_feature = netG.F(fake,fake_u)
        #_,_,criticG_com,_,_,_= mapping(com_feature, se=True)
        #criticG_com = criticG_com.mean()
        #G_com = -criticG_com
        # Attribute cycle: regress attributes from the fakes (regressors run
        # under no_grad) and ask the discriminator to call them real.
        with torch.no_grad():
            R_att_S = RegS(fake.detach())
            R_att_U = RegU(fake_u.detach())
            #R_att_S = RegS(com_feature.detach())
            #R_att_U = RegU(com_feature.detach())
            #R_att_S, R_att_U = Reg(fakeS.detach(), fakeU.detach())
        d,dis_fake_S, dis_fake_U = Dis(R_att_S, R_att_U)
        true_labels = Variable(torch.LongTensor(np.ones(opt.batch_size, dtype=np.int))).cuda()
        fake_loss_S = nn.functional.cross_entropy(d, true_labels)
        #print(fake_loss_S)
        #print(fake_u.size())
        #__,_,_,pred = Cls.forward(fake)
        #Cls_loss = criterion(pred, y_onehot)
        # center_loss_f = center_criterion(z_F, input_label)
        # classification loss
        # _, _, _, , _ = mapping(fake,input_attv, mean_mode=True)
        # c_errG_latent = cls_criterion(latent_pred_fake, input_label)
        # c_errG = cls_criterion(fake, input_label)
        c_errG_fake = cls_criterion(pretrain_cls.model(fake), input_label_s)
        #c_errG_fakecom = cls_criterion(pretrain_cls.model(com_feature), input_label_s)
        # Weighted generator objective (weights are hard-coded here).
        errG = .8 * G_cost + .2*(c_errG_fake) + 2*fake_loss_S + diff_loss#- diff_lossU + diff_lossS#+ .001 * (G_cost_u) # + .001 * loss_cls_s
        #+ 0.001 * vatloss_src + 0.001 * centloss_s + 0.001 * vatloss_trg +loss_cls_u) #+center_loss_f
        #errG = G_cost + .5*(c_errG_fake) + .01 * loss_cls_s + .001 * (G_cost_u + 0.001 * vatloss_src + 0.001 * centloss_s + 0.001 * vatloss_trg +loss_cls_u) #+center_loss_f
        #errG = G_cost + .8*(c_errG_fake) + .01 * loss_cls_s + 0.001 * (0.001 * vatloss_src + 0.001 * centloss_s )+ .001 * (G_cost_u + 0.001 * vatloss_trg +loss_cls_u) #+center_loss_f
        errG.backward()
        optimizerG.step()
        # Save Losses for plotting later
        G_losses.append(errG.item())
        D_losses.append(D_cost.item())
    # Optional learning-rate decay every opt.lr_dec_ep epochs, applied to all
    # live optimisers.
    if opt.lr_dec:
        if (epoch + 1) % opt.lr_dec_ep == 0:
            for param_group in optimizerD.param_groups:
                param_group['lr'] = param_group['lr'] * opt.lr_dec_rate
            for param_group in optimizerG.param_groups:
                param_group['lr'] = param_group['lr'] * opt.lr_dec_rate
            for param_group in optimizer_center.param_groups:
                param_group['lr'] = param_group['lr'] * opt.lr_dec_rate
            for param_group in optimizerDis.param_groups:
                param_group['lr'] = param_group['lr'] * opt.lr_dec_rate
            for param_group in optimizerRs.param_groups:
                param_group['lr'] = param_group['lr'] * opt.lr_dec_rate
            for param_group in optimizerRu.param_groups:
                param_group['lr'] = param_group['lr'] * opt.lr_dec_rate
            #for param_group in optimizerCls.param_groups:
            #    param_group['lr'] = param_group['lr'] * opt.lr_dec_rate
    # NOTE(review): mean_lossG/mean_lossD are never accumulated above, so these
    # divisions always yield 0; kept only for the commented-out print below.
    mean_lossG /= data.ntrain / opt.batch_size
    mean_lossD /= data.ntrain / opt.batch_size
    #print('[%d/%d] Loss_D: %.4f Loss_G: %.4f,Loss_Gu: %.4f,Loss_errG: %.4f,Loss_cls_s: %.4f,Loss_cls_u: %.4f, Wasserstein_dist: %.4f, c_errG_fake:%.4f'
    #    % (epoch, opt.nepoch, D_cost.item(), G_cost.item(),G_cost_u.item(),errG.item(),loss_cls_s.item(),loss_cls_u.item(),Wasserstein_D.item(),c_errG_fake.item()))
    print('[%d/%d] Loss_D: %.4f Loss_Gcost: %.4f,fake_loss_S: %.4f,Loss_errG: %.4f Wasserstein_dist: %.4f, c_errG_fake:%.4f,reg: %.3f,com_loss: %.3f, diff: %.3f, diffS: %.3f,diffU: %.3f'
          % (epoch, opt.nepoch, D_cost.item(), G_cost.item(),fake_loss_S.item(),errG.item(),Wasserstein_D.item(),c_errG_fake.item(),Reg_loss.item(), com_loss.item(), diff_loss.item(), diff_lossS.item(),diff_lossU.item()))
    # evaluate the model, set G to evaluation mode
    netG.eval()
    mapping.eval()
    RegS.eval()
    RegU.eval()
    Dis.eval()
    #DisS.eval()
    #DisU.eval()
    #cls.eval()
    # Generalized zero-shot learning
    # Generalized zero-shot learning
    syn_feature, syn_label = generate_syn_feature(netG, data.unseenclasses, data.attribute, opt.syn_num)
    # _,_,_,_,_,laten = mapping(syn_feature.cuda())
    # Final-classifier training set: real seen features + synthesised unseen.
    train_X = torch.cat((data.train_feature, syn_feature), 0)
    train_Y = torch.cat((data.train_label, syn_label), 0)
    if opt.final_classifier == 'softmax':
        nclass = opt.nclass_all
        cls1 = CLASSIFIER(mapping, opt.latenSize, train_X, train_Y, data, nclass, opt.cuda,
                        opt.classifier_lr, 0.5, 30, opt.syn_num, True)
        print('--------------------------------------------------------------')
        print('unseen=%.4f, seen=%.4f, h=%.4f' % (cls1.acc_unseen, cls1.acc_seen, cls1.H))
    elif opt.final_classifier == 'knn':
        if epoch % 1 == 0: ## training a knn classifier takes too much time
            # Classify in the critic's latent space (6th output of `mapping`).
            clf = KNeighborsClassifier(n_neighbors=opt.k)
            _, _, _, _, _, train_z = mapping(train_X.cuda())
            clf.fit(X=train_z.cpu(), y=train_Y)
            #clf.fit(X=train_X.cpu(), y=train_Y)
            _, _, _, _, _, test_z_seen = mapping(data.test_seen_feature.cuda())
            pred_Y_s = torch.from_numpy(clf.predict(test_z_seen.cpu()))
            #pred_Y_s = torch.from_numpy(clf.predict(data.test_seen_feature.cpu()))
            _, _, _, _, _, test_z_unseen = mapping(data.test_unseen_feature.cuda())
            pred_Y_u = torch.from_numpy(clf.predict(test_z_unseen.cpu()))
            # NOTE(review): arguments appear swapped relative to the
            # compute_per_class_acc_gzsl(test_label, predicted_label, ...)
            # signature — predictions are passed first.  Verify intended.
            acc_seen = compute_per_class_acc_gzsl(pred_Y_s, data.test_seen_label, data.seenclasses)
            acc_unseen = compute_per_class_acc_gzsl(pred_Y_u, data.test_unseen_label, data.unseenclasses)
            # Harmonic mean; raises ZeroDivisionError when both accuracies are 0.
            H = 2 * acc_seen * acc_unseen / (acc_seen + acc_unseen)
            print('unseen=%.4f, seen=%.4f, h=%.4f' % (acc_unseen, acc_seen, H))
    else:
        raise ValueError('Classifier %s is not supported' % (opt.final_classifier))
    # Checkpoint whenever unseen accuracy improves.
    # NOTE(review): acc_unseen is only assigned in the 'knn' branch; with
    # final_classifier == 'softmax' this line raises NameError on the first
    # epoch — confirm the intended classifier mode.
    if acc_unseen > bestaccU:
        bestaccU = acc_unseen
        bestaccS = acc_seen
        torch.save(netG.state_dict(),"Gen5.pt")
        torch.save(mapping.state_dict(),"map5.pt")
        torch.save(RegS.state_dict(),"RegS5.pt")
        torch.save(RegU.state_dict(),"RegU5.pt")
        #torch.save(DisS.state_dict(),"DisS.pt")
        torch.save(Dis.state_dict(),"Dis5.pt")
    #if cls1.acc_seen > bestaccS:
    #    bestaccS = cls1.acc_seen
    print('--------------------------------------------------------------')
    print('Best unseen=%.4f, Best seen=%.4f' % (bestaccU, bestaccS))
    #print(bestaccU)
    #print(bestaccS)
    # Back to training mode for the next epoch.
    netG.train()
    mapping.train()
    RegS.train()
    RegU.train()
    Dis.train()
    #DisS.train()
    #DisU.train()
    #cls.train()
# After training: plot the generator/discriminator loss curves collected in
# G_losses / D_losses (one point per generator iteration).
plt.figure(figsize=(10,5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(G_losses,label="G")
plt.plot(D_losses,label="D")
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()
plt.show()
##########1nn u-55 s-59
##########3nn u-56 s-63
##########1nn u-55 s-60
####try joint reg joint dis
####try full cogan struct
| 41.987189 | 226 | 0.6012 |
45592202eb1867bba24d7533fa67ce9da47cafc6 | 1,289 | py | Python | tests/integration/test_league.py | bsmiller25/espn-api | e06f0168eb84d9380cd29235de9de5cb3e351080 | [
"MIT"
] | 1 | 2019-10-29T01:00:02.000Z | 2019-10-29T01:00:02.000Z | tests/integration/test_league.py | bsmiller25/espn-api | e06f0168eb84d9380cd29235de9de5cb3e351080 | [
"MIT"
] | null | null | null | tests/integration/test_league.py | bsmiller25/espn-api | e06f0168eb84d9380cd29235de9de5cb3e351080 | [
"MIT"
] | null | null | null | from unittest import TestCase
from espn_api.football import League
# Integration test to make sure ESPN's API didnt change
class LeagueTest(TestCase):
    """Integration tests against ESPN's live fantasy-football API.

    They exist to detect upstream API changes, so they require network access
    and real (public) league IDs.
    """

    def test_league_init(self):
        current = League(1234, 2018)
        self.assertEqual(current.current_week, 15)

    def test_past_league(self):
        past = League(12345, 2017)
        self.assertEqual(past.nfl_week, 18)

    def test_private_league(self):
        # A private league must refuse unauthenticated access.
        with self.assertRaises(Exception):
            League(368876, 2018)

    def test_unknown_league(self):
        # A non-existent league ID must raise.
        with self.assertRaises(Exception):
            League(2, 2018)

    def test_bad_box_scores(self):
        # box_scores is not available for pre-2019 seasons.
        old = League(1234, 2018)
        with self.assertRaises(Exception):
            old.box_scores()

    def test_bad_free_agents(self):
        old = League(1234, 2018)
        with self.assertRaises(Exception):
            old.free_agents()

    def test_box_scores(self):
        modern = League(48153503, 2019)
        scores = modern.box_scores(week=2)
        self.assertEqual(repr(scores[1].away_team), 'Team(TEAM BERRY)')
        self.assertEqual(repr(scores[1].away_lineup[1]), 'Player(Odell Beckham Jr., points:29, projected:16)')
| 29.295455 | 115 | 0.627618 |
6ebdca2fb08a4ec1713f47c6372f464405ae9fe7 | 2,034 | py | Python | 33. Search in Rotated Sorted Array.py | JazzikPeng/Algorithm-in-Python | 915135b1cdd02a6bb8d7068a54b2f497b2ec31d4 | [
"MIT"
] | 3 | 2018-02-05T06:15:57.000Z | 2019-04-07T23:33:07.000Z | 33. Search in Rotated Sorted Array.py | JazzikPeng/Algorithm-in-Python | 915135b1cdd02a6bb8d7068a54b2f497b2ec31d4 | [
"MIT"
] | null | null | null | 33. Search in Rotated Sorted Array.py | JazzikPeng/Algorithm-in-Python | 915135b1cdd02a6bb8d7068a54b2f497b2ec31d4 | [
"MIT"
] | null | null | null | class Solution:
def normal_search(self, nums, target):
left, right = 0, len(nums)-1
while right >= left:
mid = (left+right) // 2
if nums[mid] == target:
return mid
if nums[mid] > target:
right = mid-1
else:
left = mid + 1
return -1
def find_pivot(self, nums):
if len(nums) <= 2:
return nums.index(min(nums))
left, right = 0, len(nums) - 1
while right > left:
if right - left == 1:
return nums.index(min(nums[left:right+1]))
mid = (left + right) // 2
if nums[mid] < nums[right]: # Pivot is in mid to right
right = mid
else:
left = mid
print(nums[left], nums[right], left, right)
return -1
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
if len(nums)==0:
return -1
if len(nums)==1:
if nums[0]==target:
return 0
else:
return -1
# Use O(log n) to find the Pivot
left, right = 0, len(nums) - 1
# This is a un-rotated array, since no duplication
print(right, left)
if nums[right] > nums[left]:
return self.normal_search( nums, target)
else: # This when the array is roated, we need to find the idx of pivot
pivot = self.find_pivot(nums)
print('pivot', pivot)
if nums[pivot] == target:
print('pivot', pivot)
return pivot
else:
nums = nums[pivot:] + nums[:pivot]
idx = self.normal_search(nums, target)
print('idx', idx)
if idx==-1:
return -1
else:
return (pivot+idx) % len(nums)
| 32.285714 | 79 | 0.438545 |
6d8933aab89923b495f7681200abfe10cbeb9e08 | 2,927 | py | Python | aoc21.py | zidarsk8/aoc2020 | 21239a8bfd3cba31f16c91c28a176e1163ba4cf9 | [
"Apache-2.0"
] | 1 | 2020-12-02T08:29:50.000Z | 2020-12-02T08:29:50.000Z | aoc21.py | zidarsk8/aoc2020 | 21239a8bfd3cba31f16c91c28a176e1163ba4cf9 | [
"Apache-2.0"
] | null | null | null | aoc21.py | zidarsk8/aoc2020 | 21239a8bfd3cba31f16c91c28a176e1163ba4cf9 | [
"Apache-2.0"
] | null | null | null | import aoc21_data
from typing import List, Tuple, Set, Dict
Food = List[Tuple[Set[str], Set[str]]]
def parse(text: str) -> Food:
    """Parse puzzle input into one (ingredients, allergens) set pair per line."""
    entries: Food = []
    for line in text.splitlines():
        ingredient_part, allergen_part = line.split(" (contains ")
        ingredient_names = ingredient_part.split()
        allergen_names = allergen_part[:-1].split(", ")
        # Input guarantee: no duplicate names within a single line.
        assert len(set(allergen_names)) == len(allergen_names)
        assert len(set(ingredient_names)) == len(ingredient_names)
        entries.append((set(ingredient_names), set(allergen_names)))
    return entries
def test_parse():
    # Smoke-test parse() against both the AoC sample and the real puzzle input
    # (both stored in the aoc21_data module); only line 0's allergens are checked.
    food = parse(aoc21_data.test_data)
    assert food[0][1] == {"dairy", "fish"}
    food = parse(aoc21_data.data)
    assert food[0][1] == {"nuts", "sesame"}
def get_all_allergens(food: Food) -> Set[str]:
    """Union of every allergen mentioned in any food entry (food must be non-empty)."""
    combined = set(food[0][1])
    for _, allergens in food:
        combined |= allergens
    return combined
def test_get_all_allergens():
    # The AoC sample input mentions exactly these three allergens.
    food = parse(aoc21_data.test_data)
    assert get_all_allergens(food) == {"dairy", "fish", "soy"}
def get_ingredients_for_allergen(food: Food, allergen: str) -> Dict[str, Set[str]]:
    # NOTE(review): unfinished stub — it ignores `allergen`, just prints each
    # food entry and returns an empty mapping.  It appears superseded by
    # reduce() below; confirm before relying on it.
    for ingredits, allergens in food:
        print(ingredits, allergens)
    return {}
def reduce(food: Food) -> Dict[str, Set[str]]:
    """Resolve each allergen to its unique ingredient by iterated elimination.

    Note: this shadows the builtin/functools `reduce`, and despite the
    annotation the returned mapping is allergen -> single ingredient name.
    Loops forever if the constraints never collapse to singletons.
    """
    # Intersect the ingredient sets of every food that lists each allergen.
    candidates: Dict[str, Set[str]] = {}
    for ingredients, allergens in food:
        for name in allergens:
            if name in candidates:
                candidates[name] &= ingredients
            else:
                candidates[name] = set(ingredients)
    resolved: Dict[str, str] = {}
    while candidates:
        # Allergens whose candidate set has collapsed to a single ingredient.
        settled = {
            name: options.pop()
            for name, options in candidates.items()
            if len(options) == 1
        }
        for name in settled:
            del candidates[name]
        # Eliminate the settled ingredients from all remaining candidate sets.
        for ingredient in settled.values():
            for name in candidates:
                candidates[name] -= {ingredient}
        resolved.update(settled)
    return resolved
def count_other_ingredients(food):
    """Count ingredient occurrences that cannot contain any allergen (part 1)."""
    dangerous = set(reduce(food).values())
    total = 0
    for ingredients, _ in food:
        total += len(ingredients - dangerous)
    return total
def canonical(food):
    """Comma-joined dangerous ingredients, ordered by allergen name (part 2)."""
    by_allergen = reduce(food)
    return ",".join(by_allergen[name] for name in sorted(by_allergen))
def test_reduce():
    # On the AoC sample, allergen "fish" must resolve to ingredient "sqjhc".
    food = parse(aoc21_data.test_data)
    reduced = reduce(food)
    assert reduced["fish"] == "sqjhc"
def test_count():
    # Part 1: 5 on the sample; 2595 is this author's answer for the real input.
    food = parse(aoc21_data.test_data)
    assert count_other_ingredients(food) == 5
    food = parse(aoc21_data.data)
    assert count_other_ingredients(food) == 2595
def test_canonical():
    food = parse(aoc21_data.test_data)
    assert canonical(food) == "mxmxvkd,sqjhc,fvjkl"
    food = parse(aoc21_data.data)
    print(canonical(food))
    # NOTE(review): this final assert compares against "" and so always fails —
    # apparently a deliberate trick to make pytest print the puzzle answer
    # above.  Replace "" with the real answer once known.
    assert canonical(food) == ""
| 28.144231 | 83 | 0.65152 |
968171ca5856337ebbcd8831bce969a5064b4a21 | 2,149 | py | Python | tests/utils.py | zarumaru/metalk8s | 8c79a28c2bd28ca5b84e58ace5605cbe6183fc75 | [
"Apache-2.0"
] | null | null | null | tests/utils.py | zarumaru/metalk8s | 8c79a28c2bd28ca5b84e58ace5605cbe6183fc75 | [
"Apache-2.0"
] | null | null | null | tests/utils.py | zarumaru/metalk8s | 8c79a28c2bd28ca5b84e58ace5605cbe6183fc75 | [
"Apache-2.0"
] | null | null | null | import ipaddress
import json
import logging
import re
import testinfra
import time
from typing import Optional, Dict
import pytest
LOGGER = logging.getLogger(__name__)
def retry(operation, times=1, wait=1, error_msg=None, name="default"):
    """Call `operation` up to `times` times until it stops raising AssertionError.

    Sleeps `wait` seconds between attempts.  Returns the operation's result on
    success; on persistent failure aborts the test via ``pytest.fail`` using
    `error_msg` (or a generated summary), suffixed with the last assertion text.

    Fixes: attempt numbers are now logged 1-based (previously "Attempt 0/N"),
    and no pointless sleep happens after the final failed attempt.
    """
    last_assert = None
    for attempt in range(1, times + 1):
        try:
            res = operation()
        except AssertionError as exc:
            last_assert = str(exc)
            LOGGER.info(
                "[%s] Attempt %d/%d failed: %s", name, attempt, times, str(exc)
            )
            if attempt < times:  # no point waiting after the last attempt
                time.sleep(wait)
        else:
            LOGGER.info("[%s] Attempt %d/%d succeeded", name, attempt, times)
            return res
    if error_msg is None:
        error_msg = (
            "Failed to run operation '{name}' after {attempts} attempts "
            "(waited {total}s in total)"
        ).format(name=name, attempts=times, total=times * wait)
    if last_assert:
        error_msg = error_msg + ': ' + last_assert
    pytest.fail(error_msg)
def write_string(host, dest, contents):
    """Write `contents` to file `dest` on `host` via a shell heredoc.

    Returns whatever ``host.run`` returns for the generated command.
    """
    heredoc_cmd = "cat > {} << EOF\n{}\nEOF".format(dest, contents)
    return host.run(heredoc_cmd)
def get_ip_from_cidr(host, cidr):
    """Return the first local IPv4 address on `host` inside network `cidr`.

    Parses ``ip a`` output (run with sudo); returns None when no interface
    address falls inside the network.  Asserts on unexpected line formats.
    """
    target_net = ipaddress.IPv4Network(cidr)
    with host.sudo():
        raw = host.check_output("ip a | grep 'inet '")
    pattern = r'inet (?P<ip>[0-9]+(?:\.[0-9]+){3})/[0-9]+ '
    for raw_line in raw.splitlines():
        stripped = raw_line.strip()
        found = re.match(pattern, stripped)
        assert found is not None, 'Unexpected format: {}'.format(stripped)
        addr = found.group('ip')
        if ipaddress.IPv4Address(addr) in target_net:
            return addr
    return None
def get_node_name(nodename, ssh_config=None):
    """Get a node name (from SSH config).

    Without an SSH config the name is returned unchanged; with one, the node's
    Salt ``id`` grain is looked up via testinfra.
    """
    if ssh_config is None:
        return nodename
    node = testinfra.get_host(nodename, ssh_config=ssh_config)
    return get_grain(node, 'id')
def get_grain(host, key):
    """Fetch a local Salt grain value from `host` (runs salt-call under sudo)."""
    command = 'salt-call --local --out=json grains.get "{}"'.format(key)
    with host.sudo():
        output = host.check_output(command)
    return json.loads(output)['local']
e767178b9d671cba70cfdf3131fc816c01d936b6 | 51,564 | py | Python | stellargraph/core/graph.py | ashikrafi/stellargraph | 0f6f24d21ec9c016bb370c2d705af4e876495cb4 | [
"Apache-2.0"
] | 2 | 2020-03-27T07:02:09.000Z | 2020-05-30T16:46:18.000Z | stellargraph/core/graph.py | ashikrafi/stellargraph | 0f6f24d21ec9c016bb370c2d705af4e876495cb4 | [
"Apache-2.0"
] | null | null | null | stellargraph/core/graph.py | ashikrafi/stellargraph | 0f6f24d21ec9c016bb370c2d705af4e876495cb4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2017-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The StellarGraph class that encapsulates information required for
a machine-learning ready graph used by models.
"""
__all__ = ["StellarGraph", "StellarDiGraph", "GraphSchema", "NeighbourWithWeight"]
from typing import Iterable, Any, Mapping, List, Optional, Set
from collections import defaultdict, namedtuple
import pandas as pd
import numpy as np
import scipy.sparse as sps
import warnings
from .. import globalvar
from .schema import GraphSchema, EdgeType
from .experimental import experimental, ExperimentalWarning
from .element_data import NodeData, EdgeData, ExternalIdIndex
from .utils import is_real_iterable
from .validation import comma_sep, separated
from . import convert
# (node, weight) pair — presumably a neighbouring node ID with the weight of
# the connecting edge, returned by neighbour-query methods (usage not visible
# in this chunk; confirm against the class's neighbour APIs).
NeighbourWithWeight = namedtuple("NeighbourWithWeight", ["node", "weight"])
class StellarGraph:
"""
StellarGraph class for graph machine learning.
Summary of a StellarGraph and the terminology used:
- it stores graph structure, as a collection of *nodes* and a collection of *edges* that connect
a *source* node to a *target* node
- each node and edge has an associated *type*
- each node has a numeric vector of *features*, and the vectors of all nodes with the same type
have the same dimension
- it is *homogeneous* if there is only one type of node and one type of edge
    - it is *heterogeneous* if it is not homogeneous (more than one type of node, or more than
one type of edge)
- it is *directed* if the direction of an edge starting at its source node and finishing at
its target node is important
- it is *undirected* if the direction does not matter
- every StellarGraph can be a *multigraph*, meaning there can be multiple edges between any two
nodes
To create a StellarGraph object, at a minimum pass the edges as a Pandas
DataFrame. Each row of the edges DataFrame represents an edge, where the index is the
ID of the edge, and the ``source`` and ``target`` columns store the node ID of the source and
target nodes.
For example, suppose we're modelling a graph that's a square with a diagonal::
a -- b
| \\ |
| \\ |
d -- c
The DataFrame might look like::
edges = pd.DataFrame(
{"source": ["a", "b", "c", "d", "a"], "target": ["b", "c", "d", "a", "c"]}
)
If this data represents an undirected graph (the ordering of each edge source/target doesn't
matter)::
Gs = StellarGraph(edges=edges)
If this data represents a directed graph (the ordering does matter)::
Gs = StellarDiGraph(edges=edges)
One can also pass a DataFrame of nodes. Each row of the nodes DataFrame represents a node in the
graph, where the index is the ID of the node. When this nodes DataFrame is not passed (the
argument is left as the default), the set of nodes is automatically inferred. This inference in
the example above is equivalent to::
nodes = pd.DataFrame([], index=["a", "b", "c", "d"])
Gs = StellarGraph(nodes, edges)
Numeric node features are taken as any columns of the nodes DataFrame. For example, if the graph
above has two features ``x`` and ``y`` associated with each node::
nodes = pd.DataFrame(
{"x": [-1, 2, -3, 4], "y": [0.4, 0.1, 0.9, 0]}, index=["a", "b", "c", "d"]
)
Edge weights are taken as the optional ``weight`` column of the edges DataFrame::
edges = pd.DataFrame({
"source": ["a", "b", "c", "d", "a"],
"target": ["b", "c", "d", "a", "c"],
"weight": [10, 0.5, 1, 3, 13]
})
Heterogeneous graphs, with multiple node or edge types, can be created by passing multiple
DataFrames in a dictionary. The dictionary keys are the names/identifiers for the type. For
example, if the graph above has node ``a`` of type ``foo``, and the rest as type ``bar``, the
construction might look like::
foo_nodes = pd.DataFrame({"x": [-1]}, index=["a"])
bar_nodes = pd.DataFrame(
{"y": [0.4, 0.1, 0.9], "z": [100, 200, 300]}, index=["b", "c", "d"]
)
StellarGraph({"foo": foo_nodes, "bar": bar_nodes}, edges)
Notice the ``foo`` node has one feature ``x``, while the ``bar`` nodes have 2 features ``y`` and
``z``. A heterogeneous graph can have different features for each type.
    Edges of different types work in the same way. For instance, if edges have different types based
on their orientation::
horizontal_edges = pd.DataFrame(
{"source": ["a", "c"], "target": ["b", "d"]}, index=[0, 2]
)
vertical_edges = pd.DataFrame(
{"source": ["b", "d"], "target": ["c", "a"]}, index=[1, 3]
)
diagonal_edges = pd.DataFrame({"source": ["a"], "target": ["c"]}, index=[4])
StellarGraph(nodes, {"h": horizontal_edges, "v": vertical_edges, "d": diagonal_edges})
A dictionary can be passed for both arguments::
StellarGraph(
{"foo": foo_nodes, "bar": bar_nodes},
{"h": horizontal_edges, "v": vertical_edges, "d": diagonal_edges}
)
.. note::
The IDs of nodes must be unique across all types: for example, it is an error to have a node
0 of type ``a``, and a node 0 of type ``b``. IDs of edges must also be unique across all
types.
.. seealso:: :meth:`from_networkx` for construction from a NetworkX graph.
Args:
nodes (DataFrame or dict of hashable to Pandas DataFrame, optional):
Features for every node in the graph. Any columns in the DataFrame are taken as numeric
node features of type ``dtype``. If there is only one type of node, a DataFrame can be
passed directly, and the type defaults to the ``node_type_default`` parameter. Nodes
have an ID taken from the index of the dataframe, and they have to be unique across all
types. For nodes with no features, an appropriate DataFrame can be created with
``pandas.DataFrame([], index=node_ids)``, where ``node_ids`` is a list of the node
IDs. If this is not passed, the nodes will be inferred from ``edges`` with no features
for each node.
edges (DataFrame or dict of hashable to Pandas DataFrame, optional):
An edge list for each type of edges as a Pandas DataFrame containing a source, target
and (optionally) weight column (the names of each are taken from the ``source_column``,
``target_column`` and ``edge_weight_column`` parameters). If there is only one type of
edges, a DataFrame can be passed directly, and the type defaults to the
``edge_type_default`` parameter. Edges have an ID taken from the index of the dataframe,
and they have to be unique across all types.
is_directed (bool, optional):
If True, the data represents a directed multigraph, otherwise an undirected multigraph.
source_column (str, optional):
The name of the column to use as the source node of edges in the ``edges`` edge list
argument.
target_column (str, optional):
The name of the column to use as the target node of edges in the ``edges`` edge list
argument.
edge_weight_column (str, optional):
The name of the column in each of the ``edges`` DataFrames to use as the weight of
edges. If the column does not exist in any of them, it is defaulted to ``1``.
node_type_default (str, optional):
The default node type to use, if ``nodes`` is passed as a DataFrame (not a ``dict``).
edge_type_default (str, optional):
The default edge type to use, if ``edges`` is passed as a DataFrame (not a ``dict``).
dtype (numpy data-type, optional):
The numpy data-type to use for the features extracted from each of the ``nodes`` DataFrames.
graph:
Deprecated, use :meth:`from_networkx`.
node_type_name:
Deprecated, use :meth:`from_networkx`.
edge_type_name:
Deprecated, use :meth:`from_networkx`.
node_features:
Deprecated, use :meth:`from_networkx`.
"""
def __init__(
self,
nodes=None,
edges=None,
*,
is_directed=False,
source_column=globalvar.SOURCE,
target_column=globalvar.TARGET,
edge_weight_column=globalvar.WEIGHT,
node_type_default=globalvar.NODE_TYPE_DEFAULT,
edge_type_default=globalvar.EDGE_TYPE_DEFAULT,
dtype="float32",
# legacy arguments:
graph=None,
node_type_name=globalvar.TYPE_ATTR_NAME,
edge_type_name=globalvar.TYPE_ATTR_NAME,
node_features=None,
):
import networkx
if isinstance(nodes, networkx.Graph):
# `StellarGraph(nx_graph)` -> `graph`
graph = nodes
nodes = None
if edges is not None:
raise ValueError(
"edges: expected no value when using legacy NetworkX constructor, found: {edges!r}"
)
# legacy NetworkX construction
if graph is not None:
# FIXME(#717): this should have a deprecation warning, once the tests and examples have
# stopped using it
if nodes is not None or edges is not None:
raise ValueError(
"graph: expected no value when using 'nodes' and 'edges' parameters, found: {graph!r}"
)
warnings.warn(
"Constructing a StellarGraph directly from a NetworkX graph has been replaced by the `StellarGraph.from_networkx` function",
DeprecationWarning,
)
nodes, edges = convert.from_networkx(
graph,
node_type_attr=node_type_name,
edge_type_attr=edge_type_name,
node_type_default=node_type_default,
edge_type_default=edge_type_default,
edge_weight_attr=edge_weight_column,
node_features=node_features,
dtype=dtype,
)
if edges is None:
edges = {}
self._is_directed = is_directed
self._edges = convert.convert_edges(
edges,
name="edges",
default_type=edge_type_default,
source_column=source_column,
target_column=target_column,
weight_column=edge_weight_column,
)
nodes_from_edges = pd.unique(
np.concatenate([self._edges.targets, self._edges.sources])
)
if nodes is None:
nodes_after_inference = pd.DataFrame([], index=nodes_from_edges)
else:
nodes_after_inference = nodes
self._nodes = convert.convert_nodes(
nodes_after_inference,
name="nodes",
default_type=node_type_default,
dtype=dtype,
)
if nodes is not None:
# check for dangling edges: make sure the explicitly-specified nodes parameter includes every
# node mentioned in the edges
try:
self._nodes.ids.to_iloc(
nodes_from_edges, smaller_type=False, strict=True,
)
except KeyError as e:
missing_values = e.args[0]
if not is_real_iterable(missing_values):
missing_values = [missing_values]
missing_values = pd.unique(missing_values)
raise ValueError(
f"edges: expected all source and target node IDs to be contained in `nodes`, "
f"found some missing: {comma_sep(missing_values)}"
)
@staticmethod
def from_networkx(
    graph,
    *,
    edge_weight_attr="weight",
    node_type_attr=globalvar.TYPE_ATTR_NAME,
    edge_type_attr=globalvar.TYPE_ATTR_NAME,
    node_type_default=globalvar.NODE_TYPE_DEFAULT,
    edge_type_default=globalvar.EDGE_TYPE_DEFAULT,
    node_features=None,
    dtype="float32",
):
    """
    Build a ``StellarGraph`` from a NetworkX graph::

        Gs = StellarGraph.from_networkx(nx_graph)

    Node features can be supplied via ``node_features`` in several forms:

    - the name of a node attribute in ``graph`` holding each node's numeric
      feature vector (all nodes of one type must have vectors of equal length),
    - a Pandas DataFrame indexed by node ID (single node type),
    - a dictionary mapping node type to such a DataFrame (multiple node types;
      types missing from the dictionary get zero-length feature vectors),
    - an iterable of ``(node_id, feature_vector)`` pairs.

    Args:
        graph: The NetworkX graph instance.
        edge_weight_attr (str, optional): the edge attribute to read edge weights from.
        node_type_attr (str, optional): the node attribute that records each node's type.
        node_type_default (str, optional): the type used for nodes without an explicit type.
        edge_type_attr (str, optional): the edge attribute that records each edge's type.
        edge_type_default (str, optional): the type used for edges without an explicit type.
        node_features (str, dict, list or DataFrame optional): where to find the numeric
            node feature vectors required by some graph models.
        dtype (str, optional): dtype passed through to the node feature conversion.

    Returns:
        A ``StellarGraph`` (if ``graph`` is undirected) or ``StellarDiGraph`` (if ``graph``
        is directed) instance representing the data in ``graph`` and ``node_features``.
    """
    # translate the NetworkX representation into the node/edge form used by
    # the main constructor
    converted_nodes, converted_edges = convert.from_networkx(
        graph,
        node_type_attr=node_type_attr,
        edge_type_attr=edge_type_attr,
        node_type_default=node_type_default,
        edge_type_default=edge_type_default,
        edge_weight_attr=edge_weight_attr,
        node_features=node_features,
        dtype=dtype,
    )

    graph_class = StellarDiGraph if graph.is_directed() else StellarGraph
    return graph_class(
        nodes=converted_nodes,
        edges=converted_edges,
        edge_weight_column=edge_weight_attr,
        dtype=dtype,
    )
# customise how a missing attribute is handled to give better error messages for the NetworkX
# -> no NetworkX transition.
def __getattr__(self, item):
    """Rewrite AttributeErrors for old NetworkX-style attribute access.

    Only called when normal attribute lookup fails. If the missing name exists
    on ``networkx.MultiDiGraph``, the error is replaced with one that points
    the caller at the StellarGraph API or ``.to_networkx()``.
    """
    import networkx

    try:
        # do the normal access, in case the attribute actually exists, and to get the native
        # python wording of the error
        return super().__getattribute__(item)
    except AttributeError as e:
        if hasattr(networkx.MultiDiGraph, item):
            # a networkx class has this as an attribute, so let's assume that it's old code
            # from before the conversion and replace (the `from None`) the default exception
            # with one with a more specific message that guides the user to the fix
            type_name = type(self).__name__
            raise AttributeError(
                f"{e.args[0]}. The '{type_name}' type no longer inherits from NetworkX types: use a new StellarGraph method, or, if that is not possible, the `.to_networkx()` conversion function."
            ) from None

        # doesn't look like a NetworkX method so use the default error
        raise
def is_directed(self) -> bool:
    """
    Report whether this graph is directed.

    Returns:
        bool: ``True`` for a directed graph, ``False`` for an undirected one.
    """
    return self._is_directed
def number_of_nodes(self) -> int:
    """
    Count the nodes in the graph.

    Returns:
        int: The total number of nodes.
    """
    return len(self._nodes)
def number_of_edges(self) -> int:
    """
    Count the edges in the graph.

    Returns:
        int: The total number of edges.
    """
    return len(self._edges)
def nodes(self, node_type=None) -> Iterable[Any]:
    """
    Obtain the nodes in the graph.

    Args:
        node_type (hashable, optional): a node type present in the graph.

    Returns:
        Every node in the graph when ``node_type`` is ``None``; otherwise only
        the nodes of type ``node_type``.
    """
    if node_type is not None:
        type_ilocs = self._nodes.type_range(node_type)
        return self._nodes.ids.from_iloc(type_ilocs)

    return self._nodes.ids.pandas_index
def edges(
    self, include_edge_type=False, include_edge_weight=False
) -> Iterable[Any]:
    """
    Obtain the edges in the graph.

    Args:
        include_edge_type (bool): when True, each edge is a ``(source, target, type)``
            triple instead of a ``(source, target)`` pair.
        include_edge_weight (bool): when True, edge weights are also returned, as a
            separate sequence.

    Returns:
        The graph edges; when ``include_edge_weight`` is True, a tuple of
        ``(edges, weights)``.
    """
    # NOTE: returning zipped tuples (rather than the underlying arrays) matches the
    # rest of this file; see the original FIXME about preferring raw arrays.
    sources = self._edges.sources
    targets = self._edges.targets

    if include_edge_type:
        types = self._edges.type_of_iloc(slice(None))
        result = list(zip(sources, targets, types))
    else:
        result = list(zip(sources, targets))

    if include_edge_weight:
        return result, self._edges.weights

    return result
def has_node(self, node: Any) -> bool:
    """
    Check whether the graph contains the given node.

    Args:
        node (any): The node ID to look up.

    Returns:
        bool: ``True`` when the node is in the graph, ``False`` otherwise.
    """
    return node in self._nodes
def _transform_edges(
    self, other_node_id, ilocs, include_edge_weight, filter_edge_types
):
    """Shared post-processing for the neighbour queries.

    Given the neighbour IDs and the ilocs of the edges that produced them,
    optionally attach edge weights and/or restrict to a set of edge types.
    Note the ordering: weights are gathered first, and then both the IDs and
    the weights are filtered by the same type mask.
    """
    if include_edge_weight:
        weights = self._edges.weights[ilocs]
    else:
        weights = None

    if filter_edge_types is not None:
        filter_edge_type_ilocs = self._edges.types.to_iloc(filter_edge_types)
        edge_type_ilocs = self._edges.type_ilocs[ilocs]
        correct_type = np.isin(edge_type_ilocs, filter_edge_type_ilocs)

        # drop the neighbours (and their weights, if requested) whose edge has
        # a type outside `filter_edge_types`
        other_node_id = other_node_id[correct_type]
        if weights is not None:
            weights = weights[correct_type]

    # FIXME(#718): it would be better to return these as ndarrays, instead of (zipped) lists
    if weights is not None:
        return [
            NeighbourWithWeight(node, weight)
            for node, weight in zip(other_node_id, weights)
        ]

    return list(other_node_id)
def neighbors(
    self, node: Any, include_edge_weight=False, edge_types=None
) -> Iterable[Any]:
    """
    Obtain the nodes connected to the given node by any incident edge.

    Args:
        node (any): The node in question.
        include_edge_weight (bool, default False): If True, each neighbour in the
            output is a named tuple with fields `node` (the node ID) and `weight`
            (the edge weight).
        edge_types (list of hashable, optional): If provided, only traverse the graph
            via the provided edge types when collecting neighbours.

    Returns:
        iterable: The neighbouring nodes.
    """
    incident = self._edges.edge_ilocs(node, ins=True, outs=True)
    sources = self._edges.sources[incident]
    targets = self._edges.targets[incident]
    # for each incident edge, keep whichever endpoint is not `node` itself
    others = np.where(sources == node, targets, sources)

    return self._transform_edges(others, incident, include_edge_weight, edge_types)
def in_nodes(
    self, node: Any, include_edge_weight=False, edge_types=None
) -> Iterable[Any]:
    """
    Obtain the neighbouring nodes with an edge directed towards the given node.
    In an undirected graph, every incident edge counts as incoming.

    Args:
        node (any): The node in question.
        include_edge_weight (bool, default False): If True, each neighbour in the
            output is a named tuple with fields `node` (the node ID) and `weight`
            (the edge weight).
        edge_types (list of hashable, optional): If provided, only traverse the graph
            via the provided edge types when collecting neighbours.

    Returns:
        iterable: The neighbouring in-nodes.
    """
    if self.is_directed():
        incoming = self._edges.edge_ilocs(node, ins=True, outs=False)
        neighbour_ids = self._edges.sources[incoming]
        return self._transform_edges(
            neighbour_ids, incoming, include_edge_weight, edge_types
        )

    # undirected: delegate to the symmetric neighbour query
    return self.neighbors(
        node, include_edge_weight=include_edge_weight, edge_types=edge_types
    )
def out_nodes(
    self, node: Any, include_edge_weight=False, edge_types=None
) -> Iterable[Any]:
    """
    Obtain the neighbouring nodes with an edge directed away from the given node.
    In an undirected graph, every incident edge counts as outgoing.

    Args:
        node (any): The node in question.
        include_edge_weight (bool, default False): If True, each neighbour in the
            output is a named tuple with fields `node` (the node ID) and `weight`
            (the edge weight).
        edge_types (list of hashable, optional): If provided, only traverse the graph
            via the provided edge types when collecting neighbours.

    Returns:
        iterable: The neighbouring out-nodes.
    """
    if self.is_directed():
        outgoing = self._edges.edge_ilocs(node, ins=False, outs=True)
        neighbour_ids = self._edges.targets[outgoing]
        return self._transform_edges(
            neighbour_ids, outgoing, include_edge_weight, edge_types
        )

    # undirected: delegate to the symmetric neighbour query
    return self.neighbors(
        node, include_edge_weight=include_edge_weight, edge_types=edge_types
    )
def nodes_of_type(self, node_type=None):
    """
    Get the nodes of the graph with the specified node type.

    .. deprecated::
        Use :meth:`nodes` with its ``node_type`` argument instead.

    Args:
        node_type (hashable): a type of nodes that exist in the graph (this must be passed,
            omitting it or passing ``None`` is deprecated)

    Returns:
        A list of node IDs with type ``node_type``.
    """
    # BUG FIX: the old message suggested `nodes(type=...)`, but the replacement
    # method's parameter is called `node_type` (see `def nodes(self, node_type=None)`),
    # so following the advice verbatim raised a TypeError.
    warnings.warn(
        "'nodes_of_type' is deprecated and will be removed; use the 'nodes(node_type=...)' method instead",
        DeprecationWarning,
        stacklevel=2,
    )
    return list(self.nodes(node_type=node_type))
def node_type(self, node):
    """
    Look up the type of a single node.

    Args:
        node: Node ID.

    Returns:
        The node's type.

    Raises:
        KeyError: if ``node`` is not present in the graph (strict ID lookup).
    """
    node_ilocs = self._nodes.ids.to_iloc([node], strict=True)
    types = self._nodes.type_of_iloc(node_ilocs)
    assert len(types) == 1
    return types[0]
@property
def node_types(self):
    """
    All node types present in the graph.

    Returns:
        set of types
    """
    return set(self._nodes.types.pandas_index)
def node_feature_sizes(self, node_types=None):
    """
    Get the feature vector length for each of the specified node types.

    Args:
        node_types (list, optional): the node types of interest; when ``None``,
            every node type in the graph is included.

    Returns:
        A dictionary mapping node type to its integer feature size.
    """
    info = self._nodes.feature_info()
    selected = info.keys() if node_types is None else node_types
    # feature_info values are (size, dtype) pairs; only the size is wanted here
    return {nt: info[nt][0] for nt in selected}
def check_graph_for_ml(self, features=True):
    """
    Checks if all properties required for machine learning training/inference are set up.
    An error will be raised if the graph is not correctly setup.

    Args:
        features (bool): unused; kept for backwards compatibility.

    Raises:
        RuntimeError: if no node type in the graph has numeric features.
    """
    # BUG FIX: the two implicitly-concatenated message strings had no separator,
    # yielding "...for nodesNode features..."; also iterate .values() since the
    # keys were unused.
    if all(size == 0 for size in self.node_feature_sizes().values()):
        raise RuntimeError(
            "This StellarGraph has no numeric feature attributes for nodes. "
            "Node features are required for machine learning"
        )

    # TODO: check the schema

    # TODO: check the feature node_ids against the graph node ids?
def node_features(self, nodes, node_type=None):
    """
    Get the numeric feature vectors for the specified node or nodes.
    If the node type is not specified the node types will be found
    for all nodes. It is therefore important to supply the ``node_type``
    for this method to be fast.

    Args:
        nodes (list or hashable): Node ID or list of node IDs
        node_type (hashable): the type of the nodes.

    Returns:
        Numpy array containing the node features for the requested nodes.

    Raises:
        ValueError: if ``node_type`` is omitted and the valid nodes have zero
            or more than one distinct type.
    """
    nodes = np.asarray(nodes)
    node_ilocs = self._nodes.ids.to_iloc(nodes)
    valid = self._nodes.ids.is_valid(node_ilocs)
    all_valid = valid.all()
    valid_ilocs = node_ilocs if all_valid else node_ilocs[valid]

    if node_type is None:
        # infer the type based on the valid nodes
        types = np.unique(self._nodes.type_of_iloc(valid_ilocs))

        if len(types) == 0:
            raise ValueError(
                "must have at least one node for inference, if `node_type` is not specified"
            )

        if len(types) > 1:
            raise ValueError("all nodes must have the same type")

        node_type = types[0]

    if all_valid:
        return self._nodes.features(node_type, valid_ilocs)

    # If there's some invalid values, they get replaced by zeros; this is designed to allow
    # models that build fixed-size structures (e.g. GraphSAGE) based on neighbours to fill out
    # missing neighbours with zeros automatically, using None as a sentinel.

    # FIXME: None as a sentinel forces nodes to have dtype=object even with integer IDs, could
    # instead use an impossible integer (e.g. 2**64 - 1)

    # everything that's not the sentinel should be valid
    # NOTE: `!= None` is deliberate — it's an elementwise comparison on the
    # (object-dtype) array, which `is not None` cannot express; do not "fix" it.
    non_nones = nodes != None
    self._nodes.ids.require_valid(nodes[non_nones], node_ilocs[non_nones])

    sampled = self._nodes.features(node_type, valid_ilocs)
    # rows for the None sentinels stay all-zero; valid rows get the real features
    features = np.zeros((len(nodes), sampled.shape[1]))
    features[valid] = sampled
    return features
##################################################################
# Computationally intensive methods:
def _edge_type_iloc_triples(self, selector=slice(None), stacked=False):
source_ilocs = self._nodes.ids.to_iloc(self._edges.sources[selector])
source_type_ilocs = self._nodes.type_ilocs[source_ilocs]
rel_type_ilocs = self._edges.type_ilocs[selector]
target_ilocs = self._nodes.ids.to_iloc(self._edges.targets[selector])
target_type_ilocs = self._nodes.type_ilocs[target_ilocs]
all_ilocs = source_type_ilocs, rel_type_ilocs, target_type_ilocs
if stacked:
return np.stack(all_ilocs, axis=-1)
return all_ilocs
def _edge_type_triples(self, selector=slice(None)):
src_ilocs, rel_ilocs, tgt_ilocs = self._edge_type_iloc_triples(
selector, stacked=False
)
return (
self._nodes.types.from_iloc(src_ilocs),
self._edges.types.from_iloc(rel_ilocs),
self._nodes.types.from_iloc(tgt_ilocs),
)
def _unique_type_triples(self, *, return_counts, selector=slice(None)):
    """Find the distinct (source type, edge type, target type) triples among the
    selected edges, optionally with how many edges have each triple.

    Returns an iterator of ``(src_type, rel_type, tgt_type)`` tuples, or
    ``(src_type, rel_type, tgt_type, count)`` when ``return_counts`` is true.
    """
    all_type_ilocs = self._edge_type_iloc_triples(selector, stacked=True)

    if len(all_type_ilocs) == 0:
        # FIXME(https://github.com/numpy/numpy/issues/15559): if there's no edges, np.unique is
        # being called on a shape=(0, 3) ndarray, and hits "ValueError: cannot reshape array of
        # size 0 into shape (0,newaxis)", so we manually reproduce what would be returned
        if return_counts:
            ret = None, [], []
        else:
            ret = None, []
    else:
        ret = np.unique(
            all_type_ilocs, axis=0, return_index=True, return_counts=return_counts
        )

    # ret[1] is the index of the first edge exhibiting each unique triple
    edge_ilocs = ret[1]
    # we've now got the indices for an edge with each triple, along with the counts of them, so
    # we can query to get the actual edge types (this is, at the time of writing, easier than
    # getting the actual type for each type iloc in the triples)
    unique_ets = self._edge_type_triples(edge_ilocs)

    if return_counts:
        return zip(*unique_ets, ret[2])

    return zip(*unique_ets)
def info(self, show_attributes=None, sample=None, truncate=20):
    """
    Return an information string summarizing information on the current graph.
    This includes node and edge type information and their attributes.

    Note: This requires processing all nodes and edges and could take a long
    time for a large graph.

    Args:
        show_attributes: Deprecated, unused.
        sample: Deprecated, unused.
        truncate (int, optional): If an integer, show only the ``truncate`` most common node and
            edge type triples; if ``None``, list each one individually.

    Returns:
        An information string.
    """
    if show_attributes is not None:
        warnings.warn(
            "'show_attributes' is no longer used, remove it from the 'info()' call",
            DeprecationWarning,
            stacklevel=2,
        )

    if sample is not None:
        warnings.warn(
            "'sample' is no longer used, remove it from the 'info()' call",
            DeprecationWarning,
            stacklevel=2,
        )

    # always truncate the edge types listed for each node type, since they're redundant with the
    # individual listing of edge types, and make for a single very long line
    truncate_edge_types_per_node = 5
    if truncate is not None:
        truncate_edge_types_per_node = min(truncate_edge_types_per_node, truncate)

    # Numpy processing is much faster than NetworkX processing, so we don't bother sampling.
    gs = self.create_graph_schema()
    feature_info = self._nodes.feature_info()

    def str_edge_type(et):
        # render an EdgeType triple as "source-relation->target"
        n1, rel, n2 = et
        return f"{n1}-{rel}->{n2}"

    def str_node_type(count, nt):
        # one summary entry per node type: count, feature info, incident edge types
        feature_size, feature_dtype = feature_info[nt]
        if feature_size > 0:
            feature_text = f"{feature_dtype.name} vector, length {feature_size}"
        else:
            feature_text = "none"

        edges = gs.schema[nt]
        if edges:
            edge_types = comma_sep(
                [str_edge_type(et) for et in gs.schema[nt]],
                limit=truncate_edge_types_per_node,
                stringify=str,
            )
        else:
            edge_types = "none"
        return f"{nt}: [{count}]\n Features: {feature_text}\n Edge types: {edge_types}"

    # sort the node types in decreasing order of frequency
    node_types = sorted(
        ((len(self.nodes(node_type=nt)), nt) for nt in gs.node_types), reverse=True
    )
    nodes = separated(
        [str_node_type(count, nt) for count, nt in node_types],
        limit=truncate,
        stringify=str,
        sep="\n ",
    )

    # FIXME: it would be better for the schema to just include the counts directly
    unique_ets = self._unique_type_triples(return_counts=True)
    edge_types = sorted(
        (
            (count, EdgeType(src_ty, rel_ty, tgt_ty))
            for src_ty, rel_ty, tgt_ty, count in unique_ets
        ),
        reverse=True,
    )
    edges = separated(
        [f"{str_edge_type(et)}: [{count}]" for count, et in edge_types],
        limit=truncate,
        stringify=str,
        sep="\n ",
    )

    directed_str = "Directed" if self.is_directed() else "Undirected"
    lines = [
        f"{type(self).__name__}: {directed_str} multigraph",
        f" Nodes: {self.number_of_nodes()}, Edges: {self.number_of_edges()}",
        "",
        " Node types:",
    ]
    if nodes:
        lines.append(" " + nodes)

    lines.append("")
    lines.append(" Edge types:")
    if edges:
        lines.append(" " + edges)
    return "\n".join(lines)
def create_graph_schema(self, nodes=None):
    """
    Create graph schema from the current graph.

    Arguments:
        nodes (list): A list of node IDs to use to build schema. This must
            represent all node types and all edge types in the graph.
            If not specified, all nodes and edges in the graph are used.

    Returns:
        GraphSchema object.
    """
    graph_schema = {nt: set() for nt in self.node_types}
    edge_types = set()

    if nodes is None:
        selector = slice(None)
    else:
        # restrict to edges whose source AND target are both in `nodes`
        selector = np.isin(self._edges.sources, nodes) & np.isin(
            self._edges.targets, nodes
        )

    for n1, rel, n2 in self._unique_type_triples(
        selector=selector, return_counts=False
    ):
        edge_type_tri = EdgeType(n1, rel, n2)
        edge_types.add(edge_type_tri)
        graph_schema[n1].add(edge_type_tri)

        if not self.is_directed():
            # an undirected edge can be traversed both ways, so record the
            # reversed triple against the target's type too
            edge_type_tri = EdgeType(n2, rel, n1)
            edge_types.add(edge_type_tri)
            graph_schema[n2].add(edge_type_tri)

    # Create ordered list of edge_types
    edge_types = sorted(edge_types)

    # Create keys for node and edge types
    schema = {
        node_label: sorted(node_data)
        for node_label, node_data in graph_schema.items()
    }

    return GraphSchema(
        self.is_directed(), sorted(self.node_types), edge_types, schema
    )
def node_degrees(self) -> Mapping[Any, int]:
    """
    Compute the degree of every node.

    Returns:
        A mapping from each node ID to its degree.
    """
    return self._edges.degrees()
def to_adjacency_matrix(self, nodes: Optional[Iterable] = None, weighted=False):
    """
    Obtains a SciPy sparse adjacency matrix of edge weights.

    By default (``weighted=False``), each element of the matrix contains the number
    of edges between the two vertices (only 0 or 1 in a graph without multi-edges).

    Args:
        nodes (iterable): The optional collection of nodes
            comprising the subgraph. If specified, then the
            adjacency matrix is computed for the subgraph;
            otherwise, it is computed for the full graph.
        weighted (bool): If true, use the edge weight column from the graph instead
            of edge counts (weights from multi-edges are summed).

    Returns:
        The weighted adjacency matrix.
    """
    if nodes is None:
        index = self._nodes._id_index
        selector = slice(None)
    else:
        nodes = list(nodes)
        index = ExternalIdIndex(nodes)
        # keep only edges with both endpoints inside `nodes`
        selector = np.isin(self._edges.sources, nodes) & np.isin(
            self._edges.targets, nodes
        )

    # these indices are computed relative to the index above. If `nodes` is None, they'll be the
    # overall ilocs (for the original graph), otherwise they'll be the indices of the `nodes`
    # list.
    src_idx = index.to_iloc(self._edges.sources[selector])
    tgt_idx = index.to_iloc(self._edges.targets[selector])
    if weighted:
        weights = self._edges.weights[selector]
    else:
        # unweighted: each edge contributes 1 (same dtype as the real weights)
        weights = np.ones(src_idx.shape, dtype=self._edges.weights.dtype)

    n = len(index)

    adj = sps.csr_matrix((weights, (src_idx, tgt_idx)), shape=(n, n))
    if not self.is_directed():
        # in an undirected graph, the adjacency matrix should be symmetric: which means counting
        # weights from either "incoming" or "outgoing" edges, but not double-counting self loops
        backward = adj.transpose(copy=True)
        # this is setdiag(0), but faster, since it doesn't change the sparsity structure of the
        # matrix (https://github.com/scipy/scipy/issues/11600)
        (nonzero,) = backward.diagonal().nonzero()
        backward[nonzero, nonzero] = 0
        adj += backward

    # this is a multigraph, let's eliminate any duplicate entries
    adj.sum_duplicates()
    return adj
def subgraph(self, nodes):
    """
    Compute the node-induced subgraph implied by ``nodes``.

    Args:
        nodes (iterable): The nodes in the subgraph.

    Returns:
        A :class:`StellarGraph` or :class:`StellarDiGraph` instance containing only the nodes in
        ``nodes``, and any edges between them in ``self``. It contains the same node & edge
        types, node features and edge weights as in ``self``.
    """
    node_ilocs = self._nodes.ids.to_iloc(nodes, strict=True)
    node_types = self._nodes.type_of_iloc(node_ilocs)
    # group the selected node ilocs by their node type
    node_type_to_ilocs = pd.Series(node_ilocs, index=node_types).groupby(level=0)

    # one feature DataFrame per node type, indexed by the original node IDs
    node_frames = {
        type_name: pd.DataFrame(
            self._nodes.features(type_name, ilocs),
            index=self._nodes.ids.from_iloc(ilocs),
        )
        for type_name, ilocs in node_type_to_ilocs
    }

    # FIXME(#985): this is O(edges in graph) but could potentially be optimised to O(edges in
    # graph incident to `nodes`), which could be much fewer if `nodes` is small
    # NOTE: np.where(cond) returns a 1-tuple of index arrays; indexing the edge
    # columns with that tuple behaves the same as indexing with the array itself.
    edge_ilocs = np.where(
        np.isin(self._edges.sources, nodes) & np.isin(self._edges.targets, nodes)
    )
    edge_frame = pd.DataFrame(
        {
            "id": self._edges.ids.from_iloc(edge_ilocs),
            globalvar.SOURCE: self._edges.sources[edge_ilocs],
            globalvar.TARGET: self._edges.targets[edge_ilocs],
            globalvar.WEIGHT: self._edges.weights[edge_ilocs],
        },
        index=self._edges.type_of_iloc(edge_ilocs),
    )
    # one edge DataFrame per edge type (the frame's index holds the types)
    edge_frames = {
        type_name: df.set_index("id")
        for type_name, df in edge_frame.groupby(level=0)
    }

    cls = StellarDiGraph if self.is_directed() else StellarGraph
    return cls(node_frames, edge_frames)
def connected_components(self):
    """
    Compute the connected components in this graph, ordered from largest to
    smallest.

    The nodes in the largest component can be computed with ``nodes =
    next(graph.connected_components())``. The node IDs returned by this method
    can be used to compute the corresponding subgraph with
    ``graph.subgraph(nodes)``.

    For a directed graph, this computes the *weakly* connected components,
    i.e. each edge is treated as undirected.

    Returns:
        An iterator over sets of node IDs in each connected component, from the
        largest (most nodes) to the smallest (fewest nodes).
    """
    adjacency = self.to_adjacency_matrix()
    n_components, labels = sps.csgraph.connected_components(
        adjacency, directed=False
    )

    # order the component labels by decreasing membership count
    sizes = np.bincount(labels, minlength=n_components)
    largest_first = np.argsort(sizes)[::-1]

    return (
        self._nodes.ids.from_iloc(labels == label) for label in largest_first
    )
def to_networkx(
    self,
    node_type_attr=globalvar.TYPE_ATTR_NAME,
    edge_type_attr=globalvar.TYPE_ATTR_NAME,
    edge_weight_attr=globalvar.WEIGHT,
    feature_attr=globalvar.FEATURE_ATTR_NAME,
    node_type_name=None,
    edge_type_name=None,
    edge_weight_label=None,
    feature_name=None,
):
    """
    Create a NetworkX MultiGraph or MultiDiGraph instance representing this graph.

    Args:
        node_type_attr (str): the name of the attribute to use to store a node's type (or label).
        edge_type_attr (str): the name of the attribute to use to store a edge's type (or label).
        edge_weight_attr (str): the name of the attribute to use to store a edge's weight.
        feature_attr (str, optional): the name of the attribute to use to store a node's feature
            vector; if ``None``, feature vectors are not stored within each node.
        node_type_name (str): Deprecated, use ``node_type_attr``.
        edge_type_name (str): Deprecated, use ``edge_type_attr``.
        edge_weight_label (str): Deprecated, use ``edge_weight_attr``.
        feature_name (str, optional): Deprecated, use ``feature_attr``.

    Returns:
        An instance of `networkx.MultiDiGraph` (if directed) or `networkx.MultiGraph` (if
        undirected) containing all the nodes & edges and their types & features in this graph.
    """
    import networkx

    # translate each deprecated parameter spelling to its replacement, warning as we go
    if node_type_name is not None:
        warnings.warn(
            "the 'node_type_name' parameter has been replaced by 'node_type_attr'",
            DeprecationWarning,
        )
        node_type_attr = node_type_name

    if edge_type_name is not None:
        warnings.warn(
            "the 'edge_type_name' parameter has been replaced by 'edge_type_attr'",
            DeprecationWarning,
        )
        edge_type_attr = edge_type_name

    if edge_weight_label is not None:
        warnings.warn(
            "the 'edge_weight_label' parameter has been replaced by 'edge_weight_attr'",
            DeprecationWarning,
        )
        edge_weight_attr = edge_weight_label

    if feature_name is not None:
        warnings.warn(
            "the 'feature_name' parameter has been replaced by 'feature_attr'",
            DeprecationWarning,
        )
        feature_attr = feature_name

    if self.is_directed():
        graph = networkx.MultiDiGraph()
    else:
        graph = networkx.MultiGraph()

    # add nodes type-by-type; features (when requested) are attached per node
    for ty in self.node_types:
        node_ids = self.nodes(node_type=ty)
        ty_dict = {node_type_attr: ty}

        if feature_attr is not None:
            features = self.node_features(node_ids, node_type=ty)

            for node_id, node_features in zip(node_ids, features):
                graph.add_node(
                    node_id, **ty_dict, **{feature_attr: node_features},
                )
        else:
            graph.add_nodes_from(node_ids, **ty_dict)

    # add every edge with its type and weight as attributes
    iterator = zip(
        self._edges.sources,
        self._edges.targets,
        self._edges.type_of_iloc(slice(None)),
        self._edges.weights,
    )
    graph.add_edges_from(
        (src, dst, {edge_type_attr: type_, edge_weight_attr: weight})
        for src, dst, type_, weight in iterator
    )

    return graph
# FIXME: Experimental/special-case methods that need to be considered more; the underscores
# denote "package private", not fully private, and so are ok to use in the rest of stellargraph
def _get_index_for_nodes(self, nodes, node_type=None):
"""
Get the indices for the specified node or nodes.
If the node type is not specified the node types will be found
for all nodes. It is therefore important to supply the ``node_type``
for this method to be fast.
Args:
n: (list or hashable) Node ID or list of node IDs
node_type: (hashable) the type of the nodes.
Returns:
Numpy array containing the indices for the requested nodes.
"""
return self._nodes._id_index.to_iloc(nodes, strict=True)
def _adjacency_types(self, graph_schema: GraphSchema):
    """
    Obtains the edges in the form of the typed mapping:

        {edge_type_triple: {source_node: [target_node, ...]}}

    Args:
        graph_schema: The graph schema (currently unused in the body).

    Returns:
        The edge types mapping.
    """
    source_types, rel_types, target_types = self._edge_type_triples(slice(None))

    triples = defaultdict(lambda: defaultdict(lambda: []))

    iterator = zip(
        source_types,
        rel_types,
        target_types,
        self._edges.sources,
        self._edges.targets,
    )
    for src_type, rel_type, tgt_type, src, tgt in iterator:
        triple = EdgeType(src_type, rel_type, tgt_type)
        triples[triple][src].append(tgt)

        # undirected edges are recorded in both directions; self loops (src ==
        # tgt) are skipped here so they aren't counted twice
        if not self.is_directed() and src != tgt:
            other_triple = EdgeType(tgt_type, rel_type, src_type)
            triples[other_triple][tgt].append(src)

    for subdict in triples.values():
        for v in subdict.values():
            # each list should be in order, to ensure sampling methods are deterministic
            # (key=str so IDs of mixed types can still be compared)
            v.sort(key=str)

    return triples
def _edge_weights(self, source_node: Any, target_node: Any) -> List[Any]:
    """
    Obtains the weights of edges between the given pair of nodes.

    Args:
        source_node (any): The source node.
        target_node (any): The target node.

    Returns:
        list: The edge weights, as floats.
    """
    # self loops should only be counted once, which means they're effectively always a directed
    # edge at the storage level, unlike other edges in an undirected graph. This is
    # particularly important with the intersection1d call, where the source_ilocs and
    # target_ilocs will be equal, when source_node == target_node, and thus the intersection
    # will contain all incident edges.
    effectively_directed = self.is_directed() or source_node == target_node
    both_dirs = not effectively_directed

    # edges incident to each endpoint; an edge between the pair appears in both sets
    source_ilocs = self._edges.edge_ilocs(source_node, ins=both_dirs, outs=True)
    target_ilocs = self._edges.edge_ilocs(target_node, ins=True, outs=both_dirs)

    ilocs = np.intersect1d(source_ilocs, target_ilocs, assume_unique=True)

    return [float(x) for x in self._edges.weights[ilocs]]
# A convenience class that merely specifies that edges have direction.
class StellarDiGraph(StellarGraph):
    """
    A directed :class:`StellarGraph`.

    Accepts the same parameters as :class:`StellarGraph`, but always constructs
    the graph with ``is_directed=True``.
    """

    def __init__(
        self,
        nodes=None,
        edges=None,
        *,
        source_column=globalvar.SOURCE,
        target_column=globalvar.TARGET,
        edge_weight_column=globalvar.WEIGHT,
        node_type_default=globalvar.NODE_TYPE_DEFAULT,
        edge_type_default=globalvar.EDGE_TYPE_DEFAULT,
        dtype="float32",
        # legacy arguments
        graph=None,
        node_type_name=globalvar.TYPE_ATTR_NAME,
        edge_type_name=globalvar.TYPE_ATTR_NAME,
        node_features=None,
    ):
        # delegate everything to the base class, forcing directedness
        super().__init__(
            nodes=nodes,
            edges=edges,
            is_directed=True,
            source_column=source_column,
            target_column=target_column,
            edge_weight_column=edge_weight_column,
            node_type_default=node_type_default,
            edge_type_default=edge_type_default,
            dtype=dtype,
            # legacy arguments
            graph=graph,
            node_type_name=node_type_name,
            edge_type_name=edge_type_name,
            node_features=node_features,
        )
| 38.480597 | 200 | 0.61031 |
f4cbcca7693f35b2fbb5a5e954bf0741b81c00ae | 2,505 | py | Python | opc.py | Tomvargas/ModX | 4fb862e11d6f66e828687dc32eed3762d41b07d5 | [
"MIT"
] | 2 | 2020-04-13T18:23:48.000Z | 2020-04-16T19:54:11.000Z | opc.py | Tomvargas/MODX | 4fb862e11d6f66e828687dc32eed3762d41b07d5 | [
"MIT"
] | null | null | null | opc.py | Tomvargas/MODX | 4fb862e11d6f66e828687dc32eed3762d41b07d5 | [
"MIT"
] | null | null | null | from tkinter import *
from tkinter.ttk import *
from tkinter import ttk
from tkinter import messagebox
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import pyplot
import time
import serial
import threading
import tkinter.messagebox
from tkinter import *
from tkinter import filedialog
from tkinter import ttk
from ttkthemes import themed_tk as tk
import time
from trdata import dato
#en caso de conexion exitosa muestra los datos reales
# si la conexion falla se muestra datos ficticios
data1=dato()
data1.insert(0,time.strftime("%H:%M:%S"))
class App(Frame):
    """Frame showing current sensor readings beside recommended targets.

    Reads the module-level ``data1`` snapshot (index 0 holds the
    timestamp inserted at import time; the remaining entries are sensor
    values) and renders it in a ttk ``Treeview``.
    """

    # NOTE(review): class-level alias of the module snapshot; appears
    # unused within this module (methods read ``data1`` directly).
    data=data1
    def __init__(self, parent):
        """Build the table widgets, fill them, and grid into *parent*."""
        Frame.__init__(self, parent)
        self.CreateUI()
        self.LoadTable()
        self.grid(sticky = (N,S,W,E))
        # Let this frame expand with the parent window.
        parent.grid_rowconfigure(0, weight = 1)
        parent.grid_columnconfigure(0, weight = 1)
    def CreateUI(self):
        """Create the Treeview: a tree column for the variable name plus
        three data columns (time, current state, recommended state)."""
        tv = Treeview(self)
        #tv.tag_configure('even', background='#65dc40')
        tv['columns'] = ('Hora', 'Estadoactual', 'Recomendado')
        tv.heading("#0", text='Variable', anchor='w')
        tv.column("#0", anchor="w")
        tv.heading('Hora', text='Hora')
        tv.column('Hora', anchor='center', width=100)
        tv.heading('Estadoactual', text='Estado actual')
        tv.column('Estadoactual', anchor='center', width=100)
        tv.heading('Recomendado', text='Estado recomendado')
        tv.column('Recomendado', anchor='center', width=100)
        tv.grid(sticky = (N,S,W,E))
        self.treeview = tv
        self.grid_rowconfigure(0, weight = 1)
        self.grid_columnconfigure(0, weight = 1)
    def LoadTable(self):
        """Insert one row per sensor.

        ``data1[0]`` is the snapshot timestamp; ``data1[1]``..``data1[5]``
        are the sensor readings; the third column holds hard-coded
        recommended target values.
        """
        self.treeview.insert('', 'end', text="PH", values=(data1[0],data1[1], '6.6'))
        self.treeview.insert('', 'end', text="SALINIDAD", values=(data1[0],data1[2], '30%'))
        self.treeview.insert('', 'end', text="TEMPERATURA", values=(data1[0],data1[3], '30° - 35°'))#3
        self.treeview.insert('', 'end', text="HUMEDAD", values=(data1[0],data1[4], '13%'))
        self.treeview.insert('', 'end', text="NIVEL DEL AGUA", values=(data1[0],data1[5], '10 cm'))
        self.treeview.tag_configure('even', background='#65dc40')
def main():
    """Open the comparison window (sensor readings vs. recommendations)."""
    root1 = Tk()
    root1.iconbitmap(r'images/descarga_gMJ_icon.ico')
    root1.geometry("600x200")
    root1.title("Comparacion")
    App(root1)
    # NOTE(review): this button has no command= callback, so clicking it
    # does nothing — presumably a refresh action was intended; confirm.
    Button(root1, text="Verificar Valores", bg='#65dc40', fg='White').grid()
    root1.mainloop()
if __name__ == '__main__':
    main()
def cmpdata():
main() | 31.3125 | 102 | 0.644311 |
4292a1e847fbed0c98165616fba4372eaa7e8693 | 3,486 | py | Python | summary.py | top-quarks/ARC-solution | 1cd2e8d8ecf20c9c3d013103d317f538ea8acf8a | [
"MIT"
] | 45 | 2020-06-08T23:00:20.000Z | 2022-02-19T13:23:33.000Z | summary.py | top-quarks/ARC-solution | 1cd2e8d8ecf20c9c3d013103d317f538ea8acf8a | [
"MIT"
] | null | null | null | summary.py | top-quarks/ARC-solution | 1cd2e8d8ecf20c9c3d013103d317f538ea8acf8a | [
"MIT"
] | 5 | 2020-06-09T04:09:39.000Z | 2022-01-05T15:38:55.000Z | """from glob import glob
def read(fn):
f = open(fn)
t = f.read()
f.close()
return t
combined = ["output_id,output"]
for taski in range(419):
ids = set()
cands = []
for fn in glob("output/answer_%d_*.csv"%taski):
#if not '_13.' in fn and not '_3.' in fn: continue
t = read(fn).strip().split('\n')
ids.add(t[0])
for cand in t[1:]:
img, score = cand.split()
cands.append((float(score), img))
assert(len(ids) == 1)
id = ids.pop()
seen = set()
cands.sort(reverse=True)
best = []
for cand in cands:
score, img = cand
if not img in seen:
seen.add(img)
best.append(img)
if len(best) == 3:
break
if not best: best.append('|0|')
combined.append(id+','+' '.join(best))
outf = open('submission_part.csv', 'w')
for line in combined:
print(line, file=outf)
outf.close()
exit(0)
"""
# Task indices to summarise (tasks 0..418).
inds = range(0,419)
inds = list(inds)
compressed = ''   # one outcome digit (0-3) per task, in task order
memories = []     # [peak resident memory (from 'maxresident' field), task index]
times = []        # [elapsed wall-clock seconds, task index]
def read(fn):
    """Return the text contents of *fn*, or '' if it cannot be read.

    Missing per-task output files are expected during a partial run, so
    unreadable files are treated as empty rather than raised.
    """
    try:
        # 'with' guarantees the handle is closed even if .read() raises;
        # the previous version leaked the handle on a read error.
        with open(fn, 'r') as f:
            return f.read()
    except (OSError, UnicodeDecodeError):
        # Only I/O and decoding failures are swallowed — the old bare
        # 'except' also hid KeyboardInterrupt/SystemExit and real bugs.
        return ''
# Tally of outcome levels; after the cumulative pass below, score[i]
# holds the number of tasks that reached at least level i.
score = [0,0,0,0]
for i in inds:
    t = read('store/%d_out.txt'%i)
    # The status line for this task starts at 'Task #'.
    line = t[t.index('Task #'):].split('\n')[0]
    #print(line)
    # Outcome levels: 3 = correct answer, 2 = produced a candidate,
    # 1 = only got the output dimensions right, 0 = nothing.
    if line.count('Correct'): s = 3
    elif line.count('Candidate'): s = 2
    elif line.count('Dimensions'): s = 1
    else: s = 0
    score[s] += 1
    compressed += str(s)
    t = read('store/tmp/%d_err.txt'%i)
    if t:
        # Parse /usr/bin/time-style stderr: the number just before
        # 'maxresident', and the M:SS value just before 'elapsed'.
        memories.append([int(t.split('maxresident')[0].split(' ')[-1]), i])
        m,s = t.split('elapsed')[0].split(' ')[-1].split(':')
        times.append([float(m)*60+float(s), i])
# Convert counts to cumulative form: score[i] = tasks at level >= i.
for i in range(3,0,-1):
    score[i-1] += score[i]
print(compressed)
print()
print("Total: % 4d" % score[0])
print("Size : % 4d" % score[1])
print("Cands: % 4d" % score[2])
print("Correct:% 3d"% score[3])
# Report the five most memory-hungry tasks.
memories.sort(reverse=True)
it = 0
for mem,i in memories:
    print("%d : %.1f GB"%(i, mem/2**20))
    it += 1
    if it == 5: break
print()
# Report the five slowest tasks.
times.sort(reverse=True)
it = 0
for secs,i in times:
    print("%d : %.1f s"%(i, secs))
    it += 1
    if it == 5: break
exit(0)
# NOTE(review): everything below is unreachable — the exit(0) above
# always terminates the script first. Kept as scratch analysis code.
for i in inds:
    t = read('store/tmp/%d_err.txt'%i)
    if t:
        print(t.count("Features: 4"))
import numpy as np
from sklearn import cross_validation, linear_model
from math import log10
#Estimate times
# Build a small regression dataset: y = elapsed seconds per task,
# x = feature products parsed from the 'Features:' line.
x, y = [], []
for i in inds:
    t = read('store/tmp/%d_err.txt'%i)
    if t:
        m,s = t.split('elapsed')[0].split(' ')[-1].split(':')
        y.append(float(m)*60+float(s))
        f = [float(i) for i in t.split('Features: ')[-1].split('\n')[0].split(' ')]
        p = []
        print(f, y[-1])
        # All pairwise products plus the raw features...
        for i in range(len(f)):
            for j in range(i):
                p.append(f[i]*f[j])
            p.append(f[i])
        # ...which is then discarded in favour of a hand-picked subset.
        p = [f[0], f[3], f[0]*f[3]]
        x.append(p)
"""loo = cross_validation.LeaveOneOut(len(y))
regr = linear_model.LinearRegression()
scores = cross_validation.cross_val_score(regr, x, y, scoring='neg_mean_squared_error', cv=loo,)
print(10**((-scores.mean())**.5))"""
# Fit a linear runtime model and report its leave-one-out RMSE.
model = linear_model.LinearRegression()
model.fit(x, y)
r_sq = model.score(x, y)
loo = cross_validation.LeaveOneOut(len(y))
scores = cross_validation.cross_val_score(model, x, y, scoring='neg_mean_squared_error', cv=loo,)
print(((-scores.mean())**.5))
print('coefficient of determination:', r_sq)
print('intercept:', model.intercept_)
print('slope:', model.coef_)
| 24.377622 | 97 | 0.552783 |
74443e86cd3689104f126d9f0414120e8e9cca83 | 363 | py | Python | rest_framework_bulk/tests/simple_app/urls.py | livingbio/django-rest-framework-bulk | d69902d706aa236559a4b6ae7748b482418762f2 | [
"MIT"
] | null | null | null | rest_framework_bulk/tests/simple_app/urls.py | livingbio/django-rest-framework-bulk | d69902d706aa236559a4b6ae7748b482418762f2 | [
"MIT"
] | null | null | null | rest_framework_bulk/tests/simple_app/urls.py | livingbio/django-rest-framework-bulk | d69902d706aa236559a4b6ae7748b482418762f2 | [
"MIT"
] | null | null | null | from __future__ import print_function, unicode_literals
from django.conf.urls import url, include
from rest_framework_bulk.routes import BulkRouter
from .views import SimpleViewSet
app_name = 'test'
# Router from rest_framework_bulk; registers SimpleViewSet under the
# 'simple' prefix (third positional argument is the basename).
router = BulkRouter()
router.register('simple', SimpleViewSet, 'simple')
urlpatterns = (
    # Mount every router-generated route under /api/ in the 'api' namespace.
    url(r'^api/', include((router.urls, 'test'), namespace='api')),
)
| 22.6875 | 67 | 0.757576 |
cd1ff5f3edcd5e139eca97102600dd30e828b7f6 | 304 | py | Python | employmentstatusprobability.py | virtualsociety/simulation-py | 89ba16cfcb0743aedc4eb669ed9853989aeebee1 | [
"MIT"
] | null | null | null | employmentstatusprobability.py | virtualsociety/simulation-py | 89ba16cfcb0743aedc4eb669ed9853989aeebee1 | [
"MIT"
] | null | null | null | employmentstatusprobability.py | virtualsociety/simulation-py | 89ba16cfcb0743aedc4eb669ed9853989aeebee1 | [
"MIT"
] | null | null | null | '''
Function to calculate the employment status probability
By Dr. Raymond Hoogendoorn
Copyright 2020
'''
def calculateEmploymentStatusProbability(df_employment, baseyear):
    """Return the employment-status probability distribution for a year.

    Selects the column of ``df_employment`` named after ``baseyear``
    (coerced to a string) and normalises it so its entries sum to 1.
    """
    counts = df_employment[str(baseyear)]
    return counts / sum(counts)
| 23.384615 | 66 | 0.756579 |
d277ad2fc1262bd2c66418d4ac747bb431c28868 | 428 | py | Python | demosys/resources/data.py | Contraz/demosys-py | 0479e0f3b0a3901f601bffd2d11e155f97b47555 | [
"0BSD"
] | 70 | 2017-03-31T12:01:41.000Z | 2022-01-05T06:30:57.000Z | demosys/resources/data.py | Contraz/demosys-py | 0479e0f3b0a3901f601bffd2d11e155f97b47555 | [
"0BSD"
] | 69 | 2017-06-18T22:37:46.000Z | 2020-01-23T04:02:22.000Z | demosys/resources/data.py | Contraz/demosys-py | 0479e0f3b0a3901f601bffd2d11e155f97b47555 | [
"0BSD"
] | 9 | 2017-05-13T21:13:02.000Z | 2020-10-01T18:09:49.000Z | """
Registry general data files
"""
from demosys.conf import settings
from demosys.resources.base import BaseRegistry
from demosys.utils.module_loading import import_string
class DataFiles(BaseRegistry):
    """Registry tracking requested data files.

    On construction, resolves each dotted loader path listed in
    ``settings.DATA_LOADERS`` into the loader class it names.
    """

    def __init__(self):
        super().__init__()
        loaders = []
        for loader_path in settings.DATA_LOADERS:
            loaders.append(import_string(loader_path))
        self._loaders = loaders
data = DataFiles()
| 22.526316 | 69 | 0.705607 |
d3ee7c5e9886615e6017ec30f1c5758f49456480 | 1,622 | py | Python | tool/githubify.py | coinjet/ripple-dev-portal | ade1cf861813a1735784671613b2800864b14788 | [
"Apache-2.0"
] | null | null | null | tool/githubify.py | coinjet/ripple-dev-portal | ade1cf861813a1735784671613b2800864b14788 | [
"Apache-2.0"
] | null | null | null | tool/githubify.py | coinjet/ripple-dev-portal | ade1cf861813a1735784671613b2800864b14788 | [
"Apache-2.0"
] | null | null | null | #!/bin/env python3
"""
Convert between Dev-Portal-ready markdown and Github-ready markdown. Has two modes:
Normal - Convert from Dev Portal format to Github:
* Comments out divs that the dev portal uses to generate tabs, so that Github parses the markdown inside
* Replaces local relative links with absolute links to ripple.com
Reverse - Convert from Github format to Dev Portal:
* Uncomments multicode divs
* Replaces absolute links with local ones that will work even offline.
Usage: githubify.py ripplerest_api.md > readme.md
You may still want to do some manual editing (for example, to add Travis status icons to your readme)
"""
import sys
def convert_page(text, reverse):
    """Rewrite *text* between Dev Portal and Github markdown conventions.

    With ``reverse`` false, multicode wrapper divs are commented out (so
    Github renders the markdown inside) and local relative links become
    absolute ripple.com links. With ``reverse`` true, the inverse
    substitutions are applied.
    """
    pairs = [
        ("<div class='multicode'>", "<!-- <div class='multicode'> -->"),
        ("</div>", "<!-- </div> -->"),
        ("(rest-api-tool.html", "(https://ripple.com/build/rest-tool"),
        ("(transactions.html", "(https://ripple.com/build/transactions"),
        ("(rippled-apis.html", "(https://ripple.com/build/rippled-apis"),
    ]
    for portal_form, github_form in pairs:
        if reverse:
            text = text.replace(github_form, portal_form)
        else:
            text = text.replace(portal_form, github_form)
    return text
# CLI: githubify.py infile [reverse]; converted markdown goes to stdout.
if __name__ == "__main__":
    if len(sys.argv) < 2 or len(sys.argv) > 3:
        exit("usage: %s infile [reverse]" % sys.argv[0])
    # Any third argument other than (case-insensitive) "reverse" is
    # silently treated as a normal forward conversion.
    if len(sys.argv) == 3 and sys.argv[2].lower() == "reverse":
        reverse = True
    else:
        reverse = False
    with open(sys.argv[1]) as f:
        text = f.read()
    text = convert_page(text, reverse)
    print(text)
| 33.102041 | 108 | 0.619605 |
6239c7e16a83312ec568640d46f48398e08dbd01 | 6,701 | py | Python | config/settings/production.py | gfavre/invoice-manager | 2a1db22edd51b461c090282c6fc1f290f3265379 | [
"MIT"
] | 1 | 2021-11-27T06:40:34.000Z | 2021-11-27T06:40:34.000Z | config/settings/production.py | gfavre/invoice-manager | 2a1db22edd51b461c090282c6fc1f290f3265379 | [
"MIT"
] | 2 | 2021-05-13T04:50:50.000Z | 2022-02-28T21:06:24.000Z | config/settings/production.py | gfavre/invoice-manager | 2a1db22edd51b461c090282c6fc1f290f3265379 | [
"MIT"
] | null | null | null | import logging
import sentry_sdk
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["beyondthewall.ch"])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": env("REDIS_URL"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
# Mimicing memcache behavior.
# https://github.com/jazzband/django-redis#memcached-exceptions-behavior
"IGNORE_EXCEPTIONS": True,
},
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# STATIC
# ------------------------
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA
# ------------------------------------------------------------------------------
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="beyondtheadmin <noreply@beyondthewall.ch>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
"DJANGO_EMAIL_SUBJECT_PREFIX", default="[beyondtheadmin]"
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
# https://anymail.readthedocs.io/en/stable/esps/postmark/
EMAIL_BACKEND = "anymail.backends.postmark.EmailBackend"
ANYMAIL = {
"POSTMARK_SERVER_TOKEN": env("POSTMARK_SERVER_TOKEN"),
"POSTMARK_API_URL": env("POSTMARK_API_URL", default="https://api.postmarkapp.com/"),
}
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"django.db.backends": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
# Errors logged by the SDK itself
"sentry_sdk": {"level": "ERROR", "handlers": ["console"], "propagate": False},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
},
}
# Sentry
# ------------------------------------------------------------------------------
SENTRY_DSN = env("SENTRY_DSN")
SENTRY_LOG_LEVEL = env.int("DJANGO_SENTRY_LOG_LEVEL", logging.INFO)
sentry_logging = LoggingIntegration(
level=SENTRY_LOG_LEVEL, # Capture info and above as breadcrumbs
event_level=logging.ERROR, # Send errors as events
)
integrations = [sentry_logging, DjangoIntegration(), CeleryIntegration()]
sentry_sdk.init(
dsn=SENTRY_DSN,
integrations=integrations,
environment=env("SENTRY_ENVIRONMENT", default="production"),
traces_sample_rate=env.float("SENTRY_TRACES_SAMPLE_RATE", default=0.0),
)
# Your stuff...
# ------------------------------------------------------------------------------
| 38.511494 | 88 | 0.600358 |
2668134eb65738a2db35b00e80acf49bf0cc167b | 10,978 | py | Python | src/command_modules/azure-cli-storage/azure/cli/command_modules/storage/file.py | 0cool321/azure-cli | fd8e6d46d5cee682aff51e262c06bc40c01636ba | [
"MIT"
] | 2 | 2020-07-22T18:53:05.000Z | 2021-09-11T05:52:33.000Z | src/command_modules/azure-cli-storage/azure/cli/command_modules/storage/file.py | 0cool321/azure-cli | fd8e6d46d5cee682aff51e262c06bc40c01636ba | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-storage/azure/cli/command_modules/storage/file.py | 0cool321/azure-cli | fd8e6d46d5cee682aff51e262c06bc40c01636ba | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
Commands for storage file share operations
"""
# pylint: disable=too-many-arguments
import os.path
from azure.cli.core._logging import get_az_logger
from azure.cli.core._util import CLIError
from azure.common import AzureException, AzureHttpError
from .util import filter_none, create_blob_service_from_storage_client, collect_blobs, collect_files
def storage_file_upload_batch(client, destination, source, pattern=None, dryrun=False,
validate_content=False, content_settings=None, max_connections=1,
metadata=None):
"""
Upload local files to Azure Storage File Share in batch
"""
from .util import glob_files_locally
source_files = [c for c in glob_files_locally(source, pattern)]
if dryrun:
logger = get_az_logger(__name__)
logger.warning('upload files to file share')
logger.warning(' account %s', client.account_name)
logger.warning(' share %s', destination)
logger.warning(' total %d', len(source_files or []))
logger.warning(' operations')
for f in source_files or []:
logger.warning(' - %s => %s', *f)
return []
# TODO: Performance improvement
# 1. Upload files in parallel
def _upload_action(source_pair):
dir_name = os.path.dirname(source_pair[1])
file_name = os.path.basename(source_pair[1])
_make_directory_in_files_share(client, destination, dir_name)
client.create_file_from_path(share_name=destination,
directory_name=dir_name,
file_name=file_name,
local_file_path=source_pair[0],
content_settings=content_settings,
metadata=metadata,
max_connections=max_connections,
validate_content=validate_content)
return client.make_file_url(destination, dir_name, file_name)
return list(_upload_action(f) for f in source_files)
def storage_file_download_batch(client, source, destination, pattern=None, dryrun=False,
validate_content=False, max_connections=1):
"""
Download files from file share to local directory in batch
"""
from .util import glob_files_remotely, mkdir_p
source_files = glob_files_remotely(client, source, pattern)
if dryrun:
source_files_list = list(source_files)
logger = get_az_logger(__name__)
logger.warning('upload files to file share')
logger.warning(' account %s', client.account_name)
logger.warning(' share %s', source)
logger.warning('destination %s', destination)
logger.warning(' pattern %s', pattern)
logger.warning(' total %d', len(source_files_list))
logger.warning(' operations')
for f in source_files_list:
logger.warning(' - %s/%s => %s', f[0], f[1], os.path.join(destination, *f))
return []
def _download_action(pair):
destination_dir = os.path.join(destination, pair[0])
mkdir_p(destination_dir)
client.get_file_to_path(source,
directory_name=pair[0],
file_name=pair[1],
file_path=os.path.join(destination, *pair),
validate_content=validate_content,
max_connections=max_connections)
return client.make_file_url(source, *pair)
return list(_download_action(f) for f in source_files)
def storage_file_copy_batch(client, source_client,
destination_share=None, destination_path=None,
source_container=None, source_share=None, source_sas=None,
pattern=None, dryrun=False, metadata=None, timeout=None):
"""
Copy a group of files asynchronously
"""
logger = None
if dryrun:
logger = get_az_logger(__name__)
logger.warning('copy files or blobs to file share')
logger.warning(' account %s', client.account_name)
logger.warning(' share %s', destination_share)
logger.warning(' path %s', destination_path)
logger.warning(' source %s', source_container or source_share)
logger.warning('source type %s', 'blob' if source_container else 'file')
logger.warning(' pattern %s', pattern)
logger.warning(' operations')
if source_container:
# copy blobs to file share
# if the source client is None, recreate one from the destination client.
source_client = source_client or create_blob_service_from_storage_client(client)
# the cache of existing directories in the destination file share. the cache helps to avoid
# repeatedly create existing directory so as to optimize the performance.
existing_dirs = set([])
def action_blob_copy(blob_name):
if dryrun:
logger.warning(' - copy blob %s', blob_name)
else:
_create_file_and_directory_from_blob(
client, source_client, destination_share, source_container, source_sas,
blob_name, destination_dir=destination_path, metadata=metadata, timeout=timeout,
existing_dirs=existing_dirs)
return list(filter_none(action_blob_copy(blob) for blob in
collect_blobs(source_client, source_container, pattern)))
elif source_share:
# copy files from share to share
# if the source client is None, assume the file share is in the same storage account as
# destination, therefore client is reused.
source_client = source_client or client
# the cache of existing directories in the destination file share. the cache helps to avoid
# repeatedly create existing directory so as to optimize the performance.
existing_dirs = set([])
def action_file_copy(file_info):
dir_name, file_name = file_info
if dryrun:
logger.warning(' - copy file %s', os.path.join(dir_name, file_name))
else:
_create_file_and_directory_from_file(
client, source_client, destination_share, source_share, source_sas, dir_name,
file_name, destination_dir=destination_path, metadata=metadata,
timeout=timeout, existing_dirs=existing_dirs)
return list(filter_none(action_file_copy(file) for file in
collect_files(source_client, source_share, pattern)))
else:
# won't happen, the validator should ensure either source_container or source_share is set
raise ValueError('Fail to find source. Neither blob container or file share is specified.')
def _create_file_and_directory_from_blob(file_service, blob_service, share, container, sas,
blob_name,
destination_dir=None, metadata=None, timeout=None,
existing_dirs=None):
"""
Copy a blob to file share and create the directory if needed.
"""
blob_url = blob_service.make_blob_url(container, blob_name, sas_token=sas)
full_path = os.path.join(destination_dir, blob_name) if destination_dir else blob_name
file_name = os.path.basename(full_path)
dir_name = os.path.dirname(full_path)
_make_directory_in_files_share(file_service, share, dir_name, existing_dirs)
try:
file_service.copy_file(share, dir_name, file_name, blob_url, metadata, timeout)
return file_service.make_file_url(share, dir_name, file_name)
except AzureException:
error_template = 'Failed to copy blob {} to file share {}. Please check if you have ' + \
'permission to read source or set a correct sas token.'
raise CLIError(error_template.format(blob_name, share))
def _create_file_and_directory_from_file(file_service, source_file_service, share, source_share,
sas, source_file_dir, source_file_name,
destination_dir=None, metadata=None, timeout=None,
existing_dirs=None):
"""
Copy a file from one file share to another
"""
file_url = source_file_service.make_file_url(source_share, source_file_dir or None,
source_file_name, sas_token=sas)
full_path = os.path.join(destination_dir, source_file_dir, source_file_name) \
if destination_dir else os.path.join(source_file_dir, source_file_name)
file_name = os.path.basename(full_path)
dir_name = os.path.dirname(full_path)
_make_directory_in_files_share(file_service, share, dir_name, existing_dirs)
try:
file_service.copy_file(share, dir_name, file_name, file_url, metadata, timeout)
return file_service.make_file_url(share, dir_name or None, file_name)
except AzureException:
error_template = 'Failed to copy file {} from share {} to file share {}. Please check if ' \
'you have right permission to read source or set a correct sas token.'
raise CLIError(error_template.format(file_name, source_share, share))
def _make_directory_in_files_share(file_service, file_share, directory_path, existing_dirs=None):
"""
Create directories recursively.
This method accept a existing_dirs set which serves as the cache of existing directory. If the
parameter is given, the method will search the set first to avoid repeatedly create directory
which already exists.
"""
if not directory_path:
return
parents = [directory_path]
p = os.path.dirname(directory_path)
while p:
parents.append(p)
p = os.path.dirname(p)
for dir_name in reversed(parents):
if existing_dirs and (dir_name in existing_dirs):
continue
try:
file_service.create_directory(share_name=file_share,
directory_name=dir_name,
fail_on_exist=False)
except AzureHttpError:
raise CLIError('Failed to create directory {}'.format(dir_name))
if existing_dirs:
existing_dirs.add(directory_path)
| 44.266129 | 100 | 0.624157 |
e4da84832ddaec0f421822628e60b2fd1ed59482 | 2,775 | py | Python | examples/extensions/sudoku.py | jasondark/cvxpy | 56aaa01b0e9d98ae5a91a923708129a7b37a6f18 | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2015-06-03T01:33:46.000Z | 2021-11-15T01:48:49.000Z | examples/extensions/sudoku.py | h-vetinari/cvxpy | 86307f271819bb78fcdf64a9c3a424773e8269fa | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-11-02T21:36:05.000Z | 2019-11-02T21:36:05.000Z | examples/extensions/sudoku.py | h-vetinari/cvxpy | 86307f271819bb78fcdf64a9c3a424773e8269fa | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-10-22T01:35:58.000Z | 2022-01-19T10:48:51.000Z | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy import *
from ncvx.boolean import Boolean
import ncvx.branch_and_bound
import cvxopt
import cProfile, pstats
import numpy as np
n = 9
# 9x9 sudoku grid
numbers = [Boolean(n,n), Boolean(n,n), Boolean(n,n),
Boolean(n,n), Boolean(n,n), Boolean(n,n),
Boolean(n,n), Boolean(n,n), Boolean(n,n)]
# TODO: 9*[Boolean(9,9)] doesn't work....
solution = cvxopt.matrix([
[0, 5, 2, 3, 7, 1, 8, 6, 4],
[6, 3, 7, 8, 0, 4, 5, 2, 1],
[1, 4, 8, 5, 2 ,6, 3, 0, 7],
[4, 7, 1, 2, 3, 0, 6, 5, 8],
[3, 6, 5, 1, 4, 8, 0, 7, 2],
[8, 2, 0, 6, 5, 7, 4, 1, 3],
[5, 1, 6, 7, 8, 3, 2, 4, 0],
[7, 0, 3, 4, 6, 2, 1, 8, 5],
[2, 8, 4, 0, 1, 5, 7, 3, 6]
])
# partial grid
known =[(0,6), (0,7), (1,4), (1,5), (1,8), (2,0), (2,2), (2,7), (2,8),
(3,0), (3,1), (4,0), (4,2), (4,4), (4,6), (4,8), (5,7), (5,8),
(6,0), (6,1), (6,6), (6,8), (7,0), (7,3), (7,4), (8,1), (8,2)]
def row(x, r):
    """Yield the entries of row *r* of matrix-like *x*, left to right.

    ``x.size`` is an (m, n) shape tuple. An out-of-range *r* yields
    nothing, matching the original filtering loop. The original scanned
    all m*n entries to emit n of them; this walks row r directly (O(n)).
    """
    m, n = x.size
    if 0 <= r < m:
        for j in range(n):
            yield x[r, j]
def col(x, c):
    """Yield the entries of column *c* of matrix-like *x*, top to bottom.

    ``x.size`` is an (m, n) shape tuple. An out-of-range *c* yields
    nothing, matching the original filtering loop. The original scanned
    all m*n entries to emit m of them; this walks column c directly (O(m)).
    """
    m, n = x.size
    if 0 <= c < n:
        for i in range(m):
            yield x[i, c]
def block(x, b):
    """Yield the entries of 3x3 sub-block *b* of *x* in row-major order.

    Blocks are numbered left-to-right, top-to-bottom: block 0 covers
    rows 0-2 / cols 0-2, block 1 rows 0-2 / cols 3-5, ..., block 8
    rows 6-8 / cols 6-8. (The old comments described 2-row bands, which
    was stale.) The original scanned all m*n entries to emit nine; this
    walks the block's row/column bands directly, clamped to x.size so
    behaviour on out-of-range b is unchanged (empty generator).
    """
    m, n = x.size
    top = 3 * (b // 3)   # first row of the block's band
    left = 3 * (b % 3)   # first column of the block's band
    for i in range(max(top, 0), min(top + 3, m)):
        for j in range(max(left, 0), min(left + 3, n)):
            yield x[i, j]
# Profile constraint construction plus the branch-and-bound solve.
pr = cProfile.Profile()
pr.enable()
# create the sudoku constraints
# Summing the nine boolean masks elementwise forces exactly one digit
# to be selected in every cell.
constraints = [sum(numbers) == 1]
for i in range(n):
    for num in range(n):
        # Each digit must appear exactly once per row, column and block.
        constraints.append(sum(row(numbers[num], i)) == 1)
        constraints.append(sum(col(numbers[num], i)) == 1)
        constraints.append(sum(block(numbers[num], i)) == 1)
# Pin the pre-filled clues: cell k must hold digit solution[k].
constraints.extend(numbers[solution[k]][k] == 1 for k in known)
# attempt to solve
# NOTE(review): the objective (sum of squared top-left entries) looks
# arbitrary — this is really a feasibility search; confirm intent.
p = Problem(Minimize(sum(map(square, [num[0,0] for num in numbers]))), constraints)
p.solve(method="branch and bound")
pr.disable()
ps = pstats.Stats(pr)
ps.sort_stats('tottime').print_stats(.5)
# Decode the boolean masks back into a digit grid and compare with the
# known solution (prints 0.0 when they agree everywhere).
A = np.zeros((n, n))
for i, num in enumerate(numbers):
    A += i * num.value
print(np.sum(A - solution))
| 28.316327 | 83 | 0.557117 |
c2f7fe7f3bef71896a7e0ac0427b3e6bb56268c1 | 5,393 | py | Python | Cartwheel/lib/Python26/Lib/site-packages/OpenGL/raw/GL/NV/float_buffer.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | null | null | null | Cartwheel/lib/Python26/Lib/site-packages/OpenGL/raw/GL/NV/float_buffer.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | null | null | null | Cartwheel/lib/Python26/Lib/site-packages/OpenGL/raw/GL/NV/float_buffer.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | null | null | null | '''OpenGL extension NV.float_buffer
Overview (from the spec)
This extension builds upon NV_fragment_program to provide a framebuffer
and texture format that allows fragment programs to read and write
unconstrained floating point data.
In unextended OpenGL, most computations dealing with color or depth
buffers are typically constrained to operate on values in the range [0,1].
Computational results are also typically clamped to the range [0,1].
Color, texture, and depth buffers themselves also hold values mapped to
the range [0,1].
The NV_fragment_program extension provides a general computational model
that supports floating-point numbers constrained only by the precision of
the underlying data types. The quantites computed by fragment programs do
not necessarily correspond in number or in range to conventional
attributes such as RGBA colors or depth values. Because of the range and
precision constraints imposed by conventional fixed-point color buffers,
it may be difficult (if not impossible) to use them to implement certain
multi-pass algorithms.
To enhance the extended range and precision available through fragment
programs, this extension provides floating-point RGBA color buffers that
can be used instead of conventional fixed-point RGBA color buffers. A
floating-point RGBA color buffer consists of one to four floating-point
components stored in the 16- or 32-bit floating-point formats (fp16 or
fp32) defined in the NV_half_float and NV_fragment_program extensions.
When a floating-point color buffer is used, the results of fragment
programs, as written to the "x", "y", "z", and "w" components of the
o[COLR] or o[COLH] output registers, are written directly to the color
buffer without any clamping or modification. Certain per-fragment
operations are bypassed when rendering to floating-point color buffers.
A floating-point color buffer can also be used as a texture map, either by
reading back the contents and then using conventional TexImage calls, or
by using the buffer directly via the ARB_render_texture extension.
This extension has many uses. Some possible uses include:
(1) Multi-pass algorithms with arbitrary intermediate results that
don't have to be artifically forced into the range [0,1]. In
addition, intermediate results can be written without having to
worry about out-of-range values.
(2) Deferred shading algorithms where an expensive fragment program is
executed only after depth testing is fully complete. Instead, a
simple program is executed, which stores the parameters necessary
to produce a final result. After the entire scene is rendered, a
second pass is executed over the entire frame buffer to execute
the complex fragment program using the results written to the
floating-point color buffer in the first pass. This will save the
cost of applying complex fragment programs to fragments that will
not appear in the final image.
(3) Use floating-point texture maps to evaluate functions with
arbitrary ranges. Arbitrary functions with a finite domain can be
approximated using a texture map holding sample results and
piecewise linear approximation.
There are several significant limitations on the use of floating-point
color buffers. First, floating-point color buffers do not support frame
buffer blending. Second, floating-point texture maps do not support
mipmapping or any texture filtering other than NEAREST. Third,
floating-point texture maps must be 2D, and must use the
NV_texture_rectangle extension.
The official definition of this extension is available here:
http://oss.sgi.com/projects/ogl-sample/registry/NV/float_buffer.txt
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_NV_float_buffer'
GL_FLOAT_R_NV = constant.Constant( 'GL_FLOAT_R_NV', 0x8880 )
GL_FLOAT_RG_NV = constant.Constant( 'GL_FLOAT_RG_NV', 0x8881 )
GL_FLOAT_RGB_NV = constant.Constant( 'GL_FLOAT_RGB_NV', 0x8882 )
GL_FLOAT_RGBA_NV = constant.Constant( 'GL_FLOAT_RGBA_NV', 0x8883 )
GL_FLOAT_R16_NV = constant.Constant( 'GL_FLOAT_R16_NV', 0x8884 )
GL_FLOAT_R32_NV = constant.Constant( 'GL_FLOAT_R32_NV', 0x8885 )
GL_FLOAT_RG16_NV = constant.Constant( 'GL_FLOAT_RG16_NV', 0x8886 )
GL_FLOAT_RG32_NV = constant.Constant( 'GL_FLOAT_RG32_NV', 0x8887 )
GL_FLOAT_RGB16_NV = constant.Constant( 'GL_FLOAT_RGB16_NV', 0x8888 )
GL_FLOAT_RGB32_NV = constant.Constant( 'GL_FLOAT_RGB32_NV', 0x8889 )
GL_FLOAT_RGBA16_NV = constant.Constant( 'GL_FLOAT_RGBA16_NV', 0x888A )
GL_FLOAT_RGBA32_NV = constant.Constant( 'GL_FLOAT_RGBA32_NV', 0x888B )
GL_TEXTURE_FLOAT_COMPONENTS_NV = constant.Constant( 'GL_TEXTURE_FLOAT_COMPONENTS_NV', 0x888C )
GL_FLOAT_CLEAR_COLOR_VALUE_NV = constant.Constant( 'GL_FLOAT_CLEAR_COLOR_VALUE_NV', 0x888D )
glget.addGLGetConstant( GL_FLOAT_CLEAR_COLOR_VALUE_NV, (4,) )
GL_FLOAT_RGBA_MODE_NV = constant.Constant( 'GL_FLOAT_RGBA_MODE_NV', 0x888E )
glget.addGLGetConstant( GL_FLOAT_RGBA_MODE_NV, (1,) )
def glInitFloatBufferNV():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
| 52.872549 | 94 | 0.787131 |
6a400cf97c1963b044e2476f0ca7b4eed93c1c76 | 1,278 | py | Python | lesson2-09/solution2_9.py | nmpegetis/udacity-facebook-pytorch | 36a9de03c71892836e184e58695143960c6a7d2b | [
"MIT"
] | null | null | null | lesson2-09/solution2_9.py | nmpegetis/udacity-facebook-pytorch | 36a9de03c71892836e184e58695143960c6a7d2b | [
"MIT"
] | null | null | null | lesson2-09/solution2_9.py | nmpegetis/udacity-facebook-pytorch | 36a9de03c71892836e184e58695143960c6a7d2b | [
"MIT"
] | null | null | null | import pandas as pd
# Set initial values w1, w2, and bias for the linear equation 3*x1+4*x2-10
w1 = 3.0
w2 = 4.0
bias = -10.0
# Inputs and outputs
misclassified_inputPoint_coords = (1, 1)
outputs = []
condition = False
times = 0
learning_rate = 0.1
is_correct_string = ''
while condition == False:
calculated_w1 = (w1 + learning_rate * misclassified_inputPoint_coords[0])
calculated_w2 = (w2 + learning_rate * misclassified_inputPoint_coords[1])
calculated_bias = (bias + learning_rate * 1)
x1 = misclassified_inputPoint_coords[0]
x2 = misclassified_inputPoint_coords[1]
linear_combination = calculated_w1 * x1 + calculated_w2 * x2 + calculated_bias
output = int(linear_combination >= 0)
condition = output
is_correct_string = 'Yes' if condition == True else 'No'
outputs.append([calculated_w1, calculated_w2, calculated_bias,
linear_combination, output, is_correct_string])
times = times + 1
w1 = calculated_w1
w2 = calculated_w2
bias = calculated_bias
# Print output
print(times)
output_frame = pd.DataFrame(outputs, columns=[
'Input 1', ' Input 2', ' Bias', ' Linear Combination', ' Activation Output', ' Is Correct'])
print(output_frame.to_string(index=False))
| 33.631579 | 125 | 0.694836 |
84ef31cf6b690f94e2d27e2c0a2c4502afb26e9c | 7,454 | py | Python | preprocess/cskb_merge.py | HKUST-KnowComp/CSKB-Population | 7b1b2d25fbd0095b0cf009b933cfd5a62feadd58 | [
"MIT"
] | 13 | 2021-09-10T03:41:02.000Z | 2022-03-30T09:53:12.000Z | preprocess/cskb_merge.py | HKUST-KnowComp/CSKB-Population | 7b1b2d25fbd0095b0cf009b933cfd5a62feadd58 | [
"MIT"
] | 1 | 2022-02-09T23:08:33.000Z | 2022-03-22T22:28:37.000Z | preprocess/cskb_merge.py | HKUST-KnowComp/CSKB-Population | 7b1b2d25fbd0095b0cf009b933cfd5a62feadd58 | [
"MIT"
] | 2 | 2021-10-12T13:15:35.000Z | 2021-11-17T08:46:46.000Z | import pandas as pd
from random import sample
import networkx as nx
import numpy as np
from tqdm import tqdm, trange
import networkx as nx
import sys
sys.path.append("../")
sys.path.append("../Glucose")
from utils.utils import subject_list, object_list, group_list
from Glucose.utils.aser_to_glucose import generate_aser_to_glucose_dict
def reverse_px_py(original: str):
return original.replace("PersonX", "[PX]").replace("PersonY", "[PY]").replace("[PX]", "PersonY").replace(
"[PY]", "PersonX")
def merge_rel_dict(d1: dict, d2: dict):
d_merge = {}
for key in set(d1.keys()) | set(d2.keys()):
d_merge[key] = d1.get(key, 0) + d2.get(key, 0)
return d_merge
def normalize_head_tail(head, tail):
head_split = head.split()
tail_split = tail.split()
head_subj = head_split[0]
tail_subj = tail_split[0]
_, re_head, re_tail, _ = generate_aser_to_glucose_dict(head, tail, True)
re_head_reverse, re_tail_reverse = reverse_px_py(re_head), reverse_px_py(re_tail)
return re_head, re_tail, re_head_reverse, re_tail_reverse
relations = ['oEffect', 'oReact', 'oWant', 'xAttr',
'xIntent', 'xNeed', 'xReact', 'xWant', 'xEffect']
ATOMIC_tuples = dict([(r,
np.load('/home/data/tfangaa/CKGP/data/new_matching/ASER-format-words/ATOMIC_{}.npy'.format(r),
allow_pickle=True)) for r in relations])
clause_idx = np.load('../../ASER-core/Matching-atomic/clause_idx.npy', allow_pickle=True)
wc_idx = np.load('../../ASER-core/Matching-atomic/wildcard_idx.npy', allow_pickle=True)
node2id_dict = np.load("/home/data/tfangaa/CKGP/data/ASER_raw_data/aser_raw_node_dict.npy", allow_pickle=True)[()]
atomic_raw = pd.read_csv("/home/tfangaa/Downloads/ATOMIC/v4_atomic_all_agg.csv")
split_dict = dict((i,spl) for i,spl in enumerate(atomic_raw['split']))
############################################################
# 1. ATOMIC
############################################################
def get_atomic_graph(ATOMIC_tuples, relations):
G_atomic = nx.DiGraph()
for r in relations:
for hid, tuple_list in tqdm(enumerate(ATOMIC_tuples[r])):
if hid in clause_idx or hid in wc_idx:
continue
for tid, tuples in enumerate(tuple_list):
head_norm_list = []
tail_norm_list = []
for head, tail in tuples:
if len(head) == 0 or len(tail) == 0:
continue
re_head, re_tail, _, _ = normalize_head_tail(head, tail)
head_norm_list.append(re_head)
tail_norm_list.append(re_tail)
head_norm_list, tail_norm_list = list(set(head_norm_list)), list(set(tail_norm_list))
head_agg, tail_agg = "\t".join(head_norm_list), "\t".join(tail_norm_list)
if (head_agg, tail_agg) in G_atomic.edges:
G_atomic[head_agg][tail_agg]["relation"] = \
list(set(G_atomic[head_agg][tail_agg]["relation"])|set([r]))
G_atomic[head_agg][tail_agg]["hid"] = \
list(set(G_atomic[head_agg][tail_agg]["hid"])|set([hid]))
G_atomic[head_agg][tail_agg]["tid"] = \
list(set(G_atomic[head_agg][tail_agg]["tid"])|set([tid]))
else:
G_atomic.add_edge(head_agg, tail_agg, relation=[r], hid=[hid], tid=[tid], split=split_dict[hid])
return G_atomic
G_atomic = get_atomic_graph(ATOMIC_tuples, relations)
if not os.path.exists("../data/final_graph_file"):
os.mkdir("../data/final_graph_file")
os.mkdir("../data/final_graph_file/CSKG")
nx.write_gpickle(G_atomic,
"../data/final_graph_file/CSKG/G_atomic_agg_node.pickle")
############################################################
# 2. ConceptNet
############################################################
from utils.atomic_utils import SUBJ2POSS, PP_SINGLE
omcs_tuples = np.load("../data/omcs_tuples.npy", allow_pickle=True)[()]
parsed_omcs_dict = {
"trn":np.load("../data/new_matching/ASER-format-words/omcs_trn.npy", allow_pickle=True),
"tst":np.load("../data/new_matching/ASER-format-words/omcs_tst.npy", allow_pickle=True),
"dev":np.load("../data/new_matching/ASER-format-words/omcs_dev.npy", allow_pickle=True),
}
def get_cn_graph(parsed_omcs_dict, omcs_tuples):
G_cn = nx.DiGraph()
# check coverage
for spl in parsed_omcs_dict:
for hid, (head, rel, tail) in tqdm(enumerate(omcs_tuples[spl])):
head_norm_list = []
tail_norm_list = []
for pp in PP_SINGLE:
if pp + " " + head in node2id_dict and pp + " " + tail.lower() in node2id_dict:
re_head, re_tail, _, _ = normalize_head_tail(head, tail)
head_norm_list.append(re_head)
tail_norm_list.append(re_tail)
collapsed_list, r = parsed_omcs_dict[spl][hid]
assert r == rel
for head, tail in collapsed_list:
if not (len(head) > 0 and len(tail) > 0):
continue
re_head, re_tail, _, _ = normalize_head_tail(head, tail)
head_norm_list.append(re_head)
tail_norm_list.append(re_tail)
head_norm_list, tail_norm_list = list(set(head_norm_list)), list(set(tail_norm_list))
head_agg, tail_agg = "\t".join(head_norm_list), "\t".join(tail_norm_list)
if (head_agg, tail_agg) in G_cn.edges:
G_cn[head_agg][tail_agg]["relation"] = \
list(set(G_cn[head_agg][tail_agg]["relation"])|set([rel]))
G_cn[head_agg][tail_agg]["hid"] = \
list(set(G_cn[head_agg][tail_agg]["hid"])|set([hid]))
else:
G_cn.add_edge(head_agg, tail_agg, relation=[rel], hid=[hid], split=spl)
return G_cn
G_cn = get_cn_graph(parsed_omcs_dict, omcs_tuples)
nx.write_gpickle(G_cn,
"../data/final_graph_file/CSKG/G_cn_agg_node.pickle")
############################################################
# 3. ATOMIC2020
############################################################
parsed_atomic2020_dict = {
"trn":np.load("../data/new_matching/ASER-format-words/atomic2020_trn.npy", allow_pickle=True),
"tst":np.load("../data/new_matching/ASER-format-words/atomic2020_tst.npy", allow_pickle=True),
"dev":np.load("../data/new_matching/ASER-format-words/atomic2020_dev.npy", allow_pickle=True),}
from tqdm import tqdm
def get_atomic2020_graph(parsed_atomic2020_dict):
G_atomic2020 = nx.DiGraph()
for spl in parsed_atomic2020_dict:
for hid, item in tqdm(enumerate(parsed_atomic2020_dict[spl])):
if len(item) == 0:
continue
collapsed_list, rel = item
head_norm_list, tail_norm_list = [], []
for head, tail in collapsed_list:
if not (len(head) > 0 and len(tail) > 0):
continue
re_head, re_tail, _, _ = normalize_head_tail(head, tail)
head_norm_list.append(re_head)
tail_norm_list.append(re_tail)
head_norm_list, tail_norm_list = list(set(head_norm_list)), list(set(tail_norm_list))
head_agg, tail_agg = "\t".join(head_norm_list), "\t".join(tail_norm_list)
if (head_agg, tail_agg) in G_atomic2020.edges:
G_atomic2020[head_agg][tail_agg]["relation"] = \
list(set(G_atomic2020[head_agg][tail_agg]["relation"])|set([rel]))
G_atomic2020[head_agg][tail_agg]["hid"] = \
list(set(G_atomic2020[head_agg][tail_agg]["hid"])|set([hid]))
else:
G_atomic2020.add_edge(head_agg, tail_agg, relation=[rel], hid=[hid], split=spl)
return G_atomic2020
G_atomic2020 = get_atomic2020_graph(parsed_atomic2020_dict)
nx.write_gpickle(G_atomic2020, "../data/final_graph_file/CSKG/G_atomic2020_agg_node.pickle")
| 43.086705 | 115 | 0.645157 |
a64124d932f1629d4dc088873129b8537dfa20ad | 1,821 | py | Python | samples/samplesPY/arch/axes_normalized.py | preisach/animations | 07dc9598eee7ed3f73e35b99dd6c44c525a976d5 | [
"MIT"
] | null | null | null | samples/samplesPY/arch/axes_normalized.py | preisach/animations | 07dc9598eee7ed3f73e35b99dd6c44c525a976d5 | [
"MIT"
] | null | null | null | samples/samplesPY/arch/axes_normalized.py | preisach/animations | 07dc9598eee7ed3f73e35b99dd6c44c525a976d5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 1 12:21:56 2019
@author: m
"""
import matplotlib.pyplot as plt
# fig = plt.figure()
fig = plt.figure(figsize=(6,6))
# want some y padding
# oops there is surely a padding function, anyway, this works
plt.plot([-0.1, 1.1], [-0.2,1.2], color='w', linewidth=0)
# plt.plot([1.0/3.0, 1.2], [1.0, 1.0], color='black', linewidth=4)
# plt.plot([-0.2, 2.0/3.0], [0.0, 0.0], color='black', linewidth=4)
# plt.plot([1.0/3.0, 1.0/3.0], [1.0, -0.2], color='grey', linestyle="dashed", linewidth=1)
# plt.plot([2.0/3.0, 2.0/3.0], [-0.2, 1.0], color='grey', linestyle="dashed", linewidth=1)
plt.plot([0.25, 1.2], [1.0, 1.0], color='black', linewidth=4)
plt.plot([-0.2, 0.75], [0.0, 0.0], color='black', linewidth=4)
plt.plot([0.25, 0.25], [1.0, -0.2], color='grey', linestyle="dashed", linewidth=1)
plt.plot([0.75, 0.75], [-0.2, 1.0], color='grey', linestyle="dashed", linewidth=1)
# plt.plot([1.0/3.0, 1.0/3.0], [0.0, -0.2], color='grey', linestyle="dashed", linewidth=1)
# plt.plot([2.0/3.0, 2.0/3.0], [0.0, -0.2], color='grey', linestyle="dashed", linewidth=1)
plt.text(0.28, -0.2, "\u03B1", color="black", fontsize=16)
plt.text(0.78, -0.2, "\u03B2", color="black", fontsize=16)
plt.xlabel('input', fontsize=16)
plt.ylabel('output', fontsize=16)
plt.title ('Bistat/Thermostat/Nonideal Relay - Hysteron', fontsize=0)
plt.box(True)
plt.savefig("bistat.svg", format="svg", bbox="tight")
# search: dasharray
# replace
# id="line2d_20"
# L 167.049351 \d+
# L 167.049351 420
# <g id="line2d_21">
# <path clip-path="url(#p88fd846fbb)" d="M 275.750649 \d+
# <path clip-path="url(#p88fd846fbb)" d="M 275.750649 420
# 2 occurance
# stroke-linecap:square;stroke-width:4
# stroke-linecap:butt;stroke-width:4 | 31.947368 | 94 | 0.612301 |
11ed5fd766fc51d7d4393812fb02195037e1040e | 23,721 | py | Python | Play Tic-tac-toe against computer CLI.py | BigBIueWhale/play_Tic-tac-toe | 41e223d394d8aba383803730f3ec54c27dafbc71 | [
"Unlicense"
] | null | null | null | Play Tic-tac-toe against computer CLI.py | BigBIueWhale/play_Tic-tac-toe | 41e223d394d8aba383803730f3ec54c27dafbc71 | [
"Unlicense"
] | null | null | null | Play Tic-tac-toe against computer CLI.py | BigBIueWhale/play_Tic-tac-toe | 41e223d394d8aba383803730f3ec54c27dafbc71 | [
"Unlicense"
] | null | null | null | import copy
import random
import json
import pickle
def listPoss():
poss = [] #possibilities of game endings when x starts (symmetrical to 0 trarts), there are 255168 of those
#poss contains the lists of the moves that led to the game endings. first value of each poss list of moves moves is the outcome of those moves (1 x, 2 o, 3 draw)
#boards are 0 for nothing, 1 for x, 2 for o.
#listed from 0 to 8 (including both)
# 6 7 8
# 3 4 5
# 0 1 2
movePoss = [0] #list of the numbers of the squares chosen, by order of the moves
listToAppendOfMoves = []
board = [0, 0, 0, 0, 0, 0, 0, 0, 0]
counter1 = 0
while True:
counter1 += 1
if counter1 == 100000:
counter1 = 0
indexToChange = len(movePoss) - 1
currentMove = movePoss[indexToChange]
if currentMove == 9:
if indexToChange == 0:
break
indexToChange -= 1
movePoss.pop()
board[movePoss[indexToChange]] = 0
movePoss[indexToChange] += 1
elif not(board[currentMove] == 0):
if not(currentMove == 8): movePoss[indexToChange] += 1
else:
if indexToChange == 0:
break
indexToChange -= 1
movePoss.pop()
board[movePoss[indexToChange]] = 0
movePoss[indexToChange] += 1
else:
player = (indexToChange % 2) + 1
board[currentMove] = player
winningStatus = didWinBasedOnChangedSquare(board, currentMove)
if winningStatus == 0: movePoss.append(0)
else:
listToAppendOfMoves = movePoss.copy()
listToAppendOfMoves.insert(0, winningStatus)
poss.append(listToAppendOfMoves)
if not(currentMove == 8):
board[currentMove] = 0
movePoss[indexToChange] += 1
else:
if indexToChange == 0:
break
board[currentMove] = 0
indexToChange -= 1
movePoss.pop()
board[movePoss[indexToChange]] = 0
movePoss[indexToChange] += 1
return poss
def didWinBasedOnChangedSquare(boardToCheck, changedSquareNumber):
#returns 1 for x won, 2 for o won, 0 for nobody won yet, 3 for draw
valueAtChange = boardToCheck[changedSquareNumber]
if changedSquareNumber == 4:
if (boardToCheck[6] == valueAtChange) and (valueAtChange == boardToCheck[2]): return valueAtChange
elif (boardToCheck[0] == valueAtChange) and (valueAtChange == boardToCheck[8]): return valueAtChange
elif (boardToCheck[1] == valueAtChange) and (valueAtChange == boardToCheck[7]): return valueAtChange
elif (boardToCheck[3] == valueAtChange) and (valueAtChange == boardToCheck[5]): return valueAtChange
elif changedSquareNumber == 0:
if (boardToCheck[4] == valueAtChange) and (valueAtChange == boardToCheck[8]): return valueAtChange
elif (boardToCheck[1] == valueAtChange) and (valueAtChange == boardToCheck[2]): return valueAtChange
elif (boardToCheck[3] == valueAtChange) and (valueAtChange == boardToCheck[6]): return valueAtChange
elif changedSquareNumber == 2:
if (boardToCheck[4] == valueAtChange) and (valueAtChange == boardToCheck[6]): return valueAtChange
elif (boardToCheck[1] == valueAtChange) and (valueAtChange == boardToCheck[0]): return valueAtChange
elif (boardToCheck[5] == valueAtChange) and (valueAtChange == boardToCheck[8]): return valueAtChange
elif changedSquareNumber == 6:
if (boardToCheck[4] == valueAtChange) and (valueAtChange == boardToCheck[2]): return valueAtChange
elif (boardToCheck[7] == valueAtChange) and (valueAtChange == boardToCheck[8]): return valueAtChange
elif (boardToCheck[3] == valueAtChange) and (valueAtChange == boardToCheck[0]): return valueAtChange
elif changedSquareNumber == 8:
if (boardToCheck[4] == valueAtChange) and (valueAtChange == boardToCheck[0]): return valueAtChange
elif (boardToCheck[7] == valueAtChange) and (valueAtChange == boardToCheck[6]): return valueAtChange
elif (boardToCheck[5] == valueAtChange) and (valueAtChange == boardToCheck[2]): return valueAtChange
elif changedSquareNumber == 1:
if (boardToCheck[4] == valueAtChange) and (valueAtChange == boardToCheck[7]): return valueAtChange
elif (boardToCheck[0] == valueAtChange) and (valueAtChange == boardToCheck[2]): return valueAtChange
elif changedSquareNumber == 5:
if (boardToCheck[4] == valueAtChange) and (valueAtChange == boardToCheck[3]): return valueAtChange
elif (boardToCheck[8] == valueAtChange) and (valueAtChange == boardToCheck[2]): return valueAtChange
elif changedSquareNumber == 7:
if (boardToCheck[4] == valueAtChange) and (valueAtChange == boardToCheck[1]): return valueAtChange
elif (boardToCheck[6] == valueAtChange) and (valueAtChange == boardToCheck[8]): return valueAtChange
elif changedSquareNumber == 3:
if (boardToCheck[4] == valueAtChange) and (valueAtChange == boardToCheck[5]): return valueAtChange
elif (boardToCheck[6] == valueAtChange) and (valueAtChange == boardToCheck[0]): return valueAtChange
for counter in range(9):
if boardToCheck[counter] == 0: break
else: return 3
return 0
def findStart(poss, moves):
begin = 0
end = len(poss) - 1
if end == 0: return 0
beginChanged = False
if IsStart(poss, moves, begin): return begin
if IsStart(poss, moves, end): return end
while(True):
average = (begin + end) // 2
averagePlace = checkIfBigEqualOrSmall(poss, moves, average)
if averagePlace == 1:
begin = average
if IsStart(poss, moves, begin): return begin
else:
end = average
if IsStart(poss, moves, end): return end
if (begin == end) or (begin + 1 == end): return (-1) #doesn't exist
def findEnd(poss, moves):
begin = 0
end = len(poss) - 1
if end == 0: return 0
beginChanged = False
if IsEnd(poss, moves, begin): return begin
if IsEnd(poss, moves, end): return end
while(True):
average = (begin + end) // 2
averagePlace = checkIfBigEqualOrSmall(poss, moves, average)
if (averagePlace == 1) or (averagePlace == 0):
begin = average
if IsEnd(poss, moves, begin): return begin
else:
end = average
if IsEnd(poss, moves, end): return end
if begin == end: return (-1) #doesn't exist
def checkIfBigEqualOrSmall(poss, moves, place):
for move in range(len(moves)):
mm = moves[move]
ppm = poss[place][move + 1]
if mm > ppm: return 1 #small
elif mm < ppm: return 2 #big
return 0 #equal
def IsStart(poss, moves, place):
if checkIfBigEqualOrSmall(poss, moves, place) != 0: return False
else:
if place == 0: return True
elif checkIfBigEqualOrSmall(poss, moves, place - 1) == 1: return True
else: return False
def IsEnd(poss, moves, place):
if checkIfBigEqualOrSmall(poss, moves, place) != 0: return False
else:
if place == len(poss) - 1: return True
elif checkIfBigEqualOrSmall(poss, moves, place + 1) == 2: return True
else: return False
def limList(originalList, limSequence):
start = findStart(originalList, limSequence)
end = findEnd(originalList, limSequence) + 1
limListToReturn = []
for move in range(start, end):
limListToReturn.append(originalList[move])
return limListToReturn
def createListOfSecondPlayerMoves(moves):
secMoves = []
lenMoves = len(moves)
for move in range(2, lenMoves, 2):
secMoves.append(moves[move])
return secMoves
def createListOfFirstPlayerMoves(moves):
firMoves = []
lenMoves = len(moves)
for move in range(1, lenMoves, 2):
firMoves.append(moves[move])
return firMoves
def makePossOpposite(poss):
for moveSequence in poss:
if moveSequence[0] == 1: moveSequence[0] = 2
elif moveSequence[0] == 2: moveSequence[0] = 1
def createPossHashtable(poss):
possHash = {}
for moves in poss:
movesC = moves.copy()
del movesC[0]
tMoves = tuple(movesC)
possHash[tMoves] = moves[0]
return possHash
def findNextMoveRecursion(poss, moves, originalLengthOfMoves):
#when it comes to my move, I only need one of them to be good for me, when it comes to the opponent's move, I need all his possible moves to be good to me
#try iterating down where every time there's an option of the opponent, we define that we need each and every one of his options to be okay with us, or that from each and every option there's
#an option I can take that is completely good, or that for each and every one of the opponent's moves on one of those options...
isMyTurn = not((originalLengthOfMoves + len(moves)) % 2)
limList1 = copy.deepcopy(limList(poss, moves))
certain = [-1, -1, -1, -1, -1, -1, -1, -1, -1] #1 is win, 2 is loss, 3 is tie, 4 is invalid, 5 to expand in next level
for move in moves:
certain[move] = 4
for nextMove in range(9):
if certain[nextMove] == 4: continue
movesAfter = moves.copy()
movesAfter.append(nextMove)
limList2 = limList(limList1, movesAfter)
if len(limList2) != 0:
resultOnlyAfterNextMoveWhole = limList2[0]
resultOnlyAfterNextMove = resultOnlyAfterNextMoveWhole[0]
if len(resultOnlyAfterNextMoveWhole) - 1 == len(movesAfter): certain[nextMove] = resultOnlyAfterNextMove
else:
isAlwaysWin = True
isAlwaysWinOrTie = False
for movesToCheck in limList2:
if movesToCheck[0] == 2:
isAlwaysWin = False
isAlwaysWinOrTie = False
elif movesToCheck[0] == 3: isAlwaysWin = False
if isAlwaysWin: certain[nextMove] = 1
elif isAlwaysWinOrTie: certain[nextMove] = 3
else: certain[nextMove] = 5
else:
certain[nextMove] = 4
continue
if isMyTurn:
anyWins = False
anyTies = False
for nextMove in range(9):
if certain[nextMove] == 1:
anyWins = True
anyTies = True
elif certain[nextMove] == 3:
anyTies = True
if anyWins: return 1
elif anyTies: return 3
else:
anyWins = False
anyTies = False
for nextMove in range(9):
if certain[nextMove] == 5:
movesAfter = moves.copy()
movesAfter.append(nextMove)
value = findNextMoveRecursion(limList1, movesAfter, originalLengthOfMoves)
if value == 1:
anyWins = True
anyTies = True
elif value == 3:
anyWins = True
if anyWins: return 1
elif anyTies: return 3
else: return 2
else:
allWins = True
allWinsOrTies = True
for nextMove in range(9):
if certain[nextMove] == 2: return 2
elif certain[nextMove] == 3: allWins = False
elif certain[nextMove] == 5:
movesAfter = moves.copy()
movesAfter.append(nextMove)
value = findNextMoveRecursion(limList1, movesAfter, originalLengthOfMoves)
if value == 2: return 2
elif value == 3: allWins = False
if allWins: return 1
elif allWinsOrTies: return 3
else: return 2
def findNextMove(poss, moves):
return chooseMoveBasedOnLists(findNextMoveReturnChoosingLists(poss, moves))
def findNextMoveReturnChoosingLists(poss, moves):
if len(moves) != 0: limList1 = copy.deepcopy(limList(poss, moves))
else:
limList1 = copy.deepcopy(poss)
IStarted = not(len(moves) % 2)
if not IStarted: makePossOpposite(limList1)
certain = [-1, -1, -1, -1, -1, -1, -1, -1, -1]
for move in moves:
certain[move] = 4
for nextMove in range(9):
if certain[nextMove] == 4: continue
movesAfter = moves.copy()
movesAfter.append(nextMove)
value = findStart(limList1, movesAfter)
if value == -1:
certain[nextMove] = 4
continue
else:
listOfMovesToCheckIfFinal = limList1[value]
if len(listOfMovesToCheckIfFinal) - 1 == len(moves):
certain[nextMove] = listOfMovesToCheckIfFinal[0]
continue
else: certain[nextMove] = findNextMoveRecursion(limList1, movesAfter, len(moves))
wins = []
draws = []
for nextMove in range(9):
if certain[nextMove] == 1:
wins.append(nextMove)
elif certain[nextMove] == 3:
draws.append(nextMove)
listOfBoth = []
listOfBoth.append(wins)
listOfBoth.append(draws)
listOfBoth.append(certain)
return listOfBoth
def chooseMoveBasedOnListsWithLevel(listOfWinsAndDraws, level):
if level == -1: toMakeMistake = 1
elif level == 0: toMakeMistake = 0
elif level > 0: toMakeMistake = random.randint(0, level)
else:
toMakeMistake = 1
if toMakeMistake == 0:
validMoves = []
for nextMove in range(9):
if listOfWinsAndDraws[2][nextMove] != 4: validMoves.append(nextMove)
lenValidMoves = len(validMoves)
if lenValidMoves != 0:
return validMoves[random.randint(0, lenValidMoves - 1)]
lenWins = len(listOfWinsAndDraws[0])
if lenWins != 0:
return listOfWinsAndDraws[0][random.randint(0, lenWins - 1)]
lenDraws = len(listOfWinsAndDraws[1])
if lenDraws != 0:
return listOfWinsAndDraws[1][random.randint(0, lenDraws - 1)]
else:
validMoves = []
for nextMove in range(9):
if listOfWinsAndDraws[2][nextMove] != 4: validMoves.append(nextMove)
lenValidMoves = len(validMoves)
if lenValidMoves != 0:
return validMoves[random.randint(0, lenValidMoves - 1)]
def chooseMoveBasedOnLists(listOfWinsAndDraws):
lenWins = len(listOfWinsAndDraws[0])
if lenWins != 0:
return listOfWinsAndDraws[0][random.randint(0, lenWins - 1)]
lenDraws = len(listOfWinsAndDraws[1])
if lenDraws != 0:
return listOfWinsAndDraws[1][random.randint(0, lenDraws - 1)]
else:
validMoves = []
for nextMove in range(9):
if listOfWinsAndDraws[2][nextMove] != 4: validMoves.append(nextMove)
lenValidMoves = len(validMoves)
if lenValidMoves != 0:
return validMoves[random.randint(0, lenValidMoves - 1)]
def drawBoardFromMoveList(boardToDraw, moveList):
isXTurn = False
lenMoveList = len(moveList)
for move in range(lenMoveList):
isXTurn = not(isXTurn)
if isXTurn: boardToDraw[moveList[move]] = 1
else: boardToDraw[moveList[move]] = 2
def createDicOfWhatMovesArePossBasedOnBoard(poss):
possC = copy.deepcopy(poss)
dicOfBoardsAndThierResults = {}
counter = 0
counter2 = 0
for moves in possC:
counter += 1
counter2 += 1
if counter == 1000:
counter = 0
print(counter2)
del moves[0]
while len(moves) >= 1:
moves.pop()
boardC = [0, 0, 0, 0, 0, 0, 0, 0, 0]
drawBoardFromMoveList(boardC, moves)
boardCT = tuple(boardC)
value = dicOfBoardsAndThierResults.get(boardCT)
if value != None: continue
dicOfBoardsAndThierResults[boardCT] = findNextMoveReturnChoosingLists(poss, moves)
return dicOfBoardsAndThierResults
def findNextMoveBasedOnBoard(dicOfBoardsAndThierResults, board):
boardT = tuple(board)
return chooseMoveBasedOnLists(dicOfBoardsAndThierResults[boardT])
def findNextMoveBasedOnBoardWithLevel(dicOfBoardsAndThierResults, board, level):
boardT = tuple(board)
return chooseMoveBasedOnListsWithLevel(dicOfBoardsAndThierResults[boardT], level)
def convertFromInternetRepresentation(boardToConvert, started):
myRepBoard = [0, 0, 0, 0, 0, 0, 0, 0, 0]
for i in range(9):
if boardToConvert[i + 1] == 'X':
if started == "X": myRepBoard[i] = 1
else: myRepBoard[i] = 2
elif boardToConvert[i + 1] == 'O':
if started == "X": myRepBoard[i] = 2
else: myRepBoard[i] = 1
return myRepBoard
try:
possFileToRead = open("possibilitiesTicTacToe.json")
poss = json.load(possFileToRead)
possFileToRead.close()
except FileNotFoundError:
print("possibilitiesTicTacToe.json Not found! No worrys. Creating it now...")
poss = listPoss()
possFileToWrite = open("possibilitiesTicTacToe.json", "w")
json.dump(poss, possFileToWrite)
possFileToWrite.close()
try:
dicFileToRead = open("ticTacToeDictionary.pickle", "rb")
dicOfBoardsAndThierResults = pickle.load(dicFileToRead)
dicFileToRead.close()
except FileNotFoundError:
print("ticTacToeDictionary.pickle Not found! No worrys. Creating it now... (will take about a minute)")
dicOfBoardsAndThierResults = createDicOfWhatMovesArePossBasedOnBoard(poss)
dicFileToWrite = open("ticTacToeDictionary.pickle", "wb")
pickle.dump(dicOfBoardsAndThierResults, dicFileToWrite)
dicFileToWrite.close()
print(len(dicOfBoardsAndThierResults))
print(len(poss))
#internet
# Tic Tac Toe
import random
def drawBoard(board):
# This function prints out the board that it was passed.
# "board" is a list of 10 strings representing the board (ignore index 0)
print(' | |')
print(' ' + board[7] + ' | ' + board[8] + ' | ' + board[9])
print(' | |')
print('-----------')
print(' | |')
print(' ' + board[4] + ' | ' + board[5] + ' | ' + board[6])
print(' | |')
print('-----------')
print(' | |')
print(' ' + board[1] + ' | ' + board[2] + ' | ' + board[3])
print(' | |')
def inputPlayerLetter():
# Lets the player type which letter they want to be.
# Returns a list with the player’s letter as the first item, and the computer's letter as the second.
letter = ''
while not (letter == 'X' or letter == 'O'):
print('Do you want to be X or O?')
letter = input().upper()
# the first element in the list is the player’s letter, the second is the computer's letter.
if letter == 'X':
return ['X', 'O']
else:
return ['O', 'X']
def whoGoesFirst():
# Randomly choose the player who goes first.
if random.randint(0, 1) == 0:
return 'computer'
else:
return 'player'
def playAgain():
# This function returns True if the player wants to play again, otherwise it returns False.
print('Do you want to play again? (yes or no)')
return input().lower().startswith('y')
def makeMove(board, letter, move):
board[move] = letter
def isWinner(bo, le):
# Given a board and a player’s letter, this function returns True if that player has won.
# We use bo instead of board and le instead of letter so we don’t have to type as much.
return ((bo[7] == le and bo[8] == le and bo[9] == le) or # across the top
(bo[4] == le and bo[5] == le and bo[6] == le) or # across the middle
(bo[1] == le and bo[2] == le and bo[3] == le) or # across the bottom
(bo[7] == le and bo[4] == le and bo[1] == le) or # down the left side
(bo[8] == le and bo[5] == le and bo[2] == le) or # down the middle
(bo[9] == le and bo[6] == le and bo[3] == le) or # down the right side
(bo[7] == le and bo[5] == le and bo[3] == le) or # diagonal
(bo[9] == le and bo[5] == le and bo[1] == le)) # diagonal
def getBoardCopy(board):
# Make a duplicate of the board list and return it the duplicate.
dupeBoard = []
for i in board:
dupeBoard.append(i)
return dupeBoard
def isSpaceFree(board, move):
# Return true if the passed move is free on the passed board.
return board[move] == ' '
def getPlayerMove(board):
# Let the player type in their move.
move = ' '
while move not in '1 2 3 4 5 6 7 8 9'.split() or not isSpaceFree(board, int(move)):
print('What is your next move? (1-9)')
move = input()
return int(move)
def chooseRandomMoveFromList(board, movesList):
# Returns a valid move from the passed list on the passed board.
# Returns None if there is no valid move.
possibleMoves = []
for i in movesList:
if isSpaceFree(board, i):
possibleMoves.append(i)
if len(possibleMoves) != 0:
return random.choice(possibleMoves)
else:
return None
def isBoardFull(board):
# Return True if every space on the board has been taken. Otherwise return False.
for i in range(1, 10):
if isSpaceFree(board, i):
return False
return True
print('Welcome to Tic Tac Toe!')
while True:
# Reset the board
theBoard = [' '] * 10
playerLetter, computerLetter = inputPlayerLetter()
turn = whoGoesFirst()
started = turn
if started == "computer": started = computerLetter
elif started == "player": started = playerLetter
print('The ' + turn + ' will go first.')
levelStr = input("What level do you want to play at? 1 easy, 2 harder, 3 even harder, and so on. write \"infinity\" for impossible (recommended: 2):\n")
if levelStr == "infinity":
level = -1
else:
level = int(levelStr) - 1
gameIsPlaying = True
while gameIsPlaying:
if turn == 'player':
# Player’s turn.
drawBoard(theBoard)
move = getPlayerMove(theBoard)
makeMove(theBoard, playerLetter, move)
if isWinner(theBoard, playerLetter):
drawBoard(theBoard)
print('Hooray! You have won the game!')
gameIsPlaying = False
else:
if isBoardFull(theBoard):
drawBoard(theBoard)
print('The game is a tie!')
break
else:
turn = 'computer'
else:
# Computer’s turn.
RonenRepBoard = convertFromInternetRepresentation(theBoard, started)
move = findNextMoveBasedOnBoardWithLevel(dicOfBoardsAndThierResults, RonenRepBoard, level) + 1
print("\nmove: " + str(move) + "\n")
makeMove(theBoard, computerLetter, move)
if isWinner(theBoard, computerLetter):
drawBoard(theBoard)
print('The computer has beaten you! You lose.')
gameIsPlaying = False
else:
if isBoardFull(theBoard):
drawBoard(theBoard)
print('The game is a tie!')
break
else:
turn = 'player'
if not playAgain():
break
| 44.091078 | 192 | 0.597277 |
79f0508cc5a21b673fda2a47bac8c8965f18b685 | 29 | py | Python | wt/macros.py | aarnav25/WorkingTitle | ab007b3c4d910b756da78355817e4442b24c7684 | [
"MIT"
] | null | null | null | wt/macros.py | aarnav25/WorkingTitle | ab007b3c4d910b756da78355817e4442b24c7684 | [
"MIT"
] | null | null | null | wt/macros.py | aarnav25/WorkingTitle | ab007b3c4d910b756da78355817e4442b24c7684 | [
"MIT"
] | 1 | 2018-03-12T22:50:15.000Z | 2018-03-12T22:50:15.000Z | """quote unquote scripts."""
| 14.5 | 28 | 0.655172 |
5fde535d514dba8ff8e60ba0fb77834492e76dc1 | 203 | py | Python | news-app-env/Scripts/django-admin.py | suryapalla/tell-me-news | 1c308e586074ed8d31500227ed808215afc9cf41 | [
"MIT"
] | null | null | null | news-app-env/Scripts/django-admin.py | suryapalla/tell-me-news | 1c308e586074ed8d31500227ed808215afc9cf41 | [
"MIT"
] | 5 | 2021-03-30T13:41:24.000Z | 2021-09-22T19:13:38.000Z | news-app-env/Scripts/django-admin.py | suryapalla/tell-me-news | 1c308e586074ed8d31500227ed808215afc9cf41 | [
"MIT"
] | null | null | null | #!c:\users\pakumar\django_projects\git_hub_projects\tell-me-news\news-app-env\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 33.833333 | 96 | 0.802956 |
34955960dd57cff39f62de6bf3a7253dbf9ed9c9 | 4,252 | py | Python | src/submission_plain.py | ternaus/kaggle_wallmart | 073dcf7bbc3e0ac6201b0aae0bba44f5fb71f835 | [
"MIT"
] | null | null | null | src/submission_plain.py | ternaus/kaggle_wallmart | 073dcf7bbc3e0ac6201b0aae0bba44f5fb71f835 | [
"MIT"
] | null | null | null | src/submission_plain.py | ternaus/kaggle_wallmart | 073dcf7bbc3e0ac6201b0aae0bba44f5fb71f835 | [
"MIT"
] | null | null | null | from __future__ import division
__author__ = 'Vladimir Iglovikov'
# import pandas as pd
import numpy as np
import os
import graphlab as gl
from graphlab import SFrame
import pandas as pd
def merge_data(df):
    """Build the Kaggle submission id ``"<store_nbr>_<item_nbr>_<date>"``.

    :param df: a row-like mapping (e.g. a pandas Series) with ``store_nbr``,
        ``item_nbr`` and ``date`` entries.
    :return: the three fields joined with underscores.
    """
    # f-string str()-converts every field, so this also works if 'date' is
    # not already a string (the old ''.join would raise TypeError then).
    return f'{df["store_nbr"]}_{df["item_nbr"]}_{df["date"]}'
# 'ind' toggles whether the held-out test set is loaded/joined alongside train.
ind = True
# Raw CSVs live one directory up in ../data (GraphLab SFrame loaders).
weather = SFrame.read_csv(os.path.join('..', "data", "weather_modified_3.csv"))
if ind:
    test = SFrame.read_csv(os.path.join('..', "data", "test.csv"))
train = SFrame.read_csv(os.path.join('..', "data", "train.csv"))
key = SFrame.read_csv(os.path.join('..', "data", "key.csv"))
zero_items = SFrame.read_csv(os.path.join('..', 'data', 'zero_items_solid_new.csv'))
# Attach per-item statistics (units_mean / units_std) to each sales row.
train_new = train.join(zero_items)
if ind:
    test_new = test.join(zero_items)
# Drop items whose historical mean is exactly 0 — they are predicted as 0 later.
train_to_fit = train_new[train_new['units_mean'] != 0]
if ind:
    test_to_fit = test_new[test_new['units_mean'] != 0]
# 'key' maps store_nbr -> station_nbr, so weather can be joined onto sales rows.
weather_new = weather.join(key)
training = train_to_fit.join(weather_new)
if ind:
    testing = test_to_fit.join(weather_new)
def f(x):
    """Extract the year from a ``YYYY-MM-DD`` date string as an int."""
    year_text, _, _ = x.strip().partition('-')
    return int(year_text)
def get_weekday(x):
    """Day of week for a date string: Monday == 0 ... Sunday == 6."""
    return pd.to_datetime(x).weekday()
def get_quarter(x):
    """Calendar quarter (1-4) for a date string."""
    return pd.to_datetime(x).quarter
# Derive calendar features from the 'date' column for both splits.
training['year'] = training['date'].apply(f)
training['weekday'] = training['date'].apply(get_weekday)
training['quarter'] = training['date'].apply(get_quarter)
if ind:
    testing['year'] = testing['date'].apply(f)
    testing['weekday'] = testing['date'].apply(get_weekday)
    testing['quarter'] = testing['date'].apply(get_quarter)
# Model feature columns; commented-out entries were tried and dropped.
features = [
    # 'date',
    'store_nbr',
    'item_nbr',
    # 'units',
    'units_mean',
    'units_std',
    'station_nbr',
    # Weather measurements from the joined station data:
    'tmax',
    'tmin',
    'tavg',
    'depart',
    'dewpoint',
    'wetbulb',
    'heat',
    'cool',
    'sunrise',
    'sunset',
    'snowfall',
    'preciptotal',
    'stnpressure',
    'sealevel',
    'resultspeed',
    'resultdir',
    'avgspeed',
    # Weather-phenomenon indicator columns (METAR-style codes):
    'HZ',
    'FU',
    'UP',
    # 'TSSN',
    'VCTS',
    'DZ',
    'BR',
    'FG',
    'BCFG',
    'DU',
    'FZRA',
    'TS',
    'RA',
    'PL',
    'GS',
    # 'GR',
    'FZDZ',
    'VCFG',
    # 'PRFG',
    'FG+',
    'TSRA',
    'FZFG',
    'BLDU',
    'MIFG',
    'SQ',
    'BLSN',
    'SN',
    'SG',
    # Calendar features computed above:
    'days',
    'weekday',
    'year',
    'quarter']
# Mean-impute missing values; reuse the *training* mean on the test split to
# avoid leaking test statistics.
for column in features:
    a = training[column].mean()
    training = training.fillna(column, a)
    if ind:
        testing = testing.fillna(column, a)
import math
# Train on log(1 + units); predictions are mapped back with exp(x) - 1 below.
training['units'] = training['units'].apply(lambda x: math.log(1 + x))
# training['weekday'] = training['date'].apply(pd.to_datetime).apply(lambda x : x.weekday())
# 70/30 split used only for local validation when ind is False.
sf_train, sf_test = training.random_split(0.7, seed=5)
params = {'target': 'units',
          'features': features,
          'max_iterations': 2000,
          'max_depth': 10,
          'min_loss_reduction': 1,
          'step_size': 0.01,
          'row_subsample': 0.8,
          # 'column_subsample': 0.5,
          }
if not ind:
    model = gl.boosted_trees_regression.create(sf_train, validation_set=sf_test, **params)
else:
    model = gl.boosted_trees_regression.create(training, validation_set=None, **params)
prediction_testing = model.predict(testing)
temp = pd.DataFrame()
# Submission id is "<store>_<item>_<date>" (see merge_data above).
temp['id'] = testing[["store_nbr", "item_nbr", "date"]].to_dataframe().apply(merge_data, 1)
# Invert the log1p target transform.
temp['units'] = prediction_testing.apply(lambda x: math.exp(x) - 1)
submission = pd.read_csv('../data/sampleSubmission.csv')
# Outer-merge against the sample submission so ids filtered out earlier
# (zero-mean items) are present and default to 0 units.
result = temp.merge(submission, on=['id'], how='outer')
result.columns = ['id', 'units', 'units_x']
result = result.drop('units_x', 1)
result['units'] = result['units'].fillna(0)
# Clip negative predictions — unit counts cannot be negative.
result['units'] = result['units'].apply(lambda x: max(0, x))
result.to_csv(os.path.join("predictions4", "full_mean_filtered_solid_log_xgbt_mls1_ss01_md8_rs_08_station_nbr_added.csv"), index=False)
| 26.08589 | 137 | 0.55762 |
03572997c74a8ac44bf388d308db733ec71033dc | 11,246 | py | Python | src/inference_rigid.py | octavian-ganea/equidock_public | ac2c754399bf20b50a27d86dbff4f6669788d47f | [
"MIT"
] | 73 | 2022-02-04T23:03:16.000Z | 2022-03-31T08:08:34.000Z | src/inference_rigid.py | octavian-ganea/equidock_public | ac2c754399bf20b50a27d86dbff4f6669788d47f | [
"MIT"
] | 6 | 2022-02-06T03:21:35.000Z | 2022-03-22T03:44:57.000Z | src/inference_rigid.py | octavian-ganea/equidock_public | ac2c754399bf20b50a27d86dbff4f6669788d47f | [
"MIT"
] | 21 | 2022-02-04T22:49:39.000Z | 2022-03-30T12:36:44.000Z | import os
import torch
os.environ['DGLBACKEND'] = 'pytorch'
from datetime import datetime as dt
from src.utils.protein_utils import preprocess_unbound_bound, protein_to_graph_unbound_bound
from biopandas.pdb import PandasPdb
from src.utils.train_utils import *
from src.utils.args import *
from src.utils.ot_utils import *
from src.utils.zero_copy_from_numpy import *
from src.utils.io import create_dir
# Which pre-trained checkpoint to use in main(): 'dips' or 'db5'.
dataset = 'dips'
# Tag used in output filenames/directories.
method_name = 'equidock'
remove_clashes = False  # Set to true if you want to remove (most of the) steric clashes. Will increase run time.
if remove_clashes:
    method_name = method_name + '_no_clashes'
    print('Inference with postprocessing to remove clashes')
else:
    print('Inference without any postprocessing to remove clashes')
# Ligand residue locations: a_i in R^3. Receptor: b_j in R^3
# Ligand: G_l(x) = -sigma * ln( \sum_i exp(- ||x - a_i||^2 / sigma) ), same for G_r(x)
# Ligand surface: x such that G_l(x) = surface_ct
# Other properties: G_l(a_i) < 0, G_l(x) = infinity if x is far from all a_i
# Intersection of ligand and receptor: points x such that G_l(x) < surface_ct && G_r(x) < surface_ct
# Intersection loss: IL = \avg_i max(0, surface_ct - G_r(a_i)) + \avg_j max(0, surface_ct - G_l(b_j))
def G_fn(protein_coords, x, sigma):
    """Soft (smooth-min) distance field of a point cloud.

    protein_coords: (n, 3) point locations; x: (m, 3) query points.
    Returns a (m,) tensor: -sigma * log(1e-3 + sum_i exp(-||x - a_i||^2 / sigma)).
    The 1e-3 keeps the log finite far away from every point.
    """
    diffs = protein_coords.view(1, -1, 3) - x.view(-1, 1, 3)   # (m, n, 3)
    sq_dists = torch.sum(diffs ** 2, dim=2)                    # (m, n)
    kernel = torch.exp(-sq_dists / float(sigma))
    return -sigma * torch.log(1e-3 + kernel.sum(dim=1))
def compute_body_intersection_loss(model_ligand_coors_deform, bound_receptor_repres_nodes_loc_array, sigma = 25., surface_ct=10.):
    """Penalize ligand/receptor steric overlap.

    A point lies "inside" the other body's surface when its soft distance
    G_fn(...) drops below surface_ct; each side contributes the mean hinge
    max(0, surface_ct - G) over its points.
    """
    assert model_ligand_coors_deform.shape[1] == 3
    ligand_penetration = torch.clamp(
        surface_ct - G_fn(bound_receptor_repres_nodes_loc_array, model_ligand_coors_deform, sigma), min=0)
    receptor_penetration = torch.clamp(
        surface_ct - G_fn(model_ligand_coors_deform, bound_receptor_repres_nodes_loc_array, sigma), min=0)
    return ligand_penetration.mean() + receptor_penetration.mean()
def get_rot_mat(euler_angles):
    """Rotation matrix R = Rz(yaw) @ Ry(pitch) @ Rx(roll).

    Note the argument layout: euler_angles = (roll, yaw, pitch).
    Built from torch ops so gradients flow back to the angles.
    """
    roll, yaw, pitch = euler_angles[0], euler_angles[1], euler_angles[2]
    zero = torch.zeros([])
    one = torch.ones([])

    rot_x = torch.stack([
        one, zero, zero,
        zero, torch.cos(roll), -torch.sin(roll),
        zero, torch.sin(roll), torch.cos(roll),
    ]).reshape(3, 3)

    rot_y = torch.stack([
        torch.cos(pitch), zero, torch.sin(pitch),
        zero, one, zero,
        -torch.sin(pitch), zero, torch.cos(pitch),
    ]).reshape(3, 3)

    rot_z = torch.stack([
        torch.cos(yaw), -torch.sin(yaw), zero,
        torch.sin(yaw), torch.cos(yaw), zero,
        zero, zero, one,
    ]).reshape(3, 3)

    return rot_z @ rot_y @ rot_x
def get_residues(pdb_filename):
    """Group a PDB file's ATOM records by residue.

    Returns a list of ((chain, residue_number, residue_name), DataFrame)
    pairs as produced by pandas groupby. NOTE: groupby sorts keys, so the
    order is NOT the sequence order (see the original inline comment).
    """
    df = PandasPdb().read_pdb(pdb_filename).df['ATOM']
    # Rename biopandas columns to the short names the rest of the pipeline expects.
    df.rename(columns={'chain_id': 'chain', 'residue_number': 'residue', 'residue_name': 'resname',
                       'x_coord': 'x', 'y_coord': 'y', 'z_coord': 'z', 'element_symbol': 'element'}, inplace=True)
    residues = list(df.groupby(['chain', 'residue', 'resname']))  ## Not the same as sequence order !
    return residues
def main(args):
    """Run rigid-docking inference over every ligand PDB in the input dir.

    Loads the pre-trained checkpoint selected by the module-level ``dataset``
    flag, predicts a rigid rotation/translation for each ligand, optionally
    fine-tunes the pose to reduce steric clashes (``remove_clashes``), and
    writes the transformed ligand PDBs to the output directory.
    """
    ## Pre-trained models.
    if dataset == 'dips':
        checkpoint_filename = 'oct20_Wdec_0.0001#ITS_lw_10.0#Hdim_64#Nlay_8#shrdLay_F#ln_LN#lnX_0#Hnrm_0#NattH_50#skH_0.75#xConnI_0.0#LkySl_0.01#pokOTw_1.0#fine_F#/'
        checkpoint_filename = 'checkpts/' + checkpoint_filename + '/dips_model_best.pth'
    elif dataset == 'db5':
        checkpoint_filename = 'oct20_Wdec_0.001#ITS_lw_10.0#Hdim_64#Nlay_5#shrdLay_T#ln_LN#lnX_0#Hnrm_0#NattH_50#skH_0.5#xConnI_0.0#LkySl_0.01#pokOTw_1.0#fine_F#'
        checkpoint_filename = 'checkpts/' + checkpoint_filename + '/db5_model_best.pth'
    print('checkpoint_filename = ', checkpoint_filename)
    checkpoint = torch.load(checkpoint_filename, map_location=args['device'])
    # Replay the training-time hyperparameters stored in the checkpoint,
    # then override the runtime-only settings below.
    for k,v in checkpoint['args'].items():
        args[k] = v
    args['debug'] = False
    args['device'] = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
    args['n_jobs'] = 1
    args['worker'] = 0
    model = create_model(args, log)
    model.load_state_dict(checkpoint['state_dict'])
    param_count(model, log)
    model = model.to(args['device'])
    model.eval()
    print(args['layer_norm'], args['layer_norm_coors'], args['final_h_layer_norm'], args['intersection_loss_weight'])
    print('divide_coors_dist = ', args['divide_coors_dist'])
    time_list = []
    input_dir = './test_sets_pdb/' + dataset + '_test_random_transformed/random_transformed/'
    ground_truth_dir = './test_sets_pdb/' + dataset + '_test_random_transformed/complexes/'
    output_dir = './test_sets_pdb/' + dataset + '_' + method_name + '_results/'
    # NOTE(review): the three assignments below override the dataset-derived
    # directories with hard-coded 'jean' paths — looks like a local debugging
    # leftover; confirm before running on the benchmark sets.
    input_dir = './test_sets_pdb/jean/'
    ground_truth_dir = './test_sets_pdb/jean/'
    output_dir = './test_sets_pdb/jean_out/'
    # create_dir(output_dir)
    pdb_files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and f.endswith('.pdb')]
    for file in pdb_files:
        # Only process ligand files; receptor/ground-truth names are derived from them.
        if not file.endswith('_l_b.pdb'):
            continue
        ll = len('_l_b.pdb')
        ligand_filename = os.path.join(input_dir, file[:-ll] + '_l_b' + '.pdb')
        receptor_filename = os.path.join(ground_truth_dir, file[:-ll] + '_r_b' + '_COMPLEX.pdb')
        gt_ligand_filename = os.path.join(ground_truth_dir, file[:-ll] + '_l_b' + '_COMPLEX.pdb')
        out_filename = file[:-ll] + '_l_b' + '_' + method_name.upper() + '.pdb'
        print(' inference on file = ', ligand_filename)
        start = dt.now()
        ppdb_ligand = PandasPdb().read_pdb(ligand_filename)
        unbound_ligand_all_atoms_pre_pos = ppdb_ligand.df['ATOM'][['x_coord', 'y_coord', 'z_coord']].to_numpy().squeeze().astype(np.float32)
        def get_nodes_coors_numpy(filename, all_atoms=False):
            # Coordinates of all atoms, or only the C-alpha atoms, as a float32 tensor.
            df = PandasPdb().read_pdb(filename).df['ATOM']
            if not all_atoms:
                return torch.from_numpy(df[df['atom_name'] == 'CA'][['x_coord', 'y_coord', 'z_coord']].to_numpy().squeeze().astype(np.float32))
            return torch.from_numpy(df[['x_coord', 'y_coord', 'z_coord']].to_numpy().squeeze().astype(np.float32))
        gt_ligand_nodes_coors = get_nodes_coors_numpy(gt_ligand_filename, all_atoms=True)
        gt_receptor_nodes_coors = get_nodes_coors_numpy(receptor_filename, all_atoms=True)
        initial_ligand_nodes_coors = get_nodes_coors_numpy(ligand_filename, all_atoms=True)
        unbound_predic_ligand, \
        unbound_predic_receptor, \
        bound_ligand_repres_nodes_loc_clean_array,\
        bound_receptor_repres_nodes_loc_clean_array = preprocess_unbound_bound(
            get_residues(ligand_filename), get_residues(receptor_filename),
            graph_nodes=args['graph_nodes'], pos_cutoff=args['pocket_cutoff'], inference=True)
        # Build the residue-level DGL graphs the model consumes.
        ligand_graph, receptor_graph = protein_to_graph_unbound_bound(unbound_predic_ligand,
                                                                      unbound_predic_receptor,
                                                                      bound_ligand_repres_nodes_loc_clean_array,
                                                                      bound_receptor_repres_nodes_loc_clean_array,
                                                                      graph_nodes=args['graph_nodes'],
                                                                      cutoff=args['graph_cutoff'],
                                                                      max_neighbor=args['graph_max_neighbor'],
                                                                      one_hot=False,
                                                                      residue_loc_is_alphaC=args['graph_residue_loc_is_alphaC']
                                                                      )
        if args['input_edge_feats_dim'] < 0:
            args['input_edge_feats_dim'] = ligand_graph.edata['he'].shape[1]
        ligand_graph.ndata['new_x'] = ligand_graph.ndata['x']
        assert np.linalg.norm(bound_ligand_repres_nodes_loc_clean_array - ligand_graph.ndata['x'].detach().cpu().numpy()) < 1e-1
        # Create a batch of a single DGL graph
        batch_hetero_graph = batchify_and_create_hetero_graphs_inference(ligand_graph, receptor_graph)
        batch_hetero_graph = batch_hetero_graph.to(args['device'])
        model_ligand_coors_deform_list, \
        model_keypts_ligand_list, model_keypts_receptor_list, \
        all_rotation_list, all_translation_list = model(batch_hetero_graph, epoch=0)
        rotation = all_rotation_list[0].detach().cpu().numpy()
        translation = all_translation_list[0].detach().cpu().numpy()
        # Sanity-check: applying (R, t) to the residue coordinates must
        # reproduce the model's deformed coordinates.
        new_residues = (rotation @ bound_ligand_repres_nodes_loc_clean_array.T).T+translation
        assert np.linalg.norm(new_residues - model_ligand_coors_deform_list[0].detach().cpu().numpy()) < 1e-1
        # Apply the same rigid transform to ALL ligand atoms (not just residues).
        unbound_ligand_new_pos = (rotation @ unbound_ligand_all_atoms_pre_pos.T).T+translation
        # Optional clash-removal: small extra (R, t) optimized by manual
        # gradient descent, initialized at the identity.
        euler_angles_finetune = torch.zeros([3], requires_grad=True)
        translation_finetune = torch.zeros([3], requires_grad=True)
        ligand_th = (get_rot_mat(euler_angles_finetune) @ torch.from_numpy(unbound_ligand_new_pos).T).T + translation_finetune
        ## Optimize the non-intersection loss:
        if remove_clashes:
            non_int_loss_item = 100.
            it = 0
            while non_int_loss_item > 0.5 and it < 2000:
                non_int_loss = compute_body_intersection_loss(ligand_th, gt_receptor_nodes_coors, sigma=8, surface_ct=8)
                non_int_loss_item = non_int_loss.item()
                # Step-size schedule: shrink near convergence, grow if stuck.
                eta = 1e-3
                if non_int_loss < 2.:
                    eta = 1e-4
                if it > 1500:
                    eta = 1e-2
                if it % 100 == 0:
                    print(it, ' ' , non_int_loss_item)
                non_int_loss.backward()
                # Manual SGD step; each parameter is re-wrapped as a fresh
                # leaf tensor so the next backward() starts from zero grads.
                translation_finetune = translation_finetune - eta * translation_finetune.grad.detach()
                translation_finetune = torch.tensor(translation_finetune, requires_grad=True)
                euler_angles_finetune = euler_angles_finetune - eta * euler_angles_finetune.grad.detach()
                euler_angles_finetune = torch.tensor(euler_angles_finetune, requires_grad=True)
                ligand_th = (get_rot_mat(euler_angles_finetune) @ torch.from_numpy(unbound_ligand_new_pos).T).T + translation_finetune
                it += 1
        # Write the transformed ligand atoms back into the PDB and save.
        ppdb_ligand.df['ATOM'][['x_coord', 'y_coord', 'z_coord']] = ligand_th.detach().numpy() # unbound_ligand_new_pos
        unbound_ligand_save_filename = os.path.join(output_dir, out_filename)
        ppdb_ligand.to_pdb(path=unbound_ligand_save_filename, records=['ATOM'], gz=False)
        end = dt.now()
        time_list.append((end-start).total_seconds())
    time_array = np.array(time_list)
    log(f"Mean runtime: {np.mean(time_array)}, std runtime: {np.std(time_array)}")
    log('Time list = ', time_list)
if __name__ == "__main__":
    main(args)
e17ddf88e73770bffcba1a5a77a5cdb6144b19f6 | 1,490 | py | Python | aliyun-python-sdk-openanalytics-open/aliyunsdkopenanalytics_open/request/v20180619/GetRdsInstanceRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-openanalytics-open/aliyunsdkopenanalytics_open/request/v20180619/GetRdsInstanceRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-openanalytics-open/aliyunsdkopenanalytics_open/request/v20180619/GetRdsInstanceRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkopenanalytics_open.endpoint import endpoint_data
class GetRdsInstanceRequest(RpcRequest):
	"""RPC request for the openanalytics-open ``GetRdsInstance`` API (version 2018-06-19)."""

	def __init__(self):
		# POST request against product 'openanalytics-open', action 'GetRdsInstance'.
		RpcRequest.__init__(self, 'openanalytics-open', '2018-06-19', 'GetRdsInstance','openanalytics')
		self.set_method('POST')
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

	def get_DBInstanceId(self):
		# The RDS instance id is carried in the request body parameters.
		return self.get_body_params().get('DBInstanceId')

	def set_DBInstanceId(self,DBInstanceId):
		self.add_body_params('DBInstanceId', DBInstanceId)
7d49c8a91505855ee1b0607fe6e0e03416f785e7 | 4,158 | py | Python | django_faker/templatetags/fakers.py | rpatterson/django-faker | 77df5f92fdf5e300e4b1ba512bbd82d3c0010108 | [
"MIT"
] | null | null | null | django_faker/templatetags/fakers.py | rpatterson/django-faker | 77df5f92fdf5e300e4b1ba512bbd82d3c0010108 | [
"MIT"
] | null | null | null | django_faker/templatetags/fakers.py | rpatterson/django-faker | 77df5f92fdf5e300e4b1ba512bbd82d3c0010108 | [
"MIT"
] | 2 | 2018-11-26T13:48:40.000Z | 2019-01-04T12:33:50.000Z | from inspect import getargspec
from django.template.base import TemplateSyntaxError
from django import template
from django.template.library import parse_bits
register = template.Library()
from django_faker import DjangoFaker
# *** Django allows for optional assignment out of box now with simple tags now ***
# def optional_assignment_tag(func=None, takes_context=None, name=None):
# """
# https://groups.google.com/forum/?fromgroups=#!topic/django-developers/E0XWFrkRMGc
# new template tags type
# """
# def dec(func):
# params, varargs, varkw, defaults = getargspec(func)
#
# class AssignmentNode(template.Node):
# def __init__(self, takes_context, args, kwargs, target_var=None):
# super(AssignmentNode, self).__init__(takes_context, args, kwargs)
# self.target_var = target_var
#
# def render(self, context):
# resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
# output = func(*resolved_args, **resolved_kwargs)
# if self.target_var is None:
# return output
# else:
# context[self.target_var] = output
# return ''
#
# function_name = (name or
# getattr(func, '_decorated_function', func).__name__)
#
# def compile_func(parser, token):
# bits = token.split_contents()[1:]
# if len(bits) < 2 or bits[-2] != 'as':
# target_var = None
# else:
# target_var = bits[-1]
# bits = bits[:-2]
# args, kwargs = parse_bits(parser, bits, params,
# varargs, varkw, defaults, takes_context, function_name)
# return AssignmentNode(takes_context, args, kwargs, target_var)
#
# compile_func.__doc__ = func.__doc__
# register.tag(function_name, compile_func)
# return func
# if func is None:
# # @register.assignment_tag(...)
# return dec
# elif callable(func):
# # @register.assignment_tag
# return dec(func)
# else:
# raise TemplateSyntaxError("Invalid arguments provided to assignment_tag")
@register.simple_tag(name='fake')
def do_fake(formatter, *args, **kwargs):
    """
    Template tag: call a Faker formatter by name and return its value.

    Usage:
        {% fake "formatterName" *args **kwargs as myvar %}
        {{ myvar }}
    or inline:
        {% fake 'name' %}
    """
    return DjangoFaker.get_generator().format(formatter, *args, **kwargs)
@register.simple_tag(name='fakestr')
def fake_tag_str(formatter, *args, **kwargs):
    """
    Template tag: call a Faker formatter by name and return its value.

    Usage:
        {% fakestr "formatterName" *values **kwargs %}

    :param formatter: name of the Faker formatter to invoke.
    :return: whatever the formatter produces.
    """
    # A leftover debug print for 'dateTimeThisCentury' was removed here —
    # it dumped args/kwargs to stdout on every render of that formatter.
    return DjangoFaker.get_generator().format(formatter, *args, **kwargs)
@register.filter(name='fake')
def do_fake_filter(formatter, arg=None):
    """
    Template filter: call a Faker formatter, optionally with one argument.

    Usage:
        {{ 'randomElement'|fake:mylist }}
        {% if 'boolean'|fake:30 %} .. {% endif %}
        {% for word in 'words'|fake:times %}{{ word }}\n{% endfor %}
    """
    # PEP 8 (E714): "is not None" instead of "not ... is None"; a conditional
    # expression replaces the build-then-append dance.
    args = [] if arg is None else [arg]
    return DjangoFaker.get_generator().format(formatter, *args)
@register.filter(name='or_fake')
def do_or_fake_filter(value, formatter):
    """
    Template filter: keep *value* if truthy, otherwise generate a fake one.

    Usage:
        {{ myint|or_fake:'randomInt' }}

    NOTE(review): the check is ``if not value``, so ANY falsy value
    (0, '', empty list, ...) is replaced — not only None as the original
    description suggested. Confirm templates rely on that before changing.
    """
    if not value:
        value = DjangoFaker.get_generator().format(formatter)
    return value
@register.filter
def get_range(value):
    """
    http://djangosnippets.org/snippets/1357/
    Filter - returns an iterable of 0 .. value-1 made from the given value.
    (On Python 3 this is a lazy ``range`` object rather than a list; Django
    templates iterate it the same way.)
    Usage (in template):
    <ul>{% for i in 3|get_range %}
      <li>{{ i }}. Do something</li>
    {% endfor %}</ul>
    Results with the HTML:
    <ul>
      <li>0. Do something</li>
      <li>1. Do something</li>
      <li>2. Do something</li>
    </ul>
    Instead of 3 one may use the variable set in the views
    """
    return range(value)
| 29.489362 | 87 | 0.588264 |
bbbf27c6425b5cc573de39c279b5fb12b1dd6338 | 2,258 | py | Python | plugins/snowflake/setup.py | tashay/dbt | 84420675e0c6dcef05b0c53908b326b59c7da23c | [
"Apache-2.0"
] | null | null | null | plugins/snowflake/setup.py | tashay/dbt | 84420675e0c6dcef05b0c53908b326b59c7da23c | [
"Apache-2.0"
] | null | null | null | plugins/snowflake/setup.py | tashay/dbt | 84420675e0c6dcef05b0c53908b326b59c7da23c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
# Refuse to install on unsupported interpreters before importing anything else.
if sys.version_info < (3, 6):
    print('Error: dbt does not support this version of Python.')
    print('Please upgrade to Python 3.6 or higher.')
    sys.exit(1)


from setuptools import setup
try:
    from setuptools import find_namespace_packages
except ImportError:
    # the user has a downlevel version of setuptools.
    print('Error: dbt requires setuptools v40.1.0 or higher.')
    print('Please upgrade setuptools with "pip install --upgrade setuptools" '
          'and try again')
    sys.exit(1)


package_name = "dbt-snowflake"
package_version = "0.19.0a1"
description = """The snowflake adapter plugin for dbt (data build tool)"""

# Read the package README for the PyPI long description.
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md')) as f:
    long_description = f.read()

setup(
    name=package_name,
    version=package_version,
    description=description,
    # BUG FIX: previously passed `description` here, so the README read
    # above was never used; pass the actual README contents.
    long_description=long_description,
    long_description_content_type='text/markdown',
    author="Fishtown Analytics",
    author_email="info@fishtownanalytics.com",
    url="https://github.com/fishtown-analytics/dbt",
    packages=find_namespace_packages(include=['dbt', 'dbt.*']),
    package_data={
        'dbt': [
            'include/snowflake/dbt_project.yml',
            'include/snowflake/sample_profiles.yml',
            'include/snowflake/macros/*.sql',
            'include/snowflake/macros/**/*.sql',
        ]
    },
    install_requires=[
        'dbt-core=={}'.format(package_version),
        'snowflake-connector-python==2.2.10',
        'azure-common<2.0.0',
        'azure-storage-blob>=12.0.0,<13.0.0',
        'urllib3>=1.20,<1.26.0',
        # this seems sufficiently broad
        'cryptography>=2,<3',
    ],
    zip_safe=False,
    classifiers=[
        'Development Status :: 5 - Production/Stable',

        'License :: OSI Approved :: Apache Software License',

        'Operating System :: Microsoft :: Windows',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX :: Linux',

        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    python_requires=">=3.6.2",
)
| 30.931507 | 78 | 0.64039 |
7251a81a81d27c220bbcdb1664f4b427531d3934 | 6,537 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/operations/_usage_operations.py | dubiety/azure-sdk-for-python | 62ffa839f5d753594cf0fe63668f454a9d87a346 | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/operations/_usage_operations.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/operations/_usage_operations.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Module-level serializer shared by all request builders in this file;
# client-side validation is disabled for generated code.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False

def build_list_request(
    location: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for the compute usages listing endpoint.

    Auto-generated helper: substitutes *location* and *subscription_id* into
    the URL template (or uses a caller-supplied ``template_url``, e.g. a
    paging next-link) and attaches the api-version query parameter.
    """
    api_version = kwargs.pop('api_version', "2018-06-01")  # type: str

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages")  # pylint: disable=line-too-long
    path_format_arguments = {
        "location": _SERIALIZER.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct parameters
    _query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    _query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        params=_query_parameters,
        headers=_header_parameters,
        **kwargs
    )
class UsageOperations(object):
    """UsageOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.compute.v2018_06_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list(
        self,
        location: str,
        **kwargs: Any
    ) -> Iterable["_models.ListUsagesResult"]:
        """Gets, for the specified location, the current compute resource usage information as well as the
        limits for compute resources under the subscription.

        :param location: The location for which resource usage is queried.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListUsagesResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2018_06_01.models.ListUsagesResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2018-06-01")  # type: str

        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListUsagesResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the operation's URL template; subsequent pages
            # use the service-supplied next_link verbatim.
            if not next_link:
                request = build_list_request(
                    location=location,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    location=location,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page: yields (next-page link or None, item iterator).
            deserialized = self._deserialize("ListUsagesResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page through the client pipeline; only HTTP 200 is accepted.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages"}  # type: ignore
| 40.85625 | 161 | 0.652746 |
a4a3f9c1156aa60f8aa386b6648c833bc0cfde9d | 1,439 | py | Python | test/persistent.py | leifdenby/rlite-py | 90f0726bfb8173d7afa4fc87d75e28b627c10020 | [
"BSD-2-Clause"
] | 73 | 2015-03-02T18:57:57.000Z | 2022-02-19T22:27:24.000Z | test/persistent.py | leifdenby/rlite-py | 90f0726bfb8173d7afa4fc87d75e28b627c10020 | [
"BSD-2-Clause"
] | 6 | 2015-03-26T04:06:45.000Z | 2017-04-09T23:04:38.000Z | test/persistent.py | leifdenby/rlite-py | 90f0726bfb8173d7afa4fc87d75e28b627c10020 | [
"BSD-2-Clause"
] | 8 | 2015-03-04T09:54:25.000Z | 2022-01-13T05:05:31.000Z | # coding=utf-8
from unittest import *
import os.path
import sys
import hirlite
class PersistentTest(TestCase):
    """Tests that an rlite database persists to (and survives reopening of)
    its on-disk file. Each test starts from a freshly deleted file."""

    PATH = 'rlite.rld'

    def setUp(self):
        # Start from a clean slate: remove any leftover database file.
        if os.path.exists(PersistentTest.PATH):
            os.unlink(PersistentTest.PATH)
        self.rlite = hirlite.Rlite(PersistentTest.PATH)

    def tearDown(self):
        self.rlite = None
        if os.path.exists(PersistentTest.PATH):
            os.unlink(PersistentTest.PATH)

    def test_write_close_open(self):
        """A value written before reopening the file is still readable after."""
        self.rlite.command('set', 'key', 'value')
        self.rlite = hirlite.Rlite(PersistentTest.PATH) # close db, open a new one
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        self.assertEqual(b'value', self.rlite.command('get', 'key'))

    def test_pubsub(self):
        """Pub/sub messages published from a second handle reach the subscriber."""
        self.assertEqual([b'subscribe', b'channel', 1], self.rlite.command('subscribe', 'channel', 'channel2'))
        self.assertEqual([b'subscribe', b'channel2', 2], self.rlite.command('__rlite_poll'))

        rlite2 = hirlite.Rlite(PersistentTest.PATH)
        self.assertEqual(1, rlite2.command('publish', 'channel', 'hello world'))

        r = self.rlite.command('__rlite_poll', '0')
        self.assertEqual(r, [b'message', b'channel', b'hello world'])
        self.assertEqual([b'unsubscribe', b'channel2', 1], self.rlite.command('unsubscribe'))
        self.assertEqual([b'unsubscribe', b'channel', 0], self.rlite.command('__rlite_poll'))
        self.assertIsNone(self.rlite.command('__rlite_poll'))
| 39.972222 | 112 | 0.660876 |
f716f3a29096b6b3d1684fa66a9f8119736e670b | 468 | py | Python | contentcuration/contentcuration/migrations/0015_auto_20160914_1640.py | Tlazypanda/studio | cd1c2f169c705027cdd808cbbcae907d0a9b21d2 | [
"MIT"
] | 1 | 2019-03-30T18:14:25.000Z | 2019-03-30T18:14:25.000Z | contentcuration/contentcuration/migrations/0015_auto_20160914_1640.py | Tlazypanda/studio | cd1c2f169c705027cdd808cbbcae907d0a9b21d2 | [
"MIT"
] | 4 | 2016-05-06T17:19:30.000Z | 2019-03-15T01:51:24.000Z | contentcuration/contentcuration/migrations/0015_auto_20160914_1640.py | Tlazypanda/studio | cd1c2f169c705027cdd808cbbcae907d0a9b21d2 | [
"MIT"
] | 4 | 2016-10-18T22:49:08.000Z | 2019-09-17T11:20:51.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-14 23:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: relaxes Channel.thumbnail to an
    # optional (blank/null) TextField.

    dependencies = [
        ('contentcuration', '0014_channel_language'),
    ]

    operations = [
        migrations.AlterField(
            model_name='channel',
            name='thumbnail',
            field=models.TextField(blank=True, null=True),
        ),
    ]
| 22.285714 | 58 | 0.623932 |
d3b92914a74b2a5966ec0869e4a3616cfcf636fd | 250 | py | Python | createdb.py | fijal/ocd | c1a96e322b8b8945299680c492d7fba137b3e5dd | [
"MIT"
] | null | null | null | createdb.py | fijal/ocd | c1a96e322b8b8945299680c492d7fba137b3e5dd | [
"MIT"
] | null | null | null | createdb.py | fijal/ocd | c1a96e322b8b8945299680c492d7fba137b3e5dd | [
"MIT"
] | null | null | null |
import sys
from sqlalchemy import create_engine
from ocd.model import meta
def create_db(url):
    """Create all tables declared on ``ocd.model.meta`` in the database at *url*.

    Returns the SQLAlchemy engine so the caller can keep using it.
    """
    engine = create_engine(url)
    # Opening a connection validates the URL up front; the context manager
    # returns it to the pool when the DDL is done.
    with engine.connect():
        meta.create_all(engine)
    return engine


if __name__ == '__main__':
    # Usage: python createdb.py <database-url>
    create_db(sys.argv[1])
57d755ac16fbef46880ae2ebf5882b98f5d80179 | 229 | py | Python | pieseis/utils/config.py | JoeXinfa/pieseis | 4898cf7811c582ecb0bf02e18b326d21698d9c78 | [
"CC-BY-4.0"
] | 2 | 2018-11-19T16:16:51.000Z | 2020-04-05T01:47:56.000Z | pieseis/utils/config.py | JoeXinfa/pieseis | 4898cf7811c582ecb0bf02e18b326d21698d9c78 | [
"CC-BY-4.0"
] | 1 | 2020-02-24T15:52:47.000Z | 2020-02-24T15:52:47.000Z | pieseis/utils/config.py | JoeXinfa/pieseis | 4898cf7811c582ecb0bf02e18b326d21698d9c78 | [
"CC-BY-4.0"
] | 2 | 2019-09-15T14:29:29.000Z | 2020-04-05T01:48:56.000Z | """
This Python config file sets the most commonly used configuration constants.
"""
#import os

# Package name; note the "pieseis" vs "pyseis" spelling mismatch with
# LOG_FILE below — presumably historical, TODO confirm.
NAME = "pieseis"
# Default log file name for the package.
LOG_FILE = "pyseis.log"
# While debugging / developing, set by default to True
#DEBUG = os.getenv("DEBUG")
# Hard-coded off; the commented env-var override above is kept for reference.
DEBUG = False
| 19.083333 | 69 | 0.716157 |
adaee8e36b8a2437264590a8ea934a00850409d1 | 969 | py | Python | tests/test_largest_factor.py | Melon-Bread/tdd-demo | 52c93b27cc496aa3950b8bb6eaf56a6c9d781271 | [
"MIT"
] | 2 | 2017-12-08T21:44:17.000Z | 2019-04-19T01:36:24.000Z | tests/test_largest_factor.py | Melon-Bread/tdd-demo | 52c93b27cc496aa3950b8bb6eaf56a6c9d781271 | [
"MIT"
] | null | null | null | tests/test_largest_factor.py | Melon-Bread/tdd-demo | 52c93b27cc496aa3950b8bb6eaf56a6c9d781271 | [
"MIT"
] | 3 | 2015-03-19T01:31:16.000Z | 2019-09-27T18:08:03.000Z | import pytest
from largest_factor import largest_factor
# Inputs mapped to their expected largest prime factor.
number_to_largest_prime_factor = {
    2: 2,
    3: 3,
    5: 5,
    2**2 * 13: 13,
    2**3 * 3**2: 3,
    5*7*11: 11,
    5*7*11*11: 11,
    2**100: 2,
}


@pytest.mark.parametrize('number, expected_largest_prime_factor', number_to_largest_prime_factor.items())
def test_known_number_returns_expected(number, expected_largest_prime_factor):
    assert expected_largest_prime_factor == largest_factor(number)


# Values with no prime factorisation (or of the wrong type) must raise.
bad_values_to_expected_error = {
    -1: ValueError,
    0: ValueError,
    1: ValueError,
    'hello': ValueError,
}


@pytest.mark.parametrize('bad_value, expected_error', bad_values_to_expected_error.items())
def test_known_value_returns_expected_error(bad_value, expected_error):
    # Redundant checks show the two supported pytest.raises forms.
    with pytest.raises(expected_error):
        largest_factor(bad_value)
    # The old string-expression form pytest.raises(err, "expr") was removed
    # in pytest 4.0; pass the callable and its arguments instead.
    pytest.raises(expected_error, largest_factor, bad_value)
| 28.5 | 105 | 0.732714 |
a56e29cf52c4e6d73f8229be40417d7e5b20db79 | 1,774 | py | Python | lib/sqlalchemy/sql/__init__.py | jmagnusson/sqlalchemy | 0e7904e730c3d2b0d3a394ad60010158ee29050c | [
"MIT"
] | null | null | null | lib/sqlalchemy/sql/__init__.py | jmagnusson/sqlalchemy | 0e7904e730c3d2b0d3a394ad60010158ee29050c | [
"MIT"
] | null | null | null | lib/sqlalchemy/sql/__init__.py | jmagnusson/sqlalchemy | 0e7904e730c3d2b0d3a394ad60010158ee29050c | [
"MIT"
] | null | null | null | # sql/__init__.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .expression import (
Alias,
ClauseElement,
ColumnCollection,
ColumnElement,
CompoundSelect,
Delete,
FromClause,
Insert,
Join,
Select,
Selectable,
TableClause,
Update,
alias,
and_,
any_,
all_,
asc,
between,
bindparam,
case,
cast,
collate,
column,
delete,
desc,
distinct,
except_,
except_all,
exists,
extract,
false,
False_,
func,
funcfilter,
insert,
intersect,
intersect_all,
join,
label,
literal,
literal_column,
modifier,
not_,
null,
or_,
outerjoin,
outparam,
over,
select,
subquery,
table,
text,
true,
True_,
tuple_,
type_coerce,
union,
union_all,
update,
within_group
)
from .visitors import ClauseVisitor
def __go(lcls):
    """One-shot module initialiser, invoked with ``locals()`` below.

    Builds ``__all__`` from the names visible at call time and performs
    import-order-sensitive wiring that must happen after the imports above.
    """
    global __all__

    from .. import util as _sa_util
    import inspect as _inspect

    # Export every non-underscore, non-module name currently in the namespace.
    __all__ = sorted(name for name, obj in lcls.items()
                     if not (name.startswith('_') or _inspect.ismodule(obj)))

    # Register the Annotated wrappers for the core clause types.
    from .annotation import _prepare_annotations, Annotated
    from .elements import AnnotatedColumnElement, ClauseList
    from .selectable import AnnotatedFromClause
    _prepare_annotations(ColumnElement, AnnotatedColumnElement)
    _prepare_annotations(FromClause, AnnotatedFromClause)
    _prepare_annotations(ClauseList, Annotated)

    # Resolve deferred cross-module dependencies declared elsewhere.
    _sa_util.dependencies.resolve_all("sqlalchemy.sql")

    # Imported for its side effects only; not re-exported.
    from . import naming


__go(locals())
| 18.479167 | 77 | 0.654453 |
8656d3a2098e28bf55fe9235cf9297dd49baa69c | 2,064 | py | Python | applications/address/views.py | dev-easyshares/mighty | a6cf473fb8cfbf5b92db68c7b068fc8ae2911b8b | [
"MIT"
] | null | null | null | applications/address/views.py | dev-easyshares/mighty | a6cf473fb8cfbf5b92db68c7b068fc8ae2911b8b | [
"MIT"
] | 1 | 2022-03-12T00:57:37.000Z | 2022-03-12T00:57:37.000Z | applications/address/views.py | dev-easyshares/mighty | a6cf473fb8cfbf5b92db68c7b068fc8ae2911b8b | [
"MIT"
] | null | null | null | from django.conf import settings
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from mighty.views import TemplateView, FormDescView
from mighty.applications.address import get_address_backend
from mighty.applications.address.forms import AddressFormDesc
# Single module-level backend instance shared by every view below.
address_backend = get_address_backend()


class AddresFormDescView(FormDescView):
    # NOTE(review): class name is missing an "s" ("Addres"); renaming would
    # break external references, so it is documented rather than changed.
    form = AddressFormDesc
@method_decorator(login_required, name='dispatch')
class LocationDetail(TemplateView):
    """Login-protected endpoint returning one geocoded location as JSON."""

    @property
    def location(self):
        """Look up the ``location`` query parameter; empty dict when absent."""
        query = self.request.GET.get('location')
        if not query:
            return {}
        return address_backend.get_location(query)

    def render_to_response(self, context, **response_kwargs):
        # Skip template rendering entirely and answer with JSON.
        return JsonResponse(self.location, safe=False, **response_kwargs)
@method_decorator(login_required, name='dispatch')
class LocationList(TemplateView):
    """Login-protected endpoint returning location suggestions as JSON."""

    @property
    def locations(self):
        """Suggestions for the ``location`` query parameter; empty list when absent."""
        query = self.request.GET.get('location')
        if not query:
            return []
        return address_backend.give_list(query)

    def render_to_response(self, context, **response_kwargs):
        # Answer with the raw JSON list instead of rendering a template.
        return JsonResponse(self.locations, safe=False, **response_kwargs)
# When Django REST framework is installed, the two template-based views above
# are shadowed by DRF equivalents of the same names.
if 'rest_framework' in settings.INSTALLED_APPS:
    from rest_framework.generics import RetrieveAPIView, ListAPIView
    from rest_framework.response import Response

    class LocationDetail(RetrieveAPIView):
        def get_object(self, queryset=None):
            # NOTE(review): this calls address_backend.get_list(), while the
            # non-DRF detail view calls get_location() — confirm the
            # difference is intentional.
            input_str = self.request.GET.get('location')
            return address_backend.get_list(input_str) if input_str else {}

        def get(self, request, uid, action=None, format=None):
            return Response(self.get_object())

    class LocationList(ListAPIView):
        def get_queryset(self, queryset=None):
            input_str = self.request.GET.get('location')
            return address_backend.give_list(input_str) if input_str else []

        def get(self, request, format=None):
            return Response(self.get_queryset())
| 38.222222 | 76 | 0.742733 |
2655fdc665fbb4a20fd4d4e2681705c2bd9cd455 | 1,916 | py | Python | tests/test_emulator/test_kernels.py | spencerhurt/Starfish | fd38aadbd94d83fcdf0ff8c3ce29d8aad6bea88a | [
"BSD-3-Clause"
] | 53 | 2015-01-21T19:26:37.000Z | 2021-05-13T16:23:26.000Z | tests/test_emulator/test_kernels.py | spencerhurt/Starfish | fd38aadbd94d83fcdf0ff8c3ce29d8aad6bea88a | [
"BSD-3-Clause"
] | 112 | 2015-01-20T11:11:27.000Z | 2021-06-01T13:57:28.000Z | tests/test_emulator/test_kernels.py | spencerhurt/Starfish | fd38aadbd94d83fcdf0ff8c3ce29d8aad6bea88a | [
"BSD-3-Clause"
] | 20 | 2015-01-13T22:45:03.000Z | 2021-05-20T22:47:10.000Z | import numpy as np
import pytest
from Starfish.emulator.kernels import rbf_kernel, batch_kernel
@pytest.fixture
def mock_params():
    """200 synthetic parameter rows around (6000, 4, 0) with per-column noise."""
    noise_scale = np.array((100, 1, 0.1), dtype=np.double)
    centers = np.tile((6000, 4, 0), (200, 1))
    return noise_scale * np.random.randn(200, 3) + centers
@pytest.fixture
def mock_kern_params():
    """Unit variances (6,) and unit lengthscales (6, 3) for the kernels."""
    return np.ones(6), np.ones((6, 3))
class TestKernel:
    """Shape and symmetry checks for the single-output RBF kernel."""

    def test_rbf_kernel_same(self, mock_params, mock_kern_params):
        amplitudes, scales = mock_kern_params
        n = len(mock_params)
        gram = rbf_kernel(mock_params, mock_params, amplitudes[0], scales[0])
        # A kernel over one point set is square, symmetric, with a
        # non-negative diagonal.
        assert gram.shape == (n, n)
        assert np.allclose(gram, gram.T)
        assert np.all(gram.diagonal() >= 0.0)

    def test_rbf_kernel_diff(self, mock_params, mock_kern_params):
        amplitudes, scales = mock_kern_params
        subset = mock_params[10:50]
        cross = rbf_kernel(mock_params, subset, amplitudes[0], scales[0])
        # Cross-kernel is rectangular: rows index the first set, columns the second.
        assert cross.shape == (len(mock_params), len(subset))
class TestBatchKernel:
    """Shape and symmetry checks for the batched (multi-output) kernel."""

    def test_batch_kernel_same(self, mock_params, mock_kern_params):
        amplitudes, scales = mock_kern_params
        dim = len(amplitudes) * len(mock_params)
        gram = batch_kernel(mock_params, mock_params, amplitudes, scales)
        # The batched kernel stacks one block per output dimension.
        assert gram.shape == (dim, dim)
        assert np.allclose(gram, gram.T)
        assert np.all(gram.diagonal() >= 0.0)

    def test_batch_kernel_diff(self, mock_params, mock_kern_params):
        amplitudes, scales = mock_kern_params
        subset = mock_params[10:50]
        cross = batch_kernel(mock_params, subset, amplitudes, scales)
        assert cross.shape == (
            len(amplitudes) * len(mock_params),
            len(amplitudes) * len(subset),
        )
| 34.214286 | 82 | 0.670146 |
f0d325201dfeda4833a85bd7148f4bdd3bd9f18e | 135 | py | Python | python_pillow/100.file.read_write.py | takenobu-hs/pixel-manipulation-examples | ceaefefd2d7bcf1fcce98cc09118c527f2cde42d | [
"BSD-3-Clause"
] | null | null | null | python_pillow/100.file.read_write.py | takenobu-hs/pixel-manipulation-examples | ceaefefd2d7bcf1fcce98cc09118c527f2cde42d | [
"BSD-3-Clause"
] | null | null | null | python_pillow/100.file.read_write.py | takenobu-hs/pixel-manipulation-examples | ceaefefd2d7bcf1fcce98cc09118c527f2cde42d | [
"BSD-3-Clause"
] | null | null | null |
from PIL import Image
# -- read pixels: load the sample image and normalise it to 3-channel RGB
im = Image.open('../images/img001.png').convert('RGB')
# -- save to png: write the (unmodified) image back out under a new name
im.save('z100.png')
| 11.25 | 54 | 0.637037 |
0a39512324cafb2ac792dcdb1a76bf96e77b759a | 314 | py | Python | core/providers/__init__.py | CarboniDavide/rtscan | 3fe447235d0534a7d66e7d6f5e8b9c00d8832b4f | [
"MIT"
] | null | null | null | core/providers/__init__.py | CarboniDavide/rtscan | 3fe447235d0534a7d66e7d6f5e8b9c00d8832b4f | [
"MIT"
] | null | null | null | core/providers/__init__.py | CarboniDavide/rtscan | 3fe447235d0534a7d66e7d6f5e8b9c00d8832b4f | [
"MIT"
] | null | null | null | from .socket_provider import SocketProvider
from .pcapy_provider import PcapyProvider
from .provider import Provider
from core.exceptions import *
class ProviderType():
    """Enumeration of provider class names accepted by ``create`` below.

    Each value must match the name of a class imported into this module,
    since ``create`` resolves it via ``globals()``.
    """
    Socket = "SocketProvider"
    Pcapy = "PcapyProvider"
def create(providerType, device=None):
    """Instantiate the provider class named *providerType* for *device*."""
    provider_cls = globals()[providerType]
    return provider_cls(device)
d9529807e6c0843bc06c22c227df5d8e283d2fed | 837 | py | Python | restaurants_addresses_webscraping/restaurants_addresses_webscraping/spiders/restaurants_addresses_spider.py | calinjovrea/AIFinancialStatusTask | 47c032e9ddb7142ee8ef4eebca8c0126909f29f3 | [
"MIT"
] | null | null | null | restaurants_addresses_webscraping/restaurants_addresses_webscraping/spiders/restaurants_addresses_spider.py | calinjovrea/AIFinancialStatusTask | 47c032e9ddb7142ee8ef4eebca8c0126909f29f3 | [
"MIT"
] | null | null | null | restaurants_addresses_webscraping/restaurants_addresses_webscraping/spiders/restaurants_addresses_spider.py | calinjovrea/AIFinancialStatusTask | 47c032e9ddb7142ee8ef4eebca8c0126909f29f3 | [
"MIT"
] | null | null | null | import scrapy
class AddressSpider(scrapy.Spider):
    """Scrape one street address from each restaurant page listed below."""

    name = "addressesofrestaurants"

    def start_requests(self):
        target_pages = (
            'https://tosseduk.com/locations/baker-street/',
            'https://www.gbk.co.uk/location/covent-garden?address',
            'https://www.superfishuk.co.uk/branches/ashtead/',
            'https://theivysohobrasserie.com/',
        )
        # A browser-like User-Agent avoids trivial bot blocking.
        ua_header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:48.0) Gecko/20100101 Firefox/48.0'}
        for page in target_pages:
            yield scrapy.Request(url=page, callback=self.parse, headers=ua_header)

    def parse(self, response):
        # Flatten the whole document to text and grab the first thing that
        # looks like "<number> <word> <word>" (a UK street address).
        matches = response.xpath('string(//*)').re(r"\d{1,2}\D\d+[ ]\w+[ ]\w+")
        return {'restaurant_street_address': matches[0]}
17153c2c7c152ef0bd143fd6e67bf1d9fbb3dd32 | 1,489 | py | Python | benchmark/kernel/sort_pool.py | itamblyn/pytorch_geometric | 67ed16492863378b8434b03713a75924f0cc5df1 | [
"MIT"
] | 2 | 2020-08-06T16:14:15.000Z | 2021-11-08T07:33:21.000Z | benchmark/kernel/sort_pool.py | ChenShengsGitHub/pytorch_geometric | 86308313d6f1af56e5931e2ca89bb1a867c10ff3 | [
"MIT"
] | 1 | 2021-11-29T18:14:13.000Z | 2021-11-29T18:14:13.000Z | benchmark/kernel/sort_pool.py | ChenShengsGitHub/pytorch_geometric | 86308313d6f1af56e5931e2ca89bb1a867c10ff3 | [
"MIT"
] | null | null | null | import torch
import torch.nn.functional as F
from torch.nn import Linear, Conv1d
from torch_geometric.nn import SAGEConv, global_sort_pool
class SortPool(torch.nn.Module):
    """Graph classifier: stacked GraphSAGE layers + SortPooling readout.

    After message passing, ``global_sort_pool`` keeps the top ``k`` nodes per
    graph, a 1-D convolution scans the sorted node features, and two linear
    layers produce class log-probabilities.
    """

    def __init__(self, dataset, num_layers, hidden):
        super().__init__()
        # Number of nodes retained per graph by the sort pooling readout.
        self.k = 30
        self.conv1 = SAGEConv(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(SAGEConv(hidden, hidden))
        # 1-D conv over the k sorted nodes (kernel width 5, 32 channels).
        self.conv1d = Conv1d(hidden, 32, 5)
        # Conv output length is k - 5 + 1 positions x 32 channels.
        self.lin1 = Linear(32 * (self.k - 5 + 1), hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)

    def reset_parameters(self):
        """Re-initialise every learnable submodule."""
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.conv1d.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        # data is assumed to be a torch_geometric Batch with x, edge_index,
        # batch attributes — TODO confirm against caller.
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = F.relu(self.conv1(x, edge_index))
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
        # Readout: (num_graphs, k * hidden) of the top-k sorted nodes.
        x = global_sort_pool(x, batch, self.k)
        # Reshape to (num_graphs, hidden, k) so Conv1d scans across nodes.
        x = x.view(len(x), self.k, -1).permute(0, 2, 1)
        x = F.relu(self.conv1d(x))
        x = x.view(len(x), -1)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__
| 34.627907 | 66 | 0.611148 |
e757ccf3825a81d98e3b312f4195613b1ebbc4f2 | 1,897 | py | Python | gs_manager/servers/__init__.py | AngellusMortis/game_server_manager | f288a5dee08acee83cca6935c742e0e82cc281ff | [
"MIT"
] | 6 | 2017-12-11T20:42:37.000Z | 2021-01-17T02:24:44.000Z | gs_manager/servers/__init__.py | AngellusMortis/game_server_manager | f288a5dee08acee83cca6935c742e0e82cc281ff | [
"MIT"
] | null | null | null | gs_manager/servers/__init__.py | AngellusMortis/game_server_manager | f288a5dee08acee83cca6935c742e0e82cc281ff | [
"MIT"
] | 1 | 2020-07-27T18:37:41.000Z | 2020-07-27T18:37:41.000Z | import importlib
import inspect
import sys
from typing import List, Optional
from gs_manager.servers.base import (
STATUS_FAILED,
STATUS_PARTIAL_FAIL,
STATUS_SUCCESS,
BaseServer,
EmptyServer,
)
from gs_manager.servers.generic import (
JavaServer,
RconServer,
ScreenServer,
SteamServer,
)
from gs_manager.servers.specific import (
ArkServer,
MinecraftServer,
) # , StarboundServer <- server always crashes :(
# Public API of the servers package.  StarboundServer is excluded (see the
# import comment above); get_server_class is not listed even though it is
# defined below — presumably intentional, TODO confirm.
__all__ = [
    "get_servers",
    "EmptyServer",
    "BaseServer",
    "ScreenServer",
    "JavaServer",
    "SteamServer",
    "RconServer",
    "ArkServer",
    "MinecraftServer",
    # "StarboundServer",
    "STATUS_FAILED",
    "STATUS_PARTIAL_FAIL",
    "STATUS_SUCCESS",
]
def get_servers() -> List[str]:
    """Return the ``name`` attribute of every server class in this module.

    Scans the module namespace for classes derived from ``EmptyServer``.
    """
    members = inspect.getmembers(
        sys.modules[__name__], predicate=inspect.isclass
    )
    # Collect the declared name of each EmptyServer subclass.
    return [klass.name for _, klass in members if issubclass(klass, EmptyServer)]
def get_server_class(klass_name: str) -> Optional[EmptyServer]:
    """Resolve *klass_name* to a server class, or ``None`` when not found.

    Two lookup forms are supported:

    * a bare name (no dot), matched against the ``name`` attribute of the
      ``EmptyServer`` subclasses in this module;
    * a dotted path (``pkg.mod.Class``), imported dynamically and validated
      to be an ``EmptyServer`` subclass.
    """
    if "." not in klass_name:
        server_classes = inspect.getmembers(
            sys.modules[__name__], predicate=inspect.isclass
        )
        for _, server_klass in server_classes:
            if (
                issubclass(server_klass, EmptyServer)
                and server_klass.name == klass_name
            ):
                return server_klass
        # No match: fall through — rsplit below raises ValueError for a
        # dot-less name and we return None.
    try:
        module_path, attr_name = klass_name.rsplit(".", 1)
        module = importlib.import_module(module_path)
        klass = getattr(module, attr_name)
    except (ValueError, ModuleNotFoundError, AttributeError):
        return None

    if not (inspect.isclass(klass) and issubclass(klass, EmptyServer)):
        return None
    return klass
| 24.320513 | 71 | 0.656827 |
aa73c431ecd8b59c21f79dbf209cefec258347a3 | 3,205 | py | Python | deep_rl/component/memory_lineworld.py | psurya1994/successor-feature-representations | efbd5ad49d574aebee68762d1f2c1a0a4a2b8b28 | [
"MIT"
] | null | null | null | deep_rl/component/memory_lineworld.py | psurya1994/successor-feature-representations | efbd5ad49d574aebee68762d1f2c1a0a4a2b8b28 | [
"MIT"
] | null | null | null | deep_rl/component/memory_lineworld.py | psurya1994/successor-feature-representations | efbd5ad49d574aebee68762d1f2c1a0a4a2b8b28 | [
"MIT"
] | 1 | 2021-04-30T08:14:15.000Z | 2021-04-30T08:14:15.000Z | import numpy as np
import gym
from gym import spaces
import matplotlib.pyplot as plt
import random
from PIL import Image, ImageDraw, ImageFont
class MemoryLineWorld(gym.Env):
    """A 1-D memory task on a chain of ``size`` cells.

    The agent starts in the middle cell.  The leftmost cell carries a colour
    cue (blue or green) that is only visible in the observation while the
    agent stands on it.  The rightmost cell is the "choice" cell: taking a
    click action there ends the episode, rewarding the agent when the click
    matches the remembered cue.

    Actions: 0 - left, 1 - right, 2 - click blue, 3 - click green.
    Observations: one-hot position over ``size`` cells plus two trailing
    cue bits, i.e. a vector of length ``size + 2``.
    """

    def __init__(self, size=5, p=0, horizon=100):
        # Setting things up
        self.updates = 0
        self.size = size  # number of cells in the chain
        self.loc = (self.size - 1) // 2  # start at the middle
        self.action_space = spaces.Discrete(4)  # 0 left, 1 right, 2 click blue, 3 click green
        # One-hot position plus 2 cue bits.
        self.observation_space = spaces.Box(low=np.zeros(size + 2), high=np.ones(size + 2), dtype=np.uint8)
        self.p = p  # probability that the chosen action is replaced by a random one
        self.horizon = horizon  # hard episode-length cap

        # Cue colour for this episode.
        self.color = random.choice(['blue', 'green'])
        self.color2val = {'blue': 0, 'green': 1}

        # Reward values
        self.step_reward = -1
        self.correct_reward = 10
        self.wrong_reward = -10

    def reset(self):
        """Start a new episode: recentre the agent and redraw the cue colour."""
        self.updates = 0
        self.loc = (self.size - 1) // 2
        self.color = random.choice(['blue', 'green'])
        return self._loc2state(self.loc)

    def _loc2state(self, loc):
        """One-hot encode *loc*; expose the cue bit only on the leftmost cell."""
        state = np.zeros(self.observation_space.shape[0])
        state[loc] = 1
        # Cue is visible only while standing on the cue cell (position 0).
        if self.loc == 0:
            state[self.size + self.color2val[self.color]] = 1
        return state

    def step(self, action):
        """Advance one timestep; returns (obs, reward, done, info)."""
        assert self.action_space.contains(action)
        self.updates += 1

        # Stochastic environment: with probability p a random action is taken.
        if np.random.rand() <= self.p:
            action = np.random.randint(low=0, high=4)

        done = False

        # Movement (click actions do not move the agent).
        if action == 0:
            self.loc = self.loc - 1
        elif action == 1:
            self.loc = self.loc + 1

        # Walls at both ends of the chain.
        if self.loc < 0:
            self.loc = 0
        elif self.loc >= self.size:
            self.loc = self.size - 1

        R = self.step_reward

        # Episode ends when the horizon is exhausted ...
        if self.updates >= self.horizon:
            done = True

        # ... or when a click action (2 or 3) is taken on the choice cell.
        # BUGFIX: this previously tested `action > 2`, so "click blue"
        # (action 2) could never end the episode or earn reward, making
        # blue-cue episodes unwinnable.
        if self.loc == self.size - 1 and action >= 2:
            done = True
            if action - 2 == self.color2val[self.color]:
                R = self.correct_reward
            else:
                R = self.wrong_reward

        return self._loc2state(self.loc), R, done, {}

    def render(self, mode='human', printR=False, cs=100):
        """Draw the chain: red dot = agent, black = choice cell, cue dot at left.

        *cs* is the cell size in pixels.  Returns a PIL image when
        mode == 'rgb_array'; otherwise displays it with matplotlib.
        """
        self.cs = cs
        h = self.size * cs
        w = cs
        img = Image.new('RGBA', (h, w), "white")
        draw = ImageDraw.Draw(img)
        # Cell separators.
        for i in range(1, self.size):
            draw.line([cs * i, 0, cs * i, h], fill="gray", width=10)
        for i in range(0, self.size):
            if i == self.size - 1:
                draw.ellipse([i * cs, 0, i * cs + cs, cs], fill="black")
            if i == 0:
                draw.ellipse([0, 0, cs / 2, cs / 2], fill=self.color)
            if i == self.loc:
                draw.ellipse([i * cs + cs / 3, cs / 3, i * cs + cs * 2 / 3, 2 * cs / 3], fill="red")
        plt.title('red: agent | black: choose state')
        if mode == "human":
            plt.imshow(img)
            plt.show()
        elif mode == "rgb_array":
            return img
b7d897ed0d62b06fd474c3ac0dbf62be0c4d372b | 3,694 | py | Python | TBConsumer.py | PHT-Medic/central-train-builder | 4e557a30dbfd8a96df577e1ce550268cf46f6d22 | [
"MIT"
] | null | null | null | TBConsumer.py | PHT-Medic/central-train-builder | 4e557a30dbfd8a96df577e1ce550268cf46f6d22 | [
"MIT"
] | 19 | 2021-11-22T12:20:33.000Z | 2022-03-15T11:14:32.000Z | TBConsumer.py | PHT-EU/central-train-builder | 4e557a30dbfd8a96df577e1ce550268cf46f6d22 | [
"MIT"
] | null | null | null | import pika
from train_lib.clients import Consumer, PHTClient
from train_lib.clients.rabbitmq import LOG_FORMAT
import json
from dotenv import load_dotenv, find_dotenv
import os
import logging
from builder.train_builder import TrainBuilder, BuildStatus
from loguru import logger
from builder.messages import BuilderResponse
LOGGER = logging.getLogger(__name__)
class TBConsumer(Consumer):
    """RabbitMQ consumer that feeds incoming messages to the TrainBuilder.

    Consumes on routing key "tb", forwards build results to the UI exchange,
    and notifies the train router when a train has finished building.
    """

    def __init__(self, amqp_url: str, queue: str = "", routing_key: str = None):
        super().__init__(amqp_url, queue, routing_key=routing_key)
        # NOTE(review): attribute name has a typo ("ampq" vs "amqp"); it is
        # read by publish_events_for_train, so renaming would need both sites.
        self.ampq_url = amqp_url
        # Normalise the API/Vault base URLs to end with a trailing slash.
        # NOTE(review): this crashes on an empty/missing env var — confirm
        # the variables are guaranteed to be set.
        api_url = os.getenv("UI_TRAIN_API")
        if api_url[-1] != "/":
            api_url += "/"
        vault_url = os.getenv("VAULT_URL")
        if vault_url[-1] != "/":
            vault_url = vault_url + "/"
        self.pht_client = PHTClient(ampq_url=amqp_url, api_url=api_url,
                                    vault_url=vault_url, vault_token=os.getenv("VAULT_TOKEN"))
        self.builder = TrainBuilder()
        # Set auto reconnect to tr
        self.auto_reconnect = True
        # Configure routing key
        self.ROUTING_KEY = "tb"

    def on_message(self, _unused_channel, basic_deliver, properties, body):
        """Handle one delivery: parse JSON, run the builder, publish results.

        The super().on_message call (which acknowledges the delivery) is made
        only after events have been published.
        """
        try:
            message = json.loads(body)
        except Exception as e:
            # Unparseable payload: report the failure to the UI and still
            # acknowledge the message so it is not redelivered forever.
            logger.error(f"Failed to parse message: {e}")
            response = BuilderResponse(type=BuildStatus.FAILED.value, data={"message": "Failed to parse message"})
            self.publish_events_for_train(response)
            super().on_message(_unused_channel, basic_deliver, properties, body)
            return
        logger.info(f"Received message: \n {message}")
        response = self.builder.process_message(message)
        if response:
            # post message to train router to notify that the train has been built
            if response.type == BuildStatus.FINISHED.value:
                # check if the train has been already submitted if not notify the train router via rabbitmq
                if not self.builder.redis_store.train_submitted(response.data["id"]):
                    self.post_message_for_train_router(response.data["id"])
            self.publish_events_for_train(response)
        super().on_message(_unused_channel, basic_deliver, properties, body)

    def publish_events_for_train(self, response: BuilderResponse, exchange: str = "pht",
                                 exchange_type: str = "topic", routing_key: str = "ui.tb.event"):
        """Publish *response* as a UI event on the given exchange.

        Opens a short-lived blocking connection per call rather than reusing
        the consumer's own channel.
        """
        connection = pika.BlockingConnection(pika.URLParameters(self.ampq_url))
        channel = connection.channel()
        channel.exchange_declare(exchange=exchange, exchange_type=exchange_type, durable=True)
        json_message = response.json().encode("utf-8")
        channel.basic_publish(exchange=exchange, routing_key=routing_key, body=json_message)
        logger.debug(f"Published message: {json_message}")
        connection.close()

    def post_message_for_train_router(self, train_id: str):
        """
        Notifies the train router via RabbitMQ that the train has been built and the route is stored in vault
        :param train_id: id of the train that has been built
        :return:
        """
        message = {
            "type": "trainBuilt",
            "data": {
                "id": train_id
            }
        }
        self.pht_client.publish_message_rabbit_mq(message, routing_key="tr")
def main():
    """Entry point: load env vars, configure logging, run the consumer loop."""
    load_dotenv(find_dotenv())
    logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
    # Queue name left blank; routing key "tb" selects train-builder messages.
    tb_consumer = TBConsumer(os.getenv("AMQP_URL"), "", routing_key="tb")
    # os.getenv("UI_TRAIN_API")
    tb_consumer.run()


if __name__ == '__main__':
    main()
| 37.313131 | 114 | 0.652139 |
a1fdfc0392e7ec3a879817b9fecafadf5a6b62b5 | 1,727 | py | Python | misc.py | xw-hu/DGNL-Net | 44e8516429fe8a4e7db615573454b24f51f595cc | [
"BSD-2-Clause"
] | 12 | 2020-10-20T07:16:05.000Z | 2022-03-16T13:28:44.000Z | misc.py | xw-hu/DGNL-Net | 44e8516429fe8a4e7db615573454b24f51f595cc | [
"BSD-2-Clause"
] | null | null | null | misc.py | xw-hu/DGNL-Net | 44e8516429fe8a4e7db615573454b24f51f595cc | [
"BSD-2-Clause"
] | 3 | 2021-09-19T10:57:41.000Z | 2022-03-04T11:36:37.000Z | import os
import random
from torch.autograd import Variable
import torch
class AvgMeter(object):
    """Running-average tracker: remembers the last value and the mean so far."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Collapse all statistics back to zero in one chained assignment.
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Fold in *val*, observed *n* times, and refresh the mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def check_mkdir(dir_name):
    """Ensure *dir_name* exists as a directory.

    Uses ``os.makedirs(..., exist_ok=True)``: race-free (no exists-then-mkdir
    gap) and, as a backward-compatible generalisation, also creates any
    missing parent directories.
    """
    os.makedirs(dir_name, exist_ok=True)
class ReplayBuffer():
    """Sample pool: stores past tensors and sometimes replays them.

    While the pool is filling, inputs pass straight through.  Once full,
    each incoming sample has a 50% chance of being swapped with a random
    stored one (the stored sample is emitted, the new one takes its slot).
    """

    def __init__(self, max_size=50):
        assert (max_size > 0), 'Empty buffer or trying to create a black hole. Be careful.'
        self.max_size = max_size
        self.data = []

    def push_and_pop(self, data):
        """Push each sample of *data* through the pool; return a same-size batch."""
        batch = []
        for sample in data.data:
            sample = torch.unsqueeze(sample, 0)
            if len(self.data) < self.max_size:
                # Pool not full yet: store and pass the sample straight through.
                self.data.append(sample)
                batch.append(sample)
            elif random.uniform(0,1) > 0.5:
                # Swap: emit a random stored sample, keep the new one instead.
                idx = random.randint(0, self.max_size-1)
                batch.append(self.data[idx].clone())
                self.data[idx] = sample
            else:
                batch.append(sample)
        return Variable(torch.cat(batch))
def mse_loss(input, target):
    """Mean squared error: sum of squared differences over the element count."""
    squared_error = (input - target) ** 2
    return squared_error.sum() / input.data.nelement()
def weights_init_normal(m):
    """Initialise a module in place: N(0, 0.02) for Conv weights, N(1, 0.02)
    weights and zero bias for BatchNorm2d; other module types are untouched."""
    layer_type = m.__class__.__name__
    if 'Conv' in layer_type:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm2d' in layer_type:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
| 26.166667 | 91 | 0.568037 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.