content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
#!/usr/bin/python3
# Ad-hoc manual test script: logs in to the mijkweb service, then attempts to
# cancel an order through the "user" endpoint, printing the raw responses.
import requests
# One session so the login cookie is reused by the follow-up request.
s = requests.Session()
#url = "http://127.0.0.1:8080"
# NOTE(review): hard-coded remote test server and plaintext credentials below —
# acceptable only for a throwaway test script; never commit real passwords.
url = "http://52.76.131.184/_mijkweb/"
#payload = {'request' : '{"type":"login", "login":"mijkenator@gmail.com", "password":"test", "as":"admin"}'}
payload = {'request' : '{"type":"login", "login":"mijkenator", "password":"test"}'}
#payload = {'request' : '{"type":"login", "login":"gelevanog@gmail.com", "password":"lalala", "as":"contractor"}'}
r = s.post(url, data=payload)
print(r.text)
#payload = {'request' : '{"type":"get_orders", "uid":"18", "cid":"4"}'}
#r = s.post(url+"admin/order/", data=payload)
#print(r.text)
# Cancel order 5 as the logged-in user; the API takes a JSON string in a form field.
payload = {'request' : '{"type":"cancel_order","order_id":5}'}
r = s.post(url+"user", data=payload)
print(r.text)
|
nilq/baby-python
|
python
|
import pytest
from anticrlf.types import SubstitutionMap
from anticrlf.exception import UnsafeSubstitutionError
def test_substitution_assign():
    """Constructor kwargs and item assignment both store values; CR/LF defaults persist."""
    mapping = SubstitutionMap(key="value")
    assert type(mapping) == SubstitutionMap
    assert mapping['key'] == 'value'
    # the built-in CR/LF escape entries are always present
    assert mapping["\n"] == "\\n"
    assert mapping["\r"] == "\\r"
    # re-assigning an existing key overwrites it
    mapping["key"] = "2value"
    assert type(mapping) == SubstitutionMap
    assert mapping['key'] == '2value'
def test_bad_substitution():
    """Substitutions whose value would re-introduce a mapped key are rejected."""
    with pytest.raises(UnsafeSubstitutionError):
        SubstitutionMap(x="hex")
    mapping = SubstitutionMap(x="y")
    with pytest.raises(UnsafeSubstitutionError):
        mapping['x'] = 'hex'
    mapping = SubstitutionMap()
    mapping["x"] = "r"
    with pytest.raises(UnsafeSubstitutionError):
        mapping["r"] = "\\r"
    assert "\r" in mapping.keys()
    with pytest.raises(UnsafeSubstitutionError):
        mapping["x"] = "\r"  # any use of \r as a value should trigger this
    with pytest.raises(UnsafeSubstitutionError):
        mapping["x"] = "\n"  # any use of \n as a value should trigger this
def test_delete():
    """Deleting a default CR/LF entry resurrects it; user entries delete normally."""
    mapping = SubstitutionMap()
    del mapping["\n"]
    # default entries come back after deletion
    assert mapping["\n"] == "\\n"
    mapping["x"] = "y"
    del mapping["x"]
    assert "x" not in mapping
|
nilq/baby-python
|
python
|
from gitlab.base import RESTManager, RESTObject
from gitlab.mixins import (
AccessRequestMixin,
CreateMixin,
DeleteMixin,
ListMixin,
ObjectDeleteMixin,
)
__all__ = [
"GroupAccessRequest",
"GroupAccessRequestManager",
"ProjectAccessRequest",
"ProjectAccessRequestManager",
]
class GroupAccessRequest(AccessRequestMixin, ObjectDeleteMixin, RESTObject):
    """A pending request for access to a group; behaviour comes from the mixins."""
    pass
class GroupAccessRequestManager(ListMixin, CreateMixin, DeleteMixin, RESTManager):
    """Lists, creates and deletes access requests for a group."""
    _path = "/groups/{group_id}/access_requests"  # API endpoint template
    _obj_cls = GroupAccessRequest  # class wrapped around each returned item
    _from_parent_attrs = {"group_id": "id"}  # {group_id} filled from parent group's id
class ProjectAccessRequest(AccessRequestMixin, ObjectDeleteMixin, RESTObject):
    """A pending request for access to a project; behaviour comes from the mixins."""
    pass
class ProjectAccessRequestManager(ListMixin, CreateMixin, DeleteMixin, RESTManager):
    """Lists, creates and deletes access requests for a project."""
    _path = "/projects/{project_id}/access_requests"  # API endpoint template
    _obj_cls = ProjectAccessRequest  # class wrapped around each returned item
    _from_parent_attrs = {"project_id": "id"}  # {project_id} filled from parent project's id
|
nilq/baby-python
|
python
|
#n = ''
#while n != 'MnFf':
#print('Qual o seu sexo? ')
#x = input('[M/F]').upper().strip()
#if x in 'MmFf':
#print('OBRIGADO PELO ACESSO.')
#else:
#print('TENTE NOVAMENTE, INVALIDO.')
# Ask for the user's sex until a valid single-letter answer is given.
# BUG FIX: the original test `sexo not in 'FfMm'` treats the answer as a
# substring, so the empty string (and fragments such as 'fM') passed
# validation because `'' in s` is always True; the retry input was also never
# stripped or normalised. Validate against the two accepted letters instead.
sexo = str(input('Qual seu SEXO?: [M/F] ')).strip().upper()
while sexo not in ('M', 'F'):
    sexo = str(input('Opção invalida.Tente novamente.\nQual seu SEXO?: [M/F]')).strip().upper()
if sexo == 'M':
    print('Homao da porra em rs')
else:
    print('Rabudinha em princesa')
|
nilq/baby-python
|
python
|
"""
Package for working with JSON-format configuration files.
"""
from ._JSONObject import JSONObject
from ._StrictJSONObject import StrictJSONObject
from ._typing import (
Absent,
OptionallyPresent,
PropertyValueType
)
|
nilq/baby-python
|
python
|
import itertools
from datetime import datetime, timedelta
from notifications_utils.polygons import Polygons
from notifications_utils.template import BroadcastPreviewTemplate
from orderedset import OrderedSet
from werkzeug.utils import cached_property
from app.broadcast_areas import CustomBroadcastAreas, broadcast_area_libraries
from app.formatters import round_to_significant_figures
from app.models import JSONModel, ModelList
from app.models.user import User
from app.notify_client.broadcast_message_api_client import (
broadcast_message_api_client,
)
class BroadcastMessage(JSONModel):
    """An emergency broadcast message, backed by the broadcast-message API.

    JSONModel keeps the raw API payload in ``self._dict``; this class layers
    area/polygon helpers, a display status, and the approval lifecycle
    (pending-approval -> broadcasting -> completed/cancelled/rejected) on top.
    """

    # FIX: 'service_id' appeared twice in the original set literal; a set
    # silently de-duplicates, so listing it once is equivalent and clearer.
    ALLOWED_PROPERTIES = {
        'id',
        'service_id',
        'template_id',
        'content',
        'created_by',
        'personalisation',
        'starts_at',
        'finishes_at',
        'created_at',
        'approved_at',
        'cancelled_at',
        'updated_at',
        'created_by_id',
        'approved_by_id',
        'cancelled_by_id',
    }

    libraries = broadcast_area_libraries

    def __lt__(self, other):
        """Order broadcasts: by start time when both have one; started ones
        first; otherwise fall back to updated/created timestamps (the
        mixed updated-vs-created comparisons are deliberate)."""
        if self.starts_at and other.starts_at:
            return self.starts_at < other.starts_at
        if self.starts_at and not other.starts_at:
            return True
        if not self.starts_at and other.starts_at:
            return False
        if self.updated_at and not other.updated_at:
            return self.updated_at < other.created_at
        if not self.updated_at and other.updated_at:
            return self.created_at < other.updated_at
        if not self.updated_at and not other.updated_at:
            return self.created_at < other.created_at
        return self.updated_at < other.updated_at

    @classmethod
    def create(cls, *, service_id, template_id):
        """Create a new draft broadcast from an existing template."""
        return cls(broadcast_message_api_client.create_broadcast_message(
            service_id=service_id,
            template_id=template_id,
            content=None,
            reference=None,
        ))

    @classmethod
    def create_from_content(cls, *, service_id, content, reference):
        """Create a new draft broadcast from free-form content and a reference."""
        return cls(broadcast_message_api_client.create_broadcast_message(
            service_id=service_id,
            template_id=None,
            content=content,
            reference=reference,
        ))

    @classmethod
    def from_id(cls, broadcast_message_id, *, service_id):
        """Fetch an existing broadcast by its id."""
        return cls(broadcast_message_api_client.get_broadcast_message(
            service_id=service_id,
            broadcast_message_id=broadcast_message_id,
        ))

    @property
    def areas(self):
        """The broadcast's areas as library objects, or a CustomBroadcastAreas
        wrapper when the stored areas aren't all found in the libraries."""
        library_areas = self.get_areas(areas=self._dict['areas'])
        if library_areas:
            # Either every area resolves in the libraries, or none should —
            # a partial match means the stored data is inconsistent.
            if len(library_areas) != len(self._dict['areas']):
                raise RuntimeError(
                    f'BroadcastMessage has {len(self._dict["areas"])} areas '
                    f'but {len(library_areas)} found in the library'
                )
            return library_areas
        return CustomBroadcastAreas(
            areas=self._dict['areas'],
            polygons=self._dict['simple_polygons'],
        )

    @property
    def parent_areas(self):
        """Sorted, de-duplicated parents of all this broadcast's areas."""
        return sorted(set(self._parent_areas_iterator))

    @property
    def _parent_areas_iterator(self):
        for area in self.areas:
            for parent in area.parents:
                yield parent

    @cached_property
    def polygons(self):
        """All areas' full-resolution polygons combined."""
        return Polygons(
            list(itertools.chain(*(
                area.polygons for area in self.areas
            )))
        )

    @cached_property
    def simple_polygons(self):
        """Simplified polygons suitable for transmission/display."""
        return self.get_simple_polygons(areas=self.areas)

    @property
    def reference(self):
        """Human-readable name: the template's name, or the stored reference."""
        if self.template_id:
            return self._dict['template_name']
        return self._dict['reference']

    @property
    def template(self):
        """A preview template rendering this broadcast's content."""
        return BroadcastPreviewTemplate({
            'template_type': BroadcastPreviewTemplate.template_type,
            'name': self.reference,
            'content': self.content,
        })

    @property
    def status(self):
        """The API status, except a 'broadcasting' message whose finish time
        has passed is reported as 'completed'. ISO 8601 strings compare
        correctly as plain strings, so no parsing is needed."""
        if (
            self._dict['status']
            and self._dict['status'] == 'broadcasting'
            and self.finishes_at < datetime.utcnow().isoformat()
        ):
            return 'completed'
        return self._dict['status']

    @cached_property
    def created_by(self):
        return User.from_id(self.created_by_id) if self.created_by_id else None

    @cached_property
    def approved_by(self):
        return User.from_id(self.approved_by_id)

    @cached_property
    def cancelled_by(self):
        return User.from_id(self.cancelled_by_id)

    @property
    def count_of_phones(self):
        """Estimated phone count across all areas, to 1 significant figure."""
        return round_to_significant_figures(
            sum(area.count_of_phones for area in self.areas),
            1
        )

    @property
    def count_of_phones_likely(self):
        """Phone count scaled up for the polygons' bleed margin.

        NOTE(review): divides by the simplified polygons' estimated area —
        confirm that it can never be zero for a message with areas.
        """
        area_estimate = self.simple_polygons.estimated_area
        bleed_area_estimate = self.simple_polygons.bleed.estimated_area - area_estimate
        return round_to_significant_figures(
            self.count_of_phones + (self.count_of_phones * bleed_area_estimate / area_estimate),
            1
        )

    def get_areas(self, areas):
        """Resolve area identifiers against the broadcast area libraries."""
        return broadcast_area_libraries.get_areas(
            *areas
        )

    def get_simple_polygons(self, areas):
        """Combine the areas' simplified polygons into one Polygons object."""
        polygons = Polygons(
            list(itertools.chain(*(
                area.simple_polygons for area in areas
            )))
        )
        # If we’ve added multiple areas then we need to re-simplify the
        # combined shapes to keep the point count down
        return polygons.smooth.simplify if len(areas) > 1 else polygons

    def add_areas(self, *new_areas):
        """Append areas (preserving order, de-duplicated) and persist."""
        areas = list(OrderedSet(
            self._dict['areas'] + list(new_areas)
        ))
        simple_polygons = self.get_simple_polygons(areas=self.get_areas(areas=areas))
        self._update(areas=areas, simple_polygons=simple_polygons.as_coordinate_pairs_lat_long)

    def remove_area(self, area_to_remove):
        """Remove one area and persist the recomputed polygons."""
        areas = [
            area for area in self._dict['areas']
            if area != area_to_remove
        ]
        simple_polygons = self.get_simple_polygons(areas=self.get_areas(areas=areas))
        self._update(areas=areas, simple_polygons=simple_polygons.as_coordinate_pairs_lat_long)

    def _set_status_to(self, status):
        broadcast_message_api_client.update_broadcast_message_status(
            status,
            broadcast_message_id=self.id,
            service_id=self.service_id,
        )

    def _update(self, **kwargs):
        broadcast_message_api_client.update_broadcast_message(
            broadcast_message_id=self.id,
            service_id=self.service_id,
            data=kwargs,
        )

    def request_approval(self):
        self._set_status_to('pending-approval')

    def approve_broadcast(self):
        """Start broadcasting now, for a fixed four-hour window."""
        self._update(
            starts_at=datetime.utcnow().isoformat(),
            finishes_at=(
                datetime.utcnow() + timedelta(hours=4, minutes=0)
            ).isoformat(),
        )
        self._set_status_to('broadcasting')

    def reject_broadcast(self):
        self._set_status_to('rejected')

    def cancel_broadcast(self):
        self._set_status_to('cancelled')
class BroadcastMessages(ModelList):
    """All broadcast messages for a service, fetched via the API client."""
    model = BroadcastMessage
    client_method = broadcast_message_api_client.get_broadcast_messages

    def with_status(self, *statuses):
        """Return only the broadcasts whose status is one of *statuses*."""
        return [message for message in self if message.status in statuses]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
'''
Dane Warren
Obtain the k nearest neighbors of the given player and use the second seasons of these neighbors to predict the second season of the given player.
'''
import cPickle as pickle
'''
@param rbID: The ID of the running back to get the stats for
@param rrbStats: A list containing dictionaries of rookie running back statlines
@returns: The rookie season statline of the given ID
'''
def getIndividualRookieStats(rbID, rrbStats):
    """Return (and print) the rookie statline for the given ID, or 0 if absent."""
    for statline in rrbStats:
        if statline["ID"] != rbID:
            continue
        print(statline)
        return statline
    return 0
'''
@param x: Number to compare to y
@param y: Number to compare to x
@returns: The similarity of the two numbers
'''
def similarityScore(x, y):
    """Ratio-based similarity of two numbers in [0, 1].

    Negative input gives 0, equal values (including 0, 0) give 1, otherwise
    the smaller value divided by the larger.
    """
    if x < 0 or y < 0:
        return 0
    if x == y:
        return 1
    # Both non-negative and unequal, so the larger is strictly positive.
    smaller, larger = (x, y) if x < y else (y, x)
    return float(smaller) / float(larger)
'''
@param rb: The stats of every rookie running back since 1950
@param stats: The given running back's rookie season stats
@param ID: The ID of the given running back
@returns: The similarity rating of the given rb and the next rb in the list
'''
def getSimilarity(rb, stats):
    """Average ratio-similarity over eight rate stats.

    Returns 0 for the player himself, and 0 for candidates with fewer than
    eight games played (too small a sample).
    """
    if rb["ID"] == stats["ID"]:
        return 0
    if rb["gamesPlayed"] < 8:
        return 0
    rate_stats = ("rushYpA", "rushYpG", "rushTDpG", "rushTDpA",
                  "airYpG", "airYpR", "airTDpG", "airTDpR")
    return .125 * sum(similarityScore(rb[key], stats[key]) for key in rate_stats)
'''
@param k: The number of neighbors to return
@param inputRB: The stats of the given running back
@param rrbStats: The stats of every rookie running back
@returns: k nearest neighbors and the similarity scores
'''
def getNearestNeighbors(k, inputRB, rrbStats):
    """Return a dict mapping the k most similar rookie IDs to their scores."""
    nearest = {}
    for candidate in rrbStats:
        score = getSimilarity(candidate, inputRB)
        if len(nearest) < k:
            nearest[candidate["ID"]] = score
            continue
        # Evict the current worst neighbour when this candidate beats it.
        if score > min(nearest.values()):
            weakest = min(nearest, key=nearest.get)
            del nearest[weakest]
            nearest[candidate["ID"]] = score
    return nearest
def getNeighbors(inputRB, rrbStats):
    """Map every rookie's ID to his similarity with the input running back."""
    return {rb["ID"]: getSimilarity(rb, inputRB) for rb in rrbStats}
def getSophomoreStats(srbStats, ID):
    """Return the sophomore statline for ID, or -1 when missing or scoreless."""
    for statline in srbStats:
        if statline["ID"] != ID:
            continue
        if statline["fantasyPoints"] == 0:
            # Player did not touch the ball in their second season.
            return -1
        return statline
    return -1
def main():
    """Load pickled rookie/sophomore stats, ask for a running back, and report
    how well the k-nearest-neighbour average predicts his sophomore fantasy
    points per game (searching k from 5 to 40).

    NOTE: this script is Python 2 (cPickle, raw_input, dict.keys()[0]).
    """
    file = open("../../datasets/pkl_datasets/rookie_rbStats.pkl","rb")
    rrbStats = pickle.load(file)
    file.close()
    file = open("../../datasets/pkl_datasets/sophomore_rbStats.pkl","rb")
    srbStats = pickle.load(file)
    file.close()
    file = open("../../datasets/pkl_datasets/runningbacksNameID.pkl")
    rbs = pickle.load(file)
    file.close()
    #GET INPUT RB
    rbID = raw_input("Enter a running back. (Name or ID) \n")
    if rbID.isdigit():
        rbID = int(rbID)
    else:
        rbID = rbs[rbID]
    inputRB = getIndividualRookieStats(rbID, rrbStats)
    if inputRB["fantasyPoints"] == 0:
        print("This player did not touch the ball in their rookie season.")
        return
    # BUG FIX: getSophomoreStats returns -1 (not a message string) when the
    # player has no usable sophomore season; the original compared against a
    # string, so this early exit never fired and the script crashed later.
    if getSophomoreStats(srbStats, rbID) == -1:
        print("This player did not touch the ball in their sophomore season.")
        return
    #GET NEIGHBORS
    # BUG FIX: the original removed elements from rrbStats while iterating
    # over it, which silently skips entries; filter into a new list instead.
    rrbStats = [rb for rb in rrbStats if rb["fantasyPoints"] != 0]
    neighbors = getNeighbors(inputRB, rrbStats)
    #SORT NEIGHBORS (ascending by similarity; ties duplicate, as before)
    sortedNeighbors = []
    for value in sorted(neighbors.values()):
        for key in neighbors.keys():
            if neighbors[key] == value:
                sortedNeighbors.append({key: value})
    #GET NEIGHBOR SOPHOMORE STATS AND COMPARE, for k = 5..40
    maxPredictiveRating = 0
    bestK = 0
    bestNeighborFPPG = 0
    k = 5
    while k <= 40:
        kNeighbors = sortedNeighbors[-k:]
        totalNeighborFP = 0
        totalNeighborGames = 0
        for neighbor in kNeighbors:
            stats = getSophomoreStats(srbStats, neighbor.keys()[0])
            if stats["fantasyPoints"] != 0:
                totalNeighborFP += stats["fantasyPoints"]
                totalNeighborGames += stats["gamesPlayed"]
        neighborFPPG = float(totalNeighborFP) / float(totalNeighborGames)
        inputRBSophomoreStats = getSophomoreStats(srbStats, rbID)
        predictiveRating = similarityScore(neighborFPPG, inputRBSophomoreStats["fantasyPointsPerGame"])
        if predictiveRating > maxPredictiveRating:
            maxPredictiveRating = predictiveRating
            bestK = k
            bestNeighborFPPG = neighborFPPG
        k += 1
    print(bestK)
    print(maxPredictiveRating)
    print(bestNeighborFPPG)
    print(inputRBSophomoreStats["fantasyPointsPerGame"])
    print(getSophomoreStats(srbStats, 4418))
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
import FWCore.ParameterSet.Config as cms
# Static channel-mapping tables. The four long vectors below are parallel
# arrays: entry i of each describes the same crystal.
# NOTE(review): the index pattern (runs of 1-5, 6-10, 21-25, ... up to 190)
# looks like a 10x10 crystal patch spanning two trigger towers — confirm
# against the detector geometry before relying on this interpretation.
# list of crystal indices
ics = cms.untracked.vint32(1, 2, 3, 4, 5,
                           6, 7, 8, 9, 10,
                           21, 22, 23, 24, 25,
                           26, 27, 28, 29, 30,
                           41, 42, 43, 44, 45,
                           46, 47, 48, 49, 50,
                           61, 62, 63, 64, 65,
                           66, 67, 68, 69, 70,
                           81, 82, 83, 84, 85,
                           86, 87, 88, 89, 90,
                           101, 102, 103, 104, 105,
                           106, 107, 108, 109, 110,
                           121, 122, 123, 124, 125,
                           126, 127, 128, 129, 130,
                           141, 142, 143, 144, 145,
                           146, 147, 148, 149, 150,
                           161, 162, 163, 164, 165,
                           166, 167, 168, 169, 170,
                           181, 182, 183, 184, 185,
                           186, 187, 188, 189, 190)
# list of tower IDs (DQM numbering scheme), one per crystal above
towerIDs = cms.untracked.vint32(1, 1, 1, 1, 1,
                                2, 2, 2, 2, 2,
                                1, 1, 1, 1, 1,
                                2, 2, 2, 2, 2,
                                1, 1, 1, 1, 1,
                                2, 2, 2, 2, 2,
                                1, 1, 1, 1, 1,
                                2, 2, 2, 2, 2,
                                1, 1, 1, 1, 1,
                                2, 2, 2, 2, 2,
                                5, 5, 5, 5, 5,
                                6, 6, 6, 6, 6,
                                5, 5, 5, 5, 5,
                                6, 6, 6, 6, 6,
                                5, 5, 5, 5, 5,
                                6, 6, 6, 6, 6,
                                5, 5, 5, 5, 5,
                                6, 6, 6, 6, 6,
                                5, 5, 5, 5, 5,
                                6, 6, 6, 6, 6)
# list of corresponding strip (VFE) numbers; note the 1..5 / 5..1 zig-zag
stripIDs = cms.untracked.vint32(1, 2, 3, 4, 5,
                                5, 4, 3, 2, 1,
                                1, 2, 3, 4, 5,
                                5, 4, 3, 2, 1,
                                1, 2, 3, 4, 5,
                                5, 4, 3, 2, 1,
                                1, 2, 3, 4, 5,
                                5, 4, 3, 2, 1,
                                1, 2, 3, 4, 5,
                                5, 4, 3, 2, 1,
                                1, 2, 3, 4, 5,
                                5, 4, 3, 2, 1,
                                1, 2, 3, 4, 5,
                                5, 4, 3, 2, 1,
                                1, 2, 3, 4, 5,
                                5, 4, 3, 2, 1,
                                1, 2, 3, 4, 5,
                                5, 4, 3, 2, 1,
                                1, 2, 3, 4, 5,
                                5, 4, 3, 2, 1)
# list of channel IDs within each strip
channelIDs = cms.untracked.vint32(1, 1, 1, 1, 1,
                                  1, 1, 1, 1, 1,
                                  2, 2, 2, 2, 2,
                                  2, 2, 2, 2, 2,
                                  3, 3, 3, 3, 3,
                                  3, 3, 3, 3, 3,
                                  4, 4, 4, 4, 4,
                                  4, 4, 4, 4, 4,
                                  5, 5, 5, 5, 5,
                                  5, 5, 5, 5, 5,
                                  1, 1, 1, 1, 1,
                                  1, 1, 1, 1, 1,
                                  2, 2, 2, 2, 2,
                                  2, 2, 2, 2, 2,
                                  3, 3, 3, 3, 3,
                                  3, 3, 3, 3, 3,
                                  4, 4, 4, 4, 4,
                                  4, 4, 4, 4, 4,
                                  5, 5, 5, 5, 5,
                                  5, 5, 5, 5, 5)
# list of status IDs
statusIDs = cms.untracked.vint32(1, 2, 3, 4)
# list of tower CCU IDs (parallel with statusIDs and positionIDs)
ccuIDs = cms.untracked.vint32(1, 71, 80, 45)
# list of tower DQM position IDs
positionIDs = cms.untracked.vint32(6, 2, 5, 1)
|
nilq/baby-python
|
python
|
import urllib.parse
import urllib.request
from doodledashboard.component import MissingRequiredOptionException, NotificationCreator, \
ComponentCreationException
from doodledashboard.filters.contains_text import ContainsTextFilter
from doodledashboard.filters.matches_regex import MatchesRegexFilter
from doodledashboard.notifications.image.file_downloader import FileDownloader
from doodledashboard.notifications.notification import Notification
from doodledashboard.notifications.outputs import ImageNotificationOutput
class ImageDependingOnMessageContent(Notification):
    """
    * First message that contains text that matches an image's filter
    """

    def __init__(self):
        super().__init__()
        self._filtered_images = []
        self._default_image_path = None
        self._chosen_image_path = None

    def add_image_filter(self, absolute_path, choice_filter=None):
        """Register an image; with no filter it becomes the default image."""
        if not choice_filter:
            self._default_image_path = absolute_path
        else:
            self._filtered_images.append({"path": absolute_path, "filter": choice_filter})

    def create_output(self, messages):
        """Pick an image based on the newest message.

        The last filter matching the newest message wins, and the choice is
        remembered across calls; with no match (or no messages) the default
        image, if any, is used.
        """
        if not messages:
            return ImageNotificationOutput(self._default_image_path) if self._default_image_path else None
        newest = messages[-1]
        for candidate in self._filtered_images:
            if candidate["filter"].filter(newest):
                self._chosen_image_path = candidate["path"]
        image_path = self._chosen_image_path or self._default_image_path
        return ImageNotificationOutput(image_path) if image_path else None

    @property
    def default_image(self):
        return self._default_image_path

    @property
    def filtered_images(self):
        return self._filtered_images

    def get_output_types(self):
        return [ImageNotificationOutput]

    def __str__(self):
        label = "ImageDependingOnMessageContent"
        if not self.name:
            return label
        return label + " (%s)" % self._name
class ImageDependingOnMessageContentCreator(NotificationCreator):
    """Builds ImageDependingOnMessageContent notifications from config options."""

    def __init__(self, file_downloader=None):
        """
        :param file_downloader: downloader used to fetch images; a fresh
            FileDownloader is created when omitted.

        BUG FIX: the original signature was ``file_downloader=FileDownloader()``,
        which evaluates the default once at class-definition time, so every
        creator constructed without an explicit downloader silently shared the
        same FileDownloader instance.
        """
        super().__init__()
        self._file_downloader = FileDownloader() if file_downloader is None else file_downloader

    @staticmethod
    def get_id():
        return "image-depending-on-message-content"

    def create(self, options, secret_store):
        """Build the notification from the 'images' and/or 'default-image' options.

        :raises MissingRequiredOptionException: if neither option is present,
            or an image entry lacks 'path' or a valid filter.
        :raises ImageUnavailable: if an image cannot be downloaded.
        """
        notification = ImageDependingOnMessageContent()
        has_images = "images" in options
        has_default_image = "default-image" in options
        if not has_images and not has_default_image:
            raise MissingRequiredOptionException("Expected 'images' list and/or default-image to exist")
        if has_default_image:
            image_url = self._encode_url(options["default-image"])
            image_path = self.download(image_url)
            notification.add_image_filter(image_path)
        if has_images:
            for image_config_section in options["images"]:
                if "path" not in image_config_section:
                    raise MissingRequiredOptionException("Expected 'path' option to exist")
                image_url = self._encode_url(image_config_section["path"])
                image_filter = self._create_filter(image_config_section)
                image_path = self.download(image_url)
                notification.add_image_filter(image_path, image_filter)
        return notification

    def download(self, url):
        """Download the image, wrapping any failure in ImageUnavailable."""
        try:
            return self._file_downloader.download(url)
        except Exception as err:
            raise ImageUnavailable(url, err)

    @staticmethod
    def _encode_url(full_url):
        """
        Encode invalid characters in URL to provide, such as spaces.
        This implements code from the following URLs
        https://bugs.python.org/issue14826
        https://hg.python.org/cpython/rev/ebd37273e0fe
        """
        return urllib.parse.quote(full_url, safe="%/:=&?~#+!$,;'@()*[]|")

    @staticmethod
    def _create_filter(image_config_section):
        """Build exactly one of the 'if-matches' (regex) or 'if-contains' filters."""
        pattern_exists = "if-matches" in image_config_section
        contains_exists = "if-contains" in image_config_section
        if not pattern_exists and not contains_exists:
            raise MissingRequiredOptionException("Expected either 'if-contains' or 'if-matches' option to exist")
        if pattern_exists and contains_exists:
            raise MissingRequiredOptionException("Expected either 'if-contains' or 'if-matches' option, but not both")
        if pattern_exists:
            return MatchesRegexFilter(image_config_section["if-matches"])
        else:
            return ContainsTextFilter(image_config_section["if-contains"])
class ImageUnavailable(ComponentCreationException):
    """Raised when an image cannot be downloaded; keeps the URL and cause."""

    def __init__(self, url, error):
        super().__init__("Error downloading '%s'" % url)
        self._url = url
        self._error = error

    @property
    def url(self):
        """The URL that failed to download."""
        return self._url

    @property
    def error(self):
        """The underlying exception raised by the downloader."""
        return self._error
|
nilq/baby-python
|
python
|
from asyncio import Future, ensure_future
from typing import Any, Callable
from websockets import WebSocketCommonProtocol as WebSocket
from .models import Notification
__all__ = [
'Notifier',
]
_Sender = Callable[[Notification], Future]
_Finalizer = Callable[[Future], Any]
class Notifier:
    """Sends notifications over a websocket, tracking in-flight futures."""

    def __init__(self, ws: WebSocket, sender: _Sender, finalizer: _Finalizer):
        self._ws = ws
        self._sender = sender
        self._finalizer = finalizer
        # Futures returned by send() that have not completed yet.
        self._pending = set()

    @property
    def closed(self) -> bool:
        return self._ws.closed

    def _done_callback(self, fut: Future):
        self._finalizer(fut)
        # FIX (idiom): discard() replaces the original check-then-remove pair —
        # one lookup, and a no-op if the future was already dropped.
        self._pending.discard(fut)

    def send(self, notification: Notification) -> Future:
        """Send one notification; on a non-open socket the returned future
        resolves via ensure_open() (surfacing the connection error) instead."""
        fut = self._sender(notification) if self._ws.open else ensure_future(self._ws.ensure_open())
        fut.add_done_callback(self._done_callback)
        self._pending.add(fut)
        return fut

    def cancel(self):
        """Cancel every pending future.

        FIX: iterate a snapshot — cancellation can run done callbacks that
        remove entries from self._pending, and mutating a set while iterating
        it raises RuntimeError (asyncio defers callbacks via call_soon, but a
        snapshot is safe regardless of the Future implementation).
        """
        for fut in list(self._pending):
            fut.cancel()
|
nilq/baby-python
|
python
|
"""Function for building a diatomic molecule."""
def create_diatomic_molecule_geometry(species1, species2, bond_length):
    """Create a molecular geometry for a diatomic molecule.

    The first atom sits at the origin; the second lies on the z-axis at the
    bond distance.

    Args:
        species1 (str): Chemical symbol of the first atom, e.g. 'H'.
        species2 (str): Chemical symbol of the second atom.
        bond_length (float): bond distance.

    Returns:
        dict: a dictionary containing the coordinates of the atoms.
    """
    first_site = {'species': species1, 'x': 0, 'y': 0, 'z': 0}
    second_site = {'species': species2, 'x': 0, 'y': 0, 'z': bond_length}
    return {"sites": [first_site, second_site]}
|
nilq/baby-python
|
python
|
from abc import abstractmethod
from typing import Iterable
from .common import PipelineContext, RecordEnvelope
class Transformer:
    """Base class for pipeline stages that rewrite a stream of record envelopes."""

    @abstractmethod
    def transform(
        self, record_envelopes: Iterable[RecordEnvelope]
    ) -> Iterable[RecordEnvelope]:
        """
        Transforms a sequence of records.
        :param record_envelopes: the records to be transformed
        :return: 0 or more transformed records
        """

    @classmethod
    @abstractmethod
    def create(cls, config_dict: dict, ctx: PipelineContext) -> "Transformer":
        # Alternate constructor: build a transformer from its raw config
        # section and the shared pipeline context.
        pass
|
nilq/baby-python
|
python
|
from pygame.surface import Surface, SurfaceType
from typing import Union, List, Tuple
from pygame.color import Color
# Gravity flags for positioning content; the values are powers of two, so they
# appear intended to be OR-combined (e.g. GRAVITY_RIGHT | GRAVITY_BOTTOM) —
# NOTE(review): confirm against the consuming layout code.
GRAVITY_LEFT = 0
GRAVITY_RIGHT = 1
GRAVITY_TOP = 0
GRAVITY_BOTTOM = 2
GRAVITY_CENTER_HORIZONTAL = 4
GRAVITY_CENTER_VERTICAL = 8
# Font style flags.
STYLE_NORMAL = 0
STYLE_BOLD = 1
STYLE_ITALIC = 2
# Mouse grab modes.
MOUSE_MODE_CONFINED = 1
MOUSE_MODE_CAPTURED = 2
# Type aliases: everything pygame accepts as a colour / surface argument.
_pyi_Color_type = Union[Color, str, Tuple[int, int, int], List[int], int, Tuple[int, int, int, int]]
_pyi_Surface_type = Union[Surface, SurfaceType]
|
nilq/baby-python
|
python
|
import collections
from reclist.abstractions import RecList, rec_test
from typing import List
import random
class CoveoCartRecList(RecList):
    """Behavioural test-suite (RecList) for cart recommendations on the Coveo
    dataset; each @rec_test method computes one metric over the stored splits."""

    @rec_test(test_type='stats')
    def basic_stats(self):
        """
        Basic statistics on training, test and prediction data
        """
        from reclist.metrics.standard_metrics import statistics
        return statistics(self._x_train,
                          self._y_train,
                          self._x_test,
                          self._y_test,
                          self._y_preds)

    @rec_test(test_type='price_homogeneity')
    def price_test(self):
        """
        Measures the absolute log ratio of ground truth and prediction price
        """
        from reclist.metrics.price_homogeneity import price_homogeneity_test
        # price_sel_fn maps a product row to its price, or None when unknown
        return price_homogeneity_test(y_test=self.sku_only(self._y_test),
                                      y_preds=self.sku_only(self._y_preds),
                                      product_data=self.product_data,
                                      price_sel_fn=lambda x: float(x['price_bucket'])
                                      if x['price_bucket']
                                      else None
                                      )

    @rec_test(test_type='Coverage@10')
    def coverage_at_k(self):
        """
        Coverage is the proportion of all possible products which the RS
        recommends based on a set of sessions
        """
        from reclist.metrics.standard_metrics import coverage_at_k
        return coverage_at_k(self.sku_only(self._y_preds),
                             self.product_data,
                             k=10)

    @rec_test(test_type='HR@10')
    def hit_rate_at_k(self):
        """
        Compute the rate in which the top-k predictions contain the item to be predicted
        """
        from reclist.metrics.standard_metrics import hit_rate_at_k
        return hit_rate_at_k(self.sku_only(self._y_preds),
                             self.sku_only(self._y_test),
                             k=10)

    @rec_test(test_type='hits_distribution')
    def hits_distribution(self):
        """
        Compute the distribution of hit-rate across product frequency in training data
        """
        from reclist.metrics.hits import hits_distribution
        return hits_distribution(self.sku_only(self._x_train),
                                 self.sku_only(self._x_test),
                                 self.sku_only(self._y_test),
                                 self.sku_only(self._y_preds),
                                 k=10,
                                 debug=True)

    @rec_test(test_type='distance_to_query')
    def dist_to_query(self):
        """
        Compute the distribution of distance from query to label and query to prediction
        """
        from reclist.metrics.distance_metrics import distance_to_query
        return distance_to_query(self.rec_model,
                                 self.sku_only(self._x_test),
                                 self.sku_only(self._y_test),
                                 self.sku_only(self._y_preds), k=10, bins=25, debug=True)

    def sku_only(self, l: List[List]):
        # Strip each session down to bare product SKUs for the metric helpers.
        return [[e['product_sku'] for e in s] for s in l]
class SpotifySessionRecList(RecList):
    """Behavioural test-suite (RecList) for next-track prediction on Spotify
    playlist sessions; each @rec_test method computes one metric."""

    @rec_test(test_type='basic_stats')
    def basic_stats(self):
        """
        Basic statistics on training, test and prediction data for Next Event Prediction
        """
        from reclist.metrics.standard_metrics import statistics
        return statistics(self._x_train,
                          self._y_train,
                          self._x_test,
                          self._y_test,
                          self._y_preds)

    @rec_test(test_type='HR@10')
    def hit_rate_at_k(self):
        """
        Compute the rate at which the top-k predictions contain the item to be predicted
        """
        from reclist.metrics.standard_metrics import hit_rate_at_k
        return hit_rate_at_k(self.uri_only(self._y_preds),
                             self.uri_only(self._y_test),
                             k=10)

    @rec_test(test_type='perturbation_test')
    def perturbation_at_k(self):
        """
        Compute average consistency in model predictions when inputs are perturbed
        """
        from reclist.metrics.perturbation import session_perturbation_test
        from collections import defaultdict
        from functools import partial
        # Step 1: Generate a map from artist uri to track uri
        substitute_mapping = defaultdict(list)
        for track_uri, row in self.product_data.items():
            substitute_mapping[row['artist_uri']].append(track_uri)

        # Step 2: define a custom perturbation function — replace the last
        # track with a different track by the same artist, when one exists
        def perturb(session, sub_map):
            last_item = session[-1]
            last_item_artist = self.product_data[last_item['track_uri']]['artist_uri']
            substitutes = set(sub_map.get(last_item_artist, [])) - {last_item['track_uri']}
            if substitutes:
                # BUG FIX: random.sample() on a set was deprecated in Python
                # 3.9 and raises TypeError from 3.11 onwards; sample from a
                # sorted list (sorting also makes the population order stable).
                similar_item = random.sample(sorted(substitutes), k=1)
                new_session = session[:-1] + [{"track_uri": similar_item[0]}]
                return new_session
            return []

        # Step 3: call test
        return session_perturbation_test(self.rec_model,
                                         self._x_test,
                                         self._y_preds,
                                         partial(perturb, sub_map=substitute_mapping),
                                         self.uri_only,
                                         k=10)

    @rec_test(test_type='shuffle_session')
    def perturbation_shuffle_at_k(self):
        """
        Compute average consistency in model predictions when inputs are re-ordered
        """
        from reclist.metrics.perturbation import session_perturbation_test

        # A full-length sample of a list is a uniform shuffle that leaves the
        # input untouched.
        def perturb(session):
            return random.sample(session, len(session))

        return session_perturbation_test(self.rec_model,
                                         self._x_test,
                                         self._y_preds,
                                         perturb,
                                         self.uri_only,
                                         k=10)

    @rec_test(test_type='hits_distribution_by_slice')
    def hits_distribution_by_slice(self):
        """
        Compute the distribution of hit-rate across various slices of data
        """
        from reclist.metrics.hits import hits_distribution_by_slice
        # Group test-set indices by playlist length ...
        len_map = collections.defaultdict(list)
        for idx, playlist in enumerate(self._x_test):
            len_map[len(playlist)].append(idx)
        # ... then bucket the lengths into width-5 slices: (0, 5], (5, 10], …
        slices = collections.defaultdict(list)
        bins = [(x * 5, (x + 1) * 5) for x in range(max(len_map) // 5 + 1)]
        for bin_min, bin_max in bins:
            for i in range(bin_min + 1, bin_max + 1, 1):
                slices[f'({bin_min}, {bin_max}]'].extend(len_map[i])
                del len_map[i]
        assert len(len_map) == 0  # every index must land in exactly one slice
        return hits_distribution_by_slice(slices,
                                          self.uri_only(self._y_test),
                                          self.uri_only(self._y_preds),
                                          debug=True)

    @rec_test(test_type='Coverage@10')
    def coverage_at_k(self):
        """
        Coverage is the proportion of all possible products which the RS
        recommends based on a set of sessions
        """
        from reclist.metrics.standard_metrics import coverage_at_k
        return coverage_at_k(self.uri_only(self._y_preds),
                             self.product_data,
                             # this contains all the track URIs from train and test sets
                             k=10)

    @rec_test(test_type='Popularity@10')
    def popularity_bias_at_k(self):
        """
        Compute average frequency of occurrence across recommended items in training data
        """
        from reclist.metrics.standard_metrics import popularity_bias_at_k
        return popularity_bias_at_k(self.uri_only(self._y_preds),
                                    self.uri_only(self._x_train),
                                    k=10)

    @rec_test(test_type='MRR@10')
    def mrr_at_k(self):
        """
        MRR calculates the mean reciprocal of the rank at which the first
        relevant item was retrieved
        """
        from reclist.metrics.standard_metrics import mrr_at_k
        return mrr_at_k(self.uri_only(self._y_preds),
                        self.uri_only(self._y_test))

    def uri_only(self, playlists: List[dict]):
        # Strip each playlist down to bare track URIs for the metric helpers.
        return [[track['track_uri'] for track in playlist] for playlist in playlists]
class MovieLensSimilarItemRecList(RecList):
    """Behavioural test-suite (RecList) for similar-item recommendation on
    MovieLens; each @rec_test method computes one metric over the splits."""

    @rec_test(test_type="stats")
    def basic_stats(self):
        """
        Basic statistics on training, test and prediction data
        """
        from reclist.metrics.standard_metrics import statistics
        return statistics(
            self._x_train,
            self._y_train,
            self._x_test,
            self._y_test,
            self._y_preds
        )

    @rec_test(test_type='HR@10')
    def hit_rate_at_k(self):
        """
        Compute the rate at which the top-k predictions contain the movie to be predicted
        """
        from reclist.metrics.standard_metrics import hit_rate_at_k
        return hit_rate_at_k(
            self.movie_only(self._y_preds),
            self.movie_only(self._y_test),
            k=10
        )

    @rec_test(test_type='Coverage@10')
    def coverage_at_k(self):
        """
        Coverage is the proportion of all possible movies which the RS
        recommends based on a set of movies and their respective ratings
        """
        from reclist.metrics.standard_metrics import coverage_at_k
        return coverage_at_k(
            self.movie_only(self._y_preds),
            self.product_data,
            k=10
        )

    @rec_test(test_type='hits_distribution')
    def hits_distribution(self):
        """
        Compute the distribution of hit-rate across movie frequency in training data
        """
        from reclist.metrics.hits import hits_distribution
        return hits_distribution(
            self.movie_only(self._x_train),
            self.movie_only(self._x_test),
            self.movie_only(self._y_test),
            self.movie_only(self._y_preds),
            k=10,
            debug=True
        )

    @rec_test(test_type="hits_distribution_by_rating")
    def hits_distribution_by_rating(self):
        """
        Compute the distribution of hit-rate across movie ratings in testing data
        """
        from reclist.metrics.hits import hits_distribution_by_rating
        # Unlike the other metrics this one needs the ratings, so the raw
        # (un-stripped) test/prediction structures are passed through.
        return hits_distribution_by_rating(
            self._y_test,
            self._y_preds,
            debug=True
        )

    def movie_only(self, movies):
        # Strip each entry down to bare movie ids for the metric helpers.
        return [[x["movieId"] for x in y] for y in movies]
|
nilq/baby-python
|
python
|
"""
Method Resolution Order (MRO)
MRO é a ordem de execução dos métodos, ou seja quem será executado primeiro.
MRO tem 3 formas:
- Via propriedade da classe
- Via método MRO()
- Via help
Polimorfismo - Objetos que podem se comportar de diferentes formas
"""
class Animal:
    """Base class: stores the animal's name privately and defines the
    speaking/eating interface."""

    def __init__(self, nome):
        # Name-mangled to _Animal__nome, so the attribute is not reachable
        # as `self.__nome` from subclass method bodies.
        self.__nome = nome

    def falar(self):
        # Subclasses must override this; calling it on the base raises.
        raise NotImplementedError('A classe filha precisa implementar este método')

    def comer(self):
        print(f'{self.__nome} esta comendo')
class Cachorro(Animal):
    """Dog: speaks 'wau wau'."""

    def __init__(self, nome):
        super().__init__(nome)
        # BUG FIX: in falar(), `self.__nome` mangles to `_Cachorro__nome`,
        # but Animal stores the name as `_Animal__nome`, so falar() raised
        # AttributeError. Keep this class's own private copy of the name.
        self.__nome = nome

    def falar(self):
        print(f'{self.__nome} fala wau wau')
class Gato(Animal):
    """Cat: speaks 'miau'."""

    def __init__(self, nome):
        super().__init__(nome)
        # BUG FIX: in falar(), `self.__nome` mangles to `_Gato__nome`, but
        # Animal stores the name as `_Animal__nome`, so falar() raised
        # AttributeError. Keep this class's own private copy of the name.
        self.__nome = nome

    def falar(self):
        print(f'{self.__nome} fala miau maiu')
class Formiga(Animal):
    """Ant: speaks something."""

    def __init__(self, nome):
        super().__init__(nome)
        # BUG FIX: in falar(), `self.__nome` mangles to `_Formiga__nome`, but
        # Animal stores the name as `_Animal__nome`, so falar() raised
        # AttributeError. Keep this class's own private copy of the name.
        self.__nome = nome

    def falar(self):
        print(f'{self.__nome} fala algo')
|
nilq/baby-python
|
python
|
"""
[2014-11-19] Challenge #189 [Intermediate] Roman Numeral Conversion
https://www.reddit.com/r/dailyprogrammer/comments/2ms946/20141119_challenge_189_intermediate_roman_numeral/
Your friend is an anthropology major who is studying roman history. They have never been able to quite get a handle for
roman numerals and how to read them, so they've asked you to come up with a simple program that will let them input
some numbers and return roman numerals, as well as the opposite, to input roman numerals and return base-10 numbers.
They are bribing you with Indiana Jones memorabilia, so you are totally up for the challenge!
#Description
Most people learn about roman numerals at a young age. If you look at many analog clocks, you will find that many of
them actually use roman numerals for the numbers. Roman numerals do not just stop at 12 though, they actually can
represent numbers as high as 4999 using their most basic form.
The challenge, is to create a program that will allow you to convert decimal (base-10) numbers to roman numerals as
well as roman numerals to decimal numbers. The history of roman numerals is a bit debated because of their varied use
throughout history and a seeming lack of a standard definition. Some rules are well accepted and some less-so. Here are
the guidelines for your implementation:
| I | V | X | L | C | D | M |
|:---|:---|----:|:---|:------|:----|:---|
| 1 | 5 | 10 | 50 | 100 |500 |1000
#Rules
You cannot repeat the same roman numeral more than three times in a row, except for M, which can be added up to four
times. (Note: Some descriptions of roman numerals allows for IIII to represent 4 instead of IV. For the purposes of
this exercise, that is not allowed.)
When read from left to right, if successive roman numerals decrease or stay the same in value, you add them to the
total sum.
When read from left to right, if successive roman numerals increase in value, you subtract the smaller value from the
larger one and add the result to the total sum.
#Restrictions
I can only be subtracted from V or X
X can only be subtracted from L or C
C can only be subtracted from D or M
Only one smaller value can be subtracted from a following larger value. (e.g. 'IIX' would be an invalid way to
represent the number 8)
#Examples
XII = 10 + 1 + 1 = 12
MDCCLXXVI = 1000 + 500 + 100 + 100 + 50 + 10 + 10 + 5 + 1 = 1776
IX = "1 from 10" = 10 - 1 = 9
XCIV = "10 from 100" + "1 from 5" = (100 - 10) + (5 - 1) = 90 + 4 = 94
#Inputs & Outputs
Your program should be able to accept numbers in either integer or roman numeral format to return the other. You may
want to add validation checks on the input.
When converting to a roman numeral, the maximum number is 4999.
When converting from a roman numeral, I,V,X,L,C,D,M are the only valid characters.
You should be able to accept one or many numbers or numerals and convert to the other direction.
#Challenge
Some historical accounts state that roman numerals could actually go much higher than 4999. There are incredibly varied
explanations and syntactical requirements for them. Some state that an over-line (vinculum) would be used over a number
to multiply it by 1000, some say that you would put a curved line on either side of a number to multiply it by 1000.
For the challenge, see if you can add support to your code to allow parenthesis to encapsulate parts of a number that
can be multiplied by one thousand. You can nest parenthesis as well to allow for numbers that are incredibly large.
#Restriction
The last roman numeral digit inside a set of parenthesis can not be an "I". There are two reasons for this (1) because
historical accounts claimed that confusion would happen with the curved lines that encapsulate a number to be
multiplied by one thousand and (2) because the easiest way to validate your numbers is with Wolfram Alpha and they do
not allow it either.
#Examples
(V)M = 5*1000 + 1000 = 6000
(X)MMCCCXLV = 10*1000 + 1000 + 1000 + 100 + 100 + 100 + (50 - 10) + 5 = 10000 + 2000 + 300 + 40 + 5 = 12345
((XV)M)DCC = ((10 + 5) * 1000 + 1000) * 1000 + 500 + 100 + 100 = (15000 + 1000) * 1000 + 1700 = 16000000 + 1700 =
16001700
#Hints
You can visit Wolfram Alpha to validate some of your numbers if you are having any trouble.
http://www.wolframalpha.com/input/?i=314+in+roman+numerals
#Sample Data
##Basic
IV = 4
XXXIV = 34
CCLXVII = 267
DCCLXIV = 764
CMLXXXVII = 987
MCMLXXXIII = 1983
MMXIV = 2014
MMMM = 4000
MMMMCMXCIX = 4999
##Challenge
(V) = 5000
(V)CDLXXVIII = 5478
(V)M = 6000
(IX) = 9000
(X)M = 11000
(X)MM = 12000
(X)MMCCCXLV = 12345
(CCCX)MMMMCLIX = 314159
(DLXXV)MMMCCLXVII = 578267
(MMMCCXV)CDLXVIII = 3215468
(MMMMCCX)MMMMCDLXVIII = 4214468
(MMMMCCXV)CDLXVIII = 4215468
(MMMMCCXV)MMMCDLXVIII = 4218468
(MMMMCCXIX)CDLXVIII = 4219468
((XV)MDCCLXXV)MMCCXVI = 16777216
((CCCX)MMMMCLIX)CCLXV = 314159265
((MLXX)MMMDCCXL)MDCCCXXIV = 1073741824
#Finally
Have a good challenge idea?
Consider submitting it to /r/dailyprogrammer_ideas
Thanks to /u/pshatmsft for the submission!
"""
# Value/symbol pairs in descending order, including the subtractive forms,
# so a greedy scan produces canonical numerals (IV, not IIII).
_ROMAN_VALUES = [
    (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
    (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
    (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'),
]

# Single-letter numeral values, used when parsing.
_NUMERAL_VALUES = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}


def int_to_roman(number):
    """Convert *number* (1..4999) to its roman numeral string.

    Uses the standard subtractive notation; 4000-4999 are written with a
    leading 'MMMM' as the challenge allows (e.g. 4999 -> 'MMMMCMXCIX').

    Raises:
        ValueError: if *number* is outside the representable range.
    """
    if not 1 <= number <= 4999:
        raise ValueError('number must be between 1 and 4999')
    parts = []
    remaining = number
    for value, numeral in _ROMAN_VALUES:
        count, remaining = divmod(remaining, value)
        parts.append(numeral * count)
    return ''.join(parts)


def roman_to_int(numeral):
    """Convert a roman numeral string to its integer value.

    Scans right-to-left: a symbol smaller than the largest symbol seen so
    far is subtracted (e.g. the 'I' in 'IX'), otherwise it is added.

    Raises:
        ValueError: if *numeral* contains a character other than IVXLCDM.
    """
    total = 0
    prev = 0
    for char in reversed(numeral.upper()):
        try:
            value = _NUMERAL_VALUES[char]
        except KeyError:
            raise ValueError(f'invalid roman numeral character: {char!r}')
        if value < prev:
            total -= value
        else:
            total += value
            prev = value
    return total


def main():
    """Convert each command-line argument between decimal and roman.

    Digit-only arguments are converted to roman numerals; anything else
    is parsed as a roman numeral and converted to decimal.
    """
    import sys  # local import: keeps the module import-free as before
    for arg in sys.argv[1:]:
        if arg.isdigit():
            print(f'{arg} = {int_to_roman(int(arg))}')
        else:
            print(f'{arg} = {roman_to_int(arg)}')


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass
from datetime import datetime
from gql.gql.datetime_utils import DATETIME_FIELD
from gql.gql.graphql_client import GraphqlClient
from functools import partial
from numbers import Number
from typing import Any, Callable, List, Mapping, Optional
from dataclasses_json import DataClassJsonMixin
from .equipment_port_type_fragment import EquipmentPortTypeFragment, QUERY as EquipmentPortTypeFragmentQuery
QUERY: List[str] = EquipmentPortTypeFragmentQuery + ["""
query EquipmentPortTypeQuery($id: ID!) {
port_type: node(id: $id) {
... on EquipmentPortType {
...EquipmentPortTypeFragment
}
}
}
"""]
@dataclass
class EquipmentPortTypeQuery(DataClassJsonMixin):
    # NOTE(review): this file is autogenerated ("Do not Change!"); the
    # comments below are review annotations only — regenerate instead of
    # hand-editing the code.
    @dataclass
    class EquipmentPortTypeQueryData(DataClassJsonMixin):
        @dataclass
        class Node(EquipmentPortTypeFragment):
            # All fields are inherited from EquipmentPortTypeFragment.
            pass
        # None when the queried id does not resolve to an EquipmentPortType.
        port_type: Optional[Node]
    data: EquipmentPortTypeQueryData
    @classmethod
    # fmt: off
    def execute(cls, client: GraphqlClient, id: str) -> EquipmentPortTypeQueryData:
        # fmt: off
        variables = {"id": id}
        # NOTE(review): ''.join(set(QUERY)) de-duplicates repeated fragment
        # strings, but set iteration order is arbitrary — presumably GraphQL
        # fragment order is irrelevant here; confirm against the generator.
        response_text = client.call(''.join(set(QUERY)), variables=variables)
        return cls.from_json(response_text).data
|
nilq/baby-python
|
python
|
# from .base import Base
#
# def initDB(engine):
# metadata = Base.metadata
# metadata.create_all(engine)
# print ('Database structure created')
#
#
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import time
import json
import requests
import argparse
import lxml.html
import io
from lxml.cssselect import CSSSelector
YOUTUBE_COMMENTS_URL = 'https://www.youtube.com/all_comments?v={youtube_id}'
YOUTUBE_COMMENTS_AJAX_URL = 'https://www.youtube.com/comment_ajax'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'
def find_value(html, key, num_chars=2):
    """Extract the double-quoted value that follows *key* in raw HTML.

    *num_chars* is the number of characters between the end of the key
    and the first character of the value (default 2, i.e. '="').
    """
    value_start = html.find(key) + len(key) + num_chars
    value_end = html.find('"', value_start)
    return html[value_start:value_end]
def extract_comments(html):
    """Yield one dict per comment found in a rendered comments page.

    Each dict carries the comment id, body text, timestamp string and
    author display name.
    """
    root = lxml.html.fromstring(html)
    select_items = CSSSelector('.comment-item')
    select_text = CSSSelector('.comment-text-content')
    select_time = CSSSelector('.time')
    select_author = CSSSelector('.user-name')
    for node in select_items(root):
        yield {
            'cid': node.get('data-cid'),
            'text': select_text(node)[0].text_content(),
            'time': select_time(node)[0].text_content().strip(),
            'author': select_author(node)[0].text_content(),
        }
def extract_reply_cids(html):
    """Return the ids of comments that have a 'load more replies' link."""
    selector = CSSSelector('.comment-replies-header > .load-comments')
    root = lxml.html.fromstring(html)
    return [node.get('data-cid') for node in selector(root)]
def ajax_request(session, url, params, data, retries=10, sleep=20):
    """POST to the comments AJAX endpoint, retrying on non-200 responses.

    Returns a ``(page_token, html_content)`` tuple on success, or None
    once every retry has failed.
    """
    for _ in range(retries):
        response = session.post(url, params=params, data=data)
        if response.status_code != 200:
            time.sleep(sleep)
            continue
        payload = json.loads(response.text)
        return payload.get('page_token', None), payload['html_content']
def download_comments(youtube_id, sleep=1):
    """Yield every comment (and reply) for *youtube_id* as dicts.

    Scrapes the legacy all_comments HTML page, then pages through the
    AJAX endpoint like the 'Show more' button would, sleeping *sleep*
    seconds between requests. NOTE(review): depends on the legacy
    YouTube endpoints and markup; verify they still exist.
    """
    session = requests.Session()
    session.headers['User-Agent'] = USER_AGENT
    # Get Youtube page with initial comments
    response = session.get(YOUTUBE_COMMENTS_URL.format(youtube_id=youtube_id))
    html = response.text
    reply_cids = extract_reply_cids(html)
    ret_cids = []  # ids already yielded, used to de-duplicate across pages
    for comment in extract_comments(html):
        ret_cids.append(comment['cid'])
        yield comment
    # Pagination/session tokens are embedded in the initial HTML.
    page_token = find_value(html, 'data-token')
    session_token = find_value(html, 'XSRF_TOKEN', 4)
    first_iteration = True
    # Get remaining comments (the same as pressing the 'Show more' button)
    while page_token:
        data = {'video_id': youtube_id,
                'session_token': session_token}
        params = {'action_load_comments': 1,
                  'order_by_time': True,
                  'filter': youtube_id}
        if first_iteration:
            params['order_menu'] = True
        else:
            data['page_token'] = page_token
        response = ajax_request(session, YOUTUBE_COMMENTS_AJAX_URL, params, data)
        if not response:
            # ajax_request returns None when all retries failed.
            break
        page_token, html = response
        reply_cids += extract_reply_cids(html)
        for comment in extract_comments(html):
            if comment['cid'] not in ret_cids:
                ret_cids.append(comment['cid'])
                yield comment
        first_iteration = False
        time.sleep(sleep)
    # Get replies (the same as pressing the 'View all X replies' link)
    for cid in reply_cids:
        data = {'comment_id': cid,
                'video_id': youtube_id,
                'can_reply': 1,
                'session_token': session_token}
        params = {'action_load_replies': 1,
                  'order_by_time': True,
                  'filter': youtube_id,
                  'tab': 'inbox'}
        response = ajax_request(session, YOUTUBE_COMMENTS_AJAX_URL, params, data)
        if not response:
            break
        _, html = response
        for comment in extract_comments(html):
            if comment['cid'] not in ret_cids:
                ret_cids.append(comment['cid'])
                yield comment
        time.sleep(sleep)
def main(argv):
    """CLI entry point: parse *argv* and dump comments as JSON lines."""
    parser = argparse.ArgumentParser(add_help=False, description=('Download Youtube comments without using the Youtube API'))
    parser.add_argument('--help', '-h', action='help', default=argparse.SUPPRESS, help='Show this help message and exit')
    parser.add_argument('--youtubeid', '-y', help='ID of Youtube video for which to download the comments')
    parser.add_argument('--output', '-o', help='Output filename (output format is line delimited JSON)')
    parser.add_argument('--limit', '-l', type=int, help='Limit the number of comments')
    try:
        args = parser.parse_args(argv)
        youtube_id = args.youtubeid
        output = args.output
        limit = args.limit
        if not youtube_id or not output:
            parser.print_usage()
            raise ValueError('you need to specify a Youtube ID and an output filename')
        print('Downloading Youtube comments for video:', youtube_id)
        count = 0
        # One JSON object per line (JSON Lines); utf8 so non-ASCII survives.
        with io.open(output, 'w', encoding='utf8') as fp:
            for comment in download_comments(youtube_id):
                print(json.dumps(comment, ensure_ascii=False), file=fp)
                count += 1
                # \r keeps the progress counter on a single console line.
                sys.stdout.write('Downloaded %d comment(s)\r' % count)
                sys.stdout.flush()
                if limit and count >= limit:
                    break
        print('\nDone!')
    except Exception as e:
        # Broad catch is deliberate for a CLI: report the error and exit
        # with a non-zero status instead of a traceback.
        print('Error:', str(e))
        sys.exit(1)
if __name__ == "__main__":
    main(sys.argv[1:])
|
nilq/baby-python
|
python
|
from django.db import models
# Create your models here.
from django.db import models
from blog.models import Artikel
from django.contrib.auth.models import User
class Comment(models.Model):
    # A user comment attached to a blog Artikel.
    # The comment body.
    message = models.TextField()
    # NOTE(review): the *_username / id_* CharFields look like denormalised
    # copies of data reachable through the ForeignKeys below — confirm with
    # the views that populate them before relying on either source.
    artikel_creator_username = models.CharField(max_length=200, null=True, blank=True)
    # Article this comment belongs to; deleting the article deletes its comments.
    artikel_creator = models.ForeignKey(Artikel, on_delete=models.CASCADE, blank = True, null = True)
    # User who wrote the comment; deleting the user deletes their comments.
    comment_creator = models.ForeignKey(User, on_delete=models.CASCADE, blank = True, null = True)
    # Stored as a string, not a DateTimeField — presumably formatted by the
    # caller; verify before sorting/filtering on it.
    created_at = models.CharField(max_length=50,null=True, blank=True)
    comment_creator_username = models.CharField(max_length=200, null=True, blank=True)
    id_forum = models.CharField(max_length=200, null=True, blank=True)
    id_user = models.CharField(max_length=200, null=True, blank=True)
|
nilq/baby-python
|
python
|
import json
from http import HTTPStatus
from unittest.mock import patch
from bridges.tests.api.basic_test import BasicTest
DUMMY_USER_FULL_NAME = 'John Doe'
DUMMY_USER_EMAIL = 'john.doe@company.com'
class GetWhoAmITest(BasicTest):
    """
    Class to test the info/whoami endpoint.
    """

    @patch('bridges.api.endpoints.info.get_user_name_and_email_from_session',
           return_value={'userFullName': DUMMY_USER_FULL_NAME, 'userEmail': DUMMY_USER_EMAIL})
    def test_empty(self, _):
        """whoami returns the session user's full name and email."""
        future = self.make_future_get_request("info/whoami")
        http_response = future()
        self.assertEqual(http_response.status_code, HTTPStatus.OK)
        data = json.loads(http_response.get_data(as_text=True))
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual everywhere, matching the status-code check above.
        self.assertEqual(DUMMY_USER_FULL_NAME, data['userFullName'])
        self.assertEqual(DUMMY_USER_EMAIL, data['userEmail'])
|
nilq/baby-python
|
python
|
# (C) Copyright 1996-2016 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
# NOTE(review): this is a Magics training/exercise template — the '????'
# tokens are placeholders the student must fill in, so the file is NOT
# valid Python as-is. Annotations only; no code changed.
#importing Magics module
from ???? import *
#Setting of the output file name
files = output({"output_formats":['ps', 'png'],
                'output_name':'cloud_cover_asia'})
#Setting the coordinates of the geographical area
asia = mmap({ ???})
#Coastlines setting
coast = mcoast( {???})
#Import the cloud data
cloud_cover = mgrib({ "grib_input_file_name" : "cloud_cover.grb" })
# 64-entry HSL palette used for cell shading of combined cloud layers.
colour_list= ['HSL(0,0,1)','HSL(29,0.14,0.92)',
    'HSL(29,0.29,0.83)','HSL(29,0.43,0.75)','HSL(300,0.08,0.92)',
    'HSL(360,0.16,0.84)','HSL(13,0.3,0.75)','HSL(18,0.44,0.67)',
    'HSL(300,0.16,0.83)','HSL(340,0.22,0.75)','HSL(360,0.34,0.67)',
    'HSL(8,0.47,0.58)','HSL(300,0.24,0.75)','HSL(330,0.28,0.67)',
    'HSL(349,0.38,0.58)','HSL(360,0.5,0.5)','HSL(180,0.17,0.92)',
    'HSL(120,0.08,0.84)','HSL(57,0.17,0.75)','HSL(44,0.3,0.67)',
    'HSL(209,0.14,0.84)','HSL(187,0,0.75)','HSL(29,0.15,0.67)',
    'HSL(29,0.29,0.59)','HSL(239,0.16,0.75)','HSL(299,0.08,0.67)',
    'HSL(360,0.17,0.58)','HSL(13,0.3,0.5)','HSL(258,0.21,0.67)',
    'HSL(299,0.16,0.59)','HSL(341,0.22,0.5)','HSL(360,0.33,0.42)',
    'HSL(180,0.34,0.83)','HSL(161,0.22,0.75)','HSL(120,0.16,0.67)',
    'HSL(78,0.21,0.58)','HSL(193,0.3,0.75)','HSL(180,0.17,0.67)',
    'HSL(120,0.08,0.58)','HSL(59,0.16,0.5)','HSL(209,0.29,0.67)',
    'HSL(209,0.15,0.58)','HSL(217,0,0.5)','HSL(29,0.14,0.42)',
    'HSL(224,0.3,0.58)','HSL(237,0.17,0.5)','HSL(299,0.08,0.42)',
    'HSL(360,0.16,0.33)','HSL(180,0.5, 0.75)','HSL(169,0.38,0.67)',
    'HSL(150,0.28,0.58)','HSL(120,0.24,0.5)','HSL(188,0.47,0.67)',
    'HSL(180,0.34,0.59)','HSL(160,0.22,0.5)','HSL(120,0.16,0.42)',
    'HSL(198,0.44,0.58)','HSL(193,0.3,0.5)','HSL(180,0.17,0.42)',
    'HSL(120,0.08,0.33)','HSL(209,0.43,0.5)','HSL(209,0.29,0.42)',
    'HSL(209,0.14,0.33)','HSL(191,0,0.25)']
#Define the cloud cover
cloud_cover_contour = mcont({
    ????
    'contour_shade_technique': 'cell_shading',
    'contour_shade_colour_method': 'list',
    'contour_shade_colour_list': colour_list,
    })
# Legend entries: (palette colour, label) pairs for the title line below.
texts = [ [colour_list[3], "Low"],
          [colour_list[15], "L+M"],
          [colour_list[12], "Medium"],
          [colour_list[60], "M+H"],
          [colour_list[48], "High"],
          [colour_list[51], "H+L"]
        ]
line = " "
for t in texts:
    line = line + "<font colour='" + t[0] + "'> " + t[1] + " </font>"
#Here we configure the title and add some colours
lines = ["Cloud cover valid for <grib_info key='valid-date'/>",
         line
        ]
title = mtext({
    "text_lines" : lines,
    ?????})
#To the plot
??????
|
nilq/baby-python
|
python
|
"""
This class is responsible for storing all the information about the current state of a chess game.
It will also be responsible for determining the valid moves at the current state.
It will also keep keep a move log.
"""
class GameState():
    """Complete state of a chess game.

    Tracks the board, whose turn it is, the king locations and a move
    log (so moves can be undone), and generates the valid moves for the
    side to move.
    """

    def __init__(self):
        # board is an 8x8 2d list, each element of the list has 2 characters.
        # The first character represents the color of the piece, 'b' or 'w'.
        # The second character represents the type of the piece:
        # 'K', 'Q', 'R', 'B', 'N' or 'p'. "--" represents an empty square.
        self.board = [
            ["bR", "bN", "bB", "bQ", "bK", "bB", "bN", "bR"],
            ["bp", "bp", "bp", "bp", "bp", "bp", "bp", "bp"],
            ["--", "--", "--", "--", "--", "--", "--", "--"],
            ["--", "--", "--", "--", "--", "--", "--", "--"],
            ["--", "--", "--", "--", "--", "--", "--", "--"],
            ["--", "--", "--", "--", "--", "--", "--", "--"],
            ["wp", "wp", "wp", "wp", "wp", "wp", "wp", "wp"],
            ["wR", "wN", "wB", "wQ", "wK", "wB", "wN", "wR"]]
        # Dispatch table: piece letter -> move generator for that piece.
        self.moveFunctions = {'p': self.getPawnMoves, 'R': self.getRookMoves, 'N': self.getKnightMoves,
                              'B': self.getBishopMoves, 'Q': self.getQueenMoves, 'K': self.getKingMoves}
        self.whiteToMove = True
        self.moveLog = []
        self.whiteKingLocation = (7, 4)
        self.blackKingLocation = (0, 4)
        self.checkMate = False  # the king has no valid moves and is in check
        self.staleMate = False  # the king has no valid moves and is not in check

    def makeMove(self, move):
        """Take a Move and execute it.

        Does not handle castling, pawn promotion to pieces other than a
        queen, or en passant.
        """
        self.board[move.startRow][move.startCol] = "--"  # leave square of moved piece blank
        self.board[move.endRow][move.endCol] = move.pieceMoved
        self.moveLog.append(move)  # log the move so we can undo it later or display the history
        self.whiteToMove = not self.whiteToMove  # swap players
        # update the king's location if moved
        if move.pieceMoved == 'wK':
            self.whiteKingLocation = (move.endRow, move.endCol)
        elif move.pieceMoved == 'bK':
            self.blackKingLocation = (move.endRow, move.endCol)
        # pawn promotion: always promote to a queen of the same colour
        if move.isPawnPromotion:
            self.board[move.endRow][move.endCol] = move.pieceMoved[0] + 'Q'

    def undoMove(self):
        """Undo the last move made, restoring board, turn and king location."""
        if len(self.moveLog) != 0:  # make sure that there is a move to undo
            move = self.moveLog.pop()
            self.board[move.startRow][move.startCol] = move.pieceMoved
            self.board[move.endRow][move.endCol] = move.pieceCaptured
            self.whiteToMove = not self.whiteToMove  # switch turns back
            # updates the kings board position
            if move.pieceMoved == 'wK':
                self.whiteKingLocation = (move.startRow, move.startCol)
            elif move.pieceMoved == 'bK':
                self.blackKingLocation = (move.startRow, move.startCol)
            # BUG FIX: this previously assigned checkMate = False twice and
            # never cleared staleMate, so the stalemate flag stayed stuck
            # after undoing out of a stalemate position.
            self.checkMate = False
            self.staleMate = False

    def getValidMoves(self):
        """Return all legal moves for the side to move (considering checks)."""
        # 1) generate all possible moves
        moves = self.getAllPossibleMoves()
        # 2) for each move, make the move; iterate backwards so removing
        #    an element does not shift the indices still to be visited
        for i in range(len(moves)-1, -1, -1):
            self.makeMove(moves[i])
            # 3) generate all opponents moves
            # 4) for each of your opponents moves, see if they attack your king
            self.whiteToMove = not self.whiteToMove
            if self.inCheck():
                moves.remove(moves[i])  # 5) if they do attack your king, not a valid move
            self.whiteToMove = not self.whiteToMove
            self.undoMove()
        if len(moves) == 0:  # either checkmate or stalemate, there are 0 valid moves left
            if self.inCheck():
                self.checkMate = True
            else:
                self.staleMate = True
        else:
            self.checkMate = False
            self.staleMate = False
        return moves

    def inCheck(self):
        """Determine if the current player is in check."""
        if self.whiteToMove:
            return self.squareUnderAttack(self.whiteKingLocation[0], self.whiteKingLocation[1])
        else:
            return self.squareUnderAttack(self.blackKingLocation[0], self.blackKingLocation[1])

    def squareUnderAttack(self, r, c):
        """Determine if the enemy can attack the square r, c."""
        self.whiteToMove = not self.whiteToMove  # switch to opponent's turn
        oppMoves = self.getAllPossibleMoves()
        for move in oppMoves:
            if move.endRow == r and move.endCol == c:  # the square is attacked
                self.whiteToMove = not self.whiteToMove
                return True
        self.whiteToMove = not self.whiteToMove  # switch turns back
        return False

    def getAllPossibleMoves(self):
        """Return all moves for the side to move WITHOUT considering checks."""
        moves = []
        for r in range(len(self.board)):  # number of rows
            for c in range(len(self.board[r])):  # number of cols in given row
                turn = self.board[r][c][0]  # colour prefix: 'w', 'b' or '-'
                if (turn == 'w' and self.whiteToMove) or (turn == 'b' and not self.whiteToMove):
                    piece = self.board[r][c][1]
                    self.moveFunctions[piece](r, c, moves)  # dispatch on piece type
        return moves

    def getPawnMoves(self, r, c, moves):
        """Append all pawn moves from square (r, c) to *moves*."""
        if self.whiteToMove:  # white pawn moves (towards row 0)
            if self.board[r-1][c] == "--":  # 1 square pawn advance
                moves.append(Move((r,c), (r-1,c), self.board))
                if r == 6 and self.board[r-2][c] == "--":  # 2 square pawn advance
                    moves.append(Move((r,c), (r-2,c), self.board))
            if c-1 >= 0:
                if self.board[r-1][c-1][0] == 'b':  # enemy piece to capture to the left
                    moves.append(Move((r,c), (r-1,c-1), self.board))
            if c+1 <= 7:
                if self.board[r-1][c+1][0] == 'b':  # enemy piece to capture to the right
                    moves.append(Move((r,c), (r-1,c+1), self.board))
        else:  # black pawn moves (towards row 7)
            if self.board[r+1][c] == "--":  # 1 square pawn advance
                moves.append(Move((r,c), (r+1,c), self.board))
                if r == 1 and self.board[r+2][c] == "--":  # 2 square pawn advance
                    moves.append(Move((r,c), (r+2,c), self.board))
            if c-1 >= 0:
                if self.board[r+1][c-1][0] == 'w':  # enemy piece to capture to the left
                    moves.append(Move((r,c), (r+1,c-1), self.board))
            if c+1 <= 7:
                if self.board[r+1][c+1][0] == 'w':  # enemy piece to capture to the right
                    moves.append(Move((r,c), (r+1,c+1), self.board))

    def getRookMoves(self, r, c, moves):
        """Append all rook moves from square (r, c) to *moves*."""
        directions = ((-1,0), (0,-1), (1,0), (0,1))  # up, left, down, right
        enemyColor = "b" if self.whiteToMove else "w"
        for d in directions:
            for i in range(1,8):
                endRow = r + d[0] * i
                endCol = c + d[1] * i
                if 0 <= endRow < 8 and 0 <= endCol < 8:  # on board
                    endPiece = self.board[endRow][endCol]
                    if endPiece == "--":  # empty space valid
                        moves.append(Move((r,c), (endRow,endCol), self.board))
                    elif endPiece[0] == enemyColor:  # enemy piece valid, but stop sliding
                        moves.append(Move((r,c), (endRow, endCol), self.board))
                        break
                    else:  # friendly piece invalid
                        break
                else:  # off board
                    break

    def getBishopMoves(self, r, c, moves):
        """Append all bishop moves from square (r, c) to *moves*."""
        directions = ((-1,-1),(-1,1),(1,-1),(1,1))  # TL, TR, BL, BR
        enemyColor = "b" if self.whiteToMove else "w"
        for d in directions:
            for i in range(1,8):
                endRow = r + d[0] * i
                endCol = c + d[1] * i
                if 0 <= endRow < 8 and 0 <= endCol < 8:  # on board
                    endPiece = self.board[endRow][endCol]
                    if endPiece == "--":  # empty space valid
                        moves.append(Move((r,c), (endRow,endCol), self.board))
                    elif endPiece[0] == enemyColor:  # enemy piece valid, but stop sliding
                        moves.append(Move((r,c), (endRow, endCol), self.board))
                        break
                    else:  # friendly piece invalid
                        break
                else:  # off board
                    break

    def getQueenMoves(self, r, c, moves):
        """Append all queen moves from (r, c): rook moves plus bishop moves."""
        self.getRookMoves(r, c, moves)
        self.getBishopMoves(r, c, moves)

    def getKnightMoves(self, r, c, moves):
        """Append all knight moves from square (r, c) to *moves*."""
        knightMoves = ((-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1))
        allyColor = "w" if self.whiteToMove else "b"
        for i in range(8):
            endRow = r + knightMoves[i][0]
            endCol = c + knightMoves[i][1]
            if 0 <= endRow < 8 and 0 <= endCol < 8:
                endPiece = self.board[endRow][endCol]
                if endPiece[0] != allyColor:  # empty or enemy square
                    moves.append(Move((r,c), (endRow, endCol), self.board))

    def getKingMoves(self, r, c, moves):
        """Append all (one-square) king moves from (r, c); no castling."""
        kingMoves = ((-1, -1),(-1, 0),(-1, 1),(0, -1),(0, 1),(1, -1),(1, 0),(1, 1))
        allyColor = "w" if self.whiteToMove else "b"
        for i in range(8):
            endRow = r + kingMoves[i][0]
            endCol = c + kingMoves[i][1]
            if 0 <= endRow < 8 and 0 <= endCol < 8:
                endPiece = self.board[endRow][endCol]
                if endPiece[0] != allyColor:  # empty or enemy square
                    moves.append(Move((r,c), (endRow, endCol), self.board))
class Move():
    """A single chess move, carrying enough state to undo it."""

    # Mappings between rank/file chess notation and board array indices.
    ranksToRows = {"1": 7, "2": 6, "3": 5, "4": 4,
                   "5": 3, "6": 2, "7": 1, "8": 0}
    rowToRanks = {v: k for k, v in ranksToRows.items()}
    filesToCols = {"a": 0, "b": 1, "c": 2, "d": 3,
                   "e": 4, "f": 5, "g": 6, "h": 7}
    colsToFiles = {v: k for k, v in filesToCols.items()}

    def __init__(self, startSq, endSq, board):
        self.startRow, self.startCol = startSq
        self.endRow, self.endCol = endSq
        self.pieceMoved = board[self.startRow][self.startCol]
        self.pieceCaptured = board[self.endRow][self.endCol]
        # A pawn promotes when it reaches the far back rank.
        self.isPawnPromotion = (
            (self.pieceMoved == 'wp' and self.endRow == 0)
            or (self.pieceMoved == 'bp' and self.endRow == 7)
        )
        # Unique id: start/end squares packed into a 4-digit number.
        self.moveID = (self.startRow * 1000 + self.startCol * 100
                       + self.endRow * 10 + self.endCol)

    def __eq__(self, other):
        """Two moves are equal when they cover the same squares."""
        return isinstance(other, Move) and self.moveID == other.moveID

    def getChessNotation(self):
        """Return a simple 'e2 -> e4' style description of the move."""
        start = self.getRankFile(self.startRow, self.startCol)
        end = self.getRankFile(self.endRow, self.endCol)
        return start + " -> " + end

    def getRankFile(self, r, c):
        """Translate array indices into file+rank notation (e.g. 7,0 -> 'a1')."""
        return self.colsToFiles[c] + self.rowToRanks[r]
|
nilq/baby-python
|
python
|
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Power flow data for IEEE 118 bus test case.
"""
from numpy import array
def case118():
    """Power flow data for IEEE 118 bus test case.
    Please see L{caseformat} for details on the case file format.
    This data was converted from IEEE Common Data Format
    (ieee118cdf.txt) on 20-Sep-2004 by cdf2matp, rev. 1.11
    See end of file for warnings generated during conversion.
    Converted from IEEE CDF file from:
    U{http://www.ee.washington.edu/research/pstca/}
    With baseKV data take from the PSAP format file from the same site,
    added manually on 10-Mar-2006.
    08/25/93 UW ARCHIVE 100.0 1961 W IEEE 118 Bus Test Case
    @return: Power flow data for IEEE 118 bus test case.
    """
    ppc = {"version": '2'}

    ##----- Power Flow Data -----##
    ## system MVA base
    ppc["baseMVA"] = 100.0

    ## bus data
    # Column meanings follow the MATPOWER caseformat convention.
    # type codes: 1 = PQ (load), 2 = PV (generator), 3 = reference/slack.
    # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin
    ppc["bus"] = array([
        [1, 2, 51, 27, 0, 0, 1, 0.955, 10.67, 138, 1, 1.06, 0.94],
        [2, 1, 20, 9, 0, 0, 1, 0.971, 11.22, 138, 1, 1.06, 0.94],
        [3, 1, 39, 10, 0, 0, 1, 0.968, 11.56, 138, 1, 1.06, 0.94],
        [4, 2, 39, 12, 0, 0, 1, 0.998, 15.28, 138, 1, 1.06, 0.94],
        [5, 1, 0, 0, 0, -40, 1, 1.002, 15.73, 138, 1, 1.06, 0.94],
        [6, 2, 52, 22, 0, 0, 1, 0.99, 13, 138, 1, 1.06, 0.94],
        [7, 1, 19, 2, 0, 0, 1, 0.989, 12.56, 138, 1, 1.06, 0.94],
        [8, 2, 28, 0, 0, 0, 1, 1.015, 20.77, 345, 1, 1.06, 0.94],
        [9, 1, 0, 0, 0, 0, 1, 1.043, 28.02, 345, 1, 1.06, 0.94],
        [10, 2, 0, 0, 0, 0, 1, 1.05, 35.61, 345, 1, 1.06, 0.94],
        [11, 1, 70, 23, 0, 0, 1, 0.985, 12.72, 138, 1, 1.06, 0.94],
        [12, 2, 47, 10, 0, 0, 1, 0.99, 12.2, 138, 1, 1.06, 0.94],
        [13, 1, 34, 16, 0, 0, 1, 0.968, 11.35, 138, 1, 1.06, 0.94],
        [14, 1, 14, 1, 0, 0, 1, 0.984, 11.5, 138, 1, 1.06, 0.94],
        [15, 2, 90, 30, 0, 0, 1, 0.97, 11.23, 138, 1, 1.06, 0.94],
        [16, 1, 25, 10, 0, 0, 1, 0.984, 11.91, 138, 1, 1.06, 0.94],
        [17, 1, 11, 3, 0, 0, 1, 0.995, 13.74, 138, 1, 1.06, 0.94],
        [18, 2, 60, 34, 0, 0, 1, 0.973, 11.53, 138, 1, 1.06, 0.94],
        [19, 2, 45, 25, 0, 0, 1, 0.963, 11.05, 138, 1, 1.06, 0.94],
        [20, 1, 18, 3, 0, 0, 1, 0.958, 11.93, 138, 1, 1.06, 0.94],
        [21, 1, 14, 8, 0, 0, 1, 0.959, 13.52, 138, 1, 1.06, 0.94],
        [22, 1, 10, 5, 0, 0, 1, 0.97, 16.08, 138, 1, 1.06, 0.94],
        [23, 1, 7, 3, 0, 0, 1, 1, 21, 138, 1, 1.06, 0.94],
        [24, 2, 13, 0, 0, 0, 1, 0.992, 20.89, 138, 1, 1.06, 0.94],
        [25, 2, 0, 0, 0, 0, 1, 1.05, 27.93, 138, 1, 1.06, 0.94],
        [26, 2, 0, 0, 0, 0, 1, 1.015, 29.71, 345, 1, 1.06, 0.94],
        [27, 2, 71, 13, 0, 0, 1, 0.968, 15.35, 138, 1, 1.06, 0.94],
        [28, 1, 17, 7, 0, 0, 1, 0.962, 13.62, 138, 1, 1.06, 0.94],
        [29, 1, 24, 4, 0, 0, 1, 0.963, 12.63, 138, 1, 1.06, 0.94],
        [30, 1, 0, 0, 0, 0, 1, 0.968, 18.79, 345, 1, 1.06, 0.94],
        [31, 2, 43, 27, 0, 0, 1, 0.967, 12.75, 138, 1, 1.06, 0.94],
        [32, 2, 59, 23, 0, 0, 1, 0.964, 14.8, 138, 1, 1.06, 0.94],
        [33, 1, 23, 9, 0, 0, 1, 0.972, 10.63, 138, 1, 1.06, 0.94],
        [34, 2, 59, 26, 0, 14, 1, 0.986, 11.3, 138, 1, 1.06, 0.94],
        [35, 1, 33, 9, 0, 0, 1, 0.981, 10.87, 138, 1, 1.06, 0.94],
        [36, 2, 31, 17, 0, 0, 1, 0.98, 10.87, 138, 1, 1.06, 0.94],
        [37, 1, 0, 0, 0, -25, 1, 0.992, 11.77, 138, 1, 1.06, 0.94],
        [38, 1, 0, 0, 0, 0, 1, 0.962, 16.91, 345, 1, 1.06, 0.94],
        [39, 1, 27, 11, 0, 0, 1, 0.97, 8.41, 138, 1, 1.06, 0.94],
        [40, 2, 66, 23, 0, 0, 1, 0.97, 7.35, 138, 1, 1.06, 0.94],
        [41, 1, 37, 10, 0, 0, 1, 0.967, 6.92, 138, 1, 1.06, 0.94],
        [42, 2, 96, 23, 0, 0, 1, 0.985, 8.53, 138, 1, 1.06, 0.94],
        [43, 1, 18, 7, 0, 0, 1, 0.978, 11.28, 138, 1, 1.06, 0.94],
        [44, 1, 16, 8, 0, 10, 1, 0.985, 13.82, 138, 1, 1.06, 0.94],
        [45, 1, 53, 22, 0, 10, 1, 0.987, 15.67, 138, 1, 1.06, 0.94],
        [46, 2, 28, 10, 0, 10, 1, 1.005, 18.49, 138, 1, 1.06, 0.94],
        [47, 1, 34, 0, 0, 0, 1, 1.017, 20.73, 138, 1, 1.06, 0.94],
        [48, 1, 20, 11, 0, 15, 1, 1.021, 19.93, 138, 1, 1.06, 0.94],
        [49, 2, 87, 30, 0, 0, 1, 1.025, 20.94, 138, 1, 1.06, 0.94],
        [50, 1, 17, 4, 0, 0, 1, 1.001, 18.9, 138, 1, 1.06, 0.94],
        [51, 1, 17, 8, 0, 0, 1, 0.967, 16.28, 138, 1, 1.06, 0.94],
        [52, 1, 18, 5, 0, 0, 1, 0.957, 15.32, 138, 1, 1.06, 0.94],
        [53, 1, 23, 11, 0, 0, 1, 0.946, 14.35, 138, 1, 1.06, 0.94],
        [54, 2, 113, 32, 0, 0, 1, 0.955, 15.26, 138, 1, 1.06, 0.94],
        [55, 2, 63, 22, 0, 0, 1, 0.952, 14.97, 138, 1, 1.06, 0.94],
        [56, 2, 84, 18, 0, 0, 1, 0.954, 15.16, 138, 1, 1.06, 0.94],
        [57, 1, 12, 3, 0, 0, 1, 0.971, 16.36, 138, 1, 1.06, 0.94],
        [58, 1, 12, 3, 0, 0, 1, 0.959, 15.51, 138, 1, 1.06, 0.94],
        [59, 2, 277, 113, 0, 0, 1, 0.985, 19.37, 138, 1, 1.06, 0.94],
        [60, 1, 78, 3, 0, 0, 1, 0.993, 23.15, 138, 1, 1.06, 0.94],
        [61, 2, 0, 0, 0, 0, 1, 0.995, 24.04, 138, 1, 1.06, 0.94],
        [62, 2, 77, 14, 0, 0, 1, 0.998, 23.43, 138, 1, 1.06, 0.94],
        [63, 1, 0, 0, 0, 0, 1, 0.969, 22.75, 345, 1, 1.06, 0.94],
        [64, 1, 0, 0, 0, 0, 1, 0.984, 24.52, 345, 1, 1.06, 0.94],
        [65, 2, 0, 0, 0, 0, 1, 1.005, 27.65, 345, 1, 1.06, 0.94],
        [66, 2, 39, 18, 0, 0, 1, 1.05, 27.48, 138, 1, 1.06, 0.94],
        [67, 1, 28, 7, 0, 0, 1, 1.02, 24.84, 138, 1, 1.06, 0.94],
        [68, 1, 0, 0, 0, 0, 1, 1.003, 27.55, 345, 1, 1.06, 0.94],
        [69, 3, 0, 0, 0, 0, 1, 1.035, 30, 138, 1, 1.06, 0.94],
        [70, 2, 66, 20, 0, 0, 1, 0.984, 22.58, 138, 1, 1.06, 0.94],
        [71, 1, 0, 0, 0, 0, 1, 0.987, 22.15, 138, 1, 1.06, 0.94],
        [72, 2, 12, 0, 0, 0, 1, 0.98, 20.98, 138, 1, 1.06, 0.94],
        [73, 2, 6, 0, 0, 0, 1, 0.991, 21.94, 138, 1, 1.06, 0.94],
        [74, 2, 68, 27, 0, 12, 1, 0.958, 21.64, 138, 1, 1.06, 0.94],
        [75, 1, 47, 11, 0, 0, 1, 0.967, 22.91, 138, 1, 1.06, 0.94],
        [76, 2, 68, 36, 0, 0, 1, 0.943, 21.77, 138, 1, 1.06, 0.94],
        [77, 2, 61, 28, 0, 0, 1, 1.006, 26.72, 138, 1, 1.06, 0.94],
        [78, 1, 71, 26, 0, 0, 1, 1.003, 26.42, 138, 1, 1.06, 0.94],
        [79, 1, 39, 32, 0, 20, 1, 1.009, 26.72, 138, 1, 1.06, 0.94],
        [80, 2, 130, 26, 0, 0, 1, 1.04, 28.96, 138, 1, 1.06, 0.94],
        [81, 1, 0, 0, 0, 0, 1, 0.997, 28.1, 345, 1, 1.06, 0.94],
        [82, 1, 54, 27, 0, 20, 1, 0.989, 27.24, 138, 1, 1.06, 0.94],
        [83, 1, 20, 10, 0, 10, 1, 0.985, 28.42, 138, 1, 1.06, 0.94],
        [84, 1, 11, 7, 0, 0, 1, 0.98, 30.95, 138, 1, 1.06, 0.94],
        [85, 2, 24, 15, 0, 0, 1, 0.985, 32.51, 138, 1, 1.06, 0.94],
        [86, 1, 21, 10, 0, 0, 1, 0.987, 31.14, 138, 1, 1.06, 0.94],
        [87, 2, 0, 0, 0, 0, 1, 1.015, 31.4, 161, 1, 1.06, 0.94],
        [88, 1, 48, 10, 0, 0, 1, 0.987, 35.64, 138, 1, 1.06, 0.94],
        [89, 2, 0, 0, 0, 0, 1, 1.005, 39.69, 138, 1, 1.06, 0.94],
        [90, 2, 163, 42, 0, 0, 1, 0.985, 33.29, 138, 1, 1.06, 0.94],
        [91, 2, 10, 0, 0, 0, 1, 0.98, 33.31, 138, 1, 1.06, 0.94],
        [92, 2, 65, 10, 0, 0, 1, 0.993, 33.8, 138, 1, 1.06, 0.94],
        [93, 1, 12, 7, 0, 0, 1, 0.987, 30.79, 138, 1, 1.06, 0.94],
        [94, 1, 30, 16, 0, 0, 1, 0.991, 28.64, 138, 1, 1.06, 0.94],
        [95, 1, 42, 31, 0, 0, 1, 0.981, 27.67, 138, 1, 1.06, 0.94],
        [96, 1, 38, 15, 0, 0, 1, 0.993, 27.51, 138, 1, 1.06, 0.94],
        [97, 1, 15, 9, 0, 0, 1, 1.011, 27.88, 138, 1, 1.06, 0.94],
        [98, 1, 34, 8, 0, 0, 1, 1.024, 27.4, 138, 1, 1.06, 0.94],
        [99, 2, 42, 0, 0, 0, 1, 1.01, 27.04, 138, 1, 1.06, 0.94],
        [100, 2, 37, 18, 0, 0, 1, 1.017, 28.03, 138, 1, 1.06, 0.94],
        [101, 1, 22, 15, 0, 0, 1, 0.993, 29.61, 138, 1, 1.06, 0.94],
        [102, 1, 5, 3, 0, 0, 1, 0.991, 32.3, 138, 1, 1.06, 0.94],
        [103, 2, 23, 16, 0, 0, 1, 1.001, 24.44, 138, 1, 1.06, 0.94],
        [104, 2, 38, 25, 0, 0, 1, 0.971, 21.69, 138, 1, 1.06, 0.94],
        [105, 2, 31, 26, 0, 20, 1, 0.965, 20.57, 138, 1, 1.06, 0.94],
        [106, 1, 43, 16, 0, 0, 1, 0.962, 20.32, 138, 1, 1.06, 0.94],
        [107, 2, 50, 12, 0, 6, 1, 0.952, 17.53, 138, 1, 1.06, 0.94],
        [108, 1, 2, 1, 0, 0, 1, 0.967, 19.38, 138, 1, 1.06, 0.94],
        [109, 1, 8, 3, 0, 0, 1, 0.967, 18.93, 138, 1, 1.06, 0.94],
        [110, 2, 39, 30, 0, 6, 1, 0.973, 18.09, 138, 1, 1.06, 0.94],
        [111, 2, 0, 0, 0, 0, 1, 0.98, 19.74, 138, 1, 1.06, 0.94],
        [112, 2, 68, 13, 0, 0, 1, 0.975, 14.99, 138, 1, 1.06, 0.94],
        [113, 2, 6, 0, 0, 0, 1, 0.993, 13.74, 138, 1, 1.06, 0.94],
        [114, 1, 8, 3, 0, 0, 1, 0.96, 14.46, 138, 1, 1.06, 0.94],
        [115, 1, 22, 7, 0, 0, 1, 0.96, 14.46, 138, 1, 1.06, 0.94],
        [116, 2, 184, 0, 0, 0, 1, 1.005, 27.12, 138, 1, 1.06, 0.94],
        [117, 1, 20, 8, 0, 0, 1, 0.974, 10.67, 138, 1, 1.06, 0.94],
        [118, 1, 33, 15, 0, 0, 1, 0.949, 21.92, 138, 1, 1.06, 0.94]
    ])

    ## generator data
    # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,
    # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf
    ppc["gen"] = array([
        [1, 0, 0, 15, -5, 0.955, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [4, 0, 0, 300, -300, 0.998, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [6, 0, 0, 50, -13, 0.99, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [8, 0, 0, 300, -300, 1.015, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [10, 450, 0, 200, -147, 1.05, 100, 1, 550, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [12, 85, 0, 120, -35, 0.99, 100, 1, 185, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [15, 0, 0, 30, -10, 0.97, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [18, 0, 0, 50, -16, 0.973, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [19, 0, 0, 24, -8, 0.962, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [24, 0, 0, 300, -300, 0.992, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [25, 220, 0, 140, -47, 1.05, 100, 1, 320, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [26, 314, 0, 1000, -1000, 1.015, 100, 1, 414, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [27, 0, 0, 300, -300, 0.968, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [31, 7, 0, 300, -300, 0.967, 100, 1, 107, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [32, 0, 0, 42, -14, 0.963, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [34, 0, 0, 24, -8, 0.984, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [36, 0, 0, 24, -8, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [40, 0, 0, 300, -300, 0.97, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [42, 0, 0, 300, -300, 0.985, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [46, 19, 0, 100, -100, 1.005, 100, 1, 119, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [49, 204, 0, 210, -85, 1.025, 100, 1, 304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [54, 48, 0, 300, -300, 0.955, 100, 1, 148, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [55, 0, 0, 23, -8, 0.952, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [56, 0, 0, 15, -8, 0.954, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [59, 155, 0, 180, -60, 0.985, 100, 1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [61, 160, 0, 300, -100, 0.995, 100, 1, 260, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [62, 0, 0, 20, -20, 0.998, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [65, 391, 0, 200, -67, 1.005, 100, 1, 491, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [66, 392, 0, 200, -67, 1.05, 100, 1, 492, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [69, 516.4, 0, 300, -300, 1.035, 100, 1, 805.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [70, 0, 0, 32, -10, 0.984, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [72, 0, 0, 100, -100, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [73, 0, 0, 100, -100, 0.991, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [74, 0, 0, 9, -6, 0.958, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [76, 0, 0, 23, -8, 0.943, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [77, 0, 0, 70, -20, 1.006, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [80, 477, 0, 280, -165, 1.04, 100, 1, 577, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [85, 0, 0, 23, -8, 0.985, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [87, 4, 0, 1000, -100, 1.015, 100, 1, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [89, 607, 0, 300, -210, 1.005, 100, 1, 707, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [90, 0, 0, 300, -300, 0.985, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [91, 0, 0, 100, -100, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [92, 0, 0, 9, -3, 0.99, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [99, 0, 0, 100, -100, 1.01, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [100, 252, 0, 155, -50, 1.017, 100, 1, 352, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [103, 40, 0, 40, -15, 1.01, 100, 1, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [104, 0, 0, 23, -8, 0.971, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [105, 0, 0, 23, -8, 0.965, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [107, 0, 0, 200, -200, 0.952, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [110, 0, 0, 23, -8, 0.973, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [111, 36, 0, 1000, -100, 0.98, 100, 1, 136, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [112, 0, 0, 1000, -100, 0.975, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [113, 0, 0, 200, -100, 0.993, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [116, 0, 0, 1000, -1000, 1.005, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    ])

    ## branch data
    # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax
    # A nonzero ratio marks a transformer branch; rateA of 9900 means unbounded.
    ppc["branch"] = array([
        [1, 2, 0.0303, 0.0999, 0.0254, 9900, 0, 0, 0, 0, 1, -360, 360],
        [1, 3, 0.0129, 0.0424, 0.01082, 9900, 0, 0, 0, 0, 1, -360, 360],
        [4, 5, 0.00176, 0.00798, 0.0021, 9900, 0, 0, 0, 0, 1, -360, 360],
        [3, 5, 0.0241, 0.108, 0.0284, 9900, 0, 0, 0, 0, 1, -360, 360],
        [5, 6, 0.0119, 0.054, 0.01426, 9900, 0, 0, 0, 0, 1, -360, 360],
        [6, 7, 0.00459, 0.0208, 0.0055, 9900, 0, 0, 0, 0, 1, -360, 360],
        [8, 9, 0.00244, 0.0305, 1.162, 9900, 0, 0, 0, 0, 1, -360, 360],
        [8, 5, 0, 0.0267, 0, 9900, 0, 0, 0.985, 0, 1, -360, 360],
        [9, 10, 0.00258, 0.0322, 1.23, 9900, 0, 0, 0, 0, 1, -360, 360],
        [4, 11, 0.0209, 0.0688, 0.01748, 9900, 0, 0, 0, 0, 1, -360, 360],
        [5, 11, 0.0203, 0.0682, 0.01738, 9900, 0, 0, 0, 0, 1, -360, 360],
        [11, 12, 0.00595, 0.0196, 0.00502, 9900, 0, 0, 0, 0, 1, -360, 360],
        [2, 12, 0.0187, 0.0616, 0.01572, 9900, 0, 0, 0, 0, 1, -360, 360],
        [3, 12, 0.0484, 0.16, 0.0406, 9900, 0, 0, 0, 0, 1, -360, 360],
        [7, 12, 0.00862, 0.034, 0.00874, 9900, 0, 0, 0, 0, 1, -360, 360],
        [11, 13, 0.02225, 0.0731, 0.01876, 9900, 0, 0, 0, 0, 1, -360, 360],
        [12, 14, 0.0215, 0.0707, 0.01816, 9900, 0, 0, 0, 0, 1, -360, 360],
        [13, 15, 0.0744, 0.2444, 0.06268, 9900, 0, 0, 0, 0, 1, -360, 360],
        [14, 15, 0.0595, 0.195, 0.0502, 9900, 0, 0, 0, 0, 1, -360, 360],
        [12, 16, 0.0212, 0.0834, 0.0214, 9900, 0, 0, 0, 0, 1, -360, 360],
        [15, 17, 0.0132, 0.0437, 0.0444, 9900, 0, 0, 0, 0, 1, -360, 360],
        [16, 17, 0.0454, 0.1801, 0.0466, 9900, 0, 0, 0, 0, 1, -360, 360],
        [17, 18, 0.0123, 0.0505, 0.01298, 9900, 0, 0, 0, 0, 1, -360, 360],
        [18, 19, 0.01119, 0.0493, 0.01142, 9900, 0, 0, 0, 0, 1, -360, 360],
        [19, 20, 0.0252, 0.117, 0.0298, 9900, 0, 0, 0, 0, 1, -360, 360],
        [15, 19, 0.012, 0.0394, 0.0101, 9900, 0, 0, 0, 0, 1, -360, 360],
        [20, 21, 0.0183, 0.0849, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360],
        [21, 22, 0.0209, 0.097, 0.0246, 9900, 0, 0, 0, 0, 1, -360, 360],
        [22, 23, 0.0342, 0.159, 0.0404, 9900, 0, 0, 0, 0, 1, -360, 360],
        [23, 24, 0.0135, 0.0492, 0.0498, 9900, 0, 0, 0, 0, 1, -360, 360],
        [23, 25, 0.0156, 0.08, 0.0864, 9900, 0, 0, 0, 0, 1, -360, 360],
        [26, 25, 0, 0.0382, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360],
        [25, 27, 0.0318, 0.163, 0.1764, 9900, 0, 0, 0, 0, 1, -360, 360],
        [27, 28, 0.01913, 0.0855, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360],
        [28, 29, 0.0237, 0.0943, 0.0238, 9900, 0, 0, 0, 0, 1, -360, 360],
        [30, 17, 0, 0.0388, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360],
        [8, 30, 0.00431, 0.0504, 0.514, 9900, 0, 0, 0, 0, 1, -360, 360],
        [26, 30, 0.00799, 0.086, 0.908, 9900, 0, 0, 0, 0, 1, -360, 360],
        [17, 31, 0.0474, 0.1563, 0.0399, 9900, 0, 0, 0, 0, 1, -360, 360],
        [29, 31, 0.0108, 0.0331, 0.0083, 9900, 0, 0, 0, 0, 1, -360, 360],
        [23, 32, 0.0317, 0.1153, 0.1173, 9900, 0, 0, 0, 0, 1, -360, 360],
        [31, 32, 0.0298, 0.0985, 0.0251, 9900, 0, 0, 0, 0, 1, -360, 360],
        [27, 32, 0.0229, 0.0755, 0.01926, 9900, 0, 0, 0, 0, 1, -360, 360],
        [15, 33, 0.038, 0.1244, 0.03194, 9900, 0, 0, 0, 0, 1, -360, 360],
        [19, 34, 0.0752, 0.247, 0.0632, 9900, 0, 0, 0, 0, 1, -360, 360],
        [35, 36, 0.00224, 0.0102, 0.00268, 9900, 0, 0, 0, 0, 1, -360, 360],
        [35, 37, 0.011, 0.0497, 0.01318, 9900, 0, 0, 0, 0, 1, -360, 360],
        [33, 37, 0.0415, 0.142, 0.0366, 9900, 0, 0, 0, 0, 1, -360, 360],
        [34, 36, 0.00871, 0.0268, 0.00568, 9900, 0, 0, 0, 0, 1, -360, 360],
        [34, 37, 0.00256, 0.0094, 0.00984, 9900, 0, 0, 0, 0, 1, -360, 360],
        [38, 37, 0, 0.0375, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
        [37, 39, 0.0321, 0.106, 0.027, 9900, 0, 0, 0, 0, 1, -360, 360],
        [37, 40, 0.0593, 0.168, 0.042, 9900, 0, 0, 0, 0, 1, -360, 360],
        [30, 38, 0.00464, 0.054, 0.422, 9900, 0, 0, 0, 0, 1, -360, 360],
        [39, 40, 0.0184, 0.0605, 0.01552, 9900, 0, 0, 0, 0, 1, -360, 360],
        [40, 41, 0.0145, 0.0487, 0.01222, 9900, 0, 0, 0, 0, 1, -360, 360],
        [40, 42, 0.0555, 0.183, 0.0466, 9900, 0, 0, 0, 0, 1, -360, 360],
        [41, 42, 0.041, 0.135, 0.0344, 9900, 0, 0, 0, 0, 1, -360, 360],
        [43, 44, 0.0608, 0.2454, 0.06068, 9900, 0, 0, 0, 0, 1, -360, 360],
        [34, 43, 0.0413, 0.1681, 0.04226, 9900, 0, 0, 0, 0, 1, -360, 360],
        [44, 45, 0.0224, 0.0901, 0.0224, 9900, 0, 0, 0, 0, 1, -360, 360],
        [45, 46, 0.04, 0.1356, 0.0332, 9900, 0, 0, 0, 0, 1, -360, 360],
        [46, 47, 0.038, 0.127, 0.0316, 9900, 0, 0, 0, 0, 1, -360, 360],
        [46, 48, 0.0601, 0.189, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
        [47, 49, 0.0191, 0.0625, 0.01604, 9900, 0, 0, 0, 0, 1, -360, 360],
        [42, 49, 0.0715, 0.323, 0.086, 9900, 0, 0, 0, 0, 1, -360, 360],
        [42, 49, 0.0715, 0.323, 0.086, 9900, 0, 0, 0, 0, 1, -360, 360],
        [45, 49, 0.0684, 0.186, 0.0444, 9900, 0, 0, 0, 0, 1, -360, 360],
        [48, 49, 0.0179, 0.0505, 0.01258, 9900, 0, 0, 0, 0, 1, -360, 360],
        [49, 50, 0.0267, 0.0752, 0.01874, 9900, 0, 0, 0, 0, 1, -360, 360],
        [49, 51, 0.0486, 0.137, 0.0342, 9900, 0, 0, 0, 0, 1, -360, 360],
        [51, 52, 0.0203, 0.0588, 0.01396, 9900, 0, 0, 0, 0, 1, -360, 360],
        [52, 53, 0.0405, 0.1635, 0.04058, 9900, 0, 0, 0, 0, 1, -360, 360],
        [53, 54, 0.0263, 0.122, 0.031, 9900, 0, 0, 0, 0, 1, -360, 360],
        [49, 54, 0.073, 0.289, 0.0738, 9900, 0, 0, 0, 0, 1, -360, 360],
        [49, 54, 0.0869, 0.291, 0.073, 9900, 0, 0, 0, 0, 1, -360, 360],
        [54, 55, 0.0169, 0.0707, 0.0202, 9900, 0, 0, 0, 0, 1, -360, 360],
        [54, 56, 0.00275, 0.00955, 0.00732, 9900, 0, 0, 0, 0, 1, -360, 360],
        [55, 56, 0.00488, 0.0151, 0.00374, 9900, 0, 0, 0, 0, 1, -360, 360],
        [56, 57, 0.0343, 0.0966, 0.0242, 9900, 0, 0, 0, 0, 1, -360, 360],
        [50, 57, 0.0474, 0.134, 0.0332, 9900, 0, 0, 0, 0, 1, -360, 360],
        [56, 58, 0.0343, 0.0966, 0.0242, 9900, 0, 0, 0, 0, 1, -360, 360],
        [51, 58, 0.0255, 0.0719, 0.01788, 9900, 0, 0, 0, 0, 1, -360, 360],
        [54, 59, 0.0503, 0.2293, 0.0598, 9900, 0, 0, 0, 0, 1, -360, 360],
        [56, 59, 0.0825, 0.251, 0.0569, 9900, 0, 0, 0, 0, 1, -360, 360],
        [56, 59, 0.0803, 0.239, 0.0536, 9900, 0, 0, 0, 0, 1, -360, 360],
        [55, 59, 0.04739, 0.2158, 0.05646, 9900, 0, 0, 0, 0, 1, -360, 360],
        [59, 60, 0.0317, 0.145, 0.0376, 9900, 0, 0, 0, 0, 1, -360, 360],
        [59, 61, 0.0328, 0.15, 0.0388, 9900, 0, 0, 0, 0, 1, -360, 360],
        [60, 61, 0.00264, 0.0135, 0.01456, 9900, 0, 0, 0, 0, 1, -360, 360],
        [60, 62, 0.0123, 0.0561, 0.01468, 9900, 0, 0, 0, 0, 1, -360, 360],
        [61, 62, 0.00824, 0.0376, 0.0098, 9900, 0, 0, 0, 0, 1, -360, 360],
        [63, 59, 0, 0.0386, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360],
        [63, 64, 0.00172, 0.02, 0.216, 9900, 0, 0, 0, 0, 1, -360, 360],
        [64, 61, 0, 0.0268, 0, 9900, 0, 0, 0.985, 0, 1, -360, 360],
        [38, 65, 0.00901, 0.0986, 1.046, 9900, 0, 0, 0, 0, 1, -360, 360],
        [64, 65, 0.00269, 0.0302, 0.38, 9900, 0, 0, 0, 0, 1, -360, 360],
        [49, 66, 0.018, 0.0919, 0.0248, 9900, 0, 0, 0, 0, 1, -360, 360],
        [49, 66, 0.018, 0.0919, 0.0248, 9900, 0, 0, 0, 0, 1, -360, 360],
        [62, 66, 0.0482, 0.218, 0.0578, 9900, 0, 0, 0, 0, 1, -360, 360],
        [62, 67, 0.0258, 0.117, 0.031, 9900, 0, 0, 0, 0, 1, -360, 360],
        [65, 66, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
        [66, 67, 0.0224, 0.1015, 0.02682, 9900, 0, 0, 0, 0, 1, -360, 360],
        [65, 68, 0.00138, 0.016, 0.638, 9900, 0, 0, 0, 0, 1, -360, 360],
        [47, 69, 0.0844, 0.2778, 0.07092, 9900, 0, 0, 0, 0, 1, -360, 360],
        [49, 69, 0.0985, 0.324, 0.0828, 9900, 0, 0, 0, 0, 1, -360, 360],
        [68, 69, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
        [69, 70, 0.03, 0.127, 0.122, 9900, 0, 0, 0, 0, 1, -360, 360],
        [24, 70, 0.00221, 0.4115, 0.10198, 9900, 0, 0, 0, 0, 1, -360, 360],
        [70, 71, 0.00882, 0.0355, 0.00878, 9900, 0, 0, 0, 0, 1, -360, 360],
        [24, 72, 0.0488, 0.196, 0.0488, 9900, 0, 0, 0, 0, 1, -360, 360],
        [71, 72, 0.0446, 0.18, 0.04444, 9900, 0, 0, 0, 0, 1, -360, 360],
        [71, 73, 0.00866, 0.0454, 0.01178, 9900, 0, 0, 0, 0, 1, -360, 360],
        [70, 74, 0.0401, 0.1323, 0.03368, 9900, 0, 0, 0, 0, 1, -360, 360],
        [70, 75, 0.0428, 0.141, 0.036, 9900, 0, 0, 0, 0, 1, -360, 360],
        [69, 75, 0.0405, 0.122, 0.124, 9900, 0, 0, 0, 0, 1, -360, 360],
        [74, 75, 0.0123, 0.0406, 0.01034, 9900, 0, 0, 0, 0, 1, -360, 360],
        [76, 77, 0.0444, 0.148, 0.0368, 9900, 0, 0, 0, 0, 1, -360, 360],
        [69, 77, 0.0309, 0.101, 0.1038, 9900, 0, 0, 0, 0, 1, -360, 360],
        [75, 77, 0.0601, 0.1999, 0.04978, 9900, 0, 0, 0, 0, 1, -360, 360],
        [77, 78, 0.00376, 0.0124, 0.01264, 9900, 0, 0, 0, 0, 1, -360, 360],
        [78, 79, 0.00546, 0.0244, 0.00648, 9900, 0, 0, 0, 0, 1, -360, 360],
        [77, 80, 0.017, 0.0485, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
        [77, 80, 0.0294, 0.105, 0.0228, 9900, 0, 0, 0, 0, 1, -360, 360],
        [79, 80, 0.0156, 0.0704, 0.0187, 9900, 0, 0, 0, 0, 1, -360, 360],
        [68, 81, 0.00175, 0.0202, 0.808, 9900, 0, 0, 0, 0, 1, -360, 360],
        [81, 80, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
        [77, 82, 0.0298, 0.0853, 0.08174, 9900, 0, 0, 0, 0, 1, -360, 360],
        [82, 83, 0.0112, 0.03665, 0.03796, 9900, 0, 0, 0, 0, 1, -360, 360],
        [83, 84, 0.0625, 0.132, 0.0258, 9900, 0, 0, 0, 0, 1, -360, 360],
        [83, 85, 0.043, 0.148, 0.0348, 9900, 0, 0, 0, 0, 1, -360, 360],
        [84, 85, 0.0302, 0.0641, 0.01234, 9900, 0, 0, 0, 0, 1, -360, 360],
        [85, 86, 0.035, 0.123, 0.0276, 9900, 0, 0, 0, 0, 1, -360, 360],
        [86, 87, 0.02828, 0.2074, 0.0445, 9900, 0, 0, 0, 0, 1, -360, 360],
        [85, 88, 0.02, 0.102, 0.0276, 9900, 0, 0, 0, 0, 1, -360, 360],
        [85, 89, 0.0239, 0.173, 0.047, 9900, 0, 0, 0, 0, 1, -360, 360],
        [88, 89, 0.0139, 0.0712, 0.01934, 9900, 0, 0, 0, 0, 1, -360, 360],
        [89, 90, 0.0518, 0.188, 0.0528, 9900, 0, 0, 0, 0, 1, -360, 360],
        [89, 90, 0.0238, 0.0997, 0.106, 9900, 0, 0, 0, 0, 1, -360, 360],
        [90, 91, 0.0254, 0.0836, 0.0214, 9900, 0, 0, 0, 0, 1, -360, 360],
        [89, 92, 0.0099, 0.0505, 0.0548, 9900, 0, 0, 0, 0, 1, -360, 360],
        [89, 92, 0.0393, 0.1581, 0.0414, 9900, 0, 0, 0, 0, 1, -360, 360],
        [91, 92, 0.0387, 0.1272, 0.03268, 9900, 0, 0, 0, 0, 1, -360, 360],
        [92, 93, 0.0258, 0.0848, 0.0218, 9900, 0, 0, 0, 0, 1, -360, 360],
        [92, 94, 0.0481, 0.158, 0.0406, 9900, 0, 0, 0, 0, 1, -360, 360],
        [93, 94, 0.0223, 0.0732, 0.01876, 9900, 0, 0, 0, 0, 1, -360, 360],
        [94, 95, 0.0132, 0.0434, 0.0111, 9900, 0, 0, 0, 0, 1, -360, 360],
        [80, 96, 0.0356, 0.182, 0.0494, 9900, 0, 0, 0, 0, 1, -360, 360],
        [82, 96, 0.0162, 0.053, 0.0544, 9900, 0, 0, 0, 0, 1, -360, 360],
        [94, 96, 0.0269, 0.0869, 0.023, 9900, 0, 0, 0, 0, 1, -360, 360],
        [80, 97, 0.0183, 0.0934, 0.0254, 9900, 0, 0, 0, 0, 1, -360, 360],
        [80, 98, 0.0238, 0.108, 0.0286, 9900, 0, 0, 0, 0, 1, -360, 360],
        [80, 99, 0.0454, 0.206, 0.0546, 9900, 0, 0, 0, 0, 1, -360, 360],
        [92, 100, 0.0648, 0.295, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
        [94, 100, 0.0178, 0.058, 0.0604, 9900, 0, 0, 0, 0, 1, -360, 360],
        [95, 96, 0.0171, 0.0547, 0.01474, 9900, 0, 0, 0, 0, 1, -360, 360],
        [96, 97, 0.0173, 0.0885, 0.024, 9900, 0, 0, 0, 0, 1, -360, 360],
        [98, 100, 0.0397, 0.179, 0.0476, 9900, 0, 0, 0, 0, 1, -360, 360],
        [99, 100, 0.018, 0.0813, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360],
        [100, 101, 0.0277, 0.1262, 0.0328, 9900, 0, 0, 0, 0, 1, -360, 360],
        [92, 102, 0.0123, 0.0559, 0.01464, 9900, 0, 0, 0, 0, 1, -360, 360],
        [101, 102, 0.0246, 0.112, 0.0294, 9900, 0, 0, 0, 0, 1, -360, 360],
        [100, 103, 0.016, 0.0525, 0.0536, 9900, 0, 0, 0, 0, 1, -360, 360],
        [100, 104, 0.0451, 0.204, 0.0541, 9900, 0, 0, 0, 0, 1, -360, 360],
        [103, 104, 0.0466, 0.1584, 0.0407, 9900, 0, 0, 0, 0, 1, -360, 360],
        [103, 105, 0.0535, 0.1625, 0.0408, 9900, 0, 0, 0, 0, 1, -360, 360],
        [100, 106, 0.0605, 0.229, 0.062, 9900, 0, 0, 0, 0, 1, -360, 360],
        [104, 105, 0.00994, 0.0378, 0.00986, 9900, 0, 0, 0, 0, 1, -360, 360],
        [105, 106, 0.014, 0.0547, 0.01434, 9900, 0, 0, 0, 0, 1, -360, 360],
        [105, 107, 0.053, 0.183, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
        [105, 108, 0.0261, 0.0703, 0.01844, 9900, 0, 0, 0, 0, 1, -360, 360],
        [106, 107, 0.053, 0.183, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
        [108, 109, 0.0105, 0.0288, 0.0076, 9900, 0, 0, 0, 0, 1, -360, 360],
        [103, 110, 0.03906, 0.1813, 0.0461, 9900, 0, 0, 0, 0, 1, -360, 360],
        [109, 110, 0.0278, 0.0762, 0.0202, 9900, 0, 0, 0, 0, 1, -360, 360],
        [110, 111, 0.022, 0.0755, 0.02, 9900, 0, 0, 0, 0, 1, -360, 360],
        [110, 112, 0.0247, 0.064, 0.062, 9900, 0, 0, 0, 0, 1, -360, 360],
        [17, 113, 0.00913, 0.0301, 0.00768, 9900, 0, 0, 0, 0, 1, -360, 360],
        [32, 113, 0.0615, 0.203, 0.0518, 9900, 0, 0, 0, 0, 1, -360, 360],
        [32, 114, 0.0135, 0.0612, 0.01628, 9900, 0, 0, 0, 0, 1, -360, 360],
        [27, 115, 0.0164, 0.0741, 0.01972, 9900, 0, 0, 0, 0, 1, -360, 360],
        [114, 115, 0.0023, 0.0104, 0.00276, 9900, 0, 0, 0, 0, 1, -360, 360],
        [68, 116, 0.00034, 0.00405, 0.164, 9900, 0, 0, 0, 0, 1, -360, 360],
        [12, 117, 0.0329, 0.14, 0.0358, 9900, 0, 0, 0, 0, 1, -360, 360],
        [75, 118, 0.0145, 0.0481, 0.01198, 9900, 0, 0, 0, 0, 1, -360, 360],
        [76, 118, 0.0164, 0.0544, 0.01356, 9900, 0, 0, 0, 0, 1, -360, 360]
    ])

    ##----- OPF Data -----##
    ## generator cost data
    # Model 2 rows are polynomial costs: n coefficients c(n-1)..c0.
    # 1 startup shutdown n x1 y1 ... xn yn
    # 2 startup shutdown n c(n-1) ... c0
    ppc["gencost"] = array([
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.0222222, 20, 0],
        [2, 0, 0, 3, 0.117647, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.0454545, 20, 0],
        [2, 0, 0, 3, 0.0318471, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 1.42857, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.526316, 20, 0],
        [2, 0, 0, 3, 0.0490196, 20, 0],
        [2, 0, 0, 3, 0.208333, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.0645161, 20, 0],
        [2, 0, 0, 3, 0.0625, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.0255754, 20, 0],
        [2, 0, 0, 3, 0.0255102, 20, 0],
        [2, 0, 0, 3, 0.0193648, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.0209644, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 2.5, 20, 0],
        [2, 0, 0, 3, 0.0164745, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.0396825, 20, 0],
        [2, 0, 0, 3, 0.25, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.277778, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0]
    ])

    return ppc
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""Tests the tdb data store - in memory implementation."""
import shutil
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import data_store_test
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.data_stores import tdb_data_store
# pylint: mode=test
class TDBTestMixin(object):
    """Mixin wiring the TDB data store into the shared GRR data-store tests."""

    def InitDatastore(self):
        """Point data_store.DB at a fresh TDBDataStore under a temp dir.

        NOTE(review): relies on self.temp_dir being provided by the test
        base class this mixin is combined with.
        """
        self.token = access_control.ACLToken(username="test",
                                             reason="Running tests")
        # Give each run its own on-disk location and wipe any leftovers first.
        config_lib.CONFIG.Set("Datastore.location", "%s/tdb_test/" % self.temp_dir)
        self.DestroyDatastore()
        data_store.DB = tdb_data_store.TDBDataStore()
        data_store.DB.security_manager = test_lib.MockSecurityManager()

    def testCorrectDataStore(self):
        # Sanity check: the swap performed in InitDatastore took effect.
        self.assertTrue(isinstance(data_store.DB, tdb_data_store.TDBDataStore))

    def DestroyDatastore(self):
        """Delete the on-disk datastore directory, tolerating its absence."""
        try:
            shutil.rmtree(config_lib.CONFIG.Get("Datastore.location"))
        except (OSError, IOError):
            # Best-effort cleanup: the directory may not exist yet.
            pass
class TDBDataStoreTest(TDBTestMixin, data_store_test._DataStoreTest):
    """Test the tdb data store."""
    # All test cases are inherited; the mixin supplies setup/teardown.
class TDBDataStoreBenchmarks(TDBTestMixin,
                             data_store_test.DataStoreBenchmarks):
    """Benchmark the TDB data store abstraction."""
    # Benchmark cases are inherited; the mixin supplies setup/teardown.
class TDBDataStoreCSVBenchmarks(TDBTestMixin,
                                data_store_test.DataStoreCSVBenchmarks):
    """Benchmark the TDB data store abstraction."""
    # CSV-reporting variant; everything else is inherited.
def main(args):
    """Entry point: delegate to the GRR test runner."""
    test_lib.main(args)


if __name__ == "__main__":
    flags.StartMain(main)
|
nilq/baby-python
|
python
|
from radiacode.bytes_buffer import BytesBuffer
from radiacode.radiacode import spectrum_channel_to_energy, RadiaCode
from radiacode.types import *
|
nilq/baby-python
|
python
|
class GameActuator:
    """Plain attribute holder; no behavior is defined on this class.

    NOTE(review): presumably `filename` and `mode` are assigned by callers
    elsewhere — confirm against the code that uses this class.
    """

    # Class-level defaults; both start unset.
    filename = mode = None
|
nilq/baby-python
|
python
|
import codecs
import re
import sys
def warn(s):
    """Write *s* to stderr and flush immediately (no newline is added)."""
    stream = sys.stderr
    stream.write(s)
    stream.flush()
class CfgParserError(Exception):
    """Raised on syntax errors or invalid/reserved keys while parsing."""
    pass
# Legal key components: a lowercase letter followed by lowercase letters
# or digits (validated by CfgParser.CheckName).
_name_pat = re.compile("[a-z][a-z0-9]*", re.UNICODE)


class CfgParser:
    """Dot-separated ``key = value`` configuration file parser.

    Important note: parser related methods and attributes are capitalized.
    You can access (get and set) actual configuration values using
    lower case letters.

    @param ancestor: use this parameter to specify default config values on
        the same level. E.g. you can merge two different config files by
        giving an ancestor. If the actual config file does not have a value
        for a given key, then its ancestor will be queried.
    """

    def __init__(self, ancestor=None):
        self.Values = {}        # tuple-of-name-parts -> parsed value
        self.Ancestor = ancestor
        self.Fpaths = []        # every file parsed into this instance
        self.Fpath = None       # file currently being parsed (None when idle)
        self.Lineno = -1        # current parse line (-1 when idle)

    def __str__(self):
        return "CfgParser(%s)" % self.Fpaths

    def ParseFile(self, fpath, encoding="UTF-8"):
        """Parse a config file, merging its keys into this parser.

        Note: we use capital letters here so that we do not collide with keys.

        @param fpath: path of the file to parse.
        @param encoding: text encoding of the file.
        @return: self, so calls can be chained.
        @raise CfgParserError: on syntax errors or empty keys.
        """
        self.Fpath = fpath
        self.Fpaths.append(fpath)
        try:
            # Fix: the handle was previously opened with codecs.open() and
            # never closed (leaked even on success); the with-block
            # guarantees closure, including when a parse error is raised.
            with codecs.open(fpath, "r", encoding=encoding) as fin:
                self.Lineno = 0
                for line in fin:
                    self.Lineno += 1
                    if not line.strip() or line.strip().startswith("#"):
                        continue  # skip blank lines and comments
                    # Fix: '=' used to be located in the *stripped* line but
                    # applied as an index into the raw line, so lines with
                    # leading whitespace were mis-split (raised "empty key").
                    pos = line.find("=")
                    if pos < 0:
                        raise CfgParserError("%s: invalid syntax at line %d" % (
                            fpath, self.Lineno))
                    key = line[:pos].strip()
                    value = line[pos + 1:].strip()
                    if not key:
                        raise CfgParserError("%s: empty key at line %d" % (
                            fpath, self.Lineno))
                    names = key.split('.')
                    self.SetValue(names, value)
        finally:
            # Always reset parse-position state, even on error.
            self.Lineno = -1
        return self

    def SetValue(self, names, value):
        """Store ``value`` under a key given as "a.b.c" or ["a", "b", "c"]."""
        if isinstance(names, str):
            self.SetValue(names.split("."), value)
        else:
            key = tuple(self.CheckName(name) for name in names)
            self.Values[key] = self.CheckValue(value)

    def GetValue(self, names):
        """Look up a key, falling back to the ancestor chain if present.

        @raise AttributeError: if neither this parser nor any ancestor
            holds the key.
        """
        if isinstance(names, str):
            return self.GetValue(names.split("."))
        key = tuple(names)
        if key in self.Values:
            return self.Values[key]
        if self.Ancestor:
            return self.Ancestor.GetValue(key)
        raise AttributeError("no such config key: %s" % ".".join(key))

    def CheckName(self, name):
        """Validate a single key component; return it as a plain str."""
        if name == "value":
            # 'value' is reserved for CfgResolver attribute access.
            raise CfgParserError("%s: reserved key 'value' at line %d" % (
                self.Fpath, self.Lineno))
        # NOTE(review): .match() only anchors at the start, so "ab-cd" is
        # accepted via its "ab" prefix; fullmatch may have been intended.
        # Left unchanged to keep accepting all previously valid keys.
        if not _name_pat.match(name):
            raise CfgParserError("%s: invalid key at line %d" % (
                self.Fpath, self.Lineno))
        return str(name)

    def CheckValue(self, value):
        """Coerce a raw string to int, then float; otherwise keep it as str."""
        try:
            return int(value)
        except ValueError:
            pass
        try:
            return float(value)
        except ValueError:
            pass
        return value

    def __getattr__(self, key):
        # Any unknown attribute starts an attribute-style key lookup.
        return CfgResolver(self, [key])
class CfgResolver:
    """Attribute-style proxy over a CfgParser key path.

    Each attribute access descends one level in the key hierarchy; reading
    or assigning the special attribute ``value`` resolves or stores the
    underlying config entry.
    """

    def __init__(self, cfgparser, namepath):
        self._cfgparser = cfgparser
        self._namepath = tuple(namepath)

    def __getattr__(self, name):
        if name != "value":
            # Descend one level: build a resolver with an extended path.
            return CfgResolver(self._cfgparser, list(self._namepath) + [name])
        return self.GetValue()

    def __setattr__(self, name, value):
        # The two private slots must be writable for __init__ itself.
        if name in ("_cfgparser", "_namepath"):
            self.__dict__[name] = value
            return
        if name != "value":
            raise AttributeError("Cannot set any attribute except 'value'.")
        self.SetValue(value)

    def GetValue(self):
        return self._cfgparser.GetValue(self._namepath)

    def SetValue(self, value):
        self._cfgparser.SetValue(self._namepath, value)
|
nilq/baby-python
|
python
|
from setuptools import setup, find_packages
# read the contents of your README file
from pathlib import Path
# The PyPI long description is taken verbatim from the README.
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()

setup(
    name='jkx',
    version='1.0.4',
    license='MIT',
    author="Andrew Heaney",
    author_email='heaneyandrew11@gmail.com',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/AndrewHeaney/json-key-explorer',
    keywords='json',
    packages=['jkx'],
    # Installs a `jkx` console command that dispatches to jkx.main.start.
    entry_points={
        'console_scripts': [
            'jkx=jkx.main:start',
        ]
    },
    install_requires=[
        'inquirer',
    ],
)
|
nilq/baby-python
|
python
|
# Exercise: read a number and print its double, its triple and its
# square root (original prompt/output strings kept in Portuguese).
n = float(input('Digite um numero: '))
dobro = n * 2
triplo = n * 3
raiz = n ** (1 / 2)
print('Seu dobro é {}'.format(dobro))
print('Seu triplo é {}'.format(triplo))
print('Sua raiz quadrada é: {}'.format(raiz))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
flask_babelplus.domain
~~~~~~~~~~~~~~~~~~~~~~
Localization domain.
:copyright: (c) 2013 by Armin Ronacher, Daniel Neuhäuser and contributors.
:license: BSD, see LICENSE for more details.
"""
import os
from babel import support
from .utils import get_state, get_locale
from .speaklater import LazyString
class Domain(object):
    """Localization domain. By default it will look for tranlations in the
    Flask application directory and "messages" domain - all message
    catalogs should be called ``messages.mo``.

    :param dirname: translations directory; defaults to
                    ``<app.root_path>/translations`` when None.
    :param domain: gettext domain (catalog base name).
    """

    def __init__(self, dirname=None, domain='messages'):
        self.dirname = dirname
        self.domain = domain
        # Per-locale cache of loaded Translations objects, keyed by str(locale).
        self.cache = dict()

    def as_default(self):
        """Set this domain as the default one for the current request"""
        get_state().domain = self

    def get_translations_cache(self):
        """Returns a dictionary-like object for translation caching"""
        return self.cache

    def get_translations_path(self, app):
        """Returns the translations directory path. Override if you want
        to implement custom behavior.
        """
        return self.dirname or os.path.join(app.root_path, 'translations')

    def get_translations(self):
        """Returns the correct gettext translations that should be used for
        this request. This will never fail and return a dummy translation
        object if used outside of the request or if a translation cannot be
        found.
        """
        state = get_state(silent=True)
        if state is None:
            return support.NullTranslations()
        locale = get_locale()
        cache = self.get_translations_cache()
        translations = cache.get(str(locale))
        if translations is None:
            dirname = self.get_translations_path(state.app)
            translations = support.Translations.load(
                dirname,
                locale,
                domain=self.domain
            )
            # Fix: store through the mapping returned by
            # get_translations_cache() so subclasses overriding that hook
            # read and write the same cache (previously this wrote to
            # self.cache directly, bypassing the override).
            cache[str(locale)] = translations
        return translations

    def gettext(self, string, **variables):
        """Translates a string with the current locale and passes in the
        given keyword arguments as mapping to a string formatting string.

        ::

            gettext(u'Hello World!')
            gettext(u'Hello %(name)s!', name='World')
        """
        t = self.get_translations()
        if variables:
            return t.ugettext(string) % variables
        return t.ugettext(string)

    def ngettext(self, singular, plural, num, **variables):
        """Translates a string with the current locale and passes in the
        given keyword arguments as mapping to a string formatting string.
        The `num` parameter is used to dispatch between singular and various
        plural forms of the message. It is available in the format string
        as ``%(num)d`` or ``%(num)s``. The source language should be
        English or a similar language which only has one plural form.

        ::

            ngettext(u'%(num)d Apple', u'%(num)d Apples', num=len(apples))
        """
        variables.setdefault('num', num)
        t = self.get_translations()
        return t.ungettext(singular, plural, num) % variables

    def pgettext(self, context, string, **variables):
        """Like :func:`gettext` but with a context.

        Gettext uses the ``msgctxt`` notation to distinguish different
        contexts for the same ``msgid``

        For example::

            pgettext(u'Button label', 'Log in')

        Learn more about contexts here:
        https://www.gnu.org/software/gettext/manual/html_node/Contexts.html

        .. versionadded:: 0.7
        """
        t = self.get_translations()
        if variables:
            return t.upgettext(context, string) % variables
        return t.upgettext(context, string)

    def npgettext(self, context, singular, plural, num, **variables):
        """Like :func:`ngettext` but with a context.

        .. versionadded:: 0.7
        """
        variables.setdefault('num', num)
        t = self.get_translations()
        return t.unpgettext(context, singular, plural, num) % variables

    def lazy_gettext(self, string, **variables):
        """Like :func:`gettext` but the string returned is lazy which means
        it will be translated when it is used as an actual string.

        Example::

            hello = lazy_gettext(u'Hello World')

            @app.route('/')
            def index():
                return unicode(hello)
        """
        return LazyString(self.gettext, string, **variables)

    def lazy_ngettext(self, singular, plural, num, **variables):
        """Like :func:`ngettext` but the string returned is lazy which means
        it will be translated when it is used as an actual string.

        Example::

            apples = lazy_ngettext(u'%(num)d Apple', u'%(num)d Apples', num=len(apples))

            @app.route('/')
            def index():
                return unicode(apples)
        """
        return LazyString(self.ngettext, singular, plural, num, **variables)

    def lazy_pgettext(self, context, string, **variables):
        """Like :func:`pgettext` but the string returned is lazy which means
        it will be translated when it is used as an actual string.

        .. versionadded:: 0.7
        """
        return LazyString(self.pgettext, context, string, **variables)
# Module-level default domain, used when there is no request context
# (and thus no app), or when the app isn't initialized for babel.
# Note that without a request context the standard Domain falls back to
# NullTranslations.
domain = Domain()
def get_domain():
    """Return the translation domain in effect for this request.

    Falls back to the module-level default domain (e.g. "messages" in
    ``<approot>/translations``) when called outside of an application
    context.
    """
    state = get_state(silent=True)
    return domain if state is None else state.domain
# Create shortcuts for the default Flask domain.
# Each shortcut resolves the active domain per call via get_domain(), so
# they honor a domain switched with Domain.as_default().
def gettext(*args, **kwargs):
    return get_domain().gettext(*args, **kwargs)


_ = gettext  # noqa


def ngettext(*args, **kwargs):
    return get_domain().ngettext(*args, **kwargs)


def pgettext(*args, **kwargs):
    return get_domain().pgettext(*args, **kwargs)


def npgettext(*args, **kwargs):
    return get_domain().npgettext(*args, **kwargs)


# The lazy_* variants defer the lookup until the string is actually used,
# which allows them to run at import time (before any request context).
def lazy_gettext(*args, **kwargs):
    return LazyString(gettext, *args, **kwargs)


def lazy_ngettext(*args, **kwargs):
    return LazyString(ngettext, *args, **kwargs)


def lazy_pgettext(*args, **kwargs):
    return LazyString(pgettext, *args, **kwargs)
|
nilq/baby-python
|
python
|
from __future__ import print_function
from botocore.exceptions import ClientError
import json
import datetime
import boto3
import os
def handler(event, context):
    """AWS Lambda entry point for a GuardDuty finding event.

    Publishes a remediation notification to SNS and returns the message
    that was sent (or the default error string when publishing failed).
    """
    print("log -- Event: %s " % json.dumps(event))
    # Returned as-is if anything below raises before it is overwritten.
    response = "Error auto-remediating the finding."
    try:
        # Set Clients
        ec2 = boto3.client('ec2')  # NOTE(review): created but never used — confirm intent
        # Current Time
        time = datetime.datetime.utcnow().isoformat()  # NOTE(review): also unused
        # Send Response Email
        response = "GuardDuty Remediation"
        sns = boto3.client('sns')
        # NOTE(review): TopicArn looks like a topic *name*, not a full ARN —
        # SNS publish normally requires the full ARN; verify against the
        # deployed stack.
        sns.publish(
            TopicArn='guardduty_response',
            Message=response
        )
    except ClientError as e:
        # Best-effort: log the AWS error and fall through to return
        # whatever value "response" holds at this point.
        print(e)
    print("log -- Response: %s " % response)
    return response
|
nilq/baby-python
|
python
|
import pytest
from whatlies.language import BytePairLang
@pytest.fixture()
def lang():
    # Small BytePair model (1000 subwords, 25 dims) cached locally so the
    # suite stays fast/offline after the first download.
    return BytePairLang("en", vs=1000, dim=25, cache_dir="tests/cache")


def test_single_token_words(lang):
    # A single token embeds to the model dimensionality; batched lookup
    # returns one embedding per token.
    assert lang["red"].vector.shape == (25,)
    assert len(lang[["red", "blue"]]) == 2


def test_similar_retreival(lang):
    # Both similarity APIs honor the requested result count.
    assert len(lang.score_similar("hi", 10)) == 10
    assert len(lang.embset_similar("hi", 10)) == 10


@pytest.mark.parametrize("item", [2, 0.12341])
def test_raise_error(lang, item):
    # Non-string keys are rejected with ValueError.
    with pytest.raises(ValueError):
        _ = lang[item]
|
nilq/baby-python
|
python
|
class Solution:
    """Binary-string addition (add two bit strings without int())."""

    @staticmethod
    def addBinary(a: str, b: str) -> str:
        """Return the binary sum of bit strings *a* and *b*."""
        width = max(len(a), len(b))
        digits, carry = Solution.calculate(a.zfill(width), '', b.zfill(width), width, 0)
        result = digits if carry == 0 else '1' + digits
        return result.zfill(width)

    @staticmethod
    def calculate(a, answer, b, length, rem):
        """Ripple-add the equal-width strings right to left.

        Returns the accumulated digit string and the final carry.
        """
        for idx in reversed(range(length)):
            # bool -> int: each '1' digit contributes one to the column sum.
            total = rem + (a[idx] == '1') + (b[idx] == '1')
            answer = str(total % 2) + answer
            rem = total // 2
        return answer, rem
def print_hi(name):
    """Print a greeting for *name* to stdout."""
    greeting = 'Hi, {0}'.format(name)
    print(greeting)  # Press Ctrl+F8 to toggle the breakpoint.
if __name__ == '__main__':
    print_hi('PyCharm')
    # Bug fix: addBinary() takes two operands; the original call passed only
    # one ("1") and raised TypeError at runtime.
    print(Solution.addBinary("1", "1"))
|
nilq/baby-python
|
python
|
import sys
sys.path.append('/root/csdc3/src/sensors')
import unittest
import time
from sensor_manager import SensorManager
from sensor_constants import *
class Tests(unittest.TestCase):
    """Hardware smoke tests for the satellite sensor stack.

    These drive real I2C/1-wire/GPIO devices through SensorManager, so
    they are only meaningful on the target hardware; -1 return values
    indicate a bus/read failure.
    """

    def setUp(self):
        pass

    def test_ds1624(self):
        # DS1624 temperature sensors: init then read each one.
        ds1624 = [TEMP_PAYLOAD_A, TEMP_BAT_1]
        for sensor in ds1624:
            SensorManager.init_temp_sensor(sensor)
            value = SensorManager.read_temp_sensor(sensor)
            self.assertNotEqual(value, -1)

    def test_ds18b20(self):
        # DS18B20 solar-panel temperature probes.
        ds18b20 = [PANEL0, PANEL1]
        for sensor in ds18b20:
            value = SensorManager.get_panel_data(sensor)
            self.assertNotEqual(value, -1)

    def test_gpio(self):
        # Toggle the payload heater GPIO; the OFF write must succeed.
        for i in range(5):
            SensorManager.gpio_output(PAYLOAD_HTR_A_GPIO, ON)
            time.sleep(0.2)
            retval = SensorManager.gpio_output(PAYLOAD_HTR_A_GPIO, OFF)
            time.sleep(0.2)
            self.assertEqual(True, retval)

    def test_read_mag(self):
        # Magnetometer check is currently disabled (kept for reference).
        """
        SensorManager.init_magnetometer()
        for i in range(5):
            x, y, z = SensorManager.read_magnetometer()
            print(x, y, z)
            time.sleep(1)
            self.assertNotEqual(-1, x)
            self.assertNotEqual(-1, y)
            self.assertNotEqual(-1, z)
        """
        self.assertEqual(1, 1)

    def test_read_power(self):
        # Power sensor check is currently disabled (kept for reference).
        """
        SensorManager.init_power_sensor()
        for i in range(5):
            current, shunt, bus, power = read_power_sensor()
            time.sleep(1)
        SensorManager.stop_power_sensor()
        """
        self.assertEqual(1, 1)

    def test_power_init(self):
        # Power sensor init/register check is currently disabled.
        """
        SensorManager.mux_select(POWER_0)
        SensorManager.init_power_sensor(POWER_0)
        addr = SensorEntropy.addr(POWER_0)
        adc_reg = SensorEntropy.reg(POWER_0)
        bus = SensorManager.bus
        calibration = bus.read_byte_data(addr, power_reg['REG_CALIBRATION'])
        config = bus.read_byte_data(addr, power_reg['REG_CONFIG'])
        self.assertEqual(calibration, 0x1000)
        self.assertEqual(config, 0x00)
        """
        pass
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
import sys
sys.dont_write_bytecode = True
import json
from PyQt5.QtWidgets import QApplication
from models.Authenticate import Dialog
from models.Base import MainWindow
if __name__ == "__main__":
try:
cfg_file = open("config.json","r")
config = json.loads(cfg_file.read())
ip = config['server_ip']
port = config['server_port']
except (IndexError, FileNotFoundError, json.decoder.JSONDecodeError):
cfg_file = open("config.json","w")
demo = {"server_ip" : "127.0.0.1", "server_port" : 65432}
cfg_file.write(json.dumps(demo))
ip = demo['server_ip']
port = demo['server_port']
app = QApplication(sys.argv)
app.aboutToQuit.connect(lambda : sys.exit())
main = MainWindow()
dial = Dialog(main,(ip,port))
dial.exec()
sys.exit(app.exec())
|
nilq/baby-python
|
python
|
from logging import getLogger
from threading import Thread
from time import sleep
from signalrc.ws_transport import WebSocketsTransport
logger = getLogger('signalr.client')
class SignalRClient:
    """Threaded SignalR client over a websocket transport.

    Receives messages on a background thread and dispatches them to
    EventHook subscribers.  Message keys ('R', 'M', 'A', 'I', 'E') follow
    the classic ASP.NET SignalR wire protocol — presumably: result,
    messages, arguments, invocation id, error (verify against server).
    """

    def __init__(self, url, hub, session=None):
        self.url = url
        # Invocation counter; incremented before each send so ids start at 0.
        self._invokes_counter = -1
        self.token = None
        self.id = None
        # Maps invocation id -> {hub_name, method, data} for error reporting.
        self.invokes_data = {}
        self.received = EventHook()
        self.error = EventHook()
        self.starting = EventHook()
        self.stopping = EventHook()
        self.exception = EventHook()
        self.is_open = False
        self._transport = WebSocketsTransport(self.url, session)
        self._message_listener = None
        self.started = False
        self.hub_name = hub
        # Every raw frame goes through both the hub dispatcher and the
        # error handler.
        self.received.add_hooks(self.handle_hub_message, self.handle_error)
        self._hub_handlers = {}

    def handle_hub_message(self, data):
        """Dispatch a decoded frame to per-method hub handlers."""
        # A non-bool 'R' is an invocation result; forward it to the 'R'
        # pseudo-handler if one is registered.
        if 'R' in data and not isinstance(data['R'], bool):
            if 'R' in self._hub_handlers:
                self._hub_handlers['R'].trigger_hooks({'R': data['R']})
        messages = data['M'] if 'M' in data and len(data['M']) > 0 else {}
        for inner_data in messages:
            method = inner_data['M']
            if method in self._hub_handlers:
                arguments = inner_data['A']
                self._hub_handlers[method].trigger_hooks(*arguments)

    def handle_error(self, data):
        """Surface server-side errors with the original call arguments."""
        if 'E' in data:
            invoke_index = int(data.get('I', -1))
            self.error.trigger_hooks({'error': data['E'],
                                      'call_arguments': self.invokes_data.get(invoke_index)})

    def start(self):
        """Negotiate, open the transport and spawn the listener thread."""
        logger.info('Starting connection')
        self.starting.trigger_hooks()
        negotiate_data = self._transport.negotiate(self.hub_name)
        self.token = negotiate_data['ConnectionToken']
        self.id = negotiate_data['ConnectionId']
        self._transport.init_connection(self.token, self.hub_name)
        self.is_open = True
        self._message_listener = Thread(target=self.wrapped_listener)
        self._message_listener.start()
        self.started = True

    def wrapped_listener(self):
        """Background receive loop; closes the client on unhandled errors."""
        while self.is_open:
            try:
                data = self._transport.receive()
                self.received.trigger_hooks(data)
            except Exception as error:
                logger.exception('Failed to receive the data via transport')
                try:
                    self.exception.trigger_hooks(error)
                finally:
                    # Stop the loop even if an exception hook itself raises.
                    self.is_open = False

    def close(self):
        """Stop the listener thread and close the transport (idempotent)."""
        logger.info('Closing connection')
        if self.is_open:
            self.is_open = False
            self._message_listener.join()
            self._transport.close()

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def run_while_open(self):
        """Block until the connection drops; Ctrl-C closes and re-raises."""
        try:
            while self.is_open:
                sleep(0.01)
        except KeyboardInterrupt:
            self.close()
            self.stopping.trigger_hooks()
            raise

    def invoke(self, method, *data):
        """Invoke a hub method, recording the call for error correlation."""
        self._invokes_counter += 1
        self._transport.send({'H': self.hub_name, 'M': method, 'A': data,
                              'I': self._invokes_counter})
        self.invokes_data[self._invokes_counter] = {'hub_name': self.hub_name, 'method': method,
                                                    'data': data}

    def subscribe_to_event(self, event_id, handler):
        """Register *handler* for hub method *event_id* messages."""
        if event_id not in self._hub_handlers:
            self._hub_handlers[event_id] = EventHook()
        self._hub_handlers[event_id].add_hooks(handler)
class EventHook:
    """Minimal multicast callback registry."""

    def __init__(self):
        self._handlers = []

    def add_hooks(self, *handlers):
        """Register one or more callables; returns self for chaining."""
        for handler in handlers:
            self._handlers.append(handler)
        return self

    def trigger_hooks(self, *args, **kwargs):
        """Invoke every registered handler with the given arguments."""
        for handler in self._handlers:
            handler(*args, **kwargs)
|
nilq/baby-python
|
python
|
import shutil
from fastapi import APIRouter, File, HTTPException, UploadFile
from models.migration_models import ChowdownURL
from services.migrations.chowdown import chowdown_migrate as chowdow_migrate
from services.migrations.nextcloud import migrate as nextcloud_migrate
from app_config import MIGRATION_DIR
from utils.snackbar import SnackResponse
router = APIRouter(tags=["Migration"])
# Chowdown
@router.post("/api/migration/chowdown/repo/")
def import_chowdown_recipes(repo: ChowdownURL):
""" Import Chowsdown Recipes from Repo URL """
try:
report = chowdow_migrate(repo.url)
return SnackResponse.success(
"Recipes Imported from Git Repo, see report for failures.",
additional_data=report,
)
except:
return HTTPException(
status_code=400,
detail=SnackResponse.error(
"Unable to Migrate Recipes. See Log for Details"
),
)
# Nextcloud
@router.get("/api/migration/nextcloud/available/")
def get_avaiable_nextcloud_imports():
""" Returns a list of avaiable directories that can be imported into Mealie """
available = []
for dir in MIGRATION_DIR.iterdir():
if dir.is_dir():
available.append(dir.stem)
elif dir.suffix == ".zip":
available.append(dir.name)
return available
@router.post("/api/migration/nextcloud/{selection}/import/")
def import_nextcloud_directory(selection: str):
""" Imports all the recipes in a given directory """
return nextcloud_migrate(selection)
@router.delete("/api/migration/{file_folder_name}/delete/")
def delete_migration_data(file_folder_name: str):
""" Removes migration data from the file system """
remove_path = MIGRATION_DIR.joinpath(file_folder_name)
if remove_path.is_file():
remove_path.unlink()
elif remove_path.is_dir():
shutil.rmtree(remove_path)
else:
SnackResponse.error("File/Folder not found.")
return SnackResponse.info(f"Migration Data Remove: {remove_path.absolute()}")
@router.post("/api/migration/upload/")
def upload_nextcloud_zipfile(archive: UploadFile = File(...)):
""" Upload a .zip File to later be imported into Mealie """
dest = MIGRATION_DIR.joinpath(archive.filename)
with dest.open("wb") as buffer:
shutil.copyfileobj(archive.file, buffer)
if dest.is_file:
return SnackResponse.success("Migration data uploaded")
else:
return SnackResponse.error("Failure uploading file")
|
nilq/baby-python
|
python
|
"""Top-level package for django-extra-field-validation."""
__author__ = """Tonye Jack"""
__email__ = "jtonye@ymail.com"
__version__ = "1.1.1"
from .field_validation import FieldValidationMixin
__all__ = ["FieldValidationMixin"]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import random
import rospy
from std_msgs.msg import UInt32
if __name__ == '__main__':
    # Publish a random integer in [0, 1000] on the 'rand_int' topic at 10 Hz
    # until the ROS node is shut down.
    random.seed()
    rospy.init_node('random')
    pub = rospy.Publisher('rand_int', UInt32, queue_size = 1)
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        pub.publish(random.randint(0, 1000))
        rate.sleep()
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.5 on 2021-04-23 10:02
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial schema for the cse app's semester/subject
    # models; do not edit logic by hand.
    initial = True

    dependencies = [
        ('cse', '0011_delete_dev'),
    ]

    operations = [
        migrations.CreateModel(
            name='semester',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sem', models.CharField(blank=True, max_length=200, null=True)),
                ('subjects', models.TextField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='subject',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=200, null=True)),
                ('book', models.TextField(blank=True, null=True)),
                ('other', models.TextField(blank=True, null=True)),
            ],
        ),
    ]
|
nilq/baby-python
|
python
|
# Generated by Django 2.1 on 2018-08-18 02:03
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration renaming ItemInventory -> Inventory in the
    # inventories app; keeps existing table data.
    dependencies = [
        ('users', '0001_initial'),
        ('inventories', '0001_initial'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='ItemInventory',
            new_name='Inventory',
        ),
    ]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
The view ports widget
@author: Chris Scott
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
import logging
from PySide2 import QtWidgets
from . import rendererSubWindow
class ViewPortsWidget(QtWidgets.QWidget):
    """
    Class for holding view ports (renderer windows).

    View ports are laid out in a 2-column grid; the widget grows/shrinks
    the set of RendererWindow children on demand.
    """

    def __init__(self, parent=None):
        super(ViewPortsWidget, self).__init__(parent)
        self._logger = logging.getLogger(__name__)
        # Current RendererWindow instances, in creation order.
        self._viewPorts = []
        self._layout = QtWidgets.QGridLayout(self)
        # NOTE(review): assumes *parent* is the application main window,
        # as it is forwarded to RendererWindow — confirm at call sites.
        self._mainWindow = parent

    def numViewPortsChanged(self, num):
        """Add/remove view ports so that exactly *num* exist."""
        currentNum = len(self._viewPorts)
        if num == currentNum:
            self._logger.debug("No change in number of view ports ({0})".format(num))
        else:
            if num > currentNum:
                self._logger.debug("Adding more view ports ({0} was {1})".format(num, currentNum))
                for i in range(currentNum, num):
                    # 2-column grid: index i goes to row i//2, column i%2.
                    row = i // 2
                    col = i % 2
                    self._logger.debug("Adding view port with index {0} ({1}, {2})".format(i, row, col))
                    rw = rendererSubWindow.RendererWindow(self._mainWindow, i, parent=self)
                    self._viewPorts.append(rw)
                    self._layout.addWidget(rw, row, col)
            else:
                self._logger.debug("Removing view ports ({0} was {1})".format(num, currentNum))
                # Remove from the end; deleteLater defers destruction to the
                # Qt event loop.
                while len(self._viewPorts) > num:
                    rw = self._viewPorts.pop()
                    self._layout.removeWidget(rw)
                    rw.deleteLater()

        # for rw in self._viewPorts:
        #     rw.outputDialog.imageTab.imageSequenceTab.refreshLinkedRenderers()

    def getViewPorts(self):
        """Return the list of current view ports."""
        return self._viewPorts
|
nilq/baby-python
|
python
|
from .views import SearchContact, markSpam, detailView
from django.urls import path
# URL routes for the contacts app.
urlpatterns = [
    path('Search/', SearchContact.as_view()),  # class-based search endpoint
    path('mark/<int:id>', markSpam, ),         # mark a contact as spam by id
    path('Detail/<int:id>', detailView),       # contact detail view by id
]
|
nilq/baby-python
|
python
|
from . import data
from . import datasets
from . import layers
from . import losses
from . import metrics
from . import models
from . import optimizers
from . import utils
|
nilq/baby-python
|
python
|
"""!
@brief Unit-test runner for core wrapper.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
import unittest
from pyclustering.tests.suite_holder import suite_holder
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.core.tests import ut_package as core_package_unit_tests
import os
import warnings
from pyclustering.core.definitions import PATH_PYCLUSTERING_CCORE_LIBRARY
from pyclustering.core.wrapper import ccore_library
class remove_library(object):
    """!
    @brief Decorator for tests where ccore library should be removed.

    Temporarily renames the compiled C/C++ core away, re-initializes the
    wrapper (forcing the pure-Python fallback), runs the wrapped test,
    then restores the library regardless of the outcome.
    """

    def __init__(self, call_object):
        # The wrapped test callable.
        self.call_object = call_object

    def __call__(self, *args):
        test_result = True
        try:
            os.rename(PATH_PYCLUSTERING_CCORE_LIBRARY, PATH_PYCLUSTERING_CCORE_LIBRARY + "_corrupted")
            warnings.filterwarnings("ignore", category=ResourceWarning)
            ccore_library.initialize()
            # NOTE(review): passes the args tuple as one positional argument
            # (not *args) — confirm wrapped tests expect this shape.
            self.call_object(args)
        except os.error:
            # No permission to rename the shared library: skip, nothing to
            # restore yet.
            warnings.warn("Test skipped: no rights to rename C/C++ pyclustering library for testing.")
            return
        except:
            # Record the failure; the AssertionError is raised only after
            # the library has been restored below.
            test_result = False

        os.rename(PATH_PYCLUSTERING_CCORE_LIBRARY + "_corrupted", PATH_PYCLUSTERING_CCORE_LIBRARY)
        ccore_library.initialize()
        warnings.filterwarnings("default", category=ResourceWarning)

        if test_result is False:
            raise AssertionError("Test failed")
class corrupt_library(object):
    """!
    @brief Decorator for tests where ccore library should be corrupted.

    Renames the real C/C++ core aside, writes a garbage file in its place,
    re-initializes the wrapper against the corrupted binary, runs the
    wrapped test, then restores the original library.
    """

    def __init__(self, call_object):
        # The wrapped test callable.
        self.call_object = call_object

    def __create_corrupted_library(self, filepath):
        # Replace the shared library with a small invalid binary payload.
        with open(filepath, 'wb') as binary_file_descriptor:
            binary_file_descriptor.write(bytes("corrupted binary library", 'UTF-8'))

    def __remove_corrupted_library(self, filepath):
        os.remove(filepath)

    def __call__(self, *args):
        try:
            os.rename(PATH_PYCLUSTERING_CCORE_LIBRARY, PATH_PYCLUSTERING_CCORE_LIBRARY + "_corrupted")
        except os.error:
            # No permission to rename the shared library: skip the test.
            warnings.warn("Test skipped: no rights to rename C/C++ pyclustering library for testing.")
            return

        self.__create_corrupted_library(PATH_PYCLUSTERING_CCORE_LIBRARY)
        warnings.filterwarnings("ignore", category=ResourceWarning)
        ccore_library.initialize()
        # NOTE(review): unlike remove_library, a test failure here is not
        # caught — an exception would leave the corrupted file in place.
        self.call_object(args)

        self.__remove_corrupted_library(PATH_PYCLUSTERING_CCORE_LIBRARY)
        os.rename(PATH_PYCLUSTERING_CCORE_LIBRARY + "_corrupted", PATH_PYCLUSTERING_CCORE_LIBRARY)
        ccore_library.initialize()
        warnings.filterwarnings("default", category=ResourceWarning)
class core_tests(suite_holder):
    """!
    @brief Suite holder that aggregates the core wrapper unit tests.
    """

    def __init__(self):
        super().__init__()
        core_tests.fill_suite(self.get_suite())

    @staticmethod
    def fill_suite(core_suite):
        # Load every test case defined in the core package test module.
        core_suite.addTests(unittest.TestLoader().loadTestsFromModule(core_package_unit_tests))
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#===============================================================================#
#title :MangaPark.py #
#description :contains the MangaPark class #
#author :August B. Sandoval (asandova) #
#date :2020-3-2 #
#version :0.3 #
#usage :defineds the MangaPark class #
#notes : #
#python_version :3.6.9 #
#===============================================================================#
from .Chapter import Chapter
from .TitleSource import TitleSource
from .Stream import Stream
from bs4 import BeautifulSoup
import requests, re, json, os
class MangaPark_Source(TitleSource):
    """Scraper/downloader for titles hosted on mangapark.net.

    Parses the already-fetched page in ``self.site_html`` into title
    metadata and per-version ("stream") chapter lists, and downloads
    chapters to ``<save_location>/<title>/<stream>/``.
    """

    # Bidirectional map between MangaPark stream (version) names and the
    # numeric stream ids used in the page markup.
    Versions = {
        "Duck" : 4,
        4 : "Duck",
        "Rock" : 6,
        6 : "Rock",
        "Fox" : 1,
        1 : "Fox",
        "Panda" : 3,
        3 : "Panda"
    }

    def __init__(self):
        TitleSource.__init__(self)
        self.site_url = "https://www.mangapark.net"
        self.site_domain = "https://www.mangapark.net"

    def from_dict(self, dictionary):
        """Restore this title's state from a dict produced by to_dict()."""
        self.site_url = dictionary["Site URL"]
        self.site_domain = dictionary["Site Domain"]
        self.manga_extention = dictionary["Manga Extention"]
        self.Title = dictionary["Title"]
        # Directory name is the title with spaces replaced by underscores.
        self.directory = self.Title.replace(' ', '_')
        self.summary = dictionary["Summary"]
        self.authors = dictionary["Author(s)"]
        self.artists = dictionary["Artist(s)"]
        self.genres = dictionary["Genre(s)"]
        self.cover_location = dictionary["Cover Location"]
        for s in dictionary["Manga Stream(s)"]:
            stream = Stream()
            stream.from_dict( s )
            self.streams.append( stream )

    def to_dict(self):
        """Serialize this title (including streams) to a JSON-able dict."""
        dic = {}
        dic["Site URL"] = self.site_url
        dic["Site Domain"] = self.site_domain
        dic["Manga Extention"] = self.manga_extention
        dic["Title"] = self.Title
        dic["Summary"] = self.summary
        dic["Author(s)"] = self.authors
        dic["Artist(s)"] = self.artists
        dic["Genre(s)"] = self.genres
        dic["Cover Location"] = self.cover_location
        dic["Manga Stream(s)"] = []
        for s in self.streams:
            dic["Manga Stream(s)"].append( s.to_dict() )
        return dic

    def Download_Manga(self, location="",keep=False):
        """Download every chapter of every stream.

        When *keep* is True, record chapter numbers per stream in
        ``self.keep`` so they are retained by later cleanup passes.
        """
        save_location = self.save_location
        if location != "":
            # NOTE(review): "==" is a comparison, not assignment — the
            # *location* override is currently a no-op; confirm intent.
            save_location == location
        for s in self.streams:
            for c in s.chapters:
                if keep == True:
                    if self.keep.get(s) == False:
                        self.keep[s.name] = []
                        self.keep[s.name].append(c.get_chapter_number)
                    else:
                        if self.keep[s.name].count(c.get_chapter_number) == 0:
                            self.keep[s.name].append(c.get_chapter_number)
                #title = self.Title.replace(" ", '_')
                stream_name = s.name.replace(' ', '_')
                c.download_chapter( save_location +'/'+self.directory+'/'+ stream_name)

    def Download_Manga_stream(self, stream_id, location="",Keep=False):
        """Download every chapter of the stream with id *stream_id*."""
        save_location = self.save_location
        if location != "":
            # NOTE(review): comparison instead of assignment here too.
            save_location == location
        for s in self.streams:
            if s.id == stream_id:
                for c in s.chapters:
                    if Keep == True:
                        if self.keep.get(s) == False:
                            self.keep[s.name] = []
                            self.keep[s.name].append(c.get_chapter_number)
                        else:
                            if self.keep[s.name].count(c.get_chapter_number) == 0:
                                self.keep[s.name].append(c.get_chapter_number)
                    #title = self.Title.replace(" ", '_')
                    stream_name = self.streams[stream_id].name.replace(' ', '_')
                    c.download_chapter( save_location +'/'+self.directory+'/'+ stream_name)
        return

    def Download_Manga_Chapter(self, stream_id, chapter_number, location="", KillDownload=[False]):
        """Download one chapter; returns the downloader's status code,
        -1 when the chapter is not found, -2 when the stream is not found.

        *KillDownload* is a mutable flag list polled by the downloader to
        allow cancellation from another thread.
        """
        save_location = self.save_location
        if location != "":
            # NOTE(review): comparison instead of assignment here too.
            save_location == location
        for s in self.streams:
            if s.id == stream_id:
                for k in s.chapters.keys():
                    if s.chapters[k].get_chapter_number() == chapter_number:
                        #title = self.Title.replace(" ", '_')
                        stream = self.get_stream_with_id(stream_id)
                        stream_name = stream.name.replace(' ', '_')
                        code = s.chapters[k].download_chapter(save_location +'/'+self.directory+'/'+ stream_name,KillDownload)
                        return code
                return -1
        return -2

    def _extract_cover(self):
        """Download the cover image into the title directory."""
        cover_data = self.site_html.find('div', class_="w-100 cover")
        if os.path.exists(self.save_location+'/'+self.directory) == False:
            os.mkdir(self.save_location+'/'+self.directory)
        # Cover links are protocol-relative ("//..."), hence the prefix.
        cover_image_link = cover_data.img["src"]
        cover = requests.get("https:"+ cover_image_link)
        # Find the last '.' to preserve the original file extension.
        ext_loc = 0
        for i in range(0,len(cover_image_link)):
            if cover_image_link[i] == '.':
                ext_loc = i
        extention = cover_image_link[ext_loc:]
        if cover.ok != True:
            print("Failed to download cover")
            return
        self.cover_location = self.save_location+'/'+self.directory+"/cover"+extention
        with open(self.cover_location, 'wb') as f:
            f.write(cover.content)
            f.close()

    def _extract_title(self):
        """Parse the page title and derive the on-disk directory name."""
        self.Title = self.site_html.find('div', class_="pb-1 mb-2 line-b-f hd").h2.a.text
        self.directory = self.Title.replace(' ', '_')

    def _extract_summary(self):
        """Parse the summary paragraph."""
        s = self.site_html.find('p', class_='summary').text
        self.summary = s

    def _extract_managa_info(self):
        """Parse authors, artists and genres from the attributes table."""
        table = self.site_html.find('table', class_="attr")
        Author_data = table.find('th', text="Author(s)").parent
        Artist_data = table.find('th', text="Artist(s)").parent
        Genre_data = table.find('th', text="Genre(s)").parent
        for a in Author_data.find_all('a', target='_blank'):
            self.authors.append( a.text )
        for a in Artist_data.find_all('a', target="_blank"):
            self.artists.append( a.text )
        for g in Genre_data.find_all('a', target='_blank'):
            # Some genre links wrap their label in <b>.
            if g.b != None:
                self.genres.append(g.b.text)
            else:
                self.genres.append(g.text)

    def _extract_streams(self):
        """Parse every stream (site version) and its chapter list.

        Chapter numbers are recovered from the link text by stripping an
        optional "Vol N" prefix and scanning for the first numeric run
        (possibly containing one '.').
        """
        stream_list = self.site_html.find('div', class_='book-list-1')
        streams = stream_list.find_all('div', class_='mt-3 stream')
        streams += stream_list.find_all('div', class_='mt-3 stream collapsed')
        for s in streams:
            # Stream id is the numeric suffix of the element id.
            stream_id_str = s['id'].split('_')
            stream_id = int(stream_id_str[-1])
            version_tag = "ml-1 stream-text-" + str(stream_id)
            version_name = s.find('span', class_=version_tag).text
            manga_stream = Stream(version_name, stream_id)
            chapters = s.find_all('a', class_="ml-1 visited ch")
            for c in chapters:
                # The "all" sibling link points at the full chapter reader.
                link = c.parent.parent
                link = link.find('a', text="all")["href"]
                number_str = c.text
                number_str_elements = re.compile("[vV]ol(ume)*[.]*[ ]*[0-9]+[ ]").split(number_str)
                #print(number_str_elements)
                number_start = -1
                number_end = -1
                #print(number_str_elements[-1])
                for num in range(0, len(number_str_elements[-1])):
                    if number_start == -1 and number_str_elements[-1][num].isnumeric():
                        number_start = num
                    elif number_start != -1 and number_str_elements[-1][num].isnumeric() == False:
                        # Allow a single embedded separator (e.g. "10.5");
                        # NOTE(review): num+1 can index past the end when the
                        # number ends the string — confirm inputs.
                        if number_str_elements[-1][num+1].isnumeric() == True:
                            continue
                        else:
                            number_end = num
                            #print(number_end)
                            break
                #print(number_str_elements)
                #print(f"start Number: {number_start}\tend Number: {number_end}")
                if number_end != -1:
                    number = float(number_str_elements[-1][number_start:number_end])
                elif number_end == -1 and number_start == -1:
                    print("encountered non-numbered chapter")
                    continue
                else:
                    number = float(number_str_elements[-1][number_start:])
                # Chapter name: either after "<number>: ", or from the
                # adjacent title element, trimmed of leading non-letters
                # and trailing spaces.
                number_str_elements = number_str_elements[-1].split(': ')
                name = ""
                if len( number_str_elements) > 1:
                    name = number_str_elements[-1]
                else:
                    #if stream_id == 4:
                        #print(c.parent.parent.prettify())
                    Title_tag = c.parent.parent.find('div', class_="d-none d-md-flex align-items-center ml-0 ml-md-1 txt")
                    if Title_tag != None:
                        #print(Title_tag.text)
                        name = Title_tag.text
                        start = 0
                        for c in name:
                            if c.isalpha() == True:
                                break
                            start += 1
                        name = name[start:]
                        #print(name)
                    else:
                        name = ""
                if len(name) > 0:
                    end = len(name)-1
                    for i in range( len(name)-1, -1,-1 ):
                        #print(name[i])
                        if name[i] != ' ':
                            end = i+1
                            break
                    name = name[0:end]
                chap = Chapter(name, number)
                chap.set_link( self.site_domain + link)
                #print(f"adding chapter {chap.get_full_title()}")
                manga_stream.add_chapter(chap)
            #print("adding stream " + manga_stream.name)
            self.add_stream(manga_stream)
        print("extraction of streams: Complete")

    def __str__(self):
        """Human-readable dump of metadata and all streams."""
        s = "----------Manga Park----------\n"
        s += "Title: " + self.Title + "\n"
        s += "Author(s): "
        for a in self.authors:
            s += a + " | "
        s += "\nArtist(s): "
        for a in self.artists:
            s += a + ' | '
        s+= "\nGenre(s): "
        for g in self.genres:
            s += g + ' | '
        s += "\nSummary: "+ self.summary + "\n"
        for stream in self.streams:
            s += str(stream) + "\n"
        return s
"""
if __name__ == "__main__":
#test = MangaPark_Source()
test2 = MangaPark_Source()
test2.set_default_save_location('./Manga')
#test.request_manga("https://mangapark.net/manga/ryoumin-0-nin-start-no-henkyou-ryoushusama-fuurou")
test2.request_manga("https://mangapark.net/manga/tensei-shitara-ken-deshita")
test2.extract_manga()
with open('test.json', 'w') as f:
f.write( json.dumps( test2.to_dict(),indent=1 ) )
test2.Download_Manga_Chapter(stream_id=MangaPark_Source.Versions["Fox"],chapter_number=1 , location="./Manga")
"""
|
nilq/baby-python
|
python
|
__all__ = ['HttpCacheControlMixin']
class HttpCacheControlMixin:
    """View mixin that stamps a Cache-Control max-age header on responses.

    Applies only to 200/304 responses and only when a max-age is
    configured (class attribute or overridden getter).
    """

    # Seconds to advertise in Cache-Control; None/0 disables the header.
    http_cache_control_max_age = None

    def get_http_cache_control_max_age(self):
        """Hook for computing the max-age dynamically per request."""
        return self.http_cache_control_max_age

    def dispatch(self, *args, **kwargs):
        response = super().dispatch(*args, **kwargs)
        if response.status_code not in (200, 304):
            return response
        max_age = self.get_http_cache_control_max_age()
        if max_age:
            response['Cache-Control'] = 'max-age=%s' % max_age
        return response
|
nilq/baby-python
|
python
|
from __future__ import annotations
# python
import logging
import os
import random
import importlib
import json
import datetime
from halo_app.classes import AbsBaseClass
from halo_app.app.context import HaloContext, InitCtxFactory
from halo_app.infra.providers.providers import get_provider,ONPREM
from halo_app.app.response import HaloResponseFactory, AbsHaloResponse
from halo_app.entrypoints.client_type import ClientType
from halo_app.infra.providers.util import ProviderUtil
from .notification import Notification, ValidError
from .request import AbsHaloRequest
from .result import Result
from ..error import Error
from ..reflect import Reflect
from ..settingsx import settingsx
settings = settingsx()
logger = logging.getLogger(__name__)
def strx(str1):
    """Best-effort string coercion.

    Returns ``str1.encode('utf-8').strip()`` (bytes) when *str1* is a
    string, ``str(str1)`` for any other truthy object, and ``''`` for
    falsy input.
    """
    if not str1:
        return ''
    try:
        return str1.encode('utf-8').strip()
    except Exception:
        # Objects without .encode (or any other failure) fall back to str().
        return str(str1)
class Util(AbsBaseClass):
    """Static helpers shared across the app layer.

    Covers HaloContext creation, client-type / response-factory resolution
    (both overridable via settings), timeout lookup, debug-log gating, and
    building JSON payloads for errors, exceptions and validation failures.
    """

    @classmethod
    def init_halo_context(cls,env:dict=None):
        # Honor an optional project-supplied context class from settings,
        # otherwise fall back to the default initial-context factory.
        if settings.HALO_CONTEXT_CLASS:
            context = Reflect.instantiate(settings.HALO_CONTEXT_CLASS,HaloContext,env)
        else:
            context = InitCtxFactory.get_initial_context(env)
        return context

    @classmethod
    def get_client_type(cls)->ClientType:
        # Optional override of the ClientType implementation via settings.
        if settings.HALO_CLIENT_CLASS:
            client_type_ins = Reflect.instantiate(settings.HALO_CLIENT_CLASS,ClientType)
        else:
            client_type_ins = ClientType()
        return client_type_ins

    @classmethod
    def get_response_factory(cls)->HaloResponseFactory:
        # Optional override of the response factory via settings.
        if settings.HALO_RESPONSE_FACTORY_CLASS:
            response_factory_ins = Reflect.instantiate(settings.HALO_RESPONSE_FACTORY_CLASS, HaloResponseFactory)
        else:
            response_factory_ins = HaloResponseFactory()
        return response_factory_ins

    @staticmethod
    def create_result_response(halo_request:AbsHaloRequest, result:Result)->AbsHaloResponse:
        # for result - OK or FAIL
        """Build a response from a command Result: the payload on success,
        the Result's error object on failure."""
        response_factory = Util.get_response_factory()
        success = result.success
        if success:
            data = result.payload
        else:
            data = result.error
        return response_factory.get_halo_response(halo_request,success, data)

    @staticmethod
    def create_notification_response(halo_request:AbsHaloRequest, notification:Notification) -> AbsHaloResponse:
        # for validation errors
        """Build a response from a validation Notification; success means
        the notification carries no errors."""
        response_factory = Util.get_response_factory()
        success = not notification.hasErrors()
        return response_factory.get_halo_response(halo_request, success, notification.errors)

    @staticmethod
    def create_payload_response(halo_request: AbsHaloRequest,data) -> AbsHaloResponse:
        # for query result
        """Wrap a successful query result payload in a response."""
        response_factory = Util.get_response_factory()
        return response_factory.get_halo_response(halo_request, True, data)

    @staticmethod
    def create_response(halo_request,success, data=None) -> AbsHaloResponse:
        # for async command
        """Generic response builder with an explicit success flag."""
        response_factory = Util.get_response_factory()
        return response_factory.get_halo_response(halo_request, success, data)

    @staticmethod
    def create_exception_response(halo_request: AbsHaloRequest, e: Exception) -> AbsHaloResponse:
        # for exception
        """Wrap an unexpected exception in a failed response carrying an Error."""
        response_factory = Util.get_response_factory()
        success = False
        data = Error("exception thrown!",e)
        return response_factory.get_halo_response(halo_request, success, data)

    @classmethod
    def get_timeout(cls, halo_context:HaloContext):
        """Return the timeout carried by the context, falling back to the
        configured service connect timeout.

        :param halo_context: current request context
        :return: timeout in seconds
        """
        if "timeout" in halo_context.keys():
            timeout = halo_context.get("timeout")
            if timeout:
                return timeout
        return settings.SERVICE_CONNECT_TIMEOUT_IN_SC

    @classmethod
    def get_halo_timeout1(cls, halo_request):
        """Return the timeout from the request's context, falling back to the
        configured service connect timeout.

        NOTE(review): near-duplicate of get_timeout (same logic applied to
        halo_request.context) — candidate for consolidation.

        :param halo_request: request whose context may carry a timeout
        :return: timeout in seconds
        """
        if "timeout" in halo_request.context.keys():
            timeout = halo_request.context.get("timeout")
            if timeout:
                return timeout
        return settings.SERVICE_CONNECT_TIMEOUT_IN_SC

    """
    env = {HaloContext.items[HaloContext.USER_AGENT]: x_user_agent,
               HaloContext.items[HaloContext.REQUEST]: request_id,
               HaloContext.items[HaloContext.CORRELATION]: x_correlation_id,
               HaloContext.items[HaloContext.DEBUG_LOG]: dlog}
        if api_key:
            env[HaloContext.items[HaloContext.API_KEY]] = api_key
    """

    @staticmethod
    def get_func_name():
        """Return the function name from the cloud provider when running on
        one, otherwise the configured FUNC_NAME."""
        provider = get_provider()
        if provider.PROVIDER_NAME != ONPREM:
            return provider.get_func_name()
        return settings.FUNC_NAME

    @staticmethod
    def get_func_ver():
        """Return the function version from the cloud provider when running
        on one, otherwise the configured FUNC_VER."""
        provider = get_provider()
        if provider.PROVIDER_NAME != ONPREM:
            return provider.get_func_ver()
        return settings.FUNC_VER

    @classmethod
    def get_system_debug_enabled(cls):
        """Return 'true'/'false' for sampled debug logging.

        When the DEBUG_LOG env var or the provider debug param is on, enable
        debug logs for a random sample of requests (rate from settings).
        """
        # check if env var for sampled debug logs is on and activate for percentage in settings (5%)
        if ('DEBUG_LOG' in os.environ and os.environ['DEBUG_LOG'] == 'true') or (ProviderUtil.get_debug_param() == 'true'):
            rand = random.random()
            if settings.LOG_SAMPLE_RATE > rand:
                return 'true'
        return 'false'

    @classmethod
    def isDebugEnabled(cls, halo_context):
        """True when debug logging applies to this request.

        :param halo_context: current request context
        :return: bool
        """
        # disable debug logging by default, but allow override via env variables
        # or if enabled via forwarded request context or if debug flag is on
        if halo_context.get(
                HaloContext.DEBUG_LOG) == 'true' or cls.get_system_debug_enabled() == 'true':
            return True
        return False

    @staticmethod
    def json_error_response(halo_context, clazz,err:Error):  # code, msg, requestId):
        """Build the JSON error payload for a domain Error.

        NOTE(review): near-duplicate of json_exception_response (this one
        unwraps err.cause first) — candidates for consolidation.

        :param halo_context: current request context (for trace id / debug)
        :param clazz: module path expected to define an ErrorMessages class
        :param err: domain Error wrapping the causing exception
        :return: dict payload
        """
        module = importlib.import_module(clazz)
        my_class = getattr(module, 'ErrorMessages')
        msgs = my_class()
        e = err.cause
        error_code, message = msgs.get_code(e)
        # Prefer the most specific detail available on the exception.
        error_detail = type(e)
        e_msg = err.message
        if hasattr(e, 'detail'):
            error_detail = e.detail
        elif hasattr(e, 'original_exception'):
            error_detail = Util.get_detail(e.original_exception)
        else:
            if hasattr(e, 'message'):
                e_msg = e.message
            else:
                e_msg = str(e)
            if e_msg is not None and e_msg != 'None' and e_msg != "":
                error_detail = e_msg
        #@todo check when to use data
        error_data = {}
        if hasattr(e, 'view'):
            error_data = json.dumps(e.data)
        # trace_id is the forwarded correlation id from the context.
        payload = {"error":
                       {"error_code": error_code, "error_message": message, "error_detail": error_detail,"timestamp": datetime.datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)"),
                        "view": error_data, "trace_id": halo_context.get(HaloContext.items[HaloContext.CORRELATION])}
                   }
        # Stack trace and full context only when debug logging is enabled.
        if Util.isDebugEnabled(halo_context) and hasattr(e, 'stack'):
            payload["stack"] = json.dumps(e.stack)
            payload["context"] = json.dumps(halo_context.table)
        return payload

    @staticmethod
    def json_exception_response(halo_context, clazz, e):  # code, msg, requestId):
        """Build the JSON error payload for a raw exception.

        :param halo_context: current request context (for trace id / debug)
        :param clazz: module path expected to define an ErrorMessages class
        :param e: the exception
        :return: dict payload
        """
        module = importlib.import_module(clazz)
        my_class = getattr(module, 'ErrorMessages')
        msgs = my_class()
        error_code, message = msgs.get_code(e)
        # Prefer the most specific detail available on the exception.
        error_detail = type(e)
        e_msg = ""
        if hasattr(e, 'detail'):
            error_detail = e.detail
        elif hasattr(e, 'original_exception'):
            error_detail = Util.get_detail(e.original_exception)
        else:
            if hasattr(e, 'message'):
                e_msg = e.message
            else:
                e_msg = str(e)
            if e_msg is not None and e_msg != 'None' and e_msg != "":
                error_detail = e_msg
        #@todo check when to use data
        error_data = {}
        if hasattr(e, 'view'):
            error_data = json.dumps(e.data)
        payload = {"error":
                       {"error_code": error_code, "error_message": message, "error_detail": error_detail,"timestamp": datetime.datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)"),
                        "view": error_data, "trace_id": halo_context.get(HaloContext.items[HaloContext.CORRELATION])}
                   }
        # Stack trace and full context only when debug logging is enabled.
        if Util.isDebugEnabled(halo_context) and hasattr(e, 'stack'):
            payload["stack"] = json.dumps(e.stack)
            payload["context"] = json.dumps(halo_context.table)
        return payload

    @staticmethod
    def get_detail(e):
        """Recursively join the messages of a chain of original_exceptions
        into a single ':'-separated string."""
        detail = None
        if e.original_exception:
            detail = Util.get_detail(e.original_exception)
        if detail:
            return str(e)+':'+detail
        return str(e)

    @staticmethod
    def json_notification_response(halo_context, errors:[ValidError]):  # code, msg, requestId):
        """Build the JSON payload for a list of validation errors.

        :param halo_context: current request context (for trace id / debug)
        :param errors: list of ValidError instances
        :return: dict payload with one entry per field error
        """
        default_message = 'A Validation error occurred!'
        #@todo set validation error code
        error_code = "validation"
        payload = {
                    "error_code": error_code,
                    "error_message": default_message,
                    "timestamp": datetime.datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)"),
                    "trace_id": halo_context.get(HaloContext.items[HaloContext.CORRELATION]),
                    "errors": [],
                   }
        for error in errors:
            payload['errors'].append({"name": error.name,"error": error.message})
        # Full context only when debug logging is enabled.
        if Util.isDebugEnabled(halo_context):
            payload["context"] = json.dumps(halo_context.table)
        return payload
|
nilq/baby-python
|
python
|
"""A helper rule for testing detect_root function."""
load("@rules_foreign_cc//tools/build_defs:detect_root.bzl", "detect_root")
def _impl(ctx):
    # Materialize the detected root of ctx.attr.srcs as the content of the
    # declared output file, so a golden/diff test can assert on it.
    detected_root = detect_root(ctx.attr.srcs)
    out = ctx.actions.declare_file(ctx.attr.out)
    ctx.actions.write(
        output = out,
        content = detected_root,
    )
    return [DefaultInfo(files = depset([out]))]

# Test-only rule: writes detect_root(srcs) into a text file named `out`.
detect_root_test_rule = rule(
    implementation = _impl,
    attrs = {
        "srcs": attr.label(mandatory = True),
        "out": attr.string(mandatory = True),
    },
)
|
nilq/baby-python
|
python
|
import sys
import json
from .kafka import Consumer
from .postgres import PGClient
from .model import URLStatus
if __name__ == '__main__':
    try:
        # Pump messages from Kafka into Postgres until interrupted; both
        # clients are context managers so connections close on exit.
        with PGClient() as pg_client, Consumer() as kafka_consumer:
            # TODO: change to subscript
            # TODO: try https://github.com/aio-libs/aiokafka
            while True:
                msg = kafka_consumer.consume()
                if msg:
                    print(msg.decode('utf-8'))
                    # Each message is a JSON-encoded URLStatus record.
                    url_status = URLStatus(**json.loads(msg.decode('utf-8')))
                    pg_client.insert(url_status)
    except KeyboardInterrupt:
        print('Ctrl+C to exit...')
        sys.exit()
|
nilq/baby-python
|
python
|
import os
import neat
import pygame
from bird import Bird
from pipe import Pipe
from base import Base
from background import Background
class Game:
    """Flappy-Bird clone whose birds are steered by NEAT neural networks.

    The lists ``birds``, ``nets`` and ``ge`` are kept strictly parallel:
    index i always refers to one bird, its network and its genome.
    """

    WIN_WIDTH = 500
    WIN_HEIGHT = 800

    def __init__(self):
        self.isRunning = True
        self.score = 0
        self.birds = []
        self.nets = []
        self.ge = []
        self.base = Base(730)
        self.pipes = []
        self.background = Background()
        pygame.font.init()
        self.font = pygame.font.Font(pygame.font.get_default_font(), 50)
        self.win = pygame.display.set_mode((self.WIN_WIDTH, self.WIN_HEIGHT))

    def draw_score(self):
        """Render the score in the top-right corner of the window."""
        text = self.font.render(f"Score: {self.score}", 1, (255, 255, 255))
        self.win.blit(text, (self.WIN_WIDTH - 10 - text.get_width(), 10))

    def draw_game(self):
        """Redraw the full scene: background, birds, pipes, base, score."""
        self.background.draw(self.win)
        for bird in self.birds:
            bird.draw(self.win)
        for pipe in self.pipes:
            pipe.draw(self.win)
        self.base.draw(self.win)
        self.draw_score()
        pygame.display.update()

    def handle_events(self):
        """Process the pygame event queue; window close stops the run loop."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.isRunning = False

    def kill_bird(self, bird_idx):
        """Penalize the genome at *bird_idx* and remove the bird from all
        parallel lists."""
        self.ge[bird_idx].fitness -= 1
        self.birds.pop(bird_idx)
        self.nets.pop(bird_idx)
        # Bug fix: ge was never popped, so after any kill ge[bird_idx]
        # addressed the wrong genome and genomes leaked across generations.
        self.ge.pop(bird_idx)

    def update_score(self):
        """Increase the game score and reward every surviving genome."""
        self.score += 1
        for bird_idx, bird in enumerate(self.birds):
            self.ge[bird_idx].fitness += 5

    def move_pipes(self):
        """Advance pipes, handle collisions and passes, drop off-screen pipes."""
        pipes_to_remove = []
        add_pipe = False
        for pipe in self.pipes:
            is_pipe_off_screen = pipe.x + pipe.PIPE_BOTTOM.get_width() < 0
            # Bug fix: iterate indices in reverse so kill_bird's pop() cannot
            # shift the positions of birds that have not been examined yet.
            for bird_idx in range(len(self.birds) - 1, -1, -1):
                bird = self.birds[bird_idx]
                is_bird_pass_pipe = pipe.x + pipe.PIPE_BOTTOM.get_width() < bird.x
                if pipe.collide(bird):
                    self.kill_bird(bird_idx)
                if not pipe.passed and is_bird_pass_pipe:
                    pipe.passed = True
                    add_pipe = True
            if is_pipe_off_screen:
                pipes_to_remove.append(pipe)
            pipe.move()
        self.pipes = [
            pipe for pipe in self.pipes if pipe not in pipes_to_remove]
        if add_pipe:
            self.update_score()
            self.add_pipe()

    def check_bird_hit_limits(self, bird_idx, bird):
        """Kill the bird if it touched the ground or flew above the screen."""
        is_bird_touching_ground = bird.y + bird.img.get_height() >= 730
        is_bird_above_screen = bird.y + bird.img.get_height() < 0
        # elif guarantees a single pop() per bird even if conditions change.
        if is_bird_touching_ground:
            self.kill_bird(bird_idx)
        elif is_bird_above_screen:
            self.kill_bird(bird_idx)

    def move_birds(self):
        """Advance every bird and cull those that left the playfield."""
        # Bug fix: reverse index iteration keeps remaining indices valid
        # while kill_bird pops from the lists being traversed.
        for bird_idx in range(len(self.birds) - 1, -1, -1):
            bird = self.birds[bird_idx]
            bird.move()
            self.check_bird_hit_limits(bird_idx, bird)

    def add_pipe(self):
        """Spawn a new pipe at x=550."""
        self.pipes.append(Pipe(550))

    def get_next_pipe(self):
        """Return the first pipe the flock has not passed yet, or None."""
        for pipe in self.pipes:
            if not pipe.passed:
                return pipe

    def command_birds(self):
        """Feed each bird's state to its network and jump when it fires."""
        pipe = self.get_next_pipe()
        if not pipe:
            return
        for bird_idx, bird in enumerate(self.birds):
            self.ge[bird_idx].fitness += 0.1  # small reward for staying alive
            # Bug fix: renamed from `neat`, which shadowed the neat module.
            net = self.nets[bird_idx]
            dist_bird_pipe = {
                "top_pipe": abs(bird.y - pipe.height),
                "bottom_pipe": abs(bird.y - pipe.bottom)
            }
            output = net.activate((bird.y, dist_bird_pipe["top_pipe"],
                                   dist_bird_pipe["bottom_pipe"]))
            if (output[0] > 0.5):
                bird.jump()

    def is_birds_alive(self):
        """True while at least one bird is still in play."""
        return len(self.birds) > 0

    def reset(self):
        """Clear pipes and score before the next generation."""
        self.pipes = []
        self.score = 0
        self.add_pipe()

    def gameloop(self, genomes, config):
        """NEAT fitness function: evaluate one generation of genomes."""
        for genome_id, genome in genomes:
            net = neat.nn.FeedForwardNetwork.create(genome, config)
            self.nets.append(net)
            self.birds.append(Bird(230, 350))
            genome.fitness = 0
            self.ge.append(genome)

        clock = pygame.time.Clock()
        while self.isRunning and self.is_birds_alive():
            clock.tick(30)  # cap at 30 FPS
            self.handle_events()
            self.background.move()
            self.move_birds()
            self.command_birds()
            self.base.move()
            self.move_pipes()
            self.draw_game()
        self.reset()
def run(fitness, config_path):
    """Evolve a population with NEAT for up to 50 generations.

    :param fitness: fitness function called per generation (e.g. Game.gameloop)
    :param config_path: path to the NEAT configuration file
    :return: the winning genome
    """
    config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,
                                neat.DefaultSpeciesSet, neat.DefaultStagnation,
                                config_path)
    population = neat.Population(config)
    population.add_reporter(neat.StdOutReporter(True))
    stats = neat.StatisticsReporter()
    population.add_reporter(stats)
    winner = population.run(fitness, 50)
    # Bug fix: the best genome was computed but discarded; return it so
    # callers can inspect or persist it (previous callers ignored the
    # implicit None return, so this is backward compatible).
    return winner
if __name__ == "__main__":
    # Resolve the NEAT config relative to this file and start evolution,
    # using a single Game instance's gameloop as the fitness function.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'neat.config')
    run(Game().gameloop, config_path)
|
nilq/baby-python
|
python
|
import FWCore.ParameterSet.Config as cms
# CMSSW analysis job: read particle-flow clusters from `clustering.root`
# and print verbose per-cluster information via PFClusterAnalyzer.
process = cms.Process("ANALYSIS")

# Stop after 10 events.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(10)
)

process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('file:clustering.root')
)

process.pfClusterAnalyzer = cms.EDAnalyzer("PFClusterAnalyzer",
    # Consume the ECAL PF clusters produced upstream.
    PFClusters = cms.InputTag("particleFlowClusterECAL"),
    verbose = cms.untracked.bool(True),
    printBlocks = cms.untracked.bool(False)
)

process.p = cms.Path(process.pfClusterAnalyzer)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# David Prihoda
# Calculate coverage of BGCs by a DataFrame of BGC Candidates
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import argparse
def get_single_contig_coverage(a_cands, b_cands):
    """
    Get coverage of each BGC candidate in a_cands by BGC candidates in b_cands,
    where all candidates come from the same contig.

    :param a_cands: Reference DataFrame of BGC candidates (from a single contig)
    :param b_cands: Compared DataFrame of BGC candidates (from a single contig), or None
    :return: list of Series, one per candidate in a_cands, carrying the
        original candidate fields plus 'coverage' (fraction of the candidate
        covered by b_cands), 'any_exact' (exact coordinate match found) and
        'max_covered' (best fraction of any single overlapping b candidate)
    """
    if b_cands is None:
        remaining_cands = []
    else:
        remaining_cands = list(b_cands.reset_index(drop=True).iterrows())

    # Reusable binary mask sized to the longest reference candidate.
    max_len = int((a_cands['nucl_end'] - a_cands['nucl_start'] + 1).max())
    mask = np.zeros(max_len)

    coverages = []
    for c, cand in a_cands.iterrows():
        # Convert 1-based inclusive coordinates to 0-based end-exclusive.
        cand_start = int(cand['nucl_start']) - 1
        cand_end = int(cand['nucl_end'])
        cand_len = cand_end - cand_start
        any_exact = False
        max_covered = 0
        for i, other in remaining_cands:
            other_start = int(other['nucl_start']) - 1
            other_end = int(other['nucl_end'])
            other_len = other_end - other_start
            # Non-overlapping pairs contribute nothing.
            if other_start > cand_end:
                continue
            if other_end < cand_start:
                continue
            # Exact coordinate match
            if other_start == cand_start and other_end == cand_end:
                any_exact = True
            # Mark the overlap region, relative to cand_start.
            overlap_start = max(other_start, cand_start) - cand_start
            overlap_end = min(other_end, cand_end) - cand_start
            overlap_length = overlap_end - overlap_start
            mask[overlap_start:overlap_end] = 1
            max_covered = max(max_covered, overlap_length / other_len)
        # Vectorized sum instead of Python-level sum over the numpy slice.
        num_covered = mask[:cand_len].sum()
        mask[:cand_len] = 0  # reset the used slice for the next candidate
        # Bug fix: Series.append was removed in pandas 2.0 — use pd.concat.
        coverage = pd.concat([
            pd.Series(
                [num_covered / cand_len, any_exact, max_covered],
                ['coverage', 'any_exact', 'max_covered']
            ),
            cand,
        ])
        if 'model' in coverage:
            del coverage['model']
        coverages.append(coverage)
    return coverages
def get_coverage(a_cands, b_cands):
    """
    Get coverage of each BGC candidate in a_cands by BGC candidates in b_cands,
    where candidates may come from different contigs (per the 'contig_id' column).

    :param a_cands: Reference DataFrame of BGC candidates
    :param b_cands: Compared DataFrame of BGC candidates
    :return: DataFrame with one row per candidate in a_cands, including a
        'coverage' column with the fractional coverage by b_cands
    """
    grouped_b = b_cands.groupby('contig_id')
    all_coverages = []
    # Coverage is computed contig by contig; contigs absent from b_cands
    # are compared against None (zero coverage).
    for contig_id, contig_a in a_cands.groupby('contig_id'):
        contig_b = grouped_b.get_group(contig_id) if contig_id in grouped_b.groups else None
        all_coverages.extend(get_single_contig_coverage(contig_a, contig_b))
    return pd.DataFrame(all_coverages)
def plot_coverage_hist(coverage, title, label, **kwargs):
    """
    Plot histogram of coverage by model

    :param coverage: DataFrame with BGC candidates and their 'coverage' column and 'model' column
    :param title: Plot title
    :param label: Plot x-axis label
    :param kwargs: Arguments to pass to the histogram plot function
    """
    # One subplot per model, side by side, sharing the y axis.
    cols = len(coverage['model'].unique())
    axes = coverage[['coverage', 'model']].hist(by='model', bins=25, figsize=(cols * 3, 2.7), layout=(1, cols),
                                                sharey=True, **kwargs)
    axes[0].set_ylabel('# BGCs')
    plt.suptitle(title)
    plt.tight_layout()
    plt.subplots_adjust(top=0.77)  # leave room for the suptitle
    for ax in axes:
        ax.set_xlim(0, 1)
        ax.set_xlabel(label)
        # Show fractional coverage as percentages.
        ax.set_xticklabels(['{:.0f}%'.format(x * 100) for x in ax.get_xticks()])
def plot_coverage_boxplot(coverage, title, label, **kwargs):
    """
    Plot boxplot of coverage by model

    :param coverage: DataFrame with BGC candidates and their 'coverage' column and 'model' column
    :param title: Plot title
    :param label: Plot x-axis label
    :param kwargs: Arguments to pass to the boxplot function
    """
    # Figure width scales with the number of models compared.
    cols = len(coverage['model'].unique())
    ax = coverage[['coverage', 'model']].boxplot(by='model', figsize=(cols * 0.7+1, 2.7), **kwargs)
    plt.suptitle(title)
    plt.tight_layout()
    plt.xticks(rotation=90)
    plt.subplots_adjust(top=0.80)  # leave room for the suptitle
    ax.set_ylabel(label)
    # Show fractional coverage as percentages.
    ax.set_yticklabels(['{:.0f}%'.format(x * 100) for x in ax.get_yticks()])
if __name__ == "__main__":
# Parse command line
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", dest="input", required=True,
help="Target model candidate csv file path.", metavar="FILE")
parser.add_argument("-o", "--output", dest="output", required=True,
help="Output file path.", metavar="FILE")
parser.add_argument(dest='candidates', nargs='+',
help="Paths to other models' candidate files.", metavar="FILE")
options = parser.parse_args()
target_cands = pd.read_csv(options.input)
other_cands: pd.DataFrame = pd.concat([pd.read_csv(path) for path in options.candidates])
coverage = get_coverage(target_cands, other_cands)
coverage.to_csv(options.output, index=False)
print('Saved {} candidates to: {}'.format(len(coverage), options.output))
|
nilq/baby-python
|
python
|
import tensorflow as tf
class Generator(object):
    """Motif-level generator of a graph-embedding GAN (TF1-style graph code).

    Builds a trainable embedding matrix initialized from pretrained node
    embeddings and a policy-gradient-style loss over sampled motifs.
    """

    def __init__(self, n_node, node_emd_init, config):
        self.n_node = n_node                # number of nodes in the graph
        self.node_emd_init = node_emd_init  # pretrained embedding matrix (shape drives the variable)
        self.motif_size = config.motif_size
        self.max_value = config.max_value   # clip ceiling for embedding values

        with tf.compat.v1.variable_scope('generator'):
            self.embedding_matrix = tf.compat.v1.get_variable(name="embedding",
                                                    shape=self.node_emd_init.shape,
                                                    initializer=tf.constant_initializer(self.node_emd_init),
                                                    trainable=True)

            # Graph-mode inputs: a batch of motifs (node index tuples) and a
            # per-motif reward from the discriminator.
            self.motifs = tf.compat.v1.placeholder(tf.int32, shape=[None, config.motif_size])
            self.reward = tf.compat.v1.placeholder(tf.float32, shape=[None])

            # Batch * motif_size * embedding_size
            self.node_embedding = tf.nn.embedding_lookup(self.embedding_matrix, self.motifs)
            # Motif score: sum over embedding dims of the elementwise product
            # of the motif's node embeddings.
            self.score = tf.reduce_sum(tf.reduce_prod(self.node_embedding, axis=1), axis=1)
            self.p = 1 - tf.exp(-self.score)
            self.p = tf.clip_by_value(self.p, 1e-5, 1)  # avoid degenerate probabilities

            # Reward-weighted loss (maximize p where reward is high).
            self.loss = -tf.reduce_mean((self.p) * (self.reward))
            # NOTE(review): this mixes TF1 graph APIs (placeholders, tf.assign)
            # with the TF2 optimizer; optimizer.minimize() here is called
            # without a var_list/tape and tf.assign does not exist in TF2 —
            # confirm the intended TensorFlow version before running.
            optimizer = tf.optimizers.Adam(config.lr_gen)
            print("here")
            self.g_updates = optimizer.minimize(self.loss)
            # Keep embeddings within [0, max_value] after each update step.
            self.clip_op = tf.assign(self.embedding_matrix, tf.clip_by_value(self.embedding_matrix, 0, self.max_value))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from django.conf import settings
# Frame configuration pulled from Django settings at import time.
FRAME_FORMATTER = getattr(settings, 'FRAME_FORMATTER', None)
FRAME_SEPARATOR = getattr(settings, 'FRAME_SEPARATOR', None)

# Fail fast at import time when the mandatory formatter is missing.
# NOTE(review): FRAME_SEPARATOR is fetched but never validated here —
# confirm whether a missing separator should also raise.
if FRAME_FORMATTER is None:
    raise ValueError('Improperly Configured FRAME_FORMATTER')
nilq/baby-python
|
python
|
from __future__ import division
from utils.utils import *
from utils.datasets import *
from utils.parse_config import *
from models.darknet import *
from models.yolo_nano_helper import YoloNano
from torch.nn.parallel import DataParallel
import os
import sys
import time
import datetime
import argparse
import tqdm
import torch
from torch.utils.data import DataLoader
from torch.autograd import Variable
import cv2
import os.path as osp
# (width, height) of the annotated output video for each known video id;
# used to size the cv2.VideoWriter in inference().
VIDEO_SIZE={
    "vid_65132":(1920,1080),
    "vid_64708": (1920, 1080),
    "multi_person":(1920,1080)
}
def str_id(cnt):
    """Return *cnt* as a string left-padded with '0' to 8 characters.

    e.g. 7 -> "00000007"; values already 8+ characters long are unchanged.
    """
    # Idiom fix: str.rjust replaces the manual zero-prefix loop and is
    # behavior-identical (including inputs longer than 8 characters).
    return str(cnt).rjust(8, "0")
@torch.no_grad()
def inference(model, path, conf_thres, nms_thres, img_size, batch_size, data_type, video_id):
    """Run detection over every image listed under *path* and write an
    annotated video to ./result/<video_id>.mp4.

    :param model: detection network returning raw YOLO outputs
    :param path: dataset list/path consumed by InferenceDataset
    :param conf_thres: object confidence threshold for NMS
    :param nms_thres: IoU threshold for non-maximum suppression
    :param img_size: square network input size in pixels
    :param batch_size: dataloader batch size
    :param data_type: dataset flavor forwarded to InferenceDataset
    :param video_id: key into VIDEO_SIZE; also names the output file
    """
    model.eval()
    # Get dataloader
    dataset = InferenceDataset(path, img_size=img_size, augment=False, multiscale=False, data_type=data_type)
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=dataset.collate_fn
    )

    Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    vider_write = cv2.VideoWriter(osp.join("./result", "{}.mp4".format(video_id)), fourcc, 30.0,
                                  VIDEO_SIZE[video_id])
    for batch_i, (img_id, imgs, pads) in enumerate(tqdm.tqdm(dataloader, desc="Detecting objects")):
        imgs = Variable(imgs.type(Tensor), requires_grad=False)
        outputs = model(imgs)
        outputs = non_max_suppression(outputs, conf_thres=conf_thres, nms_thres=nms_thres)

        for id, output, pad in zip(img_id, outputs, pads):
            img = cv2.imread(id)
            h, w, c = img.shape
            square_edge = max(h, w)
            # Scale factor from network input back to original pixels.
            ratio = square_edge / imgs.shape[-1]
            if output is None:
                # No detections: emit the unannotated frame.
                vider_write.write(img)
                continue
            output = output.detach().cpu().numpy()[:]
            output[:, :4] *= ratio
            # Undo the letterbox padding offsets on each box edge.
            output[:, 0] -= pad[0]
            output[:, 1] -= pad[2]
            output[:, 2] -= pad[1]
            output[:, 3] -= pad[3]
            for out in output:
                category = int(out[-1])
                if category > 0:
                    # Only category-0 detections are drawn.
                    continue
                # Bug fix: np.int was removed in NumPy 1.24 — cast with the
                # builtin int dtype instead.
                out = out[:4].astype(int).tolist()
                img = cv2.rectangle(img, tuple(out[:2]), tuple(out[2:4]), (0, 0, 255), 3)
            vider_write.write(img)
    # Concatenate sample statistics
# Concatenate sample statistics
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=8, help="size of each image batch")
parser.add_argument("--model_def", type=str, default="config/yolov3.cfg", help="path to model definition file")
parser.add_argument("--data_config", type=str, default="config/coco.data", help="path to data config file")
parser.add_argument("--weights_path", type=str, default="weights/yolov3.weights", help="path to weights file")
parser.add_argument("--class_path", type=str, default="data/coco.names", help="path to class label file")
parser.add_argument("--iou_thres", type=float, default=0.5, help="iou threshold required to qualify as detected")
parser.add_argument("--conf_thres", type=float, default=0.7, help="object confidence threshold")
parser.add_argument("--nms_thres", type=float, default=0.5, help="iou thresshold for non-maximum suppression")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension")
parser.add_argument("--data_type", type=str, default="coco_test", help="Dataset type")
parser.add_argument("--video_id", type=str, default="vid_65132", help=" video id info")
opt = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
data_config = parse_data_config(opt.data_config)
valid_path = data_config["valid"]
class_names = load_classes(data_config["names"])
# Initiate model
if "yolov3" in opt.model_def:
model = Darknet(opt.model_def).to(device)
model.apply(weights_init_normal)
else:
kargs = get_nano_info(opt.model_def)
model = YoloNano(**kargs).to(device)
model.apply(weights_init_normal)
model = DataParallel(model)
if opt.weights_path.endswith(".weights"):
# Load darknet weights
model.load_darknet_weights(opt.weights_path)
else:
# Load checkpoint weights
model.load_state_dict(torch.load(opt.weights_path))
inference(
model,
path=valid_path,
conf_thres=opt.conf_thres,
nms_thres=opt.nms_thres,
img_size=opt.img_size,
batch_size=1,
data_type = opt.data_type,
video_id = opt.video_id
)
|
nilq/baby-python
|
python
|
"""A module to generate OpenAPI and JSONSchemas."""
import json
import os
from pkg_resources import get_distribution
from pydantic_openapi_helper.core import get_openapi
from pydantic_openapi_helper.inheritance import class_mapper
from queenbee.repository import RepositoryIndex
from queenbee.job import Job, JobStatus
from queenbee.recipe import Recipe, RecipeInterface
from queenbee.plugin import Plugin
# All generated schema files are written to the static docs folder.
folder = os.path.join(os.path.dirname(__file__), 'docs/_static/schemas')
if not os.path.isdir(folder):
    os.mkdir(folder)

# Use only the major.minor.patch portion of the installed queenbee version.
VERSION = '.'.join(get_distribution('queenbee').version.split('.')[:3])

# Shared OpenAPI "info" metadata for the combined documents below.
info = {
    "description": "",
    "version": VERSION,
    "title": "",
    "contact": {
        "name": "Ladybug Tools",
        "email": "info@ladybug.tools",
        "url": "https://github.com/ladybug-tools/queenbee"
    },
    "x-logo": {
        "url": "https://www.ladybug.tools/assets/img/honeybee.png",
        "altText": "Queenbee logo"
    },
    "license": {
        "name": "MIT",
        "url": "https://github.com/ladybug-tools/queenbee-schema/blob/master/LICENSE"
    }
}

# Per-model OpenAPI documents.
with open(os.path.join(folder, 'job-openapi.json'), 'w') as out_file:
    json.dump(
        get_openapi(
            base_object=[Job], title='Queenbee Job Schema',
            description='Schema documentation for Queenbee Jobs',
            version=VERSION
        ),
        out_file,
        indent=2
    )

with open(os.path.join(folder, 'plugin-openapi.json'), 'w') as out_file:
    json.dump(
        get_openapi(
            base_object=[Plugin], title='Queenbee Plugin Schema',
            description='Schema documentation for Queenbee Plugins',
            version=VERSION
        ),
        out_file,
        indent=2
    )

with open(os.path.join(folder, 'recipe-openapi.json'), 'w') as out_file:
    json.dump(
        get_openapi(
            base_object=[Recipe], title='Queenbee Recipe Schema',
            description='Schema documentation for Queenbee Recipes',
            version=VERSION
        ),
        out_file,
        indent=2
    )

# NOTE(review): description below says "Recipes" for the Repository
# document — looks like a copy-paste leftover; confirm intended wording.
with open(os.path.join(folder, 'repository-openapi.json'), 'w') as out_file:
    json.dump(
        get_openapi(
            base_object=[RepositoryIndex], title='Queenbee Repository Schema',
            description='Schema documentation for Queenbee Recipes',
            version=VERSION
        ),
        out_file,
        indent=2
    )

# Plain JSONSchema exports (one per model).
with open(os.path.join(folder, 'job-schema.json'), 'w') as out_file:
    out_file.write(Job.schema_json())

with open(os.path.join(folder, 'plugin-schema.json'), 'w') as out_file:
    out_file.write(Plugin.schema_json())

with open(os.path.join(folder, 'recipe-schema.json'), 'w') as out_file:
    out_file.write(Recipe.schema_json())

with open(os.path.join(folder, 'repository-schema.json'), 'w') as out_file:
    out_file.write(RepositoryIndex.schema_json())

# write openapi with inheritance and mapper json files
# these files are mainly used for creating .NET SDK
external_docs = {
    "description": "OpenAPI Specification with Inheritance",
    "url": "./queenbee_inheritance.json"
}

models = [Recipe, Plugin, Job, RepositoryIndex, RecipeInterface, JobStatus]

# Combined document without inheritance information.
openapi = get_openapi(
    models,
    title='Queenbee Schema',
    description='Documentation for Queenbee schema.',
    version=VERSION, info=info,
    external_docs=external_docs
)
with open(os.path.join(folder, 'queenbee.json'), 'w') as out_file:
    json.dump(openapi, out_file, indent=2)

# with inheritance
openapi = get_openapi(
    models,
    title='Queenbee Schema with Inheritance',
    description='Documentation for Queenbee schema.',
    version=VERSION, info=info,
    inheritance=True,
    external_docs=external_docs
)

with open(os.path.join(folder, 'queenbee_inheritance.json'), 'w') as out_file:
    json.dump(openapi, out_file, indent=2)

# add the mapper file
with open(os.path.join(folder, 'queenbee_mapper.json'), 'w') as out_file:
    json.dump(
        class_mapper(
            models,
            ['queenbee', 'queenbee.interface']
        ),
        out_file, indent=2
    )
|
nilq/baby-python
|
python
|
import unittest
from time import sleep
from random import randint
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
import json
# from selenium.webdriver.support.events import EventFiringWebDriver, AbstractEventListener
#
#
# class MyListener(AbstractEventListener):
# def before_find(self, by, value, driver):
# print(by, value)
# def after_find(self, by, value, driver):
# print(by, value, "found")
# def on_exception(self, exception, driver):
# print(exception)
class CreateQuizes(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome(executable_path="../browsers/chromedriver")
# self.driver = EventFiringWebDriver(webdriver.Chrome(executable_path="../browsers/chromedriver"), MyListener())
#self.driver = webdriver.Firefox(executable_path="../browsers/geckodriver")
self.wait = WebDriverWait(self.driver, 20)
def test_create_quizes(self):
""" Verify that user with Teachers role can Create Quiz with 3 Textual, 3 Single-Choice,
3 Multiple-Choice questions 75% passing rate."""
driver = self.driver
wait = self.wait
# Test data:
number = randint(100,1000)
quiz_name = "QA BASIC {}".format(number)
textual_question_1 = "What is Software Testing?"
textual_question_2 = "What is Software Quality Assurance?"
textual_question_3 = "Explain SDLC methodology?"
# 1. Login with tichers role
email_teacher = 'alina.korolevich@yopmail.com'
password_teacher = 'internship'
login_url = "http://local.school.portnov.com:4520/#/login"
driver.get(login_url)
driver.find_element_by_id("mat-input-0").send_keys(email_teacher)
driver.find_element_by_id("mat-input-1").send_keys(password_teacher)
driver.find_element_by_css_selector("button[type='submit']").click()
wait.until(EC.presence_of_element_located((By.XPATH, "// div[@class = 'info']/p[contains(text(),'TEACHER')]")))
sleep(1)
wait.until(EC.visibility_of_element_located((By.XPATH, "// div[@class = 'info']/p[contains(text(),'TEACHER')]")))
driver.find_element(By.PARTIAL_LINK_TEXT, "Quizzes").click()
wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "a[href='#/quiz-builder']")))
driver.find_element(By.PARTIAL_LINK_TEXT,"Create New Quiz").click()
driver.find_element(By.TAG_NAME,"input").send_keys(quiz_name)
driver.find_element(By.CSS_SELECTOR, "div.controls.ng-star-inserted>button").click()
sleep(1)
driver.find_element(By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//div[contains(text(), 'Textual')]").click()
sleep(1)
driver.find_element(By.CSS_SELECTOR, "div.mat-input-infix.mat-form-field-infix textarea").send_keys(textual_question_1)
sleep(1)
driver.find_element(By.CSS_SELECTOR, "div.controls.ng-star-inserted>button").click()
sleep(1)
driver.find_element(By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//div[contains(text(), 'Textual')]").click()
sleep(1)
driver.find_element(By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//textarea[@placeholder='Question *']").send_keys(textual_question_2)
sleep(1)
driver.find_element(By.CSS_SELECTOR, "div.controls.ng-star-inserted>button").click()
sleep(1)
driver.find_element(By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//div[contains(text(), 'Textual')]").click()
sleep(1)
driver.find_element(By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//textarea[@placeholder='Question *']").send_keys(textual_question_3)
sleep(1)
driver.find_element(By.CSS_SELECTOR, "div.controls.ng-star-inserted>button").click()
sleep(1)
# Single choice questions:
single_choice_1 = "What is a Defect?"
single_choice_1_opt_1 = "Any flaw or imperfection in a software work product"
single_choice_1_opt_2 = "without any issues"
single_choice_2 = "What is Priority?"
single_choice_2_opt_1 = "It indicates the importance or urgency of fixing a defect"
single_choice_2_opt_2 = "anytime can fix this bug. No time limit"
single_choice_3 = "What is the difference between static testing?"
single_choice_3_opt_1 = "without code executing the program is called as Static Testing."
single_choice_3_opt_2 = "with code"
driver.find_element(By.XPATH, "//*[contains(text(), 'new empty question')]/../../..//div[contains(text(), 'Single-Choice')]").click()
sleep(1)
driver.find_element(By.XPATH, "//*[contains(text(), 'new empty question')]/../../..//textarea[@placeholder='Question *']").send_keys(single_choice_1)
sleep(1)
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']".format(single_choice_1)).send_keys(single_choice_1_opt_1)
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']/../../../../..//mat-radio-button".format(single_choice_1)).click()
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 2*']".format(single_choice_1)).send_keys(single_choice_1_opt_2)
sleep(1)
driver.find_element(By.CSS_SELECTOR, "div.controls.ng-star-inserted>button").click()
sleep(1)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.left.wide mat-slider")))
driver.find_element(By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//div[contains(text(), 'Single-Choice')]").click()
wait.until(EC.visibility_of_element_located((By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//textarea[@placeholder='Question *']"))).send_keys(single_choice_2)
wait.until(EC.visibility_of_element_located((By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']".format(single_choice_2)))).send_keys(single_choice_2_opt_1)
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']/../../../../..//mat-radio-button".format(single_choice_2)).click()
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 2*']".format(single_choice_2)).send_keys(single_choice_2_opt_2)
sleep(1)
driver.find_element(By.CSS_SELECTOR, "div.controls.ng-star-inserted>button").click()
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.left.wide mat-slider")))
driver.find_element(By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//div[contains(text(), 'Single-Choice')]").click()
wait.until(EC.visibility_of_element_located((By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//textarea[@placeholder='Question *']"))).send_keys(single_choice_3)
wait.until(EC.visibility_of_element_located((By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']".format(single_choice_3)))).send_keys(single_choice_3_opt_1)
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']/../../../../..//mat-radio-button".format(single_choice_3)).click()
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 2*']".format(single_choice_3)).send_keys(single_choice_3_opt_2)
# Multiple choice questions:
multiple_choice_1 = "What is a Bug?"
multiple_choice_1_opt_1 = "Mismatch between actual and intended behaviors of the software"
multiple_choice_1_opt_2 = "Some small insect that flies around"
multiple_choice_2 = "Are Java and Javascript same languages?"
multiple_choice_2_opt_1 = "Yes"
multiple_choice_2_opt_2 = "No"
multiple_choice_3 = "What is a prime objective of a bug tracking database?"
multiple_choice_3_opt_1 = "Tracking the bugs"
multiple_choice_3_opt_2 = "To get a bug fixed"
driver.find_element(By.CSS_SELECTOR, "div.controls.ng-star-inserted>button").click()
driver.find_element(By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//div[contains(text(), 'Multiple-Choice')]").click()
wait.until(EC.visibility_of_element_located((By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//textarea[@placeholder='Question *']"))).send_keys(multiple_choice_1)
wait.until(EC.visibility_of_element_located((By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']".format(multiple_choice_1)))).send_keys(multiple_choice_1_opt_1)
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']/../../../../../mat-checkbox".format(multiple_choice_1)).click()
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 2*']".format(multiple_choice_1)).send_keys(multiple_choice_1_opt_2)
sleep(1)
driver.find_element(By.CSS_SELECTOR, "div.controls.ng-star-inserted>button").click()
sleep(1)
driver.find_element(By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//div[contains(text(), 'Multiple-Choice')]").click()
wait.until(EC.visibility_of_element_located((By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//textarea[@placeholder='Question *']"))).send_keys(multiple_choice_2)
wait.until(EC.visibility_of_element_located((By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']".format(multiple_choice_2)))).send_keys(multiple_choice_2_opt_1)
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 2*']".format(multiple_choice_2)).send_keys(multiple_choice_2_opt_2)
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 2*']/../../../../../mat-checkbox".format(multiple_choice_2)).click()
sleep(1)
driver.find_element(By.CSS_SELECTOR, "div.controls.ng-star-inserted>button").click()
sleep(1)
driver.find_element(By.XPATH, "//*[contains(text(), 'new empty question')]/../../..//div[contains(text(), 'Multiple-Choice')]").click()
wait.until(EC.visibility_of_element_located((By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//textarea[@placeholder='Question *']"))).send_keys(multiple_choice_3)
wait.until(EC.visibility_of_element_located((By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']".format(multiple_choice_3)))).send_keys(multiple_choice_3_opt_1)
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 2*']".format(multiple_choice_3)).send_keys(multiple_choice_3_opt_2)
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 2*']/../../../../../mat-checkbox".format(multiple_choice_3)).click()
driver.find_element(By.XPATH, "//button/*[contains(text(),'Save')]").click()
quiz_locator = "//ac-quizzes-list//div[@class = 'quizzes']//*[contains(text(),'{}')]".format(quiz_name)
wait.until(EC.visibility_of_element_located((By.XPATH, quiz_locator)))
element = driver.find_element_by_xpath(quiz_locator)
driver.execute_script("arguments[0].scrollIntoView();", element)
element.click()
driver.get_screenshot_as_file('{} created.png'.format(quiz_name))
driver.find_element_by_xpath("//div[@class='mat-list-item-content']//h5[contains(text(),'Log Out')]").click()
wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, ".mat-button.mat-warn")))
driver.find_element_by_css_selector(".mat-button.mat-warn").click()
wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,"button[type='submit']")))
#Sign in with role TEACHER
email = 'alina.korolevich@yopmail.com'
password = 'internship'
url = "http://local.school.portnov.com:4520/api/v1/sign-in"
payload = {
'email': email,
'password': password
}
headers = {
'content-type': "application/json",
'Connection': "keep-alive"
}
response = requests.post(url, data=json.dumps(payload), headers=headers)
parsed_json = json.loads(response.text)
token = parsed_json["token"]
url = "http://local.school.portnov.com:4520/api/v1/quizzes"
headers = {
'Authorization': "Bearer {}".format(token)
}
r = requests.get(url, headers=headers)
parsed_json = json.loads(r.text)
quiz_id = None
for i in parsed_json:
if i["name"] == quiz_name:
quiz_id = i["id"]
else:
continue
url = "http://local.school.portnov.com:4520/api/v1/quiz/{}".format(quiz_id)
r = requests.delete(url, headers=headers)
print(r.status_code)
self.assertTrue(r.status_code == 200)
print("Quiz: {} with id {} was permanently deleted".format(quiz_name, quiz_id))
    def tearDown(self):
        """Quit the WebDriver after each test, closing the browser session."""
        self.driver.quit()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import sys

# Hadoop Streaming mapper (Python 2): for each joined trip/fare record read
# from stdin, emit "<pickup day>\t<revenue>,<tolls>".
for line in sys.stdin:
    # extract data: tab-separated key and comma-separated value fields
    key, val = line.strip().split('\t', 1)
    s_val = val.split(',')
    # day: field 3 of the key is presumably a timestamp; keep the first 10
    # chars (YYYY-MM-DD) -- TODO confirm against the join schema
    day = key.split(',')[3][:10]
    # revenue: sum of value fields 11, 12 and 14 (fare components --
    # presumably fare + surcharge + tip; verify against the fare schema)
    try:
        revenue = float(s_val[11]) + float(s_val[12]) + float(s_val[14])
    except ValueError:
        # skip malformed rows / header lines
        continue
    # tolls: value field 15
    try:
        tolls = float(s_val[15])
    except ValueError:
        continue
    # one output record per trip
    print '%s\t%s,%s' % (day, revenue, tolls)
'''
cd ~/hw1/Task2-d
rm -rf TotalRevenueSamp.out
hfs -rm -r TotalRevenueSamp.out
hjs -D mapreduce.job.reduces=0 \
-file ~/hw1/Task2-d/src/ \
-mapper src/mapper.sh \
-input /user/wl2154/TripFareJoinSamp.txt \
-output /user/wl2154/TotalRevenueSamp.out
hfs -get TotalRevenueSamp.out
hfs -getmerge TotalRevenueSamp.out TotalRevenueSamp.txt
cat TotalRevenueSamp.txt
'''
|
nilq/baby-python
|
python
|
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T]):
    """A singly linked list node.

    The original declared ``item: T`` without making the class generic, so
    type checkers treated ``Node`` as non-parameterized; subclassing
    ``Generic[T]`` fixes that without changing runtime behavior.

    Attributes:
        item: The payload stored in this node.
        next: The following node in the list, or ``None`` for the tail.
    """

    def __init__(self, item: T):
        self.item = item
        # Tail nodes keep next as None; callers link nodes by assigning it.
        self.next: "Node[T] | None" = None
|
nilq/baby-python
|
python
|
import re
from File import *
from Base import *
from subprocess import call
class Animation_Html():
##!
##! Animation main HTML path
##!
def Animation_HTML_Path(self):
return "/".join( [ self.Path,self.FileName,self.Curve_Parms_Path ] )
##!
##! Animation main HTML file name
##!
def Animation_HTML_FileName(self):
return "/".join( [self.Animation_HTML_Path(),self.Name+".html"] )
##!
##! HTML head section
##!
def Animation_HTML_Doc_Head(self):
return (
"<!DOCTYPE html>\n"+
self.XML_Tag_Start("HTML")+
self.Animation_HTML_Head()
)
##!
##! Write animation main HTML file
##!
def Animation_HTML_Write(self):
html=self.Animation_HTML_Doc_Head()
html=html+self.Animation_HTML_Body()
html=html+self.XML_Tag_End("HTML")
outfile=self.Animation_HTML_FileName()
self.File_Path_Create(outfile)
res=self.File_Write(outfile,[html])
print outfile+":",res,"bytes"
##!
##! Write animation css
##!
def Animation_HTML_CSS(self):
return self.XML_TagIt(
"LINK",
{
"rel": "stylesheet",
"href": self.HTML_Root+"/W3.css",
}
)+"\n"+self.XML_TagIt(
"LINK",
{
"rel": "stylesheet",
"href": self.HTML_Root+"/Poops.css",
}
)
##!
##! Write animation script section
##!
def Animation_HTML_Script(self):
return self.XML_Tags_NL(
"SCRIPT",
self.File_Read("poops.js")
)
##!
##! Write animation head section
##!
def Animation_HTML_Head(self):
return self.XML_Tags_NL(
"HEAD",
self.Animation_HTML_Title()+self.Animation_HTML_CSS()
)
##!
##! Write animation title section
##!
def Animation_HTML_Title(self):
return self.XML_Tags_NL(
"TITLE",
"TITLE"
)
##!
##! Write animation body section
##!
def Animation_HTML_Body(self):
return self.XML_Tags_NL(
"BODY",
self.Animation_HTML_Animation_Element()
)
##!
##! Write animation SVG animation element.
##!
def Animation_HTML_Animation_Element(self):
imgs=[""]
n=0
for svgfile in (self.Iteration_Files):
imgs.append(
self.Animation_HTML_Body_File_IMG(svgfile,n)
)
n+=1
return self.XML_Tags(
"DIV",
"\n".join(imgs)+"\n",
{
"class": "w3-content w3-section",
}
)+"\n"+self.Animation_HTML_Script()
##!
##! Generate animated image tag.
##!
def Animation_HTML_Body_File_IMG(self,svgfile,n=0):
href=self.CGI_Root+"?"+svgfile
return self.XML_Tag(
"IMG",
{
"src": href,
"width": "800px",
"class": "mySlides",
}
)
|
nilq/baby-python
|
python
|
from django.contrib.postgres.fields import ArrayField
from django.db import models
from osf.models import Node
from osf.models import OSFUser
from osf.models.base import BaseModel, ObjectIDMixin
from osf.models.validators import validate_subscription_type
from osf.utils.fields import NonNaiveDateTimeField
from website.notifications.constants import NOTIFICATION_TYPES
class NotificationSubscription(BaseModel):
    """Subscription of a user (or a node) to one notification event.

    ``_id`` combines the owner's guid with the event name, e.g.
    ``pxyz_wiki_updated`` or ``uabc_comment_replies``.
    """
    primary_identifier_name = '_id'

    _id = models.CharField(max_length=50, db_index=True, unique=True)  # pxyz_wiki_updated, uabc_comment_replies
    event_name = models.CharField(max_length=50)  # wiki_updated, comment_replies

    # Exactly one of `user` / `node` is expected to be set -- see `owner`.
    user = models.ForeignKey('OSFUser', related_name='notification_subscriptions',
                             null=True, blank=True, on_delete=models.CASCADE)
    node = models.ForeignKey('Node', related_name='notification_subscriptions',
                             null=True, blank=True, on_delete=models.CASCADE)

    # Notification types: each m2m holds the users who picked that delivery
    # mode for this subscription.
    none = models.ManyToManyField('OSFUser', related_name='+')  # reverse relationships
    email_digest = models.ManyToManyField('OSFUser', related_name='+')  # for these
    email_transactional = models.ManyToManyField('OSFUser', related_name='+')  # are pointless

    @classmethod
    def load(cls, q):
        """Return the subscription whose ``_id`` equals ``q``, or None."""
        # modm doesn't throw exceptions when loading things that don't exist
        try:
            return cls.objects.get(_id=q)
        except cls.DoesNotExist:
            return None

    @property
    def owner(self):
        """The user or node this subscription belongs to (user wins)."""
        # ~100k have owner==user
        if self.user is not None:
            return self.user
        # ~8k have owner=Node
        elif self.node is not None:
            return self.node

    @owner.setter
    def owner(self, value):
        # Route the assignment to the matching FK by runtime type.
        if isinstance(value, OSFUser):
            self.user = value
        elif isinstance(value, Node):
            self.node = value

    def add_user_to_subscription(self, user, notification_type, save=True):
        """Put `user` on exactly the `notification_type` m2m (removing them
        from the others) and index the child-node subscription on the
        owner's parent node."""
        for nt in NOTIFICATION_TYPES:
            if getattr(self, nt).filter(id=user.id).exists():
                if nt != notification_type:
                    getattr(self, nt).remove(user)
            else:
                if nt == notification_type:
                    getattr(self, nt).add(user)

        if notification_type != 'none' and isinstance(self.owner, Node) and self.owner.parent_node:
            user_subs = self.owner.parent_node.child_node_subscriptions
            if self.owner._id not in user_subs.setdefault(user._id, []):
                user_subs[user._id].append(self.owner._id)
                self.owner.parent_node.save()

        if save:
            self.save()

    def remove_user_from_subscription(self, user, save=True):
        """Remove `user` from every delivery-mode m2m and from the parent
        node's child-subscription index."""
        for notification_type in NOTIFICATION_TYPES:
            try:
                getattr(self, notification_type, []).remove(user)
            except ValueError:
                # user was not in this m2m
                pass

        if isinstance(self.owner, Node) and self.owner.parent_node:
            try:
                self.owner.parent_node.child_node_subscriptions.get(user._id, []).remove(self.owner._id)
                self.owner.parent_node.save()
            except ValueError:
                pass

        if save:
            self.save()
class NotificationDigest(ObjectIDMixin, BaseModel):
    """One queued notification message awaiting digest/transactional delivery."""
    user = models.ForeignKey('OSFUser', null=True, blank=True, on_delete=models.CASCADE)
    # when the triggering event happened (timezone-aware)
    timestamp = NonNaiveDateTimeField()
    # delivery mode; validated against the configured subscription types
    send_type = models.CharField(max_length=50, db_index=True, validators=[validate_subscription_type, ])
    # event name, e.g. wiki_updated
    event = models.CharField(max_length=50)
    message = models.TextField()
    # TODO: Could this be a m2m with or without an order field?
    node_lineage = ArrayField(models.CharField(max_length=5))
|
nilq/baby-python
|
python
|
import logging
log = logging.getLogger(__name__)
from dogpile.cache import make_region
from dogpile.cache.api import NO_VALUE
import os, errno
CACHE_FAILS = (NO_VALUE,)
def mkdir_p(path):
    """Create `path` like `mkdir -p`: parents included, no error if it exists."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # Re-raise anything except "already exists".
        if exc.errno != errno.EEXIST:
            raise
class CachedData(object):
    """Attribute container describing one cache interaction.

    All attributes default to None and are filled in by callers.
    """
    keys = None  # cache keys involved -- presumably a list; confirm at call sites
    request = None  # active web request object, if any -- TODO confirm
    dbSession = None  # database session used on cache misses
    query_args = None  # arguments for the backing query
    regions_manager = None  # dogpile cache regions manager
    keyed_multiples = None  # multi-key lookup state -- TODO confirm
class LazyloadedFunction(object):
    """A deferred function call.

    Stores a callable plus its arguments so the call can be executed later
    (``ObjectifiedDict`` resolves these on attribute/item access).
    """

    def __init__(
        self,
        object,
        object_attribute,
        cache_function,
        *cache_function_args,
        **cache_function_kwargs
    ):
        self.object = object
        self.object_attribute = object_attribute
        self.cache_function = cache_function
        self.cache_function_args = cache_function_args
        self.cache_function_kwargs = cache_function_kwargs
        # BUG FIX: this previously read `function.__doc__`, but `function`
        # was never defined; the bare `except` silently swallowed the
        # NameError, so the wrapped callable's docstring was never copied.
        try:
            self.__doc__ = cache_function.__doc__
        except AttributeError:  # pragma: no cover
            pass

    def execute(self):
        """Invoke the stored callable with the stored arguments."""
        return self.cache_function(
            *self.cache_function_args, **self.cache_function_kwargs
        )
class ObjectifiedDict(dict):
    """Dict allowing .dotted access; LazyloadedFunction values resolve lazily."""

    def __getitem__(self, attr):
        # Missing keys fall through and return None (original behavior).
        if attr in self:
            item = dict.__getitem__(self, attr)
            if isinstance(item, LazyloadedFunction):
                # resolve the deferred call once, then memoize the result
                item = item.execute()
                dict.__setitem__(self, attr, item)
            return item

    def __getattr__(self, attr):
        if attr in self:
            if isinstance(self[attr], LazyloadedFunction):
                value = self[attr].execute()
                self[attr] = value
            return self[attr]
        return self.__getattribute__(attr)

    def _lazyload(self, attr, function, *args, **kwargs):
        """Store a deferred call under `attr`."""
        self[attr] = LazyloadedFunction(self, attr, function, *args, **kwargs)

    def _expand(self):
        """Resolve every deferred value in place."""
        # `.items()` instead of the py2-only `.iteritems()`: works on both
        # Python 2 and 3.
        for key, value in self.items():
            if isinstance(value, LazyloadedFunction):
                dict.__setitem__(self, key, value.execute())

    def _cacheable(self, exclude=None):
        """Return a plain-dict copy with unresolved deferred values (and any
        `exclude` keys) removed."""
        copied = self.copy()
        # BUG FIX: the original deleted from `copied` while iterating
        # `copied.iteritems()`, which raises "dictionary changed size during
        # iteration" (and `iteritems` does not exist on Python 3).  Collect
        # the doomed keys first, then delete.
        doomed = [k for k, v in copied.items() if isinstance(v, LazyloadedFunction)]
        for k in doomed:
            del copied[k]
        if exclude:
            for k in exclude:
                if k in copied:
                    del copied[k]
        return copied
class AttributeSafeObject(object):
    """
    Object with lax attribute access. Returns an empty string ('') when the
    attribute does not exist; good for templating. Based on Pylons.
    """

    def __init__(self, **kwargs):
        for key in kwargs:
            setattr(self, key, kwargs[key])

    def __getattr__(self, name):
        try:
            ## note that we're using the object class directly
            return object.__getattribute__(self, name)
        except AttributeError:
            # dunder lookups must keep raising so protocols work correctly
            if name[:2] == "__":
                raise
            # BUG FIX: `DEBUG_ATTRIB_SAFE` is not defined anywhere in this
            # module, so every miss on a non-dunder attribute raised a
            # NameError instead of returning ''.  Look the flag up safely so
            # it can still be injected at module level for debugging.
            if globals().get("DEBUG_ATTRIB_SAFE"):
                log.debug(
                    "No attribute `%s` found in AttributeSafeObject instance,"
                    "returning empty string",
                    name,
                )
            return ""

    def keys(self):
        return self.__dict__.keys()
class AttributeSafeObject_set(AttributeSafeObject):
    """An AttributeSafeObject that sets & gets `set({})` on misses"""

    def __getattr__(self, k):
        try:
            return object.__getattribute__(self, k)
        except AttributeError:
            if k.startswith("__"):
                raise
            # lazily create an empty set on first access
            fresh = set()
            setattr(self, k, fresh)
            return fresh
class AttributeSafeObject_dict(AttributeSafeObject):
    """An AttributeSafeObject that sets & gets dict `{}` on misses"""

    def __getattr__(self, k):
        try:
            return object.__getattribute__(self, k)
        except AttributeError:
            if k.startswith("__"):
                raise
            # lazily create an empty dict on first access
            fresh = {}
            setattr(self, k, fresh)
            return fresh
class AttributeSafeObject_dict_ids(AttributeSafeObject_dict):
    """An AttributeSafeObject_dict_ids used to manage ids"""

    def add_unknown(self, key, items_to_update, v=None):
        """Record `v` for each item not already tracked in `key`'s store."""
        store = getattr(self, key)
        for item in items_to_update:
            store.setdefault(item, v)

    def update(self, key, items_to_update, v=None):
        """Record `v` for every item in `key`'s store, overwriting."""
        store = getattr(self, key)
        store.update(dict.fromkeys(items_to_update, v))

    def get_true(self, key):
        """Ids in `key`'s store whose value is truthy."""
        store = getattr(self, key)
        return [item for item, flag in store.items() if flag]

    def get_false(self, key):
        """Ids in `key`'s store whose value is falsy."""
        store = getattr(self, key)
        return [item for item, flag in store.items() if not flag]
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: UTF-8
import sys
import os
import math
import statistics
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from matplotlib.backends.backend_pdf import PdfPages
from datetime import datetime
import collections
import errno
import gzip
from ruamel_yaml import YAML
# Folder layouts keyed by deployment target; selected through USER or the
# `path=` command line argument.
PATHS = {
    "local": {
        "sourcepath" : "./asciigrids_debug/",
        "outputpath" : ".",
        "png-out" : "png_debug/" , # path to png images
        "pdf-out" : "pdf-out_debug/" , # path to pdf package
    },
    "test": {
        "sourcepath" : "./asciigrid/",
        "outputpath" : "./testout/",
        "png-out" : "png2/" , # path to png images
        "pdf-out" : "pdf-out2/" , # path to pdf package
    },
    "cluster": {
        "sourcepath" : "/source/",
        "outputpath" : "/out/",
        "png-out" : "png/" , # path to png images
        "pdf-out" : "pdf-out/" , # path to pdf package
    }
}
# default deployment target
USER = "local"
# sentinel for missing grid cells -- unused in the visible code; TODO confirm
NONEVALUE = -9999
def build():
    """Render every ascii grid below the source folder: one PNG per grid and
    one PDF per scenario directory."""
    path_id = USER
    source_folder = ""
    output_folder = ""
    # command line overrides: path=<id> source=<dir> out=<dir>
    if len(sys.argv) > 1 and __name__ == "__main__":
        for arg in sys.argv[1:]:
            key, value = arg.split("=")
            if key == "path":
                path_id = value
            if key == "source":
                source_folder = value
            if key == "out":
                output_folder = value
    if not source_folder:
        source_folder = PATHS[path_id]["sourcepath"]
    if not output_folder:
        output_folder = PATHS[path_id]["outputpath"]
    png_folder = os.path.join(output_folder, PATHS[path_id]["png-out"])
    pdf_folder = os.path.join(output_folder, PATHS[path_id]["pdf-out"])

    for root, dirs, files in os.walk(source_folder):
        if not files:
            continue
        print("root", root)
        print("dirs", dirs)
        # each leaf directory is one scenario; all its images share a PDF
        scenario = os.path.basename(root)
        pdfpath = os.path.join(pdf_folder, "scenario_{0}.pdf".format(scenario))
        makeDir(pdfpath)
        pdf = PdfPages(pdfpath)
        for filename in sorted(files):
            if filename.endswith(".meta"):
                continue
            print("file", filename)
            # derive the png and sidecar meta names (grids may be gzipped)
            if filename.endswith(".gz"):
                pngfilename = filename[:-6] + "png"
                metafilename = filename[:-2] + "meta"
            else:
                pngfilename = filename[:-3] + "png"
                metafilename = filename + ".meta"
            filepath = os.path.join(root, filename)
            metapath = os.path.join(root, metafilename)
            out_path = os.path.join(png_folder, scenario, pngfilename)
            createImgFromMeta(filepath, metapath, out_path, pdf=pdf)
        pdf.close()
def createImgFromMeta(ascii_path, meta_path, out_path, pdf=None):
    """Render one ESRI ASCII grid (optionally gzipped) to a PNG.

    Styling (title, colormap, value range, ticks, ...) comes from the YAML
    meta file at `meta_path`.  The image is written to `out_path` and, if
    `pdf` (a matplotlib PdfPages) is given, appended to it as a page.
    """
    # Read the 6-line ASCII raster header (ncols, nrows, xll, yll, cellsize, nodata).
    if ascii_path.endswith(".gz"):
        with gzip.open(ascii_path, 'rt') as source:
            ascii_header = source.readlines()[:6]
    else:
        with open(ascii_path, 'r') as source:
            ascii_header = source.readlines()[:6]
    ascii_header = [item.strip().split()[-1] for item in ascii_header]
    ascci_cols = int(ascii_header[0])
    ascii_rows = int(ascii_header[1])
    ascii_xll = float(ascii_header[2])
    ascii_yll = float(ascii_header[3])
    ascii_cs = float(ascii_header[4])
    ascii_nodata = float(ascii_header[5])

    # Styling defaults, overridden by the YAML meta file.
    title = ""
    label = ""
    colormap = 'viridis'
    cMap = None
    cbarLabel = None
    factor = 0.001
    ticklist = None
    maxValue = ascii_nodata
    maxLoaded = False
    minValue = ascii_nodata
    minLoaded = False
    with open(meta_path, 'rt') as meta:
        yaml = YAML(typ='safe')  # safe loader: meta files are plain data
        documents = yaml.load(meta)
        for item, doc in documents.items():
            print(item, ":", doc)
            if item == "title":
                title = doc
            elif item == "labeltext":
                label = doc
            elif item == "factor":
                factor = float(doc)
            elif item == "maxValue":
                maxValue = float(doc)
                maxLoaded = True
            elif item == "minValue":
                minValue = float(doc)
                minLoaded = True
            elif item == "colormap":
                colormap = doc
            elif item == "colorlist":
                cMap = doc
            elif item == "cbarLabel":
                cbarLabel = doc
            elif item == "ticklist":
                ticklist = [float(i) for i in doc]

    # Read the data array.  BUG FIX: `np.float` was deprecated in NumPy 1.20
    # and later removed; the builtin `float` is the documented replacement.
    ascii_data_array = np.loadtxt(ascii_path, dtype=float, skiprows=6)
    # Set the nodata values to nan so they render transparent.
    ascii_data_array[ascii_data_array == ascii_nodata] = np.nan
    # Values are stored as integers scaled by `factor`.
    ascii_data_array *= factor
    maxValue *= factor
    minValue *= factor

    image_extent = [
        ascii_xll, ascii_xll + ascci_cols * ascii_cs,
        ascii_yll, ascii_yll + ascii_rows * ascii_cs]

    fig, ax = plt.subplots()
    ax.set_title(title)
    # Build the imshow arguments once instead of eight near-identical calls.
    imshow_kwargs = {
        "extent": image_extent,
        "interpolation": 'none',
        "cmap": ListedColormap(cMap) if cMap else colormap,
    }
    # BUG FIX: when only minValue was supplied, the original passed
    # `vmax=minValue`; a lone minimum must be passed as `vmin`.
    if minLoaded:
        imshow_kwargs["vmin"] = minValue
    if maxLoaded:
        imshow_kwargs["vmax"] = maxValue
    img_plot = ax.imshow(ascii_data_array, **imshow_kwargs)

    # Place a colorbar next to the map.
    if ticklist:
        cbar = plt.colorbar(img_plot, ticks=ticklist, orientation='vertical', shrink=0.5, aspect=14)
    else:
        cbar = plt.colorbar(img_plot, orientation='vertical', shrink=0.5, aspect=14)
    cbar.set_label(label)
    if cbarLabel:
        cbar.ax.set_yticklabels(cbarLabel)
    ax.grid(True, alpha=0.5)

    # Save the PNG (and optionally a PDF page).
    makeDir(out_path)
    if pdf:
        pdf.savefig()
    plt.savefig(out_path, dpi=150)
    plt.close(fig)
def makeDir(out_path):
    """Ensure the parent directory of `out_path` exists.

    Accepts a file path and creates all missing parent directories.  A bare
    filename (empty dirname) is a no-op -- the original called
    ``os.makedirs("")`` in that case and crashed with FileNotFoundError.
    """
    parent = os.path.dirname(out_path)
    if parent:
        # exist_ok covers the check-then-create race the original guarded
        # against by hand with errno.EEXIST.
        os.makedirs(parent, exist_ok=True)
# Script entry point.
if __name__ == "__main__":
    build()
|
nilq/baby-python
|
python
|
r"""
``cotk.metrics`` provides classes and functions evaluating results of models. It provides
a fair metric for every model.
"""
import random
import multiprocessing
from multiprocessing import Pool
import numpy as np
from nltk.translate.bleu_score import corpus_bleu, sentence_bleu, SmoothingFunction
from .._utils.unordered_hash import UnorderedSha256
class MetricBase:
'''Base class for metrics.
'''
def __init__(self):
pass
class _PrecisionRecallMetric(MetricBase):
'''Base class for precision recall metrics. This is an abstract class.
Arguments:
dataloader (:class:cotk.GenerationBase): A language generation dataloader.
reference_allvocabs_key (str): Reference sentences are passed to :func:`forward` by
``data[reference_allvocabs_key]``. Default: ``resp_allvocabs``.
gen_key (str): Sentences generated by model are passed to :func:.forward by
``data[gen_key]``. Default: ``gen``.
Attributes:
res_prefix (str): Prefix added to the front of each key
in the result dict of ^close^
'''
def __init__(self, dataloader, reference_allvocabs_key='resp_allvocabs', gen_key='gen'):
super().__init__()
self.dataloader = dataloader
self.reference_allvocabs_key = reference_allvocabs_key
self.gen_key = gen_key
self.prec_list = []
self.rec_list = []
self.res_prefix = ""
def score(self, gen, reference):
r'''This function is called by ^forward^
Arguments:
* gen (list): list of generated word ids
* reference (list): list of word ids of a reference
Returns:
(scalar): score \in [0, 1]
'''
raise NotImplementedError( \
"This function should be implemented by subclasses.")
def forward(self, data):
'''Processing a batch of data.
Arguments:
data (dict): A dict at least contains the following keys.
data[reference_allvocabs_key] (list of list of list): Reference sentences.
Does not contain start token (eg: ``<go>``) and end token (eg: ``<eos>``).
Outermost list: batch_size
Innermost list: number of words, allow different sizes
Second innermost list: number of sentences, allow different sizes
data[gen_prob_key] (list of list of list): Sentence generations model outputs
similar to data[reference_allvocabs_key]
'''
references = data[self.reference_allvocabs_key]
gens = data[self.gen_key]
if len(references) != len(gens):
raise ValueError("Batch num is not matched.")
for reference, gen in zip(references, gens):
# pylint: disable=no-member
matrix = np.zeros((len(reference), len(gen)), dtype=np.float32)
for i, single_ref in enumerate(reference):
for j, single_gen in enumerate(gen):
matrix[i][j] = self.score(single_gen, single_ref)
self.prec_list.append(float(np.sum(np.max(matrix, 0))) / len(gen))
self.rec_list.append(float(np.sum(np.max(matrix, 1))) / len(references))
def close(self):
'''Return a dict which contains:
* **precision**: average precision
* **recall**: average recall
'''
return {'{} precision'.format(self.res_prefix): np.average(self.prec_list), \
'{} recall'.format(self.res_prefix): np.average(self.rec_list)}
class BleuPrecisionRecallMetric(_PrecisionRecallMetric):
    '''Metric for calculating sentence BLEU precision and recall

    Arguments:
        * ngram (int): Specifies BLEU-ngram
    '''

    def __init__(self, dataloader, ngram, reference_allvocabs_key='resp_allvocabs', gen_key='gen'):
        super().__init__(dataloader, reference_allvocabs_key, gen_key)
        if ngram not in (1, 2, 3, 4):
            raise ValueError("ngram should belong to [1, 4]")
        self.ngram = ngram
        # uniform weights over the n-gram orders
        self.weights = [1 / ngram for _ in range(ngram)]
        self.res_prefix = 'BLEU-{}'.format(ngram)

    def score(self, gen, reference):
        r'''Sentence BLEU-ngram between one generation and one reference.

        Returns:
            (scalar): sentence bleu score in [0, 1]
        '''
        smoother = SmoothingFunction().method1
        return sentence_bleu([reference], gen, self.weights, smoother)
class EmbSimilarityPrecisionRecallMetric(_PrecisionRecallMetric):
    '''Metric for calculating cosine similarity precision and recall

    Arguments:
        * embed (:class:`numpy.ndarray`): A 2-d padded array of word embeddings
        * mode (str): Specifies the operation that computes the bag-of-word
          representation. Must be 'avg' or 'extrema':

          - 'avg': element-wise average word embeddings
          - 'extrema': element-wise maximum word embeddings
    '''
    def __init__(self, dataloader, embed, mode, \
            reference_allvocabs_key='resp_allvocabs', gen_key='gen'):
        super().__init__(dataloader, reference_allvocabs_key, gen_key)
        if not isinstance(embed, np.ndarray) or len(np.shape(embed)) != 2:
            # fixed message typo: was "or shape or embed"
            raise ValueError("invalid type or shape of embed.")
        if mode not in ['avg', 'extrema']:
            raise ValueError("mode should be 'avg' or 'extrema'.")
        if len(embed) != self.dataloader.vocab_size:
            raise ValueError("embed size not equal to vocab size.")
        self.embed = embed
        self.mode = mode
        self.res_prefix = '{}-bow'.format(mode)

    def _lookup(self, ids, err_name):
        """Map word ids to embedding vectors; out-of-vocab ids fall back to
        the <unk> embedding, negative ids raise ValueError."""
        vecs = []
        for i in ids:
            if i < 0:
                raise ValueError("{} index out of range.".format(err_name))
            if i >= self.dataloader.vocab_size:
                vecs.append(self.embed[self.dataloader.unk_id])
            else:
                vecs.append(self.embed[i])
        return vecs

    def score(self, gen, reference):
        r'''Cosine similarity between bag-of-word embeddings.

        Returns:
            (scalar): cosine similarity mapped into [0, 1]
        '''
        # identical lookup for both sentences, deduplicated into _lookup
        gen_vec = self._lookup(gen, "gen")
        ref_vec = self._lookup(reference, "reference")
        if self.mode == 'avg':
            gen_embed = np.average(gen_vec, 0)
            ref_embed = np.average(ref_vec, 0)
        else:
            gen_embed = np.max(gen_vec, 0)
            ref_embed = np.max(ref_vec, 0)
        cos = np.sum(gen_embed * ref_embed) / \
            np.sqrt(np.sum(gen_embed * gen_embed) * np.sum(ref_embed * ref_embed))
        # map cosine from [-1, 1] onto [0, 1]
        return (cos + 1) / 2
class PerplexityMetric(MetricBase):
'''Metric for calculating perplexity.
Arguments:
dataloader (:class:cotk.GenerationBase): A language generation dataloader.
reference_allvocabs_key (str): Reference sentences with all vocabs
are passed to :func:`forward` by ``data[reference_allvocabs_key]``.
Default: ``resp_allvocabs``.
reference_len_key (str): Length of reference sentences are passed to :func:`forward`
by ``data[reference_len_key]``. Default: ``resp_length``.
gen_log_prob_key (str): Sentence generations model outputs of **log softmax** probability
are passed to :func:`forward` by ``data[gen_log_prob_key]``. Default: ``gen_log_prob``.
invalid_vocab (bool): whether gen_log_prob contains invalid vocab. Default: False
full_check (bool): whether perform full checks on `gen_log_prob` to make sure the sum
of probability is 1. Otherwise, a random check will be performed for efficiency.
Default: False
'''
def __init__(self, dataloader, \
reference_allvocabs_key="resp_allvocabs", \
reference_len_key="resp_length", \
gen_log_prob_key="gen_log_prob", \
invalid_vocab=False, \
full_check=False \
):
super().__init__()
self.dataloader = dataloader
self.reference_allvocabs_key = reference_allvocabs_key
self.reference_len_key = reference_len_key
self.gen_log_prob_key = gen_log_prob_key
self.word_loss = 0
self.length_sum = 0
self.invalid_vocab = invalid_vocab
self.full_check = full_check
def forward(self, data):
'''Processing a batch of data. Smoothing will be performed for invalid vocabs.
Unknowns vocabs will be ignored.
TODO:
Find a place to explain valid vocabs, invalid vocabs, and unknown vocabs.
Arguments:
data (dict): A dict at least contains the following keys.
data[reference_allvocabs_key] (list or :class:`numpy.array`): Reference sentences with all vocabs
with all vocabs. Contains start token (eg: ``<go>``) and end token (eg: ``<eos>``).
Size: `[batch_size, max_sentence_length]`
data[reference_len_key] (list): Length of Reference sentences. Contains start token (eg:``<go>``)
and end token (eg:``<eos>``). Size: `[batch_size]`
data[gen_log_prob_key] (list or :class:`numpy.array`): Sentence generations model outputs of
**log softmax** probability. Contains end token (eg:``<eos>``), but without start token
(eg: ``<go>``). The 2nd dimension can be jagged.
Size: `[batch_size, gen_sentence_length, vocab_size]` for ``invalid_vocab = False``.
`[batch_size, gen_sentence_length, all_vocab_size]` for ``invalid_vocab = True``.
Warning:
``data[gen_log_prob_key]`` must be processed after log_softmax. That means,
``np.sum(np.exp(gen_log_prob), -1)`` equals ``np.ones((batch_size, gen_sentence_length))``
'''
resp_allvocabs = data[self.reference_allvocabs_key]
resp_length = data[self.reference_len_key]
gen_log_prob = data[self.gen_log_prob_key]
if len(resp_allvocabs) != len(resp_length) or len(resp_allvocabs) != len(gen_log_prob):
raise ValueError("Batch num is not matched.")
# perform random check to assert the probability is valid
checkid = random.randint(0, len(resp_length)-1)
if resp_length[checkid] < 2:
raise ValueError("resp_length must no less than 2, because <go> and <eos> are always included.")
checkrow = random.randint(0, resp_length[checkid]-2)
if not np.isclose(np.sum(np.exp(gen_log_prob[checkid][checkrow])), 1):
print("gen_log_prob[%d][%d] exp sum is equal to %f." % (checkid, checkrow, \
np.sum(np.exp(gen_log_prob[checkid][checkrow]))))
raise ValueError("data[gen_log_prob_key] must be processed after log_softmax.")
if not isinstance(resp_allvocabs, np.ndarray):
resp_allvocabs = np.array(resp_allvocabs)
if not isinstance(gen_log_prob, np.ndarray):
gen_log_prob = np.array(gen_log_prob)
invalid_vocab_num = self.dataloader.all_vocab_size - self.dataloader.vocab_size
#resp = resp_allvocabs.copy()
#resp[resp >= self.dataloader.vocab_size] = self.dataloader.unk_id
for i, single_length in enumerate(resp_length):
# perform full check to assert the probability is valid
if self.full_check:
expsum = np.sum(np.exp(gen_log_prob[i][:single_length-1]), -1)
if not np.allclose(expsum, [1] * (single_length - 1)):
raise ValueError("data[gen_log_prob_key] must be processed after log_softmax.")
resp_now = np.array(resp_allvocabs[i][1:single_length])
gen_log_prob_now = np.array(gen_log_prob[i])
if not self.invalid_vocab:
if gen_log_prob_now.shape[1] != self.dataloader.vocab_size:
raise ValueError("The third dimension gen_log_prob should be equals to vocab_size when \
invalid_vocab = False, \
but %d != %d" % (gen_log_prob_now.shape[1], self.dataloader.vocab_size))
else:
if gen_log_prob_now.shape[1] != self.dataloader.all_vocab_size:
raise ValueError("The third dimension gen_log_prob should be equals to all_vocab_size \
when invalid_vocab = True, \
but %d != %d" % (gen_log_prob_now.shape[1], self.dataloader.vocab_size))
# calc normal vocab
normal_idx = np.where(np.logical_and(resp_now != self.dataloader.unk_id, \
resp_now < self.dataloader.vocab_size))
self.word_loss += -np.sum(gen_log_prob_now[normal_idx, resp_now[normal_idx]])
self.length_sum += np.array(normal_idx).shape[1]
# calc invalid vocab
invalid_idx = np.where(resp_now >= self.dataloader.vocab_size)
invalid_log_prob = gen_log_prob_now[\
invalid_idx, [self.dataloader.unk_id] * len(invalid_idx) \
] - np.log(invalid_vocab_num)
if self.invalid_vocab:
extra_invalid_log_prob = gen_log_prob_now[invalid_idx, resp_now[invalid_idx]]
self.word_loss += -np.sum(np.log( \
np.exp(invalid_log_prob) + np.exp(extra_invalid_log_prob) \
))
else:
self.word_loss += -np.sum(invalid_log_prob)
self.length_sum += np.array(invalid_idx).shape[1]
def close(self):
    '''Return a dict which contains:

    * **perplexity**: perplexity value
    '''
    # Perplexity = exp(average negative log-likelihood per token).
    avg_neg_log_likelihood = self.word_loss / self.length_sum
    return {"perplexity": np.exp(avg_neg_log_likelihood)}
class MultiTurnPerplexityMetric(MetricBase):
    '''Metric for calculating multi-turn perplexity.

    Each dialog session is flattened into a batch of single sentences and the
    actual perplexity computation is delegated to :class:`PerplexityMetric`.

    Arguments:
        dataloader (:class:cotk.GenerationBase): A language generation dataloader.
        reference_allvocabs_key (str): Reference sentences with all vocabs
            are passed to :func:`forward` by ``data[reference_allvocabs_key]``.
            Default: ``sent_allvocabs``.
        reference_len_key (str): Length of reference sentences are passed to :func:`forward`
            by ``data[reference_len_key]``. Default: ``sent_length``.
        gen_log_prob_key (str): Sentence generations model outputs of **log softmax** probability
            are passed to :func:`forward` by ``data[gen_log_prob_key]``. Default: ``gen_log_prob``.
        invalid_vocab (bool): whether gen_log_prob contains invalid vocab. Default: False
        full_check (bool): whether perform full checks on `gen_log_prob` to make sure the sum
            of probability is 1. Otherwise, a random check will be performed for efficiency.
            Default: False
    '''
    def __init__(self, dataloader, reference_allvocabs_key="sent_allvocabs", \
            reference_len_key="sent_length", \
            gen_log_prob_key="gen_log_prob", \
            invalid_vocab=False, \
            full_check=False \
            ):
        super().__init__()
        self.dataloader = dataloader
        self.reference_allvocabs_key = reference_allvocabs_key
        self.reference_len_key = reference_len_key
        self.gen_log_prob_key = gen_log_prob_key
        self.invalid_vocab = invalid_vocab
        # Single-turn metric reused for every turn. The keys given here are the
        # names *this class* uses when feeding it in forward(), independent of
        # the caller-facing key names configured above.
        self.sub_metric = PerplexityMetric(dataloader, \
            reference_allvocabs_key="sent_allvocabs", \
            reference_len_key="sent_length", \
            gen_log_prob_key="gen_log_prob", \
            invalid_vocab=invalid_vocab, \
            full_check=full_check)

    def forward(self, data):
        '''Processing a batch of data.

        Arguments:
            data (dict): A dict at least contains the following keys.

                data[reference_allvocabs_key] (list or :class:`numpy.array`): Reference sentences
                    with all vocabs.
                    Contains start token (eg: ``<go>``) and end token (eg: ``<eos>``).
                    Size: `[batch_size, max_turn_length, max_sentence_length]`
                data[reference_len_key] (list of list): Length of Reference sentences. Contains
                    start token (eg:``<go>``) and end token (eg:``<eos>``). It must NOT be padded,
                    which means the inner lists may have different length.
                    Length of outer list: `batch_size`
                data[gen_log_prob_key] (list or :class:`numpy.array`): Sentence generations model outputs of
                    **log softmax** probability. Contains end token (eg:``<eos>``), but without start token
                    (eg: ``<go>``). The 2nd / 3rd dimension can be jagged or padded.
                    Size: `[batch_size, max_turn_length, gen_sentence_length, vocab_size]`.

        Warning:
            ``data[gen_log_prob_key]`` must be processed after log_softmax. That means,
            ``np.sum(np.exp(gen_log_prob), -1)`` equals ``np.ones((batch_size, gen_sentence_length))``
        '''
        reference_allvocabs = data[self.reference_allvocabs_key]
        length = data[self.reference_len_key]
        gen_log_prob = data[self.gen_log_prob_key]
        if len(length) != len(reference_allvocabs) or len(length) != len(gen_log_prob):
            raise ValueError("Batch num is not matched.")
        for i, sent_length in enumerate(length):
            # Pass turn as batch for sub_metric, the result will be same.
            turn_length = len(sent_length)
            if len(reference_allvocabs[i]) < turn_length or len(gen_log_prob[i]) < turn_length:
                raise ValueError("Turn num is not matched.")
            self.sub_metric.forward({"sent_allvocabs": reference_allvocabs[i][:turn_length], \
                "sent_length": sent_length, \
                "gen_log_prob": gen_log_prob[i][:turn_length]})

    def close(self):
        '''Return a dict which contains:

        * **perplexity**: perplexity value accumulated over all turns of all sessions.
        '''
        return self.sub_metric.close()
class BleuCorpusMetric(MetricBase):
    '''Metric for calculating BLEU.

    Collects hypothesis/reference pairs over batches and computes corpus-level
    BLEU (with NLTK's method7 smoothing) when :func:`close` is called.

    Arguments:
        dataloader (:class:cotk.GenerationBase): A language generation dataloader.
        reference_allvocabs_key (str): Reference sentences with all vocabs
            are passed to :func:.forward by ``data[reference_allvocabs_key]``.
            Default: ``resp_allvocabs``.
        gen_key (str): Sentences generated by model are passed to :func:.forward by
            ``data[gen_key]``. Default: ``gen``.
    '''
    def __init__(self, dataloader, reference_allvocabs_key="resp_allvocabs", gen_key="gen"):
        super().__init__()
        self.dataloader = dataloader
        self.reference_allvocabs_key = reference_allvocabs_key
        self.gen_key = gen_key
        self.refs = []
        self.hyps = []

    def forward(self, data):
        '''Processing a batch of data.

        Arguments:
            data (dict): A dict at least contains the following keys.

                data[reference_allvocabs_key] (list or :class:`numpy.array` of `int`):
                    reference sentences with all vocabs.
                    Contains start token (eg: ``<go>``) and end token (eg: ``<eos>``).
                    Size: `[batch_size, max_sentence_length]`
                data[gen_key] (list or :class:`numpy.array` of `int`): Sentences generated by model.
                    Contains end token (eg: ``<eos>``), but without start token (eg: ``<go>``).
                    Size: `[batch_size, gen_sentence_length]`.
        '''
        hypotheses = data[self.gen_key]
        references = data[self.reference_allvocabs_key]
        if len(references) != len(hypotheses):
            raise ValueError("Batch num is not matched.")
        trim = self.dataloader.trim_index
        for hyp_sent, ref_sent in zip(hypotheses, references):
            self.hyps.append(trim(hyp_sent))
            # Drop the leading <go> token from the reference before trimming.
            self.refs.append([trim(ref_sent[1:])])

    def close(self):
        '''Return a dict which contains:

        * **bleu**: bleu value.
        '''
        try:
            score = corpus_bleu(self.refs, self.hyps, \
                smoothing_function=SmoothingFunction().method7)
        except ZeroDivisionError as _:
            raise ZeroDivisionError("Bleu smoothing divided by zero. This is a known bug of corpus_bleu, \
usually caused when there is only one sample and the sample length is 1.")
        return {"bleu": score}
class SelfBleuCorpusMetric(MetricBase):
    '''Metric for calculating Self-BLEU.

    Each generated sentence is scored (as hypothesis) against all the other
    generated sentences (as references); a high Self-BLEU indicates low
    diversity of the generations.

    Arguments:
        dataloader (:class:cotk.GenerationBase): A language generation dataloader.
        gen_key (str): Sentences generated by model are passed to :func:.forward by
            ``data[gen_key]``. Default: ``gen``.
        sample (int): Number of samples sampled from the generated sentences. Default: 1000.
    '''
    def __init__(self, dataloader, gen_key="gen", sample=1000):
        super().__init__()
        self.dataloader = dataloader
        self.gen_key = gen_key
        self.sample = sample
        self.refs = []
        self.hyps = []

    def forward(self, data):
        '''Processing a batch of data.

        Arguments:
            data (dict): A dict at least contains the following keys.

                data[gen_key] (list or :class:`numpy.array` of `int`): Sentences generated by model.
                    Contains end token (eg: ``<eos>``), but without start token (eg: ``<go>``).
                    Size: `[batch_size, gen_sentence_length]`.
        '''
        gen = data[self.gen_key]
        for gen_sen in gen:
            self.hyps.append(self.dataloader.trim_index(gen_sen))

    def run_f(self, ele):
        '''Score one ``(references, hypothesis)`` pair with sentence BLEU
        (method1 smoothing).

        Kept as a bound method (not a closure/lambda) so it can be pickled
        for ``Pool.map``.
        '''
        return sentence_bleu(ele[0], ele[1], smoothing_function=SmoothingFunction().method1)

    def close(self):
        '''Return a dict which contains:

        * **self-bleu**: self-bleu value.
        '''
        # Cap the sample size at the number of collected hypotheses.
        if self.sample > len(self.hyps):
            self.sample = len(self.hyps)
        random.shuffle(self.hyps)
        ref = self.hyps[:self.sample]
        try:
            bleu_irl = []
            # Parallelize only for large sample sizes; for small ones the
            # process pool start-up cost outweighs the gain.
            if self.sample >= 1000:
                pool = Pool(multiprocessing.cpu_count())
                # For each sentence i, every OTHER sampled sentence is a reference.
                bleu_irl = pool.map(self.run_f, [(ref[:i]+ref[i+1:self.sample], ref[i]) \
                    for i in range(self.sample)])
                pool.close()
                pool.join()
            elif self.sample > 1:
                for i in range(self.sample):
                    bleu_irl.append(self.run_f((ref[:i]+ref[i+1:], ref[i])))
            # NOTE(review): with sample <= 1, bleu_irl stays empty and the
            # division below raises ZeroDivisionError, re-raised with the
            # message underneath.
            return {"self-bleu" : 1.0 * sum(bleu_irl) / len(bleu_irl)}
        except ZeroDivisionError as _:
            raise ZeroDivisionError("Bleu smoothing divided by zero. This is a known bug of corpus_bleu, \
usually caused when there is only one sample and the sample length is 1.")
class FwBwBleuCorpusMetric(MetricBase):
    '''Metric for calculating forward/backward BLEU.

    Forward BLEU scores each generated sentence against the test references
    (precision-like); backward BLEU scores each reference against the
    generations (recall-like). Their harmonic mean is reported per n-gram
    order (2 to 4).

    Arguments:
        dataloader (:class:cotk.GenerationBase): A language generation dataloader.
        reference_test_key (str): Key of the reference sentences inside the
            dataloader's ``test`` split; references are read from
            ``dataloader.data["test"][reference_test_key]`` at construction
            time (NOT from the batches fed to :func:.forward).
        gen_key (str): Sentences generated by model are passed to :func:.forward by
            ``data[gen_key]``. Default: ``gen``.
        sample (int): Number of samples sampled from the generated sentences. Default: 1000.
    '''
    def __init__(self, dataloader, \
            reference_test_key, \
            gen_key="gen", \
            sample=1000):
        super().__init__()
        self.dataloader = dataloader
        self.reference_test_key = reference_test_key
        self.gen_key = gen_key
        self.sample = sample
        self.refs = []
        self.hyps = []
        # References come straight from the dataloader's test split.
        resp = self.dataloader.data["test"][self.reference_test_key]
        for resp_sen in resp:
            # Drop the leading <go> token before trimming padding/<eos>.
            self.refs.append(self.dataloader.trim_index(resp_sen[1:]))

    def forward(self, data):
        '''Processing a batch of data.

        Arguments:
            data (dict): A dict at least contains the following keys.

                data[gen_key] (list or :class:`numpy.array` of `int`): Sentences generated by model.
                    Contains end token (eg: ``<eos>``), but without start token (eg: ``<go>``).
                    Size: `[batch_size, gen_sentence_length]`.
        '''
        gen = data[self.gen_key]
        for gen_sen in gen:
            self.hyps.append(self.dataloader.trim_index(gen_sen))

    def run_f(self, ele):
        '''Score one ``(references, hypothesis, weights)`` triple with
        sentence BLEU (method1 smoothing).

        Kept as a bound method (not a closure/lambda) so it can be pickled
        for ``Pool.map``.
        '''
        return sentence_bleu(ele[0], ele[1], ele[2], smoothing_function=SmoothingFunction().method1)

    def close(self):
        '''Return a dict which contains ``fw-bleu-k``, ``bw-bleu-k`` and their
        harmonic mean ``fw-bw-bleu-k`` for k in 2..4.
        '''
        max_len = max([len(self.hyps), len(self.refs)])
        if self.sample > max_len:
            self.sample = max_len
        random.shuffle(self.hyps)
        random.shuffle(self.refs)
        try:
            result = {}
            for ngram in range(2, 5):
                # Uniform n-gram weights, e.g. (0.5, 0.5) for bigram BLEU.
                weight = tuple((1. / ngram for _ in range(ngram)))
                # Parallelize only for large sample sizes; pool start-up cost
                # outweighs the gain otherwise.
                if self.sample >= 1000:
                    pool = Pool(multiprocessing.cpu_count())
                    bleu_irl_fw = pool.map(self.run_f, \
                        [(self.refs, self.hyps[i], weight) for i in range(self.sample)])
                    bleu_irl_bw = pool.map(self.run_f, \
                        [(self.hyps, self.refs[i], weight) for i in range(self.sample)])
                    pool.close()
                    pool.join()
                else:
                    bleu_irl_fw, bleu_irl_bw = [], []
                    for i in range(self.sample):
                        bleu_irl_fw.append(self.run_f((self.refs, self.hyps[i], weight)))
                        bleu_irl_bw.append(self.run_f((self.hyps, self.refs[i], weight)))
                fw_bleu = (1.0 * sum(bleu_irl_fw) / len(bleu_irl_fw))
                bw_bleu = (1.0 * sum(bleu_irl_bw) / len(bleu_irl_bw))
                result["fw-bleu-%d"%ngram] = fw_bleu
                result["bw-bleu-%d"%ngram] = bw_bleu
                # Harmonic mean of forward and backward BLEU.
                result["fw-bw-bleu-%d"%ngram] = 2.0 * bw_bleu * fw_bleu / (fw_bleu + bw_bleu)
            return result
        except ZeroDivisionError as _:
            raise ZeroDivisionError("Bleu smoothing divided by zero. This is a known bug of corpus_bleu, \
usually caused when there is only one sample and the sample length is 1.")
class MultiTurnBleuCorpusMetric(MetricBase):
    '''Metric for calculating multi-turn BLEU.

    Every turn of every session is treated as an independent
    hypothesis/reference pair for corpus-level BLEU.

    Arguments:
        dataloader (:class:cotk.GenerationBase): A language generation dataloader.
        reference_allvocabs_key (str): Reference sentences with all vocabs are passed to
            :func:`forward` by ``data[reference_allvocabs_key]``.
            Default: ``reference_allvocabs``.
        gen_key (str): Sentences generated by model are passed to :func:.forward by
            ``data[gen_key]``. Default: ``gen``.
        turn_len_key (str): Turn length are passed to :func:.forward by
            ``data[turn_len_key]``. Default: ``turn_length``.
    '''
    def __init__(self, dataloader, reference_allvocabs_key="reference_allvocabs", \
            gen_key="gen", \
            turn_len_key="turn_length" \
            ):
        super().__init__()
        self.dataloader = dataloader
        self.reference_allvocabs_key = reference_allvocabs_key
        self.turn_len_key = turn_len_key
        self.gen_key = gen_key
        self.refs = []
        self.hyps = []

    def forward(self, data):
        '''Processing a batch of data.

        Arguments:
            data (dict): A dict at least contains the following keys.

                data[reference_allvocabs_key] (list or :class:`numpy.array`):
                    Reference sentences with all vocabs.
                    Contains start token (eg: ``<go>``) and end token (eg: ``<eos>``).
                    Size: `[batch_size, max_turn_length, max_sentence_length]`
                data[gen_key] (list or :class:`numpy.array`): 3-d array of int.
                    Sentences generated by model.
                    Contains end token (eg: ``<eos>``), but without start token (eg: ``<go>``).
                    The 2nd / 3rd dimension can be jagged.
                    Size: `[batch_size, max_turn_length, gen_sentence_length]`.
                data[turn_len_key] (list or :class:`numpy.array`): Length of turns in each sample.
                    Size: `[batch_size]`
        '''
        reference_allvocabs = data[self.reference_allvocabs_key]
        length = data[self.turn_len_key]
        gen = data[self.gen_key]
        if len(length) != len(reference_allvocabs) or len(length) != len(gen):
            raise ValueError("Batch num is not matched.")
        for i, turn_length in enumerate(length):
            gen_session = gen[i]
            ref_session = reference_allvocabs[i]
            # Flatten each session: every turn becomes one hypothesis/reference pair.
            for j in range(turn_length):
                self.hyps.append(self.dataloader.trim_index(gen_session[j]))
                # Trim padding first, then drop the leading <go> token.
                self.refs.append([self.dataloader.trim_index(ref_session[j])[1:]])

    def close(self):
        '''Return a dict which contains:

        * **bleu**: bleu value.
        '''
        try:
            return {"bleu": \
                corpus_bleu(self.refs, self.hyps, smoothing_function=SmoothingFunction().method7)}
        except ZeroDivisionError as _:
            raise ZeroDivisionError("Bleu smoothing divided by zero. This is a known bug of corpus_bleu, \
usually caused when there is only one sample and the sample length is 1.")
class SingleTurnDialogRecorder(MetricBase):
    '''A metric-like class for recording posts, references and generations
    of a single-turn dialog model as readable sentences.

    Arguments:
        dataloader (:class:cotk.GenerationBase): A language generation dataloader.
        post_allvocabs_key (str): Dialog post are passed to :func:`forward`
            by ``data[post_allvocabs_key]``. Default: ``post_allvocabs``.
        resp_allvocabs_key (str): Dialog responses are passed to :func:`forward`
            by ``data[resp_allvocabs_key]``. Default: ``resp_allvocabs``.
        gen_key (str): Sentence generated by model are passed to :func:`forward` by
            ``data[gen_key]``. Default: ``gen``.
    '''
    def __init__(self, dataloader, post_allvocabs_key="post_allvocabs", \
            resp_allvocabs_key="resp_allvocabs", gen_key="gen"):
        super().__init__()
        self.dataloader = dataloader
        self.post_allvocabs_key = post_allvocabs_key
        self.resp_allvocabs_key = resp_allvocabs_key
        self.gen_key = gen_key
        self.post_list = []
        self.resp_list = []
        self.gen_list = []

    def forward(self, data):
        '''Processing a batch of data.

        Arguments:
            data (dict): A dict at least contains the following keys.

                data[post_allvocabs_key] (list or :class:`numpy.array` of `int`):
                    Dialog posts with all vocabs.
                    Contains start token (eg: ``<go>``) and end token (eg: ``<eos>``).
                    Size: `[batch_size, max_sentence_length]`
                data[resp_allvocabs_key] (list or :class:`numpy.array` of `int`):
                    Dialog responses with all vocabs.
                    Contains start token (eg: ``<go>``) and end token (eg: ``<eos>``).
                    Size: `[batch_size, max_sentence_length]`
                data[gen_key] (list or :class:`numpy.array` of `int`): Sentences generated by model.
                    Contains end token (eg: ``<eos>``), but without start token (eg: ``<go>``).
                    Size: `[batch_size, gen_sentence_length]`.
        '''
        posts = data[self.post_allvocabs_key]
        resps = data[self.resp_allvocabs_key]
        gens = data[self.gen_key]
        if not (len(posts) == len(resps) == len(gens)):
            raise ValueError("Batch num is not matched.")
        to_sen = self.dataloader.index_to_sen
        for post_sen, resp_sen, gen_sen in zip(posts, resps, gens):
            # Posts/responses carry a leading <go> token; generations do not.
            self.post_list.append(to_sen(post_sen[1:]))
            self.resp_list.append(to_sen(resp_sen[1:]))
            self.gen_list.append(to_sen(gen_sen))

    def close(self):
        '''Return a dict which contains:

        * **post**: a list of post sentences.
        * **resp**: a list of response sentences.
        * **gen**: a list of generated sentences.
        '''
        return {"post": self.post_list, "resp": self.resp_list, "gen": self.gen_list}
class MultiTurnDialogRecorder(MetricBase):
    '''A metric-like class for recording generated sentences and references
    of a multi-turn dialog model.

    Arguments:
        dataloader (:class:cotk.GenerationBase): A language generation dataloader.
        context_allvocabs_key (str): Dialog context are passed to :func:`forward` by
            ``data[context_allvocabs_key]``. Default: ``context_allvocabs``.
        reference_allvocabs_key (str): Dialog references with all vocabs
            are passed to :func:`forward` by ``data[reference_allvocabs_key]``.
            Default: ``reference_allvocabs``.
        gen_key (str): Sentences generated by model are passed to :func:`forward` by
            ``data[gen_key]``. Default: ``gen``.
        turn_len_key (str): Turn length are passed to :func:.forward by
            ``data[turn_len_key]``. Default: ``turn_length``.
    '''
    def __init__(self, dataloader, context_allvocabs_key="context_allvocabs", \
            reference_allvocabs_key="reference_allvocabs", gen_key="gen", \
            turn_len_key="turn_length"):
        super().__init__()
        self.dataloader = dataloader
        self.context_allvocabs_key = context_allvocabs_key
        self.reference_allvocabs_key = reference_allvocabs_key
        self.gen_key = gen_key
        self.turn_len_key = turn_len_key
        self.context_list = []
        self.reference_list = []
        self.gen_list = []

    def forward(self, data):
        '''Processing a batch of data.

        Arguments:
            data (dict): A dict at least contains the following keys.

                data[context_allvocabs_key] (list or :class:`numpy.array` of `int`): Dialog post.
                    A 3-d padded array containing id of words.
                    Contains start token (eg: ``<go>``) and end token (eg: ``<eos>``).
                    Size: `[batch_size, _turn_length, max_sentence_length]`
                data[reference_allvocabs_key] (list or :class:`numpy.array` of `int`):
                    Dialog responses with all vocabs. A 3-d padded array containing id of words.
                    Contains start token (eg: ``<go>``) and end token (eg: ``<eos>``).
                    Size: `[batch_size, max_turn_length, max_sentence_length]`
                data[gen_key] (list or :class:`numpy.array` of `int`): Sentences generated by model.
                    A 3-d padded array containing id of words.
                    Contains end token (eg: ``<eos>``), but without start token (eg: ``<go>``).
                    Size: `[batch_size, max_turn_length, gen_sentence_length]`.
                data[turn_len_key] (list or :class:`numpy.array`): Length of turns in each sample.
                    Size: `[batch_size]`
        '''
        context_allvocabs = data[self.context_allvocabs_key]
        reference_allvocabs = data[self.reference_allvocabs_key]
        gen = data[self.gen_key]
        turn_length = data[self.turn_len_key]
        if len(gen) != len(reference_allvocabs):
            raise ValueError("Batch num is not matched.")
        for i, context_sen in enumerate(context_allvocabs):
            # References carry a leading <go> per turn; generations do not.
            self.context_list.append(self.dataloader.multi_turn_index_to_sen( \
                np.array(context_sen), ignore_first_token=True))
            self.reference_list.append(self.dataloader.multi_turn_index_to_sen( \
                np.array(reference_allvocabs[i]), turn_length=turn_length[i], ignore_first_token=True))
            self.gen_list.append(self.dataloader.multi_turn_index_to_sen( \
                np.array(gen[i]), turn_length=turn_length[i]))
            # NOTE: leftover debug print statements were removed here.
            if len(self.reference_list[-1]) != len(self.gen_list[-1]):
                raise ValueError("Reference turn num %d != gen turn num %d." % \
                    (len(self.reference_list[-1]), len(self.gen_list[-1])))

    def close(self):
        '''Return a dict which contains:

        * **context**: a list of context sentences.
        * **reference**: a list of reference sentences.
        * **gen**: a list of generated sentences.
        '''
        return {"context": self.context_list, "reference": self.reference_list, "gen": self.gen_list}
class LanguageGenerationRecorder(MetricBase):
    '''A metric-like class for recording generated sentences as readable text.

    Arguments:
        dataloader (:class:cotk.GenerationBase): A language generation dataloader.
        gen_key (str): Sentences generated by model are passed to :func:`forward` by
            ``data[gen_key]``. Default: ``gen``.
    '''
    def __init__(self, dataloader, gen_key="gen"):
        super().__init__()
        self.dataloader = dataloader
        self.gen_key = gen_key
        self.gen_list = []

    def forward(self, data):
        '''Processing a batch of data.

        Arguments:
            data (dict): A dict at least contains the following keys.

                data[gen_key] (list or :class:`numpy.array` of `int`): Sentences generated by model.
                    Contains end token (eg: ``<eos>``), but without start token (eg: ``<go>``).
                    Size: `[batch_size, gen_sentence_length]`.
        '''
        to_sen = self.dataloader.index_to_sen
        for sent in data[self.gen_key]:
            self.gen_list.append(to_sen(sent))

    def close(self):
        '''Return a dict which contains:

        * **gen**: a list of generated sentences.
        '''
        return {"gen": self.gen_list}
class HashValueRecorder(MetricBase):
    '''A metric-like class accumulating the ``hashvalue`` entries of every
    batch into one order-independent hash.

    Arguments:
        hash_key (str): Key under which the final digest is returned by
            :func:`close`. Default: ``hashvalue``.
    '''
    def __init__(self, hash_key="hashvalue"):
        super().__init__()
        self._hash_key = hash_key
        self.unordered_hash = None

    def forward(self, data):
        '''Processing a batch of data.

        Arguments:
            data (dict): A dict that may contain a ``hashvalue`` entry.
        '''
        if "hashvalue" not in data:
            return
        # Lazily create the accumulator on the first batch carrying a hash.
        if self.unordered_hash is None:
            self.unordered_hash = UnorderedSha256()
        self.unordered_hash.update_hash(data["hashvalue"])

    def close(self):
        '''Return ``{hash_key: digest}`` if any hash was seen, else ``{}``.'''
        return {self._hash_key: self.unordered_hash.digest()} if self.unordered_hash else {}
class MetricChain(MetricBase):
    '''A metric-like class composing several metrics so they can be driven
    as one.

    Examples:
        >>> metric = MetricChain()
        >>> metric.add_metric(BleuCorpusMetric())
        >>> metric.add_metric(SingleDialogRecorder(dataloader))
    '''
    def __init__(self):
        super().__init__()
        self.metric_list = []

    def add_metric(self, metric):
        '''Append a metric to the chain.

        Arguments:
            metric (MetricBase): a metric class

        Raises:
            TypeError: if ``metric`` is not a :class:`MetricBase` instance.
        '''
        if not isinstance(metric, MetricBase):
            raise TypeError("Metric must be a subclass of MetricBase")
        self.metric_list.append(metric)

    def forward(self, data):
        '''Feed one batch of data to every metric in the chain.

        Arguments:
            data (dict): A dict at least containing the keys every chained
                metric needs.
        '''
        for component in self.metric_list:
            component.forward(data)

    def close(self):
        '''Merge and return the result dicts of every chained metric.'''
        merged = {}
        for component in self.metric_list:
            merged.update(component.close())
        return merged
|
nilq/baby-python
|
python
|
from re import search
from requests import get, post
from requests.exceptions import ConnectionError, MissingSchema, ReadTimeout
from sqlalchemy import Boolean, case, ForeignKey, Integer
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from sqlalchemy.sql.expression import true
from eNMS import app
from eNMS.database import db
from eNMS.models import models
from eNMS.models.base import AbstractBase
@db.set_custom_properties
class Task(AbstractBase):
    # SQLAlchemy model for a scheduled run of a service. Actual scheduling is
    # delegated to an external scheduler process reached over HTTP at
    # ``app.scheduler_address``; this model stores the configuration and
    # proxies runtime queries to that scheduler.

    __tablename__ = type = "task"
    id = db.Column(Integer, primary_key=True)
    name = db.Column(db.SmallString, unique=True)
    description = db.Column(db.SmallString)
    # "standard" (start date + frequency) or crontab-based scheduling.
    scheduling_mode = db.Column(db.SmallString, default="standard")
    frequency = db.Column(Integer)
    frequency_unit = db.Column(db.SmallString, default="seconds")
    start_date = db.Column(db.SmallString)
    end_date = db.Column(db.SmallString)
    crontab_expression = db.Column(db.SmallString)
    # True while the task is registered with the external scheduler.
    is_active = db.Column(Boolean, default=False)
    initial_payload = db.Column(db.Dict)
    devices = relationship(
        "Device", secondary=db.task_device_table, back_populates="tasks"
    )
    pools = relationship("Pool", secondary=db.task_pool_table, back_populates="tasks")
    service_id = db.Column(Integer, ForeignKey("service.id"))
    service = relationship("Service", back_populates="tasks")
    service_name = association_proxy("service", "name")
    # Computed properties exposed alongside the column values.
    model_properties = ["next_run_time", "time_before_next_run", "status"]

    def __init__(self, **kwargs):
        # Delegates to update() so creation and edition share one code path.
        super().update(**kwargs)

    def update(self, **kwargs):
        super().update(**kwargs)
        # (Re)register with the scheduler whenever an active task is edited.
        if self.is_active:
            self.schedule()

    def delete(self):
        # Remove the matching job from the external scheduler.
        post(f"{app.scheduler_address}/delete_job", json=self.id)

    @hybrid_property
    def status(self):
        # Python-side value: human-readable activity flag.
        return "Active" if self.is_active else "Inactive"

    @status.expression
    def status(cls):  # noqa: N805
        # SQL-side expression so "status" can be used in queries and sorts.
        return case([(cls.is_active, "Active")], else_="Inactive")

    @classmethod
    def rbac_filter(cls, query, mode, user):
        # Restrict a task query to tasks whose service is public, or is
        # accessible to the user directly or through one of the user's groups.
        public_tasks = query.join(cls.service).filter(
            models["service"].public == true()
        )
        user_access_tasks = (
            query.join(cls.service)
            .join(models["access"], models["service"].access)
            .join(models["user"], models["access"].users)
            .filter(models["user"].name == user.name)
        )
        user_group_access_tasks = (
            query.join(cls.service)
            .join(models["access"], models["service"].access)
            .join(models["group"], models["access"].groups)
            .join(models["user"], models["group"].users)
            .filter(models["user"].name == user.name)
        )
        return public_tasks.union(user_access_tasks, user_group_access_tasks)

    @property
    def next_run_time(self):
        # Ask the scheduler for the job's next fire time; the very short
        # timeout keeps the UI responsive when the scheduler is down.
        try:
            return get(
                f"{app.scheduler_address}/next_runtime/{self.id}", timeout=0.01
            ).json()
        except (ConnectionError, MissingSchema, ReadTimeout):
            return "Scheduler Unreachable"

    @property
    def time_before_next_run(self):
        # Same pattern as next_run_time, for the remaining-time endpoint.
        try:
            return get(
                f"{app.scheduler_address}/time_left/{self.id}", timeout=0.01
            ).json()
        except (ConnectionError, MissingSchema, ReadTimeout):
            return "Scheduler Unreachable"

    def schedule(self, mode="schedule"):
        # Push this task's properties to the scheduler; the scheduler replies
        # with the resulting state ("active" flag and any alert message).
        try:
            result = post(
                f"{app.scheduler_address}/schedule",
                json={"mode": mode, "task": self.get_properties()},
            ).json()
        except ConnectionError:
            return {"alert": "Scheduler Unreachable: the task cannot be scheduled."}
        self.is_active = result.get("active", False)
        return result
@db.set_custom_properties
class Event(AbstractBase):
    # Log-triggered automation rule: when an incoming log line matches both
    # the source pattern and the content pattern, the associated service runs.

    __tablename__ = type = "event"
    id = db.Column(Integer, primary_key=True)
    name = db.Column(db.SmallString, unique=True)
    log_source = db.Column(db.SmallString)
    # When True, log_source is treated as a regular expression; otherwise a
    # plain substring match is used. Same convention for log_content below.
    log_source_regex = db.Column(Boolean, default=False)
    log_content = db.Column(db.SmallString)
    log_content_regex = db.Column(Boolean, default=False)
    service_id = db.Column(Integer, ForeignKey("service.id"))
    service = relationship("Service", back_populates="events")
    service_name = association_proxy("service", "name")

    def match_log(self, source, content):
        # Run the service only if BOTH the source and the content match.
        source_match = (
            search(self.log_source, source)
            if self.log_source_regex
            else self.log_source in source
        )
        content_match = (
            search(self.log_content, content)
            if self.log_content_regex
            else self.log_content in content
        )
        if source_match and content_match:
            self.service.run()
|
nilq/baby-python
|
python
|
# Generated by Django 2.1 on 2020-08-04 10:43
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration: depends on two sibling ``0032`` migrations of
    ``encounterapp`` and performs no schema operations of its own, only
    reconciling the migration graph."""

    dependencies = [
        ('encounterapp', '0032_auto_20200801_1914'),
        ('encounterapp', '0032_auto_20200801_1758'),
    ]

    # Intentionally empty: nothing to apply, this is graph bookkeeping only.
    operations = [
    ]
nilq/baby-python
|
python
|
import os

# Absolute path of the directory containing this settings file; all other
# paths are resolved relative to it so the project runs from any cwd.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# ROUGE 1.5.5 perl distribution. The trailing '/' is required by the ROUGE
# wrapper -- do not remove it.
ROUGE_DIR = os.path.join(BASE_DIR, 'summariser', 'rouge', 'ROUGE-RELEASE-1.5.5/')

# Pre-processed summary data and sampled-summary database locations.
PROCESSED_PATH = os.path.join(BASE_DIR, 'data', 'summaries_processed_data')
SUMMARY_DB_DIR = os.path.join(BASE_DIR, 'data', 'sampled_summaries')

# Fixed ordering of documents used by the summariser utilities.
DOC_SEQUENCE_PATH = os.path.join(BASE_DIR, 'summariser', 'utils', 'DocsSequence.txt')

LANGUAGE = 'english'
nilq/baby-python
|
python
|
""""Utilities for Diffie-Hellman key exchange."""
from __future__ import unicode_literals
import base64
import warnings
import six
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric.dh import DHParameterNumbers, DHPublicNumbers
from openid import cryptutil
from openid.constants import DEFAULT_DH_GENERATOR, DEFAULT_DH_MODULUS
from openid.oidutil import toBase64
def _xor(a_b):
# Python 2 only
a, b = a_b
return chr(ord(a) ^ ord(b))
def strxor(x, y):
    """Return the element-wise XOR of two equal-length strings.

    Raises ValueError when the lengths differ.
    """
    if len(x) != len(y):
        raise ValueError('Inputs to strxor must have the same length')
    # On Python 2 the inputs are byte strings of single characters.
    if six.PY2:
        return b"".join(_xor(pair) for pair in zip(x, y))
    assert six.PY3
    return bytes(left ^ right for left, right in zip(x, y))
class DiffieHellman(object):
"""Utility for Diffie-Hellman key exchange."""
def __init__(self, modulus, generator):
"""Create a new instance.
@type modulus: six.text_type, Union[six.integer_types] are deprecated
@type generator: six.text_type, Union[six.integer_types] are deprecated
"""
if isinstance(modulus, six.integer_types):
warnings.warn("Modulus should be passed as base64 encoded string.")
else:
modulus = cryptutil.base64ToLong(modulus)
if isinstance(generator, six.integer_types):
warnings.warn("Generator should be passed as base64 encoded string.")
else:
generator = cryptutil.base64ToLong(generator)
self.parameter_numbers = DHParameterNumbers(modulus, generator)
parameters = self.parameter_numbers.parameters(default_backend())
self.private_key = parameters.generate_private_key()
@classmethod
def fromDefaults(cls):
"""Create Diffie-Hellman with the default modulus and generator."""
return cls(DEFAULT_DH_MODULUS, DEFAULT_DH_GENERATOR)
@property
def modulus(self):
"""Return the prime modulus value.
@rtype: Union[six.integer_types]
"""
warnings.warn("Modulus property will return base64 encoded string.", DeprecationWarning)
return self.parameter_numbers.p
@property
def generator(self):
"""Return the generator value.
@rtype: Union[six.integer_types]
"""
warnings.warn("Generator property will return base64 encoded string.", DeprecationWarning)
return self.parameter_numbers.g
@property
def parameters(self):
"""Return base64 encoded modulus and generator.
@return: Tuple with modulus and generator
@rtype: Tuple[six.text_type, six.text_type]
"""
modulus = self.parameter_numbers.p
generator = self.parameter_numbers.g
return cryptutil.longToBase64(modulus), cryptutil.longToBase64(generator)
@property
def public(self):
"""Return the public key.
@rtype: Union[six.integer_types]
"""
warnings.warn("Attribute 'public' is deprecated. Use 'public_key' instead.", DeprecationWarning)
return self.private_key.public_key().public_numbers().y
@property
def public_key(self):
"""Return base64 encoded public key.
@rtype: six.text_type
"""
return cryptutil.longToBase64(self.private_key.public_key().public_numbers().y)
def usingDefaultValues(self):
return self.parameters == (DEFAULT_DH_MODULUS, DEFAULT_DH_GENERATOR)
def getSharedSecret(self, composite):
"""Return a shared secret.
@param composite: Public key of the other party.
@type composite: Union[six.integer_types]
@rtype: Union[six.integer_types]
"""
warnings.warn("Method 'getSharedSecret' is deprecated in favor of '_get_shared_secret'.", DeprecationWarning)
return cryptutil.bytes_to_int(self._get_shared_secret(composite))
def _get_shared_secret(self, public_key):
"""Return a shared secret.
@param public_key: Base64 encoded public key of the other party.
@type public_key: six.text_type
@rtype: six.binary_type
"""
public_numbers = DHPublicNumbers(cryptutil.base64ToLong(public_key), self.parameter_numbers)
return self.private_key.exchange(public_numbers.public_key(default_backend()))
def xorSecret(self, composite, secret, hash_func):
warnings.warn("Method 'xorSecret' is deprecated, use 'xor_secret' instead.", DeprecationWarning)
dh_shared = self._get_shared_secret(cryptutil.longToBase64(composite))
# The DH secret must be `btwoc` compatible.
# See http://openid.net/specs/openid-authentication-2_0.html#rfc.section.8.2.3 for details.
dh_shared = cryptutil.fix_btwoc(dh_shared)
hashed_dh_shared = hash_func(dh_shared)
return strxor(secret, hashed_dh_shared)
    def xor_secret(self, public_key, secret, algorithm):
        """Return a base64 encoded XOR of a secret key and hash of a DH exchanged secret.

        @param public_key: Base64 encoded public key of the other party.
        @type public_key: six.text_type
        @param secret: Base64 encoded secret
        @type secret: six.text_type
        @type algorithm: hashes.HashAlgorithm
        @rtype: six.text_type
        """
        dh_shared = self._get_shared_secret(public_key)
        # The DH secret must be `btwoc` compatible.
        # See http://openid.net/specs/openid-authentication-2_0.html#rfc.section.8.2.3 for details.
        dh_shared = cryptutil.fix_btwoc(dh_shared)
        digest = hashes.Hash(algorithm, backend=default_backend())
        digest.update(dh_shared)
        hashed_dh_shared = digest.finalize()
        # XOR the decoded secret with the hashed shared secret, re-encode result.
        return toBase64(strxor(base64.b64decode(secret), hashed_dh_shared))
|
nilq/baby-python
|
python
|
import imp
import astropy.units as un
import astropy.coordinates as coord
import matplotlib.pyplot as plt
import gala.coordinates as gal_coord
from astropy.table import Table
from vector_plane_calculations import *
from velocity_transformations import *
imp.load_source('helper', '../tSNE_test/helper_functions.py')
from helper import move_to_dir
imp.load_source('gal_move', '../tSNE_test/convert_gal_movement.py')
from gal_move import gal_uvw
imp.load_source('veltrans', '../tSNE_test/velocity_transform.py')
from veltrans import *
# --------------------------------------------------------
# ---------------- FUNCTIONS -----------------------------
# --------------------------------------------------------
def _prepare_hist_data(d, bins, range, norm=True):
heights, edges = np.histogram(d, bins=bins, range=range)
width = np.abs(edges[0] - edges[1])
if norm:
heights = 1.*heights / np.max(heights)
return edges[:-1], heights, width
def _get_range(data, perc_cut=2.):
return (np.nanpercentile(data, perc_cut), np.nanpercentile(data, 100-perc_cut))
# return (np.nanmin(data), np.nanmax(data))
def plot_hist(obs, obs_f, galx, galx_f, path=None, title='', hist_bins = 100):
    """Overlay normalized histograms of an observed column and a Galaxia column.

    obs/galx: table-like objects; obs_f/galx_f: column names to compare.
    The shared x-range is taken from the observed column only.
    path is currently unused; the figure is shown interactively.
    """
    hist_range = _get_range(obs[obs_f])
    # zgal_range = _get_range(galaxia_sub['pz'])
    plt.title(title)
    # observed data in green, simulated (Galaxia) data in blue
    h_edg, h_hei, h_wid = _prepare_hist_data(obs[obs_f], hist_bins, hist_range, norm=True)
    plt.bar(h_edg, h_hei, width=h_wid, color='green', alpha=0.2)
    h_edg, h_hei, h_wid = _prepare_hist_data(galx[galx_f], hist_bins, hist_range, norm=True)
    plt.bar(h_edg, h_hei, width=h_wid, color='blue', alpha=0.2)
    plt.show()
    plt.close()
# --------------------------------------------------------
# ---------------- CONSTANTS AND SETTINGS ----------------
# --------------------------------------------------------
# GALAH
# simulation_dir = '/home/klemen/GALAH_data/Galaxia_simulation/GALAH/'
# simulation_ebf = 'galaxy_galah_complete.ebf'
# simulation_ebf = 'galaxy_galah_fields.ebf'
# RAVE
simulation_dir = '/home/klemen/GALAH_data/Galaxia_simulation/RAVE/'
simulation_ebf = 'galaxy_rave_complete.ebf'
simulation_fits = simulation_ebf.split('.')[0]+'.fits'
obs_file_fits = 'RAVE_GALAH_TGAS_stack.fits'
# analysis constants
# stream direction (galactic l/b in degrees) and selection cone radius
l_center = 310.
b_center = -70.
r_center = 10.
# --------------------------------------------------------
# ---------------- INPUT DATA HANDLING -------------------
# --------------------------------------------------------
print 'Reading data'
glaxia_data = Table.read(simulation_dir + simulation_fits)
obs_data = Table.read(obs_file_fits)
obs_data = obs_data.filled()
# compute observation galactic coordinates
l_b_obs = coord.ICRS(ra=obs_data['ra_gaia']*un.deg, dec=obs_data['dec_gaia']*un.deg).transform_to(coord.Galactic)
obs_data['l'] = l_b_obs.l.value
obs_data['b'] = l_b_obs.b.value
# create a subset of data: keep only stars within r_center of the stream direction
lb_center = coord.Galactic(l=l_center*un.deg, b=b_center*un.deg)
xyz_vel_stream = compute_xyz_vel(np.deg2rad(lb_center.l.value), np.deg2rad(lb_center.b.value), 10)
galaxia_sub = glaxia_data[coord.Galactic(l=glaxia_data['glon']*un.deg, b=glaxia_data['glat']*un.deg).separation(lb_center) < r_center*un.deg]
obs_sub = obs_data[coord.Galactic(l=obs_data['l']*un.deg, b=obs_data['b']*un.deg).separation(lb_center) < r_center*un.deg]
print 'Galaxia stars: '+str(len(galaxia_sub))
print 'Observation stars: '+str(len(obs_sub))
galaxia_sub['px'] *= 1e3  # kpc to pc conversion
galaxia_sub['py'] *= 1e3
galaxia_sub['pz'] *= 1e3
# galaxia_sub['vx'] *= -1.  # it has different orientation than our coordinate system
# compute galactic velocities and positions for the obs stars
obs_gal_coord = coord.Galactic(l=obs_sub['l']*un.deg, b=obs_sub['b']*un.deg, distance=1e3/obs_sub['parallax'].data*un.pc)
obs_gal_xyz = obs_gal_coord.cartesian
obs_sub['x_gal'] = obs_gal_xyz.x.value
obs_sub['y_gal'] = obs_gal_xyz.y.value
obs_sub['z_gal'] = obs_gal_xyz.z.value
# sanity check: positions of observed vs simulated stars should agree
plot_hist(obs_sub, 'x_gal', galaxia_sub, 'px', path=None, title='')
plot_hist(obs_sub, 'y_gal', galaxia_sub, 'py', path=None, title='')
plot_hist(obs_sub, 'z_gal', galaxia_sub, 'pz', path=None, title='')
# convert velocities from ra/de/pmra/pmdec to more consisten units
u_gal, v_gal, w_gal = gal_uvw(obs_sub['ra_gaia'], obs_sub['dec_gaia'], obs_sub['pmra'], obs_sub['pmdec'], obs_sub['RV'],
                              plx=obs_sub['parallax'])
obs_sub['u_gal'] = u_gal * -1.
obs_sub['v_gal'] = v_gal
obs_sub['w_gal'] = w_gal
# also derive cartesian velocities from galactic proper motions for comparison
ra_dec_pm = np.vstack((obs_sub['pmra'], obs_sub['pmdec'])) * un.mas/un.yr
l_b_pm = gal_coord.pm_icrs_to_gal(coord.ICRS(ra=obs_sub['ra_gaia']*un.deg, dec=obs_sub['dec_gaia']*un.deg), ra_dec_pm)
obs_sub['pml'] = l_b_pm[0].value
obs_sub['pmb'] = l_b_pm[1].value
xyz_vel = motion_to_cartesic(np.array(obs_sub['l']), np.array(obs_sub['b']),
                             np.array(obs_sub['pml']), np.array(obs_sub['pmb']),
                             np.array(obs_sub['RV']), plx=np.array(obs_sub['parallax']))
obs_sub['vx_gal'] = xyz_vel[0]
obs_sub['vy_gal'] = xyz_vel[1]
obs_sub['vz_gal'] = xyz_vel[2]
# plot_hist(obs_sub, 'u_gal', obs_sub, 'vx_gal', path=None, title='')
# plot_hist(obs_sub, 'v_gal', obs_sub, 'vy_gal', path=None, title='')
# plot_hist(obs_sub, 'w_gal', obs_sub, 'vz_gal', path=None, title='')
# compare observed velocity components against the Galaxia simulation
plot_hist(obs_sub, 'u_gal', galaxia_sub, 'vx', path=None, title='')
plot_hist(obs_sub, 'v_gal', galaxia_sub, 'vy', path=None, title='')
plot_hist(obs_sub, 'w_gal', galaxia_sub, 'vz', path=None, title='')
# intersect every observed star's velocity vector with the stream plane
xyz_pos_stars = np.vstack((obs_sub['x_gal'],obs_sub['y_gal'],obs_sub['z_gal'])).T
xyz_vel_stars = np.vstack((obs_sub['u_gal'],obs_sub['v_gal'],obs_sub['w_gal'])).T
print xyz_pos_stars
print xyz_vel_stars
print xyz_vel_stream
obs_plane_intersects_3D = stream_plane_vector_intersect(xyz_pos_stars, xyz_vel_stars, xyz_vel_stream)
obs_plane_intersects_2D = intersects_to_2dplane(obs_plane_intersects_3D, xyz_vel_stream)
# same intersection for the simulated stars
xyz_pos_stars = np.vstack((galaxia_sub['px'],galaxia_sub['py'],galaxia_sub['pz'])).T
xyz_vel_stars = np.vstack((galaxia_sub['vx'],galaxia_sub['vy'],galaxia_sub['vz'])).T
galaxia_plane_intersects_3D = stream_plane_vector_intersect(xyz_pos_stars, xyz_vel_stars, xyz_vel_stream)
galaxia_plane_intersects_2D = intersects_to_2dplane(galaxia_plane_intersects_3D, xyz_vel_stream)
plot_lim = (-1000, 1000)
# Create a plot
fig, ax = plt.subplots(1, 1)
ax.scatter(obs_plane_intersects_2D[:, 0], obs_plane_intersects_2D[:, 1], lw=0, c='red', s=2, alpha=1.)
ax.scatter(galaxia_plane_intersects_2D[:, 0], galaxia_plane_intersects_2D[:, 1], lw=0, c='blue', s=2, alpha=1.)
ax.scatter(0, 0, lw=0, c='black', s=10, marker='*')  # solar position
ax.set(xlabel='X stream plane', ylabel='Y stream plane', xlim=plot_lim, ylim=plot_lim)
fig.tight_layout()
plt.show()
plt.close()
|
nilq/baby-python
|
python
|
#import needed packages
import os, json, sys
#creates functions global
# maps command name -> 'module_dir/function_name' for every loaded module
commands = {}
#import modules
modules = os.listdir(path='modules')
print('Importing modules')
count_mod = 0
count_ok_mod = 0
for module in modules:
    try:
        with open('modules/' + module + '/index.json') as read_modules:
            mod_data = json.load(read_modules)
        print('Importing ' + module + '... OK')
        for com in mod_data['functions']:
            commands[com] = module + '/' + mod_data['functions'][com]
        count_ok_mod += 1
    except (OSError, ValueError, KeyError):
        # OSError: missing/unreadable index.json; ValueError: malformed JSON
        # (json.JSONDecodeError subclasses it); KeyError: no 'functions' entry.
        # Narrower than the original bare `except:` so Ctrl-C / SystemExit
        # are no longer swallowed.
        print('Importing ' + module + '... ERROR')
    count_mod += 1
print("{} of {} modules loaded".format(count_ok_mod, count_mod))
|
nilq/baby-python
|
python
|
from django.urls import path, include
from management import views
from rest_framework_simplejwt import views as jwt_views
# URL routes for the management app: signup plus JWT authentication endpoints.
urlpatterns = [
    # Used to signup as a teacher or a student
    path('signup/', views.SignUpView.as_view(), name = 'signup'),
    # Used to obtain refresh and access token
    path('login/access/', views.MyTokenObtainPairView.as_view(), name = 'access-token'),
    # Used to obtain access token from refresh token
    path('login/refresh/', jwt_views.TokenRefreshView.as_view(), name='token-refresh'),
    # Used to change the password (handled by ChangePasswordView; URL name kept
    # as 'reset-password' for backward compatibility)
    path('login/changepassword/', views.ChangePasswordView.as_view(), name='reset-password')
]
|
nilq/baby-python
|
python
|
from __future__ import annotations
import numpy as np
import pandas as pd
from sklearn import datasets
from IMLearn.metrics import mean_square_error
from IMLearn.utils import split_train_test
from IMLearn.model_selection import cross_validate
from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression
from sklearn.linear_model import Lasso, Ridge
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def select_polynomial_degree(n_samples: int = 100, noise: float = 5):
    """
    Simulate data from a polynomial model and use cross-validation to select the best fitting degree

    Parameters
    ----------
    n_samples: int, default=100
        Number of samples to generate

    noise: float, default = 5
        Noise level to simulate in responses
    """
    # Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise
    # and split into training- and testing portions
    model = lambda x: (x + 3) * (x + 2) * (x + 1) * (x - 1) * (x - 2)
    # sample n_samples in uniform distribution between [-1.2,2]
    x = np.array(np.linspace(-1.2, 2, n_samples))
    clean_y = model(x)
    noise_data = np.random.normal(loc=0, scale=noise, size=len(clean_y))
    dirty_y = clean_y + noise_data
    x.flatten()
    # split into training and testing portions (2/3 for training, 1/3 for testing)
    test_x_clean, test_y_clean, train_x_clean, train_y_clean = Q_1_plot_data(clean_y, dirty_y, x, noise)

    # Question 2 - Perform CV for polynomial fitting with degrees 0,1,...,10
    # BUGFIX: the original chained assignment (`a = b = []`) aliased both names
    # to the same list object; keep them as two distinct lists.
    train_errors_clean, validate_errors_clean = [], []
    if noise == 0:
        train_errors_clean, validate_errors_clean = Q_2_poly_over_clean(train_x_clean, train_y_clean)
    # split into training and testing portions (2/3 for training, 1/3 for testing)
    test_x_dirty, test_y_dirty, train_errors_dirty, train_x_dirty, train_y_dirty, validate_errors_dirty = Q_2_poly_over_dirty(
        dirty_y, x, noise)
    # best degree is
    test_results_over_best_fit(test_x_clean, test_x_dirty, test_y_clean, test_y_dirty, train_errors_clean,
                               train_errors_dirty, train_x_clean, train_x_dirty, train_y_clean, train_y_dirty,
                               validate_errors_clean, validate_errors_dirty, noise)
def Practical_part_1():
    """Run the degree-selection experiment for the three required noise settings."""
    # default setting: 100 samples, noise level 5
    select_polynomial_degree(100, 5)
    print()
    # noiseless setting
    select_polynomial_degree(100, 0)
    print()
    # large-sample, high-noise setting
    select_polynomial_degree(1500, 10)
def test_results_over_best_fit(test_x_clean, test_x_dirty, test_y_clean, test_y_dirty, train_errors_clean,
                               train_errors_dirty, train_x_clean, train_x_dirty, train_y_clean, train_y_dirty,
                               validate_errors_clean, validate_errors_dirty, noise=5):
    """Pick the degree minimizing validation error, refit on the noisy training
    data and print train/test mean-square errors.

    The *_clean arguments are only meaningful when noise == 0 (see caller).
    """
    print(train_errors_dirty)
    print(validate_errors_dirty)
    best_degree_dirty = np.argmin(validate_errors_dirty)
    best_degree_clean = 0
    # NOTE(review): if noise == 0 but validate_errors_clean is empty, argmin on
    # an empty sequence would raise — confirm callers always populate it then.
    if validate_errors_clean or noise == 0:
        best_degree_clean = np.argmin(validate_errors_clean)
    print(f"Best degree for dirty noise: {noise} data is {best_degree_dirty}, {best_degree_clean} for clean data")
    # fit a polynomial model with the best degree and report the mean square error results
    poly_dirty = PolynomialFitting(best_degree_dirty)
    poly_dirty.fit(train_x_dirty, train_y_dirty)
    # predict the test data
    test_y_pred_dirty = poly_dirty.predict(test_x_dirty)
    # present the error results
    print(f"Mean square error for dirty noise: {noise} data is {mean_square_error(test_y_pred_dirty, test_y_dirty)} noise level is {noise}")
    print(f"Mean square error for training data: {noise} data is {mean_square_error(poly_dirty.predict(train_x_dirty), train_y_dirty)} noise level is {noise}")
def Q_2_poly_over_dirty(dirty_y, x, noise=5):
    """Cross-validate polynomial degrees 0..10 on the noisy data and plot the
    train/validation error curves.

    Returns the test split plus the per-degree error lists and the train split.
    """
    train_x_dirty, train_y_dirty, test_x_dirty, test_y_dirty = split_train_test(x, dirty_y, 0.667)
    train_x_dirty = train_x_dirty.flatten()
    test_x_dirty = test_x_dirty.flatten()
    train_errors_dirty = []
    validate_errors_dirty = []
    # BUGFIX: the exercise asks for degrees 0,1,...,10 — range(0, 10) stopped
    # at degree 9, so degree 10 was never evaluated.
    for degree in range(0, 11):
        # Create a polynomial fitting object
        train_error, validate_error = cross_validate(PolynomialFitting(degree), train_x_dirty, train_y_dirty,
                                                     mean_square_error, cv=5)
        train_errors_dirty.append(train_error)
        validate_errors_dirty.append(validate_error)
    # plot the training and validation errors
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=np.arange(0, 11), y=train_errors_dirty, mode='lines+markers', name='Training error'))
    fig.add_trace(
        go.Scatter(x=np.arange(0, 11), y=validate_errors_dirty, mode='lines+markers', name='Validation error'))
    # add axes titles and graph title
    fig.update_layout(title_text=f'Polynomial fitting over dirty data with noise level {noise}', xaxis_title_text='Degree',
                      yaxis_title_text='Error')
    fig.show()
    return test_x_dirty, test_y_dirty, train_errors_dirty, train_x_dirty, train_y_dirty, validate_errors_dirty
def Q_2_poly_over_clean(train_x_clean, train_y_clean):
    """Cross-validate polynomial degrees 0..10 on the noiseless data and plot
    the train/validation error curves.

    Returns (train_errors, validation_errors), one entry per degree.
    """
    train_errors_clean = []
    validate_errors_clean = []
    # BUGFIX: the exercise asks for degrees 0,1,...,10 — range(0, 10) stopped
    # at degree 9, so degree 10 was never evaluated.
    for degree in range(0, 11):
        # Create a polynomial fitting object
        train_error, validate_error = cross_validate(PolynomialFitting(degree), train_x_clean, train_y_clean,
                                                     mean_square_error, cv=5)
        train_errors_clean.append(train_error)
        validate_errors_clean.append(validate_error)
    # plot the training and validation errors
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=np.arange(0, 11), y=train_errors_clean, mode='lines+markers', name='Training error'))
    fig.add_trace(
        go.Scatter(x=np.arange(0, 11), y=validate_errors_clean, mode='lines+markers', name='Validation error'))
    # add axes titles and graph title
    fig.update_layout(title_text='Polynomial fitting over clean data', xaxis_title_text='Degree',
                      yaxis_title_text='Error')
    fig.show()
    return train_errors_clean, validate_errors_clean
def Q_1_plot_data(clean_y, dirty_y ,x, noise=0):
    """Split both response vectors into train/test portions, plot the noisy
    split together with the clean curve, and return the clean split only.

    Returns (test_x_clean, test_y_clean, train_x_clean, train_y_clean).
    The dirty split computed here is used only for plotting.
    """
    train_x_clean, train_y_clean, test_x_clean, test_y_clean = split_train_test(x, clean_y, 0.667)
    train_x_dirty, train_y_dirty, test_x_dirty, test_y_dirty = split_train_test(x, dirty_y, 0.667)
    # split_train_test returns column-shaped arrays; flatten for plotting
    train_x_clean = train_x_clean.flatten()
    test_x_clean = test_x_clean.flatten()
    train_x_dirty = train_x_dirty.flatten()
    test_x_dirty = test_x_dirty.flatten()
    x.flatten()
    fig = go.Figure()
    fig.add_trace(
        go.Scatter(x=train_x_dirty, y=train_y_dirty, mode='markers', name='Training data', marker_color='blue'))
    fig.add_trace(go.Scatter(x=test_x_dirty, y=test_y_dirty, mode='markers', name='Test data', marker_color='red'))
    # green markers trace the underlying noiseless model
    fig.add_trace(go.Scatter(x=x, y=clean_y, mode='markers', name='Training data', marker_color='green'))
    fig.update_layout(title=f'Training and test data with noise level of {noise}', xaxis_title='x', yaxis_title='y')
    fig.show()
    return test_x_clean, test_y_clean, train_x_clean, train_y_clean
def select_regularization_parameter(n_samples: int = 50, n_evaluations: int = 500):
    """
    Using sklearn's diabetes dataset use cross-validation to select the best fitting regularization parameter
    values for Ridge and Lasso regressions

    Parameters
    ----------
    n_samples: int, default=50
        Number of samples to generate

    n_evaluations: int, default = 500
        Number of regularization parameter values to evaluate for each of the algorithms
    """
    # Question 6 - Load diabetes dataset and split into training and testing portions
    X_test, X_train, l1_ratios, y_test, y_train = load_data(n_evaluations, n_samples)
    # use cross_validate to evaluate the performance of Ridge and Lasso regression for each regularization parameter
    # Question 7 - Perform CV for different values of the regularization parameter for Ridge and Lasso regressions
    train_errors_lasso, train_errors_ridge, validate_errors_lasso, validate_errors_ridge = hyper_parameters_eval(
        X_train, l1_ratios, y_train)
    # Question 8 - Plot the training and validation errors for Ridge and Lasso regressions for each regularization parameter
    plot_lasso_ridge_errors(l1_ratios, train_errors_lasso, train_errors_ridge, validate_errors_lasso,
                            validate_errors_ridge)
    # refit with the winning lambdas and compare against plain least squares
    compare_regression_with_best_lambda(X_test, X_train, l1_ratios, validate_errors_lasso, validate_errors_ridge,
                                        y_test, y_train)
def compare_regression_with_best_lambda(X_test, X_train, l1_ratios, validate_errors_lasso, validate_errors_ridge,
                                        y_test, y_train):
    """Refit Ridge/Lasso with their CV-selected lambdas plus plain least squares
    and report the test MSE of each model."""
    best_ridge_lam = l1_ratios[np.argmin(validate_errors_ridge)]
    best_lasso_lam = l1_ratios[np.argmin(validate_errors_lasso)]
    print(f'Best lambda for Ridge: {best_ridge_lam}')
    print(f'Best lambda for Lasso: {best_lasso_lam}')
    # refit each model over the whole training portion
    ridge = Ridge(alpha=best_ridge_lam)
    ridge.fit(X_train, y_train)
    lasso = Lasso(alpha=best_lasso_lam)
    lasso.fit(X_train, y_train)
    least_squares = LinearRegression()
    least_squares.fit(X_train, y_train)
    # report held-out errors
    print(f'Mean squared error for Ridge: {mean_square_error(y_test, ridge.predict(X_test))}')
    print(f'Mean squared error for Lasso: {mean_square_error(y_test, lasso.predict(X_test))}')
    print(f'Mean squared error for Linear Regression: {mean_square_error(y_test, least_squares.predict(X_test))}')
def plot_lasso_ridge_errors(l1_ratios, train_errors_lasso, train_errors_ridge, validate_errors_lasso,
                            validate_errors_ridge):
    """Plot train/validation error curves of Ridge and Lasso versus lambda."""
    fig = go.Figure()
    curves = (
        (train_errors_ridge, 'Training error - Ridge'),
        (validate_errors_ridge, 'Validation error - Ridge'),
        (train_errors_lasso, 'Training error - Lasso'),
        (validate_errors_lasso, 'Validation error- Lasso'),
    )
    for errors, label in curves:
        fig.add_trace(go.Scatter(x=l1_ratios, y=errors, mode='lines+markers', name=label))
    # add axes titles and graph title
    fig.update_layout(title_text='Training and validation errors for Ridge and Lasso regressions',
                      xaxis_title_text='Lambda',
                      yaxis_title_text='Error')
    fig.show()
def hyper_parameters_eval(X_train, l1_ratios, y_train):
    """5-fold cross-validate RidgeRegression and Lasso for every lambda in
    *l1_ratios*.

    Returns four parallel lists (one entry per lambda):
    (train_errors_lasso, train_errors_ridge, validate_errors_lasso, validate_errors_ridge).
    """
    train_errors_ridge, validate_errors_ridge = [], []
    train_errors_lasso, validate_errors_lasso = [], []
    for lam in l1_ratios:
        # Question 6 - Use cross_validate to evaluate the performance of Ridge and Lasso regression for each regularization parameter
        # IMLearn's own RidgeRegression implementation...
        ridge_model = RidgeRegression(lam=lam, include_intercept=True)
        train_error_ridge, validate_error_ridge = cross_validate(ridge_model, X_train, y_train, mean_square_error, cv=5)
        train_errors_ridge.append(train_error_ridge)
        validate_errors_ridge.append(validate_error_ridge)
        # ...versus sklearn's Lasso (no closed-form solution available)
        lasso_model = Lasso(alpha=lam)
        train_error_lasso, validate_error_lasso = cross_validate(lasso_model, X_train, y_train, mean_square_error, cv=5)
        train_errors_lasso.append(train_error_lasso)
        validate_errors_lasso.append(validate_error_lasso)
    return train_errors_lasso, train_errors_ridge, validate_errors_lasso, validate_errors_ridge
def load_data(n_evaluations, n_samples):
    """Load the diabetes dataset, split off the first *n_samples* rows for
    training, and build the lambda grid to evaluate.

    Returns (X_test, X_train, l1_ratios, y_test, y_train).
    """
    X, y = datasets.load_diabetes(return_X_y=True)
    # choose the first n_samples samples as training data
    X_train, y_train = X[:n_samples], y[:n_samples]
    # choose the remaining samples as testing data
    X_test, y_test = X[n_samples:], y[n_samples:]
    # Question 6 - Create a list of regularization parameter values to evaluate
    # BUGFIX: the grid size was hard-coded to 500, silently ignoring the
    # n_evaluations parameter (identical for the default call, where
    # n_evaluations == 500).
    l1_ratios = np.linspace(0.001, 1, n_evaluations)
    return X_test, X_train, l1_ratios, y_test, y_train
if __name__ == '__main__':
    # fixed seed so the simulated noise (and all reported errors) is reproducible
    np.random.seed(0)
    Practical_part_1()
    select_regularization_parameter()
|
nilq/baby-python
|
python
|
import chardet
import codecs
def WriteFile(filePath, lines, encoding="utf-8"):
    """Write *lines* to *filePath* in the given encoding, rewriting the ASS
    'Format:' line that immediately follows the [Events] section header so
    Emby renders the subtitles correctly."""
    with codecs.open(filePath, "w", encoding) as out:
        patch_next = False  # True right after the [Events] header was written
        for line in lines:
            if '[Events]' in line:
                patch_next = True
                out.write(line)
                continue
            if not patch_next:
                out.write(line)
                continue
            # only the single line following [Events] is eligible for patching
            patch_next = False
            out.write(line.replace(
                'Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding',
                'Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text'))
            print("├ 已为Emby改善字幕兼容性")
def CONV_UTF8(src, dst):
    """Detect the encoding of *src* with chardet and rewrite it to *dst* as UTF-8
    (via WriteFile, which also patches the ASS Format line)."""
    # Detect the encoding; chardet may fail to detect one, which raises later
    f = open(src, "rb")
    coding = chardet.detect(f.read())["encoding"]
    f.close()
    # if coding != "utf-8":
    with codecs.open(src, "r", coding) as f:
        try:
            WriteFile(dst, f.readlines(), encoding="utf-8")
        except Exception:
            # decode/write failure — report the file and the detected encoding
            print(src + " " + coding + " read error")
if __name__ == "__main__":
    # convert the subtitle file in place to UTF-8 (and patch its Format line)
    filename="the.walking.dead.s10e05.1080p.web.h264-xlf.zh.ass"
    CONV_UTF8(filename,filename)
nilq/baby-python
|
python
|
from screenplay import Action, Actor
from screenplay.actions import fail_with_message
class _if_nothing_is_found_fail_with_message(Action):
    """Wrap an action; when it yields None, run the fail actions and fail
    with the configured message."""

    def __init__(self, action: Action, fail_actions: list, message: str):
        super().__init__()
        self.action = action
        self.fail_actions = fail_actions
        self.message = message

    def perform_as(self, actor: Actor):
        result = actor.attempts_to(self.action)
        if result is not None:
            return result
        # nothing found: run the extra fail actions, then fail explicitly
        actor.attempts_to(
            *self.fail_actions,
            fail_with_message(self.message)
        )
        return result
def _create_empty_additional_actions():
return []
class find_base_action(Action):
    """Base class for find actions, adding a fluent not-found failure wrapper."""

    # factory producing extra actions to run before failing; replace at class
    # level to customize the failure behavior
    create_fail_actions_callback = _create_empty_additional_actions

    def if_nothing_is_found_fail_with_message(self, message: str):
        extra_actions = find_base_action.create_fail_actions_callback()
        return _if_nothing_is_found_fail_with_message(self, extra_actions, message)
|
nilq/baby-python
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_serialization import jsonutils
from senlin.objects.requests import policies
from senlin.tests.unit.common import base as test_base
class TestPolicyList(test_base.SenlinTestCase):
    """Tests for the PolicyListRequest object."""

    def test_policy_list_request_body_full(self):
        # build a request with every supported filter set
        request = policies.PolicyListRequest(
            name=['policy1'],
            type=['senlin.policy.scaling-1.0'],
            limit=2,
            marker='d6901ce0-1403-4b9c-abf5-25c59cf79823',
            sort='name:asc',
            project_safe=False,
        )
        self.assertEqual(['policy1'], request.name)
        self.assertEqual(['senlin.policy.scaling-1.0'], request.type)
        self.assertEqual(2, request.limit)
        self.assertEqual('d6901ce0-1403-4b9c-abf5-25c59cf79823', request.marker)
        self.assertEqual('name:asc', request.sort)
        self.assertFalse(request.project_safe)
class TestPolicyCreate(test_base.SenlinTestCase):
    """Tests for policy-create request objects and their serialization."""

    # sample scaling-policy spec shared by the tests (deep-copied before use)
    spec = {
        "properties": {
            "adjustment": {
                "min_step": 1,
                "number": 1,
                "type": "CHANGE_IN_CAPACITY"
            },
            "event": "CLUSTER_SCALE_IN"
        },
        "type": "senlin.policy.scaling",
        "version": "1.0"
    }

    def test_policy_create_body(self):
        # the body keeps the name and the raw spec dict
        spec = copy.deepcopy(self.spec)
        sot = policies.PolicyCreateRequestBody(name='foo', spec=spec)
        self.assertEqual('foo', sot.name)
        self.assertEqual('senlin.policy.scaling', sot.spec['type'])
        self.assertEqual('1.0', sot.spec['version'])

    def test_policy_create_request(self):
        spec = copy.deepcopy(self.spec)
        policy = policies.PolicyCreateRequestBody(name='foo', spec=spec)
        sot = policies.PolicyCreateRequest(policy=policy)
        self.assertIsInstance(sot.policy, policies.PolicyCreateRequestBody)

    def test_request_body_to_primitive(self):
        # obj_to_primitive() must carry object metadata and JSON-encode the spec
        spec = copy.deepcopy(self.spec)
        sot = policies.PolicyCreateRequestBody(name='foo', spec=spec)
        self.assertEqual('foo', sot.name)
        res = sot.obj_to_primitive()
        # request body
        self.assertEqual('PolicyCreateRequestBody', res['senlin_object.name'])
        self.assertEqual('1.0', res['senlin_object.version'])
        self.assertEqual('senlin', res['senlin_object.namespace'])
        self.assertIn('name', res['senlin_object.changes'])
        self.assertIn('spec', res['senlin_object.changes'])
        # spec
        data = res['senlin_object.data']
        self.assertEqual(u'foo', data['name'])
        spec_data = jsonutils.loads(data['spec'])
        self.assertEqual('senlin.policy.scaling', spec_data['type'])
        self.assertEqual('1.0', spec_data['version'])

    def test_request_to_primitive(self):
        # the nested body is serialized as its own primitive under 'policy'
        spec = copy.deepcopy(self.spec)
        body = policies.PolicyCreateRequestBody(name='foo', spec=spec)
        sot = policies.PolicyCreateRequest(policy=body)
        self.assertIsInstance(sot.policy, policies.PolicyCreateRequestBody)
        self.assertEqual('foo', sot.policy.name)
        res = sot.obj_to_primitive()
        self.assertIn('policy', res['senlin_object.changes'])
        self.assertEqual('PolicyCreateRequest', res['senlin_object.name'])
        self.assertEqual('senlin', res['senlin_object.namespace'])
        self.assertEqual('1.0', res['senlin_object.version'])
        data = res['senlin_object.data']['policy']
        self.assertEqual('PolicyCreateRequestBody', data['senlin_object.name'])
        self.assertEqual('senlin', data['senlin_object.namespace'])
        self.assertEqual('1.0', data['senlin_object.version'])
        self.assertIn('name', data['senlin_object.changes'])
        self.assertIn('spec', data['senlin_object.changes'])
        pd = data['senlin_object.data']
        self.assertEqual(u'foo', pd['name'])
        spec_data = jsonutils.loads(pd['spec'])
        self.assertEqual('senlin.policy.scaling', spec_data['type'])
        self.assertEqual('1.0', spec_data['version'])
class TestPolicyGet(test_base.SenlinTestCase):
    """Tests for the PolicyGetRequest object."""

    def test_policy_get(self):
        request = policies.PolicyGetRequest(identity='foo')
        self.assertEqual('foo', request.identity)
class TestPolicyUpdate(test_base.SenlinTestCase):
    """Tests for policy-update request objects and their serialization."""

    def test_policy_update_body(self):
        data = {'name': 'foo'}
        sot = policies.PolicyUpdateRequestBody(**data)
        self.assertEqual('foo', sot.name)

    def test_policy_update(self):
        data = {'name': 'foo'}
        body = policies.PolicyUpdateRequestBody(**data)
        request = {
            'identity': 'pid',
            'policy': body
        }
        sot = policies.PolicyUpdateRequest(**request)
        self.assertEqual('pid', sot.identity)
        self.assertIsInstance(sot.policy, policies.PolicyUpdateRequestBody)

    def test_policy_data_to_primitive(self):
        # obj_to_primitive() must carry object metadata alongside the data
        data = {'name': 'foo'}
        sot = policies.PolicyUpdateRequestBody(**data)
        res = sot.obj_to_primitive()
        self.assertIn('name', res['senlin_object.changes'])
        self.assertEqual(u'foo', res['senlin_object.data']['name'])
        self.assertEqual('PolicyUpdateRequestBody', res['senlin_object.name'])
        self.assertEqual('senlin', res['senlin_object.namespace'])
        self.assertEqual('1.0', res['senlin_object.version'])

    def test_request_to_primitive(self):
        data = {'name': 'foo'}
        name = policies.PolicyUpdateRequestBody(**data)
        # NOTE(review): the body object is passed under a 'name' key here,
        # unlike 'policy' in test_policy_update — confirm this is intentional.
        request = {
            'identity': 'pid',
            'name': name
        }
        sot = policies.PolicyUpdateRequest(**request)
        res = sot.obj_to_primitive()
        self.assertIn('identity', res['senlin_object.changes'])
        self.assertEqual(u'pid', res['senlin_object.data']['identity'])
        self.assertEqual('PolicyUpdateRequest', res['senlin_object.name'])
        self.assertEqual('senlin', res['senlin_object.namespace'])
        self.assertEqual('1.0', res['senlin_object.version'])
class TestPolicyValidate(test_base.SenlinTestCase):
    """Tests for policy-validate request objects and their serialization."""

    # sample scaling-policy spec shared by the tests (deep-copied before use)
    spec = {
        "properties": {
            "adjustment": {
                "min_step": 1,
                "number": 1,
                "type": "CHANGE_IN_CAPACITY"
            },
            "event": "CLUSTER_SCALE_IN"
        },
        "type": "senlin.policy.scaling",
        "version": "1.0"
    }

    def test_validate_request_body(self):
        spec = copy.deepcopy(self.spec)
        body = policies.PolicyValidateRequestBody(spec=spec)
        self.assertEqual(spec['type'], body.spec['type'])
        self.assertEqual(spec['version'], body.spec['version'])

    def test_validate_request(self):
        spec = copy.deepcopy(self.spec)
        body = policies.PolicyValidateRequestBody(spec=spec)
        policy = policies.PolicyValidateRequest(policy=body)
        self.assertIsInstance(
            policy.policy, policies.PolicyValidateRequestBody)

    def test_request_body_to_primitive(self):
        # obj_to_primitive() must carry object metadata and JSON-encode the spec
        spec = copy.deepcopy(self.spec)
        sot = policies.PolicyValidateRequestBody(spec=spec)
        res = sot.obj_to_primitive()
        self.assertIn('spec', res['senlin_object.changes'])
        self.assertEqual(
            'PolicyValidateRequestBody', res['senlin_object.name'])
        self.assertEqual('senlin', res['senlin_object.namespace'])
        self.assertEqual('1.0', res['senlin_object.version'])
        pd = res['senlin_object.data']['spec']
        data = jsonutils.loads(pd)
        self.assertEqual('senlin.policy.scaling', data['type'])
        self.assertEqual('1.0', data['version'])

    def test_request_to_primitive(self):
        # the nested body is serialized as its own primitive under 'policy'
        spec = copy.deepcopy(self.spec)
        body = policies.PolicyValidateRequestBody(spec=spec)
        policy = policies.PolicyValidateRequest(policy=body)
        res = policy.obj_to_primitive()
        self.assertIn('policy', res['senlin_object.changes'])
        self.assertEqual('PolicyValidateRequest', res['senlin_object.name'])
        self.assertEqual('senlin', res['senlin_object.namespace'])
        self.assertEqual('1.0', res['senlin_object.version'])
        body = res['senlin_object.data']['policy']
        self.assertIn('spec', body['senlin_object.changes'])
        self.assertEqual(
            'PolicyValidateRequestBody', body['senlin_object.name'])
        self.assertEqual('senlin', body['senlin_object.namespace'])
        self.assertEqual('1.0', body['senlin_object.version'])
        pd = body['senlin_object.data']['spec']
        data = jsonutils.loads(pd)
        self.assertEqual('senlin.policy.scaling', data['type'])
        self.assertEqual('1.0', data['version'])
class TestPolicyDelete(test_base.SenlinTestCase):
    """Tests for the PolicyDeleteRequest object."""

    def test_policy_delete(self):
        request = policies.PolicyDeleteRequest(identity='foo')
        self.assertEqual('foo', request.identity)
|
nilq/baby-python
|
python
|
from .base import *
BOOST_PER_SECOND = 80 * 1 / .93  # boost used per second out of 255
REPLICATED_PICKUP_KEY = 'TAGame.VehiclePickup_TA:ReplicatedPickupData'
REPLICATED_PICKUP_KEY_168 = 'TAGame.VehiclePickup_TA:NewReplicatedPickupData'


def get_boost_actor_data(actor: dict):
    """Return the pickup payload dict carrying a valid instigator id, or None.

    Supports both the pre-1.68 and post-1.68 replicated pickup keys; only the
    first key present on the actor is considered.
    """
    for pickup_key, payload_key in ((REPLICATED_PICKUP_KEY, 'pickup'),
                                    (REPLICATED_PICKUP_KEY_168, 'pickup_new')):
        if pickup_key not in actor:
            continue
        data = actor[pickup_key]
        if data is not None and data != -1:
            data = data[payload_key]
            if data is not None and 'instigator_id' in data and data["instigator_id"] != -1:
                return data
        break  # mirror the original if/elif: never fall through to the other key
    return None
class BoostHandler(BaseActorHandler):
    """Records per-player boost amount and boost-active state each frame."""

    type_name = 'Archetypes.CarComponents.CarComponent_Boost'

    def update(self, actor: dict, frame_number: int, time: float, delta: float) -> None:
        # Map the boost component back to its car; skip cars not being collected.
        car_actor_id = actor.get('TAGame.CarComponent_TA:Vehicle', None)

        if car_actor_id is None or car_actor_id not in self.parser.current_car_ids_to_collect:
            return

        player_actor_id = self.parser.car_player_ids[car_actor_id]
        boost_is_active_random_int = actor.get(
            COMPONENT_ACTIVE_KEY,
            actor.get(COMPONENT_REPLICATED_ACTIVE_KEY, False))
        # boost_is_active when random_int is odd?!
        boost_is_active = (boost_is_active_random_int % 2 == 1)
        if boost_is_active:
            # manually decrease car boost amount (not shown in replay)
            # i assume game calculates the decrease itself similarly
            boost_amount = max(0, actor.get('TAGame.CarComponent_Boost_TA:ReplicatedBoostAmount',
                                            0) - delta * BOOST_PER_SECOND)
            actor['TAGame.CarComponent_Boost_TA:ReplicatedBoostAmount'] = boost_amount
        else:
            # not boosting: pass through the replicated amount (may be absent)
            boost_amount = actor.get('TAGame.CarComponent_Boost_TA:ReplicatedBoostAmount', None)

        self.parser.player_data[player_actor_id][frame_number]['boost'] = boost_amount
        self.parser.player_data[player_actor_id][frame_number]['boost_active'] = boost_is_active
class BoostPickupHandler(BaseActorHandler):
    """Marks the frame at which a player collects a boost pickup."""

    @classmethod
    def can_handle(cls, actor: dict) -> bool:
        return actor['ClassName'] == 'TAGame.VehiclePickup_Boost_TA'

    def update(self, actor: dict, frame_number: int, time: float, delta: float) -> None:
        boost_actor = get_boost_actor_data(actor)
        if boost_actor is not None:
            car_actor_id = boost_actor['instigator_id']
            if car_actor_id in self.parser.car_player_ids:
                player_actor_id = self.parser.car_player_ids[car_actor_id]
                if frame_number in self.parser.player_data[player_actor_id]:
                    actor = self.parser.player_data[player_actor_id]
                    # Walk backwards to the most recent frame that recorded a
                    # boost amount for this player.
                    frame_number_look_back = frame_number - 1
                    previous_boost_data = None
                    while frame_number_look_back >= 0:
                        try:
                            previous_boost_data = actor[frame_number_look_back]['boost']
                        except KeyError:
                            previous_boost_data = None
                        if previous_boost_data is not None:
                            break
                        frame_number_look_back -= 1

                    try:
                        current_boost_data = actor[frame_number]['boost']
                    except KeyError:
                        current_boost_data = None

                    # Ignore any phantom boosts
                    # (chained comparison: previous < 255 AND previous < current,
                    # i.e. boost actually increased from a non-full amount)
                    if (previous_boost_data is not None and current_boost_data is not None and
                            (255 > previous_boost_data < current_boost_data)):
                        actor[frame_number]['boost_collect'] = True

                    # set to false after acknowledging it's turned True
                    # it does not turn back false immediately although boost is only collected once.
                    # using actor_id!=-1
                    boost_actor["instigator_id"] = -1
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Unit-e Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import sha256
from test_framework.regtest_mnemonics import regtest_mnemonics
from test_framework.script import CScript, OP_2, hash160
from test_framework.test_framework import UnitETestFramework, STAKE_SPLIT_THRESHOLD
from test_framework.util import assert_equal, assert_greater_than, bytes_to_hex_str, hex_str_to_bytes, wait_until
def stake_p2wsh(node, staking_node, amount):
    """
    Send funds to witness v2 remote staking output.

    Args:
        node: the node which will be able to spend the funds
        staking_node: the node which will be able to stake the funds
        amount: the amount to send
    """
    # Spending branch: a 2-of-2 multisig owned by `node`.
    multisig = node.addmultisigaddress(2, [node.getnewaddress(), node.getnewaddress()])
    bare = CScript(hex_str_to_bytes(multisig['redeemScript']))
    spending_script_hash = sha256(bare)

    # Staking branch: hash of `staking_node`'s pubkey ('legacy' so the
    # address resolves to a plain key, not a script hash).
    addr_info = staking_node.validateaddress(staking_node.getnewaddress('', 'legacy'))
    staking_key_hash = hash160(hex_str_to_bytes(addr_info['pubkey']))

    # Witness program v2: OP_2 <staking key hash> <spending script hash>.
    rs_p2wsh = CScript([OP_2, staking_key_hash, spending_script_hash])
    outputs = [{'address': 'script', 'amount': amount, 'script': bytes_to_hex_str(rs_p2wsh)}]
    node.sendtypeto('unite', 'unite', outputs)
class RemoteStakingTest(UnitETestFramework):
    """Exercises remote staking: alice delegates stake to bob via the
    stakeat RPC and a raw witness-v2 P2WSH output, then verifies both
    wallets' views of the staked balance."""

    def set_test_params(self):
        # Node 0 (alice) funds and delegates; node 1 (bob) stakes remotely.
        self.num_nodes = 2
        self.setup_clean_chain = True
        # Bob syncs a chain he did not create, so relax his chainwork and
        # tip-age sanity checks.
        self.extra_args=[
            [],
            ['-minimumchainwork=0', '-maxtipage=1000000000']
        ]

    def run_test(self):
        alice, bob = self.nodes

        alice.importmasterkey(regtest_mnemonics[0]['mnemonics'])
        alice.generate(1)
        # importmasterkey splits the balance into STAKE_SPLIT_THRESHOLD-sized coins.
        assert_equal(len(alice.listunspent()), regtest_mnemonics[0]['balance'] / STAKE_SPLIT_THRESHOLD)

        alices_addr = alice.getnewaddress()
        # 'legacy': we need the PK hash, not a script hash
        bobs_addr = bob.getnewaddress('', 'legacy')

        # Estimate staking fee (dry-run flag True does not broadcast).
        recipient = {"address": bobs_addr, "amount": 1}
        result = alice.stakeat(recipient, True)
        assert_greater_than(0.001, result['fee'])

        # Bob has nothing stakeable yet.
        ps = bob.proposerstatus()
        assert_equal(ps['wallets'][0]['stakeable_balance'], 0)

        # Stake the funds: once via RPC, once via a raw P2WSH v2 output.
        result = alice.stakeat(recipient)
        stake_p2wsh(alice, staking_node=bob, amount=1)
        alice.generatetoaddress(1, alices_addr)
        self.sync_all()

        # Alice sees both coins as remotely staked.
        wi = alice.getwalletinfo()
        assert_equal(wi['remote_staking_balance'], 2)

        def bob_is_staking_the_new_coin():
            ps = bob.proposerstatus()
            return ps['wallets'][0]['stakeable_balance'] == 2
        wait_until(bob_is_staking_the_new_coin, timeout=10)

        # Change outputs for both staked coins, and the balance staked remotely
        assert_equal(len(alice.listunspent()), 2 + (regtest_mnemonics[0]['balance'] // STAKE_SPLIT_THRESHOLD))
if __name__ == '__main__':
RemoteStakingTest().main()
|
nilq/baby-python
|
python
|
from sanic import Sanic
from sanic.blueprints import Blueprint
from sanic.response import stream, text
from sanic.views import HTTPMethodView
from sanic.views import stream as stream_decorator
bp = Blueprint("bp_example")
app = Sanic("Example")
class SimpleView(HTTPMethodView):
    """Class-based view that echoes a streamed POST body back as text."""

    @stream_decorator
    async def post(self, request):
        # Accumulate body chunks until the stream signals completion
        # with None, then join them into the response.
        collected = []
        while True:
            chunk = await request.stream.get()
            if chunk is None:
                break
            collected.append(chunk.decode("utf-8"))
        return text("".join(collected))
@app.post("/stream", stream=True)
async def handler(request):
async def streaming(response):
while True:
body = await request.stream.get()
if body is None:
break
body = body.decode("utf-8").replace("1", "A")
await response.write(body)
return stream(streaming)
@bp.put("/bp_stream", stream=True)
async def bp_handler(request):
result = ""
while True:
body = await request.stream.get()
if body is None:
break
result += body.decode("utf-8").replace("1", "A")
return text(result)
async def post_handler(request):
    """Plain handler that echoes a streamed request body as text."""
    pieces = []
    while True:
        chunk = await request.stream.get()
        if chunk is None:
            break
        pieces.append(chunk.decode("utf-8"))
    return text("".join(pieces))
# Attach the blueprint's routes (/bp_stream) to the application.
app.blueprint(bp)
# Mount the class-based streaming view.
app.add_route(SimpleView.as_view(), "/method_view")

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8000)
|
nilq/baby-python
|
python
|
import argparse
class ArgumentParser(argparse.ArgumentParser):
    """Command-line interface for Robyn.

    Flags:
        --processes N   number of processes (default 1)
        --workers N     number of workers (default 1)
        --dev BOOL      development mode; incompatible with the two above
    """

    def __init__(self):
        # Bug fix: the base class was never initialised, so the instance
        # was a broken argparse.ArgumentParser — any inherited method or
        # attribute (e.g. `description`) would fail.
        super().__init__(
            description="Robyn, a fast async web framework with a rust runtime.")
        # Kept for backward compatibility: callers used to reach the
        # parser through `self.parser`; it is now the instance itself.
        self.parser = self
        self.parser.add_argument('--processes', type=int, default=1, required=False)
        self.parser.add_argument('--workers', type=int, default=1, required=False)
        self.parser.add_argument('--dev', default=False,
                                 type=lambda x: (str(x).lower() == 'true'))
        self.args = self.parser.parse_args()

    def num_processes(self):
        """Return the requested number of processes."""
        return self.args.processes

    def workers(self):
        """Return the requested number of workers."""
        return self.args.workers

    def is_dev(self):
        """Return True when --dev was given.

        Raises:
            Exception: if --dev is combined with --processes or --workers.
        """
        _is_dev = self.args.dev
        if _is_dev and (self.num_processes() != 1 or self.workers() != 1):
            raise Exception("--processes and --workers shouldn't be used with --dev")
        return _is_dev
|
nilq/baby-python
|
python
|
# Interactive menu: reads two integers, then offers arithmetic options
# until the user chooses option 5 to quit.
r = 's'
while r == 's':
    n1 = int(input('Digite o 1º valor: '))
    n2 = int(input('Digite o 2º valor: '))
    print(' [ 1 ] SOMAR')
    print(' [ 2 ] Multiplicar')
    print(' [ 3 ] Maior')
    print(' [ 4 ] Novos Números')
    print(' [ 5 ] Sair do Programa')
    opcao = int(input('Escolha uma operação: '))
    if opcao == 1:
        soma = n1 + n2
        print('Resultado da SOMA entre {} e {} = {}'.format(n1, n2, soma))
    elif opcao == 2:
        m = n1 * n2
        print('Resultado da MULTIPLICAÇÃO entre {} e {} = {}'.format(n1, n2, m))
    elif opcao == 3:
        # Bug fix: the original printed nothing when both values were equal.
        if n1 == n2:
            print('Os dois valores são iguais: {}'.format(n1))
        else:
            maior = max(n1, n2)
            print('Maior valor digitado entre {} e {} = {}'.format(n1, n2, maior))
    elif opcao == 4:
        print('Você escolheu digitar novos valores!')
    elif opcao == 5:
        r = 'n'
        print('Finalizando Programa')
    else:
        # Bug fix: invalid options previously restarted the loop silently.
        print('Opção inválida, tente novamente.')
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 28 10:00:06 2017

@author: ldn
"""

# (x, y, z) coordinates of the girder end points.
EndPointCoordinate = ((-3.6, 0.0, 7.355), (123.6, 0.0, 7.355))  # west & east end point

# Girder/rigid-arm connection points along the span (x, y, z).
rGirderRigidarmCoordinate = ((10, 8.13, 0), (15, 8.3675, 0), (20, 8.58, 0),
    (25, 8.7675, 0), (30, 8.93, 0), (35, 9.0675, 0), (40, 9.18, 0), (45, 9.2675, 0), (50, 9.33, 0), (55, 9.3675, 0),
    (60, 9.38, 0),
    (65, 9.3675, 0), (70, 9.33, 0), (75, 9.2675, 0), (80, 9.18, 0), (85, 9.0675, 0), (90, 8.93, 0), (95, 8.7675, 0),
    (100, 8.58, 0), (105, 8.3675, 0), (110, 8.13, 0))

# Rigid-arm/suspender connection points, one group per side (z = -3.75 / +3.75).
# NOTE(review): x = 25 and x = 95 are absent here although present in
# rGirderRigidarmCoordinate — preserved as-is; confirm intentional.
rRigidarmSuspenderCoordinate = (((10, 7.73, -3.75), (15, 7.9675, -3.75), (20, 8.18, -3.75),
    (30, 8.53, -3.75), (35, 8.6675, -3.75), (40, 8.78, -3.75), (45, 8.8675, -3.75), (50, 8.93, -3.75), (55, 8.9675, -3.75),
    (60, 8.98, -3.75),
    (65, 8.9675, -3.75), (70, 8.93, -3.75), (75, 8.8675, -3.75), (80, 8.78, -3.75), (85, 8.6675, -3.75), (90, 8.53, -3.75),
    (100, 8.18, -3.75), (105, 7.9675, -3.75), (110, 7.73, -3.75)),
    ((10, 7.73, 3.75), (15, 7.9675, 3.75), (20, 8.18, 3.75),
    (30, 8.53, 3.75), (35, 8.6675, 3.75), (40, 8.78, 3.75), (45, 8.8675, 3.75), (50, 8.93, 3.75), (55, 8.9675, 3.75),
    (60, 8.98, 3.75),
    (65, 8.9675, 3.75), (70, 8.93, 3.75), (75, 8.8675, 3.75), (80, 8.78, 3.75), (85, 8.6675, 3.75), (90, 8.53, 3.75),
    (100, 8.18, 3.75), (105, 7.9675, 3.75), (110, 7.73, 3.75)))

# West end point, all rigid-arm points, east end point — in span order.
# (Replaces the original manual append loop; also removes the stray
# no-effect REPL leftover expression `3432/3`.)
lst = [EndPointCoordinate[0], *rGirderRigidarmCoordinate, EndPointCoordinate[1]]
l = tuple(lst)
|
nilq/baby-python
|
python
|
#
# PySNMP MIB module SGTE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SGTE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:53:51 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Integer32, TimeTicks, IpAddress, enterprises, Bits, MibIdentifier, ObjectIdentity, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, iso, Gauge32, ModuleIdentity, NotificationType, Unsigned32, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "TimeTicks", "IpAddress", "enterprises", "Bits", "MibIdentifier", "ObjectIdentity", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "iso", "Gauge32", "ModuleIdentity", "NotificationType", "Unsigned32", "Counter64")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Root of the SGTE enterprise subtree (1.3.6.1.4.1.13743).
sgte = MibIdentifier((1, 3, 6, 1, 4, 1, 13743))
# Subtree for the ECI 48V power-supply equipment.
sEci48VP = MibIdentifier((1, 3, 6, 1, 4, 1, 13743, 1))
# Identification group: equipment name, vendor, firmware strings.
cIDENTIFICATION = MibIdentifier((1, 3, 6, 1, 4, 1, 13743, 1, 1))
iNomEquipement = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: iNomEquipement.setStatus('optional')
iNomConstructeur = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: iNomConstructeur.setStatus('optional')
iMarqueCommerciale = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: iMarqueCommerciale.setStatus('optional')
iVersionLogiciel = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: iVersionLogiciel.setStatus('optional')
iCaracterisationFine = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: iCaracterisationFine.setStatus('optional')
cMESURES = MibIdentifier((1, 3, 6, 1, 4, 1, 13743, 1, 2))
mTensionUtilisation = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mTensionUtilisation.setStatus('optional')
mTensionBatterie = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mTensionBatterie.setStatus('optional')
mCourantUtilisation = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantUtilisation.setStatus('optional')
mCourantBatterie1A = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie1A.setStatus('optional')
mCourantBatterie2A = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie2A.setStatus('optional')
mCourantBatterie3A = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie3A.setStatus('optional')
mCourantBatterie1B = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie1B.setStatus('optional')
mCourantBatterie2B = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie2B.setStatus('optional')
mCourantBatterie3B = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie3B.setStatus('optional')
mCourantRedresseur = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantRedresseur.setStatus('optional')
mTauxCharge = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mTauxCharge.setStatus('optional')
mEtape = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mEtape.setStatus('optional')
mTensionDebutTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 13), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mTensionDebutTestBatt.setStatus('optional')
mTensionFinTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 14), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mTensionFinTestBatt.setStatus('optional')
mCourantBatterie1ADebutTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie1ADebutTestBatt.setStatus('optional')
mCourantBatterie1AFinTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie1AFinTestBatt.setStatus('optional')
mCourantBatterie2ADebutTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie2ADebutTestBatt.setStatus('optional')
mCourantBatterie2AFinTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie2AFinTestBatt.setStatus('optional')
mCourantBatterie3ADebutTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie3ADebutTestBatt.setStatus('optional')
mCourantBatterie3AFinTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie3AFinTestBatt.setStatus('optional')
mCourantBatterie1BDebutTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie1BDebutTestBatt.setStatus('optional')
mCourantBatterie1BFinTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 22), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie1BFinTestBatt.setStatus('optional')
mCourantBatterie2BDebutTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 23), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie2BDebutTestBatt.setStatus('optional')
mCourantBatterie2BFinTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 24), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie2BFinTestBatt.setStatus('optional')
mCourantBatterie3BDebutTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 25), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie3BDebutTestBatt.setStatus('optional')
mCourantBatterie3BFinTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 26), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie3BFinTestBatt.setStatus('optional')
mTemperature = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 27), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mTemperature.setStatus('optional')
cETATS = MibIdentifier((1, 3, 6, 1, 4, 1, 13743, 1, 3))
eModifHeure = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eModifHeure.setStatus('optional')
eModifParam = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eModifParam.setStatus('optional')
eLiaisonJbus = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eLiaisonJbus.setStatus('optional')
eTestEnCours = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTestEnCours.setStatus('optional')
eUBMin = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eUBMin.setStatus('optional')
eTestNonRealise = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTestNonRealise.setStatus('optional')
eDefUnRed = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eDefUnRed.setStatus('optional')
eDefPlusRed = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eDefPlusRed.setStatus('optional')
eAlimSecteur = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eAlimSecteur.setStatus('optional')
eFuseBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eFuseBatt.setStatus('optional')
eFuseDep = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eFuseDep.setStatus('optional')
eFuseAux = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eFuseAux.setStatus('optional')
eUMin = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eUMin.setStatus('optional')
eUMax = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eUMax.setStatus('optional')
eTauxCharge = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTauxCharge.setStatus('optional')
eTemperature = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTemperature.setStatus('optional')
eIBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eIBatt.setStatus('optional')
eChargeI = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eChargeI.setStatus('optional')
eChargeU = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eChargeU.setStatus('optional')
eFloating = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eFloating.setStatus('optional')
eComptAH = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eComptAH.setStatus('optional')
eTestBattOK = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 22), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTestBattOK.setStatus('optional')
eTestBattKO = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 23), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTestBattKO.setStatus('optional')
eTestImpossible = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 24), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTestImpossible.setStatus('optional')
eTestRepousse = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 25), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTestRepousse.setStatus('optional')
eTestInterrompu = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 26), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTestInterrompu.setStatus('optional')
eTestMiniKO = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 27), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTestMiniKO.setStatus('optional')
ePuissTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 28), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ePuissTestBatt.setStatus('optional')
eDefEprom = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 29), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eDefEprom.setStatus('optional')
eDetectionCSB = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 30), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eDetectionCSB.setStatus('optional')
eRAZ = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 31), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eRAZ.setStatus('optional')
cALARMES = MibIdentifier((1, 3, 6, 1, 4, 1, 13743, 1, 4))
aModifHeure = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,1))
aModifParam = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,2))
aLiaisonJbus = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,3))
aTestEnCours = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,4))
aUBMin = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,5))
aTestNonRealise = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,6))
aDefUnRed = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,7))
aDefPlusRed = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,8))
aAlimSecteur = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,9))
aFuseBatt = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,10))
aFuseDep = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,11))
aFuseAux = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,12))
aUMin = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,13))
aUMax = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,14))
aTauxCharge = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,15))
aTemperature = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,16))
aIBatt = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,17))
aChargeI = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,18))
aChargeU = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,19))
aFloating = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,20))
aComptAH = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,21))
aTestBattOK = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,22))
aTestBattKO = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,23))
aTestImpossible = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,24))
aTestRepousse = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,25))
aTestInterrompu = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,26))
aTestMiniKO = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,27))
aPuissTestBatt = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,28))
aDefEprom = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,29))
aDetectionCSB = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,30))
aRAZ = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,31))
mibBuilder.exportSymbols("SGTE-MIB", mTemperature=mTemperature, aIBatt=aIBatt, aChargeI=aChargeI, aChargeU=aChargeU, iMarqueCommerciale=iMarqueCommerciale, mCourantRedresseur=mCourantRedresseur, eChargeU=eChargeU, sEci48VP=sEci48VP, eTestBattKO=eTestBattKO, aTestInterrompu=aTestInterrompu, eIBatt=eIBatt, cMESURES=cMESURES, iVersionLogiciel=iVersionLogiciel, eModifHeure=eModifHeure, eLiaisonJbus=eLiaisonJbus, aTauxCharge=aTauxCharge, mCourantBatterie1BDebutTestBatt=mCourantBatterie1BDebutTestBatt, aTestBattKO=aTestBattKO, aRAZ=aRAZ, aUMin=aUMin, mCourantBatterie3ADebutTestBatt=mCourantBatterie3ADebutTestBatt, aTemperature=aTemperature, eTestNonRealise=eTestNonRealise, aFuseBatt=aFuseBatt, eTestImpossible=eTestImpossible, mCourantBatterie1BFinTestBatt=mCourantBatterie1BFinTestBatt, aFloating=aFloating, mCourantBatterie2AFinTestBatt=mCourantBatterie2AFinTestBatt, eFuseBatt=eFuseBatt, eRAZ=eRAZ, eModifParam=eModifParam, aModifParam=aModifParam, aUBMin=aUBMin, aTestNonRealise=aTestNonRealise, aDefPlusRed=aDefPlusRed, mCourantBatterie3AFinTestBatt=mCourantBatterie3AFinTestBatt, eComptAH=eComptAH, iNomEquipement=iNomEquipement, aTestMiniKO=aTestMiniKO, aAlimSecteur=aAlimSecteur, iNomConstructeur=iNomConstructeur, mCourantBatterie1AFinTestBatt=mCourantBatterie1AFinTestBatt, eTestBattOK=eTestBattOK, aTestRepousse=aTestRepousse, aUMax=aUMax, eChargeI=eChargeI, cALARMES=cALARMES, cETATS=cETATS, eDefEprom=eDefEprom, mCourantBatterie3A=mCourantBatterie3A, eAlimSecteur=eAlimSecteur, eUMin=eUMin, aTestEnCours=aTestEnCours, mCourantBatterie3B=mCourantBatterie3B, aFuseDep=aFuseDep, mCourantBatterie2B=mCourantBatterie2B, mEtape=mEtape, ePuissTestBatt=ePuissTestBatt, aDetectionCSB=aDetectionCSB, mCourantBatterie1B=mCourantBatterie1B, eDefPlusRed=eDefPlusRed, mCourantBatterie1A=mCourantBatterie1A, aDefUnRed=aDefUnRed, sgte=sgte, mTensionUtilisation=mTensionUtilisation, mCourantBatterie1ADebutTestBatt=mCourantBatterie1ADebutTestBatt, aModifHeure=aModifHeure, 
iCaracterisationFine=iCaracterisationFine, eFuseDep=eFuseDep, eTauxCharge=eTauxCharge, mCourantUtilisation=mCourantUtilisation, aDefEprom=aDefEprom, mCourantBatterie3BDebutTestBatt=mCourantBatterie3BDebutTestBatt, cIDENTIFICATION=cIDENTIFICATION, aFuseAux=aFuseAux, aComptAH=aComptAH, aTestBattOK=aTestBattOK, mCourantBatterie2BFinTestBatt=mCourantBatterie2BFinTestBatt, eTestRepousse=eTestRepousse, eTestMiniKO=eTestMiniKO, aPuissTestBatt=aPuissTestBatt, aTestImpossible=aTestImpossible, eTestInterrompu=eTestInterrompu, eFloating=eFloating, mCourantBatterie2ADebutTestBatt=mCourantBatterie2ADebutTestBatt, mTensionFinTestBatt=mTensionFinTestBatt, eUBMin=eUBMin, mCourantBatterie2BDebutTestBatt=mCourantBatterie2BDebutTestBatt, eTestEnCours=eTestEnCours, aLiaisonJbus=aLiaisonJbus, mCourantBatterie2A=mCourantBatterie2A, eTemperature=eTemperature, eDetectionCSB=eDetectionCSB, mTensionDebutTestBatt=mTensionDebutTestBatt, eDefUnRed=eDefUnRed, mTauxCharge=mTauxCharge, mTensionBatterie=mTensionBatterie, eFuseAux=eFuseAux, eUMax=eUMax, mCourantBatterie3BFinTestBatt=mCourantBatterie3BFinTestBatt)
|
nilq/baby-python
|
python
|
from django import forms
from django.contrib.auth.models import User
from .models import Profile
class UserCreationForm(forms.ModelForm):
    """Registration form: username, email, names, and password confirmation.

    All labels, help texts, and error messages are Arabic UI strings.
    """
    # NOTE(review): the help text says usernames must not contain spaces,
    # but no validator here enforces that — confirm the model field does.
    username = forms.CharField(label='اسم المستخدم', max_length=30,
                               help_text='اسم المستخدم يجب ألا يحتوي على مسافات.')
    email = forms.EmailField(label='البريد الإلكتروني')
    first_name = forms.CharField(label='الاسم الأول')
    last_name = forms.CharField(label='الاسم الأخير')
    password1 = forms.CharField(
        label='كلمة المرور', widget=forms.PasswordInput(), min_length=8)
    password2 = forms.CharField(
        label='تأكيد كلمة المرور', widget=forms.PasswordInput(), min_length=8)

    class Meta:
        model = User
        fields = ('username', 'email', 'first_name',
                  'last_name', 'password1', 'password2')

    def clean_password2(self):
        """Reject mismatched passwords (Arabic error: 'passwords do not match')."""
        cd = self.cleaned_data
        if cd['password1'] != cd['password2']:
            raise forms.ValidationError('كلمة المرور غير متطابقة')
        return cd['password2']

    def clean_username(self):
        """Reject usernames already in use (Arabic error: 'user already registered')."""
        cd = self.cleaned_data
        if User.objects.filter(username=cd['username']).exists():
            raise forms.ValidationError('يوجد مستخدم مسجل بهذا الاسم.')
        return cd['username']
class LoginForm(forms.ModelForm):
    """Login form exposing username and password (labels in Arabic)."""
    username = forms.CharField(label='اسم المستخدم')
    password = forms.CharField(
        label='كلمة المرور', widget=forms.PasswordInput())

    class Meta:
        model = User
        fields = ('username', 'password')
class UserUpdateForm(forms.ModelForm):
    """Profile-edit form for the built-in User fields (labels in Arabic)."""
    first_name = forms.CharField(label='الاسم الأول')
    last_name = forms.CharField(label='الاسم الأخير')
    email = forms.EmailField(label='البريد الإلكتروني')

    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'email')
class ProfileUpdateForm(forms.ModelForm):
    """Edit form for the extra Profile model; only the avatar image."""
    class Meta:
        model = Profile
        fields = ('image',)
|
nilq/baby-python
|
python
|
class Seat:
    """Seat contains features of the seat"""

    def __init__(self):
        # Accommodation/condition flags used when assigning students.
        self.left_handed = False
        self.special_needs = False
        self.broken = False
        # Describes sid of person sitting there (-1 presumably means
        # unoccupied — confirm against the assignment code).
        self.sid = -1
        # Used for ChunkIncrease
        # True to use the seat, False to keep empty
        self.enable = True
class SeatGroups:
    """A contiguous run of seats within a single row.

    Tracks which columns are deliberately kept empty so the size of the
    largest contiguous student chunk can be minimised.
    """

    def __init__(self, _chunk_begin, _chunk_end):
        # Inclusive (row, col) endpoints of the run; both must share a row.
        self.chunk_begin = _chunk_begin
        self.chunk_end = _chunk_end
        if self.chunk_begin[0] != self.chunk_end[0]:
            raise Exception("Rows don't match, can't be a chunk.")
        # Columns inside the run left empty (used for ConsecDivide).
        self.empty = []

    def size(self):
        """Total number of seats in the run, empty or not."""
        return self.chunk_end[1] - self.chunk_begin[1] + 1

    def max_chunk_size(self):
        """Length of the longest contiguous stretch of non-empty seats."""
        best = 0
        streak = 0
        for col in range(self.chunk_begin[1], self.chunk_end[1] + 1):
            if col in self.empty:
                streak = 0
            else:
                streak += 1
                if streak > best:
                    best = streak
        return best

    def avail_size(self):
        """Number of usable seats (total minus deliberately empty ones)."""
        return self.size() - len(self.empty)

    def __str__(self):
        return str(self.max_chunk_size())

    def __repr__(self):
        return str(self)
|
nilq/baby-python
|
python
|
# Configuration constants for a 16-joint pose-estimation pipeline
# (datasets below include mpii / h36m / ntu / posetrack).
nJoints = 16
# Joint indices used when computing accuracy.
accIdxs = [0, 1, 2, 3, 4, 5, 10, 11, 14, 15]
# Left/right joint index pairs to swap when an image is mirrored.
shuffleRef = [[0, 5], [1, 4], [2, 3],
              [10, 15], [11, 14], [12, 13]]
# Skeleton edges (bones) as pairs of joint indices, used for drawing.
edges = [[0, 1], [1, 2], [2, 6], [6, 3], [3, 4], [4, 5],
         [10, 11], [11, 12], [12, 8], [8, 13], [13, 14], [14, 15],
         [6, 8], [8, 9]]
# Input image sizes per dataset.
ntuImgSize = 224
h36mImgSize = 224
# Heatmap output resolution and network input resolution.
outputRes = 64
inputRes = 256
eps = 1e-6
# Optimiser hyper-parameters.
momentum = 0.0
weightDecay = 0.0
alpha = 0.99
epsilon = 1e-8
# Data-augmentation ranges: scale jitter, rotation (degrees),
# gaussian sizes for heatmaps, pixel shift and disturbance.
scale = 0.25
rotate = 30
hmGauss = 1
hmGaussInp = 20
shiftPX = 50
disturb = 10
# Experiment output and dataset directories.
expDir = '../exp'
dataDir = '../data/'
ntuDataDir = dataDir + 'ntu'
h36mDataDir = dataDir + 'h36m'
mpiiDataDir = dataDir + 'mpii'
posetrackDataDir = dataDir + 'posetrack'
nThreads = 4
# Index of the root joint -- presumably the pelvis; TODO confirm.
root = 7
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from ConfigParser import NoOptionError
import calendar
import datetime
from taxi import remote
from taxi.exceptions import CancelException, UsageError
from taxi.projects import Project
from taxi.timesheet import (
NoActivityInProgressError, Timesheet, TimesheetCollection, TimesheetFile
)
from taxi.timesheet.entry import TimesheetEntry, EntriesCollection
from taxi.timesheet.parser import ParseError
from taxi.settings import Settings
from taxi.utils import file
from taxi.utils.structures import OrderedSet
class BaseCommand(object):
    """Base class for taxi commands.

    Pulls the shared services off the application container and exposes
    three lifecycle hooks (setup, validate, run) for subclasses to
    override.
    """
    def __init__(self, app_container):
        # Copy each shared service from the container onto the command.
        for attribute in ('options', 'arguments', 'view',
                          'projects_db', 'settings'):
            setattr(self, attribute, getattr(app_container, attribute))

    def setup(self):
        """Hook run before validation to prepare command state."""
        pass

    def validate(self):
        """Hook to check the command arguments."""
        pass

    def run(self):
        """Hook containing the actual command logic."""
        pass
class BaseTimesheetCommand(BaseCommand):
    # Base for commands that operate on the timesheet files: loads and
    # caches the TimesheetCollection and resolves which files to read.
    def get_timesheet_collection(self, skip_cache=False):
        # Return the cached collection unless skip_cache forces a reload.
        timesheet_collection = getattr(self, '_current_timesheet_collection',
                                       None)
        if timesheet_collection is not None and not skip_cache:
            return timesheet_collection
        timesheet_collection = TimesheetCollection()
        # Current file plus `nb_previous_files` older ones.
        timesheet_files = self.get_files(
            self.options['unparsed_file'],
            int(self.settings.get('nb_previous_files'))
        )
        self.alias_mappings = self.settings.get_aliases()
        for file_path in timesheet_files:
            timesheet_file = TimesheetFile(file_path)
            try:
                timesheet_contents = timesheet_file.read()
            except IOError:
                # A missing file is treated as an empty timesheet.
                timesheet_contents = ''
            t = Timesheet(
                EntriesCollection(
                    timesheet_contents,
                    self.settings.get('date_format')
                ),
                self.alias_mappings,
                timesheet_file
            )
            # Force new entries direction if necessary
            if (self.settings.get('auto_add') in [
                    Settings.AUTO_ADD_OPTIONS['TOP'],
                    Settings.AUTO_ADD_OPTIONS['BOTTOM']]):
                t.entries.add_date_to_bottom = (
                    self.settings.get('auto_add') ==
                    Settings.AUTO_ADD_OPTIONS['BOTTOM']
                )
            timesheet_collection.timesheets.append(t)
        # Fix `add_date_to_bottom` attribute of timesheet entries based on
        # previous timesheets. When a new timesheet is started it won't have
        # any direction defined, so we take the one from the previous
        # timesheet, if any
        previous_timesheet = None
        for timesheet in reversed(timesheet_collection.timesheets):
            if (timesheet.entries.add_date_to_bottom is None
                    and previous_timesheet
                    and previous_timesheet.entries.add_date_to_bottom
                    is not None):
                timesheet.entries.add_date_to_bottom = (
                    previous_timesheet.entries.add_date_to_bottom
                )
            previous_timesheet = timesheet
        # Cache for subsequent calls within the same command run.
        setattr(self, '_current_timesheet_collection', timesheet_collection)
        return timesheet_collection
    def get_files(self, filename, nb_previous_files):
        # Expand a filename pattern containing strftime placeholders into
        # the current file plus nb_previous_files older files, stepping by
        # the smallest date unit present ('%m' month or '%Y' year).
        date_units = ['m', 'Y']
        smallest_unit = None
        for date in date_units:
            if '%%%s' % date in filename:
                smallest_unit = date
                break
        if smallest_unit is None:
            # No date placeholder: there is only one timesheet file.
            return OrderedSet([filename])
        files = OrderedSet()
        file_date = datetime.date.today()
        for i in xrange(0, nb_previous_files + 1):
            files.add(file.expand_filename(filename, file_date))
            if smallest_unit == 'm':
                # Step back one month, wrapping the year in January.
                if file_date.month == 1:
                    file_date = file_date.replace(day=1,
                                                  month=12,
                                                  year=file_date.year - 1)
                else:
                    file_date = file_date.replace(day=1,
                                                  month=file_date.month - 1)
            elif smallest_unit == 'Y':
                file_date = file_date.replace(day=1, year=file_date.year - 1)
        return files
class AddCommand(BaseCommand):
    """
    Usage: add search_string
    Searches and prompts for project, activity and alias and adds that as a new
    entry to .tksrc.
    """
    def validate(self):
        if len(self.arguments) < 1:
            raise UsageError()
    def run(self):
        search = self.arguments
        projects = self.projects_db.search(search, active_only=True)
        projects = sorted(projects, key=lambda project: project.name)
        if len(projects) == 0:
            self.view.msg(
                u"No active project matches your search string '%s'" %
                ''.join(search)
            )
            return
        self.view.projects_list(projects, True)
        # Interactive selection; CancelException aborts silently.
        try:
            number = self.view.select_project(projects)
        except CancelException:
            return
        project = projects[number]
        mappings = self.settings.get_reversed_aliases()
        self.view.project_with_activities(project, mappings,
                                          numbered_activities=True)
        try:
            number = self.view.select_activity(project.activities)
        except CancelException:
            return
        retry = True
        while retry:
            try:
                alias = self.view.select_alias()
            except CancelException:
                return
            if self.settings.activity_exists(alias):
                mapping = self.settings.get_aliases()[alias]
                overwrite = self.view.overwrite_alias(alias, mapping)
                # NOTE(review): `if not overwrite: return` also catches any
                # falsy "retry" sentinel, which seems to make the final
                # `else` branch unreachable -- confirm against the view's
                # overwrite_alias() return values.
                if not overwrite:
                    return
                elif overwrite:
                    retry = False
                # User chose "retry"
                else:
                    retry = True
            else:
                retry = False
        activity = project.activities[number]
        self.settings.add_alias(alias, project.id, activity.id)
        self.settings.write_config()
        self.view.alias_added(alias, (project.id, activity.id))
class AliasCommand(BaseCommand):
    """
    Usage: alias [alias]
           alias [project_id]
           alias [project_id/activity_id]
           alias [alias] [project_id/activity_id]
    - The first form will display the mappings whose aliases start with the
      search string you entered
    - The second form will display the mapping(s) you've defined for this
      project and all of its activities
    - The third form will display the mapping you've defined for this exact
      project/activity tuple
    - The last form will add a new alias in your configuration file
    You can also run this command without any argument to view all your
    mappings.
    """
    # Operating modes derived from the number of arguments in setup().
    MODE_SHOW_MAPPING = 0
    MODE_ADD_ALIAS = 1
    MODE_LIST_ALIASES = 2
    def validate(self):
        if len(self.arguments) > 2:
            raise UsageError()
    def setup(self):
        if len(self.arguments) == 2:
            self.alias = self.arguments[0]
            self.mapping = self.arguments[1]
            self.mode = self.MODE_ADD_ALIAS
        elif len(self.arguments) == 1:
            self.alias = self.arguments[0]
            self.mode = self.MODE_SHOW_MAPPING
        else:
            self.alias = None
            self.mode = self.MODE_LIST_ALIASES
    def run(self):
        # 2 arguments, add a new alias
        if self.mode == self.MODE_ADD_ALIAS:
            self._add_alias(self.alias, self.mapping)
        # 1 argument, display the alias or the project id/activity id tuple
        elif self.mode == self.MODE_SHOW_MAPPING:
            mapping = Project.str_to_tuple(self.alias)
            if mapping is not None:
                for m in self.settings.search_aliases(mapping):
                    self.view.mapping_detail(m, self.projects_db.get(m[1][0]))
            else:
                # The argument didn't parse as an id tuple: fall through
                # to the alias-listing mode below.
                self.mode = self.MODE_LIST_ALIASES
        # No argument, display the mappings
        if self.mode == self.MODE_LIST_ALIASES:
            for m in self.settings.search_mappings(self.alias):
                self.view.alias_detail(
                    m,
                    self.projects_db.get(m[1][0]) if m[1] is not None else None
                )
    def _add_alias(self, alias_name, mapping):
        # Parse "project_id/activity_id", confirm overwrites, then persist.
        project_activity = Project.str_to_tuple(mapping)
        if project_activity is None:
            raise UsageError("The mapping must be in the format xxxx/yyyy")
        if self.settings.activity_exists(alias_name):
            existing_mapping = self.settings.get_aliases()[alias_name]
            confirm = self.view.overwrite_alias(alias_name, existing_mapping,
                                                False)
            if not confirm:
                return
        self.settings.add_alias(alias_name, project_activity[0],
                                project_activity[1])
        self.settings.write_config()
        self.view.alias_added(alias_name, project_activity)
class AutofillCommand(BaseTimesheetCommand):
    """
    Usage: autofill
    Fills your timesheet up to today, for the defined auto_fill_days.
    """
    def run(self):
        auto_fill_days = self.settings.get_auto_fill_days()
        if auto_fill_days:
            # Prefill the current timesheet up to the last day of the
            # current month.
            today = datetime.date.today()
            last_day = calendar.monthrange(today.year, today.month)
            last_date = datetime.date(today.year, today.month, last_day[1])
            timesheet_collection = self.get_timesheet_collection()
            t = timesheet_collection.timesheets[0]
            t.prefill(auto_fill_days, last_date)
            t.file.write(t.entries)
            self.view.msg(u"Your entries file has been filled.")
        else:
            self.view.err(u"The parameter `auto_fill_days` must be set to "
                          "use this command.")
class KittyCommand(BaseCommand):
    """
    |\ _,,,---,,_
    /,`.-'`'    -.  ;-;;,_
    |,4-  ) )-,_..;\ (  `'-'
    '---''(_/--'  `-'\_)
    Soft kitty, warm kitty
    Little ball of fur
    Happy kitty, sleepy kitty
    Purr, purr, purr
    """
    # Easter-egg command: the docstring above *is* the user-facing output
    # (run() prints self.__doc__ verbatim), so do not reformat it.
    def run(self):
        self.view.msg(self.__doc__)
class CleanAliasesCommand(BaseCommand):
    """
    Usage: clean-aliases
    Removes aliases from your config file that point to inactive projects.
    """
    def run(self):
        aliases = self.settings.get_aliases()
        inactive_aliases = []
        for (alias, mapping) in aliases.iteritems():
            # Ignore local aliases
            if mapping is None:
                continue
            project = self.projects_db.get(mapping[0])
            # An alias is inactive when its project is gone/inactive or
            # its activity no longer exists on the project.
            if (project is None or not project.is_active() or
                    (mapping[1] is not None
                     and project.get_activity(mapping[1]) is None)):
                inactive_aliases.append(((alias, mapping), project))
        if not inactive_aliases:
            self.view.msg(u"No inactive aliases found.")
            return
        # `confirm` is only bound when force_yes is falsy; the
        # short-circuit `or` below keeps that safe.
        if not self.options.get('force_yes'):
            confirm = self.view.clean_inactive_aliases(inactive_aliases)
        if self.options.get('force_yes') or confirm:
            self.settings.remove_aliases(
                [item[0][0] for item in inactive_aliases]
            )
            self.settings.write_config()
            self.view.msg(u"%d inactive aliases have been successfully"
                          " cleaned." % len(inactive_aliases))
class CommitCommand(BaseTimesheetCommand):
    """
    Usage: commit
    Commits your work to the server.
    """
    def run(self):
        timesheet_collection = self.get_timesheet_collection()
        # Unless a date was given or the check is explicitly ignored,
        # refuse to push entries recorded on non-working days.
        if (self.options.get('date', None) is None
                and not self.options.get('ignore_date_error', False)):
            non_workday_entries = (
                timesheet_collection.get_non_current_workday_entries()
            )
            if non_workday_entries:
                self.view.non_working_dates_commit_error(
                    non_workday_entries.keys()
                )
                return
        self.view.pushing_entries()
        r = remote.ZebraRemote(self.settings.get('site'),
                               self.settings.get('username'),
                               self.settings.get('password'))
        all_pushed_entries = []
        all_failed_entries = []
        for timesheet in timesheet_collection.timesheets:
            entries_to_push = timesheet.get_entries(
                self.options.get('date', None), exclude_ignored=True,
                exclude_local=True, exclude_unmapped=True, regroup=True
            )
            (pushed_entries, failed_entries) = r.send_entries(
                entries_to_push, self.alias_mappings, self._entry_pushed
            )
            local_entries = timesheet.get_local_entries(
                self.options.get('date', None)
            )
            local_entries_list = []
            for (date, entries) in local_entries.iteritems():
                local_entries_list.extend(entries)
            # Mark local and successfully pushed entries as commented so
            # they are not pushed again.
            for entry in local_entries_list + pushed_entries:
                entry.commented = True
            for (entry, _) in failed_entries:
                entry.fix_start_time()
            # Also fix start time for ignored entries. Since they won't get
            # pushed, there's a chance their previous sibling gets commented
            for (date, entries) in timesheet.get_ignored_entries().items():
                for entry in entries:
                    entry.fix_start_time()
            timesheet.file.write(timesheet.entries)
            all_pushed_entries.extend(pushed_entries)
            all_failed_entries.extend(failed_entries)
        ignored_entries = timesheet_collection.get_ignored_entries(
            self.options.get('date', None)
        )
        ignored_entries_list = []
        for (date, entries) in ignored_entries.iteritems():
            ignored_entries_list.extend(entries)
        self.view.pushed_entries_summary(all_pushed_entries,
                                         all_failed_entries,
                                         ignored_entries_list)
    def _entry_pushed(self, entry, error):
        # Per-entry progress callback passed to ZebraRemote.send_entries.
        self.view.pushed_entry(entry, error, self.alias_mappings)
class EditCommand(BaseTimesheetCommand):
    """
    Usage: edit
    Opens your zebra file in your favourite editor.
    """
    def run(self):
        timesheet_collection = None
        # A parse error is tolerated here: the user is about to edit the
        # file, presumably to fix it.
        try:
            timesheet_collection = self.get_timesheet_collection()
        except ParseError:
            pass
        if timesheet_collection:
            t = timesheet_collection.timesheets[0]
            # Optionally prefill the timesheet before opening the editor.
            if (self.settings.get('auto_add') !=
                    Settings.AUTO_ADD_OPTIONS['NO']
                    and not self.options.get('forced_file')):
                auto_fill_days = self.settings.get_auto_fill_days()
                if auto_fill_days:
                    t.prefill(auto_fill_days, limit=None)
                    t.file.write(t.entries)
        try:
            editor = self.settings.get('editor')
        except NoOptionError:
            editor = None
        file.spawn_editor(self.options['file'], editor)
        # Re-read the (possibly modified) file, bypassing the cache.
        try:
            timesheet_collection = self.get_timesheet_collection(True)
        except ParseError as e:
            self.view.err(e)
        else:
            self.view.show_status(
                timesheet_collection.get_entries(regroup=True),
                self.alias_mappings, self.settings
            )
class HelpCommand(BaseCommand):
    """
    YO DAWG you asked for help for the help command. Try to search Google in
    Google instead.
    """
    # Displays the usage (docstring) of another command.
    def __init__(self, application_container):
        super(HelpCommand, self).__init__(application_container)
        self.commands_mapping = application_container.commands_mapping
    def setup(self):
        if len(self.arguments) == 0:
            raise UsageError()
        else:
            self.command = self.arguments[0]
    def run(self):
        # `help help` shows this command's own (joke) docstring.
        if self.command == 'help':
            self.view.command_usage(self)
        else:
            if self.command in self.commands_mapping:
                self.view.command_usage(self.commands_mapping[self.command])
            else:
                self.view.err(u"Command %s doesn't exist." % self.command)
class SearchCommand(BaseCommand):
    """
    Usage: search search_string
    Searches for a project by its name. The letter in the first column
    indicates the status of the project: [N]ot started, [A]ctive, [F]inished,
    [C]ancelled.
    """
    def validate(self):
        if len(self.arguments) < 1:
            raise UsageError()
    def run(self):
        projects = self.projects_db.search(self.arguments)
        # Case-insensitive sort by project name for stable display.
        projects = sorted(projects, key=lambda project: project.name.lower())
        self.view.search_results(projects)
class ShowCommand(BaseCommand):
    """
    Usage: show project_id
    Shows the details of the given project_id (you can find it with the search
    command).
    """
    def validate(self):
        if len(self.arguments) < 1:
            raise UsageError()
        # The id must parse as an integer before setup() converts it.
        try:
            int(self.arguments[0])
        except ValueError:
            raise UsageError("The project id must be a number")
    def setup(self):
        self.project_id = int(self.arguments[0])
    def run(self):
        try:
            project = self.projects_db.get(self.project_id)
        except IOError:
            # The local projects database hasn't been created yet.
            raise Exception("Error: the projects database file doesn't exist. "
                            "Please run `taxi update` to create it")
        if project is None:
            self.view.err(
                u"The project `%s` doesn't exist" % (self.project_id)
            )
        else:
            mappings = self.settings.get_reversed_aliases()
            self.view.project_with_activities(project, mappings)
class StartCommand(BaseTimesheetCommand):
    """
    Usage: start project_name
    Use it when you start working on the project project_name. This will add
    the project name and the current time to your entries file. When you're
    finished, use the stop command.
    """
    def validate(self):
        if len(self.arguments) != 1:
            raise UsageError()
    def setup(self):
        self.project_name = self.arguments[0]
    def run(self):
        today = datetime.date.today()
        try:
            timesheet_collection = self.get_timesheet_collection()
        except ParseError as e:
            self.view.err(e)
            return
        t = timesheet_collection.timesheets[0]
        # If there's a previous entry on the same date, check if we can use its
        # end time as a start time for the newly started entry
        today_entries = t.get_entries(today)
        if(today in today_entries and today_entries[today]
                and isinstance(today_entries[today][-1].duration, tuple)
                and today_entries[today][-1].duration[1] is not None):
            new_entry_start_time = today_entries[today][-1].duration[1]
        else:
            new_entry_start_time = datetime.datetime.now()
        # Open-ended duration: the end time is filled in by `stop`.
        duration = (new_entry_start_time, None)
        e = TimesheetEntry(self.project_name, duration, '?')
        t.entries[today].append(e)
        t.file.write(t.entries)
class StatusCommand(BaseTimesheetCommand):
    """
    Usage: status
    Shows the summary of what's going to be committed to the server.
    """
    def setup(self):
        # Optional date filter taken from the command-line options.
        self.date = self.options.get('date', None)
    def run(self):
        try:
            timesheet_collection = self.get_timesheet_collection()
        except ParseError as e:
            self.view.err(e)
        else:
            self.view.show_status(
                timesheet_collection.get_entries(self.date, regroup=True),
                self.alias_mappings,
                self.settings
            )
class StopCommand(BaseTimesheetCommand):
    """
    Usage: stop [description]
    Use it when you stop working on the current task. You can add a description
    to what you've done.
    """
    def setup(self):
        if len(self.arguments) == 0:
            self.description = None
        else:
            self.description = ' '.join(self.arguments)
    def run(self):
        # Close the in-progress entry for today with the current time.
        try:
            timesheet_collection = self.get_timesheet_collection()
            current_timesheet = timesheet_collection.timesheets[0]
            current_timesheet.continue_entry(
                datetime.date.today(),
                datetime.datetime.now().time(),
                self.description
            )
        except ParseError as e:
            self.view.err(e)
        except NoActivityInProgressError:
            self.view.err(u"You don't have any activity in progress for today")
        else:
            current_timesheet.file.write(current_timesheet.entries)
class UpdateCommand(BaseCommand):
    """
    Usage: update
    Synchronizes your project database with the server and updates the shared
    aliases.
    """
    def setup(self):
        self.site = self.settings.get('site')
        self.username = self.settings.get('username')
        self.password = self.settings.get('password')
    def run(self):
        self.view.updating_projects_database()
        # Snapshot the aliases before the update so the view can report
        # what changed.
        aliases_before_update = self.settings.get_aliases()
        local_aliases = self.settings.get_aliases(include_shared=False)
        r = remote.ZebraRemote(self.site, self.username, self.password)
        projects = r.get_projects()
        self.projects_db.update(projects)
        # Put the shared aliases in the config file
        shared_aliases = {}
        for project in projects:
            if project.is_active():
                for alias, activity_id in project.aliases.iteritems():
                    self.settings.add_shared_alias(alias, project.id,
                                                   activity_id)
                    shared_aliases[alias] = (project.id, activity_id)
        aliases_after_update = self.settings.get_aliases()
        self.settings.write_config()
        self.view.projects_database_update_success(aliases_before_update,
                                                   aliases_after_update,
                                                   local_aliases,
                                                   shared_aliases,
                                                   self.projects_db)
|
nilq/baby-python
|
python
|
"""This module will contain everything needed to train a neural Network.
Authors:
- Johannes Cartus, QCIEP, TU Graz
"""
from os.path import join
from uuid import uuid4
import tensorflow as tf
import numpy as np
from SCFInitialGuess.utilities.usermessages import Messenger as msg
from SCFInitialGuess.nn.cost_functions import MSE, RegularizedMSE
def mse_with_l2_regularisation(
    network,
    expectation_tensor,
    regularisation_parameter=0.001
):
    """Build a cost op: mean squared error plus L2 weight decay.

    Args:
        network: a network object exposing output_tensor and weights.
        expectation_tensor: placeholder/tensor with the target values.
        regularisation_parameter: scale of the L2 penalty on the weights.

    Returns:
        (cost, error, regularisation) ops; scalar summaries for all three
        are registered as a side effect.
    """
    with tf.name_scope("mse_with_l2_regularisation"):
        error = tf.losses.mean_squared_error(
            network.output_tensor,
            expectation_tensor
        )
        regularisation = tf.contrib.layers.apply_regularization(
            tf.contrib.layers.l2_regularizer(regularisation_parameter),
            network.weights
        )
        cost = error + regularisation
        tf.summary.scalar("weight_decay", regularisation)
        tf.summary.scalar("error", error)
        tf.summary.scalar("total_loss", cost)
    return cost, error, regularisation
class Trainer(object):
    """Sets up and runs the training of a neural network.

    The optimizer and the cost/error functions are pluggable; by default
    an AdamOptimizer (lr=0.001) with a RegularizedMSE cost and an MSE
    error function is used.
    """
    def __init__(
        self,
        network,
        optimizer=None,
        error_function=None,
        cost_function=None):
        self.network = network
        if optimizer is None:
            self.optimizer = tf.train.AdamOptimizer(
                learning_rate=0.001
            )
        else:
            self.optimizer = optimizer
        if cost_function is None:
            self.cost_function = RegularizedMSE()
        else:
            self.cost_function = cost_function
        if error_function is None:
            self.error_function = MSE()
        else:
            self.error_function = error_function
        # Filled in by setup() and train() respectively.
        self.training_step = None
        self.test_error = None
    def setup(self, target_graph=None):
        """Build the network, cost, error and training ops.

        Args:
            target_graph: graph to build into; a new tf.Graph is created
                when None.

        Returns:
            (graph, network, target_placeholder).
        """
        if target_graph is None:
            msg.info("No target graph specified for Trainer setup. " + \
                "Creating new graph ...", 1)
            self.graph = tf.Graph()
        else:
            msg.info("Appending to graph: " + str(target_graph))
            self.graph = target_graph
        with self.graph.as_default():
            msg.info("Setting up the training in the target graph ...", 1)
            # placeholder for dataset target-values
            self.target_placeholder = tf.placeholder(
                dtype="float32",
                shape=[None, self.network.structure[-1]],
                name="y"
            )
            msg.info("network ...", 1)
            with tf.name_scope("network/"):
                # The return value (output tensor) is not needed here;
                # only the side effect of building the network graph is.
                self.network.setup()
                self.input_placeholder = self.network.input_tensor
            msg.info("error function ...", 1)
            with tf.name_scope("error_function/"):
                self.error = self.error_function.function(
                    self.network,
                    self.target_placeholder
                )
            msg.info("cost function ...", 1)
            with tf.name_scope("cost_function/"):
                self.cost = self.cost_function.function(
                    self.network,
                    self.target_placeholder
                )
            msg.info("training step", 1)
            with tf.name_scope("training/"):
                self.training_step = self.optimizer.minimize(self.cost)
        return self.graph, self.network, self.target_placeholder
    def train(
        self,
        dataset,
        max_steps=100000,
        evaluation_period=200,
        mini_batch_size=0.2,
        convergence_threshold=1e-5,
        summary_save_path=None
    ):
        """Run the optimisation loop until convergence or max_steps.

        Convergence is declared when the absolute change in validation
        error between two evaluations drops below convergence_threshold.
        Summaries are written every evaluation_period / 10 steps when
        summary_save_path is given. Sets self.test_error and returns
        (network, session).
        """
        with self.graph.as_default():
            sess = tf.Session(graph=self.graph)
            if self.training_step is None:
                self.setup()
            #--- prep the writer ---
            if not summary_save_path is None:
                summary = tf.summary.merge_all()
                writer = tf.summary.FileWriter(summary_save_path)
                writer.add_graph(sess.graph)
            #---
            #--- train the network ---
            old_error = 1e10
            sess.run(tf.global_variables_initializer())
            msg.info("Starting network training ...", 1)
            for step in range(max_steps):
                mini_batch = dataset.sample_minibatch(mini_batch_size)
                # BUG FIX: this condition was `step % ... :` (truthy on
                # almost every step); `== 0` writes the summary only on
                # the periodic steps, consistent with train_network().
                if step % np.ceil(evaluation_period / 10) == 0:
                    if not summary_save_path is None:
                        writer.add_summary(
                            sess.run(
                                summary,
                                feed_dict={
                                    self.input_placeholder: mini_batch[0],
                                    self.target_placeholder: mini_batch[1]
                                }
                            ),
                            step
                        )
                if step % evaluation_period == 0:
                    error = sess.run(
                        self.error,
                        feed_dict={
                            self.input_placeholder: dataset.validation[0],
                            self.target_placeholder: dataset.validation[1]
                        }
                    )
                    cost = sess.run(
                        self.cost,
                        feed_dict={
                            self.input_placeholder: dataset.validation[0],
                            self.target_placeholder: dataset.validation[1]
                        }
                    )
                    # compare to previous error
                    diff = np.abs(error - old_error)
                    # convergence check
                    if diff < convergence_threshold:
                        msg.info(
                            "Convergence reached after " + str(step) + " steps.",
                            1
                        )
                        break
                    else:
                        msg.info(
                            "Val. Cost: " + \
                            "{:0.3E}. Error: {:0.3E}. Diff: {:0.1E}".format(
                                cost,
                                error,
                                diff
                            )
                        )
                        old_error = error
                # do training step
                sess.run(
                    self.training_step,
                    feed_dict={
                        self.input_placeholder: mini_batch[0],
                        self.target_placeholder: mini_batch[1]
                    }
                )
            #---
            if not summary_save_path is None:
                writer.close()
            test_error = sess.run(
                self.error,
                feed_dict={
                    self.input_placeholder: dataset.testing[0],
                    self.target_placeholder: dataset.testing[1]
                }
            )
            self.test_error = test_error
            msg.info("Test error: {:0.5E}".format(test_error), 1)
        return self.network, sess
class ContinuousTrainer(Trainer):
    """This trainer will train a network until the training is interrupted
    by the user. Everytime a new minimum on the validation error is reached,
    the model is exported.
    """
    def train(
        self,
        dataset,
        network_save_path,
        comment=None,
        old_error=1e10,
        evaluation_period=2000,
        mini_batch_size=40
    ):
        """Similaraly to the train function in the superclass, the function will
        start the training. However it will continue to train until the user
        aborts it. It will be exported after evaluation_period training
        steps if a new minumim of error on the validation training set is reached.
        """
        with self.graph.as_default():
            sess = tf.Session(graph=self.graph)
            if self.training_step is None:
                self.setup()
            #--- train the network ---
            sess.run(tf.global_variables_initializer())
            msg.info("Starting network training ...", 1)
            #Training will run until user aborts it.
            while True:
                #--- do training ---
                # evaluation_period optimisation steps between checks.
                for step in range(evaluation_period):
                    mini_batch = dataset.sample_minibatch(mini_batch_size)
                    sess.run(
                        self.training_step,
                        feed_dict={
                            self.input_placeholder: mini_batch[0],
                            self.target_placeholder: mini_batch[1]
                        }
                    )
                #---
                #--- evaluation ---
                # calculate validation errors ...
                error = sess.run(
                    self.error,
                    feed_dict={
                        self.input_placeholder: dataset.validation[0],
                        self.target_placeholder: dataset.validation[1]
                    }
                )
                # ... and costs.
                cost = sess.run(
                    self.cost,
                    feed_dict={
                        self.input_placeholder: dataset.validation[0],
                        self.target_placeholder: dataset.validation[1]
                    }
                )
                # Check for new validation error minimum
                diff = error - old_error
                # if a new minimum was found notify user
                # and save the model.
                if diff < 0:
                    message = (
                        "New Minimum found! Val. Cost: {:0.1E}. " + \
                        "Error: {:0.3E}. Diff: {:0.1E}"
                    ).format(cost, error, diff)
                    msg.info(message)
                    # export network
                    self.network.export(sess, network_save_path, error, comment)
                    # store new minimum
                    old_error = error
                #---
            #---
def train_network(
    network,
    dataset,
    sess=None,
    learning_rate=0.001,
    regularisation_parameter=0.01,
    max_steps=100000,
    evaluation_period=200,
    mini_batch_size=0.2,
    convergence_threshold=1e-5,
    summary_save_path=None
):
    """Train a neural Neutwork from nn.networks with the AdamOptimizer,
    to minimize the mean squared error with l2 regularisation.
    Args:
        - network <nn.networks.AbstractNeuralNetwork>: the network to be trained.
        - dataset <utilities.dataset.Dataset>: the dataset to train the net on.
        - learning_rate <float>: the learning rate to use for training w/
        AdamOptimizer
        - regularisation_parameter <float>: the factor with which the
        regularisation is added to the total cost.
        - max_steps <int>: max number of learning steps to take if convergence
        not met before.
        - evaluation_period <int>: period of training steps after which there
        will be a check for convergence.
        mini_batch_size <int>: size of the minibatch that is randomly sampled
        from the training dataset in every training step.
        - convergence_threshold <float>: training convergence is reached if
        difference in error drops below this value.
        - summary_save_path <str>: the full path to a folder in which the
        tensorboard data will be written. If None given nothing will be exported.
    Returns:
        - the trained network
        - the session
    """
    if sess is None:
        sess = tf.Session()
    #--- set up the graph ---
    msg.info("Setting up the graph ...", 1)
    network_output = network.setup()
    x = network.input_tensor
    # Placeholder for the expected outputs (dataset targets).
    y = tf.placeholder(
        dtype="float32",
        shape=[None, network.structure[-1]],
        name="y"
    )
    # cost is mse w/ l2 regularisation
    cost, mse, _ = mse_with_l2_regularisation(
        network,
        expectation_tensor=y,
        regularisation_parameter=regularisation_parameter
    )
    #optimizer and training
    with tf.name_scope("training"):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        train_step = optimizer.minimize(cost)
    #---
    #--- prep the writer ---
    if not summary_save_path is None:
        msg.warn("Careful! If more than 1 network is in current graph, " + \
            "it should be cleared before merging the summary!"
        )
        summary = tf.summary.merge_all()
        writer = tf.summary.FileWriter(summary_save_path)
        writer.add_graph(sess.graph)
    #---
    #--- train the network ---
    msg.info("Starting network training ...", 1)
    old_error = 1e10
    sess.run(tf.global_variables_initializer())
    for step in range(max_steps):
        mini_batch = dataset.sample_minibatch(mini_batch_size)
        # Write a summary every evaluation_period / 10 steps.
        if step % np.ceil(evaluation_period / 10) == 0:
            if not summary_save_path is None:
                writer.add_summary(
                    sess.run(
                        summary,
                        feed_dict={
                            x: mini_batch[0],
                            y: mini_batch[1]
                        }
                    ),
                    step
                )
        if step % evaluation_period == 0:
            error = sess.run(
                mse,
                feed_dict={x: dataset.validation[0], y: dataset.validation[1]}
            )
            # compare to previous error
            diff = np.abs(error - old_error)
            # convergence check
            if diff < convergence_threshold:
                msg.info(
                    "Convergence reached after " + str(step) + " steps.",
                    1
                )
                break
            else:
                msg.info(
                    "Validation cost: {:0.5E}. Diff to prev.: {:0.1E}".format(
                        error,
                        diff
                    )
                )
                old_error = error
        # do training step
        sess.run(train_step, feed_dict={x: mini_batch[0], y: mini_batch[1]})
    #---
    if not summary_save_path is None:
        writer.close()
    # Final evaluation on the held-out test split.
    test_error = sess.run(
        mse,
        feed_dict={x: dataset.testing[0], y: dataset.testing[1]}
    )
    msg.info("Test error: {:0.5E}".format(test_error), 1)
    return network, sess
def network_benchmark(
    models,
    dataset,
    logdir,
    steps_report=250,
    max_training_steps=100000,
    convergence_eps=1e-7
):
    """Train each model on the dataset and log its progress to tensorboard.

    For every model a fresh graph/session is built, the model is trained
    until the validation error change drops below convergence_eps (or
    max_training_steps is hit), and summaries are written to a per-model
    subfolder of logdir.
    """
    for model in models:
        msg.info("Investigating model " + str(model), 2)
        save_path = join(logdir, str(model))
        # make new session and build graph
        tf.reset_default_graph()
        sess = tf.Session()
        dim_in = model.network.structure[0]
        dim_out = model.network.structure[-1]
        f = model.network.setup()
        x = model.input_tensor
        y = tf.placeholder(tf.float32, shape=[None, dim_out])
        with tf.name_scope("loss"):
            error = tf.losses.mean_squared_error(y, f) / dim_out # sum_i (f(x_i) - y_i)^2
            weight_decay = tf.contrib.layers.apply_regularization(
                tf.contrib.layers.l2_regularizer(0.001),
                model.network.weights
            )
            loss = error + weight_decay
            tf.summary.scalar("weight_decay", weight_decay)
            tf.summary.scalar("error_per_element", error)
            tf.summary.scalar("total_loss", loss)
        # define loss
        with tf.name_scope("train"):
            train_step = model.optimizer.minimize(loss)
        summary = tf.summary.merge_all()
        #saver = tf.train.Saver()
        writer = tf.summary.FileWriter(save_path)
        writer.add_graph(sess.graph)
        msg.info("Start training ... ", 1)
        old_error = 1e13
        sess.run(tf.global_variables_initializer())
        for step in range(max_training_steps):
            batch = dataset.sample_minibatch(0.2)
            # log progress
            if step % 50 == 0:
                writer.add_summary(sess.run(
                    summary,
                    feed_dict={x: batch[0], y: batch[1]}
                ), step)
            # save graph and report error
            if step % steps_report == 0:
                validation_error = sess.run(
                    error,
                    feed_dict={x: dataset.validation[0], y: dataset.validation[1]}
                ) / dim_out
                #saver.save(sess, log_dir, step)
                diff = np.abs(old_error - validation_error)
                msg.info("Error: {:0.4E}. Diff to before: {:0.4E}".format(
                    validation_error,
                    diff
                ))
                if diff < convergence_eps:
                    msg.info(
                        "Convergence reached after " + str(step) + " steps.", 1
                    )
                    break
                else:
                    old_error = validation_error
                if step + 1 == max_training_steps:
                    msg.info("Max iterations exceeded.", 1)
            sess.run(train_step, feed_dict={x: batch[0], y: batch[1]})
        # Final check on the validation split (no separate test split here).
        test_error = sess.run(
            error,
            feed_dict={x: dataset.validation[0], y: dataset.validation[1]}
        ) / dim_out
        msg.info("Test error: {:0.1E}".format(test_error))
|
nilq/baby-python
|
python
|
from django.contrib import admin
from . import models
from django.conf import settings
admin.site.register(models.OfferCategory)
class OfferAdmin(admin.ModelAdmin):
    """Admin configuration for Offer objects."""
    # NOTE: this branch runs once at import time; multi-vendor shops show
    # the 'total_vendors' column and gain a vendor filter.
    if settings.MULTI_VENDOR:
        list_display = ['title', 'total_vendors', 'starts_from', 'ends_at']
        list_filter = ('vendor',)
    else:
        list_display = ['title', 'create_at', 'starts_from', 'ends_at']
    list_per_page = 25
    search_fields = ['title', 'description', 'ends_at']
    # Banner previews are rendered read-only in the change form.
    readonly_fields = ['big_banner_tag', 'small_banner_tag']
    # autocomplete_fields = ['category']
admin.site.register(models.Offer, OfferAdmin)
|
nilq/baby-python
|
python
|
from __future__ import annotations
from amulet.world_interface.chunk.interfaces.leveldb.base_leveldb_interface import (
BaseLevelDBInterface,
)
class LevelDB4Interface(BaseLevelDBInterface):
    """Chunk interface for version 4 of the leveldb chunk format."""
    def __init__(self):
        BaseLevelDBInterface.__init__(self)
        # Feature flags telling the base interface how to (de)serialise
        # each part of a version-4 chunk.
        self.features["chunk_version"] = 4
        self.features["finalised_state"] = "int0-2"
        self.features["data_2d"] = "unused_height512|biome256"
        self.features["block_entities"] = "31list"
        self.features["block_entity_format"] = "str-id"
        self.features["block_entity_coord_format"] = "xyz-int"
        self.features["entities"] = "32list"
        self.features["entity_format"] = "int-id"
        self.features["entity_coord_format"] = "Pos-list-float"
        self.features["terrain"] = "2farray"
# Hook consumed by the interface loader to pick up this class.
INTERFACE_CLASS = LevelDB4Interface
|
nilq/baby-python
|
python
|
import unittest
from ui.stub_io import StubIO
class StubIOTest(unittest.TestCase):
    """Unit tests for the StubIO test double (write/read via lists)."""
    def setUp(self):
        self.io = StubIO()
    def test_method_write_adds_argument_to_output_list(self):
        self.io.write("test")
        self.assertEqual(self.io.output, ["test"])
    def test_method_set_input_adds_argument_to_input_list(self):
        self.io.set_input("test")
        self.assertEqual(self.io.input, ["test"])
    def test_return_empty_string_when_input_list_is_empty(self):
        result = self.io.read("")
        self.assertEqual(result, "")
    def test_return_first_item_of_input_list_when_it_is_not_empty(self):
        self.io.set_input("test")
        result = self.io.read("")
        self.assertEqual(result, "test")
|
nilq/baby-python
|
python
|
import unittest
from kafka_influxdb.encoder import heapster_event_json_encoder
class TestHeapsterEventJsonEncoder(unittest.TestCase):
    """Unit test for the heapster_event_json_encoder Encoder.

    Feeds a real Kubernetes event payload (as emitted by Heapster) through
    the encoder and verifies the resulting InfluxDB line-protocol output.
    """

    def setUp(self):
        self.encoder = heapster_event_json_encoder.Encoder()

    def testEncoder(self):
        # Raw Kafka message: a JSON envelope whose EventValue field is itself
        # an escaped JSON document describing a Pod BackOff event.
        # NOTE: the byte string below must stay exactly as-is — any change to
        # the escaping alters the fixture.
        msg = b'{"EventValue":"{\\n \\"metadata\\": {\\n \\"name\\": \\"etcd-operator-562633149-vvr85.149bd41846d603d4\\",\\n \\"namespace\\": \\"default\\",\\n \\"selfLink\\": \\"/api/v1/namespaces/default/events/etcd-operator-562633149-vvr85.149bd41846d603d4\\",\\n \\"uid\\": \\"09f904cd-dff1-11e6-bd3e-005056923a7e\\",\\n \\"resourceVersion\\": \\"21782526\\",\\n \\"creationTimestamp\\": \\"2017-01-21T15:48:22Z\\"\\n },\\n \\"involvedObject\\": {\\n \\"kind\\": \\"Pod\\",\\n \\"namespace\\":\\"default\\",\\n \\"name\\": \\"etcd-operator-562633149-vvr85\\",\\n \\"uid\\":\\"a5f12e21-de53-11e6-bd3e-005056923a7e\\",\\n \\"apiVersion\\": \\"v1\\",\\n \\"resourceVersion\\":\\"21339961\\",\\n \\"fieldPath\\": \\"spec.containers{etcd-operator}\\"\\n },\\n \\"reason\\": \\"BackOff\\",\\n\\"message\\": \\"Back-off pulling image \\\\\\"10.58.9.201:5000/dc/etcd-operator:latest\\\\\\"\\",\\n \\"source\\":{\\n \\"component\\": \\"kubelet\\",\\n \\"host\\": \\"10.58.9.212\\"\\n },\\n \\"firstTimestamp\\":\\"2017-01-21T15:48:22Z\\",\\n \\"lastTimestamp\\": \\"2017-01-22T07:10:28Z\\",\\n \\"count\\": 3955,\\n \\"type\\": \\"Normal\\"\\n}","EventTimestamp":"2017-01-22T07:10:28Z","EventTags":{"eventID":"09f904cd-dff1-11e6-bd3e-005056923a7e","hostname":"10.58.9.212","pod_id":"a5f12e21-de53-11e6-bd3e-005056923a7e","pod_name":"etcd-operator-562633149-vvr85"}}'
        # Expected line-protocol: tags taken from involvedObject/source, the
        # message as a field, and the lastTimestamp as a Unix epoch (seconds).
        expected_msg = ['events,kind=Pod,namespace_name=default,object_name=etcd-operator-562633149-vvr85,reason=BackOff,hostname="10.58.9.212" message="Back-off pulling image \\"10.58.9.201:5000/dc/etcd-operator:latest\\"" 1485069028']
        encoded_message = self.encoder.encode(msg)
        self.assertEqual(encoded_message, expected_msg)
|
nilq/baby-python
|
python
|
import os
import numpy as np
from PIL import Image
import subprocess
import cv2
def vision():
    """Capture a webcam frame and locate the centroid of its bright spot.

    Grabs one frame from camera 0, saves it to disk, masks out the bottom
    200 rows, then averages the coordinates of every pixel whose grayscale
    value exceeds 220.

    Returns:
        list[float]: ``[x, y]`` in meters, scaled so the frame half-width
        maps to 0.20 m; x is measured from the image center, y from the top
        edge (asymmetric on purpose in the original calibration —
        TODO confirm).

    Raises:
        IOError: if the webcam cannot be opened.
        ValueError: if no pixel exceeds the brightness threshold (previously
            this crashed with an uninformative ZeroDivisionError).
    """
    output = False  # False: disable display output, True: enable display output
    path = r"pic_5.jpeg"

    cap = cv2.VideoCapture(0)
    # Check if the webcam is opened correctly
    if not cap.isOpened():
        raise IOError("Cannot open webcam")
    try:
        ret, frame = cap.read()
        cv2.imwrite(path, frame)
    finally:
        # Fix: the capture handle was never released, leaking the camera
        # device between calls.
        cap.release()

    imcolor = Image.open(path)
    im = imcolor.convert('L')  # grayscale copy used for thresholding
    pixel = im.load()

    x = 0
    y = 0
    nb = 0
    for i in range(im.size[0]):
        for j in range(im.size[1]):
            if j > (im.size[1] - 200):
                # Black out the bottom 200 rows so they never pass the
                # brightness test below.
                im.putpixel((i, j), 0)
            elif pixel[i, j] > 220:
                x += i
                y += j
                nb += 1
            # (The original also guarded j < 0, i < 0 and i > width, but
            # those conditions can never be true inside these loops.)

    if nb == 0:
        # Fix: without this guard the division below raised ZeroDivisionError
        # whenever no pixel was bright enough.
        raise ValueError("No pixel brighter than threshold 220 found in frame")

    x = int(x / nb)
    y = int(y / nb)
    # [x, y] in meters, origin at the A axis.
    coord = [(0.20 * (x - (im.size[0] / 2)) / (im.size[0] / 2)),
             (0.20 * (y) / (im.size[1] / 2))]

    if output:
        # Paint a 20x20 red marker at the detected centroid for debugging.
        for i in range(x - 10, x + 10, 1):
            for j in range(y - 10, y + 10, 1):
                imcolor.putpixel((i, j), (255, 0, 0))
        imcolor.show()

    return coord
# Script entry point: run one vision pass and report completion.
if __name__ == '__main__':
    vision()
    print("Done with Fred's vision")
|
nilq/baby-python
|
python
|
# Generate the coordinates along the perimeter of a rectangle.
import numpy as np
from skimage.draw import rectangle_perimeter
# Demo: draw the outline of the rectangle spanning (2, 3)..(3, 4) onto a
# small blank image. rectangle_perimeter returns the row/column indices of
# the perimeter pixels, clipped to the image shape.
img = np.zeros((5, 6), dtype=np.uint8)
start = (2, 3)
end = (3, 4)
rr, cc = rectangle_perimeter(start, end=end, shape=img.shape)
img[rr, cc] = 1  # mark perimeter pixels
print(img)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import logging
from openstack import exceptions as openstack_exception
from cinderclient import client as volume_client
from cinderclient import exceptions as cinder_exception
import oslo_messaging
from oslo_config import cfg
from BareMetalControllerBackend.conf.env import env_config
from common import exceptions as exc
from common import utils
LOG = logging.getLogger(__name__)
DEFAULT_URL = None
TRANSPORTS = {}
def get_transport(url, optional=False, cache=True, exchange='vianet_guest'):
    """Return an oslo.messaging transport, cached per (url, exchange) pair.

    :param url: transport URL; falls back to DEFAULT_URL when falsy.
    :param optional: when True and no explicit url was given, swallow
        transport-load failures and return None instead of raising.
    :param cache: when True, reuse/populate the module-level TRANSPORTS map.
    :param exchange: messaging exchange to set as the control default.
    :return: an oslo.messaging transport, or None on optional failure.
    """
    global TRANSPORTS, DEFAULT_URL
    # Cache key combines the effective URL and the exchange, so the same
    # broker can host several exchanges with separate transports.
    cache_key = url or DEFAULT_URL
    cache_key = '%s_%s' % (cache_key, exchange)
    transport = TRANSPORTS.get(cache_key)
    if not transport or not cache:
        try:
            oslo_messaging.set_transport_defaults(exchange)
            transport = oslo_messaging.get_transport(cfg.CONF, url)
        except (oslo_messaging.InvalidTransportURL,
                oslo_messaging.DriverLoadFailure):
            if not optional or url:
                # NOTE(sileht): oslo_messaging is configured but unloadable
                # so reraise the exception
                raise
            return None
        else:
            if cache:
                TRANSPORTS[cache_key] = transport
    return transport
class BaremetalGuestApi(object):
    """RPC client talking to the in-guest agent of a baremetal instance.

    Each instance's agent listens on a topic equal to its guest id; all
    calls are synchronous RPC ``call``s over the configured guest exchange.
    """

    def __init__(self, topic):
        # topic doubles as both the messaging topic and the server name,
        # i.e. the guest agent identifier.
        self.topic = topic
        transport = get_transport(env_config.guest_transport_url,
                                  exchange=env_config.guest_exchange)
        target = oslo_messaging.Target(exchange=env_config.guest_exchange,
                                       server=self.topic,
                                       topic=self.topic)
        self.client = oslo_messaging.RPCClient(transport, target)

    def get_guest_connector(self):
        """Fetch the volume connector properties reported by the guest."""
        ctxt = {}
        return self.client.call(ctxt, method='get_guest_connector')

    def guest_connect_volume(self, attachments):
        """
        Rpc client to guest
        :param attachments: cinder attachments
        :return:
        """
        ctxt = {}
        connection = attachments['connection_info']
        return self.client.call(ctxt, method='guest_connect_volume',
                                connection=connection)

    def guest_deconnect_volume(self, attachments):
        """Ask the guest to disconnect a volume (inverse of connect).

        :param attachments: cinder attachment dict with 'connection_info'.
        """
        ctxt = {}
        connection = attachments['connection_info']
        return self.client.call(ctxt, method='guest_deconnect_volume',
                                connection=connection)
@utils.check_instance_state(vm_state=['active'])
def baremetal_attach_volume(server, volume, openstack_client):
    """
    Baremetal attach volume
    :param openstack_client: openstack client
    :param server: the server object get by server id
    :param volume: volume object get by volume id
    :return: attachments
    """
    if volume.status != 'available':
        raise exc.VolumeInvalidState(state=volume.status)
    # The guest agent topic is stored in server metadata at provision time.
    guest_id = server.metadata.get('guest_id')
    if not guest_id:
        raise exc.GuestAgentTopicNotFound
    guest_client = BaremetalGuestApi(guest_id)
    connector_properties = guest_client.get_guest_connector()
    server_id = server.id
    volume_id = volume.id
    # Cinder microversion 3.44 is required for the attachments API.
    cinder = volume_client.Client('3.44', session=openstack_client.session)
    info = cinder.attachments.create(volume_id, connector_properties, server_id)
    try:
        connection = info['connection_info']
        # now we only support ISCSI
        if connection['driver_volume_type'].lower() != 'iscsi':
            raise exc.ProtocolNotSupported
        # Ask the in-guest agent to log into the target; then tell cinder
        # the attachment is complete.
        device_info = guest_client.guest_connect_volume(info)
        cinder.attachments.complete(info['connection_info']['attachment_id'])
        return device_info
    except Exception as e:
        # Roll back the cinder attachment record on any failure past create.
        attachment_id = info.get('connection_info').get('attachment_id')
        cinder.attachments.delete(attachment_id)
        raise e
@utils.check_instance_state(vm_state=['active'])
def baremetal_detach_volume(server, volume_id, openstack_client, attachment_uuid=None):
    """
    Baremetal detach volume
    :param openstack_client: openstack client
    :param server: the server object get by server id
    :param volume_id: volume id
    :param attachment_uuid: optional attachment to detach; resolved
        automatically when the volume has exactly one attachment
    :return: None
    """
    guest_id = server.metadata.get('guest_id')
    if not guest_id:
        raise exc.GuestAgentTopicNotFound
    guest_client = BaremetalGuestApi(guest_id)
    server_id = server.id
    cinder = volume_client.Client('3.44', session=openstack_client.session)
    if not attachment_uuid:
        # We need the specific attachment uuid to know which one to detach.
        # if None was passed in we can only work if there is one and only
        # one attachment for the volume.
        # Get the list of attachments for the volume.
        search_opts = {'volume_id': volume_id}
        attachments = cinder.attachments.list(search_opts=search_opts)
        if len(attachments) == 0:
            raise exc.NoAttachmentsFound(volume_id=volume_id)
        if len(attachments) == 1:
            attachment_uuid = attachments[0].id
        else:
            # We have more than 1 attachment and we don't know which to use
            raise exc.NeedAttachmentUUID(volume_id=volume_id)
    # Disconnect inside the guest first, then remove the cinder record.
    attachment = cinder.attachments.show(attachment_uuid)
    guest_client.guest_deconnect_volume(attachment.to_dict())
    cinder.attachments.delete(attachment_uuid)
def volume_backup_restore(openstack_client, backup_id, volume_id=None, volume_name=None):
    """Restore a cinder backup, optionally into a given volume or name.

    :param openstack_client: authenticated openstack client (provides session)
    :param backup_id: id of the backup to restore
    :param volume_id: existing volume to restore into, or None
    :param volume_name: name for a newly created volume, or None
    :return: the restore object returned by cinder
    """
    client = volume_client.Client('3.44', session=openstack_client.session)
    return client.restores.restore(backup_id, volume_id, volume_name)
def volume_extend(openstack_client, volume_id, new_size):
    """Extend a cinder volume to *new_size* GiB.

    :param openstack_client: authenticated openstack client (provides session)
    :param volume_id: id of the volume to extend
    :param new_size: target size
    :raises openstack_exception.HttpException: when cinder reports quota
        over-limit; the cinder error is translated to the SDK exception type.
    """
    try:
        # NOTE(review): uses API version '2' while the rest of this module
        # uses '3.44' — presumably intentional; confirm.
        cinder = volume_client.Client('2', session=openstack_client.session)
        volume = cinder.volumes.extend(volume_id, new_size)
        return volume
    except cinder_exception.OverLimit as e:
        # NOTE(review): relies on the exception exposing `.message`
        # (cinderclient sets it); `str(e)` would be the safer accessor.
        raise openstack_exception.HttpException(details=e.message)
|
nilq/baby-python
|
python
|
class nodo_error:
    """A single reported error: position, offending value and description.

    All fields are coerced to strings so they can be concatenated directly
    into report output.
    """

    def __init__(self, linea, columna, valor, descripcion):
        # Coerce everything to text up front.
        self.line, self.column, self.valor, self.descripcion = (
            str(linea),
            str(columna),
            str(valor),
            str(descripcion),
        )
# Module-level registry of collected nodo_error instances.
errores = []
|
nilq/baby-python
|
python
|
import pytest
from telliot_core.apps.core import TelliotCore
from telliot_core.queries.price.spot_price import SpotPrice
from telliot_core.utils.response import ResponseStatus
from telliot_core.utils.timestamp import TimeStamp
@pytest.mark.asyncio
async def test_main(mumbai_cfg):
    """Integration test for TellorFlex oracle contract getters.

    Runs against a live network (chain id 137 = Polygon mainnet,
    80001 = Mumbai testnet) using the mumbai_cfg fixture, and checks that
    each getter returns a plausible value of the expected type.
    """
    async with TelliotCore(config=mumbai_cfg) as core:
        chain_id = core.config.main.chain_id
        flex = core.get_tellorflex_contracts()

        # Governance address differs per chain.
        governance_address = await flex.oracle.get_governance_address()
        if chain_id == 137:
            assert governance_address == "0x2cFC5bCE14862D46fBA3bb46A36A8b2d7E4aC040"
        elif chain_id == 80001:
            # Old one, TODO confirm w/ Tim it switched
            # assert governance_address == "0x0Fe623d889Ad1c599E5fF3076A57D1D4F2448CDe"
            # New one
            assert governance_address == "0x8A868711e3cE97429faAA6be476F93907BCBc2bc"

        stake_amount = await flex.oracle.get_stake_amount()
        assert stake_amount == 10.0
        print(stake_amount)

        # Time of last new value: TimeStamp on success, None on failure.
        tlnv, status = await flex.oracle.get_time_of_last_new_value()
        assert isinstance(status, ResponseStatus)
        if status.ok:
            assert isinstance(tlnv, TimeStamp)
        else:
            assert tlnv is None
        print(tlnv)

        lock = await flex.oracle.get_reporting_lock()
        print(lock)

        # TRB token address differs per chain.
        token_address = await flex.oracle.get_token_address()
        if chain_id == 137:
            assert token_address == "0xE3322702BEdaaEd36CdDAb233360B939775ae5f1"
        elif chain_id == 80001:
            assert token_address == "0x45cAF1aae42BA5565EC92362896cc8e0d55a2126"

        total_stake = await flex.oracle.get_total_stake_amount()
        print(f"Total Stake: {total_stake}")

        staker_info, status = await flex.oracle.get_staker_info(core.get_account().address)
        assert isinstance(status, ResponseStatus)
        if status.ok:
            for info in staker_info:
                assert isinstance(info, int)
        else:
            assert staker_info is None

        # Count of submitted values for the BTC/USD spot price query.
        # NOTE(review): method name has a typo ("qeury") but it matches the
        # project API being called.
        q = SpotPrice(asset="btc", currency="USD")
        count, status = await flex.oracle.get_new_value_count_by_qeury_id(q.query_id)
        assert isinstance(status, ResponseStatus)
        if status.ok:
            assert isinstance(count, int)
        else:
            assert count is None
|
nilq/baby-python
|
python
|
from swockets import swockets, SwocketError, SwocketClientSocket, SwocketHandler
# Minimal interactive swockets server: read lines from stdin and forward
# each one (wrapped as {"message": ...}) to the first connected client.
handle = SwocketHandler()
server = swockets(swockets.ISSERVER, handle)
handle.sock = server
while(True):
    # NOTE(review): raw_input is Python 2 only — this script presumably
    # targets Python 2; under Python 3 this would need input().
    user_input = {"message":raw_input("")}
    # Only send when at least one client is connected; input typed with no
    # clients is silently dropped.
    if len(server.clients) > 0:
        server.send(user_input, server.clients[0], server.clients[0].sock)
|
nilq/baby-python
|
python
|
from tkinter import *
# Welcome screen for the Rythmic Auditory Device GUI: a single label plus a
# "Next" button that tears this window down and loads the calibration page.
root = Tk()
root.geometry('800x800')
root.title('Rythmic Auditory Device')
root.configure(background="#ececec")
f = ("Times bold", 54)  # shared font for all widgets on this page


def next_page():
    """Go to next page of GUI

    Function destroys current calibration page and moves on to next main page.
    """
    root.destroy()
    # Importing the module runs its top-level GUI code, which builds the
    # next window.
    import calibration


Label(
    root,
    text="WELCOME",
    padx=20,
    pady=20,
    bg='#ffc0cb',
    font=f
).pack(expand=True, fill=BOTH)

Button(
    root,
    text="Next",
    font=f,
    command=next_page
).pack(fill=X, expand=TRUE, side=LEFT)

root.mainloop()
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.