max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
Game/MapClasses.py | mrElnekave/Hollow-Valley | 0 | 6620751 | <gh_stars>0
import pygame
import objects
import random
import math
import time
import webbrowser
import Abilities
from BasicClasses import Obj
from images import create_path
import rubato as rb
# Quest System Steps
# 1) Somewhere to remember all of our quests - List
# 2) A format to save our quests that has the completions state and the instructions - Class
# Things the quests need to know: Instructions, Quest complete state
# 3) A way to display our quests, either in pause menu or on the UI - A part of gameFunctions
# 4) Creating all of our quests main and side - A document/file or somthing
# Reset Main Boss Functionality
# 1) Pause our other functionality - State machine
# 2) Setup a
# Chunks of the map
class Chunk:
def __init__(self, location, image, size, chunk_type: str):
self.location = location
self.contents = []
self.image = image
self.image = pygame.transform.scale(self.image, size)
self.rect = self.image.get_rect()
self.nightOverlay = pygame.Surface(objects.size)
self.nightOverlay.fill((0,0,50))
self.chunk_type = chunk_type
def render(self):
objects.display.blit(self.image, self.rect)
for resource in self.contents:
resource.render()
if self.location[0] is not objects.mapWidth and objects.daytime is False:
self.nightOverlay.set_alpha(100)
objects.display.blit(self.nightOverlay, (0,0))
def update(self):
for thing in self.contents:
thing.update()
def __repr__(self):
if self.location[0]== objects.mapWidth:
return f"{self.chunk_type}"
return f"{self.location} {self.chunk_type}"
class Quest:
def __init__(self, text, condition, name): # all inputs are strings
self.text = text
self.condition = condition
self.name = name
self.data = 0
self.complete = False
def render(self):
pass
def update(self):
if eval(self.condition):
self.complete = True
# Resource Class
class Resource(Obj):
def __init__(self, item, quantity, location):
super().__init__(pygame.image.load(create_path("Gold Coin.png")), location)
self.item = item
self.quantity = quantity
self.type = "resource"
def update(self):
if objects.player.rect.colliderect(self.rect):
objects.currentChunk.contents.remove(self)
objects.resourceAmounts["coins"] += 10
class Obstacle(Obj):
def __init__(self, image, location):
super().__init__(image, location)
self.type = "obstacle"
self.interact = ["arrow"]
def update(self):
if self.rect.colliderect(objects.player.rect):
objects.player.hit_this_frame = True
class MovementBarrier(Obj):
def __init__(self, image, location):
super().__init__(image, location)
self.type = "obstacle"
def update(self):
if self.rect.colliderect(objects.player.rect):
objects.player.hit_this_frame = True
class Button(Obj):
def __init__(self, image, location, effects):
super().__init__(image, location)
self.effects = effects
def update(self):
if pygame.mouse.get_pressed(3)[0]:
mousePos = objects.mapMousePos(pygame.mouse.get_pos())
if self.rect.collidepoint(mousePos):
for action in self.effects:
exec(action)
class Button_func(Obj):
def __init__(self, image, location, to_run):
super().__init__(image, location)
self.to_run = to_run
def update(self):
if pygame.mouse.get_pressed(3)[0]:
mousePos = objects.mapMousePos(pygame.mouse.get_pos())
if self.rect.collidepoint(mousePos):
self.to_run()
class NPC(Obj):
def __init__(self, image, location, effects):
super().__init__(image, location)
self.effects = effects
self.type = "NPC" #TODO: wait till up before being pressed down
self.canClick = True
def update(self):
if self.canClick:
if pygame.mouse.get_pressed(3)[0]:
mousePos = objects.mapMousePos(pygame.mouse.get_pos())
if self.rect.collidepoint(mousePos):
objects.NPC_clicked = True
for action in self.effects:
exec(action)
self.canClick = False
rb.Time.delayed_call(1000, self.setCanClick)
def setCanClick(self):
self.canClick = True
def blit_alpha(target, source, location, opacity):
x = location[0]
y = location[1]
temp = pygame.Surface((source.get_width(), source.get_height())).convert()
temp.blit(target, (-x, -y))
temp.blit(source, (0, 0))
temp.set_alpha(opacity)
target.blit(temp, location)
class UpdateLog(Obj):
def __init__(self, location, archives):
self.capsule = pygame.Surface((175, 25))
self.capsule.fill((0, 0, 0))
self.exclamation_black = pygame.image.load(create_path("Notification Button.png"))
self.exclamation_white = pygame.image.load(create_path("White Notification Button.png"))
self.exclamation = self.exclamation_black
self.image = pygame.Surface((1, 1)) # just for super
self.text = None
self.tab_start = rb.Vector(100, 100)
super().__init__(self.image, location)
self.message: str = ""
self.archived = archives
def in_relation(self, x, y):
return (self.rect.x + x, self.rect.y + y)
#if player in dungeon: return
def render(self):
if objects.player.chunk[0] != -1:
blit_alpha(objects.display, self.capsule, self.in_relation(25, 0), 10)
if self.text != None:
objects.display.blit(self.text, self.in_relation(25, 0))
objects.display.blit(self.exclamation, self.in_relation(0, 0))
def tabRender(self):
for i in range(len(self.archived)):
message = self.clamp_message(self.archived[i])
image = objects.myFont.render(message, True, (200,0,0))
objects.display.blit(self.image, self.tab_in_relation(0, i*objects.myFont.get_height()))
def tab_in_relation(self, x, y):
return (self.tab_start.x + x, self.tab_start.y + y)
def regenerate_image(self):
message = self.clamp_message(self.message)
self.text = objects.myFont.render(message, True, (200,0,0))
def clamp_message(self, message):
text_width, text_height = objects.myFont.size(message)
while text_width > self.capsule.get_size()[0] - 5:
text_width, text_height = objects.myFont.size(message)
message = message[:-1]
return message
def addMessage(self, message: str):
self.archived.append(message)
self.message = message
self.regenerate_image()
# def todo():
# self.log.remove(message)
# self.regenerate_image()
# print("TODONE")
# rb.Time.delayed_call(5 * 1000, todo)
class Building(Obj):
def __init__(self, image, location, subchunk, doorSize):
super().__init__(image, location)
self.subchunk = subchunk
self.type = "building"
self.doorRect = pygame.Rect((0,0), doorSize)
self.doorRect.midbottom = self.rect.midbottom
def update(self):
if objects.player.rect.colliderect(self.doorRect):
objects.player.chunk = (objects.mapWidth, self.subchunk)
objects.player.rect.center = (250, 425)
#for obj in objects.currentChunk.contents:
# if obj.type in self.interact:
# if self.rect.colliderect(obj.rect):
# if obj.type == "projectile":
# objects.currentChunk.contents.remove(obj)
#if self.rect.colliderect(objects.player.rect):
# if objects.player.rect.center = objects.player.last_valid_position
class CollisionButton(Obj):
def __init__(self, image, location, effects):
self.effects = effects
super().__init__(image, location)
self.type = "collisionButton"
def update(self):
if objects.player.rect.colliderect(self.rect):
objects.player.cancel_abilities()
for effect in self.effects:
exec(effect)
class QuestionCube(Obj):
boosts = [
["objects.player.currentHealth += 25", 25, "25 health"],
["objects.resourceAmounts['ghostEnergy'] += 25", 50,"25 ghost energy"],
["objects.moveSpeed = 10; rb.Time.delayed_call(10*1000, QuestionCube.decrement_speed)", 60,"a speed boost"],
["objects.resourceAmounts['purple'] += 1", 65,"a purple potion"],
["objects.resourceAmounts['red'] += 1", 67,"a red potion"],
["objects.resourceAmounts['blue'] += 1", 69,"a blue potion"],
["objects.resourceAmounts['gold'] += 1", 70,"a gold potion"],
]
count = 0
@staticmethod
def decrement_speed():
objects.moveSpeed = 5
def __init__(self, location):
image = pygame.image.load(create_path("QuestionCube.png"))
super().__init__(image, location)
self.type = "qcube"
def update(self):
if objects.player.rect.colliderect(self.rect):
objects.currentChunk.contents.remove(self)
QuestionCube.count += 1
if QuestionCube.count >= 5:
objects.gamestate = 3
QuestionCube.count -= 5
objects.currentProblem = random.choice(objects.problems)
@staticmethod
def randBoost():
choice = random.randint(1,70)
for boost in QuestionCube.boosts:
if choice <= boost[1]:
exec(boost[0])
objects.update_log.addMessage("You used 5 question cubes and got..."+boost[2]+"!")
return
| import pygame
import objects
import random
import math
import time
import webbrowser
import Abilities
from BasicClasses import Obj
from images import create_path
import rubato as rb
# Quest System Steps
# 1) Somewhere to remember all of our quests - List
# 2) A format to save our quests that has the completions state and the instructions - Class
# Things the quests need to know: Instructions, Quest complete state
# 3) A way to display our quests, either in pause menu or on the UI - A part of gameFunctions
# 4) Creating all of our quests main and side - A document/file or somthing
# Reset Main Boss Functionality
# 1) Pause our other functionality - State machine
# 2) Setup a
# Chunks of the map
class Chunk:
def __init__(self, location, image, size, chunk_type: str):
self.location = location
self.contents = []
self.image = image
self.image = pygame.transform.scale(self.image, size)
self.rect = self.image.get_rect()
self.nightOverlay = pygame.Surface(objects.size)
self.nightOverlay.fill((0,0,50))
self.chunk_type = chunk_type
def render(self):
objects.display.blit(self.image, self.rect)
for resource in self.contents:
resource.render()
if self.location[0] is not objects.mapWidth and objects.daytime is False:
self.nightOverlay.set_alpha(100)
objects.display.blit(self.nightOverlay, (0,0))
def update(self):
for thing in self.contents:
thing.update()
def __repr__(self):
if self.location[0]== objects.mapWidth:
return f"{self.chunk_type}"
return f"{self.location} {self.chunk_type}"
class Quest:
def __init__(self, text, condition, name): # all inputs are strings
self.text = text
self.condition = condition
self.name = name
self.data = 0
self.complete = False
def render(self):
pass
def update(self):
if eval(self.condition):
self.complete = True
# Resource Class
class Resource(Obj):
def __init__(self, item, quantity, location):
super().__init__(pygame.image.load(create_path("Gold Coin.png")), location)
self.item = item
self.quantity = quantity
self.type = "resource"
def update(self):
if objects.player.rect.colliderect(self.rect):
objects.currentChunk.contents.remove(self)
objects.resourceAmounts["coins"] += 10
class Obstacle(Obj):
def __init__(self, image, location):
super().__init__(image, location)
self.type = "obstacle"
self.interact = ["arrow"]
def update(self):
if self.rect.colliderect(objects.player.rect):
objects.player.hit_this_frame = True
class MovementBarrier(Obj):
def __init__(self, image, location):
super().__init__(image, location)
self.type = "obstacle"
def update(self):
if self.rect.colliderect(objects.player.rect):
objects.player.hit_this_frame = True
class Button(Obj):
def __init__(self, image, location, effects):
super().__init__(image, location)
self.effects = effects
def update(self):
if pygame.mouse.get_pressed(3)[0]:
mousePos = objects.mapMousePos(pygame.mouse.get_pos())
if self.rect.collidepoint(mousePos):
for action in self.effects:
exec(action)
class Button_func(Obj):
def __init__(self, image, location, to_run):
super().__init__(image, location)
self.to_run = to_run
def update(self):
if pygame.mouse.get_pressed(3)[0]:
mousePos = objects.mapMousePos(pygame.mouse.get_pos())
if self.rect.collidepoint(mousePos):
self.to_run()
class NPC(Obj):
def __init__(self, image, location, effects):
super().__init__(image, location)
self.effects = effects
self.type = "NPC" #TODO: wait till up before being pressed down
self.canClick = True
def update(self):
if self.canClick:
if pygame.mouse.get_pressed(3)[0]:
mousePos = objects.mapMousePos(pygame.mouse.get_pos())
if self.rect.collidepoint(mousePos):
objects.NPC_clicked = True
for action in self.effects:
exec(action)
self.canClick = False
rb.Time.delayed_call(1000, self.setCanClick)
def setCanClick(self):
self.canClick = True
def blit_alpha(target, source, location, opacity):
x = location[0]
y = location[1]
temp = pygame.Surface((source.get_width(), source.get_height())).convert()
temp.blit(target, (-x, -y))
temp.blit(source, (0, 0))
temp.set_alpha(opacity)
target.blit(temp, location)
class UpdateLog(Obj):
def __init__(self, location, archives):
self.capsule = pygame.Surface((175, 25))
self.capsule.fill((0, 0, 0))
self.exclamation_black = pygame.image.load(create_path("Notification Button.png"))
self.exclamation_white = pygame.image.load(create_path("White Notification Button.png"))
self.exclamation = self.exclamation_black
self.image = pygame.Surface((1, 1)) # just for super
self.text = None
self.tab_start = rb.Vector(100, 100)
super().__init__(self.image, location)
self.message: str = ""
self.archived = archives
def in_relation(self, x, y):
return (self.rect.x + x, self.rect.y + y)
#if player in dungeon: return
def render(self):
if objects.player.chunk[0] != -1:
blit_alpha(objects.display, self.capsule, self.in_relation(25, 0), 10)
if self.text != None:
objects.display.blit(self.text, self.in_relation(25, 0))
objects.display.blit(self.exclamation, self.in_relation(0, 0))
def tabRender(self):
for i in range(len(self.archived)):
message = self.clamp_message(self.archived[i])
image = objects.myFont.render(message, True, (200,0,0))
objects.display.blit(self.image, self.tab_in_relation(0, i*objects.myFont.get_height()))
def tab_in_relation(self, x, y):
return (self.tab_start.x + x, self.tab_start.y + y)
def regenerate_image(self):
message = self.clamp_message(self.message)
self.text = objects.myFont.render(message, True, (200,0,0))
def clamp_message(self, message):
text_width, text_height = objects.myFont.size(message)
while text_width > self.capsule.get_size()[0] - 5:
text_width, text_height = objects.myFont.size(message)
message = message[:-1]
return message
def addMessage(self, message: str):
self.archived.append(message)
self.message = message
self.regenerate_image()
# def todo():
# self.log.remove(message)
# self.regenerate_image()
# print("TODONE")
# rb.Time.delayed_call(5 * 1000, todo)
class Building(Obj):
def __init__(self, image, location, subchunk, doorSize):
super().__init__(image, location)
self.subchunk = subchunk
self.type = "building"
self.doorRect = pygame.Rect((0,0), doorSize)
self.doorRect.midbottom = self.rect.midbottom
def update(self):
if objects.player.rect.colliderect(self.doorRect):
objects.player.chunk = (objects.mapWidth, self.subchunk)
objects.player.rect.center = (250, 425)
#for obj in objects.currentChunk.contents:
# if obj.type in self.interact:
# if self.rect.colliderect(obj.rect):
# if obj.type == "projectile":
# objects.currentChunk.contents.remove(obj)
#if self.rect.colliderect(objects.player.rect):
# if objects.player.rect.center = objects.player.last_valid_position
class CollisionButton(Obj):
def __init__(self, image, location, effects):
self.effects = effects
super().__init__(image, location)
self.type = "collisionButton"
def update(self):
if objects.player.rect.colliderect(self.rect):
objects.player.cancel_abilities()
for effect in self.effects:
exec(effect)
class QuestionCube(Obj):
boosts = [
["objects.player.currentHealth += 25", 25, "25 health"],
["objects.resourceAmounts['ghostEnergy'] += 25", 50,"25 ghost energy"],
["objects.moveSpeed = 10; rb.Time.delayed_call(10*1000, QuestionCube.decrement_speed)", 60,"a speed boost"],
["objects.resourceAmounts['purple'] += 1", 65,"a purple potion"],
["objects.resourceAmounts['red'] += 1", 67,"a red potion"],
["objects.resourceAmounts['blue'] += 1", 69,"a blue potion"],
["objects.resourceAmounts['gold'] += 1", 70,"a gold potion"],
]
count = 0
@staticmethod
def decrement_speed():
objects.moveSpeed = 5
def __init__(self, location):
image = pygame.image.load(create_path("QuestionCube.png"))
super().__init__(image, location)
self.type = "qcube"
def update(self):
if objects.player.rect.colliderect(self.rect):
objects.currentChunk.contents.remove(self)
QuestionCube.count += 1
if QuestionCube.count >= 5:
objects.gamestate = 3
QuestionCube.count -= 5
objects.currentProblem = random.choice(objects.problems)
@staticmethod
def randBoost():
choice = random.randint(1,70)
for boost in QuestionCube.boosts:
if choice <= boost[1]:
exec(boost[0])
objects.update_log.addMessage("You used 5 question cubes and got..."+boost[2]+"!")
return | en | 0.555381 | # Quest System Steps # 1) Somewhere to remember all of our quests - List # 2) A format to save our quests that has the completions state and the instructions - Class # Things the quests need to know: Instructions, Quest complete state # 3) A way to display our quests, either in pause menu or on the UI - A part of gameFunctions # 4) Creating all of our quests main and side - A document/file or somthing # Reset Main Boss Functionality # 1) Pause our other functionality - State machine # 2) Setup a # Chunks of the map # all inputs are strings # Resource Class #TODO: wait till up before being pressed down # just for super #if player in dungeon: return # def todo(): # self.log.remove(message) # self.regenerate_image() # print("TODONE") # rb.Time.delayed_call(5 * 1000, todo) #for obj in objects.currentChunk.contents: # if obj.type in self.interact: # if self.rect.colliderect(obj.rect): # if obj.type == "projectile": # objects.currentChunk.contents.remove(obj) #if self.rect.colliderect(objects.player.rect): # if objects.player.rect.center = objects.player.last_valid_position | 3.110356 | 3 |
MediaComp/pictures.py | paulbuis/MediaComp | 0 | 6620752 | <gh_stars>0
#
# Package: MediaComp
# Module: pictures
# Author: <NAME>, <EMAIL>
#
# Derived from source code, power point slides, and media (pictures, movies, sounds) from
# http://mediacomputation.org created by <NAME> and <NAME>
# licensed under the Creative Commons Attribution 3.0 United States License,
# See: http://creativecommons.org/licenses/by/3.0/us/).
"""
Module docstring goes here!
"""
# module base64 is standard in Python 3: https://docs.python.org/3/library/base64.html
# using base64.b64encode in Picture.to_base64
import base64
# module io is standard in Python 3: https://docs.python.org/3/library/io.html
# using io.BytesIO in Picture.to_base64
import io
import os
import pathlib
import collections.abc
# module typing is standard in Python 3.5+: https://docs.python.org/3/library/typing.html
# used for type hints used in static type checking in PEP 484
# PEP 484 -- Type Hints: https://www.python.org/dev/peps/pep-0484/
# PEP 525 -- Syntax for Variable Annotations: https://www.python.org/dev/peps/pep-0526/
# use mypy for static type checking of Pyhton code: http://mypy-lang.org/
# note that just because a parameter is annotated to be of a specific type, doesn't mean
# that at runtime it will actually be of that type: dynamic checking or casting/conversion
# still needs to be done
import typing
# PIL refers to the Pillow library installed by default in the Anaconda distribution of Python
# PIL is the Python Image Library, Pillow is a fork of PIL
# For documentation on Pillow, see: https://pillow.readthedocs.io/en/stable/
# suppress static type checker complain "error: No library stub file for module 'PIL.Image'"
import PIL.Image # type: ignore
# suppress static type checker complain "error: No library stub file for module 'PIL.ImageDraw'"
import PIL.ImageDraw # type: ignore
# suppress static type checker complain "error: No library stub file for module 'PIL.ImageFont'"
import PIL.ImageFont # type: ignore
# suppress static type checker complain "error: No library stub file for module 'PIL.PyAccess'"
from PIL.PyAccess import PyAccess as PixelAccess # type: ignore
# suppress static type checker complain "error: No library stub file for
# module 'matplotlob.font_manager'"
# suppress static type checker complain "error: No library stub file for module 'matplotlob'"
import matplotlib.font_manager # type: ignore
# The IPython.core.display module is specific to IPython which is used in Jupyter
# See https://ipython.readthedocs.io/en/stable/api/generated/IPython.display.html
# import IPython.core.display
# import the color and file modules in this package
from . import colors
from . import files
# re-export names from the colors module so that consumers of this
# module do not have to import the colors module as well
Colors = colors.Colors
Color = colors.Color
def set_media_path(path: typing.Optional[str] = None) -> bool:
    """
    Forward to :func:`files.set_media_path` so that a module importing
    this module does not also have to import the files module.

    :param Optional[str] path: directory to use for media files, or
        ``None`` to use the files module's default behavior
    :return: the bool result reported by files.set_media_path
        (presumably success -- confirm in the files module)
    :rtype: bool
    """
    result: bool = files.set_media_path(path)
    return result
def media_path(filename: typing.Optional[str]) -> pathlib.Path:
    """
    Forward to :func:`files.media_path` so that a module importing
    this module does not also have to import the files module.

    :param Optional[str] filename: name of a media file to resolve
    :return: the path produced by files.media_path
    :rtype: pathlib.Path
    """
    resolved: pathlib.Path = files.media_path(filename)
    return resolved
# Type aliases used throughout this module
RGB = typing.Tuple[int, int, int]  # (red, green, blue), each channel 0-255
ImageSize = typing.Tuple[int, int]  # (height, width) in pixels (see PILImage.size)
Point = typing.Tuple[int, int]  # (x, y) pixel coordinates
PointSequence = typing.Sequence[Point]  # e.g. a bounding box [(x0, y0), (x1, y1)]
BaseRGB = colors.BaseRGB  # re-exported base type for color-like arguments
PixelInfoTuple = typing.Tuple[Point, RGB]  # position plus color as plain tuples
Transform = typing.Callable[['PixelInfo'], Color]  # per-pixel color function
Transform2 = typing.Callable[[PixelInfoTuple], PixelInfoTuple]  # tuple-based variant
Predicate = typing.Callable[['PixelInfo'], bool]  # per-pixel test
Combine = typing.Callable[['PixelInfo', 'PixelInfo'], Color]  # two-pixel merge function
def type_error_message(fun_name: str, param_name: str, expected: str, actual: typing.Any) -> str:
    """Build the standard TypeError message used by this module's type checks."""
    return (f"In MediaComp.pictures.{fun_name}: {param_name} "
            f"expected a {expected}, actually {type(actual)}")
class PixelInfo(colors.Color):
    """
    A pixel record: an (x, y) position together with a color.

    A PixelInfo holds its own color state (via the colors.Color
    superclass) and is detached from any image; contrast with the
    Pixel subclass, whose color reads go through image pixel data.
    """
    def __init__(self, xy: Point, rgb: typing.Optional[RGB] = None):
        """
        :param Point xy: (x, y) pixel coordinates; each coerced to int
        :param Optional[RGB] rgb: initial (red, green, blue) tuple,
            forwarded to the colors.Color constructor (may be None)
        """
        super().__init__(rgb=rgb)
        self._xy: Point = (int(xy[0]), int(xy[1]))
    # Overrides Color.__repr__
    def __repr__(self) -> str:
        return f"PixelInfo(xy = ({self.x}, {self.y}), " + \
            f"pixel_color = Color(red={self.red}, green={self.green}, blue={self.blue}))"
    # Overrides Color.__str__
    def __str__(self) -> str:
        return f"Pixel(red={self.red}, green={self.green}, " + \
            f"blue={self.blue}, x={self.x}, y={self.y})"
    @property
    def color(self) -> colors.Color:
        """
        the pixel's color, as a new colors.Color object

        :type: colors.Color
        """
        rgb: RGB = self.rgb
        return colors.Color(rgb[0], rgb[1], rgb[2])
    @property
    def x(self) -> int:  # pylint: disable=invalid-name
        """
        x coordinate of the pixel

        :type: int
        """
        return int(self._xy[0])
    @property
    def y(self) -> int:  # pylint: disable=invalid-name
        """
        y coordinate of the pixel

        :type: int
        """
        return int(self._xy[1])
class Pixel(PixelInfo):
    """
    A live pixel inside a picture.

    Unlike PixelInfo, a Pixel does not keep its own copy of the color:
    every color read and write goes through the PIL pixel-access
    object supplied at construction, so assigning to red, green, blue,
    rgb, or color changes the underlying image immediately.
    """
    def __init__(self, xy: Point, pixel_access: PixelAccess):
        """
        :param Point xy: (x, y) coordinates of this pixel
        :param PixelAccess pixel_access: PIL pixel-access object used
            for all reads and writes of this pixel's color
        """
        self.__pixel_access: PixelAccess = pixel_access
        super().__init__(xy)
    # Overrides PixelInfo.__str__ method
    def __str__(self) -> str:
        return f"Pixel(xy=({self.x}, {self.y}), Color(r={self.red}, g={self.green}, b={self.blue}))"
    @property
    def color(self) -> colors.Color:
        """
        the pixel's current color, read from the image,
        as a new colors.Color object

        :type: colors.Color
        """
        rgb: RGB = self.__pixel_access[self._xy]
        return colors.Color(rgb[0], rgb[1], rgb[2])
    @color.setter
    def color(self, rgb: colors.BaseRGB) -> None:
        # writes through the rgb setter below, so the image is updated
        if not isinstance(rgb, colors.BaseRGB):
            raise TypeError
        self.rgb = rgb.rgb
    # Overrides BaseRGB.red property getter
    @property
    def red(self) -> int:
        """
        red component (0-255), read from the image

        :type: int
        """
        return self.rgb[0]
    @red.setter
    def red(self, value: int) -> None:
        # clamp to the valid channel range before writing to the image
        value = min(255, max(0, int(value)))
        rgb: RGB = self.__pixel_access[self._xy]
        self.__pixel_access[self._xy] = (value, rgb[1], rgb[2])
    # Overrides BaseRGB.green property getter
    @property
    def green(self) -> int:
        """
        green component (0-255), read from the image

        :type: int
        """
        return self.rgb[1]
    @green.setter
    def green(self, value: int) -> None:
        # clamp to the valid channel range before writing to the image
        value = min(255, max(0, int(value)))
        rgb: RGB = self.__pixel_access[self._xy]
        self.__pixel_access[self._xy] = (rgb[0], value, rgb[2])
    # Overrides BaseRGB.blue property getter
    @property
    def blue(self) -> int:
        """
        blue component (0-255), read from the image

        :type: int
        """
        return self.rgb[2]
    @blue.setter
    def blue(self, value: int) -> None:
        # clamp to the valid channel range before writing to the image
        value = min(255, max(0, int(value)))
        rgb: RGB = self.__pixel_access[self._xy]
        self.__pixel_access[self._xy] = (rgb[0], rgb[1], value)
    # Overrides BaseRGB.rgb property getter
    @property
    def rgb(self) -> RGB:
        """
        (red, green, blue) tuple (each 0-255), read from the image

        :type: RGB
        """
        rgb: RGB = self.__pixel_access[self._xy]
        return int(rgb[0]), int(rgb[1]), int(rgb[2])
    @rgb.setter
    def rgb(self, value: RGB) -> None:
        # note: unlike the per-channel setters, no clamping is applied here
        self.__pixel_access[self._xy] = value
class TextStyle:
    """
    A text-drawing style: a font family name, an emphasis string, and
    a point size, together with the TrueType font they select.
    """
    @staticmethod
    def find_font_file(query: str) -> typing.Optional[str]:
        """
        Search the installed system fonts for one whose file name
        contains query.

        :param str query: substring to look for in font file base names
        :return: path of the first matching font file, or None when no
            system font matches
        """
        candidates = [font_path
                      for font_path in matplotlib.font_manager.findSystemFonts()
                      if query in os.path.basename(font_path)]
        return candidates[0] if candidates else None
    def __init__(self, font_name: str, emphasis: str, size: float):
        """
        :param font_name: font family name to search the system for
        :param emphasis: emphasis such as 'bold' or 'italic'
        :param size: font size in points
        """
        # TODO: emphasis still ignored in font searching
        self.__emphasis = str(emphasis)
        self.__size = float(size)
        self.__font_name = str(font_name)
        font_file: typing.Optional[str] = TextStyle.find_font_file(self.__font_name)
        self.__font: PIL.ImageFont.ImageFont = PIL.ImageFont.truetype(font_file, self.__size)
    @property
    def font_name(self) -> str:
        """
        name of font used to draw with

        :type: str
        """
        return self.__font_name
    @property
    def font(self) -> PIL.ImageFont.ImageFont:
        """
        the loaded PIL font object

        :type: PIL.ImageFont.ImageFont
        """
        return self.__font
    @property
    def emphasis(self) -> str:
        """
        kind of emphasis to use, 'bold', 'italic', 'bold + italic'

        :type: str
        """
        return self.__emphasis
    @property
    def size(self) -> float:
        """
        size of font in points

        :type: float
        """
        return self.__size
# class PILImage has no pixel-level operations and is agnostic about how many and what kind
# of channels are in the image. Such things will be found in subclasseses of PILImage
class PILImage:
"""
Class level docstring
"""
def __init__(self, pil_image: PIL.Image.Image):
self._pil_image = pil_image
@property
def height(self) -> int:
"""
height of image in pixels
:type: int
"""
image_height = self._pil_image.height
return int(image_height)
@property
def width(self) -> int:
"""
width of image in pixels
:type: int
"""
image_width = self._pil_image.width
return int(image_width)
@property
def size(self) -> ImageSize:
"""
(height, width) tuple
:type: ImageSize
"""
return self.height, self.width
# overriden by Picture subclass
def copy(self) -> 'PILImage':
"""
Makes a deep copy of this object
:return: the copy
:rtype PILImage:
"""
return PILImage(self._pil_image.copy())
def set_color(self, color: colors.BaseRGB = colors.Colors.black):
"""
Write better docstring
:param color:
:return:
"""
draw = PIL.ImageDraw.Draw(self._pil_image)
draw.rectangle([(0, 0), (self.width, self.height)], fill=color)
def copy_into(self, big_picture: 'PILImage', left: int, top: int):
"""
Write better docstring
:param big_picture:
:param int left:
:param int top:
:return:
"""
big_picture._pil_image.paste(self._pil_image, (left, top)) # pylint: disable=protected-access
def add_arc(self, x: int, y: int, # pylint: disable=invalid-name; # pylint: disable=too-many-arguments
width: int, height: int,
start: float, angle: float,
color: colors.BaseRGB = colors.Colors.black
) -> None:
"""
Write better docstring
:param int x:
:param int y:
:param int width:
:param int height:
:param float start:
:param float angle:
:param colors.Color color:
:raises TypeError:
"""
x = int(x) # pylint: disable=invalid-name
y = int(y) # pylint: disable=invalid-name
width = int(width)
height = int(height)
start = float(start)
angle = float(angle)
if not isinstance(color, colors.BaseRGB):
raise TypeError(type_error_message("PILImage.add_arc", "c", "Color", color))
fill_color: RGB = color.rgb
draw = PIL.ImageDraw.Draw(self._pil_image)
bounding_box: PointSequence = [(x, y), (x + width, y + height)]
draw.arc(bounding_box, start=start, end=start+angle, fill=fill_color, width=1)
def add_arc_filled(self, x: int, y: int, # pylint: disable=invalid-name; # pylint: disable=too-many-arguments
width: int, height: int,
start: float, angle: float,
color: colors.BaseRGB = colors.Colors.black
) -> None:
"""
Write better docstring
:param int x:
:param int y:
:param int width:
:param int height:
:param float start:
:param float angle:
:param colors.Color color:
:raises TypeError:
"""
x = int(x) # pylint: disable=invalid-name
y = int(y) # pylint: disable=invalid-name
width = int(width)
height = int(height)
start = float(start)
angle = float(angle)
if not isinstance(color, colors.BaseRGB):
raise TypeError(type_error_message("PILImage.add_arc_filled", "color", "Color", color))
fill_color: RGB = color.rgb
bounding_box: PointSequence = [(x, y), (x + width, y + height)]
draw = PIL.ImageDraw.Draw(self._pil_image)
draw.pieslice(bounding_box, start=start, end=start+angle, fill=fill_color, width=1)
def add_line(self, start_x: int, start_y: int, # pylint: disable=too-many-arguments
width: int, height: int,
color: colors.BaseRGB = colors.Colors.black
) -> None:
"""
Write better docstring
:param int start_x:
:param int start_y:
:param int width:
:param int height:
:param colors.Color color:
:raises TypeError:
"""
start_x = int(start_x)
start_y = int(start_y)
width = int(width)
height = int(height)
if not isinstance(color, colors.BaseRGB):
raise TypeError(type_error_message("PILImage.add_line", "color", "Color", color))
bounding_box: PointSequence = [(start_x, start_y), (start_x + width, start_y + height)]
draw = PIL.ImageDraw.Draw(self._pil_image)
draw.line(bounding_box, fill=color.rgb, width=1)
def add_oval(self, center_x: int, center_y: int, # pylint: disable=too-many-arguments
width: int, height: int,
color: colors.BaseRGB = colors.Colors.black
) -> None:
"""
Write better docstring
:param int center_x:
:param int center_y:
:param int width:
:param int height:
:param colors.Color color:
:raises TypeError:
"""
center_x = int(center_x)
center_y = int(center_y)
width = int(width)
height = int(height)
if not isinstance(color, colors.BaseRGB):
raise TypeError(type_error_message("PILImage.add_oval", "color", "Color", color))
bounding_box: PointSequence = [(center_x, center_y), (center_x + width, center_y + height)]
draw = PIL.ImageDraw.Draw(self._pil_image)
draw.ellipse(bounding_box, outline=color.rgb, width=1)
def add_oval_filled(self, center_x: int, center_y: int, # pylint: disable=too-many-arguments
width: int, height: int,
color: colors.BaseRGB = colors.Colors.black
) -> None:
"""
Write better docstring
:param int center_x:
:param int center_y:
:param int width:
:param int height:
:param colors.Color color:
:raises TypeError:
"""
center_x = int(center_x)
center_y = int(center_y)
width = int(width)
height = int(height)
if not isinstance(color, colors.BaseRGB):
raise TypeError(type_error_message("PILImage.add_oval_filled", "color", "Color", color))
left_top = (center_x - width//2, center_y - height//2)
right_bottom = (center_x + width//2, center_y + width//2)
bounding_box = [left_top, right_bottom]
draw = PIL.ImageDraw.Draw(self._pil_image)
draw.ellipse(bounding_box, outline=color.rgb, fill=color.rgb, width=1)
def add_rect(self, left: int, top: int, # pylint: disable=too-many-arguments
width: int, height: int,
color: colors.BaseRGB = colors.Colors.black
) -> None:
"""
Takes a picture, a starting (left, top) position (two numbers),
a width and height (two more numbers, four total),
and (optionally) a color as input. Adds a rectangular outline of the
specified dimensions using the (left,top) as the upper left corner.
Default color is black.
Wrapped by for :py:func:`.jes.addRect` function.
:param int left:
:param int top:
:param int width:
:param int height:
:param colors.Color color:
:raises TypeError:
"""
left = int(left)
top = int(top)
width = int(width)
height = int(height)
if not isinstance(color, colors.BaseRGB):
raise TypeError(type_error_message("PILImage.add_rect", "color", "Color", color))
draw = PIL.ImageDraw.Draw(self._pil_image)
draw.rectangle([(left, top), (left + width, top + height)], outline=color.rgb, width=1)
def add_rect_filled(self, left: int, top: int, width: int, height: int,  # pylint: disable=too-many-arguments
                    color: colors.BaseRGB = colors.Colors.black
                    ) -> None:
    """
    Draw a filled width x height rectangle whose upper-left corner is
    at (left, top).  Default color is black.

    :param int left: x coordinate of the upper-left corner
    :param int top: y coordinate of the upper-left corner
    :param int width: rectangle width in pixels
    :param int height: rectangle height in pixels
    :param colors.Color color: fill color
    :raises TypeError: if color is not a Color
    """
    left, top = int(left), int(top)
    width, height = int(width), int(height)
    if not isinstance(color, colors.BaseRGB):
        raise TypeError(type_error_message("PILImage.add_rect_filled", "color", "Color", color))
    corners = [(left, top), (left + width, top + height)]
    PIL.ImageDraw.Draw(self._pil_image).rectangle(corners, fill=color.rgb, width=1)
def add_text(self, x_pos: int, y_pos: int, text: str,
             color: colors.BaseRGB = colors.Colors.black
             ) -> None:
    """
    Draw text with its upper-left corner at (x_pos, y_pos) in the
    default PIL font.  Default color is black.

    :param int x_pos: x coordinate of the text's upper-left corner
    :param int y_pos: y coordinate of the text's upper-left corner
    :param str text: the string to draw
    :param colors.Color color: text color
    :raises TypeError: if color is not a Color
    """
    x_pos, y_pos = int(x_pos), int(y_pos)
    text = str(text)
    if not isinstance(color, colors.BaseRGB):
        raise TypeError(type_error_message("PILImage.add_text", "color", "Color", color))
    PIL.ImageDraw.Draw(self._pil_image).text((x_pos, y_pos), text, fill=color.rgb)
def add_text_with_style(self, x_pos: int, y_pos: int,  # pylint: disable=too-many-arguments
                        text: str, style: TextStyle,
                        color: colors.BaseRGB = colors.Colors.black
                        ) -> None:
    """
    Draw text with its upper-left corner at (x_pos, y_pos) using the
    font described by style.  Default color is black.

    :param int x_pos: x coordinate of the text's upper-left corner
    :param int y_pos: y coordinate of the text's upper-left corner
    :param str text: the string to draw
    :param TextStyle style: font/size to draw with
    :param colors.Color color: text color
    :raises TypeError: if color is not a Color or style is not a TextStyle
    """
    x_pos = int(x_pos)
    y_pos = int(y_pos)
    text = str(text)
    # BUG FIX: these error messages previously reported the nonexistent
    # method name "PILImage.add_text_styled".
    if not isinstance(color, colors.BaseRGB):
        raise TypeError(type_error_message("PILImage.add_text_with_style",
                                           "color", "Color", color))
    if not isinstance(style, TextStyle):
        raise TypeError(type_error_message("PILImage.add_text_with_style",
                                           "style",
                                           "TextStyle", style))
    draw: PIL.ImageDraw.ImageDraw = PIL.ImageDraw.Draw(self._pil_image)
    draw.text((x_pos, y_pos), text, font=style.font, fill=color.rgb)
#
# Picture operates on files containing RGB images
#
# All interactions with the filesystem should be here
# not in superclasses
#
# class Picture adds pixel-level operations to PILImage
# and works only with 3-channel 8-bit/channel images
#
class Picture(PILImage, collections.abc.Iterable):
    """
    An RGB raster image with pixel-level access.

    Adds per-pixel reads/writes, pixel iteration, and functional bulk
    operations (map/remap/combine/map_if/replace_if) on top of PILImage.
    Only 3-channel 8-bit "RGB" images are handled; any other mode is
    converted on construction.
    """

    def __init__(self, pil_image: PIL.Image.Image):
        # Normalize to RGB so every pixel is a plain (r, g, b) tuple.
        if pil_image.mode == "RGB":
            super().__init__(pil_image)
        else:
            super().__init__(pil_image.convert(mode="RGB"))
        self.__pixel_access: PIL.PyAccess.PyAccess = self._pil_image.load()

    @classmethod
    def from_file(cls, filename: typing.Union[str, os.PathLike]) -> 'Picture':
        """
        Load a picture from an image file.

        :param filename: image path; a plain string is resolved against
            the configured media path
        :return: the loaded picture
        :rtype: Picture
        """
        if not isinstance(filename, os.PathLike):
            filename = files.media_path(str(filename))
        img = PIL.Image.open(filename)
        img.load()  # read the pixel data now so the file can be released
        return cls(img)

    @classmethod
    def make_empty(cls, width: int, height: int,
                   color: colors.BaseRGB = colors.Colors.black) -> 'Picture':
        """
        Create a width x height picture filled with a single color.

        :param int width: width in pixels
        :param int height: height in pixels
        :param colors.Color color: fill color (default black)
        :return: the new picture
        :rtype: Picture
        :raises TypeError: if color is not a Color
        """
        height = int(height)
        width = int(width)
        if not isinstance(color, colors.BaseRGB):
            # BUG FIX: the message previously reported the unrelated JES
            # name "setAllPixelsToAColor" instead of this method.
            raise TypeError(type_error_message("Picture.make_empty", "color", "Color", color))
        return cls(PIL.Image.new("RGB", (width, height), color.rgb))

    def __str__(self) -> str:
        return "<image> size:" + str(self.size)

    def _repr_html_(self) -> str:
        # Jupyter rich display: inline the image as a PNG data URI.
        return '<img src="' + self.to_base64() + '" />'

    # TODO: fix it so it uses IPython display mechanism for PNG rather than HTML
    # def _repr_png_(self):
    #     pass

    def __getitem__(self, key: Point) -> Pixel:
        """Return the live Pixel at (x, y); indices are clamped into range."""
        index_x = max(0, min(int(key[0]), self.width - 1))
        index_y = max(0, min(int(key[1]), self.height - 1))
        return Pixel((index_x, index_y), self.__pixel_access)

    def __setitem__(self, key: Point, value: colors.BaseRGB) -> None:
        """Set the color at (x, y); out-of-range writes are ignored."""
        index_x = int(key[0])
        index_y = int(key[1])
        # Silently discard out-of-range writes; in-range non-Color
        # values still raise, matching the original behavior.
        if index_x < 0 or index_x >= self.width:
            return
        if index_y < 0 or index_y >= self.height:
            return
        if not isinstance(value, colors.BaseRGB):
            raise TypeError(type_error_message("Picture.setitem", "value", "Color", value))
        self.__pixel_access[index_x, index_y] = value.rgb

    def __iter__(self) -> typing.Iterator[Pixel]:
        """Yield every Pixel in row-major order (left-to-right, top-to-bottom)."""
        for j in range(self.height):
            for i in range(self.width):
                yield Pixel((i, j), self.__pixel_access)

    def to_base64(self) -> str:
        """
        Encode the picture as a PNG data URI.

        :return: 'data:image/png;base64,...' string
        """
        buffer = io.BytesIO()
        self._pil_image.save(buffer, format='PNG', optimize=True)
        # decode() is the direct way to turn base64 bytes into text
        # (replaces the old str(...)[2:-1] slicing trick).
        base64_string = base64.b64encode(buffer.getvalue()).decode('ascii')
        return 'data:image/png;base64,' + base64_string

    # TODO test this
    def save(self, file_name: str) -> None:
        """
        Save the image to a file; format is inferred from the extension.

        :param file_name: name of file to save
        """
        self._pil_image.save(file_name)

    def copy(self) -> 'Picture':
        """
        Make a deep copy of the picture.

        :return: the copy
        :rtype: Picture
        """
        return Picture(self._pil_image.copy())

    def resize(self, height: int, width: int) -> 'Picture':
        """
        Return a new picture rescaled to height x width; self is unchanged.

        :param int height: new height in pixels
        :param int width: new width in pixels
        :return: the resized copy
        :rtype: Picture
        """
        height = int(height)
        width = int(width)
        return Picture(self._pil_image.resize((width, height)))

    def map(self, transform: Transform, left_top: Point = (0, 0),
            right_bottom: Point = (1000000, 1000000)) -> 'Picture':
        """
        Return a copy where every pixel inside the given rectangle is
        replaced by transform(pixel_info).

        :param transform: callable PixelInfo -> Color
        :param Point left_top: upper-left corner of the region (inclusive)
        :param Point right_bottom: lower-right corner (exclusive)
        :return: the transformed copy
        :rtype: Picture
        """
        left: int = int(left_top[0])
        top: int = int(left_top[1])
        right: int = int(right_bottom[0])
        bottom: int = int(right_bottom[1])
        # Normalize swapped corners, then clip the region to the image.
        if left > right:
            (right, left) = (left, right)
        if top > bottom:
            (top, bottom) = (bottom, top)
        left = max(left, 0)
        right = min(right, self.width)
        top = max(top, 0)
        bottom = min(bottom, self.height)
        copy = self.copy()
        pixel_access: PixelAccess = copy.__pixel_access  # pylint: disable=protected-access
        for j in range(top, bottom):
            for i in range(left, right):
                index: Point = (i, j)
                pixel_info = PixelInfo(index, rgb=pixel_access[index])
                color_out: colors.Color = transform(pixel_info)
                pixel_access[index] = color_out.rgb
        return copy

    def remap(self, transform: Transform2, color: Color = Colors.black) -> 'Picture':
        """
        Return a new picture built by relocating/recoloring every pixel.

        transform maps ((x, y), rgb) -> ((new_x, new_y), new_rgb); target
        coordinates wrap modulo the image size.  Pixels never written to
        keep the background color.

        :param transform: callable PixelInfoTuple -> PixelInfoTuple
        :param Color color: background color for unwritten pixels
        :return: the remapped picture
        :rtype: Picture
        """
        width = self.width
        height = self.height
        target = Picture.make_empty(width, height, color)
        target_pixel_access: PixelAccess = target.__pixel_access  # pylint: disable=protected-access
        self_pixel_access: PixelAccess = self.__pixel_access
        for j in range(0, height):
            for i in range(0, width):
                index_in: Point = (i, j)
                tuple_in: PixelInfoTuple = (index_in, Color.unpack(self_pixel_access[index_in]))
                tuple_out: PixelInfoTuple = transform(tuple_in)
                ((target_x, target_y), rgb_out) = tuple_out
                target_pixel_access[target_x % width, target_y % height] = Color.clamp(rgb_out)
        return target

    def combine(self, pixel_combine: Combine, other: 'Picture', resize=False) -> 'Picture':
        """
        Return a copy where each pixel is pixel_combine(self_px, other_px).

        :param pixel_combine: callable (PixelInfo, PixelInfo) -> Color
        :param Picture other: second operand picture
        :param bool resize: if True, rescale other to match self first
        :return: the combined picture
        :rtype: Picture
        """
        copy = self.copy()
        if resize:
            if (not copy.height == other.height) or (not copy.width == other.width):
                other = other.resize(copy.height, copy.width)
        pixel_access: PixelAccess = copy.__pixel_access  # pylint: disable=protected-access
        other_pixel_access: PixelAccess = other.__pixel_access  # pylint: disable=protected-access
        for j in range(copy.height):
            for i in range(copy.width):
                index: Point = (i, j)
                pixel_info = PixelInfo(index, rgb=pixel_access[index])
                other_pixel_info = PixelInfo(index, rgb=other_pixel_access[index])
                color_out: colors.Color = pixel_combine(pixel_info, other_pixel_info)
                pixel_access[index] = color_out.rgb
        return copy

    def map_if(self, predicate: Predicate, transform: Transform) -> 'Picture':
        """
        Return a copy where pixels satisfying predicate are replaced by
        transform(pixel_info); other pixels are untouched.

        :param predicate: callable PixelInfo -> bool
        :param transform: callable PixelInfo -> Color
        :return: the transformed copy
        :rtype: Picture
        """
        copy = self.copy()
        pixel_access: PixelAccess = copy.__pixel_access  # pylint: disable=protected-access
        for j in range(copy.height):
            for i in range(copy.width):
                index: Point = (i, j)
                pixel_info = PixelInfo(index, rgb=pixel_access[index])
                if predicate(pixel_info):
                    color_out: colors.Color = transform(pixel_info)
                    pixel_access[index] = color_out.rgb
        return copy

    def replace_if(self, predicate: Predicate, other: 'Picture',
                   resize=False) -> 'Picture':
        """
        Return a copy where pixels satisfying predicate take the
        corresponding pixel of other.

        :param predicate: callable PixelInfo -> bool
        :param Picture other: source of replacement pixels
        :param bool resize: if True, rescale other to match self first
        :return: the patched copy
        :rtype: Picture
        """
        copy = self.copy()
        if resize:
            if (not copy.height == other.height) or (not copy.width == other.width):
                other = other.resize(copy.height, copy.width)
        pixel_access: PixelAccess = copy.__pixel_access  # pylint: disable=protected-access
        other_pixel_access: PixelAccess = other.__pixel_access  # pylint: disable=protected-access
        for j in range(copy.height):
            for i in range(copy.width):
                index: Point = (i, j)
                pixel_info = PixelInfo(index, rgb=pixel_access[index])
                if predicate(pixel_info):
                    pixel_access[index] = other_pixel_access[index]
        return copy
#
# Package: MediaComp
# Module: pictures
# Author: <NAME>, <EMAIL>
#
# Derived from source code, power point slides, and media (pictures, movies, sounds) from
# http://mediacomputation.org created by <NAME> and <NAME>
# licensed under the Creative Commons Attribution 3.0 United States License,
# See: http://creativecommons.org/licenses/by/3.0/us/).
"""
JES-style media-computation picture support: colors, pixels, and RGB
images with drawing primitives and functional per-pixel operations,
built on the Pillow (PIL) imaging library.
"""
# module base64 is standard in Python 3: https://docs.python.org/3/library/base64.html
# using base64.b64encode in Picture.to_base64
import base64
# module io is standard in Python 3: https://docs.python.org/3/library/io.html
# using io.BytesIO in Picture.to_base64
import io
import os
import pathlib
import collections.abc
# module typing is standard in Python 3.5+: https://docs.python.org/3/library/typing.html
# used for type hints used in static type checking in PEP 484
# PEP 484 -- Type Hints: https://www.python.org/dev/peps/pep-0484/
# PEP 525 -- Syntax for Variable Annotations: https://www.python.org/dev/peps/pep-0526/
# use mypy for static type checking of Pyhton code: http://mypy-lang.org/
# note that just because a parameter is annotated to be of a specific type, doesn't mean
# that at runtime it will actually be of that type: dynamic checking or casting/conversion
# still needs to be done
import typing
# PIL refers to the Pillow library installed by default in the Anaconda distribution of Python
# PIL is the Python Image Library, Pillow is a fork of PIL
# For documentation on Pillow, see: https://pillow.readthedocs.io/en/stable/
# suppress static type checker complain "error: No library stub file for module 'PIL.Image'"
import PIL.Image # type: ignore
# suppress static type checker complain "error: No library stub file for module 'PIL.ImageDraw'"
import PIL.ImageDraw # type: ignore
# suppress static type checker complain "error: No library stub file for module 'PIL.ImageFont'"
import PIL.ImageFont # type: ignore
# suppress static type checker complain "error: No library stub file for module 'PIL.PyAccess'"
from PIL.PyAccess import PyAccess as PixelAccess # type: ignore
# suppress static type checker complain "error: No library stub file for
# module 'matplotlob.font_manager'"
# suppress static type checker complain "error: No library stub file for module 'matplotlob'"
import matplotlib.font_manager # type: ignore
# The IPython.core.display module is specific to IPython which is used in Jupyter
# See https://ipython.readthedocs.io/en/stable/api/generated/IPython.display.html
# import IPython.core.display
# import the color and file modules in this package
from . import colors
from . import files
# Re-export the color types so consumers of this module do not also
# have to import the colors module.
Colors = colors.Colors
Color = colors.Color
def set_media_path(path: typing.Optional[str] = None) -> bool:
    """
    Set the base directory used to resolve bare media file names.

    Forwards to files.set_media_path so a module that imports this
    module does not also have to import the files module.

    :param path: directory to use; semantics of None are defined by
        the files module -- see files.set_media_path
    :return: the result reported by files.set_media_path
    :rtype bool:
    """
    return files.set_media_path(path)
def media_path(filename: typing.Optional[str]) -> pathlib.Path:
    """
    Resolve a bare media file name against the configured media path.

    Forwards to files.media_path so a module that imports this module
    does not also have to import the files module.

    :param Optional[str] filename: file name to resolve
    :return: the resolved path, as produced by files.media_path
    :rtype pathlib.Path:
    """
    return files.media_path(filename)
# Type aliases
RGB = typing.Tuple[int, int, int]        # (red, green, blue), each 0-255
ImageSize = typing.Tuple[int, int]       # (height, width) -- note: reversed vs. PIL
Point = typing.Tuple[int, int]           # (x, y) pixel coordinate
PointSequence = typing.Sequence[Point]
BaseRGB = colors.BaseRGB
PixelInfoTuple = typing.Tuple[Point, RGB]                       # ((x, y), (r, g, b))
Transform = typing.Callable[['PixelInfo'], Color]               # per-pixel recoloring
Transform2 = typing.Callable[[PixelInfoTuple], PixelInfoTuple]  # relocation + recoloring
Predicate = typing.Callable[['PixelInfo'], bool]                # per-pixel test
Combine = typing.Callable[['PixelInfo', 'PixelInfo'], Color]    # two-picture blend
def type_error_message(fun_name: str, param_name: str, expected: str, actual: typing.Any) -> str:
    """Build the uniform TypeError message used across MediaComp.pictures."""
    return (f"In MediaComp.pictures.{fun_name}: {param_name} "
            f"expected a {expected}, actually {type(actual)}")
class PixelInfo(colors.Color):
    """
    A color tagged with the (x, y) position it was sampled from.

    Unlike Pixel, this is a detached snapshot: modifying it does not
    write back to any image.
    """

    def __init__(self, xy: Point, rgb: typing.Optional[RGB] = None):
        super().__init__(rgb=rgb)
        self._xy: Point = (int(xy[0]), int(xy[1]))

    # Overrides Color.__repr__
    def __repr__(self) -> str:
        return f"PixelInfo(xy = ({self.x}, {self.y}), " + \
               f"pixel_color = Color(red={self.red}, green={self.green}, blue={self.blue}))"

    # Overrides Color.__str__
    def __str__(self) -> str:
        return f"Pixel(red={self.red}, green={self.green}, " + \
               f"blue={self.blue}, x={self.x}, y={self.y})"

    @property
    def color(self) -> colors.Color:
        """
        the stored color as a plain Color value

        :type: colors.Color
        """
        red, green, blue = self.rgb
        return colors.Color(red, green, blue)

    @property
    def x(self) -> int:  # pylint: disable=invalid-name
        """
        x (column) coordinate this color was sampled from

        :type: int
        """
        return int(self._xy[0])

    @property
    def y(self) -> int:  # pylint: disable=invalid-name
        """
        y (row) coordinate this color was sampled from

        :type: int
        """
        return int(self._xy[1])
class Pixel(PixelInfo):
    """
    A live view of one pixel in a picture.

    Reads and writes of red/green/blue/rgb/color go straight through to
    the underlying image buffer, so assigning to a Pixel's channels
    changes the picture it came from.
    """

    def __init__(self, xy: Point, pixel_access: PixelAccess):
        self.__pixel_access: PixelAccess = pixel_access
        super().__init__(xy)

    # Overrides PixelInfo.__str__ method
    def __str__(self) -> str:
        return f"Pixel(xy=({self.x}, {self.y}), Color(r={self.red}, g={self.green}, b={self.blue}))"

    @property
    def color(self) -> colors.Color:
        """
        the pixel's current color, read from the image

        :type: colors.Color
        """
        red, green, blue = self.__pixel_access[self._xy]
        return colors.Color(red, green, blue)

    @color.setter
    def color(self, rgb: colors.BaseRGB) -> None:
        if not isinstance(rgb, colors.BaseRGB):
            raise TypeError
        self.rgb = rgb.rgb

    # Overrides BaseRGB.red property getter
    @property
    def red(self) -> int:
        """
        red channel (0-255), read from the image

        :type: int
        """
        return self.rgb[0]

    @red.setter
    def red(self, value: int) -> None:
        clamped = max(0, min(255, int(value)))  # clamp to a legal channel value
        _, green, blue = self.__pixel_access[self._xy]
        self.__pixel_access[self._xy] = (clamped, green, blue)

    # Overrides BaseRGB.green property getter
    @property
    def green(self) -> int:
        """
        green channel (0-255), read from the image

        :type: int
        """
        return self.rgb[1]

    @green.setter
    def green(self, value: int) -> None:
        clamped = max(0, min(255, int(value)))  # clamp to a legal channel value
        red, _, blue = self.__pixel_access[self._xy]
        self.__pixel_access[self._xy] = (red, clamped, blue)

    # Overrides BaseRGB.blue property getter
    @property
    def blue(self) -> int:
        """
        blue channel (0-255), read from the image

        :type: int
        """
        return self.rgb[2]

    @blue.setter
    def blue(self, value: int) -> None:
        clamped = max(0, min(255, int(value)))  # clamp to a legal channel value
        red, green, _ = self.__pixel_access[self._xy]
        self.__pixel_access[self._xy] = (red, green, clamped)

    # Overrides BaseRGB.rgb property getter
    @property
    def rgb(self) -> RGB:
        """
        (red, green, blue) tuple of ints, read from the image

        :type: RGB
        """
        red, green, blue = self.__pixel_access[self._xy]
        return int(red), int(green), int(blue)

    @rgb.setter
    def rgb(self, value: RGB) -> None:
        self.__pixel_access[self._xy] = value
class TextStyle:
    """
    A font selection (family name, emphasis, point size) used when
    drawing styled text onto an image.
    """

    @staticmethod
    def find_font_file(query: str) -> typing.Optional[str]:
        """
        Return the path of the first installed font whose file name
        contains query, or None if nothing matches.

        :param str query: substring to look for in font file names
        :return: a font file path, or None
        """
        for path in matplotlib.font_manager.findSystemFonts():
            if query in os.path.basename(path):
                return path
        return None

    def __init__(self, font_name: str, emphasis: str, size: float):
        """
        Look up a system font by name and load it at the given size.

        :param font_name: font family/file name substring to search for
        :param emphasis: e.g. 'bold' or 'italic' (currently not used in the search)
        :param size: point size
        """
        self.__font_name = str(font_name)
        font_file: typing.Optional[str] = TextStyle.find_font_file(self.__font_name)
        # TODO: emphasis still ignored in font searching
        self.__emphasis = str(emphasis)
        self.__size = float(size)
        # NOTE(review): if no matching font is installed, font_file is
        # None here and truetype() will fail; also some Pillow versions
        # expect an int size -- confirm whether a fallback/int cast is wanted.
        self.__font: PIL.ImageFont.ImageFont = PIL.ImageFont.truetype(font_file, self.__size)

    @property
    def font_name(self) -> str:
        """
        name of font used to draw with

        :type: str
        """
        return self.__font_name

    @property
    def font(self) -> PIL.ImageFont.ImageFont:
        """
        the loaded PIL font object

        :type: PIL.ImageFont.ImageFont
        """
        return self.__font

    @property
    def emphasis(self) -> str:
        """
        kind of emphasis to use, 'bold', 'italic', 'bold + italic'

        :type: str
        """
        return self.__emphasis

    @property
    def size(self) -> float:
        """
        size of font in points

        :type: float
        """
        return self.__size
# class PILImage has no pixel-level operations and is agnostic about how many and what kind
# of channels are in the image. Such things will be found in subclasseses of PILImage
class PILImage:
    """
    Thin wrapper around a PIL image offering size queries and simple
    drawing primitives (arcs, lines, ovals, rectangles, text).

    This class is agnostic about how many and what kind of channels are
    in the image; pixel-level access lives in subclasses (see Picture).
    """

    def __init__(self, pil_image: PIL.Image.Image):
        self._pil_image = pil_image

    @property
    def height(self) -> int:
        """
        height of image in pixels

        :type: int
        """
        return int(self._pil_image.height)

    @property
    def width(self) -> int:
        """
        width of image in pixels

        :type: int
        """
        return int(self._pil_image.width)

    @property
    def size(self) -> ImageSize:
        """
        (height, width) tuple -- note: reversed relative to PIL's (width, height)

        :type: ImageSize
        """
        return self.height, self.width

    # overriden by Picture subclass
    def copy(self) -> 'PILImage':
        """
        Makes a deep copy of this object

        :return: the copy
        :rtype PILImage:
        """
        return PILImage(self._pil_image.copy())

    def set_color(self, color: colors.BaseRGB = colors.Colors.black):
        """
        Fill the entire image with a single color.

        :param colors.Color color: fill color (default black)
        :raises TypeError: if color is not a Color
        """
        if not isinstance(color, colors.BaseRGB):
            raise TypeError(type_error_message("PILImage.set_color", "color", "Color", color))
        draw = PIL.ImageDraw.Draw(self._pil_image)
        # BUG FIX: fill previously received the Color object itself;
        # PIL needs the (r, g, b) tuple, as every other method passes.
        draw.rectangle([(0, 0), (self.width, self.height)], fill=color.rgb)

    def copy_into(self, big_picture: 'PILImage', left: int, top: int):
        """
        Paste this image into big_picture with its upper-left corner at
        (left, top).  big_picture is modified in place.

        :param big_picture: destination image
        :param int left: x coordinate of the paste position
        :param int top: y coordinate of the paste position
        """
        big_picture._pil_image.paste(self._pil_image, (left, top))  # pylint: disable=protected-access

    def add_arc(self, x: int, y: int,  # pylint: disable=invalid-name; # pylint: disable=too-many-arguments
                width: int, height: int,
                start: float, angle: float,
                color: colors.BaseRGB = colors.Colors.black
                ) -> None:
        """
        Draw an arc: the piece of an ellipse inscribed in the
        width x height box whose upper-left corner is (x, y), from
        angle start spanning angle degrees.

        :param int x: left edge of the bounding box
        :param int y: top edge of the bounding box
        :param int width: bounding-box width in pixels
        :param int height: bounding-box height in pixels
        :param float start: starting angle in degrees
        :param float angle: angular extent in degrees
        :param colors.Color color: arc color
        :raises TypeError: if color is not a Color
        """
        x = int(x)  # pylint: disable=invalid-name
        y = int(y)  # pylint: disable=invalid-name
        width = int(width)
        height = int(height)
        start = float(start)
        angle = float(angle)
        if not isinstance(color, colors.BaseRGB):
            # BUG FIX: message previously named the parameter "c".
            raise TypeError(type_error_message("PILImage.add_arc", "color", "Color", color))
        fill_color: RGB = color.rgb
        draw = PIL.ImageDraw.Draw(self._pil_image)
        bounding_box: PointSequence = [(x, y), (x + width, y + height)]
        draw.arc(bounding_box, start=start, end=start + angle, fill=fill_color, width=1)

    def add_arc_filled(self, x: int, y: int,  # pylint: disable=invalid-name; # pylint: disable=too-many-arguments
                       width: int, height: int,
                       start: float, angle: float,
                       color: colors.BaseRGB = colors.Colors.black
                       ) -> None:
        """
        Draw a filled pie slice: the sector of the ellipse inscribed in
        the width x height box whose upper-left corner is (x, y), from
        angle start spanning angle degrees.

        :param int x: left edge of the bounding box
        :param int y: top edge of the bounding box
        :param int width: bounding-box width in pixels
        :param int height: bounding-box height in pixels
        :param float start: starting angle in degrees
        :param float angle: angular extent in degrees
        :param colors.Color color: fill color
        :raises TypeError: if color is not a Color
        """
        x = int(x)  # pylint: disable=invalid-name
        y = int(y)  # pylint: disable=invalid-name
        width = int(width)
        height = int(height)
        start = float(start)
        angle = float(angle)
        if not isinstance(color, colors.BaseRGB):
            raise TypeError(type_error_message("PILImage.add_arc_filled", "color", "Color", color))
        fill_color: RGB = color.rgb
        bounding_box: PointSequence = [(x, y), (x + width, y + height)]
        draw = PIL.ImageDraw.Draw(self._pil_image)
        draw.pieslice(bounding_box, start=start, end=start + angle, fill=fill_color, width=1)

    def add_line(self, start_x: int, start_y: int,  # pylint: disable=too-many-arguments
                 width: int, height: int,
                 color: colors.BaseRGB = colors.Colors.black
                 ) -> None:
        """
        Draw a straight line from (start_x, start_y) to
        (start_x + width, start_y + height).

        :param int start_x: x coordinate of the starting point
        :param int start_y: y coordinate of the starting point
        :param int width: horizontal offset to the end point
        :param int height: vertical offset to the end point
        :param colors.Color color: line color
        :raises TypeError: if color is not a Color
        """
        start_x = int(start_x)
        start_y = int(start_y)
        width = int(width)
        height = int(height)
        if not isinstance(color, colors.BaseRGB):
            raise TypeError(type_error_message("PILImage.add_line", "color", "Color", color))
        bounding_box: PointSequence = [(start_x, start_y), (start_x + width, start_y + height)]
        draw = PIL.ImageDraw.Draw(self._pil_image)
        draw.line(bounding_box, fill=color.rgb, width=1)

    def add_oval(self, center_x: int, center_y: int,  # pylint: disable=too-many-arguments
                 width: int, height: int,
                 color: colors.BaseRGB = colors.Colors.black
                 ) -> None:
        """
        Draw the outline of the oval inscribed in the width x height
        box whose upper-left corner is (center_x, center_y).

        NOTE(review): despite the parameter names, this method treats
        (center_x, center_y) as a top-left corner, while
        add_oval_filled centers the oval -- confirm which is intended.

        :param int center_x: x coordinate of the bounding box's left edge
        :param int center_y: y coordinate of the bounding box's top edge
        :param int width: bounding-box width in pixels
        :param int height: bounding-box height in pixels
        :param colors.Color color: outline color
        :raises TypeError: if color is not a Color
        """
        center_x = int(center_x)
        center_y = int(center_y)
        width = int(width)
        height = int(height)
        if not isinstance(color, colors.BaseRGB):
            raise TypeError(type_error_message("PILImage.add_oval", "color", "Color", color))
        bounding_box: PointSequence = [(center_x, center_y), (center_x + width, center_y + height)]
        draw = PIL.ImageDraw.Draw(self._pil_image)
        draw.ellipse(bounding_box, outline=color.rgb, width=1)

    def add_oval_filled(self, center_x: int, center_y: int,  # pylint: disable=too-many-arguments
                        width: int, height: int,
                        color: colors.BaseRGB = colors.Colors.black
                        ) -> None:
        """
        Draw a filled oval of the given total width and height centered
        on (center_x, center_y).

        :param int center_x: x coordinate of the oval's center
        :param int center_y: y coordinate of the oval's center
        :param int width: total width of the oval in pixels
        :param int height: total height of the oval in pixels
        :param colors.Color color: fill (and outline) color
        :raises TypeError: if color is not a Color
        """
        center_x = int(center_x)
        center_y = int(center_y)
        width = int(width)
        height = int(height)
        if not isinstance(color, colors.BaseRGB):
            raise TypeError(type_error_message("PILImage.add_oval_filled", "color", "Color", color))
        left_top = (center_x - width // 2, center_y - height // 2)
        # BUG FIX: the bottom-right y coordinate previously used width
        # instead of height, distorting every non-square oval.
        right_bottom = (center_x + width // 2, center_y + height // 2)
        bounding_box = [left_top, right_bottom]
        draw = PIL.ImageDraw.Draw(self._pil_image)
        draw.ellipse(bounding_box, outline=color.rgb, fill=color.rgb, width=1)

    def add_rect(self, left: int, top: int,  # pylint: disable=too-many-arguments
                 width: int, height: int,
                 color: colors.BaseRGB = colors.Colors.black
                 ) -> None:
        """
        Takes a picture, a starting (left, top) position (two numbers),
        a width and height (two more numbers, four total),
        and (optionally) a color as input. Adds a rectangular outline of the
        specified dimensions using the (left,top) as the upper left corner.
        Default color is black.
        Wrapped by the :py:func:`.jes.addRect` function.

        :param int left: x coordinate of the upper-left corner
        :param int top: y coordinate of the upper-left corner
        :param int width: rectangle width in pixels
        :param int height: rectangle height in pixels
        :param colors.Color color: outline color
        :raises TypeError: if color is not a Color
        """
        left = int(left)
        top = int(top)
        width = int(width)
        height = int(height)
        if not isinstance(color, colors.BaseRGB):
            raise TypeError(type_error_message("PILImage.add_rect", "color", "Color", color))
        draw = PIL.ImageDraw.Draw(self._pil_image)
        draw.rectangle([(left, top), (left + width, top + height)], outline=color.rgb, width=1)

    def add_rect_filled(self, left: int, top: int, width: int, height: int,  # pylint: disable=too-many-arguments
                        color: colors.BaseRGB = colors.Colors.black
                        ) -> None:
        """
        Draw a filled width x height rectangle whose upper-left corner
        is at (left, top).  Default color is black.

        :param int left: x coordinate of the upper-left corner
        :param int top: y coordinate of the upper-left corner
        :param int width: rectangle width in pixels
        :param int height: rectangle height in pixels
        :param colors.Color color: fill color
        :raises TypeError: if color is not a Color
        """
        left = int(left)
        top = int(top)
        width = int(width)
        height = int(height)
        if not isinstance(color, colors.BaseRGB):
            raise TypeError(type_error_message("PILImage.add_rect_filled", "color", "Color", color))
        draw = PIL.ImageDraw.Draw(self._pil_image)
        draw.rectangle([(left, top), (left + width, top + height)], fill=color.rgb, width=1)

    def add_text(self, x_pos: int, y_pos: int, text: str,
                 color: colors.BaseRGB = colors.Colors.black
                 ) -> None:
        """
        Draw text with its upper-left corner at (x_pos, y_pos) in the
        default PIL font.  Default color is black.

        :param int x_pos: x coordinate of the text's upper-left corner
        :param int y_pos: y coordinate of the text's upper-left corner
        :param str text: the string to draw
        :param colors.Color color: text color
        :raises TypeError: if color is not a Color
        """
        x_pos = int(x_pos)
        y_pos = int(y_pos)
        text = str(text)
        if not isinstance(color, colors.BaseRGB):
            raise TypeError(type_error_message("PILImage.add_text", "color", "Color", color))
        draw = PIL.ImageDraw.Draw(self._pil_image)
        draw.text((x_pos, y_pos), text, fill=color.rgb)

    def add_text_with_style(self, x_pos: int, y_pos: int,  # pylint: disable=too-many-arguments
                            text: str, style: TextStyle,
                            color: colors.BaseRGB = colors.Colors.black
                            ) -> None:
        """
        Draw text with its upper-left corner at (x_pos, y_pos) using
        the font described by style.  Default color is black.

        :param int x_pos: x coordinate of the text's upper-left corner
        :param int y_pos: y coordinate of the text's upper-left corner
        :param str text: the string to draw
        :param TextStyle style: font/size to draw with
        :param colors.Color color: text color
        :raises TypeError: if color is not a Color or style is not a TextStyle
        """
        x_pos = int(x_pos)
        y_pos = int(y_pos)
        text = str(text)
        # BUG FIX: these error messages previously reported the
        # nonexistent method name "PILImage.add_text_styled".
        if not isinstance(color, colors.BaseRGB):
            raise TypeError(type_error_message("PILImage.add_text_with_style",
                                               "color", "Color", color))
        if not isinstance(style, TextStyle):
            raise TypeError(type_error_message("PILImage.add_text_with_style",
                                               "style",
                                               "TextStyle", style))
        draw: PIL.ImageDraw.ImageDraw = PIL.ImageDraw.Draw(self._pil_image)
        draw.text((x_pos, y_pos), text, font=style.font, fill=color.rgb)
#
# Picture operates on files containing RGB images
#
# All interactions with the filesystem should be here
# not in superclasses
#
# class Picture adds pixel-level operations to PILImage
# and works only with 3-channel 8-bit/channel images
#
class Picture(PILImage, collections.abc.Iterable):
    """
    An RGB raster image with pixel-level access.

    Adds per-pixel reads/writes, pixel iteration, and functional bulk
    operations (map/remap/combine/map_if/replace_if) on top of PILImage.
    Only 3-channel 8-bit "RGB" images are handled; any other mode is
    converted on construction.
    """

    def __init__(self, pil_image: PIL.Image.Image):
        # Normalize to RGB so every pixel is a plain (r, g, b) tuple.
        if pil_image.mode == "RGB":
            super().__init__(pil_image)
        else:
            super().__init__(pil_image.convert(mode="RGB"))
        self.__pixel_access: PIL.PyAccess.PyAccess = self._pil_image.load()

    @classmethod
    def from_file(cls, filename: typing.Union[str, os.PathLike]) -> 'Picture':
        """
        Load a picture from an image file.

        :param filename: image path; a plain string is resolved against
            the configured media path
        :return: the loaded picture
        :rtype: Picture
        """
        if not isinstance(filename, os.PathLike):
            filename = files.media_path(str(filename))
        img = PIL.Image.open(filename)
        img.load()  # read the pixel data now so the file can be released
        return cls(img)

    @classmethod
    def make_empty(cls, width: int, height: int,
                   color: colors.BaseRGB = colors.Colors.black) -> 'Picture':
        """
        Create a width x height picture filled with a single color.

        :param int width: width in pixels
        :param int height: height in pixels
        :param colors.Color color: fill color (default black)
        :return: the new picture
        :rtype: Picture
        :raises TypeError: if color is not a Color
        """
        height = int(height)
        width = int(width)
        if not isinstance(color, colors.BaseRGB):
            # BUG FIX: the message previously reported the unrelated JES
            # name "setAllPixelsToAColor" instead of this method.
            raise TypeError(type_error_message("Picture.make_empty", "color", "Color", color))
        return cls(PIL.Image.new("RGB", (width, height), color.rgb))

    def __str__(self) -> str:
        return "<image> size:" + str(self.size)

    def _repr_html_(self) -> str:
        # Jupyter rich display: inline the image as a PNG data URI.
        return '<img src="' + self.to_base64() + '" />'

    # TODO: fix it so it uses IPython display mechanism for PNG rather than HTML
    # def _repr_png_(self):
    #     pass

    def __getitem__(self, key: Point) -> Pixel:
        """Return the live Pixel at (x, y); indices are clamped into range."""
        index_x = max(0, min(int(key[0]), self.width - 1))
        index_y = max(0, min(int(key[1]), self.height - 1))
        return Pixel((index_x, index_y), self.__pixel_access)

    def __setitem__(self, key: Point, value: colors.BaseRGB) -> None:
        """Set the color at (x, y); out-of-range writes are ignored."""
        index_x = int(key[0])
        index_y = int(key[1])
        # Silently discard out-of-range writes; in-range non-Color
        # values still raise, matching the original behavior.
        if index_x < 0 or index_x >= self.width:
            return
        if index_y < 0 or index_y >= self.height:
            return
        if not isinstance(value, colors.BaseRGB):
            raise TypeError(type_error_message("Picture.setitem", "value", "Color", value))
        self.__pixel_access[index_x, index_y] = value.rgb

    def __iter__(self) -> typing.Iterator[Pixel]:
        """Yield every Pixel in row-major order (left-to-right, top-to-bottom)."""
        for j in range(self.height):
            for i in range(self.width):
                yield Pixel((i, j), self.__pixel_access)

    def to_base64(self) -> str:
        """
        Encode the picture as a PNG data URI.

        :return: 'data:image/png;base64,...' string
        """
        buffer = io.BytesIO()
        self._pil_image.save(buffer, format='PNG', optimize=True)
        # decode() is the direct way to turn base64 bytes into text
        # (replaces the old str(...)[2:-1] slicing trick).
        base64_string = base64.b64encode(buffer.getvalue()).decode('ascii')
        return 'data:image/png;base64,' + base64_string

    # TODO test this
    def save(self, file_name: str) -> None:
        """
        Save the image to a file; format is inferred from the extension.

        :param file_name: name of file to save
        """
        self._pil_image.save(file_name)

    def copy(self) -> 'Picture':
        """
        Makes a copy of the picture

        :return: The copy
        :rtype: Picture
        """
        return Picture(self._pil_image.copy())

    def resize(self, height: int, width: int) -> 'Picture':
        """
        Return a new picture rescaled to height x width; self is unchanged.

        :param int height: new height in pixels
        :param int width: new width in pixels
        :return: the resized copy
        :rtype: Picture
        """
        height = int(height)
        width = int(width)
        return Picture(self._pil_image.resize((width, height)))

    def map(self, transform: Transform, left_top: Point = (0, 0),
            right_bottom: Point = (1000000, 1000000)) -> 'Picture':
        """
        Return a copy where every pixel inside the given rectangle is
        replaced by transform(pixel_info).

        :param transform: callable PixelInfo -> Color
        :param Point left_top: upper-left corner of the region (inclusive)
        :param Point right_bottom: lower-right corner (exclusive)
        :return: the transformed copy
        :rtype: Picture
        """
        left: int = int(left_top[0])
        top: int = int(left_top[1])
        right: int = int(right_bottom[0])
        bottom: int = int(right_bottom[1])
        # Normalize swapped corners, then clip the region to the image.
        if left > right:
            (right, left) = (left, right)
        if top > bottom:
            (top, bottom) = (bottom, top)
        left = max(left, 0)
        right = min(right, self.width)
        top = max(top, 0)
        bottom = min(bottom, self.height)
        copy = self.copy()
        pixel_access: PixelAccess = copy.__pixel_access  # pylint: disable=protected-access
        for j in range(top, bottom):
            for i in range(left, right):
                index: Point = (i, j)
                pixel_info = PixelInfo(index, rgb=pixel_access[index])
                color_out: colors.Color = transform(pixel_info)
                pixel_access[index] = color_out.rgb
        return copy

    def remap(self, transform: Transform2, color: Color = Colors.black) -> 'Picture':
        """
        Return a new picture built by relocating/recoloring every pixel.

        transform maps ((x, y), rgb) -> ((new_x, new_y), new_rgb); target
        coordinates wrap modulo the image size.  Pixels never written to
        keep the background color.

        :param transform: callable PixelInfoTuple -> PixelInfoTuple
        :param Color color: background color for unwritten pixels
        :return: the remapped picture
        :rtype: Picture
        """
        width = self.width
        height = self.height
        target = Picture.make_empty(width, height, color)
        target_pixel_access: PixelAccess = target.__pixel_access  # pylint: disable=protected-access
        self_pixel_access: PixelAccess = self.__pixel_access
        for j in range(0, height):
            for i in range(0, width):
                index_in: Point = (i, j)
                tuple_in: PixelInfoTuple = (index_in, Color.unpack(self_pixel_access[index_in]))
                tuple_out: PixelInfoTuple = transform(tuple_in)
                ((target_x, target_y), rgb_out) = tuple_out
                target_pixel_access[target_x % width, target_y % height] = Color.clamp(rgb_out)
        return target

    def combine(self, pixel_combine: Combine, other: 'Picture', resize=False) -> 'Picture':
        """
        Return a copy where each pixel is pixel_combine(self_px, other_px).

        :param pixel_combine: callable (PixelInfo, PixelInfo) -> Color
        :param Picture other: second operand picture
        :param bool resize: if True, rescale other to match self first
        :return: the combined picture
        :rtype: Picture
        """
        copy = self.copy()
        if resize:
            if (not copy.height == other.height) or (not copy.width == other.width):
                other = other.resize(copy.height, copy.width)
        pixel_access: PixelAccess = copy.__pixel_access  # pylint: disable=protected-access
        other_pixel_access: PixelAccess = other.__pixel_access  # pylint: disable=protected-access
        for j in range(copy.height):
            for i in range(copy.width):
                index: Point = (i, j)
                pixel_info = PixelInfo(index, rgb=pixel_access[index])
                other_pixel_info = PixelInfo(index, rgb=other_pixel_access[index])
                color_out: colors.Color = pixel_combine(pixel_info, other_pixel_info)
                pixel_access[index] = color_out.rgb
        return copy

    def map_if(self, predicate: Predicate, transform: Transform) -> 'Picture':
        """
        Return a copy where pixels satisfying predicate are replaced by
        transform(pixel_info); other pixels are untouched.

        :param predicate: callable PixelInfo -> bool
        :param transform: callable PixelInfo -> Color
        :return: the transformed copy
        :rtype: Picture
        """
        copy = self.copy()
        pixel_access: PixelAccess = copy.__pixel_access  # pylint: disable=protected-access
        for j in range(copy.height):
            for i in range(copy.width):
                index: Point = (i, j)
                pixel_info = PixelInfo(index, rgb=pixel_access[index])
                if predicate(pixel_info):
                    color_out: colors.Color = transform(pixel_info)
                    pixel_access[index] = color_out.rgb
        return copy

    def replace_if(self, predicate: Predicate, other: 'Picture',
                   resize=False) -> 'Picture':
        """
        Return a copy where pixels satisfying predicate take the
        corresponding pixel of other.

        :param predicate: callable PixelInfo -> bool
        :param Picture other: source of replacement pixels
        :param bool resize: if True, rescale other to match self first
        :return: the patched copy
        :rtype: Picture
        """
        copy = self.copy()
        if resize:
            if (not copy.height == other.height) or (not copy.width == other.width):
                other = other.resize(copy.height, copy.width)
        pixel_access: PixelAccess = copy.__pixel_access  # pylint: disable=protected-access
        other_pixel_access: PixelAccess = other.__pixel_access  # pylint: disable=protected-access
        for j in range(copy.height):
            for i in range(copy.width):
                index: Point = (i, j)
                pixel_info = PixelInfo(index, rgb=pixel_access[index])
                if predicate(pixel_info):
                    pixel_access[index] = other_pixel_access[index]
        return copy
module 'matplotlob.font_manager'" # suppress static type checker complain "error: No library stub file for module 'matplotlob'" # type: ignore # The IPython.core.display module is specific to IPython which is used in Jupyter # See https://ipython.readthedocs.io/en/stable/api/generated/IPython.display.html # import IPython.core.display # import the color and file modules in this package # make the names in the color and file modules public to # consumers of this module so they don't have to import colors module also forwards to files.set_media_path so module that imports this module does not have to import files module also :param path: :return: :rtype bool: forwards to files.media_path so module that imports this module does not have to import files module also :param Optional[str] filename: :return: :rtype pathlib.Path: # Type aliases generates error message for TypeError Class level docstring goes here # Overrides Color.__repr__ # Overrides Color.__str__ Write better docstring :type: colors.Color # pylint: disable=invalid-name Write better docstring :type: int # pylint: disable=invalid-name Write better docstring :type: int Class level docstring goes here # Overrides PixelInfo.__str__ method Write better docstring :type: colors.Color # Overrides BaseRGB.red property getter Write better docstring :type: int # Overrides BaseRGB.green property getter Write better docstring :type: int # Overrides BaseRGB.blue property getter Write better docstring :type: int # Overrides BaseRGB.rgb property getter Write better docstring :type: RGB Class-level docstring goes here Write better docstring :param str query: :return: Write better docstring :param font_name: :param emphasis: :param size: # TODO: emphasis still ignored in font searching name of font used to draw with :type: str Write better docstring :type: PIL.ImageFont.ImageFont kind of emphasis to use, 'bold', 'italic', 'bold + italic' :type: str size of font in points :type: float # class PILImage has no pixel-level 
operations and is agnostic about how many and what kind # of channels are in the image. Such things will be found in subclasseses of PILImage Class level docstring height of image in pixels :type: int width of image in pixels :type: int (height, width) tuple :type: ImageSize # overriden by Picture subclass Makes a deep copy of this object :return: the copy :rtype PILImage: Write better docstring :param color: :return: Write better docstring :param big_picture: :param int left: :param int top: :return: # pylint: disable=protected-access # pylint: disable=invalid-name; # pylint: disable=too-many-arguments Write better docstring :param int x: :param int y: :param int width: :param int height: :param float start: :param float angle: :param colors.Color color: :raises TypeError: # pylint: disable=invalid-name # pylint: disable=invalid-name # pylint: disable=invalid-name; # pylint: disable=too-many-arguments Write better docstring :param int x: :param int y: :param int width: :param int height: :param float start: :param float angle: :param colors.Color color: :raises TypeError: # pylint: disable=invalid-name # pylint: disable=invalid-name # pylint: disable=too-many-arguments Write better docstring :param int start_x: :param int start_y: :param int width: :param int height: :param colors.Color color: :raises TypeError: # pylint: disable=too-many-arguments Write better docstring :param int center_x: :param int center_y: :param int width: :param int height: :param colors.Color color: :raises TypeError: # pylint: disable=too-many-arguments Write better docstring :param int center_x: :param int center_y: :param int width: :param int height: :param colors.Color color: :raises TypeError: # pylint: disable=too-many-arguments Takes a picture, a starting (left, top) position (two numbers), a width and height (two more numbers, four total), and (optionally) a color as input. Adds a rectangular outline of the specified dimensions using the (left,top) as the upper left corner. 
Default color is black. Wrapped by for :py:func:`.jes.addRect` function. :param int left: :param int top: :param int width: :param int height: :param colors.Color color: :raises TypeError: # pylint: disable=too-many-arguments Write better docstring :param int left: :param int top: :param int width: :param int height: :param colors.Color color: :raises TypeError: Write better docstring :param int x_pos: :param int y_pos: :param str text: :param colors.Color color: :raises TypeError # pylint: disable=too-many-arguments Write better docstring :param int x_pos: :param int y_pos: :param str text: :param str style: :param colors.Color color: :raises TypeError: # # Picture operates on files containing RGB images # # All interactions with the filesystem should be here # not in superclasses # # class Picture adds pixel-level operations to PILImage # and works only with 3-channel 8-bit/channel images # Class-level docstrings Write better docstring :param filename: :return: :rtype: Picture Write better docstring :param int width: :param int height: :param colors.Color color: :return: :rtype: Picture # TODO: fix it so it uses IPython display mechanism for PNG rather than HTML # def _repr_png_(self): # pass # pylint: disable=invalid-name # pylint: disable=invalid-name # silently discard value if out of range, is that really a good thing? 
convert to base64 string of bytes in PNG encoding :return: # discard "b'" and beginning and "'" at end # TODO test this save image to file :param file_name: name of file to save Makes a copy of the picture :return: The copy :rtype: Picture Write better docstring :param int height: :param int width: :return: Write better docstring :param transform: :param Point left_top: :param Point right_bottom: :return: # pylint: disable=protected-access Write better docstring :param transform: :param Color color: :return: # pylint: disable=protected-access Writie better docstring :param pixel_combine: :param Picture other: :param bool resize: :return: :rtype: Picture # pylint: disable=protected-access # pylint: disable=protected-access Write better docstring :param predicate: :param transform: :return: :rtype: Picture # pylint: disable=protected-access Write better docstring :param predicate: :param other: :param bool resize: :return: :rtype: Picture # pylint: disable=protected-access # pylint: disable=protected-access | 2.33618 | 2 |
gbe/ticketing_idd_interface.py | bethlakshmi/gbe-divio-djangocms-python2.7 | 1 | 6620753 | <gh_stars>1-10
# ticketing_idd.py - Interface Design Description (IDD) between GBE and
# TICKETING modules
# See documentation in https://github.com/bethlakshmi/GBE2/wiki/Ticketing-To-Do
# section: "By Friday - needed for integration"
# - Betty 8/15
from gbe_logging import logger
from ticketing.models import (
Purchaser,
TicketingEvents,
PayPalSettings,
RoleEligibilityCondition,
TicketingEligibilityCondition,
TicketItem,
Transaction,
)
from ticketing.forms import (
DonationForm,
TicketPayForm,
)
from gbe.models import (
Conference,
)
from scheduler.idd import get_roles
from ticketing.brown_paper import *
from ticketing.functions import get_fee_list
from ticketing.brown_paper import import_bpt_ticket_items
from django.db.models import Count
from django.db.models import Q
from datetime import datetime
from django.forms import HiddenInput
from paypal.standard.forms import PayPalPaymentsForm
from django.urls import reverse
def fee_paid(bid_type, user_name, conference):
if bid_type == "Act":
return verify_performer_app_paid(user_name, conference)
elif bid_type == "Vendor":
return verify_vendor_app_paid(user_name, conference)
return True
def comp_act(user, conference):
if not TicketItem.objects.filter(
add_on=False,
ticketing_event__act_submission_event=True,
ticketing_event__conference=conference).exists():
return False
comp_ticket = TicketItem.objects.filter(
add_on=False,
ticketing_event__act_submission_event=True,
ticketing_event__conference=conference).first()
purchaser = Purchaser(
matched_to_user=user,
first_name=user.first_name,
last_name=user.last_name,
email=user.email)
purchaser.save()
transaction = Transaction(
purchaser=purchaser,
ticket_item=comp_ticket,
amount=0,
order_date=datetime.now(),
shipping_method="Comp'ed",
order_notes="Comped through IDD",
reference="auto",
payment_source="GBE")
transaction.save()
return True
def verify_performer_app_paid(user_name, conference):
'''
Verifies if a user has paid his or her application fee.
NOTE: This function assumes that there is a record of the application,
saved in the database with a status of "submitted", at the time the check
is performed.
user_name - This is the user name of the user in question.
returns - true if the system recognizes the application submittal fee is
paid
'''
from gbe.models import Act
acts_submitted = 0
# First figure out how many acts this user has purchased
act_fees_purchased = Transaction.objects.filter(
ticket_item__add_on=False,
ticket_item__ticketing_event__act_submission_event=True,
ticket_item__ticketing_event__conference=conference,
purchaser__matched_to_user__username=str(user_name)).count()
# Then figure out how many acts have already been submitted.
acts_submitted = Act.objects.filter(
submitted=True,
b_conference=conference,
performer__contact__user_object__username=user_name).count()
logger.info("Purchased Count: %s Submitted Count: %s" %
(act_fees_purchased, acts_submitted))
return act_fees_purchased > acts_submitted
def verify_vendor_app_paid(user_name, conference):
'''
Verifies user has paid a vendor submittal fee.
NOTE: This function assumes that there is a record of the application,
saved in the database, with a status of "submitted", at the time the check
is performed.
user_name - This is the user name of the user in question.
returns - true if the system recognizes the vendor submittal fee is paid
'''
from gbe.models import Vendor
vendor_apps_submitted = 0
# First figure out how many vendor spots this user has purchased
vendor_fees_purchased = Transaction.objects.filter(
ticket_item__add_on=False,
ticket_item__ticketing_event__vendor_submission_event=True,
ticket_item__ticketing_event__conference=conference,
purchaser__matched_to_user__username=str(user_name)).count()
# Then figure out how many vendor applications have already been submitted.
vendor_apps_submitted = Vendor.objects.filter(
submitted=True,
b_conference=conference,
business__owners__user_object__username=user_name).count()
logger.info("Purchased Count: %s Submitted Count: %s" %
(vendor_fees_purchased, vendor_apps_submitted))
return vendor_fees_purchased > vendor_apps_submitted
def verify_bought_conference(user, conference):
return TicketItem.objects.filter(
Q(ticketing_event__conference=conference),
Q(transaction__purchaser__matched_to_user=user),
Q(ticketing_event__include_conference=True) | Q(
ticketing_event__include_most=True)
).exists()
def get_purchased_tickets(user):
'''
get the tickets purchased by the given profile
'''
ticket_by_conf = []
conferences = Conference.objects.exclude(
status="completed").order_by('status')
for conf in conferences:
tickets = TicketItem.objects.filter(
ticketing_event__conference=conf,
transaction__purchaser__matched_to_user=user).annotate(
number_of_tickets=Count('transaction')).order_by('title')
if tickets:
ticket_by_conf.append({'conference': conf, 'tickets': tickets})
return ticket_by_conf
def get_checklist_items_for_tickets(profile, user_schedule, tickets):
'''
get the checklist items for a purchaser in the BTP
'''
checklist_items = []
transactions = Transaction.objects.filter(
purchaser__matched_to_user=profile.user_object)
for ticket in set(tickets):
items = []
count = transactions.filter(ticket_item=ticket).count()
if count > 0:
for condition in TicketingEligibilityCondition.objects.filter(
tickets=ticket):
if not condition.is_excluded(tickets, user_schedule):
items += [condition.checklistitem]
if len(items) > 0:
checklist_items += [{'ticket': ticket.title,
'count': count,
'items': items}]
return checklist_items
def get_checklist_items_for_roles(user_schedule, tickets):
'''
get the checklist items for the roles a person does in this conference
'''
checklist_items = {}
roles = []
for booking in user_schedule:
if booking.role not in roles:
roles += [booking.role]
for condition in RoleEligibilityCondition.objects.filter(role__in=roles):
if not condition.is_excluded(tickets, user_schedule):
if condition.role in checklist_items:
checklist_items[condition.role] += [condition.checklistitem]
else:
checklist_items[condition.role] = [condition.checklistitem]
return checklist_items
def get_checklist_items(profile, conference, user_schedule):
'''
get the checklist items for a person with a profile
'''
tickets = TicketItem.objects.filter(
ticketing_event__conference=conference,
transaction__purchaser__matched_to_user=profile.user_object).distinct()
ticket_items = get_checklist_items_for_tickets(
profile,
user_schedule,
tickets)
role_items = get_checklist_items_for_roles(user_schedule, tickets)
return (ticket_items, role_items)
def create_ticketing_event(event_id, conference, events=[], display_icon=None):
event = TicketingEvents.objects.create(
event_id=event_id,
conference=conference,
display_icon=display_icon)
if len(events) > 0:
event.linked_events.add(*events)
event.save()
count = import_bpt_ticket_items([event])
return event, count
def get_ticket_form(bid_type, conference, post=None):
form = None
ticket_items = get_fee_list(bid_type, conference)
if ticket_items.filter(is_minimum=True).exists():
minimum = ticket_items.filter(is_minimum=True).order_by(
'cost').first().cost
form = DonationForm(post, initial={'donation_min': minimum,
'donation': minimum})
else:
form = TicketPayForm(post)
form.fields['main_ticket'].queryset = ticket_items.filter(
add_on=False).order_by('cost')
if ticket_items.filter(add_on=True).exists():
form.fields['add_ons'].queryset = ticket_items.filter(
add_on=True).order_by('cost')
else:
form.fields['add_ons'].widget = HiddenInput()
return form
def get_paypal_button(request, total, user_id, number_list, bid_type, bid_id):
paypal_dict = {
"business": PayPalSettings.objects.first().business_email,
"amount": total,
"notify_url": request.build_absolute_uri(reverse('paypal-ipn')),
"invoice": str(datetime.now()),
"custom": "%s-%d-User-%d" % (bid_type, bid_id, user_id),
"return": request.build_absolute_uri(
reverse(
"%s_view" % bid_type.lower(),
urlconf='gbe.urls',
args=[bid_id])),
"cancel_return": request.build_absolute_uri("%s?cancel=paypal" % (
reverse(
"%s_edit" % bid_type.lower(),
urlconf='gbe.urls',
args=[bid_id]))),
"item_name": "%s Fee(s)" % bid_type,
"item_number": number_list,
}
return PayPalPaymentsForm(initial=paypal_dict)
def get_payment_details(request, form, bid_type, bid_id, user_id):
cart = []
paypal_button = None
total = 0
minimum = None
main_ticket = None
number_list = ""
if 'donation' in list(form.cleaned_data.keys()):
cart += [("%s Submission Fee" % bid_type,
form.cleaned_data['donation'])]
total = total + form.cleaned_data['donation']
else:
cart += [(form.cleaned_data['main_ticket'].title,
form.cleaned_data['main_ticket'].cost)]
number_list = str(form.cleaned_data['main_ticket'].id)
total = total + form.cleaned_data['main_ticket'].cost
for item in form.cleaned_data['add_ons']:
cart += [(item.title, item.cost)]
number_list = "%s %d" % (number_list, item.id)
total = total + item.cost
return (
cart,
get_paypal_button(
request,
total,
user_id,
number_list,
bid_type,
bid_id),
total)
| # ticketing_idd.py - Interface Design Description (IDD) between GBE and
# TICKETING modules
# See documentation in https://github.com/bethlakshmi/GBE2/wiki/Ticketing-To-Do
# section: "By Friday - needed for integration"
# - Betty 8/15
from gbe_logging import logger
from ticketing.models import (
Purchaser,
TicketingEvents,
PayPalSettings,
RoleEligibilityCondition,
TicketingEligibilityCondition,
TicketItem,
Transaction,
)
from ticketing.forms import (
DonationForm,
TicketPayForm,
)
from gbe.models import (
Conference,
)
from scheduler.idd import get_roles
from ticketing.brown_paper import *
from ticketing.functions import get_fee_list
from ticketing.brown_paper import import_bpt_ticket_items
from django.db.models import Count
from django.db.models import Q
from datetime import datetime
from django.forms import HiddenInput
from paypal.standard.forms import PayPalPaymentsForm
from django.urls import reverse
def fee_paid(bid_type, user_name, conference):
if bid_type == "Act":
return verify_performer_app_paid(user_name, conference)
elif bid_type == "Vendor":
return verify_vendor_app_paid(user_name, conference)
return True
def comp_act(user, conference):
if not TicketItem.objects.filter(
add_on=False,
ticketing_event__act_submission_event=True,
ticketing_event__conference=conference).exists():
return False
comp_ticket = TicketItem.objects.filter(
add_on=False,
ticketing_event__act_submission_event=True,
ticketing_event__conference=conference).first()
purchaser = Purchaser(
matched_to_user=user,
first_name=user.first_name,
last_name=user.last_name,
email=user.email)
purchaser.save()
transaction = Transaction(
purchaser=purchaser,
ticket_item=comp_ticket,
amount=0,
order_date=datetime.now(),
shipping_method="Comp'ed",
order_notes="Comped through IDD",
reference="auto",
payment_source="GBE")
transaction.save()
return True
def verify_performer_app_paid(user_name, conference):
'''
Verifies if a user has paid his or her application fee.
NOTE: This function assumes that there is a record of the application,
saved in the database with a status of "submitted", at the time the check
is performed.
user_name - This is the user name of the user in question.
returns - true if the system recognizes the application submittal fee is
paid
'''
from gbe.models import Act
acts_submitted = 0
# First figure out how many acts this user has purchased
act_fees_purchased = Transaction.objects.filter(
ticket_item__add_on=False,
ticket_item__ticketing_event__act_submission_event=True,
ticket_item__ticketing_event__conference=conference,
purchaser__matched_to_user__username=str(user_name)).count()
# Then figure out how many acts have already been submitted.
acts_submitted = Act.objects.filter(
submitted=True,
b_conference=conference,
performer__contact__user_object__username=user_name).count()
logger.info("Purchased Count: %s Submitted Count: %s" %
(act_fees_purchased, acts_submitted))
return act_fees_purchased > acts_submitted
def verify_vendor_app_paid(user_name, conference):
'''
Verifies user has paid a vendor submittal fee.
NOTE: This function assumes that there is a record of the application,
saved in the database, with a status of "submitted", at the time the check
is performed.
user_name - This is the user name of the user in question.
returns - true if the system recognizes the vendor submittal fee is paid
'''
from gbe.models import Vendor
vendor_apps_submitted = 0
# First figure out how many vendor spots this user has purchased
vendor_fees_purchased = Transaction.objects.filter(
ticket_item__add_on=False,
ticket_item__ticketing_event__vendor_submission_event=True,
ticket_item__ticketing_event__conference=conference,
purchaser__matched_to_user__username=str(user_name)).count()
# Then figure out how many vendor applications have already been submitted.
vendor_apps_submitted = Vendor.objects.filter(
submitted=True,
b_conference=conference,
business__owners__user_object__username=user_name).count()
logger.info("Purchased Count: %s Submitted Count: %s" %
(vendor_fees_purchased, vendor_apps_submitted))
return vendor_fees_purchased > vendor_apps_submitted
def verify_bought_conference(user, conference):
return TicketItem.objects.filter(
Q(ticketing_event__conference=conference),
Q(transaction__purchaser__matched_to_user=user),
Q(ticketing_event__include_conference=True) | Q(
ticketing_event__include_most=True)
).exists()
def get_purchased_tickets(user):
'''
get the tickets purchased by the given profile
'''
ticket_by_conf = []
conferences = Conference.objects.exclude(
status="completed").order_by('status')
for conf in conferences:
tickets = TicketItem.objects.filter(
ticketing_event__conference=conf,
transaction__purchaser__matched_to_user=user).annotate(
number_of_tickets=Count('transaction')).order_by('title')
if tickets:
ticket_by_conf.append({'conference': conf, 'tickets': tickets})
return ticket_by_conf
def get_checklist_items_for_tickets(profile, user_schedule, tickets):
'''
get the checklist items for a purchaser in the BTP
'''
checklist_items = []
transactions = Transaction.objects.filter(
purchaser__matched_to_user=profile.user_object)
for ticket in set(tickets):
items = []
count = transactions.filter(ticket_item=ticket).count()
if count > 0:
for condition in TicketingEligibilityCondition.objects.filter(
tickets=ticket):
if not condition.is_excluded(tickets, user_schedule):
items += [condition.checklistitem]
if len(items) > 0:
checklist_items += [{'ticket': ticket.title,
'count': count,
'items': items}]
return checklist_items
def get_checklist_items_for_roles(user_schedule, tickets):
'''
get the checklist items for the roles a person does in this conference
'''
checklist_items = {}
roles = []
for booking in user_schedule:
if booking.role not in roles:
roles += [booking.role]
for condition in RoleEligibilityCondition.objects.filter(role__in=roles):
if not condition.is_excluded(tickets, user_schedule):
if condition.role in checklist_items:
checklist_items[condition.role] += [condition.checklistitem]
else:
checklist_items[condition.role] = [condition.checklistitem]
return checklist_items
def get_checklist_items(profile, conference, user_schedule):
'''
get the checklist items for a person with a profile
'''
tickets = TicketItem.objects.filter(
ticketing_event__conference=conference,
transaction__purchaser__matched_to_user=profile.user_object).distinct()
ticket_items = get_checklist_items_for_tickets(
profile,
user_schedule,
tickets)
role_items = get_checklist_items_for_roles(user_schedule, tickets)
return (ticket_items, role_items)
def create_ticketing_event(event_id, conference, events=[], display_icon=None):
event = TicketingEvents.objects.create(
event_id=event_id,
conference=conference,
display_icon=display_icon)
if len(events) > 0:
event.linked_events.add(*events)
event.save()
count = import_bpt_ticket_items([event])
return event, count
def get_ticket_form(bid_type, conference, post=None):
form = None
ticket_items = get_fee_list(bid_type, conference)
if ticket_items.filter(is_minimum=True).exists():
minimum = ticket_items.filter(is_minimum=True).order_by(
'cost').first().cost
form = DonationForm(post, initial={'donation_min': minimum,
'donation': minimum})
else:
form = TicketPayForm(post)
form.fields['main_ticket'].queryset = ticket_items.filter(
add_on=False).order_by('cost')
if ticket_items.filter(add_on=True).exists():
form.fields['add_ons'].queryset = ticket_items.filter(
add_on=True).order_by('cost')
else:
form.fields['add_ons'].widget = HiddenInput()
return form
def get_paypal_button(request, total, user_id, number_list, bid_type, bid_id):
paypal_dict = {
"business": PayPalSettings.objects.first().business_email,
"amount": total,
"notify_url": request.build_absolute_uri(reverse('paypal-ipn')),
"invoice": str(datetime.now()),
"custom": "%s-%d-User-%d" % (bid_type, bid_id, user_id),
"return": request.build_absolute_uri(
reverse(
"%s_view" % bid_type.lower(),
urlconf='gbe.urls',
args=[bid_id])),
"cancel_return": request.build_absolute_uri("%s?cancel=paypal" % (
reverse(
"%s_edit" % bid_type.lower(),
urlconf='gbe.urls',
args=[bid_id]))),
"item_name": "%s Fee(s)" % bid_type,
"item_number": number_list,
}
return PayPalPaymentsForm(initial=paypal_dict)
def get_payment_details(request, form, bid_type, bid_id, user_id):
cart = []
paypal_button = None
total = 0
minimum = None
main_ticket = None
number_list = ""
if 'donation' in list(form.cleaned_data.keys()):
cart += [("%s Submission Fee" % bid_type,
form.cleaned_data['donation'])]
total = total + form.cleaned_data['donation']
else:
cart += [(form.cleaned_data['main_ticket'].title,
form.cleaned_data['main_ticket'].cost)]
number_list = str(form.cleaned_data['main_ticket'].id)
total = total + form.cleaned_data['main_ticket'].cost
for item in form.cleaned_data['add_ons']:
cart += [(item.title, item.cost)]
number_list = "%s %d" % (number_list, item.id)
total = total + item.cost
return (
cart,
get_paypal_button(
request,
total,
user_id,
number_list,
bid_type,
bid_id),
total) | en | 0.940078 | # ticketing_idd.py - Interface Design Description (IDD) between GBE and # TICKETING modules # See documentation in https://github.com/bethlakshmi/GBE2/wiki/Ticketing-To-Do # section: "By Friday - needed for integration" # - Betty 8/15 Verifies if a user has paid his or her application fee. NOTE: This function assumes that there is a record of the application, saved in the database with a status of "submitted", at the time the check is performed. user_name - This is the user name of the user in question. returns - true if the system recognizes the application submittal fee is paid # First figure out how many acts this user has purchased # Then figure out how many acts have already been submitted. Verifies user has paid a vendor submittal fee. NOTE: This function assumes that there is a record of the application, saved in the database, with a status of "submitted", at the time the check is performed. user_name - This is the user name of the user in question. returns - true if the system recognizes the vendor submittal fee is paid # First figure out how many vendor spots this user has purchased # Then figure out how many vendor applications have already been submitted. get the tickets purchased by the given profile get the checklist items for a purchaser in the BTP get the checklist items for the roles a person does in this conference get the checklist items for a person with a profile | 1.964805 | 2 |
conftest.py | adamghill/django-rich-logging | 5 | 6620754 | from pathlib import Path
from django.conf import settings
def pytest_configure():
base_dir = Path(".")
settings.configure(
BASE_DIR=base_dir,
SECRET_KEY="<PASSWORD>",
ROOT_URLCONF="tests.urls",
INSTALLED_APPS=[],
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": ["tests/templates"],
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
],
},
}
],
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.dummy.DummyCache",
}
},
LOGGING={
"version": 1,
"disable_existing_loggers": False,
"formatters": {},
"handlers": {
"django_rich_logging": {
"class": "django_rich_logging.logging.DjangoRequestHandler",
"level": "DEBUG",
},
},
"loggers": {
"django.server": {
"handlers": ["django_rich_logging"],
"level": "INFO",
},
},
},
)
| from pathlib import Path
from django.conf import settings
def pytest_configure():
base_dir = Path(".")
settings.configure(
BASE_DIR=base_dir,
SECRET_KEY="<PASSWORD>",
ROOT_URLCONF="tests.urls",
INSTALLED_APPS=[],
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": ["tests/templates"],
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
],
},
}
],
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.dummy.DummyCache",
}
},
LOGGING={
"version": 1,
"disable_existing_loggers": False,
"formatters": {},
"handlers": {
"django_rich_logging": {
"class": "django_rich_logging.logging.DjangoRequestHandler",
"level": "DEBUG",
},
},
"loggers": {
"django.server": {
"handlers": ["django_rich_logging"],
"level": "INFO",
},
},
},
)
| none | 1 | 1.869052 | 2 | |
metatrain.py | matteodalessio/Reptile-DCGAN | 1 | 6620755 | import torch
from args import get_args
from torch import nn
from torch.autograd import Variable
# Parsing params
args = get_args()
INPUT_SIZE = 784  # 28*28 flattened image size; not referenced in this section -- presumably used elsewhere in the file
REAL_LABEL = 1  # domain-classifier target value for source-domain / "real" samples
FAKE_LABEL = 0  # domain-classifier target value for generated / "fake" samples
# Loss
# MSE on the domain-classification head (LSGAN-style targets); the WGAN
# critic terms below use raw score means instead of this criterion.
criterion = nn.MSELoss()
dom_weight = 1  # weight of the domain-classification loss in the generator objective
adv_weight = 1  # weight of the adversarial (critic) loss in the generator objective
# Compute gradient penalty: (L2_norm(dy/dx) - 1)**2
def gradient_penalty(y, x):
    """Return the WGAN-GP gradient penalty, mean((||dy/dx||_2 - 1)**2).

    Args:
        y: critic output computed from ``x`` (any shape, first dim = batch).
        x: input tensor with ``requires_grad=True`` that ``y`` depends on.

    Returns:
        Scalar tensor: batch mean of ``(||dy/dx||_2 - 1)**2``, where the
        L2 norm is taken over all non-batch dimensions of the gradient.
    """
    # ones_like matches y's device and dtype, so the penalty also works on
    # CPU tensors; the previous torch.ones(y.size()).cuda() hard-required
    # a GPU even though nothing else here is CUDA-specific.
    weight = torch.ones_like(y)
    # create_graph=True keeps the gradient differentiable so the penalty
    # itself can be backpropagated through the discriminator.
    dydx = torch.autograd.grad(
        outputs=y,
        inputs=x,
        grad_outputs=weight,
        retain_graph=True,
        create_graph=True,
        only_inputs=True
    )[0]
    # Flatten per-sample gradients, then take each sample's L2 norm.
    dydx = dydx.view(dydx.size(0), -1)
    dydx_l2norm = torch.sqrt(torch.sum(dydx**2, dim=1))
    return torch.mean((dydx_l2norm - 1)**2)
def train_meta_learner(model_d, model_g, cloned_d, cloned_g, meta_optimizer_d,
                       meta_optimizer_g, full_loader, loader, metal_iteration,
                       metal_epochs, d=True):
    """Run one outer Reptile step for the discriminator/generator pair.

    Clones both networks into "fast" copies, adapts them on a sampled task
    via the inner loop (``do_learning``), then points the slow weights'
    gradients at the fast weights and takes one meta-optimizer step.

    Returns the (discriminator, generator) values produced by
    ``do_learning`` for logging.
    """
    # Constant meta learning rate.  (A linear decay schedule,
    # args.meta_lr * (1 - metal_iteration / metal_epochs), was tried and
    # left disabled in the original code.)
    meta_lr = args.meta_lr
    set_learning_rate(meta_optimizer_d, meta_lr)
    set_learning_rate(meta_optimizer_g, meta_lr)
    # Fast weights: clone each network and give it its own inner optimizer.
    fast_d = model_d.clone()
    fast_g = model_g.clone()
    inner_opt_d = get_optimizer(fast_d)
    inner_opt_g = get_optimizer(fast_g)
    # Endless iterators over the full data and the sampled base task.
    full_train_iter = make_infinite(full_loader)
    train_iter = make_infinite(loader)
    # Inner-loop adaptation of the fast weights on the sampled task.
    ret_values_d, ret_values_g = do_learning(
        fast_d, fast_g, inner_opt_d, inner_opt_g, full_train_iter,
        train_iter, args.iterations, d=d)
    # Reptile meta update: gradient of the slow weights is set to
    # (slow - fast), then the meta optimizers apply it.
    model_d.point_grad_to(fast_d)
    model_g.point_grad_to(fast_g)
    meta_optimizer_d.step()
    meta_optimizer_g.step()
    return ret_values_d, ret_values_g
def Discriminator_training( batch_size, optimizer, model_d, model_g, full_x, train_x, train_y, noise, label):
    """Run one discriminator/critic update step.

    The loss combines a WGAN-style critic term (-mean(real) + mean(fake)),
    MSE domain-classification terms (criterion is nn.MSELoss) on the task
    batch and on full_x, and a gradient penalty on real/fake interpolates;
    the real-domain and penalty terms are weighted by 10.

    NOTE(review): *train_y* is accepted but never used.

    :return: (errD_real, errD_fake, fakeD_mean, realD_mean)
    """
    train_x = train_x.cuda()
    full_x = full_x.cuda()
    label_real = Variable(label).cuda()
    # Discriminator training, real examples
    output, dom_out = model_d(train_x)
    optimizer.zero_grad()
    errD_real = -output.mean()
    label_real.resize_(dom_out.data.size()).fill_(REAL_LABEL).cuda()
    err_dom_real = criterion(dom_out, label_real)
    realD_mean = output.data.cpu().mean()
    # Fresh latent noise for this step.
    noise.resize_(batch_size, 100, 1, 1).normal_(0, 1)
    [noisev] = Variable_([noise])
    # Fake examples
    g_out = model_g(noisev)
    output, _ = model_d(g_out.detach())  # detach: no backprop into G here
    errD_fake = output.mean()
    fakeD_mean = output.data.cpu().mean()
    # Compute loss for gradient penalty
    alpha = torch.rand(train_x.size(0), 1, 1, 1).cuda()
    x_hat = (alpha * train_x.data + (1 - alpha) * g_out.data).requires_grad_(True)
    out_src, _ = model_d(x_hat)
    d_loss_gp = gradient_penalty(out_src, x_hat)
    # Domain
    _, out_dom_fake = model_d(full_x)
    label_fake = torch.FloatTensor(args.batch_size).cuda().fill_(FAKE_LABEL)
    label_fake.resize_(out_dom_fake.data.size())
    err_dom_fake = criterion(out_dom_fake, label_fake)
    loss = errD_real + 10 * err_dom_real + errD_fake + err_dom_fake + 10*d_loss_gp
    loss.backward()
    # Next
    optimizer.step()
    return errD_real, errD_fake, fakeD_mean, realD_mean
def Generator_training(batch_size, optimizer, model_d, model_g, noise, label):
    """Run one generator update step.

    The generator is pushed to maximize the critic score on its samples
    (adversarial term) and to drive the discriminator's domain head toward
    REAL_LABEL (MSE term); the two terms are mixed via adv_weight/dom_weight.

    :return: the adversarial loss term ``err`` only
    """
    noise.resize_(batch_size, 100, 1, 1).normal_(0, 1)
    [noisev] = Variable_([noise])
    label_real = Variable(label).cuda()
    # GAN training
    g_out = model_g(noisev)
    output, out_dom_real = model_d(g_out)
    err = - output.mean()
    label_real.resize_(out_dom_real.data.size()).fill_(REAL_LABEL).cuda()
    err_dom_real = criterion(out_dom_real, label_real)
    g_err = adv_weight * err + dom_weight * err_dom_real
    optimizer.zero_grad()
    g_err.backward()
    # Next
    optimizer.step()
    return err
def do_learning(model_d, model_g, optimizer_d, optimizer_g, full_train_iter, train_iter,iterations, d=True):
    """Inner training loop: each iteration runs 5 discriminator steps then 1
    generator step, drawing batches from the two infinite iterators.

    Batches whose size is not a multiple of args.batch_size, or where the
    task batch and full batch differ in size, are skipped.

    NOTE(review): parameter *d* is unused.  The outer-level ``continue``
    skips ``it += 1``; if the skip conditions always hold, ret_values_d /
    ret_values_g are never bound and the return raises NameError.  Also
    ``iter()`` around the infinite generators is redundant, and the fetch at
    the top of the while body is immediately overwritten by the for loop.

    :return: last results of (Discriminator_training, Generator_training)
    """
    it = 0
    while(it < iterations):
        noise = torch.FloatTensor(args.batch_size, 100, 1, 1).cuda()
        label = torch.FloatTensor(args.batch_size).cuda()
        train_x,train_y = next(iter(train_iter))
        full_x,_ = next(iter(full_train_iter))
        # 5 critic updates per generator update.
        for i in range(5):
            noise = torch.FloatTensor(args.batch_size, 100, 1, 1).cuda()
            label = torch.FloatTensor(args.batch_size).cuda()
            train_x,train_y = next(iter(train_iter))
            full_x,_ = next(iter(full_train_iter))
            # Stop condition
            actual_batch_size = train_x.size(0)
            if actual_batch_size % args.batch_size != 0:
                continue
            if train_x.size(0) != full_x.size(0):
                continue
            ret_values_d = Discriminator_training(
                actual_batch_size,
                optimizer_d, model_d, model_g,
                full_x,
                train_x, train_y,
                noise, label
            )
        noise = torch.FloatTensor(args.batch_size, 100, 1, 1).cuda()
        label = torch.FloatTensor(args.batch_size).cuda()
        train_x,train_y = next(iter(train_iter))
        full_x,_ = next(iter(full_train_iter))
        # Stop condition
        actual_batch_size = train_x.size(0)
        if actual_batch_size % args.batch_size != 0:
            continue
        if train_x.size(0) != full_x.size(0):
            continue
        # Run Generator training
        ret_values_g = Generator_training(
            actual_batch_size,
            optimizer_g, model_d, model_g,
            noise, label
        )
        it += 1
    return ret_values_d, ret_values_g
def do_evaluation(model_g, fixed_noise):
    """Generate samples from *model_g* for *fixed_noise* and move them to the CPU."""
    return model_g(fixed_noise).cpu()
def test_meta_learner(model_g, model_d, full_loader, loader, fixed_noise):
    """Meta-test: fine-tune clones of the models on the test task for
    args.test_iterations inner steps, then generate samples from the adapted
    generator for *fixed_noise*.  The meta-models themselves are untouched.

    :return: generated samples on the CPU (see do_evaluation)
    """
    full_test_iter = make_infinite(full_loader)
    test_iter = make_infinite(loader)
    # Adapt throw-away clones so meta-parameters are preserved.
    net_d = model_d.clone()
    optimizer_d = get_optimizer(net_d)
    net_g = model_g.clone()
    optimizer_g = get_optimizer(net_g)
    do_learning(net_d, net_g, optimizer_d, optimizer_g, full_test_iter, test_iter,
                args.test_iterations, d=True)
    return do_evaluation(net_g, fixed_noise)
# Utils
def get_optimizer(net, state=None):
    """Build an Adam optimizer for *net* (lr from args, betas 0.5/0.9),
    optionally restoring a previously saved optimizer *state*."""
    adam = torch.optim.Adam(net.parameters(), lr=args.lr, betas=(0.5, 0.9))
    if state is None:
        return adam
    adam.load_state_dict(state)
    return adam
def set_learning_rate(optimizer, lr):
    """Overwrite the learning rate of every parameter group in *optimizer*."""
    for group in optimizer.param_groups:
        group['lr'] = lr
def initialize_meta_optimizer(model):
    """Build the outer-loop (meta) optimizer: SGD with momentum 0.5 at args.meta_lr."""
    return torch.optim.SGD(model.parameters(), lr=args.meta_lr, momentum=0.5)
def make_infinite(dataloader):
    """Yield items from *dataloader* forever, restarting it on exhaustion."""
    while True:
        yield from dataloader
# Make variable cuda depending on the arguments
def Variable_(tensor, *args_, **kwargs):
    """Recursively wrap tensors in autograd Variables and move them to CUDA.

    Lists/tuples are converted element-wise (a tuple input comes back as a
    list) and dict values are converted in place of a new dict; anything else
    is treated as a tensor.  NOTE(review): the list/tuple test uses exact
    ``type()`` matching, so subclasses fall through to the tensor branch.
    """
    # Unroll list or tuple
    if type(tensor) in (list, tuple):
        return [Variable_(t, *args_, **kwargs) for t in tensor]
    # Unroll dictionary
    if isinstance(tensor, dict):
        return {key: Variable_(v, *args_, **kwargs)
                for key, v in tensor.items()}
    # Normal tensor
    return Variable(tensor, *args_, **kwargs).cuda()
| import torch
from args import get_args
from torch import nn
from torch.autograd import Variable
# Parsing params
args = get_args()
INPUT_SIZE = 784
REAL_LABEL = 1
FAKE_LABEL = 0
# Loss
criterion = nn.MSELoss()
dom_weight = 1
adv_weight = 1
# Compute gradient penalty: (L2_norm(dy/dx) - 1)**2
def gradient_penalty(y, x):
weight = torch.ones(y.size()).cuda()
dydx = torch.autograd.grad(
outputs=y,
inputs=x,
grad_outputs=weight,
retain_graph=True,
create_graph=True,
only_inputs=True
)[0]
dydx = dydx.view(dydx.size(0), -1)
dydx_l2norm = torch.sqrt(torch.sum(dydx**2, dim=1))
return torch.mean((dydx_l2norm - 1)**2)
def train_meta_learner( model_d, model_g, cloned_d, cloned_g, meta_optimizer_d, meta_optimizer_g, full_loader, loader, metal_iteration, metal_epochs, d=True):
# Main loop
# Update learning rate
meta_lr = args.meta_lr #args.meta_lr * (1. - metal_iteration/float(metal_epochs))
set_learning_rate(meta_optimizer_d, meta_lr)
set_learning_rate(meta_optimizer_g, meta_lr)
# Clone models
net_d = model_d.clone()
optimizer_d = get_optimizer(net_d)
net_g = model_g.clone()
optimizer_g = get_optimizer(net_g)
# Sample base task from Meta-Train
full_train_iter = make_infinite(full_loader)
train_iter = make_infinite(loader)
# Update fast net
ret_values_d, ret_values_g = do_learning(
net_d, net_g, optimizer_d, optimizer_g, full_train_iter, train_iter,
args.iterations, d=d)
model_d.point_grad_to(net_d)
model_g.point_grad_to(net_g)
meta_optimizer_d.step()
meta_optimizer_g.step()
return ret_values_d, ret_values_g
def Discriminator_training( batch_size, optimizer, model_d, model_g, full_x, train_x, train_y, noise, label):
train_x = train_x.cuda()
full_x = full_x.cuda()
label_real = Variable(label).cuda()
# Discriminator training, real examples
output, dom_out = model_d(train_x)
optimizer.zero_grad()
errD_real = -output.mean()
label_real.resize_(dom_out.data.size()).fill_(REAL_LABEL).cuda()
err_dom_real = criterion(dom_out, label_real)
realD_mean = output.data.cpu().mean()
noise.resize_(batch_size, 100, 1, 1).normal_(0, 1)
[noisev] = Variable_([noise])
# Fake examples
g_out = model_g(noisev)
output, _ = model_d(g_out.detach())
errD_fake = output.mean()
fakeD_mean = output.data.cpu().mean()
# Compute loss for gradient penalty
alpha = torch.rand(train_x.size(0), 1, 1, 1).cuda()
x_hat = (alpha * train_x.data + (1 - alpha) * g_out.data).requires_grad_(True)
out_src, _ = model_d(x_hat)
d_loss_gp = gradient_penalty(out_src, x_hat)
# Domain
_, out_dom_fake = model_d(full_x)
label_fake = torch.FloatTensor(args.batch_size).cuda().fill_(FAKE_LABEL)
label_fake.resize_(out_dom_fake.data.size())
err_dom_fake = criterion(out_dom_fake, label_fake)
loss = errD_real + 10 * err_dom_real + errD_fake + err_dom_fake + 10*d_loss_gp
loss.backward()
# Next
optimizer.step()
return errD_real, errD_fake, fakeD_mean, realD_mean
def Generator_training(batch_size, optimizer, model_d, model_g, noise, label):
noise.resize_(batch_size, 100, 1, 1).normal_(0, 1)
[noisev] = Variable_([noise])
label_real = Variable(label).cuda()
# GAN training
g_out = model_g(noisev)
output, out_dom_real = model_d(g_out)
err = - output.mean()
label_real.resize_(out_dom_real.data.size()).fill_(REAL_LABEL).cuda()
err_dom_real = criterion(out_dom_real, label_real)
g_err = adv_weight * err + dom_weight * err_dom_real
optimizer.zero_grad()
g_err.backward()
# Next
optimizer.step()
return err
def do_learning(model_d, model_g, optimizer_d, optimizer_g, full_train_iter, train_iter,iterations, d=True):
it = 0
while(it < iterations):
noise = torch.FloatTensor(args.batch_size, 100, 1, 1).cuda()
label = torch.FloatTensor(args.batch_size).cuda()
train_x,train_y = next(iter(train_iter))
full_x,_ = next(iter(full_train_iter))
for i in range(5):
noise = torch.FloatTensor(args.batch_size, 100, 1, 1).cuda()
label = torch.FloatTensor(args.batch_size).cuda()
train_x,train_y = next(iter(train_iter))
full_x,_ = next(iter(full_train_iter))
# Stop condition
actual_batch_size = train_x.size(0)
if actual_batch_size % args.batch_size != 0:
continue
if train_x.size(0) != full_x.size(0):
continue
ret_values_d = Discriminator_training(
actual_batch_size,
optimizer_d, model_d, model_g,
full_x,
train_x, train_y,
noise, label
)
noise = torch.FloatTensor(args.batch_size, 100, 1, 1).cuda()
label = torch.FloatTensor(args.batch_size).cuda()
train_x,train_y = next(iter(train_iter))
full_x,_ = next(iter(full_train_iter))
# Stop condition
actual_batch_size = train_x.size(0)
if actual_batch_size % args.batch_size != 0:
continue
if train_x.size(0) != full_x.size(0):
continue
# Run Generator training
ret_values_g = Generator_training(
actual_batch_size,
optimizer_g, model_d, model_g,
noise, label
)
it += 1
return ret_values_d, ret_values_g
def do_evaluation(model_g, fixed_noise):
g_out = model_g(fixed_noise)
return g_out.cpu()
def test_meta_learner(model_g, model_d, full_loader, loader, fixed_noise):
full_test_iter = make_infinite(full_loader)
test_iter = make_infinite(loader)
net_d = model_d.clone()
optimizer_d = get_optimizer(net_d)
net_g = model_g.clone()
optimizer_g = get_optimizer(net_g)
do_learning(net_d, net_g, optimizer_d, optimizer_g, full_test_iter, test_iter,
args.test_iterations, d=True)
return do_evaluation(net_g, fixed_noise)
# Utils
def get_optimizer(net, state=None):
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, betas=(0.5, 0.9))
if state is not None:
optimizer.load_state_dict(state)
return optimizer
def set_learning_rate(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def initialize_meta_optimizer(model):
return torch.optim.SGD(model.parameters(), lr=args.meta_lr, momentum=0.5)
def make_infinite(dataloader):
while True:
for x in dataloader:
yield x
# Make variable cuda depending on the arguments
def Variable_(tensor, *args_, **kwargs):
# Unroll list or tuple
if type(tensor) in (list, tuple):
return [Variable_(t, *args_, **kwargs) for t in tensor]
# Unroll dictionary
if isinstance(tensor, dict):
return {key: Variable_(v, *args_, **kwargs)
for key, v in tensor.items()}
# Normal tensor
return Variable(tensor, *args_, **kwargs).cuda()
| en | 0.601056 | # Parsing params # Loss # Compute gradient penalty: (L2_norm(dy/dx) - 1)**2 # Main loop # Update learning rate #args.meta_lr * (1. - metal_iteration/float(metal_epochs)) # Clone models # Sample base task from Meta-Train # Update fast net # Discriminator training, real examples # Fake examples # Compute loss for gradient penalty # Domain # Next # GAN training # Next # Stop condition # Stop condition # Run Generator training # Utils # Make variable cuda depending on the arguments # Unroll list or tuple # Unroll dictionary # Normal tensor | 2.338115 | 2 |
authz_group/authz_implementation/all_ok.py | uw-asa/authz_group | 0 | 6620756 | class AllOK():
def is_member_of_group(self, user_name, group_source_id):
return True
| class AllOK():
def is_member_of_group(self, user_name, group_source_id):
return True
| none | 1 | 2.076484 | 2 | |
exploits/household/exploit_2/fake_jwt.py | HackerDom/ructfe-2019 | 23 | 6620757 | # pip install pyjwt, cryptography...
import jwt
def get_fake_jwt_token(victim_ip, user_id, private_key=None):
    """Forge a JWT access token impersonating *user_id* for the service at
    *victim_ip* (exploit helper).

    Builds a payload mimicking the Household identity server's claims and
    signs it with PS512 using *private_key*; when no key is supplied it is
    read from ``private.pem`` in the working directory.

    NOTE(review): "nbf"/"exp"/"auth_time" are string values here; RFC 7519
    defines them as numeric dates -- confirm the target accepts strings.

    :param victim_ip: value placed in the ``iss`` (issuer) claim
    :param user_id: value placed in the ``sub`` (subject) claim
    :param private_key: PEM-encoded RSA private key, or None to read the file
    :return: the encoded JWT as produced by ``jwt.encode``
    """
    payload = {
        "nbf": "1574490000",
        "exp": "1999999999",
        "iss": victim_ip,
        "aud": "HouseholdAPI",
        "client_id": "Household",
        "sub": user_id,
        "auth_time": "1574492300",
        "idp": "local",
        "scope": [
            "openid",
            "profile",
            "HouseholdAPI"
        ],
        "amr": [
            "<PASSWORD>"
        ]
    }
    if private_key is None:
        with open('private.pem', 'r') as f:
            private_key = f.read()
    encoded = jwt.encode(payload, private_key, algorithm='PS512')
    return encoded
| # pip install pyjwt, cryptography...
import jwt
def get_fake_jwt_token(victim_ip, user_id, private_key=None):
payload = {
"nbf": "1574490000",
"exp": "1999999999",
"iss": victim_ip,
"aud": "HouseholdAPI",
"client_id": "Household",
"sub": user_id,
"auth_time": "1574492300",
"idp": "local",
"scope": [
"openid",
"profile",
"HouseholdAPI"
],
"amr": [
"<PASSWORD>"
]
}
if private_key is None:
with open('private.pem', 'r') as f:
private_key = f.read()
encoded = jwt.encode(payload, private_key, algorithm='PS512')
return encoded
| en | 0.523296 | # pip install pyjwt, cryptography... | 2.909421 | 3 |
fixtures/python_output/post_image.py | martinsirbe/curlconverter | 4,955 | 6620758 | <filename>fixtures/python_output/post_image.py
import requests
files = {
'image': ('image.jpg', open('image.jpg', 'rb')),
}
response = requests.post('http://example.com/targetservice', files=files)
| <filename>fixtures/python_output/post_image.py
import requests
files = {
'image': ('image.jpg', open('image.jpg', 'rb')),
}
response = requests.post('http://example.com/targetservice', files=files)
| none | 1 | 1.975209 | 2 | |
team/add_team_city.py | xiaolinzi-xl/nba-player | 1 | 6620759 | #!usr/bin/env python
#-*- coding:utf-8 _*-
"""
Enrich NBA team records with a city field.

Reads team ids from team_1.json, queries the stats.nba.com teamdetails
endpoint for each team through a Selenium-driven Chrome browser, extracts
resultSets[0].rowSet[0][4] (treated as the city) and writes the augmented
records to team_all.json.

@author:xd
@file: add_team_city.py
@time: 2018/04/24
"""
from selenium import webdriver
import json
import os
from scrapy.selector import Selector
# Input/output files live next to this script.
dir_name = os.path.dirname(__file__)
file_name = os.path.join(dir_name,'team_1.json')
with open(file_name,'r') as f:
    data = json.load(f)
base_url = 'https://stats.nba.com/stats/teamdetails?teamID=%s'
browser = webdriver.Chrome()
team_datas = []
for team in data:
    url = base_url % team['id']
    browser.get(url)
    # The endpoint returns raw JSON, which Chrome renders inside a <pre> tag.
    html = Selector(text=browser.page_source)
    team_data = html.css("pre::text").extract_first()
    team_data = json.loads(team_data)
    city = team_data['resultSets'][0]['rowSet'][0][4]
    res = dict(team)  # shallow copy so the loaded record is not mutated
    res['city'] = city
    team_datas.append(res)
file_name = os.path.join(dir_name,'team_all.json')
with open(file_name, 'w') as f:
    f.write(json.dumps(team_datas))
browser.close()
| #!usr/bin/env python
#-*- coding:utf-8 _*-
"""
@author:xd
@file: add_team_city.py
@time: 2018/04/24
"""
from selenium import webdriver
import json
import os
from scrapy.selector import Selector
dir_name = os.path.dirname(__file__)
file_name = os.path.join(dir_name,'team_1.json')
with open(file_name,'r') as f:
data = json.load(f)
base_url = 'https://stats.nba.com/stats/teamdetails?teamID=%s'
browser = webdriver.Chrome()
team_datas = []
for team in data:
url = base_url % team['id']
browser.get(url)
html = Selector(text=browser.page_source)
team_data = html.css("pre::text").extract_first()
team_data = json.loads(team_data)
city = team_data['resultSets'][0]['rowSet'][0][4]
res = dict(team)
res['city'] = city
team_datas.append(res)
file_name = os.path.join(dir_name,'team_all.json')
with open(file_name, 'w') as f:
f.write(json.dumps(team_datas))
browser.close()
| en | 0.480385 | #!usr/bin/env python #-*- coding:utf-8 _*- @author:xd @file: add_team_city.py @time: 2018/04/24 | 2.695085 | 3 |
model_evaluation.py | dlkt-review-and-empirical-evaluation/dlkt-review-and-empirical-evaluation | 0 | 6620760 | <gh_stars>0
from sklearn.metrics import accuracy_score as acc, roc_auc_score as auc, f1_score as f1, matthews_corrcoef as mc, \
mean_squared_error as mse, precision_score as prec, recall_score as recall, log_loss
from tensorflow.keras.callbacks import Callback
import numpy as np
# from tensorflow.python.keras.utils import layer_utils
# from custom_metrics import bic, aic, aicc
metrics = ('acc', 'auc', 'prec', 'recall', 'f1', 'mcc',
'rmse', 'log-loss',
# 'aic', 'aicc', 'bic',
)
def model_evaluate(test_gen, model, data=None):
    """Evaluate *model* and return a dict of classification metrics.

    When *data* is None, batches are pulled from *test_gen* until it reports
    done; otherwise *data* is a (features, labels) pair evaluated in one call.
    Timesteps whose label is -1.0 are treated as padding and excluded.  When
    ``test_gen.onehot_output`` is set, labels are assumed to hold a one-hot
    skill vector in columns [0, n_skills) with the target label at column
    ``n_skills``, and predictions are reduced to the target skill's value.

    :param test_gen: generator exposing reset/done/generate_batch plus
        n_skills and onehot_output
    :param model: Keras-style model (predict / predict_on_batch)
    :param data: optional (features, labels) tuple that bypasses batching
    :return: dict mapping each name in ``metrics`` to its computed value
    """
    def unonehot_targets(labels, preds):
        # Reduce one-hot (batch, time, n_skills[+1]) labels/preds to scalars.
        target_skills = labels[:, :, 0:test_gen.n_skills]
        target_labels = labels[:, :, test_gen.n_skills]
        target_preds = np.sum(preds * target_skills, axis=2)
        return target_labels, target_preds
    def predict_in_batches():
        # Accumulate flattened labels/predictions batch by batch.
        test_gen.reset()
        labels, predictions = None, None
        while not test_gen.done:
            batch_features, batch_labels = test_gen.generate_batch()
            batch_predictions = model.predict_on_batch(batch_features)
            batch_labels = np.squeeze(batch_labels)
            batch_predictions = np.squeeze(batch_predictions)
            if test_gen.onehot_output:
                batch_labels, batch_predictions = unonehot_targets(batch_labels, batch_predictions)
            labels = batch_labels.ravel() if labels is None else np.concatenate([labels, batch_labels.ravel()], axis=0)
            predictions = batch_predictions.ravel() if predictions is None else np.concatenate(
                [predictions, batch_predictions.ravel()], axis=0)
        return labels, predictions
    def predict():
        # Single-shot prediction over the provided (features, labels) pair.
        features, labels = data
        predictions = model.predict(features)
        labels = np.squeeze(labels)
        predictions = np.squeeze(predictions)
        if test_gen.onehot_output:
            labels, predictions = unonehot_targets(labels, predictions)
        return labels, predictions
    if data is None:
        labels, predictions = predict_in_batches()
    else:
        labels, predictions = predict()
    y_true, y_pred = labels, predictions
    # padding_i = np.flatnonzero(y_true == -1.0).tolist()
    # y_t = y_true.ravel()[padding_i]
    # assert np.all(y_t == -1.0)
    # Drop padded timesteps (label -1.0) before scoring.
    not_padding_i = np.flatnonzero(y_true != -1.0).tolist()
    y_true = y_true.ravel()[not_padding_i]
    y_pred = y_pred.ravel()[not_padding_i]
    # assert np.all(y_true >= 0.0)
    # Threshold probabilities at 0.5 for the binary metrics.
    bin_pred = y_pred.round()
    # mses = mse(y_true, y_pred)
    # n_params = layer_utils.count_params(model.trainable_weights)
    results = {}
    results['acc'] = acc(y_true, bin_pred)
    results['auc'] = auc(y_true, y_pred)
    results['prec'] = prec(y_true, bin_pred)
    results['recall'] = recall(y_true, bin_pred)
    results['f1'] = f1(y_true, bin_pred)
    results['mcc'] = mc(y_true, bin_pred)
    results['rmse'] = np.sqrt(mse(y_true, y_pred))
    # results['aic'] = aic(mses, len(y_true), n_params)
    # results['aicc'] = aicc(mses, len(y_true), n_params)
    # results['bic'] = bic(mses, len(y_true), n_params)
    results['log-loss'] = log_loss(y_true, y_pred)
    return results
class MetricsCallback(Callback):
    """Keras callback that runs ``model_evaluate`` on the validation data at
    the end of every epoch and records each score as ``logs['val_<metric>']``.

    NOTE(review): the mutable ``logs={}`` defaults mirror the Keras callback
    signature but are a Python anti-pattern; Keras always passes logs.
    """
    def __init__(self, val_data_gen, val_data=None):
        super(MetricsCallback, self).__init__()
        assert (metrics is not None)
        self.val_data = val_data          # optional (features, labels) tuple
        self.val_data_gen = val_data_gen  # validation batch generator
    def on_train_begin(self, logs={}):
        # Register the val_* names so Keras displays/records these metrics.
        for metric in metrics:
            if not 'metrics' in self.params: return
            self.params['metrics'].append('val_' + metric)
    def on_epoch_end(self, epoch, logs={}):
        results = model_evaluate(self.val_data_gen, self.model, self.val_data)
        for metric in metrics:
            logs['val_' + metric] = results[metric]
| from sklearn.metrics import accuracy_score as acc, roc_auc_score as auc, f1_score as f1, matthews_corrcoef as mc, \
mean_squared_error as mse, precision_score as prec, recall_score as recall, log_loss
from tensorflow.keras.callbacks import Callback
import numpy as np
# from tensorflow.python.keras.utils import layer_utils
# from custom_metrics import bic, aic, aicc
metrics = ('acc', 'auc', 'prec', 'recall', 'f1', 'mcc',
'rmse', 'log-loss',
# 'aic', 'aicc', 'bic',
)
def model_evaluate(test_gen, model, data=None):
def unonehot_targets(labels, preds):
target_skills = labels[:, :, 0:test_gen.n_skills]
target_labels = labels[:, :, test_gen.n_skills]
target_preds = np.sum(preds * target_skills, axis=2)
return target_labels, target_preds
def predict_in_batches():
test_gen.reset()
labels, predictions = None, None
while not test_gen.done:
batch_features, batch_labels = test_gen.generate_batch()
batch_predictions = model.predict_on_batch(batch_features)
batch_labels = np.squeeze(batch_labels)
batch_predictions = np.squeeze(batch_predictions)
if test_gen.onehot_output:
batch_labels, batch_predictions = unonehot_targets(batch_labels, batch_predictions)
labels = batch_labels.ravel() if labels is None else np.concatenate([labels, batch_labels.ravel()], axis=0)
predictions = batch_predictions.ravel() if predictions is None else np.concatenate(
[predictions, batch_predictions.ravel()], axis=0)
return labels, predictions
def predict():
features, labels = data
predictions = model.predict(features)
labels = np.squeeze(labels)
predictions = np.squeeze(predictions)
if test_gen.onehot_output:
labels, predictions = unonehot_targets(labels, predictions)
return labels, predictions
if data is None:
labels, predictions = predict_in_batches()
else:
labels, predictions = predict()
y_true, y_pred = labels, predictions
# padding_i = np.flatnonzero(y_true == -1.0).tolist()
# y_t = y_true.ravel()[padding_i]
# assert np.all(y_t == -1.0)
not_padding_i = np.flatnonzero(y_true != -1.0).tolist()
y_true = y_true.ravel()[not_padding_i]
y_pred = y_pred.ravel()[not_padding_i]
# assert np.all(y_true >= 0.0)
bin_pred = y_pred.round()
# mses = mse(y_true, y_pred)
# n_params = layer_utils.count_params(model.trainable_weights)
results = {}
results['acc'] = acc(y_true, bin_pred)
results['auc'] = auc(y_true, y_pred)
results['prec'] = prec(y_true, bin_pred)
results['recall'] = recall(y_true, bin_pred)
results['f1'] = f1(y_true, bin_pred)
results['mcc'] = mc(y_true, bin_pred)
results['rmse'] = np.sqrt(mse(y_true, y_pred))
# results['aic'] = aic(mses, len(y_true), n_params)
# results['aicc'] = aicc(mses, len(y_true), n_params)
# results['bic'] = bic(mses, len(y_true), n_params)
results['log-loss'] = log_loss(y_true, y_pred)
return results
class MetricsCallback(Callback):
def __init__(self, val_data_gen, val_data=None):
super(MetricsCallback, self).__init__()
assert (metrics is not None)
self.val_data = val_data
self.val_data_gen = val_data_gen
def on_train_begin(self, logs={}):
for metric in metrics:
if not 'metrics' in self.params: return
self.params['metrics'].append('val_' + metric)
def on_epoch_end(self, epoch, logs={}):
results = model_evaluate(self.val_data_gen, self.model, self.val_data)
for metric in metrics:
logs['val_' + metric] = results[metric] | en | 0.324672 | # from tensorflow.python.keras.utils import layer_utils # from custom_metrics import bic, aic, aicc # 'aic', 'aicc', 'bic', # padding_i = np.flatnonzero(y_true == -1.0).tolist() # y_t = y_true.ravel()[padding_i] # assert np.all(y_t == -1.0) # assert np.all(y_true >= 0.0) # mses = mse(y_true, y_pred) # n_params = layer_utils.count_params(model.trainable_weights) # results['aic'] = aic(mses, len(y_true), n_params) # results['aicc'] = aicc(mses, len(y_true), n_params) # results['bic'] = bic(mses, len(y_true), n_params) | 2.272738 | 2 |
classes/detective_solver.py | Togohogo1/Mathtermind | 4 | 6620761 | from classes.classic_solver import ClassicSolver
class DetectiveSolver(ClassicSolver):
    """Solver variant that withholds solutions until a guess is verified."""
    def gen_embed(self):
        """Populate ``self.sol_panel`` with all valid solutions, listing them
        only when there are 64 or fewer; otherwise just report the count.

        Inherited state used here (from ClassicSolver -- TODO confirm):
        ``rounds``, ``verified``, ``sol_panel``, ``valid_cnt``, ``valid``.
        """
        # If guesses so far are unverified
        if len(self.rounds) <= 4 and True not in self.verified:
            self.sol_panel.title = f"Valid Solutions Unknown"
            self.sol_panel.description = f"No verified guesses exist for solutions to be determined. Guesses 5 to 8 are guaranteed to be verified and guesses 1 to 4 can be verified by using the `;id` command."
            return
        self.sol_panel.title = f"{self.valid_cnt} Valid Solution{'s'*(self.valid_cnt != 1)}"
        if self.valid_cnt > 64:
            self.sol_panel.description = f"Solutions will not be listed since there are over 64 possible valid combinations"
        else:
            # Discord spoiler-wrapped (||...||) comma-separated solution list.
            self.sol_panel.description = f"||{', '.join(self.valid)}||"
| from classes.classic_solver import ClassicSolver
class DetectiveSolver(ClassicSolver):
def gen_embed(self):
"""Creates an embed displaying all possible solutions of valid guesses if there are 64 or less"""
# If guesses so far are unverified
if len(self.rounds) <= 4 and True not in self.verified:
self.sol_panel.title = f"Valid Solutions Unknown"
self.sol_panel.description = f"No verified guesses exist for solutions to be determined. Guesses 5 to 8 are guaranteed to be verified and guesses 1 to 4 can be verified by using the `;id` command."
return
self.sol_panel.title = f"{self.valid_cnt} Valid Solution{'s'*(self.valid_cnt != 1)}"
if self.valid_cnt > 64:
self.sol_panel.description = f"Solutions will not be listed since there are over 64 possible valid combinations"
else:
self.sol_panel.description = f"||{', '.join(self.valid)}||"
| en | 0.797997 | Creates an embed displaying all possible solutions of valid guesses if there are 64 or less # If guesses so far are unverified | 3.047751 | 3 |
pw_dict_10.py | pixelpip/Learning-Lib | 0 | 6620762 | <reponame>pixelpip/Learning-Lib<filename>pw_dict_10.py<gh_stars>0
import sys

# Sample stored credentials, keyed by company name.
rk='email'
moto='email_2'
pword_dict={'anaconda': {'username': 'none', 'login': rk, 'password': '<PASSWORD>'},
            'bank': {'username': 'none', 'login': rk, 'password': '<PASSWORD>'}}


def pword(company):
    """Print every stored credential whose company name starts with *company*.

    Prints "not match found" when nothing matches.

    :param company: prefix to match against the company names in pword_dict
    """
    matched = False  # sentry: did any entry match?
    for key in pword_dict:  # iterate keys directly instead of .keys()
        if key.startswith(company):
            matched = True
            print(key, '\n',
                  'Username:', pword_dict[key]['username'], '\n',
                  'login:', pword_dict[key]['login'], '\n',
                  'password:', pword_dict[key]['password'], '\n')
    if not matched:
        print ("not match found")


try:
    pword(sys.argv[1])
except IndexError:  # only catch the missing-argument case, not all errors
    print ("error:--> ::nothing entered::")
| import sys
rk='email'
moto='email_2'
pword_dict={'anaconda': {'username': 'none', 'login': rk, 'password': '<PASSWORD>'},
'bank': {'username': 'none', 'login': rk, 'password': '<PASSWORD>'}}
def pword(company):
matched = 'not found' #set sentry variable
for keys in pword_dict.keys():
if keys.startswith(company):
matched = 'found' #set sentry to matched
print(keys, '\n',
'Username:', pword_dict[keys]['username'], '\n',
'login:', pword_dict[keys]['login'], '\n',
'password:', pword_dict[keys]['password'], '\n')
if matched == 'not found':
print ("not match found")
try:
pword(sys.argv[1])
except:
print ("error:--> ::nothing entered::") | en | 0.783078 | #set sentry variable #set sentry to matched | 3.429095 | 3 |
mobile/mobile_admin.py | dequelabs/MobileTools | 0 | 6620763 | # -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (C) 2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Unless otherwise stated in the comments, "id", in e.g. user_id, refers to the
# internal Keycloak server ID, usually a uuid string
from builtins import isinstance
from typing import Iterable
import datetime
import json
from .mobileconnection import MobileConnectionManager
from .mobile_exceptions import araise_error_from_response, MobileGetError
from .mobile_openid import MobileOpenID
class MobileAdmin:
    """Admin client for a Mobile server that authenticates against Keycloak.

    Obtains an admin token on construction (via ``get_token``, defined later
    in this class) and exposes helpers for paginated GETs and for scan/user
    administration endpoints.
    """
    # Records fetched per request by the pagination helpers (__fetch_all).
    PAGE_SIZE = 100

    # Backing fields for the read/write properties defined below.
    _server_url = None
    _username = None
    _password = None
    _realm_name = None
    _client_id = None
    _verify = None
    _client_secret_key = None
    _auto_refresh_token = None
    _connection = None
    _token = None
    _custom_headers = None
    _user_realm_name = None
    def __init__(self, auth_server_url, server_url, username=None, password=None, realm_name='master', client_id='admin-cli', verify=True,
                 client_secret_key=None, custom_headers=None, user_realm_name=None, auto_refresh_token=None):
        """
        :param auth_server_url: Keycloak server url
        :param server_url: Mobile server url
        :param username: admin username
        :param password: <PASSWORD>
        :param realm_name: realm name
        :param client_id: client id
        :param verify: True if want check connection SSL
        :param client_secret_key: client secret key
        :param custom_headers: dict of custom header to pass to each HTML request
        :param user_realm_name: The realm name of the user, if different from realm_name
        :param auto_refresh_token: list of methods that allows automatic token refresh. ex: ['get', 'put', 'post', 'delete']
        """
        self.auth_server_url = auth_server_url
        self.server_url = server_url
        self.username = username
        self.password = password
        self.realm_name = realm_name
        self.client_id = client_id
        self.verify = verify
        self.client_secret_key = client_secret_key
        self.auto_refresh_token = auto_refresh_token or []
        self.user_realm_name = user_realm_name
        self.custom_headers = custom_headers
        # Placeholder until a real timestamp is recorded after authentication.
        self.last_refresh_token_timestamp = 0
        # Get token Admin
        self.get_token()
        # NOTE(review): initialized above as 0 (int) and here as a datetime --
        # confirm consumers handle both types.
        self.last_refresh_token_timestamp = datetime.datetime.now()
    # --- Simple property accessors over the backing fields declared above ---
    @property
    def server_url(self):
        return self._server_url
    @server_url.setter
    def server_url(self, value):
        self._server_url = value
    @property
    def auth_server_url(self):
        return self._auth_server_url
    @auth_server_url.setter
    def auth_server_url(self, value):
        self._auth_server_url = value
    @property
    def realm_name(self):
        return self._realm_name
    @realm_name.setter
    def realm_name(self, value):
        self._realm_name = value
    @property
    def connection(self):
        return self._connection
    @connection.setter
    def connection(self, value):
        self._connection = value
    @property
    def client_id(self):
        return self._client_id
    @client_id.setter
    def client_id(self, value):
        self._client_id = value
    @property
    def client_secret_key(self):
        return self._client_secret_key
    @client_secret_key.setter
    def client_secret_key(self, value):
        self._client_secret_key = value
    @property
    def verify(self):
        return self._verify
    @verify.setter
    def verify(self, value):
        self._verify = value
    @property
    def username(self):
        return self._username
    @username.setter
    def username(self, value):
        self._username = value
    @property
    def password(self):
        return self._password
    @password.setter
    def password(self, value):
        self._password = value
    @property
    def token(self):
        return self._token
    @token.setter
    def token(self, value):
        self._token = value
    @property
    def auto_refresh_token(self):
        return self._auto_refresh_token
    @property
    def user_realm_name(self):
        return self._user_realm_name
    @user_realm_name.setter
    def user_realm_name(self, value):
        self._user_realm_name = value
    @property
    def custom_headers(self):
        return self._custom_headers
    @custom_headers.setter
    def custom_headers(self, value):
        self._custom_headers = value
    # Validating setter: only HTTP verb names from the allowed set may enable
    # automatic token refresh (see raw_get/raw_post).
    @auto_refresh_token.setter
    def auto_refresh_token(self, value):
        allowed_methods = {'get', 'post', 'put', 'delete'}
        if not isinstance(value, Iterable):
            raise TypeError('Expected a list of strings among {allowed}'.format(allowed=allowed_methods))
        if not all(method in allowed_methods for method in value):
            raise TypeError('Unexpected method in auto_refresh_token, accepted methods are {allowed}'.format(allowed=allowed_methods))
        self._auto_refresh_token = value
    def __fetch_all(self, url, query=None):
        '''Wrapper function to paginate GET requests.

        Repeatedly issues GETs with ``first``/``max`` paging parameters until
        an empty page is returned.

        NOTE(review): mutates the *query* dict passed by the caller (adds
        ``max`` and ``first`` keys).

        :param url: The url on which the query is executed
        :param query: Existing query parameters (optional)
        :return: Combined results of paginated queries
        '''
        results = []
        # initalize query if it was called with None
        if not query:
            query = {}
        page = 0
        query['max'] = self.PAGE_SIZE
        # fetch until we can
        while True:
            query['first'] = page*self.PAGE_SIZE
            partial_results = araise_error_from_response(
                self.raw_get(url, **query),
                MobileGetError)
            if not partial_results:
                break
            results.extend(partial_results)
            page += 1
        return results
    def __fetch_page(self, url, query):
        ''' KAR: Wrapper function for *real* paginated GET requests.

        Issues a single GET with the caller-supplied paging parameters.

        :param url: The url on which the query is executed
        :param query: query parameters; when empty/None, no request is made
            and an empty list is returned
        :return: decoded response for the requested page
        '''
        results = []
        # initalize query if it was called with None
        if not query:
            return results
        results = araise_error_from_response(
            self.raw_get(url, **query),
            MobileGetError)
        return results
    def update_user_and_group(self, old_user_id, old_group_id, new_user_id, new_group_id, max_count):
        """
        Assign scan to user
        Reassigns up to *max_count* scans from (old user, old group) to
        (new user, new group). All identifiers travel in the path/query
        string; the POST carries no body.
        :return: Mobile server response (RealmRepresentation)
        """
        params_path = { "oldid": old_user_id, "oldgroupid": old_group_id, "newuid": new_user_id, "groupid": new_group_id, "count": max_count }
        data_raw = self.raw_post("/admin/user/modify/{oldid}/{oldgroupid}?user-id={newuid}&org-id={groupid}&count={count}".format(**params_path), data=None)
        return araise_error_from_response(data_raw, MobileGetError, expected_code=200)
    def get_mobile_scan(self, user_id, package_name, result_id):
        """
        retrieve scan for user
        :param user_id: owner of the scan
        :param package_name: package the scan belongs to
        :param result_id: identifier of the individual scan result
        :return: Mobile server response (scanResult)
        """
        params_path = { "userid": user_id, "packagename": package_name, "resultid": result_id }
        data_raw = self.raw_get("/attest/result/axe/{userid}/{packagename}/{resultid}".format(**params_path))
        return araise_error_from_response(data_raw, MobileGetError, expected_code=200)
    def set_mobile_scan_tag(self, user_id, package_name, result_id, tag_list):
        """
        set tag for scan
        The scan is addressed by the same key triple as get_mobile_scan:
        AxeResultKey {
            String userId;
            String packageName;
            String resultId;
        }
        :param tag_list: tags to attach; sent JSON-encoded as the POST body
        :return: Mobile server response
        """
        params_path = { "userid": user_id, "packagename": package_name, "resultid": result_id }
        data_raw = self.raw_post("/attest/result/tag/{userid}/{packagename}/{resultid}".format(**params_path), data=json.dumps(tag_list))
        return araise_error_from_response(data_raw, MobileGetError, expected_code=200)
def raw_get(self, *args, **kwargs):
"""
Calls connection.raw_get.
If auto_refresh is set for *get* and *access_token* is expired, it will refresh the token
and try *get* once more.
"""
if 'get' in self.auto_refresh_token:
self.validate_token()
r = self.connection.raw_get(*args, **kwargs)
return r
def raw_post(self, *args, **kwargs):
"""
Calls connection.raw_post.
If auto_refresh is set for *post* and *access_token* is expired, it will refresh the token
and try *post* once more.
"""
if 'post' in self.auto_refresh_token:
self.validate_token()
r = self.connection.raw_post(*args, **kwargs)
return r
def raw_put(self, *args, **kwargs):
"""
Calls connection.raw_put.
If auto_refresh is set for *put* and *access_token* is expired, it will refresh the token
and try *put* once more.
"""
if 'put' in self.auto_refresh_token:
self.validate_token()
r = self.connection.raw_put(*args, **kwargs)
return r
def raw_delete(self, *args, **kwargs):
"""
Calls connection.raw_delete.
If auto_refresh is set for *delete* and *access_token* is expired, it will refresh the token
and try *delete* once more.
"""
if 'delete' in self.auto_refresh_token:
self.validate_token()
r = self.connection.raw_delete(*args, **kwargs)
return r
    def get_token(self):
        """Authenticate with the password grant and rebuild the connection.

        Side effects: replaces self.mobile_openid, self._token and
        self._connection. The Bearer token from the fresh login is baked
        into the connection's default headers.
        """
        self.mobile_openid = MobileOpenID(auth_server_url=self.auth_server_url, server_url=self.server_url, client_id=self.client_id,
                             realm_name=self.user_realm_name or self.realm_name, verify=self.verify,
                             client_secret_key=self.client_secret_key,
                             custom_headers=self.custom_headers)
        grant_type = ["password"]
        # Retained for reference: a client_credentials grant was considered.
        #if self.client_secret_key:
        #    grant_type = ["client_credentials"]
        self._token = self.mobile_openid.token(self.username, self.password, grant_type=grant_type)
        headers = {
            'Authorization': 'Bearer ' + self.token.get('access_token'),
            'Content-Type': 'application/json'
        }
        if self.custom_headers is not None:
            # merge custom headers to main headers
            headers.update(self.custom_headers)
        self._connection = MobileConnectionManager(auth_base_url=self.auth_server_url,
                                       base_url=self.server_url,
                                       headers=headers,
                                       timeout=180,
                                       verify=self.verify)
    def refresh_token(self):
        """Exchange the stored refresh token for a new access token.

        Falls back to a full re-authentication (get_token) when the server
        reports the refresh token itself has expired; any other error is
        re-raised. The connection's Authorization header is updated in place.
        """
        refresh_token = self.token.get('refresh_token')
        try:
            self.token = self.mobile_openid.refresh_token(refresh_token)
            self.last_refresh_token_timestamp = datetime.datetime.now()
        except MobileGetError as e:
            # response_body is bytes, hence the b'' substring check.
            if e.response_code == 400 and b'Refresh token expired' in e.response_body:
                self.get_token()
            else:
                raise
        self.connection.add_param_headers('Authorization', 'Bearer ' + self.token.get('access_token'))
    def validate_token(self):
        """Refresh the access token if its reported lifetime has elapsed.

        A last_refresh_token_timestamp of 0 (the pre-login sentinel set in
        __init__) disables the check, as does a token payload without an
        'expires_in' field.
        """
        if 'expires_in' in self.token and self.last_refresh_token_timestamp != 0:
            expire_time = self.last_refresh_token_timestamp + datetime.timedelta(seconds = self.token['expires_in'] )
            if expire_time <= datetime.datetime.now():
                self.refresh_token()
| # -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (C) 2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Unless otherwise stated in the comments, "id", in e.g. user_id, refers to the
# internal Keycloak server ID, usually a uuid string
from builtins import isinstance
from typing import Iterable
import datetime
import json
from .mobileconnection import MobileConnectionManager
from .mobile_exceptions import araise_error_from_response, MobileGetError
from .mobile_openid import MobileOpenID
class MobileAdmin:
PAGE_SIZE = 100
_server_url = None
_username = None
_password = None
_realm_name = None
_client_id = None
_verify = None
_client_secret_key = None
_auto_refresh_token = None
_connection = None
_token = None
_custom_headers = None
_user_realm_name = None
    def __init__(self, auth_server_url, server_url, username=None, password=None, realm_name='master', client_id='admin-cli', verify=True,
                 client_secret_key=None, custom_headers=None, user_realm_name=None, auto_refresh_token=None):
        """
        :param auth_server_url: Keycloak server url
        :param server_url: Mobile server url
        :param username: admin username
        :param password: admin password
        :param realm_name: realm name
        :param client_id: client id
        :param verify: True if want check connection SSL
        :param client_secret_key: client secret key
        :param custom_headers: dict of custom header to pass to each HTML request
        :param user_realm_name: The realm name of the user, if different from realm_name
        :param auto_refresh_token: list of methods that allows automatic token refresh. ex: ['get', 'put', 'post', 'delete']
        """
        self.auth_server_url = auth_server_url
        self.server_url = server_url
        self.username = username
        self.password = password
        self.realm_name = realm_name
        self.client_id = client_id
        self.verify = verify
        self.client_secret_key = client_secret_key
        # Defaults to no automatic refresh; validated by the property setter.
        self.auto_refresh_token = auto_refresh_token or []
        self.user_realm_name = user_realm_name
        self.custom_headers = custom_headers
        # 0 is the "never refreshed" sentinel checked by validate_token.
        self.last_refresh_token_timestamp = 0
        # Get token Admin
        self.get_token()
        self.last_refresh_token_timestamp = datetime.datetime.now()
@property
def server_url(self):
return self._server_url
@server_url.setter
def server_url(self, value):
self._server_url = value
@property
def auth_server_url(self):
return self._auth_server_url
@auth_server_url.setter
def auth_server_url(self, value):
self._auth_server_url = value
@property
def realm_name(self):
return self._realm_name
@realm_name.setter
def realm_name(self, value):
self._realm_name = value
@property
def connection(self):
return self._connection
@connection.setter
def connection(self, value):
self._connection = value
@property
def client_id(self):
return self._client_id
@client_id.setter
def client_id(self, value):
self._client_id = value
@property
def client_secret_key(self):
return self._client_secret_key
@client_secret_key.setter
def client_secret_key(self, value):
self._client_secret_key = value
@property
def verify(self):
return self._verify
@verify.setter
def verify(self, value):
self._verify = value
@property
def username(self):
return self._username
@username.setter
def username(self, value):
self._username = value
@property
def password(self):
return self._password
@password.setter
def password(self, value):
self._password = value
@property
def token(self):
return self._token
@token.setter
def token(self, value):
self._token = value
@property
def auto_refresh_token(self):
return self._auto_refresh_token
@property
def user_realm_name(self):
return self._user_realm_name
@user_realm_name.setter
def user_realm_name(self, value):
self._user_realm_name = value
@property
def custom_headers(self):
return self._custom_headers
@custom_headers.setter
def custom_headers(self, value):
self._custom_headers = value
@auto_refresh_token.setter
def auto_refresh_token(self, value):
allowed_methods = {'get', 'post', 'put', 'delete'}
if not isinstance(value, Iterable):
raise TypeError('Expected a list of strings among {allowed}'.format(allowed=allowed_methods))
if not all(method in allowed_methods for method in value):
raise TypeError('Unexpected method in auto_refresh_token, accepted methods are {allowed}'.format(allowed=allowed_methods))
self._auto_refresh_token = value
def __fetch_all(self, url, query=None):
'''Wrapper function to paginate GET requests
:param url: The url on which the query is executed
:param query: Existing query parameters (optional)
:return: Combined results of paginated queries
'''
results = []
# initalize query if it was called with None
if not query:
query = {}
page = 0
query['max'] = self.PAGE_SIZE
# fetch until we can
while True:
query['first'] = page*self.PAGE_SIZE
partial_results = araise_error_from_response(
self.raw_get(url, **query),
MobileGetError)
if not partial_results:
break
results.extend(partial_results)
page += 1
return results
def __fetch_page(self, url, query):
''' KAR: Wrapper function for *real* paginated GET requests
'''
results = []
# initalize query if it was called with None
if not query:
return results
results = araise_error_from_response(
self.raw_get(url, **query),
MobileGetError)
return results
def update_user_and_group(self, old_user_id, old_group_id, new_user_id, new_group_id, max_count):
"""
Assign scan to user
:return: Mobile server response (RealmRepresentation)
"""
params_path = { "oldid": old_user_id, "oldgroupid": old_group_id, "newuid": new_user_id, "groupid": new_group_id, "count": max_count }
data_raw = self.raw_post("/admin/user/modify/{oldid}/{oldgroupid}?user-id={newuid}&org-id={groupid}&count={count}".format(**params_path), data=None)
return araise_error_from_response(data_raw, MobileGetError, expected_code=200)
def get_mobile_scan(self, user_id, package_name, result_id):
"""
retrieve scan for user
:return: Mobile server response (scanResult)
"""
params_path = { "userid": user_id, "packagename": package_name, "resultid": result_id }
data_raw = self.raw_get("/attest/result/axe/{userid}/{packagename}/{resultid}".format(**params_path))
return araise_error_from_response(data_raw, MobileGetError, expected_code=200)
def set_mobile_scan_tag(self, user_id, package_name, result_id, tag_list):
"""
set tag for scan
:return: Mobile server response
AxeResultKey {
String userId;
String packageName;
String resultId;
}
"""
params_path = { "userid": user_id, "packagename": package_name, "resultid": result_id }
data_raw = self.raw_post("/attest/result/tag/{userid}/{packagename}/{resultid}".format(**params_path), data=json.dumps(tag_list))
return araise_error_from_response(data_raw, MobileGetError, expected_code=200)
def raw_get(self, *args, **kwargs):
"""
Calls connection.raw_get.
If auto_refresh is set for *get* and *access_token* is expired, it will refresh the token
and try *get* once more.
"""
if 'get' in self.auto_refresh_token:
self.validate_token()
r = self.connection.raw_get(*args, **kwargs)
return r
def raw_post(self, *args, **kwargs):
"""
Calls connection.raw_post.
If auto_refresh is set for *post* and *access_token* is expired, it will refresh the token
and try *post* once more.
"""
if 'post' in self.auto_refresh_token:
self.validate_token()
r = self.connection.raw_post(*args, **kwargs)
return r
def raw_put(self, *args, **kwargs):
"""
Calls connection.raw_put.
If auto_refresh is set for *put* and *access_token* is expired, it will refresh the token
and try *put* once more.
"""
if 'put' in self.auto_refresh_token:
self.validate_token()
r = self.connection.raw_put(*args, **kwargs)
return r
def raw_delete(self, *args, **kwargs):
"""
Calls connection.raw_delete.
If auto_refresh is set for *delete* and *access_token* is expired, it will refresh the token
and try *delete* once more.
"""
if 'delete' in self.auto_refresh_token:
self.validate_token()
r = self.connection.raw_delete(*args, **kwargs)
return r
def get_token(self):
self.mobile_openid = MobileOpenID(auth_server_url=self.auth_server_url, server_url=self.server_url, client_id=self.client_id,
realm_name=self.user_realm_name or self.realm_name, verify=self.verify,
client_secret_key=self.client_secret_key,
custom_headers=self.custom_headers)
grant_type = ["password"]
#if self.client_secret_key:
# grant_type = ["client_credentials"]
self._token = self.mobile_openid.token(self.username, self.password, grant_type=grant_type)
headers = {
'Authorization': 'Bearer ' + self.token.get('access_token'),
'Content-Type': 'application/json'
}
if self.custom_headers is not None:
# merge custom headers to main headers
headers.update(self.custom_headers)
self._connection = MobileConnectionManager(auth_base_url=self.auth_server_url,
base_url=self.server_url,
headers=headers,
timeout=180,
verify=self.verify)
def refresh_token(self):
refresh_token = self.token.get('refresh_token')
try:
self.token = self.mobile_openid.refresh_token(refresh_token)
self.last_refresh_token_timestamp = datetime.datetime.now()
except MobileGetError as e:
if e.response_code == 400 and b'Refresh token expired' in e.response_body:
self.get_token()
else:
raise
self.connection.add_param_headers('Authorization', 'Bearer ' + self.token.get('access_token'))
def validate_token(self):
if 'expires_in' in self.token and self.last_refresh_token_timestamp != 0:
expire_time = self.last_refresh_token_timestamp + datetime.timedelta(seconds = self.token['expires_in'] )
if expire_time <= datetime.datetime.now():
self.refresh_token()
| en | 0.747684 | # -*- coding: utf-8 -*- # # The MIT License (MIT) # # Copyright (C) 2017 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # Unless otherwise stated in the comments, "id", in e.g. user_id, refers to the # internal Keycloak server ID, usually a uuid string :param auth_server_url: Keycloak server url :param server_url: Mobile server url :param username: admin username :param password: <PASSWORD> :param realm_name: realm name :param client_id: client id :param verify: True if want check connection SSL :param client_secret_key: client secret key :param custom_headers: dict of custom header to pass to each HTML request :param user_realm_name: The realm name of the user, if different from realm_name :param auto_refresh_token: list of methods that allows automatic token refresh. 
ex: ['get', 'put', 'post', 'delete'] # Get token Admin Wrapper function to paginate GET requests :param url: The url on which the query is executed :param query: Existing query parameters (optional) :return: Combined results of paginated queries # initalize query if it was called with None # fetch until we can KAR: Wrapper function for *real* paginated GET requests # initalize query if it was called with None Assign scan to user :return: Mobile server response (RealmRepresentation) retrieve scan for user :return: Mobile server response (scanResult) set tag for scan :return: Mobile server response AxeResultKey { String userId; String packageName; String resultId; } Calls connection.raw_get. If auto_refresh is set for *get* and *access_token* is expired, it will refresh the token and try *get* once more. Calls connection.raw_post. If auto_refresh is set for *post* and *access_token* is expired, it will refresh the token and try *post* once more. Calls connection.raw_put. If auto_refresh is set for *put* and *access_token* is expired, it will refresh the token and try *put* once more. Calls connection.raw_delete. If auto_refresh is set for *delete* and *access_token* is expired, it will refresh the token and try *delete* once more. #if self.client_secret_key: # grant_type = ["client_credentials"] # merge custom headers to main headers | 1.553467 | 2 |
elasticache_hashring/backend.py | justcompile/django-elasticache-hashring | 0 | 6620764 | from django.core.cache.backends.memcached import MemcachedCache
from django.utils import six
from elasticache_hashring import (
auto_discovery,
client as memcache
)
class MemcachedHashRingCache(MemcachedCache):
    """Cache binding (python-memcached + hash_ring) that resolves the full
    ElastiCache cluster membership from a seed node at startup."""
    def __init__(self, server, params):
        # Accept either a "host:port;host:port" string or a pre-split sequence.
        if isinstance(server, six.string_types):
            seeds = server.split(';')
        else:
            seeds = server
        # Ask the first seed node for the complete cluster node list.
        host, port = seeds[0].split(':')
        cluster_nodes = auto_discovery.get_cluster_info(host, port)
        super(MemcachedHashRingCache, self).__init__(
            cluster_nodes, params,
            library=memcache,
            value_not_found_exception=ValueError)
| from django.core.cache.backends.memcached import MemcachedCache
from django.utils import six
from elasticache_hashring import (
auto_discovery,
client as memcache
)
class MemcachedHashRingCache(MemcachedCache):
"An implementation of a cache binding using python-memcached and hash_ring"
def __init__(self, server, params):
if isinstance(server, six.string_types):
initial_servers = server.split(';')
else:
initial_servers = server
host, port = initial_servers[0].split(':')
servers = auto_discovery.get_cluster_info(host, port)
super(MemcachedHashRingCache, self).__init__(servers, params,
library=memcache,
value_not_found_exception=ValueError)
| none | 1 | 2.102383 | 2 | |
coinex_api.py | jeremyhahn/altcoin-autosell | 1 | 6620765 | import exchange_api
import hashlib
import hmac
import json
import urllib2
class CoinEx(exchange_api.Exchange):
    """CoinEx exchange bindings.

    Wraps the coinex.pw v2 JSON API; private endpoints are signed with
    HMAC-SHA512 over the raw POST body. (Python 2 / urllib2 code base.)
    """
    name = 'CoinEx'
    def __init__(self, api_key, api_secret):
        """
        :param api_key: CoinEx API key (sent in the API-Key header)
        :param api_secret: shared secret used to sign private requests
        """
        self.api_url = 'https://coinex.pw/api/v2/'
        self.api_headers = {'Content-type' : 'application/json',
                            'Accept' : 'application/json',
                            'User-Agent' : 'autocoin-autosell'}
        self.api_key = api_key
        self.api_secret = api_secret
    def GetName(self):
        """Return the human-readable exchange name."""
        return 'CoinEx'
    def _Request(self, method, headers=None, post_data=None):
        """Issue a public API request and unwrap the response payload.

        :param method: endpoint name; also the key expected in the JSON
            response envelope.
        :raises exchange_api.ExchangeException: on transport/JSON errors or
            when the envelope lacks the expected key.
        """
        if headers is None:
            headers = {}
        headers.update(self.api_headers.items())
        try:
            request = urllib2.Request(self.api_url + method, post_data, headers)
            response = urllib2.urlopen(request)
            try:
                response_json = json.loads(response.read())
                if not method in response_json:
                    raise exchange_api.ExchangeException('Root not in %s.' % method)
                return response_json[method]
            finally:
                response.close()
        except (urllib2.URLError, urllib2.HTTPError, ValueError) as e:
            raise exchange_api.ExchangeException(e)
    def _PrivateRequest(self, method, post_data=None):
        """Issue a signed (private) API request.

        The HMAC-SHA512 signature covers the raw POST body (empty string
        for body-less calls) and travels in the API-Sign header.
        """
        hmac_data = '' if not post_data else post_data
        digest = hmac.new(self.api_secret, hmac_data, hashlib.sha512).hexdigest()
        headers = {'API-Key' : self.api_key,
                   'API-Sign': digest}
        return self._Request(method, headers, post_data)
    def GetCurrencies(self):
        """Return a mapping of currency name -> currency id."""
        currencies = {}
        try:
            for currency in self._Request('currencies'):
                currencies[currency['name']] = currency['id']
        except (TypeError, KeyError) as e:
            raise exchange_api.ExchangeException(e)
        return currencies
    def GetBalances(self):
        """Return a mapping of currency id -> available amount.

        Amounts are converted from the API's 10^-8 fixed-point integers.
        """
        balances = {}
        try:
            for balance in self._PrivateRequest('balances'):
                balances[balance['currency_id']] = float(balance['amount']) / pow(10, 8)
        except (TypeError, KeyError) as e:
            raise exchange_api.ExchangeException(e)
        return balances
    def GetMarkets(self):
        """Return the trade pairs as exchange_api.Market objects.

        The unreachable 'return markets' (an undefined name) that followed
        the try/except in the original has been removed.
        """
        try:
            return [exchange_api.Market(trade_pair['currency_id'], trade_pair['market_id'],
                                        trade_pair['id']) for
                    trade_pair in self._Request('trade_pairs')]
        except (TypeError, KeyError) as e:
            raise exchange_api.ExchangeException(e)
    def CreateOrder(self, market_id, amount, bid=True, price=0):
        """Place an order and return its id.

        :param market_id: trade pair id (see GetMarkets)
        :param amount: order size in whole units (converted to 10^8 fixed point)
        :param bid: True for a buy order, False for a sell
        :param price: limit price; 0 is clamped to the minimum rate of 1
        """
        order = {'trade_pair_id' : market_id,
                 'amount' : int(amount * pow(10, 8)),
                 'bid' : bid,
                 'rate' : max(1, int(price * pow(10, 8)))}
        post_data = json.dumps({'order' : order})
        try:
            return self._PrivateRequest('orders', post_data)[0]['id']
        except (TypeError, KeyError, IndexError) as e:
            raise exchange_api.ExchangeException(e)
| import exchange_api
import hashlib
import hmac
import json
import urllib2
class CoinEx(exchange_api.Exchange):
name = 'CoinEx'
def __init__(self, api_key, api_secret):
self.api_url = 'https://coinex.pw/api/v2/'
self.api_headers = {'Content-type' : 'application/json',
'Accept' : 'application/json',
'User-Agent' : 'autocoin-autosell'}
self.api_key = api_key
self.api_secret = api_secret
def GetName(self):
return 'CoinEx'
def _Request(self, method, headers=None, post_data=None):
if headers is None:
headers = {}
headers.update(self.api_headers.items())
try:
request = urllib2.Request(self.api_url + method, post_data, headers)
response = urllib2.urlopen(request)
try:
response_json = json.loads(response.read())
if not method in response_json:
raise exchange_api.ExchangeException('Root not in %s.' % method)
return response_json[method]
finally:
response.close()
except (urllib2.URLError, urllib2.HTTPError, ValueError) as e:
raise exchange_api.ExchangeException(e)
def _PrivateRequest(self, method, post_data=None):
hmac_data = '' if not post_data else post_data
digest = hmac.new(self.api_secret, hmac_data, hashlib.sha512).hexdigest()
headers = {'API-Key' : self.api_key,
'API-Sign': digest}
return self._Request(method, headers, post_data)
def GetCurrencies(self):
currencies = {}
try:
for currency in self._Request('currencies'):
currencies[currency['name']] = currency['id']
except (TypeError, KeyError) as e:
raise exchange_api.ExchangeException(e)
return currencies
def GetBalances(self):
balances = {}
try:
for balance in self._PrivateRequest('balances'):
balances[balance['currency_id']] = float(balance['amount']) / pow(10, 8)
except (TypeError, KeyError) as e:
raise exchange_api.ExchangeException(e)
return balances
def GetMarkets(self):
try:
return [exchange_api.Market(trade_pair['currency_id'], trade_pair['market_id'],
trade_pair['id']) for
trade_pair in self._Request('trade_pairs')]
except (TypeError, KeyError) as e:
raise exchange_api.ExchangeException(e)
return markets
def CreateOrder(self, market_id, amount, bid=True, price=0):
order = {'trade_pair_id' : market_id,
'amount' : int(amount * pow(10, 8)),
'bid' : bid,
'rate' : max(1, int(price * pow(10, 8)))}
post_data = json.dumps({'order' : order})
try:
return self._PrivateRequest('orders', post_data)[0]['id']
except (TypeError, KeyError, IndexError) as e:
raise exchange_api.ExchangeException(e)
| none | 1 | 2.870986 | 3 | |
app/app.py | adityajn105/FaceGAN-Generating-Random-Faces | 8 | 6620766 | <reponame>adityajn105/FaceGAN-Generating-Random-Faces
from flask import Flask, render_template, request, send_from_directory
from build_generator import Generator
import os
import matplotlib.pyplot as plt
import numpy as np
import base64
app = Flask(__name__, template_folder=os.curdir)
generator=None
@app.route('/')
def home():
    """Render the landing page with one freshly generated face.

    Samples a 100-d latent vector, runs the generator, rescales its output
    from [-1, 1] to [0, 1], and inlines the PNG as base64 in the template.
    """
    # Draw the latent vector directly from N(0, 1). The original nested
    # np.random.normal(np.random.normal(0,1,(1,100))) used one noise draw
    # as the *mean* of a second draw, doubling the variance unintentionally.
    noise = np.random.normal(0, 1, (1, 100))
    img = (generator.predict(noise)[0] / 2) + 0.5
    img = np.clip(img, 0, 1)
    plt.imsave('temp/temp.png', img)
    with open("temp/temp.png", "rb") as image_file:
        # b64encode output is pure ASCII; decode() replaces the fragile
        # str(...)[2:-1] repr-slicing with the same resulting text.
        encoded_string = base64.b64encode(image_file.read()).decode('ascii')
    return render_template('facegen.html', data={'img': encoded_string})
@app.route('/<path:path>')
def getStaticFiles(path):
    # Catch-all route: serve any other requested file from the current
    # working directory (CSS/JS assets next to this script).
    print(path)  # trace requested asset paths to stdout
    return send_from_directory(os.curdir, path)
if __name__ == "__main__":
generator = Generator('../saved_weights/generator_weights.h5')
print('Generator Built!! You are ready with app.')
app.run(host = '0.0.0.0',port = int(5000)) | from flask import Flask, render_template, request, send_from_directory
from build_generator import Generator
import os
import matplotlib.pyplot as plt
import numpy as np
import base64
app = Flask(__name__, template_folder=os.curdir)
generator=None
@app.route('/')
def home():
noise = np.random.normal(np.random.normal(0,1,(1,100)))
img = (generator.predict(noise)[0]/2)+0.5
img = np.clip(img,0,1)
plt.imsave('temp/temp.png',img)
encoded_string=''
with open("temp/temp.png", "rb") as image_file:
encoded_string += str(base64.b64encode(image_file.read()))[2:-1]
return render_template('facegen.html',data = {'img':encoded_string})
@app.route('/<path:path>')
def getStaticFiles(path):
print(path)
return send_from_directory(os.curdir, path)
if __name__ == "__main__":
generator = Generator('../saved_weights/generator_weights.h5')
print('Generator Built!! You are ready with app.')
app.run(host = '0.0.0.0',port = int(5000)) | none | 1 | 2.596429 | 3 | |
src/bot/bot_utils.py | RazCrimson/Nucleo | 1 | 6620767 | import asyncio
from typing import Union, List
import discord
from discord.ext.commands import Context
def generate_embed(title: str, author: discord.Member, *, description: str = '', color: int = 0) -> discord.Embed:
    """Build a Discord embed whose footer shows the member's name and avatar."""
    embed = discord.Embed(title=title, color=color, description=description)
    return embed.set_footer(text=author.display_name, icon_url=author.avatar_url)
async def emoji_selection_detector(ctx: Context, emoji_list: List[Union[discord.Emoji, discord.PartialEmoji, str]],
                                   embed: discord.Embed = None, wait_for: int = 30, *, message_content: str = None,
                                   show_reject: bool = True) -> Union[None, discord.Emoji, discord.PartialEmoji, str]:
    """Post a message and wait for the invoking user to pick one reaction.

    Returns the chosen emoji from emoji_list, or None on timeout, on the
    reject reaction (❌), or on an unexpected emoji. The prompt message is
    deleted in every outcome.
    """
    def reaction_check(reaction, user_obj):
        # Only reactions from the command invoker, restricted to the
        # offered emojis plus the reject cross.
        if ctx.author.id == user_obj.id and reaction.emoji in [*emoji_list, '❌']:
            return True
        return False
    m = await ctx.send(content=message_content, embed=embed)
    # Fire-and-forget the reaction adds so the wait below starts immediately.
    [asyncio.create_task(m.add_reaction(emote)) for emote in emoji_list]
    if show_reject:
        asyncio.create_task(m.add_reaction('❌'))
    try:
        reaction_used, user = await ctx.bot.wait_for('reaction_add', check=reaction_check, timeout=wait_for)
        await m.delete()
        if show_reject and reaction_used.emoji == '❌':
            return None
        if reaction_used.emoji in emoji_list:
            return reaction_used.emoji
    except asyncio.TimeoutError:
        await m.delete()
    return None
| import asyncio
from typing import Union, List
import discord
from discord.ext.commands import Context
def generate_embed(title: str, author: discord.Member, *, description: str = '', color: int = 0) -> discord.Embed:
return discord.Embed(
title=title,
color=color,
description=description
).set_footer(text=author.display_name, icon_url=author.avatar_url)
async def emoji_selection_detector(ctx: Context, emoji_list: List[Union[discord.Emoji, discord.PartialEmoji, str]],
embed: discord.Embed = None, wait_for: int = 30, *, message_content: str = None,
show_reject: bool = True) -> Union[None, discord.Emoji, discord.PartialEmoji, str]:
def reaction_check(reaction, user_obj):
if ctx.author.id == user_obj.id and reaction.emoji in [*emoji_list, '❌']:
return True
return False
m = await ctx.send(content=message_content, embed=embed)
[asyncio.create_task(m.add_reaction(emote)) for emote in emoji_list]
if show_reject:
asyncio.create_task(m.add_reaction('❌'))
try:
reaction_used, user = await ctx.bot.wait_for('reaction_add', check=reaction_check, timeout=wait_for)
await m.delete()
if show_reject and reaction_used.emoji == '❌':
return None
if reaction_used.emoji in emoji_list:
return reaction_used.emoji
except asyncio.TimeoutError:
await m.delete()
return None
| none | 1 | 2.657268 | 3 | |
colt_steele_python_bootcamp_udemy/excercises/bouncer.py | phiratio/lpthw | 1 | 6620768 | <reponame>phiratio/lpthw
# ask for age
# 18-21 wristband
# 21+ normal entry
# too young
import sys
try:
    age = int(input("How old are ya lad?"))
except ValueError:
    # Narrowed from a bare except: only non-numeric input is handled here,
    # so Ctrl-C / EOF propagate normally instead of being swallowed.
    print("Valid numbers please!")
    sys.exit()
print(age)
# NOTE(review): per the spec comments above, 18-20 should apparently get a
# wristband message distinct from 21+ — confirm intended wording before
# changing the output strings.
if age >= 21:
    print('cul')
elif age >= 18:
    print('cul')
else:
    print('get lost lad')
| # ask for age
# 18-21 wristband
# 21+ normal entry
# too young
import sys
try:
age = int(input("How old are ya lad?"))
except:
print("Valid numbers please!")
sys.exit()
print(age)
if age >= 21:
print('cul')
elif age >= 18:
print('cul')
else:
print('get lost lad') | en | 0.779105 | # ask for age # 18-21 wristband # 21+ normal entry # too young | 3.764333 | 4 |
lang/py/cookbook/v2/source/cb2_20_15_sol_1.py | ch1huizong/learning | 0 | 6620769 | from opcode import opmap, HAVE_ARGUMENT, EXTENDED_ARG
globals().update(opmap)
def _insert_constant(value, i, code, constants):
    """Write a LOAD_CONST for *value* into code[i:i+3].

    Reuses an existing constant when the very same object is already
    present (identity comparison); otherwise appends *value* to
    *constants*. Returns the index of *value* within *constants*.
    """
    pos = next((idx for idx, existing in enumerate(constants)
                if existing is value), None)
    if pos is None:
        pos = len(constants)
        constants.append(value)
    code[i] = LOAD_CONST
    code[i + 1] = pos & 0xFF
    code[i + 2] = pos >> 8
    return pos
def _arg_at(i, code):
''' return argument number of the opcode at code[i] '''
return code[i+1] | (code[i+2] << 8)
| from opcode import opmap, HAVE_ARGUMENT, EXTENDED_ARG
globals().update(opmap)
def _insert_constant(value, i, code, constants):
''' insert LOAD_CONST for value at code[i:i+3]. Reuse an existing
constant if values coincide, otherwise append new value to the
list of constants; return index of the value in constants. '''
for pos, v in enumerate(constants):
if v is value: break
else:
pos = len(constants)
constants.append(value)
code[i] = LOAD_CONST
code[i+1] = pos & 0xFF
code[i+2] = pos >> 8
return pos
def _arg_at(i, code):
''' return argument number of the opcode at code[i] '''
return code[i+1] | (code[i+2] << 8)
| en | 0.477471 | insert LOAD_CONST for value at code[i:i+3]. Reuse an existing constant if values coincide, otherwise append new value to the list of constants; return index of the value in constants. return argument number of the opcode at code[i] | 2.927222 | 3 |
mods/mcpython/Inventorys/__init__.py | uuk0/mcpython-a-minecraft-clone-in-python | 2 | 6620770 | from . import Inventory
from .player_hotbar import *
from .player_rows import *
from .player_armor import *
from .player_crafting import *
from .block_craftingtable import *
from .block_chest import *
from .block_furnes import *
| from . import Inventory
from .player_hotbar import *
from .player_rows import *
from .player_armor import *
from .player_crafting import *
from .block_craftingtable import *
from .block_chest import *
from .block_furnes import *
| none | 1 | 1.153852 | 1 | |
Problems/P0006 - Soma quadrados.py | clasenback/EulerProject | 0 | 6620771 | <filename>Problems/P0006 - Soma quadrados.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 2 15:44:36 2021
@author: User
SUM SQUARE DIFFERENCE
The sum of the squares of the first ten natural numbers is,
1**2 + 2**2 + ... + 10**2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10) ** 2 = 3025
Hence the difference between the sum of the squares of the first ten natural
numbers and the square of the sum is
3025 - 385 = 2640.
Find the difference between the sum of the squares of the first one hundred
natural numbers and the square of the sum.
"""
# Project Euler #6: print the difference between the square of the sum
# and the sum of the squares of the first 100 natural numbers.
soma = sum(range(100 + 1))                           # 1 + 2 + ... + 100
somaQuadrados = sum(i ** 2 for i in range(100 + 1))  # 1**2 + ... + 100**2
print(soma**2 - somaQuadrados)
| <filename>Problems/P0006 - Soma quadrados.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 2 15:44:36 2021
@author: User
SUM SQUARE DIFFERENCE
The sum of the squares of the first ten natural numbers is,
1**2 + 2**2 + ... + 10**2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10) ** 2 = 3025
Hence the difference between the sum of the squares of the first ten natural
numbers and the square of the sum is
3025 - 385 = 2640.
Find the difference between the sum of the squares of the first one hundred
natural numbers and the square of the sum.
"""
soma = 0           # running sum 1 + 2 + ... + 100
somaQuadrados = 0  # running sum of squares 1**2 + ... + 100**2
for i in range(100+1):
    soma += i
    somaQuadrados += i**2
# Difference between the square of the sum and the sum of the squares.
print(soma**2 - somaQuadrados)
| en | 0.812057 | # -*- coding: utf-8 -*- Created on Tue Mar 2 15:44:36 2021
@author: User
SUM SQUARE DIFFERENCE
The sum of the squares of the first ten natural numbers is,
1**2 + 2**2 + ... + 10**2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10) ** 2 = 3025
Hence the difference between the sum of the squares of the first ten natural
numbers and the square of the sum is
3025 - 385 = 2640.
Find the difference between the sum of the squares of the first one hundred
natural numbers and the square of the sum. | 3.859333 | 4 |
app/views/users_page.py | ngocjr7/scoss_webapp | 3 | 6620772 | <reponame>ngocjr7/scoss_webapp
from models.models import User
import os
import sys
from werkzeug.utils import secure_filename
from flask import Flask, render_template, url_for, request, redirect, session, jsonify, Blueprint, flash
from scoss import smoss
import requests
from sctokenizer import Source
from scoss import Scoss
from scoss.metrics import all_metrics
from models.models import db, MessageStatus
from werkzeug.security import generate_password_hash, check_password_hash
from jinja2 import Environment
from config import URL
import config
# Blueprint collecting all user-management views in this module.
user = Blueprint('users_page', __name__)
@user.route('/admin', methods=['GET', 'POST'])
def admin():
    """Admin page: list all users (GET) or register a new user (POST).

    Only acts for a logged-in session with role 0 (admin).
    """
    # NOTE(review): logged-out or non-admin requests fall through and
    # return None, which Flask treats as an error — confirm whether a
    # redirect to the login page is intended here.
    if 'logged_in' in session:
        if session['logged_in'] == True:
            if session['role'] == 0:
                if request.method == 'GET':
                    # Fetch the full user list from the backend API.
                    url = URL + '/api/users'
                    headers = {'Authorization': "Bearer {}".format(session['token'])}
                    data = requests.get(url=url, headers=headers)
                    # print(data.json())
                    # A non-200 reply carrying 'msg' means the token is no
                    # longer valid: clear the session and go back to login.
                    if data.status_code != 200 and 'msg' in data.json():
                        session.clear()
                        return redirect(url_for('login_page.login_page'))
                    if 'error' in data.json().keys():
                        flash(data.json()['error'], MessageStatus.error)
                    return render_template('admin.html', data=data.json()['users'])
                else:
                    # POST: create a new role-1 user via the backend API.
                    username = request.form['username']
                    email = request.form['email']
                    password = '<PASSWORD>'  # anonymization placeholder from the dataset
                    role = 1
                    data_form = {'username': username, 'email': email, 'role': role, 'password': password}
                    url = URL + '/api/users/add'
                    headers = {'Authorization': "Bearer {}".format(session['token'])}
                    req = requests.post(url=url,json=data_form, headers=headers)
                    if req.status_code != 200 and 'msg' in req.json():
                        session.clear()
                        return redirect(url_for('login_page.login_page'))
                    if 'error' in req.json().keys():
                        flash(req.json()['error'], MessageStatus.error)
                    return redirect(url_for('users_page.admin'))
@user.route('/admin/redis', methods=['GET'])
def admin_rq():
    """Forward the admin to the RQ (Redis queue) dashboard."""
    return redirect('/rq')
@user.route('/admin/mongo', methods=['GET'])
def admin_mg():
    """Forward the admin to the Mongo admin UI on this host."""
    # Lazily derive the server name from the request host on first use.
    if not config.server_name:
        config.server_name = request.host.split(":")[0]
    url = 'http://{}:{}'.format(config.server_name, config.MONGO_PORT)
    return redirect(url)
@user.route('/users/<user_id>/update', methods=['GET', 'POST'])
def update_password(user_id):
    """Show the profile page (GET) or change the user's password (POST)."""
    # NOTE(review): the '<PASSWORD>' tokens below are dataset anonymization
    # artifacts (the dict literal is not valid Python as written) — restore
    # the original form-field names and expressions.
    if 'logged_in' in session:
        if session['logged_in'] == True:
            if request.method == 'GET':
                data = User.objects.get(user_id=user_id)
                return render_template('profile.html', data=data.to_mongo())
            if request.method == 'POST':
                email = request.form['email']
                old_pass = request.form['<PASSWORD>']
                new_pass = request.form['<PASSWORD>']
                data_form = {
                    'email': email,
                    'old_password': <PASSWORD>_pass,
                    'new_password': <PASSWORD>
                }
                # Return the user to the page that submitted the form.
                base_url = request.referrer
                url = URL + '/api/users/{}'.format(user_id)
                headers = {'Authorization': "Bearer {}".format(session['token'])}
                req = requests.put(url=url, json=data_form, headers=headers)
                # Expired token: drop the session and go back to login.
                if req.status_code != 200 and 'msg' in req.json():
                    session.clear()
                    return redirect(url_for('login_page.login_page'))
                if 'error' in req.json().keys():
                    flash(req.json()['error'], MessageStatus.error)
                else:
                    flash(req.json()['info'], MessageStatus.success)
                return redirect(base_url)
    else:
        return redirect(url_for('login_page.login_page'))
| from models.models import User
import os
import sys
from werkzeug.utils import secure_filename
from flask import Flask, render_template, url_for, request, redirect, session, jsonify, Blueprint, flash
from scoss import smoss
import requests
from sctokenizer import Source
from scoss import Scoss
from scoss.metrics import all_metrics
from models.models import db, MessageStatus
from werkzeug.security import generate_password_hash, check_password_hash
from jinja2 import Environment
from config import URL
import config
user = Blueprint('users_page', __name__)
@user.route('/admin', methods=['GET', 'POST'])
def admin():
if 'logged_in' in session:
if session['logged_in'] == True:
if session['role'] == 0:
if request.method == 'GET':
url = URL + '/api/users'
headers = {'Authorization': "Bearer {}".format(session['token'])}
data = requests.get(url=url, headers=headers)
# print(data.json())
if data.status_code != 200 and 'msg' in data.json():
session.clear()
return redirect(url_for('login_page.login_page'))
if 'error' in data.json().keys():
flash(data.json()['error'], MessageStatus.error)
return render_template('admin.html', data=data.json()['users'])
else:
username = request.form['username']
email = request.form['email']
password = '<PASSWORD>'
role = 1
data_form = {'username': username, 'email': email, 'role': role, 'password': password}
url = URL + '/api/users/add'
headers = {'Authorization': "Bearer {}".format(session['token'])}
req = requests.post(url=url,json=data_form, headers=headers)
if req.status_code != 200 and 'msg' in req.json():
session.clear()
return redirect(url_for('login_page.login_page'))
if 'error' in req.json().keys():
flash(req.json()['error'], MessageStatus.error)
return redirect(url_for('users_page.admin'))
@user.route('/admin/redis', methods=['GET'])
def admin_rq():
return redirect('/rq')
@user.route('/admin/mongo', methods=['GET'])
def admin_mg():
if not config.server_name:
config.server_name = request.host.split(":")[0]
url = 'http://{}:{}'.format(config.server_name, config.MONGO_PORT)
return redirect(url)
@user.route('/users/<user_id>/update', methods=['GET', 'POST'])
def update_password(user_id):
if 'logged_in' in session:
if session['logged_in'] == True:
if request.method == 'GET':
data = User.objects.get(user_id=user_id)
return render_template('profile.html', data=data.to_mongo())
if request.method == 'POST':
email = request.form['email']
old_pass = request.form['<PASSWORD>']
new_pass = request.form['<PASSWORD>']
data_form = {
'email': email,
'old_password': <PASSWORD>_pass,
'new_password': <PASSWORD>
}
base_url = request.referrer
url = URL + '/api/users/{}'.format(user_id)
headers = {'Authorization': "Bearer {}".format(session['token'])}
req = requests.put(url=url, json=data_form, headers=headers)
if req.status_code != 200 and 'msg' in req.json():
session.clear()
return redirect(url_for('login_page.login_page'))
if 'error' in req.json().keys():
flash(req.json()['error'], MessageStatus.error)
else:
flash(req.json()['info'], MessageStatus.success)
return redirect(base_url)
else:
return redirect(url_for('login_page.login_page')) | sv | 0.114855 | # print(data.json()) | 2.080947 | 2 |
todayPP.py | nag8/todayPowerPoint | 0 | 6620773 | # coding: UTF-8
from pptx import Presentation
from pptx.util import Pt
from pptx.enum.text import PP_ALIGN
from bs4 import BeautifulSoup
import configparser
import requests
import feedparser
import datetime
import csv
import subprocess
# Main entry point
def main():
    """Build today's PowerPoint from the saved HTML and open the result."""
    print('処理開始!')
    print('自動でファイルと、メモ帳が開きます…')
    # Load settings from config.ini
    iniFile = getIniFile()
    # Generate and open the PowerPoint file
    createPP(iniFile)
    print('処理終了!')
# Load the application configuration
def getIniFile():
    """Read ./config.ini (UTF-8) and return the populated parser."""
    parser = configparser.ConfigParser()
    parser.read('./config.ini', 'UTF-8')
    return parser
# Generate the PowerPoint file
def createPP(iniFile):
    """Fill the template deck with today's data, save it and open it."""
    prs = Presentation(iniFile.get('settings', 'IN'))
    # Scrape the source table from the saved HTML
    inputTable = getInputTable(iniFile)
    # Rewrite the tables on slide 1 and slide 2
    editPPTable(iniFile, prs.slides[0].shapes[0].table, prs.slides[1].shapes[1].table, inputTable)
    # Save the deck and open it with the default application (Windows 'start')
    prs.save(iniFile.get('settings', 'OUT') + createFileName() + '.pptx')
    subprocess.call("start " + iniFile.get('settings', 'OUT') + createFileName() + '.pptx',shell=True)
# Load the source data
def getInputTable(iniFile):
    """Parse the saved HTML file and return its first <table> element."""
    # Read the saved HTML (Shift-JIS; undecodable bytes are ignored)
    with open(iniFile.get('settings', 'HTML'), encoding="shift_JIS", errors='ignore') as f:
        html = f.read()
    # Parse the document
    soup = BeautifulSoup(html, 'lxml')
    # Return the first table
    return soup.findAll("table")[0]
# Change a cell's font size and centre its text
def changeLayout(cell, size):
    """Set every text run in *cell* to *size* pt and centre each paragraph."""
    for paragraph in cell.text_frame.paragraphs:
        for run in paragraph.runs:
            run.font.size = Pt(size)
        # Centre the paragraph as well
        paragraph.alignment = PP_ALIGN.CENTER
# Rewrite the PowerPoint tables from the scraped HTML table
def editPPTable(iniFile, table1, table2, inputTable):
    """Copy each room's reservation text from *inputTable* into the deck.

    The settings CSV maps a 3-character room code to
    [room name, target row, target table id]; table id '1' writes into
    *table1* (pageId 1), anything else into *table2* (pageId 2).
    """
    # Room-code cells in the HTML table
    tdList = inputTable.findAll("td", attrs = {"class": "p11pa2"})
    # Room-code -> [name, row, table] mapping from the settings CSV
    directory = getDirectory(iniFile.get('settings', 'CSV'))
    for td in tdList:
        # Only rooms with a configured row number are copied
        if directory[td.text[:3]][1] != '':
            # Target row number from the CSV
            rowNum = int(directory[td.text[:3]][1])
            # Log the room name
            print('--------------------- -------------')
            print('部屋→→→→→→→→→→ ' + directory[td.text[:3]][0])
            # Content cells belonging to the same HTML row
            contents = td.parent.findAll("td", attrs = {"class": "p11"})
            for i,content in enumerate(contents):
                # content.replace("®","")
                print(1)
                # Choose the target table and column for this cell
                if directory[td.text[:3]][2] == '1':
                    changeTable = table1
                    columnNum = 2 + i
                    pageId = 1
                else:
                    changeTable = table2
                    columnNum = 1 + i
                    pageId = 2
                # Write the text into the deck
                changeCell = changeTable.cell(rowNum, columnNum)
                changeCell.text = getStr(content, pageId)
                # '®' breaks console printing, so skip it in the log output
                if "®" not in changeCell.text:
                    print(changeCell.text)
                # Fix the cell layout
                changeLayout(changeCell, 10)
    # TODO: merge these cells some day
    # table2.cell(1, 3).merge(table2.cell(2, 3))
# Build the display string for one reservation cell
def getStr(content, pageId):
    """Return the text to show for one reservation cell.

    content: an element whose get_text(';') yields ';'-separated parts
             (guest name first, then an optional room/plan label).
    pageId:  1 -> "<label>\n(<name> 様)" layout, otherwise "<name> 様".
    Returns 'error' when nothing usable is left.
    """
    strList = content.get_text(';').split(';')
    # A leading '未' marks an unpaid reservation; drop the marker.
    # (Fix: previously an empty remainder crashed with IndexError on the
    # unused removeFirstName(strList[0]) call before reaching 'error'.)
    if strList and strList[0] == '未':
        strList.pop(0)
    if len(strList) >= 2:
        if pageId == 1:
            return strList[1] + '\n(' + strList[0] + ' 様)'
        else:
            return strList[0] + ' 様'
    elif len(strList) == 1:
        return strList[0]
    else:
        return 'error'
# Load the room-configuration CSV
def getDirectory(csvPath):
    """Map each room code (column 0) to [name, row number, table id]."""
    mapping = {}
    with open(csvPath, 'r') as f:
        for record in csv.reader(f):
            mapping[record[0]] = [record[1], record[2], record[3]]
    return mapping
def removeFirstName(fullName):
    """Return the name unchanged (first-name stripping is a TODO)."""
    # return fullName.translate(fullNameTable)
    return fullName
# Build the output file name from tomorrow's date
def createFileName():
    """Return tomorrow's date formatted as '<M>月<D>日(<weekday>)'."""
    weekday_names = ["月","火","水","木","金","土","日"]
    # Tomorrow's date
    tomorrow = datetime.datetime.now() + datetime.timedelta(days = 1)
    return '{}月{}日({})'.format(tomorrow.month, tomorrow.day, weekday_names[tomorrow.weekday()])
if __name__ == '__main__':
main()
| # coding: UTF-8
from pptx import Presentation
from pptx.util import Pt
from pptx.enum.text import PP_ALIGN
from bs4 import BeautifulSoup
import configparser
import requests
import feedparser
import datetime
import csv
import subprocess
# メイン処理
def main():
print('処理開始!')
print('自動でファイルと、メモ帳が開きます…')
# 設定ファイル取得
iniFile = getIniFile()
# PowerPointファイルを生成
createPP(iniFile)
print('処理終了!')
# 設定ファイル取得
def getIniFile():
iniFile = configparser.ConfigParser()
iniFile.read('./config.ini', 'UTF-8')
return iniFile
# パワーポイントを生成
def createPP(iniFile):
prs = Presentation(iniFile.get('settings', 'IN'))
# htmlから情報を取得
inputTable = getInputTable(iniFile)
# 表を変更
editPPTable(iniFile, prs.slides[0].shapes[0].table, prs.slides[1].shapes[1].table, inputTable)
# ファイルを保存
prs.save(iniFile.get('settings', 'OUT') + createFileName() + '.pptx')
subprocess.call("start " + iniFile.get('settings', 'OUT') + createFileName() + '.pptx',shell=True)
# データ元の情報を取得
def getInputTable(iniFile):
# 保存したhtmlを取得
with open(iniFile.get('settings', 'HTML'), encoding="shift_JIS", errors='ignore') as f:
html = f.read()
#要素を抽出
soup = BeautifulSoup(html, 'lxml')
# テーブルを指定
return soup.findAll("table")[0]
# セルのフォントサイズを変更して、中央揃えにする
def changeLayout(cell, size):
for paragraph in cell.text_frame.paragraphs:
for run in paragraph.runs:
run.font.size = Pt(size)
# 中央揃えにもする
paragraph.alignment = PP_ALIGN.CENTER
# パワーポイントのテーブルを修正
def editPPTable(iniFile, table1, table2, inputTable):
# 要素を取得
tdList = inputTable.findAll("td", attrs = {"class": "p11pa2"})
# 設定に必要なcsvを取得
directory = getDirectory(iniFile.get('settings', 'CSV'))
for td in tdList:
# 行番号がある場合
if directory[td.text[:3]][1] != '':
# 行番号をcsvから取得
rowNum = int(directory[td.text[:3]][1])
# 部屋の名前をログ出力
print('--------------------- -------------')
print('部屋→→→→→→→→→→ ' + directory[td.text[:3]][0])
# 要素を取得
contents = td.parent.findAll("td", attrs = {"class": "p11"})
for i,content in enumerate(contents):
# content.replace("®","")
print(1)
# テーブル番号、列番号を設定
if directory[td.text[:3]][2] == '1':
changeTable = table1
columnNum = 2 + i
pageId = 1
else:
changeTable = table2
columnNum = 1 + i
pageId = 2
# テキストを設定
changeCell = changeTable.cell(rowNum, columnNum)
changeCell.text = getStr(content, pageId)
# エラーになるのでこれだけ除外
if "®" not in changeCell.text:
print(changeCell.text)
# レイアウトを修正
changeLayout(changeCell, 10)
# いつかやる
# table2.cell(1, 3).merge(table2.cell(2, 3))
# 文字を取得
def getStr(content, pageId):
strList = content.get_text(';').split(';')
# 未入金の場合があるので、それを削除
if strList[0] == '未':
strList.pop(0)
nameStr = removeFirstName(strList[0])
if len(strList) >= 2:
if pageId == 1:
return strList[1] + '\n(' + strList[0] + ' 様)'
else:
return strList[0] + ' 様'
elif len(strList) == 1:
return strList[0]
else:
return 'error'
# CSVを取得
def getDirectory(csvPath):
directory = {}
with open(csvPath, 'r') as f:
reader = csv.reader(f)
for row in reader:
directory[row[0]] = [row[1],row[2],row[3]]
return directory
def removeFirstName(fullName):
# return fullName.translate(fullNameTable)
return fullName
# ファイルネームを生成
def createFileName():
# 曜日
yobi = ["月","火","水","木","金","土","日"]
# 明日を取得
tomorrow = datetime.datetime.now() + datetime.timedelta(days = 1)
# 整形して返却
return '{}月{}日({})'.format(tomorrow.month, tomorrow.day, yobi[tomorrow.weekday()])
if __name__ == '__main__':
main()
| ja | 0.999667 | # coding: UTF-8 # メイン処理 # 設定ファイル取得 # PowerPointファイルを生成 # 設定ファイル取得 # パワーポイントを生成 # htmlから情報を取得 # 表を変更 # ファイルを保存 # データ元の情報を取得 # 保存したhtmlを取得 #要素を抽出 # テーブルを指定 # セルのフォントサイズを変更して、中央揃えにする # 中央揃えにもする # パワーポイントのテーブルを修正 # 要素を取得 # 設定に必要なcsvを取得 # 行番号がある場合 # 行番号をcsvから取得 # 部屋の名前をログ出力 # 要素を取得 # content.replace("®","") # テーブル番号、列番号を設定 # テキストを設定 # エラーになるのでこれだけ除外 # レイアウトを修正 # いつかやる # table2.cell(1, 3).merge(table2.cell(2, 3)) # 文字を取得 # 未入金の場合があるので、それを削除 # CSVを取得 # return fullName.translate(fullNameTable) # ファイルネームを生成 # 曜日 # 明日を取得 # 整形して返却 | 2.690106 | 3 |
tests/unit/credentials/test_injection.py | datavaluepeople/tentaclio | 12 | 6620774 | import pytest
from tentaclio import URL
from tentaclio.credentials import injection
@pytest.mark.parametrize(
["with_creds", "raw", "expected"],
[
[
"scheme://user:password@octo.energy/path",
"scheme://octo.energy/path",
"scheme://user:password@octo.energy/path",
],
[
"scheme://user:password@octo.energy/path/",
"scheme://octo.energy/path/with/more/elements",
"scheme://user:password@octo.energy/path/with/more/elements",
],
[
"scheme://user:password@octo.energy/path/",
"scheme://octo.energy/path/with/more/elements",
"scheme://user:password@octo.energy/path/with/more/elements",
],
[
"scheme://user:password@octo.energy/database",
"scheme://octo.energy/database::table",
"scheme://user:password@octo.energy/database::table",
],
[
"scheme://user:password@octo.energy/database",
"scheme://hostname/database", # hostname wildcard
"scheme://user:password@octo.energy/database",
],
[
"scheme://user:password@octo.energy:5544/database",
"scheme://octo.energy/database",
"scheme://user:password@octo.energy:5544/database",
],
[
"scheme://user:password@octo.energy/database",
"scheme://octo.energy/database2",
"scheme://octo.energy/database2", # the path is similar but not identical
],
[
"scheme://user:password@octo.energy:5544/database?key=value",
"scheme://octo.energy/database",
"scheme://user:password@octo.energy:5544/database?key=value",
],
[
"scheme://user:password@octo.energy:5544/database?key=value_1",
"scheme://octo.energy/database?key=value_2",
"scheme://user:password@octo.energy:5544/database?key=value_2",
],
[
"scheme://user:password@octo.energy/", # trailing slash
"scheme://octo.energy/file",
"scheme://user:password@octo.energy/file",
],
[
"scheme://user:password@octo.energy/", # trailing slash
"scheme://octo.energy/path/",
"scheme://user:password@octo.energy/path/",
],
[
"scheme://user:password@octo.energy/path", # specifying user
"scheme://user:@octo.energy/path",
"scheme://user:password@octo.energy/path",
],
[
"scheme://octo.energy/path", # specified user not found
"scheme://user:@octo.energy/path",
"scheme://user:@octo.energy/path",
],
],
)
def test_simple_authenticate(with_creds, raw, expected):
    """Credentials registered for *with_creds* are injected into *raw*.

    The parametrized cases cover path prefixes, ports, query strings,
    the 'hostname' wildcard, and user-only matching.
    """
    injector = injection.CredentialsInjector()
    with_creds_url = URL(with_creds)
    raw_url = URL(raw)
    expected_url = URL(expected)
    # Register the credentialled URL, then inject into the bare one.
    injector.register_credentials(with_creds_url)
    result = injector.inject(raw_url)
    assert expected_url == result
@pytest.mark.parametrize(
"path_1, path_2, expected",
[
("", "", 0.5),
("path", None, 0.5),
("path_elem_1", "path_elem_1/path_elem_2", 1),
("path_elem_1/path_elem_2", "path_elem_1/path_elem_2", 2),
],
)
def test_similarites(path_1, path_2, expected):
    """_similarity scores path overlap; longer shared prefixes score higher."""
    # NOTE(review): the name has a typo ('similarites'); renaming would
    # change the collected test id, so it is kept as-is.
    result = injection._similarity(path_1, path_2)
    assert result == expected
def test_hostname_is_wildcard():
    """The literal host 'hostname' acts as a wildcard matching any host."""
    matches = injection._filter_by_hostname(
        URL("scheme://hostname/"), [URL("scheme://google.com/path")]
    )
    assert matches == [URL("scheme://google.com/path")]
def test_filter_by_hostname():
    """Only URLs whose host equals the filter URL's host are kept."""
    matches = injection._filter_by_hostname(
        URL("scheme://google.com/"),
        [URL("scheme://google.com/path"), URL("scheme://yahoo.com/path")],
    )
    assert matches == [URL("scheme://google.com/path")]
| import pytest
from tentaclio import URL
from tentaclio.credentials import injection
@pytest.mark.parametrize(
["with_creds", "raw", "expected"],
[
[
"scheme://user:password@octo.energy/path",
"scheme://octo.energy/path",
"scheme://user:password@octo.energy/path",
],
[
"scheme://user:password@octo.energy/path/",
"scheme://octo.energy/path/with/more/elements",
"scheme://user:password@octo.energy/path/with/more/elements",
],
[
"scheme://user:password@octo.energy/path/",
"scheme://octo.energy/path/with/more/elements",
"scheme://user:password@octo.energy/path/with/more/elements",
],
[
"scheme://user:password@octo.energy/database",
"scheme://octo.energy/database::table",
"scheme://user:password@octo.energy/database::table",
],
[
"scheme://user:password@octo.energy/database",
"scheme://hostname/database", # hostname wildcard
"scheme://user:password@octo.energy/database",
],
[
"scheme://user:password@octo.energy:5544/database",
"scheme://octo.energy/database",
"scheme://user:password@octo.energy:5544/database",
],
[
"scheme://user:password@octo.energy/database",
"scheme://octo.energy/database2",
"scheme://octo.energy/database2", # the path is similar but not identical
],
[
"scheme://user:password@octo.energy:5544/database?key=value",
"scheme://octo.energy/database",
"scheme://user:password@octo.energy:5544/database?key=value",
],
[
"scheme://user:password@octo.energy:5544/database?key=value_1",
"scheme://octo.energy/database?key=value_2",
"scheme://user:password@octo.energy:5544/database?key=value_2",
],
[
"scheme://user:password@octo.energy/", # trailing slash
"scheme://octo.energy/file",
"scheme://user:password@octo.energy/file",
],
[
"scheme://user:password@octo.energy/", # trailing slash
"scheme://octo.energy/path/",
"scheme://user:password@octo.energy/path/",
],
[
"scheme://user:password@octo.energy/path", # specifying user
"scheme://user:@octo.energy/path",
"scheme://user:password@octo.energy/path",
],
[
"scheme://octo.energy/path", # specified user not found
"scheme://user:@octo.energy/path",
"scheme://user:@octo.energy/path",
],
],
)
def test_simple_authenticate(with_creds, raw, expected):
injector = injection.CredentialsInjector()
with_creds_url = URL(with_creds)
raw_url = URL(raw)
expected_url = URL(expected)
injector.register_credentials(with_creds_url)
result = injector.inject(raw_url)
assert expected_url == result
@pytest.mark.parametrize(
"path_1, path_2, expected",
[
("", "", 0.5),
("path", None, 0.5),
("path_elem_1", "path_elem_1/path_elem_2", 1),
("path_elem_1/path_elem_2", "path_elem_1/path_elem_2", 2),
],
)
def test_similarites(path_1, path_2, expected):
result = injection._similarity(path_1, path_2)
assert result == expected
def test_hostname_is_wildcard():
matches = injection._filter_by_hostname(
URL("scheme://hostname/"), [URL("scheme://google.com/path")]
)
assert matches == [URL("scheme://google.com/path")]
def test_filter_by_hostname():
matches = injection._filter_by_hostname(
URL("scheme://google.com/"),
[URL("scheme://google.com/path"), URL("scheme://yahoo.com/path")],
)
assert matches == [URL("scheme://google.com/path")]
| en | 0.722338 | # hostname wildcard # the path is similar but not identical # trailing slash # trailing slash # specifying user # specified user not found | 2.169857 | 2 |
xy_cli/commands/say.py | exiahuang/xy-cli | 0 | 6620775 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, argparse
from ..libs.tts import g_tts
import tempfile
# Derive the CLI sub-command name from this file's name
# (e.g. "foo_bar.py" -> "foo:bar").
command_name = os.path.basename(__file__).split('.', 1)[0].replace("_", ":")
def main(args):
    """Speak args.sentence via TTS using the parsed CLI options."""
    if args.sentence:
        g_tts(args.sentence, args.tmpdir, args.lang, args.engine)
def register(parser, subparsers, **kwargs):
    """Attach the text-to-speech sub-command to *subparsers*."""
    def handler(args):
        # No sentence given: show this sub-command's help text.
        if args.sentence is None:
            print(parser.parse_args([command_name, '--help']))
            return
        if args.sentence:
            main(args)
    subcommand = subparsers.add_parser(command_name,
                                       help='say sentence.')
    subcommand.add_argument('-s',
                            '--sentence',
                            type=str,
                            default=None,
                            help='sentence',
                            required=False)
    subcommand.add_argument('-e',
                            '--engine',
                            type=str,
                            default='mpg123',
                            help='mp3 engine, default mpg123',
                            required=False)
    subcommand.add_argument('-l',
                            '--lang',
                            type=str,
                            default='en',
                            help='IETF language tag, example: en, ja, zh-CN, zh-TW',
                            required=False)
    subcommand.add_argument('-t',
                            '--tmpdir',
                            type=str,
                            default=os.path.join(tempfile.gettempdir(), 'tts'),
                            help='temp directory',
                            required=False)
    subcommand.set_defaults(handler=handler)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, argparse
from ..libs.tts import g_tts
import tempfile
command_name = os.path.basename(__file__).split('.', 1)[0].replace("_", ":")
def main(args):
if args.sentence:
g_tts(args.sentence, args.tmpdir, args.lang, args.engine)
def register(parser, subparsers, **kwargs):
def handler(args):
if args.sentence is None:
print(parser.parse_args([command_name, '--help']))
return
if args.sentence:
main(args)
subcommand = subparsers.add_parser(command_name,
help='say sentence.')
subcommand.add_argument('-s',
'--sentence',
type=str,
default=None,
help='sentence',
required=False)
subcommand.add_argument('-e',
'--engine',
type=str,
default='mpg123',
help='mp3 engine, default mpg123',
required=False)
subcommand.add_argument('-l',
'--lang',
type=str,
default='en',
help='IETF language tag, example: en, ja, zh-CN, zh-TW',
required=False)
subcommand.add_argument('-t',
'--tmpdir',
type=str,
default=os.path.join(tempfile.gettempdir(), 'tts'),
help='temp directory',
required=False)
subcommand.set_defaults(handler=handler)
| en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 2.677687 | 3 |
Flappy bird2/main.py | Sherlocks6/Flappy-bird | 0 | 6620776 |
import random
import pip
import pygame
import sys
#Draws the game background
def draw_bg():
    """Blit the background image at the top-left corner."""
    screen.blit(bg_surface,(0, 0))
#Draws the scrolling floor (two copies for a seamless wrap-around)
def draw_floor():
    """Blit the floor twice, side by side, at the current scroll offset."""
    screen.blit(floor_surface,(floor_x_postion, 900))
    screen.blit(floor_surface,(floor_x_postion + WIDTH, 900))
#Creates a new pair of pipes
def create_pipe():
    """Return (bottom_rect, top_rect) for a new pipe pair off-screen right."""
    #Pick one of the three possible pipe heights
    random_pipe_pos = random.choice(pipe_height)
    #Build rects around the chosen height; the vertical gap between the
    #bottom and top pipe is 300 pixels
    bottom_pipe = pipe_surface.get_rect(midtop =(700, random_pipe_pos))
    top_pipe = pipe_surface.get_rect(midbottom =(700, random_pipe_pos - 300))
    #Return both rects
    return bottom_pipe, top_pipe
#Moves the pipes to the left
def move_pipes(pipes):
    """Shift every pipe rect 5 px to the left and return the list."""
    for pipe in pipes:
        #Move the rect's centre by -5 on the x axis
        pipe.centerx -=5
    return pipes
#Draws the pipes
def draw_pipes(pipes):
    """Blit each pipe; pipes above the gap are drawn vertically flipped."""
    for pipe in pipes:
        if pipe.bottom >= 1024:
            screen.blit(pipe_surface, pipe)
        else:
            #Flip the top pipe vertically so the pair forms a tunnel
            flip_pipe = pygame.transform.flip(pipe_surface, False, True)
            screen.blit(flip_pipe,pipe)
#Rotates the bird
def rotate_bird(bird):
    """Return the bird surface tilted according to its vertical speed."""
    #rotozoom = rotate and scale in one call (scale factor 1 here)
    new_bird = pygame.transform.rotozoom(bird, -bird_movment * 3,1)
    return new_bird
#Animates the bird
def bird_animation():
    """Return the current wing-flap frame and its rect at the bird's position."""
    #Pick the current frame out of the three flap images
    new_bird = bird_frames[bird_index]
    new_bird_rect = new_bird.get_rect(center =(100, bird_rect.centery))
    return new_bird, new_bird_rect
#Checks the bird's collisions with pipes and the screen edges
def check_collision(pipes):
    """Return True while the bird is alive, False on any collision."""
    for pipe in pipes:
        if bird_rect.colliderect(pipe):
            print("Kollision")
            death_sound.play()
            return False
    #Leaving the screen at the top or bottom also ends the game
    if bird_rect.top <= -100 or bird_rect.bottom >=900:
        print("collision")
        death_sound.play()
        return False
    return True
#Displays the score (during play only the score,
#on the game-over screen the high score as well)
def score_display(game_state):
    """Render score text; in the 'game_over' state also render the high score."""
    #Regular in-game score
    if game_state == "main_game":
        score_surface = game_font.render(f"Score: {int(score)}",True,(white))#f-string so values can be embedded in the string#
        score_rect = score_surface.get_rect(center= (288, 100))
        screen.blit(score_surface,score_rect)
    if game_state == "game_over":
        #High score
        global high_score #global to read the value defined at module level
        high_score_surface = game_font.render(f"High Score: {int(high_score)}",True,(white)) #f-string so values can be embedded in the string#
        high_score_rect = high_score_surface.get_rect(center = (288,850))
        screen.blit(high_score_surface,high_score_rect)
        score_surface = game_font.render(f"Score: {int(score)}",True,(white)) #f-string so values can be embedded in the string#
        score_rect = score_surface.get_rect(center= (288, 100))
        screen.blit(score_surface,score_rect)
#Updates the high score once per game-loop iteration
def update_score(score,high_score):
    """Return the new high score: the larger of current score and high score."""
    return max(score, high_score)
#
#Variables
#
WIDTH, HEIGHT = 576, 1024
#Whether the game is currently running (False = game-over screen)
game_active = True
#Gravity applied to the bird every frame
gravity = 0.5
#Best score reached so far
high_score = 0
#Current vertical speed of the bird
bird_movment = 0
#Current score
score = 0
#Frames left until the next score sound is played
score_sound_countdown = 100
#Colours
white = (255,255,255)
black = (0,0,0)
red = (255,0,0)
green = (0,255,0)
blue = (0,0,255)
#Create a window of size WIDTH x HEIGHT
screen = pygame.display.set_mode((WIDTH, HEIGHT))
#Set the window title
pygame.display.set_caption("Flappy Bird")
#Initialise the audio mixer
pygame.mixer.init()
#Initialise pygame itself
pygame.init()
#Clock used to cap the frame rate
clock = pygame.time.Clock()
#Load the font
game_font = pygame.font.Font("04B_19.ttf",40)
#Images
#Bird frame 1
bird_downflap = pygame.image.load("assets//bluebird-downflap.png")
#Scale the image up by a factor of 2
bird_downflap = pygame.transform.scale2x(bird_downflap)
#Frame 2
bird_midflap = pygame.image.load("assets//bluebird-midflap.png")
#Scale the image up by a factor of 2
bird_midflap = pygame.transform.scale2x(bird_midflap)
#Frame 3
bird_upflap = pygame.image.load("assets//bluebird-upflap.png")
#Scale the image up by a factor of 2
bird_upflap = pygame.transform.scale2x(bird_upflap)
#All flap frames in one list
bird_frames = [bird_downflap,bird_midflap,bird_upflap]
#Index of the current frame in the list above
bird_index = 0
#The first frame in the list is the bird's starting surface
bird_surface = bird_frames[bird_index]
#Rect around the bird image; this is what gets moved and collided
bird_rect = bird_surface.get_rect(center = (100, 512))
#Custom user event that drives the wing-flap animation
BIRDFLAP = pygame.USEREVENT + 1
#Fire a BIRDFLAP event every 200 ms
pygame.time.set_timer(BIRDFLAP,200)
#Floor image
floor_surface = pygame.image.load("assets//base.png")
#Scale the image up by a factor of 2
floor_surface = pygame.transform.scale2x(floor_surface)
#Current x coordinate of the scrolling floor
floor_x_postion = 0
#Background image
bg_surface = pygame.image.load("assets//background-night.png")
#Scale the image up by a factor of 2
bg_surface = pygame.transform.scale2x(bg_surface)
#Pipe image
pipe_surface = pygame.image.load("assets//pipe-green.png")
#Scale the image up by a factor of 2
pipe_surface = pygame.transform.scale2x(pipe_surface)
#List of all pipe rects currently on screen
pipe_list = []
#Custom user event that spawns a new pipe pair every 1200 ms
SPAWNPIPE = pygame.USEREVENT
pygame.time.set_timer(SPAWNPIPE,1200)
#Possible pipe heights
pipe_height = [400, 600, 800]
#
#Game-over screen images
#
#Image shown on the game-over screen
game_over_surface = pygame.image.load("assets//message.png")
#Scale the image up by a factor of 2
game_over_surface = pygame.transform.scale2x(game_over_surface)
#Rect positioning the game-over image on screen
game_over_rect = game_over_surface.get_rect(center =(288,512))
#Sounds
#Wing-flap, death and scoring effects
flap_sound = pygame.mixer.Sound("sound//sfx_wing.wav")
death_sound = pygame.mixer.Sound("sound//sfx_die.wav")
score_sound = pygame.mixer.Sound("sound//sfx_point.wav")
run = True
#Main loop: handle events, then draw either the running game or the
#game-over screen, capped at 60 frames per second.
while run:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
        if event.type == pygame.KEYDOWN:
            #Space while playing: flap (reset speed, then jump up)
            if event.key == pygame.K_SPACE and game_active:
                bird_movment = 0
                bird_movment -=12
                flap_sound.play()
            #Space on the game-over screen: restart the game
            if event.key == pygame.K_SPACE and game_active is False:
                game_active = True
                pipe_list.clear()
                bird_rect.center = (100,512)
                bird_movment = 0
                score = 0
        if event.type == SPAWNPIPE:
            pipe_list.extend(create_pipe())
            #print(pipe_list)
        #Cycle through the bird's flap frames
        if event.type == BIRDFLAP:
            if bird_index < 2:
                bird_index +=1
            else:
                bird_index = 0
            bird_surface, bird_rect = bird_animation()
    #Draw the background
    draw_bg()
    #Runs while the game is active
    if game_active is True:
        #Apply gravity, tilt the bird and draw it at its new position
        bird_movment += gravity
        rotated_bird = rotate_bird(bird_surface)
        bird_rect.centery += bird_movment
        screen.blit(rotated_bird, bird_rect)
        game_active = check_collision(pipe_list)
        pipe_list = move_pipes(pipe_list)
        draw_pipes(pipe_list)
        #Scroll the floor while playing
        floor_x_postion -=1
        draw_floor()
        if floor_x_postion <= -576:
            floor_x_postion = 0
        #Show the score
        score += 0.01
        score_display("main_game")
        #Play the score sound once the countdown runs out
        score_sound_countdown -= 1
        if score_sound_countdown <= 0:
            score_sound.play()
            score_sound_countdown = 100
    else:
        score_sound_countdown = 100
        screen.blit(game_over_surface,game_over_rect)
        high_score = update_score(score, high_score)
        score_display("game_over")
        #Keep scrolling the floor on the game-over screen
        floor_x_postion -=1
        draw_floor()
        #Wrap the floor around
        if floor_x_postion <= -576:
            floor_x_postion = 0
    #Refresh the screen every loop
    pygame.display.update()
    #Cap the loop at 60 iterations per second
    clock.tick(60)
|
import random
import pip
import pygame
import sys
#Zeichnet den Hintergrund des Spiels
def draw_bg():
    """Blit the (pre-scaled) background image at the top-left corner."""
    screen.blit(bg_surface,(0, 0))
#Zeichnet den Boden
def draw_floor():
    """Draw two copies of the floor side by side so the scroll wraps seamlessly."""
    screen.blit(floor_surface,(floor_x_postion, 900))
    screen.blit(floor_surface,(floor_x_postion + WIDTH, 900))
#Erschafft die pipes
def create_pipe():
    """Spawn one bottom/top pipe pair just right of the screen edge.

    The gap position is picked from the preset ``pipe_height`` values; the
    top pipe ends 300 px above the bottom pipe's top edge, forming the gap.
    """
    gap_top = random.choice(pipe_height)
    lower_pipe = pipe_surface.get_rect(midtop=(700, gap_top))
    upper_pipe = pipe_surface.get_rect(midbottom=(700, gap_top - 300))
    return lower_pipe, upper_pipe
#Verschiebt die Piped
def move_pipes(pipes):
    """Shift every pipe 5 px to the left and drop pipes that left the screen.

    Pruning fully off-screen pipes keeps the list (and the per-frame work in
    draw_pipes/check_collision) from growing without bound during long runs.
    """
    for pipe in pipes:
        pipe.centerx -= 5
    # A pipe whose right edge is well past the left border can never be seen
    # or collided with again, so it is safe to forget it.
    return [pipe for pipe in pipes if pipe.right > -50]
#Zeichnet die Pipes
def draw_pipes(pipes):
    """Draw every pipe; rects anchored below y=1024 are bottom pipes, the rest are flipped top pipes."""
    for pipe in pipes:
        if pipe.bottom >= 1024:
            screen.blit(pipe_surface, pipe)
        else:
            # Flip the sprite vertically so the pair forms a tunnel.
            flip_pipe = pygame.transform.flip(pipe_surface, False, True)
            screen.blit(flip_pipe,pipe)
#Rotiert den Vogel
def rotate_bird(bird):
    """Return the bird surface rotated against its current vertical speed.

    rotozoom = rotate and scale; scale 1 keeps the size, and -3 degrees per
    unit of ``bird_movment`` tilts the bird nose-down as it falls.
    """
    new_bird = pygame.transform.rotozoom(bird, -bird_movment * 3,1)
    return new_bird
#animiert den Vogel
def bird_animation():
    """Return the current flap frame and a rect for it at the bird's height."""
    # bird_frames holds the three wing positions; bird_index selects the frame.
    new_bird = bird_frames[bird_index]
    new_bird_rect = new_bird.get_rect(center =(100, bird_rect.centery))
    return new_bird, new_bird_rect
#checkt die berührungen des Vogels mit anderen Oberflächen
def check_collision(pipes):
    """Return False (game over) when the bird hits a pipe or leaves the screen, else True."""
    if any(bird_rect.colliderect(pipe) for pipe in pipes):
        print("Kollision")
        death_sound.play()
        return False
    out_of_bounds = bird_rect.top <= -100 or bird_rect.bottom >= 900
    if out_of_bounds:
        print("collision")
        death_sound.play()
        return False
    return True
#displayed den Punktestand (wenn normales game dann nur score)
#wenn game over dann auch highscore
def score_display(game_state):
    """Render the score (and, on the game-over screen, the high score)."""
    # regular score only
    if game_state == "main_game":
        score_surface = game_font.render(f"Score: {int(score)}",True,(white))# f-string lets us embed expressions in the text
        score_rect = score_surface.get_rect(center= (288, 100))
        screen.blit(score_surface,score_rect)
    if game_state == "game_over":
        # high score as well
        global high_score # reaches the module-level value; only read here, so the global statement is not strictly required
        high_score_surface = game_font.render(f"High Score: {int(high_score)}",True,(white)) # f-string lets us embed expressions in the text
        high_score_rect = high_score_surface.get_rect(center = (288,850))
        screen.blit(high_score_surface,high_score_rect)
        score_surface = game_font.render(f"Score: {int(score)}",True,(white)) # f-string lets us embed expressions in the text
        score_rect = score_surface.get_rect(center= (288, 100))
        screen.blit(score_surface,score_rect)
#Updated den score immer wenn der Timer abgelaufen ist(loop)
def update_score(score,high_score):
    """Return the new high score: the larger of the current score and the old record."""
    return score if score > high_score else high_score
#
# Variables
#
WIDTH, HEIGHT = 576, 1024
# current game state: True while a round is being played
game_active = True
# downward acceleration applied to the bird every frame
gravity = 0.5
# best score seen so far in this session
high_score = 0
# bird's current vertical speed (negative = upwards); the original "movment" spelling is kept
bird_movment = 0
# score of the current round
score = 0
# frame countdown until the next point sound
score_sound_countdown = 100
# colours (RGB)
white = (255,255,255)
black = (0,0,0)
red = (255,0,0)
green = (0,255,0)
blue = (0,0,255)
# create a window of size WIDTH x HEIGHT
screen = pygame.display.set_mode((WIDTH, HEIGHT))
# window title
pygame.display.set_caption("Flappy Bird")
# initialise the audio mixer
pygame.mixer.init()
# initialise the remaining pygame modules
pygame.init()
# clock used to cap the frame rate
clock = pygame.time.Clock()
# load the score font
game_font = pygame.font.Font("04B_19.ttf",40)
# images
# bird frame 1
bird_downflap = pygame.image.load("assets//bluebird-downflap.png")
# scale the image up by a factor of 2
bird_downflap = pygame.transform.scale2x(bird_downflap)
# frame 2
bird_midflap = pygame.image.load("assets//bluebird-midflap.png")
# scale the image up by a factor of 2
bird_midflap = pygame.transform.scale2x(bird_midflap)
# frame 3
bird_upflap = pygame.image.load("assets//bluebird-upflap.png")
# scale the image up by a factor of 2
bird_upflap = pygame.transform.scale2x(bird_upflap)
# all animation frames in one list
bird_frames = [bird_downflap,bird_midflap,bird_upflap]
# index of the currently shown frame
bird_index = 0
# the first frame is the initial bird surface
bird_surface = bird_frames[bird_index]
# controllable rect around the bird image
bird_rect = bird_surface.get_rect(center = (100, 512))
# user event fired periodically to advance the flap animation
BIRDFLAP = pygame.USEREVENT + 1
# post a BIRDFLAP event every 200 ms
pygame.time.set_timer(BIRDFLAP,200)
# floor image
floor_surface = pygame.image.load("assets//base.png")
# scale the image up by a factor of 2
floor_surface = pygame.transform.scale2x(floor_surface)
# x coordinate of the scrolling floor ("postion" spelling kept from the original)
floor_x_postion = 0
# background image
bg_surface = pygame.image.load("assets//background-night.png")
# scale the image up by a factor of 2
bg_surface = pygame.transform.scale2x(bg_surface)
# pipe image
pipe_surface = pygame.image.load("assets//pipe-green.png")
# scale the image up by a factor of 2
pipe_surface = pygame.transform.scale2x(pipe_surface)
# list of the pipes currently in play
pipe_list = []
# user event fired whenever a new pipe pair should spawn
SPAWNPIPE = pygame.USEREVENT
pygame.time.set_timer(SPAWNPIPE,1200)
# possible y positions for the pipe gap
pipe_height = [400, 600, 800]
#
# game-over image
#
# picture shown while the game is inactive
game_over_surface = pygame.image.load("assets//message.png")
# scale the image up by a factor of 2
game_over_surface = pygame.transform.scale2x(game_over_surface)
# rect positioning the picture at the screen centre
game_over_rect = game_over_surface.get_rect(center =(288,512))
# sound effects
flap_sound = pygame.mixer.Sound("sound//sfx_wing.wav")
death_sound = pygame.mixer.Sound("sound//sfx_die.wav")
score_sound = pygame.mixer.Sound("sound//sfx_point.wav")
run = True
while run:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_SPACE and game_active:
                # flap: reset the vertical speed, then push the bird upwards
                bird_movment = 0
                bird_movment -=12
                flap_sound.play()
            if event.key == pygame.K_SPACE and game_active is False:
                # restart: clear pipes and put the bird back at the start position
                game_active = True
                pipe_list.clear()
                bird_rect.center = (100,512)
                bird_movment = 0
                score = 0
        if event.type == SPAWNPIPE:
            pipe_list.extend(create_pipe())
            #print(pipe_list)
        # cycle through the bird's animation frames
        if event.type == BIRDFLAP:
            if bird_index < 2:
                bird_index +=1
            else:
                bird_index = 0
            bird_surface, bird_rect = bird_animation()
    # draw the background
    draw_bg()
    # main-game branch, runs while a round is active
    if game_active is True:
        # apply gravity to the bird's speed, then move and draw it
        bird_movment += gravity
        rotated_bird = rotate_bird(bird_surface)
        bird_rect.centery += bird_movment
        screen.blit(rotated_bird, bird_rect)
        game_active = check_collision(pipe_list)
        pipe_list = move_pipes(pipe_list)
        draw_pipes(pipe_list)
        # scroll the floor during the main game
        floor_x_postion -=1
        draw_floor()
        if floor_x_postion <= -576:
            floor_x_postion = 0
        # show the score
        score += 0.01
        score_display("main_game")
        # play the point sound every 100 frames
        score_sound_countdown -= 1
        if score_sound_countdown <= 0:
            score_sound.play()
            score_sound_countdown = 100
    else:
        score_sound_countdown = 100
        screen.blit(game_over_surface,game_over_rect)
        high_score = update_score(score, high_score)
        score_display("game_over")
        # keep the floor scrolling even while the game is over
        floor_x_postion -=1
        draw_floor()
        # wrap the floor back around
        if floor_x_postion <= -576:
            floor_x_postion = 0
    # refresh the screen once per loop iteration
    pygame.display.update()
    # cap the loop at 60 frames per second
    clock.tick(60)
| de | 0.992531 | #Zeichnet den Hintergrund des Spiels #Zeichnet den Boden #Erschafft die pipes #Kreiert ein der drei mögichen höhen für die pipe #Erschafft ein Viereck um die Zufällige höhe wo dann ein bild gemalt werden kann #gibt die Ergebnisse aus #Verschiebt die Piped #verschiebt das Zentrum um -5 auf der x achse #Zeichnet die Pipes #Dreht die Pipes um um einen Tunnel zu schaffen #Rotiert den Vogel #rotozoom = Rotieren und vergößern #animiert den Vogel #Liste wo die 3 Frames drin sind #checkt die berührungen des Vogels mit anderen Oberflächen #displayed den Punktestand (wenn normales game dann nur score) #wenn game over dann auch highscore #normaler score #f-string um in dem String code schreiben zu können# #highscore #global um auf einen Wert auserhalb einer funktion zuzugreifen und ihn zu verändern #f-string um in dem String code schreiben zu können# #f-string um in dem String code schreiben zu können# #Updated den score immer wenn der Timer abgelaufen ist(loop) # #Variablen # #Zeigt Spielzustand an #Gravitation #joa selbsterklärend #Vogel Geschwindigkeit #Score #Score countdown zum genauen Gräusch #Farben #Erschafft ein fenster Welches die Größe von WIDTH und HEIGTH hat #Überschrift gestalten #lässt mich audios benutzen #lässt mich pygame benutzen (i guess) #ist die refresh rate des Bildes #Schrftart einfügen #Bilder #Bild vom vogel frame 1 #Vergrößert das bild um den Faktor 2 #Frame 2 #Vergrößert das bild um den Faktor 2 #Frame 3 #Vergrößert das bild um den Faktor 2 #Alle Frames (Bilder) in einer Liste #Gibt die Zahl für die untrige Liste an #Das erste Bild in Der Liste ist der Surface für den Vogel #erschafft ein Viereck um das Hauptvogelbild welches man kontrollieren kann #Userevent für den Vogel(sagt dass etwas mit dem Vogel passiert) #Ich verstehe die Zeile Nicht(Funktioniertr aber) #Bilder vom Boden #Vergrößert das bild um den Faktor 2 #Zeigt die x Koordinate des Boden #Bilder vom Hintergrund #Vergrößert das bild um den Faktor 2 #Bild der Pipes 
#Vergrößert das bild um den Faktor 2 #Liste mit den pipes die gemacht werden sollen #Userevent für die Pipes wenn sie erscheinen #höhe der pipes # #gameover Bilder zum anzeigen # #Zeigt welches Bild verwendet werden soll #Vergrößert das bild um den Faktor 2 #Schafft ein Viereck um das Bild und zeigt wo es liegt #Geräusche einbauen #kannste dir ja denken OMEGALUL #print(pipe_list) #Wechselt durch die verschiedenen Bilder des Vogels #Zeichnet den Hintergrund #läuft immer durch #regelt geschwindigkeit und gravitation des Vogels #bewegt den Boden im main_game #Punkte anzeigen #Punkte Sound #bewegt den Boden auch wenn das Spiel nicht läuft #setzt den Boden zurück #updatet bei jedem Loop den screen #updated so oft wies in der Klammer steht | 3.072315 | 3 |
Beginner/1072.py | pedrodanieljardim/DesafiosURI-feitos-em-JAVA | 1 | 6620777 | <gh_stars>1-10
# beecrowd/URI 1072 - Interval 2: count how many of the inputs lie inside [10, 20].
number_of_cases = int(input())
count_in = 0
count_out = 0
for _ in range(number_of_cases):
    value = int(input())
    if 10 <= value <= 20:
        count_in += 1
    else:
        count_out += 1
# The original attached these prints to a `for ... else:`; a loop's else branch
# runs whenever the loop is not broken, so it always ran here and only obscured
# the intent. Plain statements after the loop are equivalent and clearer.
print("{} in".format(count_in))
print("{} out".format(count_out))
| numberCases = int(input())
countIn: int = 0
countOut: int = 0
for i in range (0, numberCases, 1):
n = int(input())
if 10 <= n <= 20:
countIn += 1
else:
countOut += 1
else:
print("{} in".format(countIn))
print("{} out".format(countOut)) | none | 1 | 3.542301 | 4 | |
Report.py | Benjamin-Fairy/scu-covid-auto-checkin | 1 | 6620778 | <filename>Report.py
# coding=utf-8
# Runs the check-in script and e-mails its output unless it was already filed.
import yagmail
import os
# capture the stdout of the check-in script
f = os.popen("python3 checkin.py", 'r')
res = f.readlines()
f.close()
receiver = os.environ['RCV'] # mailbox that receives the report
body = res # mail body: the check-in script's output lines
filename = "" # attachment to send (none)
yag = yagmail.SMTP(
    user=os.environ['ACT'], # sender mailbox; may be the same as the receiver
    password=os.<PASSWORD>['<PASSWORD>'], # authorization code; NOTE(review): redacted placeholder, not valid Python as-is - restore the os.environ[...] lookup
    host='smtp.163.com')
if "已经" in str(body):
    # the output says the form was already submitted - skip the mail
    print("已填报,不发送")
else:
    yag.send(
        to=receiver,
        subject="打卡结果", # mail subject
        contents=body)
    print("已发送邮件")
print("操作结束")
| <filename>Report.py
# coding=utf-8
import yagmail
import os
f = os.popen("python3 checkin.py", 'r')
res = f.readlines()
f.close()
receiver = os.environ['RCV'] # 要接收邮件的邮箱
body = res # 邮件正文
filename = "" # 要发送的附件
yag = yagmail.SMTP(
user=os.environ['ACT'], # 要发送邮件的邮箱,可以自己发送给自己
password=os.<PASSWORD>['<PASSWORD>'], # 授权码
host='smtp.163.com')
if "已经" in str(body):
print("已填报,不发送")
else:
yag.send(
to=receiver,
subject="打卡结果", # 邮件标题
contents=body)
print("已发送邮件")
print("操作结束")
| zh | 0.968449 | # coding=utf-8 # 要接收邮件的邮箱 # 邮件正文 # 要发送的附件 # 要发送邮件的邮箱,可以自己发送给自己 # 授权码 # 邮件标题 | 2.491982 | 2 |
bcs-ui/backend/components/paas_auth.py | laodiu/bk-bcs | 599 | 6620779 | <filename>bcs-ui/backend/components/paas_auth.py
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from backend.iam import legacy_perms as permissions
from .ssm import get_client_access_token
logger = logging.getLogger(__name__)
def get_access_token():
    """Return a non-user (client-credential) access token."""
    return get_client_access_token()
def get_role_list(access_token, project_id, need_user=False):
    """List a project's roles.

    The permission center has no role concept yet, so every user authorized
    to view the project is reported as an ordinary project member.
    """
    project_perm = permissions.ProjectPermission()
    authorized_users = project_perm.query_authorized_users(project_id, permissions.ProjectActions.VIEW.value)
    return [
        {
            "display_name": "项目成员",
            "role_id": 0,
            "role_name": "manager",
            "user_id": user.get("id"),
            "user_type": "user",
        }
        for user in authorized_users
    ]
try:
from .paas_auth_ext import * # noqa
except ImportError as e:
logger.debug("Load extension failed: %s", e)
| <filename>bcs-ui/backend/components/paas_auth.py
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from backend.iam import legacy_perms as permissions
from .ssm import get_client_access_token
logger = logging.getLogger(__name__)
def get_access_token():
"""获取非用户态access_token"""
return get_client_access_token()
def get_role_list(access_token, project_id, need_user=False):
"""获取角色列表(权限中心暂时没有角色的概念,先获取所有用户)"""
project_perm = permissions.ProjectPermission()
users = project_perm.query_authorized_users(project_id, permissions.ProjectActions.VIEW.value)
role_list = []
for _u in users:
# 所有用户都设置为项目成员
role_list.append(
{
"display_name": "项目成员",
"role_id": 0,
"role_name": "manager",
"user_id": _u.get("id"),
"user_type": "user",
}
)
return role_list
try:
from .paas_auth_ext import * # noqa
except ImportError as e:
logger.debug("Load extension failed: %s", e)
| en | 0.777942 | # -*- coding: utf-8 -*- Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 获取非用户态access_token 获取角色列表(权限中心暂时没有角色的概念,先获取所有用户) # 所有用户都设置为项目成员 # noqa | 1.655395 | 2 |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/cms/djangoapps/api/__init__.py | osoco/better-ways-of-thinking-about-software | 3 | 6620780 | <gh_stars>1-10
# lint-amnesty, pylint: disable=missing-module-docstring
default_app_config = 'cms.djangoapps.api.apps.ApiConfig'
| # lint-amnesty, pylint: disable=missing-module-docstring
default_app_config = 'cms.djangoapps.api.apps.ApiConfig' | en | 0.206496 | # lint-amnesty, pylint: disable=missing-module-docstring | 1.091099 | 1 |
Chapter1/simple_plot_program.py | luwei0917/Phys517 | 0 | 6620781 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 09:44:39 2016
@author: toffo
Simple python based plotting program
"""
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(-2*np.pi,2*np.pi,51) # create an array x
plt.figure(1) # open up a figure window
plt.clf() # clear the window
# now plot
plt.plot(x,np.sin(x))
plt.xlabel('x') #x label
plt.ylabel('sin(x)') # y label
plt.show() # shows the plot window | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 09:44:39 2016
@author: toffo
Simple python based plotting program
"""
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(-2*np.pi,2*np.pi,51) # create an array x
plt.figure(1) # open up a figure window
plt.clf() # clear the window
# now plot
plt.plot(x,np.sin(x))
plt.xlabel('x') #x label
plt.ylabel('sin(x)') # y label
plt.show() # shows the plot window | en | 0.669931 | # -*- coding: utf-8 -*- Created on Mon Jan 11 09:44:39 2016 @author: toffo Simple python based plotting program # create an array x # open up a figure window # clear the window # now plot #x label # y label # shows the plot window | 3.904584 | 4 |
binanceTradeTimeProgress.py | mhl5k/binanceCheck | 0 | 6620782 | # shows all Binance trading pairs with progress over 5min, 1h, 24h, ...
#
# License: MIT
# Author: mhl5k
import sys
import logging
from binance.lib.utils import config_logging
from mhl5k.binance.dataset import BinanceDataSet
from mhl5k.settings import Settings
from mhl5k.files import Files
from mhl5k.app import App
VERSION = "0.12"
# Functions and constants
# ------------------------
# NOTE(review): APIURL is not referenced in this script - presumably used by the
# mhl5k helpers; confirm before removing.
APIURL="https://api.binance.com"
# Main start up
# -------------
App.printName(version=VERSION)
# an optional single command-line argument limits the output to matching pairs
argFilter=""
if len(sys.argv) == 2:
    argFilter=sys.argv[1]
# load settings and route debug logging to the app's log file
settings=Settings()
config_logging(logging, logging.DEBUG, Files.getLoggingFilenameWithPath())
try:
    # fetch the Binance account data set and print the trade-time progress table
    binanceAccountDataSet=BinanceDataSet(settings)
    binanceAccountDataSet.showTradeTimeProgress(filter=argFilter)
except Exception as E:
    # top-level boundary: report the failure and exit with a non-zero status
    print("Error: %s" % E)
    exit(1)
# success
exit(0)
| # shows all Binance trading pairs with progress over 5min, 1h, 24h, ...
#
# License: MIT
# Author: mhl5k
import sys
import logging
from binance.lib.utils import config_logging
from mhl5k.binance.dataset import BinanceDataSet
from mhl5k.settings import Settings
from mhl5k.files import Files
from mhl5k.app import App
VERSION = "0.12"
# Functions and constants
# ------------------------
APIURL="https://api.binance.com"
# Main start up
# -------------
App.printName(version=VERSION)
# limit to command line parameter
argFilter=""
if len(sys.argv) == 2:
argFilter=sys.argv[1]
# load settings
settings=Settings()
config_logging(logging, logging.DEBUG, Files.getLoggingFilenameWithPath())
try:
# Binance Account Data Set
binanceAccountDataSet=BinanceDataSet(settings)
binanceAccountDataSet.showTradeTimeProgress(filter=argFilter)
except Exception as E:
print("Error: %s" % E)
exit(1)
# exit
exit(0)
| en | 0.566146 | # shows all Binance trading pairs with progress over 5min, 1h, 24h, ... # # License: MIT # Author: mhl5k # Functions and constants # ------------------------ # Main start up # ------------- # limit to command line parameter # load settings # Binance Account Data Set # exit | 2.022122 | 2 |
setup.py | slavama/django_weather_darksky | 4 | 6620783 | import os
import sys
from setuptools import find_packages, setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
packages = find_packages()
# Package metadata for django_weather_darksky (darksky.net wrapper for Django).
setup(
    name='django_weather_darksky',
    version='0.1.1',
    description='Wrapper for darksky.net API for django',
    long_description='Simply wrapper for https://darksky.net API for django',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/slavama/django_weather_darksky',
    packages=packages,
    include_package_data=True,
    py_modules=['django_weather_darksky'],
    requires=['python (>= 2.7)', 'django (>= 1.8)'],
    install_requires=[
        'requests>=2',
        'jsonfield>=2.0.1'
    ],
    license='MIT License',
    zip_safe=False,
    keywords='forecast forecast.io darksky.net weather',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        # the duplicated 'Intended Audience :: Developers' entry was removed
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
    ]
)
| import os
import sys
from setuptools import find_packages, setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
packages = find_packages()
setup(
name='django_weather_darksky',
version='0.1.1',
description='Wrapper for darksky.net API for django',
long_description='Simply wrapper for https://darksky.net API for django',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/slavama/django_weather_darksky',
packages=packages,
include_package_data=True,
py_modules=['django_weather_darksky'],
requires = ['python (>= 2.7)', 'django (>= 1.8)'],
install_requires=[
'requests>=2',
'jsonfield>=2.0.1'
],
license='MIT License',
zip_safe=False,
keywords='forecast forecast.io darksky.net weather',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
]
)
| none | 1 | 1.640066 | 2 | |
cloudauthz/providers/azure.py | nsoranzo/cloudauthz | 0 | 6620784 | <reponame>nsoranzo/cloudauthz<filename>cloudauthz/providers/azure.py
"""
Implements means of exchanging client credentials with temporary access token to access Azure.
"""
from ..exceptions import *
from ..interfaces.providers import *
import adal
class Authorize(IProvider):
    """Exchange Azure AD client credentials for a storage access token."""

    AUTH_ENDPOINT = "https://login.microsoftonline.com/{}"
    RESOURCE = "https://storage.azure.com/"

    def __parse_error(self, exception):
        """Translate an adal error into an InvalidRequestException.

        Non-adal exceptions are passed back unchanged. Previously this method
        returned None for them, so the caller executed ``raise None`` (a
        TypeError) and the real failure was masked.
        """
        if isinstance(exception, adal.adal_error.AdalError):
            return InvalidRequestException(str(exception.error_response))
        return exception

    def get_credentials(self, tenant_id, client_id, client_secret):
        """Acquire a token for RESOURCE via the client-credentials grant."""
        authority_url = self.AUTH_ENDPOINT.format(tenant_id)
        context = adal.AuthenticationContext(
            authority_url,
            validate_authority=tenant_id != 'adfs',
            api_version=None,
            verify_ssl=False)  # NOTE(review): TLS verification is disabled - confirm this is intended
        try:
            return context.acquire_token_with_client_credentials(self.RESOURCE, client_id, client_secret)
        except Exception as e:
            raise self.__parse_error(e)
| """
Implements means of exchanging client credentials with temporary access token to access Azure.
"""
from ..exceptions import *
from ..interfaces.providers import *
import adal
class Authorize(IProvider):
AUTH_ENDPOINT = "https://login.microsoftonline.com/{}"
RESOURCE = "https://storage.azure.com/"
def __parse_error(self, exception):
if isinstance(exception, adal.adal_error.AdalError):
return InvalidRequestException(str(exception.error_response))
def get_credentials(self, tenant_id, client_id, client_secret):
authority_url = self.AUTH_ENDPOINT.format(tenant_id)
context = adal.AuthenticationContext(
authority_url,
validate_authority=tenant_id != 'adfs',
api_version=None,
verify_ssl=False)
try:
return context.acquire_token_with_client_credentials(self.RESOURCE, client_id, client_secret)
except Exception as e:
raise self.__parse_error(e) | en | 0.918816 | Implements means of exchanging client credentials with temporary access token to access Azure. | 2.63806 | 3 |
npc/formatters/helpers.py | Arent128/npc | 13 | 6620785 | """
Helper functions for generating lists
The functions here can be used to easily get the appropriate formatting function
for a use case and output type.
"""
from . import markdown, json, html
BINARY_TYPES = ['html']
"""tuple: Format names that require a binary stream instead of a text stream"""
CANONICAL_FORMATS = {
"md": "markdown",
"markdown": "markdown",
"htm": "html",
"html": "html",
"json": "json"
}
"""
dict: mapping of accepted format names and abbreviations to their canonical
format name keys
"""
def get_listing_formatter(format_name):
    """
    Get the correct npc listing output function for a named format

    Args:
        format_name (str): Name of the desired format

    Returns:
        A formatting output function if the format is recognized, or None if it
        is not.
    """
    dispatch = {
        'markdown': markdown.listing,
        'html': html.listing,
        'json': json.listing,
    }
    return dispatch.get(get_canonical_format_name(format_name))
def get_report_formatter(format_name):
    """
    Get the correct report table output function for a named format

    Args:
        format_name (str): Name of the desired format

    Returns:
        A formatting output function if the format is recognized, or None if it
        is not.
    """
    dispatch = {
        'markdown': markdown.report,
        'html': html.report,
        'json': json.report,
    }
    return dispatch.get(get_canonical_format_name(format_name))
def get_canonical_format_name(format_name):
    """
    Get the canonical format name for a possible abbreviation

    Args:
        format_name (str): Format name or abbreviation

    Returns:
        The canonical name from CANONICAL_FORMATS, or None if the format is
        not recognized.
    """
    # dict.get returns None for unknown keys, matching the old KeyError path.
    return CANONICAL_FORMATS.get(format_name.lower())
| """
Helper functions for generating lists
The functions here can be used to easily get the appropriate formatting function
for a use case and output type.
"""
from . import markdown, json, html
BINARY_TYPES = ['html']
"""tuple: Format names that require a binary stream instead of a text stream"""
CANONICAL_FORMATS = {
"md": "markdown",
"markdown": "markdown",
"htm": "html",
"html": "html",
"json": "json"
}
"""
dict: mapping of accepted format names and abbreviations to their canonical
format name keys
"""
def get_listing_formatter(format_name):
"""
Get the correct npc listing output function for a named format
Args:
format_name (str): Name of the desired format
Returns:
A formatting output function if the format is recognized, or None if it
is not.
"""
format_name = get_canonical_format_name(format_name)
if format_name == 'markdown':
return markdown.listing
if format_name == 'html':
return html.listing
if format_name == 'json':
return json.listing
return None
def get_report_formatter(format_name):
"""
Get the correct report table output function for a named format
Args:
format_name (str): Name of the desired format
Returns:
A formatting output function if the format is recognized, or None if it
is not.
"""
format_name = get_canonical_format_name(format_name)
if format_name == 'markdown':
return markdown.report
if format_name == 'html':
return html.report
if format_name == 'json':
return json.report
return None
def get_canonical_format_name(format_name):
"""
Get the canonical format name for a possible abbreviation
Args:
format_name (str): Format name or abbreviation
Returns:
The canonical name from CANONICAL_FORMATS, or None if the format is
not recognized.
"""
try:
return CANONICAL_FORMATS[format_name.lower()]
except KeyError:
return None
| en | 0.640807 | Helper functions for generating lists The functions here can be used to easily get the appropriate formatting function for a use case and output type. tuple: Format names that require a binary stream instead of a text stream dict: mapping of accepted format names and abbreviations to their canonical format name keys Get the correct npc listing output function for a named format Args: format_name (str): Name of the desired format Returns: A formatting output function if the format is recognized, or None if it is not. Get the correct report table output function for a named format Args: format_name (str): Name of the desired format Returns: A formatting output function if the format is recognized, or None if it is not. Get the canonical format name for a possible abbreviation Args: format_name (str): Format name or abbreviation Returns: The canonical name from CANONICAL_FORMATS, or None if the format is not recognized. | 3.5733 | 4 |
graph-measures/features_algorithms/edges/edge_betweenness_centrality.py | Unknown-Data/QGCN | 3 | 6620786 | import networkx as nx
from features_infra.feature_calculators import EdgeFeatureCalculator, FeatureMeta
class EdgeBetweennessCalculator(EdgeFeatureCalculator):
    """Edge feature: betweenness centrality of every edge in the graph."""
    def _calculate(self, include: set):
        # 'include' is accepted for interface compatibility and ignored here;
        # networkx returns a dict mapping each edge to its betweenness score.
        self._features = nx.edge_betweenness_centrality(self._gnx)
    def is_relevant(self):
        # the feature applies to any graph
        return True
feature_entry = {
"edge_betweenness": FeatureMeta(EdgeBetweennessCalculator, {"e_bet"}),
}
# Fixed: the guard previously compared against 'main', which __name__ never
# equals; the value for a directly-run module is '__main__'.
if __name__ == '__main__':
    pass
| import networkx as nx
from features_infra.feature_calculators import EdgeFeatureCalculator, FeatureMeta
class EdgeBetweennessCalculator(EdgeFeatureCalculator):
def _calculate(self, include: set):
self._features = nx.edge_betweenness_centrality(self._gnx)
def is_relevant(self):
return True
feature_entry = {
"edge_betweenness": FeatureMeta(EdgeBetweennessCalculator, {"e_bet"}),
}
if __name__ == 'main':
pass
| none | 1 | 2.569245 | 3 | |
tables.py | rayjustinhuang/BitesofPy | 0 | 6620787 | <reponame>rayjustinhuang/BitesofPy
class MultiplicationTable:
    """Square multiplication table with a fixed side length."""

    def __init__(self, length):
        """Create a 2D self._table of (x, y) coordinates and
        their calculations (form of caching)"""
        self.x = length
        self.y = length
        # Cache every product once; previously only x*y (a single number) was
        # stored even though the docstring promised a 2D table.
        self._table = [[row * col for col in range(1, length + 1)]
                       for row in range(1, length + 1)]

    def __len__(self):
        """Returns the area of the table (len x* len y)"""
        return self.x * self.y

    def __str__(self):
        """Returns a string representation of the table"""
        # Each row becomes "a | b | c" followed by a newline (trailing newline kept).
        lines = [' | '.join(str(value) for value in row) for row in self._table]
        return ''.join(line + "\n" for line in lines)

    def calc_cell(self, x, y):
        """Takes x and y coords and returns the re-calculated result"""
        if x > self.x or y > self.y:
            raise IndexError
        return x * y
pass | class MultiplicationTable:
def __init__(self, length):
"""Create a 2D self._table of (x, y) coordinates and
their calculations (form of caching)"""
self.x = length
self.y = length
self._table = self.calc_cell(self.x, self.y)
pass
def __len__(self):
"""Returns the area of the table (len x* len y)"""
return self.x * self.y
pass
def __str__(self):
"""Returns a string representation of the table"""
output = ''
rows = []
for i in range(1, self.x+1):
new_row = [i*j for j in range(1, self.y+1)]
rows.append(new_row)
for row in rows:
output += ' | '.join(str(x) for x in row)
output += "\n"
return output
pass
def calc_cell(self, x, y):
"""Takes x and y coords and returns the re-calculated result"""
if x > self.x or y > self.y:
raise IndexError
return x * y
pass | en | 0.719795 | Create a 2D self._table of (x, y) coordinates and their calculations (form of caching) Returns the area of the table (len x* len y) Returns a string representation of the table Takes x and y coords and returns the re-calculated result | 4.166443 | 4 |
PythonByMia_Finding_Misspelled_Words1.py | MiaSabre/Kaggle-Projects | 0 | 6620788 | <filename>PythonByMia_Finding_Misspelled_Words1.py
def clean_text(text_string, special_characters):
    """Strip each marker in *special_characters* from *text_string* and
    return the lowercased result."""
    stripped = text_string
    for marker in special_characters:
        stripped = stripped.replace(marker, "")
    return stripped.lower()
def tokenize(text_string, special_characters, clean=False):
    """Split *text_string* on single spaces; when *clean* is True, run it
    through clean_text(special_characters) first.

    Note: splitting on " " keeps empty tokens for consecutive spaces.
    """
    prepared = clean_text(text_string, special_characters) if clean else text_string
    return prepared.split(" ")
final_misspelled_words = []


def spell_check(vocabulary_file, text_file, special_characters=[",", ".", "'", ";", "\n"]):
    """Return the tokens of *text_file* that do not appear in *vocabulary_file*.

    The text is cleaned (special characters removed, lowercased) before
    tokenization; the vocabulary is tokenized as-is. Empty tokens are ignored.
    The default *special_characters* list is never mutated.
    """
    # Close the files deterministically (the original leaked both handles).
    with open(vocabulary_file) as vocab_handle:
        vocabulary = vocab_handle.read()
    with open(text_file) as text_handle:
        text = text_handle.read()
    # BUG FIX: the original bound the vocabulary tokens to a misspelled name
    # ("tockenized_vocabulary") and then read "tokenized_vocabulary" below,
    # raising NameError on every call. A set also makes membership O(1).
    vocabulary_tokens = set(tokenize(vocabulary, special_characters))
    tokenized_text = tokenize(text, special_characters, clean=True)
    misspelled_words = [tt for tt in tokenized_text
                        if tt not in vocabulary_tokens and tt != '']
    return misspelled_words


final_misspelled_words = spell_check(vocabulary_file="dictionary.txt", text_file="story.txt")
print(final_misspelled_words)
| <filename>PythonByMia_Finding_Misspelled_Words1.py
def clean_text(text_string, special_characters):
    """Remove every string in *special_characters* from *text_string* and
    return the lowercased result."""
    cleaned_string = text_string
    for string in special_characters:
        cleaned_string = cleaned_string.replace(string, "")
    cleaned_string = cleaned_string.lower()
    return(cleaned_string)
def tokenize(text_string, special_characters, clean=False):
    """Split *text_string* on single spaces; clean it first when *clean* is True.

    Splitting on " " keeps empty tokens for consecutive spaces.
    """
    cleaned_text = text_string
    if clean:
        cleaned_text = clean_text(text_string, special_characters)
    tokens = cleaned_text.split(" ")
    return(tokens)
final_misspelled_words = []
def spell_check(vocabulary_file, text_file, special_characters=[",",".","'",";","\n"]):
    """Return words of *text_file* that are missing from *vocabulary_file*.

    NOTE(review): this function raises NameError when called -- the
    vocabulary tokens are bound to "tockenized_vocabulary" (typo) but the
    membership test below reads "tokenized_vocabulary". Both open() handles
    are also never closed.
    """
    misspelled_words = []
    vocabulary = open(vocabulary_file).read()
    text = open(text_file).read()
    tockenized_vocabulary = tokenize(vocabulary, special_characters)
    tokenized_text = tokenize(text, special_characters, clean=True)
    for tt in tokenized_text:
        if tt not in tokenized_vocabulary and tt != '':
            misspelled_words.append(tt)
    return(misspelled_words)
# Runs at import time and prints the result for dictionary.txt / story.txt.
final_misspelled_words = spell_check(vocabulary_file="dictionary.txt", text_file="story.txt")
print(final_misspelled_words)
| none | 1 | 3.977673 | 4 | |
Encryptor/scripts/Enigma.py | serd2011/TP_EncryptorApp | 0 | 6620789 | import random
first = list(range(0, 256))
random.shuffle(first)
second = list(range(0, 256))
random.shuffle(second)
third = list(range(0, 256))
random.shuffle(third)
firstReverse = [None for _ in range(256)]
for num, value in enumerate(first, start=0):
firstReverse[value] = num
secondReverse = [None for _ in range(256)]
for num, value in enumerate(second, start=0):
secondReverse[value] = num
thirdReverse = [None for _ in range(256)]
for num, value in enumerate(third, start=0):
thirdReverse[value] = num
reflector = [None for _ in range(256)]
randomForReflector = list(range(0, 256))
random.shuffle(randomForReflector)
for i in range(0, 256, 2):
reflector[randomForReflector[i]] = randomForReflector[i + 1]
reflector[randomForReflector[i + 1]] = randomForReflector[i]
print("static const unsigned char first[256] {" + (", ".join(map(str, first))) + "};")
print("static const unsigned char firstReverse[256] {" + (", ".join(map(str, firstReverse))) + "};")
print("static const unsigned char second[256] {" + (", ".join(map(str, second))) + "};")
print("static const unsigned char secondReverse[256] {" + (", ".join(map(str, secondReverse))) + "};")
print("static const unsigned char third[256] {" + (", ".join(map(str, third))) + "};")
print("static const unsigned char thirdReverse[256] {" + (", ".join(map(str, thirdReverse))) + "};")
print("static const unsigned char reflector[256] {" + (", ".join(map(str, reflector))) + "};")
import random

# Three random rotor wirings: each is a shuffled permutation of 0..255.
first = list(range(0, 256))
random.shuffle(first)
second = list(range(0, 256))
random.shuffle(second)
third = list(range(0, 256))
random.shuffle(third)

# Inverse wirings: inverse[wired_to] == position, for the return path.
firstReverse = [None] * 256
for position, wired_to in enumerate(first):
    firstReverse[wired_to] = position
secondReverse = [None] * 256
for position, wired_to in enumerate(second):
    secondReverse[wired_to] = position
thirdReverse = [None] * 256
for position, wired_to in enumerate(third):
    thirdReverse[wired_to] = position

# Reflector: pair up the values of a random permutation so that each half of
# a pair maps to the other (a fixed-point-free involution).
reflector = [None] * 256
randomForReflector = list(range(0, 256))
random.shuffle(randomForReflector)
for pair_start in range(0, 256, 2):
    left, right = randomForReflector[pair_start], randomForReflector[pair_start + 1]
    reflector[left] = right
    reflector[right] = left

# Print every table as a C++ array initializer.
for table_name, table in (("first", first), ("firstReverse", firstReverse),
                          ("second", second), ("secondReverse", secondReverse),
                          ("third", third), ("thirdReverse", thirdReverse),
                          ("reflector", reflector)):
    print("static const unsigned char " + table_name + "[256] {" + ", ".join(map(str, table)) + "};")
| none | 1 | 3.753986 | 4 | |
todo.py | thomas-maurice/todo | 1 | 6620790 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pypodcatcher.py : A simple podcast client commandline software
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2013 <NAME> <<EMAIL>>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2014, <NAME>"
__license__ = "WTFPL"
__version__ = "0.2"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import xml.dom.minidom as minidom
import sys
import os
from termcolor import *
class TodoList:
"""
TodoList class implementation
This class is used to represend a todo list object.
It has several components. On the one hand some
general info about the todo list (the filename
the xml nodelist...) and a more conveinient to use
xml nodelist with only the interesting xml nodes.
This is to say that you shall never use the *self.todo_file*
xml nodelist but always the *self.todo_list*
The main important variables here are:
- self.todo_file_name: Obviously the xml file
- self.todo_file: The xml nodelist directly loaded from the file
- self.todo_list: The xml nodelist stripped from the blank one
"""
def __init__(self, todo_file):
"""
Initializes a TodoList obect from an XML file
"""
self.todo_file_name = todo_file
try:
self.todo_file = minidom.parse(todo_file)
self.regenerate_todo_list()
except:
self.create_new_file(todo_file)
print "The file was non-existant, created it"
self.todo_file = minidom.parse(todo_file)
self.regenerate_todo_list()
def regenerate_todo_list(self):
"""
Regenerates the self.todo_list variable
This will strip out the blank nodes of
the main xml nodelist. Must be called
after each addition/deletion of members
"""
self.todo_list = []
for e in self.todo_file.firstChild.childNodes:
if e.nodeType == e.ELEMENT_NODE:
self.todo_list.append(e)
def save(self):
"""
Saves the todolist
Saves the self.todo_file to a file
"""
f = open(self.todo_file_name, "w")
f.write(self.todo_file.toprettyxml().replace("\t", "").replace("\n\n", "").encode("utf-8"))
f.close()
def create_new_file(self, file_name):
"""
Creates a new bare file containing nothing to do
"""
f = open(self.todo_file_name, "w")
f.write("<?xml version=\"0.1\" encoding=\"UTF-8\" ?><todolist></todolist>")
f.close()
def remove_todo_by_id(self, i):
"""
Removes a node in the self.todo_file by its ID
"""
for t in self.todo_list:
if t.nodeType == t.ELEMENT_NODE:
if int(t.attributes["id"].value) == int(i):
self.todo_file.firstChild.removeChild(t)
return
def set_priority(self, i, p=""):
"""
Changes the priority of a todo
NOT IMPLEMENTED YET, the priority still has no effect on
nothing. Will be implemented soon.
"""
for e in self.todo_file.firstChild.childNodes:
if e.nodeType == e.ELEMENT_NODE:
if int(e.attributes["id"].value) == int(i):
e.attributes["priority"] = p
return
def add_todo(self, content):
"""
Creates a new Todo containing the "content" content
"""
i = self.get_max_id()+1
e = self.todo_file.createElement("todo")
e.attributes["id"] = str(i)
e.attributes["task"] = content.decode("utf-8")
e.attributes["priority"] = ""
print "Ajout du todo :"
self.print_todo(e)
self.todo_file.firstChild.appendChild(e)
def print_todo(self, todo):
"""
Prints a todo with pretty colors <3
"""
print colored(" #%2d" % int(todo.attributes["id"].value), "blue"), ":", self.colorize_todo(todo.attributes["task"].value)
def get_todo_by_id(self, i, todo_list=None):
"""
Returns the XML node corresponding to the todo with the id "i"
within the given todo_list. If None, then the one used by
the class shall be used. This is the case most of the time
"""
if todo_list == None:
todo_list = self.todo_list
for e in self.todo_file.firstChild.childNodes:
if e.nodeType == e.ELEMENT_NODE:
if int(e.attributes["id"].value) == int(i):
return e
return None
def get_todos_by_tag(self, tag, marker="#"):
"""
Search all the todos which match the given tag
"""
if tag[0] != marker:
tag = marker+tag
l = []
for e in self.todo_file.firstChild.childNodes:
if e.nodeType == e.ELEMENT_NODE:
text = e.attributes["task"].value.split(" ")
for i in range(0, len(text)):
if text[i][0] == marker:
if text[i].lower() == tag.lower():
l.append(e)
return l
def sort_todos_by_id(self, todo_list=None):
"""
Sorts the todo list by ID
"""
if todo_list == None:
todo_list = self.todo_list
return sorted(todo_list, key=lambda k: k.attributes["id"].value)
def count_todos(self):
"""
Returns the number of todos currently registered
"""
return len(self.todo_list)
def print_all(self):
"""
Print all the todos registered
"""
for todo in self.todo_list:
self.print_todo(todo)
def colorize_todo(self, text):
"""
Colorizes a todo, this means setting a pretty color on hashtags
"""
text = text.split(" ")
for i in range(0, len(text)):
if text[i][0] == '#':
text[i] = colored(text[i], "green")
elif text[i][0] == '+':
text[i] = colored(text[i], "magenta")
elif text[i][0] == '@':
text[i] = colored(text[i], "yellow")
return " ".join(text)
def print_todos(self, todo_list=None):
"""
Prints all the todos within a list
"""
if todo_list == None:
todo_list = self.todo_list
for t in todo_list:
self.print_todo(t)
def get_max_id(self):
"""
Returns the maximum ID within the todo list
"""
m = 0
for e in self.todo_list:
if int(e.attributes["id"].value) > m:
m = int(e.attributes["id"].value)
return m
if __name__ == "__main__":
    # The todo list is stored as XML in the user's home directory.
    fname = os.environ["HOME"] + "/.todo.xml"
    if len(sys.argv) == 1:
        # No command given: print the usage summary.
        print "Help for", sys.argv[0]
        print "\ttodo.py ls -- Display all the todos"
        print "\ttodo.py add <texte> -- Add a todo, the text may contain #hastags, +contexts or @peoples"
        print "\ttodo.py rm #number -- Removes a todo"
        print "\ttodo.py sh hastag -- Prints all the todos with the given #hashtag"
        print "\ttodo.py sc context -- Prints all the todos with the given +context"
        print "\ttodo.py sp person -- Prints all the todos with the given @person"
    elif len(sys.argv) == 2:
        # Single-argument commands; anything other than "ls" is ignored.
        if sys.argv[1] == "ls":
            t = TodoList(fname)
            print colored("> %d todo(s) en mémoire" % t.count_todos(), "white", attrs=["bold"])
            t.print_all()
    elif len(sys.argv) >= 3:
        t = TodoList(fname)
        if sys.argv[1] == "add":
            # Join all remaining words into one task string, then persist.
            t.add_todo(" ".join(sys.argv[2:]))
            t.save()
        elif sys.argv[1] == "rm":
            t.remove_todo_by_id(sys.argv[2])
            t.save()
        elif sys.argv[1] == "sh":
            # Search by #hashtag, display sorted by id.
            l = t.get_todos_by_tag(sys.argv[2])
            l = t.sort_todos_by_id(l)
            t.print_todos(l)
        elif sys.argv[1] == "sc":
            # Search by +context.
            l = t.get_todos_by_tag(sys.argv[2], "+")
            l = t.sort_todos_by_id(l)
            t.print_todos(l)
        elif sys.argv[1] == "sp":
            # Search by @person.
            l = t.get_todos_by_tag(sys.argv[2], "@")
            l = t.sort_todos_by_id(l)
            t.print_todos(l)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pypodcatcher.py : A simple podcast client commandline software
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2013 <NAME> <<EMAIL>>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2014, <NAME>"
__license__ = "WTFPL"
__version__ = "0.2"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import xml.dom.minidom as minidom
import sys
import os
from termcolor import *
class TodoList:
    """
    TodoList class implementation

    This class is used to represent a todo list object.
    It has several components. On the one hand some
    general info about the todo list (the filename,
    the xml nodelist...) and a more convenient to use
    xml nodelist with only the interesting xml nodes.
    This is to say that you shall never use the *self.todo_file*
    xml nodelist but always the *self.todo_list*

    The main important variables here are:
    - self.todo_file_name: Obviously the xml file
    - self.todo_file: The xml nodelist directly loaded from the file
    - self.todo_list: The xml nodelist stripped from the blank one
    """

    def __init__(self, todo_file):
        """
        Initializes a TodoList object from an XML file, creating the file
        when parsing fails.
        """
        self.todo_file_name = todo_file
        try:
            self.todo_file = minidom.parse(todo_file)
            self.regenerate_todo_list()
        # NOTE(review): bare "except:" also catches KeyboardInterrupt and
        # SystemExit and hides real parse errors.
        except:
            self.create_new_file(todo_file)
            print "The file was non-existant, created it"
            self.todo_file = minidom.parse(todo_file)
            self.regenerate_todo_list()

    def regenerate_todo_list(self):
        """
        Regenerates the self.todo_list variable from the DOM.
        This will strip out the blank nodes of
        the main xml nodelist. Must be called
        after each addition/deletion of members.
        """
        self.todo_list = []
        for e in self.todo_file.firstChild.childNodes:
            if e.nodeType == e.ELEMENT_NODE:
                self.todo_list.append(e)

    def save(self):
        """
        Saves the DOM document (self.todo_file) back to the backing file,
        UTF-8 encoded, with tabs and double newlines stripped.
        """
        f = open(self.todo_file_name, "w")
        f.write(self.todo_file.toprettyxml().replace("\t", "").replace("\n\n", "").encode("utf-8"))
        f.close()

    def create_new_file(self, file_name):
        """
        Creates a new bare file containing an empty <todolist>.

        NOTE(review): the file_name parameter is ignored (self.todo_file_name
        is used instead) and "0.1" is not a valid XML version declaration.
        """
        f = open(self.todo_file_name, "w")
        f.write("<?xml version=\"0.1\" encoding=\"UTF-8\" ?><todolist></todolist>")
        f.close()

    def remove_todo_by_id(self, i):
        """
        Removes from the DOM the node whose "id" attribute equals i.
        Call save() and regenerate_todo_list() afterwards.
        """
        for t in self.todo_list:
            if t.nodeType == t.ELEMENT_NODE:
                if int(t.attributes["id"].value) == int(i):
                    self.todo_file.firstChild.removeChild(t)
                    return

    def set_priority(self, i, p=""):
        """
        Changes the priority of a todo.
        NOT IMPLEMENTED YET, the priority still has no effect on
        nothing. Will be implemented soon.
        """
        for e in self.todo_file.firstChild.childNodes:
            if e.nodeType == e.ELEMENT_NODE:
                if int(e.attributes["id"].value) == int(i):
                    e.attributes["priority"] = p
                    return

    def add_todo(self, content):
        """
        Creates a new todo element containing *content* and appends it
        to the document (id = current max id + 1).
        """
        i = self.get_max_id()+1
        e = self.todo_file.createElement("todo")
        e.attributes["id"] = str(i)
        # Python 2: decode the raw byte string from the command line.
        e.attributes["task"] = content.decode("utf-8")
        e.attributes["priority"] = ""
        print "Ajout du todo :"
        self.print_todo(e)
        self.todo_file.firstChild.appendChild(e)

    def print_todo(self, todo):
        """
        Prints a single todo, id in blue and tags colorized.
        """
        print colored(" #%2d" % int(todo.attributes["id"].value), "blue"), ":", self.colorize_todo(todo.attributes["task"].value)

    def get_todo_by_id(self, i, todo_list=None):
        """
        Returns the XML node corresponding to the todo with the id "i"
        within the given todo_list. If None, then the one used by
        the class shall be used. This is the case most of the time.

        NOTE(review): the todo_list argument is effectively ignored -- the
        loop below always walks self.todo_file.firstChild.childNodes.
        """
        if todo_list == None:
            todo_list = self.todo_list
        for e in self.todo_file.firstChild.childNodes:
            if e.nodeType == e.ELEMENT_NODE:
                if int(e.attributes["id"].value) == int(i):
                    return e
        return None

    def get_todos_by_tag(self, tag, marker="#"):
        """
        Search all the todos which match the given tag (case-insensitive);
        *marker* is prepended when missing.

        NOTE(review): text[i][0] raises IndexError on empty tokens
        (tasks containing double spaces), and a todo whose task repeats the
        tag is appended once per occurrence.
        """
        if tag[0] != marker:
            tag = marker+tag
        l = []
        for e in self.todo_file.firstChild.childNodes:
            if e.nodeType == e.ELEMENT_NODE:
                text = e.attributes["task"].value.split(" ")
                for i in range(0, len(text)):
                    if text[i][0] == marker:
                        if text[i].lower() == tag.lower():
                            l.append(e)
        return l

    def sort_todos_by_id(self, todo_list=None):
        """
        Sorts the todo list by ID.

        NOTE(review): the key is the id *string*, so the order is
        lexicographic ("10" sorts before "2"); int(...) would fix it.
        """
        if todo_list == None:
            todo_list = self.todo_list
        return sorted(todo_list, key=lambda k: k.attributes["id"].value)

    def count_todos(self):
        """
        Returns the number of todos currently registered.
        """
        return len(self.todo_list)

    def print_all(self):
        """
        Prints all the todos registered.
        """
        for todo in self.todo_list:
            self.print_todo(todo)

    def colorize_todo(self, text):
        """
        Colorizes a todo: #tags green, +contexts magenta, @people yellow.

        NOTE(review): text[i][0] raises IndexError on empty tokens
        (double spaces in the task).
        """
        text = text.split(" ")
        for i in range(0, len(text)):
            if text[i][0] == '#':
                text[i] = colored(text[i], "green")
            elif text[i][0] == '+':
                text[i] = colored(text[i], "magenta")
            elif text[i][0] == '@':
                text[i] = colored(text[i], "yellow")
        return " ".join(text)

    def print_todos(self, todo_list=None):
        """
        Prints all the todos within a list (default: self.todo_list).
        """
        if todo_list == None:
            todo_list = self.todo_list
        for t in todo_list:
            self.print_todo(t)

    def get_max_id(self):
        """
        Returns the maximum ID within the todo list (0 when empty).
        """
        m = 0
        for e in self.todo_list:
            if int(e.attributes["id"].value) > m:
                m = int(e.attributes["id"].value)
        return m
if __name__ == "__main__":
    # Backing XML file lives in the user's home directory.
    fname = os.environ["HOME"] + "/.todo.xml"
    if len(sys.argv) == 1:
        # No command: print usage and exit.
        print "Help for", sys.argv[0]
        print "\ttodo.py ls -- Display all the todos"
        print "\ttodo.py add <texte> -- Add a todo, the text may contain #hastags, +contexts or @peoples"
        print "\ttodo.py rm #number -- Removes a todo"
        print "\ttodo.py sh hastag -- Prints all the todos with the given #hashtag"
        print "\ttodo.py sc context -- Prints all the todos with the given +context"
        print "\ttodo.py sp person -- Prints all the todos with the given @person"
    elif len(sys.argv) == 2:
        # "ls" is the only recognized single-argument command.
        if sys.argv[1] == "ls":
            t = TodoList(fname)
            print colored("> %d todo(s) en mémoire" % t.count_todos(), "white", attrs=["bold"])
            t.print_all()
    elif len(sys.argv) >= 3:
        # Commands taking an argument; unknown commands are silently ignored.
        t = TodoList(fname)
        if sys.argv[1] == "add":
            t.add_todo(" ".join(sys.argv[2:]))
            t.save()
        elif sys.argv[1] == "rm":
            t.remove_todo_by_id(sys.argv[2])
            t.save()
        elif sys.argv[1] == "sh":
            l = t.get_todos_by_tag(sys.argv[2])
            l = t.sort_todos_by_id(l)
            t.print_todos(l)
        elif sys.argv[1] == "sc":
            l = t.get_todos_by_tag(sys.argv[2], "+")
            l = t.sort_todos_by_id(l)
            t.print_todos(l)
        elif sys.argv[1] == "sp":
            l = t.get_todos_by_tag(sys.argv[2], "@")
            l = t.sort_todos_by_id(l)
            t.print_todos(l)
| en | 0.769432 | #!/usr/bin/env python # -*- coding: utf-8 -*- pypodcatcher.py : A simple podcast client commandline software DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE Version 2, December 2004 Copyright (C) 2013 <NAME> <<EMAIL>> Everyone is permitted to copy and distribute verbatim or modified copies of this license document, and changing it is allowed as long as the name is changed. DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. You just DO WHAT THE FUCK YOU WANT TO. TodoList class implementation This class is used to represend a todo list object. It has several components. On the one hand some general info about the todo list (the filename the xml nodelist...) and a more conveinient to use xml nodelist with only the interesting xml nodes. This is to say that you shall never use the *self.todo_file* xml nodelist but always the *self.todo_list* The main important variables here are: - self.todo_file_name: Obviously the xml file - self.todo_file: The xml nodelist directly loaded from the file - self.todo_list: The xml nodelist stripped from the blank one Initializes a TodoList obect from an XML file Regenerates the self.todo_list variable This will strip out the blank nodes of the main xml nodelist. Must be called after each addition/deletion of members Saves the todolist Saves the self.todo_file to a file Creates a new bare file containing nothing to do Removes a node in the self.todo_file by its ID Changes the priority of a todo NOT IMPLEMENTED YET, the priority still has no effect on nothing. Will be implemented soon. Creates a new Todo containing the "content" content Prints a todo with pretty colors <3 #%2d" % int(todo.attributes["id"].value), "blue"), ":", self.colorize_todo(todo.attributes["task"].value) Returns the XML node corresponding to the todo with the id "i" within the given todo_list. If None, then the one used by the class shall be used. 
This is the case most of the time Search all the todos which match the given tag Sorts the todo list by ID Returns the number of todos currently registered Print all the todos registered Colorizes a todo, this means setting a pretty color on hashtags Prints all the todos within a list Returns the maximum ID within the todo list #hastags, +contexts or @peoples" #number -- Removes a todo" #hashtag" | 3.050085 | 3 |
resample.py | dhuppenkothen/UTools | 1 | 6620791 | import numpy as numpy
def resample(n, step_size):
    """Rebin array *n* into windows of (possibly fractional) width *step_size*.

    Each output bin sums the input bins its window covers, weighting the
    first and last partially-covered input bin by the fraction that falls
    inside the window. NOTE(review): the debug prints make this unsuitable
    for library use; assumes step_size > 1 so the "previous" and "next"
    fractional bins never overlap -- TODO confirm.
    """
    output = []
    for i in numpy.arange(0, len(n), step_size):
        total = 0
        print "Bin is " + str(i)
        # Fraction of the first (partially covered) input bin in this window.
        prev_frac = int(i+1) - i
        prev_bin = int(i)
        print "Fractional part of bin %d is %f" %(prev_bin, prev_frac)
        total += prev_frac * n[prev_bin]
        if i + step_size < len(n):
            # Fractional part of next bin:
            next_frac = i+step_size - int(i+step_size)
            next_bin = int(i+step_size)
            print "Fractional part of bin %d is %f" %(next_bin, next_frac)
            total += next_frac * n[next_bin]
        # Bins fully inside the window contribute their whole content.
        print "Fully included bins: %d to %d" % (int(i+1), int(i+step_size)-1)
        total += sum(n[int(i+1):int(i+step_size)])
        output.append(total)
    return output
if __name__ == "__main__":
my_array = numpy.array([19,11,13,11,12,10])
print regrid(my_array, 1.2)
| import numpy as numpy
def resample(n, step_size):
    """Rebin array *n* into windows of (possibly fractional) width *step_size*,
    summing covered input bins and weighting the partially covered edge bins
    by their overlap fraction. Prints debug information for every window."""
    output = []
    for i in numpy.arange(0, len(n), step_size):
        total = 0
        print "Bin is " + str(i)
        # Fraction of the first (partially covered) input bin in this window.
        prev_frac = int(i+1) - i
        prev_bin = int(i)
        print "Fractional part of bin %d is %f" %(prev_bin, prev_frac)
        total += prev_frac * n[prev_bin]
        if i + step_size < len(n):
            # Fractional part of next bin:
            next_frac = i+step_size - int(i+step_size)
            next_bin = int(i+step_size)
            print "Fractional part of bin %d is %f" %(next_bin, next_frac)
            total += next_frac * n[next_bin]
        # Fully covered interior bins contribute their whole content.
        print "Fully included bins: %d to %d" % (int(i+1), int(i+step_size)-1)
        total += sum(n[int(i+1):int(i+step_size)])
        output.append(total)
    return output
if __name__ == "__main__":
    my_array = numpy.array([19,11,13,11,12,10])
    # NOTE(review): "regrid" is not defined anywhere in this file -- the
    # function above is named "resample", so this demo raises NameError.
    print regrid(my_array, 1.2)
| en | 0.860504 | # Fractional part of next bin: | 3.43643 | 3 |
web2py-appliances-master/UrlShortening/controllers/myservices.py | wantsomechocolate/WantsomeBeanstalk | 0 | 6620792 | <reponame>wantsomechocolate/WantsomeBeanstalk
@service.json
@service.xml
@service.jsonrpc
@service.xmlrpc
@service.amfrpc3('domain')
@service.soap()
def linksby(key=''):
    """Return all distinct links bookmarked with a tag containing *key*.

    Exposed over JSON/XML/JSON-RPC/XML-RPC/AMF3/SOAP via the web2py
    ``service`` decorators. ``service``, ``db``, ``Link`` and ``Bookmark``
    are web2py globals injected by the framework/models at runtime.
    """
    return db(Link.id==Bookmark.link)\
        (Bookmark.tags.contains(key)).select(Link.ALL,distinct=True)
@request.restful()
def api():
    """RESTful controller: GET /api/<key> returns the links tagged with *key*
    as a dict; POST is not supported and answers 501 Not Implemented."""
    def POST():
        raise HTTP(501)
    def GET(key=''):
        return dict(result=linksby(key).as_list())
    return locals()
# Dispatches RPC requests (JSON-RPC, XML-RPC, SOAP, ...) to the
# @service-decorated functions above.
def call(): return service()
@service.json
@service.xml
@service.jsonrpc
@service.xmlrpc
@service.amfrpc3('domain')
@service.soap()
def linksby(key=''):
    """Select the distinct Link rows whose Bookmark tags contain *key*;
    published over all the protocols listed in the decorators (web2py
    injects ``service``, ``db``, ``Link`` and ``Bookmark`` at runtime)."""
    return db(Link.id==Bookmark.link)\
        (Bookmark.tags.contains(key)).select(Link.ALL,distinct=True)
@request.restful()
def api():
    """REST endpoint: GET returns {'result': [...]} for the requested tag key;
    POST answers HTTP 501."""
    def POST():
        raise HTTP(501)
    def GET(key=''):
        return dict(result=linksby(key).as_list())
    return locals()
# Entry point for the web2py service dispatcher (RPC/SOAP calls).
def call(): return service()
eagerx_pybullet/engine.py | eager-dev/eagerx_pybullet | 1 | 6620793 | # Pybullet imports
from typing import Optional, Dict, List
# ROS IMPORTS
import rospy
from std_msgs.msg import UInt64, Float32
# RX IMPORTS
from eagerx.core.constants import process, ERROR
import eagerx.core.register as register
from eagerx.core.entities import Engine, SpaceConverter
from eagerx.core.specs import EngineSpec
from eagerx_pybullet.world import World
from eagerx_pybullet.robot import URDFBasedRobot
try:
import pybullet
import pybullet_data
from pybullet_utils import bullet_client
except ImportError as e:
from gym import error
raise error.DependencyNotInstalled("{}. (HINT: you need to install PyBullet)".format(e))
class PybulletEngine(Engine):
    """An engine between the pybullet physics server and EAGERx engine nodes."""

    @staticmethod
    @register.spec("PybulletEngine", Engine)
    def spec(
        spec: EngineSpec,
        rate: float,
        process: Optional[int] = process.NEW_PROCESS,
        sync: Optional[bool] = True,
        real_time_factor: Optional[float] = 0,
        simulate_delays: Optional[bool] = True,
        log_level: Optional[int] = ERROR,
        states: List[str] = None,
        world_fn: Optional[str] = None,
        gui: bool = True,
        egl: bool = True,
        gravity: float = -9.81,
        physics_engine_params: Optional[Dict] = None,
    ):
        """A spec to create a PybulletEngine node that interfaces with a pybullet physics server.

        :param spec: Holds the desired configuration in a Spec object.
        :param rate: Rate of the engine
        :param process: {0: NEW_PROCESS, 1: ENVIRONMENT, 2: ENGINE, 3: EXTERNAL}
        :param sync: Run reactive or async
        :param real_time_factor: Simulation speed. 0 == "as fast as possible".
        :param simulate_delays: Boolean flag to simulate delays.
        :param log_level: {0: SILENT, 10: DEBUG, 20: INFO, 30: WARN, 40: ERROR, 50: FATAL}
        :param states: Physics engine parameters that are to be varied over episodes as a form of domain randomization.
                       Currently available: `erp`, `contactERP`, `frictionERP`.
        :param world_fn: A string with syntax `module/WorldFnName` that received `bullet_client` as an argument. The
                         function builds-up the (static) world (i.e. loads urdfs into pybullet). See
                         `eagerx_pybullet.world/empty_world_with_plane` for an example.
        :param gui: Create a GUI connection with 3D OpenGL rendering within the same process space as PyBullet.
        :param egl: Enable hardware accelerated OpenGL rendering without a X11 context for faster headless rendering.
        :param gravity: Sets the gravity constant along the vertical axis (forwarded to the ``World`` helper;
                        pybullet's convention is z-up -- the original text said y-axis, verify against
                        ``eagerx_pybullet.world.World``).
        :param physics_engine_params: Parameter keys with their desired value. See the pybullet documentation for more
                                      info on the physics engine parameters:
                                      https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA/edit#heading=h.k37c0drzdf21

        .. note:: fixedTimeStep cannot be set, as it is determined by the specified rate of
                  the engine. Per default, numSubSteps is set such that simulation steps are
                  taken at 240 hz. Tune numSubSteps to trade performance over accuracy.

        :return: EngineSpec
        """
        # Modify default engine params
        spec.config.rate = rate
        spec.config.process = process
        spec.config.sync = sync
        spec.config.real_time_factor = real_time_factor
        spec.config.simulate_delays = simulate_delays
        spec.config.log_level = log_level
        spec.config.color = "magenta"
        spec.config.states = states if isinstance(states, list) else []

        # Add custom params
        spec.config.world_fn = world_fn
        spec.config.gui = gui
        spec.config.egl = egl
        spec.config.gravity = gravity
        spec.config.physics_engine_params = physics_engine_params if isinstance(physics_engine_params, dict) else None

        # Set space converters for registered physics engine parameters.
        # todo: Default values for erp, contactERP, frictionERP? --> getPhysicsEngineParameters() does not include them...
        spec.states.erp.space_converter = SpaceConverter.make("Space_Float32", 0.2, 0.2, dtype="float32")
        spec.states.contactERP.space_converter = SpaceConverter.make("Space_Float32", 0.2, 0.2, dtype="float32")
        spec.states.frictionERP.space_converter = SpaceConverter.make("Space_Float32", 0.2, 0.2, dtype="float32")

    def initialize(self, world_fn, gui, egl, gravity, physics_engine_params: Dict = None):
        """
        Initializes the engine to pybullet.

        :param world_fn: A string with syntax `module/WorldFnName` that received `bullet_client` as an argument. The
                         function builds-up the (static) world (i.e. loads urdfs into pybullet). See
                         `eagerx_pybullet.world/empty_world_with_plane` for an example.
        :param gui: Create a GUI connection with 3D OpenGL rendering within the same process space as PyBullet.
        :param egl: Enable hardware accelerated OpenGL rendering without a X11 context for faster headless rendering.
        :param gravity: Gravity constant forwarded to the ``World`` helper.
        :param physics_engine_params: Parameter keys with their desired value. See the pybullet documentation for more
                                      info on the physics engine parameters:
        """
        # Connect to pybullet
        self._p, self.physics_client_id = self._start_simulator(gui, egl)

        # Initialize the (static) world; the timestep is fixed by the engine rate.
        world = World(
            self._p,
            gravity=gravity,
            world_fn=world_fn,
            timestep=1 / self.rate,
        )

        # Set physics parameters
        if physics_engine_params:
            assert "fixedTimeStep" not in physics_engine_params, (
                "Cannot set the fixedTimeStep via the physics_engine_params. "
                f"This is determined by the engine's rate: dt = 1/{self.rate} s."
            )
            self._p.setPhysicsEngineParameter(**physics_engine_params)

        # Create pybullet simulator that will be shared with all EngineStates & EngineNodes (if launched in same process).
        self.simulator = dict(client=self._p, world=world, robots={})

    def _start_simulator(self, gui, egl):
        """Connect to a pybullet server (GUI or DIRECT), reset it, and return
        (bullet_client, physics_client_id)."""
        if gui:
            p = bullet_client.BulletClient(connection_mode=pybullet.GUI)
        else:
            # p = bullet_client.BulletClient(pybullet.SHARED_MEMORY, options="-shared_memory_key 1234")
            p = bullet_client.BulletClient()

        physics_client_id = p._client
        p.resetSimulation()
        p.setPhysicsEngineParameter(deterministicOverlappingPairs=1)

        # optionally enable EGL for faster headless rendering (DIRECT mode only)
        if egl:
            con_mode = p.getConnectionInfo()["connectionMethod"]
            if con_mode == p.DIRECT:
                import pkgutil

                egl = pkgutil.get_loader("eglRenderer")
                if egl:
                    p.loadPlugin(egl.get_filename(), "_eglRendererPlugin")
                else:
                    p.loadPlugin("eglRendererPlugin")

        # Add search path for urdfs (includes the pybullet_data assets)
        pybullet.setAdditionalSearchPath(pybullet_data.getDataPath(), physicsClientId=p._client)
        return p, physics_client_id

    def pre_reset(self, *args, **kwargs):
        # No engine-level work is needed before an episode reset.
        pass

    @register.engine_config(
        urdf=None,
        basePosition=[0, 0, 0],
        # NOTE(review): [0, 0, 0, 0] is not a valid quaternion; pybullet's
        # identity orientation is [0, 0, 0, 1] -- confirm the intended default.
        baseOrientation=[0, 0, 0, 0],
        fixed_base=True,
        self_collision=False,
        globalScaling=1.0,
        flags=0,
    )
    def add_object(self, config, engine_config, node_params, state_params):
        """
        Adds an object to the connected Pybullet physics server.

        :param config: The (agnostic) config of the :class:`~eagerx.core.entities.Object` that is to be added.
        :param engine_config: The engine-specific config of the :class:`~eagerx.core.entities.Object` that is to be added.
                              This dict contains the registered parameters:

                              See https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA/edit#
                              for all available flags.

                              - **urdf**: A fullpath (ending with .urdf), a key that points to the urdf (xml)string on the
                                rosparam server, or a urdf within pybullet's search path. The `pybullet_data` package is
                                included in the search path.
                              - **basePosition**: Base position of the object [x, y, z].
                              - **baseOrientation**: Base orientation of the object in quaternion [x, y, z, w].
                              - **fixed_base**: Force the base of the loaded object to be static.
                              - **self_collision**: Sets the `URDF_USE_SELF_COLLISION` flag to allow self collisions.
                              - **globalScaling**: globalScaling will apply a scale factor to the URDF model.
                              - **flags**: Flags (see link below) that can be combined using a bitwise OR, |.
        :param node_params: A list containing the config of every :class:`~eagerx.core.entities.EngineNode` that represents
                            an :class:`~eagerx.core.entities.Object`'s sensor or actuator that is to be added.
        :param state_params: A list containing the parameters of every the :class:`~eagerx.core.entities.Object`'s
                             :class:`~eagerx.core.entities.EngineState` that is to be added.
        """
        obj_name = config["name"]
        entity_id = config["entity_id"]

        # add objects to simulator (we have a ref to the simulator with self.simulator)
        rospy.loginfo(f'Adding object "{obj_name}" of type "{entity_id}" to the simulator.')

        # Add self collision to flag (bitwise OR into the user-provided flags)
        if engine_config["self_collision"]:
            flags = engine_config["flags"] | pybullet.URDF_USE_SELF_COLLISION
        else:
            flags = engine_config["flags"]

        # Add object
        if engine_config["urdf"]:
            self.simulator["robots"][obj_name] = URDFBasedRobot(
                self._p,
                model_urdf=engine_config["urdf"],  # Can be path (ending with .urdf), or ros param key to urdf (xml)string.
                robot_name=obj_name,
                basePosition=engine_config["basePosition"],
                baseOrientation=engine_config["baseOrientation"],
                fixed_base=engine_config["fixed_base"],
                flags=flags,
            )
        else:  # if no urdf is provided, create dummy robot.
            self.simulator["robots"][obj_name] = None

    @register.states(erp=Float32, contactERP=Float32, frictionERP=Float32)
    def reset(self, erp: Float32 = None, contactERP: Float32 = None, frictionERP: Float32 = None):
        """Set any of the physics engine parameters (registered as states) if they were selected."""
        physics_engine_params = {}
        if erp:
            physics_engine_params["erp"] = erp.data
        if contactERP:
            physics_engine_params["contactERP"] = contactERP.data
        if frictionERP:
            physics_engine_params["frictionERP"] = frictionERP.data
        # Only touch the physics engine when at least one state was provided.
        if len(physics_engine_params) > 0:
            self._p.setPhysicsEngineParameter(**physics_engine_params)

    @register.outputs(tick=UInt64)
    def callback(self, t_n: float):
        """Here, we step the world by 1/rate seconds."""
        self.simulator["world"].step()

    def shutdown(self) -> None:
        """Disconnects the engine from the pybullet physics server"""
        self._p.disconnect()
| # Pybullet imports
from typing import Optional, Dict, List
# ROS IMPORTS
import rospy
from std_msgs.msg import UInt64, Float32
# RX IMPORTS
from eagerx.core.constants import process, ERROR
import eagerx.core.register as register
from eagerx.core.entities import Engine, SpaceConverter
from eagerx.core.specs import EngineSpec
from eagerx_pybullet.world import World
from eagerx_pybullet.robot import URDFBasedRobot
try:
import pybullet
import pybullet_data
from pybullet_utils import bullet_client
except ImportError as e:
from gym import error
raise error.DependencyNotInstalled("{}. (HINT: you need to install PyBullet)".format(e))
class PybulletEngine(Engine):
"""A engine between the pybullet physics server and EAGERx engine nodes."""
    @staticmethod
    @register.spec("PybulletEngine", Engine)
    def spec(
        spec: EngineSpec,
        rate: float,
        process: Optional[int] = process.NEW_PROCESS,
        sync: Optional[bool] = True,
        real_time_factor: Optional[float] = 0,
        simulate_delays: Optional[bool] = True,
        log_level: Optional[int] = ERROR,
        states: List[str] = None,
        world_fn: Optional[str] = None,
        gui: bool = True,
        egl: bool = True,
        gravity: float = -9.81,
        physics_engine_params: Optional[Dict] = None,
    ):
        """A spec to create a PybulletEngine node that interfaces with a pybullet physics server.

        :param spec: Holds the desired configuration in a Spec object.
        :param rate: Rate of the engine
        :param process: {0: NEW_PROCESS, 1: ENVIRONMENT, 2: ENGINE, 3: EXTERNAL}
        :param sync: Run reactive or async
        :param real_time_factor: Simulation speed. 0 == "as fast as possible".
        :param simulate_delays: Boolean flag to simulate delays.
        :param log_level: {0: SILENT, 10: DEBUG, 20: INFO, 30: WARN, 40: ERROR, 50: FATAL}
        :param states: Physics engine parameters that are to be varied over episodes as a form of domain randomization.
            Currently available: `erp`, `contactERP`, `frictionERP`.
        :param world_fn: A string with syntax `module/WorldFnName` that received `bullet_client` as an argument. The
            function builds-up the (static) world (i.e. loads urdfs into pybullet). See
            `eagerx_pybullet.world/empty_world_with_plane` for an example.
        :param gui: Create a GUI connection with 3D OpenGL rendering within the same process space as PyBullet.
        :param egl: Enable hardware accelerated OpenGL rendering without a X11 context for faster headless rendering.
        :param gravity: Sets the gravity constant (original text said y-axis; pybullet conventionally
            applies gravity along the world z-axis — verify against ``World``).
        :param physics_engine_params: Parameter keys with their desired value. See the pybullet documentation for more
            info on the physics engine parameters:
            https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA/edit#heading=h.k37c0drzdf21

        .. note:: fixedTimeStep cannot be set, as it is determined by the specified rate of
            the engine. Per default, numSubSteps is set such that simulation steps are
            taken at 240 hz. Tune numSubSteps to trade performance over accuracy.

        :return: EngineSpec
        """
        # Modify default engine params
        spec.config.rate = rate
        spec.config.process = process
        spec.config.sync = sync
        spec.config.real_time_factor = real_time_factor
        spec.config.simulate_delays = simulate_delays
        spec.config.log_level = log_level
        spec.config.color = "magenta"
        # Default to no domain-randomized engine states when none were requested.
        spec.config.states = states if isinstance(states, list) else []
        # Add custom params
        spec.config.world_fn = world_fn
        spec.config.gui = gui
        spec.config.egl = egl
        spec.config.gravity = gravity
        spec.config.physics_engine_params = physics_engine_params if isinstance(physics_engine_params, dict) else None
        # Set space converters for registered physics engine parameters.
        # todo: Default values for erp, contactERP, frictionERP? --> getPhysicsEngineParameters() does not include them...
        # NOTE(review): both bounds are 0.2, which presumably pins these states to a
        # constant unless a caller overrides the converter — confirm SpaceConverter arg order.
        spec.states.erp.space_converter = SpaceConverter.make("Space_Float32", 0.2, 0.2, dtype="float32")
        spec.states.contactERP.space_converter = SpaceConverter.make("Space_Float32", 0.2, 0.2, dtype="float32")
        spec.states.frictionERP.space_converter = SpaceConverter.make("Space_Float32", 0.2, 0.2, dtype="float32")
    def initialize(self, world_fn, gui, egl, gravity, physics_engine_params: Dict = None):
        """
        Initializes the engine to pybullet.

        :param world_fn: A string with syntax `module/WorldFnName` that received `bullet_client` as an argument. The
            function builds-up the (static) world (i.e. loads urdfs into pybullet). See
            `eagerx_pybullet.world/empty_world_with_plane` for an example.
        :param gui: Create a GUI connection with 3D OpenGL rendering within the same process space as PyBullet.
        :param egl: Enable hardware accelerated OpenGL rendering without a X11 context for faster headless rendering.
        :param gravity: Sets the gravity constant (applied by ``World``; axis handled there).
        :param physics_engine_params: Parameter keys with their desired value. See the pybullet documentation for more
            info on the physics engine parameters:
            https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA/edit#heading=h.k37c0drzdf21
        """
        # Connect to pybullet
        self._p, self.physics_client_id = self._start_simulator(gui, egl)
        # Initialize the world wrapper; the timestep is derived from the engine rate.
        world = World(
            self._p,
            gravity=gravity,
            world_fn=world_fn,
            timestep=1 / self.rate,
        )
        # Set physics parameters; fixedTimeStep is forbidden because it is fixed by the rate.
        if physics_engine_params:
            assert "fixedTimeStep" not in physics_engine_params, (
                "Cannot set the fixedTimeStep via the physics_engine_params. "
                f"This is determined by the engine's rate: dt = 1/{self.rate} s."
            )
            self._p.setPhysicsEngineParameter(**physics_engine_params)
        # Create pybullet simulator that will be shared with all EngineStates & EngineNodes (if launched in same process).
        self.simulator = dict(client=self._p, world=world, robots={})
def _start_simulator(self, gui, egl):
if gui:
p = bullet_client.BulletClient(connection_mode=pybullet.GUI)
else:
# p = bullet_client.BulletClient(pybullet.SHARED_MEMORY, options="-shared_memory_key 1234")
p = bullet_client.BulletClient()
physics_client_id = p._client
p.resetSimulation()
p.setPhysicsEngineParameter(deterministicOverlappingPairs=1)
# optionally enable EGL for faster headless rendering
if egl:
con_mode = p.getConnectionInfo()["connectionMethod"]
if con_mode == p.DIRECT:
import pkgutil
egl = pkgutil.get_loader("eglRenderer")
if egl:
p.loadPlugin(egl.get_filename(), "_eglRendererPlugin")
else:
p.loadPlugin("eglRendererPlugin")
# Add search path for urdfs
pybullet.setAdditionalSearchPath(pybullet_data.getDataPath(), physicsClientId=p._client)
return p, physics_client_id
    def pre_reset(self, *args, **kwargs):
        """No-op: this engine requires no preparation before an episode reset."""
        pass
    @register.engine_config(
        urdf=None,
        basePosition=[0, 0, 0],
        # NOTE(review): [0, 0, 0, 0] is not a valid unit quaternion (identity is
        # [0, 0, 0, 1]) — confirm how pybullet treats this default.
        baseOrientation=[0, 0, 0, 0],
        fixed_base=True,
        self_collision=False,
        globalScaling=1.0,
        flags=0,
    )
    def add_object(self, config, engine_config, node_params, state_params):
        """
        Adds an object to the connected Pybullet physics server.

        :param config: The (agnostic) config of the :class:`~eagerx.core.entities.Object` that is to be added.
        :param engine_config: The engine-specific config of the :class:`~eagerx.core.entities.Object` that is to be added.
            This dict contains the registered parameters:
            See https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA/edit#
            for all available flags.

            - **urdf**: A fullpath (ending with .urdf), a key that points to the urdf (xml)string on the
              rosparam server, or a urdf within pybullet's search path. The `pybullet_data` package is
              included in the search path.
            - **basePosition**: Base position of the object [x, y, z].
            - **baseOrientation**: Base orientation of the object in quaternion [x, y, z, w].
            - **fixed_base**: Force the base of the loaded object to be static.
            - **self_collision**: Sets the `URDF_USE_SELF_COLLISION` flag to allow self collisions.
            - **globalScaling**: globalScaling will apply a scale factor to the URDF model.
            - **flags**: Flags (see link below) that can be combined using a bitwise OR, |.
        :param node_params: A list containing the config of every :class:`~eagerx.core.entities.EngineNode` that represents
            an :class:`~eagerx.core.entities.Object`'s sensor or actuator that is to be added.
        :param state_params: A list containing the parameters of every the :class:`~eagerx.core.entities.Object`'s
            :class:`~eagerx.core.entities.EngineState` that is to be added.
        """
        obj_name = config["name"]
        entity_id = config["entity_id"]
        # add objects to simulator (we have a ref to the simulator with self.simulator)
        rospy.loginfo(f'Adding object "{obj_name}" of type "{entity_id}" to the simulator.')
        # Add self collision to flag
        if engine_config["self_collision"]:
            flags = engine_config["flags"] | pybullet.URDF_USE_SELF_COLLISION
        else:
            flags = engine_config["flags"]
        # Add object
        # NOTE(review): engine_config["globalScaling"] is registered above but never
        # forwarded to URDFBasedRobot — confirm whether scaling is intentionally unsupported here.
        if engine_config["urdf"]:
            self.simulator["robots"][obj_name] = URDFBasedRobot(
                self._p,
                model_urdf=engine_config["urdf"],  # Can be path (ending with .urdf), or ros param key to urdf (xml)string.
                robot_name=obj_name,
                basePosition=engine_config["basePosition"],
                baseOrientation=engine_config["baseOrientation"],
                fixed_base=engine_config["fixed_base"],
                flags=flags,
            )
        else:  # if no urdf is provided, create dummy robot.
            self.simulator["robots"][obj_name] = None
@register.states(erp=Float32, contactERP=Float32, frictionERP=Float32)
def reset(self, erp: Float32 = None, contactERP: Float32 = None, frictionERP: Float32 = None):
"""Set any of the physics engine parameters (registered as states) if they were selected."""
physics_engine_params = {}
if erp:
physics_engine_params["erp"] = erp.data
if contactERP:
physics_engine_params["contactERP"] = contactERP.data
if frictionERP:
physics_engine_params["frictionERP"] = frictionERP.data
if len(physics_engine_params) > 0:
self._p.setPhysicsEngineParameter(**physics_engine_params)
    @register.outputs(tick=UInt64)
    def callback(self, t_n: float):
        """Here, we step the world by 1/rate seconds.

        :param t_n: Time of this tick in seconds (unused here — the step size is
            fixed; presumably supplied by the engine scheduler).
        """
        self.simulator["world"].step()
    def shutdown(self) -> None:
        """Disconnects the engine from the pybullet physics server"""
        # Terminates the bullet_client connection; the client handle is invalid afterwards.
        self._p.disconnect()
| en | 0.717114 | # Pybullet imports # ROS IMPORTS # RX IMPORTS A engine between the pybullet physics server and EAGERx engine nodes. A spec to create a PybulletEngine node that interfaces with a pybullet physics server. :param spec: Holds the desired configuration in a Spec object. :param rate: Rate of the engine :param process: {0: NEW_PROCESS, 1: ENVIRONMENT, 2: ENGINE, 3: EXTERNAL} :param sync: Run reactive or async :param real_time_factor: Simulation speed. 0 == "as fast as possible". :param simulate_delays: Boolean flag to simulate delays. :param log_level: {0: SILENT, 10: DEBUG, 20: INFO, 30: WARN, 40: ERROR, 50: FATAL} :param states: Physics engine parameters that are to be varied over episodes as a form of domain randomization. Currently available: `erp`, `contactERP`, `frictionERP`. :param world_fn: A string with syntax `module/WorldFnName` that received `bullet_client` as an argument. The function builds-up the (static) world (i.e. loads urdfs into pybullet). See `eagerx_pybullet.world/empty_world_with_plane` for an example. :param gui: Create a GUI connection with 3D OpenGL rendering within the same process space as PyBullet. :param egl: Enable hardware accelerated OpenGL rendering without a X11 context for faster headless rendering. :param gravity: Sets the gravity constant along the y-axis. :param physics_engine_params: Parameter keys with their desired value. See the pybullet documentation for more info on the physics engine parameters: https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA/edit#heading=h.k37c0drzdf21 .. note:: fixedTimeStep cannot be set, as it is determined by the specified rate of the engine. Per default, numSubSteps is set such that simulation steps are taken at 240 hz. Tune numSubSteps to trade performance over accuracy. :return: EngineSpec # Modify default engine params # Add custom params # Set space converters for registered physics engine parameters. 
# todo: Default values for erp, contactERP, frictionERP? --> getPhysicsEngineParameters() does not include them... Initializes the engine to pybullet. :param world_fn: A string with syntax `module/WorldFnName` that received `bullet_client` as an argument. The function builds-up the (static) world (i.e. loads urdfs into pybullet). See `eagerx_pybullet.world/empty_world_with_plane` for an example. :param gui: Create a GUI connection with 3D OpenGL rendering within the same process space as PyBullet. :param egl: Enable hardware accelerated OpenGL rendering without a X11 context for faster headless rendering. :param gravity: Sets the gravity constant along the y-axis. :param physics_engine_params: Parameter keys with their desired value. See the pybullet documentation for more info on the physics engine parameters: # Connect to pybullet # Initialzize # Set physics parameters # Create pybullet simulator that will be shared with all EngineStates & EngineNodes (if launched in same process). # p = bullet_client.BulletClient(pybullet.SHARED_MEMORY, options="-shared_memory_key 1234") # optionally enable EGL for faster headless rendering # Add search path for urdfs Adds an object to the connected Pybullet physics server. :param config: The (agnostic) config of the :class:`~eagerx.core.entities.Object` that is to be added. :param engine_config: The engine-specific config of the :class:`~eagerx.core.entities.Object` that is to be added. This dict contains the registered parameters: See https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA/edit# for all available flags. - **urdf**: A fullpath (ending with .urdf), a key that points to the urdf (xml)string on the rosparam server, or a urdf within pybullet's search path. The `pybullet_data` package is included in the search path. - **basePosition**: Base position of the object [x, y, z]. - **baseOrientation**: Base orientation of the object in quaternion [x, y, z, w]. 
- **fixed_base**: Force the base of the loaded object to be static. - **self_collision**: Sets the `URDF_USE_SELF_COLLISION` flag to allow self collisions. - **globalScaling**: globalScaling will apply a scale factor to the URDF model. - **flags**: Flags (see link below) that can be combined using a bitwise OR, |. :param node_params: A list containing the config of every :class:`~eagerx.core.entities.EngineNode` that represents an :class:`~eagerx.core.entities.Object`'s sensor or actuator that is to be added. :param state_params: A list containing the parameters of every the :class:`~eagerx.core.entities.Object`'s :class:`~eagerx.core.entities.EngineState` that is to be added. # add objects to simulator (we have a ref to the simulator with self.simulator) # Add self collision to flag # Add object # Can be path (ending with .urdf), or ros param key to urdf (xml)string. # if no urdf is provided, create dummy robot. Set any of the physics engine parameters (registered as states) if they were selected. Here, we step the world by 1/rate seconds. Disconnects the engine from the pybullet physics server | 2.223622 | 2 |
huey_legacy/importer.py | yipstar/surf_python | 0 | 6620794 | import numpy as np
import pandas as pd
import os
import datetime
import requests
from huey.models import Buoy, BuoyRealtimeWaveDetail, BuoyRawSpectralWaveData
def import_buoy_realtime_wave_detail(db_session, station_id="46025"):
    """Fetch the NDBC realtime wave-detail feed for a buoy and persist new observations.

    Downloads the ``.spec`` feed for *station_id*, parses it with pandas, and
    inserts every observation newer than the latest one already stored. NDBC
    lists rows newest-first, so the loop stops at the first known timestamp.

    :param db_session: SQLAlchemy session used for querying and inserting.
    :param station_id: NDBC station identifier (default kept at "46025" for
        backward compatibility with the original hard-coded value).
    """
    buoy = db_session.query(Buoy).filter(Buoy.station_id == station_id).first()
    latest_ob = (
        db_session.query(BuoyRealtimeWaveDetail)
        .filter(BuoyRealtimeWaveDetail.buoy_id == buoy.id)
        .order_by(BuoyRealtimeWaveDetail.ts.desc())
        .first()
    )
    realtime_url = f"https://www.ndbc.noaa.gov/data/realtime2/{station_id}.spec"
    # NOTE: delim_whitespace is deprecated in pandas 2.x (use sep=r"\s+" when upgrading).
    df = pd.read_csv(realtime_url, delim_whitespace=True)
    # NDBC uses the sentinel 'MM' for missing measurements.
    # np.nan replaces the np.NaN alias, which was removed in NumPy 2.0.
    df = df.replace('MM', np.nan)
    # skip first row, which is a second header line (units) in the feed
    for (index, row) in df[1:].iterrows():
        ob = BuoyRealtimeWaveDetail.from_pd_row(row)
        ob.buoy = buoy
        if (latest_ob is None or ob.ts > latest_ob.ts):
            print(f"inserting observation for date: {ob.ts}")
            db_session.add(ob)
        else:
            # Rows are newest-first, so everything after this point is already stored.
            print(f"observation for date: {ob.ts} already present, skipping.")
            break
    db_session.commit()
    print("import complete")
def import_buoy_raw_spectral_wave_data(db_session, station_id="46025"):
    """Fetch the NDBC raw spectral wave feed for a buoy and persist new observations.

    Downloads the ``.data_spec`` feed for *station_id* and inserts every
    observation newer than the latest one already stored. NDBC lists rows
    newest-first, so the loop stops at the first known timestamp.

    :param db_session: SQLAlchemy session used for querying and inserting.
    :param station_id: NDBC station identifier (default kept at "46025" for
        backward compatibility with the original hard-coded value).
    """
    buoy = db_session.query(Buoy).filter(Buoy.station_id == station_id).first()
    latest_ob = (
        db_session.query(BuoyRawSpectralWaveData)
        .filter(BuoyRawSpectralWaveData.buoy_id == buoy.id)
        .order_by(BuoyRawSpectralWaveData.ts.desc())
        .first()
    )
    raw_spec_url = f"https://www.ndbc.noaa.gov/data/realtime2/{station_id}.data_spec"
    response = requests.get(raw_spec_url)
    # Fail loudly on HTTP errors instead of silently parsing an error page as data.
    response.raise_for_status()
    data = response.text
    # skip first row which is header
    for line in data.splitlines()[1:]:
        ob = BuoyRawSpectralWaveData.from_data_line(line)
        ob.buoy = buoy
        if (latest_ob is None or ob.ts > latest_ob.ts):
            print(f"inserting observation for date: {ob.ts}")
            db_session.add(ob)
        else:
            # Rows are newest-first, so everything after this point is already stored.
            print(f"observation for date: {ob.ts} already present, skipping.")
            break
    db_session.commit()
    print("import complete")
| import numpy as np
import pandas as pd
import os
import datetime
import requests
from huey.models import Buoy, BuoyRealtimeWaveDetail, BuoyRawSpectralWaveData
def import_buoy_realtime_wave_detail(db_session):
station_id = "46025"
buoy = db_session.query(Buoy).filter(Buoy.station_id == station_id).first()
latest_ob = db_session.query(BuoyRealtimeWaveDetail).filter(BuoyRealtimeWaveDetail.buoy_id == buoy.
id ).order_by(BuoyRealtimeWaveDetail.ts.desc()).first()
realtime_url = f"https://www.ndbc.noaa.gov/data/realtime2/{station_id}.spec"
df = pd.read_csv(realtime_url, delim_whitespace=True)
df = df.replace('MM', np.NaN)
# skip first row which is header
for (index, row) in df[1:].iterrows():
ob = BuoyRealtimeWaveDetail.from_pd_row(row)
ob.buoy = buoy
if (latest_ob is None or ob.ts > latest_ob.ts):
print(f"inserting observation for date: {ob.ts}")
db_session.add(ob)
else:
print(f"observation for date: {ob.ts} already present, skipping.")
break
db_session.commit()
print("import complete")
def import_buoy_raw_spectral_wave_data(db_session):
station_id = "46025"
buoy = db_session.query(Buoy).filter(Buoy.station_id == station_id).first()
latest_ob = db_session.query(BuoyRawSpectralWaveData).filter(BuoyRawSpectralWaveData.buoy_id == buoy.id ).order_by(BuoyRawSpectralWaveData.ts.desc()).first()
raw_spec_url = f"https://www.ndbc.noaa.gov/data/realtime2/{station_id}.data_spec"
response = requests.get(raw_spec_url)
data = response.text
# skip first row which is header
for line in data.splitlines()[1:]:
ob = BuoyRawSpectralWaveData.from_data_line(line)
ob.buoy = buoy
if (latest_ob is None or ob.ts > latest_ob.ts):
print(f"inserting observation for date: {ob.ts}")
db_session.add(ob)
else:
print(f"observation for date: {ob.ts} already present, skipping.")
break
db_session.commit()
print("import complete")
| en | 0.966524 | # skip first row which is header # skip first row which is header | 2.950546 | 3 |
src/emr/scripts/jdbc_load.py | anorth848/aws-data-analytics | 1 | 6620795 | <reponame>anorth848/aws-data-analytics<gh_stars>1-10
import argparse
import json
import logging
import os
from datetime import datetime
from jdbc import get_spark
# Bronze (raw) lake root; joining with '' guarantees a trailing separator on the prefix.
lake_location_uri = os.path.join(os.environ['BRONZE_LAKE_S3URI'], '')
# Log level is overridable via the environment; defaults to INFO.
log_level = os.environ.get('LOG_LEVEL', 'INFO')
logging.basicConfig(
    format='%(asctime)s | %(levelname)s | %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=log_level)
def get_args():
    """Parse command-line options for this job.

    Supports a single optional flag, ``--table_name``, naming the
    schema-qualified table to load (default: ``public.customer``).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--table_name', default='public.customer')
    return arg_parser.parse_args()
def main():
    """Entry point: full-load one table from the source database into the bronze lake.

    Reads the runtime config dropped on the EMR instance, opens a Spark JDBC
    reader for the requested table, and writes a full snapshot as parquet under
    ``<bronze lake>/full/<db>/<schema>/<table>``.
    """
    args = get_args()
    table_name = args.table_name
    # Use a context manager so the config file handle is always closed
    # (the original opened it and never closed it).
    with open('/mnt/var/lib/instance-controller/public/runtime_configs/configs.json') as f:
        config_dict = json.load(f)
    logging.debug(json.dumps(config_dict, indent=4))
    database_config = config_dict['DatabaseConfig']
    table_config = config_dict['StepConfigs'][table_name]
    secret_id = database_config['secret']
    # dict.get replaces the manual "key in dict" conditional; same None default.
    spark_jdbc_config = table_config.get('spark_jdbc_config')
    spark, spark_jdbc, source_db = get_spark(secret_id, table_name, spark_jdbc_config)
    spark.sparkContext.setLogLevel(log_level)
    table_prefix = f"{source_db}/{table_name.replace('.','/')}"
    precombine_field = table_config['hudi_config']['watermark']
    if precombine_field == 'trx_seq':
        # Downstream we will merge CDC using AR_H_CHANGE_SEQ as the key if trx_seq is the precombine field
        # https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TableMapping.SelectionTransformation.Expressions.html#CHAP_Tasks.CustomizingTasks.TableMapping.SelectionTransformation.Expressions-Headers
        # Generate this field for the full load
        trx_seq = datetime.now().strftime('%Y%m%d%H%M%S000000000000000000000')
    else:
        raise RuntimeError('Only trx_seq for precombine is currently supported')
    spark_jdbc.load().createOrReplaceTempView('temp_view')
    # Hudi requires columns to be in the same order, data type, and null constraints
    df = spark.sql(f"""
    SELECT CASE WHEN 1=0 THEN NULL ELSE 'I' END AS Op,
    t.*,
    CASE WHEN 1=0 THEN NULL ELSE '{trx_seq}' END AS trx_seq,
    CASE WHEN 1=0 THEN NULL ELSE FALSE END AS _hoodie_is_deleted
    FROM temp_view t
    """)
    df.write \
        .format('parquet') \
        .mode('overwrite') \
        .save(os.path.join(lake_location_uri, 'full', table_prefix))
if __name__ == "__main__":
main()
| import argparse
import json
import logging
import os
from datetime import datetime
from jdbc import get_spark
lake_location_uri = os.path.join(os.environ['BRONZE_LAKE_S3URI'], '')
log_level = os.environ.get('LOG_LEVEL', 'INFO')
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=log_level)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--table_name', default='public.customer')
args = parser.parse_args()
return args
def main():
args = get_args()
table_name = args.table_name
f = open('/mnt/var/lib/instance-controller/public/runtime_configs/configs.json')
config_dict = json.load(f)
logging.debug(json.dumps(config_dict, indent=4))
database_config = config_dict['DatabaseConfig']
table_config = config_dict['StepConfigs'][table_name]
secret_id = database_config['secret']
spark_jdbc_config = table_config['spark_jdbc_config'] if 'spark_jdbc_config' in table_config else None
spark, spark_jdbc, source_db = get_spark(secret_id, table_name, spark_jdbc_config)
spark.sparkContext.setLogLevel(log_level)
table_prefix = f"{source_db}/{table_name.replace('.','/')}"
precombine_field = table_config['hudi_config']['watermark']
if precombine_field == 'trx_seq':
# Downstream we will merge CDC using AR_H_CHANGE_SEQ as the key if trx_seq is the precombine field
# https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TableMapping.SelectionTransformation.Expressions.html#CHAP_Tasks.CustomizingTasks.TableMapping.SelectionTransformation.Expressions-Headers
# Generate this field for the full load
trx_seq = datetime.now().strftime('%Y%m%d%H%M%S000000000000000000000')
else:
raise RuntimeError('Only trx_seq for precombine is currently supported')
spark_jdbc.load().createOrReplaceTempView('temp_view')
# Hudi requires columns to be in the same order, data type, and null constraints
df = spark.sql(f"""
SELECT CASE WHEN 1=0 THEN NULL ELSE 'I' END AS Op,
t.*,
CASE WHEN 1=0 THEN NULL ELSE '{trx_seq}' END AS trx_seq,
CASE WHEN 1=0 THEN NULL ELSE FALSE END AS _hoodie_is_deleted
FROM temp_view t
""")
df.write \
.format('parquet') \
.mode('overwrite') \
.save(os.path.join(lake_location_uri, 'full', table_prefix))
if __name__ == "__main__":
main() | en | 0.629871 | # Downstream we will merge CDC using AR_H_CHANGE_SEQ as the key if trx_seq is the precombine field # https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TableMapping.SelectionTransformation.Expressions.html#CHAP_Tasks.CustomizingTasks.TableMapping.SelectionTransformation.Expressions-Headers # Generate this field for the full load # Hudi requires columns to be in the same order, data type, and null constraints SELECT CASE WHEN 1=0 THEN NULL ELSE 'I' END AS Op, t.*, CASE WHEN 1=0 THEN NULL ELSE '{trx_seq}' END AS trx_seq, CASE WHEN 1=0 THEN NULL ELSE FALSE END AS _hoodie_is_deleted FROM temp_view t | 2.193132 | 2 |
testcases/basic_func_tests/tc_006_ssh_test_ext_ntp.py | akraino-edge-stack/ta-cloudtaf | 0 | 6620796 | import sys
import os
from decorators_for_robot_functionalities import *
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from test_constants import *
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
import common_utils # noqa
ex = BuiltIn().get_library_instance('execute_command')
stack_infos = BuiltIn().get_library_instance('stack_infos')
def tc_006_ssh_test_ext_ntp():
    """Test-case entry point: run each step through the common keyword runner."""
    steps = ['step1_check_ntpd_service_and_ext_ntp_ip_on_crf_nodes']
    common_utils.keyword_runner(steps)
def step1_check_ntpd_service_and_ext_ntp_ip_on_crf_nodes():
    """Check that ntpd is active and the external NTP servers are configured on all CRF nodes."""
    crf_nodes = stack_infos.get_crf_nodes()
    check_ntpd_status(crf_nodes)
    check_if_nokia_ntp_server_address_set_on_crf_node(crf_nodes)
@robot_log
def check_ntpd_status(nodes):
    """Assert that the ntpd systemd service is active on every node in *nodes*.

    :param nodes: Mapping of node name -> node address; may be empty.
    :raises Exception: If ntpd is not reported as running on any node.
    """
    if not nodes:
        logger.info("Nodes dictionary is empty, nothing to check.")
        return
    command = 'systemctl status ntpd.service | grep --color=no "Active"'
    for node in nodes:
        logger.console("\nCheck ntpd status " + node + " " + nodes[node])
        stdout = ex.execute_unix_command_on_remote_as_user(command, nodes[node])
        # systemctl reports e.g. "Active: active (running)" when the unit is up.
        if "running" not in stdout:
            raise Exception("ntpd.service is not running!")
@robot_log
def get_ext_ntp_ips_from_node():
    """Return the external NTP server list from the inventory (all/vars/time/ntp_servers)."""
    return stack_infos.get_inventory()["all"]["vars"]["time"]["ntp_servers"]
@robot_log
def filter_valid_ntp_servers(ntp_servers):
    """Return the subset of *ntp_servers* that answer an ``ntpdate -q`` probe."""
    failure_marker = "no server suitable for synchronization found"
    return [
        server
        for server in ntp_servers
        if failure_marker not in ex.execute_unix_command(
            "ntpdate -q {}".format(server), fail_on_non_zero_rc=False)
    ]
@robot_log
def is_ntp_server_set_on_node(server_ip, node):
    """Return True if *server_ip* appears in the ``ntpq -pn`` peer list on *node*."""
    command = 'ntpq -pn | grep -w --color=no ' + server_ip
    # Non-zero rc is expected when grep finds no match, so do not fail on it.
    stdout = ex.execute_unix_command_on_remote_as_user(command, node, {}, fail_on_non_zero_rc=False)
    return server_ip in str(stdout)
@robot_log
def check_if_nokia_ntp_server_address_set_on_crf_node(nodes):
    """Verify every reachable external NTP server is configured on every node.

    Collects all missing (node, server) pairs and reports them in the raised
    exception, so a failure names exactly what is misconfigured instead of
    the original generic message.

    :param nodes: Mapping of node name -> node address.
    :raises Exception: If any server is missing from any node's ntpq peer list.
    """
    ext_ntp_server_ips = get_ext_ntp_ips_from_node()
    valid_servers = filter_valid_ntp_servers(ext_ntp_server_ips)
    logger.info("The following ntp_servers will be tested:")
    logger.info(valid_servers)
    missing = [
        (node, ntp_serv_ip)
        for node in nodes
        for ntp_serv_ip in valid_servers
        if not is_ntp_server_set_on_node(ntp_serv_ip, node)
    ]
    if missing:
        raise Exception("Wrong or no NTP server address set! Missing (node, server) pairs: {}".format(missing))
| import sys
import os
from decorators_for_robot_functionalities import *
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from test_constants import *
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
import common_utils # noqa
ex = BuiltIn().get_library_instance('execute_command')
stack_infos = BuiltIn().get_library_instance('stack_infos')
def tc_006_ssh_test_ext_ntp():
steps = ['step1_check_ntpd_service_and_ext_ntp_ip_on_crf_nodes']
common_utils.keyword_runner(steps)
def step1_check_ntpd_service_and_ext_ntp_ip_on_crf_nodes():
crf_nodes = stack_infos.get_crf_nodes()
check_ntpd_status(crf_nodes)
check_if_nokia_ntp_server_address_set_on_crf_node(crf_nodes)
@robot_log
def check_ntpd_status(nodes):
if not nodes:
logger.info("Nodes dictionary is empty, nothing to check.")
return
command = 'systemctl status ntpd.service | grep --color=no "Active"'
for node in nodes:
logger.console("\nCheck ntpd status " + node + " " + nodes[node])
stdout = ex.execute_unix_command_on_remote_as_user(command, nodes[node])
if "running" not in stdout:
raise Exception("ntpd.service is not running!")
@robot_log
def get_ext_ntp_ips_from_node():
return stack_infos.get_inventory()["all"]["vars"]["time"]["ntp_servers"]
@robot_log
def filter_valid_ntp_servers(ntp_servers):
valid_servers = []
for server in ntp_servers:
stdout = ex.execute_unix_command("ntpdate -q {}".format(server), fail_on_non_zero_rc=False)
if "no server suitable for synchronization found" not in stdout:
valid_servers.append(server)
return valid_servers
@robot_log
def is_ntp_server_set_on_node(server_ip, node):
command = 'ntpq -pn | grep -w --color=no ' + server_ip
stdout = ex.execute_unix_command_on_remote_as_user(command, node, {}, fail_on_non_zero_rc=False)
return server_ip in str(stdout)
@robot_log
def check_if_nokia_ntp_server_address_set_on_crf_node(nodes):
ext_ntp_server_ips = get_ext_ntp_ips_from_node()
valid_servers = filter_valid_ntp_servers(ext_ntp_server_ips)
logger.info("The following ntp_servers will be tested:")
logger.info(valid_servers)
is_ip_set = True
for node in nodes:
for ntp_serv_ip in valid_servers:
if not is_ntp_server_set_on_node(ntp_serv_ip, node):
is_ip_set = False
if not is_ip_set:
raise Exception("Wrong or no NTP server address set!")
| none | 1 | 2.063728 | 2 | |
examples/docs_snippets_crag/docs_snippets_crag_tests/conftest.py | dbatten5/dagster | 4,606 | 6620797 | <reponame>dbatten5/dagster
import pytest
from dagster import file_relative_path
@pytest.fixture
def docs_snippets_crag_folder():
    """Path to the docs_snippets_crag package folder, resolved relative to this file."""
    return file_relative_path(__file__, "../docs_snippets_crag/")
| import pytest
from dagster import file_relative_path
@pytest.fixture
def docs_snippets_crag_folder():
return file_relative_path(__file__, "../docs_snippets_crag/") | none | 1 | 1.635606 | 2 | |
src/models/base_model.py | mana-ysh/knowledge-graph-embeddings | 248 | 6620798 | <gh_stars>100-1000
import dill
class BaseModel(object):
    """Abstract interface for knowledge-graph embedding models.

    Subclasses implement the scoring, gradient and embedding-lookup hooks
    below; this base class only provides gradient clearing and dill-based
    (de)serialization. Assumes subclasses define ``self.params``: a mapping
    whose values support ``clear()``.
    """
    def __init__(self, **kwargs):
        raise NotImplementedError
    def cal_rank(self, **kwargs):
        # Evaluation hook: subclasses compute ranking results here.
        raise NotImplementedError
    # For max-margin loss
    def _pairwisegrads(self, **kwargs):
        raise NotImplementedError
    # For log-likelihood
    def _singlegrads(self, **kwargs):
        raise NotImplementedError
    def _composite(self, **kwargs):
        raise NotImplementedError
    def _cal_similarity(self, **kwargs):
        raise NotImplementedError
    # Embedding lookup hooks for entities and relations.
    def pick_ent(self, **kwargs):
        raise NotImplementedError
    def pick_rel(self, **kwargs):
        raise NotImplementedError
    # Scoring hooks (forward, inverse, and full-triplet variants).
    def cal_scores(self, **kwargs):
        raise NotImplementedError
    def cal_scores_inv(self, **kwargs):
        raise NotImplementedError
    def cal_triplet_scores(self, **kwargs):
        raise NotImplementedError
    def zerograds(self):
        """Clear every parameter in ``self.params`` (presumably accumulated gradients)."""
        for param in self.params.values():
            param.clear()
    def prepare(self):
        """Reset state before an update step; currently just clears parameters."""
        self.zerograds()
    def save_model(self, model_path):
        """Serialize the whole model object to *model_path* with dill."""
        with open(model_path, 'wb') as fw:
            dill.dump(self, fw)
    @classmethod
    def load_model(cls, model_path):
        """Load and return a model previously written by :meth:`save_model`."""
        with open(model_path, 'rb') as f:
            model = dill.load(f)
        return model
| import dill
class BaseModel(object):
def __init__(self, **kwargs):
raise NotImplementedError
def cal_rank(self, **kwargs):
raise NotImplementedError
# For max-margin loss
def _pairwisegrads(self, **kwargs):
raise NotImplementedError
# For log-likelihood
def _singlegrads(self, **kwargs):
raise NotImplementedError
def _composite(self, **kwargs):
raise NotImplementedError
def _cal_similarity(self, **kwargs):
raise NotImplementedError
def pick_ent(self, **kwargs):
raise NotImplementedError
def pick_rel(self, **kwargs):
raise NotImplementedError
def cal_scores(self, **kwargs):
raise NotImplementedError
def cal_scores_inv(self, **kwargs):
raise NotImplementedError
def cal_triplet_scores(self, **kwargs):
raise NotImplementedError
def zerograds(self):
for param in self.params.values():
param.clear()
def prepare(self):
self.zerograds()
def save_model(self, model_path):
with open(model_path, 'wb') as fw:
dill.dump(self, fw)
@classmethod
def load_model(cls, model_path):
with open(model_path, 'rb') as f:
model = dill.load(f)
return model | en | 0.57936 | # For max-margin loss # For log-likelihood | 2.156693 | 2 |
events/migrations/0004_auto_20180529_0854.py | RyanRMurray/warwick_gg | 5 | 6620799 | # Generated by Django 2.0.2 on 2018-05-29 07:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: make Event.slug unique (40 chars max).
    dependencies = [
        ('events', '0003_auto_20180529_0847'),
    ]
    operations = [
        migrations.AlterField(
            model_name='event',
            name='slug',
            # enforces a DB-level unique constraint on the slug column
            field=models.SlugField(max_length=40, unique=True),
        ),
    ]
| # Generated by Django 2.0.2 on 2018-05-29 07:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0003_auto_20180529_0847'),
]
operations = [
migrations.AlterField(
model_name='event',
name='slug',
field=models.SlugField(max_length=40, unique=True),
),
]
| en | 0.814174 | # Generated by Django 2.0.2 on 2018-05-29 07:54 | 1.441333 | 1 |
Main/zscore_trading_app.py | DylanScotney/cryptocurrency_backtesting | 0 | 6620800 | import pandas as pd
import numpy as np
import os.path
from matplotlib import pyplot as plt
from matplotlib.ticker import FuncFormatter
from ..Lib.data_loading.file_loading_strategies import fileLoadingDF
from ..Lib.strategies.zscore_trend import zScoreTrader
from ..Lib.strategy_backtester import backtest
cpath = os.path.dirname(__file__) # current path
def main():
    """Run the z-score / SMA-crossover backtest sweep.

    For every (bandwidth, slow-MA, fast-MA, z-score-MA) combination and
    every symbol in the mock dataframe, runs a backtest, accumulates the
    cumulative return per parameter cell, then optionally plots a heatmap
    per bandwidth and/or saves per-trade returns to CSV.
    """
    # Load Dataframe
    #--------------------------------------------------------------------------
    infile = cpath+"\\..\\Data\\mock_df.csv"
    loader = fileLoadingDF(infile)
    df = loader.get_data()
    df_csv = df[['date']]
    #--------------------------------------------------------------------------
    # Define trading parameters
    #--------------------------------------------------------------------------
    save_results = False
    plot_results = True
    results_outfile = "dualSMAZscores"
    symbols = [key for key in df.keys() if key not in ['date']]
    bandwidths = [1.0, 1.5, 2.0]
    MAs = [80, 100]
    num_faster_MAs = 3 # number of faster MAs for each MA
    ZScore_MAs = [5, 8, 12]
    #--------------------------------------------------------------------------
    # Execute trading
    #--------------------------------------------------------------------------
    for bandwidth in bandwidths:
        if save_results:
            df_csv = df[['date']]
        # one cell per (slow/fast MA pair, z-score MA), summed over symbols
        returns = np.zeros((len(MAs)*num_faster_MAs, len(ZScore_MAs)))
        ylabels = [] # plot labels
        for iter_cnt, symbol in enumerate(symbols):
            for i in range(len(MAs)):
                MAslow = MAs[i]
                # fast MAs evenly spaced in [1, MAslow)
                faster_MAs = np.linspace(1, MAslow, num=num_faster_MAs,
                                         endpoint=False)
                faster_MAs = [int(item) for item in faster_MAs]
                for k in range(num_faster_MAs):
                    MAfast = faster_MAs[k]
                    if iter_cnt == 0:
                        # only append for first symbol so no repetitions
                        if num_faster_MAs == 1:
                            ylabels.append(MAslow)
                        else:
                            ylabels.append('{}v{}'.format(MAslow, MAfast))
                    for j in range(len(ZScore_MAs)):
                        Z_MA = ZScore_MAs[j]
                        print("Trading {} for Z score: {}, SMAs: {}v{}"
                              .format(symbol, Z_MA, MAfast, MAslow))
                        asset_df = df.loc[:, ['date', symbol]].reset_index()
                        strategy = zScoreTrader(asset_df, symbol, "SMA",
                                                MAslow, Z_MA, bandwidth,
                                                fast_MA=MAfast)
                        trader = backtest(strategy)
                        cum_returns = trader.trade()
                        loc = num_faster_MAs*i + k, j
                        returns[loc] += cum_returns
                        print("Cumulative returns: {0:.2}%\n"
                              .format(cum_returns*100))
                        if save_results:
                            key = '{}_{}v{}_{}_{}'.format(symbol, MAslow,
                                                          MAfast, Z_MA,
                                                          bandwidth)
                            # assumes trader.trade() adds a 'returns' column
                            # to asset_df — TODO confirm against zScoreTrader
                            df_csv[key] = asset_df['returns']
        # ---------------------------------------------------------------------
        # Plot Results
        # ---------------------------------------------------------------------
        if plot_results:
            num_symbols = len(symbols)
            returns = returns*100/num_symbols # average percentage rets
            plt.imshow(returns, cmap='RdBu')
            plt.colorbar(format=FuncFormatter(fmt))
            # symmetric colour limits so zero return is the neutral colour
            max_ret = max(returns.min(), returns.max(), key=abs)
            plt.clim(vmin=-max_ret, vmax=max_ret)
            plt.yticks(np.arange(len(ylabels)), ylabels)
            plt.xticks(np.arange(len(ZScore_MAs)), ZScore_MAs)
            plt.ylabel("SMA Period")
            plt.xlabel("Z Score Period")
            plt.title("Total Returns Bandwidth={}".format(bandwidth))
            plt.show()
        #----------------------------------------------------------------------
        if save_results:
            # NOTE(review): results_outfile is constant, so each bandwidth
            # iteration overwrites the previous CSV — confirm intent
            df_csv.to_csv(results_outfile)
def fmt(x, pos):
    """Render a colourbar tick value *x* as a percentage label.

    *pos* is required by matplotlib's FuncFormatter signature but unused.
    """
    rounded = np.round(x, 0)
    return f"{rounded}%"
if __name__ == "__main__":
    main()
import numpy as np
import os.path
from matplotlib import pyplot as plt
from matplotlib.ticker import FuncFormatter
from ..Lib.data_loading.file_loading_strategies import fileLoadingDF
from ..Lib.strategies.zscore_trend import zScoreTrader
from ..Lib.strategy_backtester import backtest
cpath = os.path.dirname(__file__) # current path
def main():
# Load Dataframe
#--------------------------------------------------------------------------
infile = cpath+"\\..\\Data\\mock_df.csv"
loader = fileLoadingDF(infile)
df = loader.get_data()
df_csv = df[['date']]
#--------------------------------------------------------------------------
# Define trading parameters
#--------------------------------------------------------------------------
save_results = False
plot_results = True
results_outfile = "dualSMAZscores"
symbols = [key for key in df.keys() if key not in ['date']]
bandwidths = [1.0, 1.5, 2.0]
MAs = [80, 100]
num_faster_MAs = 3 # number of faster MAs for each MA
ZScore_MAs = [5, 8, 12]
#--------------------------------------------------------------------------
# Execute trading
#--------------------------------------------------------------------------
for bandwidth in bandwidths:
if save_results:
df_csv = df[['date']]
returns = np.zeros((len(MAs)*num_faster_MAs, len(ZScore_MAs)))
ylabels = [] # plot labels
for iter_cnt, symbol in enumerate(symbols):
for i in range(len(MAs)):
MAslow = MAs[i]
faster_MAs = np.linspace(1, MAslow, num=num_faster_MAs,
endpoint=False)
faster_MAs = [int(item) for item in faster_MAs]
for k in range(num_faster_MAs):
MAfast = faster_MAs[k]
if iter_cnt == 0:
# only append for first symbol so no repetitions
if num_faster_MAs == 1:
ylabels.append(MAslow)
else:
ylabels.append('{}v{}'.format(MAslow, MAfast))
for j in range(len(ZScore_MAs)):
Z_MA = ZScore_MAs[j]
print("Trading {} for Z score: {}, SMAs: {}v{}"
.format(symbol, Z_MA, MAfast, MAslow))
asset_df = df.loc[:, ['date', symbol]].reset_index()
strategy = zScoreTrader(asset_df, symbol, "SMA",
MAslow, Z_MA, bandwidth,
fast_MA=MAfast)
trader = backtest(strategy)
cum_returns = trader.trade()
loc = num_faster_MAs*i + k, j
returns[loc] += cum_returns
print("Cumulative returns: {0:.2}%\n"
.format(cum_returns*100))
if save_results:
key = '{}_{}v{}_{}_{}'.format(symbol, MAslow,
MAfast, Z_MA,
bandwidth)
df_csv[key] = asset_df['returns']
# ---------------------------------------------------------------------
# Plot Results
# ---------------------------------------------------------------------
if plot_results:
num_symbols = len(symbols)
returns = returns*100/num_symbols # average percentage rets
plt.imshow(returns, cmap='RdBu')
plt.colorbar(format=FuncFormatter(fmt))
max_ret = max(returns.min(), returns.max(), key=abs)
plt.clim(vmin=-max_ret, vmax=max_ret)
plt.yticks(np.arange(len(ylabels)), ylabels)
plt.xticks(np.arange(len(ZScore_MAs)), ZScore_MAs)
plt.ylabel("SMA Period")
plt.xlabel("Z Score Period")
plt.title("Total Returns Bandwidth={}".format(bandwidth))
plt.show()
#----------------------------------------------------------------------
if save_results:
df_csv.to_csv(results_outfile)
def fmt(x, pos):
"""
Formats colourbar to display as a percentage
"""
return '{}%'.format(np.round(x, 0))
if __name__ == "__main__":
main() | en | 0.182181 | # current path # Load Dataframe #-------------------------------------------------------------------------- #-------------------------------------------------------------------------- # Define trading parameters #-------------------------------------------------------------------------- # number of faster MAs for each MA #-------------------------------------------------------------------------- # Execute trading #-------------------------------------------------------------------------- # plot labels # only append for first symbol so no repetitions # --------------------------------------------------------------------- # Plot Results # --------------------------------------------------------------------- # average percentage rets #---------------------------------------------------------------------- Formats colourbar to display as a percentage | 2.269447 | 2 |
run.py | aeroc7/mnist_cnn | 0 | 6620801 | import torch
import torch.nn.functional as F
from net import CNN
class RunModel():
    """Wraps a trained CNN checkpoint and runs single-batch inference."""

    def __init__(self):
        # Restore trained weights from disk onto the chosen device.
        checkpoint_path = 'model.pth'
        self.net = CNN().to(self.device())
        self.net.load_state_dict(torch.load(checkpoint_path))

    def device(self):
        """Pick CUDA when available, otherwise fall back to the CPU."""
        if torch.cuda.is_available():
            return torch.device('cuda')
        return torch.device('cpu')

    def run_model(self, img):
        """Return (predicted class index, softmax confidence) for *img*."""
        self.net.eval()
        logits = self.net(img)
        predicted = logits.max(1).indices
        confidence, _ = torch.max(F.softmax(logits, dim=1), 1)
        return (predicted[0], confidence[0])
| import torch
import torch.nn.functional as F
from net import CNN
class RunModel():
def __init__(self):
FILE = 'model.pth'
self.net = CNN().to(self.device())
self.net.load_state_dict(torch.load(FILE))
def device(self):
return torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def run_model(self, img):
self.net.eval()
output = self.net(img)
pred = output.max(1).indices
probs = F.softmax(output, dim=1)
conf, _ = torch.max(probs, 1)
return (pred[0], conf[0])
| none | 1 | 2.663986 | 3 | |
crawler/jandan/jandan/management/cross_compare_mongodb.py | hmumixaM/anything | 0 | 6620802 | import pymongo
client = pymongo.MongoClient(host='127.0.0.1', port=27017)
db = client.jandan
tucao = db.tucao
comments = db.comments
result = tucao.find({'pid': {'$gt': 1}})
b = 0
for i in result:
if not comments.find_one({'pid': i['pid']}):
print(i['pid'])
b += 1
print(b)
client.close() | import pymongo
client = pymongo.MongoClient(host='127.0.0.1', port=27017)
db = client.jandan
tucao = db.tucao
comments = db.comments
result = tucao.find({'pid': {'$gt': 1}})
b = 0
for i in result:
if not comments.find_one({'pid': i['pid']}):
print(i['pid'])
b += 1
print(b)
client.close() | none | 1 | 2.81798 | 3 | |
QTA/Signals/ExponentialMovingAverage.py | TheDTFC/QTA_v1.0.0 | 1 | 6620803 | # -*- coding: utf-8 -*-
from pandas_datareader import data
import numpy as np
import pandas as pd
import datetime as dt
import QTA.DataConfiguration.RetrieveData as RD
class ExponentialMovingAverage:
    """Fetches closing prices for a symbol and computes their EMA.

    The smoothing factor ``alpha`` is applied recursively
    (``adjust=False``): y[t] = alpha * x[t] + (1 - alpha) * y[t-1].
    """

    def __init__(self, alpha, stock_symbol, start_date, end_date):
        self.alpha = alpha
        self.stock_symbol = stock_symbol
        self.start_date = start_date
        self.end_date = end_date

    def calculate_EMA_data(self):
        """Return the 'Close' price Series for the configured symbol/range."""
        testData = RD.get_data(self.stock_symbol, self.start_date, self.end_date)
        return testData['Close']

    def EMA_data(self):
        """Return a DataFrame with the raw closes and an 'EMA_PointM' column.

        The EMA is computed on the Series and assigned as a new column.
        (The previous version assigned a single-column DataFrame to a
        column, which is fragile across pandas versions.)
        """
        closes = self.calculate_EMA_data()
        frame = closes.to_frame()
        frame['EMA_PointM'] = closes.ewm(alpha=self.alpha, adjust=False).mean()
        return frame
from pandas_datareader import data
import numpy as np
import pandas as pd
import datetime as dt
import QTA.DataConfiguration.RetrieveData as RD
class ExponentialMovingAverage:
def __init__(self, alpha, stock_symbol, start_date, end_date):
#the variable point_m is the interval the moving average is set
self.alpha = alpha
self.stock_symbol = stock_symbol
self.start_date = start_date
self.end_date = end_date
def calculate_EMA_data(self):
testData = RD.get_data(self.stock_symbol, self.start_date, self.end_date)
dataClose = testData['Close']
#print(type(dataClose))
return dataClose
def EMA_data(self):
data1 = self.calculate_EMA_data().to_frame()
data1['EMA_PointM'] = data1.ewm(alpha=self.alpha, adjust=False).mean()
return data1 | en | 0.823393 | # -*- coding: utf-8 -*- #the variable point_m is the interval the moving average is set #print(type(dataClose)) | 2.963025 | 3 |
pyno/process.py | cgarjun/Pyno | 163 | 6620804 | <filename>pyno/process.py
from .serializer import Serializer
from .fileOperator import FileOperator
class Process():
    '''
    Abstract process: owns the node graph and drives its execution.
    '''
    def __init__(self):
        # -1 runs continuously, 0 pauses, n > 0 executes n single steps
        self.running = -1
        self.serializer = Serializer(self)
        self.file_operator = FileOperator()
        self.nodes = []
        # local namespace for in-pyno programs; 'G' aliases the scope itself
        self.global_scope = {}
        self.global_scope['G'] = self.global_scope

    def nodes_update(self):
        '''Run one evaluation pass over all nodes, honouring the run mode.'''
        if not self.running:
            return
        if self.running > 0:
            self.running -= 1
        # reset every node first so each sees a clean state, then execute
        for current in self.nodes:
            current.reset_proc()
        for current in self.nodes:
            current.processor()

    def new_pyno(self):
        '''Fully delete every node and start from an empty canvas.'''
        for doomed in self.nodes:
            doomed.delete(fully=True)
        self.nodes = []
        print('New pyno!')

    def save_pyno(self, filepath=None):
        '''Serialize all nodes and hand them to the file operator.'''
        serialized = self.serializer.serialize(self.nodes)
        # NOTE(review): self.filename is only assigned by load_pyno, so
        # saving a never-loaded process raises AttributeError — confirm.
        return self.file_operator.save(serialized, filepath=filepath,
                                       initialfile=self.filename)

    def load_pyno(self, filepath=None):
        '''Load a saved pyno file, replacing the current node graph.'''
        data, self.filename = self.file_operator.load(filepath)
        if data is None:  # loading failed
            return None
        if data:  # non-empty payload: clear the old graph first
            self.new_pyno()
        return self.load_data(data)

    def load_data(self, data, anchor=(0, 0)):
        '''Deserialize *data* at *anchor*, append the nodes, return them.'''
        fresh = self.serializer.deserialize(data, anchor)
        self.nodes.extend(fresh)
        return fresh
| <filename>pyno/process.py
from .serializer import Serializer
from .fileOperator import FileOperator
class Process():
'''
Abstract process
'''
def __init__(self):
self.running = -1 # -1: run continously, 0: pause/stop, n: do n steps
self.serializer = Serializer(self)
self.file_operator = FileOperator()
self.nodes = []
self.global_scope = {} # local space for in-pyno programs
self.global_scope['G'] = self.global_scope # to get global stuff
def nodes_update(self):
if not self.running:
return
if self.running > 0:
self.running -= 1
for node in self.nodes:
node.reset_proc()
for node in self.nodes:
node.processor()
def new_pyno(self):
for node in self.nodes:
node.delete(fully=True)
del node
self.nodes = []
print('New pyno!')
def save_pyno(self, filepath=None):
data = self.serializer.serialize(self.nodes)
return self.file_operator.save(data, filepath=filepath, initialfile=self.filename)
def load_pyno(self, filepath=None):
data, self.filename = self.file_operator.load(filepath)
if data is None: # Loading data failed
return None
elif data:
self.new_pyno()
return self.load_data(data)
def load_data(self, data, anchor=(0, 0)):
nodes = self.serializer.deserialize(data, anchor)
for node in nodes:
self.nodes.append(node)
return nodes
| en | 0.777473 | Abstract process # -1: run continously, 0: pause/stop, n: do n steps # local space for in-pyno programs # to get global stuff # Loading data failed | 2.725548 | 3 |
learn/data-science-from-scratch/ch2-python-basic/object-oriented-programming.py | hustbill/Python-auto | 0 | 6620805 | # Object Oriented Programming
# by convention, we give classes PascalCase names
class Set:
    """A tiny set type built on a dict: keys are members, values are True.

    Usage:
        s1 = Set()              # empty set
        s2 = Set([1, 2, 2, 3])  # seeded from an iterable (duplicates merge)
    """

    def __init__(self, values=None):
        """Create a Set, optionally seeding it from an iterable of values."""
        self.dict = {}  # membership map: element -> True
        if values is not None:
            for item in values:
                self.add(item)

    def __repr__(self):
        """String form shown at the prompt or produced by str()."""
        return "Set: " + str(self.dict.keys())

    def add(self, value):
        """Record *value* as a member (idempotent)."""
        self.dict[value] = True

    def contains(self, value):
        """Return True when *value* is a member."""
        return value in self.dict

    def remove(self, value):
        """Discard *value*; raises KeyError when it is not a member."""
        del self.dict[value]
# Test case
# NOTE: Python 2 `print` statement syntax — this demo will not run under
# Python 3 without converting to print(...).
s = Set([1, 2, 3])
s.add(4)
print s.contains(4) # True
s.remove(4)
print s.contains(4) # False
| # Object Oriented Programming
# by convention, we give classes PascalCase names
class Set:
# these are the member functions
# every one takes a first parameter "self" (another convention)
# that refers to the particular Set object being used
def __init__(self, values=None):
"""This is the constructor.
It gets called when you create a new Set.
You would use it like
s1 = Set() # empty set
s2 = Set([1, 2, 2, 3]) # initialize with values"""
self.dict = {} # each instance of Set has its own dict property
# which is what we'll use to track memnerships
if values is not None:
for value in values:
self.add(value)
def __repr__(self):
"""this is the string representation of a Set object
if you type it at the Python prompt or pass it to str()"""
return "Set: " + str(self.dict.keys())
# we'll represent membership by being a key in self.dict with value True
def add(self, value):
self.dict[value] = True
# value is in the Set if it's a key in the dictionary
def contains(self, value):
return value in self.dict
def remove(self, value):
del self.dict[value]
# Test case
s = Set([1, 2, 3])
s.add(4)
print s.contains(4) # True
s.remove(4)
print s.contains(4) # False
| en | 0.913074 | # Object Oriented Programming # by convention, we give classes PascalCase names # these are the member functions # every one takes a first parameter "self" (another convention) # that refers to the particular Set object being used This is the constructor. It gets called when you create a new Set. You would use it like s1 = Set() # empty set s2 = Set([1, 2, 2, 3]) # initialize with values # each instance of Set has its own dict property # which is what we'll use to track memnerships this is the string representation of a Set object if you type it at the Python prompt or pass it to str() # we'll represent membership by being a key in self.dict with value True # value is in the Set if it's a key in the dictionary # Test case # True # False | 4.473823 | 4 |
python/space-age/space_age.py | baduker/exercism.io | 0 | 6620806 | from typing import Callable, Optional
SECONDS_IN_EARTH_YEAR = 31557600  # seconds in one Earth year

# Orbital period of each planet, expressed in Earth years.
ORBITAL_PERIODS = {
    'earth': 1,
    'mercury': 0.2408467,
    'venus': 0.61519726,
    'mars': 1.8808158,
    'jupiter': 11.862615,
    'saturn': 29.447498,
    'uranus': 84.016846,
    'neptune': 164.79132,
}


def age_on_planet(seconds, planet):
    """Return the age in *planet*-years (rounded to 2 decimals)."""
    annual_seconds = SECONDS_IN_EARTH_YEAR * ORBITAL_PERIODS[planet]
    return round(seconds / annual_seconds, 2)


class SpaceAge(object):
    """Exposes on_<planet>() accessors computed from an age in seconds."""

    def __init__(self, seconds):
        self.seconds = seconds

    def __getattr__(self, name):
        # Dynamically provide on_mercury(), on_earth(), ... accessors.
        if name.startswith("on_"):
            planet = name[3:]
            if planet in ORBITAL_PERIODS:
                return lambda: age_on_planet(self.seconds, planet)
        # Raise (rather than return None) so hasattr()/getattr() keep
        # standard semantics for unknown attributes, and unknown planets
        # fail at lookup time instead of with a KeyError when called.
        raise AttributeError(name)
SECONDS_IN_EARTH_YEAR = 31557600
ORBITAL_PERIODS = {
'earth': 1,
'mercury': 0.2408467,
'venus': 0.61519726,
'mars': 1.8808158,
'jupiter': 11.862615,
'saturn': 29.447498,
'uranus': 84.016846,
'neptune': 164.79132,
}
def age_on_planet(seconds, planet):
annual_seconds = SECONDS_IN_EARTH_YEAR * ORBITAL_PERIODS[planet]
return round(seconds / annual_seconds, 2)
class SpaceAge(object):
def __init__(self, seconds):
self.seconds = seconds
def __getattr__(self, planet):
if planet.startswith("on_"):
return lambda: age_on_planet(self.seconds, planet[3:])
return None | none | 1 | 3.615134 | 4 | |
desafioAula07-6.py | barbosa-henrique/Python | 0 | 6620807 | #Crie um algoritmo que leia um número e mostre seu dobro, triplo e raiz quadrada
n = int(input('Informe um número inteiro '))
dobro = n * 2
triplo = n * 3
raiz = n ** (1/2)
print('Número informado {}, dobro = a {}, triplo igual a {} e raiz igual a {}'.format(n, dobro, triplo, raiz)) | #Crie um algoritmo que leia um número e mostre seu dobro, triplo e raiz quadrada
n = int(input('Informe um número inteiro '))
dobro = n * 2
triplo = n * 3
raiz = n ** (1/2)
print('Número informado {}, dobro = a {}, triplo igual a {} e raiz igual a {}'.format(n, dobro, triplo, raiz)) | pt | 0.89963 | #Crie um algoritmo que leia um número e mostre seu dobro, triplo e raiz quadrada | 3.969079 | 4 |
breakfastbot/console.py | marwano/breakfastbot | 0 | 6620808 | <gh_stars>0
import pprint
import time
import tty
import sys
import termios
from .motors import move
EXIT_KEYS = ['\x1b', '\x03'] # escape and CTRL+C
STDIN_FD = sys.stdin.fileno()  # cached stdin descriptor for the termios calls
# Numeric keypad keys mapped to motor action names (passed to motors.move).
ACTIONS = {
    '1': 'rotate_clockwise',
    '3': 'rotate_anticlockwise',
    '4': 'swing_clockwise',
    '6': 'swing_anticlockwise',
    '7': 'drop_spoon_small',
    '9': 'drop_spoon_big',
}
def get_char():
    """Read a single keypress from stdin without waiting for Enter.

    Temporarily switches the terminal to raw mode and always restores the
    previous settings, even if the read raises.
    """
    old_settings = termios.tcgetattr(STDIN_FD)
    try:
        tty.setraw(STDIN_FD)
        return sys.stdin.read(1)
    finally:
        # TCSADRAIN: apply after pending output has been transmitted
        termios.tcsetattr(STDIN_FD, termios.TCSADRAIN, old_settings)
def main():
    """Interactive console loop mapping keypresses to motor actions."""
    pprint.pprint(sorted(ACTIONS.items()))
    print('Press numbers to move motors or escape/CTRL+C to exit.')
    while True:
        key = get_char()
        if key in EXIT_KEYS:
            return
        action = ACTIONS.get(key)
        if action is not None:
            move(action)
if __name__ == "__main__":
    main()
| import pprint
import time
import tty
import sys
import termios
from .motors import move
EXIT_KEYS = ['\x1b', '\x03'] # escape and CTRL+C
STDIN_FD = sys.stdin.fileno()
ACTIONS = {
'1': 'rotate_clockwise',
'3': 'rotate_anticlockwise',
'4': 'swing_clockwise',
'6': 'swing_anticlockwise',
'7': 'drop_spoon_small',
'9': 'drop_spoon_big',
}
def get_char():
old_settings = termios.tcgetattr(STDIN_FD)
try:
tty.setraw(STDIN_FD)
return sys.stdin.read(1)
finally:
termios.tcsetattr(STDIN_FD, termios.TCSADRAIN, old_settings)
def main():
pprint.pprint(sorted(ACTIONS.items()))
print('Press numbers to move motors or escape/CTRL+C to exit.')
while True:
char = get_char()
if char in EXIT_KEYS:
return
if char in ACTIONS.keys():
move(ACTIONS[char])
if __name__ == "__main__":
main() | en | 0.700218 | # escape and CTRL+C | 2.471913 | 2 |
inspector/tests/tests_engine.py | Samael500/social-inspector | 1 | 6620809 | # -*- coding: utf-8 -*-
import unittest
from inspector.engine import Inspector
from inspector import settings
from datetime import datetime, timedelta
import os
import shutil
class TestInspectorClass(unittest.TestCase):
    """ Test inspector class """
    def setUp(self):
        # redirect output into a throwaway directory for each test
        settings.RESULT_DIR = os.path.join(settings.BASE_DIR, 'test_out')
        self.inspector = Inspector()
    def test_inspector_langs(self):
        """ Check langs for inspector """
        for lang in ('ru', 'en', 'es', 'pt', 'de'):
            self.assertIn(lang, Inspector.languages)
    def test_inspector_classify(self):
        """ Check correct classify """
        self.assertEquals('positive', self.inspector.classify('happy'))
        self.assertEquals('negative', self.inspector.classify('sad'))
    def test_inspector_search(self):
        """ Search last-day tweets and check result types and map output. """
        # NOTE(review): this exercises the live Twitter search — it is
        # network-dependent and may be flaky offline.
        self.inspector.twitter.twitter.query_string = u'#test lang:en'
        self.inspector.twitter.twitter.timeout = 0
        since = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
        until = datetime.now().strftime('%Y-%m-%d')
        search_list = self.inspector.search(query=u'#test lang:en', since=since, until=until)
        for tweet in search_list:
            # Python 2 `unicode` checks; tuple layout presumably
            # (text, coordinates, ..., classification) — TODO confirm
            self.assertTrue(isinstance(tweet[0], unicode))
            self.assertTrue(isinstance(tweet[1], tuple))
            self.assertTrue(isinstance(tweet[2], unicode))
            self.assertTrue(isinstance(tweet[3], str))
        # check map
        self.assertFalse(os.path.exists(settings.RESULT_DIR))
        os.makedirs(settings.RESULT_DIR)
        self.assertTrue(os.path.exists(settings.RESULT_DIR))
        self.assertNotIn('.html', ''.join(os.listdir(settings.RESULT_DIR)))
        self.inspector.create_map(search_list, 'test')
        self.assertIn('.html', ''.join(os.listdir(settings.RESULT_DIR)))
        # clear result
        shutil.rmtree(settings.RESULT_DIR)
| # -*- coding: utf-8 -*-
import unittest
from inspector.engine import Inspector
from inspector import settings
from datetime import datetime, timedelta
import os
import shutil
class TestInspectorClass(unittest.TestCase):
""" Test inspector class """
def setUp(self):
settings.RESULT_DIR = os.path.join(settings.BASE_DIR, 'test_out')
self.inspector = Inspector()
def test_inspector_langs(self):
""" Check langs for inspector """
for lang in ('ru', 'en', 'es', 'pt', 'de'):
self.assertIn(lang, Inspector.languages)
def test_inspector_classify(self):
""" Check correct classify """
self.assertEquals('positive', self.inspector.classify('happy'))
self.assertEquals('negative', self.inspector.classify('sad'))
def test_inspector_search(self):
self.inspector.twitter.twitter.query_string = u'#test lang:en'
self.inspector.twitter.twitter.timeout = 0
since = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
until = datetime.now().strftime('%Y-%m-%d')
search_list = self.inspector.search(query=u'#test lang:en', since=since, until=until)
for tweet in search_list:
self.assertTrue(isinstance(tweet[0], unicode))
self.assertTrue(isinstance(tweet[1], tuple))
self.assertTrue(isinstance(tweet[2], unicode))
self.assertTrue(isinstance(tweet[3], str))
# check map
self.assertFalse(os.path.exists(settings.RESULT_DIR))
os.makedirs(settings.RESULT_DIR)
self.assertTrue(os.path.exists(settings.RESULT_DIR))
self.assertNotIn('.html', ''.join(os.listdir(settings.RESULT_DIR)))
self.inspector.create_map(search_list, 'test')
self.assertIn('.html', ''.join(os.listdir(settings.RESULT_DIR)))
# clear result
shutil.rmtree(settings.RESULT_DIR)
| en | 0.559411 | # -*- coding: utf-8 -*- Test inspector class Check langs for inspector Check correct classify # check map # clear result | 2.711752 | 3 |
examples/demo/workflows/python/python.py | SteNicholas/ai-flow | 0 | 6620810 | <filename>examples/demo/workflows/python/python.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List
from ai_flow_plugins.job_plugins import python
import ai_flow as af
from ai_flow_plugins.job_plugins.python.python_processor import ExecutionContext
# Define the HelloProcessor.
class HelloProcessor(python.PythonProcessor):
    """Minimal processor: prints a greeting and produces no outputs."""
    def process(self, execution_context: ExecutionContext, input_list: List) -> List:
        # input_list is ignored; returning [] means this job emits no data
        print("Hello World!")
        return []
# Initialize the project and workflow environment.
af.init_ai_flow_context()
# Define a job under the 'job_1' configuration from the workflow config file.
with af.job_config('job_1'):
    # Register the python job with the HelloProcessor defined above.
    af.user_define_operation(processor=HelloProcessor())
| <filename>examples/demo/workflows/python/python.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List
from ai_flow_plugins.job_plugins import python
import ai_flow as af
from ai_flow_plugins.job_plugins.python.python_processor import ExecutionContext
# Define the HelloProcessor.
class HelloProcessor(python.PythonProcessor):
def process(self, execution_context: ExecutionContext, input_list: List) -> List:
print("Hello World!")
return []
# Initialize the project and workflow environment.
af.init_ai_flow_context()
# Define a job with job_1 config.
with af.job_config('job_1'):
# Define the python job.
af.user_define_operation(processor=HelloProcessor())
| en | 0.857669 | # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # Define the HelloProcessor. # Initialize the project and workflow environment. # Define a job with job_1 config. # Define the python job. | 2.360489 | 2 |
ufld.py | Glutamat42/Ultra-Fast-Lane-Detection | 0 | 6620811 | import sys
from src import runtime, train
from src.common.config.global_config import cfg
if __name__ == "__main__":
    # do some basic cfg validation and call runtime or train according to mode
    if not cfg.data_root or not cfg.work_dir:
        raise Exception('data_root and work_dir have to be specified')
    if cfg.mode == 'runtime':
        # runtime additionally requires a checkpoint to load
        if not cfg.trained_model:
            raise Exception('define your trained_model')
        try:
            runtime.main()
        except KeyboardInterrupt:
            # allow a clean ctrl+c exit during inference
            print('quitting because of keyboard interrupt (probably ctrl + c)')
            sys.exit(0)
    elif cfg.mode == 'train':
        train.main()
    else:
        raise Exception('invalid mode')
| import sys
from src import runtime, train
from src.common.config.global_config import cfg
if __name__ == "__main__":
# do some basic cfg validation and call runtime or train according to mode
if not cfg.data_root or not cfg.work_dir:
raise Exception('data_root and work_dir have to be specified')
if cfg.mode == 'runtime':
if not cfg.trained_model:
raise Exception('define your trained_model')
try:
runtime.main()
except KeyboardInterrupt:
print('quitting because of keyboard interrupt (probably ctrl + c)')
sys.exit(0)
elif cfg.mode == 'train':
train.main()
else:
raise Exception('invalid mode')
| en | 0.866068 | # do some basic cfg validation and call runtime or train according to mode | 2.47383 | 2 |
example.py | shivck13/image_extractor | 0 | 6620812 | from extractor import GoogleImageExtractor
bot = GoogleImageExtractor("Rakul", 10, "./Rakul Images", False)
bot.apply_safesearch(False)
bot.apply_search_filters(color="red")
bot.run() | from extractor import GoogleImageExtractor
bot = GoogleImageExtractor("Rakul", 10, "./Rakul Images", False)
bot.apply_safesearch(False)
bot.apply_search_filters(color="red")
bot.run() | none | 1 | 1.795515 | 2 | |
Day 08/2.py | Xerisu/Advent-of-Code | 1 | 6620813 | INPUT_PATH = "./cursed-numbers.txt"
input_file = open(INPUT_PATH, "r")
temp = input_file.readlines()
input_file.close()
coded_numbers = [[[set(x) for x in x.split()] for x in line.split(" | ")] for line in temp]
good_order = [None, None, None, None, None, None, None]
codes = []
for line in coded_numbers:
five = []
six = []
for letter in line[0]:
if len(letter) == 2:
two_segments = letter
elif len(letter) == 3:
three_segments = letter
elif len(letter) == 4:
four_segments = letter
elif len(letter) == 5:
five.append(letter)
elif len(letter) == 6:
six.append(letter)
else:
seven_segments = letter
five_segments = five[0].intersection(five[1], five[2])
six_segments = six[0].intersection(six[1], six[2])
good_order[0] = three_segments.difference(two_segments)
good_order[3] = five_segments.intersection(four_segments)
good_order[6] = five_segments.difference(good_order[0].union(good_order[3]))
good_order[5] = six_segments.intersection(two_segments)
good_order[2] = two_segments.difference(good_order[5])
good_order[1] = six_segments.difference(good_order[0].union(good_order[5], good_order[6]))
good_order[4] = seven_segments.difference(good_order[0].union(good_order[0], good_order[2], good_order[3], good_order[1], good_order[5], good_order[6]))
code = ''
for letter in line[1]:
if len(letter) == 2:
code += '1'
elif len(letter) == 3:
code += '7'
elif len(letter) == 4:
code += '4'
elif letter == seven_segments.difference(good_order[1], good_order[5]):
code += '2'
elif letter == seven_segments.difference(good_order[1], good_order[4]):
code += '3'
elif letter == seven_segments.difference(good_order[2], good_order[4]):
code += '5'
elif letter == seven_segments.difference(good_order[3]):
code += '0'
elif letter == seven_segments.difference(good_order[2]):
code += '6'
elif letter == seven_segments.difference(good_order[4]):
code += '9'
else:
code += '8'
codes.append(int(code))
print(codes)
print(sum(codes)) | INPUT_PATH = "./cursed-numbers.txt"
input_file = open(INPUT_PATH, "r")
temp = input_file.readlines()
input_file.close()
coded_numbers = [[[set(x) for x in x.split()] for x in line.split(" | ")] for line in temp]
good_order = [None, None, None, None, None, None, None]
codes = []
for line in coded_numbers:
five = []
six = []
for letter in line[0]:
if len(letter) == 2:
two_segments = letter
elif len(letter) == 3:
three_segments = letter
elif len(letter) == 4:
four_segments = letter
elif len(letter) == 5:
five.append(letter)
elif len(letter) == 6:
six.append(letter)
else:
seven_segments = letter
five_segments = five[0].intersection(five[1], five[2])
six_segments = six[0].intersection(six[1], six[2])
good_order[0] = three_segments.difference(two_segments)
good_order[3] = five_segments.intersection(four_segments)
good_order[6] = five_segments.difference(good_order[0].union(good_order[3]))
good_order[5] = six_segments.intersection(two_segments)
good_order[2] = two_segments.difference(good_order[5])
good_order[1] = six_segments.difference(good_order[0].union(good_order[5], good_order[6]))
good_order[4] = seven_segments.difference(good_order[0].union(good_order[0], good_order[2], good_order[3], good_order[1], good_order[5], good_order[6]))
code = ''
for letter in line[1]:
if len(letter) == 2:
code += '1'
elif len(letter) == 3:
code += '7'
elif len(letter) == 4:
code += '4'
elif letter == seven_segments.difference(good_order[1], good_order[5]):
code += '2'
elif letter == seven_segments.difference(good_order[1], good_order[4]):
code += '3'
elif letter == seven_segments.difference(good_order[2], good_order[4]):
code += '5'
elif letter == seven_segments.difference(good_order[3]):
code += '0'
elif letter == seven_segments.difference(good_order[2]):
code += '6'
elif letter == seven_segments.difference(good_order[4]):
code += '9'
else:
code += '8'
codes.append(int(code))
print(codes)
print(sum(codes)) | none | 1 | 3.484712 | 3 | |
nlci-deva.py | nlci/charsets | 0 | 6620814 | <filename>nlci-deva.py
# -*- coding: utf8 -*-
# Copyright 2017-2019 NLCI (http://www.nlci.in/fonts/)
# Apache License v2.0
class Charset:
common_name = 'NLCI: Devanagari script'
native_name = 'Devanagari script'
abbreviation = 'Deva'
key = 0x0915
glyphs = \
[0x0310] + \
list(range(0x0900, 0x097F+1))
| <filename>nlci-deva.py
# -*- coding: utf8 -*-
# Copyright 2017-2019 NLCI (http://www.nlci.in/fonts/)
# Apache License v2.0
class Charset:
common_name = 'NLCI: Devanagari script'
native_name = 'Devanagari script'
abbreviation = 'Deva'
key = 0x0915
glyphs = \
[0x0310] + \
list(range(0x0900, 0x097F+1))
| en | 0.344222 | # -*- coding: utf8 -*- # Copyright 2017-2019 NLCI (http://www.nlci.in/fonts/) # Apache License v2.0 | 1.299102 | 1 |
assignments/08_lists_and_tuples/8-5_tic_tac_toe/text.py | MrDDaye/cna_cp1855 | 0 | 6620815 | """CLI and text entities of tic tac toe game."""
def display_grid(grid: list[list[str]], vertice_chr, vert_chr, hori_chr) -> None:
"""Display the current tic tac toe grid."""
print()
for row in grid:
_display_horizontal_divider(grid, vertice_chr, hori_chr)
_display_row(row, vert_chr)
_display_horizontal_divider(grid, vertice_chr, hori_chr)
print()
def _display_horizontal_divider(grid: list[list[str]], vertice_chr, border_chr) -> None:
"""Display the horizontal divider for the grid."""
print(((vertice_chr+border_chr)*len(grid))+vertice_chr)
def _display_row(row: list[str], border_chr) -> None:
"""Display the row with dividers between values for the grid."""
display: str = ''
for value in row:
display += f'{border_chr} {value} '
print(display + border_chr)
def display_farewell() -> None:
"""Display a farewell message."""
print('Game over!')
def display_player_turn(player: str) -> None:
"""Display the current players turn."""
print(f'{player}\'s turn')
def display_winner(player: str) -> None:
"""Display the winner of the game."""
print(f'{player} wins!\n')
def display_tie() -> None:
"""Display indication that the game is a tie."""
print('It\'s a tie!\n')
| """CLI and text entities of tic tac toe game."""
def display_grid(grid: list[list[str]], vertice_chr, vert_chr, hori_chr) -> None:
"""Display the current tic tac toe grid."""
print()
for row in grid:
_display_horizontal_divider(grid, vertice_chr, hori_chr)
_display_row(row, vert_chr)
_display_horizontal_divider(grid, vertice_chr, hori_chr)
print()
def _display_horizontal_divider(grid: list[list[str]], vertice_chr, border_chr) -> None:
"""Display the horizontal divider for the grid."""
print(((vertice_chr+border_chr)*len(grid))+vertice_chr)
def _display_row(row: list[str], border_chr) -> None:
"""Display the row with dividers between values for the grid."""
display: str = ''
for value in row:
display += f'{border_chr} {value} '
print(display + border_chr)
def display_farewell() -> None:
"""Display a farewell message."""
print('Game over!')
def display_player_turn(player: str) -> None:
"""Display the current players turn."""
print(f'{player}\'s turn')
def display_winner(player: str) -> None:
"""Display the winner of the game."""
print(f'{player} wins!\n')
def display_tie() -> None:
"""Display indication that the game is a tie."""
print('It\'s a tie!\n')
| en | 0.841808 | CLI and text entities of tic tac toe game. Display the current tic tac toe grid. Display the horizontal divider for the grid. Display the row with dividers between values for the grid. Display a farewell message. Display the current players turn. Display the winner of the game. Display indication that the game is a tie. | 3.646879 | 4 |
1_shinno/sample/w2v2.py | yfur/dl-chainer | 0 | 6620816 | #!/usr/bin/env python
import numpy as np
import chainer
from chainer import cuda, Function, gradient_check, Variable, \
optimizers, serializers, utils
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from chainer.utils import walker_alias
import collections
# Set data
index2word = {}
word2index = {}
counts = collections.Counter()
dataset = []
with open('ptb.train.txt') as f:
for line in f:
for word in line.split():
if word not in word2index:
ind = len(word2index)
word2index[word] = ind
index2word[ind] = word
counts[word2index[word]] += 1
dataset.append(word2index[word])
n_vocab = len(word2index)
datasize = len(dataset)
cs = [counts[w] for w in range(len(counts))]
power = np.float32(0.75)
p = np.array(cs, power.dtype)
sampler = walker_alias.WalkerAlias(p)
# Define model
class MyW2V2(chainer.Chain):
def __init__(self, v, m):
super(MyW2V2, self).__init__(
embed = L.EmbedID(v,m),
)
def __call__(self, xb, eb, sampler, ngs):
loss = None
for i in range(len(xb)):
x = Variable(np.array([xb[i]], dtype=np.int32))
e = eb[i]
ls = F.negative_sampling(e, x, self.embed.W, sampler, ngs)
loss = ls if loss is None else loss + ls
return loss
# my functions
ws = 3 ### window size
def mkbatset(model, dataset, ids):
xb, eb = [], []
for pos in ids:
xid = dataset[pos]
for i in range(1,ws):
p = pos - i
if p >= 0:
xb.append(xid)
eid = dataset[p]
eidv = Variable(np.array([eid], dtype=np.int32))
ev = model.embed(eidv)
eb.append(ev)
p = pos + i
if p < datasize:
xb.append(xid)
eid = dataset[p]
eidv = Variable(np.array([eid], dtype=np.int32))
ev = model.embed(eidv)
eb.append(ev)
return [xb, eb]
# Initialize model
model = MyW2V2(n_vocab, 100)
optimizer = optimizers.Adam()
optimizer.setup(model)
# Learn
bs = 50
ngs = 5
for epoch in range(10):
print('epoch: {0}'.format(epoch))
indexes = np.random.permutation(datasize)
for pos in range(0, datasize, bs):
print epoch, pos
ids = indexes[pos:(pos+bs) if (pos+bs) < datasize else datasize]
xb, eb = mkbatset(model, dataset, ids)
model.zerograds()
loss = model(xb, eb, sampler.sample, ngs)
loss.backward()
optimizer.update()
# Save model
with open('w2v2.model', 'w') as f:
f.write('%d %d\n' % (len(index2word), 100))
w = model.lf.W.data
for i in range(w.shape[0]):
v = ' '.join(['%f' % v for v in w[i]])
f.write('%s %s\n' % (index2word[i], v))
| #!/usr/bin/env python
import numpy as np
import chainer
from chainer import cuda, Function, gradient_check, Variable, \
optimizers, serializers, utils
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from chainer.utils import walker_alias
import collections
# Set data
index2word = {}
word2index = {}
counts = collections.Counter()
dataset = []
with open('ptb.train.txt') as f:
for line in f:
for word in line.split():
if word not in word2index:
ind = len(word2index)
word2index[word] = ind
index2word[ind] = word
counts[word2index[word]] += 1
dataset.append(word2index[word])
n_vocab = len(word2index)
datasize = len(dataset)
cs = [counts[w] for w in range(len(counts))]
power = np.float32(0.75)
p = np.array(cs, power.dtype)
sampler = walker_alias.WalkerAlias(p)
# Define model
class MyW2V2(chainer.Chain):
def __init__(self, v, m):
super(MyW2V2, self).__init__(
embed = L.EmbedID(v,m),
)
def __call__(self, xb, eb, sampler, ngs):
loss = None
for i in range(len(xb)):
x = Variable(np.array([xb[i]], dtype=np.int32))
e = eb[i]
ls = F.negative_sampling(e, x, self.embed.W, sampler, ngs)
loss = ls if loss is None else loss + ls
return loss
# my functions
ws = 3 ### window size
def mkbatset(model, dataset, ids):
xb, eb = [], []
for pos in ids:
xid = dataset[pos]
for i in range(1,ws):
p = pos - i
if p >= 0:
xb.append(xid)
eid = dataset[p]
eidv = Variable(np.array([eid], dtype=np.int32))
ev = model.embed(eidv)
eb.append(ev)
p = pos + i
if p < datasize:
xb.append(xid)
eid = dataset[p]
eidv = Variable(np.array([eid], dtype=np.int32))
ev = model.embed(eidv)
eb.append(ev)
return [xb, eb]
# Initialize model
model = MyW2V2(n_vocab, 100)
optimizer = optimizers.Adam()
optimizer.setup(model)
# Learn
bs = 50
ngs = 5
for epoch in range(10):
print('epoch: {0}'.format(epoch))
indexes = np.random.permutation(datasize)
for pos in range(0, datasize, bs):
print epoch, pos
ids = indexes[pos:(pos+bs) if (pos+bs) < datasize else datasize]
xb, eb = mkbatset(model, dataset, ids)
model.zerograds()
loss = model(xb, eb, sampler.sample, ngs)
loss.backward()
optimizer.update()
# Save model
with open('w2v2.model', 'w') as f:
f.write('%d %d\n' % (len(index2word), 100))
w = model.lf.W.data
for i in range(w.shape[0]):
v = ' '.join(['%f' % v for v in w[i]])
f.write('%s %s\n' % (index2word[i], v))
| en | 0.453865 | #!/usr/bin/env python # Set data # Define model # my functions ### window size # Initialize model # Learn # Save model | 2.328783 | 2 |
src/backend/rpaper/apps/reservations/api/urls.py | lambdalisue/rpaper | 0 | 6620817 | <gh_stars>0
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from .views import (
ThingCreateAPIView,
ThingRetrieveUpdateDestroyAPIView,
RecordListCreateAPIView,
RecordRetrieveUpdateDestroyAPIView,
)
urlpatterns = [
url(r'^$',
ThingCreateAPIView.as_view(),
name='things-list'),
url(r'^(?P<pk>\w+)/$',
ThingRetrieveUpdateDestroyAPIView.as_view(),
name='things-detail'),
url(r'^(?P<thing_pk>\w+)/records/$',
RecordListCreateAPIView.as_view(),
name='records-list'),
url(r'^(?P<thing_pk>\w+)/records/(?P<pk>\w+)/$',
RecordRetrieveUpdateDestroyAPIView.as_view(),
name='records-detail'),
]
urlpatterns = format_suffix_patterns(urlpatterns, allowed=['json', 'html'])
| from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from .views import (
ThingCreateAPIView,
ThingRetrieveUpdateDestroyAPIView,
RecordListCreateAPIView,
RecordRetrieveUpdateDestroyAPIView,
)
urlpatterns = [
url(r'^$',
ThingCreateAPIView.as_view(),
name='things-list'),
url(r'^(?P<pk>\w+)/$',
ThingRetrieveUpdateDestroyAPIView.as_view(),
name='things-detail'),
url(r'^(?P<thing_pk>\w+)/records/$',
RecordListCreateAPIView.as_view(),
name='records-list'),
url(r'^(?P<thing_pk>\w+)/records/(?P<pk>\w+)/$',
RecordRetrieveUpdateDestroyAPIView.as_view(),
name='records-detail'),
]
urlpatterns = format_suffix_patterns(urlpatterns, allowed=['json', 'html']) | none | 1 | 2.089551 | 2 | |
pyro/poutine/reparam_messenger.py | alexander-held/pyro | 0 | 6620818 | from collections import OrderedDict
import torch
from pyro.distributions.delta import Delta
from .messenger import Messenger
from .runtime import apply_stack
class ReparamMessenger(Messenger):
"""
Reparametrizes each affected sample site into one or more auxiliary sample
sites followed by a deterministic transformation [1].
To specify :class:`~pyro.distributions.reparameterize.Reparameterizer` s,
either pass a ``config`` dict to the constructor, configure
``site["infer"]["reparam"] = my_reparameterizer`` for each desired sample
site, or use :func:`~pyro.poutine.infer_config` .
See `available reparameterizers <distributions.html#reparameterizers>`_
.. warning:: Reparameterizers are recursive; take care to avoid infinite
loops in your ``config`` filters.
[1] <NAME>, <NAME>, <NAME> (2019)
"Automatic Reparameterisation of Probabilistic Programs"
https://arxiv.org/pdf/1906.03028.pdf
:param dict config: Optional configuration mapping site name to
:class:`~pyro.distributions.reparameterize.Reparameterizer` object.
"""
def __init__(self, config=None):
super().__init__()
if config is None:
config = {}
self.config = config
def _pyro_sample(self, msg):
if msg["is_observed"]:
return None
if msg["name"] in self.config:
msg["infer"]["reparam"] = self.config[msg["name"]]
reparam = msg["infer"].get("reparam")
if reparam is None:
return None
# Create auxiliary sites.
new_fns = reparam.get_dists(msg["fn"])
assert isinstance(new_fns, OrderedDict)
new_values = OrderedDict()
for name, fn in new_fns.items():
new_msg = msg.copy()
new_msg["name"] = "{}_{}".format(msg["name"], name)
new_msg["fn"] = fn
new_msg["cond_indep_stack"] = ()
new_msg["infer"] = new_msg["infer"].copy()
new_msg["infer"]["reparam"] = None
apply_stack(new_msg)
new_values[name] = new_msg["value"]
# Combine auxiliary values via pyro.deterministic().
# TODO(https://github.com/pyro-ppl/pyro/issues/2214) refactor to
# use site type "deterministic" when it exists.
value = reparam.transform_values(msg["fn"], new_values)
assert isinstance(value, torch.Tensor)
if getattr(msg["fn"], "_validation_enabled", False):
# Validate while the original msg["fn"] is known.
msg["fn"]._validate_sample(value)
msg["value"] = value
msg["fn"] = Delta(value, event_dim=msg["fn"].event_dim).mask(False)
msg["is_observed"] = True
| from collections import OrderedDict
import torch
from pyro.distributions.delta import Delta
from .messenger import Messenger
from .runtime import apply_stack
class ReparamMessenger(Messenger):
"""
Reparametrizes each affected sample site into one or more auxiliary sample
sites followed by a deterministic transformation [1].
To specify :class:`~pyro.distributions.reparameterize.Reparameterizer` s,
either pass a ``config`` dict to the constructor, configure
``site["infer"]["reparam"] = my_reparameterizer`` for each desired sample
site, or use :func:`~pyro.poutine.infer_config` .
See `available reparameterizers <distributions.html#reparameterizers>`_
.. warning:: Reparameterizers are recursive; take care to avoid infinite
loops in your ``config`` filters.
[1] <NAME>, <NAME>, <NAME> (2019)
"Automatic Reparameterisation of Probabilistic Programs"
https://arxiv.org/pdf/1906.03028.pdf
:param dict config: Optional configuration mapping site name to
:class:`~pyro.distributions.reparameterize.Reparameterizer` object.
"""
def __init__(self, config=None):
super().__init__()
if config is None:
config = {}
self.config = config
def _pyro_sample(self, msg):
if msg["is_observed"]:
return None
if msg["name"] in self.config:
msg["infer"]["reparam"] = self.config[msg["name"]]
reparam = msg["infer"].get("reparam")
if reparam is None:
return None
# Create auxiliary sites.
new_fns = reparam.get_dists(msg["fn"])
assert isinstance(new_fns, OrderedDict)
new_values = OrderedDict()
for name, fn in new_fns.items():
new_msg = msg.copy()
new_msg["name"] = "{}_{}".format(msg["name"], name)
new_msg["fn"] = fn
new_msg["cond_indep_stack"] = ()
new_msg["infer"] = new_msg["infer"].copy()
new_msg["infer"]["reparam"] = None
apply_stack(new_msg)
new_values[name] = new_msg["value"]
# Combine auxiliary values via pyro.deterministic().
# TODO(https://github.com/pyro-ppl/pyro/issues/2214) refactor to
# use site type "deterministic" when it exists.
value = reparam.transform_values(msg["fn"], new_values)
assert isinstance(value, torch.Tensor)
if getattr(msg["fn"], "_validation_enabled", False):
# Validate while the original msg["fn"] is known.
msg["fn"]._validate_sample(value)
msg["value"] = value
msg["fn"] = Delta(value, event_dim=msg["fn"].event_dim).mask(False)
msg["is_observed"] = True
| en | 0.578667 | Reparametrizes each affected sample site into one or more auxiliary sample sites followed by a deterministic transformation [1]. To specify :class:`~pyro.distributions.reparameterize.Reparameterizer` s, either pass a ``config`` dict to the constructor, configure ``site["infer"]["reparam"] = my_reparameterizer`` for each desired sample site, or use :func:`~pyro.poutine.infer_config` . See `available reparameterizers <distributions.html#reparameterizers>`_ .. warning:: Reparameterizers are recursive; take care to avoid infinite loops in your ``config`` filters. [1] <NAME>, <NAME>, <NAME> (2019) "Automatic Reparameterisation of Probabilistic Programs" https://arxiv.org/pdf/1906.03028.pdf :param dict config: Optional configuration mapping site name to :class:`~pyro.distributions.reparameterize.Reparameterizer` object. # Create auxiliary sites. # Combine auxiliary values via pyro.deterministic(). # TODO(https://github.com/pyro-ppl/pyro/issues/2214) refactor to # use site type "deterministic" when it exists. # Validate while the original msg["fn"] is known. | 2.133502 | 2 |
mctweetyface/test/__init__.py | jacebrowning/mctweetface | 1 | 6620819 | """Unit tests for the `mctweetyface` package."""
| """Unit tests for the `mctweetyface` package."""
| en | 0.544545 | Unit tests for the `mctweetyface` package. | 0.879675 | 1 |
myutils/tools.py | mmiikeke/calligraphy_project | 0 | 6620820 | <reponame>mmiikeke/calligraphy_project
import os
import pandas as pd
import requests
def get_files_path(root_path, end_string, interrupt = True):
filepath_list = list()
for root, dirs, files in os.walk(root_path):
for file in files:
if file.endswith(end_string):
filepath_list.append(os.path.join(root, file))
if (interrupt and len(filepath_list) == 0):
raise ValueError('Error: Can\'t find any ' + end_string + ' file in ' + root_path)
return filepath_list
def download_img(link, savepath, warning = True, show_info=True):
dirpath = os.path.dirname(savepath)
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
if os.path.isfile(savepath):
print('Warning: Image file already exists! ' + savepath)
img = requests.get(link)
if show_info:
print('Save file: ' + savepath)
with open(savepath, "wb") as file:
file.write(img.content)
def save_csv(df, savepath, index = False, header = False, warning = True, show_info=True):
dirpath = os.path.dirname(savepath)
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
if os.path.isfile(savepath):
print('Warning: Csv file already exists! ' + savepath)
if show_info:
print('Save file: ' + savepath)
df.to_csv(savepath, index=index, header=header)
def save_xlsx(df, savepath, index = False, header = False, warning = True, show_info=True):
dirpath = os.path.dirname(savepath)
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
if os.path.isfile(savepath):
print('Warning: Csv file already exists! ' + savepath)
if show_info:
print('Save file: ' + savepath)
df.to_excel(savepath, index=index, header=header) | import os
import pandas as pd
import requests
def get_files_path(root_path, end_string, interrupt = True):
filepath_list = list()
for root, dirs, files in os.walk(root_path):
for file in files:
if file.endswith(end_string):
filepath_list.append(os.path.join(root, file))
if (interrupt and len(filepath_list) == 0):
raise ValueError('Error: Can\'t find any ' + end_string + ' file in ' + root_path)
return filepath_list
def download_img(link, savepath, warning = True, show_info=True):
dirpath = os.path.dirname(savepath)
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
if os.path.isfile(savepath):
print('Warning: Image file already exists! ' + savepath)
img = requests.get(link)
if show_info:
print('Save file: ' + savepath)
with open(savepath, "wb") as file:
file.write(img.content)
def save_csv(df, savepath, index = False, header = False, warning = True, show_info=True):
dirpath = os.path.dirname(savepath)
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
if os.path.isfile(savepath):
print('Warning: Csv file already exists! ' + savepath)
if show_info:
print('Save file: ' + savepath)
df.to_csv(savepath, index=index, header=header)
def save_xlsx(df, savepath, index = False, header = False, warning = True, show_info=True):
dirpath = os.path.dirname(savepath)
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
if os.path.isfile(savepath):
print('Warning: Csv file already exists! ' + savepath)
if show_info:
print('Save file: ' + savepath)
df.to_excel(savepath, index=index, header=header) | none | 1 | 3.031895 | 3 | |
codeforces/greedy贪心/1200/1200B块游戏.py | yofn/pyacm | 0 | 6620821 | <reponame>yofn/pyacm<filename>codeforces/greedy贪心/1200/1200B块游戏.py
#!/usr/bin/env python3
#https://codeforces.com/problemset/problem/1200/B
#n=列数;m=包里初始块;k=允许向右的非负正数(i->i+1)
def fPass(hl,n,m,k):
for i in range(n-1):
m += hl[i]-max(hl[i+1]-k,0) #NOTE: the lower bound 0!!
if m<0:
return False
return True
tc = int(input())
rl = []
for i in range(tc):
n,m,k = list(map(int,input().split()))
hl = list(map(int,input().split()))
rl.append(fPass(hl,n,m,k))
[print('YES' if r else 'NO') for r in rl]
| #!/usr/bin/env python3
#https://codeforces.com/problemset/problem/1200/B
#n=列数;m=包里初始块;k=允许向右的非负正数(i->i+1)
def fPass(hl,n,m,k):
for i in range(n-1):
m += hl[i]-max(hl[i+1]-k,0) #NOTE: the lower bound 0!!
if m<0:
return False
return True
tc = int(input())
rl = []
for i in range(tc):
n,m,k = list(map(int,input().split()))
hl = list(map(int,input().split()))
rl.append(fPass(hl,n,m,k))
[print('YES' if r else 'NO') for r in rl] | en | 0.49946 | #!/usr/bin/env python3 #https://codeforces.com/problemset/problem/1200/B #n=列数;m=包里初始块;k=允许向右的非负正数(i->i+1) #NOTE: the lower bound 0!! | 2.862106 | 3 |
textminer/validator.py | gcrowder/express-yourself | 0 | 6620822 | <filename>textminer/validator.py
import re
def binary(string):
return re.match(r'^[0,1]+$', string)
def binary_even(string):
if binary(string):
return not int(string[-1])
else:
return False
def hex(string):
return re.match(r'^[0-9, A-F]+$', string)
def word(string):
return re.match(r'^\w*[A-Za-z-]\w*$', string)
def words(string, count=''):
words = re.findall(r'\w*[A-Za-z-]\w*', string)
if type(count) == int:
return words and len(words) == count
else:
return words
def phone_number(string):
return re.search(r'(\d{3})\D*(\d{3})\D*(\d{4})\D*(\d*)$', string)
def money(string):
return re.search(r'^((^\$\d{1,}\.\d{2})|(^\$\d{1,3},\d{3},\d{3}\.\d{2})|(^\$\d,\d{3}\.\d{2})|(^\$\d{1,3},\d{3},\d{3})|(^\$\d{1,3},\d{3})|(^\$\d{1,4}))+$', string)
def zipcode(string):
return re.search(r'^(^\d{5}-\d{4})|(^\d{5})+$', string)
def date(string):
d_m_y = re.search(r'(\b\d{1,2})[-\/:](\d{1,2})[-\/:](\d{4}\b)', string)
y_m_d = re.search(r'(\d{4})[-\/](\d{2})[-\/](\d{2})', string)
return d_m_y or y_m_d
| <filename>textminer/validator.py
import re
def binary(string):
return re.match(r'^[0,1]+$', string)
def binary_even(string):
if binary(string):
return not int(string[-1])
else:
return False
def hex(string):
return re.match(r'^[0-9, A-F]+$', string)
def word(string):
return re.match(r'^\w*[A-Za-z-]\w*$', string)
def words(string, count=''):
words = re.findall(r'\w*[A-Za-z-]\w*', string)
if type(count) == int:
return words and len(words) == count
else:
return words
def phone_number(string):
return re.search(r'(\d{3})\D*(\d{3})\D*(\d{4})\D*(\d*)$', string)
def money(string):
return re.search(r'^((^\$\d{1,}\.\d{2})|(^\$\d{1,3},\d{3},\d{3}\.\d{2})|(^\$\d,\d{3}\.\d{2})|(^\$\d{1,3},\d{3},\d{3})|(^\$\d{1,3},\d{3})|(^\$\d{1,4}))+$', string)
def zipcode(string):
return re.search(r'^(^\d{5}-\d{4})|(^\d{5})+$', string)
def date(string):
d_m_y = re.search(r'(\b\d{1,2})[-\/:](\d{1,2})[-\/:](\d{4}\b)', string)
y_m_d = re.search(r'(\d{4})[-\/](\d{2})[-\/](\d{2})', string)
return d_m_y or y_m_d
| none | 1 | 3.120757 | 3 | |
settings/__init__.py | bameda/icarus-twitter-bot | 1 | 6620823 | import sys
try:
from . import local
except ImportError:
local = {}
print('No "settings/local.py" file found.', file=sys.stderr)
ACCESS_TOKEN = getattr(local, 'ACCESS_TOKEN', 'undefined')
ACCESS_SECRET = getattr(local, 'ACCESS_SECRET', 'undefined')
CONSUMER_KEY = getattr(local, 'CONSUMER_KEY', 'undefined')
CONSUMER_SECRET = getattr(local, 'CONSUMER_SECRET', 'undefined')
| import sys
try:
from . import local
except ImportError:
local = {}
print('No "settings/local.py" file found.', file=sys.stderr)
ACCESS_TOKEN = getattr(local, 'ACCESS_TOKEN', 'undefined')
ACCESS_SECRET = getattr(local, 'ACCESS_SECRET', 'undefined')
CONSUMER_KEY = getattr(local, 'CONSUMER_KEY', 'undefined')
CONSUMER_SECRET = getattr(local, 'CONSUMER_SECRET', 'undefined')
| none | 1 | 2.002952 | 2 | |
emulation.py | joernschellhaas/kivybooth | 0 | 6620824 | import os
def active():
return True if os.getenv("KBOOTH_EMULATE") else False
| import os
def active():
return True if os.getenv("KBOOTH_EMULATE") else False
| none | 1 | 1.588954 | 2 | |
main.py | akirsche/geoLIMES | 0 | 6620825 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
from sys import path
from config import load_config
from geolimes import goeLIMES
path.append("${HOME}/.local/lib/python3.7/site-packages/")
def get_arguments():
parser = ArgumentParser(description="Python LIMES")
parser.add_argument("-c", "--config", type=str, dest="config_file", help="Path to a config file", required=True)
parser.add_argument("-d", "--database", type=str, dest="database_config_file", help="Path to a database config file", required=True)
parser.add_argument("-v", "--version", action="version", version="0.0.1", help="Show program version and exit")
arguments = parser.parse_args()
return arguments.config_file, arguments.database_config_file
def main():
try:
connfig_file_path, database_config_file_path = get_arguments()
config = load_config(connfig_file_path)
database_config = load_config(database_config_file_path)
limes = goeLIMES(database_config)
limes.run(config)
except FileNotFoundError as e:
print(e)
if __name__ == "__main__":
main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
from sys import path
from config import load_config
from geolimes import goeLIMES
path.append("${HOME}/.local/lib/python3.7/site-packages/")
def get_arguments():
parser = ArgumentParser(description="Python LIMES")
parser.add_argument("-c", "--config", type=str, dest="config_file", help="Path to a config file", required=True)
parser.add_argument("-d", "--database", type=str, dest="database_config_file", help="Path to a database config file", required=True)
parser.add_argument("-v", "--version", action="version", version="0.0.1", help="Show program version and exit")
arguments = parser.parse_args()
return arguments.config_file, arguments.database_config_file
def main():
try:
connfig_file_path, database_config_file_path = get_arguments()
config = load_config(connfig_file_path)
database_config = load_config(database_config_file_path)
limes = goeLIMES(database_config)
limes.run(config)
except FileNotFoundError as e:
print(e)
if __name__ == "__main__":
main() | en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 2.645053 | 3 |
oled_object.py | vanfalen/simple-pi-oled | 0 | 6620826 | <filename>oled_object.py
class oled_object(object):
def __init__(self,init_x,init_y,display,draw,image,lock,show=False):
self.current_x=init_x
self.current_y=init_y
self.display=display
self.draw=draw
self.image=image
self.lock=lock
if show:
with self.lock:
self.drawObject()
def drawObject(self):
self.display.image(self.image)
self.display.show()
def moveShape(self,delta_x,delta_y):
with self.lock:
self.deleteObject()
self.current_x+=delta_x
self.current_y+=delta_y
self.drawShape()
def deleteObject(self):
pass
def teleportObject(self,new_x,new_y):
with self.lock:
self.deleteObject()
self.current_x=new_x
self.current_y=new_y
self.drawObject()
| <filename>oled_object.py
class oled_object(object):
def __init__(self,init_x,init_y,display,draw,image,lock,show=False):
self.current_x=init_x
self.current_y=init_y
self.display=display
self.draw=draw
self.image=image
self.lock=lock
if show:
with self.lock:
self.drawObject()
def drawObject(self):
self.display.image(self.image)
self.display.show()
def moveShape(self,delta_x,delta_y):
with self.lock:
self.deleteObject()
self.current_x+=delta_x
self.current_y+=delta_y
self.drawShape()
def deleteObject(self):
pass
def teleportObject(self,new_x,new_y):
with self.lock:
self.deleteObject()
self.current_x=new_x
self.current_y=new_y
self.drawObject()
| none | 1 | 2.840559 | 3 | |
lintcode/easy/add_digits/py/add_digits.py | lilsweetcaligula/Online-Judges | 0 | 6620827 | class Solution:
# @param {int} num a non-negative integer
# @return {int} one digit
def addDigits(self, num):
while len(str(num)) > 1:
num = sum(map(int, str(num)))
return num
| class Solution:
# @param {int} num a non-negative integer
# @return {int} one digit
def addDigits(self, num):
while len(str(num)) > 1:
num = sum(map(int, str(num)))
return num
| en | 0.126969 | # @param {int} num a non-negative integer # @return {int} one digit | 3.277385 | 3 |
rads/xml/lxml.py | ccarocean/python-rads | 0 | 6620828 | <gh_stars>0
"""XML tools using the lxml_ library.
.. _lxml: https://lxml.de/
"""
from typing import (
IO,
TYPE_CHECKING,
Any,
Iterator,
Mapping,
Optional,
Sequence,
Text,
Union,
cast,
)
from lxml import etree # type: ignore
from lxml.etree import ETCompatXMLParser, ParseError, XMLParser # type: ignore
from ..xml import base
# TODO: Change to functools.cached_property when dropping support for
# Python 3.7
if TYPE_CHECKING:
# property behaves properly with Mypy but cached_property does not, even
# with the same type stub.
cached_property = property
else:
from cached_property import cached_property
__all__ = [
"ParseError",
"Element",
"XMLParser",
"parse",
"fromstring",
"fromstringlist",
"error_with_file",
]
class Element(base.Element):
"""XML element that encapsulates an element from lxml_.
Supports line number examination.
.. _lxml: https://lxml.de/
"""
def __init__(self, element: etree._Element, *, file: Optional[str] = None):
"""
:param:
XML element from the lxml_ library.
:param file:
Optional filename/path the element is from.
"""
self._element = element
self._file = file
def __len__(self) -> int:
return len(self._element)
def __iter__(self) -> Iterator["Element"]:
for e in self._element:
yield Element(e, file=self._file)
def next(self) -> "Element": # noqa: D102
element = self._element.getnext()
if element is None:
raise StopIteration()
return Element(element, file=self._file)
def prev(self) -> "Element": # noqa: D102
element = self._element.getprevious()
if element is None:
raise StopIteration()
return Element(element, file=self._file)
def up(self) -> "Element": # noqa: D102
element = self._element.getparent()
if element is None:
raise StopIteration()
return Element(element, file=self._file)
def down(self) -> "Element": # noqa: D102
# throws StopIteration if there are no children
return Element(next(self._element.iterchildren()), file=self._file)
@property
def file(self) -> str:
if self._file:
return self._file
return cast(str, self._element.base)
@property
def opening_line(self) -> int:
return cast(int, self._element.sourceline)
@cached_property
def num_lines(self) -> int:
return len(etree.tostring(self._element).strip().split(b"\n"))
@cached_property
def closing_line(self) -> int:
return self.opening_line + self.num_lines - 1
@property
def tag(self) -> str:
return cast(str, self._element.tag)
@property
def text(self) -> Optional[str]:
return cast(str, self._element.text)
@property
def attributes(self) -> Mapping[str, str]:
return cast(Mapping[str, str], self._element.attrib)
_ParserInputType = Union[bytes, Text]
_FileOrFilename = Union[str, bytes, int, IO[Any]]
# The following functions are here to make lxml more compatible with etree.
def parse(
source: _FileOrFilename, parser: Optional[XMLParser] = None
) -> etree._ElementTree:
"""Parse XML document into element tree.
This is wrapper around :func:`lxml.etree.parse` to make it behave like
:func:`xml.etree.ElementTree.parse`.
:param source:
Filename or file object containing XML data.
:param parser:
Optional parser instance, defaulting to
:class:`lxml.etree.ETCompatXMLParser`.
:return:
An ElementTree instance.
"""
if parser is None:
parser = ETCompatXMLParser()
return etree.parse(source, parser)
def fromstring(
text: _ParserInputType, parser: Optional[XMLParser] = None
) -> etree._Element:
"""Parse XML document from string constant.
This function can be used to embed 'XML Literals' in Python code.
This is wrapper around :func:`lxml.etree.fromstring` to make it behave like
:func:`xml.etree.ElementTree.fromtstring`.
:param text:
A string containing XML data.
:param parser:
Optional parser instance, defaulting to
:class:`lxml.etree.ETCompatXMLParser`.
:return:
An Element instance.
"""
if parser is None:
parser = ETCompatXMLParser()
return etree.fromstring(text, parser)
def fromstringlist(
sequence: Sequence[_ParserInputType], parser: Optional[XMLParser] = None
) -> etree._Element:
"""Parse XML document from sequence of string fragments.
:param sequence:
A list or other sequence of strings containing XML data.
:param parser:
Optional parser instance, defaulting to
:class:`lxml.etree.ETCompatXMLParser`.
:return:
An Element instance.
"""
if parser is None:
parser = ETCompatXMLParser()
return etree.fromstringlist(sequence, parser)
def error_with_file(error: ParseError, file: str) -> ParseError:
"""Add filename to an XML parse error.
:param error:
Original XML parse error.
:param file:
Filename to add.
:return:
A new parse error (of the same type as `error`) with the `filename`
added.
"""
error.filename = file
return type(error)(
error.msg, error.code, error.position[0], error.position[1], file
)
| """XML tools using the lxml_ library.
.. _lxml: https://lxml.de/
"""
from typing import (
IO,
TYPE_CHECKING,
Any,
Iterator,
Mapping,
Optional,
Sequence,
Text,
Union,
cast,
)
from lxml import etree # type: ignore
from lxml.etree import ETCompatXMLParser, ParseError, XMLParser # type: ignore
from ..xml import base
# TODO: Change to functools.cached_property when dropping support for
# Python 3.7
if TYPE_CHECKING:
# property behaves properly with Mypy but cached_property does not, even
# with the same type stub.
cached_property = property
else:
from cached_property import cached_property
__all__ = [
"ParseError",
"Element",
"XMLParser",
"parse",
"fromstring",
"fromstringlist",
"error_with_file",
]
class Element(base.Element):
"""XML element that encapsulates an element from lxml_.
Supports line number examination.
.. _lxml: https://lxml.de/
"""
def __init__(self, element: etree._Element, *, file: Optional[str] = None):
"""
:param:
XML element from the lxml_ library.
:param file:
Optional filename/path the element is from.
"""
self._element = element
self._file = file
def __len__(self) -> int:
return len(self._element)
def __iter__(self) -> Iterator["Element"]:
for e in self._element:
yield Element(e, file=self._file)
def next(self) -> "Element": # noqa: D102
element = self._element.getnext()
if element is None:
raise StopIteration()
return Element(element, file=self._file)
def prev(self) -> "Element": # noqa: D102
element = self._element.getprevious()
if element is None:
raise StopIteration()
return Element(element, file=self._file)
def up(self) -> "Element": # noqa: D102
element = self._element.getparent()
if element is None:
raise StopIteration()
return Element(element, file=self._file)
def down(self) -> "Element": # noqa: D102
# throws StopIteration if there are no children
return Element(next(self._element.iterchildren()), file=self._file)
@property
def file(self) -> str:
if self._file:
return self._file
return cast(str, self._element.base)
@property
def opening_line(self) -> int:
return cast(int, self._element.sourceline)
@cached_property
def num_lines(self) -> int:
return len(etree.tostring(self._element).strip().split(b"\n"))
@cached_property
def closing_line(self) -> int:
return self.opening_line + self.num_lines - 1
@property
def tag(self) -> str:
return cast(str, self._element.tag)
@property
def text(self) -> Optional[str]:
return cast(str, self._element.text)
@property
def attributes(self) -> Mapping[str, str]:
return cast(Mapping[str, str], self._element.attrib)
_ParserInputType = Union[bytes, Text]
_FileOrFilename = Union[str, bytes, int, IO[Any]]
# The following functions are here to make lxml more compatible with etree.
def parse(
source: _FileOrFilename, parser: Optional[XMLParser] = None
) -> etree._ElementTree:
"""Parse XML document into element tree.
This is wrapper around :func:`lxml.etree.parse` to make it behave like
:func:`xml.etree.ElementTree.parse`.
:param source:
Filename or file object containing XML data.
:param parser:
Optional parser instance, defaulting to
:class:`lxml.etree.ETCompatXMLParser`.
:return:
An ElementTree instance.
"""
if parser is None:
parser = ETCompatXMLParser()
return etree.parse(source, parser)
def fromstring(
text: _ParserInputType, parser: Optional[XMLParser] = None
) -> etree._Element:
"""Parse XML document from string constant.
This function can be used to embed 'XML Literals' in Python code.
This is wrapper around :func:`lxml.etree.fromstring` to make it behave like
:func:`xml.etree.ElementTree.fromtstring`.
:param text:
A string containing XML data.
:param parser:
Optional parser instance, defaulting to
:class:`lxml.etree.ETCompatXMLParser`.
:return:
An Element instance.
"""
if parser is None:
parser = ETCompatXMLParser()
return etree.fromstring(text, parser)
def fromstringlist(
sequence: Sequence[_ParserInputType], parser: Optional[XMLParser] = None
) -> etree._Element:
"""Parse XML document from sequence of string fragments.
:param sequence:
A list or other sequence of strings containing XML data.
:param parser:
Optional parser instance, defaulting to
:class:`lxml.etree.ETCompatXMLParser`.
:return:
An Element instance.
"""
if parser is None:
parser = ETCompatXMLParser()
return etree.fromstringlist(sequence, parser)
def error_with_file(error: ParseError, file: str) -> ParseError:
"""Add filename to an XML parse error.
:param error:
Original XML parse error.
:param file:
Filename to add.
:return:
A new parse error (of the same type as `error`) with the `filename`
added.
"""
error.filename = file
return type(error)(
error.msg, error.code, error.position[0], error.position[1], file
) | en | 0.658424 | XML tools using the lxml_ library. .. _lxml: https://lxml.de/ # type: ignore # type: ignore # TODO: Change to functools.cached_property when dropping support for # Python 3.7 # property behaves properly with Mypy but cached_property does not, even # with the same type stub. XML element that encapsulates an element from lxml_. Supports line number examination. .. _lxml: https://lxml.de/ :param: XML element from the lxml_ library. :param file: Optional filename/path the element is from. # noqa: D102 # noqa: D102 # noqa: D102 # noqa: D102 # throws StopIteration if there are no children # The following functions are here to make lxml more compatible with etree. Parse XML document into element tree. This is wrapper around :func:`lxml.etree.parse` to make it behave like :func:`xml.etree.ElementTree.parse`. :param source: Filename or file object containing XML data. :param parser: Optional parser instance, defaulting to :class:`lxml.etree.ETCompatXMLParser`. :return: An ElementTree instance. Parse XML document from string constant. This function can be used to embed 'XML Literals' in Python code. This is wrapper around :func:`lxml.etree.fromstring` to make it behave like :func:`xml.etree.ElementTree.fromtstring`. :param text: A string containing XML data. :param parser: Optional parser instance, defaulting to :class:`lxml.etree.ETCompatXMLParser`. :return: An Element instance. Parse XML document from sequence of string fragments. :param sequence: A list or other sequence of strings containing XML data. :param parser: Optional parser instance, defaulting to :class:`lxml.etree.ETCompatXMLParser`. :return: An Element instance. Add filename to an XML parse error. :param error: Original XML parse error. :param file: Filename to add. :return: A new parse error (of the same type as `error`) with the `filename` added. | 2.644632 | 3 |
vetkit/word2vec_models.py | edponce/vetk | 0 | 6620829 | """Interface functions to word2vec embedding models.
Todo:
* In docstrings, add links for references to module functions.
"""
import os
from math import ceil
from collections import OrderedDict
import numpy
from .utils import convert_to_range
import smarttimers
def read_word_vector(fd, vector_bytes, chunk_size=2**20):
chunk = b''
while True:
# First part of current line
if not chunk:
chunk = fd.read(chunk_size)
# EOF?
if not chunk: break
blank_idx = chunk.index(b' ') # find word/vector separator
word = chunk[:blank_idx]
chunk = chunk[blank_idx + 1:] # skip blank space
# Read remaining vector bytes
while (len(chunk) <= vector_bytes):
partial_chunk = fd.read(chunk_size)
# EOF? We are not done processing file
if not partial_chunk: break
chunk += partial_chunk
# Extract vector
vector = chunk[:vector_bytes]
# Trim chunk, skip newline
chunk = chunk[vector_bytes + 1:]
yield word, vector
@smarttimers.smarttime
def load_vectors_word2vec(file, load_vocab=True, filter=None, blacklist=False, dtype=numpy.float32):
"""Load vectors of embedding model from given file in word2vec format.
Notes:
* *file* encoding (ASCII or binary) is automatically detected during
processing.
Args:
file (str): Input file.
load_vocab (bool, optional): If True, vocabulary will be extracted from
file (occurrences will be set to 1). Otherwise an empty vocabulary
is returned. Default is True.
filter (range, slice, list, tuple, float, int, set, dict, None, optional):
Values representing a filter operation for file processing, see
*utils.convert_to_range()*. If string, consider it a file with a
list of words. If None, entire file is processed. Default is None.
blacklist (bool, optional): If True, consider *filter* as a blacklist.
If False, consider *filter* as a whitelist. Only applicable when
*filter* is a set or dict. Default is False.
dtype (numpy.dtype, optional): Type of vector data. Default is
numpy.float32.
Returns:
numpy.ndarray, OrderedDict: Vectors and vocabulary of embedding model.
Raises:
EOFError: For *file* in binary format, if EOF is reached before all
possible data requested is extracted.
"""
# Check file format and get data dimensions
try:
with open(file) as fd:
dims = tuple(int(dim) for dim in fd.readline().split())
binary = False
except UnicodeDecodeError as ex:
with open(file, 'rb') as fd:
dims = tuple(int(dim) for dim in fd.readline().split())
binary = True
# Get lines to process
if isinstance(filter, (set, dict)):
erange = convert_to_range(None, dims[0])
else:
blacklist = None # Disable blacklisting
erange = convert_to_range(filter, dims[0])
n_elems = ceil((erange[1] - erange[0]) / erange[2])
vectors = numpy.empty(shape=(n_elems, dims[1]), dtype=dtype)
vocab= OrderedDict()
if binary:
with open(file, 'rb') as fd:
next(fd) # discard header, already read
next_line = erange[0]
line_length = dims[1] * 4 # float is default in word2vec
chunk_size = 2**20 # read file in 1MB chunks
# chunk = b''
gen = read_word_vector(fd, line_length, chunk_size)
i = -1 # begin at -1 because i+=1 is done before comparisons
j = 0
while True:
i += 1
if i >= erange[1]: break
# # First part of current line
# if not chunk:
# chunk = fd.read(chunk_size)
#
# # EOF?
# if not chunk: break
#
# blank_idx = chunk.index(b' ') # find word/vector separator
# word = chunk[:blank_idx]
# chunk = chunk[blank_idx + 1:] # skip blank space
#
# # Read remaining vector bytes
# while (len(chunk) <= line_length):
# partial_chunk = fd.read(chunk_size)
#
# # EOF? We are not done processing file
# if not partial_chunk: break
# chunk += partial_chunk
#
# # Extract vector
# vector = chunk[:line_length]
#
# # Trim chunk, skip newline
# chunk = chunk[line_length + 1:]
word, vector = next(gen)
if i < erange[0]: continue
if i == next_line:
word = word.decode()
if blacklist is None or (not blacklist and word in filter) or (blacklist and word not in filter):
if load_vocab:
vocab[word] = 1
vectors[j][:] = numpy.frombuffer(vector, dtype=dtype)
j += 1
next_line += erange[2]
# Check if processing stopped before it should, not if blacklisting
if blacklist is None and j < n_elems and erange[1] - j >= erange[2]:
raise EOFError("failed to parse vectors file")
else:
with open(file) as fd:
next(fd) # discard header, already read
next_line = erange[0]
j = 0
for i, line in enumerate(fd):
if i < erange[0]: continue
if i >= erange[1]: break
if i == next_line:
word, vector = line.split(maxsplit=1)
if blacklist is None or (not blacklist and word in filter) or (blacklist and word not in filter):
if load_vocab:
vocab[word] = 1
vectors[j][:] = numpy.fromstring(vector, dtype, sep=' ')
j += 1
next_line += erange[2]
# Resize array, only if given a blacklist where final size was unknown
if j < vectors.shape[0]:
vectors = vectors[:j,:]
return vectors, vocab
@smarttimers.smarttime
def load_vocabulary_word2vec(file, filter=None, blacklist=False):
"""Load vocabulary of embedding model from given file in word2vec format.
Notes:
* *file* consists of two columns, words and occurrences.
Args:
file (str): Input file.
filter (range, slice, list, tuple, float, int, set, dict, None, optional):
Values representing a filter operation for file processing, see
*utils.convert_to_range()*. If string, consider it a file with a
list of words. If None, entire file is processed. Default is None.
blacklist (bool, optional): If True, consider *filter* as a blacklist.
If False, consider *filter* as a whitelist. Only applicable when
*filter* is a set or dict. Default is False.
"""
# Get lines to process
if isinstance(filter, (set, dict)):
erange = convert_to_range(None, file)
else:
blacklist = None
erange = convert_to_range(filter, file)
vocab = OrderedDict()
with open(file) as fd:
next_line = erange[0]
for i, line in enumerate(fd):
if i < erange[0]: continue
if erange[1] is not None and i >= erange[1]: break
if i == next_line:
word, count = line.split(maxsplit=1)
if blacklist is None or (not blacklist and word in filter) or (blacklist and word not in filter):
vocab[word] = int(count)
next_line += erange[2]
return vocab
@smarttimers.smarttime
def dump_vectors_word2vec(file, vectors, vocab, binary=False):
"""Write vectors of embedding model to given file in word2vec format.
Notes:
* Order of vectors and vocabulary should match.
* For ASCII format, floating-point precision is 6 decimal places.
Args:
file (str): Output file.
vectors (numpy.ndarray): Vectors of embedding model.
vocab (dict): Vocabulary of embedding model.
binary (bool, optional): Select encoding format. Default is False.
"""
if binary:
with open(file, 'wb') as fd:
newline = os.linesep.encode()
fd.write("{} {}".format(*vectors.shape).encode() + newline)
fmt = "{} "
for word, vector in zip(vocab.keys(), vectors):
fd.write(fmt.format(word).encode())
vector.tofile(fd)
fd.write(newline)
else:
with open(file, 'w') as fd:
newline = os.linesep
fd.write("{} {}".format(*vectors.shape) + newline)
fmt = ' '.join(['{}'] + vectors.shape[1] * ['{:6f}']) + newline
for word, vector in zip(vocab.keys(), vectors):
fd.write(fmt.format(word, *vector))
@smarttimers.smarttime
def dump_vocabulary_word2vec(file, vocab):
"""Write vocabulary of embedding model to given file in word2vec format.
Args:
file (str): Output file.
vocab (dict): Vocabulary of embedding model.
"""
with open(file, 'w') as fd:
newline = os.linesep
fmt = "{} {}" + newline
for word, count in vocab.items():
fd.write(fmt.format(word, count))
| """Interface functions to word2vec embedding models.
Todo:
* In docstrings, add links for references to module functions.
"""
import os
from math import ceil
from collections import OrderedDict
import numpy
from .utils import convert_to_range
import smarttimers
def read_word_vector(fd, vector_bytes, chunk_size=2**20):
chunk = b''
while True:
# First part of current line
if not chunk:
chunk = fd.read(chunk_size)
# EOF?
if not chunk: break
blank_idx = chunk.index(b' ') # find word/vector separator
word = chunk[:blank_idx]
chunk = chunk[blank_idx + 1:] # skip blank space
# Read remaining vector bytes
while (len(chunk) <= vector_bytes):
partial_chunk = fd.read(chunk_size)
# EOF? We are not done processing file
if not partial_chunk: break
chunk += partial_chunk
# Extract vector
vector = chunk[:vector_bytes]
# Trim chunk, skip newline
chunk = chunk[vector_bytes + 1:]
yield word, vector
@smarttimers.smarttime
def load_vectors_word2vec(file, load_vocab=True, filter=None, blacklist=False, dtype=numpy.float32):
"""Load vectors of embedding model from given file in word2vec format.
Notes:
* *file* encoding (ASCII or binary) is automatically detected during
processing.
Args:
file (str): Input file.
load_vocab (bool, optional): If True, vocabulary will be extracted from
file (occurrences will be set to 1). Otherwise an empty vocabulary
is returned. Default is True.
filter (range, slice, list, tuple, float, int, set, dict, None, optional):
Values representing a filter operation for file processing, see
*utils.convert_to_range()*. If string, consider it a file with a
list of words. If None, entire file is processed. Default is None.
blacklist (bool, optional): If True, consider *filter* as a blacklist.
If False, consider *filter* as a whitelist. Only applicable when
*filter* is a set or dict. Default is False.
dtype (numpy.dtype, optional): Type of vector data. Default is
numpy.float32.
Returns:
numpy.ndarray, OrderedDict: Vectors and vocabulary of embedding model.
Raises:
EOFError: For *file* in binary format, if EOF is reached before all
possible data requested is extracted.
"""
# Check file format and get data dimensions
try:
with open(file) as fd:
dims = tuple(int(dim) for dim in fd.readline().split())
binary = False
except UnicodeDecodeError as ex:
with open(file, 'rb') as fd:
dims = tuple(int(dim) for dim in fd.readline().split())
binary = True
# Get lines to process
if isinstance(filter, (set, dict)):
erange = convert_to_range(None, dims[0])
else:
blacklist = None # Disable blacklisting
erange = convert_to_range(filter, dims[0])
n_elems = ceil((erange[1] - erange[0]) / erange[2])
vectors = numpy.empty(shape=(n_elems, dims[1]), dtype=dtype)
vocab= OrderedDict()
if binary:
with open(file, 'rb') as fd:
next(fd) # discard header, already read
next_line = erange[0]
line_length = dims[1] * 4 # float is default in word2vec
chunk_size = 2**20 # read file in 1MB chunks
# chunk = b''
gen = read_word_vector(fd, line_length, chunk_size)
i = -1 # begin at -1 because i+=1 is done before comparisons
j = 0
while True:
i += 1
if i >= erange[1]: break
# # First part of current line
# if not chunk:
# chunk = fd.read(chunk_size)
#
# # EOF?
# if not chunk: break
#
# blank_idx = chunk.index(b' ') # find word/vector separator
# word = chunk[:blank_idx]
# chunk = chunk[blank_idx + 1:] # skip blank space
#
# # Read remaining vector bytes
# while (len(chunk) <= line_length):
# partial_chunk = fd.read(chunk_size)
#
# # EOF? We are not done processing file
# if not partial_chunk: break
# chunk += partial_chunk
#
# # Extract vector
# vector = chunk[:line_length]
#
# # Trim chunk, skip newline
# chunk = chunk[line_length + 1:]
word, vector = next(gen)
if i < erange[0]: continue
if i == next_line:
word = word.decode()
if blacklist is None or (not blacklist and word in filter) or (blacklist and word not in filter):
if load_vocab:
vocab[word] = 1
vectors[j][:] = numpy.frombuffer(vector, dtype=dtype)
j += 1
next_line += erange[2]
# Check if processing stopped before it should, not if blacklisting
if blacklist is None and j < n_elems and erange[1] - j >= erange[2]:
raise EOFError("failed to parse vectors file")
else:
with open(file) as fd:
next(fd) # discard header, already read
next_line = erange[0]
j = 0
for i, line in enumerate(fd):
if i < erange[0]: continue
if i >= erange[1]: break
if i == next_line:
word, vector = line.split(maxsplit=1)
if blacklist is None or (not blacklist and word in filter) or (blacklist and word not in filter):
if load_vocab:
vocab[word] = 1
vectors[j][:] = numpy.fromstring(vector, dtype, sep=' ')
j += 1
next_line += erange[2]
# Resize array, only if given a blacklist where final size was unknown
if j < vectors.shape[0]:
vectors = vectors[:j,:]
return vectors, vocab
@smarttimers.smarttime
def load_vocabulary_word2vec(file, filter=None, blacklist=False):
"""Load vocabulary of embedding model from given file in word2vec format.
Notes:
* *file* consists of two columns, words and occurrences.
Args:
file (str): Input file.
filter (range, slice, list, tuple, float, int, set, dict, None, optional):
Values representing a filter operation for file processing, see
*utils.convert_to_range()*. If string, consider it a file with a
list of words. If None, entire file is processed. Default is None.
blacklist (bool, optional): If True, consider *filter* as a blacklist.
If False, consider *filter* as a whitelist. Only applicable when
*filter* is a set or dict. Default is False.
"""
# Get lines to process
if isinstance(filter, (set, dict)):
erange = convert_to_range(None, file)
else:
blacklist = None
erange = convert_to_range(filter, file)
vocab = OrderedDict()
with open(file) as fd:
next_line = erange[0]
for i, line in enumerate(fd):
if i < erange[0]: continue
if erange[1] is not None and i >= erange[1]: break
if i == next_line:
word, count = line.split(maxsplit=1)
if blacklist is None or (not blacklist and word in filter) or (blacklist and word not in filter):
vocab[word] = int(count)
next_line += erange[2]
return vocab
@smarttimers.smarttime
def dump_vectors_word2vec(file, vectors, vocab, binary=False):
"""Write vectors of embedding model to given file in word2vec format.
Notes:
* Order of vectors and vocabulary should match.
* For ASCII format, floating-point precision is 6 decimal places.
Args:
file (str): Output file.
vectors (numpy.ndarray): Vectors of embedding model.
vocab (dict): Vocabulary of embedding model.
binary (bool, optional): Select encoding format. Default is False.
"""
if binary:
with open(file, 'wb') as fd:
newline = os.linesep.encode()
fd.write("{} {}".format(*vectors.shape).encode() + newline)
fmt = "{} "
for word, vector in zip(vocab.keys(), vectors):
fd.write(fmt.format(word).encode())
vector.tofile(fd)
fd.write(newline)
else:
with open(file, 'w') as fd:
newline = os.linesep
fd.write("{} {}".format(*vectors.shape) + newline)
fmt = ' '.join(['{}'] + vectors.shape[1] * ['{:6f}']) + newline
for word, vector in zip(vocab.keys(), vectors):
fd.write(fmt.format(word, *vector))
@smarttimers.smarttime
def dump_vocabulary_word2vec(file, vocab):
"""Write vocabulary of embedding model to given file in word2vec format.
Args:
file (str): Output file.
vocab (dict): Vocabulary of embedding model.
"""
with open(file, 'w') as fd:
newline = os.linesep
fmt = "{} {}" + newline
for word, count in vocab.items():
fd.write(fmt.format(word, count))
| en | 0.787899 | Interface functions to word2vec embedding models. Todo: * In docstrings, add links for references to module functions. # First part of current line # EOF? # find word/vector separator # skip blank space # Read remaining vector bytes # EOF? We are not done processing file # Extract vector # Trim chunk, skip newline Load vectors of embedding model from given file in word2vec format. Notes: * *file* encoding (ASCII or binary) is automatically detected during processing. Args: file (str): Input file. load_vocab (bool, optional): If True, vocabulary will be extracted from file (occurrences will be set to 1). Otherwise an empty vocabulary is returned. Default is True. filter (range, slice, list, tuple, float, int, set, dict, None, optional): Values representing a filter operation for file processing, see *utils.convert_to_range()*. If string, consider it a file with a list of words. If None, entire file is processed. Default is None. blacklist (bool, optional): If True, consider *filter* as a blacklist. If False, consider *filter* as a whitelist. Only applicable when *filter* is a set or dict. Default is False. dtype (numpy.dtype, optional): Type of vector data. Default is numpy.float32. Returns: numpy.ndarray, OrderedDict: Vectors and vocabulary of embedding model. Raises: EOFError: For *file* in binary format, if EOF is reached before all possible data requested is extracted. # Check file format and get data dimensions # Get lines to process # Disable blacklisting # discard header, already read # float is default in word2vec # read file in 1MB chunks # chunk = b'' # begin at -1 because i+=1 is done before comparisons # # First part of current line # if not chunk: # chunk = fd.read(chunk_size) # # # EOF? 
# if not chunk: break # # blank_idx = chunk.index(b' ') # find word/vector separator # word = chunk[:blank_idx] # chunk = chunk[blank_idx + 1:] # skip blank space # # # Read remaining vector bytes # while (len(chunk) <= line_length): # partial_chunk = fd.read(chunk_size) # # # EOF? We are not done processing file # if not partial_chunk: break # chunk += partial_chunk # # # Extract vector # vector = chunk[:line_length] # # # Trim chunk, skip newline # chunk = chunk[line_length + 1:] # Check if processing stopped before it should, not if blacklisting # discard header, already read # Resize array, only if given a blacklist where final size was unknown Load vocabulary of embedding model from given file in word2vec format. Notes: * *file* consists of two columns, words and occurrences. Args: file (str): Input file. filter (range, slice, list, tuple, float, int, set, dict, None, optional): Values representing a filter operation for file processing, see *utils.convert_to_range()*. If string, consider it a file with a list of words. If None, entire file is processed. Default is None. blacklist (bool, optional): If True, consider *filter* as a blacklist. If False, consider *filter* as a whitelist. Only applicable when *filter* is a set or dict. Default is False. # Get lines to process Write vectors of embedding model to given file in word2vec format. Notes: * Order of vectors and vocabulary should match. * For ASCII format, floating-point precision is 6 decimal places. Args: file (str): Output file. vectors (numpy.ndarray): Vectors of embedding model. vocab (dict): Vocabulary of embedding model. binary (bool, optional): Select encoding format. Default is False. Write vocabulary of embedding model to given file in word2vec format. Args: file (str): Output file. vocab (dict): Vocabulary of embedding model. | 2.987672 | 3 |
config.py | mortbauer/webapp | 0 | 6620830 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class BaseConfig(object):
SECRET_KEY = "SO_SECURE"
class TestingConfig(BaseConfig):
"""Development configuration."""
TESTING = True
DEBUG = True
WTF_CSRF_ENABLED = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///%s/app.db'%basedir
DEBUG_TB_ENABLED = True
PRESERVE_CONTEXT_ON_EXCEPTION = False
class ProductionConfig(BaseConfig):
DEBUG = False
WTF_CSRF_ENABLED = True
SECRET_KEY = os.urandom(24).decode('latin1')
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/webapp'
| import os
basedir = os.path.abspath(os.path.dirname(__file__))
class BaseConfig(object):
SECRET_KEY = "SO_SECURE"
class TestingConfig(BaseConfig):
"""Development configuration."""
TESTING = True
DEBUG = True
WTF_CSRF_ENABLED = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///%s/app.db'%basedir
DEBUG_TB_ENABLED = True
PRESERVE_CONTEXT_ON_EXCEPTION = False
class ProductionConfig(BaseConfig):
DEBUG = False
WTF_CSRF_ENABLED = True
SECRET_KEY = os.urandom(24).decode('latin1')
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/webapp'
| en | 0.702422 | Development configuration. | 2.179682 | 2 |
garageofcode/mip/convex_hull.py | tpi12jwe/garageofcode | 2 | 6620831 | <gh_stars>1-10
from itertools import product
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
from sentian_miami import get_solver
from garageofcode.sampling.timeseries import get_ts
tol = 1e-4
def draw_planes(ax, planes):
t = np.linspace(-10, 10)
eps = 0.05
for plane in planes:
a, b, d = plane
assert abs(a + b) > tol
x = -d/(a + b) + b*t
y = -d/(a + b) - a*t
ax.plot(x, y, color='b')
x_p = x + np.sign(a) * eps
y_p = y + np.sign(b) * eps
ax.plot(x_p, y_p, color='r')
def in_hull(u, V, **kwargs):
    """
    Checks if u is in convex hull of V using linear programming.
    V is a list of points
    u is a point
    """
    solver = get_solver("mono")
    # One non-negative weight per vertex; u is in the hull iff some convex
    # combination of V equals u.
    X = [solver.NumVar(lb=0) for _ in range(len(V))]
    for V_i, u_i in zip(zip(*V), u):
        # One equality per coordinate: sum_j V[j][i] * X[j] == u[i].
        solver.Add(solver.Dot(V_i, X) == u_i)
    # Weights must sum to one (convexity).
    solver.Add(solver.Sum(X) == 1)
    # Feasibility of the LP is the membership answer.
    return solver.Solve(time_limit=10, **kwargs)
def is_inside(point, planes):
    """Return True iff *point* is on the non-negative side of every plane.

    *planes* is an (n, k+1) array whose rows are (normal..., offset); a
    point p is inside when ``normal . p + offset >= 0`` for every row.
    """
    normals = planes[:, :-1]
    offsets = planes[:, -1]
    signed_distances = np.matmul(normals, point) + offsets
    return (signed_distances >= 0).all()
def make_plane(points, ref):
    """
    Make a plane with a normal that is orthogonal
    to all (u - v) where u and v are in points
    The plane intersects all points
    The plane is oriented such that the point
    ref will have a positive value

    Returns the concatenation (normal..., d) with ``normal . p + d == 0``
    for every p in *points* and ``normal . ref + d > 0``.
    """
    p0 = points[0]
    # Rows span the in-plane directions; their null space is the normal.
    # np.asarray replaces the deprecated np.matrix (identical numerics).
    A = np.asarray([p_i - p0 for p_i in points[1:]])
    normal = linalg.null_space(A)
    d = -np.dot(p0, normal)
    # Flip orientation so that evaluating the plane at *ref* is positive.
    # NOTE(review): if ref lies exactly on the plane, sgn == 0 and the
    # result degenerates to the zero plane -- callers must avoid that.
    sgn = np.dot(ref, normal) + d
    normal *= sgn
    d *= sgn
    plane = np.concatenate([normal.T[0], d])
    return plane
def is_bounded(planes):
    """Heuristic boundedness probe for {x : a.x + d >= 0 for (a..., d) in planes}.

    Maximises a randomly weighted positive combination of the constraint
    projections inside a large box [-R, R]^k; hitting the box boundary
    suggests the polyhedron is unbounded.

    NOTE(review): debug/print routine -- returns False for empty input and
    True on infeasibility, but only *prints* the bounded/unbounded verdict
    (implicitly returning None) otherwise.
    NOTE(review): ``status2str`` is not defined in this module; unless
    ``from config.config import *`` provides it, this raises NameError.
    """
    R = 1000
    tol = 1e-6
    if not len(planes):
        return False
    solver = get_solver("CBC")
    X = [solver.NumVar(lb=-R, ub=R) for _ in range(len(planes[0]) - 1)]
    obj = 0
    for A in planes:
        #print(A)
        a, d = A[:-1], A[-1]
        proj = solver.Dot(a, X)
        # Random positive weights make the objective a generic direction.
        obj += proj * np.random.random()
        solver.Add(proj >= -d)
    #solver.Add(X[0] <= 1)
    #solver.Add(X[0] >= -1)
    #obj = solver.Dot(np.sum(planes[:, :-1], axis=0), X)
    solver.SetObjective(obj, maximize=True)
    result = solver.Solve(time_limit=10)
    result = status2str[result]
    if result == "INFEASIBLE":
        print("Infeasible!")
        return True
    else:
        sol = [solver.solution_value(x) for x in X]
        print(sol)
        # A coordinate at the artificial box edge signals unboundedness.
        if any([np.abs(y - R) < tol for y in sol]):
            print("Unbounded")
        else:
            print("Bounded")
        print()
def volume(V, n_iter=100):
    """Monte Carlo estimate of the volume of ConvHull(V) intersected with
    the centred unit cube [-0.5, 0.5)^dim.

    Returns (fraction_inside, per-sample inclusion flags).
    """
    dim = len(V[0])
    included = [in_hull(np.random.random(dim) - 0.5, V)
                for _ in range(n_iter)]
    return sum(included) / n_iter, included
def k_fold_inclusion(V):
    """For each v_i in V, test whether v_i is in ConvHull(V without v_i).

    Returns (fraction_included, per-point inclusion flags); returns 0 for
    an empty V.
    """
    if len(V) == 0:
        return 0
    included = []
    for i in range(len(V)):
        others = [v for j, v in enumerate(V) if j != i]
        included.append(in_hull(V[i], others))
    return sum(included) / len(V), included
def get_time_correlated_points(dim, N):
    """Return N points of dimension *dim* as sliding windows over a time
    series of length N+dim-1 (presumably autoregressive of order dim --
    see garageofcode.sampling.timeseries.get_ts; verify there).
    """
    X = get_ts(N+dim-1, p=dim)
    return np.array([X[i:i+dim] for i in range(N)])
def get_correlated_points(dim, N, alpha=0.1):
    """Draw N points in R^dim with linearly correlated coordinates.

    A random mixing matrix A maps independent uniform(-0.5, 0.5) noise to
    correlated coordinates.  *alpha* is kept for interface compatibility;
    the current implementation (like the commented-out experiments it
    replaces) does not use it.

    Returns an (N, dim) array.
    """
    A = np.random.random([dim, dim]) - 0.5
    e = np.random.random([dim, N]) - 0.5
    return np.dot(A, e).T
def main():
    """Experiment driver: estimate k-fold inclusion for random point sets
    and visualise which points fall inside the hull of the others.

    The triple-quoted blocks below are disabled experiments kept for
    reference (boundedness checks, hull-membership grids, plane drawing).
    """
    '''
    for _ in range(1000):
        A = np.random.random([5, 3]) - 0.5
        #A = np.array([[1, -1],
        #              [-1, -1]])
        #print(A)
        is_bounded(A)
    '''
    #points = [[0, 0], [10, 0], [0, 10]]
    np.random.seed(0)
    avg = 0
    dim = 2
    num_points = 100
    n_iter = 1
    for _ in range(n_iter):
        points = np.random.random([num_points, dim]) - 0.5
        #points = get_correlated_points(dim, num_points, alpha=1)
        x, y = zip(*points)
        vol, included = k_fold_inclusion(points)
        # Blue: inside the hull of the others; red: a hull vertex.
        col = ['b' if incl else 'r' for incl in included]
        plt.scatter(x, y, color=col)
        plt.title("K-fold inclusion: {0:.3f}".format(vol))
        plt.show()
        #exit(0)
        print("Volume:", vol)
        avg += vol
    avg = avg / n_iter
    print("Dim={1:d}, Num_points={2:d}, Total avg: {0:.3f}" \
        .format(avg, dim, num_points))
    '''
    fig, ax = plt.subplots()
    for x, y in product(range(-10, 12), repeat=2):
        col = 'r' if in_hull([x, y], points) else 'b'
        ax.scatter(x, y, color=col)
    x, y = zip(*points)
    ax.scatter(x, y, color='g')
    #ax.set_title("Convex hull for S = {(0, 0), (10, 0), (0, 10)}, in red")
    plt.show()
    '''
    '''
    points = np.random.random([10, 2])*10 - 5
    c0 = np.random.choice(len(points), 3, replace=False)
    c = [points[ch] for ch in c0]
    plane1 = make_plane([c[0], c[1]], c[2])
    plane2 = make_plane([c[0], c[2]], c[1])
    plane3 = make_plane([c[1], c[2]], c[0])
    planes = np.array([plane1, plane2, plane3])
    fig, ax = plt.subplots()
    for x, y in product(np.linspace(-10, 10, 20), repeat=2):
        col = 'r' if is_inside([x, y], planes) else 'b'
        ax.scatter(x, y, color=col)
    draw_planes(ax, planes)
    plt.show()
    '''
    #planes = np.random.random([3, 3]) - 0.5
    #point = np.array([[0], [0]])
    #points = np.array([[1, 0], [0, 1]])
    #ref = [0, 0]
    #make_plane(points, ref)
    '''
    '''
    #for _ in range(100):
    #    print("is inside:", is_inside(point, planes))
if __name__ == '__main__':
main() | from itertools import product
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
from sentian_miami import get_solver
from garageofcode.sampling.timeseries import get_ts
tol = 1e-4
def draw_planes(ax, planes):
t = np.linspace(-10, 10)
eps = 0.05
for plane in planes:
a, b, d = plane
assert abs(a + b) > tol
x = -d/(a + b) + b*t
y = -d/(a + b) - a*t
ax.plot(x, y, color='b')
x_p = x + np.sign(a) * eps
y_p = y + np.sign(b) * eps
ax.plot(x_p, y_p, color='r')
def in_hull(u, V, **kwargs):
"""
Checks if u is in convex hull of V using linear programming.
V is a list of points
u is a point
"""
solver = get_solver("mono")
X = [solver.NumVar(lb=0) for _ in range(len(V))]
for V_i, u_i in zip(zip(*V), u):
solver.Add(solver.Dot(V_i, X) == u_i)
solver.Add(solver.Sum(X) == 1)
return solver.Solve(time_limit=10, **kwargs)
def is_inside(point, planes):
A, d = planes[:, :-1], planes[:, -1]
proj = np.matmul(A, point) + d
return np.all(proj >= 0)
def make_plane(points, ref):
"""
Make a plane with a normal that is orthogonal
to all (u - v) where u and v are in points
The plane intersects all points
The plane is oriented such that the point
ref will have a positive value
"""
p0 = points[0]
A = np.matrix([p_i - p0 for p_i in points[1:]])
normal = linalg.null_space(A)
d = -np.dot(p0, normal)
sgn = np.dot(ref, normal) + d
normal *= sgn
d *= sgn
plane = np.concatenate([normal.T[0], d])
return plane
def is_bounded(planes):
R = 1000
tol = 1e-6
if not len(planes):
return False
solver = get_solver("CBC")
X = [solver.NumVar(lb=-R, ub=R) for _ in range(len(planes[0]) - 1)]
obj = 0
for A in planes:
#print(A)
a, d = A[:-1], A[-1]
proj = solver.Dot(a, X)
obj += proj * np.random.random()
solver.Add(proj >= -d)
#solver.Add(X[0] <= 1)
#solver.Add(X[0] >= -1)
#obj = solver.Dot(np.sum(planes[:, :-1], axis=0), X)
solver.SetObjective(obj, maximize=True)
result = solver.Solve(time_limit=10)
result = status2str[result]
if result == "INFEASIBLE":
print("Infeasible!")
return True
else:
sol = [solver.solution_value(x) for x in X]
print(sol)
if any([np.abs(y - R) < tol for y in sol]):
print("Unbounded")
else:
print("Bounded")
print()
def volume(V, n_iter=100):
"""
Monte Carlo estimate of volume of
convex hull of V, intersected with the unit cube
"""
dim = len(V[0])
included = []
num_in = 0
for _ in range(n_iter):
x = np.random.random(dim) - 0.5
incl = in_hull(x, V)
num_in += incl
included.append(incl)
return num_in / n_iter, included
def k_fold_inclusion(V):
"""
Checks if v_i in ConvHull(V-v_i) for v_i in V
"""
if len(V) == 0:
return 0
included = []
num_in = 0
for i, v_i in enumerate(V):
incl = in_hull(v_i, [v for j, v in enumerate(V) if j != i])
num_in += incl
included.append(incl)
return num_in / len(V), included
def get_time_correlated_points(dim, N):
X = get_ts(N+dim-1, p=dim)
return np.array([X[i:i+dim] for i in range(N)])
def get_correlated_points(dim, N, alpha=0.1):
A = np.random.random([dim, dim])-0.5
#Q, _ = np.linalg.qr(A)
#D = np.diag(10 * np.random.random([dim]))
#B = np.matmul(np.matmul(Q.T, D), Q)
#I = np.eye(dim)
#V = (1 - alpha) * I + alpha * B
#C = np.linalg.cholesky(V)
#for row in C:
# print([float("{0:.3f}".format(c)) for c in row])
#e = np.random.randn(dim, N)
e = np.random.random([dim, N]) - 0.5
X = np.dot(A, e).T
return X
def main():
'''
for _ in range(1000):
A = np.random.random([5, 3]) - 0.5
#A = np.array([[1, -1],
# [-1, -1]])
#print(A)
is_bounded(A)
'''
#points = [[0, 0], [10, 0], [0, 10]]
np.random.seed(0)
avg = 0
dim = 2
num_points = 100
n_iter = 1
for _ in range(n_iter):
points = np.random.random([num_points, dim]) - 0.5
#points = get_correlated_points(dim, num_points, alpha=1)
x, y = zip(*points)
vol, included = k_fold_inclusion(points)
col = ['b' if incl else 'r' for incl in included]
plt.scatter(x, y, color=col)
plt.title("K-fold inclusion: {0:.3f}".format(vol))
plt.show()
#exit(0)
print("Volume:", vol)
avg += vol
avg = avg / n_iter
print("Dim={1:d}, Num_points={2:d}, Total avg: {0:.3f}" \
.format(avg, dim, num_points))
'''
fig, ax = plt.subplots()
for x, y in product(range(-10, 12), repeat=2):
col = 'r' if in_hull([x, y], points) else 'b'
ax.scatter(x, y, color=col)
x, y = zip(*points)
ax.scatter(x, y, color='g')
#ax.set_title("Convex hull for S = {(0, 0), (10, 0), (0, 10)}, in red")
plt.show()
'''
'''
points = np.random.random([10, 2])*10 - 5
c0 = np.random.choice(len(points), 3, replace=False)
c = [points[ch] for ch in c0]
plane1 = make_plane([c[0], c[1]], c[2])
plane2 = make_plane([c[0], c[2]], c[1])
plane3 = make_plane([c[1], c[2]], c[0])
planes = np.array([plane1, plane2, plane3])
fig, ax = plt.subplots()
for x, y in product(np.linspace(-10, 10, 20), repeat=2):
col = 'r' if is_inside([x, y], planes) else 'b'
ax.scatter(x, y, color=col)
draw_planes(ax, planes)
plt.show()
'''
#planes = np.random.random([3, 3]) - 0.5
#point = np.array([[0], [0]])
#points = np.array([[1, 0], [0, 1]])
#ref = [0, 0]
#make_plane(points, ref)
'''
'''
#for _ in range(100):
# print("is inside:", is_inside(point, planes))
if __name__ == '__main__':
main() | en | 0.502004 | Checks if u is in convex hull of V using linear programming. V is a list of points u is a point Make a plane with a normal that is orthogonal to all (u - v) where u and v are in points The plane intersects all points The plane is oriented such that the point ref will have a positive value #print(A) #solver.Add(X[0] <= 1) #solver.Add(X[0] >= -1) #obj = solver.Dot(np.sum(planes[:, :-1], axis=0), X) Monte Carlo estimate of volume of convex hull of V, intersected with the unit cube Checks if v_i in ConvHull(V-v_i) for v_i in V #Q, _ = np.linalg.qr(A) #D = np.diag(10 * np.random.random([dim])) #B = np.matmul(np.matmul(Q.T, D), Q) #I = np.eye(dim) #V = (1 - alpha) * I + alpha * B #C = np.linalg.cholesky(V) #for row in C: # print([float("{0:.3f}".format(c)) for c in row]) #e = np.random.randn(dim, N) for _ in range(1000): A = np.random.random([5, 3]) - 0.5 #A = np.array([[1, -1], # [-1, -1]]) #print(A) is_bounded(A) #points = [[0, 0], [10, 0], [0, 10]] #points = get_correlated_points(dim, num_points, alpha=1) #exit(0) fig, ax = plt.subplots() for x, y in product(range(-10, 12), repeat=2): col = 'r' if in_hull([x, y], points) else 'b' ax.scatter(x, y, color=col) x, y = zip(*points) ax.scatter(x, y, color='g') #ax.set_title("Convex hull for S = {(0, 0), (10, 0), (0, 10)}, in red") plt.show() points = np.random.random([10, 2])*10 - 5 c0 = np.random.choice(len(points), 3, replace=False) c = [points[ch] for ch in c0] plane1 = make_plane([c[0], c[1]], c[2]) plane2 = make_plane([c[0], c[2]], c[1]) plane3 = make_plane([c[1], c[2]], c[0]) planes = np.array([plane1, plane2, plane3]) fig, ax = plt.subplots() for x, y in product(np.linspace(-10, 10, 20), repeat=2): col = 'r' if is_inside([x, y], planes) else 'b' ax.scatter(x, y, color=col) draw_planes(ax, planes) plt.show() #planes = np.random.random([3, 3]) - 0.5 #point = np.array([[0], [0]]) #points = np.array([[1, 0], [0, 1]]) #ref = [0, 0] #make_plane(points, ref) #for _ in range(100): # print("is 
inside:", is_inside(point, planes)) | 2.506313 | 3 |
8_het/6_charlie.py | ArDrift/InfoPy_scripts | 0 | 6620832 | #!/usr/bin/env python3
class Fagyi:
    """An ice-cream flavour with a remaining stock count."""

    def __init__(self, iz, db):
        # iz: flavour name; db: number of portions in stock
        self.iz = iz
        self.db = db
def izkeres(flist, iz):
    """Return the index of the flavour named *iz* in *flist*, or None
    when no such flavour exists."""
    # enumerate replaces the manual range(len(...)) index loop.
    for idx, fagyi in enumerate(flist):
        if fagyi.iz == iz:
            return idx
    return None
def vasarlas(flist):
    """Interactive purchase loop.

    Reads flavour names from stdin until EOF or a blank line.  Known
    flavours with stock are decremented; sold-out or unknown flavours are
    reported.
    """
    try:
        vett = input("Fagyi íze: ")
    except EOFError:
        return
    while vett != "":
        idx = izkeres(flist, vett)
        # Bug fix: the original indexed flist with None for an unknown
        # flavour, raising TypeError instead of reporting it.
        if idx is None:
            print("Nem is volt!")
        elif flist[idx].db >= 1:
            flist[idx].db -= 1
            print("Sikeres vásárlás.")
            if flist[idx].db == 0:
                print("Kifogyott.")
        else:
            # Stock already at zero behaves as before.
            print("Nem is volt!")
        try:
            vett = input("Fagyi íze: ")
        except EOFError:
            return
def main():
    """Build the initial stock list and start the purchase loop."""
    fagyilista = [Fagyi("pisztácia", 0), Fagyi("vanília", 3),
                  Fagyi("tutti-frutti", 8), Fagyi("karamell", 4), Fagyi("<NAME>", 5),
                  Fagyi("kávé", 9)]
    vasarlas(fagyilista)
main()
| #!/usr/bin/env python3
class Fagyi:
def __init__(self, iz, db):
self.iz = iz
self.db = db
def izkeres(flist, iz):
for f in range(0, len(flist)):
if flist[f].iz == iz:
return f
return None
def vasarlas(flist):
try:
vett = input("Fagyi íze: ")
except EOFError:
return
while vett != "":
if flist[izkeres(flist, vett)].db >= 1:
flist[izkeres(flist, vett)].db -= 1
print("Sikeres vásárlás.")
if flist[izkeres(flist, vett)].db == 0:
print("Kifogyott.")
else:
print("Nem is volt!")
try:
vett = input("Fagyi íze: ")
except EOFError:
return
def main():
fagyilista = [Fagyi("pisztácia", 0), Fagyi("vanília", 3),
Fagyi("tutti-frutti", 8), Fagyi("karamell", 4), Fagyi("<NAME>", 5),
Fagyi("kávé", 9)]
vasarlas(fagyilista)
main()
| fr | 0.221828 | #!/usr/bin/env python3 | 3.469869 | 3 |
bestfitting/src/post_processing/s1_calculate_distance.py | guitarmind/HPA-competition-solutions | 0 | 6620833 | <gh_stars>0
import sys
sys.path.insert(0, '..')
import os
import gc
import numpy as np
import pandas as pd
from config.config import *
from sklearn import preprocessing
import warnings
warnings.filterwarnings('ignore')
from tqdm import tqdm
from timeit import default_timer as timer
opj = os.path.join
ope = os.path.exists
def load_data(dataset='train'):
    """Load extracted features and left-join the split's metadata.

    Args:
        dataset: one of 'train', 'val', 'ext', 'test'; selects both the
            feature .npz file and the metadata CSV.

    Returns:
        (df, features): the merged metadata DataFrame and the matching
        feature matrix.  In debug mode only the first 2000 rows are kept.

    Raises:
        ValueError: for an unknown *dataset* name.
    """
    feature_fname = opj(model_dir, 'extract_feats_%s.npz' % dataset)
    X = np.load(feature_fname)
    features = X['feats']
    df = pd.DataFrame({ID: X['ids']})
    if debug:
        num = 2000
        df = df[:num]
        features = features[:num]
    if dataset == 'train':
        meta_df = pd.read_csv(opj(DATA_DIR, 'split/external_trainset_antibody_split.csv'))
    elif dataset == 'val':
        meta_df = pd.read_csv(opj(DATA_DIR, 'split/external_validset_antibody_split.csv'))
    elif dataset == 'ext':
        meta_df = pd.read_csv(opj(DATA_DIR, 'meta/external_antibody_correct_meta.csv'))
    elif dataset == 'test':
        meta_df = pd.read_csv(opj(DATA_DIR, 'meta/test_leak_meta.csv'))
    else:
        raise ValueError(dataset)
    # Left join keeps every feature row even if metadata is missing.
    df = pd.merge(df, meta_df, on=ID, how='left')
    print('dataset %s, num: %d' % (dataset, len(df)))
    data = (df, features)
    return data
def cosin_metric(x1, x2):
    """Pairwise cosine similarity between rows of x1 (m, k) and x2 (n, k).

    Both inputs are column-standardised first (sklearn ``scale``); the
    result is an (m, n) similarity matrix.
    """
    a = preprocessing.scale(x1)
    b = preprocessing.scale(x2)
    assert a.shape[-1] == b.shape[-1]
    # Divide the raw dot products by the outer product of row norms.
    row_norms_a = np.linalg.norm(a, axis=1)[:, None]
    row_norms_b = np.linalg.norm(b, axis=1)[None, :]
    return np.dot(a, b.T) / row_norms_a / row_norms_b
def generate_label(y1, y2):
    """Binary (m, n) match matrix: entry (i, j) is 1 iff y1[i] == y2[j].

    y1 has shape (m,); y2 has shape (n,).  Returns dtype uint8.
    """
    rows = y1.reshape(-1, 1)   # (m, 1)
    cols = y2.reshape(1, -1)   # (1, n)
    # Broadcasting compares every pair of labels directly.
    return (rows == cols).astype('uint8')
def save_top3_results(train_df, valid_df, cosin_dist, data_type):
    """Write each validation sample's 3 most similar training samples
    (by cosine similarity) and their scores to <data_type>_top3.csv.

    cosin_dist has shape (len(valid_df), len(train_df)).
    """
    valid_ids = valid_df[ID].values
    # argsort is ascending; reverse and keep the first three indices.
    top_index = [sub[::-1][:3] for sub in np.argsort(cosin_dist, axis=1)]
    top_data = []
    for index, valid_id in tqdm(enumerate(valid_ids)):
        data = [valid_id]
        top_ix = top_index[index]
        dists = list(cosin_dist[index][top_ix])
        match_ids = list(train_df.iloc[top_ix][ID].values)
        data.extend(match_ids)
        data.extend(dists)
        top_data.append(data)
    columns = [ID, 'top1', 'top2', 'top3', 'top1_score', 'top2_score', 'top3_score']
    df = pd.DataFrame(data=top_data, columns=columns)
    out_dir = opj(RESULT_DIR, 'cache', 'match', model_name)
    os.makedirs(out_dir, exist_ok=True)
    fname = opj(out_dir, '%s_top3.csv'%(data_type))
    df.to_csv(fname, index=False)
def do_match(data_type='val'):
    """Match *data_type* samples against a gallery by cosine similarity.

    For 'val' the gallery is the train split; otherwise the external set.
    Dumps the top-3 matches, then sweeps a similarity threshold in
    [0.6, 0.8) and writes a per-threshold match CSV while printing
    precision/recall/F1 statistics.
    """
    if data_type == 'val':
        train_data = load_data(dataset='train')  # df, features
    else:
        train_data = load_data(dataset='ext')  # df, features
    valid_data = load_data(dataset=data_type)  # df, features
    train_features = train_data[-1]
    valid_features = valid_data[-1]
    cosin_dist = cosin_metric(valid_features, train_features)
    train_df = train_data[0]
    valid_df = valid_data[0]
    save_top3_results(train_df, valid_df, cosin_dist, data_type)
    train_label = train_df[ANTIBODY_CODE].values
    valid_label = valid_df[ANTIBODY_CODE].values
    label = generate_label(valid_label, train_label)
    # Samples with at least one same-antibody gallery entry: an upper
    # bound on how many matches are recoverable at all.
    match_max_num = (label.sum(axis=1)>0).sum()
    print('label match count', match_max_num)
    max_cosin = np.max(cosin_dist, axis=1)
    out_dir = opj(RESULT_DIR, 'cache', 'match', model_name)
    os.makedirs(out_dir, exist_ok=True)
    for threshold in np.arange(0.6, 0.8, 0.01):
        # Keep only samples whose best match clears the threshold.
        ix = max_cosin > threshold
        sel_cosin_dist = cosin_dist[ix]
        sel_label = label[ix]
        sel_valid_df = valid_df[ix].copy()
        sample_num = len(sel_label)
        argmax = np.argmax(sel_cosin_dist, axis=1)
        max_cosin_dist = np.max(sel_cosin_dist, axis=1)
        # Whether each sample's nearest gallery entry shares its antibody.
        is_match = sel_label[range(sample_num), argmax]
        match_train_df = train_df.iloc[argmax]
        df = sel_valid_df[[ID, ANTIBODY_CODE, TARGET]]
        df['train_id'] = match_train_df[ID].values
        df['train_antibody_code'] = match_train_df[ANTIBODY_CODE].values
        df['train_antibody'] = match_train_df[ANTIBODY].values
        df['train_target'] = match_train_df[TARGET].values
        is_correct = df[TARGET]==df['train_target']
        df['is_match'] = is_match
        df['is_correct'] = is_correct
        df['cosin_dist'] = max_cosin_dist
        match_num = np.sum(is_match)
        correct_num = np.sum(is_correct)
        # EPS guards the divisions when nothing clears the threshold.
        acc = correct_num/(sample_num+EPS)
        recall = correct_num / len(valid_df)
        f1 = acc * recall * 2 / (acc + recall + EPS)
        print('threshold:%.2f, count:%d, match_num:%d, correct_num:%d, f1:%.4f acc:%.4f' % (threshold, sample_num, match_num, correct_num, f1, acc))
        fname = opj(out_dir, '%s_match_th%.2f.csv'%(data_type, threshold))
        df.to_csv(fname, index=False)
import argparse

# Command-line interface: model/epoch selection, debug subsampling, and
# which splits to evaluate.
parser = argparse.ArgumentParser(description='')
parser.add_argument('--model_name', default='face_all_class_resnet50_dropout_i768_aug2_5folds', type=str, help='model_name')
parser.add_argument('--epoch_name', default='045', type=str, help='cfg name')
parser.add_argument('--debug', default=0, type=int, help='cfg name')
parser.add_argument('--do_valid', default=1, type=int, help='')
parser.add_argument('--do_test', default=1, type=int, help='')
args = parser.parse_args()

if __name__ == '__main__':
    print( '%s: calling main function ... ' % os.path.basename(__file__))
    start = timer()
    # These module-level names are read by load_data/do_match above.
    model_name = args.model_name
    epoch_name = args.epoch_name
    epoch_name = 'epoch_%s' % epoch_name
    debug = args.debug == 1
    do_valid = args.do_valid == 1
    do_test = args.do_test == 1
    target_dict = {}
    model_dir = opj(RESULT_DIR, 'submissions', model_name, epoch_name)
    if do_valid:
        do_match(data_type='val')
    if do_test:
        do_match(data_type='test')
    end = timer()
    time0 = (end - start) / 60
    print('Time spent for cluster: %3.1f min' % time0)
    print('\nsuccess!')
| import sys
sys.path.insert(0, '..')
import os
import gc
import numpy as np
import pandas as pd
from config.config import *
from sklearn import preprocessing
import warnings
warnings.filterwarnings('ignore')
from tqdm import tqdm
from timeit import default_timer as timer
opj = os.path.join
ope = os.path.exists
def load_data(dataset='train'):
feature_fname = opj(model_dir, 'extract_feats_%s.npz' % dataset)
X = np.load(feature_fname)
features = X['feats']
df = pd.DataFrame({ID: X['ids']})
if debug:
num = 2000
df = df[:num]
features = features[:num]
if dataset == 'train':
meta_df = pd.read_csv(opj(DATA_DIR, 'split/external_trainset_antibody_split.csv'))
elif dataset == 'val':
meta_df = pd.read_csv(opj(DATA_DIR, 'split/external_validset_antibody_split.csv'))
elif dataset == 'ext':
meta_df = pd.read_csv(opj(DATA_DIR, 'meta/external_antibody_correct_meta.csv'))
elif dataset == 'test':
meta_df = pd.read_csv(opj(DATA_DIR, 'meta/test_leak_meta.csv'))
else:
raise ValueError(dataset)
df = pd.merge(df, meta_df, on=ID, how='left')
print('dataset %s, num: %d' % (dataset, len(df)))
data = (df, features)
return data
def cosin_metric(x1, x2):
'''
:param x1: (m, k)
:param x2: (n, k)
:return: (m, n)
'''
x1 = preprocessing.scale(x1)
x2 = preprocessing.scale(x2)
assert x1.shape[-1] == x2.shape[-1]
norm_x1 = np.linalg.norm(x1, axis=1).reshape(-1, 1) # (m, 1)
norm_x2 = np.linalg.norm(x2, axis=1).reshape(1, -1) # (1, n)
return np.dot(x1, x2.T) / norm_x1 / norm_x2
def generate_label(y1, y2):
'''
:param y1: (m,)
:param y2: (n,)
:return: (m, n)
'''
y1 = y1.reshape(-1, 1) # (m, 1)
y2 = y2.reshape(1, -1) # (1, n)
label1 = np.ones_like(y2) # (1, n)
label1 = y1 * label1 # (m, n)
label2 = np.ones_like(y1) # (m, 1)
label2 = label2 * y2 # (m, n)
label = (label1 == label2).astype('uint8')
return label
def save_top3_results(train_df, valid_df, cosin_dist, data_type):
valid_ids = valid_df[ID].values
top_index = [sub[::-1][:3] for sub in np.argsort(cosin_dist, axis=1)]
top_data = []
for index, valid_id in tqdm(enumerate(valid_ids)):
data = [valid_id]
top_ix = top_index[index]
dists = list(cosin_dist[index][top_ix])
match_ids = list(train_df.iloc[top_ix][ID].values)
data.extend(match_ids)
data.extend(dists)
top_data.append(data)
columns = [ID, 'top1', 'top2', 'top3', 'top1_score', 'top2_score', 'top3_score']
df = pd.DataFrame(data=top_data, columns=columns)
out_dir = opj(RESULT_DIR, 'cache', 'match', model_name)
os.makedirs(out_dir, exist_ok=True)
fname = opj(out_dir, '%s_top3.csv'%(data_type))
df.to_csv(fname, index=False)
def do_match(data_type='val'):
if data_type == 'val':
train_data = load_data(dataset='train') # df, features
else:
train_data = load_data(dataset='ext') # df, features
valid_data = load_data(dataset=data_type) # df, features
train_features = train_data[-1]
valid_features = valid_data[-1]
cosin_dist = cosin_metric(valid_features, train_features)
train_df = train_data[0]
valid_df = valid_data[0]
save_top3_results(train_df, valid_df, cosin_dist, data_type)
train_label = train_df[ANTIBODY_CODE].values
valid_label = valid_df[ANTIBODY_CODE].values
label = generate_label(valid_label, train_label)
match_max_num = (label.sum(axis=1)>0).sum()
print('label match count', match_max_num)
max_cosin = np.max(cosin_dist, axis=1)
out_dir = opj(RESULT_DIR, 'cache', 'match', model_name)
os.makedirs(out_dir, exist_ok=True)
for threshold in np.arange(0.6, 0.8, 0.01):
ix = max_cosin > threshold
sel_cosin_dist = cosin_dist[ix]
sel_label = label[ix]
sel_valid_df = valid_df[ix].copy()
sample_num = len(sel_label)
argmax = np.argmax(sel_cosin_dist, axis=1)
max_cosin_dist = np.max(sel_cosin_dist, axis=1)
is_match = sel_label[range(sample_num), argmax]
match_train_df = train_df.iloc[argmax]
df = sel_valid_df[[ID, ANTIBODY_CODE, TARGET]]
df['train_id'] = match_train_df[ID].values
df['train_antibody_code'] = match_train_df[ANTIBODY_CODE].values
df['train_antibody'] = match_train_df[ANTIBODY].values
df['train_target'] = match_train_df[TARGET].values
is_correct = df[TARGET]==df['train_target']
df['is_match'] = is_match
df['is_correct'] = is_correct
df['cosin_dist'] = max_cosin_dist
match_num = np.sum(is_match)
correct_num = np.sum(is_correct)
acc = correct_num/(sample_num+EPS)
recall = correct_num / len(valid_df)
f1 = acc * recall * 2 / (acc + recall + EPS)
print('threshold:%.2f, count:%d, match_num:%d, correct_num:%d, f1:%.4f acc:%.4f' % (threshold, sample_num, match_num, correct_num, f1, acc))
fname = opj(out_dir, '%s_match_th%.2f.csv'%(data_type, threshold))
df.to_csv(fname, index=False)
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('--model_name', default='face_all_class_resnet50_dropout_i768_aug2_5folds', type=str, help='model_name')
parser.add_argument('--epoch_name', default='045', type=str, help='cfg name')
parser.add_argument('--debug', default=0, type=int, help='cfg name')
parser.add_argument('--do_valid', default=1, type=int, help='')
parser.add_argument('--do_test', default=1, type=int, help='')
args = parser.parse_args()
if __name__ == '__main__':
print( '%s: calling main function ... ' % os.path.basename(__file__))
start = timer()
model_name = args.model_name
epoch_name = args.epoch_name
epoch_name = 'epoch_%s' % epoch_name
debug = args.debug == 1
do_valid = args.do_valid == 1
do_test = args.do_test == 1
target_dict = {}
model_dir = opj(RESULT_DIR, 'submissions', model_name, epoch_name)
if do_valid:
do_match(data_type='val')
if do_test:
do_match(data_type='test')
end = timer()
time0 = (end - start) / 60
print('Time spent for cluster: %3.1f min' % time0)
print('\nsuccess!') | en | 0.757545 | :param x1: (m, k) :param x2: (n, k) :return: (m, n) # (m, 1) # (1, n) :param y1: (m,) :param y2: (n,) :return: (m, n) # (m, 1) # (1, n) # (1, n) # (m, n) # (m, 1) # (m, n) # df, features # df, features # df, features | 2.179097 | 2 |
hivemind_webchat/__init__.py | JarbasHiveMind/HiveMind-webchat | 2 | 6620834 | <reponame>JarbasHiveMind/HiveMind-webchat
import asyncio
import os
import os.path
import socket
import threading
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
def get_ip():
    """Best-effort discovery of this host's outward-facing IP address.

    "Connecting" a UDP socket sends no packet, but makes the OS choose an
    outgoing interface whose address we read back.  Falls back to
    127.0.0.1 on any failure.

    taken from https://stackoverflow.com/a/28950776/13703283
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # The target doesn't even have to be reachable.
        probe.connect(('10.255.255.255', 1))
        ip_address = probe.getsockname()[0]
    except Exception:
        ip_address = '127.0.0.1'
    finally:
        probe.close()
    return ip_address
class MainHandler(tornado.web.RequestHandler):
    """Serve the chat page for GET /."""

    def get(self):
        self.render('index.html')
class StaticFileHandler(tornado.web.RequestHandler):
    """Render the client-side app script.

    NOTE(review): this class shares a name with
    tornado.web.StaticFileHandler but is not referenced by the routes in
    WebChat.run (which use Tornado's own handler) -- apparently dead code.
    """

    def get(self):
        self.render('js/app.js')
class WebChat(threading.Thread):
    """Run a Tornado HTTP server for the chat UI on a background thread."""

    def __init__(self, port, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.port = port  # TCP port the HTTP server listens on

    def run(self):
        # Tornado's IOLoop needs an asyncio loop bound to this thread.
        asyncio.set_event_loop(asyncio.new_event_loop())
        routes = [
            tornado.web.url(r"/", MainHandler, name="main"),
            tornado.web.url(r"/static/(.*)", tornado.web.StaticFileHandler,
                            {'path': './'})
        ]
        settings = {
            "debug": False,
            "template_path": os.path.join(os.path.dirname(__file__), "templates"),
            "static_path": os.path.join(os.path.dirname(__file__), "static"),
        }
        application = tornado.web.Application(routes, **settings)
        httpServer = tornado.httpserver.HTTPServer(application)
        httpServer.listen(self.port)
        print(f"Starting WebChat: {get_ip()}:{self.port}")
        # Blocks until stop() is called from another thread.
        tornado.ioloop.IOLoop.instance().start()

    def stop(self):
        # Stops the IOLoop started in run(); the thread then finishes.
        tornado.ioloop.IOLoop.instance().stop()
| import asyncio
import os
import os.path
import socket
import threading
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
def get_ip():
# taken from https://stackoverflow.com/a/28950776/13703283
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except Exception:
IP = '127.0.0.1'
finally:
s.close()
return IP
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render('index.html')
class StaticFileHandler(tornado.web.RequestHandler):
def get(self):
self.render('js/app.js')
class WebChat(threading.Thread):
def __init__(self, port, *args, **kwargs):
super().__init__(*args, **kwargs)
self.port = port
def run(self):
asyncio.set_event_loop(asyncio.new_event_loop())
routes = [
tornado.web.url(r"/", MainHandler, name="main"),
tornado.web.url(r"/static/(.*)", tornado.web.StaticFileHandler,
{'path': './'})
]
settings = {
"debug": False,
"template_path": os.path.join(os.path.dirname(__file__), "templates"),
"static_path": os.path.join(os.path.dirname(__file__), "static"),
}
application = tornado.web.Application(routes, **settings)
httpServer = tornado.httpserver.HTTPServer(application)
httpServer.listen(self.port)
print(f"Starting WebChat: {get_ip()}:{self.port}")
tornado.ioloop.IOLoop.instance().start()
def stop(self):
tornado.ioloop.IOLoop.instance().stop() | en | 0.956703 | # taken from https://stackoverflow.com/a/28950776/13703283 # doesn't even have to be reachable | 2.591337 | 3 |
pandas_gen_sql.py | nic-crouch-sfc/snowflake-table-gen | 0 | 6620835 | <gh_stars>0
#!/usr/bin/env python
import pandas
from sqlalchemy import create_engine
from parameters import user, password, account, database, schema, warehouse, role, table_name, csv_file
from snowflake.connector.pandas_tools import pd_writer
# Build a SQLAlchemy engine from the connection parameters module.
engine = create_engine(
    'snowflake://{user}:{password}@{account}/{database}/{schema}?warehouse={warehouse}&role={role}'.format(
        user=user,
        password=password,
        account=account,
        database=database,
        schema=schema,
        warehouse=warehouse,
        role=role
    )
)
# NOTE(review): these self-assignments are no-ops (left over from a
# rename?); safe to delete.
table_name = table_name
csv_file = csv_file
with engine.connect() as connection:
    # Sanity-check the connection by querying the Snowflake version.
    results = connection.execute('select current_version()').fetchone()
    print(results[0])
    df = pandas.read_csv(csv_file)
    # if_exists='fail' aborts rather than overwrite an existing table;
    # pd_writer bulk-loads via Snowflake's staging path.
    df.to_sql(table_name, connection, if_exists='fail', index=False, chunksize=16000, method=pd_writer)
| #!/usr/bin/env python
import pandas
from sqlalchemy import create_engine
from parameters import user, password, account, database, schema, warehouse, role, table_name, csv_file
from snowflake.connector.pandas_tools import pd_writer
engine = create_engine(
'snowflake://{user}:{password}@{account}/{database}/{schema}?warehouse={warehouse}&role={role}'.format(
user=user,
password=password,
account=account,
database=database,
schema=schema,
warehouse=warehouse,
role=role
)
)
table_name = table_name
csv_file = csv_file
with engine.connect() as connection:
results = connection.execute('select current_version()').fetchone()
print(results[0])
df = pandas.read_csv(csv_file)
df.to_sql(table_name, connection, if_exists='fail', index=False, chunksize=16000, method=pd_writer) | ru | 0.26433 | #!/usr/bin/env python | 2.907651 | 3 |
src/test.py | zhearing/SqueezeSeg | 0 | 6620836 | <filename>src/test.py
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# author:charles
# datetime:18-9-28 下午8:16
# software:PyCharm
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import sys
import time
import glob
import numpy as np
from six.moves import xrange
import tensorflow as tf
from PIL import Image
from config import *
from imdb import kitti
from utils.util import *
from nets import *
import pandas as pd
FLAGS = tf.app.flags.FLAGS

# Command-line flags: model checkpoint path, input glob of .npy LiDAR
# scans, output directory for per-scan CSV predictions, and GPU id.
tf.app.flags.DEFINE_string(
    'checkpoint', '../scripts/log/train/model.ckpt-49999',
    """Path to the model parameter file.""")
tf.app.flags.DEFINE_string(
    'input_path', '/media/zyzhong/data3/Alibaba/TestSet/npy/*',
    """Input lidar scan to be detected. Can process glob input such as """
    """./data/samples/*.npy or single input.""")
tf.app.flags.DEFINE_string(
    'out_dir', '../scripts/log/answers/', """Directory to dump output.""")
tf.app.flags.DEFINE_string('gpu', '0', """gpu id.""")
# my code
def geneate_results():
    """Placeholder, never implemented.

    NOTE(review): name is a typo of 'generate_results'; kept as-is to
    avoid breaking any external references.
    """
    pass
def test():
    """Detect LiDAR data.

    Restores the SqueezeSeg model from FLAGS.checkpoint and, for every
    .npy scan matching FLAGS.input_path, predicts a per-point class and
    writes it as a one-column CSV ('category') into FLAGS.out_dir.
    Scans longer than 32768 points (64*512) are processed in two padded
    chunks; shorter scans are zero-padded to one chunk.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
    with tf.Graph().as_default():
        mc = alibaba_squeezeSeg_config()
        mc.LOAD_PRETRAINED_MODEL = False
        mc.BATCH_SIZE = 1  # TODO(bichen): fix this hard-coded batch size.
        model = SqueezeSeg(mc)
        saver = tf.train.Saver(model.model_params)
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            saver.restore(sess, FLAGS.checkpoint)

            def generate_pred_cls(f, mc, model, sess):
                # f: (ZENITH_LEVEL, AZIMUTH_LEVEL, 5) range image; channel
                # 4 > 0 marks valid points (mask).
                lidar = f
                lidar_mask = np.reshape(
                    (lidar[:, :, 4] > 0),
                    [mc.ZENITH_LEVEL, mc.AZIMUTH_LEVEL, 1]
                )
                # Normalise inputs with the training statistics.
                lidar = (lidar - mc.INPUT_MEAN) / mc.INPUT_STD
                pred_cls = sess.run(
                    model.pred_cls,
                    feed_dict={
                        model.lidar_input: [lidar],
                        model.keep_prob: 1.0,
                        model.lidar_mask: [lidar_mask]
                    }
                )
                return pred_cls

            for f in glob.iglob(FLAGS.input_path):
                # save the data
                # NOTE(review): str.strip('.npy') removes any of the
                # characters '.', 'n', 'p', 'y' from both ends of the
                # path -- likely os.path.splitext was intended; verify
                # against the actual file names.
                file_name = f.strip('.npy').split('/')[-1]
                file_path = FLAGS.out_dir + file_name + '.csv'
                if os.path.exists(file_path):
                    # Already processed; skip.
                    print(file_path)
                    continue
                fnpy = np.load(f).astype(np.float32, copy=False)
                if np.shape(fnpy)[0] >= 32768:
                    # First chunk: exactly one full 64x512 range image.
                    f1 = np.load(f).astype(np.float32, copy=False)[:32768, :5]
                    f1 = np.reshape(f1, (64, 512, 5))
                    fillnp = np.zeros((32768, 5)).astype(np.float32)
                    # Second chunk: remainder, zero-padded to a full image.
                    f2 = np.load(f).astype(np.float32, copy=False)[32768:, :5]
                    avildable_number = np.shape(f2)[0]
                    padding_number = 32768 - avildable_number  # adding number
                    fillnp[:np.shape(f2)[0], :5] = f2[:]
                    # print(np.shape(fnpy))
                    # print(np.shape(f1), np.shape(fillnp))
                    fillnp = np.reshape(fillnp, (64, 512, 5))
                    pred_cls1 = generate_pred_cls(f1, mc, model, sess)
                    pred_cls2 = generate_pred_cls(fillnp, mc, model, sess)
                    result1 = np.reshape(pred_cls1, (32768, 1))
                    result2 = np.reshape(pred_cls2, (32768, 1))
                    # Stitch the two chunk predictions back to scan length.
                    result = np.zeros((np.shape(fnpy)[0], 1)).astype(np.float32, copy=True)
                    result[:32768, :] = result1
                    result[32768:(32768+avildable_number), :] = result2[:avildable_number, :]
                else:
                    # Single zero-padded chunk; keep only the real points.
                    f1 = np.zeros((32768, 5))
                    avildable_number = np.shape(fnpy)[0]
                    f1[:np.shape(fnpy)[0], :5] = fnpy[:, :5]
                    f1 = np.reshape(f1, (64, 512, 5))
                    pred_cls = generate_pred_cls(f1, mc, model, sess)
                    result = np.reshape(pred_cls, (32768, 1))
                    result = result[:avildable_number, :]
                # print(file_name)
                # print(pred_cls)
                pdata = pd.DataFrame(np.reshape(result, (-1, 1)),columns=['category'])
                if not os.path.exists(file_path):
                    pdata[['category']].astype('int32').to_csv(file_path, index=None, header=None)
                # np.save(
                #     os.path.join(FLAGS.out_dir, 'pred_' + file_name + '.npy'),
                #     pred_cls[0]
                # )
def main(argv=None):
if not tf.gfile.Exists(FLAGS.out_dir):
tf.gfile.MakeDirs(FLAGS.out_dir)
print('Detection output written to {}'.format(FLAGS.out_dir))
test()
if __name__ == '__main__':
tf.app.run()
| <filename>src/test.py
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# author:charles
# datetime:18-9-28 下午8:16
# software:PyCharm
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import sys
import time
import glob
import numpy as np
from six.moves import xrange
import tensorflow as tf
from PIL import Image
from config import *
from imdb import kitti
from utils.util import *
from nets import *
import pandas as pd
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'checkpoint', '../scripts/log/train/model.ckpt-49999',
"""Path to the model parameter file.""")
tf.app.flags.DEFINE_string(
'input_path', '/media/zyzhong/data3/Alibaba/TestSet/npy/*',
"""Input lidar scan to be detected. Can process glob input such as """
"""./data/samples/*.npy or single input.""")
tf.app.flags.DEFINE_string(
'out_dir', '../scripts/log/answers/', """Directory to dump output.""")
tf.app.flags.DEFINE_string('gpu', '0', """gpu id.""")
# my code
def geneate_results():
pass
def test():
"""Detect LiDAR data."""
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
with tf.Graph().as_default():
mc = alibaba_squeezeSeg_config()
mc.LOAD_PRETRAINED_MODEL = False
mc.BATCH_SIZE = 1 # TODO(bichen): fix this hard-coded batch size.
model = SqueezeSeg(mc)
saver = tf.train.Saver(model.model_params)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
saver.restore(sess, FLAGS.checkpoint)
def generate_pred_cls(f, mc, model, sess):
lidar = f
lidar_mask = np.reshape(
(lidar[:, :, 4] > 0),
[mc.ZENITH_LEVEL, mc.AZIMUTH_LEVEL, 1]
)
lidar = (lidar - mc.INPUT_MEAN) / mc.INPUT_STD
pred_cls = sess.run(
model.pred_cls,
feed_dict={
model.lidar_input: [lidar],
model.keep_prob: 1.0,
model.lidar_mask: [lidar_mask]
}
)
return pred_cls
for f in glob.iglob(FLAGS.input_path):
# save the data
file_name = f.strip('.npy').split('/')[-1]
file_path = FLAGS.out_dir + file_name + '.csv'
if os.path.exists(file_path):
print(file_path)
continue
fnpy = np.load(f).astype(np.float32, copy=False)
if np.shape(fnpy)[0] >= 32768:
f1 = np.load(f).astype(np.float32, copy=False)[:32768, :5]
f1 = np.reshape(f1, (64, 512, 5))
fillnp = np.zeros((32768, 5)).astype(np.float32)
f2 = np.load(f).astype(np.float32, copy=False)[32768:, :5]
avildable_number = np.shape(f2)[0]
padding_number = 32768 - avildable_number # adding number
fillnp[:np.shape(f2)[0], :5] = f2[:]
# print(np.shape(fnpy))
# print(np.shape(f1), np.shape(fillnp))
fillnp = np.reshape(fillnp, (64, 512, 5))
pred_cls1 = generate_pred_cls(f1, mc, model, sess)
pred_cls2 = generate_pred_cls(fillnp, mc, model, sess)
result1 = np.reshape(pred_cls1, (32768, 1))
result2 = np.reshape(pred_cls2, (32768, 1))
result = np.zeros((np.shape(fnpy)[0], 1)).astype(np.float32, copy=True)
result[:32768, :] = result1
result[32768:(32768+avildable_number), :] = result2[:avildable_number, :]
else:
f1 = np.zeros((32768, 5))
avildable_number = np.shape(fnpy)[0]
f1[:np.shape(fnpy)[0], :5] = fnpy[:, :5]
f1 = np.reshape(f1, (64, 512, 5))
pred_cls = generate_pred_cls(f1, mc, model, sess)
result = np.reshape(pred_cls, (32768, 1))
result = result[:avildable_number, :]
# print(file_name)
# print(pred_cls)
pdata = pd.DataFrame(np.reshape(result, (-1, 1)),columns=['category'])
if not os.path.exists(file_path):
pdata[['category']].astype('int32').to_csv(file_path, index=None, header=None)
# np.save(
# os.path.join(FLAGS.out_dir, 'pred_' + file_name + '.npy'),
# pred_cls[0]
# )
def main(argv=None):
if not tf.gfile.Exists(FLAGS.out_dir):
tf.gfile.MakeDirs(FLAGS.out_dir)
print('Detection output written to {}'.format(FLAGS.out_dir))
test()
if __name__ == '__main__':
tf.app.run()
| en | 0.396428 | #!/usr/bin/env python #-*- coding:utf-8 -*- # author:charles # datetime:18-9-28 下午8:16 # software:PyCharm Path to the model parameter file. Input lidar scan to be detected. Can process glob input such as ./data/samples/*.npy or single input. Directory to dump output. gpu id. # my code Detect LiDAR data. # TODO(bichen): fix this hard-coded batch size. # save the data # adding number # print(np.shape(fnpy)) # print(np.shape(f1), np.shape(fillnp)) # print(file_name) # print(pred_cls) # np.save( # os.path.join(FLAGS.out_dir, 'pred_' + file_name + '.npy'), # pred_cls[0] # ) | 2.197749 | 2 |
scrape_mars.py | sineekarn/web-scraping-challenge | 0 | 6620837 | #!/usr/bin/env python
# coding: utf-8
# Dependencies
from bs4 import BeautifulSoup as bs
from splinter import Browser
import pandas as pd
import os
import time
import requests
from pprint import pprint
import re
def scrape():
    """Scrape Mars news, featured image, weather, facts and hemisphere data.

    Returns:
        dict with keys ``news_title``, ``news_pg``, ``featured_image_url``,
        ``mars_weather``, ``table_df`` and ``hemisphere_titles_image_urls``.
    """
    # https://splinter.readthedocs.io/en/latest/drivers/chrome.html
    executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
    browser = Browser('chrome', **executable_path, headless=True)

    # The browser owns a chromedriver OS process; wrap all scraping in
    # try/finally so the process is released even when a page fails.
    try:
        # --- NASA Mars News ------------------------------------------------
        url = 'https://mars.nasa.gov/news/'
        browser.visit(url)
        soup = bs(browser.html, 'html.parser')

        # Latest news title and its teaser paragraph.
        news_find = soup.find("ul", class_="item_list")
        news_title = news_find.find("div", class_="content_title").text
        print(news_title)
        news_pg = soup.find("div", class_="article_teaser_body").text
        print(news_pg)

        # --- JPL Featured Image --------------------------------------------
        space_image_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
        browser.visit(space_image_url)
        soup = bs(browser.html, "html.parser")
        # The full-size .jpg URL is embedded in the article tag's inline style.
        image_path = soup.find('article')['style'].replace('background-image: url(', '').replace(');', '')[1:-1]
        featured_image_url = "https://www.jpl.nasa.gov" + image_path

        # --- Mars Weather (Twitter) ----------------------------------------
        weather_url = 'https://twitter.com/marswxreport?lang=en'
        browser.driver.set_window_size(1000, 8000)
        browser.visit(weather_url)
        time.sleep(8)  # give the JS-heavy page time to render tweets
        soup = bs(browser.html, 'html.parser')
        results = soup.find_all("span", class_="css-901oao css-16my406 r-1qd0xha r-ad9z0x r-bcqeeo r-qvutc0")
        print(results)
        # Keep only spans that look like a weather report tweet.
        mars_weather = [v.text for v in results if "gusting" in v.text]
        print(mars_weather[0])

        # --- Mars Facts table ----------------------------------------------
        fact_url = "https://space-facts.com/mars/"
        table = pd.read_html(fact_url)
        table_df = table[0].to_html(classes='table table-striped', index=False, header=False)
        print(table_df)

        # --- Mars Hemispheres ----------------------------------------------
        hemispheres_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
        browser.visit(hemispheres_url)
        soup = bs(browser.html, 'html.parser')
        items = soup.find_all('div', class_='description')
        main_url = "https://astrogeology.usgs.gov"
        hemisphere_titles_image_urls = []
        for i in items:
            # Follow each hemisphere's detail page for its title + image URL.
            page_url = main_url + i.find("a")['href']
            browser.visit(page_url)
            page_url_soup = bs(browser.html, 'html.parser')
            hemispheres_image_url = page_url_soup.find("ul").find("a")['href']
            hemispheres_title = page_url_soup.find("h2", class_="title").text
            hemisphere_titles_image_urls.append({"Title": hemispheres_title, "Image_Url": hemispheres_image_url})

        mission_to_mars = {
            "news_title": news_title,
            "news_pg": news_pg,
            "featured_image_url": featured_image_url,
            "mars_weather": mars_weather[0],
            "table_df": table_df,
            "hemisphere_titles_image_urls": hemisphere_titles_image_urls
        }
        return mission_to_mars
    finally:
        # Fix: the original never closed the browser, leaking a chromedriver
        # process per call.
        browser.quit()
| #!/usr/bin/env python
# coding: utf-8
# Dependencies
from bs4 import BeautifulSoup as bs
from splinter import Browser
import pandas as pd
import os
import time
import requests
from pprint import pprint
import re
def scrape():
    """Scrape Mars news, featured image, weather, facts and hemisphere data.

    Returns:
        dict with keys ``news_title``, ``news_pg``, ``featured_image_url``,
        ``mars_weather``, ``table_df`` and ``hemisphere_titles_image_urls``.
    """
    # https://splinter.readthedocs.io/en/latest/drivers/chrome.html
    executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
    browser = Browser('chrome', **executable_path, headless=True)

    # The browser owns a chromedriver OS process; wrap all scraping in
    # try/finally so the process is released even when a page fails.
    try:
        # --- NASA Mars News ------------------------------------------------
        url = 'https://mars.nasa.gov/news/'
        browser.visit(url)
        soup = bs(browser.html, 'html.parser')

        # Latest news title and its teaser paragraph.
        news_find = soup.find("ul", class_="item_list")
        news_title = news_find.find("div", class_="content_title").text
        print(news_title)
        news_pg = soup.find("div", class_="article_teaser_body").text
        print(news_pg)

        # --- JPL Featured Image --------------------------------------------
        space_image_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
        browser.visit(space_image_url)
        soup = bs(browser.html, "html.parser")
        # The full-size .jpg URL is embedded in the article tag's inline style.
        image_path = soup.find('article')['style'].replace('background-image: url(', '').replace(');', '')[1:-1]
        featured_image_url = "https://www.jpl.nasa.gov" + image_path

        # --- Mars Weather (Twitter) ----------------------------------------
        weather_url = 'https://twitter.com/marswxreport?lang=en'
        browser.driver.set_window_size(1000, 8000)
        browser.visit(weather_url)
        time.sleep(8)  # give the JS-heavy page time to render tweets
        soup = bs(browser.html, 'html.parser')
        results = soup.find_all("span", class_="css-901oao css-16my406 r-1qd0xha r-ad9z0x r-bcqeeo r-qvutc0")
        print(results)
        # Keep only spans that look like a weather report tweet.
        mars_weather = [v.text for v in results if "gusting" in v.text]
        print(mars_weather[0])

        # --- Mars Facts table ----------------------------------------------
        fact_url = "https://space-facts.com/mars/"
        table = pd.read_html(fact_url)
        table_df = table[0].to_html(classes='table table-striped', index=False, header=False)
        print(table_df)

        # --- Mars Hemispheres ----------------------------------------------
        hemispheres_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
        browser.visit(hemispheres_url)
        soup = bs(browser.html, 'html.parser')
        items = soup.find_all('div', class_='description')
        main_url = "https://astrogeology.usgs.gov"
        hemisphere_titles_image_urls = []
        for i in items:
            # Follow each hemisphere's detail page for its title + image URL.
            page_url = main_url + i.find("a")['href']
            browser.visit(page_url)
            page_url_soup = bs(browser.html, 'html.parser')
            hemispheres_image_url = page_url_soup.find("ul").find("a")['href']
            hemispheres_title = page_url_soup.find("h2", class_="title").text
            hemisphere_titles_image_urls.append({"Title": hemispheres_title, "Image_Url": hemispheres_image_url})

        mission_to_mars = {
            "news_title": news_title,
            "news_pg": news_pg,
            "featured_image_url": featured_image_url,
            "mars_weather": mars_weather[0],
            "table_df": table_df,
            "hemisphere_titles_image_urls": hemisphere_titles_image_urls
        }
        return mission_to_mars
    finally:
        # Fix: the original never closed the browser, leaking a chromedriver
        # process per call.
        browser.quit()
| en | 0.570099 | #!/usr/bin/env python # coding: utf-8 # Dependencies # # https://splinter.readthedocs.io/en/latest/drivers/chrome.html # get_ipython().system('which chromedriver') # # NASA Mars News # Scrape the [NASA Mars News Site](https://mars.nasa.gov/news/) and collect the latest News Title and Paragraph Text. # Assign the text to variables that you can reference later. # Html object # Create a Beautiful Soup object / Parse the HTML using the Beautiful Soup library # print(soup.prettify()) # Retrieve the latest element that contains news title # Display scrapped data # Retrieve the latest element that contains news_paragraph # # Display scrapped data # # JPL Mars Space Images - Featured Image # Use splinter to navigate the site # HTML Object # Parse HTML with Beautiful Soup # Retrieve full size `.jpg` background-image url from style tag attribute for the current Featured Mars Image # Website Url # Display full link to featured image # # Mars Weather # Use splinter to navigate Mars Weather Twitter # HTML Object # Parse HTML with Beautiful Soup # scrape the latest Mars weather tweet from the page. # # Mars Facts # Use splinter to navigate Mars Facts webpage # browser.visit(url) # Use Pandas to "read_html" # # Mars Hemispheres # Visit hemispheres website through splinter module # HTML Object # Parse HTML with Beautiful Soup # Retreive all items that contain mars hemispheres information | 3.507245 | 4 |
product/migrations/0010_product_brand.py | bpaszcza/pola-backend | 30 | 6620838 | # Generated by Django 2.0.5 on 2018-06-12 14:48
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.5: adds an optional ``brand`` foreign key
    # on ``product.Product`` pointing at ``company.Brand``.

    # Must run after the Brand model exists (company 0015) and after the
    # previous product migration.
    dependencies = [
        ('company', '0015_brand'),
        ('product', '0009_auto_20180527_0848'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='brand',
            # blank/null: the brand is optional.  on_delete=CASCADE means
            # deleting a Brand also deletes products referencing it.
            # verbose_name is Polish ("product brand") — user-facing, keep as-is.
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to='company.Brand',
                verbose_name='Marka produktu',
            ),
        ),
    ]
| # Generated by Django 2.0.5 on 2018-06-12 14:48
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.5: adds an optional ``brand`` foreign key
    # on ``product.Product`` pointing at ``company.Brand``.

    # Must run after the Brand model exists (company 0015) and after the
    # previous product migration.
    dependencies = [
        ('company', '0015_brand'),
        ('product', '0009_auto_20180527_0848'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='brand',
            # blank/null: the brand is optional.  on_delete=CASCADE means
            # deleting a Brand also deletes products referencing it.
            # verbose_name is Polish ("product brand") — user-facing, keep as-is.
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to='company.Brand',
                verbose_name='Marka produktu',
            ),
        ),
    ]
| en | 0.702413 | # Generated by Django 2.0.5 on 2018-06-12 14:48 | 1.550253 | 2 |
admin_api/chalicelib/payment_routes.py | open-tag-manager/open-tag-manager | 2 | 6620839 | <reponame>open-tag-manager/open-tag-manager<gh_stars>1-10
from chalice import Blueprint, Response
from . import app, authorizer
from .dynamodb import get_org_table
from .decorator import check_org_permission, check_json_body
from decimal import Decimal
import os
import stripe
import time
payment_routes = Blueprint(__name__)
stripe.api_key = os.environ.get('STRIPE_SK')
@payment_routes.route('/', methods=['GET'], cors=True, authorizer=authorizer)
@check_org_permission('admin')
def get_payment_customer(org):
    """Return the Stripe customer attached to *org*, or None if none is set."""
    record = get_org_table().get_item(Key={'name': org})
    if 'Item' not in record:
        return Response(body={'error': 'not found'}, status_code=404)

    item = record['Item']
    if 'payment' not in item:
        # Organisation exists but payment was never configured.
        return None

    stored = item['payment']
    # Fetch the live customer and its default payment method from Stripe.
    customer = stripe.Customer.retrieve(stored['id'])
    default_pm = customer['invoice_settings']['default_payment_method']
    payment_method = stripe.PaymentMethod.retrieve(default_pm)
    return {
        'id': stored['id'],
        'name': customer['name'],
        'email': customer['email'],
        'payment_method': payment_method,
    }
@payment_routes.route('/', methods=['PUT'], cors=True, authorizer=authorizer)
@check_org_permission('admin')
@check_json_body({
    'email': {'type': 'string', 'required': True, 'empty': False},
    'name': {'type': 'string', 'required': True, 'empty': False},
    'payment_method': {'type': 'string', 'required': True, 'empty': False}
})
def put_payment_customer(org):
    """Create or update the Stripe customer for *org* and persist it."""
    record = get_org_table().get_item(Key={'name': org})
    if 'Item' not in record:
        return Response(body={'error': 'not found'}, status_code=404)

    payload = app.current_request.json_body
    item = record['Item']

    if 'payment' in item:
        # Existing customer: attach the new payment method and update details.
        existing = item['payment']
        stripe.PaymentMethod.attach(payload['payment_method'], customer=existing['id'])
        customer = stripe.Customer.modify(
            existing['id'],
            email=payload['email'],
            name=payload['name'],
            invoice_settings={'default_payment_method': payload['payment_method']}
        )
    else:
        # First-time setup: create a brand-new Stripe customer for the org.
        customer = stripe.Customer.create(
            email=payload['email'],
            name=payload['name'],
            metadata={'org': org},
            payment_method=payload['payment_method'],
            invoice_settings={'default_payment_method': payload['payment_method']}
        )

    # Persist the Stripe customer object on the organisation record.
    item['updated_at'] = Decimal(time.time())
    item['payment'] = customer
    get_org_table().put_item(Item=item)

    default_pm = customer['invoice_settings']['default_payment_method']
    payment_method = stripe.PaymentMethod.retrieve(default_pm)
    return {
        'id': customer['id'],
        'name': payload['name'],
        'email': payload['email'],
        'payment_method': payment_method,
    }
| from chalice import Blueprint, Response
from . import app, authorizer
from .dynamodb import get_org_table
from .decorator import check_org_permission, check_json_body
from decimal import Decimal
import os
import stripe
import time
payment_routes = Blueprint(__name__)
stripe.api_key = os.environ.get('STRIPE_SK')
@payment_routes.route('/', methods=['GET'], cors=True, authorizer=authorizer)
@check_org_permission('admin')
def get_payment_customer(org):
    """Return the Stripe customer attached to *org*, or None if none is set."""
    record = get_org_table().get_item(Key={'name': org})
    if 'Item' not in record:
        return Response(body={'error': 'not found'}, status_code=404)

    item = record['Item']
    if 'payment' not in item:
        # Organisation exists but payment was never configured.
        return None

    stored = item['payment']
    # Fetch the live customer and its default payment method from Stripe.
    customer = stripe.Customer.retrieve(stored['id'])
    default_pm = customer['invoice_settings']['default_payment_method']
    payment_method = stripe.PaymentMethod.retrieve(default_pm)
    return {
        'id': stored['id'],
        'name': customer['name'],
        'email': customer['email'],
        'payment_method': payment_method,
    }
@payment_routes.route('/', methods=['PUT'], cors=True, authorizer=authorizer)
@check_org_permission('admin')
@check_json_body({
    'email': {'type': 'string', 'required': True, 'empty': False},
    'name': {'type': 'string', 'required': True, 'empty': False},
    'payment_method': {'type': 'string', 'required': True, 'empty': False}
})
def put_payment_customer(org):
    """Create or update the Stripe customer for *org* and persist it."""
    record = get_org_table().get_item(Key={'name': org})
    if 'Item' not in record:
        return Response(body={'error': 'not found'}, status_code=404)

    payload = app.current_request.json_body
    item = record['Item']

    if 'payment' in item:
        # Existing customer: attach the new payment method and update details.
        existing = item['payment']
        stripe.PaymentMethod.attach(payload['payment_method'], customer=existing['id'])
        customer = stripe.Customer.modify(
            existing['id'],
            email=payload['email'],
            name=payload['name'],
            invoice_settings={'default_payment_method': payload['payment_method']}
        )
    else:
        # First-time setup: create a brand-new Stripe customer for the org.
        customer = stripe.Customer.create(
            email=payload['email'],
            name=payload['name'],
            metadata={'org': org},
            payment_method=payload['payment_method'],
            invoice_settings={'default_payment_method': payload['payment_method']}
        )

    # Persist the Stripe customer object on the organisation record.
    item['updated_at'] = Decimal(time.time())
    item['payment'] = customer
    get_org_table().put_item(Item=item)

    default_pm = customer['invoice_settings']['default_payment_method']
    payment_method = stripe.PaymentMethod.retrieve(default_pm)
    return {
        'id': customer['id'],
        'name': payload['name'],
        'email': payload['email'],
        'payment_method': payment_method,
    }
src/communicate_net.py | tyburam/neural-cryptography-pytorch | 7 | 6620840 | import torch.nn as nn
import torch.nn.functional as F
from src.config import *
class CommunicateNet(nn.Module):
    """Dense + 1-D convolution network shared by the crypto agents.

    A linear mixing layer over the concatenated message+key vector is
    followed by a small Conv1d stack whose output is squashed into [-1, 1].
    """

    def __init__(self):
        super(CommunicateNet, self).__init__()
        # Dense layer over the full message+key vector; MSG_LEN and KEY_LEN
        # come from src.config (star-imported).
        self.input = nn.Linear(MSG_LEN + KEY_LEN, MSG_LEN + KEY_LEN)
        self.hidden = nn.Sigmoid()
        # Kernel-size-1 conv stack over channels 1 -> 2 -> 4 -> 4 -> 1;
        # conv1's stride of 2 roughly halves the sequence length.
        self.conv0 = nn.Conv1d(1, 2, 1, stride=1)
        self.conv1 = nn.Conv1d(2, 4, 1, stride=2)
        self.conv2 = nn.Conv1d(4, 4, 1, stride=1)
        self.conv3 = nn.Conv1d(4, 1, 1, stride=1)

    def forward(self, x):
        # Assumes x is (batch, MSG_LEN + KEY_LEN) — TODO confirm with callers.
        x = self.input(x)
        # unsqueeze(1) adds the channel dim Conv1d needs: (batch, 1, length).
        x = self.hidden(x).unsqueeze(1)
        x = F.relu(self.conv0(x))
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        # Final conv maps back to one channel; tanh bounds values to [-1, 1].
        x = self.conv3(x).tanh()
        # NOTE(review): squeeze() drops *all* size-1 dims, including a
        # batch dim of 1 — verify callers never pass batch_size == 1.
        x = x.squeeze()
        return x
| import torch.nn as nn
import torch.nn.functional as F
from src.config import *
class CommunicateNet(nn.Module):
    """Dense + 1-D convolution network shared by the crypto agents.

    A linear mixing layer over the concatenated message+key vector is
    followed by a small Conv1d stack whose output is squashed into [-1, 1].
    """

    def __init__(self):
        super(CommunicateNet, self).__init__()
        # Dense layer over the full message+key vector; MSG_LEN and KEY_LEN
        # come from src.config (star-imported).
        self.input = nn.Linear(MSG_LEN + KEY_LEN, MSG_LEN + KEY_LEN)
        self.hidden = nn.Sigmoid()
        # Kernel-size-1 conv stack over channels 1 -> 2 -> 4 -> 4 -> 1;
        # conv1's stride of 2 roughly halves the sequence length.
        self.conv0 = nn.Conv1d(1, 2, 1, stride=1)
        self.conv1 = nn.Conv1d(2, 4, 1, stride=2)
        self.conv2 = nn.Conv1d(4, 4, 1, stride=1)
        self.conv3 = nn.Conv1d(4, 1, 1, stride=1)

    def forward(self, x):
        # Assumes x is (batch, MSG_LEN + KEY_LEN) — TODO confirm with callers.
        x = self.input(x)
        # unsqueeze(1) adds the channel dim Conv1d needs: (batch, 1, length).
        x = self.hidden(x).unsqueeze(1)
        x = F.relu(self.conv0(x))
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        # Final conv maps back to one channel; tanh bounds values to [-1, 1].
        x = self.conv3(x).tanh()
        # NOTE(review): squeeze() drops *all* size-1 dims, including a
        # batch dim of 1 — verify callers never pass batch_size == 1.
        x = x.squeeze()
        return x
| none | 1 | 2.909465 | 3 | |
erdos/erdos001.py | JSON-Welbourne/CodingChallenges | 0 | 6620841 | # Problem #1
# Minimum Value
# Let there be a set of N natural numbers 1,2,3,4...N.
# We are allowed to insert + or − sign in front of each number and add all the resultant numbers.
# The minimum non-negative value obtained is denoted as D(N).
# Find the value of D(1)+D(2)+...+D(19216812112)
def D(n):
    """Minimum non-negative value of ±1 ±2 ... ±n.

    With T = n(n+1)/2, flipping any one sign changes the sum by an even
    amount, so every achievable sum has T's parity.  Conversely T mod 2 is
    always achievable: each run of four consecutive numbers cancels with the
    sign pattern +,-,-,+ and the 0-3 leftover numbers reach 0 or 1 directly
    (e.g. 1+2-3 = 0, 1-2 = -1 -> |...| = 1).  Hence D(n) = T mod 2.

    This replaces the original O(2**n) subset search (which also evaluated
    the inner sum twice per subset) with an O(1) formula, making the target
    D(19216812112) computable.
    """
    return (n * (n + 1) // 2) % 2
def s(n):
    """Return D(0) + D(1) + ... + D(n)."""
    # Generator expression: no throwaway list inside sum().
    return sum(D(i) for i in range(n + 1))
def main():
    """Print n, D(n) and the cumulative sum s(n) for n = 1 .. 999."""
    for n in range(1, 1000):
        print("{}, {}, {}".format(n, D(n), s(n)))


main()
| # Problem #1
# Minimum Value
# Let there be a set of N natural numbers 1,2,3,4...N.
# We are allowed to insert + or − sign in front of each number and add all the resultant numbers.
# The minimum non-negative value obtained is denoted as D(N).
# Find the value of D(1)+D(2)+...+D(19216812112)
def D(n):
    """Minimum non-negative value of ±1 ±2 ... ±n.

    With T = n(n+1)/2, flipping any one sign changes the sum by an even
    amount, so every achievable sum has T's parity.  Conversely T mod 2 is
    always achievable: each run of four consecutive numbers cancels with the
    sign pattern +,-,-,+ and the 0-3 leftover numbers reach 0 or 1 directly
    (e.g. 1+2-3 = 0, 1-2 = -1 -> |...| = 1).  Hence D(n) = T mod 2.

    This replaces the original O(2**n) subset search (which also evaluated
    the inner sum twice per subset) with an O(1) formula, making the target
    D(19216812112) computable.
    """
    return (n * (n + 1) // 2) % 2
def s(n):
    """Return D(0) + D(1) + ... + D(n)."""
    # Generator expression: no throwaway list inside sum().
    return sum(D(i) for i in range(n + 1))
def main():
    """Print n, D(n) and the cumulative sum s(n) for n = 1 .. 999."""
    for n in range(1, 1000):
        print("{}, {}, {}".format(n, D(n), s(n)))


main()
| en | 0.825675 | # Problem #1 # Minimum Value # Let there be a set of N natural numbers 1,2,3,4...N. # We are allowed to insert + or − sign in front of each number and add all the resultant numbers. # The minimum non-negative value obtained is denoted as D(N). # Find the value of D(1)+D(2)+...+D(19216812112) | 3.275178 | 3 |
laimbionet/networks/no_pool_net.py | Fjaviervera/LaimbioNet | 0 | 6620842 | <reponame>Fjaviervera/LaimbioNet<gh_stars>0
import tensorflow as tf
class No_pool_net:
    """Fully-convolutional TF1 network with no pooling in the main path.

    The graph is built eagerly in __init__ for a fixed 256x256 input and
    exposed as ``self.output``.  Variable-scope names are derived from the
    per-instance counters below and end up in checkpoints, so they must
    stay stable.
    """

    def __init__(self, name, input, input_channels, output_channels = 1, is_training=True, reuse=None, filters=32, deep=1, n_classes=2,
                 batch_size=None, second_stride_unit=False, dropout=False, dropout_prob=0.5,
                 renorm=False):
        # Input tensor and channel bookkeeping.
        self.imgs = input
        self.input_channels = input_channels
        self.output_channels = output_channels
        self.batch_size = batch_size
        self.is_training = is_training
        self.reuse = reuse
        self.name = name
        self.filters = filters
        self.deep = deep
        self.n_classes = n_classes
        # NOTE(review): deep, n_classes, second_stride_unit, dropout and
        # dropout_prob are stored but never used by build_net below.
        self.second_stride_unit = second_stride_unit
        self.dropout = dropout
        self.dropout_prob = dropout_prob
        # Counters give every conv/deconv unit a unique variable-scope name.
        self.entry_flow_counter = 0
        self.middle_flow_counter = 0
        self.atrous_counter = 0
        self.aspp_counter = 0
        self.conv_counter = 0
        self.deconv_counter = 0
        # Batch-norm momentum / renormalization settings.
        self.momentum = 0.99
        self.renorm = renorm
        # Build the graph immediately; self.output is the network's logits.
        self.output = self.build_net()

    def __str__(self):
        return ('U-net estandar designed by <EMAIL>')

    def conv(self, x, filters, kernel = 3, stride=1, dilatation = 1):
        # One conv -> batch-norm -> ReLU unit, each in its own numbered scope.
        self.conv_counter += 1
        with tf.variable_scope('conv_unit_%d' % self.conv_counter, reuse=self.reuse):
            print('conv_unit_%d' % self.conv_counter)
            x = tf.layers.conv2d(
                x, filters, [kernel, kernel], padding='SAME', strides=(stride, stride),dilation_rate=dilatation)
            x = tf.layers.batch_normalization(x, training=self.is_training, reuse=self.reuse,
                                              momentum=self.momentum, renorm=self.renorm, name='bn')
            x = tf.nn.relu(x, name='relu')
        return x

    def deconv(self, x, filters, stride=2):
        # Transposed-conv -> batch-norm -> ReLU.  The ``stride`` argument is
        # ignored: the conv_transpose call below hard-codes strides=(2, 2).
        self.deconv_counter += 1
        # NOTE(review): the scope name uses conv_counter, not deconv_counter —
        # looks unintended, but renaming it would break existing checkpoints.
        with tf.variable_scope('deconv_unit_%d' % self.conv_counter, reuse=self.reuse):
            print('deconv_unit_%d' % self.conv_counter)
            x = tf.layers.conv2d_transpose(
                x, filters, [3, 3], padding='SAME', strides=(2, 2))
            x = tf.layers.batch_normalization(x, training=self.is_training, reuse=self.reuse,
                                              momentum=self.momentum, renorm=self.renorm, name='bn')
            x = tf.nn.relu(x, name='relu')
        return x

    def n_convs(self, x, n, filters, kernel = 3, dilatation = 1):
        # Chain ``n`` conv units with identical settings.
        for _ in range(n):
            x = self.conv(x, filters, kernel = kernel, dilatation = dilatation)
        return x

    def build_net(self):
        # Assemble the whole forward graph under the instance's name scope.
        print('Building Network:')
        print(self.name)
        with tf.variable_scope(self.name, reuse=self.reuse):
            x = self.imgs
            # Input spatial size is hard-coded to 256x256.
            x.set_shape([None, 256, 256, self.input_channels])
            x_res = x
            # NOTE(review): x_res, orig_shapes and filters_num are unused
            # (leftovers from the commented-out residual head below).
            orig_shapes = [tf.shape(x)[1], tf.shape(x)[2]]
            filters_num = self.filters
            # Wide receptive fields first (7x7, 5x5), then 3x3 stacks.
            x = self.n_convs(x, 1, self.filters, kernel = 7)
            x = self.n_convs(x, 1, self.filters, kernel=5)
            x = self.n_convs(x, 2, 2*self.filters)
            x = self.n_convs(x, 3, self.filters)
            # with tf.variable_scope('res_conv', reuse=self.reuse):
            #     print('res_conv')
            #
            #     x = tf.layers.conv2d(
            #         x, self.output_channels, [3, 3], padding='SAME', strides=(1, 1), dilation_rate=1)
            #
            #     x = tf.layers.batch_normalization(x, training=self.is_training, reuse=self.reuse,
            #                                       momentum=self.momentum, renorm=self.renorm, name='bn')
            #
            #
            #     # x_res = tf.layers.conv2d(
            #     #     x_res, self.filters, [1, 1], padding='SAME', strides=(1, 1), dilation_rate=1)
            #     #
            #     # x_res = tf.layers.batch_normalization(x_res, training=self.is_training, reuse=self.reuse,
            #     #                                       momentum=self.momentum, renorm=self.renorm, name='bn_res')
            #     #
            #     # x = tf.nn.relu(x_res + x, name='relu_res')
            #     x = tf.nn.relu( x, name='relu_')
            # Bottleneck then 1x1 projection to the requested output channels.
            x = self.n_convs(x, 1, self.filters/2)
            with tf.variable_scope('final_conv' , reuse=self.reuse):
                print('final_conv')
                x = tf.layers.conv2d(
                    x, self.output_channels, [1, 1], padding='SAME', strides=(1, 1), dilation_rate=1)
        return x
| import tensorflow as tf
class No_pool_net:
def __init__(self, name, input, input_channels, output_channels = 1, is_training=True, reuse=None, filters=32, deep=1, n_classes=2,
batch_size=None, second_stride_unit=False, dropout=False, dropout_prob=0.5,
renorm=False):
self.imgs = input
self.input_channels = input_channels
self.output_channels = output_channels
self.batch_size = batch_size
self.is_training = is_training
self.reuse = reuse
self.name = name
self.filters = filters
self.deep = deep
self.n_classes = n_classes
self.second_stride_unit = second_stride_unit
self.dropout = dropout
self.dropout_prob = dropout_prob
self.entry_flow_counter = 0
self.middle_flow_counter = 0
self.atrous_counter = 0
self.aspp_counter = 0
self.conv_counter = 0
self.deconv_counter = 0
self.momentum = 0.99
self.renorm = renorm
self.output = self.build_net()
def __str__(self):
return ('U-net estandar designed by <EMAIL>')
def conv(self, x, filters, kernel = 3, stride=1, dilatation = 1):
self.conv_counter += 1
with tf.variable_scope('conv_unit_%d' % self.conv_counter, reuse=self.reuse):
print('conv_unit_%d' % self.conv_counter)
x = tf.layers.conv2d(
x, filters, [kernel, kernel], padding='SAME', strides=(stride, stride),dilation_rate=dilatation)
x = tf.layers.batch_normalization(x, training=self.is_training, reuse=self.reuse,
momentum=self.momentum, renorm=self.renorm, name='bn')
x = tf.nn.relu(x, name='relu')
return x
def deconv(self, x, filters, stride=2):
self.deconv_counter += 1
with tf.variable_scope('deconv_unit_%d' % self.conv_counter, reuse=self.reuse):
print('deconv_unit_%d' % self.conv_counter)
x = tf.layers.conv2d_transpose(
x, filters, [3, 3], padding='SAME', strides=(2, 2))
x = tf.layers.batch_normalization(x, training=self.is_training, reuse=self.reuse,
momentum=self.momentum, renorm=self.renorm, name='bn')
x = tf.nn.relu(x, name='relu')
return x
def n_convs(self, x, n, filters, kernel = 3, dilatation = 1):
for _ in range(n):
x = self.conv(x, filters, kernel = kernel, dilatation = dilatation)
return x
def build_net(self):
print('Building Network:')
print(self.name)
with tf.variable_scope(self.name, reuse=self.reuse):
x = self.imgs
x.set_shape([None, 256, 256, self.input_channels])
x_res = x
orig_shapes = [tf.shape(x)[1], tf.shape(x)[2]]
filters_num = self.filters
x = self.n_convs(x, 1, self.filters, kernel = 7)
x = self.n_convs(x, 1, self.filters, kernel=5)
x = self.n_convs(x, 2, 2*self.filters)
x = self.n_convs(x, 3, self.filters)
# with tf.variable_scope('res_conv', reuse=self.reuse):
# print('res_conv')
#
# x = tf.layers.conv2d(
# x, self.output_channels, [3, 3], padding='SAME', strides=(1, 1), dilation_rate=1)
#
# x = tf.layers.batch_normalization(x, training=self.is_training, reuse=self.reuse,
# momentum=self.momentum, renorm=self.renorm, name='bn')
#
#
# # x_res = tf.layers.conv2d(
# # x_res, self.filters, [1, 1], padding='SAME', strides=(1, 1), dilation_rate=1)
# #
# # x_res = tf.layers.batch_normalization(x_res, training=self.is_training, reuse=self.reuse,
# # momentum=self.momentum, renorm=self.renorm, name='bn_res')
# #
# # x = tf.nn.relu(x_res + x, name='relu_res')
# x = tf.nn.relu( x, name='relu_')
x = self.n_convs(x, 1, self.filters/2)
with tf.variable_scope('final_conv' , reuse=self.reuse):
print('final_conv')
x = tf.layers.conv2d(
x, self.output_channels, [1, 1], padding='SAME', strides=(1, 1), dilation_rate=1)
return x | en | 0.237387 | # with tf.variable_scope('res_conv', reuse=self.reuse): # print('res_conv') # # x = tf.layers.conv2d( # x, self.output_channels, [3, 3], padding='SAME', strides=(1, 1), dilation_rate=1) # # x = tf.layers.batch_normalization(x, training=self.is_training, reuse=self.reuse, # momentum=self.momentum, renorm=self.renorm, name='bn') # # # # x_res = tf.layers.conv2d( # # x_res, self.filters, [1, 1], padding='SAME', strides=(1, 1), dilation_rate=1) # # # # x_res = tf.layers.batch_normalization(x_res, training=self.is_training, reuse=self.reuse, # # momentum=self.momentum, renorm=self.renorm, name='bn_res') # # # # x = tf.nn.relu(x_res + x, name='relu_res') # x = tf.nn.relu( x, name='relu_') | 2.562455 | 3 |
.sample_configs/param_handlers/create_batch_prediction_job_tabular_forecasting_sample.py | dizcology/python-aiplatform | 180 | 6620843 | <filename>.sample_configs/param_handlers/create_batch_prediction_job_tabular_forecasting_sample.py
def make_parent(parent: str) -> str:
    """Pass the resource *parent* string through unchanged."""
    return parent
def make_batch_prediction_job(
    display_name: str,
    model_name: str,
    gcs_source_uri: str,
    gcs_destination_output_uri_prefix: str,
    predictions_format: str,
) -> "google.cloud.aiplatform_v1beta1.types.batch_prediction_job.BatchPredictionJob":
    """Build the BatchPredictionJob request payload for a tabular model.

    Args:
        display_name: Human-readable job name.
        model_name: Full model resource name
            ('projects/{project}/locations/{location}/models/{model_id}').
        gcs_source_uri: GCS URI of the input instances.
        gcs_destination_output_uri_prefix: GCS prefix for prediction output.
        predictions_format: Format used for both input and output (e.g. 'jsonl').

    Note:
        The return annotation is quoted: ``google`` is never imported in this
        sample, so an unquoted annotation raised NameError at import time.
    """
    batch_prediction_job = {
        "display_name": display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        "model": model_name,
        "input_config": {
            "instances_format": predictions_format,
            "gcs_source": {"uris": [gcs_source_uri]},
        },
        "output_config": {
            "predictions_format": predictions_format,
            "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
        },
    }
    return batch_prediction_job
| <filename>.sample_configs/param_handlers/create_batch_prediction_job_tabular_forecasting_sample.py
def make_parent(parent: str) -> str:
    """Pass the resource *parent* string through unchanged."""
    return parent
def make_batch_prediction_job(
    display_name: str,
    model_name: str,
    gcs_source_uri: str,
    gcs_destination_output_uri_prefix: str,
    predictions_format: str,
) -> "google.cloud.aiplatform_v1beta1.types.batch_prediction_job.BatchPredictionJob":
    # NOTE: the return annotation is a string (PEP 484 forward reference)
    # because this module never imports google.cloud; an unquoted annotation
    # raised NameError when the function was defined.
    """Build the dict form of a BatchPredictionJob request.

    Args:
        display_name: Human-readable name of the job.
        model_name: Full model resource name, in the format
            'projects/{project}/locations/{location}/models/{model_id}'.
        gcs_source_uri: GCS URI of the input instances.
        gcs_destination_output_uri_prefix: GCS prefix where predictions
            will be written.
        predictions_format: Format of both input and output
            (e.g. 'csv', 'jsonl').

    Returns:
        A dict accepted by the AI Platform API as a BatchPredictionJob.
    """
    batch_prediction_job = {
        "display_name": display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        "model": model_name,
        "input_config": {
            "instances_format": predictions_format,
            "gcs_source": {"uris": [gcs_source_uri]},
        },
        "output_config": {
            "predictions_format": predictions_format,
            "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
        },
    }
    return batch_prediction_job
| en | 0.435225 | # Format: 'projects/{project}/locations/{location}/models/{model_id}' | 1.942949 | 2 |
mundo3/D79.py | KayanOkagawa/Cursoemvideo-Python3-Exercicios | 0 | 6620844 | <reponame>KayanOkagawa/Cursoemvideo-Python3-Exercicios
# ANSI colour codes used by all the prompts below.
titulo = '\033[01;36m'
txt = '\033[36m'
nada = '\033[m'
erro = '\033[01;31m'
sucesso = '\033[01;32m'

resposta = 'S'
lista = list()

print(f'\n{titulo:-<16} Análise de Valor Único {nada:->13}')
while True:
    if resposta in 'S':
        # Read one integer and reject it when it is already stored
        # (replaces the original append-then-count-then-pop dance).
        valor = int(input(f'\n{txt}Digite um número: {nada}'))
        if valor in lista:
            print(f'\n{erro}Valor repetido, não foi possível adicionar!{nada}')
        else:
            lista.append(valor)
            print(f'\n{sucesso}Valor Adicionado com Sucesso!{nada}')
    # Keep asking until the answer starts with S or N; an empty answer no
    # longer crashes on the [0] index as it did before.
    resposta = ' '
    while resposta not in ('S', 'N'):
        entrada = input(f'\n{txt}Deseja continuar? [S/N]: {nada}').strip().upper()
        if entrada:
            resposta = entrada[0]
    print(f'\n{titulo:-<33}{nada:->20}')
    if resposta in 'N':
        # Show the collected values in ascending order and quit.
        lista.sort()
        print(f'\n{titulo}O valores salvos foram: ', end=' ')
        for c in lista:
            print(c, end=' ')
        break
txt = '\033[36m'
nada = '\033[m'
erro = '\033[01;31m'
sucesso = '\033[01;32m'
resposta = 'S'
lista = list()
print(f'\n{titulo:-<16} Análise de Valor Único {nada:->13}')
while True:
if resposta in 'S':
lista.append(int(input(f'\n{txt}Digite um número: {nada}')))
if len(lista) > 1:
if lista.count(lista[len(lista)-1]) > 1:
for c in range(0, len(lista)-1):
if lista[len(lista)-1] == lista[c]:
lista.pop()
print(f'\n{erro}Valor repetido, não foi possível adicionar!{nada}')
break
else:
print(f'\n{sucesso}Valor Adicionado com Sucesso!{nada}')
else:
print(f'\n{sucesso}Valor Adicionado com Sucesso!{nada}')
resposta = ' '
while not resposta in 'SN':
resposta = input(f'\n{txt}Deseja continuar? [S/N]: {nada}').strip().upper()[0]
print(f'\n{titulo:-<33}{nada:->20}')
if resposta in 'N':
lista.sort()
print(f'\n{titulo}O valores salvos foram: ', end=' ')
for c in lista:
print(c, end=' ')
break | none | 1 | 3.631679 | 4 | |
python/qdevs.py | UofSC-QDEVS/shipsys | 2 | 6620845 | """Generic DEVS and QDEVS Models."""
from __future__ import division
from collections import deque
from scipy.signal import resample
from scipy.interpolate import interp1d
import numpy as np
_INF = float("inf")
_EPS = 1e-9
class DevsEvent(object):
    """Generic DEVS event: a (sender, time, value) record exchanged
    between devices."""

    def __init__(self, sender, time, value):
        self.sender, self.time, self.value = sender, time, value
class DevsDevice(object):
    """Generic Atomic DEVS Device."""

    def __init__(self, state0=0.0):
        self.state0 = state0          # initial state, restored by initialize()
        self.state = state0           # current externally visible state
        self.last_state = state0      # previous state, used to detect changes
        self.tnext = _INF             # time of the next internal transition
        self.tlast = 0.0              # time of the most recent update
        self.input = 0.0              # value of the last processed event
        self.sender = None            # device that sent the last processed event
        self.input_events = deque()   # pending external events
        self.output_devices = []      # devices notified on state changes
        self.time_history = []        # recorded transition times
        self.state_history = []       # recorded states (parallel to time_history)

    def connect_outputs(self, *devices):
        """Connect this device to an output devices. When this
        device goes through an internal transistion, it will trigger
        an external event on these devices and send the event data.
        """
        for device in devices:
            self.output_devices.append(device)

    def add_input(self, event):
        """Append an event to the input event queue.
        """
        self.input_events.appendleft(event)

    def process_inputs(self):
        """Processes all input events in the input queue.
        """
        # Events were pushed with appendleft(), so pop() consumes them FIFO.
        while self.input_events:
            event = self.input_events.pop()
            self.sender = event.sender
            self.input = event.value
            self.update(event.time)

    def broadcast(self, time):
        """Send external events to the connected output devices.
        """
        # Only notify listeners when the visible state actually changed.
        if self.state != self.last_state:
            for output_device in self.output_devices:
                output_device.add_input(DevsEvent(self, time, self.state))

    def save(self, time, reset=False):
        """Save the current time and state to the history arrays.
        """
        if reset:
            self.time_history = [time]
            self.state_history = [self.state]
        elif self.state != self.last_state:
            self.time_history.append(time)
            self.state_history.append(self.state)

    def initialize(self, time):
        """Can be overridden in derived class. This is called at the
        beginning of the simulation. Usually, initial states and the
        initial tnext values are set here.
        """
        self.state = self.state0
        self.last_state = self.state0
        self.tlast = time
        self.tnext = _INF
        self.save(time, reset=True)
        self.broadcast(time)

    def update(self, time):
        """Must be implemented in derived class. This will be called when
        the simulation advances to the current tnext value of this
        device. Usually the state is updated to the appropriate next
        value here.
        """
        raise NotImplementedError()
class QdevsDevice(DevsDevice):
    """Generic Atomic QDEVS Device. Contains some additional data and
    functionality specific to Quantized DEVS devices (a quantization
    granularity, a hysteresis width epsilon and a continuous internal
    state alongside the quantized one).
    """

    def __init__(self, state0=0.0, granularity=1e-3, epsilon=None):
        DevsDevice.__init__(self, state0)
        self.granularity = granularity
        # Hysteresis width. Bug fix: the original computed this value and
        # then unconditionally overwrote it with 0.0 two lines later,
        # disabling the hysteresis; keep the computed value instead.
        if epsilon is not None:
            self.epsilon = epsilon
        elif granularity:
            self.epsilon = 0.5 * granularity
        else:
            self.epsilon = 0.0
        self.internal_state = state0   # continuous (non-quantized) state
        self.derivative = 0.0          # current slope of the internal state

    def initialize(self, time):
        """Reset both quantized and continuous state, then run one update
        so the derived class can schedule its first internal transition.
        """
        self.state = self.state0
        self.internal_state = self.state0
        self.derivative = 0.0
        self.tlast = time
        self.tnext = _INF
        self.update(time)
        self.save(time, reset=True)
        self.broadcast(time)
class DevsSystem(object):
    """Generic DEVS system representation and simulator."""

    def __init__(self):
        self.devices = []   # all atomic devices in the system
        self.time = 0.0     # current simulation time

    def add_devices(self, *devices):
        """Adds one or more devices to the system.
        """
        for device in devices:
            self.devices.append(device)

    def initialize(self, t0=0.0):
        """This should be called at the start of a simulation.
        """
        self.time = t0
        self.tstop = 0.0
        # Reset every device, then propagate initial states once so all
        # devices start from a consistent view of their inputs.
        for device in self.devices:
            device.initialize(t0)

        for device in self.devices:
            device.broadcast(t0)

        for device in self.devices:
            device.process_inputs()

    def run(self, tstop):
        """Run the simulation from the current time until tstop.
        initialize() must be called before running for the first time.
        The simulator can have multiple run() calls in the same
        simulation to enable external events to be implemented.
        """
        self.tstop = tstop
        while(self.time < tstop):
            self.advance()

    def advance(self):
        """Advances the simulation to the next scheduled event,
        imminent devices will have internal transitions, those devices
        will broadcasts events to their output devices who will then
        process those events.
        """
        # Earliest scheduled internal transition over all devices.
        tnext = _INF
        for device in self.devices:
            tnext = min(tnext, device.tnext)

        # Advance by at least _EPS so time always moves forward even when
        # the earliest tnext does not.
        self.time = max(tnext, self.time + _EPS)
        if self.time > self.tstop:
            return

        # Imminent devices transition first, then broadcast, then every
        # device absorbs the resulting external events.
        imminent_devices = []
        for device in self.devices:
            if device.tnext <= self.time:
                imminent_devices.append(device)

        for device in imminent_devices:
            device.update(self.time)

        for device in imminent_devices:
            device.broadcast(self.time)

        for device in self.devices:
            device.process_inputs()
class QdevsSystem(DevsSystem):
    """Generic QDEVS system representation and simulator. Contains
    specific additions for handling quantized devices (default
    granularity and hysteresis epsilon cascaded to member devices).
    """

    def __init__(self, granularity=1e-3, epsilon=None):
        DevsSystem.__init__(self)
        # Default quantum for devices that do not define their own.
        self.granularity = granularity if granularity else 1e-3
        # Default hysteresis width: a quarter of the quantum unless given.
        # Bug fix: the original computed 0.25 * granularity from the raw
        # argument, which raises TypeError when granularity is None.
        self.epsilon = epsilon if epsilon else 0.25 * self.granularity

    def add_devices(self, *devices):
        """Adds one or more devices to the system and cascades the
        default granularity (and epsilon) to quantized devices that
        did not set their own.
        """
        for device in devices:
            if isinstance(device, QdevsDevice):
                if not device.granularity:
                    device.granularity = self.granularity
                if not device.epsilon:
                    device.epsilon = self.epsilon
            self.devices.append(device)
class ConstantSource(DevsDevice):
    """Source that emits a constant value, changeable via set_value()."""

    def __init__(self, value):
        DevsDevice.__init__(self)
        self.state0 = value
        self.state = value

    def initialize(self, time):
        """Publish the initial value; no internal transition is pending."""
        self.state = self.state0
        self.tlast = time
        self.tnext = _INF
        self.save(time, reset=True)
        self.broadcast(time)

    def set_value(self, value):
        """Change the emitted value and schedule an immediate transition.

        Bug fix: the original compared and assigned ``self.value``, an
        attribute that was never created, so any call raised
        AttributeError. The value lives in ``self.state``.
        """
        if value != self.state:
            self.last_state = self.state
            self.state = value
            self.tnext = self.tlast   # fire on the next advance()

    def update(self, time):
        # Bug fix: return to the idle state after firing; the original
        # left tnext unchanged, keeping the device permanently imminent.
        self.tnext = _INF
        self.tlast = time
        self.save(time)
class SquareWaveSource(DevsDevice):
    """Simple square wave with variable duty and zero rise/fall time:
    level x1 is held for t1, level x2 for t2.
    """

    def __init__(self, x1, x2, t1, t2):
        DevsDevice.__init__(self)
        self.state0 = x1
        self.x1 = x1
        self.x2 = x2
        self.t1 = t1
        self.t2 = t2

    def initialize(self, time):
        """Start at x1 and schedule the first toggle after t1."""
        self.state = self.state0
        self.tlast = time
        self.tnext = time + self.t1
        self.save(time, reset=True)
        self.broadcast(time)

    def update(self, time):
        """Toggle between the two levels and schedule the next edge.

        Bug fix: the original pre-set ``tnext = t1 + t2`` as a fallback,
        which is an absolute time rather than an offset from *time*; the
        toggle below now always schedules relative to the current time.
        """
        self.last_state = self.state
        if self.state == self.x1:
            self.state = self.x2
            self.tnext = time + self.t2
        else:
            self.state = self.x1
            self.tnext = time + self.t1
        self.tlast = time
        self.save(time)
class Integrator(QdevsDevice):
    """Simple linear integrator with gain and no limits with form:

    x' = k*u
    """

    def __init__(self, gain, x0=0.0, granularity=None, epsilon=None):
        QdevsDevice.__init__(self, x0, granularity, epsilon)
        self.gain = gain   # k in x' = k*u

    def update(self, time):
        self.last_state = self.state
        dt = time - self.tlast
        next_dt = _INF

        # Advance the continuous state using the slope held since tlast.
        self.internal_state += self.derivative * dt

        # Quantize with hysteresis: step the output up or down one quantum.
        # NOTE(review): the upward band is one quantum but the downward band
        # is half a quantum; this asymmetry matches DifferentialEquation but
        # is unusual for QSS -- confirm it is intentional.
        if self.internal_state >= self.state + self.granularity - self.epsilon:
            self.state += self.granularity
            self.broadcast(time)
        elif self.internal_state <= self.state - 0.5 * self.granularity + self.epsilon:
            self.state -= self.granularity
            self.broadcast(time)

        # New slope from the latest input; schedule the time at which the
        # continuous state will reach the next quantization boundary.
        self.derivative = self.gain * self.input
        if self.derivative > 0.0:
            next_dt = (self.state + self.granularity - self.internal_state) / self.derivative
        elif self.derivative < 0.0:
            next_dt = (self.state - 0.5 * self.granularity - self.internal_state) / self.derivative

        self.tnext = time + abs(next_dt)
        self.tlast = time
        self.save(time)
class DifferentialEquation(QdevsDevice):
    """Represents a continuous first order ODE of the form:

    x' = a * x + b * u
    """

    def __init__(self, a, b, x0=0.0, granularity=None, epsilon=None):
        QdevsDevice.__init__(self, x0, granularity, epsilon)
        self.a = a   # coefficient on the state
        self.b = b   # coefficient on the input

    def update(self, time):
        self.last_state = self.state
        dt = time - self.tlast
        next_dt = _INF

        # Advance the continuous state using the slope held since tlast.
        self.internal_state += self.derivative * dt

        # Quantize with hysteresis (same asymmetric bands as Integrator --
        # see the NOTE(review) there).
        if self.internal_state >= self.state + self.granularity - self.epsilon:
            self.state += self.granularity
            self.broadcast(time)
        elif self.internal_state <= self.state - 0.5 * self.granularity + self.epsilon:
            self.state -= self.granularity
            self.broadcast(time)

        # New slope from the ODE right-hand side; schedule the time at
        # which the continuous state reaches the next quantum boundary.
        self.derivative = self.a * self.internal_state + self.b * self.input
        if self.derivative > 0.0:
            next_dt = (self.state + self.granularity - self.internal_state) / self.derivative
        elif self.derivative < 0.0:
            next_dt = (self.state - 0.5 * self.granularity - self.internal_state) / self.derivative

        self.tnext = time + abs(next_dt)
        self.tlast = time
        self.save(time)
def resample(times, values, tf, npoints=1000):
    """Resample the time/value event arrays onto ``npoints`` evenly spaced
    samples up to ``tf`` using a zero-order hold. Useful for plotting
    results and quantifying error.

    Bug fix: the original appended to the caller's lists in place, so the
    recorded histories silently grew on every call; work on copies.
    """
    ts = list(times) + [tf]
    vs = list(values) + [values[-1]]   # hold the last value out to tf
    f = interp1d(ts, vs, kind='zero')
    times2 = np.linspace(ts[0], ts[-1], npoints)
    values2 = f(times2)
    return times2, values2
| """Generic DEVS and QDEVS Models."""
from __future__ import division
from collections import deque
from scipy.signal import resample
from scipy.interpolate import interp1d
import numpy as np
_INF = float("inf")
_EPS = 1e-9
class DevsEvent(object):
    """Generic DEVS event: a (sender, time, value) record exchanged
    between devices."""

    def __init__(self, sender, time, value):
        self.sender, self.time, self.value = sender, time, value
class DevsDevice(object):
"""Generic Atomic DEVS Device."""
def __init__(self, state0=0.0):
self.state0 = state0
self.state = state0
self.last_state = state0
self.tnext = _INF
self.tlast = 0.0
self.input = 0.0
self.sender = None
self.input_events = deque()
self.output_devices = []
self.time_history = []
self.state_history = []
def connect_outputs(self, *devices):
"""Connect this device to an output devices. When this
device goes through an internal transistion, it will trigger
an external event on these devices and send the event data.
"""
for device in devices:
self.output_devices.append(device)
def add_input(self, event):
"""Append an event to the input event queue.
"""
self.input_events.appendleft(event)
def process_inputs(self):
"""Processes all input events in the input queue.
"""
while self.input_events:
event = self.input_events.pop()
self.sender = event.sender
self.input = event.value
self.update(event.time)
def broadcast(self, time):
"""Send external events to the connected output devices.
"""
if self.state != self.last_state:
for output_device in self.output_devices:
output_device.add_input(DevsEvent(self, time, self.state))
def save(self, time, reset=False):
"""Save the current time and state to the history arrays.
"""
if reset:
self.time_history = [time]
self.state_history = [self.state]
elif self.state != self.last_state:
self.time_history.append(time)
self.state_history.append(self.state)
def initialize(self, time):
"""Can be overridden in derived class. This is called at the
beginning of the simulation. Usually, initial states and the
initial tnext values are set here.
"""
self.state = self.state0
self.last_state = self.state0
self.tlast = time
self.tnext = _INF
self.save(time, reset=True)
self.broadcast(time)
def update(self, time):
"""Must be implemented in derived class. This will be called when
the simulation advances to the current tnext value of this
device. Usually the state is updated to the appropriate next
value here.
"""
raise NotImplementedError()
class QdevsDevice(DevsDevice):
    """Generic Atomic QDEVS Device. Contains some additional data and
    functionality specific to Quantized DEVS devices (a quantization
    granularity, a hysteresis width epsilon and a continuous internal
    state alongside the quantized one).
    """

    def __init__(self, state0=0.0, granularity=1e-3, epsilon=None):
        DevsDevice.__init__(self, state0)
        self.granularity = granularity
        # Hysteresis width. Bug fix: the original computed this value and
        # then unconditionally overwrote it with 0.0 two lines later,
        # disabling the hysteresis; keep the computed value instead.
        if epsilon is not None:
            self.epsilon = epsilon
        elif granularity:
            self.epsilon = 0.5 * granularity
        else:
            self.epsilon = 0.0
        self.internal_state = state0   # continuous (non-quantized) state
        self.derivative = 0.0          # current slope of the internal state

    def initialize(self, time):
        """Reset both quantized and continuous state, then run one update
        so the derived class can schedule its first internal transition.
        """
        self.state = self.state0
        self.internal_state = self.state0
        self.derivative = 0.0
        self.tlast = time
        self.tnext = _INF
        self.update(time)
        self.save(time, reset=True)
        self.broadcast(time)
class DevsSystem(object):
"""Generic DEVS system representation and simulator."""
def __init__(self):
self.devices = []
self.time = 0.0
def add_devices(self, *devices):
"""Adds one or more devices to the system.
"""
for device in devices:
self.devices.append(device)
def initialize(self, t0=0.0):
"""This should be called at the start of a simulation.
"""
self.time = t0
self.tstop = 0.0
for device in self.devices:
device.initialize(t0)
for device in self.devices:
device.broadcast(t0)
for device in self.devices:
device.process_inputs()
def run(self, tstop):
"""Run the simulation from the current time until tstop.
initialize() must be called before running for the first time.
The simulator can have multiple run() calls in the same
simulation to enable external events to be implemented.
"""
self.tstop = tstop
while(self.time < tstop):
self.advance()
def advance(self):
"""Advances the simulation to the next scheduled event,
imminent devices will have internal transitions, those devices
will broadcasts events to their output devices who will then
process those events.
"""
tnext = _INF
for device in self.devices:
tnext = min(tnext, device.tnext)
self.time = max(tnext, self.time + _EPS)
if self.time > self.tstop:
return
imminent_devices = []
for device in self.devices:
if device.tnext <= self.time:
imminent_devices.append(device)
for device in imminent_devices:
device.update(self.time)
for device in imminent_devices:
device.broadcast(self.time)
for device in self.devices:
device.process_inputs()
class QdevsSystem(DevsSystem):
    """Generic QDEVS system representation and simulator. Contains
    specific additions for handling quantized devices (default
    granularity and hysteresis epsilon cascaded to member devices).
    """

    def __init__(self, granularity=1e-3, epsilon=None):
        DevsSystem.__init__(self)
        # Default quantum for devices that do not define their own.
        self.granularity = granularity if granularity else 1e-3
        # Default hysteresis width: a quarter of the quantum unless given.
        # Bug fix: the original computed 0.25 * granularity from the raw
        # argument, which raises TypeError when granularity is None.
        self.epsilon = epsilon if epsilon else 0.25 * self.granularity

    def add_devices(self, *devices):
        """Adds one or more devices to the system and cascades the
        default granularity (and epsilon) to quantized devices that
        did not set their own.
        """
        for device in devices:
            if isinstance(device, QdevsDevice):
                if not device.granularity:
                    device.granularity = self.granularity
                if not device.epsilon:
                    device.epsilon = self.epsilon
            self.devices.append(device)
class ConstantSource(DevsDevice):
    """Source that emits a constant value, changeable via set_value()."""

    def __init__(self, value):
        DevsDevice.__init__(self)
        self.state0 = value
        self.state = value

    def initialize(self, time):
        """Publish the initial value; no internal transition is pending."""
        self.state = self.state0
        self.tlast = time
        self.tnext = _INF
        self.save(time, reset=True)
        self.broadcast(time)

    def set_value(self, value):
        """Change the emitted value and schedule an immediate transition.

        Bug fix: the original compared and assigned ``self.value``, an
        attribute that was never created, so any call raised
        AttributeError. The value lives in ``self.state``.
        """
        if value != self.state:
            self.last_state = self.state
            self.state = value
            self.tnext = self.tlast   # fire on the next advance()

    def update(self, time):
        # Bug fix: return to the idle state after firing; the original
        # left tnext unchanged, keeping the device permanently imminent.
        self.tnext = _INF
        self.tlast = time
        self.save(time)
class SquareWaveSource(DevsDevice):
    """Simple square wave with variable duty and zero rise/fall time:
    level x1 is held for t1, level x2 for t2.
    """

    def __init__(self, x1, x2, t1, t2):
        DevsDevice.__init__(self)
        self.state0 = x1
        self.x1 = x1
        self.x2 = x2
        self.t1 = t1
        self.t2 = t2

    def initialize(self, time):
        """Start at x1 and schedule the first toggle after t1."""
        self.state = self.state0
        self.tlast = time
        self.tnext = time + self.t1
        self.save(time, reset=True)
        self.broadcast(time)

    def update(self, time):
        """Toggle between the two levels and schedule the next edge.

        Bug fix: the original pre-set ``tnext = t1 + t2`` as a fallback,
        which is an absolute time rather than an offset from *time*; the
        toggle below now always schedules relative to the current time.
        """
        self.last_state = self.state
        if self.state == self.x1:
            self.state = self.x2
            self.tnext = time + self.t2
        else:
            self.state = self.x1
            self.tnext = time + self.t1
        self.tlast = time
        self.save(time)
class Integrator(QdevsDevice):
"""Simple linear integrator with gain and no limits with form:
x' = k*u
"""
def __init__(self, gain, x0=0.0, granularity=None, epsilon=None):
QdevsDevice.__init__(self, x0, granularity, epsilon)
self.gain = gain
def update(self, time):
self.last_state = self.state
dt = time - self.tlast
next_dt = _INF
self.internal_state += self.derivative * dt
if self.internal_state >= self.state + self.granularity - self.epsilon:
self.state += self.granularity
self.broadcast(time)
elif self.internal_state <= self.state - 0.5 * self.granularity + self.epsilon:
self.state -= self.granularity
self.broadcast(time)
self.derivative = self.gain * self.input
if self.derivative > 0.0:
next_dt = (self.state + self.granularity - self.internal_state) / self.derivative
elif self.derivative < 0.0:
next_dt = (self.state - 0.5 * self.granularity - self.internal_state) / self.derivative
self.tnext = time + abs(next_dt)
self.tlast = time
self.save(time)
class DifferentialEquation(QdevsDevice):
"""Represents a continuous first order ODE of the form:
x' = a * x + b * u
"""
def __init__(self, a, b, x0=0.0, granularity=None, epsilon=None):
QdevsDevice.__init__(self, x0, granularity, epsilon)
self.a = a
self.b = b
def update(self, time):
self.last_state = self.state
dt = time - self.tlast
next_dt = _INF
self.internal_state += self.derivative * dt
if self.internal_state >= self.state + self.granularity - self.epsilon:
self.state += self.granularity
self.broadcast(time)
elif self.internal_state <= self.state - 0.5 * self.granularity + self.epsilon:
self.state -= self.granularity
self.broadcast(time)
self.derivative = self.a * self.internal_state + self.b * self.input
if self.derivative > 0.0:
next_dt = (self.state + self.granularity - self.internal_state) / self.derivative
elif self.derivative < 0.0:
next_dt = (self.state - 0.5 * self.granularity - self.internal_state) / self.derivative
self.tnext = time + abs(next_dt)
self.tlast = time
self.save(time)
def resample(times, values, tf, npoints=1000):
    """Resample the time/value event arrays onto ``npoints`` evenly spaced
    samples up to ``tf`` using a zero-order hold. Useful for plotting
    results and quantifying error.

    Bug fix: the original appended to the caller's lists in place, so the
    recorded histories silently grew on every call; work on copies.
    """
    ts = list(times) + [tf]
    vs = list(values) + [values[-1]]   # hold the last value out to tf
    f = interp1d(ts, vs, kind='zero')
    times2 = np.linspace(ts[0], ts[-1], npoints)
    values2 = f(times2)
    return times2, values2
| en | 0.889032 | Generic DEVS and QDEVS Models. Generic DEVS Event Generic Atomic DEVS Device. Connect this device to an output devices. When this device goes through an internal transistion, it will trigger an external event on these devices and send the event data. Append an event to the input event queue. Processes all input events in the input queue. Send external events to the connected output devices. Save the current time and state to the history arrays. Can be overridden in derived class. This is called at the beginning of the simulation. Usually, initial states and the initial tnext values are set here. Must be implemented in derived class. This will be called when the simulation advances to the current tnext value of this device. Usually the state is updated to the appropriate next value here. Generic Atomic QDEVS Device. Contains some additional data and functionality speicific to Quantized DEVS devices. Generic DEVS system representation and simulator. Adds one or more devices to the system. This should be called at the start of a simulation. Run the simulation from the current time until tstop. initialize() must be called before running for the first time. The simulator can have multiple run() calls in the same simulation to enable external events to be implemented. Advances the simulation to the next scheduled event, imminent devices will have internal transitions, those devices will broadcasts events to their output devices who will then process those events. Generic QDEVS system representation and simulator. Contains specific additions for handling quantized devices. Adds one or more devices to the system and cascades the default granularity to the devices. Constant source model Simple square wave with variable duty and zero rise/fall time. 
Simple linear integrator with gain and no limits with form: x' = k*u Represents a continuous first order ODE of the form: x' = a * x + b * u Resamples the given time/value event arrays from time 0 to tf for npoints using a zero-order hold. This is useful for plotting results and quantifying error. | 2.402872 | 2 |
medium/166-Fraction to Recurring Decimal.py | Davidxswang/leetcode | 2 | 6620846 | """
https://leetcode.com/problems/fraction-to-recurring-decimal/
Given two integers representing the numerator and denominator of a fraction, return the fraction in string format.
If the fractional part is repeating, enclose the repeating part in parentheses.
Example 1:
Input: numerator = 1, denominator = 2
Output: "0.5"
Example 2:
Input: numerator = 2, denominator = 1
Output: "2"
Example 3:
Input: numerator = 2, denominator = 3
Output: "0.(6)"
"""
# time complexity: O(hard to estimate), space complexity: O(hard to estimate)
# this solution is inspired by @tusizi in the discussion area.
# It's not that hard, it's just not easy integrate all of these together in a concise way.
class Solution:
    def fractionToDecimal(self, numerator: int, denominator: int) -> str:
        """Return numerator/denominator as a decimal string, wrapping any
        repeating fractional part in parentheses.

        Improvement over the original: repeat detection used
        ``remainder_stack.index`` / ``in`` on a list (an O(n) scan per
        digit, O(n^2) overall); a dict mapping remainder -> digit position
        makes the whole conversion O(n).
        """
        sign = '' if numerator * denominator >= 0 else '-'
        numerator, denominator = abs(numerator), abs(denominator)
        integer_part, remainder = divmod(numerator, denominator)
        if remainder == 0:
            return sign + str(integer_part)

        digits = [sign + str(integer_part) + '.']
        seen = {}   # remainder -> index in `digits` where its digit was emitted
        while remainder and remainder not in seen:
            seen[remainder] = len(digits)
            digit, remainder = divmod(remainder * 10, denominator)
            digits.append(str(digit))

        if remainder:
            # Cycle detected: parenthesize from the first occurrence on.
            digits.insert(seen[remainder], '(')
            digits.append(')')
        return ''.join(digits)
| """
https://leetcode.com/problems/fraction-to-recurring-decimal/
Given two integers representing the numerator and denominator of a fraction, return the fraction in string format.
If the fractional part is repeating, enclose the repeating part in parentheses.
Example 1:
Input: numerator = 1, denominator = 2
Output: "0.5"
Example 2:
Input: numerator = 2, denominator = 1
Output: "2"
Example 3:
Input: numerator = 2, denominator = 3
Output: "0.(6)"
"""
# time complexity: O(hard to estimate), space complexity: O(hard to estimate)
# this solution is inspired by @tusizi in the discussion area.
# It's not that hard, it's just not easy integrate all of these together in a concise way.
class Solution:
    def fractionToDecimal(self, numerator: int, denominator: int) -> str:
        """Return numerator/denominator as a decimal string, wrapping any
        repeating fractional part in parentheses.

        Improvement over the original: repeat detection used
        ``remainder_stack.index`` / ``in`` on a list (an O(n) scan per
        digit, O(n^2) overall); a dict mapping remainder -> digit position
        makes the whole conversion O(n).
        """
        sign = '' if numerator * denominator >= 0 else '-'
        numerator, denominator = abs(numerator), abs(denominator)
        integer_part, remainder = divmod(numerator, denominator)
        if remainder == 0:
            return sign + str(integer_part)

        digits = [sign + str(integer_part) + '.']
        seen = {}   # remainder -> index in `digits` where its digit was emitted
        while remainder and remainder not in seen:
            seen[remainder] = len(digits)
            digit, remainder = divmod(remainder * 10, denominator)
            digits.append(str(digit))

        if remainder:
            # Cycle detected: parenthesize from the first occurrence on.
            digits.insert(seen[remainder], '(')
            digits.append(')')
        return ''.join(digits)
| en | 0.779711 | https://leetcode.com/problems/fraction-to-recurring-decimal/ Given two integers representing the numerator and denominator of a fraction, return the fraction in string format. If the fractional part is repeating, enclose the repeating part in parentheses. Example 1: Input: numerator = 1, denominator = 2 Output: "0.5" Example 2: Input: numerator = 2, denominator = 1 Output: "2" Example 3: Input: numerator = 2, denominator = 3 Output: "0.(6)" # time complexity: O(hard to estimate), space complexity: O(hard to estimate) # this solution is inspired by @tusizi in the discussion area. # It's not that hard, it's just not easy integrate all of these together in a concise way. | 3.913601 | 4 |
components/collector/tests/source_collectors/api_source_collectors/test_jacoco_jenkins_plugin.py | Gamer1120/quality-time | 1 | 6620847 | <reponame>Gamer1120/quality-time
"""Unit tests for the JaCoCo Jenkins plugin source."""
from .jenkins_plugin_test_case import JenkinsPluginTestCase, JenkinsPluginTestsMixin
class JaCoCoJenkinsPluginTest(JenkinsPluginTestCase, JenkinsPluginTestsMixin):
    """Unit tests for the JaCoCo Jenkins plugin metrics."""

    # Collector under test; presumably consumed by JenkinsPluginTestCase to
    # build self.sources and self.collect -- see the base class.
    source_type = "jacoco_jenkins_plugin"

    async def test_uncovered_lines(self):
        """Test that the number of uncovered lines and the total number of lines are returned."""
        metric = dict(type="uncovered_lines", sources=self.sources, addition="sum")
        # The stubbed plugin response reports missed/total line counts;
        # the collector is expected to map missed -> value, total -> total.
        response = await self.collect(metric, get_request_json_return_value=dict(lineCoverage=dict(total=6, missed=2)))
        self.assert_measurement(response, value="2", total="6")

    async def test_uncovered_branches(self):
        """Test that the number of uncovered branches and the total number of branches are returned."""
        metric = dict(type="uncovered_branches", sources=self.sources, addition="sum")
        response = await self.collect(
            metric, get_request_json_return_value=dict(branchCoverage=dict(total=6, missed=2)))
        self.assert_measurement(response, value="2", total="6")
| """Unit tests for the JaCoCo Jenkins plugin source."""
from .jenkins_plugin_test_case import JenkinsPluginTestCase, JenkinsPluginTestsMixin
class JaCoCoJenkinsPluginTest(JenkinsPluginTestCase, JenkinsPluginTestsMixin):
    """Unit tests for the JaCoCo Jenkins plugin metrics."""

    # Collector under test; presumably consumed by JenkinsPluginTestCase to
    # build self.sources and self.collect -- see the base class.
    source_type = "jacoco_jenkins_plugin"

    async def test_uncovered_lines(self):
        """Test that the number of uncovered lines and the total number of lines are returned."""
        metric = dict(type="uncovered_lines", sources=self.sources, addition="sum")
        # The stubbed plugin response reports missed/total line counts;
        # the collector is expected to map missed -> value, total -> total.
        response = await self.collect(metric, get_request_json_return_value=dict(lineCoverage=dict(total=6, missed=2)))
        self.assert_measurement(response, value="2", total="6")

    async def test_uncovered_branches(self):
        """Test that the number of uncovered branches and the total number of branches are returned."""
        metric = dict(type="uncovered_branches", sources=self.sources, addition="sum")
        response = await self.collect(
            metric, get_request_json_return_value=dict(branchCoverage=dict(total=6, missed=2)))
        self.assert_measurement(response, value="2", total="6")
nuttssh/permissions.py | oofnikj/nuttssh | 8 | 6620848 | <gh_stars>1-10
import enum
class Permissions(enum.Enum):
    """Fine-grained capabilities a connected client may hold."""
    # Open (virtual) ports for listening
    LISTEN = 1
    # Connecting to (virtual) ports
    INITIATE = 2
    # Listing the connected listeners (the original comment here was a
    # copy-paste of INITIATE's)
    LIST_LISTENERS = 3
    # Admin shell
    ADMIN = 4
"""
Predefined access levels, mapping to a more fine-grained list of permissions.
"""
access_levels = {
'listen': {Permissions.LISTEN},
'initiate': {Permissions.INITIATE},
'list': {Permissions.LIST_LISTENERS},
'admin': {Permissions.ADMIN},
}
"""
Default access granted to new users
"""
default_access = {
'access': ['listen', 'initiate']
}
| import enum
class Permissions(enum.Enum):
    """Fine-grained capabilities a connected client may hold."""
    # Open (virtual) ports for listening
    LISTEN = 1
    # Connecting to (virtual) ports
    INITIATE = 2
    # Listing the connected listeners (the original comment here was a
    # copy-paste of INITIATE's)
    LIST_LISTENERS = 3
    # Admin shell
    ADMIN = 4
"""
Predefined access levels, mapping to a more fine-grained list of permissions.
"""
access_levels = {
'listen': {Permissions.LISTEN},
'initiate': {Permissions.INITIATE},
'list': {Permissions.LIST_LISTENERS},
'admin': {Permissions.ADMIN},
}
"""
Default access granted to new users
"""
default_access = {
'access': ['listen', 'initiate']
} | en | 0.811434 | # Open (virtual) ports for listening # Connecting to (virtual) ports # Connecting to (virtual) ports # Admin shell Predefined access levels, mapping to a more fine-grained list of permissions. Default access granted to new users | 2.945125 | 3 |
server/framework/start_script.py | tetelevm/OrdeRPG | 0 | 6620849 | from .db.models import ModelWorker
from .db.connection import DbEngine
ModelWorker.metadata.create_all(DbEngine)
| from .db.models import ModelWorker
from .db.connection import DbEngine
ModelWorker.metadata.create_all(DbEngine)
| none | 1 | 1.300526 | 1 | |
zenchi/mappings/group_status.py | fnzr/zenchi | 4 | 6620850 | <gh_stars>1-10
"""Group status constants."""
ONGOING = 1
STALLED = 2
COMPLETE = 3
DROPPED = 4
FINISHED = 5
SPECIALS_ONLY = 6
| """Group status constants."""
ONGOING = 1
STALLED = 2
COMPLETE = 3
DROPPED = 4
FINISHED = 5
SPECIALS_ONLY = 6 | en | 0.668401 | Group status constants. | 0.938901 | 1 |