Dataset schema: content — string (lengths 0 to 1.05M); origin — string (2 classes); type — string (2 classes)
import time
import random

import requests  # needed for requests.Session and the exception types below

from const import *
from util.logger import logger


class Session:
    def __init__(self):
        # requests.adapters.DEFAULT_RETRIES = 5  # increase the retry count to avoid stale connections
        self.has_login = False
        self.session = requests.Session()
        self.session.headers = {
            'User-Agent': make_ua(),
            'Accept-Charset': 'utf-8;q=0.7,*;q=0.7',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Connection': 'keep-alive'
        }
        self.timeout = 20

    def request(self, method, url, data=None, delay=0):
        for i in range(RETRY_CNT):
            try:
                if delay:
                    time.sleep(delay)
                return self.session.request(
                    method, url, allow_redirects=False, data=data, timeout=self.timeout)
            except (requests.HTTPError, requests.Timeout, requests.ConnectionError) as e:
                logger.warning('Warning: {0}, retrying({1}) ...'.format(str(e), i))
        logger.error("can't get res: " + url)
        return None


def make_ua():
    # Build a randomized Chrome-like User-Agent string.
    rrange = lambda a, b, c=1: c == 1 and random.randrange(a, b) or int(1.0 * random.randrange(a * c, b * c) / c)
    ua = 'Mozilla/%d.0 (Windows NT %d.%d) AppleWebKit/%d (KHTML, like Gecko) Chrome/%d.%d Safari/%d' % (
        rrange(4, 7, 10),
        rrange(5, 7),
        rrange(0, 3),
        rrange(535, 538, 10),
        rrange(21, 27, 10),
        rrange(0, 9999, 10),
        rrange(535, 538, 10)
    )
    return ua  # the original built the string but never returned it
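# Usage sketch (an illustration, not part of the original module): exercises the
# retrying request wrapper; assumes const defines RETRY_CNT and that the URL is
# reachable.
if __name__ == '__main__':
    s = Session()
    res = s.request('GET', 'https://example.com', delay=1)
    print(res.status_code if res is not None else 'request failed')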
nilq/baby-python
python
# Requests may need to be installed for this script to work
import requests
import re
import config

# Here we pass our client id and secret token
auth = requests.auth.HTTPBasicAuth(config.client_id, config.secret_token)

# Here we pass our login method (password), username, and password
data = {'grant_type': 'password',
        'username': config.username,
        'password': config.password}

# Setup our header info, which gives reddit a brief description of our app
headers = {'User-Agent': config.botname + 'Bot/0.0.1'}

# Send our request for an OAuth token
res = requests.post('https://www.reddit.com/api/v1/access_token',
                    auth=auth, data=data, headers=headers)

# Convert response to JSON and pull access_token value
TOKEN = res.json()['access_token']

# Add authorization to our headers dictionary
headers = {**headers, **{'Authorization': f"bearer {TOKEN}"}}

# While the token is valid (~2 hours) we just add headers=headers to our requests
requests.get('https://oauth.reddit.com/api/v1/me', headers=headers)

# Pull results from desired subreddits
for subreddit in config.subreddits:
    res = requests.get("https://oauth.reddit.com/r/" + subreddit + "/hot",
                       headers=headers, params={'limit': '5'})
    for post in res.json()['data']['children']:
        print(post['data']['subreddit'])
        print(post['data']['title'])
        print(post['data']['permalink'])

# Working on a regex to filter out relevant content...
# ------------------------------------
# for post in res.json()['data']['children']:
#     print(post['data']['subreddit'])
#     print(post['data']['title'])
#     print(post['data']['selftext'])
#     print(post['data']['permalink'])
#     print(post['data']['upvote_ratio'])
#     print(post['data']['ups'])
#     print(post['data']['downs'])
#     print(post['data']['score'])
nilq/baby-python
python
""" # ============================================================================= # Simulating the double pendulum using Runge–Kutta method (RK4) # ============================================================================= Created on Fri Jul 17 2020 @author: Ahmed Alkharusi """ import numpy as np import matplotlib.pyplot as plt from matplotlib import animation # ============================================================================= # globals # ============================================================================= m1 = 1 #mass of the 1st pendulum m2 = 1 #mass of the 2nd pendulum g = 10 #gravity r1 = 1 #length of the 1st pendulum r2 = 1 #length of the 2nd pendulum x = y = [] # ============================================================================= # Functions defn. # ============================================================================= def angular_acc1(a1_arr,a2_arr): """Calculate the angular acceleration for the 1st pendulum Inputs-> a1_arr: np.array([Initial angle, Initial angular velocity]); a2_arr: np.array([Initial angle, Initial angular velocity]);""" num = -g *(2*m1+m2)*np.sin(a1_arr[0]) - m2*g*np.sin(a1_arr[0]-2*a2_arr[0])- 2* m2*np.sin(a1_arr[0]-a2_arr[0]) * (r2*pow(a2_arr[1],2)+r1*pow(a1_arr[1],2)*np.cos(a1_arr[0]-a2_arr[0])) den = r1*(2*m1+m2-m2 * np.cos(2*a1_arr[0]-2*a2_arr[0])) return num/den def angular_acc2(a1_arr,a2_arr): """Calculate the angular acceleration for the 2nd pendulum Inputs-> a1_arr: np.array([Initial angle, Initial angular velocity]); a2_arr: np.array([Initial angle, Initial angular velocity]);""" temp = (2*np.sin(a1_arr[0]-a2_arr[0])) num = temp * (r1*pow(a1_arr[1],2)*(m1+m2)+g*(m1+m2)*np.cos(a1_arr[0])+r2*pow(a2_arr[1],2)*m2*np.cos(a1_arr[0]-a2_arr[0])) den = r2*(2*m1+m2-m2 * np.cos(2*a1_arr[0]-2*a2_arr[0])) return num/den def deriv_a1(a1_arr,a2_arr,t): """ Returns an array np.array([first derivative, 2nd derivative]) Inputs-> a1_arr: np.array([Initial angle, Initial angular velocity]); a2_arr: np.array([Initial angle, Initial angular velocity]); t: the dependent variable; """ return np.array([a1_arr[1],angular_acc1(a1_arr,a2_arr)]) def deriv_a2(a2_arr,a1_arr,t): return np.array([a2_arr[1],angular_acc2(a1_arr,a2_arr)]) def rk4(deriv,func_i,func_i2, x_i,h): """ Implements the RK4 method Inputs-> deriv: a function that takes two arguments; func_i: the function to be calculated; func_i2: this is just passed as an argument for func_i (see above deriv_a1 and deriv_a2); x_i: the dependent variable of func_i; h: the step size; """ k1 = deriv(func_i,func_i2,x_i) k2 = deriv(func_i+h/2,func_i2,h*k1/2) k3 = deriv(func_i+h/2,func_i2,h*k2/2) k4 = deriv(func_i+h,func_i2,h*k3) func = func_i + (1/6) * h * (k1 +2*k2+2*k3+k4) x = x_i + h return (x,func) # ============================================================================= # def init(): #Uncomment these for the animation # line.set_data([], []) # time_text.set_text('') # return line, time_text # # def animate(i): # x = [0, pendulum1_x[i], pendulum2_x[i]] # y = [0, pendulum1_y[i], pendulum2_y[i]] # # line.set_data(x,y) # #time_text.set_text(time_template % (i*h)) #Uncomment this to display the time in the animated plot # return line, time_text # # ============================================================================= # ============================================================================= # The results # ============================================================================= #Initial conditions ([initial angle, initail angular speed]) a1_arr = np.array([np.pi/2,0]) 
a2_arr = np.array([np.pi/2,1]) t = 0 # starting time h = 0.001 # step size for the RK4 method steps_no = 100000 # number of steps of the RK4 method time_arr = np.array([t]) func_array1 = np.array([a1_arr]) func_array2 = np.array([a2_arr]) for i in range(steps_no): temp =a1_arr (t,a1_arr) = rk4(deriv_a1,a1_arr,a2_arr,t,h) t -=h (t,a2_arr) = rk4(deriv_a2,a2_arr,temp,t,h) time_arr2 = np.append(time_arr, t) func_array1 = np.vstack((func_array1,np.array([a1_arr]))) func_array2 = np.vstack((func_array2,np.array([a2_arr]))) # You can plot the pendulum's position or angular speed/acceleration as a function of time [pendulum1_theta, pendulum1_angular_speed] = func_array1.transpose() [pendulum2_theta, pendulum2_angular_speed] = func_array2.transpose() pendulum1_x = r1*np.sin(pendulum1_theta) pendulum1_y = - r1*np.cos(pendulum1_theta) pendulum2_x = r2*np.sin(pendulum2_theta) + pendulum1_x pendulum2_y = pendulum1_y - r2*np.cos(pendulum2_theta) # Here I used the matplotlib template of the double pendulum animation to animate the plot # ============================================================================= # fig = plt.figure() # ax = fig.add_subplot(111, autoscale_on=False, xlim=(-3.9, 3.9), ylim=(-2, 2)) # ax.set_xlabel('$x-Axis$',fontsize=12) # ax.set_ylabel('$y-Axis$',fontsize=12) # ax.set_title('Double pendulum simulation (RK4 method)',fontsize=14) # ax.grid() # # line, = ax.plot([], [], 'o-',lw=3,color='mediumvioletred',markersize=15) # time_template = 'time = %0.1fs' # time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes) # # ani = animation.FuncAnimation(fig, animate, np.arange(1, len(pendulum1_y)), # interval=0, blit=True, init_func=init) # # #ax.scatter(pendulum2_x, pendulum2_y,s=5, color='black',alpha=0.5) # #ani.save('double_pendulum_200.avi', fps=20, dpi =8) # plt.show() # ============================================================================= # ============================================================================= # #Save each frame separately # ============================================================================= scatter_x = [] scatter_y = [] counter = 0 save_every_n_frames = 25 for j in range(int(len(pendulum1_y)/save_every_n_frames)): i = j*save_every_n_frames fig = plt.figure() ax = fig.add_subplot(111, autoscale_on=False, xlim=(-4,4), ylim=(-2.1, 2.1)) x = [0, pendulum1_x[i]] y = [0, pendulum1_y[i]] ax.plot(x,y,lw=3,color='mediumvioletred') x1 = [pendulum1_x[i], pendulum2_x[i]] y1 = [pendulum1_y[i], pendulum2_y[i]] scatter_x.append(pendulum2_x[i]) scatter_y.append(pendulum2_y[i]) ax.plot(x1,y1,'o-',lw=3,color='mediumvioletred',markersize=15) ax.scatter(scatter_x,scatter_y,lw=0.0005,color='black') ax.set_xlabel('$x-Axis$',fontsize=12) ax.set_ylabel('$y-Axis$',fontsize=12) ax.set_title('Double pendulum simulation (RK4 method)',fontsize=14) ax.grid() fig.savefig(str(j)+'.png',dpi=600) plt.show() """ # ============================================================================= # Please check the answers!!! # ============================================================================= References: #Implementing the RK4 method in Python https://youtu.be/mqoqAovXxWA by Prof. Niels Walet #The formulas for the angular acceleration https://www.myphysicslab.com/pendulum/double-pendulum-en.html #Animating the double pendulum (N.B. the implementation used here is different) https://matplotlib.org/3.2.1/gallery/animation/double_pendulum_sgskip.html """
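# Sanity-check sketch (not part of the original script): for this conservative
# system the total mechanical energy should stay roughly constant when h is
# small, so comparing the first and last step is a quick way to "check the
# answers" as the author asks. Uses the standard double-pendulum energy formula.
def total_energy(th1, w1, th2, w2):
    ke = (0.5 * (m1 + m2) * r1**2 * w1**2 + 0.5 * m2 * r2**2 * w2**2
          + m2 * r1 * r2 * w1 * w2 * np.cos(th1 - th2))
    pe = -(m1 + m2) * g * r1 * np.cos(th1) - m2 * g * r2 * np.cos(th2)
    return ke + pe

print("E(start):", total_energy(pendulum1_theta[0], pendulum1_angular_speed[0],
                                pendulum2_theta[0], pendulum2_angular_speed[0]))
print("E(end):  ", total_energy(pendulum1_theta[-1], pendulum1_angular_speed[-1],
                                pendulum2_theta[-1], pendulum2_angular_speed[-1]))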
nilq/baby-python
python
from __future__ import annotations

from datetime import date
from typing import (
    Literal,
    Optional,
    Sequence,
)

from pydantic.fields import Field
from pydantic.types import StrictBool

from ..api import (
    BodyParams,
    EndpointData,
    Methods,
    WrApiQueryParams,
)
from ..types_.endpoint import BaseEndpoint
from ..types_.enums import BillingType
from ..types_.inputs import DateRange, TimelogOptionalFields
from ..types_.scalar import (
    ContactId,
    FolderId,
    TaskId,
    TimelogCategoryId,
    TimelogId,
)


class _BaseTimelogs(BaseEndpoint):
    created_date: Optional[DateRange]
    updated_date: Optional[DateRange]
    tracked_date: Optional[DateRange]
    me: Optional[StrictBool]
    descendants: Optional[StrictBool]
    sub_tasks: Optional[StrictBool]
    plain_text: Optional[StrictBool]
    timelog_categories: Optional[Sequence[TimelogCategoryId]]
    billing_types: Optional[Sequence[BillingType]]
    fields_: Optional[Sequence[Literal[TimelogOptionalFields.BILLING_TYPE]]] = Field(None, alias="fields")

    @property
    def endpoint_data(self) -> EndpointData:
        return EndpointData(
            method="GET",
            url=self._url,
            query_params=self._query_params,
        )

    @property
    def _url(self) -> str:
        raise NotImplementedError()

    @property
    def _query_params(self) -> WrApiQueryParams:
        params = WrApiQueryParams()
        if self.created_date:
            params["createdDate"] = self._convert_input(self.created_date)
        if self.updated_date:
            params["updatedDate"] = self._convert_input(self.updated_date)
        if self.tracked_date:
            # the original key had a stray leading space (" trackedDate")
            params["trackedDate"] = self._convert_input(self.tracked_date)
        if self.me is not None:
            params["me"] = self._convert_bool(self.me)
        if self.descendants is not None:
            params["descendants"] = self._convert_bool(self.descendants)
        if self.sub_tasks is not None:
            params["subTasks"] = self._convert_bool(self.sub_tasks)
        if self.plain_text is not None:
            params["plainText"] = self._convert_bool(self.plain_text)
        if self.timelog_categories is not None:
            params["timelogCategories"] = self._convert_seq(self.timelog_categories)
        if self.billing_types is not None:
            params["billingTypes"] = self._convert_seq(self.billing_types)
        if self.fields_:
            params["fields"] = self._convert_seq(self.fields_)
        return params


class Timelogs(_BaseTimelogs):
    @property
    def _url(self) -> str:
        return "/timelogs"


class ContactTimelogs(_BaseTimelogs):
    contact_id: ContactId

    @property
    def _url(self) -> str:
        return f"/contacts/{self.contact_id}/timelogs"


class FolderTimelogs(_BaseTimelogs):
    folder_id: FolderId

    @property
    def _url(self) -> str:
        return f"/folders/{self.folder_id}/timelogs"


class TaskTimelogs(_BaseTimelogs):
    task_id: TaskId

    @property
    def _url(self) -> str:
        return f"/tasks/{self.task_id}/timelogs"


class TimelogCategoryTimelogs(_BaseTimelogs):
    timelog_category_id: TimelogCategoryId

    @property
    def _url(self) -> str:
        return f"/timelog_categories/{self.timelog_category_id}/timelogs"


class TimelogsById(BaseEndpoint):
    timelog_ids: Sequence[TimelogId] = Field(..., max_length=100)
    plain_text: Optional[StrictBool]
    fields_: Optional[Sequence[Literal[TimelogOptionalFields.BILLING_TYPE]]] = Field(None, alias="fields")

    @property
    def endpoint_data(self) -> EndpointData:
        return EndpointData(
            method="GET",
            url=f"/timelogs/{','.join(self.timelog_ids)}",
            query_params=self._query_params,
        )

    @property
    def _query_params(self) -> WrApiQueryParams:
        params = WrApiQueryParams()
        if self.plain_text is not None:
            params["plainText"] = self._convert_bool(self.plain_text)
        if self.fields_:
            params["fields"] = self._convert_seq(self.fields_)
        return params


class _CreateOrModifyTimelog(BaseEndpoint):
    plain_text: Optional[StrictBool]
    category_id: Optional[TimelogCategoryId]
    fields_: Optional[Sequence[Literal[TimelogOptionalFields.BILLING_TYPE]]] = Field(None, alias="fields")

    @property
    def endpoint_data(self) -> EndpointData:
        return EndpointData(
            method=self._method,
            url=self._url,
            body_params=self._body_params,
        )

    @property
    def _url(self) -> str:
        raise NotImplementedError()

    @property
    def _method(self) -> Methods:
        raise NotImplementedError()

    @property
    def _body_params(self) -> BodyParams:
        params = {}
        if self.plain_text is not None:
            params["plainText"] = self._convert_bool(self.plain_text)
        if self.category_id is not None:
            params["categoryId"] = self.category_id
        if self.fields_:
            params["fields"] = self._convert_seq(self.fields_)
        return params


class CreateTimelog(_CreateOrModifyTimelog):
    task_id: TaskId
    comment: str
    hours: int
    tracked_date: date

    @property
    def _url(self) -> str:
        return f"/tasks/{self.task_id}/timelogs"

    @property
    def _method(self) -> Methods:
        return "POST"

    @property
    def _body_params(self) -> BodyParams:
        params = super()._body_params
        return {
            **params,
            **{"comment": self.comment,
               "hours": str(self.hours),
               "trackedDate": self.tracked_date.isoformat()},
        }


class ModifyTimelog(_CreateOrModifyTimelog):
    timelog_id: TimelogId
    comment: Optional[str]
    hours: Optional[int]
    tracked_date: Optional[date]

    @property
    def _url(self) -> str:
        return f"/timelogs/{self.timelog_id}"

    @property
    def _method(self) -> Methods:
        return "PUT"

    @property
    def _body_params(self) -> BodyParams:
        params = super()._body_params
        if self.comment is not None:
            params["comment"] = self.comment
        if self.hours is not None:
            params["hours"] = str(self.hours)
        if self.tracked_date:
            params["trackedDate"] = self.tracked_date.isoformat()
        return params


class DeleteTimelog(BaseEndpoint):
    timelog_id: TimelogId

    @property
    def endpoint_data(self) -> EndpointData:
        return EndpointData(
            method="DELETE",
            url=f"/timelogs/{self.timelog_id}",
        )
nilq/baby-python
python
import datetime as _datetime
import os
import random
import string

import inflect
import six

from . import mock_random

inflectify = inflect.engine()


def _slugify(string):
    """
    This is not as good as a proper slugification function, but the input space is limited

    >>> _slugify("beets")
    'beets'
    >>> _slugify("Toaster Strudel")
    'toaster-strudel'

    Here's why: It handles very little. It doesn't handle esoteric whitespace or symbols:

    >>> _slugify("Hat\\nBasket- of justice and some @#*(! symbols")
    'hat-basket--of-justice-and-some-@#*(!-symbols'
    """
    return string.replace(" ", "-").replace("\n", "-").replace(".", "").replace(",", "").lower()


people = [
    "I", "You", "Nobody", "The government", "Everybody", "The illuminati",
    "God himself", "The President of the United States", "The world",
    "The United Nations", "The Oakland Raiders", "Your dad", "Your mom",
    "The band 'Queen'", "Customs & Immigration",
]

titles = [
    'captain', 'lieutenant', 'leftenant', 'colonel', 'general', 'major', 'sir',
    'sensei', 'lord', 'duke', 'president', 'master', 'mister', 'miss', 'lady',
    'queen', 'king', 'doctor', 'monsieur', 'madame', 'senor', 'senorita',
    'lord commander', 'commodore', 'emperor', 'super-emperor', 'madam', 'dame',
    'professor', 'father', 'brother', 'sister', 'reverend',
]

streets = [
    'street', 'boulevard', 'drive', 'block', 'place', 'boardwalk',
]

countries = [
    'testonia', 'testasia', 'arztotzka', 'mordor', 'xanth', 'stankonia',
    'strongbadia', 'westeros', 'qarth', 'gallifrey', 'tatooine', 'cybertron',
    'aiur', 'lordaeron', 'yemen',
]

adjectives = [
    'heroic', 'magnificent', 'mighty', 'amazing', 'wonderful', 'fantastic',
    'incredible', 'spectacular', 'tremendous', 'throbbing', 'enormous',
    'terrific', 'wondrous', 'spectacular', 'big', 'tiny', 'small', 'mighty',
    'musky', 'sky', 'transparent', 'opaque', 'light', 'dark', 'sassy', 'scary',
    'extraneous', 'huge', 'aqua', 'aqua', 'marine', 'azure', 'beige', 'black',
    'almond', 'blue', 'brown', 'chartreuse', 'coral', 'corn', 'flower',
    'crimson', 'cyan', 'navy', 'golden', 'rod', 'gray', 'grey', 'green',
    'khaki', 'magenta', 'olive', 'salmon', 'slate', 'turquoise', 'violet',
    'pink', 'brick', 'white', 'golden', 'honeydew', 'indigo', 'ivory',
    'lavender', 'lemon', 'chiffon', 'purple', 'orchid', 'linen', 'rose',
    'orange', 'pale', 'sandy', 'sea', 'shell', 'silver', 'tan', 'teal',
    'thistle', 'violet', 'plaid', 'polka', 'dot', 'paisley', 'iron', 'bronze',
    'stone', 'birch', 'cedar', 'cherry', 'sandal', 'pine', 'fir', 'yew',
    'hem', 'lock', 'spruce', 'chest', 'box', 'butter', 'nut', 'camphor',
    'elm', 'oak', 'huckle', 'berry',
    'wood' 'maple',  # implicit concatenation in the original; kept so list indices don't shift
    'poplar', 'teak', 'beech', 'nutmeg', 'willow', 'cinnamon', 'spice',
    'basil', 'cardamom', 'clove', 'garlic', 'juniper', 'rum', 'lime',
    'capable', 'heavy', 'fast', 'slow', 'charming', 'noticeable', 'sly',
    'slippery', 'sluggish', 'casual', 'cautious', 'cement', 'evil', 'banana',
    'good', 'neutral', 'apple', 'pear', 'winter', 'spring', 'fall', 'autumn',
    'summer', 'garbage', 'imposing', 'correct', 'iced', 'handed', 'salty',
    'coffee', 'cheese', 'floppy', 'popular', 'misty', 'soulful', 'boaty',
    'gassy', 'spectacular', 'sleepy', 'laudable', 'comfortable', 'soft',
    'dicey', 'memorable', 'patterned', 'greasy', 'elongated', 'long',
    'collapsible', 'mysterious', 'expandible', 'delicious', 'edible',
    'scattered', 'impenetrable', 'sexy', 'curvaceous', 'avoidable',
    'tractable', 'fussy', 'touchable', 'touchy', 'scandalous', 'murky',
    'sloshing', 'damp', 'chubby',
]

containers = [
    'bucket', 'bale', 'cluster', 'armload', 'group', 'container', 'box',
    'bunch', 'bag', 'tub', 'tote', 'wad',
]

directions = [
    "west", "east", "north", "south", "central",
]

city_suffixes = [
    "ford", "berg", "shire", "town", "hall", " city", "sound", "ton",
]

tlds = [
    '.xyz', '.blue', '.org', '.com', '.net', '.link', '.click', '.wedding',
    '.sexy', '.red', '.black', '.pics',
]

nouns = [
    'onion', 'chimp', 'blister', 'poop', 'britches', 'mystery',
    'boat' 'bench',  # implicit concatenation -> 'boatbench'; the doctests depend on it
    'secret', 'mouse', 'house', 'butt', 'hunter', 'fisher', 'bean', 'harvest',
    'mixer', 'hand', 'finger', 'nose', 'eye', 'belly', 'jean', 'plan', 'disk',
    'horse', 'staple', 'face', 'arm', 'cheek', 'monkey', 'shin', 'button',
    'byte', 'cabinet', 'canyon', 'dance', 'crayon', 'sausage', 'meat', 'wad',
    'napkin', 'device', 'cape', 'chair', 'person', 'burger', 'ham', 'place',
    'beef', 'kitten', 'puppy', 'book', 'clamp', 'cloud', 'code', 'coast',
    'coin', 'concern', 'space', 'key', 'bucket', 'object', 'heart', 'stapler',
    'mug', 'bottle', 'cable', 'note', 'lamp', 'shelf', 'blanket', 'dong',
    'board', 'issue', 'job', 'knife', 'thing', 'phone', 'sweater', 'pant',
    'boot', 'sock', 'socks', 'hat', 'ring', 'dong', 'wang', 'wrap', 'holder',
    'pen', 'pencil', 'bag', 'potato', 'sword', 'shield', 'spear', 'staff',
    'shaft', 'slab', 'grub', 'song', 'axe', 'boat', 'armour', 'lamp', 'club',
    'cage', 'hole', 'ass', 'chump', 'jerk', 'foot', 'spud',
]

verbs = [
    'jump', 'twirl', 'spin', 'smell', 'slap', 'smack', 'poke', 'prod', 'drop',
    'punch', 'grab', 'throw', 'slide', 'dunk', 'braise', 'scatter', 'slide',
    'dice', 'hurl', 'buy', 'toast', 'align', 'sell', 'move', 'shoop', 'trade',
    'steal', 'flip', 'blast', 'clean', 'hide', 'pinch', 'grasp', 'palm',
    'examine', 'taste', 'ingest', 'swallow', 'snort', 'juggle', 'lift', 'eat',
    'quaff', 'chug', 'fear', 'assemble',
]

firstnames = [
    'testy', 'carl', 'agatha', 'agnes', 'carol', 'harry', 'maya', 'judy',
    'mike', 'albert', 'cornelius', 'tim', 'mary', 'peter', 'kiko', 'wilhelm',
    'kimmy', 'steve', 'jennifer', 'frank', 'pierre', 'george', 'aya',
    'thiago', 'rodrigo', 'aasif', 'mohammed', 'daniel', 'liam', 'jack',
    'agustin', 'santiago', 'noah', 'sofia', 'olivia', 'madison', 'chloe',
    'camilla', 'carla', 'gary', 'hiroto', 'rasmus', 'charlie', 'miguel',
    'alexander', 'youssef', 'emma', 'sara', 'amelia', 'tiffany', 'arnold',
    'ronald', 'hogan', 'doug', 'pete', 'jim', 'james', 'mandy', 'andy',
    'cole', 'francis', 'david', 'margaret', 'tracy', 'jonathan', 'daniel',
    'heather', 'travis', 'courteney', 'yang', 'vivian', 'ryan', 'phil',
    'shana', 'allen', 'karen', 'henry', 'graham', 'jesse', 'shirley', 'rafa',
    'dylan', 'javier', 'ashley', 'drew', 'tomas', 'taylor', 'matt', 'shigeru',
    'shayla', 'stephanie', 'oliver', 'ron', 'jason', 'seth', 'ronald',
    'miloslav', 'walter',
]


def slugify_argument(func):
    """
    Wraps a function that returns a string, adding the 'slugify' argument.

    >>> slugified_fn = slugify_argument(lambda *args, **kwargs: "YOU ARE A NICE LADY")
    >>> slugified_fn()
    'YOU ARE A NICE LADY'
    >>> slugified_fn(slugify=True)
    'you-are-a-nice-lady'
    """
    @six.wraps(func)
    def wrapped(*args, **kwargs):
        if "slugify" in kwargs and kwargs['slugify']:
            return _slugify(func(*args, **kwargs))
        else:
            return func(*args, **kwargs)
    return wrapped


def capitalize_argument(func):
    """
    Wraps a function that returns a string, adding the 'capitalize' argument.

    >>> capsified_fn = capitalize_argument(lambda *args, **kwargs: "what in the beeswax is this?")
    >>> capsified_fn()
    'what in the beeswax is this?'
    >>> capsified_fn(capitalize=True)
    'What In The Beeswax Is This?'
    """
    @six.wraps(func)
    def wrapped(*args, **kwargs):
        if "capitalize" in kwargs and kwargs['capitalize']:
            return func(*args, **kwargs).title()
        else:
            return func(*args, **kwargs)
    return wrapped


def datetime(past=True, random=random):
    """
    Returns a random datetime from the past... or the future!

    >>> mock_random.seed(0)
    >>> datetime(random=mock_random).isoformat()
    '1950-02-03T03:04:05'
    >>> datetime(random=mock_random, past=False).isoformat()
    '2023-08-09T09:00:01'
    """
    def year():
        if past:
            return random.choice(range(1950, 2005))
        else:
            return _datetime.datetime.now().year + random.choice(range(1, 50))

    def month():
        return random.choice(range(1, 12))

    def day():
        return random.choice(range(1, 31))

    def hour():
        return random.choice(range(0, 23))

    def minute():
        return random.choice(range(0, 59))

    def second():
        return random.choice(range(0, 59))

    try:
        return _datetime.datetime(year=year(), month=month(), day=day(),
                                  hour=hour(), minute=minute(), second=second())
    except ValueError:
        # retry on impossible dates; pass the RNG through (the original
        # fell back to the global random module here)
        return datetime(past=past, random=random)


@capitalize_argument
def letter(random=random, *args, **kwargs):
    """
    Return a letter!

    >>> mock_random.seed(0)
    >>> letter(random=mock_random)
    'a'
    >>> letter(random=mock_random)
    'b'
    >>> letter(random=mock_random, capitalize=True)
    'C'
    """
    return random.choice(string.ascii_lowercase)


def number(random=random, *args, **kwargs):
    """
    Return a number!

    >>> number(random=mock_random)
    0
    """
    return random.randint(0, 9)


@slugify_argument
@capitalize_argument
def title(random=random, *args, **kwargs):
    """
    Return a title!

    >>> mock_random.seed(0)
    >>> title(random=mock_random)
    'captain'
    >>> title(random=mock_random, capitalize=True)
    'Lieutenant'
    >>> title(random=mock_random, slugify=True)
    'leftenant'
    """
    return random.choice(titles)


@slugify_argument
@capitalize_argument
def adjective(random=random, *args, **kwargs):
    """
    Return an adjective!

    >>> mock_random.seed(0)
    >>> adjective(random=mock_random)
    'heroic'
    >>> adjective(random=mock_random, capitalize=True)
    'Magnificent'
    >>> adjective(random=mock_random, slugify=True)
    'mighty'
    """
    return random.choice(adjectives)


@slugify_argument
@capitalize_argument
def noun(random=random, *args, **kwargs):
    """
    Return a noun!

    >>> mock_random.seed(0)
    >>> noun(random=mock_random)
    'onion'
    >>> noun(random=mock_random, capitalize=True)
    'Chimp'
    >>> noun(random=mock_random, slugify=True)
    'blister'
    """
    return random.choice(nouns)


@slugify_argument
@capitalize_argument
def a_noun(random=random, *args, **kwargs):
    """
    Return a noun, but with an 'a' in front of it. Or an 'an', depending!

    >>> mock_random.seed(0)
    >>> a_noun(random=mock_random)
    'an onion'
    >>> a_noun(random=mock_random, capitalize=True)
    'A Chimp'
    >>> a_noun(random=mock_random, slugify=True)
    'a-blister'
    """
    return inflectify.a(noun(random=random))


@slugify_argument
@capitalize_argument
def plural(random=random, *args, **kwargs):
    """
    Return a plural noun.

    >>> mock_random.seed(0)
    >>> plural(random=mock_random)
    'onions'
    >>> plural(random=mock_random, capitalize=True)
    'Chimps'
    >>> plural(random=mock_random, slugify=True)
    'blisters'
    """
    return inflectify.plural(random.choice(nouns))


@slugify_argument
@capitalize_argument
def verb(random=random, *args, **kwargs):
    """
    Return a verb!

    >>> mock_random.seed(0)
    >>> verb(random=mock_random)
    'jump'
    >>> verb(random=mock_random, capitalize=True)
    'Twirl'
    >>> verb(random=mock_random, slugify=True)
    'spin'
    """
    return random.choice(verbs)


@slugify_argument
@capitalize_argument
def firstname(random=random, *args, **kwargs):
    """
    Return a first name!

    >>> mock_random.seed(0)
    >>> firstname(random=mock_random)
    'testy'
    >>> firstname(random=mock_random, capitalize=True)
    'Carl'
    >>> firstname(random=mock_random, slugify=True)
    'agatha'
    """
    return random.choice(firstnames)


@slugify_argument
@capitalize_argument
def lastname(random=random, *args, **kwargs):
    """
    Return a last name!

    >>> mock_random.seed(0)
    >>> lastname(random=mock_random)
    'chimp'
    >>> mock_random.seed(1)
    >>> lastname(random=mock_random, capitalize=True)
    'Wonderful'
    >>> mock_random.seed(2)
    >>> lastname(random=mock_random, slugify=True)
    'poopbritches'
    >>> [lastname(random=mock_random) for x in range(0,10)]
    ['wonderful', 'chimp', 'onionmighty', 'magnificentslap', 'smellmouse', 'secretbale', 'boatbenchtwirl', 'spectacularmice', 'incrediblebritches', 'poopbritches']
    """
    types = [
        "{noun}",
        "{adjective}",
        "{noun}{second_noun}",
        "{adjective}{noun}",
        "{adjective}{plural}",
        "{noun}{verb}",
        "{noun}{container}",
        "{verb}{noun}",
        "{adjective}{verb}",
        "{noun}{adjective}",
        "{noun}{firstname}",
        "{noun}{title}",
        "{adjective}{title}",
        "{adjective}-{noun}",
        "{adjective}-{plural}"
    ]
    return random.choice(types).format(noun=noun(random=random),
                                       second_noun=noun(random=random),
                                       adjective=adjective(random=random),
                                       plural=plural(random=random),
                                       container=container(random=random),
                                       verb=verb(random=random),
                                       firstname=firstname(random=random),
                                       title=title(random=random))


@slugify_argument
@capitalize_argument
def container(random=random, *args, **kwargs):
    """
    Return a container!

    >>> mock_random.seed(0)
    >>> container(random=mock_random)
    'bucket'
    >>> container(random=mock_random, capitalize=True)
    'Bale'
    >>> container(random=mock_random, slugify=True)
    'cluster'
    """
    return random.choice(containers)


@slugify_argument
@capitalize_argument
def numberwang(random=random, *args, **kwargs):
    """
    Return a number that is spelled out.

    >>> numberwang(random=mock_random)
    'two'
    >>> numberwang(random=mock_random, capitalize=True)
    'Two'
    >>> numberwang(random=mock_random, slugify=True)
    'two'
    """
    n = random.randint(2, 150)
    return inflectify.number_to_words(n)


@slugify_argument
@capitalize_argument
def direction(random=random, *args, **kwargs):
    """
    Return a direction!

    >>> mock_random.seed(0)
    >>> direction(random=mock_random)
    'west'
    >>> direction(random=mock_random, capitalize=True)
    'East'
    >>> direction(random=mock_random, slugify=True)
    'north'
    """
    return random.choice(directions)


@slugify_argument
@capitalize_argument
def city_suffix(random=random, *args, **kwargs):
    """
    Return a city suffix, like 'berg' or 'hall'.

    >>> mock_random.seed(0)
    >>> city_suffix(random=mock_random)
    'ford'
    >>> city_suffix(random=mock_random, capitalize=True)
    'Berg'
    >>> city_suffix(random=mock_random, slugify=True)
    'shire'
    """
    return random.choice(city_suffixes)


@slugify_argument
@capitalize_argument
def tld(random=random, *args, **kwargs):
    """
    Return a top-level domain!

    >>> mock_random.seed(0)
    >>> tld(random=mock_random)
    '.xyz'
    >>> tld(random=mock_random, capitalize=True)
    '.Blue'
    >>> tld(random=mock_random, slugify=True)
    'org'
    """
    return random.choice(tlds)


@slugify_argument
@capitalize_argument
def thing(random=random, *args, **kwargs):
    """
    Return a ... thing.

    >>> mock_random.seed(0)
    >>> thing(random=mock_random)
    'two secrets'
    >>> mock_random.seed(1)
    >>> thing(random=mock_random, capitalize=True)
    'Mighty Poop'
    >>> mock_random.seed(2)
    >>> thing(random=mock_random, slugify=True)
    'poop'
    >>> mock_random.seed(4)
    >>> thing(random=mock_random, slugify=True)
    'two-chimps'
    """
    def noun_or_adjective_noun():
        if random.choice([True, False]):
            return noun(random=random)
        else:
            return adjective(random=random) + " " + noun(random=random)

    def plural_or_adjective_plural():
        if random.choice([True, False]):
            return plural(random=random)
        else:
            return adjective(random=random) + " " + plural(random=random)

    def container_of_nouns():
        return container(random=random) + " of " + plural_or_adjective_plural()

    def number_of_plurals():
        return numberwang(random=random) + " " + plural_or_adjective_plural()

    if "an" in kwargs and kwargs['an']:
        return random.choice([
            inflectify.a(noun_or_adjective_noun()),
            inflectify.a(container_of_nouns()),
            number_of_plurals(),
        ])
    else:
        return random.choice([
            noun_or_adjective_noun(),
            container_of_nouns(),
            number_of_plurals(),
        ])


@slugify_argument
def a_thing(random=random, *args, **kwargs):
    """
    Return a ... thing.

    >>> mock_random.seed(0)
    >>> a_thing(random=mock_random)
    'two secrets'
    >>> mock_random.seed(1)
    >>> a_thing(random=mock_random, capitalize=True)
    'A Mighty Poop'
    >>> mock_random.seed(2)
    >>> a_thing(random=mock_random, slugify=True)
    'a-poop'
    >>> mock_random.seed(4)
    >>> a_thing(random=mock_random, slugify=True)
    'two-chimps'
    """
    return thing(random=random, an=True, *args, **kwargs)


@slugify_argument
@capitalize_argument
def things(random=random, *args, **kwargs):
    """
    Return a set of things.

    >>> mock_random.seed(0)
    >>> things(random=mock_random)
    'two secrets, two secrets, and two secrets'
    >>> mock_random.seed(1)
    >>> things(random=mock_random, capitalize=True)
    'A Mighty Poop, A Mighty Poop, And A Mighty Poop'
    """
    return inflectify.join([a_thing(random=random), a_thing(random=random), a_thing(random=random)])


@slugify_argument
@capitalize_argument
def name(random=random, *args, **kwargs):
    """
    Return someone's name

    >>> mock_random.seed(0)
    >>> name(random=mock_random)
    'carl poopbritches'
    >>> mock_random.seed(7)
    >>> name(random=mock_random, capitalize=True)
    'Duke Testy Wonderful'
    """
    if random.choice([True, True, True, False]):
        return firstname(random=random) + " " + lastname(random=random)
    elif random.choice([True, False]):
        return title(random=random) + " " + firstname(random=random) + " " + lastname(random=random)
    else:
        return title(random=random) + " " + lastname(random=random)


@slugify_argument
@capitalize_argument
def domain(random=random, *args, **kwargs):
    """
    Return a domain

    >>> mock_random.seed(0)
    >>> domain(random=mock_random)
    'onion.net'
    >>> domain(random=mock_random)
    'bag-of-heroic-chimps.sexy'
    """
    words = random.choice([
        noun(random=random),
        thing(random=random),
        adjective(random=random) + noun(random=random),
    ])
    return _slugify(words) + tld(random=random)


def email(random=random, *args, **kwargs):
    """
    Return an e-mail address

    >>> mock_random.seed(0)
    >>> email(random=mock_random)
    'onion@bag-of-heroic-chimps.sexy'
    >>> email(random=mock_random)
    'agatha-incrediblebritches+spam@amazingbritches.click'
    >>> email(random=mock_random, name="charles")
    'charles@secret.xyz'
    """
    if 'name' in kwargs and kwargs['name']:
        words = kwargs['name']
    else:
        words = random.choice([
            noun(random=random),
            name(random=random),
            name(random=random) + "+spam",
        ])
    return _slugify(words) + "@" + domain(random=random)


def phone_number(random=random, *args, **kwargs):
    """
    Return a phone number

    >>> mock_random.seed(0)
    >>> phone_number(random=mock_random)
    '555-0000'
    >>> phone_number(random=mock_random)
    '1-604-555-0000'
    >>> phone_number(random=mock_random)
    '864-70-555-0000'
    """
    return random.choice([
        '555-{number}{other_number}{number}{other_number}',
        '1-604-555-{number}{other_number}{number}{other_number}',
        '864-70-555-{number}{other_number}{number}{other_number}',
        '867-5309'
    ]).format(number=number(random=random), other_number=number(random=random))


@slugify_argument
@capitalize_argument
def sentence(random=random, *args, **kwargs):
    """
    Return a whole sentence

    >>> mock_random.seed(0)
    >>> sentence(random=mock_random)
    "Agatha Incrediblebritches can't wait to smell two chimps in Boatbencheston."
    >>> mock_random.seed(2)
    >>> sentence(random=mock_random, slugify=True)
    'blistersecret-studios-is-the-best-company-in-liveronion'
    """
    if 'name' in kwargs and kwargs['name']:
        nm = kwargs['name']  # was `kwargs(name)`, which raised a TypeError
    elif random.choice([True, False, False]):
        nm = name(capitalize=True, random=random)
    else:
        nm = random.choice(people)

    def type_one():
        return "{name} will {verb} {thing}.".format(
            name=nm, verb=verb(random=random),
            thing=random.choice([a_thing(random=random), things(random=random)]))

    def type_two():
        return "{city} is in {country}.".format(
            city=city(capitalize=True, random=random),
            country=country(capitalize=True, random=random))

    def type_three():
        return "{name} can't wait to {verb} {thing} in {city}.".format(
            name=nm, verb=verb(random=random), thing=a_thing(random=random),
            city=city(capitalize=True, random=random))

    def type_four():
        return "{name} will head to {company} to buy {thing}.".format(
            name=nm, company=company(capitalize=True, random=random),
            thing=a_thing(random=random))

    def type_five():
        return "{company} is the best company in {city}.".format(
            city=city(capitalize=True, random=random),
            company=company(capitalize=True, random=random))

    def type_six():
        return "To get to {country}, you need to go to {city}, then drive {direction}.".format(
            country=country(capitalize=True, random=random),
            city=city(capitalize=True, random=random),
            direction=direction(random=random))

    def type_seven():
        return "{name} needs {thing}, badly.".format(name=nm, thing=a_thing(random=random))

    def type_eight():
        return "{verb} {noun}!".format(verb=verb(capitalize=True, random=random),
                                       noun=noun(random=random))

    return random.choice([type_one, type_two, type_three, type_four,
                          type_five, type_six, type_seven, type_eight])()


@slugify_argument
@capitalize_argument
def paragraph(random=random, length=10, *args, **kwargs):
    """
    Produces a paragraph of text.

    >>> mock_random.seed(0)
    >>> paragraph(random=mock_random, length=2)
    "Agatha Incrediblebritches can't wait to smell two chimps in Boatbencheston. Wonderfulsecretsound is in Gallifrey."
    >>> mock_random.seed(2)
    >>> paragraph(random=mock_random, length=2, slugify=True)
    'blistersecret-studios-is-the-best-company-in-liveronion-wonderfulsecretsound-is-in-gallifrey'
    """
    return " ".join([sentence(random=random) for x in range(0, length)])


def markdown(random=random, length=10, *args, **kwargs):
    """
    Produces a bunch of markdown text.

    >>> mock_random.seed(0)
    >>> markdown(random=mock_random, length=2)
    'Nobody will **head** _to_ Mystery Studies Department **to** _buy_ a mighty poop.\\nNobody will **head** _to_ Mystery Studies Department **to** _buy_ a mighty poop.'
    """
    def title_sentence():
        return "\n" + "#" * random.randint(1, 5) + " " + sentence(capitalize=True, random=random)

    def embellish(word):
        return random.choice([word, word, word, "**" + word + "**", "_" + word + "_"])

    def randomly_markdownify(string):
        return " ".join([embellish(word) for word in string.split(" ")])

    sentences = []
    for i in range(0, length):
        sentences.append(random.choice([
            title_sentence(),
            sentence(random=random),
            sentence(random=random),
            randomly_markdownify(sentence(random=random))
        ]))
    return "\n".join(sentences)


@slugify_argument
@capitalize_argument
def gender(random=random, *args, **kwargs):
    return "Awesome"


@slugify_argument
@capitalize_argument
def company(random=random, *args, **kwargs):
    """
    Produce a company name

    >>> mock_random.seed(0)
    >>> company(random=mock_random)
    'faculty of applied chimp'
    >>> mock_random.seed(1)
    >>> company(random=mock_random)
    'blistersecret studios'
    >>> mock_random.seed(2)
    >>> company(random=mock_random)
    'pooppooppoop studios'
    >>> mock_random.seed(3)
    >>> company(random=mock_random)
    'britchesshop'
    >>> mock_random.seed(4)
    >>> company(random=mock_random, capitalize=True)
    'Mystery Studies Department'
    >>> mock_random.seed(5)
    >>> company(random=mock_random, slugify=True)
    'the-law-offices-of-magnificentslap-boatbench-and-smellmouse'
    """
    return random.choice([
        "faculty of applied {noun}",
        "{noun}{second_noun} studios",
        "{noun}{noun}{noun} studios",
        "{noun}shop",
        "{noun} studies department",
        "the law offices of {lastname}, {noun}, and {other_lastname}",
        "{country} ministry of {plural}",
        "{city} municipal {noun} department",
        "{city} plumbing",
        "department of {noun} studies",
        "{noun} management systems",
        "{plural} r us",
        "inter{verb}",
        "the {noun} warehouse",
        "integrated {noun} and {second_noun}",
        "the {noun} and {second_noun} pub",
        "e-cyber{verb}",
        "{adjective}soft",
        "{domain} Inc.",
        "{thing} incorporated",
        "{noun}co",
    ]).format(noun=noun(random=random),
              plural=plural(random=random),
              country=country(random=random),
              city=city(random=random),
              adjective=adjective(random=random),
              lastname=lastname(random=random),
              other_lastname=lastname(random=random),
              domain=domain(random=random),
              second_noun=noun(random=random),
              verb=verb(random=random),
              thing=thing(random=random))


@slugify_argument
@capitalize_argument
def country(random=random, *args, **kwargs):
    """
    Produce a country name

    >>> mock_random.seed(0)
    >>> country(random=mock_random)
    'testasia'
    >>> country(random=mock_random, capitalize=True)
    'West Xanth'
    >>> country(random=mock_random, slugify=True)
    'westeros'
    """
    return random.choice([
        "{country}",
        "{direction} {country}"
    ]).format(country=random.choice(countries), direction=direction(random=random))


@slugify_argument
@capitalize_argument
def city(random=random, *args, **kwargs):
    """
    Produce a city name

    >>> mock_random.seed(0)
    >>> city(random=mock_random)
    'east mysteryhall'
    >>> city(random=mock_random, capitalize=True)
    'Birmingchimp'
    >>> city(random=mock_random, slugify=True)
    'wonderfulsecretsound'
    """
    return random.choice([
        "{direction} {noun}{city_suffix}",
        "{noun}{city_suffix}",
        "{adjective}{noun}{city_suffix}",
        "{plural}{city_suffix}",
        "{adjective}{city_suffix}",
        "liver{noun}",
        "birming{noun}",
        "{noun}{city_suffix} {direction}"
    ]).format(direction=direction(random=random),
              adjective=adjective(random=random),
              plural=plural(random=random),
              city_suffix=city_suffix(random=random),
              noun=noun(random=random))


@slugify_argument
@capitalize_argument
def postal_code(random=random, *args, **kwargs):
    """
    Produce something that vaguely resembles a postal code

    >>> mock_random.seed(0)
    >>> postal_code(random=mock_random)
    'b0b 0c0'
    >>> postal_code(random=mock_random, capitalize=True)
    'E0E 0F0'
    >>> postal_code(random=mock_random, slugify=True)
    'h0h-0i0'
    """
    return random.choice([
        "{letter}{number}{letter} {other_number}{other_letter}{other_number}",
        "{number}{other_number}{number}{number}{other_number}",
        "{number}{letter}{number}{other_number}{other_letter}"
    ]).format(
        number=number(random=random),
        other_number=number(random=random),
        letter=letter(random=random),
        other_letter=letter(random=random)
    )


@slugify_argument
@capitalize_argument
def street(random=random, *args, **kwargs):
    """
    Produce something that sounds like a street name

    >>> mock_random.seed(0)
    >>> street(random=mock_random)
    'chimp place'
    >>> street(random=mock_random, capitalize=True)
    'Boatbench Block'
    >>> mock_random.seed(3)
    >>> street(random=mock_random, slugify=True)
    'central-britches-boulevard'
    """
    return random.choice([
        "{noun} {street_type}",
        "{adjective}{verb} {street_type}",
        "{direction} {adjective}{verb} {street_type}",
        "{direction} {noun} {street_type}",
        "{direction} {lastname} {street_type}",
    ]).format(noun=noun(random=random),
              lastname=lastname(random=random),
              direction=direction(random=random),
              adjective=adjective(random=random),
              verb=verb(random=random),
              street_type=random.choice(streets))


@slugify_argument
@capitalize_argument
def address(random=random, *args, **kwargs):
    """
    A street name plus a number!

    >>> mock_random.seed(0)
    >>> address(random=mock_random)
    '0000 amazingslap boardwalk'
    >>> address(random=mock_random, capitalize=True)
    '0000 South Throbbingjump Boulevard'
    >>> address(random=mock_random, slugify=True)
    'two-central-britches-boulevard'
    """
    return random.choice([
        "{number}{other_number}{number}{other_number} {street}",
        "{number}{other_number} {street}",
        "{numberwang} {street}",
        "apt {numberwang}, {number}{other_number}{other_number} {street}",
        "apt {number}{other_number}{number}, {numberwang} {street}",
        "po box {number}{other_number}{number}{other_number}",
    ]).format(number=number(random=random),
              other_number=number(random=random),
              numberwang=numberwang(random=random),
              street=street(random=random))


def image(random=random, width=800, height=600, https=False, *args, **kwargs):
    """
    Generate the address of a placeholder image.

    >>> mock_random.seed(0)
    >>> image(random=mock_random)
    'http://dummyimage.com/800x600/292929/e3e3e3&text=mighty poop'
    >>> image(random=mock_random, width=60, height=60)
    'http://placekitten.com/60/60'
    >>> image(random=mock_random, width=1920, height=1080)
    'http://dummyimage.com/1920x1080/292929/e3e3e3&text=To get to Westeros, you need to go to Britchestown, then drive west.'
    >>> image(random=mock_random, https=True, width=1920, height=1080)
    'https://dummyimage.com/1920x1080/292929/e3e3e3&text=East Mysteryhall is in Westeros.'
    """
    target_fn = noun
    if width + height > 300:
        target_fn = thing
    if width + height > 2000:
        target_fn = sentence
    s = ""
    if https:
        s = "s"
    if random.choice([True, False]):
        return "http{s}://dummyimage.com/{width}x{height}/292929/e3e3e3&text={text}".format(
            s=s, width=width, height=height, text=target_fn(random=random))
    else:
        return "http{s}://placekitten.com/{width}/{height}".format(s=s, width=width, height=height)
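# Usage sketch (illustrative, not part of the original module): compose a fake
# contact record from the generators above, using the stdlib RNG default.
if __name__ == '__main__':
    print(name(capitalize=True))
    print(email())
    print(address(capitalize=True))
    print(company(capitalize=True))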
nilq/baby-python
python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

from twisted.internet import reactor
from twisted.web import proxy, server

# Serve a reverse proxy on local port 8080 that forwards every request to
# www.yahoo.com:80 (the empty string is the remote path prefix to proxy).
site = server.Site(proxy.ReverseProxyResource('www.yahoo.com', 80, ''))
reactor.listenTCP(8080, site)
reactor.run()
nilq/baby-python
python
from torch.optim.lr_scheduler import LambdaLR


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """
    Create a schedule with a learning rate that decreases linearly after
    linearly increasing during a warmup period.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0,
            float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def jaccard(str1, str2):
    a = set(str1.lower().split())
    b = set(str2.lower().split())
    c = a.intersection(b)
    return float(len(c)) / (len(a) + len(b) - len(c))


class AverageMeter:
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def calculate_jaccard_score(
        original_tweet,
        target_string,
        sentiment_val,
        idx_start,
        idx_end,
        offsets,
        verbose=False):

    if idx_end < idx_start:
        idx_end = idx_start

    filtered_output = ""
    for ix in range(idx_start, idx_end + 1):
        filtered_output += original_tweet[offsets[ix][0]: offsets[ix][1]]
        if (ix + 1) < len(offsets) and offsets[ix][1] < offsets[ix + 1][0]:
            filtered_output += " "

    if sentiment_val == "neutral" or len(original_tweet.split()) < 2:
        filtered_output = original_tweet

    # was `utils.jaccard(...)`; jaccard is defined in this module
    jac1 = jaccard(target_string.strip(), filtered_output.strip())
    st1 = filtered_output

    if idx_end < idx_start:
        idx_start = idx_end

    filtered_output = ""
    for ix in range(idx_start, idx_end + 1):
        filtered_output += original_tweet[offsets[ix][0]: offsets[ix][1]]
        if (ix + 1) < len(offsets) and offsets[ix][1] < offsets[ix + 1][0]:
            filtered_output += " "

    if sentiment_val == "neutral" or len(original_tweet.split()) < 2:
        filtered_output = original_tweet

    jac2 = jaccard(target_string.strip(), filtered_output.strip())  # was utils.jaccard
    st2 = filtered_output

    if jac1 > jac2:
        jac = jac1
        filtered_output = st1
    else:
        jac = jac2
        filtered_output = st2

    return jac, filtered_output
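# Usage sketch (illustrative, not part of the original file): wires the warmup
# schedule to an optimizer; the model and step counts here are placeholders.
if __name__ == '__main__':
    import torch

    model = torch.nn.Linear(10, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=3e-5)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=100, num_training_steps=1000)
    for step in range(1000):
        # loss.backward() would go here in a real training loop
        optimizer.step()
        scheduler.step()  # advance the LR: linear warmup, then linear decay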
nilq/baby-python
python
# Copyright (c) 2019-2021, Jonas Eschle, Jim Pivarski, Eduardo Rodrigues, and Henry Schreiner.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/vector for details.

import pytest

import vector

ak = pytest.importorskip("awkward")
numba = pytest.importorskip("numba")

pytest.importorskip("vector._backends.numba_object")


@pytest.mark.numba
def test():
    @numba.njit
    def extract(x):
        return x[2][0]

    array = vector.Array([[{"x": 1, "y": 2}], [], [{"x": 3, "y": 4}, {"x": 5, "y": 6}]])
    out = extract(array)
    assert isinstance(out, vector._backends.object_.VectorObject2D)
    assert out.x == pytest.approx(3)
    assert out.y == pytest.approx(4)

    array = vector.Array(
        [[{"x": 1, "y": 2, "z": 3, "E": 4}], [], [{"x": 5, "y": 6, "z": 7, "E": 15}]]
    )
    out = extract(array)
    assert isinstance(out, vector._backends.object_.MomentumObject4D)
    assert out.x == pytest.approx(5)
    assert out.y == pytest.approx(6)
    assert out.z == pytest.approx(7)
    assert out.t == pytest.approx(15)
nilq/baby-python
python
import socket  # for sockets
import sys     # for exit

# create dgram udp socket
try:
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except socket.error:
    print('Failed to create socket')
    sys.exit()

HOST = ''    # Symbolic name meaning all available interfaces
PORT = 6000  # Arbitrary non-privileged port

s.bind((HOST, PORT))

# while(1):
#     msg = input('Enter message to send : ')
try:
    # Set the whole string
    msg = b'\xc0\xa8\x01\x0fHDLMIRACLE\xaa\xaa\x0f\x01\x17\x00\x95\x001\x01J\x01d\x00\x03\xd7\xd1'
    s.sendto(msg, (HOST, PORT))
    print(msg)

    # receive data from client (data, addr)
    # d = s.recvfrom(1024)
    # reply = d[0]
    # addr = d[1]
    # print('Server reply : ' + reply)

except socket.error as msg:
    # socket.error is OSError in Python 3 and is not subscriptable, so report
    # errno/strerror instead of msg[0]/msg[1]
    print('Error Code : ' + str(msg.errno) + ' Message ' + str(msg.strerror))
    sys.exit()
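# Companion receiver sketch (an assumption about intent, not part of the
# original script): run it on another machine, or change PORT, since the
# sender above already binds 6000 on this host.
import socket

r = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
r.bind(('', 6000))
data, addr = r.recvfrom(1024)  # block until one datagram arrives
print('received %r from %s' % (data, addr))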
nilq/baby-python
python
# Copyright (c) 2018 Sony Pictures Imageworks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
from socket import gethostname

import Utils
from Manifest import QtCore, QtWidgets, opencue, os


class LocalBookingWidget(QtWidgets.QWidget):
    """
    A widget for creating opencue RenderPartitions, otherwise known as
    local core booking.
    """

    hosts_changed = QtCore.Signal()

    def __init__(self, target, parent=None):
        QtWidgets.QWidget.__init__(self, parent)

        # Can either be an opencue job, layer, or frame.
        self.__target = target
        self.__parent = parent

        self.jobName = self.getTargetJobName()

        QtWidgets.QVBoxLayout(self)
        layout = QtWidgets.QGridLayout()

        self.__select_host = QtWidgets.QComboBox(self)
        self.__lba_group = QtWidgets.QGroupBox("Settings", self)

        try:
            owner = opencue.api.getOwner(os.environ["USER"])
            for host in owner.getHosts():
                if host.data.lockState != opencue.api.host_pb2.OPEN:
                    self.__select_host.addItem(host.data.name)
        except Exception:
            pass

        self.__deed_button = None
        self.__msg_widget = None

        if self.__select_host.count() == 0:
            self.__deed_button = QtWidgets.QPushButton("Deed This Machine", self)
            msg = "You have not deeded any hosts or they are not NIMBY locked."
            self.__msg_widget = QtWidgets.QLabel(msg, self)
            self.layout().addWidget(self.__msg_widget)
            self.layout().addWidget(self.__deed_button)
            self.__deed_button.pressed.connect(self.deedLocalhost)
            self.__lba_group.setDisabled(True)

        self.__text_target = QtWidgets.QLabel(self.__target.data.name, self)

        self.__num_threads = QtWidgets.QSpinBox(self)
        self.__num_threads.setValue(1)

        self.__num_cores = QtWidgets.QLineEdit(self)
        self.__num_cores.setText("1")
        self.__num_cores.setReadOnly(True)

        self.__num_frames = QtWidgets.QSpinBox(self)
        self.__num_frames.setValue(1)

        self.__frame_warn = QtWidgets.QLabel(self)

        self.__num_mem = QtWidgets.QSlider(self)
        self.__num_mem.setValue(4)
        self.__num_mem.setOrientation(QtCore.Qt.Horizontal)
        self.__num_mem.setTickPosition(QtWidgets.QSlider.TicksBelow)
        self.__num_mem.setTickInterval(1)

        self.__text_num_mem = QtWidgets.QSpinBox(self)
        self.__text_num_mem.setValue(4)
        self.__text_num_mem.setSuffix("GB")

        #
        # Next layout is if the deed is in use.
        #
        layout2 = QtWidgets.QGridLayout()
        self.__run_group = QtWidgets.QGroupBox("Deed Currently in Use", self)
        self.__run_cores = QtWidgets.QSpinBox(self)
        self.__run_mem = QtWidgets.QSlider(self)
        self.__run_mem.setValue(4)
        self.__run_mem.setOrientation(QtCore.Qt.Horizontal)
        self.__run_mem.setTickPosition(QtWidgets.QSlider.TicksBelow)
        self.__run_mem.setTickInterval(1)
        self.__text_run_mem = QtWidgets.QSpinBox(self)
        self.__text_run_mem.setValue(4)
        self.__text_run_mem.setSuffix("GB")
        self.__btn_clear = QtWidgets.QPushButton("Clear", self)

        #
        # Setup the signals.
        #
        self.__btn_clear.pressed.connect(self.clearCurrentHost)
        self.__select_host.activated.connect(self.__host_changed)
        self.__num_mem.valueChanged.connect(self.__text_num_mem.setValue)
        self.__text_num_mem.valueChanged.connect(self.__num_mem.setValue)
        self.__num_threads.valueChanged.connect(self.__calculateCores)
        self.__num_frames.valueChanged.connect(self.__calculateCores)
        self.__run_mem.valueChanged.connect(self.__text_run_mem.setValue)
        self.__text_run_mem.valueChanged.connect(self.__run_mem.setValue)

        self.layout().addWidget(QtWidgets.QLabel("Target Host:"))
        self.layout().addWidget(self.__select_host)

        layout.addWidget(QtWidgets.QLabel("Target:"), 1, 0)
        layout.addWidget(self.__text_target, 1, 1, 1, 3)
        layout.addWidget(QtWidgets.QLabel("Parallel Frames:"), 2, 0)
        layout.addWidget(self.__num_frames, 2, 1)
        layout.addWidget(QtWidgets.QLabel("Threads: "), 2, 2)
        layout.addWidget(self.__num_threads, 2, 3)
        layout.addWidget(QtWidgets.QLabel("Cores: "), 3, 0)
        layout.addWidget(self.__num_cores, 3, 1)
        layout.addWidget(self.__frame_warn, 3, 2, 1, 2)
        layout.addWidget(QtWidgets.QLabel("Memory (GB): "), 4, 0)
        layout.addWidget(self.__num_mem, 4, 1, 1, 2)
        layout.addWidget(self.__text_num_mem, 4, 3)

        #
        # Layout 2
        #
        layout2.addWidget(QtWidgets.QLabel("Running Cores:"), 1, 0)
        layout2.addWidget(self.__run_cores, 1, 1)
        layout2.addWidget(QtWidgets.QLabel("Memory (GB): "), 3, 0)
        layout2.addWidget(self.__run_mem, 3, 1, 1, 2)
        layout2.addWidget(self.__text_run_mem, 3, 3)
        layout2.addWidget(self.__btn_clear, 4, 0)

        #
        # Set up overall layouts
        #
        self.__run_group.setLayout(layout2)
        self.__lba_group.setLayout(layout)

        self.__stack = QtWidgets.QStackedLayout()
        self.__stack.addWidget(self.__lba_group)
        self.__stack.addWidget(self.__run_group)
        self.layout().addLayout(self.__stack)

        ## Set initial values.
        self.__host_changed(self.__select_host.currentText())

        self.resize(400, 400)

    def getTargetJobName(self):
        if Utils.isJob(self.__target):
            return self.__target.data.name
        elif Utils.isLayer(self.__target):
            return self.__target.name
        elif Utils.isFrame(self.__target):
            return self.__parent.getJob().data.name
        else:
            return ''

    def hostAvailable(self):
        return self.__select_host.count() > 0

    def __host_changed(self, hostname):
        hostname = str(hostname)
        if not hostname:
            return
        host = opencue.api.findHost(str(hostname))
        try:
            rp = [r for r in host.getRenderPartitions() if r.job == self.jobName]
            if rp:
                rp = rp[0]
                self.__stack.setCurrentIndex(1)
                self.__btn_clear.setText("Clear")
                self.__btn_clear.setDisabled(False)
                # `//` keeps the original Python 2 integer division semantics
                self.__run_cores.setRange(1, int(host.data.idleCores) + rp.maxCores // 100)
                self.__run_cores.setValue(rp.maxCores // 100)
                self.__run_mem.setRange(1, int(host.data.totalMemory / 1024 / 1024))
                self.__run_mem.setValue(int(rp.maxMemory / 1024 / 1024))
            else:
                self.__stack.setCurrentIndex(0)
                self.__num_frames.setRange(1, host.data.idleCores)
                self.__num_threads.setRange(1, host.data.idleCores)
                self.__num_mem.setRange(1, int(host.data.totalMemory / 1024 / 1024))
                self.__num_threads.setRange(1, host.data.idleCores)
        except Exception as e:
            print("Failed to get RenderPartition information: %s" % e)

    def deedLocalhost(self):
        show_name = os.environ.get("SHOW", "pipe")
        try:
            _show = opencue.api.findShow(show_name)
        except Exception as e:
            msg = QtWidgets.QMessageBox(self)
            msg.setText("Error %s, please setshot and rerun cuetopia" % e)
            msg.exec_()
            return

        user = os.environ["USER"]
        try:
            owner = opencue.api.getOwner(user)
        except opencue.EntityNotFoundException:
            # Owner does not exist
            owner = _show.createOwner(user)

        hostname = gethostname()
        try:
            host = opencue.api.findHost(hostname.rsplit(".", 2)[0])
            owner.takeOwnership(host.data.name)
            self.__select_host.addItem(host.data.name)
            self.__lba_group.setDisabled(False)

            if self.__deed_button:
                self.__deed_button.setVisible(False)
            if self.__msg_widget:
                self.__msg_widget.setVisible(False)
            self.__deed_button = None
            self.__msg_widget = None
            self.hosts_changed.emit()
        except Exception:
            msg = QtWidgets.QMessageBox(self)
            msg.setText("Unable to determine your machine's hostname. "
                        "It is not set up properly for local booking")
            msg.exec_()

    def __calculateCores(self, ignore):
        frames = self.__num_frames.value()
        threads = self.__num_threads.value()

        self.__num_cores.setText(str(frames * threads))

        if self.__hasError():
            self.__frame_warn.setText("Invalid thread ratio")
        else:
            self.__frame_warn.setText("")

    def __hasError(self):
        cores = int(self.__num_cores.text())
        frames = self.__num_frames.value()
        threads = self.__num_threads.value()

        if frames * threads > self.__num_frames.maximum():
            return True
        elif frames == 0:
            return True
        elif cores % threads > 0:
            return True
        elif threads > cores:
            return True
        return False

    def clearCurrentHost(self):
        hostname = str(self.__select_host.currentText())
        if not hostname:
            return
        try:
            self.__btn_clear.setText("Clearing....")
            self.__btn_clear.setDisabled(True)
            host = opencue.api.findHost(str(hostname))

            rp = [r for r in host.getRenderPartitions() if r.job == self.jobName]
            if rp:
                rp = rp[0]
                rp.delete()

                ## Wait for hosts to clear out, then switch
                ## back to the booking widget
                for i in range(0, 10):
                    try:
                        rp = [r for r in host.getRenderPartitions()
                              if r.job == self.jobName][0]
                        time.sleep(1)
                    except:
                        break
            self.__host_changed(hostname)
        except Exception as e:
            print("Error clearing host: %s" % e)

    def bookCurrentHost(self):
        if self.__hasError():
            return

        host = opencue.api.findHost(str(self.__select_host.currentText()))
        rp = [r for r in host.getRenderPartitions() if r.job == self.jobName]
        if rp:
            # A render partition already exists on this host and the user is modifying it
            rp[0].setMaxResources(int(self.__run_cores.value() * 100),
                                  int(self.__run_mem.value()) * 1024 * 1024,
                                  0)
        else:
            self.__target.addRenderPartition(
                str(self.__select_host.currentText()),
                int(self.__num_threads.value()),
                int(self.__num_cores.text()),
                int(self.__num_mem.value() * 1048576),
                0)


class LocalBookingDialog(QtWidgets.QDialog):
    """
    A dialog to wrap a LocalBookingWidget. Provides action buttons.
    """

    def __init__(self, target, parent=None):
        QtWidgets.QDialog.__init__(self, parent)
        QtWidgets.QVBoxLayout(self)
        btn_layout = QtWidgets.QHBoxLayout()

        self.setWindowTitle("Assign Local Cores")
        self.__booking = LocalBookingWidget(target, parent)
        self.__btn_ok = QtWidgets.QPushButton("Ok")
        self.__btn_cancel = QtWidgets.QPushButton("Cancel")
        self.__updateOkButton()

        btn_layout.addStretch()
        btn_layout.addWidget(self.__btn_ok)
        btn_layout.addWidget(self.__btn_cancel)

        self.layout().addWidget(self.__booking)
        self.layout().addLayout(btn_layout)

        self.__booking.hosts_changed.connect(self.__updateOkButton)
        self.__btn_ok.pressed.connect(self.doLocalBooking)
        self.__btn_cancel.pressed.connect(self.close)

    def __updateOkButton(self):
        # renamed from the original's misspelled __updateOkButtion
        self.__btn_ok.setDisabled(not self.__booking.hostAvailable())

    def doLocalBooking(self):
        try:
            self.__booking.bookCurrentHost()
            self.close()
        except Exception as e:
            msg = QtWidgets.QMessageBox(self)
            msg.setText("Failed to book local cores. "
                        "There were no pending frames that met your criteria. "
                        "Double-check that you're allocating enough memory "
                        "and that your job has waiting frames.")
            msg.setDetailedText(str(e))
            msg.exec_()
nilq/baby-python
python
#!/usr/bin/env python3 # # Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center # Distributed under the terms of the 3-clause BSD License. import time import unittest from ipykernel.tests.utils import execute, wait_for_idle from sos_notebook.test_utils import flush_channels, sos_kernel, NotebookTest from selenium.webdriver.common.keys import Keys class TestFrontEnd(NotebookTest): def test_toggle_console(self, notebook): time.sleep(2) assert notebook.is_console_panel_open() notebook.toggle_console_panel() time.sleep(2) assert not notebook.is_console_panel_open() notebook.toggle_console_panel() time.sleep(2) assert notebook.is_console_panel_open() def test_run_in_console(self, notebook): idx = notebook.call("print(1)", kernel="SoS") notebook.execute_cell(idx, in_console=True) # the latest history cell assert "1" == notebook.get_cell_output(-1, in_console=True) # if the cell is non-SoS, the console should also change kernel idx = notebook.call("cat(123)", kernel="R") notebook.execute_cell(idx, in_console=True) # the latest history cell assert "123" == notebook.get_cell_output(-1, in_console=True) idx = notebook.call("print(12345)", kernel="SoS") notebook.execute_cell(idx, in_console=True) # the latest history cell assert "12345" == notebook.get_cell_output(-1, in_console=True) def test_run_directly_in_console(self, notebook): notebook.edit_prompt_cell('print("haha")', kernel='SoS', execute=True) assert "haha" == notebook.get_cell_output(-1, in_console=True) notebook.edit_prompt_cell('cat("haha2")', kernel="R", execute=True) assert "haha2" == notebook.get_cell_output(-1, in_console=True) def test_history_in_console(self, notebook): notebook.edit_prompt_cell("a = 1", execute=True) assert "" == notebook.get_prompt_content() notebook.edit_prompt_cell("b <- 2", kernel="R", execute=True) assert "" == notebook.get_prompt_content() notebook.prompt_cell.send_keys(Keys.UP) assert "b <- 2" == notebook.get_prompt_content() notebook.prompt_cell.send_keys(Keys.UP) assert "a = 1" == notebook.get_prompt_content() # FIXME: down keys does not work, perhaps because the cell is not focused and # the first step would be jumping to the end of the line notebook.prompt_cell.send_keys(Keys.DOWN) notebook.prompt_cell.send_keys(Keys.DOWN) # assert 'b <- 2' == notebook.get_prompt_content() def test_clear_history(self, notebook): notebook.edit_prompt_cell("a = 1", execute=True) notebook.edit_prompt_cell("b <- 2", kernel="R", execute=True) # use "clear" to clear all panel cells notebook.edit_prompt_cell("clear", kernel="SoS", execute=False) # we cannot wait for the completion of the cell because the cells # will be cleared notebook.prompt_cell.send_keys(Keys.CONTROL, Keys.ENTER) assert not notebook.panel_cells def test_switch_kernel(self, notebook): kernels = notebook.get_kernel_list() assert "SoS" in kernels assert "R" in kernels backgroundColor = { "SoS": [0, 0, 0], "R": [220, 220, 218], "python3": [255, 217, 26], } # test change to R kernel by click notebook.select_kernel(index=0, kernel_name="R", by_click=True) # check background color for R kernel assert backgroundColor["R"], notebook.get_input_backgroundColor(0) # the cell keeps its color after evaluation notebook.edit_cell( index=0, content="""\ %preview -n rn rn <- rnorm(5) """, render=True, ) output = notebook.get_cell_output(0) assert "rn" in output and "num" in output assert backgroundColor["R"], notebook.get_output_backgroundColor(0) # test $get and shift to SoS kernel idx = notebook.call( """\ %get rn --from R len(rn) """, kernel="SoS", 
) assert backgroundColor["SoS"], notebook.get_input_backgroundColor(idx) assert "5" in notebook.get_cell_output(idx) # switch to python3 kernel idx = notebook.call( """\ %use Python3 """, kernel="SoS", ) assert backgroundColor["python3"] == notebook.get_input_backgroundColor( idx) notebook.append_cell("") assert backgroundColor["python3"] == notebook.get_input_backgroundColor( idx) # def testInterrupt(self, notebook): # # switch to python3 kernel # from textwrap import dedent # from selenium.webdriver.common.by import By # from selenium.webdriver import ActionChains # import time # index = len(notebook.cells) # notebook.add_cell( # index=index - 1, cell_type="code", content=dedent( # """\ # import time # while True: # time.sleep(1) # """, # )) # notebook.select_kernel(index=index, kernel_name='SoS', by_click=True) # notebook._focus_cell(index) # notebook.current_cell.send_keys(Keys.CONTROL, Keys.ENTER) # time.sleep(2) # top_menu = notebook.browser.find_element_by_id("kernel_menu") # ActionChains(notebook.browser).move_to_element(top_menu).click().perform() # int_menu = notebook.browser.find_element_by_id("int_kernel").find_elements_by_tag_name('a')[0] # ActionChains(notebook.browser).move_to_element(int_menu).click().perform() # notebook._wait_for_done(index, expect_error=True) def get_completions(kc, text): flush_channels() kc.complete(text, len(text)) reply = kc.get_shell_msg(timeout=2) return reply["content"] def inspect(kc, name, pos=0): flush_channels() kc.inspect(name, pos) reply = kc.get_shell_msg(timeout=2) return reply["content"] def is_complete(kc, code): flush_channels() kc.is_complete(code) reply = kc.get_shell_msg(timeout=2) return reply["content"] class TestKernelInteraction(unittest.TestCase): def testInspector(self): with sos_kernel() as kc: # match magics self.assertTrue("%get " in get_completions(kc, "%g")["matches"]) self.assertTrue("%get " in get_completions(kc, "%")["matches"]) self.assertTrue("%with " in get_completions(kc, "%w")["matches"]) # path complete self.assertGreater(len(get_completions(kc, "!ls ")["matches"]), 0) self.assertEqual( len(get_completions(kc, "!ls SOMETHING")["matches"]), 0) # wait_for_idle(kc) # variable complete execute(kc=kc, code="alpha=5") wait_for_idle(kc) execute(kc=kc, code="%use Python3") wait_for_idle(kc) self.assertTrue("alpha" in get_completions(kc, "al")["matches"]) self.assertTrue("all(" in get_completions(kc, "al")["matches"]) # for no match self.assertEqual( len(get_completions(kc, "alphabetatheta")["matches"]), 0) # get with all variables in self.assertTrue("alpha" in get_completions(kc, "%get ")["matches"]) self.assertTrue( "alpha" in get_completions(kc, "%get al")["matches"]) # with use and restart has kernel name self.assertTrue( "Python3" in get_completions(kc, "%with ")["matches"]) self.assertTrue( "Python3" in get_completions(kc, "%use ")["matches"]) self.assertTrue( "Python3" in get_completions(kc, "%shutdown ")["matches"]) self.assertTrue( "Python3" in get_completions(kc, "%shutdown ")["matches"]) self.assertTrue( "Python3" in get_completions(kc, "%use Py")["matches"]) # self.assertEqual( len(get_completions(kc, "%use SOME")["matches"]), 0) # wait_for_idle(kc) execute(kc=kc, code="%use SoS") wait_for_idle(kc) def testCompleter(self): with sos_kernel() as kc: # match magics ins_print = inspect(kc, "print")["data"]["text/plain"] self.assertTrue("print" in ins_print, "Returned: {}".format(ins_print)) wait_for_idle(kc) # # keywords ins_depends = inspect(kc, "depends:")["data"]["text/plain"] self.assertTrue("dependent targets" 
in ins_depends, "Returned: {}".format(ins_depends)) wait_for_idle(kc) # execute(kc=kc, code="alpha=5") wait_for_idle(kc) execute(kc=kc, code="%use Python3") wait_for_idle(kc) # action ins_run = inspect(kc, "run:")["data"]["text/plain"] self.assertTrue("sos.actions" in ins_run, "Returned: {}".format(ins_run)) wait_for_idle(kc) # ins_alpha = inspect(kc, "alpha")["data"]["text/plain"] self.assertTrue("5" in ins_alpha, "Returned: {}".format(ins_alpha)) wait_for_idle(kc) for magic in ("get", "run", "sosrun"): ins_magic = inspect(kc, "%" + magic, 2)["data"]["text/plain"] self.assertTrue("usage: %" + magic in ins_magic, "Returned: {}".format(ins_magic)) wait_for_idle(kc) execute(kc=kc, code="%use SoS") wait_for_idle(kc) def testIsComplete(self): with sos_kernel() as kc: # match magics status = is_complete(kc, "prin") self.assertEqual(status["status"], "complete") # status = is_complete(kc, "a=1") self.assertEqual(status["status"], "complete") # status = is_complete(kc, "") self.assertEqual(status["status"], "complete") # the status seems to be version dependent on ipython #status = is_complete(kc, "input:\n a=1,") #self.assertEqual(status["status"], "complete") # #status = is_complete(kc, "parameter: a=1,") #self.assertEqual(status["status"], "complete") # status = is_complete(kc, "%dict -r") self.assertEqual(status["status"], "complete") wait_for_idle(kc) if __name__ == "__main__": unittest.main()
nilq/baby-python
python
"""Base method for all global interpretations. Is a subclass of base ModelInterpreter""" from ..model_interpreter import ModelInterpreter class BaseGlobalInterpretation(ModelInterpreter): """Base class for global model interpretations""" pass
nilq/baby-python
python
import json import unittest from pyshared.server.ref import CallCommand from pyshared.server.ref import DelCommand from pyshared.server.ref import ListCommand from pyshared.server.ref import LocalSharedResourcesManager from pyshared.server.ref import SetCommand from pyshared.server.ref import default_command_mapper from pyshared.server.rx import ReactiveSharedResourcesServer from rx import Observable class DefaultTest(unittest.TestCase): shared_resource = None def setUp(self): self.shared_resource = LocalSharedResourcesManager({ 'number': 10 }) def test_call(self): call_command = CallCommand( resource_name='number', method='__add__', args=[7] ) self.assertEqual(17, call_command.exec(self.shared_resource)) class ReactiveTest(unittest.TestCase): reactive_server = None def setUp(self): self.reactive_server = ReactiveSharedResourcesServer(LocalSharedResourcesManager({ 'number': 10 })) def test_call(self): call_command = CallCommand( resource_name='number', method='__sub__', args=[5] ) result = [] self.reactive_server(call_command).subscribe(result.append) self.assertEqual(result, [5]) def test_call_with_result(self): call_command = CallCommand( resource_name='number', method='__add__', args=[5], result='result' ) result = [] self.reactive_server(call_command).subscribe(result.append) self.assertEqual(result, [{'result': 15}]) def test_call_any_result(self): call_command = CallCommand( resource_name='number', method='__add__', args=[5], result=True ) result = [] self.reactive_server(call_command).subscribe(result.append) self.assertEqual(15, list(result[0].values())[0]) def test_list(self): list_command = ListCommand() result = [] self.reactive_server(list_command).subscribe(result.append) self.assertEqual(result, [['number']]) def test_set(self): set_command = SetCommand( resource_name='a', value=10 ) result = [] self.reactive_server(set_command).subscribe(result.append) self.assertEqual(result, [{'a': 10}]) def test_del(self): del_command = DelCommand(resource_name='number') result = [] self.reactive_server(del_command).subscribe(result.append) self.assertEqual(result, ['number']) def test_mapper(self): result = [] Observable.from_([ {'cmd': 'call', 'resource_name': 'number', 'method': '__sub__', 'args': [1]}, {'cmd': 'call', 'resource_name': 'number', 'method': '__add__', 'args': [5]}, ]).map(default_command_mapper) \ .flat_map(self.reactive_server) \ .subscribe(result.append) self.assertEqual(result, [9, 15]) def test_json_mapper(self): result = [] Observable.from_([ '{"cmd": "call", "resource_name": "number", "method": "__sub__", "args": [1]}', '{"cmd": "call", "resource_name": "number", "method": "__add__", "args": [5]}', ]).map(json.loads) \ .map(default_command_mapper) \ .flat_map(self.reactive_server) \ .subscribe(result.append) self.assertEqual(result, [9, 15])
nilq/baby-python
python
from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys class RightBoard: def __init__(self, driver): self.driver = driver self.elements = RightBoardElements(self.driver) def click(self, elem): self.driver.execute_script( "arguments[0].click();", elem ) @staticmethod def __input_into(elem, value): current_input_text = elem.get_attribute("value") elem.send_keys(len(current_input_text) * Keys.BACKSPACE) elem.send_keys(value) # OPERATIONS def open_orders(self): self.click(self.elements.open_orders_tab) def completed_orders(self): self.click(self.elements.completed_orders_tab) def buy_tab(self): self.click(self.elements.buy_tab) def sell_tab(self): self.click(self.elements.sell_tab) def input_buy_at_price(self, new_value): self.__input_into(self.elements.at_price_input, new_value) def input_amount_to_buy(self, new_value): self.__input_into(self.elements.amount_input, new_value) def input_total_money_to_spend(self, new_value): self.__input_into(self.elements.total_input, new_value) def buy_sell_button(self): self.click(self.elements.buy_sell_button) class RightBoardElements: CONTAINER_PATH = "./div/div[3]/div/div[3]" # UPPER SECTION OPEN_ORDERS_PATH = CONTAINER_PATH + "/div[3]/div/div[1]" COMPLETED_ORDERS_PATH = CONTAINER_PATH + "/div[3]/div/div[2]" TOGGLE_CONTEXT_CURRENCY_PATH = CONTAINER_PATH + "/div[3]/div[2]/div/div/div[1]/div/div/input" CANCEL_ALL_ORDERS_PATH = CONTAINER_PATH + "/div[3]/div[2]/div/div/div[1]/button" ORDER_LIST_PATH = CONTAINER_PATH + "/div[3]/div[2]/div/div/div[3]/div/div" # LOWER SECTION BUY_TAB_PATH = CONTAINER_PATH + "/div[4]/div/div[1]/div[1]" SELL_TAB_PATH = CONTAINER_PATH + "/div[4]/div/div[1]/div[2]" AT_PRICE_PATH = CONTAINER_PATH + "/div[4]/div/div[2]/div/form/div[2]/div/div/input" AMOUNT_PATH = CONTAINER_PATH + "/div[4]/div/div[2]/div/form/div[3]/div/div/input" TOTAL_BASE_CURRENCY_PATH = CONTAINER_PATH + "/div[4]/div/div[2]/div/form/div[4]/div/div/input" BUY_OR_SELL_BUTTON_PATH = CONTAINER_PATH + "/div[4]/div/div[2]/div/form/button" def __init__(self, driver): self.driver = driver self.root = self.driver.find_element(By.ID, "root") self.container = self.root.find_element(By.XPATH, self.CONTAINER_PATH) # UPPER SECTION self.open_orders_tab = self.root.find_element(By.XPATH, self.OPEN_ORDERS_PATH) self.completed_orders_tab = self.root.find_element(By.XPATH, self.COMPLETED_ORDERS_PATH) # self.toggle_context_only_orders = self.root.find_element(By.XPATH, self.TOGGLE_CONTEXT_CURRENCY_PATH) # self.cancel_all_orders = self.root.find_element(By.XPATH, self.CANCEL_ALL_ORDERS_PATH) # self.order_list_view = self.root.find_element(By.XPATH, self.ORDER_LIST_PATH) # LOWER SECTION self.buy_tab = self.root.find_element(By.XPATH, self.BUY_TAB_PATH) self.sell_tab = self.root.find_element(By.XPATH, self.SELL_TAB_PATH) @property def at_price_input(self): return self.root.find_element(By.XPATH, self.AT_PRICE_PATH) @property def amount_input(self): return self.root.find_element(By.XPATH, self.AMOUNT_PATH) @property def total_input(self): return self.root.find_element(By.XPATH, self.TOTAL_BASE_CURRENCY_PATH) @property def buy_sell_button(self): return self.root.find_element(By.XPATH, self.BUY_OR_SELL_BUTTON_PATH)
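
# --- Hedged usage sketch (not part of the original module) ---
# Assumes a Chrome webdriver already navigated to the exchange page the
# XPaths above were written for; the URL below is hypothetical.
if __name__ == '__main__':
    from selenium import webdriver

    driver = webdriver.Chrome()
    driver.get('https://example-exchange.test')

    board = RightBoard(driver)
    board.buy_tab()                       # switch to the buy form
    board.input_buy_at_price('100.5')     # limit price
    board.input_amount_to_buy('2')        # quantity
    board.buy_sell_button()               # submit the order

    driver.quit()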
nilq/baby-python
python
import threading import time import signal import sys from callbacks_event_listener import EventListener from wpwithin_python import WPWithin,\ PricePerUnit,\ Price,\ Service,\ CommonPSPKeys,\ WorldpayPSPKeys,\ WP_PSP_NAME client = WPWithin("127.0.0.1", 9090, True, start_callback_server=True, callback_port=9092, event_listener=EventListener()) client.setup("Python3 Device", "Sample Python3 producer device") psp_config = { CommonPSPKeys.psp_name: WP_PSP_NAME, CommonPSPKeys.hte_public_key: "T_C_6a38539b-89d0-4db9-bec3-d825779c1809", CommonPSPKeys.hte_private_key: "T_S_6b0f27d5-3787-4304-a596-01160c49a55d", WorldpayPSPKeys.wp_api_endpoint: "https://api.worldpay.com/v1", WorldpayPSPKeys.wp_merchant_client_key: "T_C_6a38539b-89d0-4db9-bec3-d825779c1809", WorldpayPSPKeys.wp_merchant_service_key: "T_S_6b0f27d5-3787-4304-a596-01160c49a55d" } client.init_producer(psp_config) price_per_unit = PricePerUnit(amount=650, currency_code="GBP") rw_price = Price(price_id=1, description="Car Wash", price_per_unit=price_per_unit, unit_id=2, unit_description="Single wash") service = Service(service_id=1, name="RoboWash", description="Car washed by robot", prices={1: rw_price}) client.add_service(service) print("Start service broadcast for 20 seconds") client.start_service_broadcast(20000) def signal_handler(signal_number, stack_frame): print("shutting down...") client.shutdown() signal.signal(signal.SIGINT, signal_handler) while True: pass
nilq/baby-python
python
from django.urls import path, include urlpatterns = [ path('', include('accounts.urls.accounts')), path('', include('accounts.urls.employers')), path('', include('accounts.urls.professionals')), ]
nilq/baby-python
python
from django.shortcuts import render
from django.http import HttpResponse
from notice.models import Notice, Qna
from main.models import search_word
from django.views.generic import ListView
from django.db.models import Q
from django.utils import timezone
import datetime


def main(request):
    # Main_Notice = Notice.objects.order_by('-id')[0:4]  # 4 most recent posts for the main page
    Hot_QNA = Qna.objects.filter(create_at__gte=timezone.now()-datetime.timedelta(days=7)).order_by('-hits')[0:4]  # top 4 QnA posts by view count (among data from the last 7 days)
    Rank = search_word.objects.order_by('-hits')[0:10]  # top 10 ranking, ordered by search count

    time1 = timezone.now()
    time7 = timezone.now()-datetime.timedelta(days=7)

    return render(request, 'main/main.html', {
        # 'MainNotice':Main_Notice,
        'Rank':Rank,
        'HotQNA':Hot_QNA,
        'time1' : time1,
        'time7' : time7,
    })


class SearchView(ListView):  # posts
    model = search_word
    template_name = 'main/Search_result.html'

    def get_queryset(self):
        keyword = self.request.GET.get('q', '')
        if keyword:
            if search_word.objects.filter(keyword=keyword):
                a = search_word.objects.get(keyword=keyword)
                a.hits += 1
                a.save()
            else:
                b = search_word(keyword=keyword)
                b.save()
        return keyword
nilq/baby-python
python
import json

from django import template


register = template.Library()


@register.filter
def here(page, request):
    return request.path.startswith(page.get_absolute_url())


@register.simple_tag
def node_module(path):
    return '/node_modules/{}'.format(path)


@register.assignment_tag(takes_context=True)
def navigation_json(context, pages, section=None):
    """
    Builds a JSON navigation structure for the given pages.

    The pages should all be a subclass of PageBase, and possess a
    get_absolute_url() method. The result is returned as a JSON string,
    which the calling template assigns to a context variable.
    """
    request = context["request"]

    # Compile the entries.
    def page_entry(page):
        # Do nothing if the page is to be hidden from not logged in users
        if page.hide_from_anonymous and not request.user.is_authenticated():
            return

        # Do nothing if the page is set to offline
        if not page.is_online:
            return

        url = page.get_absolute_url()
        return {
            "url": url,
            "title": str(page),
            "here": request.path.startswith(url),
            "children": [page_entry(x) for x in page.navigation if page is not request.pages.homepage]
        }

    # All the applicable nav items; build each entry once instead of
    # calling page_entry() twice per page.
    entries = [entry for entry in (page_entry(x) for x in pages) if entry is not None]

    # Add the section.
    if section:
        section_entry = page_entry(section)
        entries = [section_entry] + list(entries)

    return json.dumps(entries)
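
# --- Hedged usage note (not part of the original module) ---
# In a template, after loading this tag library (the load name depends on
# this module's file name), the assignment tag can be used roughly like:
#
#   {% navigation_json request.pages.homepage.navigation as nav_json %}
#   <script>var NAV = {{ nav_json|safe }};</script>
#
# The template variable names here are illustrative.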
nilq/baby-python
python
from django.db import models from django.contrib.auth.models import User from pyuploadcare.dj.models import ImageField # Create your models here. class Neighborhood(models.Model): name = models.CharField(max_length=100) location = models.CharField(max_length=100) admin = models.ForeignKey("Profile", on_delete=models.CASCADE, related_name='hood') health_department = models.TextField(null=True, blank=True) police_department = models.TextField(null=True, blank=True) description = models.TextField() logo = models.ImageField(upload_to = 'images/', default='') def __str__(self): return self.name def create_neighborhood(self): self.save() def delete_neighborhood(self): self.delete() @classmethod def find_neighborhood(cls, neighborhood_id): return cls.objects.filter(id=neighborhood_id) class Meta: ordering =['-pk'] class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile') name = models.CharField(max_length=50, blank=True) bio = models.TextField(max_length=300, blank=True, default='No bio') profile_pic = models.ImageField(upload_to='images/', default='default.png') location = models.CharField(max_length=100, blank=True, null=True) neighborhood = models.ForeignKey(Neighborhood, on_delete=models.SET_NULL, null=True, related_name='members', blank=True) contact = models.CharField(max_length=20, blank=True) def __str__(self): return self.user.username def save_user_profile(self): self.save() @classmethod def get_hood_members(cls,hood): members=cls.objects.filter(hood__icontains=hood) return members class Business(models.Model): name = models.CharField(max_length=300) user = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='owner') neighborhood = models.ForeignKey(Neighborhood, on_delete=models.CASCADE, related_name='business') email = models.EmailField(max_length=100) description = models.TextField(max_length=1000) def __str__(self): return self.name def create_business(self): self.save() def delete_business(self): self.delete() @classmethod def find_business(cls,business_id): found=cls.objects.get(id=business_id) return found class Post(models.Model): title = models.CharField(max_length=100, null=True) post = models.TextField() posted_on = models.DateTimeField(auto_now_add=True) user = models.ForeignKey(Profile, on_delete=models.CASCADE,related_name='post_owner') hood = models.ForeignKey(Neighborhood, on_delete=models.CASCADE,related_name='hood_post') def __str__(self): return self.title def create_post(self): self.save() def delete_post(self): self.delete() class Meta: ordering =['-pk']
nilq/baby-python
python
## # Copyright © 2020, The Gust Framework Authors. All rights reserved. # # The Gust/Elide framework and tools, and all associated source or object computer code, except where otherwise noted, # are licensed under the Zero Prosperity license, which is enclosed in this repository, in the file LICENSE.txt. Use of # this code in object or source form requires and implies consent and agreement to that license in principle and # practice. Source or object code not listing this header, or unless specified otherwise, remain the property of # Elide LLC and its suppliers, if any. The intellectual and technical concepts contained herein are proprietary to # Elide LLC and its suppliers and may be covered by U.S. and Foreign Patents, or patents in process, and are protected # by trade secret and copyright law. Dissemination of this information, or reproduction of this material, in any form, # is strictly forbidden except in adherence with assigned license requirements. ## load( "@io_bazel_rules_closure//closure/private/rules:soy_library.bzl", _soy_library = "soy_library", ) load( "@io_bazel_rules_closure//closure:defs.bzl", _closure_js_template_library = "closure_js_template_library", _closure_py_template_library = "closure_py_template_library", _closure_java_template_library = "closure_java_template_library", _closure_messages = "closure_messages", ) load( "//defs/toolchain:schema.bzl", "JAVAPROTO_POSTFIX_", "CLOSUREPROTO_POSTFIX_", ) load( "//defs:config.bzl", _JS_TEMPLATES = "JS_TEMPLATES", _JAVA_TEMPLATES = "JAVA_TEMPLATES", _PYTHON_TEMPLATES = "PYTHON_TEMPLATES", ) INJECTED_SSR_SOY_DEPS = [ "@gust//gust/page:page_soy", ] INJECTED_SSR_PROTO_DEPS = [ "@gust//gust/page:page_proto", ] def _template_library(name, srcs, soy_deps = [], js_deps = [], py_deps = [], java_deps = [], proto_deps = [], style_deps = [], js = _JS_TEMPLATES, java = _JAVA_TEMPLATES, python = _PYTHON_TEMPLATES, java_package = None, precompile = True): """ Declare a universal, cross-platform template library, making use of the built-in Soy integration. """ _soy_library( name = name, srcs = srcs, deps = soy_deps, proto_deps = proto_deps, ) if js: _closure_js_template_library( name = "%s-js" % name, srcs = srcs, deps = js_deps + style_deps, proto_deps = proto_deps, ) if python: _closure_py_template_library( name = "%s-py" % name, srcs = srcs, deps = soy_deps + style_deps, proto_deps = proto_deps, ) if java: _closure_java_template_library( name = "%s-java" % name, srcs = srcs, deps = soy_deps, java_deps = ( [("%s-%s" % (p, JAVAPROTO_POSTFIX_)) for p in proto_deps] + [ "@safe_html_types//:java", "@safe_html_types//:java-proto", ] + [("%s-java_jcompiled" % p) for p in soy_deps]), proto_deps = proto_deps, precompile = precompile, java_package = java_package, ) def _ssr_library(name, srcs, soy_deps = [], js_deps = [], py_deps = [], java_deps = [], proto_deps = [], style_deps = [], java = _JAVA_TEMPLATES, python = _PYTHON_TEMPLATES, java_package = None, precompile = True, **kwargs): """ Declare a template for use exclusively during SSR (Server-Side Rendering). This also injects additional SSR-related dependencies automatically. """ _template_library( name = name, srcs = srcs, soy_deps = (soy_deps or []) + INJECTED_SSR_SOY_DEPS, proto_deps = (proto_deps or []) + INJECTED_SSR_PROTO_DEPS, java_package = java_package, js = False, python = _PYTHON_TEMPLATES, ) def _template_messages(name, deps, targetLocale, sourceLocale = "en", **kwargs): """ Generate an XLIFF messages file for the provided set of templates. 
""" _closure_messages( name = name, deps = deps, targetLocale = targetLocale, sourceLocale = sourceLocale, **kwargs ) ssr_library = _ssr_library template_library = _template_library template_messages = _template_messages
nilq/baby-python
python
#!/usr/bin/env python3 import sys from os import path sys.path.insert(0, path.join(path.dirname(__file__))) from importers.monzo_debit import Importer as monzo_debit_importer from beancount.ingest import extract account_id = "acc_yourMonzoAccountId" account = "Assets:Monzo:Something" CONFIG = [ monzo_debit_importer(account_id, account), ] extract.HEADER = ';; -*- mode: org; mode: beancount; coding: utf-8; -*-\n'
nilq/baby-python
python
import os import sys import tensorflow as tf from absl import app, logging from absl.flags import argparse_flags import _jsonnet def parse_args(args, parser): # Parse command line arguments parser = parser if parser else argparse_flags.ArgumentParser() parser.add_argument("input", type=str) # Name of TPU to train on, if any def local_parse_args(args): parser = argparse_flags.ArgumentParser() parse_args(args, parser) return parser.parse_args(args[1:]) # Returns content if worked, None if file not found, or throws an exception def try_path(dir, rel): if not rel: raise RuntimeError("Got invalid filename (empty string).") if rel[0] == "/": full_path = rel else: full_path = dir + rel if full_path[-1] == "/": raise RuntimeError("Attempted to import a directory") if not os.path.isfile(full_path): return full_path, None with open(full_path) as f: return full_path, f.read() def import_callback(dir, rel): full_path, content = try_path(dir, rel) if content: return full_path, content raise RuntimeError("File not found") def main(args): try: _jsonnet.evaluate_file( args.input, ext_vars={"MODEL_PATH": "Bob"}, import_callback=import_callback, ) except RuntimeError as e: logging.error(e) sys.exit(-1) if __name__ == "__main__": tf.disable_v2_behavior() app.run(main, flags_parser=parse_args)
nilq/baby-python
python
import logging
from typing import Any, List, Optional

from homeassistant.components.select import SelectEntity
from gehomesdk import ErdCodeType

from ...devices import ApplianceApi
from .ge_erd_entity import GeErdEntity
from .options_converter import OptionsConverter

_LOGGER = logging.getLogger(__name__)

class GeErdSelect(GeErdEntity, SelectEntity):
    """ERD-based selector entity"""

    device_class = "select"

    def __init__(self, api: ApplianceApi, erd_code: ErdCodeType, converter: OptionsConverter, erd_override: str = None, icon_override: str = None, device_class_override: str = None):
        super().__init__(api, erd_code, erd_override=erd_override, icon_override=icon_override, device_class_override=device_class_override)
        self._converter = converter

    @property
    def current_option(self):
        return self._converter.to_option_string(self.appliance.get_erd_value(self.erd_code))

    @property
    def options(self) -> List[str]:
        """Return a list of options"""
        return self._converter.options

    async def async_select_option(self, option: str) -> None:
        """Change the selected option."""
        _LOGGER.debug(f"Setting select from {self.current_option} to {option}")
        if option != self.current_option:
            await self.appliance.async_set_erd_value(self.erd_code, self._converter.from_option_string(option))
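
# --- Hedged example (not part of the original module) ---
# A minimal converter sketch. The exact abstract interface lives in
# .options_converter; the members below only mirror how GeErdSelect uses
# the converter, not a confirmed base-class signature.
class _SimpleEnumConverter(OptionsConverter):
    def __init__(self, mapping):
        self._mapping = mapping                           # ERD value -> option string
        self._reverse = {v: k for k, v in mapping.items()}

    @property
    def options(self):
        return list(self._mapping.values())

    def to_option_string(self, value):
        return self._mapping.get(value)

    def from_option_string(self, option):
        return self._reverse[option]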
nilq/baby-python
python
import webbrowser def open_page(url: str, new: int = 0, autoraise: bool = True): webbrowser.open(url, new=new, autoraise=autoraise) actions = {'open webpage': open_page}
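
# --- Hedged usage sketch (not part of the original module) ---
# Opens a page through the registered action; the URL is just an example.
if __name__ == '__main__':
    actions['open webpage']('https://example.com', new=2)  # new=2: new tab if possible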
nilq/baby-python
python
def util(node, graph, visited, recstack):
    # Standard DFS cycle check: reaching a node that is already on the
    # current recursion stack means a back edge, i.e. a cycle.
    # (The original version read `graph` as an undefined global; it is now
    # threaded through explicitly.)
    visited[node] = True
    recstack[node] = True
    for i in graph[node]:
        if visited[i] == False:
            if util(i, graph, visited, recstack):
                return True
        elif recstack[i] == True:
            return True
    recstack[node] = False
    return False

def isCyclic(n, graph):
    visited = [False] * n
    recstack = [False] * n
    for i in range(n):
        # Only start a DFS from nodes not yet explored.
        if visited[i] == False and util(i, graph, visited, recstack) == True:
            return 1
    return 0
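
# --- Hedged usage sketch (not part of the original snippet) ---
# Assumes `graph` is an adjacency list indexed 0..n-1.
if __name__ == '__main__':
    cyclic = [[1], [2], [0]]        # 0 -> 1 -> 2 -> 0 forms a cycle
    acyclic = [[1, 2], [2], []]     # a DAG
    print(isCyclic(3, cyclic))      # expected: 1
    print(isCyclic(3, acyclic))     # expected: 0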
nilq/baby-python
python
#!/usr/bin/env python """ @package mi.dataset.parser.test @file marine-integrations/mi/dataset/parser/test/test_adcpt_m_log9.py @author Tapana Gupta @brief Test code for adcpt_m_log9 data parser Files used for testing: ADCPT_M_LOG9_simple.txt File contains 25 valid data records ADCPT_M_LOG9_large.txt File contains 615 valid data records ADCPT_M_LOG9_bad.txt File contains 4 invalid data records """ import unittest import os from nose.plugins.attrib import attr from mi.core.log import get_logger; log = get_logger() from mi.dataset.test.test_parser import ParserUnitTestCase from mi.dataset.dataset_parser import DataSetDriverConfigKeys from mi.dataset.parser.adcpt_m_log9 import AdcptMLog9Parser from mi.dataset.test.test_parser import BASE_RESOURCE_PATH RESOURCE_PATH = os.path.join(BASE_RESOURCE_PATH, 'adcpt_m', 'resource') MODULE_NAME = 'mi.dataset.parser.adcpt_m_log9' SIMPLE_LOG_FILE = "ADCPT_M_LOG9_simple.txt" LARGE_LOG_FILE = "ADCPT_M_LOG9_large.txt" # Define number of expected records/exceptions for various tests NUM_REC_LARGE_LOG_FILE = 615 NUM_REC_SIMPLE_LOG_FILE = 25 YAML_FILE = "ADCPT_M_LOG9_simple.yml" LARGE_YAML_FILE = "ADCPT_M_LOG9_large.yml" INVALID_DATA_FILE_1 = 'ADCPT_M_LOG9_bad.txt' NUM_INVALID_EXCEPTIONS = 9 @attr('UNIT', group='mi') class AdcptMLog9ParserUnitTestCase(ParserUnitTestCase): """ adcpt_m_log9 Parser unit test suite """ def setUp(self): ParserUnitTestCase.setUp(self) self.rec_config = { DataSetDriverConfigKeys.PARTICLE_MODULE: MODULE_NAME, DataSetDriverConfigKeys.PARTICLE_CLASS: None } def open_file(self, filename): file = open(os.path.join(RESOURCE_PATH, filename), mode='r') return file def open_file_write(self, filename): file = open(os.path.join(RESOURCE_PATH, filename), mode='w') return file def create_rec_parser(self, file_handle): """ This function creates a Adcpt_m_log9 parser for recovered data. """ parser = AdcptMLog9Parser(self.rec_config, file_handle, self.exception_callback) return parser def test_verify_record(self): """ Simple test to verify that records are successfully read and parsed from a data file """ log.debug('===== START SIMPLE TEST =====') in_file = self.open_file(SIMPLE_LOG_FILE) parser = self.create_rec_parser(in_file) # In a single read, get all particles in this file. number_expected_results = NUM_REC_SIMPLE_LOG_FILE result = parser.get_records(number_expected_results) self.assertEqual(len(result), number_expected_results) in_file.close() self.assertListEqual(self.exception_callback_value, []) log.debug('===== END SIMPLE TEST =====') def test_invalid_data(self): """ Read data from a file containing invalid data. Verify that no particles are created and the correct number of exceptions are detected. """ log.debug('===== START TEST INVALID SENSOR DATA =====') in_file = self.open_file(INVALID_DATA_FILE_1) parser = self.create_rec_parser(in_file) # Try to get records and verify that none are returned. result = parser.get_records(1) self.assertEqual(result, []) self.assertEqual(len(self.exception_callback_value), NUM_INVALID_EXCEPTIONS) in_file.close() log.debug('===== END TEST INVALID SENSOR DATA =====') def test_verify_record_against_yaml(self): """ Read data from a file and pull out data particles one at a time. Verify that the results are those we expected. """ log.debug('===== START YAML TEST =====') in_file = self.open_file(LARGE_LOG_FILE) parser = self.create_rec_parser(in_file) # In a single read, get all particles in this file. 
number_expected_results = NUM_REC_LARGE_LOG_FILE result = parser.get_records(number_expected_results) self.assert_particles(result, LARGE_YAML_FILE, RESOURCE_PATH) in_file.close() self.assertListEqual(self.exception_callback_value, []) log.debug('===== END YAML TEST =====') def create_yml_file(self): """ Create a yml file corresponding to an actual recovered dataset. This is not an actual test - it allows us to create what we need for integration testing, i.e. a yml file. """ in_file = self.open_file(LARGE_LOG_FILE) parser = self.create_rec_parser(in_file) log.debug("Getting records...") # In a single read, get all particles in this file. result = parser.get_records(NUM_REC_LARGE_LOG_FILE) log.debug("Done.") self.particle_to_yml(result, LARGE_YAML_FILE) def particle_to_yml(self, particles, filename): """ This is added as a testing helper, not actually as part of the parser tests. Since the same particles will be used for the driver test it is helpful to write them to .yml in the same form they need in the results.yml fids here. """ # open write append, if you want to start from scratch manually delete this fid fid = self.open_file_write(filename) fid.write('header:\n') fid.write(" particle_object: 'MULTIPLE'\n") fid.write(" particle_type: 'MULTIPLE'\n") fid.write('data:\n') for i in range(0, len(particles)): particle_dict = particles[i].generate_dict() fid.write(' - _index: %d\n' % (i+1)) fid.write(' particle_object: %s\n' % particles[i].__class__.__name__) fid.write(' particle_type: %s\n' % particle_dict.get('stream_name')) fid.write(' internal_timestamp: %f\n' % particle_dict.get('internal_timestamp')) for val in particle_dict.get('values'): if isinstance(val.get('value'), float): fid.write(' %s: %16.3f\n' % (val.get('value_id'), val.get('value'))) elif isinstance(val.get('value'), str): fid.write(" %s: '%s'\n" % (val.get('value_id'), val.get('value'))) else: fid.write(' %s: %s\n' % (val.get('value_id'), val.get('value'))) fid.close()
nilq/baby-python
python
# Inspired by ABingo: www.bingocardcreator.com/abingo

# True division so the conversion-rate math is correct on Python 2 as well.
from __future__ import division

HANDY_Z_SCORE_CHEATSHEET = (
    (1, float('-Inf')),
    (0.10, 1.29),
    (0.05, 1.65),
    (0.025, 1.96),
    (0.01, 2.33),
    (0.001, 3.08))[::-1]

# 0.025 entries added so every p-value in the cheatsheet has a description.
PERCENTAGES = {0.10: '90%', 0.05: '95%', 0.025: '97.5%',
               0.01: '99%', 0.001: '99.9%'}

DESCRIPTION_IN_WORDS = {0.10: 'fairly confident', 0.05: 'confident',
                        0.025: 'quite confident', 0.01: 'very confident',
                        0.001: 'extremely confident'}


def calculate_variance(n, p):
    """
    Calculate the sample variance for a binomial distribution
    """
    return p * (1 - p) / n


def zscore(alternatives):
    """
    Calculate the z-score
    """
    if len(alternatives) != 2:
        raise ValueError("Can only compare exactly two alternatives")

    n0 = alternatives[0].participants
    n1 = alternatives[1].participants
    if n0 == 0 or n1 == 0:
        raise ValueError("No participants for at least one of the experiments")

    hits0 = alternatives[0].hits
    hits1 = alternatives[1].hits

    # cr: conversion rate = hits per participant
    # (the original divided participants by hits, which is backwards)
    cr0 = hits0 / n0
    cr1 = hits1 / n1

    numerator = cr0 - cr1
    variance0 = calculate_variance(n0, cr0)
    variance1 = calculate_variance(n1, cr1)

    return numerator / ((variance0 + variance1) ** 0.5)


def best_p(zscore):
    """
    Find the p-value using a table
    """
    for p, z in HANDY_Z_SCORE_CHEATSHEET:
        if zscore > z:
            break
    return (p, z)


def test(data):
    pass


def describe(alternatives, p, best, worst):
    words = ""

    n0 = alternatives[0].participants
    n1 = alternatives[1].participants
    if n0 < 10 or n1 < 10:
        words += "Take these results with a grain of salt since your " + \
                 "samples are so small: "

    best_cr = alternatives[best].hits / alternatives[best].participants
    worst_cr = alternatives[worst].hits / alternatives[worst].participants

    words += "The best alternative you have is: %s, which had " % \
        alternatives[best].content
    words += "%d conversions from %d participants " \
        % (alternatives[best].hits, alternatives[best].participants)
    words += "(%f). The other alternative was %s, " \
        % (best_cr, alternatives[worst].content)
    words += "which had %d conversions from %d participants " \
        % (alternatives[worst].hits, alternatives[worst].participants)
    # The original reused the best alternative's rate here; report the
    # worst alternative's conversion rate instead.
    words += "(%f). " % worst_cr

    if p == 1:
        words += "However, this difference is not statistically significant."
    else:
        words += "This difference is %s likely to be " % PERCENTAGES[p]
        words += "statistically significant, which means you can be "
        # The original left a "foo" placeholder here; the constants above
        # were clearly intended to fill it.
        words += "%s that it is the result of your alternatives actually " \
            % DESCRIPTION_IN_WORDS[p]
        words += "mattering, rather than "
        words += "being due to random chance. However, this statistical test"
        words += " can't measure how likely the currently "
        words += "observed magnitude of the difference is to be accurate or"
        words += " not. It only says \"better\", not \"better "
        words += "by so much\"."
    return words
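
# --- Hedged usage sketch (not part of the original module) ---
# Assumes an alternative object exposing `content`, `participants` and
# `hits`; the namedtuple below is hypothetical.
if __name__ == '__main__':
    from collections import namedtuple

    Alternative = namedtuple('Alternative', 'content participants hits')
    alts = [Alternative('Variant A', 1000, 115),
            Alternative('Variant B', 1000, 90)]

    z = zscore(alts)
    p, _ = best_p(abs(z))
    print(describe(alts, p, best=0, worst=1))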
nilq/baby-python
python
from __future__ import division from __future__ import unicode_literals from __future__ import print_function from __future__ import absolute_import from future import standard_library standard_library.install_aliases() from past.utils import old_div import rlpy import numpy as np from hyperopt import hp param_space = { 'kernel_resolution': hp.loguniform("kernel_resolution", np.log(5), np.log(50)), 'discover_threshold': hp.loguniform( "discover_threshold", np.log(1e4), np.log(1e8)), 'lambda_': hp.uniform("lambda_", 0., 1.), 'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)), 'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))} def make_experiment( exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/", discover_threshold=88044., boyan_N0=64502, lambda_=0.43982644088, initial_learn_rate=0.920244401, kernel_resolution=11.6543336229): opt = {} opt["exp_id"] = exp_id opt["path"] = path opt["max_steps"] = 150000 opt["num_policy_checks"] = 30 opt["checks_per_policy"] = 1 active_threshold = 0.01 max_base_feat_sim = 0.5 sparsify = 1 domain = rlpy.Domains.BicycleRiding() opt["domain"] = domain kernel_width = old_div((domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]), kernel_resolution) representation = rlpy.Representations.KernelizediFDD(domain, sparsify=sparsify, kernel=rlpy.Representations.linf_triangle_kernel, kernel_args=[kernel_width], active_threshold=active_threshold, discover_threshold=discover_threshold, normalization=True, max_active_base_feat=10, max_base_feat_sim=max_base_feat_sim) policy = rlpy.Policies.eGreedy(representation, epsilon=0.1) # agent = SARSA(representation,policy,domain,initial_learn_rate=initial_learn_rate, # lambda_=.0, learn_rate_decay_mode="boyan", boyan_N0=boyan_N0) opt["agent"] = rlpy.Agents.Q_Learning(policy, representation, discount_factor=domain.discount_factor, lambda_=lambda_, initial_learn_rate=initial_learn_rate, learn_rate_decay_mode="boyan", boyan_N0=boyan_N0) experiment = rlpy.Experiments.Experiment(**opt) return experiment if __name__ == '__main__': from rlpy.Tools.run import run_profiled # run_profiled(make_experiment) experiment = make_experiment(1) experiment.run(visualize_learning=True, visualize_performance=True) experiment.plot() # experiment.save()
nilq/baby-python
python
import os from jobControl import jobControl from pyspark.sql import SparkSession from pyspark.sql import functions as f from pyspark.sql.types import IntegerType, StringType from utils import arg_utils, dataframe_utils job_args = arg_utils.get_job_args() job_name = os.path.basename(__file__).split(".")[0] num_partitions = 6 jobExec = jobControl.Job(job_name, job_args) jobExec.target_schema = ( jobExec.target_schema if jobExec.target_schema else jobExec.database_edw ) def main(): table_columns = dataframe_utils.return_hive_table_columns( spark, jobExec.target_schema, jobExec.target_table ) df_page_view = ( spark.table(f"{jobExec.database_web_analytics}.page_view") .filter(f.col("reference_date") == int(jobExec.reference_date)) .filter(f.col("app_id") == f.lit("GymPass")) ) df_stg_person_type = spark.table(f"{jobExec.database_work}.stg_person_type") df_dim_products = ( df_page_view.withColumnRenamed("event_id", "page_view_id") .join(df_stg_person_type, "person_id", "left") .withColumn( "created_at_converted", f.when( f.col("os_timezone").isNotNull(), f.expr("from_utc_timestamp(derived_tstamp, os_timezone)"), ) .when( f.col("geo_timezone").isNotNull(), f.expr("from_utc_timestamp(derived_tstamp, os_timezone)"), ) .otherwise(f.col("derived_tstamp")), ) .withColumn( "date", (f.date_format("created_at_converted", "yyyyMMdd")).cast(IntegerType()), ) .withColumn( "hour", (f.date_format("created_at_converted", "H")).cast(IntegerType()), ) .withColumn( "minute", (f.date_format("created_at_converted", "m")).cast(IntegerType()), ) .withColumn( "person_type", f.when(f.upper(f.col("useragent")).like("%BOT%"), f.lit("BOT")) .when( df_stg_person_type["person_type"].isNotNull(), df_stg_person_type["person_type"], ) .otherwise(f.lit("REGULAR USER")), ) .withColumn("utc_date", f.col("reference_date")) .select( f.col("page_view_id"), "date", "hour", "minute", f.col("geo_country").alias("page_view_country_name"), "viewer_id", "person_id", "company_id", "person_type", f.col("mkt_source").alias("utm_source"), f.col("mkt_medium").alias("utm_medium"), f.col("mkt_campaign").alias("utm_campaign"), f.col("mkt_term").alias("utm_term"), f.col("mkt_content").alias("utm_content"), "latitude", "longitude", "utc_date", "reference_date", ) ) df_dim_products = dataframe_utils.createPartitionColumns( df_dim_products, jobExec.reference_date ) df_dim_products = jobExec.select_dataframe_columns( spark, df_dim_products, table_columns ) df_dim_products = df_dim_products.repartition(num_partitions, "page_view_id") df_dim_products.write.insertInto( f"{jobExec.target_schema}.{jobExec.target_table}", overwrite=True ) jobExec.totalLines = ( (spark.table(f"{jobExec.target_schema}.{jobExec.target_table}")) .filter(f.col("reference_date") == jobExec.reference_date) .count() ) if jobExec.totalLines > 0: table_location = dataframe_utils.returnHiveTableLocation( spark, jobExec.target_schema, jobExec.target_table, True, jobExec.reference_date, ) delete_statement = f"DELETE FROM {jobExec.database_public}.{jobExec.target_table} WHERE utc_date = {jobExec.reference_date}" jobExec.redshift.executeStatement(delete_statement, "delete") jobExec.redshift.LoadS3toRedshift( table_location, jobExec.database_public, jobExec.target_table ) else: jobExec.logger.warning("Target table is empty") if __name__ == "__main__": spark = SparkSession.builder.appName(job_name).enableHiveSupport().getOrCreate() jobExec.execJob( main, spark, add_hive_path=True, delete_excessive_files=True, infer_partitions=True, )
nilq/baby-python
python
from fontbakery.checkrunner import Section from fontbakery.fonts_spec import spec_factory def check_filter(item_type, item_id, item): # Filter out external tool checks for testing purposes. if item_type == "check" and item_id in ( "com.google.fonts/check/035", # ftxvalidator "com.google.fonts/check/036", # ots-sanitize "com.google.fonts/check/037", # Font Validator "com.google.fonts/check/038", # Fontforge "com.google.fonts/check/039", # Fontforge ): return False return True def test_external_specification(): """Test the creation of external specifications.""" specification = spec_factory(default_section=Section("Dalton Maag OpenType")) specification.auto_register( globals(), spec_imports=["fontbakery.specifications.opentype"], filter_func=check_filter) # Probe some tests expected_tests = ["com.google.fonts/check/002", "com.google.fonts/check/171"] specification.test_expected_checks(expected_tests) # Probe tests we don't want assert "com.google.fonts/check/035" not in specification._check_registry.keys() assert len(specification.sections) > 1 def test_spec_imports(): """ When a names array in spec_imports contained sub module names, the import would fail. https://github.com/googlefonts/fontbakery/issues/1886 """ def _test(spec_imports, expected_tests,expected_conditions=tuple()): specification = spec_factory(default_section=Section("Testing")) specification.auto_register({}, spec_imports=spec_imports) specification.test_expected_checks(expected_tests) if expected_conditions: registered_conditions = specification.conditions.keys() for name in expected_conditions: assert name in registered_conditions, ('"{}" is expected to be ' 'registered as a condition.'.format(name)) # this is in docs/writing specifications spec_imports = [ ['fontbakery.specifications', ['cmap', 'head']] ] # Probe some tests expected_tests = [ "com.google.fonts/check/076", # in cmap "com.google.fonts/check/043" # in head ] _test(spec_imports, expected_tests) # the example from issue #1886 spec_imports = ( ( "fontbakery.specifications", ( "general", "cmap", "head", "os2", "post", "name", "hhea", "dsig", "hmtx", "gpos", "gdef", "kern", "glyf", "fvar", "shared_conditions", ), ), ) # Probe some tests expected_tests = [ "com.google.fonts/check/076", # in cmap "com.google.fonts/check/043" # in head ] _test(spec_imports, expected_tests) # make sure the suggested workaround still works: # https://github.com/googlefonts/fontbakery/issues/1886#issuecomment-392535435 spec_imports = ( "fontbakery.specifications.general", "fontbakery.specifications.cmap", "fontbakery.specifications.head", "fontbakery.specifications.os2", "fontbakery.specifications.post", "fontbakery.specifications.name", "fontbakery.specifications.hhea", "fontbakery.specifications.dsig", "fontbakery.specifications.hmtx", "fontbakery.specifications.gpos", "fontbakery.specifications.gdef", "fontbakery.specifications.kern", "fontbakery.specifications.glyf", "fontbakery.specifications.fvar", "fontbakery.specifications.shared_conditions" ) # Probe some tests expected_tests = [ "com.google.fonts/check/076", # in cmap "com.google.fonts/check/043" # in head ] _test(spec_imports, expected_tests) # cherry pick attributes from a module (instead of getting submodules) # also from this is in docs/writing specifications # Import just certain attributes from modules. 
# Also, using absolute import module names: spec_imports = [ # like we do in fontbakery.specifications.fvar ('fontbakery.specifications.shared_conditions', ('is_variable_font', 'regular_wght_coord', 'regular_wdth_coord', 'regular_slnt_coord', 'regular_ital_coord', 'regular_opsz_coord', 'bold_wght_coord')), # just as an example: import a check and a dependency/condition of # that check from the googlefonts specific spec: ('fontbakery.specifications.googlefonts', ( # "License URL matches License text on name table?" 'com_google_fonts_check_030', # This condition is a dependency of the check above: 'familyname', )) ] # Probe some tests expected_tests = [ "com.google.fonts/check/030" # in googlefonts ] expected_conditions = ('is_variable_font', 'regular_wght_coord', 'regular_wdth_coord', 'regular_slnt_coord', 'regular_ital_coord', 'regular_opsz_coord', 'bold_wght_coord', 'familyname') _test(spec_imports, expected_tests, expected_conditions) def test_opentype_checks_load(): spec_imports = ("fontbakery.specifications.opentype", ) specification = spec_factory(default_section=Section("OpenType Testing")) specification.auto_register({}, spec_imports=spec_imports) specification.test_dependencies() def test_googlefonts_checks_load(): spec_imports = ("fontbakery.specifications.googlefonts", ) specification = spec_factory(default_section=Section("Google Fonts Testing")) specification.auto_register({}, spec_imports=spec_imports) specification.test_dependencies() def test_in_and_exclude_checks(): spec_imports = ("fontbakery.specifications.opentype", ) specification = spec_factory(default_section=Section("OpenType Testing")) specification.auto_register({}, spec_imports=spec_imports) specification.test_dependencies() explicit_checks = ["06", "07"] # "06" or "07" in check ID exclude_checks = ["065", "079"] # "065" or "079" in check ID iterargs = {"font": 1} check_names = { c[1].id for c in specification.execution_order( iterargs, explicit_checks=explicit_checks, exclude_checks=exclude_checks) } check_names_expected = set() for section in specification.sections: for check in section.checks: if any(i in check.id for i in explicit_checks) and not any( x in check.id for x in exclude_checks): check_names_expected.add(check.id) assert check_names == check_names_expected def test_in_and_exclude_checks_default(): spec_imports = ("fontbakery.specifications.opentype",) specification = spec_factory(default_section=Section("OpenType Testing")) specification.auto_register({}, spec_imports=spec_imports) specification.test_dependencies() explicit_checks = None # "All checks aboard" exclude_checks = None # "No checks left behind" iterargs = {"font": 1} check_names = { c[1].id for c in specification.execution_order( iterargs, explicit_checks=explicit_checks, exclude_checks=exclude_checks) } check_names_expected = set() for section in specification.sections: for check in section.checks: check_names_expected.add(check.id) assert check_names == check_names_expected
nilq/baby-python
python
# Generated by Django 3.2.5 on 2021-07-18 12:49 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('src', '0006_auto_20210718_1014'), ] operations = [ migrations.AddField( model_name='job', name='delivery_address', field=models.CharField(blank=True, max_length=255), ), migrations.AddField( model_name='job', name='delivery_latitude', field=models.FloatField(default=0), ), migrations.AddField( model_name='job', name='delivery_longitude', field=models.FloatField(default=0), ), migrations.AddField( model_name='job', name='delivery_name', field=models.CharField(blank=True, max_length=255), ), migrations.AddField( model_name='job', name='delivery_phone', field=models.CharField(blank=True, max_length=50), ), ]
nilq/baby-python
python
"""fix Contact's name constraint Revision ID: 41414dd03c5e Revises: 508756c1b8b3 Create Date: 2021-11-26 20:42:31.599524 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '41414dd03c5e' down_revision = '508756c1b8b3' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_constraint('contacts_name_key', 'contacts', type_='unique') # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_unique_constraint('contacts_name_key', 'contacts', ['name']) # ### end Alembic commands ###
nilq/baby-python
python
#!/usr/bin/env python
import sys

import logging
logger = logging.getLogger('utility_to_osm.ssr2.git_diff')

import utility_to_osm.file_util as file_util

from osmapis_stedsnr import OSMstedsnr

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    # diff is called by git with 7 parameters:
    # path old-file old-hex old-mode new-file new-hex new-mode
    new_file, old_file = sys.argv[1], sys.argv[2]

    logger.info('Reading %s', old_file)
    content = file_util.read_file(old_file)
    old_osm = OSMstedsnr.from_xml(content)

    logger.info('Reading %s', new_file)
    content = file_util.read_file(new_file)
    new_osm = OSMstedsnr.from_xml(content)

    print('\n=== Missing stedsnr ===\n')
    old_stedsnr = sorted(old_osm.stedsnr.keys())
    new_stedsnr = sorted(new_osm.stedsnr.keys())
    # A key present in old but absent from new is missing in new (and vice
    # versa); the original messages had these the wrong way around.
    for key in old_stedsnr:
        if key not in new_stedsnr:
            print('Diff, %s missing in new' % key)
            print(old_osm.stedsnr[key][0])
    for key in new_stedsnr:
        if key not in old_stedsnr:
            print('Diff, %s missing in old' % key)
            print(new_osm.stedsnr[key][0])

    print('\n=== Tagging differences ===\n')
    stedsnr = set(old_stedsnr).intersection(new_stedsnr)
    for key in stedsnr:
        old = old_osm.stedsnr[key][0]
        new = new_osm.stedsnr[key][0]

        limit_distance = 1e-5 # FIXME: reasonable?
        old_lat, old_lon = float(old.attribs['lat']), float(old.attribs['lon'])
        new_lat, new_lon = float(new.attribs['lat']), float(new.attribs['lon'])
        if abs(old_lat - new_lat) > limit_distance or abs(old_lon - new_lon) > limit_distance:
            print('Diff in position %s old [%s, %s] != new [%s, %s]' % (key, old_lat, old_lon,
                                                                        new_lat, new_lon))

        for tag_key in old.tags:
            if tag_key not in new.tags:
                print('Diff %s, %s missing in new:' % (key, tag_key))
                print('  old[%s] = %s\n' % (tag_key, old.tags[tag_key]))
        for tag_key in new.tags:
            if tag_key not in old.tags:
                print('Diff %s, %s missing in old:' % (key, tag_key))
                print('  new[%s] = %s\n' % (tag_key, new.tags[tag_key]))

        common_tags = set(old.tags.keys()).intersection(new.tags.keys())
        for tag_key in common_tags:
            if tag_key in ('ssr:date', ): continue # don't care

            # Compare old vs new values; the original assigned these the
            # wrong way around, printing the new value as "old".
            o, n = old.tags[tag_key], new.tags[tag_key]
            if o != n:
                print('Diff %s:\n  old[%s] = %s\n  new[%s] = %s\n' % (key, tag_key, o, tag_key, n))
nilq/baby-python
python
# THIS FILE IS GENERATED FROM SIGPROFILEMATRIXGENERATOR SETUP.PY short_version = '1.1.0' version = '1.1.0'
nilq/baby-python
python
from gym_minigrid.minigrid import * from gym_minigrid.register import register class WarehouseSortEnv(MiniGridEnv): """ Environment with a door and key, sparse reward """ def __init__(self, size=8): super().__init__( grid_size=size, max_steps=10*size*size ) def _gen_grid(self, width, height): # Create an empty grid self.grid = Grid(width, height) # Generate the surrounding walls self.grid.wall_rect(0, 0, width, height) # Place a goal in the bottom-right corner self.put_obj(Goal(), width - 2, height - 2) # Create a vertical splitting wall # splitIdx = self._rand_int(2, width-2) # self.grid.vert_wall(splitIdx, 0) # Place the agent at a random position and orientation # on the left side of the splitting wall self.place_agent(size=(width, height)) # Place a door in the wall # doorIdx = self._rand_int(1, width-2) # self.put_obj(Door('yellow', is_locked=True), splitIdx, doorIdx) # Place a yellow key on the left side self.package = Package('yellow') self.place_obj(self.package) # self.put_obj( # obj=self.package, # i=1, # j=1, # ) self.mission = "use the key to open the door and then get to the goal" def step(self, action): self.step_count += 1 reward = 0 done = False # Get the position in front of the agent fwd_pos = self.front_pos # print(fwd_pos) # Get the contents of the cell in front of the agent fwd_cell = self.grid.get(*fwd_pos) # Rotate left if action == self.actions.left: self.agent_dir -= 1 if self.agent_dir < 0: self.agent_dir += 4 reward = -0.06 # Rotate right elif action == self.actions.right: self.agent_dir = (self.agent_dir + 1) % 4 reward = -0.06 # Move forward elif action == self.actions.forward: if fwd_cell == None or fwd_cell.can_overlap(): self.agent_pos = fwd_pos if fwd_cell != None and fwd_cell.type == 'goal' and self.carrying: done = True reward = self._reward() # if fwd_cell != None and fwd_cell.type == 'goal': # done = True # reward = self._reward() if fwd_cell != None and fwd_cell.type == 'lava': done = True # Pick up an object elif action == self.actions.pickup: if fwd_cell and fwd_cell.can_pickup(): if self.carrying is None: self.carrying = fwd_cell self.carrying.cur_pos = np.array([-1, -1]) self.grid.set(*fwd_pos, None) # Drop an object elif action == self.actions.drop: if not fwd_cell and self.carrying: self.grid.set(*fwd_pos, self.carrying) self.carrying.cur_pos = fwd_pos self.carrying = None # Toggle/activate an object elif action == self.actions.toggle: if fwd_cell: fwd_cell.toggle(self, fwd_pos) # Done action (not used by default) elif action == self.actions.done: pass else: assert False, "unknown action" if self.step_count >= self.max_steps: done = True # Pickup the Package if np.all(np.array(self.package.cur_pos) == self.agent_pos): package_cell = self.grid.get(*self.agent_pos) if self.carrying is None: self.carrying = package_cell self.carrying.cur_pos = np.array([-1, -1]) self.grid.set(*self.agent_pos, None) obs = self.gen_obs() return obs, reward, done, {} class WarehouseSortEnv7x7(WarehouseSortEnv): def __init__(self): super().__init__(size=10) register( id='MiniGrid-WarehouseSort-7x7-v0', entry_point='gym_minigrid.envs:WarehouseSortEnv7x7' )
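
# --- Hedged usage sketch (not part of the original module) ---
# Assumes gym and gym_minigrid are installed. Note that the registered
# "7x7" id actually builds a size-10 grid in this file.
if __name__ == '__main__':
    import gym

    env = gym.make('MiniGrid-WarehouseSort-7x7-v0')
    obs = env.reset()
    for _ in range(5):
        obs, reward, done, info = env.step(env.action_space.sample())
        if done:
            obs = env.reset()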
nilq/baby-python
python
"""Plot road network """ import os import cartopy.crs as ccrs import geopandas import matplotlib.patches as mpatches import matplotlib.pyplot as plt from atra.utils import load_config, get_axes, plot_basemap, scale_bar, plot_basemap_labels, save_fig def main(config): """Read shapes, plot map """ data_path = config['paths']['data'] # data output_file = os.path.join(config['paths']['figures'], 'network-road-map.png') road_edge_file_national = os.path.join(data_path, 'network', 'road_edges_national.shp') road_edge_file_provincial = os.path.join(data_path, 'network', 'road_edges_provincial.shp') # basemap proj_lat_lon = ccrs.PlateCarree() ax = get_axes() plot_basemap(ax, data_path) scale_bar(ax, location=(0.8, 0.05)) plot_basemap_labels(ax, data_path, include_regions=False) colors = { 'National': '#ba0f03', 'Provincial': '#e0881f' } # edges edges_provincial = geopandas.read_file(road_edge_file_provincial) ax.add_geometries( list(edges_provincial.geometry), crs=proj_lat_lon, linewidth=1.25, edgecolor=colors['Provincial'], facecolor='none', zorder=4 ) edges_national = geopandas.read_file(road_edge_file_national) ax.add_geometries( list(edges_national.geometry), crs=proj_lat_lon, linewidth=1.25, edgecolor=colors['National'], facecolor='none', zorder=5 ) # legend legend_handles = [ mpatches.Patch(color=color, label=label) for label, color in colors.items() ] plt.legend(handles=legend_handles, loc='lower left') # save save_fig(output_file) if __name__ == '__main__': CONFIG = load_config() main(CONFIG)
nilq/baby-python
python
import torch
import torch.nn as nn


class Embedding(nn.Module):
    """Token embedding layer with dropout and optional layer normalization."""

    def __init__(self, input_size, emb_size, dropout=0.0, norm=False):
        super(Embedding, self).__init__()
        self.embedding = nn.Embedding(input_size, emb_size)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(emb_size)
        self.norm = norm

    def forward(self, x):
        x = self.dropout(self.embedding(x))
        if self.norm:
            x = self.layer_norm(x)
        return x
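
# Minimal usage sketch: embed a batch of token ids and check the output shape.
if __name__ == "__main__":
    emb = Embedding(input_size=1000, emb_size=64, dropout=0.1, norm=True)
    tokens = torch.randint(0, 1000, (8, 20))  # 8 sequences of 20 token ids
    out = emb(tokens)
    print(out.shape)  # torch.Size([8, 20, 64])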
nilq/baby-python
python
class FileReader(object): def read(self, file): with open(file) as f: return f.read() def read_lines(self, file): lines = [] with open(file) as f: for line in f: lines.append(line) return lines
nilq/baby-python
python
from locust import HttpUser, task  # HttpUser is used by the commented-out variant below
from locust.contrib.fasthttp import FastHttpUser
import tensorflow as tf


def read_image(file_name, resize=True):
    img = tf.io.read_file(filename=file_name)
    img = tf.io.decode_image(img)
    if resize:
        img = tf.image.resize(img, [224, 224])
    return img

# class QuickstartUser(HttpUser):
#     # wait_time = between(1, 2.5)
#     @task
#     def tf_serving_test(self):
#         data_path = '/home/Tiexin-RS/dataset/tr3_cropped/data/1.png'
#         ori_data = read_image(data_path, False)
#         ori_data = tf.cast(tf.reshape(ori_data, (-1, 1024, 1024, 3)), tf.float32)
#         # ori_data = tf.random.uniform((1, 1024, 1024, 3),
#         #                              minval=0,
#         #                              maxval=255,
#         #                              dtype=tf.float32)
#         data = ori_data.numpy()
#         payload = {"inputs": {'input_1': data.tolist()}}
#         self.client.post("v1/models/deeplab_52_unfreeze:predict", json=payload)


class QuickstartUser(FastHttpUser):
    # wait_time = between(1, 2.5)
    def on_start(self):
        # Load and encode the test image once per simulated user.
        data_path = '/home/Tiexin-RS/dataset/tr3_cropped/data/1.png'
        ori_data = read_image(data_path, False)
        ori_data = tf.cast(tf.reshape(ori_data, (-1, 1024, 1024, 3)), tf.float32)
        data = ori_data.numpy()
        self.payload = {"inputs": {'input_1': data.tolist()}}

    @task
    def tf_serving_test(self):
        self.client.request(method='POST',
                            path="v1/models/deeplab_52_unfreeze:predict",
                            json=self.payload)
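
# Usage note (hedged): locust drives this file from its CLI, e.g.
#   locust -f locustfile.py --host http://localhost:8501
# The model name "deeplab_52_unfreeze" and the image path above are
# deployment-specific assumptions, not part of TF Serving itself.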
nilq/baby-python
python
# coding=utf-8 import unittest import urllib2 import zipfile import random from tempfile import NamedTemporaryFile from StringIO import StringIO from . import EPUB try: import lxml.etree as ET except ImportError: import xml.etree.ElementTree as ET class EpubTests(unittest.TestCase): def setUp(self): # get a small epub test file as a file-like object self.epub2file = NamedTemporaryFile(delete=False) test_file_content = urllib2.urlopen('http://www.hxa.name/articles/content/EpubGuide-hxa7241.epub') self.epub2file.write(test_file_content.read()) self.epub2file.seek(0) # get an epub with no guide element self.epub2file2 = NamedTemporaryFile(delete=False) test_file_content2 = urllib2.urlopen('http://www.gutenberg.org/ebooks/2701.epub.noimages') self.epub2file2.write(test_file_content2.read()) self.epub2file2.seek(0) def test_instantiation(self): epub=EPUB(self.epub2file) members = len(epub.namelist()) self.assertNotEqual(epub.filename, None) self.assertEqual(len(epub.opf),4) self.assertEqual(len(epub.opf[0]),11) #metadata items self.assertEqual(len(epub.opf[1]),11) #manifest items self.assertEqual(len(epub.opf[2]),8) #spine items self.assertEqual(len(epub.opf[3]),3) #guide items # test writing new_epub=StringIO() #epub.writetodisk("test_instantiation") epub.writetodisk(new_epub) epub=EPUB(new_epub) self.assertEqual(len(epub.opf),4) self.assertEqual(members,len(epub.namelist())) self.assertTrue(zipfile.is_zipfile(new_epub)) def test_addpart(self): epub=EPUB(self.epub2file,mode='a') members = len(epub.namelist()) self.assertNotEqual(epub.filename, None) part = StringIO('<?xml version="1.0" encoding="utf-8" standalone="yes"?>') epub.addpart(part, "testpart.xhtml", "application/xhtml+xml", 2) self.assertEqual(len(epub.opf[2]),9) #spine items # test writing new_epub=StringIO() epub.writetodisk(new_epub) epub=EPUB(new_epub) self.assertEqual(len(epub.opf[2]),9) self.assertEqual(members+1,len(epub.namelist())) #test delete epub._delete("testpart.xhtml") new_epub=StringIO() epub.writetodisk(new_epub) new_zip = zipfile.ZipFile(new_epub) self.assertEqual(members,len(new_zip.namelist())) self.assertTrue(zipfile.is_zipfile(new_epub)) def test_addpart_noguide(self): epub2=EPUB(self.epub2file2,mode='a') self.assertEqual(len(epub2.opf),3) self.assertEqual(epub2.info['guide'],None) num_spine_items = len(epub2.opf[2]) uxml = u'<?xml version="1.0" encoding="utf-8" standalone="yes"?><test>VojtěchVojtíšek</test>' part = StringIO(unicode(uxml)) epub2.addpart(part, "testpart.xhtml", "application/xhtml+xml", 2) self.assertEqual(len(epub2.opf[2]), num_spine_items +1) #spine items new_epub=StringIO() epub2.writetodisk(new_epub) epub2=EPUB(new_epub) def test_addmetadata(self): epub=EPUB(self.epub2file,mode='a') members = len(epub.namelist()) epub.addmetadata('test', 'GOOD') self.assertIn('<dc:test>GOOD<',ET.tostring(epub.opf, encoding="UTF-8")) self.assertTrue(epub.opf.find('.//{http://purl.org/dc/elements/1.1/}test') is not None) self.assertEqual(epub.info['metadata']['test'], 'GOOD') # test writing new_epub=StringIO() epub.writetodisk(new_epub) epub=EPUB(new_epub) self.assertEqual(epub.info['metadata']['test'], 'GOOD') new_zip = zipfile.ZipFile(new_epub) self.assertEqual(members,len(new_zip.namelist())) self.assertTrue(zipfile.is_zipfile(new_epub)) def test_new_epub(self): f = '%012x.epub' % random.randrange(16**12) # random name epub = EPUB(f, mode='w') epub.addmetadata('test', 'GOOD') uxml = u'<?xml version="1.0" encoding="utf-8" standalone="yes"?><test>VojtěchVojtíšek</test>' part = StringIO(unicode(uxml)) 
epub.addpart(part, "testpart.xhtml", "application/xhtml+xml", 2)
        epub.close()
        epub = EPUB(f, mode='r')
        self.assertEqual(len(epub.opf), 4)     # opf length
        self.assertEqual(len(epub.opf[0]), 6)  # metadata
        self.assertEqual(len(epub.opf[1]), 2)  # manifest
        self.assertEqual(len(epub.opf[2]), 1)  # spine
        self.assertEqual(len(epub.opf[3]), 0)  # guide
nilq/baby-python
python
"""Collection of Object.""" import sqlite3 class Connection(sqlite3.Connection): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.execute('pragma foreign_keys=1') class CustomCommand: """Object for custom command.""" __slots__ = ( "id", "type", "name", "invokedName", "brief", "description", "help", "category", "content", "aliases", "url", "uses", ) def __init__(self, id, name, category, **kwargs): self.id = id # NOTE: Can be 'text' or 'imported' # - text: using text and not imported from pastebin/gist # - imported: imported from pastebin/gist self.type = kwargs.pop("type", "text") # Will always return None unless type == 'imported' self.url = kwargs.pop("url", None) self.name = name # Incase its invoked using its alias self.invokedName = kwargs.pop("invokedName", name) # TODO: Add "brief" self.brief = None self.description = kwargs.pop("description", None) self.help = self.description self.content = kwargs.pop("content", "NULL") self.category = category self.aliases = kwargs.pop("aliases", []) self.uses = kwargs.pop("uses", -1) def __str__(self): return self.name
nilq/baby-python
python
import networkx as nx
import numpy as np
import sys
from scipy.io import mmread
from scipy.sparse import coo_matrix

np.set_printoptions(threshold=sys.maxsize)

if len(sys.argv) != 2:
    print("Usage: python3 ./hits.py <file.mtx>")
    exit()

graph_coo = mmread(sys.argv[1])
print("Loading COO matrix")
print(graph_coo.nnz, " edges")

graph_nx = nx.DiGraph(graph_coo)
print("Creating NetworkX Graph")
print("NetworkX is Directed: ", nx.is_directed(graph_nx))
print("NetworkX Graph has ", graph_nx.number_of_edges(), " edges")

max_iter = 10000
tol = 1e-6
hubs_nx, auths_nx = nx.hits(graph_nx, max_iter, tol, normalized=True)

# NumPy reference implementation of HITS (power iteration over the COO edges)
hrank = np.zeros((graph_coo.shape[0], 1))
arank = np.zeros((graph_coo.shape[0], 1))
hrank += 1/graph_coo.shape[0]
arank += 1/graph_coo.shape[0]
for _ in range(0, max_iter):
    hlast = hrank
    alast = arank
    hrank = np.zeros((graph_coo.shape[0], 1))
    arank = np.zeros((graph_coo.shape[0], 1))
    for edge in range(0, graph_coo.nnz):
        src = int(graph_coo.row[edge])
        dest = int(graph_coo.col[edge])
        arank[dest] += hlast[src]
        hrank[src] += alast[dest]
    # Normalize by the maximum entry
    hrank = hrank / np.max(hrank)
    arank = arank / np.max(arank)
    err = np.sum(np.absolute(hrank-hlast))
    if err < tol:
        break

hrank = hrank / np.linalg.norm(hrank, ord=1)
arank = arank / np.linalg.norm(arank, ord=1)

hubs_np = {}
auths_np = {}
for i in range(0, graph_coo.shape[0]):
    hubs_np[i] = hrank[i]
    auths_np[i] = arank[i]

# Print the NumPy result next to the NetworkX result for comparison
print("Hubs: ")
for key, val in sorted(hubs_nx.items(), key=lambda x: x[1], reverse=True):
    print(key, hubs_np[key], val)

print("Authorities: ")
for key, val in sorted(auths_nx.items(), key=lambda x: x[1], reverse=True):
    print(key, auths_np[key], val)
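
# Hedged sanity check (defined but not called by the script above): one
# hub->authority update on a hand-built 3-node graph, verified against values
# computed by hand. Invoke hits_sanity_check() manually if desired.
def hits_sanity_check():
    tiny = coo_matrix((np.ones(3), (np.array([0, 0, 1]), np.array([1, 2, 2]))),
                      shape=(3, 3))
    h = np.full((3, 1), 1 / 3)
    a = np.zeros((3, 1))
    for e in range(tiny.nnz):
        a[int(tiny.col[e])] += h[int(tiny.row[e])]
    # node 2 is pointed to by 0 and 1, node 1 only by 0, node 0 by nobody
    assert np.isclose(a[2], 2 / 3) and np.isclose(a[1], 1 / 3) and a[0] == 0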
nilq/baby-python
python
import pickle
from typing import Any, Union
from datetime import datetime


class DataStorage:
    """Pickle-backed key/value store that resets itself once per day."""

    _DataStorageObj = None

    def __new__(cls, *args, **kwargs):
        # Singleton: every instantiation returns the same object.
        if cls._DataStorageObj is None:
            cls._DataStorageObj = super().__new__(cls)
        return cls._DataStorageObj

    def __init__(self):
        self.path = r"word_dump"
        self._catalogue = []
        self._data: dict[str, Any] = {}
        self._setup()

    def _setup(self):
        self._data = self._data_storage_handler()
        # Discard stale data: the store is only valid for the current day.
        if self._data['creation'] != datetime.today().strftime('%Y-%m-%d'):
            self._data.clear()
            self._data['creation'] = datetime.today().strftime('%Y-%m-%d')
            self._data_storage_handler(self._data)

    def _data_storage_handler(self, obj_to_store=None) -> Union[dict[str, Any], None]:
        if obj_to_store is not None:
            with open(self.path, "wb") as handle:
                pickle.dump(obj_to_store, handle, protocol=pickle.HIGHEST_PROTOCOL)
        else:
            try:
                with open(self.path, 'rb') as handle:
                    return pickle.load(handle)
            except FileNotFoundError:
                create_file = {"creation": datetime.today().strftime('%Y-%m-%d')}
                self._data_storage_handler(create_file)
                return create_file

    def store_object(self, code: int, words):
        self._data[code] = words
        self._data_storage_handler(self._data)

    def load_object(self, code: int):
        # Yield the stored items; an unknown code yields nothing.
        yield from self._data.get(code, [])

    def remove(self, *names):
        for name in names:
            del self._data[name]

    def __getitem__(self, key):
        return self._data[key]

    def saved(self, item):
        return item in self._data

    def __setitem__(self, key, value):
        self._data[key] = value

    def __contains__(self, item):
        return item in self._data
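
# Minimal usage sketch (assumes the working directory is writable; this
# creates the "word_dump" pickle file used above).
if __name__ == "__main__":
    store = DataStorage()
    store.store_object(42, ["alpha", "beta"])
    print(list(store.load_object(42)))  # ['alpha', 'beta']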
nilq/baby-python
python
# Attribute stubs: placeholders exposing the names used elsewhere, with no
# real implementation behind them.
class discord:
    Colour = None


class datetime:
    datetime = None
nilq/baby-python
python
import math
import os  # only used by the commented-out debug pause below


def activator(data, train_x, sigma):  # data = [p, q], train_x = [3, 5]
    distance = 0
    for i in range(len(data)):  # 0 -> 1
        distance += math.pow(data[i] - train_x[i], 2)  # compute the distance D()
    return math.exp(- distance / (math.pow(sigma, 2)))  # return the kernel weight W()


def grnn(data, train_x, train_y, sigma):  # data = [p, q], train_x = [[3, 5], [3, 11], ...]
    result = []
    out_dim = len(train_y[0])  # dimensionality of train_y
    for dim in range(out_dim):
        factor, divide = 0, 0
        for i in range(len(train_x)):  # 0 -> 13
            cache = activator(data, train_x[i], sigma)  # cache holds W()
            factor += train_y[i][dim] * cache  # accumulate train_y * W(): the numerator
            divide += cache  # accumulate W(): the denominator
        result.append(factor / divide)  # final predicted value for this dimension
    # print("grnn finish !")
    return result  # list of predictions, result = [y*]


# Alternative normalized-Gaussian variant (disabled):
# def activator_n(data, train_x, sigma):  # data = [p, q], train_x = [3, 5]
#     distance = 0
#     mu = 15.0
#     for i in range(len(data)):  # 0 -> 1
#         distance += math.pow(data[i] - train_x[i], 2)  # compute the distance D()
#     # e = math.exp(-distance/(2 * (sigma ** 2)))
#     e = math.exp(-((math.sqrt(distance) - mu) ** 2)/(2 * (sigma ** 2)))
#     return e * (1 / (sigma * math.sqrt(2 * math.pi)))  # return the weight f()
# def grnn(data, train_x, train_y, sigma):  # data = [p, q], train_x = [[3, 5], [3, 11], ...]
#     result = []
#     out_dim = len(train_y[0])  # dimensionality of train_y
#     for dim in range(out_dim):
#         factor, divide = 0, 0
#         for i in range(len(train_x)):  # 0 -> 13
#             # cache = activator(data, train_x[i], sigma)  # cache holds W()
#             cache_n = activator_n(data, train_x[i], sigma)
#             # print("W(x) = ", cache, " f(x) = ", cache_n)
#             # os.system("pause")
#             factor += train_y[i][dim] * cache_n  # numerator of the formula
#             divide += cache_n  # denominator of the formula
#         result.append(factor / divide)
#     return result  # list of predictions, result = [y*]
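
# Minimal usage sketch: GRNN is a lazy learner, so there is no training step;
# a prediction is a kernel-weighted average of the training targets.
if __name__ == "__main__":
    train_x = [[0.0], [1.0], [2.0], [3.0]]
    train_y = [[0.0], [1.0], [4.0], [9.0]]  # samples of y = x^2
    print(grnn([1.5], train_x, train_y, sigma=0.5))  # ~[2.5]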
nilq/baby-python
python
# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring,line-too-long from unittest import mock import os import pytest from eze.plugins.tools.checkmarx_kics import KicsTool from eze.utils.io import create_tempfile_path from tests.plugins.tools.tool_helper import ToolMetaTestBase class TestKicsTool(ToolMetaTestBase): ToolMetaClass = KicsTool SNAPSHOT_PREFIX = "kics" def test_creation__no_config(self): # Given input_config = {} expected_config = { "SOURCE": ".", "CONFIG_FILE": None, "REPORT_FILE": create_tempfile_path("tmp-kics-report.json"), "REPORT_PATH": os.path.dirname(create_tempfile_path("tmp-kics-report.json")), "REPORT_FILENAME": "tmp-kics-report.json", "INCLUDE_FULL_REASON": True, # "ADDITIONAL_ARGUMENTS": "", "IGNORED_FILES": None, "EXCLUDE": [], "IGNORED_VULNERABILITIES": None, "IGNORE_BELOW_SEVERITY": None, "DEFAULT_SEVERITY": None, } # When testee = KicsTool(input_config) # Then assert testee.config == expected_config def test_creation__with_config(self): # Given input_config = { "SOURCE": "eze", "ADDITIONAL_ARGUMENTS": "--something foo", "REPORT_FILE": "C:/Users/User1/temp-kics-file.json", "CONFIG_FILE": None, "INCLUDE_FULL_REASON": True, } expected_config = { "SOURCE": "eze", "REPORT_PATH": "C:/Users/User1", "REPORT_FILENAME": "temp-kics-file.json", "REPORT_FILE": "C:/Users/User1/temp-kics-file.json", "CONFIG_FILE": None, "INCLUDE_FULL_REASON": True, # "ADDITIONAL_ARGUMENTS": "--something foo", "IGNORED_FILES": None, "EXCLUDE": [], "IGNORED_VULNERABILITIES": None, "IGNORE_BELOW_SEVERITY": None, "DEFAULT_SEVERITY": None, } # When testee = KicsTool(input_config) # Then assert testee.config == expected_config @mock.patch("eze.plugins.tools.checkmarx_kics.extract_cmd_version", mock.MagicMock(return_value="""1.4.4""")) def test_check_installed__success(self): # When expected_output = "1.4.4" output = KicsTool.check_installed() # Then assert output == expected_output def test_parse_report__snapshot(self, snapshot): # Test container fixture and snapshot self.assert_parse_report_snapshot_test(snapshot) @mock.patch("eze.utils.cli.async_subprocess_run") @mock.patch("eze.utils.cli.is_windows_os", mock.MagicMock(return_value=True)) @pytest.mark.asyncio async def test_run_scan__cli_command__multi_value_flag_exclude_and_no_folder_path_given( self, mock_async_subprocess_run ): # Given input_config = { "SOURCE": "eze", "REPORT_FILE": "tmp-kics-report.json", "EXCLUDE": [ "PATH-TO-EXCLUDED-FOLDER/.*", "PATH-TO-NESTED-FOLDER/SOME_NESTING/.*", "PATH-TO-EXCLUDED-FILE.js", ], } expected_cmd = 'kics scan -s -p eze --output-path . --output-name tmp-kics-report.json -e= "PATH-TO-EXCLUDED-FOLDER/.*" "PATH-TO-NESTED-FOLDER/SOME_NESTING/.*" PATH-TO-EXCLUDED-FILE.js' # Test run calls correct program await self.assert_run_scan_command(input_config, expected_cmd, mock_async_subprocess_run) @mock.patch("eze.utils.cli.async_subprocess_run") @pytest.mark.asyncio async def test_run_scan__cli_command_with_multi_sources_and_report(self, mock_async_subprocess_run): # Given input_config = { "SOURCE": "Dockerfile,azure-pipelines.yml", "REPORT_FILE": "C:/Users/User1/tmp-kics-report.json", } expected_cmd = "kics scan -s -p Dockerfile,azure-pipelines.yml --output-path C:/Users/User1 --output-name tmp-kics-report.json" # Test run calls correct program await self.assert_run_scan_command(input_config, expected_cmd, mock_async_subprocess_run)
nilq/baby-python
python
"""Simple templating engine. See `TemplateEngine` class.""" import os import re import inspect __all__ = ['TemplateEngine', 'TemplateSyntaxError', 'annotate_block'] class TemplateEngine: """Simple templating engine. WARNING: do NOT use this engine with templates from untrusted sources. Expressions in the template file are passed to `eval()`, and can therefore call any Python function. This engine supports: - inline replacement through `$<expr>$`; - block replacement with template-controlled indentation through `\n$<indent><name>\n`; - blocks can be defined both programmatically and from within the template through `\n$block <name>\n` and `\n$endblock\n`; - conditional blocks through `\n$if <expr>\n`, `\n$else\n`, and `\n$endif\n`. Note that `<indent>` is one space less than the actual indent to make the block name line up with where the block should be. If no indent is specified, no indent is added, so it's not currently possible to output blocks indented by a single space. A dollar sign can be inserted into the output by writing `$$`. To indent the template itself for readability with lots of nesting, any number of spaces followed by a | sign will be removed before any other processing (when a line in the output needs to start with a |, just add a second | in front). Additionally, some pretty-printing is supported through @ characters: - Inline @ signs are replaced with spaces or newlines based on line length in a way that preserves indentation. An additional four-space indent is added when auto-wrapping. - Double inline @ signs escape this; they are changed into a single @ sign. - A single @ sign at the start of a line, usually followed by a space, indicates that the line is a comment. The appropriate comment character sequence will be prefixed when the comment is inserted. The content is interpreted as markdown text; heuristics are used to try to rewrap the text to the appropriate line width. @ signs on these lines are NOT interpreted as spacing (they are literal), since this would have no effect anyway. - A double @ sign at the start of a line is replaced by the appropriate comment sequence, but otherwise the line is treated the same way as normal code. That is, wrapping points have to be specified explicitly using @ symbols, and @@ is escaped to @. - Three @ signs at the start of a line are replaced with a single @ sign in the output. The line is treated as regular code. The formatting step can be disabled, allowing the output of the template engine to be used as a block within a subsequent engine. Unlike the C preprocessor, line numbers are NOT preserved. The focus is on generating well-formatted, readable code. To use it, first define inline replacements, conditions, or add to blocks. 
Then `apply_*` the engine on files or strings.""" def __init__(self): super().__init__() self._variables = {} self._blocks = {} def __setitem__(self, key, value): """Defines a variable within the expression engine.""" self._variables[str(key)] = value def __getitem__(self, key): """Returns the current value of a variable within the expression engine.""" return self._variables[str(key)] def __delitem__(self, key): """Undefines a variable within the expression engine.""" del self._variables[str(key)] def __iter__(self): """Iterates over the variables defined within the expression engine.""" return iter(self._variables) def passthrough(self, *names): """Pass expansion of the given variable names on to the next template by assigning them to `$<name>$`.""" for name in names: self[name] = '$%s$' % name def _get_scope(self): """Returns the dictionary of variables that should be available for eval()-based directives.""" variables = self._variables.copy() variables['defined'] = lambda x: bool(self._blocks.get(x, [])) variables['re'] = re return variables def append_block(self, key, code, *args): """Add a block of code to the given key. `code` must be a string or a list of strings, the latter case being equivalent with passing `'\n'.join(code)`. Regardless of the number of terminating newlines, the spacing between consecutive blocks is always a single empty line.""" # Preprocess the arguments to allow for different calling conventions. if isinstance(code, list): code = '\n'.join(code) if args: code += '\n' + '\n'.join(args) # Blocks can contain directives and are internally stored as directive # lists. So split the code into directives now. directives = self._split_directives(code) # Save the block. key = str(key) if key not in self._blocks: self._blocks[key] = [] self._blocks[key].append(directives) def reset_block(self, key): """Removes all code blocks associated with the given key.""" key = str(key) if key in self._blocks: del self._blocks[key] def apply_file_to_file(self, template_filename, output_filename, *args, **kwargs): """Applies this template engine to the given template file, writing the result to the given output file. Extra arguments are passed to `apply_str_to_str()` and are documented there.""" output = self.apply_file_to_str(template_filename, *args, **kwargs) with open(output_filename, 'w') as output_file: output_file.write(output) def apply_str_to_file(self, template, output_filename, *args, **kwargs): """Applies this template engine to the given template string, writing the result to the given output file. Extra arguments are passed to `apply_str_to_str()` and are documented there.""" output = self.apply_str_to_str(template, *args, **kwargs) with open(output_filename, 'w') as output_file: output_file.write(output) def apply_file_to_str(self, template_filename, *args, **kwargs): """Applies this template engine to the given template file, returning the result as a string. Extra arguments are passed to `apply_str_to_str()` and are documented there.""" with open(template_filename, 'r') as template_file: template = annotate_block( template_file.read(), template_filename, kwargs.get('comment', '#').strip()) try: return self.apply_str_to_str(template, *args, **kwargs) except TemplateSyntaxError as exc: exc.set_filename(template_filename) raise def apply_str_to_str(self, template, comment='# ', wrap=80, postprocess=True, annotate=False): """Applies this template engine to the given template string, returning the result as a string. 
The `comment` keyword argument specifies the character sequence that leads comment lines; it defaults to '# ' for Python files. The `wrap` keyword argument specifies the desired number of characters per line when wrapping; it defaults to 80. The `postprocess` keyword argument can be set to `False` to disable post-processing altogether; use this when the output of this templating step will be used within a later templating step.""" # If the template is specified as a list of strings, join them first. if isinstance(template, list): template = '\n'.join(template) # Remove any template indentation, which is separated from output # indentation through pipe symbols. template = re.sub(r'\n *\|', '\n', template) # Split the template file into a list of alternating literals and # directives. directives = self._split_directives(template) # Handle $ directives. markers = self._process_directives(directives) output = self._process_markers(markers) # Process @ directives to clean up the output. if postprocess: output = self._process_wrapping(output, comment, wrap, annotate) return output @staticmethod def _split_directives(template): """Splits a template string into directives. The resulting list contains an odd amount of items, where every even-indexed item is a literal string and every odd-indexed item is a two-tuple of a line number and a directive. Inline directives include the surrounding dollar signs. Non-inline directives include the dollar prefix and newline suffix, while the newline before the directive is considered part of the preceding literal.""" # Split the directive using regular expressions. A newline is prefixed and # suffixed to ensure that the newlines matched by block directives at the # start and end of the input are always there. The prefixed newline is # stripped immediately; the final newline is stripped when we finish # parsing when the template engine ensures that all files end in a single # newline. directives = re.split(r'(\$[^$\n]*\$|(?<=\n)\$[^\n]+\n)', '\n' + template + '\n') directives[0] = directives[0][1:] # Insert line number information. line_number = 1 directive_line_number = 1 directive_source = None for idx, item in enumerate(directives): if directive_source is None: directive_line_number = line_number line_number += item.count('\n') if idx % 2 == 1: directive = item directives[idx] = ((directive_source, directive_line_number), directive) else: source = re.findall(r'@![v\^]->[^\n]+\n', item) if not source: continue source = source[-1] if source.startswith('@!^->'): directive_source = None elif source.startswith('@!v->source='): directive_source, directive_line_number = source[12:].rsplit(':', maxsplit=1) directive_line_number = int(directive_line_number) else: assert False return directives def _process_directives(self, directives, block_recursion_limit=100): #pylint: disable=R0912,R0914,R0915 """Process a directive list as returned by `_split_directives()` into a list of literals and markers. Literals and markers are distinguished by type: literals are strings, markers are N-tuples. The first entry of a marker tuple is a string that identifies what it represents. Currently the only marker is 'indent'. It's a two-tuple; the second entry is an integer representing an indentation delta (number of spaces). This indentation needs to be applied to subsequent literals.""" # Make a copy of the directive list so we can consume it one entry at a # time without affecting the argument. directive_stack = list(directives) # Conditional code block stack. 
For code to be handled, all entries in # this list must be True (or there must be zero entries). Each $if # directive appends its condition to the list, $else directives invert # the last one, and $endif directives remove from the list. condition_stack = [] # Line number of the outermost $if statement, used for line number info # when we're missing an $endif. outer_if_line_nr = None # Block definition buffer. block_buffer = None block_key = None # Number of recursive $block definitions. block_level = 0 # Block definitions. block_definitions = {} # Number of recursive block insertions. block_recursion = 0 # Line number of the outermost $block statement, used for line number # info when we're missing an $endblock. outer_block_line_nr = None # Output buffer. output_buffer = [] # Iterate over all the directives and literals. while directive_stack: directive_or_literal = directive_stack.pop(0) # Handle literals first. if isinstance(directive_or_literal, str): literal = directive_or_literal # If we're in the middle of a block definition, save the # literal to the block buffer. if block_buffer is not None: block_buffer.append(literal) continue # Delete literals that have been conditioned away. if not all(condition_stack): continue # Output the literal. output_buffer.append(literal) continue # Unpack the directive. directive_tuple = directive_or_literal line_nr, directive = directive_tuple # Handle markers inserted into the stack by this function. if line_nr is None: marker = directive if marker[0] == 'end_block': block_recursion -= 1 else: output_buffer.append(marker) continue # Parse/simplify the directive syntax. if directive.endswith('$'): indent = 0 directive = directive[1:-1] argument = None else: matches = re.match(r'\$( *)([^ ]*)(?: (.*))?$', directive) indent = len(matches.group(1)) if indent: indent += 1 directive = '$' + matches.group(2).rstrip() argument = matches.group(3) # Handle $block directive. if directive == '$block': if not argument: raise TemplateSyntaxError( line_nr, '$block without key') block_level += 1 if block_level == 1: block_buffer = [] block_key = argument outer_block_line_nr = line_nr continue # Don't continue here; save nested $block directives to the # buffer! # Handle $endblock directive. if directive == '$endblock': if argument: raise TemplateSyntaxError( line_nr, 'unexpected argument for $endblock') if block_level == 0: raise TemplateSyntaxError( line_nr, '$endblock without $block') block_level -= 1 if block_level == 0: if block_key not in block_definitions: block_definitions[block_key] = [] block_definitions[block_key].append(block_buffer) block_key = None block_buffer = None continue # Don't continue here; save nested $endblock directives to the # buffer! # If we're in the middle of a block definition, don't process # directives yet. if block_buffer is not None: block_buffer.append(directive_tuple) continue # Handle $if directive. if directive == '$if': if not argument: raise TemplateSyntaxError( line_nr, '$if without expression') if not condition_stack: outer_if_line_nr = line_nr if not all(condition_stack): # Don't try to evaluate the condition if we're already # conditioned away. condition = False else: try: condition = bool(eval(argument, self._get_scope())) #pylint: disable=W0123 except (NameError, ValueError, TypeError, SyntaxError) as exc: raise TemplateSyntaxError( line_nr, 'error in $if expression: {}'.format(exc)) condition_stack.append(condition) continue # Handle $else directive. 
if directive == '$else': if argument: raise TemplateSyntaxError( line_nr, 'unexpected argument for $else') if not condition_stack: raise TemplateSyntaxError( line_nr, '$else without $if') condition_stack[-1] = not condition_stack[-1] continue # Handle $endif directive. if directive == '$endif': if argument: raise TemplateSyntaxError( line_nr, 'unexpected argument for $endif') if not condition_stack: raise TemplateSyntaxError( line_nr, '$endif without $if') del condition_stack[-1] continue # Don't process directives further if we're inside a false conditional # block. if not all(condition_stack): continue # Handle dollar escape sequences. if directive == '': output_buffer.append('$') continue # Handle inline directives. if not directive.startswith('$'): try: result = str(eval(directive, self._get_scope())) #pylint: disable=W0123 except (NameError, ValueError, TypeError, SyntaxError) as exc: raise TemplateSyntaxError( line_nr, 'error in inline expression: {}'.format(exc)) output_buffer.append(result) continue # Handle block insertions. if directive.startswith('$') and not argument: block_recursion += 1 if block_recursion > block_recursion_limit: raise TemplateSyntaxError( line_nr, 'block recursion limit reached ({})'.format(block_recursion_limit)) key = directive[1:] # Get the blocks associated with the given key, if any. blocks = self._blocks.get(key, []) blocks.extend(block_definitions.get(key, [])) # Flatten the directive lists. directives = [(None, ('indent', indent))] for block_directives in blocks: directives.extend(block_directives) directives.append('\n\n') directives.append((None, ('indent', -indent))) directives.append((None, ('end_block',))) # Insert the directives at the start of our directive stack. directive_stack[0:0] = directives continue # Unknown directive. raise TemplateSyntaxError( line_nr, 'unknown directive: {}'.format(directive)) # Raise errors when we have unterminated blocks. if condition_stack: raise TemplateSyntaxError( outer_if_line_nr, '$if without $endif') if block_buffer is not None: raise TemplateSyntaxError( outer_block_line_nr, '$block without $endblock') return output_buffer @staticmethod def _process_markers(markers): """Processes a list of literals and markers as returned by `_process_directives()` into a single string representing the source code.""" # Join all consecutive literals together, then split them into lines. # That allows us to prefix indentation properly. marker_buffer = [[]] for marker_or_literal in markers: if isinstance(marker_or_literal, tuple): marker_buffer[-1] = ''.join(marker_buffer[-1]).split('\n') marker_buffer.append(marker_or_literal) marker_buffer.append([]) else: marker_buffer[-1].append(marker_or_literal) marker_buffer[-1] = ''.join(marker_buffer[-1]).split('\n') # Current number of spaces to indent by. indent = 0 # Buffer to output processed literals to. output_buffer = [] # State variables used to collapse empty lines and annotations. empty_line = False source_annotation = None for marker_or_literals in marker_buffer: # Handle markers. if isinstance(marker_or_literals, tuple): marker = marker_or_literals if marker[0] == 'indent': indent += marker[1] continue raise AssertionError('unknown marker: {}'.format(indent)) # Handle blocks of literals. We process indentation markers and # collapse multiple newlines and source markers into one to # (hopefully) improve readability. 
for literal in marker_or_literals:
                literal = literal.rstrip()
                if not literal:
                    empty_line = True
                elif literal.startswith('@!'):
                    source_annotation = literal
                else:
                    if output_buffer and empty_line:
                        output_buffer.append('')
                    if source_annotation is not None:
                        output_buffer.append(source_annotation)
                    output_buffer.append(' ' * indent + literal)
                    empty_line = False
                    source_annotation = None

        # Make sure we output the source termination marker at the end, if any.
        if source_annotation and source_annotation.startswith('@!^->'):
            output_buffer.append(source_annotation)

        return '\n'.join(output_buffer)

    def _process_wrapping(self, text, comment, wrap, annotate): #pylint: disable=R0912
        """Post-processes code by handling comment and wrapping markers."""
        output_lines = []

        # Since multiple subsequent lines of commented text should be
        # interpreted as a single paragraph before they're wrapped, we need to
        # postpone this wrapping until we encounter a line that doesn't belong
        # to the current paragraph. `paragraph_buffer` maintains a list of
        # words within the current paragraph, while `paragraph_buffer_indent`
        # contains the indentation characters of the first line of the
        # paragraph, where indentation characters means any set of spaces,
        # dashes, and asterisks. For subsequent lines to belong to the same
        # paragraph, they must have the same indentation, except using only
        # spaces. Those rules make markdown-styled lists parse correctly.
        paragraph_buffer = None
        paragraph_buffer_leading = None
        paragraph_buffer_hanging = None

        # List of source annotations that have not been written yet.
        annotations = []

        for line in text.split('\n'):

            # Strip trailing spaces.
            line = line.rstrip()

            # Add indentation in the input block to the output indent.
            match = re.match(r'( *)(.*)$', line)
            indent = match.group(1)
            line = match.group(2)

            # Detect the type of input line (normal code, text comment, or
            # code comment).
            line_is_text = False
            if line.startswith('@@@'):

                # Escape sequence for @ at start of line in code. Just strip
                # the first at to turn it into an inline escape.
                line = line[1:]

            elif line.startswith('@@'):

                # Code comment.
                indent += comment

                # Strip the '@@' sequence.
                line = line[2:]

            elif line.startswith('@!'):

                # Source annotation.
                if annotate:
                    annotations.append(comment.strip() + line[2:])
                continue

            elif line.startswith('@'):

                # Text comment.
                indent += comment
                line_is_text = True

                # Strip the '@' or '@ ' sequence.
                if line.startswith('@ '):
                    line = line[2:]
                else:
                    line = line[1:]

            # If this is a comment line, figure out its indentation to
            # determine whether it's a continuation of the previous comment
            # paragraph, if any. If it is, or it starts a new block, buffer it
            # until we get a line that isn't a continuation of it.
            if line_is_text:

                # Output source annotations before processing the comment.
                output_lines.extend(annotations)
                annotations = []

                match = re.match(r'([-* ]*)(.*)$', line)
                comment_indent = match.group(1)
                line = match.group(2)

                if paragraph_buffer is not None:
                    if line and indent + comment_indent == paragraph_buffer_hanging:

                        # Continuation of that paragraph.
                        paragraph_buffer.extend(line.split())
                        continue

                    # Not a continuation of the buffered paragraph. Output the
                    # current buffer so we can start a new one.
                    output_lines.extend(self._wrap(
                        paragraph_buffer_leading, paragraph_buffer_hanging,
                        paragraph_buffer, wrap))
                    paragraph_buffer = None

                if line:

                    # Start a new paragraph.
paragraph_buffer = line.split() paragraph_buffer_leading = indent + comment_indent paragraph_buffer_hanging = indent + ' '*len(comment_indent) else: # Output empty lines immediately to maintain them. They'd # be lost if we'd stick them in the paragraph buffer. output_lines.append((indent + comment_indent).rstrip()) continue # The current line is not commented text, so we need to write and # invalidate the current paragraph buffer, if any, before we can # continue. if paragraph_buffer is not None: output_lines.extend(self._wrap( paragraph_buffer_leading, paragraph_buffer_hanging, paragraph_buffer, wrap)) paragraph_buffer = None # Output annotations after dumping the comment paragraph buffer, # but before outputting the statement. output_lines.extend(annotations) annotations = [] # Split the text into tokens split by single at signs. Also # handle escaping, which admittedly is a little awkward right now # with the double replacing. line = line.replace('@@', '@_') line = re.split(r'\@(?!_)', line) line = (token.replace('@_', '@') for token in line) # Wrap the text. output_lines.extend(self._wrap( indent, indent + ' ', line, wrap)) # If we were still buffering a paragraph of commented text, output it # now. if paragraph_buffer is not None: output_lines.extend(self._wrap( paragraph_buffer_leading, paragraph_buffer_hanging, paragraph_buffer, wrap)) # Join the lines together and ensure that the file ends in a single # newline. return '\n'.join(output_lines).rstrip() + '\n' @staticmethod def _wrap(leading_indent, hanging_indent, tokens, wrap): """Wraps tokenized text. `tokens` is a list of non-breakable strings representing the line or paragraph that is to be wrapped. The first line is prefixed with `leading_indent`, subsequent lines are prefixed with `hanging_indent`. `wrap` specifies the maximum desired number of characters on a single line.""" line = leading_indent first = True for token in tokens: # The first token gets some special treatment here. if first: line += token first = False continue if len(line) + len(token) + 1 > wrap: # Too long, need to wrap: yield the previous line and start a # new one. yield line.rstrip() line = hanging_indent + token else: # No overflow, add to current line. line += ' ' + token # If we saw at least one token, yield the final line. if not first: yield line.rstrip() class TemplateSyntaxError(Exception): """Template syntax error class. Contains line number and source file information.""" def __init__(self, source, message, filename=None): super().__init__(message) if isinstance(source, int): self._filename = filename self._line_nr = source else: self._filename, self._line_nr = source self._message = message def set_filename(self, filename): """Sets the filename associated with this syntax error.""" if self._filename is None: self._filename = filename def __str__(self): filename = self._filename if filename is None: filename = '<unknown>' return 'on {} line {}: {}'.format(filename, self._line_nr, self._message) def annotate_block(template, fname=None, comment='#'): """Annotates template source file + line number to every non-directive line of the given template. If `fname` is `None`, the filename and line number offset is taken from the caller of this function.""" comment = comment.strip() if fname is None: previous_frame = inspect.currentframe().f_back (fname, offset, _, _, _) = inspect.getframeinfo(previous_frame) # inspect returns the last line of a statement. 
        # We assume that blocks are defined as a """ multiline string, so we
        # need to subtract the number of newlines in the block.
        offset -= template.count('\n')
    else:
        offset = 1

    template = template.split('\n')
    annotated = []
    for line_no, line in enumerate(template):
        annotated.append('@!v->source=%s:%s' % (fname, line_no + offset))
        annotated.append(line)
    annotated.append('@!^->end')
    return '\n'.join(annotated)


def preload_template(fname, comment='#'):
    """Preloads a template from a file relative to the calling Python file."""
    comment = comment.strip()
    if not os.path.isabs(fname):
        previous_frame = inspect.currentframe().f_back
        caller_fname, _, _, _, _ = inspect.getframeinfo(previous_frame)
        fname = os.path.dirname(caller_fname) + os.sep + fname
    with open(fname, 'r') as fil:
        template = fil.read()
    return annotate_block(template, fname, comment)
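
# Hedged usage sketch: render a small template exercising an inline
# substitution, a conditional, and a programmatically appended block.
if __name__ == '__main__':
    engine = TemplateEngine()
    engine['name'] = 'world'
    engine.append_block('BODY', 'print("hello, $name$")')
    template = '\n'.join([
        'greeting = "$name$"',
        '$if defined("BODY")',
        '$BODY',
        '$endif',
    ])
    print(engine.apply_str_to_str(template))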
nilq/baby-python
python
from rdkit import Chem
from rdkit.ML.Descriptors import MoleculeDescriptors
from rdkit.Chem import Descriptors
from padelpy import from_smiles
import re
import time

# names of all RDKit descriptors
nms = [x[0] for x in Descriptors._descList]
print('\n')
calc = MoleculeDescriptors.MolecularDescriptorCalculator(nms)

with open('/scratch/woon/b3lyp_2017/datasmile2.txt') as f:
    for i, line in enumerate(f):
        mydes = []
        mylist = []
        line = line.split(',')
        number = str(line[0])
        print(number)
        line = line[1]
        m = Chem.MolFromSmiles(line)
        try:
            time.sleep(1)  # throttle requests to the PaDEL service
            des = from_smiles(line, fingerprints=True, timeout=180)
            des = str(des).split(',')
            for ii in range(len(des)):
                # strip list/dict punctuation, keep word characters and dots
                b = des[ii].strip('[').strip(']')
                b = re.sub('[^.,a-zA-Z0-9 \n\.]', '', b)
                b = b.replace('[', ' ')
                b = b.replace(']', ' ')
                b = b.strip()
                b = b.split(' ')
                mylist.append(b[0])
                try:
                    b = b[1]
                except IndexError:
                    b = ''
                if b:
                    mydes.append(float(b))
                else:
                    mydes.append('NA')
            # print(len(mylist))
            a = calc.CalcDescriptors(m)
            a = str(a)
            a = a.replace('(', '')
            a = a.replace(')', '')
            line = str(line)
            towrite = str(number + ',' + line.strip(' ').strip('\n') + ',' + a + ',' + str(mydes))
            with open('/scratch/woon/b3lyp_2017/book4.csv', 'a') as mydata:
                mydata.write(towrite + '\n')
        except Exception:
            # back off briefly and skip this SMILES on any failure
            time.sleep(3)
nilq/baby-python
python
from torch import randn from torch.nn import Linear from backpack import extend def data_linear(device="cpu"): N, D1, D2 = 100, 64, 256 X = randn(N, D1, requires_grad=True, device=device) linear = extend(Linear(D1, D2).to(device=device)) out = linear(X) vin = randn(N, D2, device=device) vout = randn(N, D1, device=device) return { "X": X, "module": linear, "output": out, "vout_ag": vout, "vout_bp": vout.unsqueeze(2), "vin_ag": vin, "vin_bp": vin.unsqueeze(2), }
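
# Minimal usage sketch (assumes torch and backpack are installed): build the
# fixture and backpropagate a scalar through the extended layer.
if __name__ == "__main__":
    fixture = data_linear()
    fixture["output"].sum().backward()
    print(fixture["module"].weight.grad.shape)  # torch.Size([256, 64])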
nilq/baby-python
python
# -*- coding: utf-8 -*- import logging from pathlib import Path import yaml logger = logging.getLogger(__name__) def recursive_update(original_dict: dict, new_dict: dict) -> dict: """Recursively update original_dict with new_dict""" for new_key, new_value in new_dict.items(): if isinstance(new_value, dict): original_dict[new_key] = recursive_update( original_dict.get(new_key, {}), new_value ) else: original_dict[new_key] = new_value return original_dict class Password: def __init__(self, password: str) -> None: self.password = password or "" def __repr__(self) -> str: return "*" * len(self.password) def get(self) -> str: return self.password def __bool__(self): return bool(self.password) # Configurations are loaded from the defaults of the package and eventually a local config.yaml file config_files = [ Path(__file__).parent / "resources" / "default_config.yaml", Path("config.yaml"), ] config = {} for config_file in config_files: if config_file.exists(): new_config = yaml.safe_load(config_file.read_text()) if isinstance(new_config, dict): config = recursive_update(config, new_config) config["backup-dir"] = Path(config["backup-dir"]).absolute() config["password"] = Password(config.get("password"))
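
# Hedged usage sketch of recursive_update: nested dictionaries are merged key
# by key instead of being replaced wholesale. (Note the module-level config
# loading above requires the packaged default_config.yaml to be present.)
if __name__ == "__main__":
    base = {"db": {"host": "localhost", "port": 5432}, "debug": False}
    merged = recursive_update(base, {"db": {"port": 6432}})
    assert merged == {"db": {"host": "localhost", "port": 6432}, "debug": False}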
nilq/baby-python
python
# Generated by Django 3.2.6 on 2021-11-29 00:15 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('kube', '0003_auto_20210917_0032'), ] operations = [ migrations.RemoveField( model_name='kubecluster', name='type', ), ]
nilq/baby-python
python
import json from flask import make_response from marshmallow import fields, Schema, post_load, EXCLUDE from flask_apispec.utils import Ref from flask_apispec.views import MethodResource from flask_apispec import doc, use_kwargs, marshal_with # All the following schemas are set with unknown = EXCLUDE # because part of a multiple schema input. # This way none of them will raise errors for unknown fields handled by others class NameSchema(Schema): name = fields.Str() class Meta: unknown = EXCLUDE class NameGenreSchema(Schema): name = fields.Str() genre = fields.Str() class Meta: unknown = EXCLUDE class GenreSchema(Schema): genre = fields.Str() class Meta: unknown = EXCLUDE class InstrumentSchema(Schema): instrument = fields.Str() class Meta: unknown = EXCLUDE class TestFunctionViews: def test_use_kwargs(self, app, client): @app.route('/') @use_kwargs({'name': fields.Str()}, location='querystring') def view(**kwargs): return kwargs res = client.get('/', {'name': 'freddie'}) assert res.json == {'name': 'freddie'} def test_use_kwargs_nolocation(self, app, client): @app.route('/') @use_kwargs({'name': fields.Str()}) def view(**kwargs): return kwargs res = client.get('/', {'name': 'freddie'}) # default location is 'json', i.e. no kwargs will be received here assert res.json == {} def test_view_returning_tuple(self, app, client): @app.route('/all') @use_kwargs({'name': fields.Str()}, location='querystring') def all(**kwargs): return kwargs, 202, {'x-msg': 'test'} @app.route('/headers') @use_kwargs({'name': fields.Str()}, location='querystring') def view_headers(**kwargs): return kwargs, {'x-msg': 'test'} @app.route('/code') @use_kwargs({'name': fields.Str()}, location='querystring') def view_code(**kwargs): return kwargs, 202 res_all = client.get('/all', {'name': 'freddie'}) assert res_all.json == {'name': 'freddie'} assert res_all.status_code == 202 assert res_all.headers.get('x-msg') == 'test' res_headers = client.get('/headers', {'name': 'freddie'}) assert res_headers.json == {'name': 'freddie'} assert res_headers.status_code == 200 assert res_headers.headers.get('x-msg') == 'test' res_code = client.get('/code', {'name': 'freddie'}) assert res_code.json == {'name': 'freddie'} assert res_code.status_code == 202 assert 'x-msg' not in res_code.headers def test_use_kwargs_schema(self, app, client): class ArgSchema(Schema): name = fields.Str() @app.route('/') @use_kwargs(ArgSchema, location='querystring') def view(**kwargs): return kwargs res = client.get('/', {'name': 'freddie'}) assert res.json == {'name': 'freddie'} def test_use_kwargs_schema_with_post_load(self, app, client): class User: def __init__(self, name): self.name = name def update(self, name): self.name = name class ArgSchema(Schema): name = fields.Str() @post_load def make_object(self, data, **kwargs): return User(**data) @app.route('/', methods=('POST', )) @use_kwargs(ArgSchema(), location='json_or_form') def view(user): assert isinstance(user, User) return {'name': user.name} data = {'name': 'freddie'} res = client.post('/', data) assert res.json == data def test_use_kwargs_schema_many(self, app, client): class ArgSchema(Schema): name = fields.Str() @app.route('/', methods=('POST',)) @use_kwargs(ArgSchema(many=True), location='json') def view(*args): return list(args) data = [{'name': 'freddie'}, {'name': 'john'}] res = client.post('/', json.dumps(data), content_type='application/json') assert res.json == data def test_use_kwargs_multiple(self, app, client): @app.route('/') @use_kwargs(NameSchema, location='querystring') 
@use_kwargs(InstrumentSchema, location='querystring') def view(**kwargs): return kwargs res = client.get('/', {'name': 'freddie', 'instrument': 'vocals'}) assert res.json == {'name': 'freddie', 'instrument': 'vocals'} def test_use_kwargs_callable_as_schema(self, app, client): def schema_factory(request): assert request.method == 'GET' assert request.path == '/' class ArgSchema(Schema): name = fields.Str() return ArgSchema @app.route('/') @use_kwargs(schema_factory, location='querystring') def view(**kwargs): return kwargs res = client.get('/', {'name': 'freddie'}) assert res.json == {'name': 'freddie'} def test_marshal_with_default(self, app, client, models, schemas): @app.route('/') @marshal_with(schemas.BandSchema) def view(): return models.Band('queen', 'rock') res = client.get('/') assert res.json == {'name': 'queen', 'genre': 'rock'} def test_marshal_with_codes(self, app, client, models, schemas): @app.route('/') @marshal_with(schemas.BandSchema) @marshal_with(schemas.BandSchema(only=('name', )), code=201) def view(): return models.Band('queen', 'rock'), 201 res = client.get('/') assert res.json == {'name': 'queen'} def test_integration(self, app, client, models, schemas): @app.route('/') @use_kwargs( {'name': fields.Str(), 'genre': fields.Str()}, location='querystring' ) @marshal_with(schemas.BandSchema) def view(**kwargs): return models.Band(**kwargs) res = client.get('/', {'name': 'queen', 'genre': 'rock'}) assert res.json == {'name': 'queen', 'genre': 'rock'} class TestClassViews: def test_inheritance_unidirectional(self, app, client): @doc(tags=['base']) class BaseResource(MethodResource): @doc(description='parent') def get(self, **kwargs): pass @doc(tags=['child']) class ChildResource(BaseResource): @doc(description='child') def get(self, **kwargs): return kwargs assert not any(MethodResource.__apispec__.values()) assert BaseResource.__apispec__['docs'][0].options[0]['tags'] == ['base'] assert ChildResource.__apispec__['docs'][0].options[0]['tags'] == ['child'] assert BaseResource.get.__apispec__['docs'][0].options[0]['description'] == 'parent' assert ChildResource.get.__apispec__['docs'][0].options[0]['description'] == 'child' def test_inheritance_only_http_methods(self, app): @use_kwargs({'genre': fields.Str()}) class ConcreteResource(MethodResource): def _helper(self, **kwargs): return kwargs with app.test_request_context(): resource = ConcreteResource() assert resource._helper() == {} def test_kwargs_inheritance(self, app, client): class BaseResource(MethodResource): @use_kwargs(NameSchema, location='querystring') def get(self, **kwargs): pass class ConcreteResource(BaseResource): @use_kwargs(GenreSchema, location='querystring') def get(self, **kwargs): return kwargs app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete')) res = client.get('/', {'name': 'queen', 'genre': 'rock'}) assert res.json == {'name': 'queen', 'genre': 'rock'} def test_kwargs_inheritance_ref(self, app, client, schemas): class BaseResource(MethodResource): @use_kwargs(NameSchema, location='querystring') def get(self, **kwargs): pass class ConcreteResource(BaseResource): kwargs = GenreSchema @use_kwargs(Ref('kwargs'), location='querystring') @marshal_with(schemas.BandSchema) def get(self, **kwargs): return kwargs app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete')) res = client.get('/', {'name': 'queen', 'genre': 'rock'}) assert res.json == {'name': 'queen', 'genre': 'rock'} def test_kwargs_inheritance_false(self, app, client, models, schemas): class 
BaseResource(MethodResource): @use_kwargs(NameGenreSchema, location='querystring') def get(self): pass class ConcreteResource(BaseResource): @use_kwargs(NameSchema, inherit=False, location='querystring') def get(self, **kwargs): return kwargs app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete')) res = client.get('/', {'name': 'queen', 'genre': 'rock'}) assert res.json == {'name': 'queen'} def test_kwargs_apply_false(self, app, client): class ConcreteResource(MethodResource): @use_kwargs(GenreSchema, apply=False) def get(self, **kwargs): return kwargs app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete')) res = client.get('/', {'name': 'queen', 'genre': 'rock'}) assert res.json == {} def test_schemas_class(self, app, client, models, schemas): @marshal_with(schemas.BandSchema) class ConcreteResource(MethodResource): @marshal_with(schemas.BandSchema(only=('genre', )), code=201) def get(self, **kwargs): return models.Band('slowdive', 'shoegaze'), 201 app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete')) res = client.get('/') assert res.json == {'genre': 'shoegaze'} def test_schemas_class_inheritance(self, app, client, models, schemas): @marshal_with(schemas.BandSchema(only=('genre', ))) class BaseResource(MethodResource): def get(self): pass class ConcreteResource(BaseResource): def get(self, **kwargs): return models.Band('slowdive', 'shoegaze'), 201 app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete')) res = client.get('/') assert res.json == {'genre': 'shoegaze'} def test_schemas_inheritance(self, app, client, models, schemas): class BaseResource(MethodResource): @marshal_with(schemas.BandSchema) def get(self): pass class ConcreteResource(BaseResource): @marshal_with(schemas.BandSchema(only=('genre', )), code=201) def get(self, **kwargs): return models.Band('slowdive', 'shoegaze'), 201 app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete')) res = client.get('/') assert res.json == {'genre': 'shoegaze'} def test_schemas_inheritance_refs(self, app, client, models, schemas): class BaseResource(MethodResource): schema = None @marshal_with(Ref('schema')) def get(self): pass class ConcreteResource(BaseResource): schema = schemas.BandSchema def get(self, **kwargs): return models.Band('slowdive', 'shoegaze') app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete')) res = client.get('/') assert res.json == {'name': 'slowdive', 'genre': 'shoegaze'} def test_schemas_inheritance_false(self, app, client, models, schemas): class BaseResource(MethodResource): @marshal_with(schemas.BandSchema, code=201) def get(self): pass class ConcreteResource(BaseResource): @marshal_with(schemas.BandSchema(only=('genre', )), inherit=False) def get(self, **kwargs): return models.Band('slowdive', 'shoegaze'), 201 app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete')) res = client.get('/') assert res.json == {'genre': 'shoegaze'} def test_schemas_apply_false(self, app, client, models, schemas): class ConcreteResource(MethodResource): @marshal_with(schemas.BandSchema, apply=False) def get(self, **kwargs): return {'genre': 'spacerock'} app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete')) res = client.get('/') assert res.json == {'genre': 'spacerock'} def test_schemas_none(self, app, client, models, schemas): class ConcreteResource(MethodResource): @marshal_with(None, code=204) def delete(self, **kwargs): response = make_response('', 204) response.headers = {} return response app.add_url_rule('/<id>/', 
view_func=ConcreteResource.as_view('concrete'))
        res = client.delete('/5/')
        assert res.body == b''
nilq/baby-python
python
from abc import ABC, abstractmethod
import itertools

import numpy as np
import matplotlib.pyplot as plt
import tqdm

from . import _heatmap
from . import preprocessing


class Regressor(ABC):
    '''
    Mix-in class for Regression models.
    '''

    @abstractmethod
    def get_output(self):
        '''
        Returns the output activations of the model.

        Returns
        -------
        numpy.array
            The output activations.
        '''
        pass

    @abstractmethod
    def feed(self, input_data):
        '''
        Accepts input array and feeds it to the model.

        Parameters
        ----------
        input_data : numpy.array
            The input to feed the model.

        Raises
        ------
        ValueError
            If the input data has invalid dimensions/shape.

        Note
        ----
        This function only feeds the input data, to get the output after
        calling this function use :py:func:`get_output` or
        :py:func:`get_output_onehot`
        '''
        pass

    @property
    @abstractmethod
    def _out_size(self):
        '''
        Returns number of nodes/neurons in the output layer.
        '''
        pass

    def r2score(self, testing_data, testing_targets):
        '''
        Return R-squared or coefficient of determination value.

        Parameters
        ----------
        testing_data : numpy.array
            numpy array containing testing data.
        testing_targets : numpy.array
            numpy array containing testing targets, corresponding to the
            testing data.

        Returns
        -------
        r2score : float
            The R-squared score of the model over the testing data.

        Raises
        ------
        ValueError
            If :code:`testing_data` or :code:`testing_targets` has invalid
            dimensions/shape.
        '''
        self.feed(testing_data)
        output = self.get_output()

        error = ((output - testing_targets)**2).sum()
        var = ((testing_targets - testing_targets.mean(axis=0))**2).sum()

        return 1 - error/var
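
# Hedged sketch of a concrete Regressor (not part of the library): a fixed
# linear map, just enough to exercise feed/get_output/r2score.
class _FixedLinearRegressor(Regressor):
    def __init__(self, weights, bias):
        self._w = np.asarray(weights, dtype=float)
        self._b = np.asarray(bias, dtype=float)
        self._y = None

    def feed(self, input_data):
        self._y = input_data @ self._w + self._b

    def get_output(self):
        return self._y

    @property
    def _out_size(self):
        return self._w.shape[1]


if __name__ == "__main__":
    model = _FixedLinearRegressor([[2.0]], [1.0])  # y = 2x + 1
    data = np.array([[0.0], [1.0], [2.0]])
    targets = 2 * data + 1
    print(model.r2score(data, targets))  # 1.0 -- perfect fit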
nilq/baby-python
python
from __future__ import absolute_import, print_function import pytest from steam_friends import app from steam_friends.views import api, auth, main def test_app(flask_app): assert flask_app.debug is False # todo: should this be True? assert flask_app.secret_key assert flask_app.testing is True assert api.blueprint == flask_app.blueprints['api'] assert auth.blueprint == flask_app.blueprints['auth'] assert main.blueprint == flask_app.blueprints['steam_friends'] def test_app_more_config(monkeypatch): key = "TEST_ENV_VAR" envvar = "SF_{}".format(key) value = "False" monkeypatch.setenv(envvar, value) flask_app = app.create_app(app_env="test") assert flask_app.config.get(key) is False def test_debug_app(monkeypatch): monkeypatch.setenv("SF_DEBUG", "True") flask_app = app.create_app(app_env="test") assert flask_app.debug is True def test_bad_app(monkeypatch): monkeypatch.setenv("SF_ENV", "") with pytest.raises(SystemExit): app.create_app() with pytest.raises(SystemExit): app.create_app(app_env="")
nilq/baby-python
python
# from classify.data.loaders.snli import SNLIDataLoader # __all__ = ["SNLIDataLoader"]
nilq/baby-python
python
import numpy as np class InvertedPendulum: def __init__(self, length, mass, gravity=9.81): self.length = length self.mass = mass self.gravity = gravity # matrices of the linearized system self.A = np.array([[0, 1, 0, 0], [gravity/length, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0]]) self.B = np.array([0, 1/length, 0, 1]) def calc_force(self, X, x_acc): angle = X[0] s = np.sin(angle) c = np.cos(angle) acc_normal = self.gravity * c - x_acc * s # force exerted on the EE by the pendulum f = np.array([self.mass * acc_normal * s, -self.mass * acc_normal * c]) return f def step(self, X, u, dt): ''' State X = [angle, dangle, x, dx], input u = ddx ''' angle = X[0] s = np.sin(angle) c = np.cos(angle) acc_tangential = self.gravity * s + u * c angle_acc = acc_tangential / self.length dX = np.array([X[1], angle_acc, X[3], u]) X = X + dt * dX return X
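
# Minimal usage sketch: integrate the nonlinear dynamics for one second with a
# small constant cart acceleration, starting slightly off the upright.
if __name__ == "__main__":
    pend = InvertedPendulum(length=1.0, mass=0.5)
    X = np.array([0.05, 0.0, 0.0, 0.0])  # [angle, dangle, x, dx]
    for _ in range(100):
        X = pend.step(X, u=0.1, dt=0.01)
    print("angle after 1 s:", X[0])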
nilq/baby-python
python
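A short simulation sketch for the class above (parameter values hypothetical): integrate the nonlinear step() with zero cart acceleration and watch the upright equilibrium diverge.

import numpy as np

pend = InvertedPendulum(length=1.0, mass=0.5)
X = np.array([0.05, 0.0, 0.0, 0.0])  # [angle, dangle, x, dx], slightly off vertical
for _ in range(100):
    X = pend.step(X, u=0.0, dt=0.01)
print(X[0])  # the angle has grown: the upright equilibrium is unstable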
class Base:
    @property
    def id(self):
        return self._id

    def __repr__(self):
        return '({} {})'.format(self.__class__.__name__, self.id)

    def __unicode__(self):  # Python 2 legacy; __repr__ covers Python 3
        return u'({} {})'.format(self.__class__.__name__, self.id)

    def __eq__(self, other):
        return self.id == other.id

    def __ne__(self, other):
        return self.id != other.id

    # Defining __eq__ sets __hash__ to None in Python 3; restore hashing so
    # instances can still be used in sets and as dict keys.
    def __hash__(self):
        return hash(self.id)
nilq/baby-python
python
# from DETR main.py with modifications. import argparse import datetime import json import random import time from pathlib import Path import math import sys from PIL import Image import requests import matplotlib.pyplot as plt import numpy as np from torch.utils.data import DataLoader, DistributedSampler import torch from torch import nn from torchvision.models import resnet50 import torchvision.transforms as T from skimage import io from models.transformer import TransformerModel from models.tramap import TraMapModel from models.backbone import BackboneModel from custom_criterion import MSLELoss from dataset import MapQueryDataset def get_args_parser(): parser = argparse.ArgumentParser('TransforMap', add_help=False) parser.add_argument('--lr', default=1e-4, type=float) parser.add_argument('--lr_backbone', default=1e-4, type=float) parser.add_argument('--batch_size', default=2, type=int) parser.add_argument('--weight_decay', default=1e-4, type=float) parser.add_argument('--epochs', default=300, type=int) parser.add_argument('--lr_drop', default=200, type=int) parser.add_argument('--clip_max_norm', default=0.1, type=float, help='gradient clipping max norm') # Map backbone parser.add_argument('--backbone', default='resnet50', type=str, help="Name of the convolutional backbone to use") # * Transformer parser.add_argument('--enc_layers', default=6, type=int, help="Number of encoding layers in the transformer") parser.add_argument('--dec_layers', default=6, type=int, help="Number of decoding layers in the transformer") parser.add_argument('--dim_feedforward', default=2048, type=int, help="Intermediate size of the feedforward layers in the transformer blocks") parser.add_argument('--hidden_dim', default=256, type=int, help="Size of the embeddings (dimension of the transformer)") parser.add_argument('--dropout', default=0.1, type=float, help="Dropout applied in the transformer") parser.add_argument('--nheads', default=8, type=int, help="Number of attention heads inside the transformer's attentions") # dataset parameters parser.add_argument('--dataset_path', type=str) parser.add_argument('--output_dir', default='', help='path where to save, empty for no saving') parser.add_argument('--device', default='cuda', help='device to use for training / testing') parser.add_argument('--seed', default=42, type=int) parser.add_argument('--resume', default='', help='resume from checkpoint') parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch') parser.add_argument('--eval', action='store_true') parser.add_argument('--num_workers', default=2, type=int) return parser def main(args): device = torch.device(args.device) # Seed seed = args.seed torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) # Build the models backbone_model = BackboneModel(hidden_dim=args.hidden_dim, arch=args.backbone) transformer_model = TransformerModel( d_model=args.hidden_dim, n_head=args.nheads, num_encoder_layers=args.enc_layers, num_decoder_layers=args.dec_layers, dim_feedforward=args.dim_feedforward, dropout=args.dropout, activation="relu", normalize_before=False ) model = TraMapModel(backbone_model, transformer_model) print("DEVICE:", device) backbone_model.to(device) transformer_model.to(device) model.to(device) n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) print('number of params:', n_parameters) param_dicts = [ {"params": [p for n, p in model.named_parameters() if "backbone" not in n and p.requires_grad]}, { "params": [p for n, p in 
model.named_parameters() if "backbone" in n and p.requires_grad], "lr": args.lr_backbone, }, ] optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, weight_decay=args.weight_decay) lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop) # Data loader transforms = T.Compose([ T.ToTensor(), T.Normalize(mean=[0.1888, 0.2168, 0.2469], std=[0.3322, 0.2871, 0.2899]) ]) dataset_train = MapQueryDataset(transforms=transforms, split='train') sampler_train = torch.utils.data.RandomSampler(dataset_train) batch_sampler_train = torch.utils.data.BatchSampler(sampler_train, args.batch_size, drop_last=False) data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train, num_workers=args.num_workers) output_dir = Path(args.output_dir) if args.resume: checkpoint = torch.load(args.resume, map_location='cpu') model.load_state_dict(checkpoint['model']) if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint: optimizer.load_state_dict(checkpoint['optimizer']) lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) args.start_epoch = checkpoint['epoch'] + 1 if args.eval: test_stats = None # Criterion / Loss function # criterion = MSLELoss() # criterion = nn.MSELoss() # criterion = nn.L1Loss() criterion = nn.SmoothL1Loss() criterion.to(device) # Logger thing MB = 1024.0 * 1024.0 print_every = 10 target = data_loader_train print("Start Training") start_time = time.time() for epoch in range(args.start_epoch, args.epochs): model.train() criterion.train() print("EPOCH:", epoch) i = 0 ## Training process ## # Move to GPU or CPU for sample, query, duration in data_returner(data_loader_train): query = query.to(device) sample = sample.to(device) ## Target duration duration = duration.to(device) duration = duration.float() outputs = model(sample, query) outputs = outputs.flatten() # RMSE if criterion set to MSE # loss = torch.sqrt(criterion(outputs, duration) + 1e-8) # Else loss = criterion(outputs, duration) loss_value = loss.item() if not math.isfinite(loss_value): print("Loss is {}, stop the training process".format(loss_value)) sys.exit(1) optimizer.zero_grad() loss.backward() if args.clip_max_norm > 0: torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_max_norm) optimizer.step() if i % print_every == 0: # print("Output: {} Target: {}".format(outputs.tolist()[0], duration.tolist()[0])) if torch.cuda.is_available(): print("Iter: {} Memory: {:d}MB Loss: {}".format(i, math.trunc(torch.cuda.max_memory_allocated() / MB), loss_value)) # print(outputs[0].item(), duration[0].item()) else: print("Iter: {} Loss:{}".format(i, loss_value)) i += 1 lr_scheduler.step() ## Saving or Not saving, there is no in between if args.output_dir: checkpoint_paths = [output_dir / 'checkpoint.pth'] if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0: checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth') for checkpoint_path in checkpoint_paths: torch.save({ 'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'lr_scheduler': lr_scheduler.state_dict(), 'epoch': epoch, 'args': args, }, checkpoint_path) total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) print('Training time {}'.format(total_time_str)) def data_returner(iteratable, print_freq=10): for obj in iteratable: yield obj if __name__ == '__main__': parser = argparse.ArgumentParser('TransfoMap training and evaluation script', parents=[get_args_parser()]) args = parser.parse_args() if args.output_dir: 
Path(args.output_dir).mkdir(parents=True, exist_ok=True) main(args)
nilq/baby-python
python
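The training script above keeps a commented-out RMSE variant, torch.sqrt(criterion(...) + 1e-8). A small sketch of why that epsilon matters: the derivative of sqrt blows up at exactly zero loss, so backward() would produce NaN gradients without it.

import torch

mse = torch.nn.MSELoss()
pred = torch.zeros(3, requires_grad=True)
target = torch.zeros(3)

loss = torch.sqrt(mse(pred, target) + 1e-8)  # without the epsilon, backward() yields nan here
loss.backward()
print(pred.grad)  # finite, thanks to the epsilon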
# Generated by Django 2.1.2 on 2018-12-05 14:28 import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): dependencies = [("barriers", "0020_auto_20181025_1545")] operations = [ migrations.RemoveField(model_name="barriercontributor", name="barrier"), migrations.RemoveField(model_name="barriercontributor", name="contributor"), migrations.RemoveField(model_name="barriercontributor", name="created_by"), migrations.RemoveField(model_name="barriercontributor", name="modified_by"), migrations.RemoveField( model_name="historicalbarriercontributor", name="barrier" ), migrations.RemoveField( model_name="historicalbarriercontributor", name="contributor" ), migrations.RemoveField( model_name="historicalbarriercontributor", name="created_by" ), migrations.RemoveField( model_name="historicalbarriercontributor", name="history_user" ), migrations.RemoveField( model_name="historicalbarriercontributor", name="modified_by" ), migrations.RemoveField( model_name="barrierinstance", name="commercial_sensitivities" ), migrations.RemoveField(model_name="barrierinstance", name="fta_infringement"), migrations.RemoveField( model_name="barrierinstance", name="has_legal_infringement" ), migrations.RemoveField( model_name="barrierinstance", name="infringement_summary" ), migrations.RemoveField(model_name="barrierinstance", name="other_infringement"), migrations.RemoveField( model_name="barrierinstance", name="political_sensitivities" ), migrations.RemoveField(model_name="barrierinstance", name="wto_infringement"), migrations.RemoveField( model_name="historicalbarrierinstance", name="commercial_sensitivities" ), migrations.RemoveField( model_name="historicalbarrierinstance", name="fta_infringement" ), migrations.RemoveField( model_name="historicalbarrierinstance", name="has_legal_infringement" ), migrations.RemoveField( model_name="historicalbarrierinstance", name="infringement_summary" ), migrations.RemoveField( model_name="historicalbarrierinstance", name="other_infringement" ), migrations.RemoveField( model_name="historicalbarrierinstance", name="political_sensitivities" ), migrations.RemoveField( model_name="historicalbarrierinstance", name="wto_infringement" ), migrations.DeleteModel(name="BarrierContributor"), migrations.DeleteModel(name="HistoricalBarrierContributor"), ]
nilq/baby-python
python
# Special thanks to these solutions:
# https://stackoverflow.com/questions/40237952/get-scrapy-crawler-output-results-in-script-file-function
# https://stackoverflow.com/questions/41495052/scrapy-reactor-not-restartable
from scrapy import signals
from scrapy.signalmanager import dispatcher
from twisted.internet import reactor
from scrapy.crawler import CrawlerRunner

from EveroutSpider import EveroutSpider
from TravelPDXSpider import TravelPortlandSpider
import random


def spider_runner_results():
    # Available spiders, one of which is chosen at random
    # ('spiders' avoids shadowing the built-in 'list').
    spiders = [EveroutSpider, TravelPortlandSpider]
    Spider = random.choice(spiders)

    results = []

    def crawler_results(signal, sender, item, response, spider):
        results.append(item)

    dispatcher.connect(crawler_results, signal=signals.item_scraped)

    runner = CrawlerRunner()
    d = runner.crawl(Spider)
    d.addBoth(lambda _: reactor.stop())
    reactor.run()  # the script will block here until the crawling is finished
    return results


if __name__ == '__main__':
    print(spider_runner_results())
nilq/baby-python
python
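Since the linked answer above notes that the Twisted reactor cannot be restarted, here is a hedged alternative sketch using Scrapy's CrawlerProcess, which manages the reactor itself and suits scripts that run exactly one crawl per process:

from scrapy.crawler import CrawlerProcess

def run_once(spider_cls):
    process = CrawlerProcess()
    process.crawl(spider_cls)
    process.start()  # blocks until the crawl finishes, then stops the reactor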
# -*- coding: utf-8 -*- """Helper module to work with files.""" import fnmatch import logging import os import re from stat import S_IRGRP, S_IROTH, S_IRUSR, S_IWGRP, S_IWOTH, S_IWUSR # pylint: disable=redefined-builtin from ._exceptions import FileNotFoundError MAXLEN = 120 ILEGAL = r'<>:"/\|?*' LOGGER = logging.getLogger(__name__) MODE666 = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH # pylint: disable=useless-object-inheritance class File(object): """Easy manipulation of files in the SAME directory.""" def __init__(self, fp): """Set and validate the basic properties.""" if not self.isfile(fp): raise FileNotFoundError(fp) self.path = os.path.dirname(fp) or os.getcwd() self.basename = os.path.basename(fp) self.name, self.ext = os.path.splitext(self.basename) self.writable = os.access(fp, os.W_OK) def siblings(self): """Collect files and directories in the same directory.""" return [f for f in os.listdir(self.path) if f != self.basename] @staticmethod def isfile(path): """Check if a given path is a file.""" return os.path.isfile(path) @staticmethod def exists(path): """Check if a given path is a file or a directory.""" return os.path.exists(path) @staticmethod def mkwinsafe(name, space=' '): """Delete most common characters not allowed in Windows filenames.""" space = space if space not in ILEGAL else ' ' name = ''.join(c for c in name if c not in ILEGAL).replace(' ', space).strip() name = re.sub(r'\s\s+', ' ', name) if space == ' ' else name return name[:MAXLEN] @staticmethod def validate(basename): """Check for a proper basename.""" if basename != os.path.basename(basename): LOGGER.critical('This (%s) is not a basename!', basename) return False name, ext = os.path.splitext(basename) if not name: LOGGER.critical('Not a valid name (lenght 0)!') return False if not ext: LOGGER.critical('Not a valid extension (lenght 0)!') return False return True def baserename(self, new_basename): """Rename the file to a 'safe' basename.""" if not self.validate(new_basename): return False name, ext = os.path.splitext(new_basename) name = self.mkwinsafe(name) new_basename = name + ext if new_basename == self.basename: return True if new_basename not in self.siblings(): try: os.rename(self.basename, new_basename) except OSError as err: LOGGER.critical('%s', err) return False self.basename = new_basename self.name = name self.ext = ext else: LOGGER.info('The file (%s) already exist in the directory!', new_basename) return True @staticmethod def uxchmod(fp, mode=MODE666): """Change the mode of the file (default is 0666).""" return os.chmod(fp, mode) def cwdfiles(pattern='*'): """List the files in current directory that match a given pattern.""" return fnmatch.filter(os.listdir('.'), pattern)
nilq/baby-python
python
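A quick sketch exercising the static filename sanitiser from the File helper above; the static methods need no file on disk:

print(File.mkwinsafe('report: draft?'))    # 'report draft' -- ':' and '?' stripped
print(File.mkwinsafe('a/b c', space='_'))  # 'ab_c' -- '/' stripped, space replaced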
from numpy import dtype db_spec = True try: import sqlalchemy.types as sqlt except: db_spec = False return_keys = [ 'id', 'created_at', 'number', 'total_price', 'subtotal_price', 'total_weight', 'total_tax', 'total_discounts', 'total_line_items_price', 'name', 'total_price_usd', 'order_number', 'processing_method', 'source_name', 'fulfillment_status', 'payment_gateway_names', 'customer', 'line_items', 'refunds', 'email', 'discount_applications', 'discount_codes', 'updated_at', 'shipping_lines' ] keys_list = [ 'id', 'created_at', 'number', 'total_price', 'subtotal_price', 'total_weight', 'total_tax', 'total_discounts', 'total_line_items_price', 'name', 'total_price_usd', 'order_number', 'processing_method', 'source_name', 'fulfillment_status', 'payment_gateway_names', 'email', 'updated_at' ] def DBSpec(): if db_spec is True: order_types = { 'id': sqlt.BigInteger, 'order_date': sqlt.DateTime, 'number': sqlt.BigInteger, 'total_price': sqlt.Float, 'subtotal_price': sqlt.Float, 'total_weight': sqlt.Float, 'total_tax': sqlt.Float, 'total_discounts': sqlt.Float, 'total_line_items_price': sqlt.Float, 'name': sqlt.String, 'total_price_usd': sqlt.Float, 'order_number': sqlt.BigInteger, 'processing_method': sqlt.String, 'source_name': sqlt.String, 'fulfillment_status': sqlt.String, 'payment_gateway_names': sqlt.String, 'email': sqlt.String, 'updated_at': sqlt.DateTime } ref_types = { 'id': sqlt.BigInteger, 'refund_date': sqlt.DateTime, 'order_id': sqlt.BigInteger } refli_types = { 'id': sqlt.BigInteger, 'refund_id': sqlt.BigInteger, 'order_id': sqlt.BigInteger, 'line_item_id': sqlt.BigInteger, 'quantity': sqlt.Integer, 'variant_id': sqlt.BigInteger, 'subtotal': sqlt.Float, 'total_tax': sqlt.Float } adj_types = { 'id': sqlt.BigInteger, 'refund_id': sqlt.BigInteger, 'order_id': sqlt.BigInteger, 'amount': sqlt.Float, 'tax_amount': sqlt.Float, 'kind': sqlt.String, 'reason': sqlt.String } item_types = { 'id': sqlt.BigInteger, 'order_id': sqlt.BigInteger, 'order_date': sqlt.DateTime, 'variant_id': sqlt.BigInteger, 'quantity': sqlt.Integer, 'price': sqlt.Float, 'name': sqlt.String, 'product_id': sqlt.BigInteger, 'sku': sqlt.String, 'title': sqlt.String, 'total_discount': sqlt.Float, 'variant_title': sqlt.String } trans_types = { 'id': sqlt.BigInteger, 'source_order_id': sqlt.BigInteger, 'type': sqlt.String, 'fee': sqlt.Float, 'amount': sqlt.Float } cust_types = { 'order_id': sqlt.BigInteger, 'order_date': sqlt.DateTime, 'customer_id': sqlt.BigInteger, 'orders_count': sqlt.Integer, 'email': sqlt.String, 'created_at': sqlt.DateTime, 'total_spent': sqlt.Float } discapp_types = { 'order_id': sqlt.BigInteger, 'order_date': sqlt.DateTime, 'type': sqlt.String, 'title': sqlt.String, 'description': sqlt.String, 'value': sqlt.NUMERIC, 'value_type': sqlt.String, 'allocation_method': sqlt.String, 'target_selection': sqlt.String, 'target_type': sqlt.String } disccodes_types = { 'order_id': sqlt.BigInteger, 'order_date': sqlt.DateTime, 'code': sqlt.String, 'amount': sqlt.DECIMAL, 'type': sqlt.String, } shipline_types = { 'id': sqlt.String, 'carrier_identifier': sqlt.String, 'code': sqlt.String, 'delivery_category': sqlt.String, 'ship_discount_price': sqlt.Float, 'phone': sqlt.String, 'ship_price': sqlt.Float, 'requested_fulfillment_id': sqlt.String, 'source': sqlt.String, 'title': sqlt.String, 'order_id': sqlt.BigInteger, 'order_date': sqlt.DateTime, } else: order_types = {} ref_types = {} refli_types = {} adj_types = {} item_types = {} trans_types = {} cust_types = {} discapp_types = {} disccodes_types = {} 
shipline_types = {} return { 'Refunds': ref_types, 'Orders': order_types, 'LineItems': item_types, 'RefundLineItem': refli_types, 'Adjustments': adj_types, 'Transactions': trans_types, 'Customers': cust_types, 'DiscountApps': discapp_types, 'DiscountCodes': disccodes_types, 'ShipLines': shipline_types } order_dtypes = { 'number': dtype('int64'), 'total_price': dtype('float64'), 'subtotal_price': dtype('float64'), 'total_weight': dtype('float64'), 'total_tax': dtype('float64'), 'total_discounts': dtype('float64'), 'total_line_items_price': dtype('float64'), 'name': dtype('O'), 'total_price_usd': dtype('float64'), 'order_number': dtype('int64'), 'processing_method': dtype('O'), 'source_name': dtype('O'), 'fulfillment_status': dtype('O'), 'email': dtype('O') } ref_keys = [ 'created_at', 'id', 'order_id' ] ref_dtypes = { 'id': dtype('int64'), 'order_id': dtype('int64') } refli_keys = [ 'id', 'refund_id', 'order_id', 'line_item_id', 'quantity', 'variant_id', 'subtotal', 'total_tax' ] refli_dtypes = { 'id': dtype('int64'), 'refund_id': dtype('int64'), 'order_id': dtype('int64'), 'line_item_id': dtype('int64'), 'quantity': dtype('int64'), 'variant_id': dtype('int64'), 'subtotal': dtype('float64'), 'total_tax': dtype('float64') } adj_keys = [ 'id', 'refund_id', 'order_id', 'amount', 'tax_amount', 'kind', 'reason' ] adj_dtypes = { 'id': dtype('int64'), 'refund_id': dtype('int64'), 'order_id': dtype('int64'), 'amount': dtype('float64'), 'tax_amount': dtype('float64'), 'kind': dtype('O'), 'reason': dtype('O') } item_keys = [ 'id', 'order_id', 'variant_id', 'quantity', 'price', 'order_date', 'name', 'product_id', 'sku', 'title', 'total_discount', 'variant_title', ] item_dtypes = { 'id': dtype('int64'), 'order_id': dtype('int64'), 'variant_id': dtype('int64'), 'quantity': dtype('float64'), 'price': dtype('float64'), 'name': dtype('O'), 'product_id': dtype('int64'), 'sku': dtype('O'), 'title': dtype('O'), 'total_discount': dtype('float64'), 'variant_title': dtype('O') } trans_keys = [ 'id', 'source_order_id', 'type', 'fee', 'amount', 'processed_at' ] trans_dtypes = { 'id': dtype('int64'), 'source_order_id': dtype('int64'), 'type': dtype('O'), 'fee': dtype('float64'), 'amount': dtype('float64') } cust_dtypes = { 'order_id': dtype('int64'), 'customer_id': dtype('int64'), 'orders_count': dtype('int64'), 'email': dtype('O'), 'total_spent': dtype('float64') } cust_keys = [ 'id', 'order_date', 'customer_id', 'orders_count', 'email', 'created_at', 'total_spent' ] cust_cols = [ 'id', 'created_at', 'customer_id', 'customer_orders_count', 'customer_email', 'customer_created_at', 'customer_total_spent' ] cust_map = { 'id': 'order_id', 'created_at': 'order_date', 'customer_id': 'customer_id', 'customer_orders_count': 'orders_count', 'customer_email': 'email', 'customer_created_at': 'created_at', 'customer_total_spent': 'total_spent' } discapp_keys = [ 'order_id', 'order_date', 'type', 'code', 'title', 'description', 'value', 'value_type', 'allocation_method', 'target_selection', 'target_type' ] discapp_dtypes = { 'order_id': dtype('int64'), 'type': dtype('O'), 'title': dtype('O'), 'value': dtype('float64'), 'value_type': dtype('O'), 'allocation_method': dtype('O'), 'target_selection': dtype('O'), 'target_type': dtype('O'), 'code': dtype('O') } discapp_map = { 'orders_id': 'order_id', 'orders_created_at': 'order_date' } disccode_keys = [ 'order_id', 'created_at', 'code', 'amount', 'type' ] disccode_dtypes = { 'order_id': 'int64', 'code': 'string', 'type': 'string', 'amount': 'float64' } disccode_map = { 'orders_id': 
'order_id', 'orders_created_at': 'order_date' } shipline_keys = [ 'id', 'carrier_identifier', 'code', 'delivery_category', 'discounted_price', 'phone', 'price', 'discounted_price', 'requested_fulfillment_id', 'source', 'title', 'orders.id', 'orders.created_at' ] shipline_dtypes = { 'id': 'string', 'carrier_identifier': 'string', 'code': 'string', 'delivery_category': 'string', 'ship_discount_price': 'float64', 'phone': 'string', 'ship_price': 'float64', 'requested_fulfillment_id': 'string', 'source': 'string', 'title': 'string', 'order_id': 'int64', } shipline_map = { 'orders.id': 'order_id', 'orders.created_at': 'order_date', 'price': 'ship_price', 'discounted_price': 'ship_discount_price' } proc_dict = { 'Orders': 'orders_update', 'Refunds': 'refunds_update', 'LineItems': 'lineitems_update', 'RefundLineItem': 'reflineitem_update', 'Adjustments': 'adjustments_update', 'Customers': 'cust_update', 'DiscountApps': 'discapp_update', 'DiscountCodes': 'disccode_update', 'ShipLines': 'shipline_update' }
nilq/baby-python
python
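A hedged sketch of how a type map returned by DBSpec() is typically handed to pandas.DataFrame.to_sql (the engine URL and table contents are hypothetical); only columns actually present in the frame are passed through:

import pandas as pd
import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
orders = pd.DataFrame({'id': [1], 'total_price': [9.99], 'name': ['#1001']})
order_types = DBSpec()['Orders']
orders.to_sql('Orders', engine, if_exists='append', index=False,
              dtype={col: order_types[col] for col in orders.columns
                     if col in order_types})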
from setuptools import find_packages, setup

setup(
    name='serverlessworkflow_sdk',
    packages=find_packages(include=['serverlessworkflow_sdk']),
    version='0.1.0',
    description='Serverless Workflow Specification - Python SDK',
    author='Serverless Workflow Contributors',
    license='http://www.apache.org/licenses/LICENSE-2.0.txt',
    install_requires=[],
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
    test_suite='tests',
)
nilq/baby-python
python
#!/usr/bin/env python3
# coding: utf-8
# PSMN: $Id: 02.py 1.3 $
# SPDX-License-Identifier: CECILL-B OR BSD-2-Clause
"""
https://github.com/OpenClassrooms-Student-Center/demarrez_votre_projet_avec_python/
Bonus 1, json
"""

import json
import random


def read_values_from_json(fichier, key):
    """
    Create a new empty list, open a JSON file, load all of its data,
    append each item to the list and return the completed list.
    """
    values = []
    with open(fichier) as f:
        data = json.load(f)
        for entry in data:
            values.append(entry[key])
    return values


def message(character, quote):
    """ Returns a quote attributed to a character. """
    n_character = character.capitalize()
    n_quote = quote.capitalize()
    return "{} said: {}".format(n_character, n_quote)


def get_random_item_in(my_list):
    """ Returns a random item from the list. """
    rand_numb = random.randint(0, len(my_list) - 1)
    item = my_list[rand_numb]  # get an item from the list
    return item  # return the item


def get_random_quote():
    """ Returns a quote. """
    all_values = read_values_from_json('quotes.json', 'quote')
    return get_random_item_in(all_values)


def get_random_character():
    """ Returns a character. """
    all_values = read_values_from_json('characters.json', 'character')
    return get_random_item_in(all_values)


if __name__ == '__main__':
    # Ask the user: print another quote or quit.
    prompt = '<Enter> to show another quote, or Q to quit.'
    user_answer = input(prompt)
    while user_answer != "Q":
        print(message(get_random_character(), get_random_quote()))
        user_answer = input(prompt)
nilq/baby-python
python
# encoding: utf8 from pygubu import BuilderObject, register_custom_property, register_widget from pygubu.widgets.pathchooserinput import PathChooserInput class PathChooserInputBuilder(BuilderObject): class_ = PathChooserInput OPTIONS_CUSTOM = ('type', 'path', 'image', 'textvariable', 'state', 'initialdir', 'mustexist', 'title',) properties = OPTIONS_CUSTOM virtual_events = ('<<PathChooserPathChanged>>',) def _code_set_property(self, targetid, pname, value, code_bag): if pname == 'type': code_bag[pname] = "'{0}'".format(value) elif pname in ('initialdir', 'mustexist', 'title'): code_bag[pname] = "'{0}'".format(value) elif pname == 'textvariable': code_bag[pname] = self._code_set_tkvariable_property(pname, value) else: super(PathChooserInputBuilder, self)._code_set_property( targetid, pname, value, code_bag) _builder_id = 'pygubu.builder.widgets.pathchooserinput' register_widget(_builder_id, PathChooserInputBuilder, 'PathChooserInput', ('ttk', 'Pygubu Widgets')) _help = 'Dialog type' register_custom_property(_builder_id, 'type', 'choice', values=(PathChooserInput.FILE, PathChooserInput.DIR), state='readonly', default_value=PathChooserInput.FILE, help=_help) _help = 'Initial path value.' register_custom_property(_builder_id, 'path', 'entry', help=_help) _help = 'Image for the button.' register_custom_property(_builder_id, 'image', 'imageentry', help=_help) _help = 'Tk variable associated to the path property.' register_custom_property(_builder_id, 'textvariable', 'tkvarentry', help=_help) _help = 'Path entry state.' register_custom_property(_builder_id, 'state', 'choice', values=('', 'normal', 'disabled', 'readonly'), state='readonly', help=_help) _help = 'Dialog option. Determines if path must exist for directory dialog.' register_custom_property(_builder_id, 'mustexist', 'choice', values=('', 'false', 'true'), state='readonly', help=_help) _help = 'Dialog option. Sets initial directory.' register_custom_property(_builder_id, 'initialdir', 'entry', help=_help) _help = 'Dialog option. Sets dialog title.' register_custom_property(_builder_id, 'title', 'entry', help=_help)
nilq/baby-python
python
""" Anserini: A toolkit for reproducible information retrieval research built on Lucene Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import print_function import os import re import argparse import logging import json logging.basicConfig() class XFoldValidate(object): """ Perform X-Fold cross validation for various parameters and report the average effectiveness for each fold. fold_mapping is an optional argument. It can be a dictionary {qid:fold_id} that maps each qid to its corresponding fold. """ def __init__(self,output_root,collection, fold=5,fold_mapping=None): self.logger = logging.getLogger('x_fold_cv.XFlodValidate') self.output_root = output_root self.eval_files_root = 'eval_files' self.collection = collection self.fold = fold self.fold_mapping = fold_mapping def _get_param_average(self): # For each parameter set, get its # average performances in each fold, # metric, reranking model, and base # ranking model avg_performances = {} eval_root_dir = os.path.join(self.output_root, self.collection,self.eval_files_root) # do x-fold cv for the collection for metric in os.listdir(eval_root_dir): eval_dir = os.path.join(eval_root_dir,metric) if os.path.isfile(eval_dir): continue # if it is a directory containing effectiveness # for a metric, do x-fold cv for the metric for fn in os.listdir(eval_dir): model, param = fn.split('_', 1) if model not in avg_performances: avg_performances[model] = {} param_avg_performances = self._get_param_avg_performances(os.path.join(eval_dir,fn)) for metric in param_avg_performances: if metric not in avg_performances[model]: avg_performances[model][metric] = {} for fold_id in param_avg_performances[metric]: if fold_id not in avg_performances[model][metric]: avg_performances[model][metric][fold_id] = {} avg_performances[model][metric][fold_id][param] = param_avg_performances[metric][fold_id] return avg_performances def _compute_fold_id(self,qid): # compute fold id if self.fold_mapping: # use the fold mapping passed to it return self.fold_mapping[qid] else: # compute the fold id based on qid return int(qid) % self.fold def tune(self,verbose): # Tune parameter with x-fold. Use x-1 fold # for training and 1 fold for testing. 
Do # it for each fold and report average avg_performances = self._get_param_average() res = {} for model in avg_performances: res[model] = {} for metric in avg_performances[model]: if verbose: print('model: {}, metric: {}'.format(model, metric)) metric_fold_performances = [] for test_idx in range(self.fold): test_fold_performances = avg_performances[model][metric][test_idx] training_data = {} for train_idx in range(self.fold): if train_idx == test_idx: continue fold_performance = avg_performances[model][metric][train_idx] for param in fold_performance: if param not in training_data: training_data[param] = .0 training_data[param] += fold_performance[param] # sort in descending order based on performance first, then use filenames(x[0]) to break ties sorted_training_performance = sorted(training_data.items(), key=lambda x:(x[1], x[0]), reverse=True) best_param = sorted_training_performance[0][0] if verbose: print('\tFold: {}'.format(test_idx)) print('\t\tBest param: {}'.format(best_param)) print('\t\ttest performance: {0:.4f}'.format(test_fold_performances[best_param])) metric_fold_performances.append(test_fold_performances[best_param]) res[model][metric] = round(sum(metric_fold_performances) / len(metric_fold_performances), 4) return res def _get_param_avg_performances(self,file_path): # Given a file, return its average effectiveness # for each metric in each fold param_performance_list = {} for fold_id in range(self.fold): param_performance_list[fold_id] = {} with open(file_path) as f: for line in f: line = line.strip() if line: row = line.split() metric = row[0] if metric not in param_performance_list[0]: for fold_id in param_performance_list: param_performance_list[fold_id][metric] = [] qid = row[1] try: value = float(row[2]) except: self.logger.error( 'Cannot parse %s' %(row[2]) ) continue else: if qid != 'all': # compute fold id base on qid fold_id = self._compute_fold_id(qid) param_performance_list[fold_id][metric].append(value) param_avg_performances = {} for metric in param_performance_list[0].keys(): param_avg_performances[metric] = {} for fold_id in param_performance_list: param_avg_performances[metric][fold_id] = round(sum(param_performance_list[fold_id][metric])/len(param_performance_list[fold_id][metric]), 4) return param_avg_performances def main(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--output_root', default='fine_tuning_results', help='output directory of all results') parser.add_argument('--fold', '-f', default=2, type=int, help='number of fold') parser.add_argument('--verbose', '-v', action='store_true', help='output in verbose mode') parser.add_argument('--collection', required=True, help='the collection key in yaml') parser.add_argument('--fold_dir', help='directory of drr fold files') args=parser.parse_args() fold_mapping = {} if args.fold_dir: from run_batch import load_drr_fold_mapping fold_mapping = load_drr_fold_mapping(args.fold_dir) print(json.dumps(XFoldValidate(args.output_root, args.collection, args.fold, fold_mapping).tune(args.verbose), sort_keys=True, indent=2)) if __name__ == '__main__': main()
nilq/baby-python
python
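A small sketch of the tie-breaking sort used in tune() above: parameter settings are ranked by score descending, then by name descending, so equal scores resolve deterministically across runs.

training_data = {'b=0.4': 0.31, 'b=0.9': 0.31, 'b=0.2': 0.28}
ranked = sorted(training_data.items(), key=lambda x: (x[1], x[0]), reverse=True)
print(ranked[0][0])  # 'b=0.9' -- wins the 0.31 tie on the parameter name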
from picamera import PiCamera
from time import sleep


def record_video(sec):
    pi_cam = PiCamera()
    pi_cam.start_preview()
    # picamera cannot write MP4 containers directly; record raw H.264 instead
    # (the .h264 file can be wrapped into an .mp4 afterwards, e.g. with
    # GPAC's `MP4Box -add video.h264 video.mp4`).
    pi_cam.start_recording('./video.h264')
    sleep(sec)
    pi_cam.stop_recording()
    pi_cam.stop_preview()


record_video(5)
nilq/baby-python
python
import logging from hearthstone.enums import CardType, Zone, GameTag from hslog import LogParser, packets from hslog.export import EntityTreeExporter from entity.game_entity import GameEntity from entity.hero_entity import HeroEntity from entity.spell_entity import SpellEntity # import entity.cards as ecards logger = logging.getLogger() class LogUtil: def __init__(self, log_path): self.log_path = log_path self.parser = LogParser() self.game = None # parse 完后可直接拿来用 self.game_entity = None def read_log(self): with open(self.log_path, encoding='utf-8') as f: self.parser.read(f) self.parser.flush() # 最近一场战斗 packet_tree = self.parser.games[-1] exporter = EntityTreeExporter(packet_tree, player_manager=self.parser.player_manager) ee = exporter.export() self.game = ee.game def parse_game(self) -> GameEntity: self.read_log() for e in self.game.entities: # 以下为游戏状态 if e.type == CardType.GAME: # print(e, e.tags, end='\n\n\n') # player = e.players # for p in player: # print(p.tags, end='\n\n') self.game_entity = GameEntity(e) pass elif e.type == CardType.MINION: minion = HeroEntity(e) # print(e, e.tags, end='\n\n\n') self.game_entity.add_hero(minion) pass # 佣兵技能信息 elif e.type == CardType.LETTUCE_ABILITY: # print(e, e.tags, end='\n\n\n') owner = e.tags.get(GameTag.LETTUCE_ABILITY_OWNER) # print(e.card_id) if owner in self.game_entity.hero_entities.keys(): # hcid = self.game_entity.hero_entities[owner].card_id[:-3] # cid = e.card_id[:-3] # cname = 'ecards.' + hcid + '.' + cid + '.' + cid + '(e)' # print(cname) # try: # spell_entity = eval(cname) # except Exception as ex: # logger.warning(ex) spell_entity = SpellEntity(e) # spell_entity = SpellEntity(e) self.game_entity.hero_entities[owner].add_spell(spell_entity) pass # 对战技能记录 elif e.type == CardType.SPELL: # print(e, e.tags, end='\n\n\n') pass # for h in self.game_entity.my_hero: # if h.card_id[:-3] not in HEROS.keys(): # continue # hd = HEROS[h.card_id[:-3]] # for i, s in enumerate(h.spell): # if i > 2: # break # s.read_from_config(hd[3][i]) return self.game_entity pass if __name__ == '__main__': path = "C:/var/Hearthstone/Logs/Power.log" hs_log = LogUtil(path) game_entity = hs_log.parse_game() for i in game_entity.my_hero: print(i) for i in game_entity.enemy_hero: print(i) pass
nilq/baby-python
python
''' Train all cort models Usage: train_all.py [--num_processes=<n>] --type=<t> <consolidated_conll_dir> <out_dir> ''' import os from cort.core.corpora import Corpus import codecs import random import subprocess from cort_driver import train from joblib import Parallel, delayed import sys import itertools from docopt import docopt def main(type_, inp_dir, out_dir, num_processes): assert type_ in ('pair', 'latent', 'tree'), "Invalid type: %s" %type_ os.makedirs(out_dir, exist_ok=True) results = Parallel(n_jobs=num_processes, backend="threading", verbose=10)(train_jobs(type_, inp_dir, out_dir)) assert all(results) def train_jobs(system, inp_dir, out_dir): manipulations = sorted(os.listdir(inp_dir)) for manipulation in manipulations: yield delayed(train_single_system)(system, inp_dir, out_dir, manipulation) def train_single_system(system, inp_dir, out_dir, manipulation): conll_path = os.path.join(inp_dir, manipulation, 'train.m_gold_conll') out_model_path = os.path.join(out_dir, 'model-%s-%s.obj' %(system, manipulation)) print('Training %s on %s ...' %(system, conll_path), file=sys.stderr) if train(system, os.path.abspath(conll_path), os.path.abspath(out_model_path)) == 0: print('Model written to %s' %out_model_path, file=sys.stderr) return out_model_path if __name__ == '__main__': args = docopt(__doc__) main(args['--type'], args['<consolidated_conll_dir>'], args['<out_dir>'], int(args.get('--num_processes') or 1))
nilq/baby-python
python
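Example invocations implied by the docopt usage string above (directory names hypothetical):

python train_all.py --type=latent consolidated_conll/ models/
python train_all.py --num_processes=4 --type=pair consolidated_conll/ models/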
""" Yoga style module """ from enum import Enum from typing import List class YogaStyle(Enum): """ Yoga style enum """ undefined = 0 hatha = 1 yin = 2 chair = 3 def get_all_yoga_styles() -> List[YogaStyle]: """ Returns a list of all yoga styles in the enum """ return [YogaStyle.hatha, YogaStyle.yin, YogaStyle.chair] def str_to_yoga_style(name: str) -> YogaStyle: """ Converts a string to yoga style enum """ if name == YogaStyle.chair.name: return YogaStyle.chair if name == YogaStyle.hatha.name: return YogaStyle.hatha if name == YogaStyle.yin.name: return YogaStyle.yin return YogaStyle.undefined
nilq/baby-python
python
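A quick sanity check for the helpers above; any unknown name falls back to the undefined member:

for style in get_all_yoga_styles():
    assert str_to_yoga_style(style.name) is style
print(str_to_yoga_style('vinyasa'))  # YogaStyle.undefined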
#!/usr/bin/python
import os
import sys
import argparse
from collections import defaultdict
import re

import fileUtils


def isBeginning(line):
    m = re.match(r'^[A-Za-z]+.*', line)
    return bool(m)


def isEnd(line):
    return line.startswith('#end')


def getItem(item):
    m = re.match(r'[\"\[]*([-\.0-9e]*)[\]\"\,]*', item)
    if m:
        return m.group(1)
    else:
        raise ValueError('pattern failed here {}'.format(item))


def readline(line):
    tmpList = line.split(' ')
    tmpList = list(filter(lambda x: x != '', tmpList))
    rtnList = []
    for item in tmpList:
        item = item.strip()
        if item.startswith('"[') or item.endswith(']","'):
            item = getItem(item)
        if not item:
            continue
        try:
            rtnList.append(fileUtils.str2float(item))
        except ValueError:
            raise ValueError('item is: {} and line is: {}'.format(item, line))
    return rtnList


def loadData(fpath):
    vecDict = defaultdict(list)
    vecList = []
    title = ''
    with open(fpath, 'r') as f:
        for line in f:
            if line == '"\n':
                continue
            if isBeginning(line):
                tmp = line.split(',')
                title = tmp[0]
                title = title.replace(' ', '_')
                title = title.replace('?', '')
                title = title.replace('.', '')
                title = title.lower()
                vecList.extend(readline(tmp[1]))
            elif isEnd(line):
                vecDict[title] = vecList
                vecList = []
                title = ''
            else:
                vecList.extend(readline(line))
    return vecDict


def main(opts):
    # (leftover pdb.set_trace() breakpoint removed)
    vec_dict = loadData(opts.file)
    print(vec_dict)


def parseOpts(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--file', help='path to word2vec file')
    opts = parser.parse_args()
    return opts


if __name__ == "__main__":
    opts = parseOpts(sys.argv)
    main(opts)
nilq/baby-python
python
import speech_recognition as sr
import pyaudio  # optional; needed for microphone access

# get audio from the microphone
while True:  # this loop runs until the user says 'exit' or an interrupt is raised
    r = sr.Recognizer()
    output = ''  # default, so the exit check below never hits an unbound name
    with sr.Microphone() as source:
        r.adjust_for_ambient_noise(source)
        print("Speak:")
        audio = r.listen(source)

    try:
        output = r.recognize_google(audio, language='en')  # change to your desired language
        print("You said: " + output)
    except sr.UnknownValueError:
        print("Could not understand audio")
    except sr.RequestError as e:
        print("Could not request results. Please check your internet connection and try again; {0}".format(e))

    if output == 'exit':  # if the recognised phrase matches, the loop terminates
        break
nilq/baby-python
python
from django.conf.urls import url from django.urls import path from . import views from . import dal_views from .models import * app_name = 'vocabs' urlpatterns = [ url( r'^altname-autocomplete/$', dal_views.AlternativeNameAC.as_view( model=AlternativeName,), name='altname-autocomplete', ), url( r'^place-autocomplete/$', dal_views.PlaceAC.as_view( model=Place,), name='place-autocomplete', ), url( r'^crash-place-autocomplete/$', dal_views.CrashPlaceAC.as_view( model=Place,), name='crash-place-autocomplete', ), url( r'^search-place-autocomplete/$', dal_views.PlaceAC.as_view(), name='search-place-autocomplete', ), url( r'^search-region-autocomplete/$', dal_views.Region.as_view(), name='search-region-autocomplete', ), url( r'^person-autocomplete/$', dal_views.PersonAC.as_view( model=Place,), name='person-autocomplete', ), url( r'^institution-autocomplete/$', dal_views.InstitutionAC.as_view( model=Institution,), name='institution-autocomplete', ), url( r'^bomberplanetype-autocomplete/$', dal_views.BomberPlaneTypeAC.as_view( model=SkosConcept), name='bomberplanetype-autocomplete', ), url( r'^bombersquadron-autocomplete/$', dal_views.BomberSquadronAC.as_view( model=Institution,), name='bombersquadron-autocomplete', ), url( r'^bomberreasonofcrash-autocomplete/$', dal_views.BomberReasonOfCrashAC.as_view( model=SkosConcept), name='bomberreasonofcrash-autocomplete', ), url( r'^personpartofbomber-autocomplete/$', dal_views.PersonPartOfBomberAC.as_view( model=Bomber), name='personpartofbomber-autocomplete', ), url( r'^personrank-autocomplete/$', dal_views.PersonRankAC.as_view( model=SkosConcept), name='personrank-autocomplete', ), url( r'^persondestinystated-autocomplete/$', dal_views.PersonDestinyStatedAC.as_view( model=SkosConcept), name='persondestinystated-autocomplete', ), url( r'^persondestinychecked-autocomplete/$', dal_views.PersonDestinyCheckedAC.as_view( model=SkosConcept), name='persondestinychecked-autocomplete', ), url( r'^personmia-autocomplete/$', dal_views.PersonMIAAC.as_view( model=SkosConcept), name='personmia-autocomplete', ), url( r'^onlineressourcerelatedpersons-autocomplete/$', dal_views.OnlineRessourceRelatedPersonsAC.as_view( model=Person,), name='onlineressourcerelatedpersons-autocomplete', ), url( r'^onlineressourcerelatedbombers-autocomplete/$', dal_views.OnlineRessourceRelatedBombersAC.as_view( model=Bomber,), name='onlineressourcerelatedbombers-autocomplete', ), url( r'^onlineressourcerelatedwarcrimecases-autocomplete/$', dal_views.OnlineRessourceRelatedWarCrimeCasesAC.as_view( model=WarCrimeCase,), name='onlineressourcerelatedwarcrimecases-autocomplete', ), url( r'^personwarcrimecaserelatedpersons-autocomplete/$', dal_views.PersonWarCrimeCaseRelatedPersonsAC.as_view( model=Person,), name='personwarcrimecaserelatedpersons-autocomplete', ), url( r'^personwarcrimecaserelatedcases-autocomplete/$', dal_views.PersonWarCrimeCaseRelatedCasesAC.as_view( model=WarCrimeCase,), name='personwarcrimecaserelatedcases-autocomplete', ), url( r'^personwarcrimecaserelationtype-autocomplete/$', dal_views.PersonWarCrimeCaseRelationTypeAC.as_view( model=SkosConcept), name='personwarcrimecaserelationtype-autocomplete', ), url( r'^warcrimecaserelatedpersons-autocomplete/$', dal_views.WarCrimeCaseRelatedPersonsAC.as_view( model=Person,), name='warcrimecaserelatedpersons-autocomplete', ), url( r'^warcrimecaserelatedcases-autocomplete/$', dal_views.WarCrimeCaseRelatedCasesAC.as_view( model=WarCrimeCase,), name='warcrimecaserelatedcases-autocomplete', ), url( 
r'^warcrimecaserelatedplaces-autocomplete/$', dal_views.WarCrimeCaseRelatedPlacesAC.as_view( model=Place,), name='warcrimecaserelatedplaces-autocomplete', ), url( r'^warcrimecasecrimetype-autocomplete/$', dal_views.WarCrimeCaseCrimeTypeAC.as_view( model=SkosConcept), name='warcrimecasecrimetype-autocomplete', ), url( r'^airstriketarget-autocomplete/$', dal_views.AirstrikeTargetAC.as_view( model=Place), name='airstriketarget-autocomplete', ), url( r'^airstrikeplanetype-autocomplete/$', dal_views.AirstrikePlaneTypeAC.as_view( model=SkosConcept), name='airstrikeplanetype-autocomplete', ), url( r'^airstrikeairforce-autocomplete/$', dal_views.AirstrikeAirforceAC.as_view( model=Institution,), name='airstrikeairforce-autocomplete', ), path( r'specific-place-ac/<str:lookup>', dal_views.PlaceConstraintAC.as_view( model=Place), name='specific-place-ac', ), path( r'specific-person-ac/<str:lookup>', dal_views.PersonConstraintAC.as_view( model=Place), name='specific-person-ac', ) ]
nilq/baby-python
python
from dancerl.models.base import CreateCNN,CreateMLP import torch.nn as nn if __name__ == '__main__': mlp=CreateMLP(model_config=[[4,32,nn.ReLU()], [32,64,nn.ReLU()], [64,3,nn.Identity()]]) print(mlp) cnn=CreateCNN(model_config=[[4,32,3,2,1,nn.ReLU()], [32,32,3,2,1,nn.ReLU()], [32,32,3,2,1,nn.ReLU()], [32,32,3,2,1,nn.ReLU()]]) print(cnn) cnn1=CreateCNN(model_config=[[4,32,(3,2),(2,1),(1,2),nn.ReLU()], [32,32,(3,2),(2,1),(1,2),nn.ReLU()], [32,32,(3,2),(2,1),(1,2),nn.ReLU()], [32,32,(3,2),(2,1),(1,2),nn.ReLU()]]) print(cnn1) cnn2=CreateCNN(model_config=[[4,32,3,2,1,nn.ReLU()], [32,32,3,2,1,nn.ReLU()], [32,32,3,2,1,nn.ReLU()], [32,32,3,2,1,nn.ReLU()]], post_fcnet_config=[[32*6*6,512,nn.ReLU()], [512,3,nn.Identity()]]) print(cnn2)
nilq/baby-python
python
from DB import Database

db = Database("db")
MENU = range(1)


def reminder_handler(user_id, obj):
    date, event_type, name = obj  # 'event_type' avoids shadowing the built-in 'type'
    kind = "birthday" if event_type == "Birthday" else "regular"
    db.add_event(user_id, date, kind, name)
    if event_type == "Birthday":
        # extra reminders 7, 3 and 1 day(s) ahead of a birthday
        for days_before in (7, 3, 1):
            db.add_reminder(user_id, date - days_before * 24 * 60 * 60, kind, name)
    db.add_reminder(user_id, date, kind, name)  # one reminder on the day itself
nilq/baby-python
python
import glob import os import sys import argparse import time from datetime import datetime import random import numpy as np import copy from matplotlib import cm import open3d as o3d VIRIDIS = np.array(cm.get_cmap('plasma').colors) VID_RANGE = np.linspace(0.0, 1.0, VIRIDIS.shape[0]) LABEL_COLORS = np.array([ (255, 255, 255), # None (70, 70, 70), # Building (100, 40, 40), # Fences (55, 90, 80), # Other (255, 255, 0), # Pedestrian (153, 153, 153), # Pole (157, 234, 50), # RoadLines (0, 0, 255), # Road (255, 255, 255), # Sidewalk (0, 155, 0), # Vegetation (255, 0, 0), # Vehicle (102, 102, 156), # Wall (220, 220, 0), # TrafficSign (70, 130, 180), # Sky (0, 0, 0), # Ground (150, 100, 100), # Bridge (230, 150, 140), # RailTrack (180, 165, 180), # GuardRail (250, 170, 30), # TrafficLight (110, 190, 160), # Static (170, 120, 50), # Dynamic (45, 60, 150), # Water (145, 170, 100), # Terrain ]) / 255.0 # normalize each channel [0-1] since is what Open3D uses def add_open3d_axis(vis): """Add a small 3D axis on Open3D Visualizer""" axis = o3d.geometry.LineSet() axis.points = o3d.utility.Vector3dVector(np.array([ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])) axis.lines = o3d.utility.Vector2iVector(np.array([ [0, 1], [0, 2], [0, 3]])) axis.colors = o3d.utility.Vector3dVector(np.array([ [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])) vis.add_geometry(axis) def main(args): try: load_dir = args["load_dir"] point_list = o3d.geometry.PointCloud() vis = o3d.visualization.Visualizer() vis.create_window( window_name='Segmented Scene', width=960, height=540, left=480, top=270) vis.get_render_option().background_color = [0.05, 0.05, 0.05] vis.get_render_option().point_size = 3 vis.get_render_option().show_coordinate_frame = True frame = 0 while True: # Load points frame_str = load_dir + str(frame).zfill(6) labels = np.fromfile(frame_str + ".label", dtype=np.uint32) points = np.fromfile(frame_str + ".bin", dtype=np.float32).reshape(-1, 3) non_free = labels != 0 points = points[non_free, :] new_points = np.zeros(points.shape) new_points[:, 0] = points[:, 1] new_points[:, 1] = points[:, 0] new_points[:, 2] = points[:, 2] points = new_points labels = labels[non_free] # Fill in voxels voxel_resolution = 0.4 N, __ = points.shape num_samples = 50 new_points = np.reshape(points, (N, 3, 1)) new_points = np.random.uniform(new_points - voxel_resolution/2, new_points + voxel_resolution/2, (N, 3, num_samples)) new_labels = np.zeros((N, num_samples), dtype=np.uint32) new_labels = new_labels + labels.reshape(-1, 1) points = np.transpose(new_points, axes=(0, 2, 1)).reshape(-1, 3) labels = new_labels.reshape(-1) print(points.shape, labels.shape) int_color = LABEL_COLORS[labels] point_list.points = o3d.utility.Vector3dVector(points) point_list.colors = o3d.utility.Vector3dVector(int_color) if frame == 0: vis.add_geometry(point_list) # Update vis vis.update_geometry(point_list) # Sleep in a loop for i in range(5000): vis.poll_events() vis.update_renderer() time.sleep(0.005) frame += 1 finally: vis.destroy_window() if __name__ == "__main__": args = { "load_dir": "/home/tigeriv/Data/Carla/Data/Scenes/Town01_Heavy/dbki/evaluation/all/" } try: main(args) except KeyboardInterrupt: print(' - Exited by user.')
nilq/baby-python
python
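The voxel-filling step in main() above leans on NumPy broadcasting; a shape-only sketch of the same trick:

import numpy as np

voxel_resolution = 0.4
pts = np.zeros((4, 3, 1))  # N=4 points, reshaped to (N, 3, 1)
samples = np.random.uniform(pts - voxel_resolution / 2,
                            pts + voxel_resolution / 2, (4, 3, 50))
flat = np.transpose(samples, axes=(0, 2, 1)).reshape(-1, 3)
print(flat.shape)  # (200, 3): 50 jittered copies per point, each inside one voxel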
from __future__ import absolute_import, division, print_function from distutils.version import LooseVersion import pickle import numpy as np import pytest import xarray as xr import xarray.ufuncs as xu from . import ( assert_array_equal, assert_identical as assert_identical_, mock, raises_regex, ) requires_numpy113 = pytest.mark.skipif(LooseVersion(np.__version__) < '1.13', reason='numpy 1.13 or newer required') def assert_identical(a, b): assert type(a) is type(b) or (float(a) == float(b)) # noqa if isinstance(a, (xr.DataArray, xr.Dataset, xr.Variable)): assert_identical_(a, b) else: assert_array_equal(a, b) @requires_numpy113 def test_unary(): args = [0, np.zeros(2), xr.Variable(['x'], [0, 0]), xr.DataArray([0, 0], dims='x'), xr.Dataset({'y': ('x', [0, 0])})] for a in args: assert_identical(a + 1, np.cos(a)) @requires_numpy113 def test_binary(): args = [0, np.zeros(2), xr.Variable(['x'], [0, 0]), xr.DataArray([0, 0], dims='x'), xr.Dataset({'y': ('x', [0, 0])})] for n, t1 in enumerate(args): for t2 in args[n:]: assert_identical(t2 + 1, np.maximum(t1, t2 + 1)) assert_identical(t2 + 1, np.maximum(t2, t1 + 1)) assert_identical(t2 + 1, np.maximum(t1 + 1, t2)) assert_identical(t2 + 1, np.maximum(t2 + 1, t1)) @requires_numpy113 def test_binary_out(): args = [1, np.ones(2), xr.Variable(['x'], [1, 1]), xr.DataArray([1, 1], dims='x'), xr.Dataset({'y': ('x', [1, 1])})] for arg in args: actual_mantissa, actual_exponent = np.frexp(arg) assert_identical(actual_mantissa, 0.5 * arg) assert_identical(actual_exponent, arg) @requires_numpy113 def test_groupby(): ds = xr.Dataset({'a': ('x', [0, 0, 0])}, {'c': ('x', [0, 0, 1])}) ds_grouped = ds.groupby('c') group_mean = ds_grouped.mean('x') arr_grouped = ds['a'].groupby('c') assert_identical(ds, np.maximum(ds_grouped, group_mean)) assert_identical(ds, np.maximum(group_mean, ds_grouped)) assert_identical(ds, np.maximum(arr_grouped, group_mean)) assert_identical(ds, np.maximum(group_mean, arr_grouped)) assert_identical(ds, np.maximum(ds_grouped, group_mean['a'])) assert_identical(ds, np.maximum(group_mean['a'], ds_grouped)) assert_identical(ds.a, np.maximum(arr_grouped, group_mean.a)) assert_identical(ds.a, np.maximum(group_mean.a, arr_grouped)) with raises_regex(ValueError, 'mismatched lengths for dimension'): np.maximum(ds.a.variable, ds_grouped) @requires_numpy113 def test_alignment(): ds1 = xr.Dataset({'a': ('x', [1, 2])}, {'x': [0, 1]}) ds2 = xr.Dataset({'a': ('x', [2, 3]), 'b': 4}, {'x': [1, 2]}) actual = np.add(ds1, ds2) expected = xr.Dataset({'a': ('x', [4])}, {'x': [1]}) assert_identical_(actual, expected) with xr.set_options(arithmetic_join='outer'): actual = np.add(ds1, ds2) expected = xr.Dataset({'a': ('x', [np.nan, 4, np.nan]), 'b': np.nan}, coords={'x': [0, 1, 2]}) assert_identical_(actual, expected) @requires_numpy113 def test_kwargs(): x = xr.DataArray(0) result = np.add(x, 1, dtype=np.float64) assert result.dtype == np.float64 @requires_numpy113 def test_xarray_defers_to_unrecognized_type(): class Other(object): def __array_ufunc__(self, *args, **kwargs): return 'other' xarray_obj = xr.DataArray([1, 2, 3]) other = Other() assert np.maximum(xarray_obj, other) == 'other' assert np.sin(xarray_obj, out=other) == 'other' @requires_numpy113 def test_xarray_handles_dask(): da = pytest.importorskip('dask.array') x = xr.DataArray(np.ones((2, 2)), dims=['x', 'y']) y = da.ones((2, 2), chunks=(2, 2)) result = np.add(x, y) assert result.chunks == ((2,), (2,)) assert isinstance(result, xr.DataArray) @requires_numpy113 def test_dask_defers_to_xarray(): da = 
pytest.importorskip('dask.array') x = xr.DataArray(np.ones((2, 2)), dims=['x', 'y']) y = da.ones((2, 2), chunks=(2, 2)) result = np.add(y, x) assert result.chunks == ((2,), (2,)) assert isinstance(result, xr.DataArray) @requires_numpy113 def test_gufunc_methods(): xarray_obj = xr.DataArray([1, 2, 3]) with raises_regex(NotImplementedError, 'reduce method'): np.add.reduce(xarray_obj, 1) @requires_numpy113 def test_out(): xarray_obj = xr.DataArray([1, 2, 3]) # xarray out arguments should raise with raises_regex(NotImplementedError, '`out` argument'): np.add(xarray_obj, 1, out=xarray_obj) # but non-xarray should be OK other = np.zeros((3,)) np.add(other, xarray_obj, out=other) assert_identical(other, np.array([1, 2, 3])) @requires_numpy113 def test_gufuncs(): xarray_obj = xr.DataArray([1, 2, 3]) fake_gufunc = mock.Mock(signature='(n)->()', autospec=np.sin) with raises_regex(NotImplementedError, 'generalized ufuncs'): xarray_obj.__array_ufunc__(fake_gufunc, '__call__', xarray_obj) def test_xarray_ufuncs_deprecation(): with pytest.warns(PendingDeprecationWarning, match='xarray.ufuncs'): xu.cos(xr.DataArray([0, 1])) def test_xarray_ufuncs_pickle(): a = 1.0 cos_pickled = pickle.loads(pickle.dumps(xu.cos)) assert_identical(cos_pickled(a), xu.cos(a))
nilq/baby-python
python
from .utils import *
from .ps.dist_model import DistModel
from .ps import ps_util


def evaluate_ps(gpu_available, options):
    model_path = os.path.join(options.model_path, 'alex.pth')
    model = DistModel()
    model.initialize(model='net-lin', net='alex', model_path=model_path,
                     use_gpu=gpu_available)

    dist_stats = AverageMeter()

    original_path = os.path.join(options.eval_root_path, 'original/test')
    if options.eval_type == 'colorized':
        compare_path = os.path.join(options.eval_root_path, options.full_model_name, 'test')
    else:
        compare_path = os.path.join(options.eval_root_path, options.eval_type, 'test')

    for class_dir in os.listdir(original_path):
        if not os.path.isdir(os.path.join(original_path, class_dir)):
            continue
        original_class_path = os.path.join(original_path, class_dir)
        compare_class_path = os.path.join(compare_path, class_dir)

        for img_file in os.listdir(original_class_path):
            # Check inside the class directory (the original joined the parent
            # directory here, which never matched) and keep only .jpg files.
            if not os.path.isfile(os.path.join(original_class_path, img_file)) or img_file[-3:] != 'jpg':
                continue
            img_original = ps_util.im2tensor(
                ps_util.load_image(os.path.join(original_class_path, img_file)))
            img_compare = ps_util.im2tensor(
                ps_util.load_image(os.path.join(compare_class_path, img_file)))
            distance = model.forward(img_original, img_compare)[0]
            dist_stats.update(distance, 1)

        print_ts('Folder: {0}\tavg_dist {1:.3f}\tse_dist +/-{2:.3f}'.format(
            class_dir, dist_stats.avg, dist_stats.se))

    output_path = options.experiment_output_path
    epoch_stats = {
        'avg_dist': [dist_stats.avg],
        'se_dist': [dist_stats.se]}
    save_stats(output_path, 'ps_distances.csv', epoch_stats, 1)
nilq/baby-python
python
#!/usr/bin/env python # -*- coding: utf-8 -*- """Cisco-IOS-XR-Get-Full-ClearText-Running-Config Console Script. Copyright (c) 2021 Cisco and/or its affiliates. This software is licensed to you under the terms of the Cisco Sample Code License, Version 1.1 (the "License"). You may obtain a copy of the License at https://developer.cisco.com/docs/licenses All use of the material herein must be in accordance with the terms of the License. All rights not expressly granted by the License are reserved. Unless required by applicable law or agreed to separately in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. """ __author__ = "Tahsin Chowdhury" __email__ = "tchowdhu@cisco.com" __version__ = "0.1.0" __copyright__ = "Copyright (c) 2021 Cisco and/or its affiliates." __license__ = "Cisco Sample Code License, Version 1.1" #importing libraries from ncclient import manager import getpass #filter to retrieve config cli_cfg_filter= """ <filter type="subtree"> <cli xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-cli-cfg"/> </filter> """ while(True): # Device credential for netconf host = input("\nEnter Host IP-Address('q/Q to quit'): ") if host.upper() == 'Q': break username = input('Enter Username: ') password = getpass.getpass('Enter Password: ') # Retrieving config from node using netconf with manager.connect(host=host, port=830, username=username, password=password, hostkey_verify=False, look_for_keys=False) as netconf_connection: config = netconf_connection.get_config("running", filter=cli_cfg_filter) # Parsing CLI config in text format i.e. removing xml tags, # This portion of code will be added to the existing code config_data_string = config.data_xml parsed_config = '' for item in config_data_string.split("\n"): if ('<data' not in item) and \ ('</data>' not in item) and \ ('<cli' not in item) and \ ('</cli>' not in item): parsed_config += item + '\n' parsed_config = parsed_config.strip() print(parsed_config) # Storing device configruation in a text file filename = "%s.txt" % host.replace('.', '_') with open(filename, 'w') as f: f.write(parsed_config) print('\nThe running-configuration has been saved in the file: {}'.format(filename)) print('\nGoing for the next host...')
nilq/baby-python
python
#Uses Money Flow Index to determine when to buy and sell stock
import numpy as np
import pandas as pd
#import warnings
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
#warnings.filterwarnings('ignore')

df = pd.read_csv('StockTickers/JPM.csv', nrows=200)
df.set_index(pd.DatetimeIndex(df['Date'].values), inplace=True)

typical_price = (df['Close'] + df['High'] + df['Low']) / 3
period = 14
money_flow = typical_price * df['Volume']

positive_flow = list()
negative_flow = list()
for i in range(1, len(typical_price)):
    if typical_price.iloc[i] > typical_price.iloc[i-1]:
        positive_flow.append(money_flow.iloc[i-1])
        negative_flow.append(0)
    elif typical_price.iloc[i] < typical_price.iloc[i-1]:
        positive_flow.append(0)
        negative_flow.append(money_flow.iloc[i-1])
    else:
        positive_flow.append(0)
        negative_flow.append(0)

positive_mf = list()
negative_mf = list()
for i in range(period-1, len(positive_flow)):
    positive_mf.append(sum(positive_flow[i+1-period:i+1]))
for i in range(period-1, len(negative_flow)):
    negative_mf.append(sum(negative_flow[i+1-period:i+1]))

mfi = 100 * (np.array(positive_mf) / (np.array(positive_mf) + np.array(negative_mf)))

new_df = df[period:].copy()
new_df['MFI'] = mfi

def get_signal(data, high, low):
    buy_signal = list()
    sell_signal = list()
    flag = -1
    for i in range(len(data['MFI'])):
        if data['MFI'].iloc[i] > high:
            buy_signal.append(np.NaN)
            if flag != -1:
                sell_signal.append(data['Close'].iloc[i])
                flag = -1
            else:
                sell_signal.append(np.NaN)
        elif data['MFI'].iloc[i] < low:
            if flag != 1:
                buy_signal.append(data['Close'].iloc[i])
                flag = 1
            else:
                buy_signal.append(np.NaN)
            sell_signal.append(np.NaN)
        else:
            buy_signal.append(np.NaN)
            sell_signal.append(np.NaN)
    return (buy_signal, sell_signal)

new_df['Buy'] = get_signal(new_df, 80, 20)[0]
new_df['Sell'] = get_signal(new_df, 80, 20)[1]

#plotting
plt.figure(figsize=(12.2, 6.4))
plt.plot(df['Close'], label="Close Price", alpha=0.5)
plt.scatter(new_df.index, new_df['Buy'], color='green', marker='^', label='Buy Signal', alpha=1)
plt.scatter(new_df.index, new_df['Sell'], color='red', marker='v', label='Sell Signal', alpha=1)
plt.title('JPM Close Price')
plt.xlabel('Date')
plt.ylabel('Close Price USD($)')
plt.legend(loc='upper left')
plt.show()

#shows MFI value plot
plt.figure(figsize=(12.2, 6.4))
plt.plot(new_df['MFI'], label="MFI")
plt.axhline(10, linestyle='--', color='orange')
plt.axhline(20, linestyle='--', color='blue')
plt.axhline(80, linestyle='--', color='blue')
plt.axhline(90, linestyle='--', color='orange')
plt.title('MFI')
plt.ylabel('MFI Values')
plt.show()
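
# Quick sanity check (not part of the original script): count how many
# buy/sell markers the 80/20 MFI thresholds produced over this window.
print('Buy signals: ', int(new_df['Buy'].notna().sum()))
print('Sell signals:', int(new_df['Sell'].notna().sum()))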
nilq/baby-python
python
from synapse import Synapse
from timemodule import Clock
from neuron import *
import random

clock = Clock()
neurons = []

for i in range(20):
    neurons.append(Neuron(clock.get_time(), 7/10))

for i in range(30):
    connect(neurons[random.randrange(20)], neurons[random.randrange(20)],
            random.randrange(2), 1)

for neuron in neurons:
    print(neuron)

while True:
    command = input().lower()
    if command == 't':
        clock.tick(1)
        for neuron in neurons:
            neuron.update_inputs(clock.get_time())
            neuron.update_outputs()
        for neuron in neurons:
            print(neuron)
nilq/baby-python
python
#!/usr/bin/env python3
import logging
import os

import yaml
from jinja2 import Environment, FileSystemLoader

logging.getLogger().setLevel(logging.DEBUG)


def main(template_name, vars_file):
    logging.info("Enter main.")
    with open(vars_file, 'r') as yaml_vars:
        variables = yaml.safe_load(yaml_vars)
    these_variables = {'creature': variables['creature'][0],
                       'meta': {'wide': False}}
    rendered = basic(template_name, these_variables)
    print(rendered)


def basic(template_target, variables):
    templates_dir = os.path.join(os.path.dirname(__file__), 'templates')
    env = Environment(
        loader=FileSystemLoader(templates_dir),
        extensions=['jinja2.ext.autoescape'],
        autoescape=True)
    template = env.get_template(template_target)
    return template.render(variables)
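

# Minimal CLI entry point (an assumption: the original module defines main()
# but never calls it); the two positional arguments mirror main's signature.
if __name__ == '__main__':
    import sys
    main(sys.argv[1], sys.argv[2])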
nilq/baby-python
python
from status import Status import errors from unittest import TestCase class TestStatus(TestCase): def test_ok(self): try: Status.divide(Status.OK.value, None) self.assertTrue(True) except errors.JstageError: self.assertTrue(False) def test_no_results(self): try: Status.divide(Status.NO_RESULTS.value, '') self.assertTrue(False) except errors.NoResultsError: self.assertTrue(True) except errors.JstageError: self.assertTrue(False) def test_too_many_results(self): try: Status.divide(Status.TOO_MANY_RESULTS.value, '') self.assertTrue(False) except errors.TooManyResultsError: self.assertTrue(True) except errors.JstageError: self.assertTrue(False) def test_too_many_requests(self): try: Status.divide(Status.TOO_MANY_REQUESTS.value, '') self.assertTrue(False) except errors.TooManyRequestsError: self.assertTrue(True) except errors.JstageError: self.assertTrue(False) def test_invalid_query(self): try: Status.divide(Status.INVALID_QUERY.value, '') self.assertTrue(False) except errors.InvalidQueryError: self.assertTrue(True) except errors.JstageError: self.assertTrue(False) def test_empty_required_field(self): try: Status.divide(Status.EMPTY_REQUIRED_FIELD.value, None) self.assertTrue(False) except errors.EmptyRequiredFieldError: self.assertTrue(True) except errors.JstageError: self.assertTrue(False) def test_invalid_year_value(self): try: Status.divide(Status.INVALID_YEAR_VALUE.value, '') self.assertTrue(False) except errors.InvalidYearValueError: self.assertTrue(True) except errors.JstageError: self.assertTrue(False) def test_invalid_counts(self): try: Status.divide(Status.INVALID_COUNTS.value, '') self.assertTrue(False) except errors.InvalidCountsError: self.assertTrue(True) except errors.JstageError: self.assertTrue(False) def test_invalid_issn(self): try: Status.divide(Status.INVALID_ISSN.value, '') self.assertTrue(False) except errors.InvalidIssnError: self.assertTrue(True) except errors.JstageError: self.assertTrue(False) def test_system_fatal(self): try: Status.divide(Status.SYSTEM_FATAL.value, '') self.assertTrue(False) except errors.SystemFatalError: self.assertTrue(True) except errors.JstageError: self.assertTrue(False) def test_invalid_url(self): try: Status.divide(Status.INVALID_URL.value, '') self.assertTrue(False) except errors.InvalidUrlError: self.assertTrue(True) except errors.JstageError: self.assertTrue(False) def test_list_no_query(self): try: Status.divide(Status.LIST_NO_QUERY.value, '') self.assertTrue(False) except errors.ListNoQueryError: self.assertTrue(True) except errors.JstageError: self.assertTrue(False) def test_search_no_query(self): try: Status.divide(Status.SEARCH_NO_QUERY.value, '') self.assertTrue(False) except errors.SearchNoQueryError: self.assertTrue(True) except errors.JstageError: self.assertTrue(False) def test_list_unspecified(self): try: Status.divide(Status.LIST_UNSPECIFIED.value, '') self.assertTrue(False) except errors.ListUnspecifiedError: self.assertTrue(True) except errors.JstageError: self.assertTrue(False) def test_search_unsortable(self): try: Status.divide(Status.SEARCH_UNSORTABLE.value, '') self.assertTrue(False) except errors.SearchUnsortableError: self.assertTrue(True) except errors.JstageError: self.assertTrue(False)
nilq/baby-python
python
# -*- coding: utf-8 -*- # file: train_atepc_english.py # time: 2021/6/8 0008 # author: yangheng <yangheng@m.scnu.edu.cn> # github: https://github.com/yangheng95 # Copyright (C) 2021. All Rights Reserved. ######################################################################################################################## # ATEPC training_tutorials script # ######################################################################################################################## from pyabsa.functional import ATEPCModelList from pyabsa.functional import Trainer, ATEPCTrainer from pyabsa.functional import ABSADatasetList from pyabsa.functional import ATEPCConfigManager config = ATEPCConfigManager.get_atepc_config_english() config.model = ATEPCModelList.LCF_ATEPC config.evaluate_begin = 5 config.num_epoch = 6 config.log_step = 100 semeval = ABSADatasetList.SemEval aspect_extractor = Trainer(config=config, dataset=semeval, checkpoint_save_mode=1, auto_device=True )
nilq/baby-python
python
#!/usr/bin/env python3
import pyd4
import sys

file = pyd4.D4File(sys.argv[1])

chrom = sys.argv[2]
begin = int(sys.argv[3])
end = int(sys.argv[4])

for (chrom, pos, value) in pyd4.enumerate_values(file, chrom, begin, end):
    print(chrom, pos, value)
nilq/baby-python
python
import datetime model_date_to_read = '20200125' model_version_to_read = '2.0' model_date_to_write = datetime.datetime.today().strftime('%Y%m%d') model_version_to_write = '2.0'
nilq/baby-python
python
# -*- coding: utf-8 -*- """ Created on 2017-8-23 @author: cheng.li """ import numpy as np import pandas as pd from PyFin.api import * from alphamind.api import * from matplotlib import pyplot as plt plt.style.use('ggplot') import datetime as dt start = dt.datetime.now() universe = Universe('custom', ['zz800']) factor_name = 'Beta20' base1 = LAST('roe_q') base2 = CSRes(LAST('ep_q'), 'roe_q') simple_expression = CSRes(CSRes(LAST(factor_name), base1), base2) alpha_factor_name = factor_name + '_res' alpha_factor = {alpha_factor_name: simple_expression} # end of formula definition engine = SqlEngine('postgresql+psycopg2://postgres:A12345678!@10.63.6.220/alpha') neutralize_risk = ['SIZE', 'LEVERAGE'] + industry_styles freq = '5b' n_bins = 5 horizon = map_freq(freq) start_date = '2012-01-01' end_date = '2018-01-05' dates = makeSchedule(start_date, end_date, tenor=freq, calendar='china.sse') factor_all_data = engine.fetch_data_range(universe, alpha_factor, dates=dates)['factor'] return_all_data = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon) factor_groups = factor_all_data.groupby('trade_date') return_groups = return_all_data.groupby('trade_date') final_res = np.zeros((len(factor_groups.groups), n_bins)) index_dates = [] for i, value in enumerate(factor_groups): date = value[0] data = value[1][['code', alpha_factor_name, 'isOpen'] + neutralize_risk] codes = data.code.tolist() ref_date = value[0].strftime('%Y-%m-%d') returns = return_groups.get_group(date) total_data = pd.merge(data, returns, on=['code']).dropna() risk_exp = total_data[neutralize_risk].values.astype(float) dx_return = total_data.dx.values index_dates.append(date) f_data = total_data[[alpha_factor_name]] try: er = factor_processing(total_data[[alpha_factor_name]].values, pre_process=[winsorize_normal, standardize], risk_factors=risk_exp, post_process=[winsorize_normal, standardize]) res = er_quantile_analysis(er, n_bins=n_bins, dx_return=dx_return) except Exception as e: print(e) res = np.zeros(n_bins) final_res[i] = res df = pd.DataFrame(final_res, index=index_dates) start_date = advanceDateByCalendar('china.sse', dates[0], '-1d') df.loc[start_date] = 0. 
df.sort_index(inplace=True) fig, axes = plt.subplots(1, 2, figsize=(18, 6)) df = df.cumsum().plot(ax=axes[0], title='Quantile Analysis for {0}'.format(alpha_factor_name)) # =================================================================== # alpha_factor_name = alpha_factor_name + '_1w_diff' alpha_factor = {alpha_factor_name: DIFF(simple_expression)} dates = makeSchedule(start_date, end_date, tenor=freq, calendar='china.sse') factor_all_data = engine.fetch_data_range(universe, alpha_factor, dates=dates)['factor'] return_all_data = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon) factor_groups = factor_all_data.groupby('trade_date') return_groups = return_all_data.groupby('trade_date') final_res = np.zeros((len(factor_groups.groups), n_bins)) index_dates = [] for i, value in enumerate(factor_groups): date = value[0] data = value[1][['code', alpha_factor_name, 'isOpen'] + neutralize_risk] codes = data.code.tolist() ref_date = value[0].strftime('%Y-%m-%d') returns = return_groups.get_group(date) total_data = pd.merge(data, returns, on=['code']).dropna() risk_exp = total_data[neutralize_risk].values.astype(float) dx_return = total_data.dx.values index_dates.append(date) f_data = total_data[[alpha_factor_name]] try: er = factor_processing(total_data[[alpha_factor_name]].values, pre_process=[winsorize_normal, standardize], risk_factors=risk_exp, post_process=[winsorize_normal, standardize]) res = er_quantile_analysis(er, n_bins=n_bins, dx_return=dx_return) except Exception as e: print(e) res = np.zeros(n_bins) final_res[i] = res df = pd.DataFrame(final_res, index=index_dates) start_date = advanceDateByCalendar('china.sse', dates[0], '-1d') df.loc[start_date] = 0. df.sort_index(inplace=True) df = df.cumsum().plot(ax=axes[1], title='Quantile Analysis for {0}'.format(alpha_factor_name)) plt.show() print(dt.datetime.now() - start)
nilq/baby-python
python
# Other imports
import numpy as np
import torch

# DeepCASE Imports
from deepcase.preprocessing import Preprocessor
from deepcase import DeepCASE

if __name__ == "__main__":
    ########################################################################
    #                             Loading data                             #
    ########################################################################

    # Create preprocessor
    preprocessor = Preprocessor(
        length  = 10,    # 10 events in context
        timeout = 86400, # Ignore events older than 1 day (60*60*24 = 86400 seconds)
    )

    # Load data from file
    context, events, labels, mapping = preprocessor.csv('data/example.csv')

    # In case no labels are provided, set labels to -1
    if labels is None:
        labels = np.full(events.shape[0], -1, dtype=int)

    # Cast to cuda if available
    if torch.cuda.is_available():
        events  = events .to('cuda')
        context = context.to('cuda')

    ########################################################################
    #                            Splitting data                            #
    ########################################################################

    # Split into train and test sets (20:80) by time - assuming events are ordered chronologically
    events_train  = events [:events.shape[0]//5 ]
    events_test   = events [ events.shape[0]//5:]

    context_train = context[:events.shape[0]//5 ]
    context_test  = context[ events.shape[0]//5:]

    labels_train  = labels [:events.shape[0]//5 ]
    labels_test   = labels [ events.shape[0]//5:]

    ########################################################################
    #                            Using DeepCASE                            #
    ########################################################################

    deepcase = DeepCASE(
        # ContextBuilder parameters
        features    = 300, # Number of input features to expect
        max_length  = 10,  # Length of the context, should be same as context in Preprocessor
        hidden_size = 128, # Number of nodes in hidden layer, in paper we set this to 128

        # Interpreter parameters
        eps         = 0.1, # Epsilon value to use for DBSCAN clustering, in paper this was 0.1
        min_samples = 5,   # Minimum number of samples to use for DBSCAN clustering, in paper this was 5
        threshold   = 0.2, # Confidence threshold used for determining if attention from the ContextBuilder can be used, in paper this was 0.2
    )

    # Cast to cuda if available
    if torch.cuda.is_available():
        deepcase = deepcase.to('cuda')

    ########################################################################
    #                             Fit DeepCASE                             #
    ########################################################################

    # Train the ContextBuilder
    # Conveniently, the fit and fit_predict methods have the same API, so if you
    # do not require the predicted values on the train dataset, simply
    # substitute fit_predict with fit and it will run slightly quicker because
    # DeepCASE skips the prediction over the training dataset and simply returns
    # the deepcase object itself. Other than that, both calls are exactly the
    # same.
prediction_train = deepcase.fit_predict( # Input data X = context_train, # Context to train with y = events_train.reshape(-1, 1), # Events to train with, note that these should be of shape=(n_events, 1) scores = labels_train, # Labels used to compute score (either as loaded by Preprocessor, or put your own labels here) # ContextBuilder-specific parameters epochs = 10, # Number of epochs to train with batch_size = 128, # Number of samples in each training batch, in paper this was 128 learning_rate = 0.01, # Learning rate to train with, in paper this was 0.01 # Interpreter-specific parameters iterations = 100, # Number of iterations to use for attention query, in paper this was 100 query_batch_size = 1024, # Batch size to use for attention query, used to limit CUDA memory usage strategy = "max", # Strategy to use for scoring (one of "max", "min", "avg") NO_SCORE = -1, # Any sequence with this score will be ignored in the strategy. # If assigned a cluster, the sequence will inherit the cluster score. # If the sequence is not present in a cluster, it will receive a score of NO_SCORE. # Verbosity level verbose = True, # If True, prints progress ) ######################################################################## # Predict with DeepCASE # ######################################################################## # Compute predicted scores prediction_test = deepcase.predict( X = context_test, # Context to predict y = events_test.reshape(-1, 1), # Events to predict, note that these should be of shape=(n_events, 1) iterations = 100, # Number of iterations to use for attention query, in paper this was 100 batch_size = 1024, # Batch size to use for attention query, used to limit CUDA memory usage verbose = True, # If True, prints progress )
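
    ########################################################################
    #                         Inspect predictions                          #
    ########################################################################

    # A small sketch, not part of the original example: pull the predicted
    # scores back to the CPU and report their range. How unmatched sequences
    # are marked (e.g. with negative scores) is version-dependent, so only
    # the raw range is shown here.
    if torch.is_tensor(prediction_test):
        prediction_test = prediction_test.cpu()
    print("Predicted score range: [{}, {}]".format(
        prediction_test.min(), prediction_test.max()))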
nilq/baby-python
python
""" [summary] [extended_summary] """ # region [Imports] # * Standard Library Imports ------------------------------------------------------------------------------------------------------------------------------------> import os from datetime import datetime import re # * Third Party Imports -----------------------------------------------------------------------------------------------------------------------------------------> from PIL import Image, ImageDraw, ImageFont from async_property import async_property, async_cached_property import discord import asyncio from discord.ext import commands # * Gid Imports -------------------------------------------------------------------------------------------------------------------------------------------------> import gidlogger as glog # * Local Imports -----------------------------------------------------------------------------------------------------------------------------------------------> from antipetros_discordbot.init_userdata.user_data_setup import ParaStorageKeeper from antipetros_discordbot.utility.gidtools_functions import pathmaker from antipetros_discordbot.utility.exceptions import FaqNumberParseError, FaqQuestionParseError, FaqAnswerParseError, ClassAttributesNotSetError # endregion[Imports] # region [TODO] # TODO: Refractor and sort whole image logic # TODO: Maybe better parser # TODO: check if asyncio image creation or to_thread image creation is better # endregion [TODO] # region [AppUserData] APPDATA = ParaStorageKeeper.get_appdata() BASE_CONFIG = ParaStorageKeeper.get_config('base_config') COGS_CONFIG = ParaStorageKeeper.get_config('cogs_config') # endregion [AppUserData] # region [Logging] log = glog.aux_logger(__name__) log.info(glog.imported(__name__)) # endregion[Logging] # region [Constants] THIS_FILE_DIR = os.path.abspath(os.path.dirname(__file__)) # endregion[Constants] class FaqItem: __slots__ = ("_raw_content", "creation_date_time", "url", "_image", "number", "_number_thumbnail", "question", "answer") bot = None faq_channel = None question_parse_emoji = "🇶" answer_parse_emoji = "🇦" question_emoji = None answer_emoji = None config_name = None number_regex = re.compile(r".*?FAQ No.*?(?P<faq_number>\d+)", re.IGNORECASE) question_regex = re.compile(r"🇶(?P<question>.*)") answer_regex = re.compile(r"🇦(?P<answer>.*)", re.DOTALL) background_image = None start_font_size = 125 def __init__(self, raw_content: str, created_at: datetime, url: str, image: discord.Attachment = None) -> None: self._check_class_attr() self._raw_content = str(raw_content) self.creation_date_time = created_at self.url = url self._image = image self.number = self._get_number() self._number_thumbnail = None self.question = self._get_question() self.answer = self._get_answer() @classmethod def set_background_image(cls): image_name = COGS_CONFIG.retrieve(cls.config_name, 'numbers_background_image', typus=str, direct_fallback="ASFlagexp.png") image_path = APPDATA[image_name] cls.background_image = Image.open(image_path).copy() def _check_class_attr(self): if self.bot is None: raise ClassAttributesNotSetError('bot') if self.question_parse_emoji is None: raise ClassAttributesNotSetError('question_parse_emoji') if self.answer_parse_emoji is None: raise ClassAttributesNotSetError('answer_parse_emoji') if self.config_name is None: raise ClassAttributesNotSetError('config_name') def _get_number(self): number_match = self.number_regex.match(self._raw_content) if number_match: return int(number_match.group('faq_number')) else: raise 
FaqNumberParseError(self._raw_content, self.url) @property def antistasi_icon(self): return BASE_CONFIG.retrieve('embeds', 'antistasi_author_icon', typus=str, direct_fallback="https://pbs.twimg.com/profile_images/1123720788924932098/C5bG5UPq.jpg") def _get_question(self): question_match = self.question_regex.search(self._raw_content) if question_match: question_emoji = self.question_parse_emoji if self.question_emoji is None else self.question_emoji return f"{question_emoji} {question_match.group('question').strip()}" else: raise FaqQuestionParseError(self._raw_content, self.url) def _get_answer(self): answer_match = self.answer_regex.search(self._raw_content) if answer_match: answer_emoji = self.answer_parse_emoji if self.answer_emoji is None else self.answer_emoji answer = answer_match.group('answer').strip() return f"{answer_emoji} {answer}" else: raise FaqAnswerParseError(self._raw_content, self.url) @property def image(self): if self._image is None: return None return self._image.url async def get_number_thumbnail(self): if self._number_thumbnail is None: self._number_thumbnail = await self._make_number_image() return self._number_thumbnail async def _get_text_dimensions(self, font): # https://stackoverflow.com/a/46220683/9263761 text_string = str(self.number) ascent, descent = font.getmetrics() text_width = font.getmask(text_string).getbbox()[2] await asyncio.sleep(0) text_height = font.getmask(text_string).getbbox()[3] + descent return (text_width, text_height) async def _make_perfect_fontsize(self, image_width, image_height): padding_width = image_width // 5 padding_height = image_height // 5 font_size = self.start_font_size font = ImageFont.truetype(APPDATA['stencilla.ttf'], font_size) text_size = await self._get_text_dimensions(font) while text_size[0] <= (image_width - padding_width) and text_size[1] <= (image_height - padding_height): font_size += await asyncio.sleep(0, 2) font = ImageFont.truetype(APPDATA['stencilla.ttf'], font_size) text_size = await self._get_text_dimensions(font) return ImageFont.truetype(APPDATA['stencilla.ttf'], font_size - 2) async def _make_number_image(self): number_string = str(self.number) image = self.background_image.copy() width, height = image.size font = await self._make_perfect_fontsize(width, height) draw = await asyncio.to_thread(ImageDraw.Draw, image) w, h = await asyncio.to_thread(draw.textsize, number_string, font=font) h += int(h * 0.01) await asyncio.to_thread(draw.text, ((width - w) / 2, (height - h) / 2), number_string, fill=self.bot.color('white').rgb, stroke_width=width // 25, stroke_fill=(0, 0, 0), font=font) return image async def to_embed_data(self): author = {"name": f"FAQ No {self.number} 🔗", "url": self.url, "icon_url": self.antistasi_icon} return await self.bot.make_generic_embed(author=author, thumbnail=await self.get_number_thumbnail(), image=self.image, title=self.question, description=self.answer + '\n\n' + self.faq_channel.mention, timestamp=self.creation_date_time, color="random", typus="faq_embed") def __repr__(self): return f"{self.__class__.__name__}(number={self.number},question={self.question})" # region[Main_Exec] if __name__ == '__main__': pass # endregion[Main_Exec]
nilq/baby-python
python
import math import sys import time import numpy as np import owl from net import Net import net from net_helper import CaffeNetBuilder from caffe import * from PIL import Image class NetTrainer: ''' Class for training neural network Allows user to train using Caffe's network configure format but on multiple GPUs. One could use NetTrainer as follows: >>> trainer = NetTrainer(solver_file, snapshot, num_gpu) >>> trainer.build_net() >>> trainer.run() :ivar str solver_file: path of the solver file in Caffe's proto format :ivar int snapshot: the idx of snapshot to start with :ivar int num_gpu: the number of gpu to use :ivar int sync_freq: the frequency to stop lazy evaluation and print some information. The frequency means every how many minibatches will the trainer call ``owl.wait_for_all()``. Note that this will influence the training speed. Normally, the higher value is given, the faster the training speed but the more memory is used during execution. ''' def __init__(self, solver_file, snapshot = 0, num_gpu = 1, sync_freq=1): self.solver_file = solver_file self.snapshot = snapshot self.num_gpu = num_gpu self.sync_freq = sync_freq self.gpu = [owl.create_gpu_device(i) for i in range(num_gpu)] def build_net(self): ''' Build network structure using Caffe's proto definition. It will also initialize the network either from given snapshot or from scratch (using proper initializer). During initialization, it will first try to load weight from snapshot. If failed, it will then initialize the weight accordingly. ''' self.owl_net = Net() self.builder = CaffeNetBuilder(self.solver_file) self.snapshot_dir = self.builder.snapshot_dir self.builder.build_net(self.owl_net, self.num_gpu) self.owl_net.compute_size() self.builder.init_net_from_file(self.owl_net, self.snapshot_dir, self.snapshot) def run(s): ''' Run the training algorithm on multiple GPUs The basic logic is similar to the traditional single GPU training code as follows (pseudo-code):: for epoch in range(MAX_EPOCH): for i in range(NUM_MINI_BATCHES): # load i^th minibatch minibatch = loader.load(i, MINI_BATCH_SIZE) net.ff(minibatch.data) net.bp(minibatch.label) grad = net.gradient() net.update(grad, MINI_BATCH_SIZE) With Minerva's lazy evaluation and dataflow engine, we are able to modify the above logic to perform data parallelism on multiple GPUs (pseudo-code):: for epoch in range(MAX_EPOCH): for i in range(0, NUM_MINI_BATCHES, NUM_GPU): gpu_grad = [None for i in range(NUM_GPU)] for gpuid in range(NUM_GPU): # specify which gpu following codes are running on owl.set_device(gpuid) # each minibatch is split among GPUs minibatch = loader.load(i + gpuid, MINI_BATCH_SIZE / NUM_GPU) net.ff(minibatch.data) net.bp(minibatch.label) gpu_grad[gpuid] = net.gradient() net.accumulate_and_update(gpu_grad, MINI_BATCH_SIZE) So each GPU will take charge of one *mini-mini batch* training, and since all their ``ff``, ``bp`` and ``gradient`` calculations are independent among each others, they could be paralleled naturally using Minerva's DAG engine. The only problem let is ``accumulate_and_update`` of the the gradient from all GPUs. If we do it on one GPU, that GPU would become a bottleneck. 
The solution is to also partition the workload to different GPUs (pseudo-code):: def accumulate_and_update(gpu_grad, MINI_BATCH_SIZE): num_layers = len(gpu_grad[0]) for layer in range(num_layers): upd_gpu = layer * NUM_GPU / num_layers # specify which gpu to update the layer owl.set_device(upd_gpu) for gid in range(NUM_GPU): if gid != upd_gpu: gpu_grad[upd_gpu][layer] += gpu_grad[gid][layer] net.update_layer(layer, gpu_grad[upd_gpu][layer], MINI_BATCH_SIZE) Since the update of each layer is independent among each others, the update could be paralleled affluently. Minerva's dataflow engine transparently handles the dependency resolving, scheduling and memory copying among different devices, so users don't need to care about that. ''' wgrad = [[] for i in range(s.num_gpu)] bgrad = [[] for i in range(s.num_gpu)] last = time.time() wunits = s.owl_net.get_weighted_unit_ids() last_start = time.time() start_idx = s.snapshot * s.owl_net.solver.snapshot end_idx = s.owl_net.solver.max_iter for iteridx in range(start_idx, end_idx): # get the learning rate if s.owl_net.solver.lr_policy == "poly": s.owl_net.current_lr = s.owl_net.base_lr * pow(1 - float(iteridx) / s.owl_net.solver.max_iter, s.owl_net.solver.power) elif s.owl_net.solver.lr_policy == "step": s.owl_net.current_lr = s.owl_net.base_lr * pow(s.owl_net.solver.gamma, iteridx / s.owl_net.solver.stepsize) # train on multi-gpu for gpuid in range(s.num_gpu): owl.set_device(s.gpu[gpuid]) s.owl_net.forward('TRAIN') s.owl_net.backward('TRAIN') for wid in wunits: wgrad[gpuid].append(s.owl_net.units[wid].weightgrad) bgrad[gpuid].append(s.owl_net.units[wid].biasgrad) # weight update for i in range(len(wunits)): wid = wunits[i] upd_gpu = i * s.num_gpu / len(wunits) owl.set_device(s.gpu[upd_gpu]) for gid in range(s.num_gpu): if gid == upd_gpu: continue wgrad[upd_gpu][i] += wgrad[gid][i] bgrad[upd_gpu][i] += bgrad[gid][i] s.owl_net.units[wid].weightgrad = wgrad[upd_gpu][i] s.owl_net.units[wid].biasgrad = bgrad[upd_gpu][i] s.owl_net.update(wid) if iteridx % s.sync_freq == 0: owl.wait_for_all() thistime = time.time() - last speed = s.owl_net.batch_size * s.sync_freq / thistime print "Finished training %d minibatch (time: %s; speed: %s img/s)" % (iteridx, thistime, speed) last = time.time() wgrad = [[] for i in range(s.num_gpu)] # reset gradients bgrad = [[] for i in range(s.num_gpu)] # decide whether to display loss if (iteridx + 1) % (s.owl_net.solver.display) == 0: lossunits = s.owl_net.get_loss_units() for lu in lossunits: print "Training Loss %s: %f" % (lu.name, lu.getloss()) # decide whether to test if (iteridx + 1) % (s.owl_net.solver.test_interval) == 0: acc_num = 0 test_num = 0 for testiteridx in range(s.owl_net.solver.test_iter[0]): s.owl_net.forward('TEST') all_accunits = s.owl_net.get_accuracy_units() accunit = all_accunits[len(all_accunits)-1] #accunit = all_accunits[0] test_num += accunit.batch_size acc_num += (accunit.batch_size * accunit.acc) print "Accuracy the %d mb: %f" % (testiteridx, accunit.acc) sys.stdout.flush() print "Testing Accuracy: %f" % (float(acc_num)/test_num) # decide whether to save model if (iteridx + 1) % (s.owl_net.solver.snapshot) == 0: print "Save to snapshot %d, current lr %f" % ((iteridx + 1) / (s.owl_net.solver.snapshot), s.owl_net.current_lr) s.builder.save_net_to_file(s.owl_net, s.snapshot_dir, (iteridx + 1) / (s.owl_net.solver.snapshot)) sys.stdout.flush() def gradient_checker(s, checklayer_name): ''' Check backpropagation on multiple GPUs ''' h = 1e-2 threshold = 1e-4 checklayer = 
s.owl_net.units[s.owl_net.name_to_uid[checklayer_name][0]] losslayer = [] for i in xrange(len(s.owl_net.units)): if isinstance(s.owl_net.units[i], net.SoftmaxUnit): losslayer.append(i) last = None ''' wunits = [] for i in xrange(len(s.owl_net.units)): if isinstance(s.owl_net.units[i], net.WeightedComputeUnit): wunits.append(i) ''' wunits = s.owl_net.get_weighted_unit_ids() accunits = s.owl_net.get_accuracy_units() owl.set_device(s.gpu[0]) for iteridx in range(100): #disturb the weights oriweight = checklayer.weight npweight = checklayer.weight.to_numpy() weightshape = np.shape(npweight) npweight = npweight.reshape(np.prod(weightshape[0:len(weightshape)])) position = np.random.randint(0, np.shape(npweight)[0]) disturb = np.zeros(np.shape(npweight), dtype = np.float32) disturb[position] = h oriposval = npweight[position] npweight += disturb newposval = npweight[position] npweight = npweight.reshape(weightshape) checklayer.weight = owl.from_numpy(npweight) all_loss = 0 # train on multi-gpu s.owl_net.forward_check() for i in range(len(losslayer)): if len(s.owl_net.units[losslayer[i]].loss_weight) == 1: all_loss += (s.owl_net.units[losslayer[i]].getloss() * s.owl_net.units[losslayer[i]].loss_weight[0]) else: all_loss += s.owl_net.units[losslayer[i]].getloss() #get origin loss checklayer.weight = oriweight ori_all_loss = 0 # train on multi-gpu s.owl_net.forward_check() for i in range(len(losslayer)): if len(s.owl_net.units[losslayer[i]].loss_weight) == 1: ori_all_loss += (s.owl_net.units[losslayer[i]].getloss() * s.owl_net.units[losslayer[i]].loss_weight[0]) else: ori_all_loss += s.owl_net.units[losslayer[i]].getloss() s.owl_net.backward('TEST') #get analytic gradient npgrad = checklayer.weightgrad.to_numpy() npgrad = npgrad.reshape(np.prod(weightshape[0:len(weightshape)])) analy_grad = npgrad[position] / s.owl_net.units[losslayer[i]].out.shape[1] num_grad = (all_loss - ori_all_loss) / h info = "Gradient Check at positon: %d analy: %f num: %f ratio: %f" % (position, analy_grad, num_grad, analy_grad / num_grad) print info class NetTester: ''' Class for performing testing, it can be single-view or multi-view, can be top-1 or top-5 Run it as:: >>> tester = NetTester(solver_file, softmax_layer, accuracy_layer, snapshot, gpu_idx) >>> tester.build_net() >>> tester.run(multiview) :ivar str solver_file: path of the solver file in Caffe's proto format :ivar int snapshot: the snapshot for testing :ivar str softmax_layer_name: name of the softmax layer that produce prediction :ivar str accuracy_layer_name: name of the accuracy layer that produce prediction :ivar int gpu_idx: which gpu to perform the test :ivar bool multiview: whether to use multiview tester ''' def __init__(self, solver_file, softmax_layer_name, accuracy_layer_name, snapshot, gpu_idx = 0): self.solver_file = solver_file self.softmax_layer_name = softmax_layer_name self.accuracy_layer_name = accuracy_layer_name self.snapshot = snapshot self.gpu = owl.create_gpu_device(gpu_idx) owl.set_device(self.gpu) def build_net(self): self.owl_net = Net() self.builder = CaffeNetBuilder(self.solver_file) self.snapshot_dir = self.builder.snapshot_dir self.builder.build_net(self.owl_net) self.owl_net.compute_size('TEST') self.builder.init_net_from_file(self.owl_net, self.snapshot_dir, self.snapshot) def run(s, multiview): #multi-view test acc_num = 0 test_num = 0 loss_unit = s.owl_net.units[s.owl_net.name_to_uid[s.softmax_layer_name][0]] accunit = s.owl_net.units[s.owl_net.name_to_uid[s.accuracy_layer_name][0]] data_unit = None for data_idx in 
range(len(s.owl_net.data_layers)): for i in range(len(s.owl_net.name_to_uid[s.owl_net.data_layers[data_idx]])): if s.owl_net.units[s.owl_net.name_to_uid[s.owl_net.data_layers[data_idx]][i]].params.include[0].phase == 1: data_unit = s.owl_net.units[s.owl_net.name_to_uid[s.owl_net.data_layers[data_idx]][i]] assert(data_unit) if multiview == True: data_unit.multiview = True for testiteridx in range(s.owl_net.solver.test_iter[0]): if multiview == True: for i in range(10): s.owl_net.forward('TEST') if i == 0: softmax_val = loss_unit.ff_y batch_size = softmax_val.shape[1] softmax_label = loss_unit.y else: softmax_val = softmax_val + loss_unit.ff_y test_num += batch_size if accunit.top_k == 5: predict = softmax_val.to_numpy() top_5 = np.argsort(predict, axis=1)[:,::-1] ground_truth = softmax_label.max_index(0).to_numpy() correct = 0 for i in range(batch_size): for t in range(5): if ground_truth[i] == top_5[i,t]: correct += 1 break acc_num += correct else: predict = softmax_val.max_index(0) truth = softmax_label.max_index(0) correct = (predict - truth).count_zero() acc_num += correct else: s.owl_net.forward('TEST') all_accunits = s.owl_net.get_accuracy_units() batch_size = accunit.batch_size test_num += batch_size acc_num += (batch_size * accunit.acc) correct = batch_size * accunit.acc print "Accuracy of the %d mb: %f, batch_size: %d, current mean accuracy: %f" % (testiteridx, (correct * 1.0)/batch_size, batch_size, float(acc_num)/test_num) sys.stdout.flush() print "Testing Accuracy: %f" % (float(acc_num)/test_num) class FeatureExtractor: ''' Class for extracting trained features Feature will be stored in a txt file as a matrix. The size of the feature matrix is [num_img, feature_dimension] Run it as:: >>> extractor = FeatureExtractor(solver_file, snapshot, gpu_idx) >>> extractor.build_net() >>> extractor.run(layer_name, feature_path) :ivar str solver_file: path of the solver file in Caffe's proto format :ivar int snapshot: the snapshot for testing :ivar str layer_name: name of the ayer that produce feature :ivar int gpu_idx: which gpu to perform the test ''' def __init__(self, solver_file, snapshot, gpu_idx = 0): self.solver_file = solver_file self.snapshot = snapshot self.gpu = owl.create_gpu_device(gpu_idx) owl.set_device(self.gpu) def build_net(self): self.owl_net = Net() self.builder = CaffeNetBuilder(self.solver_file) self.snapshot_dir = self.builder.snapshot_dir self.builder.build_net(self.owl_net) self.owl_net.compute_size('TEST') self.builder.init_net_from_file(self.owl_net, self.snapshot_dir, self.snapshot) def run(s, layer_name, feature_path): ''' Run feature extractor :param str layer_name: the layer to extract feature from :param str feature_path: feature output path ''' feature_unit = s.owl_net.units[s.owl_net.name_to_uid[layer_name][0]] feature_file = open(feature_path, 'w') batch_dir = 0 for testiteridx in range(s.owl_net.solver.test_iter[0]): s.owl_net.forward('TEST') feature = feature_unit.out.to_numpy() feature_shape = np.shape(feature) img_num = feature_shape[0] feature_length = np.prod(feature_shape[1:len(feature_shape)]) feature = np.reshape(feature, [img_num, feature_length]) for imgidx in range(img_num): for feaidx in range(feature_length): info ='%f ' % (feature[imgidx, feaidx]) feature_file.write(info) feature_file.write('\n') print "Finish One Batch %d" % (batch_dir) batch_dir += 1 feature_file.close() class FilterVisualizer: ''' Class of filter visualizer. Find the most interested patches of a filter to demostrate the pattern that filter insterested in. 
It first read in several images to conduct feed-forward and find the patches have the biggest activation value for a filter. Those patches usually contains the pattern of that filter. :ivar str solver_file: name of the solver_file, it will tell Minerva the network configuration and model saving path :ivar snapshot: saved model snapshot index :ivar str layer_name: name of the layer that will be viusualized, we will visualize all the filters in that layer in one time :ivar str result_path: path for the result of visualization, filtervisualizer will generate a jpg contains the nine selected patches for each filter in layer_name and save the image under result path. :ivar gpu: the gpu to run testing ''' def __init__(self, solver_file, snapshot, layer_name, result_path, gpu_idx = 0): self.solver_file = solver_file self.snapshot = snapshot self.layer_name = layer_name self.result_path = result_path self.gpu = owl.create_gpu_device(gpu_idx) owl.set_device(self.gpu) def build_net(self): self.owl_net = Net() self.builder = CaffeNetBuilder(self.solver_file) self.snapshot_dir = self.builder.snapshot_dir self.builder.build_net(self.owl_net) self.owl_net.compute_size('TEST') self.builder.init_net_from_file(self.owl_net, self.snapshot_dir, self.snapshot) def run(s): #Need Attention, here we may have multiple data layer, just choose the TEST layer data_unit = None for data_idx in range(len(s.owl_net.data_layers)): for i in range(len(s.owl_net.name_to_uid[s.owl_net.data_layers[data_idx]])): if s.owl_net.units[s.owl_net.name_to_uid[s.owl_net.data_layers[data_idx]][i]].params.include[0].phase == 1: data_unit = s.owl_net.units[s.owl_net.name_to_uid[s.owl_net.data_layers[data_idx]][i]] assert(data_unit) bp = BlobProto() #get mean file if len(data_unit.params.transform_param.mean_file) == 0: mean_data = np.ones([3, 256, 256], dtype=np.float32) assert(len(data_unit.params.transform_param.mean_value) == 3) mean_data[0] = data_unit.params.transform_param.mean_value[0] mean_data[1] = data_unit.params.transform_param.mean_value[1] mean_data[2] = data_unit.params.transform_param.mean_value[2] h_w = 256 else: with open(data_unit.params.transform_param.mean_file, 'rb') as f: bp.ParseFromString(f.read()) mean_narray = np.array(bp.data, dtype=np.float32) h_w = np.sqrt(np.shape(mean_narray)[0] / 3) mean_data = np.array(bp.data, dtype=np.float32).reshape([3, h_w, h_w]) #get the cropped img crop_size = data_unit.params.transform_param.crop_size crop_h_w = (h_w - crop_size) / 2 mean_data = mean_data[:, crop_h_w:crop_h_w + crop_size, crop_h_w:crop_h_w + crop_size] feature_unit = s.owl_net.units[s.owl_net.name_to_uid[s.layer_name][0]] batch_dir = 0 #we use 10000 images to conduct visualization all_data = np.zeros([10000, 3, crop_size, crop_size], dtype=np.float32) feature_shape = feature_unit.out_shape all_feature = np.zeros([10000, feature_shape[2], feature_shape[1], feature_shape[0]], dtype=np.float32) print 'Begin Generating Activations from Testing Set' curimg = 0 for testiteridx in range(s.owl_net.solver.test_iter[0]): s.owl_net.forward('TEST') feature = feature_unit.out.to_numpy() batch_size = np.shape(feature)[0] all_feature[curimg:curimg+batch_size,:] = feature data = data_unit.out.to_numpy() all_data[curimg:curimg+batch_size,:] = data curimg += batch_size #HACK TODO: only take 10000 images if curimg >= 10000: break info = 'Now Processed %d images' % (curimg) print info print 'Begin Selecting Patches' #get the result patch_shape = feature_unit.rec_on_ori min_val = -float('inf') #add back the mean file for i in 
range(np.shape(all_data)[0]):
            all_data[i,:,:,:] += mean_data

        if len(feature_shape) == 4:
            #iter for each filter, for each filter, we choose nine patches from different images
            for i in range(feature_shape[2]):
                #create the result image for nine patches
                res_img = np.zeros([feature_unit.rec_on_ori * 3, feature_unit.rec_on_ori * 3, 3])
                filter_feature = np.copy(all_feature[:,i,:,:])
                for patchidx in range(9):
                    maxidx = np.argmax(filter_feature)
                    colidx = maxidx % feature_shape[0]
                    maxidx = (maxidx - colidx) / feature_shape[0]
                    rowidx = maxidx % feature_shape[1]
                    maxidx = (maxidx - rowidx) / feature_shape[1]
                    imgidx = maxidx
                    info = '%d %d %d' % (imgidx, rowidx, colidx)
                    filter_feature[imgidx,:,:] = min_val
                    #get the patch place
                    patch_start_row = max(0, feature_unit.start_on_ori + rowidx * feature_unit.stride_on_ori)
                    patch_end_row = min(feature_unit.start_on_ori + rowidx * feature_unit.stride_on_ori + feature_unit.rec_on_ori, data_unit.crop_size)
                    if patch_start_row == 0:
                        patch_end_row = feature_unit.rec_on_ori
                    if patch_end_row == data_unit.crop_size:
                        patch_start_row = data_unit.crop_size - feature_unit.rec_on_ori
                    patch_start_col = max(0, feature_unit.start_on_ori + colidx * feature_unit.stride_on_ori)
                    patch_end_col = min(feature_unit.start_on_ori + colidx * feature_unit.stride_on_ori + feature_unit.rec_on_ori, data_unit.crop_size)
                    if patch_start_col == 0:
                        patch_end_col = feature_unit.rec_on_ori
                    if patch_end_col == data_unit.crop_size:
                        patch_start_col = data_unit.crop_size - feature_unit.rec_on_ori
                    patch = all_data[imgidx, :, patch_start_row:patch_end_row, patch_start_col:patch_end_col]
                    #save img to image
                    row_in_res = patchidx / 3
                    col_in_res = patchidx % 3
                    st_row = row_in_res * patch_shape
                    st_col = col_in_res * patch_shape
                    #turn gbr into rgb
                    res_img[st_row:st_row+patch_end_row - patch_start_row, st_col:st_col + patch_end_col - patch_start_col, 2] = patch[0,:,:]
                    res_img[st_row:st_row+patch_end_row - patch_start_row, st_col:st_col + patch_end_col - patch_start_col, 1] = patch[1,:,:]
                    res_img[st_row:st_row+patch_end_row - patch_start_row, st_col:st_col + patch_end_col - patch_start_col, 0] = patch[2,:,:]
                #save img
                res_img = Image.fromarray(res_img.astype(np.uint8))
                res_path = '%s/%d.jpg' % (s.result_path, i)
                print res_path
                res_img.save(res_path, format = 'JPEG')
        else:
            #Fully connected layers
            #iter for each filter, for each filter, we choose nine patches from different images
            print feature_shape
            for i in range(feature_shape[0]):
                #create the result image for nine patches
                res_img = np.zeros([data_unit.crop_size * 3, data_unit.crop_size * 3, 3])
                filter_feature = np.copy(all_feature[:,i])
                for patchidx in range(9):
                    maxidx = np.argmax(filter_feature)
                    imgidx = maxidx
                    filter_feature[imgidx] = min_val
                    #save img to image
                    row_in_res = patchidx / 3
                    col_in_res = patchidx % 3
                    st_row = row_in_res * data_unit.crop_size
                    st_col = col_in_res * data_unit.crop_size
                    #turn gbr into rgb
                    patch = all_data[imgidx,:,:,:]
                    res_img[st_row:st_row+data_unit.crop_size, st_col:st_col+data_unit.crop_size, 2] = patch[0,:,:]
                    res_img[st_row:st_row+data_unit.crop_size, st_col:st_col+data_unit.crop_size, 1] = patch[1,:,:]
                    res_img[st_row:st_row+data_unit.crop_size, st_col:st_col+data_unit.crop_size, 0] = patch[2,:,:]
                #save img
                res_img = Image.fromarray(res_img.astype(np.uint8))
                res_path = '%s/%d.jpg' % (s.result_path, i)
                print res_path
                res_img.save(res_path, format = 'JPEG')
nilq/baby-python
python
import numpy as np import torch import torch.nn.functional as F import os, copy, time #from tqdm import tqdm import pandas as pd from ipdb import set_trace path = '/home/vasu/Desktop/project/' ### helper functions def to_np(t): return np.array(t.cpu()) ### Losses def calc_class_weight(x, fac=2): """calculate inverse normalized count, multiply by given factor""" _, counts = np.unique(x, return_counts=True) tmp = 1/counts/sum(counts) tmp /= max(tmp) return tmp*fac def get_class_weights(): # class weights, calculated on the training set df_all = pd.read_csv(path + 'annotations.csv') return { 'red_light': torch.Tensor(calc_class_weight(df_all['red_light'])), 'hazard_stop': torch.Tensor(calc_class_weight(df_all['hazard_stop'])), 'speed_sign': torch.Tensor(calc_class_weight(df_all['speed_sign'])), 'relative_angle': torch.Tensor([1]), 'center_distance': torch.Tensor([1]), 'veh_distance': torch.Tensor([1]), } w = get_class_weights() print(w)
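
# Sketch: feeding one of the computed weight vectors into a weighted
# cross-entropy loss. The logits and targets below are random placeholders,
# purely for illustration.
n_classes = len(w['red_light'])
logits = torch.randn(8, n_classes)
targets = torch.randint(0, n_classes, (8,))
loss = F.cross_entropy(logits, targets, weight=w['red_light'])
print('weighted CE on a random batch:', loss.item())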
nilq/baby-python
python
# Copyright Jetstack Ltd. See LICENSE for details.

# Generates kube-oidc-proxy Changelog

# Call from the branch with two parameters:
# 1. Date from which to start looking
# 2. Github Token

# requires python-dateutil and requests from pip

from subprocess import *
import re
from datetime import datetime
import dateutil.parser
import sys
import requests


def parseIssues(message):
    issuesRet = []
    issues = re.findall('[#][0-9]+', message)
    if issues is not None:
        for issue in issues:
            issuesRet.append(issue[1:])
    return issuesRet


def f4(seq):
    # order preserving
    noDupes = []
    [noDupes.append(i) for i in seq if not noDupes.count(i)]
    return noDupes


headers = {'Authorization': 'token ' + sys.argv[2]}

GIT_COMMIT_FIELDS = ['id', 'author_name', 'author_email', 'date', 'message']
GIT_LOG_FORMAT = ['%H', '%an', '%ae', '%ai', '%s']
GIT_LOG_FORMAT = '%x1f'.join(GIT_LOG_FORMAT) + '%x1e'

#print repo.git.log(p=False)

allIssues = []

p = Popen('git log --format="%s" ' % GIT_LOG_FORMAT, shell=True, stdout=PIPE)
(logb, _) = p.communicate()
log = str(logb, "utf-8")
log = log.strip('\n\x1e').split("\x1e")
log = [row.strip().split("\x1f") for row in log]
log = [dict(zip(GIT_COMMIT_FIELDS, row)) for row in log]

notbefore = dateutil.parser.parse(sys.argv[1] + ' 00:00:00 -0400')

for commit in log:
    created = dateutil.parser.parse(commit['date'])
    if created > notbefore:
        message = commit['message']
        allIssues.extend(parseIssues(message))

allIssues = f4(allIssues)

bylabels = {}

for issue in allIssues:
    issueURL = 'https://api.github.com/repos/TremoloSecurity/kube-oidc-proxy/issues/' + issue
    r = requests.get(issueURL, headers=headers)
    json = r.json()
    if "labels" in json:
        for label in json['labels']:
            if not (label['name'] in bylabels):
                labelGroup = []
                bylabels[label["name"]] = labelGroup
            labelGroup = bylabels[label['name']]
            labelGroup.append(json)

for label in bylabels:
    print('**' + label + 's:**')
    for issue in bylabels[label]:
        print(' - ' + issue['title'] + ' [\\#' + str(issue['number']) + '](' + issue['html_url'] + ')')
    print()
nilq/baby-python
python
from PIL import Image
import numpy as np
from matplotlib import pylab as plt

img = np.array(Image.new("RGB", (28, 28)))
img[:,:,:] = 255
img[2,2,:] = 0
img[2,5,:] = 0
img[2,6,:] = 0
img[2,10,:] = 0
img[2,11,:] = 0
img[3,10,:] = 0
img[3,11,:] = 0

plt.imshow(img)
plt.imsave("p.png", img)
np.save('p.npy', img, allow_pickle=False)
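
# Round-trip sanity check (not in the original): reload the saved array and
# confirm it matches the image that was written out.
loaded = np.load('p.npy')
assert (loaded == img).all()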
nilq/baby-python
python
""" Hyperparameters for MJC peg insertion trajectory optimization. """ from __future__ import division from datetime import datetime import os.path import numpy as np from gps import __file__ as gps_filepath from gps.agent.mjc.agent_mjc import AgentMuJoCo from gps.algorithm.algorithm_traj_opt import AlgorithmTrajOpt from gps.algorithm.cost.cost_fk import CostFK from gps.algorithm.cost.cost_action import CostAction from gps.algorithm.cost.cost_sum import CostSum from gps.algorithm.dynamics.dynamics_lr_prior import DynamicsLRPrior from gps.algorithm.dynamics.dynamics_prior_gmm import DynamicsPriorGMM from gps.algorithm.traj_opt.traj_opt_lqr_python import TrajOptLQRPython from gps.algorithm.policy.lin_gauss_init import init_lqr from gps.proto.gps_pb2 import JOINT_ANGLES, JOINT_VELOCITIES, \ END_EFFECTOR_POINTS, END_EFFECTOR_POINT_VELOCITIES, ACTION from gps.gui.config import generate_experiment_info SENSOR_DIMS = { JOINT_ANGLES: 7, JOINT_VELOCITIES: 7, END_EFFECTOR_POINTS: 6, END_EFFECTOR_POINT_VELOCITIES: 6, ACTION: 7, } PR2_GAINS = np.array([3.09, 1.08, 0.393, 0.674, 0.111, 0.152, 0.098]) BASE_DIR = '/'.join(str.split(gps_filepath, '/')[:-2]) EXP_DIR = BASE_DIR + '/../experiments/mjc_example/' common = { 'experiment_name': 'my_experiment' + '_' + \ datetime.strftime(datetime.now(), '%m-%d-%y_%H-%M'), 'experiment_dir': EXP_DIR, 'data_files_dir': EXP_DIR + 'data_files/', 'target_filename': EXP_DIR + 'target.npz', 'log_filename': EXP_DIR + 'log.txt', 'conditions': 4, } if not os.path.exists(common['data_files_dir']): os.makedirs(common['data_files_dir']) agent = { 'type': AgentMuJoCo, 'filename': './mjc_models/pr2_arm3d.xml', 'x0': np.concatenate([np.array([0.1, 0.1, -1.54, -1.7, 1.54, -0.2, 0]), np.zeros(7)]), 'dt': 0.05, 'substeps': 5, 'conditions': common['conditions'], 'pos_body_idx': np.array([1]), 'pos_body_offset': [[np.array([0, 0.2, 0])], [np.array([0, 0.1, 0])], [np.array([0, -0.1, 0])], [np.array([0, -0.2, 0])]], 'T': 100, 'sensor_dims': SENSOR_DIMS, 'state_include': [JOINT_ANGLES, JOINT_VELOCITIES, END_EFFECTOR_POINTS, END_EFFECTOR_POINT_VELOCITIES], 'obs_include': [], 'camera_pos': np.array([0., 0., 2., 0., 0.2, 0.5]), } algorithm = { 'type': AlgorithmTrajOpt, 'conditions': common['conditions'], 'iterations': 10, } algorithm['init_traj_distr'] = { 'type': init_lqr, 'init_gains': 1.0 / PR2_GAINS, 'init_acc': np.zeros(SENSOR_DIMS[ACTION]), 'init_var': 1.0, 'stiffness': 1.0, 'stiffness_vel': 0.5, 'dt': agent['dt'], 'T': agent['T'], } torque_cost = { 'type': CostAction, 'wu': 5e-5 / PR2_GAINS, } fk_cost = { 'type': CostFK, 'target_end_effector': np.array([0.0, 0.3, -0.5, 0.0, 0.3, -0.2]), 'wp': np.array([1, 1, 1, 1, 1, 1]), 'l1': 0.1, 'l2': 10.0, 'alpha': 1e-5, } algorithm['cost'] = { 'type': CostSum, 'costs': [torque_cost, fk_cost], 'weights': [1.0, 1.0], } algorithm['dynamics'] = { 'type': DynamicsLRPrior, 'regularization': 1e-6, 'prior': { 'type': DynamicsPriorGMM, 'max_clusters': 20, 'min_samples_per_cluster': 40, 'max_samples': 20, }, } algorithm['traj_opt'] = { 'type': TrajOptLQRPython, } algorithm['policy_opt'] = {} config = { 'iterations': algorithm['iterations'], 'num_samples': 5, 'verbose_trials': 1, 'common': common, 'agent': agent, 'gui_on': True, 'algorithm': algorithm, } common['info'] = generate_experiment_info(config)
nilq/baby-python
python
import os import sys PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) sys.path.append(os.path.join(PROJECT_ROOT, 'vendor', 'pyyaml', 'lib')) import yaml from git import apply as git_apply class Patch: def __init__(self, file_path, repo_path): self.file_path = file_path self.repo_path = repo_path def apply(self, reverse=False): return git_apply(self.repo_path, self.file_path, reverse=reverse) def reverse(self): return self.apply(reverse=True) def get_file_path(self): return self.file_path class PatchesList: def __init__(self, patches): self.patches = patches def __len__(self): return len(self.patches) def apply(self, reverse=False, stop_on_error=True): all_patches_applied = True failed_patches = [] for patch in self.patches: applied_successfully = patch.apply(reverse=reverse) if not applied_successfully: all_patches_applied = False failed_patches.append(patch) should_stop_now = not applied_successfully and stop_on_error if should_stop_now: break return (all_patches_applied, failed_patches) def reverse(self, stop_on_error=True): return self.apply(reverse=True, stop_on_error=stop_on_error) class PatchesConfig: @staticmethod def from_directory(dir_path, config_name='.patches.yaml'): config_path = os.path.join(dir_path, config_name) return PatchesConfig(config_path) def __init__(self, config_path): self.path = config_path def __parse(self): contents = None if os.path.isfile(self.path): with open(self.path, 'r') as stream: try: contents = yaml.load(stream) except yaml.YAMLError as e: print(e) return contents def __create_patch(self, raw_data, base_directory, repo_path): relative_file_path = raw_data['file'] absolute_file_path = os.path.join(base_directory, relative_file_path) return Patch(absolute_file_path, repo_path) def get_patches_list(self): config_contents = self.__parse() if config_contents is None: return None repo_path = config_contents['repo'] if sys.platform == 'win32': repo_path = repo_path.replace('/', '\\') patches_data = config_contents['patches'] base_directory = os.path.dirname(self.path) patches = [self.__create_patch(data, base_directory, repo_path) for data in patches_data] patches_list = PatchesList(patches) return patches_list
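

if __name__ == '__main__':
    # Minimal usage sketch; the directory choice is an assumption for
    # illustration. Looks for a .patches.yaml next to this script and
    # applies every patch it lists.
    config = PatchesConfig.from_directory(os.path.dirname(os.path.abspath(__file__)))
    patches_list = config.get_patches_list()
    if patches_list is None:
        print('no .patches.yaml found')
    else:
        all_applied, failed = patches_list.apply()
        print('all patches applied:', all_applied)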
nilq/baby-python
python
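A short, hypothetical usage sketch for the classes above. The directory path is made up, and the .patches.yaml layout (a repo key plus a patches list of file entries) is inferred from get_patches_list:

# Hypothetical usage; 'vendor/some_repo' is an example path.
config = PatchesConfig.from_directory('vendor/some_repo')
patches_list = config.get_patches_list()

if patches_list is not None and len(patches_list) > 0:
    all_applied, failed = patches_list.apply(stop_on_error=True)
    for patch in failed:
        print('failed to apply ' + patch.get_file_path())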
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# See LICENSE coming with the source of python-quilt for details.

import runpy
import sys
from unittest import TestCase

from six.moves import cStringIO

from helpers import tmp_mapping


class Test(TestCase):

    def test_registration(self):
        with tmp_mapping(vars(sys)) as temp_sys:
            temp_sys.set("argv", ["pquilt", "push", "--help"])
            temp_sys.set("stdout", cStringIO())
            try:
                runpy.run_path("pquilt", run_name="__main__")
            except SystemExit as exit:
                self.assertEqual(exit.code, 0)
            self.assertGreater(sys.stdout.getvalue(), "")
nilq/baby-python
python
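The test leans on a tmp_mapping helper imported from the suite's helpers module, which is not shown here. A plausible minimal sketch of such a context manager — record each key before overwriting it, restore everything on exit — might look like this; the real helper may differ:

from contextlib import contextmanager

@contextmanager
def tmp_mapping(mapping):
    # Assumes every key passed to set() already exists in the mapping,
    # which holds for vars(sys)'s 'argv' and 'stdout'.
    saved = {}

    class _Proxy(object):
        def set(self, key, value):
            if key not in saved:
                saved[key] = mapping[key]
            mapping[key] = value

    try:
        yield _Proxy()
    finally:
        mapping.update(saved)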
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from rest_framework.test import APIClient

import json

from django.utils import timezone
from datetime import timedelta

# Create your tests here.
from .models import Contact
from .serializers import ContactSerializer

from agape.people.models import Person
from agape.people.serializers import PersonSerializer

from agape.signals import on

# from .settings import AUTHENTICATION


class ContactTestCase(TestCase):

    def setUp(self):
        self.person = Person(first_name="Elvis", middle_name="Aaron", last_name="Presley",
                             birthday="1935-01-08", gender="m")
        self.person.save()

    def test_sanity(self):
        self.assertTrue(True, "Sane")

    def test_create(self):
        instance = Contact(progenitor=self.person.moniker(), type=Contact.TELEPHONE,
                           label="Home", value="+507 5555 5555")
        instance.save()

        self.assertEqual(instance.id, 1, 'Created contact')
        self.assertEqual(instance.progenitor, 'person:1', 'Progenitor set')
        self.assertEqual(instance.type, Contact.TELEPHONE, 'Contact type set')
        self.assertEqual(instance.value, '+507 5555 5555', 'Contact value set')


class ContactSerializerTestCase(TestCase):

    def setUp(self):
        self.person = Person(first_name="Elvis", middle_name="Aaron", last_name="Presley",
                             birthday="1935-01-08", gender="m")
        self.person.save()

    def test_serialize(self):
        instance = Contact(progenitor=self.person.moniker(), type=Contact.TELEPHONE,
                           label="Home", value="+507 5555 5555")
        instance.save()

        serializer = ContactSerializer(instance)
        expect = {
            'id': 1,
            'progenitor': 'person:1',
            'type': Contact.TELEPHONE,
            'value': '+507 5555 5555',
            'label': 'Home',
        }
        self.assertDictEqual(expect, serializer.data)

    def test_inflate(self):
        data = {
            'progenitor': 'person:1',
            'type': Contact.TELEPHONE,
            'value': '+507 5555 5555',
            'label': 'Home',
        }

        # serializer data is valid
        serializer = ContactSerializer(data=data)
        self.assertTrue(serializer.is_valid(), 'Serializer data is valid')

        # create an instance via the serializer
        instance = serializer.create(serializer.validated_data)
        self.assertTrue(instance, 'Created instance from serializer data')

        # verify instance values
        self.assertEqual(instance.id, 1, 'Contact assigned new ID')
        self.assertEqual(instance.type, Contact.TELEPHONE)
        self.assertEqual(instance.progenitor, 'person:1')
        self.assertEqual(instance.value, '+507 5555 5555')
        self.assertEqual(instance.label, "Home")

        # verify partial modification
        data = {
            'id': instance.id,
            'value': '+507 4444 4444',
        }
        serializer = ContactSerializer(data=data, partial=True)
        self.assertTrue(serializer.is_valid(), 'Serializer data is valid')

        # update the instance
        serializer.update(instance, data)
        self.assertEqual(instance.value, '+507 4444 4444')


class APITestCase(TestCase):

    def setUp(self):
        self.client = APIClient()
        self.person = Person(first_name="Elvis", middle_name="Aaron", last_name="Presley",
                             birthday="1935-01-08", gender="m")
        self.person.save()

    def test_create_contact(self):
        data = {
            'progenitor': 'person:1',
            'type': Contact.TELEPHONE,
            'value': '+507 5555 5555',
            'label': 'Home',
        }

        response = self.client.post('/api/v1/contacts/', data)
        self.assertEqual(response.status_code, 201, "Created new contact")

        self.assertEqual(response.data.get('id'), 1, 'Contact assigned new ID')
        self.assertEqual(response.data.get('progenitor'), "person:1")
        self.assertEqual(response.data.get('type'), Contact.TELEPHONE)
        self.assertEqual(response.data.get('value'), '+507 5555 5555')
        self.assertEqual(response.data.get('label'), "Home")

        # verify actual database record was created
        instance = Contact.objects.get(id=response.data.get('id'))
        self.assertTrue(instance)

    def test_retrieve(self):
        data = {
            'progenitor': 'person:1',
            'type': Contact.TELEPHONE,
            'value': '+507 5555 5555',
            'label': 'Home',
        }

        response = self.client.post('/api/v1/contacts/', data)
        self.assertEqual(response.status_code, 201, "Created new contact")

        response = None
        response = self.client.get('/api/v1/contacts/1/')

        self.assertEqual(response.data.get('id'), 1, 'Contact assigned new ID')
        self.assertEqual(response.data.get('progenitor'), "person:1")
        self.assertEqual(response.data.get('type'), Contact.TELEPHONE)
        self.assertEqual(response.data.get('value'), '+507 5555 5555')
        self.assertEqual(response.data.get('label'), "Home")

    def test_update_contact(self):
        data = {
            'progenitor': 'person:1',
            'type': Contact.TELEPHONE,
            'value': '+507 5555 5555',
            'label': 'Home',
        }

        response = self.client.post('/api/v1/contacts/', data)
        self.assertEqual(response.status_code, 201, "Created new contact")

        data = {
            'value': '+507 4444 4444',
        }
        response = self.client.patch(
            '/api/v1/contacts/{}/'.format(response.data.get('id')),
            json.dumps(data), content_type='application/json')

        self.assertEqual(response.data.get('value'), '+507 4444 4444')

    def test_delete_contact(self):
        data = {
            'progenitor': 'person:1',
            'type': Contact.TELEPHONE,
            'value': '+507 5555 5555',
            'label': 'Home',
        }

        response = self.client.post('/api/v1/contacts/', data)
        self.assertEqual(response.status_code, 201, "Created new contact")

        id = response.data.get('id')
        uri = '/api/v1/contacts/{}/'.format(response.data.get('id'))
        response = self.client.delete(uri)
        self.assertEqual(response.status_code, 204, "Deleted")

        query = Contact.objects.filter(id=id)
        self.assertEqual(len(query), 0, "Deleted")


class ContactsConnectionTestCase(TestCase):

    def setUp(self):
        self.client = APIClient()

        from .connector import ContactsConnector
        connector = ContactsConnector()
        connector.connect_to_entity('person')
        # connector.connect('agape.people')

    def test_create(self):
        data = {
            'first_name': 'Elvis',
            'middle_name': 'Aaron',
            'last_name': 'Presley',
            'birthday': "1935-01-08",
            'gender': 'm',
            'contacts': [
                {'type': Contact.TELEPHONE, 'label': 'Home', 'value': 'Priceless'},
                {'type': Contact.EMAIL, 'label': 'Work', 'value': 'test@example.com'},
            ],
        }

        response = self.client.post('/api/v1/people/', data)
        self.assertEqual(response.status_code, 201, "Created new person")

        # find the contact that was created
        contact = Contact.objects.get(id=1)
        self.assertEqual(contact.value, 'Priceless', "Created phone record")

        contact = Contact.objects.get(id=2)
        self.assertEqual(contact.value, 'test@example.com', "Created email record")


# class PeopleContactsConnectionTestCase(TestCase):

#     def setUp(self):
#         self.client = APIClient()

#         # create callbacks on person creation
#         scope = {}

#         def catch_incoming_data(o, request):
#             scope['contactData'] = None
#             scope['contactData'] = request.data.get('contacts', [])

#         def create_contacts(o, person):
#             print(scope['contactData'])
#             for contact in scope['contactData']:
#                 contact['progenitor'] = person.moniker()

#             serializer = ContactSerializer(data=scope['contactData'], many=True)
#             if serializer.is_valid():
#                 recordSet = serializer.save()

#         on('person.create:before', catch_incoming_data)
#         on('person.create:success', create_contacts)

#     def test_create_contacts(self):
#         data = {
#             'first_name': 'Elvis',
#             'middle_name': 'Aaron',
#             'last_name': 'Presley',
#             'birthday': "1935-01-08",
#             'gender': 'm',
#             'contacts': [
#                 {'type': Contact.TELEPHONE, 'label': 'Home', 'value': 'Priceless'},
#                 {'type': Contact.EMAIL, 'label': 'Work', 'value': 'test@example.com'},
#             ],
#         }

#         response = self.client.post('/api/v1/people/', data)
#         self.assertEqual(response.status_code, 201, "Created new person")

#         # find the contact that was created
#         contact = Contact.objects.get(id=1)
#         self.assertEqual(contact.value, 'Priceless', "Created phone record")

#         contact = Contact.objects.get(id=2)
#         self.assertEqual(contact.value, 'test@example.com', "Created email record")
nilq/baby-python
python
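These tests assume Person.moniker() yields strings like 'person:1', which Contact.progenitor stores as a loose polymorphic reference. The method lives in agape.people.models and is not shown here; a hypothetical sketch of the convention:

from django.db import models

class Person(models.Model):
    # 'entity' is an assumed class-level name; only moniker()'s output
    # format ('person:1') is actually pinned down by the tests above.
    entity = 'person'

    def moniker(self):
        return '{}:{}'.format(self.entity, self.id)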
#!/usr/bin/python

import sys


def __test1__(__host__):
    import pylibmc
    print 'Testing <pylibmc> ... %s' % (__host__)
    mc = pylibmc.Client([__host__], binary=True,
                        behaviors={"tcp_nodelay": True, "ketama": True})
    print mc
    mc["some_key"] = "Some value"
    print mc["some_key"]
    assert(mc["some_key"] == "Some value")
    print 'DONE !!!'


def __test2__(__host__):
    import umemcache
    print 'Testing <umemcache> ... %s' % (__host__)
    mc = umemcache.Client(__host__)
    mc.connect()
    print mc
    mc.set('key', 'Some value')
    print mc.get('key')[0]
    #mc["some_key"] = "Some value"
    #print mc["some_key"][0]
    assert(mc.get('key')[0] == "Some value")
    print 'DONE !!!'


try:
    __test1__('127.0.0.1:11211')
    __test1__('memcached1.fs7l9z.cfg.use1.cache.amazonaws.com:11211')
except:
    print 'ERROR #1'
    sys.exit(1)

try:
    __test2__('127.0.0.1:11211')
    __test2__('memcached1.fs7l9z.cfg.use1.cache.amazonaws.com:11211')
except:
    print 'ERROR #2'
    sys.exit(1)

print "It's all good."
nilq/baby-python
python
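One caveat in the script above: the bare except: blocks swallow the real failure, so 'ERROR #1' says nothing about which host or call broke. A hedged variant using only the standard library keeps the exit behaviour but reports the traceback:

import sys
import traceback

try:
    __test1__('127.0.0.1:11211')
except Exception:
    traceback.print_exc()  # shows the failing call instead of a bare label
    sys.exit(1)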
import json
import argparse

import requests

parser = argparse.ArgumentParser()
parser.add_argument("--extension-uuid", dest="extension_uuid", type=str)
parser.add_argument("--gnome-version", dest="gnome_version", type=str)
args = parser.parse_args()


def get_extension_url(extension_uuid, gnome_version):
    base_url = "https://extensions.gnome.org"

    info = requests.get(f"{base_url}/ajax/detail/", params={
        "uuid": extension_uuid
    }).json()

    version_tag = info["shell_version_map"][gnome_version]["pk"]

    download_url = (f"{base_url}/download-extension/"
                    f"{extension_uuid}.shell-extension.zip"
                    f"?version_tag={version_tag}")

    return json.dumps({
        "download_url": download_url,
        "extension_uuid": extension_uuid
    })


print(get_extension_url(args.extension_uuid, args.gnome_version))
nilq/baby-python
python
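A hedged follow-on sketch: parse the JSON the script prints and fetch the archive itself. The UUID and GNOME version below are example values, not taken from the script:

import json
import requests

# Example inputs; substitute a real UUID/version pair from extensions.gnome.org.
result = json.loads(get_extension_url("example-extension@example.org", "3.38"))

response = requests.get(result["download_url"])
response.raise_for_status()

with open(result["extension_uuid"] + ".shell-extension.zip", "wb") as f:
    f.write(response.content)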