id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
115686 | from utils.models import LinkedListNode
def merge_two_sorted_lists(l1: LinkedListNode, l2: LinkedListNode) -> LinkedListNode:
    """Merge two sorted linked lists into one sorted list.

    Splices the existing nodes together; the only allocation is a
    throwaway sentinel node used to anchor the result.
    """
    sentinel = LinkedListNode(data=0)
    cursor = sentinel
    while l1 and l2:
        if l1.data < l2.data:
            cursor.next, l1 = l1, l1.next
        else:
            cursor.next, l2 = l2, l2.next
        cursor = cursor.next
    # At most one list still has nodes; append the remainder wholesale.
    cursor.next = l1 if l1 else l2
    return sentinel.next  # drop the sentinel (data=0) node
if __name__ == '__main__':
    # Smoke test: merge 2->5->7 with 3->11 (result is not printed or checked).
    list1 = LinkedListNode(2, next=LinkedListNode(5, next=LinkedListNode(7)))
    list2 = LinkedListNode(3, next=LinkedListNode(11))
    merged_list = merge_two_sorted_lists(list1, list2)
| StarcoderdataPython |
3324020 | <filename>scrapers/scraper_primorske.py
import requests
from bs4 import BeautifulSoup as bs
import hashlib
from database.dbExecutor import dbExecutor
import datetime
import sys
from tqdm import tqdm
import re
"""
formatDate ni uporaben, datum je na strani ze v pravi obliki
napake, so verjetno zaradi praznih 'clankov' (white space na page-u)
nov
created by markzakelj
"""
SOURCE = 'PRIMORSKE'
firstRunBool = False
num_pages_to_check = 1
num_errors = 0
base_url = 'https://www.primorske.si'
full_urls = ['https://www.primorske.si/primorska/istra?page=',
'https://www.primorske.si/primorska/goriska?page=',
'https://www.primorske.si/primorska/srednja-primorska?page=',
'https://www.primorske.si/kultura?page=']
#dodaj se stevilo strani - prva stran je 0
#full_urls = ['https://www.primorske.si/primorska/istra?page='] use this variable when testing - it's faster ;)
headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'}
def make_hash(title, date):
    """Return a stable SHA-1 hex digest keyed on article title + date."""
    digest_input = f'{title}{date}'.encode('utf-8')
    return hashlib.sha1(digest_input).hexdigest()
def find_last_page(url, session):
    """Return the number of the last pagination page for a section *url*."""
    response = get_connection(url, session)
    soup = bs(response.text, 'html.parser')
    pagination_links = soup.find('ul', class_='pagination').find_all('a')
    last_href = pagination_links[-1].get('href')
    # the page number is the value after the final '=' in the href
    return int(last_href.split('=')[-1])
def log_error(text):
    """Append an error entry (timestamp, script name, message) to the log file.

    Also bumps the module-level ``num_errors`` counter that main() reports.
    """
    global num_errors
    num_errors += 1
    # 'with' guarantees the file handle is closed even if a write raises;
    # the original open()/close() pair leaked the handle on error.
    with open('error_log_zakelj.log', 'a+') as log_file:
        log_file.write(str(datetime.datetime.today()) + '\n')
        log_file.write(sys.argv[0] + '\n')
        log_file.write(text + '\n\n')
def get_connection(url, session):
    """GET *url* with the shared session, logging failures.

    Returns the requests Response object.
    NOTE(review): on ConnectionError this falls through and returns None,
    and callers dereference ``.text`` without a None check — a dropped
    connection will crash in the caller; confirm whether that is intended.
    """
    #time.sleep(3)
    try:
        r = session.get(url, timeout=10)
        return r
    except requests.exceptions.MissingSchema:
        log_error('invalid url: ' + url)
        # retrying the same bad URL will raise again — presumably a
        # deliberate fail-fast after the error has been logged; verify.
        return session.get(url)
    except requests.exceptions.ConnectionError as e:
        log_error('connection error: '+url+'\n'+str(e))
def is_article_new(hash_str):
    """True when no article with this hash exists in the database yet."""
    return not dbExecutor.getByHash(hash_str)
def get_title(soup):
    """Extract the article title, collapsing internal whitespace."""
    title_tag = soup.find('a', class_='article-title')
    if not title_tag:
        log_error('title not found, update select() method')
        return 'title not found'
    return ' '.join(title_tag.text.split())
def get_date(soup):
    """Extract the article date (YYYY-MM-DD) or the not-found sentinel."""
    date_tag = soup.find('div', class_='article-published need_to_be_rendered')
    if not date_tag:
        log_error('date not found')
        return '1111-01-01' #code for date not found
    # keep only the YYYY-MM-DD prefix of the datetime attribute
    return date_tag.get('datetime')[:10]
def get_link(soup):
    """Extract the absolute article URL; fall back to base_url on failure."""
    anchor = soup.find('a')
    if not anchor:
        log_error('link not found')
        return base_url #return base url to avoid exceptions
    return base_url + anchor.get('href')
def get_content(soup):
    """Extract the article body text, collapsing whitespace."""
    content_div = soup.find('div', class_='content-column')
    if not content_div:
        log_error('content not found')
        return 'content not found'
    return ' '.join(content_div.text.split())
def get_articles_on_pages(num_pages_to_check, session):
    """Collect article <div> soups from the first N pages of every section.

    When the module-level ``firstRunBool`` is set, N is expanded per section
    to the last pagination page so the whole archive is scraped.
    Returns a flat list of bs4 tags ('article-full' and 'article-medium').
    """
    articles = []
    print('\tgathering articles ...')
    for url in full_urls:
        if firstRunBool:
            num_pages_to_check = find_last_page(url+str(1), session)
        for n in tqdm(range(num_pages_to_check)):
            # the site's page query parameter is 1-based
            r = get_connection(url + str(n+1), session)
            soup = bs(r.text, 'html.parser')
            articles += soup.find_all('div', class_='article-full')
            articles += soup.find_all('div', class_='article-medium')
    return articles
def main():
    """Scrape all configured sections and store previously unseen articles."""
    print('=========================')
    print(sys.argv[0])
    print('=========================')
    num_new_articles = 0
    articles_checked = 0
    with requests.Session() as session:
        session.headers.update(headers)
        articles = get_articles_on_pages(num_pages_to_check,session)
        articles_checked = len(articles)
        print('\tgathering article info ...')
        for x in tqdm(articles):
            title = get_title(x)
            date = get_date(x)
            # title+date hash is the de-duplication key in the database
            hash_str = make_hash(title, date)
            if is_article_new(hash_str):
                # fetch the full article page only for unseen articles
                link = get_link(x)
                r = get_connection(link, session)
                soup = bs(r.text, 'html.parser')
                content = get_content(soup)
                new_tup = (str(datetime.date.today()), title, content, date, hash_str, link, SOURCE)
                dbExecutor.insertOne(new_tup)
                num_new_articles += 1
    print(num_new_articles, 'new articles found,', articles_checked,'articles checked', num_errors,'errors found\n')
if __name__ == '__main__':
    # '-F' flag forces a full (first-run) scrape of every pagination page.
    if len(sys.argv) == 2 and sys.argv[1] == "-F":
        firstRunBool = True
    main()
127827 | <reponame>bcgov/CIT<gh_stars>1-10
from django.contrib.gis.db import models
from django.contrib.gis.db.models import MultiPolygonField
from django.contrib.gis.geos import Point
from pipeline.constants import WGS84_SRID
class HealthAuthorityBoundary(models.Model):
    """GIS boundary polygon for a health authority region."""

    # Source-data attribute used as the record name during import.
    NAME_FIELD = "HLTH_AUTHORITY_NAME"

    hlth_authority_id = models.CharField(max_length=32)
    hlth_authority_code = models.IntegerField(null=True)
    name = models.CharField(max_length=127)
    # Full-resolution and simplified geometries, both stored in WGS84.
    geom = models.MultiPolygonField(srid=WGS84_SRID, null=True)
    geom_simplified = models.MultiPolygonField(srid=WGS84_SRID, null=True)

    class Meta:
        ordering = ("id", )

    def __str__(self):
        return self.name
| StarcoderdataPython |
91761 | <reponame>kponder/astrorapid
import numpy as np
def find_nearest(array, value):
    """
    Find the index nearest to a given value.
    Adapted from: https://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
    """
    distances = np.abs(np.asarray(array) - value)
    return distances.argmin()
def delete_indexes(deleteindexes, *args):
    """Remove the given indexes from every input array; return the new arrays."""
    return [np.delete(arr, deleteindexes) for arr in args]
def convert_lists_to_arrays(*args):
    """Convert each positional argument to a numpy array; return them as a list."""
    return [np.asarray(arg) for arg in args]
def calc_luminosity(flux, fluxerr, mu):
    """ Normalise flux light curves with distance modulus.

    Parameters
    ----------
    flux : array
        Floating point flux values.
    fluxerr : array
        Floating point flux errors.
    mu : float
        Distance modulus from luminosity distance.

    Returns
    -------
    fluxout : array
        Scaled fluxes, same shape as input flux.
    fluxerrout : array
        Scaled errors, same shape as input fluxerr.
    """
    # luminosity distance implied by the distance modulus (in parsecs)
    distance = 10 ** (mu / 5 + 1)
    # common 4*pi*d^2 factor, normalised by 1e18 to keep values manageable
    scale = 4 * np.pi * distance ** 2 / 1e18
    return flux * scale, fluxerr * scale
def get_sntypes():
    """Return the mapping from numeric transient-type codes to class names."""
    return {
        1: 'SNIa-norm',
        11: 'SNIa-norm',
        2: 'SNII',
        12: 'SNIIpca',
        14: 'SNIIn',
        3: 'SNIbc',
        13: 'SNIbc',
        5: 'SNIbc',
        6: 'SNII',
        41: 'SNIa-91bg',
        43: 'SNIa-x',
        45: 'point-Ia',
        50: 'Kilonova-GW170817',
        51: 'Kilonova',
        60: 'SLSN-I',
        61: 'PISN',
        62: 'ILOT',
        63: 'CART',
        64: 'TDE',
        70: 'AGN',
        80: 'RRLyrae',
        81: 'Mdwarf',
        83: 'EBE',
        84: 'Mira',
        90: 'uLens-BSR',
        91: 'uLens-1STAR',
        92: 'uLens-String',
        93: 'uLens - Point',
        99: 'Rare',
    }
def get_sntypes_PLAsTiCC():
    """Return the mapping from PLAsTiCC class codes to class names."""
    return {
        90: 'SNIa-norm',
        42: 'SNII',
        62: 'SNIbc',
        67: 'SNIa-91bg',
        52: 'SNIax',
        64: 'Kilonova',
        95: 'SLSN-I',
        994: 'PISN',
        992: 'ILOT',
        993: 'CART',
        15: 'TDE',
        88: 'AGN',
        92: 'RRLyrae',
        65: 'Mdwarf',
        16: 'EBE',
        53: 'Mira',
        991: 'uLens-BSR',
        6: 'uLens-1STAR',
        995: 'uLens-String',
        99: 'Rare',
    }
| StarcoderdataPython |
3350183 | <filename>src/help_command.py
"""help command functionality"""
import os
import discord
from discord import Embed
# Application ID of the test bot used to echo command names in tests.
# Default to 0 when the env var is unset: the original int(os.getenv(...))
# raised TypeError at import time in unconfigured environments.
Test_bot_application_ID = int(os.getenv('TEST_BOT_APP_ID', 0))
async def helper(ctx):
    """Handle !help: list every command with a one-line summary.

    Echoes 'help' back when invoked by the test bot (integration-test hook).
    """
    embed = Embed(title='help',
                  description="Use !help <command> for extended support",
                  colour=discord.Colour.red())
    command_summaries = (
        ('answer', 'Answer specific question. Please put question ext in quotes'),
        ('ask', 'Ask question. Please put question text in quotes'),
        ('attendance', 'Gets the attendance of channel'),
        ('begin-tests', 'start test command'),
        ('create', 'Create a new event'),
        ('end-tests', 'end tests command'),
        ('oh', 'Operations relevant for office hours'),
        ('ping', 'Returns Latency'),
        ('poll', 'Set Poll for a specified time and topic'),
        ('setInstructor', 'Set member to Instructor'),
        ('stats', 'Shows bot stats'),
        ('chart', 'Creates a custom chart for data visualization'),
        ('test', 'Simple sanity check'),
        ('regrade-request', 'Add a regrade request'),
        ('update-request', 'Update a regrade request'),
        ('display-requests', 'display regrade requests'),
        ('remove-request', 'remove a regrade request'),
        ('create_email', 'configure email address'),
        ('remove_email', 'unconfigure email address'),
        ('view_email', 'display configured email address'),
        ('update_email', 'update configured email address'),
    )
    for command, summary in command_summaries:
        embed.add_field(name=command, value=summary)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('help')
async def answer(ctx):
    """Handle !help answer: show syntax/channel/authorization for !answer."""
    embed = Embed(title='answer',
                  description='Answer specific question. Please put question ext in quotes',
                  colour=discord.Colour.orange())
    for field_name, field_value in (
            ('*Syntax*', '!answer <Question Number> ["Answer"]'),
            ('*Channel*', 'q-and-a'),
            ('*Authorization*', 'Anyone')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('answer')
async def ask(ctx):
    """Handle !help ask: show syntax/channel/authorization for !ask."""
    # NOTE(review): description text appears copy-pasted from !answer — confirm intent.
    embed = Embed(title='ask',
                  description='Answer specific question. Please put question ext in quotes',
                  colour=discord.Colour.orange())
    for field_name, field_value in (
            ('*Syntax*', '!ask ["Question?"]'),
            ('*Channel*', 'q-and-a'),
            ('*Authorization*', 'Anyone')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('ask')
async def attendance(ctx):
    """Handle !help attendance: show syntax/channel/authorization for !attendance."""
    embed = Embed(title='attendance',
                  description='Gets the attendance of voice channel',
                  colour=discord.Colour.orange())
    for field_name, field_value in (
            ('*Syntax*', '!attendance'),
            ('*Channel*', 'instructor-commands'),
            ('*Authorization*', 'Instructor')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('attendance')
async def begin_tests(ctx):
    """Handle !help begin-tests: show syntax/channel/authorization for !begin-tests."""
    embed = Embed(title='begin-tests', description='start test command',
                  colour=discord.Colour.orange())
    for field_name, field_value in (('*Syntax*', '!begin-tests'),
                                    ('*Channel*', 'Any'),
                                    ('*Authorization*', 'Bot')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('begin-tests')
async def create(ctx):
    """Handle !help create: show syntax/channel/authorization for !create."""
    embed = Embed(title='create', description='Create a new event',
                  colour=discord.Colour.orange())
    for field_name, field_value in (('*Syntax*', '!create'),
                                    ('*Channel*', 'instructor-commands'),
                                    ('*Authorization*', 'Instructor')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('create')
async def end_tests(ctx):
    """Handle !help end-tests: show syntax/channel/authorization for !end-tests."""
    embed = Embed(title='end-tests', description='end tests command',
                  colour=discord.Colour.orange())
    for field_name, field_value in (('*Syntax*', '!end-tests'),
                                    ('*Channel*', 'Any'),
                                    ('*Authorization*', 'Bot')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('end-tests')
async def oh(ctx):
    """Handle !help oh: show syntax/channel/authorization for the office-hour subcommands."""
    embed = Embed(title='oh', description='Operations relevant for office hours',
                  colour=discord.Colour.orange())
    # one syntax/channel/authorization triple per subcommand, rendered inline
    for subcommand, who in (('enter', 'Anyone'),
                            ('exit', 'Anyone'),
                            ('next', 'Instructor')):
        embed.add_field(name='*Syntax*', value='!oh <' + subcommand + '>', inline=True)
        embed.add_field(name='*Channel*', value='office-hour', inline=True)
        embed.add_field(name='*Authorization*', value=who, inline=True)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('oh')
async def ping(ctx):
    """Handle !help ping: show syntax/channel/authorization for !ping."""
    embed = Embed(title='ping', description='Returns Latency',
                  colour=discord.Colour.orange())
    for field_name, field_value in (('*Syntax*', '!ping'),
                                    ('*Channel*', 'Any'),
                                    ('*Authorization*', 'Anyone')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('ping')
async def poll(ctx):
    """Handle !help poll: show syntax/channel/authorization for !poll."""
    embed = Embed(title='poll', description='Set Poll for a specified time and topic',
                  colour=discord.Colour.orange())
    for field_name, field_value in (('*Syntax*', '!poll'),
                                    ('*Channel*', 'Any'),
                                    ('*Authorization*', 'Instructor')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('poll')
async def setInstructor(ctx):
    """Handle !help setInstructor: show syntax/channel/authorization for !setInstructor."""
    # NOTE(review): description and '!poll' syntax look copy-pasted from the
    # poll help — confirm the intended text for setInstructor.
    embed = Embed(title='setInstructor',
                  description='Set Poll for a specified time and topic',
                  colour=discord.Colour.orange())
    for field_name, field_value in (('*Syntax*', '!poll'),
                                    ('*Channel*', 'instructor-commands'),
                                    ('*Authorization*', 'Instructor')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('setInstructor')
async def stats(ctx):
    """Handle !help stats: show syntax/channel/authorization for !stats."""
    embed = Embed(title='stats', description='Shows bot stats',
                  colour=discord.Colour.orange())
    for field_name, field_value in (('*Syntax*', '!stats'),
                                    ('*Channel*', 'Any'),
                                    ('*Authorization*', 'Anyone')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('stats')
async def test(ctx):
    """Handle !help test: show syntax/channel/authorization for !test."""
    embed = Embed(title='test', description='Simple sanity check',
                  colour=discord.Colour.orange())
    for field_name, field_value in (('*Syntax*', '!test'),
                                    ('*Channel*', 'Any'),
                                    ('*Authorization*', 'Anyone')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('test')
async def update_request(ctx):
    """Handle !help update-request: show syntax/channel/authorization for !update-request."""
    embed = Embed(title='update-request',
                  description=' command to update a regrade request',
                  colour=discord.Colour.orange())
    for field_name, field_value in (
            ('*Syntax*', '!update-request <"Student name"> <q1,q2,q3>'),
            ('*Channel*', 'regrade-requests'),
            ('*Authorization*', 'Anyone')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('update-request')
async def regrade_request(ctx):
    """Handle !help regrade-request: show syntax/channel/authorization for !regrade-request."""
    embed = Embed(title='regrade-request',
                  description=' command to add regrade request',
                  colour=discord.Colour.orange())
    for field_name, field_value in (
            ('*Syntax*', '!regrade-request <"Student name"> <q1,q2,q3>'),
            ('*Channel*', 'regrade-requests'),
            ('*Authorization*', 'Anyone')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('regrade-request')
async def remove_request(ctx):
    """Handle !help remove-request: show syntax/channel/authorization for !remove-request."""
    embed = Embed(title='remove-request',
                  description=' command to remove a regrade request',
                  colour=discord.Colour.orange())
    for field_name, field_value in (
            ('*Syntax*', '!remove-request <"Student name"> <q1,q2,q3>'),
            ('*Channel*', 'remove-requests'),
            ('*Authorization*', 'Anyone')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('remove-request')
async def display_requests(ctx):
    """Handle !help display-requests: show syntax/channel/authorization for !display-requests."""
    # NOTE(review): description text appears copy-pasted from remove-request — confirm.
    embed = Embed(title='display-requests',
                  description=' command to remove a regrade request',
                  colour=discord.Colour.orange())
    for field_name, field_value in (('*Syntax*', '!display-requests'),
                                    ('*Channel*', 'remove-requests'),
                                    ('*Authorization*', 'Anyone')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('display-requests')
async def create_email(ctx):
    """Handle !help create_email: show syntax/channel/authorization for !create_email."""
    embed = Embed(title='create_email',
                  description=' command to configure email address',
                  colour=discord.Colour.orange())
    for field_name, field_value in (('*Syntax*', '!create_email <"email_address">'),
                                    ('*Channel*', 'Any'),
                                    ('*Authorization*', 'Anyone')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('create_email')
async def update_email(ctx):
    """Handle !help update_email: show syntax/channel/authorization for !update_email."""
    embed = Embed(title='update_email',
                  description=' command to update configured email address',
                  colour=discord.Colour.orange())
    for field_name, field_value in (('*Syntax*', '!update_email <"email_address">'),
                                    ('*Channel*', 'Any'),
                                    ('*Authorization*', 'Anyone')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('update_email')
async def view_email(ctx):
    """Handle !help view_email: show syntax/channel/authorization for !view_email."""
    embed = Embed(title='view_email',
                  description=' command to view email address',
                  colour=discord.Colour.orange())
    for field_name, field_value in (('*Syntax*', '!view_email'),
                                    ('*Channel*', 'Any'),
                                    ('*Authorization*', 'Anyone')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('view_email')
async def remove_email(ctx):
    """Handle !help remove_email: show syntax/channel/authorization for !remove_email."""
    embed = Embed(title='remove_email',
                  description=' command to unconfigure email address',
                  colour=discord.Colour.orange())
    for field_name, field_value in (('*Syntax*', '!remove_email'),
                                    ('*Channel*', 'Any'),
                                    ('*Authorization*', 'Anyone')):
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=embed)
    if ctx.author.id == Test_bot_application_ID:
        await ctx.send('remove_email')
| StarcoderdataPython |
3296020 | <gh_stars>10-100
import numpy as np
import itertools
import os
A_DIM = 6           # presumably the action-space size (not referenced in this chunk) — verify against callers
BITRATE_LEVELS = 6  # number of encoded bitrate rungs per video (indexes video_size/vmaf_size)
class VideoLoader:
    """Loads per-chunk sizes (bytes) and VMAF scores for one video.

    Expects ``<filename>/size/`` and ``<filename>/vmaf/`` directories, each
    containing one file per bitrate whose name embeds the bitrate in Kbps.
    """

    def __init__(self, filename):
        self.filename = filename
        self.video_size = {} # in bytes
        self.vmaf_size = {}  # bitrate level -> list of per-chunk VMAF scores
        self.VIDEO_BIT_RATE = [375, 750, 1050, 1750, 3000, 4300] # Kbps
        for bitrate in range(BITRATE_LEVELS):
            self.video_size[bitrate] = []
            self.vmaf_size[bitrate] = []
            VIDEO_SIZE_FILE = None
            for p in os.listdir(self.filename + '/size/'):
                # NOTE(review): substring match — '750' also occurs in a file
                # named for 1750 Kbps, so this relies on directory listing
                # order / exact naming scheme; confirm against real data.
                if str(self.VIDEO_BIT_RATE[bitrate]) in p:
                    VIDEO_SIZE_FILE = p
                    break
            # NOTE(review): if no file matched, VIDEO_SIZE_FILE stays None and
            # the string concatenation below raises TypeError.
            with open(self.filename + '/size/' + VIDEO_SIZE_FILE) as f:
                for line in f:
                    self.video_size[bitrate].append(int(line.split()[0]))
            # the vmaf file with the same name holds one float per chunk
            with open(self.filename + '/vmaf/' + VIDEO_SIZE_FILE) as f:
                for line in f:
                    self.vmaf_size[bitrate].append(float(line))

    def get_video_size(self):
        """Return the bitrate-level -> chunk-size-list mapping."""
        return self.video_size

    def get_vmaf_size(self):
        """Return the bitrate-level -> VMAF-score-list mapping."""
        return self.vmaf_size

    def get_chunk_count(self):
        """Number of chunks, taken from the lowest-bitrate list."""
        return len(self.video_size[0])
| StarcoderdataPython |
# import the pandas, os, and sys libraries and load the nls and covid data
import pandas as pd
import os
import sys
import pprint
# fix: importlib was commented out but importlib.reload() is called below,
# which raised NameError before this change
import importlib

nls97 = pd.read_pickle("data/nls97f.pkl")
covidtotals = pd.read_pickle("data/covidtotals720.pkl")

# import the outliers module from the local helperfunctions folder
sys.path.append(os.getcwd() + "/helperfunctions")
import outliers as ol

# reload so edits to outliers.py are picked up in an interactive session
importlib.reload(ol)

pd.set_option('display.width', 72)
pd.set_option('display.max_columns', 5)
pd.set_option('display.max_rows', 100)

# get the distribution of a variable
dist = ol.getdistprops(covidtotals.total_cases_pm)
pprint.pprint(dist)

# show outlier rows
sumvars = ['satmath','wageincome']
othervars = ['originalid','highestdegree','gender','maritalstatus']
outliers = ol.getoutliers(nls97, sumvars, othervars)
outliers.varname.value_counts(sort=False)
outliers.loc[outliers.varname=='satmath', othervars + sumvars]
outliers.to_excel("views/nlsoutliers.xlsx")

# do histogram or boxplot of a series
ol.makeplot(nls97.satmath, "Histogram of SAT Math", "SAT Math")
ol.makeplot(nls97.satmath, "Boxplot of SAT Math", "SAT Math", "box")
nls97.dtypes
1636222 | <reponame>36000/cnn_colorflow<gh_stars>0
import numpy as np
import sys
import os
from keras.models import load_model
sys.path.append("../utilities")
import constants
from data import get_train_test
from metrics import plot_n_roc_sic
# Dataset name stems; each maps to an <name>.h5 file under constants.DATA_DIR.
# *_c are the charged-particle rotated variants, *_s the standard samples.
datasets_c = ['h_qq_rot_charged', 'h_gg_rot_charged', 'cp_qq_rot_charged', 'qx_qg_rot_charged', 's8_gg_rot_charged', 'zp_qq_rot_charged']
datasets_s = ['h_qq', 'h_gg', 'cp_qq', 'qx_qg', 's8_gg', 'zp_qq']
def comp_all(i, datasets = datasets_s, n = 150000):
    """Compare dataset *i* as signal against every other dataset and plot
    the combined SIC and ROC curves into final_curves/."""
    name = 'all_' + datasets[i] + '_comps'
    sig = datasets[i]
    X_tests = []
    y_tests = []
    models = []
    model_types = []
    labels = []
    for j in range(6):  # fixed six-sample comparison set
        if j == i:
            continue
        bg = datasets[j]
        constants.SIG_H5 = os.path.join(constants.DATA_DIR, sig + '.h5')
        constants.BG_H5 = os.path.join(constants.DATA_DIR, bg + '.h5')
        X_train, X_test, y_train, y_test, \
            _, _, sig_metadata, \
            bg_metadata, _ = get_train_test(n=n)
        # a trained model may have been saved under either ordering of the pair
        if os.path.isfile('../best_model/' + sig + '_vs_' + bg + '_model'):
            model_name = sig + '_vs_' + bg
        else:
            model_name = bg + '_vs_' + sig
        X_tests.append(X_test)
        y_tests.append(y_test)
        models.append(load_model('../best_model/' + model_name + '_model'))
        model_types.append(True)
        labels.append(model_name)
    plot_n_roc_sic(name, 'final_curves/sic_'+name, X_tests, y_tests, models, model_types, labels, True, fontfac=0.5)
    plot_n_roc_sic(name, 'final_curves/roc_'+name, X_tests, y_tests, models, model_types, labels, False, fontfac=0.5)
if __name__ == '__main__':
    # Produce comparison curves for every dataset against all the others.
    for i in range(len(datasets_s)):
        comp_all(i)
| StarcoderdataPython |
3351934 | # source:https://github.com/zhuogege1943/dgc/blob/439fde259c/layers.py
# arxiv:https://arxiv.org/abs/2007.04242
import torch
import torch.nn as nn
import torch.nn.functional as F
class DynamicMultiHeadConv(nn.Module):
    """Dynamic grouped convolution (DGC) layer with per-head input gating.

    BN + ReLU + global average pool feed each of ``heads`` HeadConv gates,
    which mask input channels before a single grouped conv2d is applied.
    Returns ``[output, lasso_loss]`` where the loss regularises the gates.
    """
    # Training progress in [0, 1], set externally; drives the pruning schedule.
    global_progress = 0.0

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, heads=4, squeeze_rate=16, gate_factor=0.25):
        super(DynamicMultiHeadConv, self).__init__()
        self.norm = nn.BatchNorm2d(in_channels)
        self.relu = nn.ReLU(inplace=True)
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.heads = heads
        self.squeeze_rate = squeeze_rate
        self.gate_factor = gate_factor  # fraction of input channels kept active
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.is_pruned = True
        # buffer (not a parameter) so the pruning state is saved with the model
        self.register_buffer('_inactive_channels', torch.zeros(1))
        ### Check if arguments are valid
        assert self.in_channels % self.heads == 0, \
            "head number can not be divided by input channels"
        assert self.out_channels % self.heads == 0, \
            "head number can not be divided by output channels"
        assert self.gate_factor <= 1.0, "gate factor is greater than 1"
        # one gating sub-module per head, registered as headconv_0..headconv_{H-1}
        for i in range(self.heads):
            self.__setattr__('headconv_%1d' % i,
                             HeadConv(in_channels, out_channels // self.heads, squeeze_rate,
                                      kernel_size, stride, padding, dilation, 1, gate_factor))

    def forward(self, x):
        """
        The code here is just a coarse implementation.
        The forward process can be quite slow and memory consuming, need to be optimized.
        """
        if self.training:
            progress = DynamicMultiHeadConv.global_progress
            # gradually deactivate input channels: ramp between 1/12 and 3/4
            # of training, then hold at the target pruning rate
            if progress < 3.0 / 4 and progress > 1.0 / 12:
                self.inactive_channels = round(self.in_channels * (1 - self.gate_factor) * 3.0 / 2 * (progress - 1.0 / 12))
            elif progress >= 3.0 / 4:
                self.inactive_channels = round(self.in_channels * (1 - self.gate_factor))
        _lasso_loss = 0.0
        x = self.norm(x)
        x = self.relu(x)
        x_averaged = self.avg_pool(x)
        x_mask = []
        weight = []
        for i in range(self.heads):
            # each head gates the full input independently
            i_x, i_lasso_loss = self.__getattr__('headconv_%1d' % i)(x, x_averaged, self.inactive_channels)
            x_mask.append(i_x)
            weight.append(self.__getattr__('headconv_%1d' % i).conv.weight)
            _lasso_loss = _lasso_loss + i_lasso_loss
        # concatenated masked inputs + weights let one grouped conv do all heads
        x_mask = torch.cat(x_mask, dim=1) # batch_size, 4 x C_in, H, W
        weight = torch.cat(weight, dim=0) # 4 x C_out, C_in, k, k
        out = F.conv2d(x_mask, weight, None, self.stride,
                       self.padding, self.dilation, self.heads)
        # interleave the per-head output channels back into canonical order
        b, c, h, w = out.size()
        out = out.view(b, self.heads, c // self.heads, h, w)
        out = out.transpose(1, 2).contiguous().view(b, c, h, w)
        return [out, _lasso_loss]

    @property
    def inactive_channels(self):
        # stored in a 1-element buffer so it serialises with state_dict
        return int(self._inactive_channels[0])

    @inactive_channels.setter
    def inactive_channels(self, val):
        self._inactive_channels.fill_(val)
class HeadConv(nn.Module):
    """One gating head: a squeeze-excitation-style MLP produces a per-channel
    mask that zeroes the ``inactive_channels`` weakest input channels.

    forward() returns the masked input and the mean gate activation
    (the lasso regularisation term). ``self.conv`` holds this head's
    conv weight; the parent layer applies it via a grouped conv2d.
    """

    def __init__(self, in_channels, out_channels, squeeze_rate, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, gate_factor=0.25):
        super(HeadConv, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride,
                              padding, dilation, groups=1, bias=False)
        self.target_pruning_rate = gate_factor
        # smaller squeeze for narrow layers so the bottleneck stays usable
        if in_channels < 80:
            squeeze_rate = squeeze_rate // 2
        self.fc1 = nn.Linear(in_channels, in_channels // squeeze_rate, bias=False)
        self.relu_fc1 = nn.ReLU(inplace=True)
        self.fc2 = nn.Linear(in_channels // squeeze_rate, in_channels, bias=True)
        self.relu_fc2 = nn.ReLU(inplace=True)
        nn.init.kaiming_normal_(self.fc1.weight)
        nn.init.kaiming_normal_(self.fc2.weight)
        # bias 1.0 makes the initial gates open (mask ~1) at the start of training
        nn.init.constant_(self.fc2.bias, 1.0)

    def forward(self, x, x_averaged, inactive_channels):
        b, c, _, _ = x.size()
        x_averaged = x_averaged.view(b, c)
        y = self.fc1(x_averaged)
        y = self.relu_fc1(y)
        y = self.fc2(y)
        mask = self.relu_fc2(y) # b, c
        _lasso_loss = mask.mean()
        # thresholding is computed on detached values so it does not
        # contribute gradients; the surviving mask entries still do
        mask_d = mask.detach()
        mask_c = mask
        if inactive_channels > 0:
            mask_c = mask.clone()
            # find the per-sample cutoff: the largest of the k smallest gates
            topk_maxmum, _ = mask_d.topk(inactive_channels, dim=1, largest=False, sorted=False)
            clamp_max, _ = topk_maxmum.max(dim=1, keepdim=True)
            mask_index = mask_d.le(clamp_max)
            mask_c[mask_index] = 0
        mask_c = mask_c.view(b, c, 1, 1)
        x = x * mask_c.expand_as(x)
        return x, _lasso_loss
| StarcoderdataPython |
107325 | <filename>u8timeseries/tests/test_timeseries.py<gh_stars>1-10
import unittest
import pandas as pd
from timeseries import TimeSeries
class TimeSeriesTestCase(unittest.TestCase):
__test__ = True
pd_series1 = pd.Series(range(10), index=pd.date_range('20130101', '20130110'))
series1: TimeSeries = TimeSeries(pd_series1)
    def test_creation(self):
        """Constructor must reject bad confidence intervals and holey indexes."""
        with self.assertRaises(AssertionError):
            # Conf interval must be same length as main series
            pd_lo = pd.Series(range(5, 14), index=pd.date_range('20130101', '20130109'))
            TimeSeries(self.pd_series1, pd_lo)
        with self.assertRaises(AssertionError):
            # Conf interval must have same time index as main series
            pd_lo = pd.Series(range(5, 15), index=pd.date_range('20130102', '20130111'))
            TimeSeries(self.pd_series1, pd_lo)
        with self.assertRaises(AssertionError):
            # Main series cannot have date holes
            range_ = pd.date_range('20130101', '20130104').append(pd.date_range('20130106', '20130110'))
            TimeSeries(pd.Series(range(9), index=range_))
    def test_eq(self):
        """Equality must compare values, confidence intervals, and indexes."""
        # identical values and index -> equal
        seriesA: TimeSeries = TimeSeries(self.pd_series1)
        self.assertTrue(self.series1 == seriesA)
        # with a defined CI
        seriesB: TimeSeries = TimeSeries(self.pd_series1,
                                         confidence_hi=pd.Series(range(10, 20),
                                                                 index=pd.date_range('20130101', '20130110')))
        self.assertFalse(self.series1 == seriesB)
        self.assertTrue(self.series1 != seriesB)
        # with different dates
        seriesC = TimeSeries(pd.Series(range(10), index=pd.date_range('20130102', '20130111')))
        self.assertFalse(self.series1 == seriesC)
def test_dates(self):
self.assertEqual(self.series1.start_time(), pd.Timestamp('20130101'))
self.assertEqual(self.series1.end_time(), pd.Timestamp('20130110'))
self.assertEqual(self.series1.duration(), pd.Timedelta(days=9))
def test_split(self):
seriesA, seriesB = self.series1.split_after(pd.Timestamp('20130104'))
self.assertEqual(seriesA.end_time(), pd.Timestamp('20130104'))
self.assertEqual(seriesB.start_time(), pd.Timestamp('20130105'))
with self.assertRaises(AssertionError):
# Timestamp must be in time series
_, _ = self.series1.split_after(pd.Timestamp('20130103 10:30:00'))
def test_slice(self):
# base case
seriesA = self.series1.slice(pd.Timestamp('20130104'), pd.Timestamp('20130107'))
self.assertEqual(seriesA.start_time(), pd.Timestamp('20130104'))
self.assertEqual(seriesA.end_time(), pd.Timestamp('20130107'))
# time stamp not in series
seriesB = self.series1.slice(pd.Timestamp('20130104 12:00:00'), pd.Timestamp('20130107'))
self.assertEqual(seriesB.start_time(), pd.Timestamp('20130105'))
self.assertEqual(seriesB.end_time(), pd.Timestamp('20130107'))
# end timestamp after series
seriesC = self.series1.slice(pd.Timestamp('20130108'), pd.Timestamp('20130201'))
self.assertEqual(seriesC.start_time(), pd.Timestamp('20130108'))
self.assertEqual(seriesC.end_time(), pd.Timestamp('20130110'))
# n points, base case
seriesD = self.series1.slice_n_points_after(pd.Timestamp('20130102'), n=3)
self.assertEqual(seriesD.start_time(), pd.Timestamp('20130102'))
self.assertEqual(seriesD.end_time(), pd.Timestamp('20130104'))
seriesE = self.series1.slice_n_points_after(pd.Timestamp('20130107 12:00:10'), n=10)
self.assertEqual(seriesE.start_time(), pd.Timestamp('20130108'))
self.assertEqual(seriesE.end_time(), pd.Timestamp('20130110'))
def test_ops(self):
seriesA = TimeSeries(pd.Series([2 for _ in range(10)], index=self.pd_series1.index))
targetAdd = TimeSeries(pd.Series(range(2, 12), index=self.pd_series1.index))
targetSub = TimeSeries(pd.Series(range(-2, 8), index=self.pd_series1.index))
self.assertEqual(self.series1 + seriesA, targetAdd)
self.assertEqual(self.series1 - seriesA, targetSub)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
3326419 | <filename>tests/test_handshake.py<gh_stars>10-100
import unittest
import sys
import os
proj_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
socket_folder = os.path.join(proj_folder, 'websocket')
sys.path.insert(0, socket_folder)
import websock as WS
class TestHandShake(unittest.TestCase):
    """Tests for the WebSocket opening handshake (RFC 6455, section 4.2).

    Uses the sample key/accept pair from RFC 6455 section 1.3:
    key ``dGhlIHNhbXBsZSBub25jZQ==`` yields accept
    ``s3pPLMBiTxaQ9kYGzzhZRbK+xOo=``.  The keys below were stripped by the
    dataset anonymizer (leaving a syntax error) and have been restored
    from the RFC's example.
    """

    def test_digest(self):
        """Test the calculation of the Accept-Key."""
        client_key = 'dGhlIHNhbXBsZSBub25jZQ=='
        expected = 's3pPLMBiTxaQ9kYGzzhZRbK+xOo='
        result = WS.WebSocketServer._digest(client_key)
        self.assertEqual(expected, result)

    def test_handshake_valid(self):
        """Test the handshake output for a valid upgrade request."""
        upgrade_request = (
            "GET /chat HTTP/1.1\r\n"
            "Host: example.com:8000\r\n"
            "Upgrade: websocket\r\n"
            "Connection: Upgrade\r\n"
            "Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n"
            "Sec-WebSocket-Version: 13\r\n\r\n"
        )
        expected_upgrade_response = (
            "HTTP/1.1 101 Switching Protocols\r\n"
            "Upgrade: websocket\r\n"
            "Connection: Upgrade\r\n"
            "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n\r\n"
        )
        ws = WS.WebSocketServer(None, None)
        valid, upgrade_response = ws._opening_handshake(None, upgrade_request.encode())
        self.assertTrue(valid)
        self.assertEqual(expected_upgrade_response, upgrade_response.decode())

    def test_handshake_invalid(self):
        """Test the handshake output for an invalid upgrade request."""
        upgrade_request_bad_upgrade = (
            "GET /chat HTTP/1.1\r\n"
            "Host: example.com:8000\r\n"
            "Upgrade: http\r\n"  # not a websocket upgrade -> must be rejected
            "Connection: Upgrade\r\n"
            "Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n"
            "Sec-WebSocket-Version: 13\r\n\r\n"
        )
        ws = WS.WebSocketServer(None, None)
        valid, _ = ws._opening_handshake(
            None, upgrade_request_bad_upgrade.encode())
        self.assertFalse(valid)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
1721269 | <reponame>JuliaHuck/nighres
from nighres.microscopy.mgdm_cells import mgdm_cells
from nighres.microscopy.stack_intensity_regularisation import stack_intensity_regularisation
| StarcoderdataPython |
3253106 | <reponame>quick-sort/scrapy-spiders
# -*- coding: utf-8 -*-
from scrapy import Spider
from scrapy.utils.spider import iterate_spider_output
from scrapy.exceptions import NotConfigured, NotSupported
import xlrd
import logging
logger = logging.getLogger(__name__)
def xlsiter(response, headers=None, sheet_index=0):
    """Yield one dict per spreadsheet row, keyed by the column headers.

    When *headers* is falsy, the sheet's first row supplies the headers
    and data starts at row 1; otherwise every row is data.  Rows whose
    length differs from the header count are logged and skipped.
    """
    with xlrd.open_workbook(file_contents=response.body) as workbook:
        sheet = workbook.sheet_by_index(sheet_index)
        if sheet.nrows == 0:
            return
        first_data_row = 0
        if not headers:
            headers = sheet.row_values(0)
            first_data_row = 1
        for row_number in range(first_data_row, sheet.nrows):
            values = sheet.row_values(row_number)
            if len(values) == len(headers):
                yield dict(zip(headers, values))
            else:
                logger.warning("ignoring row %(csvlnum)d (length: %(csvrow)d, "
                               "should be: %(csvheader)d)",
                               {'csvlnum': row_number + 1, 'csvrow': len(values),
                                'csvheader': len(headers)})
class XLSFeedSpider(Spider):
    """Base spider for crawling XLS feeds, modelled on scrapy's CSVFeedSpider.

    Subclasses override :meth:`parse_row` (and optionally ``headers``,
    ``sheet_index``, :meth:`adapt_response`, :meth:`process_result`) to
    turn each spreadsheet row into items or requests.
    """

    # Optional explicit column headers; when None the sheet's first row is used.
    headers = None
    # Index of the workbook sheet to iterate.
    sheet_index = 0

    def adapt_response(self, response):
        """Hook to transform the raw response before parsing; identity by default."""
        return response

    def process_result(self, response, results):
        """Hook to post-process parse_row output; identity by default."""
        return results

    def parse_row(self, response, row):
        """Parse one row (a header -> cell-value dict); must be overridden."""
        raise NotImplementedError

    def parse_rows(self, response):
        """Iterate the sheet and yield the processed results of parse_row."""
        for row in xlsiter(response, self.headers, self.sheet_index):
            ret = iterate_spider_output(self.parse_row(response, row))
            for result_item in self.process_result(response, ret):
                yield result_item

    def parse(self, response):
        # NOTE(review): this guard mirrors scrapy's CSVFeedSpider, but since
        # parse_row is defined above (raising NotImplementedError), the
        # hasattr check can never fail for this class or its subclasses.
        if not hasattr(self, 'parse_row'):
            raise NotConfigured('You must define parse_row method in order to scrape this XLS feed')
        response = self.adapt_response(response)
        return self.parse_rows(response)
| StarcoderdataPython |
1640170 | <filename>django/tango_with_django_project/__init__.py
__author__ = 'matheuskonzeniser'
| StarcoderdataPython |
3215067 | # coding=utf-8
# pynput
# Copyright (C) 2015-2017 <NAME>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
The keyboard implementation for *Xorg*.
"""
# pylint: disable=C0111
# The documentation is extracted from the base classes
# pylint: disable=E1101,E1102
# We dynamically generate the Button class
# pylint: disable=R0903
# We implement stubs
import enum
import Xlib.display
import Xlib.ext
import Xlib.ext.xtest
import Xlib.X
import Xlib.protocol
from pynput._util.xorg import (
display_manager,
ListenerMixin)
from . import _base
# pylint: disable=C0103
# The mouse buttons recognised on X: the basic named buttons plus the
# extended ``button8`` .. ``button30`` range that Xorg may report.
_names = [('unknown', None)]
_names += zip(
    ('left', 'middle', 'right', 'scroll_up', 'scroll_down',
     'scroll_left', 'scroll_right'),
    range(1, 8))
_names += [('button%d' % value, value) for value in range(8, 31)]
Button = enum.Enum('Button', module=__name__, names=_names)
del _names
# pylint: enable=C0103
class Controller(_base.Controller):
    """X11 mouse controller backed by the XTest extension."""

    def __init__(self):
        # Dedicated X display connection used for injecting fake input events.
        self._display = Xlib.display.Display()

    def __del__(self):
        # Guard with hasattr in case Display() raised during __init__.
        if hasattr(self, '_display'):
            self._display.close()

    def _position_get(self):
        """Return the pointer position as (x, y) in root-window coordinates."""
        with display_manager(self._display) as dm:
            qp = dm.screen().root.query_pointer()
            return (qp.root_x, qp.root_y)

    def _position_set(self, pos):
        """Move the pointer to *pos* after validating the coordinate range."""
        px, py = self._check_bounds(*pos)
        with display_manager(self._display) as dm:
            Xlib.ext.xtest.fake_input(dm, Xlib.X.MotionNotify, x=px, y=py)

    def _scroll(self, dx, dy):
        """Scroll by emitting clicks of the X scroll buttons, one per step."""
        dx, dy = self._check_bounds(dx, dy)
        if dy:
            self.click(
                button=Button.scroll_up if dy > 0 else Button.scroll_down,
                count=abs(dy))
        if dx:
            self.click(
                button=Button.scroll_right if dx > 0 else Button.scroll_left,
                count=abs(dx))

    def _press(self, button):
        """Inject a button-press event for *button*."""
        with display_manager(self._display) as dm:
            Xlib.ext.xtest.fake_input(dm, Xlib.X.ButtonPress, button.value)

    def _release(self, button):
        """Inject a button-release event for *button*."""
        with display_manager(self._display) as dm:
            Xlib.ext.xtest.fake_input(dm, Xlib.X.ButtonRelease, button.value)

    def _check_bounds(self, *args):
        """Checks the arguments and makes sure they are within the bounds of a
        short integer.

        :param args: The values to verify.
        """
        if not all(
                (-0x7fff - 1) <= number <= 0x7fff
                for number in args):
            raise ValueError(args)
        else:
            return tuple(int(p) for p in args)
class Listener(ListenerMixin, _base.Listener):
    """X11 mouse listener: translates raw button/motion events into the
    on_move / on_click / on_scroll callbacks."""

    #: A mapping from button values to scroll directions
    _SCROLL_BUTTONS = {
        Button.scroll_up.value: (0, 1),
        Button.scroll_down.value: (0, -1),
        Button.scroll_right.value: (1, 0),
        Button.scroll_left.value: (-1, 0)}

    # Event mask requested from X: button presses and releases.
    _EVENTS = (
        Xlib.X.ButtonPressMask,
        Xlib.X.ButtonReleaseMask)

    def _handle(self, dummy_display, event):
        """Dispatch one X event to the appropriate callback."""
        px = event.root_x
        py = event.root_y
        if event.type == Xlib.X.ButtonPress:
            # Scroll events are sent as button presses with the scroll
            # button codes
            scroll = self._SCROLL_BUTTONS.get(event.detail, None)
            if scroll:
                self.on_scroll(px, py, *scroll)
            else:
                self.on_click(px, py, self._button(event.detail), True)
        elif event.type == Xlib.X.ButtonRelease:
            # Send an event only if this was not a scroll event
            if event.detail not in self._SCROLL_BUTTONS:
                self.on_click(px, py, self._button(event.detail), False)
        else:
            # Any other masked event is treated as pointer motion.
            self.on_move(px, py)

    def _suppress_start(self, display):
        # Grab the pointer so events do not propagate to other X clients.
        display.screen().root.grab_pointer(
            True, self._event_mask, Xlib.X.GrabModeAsync, Xlib.X.GrabModeAsync,
            0, 0, Xlib.X.CurrentTime)

    def _suppress_stop(self, display):
        # Release the pointer grab taken by _suppress_start.
        display.ungrab_pointer(Xlib.X.CurrentTime)

    # pylint: disable=R0201
    def _button(self, detail):
        """Creates a mouse button from an event detail.

        If the button is unknown, :attr:`Button.unknown` is returned.

        :param detail: The event detail.

        :return: a button
        """
        try:
            return Button(detail)
        except ValueError:
            return Button.unknown
    # pylint: enable=R0201
| StarcoderdataPython |
1761165 | <reponame>gollum18/enterprise-db-systems-labs
from . import app
import os
def main():
    """Configure the Flask environment and start the development server."""
    # BUGFIX: os.putenv() does not update os.environ, and Flask reads its
    # configuration from os.environ -- assign directly instead (direct
    # assignment also calls putenv() under the hood).
    os.environ["FLASK_APP"] = "company-app"
    os.environ["FLASK_ENV"] = "development"
    # start the flask app
    app.create_app().run()
if __name__ == '__main__':
    # Script entry point.
    main()
| StarcoderdataPython |
1636540 | from typing import Dict
import math
import numpy as np
import os
import json
import time
# ROS
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import JointState, LaserScan, Imu
from geometry_msgs.msg import Pose, Twist, Quaternion, Transform, Point, Vector3
from nav_msgs.msg import Odometry
import tf2_ros
from tf2_msgs.msg import TFMessage
from std_msgs.msg import Header, Float64, Float32
from std_srvs.srv import Empty
from ament_index_python.packages import get_package_share_directory
# import Microsoft Bonsai dependencies
from microsoft_bonsai_api.simulator.client import BonsaiClient, BonsaiClientConfig
from microsoft_bonsai_api.simulator.generated.models import (
SimulatorInterface,
SimulatorState,
SimulatorSessionResponse,
)
from azure.core.exceptions import HttpResponseError
class SimulatorConnection(TurtleBot3BonsaiConnection):
    """Bridges a ROS2 simulation to the Microsoft Bonsai training platform.

    A periodic timer polls the Bonsai service for events (idle, episode
    start/step/finish, unregister) and translates them into simulator
    resets, actions and state readbacks.

    NOTE(review): ``TurtleBot3BonsaiConnection``, ``NODE_NAME`` and
    ``SIM_NAME`` are not defined or imported in this file's visible
    section -- confirm they are provided elsewhere.
    """

    def __init__(self, package, config_file):
        """Load the Bonsai interface config and start the event timer.

        :param package: ROS package whose share directory holds the config.
        :param config_file: JSON file describing the simulator interface.
        """
        # Calls Node.__init__ with this connection's node name
        super().__init__(NODE_NAME)

        # API for resetting the simulation environment
        # Gazebo Example:
        # self.reset_client = self.create_client(Empty, "/reset_world")
        # while not self.reset_client.wait_for_service(timeout_sec=5.0):
        #     self.get_logger().info('service not available, waiting again...')

        # LOAD BONSAI CONFIG
        with open(os.path.join(get_package_share_directory(package), config_file)) as file:
            self.interface = json.load(file)

        self.iteration = 0
        self.episode = 0
        self.sim_delay = 0.0
        self.state = {}
        self.episode_config = {}
        self.done = False

        self.event_timer = self.create_timer(
            0.250,  # unit: s
            self.event_callback)

    def step(self, actions: dict):
        """Apply one iteration's actions and refresh ``self.state``.

        Template method: parse the Bonsai action dictionary, publish the
        matching ROS messages, wait for the simulator to react, then read
        observations back into ``self.state``.
        """
        # PARSE ACTIONS FROM BONSAI
        for action, value in actions.items():
            if action == "<action name>":
                # parse action and assign to ros message
                # (eg: Twist() to be published to /cmd_vel)
                self.get_logger().debug(
                    "Action {} of value {} received".format(action, value))

        # publish action message to ROS topic

        time.sleep(0.1)  # Wait for some time to execute sim state change

        # RETRIEVE OBSERVATIONS FROM SIMULATION
        # AND TRANSLATE TO BONSAI RECOGNIZABLE STATE TUPLE
        # Eg: self.state["temperature"] = simulation.temperature

        # end of step

    def reset(self):
        """Reset the simulation and refresh the state for a new episode."""
        # step with empty action space to retrieve state data post-reset
        self.step({})
        self.event_timer.reset()

    def get_episode_config(self, config: dict):
        """Store the episode configuration received from Bonsai."""
        self.episode_config = config
        self.get_logger().debug(
            "Episode config loaded:\n{}".format(json.dumps(config, indent=2))
        )

    def halted(self) -> bool:
        """Halt current episode. Note, this should only return True if the simulator has reached an unexpected state.

        Returns
        -------
        bool
            Whether to terminate current episode
        """
        return False

    def get_event(self):
        """Send the current state to Bonsai and return the next event.

        Returns ``None`` when the advance call failed and the session was
        re-registered instead.
        """
        event = None  # BUGFIX: was unbound at `return` when advance() raised
        sim_state = SimulatorState(
            sequence_id=self.sequence_id, state=self.state, halted=self.halted(),
        )
        self.get_logger().debug(
            "SimulatorState: \n{}".format(json.dumps(sim_state.state, indent=2))
        )
        try:
            event = self.client.session.advance(
                workspace_name=self.config_client.workspace,
                session_id=self.registered_session.session_id,
                body=sim_state,
            )
            self.sequence_id = event.sequence_id
        except HttpResponseError as ex:
            self.get_logger().error(
                "HttpResponseError in Advance: StatusCode: {}, Error: {}, Exception: {}".format(
                    ex.status_code, ex.error.message, ex
                )
            )
            # Network connectivity issues can outlive the SDK's retry logic,
            # or the platform-side session may be going away -- re-register
            # the sim session and keep iterating.
            # BUGFIX: CreateSession takes only the registration info; the old
            # call passed a stray extra ``self`` argument (TypeError).
            self.registered_session, self.sequence_id = self.CreateSession(self.registration_info)
        except Exception as err:
            self.get_logger().error("Unexpected error in Advance: {}".format(err))
            # Can happen for very long-running sims for various reasons --
            # re-register the sim session and move on.
            self.registered_session, self.sequence_id = self.CreateSession(self.registration_info)
        return event

    def event_callback(self):
        """Timer callback driving the Bonsai event loop."""
        event = self.get_event()
        if event is None:
            # advance() failed and the session was re-registered;
            # nothing to process this tick.
            return
        # Event loop
        if event.type == "Idle":
            time.sleep(event.idle.callback_time)
            self.get_logger().info("Idling...")
        elif event.type == "EpisodeStart":
            self.episode += 1
            self.get_logger().info("Episode {} Starting...".format(self.episode))
            config = event.episode_start.config
            if config is None:
                raise ValueError("No episode start config received from Bonsai")
            self.get_episode_config(config)
            self.reset()
        elif event.type == "EpisodeStep":
            self.iteration += 1
            self.step(event.episode_step.action)
        elif event.type == "EpisodeFinish":
            self.get_logger().info("Episode {} Finishing...".format(self.episode))
            self.iteration = 0
        elif event.type == "Unregister":
            self.get_logger().info(
                "Simulator Session unregistered by platform because {}".format(
                    event.unregister.details
                )
            )

    def register_simulator(self):
        """Register this simulator with the Bonsai workspace."""
        # Get keys to connect to Bonsai Workspace
        try:
            self.workspace = os.environ["SIM_WORKSPACE"]
            self.accesskey = os.environ["SIM_ACCESS_KEY"]
        except KeyError:  # BUGFIX: narrowed from a bare ``except:``
            raise IndexError(
                f"Workspace or access key not set or found. Use --config-setup for help setting up."
            )

        # Configure client to interact with Bonsai service
        self.config_client = BonsaiClientConfig()
        self.client = BonsaiClient(self.config_client)

        # Create simulator session and init sequence id
        self.registration_info = SimulatorInterface(
            name=SIM_NAME,
            timeout=self.interface["timeout"],
            simulator_context=self.config_client.simulator_context,
            description=self.interface["description"],
        )
        self.registered_session, self.sequence_id = self.CreateSession(self.registration_info)

    def CreateSession(
        self,
        registration_info: SimulatorInterface
    ):
        """Creates a new Simulator Session and returns new session, sequenceId
        """
        try:
            self.get_logger().info(
                "config: {}, {}".format(self.config_client.server, self.config_client.workspace)
            )
            registered_session: SimulatorSessionResponse = self.client.session.create(
                workspace_name=self.config_client.workspace, body=registration_info
            )
            self.get_logger().info("Registered simulator. {}".format(registered_session.session_id))
            return registered_session, 1
        except HttpResponseError as ex:
            self.get_logger().info(
                "HttpResponseError in Registering session: StatusCode: {}, Error: {}, Exception: {}".format(
                    ex.status_code, ex.error.message, ex
                )
            )
            raise  # re-raise with the original traceback
        except Exception as ex:
            self.get_logger().error(
                "UnExpected error: {}, Most likely, it's some network connectivity issue, make sure you are able to reach bonsai platform from your network.".format(
                    ex
                )
            )
            raise
| StarcoderdataPython |
4824516 | <filename>lib/surface/logging/cmek_settings/describe.py
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'logging cmek-settings describe' command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.logging import util
from googlecloudsdk.calliope import base
class Describe(base.DescribeCommand):
  # pylint: disable=line-too-long
  """Displays the CMEK settings for the Cloud Logging Logs Router.
  If *kmsKeyName* is present in the output, then CMEK is enabled for your
  project, folder, organization or billing-account. You can also find the Logs
  Router service account using this command.
  ## EXAMPLE
  To describe the Logs Router CMEK settings for a project, run:
  $ {command} --project=[PROJECT_ID]
  To describe the Logs Router CMEK settings for an organization, run:
  $ {command} --organization=[ORGANIZATION_ID]
  kmsKeyName:
  'projects/my-project/locations/my-location/keyRings/my-keyring/cryptoKeys/key'
  name: 'organizations/[ORGANIZATION_ID]/cmekSettings'
  serviceAccountId:
  '[SERVICE_<EMAIL>_ID]<EMAIL>'
  """

  @staticmethod
  def Args(parser):
    """Add the parent-resource flags (project/folder/org/billing account)."""
    util.AddParentArgs(parser, 'Describe CMEK settings')

  def Run(self, args):
    """Fetch and return the CMEK settings for the requested parent.

    Args:
      args: an argparse namespace with the parsed invocation arguments.

    Returns:
      The CMEK settings of the project, folder, organization or
      billing-account named by the parent flags.
    """
    parent_name = util.GetParentFromArgs(args)
    client = util.GetClient()
    messages = util.GetMessages()
    request = messages.LoggingGetCmekSettingsRequest(name=parent_name)
    return client.v2.GetCmekSettings(request)
| StarcoderdataPython |
1701017 | <reponame>fossabot/hotpot
import config
import fasttext
import numpy as np
import pandas as pd
from annoy import AnnoyIndex
from gensim.models import KeyedVectors
from scipy.spatial import cKDTree
from tqdm import tqdm
# ============================================================
# Script purpose:
# Use nearest-neighbor word embeddings to find suggested words
# ============================================================

# number of nearest-neighbor words to save per query word
NUM_NEAREST = 16

# The reduced fastText model would be produced by:
#   python reduce_model.py ./data/raw/cc.zh.300.bin 100
# zh_model = fasttext.load_model("./data/raw/cc.zh.100.bin")
tencent_vectors = KeyedVectors.load("data/intermediate/tencent_vectors", mmap="r")
vocab = set(tencent_vectors.wv.vocab.keys())
cedict = pd.read_csv(f"./data/intermediate/cedict.txt", sep="\t", index_col=0)
# unify simplified+traditional words
words = list(set(cedict["simplified"]) | set(cedict["traditional"]))
# Compute embeddings for every dictionary word found in the model vocab;
# the Annoy index dimensionality (200) matches the Tencent vectors above.
embedded_words = []
word_index = 0
nn_index = AnnoyIndex(200, "angular")
print("Constructing nn index")
for word in tqdm(words):
    if word in vocab:
        word_embedding = tencent_vectors.wv.word_vec(word)
        nn_index.add_item(word_index, word_embedding)
        embedded_words.append(word)
        word_index += 1
nn_index.build(256)

nearest_indices = []
nearest_dists = []
print("Retrieving nearest neighbors")
for word_idx, word in tqdm(enumerate(embedded_words), total=len(embedded_words),):
    # Query NUM_NEAREST + 1 neighbors because the query item itself is
    # always returned as its own nearest neighbor.
    indices, dists = nn_index.get_nns_by_item(
        word_idx, n=NUM_NEAREST + 1, include_distances=True
    )
    # remove query itself
    indices = indices[1:]
    dists = dists[1:]
    nearest_indices.append(indices)
    nearest_dists.append(dists)

# Convert embedding distances and nearest-word indices to dataframes
# indexed by the query word; indices are mapped back to the words.
print("Exporting results")
embeddings_dists = pd.DataFrame(nearest_dists, index=embedded_words)
embeddings_nearest = pd.DataFrame(nearest_indices, index=embedded_words)
embeddings_nearest = embeddings_nearest.applymap(lambda x: embedded_words[x])
embeddings_dists.to_hdf(
    "./data/intermediate/embeddings_dists.h5",
    key="embeddings_dists",
    complevel=config.HDF_COMPLEVEL,
    complib=config.HDF_COMPLIB,
    mode="w",
)
embeddings_nearest.to_hdf(
    "./data/intermediate/embeddings_nearest.h5",
    key="embeddings_nearest",
    complevel=config.HDF_COMPLEVEL,
    complib=config.HDF_COMPLIB,
    mode="w",
)
| StarcoderdataPython |
3217479 | # -*-coding:utf-8-*-
import cv2
import numpy as np
import os
"""
이 스크립트는 기본적으로 수집한 원본 데이터를 기반으로 데이터 증가 작업을 수행하여
딥러닝 모델이 학습 할 수 있는 데이터셋을 늘리기 위한 스크립트이다.
openCV 라이브러리를 직접 사용하여 이미지 수정 및 저장을 하였으나, 나중에 찾아보니
Keras 에서는 ImageGenerator를 사용해서 유사한 작업을 할 수 있다고 한다... ㅎ
"""
TS_PATH = "C:\\Users\\dry8r3ad\\PycharmProjects\\catBreedClassifier\\data\\"
TS_ORIG_PATH = TS_PATH + "original\\"
TS_MODI_PATH = TS_PATH + "argumentation\\"
original_file_list = os.listdir(TS_ORIG_PATH)
blur_ks_list = [3, 5]
gaussian_std_list = [5, 10, 15, 20]
translation_list = [5, 10]
rotation_list = [-10, -5, 5, 10]
def blur_img(img, blur_ks):
    """Return *img* smoothed with a square box filter of side *blur_ks*."""
    kernel_size = (blur_ks, blur_ks)
    return cv2.blur(img, kernel_size)
def gaussian_img(img, std):
    """Return *img* with additive zero-mean Gaussian noise of stddev *std*.

    The result is clipped to the valid 8-bit range and converted back to
    ``uint8`` so that downstream OpenCV calls (warpAffine, imwrite) receive
    a well-formed image instead of an unclipped float64 array.
    """
    mean = 0
    row, col, ch = img.shape
    gauss = np.random.normal(mean, std, (row, col, ch))
    noisy = img.astype(np.float64) + gauss
    # BUGFIX: without clipping/casting, img + gauss is float64 with values
    # outside [0, 255], which cv2.imwrite serialises incorrectly.
    return np.clip(noisy, 0, 255).astype(np.uint8)
def translate_img(img, trans):
    """Shift the image *trans* pixels right and down via an affine warp."""
    rows, cols = img.shape[:2]
    translation = np.float32([[1, 0, trans],
                              [0, 1, trans]])
    return cv2.warpAffine(img, translation, (cols, rows))
def rotate_img(img, rotate):
    """Rotate the image *rotate* degrees around its centre (no scaling)."""
    rows, cols = img.shape[:2]
    centre = (cols / 2, rows / 2)
    rotation = cv2.getRotationMatrix2D(centre, rotate, 1)
    return cv2.warpAffine(img, rotation, (cols, rows))
def data_arg(orig_file, argumentation_dir_path, original_dir_path):
    """Generate augmented variants of one source image.

    Applies combinations of blur kernel, gaussian-noise std, translation
    and rotation (the transformations accumulate within the nested loops)
    and writes each result as ``<idx>.jpg`` into a per-image directory.
    """
    idx = 0  # running output-file counter
    print(original_dir_path + orig_file)
    # Skip files OpenCV cannot decode (e.g. stray non-image files).
    if not cv2.haveImageReader(original_dir_path + orig_file):
        print("Invalid file(non-processable) was entered (filename:" + orig_file + "). Skipping")
        return
    img = cv2.imread(original_dir_path + orig_file)
    working_path = argumentation_dir_path + orig_file
    if not os.path.exists(working_path):
        os.mkdir(working_path)
    for blur_ks in blur_ks_list:
        img = blur_img(img, blur_ks)
        for gaussian_std in gaussian_std_list:
            img = gaussian_img(img, gaussian_std)
            for trans in translation_list:
                img = translate_img(img, trans)
                for rotate in rotation_list:
                    img = rotate_img(img, rotate)
                    filename = str(idx) + ".jpg"
                    cv2.imwrite(os.path.join(working_path, filename), img)
                    idx += 1
        # Reload the pristine image so each blur kernel starts from the
        # original rather than the previous kernel's output.
        # NOTE(review): confirm this reload is intended at the blur-loop
        # level and not per gaussian/translation iteration.
        img = cv2.imread(original_dir_path + orig_file)
    return
def check_exclude_breed(breed):
    """Return True when *breed* should be skipped: either already processed
    in a previous run or explicitly excluded from augmentation."""
    already_done = frozenset([
        "Abyssinian", "American Bobtail", "American Curl", "American Shorthair",
        "American Wirehair", "Applehead Siamese", "Balinese", "Bengal", "Birman", "Bombay",
        "British Shorthair", "Burmese", "Burmilla", "Calico", "Canadian Hairless",
        "Chartreux", "Chausie", "Chinchilla", "Cornish Rex", "Cymric", "Devon Rex",
        "Dilute Calico", "Dilute Tortoiseshell", "Domestic Long Hair", "Domestic Medium Hair",
        "Domestic Short Hair", "Egyptian Mau", "Exotic Shorthair", "Extra-Toes Cat - Hemingway Polydactyl",
        "Havana", "Himalayan", "Japanese Bobtail", "Javanese", "Korat", "LaPerm", "Maine Coon",
        "Manx", "Munchkin", "Nebelung", "Norwegian Forest Cat", "Ocicat", "Oriental Long Hair",
        "Oriental Short Hair", "Oriental Tabby", "Persian", "Pixiebob", "Ragamuffin", "Ragdoll",
        "Russian Blue", "Scottish Fold", "Selkirk Rex", "Siamese", "Siberian", "Silver",
        "Singapura", "Snowshoe", "Somali", "Sphynx - Hairless Cat", "Tabby"])
    excluded = frozenset([
        "York Chocolate", "Chinchilla", "Canadian Hairless", "Burmilla", "LaPerm",
        "Cymric", "American Wirehair", "Singapura", "Chausie", "Javanese", "Somali",
        "Oriental Long Hair", "Korat", "Selkirk Rex", "Chartreux", "Silver",
        "Domestic Long Hair", "Domestic Medium Hair", "Domestic Short Hair"])
    return breed in already_done or breed in excluded
def main():
    """Walk every breed directory and augment all of its images.

    Breeds flagged by check_exclude_breed (already processed or excluded)
    are skipped; per-breed output directories are created on demand.
    """
    original_file_list.sort()
    for breed in original_file_list:
        if check_exclude_breed(breed):
            continue
        print("Data Argumentation: " + breed)
        working_breed_dir = TS_MODI_PATH + breed
        if not os.path.exists(working_breed_dir):
            os.mkdir(working_breed_dir)
        original_dir_path = (TS_ORIG_PATH + breed)
        # Augment every file in the breed's source directory.
        for img_file in os.listdir(original_dir_path):
            data_arg(img_file, working_breed_dir + "\\", original_dir_path + "\\")
if __name__ == "__main__":
    # Script entry point: augment all non-excluded breed directories.
    main()
| StarcoderdataPython |
61888 | <gh_stars>0
# -------------------------------------------------------------------------
#
# THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
# ----------------------------------------------------------------------------------
# The example companies, organizations, products, domain names,
# e-mail addresses, logos, people, places, and events depicted
# herein are fictitious. No association with any real company,
# organization, product, domain name, email address, logo, person,
# places, or events is intended or should be inferred.
# --------------------------------------------------------------------------
# Global constant variables (Azure Storage account/Batch details)
# import "config.py" in "batch_python_tutorial_ffmpeg.py"
# Update the Batch and Storage account credential strings below with the values
# unique to your accounts. These are used when constructing connection strings
# for the Batch and Storage client objects.
# Azure Batch account credentials (from the Azure portal).
_BATCH_ACCOUNT_NAME = ''
_BATCH_ACCOUNT_KEY = ''
_BATCH_ACCOUNT_URL = ''
# Azure Storage account credentials.
_STORAGE_ACCOUNT_NAME = ''
_STORAGE_ACCOUNT_KEY = ''
_INPUT_BLOB_PREFIX = '' # E.g. if files in container/READS/ then put 'READS'. Keep blank if files are directly in container and not in a sub-directory
_INPUT_CONTAINER = ''
_OUTPUT_CONTAINER = ''
# Batch pool configuration: pool name, node counts and VM size.
_POOL_ID = ''
_DEDICATED_POOL_NODE_COUNT = 0
_LOW_PRIORITY_POOL_NODE_COUNT = 1
_POOL_VM_SIZE = 'STANDARD_D64_v3'
# Identifier of the Batch job submitted to the pool.
_JOB_ID = ''
| StarcoderdataPython |
3248459 | '''
* publisher
*
* Copyright (c) 2020-2021, Magik-Eye Inc.
* author: <NAME>, <EMAIL>
'''
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import PointCloud2, PointField
from std_msgs.msg import String
import threading
import time
import pymkeapi
import numpy as np
from pymkeros2.device_info import DeviceInfo
# ============================================================================
# MkEPointCloudPublisher - TODO :Check for thread safe and correct termination
class MkEPointCloudPublisher(Node):
    """ROS2 node that streams MkE depth-sensor frames as PointCloud2 messages.

    A background thread pulls frames from the device over TCP (pymkeapi)
    and republishes them on the topic named by the associated DeviceInfo.
    """

    def __init__(self, device_info):
        """:param device_info: DeviceInfo carrying unit id, IP, node/topic names."""
        self.device_info_ = device_info
        self.publishing_flag_ = False  # True while the worker thread should run
        self.pub_ = None  # PointCloud2 publisher, created by startPublishing()
        # Calls Node.__init__ with this device's node name
        super().__init__(self.device_info_.getNodeName())
        # Connection / worker state, populated by startPublishing()
        self.tcp_bus_ = None
        self.client_ = None
        self.thread_ = None

    def __del__(self):
        # Best-effort cleanup; stopPublishing() is a no-op when not running.
        self.stopPublishing()

    def publish(self):
        """Worker-thread loop: fetch frames and publish them until stopped."""
        self.publishing_flag_ = True
        try:
            while self.publishing_flag_:
                frame = self.client_.get_frame(pymkeapi.MKE_FRAME_TYPE_1)
                num = frame.pts3d.shape[0]
                frame.pts3d = 0.001 * frame.pts3d  # frame.pts3d(mm) to ROS(m)
                # Append the per-point uid as a fourth float column.
                data = np.concatenate((frame.pts3d,
                                       frame.uids.reshape(frame.uids.shape[0], 1)), axis=1)
                msg = PointCloud2()
                msg.header.frame_id = 'map'
                msg.height = 1
                msg.width = num
                msg.fields = [
                    PointField(name='x', offset=0,
                               datatype=PointField.FLOAT32, count=1),
                    PointField(name='y', offset=4,
                               datatype=PointField.FLOAT32, count=1),
                    PointField(name='z', offset=8,
                               datatype=PointField.FLOAT32, count=1),
                    PointField(name='uid', offset=12,
                               datatype=PointField.FLOAT32, count=1)
                ]
                msg.is_bigendian = False
                msg.point_step = 16  # 4 float32 fields per point
                msg.row_step = msg.point_step * num
                msg.is_dense = False
                # BUGFIX: ndarray.tostring() was removed in NumPy >= 1.23;
                # tobytes() is the exact replacement.
                msg.data = np.asarray(data, np.float32).tobytes()
                self.pub_.publish(msg)
        except Exception as e:
            self.get_logger().error(f"Error while publishing \
({self.device_info_.getUnitId()} : {self.device_info_.getIpAddr()}) \
: {str(e)}")

    def closeConnection(self):
        """Drop the client, bus and publisher references."""
        self.client_ = None
        self.tcp_bus_ = None
        self.pub_ = None

    def startPublishing(self):
        """Connect to the sensor, enter depth mode and start the worker thread."""
        if self.publishing_flag_:
            return
        try:
            self.tcp_bus_ = pymkeapi.TcpBus(self.device_info_.getIpAddr(),
                                            8888)
            self.client_ = pymkeapi.SyncClient(self.tcp_bus_)
            state = self.client_.get_state()
            if state == pymkeapi.MKE_STATE_IDLE:
                self.client_.set_state(pymkeapi.MKE_STATE_DEPTH_SENSOR)
            elif state != pymkeapi.MKE_STATE_DEPTH_SENSOR:
                # Unknown state: force a clean transition through IDLE.
                self.client_.set_state(pymkeapi.MKE_STATE_IDLE)
                self.client_.set_state(pymkeapi.MKE_STATE_DEPTH_SENSOR)
            # Create topic publisher
            topic_name = self.device_info_.getTopicName()
            self.pub_ = self.create_publisher(PointCloud2, topic_name, 1)
        except Exception as e:
            self.closeConnection()
            print(e)
            # BUGFIX: do not start the worker thread after a failed
            # connection -- publish() would immediately fail on a None client.
            return
        self.thread_ = threading.Thread(target=self.publish)
        self.thread_.start()

    def stopPublishing(self):
        """Stop the worker thread and return the sensor to the idle state."""
        if not self.publishing_flag_:
            return
        self.publishing_flag_ = False
        self.thread_.join()
        if not self.client_:
            self.closeConnection()
            return
        state = self.client_.get_state()
        if state != pymkeapi.MKE_STATE_IDLE:
            self.client_.set_state(pymkeapi.MKE_STATE_IDLE)
        self.closeConnection()

    def getDeviceInfo(self):
        """Return the DeviceInfo associated with this publisher."""
        return self.device_info_

    def setDeviceInfo(self, device_info):
        """Replace the DeviceInfo associated with this publisher."""
        self.device_info_ = device_info
if __name__ == "__main__":
    # Manual smoke test for MkEPointCloudPublisher.
    # NOTE(review): rclpy.init() is not called before creating the Node --
    # confirm initialisation happens elsewhere when running this directly.
    device_info = DeviceInfo("34cff660", "192.168.0.117")
    # Test the constructor
    device_info1 = DeviceInfo()
    mkepub = MkEPointCloudPublisher(device_info1)
    print(mkepub)
    mkepub.setDeviceInfo(device_info)
    print(mkepub.getDeviceInfo())
    # Test start publishing
    mkepub.startPublishing()
    time.sleep(5)
    # Test stop publishing
    mkepub.stopPublishing()
| StarcoderdataPython |
1737008 | import sys
import time
from neopixel import *
# Read the "download,upload" percentage pair passed on stdin, e.g. "42.0,7.5".
# NOTE(review): this is Python 2 code (print statement below), and ``input``
# shadows the builtin of the same name.
input = sys.stdin.readline().rstrip('\n')
percents = input.split(',')
dlPercent = float(percents[0])+0.5
ulPercent = float(percents[1])+0.5
# Map percentages onto LED counts: the download gauge grows from the start
# of the 120-LED strip, the upload gauge from the end.
dlMax = int(dlPercent * 0.55)
ulMax = 120 - int(ulPercent * 0.55)
print dlMax,ulMax
# LED Strip configuration:
LED_COUNT = 120 # Number of LED Pixels.
LED_PIN = 18 # GPIO Pin connected to the pixels
LED_FREQ_HZ = 800000 # LED signal frequency in hertz
LED_DMA = 5 # DMA Channel to use for generating signal
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
# Create NeoPixel object with appropriate configuration as defined above.
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT)
# Initialize the library.
strip.begin()
# Specifying what color should be set depending on LED position
"""LED COLORS ARE (RED, GREEN, BLUE)"""
def blank(strip):
    """Switch every pixel off and push the update to the strip."""
    for idx in range(strip.numPixels()):
        strip.setPixelColor(idx, Color(0, 0, 0))
    strip.show()
def dlWheel(led):
    """Colour for a download-gauge pixel: green -> yellow -> red gradient."""
    if led < 23:
        # Green, fading toward yellow.
        return Color(led * 4, 255, 0)
    if led < 40:
        # Primarily yellow band.
        return Color(led * 13, 255 - led * 2, 0)
    # Red, faded from yellow.
    return Color(255, 110 - led * 2 + 5, 0)
def ulWheel(led):
    """Colour for an upload-gauge pixel (indices count down from 120)."""
    if led < 80:
        return Color(255, led * 2 - 128, 0)
    if led < 95:
        # Transition band, re-based at pixel 70.
        k = led - 70
        return Color(128 - k * 2, k * 6, 0)
    # Final band, re-based at pixel 97.
    k = led - 97
    return Color(128 - k * 5, 255, 0)
def rainbowCycle(strip):
    """Paint the download gauge on pixels [0, dlMax) and the upload gauge
    on pixels 120 down to ulMax (dlMax/ulMax are module globals from stdin)."""
    for i in range(dlMax):
        strip.setPixelColor(i, dlWheel(i))
    # NOTE(review): starts at index 120 on a 120-pixel strip (valid 0..119);
    # presumably relies on the library ignoring out-of-range indices — confirm.
    for i in range(120,ulMax-1,-1):
        strip.setPixelColor(i, ulWheel(i))
# Render both gauges, then draw fixed blue marker pixels at 13/27/41
# and push the buffered values to the strip.
blank(strip)
rainbowCycle(strip)
strip.setPixelColor(13,Color(0,0,255))
strip.setPixelColor(27,Color(0,0,255))
strip.setPixelColor(41,Color(0,0,255))
strip.show()
| StarcoderdataPython |
3270304 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: kingofwolf
# @Date: 2018-11-20 18:34:53
# @Last Modified by: kingofwolf
# @Last Modified time: 2019-03-21 10:45:17
# @Email: <EMAIL>
'Info: a Python file '
__author__ = 'Wang'
TASK_FILE_MODE=1
# TASK_FILE_MODE == 0: task file is an adjacency listing:
#   # 0
#   1 1774760
#   8 3413000
#   # 1
#   0 1774760
#   2 1774760
#   9 3413000
# TASK_FILE_MODE == 1: task file is a METIS-like weighted graph:
#   128 232 001          <- size, edge count, format
#   2 1774760 9 3413000  <- (neighbour, weight) pairs for task 1
#   1 1774760 3 1774760 10 3413000
#   2 1774760 4 1774760 11 3413000
# Pareto threshold used by default_compare().
COMPARE_ALF=10
COST_FUNCTION_MODE=0
# 0: per-task hop-bytes
# 1: max hop-bytes over all tasks
# 2: max per-node load
#ignore the DeprecationWarning of Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
import warnings
warnings.filterwarnings("ignore")
from collections import Iterable
from multiprocessing import Pool
from GreedMap import GreedMap
import sys, getopt
import logging
import functools
# Log everything to ParMapper.log (overwritten each run); the earlier
# hand-rolled handler setup was dead code and has been left out.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    filename="ParMapper.log",
                    filemode='w')
logger=logging.getLogger("System.ParMapper")
class TaskGraph(object):
    """Symmetric weighted task-communication graph.

    Weights are stored in a flattened lower-triangular tuple indexed by
    __locat(); adjacency lists are kept separately for traversal.
    """
    def __init__(self):
        super(TaskGraph, self).__init__()
    def readgraphfile0(self, IOstream, size):
        """Load the TASK_FILE_MODE==0 ('# task' header + edge lines) format."""
        TGlist=[0 for i in range(self.__locat(size-1,size-1)+1)] #initial TGlist
        TGadj_matrix=[[] for i in range(size)]
        self.__size=size
        if isinstance(IOstream,Iterable)&('read' in dir(IOstream)): #IOstream is Iterable and has 'read' function
            try:
                for lines in IOstream: #for every lines in IOstream,
                    (i,j)=lines.strip().split()
                    if i=='#':
                        # '# n' lines select the current source task m.
                        m=int(j)
                    else:
                        n=int(i)
                        TGlist[self.__locat(m,n)]=int(j)
                        TGadj_matrix[m].append(n)
            except Exception as e:
                raise
            finally:
                pass
        self.__TGadj_matrix=TGadj_matrix
        self.__TGmatrix=tuple(TGlist)
        logger.info("TaskGraph initial done:size %d"%self.__size)
        logger.debug("TaskGraph:\n"+str(self))
    def readgraphfile1(self, IOstream):
        """Load the TASK_FILE_MODE==1 (METIS-like, 1-based neighbours) format."""
        (size,nedges,fmt)=IOstream.readline().strip().split()
        (size,nedges,fmt)=(int(size),int(nedges),int(fmt))
        self.__size=size
        TGlist=[0 for i in range(self.__locat(size-1,size-1)+1)] #initial TGlist
        TGadj_matrix=[[] for i in range(size)]
        ptask=-1
        for lines in IOstream:
            # One line per task; neighbours in the file are 1-based.
            ptask+=1
            lines_tmp=lines.strip().split()
            for i in range(0,len(lines_tmp),2):
                qtask=int(lines_tmp[i])-1
                weight=int(lines_tmp[i+1])
                TGlist[self.__locat(ptask,qtask)]=weight
                TGadj_matrix[ptask].append(qtask)
        self.__TGadj_matrix=TGadj_matrix
        self.__TGmatrix=tuple(TGlist)
        logger.info("TaskGraph initial done:size %d"%self.__size)
        logger.debug("TaskGraph:\n"+str(self))
    def getweight(self,i,j):
        """Weight between tasks i and j (0 when out of range); symmetric."""
        if isinstance(i,int)&isinstance(j,int):
            if (i>=0) & (i<self.__size) & (j>=0) & (j<self.__size):
                return self.__TGmatrix[self.__locat(i,j)]
            else:
                return 0
        else:
            raise ValueError("i,j is out of exception")
    def getadj(self,i):
        """Neighbour tuple of task i (empty tuple when out of range)."""
        if (i>=0) & (i<self.__size) :
            return tuple(self.__TGadj_matrix[i])
        else:
            return ()
    def __locat(self,i,j):
        # Flattened lower-triangular index for the symmetric matrix.
        if i>=j:
            return int(((i+1)*i)/2+j)
        else:
            return int(((j+1)*j)/2+i)
    def __str__(self):
        matrix_str=""
        for i in range(self.__size):
            for j in range(self.__size):
                matrix_str+=str(self.getweight(i,j))+" "
            matrix_str+="\n"
        return matrix_str
    @property
    def size(self):
        # Number of tasks.
        return self.__size
    @property
    def TGmatrix(self):
        # Full dense matrix view (rebuilt on each access).
        return [[self.getweight(i,j) for j in range(self.__size)] for i in range(self.__size)]
    def getlist(self):
        """All task ids in order, [0, size)."""
        return [i for i in range(self.__size)]
class NetGraph(object):
    """Node-to-node distance matrix of the target machine.

    ct_size cores total, node_size nodes, core_size cores per node; cores
    are numbered consecutively so node(core) = core // core_size.
    """
    def __init__(self, IOstream, ct_size, node_size, core_size):
        super(NetGraph, self).__init__()
        NGlist=[]
        item=[]
        # NOTE(review): `item` is never used — leftover, candidate for removal.
        self.__node_size=node_size
        self.__core_size=core_size
        self.__ct_size=ct_size
        if isinstance(IOstream,Iterable) & ('read' in dir(IOstream)):
            try:
                for lines in IOstream:
                    # One whitespace-separated row of integer distances per node.
                    NGlist.append(tuple(map(int,lines.strip().split())))
            except Exception as e:
                raise
            finally:
                pass
        self.__NGmatrix=tuple(NGlist)
        logger.info("NetGraph initial done: ct_size:%d,node_size:%d,core_size:%d"%(self.__ct_size,self.__node_size,self.__core_size))
        logger.debug("NetGraph:\n"+str(self))
    def getnodedistance(self,i,j):
        """Distance between nodes i and j (0 when either index is out of range)."""
        if isinstance(i,int)&isinstance(j,int):
            if (i>=0) & (i<self.__node_size) & (j>=0) & (j<self.__node_size):
                return self.__NGmatrix[i][j]
            else:
                return 0
        else:
            raise ValueError("i,j is out of exception")
    def getnodeoder(self,coreoder):
        """Node index hosting the given core index."""
        if coreoder>=self.__ct_size:
            raise OverflowError("coreoder:%d is overflow above:%d"%(coreoder,self.__ct_size))
        return int(coreoder/self.__core_size)
    def getnodecore(self,nodeoder):
        """List of core indices hosted by the given node."""
        begin=nodeoder*self.__core_size
        # Uses the public property here (equivalent to self.__core_size).
        end=(nodeoder+1)*self.core_size
        if end>self.__ct_size:
            end=self.__ct_size
        return [i for i in range(begin,end)]
    def __str__(self):
        netstr=""
        for line in self.__NGmatrix:
            netstr+=str(line)+"\n"
        return netstr
    @property
    def node_size(self):
        return self.__node_size
    @property
    def core_size(self):
        return self.__core_size
    @property
    def ct_size(self):
        return self.__ct_size
class TaskList(object):
    """Collection of candidate task orderings (OO, BFS, BFS_DFS, files).

    NOTE(review): ``files=[]`` is a mutable default argument; it is only
    iterated, never mutated, so this is harmless but should become None.
    """
    def __init__(self,TG,files=[]):
        super(TaskList, self).__init__()
        self.__T=[]
        self.__T.append(self.OO(TG))
        self.__T.append(self.BFS(TG))
        self.__T.append(self.BFS_DFS(TG))
        #self.__T.append(self.GPART(TG))
        self.FileAnalysis(TG,files)
        logger.info("Task initial done")
        logger.debug("Task_orders:\n"+str(self))
    def OO(self,TG):
        """Original order: 0..size-1."""
        Tlist=[i for i in range(TG.size)]
        return tuple(Tlist)
    def BFS(self,TG):
        """Adjacency-scan order.

        NOTE(review): only neighbours are ever appended, so a task with no
        in-edges (e.g. a root) may be missing from the result — confirm
        downstream validation tolerates this.
        """
        Tlist=[]
        for base in TG.getlist():
            for ext in TG.getadj(base):
                if ext in Tlist:
                    continue
                else:
                    Tlist.append(ext)
        return tuple(Tlist)
    def BFS_DFS(self,TG):
        """Depth-first order rooted at task 0."""
        Tlist=[]
        def DFS(nextT):
            for ext in TG.getadj(nextT): #BFS
                if ext in Tlist:
                    continue
                else:
                    Tlist.append(ext)
                    DFS(ext)
        Tlist.append(0)
        DFS(0)
        return tuple(Tlist)
    def GPART(self,TG):
        """Graph-partition order — not implemented (returns empty tuple)."""
        Tlist=[]
        pass
        return tuple(Tlist)
    def FileAnalysis(self,TG,files):
        """Append orderings read from file objects; each file must contain a
        whitespace-separated permutation of 0..size-1."""
        for f in files:
            if isinstance(f,Iterable) & ('read' in dir(f)):
                try:
                    Tlist=tuple(map(int,f.read().strip().split()))
                    if TG.getlist()==sorted(Tlist):
                        self.__T.append(Tlist)
                    else:
                        print('list:',Tlist,' is not available')
                except Exception as e:
                    raise
                finally:
                    pass
    def __str__(self):
        Tstr="task_order:"
        for t in self.__T:
            for i in t:
                Tstr+=str(i)+" "
            Tstr+="\ntask_order:"
        return Tstr
    @property
    def T(self):
        # List of candidate orderings (tuples).
        return self.__T
def Hopbytes(Gt, Gn, S):
    """Per-task hop-bytes for placement S.

    S maps core index -> task id (-1 marks an empty core). For every placed
    task, sums weight(other, task) * distance(node(other), node(task)) over
    all other placed tasks. Returns a list of (task, hop_bytes) pairs in
    placement order.
    """
    placed = [task for task in S if task != -1]
    result = []
    for task in placed:
        task_node = Gn.getnodeoder(S.index(task))
        total = 0
        for other in placed:
            if other == task:
                continue
            other_node = Gn.getnodeoder(S.index(other))
            total += Gt.getweight(other, task) * Gn.getnodedistance(other_node, task_node)
        result.append((task, total))
    return result
def Loads(Gt, Gn, S):
    """Per-node off-node traffic for placement S (fat-tree specific).

    For each placed task, adds the weights of its edges to tasks placed on
    *other* nodes onto its home node's load. Returns a list indexed by node.
    """
    loads = [0] * Gn.node_size
    placed = [task for task in S if task != -1]
    for task in placed:
        home = Gn.getnodeoder(S.index(task))
        loads[home] += sum(
            Gt.getweight(other, task)
            for other in placed
            if other != task and Gn.getnodeoder(S.index(other)) != home
        )
    return loads
def cost_function(Gt,Gn,S,task,mode=0):
    """Cost of placement S with respect to `task`.

    mode 0: hop-bytes of `task`; mode 1: max hop-bytes over all tasks;
    any other mode: max per-node load (fat-tree specific).
    """
    #caculate the hopbytes of each task in tasklist
    hopbyteslist=Hopbytes(Gt,Gn,S)
    hopbytes=hopbyteslist[list(map(lambda x:x[0],hopbyteslist)).index(task)][1]
    if mode==0: return hopbytes
    maxhopbytes=sorted(hopbyteslist,key=lambda x:x[1],reverse=True)[0][1]
    if mode==1: return maxhopbytes
    #caculate the load of each node, because the property of fat-tree net struct
    #this only useful for fat-tree
    loadlist=Loads(Gt,Gn,S)
    maxload=sorted(loadlist,reverse=True)[0]
    # the cost of S
    return maxload
def default_compare(Gt,Gn,Slist,alf=10):
    """Pick the best placement from Slist.

    Chooses the placement with the smallest max hop-bytes, but only accepts
    it if its average hop-bytes is below alf times the best average
    (a Pareto-style guard); otherwise returns [].
    """
    clist=[]
    #for each S in Slist, caculate their average hopbytes and max hopbytes
    for S in Slist:
        hopbyteslist=Hopbytes(Gt,Gn,S)
        ave_hopbytes=sum([hop[1] for hop in hopbyteslist])/len(hopbyteslist)
        max_hopbytes=max([hop[1] for hop in hopbyteslist])
        clist.append((ave_hopbytes,max_hopbytes))
    ave_h0=sorted(clist,key=lambda x:x[0])[0] #h0:sorted by average hopbytes
    sort_clist=sorted(clist,key=lambda x:x[1]) #sorted by max hopbytes
    expect_result=Slist[clist.index(sort_clist[0])]
    if sort_clist[0][0]<alf*ave_h0[0]:
        return expect_result
    else:
        logger.info("Slist:\n"+str(expect_result)+"\ndo not find a Pareto result with alf="+str(alf))
        return []
def S2ST(S):
    """Invert a core->task placement S into a task->core list.

    Cores holding -1 (empty) are skipped; the result has one slot per
    placed task, with result[task] == core.
    """
    inverse = [-1] * sum(1 for task in S if task >= 0)
    for core, task in enumerate(S):
        if task >= 0:
            inverse[task] = core
    return inverse
def helpmsg():
    """Print command-line usage information for ParMapper."""
    usage_lines = (
        "usage:python ParMapper -t <taskgraph file> --tsize <task number> -n <netgraph file> --ct <core total number> --nnode <node number> --ncore <core number>",
        " the Default setting will be <taskgraph file>:CloverLeaf128ProcessTopology_Volume.lgl",
        " <task number>:128",
        " <netgraph file>:MapGraph.txt",
        " <node number>:48 <core number>:24",
    )
    for line in usage_lines:
        print(line)
def ParMapper(Gt,Gn,TList,process=4,strategy=default_compare,compare_alf=10,cost_function_mode=0):
    """Run GreedMap in parallel over candidate task orders and pick a winner.

    Each non-empty order in TList yields two configurations (pack-node-first
    True/False); `strategy` selects among the successful results.

    NOTE(review): exactly `process` jobs are submitted, cycling through the
    configurations — with the default process=4 any configuration beyond the
    first four is never evaluated. Confirm this is intended.
    """
    pool=Pool(process)
    configures=[]
    pool_result=[]
    Slist=[]
    # Bind the requested cost mode once so workers all use the same function.
    if cost_function_mode==0:
        use_cost_function=cost_function
    elif cost_function_mode==1:
        use_cost_function=functools.partial(cost_function,mode=1)
    else:
        use_cost_function=functools.partial(cost_function,mode=2)
    #GreedMap(Gt,Gn,T,packNodeFirst,cost_function)
    for T in TList:
        if T==():
            continue
        configures.append((Gt,Gn,T,True,use_cost_function))
        configures.append((Gt,Gn,T,False,use_cost_function))
    for i in range(process):
        pool_result.append(pool.apply_async(GreedMap,args=configures[i%len(configures)]))
    pool.close()
    pool.join()
    for pr in pool_result:
        if pr.successful():
            Slist.append(pr.get())
    S=strategy(Gt,Gn,Slist,alf=compare_alf)
    return S
def TopoMapping(Gt,Gn,T,savefile):
    """Compute a mapping with ParMapper and write the task->core list to
    `savefile`, one core index per line. Returns the raw core->task list."""
    #GreedMap(Gt,Gn,T,packNodeFirst,cost_function)
    result_S=ParMapper(Gt,Gn,T,process=4,strategy=default_compare,compare_alf=COMPARE_ALF,cost_function_mode=COST_FUNCTION_MODE)
    print("result_S:"+str(S2ST(result_S)))
    with open(savefile,'w') as sf:
        for s in S2ST(result_S):
            sf.write(str(s)+'\n')
    return result_S
def main(task_file,net_file,task_size,net_ct,net_node,net_core,debug_mode=False,resultfile=''):
    """Load the task and network graphs, build candidate task orders, and
    run the mapping. In debug mode a single sequential GreedMap is run and
    printed instead of writing a result file."""
    #read task graph file
    try:
        with open(task_file,'r') as tgf:
            taskgraph=TaskGraph()
            #taskgraph.readgraphfile0(tgf,task_size)
            taskgraph.readgraphfile1(tgf)
    except Exception as e:
        print("task graph read error")
        raise
    finally:
        pass
    #read net graph file
    try:
        with open(net_file,'r') as ngf:
            netgraph=NetGraph(ngf,net_ct,net_node,net_core)
    except Exception as e:
        print("net graph read error")
        raise
    finally:
        pass
    #caculate tasklist
    tasklists=TaskList(taskgraph)
    #debug&test
    if debug_mode:
        T_test=tasklists.T[0]
        logger.debug("T_test:\n"+str(T_test))
        S_test=GreedMap(taskgraph,netgraph,T_test,False,cost_function)
        logger.debug("S_test:"+str(S_test))
        print("S_test:"+str(S2ST(S_test)))
    else:
        # Default result file name: <taskfile>.<netfile basename>
        if resultfile == '':
            savefile=task_file+'.'+net_file.split("/").pop()
        else :
            savefile=resultfile
        return TopoMapping(taskgraph,netgraph,tasklists.T,savefile)
if __name__ == '__main__':
    # Parse command-line options (see helpmsg() for usage).
    opts, args = getopt.getopt(sys.argv[1:], "ht:n:",["debug","tsize=", "nnode=", "ncore=", "ct="])
    debug_mode=False
    logger.setLevel(logging.INFO)
    # Defaults: CloverLeaf 128-process task graph on a 48-node, 24-core machine.
    task_file="CloverLeaf128ProcessTopology_Volume.lgl"
    net_file="MapGraph.txt"
    task_size=128
    net_node=48
    net_core=24
    net_ct=48*24
    #choose option
    for op, value in opts:
        if op == "-t":
            task_file = value
        elif op == "-n":
            net_file = value
        elif op == "-h":
            helpmsg()
            sys.exit()
        elif op == "--debug":
            debug_mode=True
            logger.setLevel(logging.DEBUG)
        elif op == "--tsize":
            task_size = int(value)
        elif op == "--ct":
            net_ct = int(value)
        elif op == "--nnode":
            net_node = int(value)
        elif op == "--ncore":
            net_core = int(value)
    logger.info("task_file:%s,task_size:%d"%(task_file,task_size))
    logger.info("net_file:%s,net_ct:%d,net_node:%d,net_core:%d"%(net_file,net_ct,net_node,net_core))
    main(task_file,net_file,task_size,net_ct,net_node,net_core,debug_mode)
| StarcoderdataPython |
3283710 | <filename>bsbolt/Utils/ParserHelpMessages.py<gh_stars>1-10
alignment_help = '''
bsbolt Align -F1 {fastq1} -DB {bsbolt db} -O {output}
-h, --help show this help message and exit
Input / Output Options:
-F1 File path to fastq 1
-F2 File path to fastq 2 [null]
-O File output Prefix
-OT Int number of bam output threads [1]
-DB File path to bsbolt database
-R Str read group header line such as '@RG ID:foo SM:bar' [null]
-H Str insert STR to header if it starts with @; or insert lines in FILE [null]
-XA Int,Int if there are <INT hits with score >80 percent of the max score, output all in XA [100,200]
-DR Float drop ratio for alternative hits reported in XA tag,
for best bisulfite alignment performance set at or above default [0.95]
-p smart pairing (ignoring in2.fq)
Scoring Options
-A Int score for a sequence match, which scales options -TdBOELU unless overridden [1]
-B Int penalty for a mismatch [4]
-INDEL Int gap open penalties for deletions and insertions [6,6]
-E Int gap extension penalty; a gap of size k cost '{-O} + {-E}*k' [1,1]
-L Int,Int penalty for 5'- and 3'-end clipping [30,30]
-U Int penalty for an unpaired read pair [17]
Bisulfite Options
-UN library undirectional, ie. consider PCR products of bisulfite converted DNA
-CP Float CH conversion proportion threshold [0.5]
-CT Int number of CH sites needed to assess read conversion
-SP Float substitution threshold for read bisulfite conversion patterns (ie C2T, G2A) [0.1]
for undirectional libraries the substitution pattern with the fewer number of
substitutions relative to the total read length (if < threshold) is aligned preferentially
Algorithm Options
-t Int number of bwa threads [1]
-k Int minimum seed length [19]
-w Int band width for banded alignment [100]
-d Int off-diagonal X drop off [100]
-r Float look for internal seeds inside a seed longer than {-k} * FLOAT [1.5]
-y Int seed occurrence for the 3rd round seeding [20]
-c Int skip seeds with more than INT occurrences [500]
-D Float drop chains shorter than FLOAT fraction of the longest overlapping chain [0.50]
-W Int discard a chain if seeded bases shorter than INT [0]
-m Int perform at most INT rounds of mate rescues for each read [50]
-S skip mate rescue
-P skip pairing; mate rescue performed unless -S also in use
-j ignore ALT contigs
-T Int minimum score to output [10]
-M mark shorter split hits as secondary
-I Fl,Fl,Int,Int specify the mean, standard deviation (10 percent of the mean if absent),
max (4 sigma from the mean if absent) and min of the insert size distribution.
FR orientation only. [inferred]
'''
index_help = '''
bsbolt Index -G {fasta reference} -DB {database output}
-h, --help show this help message and exit
Index Options:
-G File path to reference genome fasta file, fasta file should contain all contigs
-DB File path to index directory, alignment index generated inside existing directory
or new directory made if directory doesn't exist
-B Int block size for bwtsw algorithm,
increasing block will speed up indexing and increase memory consumption [10000000]
-MR File path to bed file of mappable regions.
Index will be built using using masked contig sequence [null]
-IA ignore alt contigs when constructing alignment index
-rrbs generate a Reduced Representative Bisulfite Sequencing (RRBS) index
-rrbs-cut-format Str Cut format to use for generation of RRBS database, [C-CGG] MSPI,
input multiple enzymes as a comma separate string, C-CGG,C-CGG,...
-rrbs-lower Int lower bound fragment size to consider RRBS index generation [40]
-rrbs-upper Int upper bound fragment size to consider RRBS index generation [500]
'''
meth_help = '''
bsbolt Module CallMethylation -I {input.bam} -DB {bsbolt DB} -O {output prefix}
-h, --help show this help message and exit
Input / Output Options:
-I File input BAM, input file must be in BAM format with index file
-DB File path to index directory
-O File output prefix
-text output plain text files [False]
-BG output calls in bedGraph format [False]
-CG only output CpG sites in CGmap file [False]
Algorithm Options:
-remove-ccgg remove methylation calls in ccgg sites [False]
-verbose verbose Output [False]
-ignore-ov only consider higher quality base when paired end reads overlap [True]
-max Int max read depth to call methylation [8000]
-min Int minimum read depth required to report methylation site [10]
-t Int methylation calling threads [1]
-BQ Int minimum base quality [10]
-MQ Int minimum alignment quality [20]
-IO ignore orphans reads, (not proper read pair)
'''
aggregate_help = '''
bsbolt AggregateMatrix -F {file1.CGmap,file2.CGmap,...} -O {output_matrix.txt}
-h, --help show this help message and exit
Options:
-F File,File,. comma separated list of CGmap files,
or path to text file with list of line separated CGmap files
-S Str,Str,. comma separated list of samples labels. If sample labels are not provided sample labels
are extracted from CGmap files. Can also pass path to txt for line separated sample labels.
-min-coverage Int minimum site read depth coverage
-min-sample Float proportion of samples that must have a valid site (above minimum coverage threshold)
-O File Aggregate matrix output path
-CG Only output CG sites
-verbose Verbose aggregation
-t Int Number of threads to use when assembling matrix
-count Output a count matrix with count of methylated cytosines and total observed cytosines
'''
sim_help = '''
bsbolt Simulate -G {genome.fa} -O {output_directory}
-h, --help show this help message and exit
Input / Output Options:
-G File path for reference genome fasta file
-O File output prefix
-CG File path to CGmap file reference profile [Null]
-overwrite overwrite previously generated simulation database
-BR File Path to previously generated bsbolt methylation reference (directory)
-NS don't output simulated methylation counts
-verbose verbose read simulation
Algorithm Options:
-PE simulate Paired End Reads, default Single End
-RL Int simulated Read Length [125]
-RD Int simulated Read Depth [20]
-U simulate undirectional reads, (bisulfite converted reference strands and PCR products)
-MR Float mutation rate [0.005]
-MI Float mutation indel fraction [0.20]
-ME Float mutation indel extension probability [0.20]
-RS Int random seed for variant generation [-1]
-HA haplotype mode, homozygous variants only
-CH skip simulation of CH methylation, all CH sites unmethylated
-SE Float sequencing Error [0.001]
-NF Float cutoff threshold for ambiguous bases, simulated reads with a proportion of ambiguous
bases above this threshold will not be output [0.05]
-FM Int max fragment size [400]
-IM Int insert length mean [50]
-SM Int insert length standard deviation [50]
'''
impute_help = '''
bsbolt Impute -M {BSBolt_matrix.txt} -O {imputed_matrix.txt}
-h, --help show this help message and exit
Options:
-M File path to bsbolt matrix file
-B Int imputation sample batch size kNN imputation, by default the all of the samples
will be processed as a single batch
-W Int sliding window size for imputation [3000000]
-k Int number of neighbors to use for imputation [5]
-t Int number of threads available for imputation [1]
-verbose verbose imputation
-O File output path for imputed matrix
-R randomize batches
''' | StarcoderdataPython |
3211425 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-07 10:07
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration that drops the ModuleAsset and ModuleData
    models; their unique_together constraints and 'module' foreign keys are
    cleared first so the DeleteModel operations can run cleanly."""
    dependencies = [
        ('api', '0004_message_last_reply_date'),
    ]
    operations = [
        migrations.AlterUniqueTogether(
            name='moduleasset',
            unique_together=set([]),
        ),
        migrations.RemoveField(
            model_name='moduleasset',
            name='module',
        ),
        migrations.AlterUniqueTogether(
            name='moduledata',
            unique_together=set([]),
        ),
        migrations.RemoveField(
            model_name='moduledata',
            name='module',
        ),
        migrations.DeleteModel(
            name='ModuleAsset',
        ),
        migrations.DeleteModel(
            name='ModuleData',
        ),
    ]
| StarcoderdataPython |
1798712 | """
Flask-MAB
-------------
An implementation of the multi-armed bandit optimization pattern as a Flask extension
If you can pass it, we can test it
"""
from setuptools import setup
setup(
    name='Flask-MAB',
    version='2.0.1',
    url='http://github.com/deacondesperado/flask_mab',
    license='BSD',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Multi-armed bandits for flask',
    long_description=__doc__,
    packages=['flask_mab'],
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    install_requires=[
        'Flask>=1.0.2',
        'flask-debugtoolbar==0.10.1',
        'future==0.17.1'
    ],
    setup_requires=[
        'future>=0.17.1',
        'coverage>=3.7.0',
        'mock>=1.0.0',
        'pytest-runner'
    ],
    # Fixed: setuptools only recognizes ``tests_require``; the original
    # ``tests_requires`` keyword was silently ignored, so test dependencies
    # were never installed by ``python setup.py test``.
    tests_require=[
        'pytest'
    ],
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
| StarcoderdataPython |
1727618 |
# Copyright (c) 2012, <NAME> [see LICENSE.txt]
# Python 2 to 3 workarounds
import sys
# NOTE(review): both branches bind _strobj to ``str``; under Python 2 this
# was presumably meant to be ``basestring`` (to also accept unicode) —
# confirm against upstream pyvttbl before relying on it.
if sys.version_info[0] == 2:
    _strobj = str
    _xrange = xrange
elif sys.version_info[0] == 3:
    _strobj = str
    _xrange = range
import os
import pylab
import numpy as np
from collections import Counter
from pyvttbl.misc.support import _flatten
def box_plot(df, val, factors=None, where=None,
             fname=None, output_dir='', quality='medium'):
    """Make a box plot of `val`, optionally grouped by `factors`, and save
    it to disk.

    Args:
        df: a pyvttbl.DataFrame object.
        val: label of the dependent variable (must be a column of df).
        factors: list of factor column labels to group by (default: none).
        where: string/list of strings/list of tuples applied to the
            DataFrame before plotting.
        fname: output file name; must end with .png or .svg. If None, a
            name is derived from `val` and `factors`.
        output_dir: directory the file is written into.
        quality: 'low' | 'medium' | 'high' -> dpi of the saved image.

    Returns:
        A dict of intermediate values when df.TESTMODE is set; otherwise
        None (the plot file is the real output).
    """
    if factors == None:
        factors = []
    if where == None:
        where = []
    # check to see if there is any data in the table
    if df == {}:
        raise Exception('Table must have data to print data')
    # check to see if data columns have equal lengths
    if not df._are_col_lengths_equal():
        raise Exception('columns have unequal lengths')
    # check the supplied arguments
    if val not in list(df.keys()):
        raise KeyError(val)
    if not hasattr(factors, '__iter__'):
        raise TypeError( "'%s' object is not iterable"
                         % type(factors).__name__)
    for k in factors:
        if k not in list(df.keys()):
            raise KeyError(k)
    # check for duplicate names
    dup = Counter([val]+factors)
    del dup[None]
    if not all([count==1 for count in list(dup.values())]):
        raise Exception('duplicate labels specified as plot parameters')
    # check fname
    if not isinstance(fname, _strobj) and fname != None:
        raise TypeError('fname must be None or string')
    if isinstance(fname, _strobj):
        if not (fname.lower().endswith('.png') or \
                fname.lower().endswith('.svg')):
            raise Exception('fname must end with .png or .svg')
    # `test` collects intermediates for pyvttbl's TESTMODE self-checks.
    test = {}
    if factors == []:
        # Single box: just the raw column.
        d = df.select_col(val, where=where)
        fig = pylab.figure()
        pylab.boxplot(np.array(d))
        xticks = pylab.xticks()[0]
        xlabels = [val]
        pylab.xticks(xticks, xlabels)
        test['d'] = d
        test['val'] = val
    else:
        # One box per factor-level combination from the pivot.
        D = df.pivot(val, rows=factors,
                     where=where,
                     aggregate='tolist')
        fig = pylab.figure(figsize=(6*len(factors),6))
        fig.subplots_adjust(left=.05, right=.97, bottom=0.24)
        pylab.boxplot([np.array(_flatten(d)) for d in D])
        xticks = pylab.xticks()[0]
        xlabels = ['\n'.join('%s = %s'%fc for fc in c) for c in D.rnames]
        pylab.xticks(xticks, xlabels,
                     rotation=35,
                     verticalalignment='top')
        test['d'] = [np.array(_flatten(d)) for d in D]
        test['xlabels'] = xlabels
    maintitle = '%s'%val
    if factors != []:
        maintitle += ' by '
        maintitle += ' * '.join(factors)
    fig.text(0.5, 0.95, maintitle,
             horizontalalignment='center',
             verticalalignment='top')
    test['maintitle'] = maintitle
    # Derive a default file name like "box(val~f1_X_f2).png".
    if fname == None:
        fname = 'box(%s'%val
        if factors != []:
            fname += '~' + '_X_'.join([str(f) for f in factors])
        fname += ').png'
    fname = os.path.join(output_dir, fname)
    test['fname'] = fname
    # save figure
    if quality == 'low' or fname.endswith('.svg'):
        pylab.savefig(fname)
    elif quality == 'medium':
        pylab.savefig(fname, dpi=200)
    elif quality == 'high':
        pylab.savefig(fname, dpi=300)
    else:
        pylab.savefig(fname)
    pylab.close()
    if df.TESTMODE:
        return test
| StarcoderdataPython |
4836167 | <reponame>trucktar/inquisitive
from datetime import datetime, timedelta
import jwt
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
class UserManager(BaseUserManager):
    """Manager for the custom email-keyed ``User`` model."""

    def create_user(self, username, email, password, **extra_fields):
        """
        Create and save a user with the given username, email and password.
        """
        # Fixed: was the redundant double assignment ``email = email = ...``.
        email = self.normalize_email(email)
        username = self.model.normalize_username(username)
        user = self.model(username=username, email=email, **extra_fields)
        # set_password hashes the raw password before it is stored.
        user.set_password(password)
        user.save()
        return user
class User(AbstractBaseUser):
    """Custom user model authenticated by email (USERNAME_FIELD)."""
    username = models.CharField(max_length=150, unique=True)
    email = models.EmailField(unique=True)
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=150)
    is_admin = models.BooleanField(default=False)
    # Bind the custom manager so User.objects.create_user() works.
    objects = UserManager()
    USERNAME_FIELD = "email"
    def __str__(self):
        """Returns a string representation of `User` instance."""
        return self.email
    @property
    def token(self):
        """Gets the generated JWT for this user's id."""
        return self._generate_jwt_token()
    def _generate_jwt_token(self):
        """
        Generates a JSON Web Token that stores this user's ID and has an expiry
        date set to 60 days into the future.
        """
        dt = datetime.now() + timedelta(days=60)
        # NOTE(review): strftime("%s") is a platform extension (absent on
        # Windows); dt.timestamp() would be the portable spelling — confirm.
        token = jwt.encode(
            {"id": self.pk, "exp": int(dt.strftime("%s"))},
            settings.SECRET_KEY,
            algorithm="HS256",
        )
        # NOTE(review): .decode() assumes PyJWT < 2.0 (bytes return);
        # PyJWT >= 2.0 returns str and this raises AttributeError — confirm
        # the pinned dependency version.
        return token.decode()
| StarcoderdataPython |
3275752 | from django.conf.urls import url
# URL routes for the CRITs screenshots app.
# NOTE(review): the string-view + ``prefix=`` form of url() was removed in
# Django 1.10; this module only imports on Django <= 1.9.
urlpatterns = [
    url(r'^list/$', 'screenshots_listing', prefix='crits.screenshots.views'),
    url(r'^list/(?P<option>\S+)/$', 'screenshots_listing', prefix='crits.screenshots.views'),
    url(r'^add/$', 'add_new_screenshot', prefix='crits.screenshots.views'),
    url(r'^find/$', 'find_screenshot', prefix='crits.screenshots.views'),
    url(r'^remove_from_object/$', 'remove_screenshot_from_object', prefix='crits.screenshots.views'),
    url(r'^render/(?P<_id>\S+)/(?P<thumb>\S+)/$', 'render_screenshot', prefix='crits.screenshots.views'),
    url(r'^render/(?P<_id>\S+)/$', 'render_screenshot', prefix='crits.screenshots.views'),
    url(r'^render/$', 'render_screenshot', prefix='crits.screenshots.views'),
]
| StarcoderdataPython |
1619082 | import os
import sys
BASE_DIR = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
sys.path.append(BASE_DIR)
from tools.path import ILSVRC2012_path
from simpleAICV.classification import backbones
from simpleAICV.classification import losses
import torchvision.transforms as transforms
import torchvision.datasets as datasets
class config:
    """Training configuration for EfficientNet-B0 on ImageNet (ILSVRC2012).

    Attributes are read directly by the training script; the model, loss
    and datasets are constructed eagerly at import time.
    """
    train_dataset_path = os.path.join(ILSVRC2012_path, 'train')
    val_dataset_path = os.path.join(ILSVRC2012_path, 'val')
    network = 'efficientnet_b0'
    pretrained = False
    num_classes = 1000
    input_image_size = 224
    # Validation resize factor (resize to 256, then center-crop 224).
    scale = 256 / 224
    model = backbones.__dict__[network](**{
        'pretrained': pretrained,
        'num_classes': num_classes,
    })
    criterion = losses.__dict__['CELoss']()
    train_dataset = datasets.ImageFolder(
        train_dataset_path,
        transforms.Compose([
            transforms.RandomResizedCrop(input_image_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ]))
    val_dataset = datasets.ImageFolder(
        val_dataset_path,
        transforms.Compose([
            transforms.Resize(int(input_image_size * scale)),
            transforms.CenterCrop(input_image_size),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ]))
    # val_dataset.class_to_idx stores the index for each class: a "class" is
    # the name of each per-class sub-folder, and the index is the training
    # target used by the model.
    seed = 0
    # batch_size is total size in DataParallel mode
    # batch_size is per gpu node size in DistributedDataParallel mode
    batch_size = 64
    num_workers = 16
    # choose 'SGD' or 'AdamW'
    optimizer = 'SGD'
    # 'AdamW' doesn't need gamma and momentum variable
    gamma = 0.1
    momentum = 0.9
    # choose 'MultiStepLR' or 'CosineLR'
    # milestones only use in 'MultiStepLR'
    scheduler = 'CosineLR'
    lr = 0.1
    weight_decay = 1e-4
    milestones = [30, 60]
    warm_up_epochs = 5
    epochs = 90
    accumulation_steps = 1
    print_interval = 10
    # only in DistributedDataParallel mode can use sync_bn
    distributed = True
    sync_bn = False
    apex = True
| StarcoderdataPython |
1667539 | <reponame>andrewnc/geometric-neural-processes
from skimage.transform import resize
import matplotlib.pyplot as plt
import numpy as np
import os
from tqdm import tqdm
#for f in tqdm(os.listdir("./train2014")):
# image = plt.imread("./train2014/{}".format(f))
# resized = resize(image, (32,32), anti_aliasing=True)
# plt.imsave("./resized_small_train2014/{}".format(f), resized)
# Downsample every CelebA image to 32x32 (anti-aliased) and write it to the
# resized output directory under the same file name.
for fname in tqdm(os.listdir("../data/img_align_celeba")):
    img = plt.imread("../data/img_align_celeba/{}".format(fname))
    small = resize(img, (32, 32), anti_aliasing=True)
    plt.imsave("../data/resized_small_img_align_celeb_a/{}".format(fname), small)
| StarcoderdataPython |
3396143 | <reponame>GravYong/scalarized_ns<gh_stars>0
""" A quadratic scalar-tensor theory
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "GPL"
import numpy as np
class STG_quadratic(object):
    """Quadratic scalar-tensor gravity theory as defined in
    Damour & Esposito-Farese 1996.
    """
    def __init__(self, xi=-4.4, msq=1):
        # xi: curvature parameter of the coupling function;
        # msq: dimensionless scalar mass squared in the potential.
        self.xi = xi
        self.msq = msq
    def U(self, bphi):
        """Dimensionless potential U = msq * bphi^2."""
        return self.msq * bphi ** 2.0
    def dUdph(self, bphi):
        """Derivative of the dimensionless potential, dU/d(bphi)."""
        return 2.0 * self.msq * bphi
    def A(self, bphi):
        """Conformal coupling A(bphi) = 1 / sqrt(1 + xi*bphi^2)."""
        denom = np.sqrt(1.0 + self.xi * bphi ** 2.0)
        return 1.0 / denom
    def alpha(self, bphi):
        """d[ln(A)]/d[bphi]."""
        return -(self.xi * bphi) / (1.0 + self.xi * bphi ** 2.0)
    def phtranbph(self, bph):
        """ph as a function of bph (integration constant omitted).

        Only real-valued for xi*(1 + 6*xi) >= 0.
        """
        q = np.sqrt(self.xi * (1 + 6 * self.xi))
        root = np.sqrt(1 + q ** 2 * bph ** 2)
        term1 = q * np.log(1 + 2 * q * bph * (root + q * bph))
        term2 = np.sqrt(6) * self.xi * np.log(
            1 - 2 * np.sqrt(6) * self.xi * bph * (root - np.sqrt(6) * self.xi * bph) / (1 + self.xi * bph ** 2))
        return (term1 + term2) / 2.0 / np.sqrt(2) / self.xi
    def dphsq(self, bphi):
        """( d[phi]/d[bphi] )^2."""
        num = 1.0 + (1.0 + 6.0 * self.xi) * self.xi * bphi ** 2.0
        den = 2.0 * (1.0 + self.xi * bphi ** 2.0) ** 2.0
        return num / den
    def ddphsq(self, bphi):
        """d/d[bphi] of ( d[phi]/d[bphi] )^2."""
        num = self.xi * bphi * (1.0 - 6.0 * self.xi + (1.0 + 6.0 * self.xi) * self.xi * bphi ** 2.0)
        return -num / (1.0 + self.xi * bphi ** 2.0) ** 3.0
| StarcoderdataPython |
1682444 | import proverb
def main():
    """Entry point: print a saying supplied by the ``proverb`` module."""
    print(proverb.saying())


# Fixed: main() was defined but never invoked, so running the script did
# nothing; guard the call so importing the module stays side-effect free.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3292188 | from pyfirmata import Arduino, util
import time
import argparse
import yaml
import logging.config
config = None
with open('config.yml') as f:
    # safe_load only builds plain Python objects; yaml.load() without an
    # explicit Loader can construct arbitrary objects from the file and is
    # deprecated in modern PyYAML.
    config = yaml.safe_load(f)
config['logging'].setdefault('version', 1)
logging.config.dictConfig(config['logging'])
logger = logging.getLogger(__name__)
logger.debug('Using configuration in \'config.yml\'')
class Board(object):
    """Thin wrapper around an Arduino (via pyFirmata) exposing an LED
    brightness property and a photoresistor reading, configured from the
    module-level ``config`` dict."""
    # Last brightness written to the LED, clamped to 0..100.
    _light_level = 0
    @property
    def light_level(self):
        """Current LED brightness (0-100)."""
        return self._light_level
    @light_level.setter
    def light_level(self, level):
        # Clamp to 0..100 before converting to a PWM duty cycle.
        level = int(level)
        if level < 0:
            level = 0
        elif level > 100:
            level = 100
        self._light_level = level
        logger.info('Setting light level to %d', self._light_level)
        self.pin_led.write(level / 100.0)
    @property
    def photoresistor_level(self):
        """Latest analog photoresistor reading, or None if nothing arrives
        within ~0.1s (10 polls at 10ms each)."""
        ret = None
        count = 0
        while ret is None and count < 10:
            count += 1
            time.sleep(0.01)
            ret = self.pin_photo.read()
        return ret
    def __init__(self):
        # Open the serial connection and configure the pins from config.
        logger.info('Connecting to Arduino Uno on %s', config['port'])
        self.board = Arduino(config['port'])
        self.pin_led = self.board.get_pin('d:%d:p' % (config['light']['pin'],))
        self.pin_photo = self.board.get_pin('a:%d:i' % (config['photoresistor']['pin'],))
        self.pin_13 = self.board.get_pin('d:13:o')
        self._iter = util.Iterator(self.board)
        self._iter.start()
        logger.debug('Iterator started')
        logger.debug('enable_reporting on photoresistor')
        self.pin_photo.enable_reporting()
    def close(self):
        """Release the serial port and stop the iterator thread."""
        logger.debug('Close called on board')
        self.board.exit()
        # Kill the _iter thread
        self._iter.board = None
    def blink13(self):
        """Blink the built-in LED on pin 13 forever (1s on / 1s off)."""
        while True:
            self.pin_13.write(1)
            time.sleep(1)
            self.pin_13.write(0)
            time.sleep(1)
# Registry of available CLI sub-commands, keyed by command name.
commands = {}
def command(cls):
    """Class decorator: validate and register *cls* in ``commands``.

    Raises NotImplementedError if the class does not define ``name`` or
    ``help``.  Returns the class so the decorated name stays bound to it
    (the original returned None, rebinding each decorated class to None).
    """
    if cls.name is None:
        raise NotImplementedError("Class did not specify command name")
    if cls.help is None:
        raise NotImplementedError("Class did not specify help message")
    commands[cls.name] = cls
    return cls
class CommandBase(object):
    """Abstract base class for CLI sub-commands.  Subclasses must set
    ``name`` and ``help`` and implement both methods below."""
    name = None
    help = None
    def __init__(self, brd):
        # The shared Board instance this command operates on.
        self.board = brd
    def setup_arg_parser(self, parser):
        """Register this command's arguments on *parser* (abstract)."""
        raise NotImplementedError()
    def execute(self, args):
        """Run the command with the parsed *args* (abstract)."""
        raise NotImplementedError()
@command
class Light(CommandBase):
    """Sub-command: set the LED brightness to a given level (0-100)."""
    name = 'light'
    help = 'Set light brightness level'
    def setup_arg_parser(self, parser):
        parser.add_argument('level', type=int, choices=range(0, 101), metavar='{0..100}',
                            help='Light brightness from 0 to 100')
    def execute(self, args):
        self.board.light_level = args.level
@command
class Blink13(CommandBase):
    """Sub-command: blink the built-in LED on pin 13 forever."""
    name = 'blink13'
    help = 'Blink the LED on pin 13'
    def setup_arg_parser(self, parser):
        # No arguments needed.
        pass
    def execute(self, args):
        self.board.blink13()
@command
class Print(CommandBase):
    """Sub-command: react to printer start/stop events — turn the light on
    at print start (only if the room is dark enough) and off at stop."""
    name = 'print'
    help = 'Print hooks'
    def setup_arg_parser(self, parser):
        parser.add_argument('event', choices=['start', 'stop'], help='Print event to respond to')
    def execute(self, args):
        if args.event == 'start':
            # Only switch the light on when ambient light is at or below
            # the configured photoresistor threshold.
            photo = self.board.photoresistor_level
            if photo <= config['photoresistor']['level']:
                self.board.light_level = config['light']['level']
        elif args.event == 'stop':
            self.board.light_level = 0
@command
class Photoresistor(CommandBase):
    """Sub-command: print the current photoresistor reading."""
    name = 'photoresistor'
    help = 'Report the current photoresistor value'
    def setup_arg_parser(self, parser):
        # No arguments needed.
        pass
    def execute(self, args):
        # Function-call form of print works on both Python 2 and Python 3
        # for a single argument; the bare py2 print statement does not.
        print(self.board.photoresistor_level)
logger.info('Available commands: %s', [key for key in commands])
def main():
    """Build the CLI from the registered commands and dispatch to the one
    the user requested, always closing the board afterwards."""
    logger.debug('Starting main code')
    board = Board()
    root_parser = argparse.ArgumentParser(description='OctoPrint Auxiliary functionality')
    subparsers = root_parser.add_subparsers()
    # Replace each registered class with an instance bound to the board and
    # give it its own subparser; set_defaults(name=...) lets us recover
    # which sub-command was chosen after parsing.
    for key in commands:
        cls = commands[key]
        parser = subparsers.add_parser(cls.name, help=cls.help)
        cls = cls(board)
        cls.setup_arg_parser(parser)
        parser.set_defaults(name=key)
        commands[key] = cls
    try:
        arguments = root_parser.parse_args()
        logging.info('Requested command: %s', arguments.name)
        try:
            commands[arguments.name].execute(arguments)
        except Exception as e:
            logger.critical('An unknown exception occurred', exc_info=e)
            raise
    finally:
        # Always release the serial port, even if the command failed.
        board.close()
    logging.info('Completed run')
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3268272 | import numpy as np
import pandas as pd
import matplotlib.pylab as plt
import matplotlib.cm as mplcm
import matplotlib.colors as colors
from utils.embedding import *
from utils.geometry import *
from utils.constants import *
from shapely.geometry import Point, LineString, Polygon, LinearRing
# Load the chip/connection data, assign connection subsequences to layers,
# score the layout and write out a submission file.
chip_1, chip_2, connections = data_prepare()
new_points, subsequences = layers(chip_1, chip_2, connections)
subsequences_by_layers_1 = [[0, 1], [4, 7], [3, 6], [2, 5]]
subsequences_by_layers_2 = [[0, 7], [1, 6], [2, 5], [3, None], [4, None]]
K, L, V, S, mind, int_lines_list, ext_lines_list = \
    objective(connections, subsequences, subsequences_by_layers_1, chip_1, chip_2)
jump_coordinates, jump_lines = get_jumps(connections, subsequences[subsequences_by_layers_1[0][0]],
                                         subsequences[subsequences_by_layers_1[0][1]], chip_1, chip_2, 1)
submit("submission", int_lines_list, napilnik_lol(ext_lines_list[:]), jump_lines, jump_coordinates)
# Report the individual objective components.
print "K = ", K
print "L = ", L
print "V = ", V
print "S = ", S
print "mind = ", mind
| StarcoderdataPython |
153368 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import mptt.fields
import django.db.models.deletion
import yepes.fields
class Migration(migrations.Migration):
    """Initial schema for the comments app: a ``CommentStatus`` moderation
    lookup table and a threaded (django-mptt) ``Comment`` model."""
    dependencies = [
        ('posts', '0001_initial_schema'),
    ]
    initial = True
    operations = [
        migrations.CreateModel(
            name='CommentStatus',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('index', yepes.fields.IntegerField(default=0, min_value=0, db_index=True, verbose_name='Index', blank=True)),
                ('label', yepes.fields.CharField(max_length=63, verbose_name='Label')),
                ('api_id', yepes.fields.IdentifierField(help_text='This field is for internally identify the comment status. Can only contain lowercase letters, numbers and underscores.', verbose_name='API Id')),
                ('color', yepes.fields.ColorField(help_text='This color is used on the admin site for visually identify the comment status.', verbose_name='Color')),
                ('publish_comment', yepes.fields.BooleanField(default=True, help_text='Uncheck this box to make the comments effectively disappear from the blog.', verbose_name='Publishes Comments')),
                ('comment_replacement', yepes.fields.TextField(help_text='The content of this field will replace the text of the user comments. E.g.: "Inappropriate comment."', verbose_name='Comment Replacement', blank=True)),
            ],
            options={
                'verbose_name': 'Comment Status',
                'verbose_name_plural': 'Comment Statuses',
            },
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # lft/rght/tree_id/level are the MPTT nested-set bookkeeping
                # columns that implement comment threading.
                ('lft', models.PositiveIntegerField(editable=False, db_index=True)),
                ('rght', models.PositiveIntegerField(editable=False, db_index=True)),
                ('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
                ('level', models.PositiveIntegerField(editable=False, db_index=True)),
                ('creation_date', models.DateTimeField(auto_now_add=True, verbose_name='Creation Date')),
                ('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),
                ('parent', mptt.fields.TreeForeignKey(related_name='children', verbose_name='Parent', blank=True, to='comments.Comment', null=True)),
                ('author_name', yepes.fields.CharField(max_length=63, verbose_name='Name')),
                ('author_email', yepes.fields.EmailField(verbose_name='Email Address')),
                ('author_url', models.URLField(max_length=127, verbose_name='URL', blank=True)),
                ('ip_address', models.GenericIPAddressField(unpack_ipv4=True, null=True, verbose_name='IP Address', blank=True)),
                ('user_agent', yepes.fields.CharField(max_length=255, verbose_name='User Agent', blank=True)),
                ('karma', yepes.fields.IntegerField(default=0, verbose_name='Karma', blank=True)),
                ('status', yepes.fields.CachedForeignKey(related_name='comments', on_delete=django.db.models.deletion.PROTECT, verbose_name='Status', to='comments.CommentStatus')),
                ('is_published', yepes.fields.BooleanField(default=True, verbose_name='Is Published?', editable=False)),
            ],
            options={
                'ordering': ['-creation_date'],
                'permissions': [('can_moderate', 'Can moderate comments')],
                'verbose_name': 'Comment',
                'verbose_name_plural': 'Comments',
            },
        ),
        migrations.AlterIndexTogether(
            name='comment',
            index_together=set([('status', 'creation_date')]),
        ),
    ]
| StarcoderdataPython |
1708049 | import numpy as np
from nose.plugins.attrib import attr
from cStringIO import StringIO
from nose.tools import raises
from microscopes.lda import utils
def test_docs_from_document_term_matrix():
    """Term counts expand into flat token-id documents."""
    dtm = [[2, 1], [3, 2]]
    docs = [[0, 0, 1], [0, 0, 0, 1, 1]]
    assert utils.docs_from_document_term_matrix(dtm) == docs
def test_docs_from_document_term_matrix_with_vocab():
    """With a vocab, token ids are replaced by the vocab entries."""
    dtm = [[2, 1], [3, 2]]
    docs = [['cat', 'cat', 2], ['cat', 'cat', 'cat', 2, 2]]
    gen_docs = utils.docs_from_document_term_matrix(dtm, vocab=['cat', 2])
    assert gen_docs == docs
def test_docs_from_dtm_with_gaps():
    """Zero-count terms are skipped but token ids keep their positions."""
    dtm = [[2, 0, 1], [1, 1, 1]]
    docs = [[0, 0, 2], [0, 1, 2]]
    assert utils.docs_from_document_term_matrix(dtm) == docs
def test_docs_from_numpy_dtp():
    """A numpy DTM behaves the same as a list-of-lists DTM."""
    dtm = np.array([[2, 1], [3, 2]])
    docs = [[0, 0, 1], [0, 0, 0, 1, 1]]
    assert utils.docs_from_document_term_matrix(dtm) == docs
def test_docs_from_ldac_simple():
    """LDA-C formatted streams ('N id:count ...') parse into documents."""
    stream = StringIO()
    stream.write("2 0:2 1:1\n2 0:3 1:2")
    stream.seek(0) # rewind stream
    docs = [[0, 0, 1], [0, 0, 0, 1, 1]]
    assert utils.docs_from_ldac(stream) == docs
    stream = StringIO()
    stream.write("2 1:1 0:2\n3 2:1 0:3 1:1")
    stream.seek(0) # rewind stream
    docs = [[1, 0, 0], [2, 0, 0, 0, 1]]
    assert utils.docs_from_ldac(stream) == docs
@raises(AssertionError)
def test_bad_ldac_data():
    """A declared term count that mismatches the entries must raise."""
    stream = StringIO()
    stream.write("2 0:1")
    stream.seek(0) # rewind stream
    utils.docs_from_ldac(stream)
def test_num_terms():
    """num_terms is the max token id + 1 across all documents."""
    docs = [[0, 1, 2], [1, 2, 3]]
    assert utils.num_terms(docs) == 4
def test_row_major_form_conversion():
    """Row-major round trip reproduces the original ragged array."""
    l = [[1, 2, 3, 4], [1, 2, 3], [1, 2, 3, 4, 5, 6]]
    rmf = utils.ragged_array_to_row_major_form(l)
    assert utils.row_major_form_to_ragged_array(*rmf) == l
| StarcoderdataPython |
1623066 | import redis
from board import Board
class RedisBoard(Board):
    """A message board backed by Redis."""
    def __init__(self, *args, **kwargs):
        """Creates the Redis connection (args are forwarded to redis.Redis)."""
        self.redis = redis.Redis(*args, **kwargs)
        # Cache for the owner-specific queue key built lazily by _key().
        # The original never initialized this, so _key() raised
        # AttributeError on its first call.
        self.key = None
    def set_owner(self, owner):
        self.owner = owner
        # Invalidate the cached key so _key() rebuilds it for the new owner.
        self.key = None
    def post_message(self, message):
        """This will append the message to the list. (Not implemented yet.)"""
        pass
    def get_message(self):
        """This will pop a message off the list. (Not implemented yet.)"""
        pass
    def _key(self):
        # Lazily build and cache the Redis list key for the current owner.
        if not self.key:
            self.key = "%s-queue" % self.owner
        return self.key
| StarcoderdataPython |
1790834 | u = 0.5*sin(2*pi*x)+1
| StarcoderdataPython |
1662938 | <filename>libraries/stats/widgets/RedRvar_test.py<gh_stars>1-10
"""F Test widget
.. helpdoc::
Performs the F-test on connected data.
"""
"""<widgetXML>
<name>F Test</name>
<icon>default.png</icon>
<summary>Performs an F test.</summary>
<tags>
<tag priority="10">
Stats
</tag>
</tags>
<author>
<authorname>Red-R Core Development Team</authorname>
<authorcontact>www.red-r.org</authorcontact>
</author>
</widgetXML>
"""
"""
<name>F Test</name>
<RFunctions>stats:var.test</RFunctions>
<tags>Stats</tags>
"""
from OWRpy import *
import redRGUI, signals
import redRGUI
class RedRvar_test(OWRpy):
    """Red-R widget wrapping R's ``stats::var.test`` (F test for equality
    of variances).  Two RVector inputs (x, y) are compared; options are
    collected from the GUI and injected into the generated R call."""
    def __init__(self, **kwargs):
        OWRpy.__init__(self, **kwargs)
        self.setRvariableNames(["var.test"])
        self.data = {}
        self.RFunctionParam_y = ''
        self.RFunctionParam_x = ''
        """.. rrsignals::
            :description: `X data`"""
        self.inputs.addInput('id1', 'x', signals.base.RVector, self.processx)
        """.. rrsignals::
            :description: `Y data`"""
        self.inputs.addInput('id0', 'y', signals.base.RVector, self.processy)
        """.. rrgui::
            :description: `alternative.`
        """
        self.RFunctionParamalternative_comboBox = redRGUI.base.comboBox(self.controlArea, label = "alternative:", items = ["two.sided","less","greater"])
        """.. rrgui::
            :description: `ratio.`
        """
        self.RFunctionParamratio_lineEdit = redRGUI.base.lineEdit(self.controlArea, label = "ratio:", text = '1')
        """.. rrgui::
            :description: `Confidence Interval.`
        """
        self.RFunctionParamconf_level_lineEdit = redRGUI.base.lineEdit(self.controlArea, label = 'Confidence Interval:', text = '0.95')
        """.. rrgui::
            :description: `Commit.`
        """
        self.commit = redRGUI.base.commitButton(self.bottomAreaRight, "Commit", callback = self.commitFunction,
                                                processOnInput=True)
        self.RoutputWindow = redRGUI.base.textEdit(self.controlArea, label = "RoutputWindow")
    def processy(self, data):
        # Store incoming y data and optionally recompute immediately.
        if data:
            self.RFunctionParam_y=data.getData()
            if self.commit.processOnInput():
                self.commitFunction()
        else:
            self.RFunctionParam_y=''
    def processx(self, data):
        # Store incoming x data and optionally recompute immediately.
        if data:
            self.RFunctionParam_x=data.getData()
            if self.commit.processOnInput():
                self.commitFunction()
        else:
            self.RFunctionParam_x=''
    def commitFunction(self):
        # Run var.test in R with the GUI-selected options and show the
        # captured text output in the widget.  Bails out silently unless
        # both inputs are connected.
        if unicode(self.RFunctionParam_y) == '': return
        if unicode(self.RFunctionParam_x) == '': return
        injection = []
        string = 'alternative='+unicode(self.RFunctionParamalternative_comboBox.currentText())+''
        injection.append(string)
        if unicode(self.RFunctionParamratio_lineEdit.text()) != '':
            string = 'ratio='+unicode(self.RFunctionParamratio_lineEdit.text())+''
            injection.append(string)
        if unicode(self.RFunctionParamconf_level_lineEdit.text()) != '':
            try:
                # NOTE(review): bare except below swallows all errors, not
                # just ValueError from float(); confirm this is intended.
                float(self.RFunctionParamconf_level_lineEdit.text())
                string = 'conf.level = '+unicode(self.RFunctionParamconf_level_lineEdit.text())
                injection.append(string)
            except:
                self.status.setText('Confidence Interval not a number')
        inj = ','.join(injection)
        self.R(self.Rvariables['var.test']+'<-var.test(y='+unicode(self.RFunctionParam_y)+',x='+unicode(self.RFunctionParam_x)+','+inj+')')
        self.R('txt<-capture.output('+self.Rvariables['var.test']+')')
        self.RoutputWindow.clear()
        tmp = self.R('paste(txt, collapse ="\n")')
        self.RoutputWindow.insertHtml('<br><pre>'+tmp+'</pre>')
| StarcoderdataPython |
1651316 | <filename>core/emri/data_proc.py
import SimpleITK as sitk # For loading the dataset
import numpy as np # For data manipulation
import glob # For populating the list of files
from scipy.ndimage import zoom # For resizing
import re # For parsing the filenames (to know their modality)
import cv2 # For processing images
import matplotlib.pyplot as plt
from matplotlib import colors
import math
from copy import deepcopy, copy
import pandas as pd
from ecf import *
import random
from itertools import product
from operator import itemgetter
import itertools
from scipy.ndimage import rotate
import sys
from .eclogging import load_logger
logger = load_logger()
def read_img(img_path):
    """Load a NIfTI (.nii/.nii.gz) volume from *img_path* and return it as
    a numpy array."""
    image = sitk.ReadImage(img_path)
    return sitk.GetArrayFromImage(image)
def read_nii(img_seg_dict, types=('t1ce', 'seg')):
    """Read every path in *img_seg_dict* ({key: nii path}) into a numpy
    array and return {key: array}.  ``types`` is currently unused and kept
    only for interface compatibility."""
    return {key: sitk.GetArrayFromImage(sitk.ReadImage(path))
            for key, path in img_seg_dict.items()}
def resize(img, shape, mode='constant', orig_shape=None, order=3):
    """
    Wrapper for scipy.ndimage.zoom suited for MRI images.

    img        : 3-D array to resample.
    shape      : target (z, y, x) shape.
    mode       : how zoom() fills values past the array edges.
    orig_shape : shape to compute the zoom factors from; defaults to img.shape.
    order      : spline interpolation order (0 = nearest, 3 = cubic).
    """
    if orig_shape is None:  # 'is None', not '== None'
        orig_shape = img.shape
    assert len(shape) == 3, "Can not have more than 3 dimensions"
    # Per-axis scale factor between the source and target grids.
    factors = tuple(target / orig for target, orig in zip(shape, orig_shape))
    # Resize to the given shape
    return zoom(img, factors, mode=mode, order=order)
def preprocess(img, out_shape=None, orig_shape=None, normalization=True, only_nonzero_element=True):
    """
    Preprocess an image: optionally resample to *out_shape*, then z-score
    normalize it (by default using only the nonzero voxels' statistics and
    leaving zero voxels untouched).  ``orig_shape`` is unused.
    """
    if out_shape is not None:
        img = resize(img, out_shape, mode='constant', orig_shape=img.shape)
    if not normalization:
        return img
    if only_nonzero_element:
        # Statistics over nonzero voxels only; zeros stay zero.
        mask = img != 0
        nonzero = img[mask]
        return np.where(mask, (img - nonzero.mean()) / nonzero.std(), img)
    return (img - img.mean()) / img.std()
def preprocess_label(img, out_shape=None, mode='nearest', closing=False):
    """
    Split a segmentation volume into its three binary label masks, stacked
    on axis 0: NCR/NET (label 1), peritumoral edema ED (label 2) and
    GD-enhancing tumor ET (label 4).
    """
    masks = [img == 1, img == 2, img == 4]  # NCR/NET, ED, ET
    if out_shape is not None:
        masks = [resize(m, out_shape, mode=mode) for m in masks]
    if closing:
        # Morphological closing fills small holes, slice by slice.
        kernel = np.ones((3, 3))
        for m in masks:
            for z in range(len(m)):
                m[z] = cv2.morphologyEx(m[z], cv2.MORPH_CLOSE, kernel, iterations=3)
    return np.array(masks, dtype=np.uint8)
def preprocess_label_(img, out_shape=None, mode='nearest', label='all', closing=False, zoom_order=3):
    """
    The sub-regions considered for evaluation are: 1) the "enhancing tumor" (ET), 2) the "tumor core" (TC/1+4), and 3) the "whole tumor" (WT/1+2+4)
    label : 'all' or list of label number. Annotations comprise the GD-enhancing tumor (ET — label 4), the peritumoral edema (ED — label 2), and the necrotic and non-enhancing tumor core (NCR/NET — label 1)

    Returns a uint8 array with a leading channel axis: shape (1, ...) for
    'all'/list labels, or (3, ...) [ET, TC, WT] for label='Brats'.
    ``closing`` is accepted but never used here.
    """
    # Select labels.
    # NOTE(review): the len() checks run before the 'Brats' comparison, so a
    # *string* label of length 1 or 2 would be mishandled — confirm callers
    # only pass 'all', 'Brats', or lists of ints.
    if label == 'all':
        img = np.where(img > 0, 1, img)
    elif len(label) == 2:
        img = np.where((img == label[0]) | (img == label[1]), 1, 0)
    elif len(label) == 1:
        img = np.where(img == label, 1, 0)
    elif label == 'Brats':
        et = (img == 4) # enhancing tumor
        tc = (img == 1) | (img==4) # tumor core
        wt = (img == 1) | (img==2) | (img==4) # whole tumor
        if out_shape is not None:
            et = resize(et, out_shape, mode=mode, order=zoom_order)
            tc = resize(tc, out_shape, mode=mode, order=zoom_order)
            wt = resize(wt, out_shape, mode=mode, order=zoom_order)
        return np.array([et, tc, wt], dtype=np.uint8)
    else:
        raise Exception("Label argument is not valid.")
    if out_shape is not None:
        img = resize(img, out_shape, mode=mode, order=zoom_order)
    return np.array([img], dtype = np.uint8)
def prepare_data(data_w_num, resize_output_shape = None,
                 only_nonzero=False, label_criteria = None, label_zoom_order = 0, img_types = None):
    """Preprocess one (index, images-dict) pair into (index, data, labels).

    The modalities listed in img_types[:-1] are normalized/resampled with
    preprocess(); the 'seg' entry is converted with preprocess_label_().
    On any failure the exception text is returned in place of the arrays so
    the (multiprocessing) caller can detect and skip the sample.
    """
    i, imgs = data_w_num
    try:
        d = np.array(
            [preprocess(imgs[m], resize_output_shape, only_nonzero_element=only_nonzero) for m in img_types[:-1]],
            dtype=np.float32)
        l = preprocess_label_(imgs['seg'], resize_output_shape, zoom_order=label_zoom_order, label=label_criteria)
        # Print the progress bar
        # increment()
        # print(f'\r{counter.value}/{total} has been completed.', end='')
        return i, d, l
    except Exception as e:
        print(f'Something went wrong with {i}th file, skipping...\n Exception:\n{str(e)}')
        return i, str(e), str(e)
def find_found_path(target, search_string='Nothing'):
    """Return the indices of entries in *target* that do NOT match
    *search_string*.  With the default pattern this keeps the paths that
    lack the 'Nothing was found' placeholder, i.e. the ones actually found
    by search_file_w_kw()."""
    return [idx for idx, entry in enumerate(target)
            if re.search(search_string, entry) is None]
def search_file_w_kw(target, keyword, path_pattern='(.+)/.*?pre/.*?$'):
    """
    keyword : a list of keywords.
    path_pattern = a regex pattern which has a group of path in which you want to search files.

    For each path in *target*, extracts the directory via *path_pattern*,
    globs it, and keeps files matching '(kw1|kw2|...).*\\.nii\\.gz'.
    Returns (r, cl): r is a list of match-lists (a placeholder string when
    nothing matched), cl collects [index, matches] for ambiguous (>1) hits.
    """
    r=[]
    cl=[]
    # Alternation of all keywords, anchored to .nii.gz filenames.
    k=f"(?:{str.join('|', keyword)}).*\.nii\.gz"
    for c, i in enumerate(target):
        re_r1 = re.search(path_pattern, i).group(1)
        # print(re_r1)
        gr1 = glob.glob(f"{re_r1}/*")
        # print(gr1)
        ir1 = list(filter(lambda x:re.search(k, x), gr1))
        if len(ir1) == 0:
            ir = [f'Nothing was found. path:{re_r1}']
        else:
            if len(ir1) != 1: cl.append([c, ir1])
            ir=ir1
        r.append(ir)
    # r=list(itertools.chain(*r))
    return r, cl
def crop_image_(img, crop_size=None, mode='center'):
    """
    Center-crop a (channels, h, w, d) volume to *crop_size*.

    img       : 4-D array (channels, height, width, depth).
    crop_size : target (channels, h, w, d); must not exceed img.shape.
    mode      : only 'center' cropping is implemented.

    Returns a float64 array (matching the original np.empty buffer dtype).
    """
    assert crop_size is not None, "Crop size should be passed."
    c, h, w, d = img.shape
    cc, ch, cw, cd = crop_size
    # One centered slice per spatial axis replaces the original per-channel
    # copy loop (and the leftover debug print of img.shape).
    hs = slice(h // 2 - ch // 2, h // 2 + ch // 2)
    ws = slice(w // 2 - cw // 2, w // 2 + cw // 2)
    ds = slice(d // 2 - cd // 2, d // 2 + cd // 2)
    return np.asarray(img[:cc, hs, ws, ds], dtype=np.float64)
def output_even(x):
    """Round *x* up to the next even number; even values pass through."""
    return x if x % 2 == 0 else x + 1
def auto_crop(data_and_label, mode=None, buffer_size=10, debug=False):
    """
    return cropped [img, label]
    data_and_label : list of 3d-array numpy image. e.g. [data, labels]
    crop area = (x of estimated brain area + 2 * buffer_size) * (y of estimated brain area + 2 * buffer_size)

    The brain bounding box is estimated per image as the extent of voxels
    above each image's minimum, padded by buffer_size (clamped at 0); if
    the per-image boxes disagree, their union is used.  Also returns the
    [z, y, x] ranges that were applied.  ``mode`` is unused.
    """
    imgs = data_and_label[:-1]
    label = data_and_label[-1]
    rl = [] # ranges_list
    for img in imgs:
        # Voxels above the image minimum are treated as foreground.
        p = np.where(img != img.min())
        z_range=[p[0].min(), p[0].max()]
        y_range=[p[1].min(), p[1].max()]
        x_range=[p[2].min(), p[2].max()]
        # Centers/extents below are computed but currently unused.
        cz=(z_range[1] + z_range[0]) // 2
        cy=(y_range[1] + y_range[0]) // 2
        cx=(x_range[1] + x_range[0]) // 2
        rz=z_range[1] - z_range[0]
        ry=y_range[1] - y_range[0]
        rx=x_range[1] - x_range[0]
        bs=buffer_size
        # Pad each range by the buffer, clamping the lower bound at 0.
        z_range = [i if i>=0 else 0 for i in [z_range[0] - bs, z_range[1] + bs]]
        y_range = [i if i>=0 else 0 for i in [y_range[0] - bs, y_range[1] + bs]]
        x_range = [i if i>=0 else 0 for i in [x_range[0] - bs, x_range[1] + bs]]
        rl.append([z_range, y_range, x_range])
    if rl.count(rl[0]) == len(rl):
        z_range, y_range, x_range = rl[0] ; logger.debug(f"ranges are same.")
    else:
        # Union of the per-image boxes: min of lower bounds, max of uppers.
        z_range, y_range, x_range = list(zip([min([r[i][0] for r in rl]) for i in range(3)],
                                             [max([r[i][1] for r in rl]) for i in range(3)])) ; logger.debug(f"ranges are different.")
    if debug:
        print('z_range: ', z_range, 'y_range: ' , y_range, 'x_range: ', x_range)
    r_imgs = [img[z_range[0] : z_range[1], y_range[0] : y_range[1], x_range[0] : x_range[1]] for img in imgs]
    label = label[z_range[0] : z_range[1], y_range[0] : y_range[1], x_range[0] : x_range[1]]
    return [*r_imgs, label], [z_range, y_range, x_range]
def crop_image(img, crop_size=None, mode='center'):
    """
    Center-crop a (channels, h, w, d) volume to *crop_size*.

    Duplicate of crop_image_(); kept for backward compatibility.  The
    leftover debug print of img.shape has been removed and the per-channel
    copy loop replaced by a single slice.  Returns float64 (matching the
    original np.empty buffer dtype).
    """
    assert crop_size is not None, "Crop size should be passed."
    c, h, w, d = img.shape
    cc, ch, cw, cd = crop_size
    hs = slice(h // 2 - ch // 2, h // 2 + ch // 2)
    ws = slice(w // 2 - cw // 2, w // 2 + cw // 2)
    ds = slice(d // 2 - cd // 2, d // 2 + cd // 2)
    return np.asarray(img[:cc, hs, ws, ds], dtype=np.float64)
| StarcoderdataPython |
129821 | <filename>src/pyglue/DocStrings/ExceptionMissingFile.py
class ExceptionMissingFile:
    """
    An exception class for errors detected at runtime, thrown when OCIO cannot
    find a file that is expected to exist. This is provided as a custom type to
    distinguish cases where one wants to continue looking for missing files,
    but wants to properly fail for other error conditions.
    """
    def __init__(self):
        # Docstring-only stub; the real implementation is provided by the
        # PyOpenColorIO C++ binding that consumes these DocStrings.
        pass
| StarcoderdataPython |
167177 | import copy
import os
import torch
import torchvision
import warnings
import math
import utils.misc
import numpy as np
import os.path as osp
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import models.modified_resnet_cifar as modified_resnet_cifar
import models.modified_resnetmtl_cifar as modified_resnetmtl_cifar
import models.modified_linear as modified_linear
from PIL import Image
from torch.optim import lr_scheduler
from torchvision import datasets, transforms
from tensorboardX import SummaryWriter
from utils.compute_features import compute_features
from utils.process_mnemonics import process_mnemonics
from utils.compute_accuracy import compute_accuracy
from trainer.incremental import incremental_train_and_eval
from utils.misc import *
from utils.process_fp import process_inputs_fp
warnings.filterwarnings('ignore')
class Trainer(object):
    """Class-incremental trainer for CIFAR-100 with a cosine-normalized classifier.

    Trains a first group of ``nb_cl_fg`` classes, then adds ``nb_cl`` classes per
    incremental phase, distilling from the previous model and keeping a
    herding-selected exemplar set per old class.
    """

    def __init__(self, the_args):
        """Store CLI args and set up log dirs, transforms, datasets and networks."""
        self.args = the_args
        self.log_dir = './logs/'
        if not osp.exists(self.log_dir):
            os.mkdir(self.log_dir)
        self.save_path = self.log_dir + self.args.dataset + '_nfg' + str(self.args.nb_cl_fg) + '_ncls' + str(self.args.nb_cl) + '_nproto' + str(self.args.nb_protos)
        self.save_path += '_' + self.args.method
        if not osp.exists(self.save_path):
            os.mkdir(self.save_path)
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # CIFAR-100 per-channel mean/std used for normalization.
        self.transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023))])
        self.transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023))])
        # NOTE(review): the .train_data/.train_labels/.test_data/.test_labels
        # attribute accesses below rely on an old torchvision dataset API
        # (newer versions expose .data/.targets) -- confirm the pinned version.
        self.trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=self.transform_train)
        self.testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=self.transform_test)
        self.evalset = torchvision.datasets.CIFAR100(root='./data', train=False, download=False, transform=self.transform_test)
        self.network = modified_resnet_cifar.resnet32
        self.network_mtl = modified_resnetmtl_cifar.resnetmtl32
        # LR decay milestones; the first phase always runs 160 epochs.
        self.lr_strat_first_phase = [int(160*0.5), int(160*0.75)]
        self.lr_strat = [int(self.args.epochs*0.5), int(self.args.epochs*0.75)]
        self.dictionary_size = self.args.dictionary_size

    def map_labels(self, order_list, Y_set):
        """Map original class labels to their index in the shuffled class order."""
        map_Y = []
        for idx in Y_set:
            map_Y.append(order_list.index(idx))
        map_Y = np.array(map_Y)
        return map_Y

    def train(self):
        """Run the full incremental-learning experiment over all runs and phases."""
        self.train_writer = SummaryWriter(logdir=self.save_path)
        dictionary_size = self.dictionary_size
        # Accuracy tables indexed as [phase, protocol, run].
        top1_acc_list_cumul = np.zeros((int(self.args.num_classes/self.args.nb_cl), 4, self.args.nb_runs))
        top1_acc_list_ori = np.zeros((int(self.args.num_classes/self.args.nb_cl), 4, self.args.nb_runs))
        X_train_total = np.array(self.trainset.train_data)
        Y_train_total = np.array(self.trainset.train_labels)
        X_valid_total = np.array(self.testset.test_data)
        Y_valid_total = np.array(self.testset.test_labels)
        np.random.seed(1993)  # fixed seed so the generated class orders are reproducible
        for iteration_total in range(self.args.nb_runs):
            # Load (or generate once and cache) the random class order for this run.
            order_name = osp.join(self.save_path, "seed_{}_{}_order_run_{}.pkl".format(1993, self.args.dataset, iteration_total))
            print("Order name:{}".format(order_name))
            if osp.exists(order_name):
                print("Loading orders")
                order = utils.misc.unpickle(order_name)
            else:
                print("Generating orders")
                order = np.arange(self.args.num_classes)
                np.random.shuffle(order)
                utils.misc.savepickle(order, order_name)
            order_list = list(order)
            print(order_list)
            np.random.seed(self.args.random_seed)
            X_valid_cumuls = []
            X_protoset_cumuls = []
            X_train_cumuls = []
            Y_valid_cumuls = []
            Y_protoset_cumuls = []
            Y_train_cumuls = []
            # Herding ranks per (phase group, exemplar slot, class within group).
            alpha_dr_herding = np.zeros((int(self.args.num_classes/self.args.nb_cl),dictionary_size,self.args.nb_cl),np.float32)
            # All training images grouped by class, following the shuffled order.
            prototypes = np.zeros((self.args.num_classes,dictionary_size,X_train_total.shape[1],X_train_total.shape[2],X_train_total.shape[3]))
            for orde in range(self.args.num_classes):
                prototypes[orde,:,:,:,:] = X_train_total[np.where(Y_train_total==order[orde])]
            start_iter = int(self.args.nb_cl_fg/self.args.nb_cl)-1
            for iteration in range(start_iter, int(self.args.num_classes/self.args.nb_cl)):
                if iteration == start_iter:
                    # First phase: train a plain resnet32 from scratch.
                    last_iter = 0
                    tg_model = self.network(num_classes=self.args.nb_cl_fg)
                    in_features = tg_model.fc.in_features
                    out_features = tg_model.fc.out_features
                    print("Out_features:", out_features)
                    ref_model = None
                    free_model = None
                    ref_free_model = None
                elif iteration == start_iter+1:
                    # First incremental phase: split the cosine classifier into an
                    # old-class head (fc1) and a new-class head (fc2).
                    last_iter = iteration
                    ref_model = copy.deepcopy(tg_model)
                    print("Fusion Mode: "+self.args.fusion_mode)
                    tg_model = self.network(num_classes=self.args.nb_cl_fg)
                    ref_dict = ref_model.state_dict()
                    tg_dict = tg_model.state_dict()
                    tg_dict.update(ref_dict)
                    tg_model.load_state_dict(tg_dict)
                    tg_model.to(self.device)
                    in_features = tg_model.fc.in_features
                    out_features = tg_model.fc.out_features
                    print("Out_features:", out_features)
                    new_fc = modified_linear.SplitCosineLinear(in_features, out_features, self.args.nb_cl)
                    new_fc.fc1.weight.data = tg_model.fc.weight.data
                    new_fc.sigma.data = tg_model.fc.sigma.data
                    tg_model.fc = new_fc
                    lamda_mult = out_features*1.0 / self.args.nb_cl
                else:
                    # Later phases: fold fc2 into fc1 and append a fresh fc2.
                    last_iter = iteration
                    ref_model = copy.deepcopy(tg_model)
                    in_features = tg_model.fc.in_features
                    out_features1 = tg_model.fc.fc1.out_features
                    out_features2 = tg_model.fc.fc2.out_features
                    print("Out_features:", out_features1+out_features2)
                    new_fc = modified_linear.SplitCosineLinear(in_features, out_features1+out_features2, self.args.nb_cl)
                    new_fc.fc1.weight.data[:out_features1] = tg_model.fc.fc1.weight.data
                    new_fc.fc1.weight.data[out_features1:] = tg_model.fc.fc2.weight.data
                    new_fc.sigma.data = tg_model.fc.sigma.data
                    tg_model.fc = new_fc
                    lamda_mult = (out_features1+out_features2)*1.0 / (self.args.nb_cl)
                # Scale the distillation weight by the old/new class ratio.
                if iteration > start_iter:
                    cur_lamda = self.args.lamda * math.sqrt(lamda_mult)
                else:
                    cur_lamda = self.args.lamda
                # Select the train/test samples belonging to this phase's classes.
                actual_cl = order[range(last_iter*self.args.nb_cl,(iteration+1)*self.args.nb_cl)]
                indices_train_10 = np.array([i in order[range(last_iter*self.args.nb_cl,(iteration+1)*self.args.nb_cl)] for i in Y_train_total])
                indices_test_10 = np.array([i in order[range(last_iter*self.args.nb_cl,(iteration+1)*self.args.nb_cl)] for i in Y_valid_total])
                X_train = X_train_total[indices_train_10]
                X_valid = X_valid_total[indices_test_10]
                X_valid_cumuls.append(X_valid)
                X_train_cumuls.append(X_train)
                X_valid_cumul = np.concatenate(X_valid_cumuls)
                X_train_cumul = np.concatenate(X_train_cumuls)
                Y_train = Y_train_total[indices_train_10]
                Y_valid = Y_valid_total[indices_test_10]
                Y_valid_cumuls.append(Y_valid)
                Y_train_cumuls.append(Y_train)
                Y_valid_cumul = np.concatenate(Y_valid_cumuls)
                Y_train_cumul = np.concatenate(Y_train_cumuls)
                if iteration == start_iter:
                    # Keep the first-phase test set for "ori" accuracy reporting.
                    X_valid_ori = X_valid
                    Y_valid_ori = Y_valid
                else:
                    # Mix the stored exemplars into the training set, optionally
                    # oversampling them via weighted random sampling (rs_ratio).
                    X_protoset = np.concatenate(X_protoset_cumuls)
                    Y_protoset = np.concatenate(Y_protoset_cumuls)
                    if self.args.rs_ratio > 0:
                        scale_factor = (len(X_train) * self.args.rs_ratio) / (len(X_protoset) * (1 - self.args.rs_ratio))
                        rs_sample_weights = np.concatenate((np.ones(len(X_train)), np.ones(len(X_protoset))*scale_factor))
                        rs_num_samples = int(len(X_train) / (1 - self.args.rs_ratio))
                        print("X_train:{}, X_protoset:{}, rs_num_samples:{}".format(len(X_train), len(X_protoset), rs_num_samples))
                    X_train = np.concatenate((X_train,X_protoset),axis=0)
                    Y_train = np.concatenate((Y_train,Y_protoset))
                print('Batch of classes number {0} arrives'.format(iteration+1))
                map_Y_train = np.array([order_list.index(i) for i in Y_train])
                map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
                is_start_iteration = (iteration == start_iter)
                if iteration > start_iter:
                    # Weight imprinting: initialize each new class's fc2 row with
                    # the mean L2-normalized feature of its training images, scaled
                    # to the average old-class embedding norm.
                    old_embedding_norm = tg_model.fc.fc1.weight.data.norm(dim=1, keepdim=True)
                    average_old_embedding_norm = torch.mean(old_embedding_norm, dim=0).to('cpu').type(torch.DoubleTensor)
                    tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
                    num_features = tg_model.fc.in_features
                    novel_embedding = torch.zeros((self.args.nb_cl, num_features))
                    for cls_idx in range(iteration*self.args.nb_cl, (iteration+1)*self.args.nb_cl):
                        cls_indices = np.array([i == cls_idx for i in map_Y_train])
                        assert(len(np.where(cls_indices==1)[0])==dictionary_size)
                        self.evalset.test_data = X_train[cls_indices].astype('uint8')
                        self.evalset.test_labels = np.zeros(self.evalset.test_data.shape[0])
                        evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size, shuffle=False, num_workers=self.args.num_workers)
                        num_samples = self.evalset.test_data.shape[0]
                        cls_features = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
                        norm_features = F.normalize(torch.from_numpy(cls_features), p=2, dim=1)
                        cls_embedding = torch.mean(norm_features, dim=0)
                        novel_embedding[cls_idx-iteration*self.args.nb_cl] = F.normalize(cls_embedding, p=2, dim=0) * average_old_embedding_norm
                    tg_model.to(self.device)
                    tg_model.fc.fc2.weight.data = novel_embedding.to(self.device)
                self.trainset.train_data = X_train.astype('uint8')
                self.trainset.train_labels = map_Y_train
                if iteration > start_iter and self.args.rs_ratio > 0 and scale_factor > 1:
                    print("Weights from sampling:", rs_sample_weights)
                    index1 = np.where(rs_sample_weights>1)[0]
                    index2 = np.where(map_Y_train<iteration*self.args.nb_cl)[0]
                    assert((index1==index2).all())
                    train_sampler = torch.utils.data.sampler.WeightedRandomSampler(rs_sample_weights, rs_num_samples)
                    trainloader = torch.utils.data.DataLoader(self.trainset, batch_size=self.args.train_batch_size, shuffle=False, sampler=train_sampler, num_workers=self.args.num_workers)
                else:
                    trainloader = torch.utils.data.DataLoader(self.trainset, batch_size=self.args.train_batch_size,
                        shuffle=True, num_workers=self.args.num_workers)
                self.testset.test_data = X_valid_cumul.astype('uint8')
                self.testset.test_labels = map_Y_valid_cumul
                testloader = torch.utils.data.DataLoader(self.testset, batch_size=self.args.test_batch_size,
                    shuffle=False, num_workers=self.args.num_workers)
                print('Max and min of train labels: {}, {}'.format(min(map_Y_train), max(map_Y_train)))
                print('Max and min of valid labels: {}, {}'.format(min(map_Y_valid_cumul), max(map_Y_valid_cumul)))
                ckp_name = osp.join(self.save_path, 'run_{}_iteration_{}_model.pth'.format(iteration_total, iteration))
                ckp_name_free = osp.join(self.save_path, 'run_{}_iteration_{}_free_model.pth'.format(iteration_total, iteration))
                print('Checkpoint name:', ckp_name)
                if iteration==start_iter and self.args.resume_fg:
                    print("Loading first group models from checkpoint")
                    tg_model = torch.load(self.args.ckpt_dir_fg)
                elif self.args.resume and os.path.exists(ckp_name):
                    print("Loading models from checkpoint")
                    tg_model = torch.load(ckp_name)
                else:
                    if iteration > start_iter:
                        # Freeze the old-class head fc1 (lr 0, no weight decay).
                        ref_model = ref_model.to(self.device)
                        ignored_params = list(map(id, tg_model.fc.fc1.parameters()))
                        base_params = filter(lambda p: id(p) not in ignored_params, tg_model.parameters())
                        # FIX: this filter was duplicated on two consecutive lines;
                        # the redundant second application was removed.
                        base_params = filter(lambda p: p.requires_grad, base_params)
                        tg_params_new = [{'params': base_params, 'lr': self.args.base_lr2, 'weight_decay': self.args.custom_weight_decay}, {'params': tg_model.fc.fc1.parameters(), 'lr': 0, 'weight_decay': 0}]
                        tg_model = tg_model.to(self.device)
                        tg_optimizer = optim.SGD(tg_params_new, lr=self.args.base_lr2, momentum=self.args.custom_momentum, weight_decay=self.args.custom_weight_decay)
                    else:
                        tg_params = tg_model.parameters()
                        tg_model = tg_model.to(self.device)
                        tg_optimizer = optim.SGD(tg_params, lr=self.args.base_lr1, momentum=self.args.custom_momentum, weight_decay=self.args.custom_weight_decay)
                    if iteration > start_iter:
                        tg_lr_scheduler = lr_scheduler.MultiStepLR(tg_optimizer, milestones=self.lr_strat, gamma=self.args.lr_factor)
                    else:
                        tg_lr_scheduler = lr_scheduler.MultiStepLR(tg_optimizer, milestones=self.lr_strat_first_phase, gamma=self.args.lr_factor)
                    print("Incremental train")
                    if iteration > start_iter:
                        tg_model = incremental_train_and_eval(self.args.epochs, tg_model, ref_model, free_model, ref_free_model, tg_optimizer, tg_lr_scheduler, trainloader, testloader, iteration, start_iter, cur_lamda, self.args.dist, self.args.K, self.args.lw_mr)
                    else:
                        tg_model = incremental_train_and_eval(self.args.epochs, tg_model, ref_model, free_model, ref_free_model, tg_optimizer, tg_lr_scheduler, trainloader, testloader, iteration, start_iter, cur_lamda, self.args.dist, self.args.K, self.args.lw_mr)
                    torch.save(tg_model, ckp_name)
                # Exemplar budget: fixed per class (dynamic) or a fixed total
                # split evenly across all classes seen so far.
                if self.args.dynamic_budget:
                    nb_protos_cl = self.args.nb_protos
                else:
                    nb_protos_cl = int(np.ceil(self.args.nb_protos*100./self.args.nb_cl/(iteration+1)))
                tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
                num_features = tg_model.fc.in_features
                # Herding: greedily rank each class's images so the mean of the
                # selected features approaches the true class mean.
                for iter_dico in range(last_iter*self.args.nb_cl, (iteration+1)*self.args.nb_cl):
                    self.evalset.test_data = prototypes[iter_dico].astype('uint8')
                    self.evalset.test_labels = np.zeros(self.evalset.test_data.shape[0])
                    evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
                        shuffle=False, num_workers=self.args.num_workers)
                    num_samples = self.evalset.test_data.shape[0]
                    mapped_prototypes = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
                    D = mapped_prototypes.T
                    D = D/np.linalg.norm(D,axis=0)
                    mu = np.mean(D,axis=1)
                    index1 = int(iter_dico/self.args.nb_cl)
                    index2 = iter_dico % self.args.nb_cl
                    alpha_dr_herding[index1,:,index2] = alpha_dr_herding[index1,:,index2]*0
                    w_t = mu
                    iter_herding = 0
                    iter_herding_eff = 0
                    while not(np.sum(alpha_dr_herding[index1,:,index2]!=0)==min(nb_protos_cl,500)) and iter_herding_eff<1000:
                        tmp_t = np.dot(w_t,D)
                        ind_max = np.argmax(tmp_t)
                        iter_herding_eff += 1
                        if alpha_dr_herding[index1,ind_max,index2] == 0:
                            alpha_dr_herding[index1,ind_max,index2] = 1+iter_herding
                            iter_herding += 1
                        w_t = w_t+mu-D[:,ind_max]
                X_protoset_cumuls = []
                Y_protoset_cumuls = []
                # Rebuild the exemplar set and class means (iCaRL NCM) for all
                # classes seen so far; features are averaged over each image and
                # its horizontal flip ([:,:,:,::-1]).
                # NOTE(review): 64 is presumably the resnet32 feature size and 100
                # the total class count -- confirm before reusing elsewhere.
                class_means = np.zeros((64,100,2))
                for iteration2 in range(iteration+1):
                    for iter_dico in range(self.args.nb_cl):
                        current_cl = order[range(iteration2*self.args.nb_cl,(iteration2+1)*self.args.nb_cl)]
                        self.evalset.test_data = prototypes[iteration2*self.args.nb_cl+iter_dico].astype('uint8')
                        self.evalset.test_labels = np.zeros(self.evalset.test_data.shape[0]) #zero labels
                        evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
                            shuffle=False, num_workers=self.args.num_workers)
                        num_samples = self.evalset.test_data.shape[0]
                        mapped_prototypes = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
                        D = mapped_prototypes.T
                        D = D/np.linalg.norm(D,axis=0)
                        self.evalset.test_data = prototypes[iteration2*self.args.nb_cl+iter_dico][:,:,:,::-1].astype('uint8')
                        evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
                            shuffle=False, num_workers=self.args.num_workers)
                        mapped_prototypes2 = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
                        D2 = mapped_prototypes2.T
                        D2 = D2/np.linalg.norm(D2,axis=0)
                        alph = alpha_dr_herding[iteration2,:,iter_dico]
                        alph = (alph>0)*(alph<nb_protos_cl+1)*1.
                        X_protoset_cumuls.append(prototypes[iteration2*self.args.nb_cl+iter_dico,np.where(alph==1)[0]])
                        Y_protoset_cumuls.append(order[iteration2*self.args.nb_cl+iter_dico]*np.ones(len(np.where(alph==1)[0])))
                        alph = alph/np.sum(alph)
                        class_means[:,current_cl[iter_dico],0] = (np.dot(D,alph)+np.dot(D2,alph))/2
                        class_means[:,current_cl[iter_dico],0] /= np.linalg.norm(class_means[:,current_cl[iter_dico],0])
                        alph = np.ones(dictionary_size)/dictionary_size
                        class_means[:,current_cl[iter_dico],1] = (np.dot(D,alph)+np.dot(D2,alph))/2
                        class_means[:,current_cl[iter_dico],1] /= np.linalg.norm(class_means[:,current_cl[iter_dico],1])
                current_means = class_means[:, order[range(0,(iteration+1)*self.args.nb_cl)]]
                # NOTE(review): the loop below recomputes class_means identically
                # (minus the exemplar-set bookkeeping) and overwrites the values
                # above; it looks redundant but is kept to avoid behavior drift.
                class_means = np.zeros((64,100,2))
                for iteration2 in range(iteration+1):
                    for iter_dico in range(self.args.nb_cl):
                        current_cl = order[range(iteration2*self.args.nb_cl,(iteration2+1)*self.args.nb_cl)]
                        self.evalset.test_data = prototypes[iteration2*self.args.nb_cl+iter_dico].astype('uint8')
                        self.evalset.test_labels = np.zeros(self.evalset.test_data.shape[0]) #zero labels
                        evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
                            shuffle=False, num_workers=self.args.num_workers)
                        num_samples = self.evalset.test_data.shape[0]
                        mapped_prototypes = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
                        D = mapped_prototypes.T
                        D = D/np.linalg.norm(D,axis=0)
                        self.evalset.test_data = prototypes[iteration2*self.args.nb_cl+iter_dico][:,:,:,::-1].astype('uint8')
                        evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
                            shuffle=False, num_workers=self.args.num_workers)
                        mapped_prototypes2 = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
                        D2 = mapped_prototypes2.T
                        D2 = D2/np.linalg.norm(D2,axis=0)
                        alph = alpha_dr_herding[iteration2,:,iter_dico]
                        alph = (alph>0)*(alph<nb_protos_cl+1)*1.
                        alph = alph/np.sum(alph)
                        class_means[:,current_cl[iter_dico],0] = (np.dot(D,alph)+np.dot(D2,alph))/2
                        class_means[:,current_cl[iter_dico],0] /= np.linalg.norm(class_means[:,current_cl[iter_dico],0])
                        alph = np.ones(dictionary_size)/dictionary_size
                        class_means[:,current_cl[iter_dico],1] = (np.dot(D,alph)+np.dot(D2,alph))/2
                        class_means[:,current_cl[iter_dico],1] /= np.linalg.norm(class_means[:,current_cl[iter_dico],1])
                torch.save(class_means, osp.join(self.save_path, 'run_{}_iteration_{}_class_means.pth'.format(iteration_total, iteration)))
                current_means = class_means[:, order[range(0,(iteration+1)*self.args.nb_cl)]]
                is_start_iteration = (iteration == start_iter)
                # Evaluate on the first-phase classes only ("ori").
                map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori])
                print('Computing accuracy for first-phase classes')
                self.evalset.test_data = X_valid_ori.astype('uint8')
                self.evalset.test_labels = map_Y_valid_ori
                evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size, shuffle=False, num_workers=self.args.num_workers)
                ori_acc, fast_fc = compute_accuracy(tg_model, free_model, tg_feature_model, current_means, X_protoset_cumuls, Y_protoset_cumuls, evalloader, order_list, is_start_iteration=is_start_iteration, maml_lr=self.args.maml_lr, maml_epoch=self.args.maml_epoch)
                top1_acc_list_ori[iteration, :, iteration_total] = np.array(ori_acc).T
                self.train_writer.add_scalar('ori_acc/LwF', float(ori_acc[0]), iteration)
                self.train_writer.add_scalar('ori_acc/iCaRL', float(ori_acc[1]), iteration)
                # Evaluate on all classes seen so far ("cumul").
                map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
                print('Computing accuracy for all seen classes')
                self.evalset.test_data = X_valid_cumul.astype('uint8')
                self.evalset.test_labels = map_Y_valid_cumul
                evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size, shuffle=False, num_workers=self.args.num_workers)
                cumul_acc, _ = compute_accuracy(tg_model, free_model, tg_feature_model, current_means, X_protoset_cumuls, Y_protoset_cumuls, evalloader, order_list, is_start_iteration=is_start_iteration, fast_fc=fast_fc, maml_lr=self.args.maml_lr, maml_epoch=self.args.maml_epoch)
                top1_acc_list_cumul[iteration, :, iteration_total] = np.array(cumul_acc).T
                self.train_writer.add_scalar('cumul_acc/LwF', float(cumul_acc[0]), iteration)
                self.train_writer.add_scalar('cumul_acc/iCaRL', float(cumul_acc[1]), iteration)
            # Persist the per-run accuracy tables.
            torch.save(top1_acc_list_ori, osp.join(self.save_path, 'run_{}_top1_acc_list_ori.pth'.format(iteration_total)))
            torch.save(top1_acc_list_cumul, osp.join(self.save_path, 'run_{}_top1_acc_list_cumul.pth'.format(iteration_total)))
        # FIX: was `self.train_writer.close` (a bare attribute access that did
        # nothing) -- the SummaryWriter was never closed, risking lost events.
        self.train_writer.close()
| StarcoderdataPython |
1793359 | #!python3
#encoding:utf-8
import dataset
import time
import random
class Language:
    """Persists per-repository programming-language statistics into SQLite."""

    def __init__(self, db_path_repo):
        """Open (or create) the SQLite database file at *db_path_repo*."""
        self.db_repo = dataset.connect('sqlite:///' + db_path_repo)

    def insert(self, repo_id, langs):
        """Insert programming language information for each GitHub repository into the database.

        @params [dict] langs is [List Languages](https://developer.github.com/v3/repos/#list-languages) Response.
        """
        # FIX: the docstring above was a bare class-level string floating between
        # methods; it is now attached to the method it documents. Iterating
        # items() also avoids the repeated langs[lang] lookups.
        for lang, size in langs.items():
            self.db_repo['Languages'].insert(dict(
                RepositoryId=repo_id,
                Language=lang,
                Size=size))
            print("{0},{1},{2}".format(repo_id, lang, size))
| StarcoderdataPython |
1752421 | <reponame>WiitterSimithYU/-scollview-imissMusic
import uuid
import os
# Admin-platform image upload: build the storage path for uploaded images.
def user_directory_path(instance, filename):
    """Build an upload path under ``music/`` with a random 10-hex-char name.

    Django ``upload_to`` callable: *instance* is the model instance (unused
    here, required by the signature) and *filename* the original upload name.
    The original extension is preserved.
    """
    ext = filename.split('.')[-1]
    # FIX: the local previously shadowed the builtin `str`.
    random_name = '{}.{}'.format(uuid.uuid4().hex[:10], ext)
    return os.path.join("music", random_name)
# Save an image uploaded from the app.
def upload_image(f, song):
    """Save an uploaded file *f* (Django UploadedFile-like, exposing .chunks())
    under ``<cwd>/media/<song>``.

    NOTE(review): the base dir is derived from ``os.path.abspath("personico")``,
    i.e. effectively the current working directory -- confirm this matches the
    deployment layout.
    """
    file_name = song
    baseDir = os.path.dirname(os.path.abspath("personico"))
    jpgdir = os.path.join(baseDir, 'media')
    # Robustness fix: ensure the target directory exists before writing.
    os.makedirs(jpgdir, exist_ok=True)
    filename = os.path.join(jpgdir, file_name)
    print(filename)
    # Fix: use a context manager so the handle is closed even if a chunk
    # read/write raises (the original left the file open on error).
    with open(filename, 'wb') as fobj:
        for chrunk in f.chunks():
            fobj.write(chrunk)
# Wrap response data in the standard envelope.
def response_data(status, message, data):
    """Package an API response into the standard envelope dict."""
    envelope = dict(status=status, message=message, data=data)
    return envelope
3369381 | <reponame>stefan-feltmann/lands
"""
This file contains all possible Biome as separate classes.
"""
import re
def _un_camelize(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1 \2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1 \2', s1).lower()
class _BiomeMetaclass(type):
    """Metaclass that auto-registers every concrete Biome subclass.

    Each created class is stored in the shared ``_BiomeMetaclass.biomes`` dict
    under its un-camelized name (e.g. ``TropicalRainForest`` ->
    "tropical rain forest").

    NOTE(review): Biome opts in via a ``__metaclass__`` class attribute, which
    only takes effect on Python 2 -- on Python 3 this registry is never
    populated; confirm the target interpreter.
    """
    def __new__(mcs, name, parents, dct):
        # Lazily create the shared registry on the first class creation.
        if not hasattr(_BiomeMetaclass, "biomes"):
            _BiomeMetaclass.biomes = {}
        un_camelized_name = _un_camelize(name)
        created_class = super(_BiomeMetaclass, mcs).__new__(mcs, name,
                                                            parents, dct)
        # Register concrete biomes only; the abstract root Biome (whose direct
        # parent is `object`) is skipped.
        if object not in parents:
            _BiomeMetaclass.biomes[un_camelized_name] = created_class
        return created_class
class Biome(object):
    """Abstract base class for all biomes.

    Concrete subclasses are registered in ``_BiomeMetaclass.biomes`` under
    their un-camelized name.

    NOTE(review): the ``__metaclass__`` attribute only has an effect on
    Python 2; on Python 3 the registry is never populated.
    """
    __metaclass__ = _BiomeMetaclass

    @classmethod
    def by_name(cls, name):
        """Return a new instance of the biome registered under *name*."""
        if name not in _BiomeMetaclass.biomes:
            raise Exception("No biome named '%s'" % name)
        return _BiomeMetaclass.biomes[name]()

    @classmethod
    def all_names(cls):
        """Return all registered biome names, sorted alphabetically.

        BUG FIX: the previous ``keys().sort()`` returned None (list.sort()
        sorts in place) on Python 2 and raises AttributeError on Python 3.
        """
        return sorted(_BiomeMetaclass.biomes.keys())

    @classmethod
    def name(cls):
        """Return this biome's human-readable (un-camelized) name."""
        return _un_camelize(cls.__name__)
# Concrete Holdridge-style life-zone biomes. Each class is registered by
# _BiomeMetaclass under its un-camelized name (e.g. "tropical rain forest").
# NOTE(review): registration relies on the Python 2 ``__metaclass__`` hook on
# Biome; it does not run on Python 3.
# -- Water and polar zones --
class Ocean(Biome):
    pass
class Sea(Biome):
    pass
class PolarDesert(Biome):
    pass
class Ice(Biome):
    pass
# -- Subpolar zones --
class SubpolarDryTundra(Biome):
    pass
class SubpolarMoistTundra(Biome):
    pass
class SubpolarWetTundra(Biome):
    pass
class SubpolarRainTundra(Biome):
    pass
# -- Boreal zones --
class BorealDesert(Biome):
    pass
class BorealDryScrub(Biome):
    pass
class BorealMoistForest(Biome):
    pass
class BorealWetForest(Biome):
    pass
class BorealRainForest(Biome):
    pass
# -- Cool temperate zones --
class CoolTemperateDesert(Biome):
    pass
class CoolTemperateDesertScrub(Biome):
    pass
class CoolTemperateSteppe(Biome):
    pass
class CoolTemperateMoistForest(Biome):
    pass
class CoolTemperateWetForest(Biome):
    pass
class CoolTemperateRainForest(Biome):
    pass
# -- Warm temperate zones --
class WarmTemperateDesert(Biome):
    pass
class WarmTemperateDesertScrub(Biome):
    pass
class WarmTemperateThornScrub(Biome):
    pass
class WarmTemperateDryForest(Biome):
    pass
class WarmTemperateMoistForest(Biome):
    pass
class WarmTemperateWetForest(Biome):
    pass
class WarmTemperateRainForest(Biome):
    pass
# -- Subtropical zones --
class SubtropicalDesert(Biome):
    pass
class SubtropicalDesertScrub(Biome):
    pass
class SubtropicalThornWoodland(Biome):
    pass
class SubtropicalDryForest(Biome):
    pass
class SubtropicalMoistForest(Biome):
    pass
class SubtropicalWetForest(Biome):
    pass
class SubtropicalRainForest(Biome):
    pass
# -- Tropical zones --
class TropicalDesert(Biome):
    pass
class TropicalDesertScrub(Biome):
    pass
class TropicalThornWoodland(Biome):
    pass
class TropicalVeryDryForest(Biome):
    pass
class TropicalDryForest(Biome):
    pass
class TropicalMoistForest(Biome):
    pass
class TropicalWetForest(Biome):
    pass
class TropicalRainForest(Biome):
    pass
# -------------
# Serialization
# -------------
def biome_name_to_index(biome_name):
    """Return the stable serialization index of *biome_name*.

    The index is the position in the alphabetically sorted registry, so it is
    stable across runs. Raises Exception("Not found") for unknown names
    (preserving the original error contract).

    BUG FIX: the previous ``names = keys(); names.sort()`` raised
    AttributeError on Python 3 (dict views have no .sort()); ``sorted()``
    works on both and the linear scan is replaced by ``list.index``.
    """
    names = sorted(_BiomeMetaclass.biomes.keys())
    try:
        return names.index(biome_name)
    except ValueError:
        raise Exception("Not found")
def biome_index_to_name(biome_index):
    """Return the biome name stored at serialization index *biome_index*.

    Inverse of ``biome_name_to_index``. Raises Exception("Not found") for an
    out-of-range index (preserving the original error contract).

    BUG FIX: the previous ``names = keys(); names.sort()`` raised
    AttributeError on Python 3; ``sorted()`` works on both.
    """
    names = sorted(_BiomeMetaclass.biomes.keys())
    if biome_index < 0 or biome_index >= len(names):
        raise Exception("Not found")
    return names[biome_index]
67496 | #Type of sequence multiplier must be constant
from polyphony import testbench
def list_multiplier(x):
    """Return the first element of the base list repeated *x* times.

    Raises IndexError when x <= 0 (the repeated list is empty), matching the
    original behavior.
    """
    base = [1, 2, 3]
    repeated = base * x
    return repeated[0]
@testbench
def test():
    # Polyphony testbench entry point: exercises list_multiplier with a
    # constant multiplier (the file-top comment says it must be constant).
    list_multiplier(5)
# Run the testbench when the module is executed.
test()
75799 | <reponame>osaaso3/brainiak
# The following code is designed to perform a searchlight at every voxel in the brain looking at the difference in pattern similarity between musical genres (i.e. classical and jazz). In the study where the data was obtained, subjects were required to listen to a set of 16 songs twice (two runs) in an fMRI scanner. The 16 songs consisted of 8 jazz songs and 8 classical songs. The goal of this searchlight is to find voxels that seem to represent distinct information about these different musical genres. Presumably, these voxels would be found in the auditory cortex which happens to be the most organized system in the brain for processing sound information.
import numpy as np
import time
from mpi4py import MPI
from nilearn.image import load_img
import sys
from brainiak.searchlight.searchlight import Searchlight
from scipy import stats
from scipy.sparse import random
import os
# MPI variables
# MPI process identity: rank 0 generates the data; other ranks start with None
# and receive their share through Searchlight.distribute() later on.
comm = MPI.COMM_WORLD
rank = comm.rank
size = comm.size
# Generate random data
if rank == 0:
    np.random.seed(0)
    # Two "runs" of synthetic fMRI data on the 91x109x91 (2mm MNI) grid with
    # 16 volumes: one pattern per song.
    data1_rand = np.random.rand(91,109,91,16)
    data2_rand = np.random.rand(91,109,91,16)
    # Genre signal vectors added to the A1 voxels of both runs so that
    # within-genre patterns correlate across runs.
    # NOTE(review): 2600 is presumably the number of nonzero voxels in the A1
    # mask -- confirm against a1plus_2mm.nii.gz.
    classical = np.random.rand(2600)
    jazz = np.random.rand(2600)
    d1_reshape = np.reshape(data1_rand,(91*109*91,16))
    d2_reshape = np.reshape(data2_rand,(91*109*91,16))
    a1 = load_img('a1plus_2mm.nii.gz')
    a1_vec = np.reshape(a1.get_data(),(91*109*91))
    a1_idx = np.nonzero(a1_vec)
    # Songs 0-7 get the classical signal, songs 8-15 the jazz signal,
    # identically in both runs.
    for i in range(8):
        d1_reshape[a1_idx[0],i] += classical
        d1_reshape[a1_idx[0],i+8] += jazz
        d2_reshape[a1_idx[0],i] += classical
        d2_reshape[a1_idx[0],i+8] += jazz
    data1 = np.reshape(d1_reshape,(91,109,91,16))
    data2 = np.reshape(d2_reshape,(91,109,91,16))
    # Flatten data, then zscore data, then reshape data back into MNI coordinate space
    data1 = stats.zscore(np.reshape(data1,(91*109*91,16)))
    data1 = np.reshape(data1,(91,109,91,16))
    data2 = stats.zscore(np.reshape(data2,(91*109*91,16)))
    data2 = np.reshape(data2,(91,109,91,16))
else:
    data1 = None
    data2 = None
# Load mask
mask_img = load_img('MNI152_T1_2mm_brain_mask.nii')
mask_img = mask_img.get_data()
# Definte function that takes the difference between within vs. between genre comparisons
def corr2_coeff(AB, msk, myrad, bcast_var):
    """Searchlight kernel: within-genre minus between-genre pattern similarity.

    AB is a pair of (voxels..., 16-song) arrays, one per run; songs 0-7 are
    classical and 8-15 are jazz. Returns None for searchlights that are not
    fully inside the mask, otherwise a scalar difference score.
    (myrad and bcast_var are part of the Searchlight kernel signature and are
    unused here.)
    """
    # Skip searchlights that extend outside the brain mask.
    if not np.all(msk):
        return None
    run1, run2 = AB[0], AB[1]
    # Collapse the spatial axes: one feature column per song.
    run1 = run1.reshape((-1, run1.shape[-1]))
    run2 = run2.reshape((-1, run2.shape[-1]))
    # Cross-run song-by-song correlations: rows index run2 songs, columns run1.
    corrAB = np.corrcoef(run1.T, run2.T)[16:, :16]
    cls = slice(0, 8)
    jaz = slice(8, 16)
    classical_within = np.mean(corrAB[cls, cls])
    jazz_within = np.mean(corrAB[jaz, jaz])
    cross_jazz_classical = np.mean(corrAB[jaz, cls])
    cross_classical_jazz = np.mean(corrAB[cls, jaz])
    within_genre = np.mean([classical_within, jazz_within])
    between_genre = np.mean([cross_jazz_classical, cross_classical_jazz])
    return within_genre - between_genre
# Barriers bracket the timed region so all MPI ranks start and stop together.
comm.Barrier()
begin_time = time.time()
comm.Barrier()
# Create and run searchlight
# sl_rad=1 gives 3x3x3 searchlights; max_blk_edge bounds the per-rank blocks.
sl = Searchlight(sl_rad=1,max_blk_edge=5)
sl.distribute([data1,data2],mask_img)
sl.broadcast(None)
# Apply the corr2_coeff kernel at every masked voxel; results are gathered
# on rank 0 (other ranks receive None).
global_outputs = sl.run_searchlight(corr2_coeff)
comm.Barrier()
end_time = time.time()
comm.Barrier()
# Plot searchlight results
if rank == 0:
    print('Searchlight Done: ', end_time - begin_time)
    # Color limits computed from the non-None (in-mask) voxels only.
    maxval = np.max(global_outputs[np.not_equal(global_outputs,None)])
    minval = np.min(global_outputs[np.not_equal(global_outputs,None)])
    # NOTE(review): np.float is deprecated and removed in NumPy >= 1.24 --
    # switch to float/np.float64 when this line is next touched.
    global_outputs = np.array(global_outputs, dtype=np.float)
    print(global_outputs)
    # Save searchlight images
    out_dir = "searchlight_images"
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    import matplotlib.pyplot as plt
    # One PNG per slice along the first axis of the result volume.
    for (cnt, img) in enumerate(global_outputs):
        plt.imshow(img,vmin=minval,vmax=maxval)
        plt.colorbar()
        plt.savefig('searchlight_images/' + 'img' + str(cnt) + '.png')
        plt.clf()
| StarcoderdataPython |
65169 | <gh_stars>1000+
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for the cmd_helper module."""
import unittest
import subprocess
import sys
import time
from devil import devil_env
from devil.utils import cmd_helper
with devil_env.SysPath(devil_env.PYMOCK_PATH):
import mock # pylint: disable=import-error
class CmdHelperSingleQuoteTest(unittest.TestCase):
  """Tests for cmd_helper.SingleQuote (POSIX shell single-quoting).

  FIX: the deprecated assertEquals alias (removed in Python 3.12) is replaced
  by assertEqual throughout.
  """

  def testSingleQuote_basic(self):
    self.assertEqual('hello',
                     cmd_helper.SingleQuote('hello'))

  def testSingleQuote_withSpaces(self):
    self.assertEqual("'hello world'",
                     cmd_helper.SingleQuote('hello world'))

  def testSingleQuote_withUnsafeChars(self):
    self.assertEqual("""'hello'"'"'; rm -rf /'""",
                     cmd_helper.SingleQuote("hello'; rm -rf /"))

  def testSingleQuote_dontExpand(self):
    # Single quotes must prevent shell variable expansion.
    test_string = 'hello $TEST_VAR'
    cmd = 'TEST_VAR=world; echo %s' % cmd_helper.SingleQuote(test_string)
    self.assertEqual(test_string,
                     cmd_helper.GetCmdOutput(cmd, shell=True).rstrip())
class CmdHelperDoubleQuoteTest(unittest.TestCase):
  """Tests for cmd_helper.DoubleQuote (POSIX shell double-quoting).

  FIX: the deprecated assertEquals alias (removed in Python 3.12) is replaced
  by assertEqual throughout.
  """

  def testDoubleQuote_basic(self):
    self.assertEqual('hello',
                     cmd_helper.DoubleQuote('hello'))

  def testDoubleQuote_withSpaces(self):
    self.assertEqual('"hello world"',
                     cmd_helper.DoubleQuote('hello world'))

  def testDoubleQuote_withUnsafeChars(self):
    self.assertEqual('''"hello\\"; rm -rf /"''',
                     cmd_helper.DoubleQuote('hello"; rm -rf /'))

  def testSingleQuote_doExpand(self):
    # Double quotes must allow shell variable expansion.
    test_string = 'hello $TEST_VAR'
    cmd = 'TEST_VAR=world; echo %s' % cmd_helper.DoubleQuote(test_string)
    self.assertEqual('hello world',
                     cmd_helper.GetCmdOutput(cmd, shell=True).rstrip())
class CmdHelperShinkToSnippetTest(unittest.TestCase):
  """Tests for cmd_helper.ShrinkToSnippet.

  NOTE: the class name misspells "Shrink"; it is kept unchanged so existing
  test filters referencing it keep working.
  FIX: the deprecated assertEquals alias (removed in Python 3.12) is replaced
  by assertEqual throughout.
  """

  def testShrinkToSnippet_noArgs(self):
    self.assertEqual('foo',
                     cmd_helper.ShrinkToSnippet(['foo'], 'a', 'bar'))
    self.assertEqual("'foo foo'",
                     cmd_helper.ShrinkToSnippet(['foo foo'], 'a', 'bar'))
    self.assertEqual('"$a"\' bar\'',
                     cmd_helper.ShrinkToSnippet(['foo bar'], 'a', 'foo'))
    self.assertEqual('\'foo \'"$a"',
                     cmd_helper.ShrinkToSnippet(['foo bar'], 'a', 'bar'))
    self.assertEqual('foo"$a"',
                     cmd_helper.ShrinkToSnippet(['foobar'], 'a', 'bar'))

  def testShrinkToSnippet_singleArg(self):
    self.assertEqual("foo ''",
                     cmd_helper.ShrinkToSnippet(['foo', ''], 'a', 'bar'))
    self.assertEqual("foo foo",
                     cmd_helper.ShrinkToSnippet(['foo', 'foo'], 'a', 'bar'))
    self.assertEqual('"$a" "$a"',
                     cmd_helper.ShrinkToSnippet(['foo', 'foo'], 'a', 'foo'))
    self.assertEqual('foo "$a""$a"',
                     cmd_helper.ShrinkToSnippet(['foo', 'barbar'], 'a', 'bar'))
    self.assertEqual('foo "$a"\' \'"$a"',
                     cmd_helper.ShrinkToSnippet(['foo', 'bar bar'], 'a', 'bar'))
    self.assertEqual('foo "$a""$a"\' \'',
                     cmd_helper.ShrinkToSnippet(['foo', 'barbar '], 'a', 'bar'))
    self.assertEqual('foo \' \'"$a""$a"\' \'',
                     cmd_helper.ShrinkToSnippet(['foo', ' barbar '], 'a', 'bar'))
# Sentinel meaning "derive a sensible default" for _ProcessOutputEvent fields.
_DEFAULT = 'DEFAULT'
class _ProcessOutputEvent(object):
  """One scripted step of mock subprocess output (consumed by _MockProcess).

  select_fds: fds reported ready by select.select (_DEFAULT derives them from
    read_contents); read_contents: data os.read returns once during this step
    (None means nothing readable); ts: the time.time() value during this step.
  """
  def __init__(self, select_fds=_DEFAULT, read_contents=None, ts=_DEFAULT):
    self.select_fds = select_fds
    self.read_contents = read_contents
    self.ts = ts
class _MockProcess(object):
  """Context manager that scripts a fake subprocess.Popen.

  While active, os.read, select.select and time.time are patched to replay
  *output_sequence* (a list of _ProcessOutputEvent), and the mock process's
  poll() reports *return_value* once the sequence is exhausted.
  """

  def __init__(self, output_sequence=None, return_value=0):
    # Arbitrary.
    fake_stdout_fileno = 25
    self.mock_proc = mock.MagicMock(spec=subprocess.Popen)
    self.mock_proc.stdout = mock.MagicMock()
    self.mock_proc.stdout.fileno = mock.MagicMock(
        return_value=fake_stdout_fileno)
    self.mock_proc.returncode = None
    self._return_value = return_value
    # This links the behavior of os.read, select.select, time.time, and
    # <process>.poll. The output sequence can be thought of as a list of
    # return values for select.select with corresponding return values for
    # the other calls at any time between that select call and the following
    # one. We iterate through the sequence only on calls to select.select.
    #
    # os.read is a special case, though, where we only return a given chunk
    # of data *once* after a given call to select.
    if not output_sequence:
      output_sequence = []
    # FIX: work on a copy so the caller's list is not mutated by the insert()
    # below. Previously a shared class attribute passed here (e.g.
    # _SIMPLE_OUTPUT_SEQUENCE) grew an extra leading element per use.
    output_sequence = list(output_sequence)
    # Use a leading element to make the iteration logic work.
    initial_seq_element = _ProcessOutputEvent(
        _DEFAULT, '',
        output_sequence[0].ts if output_sequence else _DEFAULT)
    output_sequence.insert(0, initial_seq_element)
    # Fill in derived defaults for each step.
    for o in output_sequence:
      if o.select_fds == _DEFAULT:
        if o.read_contents is None:
          o.select_fds = []
        else:
          o.select_fds = [fake_stdout_fileno]
      if o.ts == _DEFAULT:
        o.ts = time.time()
    self._output_sequence = output_sequence
    self._output_seq_index = 0
    self._read_flags = [False] * len(output_sequence)

    def read_side_effect(*_args, **_kwargs):
      # Return each step's chunk at most once.
      if self._read_flags[self._output_seq_index]:
        return None
      self._read_flags[self._output_seq_index] = True
      return self._output_sequence[self._output_seq_index].read_contents

    def select_side_effect(*_args, **_kwargs):
      # Each select() call advances to the next scripted step.
      if self._output_seq_index is None:
        self._output_seq_index = 0
      else:
        self._output_seq_index += 1
      if self._output_seq_index < len(self._output_sequence):
        return (self._output_sequence[self._output_seq_index].select_fds,
                None, None)
      else:
        return([], None, None)

    def time_side_effect(*_args, **_kwargs):
      return self._output_sequence[self._output_seq_index].ts

    def poll_side_effect(*_args, **_kwargs):
      # The process "exits" once the scripted output is exhausted.
      if self._output_seq_index >= len(self._output_sequence) - 1:
        self.mock_proc.returncode = self._return_value
      return self.mock_proc.returncode

    mock_read = mock.MagicMock(side_effect=read_side_effect)
    mock_select = mock.MagicMock(side_effect=select_side_effect)
    mock_time = mock.MagicMock(side_effect=time_side_effect)
    self.mock_proc.poll = mock.MagicMock(side_effect=poll_side_effect)
    # Set up but *do not start* the mocks.
    self._mocks = [
        mock.patch('os.read', new=mock_read),
        mock.patch('select.select', new=mock_select),
        mock.patch('time.time', new=mock_time),
    ]
    if sys.platform != 'win32':
      self._mocks.append(mock.patch('fcntl.fcntl'))

  def __enter__(self):
    """Start all patches and hand back the scripted Popen mock."""
    for m in self._mocks:
      m.__enter__()
    return self.mock_proc

  def __exit__(self, exc_type, exc_val, exc_tb):
    """Stop the patches in reverse order of activation."""
    for m in reversed(self._mocks):
      m.__exit__(exc_type, exc_val, exc_tb)
class CmdHelperIterCmdOutputLinesTest(unittest.TestCase):
  """Tests for IterCmdOutputLines, driven by scripted _MockProcess fixtures.

  FIX: the previous docstring claimed the tests call the unix 'seq' command;
  they actually replay mocked process output. Also replaces the deprecated
  assertEquals alias (removed in Python 3.12) with assertEqual.
  """
  # This calls _IterCmdOutputLines rather than IterCmdOutputLines s.t. it
  # can mock the process.
  # pylint: disable=protected-access

  _SIMPLE_OUTPUT_SEQUENCE = [
      _ProcessOutputEvent(read_contents='1\n2\n'),
  ]

  def testIterCmdOutputLines_success(self):
    with _MockProcess(
        output_sequence=self._SIMPLE_OUTPUT_SEQUENCE) as mock_proc:
      for num, line in enumerate(
          cmd_helper._IterCmdOutputLines(mock_proc, 'mock_proc'), 1):
        self.assertEqual(num, int(line))

  def testIterCmdOutputLines_exitStatusFail(self):
    with self.assertRaises(subprocess.CalledProcessError):
      with _MockProcess(output_sequence=self._SIMPLE_OUTPUT_SEQUENCE,
                        return_value=1) as mock_proc:
        for num, line in enumerate(
            cmd_helper._IterCmdOutputLines(mock_proc, 'mock_proc'), 1):
          self.assertEqual(num, int(line))
        # after reading all the output we get an exit status of 1

  def testIterCmdOutputLines_exitStatusIgnored(self):
    with _MockProcess(output_sequence=self._SIMPLE_OUTPUT_SEQUENCE,
                      return_value=1) as mock_proc:
      for num, line in enumerate(
          cmd_helper._IterCmdOutputLines(
              mock_proc, 'mock_proc', check_status=False),
          1):
        self.assertEqual(num, int(line))

  def testIterCmdOutputLines_exitStatusSkipped(self):
    with _MockProcess(output_sequence=self._SIMPLE_OUTPUT_SEQUENCE,
                      return_value=1) as mock_proc:
      for num, line in enumerate(
          cmd_helper._IterCmdOutputLines(mock_proc, 'mock_proc'), 1):
        self.assertEqual(num, int(line))
        # no exception will be raised because we don't attempt to read past
        # the end of the output and, thus, the status never gets checked
        if num == 2:
          break

  def testIterCmdOutputLines_delay(self):
    # With iter_timeout=5 the iterator yields None at ts=2+5 < 10, before the
    # 'Awake' chunk arrives at ts=10.
    output_sequence = [
        _ProcessOutputEvent(read_contents='1\n2\n', ts=1),
        _ProcessOutputEvent(read_contents=None, ts=2),
        _ProcessOutputEvent(read_contents='Awake', ts=10),
    ]
    with _MockProcess(output_sequence=output_sequence) as mock_proc:
      for num, line in enumerate(
          cmd_helper._IterCmdOutputLines(mock_proc, 'mock_proc',
                                         iter_timeout=5), 1):
        if num <= 2:
          self.assertEqual(num, int(line))
        elif num == 3:
          self.assertEqual(None, line)
        elif num == 4:
          self.assertEqual('Awake', line)
        else:
          self.fail()
if __name__ == '__main__':
  # Allow running this test module directly.
  unittest.main()
| StarcoderdataPython |
3348667 | <filename>c7n/resources/dynamodb.py
# Copyright 2016-2019 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from botocore.exceptions import ClientError
from concurrent.futures import as_completed
from datetime import datetime
from c7n.actions import BaseAction, ModifyVpcSecurityGroupsAction
from c7n.filters.kms import KmsRelatedFilter
from c7n import query
from c7n.manager import resources
from c7n.tags import (
TagDelayedAction, RemoveTag, TagActionFilter, Tag, universal_augment)
from c7n.utils import (
local_session, chunks, type_schema, snapshot_identifier)
from c7n.filters.vpc import SecurityGroupFilter, SubnetFilter
class ConfigTable(query.ConfigSource):
    """Normalize AWS Config's dynamodb table records to the describe-API shape.

    Config delivers epoch-millisecond timestamps and mangled-case SSE keys;
    this source converts them to the datetime objects and key names produced
    by describe_table so downstream filters see one uniform resource shape.
    """

    def load_resource(self, item):
        resource = super(ConfigTable, self).load_resource(item)
        # Config stores timestamps as epoch milliseconds; the describe API
        # returns datetime objects, so convert for parity.
        resource['CreationDateTime'] = datetime.fromtimestamp(
            resource['CreationDateTime'] / 1000.0)
        # Guard: tables that never switched billing modes may lack a
        # BillingModeSummary key entirely (original code raised KeyError).
        bms = resource.get('BillingModeSummary')
        if bms and 'LastUpdateToPayPerRequestDateTime' in bms:
            bms['LastUpdateToPayPerRequestDateTime'] = datetime.fromtimestamp(
                bms['LastUpdateToPayPerRequestDateTime'] / 1000.0)
        sse_info = resource.pop('Ssedescription', None)
        if sse_info is None:
            return resource
        resource['SSEDescription'] = sse_info
        # Config lower-cases acronyms; restore the casing the SDK uses.
        for k, r in (('KmsmasterKeyArn', 'KMSMasterKeyArn'),
                     ('Ssetype', 'SSEType')):
            if k in sse_info:
                sse_info[r] = sse_info.pop(k)
        return resource
class DescribeTable(query.DescribeSource):
    """Describe-API source; attaches universal (resource-group tagging) tags."""

    def augment(self, resources):
        detailed = super(DescribeTable, self).augment(resources)
        return universal_augment(self.manager, detailed)
@resources.register('dynamodb-table')
class Table(query.QueryResourceManager):
    """Resource manager for DynamoDB tables."""

    class resource_type(query.TypeInfo):
        # Metadata consumed by c7n's generic query machinery.
        service = 'dynamodb'
        arn_type = 'table'
        # list_tables returns bare names; each is hydrated via describe_table.
        enum_spec = ('list_tables', 'TableNames', None)
        detail_spec = ("describe_table", "TableName", None, "Table")
        id = 'TableName'
        name = 'TableName'
        date = 'CreationDateTime'
        dimension = 'TableName'
        config_type = 'AWS::DynamoDB::Table'

    # Opt in to resourcegroupstaggingapi-based tagging support.
    universal_taggable = object()

    source_mapping = {
        'describe': DescribeTable,
        'config': ConfigTable
    }
class StatusFilter:
    """Mixin for filtering dynamodb resources by lifecycle status.

    Actions mix this in to restrict processing to resources in states
    the relevant API call can operate on.
    """

    # Subclasses override with the states an action may operate on
    # (historically named `valid_status` on subclasses; both methods
    # receive the states explicitly, so the default here is a fallback).
    valid_states = ()

    def _filter_by_state(self, tables, key, states):
        # Shared implementation: keep resources whose `key` value is in
        # `states`, logging how many survived.
        states = states or self.valid_states
        orig_count = len(tables)
        result = [t for t in tables if t[key] in states]
        self.log.info("%s %d of %d tables" % (
            self.__class__.__name__, len(result), orig_count))
        return result

    def filter_table_state(self, tables, states=None):
        """Return tables whose TableStatus is in *states*."""
        return self._filter_by_state(tables, 'TableStatus', states)

    def filter_backup_state(self, tables, states=None):
        """Return backups whose BackupStatus is in *states*."""
        return self._filter_by_state(tables, 'BackupStatus', states)
@Table.filter_registry.register('kms-key')
class KmsFilter(KmsRelatedFilter):
    """
    Filter a resource by its associated kms key and optionally the aliasname
    of the kms key by using 'c7n:AliasName'

    :example:

    .. code-block:: yaml

        policies:
          - name: dynamodb-kms-key-filters
            resource: dynamodb-table
            filters:
              - type: kms-key
                key: c7n:AliasName
                value: "^(alias/aws/dynamodb)"
                op: regex
    """
    # Only present on tables with server-side encryption enabled.
    RelatedIdsExpression = 'SSEDescription.KMSMasterKeyArn'
@Table.action_registry.register('delete')
class DeleteTable(BaseAction, StatusFilter):
    """Action to delete dynamodb tables

    :example:

    .. code-block:: yaml

        policies:
          - name: delete-empty-tables
            resource: dynamodb-table
            filters:
              - TableSizeBytes: 0
            actions:
              - delete
    """

    # Only ACTIVE tables can be deleted.
    valid_status = ('ACTIVE',)

    schema = type_schema('delete')
    permissions = ("dynamodb:DeleteTable",)

    def delete_table(self, client, table_set):
        # The API has no batch delete; issue one call per table.
        for table in table_set:
            client.delete_table(TableName=table['TableName'])

    def process(self, resources):
        resources = self.filter_table_state(resources, self.valid_status)
        if not len(resources):
            return
        client = local_session(self.manager.session_factory).client('dynamodb')
        with self.executor_factory(max_workers=2) as w:
            futures = [
                w.submit(self.delete_table, client, table_set)
                for table_set in chunks(resources, 20)]
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception deleting dynamodb table set \n %s"
                        % (f.exception()))
@Table.action_registry.register('set-stream')
class SetStream(BaseAction, StatusFilter):
    """Action to enable/disable streams on table.

    :example:

    .. code-block:: yaml

        policies:
          - name: stream-update
            resource: dynamodb-table
            filters:
              - TableName: 'test'
              - TableStatus: 'ACTIVE'
            actions:
              - type: set-stream
                state: True
                stream_view_type: 'NEW_IMAGE'
    """

    # Streams can only be reconfigured on ACTIVE tables.
    valid_status = ('ACTIVE',)

    schema = type_schema('set-stream',
                         state={'type': 'boolean'},
                         stream_view_type={'type': 'string'})
    permissions = ("dynamodb:UpdateTable",)

    def process(self, tables):
        tables = self.filter_table_state(tables, self.valid_status)
        if not len(tables):
            self.log.warning("Table not in ACTIVE state.")
            return

        stream_spec = {"StreamEnabled": self.data.get('state')}
        # Fetch once and bind to a local that doesn't shadow the `type`
        # builtin (original code rebound `type` and re-read self.data
        # three times).
        view_type = self.data.get('stream_view_type')
        if view_type is not None:
            stream_spec["StreamViewType"] = view_type

        c = local_session(self.manager.session_factory).client('dynamodb')
        with self.executor_factory(max_workers=2) as w:
            futures = {w.submit(c.update_table,
                                TableName=t['TableName'],
                                StreamSpecification=stream_spec): t
                       for t in tables}
            for f in as_completed(futures):
                t = futures[f]
                if f.exception():
                    self.log.error(
                        "Exception updating dynamodb table set \n %s"
                        % (f.exception()))
                    continue
                if view_type is not None:
                    # Annotate the resource with the resulting stream config.
                    spec = f.result()[
                        'TableDescription']['StreamSpecification']
                    t['c7n:StreamState'] = spec['StreamEnabled']
                    t['c7n:StreamType'] = spec['StreamViewType']
@Table.action_registry.register('backup')
class CreateBackup(BaseAction, StatusFilter):
    """Creates a manual backup of a DynamoDB table. Use of the optional
    prefix flag will attach a user specified prefix. Otherwise,
    the backup prefix will default to 'Backup'.

    :example:

    .. code-block:: yaml

        policies:
          - name: dynamodb-create-backup
            resource: dynamodb-table
            actions:
              - type: backup
                prefix: custom
    """

    # Backups can only be taken of ACTIVE tables.
    valid_status = ('ACTIVE',)

    schema = type_schema('backup',
                         prefix={'type': 'string'})
    permissions = ('dynamodb:CreateBackup',)

    def process(self, resources):
        resources = self.filter_table_state(resources, self.valid_status)
        if not len(resources):
            return
        c = local_session(self.manager.session_factory).client('dynamodb')
        futures = {}
        prefix = self.data.get('prefix', 'Backup')

        with self.executor_factory(max_workers=2) as w:
            for t in resources:
                futures[w.submit(
                    c.create_backup,
                    BackupName=snapshot_identifier(
                        prefix, t['TableName']),
                    TableName=t['TableName'])] = t
            for f in as_completed(futures):
                t = futures[f]
                if f.exception():
                    self.manager.log.warning(
                        "Could not complete DynamoDB backup table:%s", t)
                    # Bug fix: without this continue, the f.result() call
                    # below re-raises the exception and aborts processing
                    # of all remaining tables.
                    continue
                arn = f.result()['BackupDetails']['BackupArn']
                t['c7n:BackupArn'] = arn
@resources.register('dynamodb-backup')
class Backup(query.QueryResourceManager):
    """Resource manager for DynamoDB table backups."""

    class resource_type(query.TypeInfo):
        service = 'dynamodb'
        arn = 'BackupArn'
        enum_spec = ('list_backups', 'BackupSummaries', None)
        id = 'BackupArn'
        name = 'BackupName'
        date = 'BackupCreationDateTime'
@Backup.action_registry.register('delete')
class DeleteBackup(BaseAction, StatusFilter):
    """Deletes backups of a DynamoDB table

    :example:

    .. code-block:: yaml

        policies:
          - name: dynamodb-delete-backup
            resource: dynamodb-backup
            filters:
              - type: value
                key: BackupCreationDateTime
                op: greater-than
                value_type: age
                value: 28
            actions:
              - type: delete
    """

    # Only AVAILABLE backups can be deleted.
    valid_status = ('AVAILABLE',)
    schema = type_schema('delete')
    permissions = ('dynamodb:DeleteBackup',)

    def process(self, backups):
        backups = self.filter_backup_state(backups, self.valid_status)
        if not len(backups):
            return
        client = local_session(self.manager.session_factory).client('dynamodb')
        for batch in chunks(backups, 20):
            self.process_dynamodb_backups(batch, client)

    def process_dynamodb_backups(self, table_set, c):
        # Delete serially; already-gone backups are logged and skipped,
        # any other client error propagates.
        for backup in table_set:
            try:
                c.delete_backup(BackupArn=backup['BackupArn'])
            except ClientError as e:
                if e.response['Error']['Code'] == 'ResourceNotFoundException':
                    self.log.warning(
                        "Could not complete DynamoDB backup deletion for table:%s",
                        backup)
                    continue
                raise
@resources.register('dynamodb-stream')
class Stream(query.QueryResourceManager):
    """Resource manager for DynamoDB streams."""
    # Note stream management takes place on the table resource

    class resource_type(query.TypeInfo):
        service = 'dynamodbstreams'
        # IAM permissions for streams live under the dynamodb prefix.
        permission_prefix = 'dynamodb'
        # Note max rate of 5 calls per second
        enum_spec = ('list_streams', 'Streams', None)
        # Note max rate of 10 calls per second.
        detail_spec = (
            "describe_stream", "StreamArn", "StreamArn", "StreamDescription")
        arn = id = 'StreamArn'
        arn_type = 'stream'
        name = 'TableName'
        date = 'CreationDateTime'
        dimension = 'TableName'
@resources.register('dax')
class DynamoDbAccelerator(query.QueryResourceManager):
    """Resource manager for DynamoDB Accelerator (DAX) clusters."""

    class resource_type(query.TypeInfo):
        service = 'dax'
        arn_type = 'cluster'
        enum_spec = ('describe_clusters', 'Clusters', None)
        id = 'ClusterArn'
        name = 'ClusterName'
        # config_type = 'AWS::DAX::Cluster'

    permissions = ('dax:ListTags',)

    def get_source(self, source_type):
        # 'describe' uses the tag-augmenting source below; config is generic.
        if source_type == 'describe':
            return DescribeDaxCluster(self)
        elif source_type == 'config':
            return query.ConfigSource(self)
        raise ValueError('invalid source %s' % source_type)

    def get_resources(self, ids, cache=True, augment=True):
        """Override in order to disable the augment for serverless policies.

        list_tags on dax resources always fail until the cluster is finished
        creating.
        """
        return super(DynamoDbAccelerator, self).get_resources(ids, cache, augment=False)
class DescribeDaxCluster(query.DescribeSource):
    """Describe source for DAX clusters with tag augmentation."""

    def get_resources(self, ids, cache=True):
        """Retrieve dax resources for serverless policies or related resources
        """
        dax = local_session(self.manager.session_factory).client('dax')
        return dax.describe_clusters(ClusterNames=ids).get('Clusters')

    def augment(self, clusters):
        described = super(DescribeDaxCluster, self).augment(clusters)
        tagged = _dax_cluster_tags(
            described,
            self.manager.session_factory,
            self.manager.retry,
            self.manager.log)
        # Drop clusters whose tag fetch failed (annotator yields None).
        return [c for c in tagged if c]
def _dax_cluster_tags(tables, session_factory, retry, log):
    """Annotate each DAX cluster dict with its tags under 'Tags'.

    Clusters that disappear or enter an invalid state mid-enumeration
    (list_tags raises) are dropped from the result.  Returns a list
    (the original returned a lazy `filter` object the caller had to
    re-filter and materialize).
    """
    client = local_session(session_factory).client('dax')

    def process_tags(r):
        try:
            r['Tags'] = retry(
                client.list_tags, ResourceName=r['ClusterArn'])['Tags']
            return r
        except (client.exceptions.ClusterNotFoundFault,
                client.exceptions.InvalidClusterStateFault):
            # Signal the caller to drop this cluster.
            return None

    return [r for r in map(process_tags, tables) if r]
# Enable the generic `marked-for-op` filter on dax resources.
DynamoDbAccelerator.filter_registry.register('marked-for-op', TagActionFilter)
@DynamoDbAccelerator.filter_registry.register('security-group')
class DaxSecurityGroupFilter(SecurityGroupFilter):
    # JMESPath to the security group ids attached to a DAX cluster.
    RelatedIdsExpression = "SecurityGroups[].SecurityGroupIdentifier"
@DynamoDbAccelerator.action_registry.register('tag')
class DaxTagging(Tag):
    """Action to create tag(s) on a resource

    :example:

    .. code-block:: yaml

        policies:
          - name: dax-cluster-tag
            resource: dax
            filters:
              - "tag:target-tag": absent
            actions:
              - type: tag
                key: target-tag
                value: target-tag-value
    """
    permissions = ('dax:TagResource',)

    def process_resource_set(self, client, resources, tags):
        id_key = self.manager.resource_type.id
        # Clusters that vanished or changed state mid-run are logged,
        # not fatal.
        recoverable = (client.exceptions.ClusterNotFoundFault,
                       client.exceptions.InvalidARNFault,
                       client.exceptions.InvalidClusterStateFault)
        for cluster in resources:
            try:
                client.tag_resource(ResourceName=cluster[id_key], Tags=tags)
            except recoverable as e:
                self.log.warning(
                    'Exception tagging %s: \n%s', cluster['ClusterName'], e)
@DynamoDbAccelerator.action_registry.register('remove-tag')
class DaxRemoveTagging(RemoveTag):
    """Action to remove tag(s) on a resource

    :example:

    .. code-block:: yaml

        policies:
          - name: dax-remove-tag
            resource: dax
            filters:
              - "tag:OutdatedTag": present
            actions:
              - type: remove-tag
                tags: ["OutdatedTag"]
    """
    permissions = ('dax:UntagResource',)

    def process_resource_set(self, client, resources, tag_keys):
        # Missing clusters/tags or invalid states are logged, not fatal.
        recoverable = (client.exceptions.ClusterNotFoundFault,
                       client.exceptions.TagNotFoundFault,
                       client.exceptions.InvalidClusterStateFault)
        for cluster in resources:
            try:
                client.untag_resource(
                    ResourceName=cluster['ClusterArn'], TagKeys=tag_keys)
            except recoverable as e:
                self.log.warning(
                    'Exception removing tags on %s: \n%s',
                    cluster['ClusterName'], e)
@DynamoDbAccelerator.action_registry.register('mark-for-op')
class DaxMarkForOp(TagDelayedAction):
    """Action to specify an action to occur at a later date

    :example:

    .. code-block:: yaml

        policies:
          - name: dax-mark-tag-compliance
            resource: dax
            filters:
              - "tag:custodian_cleanup": absent
              - "tag:OwnerName": absent
            actions:
              - type: mark-for-op
                tag: custodian_cleanup
                msg: "Missing tag 'OwnerName': {op}@{action_date}"
                op: delete
                days: 7
    """
    # All behavior is inherited from TagDelayedAction; this subclass only
    # registers it on the dax resource and documents the policy shape.
@DynamoDbAccelerator.action_registry.register('delete')
class DaxDeleteCluster(BaseAction):
    """Action to delete a DAX cluster

    :example:

    .. code-block:: yaml

        policies:
          - name: dax-delete-cluster
            resource: dax
            filters:
              - "tag:DeleteMe": present
            actions:
              - type: delete
    """
    permissions = ('dax:DeleteCluster',)
    schema = type_schema('delete')

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('dax')
        for r in resources:
            try:
                client.delete_cluster(ClusterName=r['ClusterName'])
            except (client.exceptions.ClusterNotFoundFault,
                    client.exceptions.InvalidARNFault,
                    client.exceptions.InvalidClusterStateFault) as e:
                # Fix copy-pasted log text: this is a delete action, the
                # original message said "Exception marking".
                self.log.warning(
                    'Exception deleting %s: \n%s', r['ClusterName'], e)
@DynamoDbAccelerator.action_registry.register('update-cluster')
class DaxUpdateCluster(BaseAction):
    """Updates a DAX cluster configuration

    :example:

    .. code-block:: yaml

        policies:
          - name: dax-update-cluster
            resource: dax
            filters:
              - ParameterGroup.ParameterGroupName: 'default.dax1.0'
            actions:
              - type: update-cluster
                ParameterGroupName: 'testparamgroup'
    """
    schema = {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'type': {'enum': ['update-cluster']},
            'Description': {'type': 'string'},
            'PreferredMaintenanceWindow': {'type': 'string'},
            'NotificationTopicArn': {'type': 'string'},
            'NotificationTopicStatus': {'type': 'string'},
            'ParameterGroupName': {'type': 'string'}
        }
    }
    permissions = ('dax:UpdateCluster',)

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('dax')
        # Everything in the policy data except the action type is passed
        # straight through to update_cluster.
        base_params = dict(self.data)
        base_params.pop('type')
        for cluster in resources:
            try:
                client.update_cluster(
                    ClusterName=cluster['ClusterName'], **base_params)
            except (client.exceptions.ClusterNotFoundFault,
                    client.exceptions.InvalidClusterStateFault) as e:
                self.log.warning(
                    'Exception updating dax cluster %s: \n%s',
                    cluster['ClusterName'], e)
@DynamoDbAccelerator.action_registry.register('modify-security-groups')
class DaxModifySecurityGroup(ModifyVpcSecurityGroupsAction):
    """Replace the security groups attached to DAX clusters."""

    permissions = ('dax:UpdateCluster',)

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('dax')
        # get_groups returns one resolved group-id list per resource,
        # in the same order.
        groups = super(DaxModifySecurityGroup, self).get_groups(resources)
        for cluster, group_ids in zip(resources, groups):
            client.update_cluster(
                ClusterName=cluster['ClusterName'],
                SecurityGroupIds=group_ids)
@DynamoDbAccelerator.filter_registry.register('subnet')
class DaxSubnetFilter(SubnetFilter):
    """Filters DAX clusters based on their associated subnet group

    :example:

    .. code-block:: yaml

        policies:
          - name: dax-no-auto-public
            resource: dax
            filters:
              - type: subnet
                key: MapPublicIpOnLaunch
                value: False
    """
    # Subnet ids are resolved via the subnet-group lookup below, not a
    # JMESPath expression on the resource itself.
    RelatedIdsExpression = ""

    def get_related_ids(self, resources):
        # Collect subnet ids from each cluster's subnet group (populated
        # into self.groups by process()).
        group_ids = set()
        for r in resources:
            group_ids.update(
                [s['SubnetIdentifier'] for s in
                 self.groups[r['SubnetGroup']]['Subnets']])
        return group_ids

    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client('dax')
        subnet_groups = client.describe_subnet_groups()['SubnetGroups']
        self.groups = {s['SubnetGroupName']: s for s in subnet_groups}
        # NOTE(review): `event` is not forwarded to super().process —
        # confirm whether the base SubnetFilter needs it here.
        return super(DaxSubnetFilter, self).process(resources)
| StarcoderdataPython |
72802 | <filename>justvpn/util.py
import re
import os
from urllib.parse import urlparse
| StarcoderdataPython |
3234099 | <reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
def customise(process):
    """Tweak a cmsRun process for fat-event DQM processing.

    Restricts output to MEtoEDM products, enables DQMStore statistics,
    adds L1T re-emulation, and rebuilds the schedule as one path plus
    an output endpath.
    """
    # process.dtDataIntegrityUnpacker.inputLabel = cms.untracked.InputTag('rawDataCollector')
    # process.DQMOfflineCosmics.remove(process.hcalOfflineDQMSource)
    # process.load("FWCore.Modules.printContent_cfi")
    # process.myPath1 = cms.Path( process.printContent )

    # Print a trigger/module summary at end of job.
    process.options = cms.untracked.PSet(
        wantSummary = cms.untracked.bool(True)
    )

    # using the DQMROOT means that the reco output will be empty
    process.DQMoutput.outputCommands.append('drop *')
    process.DQMoutput.outputCommands.append('keep *_MEtoEDMConverter_*_*')

    # process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck",
    #     ignoreTotal=cms.untracked.int32(1),
    #     oncePerEventMode=cms.untracked.bool(False)
    # )

    # Do not activate by default the logging of where each histogram is booked.
    process.DQMStore.verbose = cms.untracked.int32(2)

    # Collect and report DQMStore histogram statistics at end of job.
    process.load("DQMServices.Components.DQMStoreStats_cfi")
    process.stats = cms.Path(process.dqmStoreStats)
    process.schedule.insert(-1,process.stats)

    # Run only on fat events (filter is defined but currently commented
    # out of the path below).
    from HLTrigger.HLTfilters.hltHighLevel_cfi import hltHighLevel
    process.hltFatEventFilters = hltHighLevel.clone()
    process.hltFatEventFilters.throw = cms.bool(False)
    process.hltFatEventFilters.HLTPaths = cms.vstring('HLT_L1FatEvents_v*')

    # Run L1TReemulation
    from L1Trigger.Configuration.customiseReEmul import L1TReEmulFromRAW
    process = L1TReEmulFromRAW(process)

    # Put all together into one path, so that reco does not run on non-fat events
    process.p=cms.Path( #process.hltFatEventFilters*
                        process.RawToDigi*
                        process.reconstruction*
                        process.DQMOffline*
                        process.L1TReEmul
                        )
    process.e=cms.EndPath( process.DQMoutput )
    process.schedule=cms.Schedule(process.p,process.e)

    return(process)
| StarcoderdataPython |
3331745 | __author__ = "<NAME>"
__version__ = '0.3.0'
from log_calls import log_calls
import doctest
##############################################################################
# doctests
##############################################################################
#=============================================================================
# main__lc_class_deco__all_method_types
#=============================================================================
#-----------------------
# data
#-----------------------
# Fixture class exercising log_calls as a *class* decorator across all
# method kinds (instance, class, static), including methods carrying
# their own explicit @log_calls settings.  The doctests below depend on
# the exact names/signatures here — do not rename.
@log_calls(indent=True, args_sep='\n', log_call_numbers=True, log_retval=True)
class C():
    clsmember = 17

    ## Not needed, log_retval defaults to False for __init__, unless explicit `log_retval=True` given:
    # @log_calls(log_retval=False)
    def __init__(self, x):
        self.x = x

    def foo(self, y):
        self.x = y
        return self.clsmeth(y * 2) + 17

    @classmethod
    @log_calls(args_sep=' / ')
    def clsmeth_lc(cls, z):
        return cls.clsmeth(z)

    @classmethod
    def clsmeth(cls, z):
        cls.clsmember = z
        return z // 2

    @staticmethod
    @log_calls(log_elapsed=True)
    def statmeth_lc(q):
        # Busy loop so log_elapsed has something non-trivial to time.
        for i in range(50000):
            pass
        return 2 * q

    @staticmethod
    def statmeth(q):
        return 4 * q
#-----------------------
# doctest
#-----------------------
def main__lc_class_deco__all_method_types():
    # Doctest carrier: verifies log_calls output for class C's instance,
    # class and static methods.  The docstring IS the test — keep verbatim.
    """
    >>> assert C.clsmeth_lc(15) == 7 # doctest: +NORMALIZE_WHITESPACE
    C.clsmeth_lc [1] <== called by <module>
        arguments: cls=<class '__main__.C'> / z=15
        C.clsmeth [1] <== called by C.clsmeth_lc [1]
            arguments:
                cls=<class '__main__.C'>
                z=15
            C.clsmeth [1] return value: 7
        C.clsmeth [1] ==> returning to C.clsmeth_lc [1]
        C.clsmeth_lc [1] return value: 7
    C.clsmeth_lc [1] ==> returning to <module>

    >>> C.clsmember == 15
    True

    >>> assert C.statmeth_lc(100) == 200 # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    C.statmeth_lc [1] <== called by <module>
        arguments:
            q=100
        C.statmeth_lc [1] return value: 200
        elapsed time: ... [secs], process time: ... [secs]
    C.statmeth_lc [1] ==> returning to <module>

    >>> c = C(1000) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    C.__init__ [1] <== called by <module>
        arguments:
            self=<__main__.C object at 0x...>
            x=1000
    C.__init__ [1] ==> returning to <module>
    >>> c.x == 1000
    True

    >>> assert c.foo(-10) == 7 # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    C.foo [1] <== called by <module>
        arguments:
            self=<__main__.C object at 0x...>
            y=-10
        C.clsmeth [2] <== called by C.foo [1]
            arguments:
                cls=<class '__main__.C'>
                z=-20
            C.clsmeth [2] return value: -10
        C.clsmeth [2] ==> returning to C.foo [1]
        C.foo [1] return value: 7
    C.foo [1] ==> returning to <module>
    >>> c.x == -10
    True

    >>> _ = c.statmeth(125) # doctest: +NORMALIZE_WHITESPACE
    C.statmeth [1] <== called by <module>
        arguments:
            q=125
        C.statmeth [1] return value: 500
    C.statmeth [1] ==> returning to <module>
    """
    pass
# SURGERY: rewrite the doctest to expect this module's actual name in
# log_calls output — the examples above were written against "__main__".
main__lc_class_deco__all_method_types.__doc__ = \
    main__lc_class_deco__all_method_types.__doc__.replace("__main__", __name__)
#=============================================================================
# main__lc_class_deco__inner_classes
#=============================================================================
#-----------------------
# data
#-----------------------
# Fixture class with nested (inner) classes, exercising how a class-level
# log_calls decorator propagates into inner classes with/without their own
# settings.  The doctests below depend on these exact names — do not rename.
@log_calls(args_sep='\n', log_call_numbers=True, log_retval=True)
class D():
    def __init__(self):
        pass

    @staticmethod
    def makeDI_1(x):
        # Factory for the inner class, so construction shows in the logs.
        return D.DI_1(x)

    @log_calls(args_sep='; ', log_retval=False)
    class DI_1():
        def __init__(self, x, y=91):
            self._init_aux(x, y)
            self.x = x
            self.y = y

        def _init_aux(self, x, y):
            pass

        @log_calls(log_call_numbers=False, log_retval=True)
        def f(self):
            return self.x * self.x + self.y

    class DI_2():
        def __init__(self):
            pass

        def g(self):
            pass

        @log_calls(log_call_numbers=False, log_retval=False)
        def h(self):
            pass
#-----------------------
# doctest
#-----------------------
def main__lc_class_deco__inner_classes():
    # Doctest carrier: inner-class decoration behavior for class D.
    # The docstring IS the test — keep verbatim.
    """
    >>> di1 = D().makeDI_1(17) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    D.__init__ [1] <== called by <module>
        arguments:
            self=<__main__.D object at 0x...>
    D.__init__ [1] ==> returning to <module>
    D.makeDI_1 [1] <== called by <module>
        arguments:
            x=17
        D.DI_1.__init__ [1] <== called by D.makeDI_1 [1]
            arguments: self=<__main__.D.DI_1 object at 0x...>; x=17
            defaults: y=91
            D.DI_1._init_aux [1] <== called by D.DI_1.__init__ [1]
                arguments: self=<__main__.D.DI_1 object at 0x...>; x=17; y=91
            D.DI_1._init_aux [1] ==> returning to D.DI_1.__init__ [1]
        D.DI_1.__init__ [1] ==> returning to D.makeDI_1 [1]
        D.makeDI_1 [1] return value: <__main__.D.DI_1 object at 0x...>
    D.makeDI_1 [1] ==> returning to <module>

    >>> _ = di1.f() # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    D.DI_1.f <== called by <module>
        arguments: self=<__main__.D.DI_1 object at 0x...>
        D.DI_1.f return value: 380
    D.DI_1.f ==> returning to <module>

    >>> di2 = D.DI_2() # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    D.DI_2.__init__ [1] <== called by <module>
        arguments:
            self=<__main__.D.DI_2 object at 0x...>
    D.DI_2.__init__ [1] ==> returning to <module>

    >>> di2.g() # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    D.DI_2.g [1] <== called by <module>
        arguments:
            self=<__main__.D.DI_2 object at 0x...>
        D.DI_2.g [1] return value: None
    D.DI_2.g [1] ==> returning to <module>

    >>> di2.h() # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    D.DI_2.h <== called by <module>
        arguments:
            self=<__main__.D.DI_2 object at 0x...>
    D.DI_2.h ==> returning to <module>
    """
    pass
# SURGERY: rewrite the doctest to expect this module's actual name in
# log_calls output — the examples above were written against "__main__".
main__lc_class_deco__inner_classes.__doc__ = \
    main__lc_class_deco__inner_classes.__doc__.replace("__main__", __name__)
#=============================================================================
# main__lc_class_deco__immutable_setting
#=============================================================================
# Fixture: class-level settings (max_history=10) apply to undecorated
# methods; explicitly decorated methods keep their own settings.
@log_calls(max_history=10)
class A():
    def f(self, x): pass

    @log_calls(max_history=17)
    def g(self, x): pass

    @log_calls(enabled=False)
    def h(self): pass
def main__lc_class_deco__immutable_setting():
    # Doctest carrier: per-method max_history settings from class A.
    # The docstring IS the test — keep verbatim.
    """
    >>> a = A()
    >>> a.f.log_calls_settings.max_history
    10
    >>> a.g.log_calls_settings.max_history
    17
    >>> a.h.log_calls_settings.max_history
    10
    """
#=============================================================================
# main__lc_class_deco__omit_only__basic
#=============================================================================
# Shared minimal log_calls settings: log only the "<== called by" line.
MINIMAL = dict(
    log_args=False, log_exit=False
)
def main__lc_class_deco__omit_only__basic():
    # Doctest carrier: basic `omit`/`only` keyword handling (string,
    # comma-list, tuple/list forms).  The docstring IS the test — keep verbatim.
    """
    >>> @log_calls(omit='f g', settings=MINIMAL)
    ... class E():
    ...     def f(self): pass
    ...     def g(self): pass
    ...     def h(self): pass
    >>> e = E(); e.f(); e.g(); e.h()
    E.h <== called by <module>

    >>> @log_calls(only='f, h', settings=MINIMAL)
    ... class F():
    ...     def f(self): pass
    ...     def g(self): pass
    ...     def h(self): pass
    >>> eff = F(); eff.f(); eff.g(); eff.h()
    F.f <== called by <module>
    F.h <== called by <module>

    >>> @log_calls(only=['f', 'g'], omit=('g',), settings=MINIMAL)
    ... class G():
    ...     def f(self): pass
    ...     def g(self): pass
    ...     def h(self): pass
    >>> gee = G(); gee.f(); gee.g(); gee.h()
    G.f <== called by <module>
    """
    pass
#=============================================================================
# main__lc_class_deco__globs
#=============================================================================
def main__lc_class_deco__globs():
    # Doctest carrier: glob patterns ('?', character sets/ranges, negation)
    # in `only`/`omit`.  The docstring IS the test — keep verbatim.
    """
    Wildcard '?':

    >>> @log_calls(only='f_ab?', settings=MINIMAL)
    ... class X():
    ...     def f_ab(self): pass
    ...     def f_abc(self): pass
    ...     def f_abd(self): pass
    >>> x = X(); x.f_ab(); x.f_abc(); x.f_abd()
    X.f_abc <== called by <module>
    X.f_abd <== called by <module>

    Character sets and ranges

    Match characters in set:
    >>> @log_calls(only='g_ab[cd]*', settings=MINIMAL)
    ... class Y():
    ...     def g_ab7_and_more(self): pass
    ...     def g_abc_or_something(self): pass
    ...     def g_abd_perhaps(self): pass
    >>> y = Y(); y.g_ab7_and_more(); y.g_abc_or_something(); y.g_abd_perhaps()
    Y.g_abc_or_something <== called by <module>
    Y.g_abd_perhaps <== called by <module>

    Match characters in range:
    >>> @log_calls(only='g_ab[a-z]*', settings=MINIMAL)
    ... class Y():
    ...     def g_ab7_and_more(self): pass
    ...     def g_abc_or_something(self): pass
    ...     def g_abd_perhaps(self): pass
    >>> y = Y(); y.g_ab7_and_more(); y.g_abc_or_something(); y.g_abd_perhaps()
    Y.g_abc_or_something <== called by <module>
    Y.g_abd_perhaps <== called by <module>

    Match characters not in range
    >>> @log_calls(only='g_ab[!a-z]*', settings=MINIMAL)
    ... class Y():
    ...     def g_ab7_and_more(self): pass
    ...     def g_abc_or_something(self): pass
    ...     def g_abd_perhaps(self): pass
    >>> y = Y(); y.g_ab7_and_more(); y.g_abc_or_something(); y.g_abd_perhaps()
    Y.g_ab7_and_more <== called by <module>
    """
    pass
#=============================================================================
# main__lc_class_deco__omit_only__inner_classes
#=============================================================================
def main__lc_class_deco__omit_only__inner_classes():
    # Doctest carrier: `omit`/`only` interaction between outer and inner
    # classes (qualified names, '*' wildcards, override/merge rules).
    # The docstring IS the test — keep verbatim.
    """
    Qualified (class-prefixed) names; names that match more than one method

    >>> @log_calls(only=('H.HI.f', 'g'), settings=MINIMAL)
    ... class H():
    ...     def f(self): pass
    ...     def g(self): pass
    ...     def h(self): pass
    ...     class HI():
    ...         def f(self): pass
    ...         def g(self): pass
    ...         def h(self): pass
    >>> aich = H(); aich.f(); aich.g(); aich.h()
    H.g <== called by <module>
    >>> hi = H.HI(); hi.f(); hi.g(); hi.h()
    H.HI.f <== called by <module>
    H.HI.g <== called by <module>

    Wildcard '*'

    Omitting all/any inner classes with '*.*.*':
    # NOTE: qualname will *always* match '*.*' so can't use that to filter for inner classes
    >>> @log_calls(omit='*.*.*', settings=MINIMAL)
    ... class O():
    ...     def f(self): pass
    ...     class I1():
    ...         def g1(self): pass
    ...     class I2():
    ...         def g2(self): pass
    >>> O().f(); O.I1().g1(); O.I2().g2()
    O.f <== called by <module>

    Only '*_handler' methods:
    >>> @log_calls(only='*_handler', settings=MINIMAL)
    ... class O():
    ...     def f(self): pass
    ...     def my_handler(self): pass
    ...     def their_handler(self): pass
    ...     class I1():
    ...         def g1(self): pass
    ...         def some_handler(self): pass
    ...     class I2():
    ...         def another_handler(self): pass
    ...         def g2(self): pass
    >>> oh = O(); oh.f(); oh.my_handler(); oh.their_handler()
    O.my_handler <== called by <module>
    O.their_handler <== called by <module>
    >>> ohi1 = O.I1(); ohi1.g1(); ohi1.some_handler()
    O.I1.some_handler <== called by <module>
    >>> ohi2 = O.I2(); ohi2.another_handler(); ohi2.g2()
    O.I2.another_handler <== called by <module>

    When provided and nonempty, inner `only` overrides outer `only`

    In I1, only g1 is decorated, despite the outer class's `only` specifier:
    >>> @log_calls(only='*_handler', settings=MINIMAL)
    ... class O():
    ...     def f(self): pass
    ...     def my_handler(self): pass
    ...     def their_handler(self): pass
    ...     @log_calls(only='g1')
    ...     class I1():
    ...         def g1(self): pass
    ...         def some_handler(self): pass
    >>> ohi1 = O.I1(); ohi1.g1(); ohi1.some_handler()
    O.I1.g1 <== called by <module>

    If inner class has no `only` [or if it's an empty string or empty tuple or empty list],
    `only` from the outer class applies:
    >>> @log_calls(only='*_handler', settings=MINIMAL)
    ... class O():
    ...     def f(self): pass
    ...     def my_handler(self): pass
    ...     def their_handler(self): pass
    ...     @log_calls(log_exit=True)
    ...     class I1():
    ...         def g1(self): pass
    ...         def some_handler(self): pass
    >>> ohi1 = O.I1(); ohi1.g1(); ohi1.some_handler()
    O.I1.some_handler <== called by <module>
    O.I1.some_handler ==> returning to <module>

    Inner `omit` is added to outer `omit`

    >>> @log_calls(omit='*_handler', settings=MINIMAL)
    ... class O():
    ...     def f(self): pass
    ...     def my_handler(self): pass
    ...     def their_handler(self): pass
    ...     @log_calls(omit='*_function')
    ...     class I1():
    ...         def g1(self): pass
    ...         def some_handler(self): pass
    ...         def some_function(self): pass
    >>> ohi1 = O.I1(); ohi1.g1(); ohi1.some_handler(); ohi1.some_function()
    O.I1.g1 <== called by <module>
    """
    pass
#=============================================================================
# main__lc_class_deco__undecorate_methods
#=============================================================================
def main__lc_class_deco__undecorate_methods():
    # Doctest carrier: class-level omit/only strips @log_calls from
    # individually decorated methods.  The docstring IS the test — keep verbatim.
    """
    Topmost-class level:

    >>> @log_calls(omit='f', settings=MINIMAL)
    ... class O():
    ...     @log_calls()
    ...     def f(self): pass
    >>> O().f() # (no output)

    >>> @log_calls(only='g', settings=MINIMAL)
    ... class O():
    ...     @log_calls()
    ...     def f(self): pass
    ...     def g(self): pass
    >>> O().f(); O().g()
    O.g <== called by <module>

    Inner class:

    >>> @log_calls(omit='f', settings=MINIMAL)
    ... class O():
    ...     @log_calls(omit='g')
    ...     class I():
    ...         def f(self): pass
    ...         def g(self): pass
    >>> O.I().f(); O.I().g() # (no output)

    >>> @log_calls(only='f', settings=MINIMAL)
    ... class O():
    ...     @log_calls(only='g')
    ...     class I():
    ...         def f(self): pass
    ...         def g(self): pass
    >>> O.I().f(); O.I().g()
    O.I.g <== called by <module>
    """
    pass
#=============================================================================
# main__lc_class_deco__undecorate_properties
#=============================================================================
def main__lc_class_deco__undecorate_entire_property():
    # Doctest host: exercises `only`/`omit` matching an *entire* property
    # (both decorator-style and property()-constructor-style properties).
    """
    Property specified via decorator:
     Top-level:
      - only
    >>> @log_calls(only='f', settings=MINIMAL)
    ... class A():
    ...     def f(self): pass
    ...     @property
    ...     def prop(self): pass
    ...     @prop.setter
    ...     def prop(self, val): pass
    >>> A().f(); A().prop; A().prop = 17
    A.f <== called by <module>

      - omit
    >>> @log_calls(omit='prop')
    ... class A():
    ...     @property
    ...     def prop(self): pass
    ...     @prop.setter
    ...     def prop(self, val): pass
    >>> A().prop; A().prop = 17     # (no output)

     Inner class:
      - only
    >>> @log_calls(only='f', settings=MINIMAL)
    ... class A():
    ...     @log_calls()
    ...     class I():
    ...         def f(self): pass
    ...         @property
    ...         def prop(self): pass
    ...         @prop.setter
    ...         def prop(self, val): pass
    >>> A.I().f(); A.I().prop; A.I().prop = 17
    A.I.f <== called by <module>

      - omit
    >>> @log_calls(omit='prop', settings=MINIMAL)
    ... class A():
    ...     @log_calls(omit='f')
    ...     class I():
    ...         def f(self): pass
    ...         @property
    ...         def prop(self): pass
    ...         @prop.setter
    ...         def prop(self, val): pass
    >>> A.I().f(); A.I().prop; A.I().prop = 17      # (no output)

    Property specified via property():
     Top-level:
      (FIRST, here's what happens without `only` or `omit`):
    >>> @log_calls(settings=MINIMAL)
    ... class A():
    ...     def f(self): pass
    ...     @log_calls()
    ...     def prop_get(self): pass
    ...     @log_calls()
    ...     def prop_set(self, val): pass
    ...     @log_calls()
    ...     def prop_del(self): pass
    ...     prop = property(prop_get, prop_set, prop_del)
    >>> A().f(); A().prop; A().prop = 17; del A().prop
    A.f <== called by <module>
    A.prop_get <== called by <module>
    A.prop_set <== called by <module>
    A.prop_del <== called by <module>

      - only
    >>> @log_calls(only='f', settings=MINIMAL)
    ... class A():
    ...     def f(self): pass
    ...     @log_calls()
    ...     def prop_get(self): pass
    ...     @log_calls()
    ...     def prop_set(self, val): pass
    ...     prop = property(prop_get, prop_set)
    >>> A().f(); A().prop; A().prop = 17
    A.f <== called by <module>

      - omit
    >>> @log_calls(omit='prop', settings=MINIMAL)
    ... class A():
    ...     def f(self): pass
    ...     @log_calls()
    ...     def prop_get(self): pass
    ...     @log_calls()
    ...     def prop_set(self, val): pass
    ...     prop = property(prop_get, prop_set)
    >>> A().f(); A().prop; A().prop = 17
    A.f <== called by <module>

     Inner class:
      - only
    >>> @log_calls(only='f', settings=MINIMAL)
    ... class A():
    ...     @log_calls()
    ...     class I():
    ...         def f(self): pass
    ...         def prop_get(self): pass
    ...         def prop_set(self, val): pass
    ...         prop = property(prop_get, prop_set)
    >>> A.I().f(); A.I().prop; A.I().prop = 17
    A.I.f <== called by <module>

      - omit
    >>> @log_calls(omit='prop', settings=MINIMAL)
    ... class A():
    ...     @log_calls()
    ...     class I():
    ...         def f(self): pass
    ...         def prop_get(self): pass
    ...         def prop_set(self, val): pass
    ...         prop = property(prop_get, prop_set)
    >>> A.I().f(); A.I().prop; A.I().prop = 17
    A.I.f <== called by <module>
    """
    pass
#=============================================================================
# main__lc_class_deco__undecorate_property_attrs
#=============================================================================
def main__lc_class_deco__undecorate_property_attrs():
    # Doctest host: exercises `only`/`omit` matching *individual* property
    # attributes ('prop.getter', 'prop.setter', 'prop.deleter'), for both
    # decorator-style and property()-constructor-style properties.
    """
    Property specified via decorator:
     Top-level:
      - only
    >>> @log_calls(only='prop.getter', settings=MINIMAL)
    ... class A():
    ...     @property
    ...     def prop(self): pass
    ...     @prop.setter
    ...     def prop(self, val): pass
    >>> A().prop; A().prop = 17
    A.prop <== called by <module>

      - omit
    >>> @log_calls(omit='prop.setter', settings=MINIMAL)
    ... class A():
    ...     def f(self): pass
    ...     @property
    ...     @log_calls(name='A.%s.getter')
    ...     def prop(self): pass
    ...     @prop.setter
    ...     @log_calls()        # outer `omit` overrides this
    ...     def prop(self, val): pass
    >>> A().f(); A().prop; A().prop = 17
    A.f <== called by <module>
    A.prop.getter <== called by <module>

     Inner class:
      - only
    >>> @log_calls(only='prop.deleter', settings=MINIMAL)
    ... class A():
    ...     @log_calls()
    ...     class I():
    ...         def f(self): pass
    ...         @property
    ...         def prop(self): pass
    ...         @prop.setter
    ...         def prop(self, val): pass
    ...         @prop.deleter
    ...         @log_calls(name='A.I.%s.deleter')
    ...         def prop(self): pass
    >>> A.I().f(); A.I().prop; A.I().prop = 17; del A.I().prop
    A.I.prop.deleter <== called by <module>

      - omit
    >>> @log_calls(omit='prop.setter prop.deleter', settings=MINIMAL)
    ... class A():
    ...     @log_calls(omit='f')
    ...     class I():
    ...         def f(self): pass
    ...         @property
    ...         def prop(self): pass
    ...         @prop.setter
    ...         def prop(self, val): pass
    ...         @prop.deleter
    ...         def prop(self): pass
    >>> A.I().f(); A.I().prop; A.I().prop = 17; del A.I().prop
    A.I.prop <== called by <module>
    >>> A.log_calls_omit
    ('prop.setter', 'prop.deleter')
    >>> A.I.log_calls_omit
    ('prop.setter', 'prop.deleter', 'f')

    Property specified via property():
     Top-level:
      - only    [OBSERVE, uses both ways of referring to the property attrs]
    >>> @log_calls(only='prop_get prop.deleter', settings=MINIMAL)
    ... class A():
    ...     def prop_get(self): pass
    ...     # @log_calls() would have no effect
    ...     def prop_set(self, val): pass
    ...     def prop_del(self): pass
    ...     prop = property(prop_get, prop_set, prop_del)
    >>> A().prop; A().prop = 17; del A().prop
    A.prop_get <== called by <module>
    A.prop_del <== called by <module>
    >>> A.log_calls_only
    ('prop_get', 'prop.deleter', 'prop_del')

      - omit
      Referring to 'prop_get' rather than 'prop.getter' works too
    >>> @log_calls(omit='prop_get', settings=MINIMAL)
    ... class A():
    ...     def f(self): pass
    ...     def prop_get(self): pass
    ...     def prop_del(self): pass
    ...     prop = property(prop_get, None, prop_del)
    >>> A().f(); A().prop; del A().prop
    A.f <== called by <module>
    A.prop_del <== called by <module>

     Inner class:
      - only
    >>> @log_calls(only='prop.getter', settings=MINIMAL)
    ... class A():
    ...     @log_calls()
    ...     class I():
    ...         def f(self): pass
    ...         def prop_get(self): pass
    ...         def prop_set(self, val): pass
    ...         prop = property(prop_get, prop_set)
    >>> A.I().f(); A.I().prop; A.I().prop = 17
    A.I.prop_get <== called by <module>
    >>> A.I.log_calls_only
    ('prop.getter', 'prop_get')

      - omit
    >>> @log_calls(omit='prop_get', settings=MINIMAL)
    ... class A():
    ...     @log_calls()
    ...     class I():
    ...         def f(self): pass
    ...         def prop_get(self): pass
    ...         def prop_set(self, val): pass
    ...         prop = property(prop_get, prop_set)
    >>> A.I().f(); A.I().prop; A.I().prop = 17
    A.I.f <== called by <module>
    A.I.prop_set <== called by <module>
    >>> A.I.log_calls_omit
    ('prop_get',)
    """
    pass
#=============================================================================
# main__lc_class_deco__omitonly_with_property_ctor__use_qualified_names
#=============================================================================
def main__lc_class_deco__omitonly_with_property_ctor__use_qualified_names():
    # Doctest host: verifies the omit/only "fixup" that lets qualified names
    # (propname.getter/.setter/.deleter) refer to methods passed to property().
    """
    We perform fixups on the 'omit' and 'only' lists so that you can use
        propertyname.getter, propertyname.setter, propertyname.deleter
    to refer to methods supplied to the 'property' constructor, which are also
    in the class dictionary.
    Empirically: In Python 3.4.2 & probably other versions, 'xx' is enumerated
    before 'setxx' in class XX.
    Without the fixup, these tests would (or, could) fail.

    - omit
    >>> @log_calls(omit='xx.setter')
    ... class XX():
    ...     def __init__(self): pass
    ...     def method(self): pass
    ...     @staticmethod
    ...     def statmethod(): pass
    ...     @classmethod
    ...     def clsmethod(cls): pass
    ...     def setxx(self, val): pass
    ...     def delxx(self): pass
    ...     xx = property(None, setxx, delxx)

    The method is NOT decorated:
    >>> XX.get_log_calls_wrapper('xx.setter') is None
    True
    >>> XX.get_log_calls_wrapper('setxx') is None
    True
    >>> XX.log_calls_omit
    ('xx.setter', 'setxx')

    - only
    >>> @log_calls(only='y.setter')
    ... class Y():
    ...     def __init__(self): pass
    ...     def method(self): pass
    ...     @staticmethod
    ...     def statmethod(): pass
    ...     @classmethod
    ...     def clsmethod(cls): pass
    ...     def sety(self, val): pass
    ...     def dely(self): pass
    ...     y = property(None, sety, dely)

    Wrappers found for 'sety' and 'y.setter' are identical:
    >>> Y.get_log_calls_wrapper('sety') is Y.get_log_calls_wrapper('y.setter')
    True

    and the method IS decorated:
    >>> bool( Y.get_log_calls_wrapper('sety') )
    True
    >>> Y.log_calls_only
    ('y.setter', 'sety')
    """
    pass
#=============================================================================
# main__lc_class_deco__omitonly_with_property_ctor__property_name_only
#=============================================================================
def main__lc_class_deco__omitonly_with_property_ctor__property_name_only():
    # Doctest host: omit/only given just the bare property *name* must expand
    # to all of the methods supplied to the property() constructor.
    """
    Same class we test omit='xx.setter' with,
    where `xx` is a property created using `property` constructor,
    and `xx` is enumerated by `cls.__dict__` before `setxx` in Py3.4.2.
    This failed prior to handling entire properties
    in `_deco_base._add_property_method_names`

    >>> @log_calls(omit='xx')
    ... class XX():
    ...     def __init__(self): pass
    ...     def method(self): pass
    ...     @staticmethod
    ...     def statmethod(): pass
    ...     @classmethod
    ...     def clsmethod(cls): pass
    ...     def setxx(self, val): pass
    ...     def delxx(self): pass
    ...     xx = property(None, setxx, delxx)

    The method is NOT decorated:
    >>> XX.get_log_calls_wrapper('xx.setter') is None
    True
    >>> XX.get_log_calls_wrapper('setxx') is None
    True
    >>> XX.log_calls_omit
    ('xx', 'setxx', 'delxx')

    - only
    >>> @log_calls(only='xxx')
    ... class XXX():
    ...     def __init__(self): pass
    ...     def method(self): pass
    ...     @staticmethod
    ...     def statmethod(): pass
    ...     @classmethod
    ...     def clsmethod(cls): pass
    ...     def setxxx(self, val): pass
    ...     def delxxx(self): pass
    ...     xxx = property(None, setxxx, delxxx)

    Wrappers found for 'setxxx' and 'xxx.setter' are identical:
    >>> XXX.get_log_calls_wrapper('setxxx') is XXX.get_log_calls_wrapper('xxx.setter')
    True

    and the method IS decorated:
    >>> bool( XXX.get_log_calls_wrapper('setxxx') )
    True
    >>> XXX.log_calls_only
    ('xxx', 'setxxx', 'delxxx')
    """
    pass
#-----------------------------------------------------------------------------
# main__lc_class_deco__omitonly_locals_in_qualname
#-----------------------------------------------------------------------------
def main__lc_class_deco__omitonly_locals_in_qualname():
    # Doctest host: `omit`/`only` patterns may contain `.<locals>.` qualnames
    # (and wildcards like `?etx`) for classes defined inside a function.
    """
    Only A.create_objs.<locals>.I.method and A.create_objs.<locals>.I.delx will be decorated.
    We have to test using `.<locals>` in `omit`/`only`. This is an artificial example,
    but they all would be -- `.<locals>` only qualifies functions, classes and variables local
    to a function, and `log_calls` does NOT recurse into function bodies (the locals of a function)
    as it does into class members, so anything decorated inside a function will have to be
    decorated explicitly, and then the `... .<locals>.` isn't needed to disambiguate anything.
    Nevertheless, :

    >>> @log_calls(omit='create_obj')
    ... class A():
    ...     def create_obj(self):
    ...         @log_calls(omit='A.create_obj.<locals>.I.?etx prop', settings=MINIMAL)
    ...         class I():
    ...             def method(self): pass
    ...             def getx(self): pass
    ...             def setx(self, v): pass
    ...             def delx(self): pass
    ...             x = property(getx, setx, delx)
    ...             @property
    ...             def prop(self): pass
    ...             @prop.setter
    ...             def prop(self, v): pass
    ...
    ...         return I()
    >>> aye = A().create_obj()
    >>> aye.method(); aye.x; aye.x = 42; del aye.x; aye.prop; aye.prop = 101
    A.create_obj.<locals>.I.method <== called by <module>
    A.create_obj.<locals>.I.delx <== called by <module>
    """
    pass
#-----------------------------------------------------------------------------
# main__test__get_log_calls_wrapper__from_outside
#-----------------------------------------------------------------------------
def main__test__get_log_calls_wrapper__from_outside():
    # Doctest host: `get_log_calls_wrapper(name)` called from *outside* the
    # decorated class -- both the names that resolve and the ValueError/TypeError
    # diagnostics for bad specifiers.
    """
    >>> @log_calls(omit='*_nodeco delx')
    ... class A():
    ...     def __init__(self): pass
    ...     def method(self): pass
    ...
    ...     def method_nodeco(self): pass
    ...
    ...     @staticmethod
    ...     def statmethod(): pass
    ...     @classmethod
    ...     def clsmethod(cls): pass
    ...
    ...     @staticmethod
    ...     def statmethod_nodeco(): pass
    ...     @classmethod
    ...     def clsmethod_nodeco(cls): pass
    ...
    ...     @property
    ...     def prop(self): pass
    ...     @prop.setter
    ...     def prop(self, val): pass
    ...
    ...     def setx(self, val): pass
    ...     def delx(self): pass
    ...
    ...     x = property(None, setx, delx)

    >>> a = A()                     # doctest: +ELLIPSIS
    A.__init__ <== called by <module>
        arguments: self=<__main__.A object at 0x...>
    A.__init__ ==> returning to <module>

    First, the method names that work
    ---------------------------------
    >>> decorated_A = (
    ...     '__init__',
    ...     'method',
    ...     'statmethod',
    ...     'clsmethod',
    ...     'prop',
    ...     'prop.getter',
    ...     'prop.setter',
    ...     'x.setter',
    ...     'setx',
    ... )
    >>> all(a.get_log_calls_wrapper(name) for name in decorated_A)
    True
    >>> not_decorated_A = (
    ...     'method_nodeco',
    ...     'statmethod_nodeco',
    ...     'clsmethod_nodeco',
    ...     'x.deleter',
    ...     'delx',
    ... )
    >>> all((a.get_log_calls_wrapper(name) is None) for name in not_decorated_A)
    True
    >>> a.get_log_calls_wrapper('x.setter') == a.get_log_calls_wrapper('setx')
    True
    >>> a.get_log_calls_wrapper('x.deleter') == a.get_log_calls_wrapper('delx')
    True

    Stuff that fails - deco'd class
    -------------------------------
    >>> bad_names = (
    ...     'no_such_method',
    ...     'foo.bar.baz',
    ...     'foo.',
    ...     'prop.',
    ...     'prop.foo',
    ...     'prop.deleter',
    ...     'x.getter',
    ...     'x',            # equiv to x.getter
    ...     'method.getter',
    ...     '.uvwxyz',
    ...     'not an identifier',
    ...     '88 < x**2',
    ...
    ...     '__doc__',
    ...     17,
    ...     False,
    ...     tuple(),
    ...     ['83'],
    ...     None
    ... )
    >>> for name in bad_names:
    ...     try:
    ...         wrapper = a.get_log_calls_wrapper(name)
    ...     except ValueError as e:
    ...         print("%s: %s" % (type(e).__name__, e))
    ...     except TypeError as e:
    ...         print("%s: %s" % (type(e).__name__, e))
    ValueError: class 'A' has no such attribute as 'no_such_method'
    ValueError: no such method specifier 'foo.bar.baz'
    ValueError: bad method specifier 'foo.'
    ValueError: bad method specifier 'prop.'
    ValueError: prop.foo -- unknown qualifier 'foo'
    ValueError: property 'prop' has no 'deleter' in class 'A'
    ValueError: property 'x' has no 'getter' in class 'A'
    ValueError: property 'x' has no 'getter' in class 'A'
    ValueError: method.getter -- 'method' is not a property of class 'A'
    ValueError: bad method specifier '.uvwxyz'
    ValueError: class 'A' has no such attribute as 'not an identifier'
    ValueError: class 'A' has no such attribute as '88 < x**2'
    TypeError: item '__doc__' of class 'A' is of type 'NoneType' and can't be decorated
    TypeError: expecting str for argument 'fname', got 17 of type int
    TypeError: expecting str for argument 'fname', got False of type bool
    TypeError: expecting str for argument 'fname', got () of type tuple
    TypeError: expecting str for argument 'fname', got ['83'] of type list
    TypeError: expecting str for argument 'fname', got None of type NoneType

    Now, stuff that fails - non-deco'd class
    ----------------------------------------
    >>> class NoDeco():
    ...     pass
    >>> nd = NoDeco()
    >>> # 'NoDeco' object has no attribute 'get_log_calls_wrapper'
    >>> print(nd.get_log_calls_wrapper)     # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    AttributeError: ...
    >>> # 'NoDeco' object has no attribute 'get_log_calls_wrapper'
    >>> print(nd.get_log_calls_wrapper('__init__'))     # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    AttributeError: ...
    """
    pass
# SURGERY:
# The doctest output above hard-codes "__main__" in object reprs; when this
# module is imported (e.g. by unittest), its __name__ differs, so patch the
# docstring to keep the expected output accurate.
main__test__get_log_calls_wrapper__from_outside.__doc__ = \
    main__test__get_log_calls_wrapper__from_outside.__doc__.replace("__main__", __name__)
#-----------------------------------------------------------------------------
# main__test__get_log_calls_wrapper__from_inside
# test methods accessing their OWN wrappers, the hard way
# Note: for tests of cls.get_own_log_calls_wrapper(), see:
# log_calls/tests/test_get_own_log_calls_wrapper.py
#-----------------------------------------------------------------------------
def main__test__get_log_calls_wrapper__from_inside():
    # Doctest host: methods looking up their OWN wrapper via
    # get_log_calls_wrapper(...) and writing through wrapper.log_message().
    """
    >>> @log_calls(omit='no_deco', mute=True)
    ... class B():
    ...     def __init__(self):
    ...         wrapper = self.get_log_calls_wrapper('__init__')
    ...         wrapper.log_message('Hi')
    ...     def method(self):
    ...         wrapper = self.get_log_calls_wrapper('method')
    ...         wrapper.log_message('Hi')
    ...     def no_deco(self):
    ...         wrapper = self.get_log_calls_wrapper('no_deco')
    ...         wrapper.log_message('Hi')
    ...     @staticmethod
    ...     def statmethod():
    ...         wrapper = B.get_log_calls_wrapper('statmethod')
    ...         wrapper.log_message('Hi')
    ...
    ...     @classmethod
    ...     def clsmethod(cls):
    ...         wrapper = B.get_log_calls_wrapper('clsmethod')
    ...         wrapper.log_message('Hi')
    ...
    ...     @property
    ...     def prop(self):
    ...         wrapper = self.get_log_calls_wrapper('prop.getter')
    ...         wrapper.log_message('Hi')
    ...     @prop.setter
    ...     def prop(self, val):
    ...         wrapper = self.get_log_calls_wrapper('prop.setter')
    ...         wrapper.log_message('Hi from prop.setter')
    ...
    ...     def setx(self, val):
    ...         wrapper = self.get_log_calls_wrapper('setx')
    ...         wrapper.log_message('Hi from setx alias x.setter')
    ...     def delx(self):
    ...         wrapper = self.get_log_calls_wrapper('x.deleter')
    ...         wrapper.log_message('Hi from delx alias x.deleter')
    ...
    ...     x = property(None, setx, delx)

    >>> b = B()
    B.__init__: Hi
    >>> b.method()
    B.method: Hi
    >>> b.statmethod()
    B.statmethod: Hi
    >>> b.clsmethod()
    B.clsmethod: Hi
    >>> b.prop
    B.prop: Hi
    >>> b.prop = 17
    B.prop: Hi from prop.setter
    >>> b.x = 13
    B.setx: Hi from setx alias x.setter
    >>> del b.x
    B.delx: Hi from delx alias x.deleter

    `no_deco` is not decorated, so `get_log_calls_wrapper` returns None,
    but the method tries to access its `log_message` attribute --
    hence this error message:
    >>> b.no_deco()     # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    AttributeError: 'NoneType' object has no attribute 'log_message'
    """
    pass
#-----------------------------------------------------------------------------
# main__test___repr__log_calls_as_functional_applied_to_lambda
#-----------------------------------------------------------------------------
def main__test___repr__log_calls_as_functional_applied_to_lambda():
    # Doctest host: log_calls used as a plain functional on a lambda class
    # attribute, plus the guarantee that __repr__ itself is not log_calls-wrapped.
    """
    >>> import math
    >>> @log_calls(indent=True)
    ... class Point():
    ...     def __init__(self, x, y):
    ...         self.x = x
    ...         self.y = y
    ...
    ...     @property
    ...     def pair(self):
    ...         return (self.x, self.y)
    ...
    ...     @pair.setter
    ...     @log_calls(prefix='set:')
    ...     def pair(self, pr):
    ...         self.x, self.y = pr
    ...
    ...     @staticmethod
    ...     def distance(pt1, pt2):
    ...         return math.sqrt((pt1.x - pt2.x)**2 + (pt1.y - pt2.y)**2)
    ...
    ...     # `log_calls` as functional applied to lambda
    ...     length_ = log_calls(log_retval=True)(
    ...                 lambda self: self.distance(Point(0, 0), self)
    ...     )
    ...
    ...     def __repr__(self):
    ...         return "Point" + str((self.x, self.y))

    >>> p = Point(1, 2)                 # doctest: +ELLIPSIS
    Point.__init__ <== called by <module>
        arguments: self=<__main__.Point object at 0x...>, x=1, y=2
    Point.__init__ ==> returning to <module>

    >>> print("p.length_() =", p.length_())     # doctest: +ELLIPSIS
    Point.<lambda> <== called by <module>
        arguments: self=Point(1, 2)
        Point.__init__ <== called by Point.<lambda>
            arguments: self=<__main__.Point object at 0x...>, x=0, y=0
        Point.__init__ ==> returning to Point.<lambda>
        Point.distance <== called by Point.<lambda>
            arguments: pt1=Point(0, 0), pt2=Point(1, 2)
        Point.distance ==> returning to Point.<lambda>
        Point.<lambda> return value: 2.236...
    Point.<lambda> ==> returning to <module>
    p.length_() = 2.236...

    `log_calls` does not decorate `__repr__` (anyway, not with itself!
    It deco's it with reprlib.recursive_repr):
    >>> hasattr(p.__repr__, 'log_calls_settings')
    False
    """
    pass
# SURGERY:
# Keep the hard-coded "__main__" in the doctest's expected reprs in sync with
# this module's actual __name__ (differs when imported rather than run directly).
main__test___repr__log_calls_as_functional_applied_to_lambda.__doc__ = \
    main__test___repr__log_calls_as_functional_applied_to_lambda.__doc__.replace("__main__", __name__)
#-----------------------------------------------------------------------------
# main__test__decorate_hierarchy
#-----------------------------------------------------------------------------
def main__test__decorate_class__hierarchy():
    # Doctest host: log_calls.decorate_class(..., decorate_subclasses=True) on a
    # template-method hierarchy; per-subclass @log_calls settings interact with it.
    """
    >>> class Base():
    ...     def __init__(self, x):
    ...         self.x = x
    ...
    ...     def template_method(self):
    ...         print("**** callout returns", self.callout())
    ...
    ...     def callout(self):
    ...         pass

    >>> @log_calls(omit='callout')      # IGNORED; similarly, `only` ignored
    ... class A(Base):
    ...     def callout(self):  self.helper_A(); return 2 * self.x
    ...     def helper_A(self): pass

    >>> @log_calls(log_retval=False)    # overrides setting passed to `decorate_hierarchy`
    ... class B(Base):
    ...     def callout(self):  self.helper_B(); return 5 * self.x
    ...     def helper_B(self): pass

    >>> log_calls.decorate_class(Base, decorate_subclasses=True,
    ...                          only="template_method callout", indent=True, log_retval=True)

    >>> a = A(5)
    >>> a.template_method()     # doctest: +ELLIPSIS
    Base.template_method <== called by <module>
        arguments: self=<__main__.A object at 0x...>
        A.callout <== called by Base.template_method
            arguments: self=<__main__.A object at 0x...>
            A.callout return value: 10
        A.callout ==> returning to Base.template_method
    **** callout returns 10
        Base.template_method return value: None
    Base.template_method ==> returning to <module>

    >>> b = B(100)
    >>> b.template_method()     # doctest: +ELLIPSIS
    Base.template_method <== called by <module>
        arguments: self=<__main__.B object at 0x...>
        B.callout <== called by Base.template_method
            arguments: self=<__main__.B object at 0x...>
        B.callout ==> returning to Base.template_method
    **** callout returns 500
        Base.template_method return value: None
    Base.template_method ==> returning to <module>
    """
    pass
# SURGERY:
# As above: patch "__main__" in the expected reprs to this module's __name__.
main__test__decorate_class__hierarchy.__doc__ = \
    main__test__decorate_class__hierarchy.__doc__.replace("__main__", __name__)
##############################################################################
# end of tests.
##############################################################################
#-----------------------------------------------------------------------------
# For unittest integration
#-----------------------------------------------------------------------------
def load_tests(loader, tests, ignore):
    """unittest `load_tests` protocol hook: add this module's doctests
    (collected by doctest.DocTestSuite()) to the discovered test suite."""
    tests.addTests(doctest.DocTestSuite())
    return tests
#-----------------------------------------------------------------------------
if __name__ == "__main__":
    # Run all docstring tests in this module directly (pass verbose=True to
    # see each example); alternatively run via unittest (see load_tests above).
    doctest.testmod()   # (verbose=True)
    # unittest.main()
| StarcoderdataPython |
1781981 | <filename>examples/eager_examples/scripts/example_switch_engine.py
#!/usr/bin/env python3
# ROS packages required
import rospy
from eager_core.eager_env import BaseEagerEnv
from eager_core.objects import Object
from eager_core.wrappers.flatten import Flatten
from eager_bridge_webots.webots_engine import WebotsEngine # noqa: F401
from eager_bridge_pybullet.pybullet_engine import PyBulletEngine # noqa: F401
from eager_process_safe_actions.safe_actions_processor import SafeActionsProcessor
from gym import spaces
import numpy as np
from stable_baselines3 import PPO
class MyEnv(BaseEagerEnv):
    """EAGER environment wrapping a single UR5e arm whose joint commands are
    routed through a SafeActionsProcessor before reaching the physics engine."""

    def __init__(self, engine, name="my_env"):
        """Build the scene: one ur5e robot, safe-action preprocessing on its
        joint actuator, and the env's observation/action spaces."""
        super().__init__(engine, name=name)

        self.STEPS_PER_ROLLOUT = 100  # episode length, in env steps
        self.steps = 0                # steps taken in the current episode

        # The single robot in the scene.
        self.ur5e = Object.create('ur5e1', 'eager_robot_ur5e', 'ur5e')

        # Commanded joint targets are checked/limited so they are safe to execute.
        safety = SafeActionsProcessor(duration=0.1,
                                      checks_per_rad=15,
                                      vel_limit=3.0,
                                      robot_type='ur5e',
                                      collision_height=0.01,
                                      )
        self.ur5e.actuators['joints'].add_preprocess(
            processor=safety,
            observations_from_objects=[self.ur5e],
            action_space=spaces.Box(low=-np.pi, high=np.pi, shape=(6,)))

        # Initialize all the services of the robots.
        self._init_nodes([self.ur5e])

        # The env simply exposes the robot's own spaces.
        self.observation_space = self.ur5e.observation_space
        self.action_space = self.ur5e.action_space

    def step(self, action):
        """Apply *action*, advance the simulation one step, and return
        (obs, reward, done, state)."""
        self.ur5e.set_action(action)  # actions must be set before stepping
        self._step()
        self.steps += 1

        obs = self.ur5e.get_obs()
        return obs, self._get_reward(obs), self._is_done(obs), self.ur5e.get_state()

    def reset(self) -> object:
        """Reset the robot to its home pose and return the first observation."""
        self.steps = 0

        # Desired state after the reset: elbow-up home pose, zero velocity.
        home_pose = dict()
        home_pose['joint_pos'] = np.array([0, -np.pi / 2, 0, 0, 0, 0], dtype='float32')
        home_pose['joint_vel'] = np.array([0, 0, 0, 0, 0, 0], dtype='float32')
        self.ur5e.reset(states=home_pose)

        self._reset()
        return self.ur5e.get_obs()

    def render(self, mode, **kwargs):
        """Rendering is handled by the engine; nothing to do here."""
        return None

    def _get_reward(self, obs):
        """Negative squared distance of the joint positions from the goal pose
        [0, -pi/2, 0, 0, 0, 0] -- maximal (0) exactly at the goal."""
        goal = np.array([0, -np.pi / 2, 0, 0, 0, 0], dtype='float32')
        return -np.sum((obs['joint_sensors'] - goal) ** 2)

    def _is_done(self, obs):
        """Episode ends after STEPS_PER_ROLLOUT steps."""
        return self.steps >= self.STEPS_PER_ROLLOUT
if __name__ == '__main__':
    # Stand-alone demo: random-action rollouts followed by PPO training.
    rospy.init_node('example_safe_actions', anonymous=True, log_level=rospy.WARN)

    # Define the engine
    engine = WebotsEngine()
    # engine = PyBulletEngine()   # alternative engine -- swap the line above to switch

    # Create environment
    env = MyEnv(engine, name="my_env")
    env = Flatten(env)  # Flatten wrapper -- presumably flattens the spaces for SB3; confirm in eager_core
    env.seed(42)

    # Warm-up: 1000 random actions; episodes auto-reset when done.
    obs = env.reset()
    for i in range(1000):
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        env.render()
        if done:
            obs = env.reset()

    # Train a PPO agent on the same (wrapped) environment.
    model = PPO('MlpPolicy', env, verbose=1)
    model.learn(total_timesteps=100000)

    env.close()
| StarcoderdataPython |
3225342 | #-------------------------------------------------------------------------------
# Name: Pet_Game.py
# Purpose: To create a class to play with a virtual pet!
#
# Author: odanielb, <NAME>
#
# Acknowledgements:
# The following images were borrowed and then edited:
# Shop: https://cdn2.iconfinder.com/data/icons/windows-8-metro-style/512/shop.png
# Money: http://cdn.flaticon.com/png/256/16302.png
# Backpack: https://cdn3.iconfinder.com/data/icons/outdoor-and-camping-icons/512/Backpack-512.png
# Stethoscope: https://cdn3.iconfinder.com/data/icons/healthcare-and-medicine-icons-1/512/Stethoscope-512.png
# Home: https://cdn2.iconfinder.com/data/icons/windows-8-metro-style/512/home.png
#-------------------------------------------------------------------------------
from turtle_user_input import input_string #This is the string that the user inputs
from turtle_user_input import set_up_window #This sets up the onkey functions for that input
from turtle_user_input import unbind_keys #This unbinds the onkey functions as needed
from Owner import Owner
from Pet import Pet #This class was utilized by my first version of this game!
from Shop import Shop #The Shop contains instances of an Item class, which was used in my first version of this game!
import sys
import Tkinter
import turtle
import time
class Pet_Game(object):
"""To create a class to handle and play a game using the Pet, Owner, Shop,
and Item classes. It is designed so that a program using this class only
needs to create an instance of this class for the game to run correctly."""
    def __init__(self):
        """Creates an instance of the Pet_Game class, which includes one Pet,
        one Owner (that's you!), and it will end when quit or when the pet dies.
        Note: Only one instance of this class will work at a time.

        NOTE(review): this constructor never returns normally -- the final
        self.play() call enters the Tk event loop and blocks until quit."""
        turtle.setup(700,500) #Set up screen
        self.wn = turtle.Screen()
        self.wn.colormode(255)          # accept 0-255 RGB component values
        self.wn.bgcolor(0,0,52)
        name = self.__ask_for_name()    # blocks until the user presses Enter
        self.__add_shapes()             # register all .gif shapes with the screen
        self.owner = Owner( Pet(name) )
        self.shop = Shop()
        self.is_game_over = False
        # Cursor positions for the four home-screen buttons, in navigation order.
        self._home_cursor_positions = [(-300, 150), (270, 150), (290, -150), (-300, -150)] #Shop, Money, Status, Items
        # One dedicated turtle per on-screen element (writer or icon).
        self.messenger = self._initialize_turtle( turtle.Turtle() )         #Turtle for the message box
        self.instructor = self._initialize_turtle( turtle.Turtle() )        #Turtle for writing instructions
        self.inventory_clerk = self._initialize_turtle( turtle.Turtle() )   #Turtle for the Owner's item menu
        self.doctor = self._initialize_turtle( turtle.Turtle() )            #Turtle for Pet status
        self.pet_turtle = self._initialize_turtle( turtle.Turtle() )        #Turtle to BE the pet on screen
        self.shop_button = self._initialize_turtle( turtle.Turtle() )       #Turtle to BE the shop button
        self.balance = self._initialize_turtle( turtle.Turtle() )           #Turtle to BE the money symbol
        self.bag = self._initialize_turtle( turtle.Turtle() )               #Turtle to BE the item bag
        self.wn.onkey(self.Quit, "q")   # 'q' quits from anywhere in the game
        self.wn.listen()
        self.setup_home_screen()
        #Instructions
        self.messenger.setpos(0,100)
        self.messenger.write("Welcome! Here is your new pet, please take care of it!",move=False,align='center',font=("Century Gothic",18,("bold","normal")))
        self.messenger.setpos(0,80)
        self.messenger.write("Its status will decrease over time, so you'll need to buy items from the Shop to help.",move=False,align='center',font=("Century Gothic",11,("bold","normal")))
        self.messenger.setpos(0,60)
        self.messenger.write("As you navigate, you'll earn money, and you might even earn more if you and your pet",move=False,align='center',font=("Century Gothic",11,("bold","normal")))
        self.messenger.setpos(0,40)
        self.messenger.write("become friends! Navigate using the arrow keys, Enter to select, and Backspace to go back.",move=False,align='center',font=("Century Gothic",11,("bold","normal")))
        self.messenger.setpos(0,20)
        self.messenger.write("Press 'q' to quit at any time!",move=False,align='center',font=("Century Gothic",11,("bold","normal")))
        self.messenger.setpos(0,70)     # park the messenger at the message-box position
        self.play()                     # enter the event loop (blocks)
#------------ Initializing Game Methods ---------------------------------------#
    def play(self):
        """Begins playing the game. Waits until the game is quit by the user."""
        Tkinter.mainloop()  # blocks: runs the Tk event loop driving all onkey handlers
    def update_time_events(self):
        """Updates all of the time based events for the Shop, Pet and Owner.
        post: Stock in the Shop, states of the Pet, the Owner's money may be changed."""
        self.owner.check_if_pay_day()   # may credit the Owner -- semantics in Owner; confirm there
        self.owner.pet.check_states()   # lets the Pet update its time-decayed states
        self.shop.check_if_restock()    # may replenish Shop stock -- see Shop
def __add_shapes(self):
"""Adds the shapes for the turtles that are necessary for the game."""
self.wn.addshape("pack.gif")
self.wn.addshape("thePet1.gif")
self.wn.addshape("shop.gif")
self.wn.addshape("money2.gif")
self.wn.addshape("doctor.gif")
self.wn.addshape("arrow_right.gif")
self.wn.addshape("arrow_up.gif")
self.wn.addshape("arrow_down.gif")
self.wn.addshape("home.gif")
self.wn.addshape("thePet_dead.gif")
    def __ask_for_name(self):
        """Asks user what to name their pet.
        post: returns the name as a string."""
        question_writer = self._initialize_turtle( turtle.Turtle() )
        question_writer.setpos(0, 90)
        question_writer.color(71, 230, 163)
        question_writer.write("Enter a name for your pet:",move=False,align='center',font=("Century Gothic",30,("bold","normal")))
        set_up_window(self.wn)      # bind the per-character onkey handlers (turtle_user_input)
        self.wn.listen() #The window listens for key presses
        while input_string.writing: #Waits for user input, ends with Enter
            # NOTE(review): rewriting the same prompt every pass (without clear())
            # overdraws it each iteration; looks like a busy-wait kept alive so the
            # event loop can service keypresses -- confirm before changing.
            question_writer.write("Enter a name for your pet:",move=False,align='center',font=("Century Gothic",30,("bold","normal")))
        name = input_string.get_string()    # the completed, Enter-terminated input
        question_writer.clear()
        unbind_keys(self.wn)                # remove the text-entry key bindings
        input_string.clear_string()         # reset the shared buffer for any later prompt
        return name
def _initialize_turtle(self, turtle):
"""Initializes the given turtle to be hidden, up, at the highest speed,
and as the color for the game.
pre: turtle is a Turtle object
post: returns the turtle"""
turtle.color(71, 230, 163)
turtle.hideturtle()
turtle.up()
turtle.speed(0)
return turtle
    def pet_dead(self):
        """When pet is dead, this displays all the appropriate things on the
        Home Screen.
        post: Game can only be quit"""
        self.hide_home_screen()
        time.sleep(2)   # brief dramatic pause before revealing the dead pet
        self.pet_turtle.shape("thePet_dead.gif")
        self.pet_turtle.showturtle()
        self.messenger.setpos(0,70)
        self.messenger.write("Your pet died!",move=False,align='center',font=("Century Gothic",30,("bold","normal")))
        self.messenger.setpos(0,40)
        self.messenger.write("Press 'q' to quit.",move=False,align='center',font=("Century Gothic",15,("bold","normal")))
    def Quit(self):
        """Quits the program by closing window and then exiting using sys.
        Bound to the 'q' key for the whole game (see __init__)."""
        self.wn.bye()   # close the turtle graphics window / Tk root
        sys.exit()      # terminate the process (raises SystemExit)
#------------ Home Screen -----------------------------------------------------#
#------------ Screen Settings ---------------------------------------------#
def setup_home_screen(self):
    """Draw the Home Screen: pet, shop/items buttons, money, and status.

    Hides the Store Screen first, then lays out each widget. Each widget
    turtle is positioned once to write its label and again to place its
    sprite (text and image anchors differ).

    post: The Home Screen is shown; if the pet is dead, switches straight
    to the game-over display instead.
    """
    self.hide_store_screen()
    self.pet_turtle.setpos(0, 200) #Display pet name
    self.pet_turtle.write(self.owner.pet.name,move=False,align='center',font=("Century Gothic",30,("bold","normal")))
    self.pet_turtle.setpos(0,-75) #Display pet
    self.pet_turtle.shape("thePet1.gif")
    self.pet_turtle.showturtle()
    self.shop_button.setpos(-230, 220) #Display shop button
    self.shop_button.write("Shop",move=False,align='center',font=("Century Gothic",15,("bold","normal")))
    self.shop_button.setpos(-300, 210)
    self.shop_button.shape("shop.gif")
    self.shop_button.showturtle()
    self.balance.setpos(340, 190) #Display money balance
    self.balance.write(self.owner.money_balance(),move=False,align='right',font=("Century Gothic",25,("bold","normal")))
    self.balance.shape("money2.gif")
    self.balance.setpos(200, 210)
    self.balance.showturtle()
    self.doctor.setpos(230, -240) #Pet Status
    self.doctor.write("Status",move=False,align='center',font=("Century Gothic",15,("bold","normal")))
    self.doctor.setpos(300, -200)
    self.doctor.shape("doctor.gif")
    self.doctor.showturtle()
    self.bag.setpos(-230, -240) #Display bag button
    self.bag.write("Items",move=False,align='center',font=("Century Gothic",15,("bold","normal")))
    self.bag.setpos(-300, -200)
    self.bag.shape("pack.gif")
    self.bag.showturtle()
    self.messenger.setpos(0,70) #Message box
    self.__activate_home_keypresses()
    self.display_home_cursor(self._home_cursor_positions[0])  # Start on Shop
    if not self.owner.pet.is_alive():
        self.pet_dead()
def hide_home_screen(self):
    """Hide the Home Screen and unbind its keys.

    Clears and hides every Home Screen widget turtle and removes all of
    the Home Screen key bindings, leaving only the background visible.

    post: A blank blue screen will be shown.
    """
    self.bag.clear()
    self.bag.ht()
    self.messenger.clear()
    self.doctor.clear()
    self.doctor.ht()
    self.balance.clear()
    self.balance.ht()
    self.shop_button.clear()
    self.shop_button.ht()
    self.pet_turtle.clear()
    self.pet_turtle.ht()
    self.inventory_clerk.ht()
    # Unbind every key the Home Screen (or its item menu) may have bound.
    self.wn.onkey(None, 'Return')
    self.wn.onkey(None, 'p')
    self.wn.onkey(None, 'P')
    self.wn.onkey(None, 'Up')
    self.wn.onkey(None, 'Down')
    self.wn.onkey(None, 'Left')
    self.wn.onkey(None, 'Right')
    self.wn.onkey(None, 'BackSpace')
#------------ Keypresses --------------------------------------------------#
def __activate_home_keypresses(self):
    """Bind the Home Screen key handlers and make the window listen.

    Arrow keys move the cursor, Return selects; 'p'/'P'/BackSpace are
    explicitly unbound because they belong to the item menu only.
    """
    self.wn.onkey(self.home_cursor_selection, 'Return')
    self.wn.onkey(None, 'p')
    self.wn.onkey(None, 'P')
    self.wn.onkey(self.home_cursor_up, 'Up')
    self.wn.onkey(self.home_cursor_down, 'Down')
    self.wn.onkey(self.home_cursor_left, 'Left')
    self.wn.onkey(self.home_cursor_right, 'Right')
    self.wn.onkey(None, 'BackSpace')
    self.wn.listen()
#------------ Cursors -----------------------------------------------------#
def home_cursor_down(self):
    """Move the Home Screen cursor from the top row down to the bottom row.

    Options live in the four screen quadrants: Shop (top-left), Money
    (top-right), Status (bottom-right), Items (bottom-left).
    """
    x, y = self.inventory_clerk.pos()
    if y < 0:
        return  # Already on the bottom row; nowhere to go.
    # Shop (left) drops to Items; Money (right) drops to Status.
    target = 3 if x < 0 else 2
    self.display_home_cursor(self._home_cursor_positions[target])
def home_cursor_up(self):
    """Move the Home Screen cursor from the bottom row up to the top row."""
    x, y = self.inventory_clerk.pos()
    if y >= 0:
        return  # Already on the top row; nowhere to go.
    # Items (left) rises to Shop; Status (right) rises to Money.
    target = 0 if x < 0 else 1
    self.display_home_cursor(self._home_cursor_positions[target])
def home_cursor_right(self):
    """Move the Home Screen cursor from the left column to the right column."""
    x, y = self.inventory_clerk.pos()
    if x >= 0:
        return  # Already on the right column; nowhere to go.
    # Shop (top) slides to Money; Items (bottom) slides to Status.
    target = 1 if y >= 0 else 2
    self.display_home_cursor(self._home_cursor_positions[target])
def home_cursor_left(self):
    """Move the Home Screen cursor from the right column to the left column."""
    x, y = self.inventory_clerk.pos()
    if x < 0:
        return  # Already on the left column; nowhere to go.
    # Money (top) slides to Shop; Status (bottom) slides to Items.
    target = 0 if y >= 0 else 3
    self.display_home_cursor(self._home_cursor_positions[target])
def display_home_cursor(self, option_pos):
    """Point the Home Screen cursor at the option at the given position.

    Also refreshes the money display, applies pending time-based events,
    and clears any stale messages -- this runs on every cursor move.

    pre: option_pos is a tuple representing a position, (x, y).
    post: The cursor sprite is shown at this location; if the pet has
    died in the meantime, switches to the game-over display.
    """
    self.update_home_money() #Updates the money on the screen everytime the cursor is moved
    self.update_time_events() #Updates time events
    self.messenger.clear() #Clears messages when cursor is moved
    cursor_x, cursor_y = option_pos
    # Upper-half options get an up arrow, lower-half a down arrow.
    if cursor_y == abs(cursor_y):
        self.inventory_clerk.shape("arrow_up.gif")
    else:
        self.inventory_clerk.shape("arrow_down.gif")
    self.inventory_clerk.setpos(cursor_x, cursor_y)
    self.inventory_clerk.showturtle()
    if not self.owner.pet.is_alive(): #If Pet is dead, end game
        self.pet_dead()
def home_cursor_selection(self):
    """Perform the action for the Home Screen option under the cursor.

    Option indices in ``self._home_cursor_positions``:
    0 = Shop, 1 = Money info, 2 = Pet status, 3 = Items menu.
    """
    self.update_time_events()  # Apply pending time-based effects first
    if not self.owner.pet.is_alive():  # If Pet is dead, end game
        self.pet_dead()
    pos = self.inventory_clerk.pos()
    index = None
    for i, p in enumerate(self._home_cursor_positions):  # Find which option was selected
        if p == pos:
            index = i
            break
    if index is None:
        return  # Cursor is not on a known option; nothing to do.
    # Bug fix: the branches below previously tested the stale loop
    # variable ``i`` instead of the computed ``index``, so a non-matching
    # cursor position silently acted on the last option.
    if index == 0:
        self.setup_store()  # Go to store
    elif index == 1:
        self.messenger.clear()  # Output money info
        self.messenger.write("You have $"+str(self.owner.money_balance())+", and your salary is $"+str(self.owner.get_salary())+".",move=False,align='center',font=("Century Gothic",15,("bold","normal")))
    elif index == 2:
        self.messenger.clear()  # Output Pet status
        self.messenger.setpos(0, 20)
        self.messenger.write(str(self.owner.pet),move=False,align='center',font=("Century Gothic",15,("bold","normal")))
        self.messenger.setpos(0,70)
    elif index == 3:
        if self.owner.get_num_items() > 0:  # If Owner has items, open the item menu
            self.open_item_menu()
        else:  # If not, report that they don't have any
            self.messenger.setpos(0,70)
            self.messenger.write("You don't have any items!",move=False,align='center',font=("Century Gothic",15,("bold","normal")))
#------------ Item Menu Cursor --------------------------------------------#
def item_menu_cursor_up(self):
    """Move the item-menu cursor up one entry, if not already at the top."""
    self.messenger.clear()
    (old_x, old_y) = self.inventory_clerk.pos()
    # The first entry's y is the upper bound; entries are 20 px apart.
    if old_y < self._positions_in_item_menu[0][1]:
        self.inventory_clerk.setpos(old_x, old_y + 20)
def item_menu_cursor_down(self):
    """Move the item-menu cursor down one entry, if not already at the bottom."""
    self.messenger.clear()
    (old_x, old_y) = self.inventory_clerk.pos()
    # The last entry's y (plus the 15 px cursor offset) is the lower bound.
    if old_y > self._positions_in_item_menu[-1][1] + 15:
        self.inventory_clerk.setpos(old_x, old_y - 20)
def display_item_menu_cursor(self, option_pos):
    """Point the item-menu cursor at the option at the given position.

    The cursor sprite is drawn offset 25 px left and 15 px above the
    option text so the arrow lines up with the label.

    pre: option_pos is a tuple representing a position, (x, y).
    post: The cursor will be pointing to this location.
    """
    self.update_home_money() #Updates the money on the screen everytime the cursor is moved
    self.update_time_events() #Updates time events
    self.messenger.clear() #Clears messages
    self.inventory_clerk.shape("arrow_right.gif")
    (cursor_x, cursor_y) = option_pos
    cursor_x -= 25
    cursor_y += 15
    self.inventory_clerk.setpos(cursor_x, cursor_y)
    self.inventory_clerk.st()
def item_menu_cursor_selection(self):
    """Use the item that the item-menu cursor currently points at.

    Resolves the cursor position back to an index in ``owner.items``,
    announces the use, applies the item, and refreshes (or closes) the
    menu.
    """
    self.update_time_events()  # Apply pending time-based effects first
    self.messenger.clear()
    pos = self.inventory_clerk.pos()
    pos = (pos[0]+25, pos[1]-15)  # Undo the cursor's drawing offset
    index = None
    for i, p in enumerate( self._positions_in_item_menu ):
        if p == pos:
            index = i
            break
    # Bug fix: previously a non-matching cursor position fell through
    # with index == None and crashed on ``self.owner.items[None]``.
    if index is None:
        return  # Cursor is not aligned with any item; nothing to use.
    self.instructor.setpos(0,90)  # Announce that the item was used
    self.instructor.clear()
    self.instructor.write("You use the "+self.owner.items[index].get_name()+".",move=False,align='center',font=("Century Gothic",20,("bold","normal")))
    self.owner.use_item( self.owner.items[index] )  # Apply the item's effect
    self.update_item_menu_screen()  # Reload the menu (item list changed)
    if self.owner.get_num_items() == 0:  # No items left: close the menu
        self.close_item_menu()
#------------ Money -------------------------------------------------------#
def update_home_money(self):
    """Redraw the money display on the Home Screen.

    The balance turtle is positioned twice: once to write the amount,
    once to place the money sprite (text and image anchors differ).
    """
    self.balance.clear()
    self.balance.ht()
    self.balance.setpos(340, 190)
    self.balance.write(self.owner.money_balance(),move=False,align='right',font=("Century Gothic",25,("bold","normal")))
    self.balance.shape("money2.gif")
    self.balance.setpos(200, 210)
    self.balance.showturtle()
#------------ Item Menu ---------------------------------------------------#
def open_item_menu(self):
    """Open the item menu listing the owner's items on the Home Screen.

    Unbinds the Home Screen cursor keys, writes one line per item, then
    binds the item-menu keys (Up/Down/Return, 'P' for properties,
    BackSpace to close). Callers are expected to check that the owner
    actually has items first (home_cursor_selection does).
    """
    self.inventory_clerk.clear()
    # Drop the Home Screen bindings before installing the menu's own.
    self.wn.onkey(None, 'Return')
    self.wn.onkey(None, 'Up')
    self.wn.onkey(None, 'Down')
    self.wn.onkey(None, 'Left')
    self.wn.onkey(None, 'Right')
    if self.owner.get_num_items() > 0:
        positions = self.__calculate_item_menu_positions()
        for i, item in enumerate( self.owner.get_items() ):
            self.inventory_clerk.setpos( positions[i] )
            self.inventory_clerk.write(item.get_name(), move=False,align='left',font=("Century Gothic",20,("bold","normal")))
        self.display_item_menu_cursor(positions[0])  # Start at the first item
        self.wn.onkey(self.item_menu_cursor_selection, 'Return')
        self.wn.onkey(self.item_menu_cursor_up, 'Up')
        self.wn.onkey(self.item_menu_cursor_down, 'Down')
        self.wn.onkey(self.display_item_menu_item_properties,'p')
        self.wn.onkey(self.display_item_menu_item_properties,'P')
        self.wn.onkey(self.close_item_menu, 'BackSpace')
        self.messenger.setpos(0, 80)
        self.messenger.write("Press 'P' to view the properties of an item.", move=False,align='center',font=("Century Gothic",20,("bold","normal")))
        self.instructor.setpos(-300, 50)
        self.instructor.write("Press 'Backspace' to", move=False,align='left',font=("Century Gothic",10,("bold","normal")))
        self.instructor.setpos(-300, 30)
        self.instructor.write("exit the item menu.", move=False,align='left',font=("Century Gothic",10,("bold","normal")))
def update_item_menu_screen(self):
    """Redraw the item menu to reflect changes in the owner's items.

    Used after an item is consumed; the cursor is reset to the first
    entry. If the owner has no items left, nothing is drawn (the caller
    is responsible for closing the menu).
    """
    self.inventory_clerk.clear()
    if self.owner.get_num_items() > 0:
        positions = self.__calculate_item_menu_positions()
        for i, item in enumerate( self.owner.get_items() ):
            self.inventory_clerk.setpos( positions[i] )
            self.inventory_clerk.write(item.get_name(), move=False,align='left',font=("Century Gothic",20,("bold","normal")))
        self.display_item_menu_cursor(positions[0])
def close_item_menu(self):
    """Close the item menu: clear it, unbind its keys, redraw Home Screen."""
    self.messenger.clear()
    self.inventory_clerk.clear()
    self.wn.onkey(None, 'Return')
    self.wn.onkey(None, 'Up')
    self.wn.onkey(None, 'Down')
    self.wn.onkey(None,'p')
    self.wn.onkey(None,'P')
    self.wn.onkey(None,'BackSpace')
    self.setup_home_screen()  # Re-installs the Home Screen bindings
def __calculate_item_menu_positions(self):
    """Compute the on-screen position of each item in the item menu.

    Entries form a single column at x == -300, starting at y == 0 and
    stacked downward 20 px apart.

    pre: The owner must have items in their possession.
    post: Returns a list of (x, y) tuples, in item order; the list is
    also cached on self for the cursor onkey handlers.
    """
    x = -300  # Fixed left margin of the item list
    positions = [(x, -20 * slot) for slot in range(len(self.owner.get_items()))]
    self._positions_in_item_menu = positions  # Needed for cursor onkey methods
    return positions
def display_item_menu_item_properties(self):
    """Show the properties of the item the item-menu cursor points at.

    Bound to 'P' while the item menu is open. The empty-string sentinel
    marks "nothing selected": only a real Item (non-str) gets written.
    """
    self.messenger.clear()
    item_selected = ""  # Sentinel: stays a str if no position matches
    (x, y) = self.inventory_clerk.pos()
    pos = (x+25, y-15)  # Undo the cursor's drawing offset
    for i, p in enumerate( self._positions_in_item_menu ):
        if pos == p:
            item_selected = self.owner.items[i]
    self.messenger.setpos(180, -20)
    if type(item_selected) is not str:
        self.messenger.write(str(item_selected),move=False,align='center',font=("Century Gothic",15,("bold","normal")))
#------------ Store Screen ----------------------------------------------------#
#------------ Screen Settings ---------------------------------------------#
def setup_store(self):
    """Draw the Store Screen: instructions, Home button, options, cursor.

    post: The Home Screen is cleared and the Store Screen is shown.
    """
    self.hide_home_screen()
    self.instructor.setpos(0, 200) #Instructions
    self.instructor.write("Welcome to the Shop!",move=False,align='center',font=("Century Gothic",30,("bold","normal")))
    self.instructor.setpos(0, 180)
    self.instructor.write("Choose a category using the arrow keys and choose with Enter.",move=False,align='center',font=("Century Gothic",13,("bold","normal")))
    self.instructor.setpos(0, 160)
    self.instructor.write("To see an item's properties, press 'P' while it is selected. Go back with 'Backspace'.",move=False,align='center',font=("Century Gothic",13,("bold","normal")))
    self.shop_button.shape("home.gif") #Home Button (reuses the shop_button turtle)
    self.shop_button.setpos(240, -240)
    self.shop_button.write("Home",move=False,align='center',font=("Century Gothic",12,("bold","normal")))
    self.shop_button.setpos(300, -200)
    self.shop_button.showturtle()
    self.inventory_clerk.shape("arrow_right.gif")
    self.update_store_screen()  # Draw the top-level store options
    self.__activate_store_keypresses()
def update_shop_money(self):
    """Redraw the money display on the Store Screen (position unchanged)."""
    self.balance.clear()
    self.balance.write("$"+str(self.owner.money_balance()),move=False,align='center',font=("Century Gothic",15,("bold","normal")))
def update_store_screen(self, last_choice=None):
    """Redraw the Store Screen's options for the given menu level.

    pre: last_choice is the previously selected option from the Shop
    (string category or Item), or None to show the front of the store.
    post: The options, money balance, and cursor are redrawn.
    """
    self.inventory_clerk.clear()
    self.balance.clear()
    self.bag.clear()
    self.balance.setpos(0, -220)  # Money balance at the bottom center
    self.balance.write("$"+str(self.owner.money_balance()),move=False,align='center',font=("Century Gothic",15,("bold","normal")))
    options = self.get_shop_options(last_choice)  # Also caches self._options
    if len(options) > 0:
        # Caches self._positions_of_shop_options for the cursor handlers.
        self.__calculate_shop_option_positions(options)
        self.display_options(options, self._positions_of_shop_options)
        self.display_shop_cursor(self._positions_of_shop_options[0])  # Point at the first option
def hide_store_screen(self):
    """Hide the Store Screen and unbind its keys.

    post: Leaves the window blank except for the background.
    """
    self.inventory_clerk.clear()
    self.inventory_clerk.ht()
    self.messenger.clear()
    self.instructor.clear()
    self.balance.clear()
    self.bag.clear()
    self.shop_button.clear()
    self.shop_button.ht()
    self.shop_button.onclick(None)
    self.wn.onkey(None, 'Return') #Unbind keys
    self.wn.onkey(None, 'p')
    self.wn.onkey(None, 'P')
    self.wn.onkey(None, 'Up')
    self.wn.onkey(None, 'Down')
    self.wn.onkey(None, 'BackSpace')
#------------ Keypress Functions ------------------------------------------#
def __activate_store_keypresses(self):
    """Bind the Store Screen key handlers and make the window listen.

    Up/Down move the cursor, Return selects, 'p'/'P' show item
    properties, BackSpace backs out one menu level.
    """
    self.wn.onkey(self.shop_cursor_select, 'Return')
    self.wn.onkey(self.display_shop_item_properties, 'p')
    self.wn.onkey(self.display_shop_item_properties, 'P')
    self.wn.onkey(self.shop_cursor_up, 'Up')
    self.wn.onkey(self.shop_cursor_down, 'Down')
    self.wn.onkey(self.go_back, 'BackSpace')
    self.wn.listen()
#------------ Cursor ------------------------------------------------------#
def display_shop_cursor(self, option_pos):
    """Point the shop cursor at the option located at *option_pos*.

    pre: option_pos is an (x, y) tuple.
    post: The cursor sprite is shown beside that option.
    """
    x, y = option_pos
    # The arrow sprite sits 30 px left of and 15 px above the option text.
    self.inventory_clerk.setpos(x - 30, y + 15)
    self.inventory_clerk.showturtle()
def shop_cursor_up(self):
    """Move the shop cursor up one option, if possible.

    From the Home button (fixed cursor spot (230, -200)) the cursor
    wraps back up to the last option in the list.
    """
    self.bag.clear() #Clear any Item Properties that may be showing
    self.update_time_events()
    self.messenger.clear()
    self.update_shop_money()
    (old_x, old_y) = self.inventory_clerk.pos()
    if (old_x, old_y) == (230, -200): #If on the Home button, go to the last option
        self.inventory_clerk.setpos(self._positions_of_shop_options[-1][0] - 30, self._positions_of_shop_options[-1][1] + 15)
    elif old_y < 120: #Top option's y; options are 50 px apart
        self.inventory_clerk.setpos(old_x, old_y + 50)
def shop_cursor_down(self):
    """Move the shop cursor down one option, if possible.

    Past the last option, the cursor lands on the Home button at its
    fixed cursor spot (230, -200).
    """
    self.bag.clear() #Clear any Item Properties that may be showing
    self.update_time_events()
    self.messenger.clear()
    self.update_shop_money()
    (old_x, old_y) = self.inventory_clerk.pos()
    if old_y > self._positions_of_shop_options[-1][1] + 15: #Above the last option
        self.inventory_clerk.setpos(old_x, old_y - 50)
    elif old_y == self._positions_of_shop_options[-1][1] + 15: #On the last option: go to Home
        self.inventory_clerk.setpos(230, -200)
def shop_cursor_select(self):
    """Act on whatever the shop cursor is pointing at.

    Matches the cursor position against the displayed options and
    dispatches via __handle_selection; a positive x afterwards means the
    Home button, so the Home Screen is loaded.

    NOTE(review): right-column options (x == 250) also leave x positive,
    so selecting one would additionally trigger setup_home_screen --
    confirm whether the second column is ever reachable in practice.
    """
    self.bag.clear() #Clear any Item Properties that may be showing
    self.update_time_events()
    self.messenger.clear()
    self.update_shop_money()
    (x, y) = self.inventory_clerk.pos()
    pos = (x+30, y-15)  # Undo the cursor's drawing offset
    for i, p in enumerate( self._positions_of_shop_options ):
        if pos == p:
            option_selected = self._options[i]
            self.__handle_selection(option_selected)
    if x == abs(x): #Home button is the only option with positive x
        self.setup_home_screen()
def display_shop_item_properties(self):
    """Show the properties of the Item the shop cursor points at.

    Bound to 'p'/'P' on the Store Screen. Categories (plain strings) are
    skipped -- only a real Item is written to the screen.
    """
    self.update_time_events()
    self.messenger.clear()
    self.bag.clear()
    self.update_shop_money()
    (x, y) = self.inventory_clerk.pos()
    pos = (x+30, y-15)  # Undo the cursor's drawing offset
    for i, p in enumerate( self._positions_of_shop_options ):
        if pos == p:
            option_selected = self._options[i]
    self.bag.setpos(180, -20)  # The bag turtle doubles as the property display
    if type(option_selected) is not str:
        self.bag.write(str(option_selected),move=False,align='center',font=("Century Gothic",15,("bold","normal")))
#------------ Store Misc. -------------------------------------------------#
def go_back(self):
    """Back out one menu level in the Shop and redraw its options.

    The grandparent of any currently displayed option is the category
    one level above the current menu.
    """
    self.update_store_screen(self.shop.inventory.find_grandparent(self._options[0]))
def buy_item(self, bought_Item):
    """Attempt to buy the given Item from the shop.

    pre: bought_Item is in the Shop's inventory.
    post: On success the Item is added to the Owner's inventory; in
    either case the outcome is reported and the Store Screen is reset to
    the front of the store.

    NOTE(review): assumes Shop.sell only removes the item from the shop
    and charges the owner (it returns a truthy flag) -- confirm it does
    not also add the item, which would double-add here.
    """
    did_sell = self.shop.sell(bought_Item, self.owner)
    if did_sell:
        self.owner.add_item(bought_Item)
    self.bought_message(did_sell)
    self.update_store_screen(bought_Item)
def bought_message(self, did_sell):
    """Report on screen whether the purchase succeeded.

    pre: did_sell is True or False.
    """
    self.messenger.clear()
    self.messenger.setpos(0, -240)
    if did_sell:
        self.messenger.write("You bought the item.",move=False,align='center',font=("Century Gothic",15,("bold","normal")))
    else:
        # Bug fix: failure message previously read "You have have ...".
        self.messenger.write("You have too little money or too many items!",move=False,align='center',font=("Century Gothic",12,("bold","normal")))
#------------ Store Options -----------------------------------------------#
def get_shop_options(self, last_choice=None):
    """Fetch the current shop options for the menu level after last_choice.

    pre: last_choice is a string (category in the store), an Item (just
    bought), or None, meaning the front of the store.
    post: Returns the options as a list of strings or Items; the list is
    also cached on self for the cursor onkey handlers.
    """
    options = self.shop.get_choices(last_choice)
    self._options = options #Needed for cursor onkey methods
    return options
def display_options(self, options, positions):
    """Write the given store options on screen at the given positions.

    Bug fix: the ``positions`` parameter was previously ignored and
    recomputed internally; it is now used directly. The only caller
    (update_store_screen) already computes and passes the same list, so
    behavior is unchanged for existing call sites.

    pre: options is a list of strings or Items; positions is a parallel
    list of (x, y) tuples (as produced by
    __calculate_shop_option_positions).
    """
    for i, item in enumerate(options):
        self.inventory_clerk.setpos( positions[i] )
        # Categories are plain strings; merchandise entries are Items.
        if type(item) is str:
            self.inventory_clerk.write(item, move=False,align='left',font=("Century Gothic",20,("bold","normal")))
        else:
            self.inventory_clerk.write(item.get_name(), move=False,align='left',font=("Century Gothic",20,("bold","normal")))
def __calculate_shop_option_positions(self, options):
    """Compute the on-screen position of each store option.

    The first eight options form a left column starting at (-250, 120),
    spaced 50 px apart; further options continue in a right column
    starting at (250, 120).

    Bug fix: the old wrap check (``if i > 7``) reset y on *every*
    iteration past the 8th option, stacking all overflow options on top
    of each other at (250, 120). The reset now happens once, at i == 8.

    pre: options is a list of strings or Items.
    post: Returns a list of (x, y) tuples, in order; the list is also
    cached on self for the cursor onkey handlers.
    """
    x, y = (-250, 120)  # Top of the left column
    positions = []
    for i, item in enumerate(options):
        if i == 8:  # Start the right-hand column after 8 entries
            x, y = (250, 120)
        positions.append( (x,y) )
        y -= 50 #Options are 50 pixels apart
    self._positions_of_shop_options = positions #Needed for cursor onkey methods
    return positions
def __handle_selection(self, option_selected):
    """Dispatch the option chosen in the Shop.

    - A string is a category: descend into it and redraw, but only if it
      still has choices (an empty category leaves the screen unchanged).
    - Anything else is an Item: buy it (which resets the screen to the
      front of the store).

    pre: option_selected is a valid option in the Shop (string or Item).
    """
    if type(option_selected) is str:
        # Idiom fix: replaced the former empty-``pass`` branch with a
        # direct truthiness check on the category's remaining choices.
        if self.shop.get_choices(option_selected):
            self.update_store_screen(option_selected)
    else:
        self.buy_item(option_selected)
#------------------------------------------------------------------------------#
| StarcoderdataPython |
1740239 | <reponame>rgerkin/pyrfume
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.10.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2

import os
from pathlib import Path
import pickle

import numpy as np
import pandas as pd
from rickpy import ProgressBar, get_sheet

import pyrfume


def _csv_cids(file_path, **read_kwargs):
    """Load a delimited file and return the set of its nonzero PubChem CIDs.

    Extra keyword arguments are forwarded to ``pd.read_csv``. CID 0 is a
    placeholder for "no PubChem match" and is always dropped.
    """
    df = pd.read_csv(file_path, **read_kwargs)
    return set(df['CID']) - {0}


cids = {}  # dataset name -> set of PubChem CIDs
DATA = pyrfume.DATA_DIR

# ## From Sigma Fragrance and Flavor Catalog (2014)
cids['sigma-2014'] = _csv_cids(DATA / 'sigma_2014' / 'sigma.csv')

# ## Dravnieks Atlas of Odor Character
cids['dravnieks-1985'] = _csv_cids(DATA / 'dravnieks_1985' / 'dravnieks.csv')

# ## Abraham et al, 2013
cids['abraham-2013'] = _csv_cids(DATA / 'abraham_2011' / 'abraham-2011-with-CIDs.csv')

# ## Bushdid et al, 2014
cids['bushdid-2014'] = _csv_cids(DATA / 'bushdid_2014' / 'bushdid.csv')

# ## Chae et al, 2019
cids['chae-2019'] = _csv_cids(DATA / 'chae_2019' / 'odorants.csv')

# ## Prestwick
cids['prestwick'] = _csv_cids(DATA / 'prestwick' / 'prestwick.csv')

# ## GRAS
cids['gras'] = _csv_cids(DATA / 'GRAS' / 'gras.csv')

# ## Sobel Lab (Weiss 2012, Snitz 2013)
cids['sobel-2013'] = _csv_cids(DATA / 'snitz_2013' / 'snitz.csv')

# ## Leffingwell
cids['leffingwell'] = _csv_cids(DATA / 'westeros' / 'westeros.csv')

# ## Davison
cids['davison-2007'] = _csv_cids(DATA / 'davison_2007' / 'davison-katz.csv', index_col=0)

# ## FDB
cids['fragrance-db'] = _csv_cids(DATA / 'fragrancedb' / 'FragranceDB_CIDs.txt')

# ## Mainland
cids['mainland-cabinet'] = _csv_cids(DATA / 'cabinets' / 'Mainland Odor Cabinet with CIDs.csv')
cids['mainland-intensity'] = _csv_cids(DATA / 'mainland_intensity' / 'mainland-intensity-odorant-info.csv')
# This table has missing CIDs, so drop NaNs before casting to int.
df = pd.read_csv(DATA / 'mainland_2015' / 'Odors.tsv', sep='\t')
cids['mainland-receptors'] = set(df['CID'].dropna().astype(int)) - {0}

# ## Enantiomers
cids['enantiomers'] = _csv_cids(DATA / 'shadmany' / 'enantiomers.csv')

# ## Haddad (just the clusters)
cids['haddad-2008'] = _csv_cids(DATA / 'haddad_2008' / 'haddad-clusters.csv')

# ## U19 PIs
gerkin_sheet = '1PlU4zHyRXtcI7Y-O6xYtlIyKoKk8hX1I9zfx8KFELdc'
u19_sheet = '1B2sEj9pCk2_zS1X1Cg2ulAB4E_BWPboJBSvH4Gwc8fs'
dfs = {}
dfs['gerkin-cabinet'] = get_sheet(gerkin_sheet, 'gerkin-compounds').set_index('CID')
dfs['smith-cabinet'] = get_sheet(gerkin_sheet, 'smith-compounds').set_index('CID')
dfs['rinberg-glomeruli'] = get_sheet(u19_sheet, 'rinberg').set_index('CID')
dfs['fleischmann-cabinet'] = get_sheet(u19_sheet, 'fleischmann').set_index('CID')
dfs['datta-cabinet'] = get_sheet(u19_sheet, 'datta').set_index('CID')
dfs['bozza-cabinet'] = get_sheet(u19_sheet, 'bozza').set_index('CID')
for name, df in dfs.items():
    cids[name] = set(df.index) - {0}

# ## Goodscents
cids['goodscents'] = _csv_cids(DATA / 'goodscents' / 'goodscents_cids.txt', index_col=False)

# ## Arctander
cids['arctander-1960'] = _csv_cids(DATA / 'arctander_1960' / 'arctander_cids.txt', index_col=False)

# ## Flavornet
cids['flavornet'] = _csv_cids(DATA / 'flavornet' / 'flavornet.csv')

# ## Scott et al, 2014
cids['scott-2014'] = _csv_cids(DATA / 'scott_2014' / 'data.csv')

# ## Superscent
cids['superscent'] = _csv_cids(DATA / 'superscent' / 'superscent_cids.txt')

# ## SenseLab
cids['senselab'] = _csv_cids(DATA / 'senselab' / 'senselab.csv')

# ## Wakayama et al, 2019
cids['wakayama-2019'] = _csv_cids(DATA / 'wakayama_2019' / 'wakayama-intensity_with-CIDs.txt', sep='\t')

# ## Save
file_path = DATA / 'odorants' / 'cids.pkl'
with open(file_path, 'wb') as f:
    pickle.dump(cids, f)

# ## Load
# +
#with open(file_path, 'rb') as f:
#    cids2 = pickle.load(f)
# -

# ## Merge: one row per CID, one 0/1 indicator column per dataset
all_cids = set().union(*cids.values())
all_cids = pd.DataFrame(index=sorted(all_cids), columns=sorted(cids)).fillna(0)
all_cids.index.name = 'CID'
for key in cids:
    all_cids.loc[list(cids[key]), key] = 1
file_path = DATA / 'odorants' / 'all_cids.csv'
all_cids.to_csv(file_path)
all_cids.shape
| StarcoderdataPython |
67057 | from pyinaturalist.constants import PROJECT_ORDER_BY_PROPERTIES, JsonResponse, MultiInt
from pyinaturalist.converters import convert_all_coordinates, convert_all_timestamps
from pyinaturalist.docs import document_request_params
from pyinaturalist.docs import templates as docs
from pyinaturalist.pagination import add_paginate_all
from pyinaturalist.request_params import validate_multiple_choice_param
from pyinaturalist.v1 import get_v1
@document_request_params([docs._projects_params, docs._pagination])
@add_paginate_all(method='page')
def get_projects(**params) -> JsonResponse:
    """Search iNaturalist projects matching the given criteria.

    **API reference:** https://api.inaturalist.org/v1/docs/#!/Projects/get_projects

    Example:
        Search for projects about invasive species within 400km of Vancouver, BC:

        >>> response = get_projects(
        >>>     q='invasive',
        >>>     lat=49.27,
        >>>     lng=-123.08,
        >>>     radius=400,
        >>>     order_by='distance',
        >>> )

        Show basic info for projects in response:

        >>> pprint(response)
        [8291 ] PNW Invasive Plant EDDR
        [19200 ] King County (WA) Noxious and Invasive Weeds
        [102925 ] Keechelus/Kachess Invasive Plants
        ...

    Returns:
        Response dict containing project records
    """
    # Reject unsupported sort fields before hitting the API.
    validate_multiple_choice_param(params, 'order_by', PROJECT_ORDER_BY_PROPERTIES)
    projects = get_v1('projects', **params).json()
    # Normalize coordinate and timestamp fields on every returned record.
    for convert in (convert_all_coordinates, convert_all_timestamps):
        projects['results'] = convert(projects['results'])
    return projects
def get_projects_by_id(project_id: MultiInt, rule_details: bool = None, **params) -> JsonResponse:
    """Fetch one or more iNaturalist projects by ID.

    **API reference:** https://api.inaturalist.org/v1/docs/#!/Projects/get_projects_id

    Example:
        >>> response = get_projects_by_id([8348, 6432])
        >>> pprint(response)
        [8348] Tucson High Native and Invasive Species Inventory
        [6432] CBWN Invasive Plants

    Args:
        project_id: Get projects with this ID. Multiple values are allowed.
        rule_details: Return more information about project rules, for example
            return a full taxon object instead of simply an ID

    Returns:
        Response dict containing project records
    """
    response = get_v1('projects', ids=project_id, params={'rule_details': rule_details}, **params)
    projects = response.json()
    # Normalize coordinate and timestamp fields on every returned record.
    for convert in (convert_all_coordinates, convert_all_timestamps):
        projects['results'] = convert(projects['results'])
    return projects
| StarcoderdataPython |
1736910 | # import argparse
import boto3
import ffmpeg
import json
import logging
import os
import posixpath
import sys
import tarfile
import tempfile
from concurrent.futures import ThreadPoolExecutor
def extract_frames(in_filename, out_directory, fps=1, height=720):
    """Extract still frames from a video using the bundled ffmpeg binary.

    Frames are scaled (aspect ratio kept via the ``-1`` dimension), sampled
    at ``fps`` frames per second, and written to ``out_directory`` as
    ``frame_000001.png``, ``frame_000002.png``, ...

    Args:
        in_filename: Path of the input video file.
        out_directory: Directory that receives the numbered PNG frames.
        fps: Frames per second to sample (default 1).
        height: Size passed to ffmpeg's scale filter.
            NOTE(review): this value is passed as the *first* scale argument,
            which ffmpeg treats as the width — confirm intent.
    """
    # Removed leftover debug print of the working-directory listing.
    (
        ffmpeg.input(in_filename)
        .filter("scale", height, -1)
        .filter("fps", fps)
        .output(
            os.path.join(out_directory, "frame_%06d.png"),
        ).overwrite_output()
        .run(cmd="./ffmpeg_bin")  # binary shipped alongside the Lambda package
    )
def handler(event, context):
    """AWS Lambda entry point: download a video from S3, extract frames, upload them.

    Expected ``event`` keys:
        s3_bucket_path: Name of the S3 bucket holding the video.
        s3_video_path: Key of the video object within the bucket.
        s3_output_frames_path: Key prefix under which frames are uploaded.
        video_metadata: Dict with at least a ``height`` entry used for scaling.
        num_parallel_upload_threads: Optional upload concurrency (default 4).
        logging_level: Optional logging level name (default "INFO").

    Returns:
        Dict with a ``frames`` list; each entry records the frame file name
        plus the bucket and key it was uploaded to.
    """
    s3_bucket_path = event["s3_bucket_path"]
    s3_video_path = event["s3_video_path"]
    s3_output_frames_path = event["s3_output_frames_path"]
    video_metadata = event["video_metadata"]
    num_parallel_upload_threads = event.get("num_parallel_upload_threads", 4)
    logging_level = event.get("logging_level", "INFO")

    # Log to stdout so messages reach CloudWatch.
    # NOTE(review): addHandler on every invocation duplicates handlers on warm
    # Lambda starts — consider configuring logging once at module import.
    logger = logging.getLogger(name=__name__)
    logger.setLevel(logging_level)
    log_handler = logging.StreamHandler(sys.stdout)  # renamed: don't shadow handler()
    log_handler.setLevel(logging_level)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    log_handler.setFormatter(formatter)
    logger.addHandler(log_handler)
    logger.info("Event: {}".format(event))

    client = boto3.client('s3')
    _, download_filename = tempfile.mkstemp()
    frames_out_directory = tempfile.mkdtemp()
    _, tarred_frames_path = tempfile.mkstemp()  # currently unused

    logger.info("Downloading File")
    client.download_file(s3_bucket_path, s3_video_path, download_filename)
    logger.info("Finished downloading file")

    logger.info("Extracting frames")
    extract_frames(download_filename, frames_out_directory, height=video_metadata["height"])
    logger.info("Finished extracting frames")

    # Upload extracted frames concurrently, recording each destination key.
    frames_info = []
    with ThreadPoolExecutor(num_parallel_upload_threads) as ex:
        for frame_path in os.listdir(frames_out_directory):
            key = posixpath.join(s3_output_frames_path, frame_path)  # computed once, reused below
            ex.submit(
                client.upload_file,
                os.path.join(frames_out_directory, frame_path),
                s3_bucket_path,
                key,
            )
            frames_info.append(
                {
                    "frame": frame_path,
                    "bucket": s3_bucket_path,
                    "key": key
                }
            )
    logger.info("Finished uploading files")
    return {
        "frames": frames_info
    }
| StarcoderdataPython |
24338 | <gh_stars>0
import os
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import config
def view_data():
    """Show a 3x3 grid previewing two images each of rock, paper, and scissors.

    Reads image paths from ``config.TRAIN_DIR``'s class subdirectories and
    renders them with matplotlib.
    """
    nrows, ncols = 3, 3
    pic_index = 0  # NOTE: dead assignment — immediately overwritten below
    fig = plt.gcf()
    fig.set_size_inches(nrows * 4, ncols * 4)
    pic_index = 2
    # Take the two most recent files before pic_index from each class folder.
    next_rock = [os.path.join(config.TRAIN_DIR, 'rock', fname) for fname in os.listdir(os.path.join(config.TRAIN_DIR, 'rock'))[pic_index-2:pic_index]]
    next_paper = [os.path.join(config.TRAIN_DIR, 'paper', fname) for fname in os.listdir(os.path.join(config.TRAIN_DIR, 'paper'))[pic_index-2:pic_index]]
    next_scissors = [os.path.join(config.TRAIN_DIR, 'scissors', fname) for fname in os.listdir(os.path.join(config.TRAIN_DIR, 'scissors'))[pic_index-2:pic_index]]
    for i, img_path in enumerate(next_rock+next_paper+next_scissors):
        sp = plt.subplot(nrows, ncols, i+1)
        img = mpimg.imread(img_path)
        plt.imshow(img)
        plt.axis('off')
    plt.show()
def plot_graphs(history, string):
    """Plot a training metric and its validation counterpart over epochs.

    ``history`` maps metric names to per-epoch value sequences; the
    validation series is looked up under ``'val_' + string``.
    """
    val_key = 'val_' + string
    for key in (string, val_key):
        plt.plot(history[key])
    plt.xlabel('Epochs')
    plt.ylabel(string)
    plt.legend([string, val_key])
    plt.show()
if __name__ == '__main__':
    # Preview a sample of the training images; the commented lines below
    # would instead plot a previously saved training history.
    view_data()
    # history = np.load(f'{config.MODEL_PATH}my_history.npy', allow_pickle=True).item()
    # plot_graphs(history, 'accuracy')
    # plot_graphs(history, 'loss')
| StarcoderdataPython |
3223270 | <filename>src/find_shortest_path.py
'''
Separating code for SRP
'''
import sys
import os
sys.path.append(os.path.dirname(__file__)+"/../")
from src import constants
from src.get_vehicles import getvehicles
from src.find_orbit_time import Orbit
class Shortest:
    '''
    This class finds the shortest path for the rider
    '''
    def __str__(self):
        '''
        Introduced to remove the pylint warning:
        Too few public methods (1/2) (too-few-public-methods)
        '''
        return self.__class__.__name__

    @staticmethod
    def get_shortest_path(climate, traffic_speed_orbit1, traffic_speed_orbit2):
        '''
        Print which vehicle on which orbit gives the fastest trip.

        :param climate: User input Climate
        :param traffic_speed_orbit1: Orbit 1 Traffic Speed
        :param traffic_speed_orbit2: Orbit 2 Traffic Speed
        :return: None; the fastest vehicle/orbit combination is printed
        '''
        print("Weather is", climate)
        print("Orbit1 traffic speed is {0} megamiles/hour".format(traffic_speed_orbit1))
        # Bug fix: this line reports orbit 2's speed, so it must say "Orbit2".
        print("Orbit2 traffic speed is {0} megamiles/hour".format(traffic_speed_orbit2))
        vehicles = getvehicles(climate)
        orbit_info = Orbit()
        orbit1 = orbit_info.get_orbit_time(vehicles=vehicles, traffic_speed=traffic_speed_orbit1, \
                                           orbit_distance=constants.ORBIT1_ORBIT_DISTANCE, \
                                           craters_count=constants.ORBIT1_CRATERS_COUNT)
        orbit2 = orbit_info.get_orbit_time(vehicles=vehicles, traffic_speed=traffic_speed_orbit2, \
                                           orbit_distance=constants.ORBIT2_ORBIT_DISTANCE, \
                                           craters_count=constants.ORBIT2_CRATERS_COUNT)
        # get_orbit_time returns a (vehicle, time) pair; compare travel times.
        if orbit1[1] < orbit2[1]:
            print("Vehicle {0} on Orbit1".format(orbit1[0]['name']))
        else:
            print("Vehicle {0} on Orbit2".format(orbit2[0]['name']))
| StarcoderdataPython |
1638213 | <filename>platform/hwconf_data/efr32fg14v/modules/PIN/PIN_Snippets.py
"""
Generated from a template
"""
import efr32fg14v.PythonSnippet.RuntimeModel as RuntimeModel
from efr32fg14v.modules.PIN.PIN_Defs import PORT_PINS
def activate_runtime():
    """Runtime-activation hook for the PIN module; intentionally a no-op."""
    pass
| StarcoderdataPython |
3344827 | from request_log import RequestLogMiddleware
| StarcoderdataPython |
3225660 | <reponame>edrossy/tableau_tools
from tableau_connection import TableauConnection
from tableau_datasource import TableauDatasource
from tableau_parameters import TableauParameters, TableauParameter
from tableau_document import TableauColumns, TableauDocument
from tableau_file import TableauFile
from tableau_workbook import TableauWorkbook
| StarcoderdataPython |
1605906 | """adapterpattern
Example to show a Pythonic way of implementing an adapter pattern.
The example shows how to use Python's language feature, first-class
functions, to implement adapter pattern.
This example is created to illustrate a design pattern discussed in the book
Learning Python Application Development (Packt Publishing). See the book for
further details.
This module is compatible with Python version 2.7.9 as well as version 3.5.
It contains supporting code for the book, Learning Python Application Development,
Packt Publishing.
RUNNING THE PROGRAM:
Assuming you have python in your environment variable PATH, type the following
in the command prompt to run the program:
python name_of_the_file.py
(Replace name_of_the_file.py with the name of this file)
.. seealso:: `adapterpattern_multiple_methods.py`
:copyright: 2016, <NAME>
:license: The MIT License (MIT) . See LICENSE file for further details.
"""
class ElfRider:
    """Game character that already exposes the interface the client expects.

    It provides the ``jump`` method directly, so no adapter is required.

    .. seealso:: `WoodElf` , `ForeignUnitAdapter`
    """

    def jump(self):
        """Print a trace showing how this unit jumps (dummy method)."""
        print("Inside ElfRider.jump")
class WoodElf:
    """'Third-party' game character whose interface the client cannot use directly.

    It offers ``leap`` (the equivalent of the expected ``jump``) plus an extra
    ``climb`` method, so it must be wrapped by an adapter.

    .. seealso:: `ElfRider` , `MountainElf`, `ForeignUnitAdapter`
    """

    def leap(self):
        """Equivalent of the ``jump`` method the client expects.

        The adapter exposes a ``jump`` that delegates here.
        """
        print("Inside WoodElf.leap")

    def climb(self):
        """Unrelated method; the adapter simply forwards calls through to it."""
        print("Inside WoodElf.climb")
class MountainElf:
    """Another character with an incompatible interface.

    Its ``spring`` method corresponds to the ``jump`` the client expects,
    so it too must be wrapped by `ForeignUnitAdapter`.
    """

    def spring(self):
        """Equivalent of the ``jump`` method the client expects."""
        print("Inside MountainElf.spring")
class ForeignUnitAdapter:
    """Generalized adapter mapping an incompatible unit onto the client interface.

    The client expects a ``jump`` method; the adaptee provides some equivalent
    (``leap``, ``spring``, ...).  The adapter stores that bound method as its
    own ``jump`` attribute and forwards every other attribute lookup to the
    wrapped adaptee.

    :arg adaptee: Instance of the incompatible class being wrapped.
    :arg adaptee_method: Bound method of the adaptee equivalent to ``jump``.
    :ivar foreign_unit: The wrapped adaptee instance.
    :ivar jump: Alias for ``adaptee_method``.
    """

    def __init__(self, adaptee, adaptee_method):
        self.foreign_unit = adaptee
        self.jump = adaptee_method

    def __getattr__(self, item):
        """Delegate any attribute not defined on the adapter to the adaptee.

        :param item: Name of the attribute being looked up.
        :return: The corresponding attribute of ``self.foreign_unit``
            (an ``AttributeError`` propagates if the adaptee lacks it too).
        """
        return getattr(self.foreign_unit, item)
if __name__ == '__main__':
    # Native unit: already has the expected interface.
    elf = ElfRider()
    elf.jump()
    wood_elf = WoodElf()
    wood_elf_adapter = ForeignUnitAdapter(wood_elf, wood_elf.leap)
    # Internally the following calls wood_elf.leap()
    wood_elf_adapter.jump()
    # Internally the following calls __getattr__
    # which in turn calls wood_elf.climb()
    wood_elf_adapter.climb()
    mountain_elf = MountainElf()
    mountain_elf_adapter = ForeignUnitAdapter(mountain_elf, mountain_elf.spring)
    mountain_elf_adapter.jump()
| StarcoderdataPython |
1643514 | <filename>misc/datatools.py
#!/usr/bin/env python3
import json
from typing import Callable, List
import pandas as pd
from loguru import logger
from .taxonomy import process_keywords_str, process_single_kws
def parse_kws(kw_str, level=2):
    """Extract the unique keyword at ``level`` from each ``A > B > C`` chain.

    ``kw_str`` is a comma-separated list of ``>``-delimited keyword paths.
    Each segment is stripped and lower-cased; if a chain is shorter than
    ``level``, its last segment is used instead.  Result order is not
    guaranteed (set semantics, as in the original implementation).
    """
    selected = set()
    for chain in kw_str.split(","):
        parts = [part.strip().lower() for part in chain.split(">")]
        idx = level if level < len(parts) else len(parts) - 1
        selected.add(parts[idx])
    return list(selected)
def load_data(path: str, level: int = 0) -> pd.DataFrame:
    """Read a CSV of descriptions and keywords, deriving text/label columns.

    Renames ``desc`` to ``text``, strips whitespace, parses ``keywords`` into
    a ``labels`` list at the requested taxonomy ``level``, and drops rows
    whose text is empty.
    """
    logger.info(f"Loading data from {path}. [KW Level={level}]")
    frame = pd.read_csv(path).rename(columns={"desc": "text"})
    frame["text"] = frame["text"].apply(str.strip)
    frame["labels"] = frame["keywords"].apply(lambda kws: parse_kws(kws, level))
    frame["textlen"] = frame["text"].apply(len)
    frame = frame[frame["textlen"] > 0]
    logger.debug(f"df shape : {frame.shape}")
    return frame
def tokenize_custom(text: str) -> List[str]:
    """Whitespace-tokenize ``text`` (plain ``str.split`` semantics)."""
    tokens = text.split()
    return tokens
def standardize_data(df: pd.DataFrame, tokenizer_func: Callable = None) -> pd.DataFrame:
    """Normalize a raw dataframe into tokenized text plus standardized labels.

    Requires a ``text`` (or ``desc``) column and a ``keywords`` column.
    Adds ``tokens``, ``labels_tokenized``, and ``labels_standard`` columns.

    :param df: Input dataframe; it is copied, not mutated.
    :param tokenizer_func: Optional tokenizer; defaults to ``tokenize_custom``.
    """
    df = df.copy()
    if "desc" in df:
        df = df.rename(columns={"desc": "text"})
    assert "text" in df
    assert "keywords" in df
    tokenizer_func = tokenizer_func or tokenize_custom
    df["tokens"] = df["text"].apply(tokenizer_func)

    def _process_single_row_kw(keywords: List[str]):
        # Normalize each keyword chain via the taxonomy helper, then flatten
        # each chain into a single '--'-joined label string.
        keywords = list(map(process_single_kws, keywords))
        keywords = list(map(lambda kws: "--".join(kws), keywords))
        return keywords

    df["labels_tokenized"] = df["keywords"].apply(process_keywords_str)
    df["labels_standard"] = df["labels_tokenized"].apply(_process_single_row_kw)
    return df.reset_index(drop=True)
def generate_data(df: pd.DataFrame, outpath: str) -> List[dict]:
    """Convert standardized rows into classifier records; optionally dump JSONL.

    Each record carries the row's tokens and standardized labels plus empty
    keyword/topic slots.  When ``outpath`` is truthy, records are written to
    it one JSON object per line.
    """
    logger.info("Generating data!")
    assert "text" in df
    assert "labels_standard" in df
    assert "tokens" in df
    data = [
        {
            "doc_label": row.labels_standard,
            "doc_token": row.tokens,
            "doc_keyword": [],
            "doc_topic": [],
        }
        for row in df.itertuples()
    ]
    logger.debug(f"NData = {len(data)}")
    if outpath:
        logger.info(f"Writing data to {outpath}")
        with open(outpath, "w") as f:
            for dct in data:
                json.dump(dct, f)
                f.write("\n")
    return data
def main():
    """Placeholder CLI entry point; this module is used as a library."""
    pass


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1677447 | <filename>CompareTheTriplets.py
"""
https://www.hackerrank.com/challenges/compare-the-triplets
"""
#!/bin/python3
import sys
a0,a1,a2 = input().strip().split(' ')
a0,a1,a2 = [int(a0),int(a1),int(a2)]
b0,b1,b2 = input().strip().split(' ')
b0,b1,b2 = [int(b0),int(b1),int(b2)]
# Write Your Code Here
"""
alice = 0
bob = 0
def cmpr(a,b):
global alice
global bob
if a > b:
alice += 1
elif b > a:
bob += 1
cmpr(a0,b0)
cmpr(a1,b1)
cmpr(a2,b2)
print(alice,bob)
"""
a = [a0,a1,a2]
b = [b0,b1,b2]
def score(a, b):
    """Compare ratings pairwise and print Alice's and Bob's points.

    Alice gets a point for each index where ``a[i] > b[i]``, Bob for each
    index where ``b[i] > a[i]``; ties score nothing.  Output format is
    ``"<alice> <bob>"`` on one line.
    """
    # zip-based idiom replaces the manual while/index loop and also avoids
    # an IndexError if the two lists ever differ in length.
    pairs = list(zip(a, b))
    alice = sum(x > y for x, y in pairs)
    bob = sum(y > x for x, y in pairs)
    print(alice, bob)
# Score the triplets read above and print the result.
score(a, b)
1722486 | <filename>tests/test_git.py
try:
from unittest.mock import patch, MagicMock as Mock
except ImportError:
from mock import patch, MagicMock as Mock
from dulwich.repo import MemoryRepo
from dulwich.objects import Tree, Blob
from gitdb.repo import Data
from gitdb.git import GitRepo
def test_current_commit_is_none_if_repo_is_empty():
    """An empty repository has no HEAD, so current_commit must be None."""
    assert GitRepo(MemoryRepo()).current_commit is None
def test_current_commit_is_HEAD_ref_in_repo():
    """current_commit must track HEAD, i.e. the most recent commit."""
    repo = MemoryRepo()
    tree = Tree()
    repo.do_commit(tree = tree.id, message = b'first commit')
    commit = repo.do_commit(tree = tree.id, message = b'second commit')
    assert GitRepo(repo).current_commit.id == commit
def test_current_tree_should_be_new_if_repo_is_empty():
    """With no commits, current_tree is a fresh, empty tree object."""
    tree = GitRepo(MemoryRepo()).current_tree
    assert tree is not None
    assert list(tree) == []
def test_current_tree_should_be_from_current_commit():
    """current_tree must reflect the latest commit's tree, not the first one."""
    repo = MemoryRepo()
    tree = Tree()
    repo.object_store.add_object(tree)
    repo.do_commit(tree = tree.id, message = b'first commit')
    # Grow the tree and commit again; current_tree should match the new id.
    tree.add(b'test', 0o100644, Blob().id)
    repo.object_store.add_object(tree)
    repo.do_commit(tree = tree.id, message = b'second commit')
    assert GitRepo(repo).current_tree.id == tree.id
def test_commit_new_data():
    """commit() must store blobs under per-directory trees with the given
    message/author, and current_tree/current_commit must reflect them."""
    data = create_data('sample/data.yml', 'test content')
    data2 = create_data('sample2/data2.yml', 'test content2')
    data3 = create_data('sample2/data3.yml', 'test content3')
    message = 'commit message'
    author = 'commit author'
    repo = MemoryRepo()
    git_repo = GitRepo(repo)
    git_repo.commit([data, data2, data3], message, author)
    assert sorted(list(git_repo.current_tree)) == [b'sample', b'sample2']
    assert git_repo.current_commit.message == message.encode('utf8')
    assert git_repo.current_commit.author.startswith(author.encode('utf8'))
    # Walk tree -> subtree -> blob and check each stored payload.
    assert repo.get_object(repo.get_object(git_repo.current_tree[b'sample'][1])[b'data.yml'][1]).data == b'test content'
    assert repo.get_object(repo.get_object(git_repo.current_tree[b'sample2'][1])[b'data2.yml'][1]).data == b'test content2'
    assert repo.get_object(repo.get_object(git_repo.current_tree[b'sample2'][1])[b'data3.yml'][1]).data == b'test content3'
def test_get_object():
    """get_object() must return the committed content for a given path."""
    data = create_data('sample/data.yml', 'test content')
    data2 = create_data('sample2/data2.yml', 'test content2')
    data3 = create_data('sample2/data3.yml', 'test content3')
    message = 'commit message'
    author = 'commit author'
    repo = MemoryRepo()
    git_repo = GitRepo(repo)
    git_repo.commit([data, data2, data3], message, author)
    assert git_repo.get_object('sample/data.yml') == 'test content'
def test_list():
    """list() on a directory prefix must yield that directory's contents."""
    data = create_data('sample/data.yml', 'test content')
    data2 = create_data('sample2/data2.yml', 'test content2')
    data3 = create_data('sample2/data3.yml', 'test content3')
    message = 'commit message'
    author = 'commit author'
    repo = MemoryRepo()
    git_repo = GitRepo(repo)
    git_repo.commit([data, data2, data3], message, author)
    # NOTE(review): only membership is checked, not the result's length.
    assert all([ elem in ['test content2', 'test content3'] for elem in git_repo.list('sample2/')])
def create_data(path, content):
    """Build a mock data object exposing ``path`` and ``content`` attributes."""
    stub = Mock()
    stub.path = path
    stub.content = content
    return stub
| StarcoderdataPython |
1655831 | <reponame>vadi2/codeql<gh_stars>1000+
# -*- coding: utf-8 -*-
# Autogenerated by Sphinx on Mon May 16 13:41:38 2016
# Maps help-topic names to their reStructuredText bodies.
# NOTE(review): appears truncated relative to the Sphinx-autogenerated original.
topics = {'assert': '\n' }
| StarcoderdataPython |
130987 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
import windtools.util as util
class Weibull(object):
    """Fit per-direction Weibull distributions to wind-speed observations.

    The input dataframe provides wind-speed and wind-direction columns
    (configurable via ``ws_field``/``wd_field``); they are normalized to
    ``ws``/``wd`` internally and binned for per-sector fitting.
    """

    def __init__(self, data, ws_field='ws', wd_field='wd', wd_bin_size=30, ws_bin_size=1, prepare_data=True):
        self.data = pd.DataFrame(data)
        # Bug fix: DataFrame.rename returns a new frame; the original call
        # discarded its result, so custom field names were never applied.
        self.data = self.data.rename(columns={ws_field: 'ws', wd_field: 'wd'})
        self.param = None  # per-direction Weibull parameters, set by fit_distribution()
        if prepare_data:
            self.prepare_data(wd_bin_size, ws_bin_size)

    @classmethod
    def load_raw_data(cls, fpath, ws_field='ws', wd_field='wd', wd_bin_size=30, **loading_options):
        """Build an instance from a raw data file via ``util.load_data``."""
        field_map = {ws_field: 'ws', wd_field: 'wd'}
        df = util.load_data(fpath=fpath, field_map=field_map, loading_options=loading_options, dropna='any')
        return cls(data=df, wd_bin_size=wd_bin_size)

    def prepare_data(self, wd_bin_size, ws_bin_size):
        """Bin wind direction/speed and drop rows outside the bin ranges."""
        max_ws = self.data['ws'].max()
        # Wrap 360 degrees back to 0.  Bug fix: the original
        # ``.ix[mask] = 0`` zeroed *every* column of matching rows
        # (including wind speed); only the direction should be wrapped,
        # and ``.ix`` no longer exists in modern pandas.
        self.data.loc[self.data['wd'] == 360, 'wd'] = 0
        self.data['wd_bin'] = pd.cut(self.data['wd'], bins=np.arange(0, 360.1, wd_bin_size))
        self.data['ws_bin'] = pd.cut(self.data['ws'], bins=np.arange(0, max_ws+0.1, ws_bin_size))
        self.data.dropna(inplace=True)

    def fit_distribution(self):
        """Fit a Weibull distribution per direction bin.

        Returns a DataFrame indexed by direction bin with columns ``k``
        (shape), ``mu`` (location, fixed at 0 via ``floc``) and ``lam``
        (scale); also stored on ``self.param``.
        """
        result_dict = {}
        for bin_name, sub_df in self.data.groupby('wd_bin'):
            k, mu, lam = stats.weibull_min.fit(sub_df['ws'], floc=0)
            result_dict[bin_name] = {'k': k, 'mu': mu, 'lam': lam}
        self.param = pd.DataFrame(result_dict).T
        return self.param

    def create_plots(self, savefig=False):
        """Plot the fitted Weibull PDF for each direction bin.

        ``fit_distribution()`` must have been called first so that
        ``self.param`` is populated.
        """
        fig = plt.figure(figsize=(15, 12), dpi=80)
        # Bug fix: the original ``len(self.param.shape[0])`` called len()
        # on an int (TypeError); the subplot count is just the bin count.
        sp_n = self.param.shape[0]
        sp_rows = int(np.sqrt(sp_n))
        sp_cols = np.ceil(sp_n / sp_rows)
        lab_fsize = int(-4 / 5 * sp_n + 20)  # shrink labels as subplot count grows
        for i, (bin_name, sub_df) in enumerate(self.data.groupby('wd_bin')):
            ax = fig.add_subplot(sp_rows, sp_cols, i + 1)
            # Bug fix: ``k, = ...`` tried to tuple-unpack a scalar .loc lookup.
            k = self.param.loc[bin_name, 'k']
            mu = self.param.loc[bin_name, 'mu']
            lam = self.param.loc[bin_name, 'lam']
            weib_x = np.linspace(0, max(sub_df['ws']), 1000)
            weib_y = stats.weibull_min(k, mu, lam).pdf(weib_x)
            plt.plot(weib_x, weib_y, 'r--', linewidth=2, label="weib fit")
            plt.xlabel('wind speed [m/s]', fontsize=lab_fsize)
            plt.ylabel('frequency', fontsize=lab_fsize)
            plt.title('WD={} A={} k={} u={}'.format(bin_name, round(lam, 2), round(k, 2),
                                                    round(np.mean(sub_df['ws']), 2)), fontsize=lab_fsize)
            plt.legend(fontsize=lab_fsize)
        fig.suptitle('Weibull fit', fontsize=21)
        fig.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.91, wspace=0.5, hspace=0.5)
        if savefig:
            plt.savefig('weib_fit.png', transparent=True)
        plt.show()
# todo: remove below after done refactoring
def weibull_fit(dfb, plot=False, savefig=False):
    """Fit (and optionally plot) a Weibull distribution per wind-direction bin.

    :param dfb: DataFrame with ``ws``, ``wd_bin`` and ``ws_bin`` columns.
    :param plot: When True, draw one subplot per direction bin.
    :param savefig: When True (and plotting), save the figure to weib_fit.png.
    :return: List of ``[bin_name, k, lam]`` entries (shape and scale per bin).
    """
    # get wd binning index sorted by each bin's numeric lower bound
    wd_bin_names = [x for x in dfb['wd_bin'].unique() if pd.notnull(x)]
    wd_bin_names = sorted(wd_bin_names, key=lambda x: float(x[1:].split(",")[0]))
    sp_n = len(wd_bin_names)
    sp_rows = int(np.sqrt(sp_n))
    sp_cols = np.ceil(sp_n / sp_rows)
    lab_fsize = int(-4 / 5 * sp_n + 20)  # shrink labels as subplot count grows
    weibParam = []
    if plot: fig = plt.figure(figsize=(15, 12), dpi=80)
    for i, i_bin in enumerate(wd_bin_names):
        data = dfb[dfb['wd_bin'] == i_bin][['ws', 'ws_bin']]
        k, mu, lam = stats.weibull_min.fit(data['ws'], floc=0)  # weib fitting
        weibParam.append([i_bin, k, lam])
        if plot:
            ax = fig.add_subplot(sp_rows, sp_cols, i + 1)
            # Empirical frequency histogram of the bin's wind speeds.
            pt = pd.pivot_table(data, values=['ws'], index=['ws_bin'], aggfunc='count').fillna(0)
            bar_x = [float(x[1:].split(',')[0]) for x in pt.index]
            bar_y = [x / sum(pt['ws']) for x in pt['ws']]
            weib_x = np.linspace(0, max(data['ws']), 1000)
            weib_y = stats.weibull_min(k, mu, lam).pdf(weib_x)
            plt.bar(bar_x, bar_y, width=1, label="data")
            plt.plot(weib_x, weib_y, 'r--', linewidth=2, label="weib fit")
            plt.xlabel('wind speed [m/s]', fontsize=lab_fsize)
            plt.ylabel('frequency', fontsize=lab_fsize)
            plt.title('WD={} A={} k={} u={}'.format(i_bin, round(lam, 2), round(k, 2),
                round(np.mean(data['ws']), 2)), fontsize=lab_fsize)
            plt.legend(fontsize=lab_fsize)
    if plot:
        fig.suptitle('Weibull fit', fontsize=21)
        fig.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.91, wspace=0.5, hspace=0.5)
        if savefig: plt.savefig('weib_fit.png', transparent=True)
        plt.show()
    return weibParam
if __name__ == "__main__":
import os
fpath = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'tests', 'samples', 'sample_data.csv')
w = Weibull.load_raw_data(fpath)
w.fit_distribution()
w.create_plots()
print(w.data.head())
#weibull_fit(df, plot=True, savefig=False)
| StarcoderdataPython |
4805463 | <reponame>pkestene/COSMA
#!/usr/bin/env python3
import argparse
import os
import sys
import tempfile
import subprocess
# Parse the single positional argument: the install prefix for dependencies.
parser = argparse.ArgumentParser()
parser.add_argument(
    'prefix',
    type=str,
    help='Installation prefix for dependencies'
)
args = parser.parse_args()
# Bail out early if the prefix is not an existing directory.
if not os.path.isdir(args.prefix):
    print("The argument is not a directory.")
    sys.exit()
def install_lib(tmppath, prefix, libname):
    """Clone, configure, build, and install one kabicm/<libname> dependency.

    Args:
        tmppath: Scratch directory for the clone and build trees.
        prefix: Installation prefix root; the library is installed into
            ``<prefix>/<libname>-master``.
        libname: Repository name under https://github.com/kabicm.

    Returns:
        The library's install directory (for use in CMAKE_PREFIX_PATH).
    """
    url = 'https://github.com/kabicm/{libname}.git'.format(**locals())
    clone_dir = os.path.join(tmppath, libname)
    build_dir = os.path.join(tmppath, 'build_{libname}'.format(**locals()))
    install_dir = '{prefix}/{libname}-master'.format(**locals())

    # List-form subprocess calls (shell=False) avoid the shell quoting and
    # injection pitfalls of the original os.system / shell=True invocations.
    subprocess.call(['git', 'clone', '--recursive', url, clone_dir])
    os.makedirs(build_dir, exist_ok=True)
    subprocess.call(
        ['cmake', '../{libname}'.format(**locals()),
         '-DCMAKE_BUILD_TYPE=Release',
         '-DCMAKE_INSTALL_PREFIX={install_dir}'.format(**locals())],
        cwd=build_dir,
    )
    subprocess.call(['cmake', '--build', '.', '--target', 'install'], cwd=build_dir)
    return install_dir
with tempfile.TemporaryDirectory() as tmppath:
    # Build each dependency into the prefix and collect its install dir
    # into a ';'-separated list suitable for CMAKE_PREFIX_PATH.
    install_dirs = ''
    for libname in ['options', 'semiprof', 'grid2grid']:
        install_dirs += '{};'.format(install_lib(tmppath, args.prefix, libname))
    print('\nUse the following CMake parameter: -DCMAKE_PREFIX_PATH="{}"'.format(install_dirs))
| StarcoderdataPython |
4800935 | # Databricks notebook source
# MAGIC %md
# MAGIC #Import Libraries
# COMMAND ----------
# 0) Import Libraries
from pyspark.sql.types import *
from pyspark.sql.functions import *
from pyspark import SparkContext
from pyspark.sql import SparkSession
from pyspark.streaming import StreamingContext
# COMMAND ----------
# MAGIC %md
# MAGIC #Declare Storage Variables
# COMMAND ----------
# 1) Source directory
dbfsSrcDirPath="/mnt/ong-new-mexico/nm-wsproduction-raw"
# 2) Source File
sourceFile = dbfsSrcDirPath + "/wcproduction.xml"
# 3) Streaming directory
dbfsSrcDirPathStreaming="/mnt/ong-new-mexico/nm-wsproduction-streaming"
# 4) Destination directory
dbfsDestDirPath="/mnt/ong-new-mexico/nm-wsproduction-processed-csv"
# 5) Staging Directory after spark sql parsing
dbfsStagingDir="/mnt/ong-new-mexico/nm-wsproduction-staging"
# COMMAND ----------
# MAGIC %fs head --maxBytes=5000 /mnt/ong-new-mexico/nm-wsproduction-raw/wcproduction.xml
# COMMAND ----------
# MAGIC %md
# MAGIC # Get Spark Streaming Context
# COMMAND ----------
sc = SparkContext.getOrCreate()
spark = SparkSession(sc)
ssc = StreamingContext(sc,1)
# COMMAND ----------
dfStreamingText = ssc.textFileStream(dbfsSrcDirPathStreaming)
# COMMAND ----------
# MAGIC %md
# MAGIC #Start streaming the large xml file with lineSep = '>'
# COMMAND ----------
# Split the large XML on '>' so each element fragment becomes one row.
text_sdf = spark.readStream.text("/mnt/ong-new-mexico/nm-wsproduction-raw/wcproduction*.xml",lineSep='>' )
text_sdf.isStreaming
# COMMAND ----------
# MAGIC %md
# MAGIC #Write the streaming query results in 20 second increments in small files
# COMMAND ----------
write_stream = text_sdf.writeStream.trigger(processingTime='20 seconds').start(path='/mnt/ong-new-mexico/nm-wsproduction-streaming/wcproduction_smallchunks', queryName='wcprod_query', outputMode="append", format='text',checkpointLocation='/mnt/ong-new-mexico/nm-wsproduction-streaming/checkpoint')
# COMMAND ----------
# MAGIC %md
# MAGIC #Cancel the streaming job once it is done processing the files
# COMMAND ----------
# MAGIC %md
# MAGIC #Create a new DB and table to further process the small files from streaming
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE DATABASE IF NOT EXISTS ong_new_mexico;
# MAGIC
# MAGIC USE ong_new_mexico;
# MAGIC --DROP TABLE wcproduction;
# MAGIC CREATE EXTERNAL TABLE IF NOT EXISTS wcproduction(
# MAGIC Value STRING )
# MAGIC LOCATION "dbfs:/mnt/ong-new-mexico/nm-wsproduction-streaming/wcproduction_smallchunks"
# MAGIC STORED AS TEXTFILE;
# COMMAND ----------
# MAGIC %md
# MAGIC # Clean Up Step1
# MAGIC # 1. Remove special character
# MAGIC # 2. Remove value after and including xmlns
# MAGIC # 3. Add closing >
# COMMAND ----------
sourceDF = spark.sql("SELECT concat(split(regexp_replace(decode(value,'UTF-16'),'�',''),' xmlns')[0],'>') AS value from ong_new_mexico.wcproduction")
# COMMAND ----------
sourceDF.head(100)
# COMMAND ----------
# MAGIC %md
# MAGIC # Clean up Step 2 to facilitate xml parsing
# MAGIC # 1. Remove xsd lines
# MAGIC # 2. Remove root line
# COMMAND ----------
modDF = sourceDF.filter(~sourceDF.value.contains('xsd')).filter(~sourceDF.value.contains('root'))
# COMMAND ----------
modDF.head(100)
# COMMAND ----------
modDF.write.text(dbfsStagingDir+"/nm-wsproduction-staging")
# COMMAND ----------
# MAGIC %md
# MAGIC # Quick test to see if there are more characters to clean
# MAGIC # We found a \n
# COMMAND ----------
testFile = '/mnt/ong-new-mexico/nm-wsproduction-staging/nm-wsproduction-staging/part-00000-tid-4163187932729680924-b422d691-4382-4048-8784-27bd61278598-337-1-c000.txt'
# COMMAND ----------
dbutils.fs.head(testFile)
# COMMAND ----------
# MAGIC %md
# MAGIC # Quick test
# MAGIC # Remember to install the spark xml library
# MAGIC # Coordinate
# MAGIC # com.databricks:spark-xml:0.5.0
# COMMAND ----------
spark = SparkSession.builder.getOrCreate()
sourceDFXMLFinalTest = spark.read.format('xml').options(rowTag='wcproduction').options(charset='UTF-8').load(testFile)
# COMMAND ----------
display(sourceDFXMLFinalTest)
# COMMAND ----------
# MAGIC %md
# MAGIC # Denote source and destination
# COMMAND ----------
sourceXMLFiles =dbfsStagingDir+"/nm-wsproduction-staging/part*.txt"
# COMMAND ----------
destFolder = dbfsDestDirPath + '/wcproduction'
# COMMAND ----------
# MAGIC %md
# MAGIC # Start the xml read to parse the values
# MAGIC # Remember to install the spark xml library
# MAGIC # Coordinate
# MAGIC # com.databricks:spark-xml:0.5.0
# COMMAND ----------
spark = SparkSession.builder.getOrCreate()
sourceDFXMLFinal = spark.read.format('xml').options(rowTag='wcproduction').options(charset='UTF-8').load(sourceXMLFiles)
# COMMAND ----------
# MAGIC %md
# MAGIC # Final Clean up of the values obtained after xml parsing
# COMMAND ----------
# Strip stray newlines left in every parsed column.
cleanedSourceDFXMLFinal = sourceDFXMLFinal.withColumn('amend_ind', regexp_replace('amend_ind', '\n', ''))\
    .withColumn('api_cnty_cde', regexp_replace('api_cnty_cde', '\n', ''))\
    .withColumn('api_st_cde', regexp_replace('api_st_cde', '\n', ''))\
    .withColumn('api_well_idn', regexp_replace('api_well_idn', '\n', ''))\
    .withColumn('c115_wc_stat_cde', regexp_replace('c115_wc_stat_cde', '\n', ''))\
    .withColumn('eff_dte', regexp_replace('eff_dte', '\n', ''))\
    .withColumn('mod_dte', regexp_replace('mod_dte', '\n', ''))\
    .withColumn('ogrid_cde', regexp_replace('ogrid_cde', '\n', ''))\
    .withColumn('pool_idn', regexp_replace('pool_idn', '\n', ''))\
    .withColumn('prd_knd_cde', regexp_replace('prd_knd_cde', '\n', ''))\
    .withColumn('prod_amt', regexp_replace('prod_amt', '\n', ''))\
    .withColumn('prodn_day_num', regexp_replace('prodn_day_num', '\n', ''))\
    .withColumn('prodn_mth', regexp_replace('prodn_mth', '\n', ''))\
    .withColumn('prodn_yr', regexp_replace('prodn_yr', '\n', ''))
# COMMAND ----------
display(cleanedSourceDFXMLFinal)
# COMMAND ----------
# MAGIC %md
# MAGIC # Write to the destination folder as csv
# COMMAND ----------
cleanedSourceDFXMLFinal.write.format("com.databricks.spark.csv").option("header", "true").save(destFolder)
# COMMAND ----------
# MAGIC %md
# MAGIC # Quick tests
# COMMAND ----------
cleanedSourceDFXMLFinal.count()
# COMMAND ----------
| StarcoderdataPython |
38148 | from unittest.mock import patch, MagicMock, call
import json
from datetime import datetime
from copy import deepcopy
import pytest
from PIL import Image
from sm.engine import DB, ESExporter, QueuePublisher
from sm.engine.dataset_manager import SMapiDatasetManager, SMDaemonDatasetManager
from sm.engine.dataset_manager import Dataset, DatasetActionPriority, DatasetAction, DatasetStatus
from sm.engine.errors import DSIDExists
from sm.engine.queue import SM_ANNOTATE, SM_DS_STATUS
from sm.engine.tests.util import pysparkling_context, sm_config, ds_config, test_db
from sm.engine.png_generator import ImageStoreServiceWrapper
@pytest.fixture()
def fill_db(test_db, sm_config, ds_config):
    """Seed the test DB with one FINISHED dataset plus a job, a sum formula,
    and an iso_image_metrics row referencing it."""
    upload_dt = '2000-01-01 00:00:00'
    ds_id = '2000-01-01'
    meta = {"meta": "data"}
    db = DB(sm_config['db'])
    db.insert('INSERT INTO dataset (id, name, input_path, upload_dt, metadata, config, '
              'status, is_public, mol_dbs, adducts) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)',
              rows=[(ds_id, 'ds_name', 'input_path', upload_dt,
                     json.dumps(meta), json.dumps(ds_config), DatasetStatus.FINISHED,
                     True, ['HMDB-v4'], ['+H'])])
    db.insert("INSERT INTO job (id, db_id, ds_id) VALUES (%s, %s, %s)",
              rows=[(0, 0, ds_id)])
    db.insert("INSERT INTO sum_formula (id, db_id, sf) VALUES (%s, %s, %s)",
              rows=[(1, 0, 'H2O')])
    db.insert(("INSERT INTO iso_image_metrics (job_id, db_id, sf, adduct, iso_image_ids) "
               "VALUES (%s, %s, %s, %s, %s)"),
              rows=[(0, 0, 'H2O', '+H', ['iso_image_1_id', 'iso_image_2_id'])])
    db.close()
def create_ds_man(sm_config, db=None, es=None, img_store=None,
                  action_queue=None, status_queue=None, sm_api=False):
    """Build an API-side or daemon-side dataset manager, mocking any
    collaborator that the caller did not supply."""
    db = db or DB(sm_config['db'])
    es_mock = es or MagicMock(spec=ESExporter)
    action_queue_mock = action_queue or MagicMock(QueuePublisher)
    status_queue_mock = status_queue or MagicMock(QueuePublisher)
    img_store_mock = img_store or MagicMock(spec=ImageStoreServiceWrapper)
    if not sm_api:
        return SMDaemonDatasetManager(db=db, es=es_mock,
                                      img_store=img_store_mock, mode=None,
                                      status_queue=status_queue_mock)
    return SMapiDatasetManager(db=db, es=es_mock,
                               mode='queue', image_store=img_store_mock,
                               action_queue=action_queue_mock, status_queue=status_queue_mock)
def create_ds(ds_id='2000-01-01', ds_name='ds_name', input_path='input_path', upload_dt=None,
              metadata=None, ds_config=None, status=DatasetStatus.NEW, mol_dbs=None, adducts=None):
    """Create a Dataset test object, filling unspecified fields with defaults."""
    upload_dt = upload_dt or datetime.now()
    mol_dbs = mol_dbs or ['HMDB-v4']
    adducts = adducts or ['+H', '+Na', '+K']
    return Dataset(ds_id, ds_name, input_path, upload_dt, metadata or {}, ds_config or {},
                   status=status, mol_dbs=mol_dbs, adducts=adducts, img_storage_type='fs')
class TestSMapiDatasetManager:
    def test_add_new_ds(self, test_db, sm_config, ds_config):
        """add() should publish an ADD action message at the given priority."""
        action_queue_mock = MagicMock(spec=QueuePublisher)
        ds_man = create_ds_man(sm_config, action_queue=action_queue_mock, sm_api=True)
        ds_id = '2000-01-01'
        ds = create_ds(ds_id=ds_id, ds_config=ds_config)
        ds_man.add(ds, priority=DatasetActionPriority.HIGH)
        msg = {'ds_id': ds_id, 'ds_name': 'ds_name', 'input_path': 'input_path',
               'action': DatasetAction.ADD, 'del_first': False}
        action_queue_mock.publish.assert_has_calls([call(msg, DatasetActionPriority.HIGH)])
    def test_delete_ds(self, test_db, sm_config, ds_config):
        """delete() should publish a DELETE action at HIGH priority."""
        action_queue_mock = MagicMock(spec=QueuePublisher)
        ds_man = create_ds_man(sm_config, action_queue=action_queue_mock, sm_api=True)
        ds_id = '2000-01-01'
        ds = create_ds(ds_id=ds_id, ds_config=ds_config)
        ds_man.delete(ds)
        msg = {'ds_id': ds_id, 'ds_name': 'ds_name', 'input_path': 'input_path', 'action': DatasetAction.DELETE}
        action_queue_mock.publish.assert_has_calls([call(msg, DatasetActionPriority.HIGH)])
def test_update_ds__configs_equal_metadata_diff(self, fill_db, sm_config, ds_config):
action_queue_mock = MagicMock(spec=QueuePublisher)
ds_man = create_ds_man(sm_config, action_queue=action_queue_mock, sm_api=True)
ds_id = '2000-01-01'
ds = create_ds(ds_id=ds_id, ds_config=ds_config)
ds.metadata = {'new': 'metadata'}
ds_man.update(ds)
msg = {'ds_id': ds_id, 'ds_name': 'ds_name', 'input_path': 'input_path',
'action': DatasetAction.UPDATE}
action_queue_mock.publish.assert_has_calls([call(msg, DatasetActionPriority.HIGH)])
def test_update_ds__configs_metadata_equal__do_nothing(self, fill_db, sm_config, ds_config):
action_queue_mock = MagicMock(spec=QueuePublisher)
ds_man = create_ds_man(sm_config, action_queue=action_queue_mock, sm_api=True)
ds_id = '2000-01-01'
ds = create_ds(ds_id=ds_id, ds_config=ds_config)
ds_man.update(ds)
action_queue_mock.assert_not_called()
def test_add_ds__new_mol_db(self, fill_db, sm_config, ds_config):
action_queue_mock = MagicMock(spec=QueuePublisher)
ds_man = create_ds_man(sm_config, action_queue=action_queue_mock, sm_api=True)
ds_id = '2000-01-01'
ds = create_ds(ds_id=ds_id, ds_config=ds_config)
ds.config['databases'] = [{'name': 'HMDB'}, {'name': 'ChEBI'}]
ds_man.add(ds)
msg = {'ds_id': ds_id, 'ds_name': 'ds_name', 'input_path': 'input_path',
'action': DatasetAction.ADD, 'del_first': False}
action_queue_mock.publish.assert_has_calls([call(msg, DatasetActionPriority.DEFAULT)])
def test_add_optical_image(self, fill_db, sm_config, ds_config):
db = DB(sm_config['db'])
action_queue_mock = MagicMock(spec=QueuePublisher)
es_mock = MagicMock(spec=ESExporter)
img_store_mock = MagicMock(ImageStoreServiceWrapper)
img_store_mock.post_image.side_effect = ['opt_img_id1', 'opt_img_id2', 'opt_img_id3', 'thumbnail_id']
img_store_mock.get_image_by_id.return_value = Image.new('RGB', (100, 100))
ds_man = create_ds_man(sm_config=sm_config, db=db, es=es_mock,
img_store=img_store_mock, action_queue=action_queue_mock, sm_api=True)
ds_man._annotation_image_shape = MagicMock(return_value=(100, 100))
ds_id = '2000-01-01'
ds = create_ds(ds_id=ds_id, ds_config=ds_config)
zoom_levels = [1, 2, 3]
raw_img_id = 'raw_opt_img_id'
ds_man.add_optical_image(ds, raw_img_id, [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
zoom_levels=zoom_levels)
assert db.select('SELECT * FROM optical_image') == [
('opt_img_id{}'.format(i + 1), ds.id, zoom)
for i, zoom in enumerate(zoom_levels)]
assert db.select('SELECT optical_image FROM dataset where id = %s', params=(ds_id,)) == [(raw_img_id,)]
assert db.select('SELECT thumbnail FROM dataset where id = %s', params=(ds_id,)) == [('thumbnail_id',)]
class TestSMDaemonDatasetManager:
    """The daemon-side manager does the work itself: runs the search job,
    writes/deletes dataset rows and keeps the Elasticsearch index in sync."""
    class SearchJob:
        # Minimal stand-in for the real search job: accepts anything, does nothing.
        def __init__(self, *args, **kwargs):
            pass
        def run(self, *args, **kwargs):
            pass
    def test_add_ds(self, test_db, sm_config, ds_config):
        """add() must persist the dataset row with exactly the given fields."""
        action_queue_mock = MagicMock(spec=QueuePublisher)
        es_mock = MagicMock(spec=ESExporter)
        db = DB(sm_config['db'])
        try:
            ds_man = create_ds_man(sm_config, db=db, es=es_mock, action_queue=action_queue_mock, sm_api=False)
            ds_id = '2000-01-01'
            ds_name = 'ds_name'
            input_path = 'input_path'
            upload_dt = datetime.now()
            metadata = {}
            ds = create_ds(ds_id=ds_id, ds_name=ds_name, input_path=input_path, upload_dt=upload_dt,
                           metadata=metadata, ds_config=ds_config)
            ds_man.add(ds, search_job_factory=self.SearchJob)
            DS_SEL = 'select name, input_path, upload_dt, metadata, config from dataset where id=%s'
            assert db.select_one(DS_SEL, params=(ds_id,)) == (ds_name, input_path, upload_dt, metadata, ds_config)
        finally:
            db.close()
    def test_update_ds(self, fill_db, sm_config, ds_config):
        """update() must re-index the dataset: delete then index with the mol DB."""
        action_queue_mock = MagicMock(spec=QueuePublisher)
        es_mock = MagicMock(spec=ESExporter)
        ds_man = create_ds_man(sm_config, es=es_mock, action_queue=action_queue_mock, sm_api=False)
        ds_id = '2000-01-01'
        ds = create_ds(ds_id=ds_id, ds_config=ds_config)
        # Patch the molecular-DB machinery so no real DB service is needed.
        with patch('sm.engine.dataset_manager.MolecularDB') as MolecularDB:
            mol_db_mock = MolecularDB.return_value
            mol_db_mock.name = 'HMDB'
            with patch('sm.engine.dataset_manager.MolDBServiceWrapper') as MolDBServiceWrapper:
                moldb_service_wrapper_mock = MolDBServiceWrapper.return_value
                moldb_service_wrapper_mock.find_db_by_id.return_value = {'name': 'HMDB-v4'}
                ds_man.update(ds)
                es_mock.delete_ds.assert_called_with(ds_id)
                # index_ds must be called with both the dataset id and the mol DB
                # (keyword arguments only, hence call_args[1]).
                call_args = es_mock.index_ds.call_args[1].values()
                assert ds_id in call_args and mol_db_mock in call_args
    def test_delete_ds(self, fill_db, sm_config, ds_config):
        """delete() must remove iso images, the ES document and the DB row."""
        db = DB(sm_config['db'])
        action_queue_mock = MagicMock(spec=QueuePublisher)
        es_mock = MagicMock(spec=ESExporter)
        img_store_service_mock = MagicMock(spec=ImageStoreServiceWrapper)
        ds_man = create_ds_man(sm_config, db=db, es=es_mock, img_store=img_store_service_mock,
                               action_queue=action_queue_mock, sm_api=False)
        ds_id = '2000-01-01'
        ds = create_ds(ds_id=ds_id, ds_config=ds_config)
        ds_man.delete(ds)
        # Iso image ids come from the fill_db fixture.
        ids = ['iso_image_{}_id'.format(id) for id in range(1, 3)]
        img_store_service_mock.delete_image_by_id.assert_has_calls(
            [call('fs', 'iso_image', ids[0]), call('fs', 'iso_image', ids[1])])
        es_mock.delete_ds.assert_called_with(ds_id)
        assert db.select_one('SELECT * FROM dataset WHERE id = %s', params=(ds_id,)) == []
| StarcoderdataPython |
3308819 | <reponame>clauddio-silva/ope-1<filename>ope-backend/src/infra/repository/product_order_repository.py
from sqlalchemy.exc import IntegrityError, NoResultFound, MultipleResultsFound
from src.infra.config import DBConnectionHandler
from src.infra.db_entities import Products_Orders as Product_Order
class Product_OrderRepository:
    """Data access for rows of the products/orders association table.

    Every classmethod opens its own session via DBConnectionHandler and
    returns a response dict ``{"data": ..., "status": <http-like code>,
    "errors": [...]}``; database exceptions never escape to the caller.
    """

    @classmethod
    def create_product_order(cls, product_id: int, order_id: int, price: float, amount: int):
        """Insert a new product/order association row and return it as a dict."""
        with DBConnectionHandler() as db:
            try:
                new_product_order = Product_Order(product_id=product_id, order_id=order_id,
                                                  price=price, amount=amount)
                db.session.add(new_product_order)
                db.session.commit()
                return {
                    "data": new_product_order.to_dict(),
                    "status": 201,
                    "errors": []}
            except IntegrityError:
                db.session.rollback()
                return {
                    "data": None,
                    "status": 409,
                    "errors": ["Erro na requisição"]}
            except Exception as ex:
                print(ex)
                db.session.rollback()
                return {
                    "data": None,
                    "status": 500,
                    "errors": ["Algo deu errado na conexão com o banco de dados"]}
            finally:
                db.session.close()

    @classmethod
    def list_products_orders(cls):
        """Return every product/order row as a list of dicts."""
        with DBConnectionHandler() as db:
            try:
                raw_products_orders = db.session.query(Product_Order).all()
                products_orders = [product_order.to_dict() for product_order in raw_products_orders]
                return {"data": products_orders, "status": 200, "errors": []}
            except IntegrityError:
                db.session.rollback()
                return {"data": [], "status": 409, "errors": ["Integrity Error"]}
            except Exception as ex:
                print(ex)
                db.session.rollback()
                return {"data": [], "status": 500, "errors": ["Algo deu errado na conexão com o banco de dados"]}
            finally:
                db.session.close()

    @classmethod
    def update_product_order(cls, product_order_id: int, product_id: int, order_id: int, price: float, amount: int):
        """Overwrite all fields of the row identified by *product_order_id*."""
        with DBConnectionHandler() as db:
            try:
                product_order = db.session.query(Product_Order).filter_by(id=product_order_id).first()
                if product_order:
                    # Fix: removed a leftover debug print of the fetched row.
                    product_order.product_id = product_id
                    product_order.order_id = order_id
                    product_order.price = price
                    product_order.amount = amount
                    db.session.commit()
                    return {"data": None, "status": 200, "errors": []}
                return {"data": None, "status": 404, "errors": ["Pedido de produtos não encontrado."]}
            except IntegrityError:
                # Fix: roll back the failed transaction (the original left the
                # session dirty on this path).
                db.session.rollback()
                return {"data": None, "status": 409, "errors": ["Pedido de produto já existe."]}
            except Exception as ex:
                print(ex)
                db.session.rollback()
                return {"data": None, "status": 500, "errors": ["Algo deu errado na conexão com o banco de dados"]}
            finally:
                db.session.close()

    @classmethod
    def delete_product_order(cls, product_order_id: int):
        """Delete the row identified by *product_order_id*, if it exists."""
        with DBConnectionHandler() as db:
            try:
                product_order = db.session.query(Product_Order).filter_by(id=product_order_id).first()
                if product_order:
                    db.session.delete(product_order)
                    db.session.commit()
                    return {"data": None, "status": 200, "errors": []}
                return {"data": None, "status": 404, "errors": [f"Pedido de produto de id {product_order_id} não existe"]}
            except MultipleResultsFound:
                return {"data": None, "status": 409, "errors": [f"Conflito de pedido de produto com id {product_order_id}"]}
            except Exception as ex:
                # Fix: log and roll back like the other methods (the original
                # silently discarded the exception and left the session dirty).
                print(ex)
                db.session.rollback()
                return {"data": None, "status": 500, "errors": ["Algo deu errado na conexão com o banco de dados"]}
            finally:
                db.session.close()

    @classmethod
    def get_product_order_by_id(cls, product_order_id: int):
        """Return all rows whose ``order_id`` equals *product_order_id*.

        NOTE(review): despite the name, this filters on ``order_id`` (i.e. it
        lists the products belonging to an order), not on the row's own id --
        confirm against the callers before changing the filter.
        """
        with DBConnectionHandler() as db:
            try:
                raw_products_orders = db.session.query(Product_Order).filter_by(order_id=product_order_id)
                products_orders = [product_order.to_dict() for product_order in raw_products_orders]
                return {"data": products_orders, "status": 200, "errors": []}
            except NoResultFound:
                return {"data": None, "status": 404, "errors": [f"Pedido de produto de id {product_order_id} não existe"]}
            except Exception as ex:
                print(ex)
                return {"data": None, "status": 500, "errors": ["Algo deu errado na conexão com o banco de dados"]}
            finally:
                # Fix: this method did not close its session like the others do.
                db.session.close()
1740218 | <filename>rbacProject/apps/system/models.py
from django.db import models
from db.baseModel import *
# Create your models here.
class SystemSetup(models.Model):
    """Site-wide display settings: page titles, copyright text and base URL."""
    loginTitle = models.CharField(max_length=20, null=True, blank=True, verbose_name='登录标题')  # login page title
    mainTitle = models.CharField(max_length=20, null=True, blank=True, verbose_name='系统标题')  # main system title
    headTitle = models.CharField(max_length=20, null=True, blank=True, verbose_name='浏览器标题')  # browser tab title
    copyright = models.CharField(max_length=100, null=True, blank=True, verbose_name='底部版权信息')  # footer copyright text
    url = models.CharField(max_length=50, null=True, blank=True, verbose_name='系统URL地址')  # system URL address
    def __str__(self):
        return self.loginTitle
    class Meta:
        verbose_name = "系统设置"
        verbose_name_plural = verbose_name
    @classmethod
    def getSystemSetupLastData(self):
        """Return the most recently created settings row, wrapped in a dict."""
        return dict(system_setup=SystemSetup.objects.last())
class EmailSetup(models.Model):
    """SMTP account settings used for sending outgoing mail."""
    emailHost = models.CharField(max_length=30, verbose_name='SMTP服务器')  # SMTP server
    emailPort = models.IntegerField(verbose_name='SMTP端口')  # SMTP port
    emailUser = models.EmailField(max_length=100, verbose_name='邮箱帐号')  # mailbox account
    emailPassword = models.CharField(max_length=30, verbose_name='邮箱密码')  # mailbox password
    def __str__(self):
        return self.emailHost
    class Meta:
        verbose_name = '发件邮箱设置'
        verbose_name_plural = verbose_name
    @classmethod
    def getEmailSetupLastData(self):
        """Return the most recently created SMTP settings row."""
        return EmailSetup.objects.last()
175713 | <reponame>gcollard/lightbus<filename>lightbus/client/commands.py
import logging
from typing import NamedTuple, Optional, List, Tuple
from lightbus.api import Api
from lightbus.message import EventMessage, RpcMessage, ResultMessage
from lightbus.utilities.internal_queue import InternalQueue
logger = logging.getLogger(__name__)
# Command payloads exchanged between the client components.
# NOTE(review): ``options: dict = {}`` shares one mutable default dict across
# all instances of a class -- confirm the options dicts are never mutated.
class SendEventCommand(NamedTuple):
    """Command: send an event message."""
    message: EventMessage
    options: dict = {}
class ConsumeEventsCommand(NamedTuple):
    """Command: consume the listed (api, event) pairs into a destination queue."""
    events: List[Tuple[str, str]]
    listener_name: str
    destination_queue: InternalQueue[EventMessage]
    options: dict = {}
class AcknowledgeEventCommand(NamedTuple):
    """Command: acknowledge a consumed event message."""
    message: EventMessage
    options: dict = {}
class CallRpcCommand(NamedTuple):
    """Command: call a remote procedure."""
    message: RpcMessage
    options: dict = {}
class ConsumeRpcsCommand(NamedTuple):
    """Command: consume RPC calls for the listed API names."""
    api_names: List[str]
    options: dict = {}
class ExecuteRpcCommand(NamedTuple):
    """An RPC call has been received and must be executed locally"""
    message: RpcMessage
class PublishApiSchemaCommand(NamedTuple):
    """Command: publish the schema of an API."""
    api: Api
class CloseCommand(NamedTuple):
    """Command: close the client."""
    pass
class SendResultCommand(NamedTuple):
    """Command: send the result of an executed RPC back to its caller."""
    rpc_message: RpcMessage
    message: ResultMessage
class ReceiveResultCommand(NamedTuple):
    """Command: receive the result of an RPC into a destination queue."""
    message: RpcMessage
    destination_queue: InternalQueue
    options: dict
class ReceiveSchemaUpdateCommand(NamedTuple):
    """Command: apply an updated schema."""
    schema: dict
class ShutdownCommand(NamedTuple):
    """Command: shut down, optionally carrying the exception that caused it."""
    exception: Optional[BaseException]
| StarcoderdataPython |
1734734 | from FichaDB import FichaDB
class Ficha(object):
    """In-memory representation of a "ficha" (form) with 129 answer fields.

    ``self.lista`` holds the 129 answers, in table column order (lista[0]
    maps to column c1 ... lista[128] to column c70).
    """

    # Column names of the ``ficha`` table, in the exact order expected in
    # ``self.lista``.
    _COLUMNS = (
        "c1", "c2", "c3", "c4_1", "c4_2", "c5_1",
        "c5_2", "c6", "c7", "c8", "c9", "c10_1",
        "c10_2", "c11", "c12", "c13", "c14", "c15",
        "c16", "c17", "c18", "c19_1", "c19_2", "c20",
        "c21", "c22", "c23", "c24", "c25", "c26",
        "c27", "c28", "c29", "c30", "c31", "c32",
        "c33", "c34", "c35_1", "c35_2", "c35_3", "c35_4",
        "c35_5", "c35_6", "c35_7", "c35_8", "c35_9",
        "c36_1", "c36_2", "c36_3", "c36_4", "c36_5", "c36_6",
        "c36_7", "c36_8", "c36_9", "c36_10", "c36_11",
        "c36_12", "c36_13", "c36_14_1", "c36_14_2", "c37", "c38_1",
        "c38_2", "c38_3", "c38_4", "c38_5", "c38_6", "c38_7",
        "c39", "c40", "c41", "c42", "c43",
        "c44", "c45_1", "c45_2", "c46_1", "c46_2", "c47",
        "c48", "c49", "c50", "c51", "c52",
        "c53", "c54", "c55", "c56", "c57",
        "c58", "c59_1", "c59_2", "c59_3", "c59_4", "c59_5",
        "c59_6", "c59_7", "c59_8", "c59_9", "c60_1",
        "c60_2", "c61", "c62", "c63_1_1", "c63_1_2", "c63_2",
        "c63_3", "c63_4", "c63_5_1", "c63_5_2", "c63_5_3", "c63_5_4",
        "c63_5_5", "c63_5_6", "c63_5_7", "c63_5_8", "c63_5_9", "c63_5_10",
        "c63_5_11", "c64_1", "c64_2", "c65", "c66", "c67",
        "c68", "c69", "c70",
    )

    def __init__(self, lista=None):
        # Bug fix: the original ignored a supplied ``lista`` argument (the
        # attribute was only assigned in the ``lista is None`` branch), and
        # ``[] * 129`` evaluates to just ``[]``.  Keep the empty-list default
        # but honour the argument when it is given.
        self.lista = [] if lista is None else list(lista)
        self.info = {}

    def insertFicha(self):
        """Persist ``self.lista`` as a new row of the ``ficha`` table.

        Returns a success message, or a failure message when anything goes
        wrong (connection problems, wrong number/type of values, ...).
        """
        banco = FichaDB()
        try:
            c = banco.conexao.cursor()
            # HACK: values are interpolated into the SQL string exactly like
            # the original code did, which is vulnerable to SQL injection if
            # the answers come from untrusted input.  Switching to the
            # driver's parameterized placeholders would be the proper fix.
            columns = ", ".join(self._COLUMNS)
            values = ", ".join("'" + value + "'" for value in self.lista)
            c.execute(" insert into ficha (" + columns + ") values (" + values + ")")
            banco.conexao.commit()
            c.close()
            return "Ficha cadastrada com sucesso!"
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            return "Falha ao cadastrar Ficha!"
| StarcoderdataPython |
1603404 | <reponame>samkennerly/suneku<gh_stars>1-10
from setuptools import setup
# Metadata-only setup script: a single top-level package, no dependencies.
setup(
    author='<NAME>',
    author_email='<EMAIL>',
    description='Python package which builds its own Docker images.',
    name='suneku',
    packages=['suneku'],
    url='https://github.com/samkennerly/suneku',
    version='1.0.0')
| StarcoderdataPython |
92413 | <filename>setup.py
import os
from pathlib import Path
from setuptools import setup, find_packages
# The directory containing this file
here = Path(__file__).parent

# The text of the README file, used as the PyPI long description.
# Fix: reuse ``here`` (it was computed and then ignored) and read with an
# explicit encoding instead of the platform default.
README = (here / "README.md").read_text(encoding="utf-8")

# Collect every pipeline's template files as (directory, [files]) entries.
data_files = []
for pipeline in ['fastapi', 'flux_pack', 'library_webpack_ts', 'scribble_html']:
    for root, dirs, files in os.walk(f"youwol/pipelines/{pipeline}/files_template", topdown=False):
        data_files.append((root, [f'{root}/{f}' for f in files]))
setup(
    name='youwol',
    python_requires='~=3.6',
    version='0.0.3',
    description="Local YouWol environment",
    author="<NAME>",
    author_email="<EMAIL>",
    long_description=README,
    long_description_content_type="text/markdown",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
    ],
    packages=find_packages(include=[
        'youwol',
        'youwol_utils',
        'youwol_utils.**',
        'youwol_data',
        'youwol.**'
    ]),
    data_files=data_files,
    package_data={
        'youwol_data': ['databases.zip', 'remotes-info.json'],
        'youwol.services.fronts.dashboard_developer': ['*.html', '*.js', '*.css', '*.map'],
        'youwol.services.fronts.workspace_explorer': ['*.html', '*.js', '*.css', '*.map'],
        'youwol.services.fronts.flux_builder': ['*.html', '*.js', '*.css', '*.map'],
        'youwol.services.fronts.flux_runner': ['*.html', '*.js', '*.css', '*.map']
    },
    include_package_data=True,
    install_requires=[
        # Fix: "aiohttp==3.7.4.post0" was listed twice; the duplicate entry
        # has been removed.
        "aiohttp==3.7.4.post0",
        "fastapi==0.65.1",
        "uvicorn==0.13.4",
        "python-multipart==0.0.5",
        "async==0.6.2",
        "websockets==9.0.2",
        "watchgod==0.7",
        "aiofiles==0.7.0",
        "async_generator==1.10",
        "brotlipy==0.7.0",
        "pillow==8.2.0",
        "cowpy==1.1.0"
    ],
    entry_points={
        'console_scripts': ['youwol=youwol.main:main']
    }
)
| StarcoderdataPython |
50478 | <reponame>aMurryFly/Old_Courses<filename>OOP_Python/4_Jueves/calculos/__init__.py
# Packages are directories where modules are stored.
'''
1) Create a folder containing an __init__.py file <- package initializer
'''
| StarcoderdataPython |
3329459 | class Config(object):
# Config.ini 文件目录
INI_PATH = "/etc/radiaTest/messenger.ini"
# 模式
DEBUG = False
TESTING = False
# 日志
LOG_LEVEL = "INFO"
# PXE服务器
# PXE地址(必须配置免密登录,如果和server为同一台机器,则不需要)
# dhcp配置文件
DHCP_CONF = "/etc/dhcp/dhcpd.conf"
# tftp-server 文件存储路径
# TFTP_PATH = "/var/lib/tftpboot"
# HTTP请求头
HEADERS = {"Content-Type": "application/json;charset=utf8"}
# 虚拟机
# 虚拟机创建基础信息
# 最大内存
VM_MAX_MEMEORY = 16384
# 最大core量
VM_MAX_CORE = 4
# 最大thread量
VM_MAX_THREAD = 4
# 最大socket量
VM_MAX_SOCKET = 4
# 最大磁盘大小(G)
VM_MAX_CAPACITY = 500
# 等待虚拟机建立通信时长
VM_ENABLE_SSH = 300
# 默认存活时长(days)
VM_DEFAULT_DAYS = 7
# 最大存活时长(days)
VM_MAX_DAYS = 15
# 执行任务
# worker端框架存放路径
WORKER_DOWNLOAD_PATH = "/opt"
# 每组测试环境,最大执行时长
MAX_RUN_TIME = 3600
class TestingConfig(Config):
    """Config variant with testing mode switched on."""
    TESTING = True
| StarcoderdataPython |
# Every variable can be reassigned; variable names should always be created in lowercase.
# By convention, uppercase variables act as constants and must not be reassigned.
PI = 3.14  # circle constant, rounded to two decimal places
GRAVITY = 9.8  # gravitational acceleration (m/s^2)
def get_extension(file):
    """Return everything after the first '.' in *file* (its extension).

    Raises ValueError when the name contains no dot.
    """
    dot_index = file.index(".")
    return file[dot_index + 1:]
def highest_number(numbers):
    """Return the largest element of *numbers* (ValueError on an empty sequence)."""
    return max(numbers)
182377 | import cv2
# Open the demo clip; VideoCapture does not raise on failure, so check isOpened().
cap = cv2.VideoCapture('image_rec/test_video.mp4')
if not cap.isOpened():
    print("Error opening video stream or file")
while cap.isOpened():
    ret, image = cap.read()
    if not ret:
        # Fix: the original passed an unchecked frame to imshow, which crashes
        # at end-of-stream when read() returns (False, None).
        break
    cv2.imshow('video', image)
    # Press Q on keyboard to exit
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
cap.release()  # Fix: free the capture device before tearing down the windows.
cv2.destroyAllWindows()
| StarcoderdataPython |
# Base configs inherited by this file (mmsegmentation-style ``_base_`` list).
_base_ = [
    # 'configs/_base_/models/upernet_swin.py',
    'configs/_base_/datasets/cityscapes.py',
    'configs/_base_/default_runtime.py',
    'configs/_base_/schedules/schedule_160k.py'
]
# By default, models are trained on 8 GPUs with 2 images per GPU
# (this config raises the per-GPU batch to 4).
data = dict(samples_per_gpu=4)
# model settings
norm_cfg = dict(type='BN', requires_grad=True)
# Swin Transformer backbone (depths 2-2-18-2) with a UPerNet decode head and
# an auxiliary FCN head; 19 output classes (Cityscapes).
model = dict(
    pretrained='pretrain/swin_small_patch4_window7_224.pth',
    type='EncoderDecoder',
    backbone=dict(
        type='SwinTransformer',
        # embed_dim=96,
        depths=[2, 2, 18, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.3,
        # ape=False,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        # use_checkpoint=False
    ),
    decode_head=dict(
        type='UPerHead',
        in_channels=[96, 192, 384, 768],
        in_index=[0, 1, 2, 3],
        pool_scales=(1, 2, 3, 6),
        channels=512,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=384,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
                 paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
                                                 'relative_position_bias_table': dict(decay_mult=0.),
                                                 'norm': dict(decay_mult=0.)}))
lr_config = dict(_delete_=True, policy='poly',
                 warmup='linear',
                 warmup_iters=1500,
                 warmup_ratio=1e-6,
                 power=1.0, min_lr=0.0, by_epoch=False)
# 320k-iteration schedule; checkpoint + mIoU evaluation every 4k iterations.
runner = dict(type='IterBasedRunner', max_iters=320000)
checkpoint_config = dict(by_epoch=False, interval=4000)
evaluation = dict(interval=4000, metric='mIoU')
| StarcoderdataPython |
3373369 | from . import input_output
from . import network
from . import plotting
from . import model
from . import simulation
from . import dhn_from_osm
from . import optimization
| StarcoderdataPython |
3310187 | <gh_stars>0
import copy
import datetime
import threading
import time
import uuid
from dateutil.relativedelta import relativedelta
class Scheduler:
thread_dict = dict()
def set_cron(self, timelist, func, *args, **kwargs):
cron_id = uuid.uuid4()
next_dt = datetime.datetime.now() + relativedelta(seconds=1)
thread = threading.Thread(target=self.__run, args=(
cron_id, timelist, next_dt, func) + args, kwargs=kwargs)
self.thread_dict[cron_id] = thread
thread.start()
return cron_id
def remove_cron(self, cron_id):
if cron_id in self.thread_dict:
del self.thread_dict[cron_id]
def remove_cron_all(self):
self.thread_dict = dict()
def join(self):
thread_dict_copy = copy.copy(self.thread_dict)
self.remove_cron_all()
for thread in thread_dict_copy.values():
thread.join()
def __run(self, cron_id, timelist, now_dt, func, *args, **kwargs):
tgt_dt = self.__get_next_dt(timelist, now_dt=now_dt)
if tgt_dt is None:
del self.thread_dict[cron_id]
return
diff_seconds = (tgt_dt.replace(microsecond=0) -
datetime.datetime.now()).total_seconds()
time.sleep(max(diff_seconds, 0))
if not cron_id in self.thread_dict:
return
func(datetime.datetime.now(), *args, **kwargs)
next_dt = tgt_dt + relativedelta(seconds=1)
thread = threading.Thread(target=self.__run, args=(
cron_id, timelist, next_dt, func) + args, kwargs=kwargs)
self.thread_dict[cron_id] = thread
thread.start()
def __get_next_dt(self, tgt_tl, now_dt=None):
if now_dt is None:
now_dt = datetime.datetime.now()
now_tl = [now_dt.year, now_dt.month, now_dt.day,
now_dt.hour, now_dt.minute, now_dt.second]
zero_tl = [1, 1, 1, 0, 0, 0]
next_tl = copy.copy(tgt_tl)
free_idx = None
future_flag = False
for i in range(len(now_tl)):
# now or future
if next_tl[i] is None:
# future
if future_flag:
next_tl[i] = zero_tl[i]
# now or future
else:
next_tl[i] = now_tl[i]
free_idx = i
# past or future
if next_tl[i] < now_tl[i]:
# past
if free_idx is None:
return
# future
if not future_flag:
future_flag = True
next_tl[free_idx] += 1
# future
if next_tl[i] > now_tl[i]:
free_idx = -1
future_flag = True
next_dt = now_dt + relativedelta(years=(next_tl[0] - now_tl[0]), months=(next_tl[1] - now_tl[1]), days=(
next_tl[2] - now_tl[2]), hours=(next_tl[3] - now_tl[3]), minutes=(next_tl[4] - now_tl[4]), seconds=(next_tl[5] - now_tl[5]))
return next_dt
| StarcoderdataPython |
107715 | <reponame>sthagen/pconf
import os
import re
from ast import literal_eval
from warnings import warn
class Env(object):
def __init__(
self,
separator=None,
match=None,
whitelist=None,
parse_values=False,
to_lower=False,
convert_underscores=False,
docker_secrets=None,
):
self.separator = separator
self.match = match
self.whitelist = whitelist
self.parse_values = parse_values
self.to_lower = to_lower
self.convert_underscores = convert_underscores
self.docker_secrets = docker_secrets
if self.match is not None:
self.re = re.compile(self.match)
self.__gather_vars()
if self.to_lower:
self.vars = self.__change_keys(self.vars, self.__to_lower)
if self.convert_underscores:
self.vars = self.__change_keys(self.vars, self.__convert_underscores)
def get(self):
return self.vars
def __valid_key(self, key):
if self.match is not None and self.whitelist is not None:
return key in self.whitelist or self.re.search(key) is not None
elif self.match is not None:
return self.re.search(key) is not None
elif self.whitelist is not None:
return key in self.whitelist
else:
return True
def __split_vars(self, env_vars):
keys_to_delete = []
dict_to_add = {}
for key in env_vars.keys():
splits = key.split(self.separator)
splits = list(filter(None, splits))
if len(splits) != 1:
split = self.__split_var(splits, env_vars[key])
keys_to_delete.append(key)
self.__merge_split(split, dict_to_add)
for key in keys_to_delete:
del env_vars[key]
env_vars.update(dict_to_add)
def __split_var(self, keys, value):
if len(keys) == 1:
return {keys[0]: value}
else:
key = keys[0]
del keys[0]
return {key: self.__split_var(keys, value)}
def __merge_split(self, split, env_vars):
key = list(split.keys())[0]
value = list(split.values())[0]
if key not in env_vars:
env_vars[key] = value
return
elif type(value) == dict:
self.__merge_split(value, env_vars[key])
else:
return
def __try_parse(self, env_vars):
for key, value in env_vars.items():
try:
if value.lower() == "true":
env_vars[key] = True
elif value.lower() == "false":
env_vars[key] = False
else:
env_vars[key] = literal_eval(value)
except (ValueError, SyntaxError):
pass
def __handle_docker_secret(self, key, value):
postfix = "_FILE"
if key.endswith(postfix):
try:
with open(value, "r") as f:
self.vars[key[0 : -len(postfix)]] = f.read().strip() # noqa: E203
except IOError:
warn("IOError when opening {}".format(value), UserWarning)
def __gather_vars(self):
self.vars = {}
env_vars = os.environ
for key in env_vars.keys():
if self.__valid_key(key):
if self.docker_secrets is not None and key in self.docker_secrets:
self.__handle_docker_secret(key, env_vars[key])
else:
self.vars[key] = env_vars[key]
if self.parse_values:
self.__try_parse(self.vars)
if self.separator is not None:
self.__split_vars(self.vars)
def __to_lower(self, key):
return key.lower()
def __convert_underscores(self, key):
return key.replace("_", "-")
def __change_keys(self, env_vars, operation):
new_dict = {}
for key, value in env_vars.items():
if type(value) == dict:
new_dict[operation(key)] = self.__change_keys(value, operation)
else:
new_dict[operation(key)] = env_vars[key]
return new_dict
| StarcoderdataPython |
3300888 | <reponame>Milstein/sorted_containers
# -*- coding: utf-8 -*-
from __future__ import print_function
from sys import hexversion
import random
from .context import sortedcontainers
from sortedcontainers import SortedSet
from nose.tools import raises
from functools import wraps
import operator
if hexversion < 0x03000000:
from itertools import izip as zip
range = xrange
random.seed(0)  # deterministic runs: the action sequence is reproducible
actions = []  # stress actions, registered via the @actor decorator below
def actor(func):
    """Decorator: register *func* in the module-level ``actions`` list."""
    actions.append(func)
    return func
def test_init():
    """Constructor: empty set, custom load factor, and bulk initialisation."""
    sst = SortedSet()
    sst._check()
    sst = SortedSet(load=10000)
    # The load factor propagates to the underlying sorted list.
    assert sst._list._load == 10000
    assert sst._list._twice == 20000
    assert sst._list._half == 5000
    sst._check()
    sst = SortedSet(range(10000))
    assert all(tup[0] == tup[1] for tup in zip(sst, range(10000)))
    sst.clear()
    assert len(sst) == 0
    assert list(iter(sst)) == []
    sst._check()
@actor
def stress_contains(sst):
    """Every current element must be reported as contained."""
    values = list(sst)
    assert all((val in sst) for val in values)
@actor
def stress_delitem(sst):
    """Delete 100 elements at random positions."""
    for rpt in range(100):
        pos = random.randrange(0, len(sst))
        del sst[pos]
@actor
def stress_operator(sst):
    """A strict subset must compare < (and the superset >)."""
    other = SortedSet(sst)
    stress_delitem(other)
    assert other < sst
    assert sst > other
@actor
def stress_getitem(sst):
    """Indexing must agree with the materialised list."""
    other = list(sst)
    assert all(sst[pos] == other[pos] for pos in range(len(sst)))
@actor
def stress_reversed(sst):
    """reversed() must agree with reversing the materialised list."""
    other = list(reversed(list(sst)))
    assert all(tup[0] == tup[1] for tup in zip(reversed(sst), other))
@actor
def stress_add(sst):
    """Add 100 random values in [0, 1000)."""
    for rpt in range(100):
        val = random.randrange(0, 1000)
        sst.add(val)
@actor
def stress_count(sst):
    """A set holds each value exactly once."""
    for val in sst:
        assert sst.count(val) == 1
@actor
def stress_difference(sst):
    """difference_update with two shrunken copies of itself."""
    copy_one = sst.copy()
    stress_delitem(copy_one)
    copy_two = sst.copy()
    stress_delitem(copy_two)
    sst.difference_update(copy_one, copy_two)
@actor
def stress_discard(sst):
    """Discard 100 randomly chosen existing values."""
    for rpt in range(100):
        pos = random.randrange(0, len(sst))
        val = sst[pos]
        sst.discard(val)
@actor
def stress_index(sst):
    """index() must invert __getitem__."""
    for rpt in range(100):
        pos = random.randrange(0, len(sst))
        val = sst[pos]
        assert pos == sst.index(val)
@actor
def stress_intersection(sst):
    """intersection_update with two shrunken copies of itself."""
    copy_one = sst.copy()
    stress_delitem(copy_one)
    copy_two = sst.copy()
    stress_delitem(copy_two)
    sst.intersection_update(copy_one, copy_two)
@actor
def stress_symmetric_difference(sst):
    """symmetric_difference_update with a shrunken copy of itself."""
    copy_one = sst.copy()
    stress_delitem(copy_one)
    sst.symmetric_difference_update(copy_one)
@actor
def stress_pop(sst):
    """pop() must return the last element; pop(pos) the element at pos."""
    val = sst[-1]
    assert val == sst.pop()
    for rpt in range(100):
        pos = random.randrange(0, len(sst))
        val = sst[pos]
        assert val == sst.pop(pos)
@actor
def stress_remove(sst):
    """Remove 100 randomly chosen existing values."""
    for rpt in range(100):
        pos = random.randrange(0, len(sst))
        val = sst[pos]
        sst.remove(val)
@actor
def stress_update(sst):
    """update() with several generators at once."""
    def iter_randomly(start, stop, count):
        for rpt in range(count):
            yield random.randrange(start, stop)
    sst.update(iter_randomly(0, 500, 100),
               iter_randomly(500, 1000, 100),
               iter_randomly(1000, 1500, 100),
               iter_randomly(1500, 2000, 100))
@actor
def stress_isdisjoint(sst):
    """Values never inserted (negatives) must be disjoint from the set."""
    values = [-1, -2, -3]
    assert sst.isdisjoint(values)
@actor
def stress_issubset(sst):
    """The set is a subset of itself plus extra values."""
    that = SortedSet(sst)
    that.update(range(1000))
    assert sst.issubset(that)
@actor
def stress_issuperset(sst):
    """The set is a superset of a copy of itself."""
    that = SortedSet(sst)
    assert sst.issuperset(that)
def test_stress(repeat=1000):
    """Run *repeat* randomly chosen actions, re-validating the set after each.

    After every action the element count is pulled back into the [500, 2000]
    range so the stress actions always have enough data to work with.
    """
    sst = SortedSet(range(1000))
    for rpt in range(repeat):
        action = random.choice(actions)
        action(sst)
        try:
            sst._check()
        except AssertionError:
            print(action)  # identify the offending action before re-raising
            raise
        start_len = len(sst)
        while len(sst) < 500:
            sst.add(random.randrange(0, 2000))
        while len(sst) > 2000:
            del sst[random.randrange(0, len(sst))]
        if start_len != len(sst):
            sst._check()
if __name__ == '__main__':
    import sys
    from datetime import datetime
    start = datetime.now()
    print('Python', sys.version_info)
    # Optional argv[1]: iteration count (defaults to 1000).
    try:
        num = int(sys.argv[1])
        print('Setting iterations to', num)
    except (IndexError, ValueError):
        # Narrowed from a bare ``except:`` -- only "argument missing" and
        # "not an int" should fall back to the default.
        print('Setting iterations to 1000 (default)')
        num = 1000
    # Optional argv[2]: RNG seed (defaults to 0).
    try:
        pea = int(sys.argv[2])
        random.seed(pea)
        print('Setting seed to', pea)
    except (IndexError, ValueError):
        print('Setting seed to 0 (default)')
        random.seed(0)
    # The original wrapped the call in ``try: ... except: raise`` -- a no-op;
    # only the ``finally`` timing report is kept.
    try:
        test_stress(num)
    finally:
        print('Exiting after', (datetime.now() - start))
| StarcoderdataPython |
def hamming_weight(n: int) -> int:
    """Count the '1' bits (population count) of the given int.

    Uses Kernighan's trick: ``n & (n - 1)`` clears the lowest set bit, so the
    loop iterates once per set bit.

    :param int n:
    :return: number of '1' bits in the int
    """
    ones = 0
    while n != 0:
        n &= n - 1
        ones += 1
    return ones
| StarcoderdataPython |
# Evaluate a saved logistic-regression device classifier on a labelled
# user-agent file and write per-row predictions to a CSV.
try:
    # sklearn.externals.joblib was deprecated in 0.21 and removed in 0.23;
    # prefer the standalone joblib package, fall back for old environments.
    import joblib
except ImportError:
    from sklearn.externals import joblib
import csv

cls = joblib.load('model.h5')
vec = joblib.load('vec_count.joblib')

# Read inside a `with` block so the handle is closed (the original leaked it).
with open("userAgents5.csv", "r") as fo:
    lines = fo.readlines()

with open('results_logRegfinal.csv', mode='w') as results_file:
    results_writer = csv.writer(results_file, delimiter=',', quotechar='"',
                                quoting=csv.QUOTE_MINIMAL)
    correct = 0
    cnt = 0
    # Ground-truth labels come from the row position: rows 1-1000 are phones,
    # 1001-2000 tablets, 2001-3000 desktops, the rest TVs (row 0 is skipped).
    for x in range(1, len(lines)):
        cnt += 1
        if x <= 1000:
            real_val = 'Mobile Phone'
        elif x <= 2000:
            real_val = 'Tablet'
        elif x <= 3000:
            real_val = 'Desktop'
        else:
            real_val = 'TV Device'
        userAgent = lines[x]
        features = vec.transform(
            [userAgent.lower()]
        )
        features_userAgent = features.toarray()
        pred = cls.predict(features_userAgent)[0]
        results_writer.writerow([x, pred])
        if real_val == pred:
            correct += 1
print('Model Logistic Regression - Accuracy on test data = {0:.4f}'.format(correct / cnt))
114036 | __author__ = 'chris'
"""
Package for holding all of our protobuf classes
"""
| StarcoderdataPython |
3372583 | <reponame>danheath/pykol-lib
from typing import Optional
from dataclasses import dataclass
import libkol
@dataclass
class ItemQuantity:
    """An item paired with how many of it are present."""
    item: "libkol.Item"
    quantity: int
@dataclass
class Listing:
    """A single store listing of an item offered for sale."""
    item: Optional["libkol.Item"] = None
    price: int = 0  # unit price
    stock: int = 0  # units available in the store
    limit: int = 0  # per-buyer purchase limit (0 presumably = no limit — confirm)
    limit_reached: bool = False  # whether the purchase limit has been hit
    store_id: Optional[int] = None
    store_name: Optional[str] = None
@dataclass
class FamiliarState:
    """Per-familiar state: identity, weight, nickname and progress counters."""
    familiar: "libkol.Familiar"
    weight: int
    nickname: str
    experience: int = 0
    kills: int = 0
1702713 | from pkg_resources import iter_entry_points
from typeguard import check_argument_types
from typing import Sequence
nodefault = object()
def traverse(obj, target:str, default=nodefault, executable:bool=False, separator:str='.', protect:bool=True):
    """Traverse down an object, using getattr or getitem.

    If ``executable`` is ``True`` any executable function encountered will be, with no arguments. Traversal will
    continue on the result of that call. You can change the separator as desired, i.e. to a '/'.

    By default attributes (but not array elements) prefixed with an underscore are taboo. They will not resolve,
    raising a LookupError.

    Certain allowances are made: if a 'path segment' is numerical, it's treated as an array index. If attribute
    lookup fails, it will re-try on that object using array notation and continue from there. This makes lookup
    very flexible.
    """
    # TODO: Support numerical slicing, i.e. ``1:4``, or even just ``:-1`` and things.
    assert check_argument_types()

    value = obj
    remainder = target

    if not target:
        return obj

    # `separator` doubles as the loop condition: str.partition() returns an
    # empty separator string once the last segment has been consumed.
    while separator:
        name, separator, remainder = remainder.partition(separator)
        # A segment of digits (with optional leading '-') is an array index.
        numeric = name.lstrip('-').isdigit()

        try:
            if numeric or (protect and name.startswith('_')):
                # Force the item-access path for indexes and protected names.
                raise AttributeError()

            value = getattr(value, name)

            if executable and callable(value):
                value = value()

        except AttributeError:
            # Attribute lookup failed (or was skipped above); retry the same
            # segment as item access.
            try:
                value = value[int(name) if numeric else name]
            except (KeyError, TypeError):
                if default is nodefault:
                    raise LookupError("Could not resolve '" + target + "' on: " + repr(obj))

                return default

    return value
def load(target:str, namespace:str=None, default=nodefault, executable:bool=False, separators:Sequence[str]=('.', ':'),
        protect:bool=True):
    """This helper function loads an object identified by a dotted-notation string.

    For example::

        # Load class Foo from example.objects
        load('example.objects:Foo')
        # Load the result of the class method ``new`` of the Foo object
        load('example.objects:Foo.new', executable=True)

    If a plugin namespace is provided simple name references are allowed. For example::

        # Load the plugin named 'routing' from the 'web.dispatch' namespace
        load('routing', 'web.dispatch')

    The ``executable``, ``protect``, and first tuple element of ``separators`` are passed to the traverse function.

    Providing a namespace does not prevent full object lookup (dot-colon notation) from working.
    """
    assert check_argument_types()

    if namespace and ':' not in target:
        # Bare name: resolve it through the entry-point registry instead.
        allowable = dict((i.name, i) for i in iter_entry_points(namespace))
        if target not in allowable:
            raise LookupError('Unknown plugin "' + target + '"; found: ' + ', '.join(allowable))
        return allowable[target].load()

    # Split the module path from the attribute path at the ':' separator.
    parts, _, target = target.partition(separators[1])

    try:
        obj = __import__(parts)
    except ImportError:
        if default is not nodefault:
            return default
        raise

    # __import__('a.b.c') returns the top package 'a'; traverse the remaining
    # module segments plus the attribute path to reach the requested object.
    # NOTE(review): with no ':' part this returns the top-level package as-is
    # — confirm that is the intended behaviour for dotted module-only targets.
    return traverse(
            obj,
            separators[0].join(parts.split(separators[0])[1:] + target.split(separators[0])),
            default = default,
            executable = executable,
            protect = protect
        ) if target else obj
| StarcoderdataPython |
1726890 | <gh_stars>10-100
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
import tf
import cv2
import yaml
from scipy.spatial import KDTree
import math
import numpy as np
STATE_COUNT_THRESHOLD = 3
class TLDetector(object):
    """ROS node that publishes the waypoint index of the next red traffic light."""

    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.base_waypoints = None
        self.waypoints_2d = None
        self.waypoint_tree = None

        # the location of stopline (map x/y for each traffic-light stop line)
        self.stopline_2d = [[1148.56, 1184.65], [1559.2, 1158.43], [2122.14, 1526.79], [2175.237, 1795.71],
                            [1493.29, 2947.67], [821.96, 2905.8], [161.76, 2303.82], [351.84, 1574.65]]
        self.stopline_tree = KDTree(self.stopline_2d)

        # set the light's initial state not red (1 = red, -1 = not red)
        self.red_light = [-1, -1, -1, -1, -1, -1, -1, -1]
        self.lights = []

        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.loop()

    def loop(self):
        """Publish the next red light's waypoint index at 50 Hz."""
        rate = rospy.Rate(50)
        while not rospy.is_shutdown():
            # Wait until both the pose and the base waypoints have arrived.
            if self.pose and self.base_waypoints:
                self.publish_redlight_idx()
            rate.sleep()

    def publish_redlight_idx(self):
        """Publish the waypoint index of the closest red light ahead, or -1."""
        # get the closest traffic light's index in stopline_2d
        closest_light_idx = self.get_closest_redlight_idx()
        # get the state of closest light, if it's red light, get it's index in
        # base_waypoint and publish, if not red light publish -1.
        redlight_state = self.red_light[closest_light_idx]
        if redlight_state == 1 and self.waypoint_tree:
            closest_red_light_idx = self.get_closest_waypoint_idx(closest_light_idx)
            self.upcoming_red_light_pub.publish(Int32(closest_red_light_idx))
        else:
            self.upcoming_red_light_pub.publish(Int32(-1))

    def pose_cb(self, msg):
        """Store the latest vehicle pose."""
        self.pose = msg

    def waypoints_cb(self, waypoints):
        """Store the base waypoints and build a KD-tree for nearest lookups."""
        self.base_waypoints = waypoints
        if not self.waypoints_2d:
            self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        """Record which lights are currently red (state 0 in the message)."""
        self.lights = msg.lights
        light_state = [trafficlight.state for trafficlight in msg.lights]
        for i, state in enumerate(light_state):
            if state == 0:
                self.red_light[i] = 1
            else:
                self.red_light[i] = -1

    def get_closest_redlight_idx(self):
        # use this function to get the closest traffic light's index in stopline_2d
        x = self.pose.pose.position.x
        y = self.pose.pose.position.y
        closest_idx = self.stopline_tree.query([x, y], 1)[1]

        # check if closest is ahead or behind car
        closest_location = self.stopline_2d[closest_idx]
        pre_closest_location = self.stopline_2d[closest_idx - 1]

        cl_vect = np.array(closest_location)
        pre_vect = np.array(pre_closest_location)
        pos_vect = np.array([x, y])

        # If the nearest stop line lies behind the car, advance to the next
        # one (wrapping around the list).
        if np.dot(cl_vect - pre_vect, pos_vect - cl_vect) > 0:
            closest_idx = (closest_idx + 1) % len(self.stopline_2d)
        return closest_idx

    def get_closest_waypoint_idx(self, closest_light_idx):
        # use this function to get the redlight's index in waypoints_2d
        x = self.stopline_2d[closest_light_idx][0]
        y = self.stopline_2d[closest_light_idx][1]
        closest_idx = self.waypoint_tree.query([x, y], 1)[1]

        # check if closest is ahead or behind car
        closest_waypoint = self.waypoints_2d[closest_idx]
        pre_closest_waypoint = self.waypoints_2d[closest_idx - 1]

        cl_vect = np.array(closest_waypoint)
        pre_vect = np.array(pre_closest_waypoint)
        pos_vect = np.array([x, y])

        if np.dot(cl_vect - pre_vect, pos_vect - cl_vect) > 0:
            closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
        return closest_idx
if __name__ == '__main__':
    try:
        TLDetector()
    except rospy.ROSInterruptException:
        # Log rather than traceback if ROS interrupts node startup/shutdown.
        rospy.logerr('Could not start traffic node.')
| StarcoderdataPython |
3218462 | <gh_stars>10-100
from data_pipeline.pipeline import Pipeline, run_pipeline
from data_pipeline.datasets.gnomad_sv_v2 import prepare_gnomad_structural_variants
pipeline = Pipeline()

###############################################
# Variants
###############################################

# Prepare gnomAD v2.1 structural variants from the three published site VCFs
# (all samples, controls-only, non-neuro) plus the precomputed histogram table.
pipeline.add_task(
    "prepare_structural_variants",
    prepare_gnomad_structural_variants,
    "/gnomad_sv_v2/structural_variants.ht",
    {
        "vcf_path": "gs://gcp-public-data--gnomad/papers/2019-sv/gnomad_v2.1_sv.sites.vcf.gz",
        "controls_vcf_path": "gs://gcp-public-data--gnomad/papers/2019-sv/gnomad_v2.1_sv.controls_only.sites.vcf.gz",
        "non_neuro_vcf_path": "gs://gcp-public-data--gnomad/papers/2019-sv/gnomad_v2.1_sv.nonneuro.sites.vcf.gz",
        "histograms_path": "gs://gcp-public-data--gnomad/papers/2019-sv/gnomad_sv_hists.ht",
    },
)

###############################################
# Outputs
###############################################

pipeline.set_outputs({"structural_variants": "prepare_structural_variants"})

###############################################
# Run
###############################################

if __name__ == "__main__":
    run_pipeline(pipeline)
| StarcoderdataPython |
1793182 | # -*- coding: utf-8 -*-
#################################################################################
# Author : Webkul Software Pvt. Ltd. (<https://webkul.com/>)
# Copyright(c): 2015-Present Webkul Software Pvt. Ltd.
# License URL : https://store.webkul.com/license.html/
# All Rights Reserved.
#
#
#
# This program is copyright property of the author mentioned above.
# You can`t redistribute it and/or modify it.
#
#
# You should have received a copy of the License along with this program.
# If not, see <https://store.webkul.com/license.html/>
#################################################################################
from odoo import models,fields,api,_
class VoucherHistory(models.Model):
    """Extends voucher.history with the marketplace seller owning the voucher."""
    _inherit = "voucher.history"

    marketplace_seller_id = fields.Many2one("res.partner", string="Seller")

    @api.model
    def create(self, vals):
        # Propagate the seller from the parent voucher onto the new history row.
        res = super(VoucherHistory, self).create(vals)
        res.marketplace_seller_id = res.voucher_id.marketplace_seller_id.id
        return res
class VoucherVoucher(models.Model):
    """Extends voucher.voucher so marketplace sellers own their vouchers."""
    _inherit = "voucher.voucher"

    @api.model
    def _set_seller_id(self):
        # Default the seller to the logged-in user when that user is a seller;
        # otherwise return an empty res.partner recordset.
        user_obj = self.env['res.users'].sudo().browse(self._uid)
        if user_obj.partner_id and user_obj.partner_id.seller:
            return user_obj.partner_id.id
        return self.env['res.partner']

    marketplace_seller_id = fields.Many2one("res.partner", string="Seller", default=_set_seller_id, copy=False)
    # In the marketplace gift-voucher context, limit selectable products to
    # approved products belonging to sellers visible to the current user.
    product_ids = fields.Many2many('product.template', 'voucher_id', 'product_id', 'voucher_product_rel',
        string='Products',
        help="Add products on which this voucher will be valid",
        domain = lambda self: [('marketplace_seller_id','in',self.env['voucher.voucher'].compute_login_userid()),('status','=','approved')] if self._context.get('mp_gift_voucher') else [],
    )

    def compute_login_userid(self):
        """Return the partner ids whose products the current user may select.

        A plain seller sees only itself; a user in both the seller and officer
        groups sees every seller partner.
        """
        login_ids = []
        seller_group = self.env['ir.model.data'].get_object_reference(
            'odoo_marketplace', 'marketplace_seller_group')[1]
        officer_group = self.env['ir.model.data'].get_object_reference(
            'odoo_marketplace', 'marketplace_officer_group')[1]
        groups_ids = self.env.user.sudo().groups_id.ids
        if seller_group in groups_ids and officer_group not in groups_ids:
            login_ids.append(self.env.user.sudo().partner_id.id)
            return login_ids
        elif seller_group in groups_ids and officer_group in groups_ids:
            obj = self.env['res.partner'].search([('seller','=',True)])
            for rec in obj:
                login_ids.append(rec.id)
            return login_ids
| StarcoderdataPython |
3283403 | <gh_stars>0
import json
import os
import requests
from quart import abort
from src.utils.array_utils import get_nested_value
from time import sleep
# Browser-like User-Agent header attached to every outgoing request.
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/54.0.2840.98 Safari/537.36'
}
def fetch_data(url, params):
    """GET `url` with exponential back-off, optionally through a proxy.

    Failed responses are retried with a doubling delay (1s, 2s, 4s, ...);
    once the delay would reach 60s the request is abandoned with HTTP 429.

    :param str url: target URL
    :param dict params: query-string parameters
    :return: the successful requests.Response
    """
    proxy = os.environ.get('PROXY_ADDRESS')
    proxies = dict(http=proxy, https=proxy)
    sleep_time = 1
    while True:
        # A read/connect timeout keeps a dead proxy or server from hanging
        # this retry loop forever (requests has no default timeout).
        response = requests.get(url, params=params,
                                proxies=proxies if proxy else None,
                                headers=headers, timeout=30)
        if response.ok:
            break
        elif sleep_time < 60:
            sleep(sleep_time)
            sleep_time *= 2
        else:
            abort(429, 'Too many request. Please check the proxy on the server')
    return response
def search(query):
    """
    Get the data for a given query string
    :param str query: String that is used for the search (name + address or place_id:{place_ide})
    :return: The query string with its corresponding result
    """
    url = 'https://www.google.de/search'
    # 'pb' is an opaque protobuf-style parameter captured from a real browser
    # session.  NOTE(review): parameter semantics are assumed from observed
    # behaviour, not documented by Google — revisit if the endpoint changes.
    params = {
        'tbm': 'map',
        'tch': 1,
        'hl': 'en',
        'q': query,
        'pb': '!4m12!1m3!1d4005.9771522653964!2d-122.42072974863942!3d37.8077459796541!2m3!1f0!2f0!3f0!3m2!1i1125!2i976'
              '!4f13.1!7i20!10b1!12m6!2m3!5m1!6e2!20e3!10b1!16b1!19m3!2m2!1i392!2i106!20m61!2m2!1i203!2i100!3m2!2i4!5b1'
              '!6m6!1m2!1i86!2i86!1m2!1i408!2i200!7m46!1m3!1e1!2b0!3e3!1m3!1e2!2b1!3e2!1m3!1e2!2b0!3e3!1m3!1e3!2b0!3e3!'
              '1m3!1e4!2b0!3e3!1m3!1e8!2b0!3e3!1m3!1e3!2b1!3e2!1m3!1e9!2b1!3e2!1m3!1e10!2b0!3e3!1m3!1e10!2b1!3e2!1m3!1e'
              '10!2b0!3e4!2b1!4b1!9b0!22m6!1sa9fVWea_MsX8adX8j8AE%3A1!2zMWk6Mix0OjExODg3LGU6MSxwOmE5ZlZXZWFfTXNYOGFkWDh'
              'qOEFFOjE!7e81!12e3!17sa9fVWea_MsX8adX8j8AE%3A564!18e15!24m15!2b1!5m4!2b1!3b1!5b1!6b1!10m1!8e3!17b1!24b1!'
              '25b1!26b1!30m1!2b1!36b1!26m3!2m2!1i80!2i92!30m28!1m6!1m2!1i0!2i0!2m2!1i458!2i976!1m6!1m2!1i1075!2i0!2m2!'
              '1i1125!2i976!1m6!1m2!1i0!2i0!2m2!1i1125!2i20!1m6!1m2!1i0!2i956!2m2!1i1125!2i976!37m1!1e81!42b1!47m0!49m1'
              '!3b1'
    }
    response = fetch_data(url, params)
    # The payload ends at a '/*""*/' marker; trim to the last closing brace
    # so the remainder parses as JSON.
    data = response.text.split('/*""*/')[0]
    jend = data.rfind('}')
    if jend >= 0:
        data = data[:jend + 1]
    # The interesting part is a JSON string (prefixed with 4 junk chars)
    # nested under key 'd'.
    jdata = json.loads(data)['d']
    jdata = json.loads(jdata[4:])
    # Get info from result array, has to be adapted if api changes
    data = get_nested_value(jdata, 0, 1, 0, 14)
    # Check second result
    if data is None:
        data = get_nested_value(jdata, 0, 1, 1, 14)
    return dict(query=query, data=data)
def get_by_id(pb_id):
    """
    Get the data for a given id
    :param str pb_id: Id that is used to retrieve a specific place over the pb query parameter
    :return: The id with its corresponding result
    """
    url = 'https://www.google.com/maps/preview/place'
    # 'pb' is an opaque protobuf-style parameter captured from a browser
    # session, with the place id interpolated at the front.
    params = {
        'authuser': 0,
        'hl': 'en',
        'gl': 'en',
        'pb': f'!1m17!1s{pb_id}!3m12!1m3!1d4005.9771522653964!2d-122.42072974863942!3d37.8077459796541!2m3!1f0!2f0'
              '!3f0!3m2!1i1440!2i414!4f13.1!4m2!3d-122.42072974863942!4d37.8077459796541!12m4!2m3!1i360!2i120!4i8'
              '!13m65!2m2!1i203!2i100!3m2!2i4!5b1!6m6!1m2!1i86!2i86!1m2!1i408!2i240!7m50!1m3!1e1!2b0!3e3!1m3!1e2!2b1'
              '!3e2!1m3!1e2!2b0!3e3!1m3!1e3!2b0!3e3!1m3!1e8!2b0!3e3!1m3!1e3!2b1!3e2!1m3!1e10!2b0!3e3!1m3!1e10!2b1'
              '!3e2!1m3!1e9!2b1!3e2!1m3!1e10!2b0!3e3!1m3!1e10!2b1!3e2!1m3!1e10!2b0!3e4!2b1!4b1!9b0!14m5'
              '!1sTpKbYLDlD47FUrPBo4gL!4m1!2i5210!7e81!12e3!15m55!1m17!4e2!13m7!2b1!3b1!4b1!6i1!8b1!9b1!20b1!18m7'
              '!3b1!4b1!5b1!6b1!9b1!13b1!14b0!2b1!5m5!2b1!3b1!5b1!6b1!7b1!10m1!8e3!14m1!3b1!17b1!20m2!1e3!1e6!24b1'
              '!25b1!26b1!29b1!30m1!2b1!36b1!43b1!52b1!54m1!1b1!55b1!56m2!1b1!3b1!65m5!3m4!1m3!1m2!1i224!2i298!89b1'
              '!21m28!1m6!1m2!1i0!2i0!2m2!1i458!2i414!1m6!1m2!1i1390!2i0!2m2!1i1440!2i414!1m6!1m2!1i0!2i0!2m2!1i1440'
              '!2i20!1m6!1m2!1i0!2i394!2m2!1i1440!2i414!22m1!1e81!29m0!30m1!3b1!34m2!7b1!10b1!37i557'
    }
    response = fetch_data(url, params)
    # The JSON body follows an anti-hijacking prefix terminated by "'\n".
    data = response.text.split('\'\n')[1]
    data = json.loads(data)
    return dict(id=pb_id, data=data)
| StarcoderdataPython |
1735931 | <filename>dcompy/waitlist.py
"""
Copyright 2021-2021 The jdh99 Authors. All rights reserved.
等待队列
Authors: jdh99 <<EMAIL>>
"""
import threading
import time
from typing import Callable

import dcompy.log as log
from dcompy.block_tx import *
from dcompy.system_error import *
class _Item:
    """A pending RPC request waiting on the list for its acknowledgement."""

    def __init__(self):
        self.protocol = 0
        self.pipe = 0
        # Total timeout in us.
        self.timeout = 0
        self.req = bytearray()
        self.resp = bytearray()
        # Start time in us; used to decide whether the total timeout is exceeded.
        self.start_time = 0
        # Callback function; when present the call is asynchronous.
        self.ack_callback = None
        self.dst_ia = 0
        self.rid = 0
        self.token = 0
        self.is_rx_ack = False
        self.result = SYSTEM_OK
        # Timestamp of the last send in us; used for retransmission.
        self.last_retry_timestamp = 0
        self.retry_num = 0
        self.code = 0
_items = list()
_lock = threading.Lock()
async def waitlist_run():
    """Run the module: periodically check the wait list for retries and timeouts."""
    while True:
        _check_wait_items()
        await asyncio.sleep(INTERVAL)
def _check_wait_items():
    """Scan all pending items, retransmitting or expiring each as needed."""
    with _lock:
        # _retry_send() may remove items from _items; iterate over a snapshot
        # so removal during iteration cannot silently skip the next item.
        for item in list(_items):
            _retry_send(item)
def _retry_send(item: _Item):
    """Retransmit *item* if due, or fail it when timeout/retries are exhausted."""
    t = get_time()
    if t - item.start_time > item.timeout:
        log.warn('wait ack timeout!task failed!token:%d', item.token)
        _items.remove(item)

        if len(item.req) > SINGLE_FRAME_SIZE_MAX:
            block_remove(item.protocol, item.pipe, item.dst_ia, item.code, item.rid, item.token)

        if item.ack_callback:
            # Callback style (asynchronous call).
            item.ack_callback(bytearray(), SYSTEM_ERROR_RX_TIMEOUT)
        else:
            # Synchronous call: wake the waiting caller with the error.
            item.is_rx_ack = True
            item.result = SYSTEM_ERROR_RX_TIMEOUT
        return

    # Block transfers are not retransmitted here; the block-transfer module
    # handles its own retries.
    if len(item.req) > SINGLE_FRAME_SIZE_MAX:
        return

    load_param = get_load_param()
    if t - item.last_retry_timestamp < load_param.block_retry_interval * 1000:
        return

    # Retransmit.
    item.retry_num += 1
    if item.retry_num >= load_param.block_retry_max_num:
        log.warn('retry too many!task failed!token:%d', item.token)
        _items.remove(item)

        if item.ack_callback:
            # Callback style (asynchronous call).
            item.ack_callback(bytearray(), SYSTEM_ERROR_RX_TIMEOUT)
        else:
            # Synchronous call: wake the waiting caller with the error.
            item.is_rx_ack = True
            item.result = SYSTEM_ERROR_RX_TIMEOUT
        return

    item.last_retry_timestamp = t
    log.warn('retry send.token:%d retry num:%d', item.token, item.retry_num)
    _send_frame(item.protocol, item.pipe, item.dst_ia, item.code, item.rid, item.token, item.req)
def call(protocol: int, pipe: int, dst_ia: int, rid: int, timeout: int, req: bytearray) -> (bytearray, int):
    """Synchronous RPC call.

    :param protocol: protocol number
    :param pipe: communication pipe
    :param dst_ia: destination ia address
    :param rid: service id
    :param timeout: timeout in ms; 0 means no acknowledgement is expected
    :param req: request payload; pass bytearray() or None when there is none
    :return: (response bytes, error code); a code other than SYSTEM_OK means failure
    """
    log.info('call.protocol:%d pipe:0x%x dst ia:0x%x rid:%d timeout:%d', protocol, pipe, dst_ia, rid, timeout)
    code = CODE_CON if timeout > 0 else CODE_NON
    if not req:
        req = bytearray()
    token = get_token()
    _send_frame(protocol, pipe, dst_ia, code, rid, token, req)
    if code == CODE_NON:
        # Fire-and-forget: no acknowledgement will arrive.
        return bytearray(), SYSTEM_OK

    item = _Item()
    item.protocol = protocol
    item.pipe = pipe
    item.timeout = timeout * 1000
    item.req = req
    item.start_time = get_time()
    item.dst_ia = dst_ia
    item.rid = rid
    item.token = token
    item.code = code
    item.retry_num = 0
    item.last_retry_timestamp = get_time()

    with _lock:
        _items.append(item)

    # Wait for the ack flag set by the rx/timeout paths.  Sleep briefly each
    # pass so this wait does not busy-spin a CPU core at 100%.
    while not item.is_rx_ack:
        time.sleep(0.001)
    log.info('call resp.result:%d len:%d', item.result, len(item.resp))
    return item.resp, item.result
def call_async(protocol: int, pipe: int, dst_ia: int, rid: int, timeout: int, req: bytearray,
               ack_callback: Callable[[bytearray, int], None]):
    """Asynchronous RPC call.

    :param protocol: protocol number
    :param pipe: communication pipe
    :param dst_ia: destination ia address
    :param rid: service id
    :param timeout: timeout in ms; 0 means no acknowledgement is expected
    :param req: request payload; pass bytearray() or None when there is none
    :param ack_callback: callback func(resp: bytearray, error: int); an error
        other than SYSTEM_OK means the call failed
    """
    code = CODE_CON
    if timeout == 0 or not callable(ack_callback):
        code = CODE_NON
    if not req:
        req = bytearray()
    token = get_token()
    log.info('call async.token:%d protocol:%d pipe:0x%x dst ia:0x%x rid:%d timeout:%d', token, protocol, pipe, dst_ia,
             rid, timeout)
    _send_frame(protocol, pipe, dst_ia, code, rid, token, req)
    if code == CODE_NON:
        return

    item = _Item()
    item.ack_callback = ack_callback
    item.protocol = protocol
    item.pipe = pipe
    item.timeout = timeout * 1000
    item.req = req
    item.start_time = get_time()
    item.dst_ia = dst_ia
    item.rid = rid
    # Fix: the token must be stored on the item so the ack/rst handlers can
    # match incoming frames to this request (the line had been corrupted to a
    # placeholder, which is a NameError/always-mismatch).
    item.token = token
    item.code = code
    item.retry_num = 0
    item.last_retry_timestamp = get_time()

    with _lock:
        _items.append(item)
def _send_frame(protocol: int, pipe: int, dst_ia: int, code: int, rid: int, token: int, data: bytearray):
    """Send *data* as a single frame, or hand it to block transfer when too large."""
    if len(data) > SINGLE_FRAME_SIZE_MAX:
        block_tx(protocol, pipe, dst_ia, code, rid, token, data)
        return

    frame = Frame()
    frame.control_word.code = code
    frame.control_word.block_flag = 0
    frame.control_word.rid = rid
    frame.control_word.token = token
    frame.control_word.payload_len = len(data)
    frame.payload.extend(data)
    log.info('send frame.token:%d', token)
    send(protocol, pipe, dst_ia, frame)
def rx_ack_frame(protocol: int, pipe: int, src_ia: int, frame: Frame):
    """Handler invoked when an ACK frame is received."""
    _lock.acquire()
    log.info('rx ack frame.src ia:0x%x', src_ia)
    for item in _items:
        # The matching item removes itself from _items; break immediately so
        # iteration never continues over the mutated list.
        if _check_item_and_deal_ack_frame(protocol, pipe, src_ia, frame, item):
            break
    _lock.release()
def _check_item_and_deal_ack_frame(protocol: int, pipe: int, src_ia: int, frame: Frame, item: _Item) -> bool:
    """Return True (and complete *item*) if the ACK frame matches it."""
    if item.protocol != protocol or item.pipe != pipe or item.dst_ia != src_ia or item.rid != frame.control_word.rid \
            or item.token != frame.control_word.token:
        return False

    log.info('deal ack frame.token:%d', item.token)
    _items.remove(item)
    if item.ack_callback:
        # Callback style (asynchronous call).
        item.ack_callback(frame.payload, SYSTEM_OK)
    else:
        # Synchronous call: publish the response and wake the waiting caller.
        item.is_rx_ack = True
        item.result = SYSTEM_OK
        item.resp = frame.payload
    return True
def rx_rst_frame(protocol: int, pipe: int, src_ia: int, frame: Frame):
    """Handler invoked when a RST (remote error) frame is received."""
    _lock.acquire()
    log.warn('rx rst frame.src ia:0x%x', src_ia)
    # _deal_rst_frame() removes matching items from _items; iterate over a
    # snapshot so removal during iteration cannot skip the following item.
    for item in list(_items):
        _deal_rst_frame(protocol, pipe, src_ia, frame, item)
    _lock.release()
def _deal_rst_frame(protocol: int, pipe: int, src_ia: int, frame: Frame, item: _Item):
    """Fail *item* with the error code carried by the RST frame if it matches."""
    if item.protocol != protocol or item.pipe != pipe or item.dst_ia != src_ia or item.rid != frame.control_word.rid \
            or item.token != frame.control_word.token:
        return False

    # The first payload byte carries the remote error code.
    result = frame.payload[0]
    log.warn('deal rst frame.token:%d result:0x%x', item.token, result)
    _items.remove(item)
    if item.ack_callback:
        # Callback style (asynchronous call).
        item.ack_callback(bytearray(), result)
    else:
        # Synchronous call: wake the waiting caller with the remote error.
        item.is_rx_ack = True
        item.result = result
    return True
3203313 | from mathematics import PBox
import numpy as np
class PermutationCipher:
    """Columnar transposition cipher driven by a 1-based permutation key."""

    def __init__(self, key: list):
        # PBox models the permutation; its inverse undoes it for decryption.
        self.pbox = PBox.from_list(key)
        self.inverse = self.pbox.invert()
        self.columns = len(key)
        self.key = np.array(key)

    def encrypt(self, plaintext: str) -> str:
        """Encrypt by permuting the matrix columns per the key, reading by columns.

        (Removed a leftover debug ``print`` of the plaintext matrix.)
        """
        P = self.str_2_mat(plaintext)
        # The key is 1-based while numpy indexing is 0-based, hence `key - 1`.
        return self.mat_2_str(P[:, self.key - 1].T).upper()

    def decrypt(self, ciphertext: str) -> str:
        """Invert the column permutation row by row and lowercase the result."""
        C = self.str_2_mat(ciphertext)
        plaintext = ''
        for row in C:
            plaintext += self.inverse.permutate(''.join(row)).lower()
        return plaintext

    def str_2_mat(self, message: str):
        """Pad with 'z' to a multiple of the key length and reshape to a matrix."""
        message = message + 'z' * (-len(message) % self.columns)
        rows = len(message) // self.columns
        return np.reshape(list(message), (rows, self.columns))

    def mat_2_str(self, matrix) -> str:
        """Flatten a character matrix back into a single string."""
        return ''.join(matrix.flatten())
| StarcoderdataPython |
3314815 | <gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright 2019-2020 Mastercard
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
# Neither the name of the MasterCard International Incorporated nor the names of its
# contributors may be used to endorse or promote products derived from this software
# without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
from functools import wraps
from oauth1.oauth import OAuth
from oauth1 import authenticationutils
from urllib.parse import urlencode
from deprecated import deprecated
class SignerInterceptor(object):
    """Wraps an API client's request method to add an OAuth1 Authorization header."""

    def __init__(self, key_file, key_password, consumer_key):
        """Load signing key."""
        self.signing_key = authenticationutils.load_signing_key(key_file, key_password)
        self.consumer_key = consumer_key

    def oauth_signing(self, func):
        """Decorator for API request. func is APIClient.request"""
        @wraps(func)
        def request_function(*args, **kwargs):  # pragma: no cover
            # args[1] is the target URI; args[0] is presumably the HTTP
            # method — confirm against APIClient.request's signature.
            in_body = kwargs.get("body", None)
            query_params = kwargs.get("query_params", None)
            uri = args[1]
            if query_params:
                # The query string must be part of the signed URI.
                uri += '?' + urlencode(query_params)
            auth_header = OAuth.get_authorization_header(uri, args[0], in_body, self.consumer_key, self.signing_key)
            in_headers = kwargs.get("headers", None)
            if not in_headers:
                in_headers = dict()
                kwargs["headers"] = in_headers
            in_headers["Authorization"] = auth_header
            return func(*args, **kwargs)
        # Marker so callers can detect an already-wrapped request method.
        request_function.__oauth__ = True
        return request_function
@deprecated(version='1.1.3', reason="Use add_signer_layer(api_client, key_file, key_password, consumer_key) instead")
def add_signing_layer(self, api_client, key_file, key_password, consumer_key):
    """Deprecated alias for add_signer_layer().

    NOTE(review): this module-level function takes an unused leading ``self``
    parameter — kept as-is for backward compatibility with old call sites.
    """
    add_signer_layer(api_client, key_file, key_password, consumer_key)
def add_signer_layer(api_client, key_file, key_password, consumer_key):
    """Create and load configuration. Decorate APIClient.request with header signing.

    After this call every request made through *api_client* carries an OAuth1
    Authorization header computed with the given signing key and consumer key.
    """
    api_signer = SignerInterceptor(key_file, key_password, consumer_key)
    api_client.request = api_signer.oauth_signing(api_client.request)
@deprecated(version='1.1.3', reason="Use get_signer_layer(api_client) instead")
def get_signing_layer(self, api_client):
    """Deprecated alias for get_signer_layer().

    NOTE(review): takes an unused leading ``self`` parameter — kept as-is for
    backward compatibility with old call sites.
    """
    return get_signer_layer(api_client)
def get_signer_layer(api_client):
    """Return the request callable currently installed on *api_client*.

    After add_signer_layer() has run, this is the OAuth-signing wrapper;
    otherwise it is the client's original request method.
    """
    return getattr(api_client, 'request')
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.