seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
19350701790 | import logging
import json
from scraper.components import *
#https://coralogix.com/log-analytics-blog/python-logging-best-practices-tips/#id.1ksv4uv
#https://gist.github.com/nguyenkims/e92df0f8bd49973f0c94bddf36ed7fd0
#https://towardsdatascience.com/get-your-own-data-building-a-scalable-web-scraper-with-aws-654feb9fdad7
formatter = logging.Formatter('%(asctime)s : %(name)s : %(levelname)s : %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
streamhandler = logging.StreamHandler()
streamhandler.setFormatter(formatter)
logger.addHandler(streamhandler)
class Scraper:
    def __init__(self, base_url):
        """Bind the scraper to *base_url* and resolve the DynamoDB job table.

        :param base_url: root URL of the job site to scrape.
        """
        logger.info('Scraper Object Start.....')
        self.base_url = base_url
        logger.info('gettable method Created.....')
        # get_table comes from scraper.components (star import); presumably
        # returns a DynamoDB table handle for 'JobTable' — TODO confirm.
        self.table = get_table('JobTable')
        logger.info('Scraper Object Created.....')
    def start(self, queries, **kwargs):
        """Scrape the website for *queries*, persist results, and build an
        HTTP-style response dict (Lambda/API-Gateway shaped).

        :param queries: search queries passed through to Utils.fetch_jobs.
        :param kwargs: unused; accepted for forward compatibility.
        """
        logger.info('Scraper started')
        # fetch all jobs using Multithreading and process using Multiprocessing based on queries
        jobs = Utils.fetch_jobs(self.base_url, queries)
        # save into dynamoDB
        db.save(self.table, jobs)
        logger.info('scraping done')
        response = {
            "statusCode": 200,
            "body": json.dumps({'message':'Successfully scraped.'})
        }
return response | bilalansari-fr/aws_codepipeline | scraper/__init__.py | __init__.py | py | 1,517 | python | en | code | 0 | github-code | 50 |
4593109013 | from __future__ import division, print_function, absolute_import
import math
import hypothesis.internal.conjecture.utils as d
import hypothesis.internal.conjecture.floats as flt
from hypothesis.control import assume
from hypothesis.internal.compat import int_from_bytes
from hypothesis.internal.floats import sign
from hypothesis.searchstrategy.strategies import SearchStrategy, \
MappedSearchStrategy
class IntStrategy(SearchStrategy):
    """A generic strategy for integer types that provides the basic methods
    other than produce.

    Subclasses should provide the produce method.
    """
class IntegersFromStrategy(SearchStrategy):
    """Strategy for integers with a lower bound only.

    Values are the lower bound plus a geometrically distributed
    non-negative offset whose mean is ``average_size``.
    """

    def __init__(self, lower_bound, average_size=100000.0):
        super(IntegersFromStrategy, self).__init__()
        self.lower_bound = lower_bound
        self.average_size = average_size

    def __repr__(self):
        return 'IntegersFromStrategy(%d)' % (self.lower_bound,)

    def do_draw(self, data):
        offset = d.geometric(data, 1.0 / self.average_size)
        return int(self.lower_bound + offset)
class WideRangeIntStrategy(IntStrategy):
    """Draws 16-byte integers in sign-magnitude form, covering a very
    wide range of both positive and negative values."""

    def __repr__(self):
        return 'WideRangeIntStrategy()'

    def do_draw(self, data):
        nbytes = 16
        # Top bit of the 128-bit draw is the sign; the rest is magnitude.
        high_bit = 2 ** (nbytes * 8 - 1)
        raw = int_from_bytes(data.draw_bytes(nbytes))
        magnitude = raw & ~high_bit
        if raw & high_bit:
            magnitude = -magnitude
        return int(magnitude)
class BoundedIntStrategy(SearchStrategy):
    """Strategy producing integers in the closed interval
    [start, end] (both endpoints inclusive)."""

    def __init__(self, start, end):
        super(BoundedIntStrategy, self).__init__()
        self.start = start
        self.end = end

    def __repr__(self):
        return 'BoundedIntStrategy(%d, %d)' % (self.start, self.end)

    def do_draw(self, data):
        return d.integer_range(data, self.start, self.end)
# Curated "nasty" float constants: boundary values, float32/float64 epsilons
# and extremes, plus several inf/nan copies, sorted by their lexicographic
# shrink ordering.
NASTY_FLOATS = sorted([
    0.0, 0.5, 1.1, 1.5, 1.9, 1.0 / 3, 10e6, 10e-6, 1.175494351e-38,
    2.2250738585072014e-308,
    1.7976931348623157e+308, 3.402823466e+38, 9007199254740992, 1 - 10e-6,
    2 + 10e-6, 1.192092896e-07, 2.2204460492503131e-016,
] + [float('inf'), float('nan')] * 5, key=flt.float_to_lex)
# Force every entry to a genuine float, then mirror the list with negations.
NASTY_FLOATS = list(map(float, NASTY_FLOATS))
NASTY_FLOATS.extend([-x for x in NASTY_FLOATS])
class FloatStrategy(SearchStrategy):
    """Generic superclass for strategies which produce floats."""

    def __init__(self, allow_infinity, allow_nan):
        SearchStrategy.__init__(self)
        assert isinstance(allow_infinity, bool)
        assert isinstance(allow_nan, bool)
        self.allow_infinity = allow_infinity
        self.allow_nan = allow_nan
        # Pre-filter the curated nasty constants to the ones we may emit.
        self.nasty_floats = [f for f in NASTY_FLOATS if self.permitted(f)]
        # Sampler index 0 => draw a fresh float; 1..n => pick nasty_floats[i-1].
        # Weighting gives ~60% fresh draws overall, ~40% spread over constants.
        weights = [
            0.6 * len(self.nasty_floats)
        ] + [0.4] * len(self.nasty_floats)
        self.sampler = d.Sampler(weights)

    def __repr__(self):
        return '%s()' % (self.__class__.__name__,)

    def permitted(self, f):
        """Return True if *f* respects the allow_infinity/allow_nan flags."""
        assert isinstance(f, float)
        if not self.allow_infinity and math.isinf(f):
            return False
        if not self.allow_nan and math.isnan(f):
            return False
        return True

    def do_draw(self, data):
        # Loop until a permitted value comes out (fresh draws may still
        # produce inf/nan even when disallowed).
        while True:
            data.start_example()
            i = self.sampler.sample(data)
            if i == 0:
                result = flt.draw_float(data)
            else:
                result = self.nasty_floats[i - 1]
                # Write the chosen constant back into the byte stream so
                # shrinking sees concrete bytes for it.
                flt.write_float(data, result)
            data.stop_example()
            if self.permitted(result):
                return result
def float_order_key(k):
    # Order floats by sign first so that -0.0 compares below +0.0
    # (plain float comparison treats them as equal).
    return (sign(k), k)
class FixedBoundedFloatStrategy(SearchStrategy):
    """A strategy for floats distributed between two endpoints.

    The conditional distribution tries to produce values clustered
    closer to one of the ends.
    """

    def __init__(self, lower_bound, upper_bound):
        SearchStrategy.__init__(self)
        self.lower_bound = float(lower_bound)
        self.upper_bound = float(upper_bound)
        # The interval width must be finite for the interpolation in do_draw.
        assert not math.isinf(self.upper_bound - self.lower_bound)
        lb = float_order_key(self.lower_bound)
        ub = float_order_key(self.upper_bound)
        # Signed zeros inside the range are special-cased as critical values.
        self.critical = [
            z for z in (-0.0, 0.0)
            if lb <= float_order_key(z) <= ub
        ]
        self.critical.append(self.lower_bound)
        self.critical.append(self.upper_bound)

    def __repr__(self):
        return 'FixedBoundedFloatStrategy(%s, %s)' % (
            self.lower_bound, self.upper_bound,
        )

    def do_draw(self, data):
        # Linear interpolation over the interval using a fractional draw.
        f = self.lower_bound + (
            self.upper_bound - self.lower_bound) * d.fractional_float(data)
        # Reject rounding artefacts that escape the bounds or sign range.
        assume(self.lower_bound <= f <= self.upper_bound)
        assume(sign(self.lower_bound) <= sign(f) <= sign(self.upper_bound))
        # Special handling for bounds of -0.0
        for g in [self.lower_bound, self.upper_bound]:
            if f == g:
                f = math.copysign(f, g)
        return f
class ComplexStrategy(MappedSearchStrategy):
    """Maps pairs of floats onto complex numbers; the real and imaginary
    parts come from the underlying float strategy."""

    def __repr__(self):
        return 'ComplexStrategy()'

    def pack(self, value):
        # value is a (real, imaginary) pair drawn by the mapped strategy.
        return complex(*value)
| pareksha/Friend-recommendation-system | pyta/hypothesis/searchstrategy/numbers.py | numbers.py | py | 5,357 | python | en | code | 14 | github-code | 50 |
4641407882 |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
class ClassificationBinary(object):
    """Random-forest binary classifier that re-splits the data until a
    split yields more than 80% test accuracy."""

    def __init__(self, X, Y):
        # X: feature matrix; Y: labels (one per row of X).
        self.X = X
        self.Y = Y

    def splitData(self):
        """Draw a fresh random 90/10 train/test split of (X, Y)."""
        self.X_train, self.X_test, self.Y_train, self.Y_test = train_test_split(self.X, self.Y, test_size=0.10)

    def fitModel(self):
        """Fit the forest; return (train acc %, test acc %, confusion matrix).

        NOTE(review): this loops over fresh splits until one gives >80% test
        accuracy — it cherry-picks a favourable split and never terminates if
        the threshold is unreachable; confirm this is intended.
        """
        self.model = RandomForestClassifier(criterion='gini', max_features='sqrt')
        while True:
            self.splitData()
            self.model.fit(self.X_train, self.Y_train)
            predictions = self.model.predict(self.X_test)
            # Train and Test Accuracy
            self.accuracy_train = accuracy_score(self.Y_train, self.model.predict(self.X_train))*100
            self.accuracy_test = accuracy_score(self.Y_test, predictions)*100
            self.confusion_matrix = confusion_matrix(self.Y_test, predictions)
            if self.accuracy_test > 80.0:
                break
        return self.accuracy_train, self.accuracy_test, self.confusion_matrix

    def makePrediction(self, x):
        """Predict labels for new sample(s) *x* with the fitted model."""
        return self.model.predict(x)
if __name__ == '__main__':
    # Load the dataset: every column but the last is a feature, the last
    # column is the binary label.
    dataset = pd.read_csv('F:/University/Final Year/FYP/EEG/EEG-Diagnosis (Python)/data/dataset.csv')
    data = dataset.values[:, :-1]
    target = dataset.values[:, -1]
    binaryClassifier = ClassificationBinary(data, target)
    # data = dataset.values[:, :-1]
    # target = dataset.values[:, -1]
    train,test,cm=binaryClassifier.fitModel()
    print(train,"%",test,"%",cm)
| mibrahimniz/eeg-python | classification/ClassificationSklearn.py | ClassificationSklearn.py | py | 1,729 | python | en | code | 0 | github-code | 50 |
21917817328 | from weakref import WeakKeyDictionary
class NonNegativeInteger(object):
    """Data descriptor that only accepts non-negative ``int`` values.

    Values are stored per-instance in a WeakKeyDictionary so the
    descriptor does not keep its owning instances alive.
    """

    def __init__(self):
        self.data = WeakKeyDictionary()

    def __get__(self, instance, owner):
        if instance is None:
            # Accessed on the class itself: return the descriptor object.
            return self
        try:
            return self.data[instance]
        except KeyError:
            # Fix: accessing the attribute before it is set previously leaked
            # a KeyError; descriptors should raise AttributeError so that
            # getattr(..., default) and hasattr() behave correctly.
            raise AttributeError(
                f'{self.name!r} has not been set on {instance!r}'
            ) from None

    def __set__(self, instance, value):
        if not isinstance(value, int):
            raise TypeError(f'{self.name}: {value} is not of type int')
        if value < 0:
            raise ValueError(f'{self.name}: Must be a non-negative integer')
        self.data[instance] = value

    def __set_name__(self, owner, name):
        # Called at class-creation time; remembered for error messages.
        self.name = name
class Person(object):
    # Demo class: ``age`` is validated by the NonNegativeInteger descriptor.
    age = NonNegativeInteger()
# Demo of the descriptor's error messages. NOTE: the 'matt' assignment raises
# an uncaught TypeError, so the final line never executes — this script is a
# deliberate error-message demonstration.
p = Person()
p.age = 4  # OK
p.age = 0  # OK
p.age = 'matt'  # TypeError
p.age = -1  # ValueError
| mattjegan/describing-descriptors | slide39_errormessages.py | slide39_errormessages.py | py | 791 | python | en | code | 5 | github-code | 50 |
15055409438 | import random
moves = ['rock', 'paper', 'scissors']
class Player:
    """Base player: always throws rock and remembers the previous round."""

    def __init__(self):
        self.my_move = self.their_move = None

    def move(self):
        """Return this player's throw for the round."""
        return 'rock'

    def learn(self, my_move, their_move):
        """Store both throws from the round just played."""
        self.my_move, self.their_move = my_move, their_move
def beats(one, two):
    """Return True when move *one* defeats move *two*."""
    winning_pairs = {
        ('rock', 'scissors'),
        ('scissors', 'paper'),
        ('paper', 'rock'),
    }
    return (one, two) in winning_pairs
class RockPlayer(Player):
    """Always throws rock, no matter what the opponent does."""

    def move(self):
        return 'rock'
class RandomPlayer(Player):
    """Throws a uniformly random move every round."""

    def move(self):
        return random.choice(moves)
class ReflectPlayer(Player):
    """Replays whatever the opponent threw in the previous round."""

    def move(self):
        # No history on the first round, so pick at random.
        last = self.their_move
        return random.choice(moves) if last is None else last

    def learn(self, my_move, their_move):
        self.their_move = their_move
class CyclePlayer(Player):
    """Cycles through rock, paper, scissors in a fixed order."""

    def __init__(self):
        super().__init__()
        self.move_index = 0

    def move(self):
        choice = moves[self.move_index]
        self.move_index = (self.move_index + 1) % len(moves)
        return choice
class HumanPlayer(Player):
    """Prompts the console user for each throw; 'quit' ends the game."""

    def move(self):
        while True:
            choice = input("Enter your move (rock/paper/scissors) "
                           "or 'quit' to end the game: ").lower()
            if choice in moves:
                return choice
            if choice == 'quit':
                return 'quit'
            print("Invalid move. Please try again.")
class Game:
    """Runs rounds between two players and keeps score until one player
    leads by three (with a minimum of three rounds played)."""

    def __init__(self, p1, p2):
        self.p1 = p1
        self.p2 = p2
        self.score_p1 = 0
        self.score_p2 = 0

    def play_round(self):
        """Play one round; a human 'quit' prints final results and exits."""
        move1 = self.p1.move()
        if move1 == 'quit':
            print("Game ended by player.")
            print(f"Final scores - Player 1: {self.score_p1}, "
                  f"Player 2: {self.score_p2}")
            if self.score_p1 > self.score_p2:
                print("Player 1 wins the game!")
            elif self.score_p1 < self.score_p2:
                print("Player 2 wins the game!")
            else:
                print("It's a tie!")
            # Terminates the whole program mid-game on user request.
            exit()
        move2 = self.p2.move()
        print(f"Player 1: {move1} Player 2: {move2}")
        if move1 == move2:
            print("It's a tie!")
        elif beats(move1, move2):
            print("Player 1 wins!")
            self.score_p1 += 1
        else:
            print("Player 2 wins!")
            self.score_p2 += 1
        # Let both players see the round's outcome (from their own side).
        self.p1.learn(move1, move2)
        self.p2.learn(move2, move1)

    def play_game(self):
        """Run rounds until at least 3 have been played AND one player
        leads by 3, then announce the winner."""
        print("Game start!")
        round_num = 0
        while round_num < 3 or abs(self.score_p1 - self.score_p2) < 3:
            print(f"Round {round_num + 1}:")
            self.play_round()
            round_num += 1
        print("Game over!")
        print(f"Final scores - Player 1: {self.score_p1}, "
              f"Player 2: {self.score_p2}")
        if self.score_p1 > self.score_p2:
            print("Player 1 wins the game!")
        elif self.score_p1 < self.score_p2:
            print("Player 2 wins the game!")
        else:
            print("It's a tie!")
if __name__ == '__main__':
    # Human player versus a randomly chosen computer strategy.
    player_strategies = [RockPlayer(), RandomPlayer(), ReflectPlayer(),
                         CyclePlayer()]
    p1 = HumanPlayer()
    p2 = random.choice(player_strategies)
    game = Game(p1, p2)
    game.play_game()
| mohamedszaina/Udacity-RPS-Game-Project | rps.py | rps.py | py | 3,420 | python | en | code | 0 | github-code | 50 |
2484807979 | from anime_module import M3U8, Myself, MyselfAnime, MyselfAnimeTable
from configs import MYSELF_URL
from swap import VIDEO_QUEUE
from asyncio import gather, create_task
from pydantic import BaseModel, Field
from typing import Literal
class CacheData(BaseModel):
    """Request body carrying the cache flag (wire name "from-cache")."""
    # When True, answers may be served from cache. Defaults to True.
    from_cache: bool = Field(True, alias="from-cache")
class QueueModifyData(BaseModel):
    """Payload for a download-queue action on one downloader."""
    # Action to apply (pause/resume/stop or a reordering operation).
    modify: Literal["pause", "resume", "stop",
                    "upper", "lower", "highest", "lowest"]
    # Target downloader (wire name "downloader-id").
    downloader_id: str = Field(alias="downloader-id")
class SearchData(CacheData):
    """Search request: keyword (or Myself URL) plus the inherited cache flag."""
    keyword: str
class DownloadData(BaseModel):
    """Download request: the episodes to enqueue."""
    episodes: list[MyselfAnime]
class GetFinishData(CacheData):
    """Paged request for the finished-anime list (wire name "page-index")."""
    page_index: int = Field(alias="page-index")
class SettingUpdateData(BaseModel):
    """Application settings update payload (all fields have defaults)."""
    # HTTP User-Agent used for requests (wire name "ua").
    user_agent: str = Field(
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36 OPR/92.0.0.0 (Edition GX-CN)",
        alias="ua",
    )
    # Concurrent connections per download (wire name "cons"), at least 1.
    conections: int = Field(10, ge=1, alias="cons")
    # Number of worker threads (wire name "thrs"), at least 1.
    worker: int = Field(3, ge=1, alias="thrs")
    # Working directory for temporary files.
    temp_path: str = Field("temp", alias="temp-path")
    # Myself output templates and paths ($NAME / $EPS are substituted).
    myself_dir: str = Field("[Myself]$NAME", alias="myself-dir")
    myself_download: str = Field("download/myself", alias="myself-download")
    myself_file: str = Field("[Myself]$NAME $EPS", alias="myself-file")
    # Update-check interval; 0 disables (unit not shown here — TODO confirm).
    myself_update: int = Field(5, ge=0, alias="myself-update")
class API:
    """Static handlers backing the web API: download-queue control, search,
    and catalogue (weekly / yearly / finished) lookups."""

    @staticmethod
    def queue_modify(modify: Literal["pause", "resume", "stop", "upper", "lower", "highest", "lowest"], downloader_id: str):
        """Apply one queue action to the downloader with *downloader_id*.

        Unknown ids are silently ignored; always returns None.
        """
        # Check that the id refers to a currently queued downloader.
        if downloader_id not in VIDEO_QUEUE.get_queue():
            return None
        downloader = VIDEO_QUEUE.get_downloader(downloader_id)
        # Dispatch on the requested action.
        if modify == "pause":
            downloader.pause()
        elif modify == "resume":
            downloader.resume()
        elif modify == "stop":
            VIDEO_QUEUE.remove(downloader_id)
        elif modify == "upper":
            VIDEO_QUEUE.upper(downloader_id)
        elif modify == "lower":
            VIDEO_QUEUE.lower(downloader_id)
        elif modify == "highest":
            # Move to the front of the queue.
            download_list = VIDEO_QUEUE.get_queue()
            download_list.remove(downloader_id)
            download_list.insert(0, downloader_id)
            VIDEO_QUEUE.update(download_list)
        elif modify == "lowest":
            # Move to the back of the queue.
            download_list = VIDEO_QUEUE.get_queue()
            download_list.remove(downloader_id)
            download_list.append(downloader_id)
            VIDEO_QUEUE.update(download_list)
        return None

    @staticmethod
    def download_queue():
        """Snapshot of every queued downloader, keyed by id, with its
        display name, progress, status code, and queue position."""
        def gen_data(downloader_id: str, downloader: M3U8):
            return {
                "name": f"{downloader.output_name} - {downloader.status()}",
                "progress": downloader.get_progress(),
                "status": downloader.status_code(),
                "order": VIDEO_QUEUE.get_index(downloader_id),
            }
        result = {
            downloader_id: gen_data(downloader_id, downloader)
            for downloader_id, downloader in VIDEO_QUEUE.get_data().items()
        }
        return result

    @staticmethod
    async def search(keyword: str, from_cache=True):
        """Search by keyword; a Myself URL is resolved directly to its
        anime table. Returns {"type": "anime"|"search", "data": ...}."""
        if MYSELF_URL in keyword:
            # The keyword looks like a site link: try loading it directly.
            anime_table = MyselfAnimeTable(**{"url": keyword})
            try:
                await anime_table.update(from_cache=from_cache)
                return {
                    "type": "anime",
                    "data": anime_table.dict()
                }
            # NOTE(review): bare except — any failure falls through to a
            # normal keyword search.
            except:
                pass
        search_result = await Myself.search(keyword, from_cache=from_cache)
        if len(search_result) == 1:
            # Exactly one hit: resolve it fully and return it as an anime.
            anime_table = search_result[0]
            await anime_table.update()
            return {
                "type": "anime",
                "data": anime_table.dict()
            }
        return {
            "type": "search",
            "data": list(map(lambda anime_table: anime_table.dict(), search_result))
        }

    @staticmethod
    async def download(episodes: list[MyselfAnime]):
        """Build downloaders for all *episodes* concurrently and enqueue them."""
        tasks = [
            create_task(anime.gen_downloader())
            for anime in episodes
        ]
        downloaders = await gather(*tasks)
        for downloader in downloaders:
            VIDEO_QUEUE.add(downloader)
        return None

    @staticmethod
    async def get_week_anime(from_cache: bool = True):
        """Weekly schedule as nested lists of (anime dict, extra) pairs."""
        week_list = await Myself.weekly_update(from_cache=from_cache)
        # NOTE: the inner lambda's parameter shadows the outer day_data.
        result = list(map(
            lambda day_data: list(map(lambda day_data: (
                day_data[0].dict(), day_data[1]), day_data)),
            week_list
        ))
        return result

    @staticmethod
    async def get_year_anime(from_cache: bool = True):
        """Year catalogue: maps each key to its list of anime dicts."""
        year_dict = await Myself.year_list(from_cache=from_cache)
        result = {
            key: list(map(lambda anime_table: anime_table.dict(), value))
            for key, value in year_dict.items()
        }
        return result

    @staticmethod
    async def get_finish_anime(page_index: int, from_cache: bool = True):
        """One page of the finished-anime list as a list of anime dicts."""
        finish_list = await Myself.finish_list(from_cache=from_cache, start_page=page_index, page_num=1)
        result = list(map(lambda anime_table: anime_table.dict(), finish_list))
        return result
| AloneAlongLife/MyselfAnimeDownloader_WebUI | api/api.py | api.py | py | 5,338 | python | en | code | 4 | github-code | 50 |
35548426584 | from coinmarketcap import Market
from tkinter import *
from tkinter import font as tkfont
import pandas as pd
from tkinter import ttk
from PIL import Image, ImageTk
# NOTE(review): unused module-level list — appears to be leftover scaffolding.
test = []
#############################################FRONTEND################################################################################
def raise_frame(frame):
    """Bring *frame* to the top of the Tk stacking order, making it visible."""
    frame.tkraise()
# Build the Tk window: all frames are stacked in the same grid cell and
# raise_frame() switches between them like wizard pages.
root = Tk()
StartPage = Frame(root)
Introduction = Frame(root)
Amount = Frame(root)
Currencies = Frame(root)
Result = Frame(root)
Loading1 = Frame(root)
Loading2 = Frame(root)
Loading3 = Frame(root)
title_font = tkfont.Font(family='Comic Sans MS', size=18, weight="bold", slant="italic")
for frame in (StartPage, Loading1, Loading2, Loading3, Amount, Currencies, Result):
    frame.grid(row=0, column=0, sticky='news')
##StartPage: background image plus a Start button that kicks off the loaders.
pic = PhotoImage(file="background.pgm",width=900,height=500)
Label(StartPage,compound=RIGHT,text="",image=pic, font=title_font,width=900,height=500).pack()
Button(StartPage, text='Start', command=lambda:raise_frame(Loading1),bg="#3333cc",height=2,width=15).pack()
Label(StartPage, text='•').pack()
Big_font = tkfont.Font(family='Verdana', size=30, weight="normal", slant="roman")
##Loading1: cosmetic progress page, auto-advances after 10 s.
Label(Loading1,text="Loading modules and components",font=Big_font).pack()
progressbar = ttk.Progressbar(Loading1,orient=HORIZONTAL, length=1000, mode='determinate')
progressbar.pack(fill=BOTH, side=TOP,pady=125)
progressbar.start()
Label(Loading1, text='••').pack(side=BOTTOM)
Loading1.after(10000, lambda:raise_frame(Loading2))
##Loading2: auto-advances after 20 s.
progressbar2 = ttk.Progressbar(Loading2,orient=HORIZONTAL, length=1000, mode='determinate')
Label(Loading2,text="Establishing connection",font=Big_font).pack(side=TOP)
progressbar2.pack(fill=BOTH, side=TOP,pady=125)
progressbar2.start()
Label(Loading2, text='•••').pack(side=BOTTOM)
Loading2.after(20000, lambda:raise_frame(Loading3))
##Loading3: auto-advances to the Amount page after 30 s.
progressbar3 = ttk.Progressbar(Loading3,orient=HORIZONTAL, length=1000, mode='determinate')
Label(Loading3,text="Connecting to CoinmarketCap API",font=Big_font).pack(side=TOP)
progressbar3.pack(fill=BOTH, side=TOP,pady=125)
progressbar3.start()
Label(Loading3, text='••••').pack(side=BOTTOM)
Loading3.after(30000, lambda:raise_frame(Amount))
#Amount: investment amount picker.
Label(Amount,text="Please choose the amount you want to invest").pack()
Label(Amount, text='•••••').pack(side=BOTTOM)
capacity1 = StringVar(Amount)
capacity1.set("1000")  # default value
ChosenAmount = OptionMenu(Amount, capacity1, "2000", "3000", "4000")
ChosenAmount.pack()
Label(Amount,text="Or type a custom value").pack(padx=5,pady=5)
# Show the start page and enter the Tk main loop (blocks until window close).
raise_frame(StartPage)
root.iconbitmap('window.ico')
root.mainloop()
#####################################BACKEND##############################################################################################
# Ticker symbol per supported currency; used to locate each CSV dataset file.
curr = {'Bitcoin Cash' : 'bch', 'Bitcoin' : 'btc', 'Dash' : 'dash', 'Decred' : 'dcr', 'Dogecoin' : 'doge',
        'Ethereum Classic' : 'etc', 'Ethereum' : 'eth', 'Litecoin' : 'ltc', 'PIVX' : 'pivx', 'Vertcoin' : 'vtc',
        'NEM' : 'xem', 'Monero' : 'xmr', 'Zcash' : 'zec'}
# Investment budget in USD — also the knapsack capacity.
capacity = 1000
# CoinMarketCap API client.
cp = Market()
def PriceFinder(*args, **kwargs):
    """Fetch the live USD price of every currency the user chose (passed
    positionally) and hand the name->price mapping to extract()."""
    currencies = list(args)
    prices = []
    for name in currencies:
        # One ticker call per currency; price_usd comes back as a string.
        real_time_price = cp.ticker(currency=str(name), limit=1, convert='USD')
        prices.append(real_time_price[0]['price_usd'])
    currency_dict = dict(zip(currencies, prices))
    extract(currency_dict)
def extract(price_dictionary):
    """Compute each chosen currency's 30-day mean price from its CSV file
    and pass the means plus live prices on to performance()."""
    mean_of_30_Days = {}
    curr_list = list(price_dictionary.keys())
    for i in curr_list:
        # skiprows drops rows 1..336 so only the most recent rows remain;
        # NOTE(review): .strip('') is a no-op and dividing by 30 assumes
        # exactly 30 remaining rows — confirm the dataset length.
        data = pd.read_csv('DataSets\\'+curr[i].strip('')+'.csv',skiprows = range(1,337))
        prices = data.price_USD.tolist()
        summation = sum(prices)/30
        mean_of_30_Days[i] = summation
    performance(mean_of_30_Days,price_dictionary)
def performance(mean_of_30_Days, price_dictionary):
    """Compute each currency's 30-day performance (live price minus the
    30-day mean) and forward it to WeightDistributor()."""
    names = [str(k) for k in mean_of_30_Days.keys()]
    means = [float(v) for v in mean_of_30_Days.values()]
    prices = [float(v) for v in price_dictionary.values()]
    # zip truncates to the shorter list, matching the original min() bound.
    gains = [price - mean for price, mean in zip(prices, means)]
    performance_Dictionary = dict(zip(names, gains))
    WeightDistributor(performance_Dictionary, price_dictionary)
def WeightDistributor(performance_Dictionary,price_dictionary):
    """Derive a knapsack weight per currency from its price and 30-day
    gain/drop, then invoke the knapsack solver."""
    weights = []
    Pf = [float(i[1]) for i in price_dictionary.items()]  # real time price
    # NOTE(review): the result of this dict() call is discarded — dead code.
    dict((k,float(v)) for k,v in price_dictionary.items())
    Gh = [float(i[1]) for i in performance_Dictionary.items()]  # gain-drop in previous 30 days
    #print(Gh)
    # NOTE(review): C is built but never used.
    C = [str(i[0]) for i in performance_Dictionary.items()]
    #print(C)
    for i in tuple(zip(Pf,Gh)):
        # Weight formula: (gain*price)/(price-gain) mod 30 — divides by zero
        # when price == gain; TODO confirm the intended formula.
        temp_var = ((i[1]*i[0])/(i[0]-i[1]))%30
        weights.append(round(temp_var,3))
    #print(weights)
    FractionalKnapsack(capacity,weights,Pf,price_dictionary)  # Pf = values
def FractionalKnapsack(capacity: int, weights: list, Pf: list,price_dictionary ):
    """Despite the name, this is the classic 0/1 knapsack DP: pick a subset
    of currencies (value = price, weight = derived weight) maximising total
    value under *capacity*, then print the chosen currencies."""
    # DP works on integers, so truncate values and weights.
    Pf = [int(i) for i in Pf]
    weights = [int(i) for i in weights]
    #print(Pf)
    rows = len(Pf) + 1
    cols = capacity + 1
    # Prepend dummy zeros so items are 1-indexed for convenience.
    Pf = [0] + Pf[:]
    weights = [0] + weights[:]
    # dp_array[i][j] = best value using items 1..i with weight budget j.
    dp_array = [[0 for i in range(cols)] for j in range(rows)]
    # Row 0 and column 0 stay zero (no items / no capacity).
    for i in range(1, rows):
        for j in range(1, cols):
            # Item i does not fit in budget j.
            if j - weights[i] < 0:
                dp_array[i][j] = dp_array[i - 1][j]
            # Otherwise: best of skipping item i or taking it.
            else:
                dp_array[i][j] = max(dp_array[i - 1][j], Pf[i] + dp_array[i - 1][j - weights[i]])
    # dp_array[rows-1][cols-1] now holds the maximum achievable value.
    values_chosen = []
    i = rows - 1
    j = cols - 1
    # Backtrack through the table to recover which items were taken.
    while i > 0 and j > 0:
        # A change versus the row above means item i was taken.
        if dp_array[i][j] != dp_array[i - 1][j]:
            values_chosen.append(Pf[i])
            # Spend item i's weight and move to the previous row.
            j = j - weights[i]
            i = i - 1
        else:
            i = i - 1
    print("Based on previous 30 days data max Profit that can be made is {} by dividing {} in the following currencies:".format(dp_array[rows - 1][cols - 1],capacity))
    print(values_chosen)
    # Map chosen (truncated) values back to currency names by price match;
    # NOTE(review): compares int-truncated values against float prices, so
    # names may fail to print — confirm intended.
    for i in values_chosen:
        for key, value in price_dictionary.items():
            if i == value:
                print(key)
# Entry point: analyse these three currencies end-to-end.
PriceFinder('Bitcoin','Ethereum','Dash')
| Cryptovisor/CryptoVisor | CryptoVisor-main.py | CryptoVisor-main.py | py | 6,924 | python | en | code | 2 | github-code | 50 |
21452619484 | # -*- coding: utf-8 -*-
import datetime
import json
import logging
import os.path
import Var
from Broker import Rest
logger = logging.getLogger()
class indicateur(object):
    """Indicator engine driven by a tick clock.

    temps   : datetime of the incoming tick
    Midpoint: bid/ask midpoint price

    On each tick it updates the clock flags then refreshes a 9-period
    moving average and a 500-period EMA on the minute scale.
    """

    def __init__(self, temps, Midpoint):
        self.MinUpgrade = False
        self.HeureUpgrade = False
        EmaPeriode = 500
        # NOTE(review): echelle is assigned but unused here.
        echelle = "heures"  # or "minutes"
        # NOTE(review): `except()` catches nothing (empty tuple), so this
        # try/except is inert — likely meant `except Exception`.
        try:
            self.chronoMinute(temps)
            self.movingAverage("minutes", temps, Midpoint, 9)
            self.ema("minutes", temps, Midpoint, EmaPeriode)
        except():
            raise ValueError

    def preloadMa(self, echelle, temps, periode):
        """Seed Var.ma with *periode* historical MA values from the cached
        daily JSON file (fetched from the broker when missing)."""
        # Timezone offset compensation (+2h) — TODO confirm correctness.
        temps = temps + datetime.timedelta(0, (2 * 60 * 60))
        daybefore = temps - \
            datetime.timedelta(1, 0) + datetime.timedelta(0, (2 * 60 * 60))
        # Zero-pad the month for the file-name format.
        if temps.month < 10:
            mois = "0" + str(temps.month)
            mois0 = "0" + str(daybefore.month)
        else:
            mois = str(temps.month)
            mois0 = str(daybefore.month)
        pathname = Var.marketId + str(daybefore.year) + "-" + mois0 + "-" + str(
            daybefore.day) + "_" + str(temps.year) + "-" + mois + "-" + str(temps.day) + ".json"
        if not os.path.isfile("Data/json/" + pathname):
            # No cached file: fetch the price sample from the broker REST API.
            dateF = str(daybefore).replace(" ", "T")
            dateT = str(temps).replace(" ", "T")
            sample = Rest.ig().retrieve(Var.epic, dateF, dateT)
        else:
            with open("Data/json/" + pathname, 'r', encoding="utf-8") as outfile:
                sample = json.load(outfile)
        iterstart = int(sample["metadata"]["size"]) - (periode)
        logger.info("preload start index :{}".format(iterstart))
        if iterstart < 1:
            # Check that there is enough data.
            logger.error("erreur pas assez de donnée")
        else:
            clef = "ma" + echelle + str(periode)
            if clef in sample["prices"][iterstart].keys():
                # MA values already cached inside the sample: reuse them.
                for x in range(iterstart, len(sample["prices"])):
                    Var.ma["valeurs"].append(
                        round(sample["prices"][x][clef], 6))
            else:
                Var.ma["valeurs"].append(
                    sample["prices"][iterstart - 1]["closePrice"]["bid"])
                for x in range(iterstart, len(sample["prices"])):
                    # moving avg calculus
                    rf = sample["prices"][x]
                    Mid = round((rf["closePrice"]["bid"] +
                                 rf["closePrice"]["ask"]) / 2, 6)
                    Var.prixpourma.append(Mid)
                    # logger.debug(str(Var.prixpourma))
                    a = float()
                    for x in range(0, len(Var.prixpourma)):
                        a += float(Var.prixpourma[x])
                    y = a / periode
                    Var.ma["valeurs"].append(y)
            logger.debug("MA: {}".format(Var.ma["valeurs"][0]))
            # Persist the (possibly augmented) sample back to the cache file.
            with open("Data/json/" + pathname, 'w', encoding="utf-8") as outfile:
                json.dump(sample, outfile, ensure_ascii=False)

    def preloadEma(self, echelle, temps, EmaPeriode):
        """Seed Var.ema with *EmaPeriode* historical EMA values, computing
        and caching them into the daily JSON file when absent."""
        # Timezone offset compensation (+2h) — TODO confirm correctness.
        temps = temps + datetime.timedelta(0, (2 * 60 * 60))
        daybefore = temps - \
            datetime.timedelta(1, 0) + datetime.timedelta(0, (2 * 60 * 60))
        if temps.month < 10:
            mois = "0" + str(temps.month)
            mois0 = "0" + str(daybefore.month)
        else:
            mois = str(temps.month)
            mois0 = str(daybefore.month)
        pathname = Var.marketId + str(daybefore.year) + "-" + mois0 + "-" + str(
            daybefore.day) + "_" + str(temps.year) + "-" + mois + "-" + str(temps.day) + ".json"
        if not os.path.isfile("Data/json/" + pathname):
            dateF = str(daybefore).replace(" ", "T")
            dateT = str(temps).replace(" ", "T")
            sample = Rest.ig().retrieve(Var.epic, dateF, dateT)
        else:
            with open("Data/json/" + pathname, 'r', encoding="utf-8") as outfile:
                sample = json.load(outfile)
        iterstart = int(sample["metadata"]["size"]) - (EmaPeriode)
        logger.info("preload start index :{}".format(iterstart))
        if iterstart < 2:
            # Check that there is enough data.
            logger.error("erreur pas assez de donnée")
        else:
            clef = "Ema" + echelle + str(EmaPeriode)
            if clef in sample["prices"][iterstart].keys():
                # Cached EMA values available: reuse them.
                for x in range(iterstart, len(sample["prices"])):
                    Var.ema["valeurs"].append(
                        round(sample["prices"][x][clef], 6))
            else:
                # Seed with a close price, then roll the EMA forward.
                Var.ema["valeurs"].append(
                    sample["prices"][iterstart - 2]["closePrice"]["bid"])
                for x in range(iterstart - 1, len(sample["prices"])):
                    rf = sample["prices"][x]
                    Mid = (
                        rf["closePrice"]["bid"] +
                        rf["closePrice"]["ask"]) / 2
                    t = Var.ema["valeurs"][len(Var.ema["valeurs"]) - 1]
                    a = 2 / (int(EmaPeriode) + 1)
                    y = round((a * Mid + (1 - a) * t), 6)
                    Var.ema["valeurs"].append(y)
                    # Cache the computed EMA on the sample for next time.
                    sample["prices"][x][clef] = y
            with open("Data/json/" + pathname, 'w', encoding="utf-8") as outfile:
                json.dump(sample, outfile, ensure_ascii=False)

    def heikinAshi(self):
        # Not implemented yet.
        pass

    def heikinAshiCount(self):
        # Not implemented yet.
        pass

    def movingAverage(self, echelle, temps, Midpoint, periode):
        """Update the simple moving average in Var.ma for the given time
        scale; preloads history on the first call for a new scale."""
        if Var.ma["echelle"] != echelle:
            # Scale changed (or first run): preload history, falling back to
            # seeding everything with the current midpoint.
            Var.ma["echelle"] = echelle
            try:
                self.preloadMa(echelle, temps, periode)
            except Exception as e:
                logger.error("erreur pas de preload " + str(e))
                Var.ma["valeurs"].append(Midpoint)
                for x in range(0, periode):
                    Var.prixpourma.append(Midpoint)
            Var.maBacktest.append(
                Var.ma["valeurs"][len(Var.ma["valeurs"]) - 1])
        else:
            # Rolling average over the price window.
            a = float()
            z = Var.prixpourma
            for x in range(0, periode):
                a += z[x]
            y = round(a / periode, 6)
            # logger.debug("z num: {} ,ma: {}".format(len(z),y))
            if self.HeureUpgrade and echelle == "heures":
                Var.ma["valeurs"].append(y)
                Var.prixpourma.append(Midpoint)
                Var.prixpourma.pop(0)
            elif self.MinUpgrade and echelle == "minutes":
                Var.ma["valeurs"].append(y)
                Var.prixpourma.append(Midpoint)
                Var.prixpourma.pop(0)
                Var.maBacktest.append(
                    Var.ma["valeurs"][len(Var.ma["valeurs"]) - 1])
            elif self.MinUpgrade and echelle != "minutes":
                Var.maBacktest.append(
                    Var.ma["valeurs"][len(Var.ma["valeurs"]) - 1])

    def ema(self, echelle, temps, Midpoint, periode):
        """Update the exponential moving average in Var.ema for the given
        time scale; preloads history on the first call for a new scale."""
        decalage = 5
        if Var.ema["echelle"] != echelle:
            # First price value == ema(1)
            # print("debug 1ier tour")
            Var.ema["echelle"] = echelle
            try:
                self.preloadEma(echelle, temps, periode)
            except Exception as e:
                Var.ema["valeurs"][0] = round(float(Midpoint), 6)
            Var.emaBacktest.append(Var.ema["valeurs"][len(
                Var.ema["valeurs"]) - 1])  # sent to the csv
        else:
            # x: bid/ask midpoint
            x = float(Midpoint)
            t = float(Var.ema["valeurs"][len(Var.ema["valeurs"]) - 1])
            # n = number of periods
            n = int(periode)
            # a: alpha weight
            a = 2 / (n + 1)
            y = round((a * x + (1 - a) * t), 6)
            if self.HeureUpgrade and echelle == "heures":
                Var.ema["periode"] = periode
                Var.ema["valeurs"].append(y)
                lastiter = len(Var.ema["valeurs"]) - 1
                # NOTE(review): EmaDecalee (lagged EMA) is computed but unused.
                if lastiter >= decalage:
                    EmaDecalee = Var.ema["valeurs"][lastiter - decalage]
            elif self.MinUpgrade and echelle == "minutes":
                Var.ema["periode"] = periode
                Var.ema["valeurs"].append(y)
                lastiter = len(Var.ema["valeurs"]) - 1
                if lastiter >= decalage:
                    EmaDecalee = Var.ema["valeurs"][lastiter - decalage]
                Var.emaBacktest.append(Var.ema["valeurs"][len(
                    Var.ema["valeurs"]) - 1])  # sent to the csv
            elif self.MinUpgrade and echelle != "minutes":
                # Not sure about this; may be useful in streaming mode.
                Var.emaBacktest.append(Var.ema["valeurs"][len(
                    Var.ema["valeurs"]) - 1])  # sent to the csv

    def chronoMinute(self, temps):
        """Clock: sets MinUpgrade / HeureUpgrade when the minute or hour
        rolls over relative to the previous tick."""
        # Var.lastDatetime is created at startup.
        # delta = temps - Var.lastDatetime
        if temps.minute != Var.lastDatetime.minute:
            Var.minute = Var.minute + 1
            Var.lastMinute = Var.lastDatetime.minute
            # print("min:",Var.Minute)
            # print("lastMinute:",Var.lastMinute)
            self.MinUpgrade = True  # Trigger Minute data Upgrade
            if temps.hour != Var.lastDatetime.hour:
                Var.lastHeure = Var.lastDatetime.hour
                print("Heure:", temps.hour)
                self.HeureUpgrade = True  # Trigger Hourly data upgrade
        else:
            # print(delta)
            pass
        Var.lastDatetime = temps
| boulton/IGTradingAlgo | Algorithme/Indicateur.py | Indicateur.py | py | 9,911 | python | en | code | 4 | github-code | 50 |
36694404235 | #!/usr/bin/env python3
import getpass ## required if prompting for XIQ crednetials
import json
import requests
from colored import fg, bg, attr
############################################################################################################
## written by: Mike Rieben
## e-mail: mrieben@extremenetworks.com
## date: November, 2022
## version: 1.1
############################################################################################################
## This script will run against access point(s) using provided hostname(s). It will determine if there's excessive
## multicast in the environment on WiFi0 & WiFi1 interfaces. If desired it can implement rate limiting commands.
############################################################################################################
## ACTION ITEMS / PREREQUISITES
## Please read the readme.md file in the package to ensure you've completed the desired settings below
############################################################################################################
## <- The Pound character in front of a row will comment out the code to be skipped from runtime
## - ## two pound represents a note about that code. Not executable code.
## - # one pound represents code that is commented out and typically used for troubleshooting
############################################################################################################
## AUTHENTICATION Options: Uncomment the section you wish to use whie other sections are commented out
## 1) Static Username and password, must have empty token variable (3 lines below)
XIQ_token = ""
XIQ_username = "username@contoso.com"  # Enter your ExtremeCloudIQ Username "xxxx"
XIQ_password = "xxxxxxxxxxxx"  # Enter your ExtremeCLoudIQ password "xxxx"
## 2) Prompt user to enter credentials, must have empty token variable (4 lines below)
# print ("Enter your XIQ login credentials ")
# XIQ_token = ""
# XIQ_username = input("Email: ")
# XIQ_password = getpass.getpass("Password: ")
## 3) TOKEN generation from api.extremecloudiq.com (Swagger): "token". Must have empty username and password variables (3 lines below)
# XIQ_token = "xxxxxxxxx"
# XIQ_username = ""
# XIQ_password = ""
## Authentication Options END
## Device Hostnames: multiple hostnames comma separated in ["xxxx","xxxx"];
## a single hostname is ["xxxx"] without commas.
apHostname = ["Online-AP","Offline-AP","Bad-Hostname"]
##************************* No edits below this line ********************************************************************************
## Global Variables
URL = "https://api.extremecloudiq.com"  ## XIQ's API portal
headers = {"Accept": "application/json", "Content-Type": "application/json"}
payload = json.dumps({"username": XIQ_username, "password": XIQ_password})  ## prepare the payload in json format for XIQ credentials
# NOTE(review): this module-level POST targets the API root (no /login path)
# before GetaccessToken() runs and its response is unused — confirm it is needed.
response = requests.post(URL, headers=headers, data=payload)
color0 = fg(255)  ## DEFAULT color; palette: https://pypi.org/project/colored/
color1 = fg(1)  ## RED
# color2 = fg(2) + bg(255)  ## GREEN with a background
color2 = fg(2)  ## GREEN
color3 = fg(11)  ## YELLOW
reset = attr('reset')
wifiMC = 0
##Function: Use provided credentials to acquire the access token
def GetaccessToken(XIQ_username, XIQ_password):
    """Log in to XIQ and store the bearer token in the global *headers*.

    Returns 0 on success; raises TypeError with a descriptive message on
    any failure (no response, non-200 status, or missing token).
    """
    url = URL + "/login"
    payload = json.dumps({"username": XIQ_username, "password": XIQ_password})
    response = requests.post(url, headers=headers, data=payload)
    if response is None:
        log_msg = "ERROR: Not able to login into ExtremeCloudIQ - no response!"
        # logging.error(log_msg)
        raise TypeError(log_msg)
    if response.status_code != 200:
        log_msg = f"Error getting access token - HTTP Status Code: {str(response.status_code)}"
        # Best effort: append the API's error message when the body is JSON.
        try:
            data = response.json()
            if "error_message" in data:
                log_msg += f"\n\t{data['error_message']}"
        except:
            log_msg += ""
        # logging.error(f"{log_msg}")
        raise TypeError(log_msg)
    data = response.json()
    if "access_token" in data:
        # print("Logged in and got access token: " + data["access_token"])
        # All subsequent API calls reuse this Authorization header.
        headers["Authorization"] = "Bearer " + data["access_token"]
        return 0
    else:
        log_msg = "Unknown Error: Unable to gain access token"
        # logging.warning(log_msg)
        raise TypeError(log_msg)
##Function: Get device ID from provided AP hostname
def GetDeviceID(apHostname):
    """Resolve one AP hostname to its XIQ device id.

    Returns the numeric device id, or "" when XIQ has no device with that
    hostname (a warning is printed). Only the first match is used.
    Exits the program when apHostname is empty/falsy.
    """
    if not apHostname:
        print("Must enter valid hostname(s), aborting")
        raise SystemExit
    else:
        page = 1
        pageSize = 10
        # pageCount = 1
        # firstCall = True
        ##API Call: https://api.extremecloudiq.com/devices?page=1&limit=10&hostnames=Site1-Prod-MstBed&fields=ID&deviceTypes=REAL
        url = URL + "/devices?page=" + str(page) + "&limit=" + str(pageSize) + "&hostnames=" + str(apHostname) + "&fields=ID&deviceTypes=REAL"
        response = requests.get(url, headers=headers, verify = True)
        jsonDump = response.json()
        # print (jsonDump)
        # print (jsonDump['data'])
        if jsonDump['data'] == []: #test if the 'data' field is empty due to not finding a device ID from Hostname
            print(color1 + ("Hostname: " + (color3 + (apHostname)) + (color1 + " - Device not found in XIQ. Check your Hostname spelling.")))
            print(color0)
            deviceID = ""
        else:
            deviceID = jsonDump['data'][0]['id']
            print(color0 + ("Hostname: " + (color3 + (apHostname)) + (color0 + ", Device ID: ") + str(deviceID)))
        return deviceID
def SendCLIwifi0(deviceID):
    """Read the wifi0 multicast frame counters from the device via XIQ's CLI proxy.

    Returns a list of the integers found in the CLI output; by the command's
    output format the first value is Rx multicast frames and the second is Tx.
    """
    url = URL + "/devices/" + str(deviceID) + "/:cli"
    ## f"https://api.extremecloudiq.com/devices/{device_id}/:cli"
    payload = json.dumps([
    "show int wifi0 _counter | inc multicast"
    ])
    print(color0 + ("\nGathering multicast counters for WiFi0 interface..."))
    wifi0response = requests.request("POST", url, headers=headers, data=payload)
    jsonDump = wifi0response.json()
    # print(jsonDump)
    wifi0DeviceOutput = jsonDump['device_cli_outputs']
    # print(wifi0DeviceOutput)
    wifi0b = wifi0DeviceOutput[str(deviceID)][0]['output']
    print(wifi0b)
    multicastVals0 = []
    # pull every bare integer out of the raw CLI text
    for z in wifi0b.split():
        if z.isdigit():
            multicastVals0.append(int(z))
    # print("First value is Rx, second is Tx:", multicastVals0)
    return multicastVals0
def SendCLIwifi1(deviceID):
    """Read the wifi1 multicast frame counters from the device via XIQ's CLI proxy.

    Same contract as SendCLIwifi0 but for the wifi1 radio: returns the list of
    integers found in the CLI output ([Rx, Tx] by output format).
    """
    url = URL + "/devices/" + str(deviceID) + "/:cli"
    ## f"https://api.extremecloudiq.com/devices/{device_id}/:cli"
    payload = json.dumps([
    "show int wifi1 _counter | inc multicast"
    ])
    print(color0 + ("\nGathering multicast counters for WiFi1 interface..."))
    wifi1response = requests.request("POST", url, headers=headers, data=payload)
    jsonDump = wifi1response.json()
    # print(jsonDump)
    wifi1DeviceOutput = jsonDump['device_cli_outputs']
    # print(wifi0DeviceOutput)
    wifi1b = wifi1DeviceOutput[str(deviceID)][0]['output']
    print(wifi1b)
    multicastVals1 = []
    # pull every bare integer out of the raw CLI text
    for z in wifi1b.split():
        if z.isdigit():
            multicastVals1.append(int(z))
    # print("First value is Rx, second is Tx:", multicastVals1)
    return multicastVals1
##Send Multicast rate limit commands via CLI
def SendCLIMcRateLimit(deviceID):
    """Push the eth0 multicast rate-limit (500) config to the device and save it.

    Fire-and-forget: the CLI response is not inspected.
    """
    url = URL + "/devices/" + str(deviceID) + "/:cli"
    ## f"https://api.extremecloudiq.com/devices/{device_id}/:cli"
    payload = json.dumps([
    "interface eth0 rate-limit multicast 500","interface eth0 rate-limit multicast enable","save config"
    ])
    print(color0 + ("\nSending CLI commands..."))
    cliResponse = requests.request("POST", url, headers=headers, data=payload)
    # jsonDump = cliResponse.json()
    # print(jsonDump)
##Function: determine if device is connected to XIQ
def CheckDeviceConnectedStatus(deviceID):
    """Return XIQ's 'connected' flag for the device (True=online, False=offline).

    Prints a warning when the device is offline; the caller skips such devices.
    """
    ##API Call: 'https://api.extremecloudiq.com/devices/277751240294441?views=BASIC'
    url = URL + "/devices/" + str(deviceID) + "?views=BASIC"
    response = requests.get(url, headers=headers, verify = True)
    jsonDump = response.json()
    # print (jsonDump)
    deviceConnected = jsonDump['connected'] ##Returns True if connected, False if disconnected
    if deviceConnected == False:
        print(color1 + ("Device is offline in XIQ and will be skipped..."))
        print(color0)
    return deviceConnected
##Function: do the math
def _report_interface_counters(vals):
    """Evaluate one radio's [Rx, Tx] multicast counters against the thresholds.

    Prints the per-threshold verdicts and sets the module flag ``wifiMC`` to 1
    when either counter exceeds its limit (Rx > 500,000 or Tx > 8,000,000).
    """
    global wifiMC
    rx = vals[0]
    tx = vals[1]
    if rx < 500000:
        print(color2 + ("Great! Rx multicast does not exceed 500,000: " + str(rx) + " data frames reported"))
    else:
        print(color1 + ("***WARNING*** Rx multicast EXCEEDS 500,000: " + str(rx) + " data frames reported"))
        wifiMC = 1
        _report_storm_source(rx, tx)
    if tx < 8000000:
        print(color2 + ("Great! Tx multicast does not exceed 8,000,000: " + str(tx) + " data frames reported"))
    else:
        print(color1 + ("***WARNING*** Tx multicast EXCEEDS 8,000,000: " + str(tx) + " data frames reported"))
        wifiMC = 1
        _report_storm_source(rx, tx)
def _report_storm_source(rx, tx):
    """Print whether the storm source looks LAN-side or WLAN-side.

    Heuristic: when Rx is more than 10% lower than Tx the traffic originates
    on the wired side; otherwise it likely comes from a wireless client.
    """
    if (tx * 0.90) >= rx:
        print("Rx multicast data frames are more than 10% lower than Tx data frames therefor the source of the storm is likey a device on the LAN.")
    else:
        print("Rx & Tx quantity are within 10% of each other therefor the source of the storm is likey a device on the WLAN.")
def DoTheMath(deviceConnected,deviceID):
    """Check both radios' multicast counters and optionally apply rate limiting.

    Skips disconnected devices. When either radio exceeds a threshold the user
    is asked whether to push the eth0 rate-limit CLI to the device.
    """
    if deviceConnected == True:
        global wifiMC ##This is required to change the variable value below since it was originally defined in gloabl variables
        # FIX: reset the flag for THIS device. Previously wifiMC stayed 1 once
        # any earlier hostname in the loop exceeded a threshold, so a healthy
        # AP processed afterwards was wrongly prompted for rate limiting.
        wifiMC = 0
        multicastVals0 = SendCLIwifi0(deviceID) ##Function: Send CLI to device ID
        _report_interface_counters(multicastVals0)
        multicastVals1 = SendCLIwifi1(deviceID) ##Function: Send CLI to device ID
        _report_interface_counters(multicastVals1)
        if wifiMC == 1:
            cliYN = input(color0 + ("\nDo you want to implement the rate limit CLI onto the Eth0 interface? [Y/N]: "))
            if cliYN == "Y" or cliYN == "y":
                SendCLIMcRateLimit(deviceID)
                print("CLI Complete! See Readme.md file for more information on implementing Supplemental CLI to ensure rate limits persist with config updates.")
            else:
                print(color0 + ("Skipped adding CLI commands."))
        else:
            print(color2 + ("\nMulticast is within tollerance. No further action required.\n"))
        print(color0)
##This is the start of the program
def main():
    """Authenticate against XIQ (token or credentials) then process each AP hostname."""
    print("\n") ##print a blank row then carriage return
    ##Test if a token is provided. If not, use credentials.
    if not XIQ_token:
        try:
            login = GetaccessToken(XIQ_username, XIQ_password)
        except TypeError as e:
            print(e)
            raise SystemExit
        except:
            log_msg = "Unknown Error: Failed to generate token"
            print(log_msg)
            raise SystemExit
    else:
        headers["Authorization"] = "Bearer " + XIQ_token
    for count in apHostname: ##This is the loop for each hostname entered in the global variables above
        # print (count) ##print each hostname in the list
        deviceID = GetDeviceID(count) ##Function: Acquire device ID from a provided hostnames
        if deviceID != "":
            deviceConnected = CheckDeviceConnectedStatus(deviceID) ##Function: Determine if device is connected to XIQ
            DoTheMath(deviceConnected,deviceID)
##Python will see this and run whatever function is provided: xxxxxx(), should be the last item in this file
if __name__ == '__main__':
    main() ##Go to main function
| MibenSmmod/Multicast-Detector | XIQ-MulticastDetector.py | XIQ-MulticastDetector.py | py | 13,717 | python | en | code | 0 | github-code | 50 |
25417265230 | """Data models"""
import flask_wtf
import wtforms
UPLOAD_FOLDER = r'uploads/'
ALLOWED_EXTENSIONS = {'py'}
class Code():
    """Value holder for a piece of user-submitted source code."""
    def __init__(self, code) -> None:
        """Keep *code* on the instance, unchanged."""
        self.code = code
class FormFile(flask_wtf.FlaskForm):
    """Upload form for the Python file to parse."""
    # FIX: render_kw key was 'class ' (trailing space), which never renders a
    # usable HTML class attribute on the input element.
    file = wtforms.FileField('Python file to parse',
                             id='formFile',
                             validators=[wtforms.validators.InputRequired()],
                             render_kw={'class': 'form-control',
                                        'placeholder': 'python file to parse',
                                        'aria_label': 'python file to parse'})
    submit = wtforms.SubmitField(
        'Submit', render_kw={'class': 'btn btn-info'})
class FormField(flask_wtf.FlaskForm):
    """Form for entering a single field/function name to process."""
    field = wtforms.StringField(
        'Field',
        render_kw={'class': 'form-control',
                   'placeholder': 'field to parse',
                   'aria_label': 'field to parse'})
    submit = wtforms.SubmitField(
        'Process', render_kw={'class': 'btn btn-info'})
user_code = Code('')
| Benzy-Louis/pyflowchart-gui | pyflowchart_web/app/models.py | models.py | py | 1,134 | python | en | code | 0 | github-code | 50 |
35787425005 | from linguist.models import Word, GlobalWord, Language, Category
from random import randint, choice
from linguist.utils import LinguistTranslator
class LinguistHQ:
    """Domain service for one student: vocabulary management and study games.

    All word queries are scoped to the student's current language.
    """
    def __init__(self, student=None):
        # Cache the full language and global-word querysets for reuse.
        self.student = student
        self.langs = Language.objects.all()
        self.global_words = GlobalWord.objects.all()
        if self.student is None:
            raise ValueError("Student can't be None")
    def get_current_language(self):
        """Return the Language row matching the student's current language name."""
        return self.langs.get(name=self.student.current_language)
    def add_from_global_word(self, global_word=None, alternative_translation=None):
        """Copy a GlobalWord into the student's personal word list.

        alternative_translation, when given, overrides the global translation.
        The global word's categories are copied onto the new word.
        """
        global_word = global_word
        word = Word(
            name=global_word.name,
            translation=alternative_translation if alternative_translation is not None else global_word.translation,
            language=self.get_current_language(),
            student=self.student,
            pronunciation=global_word.pronunciation
        )
        word.save()
        word.category_set.set(global_word.category_set.all())
    def search_word(self, word_name=None):
        """Look a word up among GlobalWords, falling back to machine translation.

        Returns {'global_word_search', 'google_translate_search', 'words'}.
        NOTE: on the fallback path 'words' is a translator result object (with
        a .text attribute), not a queryset — callers must check the flags.
        """
        language = self.get_current_language()
        words = self.global_words.filter(name=word_name, language=language)
        global_word_search = False
        google_translate_search = False
        home_language = self.langs.get(name=self.student.home_language)
        if words.count() == 0:
            translator = LinguistTranslator()
            words = translator.translate(text=word_name, src=language.slug, dest=home_language.slug)
            google_translate_search = True if words.text is not None else False
        else:
            global_word_search = True
        return {'global_word_search': global_word_search, 'google_translate_search': google_translate_search,
                'words': words}
    def add_custom_word(self, **kwargs):
        """Create a student-defined word; returns an error string on bad input, else None.

        Expected kwargs: word_name, translation, pronunciation, category
        (category may be None, in which case the "Default" category is used).
        """
        error = None
        if kwargs['word_name'] is None or kwargs['translation'] is None or kwargs['pronunciation'] is None:
            error = 'You did not choose word or translation or category'
            return error
        category = kwargs['category']
        if kwargs['category'] is None:
            category = Category.objects.get(name="Default")
        word = Word(
            name=kwargs['word_name'],
            translation=kwargs['translation'],
            language=self.get_current_language(),
            student=self.student,
            pronunciation=kwargs['pronunciation']
        )
        word.save()
        word.category_set.add(category)
        word.save()
    def get_student_categories(self):
        """Return the distinct category names used by the student's current-language words."""
        categories = []
        for word in self.student.word_set.filter(language=self.get_current_language()):
            for category in word.category_set.all():
                if category.name not in categories:
                    categories.append(category.name)
        return categories
    def get_all_words(self, category):
        """Return the student's current-language words in the given category."""
        return self.student.word_set.filter(category=category, language=self.get_current_language())
    def get_words(self):
        """Return all of the student's current-language words."""
        return self.student.word_set.filter(language=self.get_current_language())
    def learn_word(self):
        """Return the not-yet-viewed words (the learning queue)."""
        return self.student.word_set.filter(language=self.get_current_language(), viewed=False)
    def get_learned_words(self, category):
        """Return fully learned words in a category (viewed + all four games played)."""
        return self.student.word_set.filter(language=self.get_current_language(), category=category, viewed=True,
                                            played_match=True, played_reversed_match=True, played_typing=True,
                                            played_reversed_typing=True)
    def get_all_learned_words(self):
        """Return all fully learned words for the current language."""
        return self.student.word_set.filter(language=self.get_current_language(), viewed=True, played_match=True,
                                            played_reversed_match=True, played_typing=True, played_reversed_typing=True)
    def delete_word(self, word):
        """Delete the word row."""
        word.delete()
    def update_word_translation(self, word, translation):
        """Replace the word's translation and persist it."""
        word.translation = translation
        word.save()
    def update_viewed_field(self, word, viewed=False):
        """Set and persist the word's 'viewed' flag."""
        word.viewed = viewed
        word.save()
    def update_match_field(self, word, played, reverse=False):
        """Set and persist the (reversed) matching-game progress flag."""
        if reverse is False:
            word.played_match = played
        else:
            word.played_reversed_match = played
        word.save()
    def update_typing_field(self, word, played, reverse=False):
        """Set and persist the (reversed) typing-game progress flag."""
        if reverse is False:
            word.played_typing = played
        else:
            word.played_reversed_typing = played
        word.save()
    def learn_again(self, word):
        """Reset all progress flags so the word re-enters the learning queue."""
        self.update_viewed_field(word, False)
        self.update_match_field(word, False, False)
        self.update_match_field(word, False, True)
        self.update_typing_field(word, False, False)
        self.update_typing_field(word, False, True)
    def play_matching(self, reverse=False):
        """Build one matching-game round from words not yet played (in this direction).

        Returns a message string when no words remain, otherwise the dict from
        create_play_words.
        """
        words = self.student.word_set.filter(language=self.get_current_language())
        words = words.filter(played_match=False) if reverse is False else words.filter(played_reversed_match=False)
        count = words.count()
        if count == 0:
            return 'No words to play matching'
        else:
            return self.create_play_words(words)
    def create_play_words(self, words):
        """Pick an answer word plus up to 3 distinct decoys, answer inserted at a random slot.

        Returns {'words': <options list>, 'answer': <the correct word>}.
        Fewer than 4 options may be returned when random picks collide.
        """
        word = choice(words)
        fake_words = []
        for i in range(0, 3):
            w = choice(words)
            if w not in fake_words and w != word:
                fake_words.append(w)
        length = len(fake_words)
        if length == 0:
            fake_words.append(word)
        else:
            fake_words.insert(randint(0, length), word)
        return {'words': fake_words, 'answer': word}
    def play_typing(self, reverse=False):
        """Return one random word for the typing game, or a message when none remain."""
        words = self.student.word_set.filter(language=self.get_current_language())
        words = words.filter(played_typing=False) if reverse is False else words.filter(played_reversed_typing=False)
        count = words.count()
        if count == 0:
            word = 'No word to play typing'
        else:
            word = words[randint(0, count-1)]
        return word
| stPhoenix/project_osirius | linguist/core.py | core.py | py | 6,139 | python | en | code | 0 | github-code | 50 |
73508785434 | #
# (c) 2023 RENware Software Systems
# cosana system
#
# ============================================
# ADS General Data
#
import json, os, sys
from flask import jsonify
import sqlalchemy as sa
from sqlalchemy.orm import declarative_base, relationship, backref
from libutil.utils import genpk, getusr
import pendulum, datetime
from commons.commons import Commons
from data_models.base_keys_data_models import BaseInfoMixin
from data_models.salesproject_data_models import SalesProject
# get db_sys from Commons or make it agnostic (usualy apply when called from threads or other processes)
# - if NULL inherit class object from a SQLAlchemy BaseModel (chk if req?) and from raw one
# - if ***NOT*** NULL inherit class object from `db.model` and raw one
_PureBaseModel = declarative_base() # declare a general data model (agnostic to Flask)
# When a Flask-SQLAlchemy handle was registered in Commons (normal web-app
# run), inherit from db.Model; otherwise (threads / standalone processes)
# fall back to the plain declarative base.
db = Commons.GetItem("db_sys")
if not db:
    BaseModel = _PureBaseModel
else:
    BaseModel = db.Model
#
# ads_general_data
# note: - _pk and audit columns come from BaseInfoMixin
# ============================================
class ads_general_data(BaseModel, BaseInfoMixin):
    """ADS general data attached 1:1 to a sales project.

    _pk and audit columns come from BaseInfoMixin.
    """
    __versioned__ = {} # activate data history and versioning mechanism of SQLAlchemy Continuum
    __tablename__ = 'ads_general_data'
    sales_project_fk = sa.Column(sa.String(36), sa.ForeignKey('salesproject._pk', ondelete='CASCADE'), nullable=False, unique=True, index=True)
    decisive_event = sa.Column(sa.String(100), nullable=True)
    competition = sa.Column(sa.String(100), nullable=True)
    potential_problems = sa.Column(sa.String(100), nullable=True)
    team_members = sa.Column(sa.String(100), nullable=True)
    client_company = sa.Column(sa.String(100), nullable=True)
    @property
    # getter attribute for scoring (RMAP.003 - ADS scoring)
    def score(self):
        """Count how many of the 5 text fields are filled (non-blank)."""
        _MAX_SCORE = 5 # based on number of items in domain
        # FIX: the columns are nullable, so a field may be None; the previous
        # code called .strip() directly and crashed with AttributeError.
        _fields = (
            self.decisive_event,
            self.competition,
            self.potential_problems,
            self.team_members,
            self.client_company,
        )
        _crt_score = sum(1 for _f in _fields if _f is not None and bool(_f.strip()))
        _tmp = {
            "crt_score": _crt_score,
            "max_score": _MAX_SCORE,
        }
        return _tmp
    def as_dict(self):
        """Serialize base columns plus the ADS-specific fields and the score."""
        # first part of dictionary is with base columns (fresh copy so the
        # mixin's dict is never mutated)
        _tmp = dict(self.get_base_model_as__dict())
        # second part: the specific columns
        _tmp.update({
            "decisive_event": self.decisive_event,
            "competition": self.competition,
            "potential_problems": self.potential_problems,
            "team_members": self.team_members,
            "client_company": self.client_company,
            "score": self.score,
        })
        return _tmp
    def check_health(self):
        # placeholder — no health checks implemented yet
        pass
| petre-renware/cosana | data_models/ads_general_data_data_models.py | ads_general_data_data_models.py | py | 3,126 | python | en | code | 1 | github-code | 50 |
22427357015 | from socket import *
# Simple TCP chat client: connects, sends a fixed message, then echoes
# everything the server sends and forwards user-typed replies.
s = socket ()
mensagem = "https://github.com/mjoaojr/Sistemas-Distribuidos.git"
IP="10.10.13.1"
PORTA=8753
CONVERTER = str.encode(mensagem, "UTF-8")
s.connect((IP,PORTA))
while True:
    s.send(CONVERTER)
    while True:
        # NOTE(review): recv() returns b"" only when the peer CLOSES the
        # connection, so this inner loop blocks until the server hangs up;
        # the send() calls after it will then fail — confirm intended protocol.
        x = s.recv (4096)
        if not x:
            break
        print(x.decode("UTF-8"))
    resposta = input("ESCREVA ")
    c = str.encode(resposta,"UTF-8")
    s.send(c)
# NOTE(review): unreachable — the outer `while True` never terminates.
s.close ()
| M4theusz/Projetos_Faculdade | Python_Sistemas_Distribuidos/con2.py | con2.py | py | 401 | python | en | code | 0 | github-code | 50 |
2745813376 |
from itertools import product
import networkx as nx
# Default demo graph: directed, weighted edges (source, target, resistance).
default_nodes = ['Stephen', 'Sinnie', 'Elaine']
default_edges = [('Stephen', 'Sinnie', 0.2),
                 ('Sinnie', 'Stephen', 0.2),
                 ('Sinnie', 'Elaine', 0.3),
                 ('Elaine', 'Sinnie', 0.2),
                 ('Stephen', 'Elaine', 1.1),
                 ('Elaine', 'Stephen', 1.2)]
class SocialNetworkSimVoltage:
    """Resistance-distance between people in a weighted, directed social graph.

    The graph is treated as a resistor network: person1 is held at potential
    1.0, person2 at 0.0, and node potentials are relaxed iteratively until
    Kirchhoff's current law holds; the effective resistance is then returned.
    """
    def __init__(self, nodes=default_nodes, edges=default_edges, precalculated_distance=True):
        """Build the network.

        :param nodes: node labels
        :param edges: (source, target, weight) triples; weight acts as resistance
        :param precalculated_distance: if True, cache all-pairs shortest-path
            distances up front (faster queries, O(V^2) memory)
        """
        self.initializeClass(nodes, edges)
        self.precalculated_distance = precalculated_distance
        if self.precalculated_distance:
            self.precalculate_distance()
    def initializeClass(self, nodes, edges):
        """Construct the graph and set the relaxation tolerances.

        :param nodes: node labels
        :param edges: weighted edge triples
        """
        self.constructSocialNetwork(nodes, edges)
        self.errTol = 1e-4      # max allowed |in-current - out-current| per node
        self.maxSteps = 10000   # iteration cap for the relaxation loop
    def precalculate_distance(self):
        """Cache weighted shortest-path distances for every ordered node pair.

        Unreachable pairs are stored as float('inf').
        """
        self.distance_matrix = {}
        for person1, person2 in product(self.wordNet.nodes(), self.wordNet.nodes()):
            try:
                self.distance_matrix[(person1, person2)] = float(nx.shortest_path_length(self.wordNet, person1, person2, weight='weight'))
            except nx.exception.NetworkXNoPath:
                self.distance_matrix[(person1, person2)] = float('inf')
    def constructSocialNetwork(self, nodes, edges):
        """Build the directed weighted graph from node and edge lists.

        :param nodes: node labels
        :param edges: (source, target, weight) triples
        """
        self.wordNet = nx.DiGraph()
        self.wordNet.add_nodes_from(nodes)
        self.wordNet.add_weighted_edges_from(edges)
    def checkPersonIrrelevant(self, person, person1, person2):
        """Return True when *person* lies on no simple person1->person2 route.

        A node is relevant iff a path person1->person and a path
        person->person2 exist and share only *person* itself.
        """
        try:
            path1 = nx.algorithms.shortest_path(self.wordNet,
                                                source = person1, target = person,
                                                weight='weight')
            path2 = nx.algorithms.shortest_path(self.wordNet,
                                                source = person, target = person2,
                                                weight='weight')
        except nx.NetworkXNoPath:
            return True
        intersection_paths = list(set(path1) & set(path2))
        return (len(intersection_paths) != 1)
    def initloop(self, person1, person2):
        """Seed node potentials for the relaxation.

        person1 gets 1.0, person2 gets 0.0, irrelevant nodes the sentinel 10.0
        (outside [0, 1], so they are skipped later); other nodes are seeded by
        their relative shortest-path distance to the two terminals.
        """
        volDict = {}
        for node in self.wordNet:
            if node == person1:
                volDict[node] = 1.0
                continue
            elif node == person2:
                volDict[node] = 0.0
                continue
            elif self.checkPersonIrrelevant(node, person1, person2):
                volDict[node] = 10.0
                continue
            if self.precalculated_distance:
                distFrom1 = self.distance_matrix[person1, node]
                distFrom2 = self.distance_matrix[node, person2]
            else:
                distFrom1 = float(nx.shortest_path_length(self.wordNet, person1, node, weight='weight'))
                distFrom2 = float(nx.shortest_path_length(self.wordNet, node, person2, weight='weight'))
            volDict[node] = distFrom2 / (distFrom1 + distFrom2)
        return volDict
    def compute_incurrent(self, node, volDict):
        """Sum current flowing INTO *node* from higher-potential predecessors.

        Only edges from valid potentials (within [0, 1]) contribute; current
        on an edge is (potential difference) / (edge weight as resistance).
        """
        in_current = 0
        for pred in self.wordNet.predecessors(node):
            if (volDict[pred] > volDict[node]) and (volDict[pred] >= 0.0) and (volDict[pred] <= 1.0):
                potDiff = volDict[pred] - volDict[node]
                resEdge = self.wordNet[pred][node]['weight']
                in_current += potDiff / resEdge
        return in_current
    def compute_outcurrent(self, node, volDict):
        """Sum current flowing OUT of *node* to lower-potential successors.

        Mirror of compute_incurrent for outgoing edges.
        """
        out_current = 0
        for succ in self.wordNet.successors(node):
            if (volDict[node] > volDict[succ]) and (volDict[succ] >= 0.0) and (volDict[succ] <= 1.0):
                potDiff = volDict[node] - volDict[succ]
                resEdge = self.wordNet[node][succ]['weight']
                out_current += potDiff / resEdge
        return out_current
    def average_VR(self, node, volDict):
        """Return (sum of V/R, sum of 1/R) over the node's conducting neighbors.

        Their ratio is the potential that balances in- and out-current at the
        node (weighted average of neighbor potentials by conductance).
        """
        sumVOverR = 0.0
        numRecR = 0.0
        for pred in self.wordNet.predecessors(node):
            if (volDict[pred] > volDict[node]) and (volDict[pred] >= 0.0) and (volDict[pred] <= 1.0):
                resEdge = self.wordNet[pred][node]['weight']
                sumVOverR += volDict[pred] / resEdge
                numRecR += 1. / resEdge
        for succ in self.wordNet.successors(node):
            if (volDict[node] > volDict[succ]) and (volDict[succ] >= 0.0) and (volDict[succ] <= 1.0):
                resEdge = self.wordNet[node][succ]['weight']
                sumVOverR += volDict[succ] / resEdge
                numRecR += 1. / resEdge
        return sumVOverR, numRecR
    def getResistance(self, person1, person2, printVol = False):
        """Return the effective resistance between person1 and person2.

        0.0 for identical persons, float('inf') when no path exists; otherwise
        relax node potentials (up to maxSteps sweeps, tolerance errTol) and
        return 1 / (total current leaving person1).

        :param printVol: when True, print the potential dict before and after
        """
        if person1 == person2:
            return 0.0
        if self.precalculated_distance:
            if self.distance_matrix[(person1, person2)] == float('inf'):
                return float('inf')
        else:
            try:
                distTwoWords = nx.shortest_path_length(self.wordNet, person1, person2, weight='weight')
            except nx.exception.NetworkXNoPath:
                return float('inf')
        # initialization
        volDict = self.initloop(person1, person2)
        if printVol:
            print(volDict)
        tempVolDict = {node: volDict[node] for node in self.wordNet}
        # iteration: computing the potential of each node (Jacobi-style sweep:
        # all updates are computed from the previous iteration's potentials)
        converged = False
        step = 0
        while (not converged) and step < self.maxSteps:
            tempConverged = True
            for node in self.wordNet:
                if node == person1:
                    tempVolDict[node] = 1.0
                    continue
                elif node == person2:
                    tempVolDict[node] = 0.0
                    continue
                elif (volDict[node] < 0.0) or (volDict[node] > 1.0):
                    tempVolDict[node] = 10.0
                    continue
                in_current = self.compute_incurrent(node, volDict)
                out_current = self.compute_outcurrent(node, volDict)
                if abs(in_current - out_current) > self.errTol:
                    sumVOverR, numRecR = self.average_VR(node, volDict)
                    tempVolDict[node] = 0.0 if numRecR==0 else sumVOverR / numRecR
                    tempConverged = False
                else:
                    tempConverged = tempConverged and True
            converged = tempConverged
            # value update
            for node in self.wordNet:
                volDict[node] = tempVolDict[node]
            step += 1
        if printVol:
            print(volDict)
        # calculating the resistance: total current injected at person1
        startCurrent = sum([(1.0-volDict[rootsucc])/self.wordNet[person1][rootsucc]['weight']
                            for rootsucc in self.wordNet.successors(person1) if volDict[rootsucc]<=1.0])
        return (1.0 / startCurrent)
    def drawNetwork(self):
        """Render the graph with networkx's default matplotlib drawing."""
        nx.draw(self.wordNet)
| stephenhky/GraphFlow | graphflow/simvoltage/SocialNetworkSimVoltage.py | SocialNetworkSimVoltage.py | py | 7,928 | python | en | code | 8 | github-code | 50 |
15134237198 | import re
from pprint import pprint
import nltk
import spacy
from detoxify import Detoxify
from nltk.sentiment import SentimentIntensityAnalyzer
from nltk.corpus import stopwords
from langdetect import detect
#nltk.download([
# "names",
# "stopwords",
# "state_union",
# "twitter_samples",
# "movie_reviews",
# "averaged_perceptron_tagger",
# "vader_lexicon",
# "punkt",
# "wordnet",
# ])
nlp = spacy.load("en_core_web_sm")
# written_text_by_user: raw message text handed over from the messenger
def get_messege_processing_info(written_text_by_user: str):
    """Run sentiment (VADER) and toxicity (Detoxify) analysis over a message.

    Returns the accumulated scores as a 6-tuple
    (negative, toxic, bad_words, rasizm, threat, comparative), or None when
    the detected language is not English.
    """
    print("start")
    written_text_by_user_for_toxic = written_text_by_user ### Raw text kept for the toxicity model
    # Tokenize once; keep the token list for stop-word filtering below.
    tokens = nltk.word_tokenize(written_text_by_user)
    written_text_by_user = " ".join(tokens)
    # en = english — only English text is processed downstream
    language = detect(written_text_by_user)
    if language != 'en':
        print(language)
        return
    # FIX: the original iterated over the joined *string*, so it filtered
    # single CHARACTERS against the stop-word list (dropping letters such as
    # 'a', 'i', 's'). Filter whole tokens instead. The stop-word list is
    # hoisted into a set for O(1) membership tests.
    english_stopwords = set(stopwords.words('english'))
    filtered_tokens = [word for word in tokens if word not in english_stopwords]
    # Join back into plain text for lemmatization
    filtered_written_text_by_user = " ".join(filtered_tokens)
    # Lemmatization for English via spaCy
    doc = nlp(filtered_written_text_by_user)
    full_ready_text_by_user = " ".join([token.lemma_ for token in doc])
    full_ready_text_by_user = nltk.sent_tokenize(full_ready_text_by_user)
    written_text_by_user_for_toxic = nltk.sent_tokenize(written_text_by_user_for_toxic)
    sia = SentimentIntensityAnalyzer()
    detoxifier = Detoxify('original')  # hoisted: model loading is expensive, do it once
    list_of_dicts = []
    # NOTE(review): assumes both sentence lists have the same length — verify
    for number in range(len(full_ready_text_by_user)):
        dict_of_results = sia.polarity_scores(full_ready_text_by_user[number])
        # Toxicity scores for the corresponding raw sentence
        dict_of_results_tox = detoxifier.predict(written_text_by_user_for_toxic[number])
        dict_of_results.update(dict_of_results_tox)
        list_of_dicts.append(dict_of_results)
    negative = 0
    toxic = 0
    bad_words = 0
    rasizm = 0
    threat = 0
    comparative = 0  # never accumulated; kept for interface compatibility
    for dict_ in list_of_dicts:
        negative += dict_["neg"] - dict_["pos"]
        toxic += dict_["toxicity"] + dict_["severe_toxicity"]
        bad_words += dict_["insult"] + dict_["obscene"]
        rasizm += dict_["identity_hate"]
        threat += dict_["threat"]
    return negative, toxic, bad_words, rasizm, threat, comparative
| Eggoser/SuperFlexMessengerAssistant | backend/app/neural_english.py | neural_english.py | py | 2,979 | python | en | code | 2 | github-code | 50 |
22452949747 | import sys, os
import math
from lxml import etree
import numpy as np
from sklearn.pipeline import Pipeline, FeatureUnion
try: #to ease the use without proper Python installation
import TranskribusDU_version
except ImportError:
sys.path.append( os.path.dirname(os.path.dirname( os.path.abspath(sys.argv[0]) )) )
import TranskribusDU_version
from common.trace import traceln
from tasks import _checkFindColDir, _exit
from tasks.DU_CRF_Task import DU_CRF_Task
from crf.Edge import Edge, SamePageEdge
from crf.Graph_MultiPageXml import Graph_MultiPageXml
from crf.NodeType_PageXml import NodeType_PageXml_type_woText
#from crf.FeatureDefinition_PageXml_std_noText import FeatureDefinition_PageXml_StandardOnes_noText
from crf.FeatureDefinition import FeatureDefinition
from crf.Transformer import Transformer, TransformerListByType
from crf.Transformer import EmptySafe_QuantileTransformer as QuantileTransformer
from crf.Transformer_PageXml import NodeTransformerXYWH_v2, NodeTransformerNeighbors, Node1HotFeatures
from crf.Transformer_PageXml import Edge1HotFeatures, EdgeBooleanFeatures_v2, EdgeNumericalSelector
from crf.PageNumberSimpleSequenciality import PageNumberSimpleSequenciality
from tasks.DU_ABPTableGrid import GridAnnotator
import DU_ABPTableRG
class GridLine_NodeTransformer_v2(Transformer):
    """
    features of a grid line:
    - horizontal or vertical.
    """
    def transform(self, lNode):
        # 6 features per line: one 3-slot group per orientation holding
        # (bias, v, v^2) where v is the normalized position; the unused
        # orientation's group stays all-zero.
        a = np.zeros( ( len(lNode), 6 ) , dtype=np.float64)
        for i, blk in enumerate(lNode):
            page = blk.page
            # wider than tall => horizontal line
            if abs(blk.x2 - blk.x1) > abs(blk.y1 - blk.y2):
                #horizontal
                v = 2*blk.y1/float(page.h) - 1 # to range -1, +1
                a[i,0:3] = (1.0, v, v*v)
            else:
                #vertical
                v = 2*blk.x1/float(page.w) - 1 # to range -1, +1
                a[i,3:6] = (1.0, v, v*v)
        return a
class My_FeatureDefinition_v2(FeatureDefinition):
    """
    Multitype version:
    so the node_transformer actually is a list of node_transformer of length n_class
    the edge_transformer actually is a list of node_transformer of length n_class^2
    We also inherit from FeatureDefinition_T !!!

    Node types: 0 = text block, 1 = grid line.
    Edge types (indexed by source-type, target-type): block-block,
    block-line, line-block, line-line.
    """
    n_QUANTILES = 16  # quantiles used by the in-place QuantileTransformer scalers
    def __init__(self, **kwargs):
        """
        set _node_transformer, _edge_transformer, tdifNodeTextVectorizer
        """
        FeatureDefinition.__init__(self)
        nbTypes = self._getTypeNumber(kwargs)
        print("BETTER FEATURES")
        # Features for text blocks: geometry + neighbor counts + 1-hot flags
        block_transformer = FeatureUnion( [ #CAREFUL IF YOU CHANGE THIS - see cleanTransformers method!!!!
                              ("xywh", Pipeline([
                                                 ('selector', NodeTransformerXYWH_v2()),
                                                 #v1 ('xywh', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
                                                 ('xywh', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling
                                                 ])
                               )
                            , ("neighbors", Pipeline([
                                                 ('selector', NodeTransformerNeighbors()),
                                                 #v1 ('neighbors', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
                                                 ('neighbors', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling
                                                 ])
                               )
                            , ("1hot", Pipeline([
                                                 ('1hot', Node1HotFeatures())  #does the 1-hot encoding directly
                                                 ])
                               )
                                          ])
        grid_line_transformer = GridLine_NodeTransformer_v2()
        # One node transformer per node type (0: blocks, 1: grid lines)
        self._node_transformer = TransformerListByType([block_transformer, grid_line_transformer])
        # Features for block-to-block edges
        edge_BB_transformer = FeatureUnion( [ #CAREFUL IF YOU CHANGE THIS - see cleanTransformers method!!!!
                              ("1hot", Pipeline([
                                                 ('1hot', Edge1HotFeatures(PageNumberSimpleSequenciality()))
                                                 ])
                               )
                            , ("boolean", Pipeline([
                                                 ('boolean', EdgeBooleanFeatures_v2())
                                                 ])
                               )
                            , ("numerical", Pipeline([
                                                 ('selector', EdgeNumericalSelector()),
                                                 #v1 ('numerical', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
                                                 ('numerical', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling
                                                 ])
                               )
                                              ] )
        edge_BL_transformer = DU_ABPTableRG.Block2GridLine_EdgeTransformer()
        edge_LL_transformer = DU_ABPTableRG.GridLine2GridLine_EdgeTransformer()
        # One edge transformer per ordered (source-type, target-type) pair
        self._edge_transformer = TransformerListByType([edge_BB_transformer,
                                                  edge_BL_transformer,
                                                  edge_BL_transformer,  # useless but required
                                                  edge_LL_transformer
                                                  ])
        self.tfidfNodeTextVectorizer = None #tdifNodeTextVectorizer
    def fitTranformers(self, lGraph,lY=None):
        """
        Fit the transformers using the graphs, but TYPE BY TYPE !!!
        Only block nodes, line nodes, block-block edges and block-line edges
        are fitted; the remaining edge transformers are stateless.
        return True
        """
        self._node_transformer[0].fit([nd for g in lGraph for nd in g.getNodeListByType(0)])
        self._node_transformer[1].fit([nd for g in lGraph for nd in g.getNodeListByType(1)])
        self._edge_transformer[0].fit([e for g in lGraph for e in g.getEdgeListByType(0, 0)])
        self._edge_transformer[1].fit([e for g in lGraph for e in g.getEdgeListByType(0, 1)])
        #self._edge_transformer[2].fit([e for g in lGraph for e in g.getEdgeListByType(1, 0)])
        #self._edge_transformer[3].fit([e for g in lGraph for e in g.getEdgeListByType(1, 1)])
        return True
# Monkey-patch the base task module so its main() uses the v2 feature
# definition above, then reuse its entry point unchanged.
DU_ABPTableRG.My_FeatureDefinition = My_FeatureDefinition_v2
main = DU_ABPTableRG.main
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    version = "v.01"
    usage, description, parser = DU_CRF_Task.getBasicTrnTstRunOptionParser(sys.argv[0], version)
#     parser.add_option("--annotate", dest='bAnnotate', action="store_true",default=False, help="Annotate the textlines with BIES labels")
    #FOR GCN
    parser.add_option("--revertEdges", dest='bRevertEdges', action="store_true", help="Revert the direction of the edges")
    parser.add_option("--detail", dest='bDetailedReport', action="store_true", default=False,help="Display detailled reporting (score per document)")
    parser.add_option("--baseline", dest='bBaseline', action="store_true", default=False, help="report baseline method")
    parser.add_option("--line_see_line", dest='iGridVisibility', action="store", type=int, default=2, help="seeline2line: how many next grid lines does one line see?")
    parser.add_option("--block_see_line", dest='iBlockVisibility', action="store", type=int, default=2, help="seeblock2line: how many next grid lines does one block see?")
    # ---
    #parse the command line
    (options, args) = parser.parse_args()
    # ---
    try:
        sModelDir, sModelName = args
    except Exception as e:
        traceln("Specify a model folder and a model name!")
        _exit(usage, 1, e)
    main(sModelDir, sModelName, options)
1713453289 | import array as arr
def predictCoord(point0, point1, timeDiff, gravity=-9.81, drag_factor=0.7):
    """Predict where a projectile will land (y == 0) from two observed positions.

    point0, point1 -- (x, y) position samples, point1 being the more recent;
        y is the height above the landing plane.
    timeDiff -- seconds elapsed between the samples (must be non-zero, or a
        ZeroDivisionError is raised, as before).
    gravity -- vertical acceleration in m/s^2 (negative = downward); was a
        hard-coded -9.81, now a backward-compatible parameter.
    drag_factor -- empirical horizontal damping on the predicted travel; the
        default 0.7 is the original tuned constant.

    Returns an array.array('d') holding the predicted (x, 0.0) landing point.
    """
    # Velocities estimated by finite differences between the two samples.
    xVel = (point1[0] - point0[0]) / timeDiff
    yVel = (point1[1] - point0[1]) / timeDiff
    # Solve y1 + yVel*t + 0.5*g*t^2 = 0.  The discriminant can go negative
    # when a sample lies below the landing plane; ** 0.5 then yields a complex
    # root, so the real parts are taken, exactly as the original did.
    disc_root = (yVel * yVel - 2 * gravity * point1[1]) ** 0.5
    time1 = (-yVel - disc_root) / gravity
    time2 = (-yVel + disc_root) / gravity
    # Prefer the '+' root when it is non-negative, otherwise the '-' root.
    time = time2.real if time2.real >= 0 else time1.real
    xCoord = point1[0] + xVel * time * drag_factor
    return arr.array('d', [xCoord.real, 0.0])
| Yangmchuyue/ITrash | TargetTrajectoryTracker.py | TargetTrajectoryTracker.py | py | 500 | python | en | code | 1 | github-code | 50 |
71826171356 | from pymongo import MongoClient
import json
import jsonpickle
import time
def loadFromDB():
    """Load every stock document from mongodb://localhost:27017 (db ``test``,
    collection ``stocks``) and revive each one with jsonpickle.

    Returns the list of decoded stock objects.
    """
    client = MongoClient('localhost', 27017)
    try:
        coll = client["test"]['stocks']
        # _id is excluded so the JSON round-trip below stays serializable.
        # Each raw document is re-serialized with json so jsonpickle can
        # rebuild the original Python object from its py/object metadata.
        stocks = [jsonpickle.decode(json.dumps(doc))
                  for doc in coll.find({}, {'_id': 0})]
    finally:
        # The original implementation leaked the connection; always close it.
        client.close()
return stocks | fengwu2004/stock | serialization.py | serialization.py | py | 503 | python | en | code | 0 | github-code | 50 |
1247897561 | import webapp2
import datetime
from collections import defaultdict
from google.appengine.ext.webapp import blobstore_handlers
from models import Entry, Attachment, ToDo
from templates import (attachmentTemplate, indexTemplate,
entryEditTemplate, backupTemplate)
from mail import EntryReminder, MailReceiver
from highlight import ShowHighlights, PickMonthlyHighlight
from config import BACKUP_KEY
from happiness import CheckHappiness
_MONTHS = ['Jan', 'Feb', 'Mar', "Apr", 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
class MainPage(webapp2.RequestHandler):
    """Front page: a year/month archive index plus a page of 20 diary entries."""

    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        # 'older_than' is a proleptic Gregorian ordinal day; default to
        # tomorrow so that today's entry is included on the first page.
        older_than = int(self.request.get("older_than",
                                          datetime.datetime.now().date().toordinal() + 1))
        older_than = datetime.date.fromordinal(older_than)
        body = ""
        # Archive header: one line per year, with a link for every month that
        # is at least as recent as the oldest entry (limit=1 -> oldest entry).
        for oldest_entry in Entry.all().order('date').run(limit=1):
            cur_year = datetime.datetime.now().year
            for year in range(oldest_entry.date.year, cur_year + 1):
                body += '<p>%d' % year
                for month in range(12):
                    # Show entries older than the first day of the following month.
                    offset = datetime.datetime(year=year + 1 if month == 11 else year, month=1 + (month + 1) % 12, day=1)
                    if offset.date() > oldest_entry.date:
                        body += ' <a href="/?older_than=%d">%s</a>' % (offset.toordinal(), _MONTHS[month])
                    else:
                        body += ' ' + _MONTHS[month]
        # Up to 20 entries strictly older than the cutoff; 'oldest' tracks the
        # last shown entry's ordinal so the "Older" link continues from there.
        oldest = datetime.datetime.now().date().toordinal() + 1
        for e in Entry.all().filter("date <", older_than).order('-date').run(
            limit=20):
            body += e.render()
            oldest = e.date.toordinal()
        # NOTE(review): "Newer" jumps 41 days past the oldest shown entry as a
        # rough one-page-back approximation -- confirm the magic 41 offset.
        nav = """
<div class='row'>
  <div class='span4 offset4'>
    <a href='/?older_than=%d'>Newer</a> -- <a href='/?older_than=%d'>Older</a>
  </div>
</div>""" % (oldest + 41, oldest)
        body = nav + body + nav
        self.response.out.write(indexTemplate.render({
            'title': 'Home',
            'body': body
        }))
class ShowAttachments(webapp2.RequestHandler):
    """List every stored attachment with its thumbnail and blob key."""

    def get(self):
        rendered = [attachmentTemplate.render({
            'name': a.name,
            'thumbnail': a.thumbnail,
            'key': a.key()
        }) for a in Attachment.all()]
        self.response.out.write(indexTemplate.render({
            'title': 'Attachments',
            'body': ''.join(rendered),
            'active_page': 'attachments'
        }))
class EntryEditForm(webapp2.RequestHandler):
    """Render the edit form for one diary entry, looked up by datastore key."""

    def get(self, key):
        e = Entry.get(key)
        body = entryEditTemplate.render({
            'entry_day': e.date.strftime("%A, %d %B"),  # e.g. "Monday, 01 January"
            'content': e.content,
            'key': e.key()
        })
        self.response.out.write(indexTemplate.render({
            'title': 'Append to Entry',
            'body': body
        }))
class EntryEditSubmit(webapp2.RequestHandler):
    """Persist an edited entry body, then bounce back to the front page."""

    def post(self):
        key = self.request.get('key')
        try:
            # Only the datastore operations belong in the try block; the
            # original also wrapped the redirect, so a failure *after* a
            # successful put() would be misreported as a bad entry key.
            e = Entry.get(key)
            e.content = self.request.get('content')
            e.put()
        except Exception as e:
            self.response.out.write(indexTemplate.render({
                'title': 'Append to Entry',
                'body': "Error: No entry for key %s, exception %s" % (key, e)
            }))
            return
        self.redirect("/")
class ServeAttachment(blobstore_handlers.BlobstoreDownloadHandler):
    """Stream an attachment's blob to the client with its stored content type."""

    def get(self, key):
        a = Attachment.get(key)
        self.send_blob(a.content, content_type=a.content_type)
class ShowIdeas(webapp2.RequestHandler):
    """Collect idea lines ("-- ... idea ...: text") from all diary entries."""

    def scrape_ideas(self, text):
        """Return the text after the first ':' of every idea line in *text*.

        Splitting on at most one ':' keeps ideas that themselves contain a
        colon intact; the original split(":", 2)[1] silently truncated them.
        """
        return [line.split(":", 1)[1] for line in text.split("\n")
                if line.startswith("--") and "idea" in line and ":" in line]

    def get(self):
        ideas = []
        # Newest first; the "--" membership test cheaply skips entries that
        # cannot contain any marker lines.
        for e in Entry.all().order('-date'):
            if "--" in e.content:
                ideas += self.scrape_ideas(e.content)
        body_text = "<ul>\n"
        for idea in ideas:
            body_text += "\t<li>%s</li>\n" % idea
        body_text += "</ul>"
        self.response.out.write(indexTemplate.render({
            'title': 'Ideas',
            'body': body_text,
            'active_page': 'ideas'
        }))
class ShowToDo(webapp2.RequestHandler):
    """Show open to-do items plus items completed within the last week."""

    def get(self):
        todos = defaultdict(list)
        # Open items (no completion timestamp), newest first.
        for t in ToDo.all().filter('done_time =', None).order('-creation_time'):
            todos[t.category].append(t)
        # Recently finished items stay visible for a week.
        for t in ToDo.all().filter('done_time >', datetime.datetime.now() -
                                   datetime.timedelta(days=7)):
            todos[t.category].append(t)
        body_text = ""
        for category, items in todos.iteritems():
            body_text += "<h2>%s</h2>\n<ul class='todo'>" % category
            for t in items:
                if t.done_time:
                    body_text += "\t<li class='done'>%s</li>\n" % t.content
                else:
                    # Open items link to the handler that marks them done.
                    body_text += "\t<a href='/todo/finish/%s'><li>%s</li></a>\n" % (
                        t.key(), t.content)
            body_text += "</ul>"
        self.response.out.write(indexTemplate.render({
            'title': 'To-Do',
            'body': body_text,
            'active_page': 'todo'
        }))
class FinishToDo(webapp2.RequestHandler):
    """Mark one to-do item as completed, then return to the list page."""

    def get(self, key):
        t = ToDo.get(key)
        t.done_time = datetime.datetime.now()
        t.put()
        # NOTE(review): '?refresh' presumably forces a fresh list render -- confirm.
        self.redirect('/todo?refresh')
class ShowBackup(webapp2.RequestHandler):
    """Render the backup page, passing the shared key the backup form needs."""

    def get(self):
        self.response.out.write(indexTemplate.render({
            'title': 'Backup',
            'body': backupTemplate.render({'key': BACKUP_KEY}),
            'active_page': 'backup'
        }))
# URL routing table: regex groups in a path become positional arguments to the
# handler's get/post methods.
app = webapp2.WSGIApplication([
    ('/', MainPage),
    MailReceiver.mapping(),
    ('/reminder', EntryReminder),
    ('/happiness/check', CheckHappiness),
    ('/attachments', ShowAttachments),
    ('/attachment/([^/]+)', ServeAttachment),
    ('/ideas', ShowIdeas),
    ('/todo', ShowToDo),
    ('/todo/finish/([^/]+)', FinishToDo),
    ('/highlights', ShowHighlights),
    ('/highlights/month/(\d+)', PickMonthlyHighlight),
    ('/edit/([^/]+)', EntryEditForm),
    ('/edit', EntryEditSubmit),
    ('/backup', ShowBackup),
    # Send the old admin URL to the App Engine dashboard for this app.
    webapp2.Route('/_ah/admin', webapp2.RedirectHandler, defaults={
        '_uri': 'https://appengine.google.com/dashboard?app_id=s~infinite-diary'})
],
                              debug=True)
| Mononofu/infinite-diary | diary.py | diary.py | py | 6,006 | python | en | code | 4 | github-code | 50 |
71016801114 | from django.core.management.base import BaseCommand, CommandError
import pandas as pd
import re
import requests
import json
from blog.models import latest_transactions,last_blocks
class Command(BaseCommand):
    """Fetch the latest RISE blocks from the public explorer API and persist
    them into the ``last_blocks`` model, logging per-row failures."""

    def handle(self, *args, **options):
        def get_blocks():
            """Download recent blocks as a DataFrame: delegate info flattened,
            satoshi-style amounts converted to whole coins."""
            sats_conversion = 100000000  # base units per whole coin
            response = requests.get('https://explorer.rise.vision/api/getLastBlocks')
            df = pd.DataFrame(json.loads(response.content)['blocks'])
            # Flatten the nested delegate dicts into scalar columns.
            # (The original used the long-removed pandas .ix chained
            # assignment; plain column assignment replaces it.)
            delegate_info = [(dct['username'], dct['rank'])
                             for dct in df['delegate'].tolist()]
            df['username'] = [username for username, _ in delegate_info]
            df['rank'] = [rank for _, rank in delegate_info]
            del df['delegate']
            # Rename the API fields to the model's column names (positional).
            block_cols = ['generator', 'height', 'blockid', 'reward', 'timestamp',
                          'totalamount', 'totalfee', 'totalforged',
                          'transactionscount', 'username', 'rank']
            df.rename(columns=dict(zip(df.columns.tolist(), block_cols)),
                      inplace=True)
            for col in ('reward', 'totalamount', 'totalfee', 'totalforged'):
                df[col] = df[col] / sats_conversion
            return df

        df = get_blocks()
        rows = df.itertuples()
        # 'with' guarantees the log is closed even on an unexpected crash; the
        # original only closed it after the loop finished.
        with open('get_blocks_log', 'w') as log:
            while True:
                try:
                    record = last_blocks()
                    # next(rows) replaces the Python-2-only rows.next();
                    # [1:] drops the itertuples index field.
                    for field, value in zip(df.columns.tolist(), next(rows)[1:]):
                        print(field, value)
                        # The original compared the (field, value) tuple to
                        # 'id' (always unequal); the intent is to skip the
                        # primary-key column.
                        if field != 'id':
                            setattr(record, str(field), value)
                    record.save()
                except StopIteration:
                    break
                except Exception as e:
                    log.write(str(e))
                    log.write('\n')
                    log.write('resp')
                    log.write('\n')
| spookiestevie/my-django-site | blog/management/commands/get_blocks_cmd.py | get_blocks_cmd.py | py | 2,234 | python | en | code | 0 | github-code | 50 |
3310309804 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from typing import List
from cdp_backend.pipeline.ingestion_models import EventIngestionModel
from cdp_scrapers.legistar_utils import LegistarScraper
###############################################################################
def get_events(
    from_dt: datetime,
    to_dt: datetime,
    **kwargs,
) -> List[EventIngestionModel]:
    """Scrape Long Beach (Legistar client "longbeach") events between the
    two datetimes and return them as CDP ingestion models."""
    # Boilerplate minutes items that should not be ingested as real content.
    ignore_patterns = [
        "VIA TELECONFERENCE PURSUANT TO AB 361",
        "certify that the agenda was posted not less than 72 hours",
        "The agenda and supporting documents are available on the Internet",
        "IN-PERSON/VIRTUAL HYBRID CITY COUNCIL MEETING",
        "PURSUANT TO AB 361",
        "Opportunity to address the City Council",
        "VIA TELECONFERENCE",
        "JOIN VIA ZOOM",
        "TO PROVIDE PUBLIC COMMENT IN THE ZOOM MEETING",
        "NOTICE TO THE PUBLIC",
        "Opportunity is given to those members of the public",
        "page break",
        "REVISED",
        "PAGE BREAK",
        "A digital recording of this meeting will be available",
    ]
    legistar = LegistarScraper(
        client="longbeach",
        timezone="America/Los_Angeles",
        ignore_minutes_item_patterns=ignore_patterns,
    )
    return legistar.get_events(begin=from_dt, end=to_dt)
| CouncilDataProject/long-beach | python/cdp_long_beach_backend/scraper.py | scraper.py | py | 1,357 | python | en | code | 0 | github-code | 50 |
37510404975 | from Crypto.Cipher import AES
from Crypto.Util.Padding import pad
def random_bytes(range):
    """Return `range` cryptographically secure random bytes.

    NOTE: the parameter name shadows the builtin range(); it is kept only for
    interface compatibility.  os.urandom replaces the hand-rolled read of
    /dev/urandom: it behaves identically on Unix but is also portable
    (Windows included).
    """
    import os
    return os.urandom(range)
def encrypt_aes_ecb(data, key):
    """ECB-encrypt `data` (must already be block-aligned) under `key`."""
    return AES.new(key, AES.MODE_ECB).encrypt(data)
def encrypt_aes_cbc(data, key, IV):
    """CBC-encrypt `data` (must already be block-aligned) under `key` and `IV`."""
    return AES.new(key, AES.MODE_CBC, IV).encrypt(data)
def encryption_oracle(data):
    """Cryptopals-#11 oracle: surround `data` with random affixes and encrypt
    it under ECB or CBC, chosen by a fair coin flip.

    The original computed the affix length as len(range(5, 11)), which is the
    constant 6; the challenge calls for a random count of 5-10 bytes, restored
    here.  Mode selection keeps 50/50 odds but is an explicit coin flip rather
    than testing the bit-length of a random byte.  (The `random` module is
    fine here: only the affix *lengths* and mode choice use it; the bytes and
    key material stay cryptographically random.)
    """
    import random
    prefix = random_bytes(random.randint(5, 10))
    suffix = random_bytes(random.randint(5, 10))
    data_to_encrypt = prefix + data + suffix
    if random.getrandbits(1):
        mode = 'ECB'
        ciphertext = encrypt_aes_ecb(pad(data_to_encrypt, blocksize), key)
    else:
        mode = 'CBC'
        ciphertext = encrypt_aes_cbc(pad(data_to_encrypt, blocksize), key, IV)
    print(f'Mode to encrypt: {mode}')
    print(f'Total of bytes: {len(ciphertext)}')
    return ciphertext
def detect_encryption_mode(ciphertext, block_size=None):
    """Guess whether `ciphertext` was produced by ECB or CBC.

    ECB is stateless, so identical plaintext blocks encrypt to identical
    cipher blocks; any repeated block is strong evidence of ECB.

    block_size -- cipher block size in bytes; defaults to the module-level
        `blocksize` (parameterized, backward-compatibly, so the detector can
        be used and tested in isolation).
    """
    if block_size is None:
        block_size = blocksize
    chunks = [ciphertext[i:i + block_size]
              for i in range(0, len(ciphertext), block_size)]
    duplicated = 0
    for num, ch in enumerate(chunks):
        if chunks.count(ch) > 1:
            print(num, ch)
            duplicated += 1
    print(f'Total of chunks of {block_size} bytes: {len(chunks)}')
    # Any duplicate contributes at least two hits, so > 1 means "a repeated
    # block exists".
    return 'ECB' if duplicated > 1 else 'CBC'
def main():
    """Encrypt a sample plaintext with the oracle and report the detected mode."""
    with open("open_text", "rb") as f:
        data = f.read()
    # NOTE(review): the file contents read above are immediately discarded and
    # this hard-coded base64 blob (still encoded) is used instead -- looks
    # unintentional; confirm which input is meant.
    data = b'Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkg aGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBq dXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUg YnkK'
    print(f'{len(data)} bytes')
    encrypted_text = encryption_oracle(data)
    detected_mode = detect_encryption_mode(encrypted_text)
    print(f"Mode Detected: {detected_mode}")
if __name__ == '__main__':
    # Module-level oracle parameters: 16-byte AES block, random key and IV.
    blocksize = AES.block_size
    key = random_bytes(blocksize)
    IV = random_bytes(blocksize)
    main()
| iliayg/cracking-bytes | 6.py | 6.py | py | 2,127 | python | en | code | 0 | github-code | 50 |
10203246578 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 14 16:15:01 2014
@author: Eric
Houses all plot functions for county_data_analysis
Suffixes at the end of variable names:
a: numpy array
b: boolean
d: dictionary
df: pandas DataFrame
l: list
s: string
t: tuple
Underscores indicate chaining: for instance, "foo_t_t" is a tuple of tuples
"""
import matplotlib as mpl
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy as sp
def define_boolean_color(boolean_sr, color_t_t):
    """Map a boolean Series to per-row RGB colors.

    boolean_sr -- boolean-valued Series to color by.
    color_t_t -- two length-3 tuples: the color used where the Series is True
        and the color used where it is False.

    Returns (color_df, None); the None mirrors the gradient helpers, which
    return their (min, mid, max) anchor values in that slot.
    """
    color_column_t = ('red', 'green', 'blue')
    color_df = pd.DataFrame(np.ndarray((len(boolean_sr.index), 3)),
                            index=boolean_sr.index,
                            columns=color_column_t)
    true_color_t, false_color_t = color_t_t
    for l_column, column_s in enumerate(color_column_t):
        color_df.loc[boolean_sr, column_s] = true_color_t[l_column]
        color_df.loc[~boolean_sr, column_s] = false_color_t[l_column]
    return (color_df, None)
def define_balanced_gradient_color(value_sr, color_t_t):
    """Map values to colors on a gradient symmetric about zero.

    color_t_t holds three length-3 tuples: the colors at -max_magnitude, 0
    and +max_magnitude, where max_magnitude is the largest |value| in the
    data.  Returns (gradient_df, (-max_magnitude, 0, max_magnitude)).
    """
    # The highest-magnitude value gets the brightest color at either end.
    max_magnitude = max(abs(v) for v in value_sr)
    gradient_df = pd.DataFrame(np.ndarray((len(value_sr.index), 3)),
                               index=value_sr.index,
                               columns=['red', 'green', 'blue'])
    for index in value_sr.index:
        gradient_df.loc[index] = interpolate_balanced_gradient_color(
            color_t_t, value_sr[index], max_magnitude)
    return (gradient_df, (-max_magnitude, 0, max_magnitude))
def define_unbalanced_gradient_color(value_sr, color_t_t, color_value_t):
    """Map values to colors on a gradient spanning min to max value.

    color_t_t holds the colors for the low, middle and high anchor values.
    The anchors come from color_value_t when given, otherwise from the data
    (min, midpoint, max).  Returns (gradient_df, (min, mid, max)).
    """
    if color_value_t:
        min_value, mid_value, max_value = color_value_t
    else:
        min_value = np.min(value_sr)
        max_value = np.max(value_sr)
        mid_value = float(min_value + max_value) / 2.0
    gradient_df = pd.DataFrame(np.ndarray((len(value_sr.index), 3)),
                               index=value_sr.index,
                               columns=['red', 'green', 'blue'])
    for index in value_sr.index:
        gradient_df.loc[index] = interpolate_unbalanced_gradient_color(
            color_t_t, value_sr[index], min_value, mid_value, max_value)
    return (gradient_df, (min_value, mid_value, max_value))
def interpolate_balanced_gradient_color(color_t_t, value, max_magnitude):
    """Interpolate a color for `value` on a gradient symmetric about zero.

    color_t_t -- three length-3 tuples: the colors at -max_magnitude, 0 and
        +max_magnitude.
    Returns a length-3 tuple with the linearly interpolated color.
    """
    # Guard the degenerate case (the caller derives max_magnitude from the
    # data): if every value is 0, the midpoint color applies.  The original
    # raised ZeroDivisionError here.
    if max_magnitude == 0:
        return tuple(float(c) for c in color_t_t[1])
    normalized_magnitude = abs(value) / max_magnitude
    # The larger |value|, the closer to the end color; the smaller, the
    # closer to the midpoint color.
    near_color_t = color_t_t[1]
    far_color_t = color_t_t[0] if value < 0 else color_t_t[2]
    interpolated_color_a = (normalized_magnitude * np.array(far_color_t) +
                            (1 - normalized_magnitude) * np.array(near_color_t))
    return tuple(interpolated_color_a)
def interpolate_unbalanced_gradient_color(color_t_t, value, min_value,
                                          mid_value, max_value):
    """Interpolate a color for `value` on a min/mid/max anchored gradient.

    color_t_t -- three length-3 tuples: the colors at min_value, mid_value
        and max_value.
    Returns a length-3 tuple with the linearly interpolated color.
    """
    # Pick the gradient segment the value falls into.
    if value < mid_value:
        low_value, high_value = min_value, mid_value
        low_color_t, high_color_t = color_t_t[0], color_t_t[1]
    else:
        low_value, high_value = mid_value, max_value
        low_color_t, high_color_t = color_t_t[1], color_t_t[2]
    interval = high_value - low_value
    # Degenerate segment (e.g. min == mid): avoid the 0/0 the original hit
    # and return the segment's anchor color.
    if interval == 0:
        return tuple(float(c) for c in high_color_t)
    interpolated_color_a = \
        (value - low_value) / interval * np.array(high_color_t) + \
        (high_value - value) / interval * np.array(low_color_t)
    return tuple(interpolated_color_a)
def make_colorbar(ax, color_t_t, color_value_t, label_s):
    """ Creates a colorbar with the given axis handle ax; the colors are defined according to color_t_t and the values are mapped according to color_value_t. color_t_t and color_value_t must currently both be of length 3. The colorbar is labeled with label_s. """
    # Create the colormap for the colorbar
    colormap = make_colormap(color_t_t, color_value_t)
    # Create the colorbar
    # Normalize maps the data range [min, max] onto the colormap's [0, 1].
    norm = mpl.colors.Normalize(vmin=color_value_t[0], vmax=color_value_t[2])
    color_bar_handle = mpl.colorbar.ColorbarBase(ax, cmap=colormap,
                                                 norm=norm,
                                                 orientation='horizontal')
    color_bar_handle.set_label(label_s)
def make_colormap(color_t_t, color_value_t):
    """ Given colors defined in color_t_t and values defined in color_value_t, creates a LinearSegmentedColormap object. Works with only three colors and corresponding values for now. """
    # Find how far the second color is from the first and third
    # (its position in [0, 1], where 0 is min_value and 1 is max_value).
    second_value_fraction = float(color_value_t[1] - color_value_t[0]) / \
                            float(color_value_t[2] - color_value_t[0])
    # Create the colormap
    # Each channel's segment data is (position, value_below, value_above)
    # triples, per the LinearSegmentedColormap convention.
    color_s_l = ['red', 'green', 'blue']
    color_map_entry = lambda color_t_t, i_color: \
        ((0.0, color_t_t[0][i_color], color_t_t[0][i_color]),
         (second_value_fraction, color_t_t[1][i_color], color_t_t[1][i_color]),
         (1.0, color_t_t[2][i_color], color_t_t[2][i_color]))
    color_d = {color_s: color_map_entry(color_t_t, i_color) for i_color, color_s
               in enumerate(color_s_l)}
    colormap = LinearSegmentedColormap('ShapePlotColorMap', color_d)
    return colormap
def make_scatter_plot(ax, x_l_t, y_l_t, color_t_t, plot_axes_at_zero_b=False,
                      plot_regression_b=False):
    """
    Creates a scatter plot. x_l_t and y_l_t are length-n tuples containing the
    n lists to be plotted; colors of plot points are given by the length-n
    tuple color_t_t.
    """
    # Plot all data
    for l_series in xrange(len(x_l_t)):
        ax.scatter(x_l_t[l_series], y_l_t[l_series],
                   c=color_t_t[l_series],
                   edgecolors='none')
    # Plot x=0 and y=0 lines
    if plot_axes_at_zero_b:
        ax.axhline(y=0, color='k')
        ax.axvline(x=0, color='k')
    # Plot regression line (one set of points only)
    # (Regression over several series would be ambiguous, so it is skipped.)
    if plot_regression_b and len(x_l_t) == 1:
        plot_regression(ax, x_l_t[0], y_l_t[0])
def make_shape_plot(fig, value_sr, shape_index_l, shape_l, color_type_s, color_t_t,
                    ax=None, colorbar_s=None, colorbar_ax=None, color_value_t=None):
    """ Creates a shape plot given figure handle fig. value_sr is the Series containing the data to be plotted; shape_index_l indexes the shapes to plot by FIPS code; shape_l contains the shapes to plot; color_type_s defines whether the plot will be shaded according to a binary or a gradient; color_t_t defines the colors to shade with. colorbar_s labels the colorbar. ax and colorbar_ax are optional pre-defined axes. If color_value_t is defined, it sets the values that will be shown by the respective colors in color_t_t if 'unbalanced_gradient' is specified for color_type_s.
    """
    # Set shape colors
    if not ax:
        ax = fig.add_subplot(1, 1, 1)
    shape_bounds_all_shapes_l = [float('inf'), float('inf'), float('-inf'), float('-inf')]
    # Lambdas defer the color computation until the requested type is chosen.
    color_types_d = {'boolean': lambda: define_boolean_color(value_sr, color_t_t),
                     'balanced_gradient': \
                         lambda: define_balanced_gradient_color(value_sr, color_t_t),
                     'unbalanced_gradient': \
                         lambda: define_unbalanced_gradient_color(value_sr, color_t_t,
                                                                  color_value_t)}
    color_df, color_value_t = color_types_d[color_type_s]()
    color_df = np.around(color_df, decimals=5)
    # To prevent rounding errors leading to values outside the [0, 1] interval
    # Add shapes to plot
    for l_fips in value_sr.index:
        this_counties_color_t = tuple(color_df.loc[l_fips])
        # A FIPS code can map to several shapes (e.g. multi-part counties).
        i_shape_l = [i for i,j in enumerate(shape_index_l) if j==int(l_fips)]
        for i_shape in i_shape_l:
            # Grow the running bounding box over all plotted shapes.
            shape_bounds_this_shape_l = shape_l[i_shape].bbox
            shape_bounds_all_shapes_l[0] = \
                min(shape_bounds_this_shape_l[0], shape_bounds_all_shapes_l[0])
            shape_bounds_all_shapes_l[1] = \
                min(shape_bounds_this_shape_l[1], shape_bounds_all_shapes_l[1])
            shape_bounds_all_shapes_l[2] = \
                max(shape_bounds_this_shape_l[2], shape_bounds_all_shapes_l[2])
            shape_bounds_all_shapes_l[3] = \
                max(shape_bounds_this_shape_l[3], shape_bounds_all_shapes_l[3])
            # shapefile 'parts' are start offsets into the point list; append
            # the total point count so each slice below is a closed polygon.
            this_shapes_patches = []
            points_a = np.array(shape_l[i_shape].points)
            shape_file_parts = shape_l[i_shape].parts
            all_parts_l = list(shape_file_parts) + [points_a.shape[0]]
            for l_part in xrange(len(shape_file_parts)):
                this_shapes_patches.append(mpl.patches.Polygon(
                    points_a[all_parts_l[l_part]:all_parts_l[l_part+1]]))
            ax.add_collection(mpl.collections.PatchCollection(this_shapes_patches,
                                                              color=this_counties_color_t))
    # Fixed continental-US viewport (longitude/latitude degrees).
    ax.set_xlim(-127, -65)
    ax.set_ylim(20, 50)
    ax.set_axis_off()
    # Add colorbar
    if colorbar_s and color_value_t:
        if not colorbar_ax:
            colorbar_ax = fig.add_axes([0.25, 0.10, 0.50, 0.05])
        make_colorbar(colorbar_ax, color_t_t, color_value_t, colorbar_s)
    return ax
def plot_line_score_of_features(ax, feature_s_l, score_value_l,
                                extremum_func=None,
                                is_backward_selection_b=False,
                                ylabel_s=None):
    """ Given a list of features feature_s_l and a corresponding list of scores score_value_l, creates a line plot with axes ax. If extremum_func is either max() or min(), the maximum or minimum value, respectively, will be circled. """
    # Work on a copy: the original mutated the caller's feature list when
    # prepending the +/- markers below.
    feature_s_l = list(feature_s_l)
    # Indicate addition or subtraction of features
    if not is_backward_selection_b:
        feature_s_l[1:] = ['+ ' + feature_s for feature_s in feature_s_l[1:]]
    else:
        feature_s_l[:-1] = ['- ' + feature_s for feature_s in feature_s_l[:-1]]
        feature_s_l[-1] = '(last remaining feature: ' + feature_s_l[-1] + ')'
    # Alternating shaded bands make the columns easier to read.
    for l_odd in range(len(score_value_l))[1::2]:
        ax.axvspan(l_odd-0.5, l_odd+0.5, alpha=0.15,
                   edgecolor='none', facecolor=[0, 0, 0])
    ax.plot(range(len(score_value_l)), score_value_l)
    ax.set_xlim(-0.5, len(score_value_l)-0.5)
    ax.set_xticks(range(len(score_value_l)))
    ax.set_xticklabels(feature_s_l, rotation=90)
    ax.set_ylabel(ylabel_s)
    # Circle extreme value
    if extremum_func:
        # None entries mean "no score for this step"; skip them.
        extremum_value = \
            extremum_func(value for value in score_value_l if value is not None)
        i_extremum = score_value_l.index(extremum_value)
        ax.scatter(i_extremum, extremum_value)
def plot_regression(ax, x_l, y_l):
    """
    Plots a least-squares regression line for (x_l, y_l) on the axes `ax`.
    The line is drawn over twice the axis range and the limits are then
    restored -- kind of a hack, but it guarantees full coverage.
    """
    # Find correct axis limits
    axis_limits_t = ax.axis()
    # Calculate and plot regression
    slope, intercept, r_value, p_value, std_err = sp.stats.linregress(x_l, y_l)
    # Draw on *ax* explicitly: the original called plt.plot, which targets the
    # pyplot "current axes" and could draw the line on the wrong subplot.
    ax.plot([2*axis_limits_t[0], 2*axis_limits_t[1]],
            [slope*2*axis_limits_t[0]+intercept,
             slope*2*axis_limits_t[1]+intercept],
            'r')
    # Reset axis limits
    ax.set_xlim(axis_limits_t[0], axis_limits_t[1])
    ax.set_ylim(axis_limits_t[2], axis_limits_t[3])
return ax | EricMichaelSmith/county_data_analysis | plotting.py | plotting.py | py | 13,967 | python | en | code | 0 | github-code | 50 |
38813537744 | from flask import Flask, app, render_template, request, redirect
# from collections import defaultdict
from uuid import uuid4
from db import VoteDB
from model import db,Votes,Topics
app = Flask(__name__)
# db = VoteDB()
@app.route('/')
def index():
    """Front page: list every topic (leftover debug prints removed)."""
    topics = list(Topics.select())
    return render_template('index.html', topics=topics)
@app.route('/addTopic', methods=['POST'])
def add_new_topic():
    """Create a topic from the submitted form and return to the index.

    The original printed debug output on every request, including a
    Topics.select() repr built solely to be printed; those are removed.
    """
    # uuid4 gives a collision-safe primary key for the new topic.
    topic_id = str(uuid4())
    name = request.form.get('name')
    Topics.create(topic_id=topic_id, topic_name=name)
    return redirect('/')
@app.route('/newTopic')
def new_topic():
    """Render the form for creating a new topic."""
    return render_template('newtopic.html')
@app.route('/topic/<topic_id>')
def get_topic_page(topic_id):
    """Show one topic with its choices and vote counts.

    An unknown topic_id raises IndexError (-> HTTP 500), matching the
    original behavior; the leftover debug print is removed.
    """
    topic = list(Topics.select().where(Topics.topic_id == topic_id))[0]
    votes = list(Votes.select().where(Votes.topic_id == topic))
    return render_template('topic.html',
                           topic_id=topic_id,
                           topic=topic,
                           votes=votes,
                           )
@app.route('/topic/<topic_id>/newChoice', methods=['POST'])
def new_choice(topic_id):
    """Add a choice (form field 'choice_name') to the given topic."""
    submitted_name = request.form.get('choice_name')
    parent_topic = Topics.get_by_id(topic_id)
    Votes.create(topic_id=parent_topic, choice_name=submitted_name)
    return redirect(f'/topic/{topic_id}')
@app.route('/topic/<topic_id>/vote', methods=['POST'])
def vote_choice(topic_id):
    """Register one vote for the selected choice, then reload the topic."""
    selected_vote_id = request.form.get('choice')
    # Increment atomically in SQL rather than read-modify-write in Python.
    (Votes
     .update(choice_count=Votes.choice_count + 1)
     .where(Votes.vote_id == selected_vote_id)
     .execute())
    return redirect(f'/topic/{topic_id}')
if __name__ == '__main__':
    # Connect and make sure both tables exist before serving requests.
    db.connect()
    db.create_tables([Topics,Votes])
app.run('localhost',5000) | Slth1811/vote-app | app.py | app.py | py | 1,811 | python | en | code | 0 | github-code | 50 |
41907756053 | import re
import textwrap
class Color:
    '''A representation of a single color value.

    This color can be of the following formats:
    - #RRGGBB
    - rgb(r, g, b)
    - rgba(r, g, b, a)
    - $other_color
    - rgb($other_color_rgb)
    - rgba($other_color_rgb, a)

    NB: The color components that refer to other colors' RGB values must end
    with '_rgb'.
    '''

    def __init__(self, value_str):
        # TODO(calamity): Add opacity-only values
        self.var = None      # name of a referenced color variable, if any
        self.rgb_var = None  # name of a referenced *_rgb variable, if any
        self.r = 0
        self.g = 0
        self.b = 0
        self.a = 1
        self.Parse(value_str)

    def _AssignRGB(self, rgb):
        """Store an (r, g, b) triple, range-checking every component."""
        for v in rgb:
            if not (0 <= v <= 255):
                raise ValueError('RGB value out of bounds')
        (self.r, self.g, self.b) = rgb

    def _ParseRGBRef(self, rgb_ref):
        """Parse a '$name_rgb' reference into self.rgb_var."""
        # Raw string: the original non-raw '\$' relied on an invalid escape
        # sequence (a DeprecationWarning on recent Python).
        match = re.match(r'^\$([a-z0-9_]+_rgb)$', rgb_ref)
        if not match:
            raise ValueError('Expected a reference to an RGB variable')
        self.rgb_var = match.group(1)

    def _ParseAlpha(self, alpha_value):
        """Parse and range-check an alpha component."""
        self.a = float(alpha_value)
        if not (0 <= self.a <= 1):
            raise ValueError('Alpha expected to be between 0 and 1')

    def Parse(self, value):
        """Parse `value` in any supported format; raises ValueError on failure."""
        def ParseHex(value):
            # Case-insensitive: '#AABBCC' is now accepted as well as
            # '#aabbcc' (the original only matched lowercase hex).
            match = re.match(r'^#([0-9a-fA-F]*)$', value)
            if not match:
                return False
            value = match.group(1)
            if len(value) != 6:
                raise ValueError('Expected #RRGGBB')
            self._AssignRGB([int(x, 16) for x in textwrap.wrap(value, 2)])
            return True

        def ParseRGB(value):
            match = re.match(r'^rgb\((.*)\)$', value)
            if not match:
                return False
            values = match.group(1).split(',')
            if len(values) == 1:
                self._ParseRGBRef(values[0])
                return True
            if len(values) == 3:
                self._AssignRGB([int(x) for x in values])
                return True
            raise ValueError(
                'rgb() expected to have either 1 reference or 3 ints')

        def ParseRGBA(value):
            match = re.match(r'^rgba\((.*)\)$', value)
            if not match:
                return False
            values = match.group(1).split(',')
            if len(values) == 2:
                self._ParseRGBRef(values[0])
                self._ParseAlpha(values[1])
                return True
            if len(values) == 4:
                self._AssignRGB([int(x) for x in values[0:3]])
                self._ParseAlpha(values[3])
                return True
            raise ValueError('rgba() expected to have either'
                             '1 reference + alpha, or 3 ints + alpha')

        def ParseVariableReference(value):
            match = re.match(r'^\$(.*)$', value)
            if not match:
                return False
            if value.endswith('_rgb'):
                raise ValueError(
                    'color reference cannot resolve to an rgb reference')
            self.var = match.group(1)
            return True

        # Each parser returns True on success, False when the format does not
        # apply, and raises ValueError for a recognized-but-malformed value.
        parsers = [
            ParseHex,
            ParseRGB,
            ParseRGBA,
            ParseVariableReference,
        ]
        if not any(p(value) for p in parsers):
            raise ValueError('Malformed color value')

    def __repr__(self):
        if self.var:
            return 'var(--%s)' % self.var
        if self.rgb_var:
            return 'rgba(var(--%s), %g)' % (self.rgb_var, self.a)
        return 'rgba(%d, %d, %d, %g)' % (self.r, self.g, self.b, self.a)
| RSATom/Qt | qtwebengine/src/3rdparty/chromium/tools/style_variable_generator/color.py | color.py | py | 3,714 | python | en | code | 50 | github-code | 50 |
26210543538 | import logging
from typing import List
import re
import sys
import tiktoken
import openai
from .base import Engine
from .mixin.openai import OpenAIMixin
from .settings import SYMAI_CONFIG
from ..strategy import InvalidRequestErrorRemedyChatStrategy
from ..utils import encode_frames_file
logging.getLogger("openai").setLevel(logging.ERROR)
logging.getLogger("requests").setLevel(logging.ERROR)
logging.getLogger("urllib").setLevel(logging.ERROR)
logging.getLogger("httpx").setLevel(logging.ERROR)
logging.getLogger("httpcore").setLevel(logging.ERROR)
class GPTXChatEngine(Engine, OpenAIMixin):
    def __init__(self):
        """Configure the OpenAI chat engine from SYMAI_CONFIG: API key, model
        name, matching tiktoken tokenizer, pricing and context-window size."""
        super().__init__()
        logger = logging.getLogger('openai')
        logger.setLevel(logging.WARNING)
        config = SYMAI_CONFIG
        openai.api_key = config['NEUROSYMBOLIC_ENGINE_API_KEY']
        self.model = config['NEUROSYMBOLIC_ENGINE_MODEL']
        self.tokenizer = tiktoken.encoding_for_model(self.model)
        self.pricing = self.api_pricing()
        self.max_tokens = self.api_max_tokens() - 100 # TODO: account for tolerance. figure out how their magic number works to compute reliably the precise max token size
    def command(self, wrp_params):
        """Update engine settings at runtime; recognizes the API-key and
        model-name entries in wrp_params (others are left to the base class)."""
        super().command(wrp_params)
        if 'NEUROSYMBOLIC_ENGINE_API_KEY' in wrp_params:
            openai.api_key = wrp_params['NEUROSYMBOLIC_ENGINE_API_KEY']
        if 'NEUROSYMBOLIC_ENGINE_MODEL' in wrp_params:
            self.model = wrp_params['NEUROSYMBOLIC_ENGINE_MODEL']
    def compute_required_tokens(self, prompts: dict) -> int:
        """Count the tokens the given chat messages will consume.

        prompts is a list of chat messages ({'role': ..., 'content': ...});
        for the vision model the content may be a list of typed parts, of
        which only the 'text' parts are counted.
        """
        # iterate over prompts and compute number of tokens
        prompts_ = [role['content'] for role in prompts]
        if self.model == 'gpt-4-vision-preview':
            eval_prompt = ''
            for p in prompts_:
                if type(p) == str:
                    eval_prompt += p
                else:
                    for p_ in p:
                        if p_['type'] == 'text':
                            eval_prompt += p_['text']
            prompt = eval_prompt
        else:
            prompt = ''.join(prompts_)
        # disallowed_special=() lets text that looks like special tokens be
        # encoded as plain text instead of raising.
        val = len(self.tokenizer.encode(prompt, disallowed_special=()))
        return val
    def compute_remaining_tokens(self, prompts: list) -> int:
        """Return a safe completion budget: context window minus the prompt's
        token count, with a 1% margin, capped at 4096 for models whose output
        limit is smaller than their context window."""
        val = self.compute_required_tokens(prompts)
        if 'gpt-4-1106-preview' == self.model or 'gpt-4-vision-preview' == self.model: # models can only output 4_096 tokens
            return min(int((self.max_tokens - val) * 0.99), 4_096)
        return int((self.max_tokens - val) * 0.99) # TODO: figure out how their magic number works to compute reliably the precise max token size
def forward(self, prompts: List[str], *args, **kwargs) -> List[str]:
prompts_ = prompts
input_handler = kwargs['input_handler'] if 'input_handler' in kwargs else None
if input_handler:
input_handler((prompts_,))
openai_kwargs = {}
# send prompt to GPT-X Chat-based
stop = kwargs['stop'] if 'stop' in kwargs else None
model = kwargs['model'] if 'model' in kwargs else self.model
# convert map to list of strings
max_tokens = kwargs['max_tokens'] if 'max_tokens' in kwargs else self.compute_remaining_tokens(prompts_)
temperature = kwargs['temperature'] if 'temperature' in kwargs else 1
frequency_penalty = kwargs['frequency_penalty'] if 'frequency_penalty' in kwargs else 0
presence_penalty = kwargs['presence_penalty'] if 'presence_penalty' in kwargs else 0
top_p = kwargs['top_p'] if 'top_p' in kwargs else 1
except_remedy = kwargs['except_remedy'] if 'except_remedy' in kwargs else None
functions = kwargs['functions'] if 'functions' in kwargs else None
function_call = "auto" if functions is not None else None
if stop is not None:
openai_kwargs['stop'] = stop
if functions is not None:
openai_kwargs['functions'] = functions
if function_call is not None:
openai_kwargs['function_call'] = function_call
try:
res = openai.chat.completions.create(model=model,
messages=prompts_,
max_tokens=max_tokens,
temperature=temperature,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
top_p=top_p,
n=1,
**openai_kwargs)
output_handler = kwargs['output_handler'] if 'output_handler' in kwargs else None
if output_handler:
output_handler(res)
except Exception as e:
if openai.api_key is None or openai.api_key == '':
msg = 'OpenAI API key is not set. Please set it in the config file or pass it as an argument to the command method.'
logging.error(msg)
raise Exception(msg) from e
callback = openai.chat.completions.create
kwargs['model'] = kwargs['model'] if 'model' in kwargs else self.model
if except_remedy is not None:
res = except_remedy(e, prompts_, callback, self, *args, **kwargs)
else:
try:
# implicit remedy strategy
except_remedy = InvalidRequestErrorRemedyChatStrategy()
res = except_remedy(e, prompts_, callback, self, *args, **kwargs)
except Exception as e2:
ex = Exception(f'Failed to handle exception: {e}. Also failed implicit remedy strategy after retry: {e2}')
raise ex from e
metadata = {}
if 'metadata' in kwargs and kwargs['metadata']:
metadata['kwargs'] = kwargs
metadata['input'] = prompts_
metadata['output'] = res
metadata['model'] = model
metadata['max_tokens'] = max_tokens
metadata['temperature'] = temperature
metadata['frequency_penalty'] = frequency_penalty
metadata['presence_penalty'] = presence_penalty
metadata['top_p'] = top_p
metadata['except_remedy'] = except_remedy
metadata['functions'] = functions
metadata['function_call'] = function_call
metadata['stop'] = stop
rsp = [r.message.content for r in res.choices]
output = rsp if isinstance(prompts, list) else rsp[0]
return output, metadata
def prepare(self, args, kwargs, wrp_params):
if 'raw_input' in wrp_params:
wrp_params['prompts'] = wrp_params['raw_input']
return
disable_verbose_output = True if 'enable_verbose_output' not in wrp_params else not wrp_params['enable_verbose_output']
_non_verbose_output = """[META INSTRUCTIONS START]\nYou do not output anything else, like verbose preambles or post explanation, such as "Sure, let me...", "Hope that was helpful...", "Yes, I can help you with that...", etc. Consider well formatted output, e.g. for sentences use punctuation, spaces etc. or for code use indentation, etc. Never add meta instructions information to your output!\n"""
user: str = ""
system: str = ""
if disable_verbose_output:
system += _non_verbose_output
system = f'{system}\n' if system and len(system) > 0 else ''
ref = wrp_params['wrp_self']
static_ctxt, dyn_ctxt = ref.global_context
if len(static_ctxt) > 0:
system += f"[STATIC CONTEXT]\n{static_ctxt}\n\n"
if len(dyn_ctxt) > 0:
system += f"[DYNAMIC CONTEXT]\n{dyn_ctxt}\n\n"
payload = str(wrp_params['payload']) if 'payload' in wrp_params else None
if payload is not None:
system += f"[ADDITIONAL CONTEXT]\n{payload}\n\n"
examples: List[str] = wrp_params['examples']
if examples and len(examples) > 0:
system += f"[EXAMPLES]\n{str(examples)}\n\n"
def extract_pattern(text):
pattern = r'<<vision:(.*?):>>'
return re.findall(pattern, text)
def remove_pattern(text):
pattern = r'<<vision:(.*?):>>'
return re.sub(pattern, '', text)
image_files = []
# pre-process prompt if contains image url
if self.model == 'gpt-4-vision-preview' and '<<vision:' in str(wrp_params['processed_input']):
parts = extract_pattern(str(wrp_params['processed_input']))
for p in parts:
img_ = p.strip()
if img_.startswith('http'):
image_files.append(img_)
elif img_.startswith('data:image'):
image_files.append(img_)
else:
max_frames_spacing = 50
max_used_frames = 10
if img_.startswith('frames:'):
img_ = img_.replace('frames:', '')
max_used_frames, img_ = img_.split(':')
max_used_frames = int(max_used_frames)
if max_used_frames < 1 or max_used_frames > max_frames_spacing:
raise ValueError(f"Invalid max_used_frames value: {max_used_frames}. Expected value between 1 and {max_frames_spacing}")
buffer, ext = encode_frames_file(img_)
if len(buffer) > 1:
step = len(buffer) // max_frames_spacing # max frames spacing
frames = []
indices = list(range(0, len(buffer), step))[:max_used_frames]
for i in indices:
frames.append(f"data:image/{ext};base64,{buffer[i]}")
image_files.extend(frames)
elif len(buffer) == 1:
image_files.append(f"data:image/{ext};base64,{buffer[0]}")
else:
print('No frames found or error in encoding frames')
if wrp_params['prompt'] is not None and len(wrp_params['prompt']) > 0 and ']: <<<' not in str(wrp_params['prompt']):
val = str(wrp_params['prompt'])
if len(image_files) > 0:
val = remove_pattern(val)
system += f"[INSTRUCTION]\n{val}"
suffix: str = str(wrp_params['processed_input'])
if len(image_files) > 0:
suffix = remove_pattern(suffix)
parse_system_instructions = False if 'parse_system_instructions' not in wrp_params else wrp_params['parse_system_instructions']
if '[SYSTEM_INSTRUCTION::]: <<<' in suffix and parse_system_instructions:
parts = suffix.split('\n>>>\n')
# first parts are the system instructions
c = 0
for i, p in enumerate(parts):
if 'SYSTEM_INSTRUCTION' in p:
system += f"{p}\n"
c += 1
else:
break
# last part is the user input
suffix = '\n>>>\n'.join(parts[c:])
user += f"{suffix}"
template_suffix = str(wrp_params['template_suffix']) if 'template_suffix' in wrp_params else None
if template_suffix:
user += f"\n[[PLACEHOLDER]]\n{template_suffix}\n\n"
user += f"Only generate content for the placeholder `[[PLACEHOLDER]]` following the instructions and context information. Do NOT write `[[PLACEHOLDER]]` or anything else in your output.\n\n"
images = [{ 'type': 'image', "image_url": { "url": file, "detail": "auto" }} for file in image_files]
if self.model == 'gpt-4-vision-preview':
user_prompt = { "role": "user", "content": [
*images,
{ 'type': 'text', 'text': user }
]}
else:
user_prompt = { "role": "user", "content": user }
wrp_params['prompts'] = [
{ "role": "system", "content": system },
user_prompt,
]
| kpister/prompt-linter | data/scraping/repos/ExtensityAI~symbolicai/symai~backend~engine_gptX_chat.py | symai~backend~engine_gptX_chat.py | py | 12,371 | python | en | code | 0 | github-code | 50 |
32489181037 | import asyncio
import traceback
from duckysvc.duckysvc import DuckySvc
async def amain(args):
try:
dsvc = DuckySvc(args.server_ip, args.server_port, lang = args.lang, keyboard_device = args.device)
print('DuckySvc running %s:%s' % (args.server_ip, args.server_port))
await dsvc.run()
except:
traceback.print_exc('Error in ducksvc amain')
def main():
import argparse
parser = argparse.ArgumentParser(description='duckysvc')
parser.add_argument('device', help = 'USB HID keyboard device. Usually /dev/hidg0. !Keyboard device must not be in raw mode!')
parser.add_argument('--server-ip', default='127.0.0.1', help = 'server listen ip')
parser.add_argument('--server-port', default = 1212, type=int, help = 'server listen port')
parser.add_argument('-l', '--lang', default='us', help = 'Keyboard language layout')
args = parser.parse_args()
asyncio.run(amain(args))
if __name__ == '__main__':
main() | skelsec/duckysvc | duckysvc/__main__.py | __main__.py | py | 920 | python | en | code | 4 | github-code | 50 |
31081910231 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
# In[2]:
data=pd.read_csv("insurance.csv")
data
# In[3]:
data.shape
# # Preprocessing Data
# In[4]:
def find_outlier(column):
number_cols_ph = data[column].count()
first_quartile = np.quantile(data[column], 0.25)
third_quartile = np.quantile(data[column], 0.75)
IQR = third_quartile - first_quartile
min_outlier = first_quartile - (1.5 * IQR)
max_outlier = third_quartile + (1.5 * IQR)
print(f'the data less than {min_outlier} and more than {max_outlier} is the outlier data in colunm {column}')
return min_outlier, max_outlier
# # Remove The Outlier Data
# In[5]:
def remove_outlier(column):
min_outlier, max_outlier = find_outlier(column)
count_min = data[column].loc[data[column]<min_outlier].count()
count_max = data[column].loc[data[column]>max_outlier].count()
data[column].loc[data[column]<min_outlier] = np.nan
data[column].loc[data[column]>max_outlier] = np.nan
count = count_min + count_max
return count
# In[6]:
cols = []
for col in data.columns:
if(data[col].dtype == np.int64 or data[col].dtype == np.float64):
cols.append(col)
cols
# In[7]:
for col in cols[0:3]:
count_outliers = remove_outlier(col)
print(count_outliers)
# # Data Is Null?
# In[8]:
data.isna().sum()
# In[9]:
data.dropna(inplace=True)
# # Data Is Duplicated?
# In[10]:
data.duplicated().sum()
# In[11]:
data.drop_duplicates(inplace=True)
# # Show some of Information and Describe the Data
# In[12]:
data.info()
# In[13]:
round(data.describe(),2)
# # Exploratory Data Analysis
# In[14]:
data
# # Distribution of Region
# In[15]:
region_data_proportion = data['region'].value_counts()
# In[16]:
myexplode = [0.1, 0.1, 0.1, 0.1]
plt.pie(region_data_proportion, labels=region_data_proportion.index, autopct='%1.1f%%', explode= myexplode,shadow = True)
plt.show()
# In[17]:
smoker_data_proportion = data['smoker'].value_counts()
# # Distribution of Smoker
# In[18]:
myexplode = [0.1, 0.1]
plt.pie(smoker_data_proportion, labels=smoker_data_proportion.index, autopct='%1.1f%%', explode= myexplode,shadow = True, colors=['green','red'])
plt.show()
# # Distribution of Sex¶
# In[19]:
sex_data_proportion = data['sex'].value_counts()
# In[20]:
myexplode = [0.1, 0.1]
plt.pie(sex_data_proportion, labels=sex_data_proportion.index, autopct='%1.1f%%', startangle =90, explode= myexplode, shadow = True, colors=['c','hotpink'])
plt.show()
# # Count of Smokers by Sex and Region
# In[21]:
data.groupby(['region', 'sex'])['smoker'].count()
# In[22]:
pd.crosstab(index=data.region, columns=data.sex, values=data.smoker, aggfunc='count')
# In[23]:
sns.catplot(row='sex', x='smoker', col='region', data=data, kind='count')
# # Distribution of age by smokers
# In[24]:
sns.histplot(x='age', hue='smoker',data=data)
# In[25]:
sns.catplot(y='age', x='sex', hue='smoker', kind='box', data=data)
plt.show
# # Distribution of age by Children and Sex
# In[26]:
plt.figure(figsize=(18,6))
sns.violinplot(x=data.sex, y=data.age, hue=data.children)
plt.show()
#
# # Machine Learning
# In[27]:
data.head()
# In[28]:
data_ml = data.copy()
# # Encoder Columns from Categorical to Numerical
# In[29]:
lb_encoder = LabelEncoder()
data_ml['sex'] = lb_encoder.fit_transform(data_ml.sex)
data_ml['smoker'] = lb_encoder.fit_transform(data_ml.smoker)
data_ml['region'] = lb_encoder.fit_transform(data_ml.region)
# In[30]:
data_ml
# In[31]:
sns.heatmap(data_ml.corr(), annot=True)
# # Data Slicing
# In[32]:
X = data_ml.iloc[:, :-1].values
y = data_ml.iloc[:, -1].values
# # Train and Test the Data¶
# In[33]:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=True, random_state=98)
# In[34]:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=True, random_state=98)
# # Linear Regression Model
# In[35]:
model_lg = LinearRegression()
model = model_lg.fit(X_train, y_train)
# In[36]:
y_pred = model.predict(X_test)
# In[37]:
results = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
results
# In[38]:
sns.scatterplot(x=results.Actual, y=results.Predicted)
# # Evaluation the Model
# In[39]:
print(model.score(X_test, y_test)*100)
print(model.score(X_train, y_train)*100)
# In[40]:
r2_score(y_test, y_pred)*100
# # Cross Validation
# In[41]:
k_folds = KFold(n_splits = 5)
scores = cross_val_score(model, X, y, cv = k_folds)
print("Average CV Score: ", scores.mean()*100)
# # Comparing between actual values and predict values by plotting
# In[42]:
plt.plot(y_test, 'o', label='Actual')
plt.plot(y_pred, 'o', label='Prediction')
plt.legend()
plt.show()
| ARRathod/Medical-Insurance-Cost | Untitled.py | Untitled.py | py | 5,109 | python | en | code | 0 | github-code | 50 |
9881334201 |
class DVD:
def __init__(self, title, year, director) -> None:
self.title = title
self.year = year
self.director = director
def disclose(self):
print(self.title + "," + self.director )
mydvd1 = DVD ("Avengers", 2018, "Hulk")
mydvd2 = DVD ("Avengers", 2018, "Iron Man")
dvdCollection=[mydvd1, mydvd2]
for dvd in dvdCollection:
dvd.disclose() | francman/dailies | python/dvdbox.py | dvdbox.py | py | 388 | python | en | code | 0 | github-code | 50 |
41901975280 | # -*- coding: utf-8 -*-
from five import grok
from z3c.form import group, field
from zope import schema
from zope.interface import invariant, Invalid
from zope.schema.interfaces import IContextSourceBinder
from z3c.relationfield.schema import RelationList, RelationChoice
from plone.formwidget.autocomplete import AutocompleteFieldWidget
from plone.formwidget.contenttree import ObjPathSourceBinder
from plone.dexterity.content import Container
from plone.directives import dexterity, form
from plone.app.textfield import RichText
from plone.namedfile.field import NamedImage, NamedFile
from plone.namedfile.field import NamedBlobImage, NamedBlobFile
from sinanet.gelso.anagrafica_contatti import IAnagraficaContatti
from sinanet.gelso import MessageFactory as _
from zope.schema.vocabulary import SimpleVocabulary, SimpleTerm
from zope.schema.interfaces import IVocabularyFactory
from plone.i18n.normalizer import idnormalizer
from plone.indexer import indexer
from Products.CMFCore.utils import getToolByName
from sinanet.gelso.obiettivi import IObiettivi
from collective import dexteritytextindexer
def make_terms(items):
""" Create zope.schema terms for vocab from tuples """
terms = [ SimpleTerm(value=pair[0], token=pair[0], title=pair[1]) for pair in items ]
return terms
class Localizzazioni(object):
grok.implements(IVocabularyFactory)
def __call__(self, context):
terms = []
for term in ['Area boschiva', 'Area collinare','Area industriale', 'Area marina e costiera',
'Area montana', 'Area periferica', 'Area protetta', 'Area residenziale',
'Area rurale', 'Area turistica', 'Area umida', 'Area urbana', 'Centro Storico',
'Territorio provinciale', 'Territorio regionale', 'Territorio nazionale']:
terms.append(SimpleTerm(unicode(term, "utf-8", errors="ignore"), unicode(term, "utf-8", errors="ignore")))
return SimpleVocabulary(terms)
grok.global_utility(Localizzazioni, name=u"localizzazioni")
class Ambiti(object):
grok.implements(IVocabularyFactory)
def __call__(self, context):
terms = []
for term in ['Ambito nazionale', 'Area Marina Protetta', 'Associazione', 'Autorità di bacino',
'Comune', 'Comuni (più di uno)', 'Comunità montana', 'Distretto industriale',
'G.A.L. gruppo di azione locale', 'Parco Nazionale', 'Parco Regionale',
'Provincia', 'Regione', 'Riserva Naturale Statale o Regionale', 'Scuola']:
token=idnormalizer.normalize(term)
terms.append(SimpleVocabulary.createTerm(term, token, term))
return SimpleVocabulary(terms)
grok.global_utility(Ambiti, name=u"ambiti")
class Obiettivi(object):
grok.implements(IVocabularyFactory)
def __call__(self, context):
catalog = getToolByName(context, 'portal_catalog')
brains = catalog.searchResults({'portal_type': 'sinanet.gelso.obiettivi'})
result = [ (brain["UID"], brain["Title"]) for brain in brains ]
terms = make_terms(result)
return SimpleVocabulary(terms)
grok.global_utility(Obiettivi, name=u"obiettivi")
class SettoriIntervento(object):
grok.implements(IVocabularyFactory)
def __call__(self, context):
terms = []
for term in ['Strategie partecipate e integrate', 'Agricoltura', 'Edilizia e Urbanistica',
'Energia', 'Industria', 'Mobilità', 'Rifiuti', 'Territorio e Paesaggio', 'Turismo']:
token=idnormalizer.normalize(term)
terms.append(SimpleVocabulary.createTerm(term, token, term))
return SimpleVocabulary(terms)
grok.global_utility(SettoriIntervento, name=u"settori_intervento")
class StrumentiFinanziamento(object):
grok.implements(IVocabularyFactory)
def __call__(self, context):
terms = []
for term in ['Bando Agenda 21 Locale 2000', 'Bando Agenda 21 Locale 2002', 'Cultura 2000',
'Intelligent Energy Europe', 'Fondi propri', 'Fondo Sociale Europeo', 'INTERREG',
'LEADER', 'LIFE', 'Piani di Sviluppo Rurale', 'Programma Quadro Ricerca e Sviluppo Tecnologico',
'Programmi di ricerca di Rilevante Interesse Nazionale', 'SMAP']:
token=idnormalizer.normalize(term)
terms.append(SimpleVocabulary.createTerm(term, token, term))
return SimpleVocabulary(terms)
grok.global_utility(StrumentiFinanziamento, name=u"strumenti_finanziamento")
# Interface class; used to define content-type schema.
class ISchedaProgetto(form.Schema):
"""
scheda buona pratica
"""
dexteritytextindexer.searchable('title')
title = schema.TextLine(
title=_(u"Titolo"),
required=True,
)
dexteritytextindexer.searchable('abstract')
abstract = RichText(
title=_(u"Abstract"),
description=_(""),
required=False,
)
# dexteritytextindexer.searchable('commento')
commento = RichText(
title=_(u"Commento"),
description=_(""),
required=False,
)
partner= schema.TextLine(
title=_(u"Partner"),
required=False,
)
localizzazione = schema.Choice(
title=u"Localizzazione",
vocabulary=u"localizzazioni",
required=False, )
dimensioni_amministrazione = schema.Choice(
title=_(u"Dimensioni amministrazione"),
description=_(""),
values=(u"Oltre 1.000.000 abitanti", u"Da 100.000 a 1.000.000 abitanti", u"Da 10.000 a 100.000 abitanti", u"Inferiore a 10.000 abitanti"),
required=False,
)
ambito = schema.Choice(
title=u"Ambito",
vocabulary=u"ambiti",
required=False, )
settore_intervento = schema.List(
title=u"Settori d'intervento",
value_type=schema.Choice(vocabulary=u"settori_intervento"),
required=False, )
obiettivo = RelationList(
title=u"Obiettivi",
default=[],
value_type=RelationChoice(title=_(u"Obiettivi"),
source=ObjPathSourceBinder(path={ "query": "/Plone/vocabolari" },
object_provides= IObiettivi.__identifier__
)
),
required=False,
)
datainizio = schema.Date(
title=_(u"Data inizio lavori"),
required=False,
)
temporealizzazione = schema.Int(
title=_(u"Tempo di realizzazione (mesi)"),
required=False,
)
costo = schema.TextLine(
title=_(u"Costo"),
required=False,
)
finanziatore = schema.TextLine(
title=_(u"Finanziatore"),
required=False,
)
strumento_finanziamento = schema.Choice(
title=u"Strumento di finanziamento",
vocabulary=u"strumenti_finanziamento",
required=False, )
# dexteritytextindexer.searchable('note_finanziamenti')
note_finanziamenti = RichText(
title=_(u"Note ai finanziamenti"),
description=_(""),
required=False,
)
form.widget(referente=AutocompleteFieldWidget)
referente = RelationChoice(
title=_(u"Referente progetto"),
source=ObjPathSourceBinder(
object_provides=IAnagraficaContatti.__identifier__
),
required=False,
)
email= schema.TextLine(
title=_(u"e-mail"),
required=False,
)
url= schema.TextLine(
title=_(u"URL"),
required=False,
)
documenti_aggiuntivi_uno = NamedBlobFile(
title=u'Documenti aggiuntivi',
required=False,
)
documenti_aggiuntivi_due = NamedBlobFile(
title=u'Documenti aggiuntivi',
required=False,
)
documenti_aggiuntivi_tre = NamedBlobFile(
title=u'Documenti aggiuntivi',
required=False,
)
documenti_aggiuntivi_quattro = NamedBlobFile(
title=u'Documenti aggiuntivi',
required=False,
)
@grok.adapter(ISchedaProgetto, name='settore_intervento')
@indexer(ISchedaProgetto)
def settore_intervento_indexer(context):
return context.settore_intervento
@grok.adapter(ISchedaProgetto, name='dimensioni_amministrazione')
@indexer(ISchedaProgetto)
def dimensioni_amministrazione_indexer(context):
return context.dimensioni_amministrazione
@grok.adapter(ISchedaProgetto, name='strumento_finanziamento')
@indexer(ISchedaProgetto)
def strumento_finanziamento_indexer(context):
return context.strumento_finanziamento
@grok.adapter(ISchedaProgetto, name='localizzazione')
@indexer(ISchedaProgetto)
def localizzazione_indexer(context):
return context.localizzazione
@grok.adapter(ISchedaProgetto, name='ambito')
@indexer(ISchedaProgetto)
def ambito_indexer(context):
return context.ambito
@indexer(ISchedaProgetto)
def data_inizio_indexer(context):
anno = ""
if context.datainizio is not None:
anno = context.datainizio.strftime('%Y')
return anno
# Custom content-type class; objects created for this content type will
# be instances of this class. Use this class to add content-type specific
# methods and properties. Put methods that are mainly useful for rendering
# in separate view classes.
#class SchedaProgetto(Container):
# grok.implements(ISchedaProgetto)
# Add your class methods and properties here
# View class
# The view will automatically use a similarly named template in
# scheda_progetto_templates.
# Template filenames should be all lower case.
# The view will render when you request a content object with this
# interface with "/@@sampleview" appended.
# You may make this the default view for content objects
# of this type by uncommenting the grok.name line below or by
# changing the view class name and template filename to View / view.pt.
#class View(grok.View):
# grok.context(ISchedaProgetto)
# grok.require('zope2.View')
# grok.name('view')
# Add view methods here
# def prova(self):
# return "ciao"
#from zope.schema.vocabulary import SimpleVocabulary
#dummy_vocabulary_instance = SimpleVocabulary.fromItems([(1, 'a'), (2, 'c')])
| crdistefano/sinanet.gelso | sinanet/gelso/scheda_progetto.py | scheda_progetto.py | py | 10,254 | python | it | code | 0 | github-code | 50 |
14435176102 | import csv
import sqlite3
import urllib.request
from datetime import datetime
import numpy as np
import pandas as pd
import checkDB
# Crea la BD donde se guardaran los datos en caso de que aún no exista la BD.
print('Verificando la base de datos...')
con = sqlite3.connect('covid.sqlite')
cur = con.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS dias
(id INTEGER PRIMARY KEY AUTOINCREMENT, iso_code TEXT, continent TEXT,
location TEXT, date TEXT, total_cases INTEGER, new_cases INTEGER,
total_deaths INTEGER, new_deaths INTEGER, total_tests INTEGER, new_tests INTEGER,
UNIQUE(location, date))''')
# Lee el archivo .csv que hay en la carpeta Data y extrae los datos según sea solicitado
# Solicita el país del cuál se desea la información, por lo tanto es necesario escribirlo bien.
url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv'
file = pd.read_csv(url,na_values='')
file = file.fillna(0)
while True:
print('Enter the name of the country that you want to get data: ')
location = input()
print('Searching data of: '+location)
try:
last_date = checkDB.lastDate(cur,location)
if last_date is None:
desdeCero = True
else:
desdeCero = False
pais = file[file.loc[:,'location']==location]
for i in range(len(pais)):
if desdeCero:
cur.execute('''INSERT OR IGNORE INTO dias (iso_code, continent, location, date, total_cases,
new_cases, total_deaths, new_deaths, new_tests, total_tests) VALUES (?,?,?,?,?,?,?,?,?,?)''',
(pais.iloc[i,:].loc['iso_code'],pais.iloc[i,:].loc['continent'],pais.iloc[i,:].loc['location'],
pais.iloc[i,:].loc['date'],pais.iloc[i,:].loc['total_cases'],pais.iloc[i,:].loc['new_cases'],
pais.iloc[i,:].loc['total_deaths'],pais.iloc[i,:].loc['new_deaths'],
pais.iloc[i,:].loc['new_tests'],pais.iloc[i,:].loc['total_tests']))
con.commit()
else:
# Espera hasta llegar a la fecha más reciente y actualiza la bandera desdeCero
if pais.iloc[i,:].loc['date'] == last_date:
print("Date uploaded")
desdeCero = True
else:
pass
except KeyboardInterrupt:
print('')
print('Program interrupted by user...')
cur.close()
break
cur.close() | iepenaranda/Capstone-Retrieving-Processing-and-Visualizing-Data-with-Python | leer2.py | leer2.py | py | 2,561 | python | es | code | 1 | github-code | 50 |
1870837114 | import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
import json
from sklearn.preprocessing import MinMaxScaler
from sklearn import svm, datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import os
import re
import string
from textblob import Word
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from bs4 import BeautifulSoup
from collections import Counter
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem import PorterStemmer
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
stopwords = set(stopwords.words('english'))
import matplotlib.pyplot as plt
import seaborn as sns
import bisect
import joblib
import sys
sys.modules['sklearn.externals.joblib'] = joblib
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sqlalchemy.sql.expression import column
import string
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import pandas as pd
from bs4 import BeautifulSoup
import numpy as np
stopwords = set(stopwords.words('english'))
lbl = LabelEncoder()
class preprocessing:
#fill null
def process(self,X_train,X_test):
X_train = self.handle_hompage(X_train)
X_test = self.handle_hompage(X_test)
X_train = self.removedupli(X_train)
X_test = self.removedupli(X_test)
X_train = self.handle_date(X_train)
X_test = self.handle_date(X_test)
LS = list()
dol = ['genres', 'keywords', 'production_companies', 'production_countries','spoken_languages']
selected_list=['name','name','name','name','iso_639_1']
for i in range(len(dol)):
X_train = self.Loctolist(X_train, dol[i], selected_list[i])
X_test = self.Loctolist(X_test, dol[i], selected_list[i])
ls = self.fit_List(X_train, dol[i])
#here ls
X_train = self.transform_List(X_train, dol[i], ls)
X_test = self.transform_List(X_test, dol[i], ls)
# SECOND_WAY
# SECOND_WAY
# X_train,X_test=self.encod(X_train,X_test,ls)
# print(X_train['original_language'])
# print(X_test['original_language'])
X_train, col = self.drop_coloums(X_train)
#here col
X_test=self.drop_columns_test(X_test,col)
X_train,means = self.handlenull(X_train)
X_test = self.fill_null(X_test,means)
print(X_train.info())
print(X_test.info())
x_tre = self.preprocc(X_train['overview'])
X_tes = self.preprocc(X_test['overview'])
x_train_overview, vect = self.tf_ifd(x_tre)
x_test_overview = self.tf_idf_transform(X_tes, vect)
for i in vect.get_feature_names():
X_train[i]=x_train_overview[i]
X_test[i]=x_test_overview[i]
# X_train = pd.concat([X_train, x_train_overview], axis=1,join='inner')
# X_test = pd.concat([X_test, x_test_overview], axis=1,join='inner')
X_train = X_train.drop('overview', axis=1)
X_test = X_test.drop('overview', axis=1)
#
# text = X_train['overview']
# text1=X_test['overview']
# nltk.download('punkt')
# nltk.download('stopwords')
# nltk.download('wordnet')
# textt = text.apply(self.prepare)
# textt1 = text1.apply(self.prepare)
# X_train['cleaned_overview'] = textt.apply(self.clean_overview_text)
# X_test['cleaned_overview'] = textt1.apply(self.clean_overview_text)
# print(X_train['cleaned_overview'])
# print(X_test['cleaned_overview'])
# X_train = X_train.drop('overview', axis=1)
# X_test = X_test.drop('overview', axis=1)
print(X_train.info())
print(X_test.info())
# SECOND_WAY
# print(X_train.shape(),X_test.shape())
# X_train, X_test,lab = self.encod(X_train, X_test, LS)
self.convert(X_train)
X_train=self.convert_transform(X_train)
X_test=self.convert_transform(X_test)
X_train=self.one_hot_encode(X_train)
X_test=self.one_hot_encode(X_test)
#here lab
colm = X_train.columns
# print(X_train.info())
# print(X_test.info())
scalar = self.feature_scaling(X_train)
#here scalar
X_train=self.transform_scaling(X_train,scalar)
X_test=self.transform_scaling(X_test,scalar)
return X_train,X_test
def most_freq(self, X, col):
mostfreq = X[col].value_counts()
mostfreq = mostfreq[:1].idxmax()
#print(mostfreq)
return mostfreq
def handlenull(self, X):
mean_values={}
for i in X:
if X[i].dtypes == 'int64' or X[i].dtypes == 'float64':
mean_values[i]=X[i].median()
X[i] = X[i].fillna(value=X[i].median())
else:
mostfreq_value = self.most_freq(X, i)
mean_values[i]= mostfreq_value
X[i] = X[i].fillna(value=mostfreq_value)
return X,mean_values
def fill_null(self,X,means):
for i in X:
X[i] = X[i].fillna(value=means[i])
return X
#label encoding
#SECOND_WAY
# def encod(self,Xtr, Xte, ls):
# cols = ls
# for c in cols:
# print(c)
# lbl = LabelEncoder()
# lbl.fit(list(Xtr[c].values))
# import bisect
# le_classes = lbl.classes_.tolist()
# print(lbl.classes_)
# for s in Xte[c]:
# if s not in le_classes:
# bisect.insort_left(le_classes, s)
# lbl.classes_ = le_classes
# print(lbl.classes_)
# Xtr[c] = lbl.transform(list(Xtr[c].values))
# Xte[c] = lbl.transform(list(Xte[c].values))
# return Xtr,Xte# def encod(self,Xtr, Xte, ls):
# def encod(self,Xtr, Xte, ls):
# cols = ls
# for c in cols:
# print(c)
# lbl = LabelEncoder()
# lbl.fit(list(Xtr[c].values))
# import bisect
# le_classes = lbl.classes_.tolist()
# print(lbl.classes_)
# Xte[c]=Xte[c].map(lambda s: '<unseen>' if s not in le_classes else s)
# bisect.insort_left(le_classes, '<unseen>')
# lbl.classes_ = le_classes
# print(lbl.classes_)
# Xtr[c] = lbl.transform(list(Xtr[c].values))
# Xte[c] = lbl.transform(list(Xte[c].values))
# return Xtr,Xte
# def encode_test(self,x,ls,lbl):
#
# cols = ls
# for c in cols:
# import bisect
# le_classes = lbl.classes_.tolist()
# print(lbl.classes_)
# x[c]=x[c].map(lambda s: '<unseen>' if s not in le_classes else s)
# bisect.insort_left(le_classes, '<unseen>')
# lbl.classes_ = le_classes
# print(lbl.classes_)
# x[c] = lbl.transform(list(x[c].values))
# return x
def encod(self,Xtr, Xte, ls):
cols = ls
#print(cols)
# def encod(self,Xtr, Xte):
# cols = ['status', 'original_language', 'tagline','title','original_title']
x_new = pd.concat([Xtr, Xte], axis=0)
for c in cols:
lbl = LabelEncoder()
lbl.fit(list(x_new[c].values))
#Xte[c] = Xte[c].map(lambda s: '<unseen>' if s not in lbl.classes_ else s)
Xtr[c] = lbl.transform(list(Xtr[c].values))
Xte[c] = lbl.transform(list(Xte[c].values))
return Xtr,Xte,lbl
#preprocess list of dictionary
def convert(self,X):
labels = np.array(['English', 'other Languages'])
en_count = X['original_language'].value_counts()[0]
perc = np.array([en_count, sum(X['original_language'].value_counts()) - en_count])
plt.figure(figsize=(7, 7))
plt.pie(perc, labels=labels, autopct='%1.1f%%', startangle=90)
plt.show()
def convert_transform(self,X):
X['original_language'] = X['original_language'].map(lambda s: 'other Languages' if s !='en' else s)
return X
def one_hot_encode(self,X):
# ls = ['status', 'original_language']
# for i in ls:
X['status'] = X['status'].apply(lambda x: 1 if 'Released' == x else 0)
X['original_language'] = X['original_language'].apply(lambda x: 1 if 'en' == x else 0)
return X
#1- conver list of dictionary to list
def Loctolist(self,X, c, selectlist):
lsname = []
for indexD in X.index:
ListOFdictionay = json.loads(X[c][indexD])
for i in range(len(ListOFdictionay)):
lsname.append(ListOFdictionay[i][selectlist])
X[c][indexD] = lsname
lsname = []
return X
#2- convert list to one hot encoding(fit)
#SECOND_WAY
# def fit_List(self,X, c):
# ls2 = []
# for indexD in X.index:
# ListOFdictionay = X[c][indexD]
# ls2.append(len(ListOFdictionay))
# count=0
# rls=[]
# for red in reversed(list(set(ls2))):
# count+=ls2.count(red)
# if (count/len(X))*100 >=50:
# rls.append(red)
# n = max(rls)
# newl = list()
# for i in range(n):
# newl.append(c + str(i + 1))
# print(newl)
# print(len(newl))
# return newl
def fit_List(self,X, c):
ls = []
# ls2 = []
for indexD in X.index:
ListOFdictionay = X[c][indexD]
for i in range(len(ListOFdictionay)):
ls.append(ListOFdictionay[i])
# ls2.append(len(ListOFdictionay))
newl = list(set(ls))
rsult=list()
for i in newl:
x = ls.count(i)
# print(i + " : " + str(x))
#try columns
if x > 100:
rsult.append(i)
print(rsult)
print(len(rsult))
return rsult
#3- convert list to one hot encoding(transform)
#SECOND_WAY
# def transform_List(self,X, c, ls):
# for indexD in X.index:
# List_dictionay = X[c][indexD]
# for i in range(len(ls)):
# if i < len(List_dictionay):
# X.at[indexD, ls[i]] = List_dictionay[i]
# else:
# X.at[indexD, ls[i]]=""
# X = X.drop([c], axis=1)
# return X
def transform_List(self,X, c, ls):
for indexD in X.index:
#List_dictionay = X.at[indexD, ls[i]]
List_dictionay = X[c][indexD]
for i in range(len(ls)):
if ls[i] in List_dictionay:
X.at[indexD, ls[i]] = 1
else:
X.at[indexD, ls[i]] = 0
X = X.drop([c], axis=1)
return X
# scalling
    def feature_scaling(self,X_train):
        """Fit a MinMaxScaler on the training frame and return it.

        Fit only — apply the scaler with ``transform_scaling`` so the same
        ranges are used for train and test.
        """
        scaler = MinMaxScaler().fit(X_train)
        return scaler;
    def transform_scaling(self,x,scaler):
        """Apply a previously fitted scaler to *x*, preserving the original
        column names and index, and return the scaled frame."""
        x = pd.DataFrame(scaler.transform(x), columns=x.columns, index=x.index)
        return x;
def removedupli(self,X):
if(X.duplicated().sum()>0):
X=X.drop_duplicates()
return X
def handle_date(self,X):
ralease_date = pd.DatetimeIndex(X['release_date'], dayfirst=False)
X['Year'] = ralease_date.year
X['Month'] = ralease_date.month
X['Day'] = ralease_date.day
X = X.drop(['release_date'], axis=1)
return X
def handle_hompage(self,X):
print("precentage of null in homepage :"+str((X.homepage.isnull().sum().sum()/len(X.homepage))*100))
X['is_homepage'] = np.where(X['homepage'].isnull(), 0, 1)
# X = X.drop(['homepage'], axis=1)
return X
def correlation(self,data, threshold):
col_corr = set() # set of all the names of correlated columns
corr_matrix = data.corr()
for i in range(len(corr_matrix.columns)):
for j in range(i):
if abs(corr_matrix.iloc[i, j]) > threshold:
colname = corr_matrix.columns[i] # getting the name of columns
col_corr.add(colname)
#print(len(col_corr))
#print(col_corr)
return col_corr
    def feature_selection(self,x_train,y_train):
        """Pick features whose |correlation| with 'vote_average' exceeds 0.1.

        Prints the sorted correlations, draws a heatmap of the selected
        subset, and returns the selected column index with the target
        removed.
        """
        corr_matrix = pd.concat([x_train, y_train], axis=1).corr()
        corra = corr_matrix['vote_average'].sort_values(ascending=False)
        print(corra)
        top_feature = corr_matrix.index[abs(corr_matrix['vote_average']) > .1]
        print(top_feature)
        plt.subplots(figsize=(12, 14))
        top_corr = pd.concat([x_train, y_train], axis=1)[top_feature].corr()
        sns.heatmap(top_corr, annot=True)
        plt.show()
        # The last selected entry is the target itself (correlation 1 with
        # itself), so drop it before returning.
        top_feature=top_feature.delete(-1)
        return top_feature
def feature_selection_transform(self,x,top_feature):
x = x[top_feature]
return x
    def wrapper_feature_selection(self,x_train,y_train):
        """Sequential forward selection of 13 features (mlxtend SFS) scored
        by a depth-6 decision tree; prints and returns the fitted selector.

        NOTE(review): cv=0 means no cross-validation — the accuracy used for
        selection is the training accuracy.
        """
        sfs = SFS(DecisionTreeClassifier(max_depth=6,random_state=10),
                  k_features=(13),
                  forward=True,
                  floating=False,
                  scoring='accuracy',
                  n_jobs=-1,
                  cv=0)
        SFS_results=sfs.fit(x_train, y_train)
        print(SFS_results.k_feature_names_)
        print(len(SFS_results.k_feature_names_))
        return SFS_results
# Ordered_rank_feature = SelectKBest(score_func=chi2, k=30)
#
# Ordered_feature = Ordered_rank_feature.fit(x_train,y_train)
# print("features",Ordered_feature)
    def wrapper_feature_selection_transform(self, x, SFS_results):
        """Project *x* onto the columns chosen by wrapper_feature_selection,
        keeping the original index."""
        x= pd.DataFrame(SFS_results.transform(x), columns=SFS_results.k_feature_names_,index=x.index)
        return x
def calc_nulls(self,data):
df=data.isnull().sum()
null_percentage = (df / data.isnull().count() * 100)
print("Null values percantage...")
display_null_percent = pd.concat([df,null_percentage], axis=1, keys=['Total', 'Percent'])
print(display_null_percent)
print("Data Shape: %s\n" % (data.shape,))
return null_percentage
def drop_coloums(self,Xtrain):
percentage=self.calc_nulls(Xtrain)
col=list(['id','tagline', 'title', 'original_title', 'overview'])
for i in col:
print(i+" : "+ str(len(Xtrain[i].unique())/len(Xtrain[i])))
Xtrain=Xtrain.drop(col,axis=1)
for c in percentage.iteritems():
if c[1] > 50:
print(c[0]+':'+str(c[1])+'%')
col.append(c[0])
Xtrain = Xtrain.drop(c[0], axis=1)
print("Train Data Shape after columns removal: %s" % (Xtrain.shape,))
return Xtrain,col
def drop_columns_test(self,x,col):
for c in col:
x = x.drop(c, axis=1)
print(c)
print("Test Data Shape after columns removal: %s" % (x.shape,))
return x
    def preprocc(self,X):
        """Classic NLP cleanup for an iterable of texts.

        Pipeline per text: lowercase -> strip non-letters -> tokenize ->
        stem + lemmatize -> remove stop words -> re-join. Returns a
        single-column DataFrame of the cleaned strings.
        """
        courpos = []
        for i in X:
            text = i
            # 1- convert to lowercase
            text = text.lower()
            # 2- remove any special characters and digits
            text = re.sub("[^a-zA-Z]", " ", text)
            # 3- tokenization
            word_tokinzed = word_tokenize(text)
            # 4- lemmatization
            lemmatizer = WordNetLemmatizer()
            # NOTE(review): this first lemmatization is immediately
            # overwritten after stemming below — looks like leftover code.
            words = [Word(word).lemmatize() for word in word_tokinzed]
            st = PorterStemmer()
            word_stemmed = [st.stem(word) for word in word_tokinzed]
            words = [Word(word).lemmatize() for word in word_stemmed]
            # 5- stop-word removal (case-insensitive via casefold)
            filtered = [word for word in words if word.casefold() not in stopwords]
            # 6- join the tokens back into one string
            no_of_punc_words = " ".join(filtered)
            # list of cleaned texts
            courpos.append(no_of_punc_words)
        courposArray = np.array(courpos)
        data = pd.DataFrame(courposArray)
        return data
    def tf_ifd(self,X_train):
        """Fit a TF-IDF vectorizer (terms appearing in >= 200 documents) on
        the cleaned corpus in column 0 and return
        (transformed train DataFrame, fitted vectorizer).

        NOTE(review): get_feature_names() was removed in scikit-learn 1.2;
        newer versions require get_feature_names_out().
        """
        vectorizer = TfidfVectorizer(min_df=200, use_idf=True)
        vectors_train = vectorizer.fit_transform(X_train[0]).toarray()
        print("vectors_train : ", vectors_train.shape)
        X_train = pd.DataFrame(vectors_train, columns=vectorizer.get_feature_names())
        return X_train, vectorizer
    def tf_idf_transform(self,X_test, vectorizer):
        """Apply a fitted TF-IDF vectorizer to the test corpus (column 0)
        and return a DataFrame with the same vocabulary columns.

        NOTE(review): get_feature_names() was removed in scikit-learn 1.2;
        newer versions require get_feature_names_out().
        """
        vectors_test = vectorizer.transform(X_test[0]).toarray()
        X_test = pd.DataFrame(vectors_test, columns=vectorizer.get_feature_names())
        return X_test
# load the dataset
    def prepare(self,text):
        """Strip HTML markup from *text*; any non-string input becomes ''."""
        if isinstance(text, str):
            # remove HTML tags
            text = BeautifulSoup(text, 'html.parser').get_text()
            return text
        else:
            # NaN / None / numeric cells are normalized to an empty string
            return ''
# cleaning the overview text
    def clean_overview_text(self,text):
        """Lowercase, de-punctuate, stop-word-filter and lemmatize *text*,
        returning the cleaned string."""
        # convert to lowercase
        text = text.lower()
        # remove punctuation
        translator = str.maketrans('', '', string.punctuation)
        text = text.translate(translator)
        # tokenize and remove stop words
        words = nltk.word_tokenize(text)
        words = [word for word in words if word not in stopwords]
        # lemmatize words
        lemmatizer = WordNetLemmatizer()
        words = [lemmatizer.lemmatize(word) for word in words]
        # join words back into a string
        clean_text = ' '.join(words)
        return clean_text
# apply the function to the overview column
# def preprocessingwithnlp(self,column): | shrouk535r/Movie_Popularity_Prediction | preprocessing.py | preprocessing.py | py | 18,715 | python | en | code | 0 | github-code | 50 |
72014085594 | from django.urls import path
from .views import *
urlpatterns=[
path('superregister/',superRegister),
path('superlogin/',superLogin),
path('superprofile/',superProfile),
path('adminregister/',adminRegister),
path('verify/<auth_token>',verify),
path('adminlogin/',adminLogin),
path('adminprofile/',adminProfile),
path('adminlist/',adminList.as_view(),name='adminlist'),
path('adminupdate/<pk>',adminUpdate.as_view(),name='adminupdate'),
path('admindelete/<pk>',AdminDelete.as_view(),name='admindelete'),
path('department/',department),
path('doctor/',doctor),
path('signup/',signUpView),
path('userlogin/',userLogin),
path('appointment/',appointments,),
path('appointmentlist/',appointment_list),
path('singleappointment/<int:id>',singleAppointmentList),
path('changestatus/<int:id>',statusChange),
] | AshikR7/zybo | zybo/zyboapp/urls.py | urls.py | py | 896 | python | de | code | 0 | github-code | 50 |
30205180047 | import os
from flask import Flask, render_template, request, url_for, send_file, redirect, flash
from iok import AwesomeClient, KnowledgeGraph, NodeType, ResourceType
from networkx.readwrite import json_graph
import networkx as nx
import matplotlib.pyplot as plt
from flask_github import GitHub
from dotenv import load_dotenv
from io import BytesIO
# fix crash
plt.switch_backend("Agg")
# load .env
load_dotenv()
# hack
os.environ["SERVER_SOFTWARE"] = "Google App Engine/"
app = Flask(__name__)
# load in
try:
app.config["GITHUB_CLIENT_ID"] = os.environ["GITHUB_CLIENT_ID"]
app.config["GITHUB_CLIENT_SECRET"] = os.environ["GITHUB_CLIENT_SECRET"]
except Exception as e:
raise e
github = GitHub(app)
GRAPH = None
def register_graph():
global GRAPH
if not GRAPH:
try:
GRAPH = KnowledgeGraph()
except:
raise Exception("Failed to load existing graph from file")
@app.route("/")
def index():
return redirect(url_for("get_graph"))
@app.route("/login")
def login():
return github.authorize()
@app.route("/contribute")
@github.authorized_handler
def contribute(oauth_token):
if oauth_token is None:
return redirect(url_for("login"))
return render_template("iok.html", contribute_mode=True)
@app.route("/graph")
def get_graph():
if GRAPH:
return render_template("iok.html", graph=GRAPH.graph)
else:
return render_template("iok.html")
@app.route("/graph/data")
def get_graph_data():
return send_file("static/graph.json")
@app.route("/graph/static")
def get_static():
return send_file("static/iok.png")
def create_img_stream():
nx.draw(GRAPH.graph, with_labels=True)
img = BytesIO() # file-like object for the image
plt.savefig(img) # save the image to the stream
img.seek(0) # writing moved the cursor to the end of the file, reset
plt.clf() # clear pyplot
return img
@app.route("/handle_data", methods=["POST"])
def handle_data():
register_graph()
assert GRAPH
node_type = int(request.form["nodeType"])
if node_type == NodeType.TOPIC:
name = request.form["topicName"]
assert name
desc = request.form["topicDescription"]
assert desc
parents = request.form["parents"]
parents = parents.split()
children = request.form["children"]
children = children.split()
GRAPH.add_topic(name, desc, parents, children)
return send_file(create_img_stream(), mimetype="image/png")
elif node_type == NodeType.RESOURCE:
name = request.form["resourceName"]
resource_type = int(request.form["resourceType"])
if resource_type == ResourceType.DESCRIPTION:
desc = request.form["resourceDescription"]
GRAPH.add_description(name, desc)
else: # assume no attacks haha
title = request.form["resourceTitle"]
link = request.form["resourceLink"]
GRAPH.add_link(name, title, link, resource_type)
return send_file(create_img_stream(), mimetype="image/png")
# projectpath = request.form['projectFilepath']
# your code
# return a response
return (
f"wow fail :( {node_type} == {NodeType.TOPIC} ? {node_type == NodeType.TOPIC}"
)
if __name__ == "__main__":
# This is used when running locally only. When deploying to Google App
# Engine, a webserver process such as Gunicorn will serve the app. This
# can be configured by adding an `entrypoint` to app.yaml.
app.run(host="0.0.0.0", port=8081, debug=True)
| indexofknowledge/iok | legacy/main.py | main.py | py | 3,579 | python | en | code | 9 | github-code | 50 |
38021332858 | """
*packageName :
* fileName : 11728.배열 합치기(2)
* author : ipeac
* date : 2022-10-05
* description :
* ===========================================================
* DATE AUTHOR NOTE
* -----------------------------------------------------------
* 2022-10-05 ipeac 최초 생성
"""
length_a, length_b = map(int, input().split())
# length_a, length_b = 2, 1
a = list(map(int, input().split()))
# a = [4, 7]
b = list(map(int, input().split()))
# b = [1]
pointer_a = 0
pointer_b = 0
answer = []
while length_a != pointer_a or length_b != pointer_b:
if pointer_a == length_a:
answer.append(b[pointer_b])
pointer_b += 1
elif pointer_b == length_b:
answer.append(a[pointer_a])
pointer_a += 1
else:
if a[pointer_a] > b[pointer_b]:
answer.append(b[pointer_b])
pointer_b += 1
elif a[pointer_a] <= b[pointer_b]:
answer.append(a[pointer_a])
pointer_a += 1
print(*answer)
| guqtls14/python-algorism-study | 박상준/solved/투포인터/11728.배열 합치기(2).py | 11728.배열 합치기(2).py | py | 1,068 | python | en | code | 0 | github-code | 50 |
38627596872 | # thanks to www.cs.uml.edu/~cgao
import smbus
import sys
import getopt
import time
import pigpio
servos = [23, 24]
u = 1.2
x = 600
y = 1900
limit_y_bottom = 1900
limit_y_top = 1300
limit_y_level = 1900
limit_x_left = 600
limit_x_right = 2400
pi = pigpio.pi()
def head(y):
while y > limit_y_top:
pi.set_servo_pulsewidth(servos[1], int(y))
print("Servo {} {} micro pulses".format(servos,y))
time.sleep(u)
temperature()
y -= 300
while y < limit_y_bottom:
pi.set_servo_pulsewidth(servos[1], int(y))
print("Servo {} {} micro pulses".format(servos,y))
time.sleep(u)
temperature()
y += 300
while y > limit_y_level:
pi.set_servo_pulsewidth(servos[1], 2000)
print("Servo {} {} micro pulses".format(servos,y))
time.sleep(u)
y -= 300
try:
while True:
while x < limit_x_right:
pi.set_servo_pulsewidth(servos[0], int(x))
print("Servo {} {} micro pulses".format(servos,x))
time.sleep(u)
x += 450
head(y)
while x > limit_x_left:
pi.set_servo_pulsewidth(servos[0], int(x))
print("Servo {} {} micro pulses".format(servos,x))
time.sleep(u)
x -= 450
head(y)
except KeyboardInterrupt:
for s in servos:
pi.set_servo_pulsewidth(s, 0)
pi.i2c_close(handle)
pi.stop()
| chutasano/laser_controller | pi.py | pi.py | py | 1,456 | python | en | code | 0 | github-code | 50 |
20223870527 | from django.urls import path
from cafe.api import views as api_views
urlpatterns = [
path('urunler/',api_views.UrunListCreateAPIView.as_view(),name='urun-listesi'),
path('urunler/<int:pk>',api_views.UrunDetailAPIView.as_view(),name='urun-bilgileri'),
path('siparisler/',api_views.SiparisListCreateAPIView.as_view(),name='siparis-listesi'),
path('siparisler/<int:pk>',api_views.SiparisDetailAPIView.as_view(),name='siparis-bilgileri'),
] | hasanbakirci/django-heroku-deploy | cafe/api/urls.py | urls.py | py | 454 | python | en | code | 0 | github-code | 50 |
40653836761 | import argparse
import json
import os
import os.path as osp
import numpy as np
from cityscapesscripts.preparation.json2labelImg import json2labelImg
from PIL import Image
def convert_json_to_label(json_file):
    """Render one Cityscapes polygon annotation into a *_labelTrainIds.png.

    For files under the training split, additionally counts the pixels of
    each of the 19 trainId classes and returns
    ``{class_id: pixel_count, ..., 'file': label_path}``; returns None for
    non-training files.
    """
    label_file = json_file.replace('_polygons.json', '_labelTrainIds.png')
    json2labelImg(json_file, label_file, 'trainIds')

    if 'train/' in json_file:
        pil_label = Image.open(label_file)
        label = np.asarray(pil_label)
        sample_class_stats = {}
        # Only record classes that actually appear in this image.
        for c in range(19):
            n = int(np.sum(label == c))
            if n > 0:
                sample_class_stats[int(c)] = n
        sample_class_stats['file'] = label_file
        return sample_class_stats
    else:
        return None
def recursive_glob(rootdir=".", suffix=""):
    """Walk *rootdir* recursively and return the paths of all files whose
    name ends with *suffix*.

    :param rootdir: the root directory to search
    :param suffix: the filename suffix to match
    """
    matches = []
    for dirpath, _, filenames in os.walk(rootdir):
        for filename in filenames:
            if filename.endswith(suffix):
                matches.append(os.path.join(dirpath, filename))
    return matches
def parse_args():
    """Parse the CLI: positional cityscapes root, optional --gt-dir
    (default 'gtFine') and -o/--out-dir (defaults to the data root)."""
    parser = argparse.ArgumentParser(
        description='Convert Cityscapes annotations to TrainIds')
    parser.add_argument('cityscapes_path', help='cityscapes data path')
    parser.add_argument('--gt-dir', default='gtFine', type=str)
    parser.add_argument('-o', '--out-dir', help='output path')
    args = parser.parse_args()
    return args
def save_class_stats(out_dir, sample_class_stats):
    """Write per-image class statistics to three JSON files in *out_dir*.

    - sample_class_stats.json: list of per-file stats (None entries removed)
    - sample_class_stats_dict.json: label-file path -> {class: pixel count}
    - samples_with_class.json: class -> [(label-file path, pixel count), ...]

    Note: the 'file' key is popped from each stats dict, mutating the input.
    """
    sample_class_stats = [entry for entry in sample_class_stats
                          if entry is not None]
    with open(osp.join(out_dir, 'sample_class_stats.json'), 'w') as of:
        json.dump(sample_class_stats, of, indent=2)

    stats_by_file = {}
    for stats in sample_class_stats:
        stats_by_file[stats.pop('file')] = stats
    with open(osp.join(out_dir, 'sample_class_stats_dict.json'), 'w') as of:
        json.dump(stats_by_file, of, indent=2)

    by_class = {}
    for filename, stats in stats_by_file.items():
        for cls, count in stats.items():
            by_class.setdefault(cls, []).append((filename, count))
    with open(osp.join(out_dir, 'samples_with_class.json'), 'w') as of:
        json.dump(by_class, of, indent=2)
def main():
    """Drive the conversion: render labelTrainIds images for every polygon
    file, dump aggregated class statistics, and write train/val/test split
    file lists into the output directory."""
    args = parse_args()
    cityscapes_path = args.cityscapes_path
    out_dir = args.out_dir if args.out_dir else cityscapes_path
    os.makedirs(out_dir, exist_ok=True)

    gt_dir = osp.join(cityscapes_path, args.gt_dir)

    poly_files = []
    for poly in recursive_glob(gt_dir, '_polygons.json'):
        # NOTE(review): recursive_glob already returns paths rooted at
        # gt_dir; this extra join is only a no-op when gt_dir is absolute
        # — verify with a relative cityscapes_path.
        poly_file = osp.join(gt_dir, poly)
        poly_files.append(poly_file)

    only_postprocessing = False
    if not only_postprocessing:
        sample_class_stats = []
        for poly in poly_files:
            sample_class_stats.append(convert_json_to_label(poly))
    else:
        # Debug path: reuse the stats from a previous run instead of
        # re-rendering all label images.
        with open(osp.join(out_dir, 'sample_class_stats.json'), 'r') as of:
            sample_class_stats = json.load(of)
    save_class_stats(out_dir, sample_class_stats)

    split_names = ['train', 'val', 'test']
    for split in split_names:
        filenames = []
        for poly in recursive_glob(
                osp.join(gt_dir, split), '_polygons.json'):
            filenames.append(poly.replace('_gtFine_polygons.json', ''))
        with open(osp.join(out_dir, f'{split}.txt'), 'w') as f:
            f.writelines(f + '\n' for f in filenames)
if __name__ == '__main__':
main()
| brdav/refign | tools/convert_cityscapes.py | convert_cityscapes.py | py | 3,645 | python | en | code | 66 | github-code | 50 |
42634397603 | import time
import queue
import threading
q = queue.Queue(10)
def productor(i):
while True:
q.put("厨师 {} 做的包子!".format(i))
time.sleep(2)
def consumer(j):
while True:
print("顾客 {} 吃了一个 {}".format(j, q.get()))
time.sleep(1)
for i in range(3):
t = threading.Thread(target=productor, args=(i,))
t.start()
for j in range(10):
v = threading.Thread(target=consumer, args=(j,))
v.start()
| AaronYang2333/CSCI_570 | records/07-24/productor_customer.py | productor_customer.py | py | 470 | python | en | code | 107 | github-code | 50 |
39326730641 |
'''
https://www.practicepython.org/exercise/2014/03/12/06-string-lists.html
Exercise 6 (and Solution)
Ask the user for a string and print out whether this string is a palindrome or not. (A palindrome is a string that reads the same forwards and backwards.)
'''
value = str(input("Enter a beautiful string: "))
revvalue=value[::-1]
if(value == revvalue):
print("The message is palindrome")
else:
print("the message is not palindrome")
| 0xhuesca/PracticePython | 06-string-lists.py | 06-string-lists.py | py | 452 | python | en | code | 0 | github-code | 50 |
10510692099 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
"""TermFeed 0.0.11
Usage:
feed
feed <rss-url>
feed -b
feed -a <rss-url> [<category>]
feed -d <rss-url>
feed -t [<category>]
feed -D <category>
feed -R
feed (-h | --help)
feed --version
Options:
List feeds from the default category 'General' of your library.
<URL> List feeds from the provided url source.
-b Browse feed by category avaialble in the database file.
-a URL Add new url <rss-url> to database under [<category>] (or 'General' otherwise).
-d URL Delete <rss-url> from the database file.
-t See the stored categories in your library, or list the URLs stored under <category> in your library.
-D TOPIC Remove entire cateogry (and its urls) from your library.
-R Rebuild the library from the url.py
-h --help Show this screen.
"""
from __future__ import print_function
import sys
import webbrowser
import feedparser
import re
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
import termfeed.dbop as dbop
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def _connected():
    """Crude online check: return True when http://google.com is reachable.

    NOTE(review): the bare except also swallows non-network errors
    (e.g. KeyboardInterrupt on Python 2-style urllib).
    """
    host = 'http://google.com'
    try:
        urlopen(host)
        return True
    except:
        return False
def open_page(url, title):
print(bcolors.WARNING +
'\topening ... {}\n'.format(title.encode('utf8')) + bcolors.ENDC)
# open page in browser
webbrowser.open(url)
def print_feed(zipped):
for num, post in zipped.items():
print(bcolors.OKGREEN + '[{}] '.format(num) + bcolors.ENDC, end='')
print('{}'.format(post.title.encode('utf8')))
def print_desc(topic, txt):
try:
print(bcolors.WARNING + '\n\n{}:'.format(topic) + bcolors.ENDC)
except UnicodeEncodeError:
pass
print(bcolors.BOLD + '\n\t{}'.format(txt.encode('utf8')) + bcolors.ENDC)
def open_it():
try:
txt = '\n\n\t Open it in browser ? [y/n] '
try:
q = raw_input(txt) # python 2
except NameError:
q = input(txt) # python 3
print('\n')
if q == 'y':
return True
except KeyboardInterrupt:
print('\n')
return False
def clean_txt(txt):
    """Strip HTML markup and decode the few HTML entities feeds use.

    Fix: the previous version contained no-op replacements such as
    replace('<', '<') and replace('"', '"') — leftovers of entity decoding.
    The entity forms (&lt;, &gt;, &quot;, &nbsp;) are what actually remain
    in feed descriptions once the surrounding tags have been removed.
    """
    cleaned = re.sub(r'<.*?>', '', txt)  # remove html tags
    cleaned = cleaned.replace('&lt;', '<').replace('&gt;', '>')  # retain html code tags
    cleaned = cleaned.replace('&quot;', '"')
    cleaned = cleaned.replace('’', "'")
    cleaned = cleaned.replace('&nbsp;', ' ')  # non-breaking space
    return cleaned
def _continue():
try:
msg = """\n\nPress: Enter to continue, ... [NUM] for short description / open a page, ... or CTRL-C to exit: """
print(bcolors.FAIL + msg + bcolors.ENDC, end='')
# kb is the pressed keyboard key
try:
kb = raw_input()
except NameError:
kb = input()
return kb
except KeyboardInterrupt:
# return False
exit()
def parse_feed(url):
    """Parse *url* with feedparser and return the feed object, or None
    (after printing a warning) when the feed yields no entries."""
    d = feedparser.parse(url)
    # a feed with no entries is treated as an invalid RSS URL
    if d.entries:
        return d
    else:
        print("INVALID URL feed: {}".format(url))
        return None
def fetch_feeds(urls):
for i, url in enumerate(urls):
d = parse_feed(url)
if d is None:
continue # to next url
# feeds source
l = len(urls) - 1
print(
bcolors.HEADER + "\n {}/{} SOURCE>> {}\n".format(i, l, url) + bcolors.ENDC)
# print out feeds
zipped = dict(enumerate(d.entries))
def recurse(zipped):
print_feed(zipped)
kb = _continue() # keystroke listener
if kb:
user_selected = kb is not '' and kb in str(zipped.keys())
if user_selected:
# to open page in browser
link = zipped[int(kb)].link
title = zipped[int(kb)].title
try:
desc = zipped[int(kb)].description
desc = clean_txt(desc)
print_desc(title, desc)
except AttributeError:
print('\n\tNo description available!!')
if open_it():
open_page(link, title)
else:
print(
bcolors.BOLD + 'Invalid entry ... {} '.format(kb) + bcolors.ENDC)
# repeat with same feeds and listen to kb again
recurse(zipped)
recurse(zipped)
def topic_choice(browse):
if browse:
topics = dbop.topics()
tags = {}
for i, tag in enumerate(topics):
tags[i] = tag
print("{}) {}".format(i, tags[i]))
try:
m = '\nChoose the topic (number)? : '
try: # python 2
uin = raw_input(m)
except NameError: # python 3
uin = input(m)
uin = int(uin)
topic = tags[uin]
except: # catch all exceptions
print('\nInvalid choice!')
topic = 'General'
else:
topic = 'General'
urls = dbop.read(topic)
return urls
def validate_feed(url):
if parse_feed(url):
return url
else:
exit()
from .support.docopt import docopt
def main():
args = docopt(
__doc__, version="TermFeed 0.0.11 (with pleasure by: Aziz Alto)")
# parse args
browse = args['-b']
external = args['<rss-url>']
add_link = args['-a']
category = args['<category>']
delete = args['-d']
remove = args['-D']
tags = args['-t']
rebuild = args['-R']
fetch = True
# get rss urls
if external:
urls = [validate_feed(external)]
else:
urls = topic_choice(browse)
# if not listing feeds
if add_link or delete or category or tags or rebuild or remove:
fetch = False
# updating URLs library
if add_link:
url = validate_feed(add_link)
if category:
dbop.add_link(url, category)
else:
dbop.add_link(url)
if delete:
dbop.remove_link(delete)
if remove:
dbop.delete_topic(remove)
# display resource contents
if tags:
if category:
dbop.browse_links(category)
else:
dbop.print_topics()
if rebuild:
dbop.rebuild_library()
if fetch:
fetch_feeds(urls)
# start
if __name__ == '__main__':
if not _connected():
print('No Internet Connection!')
exit()
main()
| iamaziz/TermFeed | termfeed/feed.py | feed.py | py | 6,913 | python | en | code | 249 | github-code | 50 |
35808692145 | from enum import Enum
Color = Enum("Color", "WHITE GREY BLACK")
class Vertex:
color = Color.WHITE
def __init__(self, n):
self.name = n
def search(vList, vListItem, v:Vertex):
v.color = Color.GREY
for vi in vListItem:
if vi.color == Color.WHITE: return str(v.name)+ " " + search(vList, vList[vi.name], vi)
v.color = Color.BLACK
return str(v.name)
if __name__ == "__main__":
vs = [Vertex(x) for x in range(6)]
vList = [
None,
[vs[2], vs[5], vs[4]], #1
[vs[1], vs[3]], #2
[vs[2], vs[4], vs[5]], #3
[vs[3], vs[1], vs[5]], #4
[vs[4], vs[3], vs[1]], #5
]
print(search(vList, vList[1], vs[1])[::-1])
| stPhoenix/algoritms_lab2 | main.py | main.py | py | 772 | python | en | code | 0 | github-code | 50 |
21161588387 | USE_LATEX = False
import sys
if USE_LATEX:
sys.path.append("../FresnelFDTD/")
import mplLaTeX
import matplotlib as mpl
mpl.rcParams.update(mplLaTeX.params)
from matplotlib import pyplot as plt
import numpy as np
from scipy import stats
import json
def computeKy( data, fdir ):
y = np.array( data["monitor"]["inside"]["pos"] )
amp = np.array( data["monitor"]["inside"]["data"] )
amp /= np.max(amp)
# Shift y such that center is zero
y -= y[len(y)/2]
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.plot( y, amp )
if ( not USE_LATEX ):
fig2.show()
fit = np.arccos(amp)
fit[y<0.0] = -fit[y<0.0]
slope, interscept, pvalue, rvalue, stderr = stats.linregress( y, fit )
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot( y, fit, 'ko', fillstyle="none" )
ax.plot( y, slope*y + interscept, 'k--' )
if ( USE_LATEX ):
fig.savefig( fdir+"/kyFit.pdf", bbox_inches="tight" )
else:
fig.show()
print ("Transverse wave vector: %.3E"%(slope))
def plotQFactor( data, fdir ):
fig = plt.figure()
ax = fig.add_subplot(111)
freq = np.array( data["monitor"]["freq"] )
Q = np.array( data["monitor"]["qFactor"] )
ax.plot( freq, Q, 'k')
ax.set_xlabel("Frequency")
ax.set_ylabel("Quality factor Im($\omega$)/Re($\omega$)")
if ( USE_LATEX ):
fname = fdir +"/Qfactor.pdf"
fig.savefig( fname, bbox_inches="tight")
else:
fig.show()
pos = np.argmax(Q)
print ("Highest mode frequency: %.4E"%(freq[pos]))
def main(argv):
if ( len(argv) != 2 ):
print ("Usage: python analyse.py --dfile=<data.json> --fdir=<figdir>")
return 1
# Parse arguments
dfile = ""
fdir = ""
for arg in argv:
if ( arg.find("--dfile=") != -1 ):
dfile = arg.split("--dfile=")[1]
elif ( arg.find("--fdir=") != -1 ):
fdir = arg.split("--fdir=")[1]
# Consistency check
if ( dfile == "" ):
print ("No data file specified...")
return 1
elif ( fdir == "" ):
print ("No figure directory specified...")
return 1
# Read data
try:
infile = open( dfile, 'r' )
data = json.load(infile)
except:
print ("Could not open file %s"%(dfile))
return 1
# Compute the transverse k-vector
computeKy( data, fdir )
plotQFactor( data, fdir )
if ( not USE_LATEX ):
plt.show()
if __name__ == "__main__":
main(sys.argv[1:])
| davidkleiven/OptiX | SlabGuide/analyse.py | analyse.py | py | 2,539 | python | en | code | 1 | github-code | 50 |
74367003354 | # This is just a basic pyspark "smoke" test
import os
from pyspark.sql import SparkSession
# os.environ['SPARK_MASTER_IP'] = "127.0.0.1"
os.environ['SPARK_LOCAL_IP'] = "127.0.0.1"
# os.environ['HADOOP_HOME'] = ''
spark = SparkSession\
.builder\
.getOrCreate()
print("Let's sum the numbers from 0 to 100")
df = spark.range(101)
print(df.groupBy().sum('id').collect())
print(df)
| ivangeorgiev/pytest_dbconnect | try_pyspark.py | try_pyspark.py | py | 410 | python | en | code | 2 | github-code | 50 |
39722434942 | # Carter Strate
# CSCI 102 - Section D
# Week 10 Lab
# References: None
# Time: 40 minutes
import csv
l = []
with open('formations.csv','r') as file:
fileReader = csv.reader(file)
for row in fileReader:
l.append(row)
for line in l:
if line == l[0]:
line.insert(1,'Start Depth')
line.insert(2,'End Depth')
line.insert(3,'Difference in Depth')
else:
depths = line[0]
depths = depths.split('-')
sDepth = float(depths[0])
fDepth = float(depths[1])
dDepth = '{:.2f}'.format(round(fDepth - sDepth,2))
line.insert(1,sDepth)
line.insert(2,fDepth)
line.insert(3,dDepth)
with open('formations_parsed.csv','w') as fileP:
filePwriter = csv.writer(fileP)
for line in l:
filePwriter.writerow(line)
| cjstrate/IntroToPython | 102/Depth/Week10-depth_range.py | Week10-depth_range.py | py | 812 | python | en | code | 0 | github-code | 50 |
26378238100 | #
'''
1. 아이디어 :
2. 시간복잡도 :
3. 자료구조 :
'''
"""
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children if children is not None else []
"""
class Solution:
    def cloneTree(self, root: 'Node') -> 'Node':
        """Deep-copy an N-ary tree and return the new root.

        Walks the original tree depth-first; for every child it allocates a
        fresh Node with the same value and recurses, so the clone shares no
        nodes with the input. Returns None for an empty tree.

        Fix: removed a leftover debug ``print(node.val)`` on every visit.
        """
        if not root:
            return
        clone = Node(root.val, [])

        def dfs(node, cnode):
            if not node:
                return
            for child in node.children:
                copied = Node(child.val, [])
                cnode.children.append(copied)
                dfs(child, copied)

        dfs(root, clone)
        return clone
21745678736 | # Microsoft Cognitive services
#
# https://azure.microsoft.com/en-us/services/cognitive-services/
# Get API key: https://azure.microsoft.com/en-us/try/cognitive-services/?api=computer-vision
# CV API Documentation: https://goo.gl/sc2Rb3
import json
import os
import requests
api_key = "xx"
api_url = "https://westeurope.api.cognitive.microsoft.com/vision/v1.0/analyze"
headers = {
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': api_key
}
param = {
'visualFeatures': 'Categories,Description,Faces,Adult',
'details': 'Celebrities'
}
def get_pic_info(pic_url, pic_name):
body = {'url': f'{pic_url}'}
r = requests.post(api_url, json=body, headers=headers, params=param)
r.raise_for_status()
response_json = json.loads(r.text)
f_info = os.path.splitext(pic_name)
with open(f"{f_info[0]}.json", 'w') as file:
json.dump(response_json, file, indent=4, ensure_ascii=False)
return response_json
def has_faces(data: dict):
    """Return True when the Vision API response contains at least one face.

    *data* is the parsed JSON response; its 'faces' key holds a list of
    detected-face records.
    """
    # bool() of the list is the idiomatic replacement for the previous
    # `if len(...) != 0: return True else: return False` chain.
    return bool(data['faces'])
def get_face_rect(face_data: dict):
    """Convert a Vision API faceRectangle record into
    [left, top, right, bottom] pixel coordinates."""
    rect = face_data['faceRectangle']
    left, top = rect['left'], rect['top']
    return [left, top, left + rect['width'], top + rect['height']]
def get_caption(data):
    """Return the first caption text from a Vision API response, or the
    placeholder "No caption" when the service produced none."""
    captions = data['description']['captions']
    if not captions:
        return "No caption"
    return captions[0]['text']
def main():
print("Uruchom plik foto_opis.py (jedno zdjęcie przykładowe), "
"lub plik opisuj_fotki z kilkoma zdjęciami :)")
if __name__ == '__main__':
main()
| Slawecky/Python | Day_15/ms_cv.py | ms_cv.py | py | 1,657 | python | en | code | 0 | github-code | 50 |
75130563355 | # Standard imports
import numpy as np
import cv2
winName = "Cell fish"
def onTunaFishTrackbar(im, brightness, useEqualize=1, blursSize=21, th1=None):
    """Segment the dark "tuna" region of *im* and show intermediate windows.

    Blurs and (optionally) equalizes the *brightness* (HSV V) channel,
    thresholds it to keep dark pixels, retains the largest external contour,
    and displays the resulting mask and outlined image.

    NOTE(review): despite the name, this is also invoked manually from the
    main loop with the full argument list; as an actual OpenCV trackbar
    callback it would receive only a single int position.
    """
    winName = "Cell fish"
    tmp = brightness
    if (blursSize >= 3):
        # Gaussian kernels must have an odd size; round down to the next odd.
        blursSize += (1 - blursSize % 2)
        tmp = cv2.GaussianBlur(tmp, (blursSize, blursSize), 0)
    if (useEqualize):
        tmp = cv2.equalizeHist(tmp)
    cv2.imshow("Brightness Preprocess", tmp)
    #cv2.imwrite("../img/BrightnessPreprocess.png", tmp)
    # threshold to select dark tuna (inverse binary: dark pixels become 255)
    ret, tmp = cv2.threshold(tmp, th1, 255, cv2.THRESH_BINARY_INV)
    #ret, tmp = cv2.threshold(tmp, th1, 255, cv2.THRESH_BINARY)
    cv2.imshow('threshold', tmp)
    # find external contours ignores holes in the fish
    # NOTE(review): the 3-value return is OpenCV 3.x API; OpenCV 4 returns 2.
    im2, contours, hierarchy = cv2.findContours(tmp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    dst = im.copy()
    #dst = src
    # print(contours)
    maxDim = 0
    largest = -1
    for i in range(len(contours)):
        # draw all contours in red
        # NOTE(review): drawing index `largest` (starts at -1 = all contours)
        # inside this loop looks like it was meant to be `i`; confirm intent.
        cv2.drawContours(dst, contours, largest, (0, 0, 255), 1)
        dim = len(contours[i]) # area is more accurate but more expensive
        if (dim > maxDim):
            maxDim = dim
            largest = i
    # The tuna as binary mask
    #cv::Mat fishMask = cv::Mat::zeros(src.size(), CV_8UC1)
    ##The tuna as contour
    #vector<cv::Point> theFish
    # NOTE(review): uses the module-global `src`, not the `im` parameter —
    # this only works because the script calls this function with src itself.
    img_mask = np.zeros(src.shape, np.uint8)
    if (largest >= 0):
        theImg = contours[largest]
        # draw selected contour in bold green
        cv2.polylines(dst, theImg, True, (0, 255,0), 2)
        # draw the fish into its mask (thickness -1 = filled)
        cv2.drawContours(img_mask, contours, largest, 255, -1)
    cv2.imshow("Result Mask", img_mask)
    cv2.imshow("Result Contour", dst)
    #cv2.imwrite("../img_mask.png", img_mask)
    #cv2.imwrite("../result.png", dst)
# Interactive driver: load the test image, set up trackbars, and re-run the
# segmentation every frame until ESC (key code 27) is pressed.
if __name__ == '__main__':
    #src = cv2.imread("Vio4.PNG")
    src = cv2.imread("test.png")
    cv2.imshow(winName, src)
    # Work in HSV so the V (brightness) plane can be thresholded on its own.
    dst = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
    hsv_planes = cv2.split(dst)
    #hue = hsv_planes[0]
    #saturation = hsv_planes[1]
    brightness = hsv_planes[2]
    # default settings for params
    useEqualize = 1
    #blursSize = 21
    #th1 = int(33.0 * 255 / 100) # tuna is dark than select dark zone below 33% of full range
    blursSize = 10
    th1 = int(55 * 255 / 100) # tuna is dark than select dark zone below 33% of full range
    cv2.createTrackbar("Equalize", winName, useEqualize, 1, onTunaFishTrackbar)
    cv2.createTrackbar("Blur Sigma", winName, blursSize, 100, onTunaFishTrackbar)
    cv2.createTrackbar("Threshold", winName, th1, 255, onTunaFishTrackbar)
    while(1):
        #--- Using cv2.getTrackbarPos() to get values from the slider ---
        useEqualize = cv2.getTrackbarPos('Equalize', winName)
        blurSize = cv2.getTrackbarPos('Blur Sigma', winName)
        th1 = cv2.getTrackbarPos('Threshold', winName)
        #if len(brightness) > 0:
        #    print("brightness ok")
        #if len(src) > 0:
        #    print("src ok")
        onTunaFishTrackbar(src, brightness, useEqualize, blurSize, th1)
        #--- Press ESC to quit ---
        k = cv2.waitKey(1) #& 0xFF
        if k == 27:
            break
    #cv2.waitKey(0)
    #onTunaFishTrackbar(src, 0, 0, useEqualize, blursSize, th1, brightness=brightness)
    #cv2.waitKey(0)
    cv2.destroyAllWindows()
| jev26/Hackuarium-GenomicIntegrity-2018 | NucleusDetection/Trash/Tuna.py | Tuna.py | py | 3,394 | python | en | code | 0 | github-code | 50 |
42093156148 | import gtk
import gtkhex
import gobject
from umit.pm.core.i18n import _
from umit.pm.core.logger import log
class HexDocument(gtkhex.Document):
    """gtkhex.Document subclass that re-emits document changes as a
    Python-level 'changed' GObject signal."""
    __gtype_name__ = "PyGtkHexDocument"
    # Signal payload: (change-data object, push_undo flag).
    __gsignals__ = {
        'changed' : (gobject.SIGNAL_RUN_LAST,
                     gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,
                                         gobject.TYPE_BOOLEAN)),
    }
    def __init__(self):
        super(HexDocument, self).__init__()
    def do_document_changed(self, cdata, push_undo):
        # Forward the native change notification through our custom signal.
        self.emit('changed', cdata, push_undo)
class HexView(gtkhex.Hex):
    """Hex editor widget: gtkhex.Hex plus a right-click cut/copy/paste menu
    and convenience properties (payload / font / bpl)."""
    __gtype_name__ = "PyGtkHexView"
    def __init__(self):
        self._document = HexDocument()
        super(HexView, self).__init__(self._document)
        self.show_offsets(True)
        self.set_geometry(8, 5)
        self.set_read_only_mode(True)
        # When False, the next document-changed callback is suppressed once
        # (set while programmatically replacing the payload).
        self._trapped = True
        # Optional user hook invoked as (document, cdata, push_undo).
        self.changed_callback = None
        self._document.connect('changed', self.__wrap_on_changed)
        self.set_events(gtk.gdk.BUTTON_PRESS_MASK)
        # First two children are the hex pane and the ASCII pane.
        self.hdisp, self.adisp = self.get_children()[0:2]
        #self.hdisp.add_events(gtk.gdk.BUTTON_PRESS_MASK)
        #self.adisp.add_events(gtk.gdk.BUTTON_PRESS_MASK)
        # Hex / Ascii (typo argument: 0 = hex pane, 1 = ascii pane)
        self.hdisp.connect('button-press-event', self.__on_button_press, 0)
        self.adisp.connect('button-press-event', self.__on_button_press, 1)
    def __on_cut(self, action, typo):
        self.cut_to_clipboard()
    def __on_copy(self, action, typo):
        """Copy the selection: typo 0 = hex dump, 1 = native ascii, else both."""
        bounds = self.get_selection()
        if not bounds:
            return
        data = self._document.get_data(bounds[0], bounds[1])
        def hexdump():
            # Two-digit uppercase hex bytes, 8 per line.
            idx = 0
            out = ''
            for x in data:
                i = hex(ord(x))[2:].upper()
                if len(i) == 1:
                    out += "0"
                out += "%s" % i
                idx += 1
                if idx % 8 == 0:
                    out += '\n'
                    idx = 0
                else:
                    out += ' '
            return out
        def asciidump():
            # Printable letters verbatim, everything else as '.', 8 per line.
            idx = 0
            out = ''
            for x in data:
                i = x.isalpha() and x or '.'
                out += "%s" % i
                idx += 1
                if idx % 8 == 0:
                    out += '\n'
                    idx = 0
            return out
        if typo == 0:
            gtk.clipboard_get().set_text(hexdump())
        elif typo == 1:
            self.copy_to_clipboard()
        else:
            # Combined view: hex columns, then a tab, then the ascii column.
            out = ''
            for h, a in zip(hexdump().splitlines(), asciidump().splitlines()):
                padding = 8 - len(a)
                out += h + (" " * ((padding * 3) - 1)) + "\t" + a + "\n"
            gtk.clipboard_get().set_text(out)
    def __on_bcopy(self, action, typo):
        # "Copy from both": force the combined hex+ascii branch of __on_copy.
        self.__on_copy(action, 3)
    def __on_paste(self, action, typo):
        self.paste_from_clipboard()
    def __on_button_press(self, widget, evt, typo):
        """Show the cut/copy/paste context menu on right click (button 3)."""
        if evt.button != 3:
            return
        menu = gtk.Menu()
        # OK show a popup to copy and paste
        # cut/copy/paste/delete
        txts = (_('Cu_t'), _('_Copy'), _('_Paste'), _('Copy from _both'))
        icons = (gtk.STOCK_CUT, gtk.STOCK_COPY, gtk.STOCK_PASTE, gtk.STOCK_COPY)
        cbcs = (self.__on_cut, self.__on_copy, self.__on_paste, self.__on_bcopy)
        clipboard_sel = gtk.clipboard_get().wait_for_text() and True or False
        idx = 0
        for txt, icon, cbc in zip(txts, icons, cbcs):
            action = gtk.Action(None, txt, None, icon)
            action.connect('activate', cbc, typo)
            item = action.create_menu_item()
            # Disable 'Paste' when the clipboard holds no text.
            if not clipboard_sel and idx == 2:
                item.set_sensitive(False)
            menu.append(item)
            idx += 1
        menu.popup(None, None, None, evt.button, evt.time, None)
        menu.show_all()
    def __wrap_on_changed(self, document, cdata, push_undo):
        # Forward to the user callback unless the change was made by
        # set_payload (which clears _trapped once to suppress it).
        if callable(self.changed_callback) and self._trapped:
            self.changed_callback(document, cdata, push_undo)
        self._trapped = True
    def select_block(self, offset, len, ascii=True):
        """
        Select a block of data in the HexView.
        @param offset the starting byte offset
        @param len the length of the selection in bytes
        @param ascii True to set the primary selection on ASCII, otherwise HEX
        """
        log.debug('Selecting blocks starting from %d to %d (%s)' % \
                  (offset, offset + len, self.payload.__len__()))
        self.set_selection(offset, offset + len)
    def get_payload(self):
        # Full document contents as a byte string.
        return self._document.get_data(0, self._document.file_size)
    def set_payload(self, val):
        # Suppress one changed-callback while replacing the whole document.
        self._trapped = False
        self._document.set_data(0, len(val), self._document.file_size,
                                val, False)
    def get_font(self):
        return self._font
    def modify_font(self, val):
        self.set_font(val)
    def get_bpl(self): return self._bpl
    def set_bpl(self, val):
        # This is hardcoded in the sense that gtkhex doens't offer this but
        # grouping. So we use that to encode grouping information.
        # GROUP_LONG has 4 bytes, GROUP_WORD has 2 bytes, GROUP_BYTE only 1
        if val == 1:
            self.set_group_type(gtkhex.GROUP_BYTE)
        elif val == 2:
            self.set_group_type(gtkhex.GROUP_WORD)
        elif val == 3:
            self.set_group_type(gtkhex.GROUP_LONG)
    payload = property(get_payload, set_payload)
    font = property(get_font, modify_font)
    bpl = property(get_bpl, set_bpl)
gobject.type_register(HexView)
# Manual smoke test: show a window containing a HexView with sample data.
if __name__ == "__main__":
    v = HexView()
    v.payload = "Crashhhaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa!"
    v.font = "Courier New 10"
    w = gtk.Window()
    w.add(v)
    w.show_all()
    gtk.main()
| umitproject/packet-manipulator | umit/pm/gui/widgets/pygtkhexview.py | pygtkhexview.py | py | 5,865 | python | en | code | 16 | github-code | 50 |
5111657659 | import os
from logging import ERROR
import boto3
import pytest
from _pytest.logging import LogCaptureFixture
from app.src.env import Env, get_env, get_env_by_key
from moto import mock_ssm
@pytest.fixture
def set_env() -> None:
    """Fixture: populate the process environment with dummy config values."""
    os.environ["LINE_CHANNEL_SECRET"] = "lcs"
    os.environ["LINE_CHANNEL_ACCESS_TOKEN"] = "lcat"
    os.environ["OPENAI_API_KEY"] = "oak"
    os.environ["S3_BUCKET_NAME"] = "s3bn"
    os.environ["S3_KEY_NAME"] = "s3kn"
    os.environ["TWITTER_BEARER_TOKEN"] = "tbt"
def test_get_env(set_env: None) -> None:
    """get_env() reads plain environment variables when not in prod."""
    expected = Env(
        LINE_CHANNEL_SECRET="lcs",
        LINE_CHANNEL_ACCESS_TOKEN="lcat",
        OPENAI_API_KEY="oak",
        S3_BUCKET_NAME="s3bn",
        S3_KEY_NAME="s3kn",
        TWITTER_BEARER_TOKEN="tbt",
    )
    # Compare attribute dicts so Env does not need __eq__.
    assert vars(get_env()) == vars(expected)
@mock_ssm
def test_get_env_prod(set_env: None) -> None:
    """With ENV_NAME=prod, get_env() prefers SSM parameters over env vars."""
    os.environ["ENV_NAME"] = "prod"
    ssm = boto3.client("ssm")
    # Seed the mocked SSM parameter store with values distinct from the
    # environment variables set by the fixture.
    ssm.put_parameter(Name="LINE_CHANNEL_SECRET", Value="ssm_lcs", Type="String")
    ssm.put_parameter(Name="LINE_CHANNEL_ACCESS_TOKEN", Value="ssm_lcat", Type="String")
    ssm.put_parameter(Name="OPENAI_API_KEY", Value="ssm_oak", Type="String")
    ssm.put_parameter(Name="TWITTER_BEARER_TOKEN", Value="ssm_tbt", Type="String")
    ssm.put_parameter(Name="S3_BUCKET_NAME", Value="ssm_s3bn", Type="String")
    ssm.put_parameter(Name="S3_KEY_NAME", Value="ssm_s3kn", Type="String")
    expected = Env(
        LINE_CHANNEL_SECRET="ssm_lcs",
        LINE_CHANNEL_ACCESS_TOKEN="ssm_lcat",
        OPENAI_API_KEY="ssm_oak",
        S3_BUCKET_NAME="ssm_s3bn",
        S3_KEY_NAME="ssm_s3kn",
        TWITTER_BEARER_TOKEN="ssm_tbt",
    )
    assert vars(get_env()) == vars(expected)
    # Clean up so later tests do not run in "prod" mode.
    del os.environ["ENV_NAME"]
def test_then_no_key_env(caplog: LogCaptureFixture) -> None:
    """A missing environment key logs an error and exits with code 1."""
    with pytest.raises(SystemExit) as pytest_wrapped_e:
        get_env_by_key("TEST")
    assert pytest_wrapped_e.type == SystemExit
    assert pytest_wrapped_e.value.code == 1
    assert (
        "root",
        ERROR,
        "Specify TEST as environment variable.",
    ) in caplog.record_tuples
| pep299/dbd_line_bot | app/tests/test_env.py | test_env.py | py | 2,116 | python | en | code | 2 | github-code | 50 |
42258636998 | #This file contains the python implementation of shadow detector for satellite imagery
#Author: Bhavan Vasu
import cv2 as cv2
from skimage import io, color
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# Load the test image and show it next to the shadow-detection result.
filename1='./Im7.tiff'
rgb = Image.open(filename1)
rgb = np.asarray(rgb)
plt.figure()
plt.subplot(1,2,1)
plt.title('Test Image')
plt.imshow(rgb)
# NOTE(review): PIL yields RGB channel order, so index 0 is red — the
# B/G/R variable names below look swapped; confirm the intended mapping.
image_B = np.copy(rgb[:, :, 0])
image_G = np.copy(rgb[:, :, 1])
image_R = np.copy(rgb[:, :, 2])
s=np.shape(rgb)
#Converting RGB to LAB color space
lab = color.rgb2lab(rgb)
# NOTE(review): skimage.rgb2lab returns channels in L, a, b order, so
# `image_b` below actually holds L and `image_l` holds b; verify naming.
image_b = np.copy(lab[:, :, 0])
image_a = np.copy(lab[:, :, 1])
image_l = np.copy(lab[:, :, 2])
# Per-channel means used by the hand-tuned threshold selection below.
lm=np.mean(lab[:,:,0], axis=(0, 1))
am=np.mean(lab[:,:,1], axis=(0, 1))
bm=np.mean(lab[:,:,2], axis=(0, 1))
#Creating empty mask for masking shadow
# NOTE(review): np.empty leaves the mask uninitialized and only False is
# ever assigned; non-shadow entries keep arbitrary values — confirm intent.
mas = np.empty([rgb.shape[0], rgb.shape[1]], dtype = bool)
lb=lab[:,:,0]+lab[:,:,2]
#Hand crafted thresholds: Dataset specific
if (am+bm)<=15:
    mas[(image_l <=(lm-(np.std(image_l))/15))] = False
else:
    mas[(image_l+image_b)<=50] = False
# Masked views of individual channels (computed but only `mam` is shown).
B_masked = np.ma.masked_array(image_b, mask = mas)
G_masked = np.ma.masked_array(image_G, mask = mas)
R_masked = np.ma.masked_array(image_R, mask = mas)
# Add the inverted mask as an alpha channel for display.
mam = np.dstack([rgb, (~mas).astype(np.uint8)*255])
plt.subplot(1,2,2)
plt.imshow(mam)
plt.title('Shadow detected Image')
plt.show()
| vbhavank/Shadow-detection-using-LAB-color-space | lab.py | lab.py | py | 1,322 | python | en | code | 16 | github-code | 50 |
29776638837 | import json
from defi_services.lending_pools.cream_service import CreamService
# Example: query CREAM lending-pool data on BSC and dump results to JSON.
if __name__ == "__main__":
    # Chain id 0x38 = BSC mainnet, via a public Binance RPC endpoint.
    cream = CreamService("0x38", "https://bsc-dataseed1.binance.org/")
    pool_token = cream.get_pool_token()
    wrapped_native_token = cream.get_wrapped_native_token()
    # NOTE(review): token prices are hard-coded snapshots; refresh before
    # relying on the computed APY/balances.
    apy = cream.get_apy_defi_app(pool_token_price=21.7, wrapped_native_token_price=306.17)
    deposit_borrow = cream.get_wallet_deposit_borrow_balance(
        wallet_address="0x22a65db6e25073305484989aE55aFf0687E68566",
        wrapped_native_token_price=306.17
    )
    claim = cream.get_rewards_balance(wallet_address="0x22a65db6e25073305484989aE55aFf0687E68566")
    print("claimable: ", claim)
    print("pool token: ", pool_token)
    print("wrapped_native_token", wrapped_native_token)
    with open("cream_apy.json", 'w') as f:
        f.write(json.dumps(apy, indent=1))
    with open("cream_add.json", 'w') as f:
        f.write(json.dumps(deposit_borrow, indent=1))
| phamvietbang/defi-services-lib | examples/lending_pools/bsc/cream.py | cream.py | py | 957 | python | en | code | 0 | github-code | 50 |
12522666835 | import os
from tumor_utils.wsi import WSI # custom WSI class
from tumor_utils.tile import Tile # custom Tile class
def tile_wsi_list(wsi_file_list:list, tile_dir:str, tile_size:int=256, level:int=0):
    """Tile every whole-slide image in *wsi_file_list* into *tile_dir*.

    :param wsi_file_list: paths of WSI files to process
    :param tile_dir: output directory for the saved tile images
    :param tile_size: square tile edge length in pixels
    :param level: slide pyramid level to read tiles from
    """
    # generate a WSI object for each WSI in list
    for wsi_file in wsi_file_list:
        print(f"\nTiling file: {os.path.basename(wsi_file)}")
        wsi_obj = WSI(wsi_file)
        coods_list = wsi_obj.get_tile_coords(tile_size, level)
        # Generate tiles from WSI object
        n=1 # <- tile counter used to number output files
        for x,y in coods_list:
            # generate tile object and save all non-blank tiles
            current_tile = Tile(wsi_obj, [x,y], tile_size, level)
            current_tile.save_tile(tile_dir, n, ext=".png")
            n += 1
3547406521 | import logging
from threading import TIMEOUT_MAX
import boto3
from botocore.exceptions import ClientError
import os
import argparse
import sys
import json
import time
sys.path.append('../')
from arg_satcomp_solver_base.sqs_queue.sqs_queue import SqsQueue
class SolverTimeoutException(Exception):
    """Raised when the driver waits longer than TIMEOUT seconds for a solver result."""
class S3ProblemStore:
    """Class to represent S3 storage location holding CNF problem files."""
    def __init__(self, s3_resource, formula_bucket, logger):
        self.s3_resource = s3_resource
        self.logger = logger
        self.formula_bucket = formula_bucket
    def list_cnf_file_s3_path_pairs(self):
        """Return (object key, s3://bucket/key URI) pairs for every object."""
        try:
            self.logger.debug(f'Attempting to list files for bucket {self.formula_bucket}')
            bkt = self.s3_resource.Bucket(self.formula_bucket)
            pairs = [(bkt_object.key, f's3://{self.formula_bucket}/{bkt_object.key}') for bkt_object in bkt.objects.all()]
            return pairs
        except ClientError as e:
            # Log and re-raise so the caller sees the failure.
            self.logger.error(f"Failed to list s3 bucket objects from {self.formula_bucket}")
            self.logger.exception(e)
            raise
    def get_bucket(self):
        """Return the configured bucket name."""
        return self.formula_bucket
    @staticmethod
    def get_s3_file_system(session, formula_bucket, logger):
        """Factory: build an S3ProblemStore from a boto3 session."""
        s3 = session.resource('s3')
        return S3ProblemStore(s3, formula_bucket, logger)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# maximum retries on non-success (other than timeout)
RETRIES_MAX = 2
# seconds to wait for a result message before assuming the leader crashed
TIMEOUT = 1030
class ProblemRunner:
    """Class to run a problem set through a distributed solver via SQS queues."""
    def __init__(self, problem_queue, result_queue, s3_problem_store, args, logger):
        self.num_workers = args.num_workers
        self.clean_first = args.clean_first
        self.json_file = args.json_file
        self.s3_problem_store = s3_problem_store
        self.problem_queue = problem_queue
        self.result_queue = result_queue
        self.logger = logger
    def run_one_problem(self, s3_uri):
        """Submit *s3_uri* to the problem queue and block until a result.

        Retries indeterminate results up to RETRIES_MAX times; raises
        SolverTimeoutException if no result arrives within TIMEOUT seconds.
        Returns the parsed result JSON of the final attempt.
        """
        msg = {"s3_uri": s3_uri, "num_workers": self.num_workers}
        msg_str = json.dumps(msg, indent = 4)
        done = False
        retries = 0
        while not done:
            self.problem_queue.put_message(msg_str)
            start_time = time.perf_counter()
            result = self.result_queue.get_message()
            # Poll the result queue until a message arrives or we time out.
            while result is None:
                self.logger.info(f"Awaiting completion for file: {s3_uri}")
                end_time = time.perf_counter()
                if end_time - start_time > TIMEOUT:
                    raise SolverTimeoutException(f"Client exceeded max time waiting for response ({str(TIMEOUT)}). Did leader crash?")
                result = self.result_queue.get_message()
            result_json = json.loads(result.read())
            result.delete()
            print(f"Problem {s3_uri} completed! result is: {json.dumps(result_json, indent=4)}")
            if result_json["driver"]["timed_out"]:
                done = True
            else:
                result = (result_json["solver"]["output"]["result"]).lower()
                retries = retries + 1
                # Stop on a definitive answer or once the retry budget is spent.
                if result == "unsatisfiable" or result == "satisfiable" or retries >= RETRIES_MAX:
                    done = True
        return result_json
    def run_problems(self):
        """Run every problem in the bucket, caching results in the JSON file.

        Previously-solved problems are skipped unless --clean-first was given;
        the results file is rewritten after every solved problem.
        """
        results = {}
        if os.path.exists(self.json_file) and not self.clean_first:
            with open(self.json_file, "r") as result_file:
                results = json.load(result_file)
        for (input_file, s3_uri) in self.s3_problem_store.list_cnf_file_s3_path_pairs():
            self.logger.info(f'attempting to solve file: {s3_uri}')
            # skip previously solved files (unless 'clean' flag is true)
            if input_file in results:
                print(f"Problem {s3_uri} is cached from earlier run. Result is: {json.dumps(results[input_file], indent=4)}")
                continue
            result_json = self.run_one_problem(s3_uri)
            # write answer (overwrite existing file)
            results[input_file] = result_json
            with open(self.json_file, "w") as result_file:
                json.dump(results, result_file, indent=4)
def init_argparse() -> argparse.ArgumentParser:
    """Construct the command-line parser for the SATComp solver driver."""
    parser = argparse.ArgumentParser(
        description="Run a SATComp solver through all files in a bucket"
    )
    parser.add_argument(
        "-v", "--version", action="version",
        version=f"{parser.prog} version 0.1.0"
    )
    # (flag, keyword arguments) pairs for the remaining optional arguments.
    option_specs = [
        ('--profile', dict(required=True, help="AWS profile")),
        ('--problem-queue', dict(required=True, type=str,
                                 help='Name of the problem SQS queue (sends jobs to solver) ')),
        ('--result-queue', dict(required=True, type=str,
                                help='Name of the result SQS queue (receives outputs from solver) ')),
        ('--s3-bucket', dict(required=True, type=str, help='Name of the s3 bucket')),
        ('--num-workers', dict(required=True, type=int, help='Number of workers in the cluster')),
        ('--verbose', dict(type=int, help='Set the verbosity level of output: 0 = ERROR, 1 = INFO, 2 = DEBUG (default: 0)')),
        ('--clean-first', dict(type=bool, help='Clean the output file prior to run (default: False)')),
        ('--purge-queues', dict(type=bool, help='Purge queues and wait one minute prior to start?')),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    # Single positional argument: where to persist accumulated results.
    parser.add_argument('json_file', help='Path to json file containing results')
    return parser
def main():
    """Entry point: parse args, wire up AWS resources, and run all problems."""
    logger = logging.getLogger("satcomp_log")
    try:
        parser = init_argparse()
        args = parser.parse_args()
        # Map --verbose 0/1/2 onto ERROR/INFO/DEBUG (default ERROR).
        if args.verbose and args.verbose > 0:
            if args.verbose == 1:
                logger.setLevel(logging.INFO)
            elif args.verbose >= 2:
                logger.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.ERROR)
        logger.debug('command line arguments: ' + str(args))
        session = boto3.Session(profile_name=args.profile)
        s3 = session.resource('s3')
        # create AWS access objects
        problem_queue = SqsQueue.get_sqs_queue_from_session(session, args.problem_queue)
        result_queue = SqsQueue.get_sqs_queue_from_session(session, args.result_queue)
        s3_problem_store = S3ProblemStore.get_s3_file_system(session, args.s3_bucket, logger)
        problem_runner = ProblemRunner(problem_queue, result_queue, s3_problem_store, args, logger)
        if args.purge_queues:
            logger.info('Purging problem and result queues')
            problem_queue.purge()
            result_queue.purge()
            # recommendation of boto api: wait after purge before reuse
            time.sleep(60)
        problem_runner.run_problems()
    except Exception as e:
        # Log and re-raise so the process exits with a traceback.
        logger.error(f"Failure during 'run_satcomp_solver'. Error: {str(e)}")
        raise
| aws-samples/aws-batch-comp-infrastructure-sample | docker/satcomp-images/satcomp-solver-resources/arg_satcomp_solver_base/satcomp_solver_driver.py | satcomp_solver_driver.py | py | 6,975 | python | en | code | 8 | github-code | 50 |
39673132700 | from PIL import Image, ImageDraw
import numpy as np
import os
# Directory to process and the filename keywords to match.
directory = "../111"
search_keyword1 = "1.2m_2.5m"
search_keyword2 = "1.5m_2.5m"
# NOTE(review): search_keyword1 is defined but never used below.
# Loop over every file in the directory.
for filename in os.listdir(directory):
    if filename.endswith(".jpg") or filename.endswith(".JPG"):
        if search_keyword2 in filename:
            # Open the image file.
            image_path = os.path.join(directory, filename)
            image = Image.open(image_path)
            pixels = image.load()
            # Image dimensions.
            width, height = image.size
            # Cover block spans one third of the image in each dimension.
            h = height // 3
            w = width // 3
            # Sample a color just outside the covered region.
            # NOTE(review): reference_color is computed but never used — the
            # pasted block is plain black; confirm whether it should be filled
            # with this sampled color instead.
            reference_pos = (width - w - 20, height - h - 23)
            reference_color = pixels[reference_pos[0], reference_pos[1]]
            block = Image.new('RGB', (w, h))
            # Paste the (black) block into the bottom-right corner.
            position = (width - w, height - h)
            image.paste(block, position)
            # Save the modified image in place.
            image.save(image_path)
| HunterCQu/python_tools | python4image/add_a_cover.py | add_a_cover.py | py | 1,164 | python | en | code | 0 | github-code | 50 |
6312277173 | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
# CSS colors used to render each incident severity level.
INFORMATIONAL_SEVERITY_COLOR = "rgb(64,65,66)" # Black
LOW_SEVERITY_COLOR = "rgb(29,184,70)" # Green
MEDIUM_SEVERITY_COLOR = "rgb(209,125,0)" # Orange
HIGH_SEVERITY_COLOR = "rgb(209,60,60)" # Red
CRITICAL_SEVERITY_COLOR = "rgb(143,0,14)" # Dark Red
# Severity value -> display color ('color') and human-readable name ('dsc').
COLORS_AND_NAMES = {
    IncidentSeverity.UNKNOWN: {'color': INFORMATIONAL_SEVERITY_COLOR, 'dsc': 'Unknown'},
    IncidentSeverity.INFO: {'color': INFORMATIONAL_SEVERITY_COLOR, 'dsc': 'Informational'},
    IncidentSeverity.LOW: {'color': LOW_SEVERITY_COLOR, 'dsc': 'Low'},
    IncidentSeverity.MEDIUM: {'color': MEDIUM_SEVERITY_COLOR, 'dsc': 'Medium'},
    IncidentSeverity.HIGH: {'color': HIGH_SEVERITY_COLOR, 'dsc': 'High'},
    IncidentSeverity.CRITICAL: {'color': CRITICAL_SEVERITY_COLOR, 'dsc': 'Critical'},
}
def get_incident_severity(incident_id):
    """Return the severity of XSOAR incident *incident_id* (UNKNOWN if absent)."""
    data = execute_command("getIncidents", {'id': incident_id})
    return dict_safe_get(data, ['data', 0, 'severity'], IncidentSeverity.UNKNOWN)
def incidents_id():
    """Yield the ids of the incidents attached to the current email campaign."""
    incidents = dict_safe_get(demisto.context(), ['EmailCampaign', 'incidents'], [])
    for incident in incidents:
        yield incident['id']
def main(): # pragma: no cover
    """Render an HTML widget showing the campaign's highest incident severity."""
    try:
        # Getting incident context: start from the current incident's severity.
        highest_severity = max(IncidentSeverity.UNKNOWN, demisto.incident().get('severity', IncidentSeverity.UNKNOWN))
        for incident_id in incidents_id():
            highest_severity = max(highest_severity, get_incident_severity(incident_id))
        # Determine color and display name for the winning severity:
        color = COLORS_AND_NAMES[highest_severity]['color']
        description = COLORS_AND_NAMES[highest_severity]['dsc']
        html = "<div style='text-align:center; font-size:17px; padding: 15px;'> Highest Severity</br> " \
               f"<div style='font-size:32px; color:{color};'> {description} </div></div>"
    except Exception:
        # Any failure degrades to a generic "No severity" widget.
        html = "<div style='text-align:center; padding: 20px;'> <div> No severity </div>"
    # Return the data to the layout:
    return_results({
        'ContentsFormat': EntryFormat.HTML,
        'Type': EntryType.NOTE,
        'Contents': html,
    })
# XSOAR convention: scripts may run under several interpreter entry names.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| demisto/content | Packs/Campaign/Scripts/ShowCampaignHighestSeverity/ShowCampaignHighestSeverity.py | ShowCampaignHighestSeverity.py | py | 2,242 | python | en | code | 1,023 | github-code | 50 |
# Tutorial script demonstrating Python dict operations.
products = {
    "shelf_one": "beer",
    "shelf_two": "coffee",
    "shelf_three": "water",
    "shelf_four": "banana"
}
# Views over the dictionary (live: they reflect later mutations).
products_values = products.values()
products_keys = products.keys()
items = products.items()
print(f"declaration: {products}")
print(f"type: {type(products)}")
print(f"values: {products_values}")
print(f"keys: {products_keys}")
print(f"shelf two: {products['shelf_two']}")
print(f"items: {items}")
# Iterating values, keys, and (key, value) pairs.
for product in products_values:
    print(f"product: {product}")
for key in products_keys:
    print(f"key: {key}")
for product_key, product_value in items:
    print(f"key: {product_key} | value: {product_value}")
# Updating an existing key and inserting a new one.
products["shelf_four"] = "tomato"
products["shelf_five"] = "car"
print(f"shelf four: {products['shelf_four']}")
print(f"shelf five: {products['shelf_five']}")
print(f"products: {products}")
# Adding an entry from interactive input.
shelf_key = input("Put a shelf: ")
product_name = input("Put a product: ")
products[shelf_key] = product_name
print(f"{shelf_key}: {product_name}")
print(f"products: {products}")
# Removing entries: by key, the last-inserted item, then everything.
products.pop("shelf_two")
print(f"products: {products}")
products.popitem()
print(f"products: {products}")
products.clear()
print(f"products: {products}")
# Nested dictionaries.
contacts = {
    "hotequil": {
        "email": "hotequil@email.com",
        "phone": 1234
    },
    "joao": {
        "email": "joao@email.com",
        "phone": 5678
    }
}
print(f"contacts: {contacts}")
| hotequil/learning-python | dictionaries.py | dictionaries.py | py | 1,385 | python | en | code | 0 | github-code | 50 |
40160477350 | from __future__ import absolute_import
import sys
from builtins import range
from .Mixins import _ConfigureComponent, PrintOptions
from .Mixins import _Labelable, _Unlabelable
from .Mixins import _ValidatingParameterListBase
from .ExceptionHandling import *
from .OrderedSet import OrderedSet
class _HardDependency(object):
"""Information relevant for when a hard dependency,
which uses the * operator, is found"""
def __init__(self, sequenceName, depSet):
self.sequenceName = sequenceName
self.depSet = depSet
class _Sequenceable(object):
    """Denotes an object which can be placed in a sequence"""
    def __init__(self):
        pass
    def __mul__(self,rhs):
        # 'a * b' builds a flat collection holding both operands.
        return _SequenceCollection(self,rhs)
    def __add__(self,rhs):
        # '+' behaves like '*': both concatenate into a collection.
        return _SequenceCollection(self,rhs)
    def __invert__(self):
        # '~a' wraps the operand in a negation node.
        return _SequenceNegation(self)
    def _clonesequence(self, lookuptable):
        """Return the clone previously registered for this object's id."""
        try:
            return lookuptable[id(self)]
        except:
            raise KeyError("no "+str(type(self))+" with id "+str(id(self))+" found")
    def resolve(self, processDict,keepIfCannotResolve=False):
        # Default: nothing to resolve; subclasses override.
        return self
    def isOperation(self):
        """Returns True if the object is an operator (e.g. *,+ or !) type"""
        return False
    def isLeaf(self):
        return False
    def _visitSubNodes(self,visitor):
        # Default: no children to visit.
        pass
    def visitNode(self,visitor):
        # Depth-first visitation: enter, recurse into children, leave.
        visitor.enter(self)
        self._visitSubNodes(visitor)
        visitor.leave(self)
    def _appendToCollection(self,collection):
        collection.append(self)
    def _errorstr(self):
        return "A Sequenceable type"
def _checkIfSequenceable(caller, v):
    """Raise a descriptive TypeError unless *v* is a _Sequenceable."""
    if not isinstance(v,_Sequenceable):
        typename = format_typename(caller)
        msg = format_outerframe(2)
        msg += "%s only takes arguments of types which are allowed in a sequence, but was given:\n" %typename
        msg +=format_typename(v)
        msg +="\nPlease remove the problematic object from the argument list"
        raise TypeError(msg)
def _checkIfBooleanLogicSequenceable(caller, v):
    """Raise a descriptive TypeError unless *v* is a _BooleanLogicSequenceable."""
    if not isinstance(v,_BooleanLogicSequenceable):
        typename = format_typename(caller)
        msg = format_outerframe(2)
        msg += "%s only takes arguments of types which are allowed in a boolean logic sequence, but was given:\n" %typename
        msg +=format_typename(v)
        msg +="\nPlease remove the problematic object from the argument list"
        raise TypeError(msg)
class _BooleanLogicSequenceable(_Sequenceable):
    """Denotes an object which can be used in a boolean logic sequence"""
    def __init__(self):
        super(_BooleanLogicSequenceable,self).__init__()
    def __or__(self,other):
        # 'a | b' builds an OR expression node.
        return _BooleanLogicExpression(_BooleanLogicExpression.OR,self,other)
    def __and__(self,other):
        # 'a & b' builds an AND expression node.
        return _BooleanLogicExpression(_BooleanLogicExpression.AND,self,other)
class _BooleanLogicExpression(_BooleanLogicSequenceable):
    """Contains the operation of a boolean logic expression (OR or AND) over
    a flat list of operands."""
    OR = 0
    AND = 1
    def __init__(self,op,left,right):
        _checkIfBooleanLogicSequenceable(self,left)
        _checkIfBooleanLogicSequenceable(self,right)
        self._op = op
        self._items = list()
        #if either the left or right side are the same kind of boolean expression
        # then we can just add their items to our own. This keeps the expression
        # tree more compact
        if isinstance(left,_BooleanLogicExpression) and left._op == self._op:
            self._items.extend(left._items)
        else:
            self._items.append(left)
        if isinstance(right,_BooleanLogicExpression) and right._op == self._op:
            self._items.extend(right._items)
        else:
            self._items.append(right)
    def isOperation(self):
        return True
    def _visitSubNodes(self,visitor):
        for i in self._items:
            i.visitNode(visitor)
    def dumpSequencePython(self, options=PrintOptions()):
        """Render the expression as python, parenthesizing sub-expressions."""
        returnValue = ''
        join = ''
        operatorJoin =self.operatorString()
        for m in self._items:
            returnValue +=join
            join = operatorJoin
            if not isinstance(m,_BooleanLogicSequenceLeaf):
                returnValue += '('+m.dumpSequencePython(options)+')'
            else:
                returnValue += m.dumpSequencePython(options)
        return returnValue
    def operatorString(self):
        """Return the python operator symbol for this node ('|' or '&')."""
        returnValue ='|'
        if self._op == self.AND:
            returnValue = '&'
        return returnValue
class _SequenceLeaf(_Sequenceable):
    """Marks a terminal (leaf) node of a sequence expression tree."""
    def __init__(self):
        pass
    def isLeaf(self):
        return True
class _BooleanLogicSequenceLeaf(_BooleanLogicSequenceable):
    """Marks a terminal (leaf) node of a boolean logic expression tree."""
    def __init__(self):
        pass
    def isLeaf(self):
        return True
class _SequenceCollection(_Sequenceable):
    """Holds representation of the operations without having to use recursion.
    Operations are added to the beginning of the list and their operands are
    added to the end of the list, with the left added before the right
    """
    def __init__(self,*seqList):
        self._collection = list()
        for s in seqList:
            _checkIfSequenceable(self,s)
            s._appendToCollection(self._collection)
    def __mul__(self,rhs):
        # Mutates in place: appends rhs and returns self.
        _checkIfSequenceable(self,rhs)
        rhs._appendToCollection(self._collection)
        return self
    def __add__(self,rhs):
        # '+' behaves identically to '*'.
        _checkIfSequenceable(self,rhs)
        rhs._appendToCollection(self._collection)
        return self
    def __str__(self):
        # Join non-None members with '+'.
        sep = ''
        returnValue = ''
        for m in self._collection:
            if m is not None:
                returnValue += sep+str(m)
                sep = '+'
        return returnValue
    def _appendToCollection(self,collection):
        # Flatten: copy our members rather than nesting the collection.
        collection.extend(self._collection)
    def dumpSequencePython(self, options=PrintOptions()):
        """Render the collection as python, joining member dumps with '+'."""
        returnValue = ''
        separator = ''
        for item in self._collection:
            itemDump = item.dumpSequencePython(options)
            if itemDump:
                returnValue += (separator + itemDump)
                separator = '+'
        return returnValue
    def dumpSequenceConfig(self):
        """Render in the old config language, joining members with '&'."""
        returnValue = self._collection[0].dumpSequenceConfig()
        for m in self._collection[1:]:
            returnValue += '&'+m.dumpSequenceConfig()
        return returnValue
    def directDependencies(self,sortByType=True):
        return findDirectDependencies(self, self._collection,sortByType=sortByType)
    def visitNode(self,visitor):
        for m in self._collection:
            m.visitNode(visitor)
    def resolve(self, processDict,keepIfCannotResolve=False):
        # Resolve every member in place.
        self._collection = [x.resolve(processDict,keepIfCannotResolve) for x in self._collection]
        return self
    def index(self,item):
        return self._collection.index(item)
    def insert(self,index,item):
        self._collection.insert(index,item)
    def _replaceIfHeldDirectly(self,original,replacement):
        """Replace direct occurrences of *original* (including inside unary
        operators); a None replacement removes the entry. Returns True if
        anything was replaced."""
        didReplace = False
        for i in self._collection:
            if original == i:
                self._collection[self._collection.index(original)] = replacement
                didReplace = True
            elif isinstance(i,_UnarySequenceOperator) and i._has(original):
                didReplace = True
                if replacement is None:
                    self._collection[self._collection.index(i)] = None
                else:
                    # Re-wrap the replacement in the same operator type.
                    self._collection[self._collection.index(i)] = type(i)(replacement)
        if replacement is None:
            self._collection = [ i for i in self._collection if i is not None]
        return didReplace
def findDirectDependencies(element, collection,sortByType=True):
    """Return the (category, label) pairs that *collection* directly depends on.

    Unlabeled containers are recursed into; labeled ones contribute their own
    label under 'modules', 'sequences', 'tasks' or 'conditionaltasks'.
    Placeholders add no dependency. With sortByType the result is a sorted,
    de-duplicated list; otherwise the raw (possibly duplicated) order.
    """
    dependencies = []
    for item in collection:
        # skip null items
        if item is None:
            continue
        # EDFilter, EDProducer, EDAnalyzer, OutputModule
        # should check for Modules._Module, but that doesn't seem to work
        elif isinstance(item, _SequenceLeaf):
            t = 'modules'
        # cms.ignore(module), ~(module)
        elif isinstance(item, (_SequenceIgnore, _SequenceNegation)):
            if isinstance(item._operand, _SequenceCollection):
                dependencies += item.directDependencies(sortByType)
                continue
            t = 'modules'
        # _SequenceCollection
        elif isinstance(item, _SequenceCollection):
            dependencies += item.directDependencies(sortByType)
            continue
        # cms.Sequence
        elif isinstance(item, Sequence):
            if not item.hasLabel_():
                dependencies += item.directDependencies(sortByType)
                continue
            t = 'sequences'
        # cms.Task
        elif isinstance(item, Task):
            if not item.hasLabel_():
                dependencies += item.directDependencies(sortByType)
                continue
            t = 'tasks'
        # cms.ConditionalTask
        elif isinstance(item, ConditionalTask):
            if not item.hasLabel_():
                dependencies += item.directDependencies(sortByType)
                continue
            t = 'conditionaltasks'
        # SequencePlaceholder and TaskPlaceholder do not add an explicit dependency
        elif isinstance(item, (SequencePlaceholder, TaskPlaceholder, ConditionalTaskPlaceholder)):
            continue
        # unsupported elements
        else:
            sys.stderr.write("Warning: unsupported element '%s' in %s '%s'\n" % (str(item), type(element).__name__, element.label_()))
            continue
        dependencies.append((t, item.label_()))
    if sortByType:
        # Case-insensitive sort by (category, label), stripping '_cfi' suffixes.
        return sorted(set(dependencies), key = lambda t_item: (t_item[0].lower(), t_item[1].lower().replace('_cfi', '')))
    else:
        return dependencies
class _ModuleSequenceType(_ConfigureComponent, _Labelable):
    """Base class for classes which define a sequence of modules.

    Holds an optional expression tree of sequenceable items (self._seq)
    plus an ordered set of associated Tasks (self._tasks). Concrete
    subclasses include Path, EndPath, FinalPath and Sequence.
    """
    def __init__(self,*arg, **argv):
        # Accepts zero or one _Sequenceable followed by zero or more Tasks;
        # anything else produces the detailed TypeError built below.
        self.__dict__["_isFrozen"] = False
        self._seq = None
        if (len(arg) > 1 and not isinstance(arg[1], _TaskBase)) or (len(arg) > 0 and not isinstance(arg[0],_Sequenceable) and not isinstance(arg[0],_TaskBase)):
            typename = format_typename(self)
            msg = format_outerframe(2)
            msg += "The %s constructor takes zero or one sequenceable argument followed by zero or more arguments of type Task. But the following types are given:\n" %typename
            # enumerate the offending arguments (only the first 19 are listed)
            for item,i in zip(arg, range(1,20)):
                try:
                    msg += " %i) %s \n" %(i, item._errorstr())
                except:
                    msg += " %i) Not sequenceable and not a Task\n" %(i)
            if len(arg) > 1 and isinstance(arg[0],_Sequenceable) and isinstance(arg[1], _Sequenceable):
                msg += "Maybe you forgot to combine the sequenceable arguments via '*' or '+'."
            raise TypeError(msg)
        tasks = arg
        if len(arg) > 0 and isinstance(arg[0], _Sequenceable):
            self._seq = _SequenceCollection()
            arg[0]._appendToCollection(self._seq._collection)
            tasks = arg[1:]
        self._isModified = False
        self._tasks = OrderedSet()
        if len(tasks) > 0:
            self.associate(*tasks)
    def associate(self,*tasks):
        # Attach Task objects to this sequence; duplicates are ignored by OrderedSet.
        for task in tasks:
            if not isinstance(task, _TaskBase):
                raise TypeError("associate only works with objects of type Task")
            self._tasks.add(task)
    def isFrozen(self):
        return self._isFrozen
    def setIsFrozen(self):
        self._isFrozen = True
    def _place(self,name,proc):
        # Delegates to the subclass-specific placement hook.
        self._placeImpl(name,proc)
    def __imul__(self,rhs):
        # '*=' appends rhs to the sequenced expression (same storage as '+=').
        _checkIfSequenceable(self, rhs)
        if self._seq is None:
            self.__dict__["_seq"] = _SequenceCollection()
        self._seq+=rhs
        return self
    def __iadd__(self,rhs):
        _checkIfSequenceable(self, rhs)
        if self._seq is None:
            self.__dict__["_seq"] = _SequenceCollection()
        self._seq += rhs
        return self
    def __str__(self):
        # String form is the fully expanded (flattened) sequence.
        v = ExpandVisitor(type(self))
        self.visit(v)
        return v.resultString()
    def dumpConfig(self, options):
        # Old configuration-language representation.
        s = ''
        if self._seq is not None:
            s = self._seq.dumpSequenceConfig()
        return '{'+s+'}\n'
    def dumpPython(self, options=PrintOptions()):
        """Returns a string which is the python representation of the object"""
        s = self.dumpPythonNoNewline(options)
        return s + "\n"
    def dumpPythonNoNewline(self, options=PrintOptions()):
        # Builds 'cms.<Type>(<sequence>, <tasks...>)'; task strings are sorted
        # so the dump is deterministic.
        s=''
        if self._seq is not None:
            s =self._seq.dumpSequencePython(options)
        associationContents = set()
        for task in self._tasks:
            if task.hasLabel_():
                associationContents.add(_Labelable.dumpSequencePython(task, options))
            else:
                associationContents.add(task.dumpPythonNoNewline(options))
        for iString in sorted(associationContents):
            if s:
                s += ", "
            s += iString
        # NOTE(review): the '*[...]' form is presumably used to stay under
        # Python's historical 255-argument call limit — confirm.
        if len(associationContents) > 254:
            return 'cms.'+type(self).__name__+'(*['+s+'])'
        return 'cms.'+type(self).__name__+'('+s+')'
    def dumpSequencePython(self, options=PrintOptions()):
        """Returns a string which contains the python representation of just the internal sequence"""
        # only dump the label, if possible
        if self.hasLabel_():
            return _Labelable.dumpSequencePython(self, options)
        elif len(self._tasks) == 0:
            if self._seq is None:
                return ''
            s = self._seq.dumpSequencePython(options)
            if s:
                return '('+s+')'
            return ''
        return self.dumpPythonNoNewline(options)
    def dumpSequenceConfig(self):
        """Returns a string which contains the old config language representation of just the internal sequence"""
        # only dump the label, if possible
        if self.hasLabel_():
            return _Labelable.dumpSequenceConfig(self)
        else:
            # dump it verbose
            if self._seq is None:
                return ''
            return '('+self._seq.dumpSequenceConfig()+')'
    def __repr__(self):
        s = ''
        if self._seq is not None:
            s = str(self._seq)
        return "cms."+type(self).__name__+'('+s+')\n'
    def directDependencies(self,sortByType=True):
        """Returns the list of modules and other entities that are directly used"""
        result = []
        if self._seq:
            result += self._seq.directDependencies(sortByType=sortByType)
        if self._tasks:
            result += findDirectDependencies(self, self._tasks,sortByType=sortByType)
        return result
    def moduleNames(self):
        """Returns a set containing the names of all modules being used"""
        result = set()
        visitor = NodeNameVisitor(result)
        self.visit(visitor)
        return result
    def contains(self, mod):
        # True iff 'mod' (same instance) appears anywhere in the sequence or tasks.
        visitor = ContainsModuleVisitor(mod)
        self.visit(visitor)
        return visitor.result()
    def copy(self):
        # Shallow copy: the sequence expression is shared; the task set is duplicated.
        returnValue =_ModuleSequenceType.__new__(type(self))
        if self._seq is not None:
            returnValue.__init__(self._seq)
        else:
            returnValue.__init__()
        returnValue._tasks = OrderedSet(self._tasks)
        return returnValue
    def copyAndExclude(self,listOfModulesToExclude):
        """Returns a copy of the sequence which excludes those module in 'listOfModulesToExclude'"""
        # You can exclude instances of these types EDProducer, EDFilter, OutputModule,
        # EDAnalyzer, ESSource, ESProducer, Service, Sequence, SequencePlaceholder, Task,
        # _SequenceNegation, and _SequenceIgnore.
        # Mostly this is very intuitive, but there are some complications in cases
        # where objects that contain other objects are involved. See the comments
        # for the _MutatingSequenceVisitor.
        v = _CopyAndExcludeSequenceVisitor(listOfModulesToExclude)
        self.visit(v)
        # v.result(self) yields (sequenced part, list of tasks)
        result = self.__new__(type(self))
        result.__init__(v.result(self)[0], *v.result(self)[1])
        return result
    def expandAndClone(self):
        # Name of this function is not very good. It makes a shallow copy with all
        # the subTasks and subSequences flattened out (removed), but keeping all the
        # modules that were in those subSequences and subTasks as well as the top level
        # ones. Note this will also remove placeholders so one should probably
        # call resolve before using this if the sequence contains any placeholders.
        visitor = ExpandVisitor(type(self))
        self.visit(visitor)
        return visitor.result()
    def _postProcessFixup(self,lookuptable):
        # Re-maps contained nodes after cloning using the id->clone lookup table.
        self._seq = self._seq._clonesequence(lookuptable)
        return self
    def replace(self, original, replacement):
        """Finds all instances of 'original' and substitutes 'replacement' for them.
           Returns 'True' if a replacement occurs."""
        # This works for either argument being of type EDProducer, EDFilter, OutputModule,
        # EDAnalyzer, ESProducer, ESSource, Service, Sequence, SequencePlaceHolder,
        # Task, _SequenceNegation, _SequenceIgnore.  Although it will fail with a
        # raised exception if the replacement actually hits a case where a
        # non-Sequenceable object is placed in the sequenced part of a Sequence
        # or a type not allowed on a Task is put on a Task.
        # There is one special case where we need an explicit check to prevent
        # the algorithm from getting confused, either both or neither can be Tasks
        #
        # Mostly this is very intuitive, but there are some complications in cases
        # where objects that contain other objects are involved. See the comments
        # for the _MutatingSequenceVisitor.
        if (isinstance(original,Task) != isinstance(replacement,Task)):
            raise TypeError("replace only works if both arguments are Tasks or neither")
        if (isinstance(original,ConditionalTask) != isinstance(replacement,ConditionalTask)):
            raise TypeError("replace only works if both arguments are ConditionalTasks or neither")
        v = _CopyAndReplaceSequenceVisitor(original,replacement)
        self.visit(v)
        if v.didReplace():
            self._seq = v.result(self)[0]
            if v.result(self)[1]:
                self._tasks.clear()
                self.associate(*v.result(self)[1])
        return v.didReplace()
    def _replaceIfHeldDirectly(self,original,replacement):
        """Only replaces an 'original' with 'replacement' if 'original' is directly held.
        If another Sequence or Task holds 'original' it will not be replaced."""
        didReplace = False
        if original in self._tasks:
            self._tasks.remove(original)
            if replacement is not None:
                self._tasks.add(replacement)
            didReplace = True
        if self._seq is not None:
            didReplace |= self._seq._replaceIfHeldDirectly(original,replacement)
        return didReplace
    def index(self,item):
        """Returns the index at which the item is found or raises an exception"""
        if self._seq is not None:
            return self._seq.index(item)
        raise ValueError(str(item)+" is not in the sequence")
    def insert(self,index,item):
        """Inserts the item at the index specified"""
        _checkIfSequenceable(self, item)
        if self._seq is None:
            self.__dict__["_seq"] = _SequenceCollection()
        self._seq.insert(index,item)
    def remove(self, something):
        """Remove the first occurrence of 'something' (a sequence or a module)
           Returns 'True' if the module has been removed, False if it was not found"""
        # You can remove instances of these types EDProducer, EDFilter, OutputModule,
        # EDAnalyzer, ESSource, ESProducer, Service, Sequence, SequencePlaceholder, Task,
        # _SequenceNegation, and _SequenceIgnore.
        # Mostly this is very intuitive, but there are some complications in cases
        # where objects that contain other objects are involved. See the comments
        # for the _MutatingSequenceVisitor.
        #
        # Works very similar to copyAndExclude, there are 2 differences. This changes
        # the object itself instead of making a copy and second it only removes
        # the first instance of the argument instead of all of them.
        v = _CopyAndRemoveFirstSequenceVisitor(something)
        self.visit(v)
        if v.didRemove():
            self._seq = v.result(self)[0]
            if v.result(self)[1]:
                self._tasks.clear()
                self.associate(*v.result(self)[1])
        return v.didRemove()
    def resolve(self, processDict,keepIfCannotResolve=False):
        # Replaces placeholders (recursively, including in tasks) with real objects.
        if self._seq is not None:
            self._seq = self._seq.resolve(processDict,keepIfCannotResolve)
        for task in self._tasks:
            task.resolve(processDict,keepIfCannotResolve)
        return self
    def __setattr__(self,name,value):
        # Only private ('_'-prefixed) attributes may be set on sequence-like objects.
        if not name.startswith("_"):
            raise AttributeError("You cannot set parameters for sequence like objects.")
        else:
            self.__dict__[name] = value
    #def replace(self,old,new):
    #"""Find all instances of old and replace with new"""
    #def insertAfter(self,which,new):
    #"""new will depend on which but nothing after which will depend on new"""
    #((a*b)*c) >> insertAfter(b,N) >> ((a*b)*(N+c))
    #def insertBefore(self,which,new):
    #"""new will be independent of which"""
    #((a*b)*c) >> insertBefore(b,N) >> ((a*(N+b))*c)
    #def __contains__(self,item):
    #"""returns whether or not 'item' is in the sequence"""
    #def modules_(self):
    def nameInProcessDesc_(self, myname):
        return myname
    def insertInto(self, parameterSet, myname, decoratedList):
        # Stores the decorated (string) representation into the parameter set.
        parameterSet.addVString(True, myname, decoratedList)
    def visit(self,visitor):
        """Passes to visitor's 'enter' and 'leave' method each item describing the module sequence.
        If the item contains 'sub' items then visitor will see those 'sub' items between the
        item's 'enter' and 'leave' calls.
        """
        if self._seq is not None:
            self._seq.visitNode(visitor)
        for item in self._tasks:
            visitor.enter(item)
            item.visit(visitor)
            visitor.leave(item)
class _UnarySequenceOperator(_BooleanLogicSequenceable):
    """Base class for unary decorator nodes ('~' and '-') in a sequence expression tree.

    Wraps a single sequenceable operand and delegates most behavior to it.
    The operand must be a _Sequenceable and must not itself be a sequence.
    """
    def __init__(self, operand):
        self._operand = operand
        if isinstance(operand, _ModuleSequenceType):
            raise RuntimeError("This operator cannot accept a sequence")
        if not isinstance(operand, _Sequenceable):
            raise RuntimeError("This operator cannot accept a non sequenceable type")
    def __eq__(self, other):
        # allows replace(~a, b)
        return type(self) is type(other) and self._operand==other._operand
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        # this definition implies that self._operand MUST NOT be changed after the construction
        return hash((type(self), self._operand))
    def _findDependencies(self,knownDeps, presentDeps):
        self._operand._findDependencies(knownDeps, presentDeps)
    def _clonesequence(self, lookuptable):
        # Rebuild the same decorator type around the cloned operand.
        return type(self)(self._operand._clonesequence(lookuptable))
    def _has(self, op):
        return self._operand == op
    def resolve(self, processDict,keepIfCannotResolve=False):
        return type(self)(self._operand.resolve(processDict,keepIfCannotResolve))
    def isOperation(self):
        return True
    def _visitSubNodes(self,visitor):
        self._operand.visitNode(visitor)
    def decoration(self):
        # Bug fix: the operand's decoration was previously computed but
        # discarded, so this method always returned None. Subclasses override
        # decoration(); delegate-and-return keeps any that fall back here correct.
        return self._operand.decoration()
    def directDependencies(self,sortByType=True):
        return self._operand.directDependencies(sortByType=sortByType)
    def label_(self):
        return self._operand.label_()
class _SequenceNegation(_UnarySequenceOperator):
    """Expression-tree stand-in for the '!' (negation) operator."""
    def __init__(self, operand):
        super(_SequenceNegation, self).__init__(operand)
    def __str__(self):
        return '~' + str(self._operand)
    def dumpSequenceConfig(self):
        # the old configuration language spells negation as '!'
        return '!' + self._operand.dumpSequenceConfig()
    def dumpSequencePython(self, options=PrintOptions()):
        inner = self._operand.dumpSequencePython(options)
        if self._operand.isOperation():
            # parenthesize compound operands so '~' applies to the whole expression
            return '~(' + inner + ')'
        return '~' + inner
    def decoration(self):
        return '!'
class _SequenceIgnore(_UnarySequenceOperator):
    """Expression-tree stand-in for the '-' (ignore) operator."""
    def __init__(self, operand):
        super(_SequenceIgnore, self).__init__(operand)
    def __str__(self):
        return 'ignore(' + str(self._operand) + ')'
    def dumpSequenceConfig(self):
        # the old configuration language spells ignore as '-'
        return '-' + self._operand.dumpSequenceConfig()
    def dumpSequencePython(self, options=PrintOptions()):
        return 'cms.ignore(' + self._operand.dumpSequencePython(options) + ')'
    def decoration(self):
        return '-'
class _SequenceWait(_UnarySequenceOperator):
    """Expression-tree stand-in for the '|' (wait) operator."""
    def __init__(self, operand):
        super(_SequenceWait, self).__init__(operand)
    def __str__(self):
        return 'wait(' + str(self._operand) + ')'
    def dumpSequenceConfig(self):
        # the old configuration language spells wait as '|'
        return '|' + self._operand.dumpSequenceConfig()
    def dumpSequencePython(self, options=PrintOptions()):
        return 'cms.wait(' + self._operand.dumpSequencePython(options) + ')'
    def decoration(self):
        return '|'
class _SequenceWaitAndIgnore(_UnarySequenceOperator):
    """Expression-tree stand-in for the '+' (wait-and-ignore) operator."""
    def __init__(self, operand):
        super(_SequenceWaitAndIgnore, self).__init__(operand)
    def __str__(self):
        return 'wait(ignore(' + str(self._operand) + '))'
    def dumpSequenceConfig(self):
        # the old configuration language spells wait-and-ignore as '+'
        return '+' + self._operand.dumpSequenceConfig()
    def dumpSequencePython(self, options=PrintOptions()):
        return 'cms.wait(cms.ignore(' + self._operand.dumpSequencePython(options) + '))'
    def decoration(self):
        return '+'
def ignore(seq):
    """The EDFilter passed as an argument will be run but its filter value will be ignored
    """
    # ignore(wait(x)) collapses into the single combined wait-and-ignore node
    return _SequenceWaitAndIgnore(seq._operand) if isinstance(seq, _SequenceWait) else _SequenceIgnore(seq)
def wait(seq):
    """All modules after this module in the sequence will wait for this module to finish before being scheduled to run.
    """
    # wait(ignore(x)) collapses into the single combined wait-and-ignore node
    return _SequenceWaitAndIgnore(seq._operand) if isinstance(seq, _SequenceIgnore) else _SequenceWait(seq)
class Path(_ModuleSequenceType):
    """Module sequence that registers itself with the process as a Path."""
    def __init__(self, *args, **kwargs):
        super(Path, self).__init__(*args, **kwargs)
    def _placeImpl(self, name, proc):
        # hand this Path to the process under the given label
        proc._placePath(name, self)
class EndPath(_ModuleSequenceType):
    """Module sequence that registers itself with the process as an EndPath."""
    def __init__(self, *args, **kwargs):
        super(EndPath, self).__init__(*args, **kwargs)
    def _placeImpl(self, name, proc):
        # hand this EndPath to the process under the given label
        proc._placeEndPath(name, self)
class FinalPath(_ModuleSequenceType):
    """Module sequence that registers itself with the process as a FinalPath.

    FinalPath does not support Task associations: associate() always raises.
    """
    def __init__(self,*arg,**argv):
        super(FinalPath,self).__init__(*arg,**argv)
    def _placeImpl(self,name,proc):
        # hand this FinalPath to the process under the given label
        proc._placeFinalPath(name,self)
    def associate(self,*tasks):
        # Bug fix: accept any number of arguments (matching the base-class
        # signature) so callers passing zero or several Tasks get this clear
        # error instead of a confusing wrong-argument-count TypeError.
        raise TypeError("FinalPath does not allow associations with Tasks")
class Sequence(_ModuleSequenceType,_Sequenceable):
    """A _ModuleSequenceType that is itself sequenceable, so it can be
    nested inside Paths, EndPaths and other Sequences."""
    def __init__(self, *args, **kwargs):
        super(Sequence, self).__init__(*args, **kwargs)
    def _placeImpl(self, name, proc):
        # hand this Sequence to the process under the given label
        proc._placeSequence(name, self)
    def _clonesequence(self, lookuptable):
        # Clone only on first sight; later references resolve to the same
        # clone through the lookup table (keyed by both original and clone id).
        if id(self) not in lookuptable:
            if self._seq is None:
                clone = type(self)()
            else:
                clone = type(self)(self._seq._clonesequence(lookuptable))
            lookuptable[id(self)] = clone
            lookuptable[id(clone)] = clone
        return lookuptable[id(self)]
    def _visitSubNodes(self, visitor):
        self.visit(visitor)
class SequencePlaceholder(_Sequenceable):
    """A named stand-in for a Sequence, substituted for the real object via resolve()."""
    def __init__(self, name):
        self._name = name
    def _placeImpl(self, name, proc):
        # placeholders are never registered with the process
        pass
    def __str__(self):
        return self._name
    def insertInto(self, parameterSet, myname):
        # a placeholder must have been replaced before configuration insertion
        raise RuntimeError("The SequencePlaceholder "+self._name+" was never overridden")
    def resolve(self, processDict, keepIfCannotResolve=False):
        if self._name not in processDict:
            if keepIfCannotResolve:
                return self
            raise RuntimeError("The SequencePlaceholder "+self._name+" cannot be resolved.\n Known keys are:"+str(processDict.keys()))
        target = processDict[self._name]
        if not isinstance(target, _Sequenceable):
            raise RuntimeError("The SequencePlaceholder "+self._name+" refers to an object type which is not allowed to be on a sequence: "+str(type(target)))
        return target.resolve(processDict)
    def _clonesequence(self, lookuptable):
        # clone once per instance; reuse through the lookup table thereafter
        if id(self) not in lookuptable:
            clone = type(self)(self._name)
            lookuptable[id(self)] = clone
            lookuptable[id(clone)] = clone
        return lookuptable[id(self)]
    def copy(self):
        other = SequencePlaceholder.__new__(type(self))
        other.__init__(self._name)
        return other
    def dumpSequenceConfig(self):
        return 'cms.SequencePlaceholder("%s")' % self._name
    def dumpSequencePython(self, options=PrintOptions()):
        return 'cms.SequencePlaceholder("%s")' % self._name
    def dumpPython(self, options=PrintOptions()):
        # in a cfg dump the name is qualified with 'process.'
        prefix = 'process.' if options.isCfg else ''
        return 'cms.SequencePlaceholder("' + prefix + self._name + '")\n'
class Schedule(_ValidatingParameterListBase,_ConfigureComponent,_Unlabelable):
    """An explicitly ordered list of Path/EndPath/FinalPath objects, with
    optionally associated Tasks supplied via the 'tasks' keyword argument."""
    def __init__(self,*arg,**argv):
        # Positional arguments are the paths; the only allowed keyword is 'tasks'.
        super(Schedule,self).__init__(*arg)
        self._tasks = OrderedSet()
        theKeys = list(argv.keys())
        if theKeys:
            if len(theKeys) > 1 or theKeys[0] != "tasks":
                raise RuntimeError("The Schedule constructor can only have one keyword argument after its Path and\nEndPath arguments and it must use the keyword 'tasks'")
            taskList = argv["tasks"]
            # Normally we want a list of tasks, but we let it also work if the value is one Task
            if isinstance(taskList,Task):
                self.associate(taskList)
            else:
                try:
                    # Call this just to check that taskList is a list or other iterable object
                    self.__dummy(*taskList)
                except:
                    raise RuntimeError("The Schedule constructor argument with keyword 'tasks' must have a\nlist (or other iterable object) as its value")
                if taskList:
                    self.associate(*taskList)
    def __dummy(self, *args):
        # Exists only so the constructor can test iterability of 'tasks'.
        pass
    def associate(self,*tasks):
        # Attach Task objects to this Schedule; duplicates are ignored by OrderedSet.
        for task in tasks:
            if not isinstance(task, Task):
                raise TypeError("The associate function in the class Schedule only works with arguments of type Task")
            self._tasks.add(task)
    @staticmethod
    def _itemIsValid(item):
        # Only Path-like objects may appear in the ordered list itself.
        return isinstance(item,Path) or isinstance(item,EndPath) or isinstance(item,FinalPath)
    def copy(self):
        # Shallow copy of the path list; the task set is duplicated.
        import copy
        aCopy = copy.copy(self)
        aCopy._tasks = OrderedSet(self._tasks)
        return aCopy
    def _place(self,label,process):
        process.setPartialSchedule_(self,label)
    def _replaceIfHeldDirectly(self,original,replacement):
        """Only replaces an 'original' with 'replacement' if 'original' is directly held.
        If a contained Path or Task holds 'original' it will not be replaced."""
        didReplace = False
        if original in self._tasks:
            self._tasks.remove(original)
            if replacement is not None:
                self._tasks.add(replacement)
            didReplace = True
        # Replace (or drop, if replacement is None) matching paths in place,
        # iterating recorded indices in reverse so earlier indices stay valid.
        indices = []
        for i, e in enumerate(self):
            if original == e:
                indices.append(i)
        for i in reversed(indices):
            self.pop(i)
            if replacement is not None:
                self.insert(i, replacement)
            didReplace = True
        return didReplace
    def moduleNames(self):
        # Names of all modules reachable through the scheduled paths and tasks.
        result = set()
        visitor = NodeNameVisitor(result)
        for seq in self:
            seq.visit(visitor)
        for t in self._tasks:
            t.visit(visitor)
        return result
    def contains(self, mod):
        # True iff 'mod' (same instance) is found in any path or associated task.
        visitor = ContainsModuleVisitor(mod)
        for seq in self:
            seq.visit(visitor)
            if visitor.result():
                return True
        for t in self._tasks:
            t.visit(visitor)
            if visitor.result():
                return True
        return visitor.result()
    def tasks(self):
        """Returns the list of Tasks (that may contain other Tasks) that are associated directly to the Schedule."""
        return self._tasks
    def dumpPython(self, options=PrintOptions()):
        # Paths are referenced by label; task strings are sorted for a
        # deterministic dump.
        pathNames = ['process.'+p.label_() for p in self]
        if pathNames:
            s=', '.join(pathNames)
        else:
            s = ''
        associationContents = set()
        for task in self._tasks:
            if task.hasLabel_():
                associationContents.add(_Labelable.dumpSequencePython(task, options))
            else:
                associationContents.add(task.dumpPythonNoNewline(options))
        taskStrings = list()
        for iString in sorted(associationContents):
            taskStrings.append(iString)
        if taskStrings and s:
            return 'cms.Schedule(*[ ' + s + ' ], tasks=[' + ', '.join(taskStrings) + '])\n'
        elif s:
            return 'cms.Schedule(*[ ' + s + ' ])\n'
        elif taskStrings:
            return 'cms.Schedule(tasks=[' + ', '.join(taskStrings) + '])\n'
        else:
            return 'cms.Schedule()\n'
    def __str__(self):
        return self.dumpPython()
# Fills a list of all Sequences visited
# Can visit a Sequence, Path, or EndPath
class SequenceVisitor(object):
    """Visitor that collects every Sequence it encounters into the list
    supplied at construction. Usable on a Sequence, Path, or EndPath."""
    def __init__(self, collected):
        self.deps = collected
    def enter(self, visitee):
        if isinstance(visitee, Sequence):
            self.deps.append(visitee)
    def leave(self, visitee):
        pass
# Fills a list of all Tasks visited
# Can visit a Task, Sequence, Path, or EndPath
class TaskVisitor(object):
    """Visitor that collects every Task it encounters into the list
    supplied at construction. Usable on a Task, Sequence, Path, or EndPath."""
    def __init__(self, collected):
        self.deps = collected
    def enter(self, visitee):
        if isinstance(visitee, Task):
            self.deps.append(visitee)
    def leave(self, visitee):
        pass
# Fills a list of all ConditionalTasks visited
# Can visit a ConditionalTask, Sequence, Path, or EndPath
class ConditionalTaskVisitor(object):
    """Visitor that collects every ConditionalTask it encounters into the list
    supplied at construction. Usable on a ConditionalTask, Sequence, Path, or EndPath."""
    def __init__(self, collected):
        self.deps = collected
    def enter(self, visitee):
        if isinstance(visitee, ConditionalTask):
            self.deps.append(visitee)
    def leave(self, visitee):
        pass
# Fills a list of all modules visited.
# Can visit a Sequence, Path, EndPath, or Task
# For purposes of this visitor, a module is considered
# to be an object that is one of these types: EDProducer,
# EDFilter, EDAnalyzer, OutputModule, ESProducer, ESSource,
# Service. The last three of these can only appear on a
# Task, they are not sequenceable. An object of one
# of these types is also called a leaf.
class ModuleNodeVisitor(object):
    """Visitor that appends every leaf node (module) it sees to the list
    supplied at construction. Usable on a Sequence, Path, EndPath, or Task."""
    def __init__(self, collected):
        self.l = collected
    def enter(self, visitee):
        if visitee.isLeaf():
            self.l.append(visitee)
    def leave(self, visitee):
        pass
# Should not be used on Tasks.
# Similar to ModuleNodeVisitor with the following
# differences. It only lists the modules that were
# contained inside a Task. It should only be used
# on Sequences, Paths, and EndPaths.
class ModuleNodeOnTaskVisitor(object):
    """Visitor that collects only the leaf modules found inside Tasks.

    Should be used on Sequences, Paths, and EndPaths only — not directly
    on Tasks."""
    def __init__(self, collected):
        self.l = collected
        self._levelInTasks = 0
    def enter(self, visitee):
        if isinstance(visitee, Task):
            self._levelInTasks += 1
        if self._levelInTasks == 0:
            # not inside any Task: skip this node
            return
        if visitee.isLeaf():
            self.l.append(visitee)
    def leave(self, visitee):
        if self._levelInTasks > 0 and isinstance(visitee, Task):
            self._levelInTasks -= 1
class ModuleNodeOnConditionalTaskVisitor(object):
    """Visitor that collects leaf modules found inside ConditionalTasks,
    plus any modules a node exposes via modulesForConditionalTask_()."""
    def __init__(self, collected):
        self.l = collected
        self._levelInTasks = 0
    def enter(self, visitee):
        if isinstance(visitee, ConditionalTask):
            self._levelInTasks += 1
        # Modules exposed by a SwitchProducer are gathered even when the
        # SwitchProducer itself sits directly on the Path, so this check
        # must come before the depth test below.
        if hasattr(visitee, "modulesForConditionalTask_"):
            self.l.extend(visitee.modulesForConditionalTask_())
        if self._levelInTasks == 0:
            return
        if visitee.isLeaf():
            self.l.append(visitee)
    def leave(self, visitee):
        if self._levelInTasks > 0 and isinstance(visitee, ConditionalTask):
            self._levelInTasks -= 1
# Should not be used on Tasks.
# Similar to ModuleNodeVisitor with the following
# differences. It only lists the modules that were
# outside a Task, in the sequenced part of the sequence.
# It should only be used on Sequences, Paths, and
# EndPaths.
class ModuleNodeNotOnTaskVisitor(object):
    """Visitor that collects only the leaf modules in the sequenced part,
    i.e. those NOT contained inside a Task.

    Should be used on Sequences, Paths, and EndPaths only."""
    def __init__(self, collected):
        self.l = collected
        self._levelInTasks = 0
    def enter(self, visitee):
        if isinstance(visitee, Task):
            self._levelInTasks += 1
        if self._levelInTasks > 0:
            # inside a Task: skip this node
            return
        if visitee.isLeaf():
            self.l.append(visitee)
    def leave(self, visitee):
        if self._levelInTasks > 0 and isinstance(visitee, Task):
            self._levelInTasks -= 1
# Can visit Tasks, Sequences, Paths, and EndPaths
# result will be set to True if and only if
# the module is in the object directly or
# indirectly through contained Sequences or
# associated Tasks.
class ContainsModuleVisitor(object):
    """Visitor whose result() becomes True iff the module given at
    construction is visited. Usable on Tasks, Sequences, Paths, and EndPaths."""
    def __init__(self, mod):
        self._mod = mod
        self._result = False
    def result(self):
        return self._result
    def enter(self, visitee):
        # identity comparison: the exact same instance must be present
        self._result = self._result or (self._mod is visitee)
    def leave(self, visitee):
        pass
# Can visit Tasks, Sequences, Paths, and EndPaths
# Fills a set of the names of the visited leaves.
# For the labelable ones the name is the label.
# For a Service the name is the type.
# It raises an exception if a labelable object
# does not have a label at all. It will return
# 'None' if the label attribute exists but was set
# to None. If a Service is not attached to the process
# it will also raise an exception.
class NodeNameVisitor(object):
    """Visitor that adds the name of every visited leaf to the set supplied
    at construction.

    Labelable leaves contribute their label; other leaves (Services)
    contribute their type and must already be attached to the process,
    otherwise a RuntimeError is raised."""
    def __init__(self, names):
        self.l = names
    def enter(self, visitee):
        if not visitee.isLeaf():
            return
        if isinstance(visitee, _Labelable):
            self.l.add(visitee.label_())
        elif visitee._inProcess:
            self.l.add(visitee.type_())
        else:
            raise RuntimeError("Service not attached to process: {}".format(visitee.dumpPython()))
    def leave(self, visitee):
        pass
# This visitor works only with Sequences, Paths and EndPaths
# It will not work on Tasks
class ExpandVisitor(object):
    """ Expands the sequence into leafs and UnaryOperators """
    def __init__(self, type):
        # 'type' is the sequence class (Path, EndPath, ...) used to rebuild
        # the flattened result.
        self._type = type
        self.l = []
        self.taskLeaves = []
        self.taskLeavesInConditionalTasks = []
        # points at the list currently receiving Task leaves; switched to
        # taskLeavesInConditionalTasks while inside a ConditionalTask
        self.presentTaskLeaves = self.taskLeaves
        self._levelInTasks = 0
        self.conditionaltaskLeaves = []
        self._levelInConditionalTasks = 0
    def enter(self,visitee):
        if isinstance(visitee, Task):
            self._levelInTasks += 1
            return
        if isinstance(visitee, ConditionalTask):
            self.presentTaskLeaves = self.taskLeavesInConditionalTasks
            self._levelInConditionalTasks += 1
            return
        # route each leaf to the sequenced part, the Task part, or the
        # ConditionalTask part depending on the current nesting
        if visitee.isLeaf():
            if self._levelInTasks > 0:
                self.presentTaskLeaves.append(visitee)
            elif self._levelInConditionalTasks > 0:
                self.conditionaltaskLeaves.append(visitee)
            else:
                self.l.append(visitee)
    def leave(self, visitee):
        if self._levelInTasks > 0:
            if isinstance(visitee, Task):
                self._levelInTasks -= 1
            return
        if self._levelInConditionalTasks > 0:
            if isinstance(visitee, ConditionalTask):
                self._levelInConditionalTasks -= 1
                # back at top level: Task leaves go to the plain list again
                if 0 == self._levelInConditionalTasks:
                    self.presentTaskLeaves = self.taskLeaves
            return
        # a unary operator replaces the leaf just appended, re-wrapping it
        if isinstance(visitee,_UnarySequenceOperator):
            self.l[-1] = visitee
    def result(self):
        # Rebuild a flattened sequence of type self._type: collected Task
        # leaves go into one new Task, ConditionalTask leaves into one new
        # ConditionalTask.
        tsks = []
        if self.taskLeaves:
            tsks.append(Task(*self.taskLeaves))
        if self.conditionaltaskLeaves:
            ct = ConditionalTask(*self.conditionaltaskLeaves)
            if self.taskLeavesInConditionalTasks:
                # NOTE(review): ConditionalTask.append(...) — confirm this is
                # the intended API for adding members (elsewhere 'add' or
                # 'associate' is used for Task-like containers).
                ct.append(*self.taskLeavesInConditionalTasks)
            tsks.append(ct)
        if len(self.l) > 0:
            # why doesn't (sum(self.l) work?
            seq = self.l[0]
            for el in self.l[1:]:
                seq += el
            return self._type(seq, *tsks)
        else:
            return self._type(*tsks)
    def resultString(self):
        # '+'-joined sequenced leaves, then ','-joined Task leaves.
        sep = ''
        returnValue = ''
        for m in self.l:
            if m is not None:
                returnValue += sep+str(m)
                sep = '+'
        if returnValue:
            sep = ','
        for n in self.taskLeaves:
            if n is not None:
                returnValue += sep+str(n)
                sep = ','
        return returnValue
# This visitor is only meant to run on Sequences, Paths, and EndPaths
# It intentionally ignores nodes on Tasks when it does this.
class DecoratedNodeNameVisitor(object):
    """ Adds any '!' or '-' needed. Takes a list """
    def __init__(self,l):
        self.l = l
        # decoration ('!' or '-') pending for the next leaf seen
        self._decoration =''
        self._levelInTasks = 0
    def initialize(self):
        # Reset so the same visitor instance can be reused for another walk.
        self.l[:] = []
        self._decoration =''
        self._levelInTasks = 0
    def enter(self,visitee):
        if isinstance(visitee, _TaskBase):
            self._levelInTasks += 1
        if self._levelInTasks > 0:
            # nodes inside Tasks are intentionally skipped
            return
        if visitee.isLeaf():
            # a labeled leaf contributes its (possibly decorated) label
            if hasattr(visitee, "_Labelable__label"):
                self.l.append(self._decoration+visitee.label_())
            else:
                error = "An object in a sequence was not found in the process\n"
                if hasattr(visitee, "_filename"):
                    error += "From file " + visitee._filename
                else:
                    error += "Dump follows\n" + repr(visitee)
                raise RuntimeError(error)
        if isinstance(visitee,_BooleanLogicExpression):
            self.l.append(self._decoration+visitee.operatorString())
        # a unary operator decorates the NEXT node entered; anything else
        # clears the pending decoration
        if isinstance(visitee,_UnarySequenceOperator):
            self._decoration=visitee.decoration()
        else:
            self._decoration=''
    def leave(self,visitee):
        # Ignore if this visitee is inside a Task
        if self._levelInTasks > 0:
            if isinstance(visitee, _TaskBase):
                self._levelInTasks -= 1
            return
        if isinstance(visitee,_BooleanLogicExpression):
            #need to add the 'go back' command to keep track of where we are in the tree
            self.l.append('@')
# This visitor is only meant to run on Sequences, Paths, and EndPaths
# Similar to DecoratedNodeNameVistor. The only difference
# is it also builds a separate list of leaves on Tasks.
class DecoratedNodeNamePlusVisitor(object):
    """ Adds any '!' or '-' needed. Takes a list """
    def __init__(self,l):
        self.l = l
        # decoration ('!' or '-') pending for the next leaf seen
        self._decoration =''
        self._levelInTasks = 0
        # leaves found inside Tasks are gathered separately here
        self._leavesOnTasks = []
    def initialize(self):
        # Reset so the same visitor instance can be reused for another walk.
        self.l[:] = []
        self._decoration =''
        self._levelInTasks = 0
        self._leavesOnTasks[:] = []
    def enter(self,visitee):
        # NOTE(review): this checks isinstance(visitee, Task) while the
        # otherwise-parallel DecoratedNodeNameVisitor checks _TaskBase —
        # confirm ConditionalTasks are meant to be treated differently here.
        if isinstance(visitee, Task):
            self._levelInTasks += 1
        if self._levelInTasks > 0:
            # inside a Task: record the leaf on the side list only
            if visitee.isLeaf():
                self._leavesOnTasks.append(visitee)
            return
        if visitee.isLeaf():
            if hasattr(visitee, "_Labelable__label"):
                self.l.append(self._decoration+visitee.label_())
            else:
                error = "An object in a sequence was not found in the process\n"
                if hasattr(visitee, "_filename"):
                    error += "From file " + visitee._filename
                else:
                    error += "Dump follows\n" + repr(visitee)
                raise RuntimeError(error)
        if isinstance(visitee,_BooleanLogicExpression):
            self.l.append(self._decoration+visitee.operatorString())
        # a unary operator decorates the NEXT node entered; anything else
        # clears the pending decoration
        if isinstance(visitee,_UnarySequenceOperator):
            self._decoration=visitee.decoration()
        else:
            self._decoration=''
    def leave(self,visitee):
        # Ignore if this visitee is inside a Task
        if self._levelInTasks > 0:
            if isinstance(visitee, Task):
                self._levelInTasks -= 1
            return
        if isinstance(visitee,_BooleanLogicExpression):
            #need to add the 'go back' command to keep track of where we are in the tree
            self.l.append('@')
    def leavesOnTasks(self):
        return self._leavesOnTasks
class _CopyAndExcludeSequenceVisitorOld(object):
    """Traverses a Sequence and constructs a new sequence which does not contain modules from the specified list"""
    def __init__(self,modulesToRemove):
        self.__modulesToIgnore = modulesToRemove
        # Stack of lists, one per nesting level; each entry is a
        # [node, wasChanged] pair. Level 0 holds the top-level children.
        self.__stack = list()
        self.__stack.append(list())
        self.__result = None
        self.__didExclude = False
    def enter(self,visitee):
        if len(self.__stack) > 0:
            #add visitee to its parent's stack entry
            self.__stack[-1].append([visitee,False])
        if visitee.isLeaf():
            # excluded leaves are marked [None, True] so leave() drops them
            if visitee in self.__modulesToIgnore:
                self.__didExclude = True
                self.__stack[-1][-1]=[None,True]
        elif isinstance(visitee, Sequence):
            if visitee in self.__modulesToIgnore:
                self.__didExclude = True
                self.__stack[-1][-1]=[None,True]
            self.__stack.append(list())
        else:
            #need to add a stack entry to keep track of children
            self.__stack.append(list())
    def leave(self,visitee):
        node = visitee
        if not visitee.isLeaf():
            #were any children changed?
            l = self.__stack[-1]
            changed = False
            countNulls = 0
            nonNulls = list()
            for c in l:
                if c[1] == True:
                    changed = True
                if c[0] is None:
                    countNulls +=1
                else:
                    nonNulls.append(c[0])
            if changed:
                self.__didExclude = True
                if countNulls != 0:
                    #this node must go away
                    if len(nonNulls) == 0:
                        #all subnodes went away
                        node = None
                    else:
                        # re-chain the surviving children with '+'
                        node = nonNulls[0]
                        for n in nonNulls[1:]:
                            node = node+n
                else:
                    #some child was changed so we need to clone
                    # this node and replace it with one that holds
                    # the new child(ren)
                    children = [x[0] for x in l ]
                    if not isinstance(visitee,Sequence):
                        node = visitee.__new__(type(visitee))
                        node.__init__(*children)
                    else:
                        node = nonNulls[0]
            if node != visitee:
                #we had to replace this node so now we need to
                # change parent's stack entry as well
                if len(self.__stack) > 1:
                    p = self.__stack[-2]
                    #find visitee and replace
                    for i,c in enumerate(p):
                        if c[0]==visitee:
                            c[0]=node
                            c[1]=True
                            break
        if not visitee.isLeaf():
            # pop this node's child list now that it has been folded in
            self.__stack = self.__stack[:-1]
    def result(self):
        # Combine the surviving top-level children back into one expression.
        result = None
        for n in (x[0] for x in self.__stack[0]):
            if n is None:
                continue
            if result is None:
                result = n
            else:
                result = result+n
        return result
    def didExclude(self):
        return self.__didExclude
# This visitor can also be used on Tasks.
class _MutatingSequenceVisitor(object):
    """Traverses a Sequence and constructs a new sequence by applying the operator to each element of the sequence"""
    # In many cases this operates in an intuitive manner that needs
    # no explanation, but there are some complex cases and I will try to
    # explain these in the following comments.
    #
    # First of all the top level Sequence or Task being visited may contain
    # many objects of different types. These contained objects are never
    # modified. If they are not left the same, they are instead replaced
    # by other instances, replaced by new instances or removed.
    # Contained objects are only replaced or removed when they were directly
    # modified or if they contain something that was modified.
    # If all the contents of a Sequence, Task, _SequenceNegation or _SequenceIgnore
    # object that is not at the top level are removed, then the containing
    # object is also removed.
    # If the contents of a Sequence other than the top level sequence are
    # modified, then the sequence elements and Task objects it contains get
    # passed up to be included in the top level sequence. If the contents of
    # a Task are modified, a new Task object is created and passed up to be
    # included in the top level Sequence or Task. If it is a _SequenceNegation
    # or _SequenceIgnore instance it will simply be removed completely if its
    # operand is removed. If the operand is replaced then a new object of the
    # same type will be constructed replacing the old.
    #
    # Note that if a Sequence contains a SequencePlaceholder, the future contents
    # of that placeholder are not affected by the changes. If that is an issue,
    # then you probably want to resolve the placeholders before using this
    # class.
    #
    # If this is used multiple times on the same sequence or task, the consequences
    # might interfere with one another in unusual cases.
    #
    # One example, the matching to find objects to modify is based on instances
    # (the python id) being the same. So if you modify the contents of a Task or
    # Sequence and then subsequently try to modify that Sequence or Task, then
    # it will either no longer exist or be a different instance and so nothing
    # would get modified. Note that the one exception to this matching by instance
    # is _SequenceIgnore and _SequenceNegation. In that case, two objects are
    # recognized as matching if the contained module is the same instance instead
    # of requiring the _SequenceNegation or _SequenceIgnore object to be the same
    # instance.
    #
    # Another example. There is an input operator that removes the first instance
    # of an object. Applying this visitor with that operation might give unexpected
    # results if another operation previously changed the number of times the
    # that instance appears or the order it appears in the visitation. This
    # should only be an issue if the item is on a Task and even then only in
    # unusual circumstances.
    def __init__(self,operator):
        # operator: callable taking a node, returning the same node (no change),
        # a replacement node, or None (remove the node)
        self.__operator = operator
        # You add a list to the __stack when entering any non-Leaf object
        # and pop the last element when leaving any non-Leaf object
        self.__stack = list()
        self.__stack.append(list())
        self.__didApply = False
        # >0 while traversing the interior of a node that was already
        # replaced/removed, so its contents can be skipped
        self.__levelInModifiedNonLeaf = 0
    def enter(self,visitee):
        """Apply the operator to visitee and record the outcome on the current stack level."""
        # Ignore the content of replaced or removed Sequences,
        # Tasks, and operators.
        if self.__levelInModifiedNonLeaf > 0:
            if not visitee.isLeaf():
                self.__levelInModifiedNonLeaf += 1
            return
        # Just a sanity check
        if not len(self.__stack) > 0:
            raise RuntimeError("LogicError Empty stack in MutatingSequenceVisitor.\n"
                "This should never happen. Contact a Framework developer.")
        # The most important part.
        # Apply the operator that might change things, The rest
        # of the class is just dealing with side effects of these changes.
        v = self.__operator(visitee)
        if v is visitee:
            # the operator did not change the visitee
            # The 3 element list being appended has the following contents
            # element 0 - either the unmodified object, the modified object, or
            #   a sequence collection when it is a Sequence whose contents have
            #   been modified.
            # element 1 - Indicates whether the object was modified.
            # element 2 - None or a list of tasks for a Sequence
            #   whose contents have been modified.
            self.__stack[-1].append([visitee, False, None])
            if not visitee.isLeaf():
                # need to add a list to keep track of the contents
                # of the Sequence, Task, or operator we just entered.
                self.__stack.append(list())
        else:
            # the operator changed the visitee
            self.__didApply = True
            self.__stack[-1].append([v, True, None])
            if not visitee.isLeaf():
                # Set flag to indicate modified Sequence, Task, or operator
                self.__levelInModifiedNonLeaf = 1
    def leave(self,visitee):
        """On exiting a non-leaf node whose contents changed, rebuild it and
        store the rebuilt node (or its flattened pieces) in the parent's entry."""
        # nothing to do for leaf types because they do not have contents
        if visitee.isLeaf():
            return
        # Ignore if this visitee is inside something that was already removed
        # or replaced.
        if self.__levelInModifiedNonLeaf > 0:
            self.__levelInModifiedNonLeaf -= 1
            return
        # Deal with visitees which have contents (Sequence, Task, _SequenceIgnore,
        # or _SequenceNegation) and although we know the visitee itself did not get
        # changed by the operator, the contents of the visitee might have been changed.
        # did any object inside the visitee change?
        contents = self.__stack[-1]
        changed = False
        allNull = True
        for c in contents:
            if c[1] == True:
                changed = True
            if c[0] is not None:
                allNull = False
        if changed:
            if allNull:
                # every child was removed, so the container goes away too
                self.__stack[-2][-1] = [None, True, None]
            elif isinstance(visitee, _UnarySequenceOperator):
                # clone the ~ / ignore wrapper around the (single) new operand
                node = visitee.__new__(type(visitee))
                node.__init__(contents[0][0])
                self.__stack[-2][-1] = [node, True, None]
            elif isinstance(visitee, _TaskBase):
                # build a new task of the same type from the surviving children
                nonNull = []
                for c in contents:
                    if c[0] is not None:
                        nonNull.append(c[0])
                self.__stack[-2][-1] = [visitee._makeInstance(*nonNull), True, None]
            elif isinstance(visitee, Sequence):
                # flatten the modified Sequence: sequence elements are collected
                # into a _SequenceCollection, tasks are passed up separately
                seq = _SequenceCollection()
                tasks = list()
                for c in contents:
                    if c[0] is None:
                        continue
                    if isinstance(c[0], _TaskBase):
                        tasks.append(c[0])
                    else:
                        seq = seq + c[0]
                    if c[2] is not None:
                        tasks.extend(c[2])
                self.__stack[-2][-1] = [seq, True, tasks]
        # When you exit the Sequence, Task, or operator,
        # drop the list which holds information about
        # its contents.
        if not visitee.isLeaf():
            self.__stack = self.__stack[:-1]
    def result(self, visitedContainer):
        """Return the rebuilt contents: a list of entries for a task container,
        otherwise [sequenceCollection, listOfTasks]."""
        if isinstance(visitedContainer, _TaskBase):
            result = list()
            for n in (x[0] for x in self.__stack[0]):
                if n is not None:
                    result.append(n)
            return result
        seq = _SequenceCollection()
        tasks = list()
        for c in self.__stack[0]:
            if c[0] is None:
                continue
            if isinstance(c[0], _TaskBase):
                tasks.append(c[0])
            else:
                seq = seq + c[0]
            if c[2] is not None:
                tasks.extend(c[2])
        return [seq, tasks]
    def _didApply(self):
        """Return True if the operator changed at least one node."""
        return self.__didApply
# This visitor can also be used on Tasks.
class _CopyAndRemoveFirstSequenceVisitor(_MutatingSequenceVisitor):
    """Traverses a Sequence (or Task) and constructs a new sequence which
    omits the first occurrence of the specified module.

    Use didRemove() after the visit to learn whether anything was removed.
    """
    def __init__(self,moduleToRemove):
        class _RemoveFirstOperator(object):
            # Stateful operator: drops (returns None for) the first identity
            # match only; any later matches pass through unchanged.
            def __init__(self,moduleToRemove):
                self.__moduleToRemove = moduleToRemove
                self.__found = False
            def __call__(self,test):
                if not self.__found and test is self.__moduleToRemove:
                    self.__found = True
                    return None
                return test
        # Name the class explicitly instead of super(type(self),self):
        # type(self) is the *dynamic* type, so a subclass calling this
        # __init__ would recurse infinitely.
        super(_CopyAndRemoveFirstSequenceVisitor,self).__init__(_RemoveFirstOperator(moduleToRemove))
    def didRemove(self):
        """Return True if the module was found and removed."""
        return self._didApply()
# This visitor can also be used on Tasks.
class _CopyAndExcludeSequenceVisitor(_MutatingSequenceVisitor):
    """Traverses a Sequence (or Task) and constructs a new sequence which does
    not contain any of the modules in the specified list.

    Use didExclude() after the visit to learn whether anything was excluded.
    """
    def __init__(self,modulesToRemove):
        class _ExcludeOperator(object):
            def __init__(self,modulesToRemove):
                self.__modulesToIgnore = modulesToRemove
            def __call__(self,test):
                # Use the stored collection (previously this read the enclosing
                # closure and the attribute was dead) so the operator is
                # self-contained.
                if test in self.__modulesToIgnore:
                    return None
                return test
        # Name the class explicitly instead of super(type(self),self), which
        # would recurse infinitely if this class were ever subclassed.
        super(_CopyAndExcludeSequenceVisitor,self).__init__(_ExcludeOperator(modulesToRemove))
    def didExclude(self):
        """Return True if at least one module was excluded."""
        return self._didApply()
# This visitor can also be used on Tasks.
class _CopyAndReplaceSequenceVisitor(_MutatingSequenceVisitor):
    """Traverses a Sequence and constructs a new sequence which replaces a specified module with a different module.

    Use didReplace() after the visit to learn whether anything was replaced.
    """
    def __init__(self,target,replace):
        class _ReplaceOperator(object):
            def __init__(self,target,replace):
                self.__target = target
                self.__replace = replace
            def __call__(self,test):
                # equality (not identity) match, so wrapped forms like
                # ~mod / cms.ignore(mod) can also compare equal
                if test == self.__target:
                    return self.__replace
                return test
        # Name the class explicitly instead of super(type(self),self), which
        # would recurse infinitely if this class were ever subclassed.
        super(_CopyAndReplaceSequenceVisitor,self).__init__(_ReplaceOperator(target,replace))
    def didReplace(self):
        """Return True if at least one replacement was made."""
        return self._didApply()
class _TaskBase(_ConfigureComponent, _Labelable) :
    """Common behavior shared by Task and ConditionalTask.

    Holds an unordered-duplicate-free collection of modules/tasks and
    implements the copy/replace/remove/visit machinery on top of it.
    Subclasses supply the policy hooks _taskType, _makeInstance,
    _allowedInTask and _mustResolve.
    """
    def __init__(self, *items):
        self._collection = OrderedSet()
        self.add(*items)
    def __setattr__(self,name,value):
        # Only private ("_"-prefixed) attributes may be set; a task is not a
        # parameter container.
        if not name.startswith("_"):
            raise AttributeError("You cannot set parameters for {} objects.".format(self._taskType()))
        else:
            self.__dict__[name] = value
    def add(self, *items):
        """Add modules/tasks to the collection, validating each entry."""
        for item in items:
            if not self._allowedInTask(item):
                raise RuntimeError("Adding an entry of type '{0}' to a {1}.\n"
                    "It is illegal to add this type to a {1}.".format(type(item).__name__, self._taskType()))
            self._collection.add(item)
    def fillContents(self, taskContents, options=PrintOptions()):
        """Add the python dump of this task's contents (recursively) to the set taskContents."""
        # only dump the label, if possible
        if self.hasLabel_():
            taskContents.add(_Labelable.dumpSequencePython(self, options))
        else:
            for i in self._collection:
                if isinstance(i, _TaskBase):
                    i.fillContents(taskContents, options)
                else:
                    taskContents.add(i.dumpSequencePython(options))
    def dumpPython(self, options=PrintOptions()):
        """Returns a string which is the python representation of the object"""
        s = self.dumpPythonNoNewline(options)
        return s + "\n"
    def dumpPythonNoNewline(self, options=PrintOptions()):
        """Returns a string which is the python representation of the object"""
        taskContents = set()
        for i in self._collection:
            if isinstance(i, _TaskBase):
                i.fillContents(taskContents, options)
            else:
                taskContents.add(i.dumpSequencePython(options))
        s=''
        iFirst = True
        for item in sorted(taskContents):
            if not iFirst:
                s += ", "
            iFirst = False
            s += item
        if len(taskContents) > 255:
            # too many arguments for a python call; pass them as one list instead
            s = "*[" + s + "]"
        return "cms.{}({})".format(self._taskType(),s)
    def directDependencies(self,sortByType=True):
        return findDirectDependencies(self, self._collection,sortByType=sortByType)
    def _isTaskComponent(self):
        return False
    def isLeaf(self):
        return False
    def visit(self,visitor):
        """Call visitor.enter/leave for each entry, recursing into non-leaf entries."""
        for i in self._collection:
            visitor.enter(i)
            if not i.isLeaf():
                i.visit(visitor)
            visitor.leave(i)
    def _errorstr(self):
        # Fixed: previously called self.taskType_(), which is not defined
        # anywhere; the subclass hook used throughout this class is the
        # static method _taskType().
        return "{}(...)".format(self._taskType())
    def __iter__(self):
        for key in self._collection:
            yield key
    def __str__(self):
        # comma-separated list of all contained modules, tasks flattened
        l = []
        v = ModuleNodeVisitor(l)
        self.visit(v)
        s = ''
        for i in l:
            if s:
                s += ', '
            s += str (i)
        return s
    def __repr__(self):
        s = str(self)
        return "cms."+type(self).__name__+'('+s+')\n'
    def moduleNames(self):
        """Returns a set containing the names of all modules being used"""
        result = set()
        visitor = NodeNameVisitor(result)
        self.visit(visitor)
        return result
    def contains(self, mod):
        """Return True if mod is found anywhere in this task (recursively)."""
        visitor = ContainsModuleVisitor(mod)
        self.visit(visitor)
        return visitor.result()
    def copy(self):
        """Return a shallow copy holding the same entries."""
        return self._makeInstance(*self._collection)
    def copyAndExclude(self,listOfModulesToExclude):
        """Returns a copy of the sequence which excludes those module in 'listOfModulesToExclude'"""
        # You can exclude instances of these types EDProducer, EDFilter, ESSource, ESProducer,
        # Service, or Task.
        # Mostly this is very intuitive, but there are some complications in cases
        # where objects that contain other objects are involved. See the comments
        # for the _MutatingSequenceVisitor.
        for i in listOfModulesToExclude:
            if not i._isTaskComponent():
                raise TypeError("copyAndExclude can only exclude objects that can be placed on a Task")
        v = _CopyAndExcludeSequenceVisitor(listOfModulesToExclude)
        self.visit(v)
        return self._makeInstance(*v.result(self))
    def copyAndAdd(self, *modulesToAdd):
        """Returns a copy of the Task adding modules/tasks"""
        t = self.copy()
        t.add(*modulesToAdd)
        return t
    def expandAndClone(self):
        """Return a shallow copy with all subTasks flattened out (removed), keeping
        all the modules that were in those subTasks as well as the top level ones."""
        l = []
        v = ModuleNodeVisitor(l)
        self.visit(v)
        return self._makeInstance(*l)
    def replace(self, original, replacement):
        """Finds all instances of 'original' and substitutes 'replacement' for them.
           Returns 'True' if a replacement occurs."""
        # This works for either argument being of type EDProducer, EDFilter, ESProducer,
        # ESSource, Service, or Task.
        #
        # Mostly this is very intuitive, but there are some complications in cases
        # where objects that contain other objects are involved. See the comments
        # for the _MutatingSequenceVisitor.
        if not self._allowedInTask(original) or (not replacement is None and not self._allowedInTask(replacement)):
            raise TypeError("The {0} replace function only works with objects that can be placed on a {0}\n".format(self._taskType()) + \
                            "  replace was called with original type = {}\n".format(str(type(original))) + \
                            "  and replacement type = {}\n".format(str(type(replacement))))
        else:
            v = _CopyAndReplaceSequenceVisitor(original,replacement)
            self.visit(v)
            if v.didReplace():
                self._collection.clear()
                self.add(*v.result(self))
            return v.didReplace()
    def remove(self, something):
        """Remove the first occurrence of a module
           Returns 'True' if the module has been removed, False if it was not found"""
        # You can remove instances of these types EDProducer, EDFilter, ESSource,
        # ESProducer, Service, or Task,
        #
        # Mostly this is very intuitive, but there are some complications in cases
        # where objects that contain other objects are involved. See the comments
        # for the _MutatingSequenceVisitor.
        #
        # Works very similar to copyAndExclude, there are 2 differences. This changes
        # the object itself instead of making a copy and second it only removes
        # the first instance of the argument instead of all of them.
        if not self._allowedInTask(something):
            raise TypeError("remove only works with objects that can be placed on a Task")
        v = _CopyAndRemoveFirstSequenceVisitor(something)
        self.visit(v)
        if v.didRemove():
            self._collection.clear()
            self.add(*v.result(self))
        return v.didRemove()
    def resolve(self, processDict,keepIfCannotResolve=False):
        """Recursively replace any contained placeholders using processDict."""
        temp = OrderedSet()
        for i in self._collection:
            if self._mustResolve(i):
                temp.add(i.resolve(processDict,keepIfCannotResolve))
            else:
                temp.add(i)
        self._collection = temp
        return self
class _TaskBasePlaceholder(object):
    """Base class for placeholders that stand in for a Task/ConditionalTask
    which will be looked up later, by label, in the process dictionary.
    Subclasses supply _typeName, _makeInstance, _allowedInTask and _taskClass."""
    def __init__(self, name):
        self._name = name
    def _isTaskComponent(self):
        return False
    def isLeaf(self):
        return False
    def visit(self,visitor):
        # a placeholder has no contents to visit
        pass
    def __str__(self):
        return self._name
    def insertInto(self, parameterSet, myname):
        # placeholders must be resolved before configuration insertion
        raise RuntimeError("The {} {} was never overridden".format(self._typeName(), self._name))
    def resolve(self, processDict,keepIfCannotResolve=False):
        """Return the real object this placeholder refers to, looked up in processDict."""
        if self._name not in processDict:
            if keepIfCannotResolve:
                return self
            raise RuntimeError("The {} {} cannot be resolved.\n Known keys are: {}".format(self._typeName(), self._name,str(processDict.keys())))
        found = processDict[self._name]
        if not self._allowedInTask(found):
            raise RuntimeError("The {} {} refers to an object type which is not allowed to be on a task: {}".format(self._typeName(), self._name, str(type(found))))
        if isinstance(found, self._taskClass()):
            # the resolved task may itself contain placeholders
            return found.resolve(processDict)
        return found
    def copy(self):
        return self._makeInstance(self._name)
    def dumpSequencePython(self, options=PrintOptions()):
        return 'cms.{}("{}")'.format(self._typeName(), self._name)
    def dumpPython(self, options=PrintOptions()):
        """Python representation; prefixes the label with 'process.' inside a cfg."""
        pieces = ['cms.{}(\"'.format(self._typeName())]
        if options.isCfg:
            pieces.append('process.')
        pieces.append(self._name+'\")\n')
        return ''.join(pieces)
class Task(_TaskBase) :
    """Holds EDProducers, EDFilters, ESProducers, ESSources, Services, and Tasks.
    A Task can be associated with Sequences, Paths, EndPaths, ConditionalTasks and the Schedule.
    An EDProducer or EDFilter will be enabled to run unscheduled if it is on
    a task associated with the Schedule or any scheduled Path or EndPath (directly
    or indirectly through Sequences) and not be on any scheduled Path or EndPath.
    ESSources, ESProducers, and Services will be enabled to run if they are on
    a Task associated with the Schedule or a scheduled Path or EndPath. In other
    cases, they will be enabled to run if and only if they are not on a Task attached
    to the process.
    """
    @staticmethod
    def _taskType():
        # type tag used in dumps and error messages
        return "Task"
    def _place(self, name, proc):
        proc._placeTask(name,self)
    def _isTaskComponent(self):
        # a Task may itself be nested inside another Task
        return True
    @staticmethod
    def _makeInstance(*items):
        return Task(*items)
    @staticmethod
    def _allowedInTask(item):
        """True for placeholders and for any configurable that may sit on a Task."""
        if isinstance(item, TaskPlaceholder):
            return True
        return isinstance(item, _ConfigureComponent) and item._isTaskComponent()
    @staticmethod
    def _mustResolve(item):
        # only nested tasks and placeholders need resolve() applied
        return isinstance(item, (Task, TaskPlaceholder))
class TaskPlaceholder(_TaskBasePlaceholder):
    """Stands in for a Task that will be defined later and resolved by label."""
    def _isTaskComponent(self):
        # like a real Task, a placeholder may be put on a Task
        return True
    @staticmethod
    def _typeName():
        return "TaskPlaceholder"
    @staticmethod
    def _makeInstance(name):
        return TaskPlaceholder(name)
    @staticmethod
    def _allowedInTask(obj):
        # same acceptance rule as a real Task
        return Task._allowedInTask(obj)
    @staticmethod
    def _taskClass():
        return Task
class ConditionalTask(_TaskBase) :
    """Holds EDProducers, EDFilters, ESProducers, ESSources, Services, Tasks and ConditionalTasks.
    A ConditionalTask can be associated with Sequences, Paths, and EndPaths.
    An EDProducer or EDFilter will be added to a Path or EndPath based on which other
    modules on the Path consumes its data products. If that ConditionalTask assigned module
    is placed after an EDFilter, the module will only run if the EDFilter passes. If no module
    on the Path needs the module's data products, the module will be treated as if it were on a Task.
    """
    @staticmethod
    def _taskType():
        # type tag used in dumps and error messages
        return "ConditionalTask"
    def _place(self, name, proc):
        proc._placeConditionalTask(name,self)
    def _isTaskComponent(self):
        # a ConditionalTask may NOT be nested inside a plain Task
        return False
    @staticmethod
    def _makeInstance(*items):
        return ConditionalTask(*items)
    @staticmethod
    def _allowedInTask(item):
        """Accept everything a Task accepts, plus ConditionalTasks and their placeholders."""
        return isinstance(item, (ConditionalTask, ConditionalTaskPlaceholder)) or Task._allowedInTask(item)
    @staticmethod
    def _mustResolve(item):
        # nested tasks/conditional tasks and placeholders need resolve() applied
        return Task._mustResolve(item) or isinstance(item, (ConditionalTask, ConditionalTaskPlaceholder))
class ConditionalTaskPlaceholder(_TaskBasePlaceholder):
    """Stands in for a ConditionalTask that will be defined later and resolved by label."""
    def _isTaskComponent(self):
        # a ConditionalTask placeholder may not be put on a plain Task
        return False
    @staticmethod
    def _typeName():
        return "ConditionalTaskPlaceholder"
    @staticmethod
    def _makeInstance(name):
        return ConditionalTaskPlaceholder(name)
    @staticmethod
    def _allowedInTask(obj):
        # accept anything allowed on either a Task or a ConditionalTask
        return Task._allowedInTask(obj) or ConditionalTask._allowedInTask(obj)
    @staticmethod
    def _taskClass():
        return ConditionalTask
if __name__=="__main__":
import unittest
    class DummyModule(_Labelable, _SequenceLeaf, _ConfigureComponent):
        """Minimal stand-in for an EDProducer/EDFilter used by the unit tests below."""
        def __init__(self,name):
            self.setLabel(name)
        def _isTaskComponent(self):
            # behaves like a module that may be placed on a Task
            return True
        def __repr__(self):
            return self.label_()
    class DummyBooleanModule(_Labelable, _BooleanLogicSequenceLeaf):
        """Minimal stand-in for a filter usable in boolean (&, |, ~) path logic tests."""
        def __init__(self,name):
            self.setLabel(name)
class TestModuleCommand(unittest.TestCase):
        def setUp(self):
            """No per-test fixture is needed."""
            pass
        def testBoolean(self):
            """Checks dumpPython and visitation order for &, |, ~, ignore and wait
            combinations of boolean-logic modules on a Path."""
            a = DummyBooleanModule("a")
            b = DummyBooleanModule("b")
            p = Path( a & b)
            self.assertEqual(p.dumpPython(),"cms.Path(process.a&process.b)\n")
            l = list()
            namesVisitor = DecoratedNodeNameVisitor(l)
            p.visit(namesVisitor)
            self.assertEqual(l,['&','a','b','@'])
            p2 = Path( a | b)
            self.assertEqual(p2.dumpPython(),"cms.Path(process.a|process.b)\n")
            l[:]=[]
            p2.visit(namesVisitor)
            self.assertEqual(l,['|','a','b','@'])
            c = DummyBooleanModule("c")
            d = DummyBooleanModule("d")
            p3 = Path(a & b & c & d)
            self.assertEqual(p3.dumpPython(),"cms.Path(process.a&process.b&process.c&process.d)\n")
            l[:]=[]
            p3.visit(namesVisitor)
            self.assertEqual(l,['&','a','b','c','d','@'])
            # associativity: all groupings of & dump identically
            p3 = Path(((a & b) & c) & d)
            self.assertEqual(p3.dumpPython(),"cms.Path(process.a&process.b&process.c&process.d)\n")
            p3 = Path(a & (b & (c & d)))
            self.assertEqual(p3.dumpPython(),"cms.Path(process.a&process.b&process.c&process.d)\n")
            p3 = Path((a & b) & (c & d))
            self.assertEqual(p3.dumpPython(),"cms.Path(process.a&process.b&process.c&process.d)\n")
            p3 = Path(a & (b & c) & d)
            self.assertEqual(p3.dumpPython(),"cms.Path(process.a&process.b&process.c&process.d)\n")
            p4 = Path(a | b | c | d)
            self.assertEqual(p4.dumpPython(),"cms.Path(process.a|process.b|process.c|process.d)\n")
            # mixed & and |: & binds tighter, so parentheses appear in the dump
            p5 = Path(a | b & c & d )
            self.assertEqual(p5.dumpPython(),"cms.Path(process.a|(process.b&process.c&process.d))\n")
            l[:]=[]
            p5.visit(namesVisitor)
            self.assertEqual(l,['|','a','&','b','c','d','@','@'])
            p5 = Path(a & b | c & d )
            self.assertEqual(p5.dumpPython(),"cms.Path((process.a&process.b)|(process.c&process.d))\n")
            l[:]=[]
            p5.visit(namesVisitor)
            self.assertEqual(l,['|','&','a','b','@','&','c','d','@','@'])
            p5 = Path(a & (b | c) & d )
            self.assertEqual(p5.dumpPython(),"cms.Path(process.a&(process.b|process.c)&process.d)\n")
            l[:]=[]
            p5.visit(namesVisitor)
            self.assertEqual(l,['&','a','|','b','c','@','d','@'])
            p5 = Path(a & b & c | d )
            self.assertEqual(p5.dumpPython(),"cms.Path((process.a&process.b&process.c)|process.d)\n")
            l[:]=[]
            p5.visit(namesVisitor)
            self.assertEqual(l,['|','&','a','b','c','@','d','@'])
            # unary decorations: ~, ignore, wait and their compositions
            p6 = Path( a & ~b)
            self.assertEqual(p6.dumpPython(),"cms.Path(process.a&(~process.b))\n")
            l[:]=[]
            p6.visit(namesVisitor)
            self.assertEqual(l,['&','a','!b','@'])
            p6 = Path( a & ignore(b))
            self.assertEqual(p6.dumpPython(),"cms.Path(process.a&(cms.ignore(process.b)))\n")
            l[:]=[]
            p6.visit(namesVisitor)
            self.assertEqual(l,['&','a','-b','@'])
            p6 = Path( a & wait(b))
            self.assertEqual(p6.dumpPython(),"cms.Path(process.a&(cms.wait(process.b)))\n")
            l[:]=[]
            p6.visit(namesVisitor)
            self.assertEqual(l,['&','a','|b','@'])
            p6 = Path( a & wait(ignore(b)))
            self.assertEqual(p6.dumpPython(),"cms.Path(process.a&(cms.wait(cms.ignore(process.b))))\n")
            l[:]=[]
            p6.visit(namesVisitor)
            self.assertEqual(l,['&','a','+b','@'])
            p6 = Path( a & ignore(wait(b)))
            self.assertEqual(p6.dumpPython(),"cms.Path(process.a&(cms.wait(cms.ignore(process.b))))\n")
            l[:]=[]
            p6.visit(namesVisitor)
            self.assertEqual(l,['&','a','+b','@'])
            p6 = Path(~(a&b))
            self.assertEqual(p6.dumpPython(),"cms.Path(~(process.a&process.b))\n")
            l[:]=[]
            p6.visit(namesVisitor)
            self.assertEqual(l,['!&','a','b','@'])
        def testTaskConstructor(self):
            """A ConditionalTask may not be nested inside a plain Task."""
            a = DummyModule("a")
            self.assertRaises(RuntimeError, lambda : Task(ConditionalTask(a)) )
        def testDumpPython(self):
            """Checks dumpPython output for Paths, Sequences, Tasks, ConditionalTasks
            and placeholders, plus the decorated-name visitation of those paths."""
            a = DummyModule("a")
            b = DummyModule('b')
            p = Path((a*b))
            #print p.dumpConfig('')
            self.assertEqual(p.dumpPython(),"cms.Path(process.a+process.b)\n")
            p2 = Path((b+a))
            #print p2.dumpConfig('')
            self.assertEqual(p2.dumpPython(),"cms.Path(process.b+process.a)\n")
            c = DummyModule('c')
            p3 = Path(c*(a+b))
            #print p3.dumpConfig('')
            self.assertEqual(p3.dumpPython(),"cms.Path(process.c+process.a+process.b)\n")
            p4 = Path(c*a+b)
            #print p4.dumpConfig('')
            self.assertEqual(p4.dumpPython(),"cms.Path(process.c+process.a+process.b)\n")
            p5 = Path(a+ignore(b))
            #print p5.dumpConfig('')
            self.assertEqual(p5.dumpPython(),"cms.Path(process.a+cms.ignore(process.b))\n")
            p5a = Path(a+wait(b))
            self.assertEqual(p5a.dumpPython(),"cms.Path(process.a+cms.wait(process.b))\n")
            p5b = Path(a+ignore(wait(b)))
            self.assertEqual(p5b.dumpPython(),"cms.Path(process.a+cms.wait(cms.ignore(process.b)))\n")
            p5c = Path(a+wait(ignore(b)))
            self.assertEqual(p5c.dumpPython(),"cms.Path(process.a+cms.wait(cms.ignore(process.b)))\n")
            p6 = Path(c+a*b)
            #print p6.dumpConfig('')
            self.assertEqual(p6.dumpPython(),"cms.Path(process.c+process.a+process.b)\n")
            p7 = Path(a+~b)
            self.assertEqual(p7.dumpPython(),"cms.Path(process.a+~process.b)\n")
            p8 = Path((a+b)*c)
            self.assertEqual(p8.dumpPython(),"cms.Path(process.a+process.b+process.c)\n")
            # tasks attached to Paths dump sorted, after the sequence part
            t1 = Task(a)
            t2 = Task(c, b)
            t3 = Task()
            p9 = Path((a+b)*c, t1)
            self.assertEqual(p9.dumpPython(),"cms.Path(process.a+process.b+process.c, cms.Task(process.a))\n")
            p10 = Path((a+b)*c, t2, t1)
            self.assertEqual(p10.dumpPython(),"cms.Path(process.a+process.b+process.c, cms.Task(process.a), cms.Task(process.b, process.c))\n")
            p11 = Path(t1, t2, t3)
            self.assertEqual(p11.dumpPython(),"cms.Path(cms.Task(), cms.Task(process.a), cms.Task(process.b, process.c))\n")
            d = DummyModule("d")
            e = DummyModule('e')
            f = DummyModule('f')
            t4 = Task(d, Task(f))
            s = Sequence(e, t4)
            p12 = Path(a+b+s+c,t1)
            self.assertEqual(p12.dumpPython(),"cms.Path(process.a+process.b+cms.Sequence(process.e, cms.Task(process.d, process.f))+process.c, cms.Task(process.a))\n")
            ct1 = ConditionalTask(a)
            ct2 = ConditionalTask(c, b)
            ct3 = ConditionalTask()
            p13 = Path((a+b)*c, ct1)
            self.assertEqual(p13.dumpPython(),"cms.Path(process.a+process.b+process.c, cms.ConditionalTask(process.a))\n")
            p14 = Path((a+b)*c, ct2, ct1)
            self.assertEqual(p14.dumpPython(),"cms.Path(process.a+process.b+process.c, cms.ConditionalTask(process.a), cms.ConditionalTask(process.b, process.c))\n")
            p15 = Path(ct1, ct2, ct3)
            self.assertEqual(p15.dumpPython(),"cms.Path(cms.ConditionalTask(), cms.ConditionalTask(process.a), cms.ConditionalTask(process.b, process.c))\n")
            ct4 = ConditionalTask(d, Task(f))
            s = Sequence(e, ct4)
            p16 = Path(a+b+s+c,ct1)
            self.assertEqual(p16.dumpPython(),"cms.Path(process.a+process.b+cms.Sequence(process.e, cms.ConditionalTask(process.d, process.f))+process.c, cms.ConditionalTask(process.a))\n")
            # more than 255 entries dump using the *[...] argument-list form
            n = 260
            mods = []
            labels = []
            for i in range(0, n):
                l = "a{}".format(i)
                labels.append("process."+l)
                mods.append(DummyModule(l))
            labels.sort()
            task = Task(*mods)
            self.assertEqual(task.dumpPython(), "cms.Task(*[" + ", ".join(labels) + "])\n")
            conditionalTask = ConditionalTask(*mods)
            self.assertEqual(conditionalTask.dumpPython(), "cms.ConditionalTask(*[" + ", ".join(labels) + "])\n")
            # decorated-name visitation skips task contents
            l = list()
            namesVisitor = DecoratedNodeNameVisitor(l)
            p.visit(namesVisitor)
            self.assertEqual(l, ['a', 'b'])
            l[:] = []
            p5.visit(namesVisitor)
            self.assertEqual(l, ['a', '-b'])
            l[:] = []
            p5a.visit(namesVisitor)
            self.assertEqual(l, ['a', '|b'])
            l[:] = []
            p5b.visit(namesVisitor)
            self.assertEqual(l, ['a', '+b'])
            l[:] = []
            p5c.visit(namesVisitor)
            self.assertEqual(l, ['a', '+b'])
            l[:] = []
            p7.visit(namesVisitor)
            self.assertEqual(l, ['a', '!b'])
            l[:] = []
            p10.visit(namesVisitor)
            self.assertEqual(l, ['a', 'b', 'c'])
            l[:] = []
            p12.visit(namesVisitor)
            self.assertEqual(l, ['a', 'b', 'e', 'c'])
            l[:] = []
            p16.visit(namesVisitor)
            self.assertEqual(l, ['a', 'b', 'e', 'c'])
            l[:] = []
            moduleVisitor = ModuleNodeVisitor(l)
            p8.visit(moduleVisitor)
            names = [m.label_() for m in l]
            self.assertEqual(names, ['a', 'b', 'c'])
            # placeholders dump with a 'process.' prefix on their label
            tph = TaskPlaceholder('a')
            self.assertEqual(tph.dumpPython(), 'cms.TaskPlaceholder("process.a")\n')
            sph = SequencePlaceholder('a')
            self.assertEqual(sph.dumpPython(), 'cms.SequencePlaceholder("process.a")\n')
            ctph = ConditionalTaskPlaceholder('a')
            self.assertEqual(ctph.dumpPython(), 'cms.ConditionalTaskPlaceholder("process.a")\n')
        def testDumpConfig(self):
            """Checks the legacy dumpConfig ({...} / & separated) representation of Paths."""
            a = DummyModule("a")
            b = DummyModule('b')
            p = Path((a*b))
            #print p.dumpConfig('')
            self.assertEqual(p.dumpConfig(None),"{a&b}\n")
            p2 = Path((b+a))
            #print p2.dumpConfig('')
            self.assertEqual(p2.dumpConfig(None),"{b&a}\n")
            c = DummyModule('c')
            p3 = Path(c*(a+b))
            #print p3.dumpConfig('')
            self.assertEqual(p3.dumpConfig(None),"{c&a&b}\n")
            p4 = Path(c*a+b)
            #print p4.dumpConfig('')
            self.assertEqual(p4.dumpConfig(None),"{c&a&b}\n")
            p5 = Path(a+ignore(b))
            #print p5.dumpConfig('')
            self.assertEqual(p5.dumpConfig(None),"{a&-b}\n")
            p6 = Path(c+a*b)
            #print p6.dumpConfig('')
            self.assertEqual(p6.dumpConfig(None),"{c&a&b}\n")
            p7 = Path(a+~b)
            self.assertEqual(p7.dumpConfig(None),"{a&!b}\n")
            p8 = Path((a+b)*c)
            self.assertEqual(p8.dumpConfig(None),"{a&b&c}\n")
        def testVisitor(self):
            """Checks the exact enter/leave order produced by visit() for sequences
            with nested Tasks and ConditionalTasks."""
            class TestVisitor(object):
                # asserts visited nodes appear in exactly the listed order
                def __init__(self, enters, leaves):
                    self._enters = enters
                    self._leaves = leaves
                def enter(self,visitee):
                    #print visitee.dumpSequencePython()
                    if self._enters[0] != visitee:
                        raise RuntimeError("wrong node ("+str(visitee)+") on 'enter'")
                    else:
                        self._enters = self._enters[1:]
                def leave(self,visitee):
                    if self._leaves[0] != visitee:
                        raise RuntimeError("wrong node ("+str(visitee)+") on 'leave'\n expected ("+str(self._leaves[0])+")")
                    else:
                        self._leaves = self._leaves[1:]
            a = DummyModule("a")
            b = DummyModule('b')
            multAB = a*b
            p = Path(multAB)
            t = TestVisitor(enters=[a,b],
                            leaves=[a,b])
            p.visit(t)
            plusAB = a+b
            p = Path(plusAB)
            t = TestVisitor(enters=[a,b],
                            leaves=[a,b])
            p.visit(t)
            c=DummyModule("c")
            d=DummyModule("d")
            e=DummyModule("e")
            f=DummyModule("f")
            g=DummyModule("g")
            ct1 = ConditionalTask(d)
            ct2 = ConditionalTask(e, ct1)
            ct3 = ConditionalTask(f, g, ct2)
            s=Sequence(plusAB, ct3, ct2)
            multSC = s*c
            p=Path(multSC, ct1, ct2)
            l = []
            v = ModuleNodeVisitor(l)
            p.visit(v)
            # modules on tasks appear once per task occurrence
            expected = [a,b,f,g,e,d,e,d,c,d,e,d]
            self.assertEqual(expected,l)
            t1 = Task(d)
            t2 = Task(e, t1)
            t3 = Task(f, g, t2)
            s=Sequence(plusAB, t3, t2)
            multSC = s*c
            p=Path(multSC, t1, t2)
            l = []
            v = ModuleNodeVisitor(l)
            p.visit(v)
            expected = [a,b,f,g,e,d,e,d,c,d,e,d]
            self.assertEqual(expected,l)
            l[:] = []
            v = ModuleNodeOnTaskVisitor(l)
            p.visit(v)
            expected = [f,g,e,d,e,d,d,e,d]
            self.assertEqual(expected,l)
            l[:] = []
            v = ModuleNodeNotOnTaskVisitor(l)
            p.visit(v)
            expected = [a,b,c]
            self.assertEqual(expected,l)
            t=TestVisitor(enters=[s,a,b,t3,f,g,t2,e,t1,d,t2,e,t1,d,c,t1,d,t2,e,t1,d],
                          leaves=[a,b,f,g,e,d,t1,t2,t3,e,d,t1,t2,s,c,d,t1,e,d,t1,t2])
            p.visit(t)
            notA= ~a
            p=Path(notA)
            t=TestVisitor(enters=[notA,a],leaves=[a,notA])
            p.visit(t)
        def testResolve(self):
            """Checks that SequencePlaceholders are substituted (including chained
            placeholder -> placeholder resolution) by Path.resolve()."""
            m1 = DummyModule("m1")
            m2 = DummyModule("m2")
            s1 = Sequence(m1)
            s2 = SequencePlaceholder("s3")
            s3 = Sequence(m2)
            p = Path(s1*s2)
            l = list()
            #resolver = ResolveVisitor(d)
            #p.visit(resolver)
            namesVisitor = DecoratedNodeNameVisitor(l)
            p.visit(namesVisitor)
            # before resolution the placeholder contributes nothing
            self.assertEqual(l, ['m1'])
            p.resolve(dict(s1=s1, s2=s2, s3=s3))
            l[:] = []
            p.visit(namesVisitor)
            self.assertEqual(l, ['m1', 'm2'])
            l[:]=[]
            # a placeholder may resolve to another placeholder (s2 -> "s3" -> s3)
            s1 = Sequence(m1)
            s2 = SequencePlaceholder("s3")
            s3 = Sequence(m2)
            s4 = SequencePlaceholder("s2")
            p=Path(s1+s4)
            p.resolve(dict(s1=s1, s2=s2, s3=s3, s4=s4))
            p.visit(namesVisitor)
            self.assertEqual(l, ['m1', 'm2'])
            l[:]=[]
            # decorations (~, ignore, wait) survive resolution
            m3 = DummyModule("m3")
            m4 = DummyModule("m4")
            s1 = Sequence(~m1)
            s2 = SequencePlaceholder("s3")
            s3 = Sequence(ignore(m2))
            s4 = Sequence(wait(m3) + ignore(wait(m4)))
            d = dict(s1=s1, s2=s2, s3=s3, s4=s4)
            p = Path(s1*s2*s4)
            p.resolve(dict(s1=s1, s2=s2, s3=s3, s4=s4))
            p.visit(namesVisitor)
            self.assertEqual(l, ['!m1', '-m2', '|m3', '+m4'])
    def testReplace(self):
        """Exercise replace() on Sequence, Task, ConditionalTask and FinalPath.

        Checks both the decorated node names seen by a visitor and the exact
        dumpPython() output after each replacement, including replacement of
        decorated (~/ignore) modules, placeholders, nested sequences and
        whole tasks.  replace() returns True/False depending on whether a
        match was found.
        """
        m1 = DummyModule("m1")
        m2 = DummyModule("m2")
        m3 = DummyModule("m3")
        m4 = DummyModule("m4")
        m5 = DummyModule("m5")
        s1 = Sequence(m1*~m2*m1*m2*ignore(m2))
        s2 = Sequence(m1*m2)
        l = []
        namesVisitor = DecoratedNodeNameVisitor(l)
        s1.visit(namesVisitor)
        self.assertEqual(l,['m1', '!m2', 'm1', 'm2', '-m2'])
        s3 = Sequence(~m1*s2)
        s3.replace(~m1, m2)
        l[:] = []
        s3.visit(namesVisitor)
        self.assertEqual(l, ['m2', 'm1', 'm2'])
        s3.replace(m2, ~m1)
        l[:] = []
        s3.visit(namesVisitor)
        self.assertEqual(l, ['!m1', 'm1', '!m1'])
        s3 = Sequence(ignore(m1)*s2)
        s3.replace(ignore(m1), m2)
        l[:] = []
        s3.visit(namesVisitor)
        self.assertEqual(l, ['m2', 'm1', 'm2'])
        s3.replace(m2, ignore(m1))
        l[:] = []
        s3.visit(namesVisitor)
        self.assertEqual(l, ['-m1', 'm1', '-m1'])
        ph = SequencePlaceholder('x')
        s4 = Sequence(Sequence(ph))
        s4.replace(ph,m2)
        self.assertEqual(s4.dumpPython(), "cms.Sequence(process.m2)\n")
        s1.replace(m2,m3)
        l[:] = []
        s1.visit(namesVisitor)
        self.assertEqual(l,['m1', '!m3', 'm1', 'm3', '-m3'])
        s2 = Sequence(m1*m2)
        s3 = Sequence(~m1*s2)
        l[:] = []
        s3.visit(namesVisitor)
        self.assertEqual(l,['!m1', 'm1', 'm2'])
        l[:] = []
        # replacing a nested sequence replaces its whole subtree
        s3.replace(s2,m1)
        s3.visit(namesVisitor)
        self.assertEqual(l,['!m1', 'm1'])
        s1 = Sequence(m1+m2)
        s2 = Sequence(m3+m4)
        s3 = Sequence(s1+s2)
        s3.replace(m3,m5)
        l[:] = []
        s3.visit(namesVisitor)
        self.assertEqual(l,['m1','m2','m5','m4'])
        m6 = DummyModule("m6")
        m7 = DummyModule("m7")
        m8 = DummyModule("m8")
        m9 = DummyModule("m9")
        #Task
        t6 = Task(m6)
        t7 = Task(m7)
        t89 = Task(m8, m9)
        s1 = Sequence(m1+m2, t6)
        s2 = Sequence(m3+m4, t7)
        s3 = Sequence(s1+s2, t89)
        s3.replace(m3,m5)
        l[:] = []
        s3.visit(namesVisitor)
        self.assertEqual(l,['m1','m2','m5','m4'])
        s3.replace(m8,m1)
        self.assertTrue(s3.dumpPython() == "cms.Sequence(cms.Sequence(process.m1+process.m2, cms.Task(process.m6))+process.m5+process.m4, cms.Task(process.m1, process.m9), cms.Task(process.m7))\n")
        s3.replace(m1,m7)
        self.assertTrue(s3.dumpPython() == "cms.Sequence(process.m7+process.m2+process.m5+process.m4, cms.Task(process.m6), cms.Task(process.m7), cms.Task(process.m7, process.m9))\n")
        # replace() returns True only if something was actually replaced
        result = s3.replace(t7, t89)
        self.assertTrue(s3.dumpPython() == "cms.Sequence(process.m7+process.m2+process.m5+process.m4, cms.Task(process.m6), cms.Task(process.m7, process.m9), cms.Task(process.m8, process.m9))\n")
        self.assertTrue(result)
        result = s3.replace(t7, t89)
        self.assertFalse(result)
        t1 = Task()
        t1.replace(m1,m2)
        self.assertTrue(t1.dumpPython() == "cms.Task()\n")
        t1 = Task(m1)
        t1.replace(m1,m2)
        self.assertTrue(t1.dumpPython() == "cms.Task(process.m2)\n")
        t1 = Task(m1,m2, m2)
        t1.replace(m2,m3)
        self.assertTrue(t1.dumpPython() == "cms.Task(process.m1, process.m3)\n")
        t1 = Task(m1,m2)
        t2 = Task(m1,m3,t1)
        t2.replace(m1,m4)
        self.assertTrue(t2.dumpPython() == "cms.Task(process.m2, process.m3, process.m4)\n")
        t1 = Task(m2)
        t2 = Task(m1,m3,t1)
        t2.replace(m1,m4)
        self.assertTrue(t2.dumpPython() == "cms.Task(process.m2, process.m3, process.m4)\n")
        t1 = Task(m2)
        t2 = Task(m1,m3,t1)
        t2.replace(t1,m4)
        self.assertTrue(t2.dumpPython() == "cms.Task(process.m1, process.m3, process.m4)\n")
        t1 = Task(m2)
        t2 = Task(m1,m3,t1)
        t3 = Task(m5)
        t2.replace(m2,t3)
        self.assertTrue(t2.dumpPython() == "cms.Task(process.m1, process.m3, process.m5)\n")
        #ConditionalTask
        ct6 = ConditionalTask(m6)
        ct7 = ConditionalTask(m7)
        ct89 = ConditionalTask(m8, m9)
        cs1 = Sequence(m1+m2, ct6)
        cs2 = Sequence(m3+m4, ct7)
        cs3 = Sequence(cs1+cs2, ct89)
        cs3.replace(m3,m5)
        l[:] = []
        cs3.visit(namesVisitor)
        self.assertEqual(l,['m1','m2','m5','m4'])
        cs3.replace(m8,m1)
        self.assertEqual(cs3.dumpPython(), "cms.Sequence(cms.Sequence(process.m1+process.m2, cms.ConditionalTask(process.m6))+process.m5+process.m4, cms.ConditionalTask(process.m1, process.m9), cms.ConditionalTask(process.m7))\n")
        cs3.replace(m1,m7)
        self.assertEqual(cs3.dumpPython(), "cms.Sequence(process.m7+process.m2+process.m5+process.m4, cms.ConditionalTask(process.m6), cms.ConditionalTask(process.m7), cms.ConditionalTask(process.m7, process.m9))\n")
        result = cs3.replace(ct7, ct89)
        self.assertEqual(cs3.dumpPython(), "cms.Sequence(process.m7+process.m2+process.m5+process.m4, cms.ConditionalTask(process.m6), cms.ConditionalTask(process.m7, process.m9), cms.ConditionalTask(process.m8, process.m9))\n")
        self.assertTrue(result)
        result = cs3.replace(ct7, ct89)
        self.assertFalse(result)
        ct1 = ConditionalTask()
        ct1.replace(m1,m2)
        self.assertEqual(ct1.dumpPython(), "cms.ConditionalTask()\n")
        ct1 = ConditionalTask(m1)
        ct1.replace(m1,m2)
        self.assertEqual(ct1.dumpPython(), "cms.ConditionalTask(process.m2)\n")
        ct1 = ConditionalTask(m1,m2, m2)
        ct1.replace(m2,m3)
        self.assertEqual(ct1.dumpPython(), "cms.ConditionalTask(process.m1, process.m3)\n")
        ct1 = ConditionalTask(m1,m2)
        ct2 = ConditionalTask(m1,m3,ct1)
        ct2.replace(m1,m4)
        self.assertEqual(ct2.dumpPython(), "cms.ConditionalTask(process.m2, process.m3, process.m4)\n")
        ct1 = ConditionalTask(m2)
        ct2 = ConditionalTask(m1,m3,ct1)
        ct2.replace(m1,m4)
        self.assertEqual(ct2.dumpPython(), "cms.ConditionalTask(process.m2, process.m3, process.m4)\n")
        ct1 = ConditionalTask(m2)
        ct2 = ConditionalTask(m1,m3,ct1)
        ct2.replace(ct1,m4)
        self.assertEqual(ct2.dumpPython(), "cms.ConditionalTask(process.m1, process.m3, process.m4)\n")
        ct1 = ConditionalTask(m2)
        ct2 = ConditionalTask(m1,m3,ct1)
        ct3 = ConditionalTask(m5)
        ct2.replace(m2,ct3)
        self.assertEqual(ct2.dumpPython(), "cms.ConditionalTask(process.m1, process.m3, process.m5)\n")
        #FinalPath
        fp = FinalPath()
        fp.replace(m1,m2)
        self.assertEqual(fp.dumpPython(), "cms.FinalPath()\n")
        fp = FinalPath(m1)
        fp.replace(m1,m2)
        self.assertEqual(fp.dumpPython(), "cms.FinalPath(process.m2)\n")
    def testReplaceIfHeldDirectly(self):
        """Exercise the private _replaceIfHeldDirectly() helper.

        Unlike replace(), this only substitutes nodes held directly by the
        sequence being modified: modules inside nested sequences are left
        untouched, but a change made to the nested sequence itself is then
        visible from the outer one.
        """
        m1 = DummyModule("m1")
        m2 = DummyModule("m2")
        m3 = DummyModule("m3")
        m4 = DummyModule("m4")
        m5 = DummyModule("m5")
        s1 = Sequence(m1*~m2*m1*m2*ignore(m2))
        s1._replaceIfHeldDirectly(m2,m3)
        self.assertEqual(s1.dumpPython()[:-1],
                         "cms.Sequence(process.m1+~process.m3+process.m1+process.m3+cms.ignore(process.m3))")
        s2 = Sequence(m1*m2)
        l = []
        s3 = Sequence(~m1*s2)
        s3._replaceIfHeldDirectly(~m1, m2)
        self.assertEqual(s3.dumpPython()[:-1],
                         "cms.Sequence(process.m2+(process.m1+process.m2))")
        #Task
        m6 = DummyModule("m6")
        m7 = DummyModule("m7")
        m8 = DummyModule("m8")
        m9 = DummyModule("m9")
        t6 = Task(m6)
        t7 = Task(m7)
        t89 = Task(m8, m9)
        s1 = Sequence(m1+m2, t6)
        s2 = Sequence(m3+m4, t7)
        s3 = Sequence(s1+s2, t89)
        # m3 lives in the nested s2, not directly in s3, so nothing changes
        s3._replaceIfHeldDirectly(m3,m5)
        self.assertEqual(s3.dumpPython()[:-1], "cms.Sequence(cms.Sequence(process.m1+process.m2, cms.Task(process.m6))+cms.Sequence(process.m3+process.m4, cms.Task(process.m7)), cms.Task(process.m8, process.m9))")
        # replacing inside s2 is then reflected in s3's dump
        s2._replaceIfHeldDirectly(m3,m5)
        self.assertEqual(s2.dumpPython()[:-1],"cms.Sequence(process.m5+process.m4, cms.Task(process.m7))")
        self.assertEqual(s3.dumpPython()[:-1], "cms.Sequence(cms.Sequence(process.m1+process.m2, cms.Task(process.m6))+cms.Sequence(process.m5+process.m4, cms.Task(process.m7)), cms.Task(process.m8, process.m9))")
        s1 = Sequence(t6)
        s1._replaceIfHeldDirectly(t6,t7)
        self.assertEqual(s1.dumpPython()[:-1],"cms.Sequence(cms.Task(process.m7))")
        #ConditionalTask
        ct6 = ConditionalTask(m6)
        ct7 = ConditionalTask(m7)
        ct89 = ConditionalTask(m8, m9)
        s1 = Sequence(m1+m2, ct6)
        s2 = Sequence(m3+m4, ct7)
        s3 = Sequence(s1+s2, ct89)
        s3._replaceIfHeldDirectly(m3,m5)
        self.assertEqual(s3.dumpPython()[:-1], "cms.Sequence(cms.Sequence(process.m1+process.m2, cms.ConditionalTask(process.m6))+cms.Sequence(process.m3+process.m4, cms.ConditionalTask(process.m7)), cms.ConditionalTask(process.m8, process.m9))")
        s2._replaceIfHeldDirectly(m3,m5)
        self.assertEqual(s2.dumpPython()[:-1],"cms.Sequence(process.m5+process.m4, cms.ConditionalTask(process.m7))")
        self.assertEqual(s3.dumpPython()[:-1], "cms.Sequence(cms.Sequence(process.m1+process.m2, cms.ConditionalTask(process.m6))+cms.Sequence(process.m5+process.m4, cms.ConditionalTask(process.m7)), cms.ConditionalTask(process.m8, process.m9))")
        s1 = Sequence(ct6)
        s1._replaceIfHeldDirectly(ct6,ct7)
        self.assertEqual(s1.dumpPython()[:-1],"cms.Sequence(cms.ConditionalTask(process.m7))")
def testIndex(self):
m1 = DummyModule("a")
m2 = DummyModule("b")
m3 = DummyModule("c")
s = Sequence(m1+m2+m3)
self.assertEqual(s.index(m1),0)
self.assertEqual(s.index(m2),1)
self.assertEqual(s.index(m3),2)
def testInsert(self):
m1 = DummyModule("a")
m2 = DummyModule("b")
m3 = DummyModule("c")
s = Sequence(m1+m3)
s.insert(1,m2)
self.assertEqual(s.index(m1),0)
self.assertEqual(s.index(m2),1)
self.assertEqual(s.index(m3),2)
s = Sequence()
s.insert(0, m1)
self.assertEqual(s.index(m1),0)
p = Path()
p.insert(0, m1)
self.assertEqual(s.index(m1),0)
    def testExpandAndClone(self):
        """Exercise expandAndClone() on Sequence, Path, Task and ConditionalTask.

        Expansion flattens nested sequences into a single linear chain while
        collecting all associated tasks; decorations (~ and ignore) are kept.
        """
        m1 = DummyModule("m1")
        m2 = DummyModule("m2")
        m3 = DummyModule("m3")
        m4 = DummyModule("m4")
        m5 = DummyModule("m5")
        s1 = Sequence(m1*~m2*m1*m2*ignore(m2))
        s2 = Sequence(m1*m2)
        s3 = Sequence(~m1*s2)
        p = Path(s1+s3)
        p2 = p.expandAndClone()
        l = []
        namesVisitor = DecoratedNodeNameVisitor(l)
        p2.visit(namesVisitor)
        self.assertEqual(l, ['m1', '!m2', 'm1', 'm2', '-m2', '!m1', 'm1', 'm2'])
        #Task
        m6 = DummyModule("m6")
        m7 = DummyModule("m7")
        m8 = DummyModule("m8")
        m9 = DummyModule("m9")
        p = Path(s1+s3, Task(m6))
        p2 = p.expandAndClone()
        l[:] = []
        p2.visit(namesVisitor)
        self.assertEqual(l, ['m1', '!m2', 'm1', 'm2', '-m2', '!m1', 'm1', 'm2'])
        self.assertEqual(p2.dumpPython(), "cms.Path(process.m1+~process.m2+process.m1+process.m2+cms.ignore(process.m2)+~process.m1+process.m1+process.m2, cms.Task(process.m6))\n")
        # nested tasks are flattened into a single sorted task
        s2 = Sequence(m1*m2, Task(m9))
        s3 = Sequence(~m1*s2)
        t8 = Task(m8)
        t8.setLabel("t8")
        p = Path(s1+s3, Task(m6, Task(m7, t8)))
        p2 = p.expandAndClone()
        l[:] = []
        p2.visit(namesVisitor)
        self.assertEqual(l, ['m1', '!m2', 'm1', 'm2', '-m2', '!m1', 'm1', 'm2'])
        self.assertTrue(p2.dumpPython() == "cms.Path(process.m1+~process.m2+process.m1+process.m2+cms.ignore(process.m2)+~process.m1+process.m1+process.m2, cms.Task(process.m6, process.m7, process.m8, process.m9))\n")
        t1 = Task(m1,m2,m3)
        s1 = Sequence(t1)
        s2 = s1.expandAndClone()
        l[:] = []
        s2.visit(namesVisitor)
        self.assertEqual(l, [])
        self.assertTrue(s2.dumpPython() == "cms.Sequence(cms.Task(process.m1, process.m2, process.m3))\n")
        t1 = Task(m1,m2)
        t2 = Task(m1,m3,t1)
        t3 = t2.expandAndClone()
        self.assertTrue(t3.dumpPython() == "cms.Task(process.m1, process.m2, process.m3)\n")
        t4 = Task()
        t5 = t4.expandAndClone()
        self.assertTrue(t5.dumpPython() == "cms.Task()\n")
        #ConditionalTask
        s1 = Sequence(m1*~m2*m1*m2*ignore(m2))
        s2 = Sequence(m1*m2)
        s3 = Sequence(~m1*s2)
        p = Path(s1+s3, ConditionalTask(m6))
        p2 = p.expandAndClone()
        l[:] = []
        p2.visit(namesVisitor)
        self.assertEqual(l, ['m1', '!m2', 'm1', 'm2', '-m2', '!m1', 'm1', 'm2'])
        self.assertEqual(p2.dumpPython(), "cms.Path(process.m1+~process.m2+process.m1+process.m2+cms.ignore(process.m2)+~process.m1+process.m1+process.m2, cms.ConditionalTask(process.m6))\n")
        s2 = Sequence(m1*m2, ConditionalTask(m9))
        s3 = Sequence(~m1*s2)
        ct8 = ConditionalTask(m8)
        ct8.setLabel("ct8")
        p = Path(s1+s3, ConditionalTask(m6, ConditionalTask(m7, ct8)))
        p2 = p.expandAndClone()
        l[:] = []
        p2.visit(namesVisitor)
        self.assertEqual(l, ['m1', '!m2', 'm1', 'm2', '-m2', '!m1', 'm1', 'm2'])
        self.assertEqual(p2.dumpPython(), "cms.Path(process.m1+~process.m2+process.m1+process.m2+cms.ignore(process.m2)+~process.m1+process.m1+process.m2, cms.ConditionalTask(process.m6, process.m7, process.m8, process.m9))\n")
        t1 = ConditionalTask(m1,m2,m3)
        s1 = Sequence(t1)
        s2 = s1.expandAndClone()
        l[:] = []
        s2.visit(namesVisitor)
        self.assertEqual(l, [])
        self.assertEqual(s2.dumpPython(), "cms.Sequence(cms.ConditionalTask(process.m1, process.m2, process.m3))\n")
        t1 = ConditionalTask(m1,m2)
        t2 = ConditionalTask(m1,m3,t1)
        t3 = t2.expandAndClone()
        self.assertEqual(t3.dumpPython(), "cms.ConditionalTask(process.m1, process.m2, process.m3)\n")
        t4 = ConditionalTask()
        t5 = t4.expandAndClone()
        self.assertTrue(t5.dumpPython() == "cms.ConditionalTask()\n")
def testAdd(self):
m1 = DummyModule("m1")
m2 = DummyModule("m2")
m3 = DummyModule("m3")
m4 = DummyModule("m4")
s1 = Sequence(m1)
s3 = Sequence(m3+ignore(m4))
p = Path(s1)
p += ~m2
p *= s3
l = []
namesVisitor = DecoratedNodeNameVisitor(l)
p.visit(namesVisitor)
self.assertEqual(l, ['m1', '!m2', 'm3', '-m4'])
s4 = Sequence()
s4 +=m1
l[:]=[]; s1.visit(namesVisitor); self.assertEqual(l,['m1'])
self.assertEqual(s4.dumpPython(),"cms.Sequence(process.m1)\n")
s4 = Sequence()
s4 *=m1
l[:]=[]; s1.visit(namesVisitor); self.assertEqual(l,['m1'])
self.assertEqual(s4.dumpPython(),"cms.Sequence(process.m1)\n")
    def testRemove(self):
        """Exercise remove() on Sequence, Task, ConditionalTask and FinalPath.

        remove() deletes only the first occurrence of the given module or
        nested sequence; removal from a shared nested sequence is visible
        from every sequence holding it.
        """
        m1 = DummyModule("m1")
        m2 = DummyModule("m2")
        m3 = DummyModule("m3")
        m4 = DummyModule("m4")
        s1 = Sequence(m1*m2+~m3)
        s2 = Sequence(m1*s1)
        l = []
        namesVisitor = DecoratedNodeNameVisitor(l)
        d = {'m1':m1 ,'m2':m2, 'm3':m3,'s1':s1, 's2':s2}
        l[:] = []; s1.visit(namesVisitor); self.assertEqual(l,['m1', 'm2', '!m3'])
        l[:] = []; s2.visit(namesVisitor); self.assertEqual(l,['m1', 'm1', 'm2', '!m3'])
        s1.remove(m2)
        l[:] = []; s1.visit(namesVisitor); self.assertEqual(l,['m1', '!m3'])
        l[:] = []; s2.visit(namesVisitor); self.assertEqual(l,['m1', 'm1', '!m3'])
        s2.remove(m3)
        l[:] = []; s1.visit(namesVisitor); self.assertEqual(l,['m1', '!m3'])
        l[:] = []; s2.visit(namesVisitor); self.assertEqual(l,['m1', 'm1'])
        # only the first of several identical modules is removed
        s1 = Sequence( m1 + m2 + m1 + m2 )
        l[:] = []; s1.visit(namesVisitor); self.assertEqual(l,['m1', 'm2', 'm1', 'm2'])
        s1.remove(m2)
        l[:] = []; s1.visit(namesVisitor); self.assertEqual(l,['m1', 'm1', 'm2'])
        s1 = Sequence( m1 + m3 )
        s2 = Sequence( m2 + ignore(m3) + s1 + m3 )
        l[:] = []; s2.visit(namesVisitor); self.assertEqual(l,['m2', '-m3', 'm1', 'm3', 'm3'])
        s2.remove(s1)
        l[:] = []; s2.visit(namesVisitor); self.assertEqual(l,['m2', '-m3', 'm3'])
        s2.remove(m3)
        l[:] = []; s2.visit(namesVisitor); self.assertEqual(l,['m2','m3'])
        s1 = Sequence(m1*m2*m3)
        self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2+process.m3)\n")
        s1.remove(m2)
        self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m3)\n")
        s1 = Sequence(m1+m2+m3)
        self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2+process.m3)\n")
        s1.remove(m2)
        self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m3)\n")
        s1 = Sequence(m1*m2+m3)
        self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2+process.m3)\n")
        s1.remove(m2)
        self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m3)\n")
        s1 = Sequence(m1+m2*m3)
        self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2+process.m3)\n")
        s1.remove(m2)
        self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m3)\n")
        s1.remove(m1)
        s1.remove(m3)
        l[:]=[]; s1.visit(namesVisitor); self.assertEqual(l,[])
        self.assertEqual(s1.dumpPython(), "cms.Sequence()\n")
        s3 = Sequence(m1)
        s3.remove(m1)
        l[:]=[]; s3.visit(namesVisitor); self.assertEqual(l,[])
        self.assertEqual(s3.dumpPython(), "cms.Sequence()\n")
        s3 = Sequence(m1)
        s4 = Sequence(s3)
        s4.remove(m1)
        l[:]=[]; s4.visit(namesVisitor); self.assertEqual(l,[])
        self.assertEqual(s4.dumpPython(), "cms.Sequence()\n")
        #Task
        s1 = Sequence(m1+m2, Task(m3), Task(m4))
        s1.remove(m4)
        self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2, cms.Task(process.m3))\n")
        s1 = Sequence(m1+m2+Sequence(Task(m3,m4), Task(m3), Task(m4)))
        s1.remove(m4)
        self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2, cms.Task(process.m3), cms.Task(process.m4))\n")
        t1 = Task(m1)
        t1.setLabel("t1")
        t2 = Task(m2,t1)
        t2.setLabel("t2")
        t3 = Task(t1,t2,m1)
        # successive removals peel one occurrence of m1 at a time,
        # un-labelling/expanding nested tasks as they are touched
        t3.remove(m1)
        self.assertTrue(t3.dumpPython() == "cms.Task(process.m1, process.t2)\n")
        t3.remove(m1)
        self.assertTrue(t3.dumpPython() == "cms.Task(process.m1, process.m2)\n")
        t3.remove(m1)
        self.assertTrue(t3.dumpPython() == "cms.Task(process.m2)\n")
        t3.remove(m2)
        self.assertTrue(t3.dumpPython() == "cms.Task()\n")
        #ConditionalTask
        s1 = Sequence(m1+m2, ConditionalTask(m3), ConditionalTask(m4))
        s1.remove(m4)
        self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2, cms.ConditionalTask(process.m3))\n")
        s1 = Sequence(m1+m2+Sequence(ConditionalTask(m3,m4), ConditionalTask(m3), ConditionalTask(m4)))
        s1.remove(m4)
        self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2, cms.ConditionalTask(process.m3), cms.ConditionalTask(process.m4))\n")
        t1 = ConditionalTask(m1)
        t1.setLabel("t1")
        t2 = ConditionalTask(m2,t1)
        t2.setLabel("t2")
        t3 = ConditionalTask(t1,t2,m1)
        t3.remove(m1)
        self.assertEqual(t3.dumpPython(), "cms.ConditionalTask(process.m1, process.t2)\n")
        t3.remove(m1)
        self.assertEqual(t3.dumpPython(), "cms.ConditionalTask(process.m1, process.m2)\n")
        t3.remove(m1)
        self.assertEqual(t3.dumpPython(), "cms.ConditionalTask(process.m2)\n")
        t3.remove(m2)
        self.assertEqual(t3.dumpPython(), "cms.ConditionalTask()\n")
        #FinalPath
        fp = FinalPath(m1+m2)
        fp.remove(m1)
        self.assertEqual(fp.dumpPython(), "cms.FinalPath(process.m2)\n")
        fp = FinalPath(m1)
        fp.remove(m1)
        self.assertEqual(fp.dumpPython(), "cms.FinalPath()\n")
    def testCopyAndExclude(self):
        """Exercise copyAndExclude() on Sequence, Task and ConditionalTask.

        copyAndExclude() returns a new sequence with the listed modules,
        nested sequences, placeholders or tasks removed; the original is
        left untouched.  Decorated modules (~, ignore) match both decorated
        and undecorated exclusion requests.
        """
        a = DummyModule("a")
        b = DummyModule("b")
        c = DummyModule("c")
        d = DummyModule("d")
        s = Sequence(a+b+c)
        self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+process.b+process.c)\n")
        s = Sequence(a+b+c+d)
        self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+process.d)\n")
        self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+process.b+process.c)\n")
        s=Sequence(a*b+c+d)
        self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+process.d)\n")
        self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+process.b+process.c)\n")
        s = Sequence(a+b*c+d)
        self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+process.d)\n")
        self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+process.b+process.c)\n")
        s2 = Sequence(a+b)
        s = Sequence(c+s2+d)
        self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.c+process.b+process.d)\n")
        self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.c+process.a+process.d)\n")
        self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence((process.a+process.b)+process.d)\n")
        self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.c+(process.a+process.b))\n")
        self.assertEqual(s.copyAndExclude([a,b]).dumpPython(),"cms.Sequence(process.c+process.d)\n")
        # the copy shares the still-included nested sequence with the original
        s3 = s.copyAndExclude([c])
        s2.remove(a)
        self.assertEqual(s3.dumpPython(),"cms.Sequence((process.b)+process.d)\n")
        s4 = s.copyAndExclude([a,b])
        seqs = []
        sequenceVisitor = SequenceVisitor(seqs)
        s.visit(sequenceVisitor)
        self.assertEqual(len(seqs),1)
        seqs[:] = []
        s4.visit(sequenceVisitor)
        self.assertEqual(len(seqs),0)
        self.assertEqual(s4.dumpPython(),"cms.Sequence(process.c+process.d)\n")
        holder = SequencePlaceholder("x")
        s3 = Sequence(b+d,Task(a))
        s2 = Sequence(a+b+holder+s3)
        s = Sequence(c+s2+d)
        seqs[:] = []
        s.visit(sequenceVisitor)
        self.assertTrue(seqs == [s2,s3])
        s2 = Sequence(a+b+holder)
        s = Sequence(c+s2+d)
        self.assertEqual(s.copyAndExclude([holder]).dumpPython(),"cms.Sequence(process.c+process.a+process.b+process.d)\n")
        s2 = Sequence(a+b+c)
        s = Sequence(s2+d)
        self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+process.d)\n")
        self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence((process.a+process.b+process.c))\n")
        self.assertEqual(s.copyAndExclude([s2]).dumpPython(),"cms.Sequence(process.d)\n")
        s2 = Sequence(a+b+c)
        s = Sequence(s2*d)
        self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+process.d)\n")
        self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence((process.a+process.b+process.c))\n")
        self.assertEqual(s.copyAndExclude([a,b,c]).dumpPython(),"cms.Sequence(process.d)\n")
        s = Sequence(ignore(a)+b+c+d)
        self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([ignore(a)]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(cms.ignore(process.a)+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(cms.ignore(process.a)+process.b+process.d)\n")
        self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(cms.ignore(process.a)+process.b+process.c)\n")
        s = Sequence(a+ignore(b)+c+d)
        self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(cms.ignore(process.b)+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+cms.ignore(process.b)+process.d)\n")
        self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+cms.ignore(process.b)+process.c)\n")
        s = Sequence(a+b+c+ignore(d))
        self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+cms.ignore(process.d))\n")
        self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+cms.ignore(process.d))\n")
        self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+cms.ignore(process.d))\n")
        self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+process.b+process.c)\n")
        s = Sequence(~a+b+c+d)
        self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(~process.a+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(~process.a+process.b+process.d)\n")
        self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(~process.a+process.b+process.c)\n")
        s = Sequence(a+~b+c+d)
        self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(~process.b+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([~b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
        self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+~process.b+process.d)\n")
        self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+~process.b+process.c)\n")
        s = Sequence(a+b+c+~d)
        self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d)\n")
        self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+~process.d)\n")
        self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+~process.d)\n")
        self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+process.b+process.c)\n")
        self.assertEqual(s.copyAndExclude([a,b,c,d]).dumpPython(),"cms.Sequence()\n")
        #Task
        e = DummyModule("e")
        f = DummyModule("f")
        g = DummyModule("g")
        h = DummyModule("h")
        t1 = Task(h)
        s = Sequence(a+b+c+~d, Task(e,f,Task(g,t1)))
        self.assertEqual(s.copyAndExclude([a,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.Task(process.e, process.f, process.g))\n")
        self.assertEqual(s.copyAndExclude([a,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.Task(process.e, process.f, process.g))\n")
        self.assertEqual(s.copyAndExclude([a,e,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.Task(process.f, process.g))\n")
        self.assertEqual(s.copyAndExclude([a,e,f,g,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d)\n")
        self.assertEqual(s.copyAndExclude([a,b,c,d]).dumpPython(),"cms.Sequence(cms.Task(process.e, process.f, process.g, process.h))\n")
        self.assertEqual(s.copyAndExclude([t1]).dumpPython(),"cms.Sequence(process.a+process.b+process.c+~process.d, cms.Task(process.e, process.f, process.g))\n")
        taskList = []
        taskVisitor = TaskVisitor(taskList)
        s.visit(taskVisitor)
        self.assertEqual(len(taskList),3)
        s2 = s.copyAndExclude([g,h])
        taskList[:] = []
        s2.visit(taskVisitor)
        self.assertEqual(len(taskList),1)
        t2 = Task(t1)
        taskList[:] = []
        t2.visit(taskVisitor)
        self.assertEqual(taskList[0],t1)
        s3 = Sequence(s)
        self.assertEqual(s3.copyAndExclude([a,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.Task(process.e, process.f, process.g))\n")
        s4 = Sequence(s)
        self.assertEqual(s4.copyAndExclude([a,b,c,d,e,f,g,h]).dumpPython(),"cms.Sequence()\n")
        t1 = Task(e,f)
        t11 = Task(a)
        t11.setLabel("t11")
        t2 = Task(g,t1,h,t11)
        t3 = t2.copyAndExclude([e,h])
        self.assertTrue(t3.dumpPython() == "cms.Task(process.f, process.g, process.t11)\n")
        t4 = t2.copyAndExclude([e,f,g,h,a])
        self.assertTrue(t4.dumpPython() == "cms.Task()\n")
        #ConditionalTask
        t1 = ConditionalTask(h)
        s = Sequence(a+b+c+~d, ConditionalTask(e,f,ConditionalTask(g,t1)))
        self.assertEqual(s.copyAndExclude([a,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.ConditionalTask(process.e, process.f, process.g))\n")
        self.assertEqual(s.copyAndExclude([a,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.ConditionalTask(process.e, process.f, process.g))\n")
        self.assertEqual(s.copyAndExclude([a,e,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.ConditionalTask(process.f, process.g))\n")
        self.assertEqual(s.copyAndExclude([a,e,f,g,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d)\n")
        self.assertEqual(s.copyAndExclude([a,b,c,d]).dumpPython(),"cms.Sequence(cms.ConditionalTask(process.e, process.f, process.g, process.h))\n")
        self.assertEqual(s.copyAndExclude([t1]).dumpPython(),"cms.Sequence(process.a+process.b+process.c+~process.d, cms.ConditionalTask(process.e, process.f, process.g))\n")
        taskList = []
        taskVisitor = ConditionalTaskVisitor(taskList)
        s.visit(taskVisitor)
        self.assertEqual(len(taskList),3)
        s2 = s.copyAndExclude([g,h])
        taskList[:] = []
        s2.visit(taskVisitor)
        self.assertEqual(len(taskList),1)
        t2 = ConditionalTask(t1)
        taskList[:] = []
        t2.visit(taskVisitor)
        self.assertEqual(taskList[0],t1)
        s3 = Sequence(s)
        self.assertEqual(s3.copyAndExclude([a,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.ConditionalTask(process.e, process.f, process.g))\n")
        s4 = Sequence(s)
        self.assertEqual(s4.copyAndExclude([a,b,c,d,e,f,g,h]).dumpPython(),"cms.Sequence()\n")
        t1 = ConditionalTask(e,f)
        t11 = ConditionalTask(a)
        t11.setLabel("t11")
        t2 = ConditionalTask(g,t1,h,t11)
        t3 = t2.copyAndExclude([e,h])
        self.assertEqual(t3.dumpPython(), "cms.ConditionalTask(process.f, process.g, process.t11)\n")
        t4 = t2.copyAndExclude([e,f,g,h,a])
        self.assertEqual(t4.dumpPython(), "cms.ConditionalTask()\n")
def testSequenceTypeChecks(self):
m1 = DummyModule("m1")
m2 = DummyModule("m2")
s1 = Sequence(m1*m2)
def testRaise():
s1.something = 1
self.assertRaises(AttributeError,testRaise)
def testRaise2():
s2 = Sequence(m1*None)
self.assertRaises(TypeError,testRaise2)
def testCopy(self):
a = DummyModule("a")
b = DummyModule("b")
c = DummyModule("c")
p1 = Path(a+b+c)
p2 = p1.copy()
e = DummyModule("e")
p2.replace(b,e)
self.assertEqual(p1.dumpPython(),"cms.Path(process.a+process.b+process.c)\n")
self.assertEqual(p2.dumpPython(),"cms.Path(process.a+process.e+process.c)\n")
p1 = Path(a+b+c)
p2 = p1.copy()
p1 += e
self.assertEqual(p1.dumpPython(),"cms.Path(process.a+process.b+process.c+process.e)\n")
self.assertEqual(p2.dumpPython(),"cms.Path(process.a+process.b+process.c)\n")
#Task
t1 = Task(a, b)
t2 = t1.copy()
self.assertTrue(t1.dumpPython() == t2.dumpPython())
t1Contents = list(t1._collection)
t2Contents = list(t2._collection)
self.assertTrue(id(t1Contents[0]) == id(t2Contents[0]))
self.assertTrue(id(t1Contents[1]) == id(t2Contents[1]))
self.assertTrue(id(t1._collection) != id(t2._collection))
#ConditionalTask
t1 = ConditionalTask(a, b)
t2 = t1.copy()
self.assertTrue(t1.dumpPython() == t2.dumpPython())
t1Contents = list(t1._collection)
t2Contents = list(t2._collection)
self.assertTrue(id(t1Contents[0]) == id(t2Contents[0]))
self.assertTrue(id(t1Contents[1]) == id(t2Contents[1]))
self.assertTrue(id(t1._collection) != id(t2._collection))
    def testCopyAndAdd(self):
        """copyAndAdd() returns a new Task/ConditionalTask with extra members.

        The original is never modified, members stay sorted in the dump, and
        copyAndAdd() composes with copyAndExclude().
        """
        a = DummyModule("a")
        b = DummyModule("b")
        c = DummyModule("c")
        d = DummyModule("d")
        e = DummyModule("e")
        #Task
        t1 = Task(a, b, c)
        self.assertEqual(t1.dumpPython(), "cms.Task(process.a, process.b, process.c)\n")
        t2 = t1.copyAndAdd(d, e)
        self.assertEqual(t1.dumpPython(), "cms.Task(process.a, process.b, process.c)\n")
        self.assertEqual(t2.dumpPython(), "cms.Task(process.a, process.b, process.c, process.d, process.e)\n")
        t3 = t2.copyAndExclude([b])
        self.assertEqual(t1.dumpPython(), "cms.Task(process.a, process.b, process.c)\n")
        self.assertEqual(t2.dumpPython(), "cms.Task(process.a, process.b, process.c, process.d, process.e)\n")
        self.assertEqual(t3.dumpPython(), "cms.Task(process.a, process.c, process.d, process.e)\n")
        t4 = t1.copyAndExclude([b]).copyAndAdd(d)
        self.assertEqual(t4.dumpPython(), "cms.Task(process.a, process.c, process.d)\n")
        t5 = t2.copyAndExclude([b]).copyAndAdd(d)
        self.assertEqual(t5.dumpPython(), "cms.Task(process.a, process.c, process.d, process.e)\n")
        t6 = t4.copyAndAdd(Task(b))
        self.assertEqual(t6.dumpPython(), "cms.Task(process.a, process.b, process.c, process.d)\n")
        #ConditionalTask
        t1 = ConditionalTask(a, b, c)
        self.assertEqual(t1.dumpPython(), "cms.ConditionalTask(process.a, process.b, process.c)\n")
        t2 = t1.copyAndAdd(d, e)
        self.assertEqual(t1.dumpPython(), "cms.ConditionalTask(process.a, process.b, process.c)\n")
        self.assertEqual(t2.dumpPython(), "cms.ConditionalTask(process.a, process.b, process.c, process.d, process.e)\n")
        t3 = t2.copyAndExclude([b])
        self.assertEqual(t1.dumpPython(), "cms.ConditionalTask(process.a, process.b, process.c)\n")
        self.assertEqual(t2.dumpPython(), "cms.ConditionalTask(process.a, process.b, process.c, process.d, process.e)\n")
        self.assertEqual(t3.dumpPython(), "cms.ConditionalTask(process.a, process.c, process.d, process.e)\n")
        t4 = t1.copyAndExclude([b]).copyAndAdd(d)
        self.assertEqual(t4.dumpPython(), "cms.ConditionalTask(process.a, process.c, process.d)\n")
        t5 = t2.copyAndExclude([b]).copyAndAdd(d)
        self.assertEqual(t5.dumpPython(), "cms.ConditionalTask(process.a, process.c, process.d, process.e)\n")
        t6 = t4.copyAndAdd(Task(b))
        self.assertEqual(t6.dumpPython(), "cms.ConditionalTask(process.a, process.b, process.c, process.d)\n")
    def testInsertInto(self):
        """insertInto() writes the decorated module names into a PSet.

        Uses a minimal stand-in for the C++ parameter set that records the
        vstring passed to addVString; nested sequences must be flattened in
        the emitted list.
        """
        from FWCore.ParameterSet.Types import vstring
        class TestPSet(object):
            """Stub PSet capturing addVString calls into a plain dict."""
            def __init__(self):
                self._dict = dict()
            def addVString(self,isTracked,label,value):
                # isTracked is irrelevant for this check and is ignored
                self._dict[label]=value
        a = DummyModule("a")
        b = DummyModule("b")
        c = DummyModule("c")
        d = DummyModule("d")
        p = Path(a+b+c+d)
        decoratedList = []
        lister = DecoratedNodeNameVisitor(decoratedList)
        p.visit(lister)
        ps = TestPSet()
        p.insertInto(ps,"p",decoratedList)
        self.assertEqual(ps._dict, {"p":vstring("a","b","c","d")})
        s = Sequence(b+c)
        p = Path(a+s+d)
        decoratedList[:] = []
        p.visit(lister)
        ps = TestPSet()
        p.insertInto(ps,"p",decoratedList)
        self.assertEqual(ps._dict, {"p":vstring("a","b","c","d")})
unittest.main()
| cms-sw/cmssw | FWCore/ParameterSet/python/SequenceTypes.py | SequenceTypes.py | py | 128,875 | python | en | code | 985 | github-code | 50 |
40905109656 | import datetime
import os
import re
import sys
from collections import OrderedDict
import numpy
from . import units
from .colormap import ColorMap
from .Point import Point
from .Qt import QtCore
GLOBAL_PATH = None # so not thread safe.
class ParseError(Exception):
    """Raised when parsing a config string fails.

    Remembers the offending line, its number and (optionally) the file it
    came from so __str__ can point at the exact location.
    """
    def __init__(self, message, lineNum, line, fileName=None):
        self.lineNum = lineNum
        self.line = line
        self.message = message
        self.fileName = fileName
        super().__init__(message)

    def __str__(self):
        if self.fileName is None:
            header = "Error parsing string at line %d:\n" % self.lineNum
        else:
            header = "Error parsing config file '%s' at line %d:\n" % (self.fileName, self.lineNum)
        return header + "%s\n%s" % (self.line, Exception.__str__(self))
def writeConfigFile(data, fname):
    """Serialize *data* with genString() and write the result to *fname*."""
    text = genString(data)
    with open(fname, 'wt') as out:
        out.write(text)
def readConfigFile(fname, **scope):
    """Read a config file written by writeConfigFile() and return its data.

    The file is evaluated with a scope containing *scope*, all known units,
    and helpers needed to reconstruct common objects (OrderedDict, Point,
    QtCore, ColorMap, datetime, numpy arrays and dtypes).  Relative file
    names are resolved against the directory of the last file read
    (module-global GLOBAL_PATH, so not thread safe).
    """
    global GLOBAL_PATH
    if GLOBAL_PATH is not None:
        fname2 = os.path.join(GLOBAL_PATH, fname)
        if os.path.exists(fname2):
            fname = fname2
    GLOBAL_PATH = os.path.dirname(os.path.abspath(fname))
    local = {**scope, **units.allUnits}
    local['OrderedDict'] = OrderedDict
    local['readConfigFile'] = readConfigFile
    local['Point'] = Point
    local['QtCore'] = QtCore
    local['ColorMap'] = ColorMap
    local['datetime'] = datetime
    # Needed for reconstructing numpy arrays
    local['array'] = numpy.array
    for dtype in ['int8', 'uint8',
                  'int16', 'uint16', 'float16',
                  'int32', 'uint32', 'float32',
                  'int64', 'uint64', 'float64']:
        local[dtype] = getattr(numpy, dtype)
    try:
        with open(fname, "rt") as fd:
            s = fd.read()
        # normalize Windows / old-Mac line endings before parsing
        s = s.replace("\r\n", "\n")
        s = s.replace("\r", "\n")
        data = parseString(s, **local)[1]
    except ParseError as err:
        # attach the file name so the error message points at the right file
        # (replaces the legacy sys.exc_info()[1] idiom)
        err.fileName = fname
        raise
    except Exception:
        print("Error while reading config file %s:" % fname)
        raise
    return data
def appendConfigFile(data, fname):
    """Serialize *data* with genString() and append it to *fname*."""
    with open(fname, 'at') as fd:
        fd.write(genString(data))
def genString(data, indent=''):
    """Render a (possibly nested) dict as indented ``key: repr(value)`` text.

    Nested dicts recurse with four extra spaces of indentation.  Keys must
    be non-empty, must not contain ':' and must not start with a space;
    offending keys raise after printing the data for debugging.
    """
    parts = []
    for key in data:
        text = str(key)
        if not text:
            print(data)
            raise Exception('blank dict keys not allowed (see data above)')
        if text.startswith(' ') or ':' in text:
            print(data)
            raise Exception('dict keys must not contain ":" or start with spaces [offending key is "%s"]' % text)
        value = data[key]
        if isinstance(value, dict):
            parts.append(indent + text + ':\n')
            parts.append(genString(value, indent + '    '))
        else:
            # Escape embedded newlines so the file stays line-oriented.
            parts.append(indent + text + ': ' + repr(value).replace("\n", "\\\n") + '\n')
    return ''.join(parts)
def parseString(lines, start=0, **scope):
    """Recursively parse config text into an OrderedDict.

    *lines* may be the raw string (split here) or the already-split list
    passed by recursive calls.  *start* indexes the first line of the
    current indentation block.  Values are eval()'d against *scope*, so
    input must be trusted.  Returns ``(last_line_consumed, data)``.
    """
    data = OrderedDict()
    if isinstance(lines, str):
        # Join explicit line continuations, then drop blank/comment lines.
        lines = lines.replace("\\\n", "")
        lines = lines.split('\n')
        lines = [l for l in lines if re.search(r'\S', l) and not re.match(r'\s*#', l)] ## remove empty lines
    indent = measureIndent(lines[start])
    ln = start - 1
    try:
        while True:
            ln += 1
            #print ln
            if ln >= len(lines):
                break
            l = lines[ln]
            ## Skip blank lines or lines starting with #
            if re.match(r'\s*#', l) or not re.search(r'\S', l):
                continue
            ## Measure line indentation, make sure it is correct for this level
            lineInd = measureIndent(l)
            if lineInd < indent:
                # Dedent: this line belongs to an outer block; back up one.
                ln -= 1
                break
            if lineInd > indent:
                #print lineInd, indent
                raise ParseError('Indentation is incorrect. Expected %d, got %d' % (indent, lineInd), ln+1, l)
            if ':' not in l:
                raise ParseError('Missing colon', ln+1, l)
            (k, p, v) = l.partition(':')
            k = k.strip()
            v = v.strip()
            ## set up local variables to use for eval
            if len(k) < 1:
                raise ParseError('Missing name preceding colon', ln+1, l)
            if k[0] == '(' and k[-1] == ')': ## If the key looks like a tuple, try evaluating it.
                try:
                    k1 = eval(k, scope)
                    if type(k1) is tuple:
                        k = k1
                except:
                    # If tuple conversion fails, keep the string
                    pass
            if re.search(r'\S', v) and v[0] != '#': ## eval the value
                try:
                    val = eval(v, scope)
                except:
                    ex = sys.exc_info()[1]
                    raise ParseError("Error evaluating expression '%s': [%s: %s]" % (v, ex.__class__.__name__, str(ex)), (ln+1), l)
            else:
                # No value after the colon: either an empty dict, or a
                # nested block parsed by recursing one indent level deeper.
                if ln+1 >= len(lines) or measureIndent(lines[ln+1]) <= indent:
                    #print "blank dict"
                    val = {}
                else:
                    #print "Going deeper..", ln+1
                    (ln, val) = parseString(lines, start=ln+1, **scope)
            data[k] = val
            #print k, repr(val)
    except ParseError:
        raise
    except:
        ex = sys.exc_info()[1]
        raise ParseError("%s: %s" % (ex.__class__.__name__, str(ex)), ln+1, l)
    #print "Returning shallower..", ln+1
    return (ln, data)
def measureIndent(s):
    """Return the number of leading space characters in *s*.

    Only literal spaces count (tabs terminate the indent), matching the
    original character-by-character scan, but expressed with str.lstrip.
    """
    return len(s) - len(s.lstrip(' '))
| pyqtgraph/pyqtgraph | pyqtgraph/configfile.py | configfile.py | py | 5,806 | python | en | code | 3,463 | github-code | 50 |
10166043804 | '''
De-duplicate and sort:
Generate 20 random integers between 1 and 10 and store them in a list.
First print the original list,
then de-duplicate and sort the list and print the new list.
'''
import random
my_list = []
for n in range(20):
    rand = random.randint(1,10)
    my_list.append(rand) # append adds one element at the end of the list
# print the original list
for n in my_list:
    print(n,end=' ')
# de-duplicate
my_list2 = []
for n in my_list:
    if n not in my_list2: # keep only values not already collected
        my_list2.append(n)
print()
# print the de-duplicated, sorted list
list.sort(my_list2,reverse=True) # reverse=True sorts in descending order
for n in my_list2:
    print(n,end=' ')
| evynlau/pythonDemo | day4/列表案例2.py | 列表案例2.py | py | 646 | python | zh | code | 0 | github-code | 50 |
591733928 | import h5py
import os
import numpy as np
import cv2
import glob
import h5py
import keras
from sklearn.model_selection import train_test_split
Normal_dir = './Normal/'
Glaucoam_dir = './Glaucoma/'
INPUT_DATA = './RIM-ONE2/'
test_DATA = './RIM-ONE2_test/'
def create_image_lists():
    """Scan INPUT_DATA for bmp/jpg images and write train.txt / val.txt.

    Each output line is ``<path> <label>`` where label is '1' for the
    Glaucoma folder and '0' otherwise.  Despite the variable names, the
    train_test_split call puts 25% of the data into train.txt and the
    remaining 75% into val.txt (train_size=0.25).
    """
    # f = h5py.File("dataset0809_l_r_old.hdf5", "w")
    f = open('train.txt', 'w')
    val = open('val.txt', 'w')
    name = []
    label = []
    sub_dirs = [x[0] for x in os.walk(INPUT_DATA)]  # collect all sub-directories
    is_root_dir = True  # the first entry is the root directory itself and must be skipped
    # process every sub-directory in turn
    for sub_dir in sub_dirs:
        if is_root_dir:
            is_root_dir = False
            continue
        # collect all valid images in the current directory
        extensions = {'bmp', 'jpg'}
        file_list = []  # holds all image paths
        dir_name = os.path.basename(sub_dir)  # last component of the path
        for extension in extensions:
            file_glob = os.path.join(INPUT_DATA, dir_name, '*.' + extension)
            file_list.extend(glob.glob(file_glob))
        if not file_list:
            continue
        label_name = dir_name
        for file_name in file_list:
            name.append(file_name)
            if label_name == 'Glaucoma':
                label.append('1')
            else:
                label.append('0')
    train = np.asarray(name)
    label = np.asarray(label)
    # NOTE(review): train_size=0.25 means only a quarter of the samples end
    # up in train.txt -- confirm this split ratio is intended.
    train_name, test_name, train_l, test_l = train_test_split(train, label, train_size=0.25, random_state=20)
    for (x, y) in zip(train_name, train_l):
        s = x + ' ' + y + '\n'
        f.writelines(s)
    f.close()
    for (x, y) in zip(test_name, test_l):
        s = x + ' ' + y + '\n'
        val.writelines(s)
    val.close()
def load_data():
    """Load the cached image and label arrays from dataset0809_l_r_old.hdf5."""
    with h5py.File('dataset0809_l_r_old.hdf5', 'r') as f:
        imgs = f['imgs'][:]
        labels = f['labels'][:]
    print(imgs[0].shape)
    return imgs, labels
if __name__ == '__main__':
    # Build the train.txt / val.txt index files from INPUT_DATA.
    # load_data()
    create_image_lists()
| manal-asdg/CNNs_medical | TCNN/finetune_alexnet_with_tensorflow/input_data_3.py | input_data_3.py | py | 2,169 | python | en | code | 0 | github-code | 50 |
28322419385 | import math
def mean(list_of_x):
    """Return the arithmetic mean of the values in *list_of_x*.

    Raises ZeroDivisionError for an empty list, as before.
    """
    return sum(list_of_x) / len(list_of_x)
def variance(list_of_x, mean):
    """Return the population variance of *list_of_x*.

    *mean* is a callable (e.g. the ``mean`` function above) mapping the
    list to its average; the original re-bound the parameter name over
    itself, which is kept out of the rewrite for clarity.
    """
    center = mean(list_of_x)
    return sum((x - center) ** 2 for x in list_of_x) / len(list_of_x)
def standard_deviation():
    # NOTE(review): this reads the module-level name `variance`, which the
    # script below re-binds from the function to its numeric result
    # (`variance = variance(val_of_x, mean)`).  Calling this before that
    # re-binding would pass the function object to math.sqrt -- fragile.
    return math.sqrt(variance)
val_of_x = [21,21,21,21,24,24,24,24,24,24,26,26,26,26,26,26,26,29,29,29,29,29,29,29,29,29,29,29,40,40]
# NOTE: this re-binds `variance` from the function above to its numeric
# result; standard_deviation() depends on that re-binding (see note there).
variance = variance(val_of_x, mean)
print(f"Mean is {mean(val_of_x)}")
print(f"Variance is {variance}")
print(f"Standard Deviation is {standard_deviation()}")
| jesulonse/Measures-of-Central-Tendency | Mean,Variance and Standard Deviation.py | Mean,Variance and Standard Deviation.py | py | 642 | python | en | code | 0 | github-code | 50 |
15074034885 | from .convert import *
from .compute_cache import compute_and_store_cached_data
import os
class StandaloneExporter(DarwinExporter):
    """DarwinExporter specialisation for OMA standalone runs.

    Reads darwin output under DARWIN_BROWSERDATA_PATH (set from *root*)
    and caches converted intermediate data under ``<root>/pyoma``.
    """
    DRW_CONVERT_FILE = os.path.abspath(os.path.splitext(__file__)[0] + ".drw")

    def __init__(self, root, name, **kwargs):
        # Point the darwin tooling at the standalone output directory.
        os.environ["DARWIN_BROWSERDATA_PATH"] = os.path.abspath(root)
        super(StandaloneExporter, self).__init__(name, **kwargs)
        self.transformed = False
        self.cache_dir = os.path.join(os.getenv("DARWIN_BROWSERDATA_PATH"), "pyoma")

    def add_homologs(self):
        """Placeholder for loading homolog relations per genome.

        NOTE(review): the body is a no-op -- the 'homologs' branch only
        contains ``pass``; presumably unimplemented for standalone runs.
        """
        self.assert_cached_results()
        for gs in self.h5.root.Genome.iterrows():
            genome = gs["UniProtSpeciesCode"].decode()
            rel_node_for_genome = self._get_or_create_node(
                "/PairwiseRelation/{}".format(genome)
            )
            if "homologs" not in rel_node_for_genome:
                pass

    def get_version(self):
        """Return a version string for this export."""
        # TODO: obtain real version
        return "OmaStandalone; 1.0.x"

    def assert_cached_results(self):
        """Ensure darwin data has been transformed into the cache dir (once)."""
        if not self.transformed:
            res = self.call_darwin_export(
                "TransformDataToCache('{}');".format(self.cache_dir)
            )
            if res != "success":
                raise DarwinException("could not transform data from darwin", "")
            self.transformed = True
            os.environ["DARWIN_NETWORK_SCRATCH_PATH"] = os.getenv(
                "DARWIN_BROWSERDATA_PATH"
            )
            common.package_logger.info("successfully transformed data to json")

    def add_orthologs(self):
        """Load verified pairs (VPairs) for each genome into the hdf5 file.

        Prefers the per-genome gzip cache; falls back to querying darwin.
        """
        self.assert_cached_results()
        for gs in self.h5.root.Genome.iterrows():
            genome = gs["UniProtSpeciesCode"].decode()
            rel_node_for_genome = self._get_or_create_node(
                "/PairwiseRelation/{}".format(genome)
            )
            if "VPairs" not in rel_node_for_genome:
                cache_file = os.path.join(
                    os.getenv("DARWIN_NETWORK_SCRATCH_PATH", ""),
                    "pyoma",
                    "vps",
                    "{}.txt.gz".format(genome),
                )
                if os.path.exists(cache_file):
                    data = load_tsv_to_numpy((cache_file, 0, 0, False))
                else:
                    # fallback to read from VPsDB
                    data = self.call_darwin_export("GetVPsForGenome({})".format(genome))

                vp_tab = self.h5.create_table(
                    rel_node_for_genome,
                    "VPairs",
                    tablefmt.PairwiseRelationTable,
                    expectedrows=len(data),
                )
                if isinstance(data, list):
                    data = self._convert_to_numpyarray(data, vp_tab)
                self._write_to_table(vp_tab, data)
                vp_tab.cols.EntryNr1.create_csindex()

    def add_hogs(self, **kwargs):
        """Import hierarchical orthologous groups from the orthoxml output.

        Uses the first backbone species tree file found in Output/.
        """
        fn = "HierarchicalGroups.orthoxml"
        hog_file = os.path.join(os.environ["DARWIN_BROWSERDATA_PATH"], "Output", fn)
        hog_cache_dir = os.path.join(self.cache_dir, "split_hogs")
        for tree_file in (
            "ManualSpeciesTree.nwk",
            "EstimatedSpeciesTree.nwk",
            "LineageSpeciesTree.nwk",
        ):
            tree_filename = os.path.join(
                os.environ["DARWIN_BROWSERDATA_PATH"], "Output", tree_file
            )
            if os.path.exists(tree_filename):
                self.logger.info("Use " + tree_filename + " as HOG backbone tree file")
                break
        hog_treefile = None
        if os.path.exists(tree_filename):
            hog_treefile = tree_filename
        return super().add_hogs(
            hog_path=hog_cache_dir, hog_file=hog_file, tree_filename=hog_treefile
        )

    def _get_genome_database_paths(self):
        """Ask darwin for the genome database file names."""
        return self.call_darwin_export("GetGenomeFileNames();")

    def xref_databases(self):
        """Cross-reference databases are the genome database files themselves."""
        return self._get_genome_database_paths()
def mark_isoforms(dbfn):
    """Mark alternative splice variants in the hdf5 database *dbfn*.

    Reads Output/used_splicing_variants.txt to learn the main variant of
    each splicing group, then rewrites the AltSpliceVariant column of
    /Protein/Entries so every variant points to its group's main entry.
    No-op (with a log message) when no splicing output file exists.
    """
    from .db import Database, OmaIdMapper

    used_splice_fn = os.path.join(
        os.getenv("DARWIN_BROWSERDATA_PATH"), "Output", "used_splicing_variants.txt"
    )
    if not os.path.exists(used_splice_fn):
        common.package_logger.info(
            "no splicing output file found. Assume not splice variants"
        )
        return
    db = Database(dbfn)
    idmapper = OmaIdMapper(db)
    main_variants = {}
    with open(used_splice_fn) as fh:
        for line in fh:
            try:
                # second tab-field, before any '|' suffix, is the main variant id
                main_variant = line.split("\t")[1].split("|")[0].strip()
                common.package_logger.debug(main_variant)
                main_variants[main_variant] = idmapper.omaid_to_entry_nr(main_variant)
            except Exception:
                common.package_logger.warning("cannot convert line: {}".format(line))
                pass
    common.package_logger.info(
        "found {} main splicing variants".format(len(main_variants))
    )
    common.package_logger.debug(main_variants)
    splice_column = (
        db.get_hdf5_handle().get_node("/Protein/Entries").col("AltSpliceVariant")
    )
    for file in os.scandir(os.path.join(os.getenv("DARWIN_BROWSERDATA_PATH"), "DB")):
        if not file.name.endswith(".splice"):
            continue
        common.package_logger.warning("handling {}".format(file.name))
        with open(file) as fh:
            for line in fh:
                splice_variants = [z.strip() for z in line.split(";")]
                main = [v for v in splice_variants if v in main_variants]
                if len(main) != 1:
                    common.package_logger.warning(
                        "not a single main variant for {}: {}".format(
                            splice_variants, main
                        )
                    )
                    continue
                for v in splice_variants:
                    enr = idmapper.omaid_to_entry_nr(v)
                    splice_column[enr] = main_variants[main[0]]
    db.close()
    # Re-open in append mode to write the modified column back.
    with tables.open_file(dbfn, "a") as h5:
        tab = h5.get_node("/Protein/Entries")
        tab.modify_column(column=splice_column, colname="AltSpliceVariant")
def import_oma_run(path, outfile, domains=None, log_level="INFO"):
    """Convert an OMA standalone run at *path* into the hdf5 file *outfile*.

    Runs the export pipeline in two passes (write, then index/augment),
    optionally loading domain annotations from *domains* URLs, and finally
    marks splice isoforms and computes cached derived data.
    """
    log = getLogger(log_level)
    x = StandaloneExporter(path, outfile, logger=log, mode="write")
    x.add_version()
    x.add_species_data()
    x.add_orthologs()
    x.add_proteins()
    x.add_hogs()
    x.add_xrefs()
    if domains is None:
        domains = ["file:///dev/null"]
    else:
        # Absolute filesystem paths are turned into file:// URLs.
        domains = list(
            map(lambda url: "file://" + url if url.startswith("/") else url, domains)
        )
    log.info("loading domain annotations from {}".format(domains))
    x.add_domain_info(
        filter_duplicated_domains(
            only_pfam_or_cath_domains(
                itertools.chain.from_iterable(map(iter_domains, domains))
            )
        )
    )
    x.add_domainname_info(
        itertools.chain(
            CathDomainNameParser(
                "http://download.cathdb.info/cath/releases/latest-release/"
                "cath-classification-data/cath-names.txt"
            ).parse(),
            PfamDomainNameParser(
                "ftp://ftp.ebi.ac.uk/pub/databases/Pfam/current_release/Pfam-A.clans.tsv.gz"
            ).parse(),
        )
    )
    x.add_canonical_id()
    x.add_group_metadata()
    x.add_hog_domain_prevalence()
    x.add_roothog_metadata()
    x.close()

    # Second pass: build indexes and derived data on the written file.
    x = StandaloneExporter(path, outfile, logger=log)
    x.create_indexes()
    x.add_sequence_suffix_array()
    x.update_summary_stats()
    x.add_per_species_aux_groupdata()
    x.close()

    mark_isoforms(os.path.join(path, outfile))
    compute_and_store_cached_data(x.h5.filename, nr_procs=min(os.cpu_count(), 12))


if __name__ == "__main__":
    import_oma_run("~/Repositories/OmaStandalone", "oma.h5")
| DessimozLab/pyoma | pyoma/browser/convert_omastandalone.py | convert_omastandalone.py | py | 7,853 | python | en | code | 0 | github-code | 50 |
9828160975 | import math
import random
import pygame
from pygame import mixer
# Initialize pygame
pygame.init()

# create the screen (800x600 window)
screen = pygame.display.set_mode((800, 600))

# Background
background = pygame.image.load('back.png')

# # Sound
# mixer.music.load("background-music.wav")
# mixer.music.play(-1)

# Caption and Icon
pygame.display.set_caption("Ghost Invader")
icon = pygame.image.load('launch.png')
pygame.display.set_icon(icon)

# Player sprite and starting position (bottom centre)
playerImg = pygame.image.load('space.png')
playerX = 370
playerY = 480
playerX_change = 0

# Enemy state, kept in parallel lists indexed by enemy number
enemyImg = []
enemyX = []
enemyY = []
enemyX_change = []
enemyY_change = []
num_of_enemies = 12

# Some Const
ENEMY_START_Y = 64
SIZE_OF_ENEMY = 64
START_SPEED = 0.2


def create_enemy(counter):
    """Spawn one ghost, appending its sprite/position/speed to the parallel lists."""
    enemyImg.append(pygame.image.load('ghost.png'))
    enemyX.append(counter * (SIZE_OF_ENEMY + 1))
    enemyY.append(ENEMY_START_Y)
    enemyX_change.append(START_SPEED)
    enemyY_change.append(SIZE_OF_ENEMY + 5)


for i in range(num_of_enemies):
    create_enemy(i)
# Bullet
# Ready - You can't see the bullet on the screen
# Fire - The bullet is currently moving
bulletImg = pygame.image.load('laser.png')
bulletX = 0
bulletY = 480
bulletX_change = 0
bulletY_change = 0.7
bullet_state = "ready"

# Score and remaining lives, with HUD text positions
score_value = 0
lives = 3
font = pygame.font.Font('freesansbold.ttf', 32)

scoreTextX = 10
scoreTextY = 10
lifeTextX = 680
lifeTextY = 10

# Game Over
over_font = pygame.font.Font('freesansbold.ttf', 64)


def reset_enemy(conter):
    """Send enemy *conter* back to the top-left spawn position and speed."""
    enemyX[conter] = 0
    enemyY[conter] = 64
    enemyX_change[conter] = START_SPEED
    enemyY_change[conter] = SIZE_OF_ENEMY + 5


def show_score(x, y):
    """Draw the current score at screen position (x, y)."""
    score_text = font.render("Score : " + str(score_value), True, (255, 255, 255))
    screen.blit(score_text, (x, y))


def show_lifes(x, y):
    """Draw the remaining lives at screen position (x, y)."""
    life_text = font.render("Life: " + str(lives), True, (255, 0, 0))
    screen.blit(life_text, (x, y))


def game_over_text():
    """Draw the GAME OVER banner in the middle of the screen."""
    over_text = over_font.render("GAME OVER", True, (255, 255, 255))
    screen.blit(over_text, (200, 250))


def player(x, y):
    """Draw the player's ship at (x, y)."""
    screen.blit(playerImg, (x, y))


def enemy(x, y, i):
    """Draw enemy *i* at (x, y)."""
    screen.blit(enemyImg[i], (x, y))


def fire_bullet(x, y):
    """Mark the bullet as in flight and draw it just above the ship at (x, y)."""
    global bullet_state
    bullet_state = "fire"
    screen.blit(bulletImg, (x + 16, y + 10))
def is_collision(enemyX, enemyY, bulletX, bulletY):
    """Return True when the bullet is within 27 pixels of the enemy.

    Uses math.hypot for the Euclidean distance instead of a hand-rolled
    sqrt(pow(...) + pow(...)) expression.
    """
    return math.hypot(enemyX - bulletX, enemyY - bulletY) < 27
# Game Loop
# FIX: the original compared strings with `is` ("bullet_state is \"ready\""),
# which relies on CPython string interning and emits SyntaxWarning on 3.8+;
# string comparisons below use `==` instead.
running = True
while running:

    # RGB = Red, Green, Blue
    screen.fill((0, 0, 0))
    # Background Image
    screen.blit(background, (0, 0))
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False

        # if keystroke is pressed check whether its right or left
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                playerX_change = -0.3
            if event.key == pygame.K_RIGHT:
                playerX_change = 0.3
            if event.key == pygame.K_SPACE:
                if bullet_state == "ready":
                    # laserSound = mixer.Sound("shot.wav")
                    # laserSound.play()
                    # Get the current x cordinate of the spaceship
                    bulletX = playerX
                    fire_bullet(bulletX, bulletY)

        if event.type == pygame.KEYUP:
            if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                playerX_change = 0

    # Move the player and clamp to the screen edges
    playerX += playerX_change
    if playerX <= 0:
        playerX = 0
    elif playerX >= 736:
        playerX = 736

    # Enemy Movement
    for i in range(num_of_enemies):

        # Enemy reached the player's row: lose a life, or end the game
        if enemyY[i] > 440:
            if lives == 0:
                for j in range(num_of_enemies):
                    enemyY[j] = 2000
                game_over_text()
                break
            else:
                lives -= 1
                reset_enemy(i)

        # Bounce horizontally off the screen edges and drop one row
        enemyX[i] += enemyX_change[i]
        if enemyX[i] <= 0:
            enemyX_change[i] = START_SPEED
            enemyY[i] += enemyY_change[i]
        elif enemyX[i] >= 736:
            enemyX_change[i] = -START_SPEED
            enemyY[i] += enemyY_change[i]

        # Collision
        collision = is_collision(enemyX[i], enemyY[i], bulletX, bulletY)
        if collision:
            # hit_sound = mixer.Sound("hit.wav")
            # hit_sound.play()
            bulletY = 480
            bullet_state = "ready"
            score_value += 1
            reset_enemy(i)

        enemy(enemyX[i], enemyY[i], i)

    # Bullet Movement: reset when it leaves the top of the screen
    if bulletY <= 0:
        bulletY = 480
        bullet_state = "ready"

    if bullet_state == "fire":
        fire_bullet(bulletX, bulletY)
        bulletY -= bulletY_change

    player(playerX, playerY)
    show_score(scoreTextX, scoreTextY)
    show_lifes(lifeTextX, lifeTextY)
    pygame.display.update()
| paulknulst/ghost-invader | main.py | main.py | py | 4,972 | python | en | code | 0 | github-code | 50 |
42244092128 | import sys
from itertools import count
# Sea-floor grid of '>' (east-facing), 'v' (south-facing) and '.' cells, read from stdin.
grid = [list(row.strip()) for row in sys.stdin.readlines()]
def step():
    """Advance the grid one step; return True if any sea cucumber moved.

    East-movers ('>') go first, then south-movers ('v').  For each herd the
    complete set of movable cucumbers is collected into `movables` *before*
    any cell is mutated, so the herd moves simultaneously; positions wrap
    around the grid edges via the modulo arithmetic.
    """
    moves = False
    for C, (DX, DY) in [('>', (1, 0)), ('v', (0, 1))]:
        tries = ( ((x, y), ((x+DX)%len(row), (y+DY)%len(grid)))
            for y, row in enumerate(grid) for x, c in enumerate(row) if c == C )
        movables = { f: t for f, t in tries if grid[t[1]][t[0]] == '.' }
        if movables:
            moves = True
            for (fx, fy), (tx, ty) in movables.items():
                grid[fy][fx] = '.'
                grid[ty][tx] = C
    return moves
# Step until nothing can move any more; print the first settled step number.
for n in count(1):
    if not step():
        break
print(n)
| ShuP1/AoC | src/2021/25.py | 25.py | py | 641 | python | en | code | 0 | github-code | 50 |
38349050173 | """
Description:
Given an array of integers , Find the minimum sum which is obtained from summing
each Two integers product .
"""
def min_sum(arr):
    """Sort *arr* in place, pair the smallest values with the largest,
    and return the minimal sum of the pairwise products.

    For odd-length input the middle element is left unpaired and ignored,
    matching the original behaviour.
    """
    arr.sort()
    half = len(arr) // 2
    total = 0
    for small, large in zip(arr[:half], reversed(arr[half:])):
        total += small * large
    return total
if __name__ == "__main__":
    # Smoke-run; note the result is discarded (nothing is printed).
    min_sum(list(range(10)))
| MaximSinyaev/CodeWars | c7kyu/minimize-sum-of-array-array-series-number-1.py | minimize-sum-of-array-array-series-number-1.py | py | 416 | python | en | code | 0 | github-code | 50 |
42584067781 | import os, stat
import numpy as np
import utils.run_analysis as ana
import datatable as dt
def read_match_timestamps(base_dir, target_fps, subject, trial, num_cameras=1):
    '''
    Readin timestamps from all devices and match them to a common timestamp, repeating frames if needed.
    NOTE: You may need to fix the permissions first to run this
    NOTE2: This works for data where the LAST annotation is for starting the task. Future datasets where this is NOT the case (ie with post calibrations) will fail
    NOTE3: This only works with one camera FOR NOW!
    Params:
        base_dir (str): directory where we work
        target_fps (int): The sample rate in frames per second that our devices are targeted to
        subject (str): name of subject
        trial (str): name of trial
        num_cameras (int): how many cameras did we collect from? (1=cy, 2=cy,os, 3=cy,od,os)
    Returns:
        common_timeline_table (n by timestamps array): Table matching frame numbers from devices to our common timeline timestamps
    '''
    cameras = ['cy','od','os']
    cameras = cameras[:num_cameras]
    #TODO: IMPLEMENT FOR MULTIPLE CAMERAS
    cam = cameras[0]

    data_dir = os.path.join(base_dir, 'raw_data')
    output_dir = os.path.join(base_dir, 'analysis')
    trial_directory = os.path.join(data_dir, subject, trial, 'pre')
    #ximea (scene cameras)
    ximea_timestamp_file = os.path.join(trial_directory, 'scene_camera', f'timestamps_{cam}.tsv')
    ximea_timesync_file = os.path.join(trial_directory, 'scene_camera', f'timestamp_camsync_{cam}.tsv')
    #pupil (eye cameras)
    pupil_timestamp_file = os.path.join(trial_directory, 'eye_camera','000','exports','000','pupil_positions.csv')
    pupil_annotations_file = os.path.join(trial_directory, 'eye_camera','000', 'annotation_timestamps.npy')
    analysis_folder = os.path.join(output_dir, subject, trial,'')
    try:
        os.makedirs(analysis_folder)
    except:
        print(f'Folder {analysis_folder} Already Made!')

    # Convert ximea camera clock to unix time and persist the converted stamps.
    ximea_timestamps = ana.convert_ximea_time_to_unix_time(ximea_timestamp_file, ximea_timesync_file)
    ximea_timestamp_converted_path = os.path.join(analysis_folder,f'timestamps_converted_{cam}.tsv')
    np.savetxt(ximea_timestamp_converted_path, ximea_timestamps, fmt='%10.5f', delimiter='\t')

    pupil_num='000'
    #get pupil timestamps, split per eye (column 2 is the eye id)
    pupil_positions = dt.fread(pupil_timestamp_file)
    pupil_positions = pupil_positions[:,[0,1,2,4,5]]
    pupil_eye_0 = np.array(pupil_positions)[np.where(np.array(pupil_positions)[:,2]==0)[0],:]
    pupil_eye_1 = np.array(pupil_positions)[np.where(np.array(pupil_positions)[:,2]==1)[0],:]
    pupil_ts_eye0 = pupil_eye_0[:,0]
    pupil_ts_eye1 = pupil_eye_1[:,0]
    pupil_annotations = np.load(pupil_annotations_file)

    #ximea timestamps
    with open(ximea_timestamp_converted_path, 'r') as f:
        timestamps_ximea = list(zip(line.strip().split('\t') for line in f))
    timestamps_ximea = np.squeeze(np.array(timestamps_ximea[1:]).astype('float'))
    timestamps_ximea = timestamps_ximea[:,-1]

    ## SEE NOTE 2
    if(trial == 'cell_phone_1' and subject=='jf'):
        start_task_time = pupil_annotations[-2]
        end_task_time = pupil_annotations[-1]
    else:
        start_task_time = pupil_annotations[-1]
        end_task_time = timestamps_ximea[-1]
    print(f'Task Lasted: {end_task_time-start_task_time} seconds.')

    # Common timeline covers only the interval where every device has data.
    start_timestamp = np.max((timestamps_ximea[0], pupil_ts_eye0[0], pupil_ts_eye1[0]))
    end_timestamp = np.min((timestamps_ximea[-1], pupil_ts_eye0[-1],pupil_ts_eye1[-1]))
    common_timeline = np.arange(start_timestamp, end_timestamp, 1./target_fps)

    ximea_common_timeline_match = np.zeros_like(common_timeline)
    pupil_eye0_common_timeline_match = np.zeros_like(common_timeline)
    pupil_eye1_common_timeline_match = np.zeros_like(common_timeline)
    during_task = np.zeros_like(common_timeline)
    during_calibration = np.zeros_like(common_timeline)

    # For each common-timeline tick, pick the nearest frame from each device
    # (this repeats frames when a device runs slower than target_fps).
    for i, t in enumerate(common_timeline):
        ximea_common_timeline_match[i] = np.argmin(np.abs(timestamps_ximea - t))
        pupil_eye0_common_timeline_match[i] = np.argmin(np.abs(pupil_ts_eye0 - t))
        pupil_eye1_common_timeline_match[i] = np.argmin(np.abs(pupil_ts_eye1 - t))
        if((t > start_task_time) and (t < end_task_time)):
            during_task[i] = 1

    common_timeline_table = np.array((common_timeline, ximea_common_timeline_match, pupil_eye0_common_timeline_match, pupil_eye1_common_timeline_match, during_task, during_calibration)).T
    common_timeline_table_colnames = 'common_timeline\tximea_frame\tpupil_eye0_frame\tpupil_eye1_frame\tduring_task\tduring_calibration'
    common_timeline_file = os.path.join(analysis_folder,'common_timeline.tsv')
    common_timeline_file_human = os.path.join(analysis_folder,'common_timeline.tsv')
    np.savetxt(common_timeline_file_human, common_timeline_table, delimiter='\t', header=common_timeline_table_colnames)
    common_timeline_file = os.path.join(analysis_folder,'common_timeline.npy')
    np.save(common_timeline_file, common_timeline_table)
return(common_timeline_table) | vdutell/st-bravo_analysis | read_match_timestamps.py | read_match_timestamps.py | py | 5,221 | python | en | code | 0 | github-code | 50 |
19811541836 | # -*- coding: utf-8 -*-
# Author: Yiqiao Wang
# Date: 27.02.2022
import mne
import os
import numpy as np
import scipy.io
def read_raw_edf(path):
    """Load an EDF recording at *path* into memory and return the MNE raw object.

    :param path: string, path of raw data
    """
    return mne.io.read_raw_edf(path, preload=True)
def read_raw_mat(path, ch_names=None):
    """Load an EEG recording from a MATLAB .mat file and return an MNE RawArray.

    The fourth value of the loaded .mat dict is assumed to be the
    (channels x samples) EEG array; each channel is min-max normalised to
    [0, 1] in place before being wrapped at a 200 Hz sampling rate.

    :param path: string, path of raw data
    :param ch_names: list, names of channels.  Fixed: this argument used to
        be silently overwritten inside the function; it is now honoured,
        falling back to the standard 62-channel montage when omitted/empty.
    """
    data = scipy.io.loadmat(path)
    eeg_data = np.array(list(data.values())[3])
    # Per-channel min-max normalisation to [0, 1], done row by row in place.
    eeg_max = np.amax(eeg_data, 1)
    eeg_min = np.min(eeg_data, 1)
    for i in range(eeg_data.shape[0]):
        eeg_data[i] = (eeg_data[i] - eeg_min[i]) / (eeg_max[i] - eeg_min[i])
    if not ch_names:
        ch_names = ['FP1', 'FPZ', 'FP2', 'AF3', 'AF4', 'F7', 'F5', 'F3', 'F1', 'FZ', 'F2', 'F4', 'F6'
            , 'F8', 'FT7', 'FC5', 'FC3', 'FC1', 'FCZ', 'FC2', 'FC4', 'FC6', 'FT8', 'T7', 'C5'
            , 'C3', 'C1', 'CZ', 'C2', 'C4', 'C6', 'T8', 'TP7', 'CP5', 'CP3', 'CP1', 'CPZ', 'CP2'
            , 'CP4', 'CP6', 'TP8', 'P7', 'P5', 'P3', 'P1', 'PZ', 'P2', 'P4', 'P6', 'P8', 'PO7'
            , 'PO5', 'PO3', 'POZ', 'PO4', 'PO6', 'PO8', 'CB1', 'O1', 'OZ', 'O2', 'CB2']
    info = mne.create_info(
        ch_names,
        ch_types=['eeg'] * len(ch_names),
        sfreq=200
    )
    raw = mne.io.RawArray(eeg_data, info)
    return raw
def get_channels_names(raw):
    """Return the list of channel names stored in *raw*'s info dict.

    :param raw: raw data loaded from the disk
    """
    return raw.info['ch_names']
def rewrite(raw, include_names, save_path):
    """
    rewrite raw data by extracting some specific channels
    :param raw: raw data loaded from the disk
    :param include_names: the name of every included channels
    :param save_path: a path for saving the processed data
    :return: True on success (raw.save raises on failure)
    """
    # Select only the requested channels, skipping channels marked bad.
    picks = mne.pick_types(raw.info, include=include_names, exclude='bads')

    print("included channel names:{}".format(include_names))
    # Overwrites any existing file at save_path.
    raw.save(save_path, picks=picks, overwrite=True)
    print("successfully written!")

    return True
def os_mkdir(save_dir, dir):
    """Create the directory *dir* (including parents) under *save_dir*.

    Prints whether the directory was newly created or already existed.
    Fixed: replaced the ``... is not True`` comparison with a plain
    boolean test (identity comparison with True is an anti-pattern).

    :param save_dir: string, root dir for saving processed data
    :param dir: string, new dir to be created
    """
    new_path = os.path.join(save_dir, dir)
    if not os.path.exists(new_path):
        os.makedirs(new_path)
        print("new dir has been created! {}".format(new_path))
    else:
        print("{} dir is existed!".format(new_path))
def filter_hz(raw, high_pass, low_pass):
    """Band-pass filter *raw* in place to (high_pass, low_pass) Hz and return it.

    :param raw: raw data
    :param high_pass: float, lower cut-off frequency
    :param low_pass: float, upper cut-off frequency
    """
    raw.filter(high_pass, low_pass, fir_design='firwin')
    return raw
def get_duration_raw_data(raw, start, stop):
    """Return the segment of *raw* between *start* and *stop* seconds.

    Returns None (after printing a warning) when *stop* exceeds the
    recording's last timestamp.

    :param raw: raw data loaded from the disk
    :param start: start time
    :param stop: end time
    """
    if stop > max(raw.times):
        print("out of range!!!")
        return None
    return raw.crop(start, stop)
def split_data(raw, time_step):
    """
    Split the signal data into segments
    :param raw: raw recording; must support len(), ``.times`` and 2-D slicing
    :param time_step: window length in seconds
    :return: list of (channels x samples) arrays, one per window
    """
    data_split = []
    end = max(raw.times)
    epoch = int(end // time_step)
    fz = int(len(raw) / end)  # sampling frequency (samples per second)
    # NOTE(review): iterating range(epoch - 1) drops the final full window;
    # confirm whether that off-by-one is intentional.
    for index in range(epoch - 1):
        start = index * fz * time_step
        stop = (index + 1) * fz * time_step
        data, time = raw[:, start:stop]
        data_split.append(data)
    return data_split
def save_numpy_info(data, path):
    """Save *data* to *path* with numpy unless the file already exists.

    :param data: array-like to persist
    :param path: destination file path (should end in .npy)
    :return: True when saved, False when the file already existed
    """
    if os.path.exists(path):
        print("File is exist!!!")
        return False
    np.save(path, data)
    print("Successfully save!")
    return True
def get_all_path(path_dir):
    """Return the full paths of all entries directly inside *path_dir*."""
    return [os.path.join(path_dir, name) for name in os.listdir(path_dir)]
def bit_coding(data, step):
    """
    Binary encoding based on the number of spindles.

    For each value, the gap to the previous value (in units of *step*)
    is emitted as ceil(gap/step)-1 zeros followed by a single 1; values
    that do not advance past the previous one contribute nothing.

    :param data: serial information of a person
    :param step: window size
    :return: encoded sequence
    """
    code = []
    prev = 0
    for value in data:
        gap = (value - prev) / step
        if gap > 0:
            whole = int(gap)
            zeros = whole if gap > whole else whole - 1
            code.extend([0] * zeros)
            code.append(1)
        prev = value
    return code
def num_coding(data, step):
    """
    Coding based on number distribution.

    Walks the (assumed ascending) values, counting how many fall inside
    each successive window of width *step*; every time a value crosses
    the current window boundary the running count is appended (empty
    windows contribute 0) and a trailing non-zero count is flushed.

    :param data: serial information of a person
    :param step: window size
    :return: encoded sequence
    """
    code = []
    boundary = step
    run = 0
    for value in data:
        while value > boundary:
            code.append(run)
            run = 0
            boundary += step
        run += 1
    if run:
        code.append(run)
    return code
def multiply(data1, data2):
    """Return the dot product of two equal-length sequences.

    Indexed over len(data1) so a shorter *data2* still raises IndexError,
    matching the original behaviour.
    """
    return sum(data1[i] * data2[i] for i in range(len(data1)))
def cos(data1, data2):
    """Return the cosine similarity of two equal-length sequences.

    Fixed: the original used ``math.sqrt`` although this module never
    imports ``math`` (only mne/os/numpy/scipy.io are imported), so any
    call raised NameError.  ``** 0.5`` needs no import, and
    sqrt(a)*sqrt(b) == sqrt(a*b) for the non-negative sums of squares.
    """
    d1 = multiply(data1, data2)
    d2 = (multiply(data1, data1) * multiply(data2, data2)) ** 0.5
    result = d1 / d2
return result | yqwang306/code | util/eeg_utils.py | eeg_utils.py | py | 5,922 | python | en | code | 1 | github-code | 50 |
13511664055 | # -*- coding: UTF-8 -*-
# Roll- drone left-right tilt. (must be in [-100:100])
# Pitch- drone front-back tilt. (must be in [-100:100])
# Gaz- drone vertical speed. (must be in [-100:100])
# Yaw- drone angular speed. (must be in [-100:100])
import olympe
from olympe.messages.ardrone3.Piloting import TakeOff, moveBy, Landing
from olympe.messages.ardrone3.PilotingState import FlyingStateChanged
DRONE_IP = "192.168.42.1"
# Test values (all four axes must be in [-100:100]; see notes at top of file)
roll = 0    # left/right tilt
pitch = 0   # front/back tilt
yaw = 1     # angular speed: spin slowly in place
gaz = 0     # vertical speed
piloting_time = 2  # seconds to hold the manual command
if __name__ == "__main__":
    # Connect to drone using IP (same for all ANAFI units)
    drone = olympe.Drone(DRONE_IP)
    drone.connect()

    # Takeoff sequence
    assert drone(
        # Wait for takeoff to complete before resuming execution
        TakeOff()
        >> FlyingStateChanged(state="hovering", _timeout=5)
    ).wait().success()

    assert drone(
        moveBy(0, 0, 0, 0)
        >> FlyingStateChanged(state="hovering", _timeout=5)
    ).wait().success()

    # Begin manual control of the drone.
    # FIX: start_piloting / piloting_pcmd / stop_piloting were called as bare
    # names (NameError); in the Olympe SDK they are methods of olympe.Drone.
    drone.start_piloting()

    # Spin drone on axis for `piloting_time` seconds
    drone.piloting_pcmd(roll, pitch, yaw, gaz, piloting_time)

    # End manual control
    drone.stop_piloting()

    # Issue land command and wait for success message before disconnecting
    assert drone(Landing()).wait().success()
    drone.disconnect()
# Plan for Main GUI Loop
# Check for drone messages at every refresh (at least battery and GPS coords)
# =======================
# /start \sync \end take off
# GUI: ====================================> | Duke-XPrize-Anafi-GUI/DukeXPrizeAnafiGUI | misc/control_test.py | control_test.py | py | 1,570 | python | en | code | 5 | github-code | 50 |
12849727794 | import boto.rds
import boto.ec2
import datetime
import time
import argparse
import collections
from datetime import date, timedelta
from boto.exception import BotoServerError
def getSnapshots():
    """Return the ids of all available snapshots created by this script,
    newest first (names embed a timestamp, so reverse-sorting by name
    sorts by date).  Reads the module-level globals ``dbSnapshots`` and
    ``dbSnapshotBase`` -- presumably populated from boto RDS calls
    elsewhere in the script; TODO confirm."""
    snapshotList = []
    for snapshot in dbSnapshots:
        if snapshot.id.startswith(dbSnapshotBase) and snapshot.status == 'available':
            snapshotList.append(snapshot.id)
    snapshotList.sort(reverse=True)
    return snapshotList
def printSnapshots():
    """Print every snapshot name returned by getSnapshots() and return the list.

    Fixed: getSnapshots() always returns a list (possibly empty), never
    None, so the old ``if snapshots is None`` branch was dead and an empty
    result printed nothing.  An empty list now triggers the warning and
    returns None, as originally intended.
    """
    snapshots = getSnapshots()
    if not snapshots:
        print("Couldn't find any snapshots for instance " + dbInstanceName + ", note name must start with " + dbSnapshotBase)
        return
    for name in snapshots:
        print(name)
    return snapshots
def getStatusOfDatabase():
    """Print the status of each available RDS database instance.

    Fixed: the loop variable already *is* the instance object, so the old
    ``dbInstances[dbInst]`` indexed the list with an instance and raised
    TypeError on any non-empty instance list.
    """
    if dbInstances == []:
        print("There is currently no available RDS database running. \nPlease check the AWS Console if this is a problem!")
        return
    for dbInst in dbInstances:
        print("[%s] has status of [%s] " % (dbInst, dbInst.status))
    return None
def removeDatabase():
    """Delete the RDS instance after taking a final snapshot, polling until
    the instance can no longer be described (i.e. deletion completed)."""
    snapshotName = dbSnapshotBase + '-' + today.strftime('%Y%m%d-%H%M')
    print("Backing database up to snapshot name: %s " % snapshotName)
    deletedInstance = dbInstances[0].stop(skip_final_snapshot=False, final_snapshot_id=snapshotName)
    # Poll up to iterationMax * timerBreak seconds (40 * 30s = 20 minutes).
    iterationCount = 0
    iterationMax = 40
    timerBreak = 30
    print("Deleting database..." + dbInstanceName)
    while (iterationCount < iterationMax):
        time.sleep(timerBreak)
        iterationCount += 1
        try:
            deletedInstance.update(validate=True)
            deletedStatus = deletedInstance.status
            print("deleted db status: " + deletedStatus)
        except ValueError:
            # Instance no longer describable -> deletion finished.
            print("Could no longer access database status, assuming it has been deleted")
            break
        except BotoServerError as e:
            if e.status == "404":
                print("Could no longer access database status, assuming it has been deleted")
            else:
                print('Unknown botoServerError, giving up')
                print('status=', e.status)
                print('reason=', e.reason)
                print('body=', e.body)
                print('request_id=', e.request_id)
                print('error_code=', e.error_code)
                print('error_message=', e.error_message)
            break
    return None
def restoreDatabase():
    """Restore the RDS instance from the most recent available snapshot.

    Polls the restored instance until it reports 'available' (or the poll
    budget is exhausted), then re-attaches the VPC security group.
    """
    snapshots = getSnapshots()
    # BUG FIX: getSnapshots() returns a (possibly empty) list, never None;
    # the old 'is None' guard could not protect snapshots[0] from IndexError.
    if not snapshots:
        print("There are no snapshots to restore from, note name must start with " + dbSnapshotBase)
        return
    dbSnapshotName = snapshots[0]
    print("Will restore database from the most recent available snapshot: [%s] " % dbSnapshotName)
    dbClassName = 'db.m3.large'
    secGroupId = 'sg-86020ce2'
    restoredInstance = conn.restore_dbinstance_from_dbsnapshot(dbSnapshotName, dbInstanceName, dbClassName, multi_az=True, db_subnet_group_name='tc-pcf-rdsgroup')
    iterationCount = 0
    iterationMax = 60
    timerBreak = 30
    restoredStatus = 'restoring'
    while ((iterationCount < iterationMax) and (restoredStatus != 'available')):
        time.sleep(timerBreak)
        iterationCount += 1
        try:
            restoredInstance.update(validate=True)
            restoredStatus = restoredInstance.status
            print("restored db status: " + restoredStatus)
        except ValueError:
            print("could no longer access database status, exiting")
    if(iterationCount < iterationMax):
        print("\nWaited %s seconds to remove old instance" % (iterationCount*timerBreak))
    else:
        print("\nTimed out waiting for old instance to be removed, something probably went wrong, waited for maximum of %s seconds" % (iterationMax*timerBreak))
        return
    # The restore API cannot set VPC security groups, so apply them afterwards.
    conn.modify_dbinstance(dbInstanceName, vpc_security_groups=[secGroupId])
    return None
def overrideLatestSnapshot():
    """Restore the RDS instance from the user-specified snapshot (args.override).

    Validates the requested snapshot against the available list, then follows
    the same restore/poll/modify sequence as restoreDatabase().
    """
    snapshots = getSnapshots()
    # BUG FIX: getSnapshots() returns a (possibly empty) list, never None;
    # test emptiness rather than identity with None.
    if not snapshots:
        print("There are no snapshots to restore from, note name must start with " + dbSnapshotBase)
        return
    if args.override not in snapshots:
        print("The snapshot entered for the override is not an available snapshot! \nAvailable snapshots are: \n")
        for snap in snapshots:
            print(snap)
        return
    print("The snapshot specified to use for the override is: [%s] \nWill now restore database from this snapshot!" % args.override)
    dbClassName = 'db.m3.large'
    secGroupId = 'sg-86020ce2'
    restoredInstance = conn.restore_dbinstance_from_dbsnapshot(args.override, dbInstanceName, dbClassName, multi_az=True, db_subnet_group_name='tc-pcf-rdsgroup')
    iterationCount = 0
    iterationMax = 60
    timerBreak = 30
    restoredStatus = 'restoring'
    while ((iterationCount < iterationMax) and (restoredStatus != 'available')):
        time.sleep(timerBreak)
        iterationCount += 1
        try:
            restoredInstance.update(validate=True)
            restoredStatus = restoredInstance.status
            print("restored db status: " + restoredStatus)
        except ValueError:
            print("could no longer access database status, exiting")
    if(iterationCount < iterationMax):
        print("\nWaited %s seconds to remove old instance" % (iterationCount*timerBreak))
    else:
        print("\nTimed out waiting for old instance to be removed, something probably went wrong, waited for maximum of %s seconds" % (iterationMax*timerBreak))
        return
    # The restore API cannot set VPC security groups, so apply them afterwards.
    conn.modify_dbinstance(dbInstanceName, vpc_security_groups=[secGroupId])
    return None
def delSnapshots():
    """Report the oldest snapshot (deletion is not implemented yet).

    The original intent — delete snapshots once more than 10 exist — was
    never finished; this only identifies the oldest candidate.
    """
    snapshots = getSnapshots()
    # Robustness: getSnapshots() may return an empty list; without this
    # guard, snapshots[-1] below raised IndexError.
    if not snapshots:
        print("There are no snapshots to delete, note name must start with " + dbSnapshotBase)
        return
    print("The oldest snapshot in the list is %s\n " % (snapshots[-1]))
# Command-line interface: exactly one action flag may be given; --region
# selects the AWS region to connect to (defaults to us-east-1).
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument('--remove', action='store_true', help='takes a snapshot and removes the specified database instance')
group.add_argument('--restore', action='store_true', help='restore the database from the latest snapshot taken')
group.add_argument('--status', action='store_true', help='status for the specified database snapshot')
group.add_argument('--snapshots', action='store_true', help='lists all available snapshots for the specified database instance')
group.add_argument('--del_snapshots', action='store_true', help='delete snapshots from end of list if list greater than 10')
group.add_argument('-o','--override', help='allows user to specify snapshot to use for a database restore')
parser.add_argument('-r','--region', help='connect to the specified region', default='us-east-1', choices=['us-east-1','us-west-2'])
args = parser.parse_args()
# Globals used throughout the helper functions above.
dbInstanceName = 'tc-pcf-bosh'
dbSnapshotBase = 'tc-pcf-bosh-snapshot'
today = datetime.datetime.now()
print("Connecting to AWS Region [%s] \n" % args.region)
# Create a connection to the service
conn = boto.rds.connect_to_region(args.region)
dbInstances = conn.get_all_dbinstances()
dbSnapshots = conn.get_all_dbsnapshots(instance_id=dbInstanceName)
# Dispatch: the default (no flag) prints the current database status.
if args.remove:
    print("\ncreating a final snapshot and removing specified database ... \n")
    removeDatabase()
    print("\nYour AWS RDS database has been successfully removed!")
elif args.restore:
    print("\nrestoring database ... \n")
    restoreDatabase()
    print("\nYour AWS RDS database is now restored!")
elif args.override:
    overrideLatestSnapshot()
elif args.snapshots:
    printSnapshots()
elif args.del_snapshots:
    delSnapshots()
else:
    getStatusOfDatabase()
| mminges/cf-scripts | pcf-db-script.py | pcf-db-script.py | py | 8,425 | python | en | code | 0 | github-code | 50 |
32345763727 | from pathlib import Path
import pandas as pd
from config import CsvCube, CsvCubeConfig
import csvwtools
# Build a cube from the local info.json configuration for the hmrc-ots-cn8
# dataset, load two quarterly data chunks into it, and serialise it as CSV-W.
hmrc_ots_cn8_config = CsvCubeConfig.from_info_json(Path("info.json"), "hmrc-ots-cn8")
hmrc_ots_cn8_cube = CsvCube(hmrc_ots_cn8_config)
# Alternatively we could have something like
# hmrc_ots_cn8_cube = csvwtools.load_cube_from_csvw(Path(f"out/{hmrc_ots_cn8_cube.config.dataset_identifier}.csv-metadata.json"))
# This would allow us to add some more data to an existing CSV-W.
# First chunk, passed positionally as "Q2-2020".
hmrc_ots_cn8_cube.set_data(pd.DataFrame(
    {
        "flow_type": [1, 1, 1],
        "country_id": [2, 2, 2],
        "sitc_id": [3, 3, 3],
        "cn8_id": [4, 4, 4],
        "port": [5, 5, 5],
        "period": [6, 6, 6],
        "measure_type": ["net-mass", "net-mass", "monetary-value"],
        "unit_type": [8, 8, 8],
        "value": [9.0, 9.0, 9.0]
    }
), "Q2-2020")
# Second chunk, passed by keyword, exercising the chunk_name parameter.
hmrc_ots_cn8_cube.set_data(pd.DataFrame({
    "flow_type": [-1, -1, -1],
    "country_id": [-2, -2, -2],
    "sitc_id": [-3, -3, -3],
    "cn8_id": [-4, -4, -4],
    "port": [-5, -5, -5],
    "period": [-6, -6, -6],
    "measure_type": ["net-mass", "net-mass", "monetary-value"],
    "unit_type": [-8, -8, -8],
    "value": [-9.0, -9.0, -9.0]
    }
), chunk_name="Q3-2020")
# Write the cube plus CSV-W metadata next to out/<dataset-id>.csv-metadata.json.
csvwtools.cube_to_csvw(hmrc_ots_cn8_cube, Path(f"out/{hmrc_ots_cn8_cube.config.dataset_identifier}.csv-metadata.json"))
| robons/cubes-chunk-proposal | example.py | example.py | py | 1,399 | python | en | code | 0 | github-code | 50 |
25457278941 | from tkinter import *
from tkinter import font
import psutil
from psutil import disk_partitions, disk_usage, virtual_memory, cpu_percent
from tabulate import tabulate
window = Tk()
window.geometry("1024x768")
window.title("CPU - RAM - DISK USAGE")
# Función para mostrar información de la CPU
def show_cpu_info():
    """Refresh the CPU usage/frequency labels; reschedules itself every 500 ms."""
    cpu_use = cpu_percent(interval=1, percpu=True)  # per-core usage percentages
    cpu_label.config(text='{}%'.format(cpu_use[0]))
    # Get the frequency of each core
    cpu_freqs = psutil.cpu_freq(percpu=True)
    # Update the displayed CPU frequency on every iteration
    avg_freq = sum(cpu_freq.current for cpu_freq in cpu_freqs) / len(cpu_freqs)
    cpu_freq_label.config(text='CPU Frequency: {} MHz'.format(int(avg_freq)))
    # Create one label per core showing its usage percentage.
    # NOTE(review): these Label widgets are re-created on every 500 ms tick
    # and never destroyed, so widgets pile up over time — consider creating
    # them once and only updating their text; confirm before changing.
    cpu_cores_labels = []
    for i, core_usage in enumerate(cpu_use):
        core_label = Label(window, bg='#071C1E', fg='#FA5125', font="Arial 20 bold", width=12)
        core_label.place(x=610 + (i % 2) * 200, y=130 + (i // 2) * 50)
        core_label.config(text='Core {}: {}%'.format(i + 1, core_usage))
        cpu_cores_labels.append(core_label)
    cpu_freq_label.after(500, show_cpu_info)
# Function converter Bytes to Gigabytes
def conversor_bytes_to_gb(bytes_value):
    """Convert a byte count to gigabytes, returned as a string with one decimal."""
    bytes_per_gigabyte = 2 ** 30  # 1073741824 bytes
    return '{:.1f}'.format(bytes_value / bytes_per_gigabyte)
# Function to display RAM information
def show_ram_info():
    """Refresh the RAM usage label; reschedules itself every 200 ms."""
    mem = virtual_memory()
    used_gb = conversor_bytes_to_gb(mem.used)
    total_gb = conversor_bytes_to_gb(mem.total)
    ram_label.config(text='{} GB / {} GB ({} %)'.format(used_gb, total_gb, mem.percent))
    ram_label.after(200, show_ram_info)
data = disk_partitions(all=False)
def details(device_name):
    """Return the partition entry from `data` matching *device_name*, or None."""
    return next((partition for partition in data if partition.device == device_name), None)
# Function to display disk information
def disk_info(device_name):
    """Collect usage and partition metadata for one device.

    Returns a dict with Total/Used/Free (GB strings), Percent, and — when
    the partition is known — mount point, filesystem type and mount options.
    Returns an incomplete/empty dict when the device cannot be inspected.
    """
    disk_info = {}
    try:
        usage = disk_usage(device_name)
        disk_info['Device'] = device_name
        disk_info['Total'] = f"{conversor_bytes_to_gb(usage.used + usage.free)} GB"
        disk_info['Used'] = f"{conversor_bytes_to_gb(usage.used)} GB"
        disk_info['Free'] = f"{conversor_bytes_to_gb(usage.free)} GB"
        # BUG FIX: usage.percent is a percentage, not gigabytes — the old
        # label rendered e.g. "42.3 GB" for what is really 42.3 %.
        disk_info['Percent'] = f"{usage.percent} %"
        info = details(device_name)
        if info is not None:
            disk_info.update({"Device": info.device})
            disk_info["Mount Point"] = info.mountpoint
            disk_info["FS-Type"] = info.fstype
            disk_info["Opts"] = info.opts
    except (PermissionError, FileNotFoundError):
        # Best effort: unreadable/vanished devices yield whatever was
        # collected before the failure (possibly an empty dict).
        pass
    return disk_info
# Function that returns the disk partitions
def get_device_names():
    """Device identifier for every detected partition."""
    return [partition.device for partition in data]
def all_disk_info():
    """disk_info() result for every detected partition device."""
    return [disk_info(name) for name in get_device_names()]
# Title program
title_program = Label(window, text='PC Performance Manager', font="arial 40 bold", fg='#14747F')
title_program.place(x=110, y=20)
# CPU title
cpu_title_label = Label(window, text='CPU Usage: ', font="arial 24 bold", fg='#FA5125')
cpu_title_label.place(x=20, y=155)
# Label to show percent of CPU
cpu_label = Label(window, bg='#071C1E', fg='#FA5125', font="Arial 30 bold", width=15)
cpu_label.place(x=230, y=150)
# Label showing the CPU frequency (placed inside the __main__ block below)
cpu_freq_label = Label(window, bg='#071C1E', fg='#FA5125', font="Arial 20 bold", width=28)
# Label showing the CPU temperature (never updated elsewhere in this file)
cpu_temp_label = Label(window, font=("Arial", 18))
cpu_temp_label.pack(pady=10)
# RAM title
ram_title_label = Label(window, text='RAM Usage: ', font="arial 24 bold", fg='#34A96C')
ram_title_label.place(x=20, y=270)
# Label to show percent of RAM
ram_label = Label(window, bg='#071C1E', fg='#FA5125', font="Arial 30 bold", width=20)
ram_label.place(x=230, y=260)
# Disk title
disk_title_label = Label(window, text='Disk Usage: ', font="arial 24 bold", fg='#797E1E')
disk_title_label.place(x=20,y=350)
#text area disk information
textArea=Text(window,bg="#071C1E", fg="yellow", width=85,height=6,padx=10, font=("consolas", 14))
textArea.place(x=15,y=410)
if __name__ == '__main__':
    # Kick off the self-rescheduling refresh loops, fill the disk table once,
    # then hand control to tkinter.
    show_cpu_info()
    show_ram_info()
    info = all_disk_info()
    _list = [i.values() for i in info]
    info_tabulated = tabulate(_list, headers=info[0].keys(), tablefmt="simple", missingval=("-"))
    textArea.insert(END, info_tabulated)
    cpu_freq_label.place(x=180, y=210) # placed after the per-core label creation loop
    cpu_temp_label.place(x=180, y=250) # placed after the per-core label creation loop
    window.mainloop()
| JuanDQuintero/SO-FinalProject | cpu.py | cpu.py | py | 4,889 | python | en | code | 0 | github-code | 50 |
14880553944 | from pathlib import Path
from typing import List, Tuple
class InfoBase:
    """
    InfoBase to construct the project

    Attributes:
        licenses: list of (header_path, license_path) pairs discovered by
            _get_licenses(); empty until that method is called.
    """

    def __init__(self):
        # Paths are anchored at this module's directory.
        self._root = Path(__file__).parent
        self.templates = self._root.joinpath('templates')
        self.licenses: List[Tuple[Path, Path]] = []

    def _get_licenses(self):
        """
        Parse known licenses: pair every '<name>_header' template with its
        matching '<name>' license file under templates/licenses.
        """
        headers: List[Path] = []
        licenses: List[Path] = []
        for text_file in self.templates.joinpath('licenses').glob('*'):
            if not text_file.is_file():
                continue
            if text_file.stem[-7:] == '_header':
                headers.append(text_file)
            else:
                licenses.append(text_file)
        for header_path in headers:
            # BUG FIX: the old code compared a *str* against a list of Path
            # objects, so the membership test never succeeded and no license
            # pair was ever registered.  Build a Path instead.
            license_path = header_path.with_name(header_path.name.replace('_header', ''))
            if license_path in licenses:
                self.licenses.append((header_path, license_path))
INFO_BASE = InfoBase()
"""
Database-like Object that contains all necessary template information
"""
| pradyparanjpe/pyprojstencil | pyprojstencil/read_templates.py | read_templates.py | py | 1,115 | python | en | code | 0 | github-code | 50 |
import requests

# define the api url
url = 'https://jsonplaceholder.typicode.com/users'

# api call
response = requests.get(url)
# get the data from the response
data = response.json()

# ask which field of each user to display
options = '\n*** id - name - username - website - email - address - phone - company *** \n'
# BUG FIX: the answer used to be stored in a variable named `input`,
# shadowing the builtin and making any later call to input() impossible.
field = input(f"{options}\n--- What do you want to know about the user? : ")

# walk every user record and print the requested field; nested dicts
# (address, company) are flattened into a single line
for user in data:
    for key in user:
        if key == field:
            if key == 'address':
                print(f"{user['address']['street']} {user['address']['suite']} {user['address']['city']} {user['address']['zipcode']}")
                break
            if key == 'company':
                print(f"{user['company']['name']} {user['company']['catchPhrase']} {user['company']['bs']}")
                break
            # print data
            print(' -- ', user[key])
21874387158 | from typing import List
# https://leetcode.com/problems/word-search/
class Solution79:
    def exist(self, board: List[List[str]], word: str) -> bool:  # O(m*n* 3^L)
        """Backtracking search: does *word* appear as a path of horizontally or
        vertically adjacent cells, each used at most once?"""
        rows, cols = len(board), len(board[0])
        length = len(word)
        visited = set()

        def backtrack(r, c, idx):
            if idx >= length:
                return True
            outside = r < 0 or c < 0 or r >= rows or c >= cols
            if outside or (r, c) in visited or board[r][c] != word[idx]:
                return False
            visited.add((r, c))
            found = (backtrack(r + 1, c, idx + 1)
                     or backtrack(r - 1, c, idx + 1)
                     or backtrack(r, c + 1, idx + 1)
                     or backtrack(r, c - 1, idx + 1))
            visited.remove((r, c))
            return found

        return any(backtrack(r, c, 0) for r in range(rows) for c in range(cols))
# https://leetcode.com/problems/unique-paths-iii/
class Solution980:
    def uniquePathsIII(self, grid: List[List[int]]) -> int:
        """Count walks from the start square (1) to the end square (2) that
        visit every empty square (0) exactly once, never touching obstacles (-1)."""
        rows, cols = len(grid), len(grid[0])
        empties = 0
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] == 1:
                    start = (r, c)
                elif grid[r][c] == 0:
                    empties += 1
        visited = set()
        paths = 0

        def walk(r, c, remaining):
            nonlocal paths
            if r < 0 or c < 0 or r >= rows or c >= cols:
                return
            if (r, c) in visited or grid[r][c] == -1:
                return
            if grid[r][c] == 2:
                if remaining == 0:
                    paths += 1
                return
            visited.add((r, c))
            for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
                walk(nr, nc, remaining - 1)
            visited.remove((r, c))

        # A valid walk makes exactly empties + 1 moves after the start square:
        # one onto each empty square plus the final move onto the end square.
        walk(start[0], start[1], empties + 1)
        return paths
# https://leetcode.com/problems/robot-room-cleaner/
class Solution489:
    def cleanRoom(self, robot):
        """Clean every reachable cell using only the robot's relative API
        (move / turnRight / clean), via DFS with backtracking.

        Coordinates are tracked relative to the robot's unknown start, which
        is treated as (0, 0) facing 'up' (direction index 0).
        """
        def go_back():
            # Undo one move: spin 180°, step back, spin 180° again so the
            # robot faces its original direction.
            robot.turnRight()
            robot.turnRight()
            robot.move()
            robot.turnRight()
            robot.turnRight()
        def dfs(x, y, d):
            seen.add((x, y))
            robot.clean()
            # going clockwise : 0: 'up', 1: 'right', 2: 'down', 3: 'left' Explore 4 directions : up, right,
            # down, and left (the order is important since the idea is always to turn right)
            for i in range(4):
                new_d = (d + i) % 4
                dx, dy = directions[new_d]
                nx, ny = x + dx, y + dy
                if (nx, ny) not in seen and robot.move():
                    dfs(nx, ny, new_d)
                    go_back()
                # turn the robot following chosen direction : clockwise
                robot.turnRight()
        # going clockwise : 0: 'up', 1: 'right', 2: 'down', 3: 'left'
        directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]
        seen = set()
        dfs(0, 0, 0)
| sunjianbo945/leetcode | src/data_structure/search/dfs_find_specific_path.py | dfs_find_specific_path.py | py | 3,033 | python | en | code | 0 | github-code | 50 |
26176041593 | import numpy as np
from Averages import *
class LCFS:
    '''
    Class responsible for computing the waiting time (waintingTime method)
    and the processing time (processingTime method) for the LCFS
    (last-come-first-served) scheduling algorithm.

    NOTE(review): all loops are hard-coded to a 100x100 layout — the class
    assumes arr/wt/pt are 100x100 numpy arrays; confirm with callers.
    '''
    def __init__(self,arr,wt,pt):
        '''
        The constructor takes 4 arguments: self (the instance the method is
        called on), arr — a 2-D array holding the CPU times each process
        needs to finish, wt — a 2-D numpy array holding the processes'
        waiting times, and pt — a 2-D numpy array holding the processes'
        processing times.
        '''
        self.arr = arr
        self.wt=wt
        self.pt=pt
        self.revArr=np.fliplr(self.arr) # reverse the order of each row of the table
    def waintingTime(self):
        '''
        Takes a single argument: the reference to the instance the method is
        called on.  Computes the waiting time: wt[i][j] is the cumulative
        burst time of all processes scheduled before process j in row i
        (the first process always waits 0).

        NOTE(review): the method name contains a typo ('wainting'); renaming
        it would break callers, so it is left as-is.
        '''
        for i in range(0,100):
            self.wt[0,i]=0
        for i in range(0,100):
            for j in range(1,100):
                a=self.revArr[i,j-1]
                b=self.wt[i,j-1]
                self.wt[i,j] = int(a) + int(b)
        #print(self.wt)
        return
    def processingTime(self):
        '''
        Takes a single argument: the reference to the instance the method is
        called on.  Computes the processing (turnaround) time as the running
        sum of the reversed burst times.

        NOTE(review): when j == 0, pt[i, j-1] wraps around to the LAST
        column of the previous content of pt — the first column therefore
        depends on pt's initial values; confirm that pt is zero-initialised
        by the caller.
        '''
        for i in range(0,100):
            for j in range(0,100):
                a=self.revArr[i,j]
                b=self.pt[i,j-1]
                self.pt[i,j] = int(a) + int(b)
        #print(self.pt)
        return
    def averageProcessing(self):
        '''
        Takes a single argument: the reference to the instance the method is
        called on.  Computes and returns the mean of the average processing
        times.
        '''
        averages = Averages(self.revArr,self.wt,self.pt)
        return averages.processingAverage()
    def averageWaiting(self):
        '''
        Takes a single argument: the reference to the instance the method is
        called on.  Computes and returns the mean of the average waiting
        times.
        '''
        averages = Averages(self.revArr,self.wt,self.pt)
        return averages.waitingAverage()
18018350288 | """
URLs for `admin_dashboard.applications` app
"""
from django.urls import include, path
from openedx.adg.admin_dashboard.applications.views import ApplicationDetailView, ApplicationListingView
from openedx.adg.lms.applications.models import UserApplication
from .views import ApplicationsDashboardView
urlpatterns = [
    # Summary dashboard for all applications.
    path('', ApplicationsDashboardView.as_view(), name='applications_summary'),
    # Listing views share one class; the extra dict selects which
    # application status the view should filter on.
    path(
        'open/',
        ApplicationListingView.as_view(),
        {'application_type': UserApplication.OPEN},
        name='open_applications'
    ),
    path(
        'waitlisted/', ApplicationListingView.as_view(),
        {'application_type': UserApplication.WAITLIST},
        name='waitlisted_applications'
    ),
    path(
        'accepted/', ApplicationListingView.as_view(),
        {'application_type': UserApplication.ACCEPTED},
        name='accepted_applications'
    ),
    # Detail/review page for a single application, keyed by primary key.
    path(
        '<int:pk>/review/', ApplicationDetailView.as_view(), name='application_review'
    ),
    # REST endpoints live in the nested api package.
    path(
        'api/',
        include('openedx.adg.admin_dashboard.applications.api.urls')
    ),
]
| OmnipreneurshipAcademy/edx-platform | openedx/adg/admin_dashboard/applications/urls.py | urls.py | py | 1,106 | python | en | code | null | github-code | 50 |
16548413203 | import pandas as pd
import matplotlib.pyplot as plt
from tabulate import tabulate
import statsmodels.api as sm
import numbers
def print_tabulate(df: pd.DataFrame):
    """Pretty-print *df* as an org-mode table."""
    rendered = tabulate(df, headers=df.columns, tablefmt='orgtbl')
    print(rendered)
# Regresión Linear
def transform_variable(df: pd.DataFrame, x: str) -> pd.Series:
    """Return df[x] unchanged when it holds numbers; otherwise return an
    ordinal stand-in series 0..n-1 so the column can be used as a regressor.

    BUG FIX: the original peeked at df[x][0], a *label* lookup that raises
    KeyError whenever the index does not contain the label 0 (e.g. after
    filtering or with a string index); use positional access instead.
    The stray 'Nada' debug print was removed.
    """
    if isinstance(df[x].iloc[0], numbers.Number):
        return df[x]
    return pd.Series([i for i in range(0, len(df[x]))])
def regresion_lineal(df: pd.DataFrame, x:str, y: str)->None:
    """Fit an OLS regression of df[y] on df[x] and save a scatter plot with
    the mean line (green) and fitted line (red) to img/reglineal_{y}_{x}.png.
    """
    fixed_x = transform_variable(df, x)
    model= sm.OLS(df[y],sm.add_constant(fixed_x)).fit()
    #print(model.summary())
    # Recover the intercept (coef[0]) and slope (coef[1]) by parsing the
    # summary's HTML coefficient table back into a DataFrame.
    coef = pd.read_html(model.summary().tables[1].as_html(),header=0,index_col=0)[0]['coef']
    df.plot(x=x,y=y, kind='scatter')
    # Horizontal line at the mean of y, one point per x position.
    plt.plot(df[x],[pd.DataFrame.mean(df[y]) for _ in range(0, len(df[x]))], color='green')
    # Fitted line evaluated at ordinal positions 0..n-1 (the comprehension's
    # x is local to it and does not clobber the x parameter).
    plt.plot(df[x],[ coef.values[1] * x + coef.values[0] for x in range(0, len(df[x]))], color='red')
    plt.xticks(rotation=90)
    plt.savefig(f'img/reglineal_{y}_{x}.png')
    plt.close()
# Load the Netflix drama catalogue, average the release year per rating,
# and fit/plot a linear regression of mean year on rating.
df = pd.read_csv("csv/netflix_catalogo_drama.csv")
df_by_rat = df.groupby(["rating"])[["release_year"]].agg('mean')
df_by_rat.reset_index(inplace=True)
df_by_rat.columns=["rating", "anio_prom"]
#print_tabulate(df_by_rat.head(13))
regresion_lineal(df_by_rat, "rating", "anio_prom")
29007126713 | filename = 'pi_million_digits.txt'
with open(filename) as file_object: # open()返回的对象只在with代码块内可用
lines = file_object.readlines()
pi_string = ''
for line in lines:
pi_string += line.strip()
# print(len(pi_string))
birthday = input("Please input your birthday, in the form mmddyy: ")
if birthday in pi_string:
print("yes")
else:
print("no")
| wngq/PythonCrashCourse | chapter10/pi_birthday.py | pi_birthday.py | py | 384 | python | en | code | 0 | github-code | 50 |
9076575089 | #services/users/manage.py
import unittest
import coverage # new
from flask.cli import FlaskGroup
from project import create_app, db # new
from project.api.models import User, Document, Documententity, Entity # new
# Coverage is configured and started at import time so that code imported
# below (the Flask app and models) is measured; tests/config are excluded.
COV = coverage.coverage(
    branch=True,
    include='project/*',
    omit=[
        'project/tests/*',
        'project/config.py',
    ]
)
COV.start()

app = create_app()  # new
cli = FlaskGroup(create_app=create_app)  # new: Flask CLI group hosting the commands below
@cli.command('recreate_db')
def recreate_db():
    # Drop and recreate every table — destroys all existing data.
    db.drop_all()
    db.create_all()
    db.session.commit()


@cli.command()
def test():
    """Run the tests without code coverage."""
    tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')
    result = unittest.TextTestRunner(verbosity=2).run(tests)
    if result.wasSuccessful():
        return 0
    return 1


# NOTE(review): the four seed commands below all reuse the function name
# seed_db; each decorator registers the command before the name is rebound,
# so the CLI works, but distinct function names would be clearer.
@cli.command('seed_db')
def seed_db():
    # Seed two sample Document rows.
    db.session.add(Document(documentname='Certificado de Estudios post grado', documentcode='C001', documenttype='Certificado', documentprice=19))
    db.session.add(Document(documentname='Certificado de Estudios Pre graddo', documentcode='C001', documenttype='Certificado', documentprice=19))
    db.session.commit()


@cli.command('seed_dbe')
def seed_db():
    # Seed two sample Entity rows.
    db.session.add(Entity(entityname = 'Escuela de Ingenieria de Sistemas' , entityplace = 'FIA', entitycode = 'E001' ))
    db.session.add(Entity(entityname = 'Escuela de Ingenieria Civil' , entityplace = 'FIA' , entitycode = 'E002' ))
    db.session.commit()


@cli.command('seed_dbu')
def seed_db():
    # Seed two sample User rows (referencing the entities above).
    db.session.add(User(username='Comando', entity_id=1, lastname='Capac', email='capac@gmail.com', password='enlamansiondelosheroes', status = 0))
    db.session.add(User(username='Daniel', entity_id=2, lastname='Pacheco', email='danielpacheco@gmail.com', password='dasdasdasd', status = 1))
    db.session.commit()


@cli.command('seed_dbd')
def seed_db():
    # Seed the document/entity join table.
    db.session.add(Documententity(document_id=1, entity_id=1))
    db.session.add(Documententity(document_id=2, entity_id=1))
    db.session.add(Documententity(document_id=1, entity_id=2))
    db.session.commit()
# coverage command
@cli.command()
def cov():
    """Run the unit tests under coverage and print/record the report."""
    tests = unittest.TestLoader().discover('project/tests')
    result = unittest.TextTestRunner(verbosity=2).run(tests)
    if result.wasSuccessful():
        COV.stop()
        COV.save()
        print('Resumen de cobertura')
        COV.report()
        COV.html_report()
        COV.erase()
        return 0
    # BUG FIX: the original ended with `sys.exit(result)` but `sys` is never
    # imported in this module, so a failing run crashed with NameError.
    # Return 1 instead, matching the sibling `test` command.
    return 1


if __name__ == '__main__':
    cli()
74906671835 | from io import BytesIO
import xlsxwriter
def get_format(workbook, bold=False):
    """Build the shared cell format (Arial 11, left / vertically centred,
    black text), optionally bold."""
    cell_format = workbook.add_format()
    if bold:
        cell_format.set_bold()
    cell_format.set_font_name('Arial')
    cell_format.set_font_size(11)
    cell_format.set_align('left')
    cell_format.set_align('vcenter')
    cell_format.set_font_color('black')
    return cell_format
def build_sheet(workbook, sheet_name, header, data, header_format, body_format):
    """Add one worksheet: header on row 1, data rows from row 2 onward."""
    worksheet = workbook.add_worksheet(sheet_name)
    worksheet.write_row('A1', header, header_format)
    for row_number, row in enumerate(data, start=2):
        worksheet.write_row('A{0}'.format(row_number), row, body_format)
def build_xlsx(xlsx_filename_or_fp, sheet_info_list):
    """Write a workbook to *xlsx_filename_or_fp*, one sheet per dict in
    *sheet_info_list* (keys: sheet_name, header, data)."""
    workbook = xlsxwriter.Workbook(xlsx_filename_or_fp)
    header_format = get_format(workbook, bold=True)
    body_format = get_format(workbook)
    for sheet_info in sheet_info_list:
        build_sheet(
            workbook,
            sheet_info.get('sheet_name'),
            sheet_info.get('header'),
            sheet_info.get('data'),
            header_format,
            body_format,
        )
    workbook.close()
def xlsx_for_file(build_data):
    """Render *build_data* into an in-memory xlsx and return the rewound buffer."""
    buffer = BytesIO()
    build_xlsx(buffer, build_data)
    buffer.seek(0)
    return buffer
| Haner27/open-box | open_box/xlsx/writer.py | writer.py | py | 1,160 | python | en | code | 1 | github-code | 50 |
8083088150 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import numpy as np
# def fun(x,y,z,d,a1,a2):
# pass
def getDistanceAngle(pixel_x, pixel_y, real_z):
    """Back-project a pixel with known depth through fixed pinhole intrinsics.

    Args:
        pixel_x, pixel_y: pixel coordinates in the image.
        real_z: metric depth of that pixel along the optical axis.

    Returns:
        (absolute_distance, vertical_angle, horizon_angle): Euclidean
        distance to the 3-D point and its elevation/azimuth in radians.
    """
    # Fixed camera intrinsics (focal lengths and principal point, pixels).
    camera_fx = 383.599
    camera_fy = 383.599
    camera_cx = 320.583
    camera_cy = 238.327
    # BUG FIX: np.float was an alias of the builtin float, deprecated in
    # NumPy 1.20 and removed in 1.24 — calling it crashes on modern NumPy.
    z = float(real_z)
    x = (pixel_x - camera_cx) * z / camera_fx
    y = (pixel_y - camera_cy) * z / camera_fy
    horizon_angle = math.atan2(x, z)
    vertical_angle = math.atan2(y, z)
    absolute_distance = math.sqrt(x * x + y * y + z * z)
    return absolute_distance, vertical_angle, horizon_angle
| Kester-Broatch/DroneAlgos | CA_functions.py | CA_functions.py | py | 767 | python | en | code | 0 | github-code | 50 |
33543354383 | import cv2
import matplotlib.pyplot as plt
# Matching 2 Images better than simple template matching
# ORB keypoint matching between a template and a target image.
template = cv2.imread('Pic-11.png')
img = cv2.imread('Pic-12.png')
orb = cv2.ORB_create()
# Detect keypoints and compute binary descriptors for both images.
kp1, des1 = orb.detectAndCompute(template, None)
kp2, des2 = orb.detectAndCompute(img, None)
# Brute-force matcher with Hamming distance (appropriate for ORB's binary
# descriptors); crossCheck keeps only mutual best matches.
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key= lambda x:x.distance)
# Draw the 10 best matches side by side and display the result.
fImg = cv2.drawMatches(template, kp1, img, kp2, matches[:10], None, flags=2)
plt.imshow(fImg)
plt.show()
14063219075 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from DDPG_agent import *
from ReplayBuffer import *
from configs import *
class MADDPG_agent:
    """Multi-agent DDPG wrapper: one DDPG_agent per environment agent, a
    shared replay buffer, and centralized-critic updates (each critic sees
    all agents' observations and actions)."""
    def __init__(self,env,brain_name,buffer_size = BUFFER_SIZE,batch_size = BATCH_SIZE,update_every = UPDATE_EVERY,
                learn_num = LEARN_NUM,state_size = STATE_SIZE,
                hidden_in_actor = HIDDEN_IN_ACTOR,hidden_out_actor = HIDDEN_OUT_ACTOR,out_actor = OUT_ACTOR,
                merge = MERGE, full_state_size = FULL_STATE_SIZE, full_action_size = FULL_ACTION_SIZE,
                hidden_in_critic = HIDDEN_IN_CRITIC,hidden_out_critic1 = HIDDEN_OUT_CRITIC1,hidden_out_critic2 = HIDDEN_OUT_CRITIC2,
                gauss = GAUSS, seed = SEED,lr_actor=LR_ACTOR,lr_critic=LR_CRITIC,discount_factor=DISCOUNT_FACTOR, tau=TAU):
        """ Params
        ===============
        merge (bool) : to merge the states and actions before feeding them to the critic
        gauss (bool) : to use the gaussian noise for exploration instead of the OU noise
        """
        self.env_info = env.reset(train_mode=True)[brain_name]
        self.num_agents=len(self.env_info.agents)
        self.hidden_in_actor=hidden_in_actor
        self.hidden_out_actor=hidden_out_actor
        self.hidden_in_critic=hidden_in_critic
        self.hidden_out_critic1=hidden_out_critic1
        self.hidden_out_critic2 = hidden_out_critic2
        self.merge=merge
        self.lr_actor=lr_actor
        self.lr_critic=lr_critic
        # One independent DDPG actor/critic pair per environment agent.
        self.maddpg_agent = [DDPG_agent(state_size,self.hidden_in_actor,self.hidden_out_actor,out_actor,merge,
                                full_state_size, full_action_size,
                                self.hidden_in_critic,self.hidden_out_critic1, self.hidden_out_critic2,seed,
                                self.lr_actor,self.lr_critic,gauss) for i in range(self.num_agents)]
        self.seed=seed
        self.discount_factor = discount_factor
        self.tau = tau
        self.t_step = 0
        self.update_every=update_every
        self.learn_num=learn_num
        self.batch_size=batch_size
        self.buffer_size=buffer_size
        # Replay buffer shared across agents (experiences are stored jointly).
        self.memory=ReplayBuffer(out_actor, self.buffer_size, self.batch_size,self.seed)
        self.noise_step = 0
    def get_actors(self):
        """get actors of all the agents in the MADDPG object"""
        actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]
        return actors
    def get_target_actors(self):
        """get target_actors of all the agents in the MADDPG object"""
        target_actors = [ddpg_agent.target_actor for ddpg_agent in self.maddpg_agent]
        return target_actors
    def act(self, obs_all_agents, noise=1.):
        """get actions from all agents in the MADDPG object"""
        # Also advances the learn-schedule counter and the noise schedule.
        self.t_step = (self.t_step + 1) % self.update_every
        self.noise_step += 1
        actions = [agent.act(obs, self.noise_step, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]
        return torch.stack(actions).squeeze().permute([1,0]).cpu().numpy()
    def target_act(self, obs_all_agents, noise=0.0):
        """get target network actions from all the agents in the MADDPG object """
        target_actions = [ddpg_agent.target_act(obs) for ddpg_agent, obs in zip(self.maddpg_agent, obs_all_agents)]
        return torch.stack(target_actions).squeeze(dim=0)
    def step(self, obs, action, reward, next_obs, done):
        """Save experience in replay memory, and use random sample from buffer to learn."""
        # Save experience / reward
        self.memory.add(obs, action, reward, next_obs, done)
        # Learn, if enough samples are available in memory
        if self.t_step == 0 and self.noise_step>5500: #only after
            if len(self.memory) > self.batch_size:
                #loop here to update many times for a update_every time_step
                for _ in range(self.learn_num):
                    samples = self.memory.sample()
                    for i in range(self.num_agents):
                        self.update(samples,i)
    def reset(self):
        # Reset each agent's exploration-noise process (start of an episode).
        for agent in self.maddpg_agent:
            agent.noise.reset()
    def update(self, samples, agent_number):
        """update the critics and actors of all the agents """
        obs,action,reward,next_obs,done=samples
        agent = self.maddpg_agent[agent_number]
        agent.critic_optimizer.zero_grad()
        target_actions = self.target_act(next_obs.permute([1,0,2]))
        #target_actions shape is 2(agent)*B*2(actions)
        target_actions = target_actions.permute([1,0,2])
        with torch.no_grad():
            q_next=agent.target_critic(next_obs,target_actions)
        # TD target: this agent's reward plus discounted next-Q, zeroed at
        # episode end via the done mask.
        y = reward.permute([1,0])[agent_number].view(-1, 1) + self.discount_factor * q_next * (1 - done.permute([1,0])[agent_number].view(-1, 1))
        q = agent.critic(obs, action)
        # NOTE(review): huber_loss is created but unused — MSE is what is
        # actually optimised below.
        huber_loss = torch.nn.SmoothL1Loss()
        critic_loss = F.mse_loss(q, y.detach())
        critic_loss.backward()
        # torch.nn.utils.clip_grad_norm_(agent.critic.parameters(), 1.) #uncomment this to allow clipping
        agent.critic_optimizer.step()
        agent.actor_optimizer.zero_grad()
        ####-------actor_update---------####
        # Only this agent's actor output keeps gradients; the other agents'
        # actions are detached so the actor update touches one policy.
        q_input = [ self.maddpg_agent[i].actor(ob) if i == agent_number \
                   else self.maddpg_agent[i].actor(ob).detach()
                   for i, ob in enumerate(obs.permute([1,0,2])) ]
        q_input=torch.stack(q_input).squeeze(dim=0).permute([1,0,2])
        actor_loss = -agent.critic(obs, q_input).mean()
        actor_loss.backward()
        agent.actor_optimizer.step()
    def update_targets(self):
        """soft update targets"""
        for ddpg_agent in self.maddpg_agent:
            soft_update(ddpg_agent.target_actor, ddpg_agent.actor, self.tau)
            soft_update(ddpg_agent.target_critic, ddpg_agent.critic, self.tau)
18445820766 | from pymongo import MongoClient
from bson import ObjectId
import os
client = MongoClient('localhost', 27017)
root = client.root
print("Connected to db")
basedir = os.getcwd()
for dir in os.listdir(basedir):
if dir != 'insert-image-names.py':
print(dir)
dbQuery1 = root.grounds.find({'_id': ObjectId(dir)})
grounds = list(dbQuery1)
print(grounds)
images = {}
for subdir in os.listdir(dir):
if os.path.isdir(os.path.join(dir, subdir)):
print(subdir)
images[subdir] = []
for file in os.listdir(os.path.join(dir, subdir)):
print(file)
images[subdir].append(file)
print(images)
root.grounds.update(
{
"_id": ObjectId(dir)
},
{
"$set": {
"images": images
}
}
)
print("Updated: " + dir) | nishank-jain/profile | migrations/insert-image-names.py | insert-image-names.py | py | 759 | python | en | code | 0 | github-code | 50 |
19348907679 | from flask import Flask, jsonify
from flask import request
from flask_cors import CORS
import util.personal_opinion as personal_opinion
# configuration
DEBUG = True
app = Flask(__name__)
app.config.from_object(__name__)
CORS(app, resources={r'/*': {'origins': '*'}})
@app.route('/extract-opinion', methods=['POST'])
def extract_opinion():
    """POST endpoint: run opinion extraction over the JSON body's `msg` text.

    Expects a JSON payload like {"msg": "..."}; returns the result of
    util.personal_opinion.get_opinions serialised as JSON.
    """
    req_data = request.get_json()
    result = personal_opinion.get_opinions(req_data['msg'])
    return jsonify(result)
if __name__ == '__main__':
app.run(host='127.0.0.1', port=5000) | LiamWahahaha/opinion-extraction | server/app.py | app.py | py | 531 | python | en | code | 0 | github-code | 50 |
13449476635 | import numpy as np
import pandas as pd
import streamlit as st
st.set_page_config(page_title='Titanic Survival Prediction App',
layout='wide')
import csv
st.title("App to Predict Survival Chances in Titanic")
def mode(lum):
train = pd.read_csv('C:/Users/rocki/OneDrive/Desktop/mlapp/train.csv')
test = pd.read_csv('C:/Users/rocki/OneDrive/Desktop/mlapp/test.csv')
train.describe(include="all")
train["Age"] = train["Age"].fillna(-0.5)
test["Age"] = test["Age"].fillna(-0.5)
bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]
labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
train['AgeGroup'] = pd.cut(train["Age"], bins, labels = labels)
test['AgeGroup'] = pd.cut(test["Age"], bins, labels = labels)
train["CabinBool"] = (train["Cabin"].notnull().astype('int'))
test["CabinBool"] = (test["Cabin"].notnull().astype('int'))
train = train.drop(['Cabin'], axis = 1)
test = test.drop(['Cabin'], axis = 1)
train = train.drop(['Ticket'], axis = 1)
test = test.drop(['Ticket'], axis = 1)
train = train.fillna({"Embarked": "S"})
combine = [train, test]
for dataset in combine:
dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\.', expand=False)
for dataset in combine:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Capt', 'Col',
'Don', 'Dr', 'Major', 'Rev', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace(['Countess', 'Lady', 'Sir'], 'Royal')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Royal": 5, "Rare": 6}
for dataset in combine:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
mr_age = train[train["Title"] == 1]["AgeGroup"].mode() #Young Adult
miss_age = train[train["Title"] == 2]["AgeGroup"].mode() #Student
mrs_age = train[train["Title"] == 3]["AgeGroup"].mode() #Adult
master_age = train[train["Title"] == 4]["AgeGroup"].mode() #Baby
royal_age = train[train["Title"] == 5]["AgeGroup"].mode() #Adult
rare_age = train[train["Title"] == 6]["AgeGroup"].mode() #Adult
age_title_mapping = {1: "Young Adult", 2: "Student", 3: "Adult", 4: "Baby", 5: "Adult", 6: "Adult"}
for x in range(len(train["AgeGroup"])):
if train["AgeGroup"][x] == "Unknown":
train["AgeGroup"][x] = age_title_mapping[train["Title"][x]]
for x in range(len(test["AgeGroup"])):
if test["AgeGroup"][x] == "Unknown":
test["AgeGroup"][x] = age_title_mapping[test["Title"][x]]
age_mapping = {'Baby': 1, 'Child': 2, 'Teenager': 3, 'Student': 4, 'Young Adult': 5, 'Adult': 6, 'Senior': 7}
train['AgeGroup'] = train['AgeGroup'].map(age_mapping)
test['AgeGroup'] = test['AgeGroup'].map(age_mapping)
train.head()
train = train.drop(['Age'], axis = 1)
test = test.drop(['Age'], axis = 1)
train = train.drop(['Name'], axis = 1)
test = test.drop(['Name'], axis = 1)
sex_mapping = {"male": 0, "female": 1}
train['Sex'] = train['Sex'].map(sex_mapping)
test['Sex'] = test['Sex'].map(sex_mapping)
embarked_mapping = {"S": 1, "C": 2, "Q": 3}
train['Embarked'] = train['Embarked'].map(embarked_mapping)
test['Embarked'] = test['Embarked'].map(embarked_mapping)
for x in range(len(test["Fare"])):
if pd.isnull(test["Fare"][x]):
pclass = test["Pclass"][x] #Pclass = 3
test["Fare"][x] = round(train[train["Pclass"] == pclass]["Fare"].mean(), 4)
#map Fare values into groups of numerical values
train['FareBand'] = pd.qcut(train['Fare'], 4, labels = [1, 2, 3, 4])
test['FareBand'] = pd.qcut(test['Fare'], 4, labels = [1, 2, 3, 4])
#drop Fare values
train = train.drop(['Fare'], axis = 1)
test = test.drop(['Fare'], axis = 1)
from sklearn.model_selection import train_test_split
predictors = train.drop(['Survived', 'PassengerId'], axis=1)
target = train["Survived"]
x_train, x_val, y_train, y_val = train_test_split(predictors, target, test_size = 0.22, random_state = 0)
from sklearn.ensemble import GradientBoostingClassifier
gbk = GradientBoostingClassifier()
gbk.fit(x_train, y_train)
x=pd.DataFrame(lum).T
x.columns=['Pclass','Sex','SibSp','Parch','Embarked','AgeGroup','CabinBool','Title','FareBand']
y=gbk.predict(x)
return y
genre=st.radio(
"Ticket Class",
('1st', '2nd', '3rd'))
if genre == '1st':
a=1
elif genre=='2nd':
a=2
else:
a=3
sex=st.radio("Gender",("Male","Female",))
if sex=="Male":
b=0
else:
b=1
title=st.radio("Title",("Mr", "Miss", "Mrs", "Master", "Royal", "Rare"))
if title=="Mr":
h=1
elif title=="Master":
h=4
elif title=="Miss":
h=2
elif title=="Mrs":
h=3
elif title=="Royal":
h=5
else:
h=6
embarked=st.radio("Port of Embarkation",("Cherbourg", "Queenstown", "Southampton"))
if embarked=="Southampton":
e=1
elif embarked=="Cherbourg":
e=2
else:
e=3
age=st.radio("AgeGroup",('Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior'))
if age=="Baby":
f=1
elif age=="Child":
f=2
elif age=="Teenager":
f=3
elif age=="Student":
f=4
elif age=="Young Adult":
f=5
elif age=="Adult":
f=6
else:
f=7
cabin=st.radio("Cabin Number",("First","Second"))
if cabin=="First":
g=0
else:
g=1
parch=st.radio("No. of parents / children aboard the Titanic",("None", 1, 2, 3, 4,5, 6))
if parch=="None":
d=0
else:
d=parch
sib=st.radio(" No. of siblings / spouses aboard the Titanic",("None", 1,2,3, 4,5))
if sib=="None":
c=0
else:
c=sib
if genre=="1st":
i=1
elif genre=="2nd":
i=2
else:
i=3
values=[a,b,c,d,e,f,g,h,i]
click=st.button("Click to See Survival Chances")
if click:
if mode(values)==0:
st.write("**Less Survival chances**")
else:
st.write("High Survival chances")
def add_bg_from_url():
    """Inject CSS that sets a full-page background image for the Streamlit app."""
    # unsafe_allow_html is required for st.markdown to emit a raw <style> tag.
    st.markdown(
        f"""
        <style>
        .stApp {{
            background-image: url("https://www.thespruceeats.com/thmb/ytOWw19bNbrd7iT0T-xrPISR9ro=/940x0/filters:no_upscale():max_bytes(150000):strip_icc():format(webp)/GettyImages-738790035-5c565bfdc9e77c000102c641.jpg");
            background-attachment:scroll;
            background-size: cover
        }}
        </style>
        """,
        unsafe_allow_html=True
    )
add_bg_from_url()
| ravi3507/titanic_prediction_app | titanic.py | titanic.py | py | 6,860 | python | en | code | 0 | github-code | 50 |
32403932172 | # time complexity = O(nk)
from collections import deque
def max_in_sliding_window_1(array, k):
    """Return the maximum of every length-k window of `array` (brute force, O(n*k)).

    Edge cases preserved from the original: an empty array yields [];
    an array shorter than k yields the single overall maximum.
    """
    if not array:
        return []
    if len(array) < k:
        return [max(array)]
    result = []
    for start in range(len(array) - k + 1):
        # BUG FIX: the original compared each element against the window's
        # *first* element instead of the running maximum, so it returned the
        # last element greater than array[start] rather than the true max.
        result.append(max(array[start:start + k]))
    return result
def max_in_sliding_window(array, k):
    """Return the maximum of every length-k window using a monotonic deque, O(n)."""
    deq = deque()  # indices of max candidates; their values are decreasing
    result = []
    for i, num in enumerate(array):
        # BUG FIX: the original indexed the module-level global `arr` here
        # instead of the `array` parameter, so it only worked when called
        # from the demo in __main__.
        while deq and array[deq[-1]] < num:
            deq.pop()
        # Evict the front index once it falls outside the current window.
        if deq and i - deq[0] >= k:
            deq.popleft()
        deq.append(i)
        result.append(array[deq[0]])
    # The first k-1 entries correspond to windows that are not yet full.
    return result[k - 1:]
if __name__ == '__main__':
    # Smoke test: on this sorted ramp both implementations print
    # " Result : [3, 4, 5, 6, 7]".
    arr = [1, 2, 3, 4, 5, 6, 7]
    result = max_in_sliding_window_1(arr, 3)
    print(f" Result : {result}")
    result = max_in_sliding_window(arr, 3)
    print(f" Result : {result}")
| sudhirsinghshekhawat/problem_solving | algoexpert/maxinslidingwindow.py | maxinslidingwindow.py | py | 974 | python | en | code | 0 | github-code | 50 |
18223839390 | def palabras(lista):
lista.sort()
texto = "Las palabras ordenadas son:\n"
for i in range (0,len(lista)):
if i != len(lista)-1:
texto = texto + lista[i] + ", "
else:
texto = texto + lista[i]
print(texto)
if __name__ == "__main__":
    # Collect words from stdin until the user submits an empty line,
    # then hand the batch to palabras() for sorting and display.
    lista = []
    palabra = input("Ingrese palabra: (ingreso vacio terminara la lista)")
    while palabra != "":
        lista.append(palabra)
        palabra = input("Ingrese palabra: ")
    palabras(lista)
| PedroArr/info175_-Pedro_Arriagada- | Ejercicio2.py | Ejercicio2.py | py | 435 | python | es | code | 0 | github-code | 50 |
42372133362 | import tensorflow as tf
import numpy as np
import argparse
import sys
from flask import Flask, request, Response, jsonify
app = Flask(__name__)
predict = None
def pre_processing(pixels):
    """Pass pixels through unchanged and fabricate one-hot labels for them.

    Every sample is labelled as class 0 out of 7 (the model's feed signature
    requires a label tensor). The standardisation steps of the original
    pipeline remain disabled, so `pixels` is returned untouched.
    """
    n_samples = pixels.shape[0]
    label_vectors = np.zeros((n_samples, 7))
    label_vectors[:, 0] = 1
    return pixels, label_vectors.astype(np.uint8)
@app.route('/')
def main_page():
    """Serve the static demo page (index.html read from the working directory)."""
    return Response(open('index.html').read(), mimetype="text/html")
@app.route('/face', methods=['POST'])
def get_face():
    """POST endpoint: run the loaded SavedModel over pixel data sent as JSON.

    The JSON body is converted to an ndarray of pixels; dummy one-hot labels
    are supplied because the model's feed signature requires a "y_" input.
    Returns the raw "y" output tensor as a JSON list (argmax is left
    commented out, so per-class scores are returned).
    """
    # global predict
    face_data = np.array(request.json)
    pixels, labels = pre_processing(face_data)
    # `predict` is the tf.contrib predictor created in __main__.
    prediction = predict({"x": pixels, "y_": labels})["y"]
    # prediction = np.argmax(prediction, axis=1)
    return jsonify(prediction.tolist())
@app.after_request
def after_request(response):
    """Attach permissive CORS headers to every outgoing response."""
    cors_headers = (
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Headers', 'Content-Type,Authorization'),
        ('Access-Control-Allow-Methods', 'GET,POST'),
    )
    for name, value in cors_headers:
        response.headers.add(name, value)
    return response
if __name__=="__main__":
    # global predict
    # CLI: `python predict.py -model <SavedModel dir>` loads the model,
    # then starts the Flask dev server.
    parser = argparse.ArgumentParser()
    parser.add_argument("-model", help="the SavedModel path")
    args = parser.parse_args()
    model_path = args.model
    if (not model_path):
        print("no model path")
        sys.exit()
    # Module-level `predict` is the callable consumed by the /face route.
    predict = tf.contrib.predictor.from_saved_model(model_path)
    app.run(debug=True)
| Whoseop-Song/Emotion-Reader-Service | predict.py | predict.py | py | 1,764 | python | en | code | 0 | github-code | 50 |
11310958908 | import numpy as np
def dimensional_stacking(data, x_dims, y_dims):
    """
    Stack an n-dimensional ndarray in two dimensions according to the
    dimensional ordering expressed by x_dims and y_dims.

    See LeBlanc, Ward, Wittels 1990, 'Exploring N-Dimensional Databases'.

    * data: n-dimensional ndarray (e.g. data.shape=[4,5,6])
    * x_dims: dimensions to be stacked on the x axis, big-endian style
      ('slowest' dimension first, 'fastest' dimension last), e.g. x_dims=[2,0]
    * y_dims: dimensions to be stacked on the y axis, big-endian,
      e.g. y_dims=[1]

    Returns a 2-D array of shape (prod of y-dim sizes, prod of x-dim sizes).
    """
    # BUG FIX: the bare builtin `reduce` does not exist in Python 3; use
    # math.prod for the axis-size products instead.
    from math import prod
    new_x_length = prod(data.shape[idx] for idx in x_dims)
    new_y_length = prod(data.shape[idx] for idx in y_dims)
    new_shape = (new_y_length, new_x_length)
    # y dims become the leading (row) axes, x dims the trailing (column) axes.
    dim_order = y_dims + x_dims
    stacked_data = data.transpose(*dim_order).reshape(*new_shape)
    return stacked_data
| epiasini/dimstack | core.py | core.py | py | 1,009 | python | en | code | 3 | github-code | 50 |
536115605 | import time
import requests
import hashlib
import hmac
import datetime as dt
import os
api_key = os.getenv('binance_api_key')
secret_key = os.getenv('binance_secret_key')
BALANCE_URL = 'https://fapi.binance.com/fapi/v2/balance?{}&signature={}'
ACCOUNT_URL = 'https://fapi.binance.com/fapi/v2/account?{}&signature={}'
POSITIONS_URL = 'https://fapi.binance.com//fapi/v2/positionRisk?{}&signature={}'
KLINES_URL = 'https://fapi.binance.com/fapi/v1/continuousKlines?limit=5&pair=BTCUSDT&contractType=PERPETUAL&interval=1m'
FUNDING_URL = 'https://fapi.binance.com//fapi/v1/income?{}&signature={}'
TICKER_URL = 'https://api.binance.com/api/v3/ticker/price?symbol='
def binancerequest(url):
    """Issue a signed GET request to a Binance endpoint.

    `url` must be a format string with two placeholders: the query string and
    its HMAC-SHA256 signature (Binance's signed-endpoint scheme). Returns the
    raw `requests.Response`.
    """
    # Binance expects the request timestamp in milliseconds.
    timestamp = str(int(time.time_ns() / 1000000))
    query = 'timestamp=' + timestamp
    # Sign the full query string with the account secret key.
    signature = hmac.new(bytes(secret_key, 'utf-8'), bytes(query, 'utf-8'), hashlib.sha256).hexdigest()
    url = url.format(query, signature)
    headers = {
        'X-MBX-APIKEY': api_key
    }
    response = requests.request("GET", url, headers=headers)
    return response
def fetchpositions():
    """Return open futures positions, sorted by unrealised PnL (ascending)."""
    response = binancerequest(POSITIONS_URL)
    positions = [p for p in response.json() if float(p['positionAmt']) != 0]
    positions.sort(key=lambda p: float(p['unRealizedProfit']))
    return positions
def showpositions():
    """Build a fixed-width text table of open positions.

    Columns: "symbol@mark-price" (32 chars), position size (10 chars),
    unrealised PnL. Display is currently disabled (print commented out), so
    the function only fetches the data and builds the string.
    """
    rows = ''
    for position in fetchpositions():
        symbol_col = fillspace(position['symbol'] + '@' + roundoff(position['markPrice'], 3), 32)
        size_col = fillspace(position['positionAmt'], 10)
        pnl_col = roundoff(position['unRealizedProfit'], 2)
        rows = rows + symbol_col + size_col + pnl_col + '\n'
    # print(rows)
def fetchpnl():
    """Return the account's unrealised cross PnL on the USDT asset as a float."""
    balances = binancerequest(BALANCE_URL).json()
    usdt_rows = [b for b in balances if b['asset'] == 'USDT']
    return float(usdt_rows[0]['crossUnPnl'])
def fundingfee():
    """Sum all FUNDING_FEE entries from the futures income history.

    Returns (total_fee, first_timestamp) where first_timestamp is the
    human-readable local time of the first funding entry encountered in the
    response ('' when there are none).
    """
    response = binancerequest(FUNDING_URL).json()
    totalfundfee = 0
    count = 0
    # print(len(response))
    firsttimestamp = ''
    for entry in response:
        if entry['incomeType'] == 'FUNDING_FEE':
            totalfundfee = totalfundfee + float(entry['income'])
            count = count + 1
            if count == 1:
                # API timestamps are milliseconds since the epoch.
                firsttimestamp = dt.datetime.fromtimestamp(int(entry['time']) / 1000).strftime('%Y-%m-%d %H:%M:%S')
    return totalfundfee, firsttimestamp
def fillspace(text: str, maxlen: int) -> str:
    """Right-pad `text` with spaces up to `maxlen` characters.

    Text already at or beyond `maxlen` is returned unchanged, matching the
    original loop's behaviour. Uses str.ljust instead of appending one
    space at a time.
    """
    return text.ljust(maxlen)
def volumetracker():
    """Inspect the last five 1-minute BTCUSDT klines for a volume spike.

    Returns (volume, open, lastprice) of the in-progress candle when its
    volume exceeds 1000; otherwise (0, 0, 0).
    """
    data = binancerequest(KLINES_URL).json()
    totallength = len(data)
    sumofvolumes = 0
    # Average volume over the closed candles (all but the most recent one).
    for entry in data[:-1]:
        sumofvolumes = sumofvolumes + float(entry[5])
    average = sumofvolumes / (totallength - 1)
    currentvolume = float(data[totallength - 1][5])
    lastprice = 0
    open = 0
    volume = 0
    # NOTE(review): the 2x-average condition is disabled, leaving only the
    # absolute 1000 threshold; `average` is then unused.
    # Kline fields used: index 1 = open price, 4 = close price, 5 = volume.
    if currentvolume > 1000: # and currentvolume >2*average:
        open = float(data[totallength - 1][1])
        lastprice = float(data[totallength - 1][4])
        volume = currentvolume
    return volume, open, lastprice
def acccountinfo():
    """Print and return (maintMargin, marginBalance) of the futures account.

    Both values are handled as the raw strings from the API response (they
    are concatenated with '+'), not converted to floats. The misspelled
    function name is kept as-is for existing callers.
    """
    account = binancerequest(ACCOUNT_URL).json()
    maintMargin = account['totalMaintMargin']
    marginBalance= account['totalMarginBalance']
    print(maintMargin+' '+marginBalance)
    return maintMargin,marginBalance
def ticker(symbol: str):
    """Fetch the spot-price JSON for `symbol` (upper-cased) from the ticker endpoint.

    The URL contains no format placeholders, so the query/signature built by
    binancerequest are dropped by str.format; only the API-key header is
    sent, which suffices for this public endpoint.
    """
    response = binancerequest(TICKER_URL + symbol.upper())
    # print(response.status_code)
    return response.json()
def roundoff(number: str, precision: int) -> str:
    """Truncate the numeric string `number` to `precision` decimal places.

    Truncates rather than rounds, matching the original slicing behaviour.
    Robustness fix: a string without a decimal point is returned unchanged
    instead of raising ValueError from str.index.
    """
    if '.' not in number:
        return number
    return number[:number.index('.') + precision + 1]
acccountinfo()
| 99products/MyBinanceBot | mybinance.py | mybinance.py | py | 3,726 | python | en | code | 1 | github-code | 50 |
71994980635 | num = [[],[]]
# Read six integers and bucket them: num[0] collects evens, num[1] odds.
# (The unused placeholder variable `p` was removed.)
num = [[], []]
for i in range(0, 6):
    no = int(input('Valor:'))
    if no % 2 == 0:
        num[0].append(no)
    else:
        num[1].append(no)
# BUG FIX: `sorted(num)` compared the two sub-lists against each other
# instead of sorting the values; flatten before sorting so every entered
# number is shown in order.
print(f'Todos os valores {sorted(num[0] + num[1])}')
print(f'Numeros pares {sorted(num[0])}')
print(f'Numeros ímpares {sorted(num[1])}')
| ArthPx/learning-code | d 85.py | d 85.py | py | 292 | python | pt | code | 0 | github-code | 50 |
5916683078 | import requests
import json
# Fetch the latest SpaceX launch and poke at its Reddit discussion link.
ENDPOINT: str = 'https://api.spacexdata.com/v5/launches/latest'
my_request = requests.get(ENDPOINT)
status_code: int = my_request.status_code
# Decoded JSON payload of the launch object (subscripted as a dict below).
message: dict = json.loads(my_request.content)
if (status_code == 200):
    print("THE API WORKS")
else:
    print(':*(')
print(message)
print(message['links']['reddit']['id'])
# NOTE(review): concatenating the reddit id onto the /launches/latest URL is
# unlikely to form a valid endpoint, and this second response is never used
# -- confirm the intended request.
id_needed: int = message['links']['reddit']['id']
my_request = requests.get(ENDPOINT+str(id_needed))
17904977429 | # *Input a collection of employee names with their salary, calculate average salary in organisation,
# get the employee with highest salary, get the employee with lowest salary print results.
# Seed data: four employees and their salaries.
employee = {'Anny': 60000, 'Avo': 50000, 'Vars': 25000, 'Sed': 100000}
limit = int(input('Input a limit count for employee`s length: '))
salary = []
average = 0
highest = 0  # NOTE(review): never used afterwards
while len(employee) < limit:
    employee[input('Add a name of employee: ')] = int(input('Add a salary: '))
    # NOTE(review): decrementing `limit` while the dict also grows makes the
    # loop stop before `limit` employees exist -- likely unintended.
    limit -= 1
for sal in employee.values():
    salary.append(sal)
salary.sort()
# Integer (floor) average of all salaries.
average = sum(salary)//len(salary)
print('Average salary in organisation is:', average)
# NOTE(review): these print the highest/lowest *salary values*, not the
# employee names the messages suggest.
print('the employee with highest salary is:', salary[-1])
print('the employee with lowest salary is:', salary[0])
40825386129 | import os
import collections
import random
import numpy as np
import torch
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from typing import Optional, List
from agents.spectral.configs import get_laprepr_args
from agents.spectral.utils import torch_tools, timer_tools, summary_tools
from agents.spectral.modules.episodic_replay_buffer import EpisodicReplayBuffer
from agents.spectral.modules.networks import ReprNetMLP
# from agents.spectral.learners.option_wrapper import Option
def l2_dist(x1, x2, generalized):
    """Squared L2 distance between paired rows of x1 and x2.

    When `generalized`, embedding dimension k is weighted by d-k (descending
    weights d, d-1, ..., 1), so leading dimensions contribute more.
    """
    if not generalized:
        return (x1 - x2).pow(2).sum(-1)
    d = x1.shape[1]
    weight = np.arange(d, 0, -1).astype(np.float32)
    weight = torch_tools.to_tensor(weight, x1.device)
    # FIX: `.T` on a 1-D tensor is a deprecated no-op in modern PyTorch;
    # a plain matrix-vector product gives the identical (n,) result
    # without the warning.
    return (x1 - x2).pow(2) @ weight
def pos_loss(x1, x2, generalized=False):
    """Attractive loss: mean (optionally dimension-weighted) squared distance
    over positive pairs."""
    pair_dists = l2_dist(x1, x2, generalized)
    return pair_dists.mean()
# used in the original code
# def _rep_loss(inprods, n, k, c, reg):
#
# norms = inprods[torch.arange(n), torch.arange(n)]
# part1 = inprods.pow(2).sum() - norms.pow(2).sum()
# part1 = part1 / ((n - 1) * n)
# part2 = - 2 * c * norms.mean() / k
# part3 = c * c / k
# # regularization
# # if reg > 0.0:
# # reg_part1 = norms.pow(2).mean()
# # reg_part2 = - 2 * c * norms.mean()
# # reg_part3 = c * c
# # reg_part = (reg_part1 + reg_part2 + reg_part3) / n
# # else:
# # reg_part = 0.0
# # return part1 + part2 + part3 + reg * reg_part
# return part1 + part2 + part3
def _rep_loss(inprods, n, k, c, reg):
norms = inprods[torch.arange(n), torch.arange(n)]
part1 = (inprods.pow(2).sum() - norms.pow(2).sum()) / ((n - 1) * n)
part2 = - 2 * c * norms.mean()
part3 = c * c * k
return part1 + part2 + part3
def neg_loss(x, c=1.0, reg=0.0, generalized=False):
    """Repulsive (orthogonality) loss over a batch of embeddings.

    x: (n, d) batch of embedding vectors.
    Plain mode applies _rep_loss once to the full Gram matrix; generalized
    mode sums _rep_loss over every prefix of the embedding dims (k = 1..d),
    costing d Gram products. `reg` is forwarded but unused downstream.
    """
    n = x.shape[0]
    d = x.shape[1]
    if not generalized:
        inprods = x @ x.T
        return _rep_loss(inprods, n, d, c, reg)
    # Python float accumulator: the first += promotes it to a tensor, so
    # gradients still flow through the sum.
    tot_loss = 0.0
    # tot_loss = torch.tensor(0.0, device=x.device, requires_grad=True) # danger
    for k in range(1, d+1):
        inprods = x[:, :k] @ x[:, :k].T
        tot_loss += _rep_loss(inprods, n, k, c, reg)
    return tot_loss
class LapReprLearner:
def __init__(self, common_args, env, agent_id, seed):
random.seed(0)
np.random.seed(seed)
torch.manual_seed(seed)
self.args = get_laprepr_args(common_args)
self.env = env
self.agent_id = agent_id
# NN
if self.args.use_position_only:
self._repr_fn = ReprNetMLP(self.args.obs_pos_dim, n_layers=self.args.lap_n_layers, n_units=self.args.lap_n_units, d=self.args.d)
else:
self._repr_fn = ReprNetMLP(self.args.obs_dim, n_layers=self.args.lap_n_layers, n_units=self.args.lap_n_units, d=self.args.d)
self._repr_fn.to(device=self.args.device)
# optimizer
opt = getattr(optim, self.args.lap_opt_args_name)
self._optimizer = opt(self._repr_fn.parameters(), lr=self.args.lap_opt_args_lr)
# replay_buffer
self._replay_buffer = EpisodicReplayBuffer(max_size=self.args.lap_replay_buffer_size)
self._global_step = 0
self._train_info = collections.OrderedDict()
# create ckpt save dir and log dir
self.saver_dir = os.path.join(self.args.model_dir, "agent_{}".format(self.agent_id))
if not os.path.exists(self.saver_dir):
os.makedirs(self.saver_dir)
self.log_dir = os.path.join(self.args.log_dir, "agent_{}".format(self.agent_id))
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
self.writer = SummaryWriter(self.log_dir)
def _collect_samples(self):
# start actors, collect trajectories from random actions
print('Start collecting samples for Agent {}.'.format(self.agent_id))
timer = timer_tools.Timer()
# collect initial transitions
total_n_steps = 0
collect_batch = 1000
while total_n_steps < self.args.n_samples:
# cur_obs = self.env.reset(random_init=True, is_sample=True) # random start points for the offline setting
cur_obs = self.env.reset(random_init=True, is_sample=False)
# print(cur_obs[:2])
if self.args.use_position_only:
cur_obs = cur_obs[:2]
epi_len = 0
episode = []
while True:
action = self.env.action_space.sample()
next_obs, reward, done, _ = self.env.step(action)
if self.args.use_position_only:
next_obs = next_obs[:2]
# redundant info
transition = {'s': cur_obs, 'a': action, 'r': reward, 'next_s': next_obs, 'done': done}
cur_obs = next_obs
epi_len += 1
episode.append(transition)
# log
total_n_steps += 1
if (total_n_steps + 1) % collect_batch == 0:
print('({}/{}) steps collected.'.format(total_n_steps + 1, self.args.n_samples))
if epi_len >= self.episode_limit:
break
final_transition = {'s': cur_obs, 'a': self.env.action_space.sample(), 'r': 0.0, 'next_s': cur_obs, 'done': True}
episode.append(final_transition) # to make sure the last state in the episodes can be sampled in the future process
self._replay_buffer.add_steps(episode)
time_cost = timer.time_cost()
print('Data collection for Agent {} finished, time cost: {}s'.format(self.agent_id, time_cost))
def _collect_samples_with_options(self, option_list, agent_num: int):
# start actors, collect trajectories from random actions
print('Start collecting hierarchical samples for Agent {}.'.format(self.agent_id))
timer = timer_tools.Timer()
# collect initial transitions
total_n_steps = 0
collect_batch = 1000
action_space = len(option_list) + 1
while total_n_steps < self.args.n_samples:
cur_obs = self.env.reset(random_init=True, is_sample=False) # random start points for the offline setting
if self.args.use_position_only:
cur_obs = cur_obs[:2]
epi_len = 0
episode = []
last_high_act = 0
option_duration = -1
while True:
if (option_duration >= self.args.option_duration_limit - 1) or (last_high_act == 0) \
or (option_list[last_high_act-1].is_term_true(state=[cur_obs.copy() for _ in range(agent_num)], agent_id=self.agent_id)):
option_duration = -1
if option_duration == -1:
avail_actions = []
for i in range(action_space): # a little wrong in logic but does not hurt much
# if i == 0:
avail_actions.append(i)
# else:
# if option_list[i-1].is_init_true(state=[cur_obs.copy() for _ in range(agent_num)], agent_id=self.agent_id):
# avail_actions.append(i)
high_level_act = random.choice(avail_actions)
last_high_act = high_level_act
else:
high_level_act = last_high_act
option_duration += 1
if high_level_act == 0:
action = self.env.action_space.sample()
else:
action = option_list[high_level_act-1].act(state=[cur_obs.copy() for _ in range(agent_num)], agent_id=self.agent_id)
next_obs, reward, done, _ = self.env.step(action)
if self.args.use_position_only:
next_obs = next_obs[:2]
# redundant info
transition = {'s': cur_obs, 'a': action, 'r': reward, 'next_s': next_obs, 'done': done}
cur_obs = next_obs
epi_len += 1
episode.append(transition)
# log
total_n_steps += 1
if (total_n_steps + 1) % collect_batch == 0:
print('({}/{}) steps collected.'.format(total_n_steps + 1, self.args.n_samples))
if epi_len >= self.episode_limit:
break
final_transition = {'s': cur_obs, 'a': self.env.action_space.sample(), 'r': 0.0, 'next_s': cur_obs, 'done': True}
episode.append(final_transition) # to make sure the last state in the episodes can be sampled in the future process
self._replay_buffer.add_steps(episode)
time_cost = timer.time_cost()
print('Hierarchical data collection for Agent {} finished, time cost: {}s'.format(self.agent_id, time_cost))
def train(self, option_list, agent_num):
self.episode_limit = self.env.get_env_info()["episode_limit"] # 1000
if option_list is None:
self._collect_samples()
else:
self._collect_samples_with_options(option_list, agent_num)
# learning begins
timer = timer_tools.Timer()
timer.set_step(0)
for step in range(self.args.lap_train_steps):
assert step == self._global_step
self._train_step()
# save
if (step + 1) % self.args.lap_save_freq == 0:
saver_path = os.path.join(self.saver_dir, 'model_{}.ckpt'.format(step+1))
torch.save(self._repr_fn.state_dict(), saver_path)
# print info
if step == 0 or (step + 1) % self.args.lap_print_freq == 0:
steps_per_sec = timer.steps_per_sec(step)
print('Training steps per second: {:.4g}.'.format(steps_per_sec))
summary_str = summary_tools.get_summary_str(step=self._global_step, info=self._train_info)
print(summary_str)
if self.args.visualize:
self.visualize_embeddings(sample_num=self.args.ev_n_samples, interval=self.args.ev_interval, step=step)
# save the final laprepr model
saver_path = os.path.join(self.saver_dir, 'final_model.ckpt')
torch.save(self._repr_fn.state_dict(), saver_path)
# log the time cost
time_cost = timer.time_cost()
print('Training finished, time cost {:.4g}s.'.format(time_cost))
def _train_step(self):
train_batch = self._get_train_batch()
loss = self._build_loss(train_batch)
self._optimizer.zero_grad()
loss.backward()
self._optimizer.step()
self._global_step += 1
def _get_train_batch(self): # how will the discount influence the performance?
s1, s2 = self._replay_buffer.sample_steps(self.args.lap_batch_size, mode='pair', discount=self.args.lap_discount)
s_neg, _ = self._replay_buffer.sample_steps(self.args.lap_batch_size, mode='single')
s1, s2, s_neg = map(self._get_obs_batch, [s1, s2, s_neg])
batch = {}
batch['s1'] = self._tensor(s1)
batch['s2'] = self._tensor(s2)
batch['s_neg'] = self._tensor(s_neg)
return batch
def _build_loss(self, batch): # modification
s1 = batch['s1']
s2 = batch['s2']
s_neg = batch['s_neg']
s1_repr = self._repr_fn(s1)
s2_repr = self._repr_fn(s2)
s_neg_repr = self._repr_fn(s_neg)
loss_positive = pos_loss(s1_repr, s2_repr, generalized=self.args.generalized)
loss_negative = neg_loss(s_neg_repr, c=self.args.c_neg, reg=self.args.reg_neg, generalized=self.args.generalized)
assert loss_positive.requires_grad and loss_negative.requires_grad # danger
loss = loss_positive + self.args.w_neg * loss_negative
info = self._train_info
info['loss_pos'] = loss_positive.item()
info['loss_neg'] = loss_negative.item()
info['loss_total'] = loss.item()
summary_tools.write_summary(self.writer, info=info, step=self._global_step)
return loss
def _get_obs_batch(self, steps): # which way is better for spectral clustering?
if self.args.use_position_only:
obs_batch = [s[:2] for s in steps]
else:
obs_batch = steps
return np.stack(obs_batch, axis=0)
def _tensor(self, x):
return torch_tools.to_tensor(x, self.args.device)
def _get_pair_embeddings(self, sample_num, interval):
pair_embeddings = [[], []]
with torch.no_grad(): # danger
cur_idx = 0
while cur_idx < sample_num:
next_idx = min(cur_idx + interval, sample_num)
s1, s2 = self._replay_buffer.sample_steps(next_idx-cur_idx, mode='pair', discount=self.args.lap_discount)
s1, s2 = map(self._get_obs_batch, [s1, s2])
s1, s2 = map(self._tensor, [s1, s2]) # danger
s1_repr = self._repr_fn(s1)
s2_repr = self._repr_fn(s2)
pair_embeddings[0] += s1_repr.cpu().tolist()
pair_embeddings[1] += s2_repr.cpu().tolist()
cur_idx = next_idx
pair_embeddings = np.array(pair_embeddings)
assert pair_embeddings.shape[1] == sample_num
return pair_embeddings
def get_eigenvalues(self): # important and dangerous; time complexity: d|S| # check!
print('Start estimating eigenvalues for Agent {}.'.format(self.agent_id))
timer = timer_tools.Timer()
self.eigenvalue_list = []
d_max = self.args.d
pair_embeddings = self._get_pair_embeddings(sample_num=self.args.ev_n_samples, interval=self.args.ev_interval) # np.ndarray: [2, |S|, d]
assert pair_embeddings.shape[2] == d_max
assert self.args.generalized
for k in range(d_max):
# danger
k_value = 0.5 * (np.square(pair_embeddings[0][:, k] - pair_embeddings[1][:, k])).mean()
self.eigenvalue_list.append(k_value)
time_cost = timer.time_cost()
print('Eigenvalues estimating finished, time cost: {}s, generalized: {}.'.format(time_cost, self.args.generalized))
print("The eigenvalue list for Agent {} is {}!!!".format(self.agent_id, self.eigenvalue_list))
return self.eigenvalue_list
def get_embedding_optimum(self, sample_num, interval, dim, with_degree):
data_input = self._replay_buffer.get_all_steps(max_num=sample_num)
obs_input = self._get_obs_batch(data_input)
obs_input = self._tensor(obs_input) # maybe too much for the gpu?
data_size = int(obs_input.shape[0])
embeddings = []
with torch.no_grad(): # danger
cur_idx = 0
while cur_idx < data_size:
next_idx = min(cur_idx + interval, data_size)
data_segment = obs_input[cur_idx:next_idx, :]
raw_embedding_segment = self._repr_fn(data_segment)
if with_degree:
embedding_segment = raw_embedding_segment[:, 0] * raw_embedding_segment[:, dim]
else:
embedding_segment = raw_embedding_segment[:, dim]
embeddings = embeddings + embedding_segment.cpu().detach().clone().tolist()
cur_idx = next_idx
embeddings = np.array(embeddings)
assert embeddings.shape[0] == data_size
embeddings = np.around(embeddings, 6)
min_idx = np.argmin(embeddings) # TODO: there may be a few points related to the optimum, which can be further filtered based on the goal location
max_idx = np.argmax(embeddings)
return [(data_input[min_idx], embeddings[min_idx]), (data_input[max_idx], embeddings[max_idx])]
def get_embedding(self, data_input, dim, with_degree):
obs_input = self._get_obs_batch([data_input])
obs_input = self._tensor(obs_input) # maybe too much for the gpu?
with torch.no_grad():
raw_embedding_segment = self._repr_fn(obs_input)
if with_degree:
embedding_segment = raw_embedding_segment[:, 0] * raw_embedding_segment[:, dim]
else:
embedding_segment = raw_embedding_segment[:, dim]
embedding = np.around(embedding_segment.cpu().detach().clone().numpy()[0], 6)
return embedding
def visualize_embeddings(self, sample_num, interval, step, dir='./agents/spectral/visualization'):
import matplotlib.pyplot as plt
if not os.path.exists(dir):
os.makedirs(dir)
data_input = self._replay_buffer.get_all_steps(max_num=sample_num)
obs_input = self._get_obs_batch(data_input)
obs_input = self._tensor(obs_input) # maybe too much for the gpu?
data_size = int(obs_input.shape[0])
embeddings = []
with torch.no_grad(): # danger
cur_idx = 0
while cur_idx < data_size:
next_idx = min(cur_idx + interval, data_size)
data_segment = obs_input[cur_idx:next_idx, :]
raw_embedding_segment = self._repr_fn(data_segment)
embeddings = embeddings + raw_embedding_segment.cpu().detach().clone().tolist()
cur_idx = next_idx
embeddings = np.array(embeddings)
assert embeddings.shape[0] == data_size
embeddings = np.around(embeddings, 6)
for dim in range(2):
axis_x = np.array(data_input)[:, 0]
axis_y = np.array(data_input)[:, 1]
value = embeddings[:, dim]
plt.figure()
plt.scatter(x=axis_x, y=axis_y, c=value, cmap="viridis", alpha=0.3)
plt.colorbar()
plt.savefig(dir + '/'+ 'step_{}_agent_{}_embedding_{}.png'.format(step, self.agent_id, dim))
| LucasCJYSDL/Scalable_MAOD_based_on_KP | continuous/MADO_n_agent_force/agents/spectral/learners/laprepr.py | laprepr.py | py | 17,869 | python | en | code | 0 | github-code | 50 |
3212200237 | from flask import render_template, redirect, session, url_for, request
from flask.views import View
from src.models import User
from src.config import db, app, mail, lang
from flask_login import login_user, current_user, login_required
from wtforms.validators import InputRequired
from src.models import User, Participants, Event, Divisions, Accompanying, unique_str
from src.handlers.views import autherntication_required
from src.email_sending.email_utils import Code_email
from .forms import NewDeclarationForm
def send_invitation_mail(particip, form=None):
    """Render and send the invitation e-mail (with code image) for a participant.

    The mail is addressed to the participant and, when set, also to their
    accompanying person.
    """
    mail = Code_email(form)
    # mail.text_body = f"your activation link is: http://{url}"
    # Deep link back into the app carrying the participant's invitation token.
    link = f"{app.config['APPLICATION_ROOT']}{url_for('invitation.get')}?invitation={particip.invitation_string}"
    # External service renders the link as a scannable code image.
    img = f"https://api.participantserver.com/v1/create-participant-code/?data={link};size={app.config['participant_CODE_RES']}"
    print(img)
    print("mail_init")
    html_body = render_template("invitation_mail.html", particip=particip, link=link, img=img)
    print(html_body)
    mail.html_body = html_body
    recipients = [particip.email]
    if particip.accompanying_person_email:
        recipients.append(particip.accompanying_person_email)
    mail.send(recipients=recipients)
def create():
    """Process a participant's declaration form.

    The participant row is looked up by the ``declaration_string`` query
    parameter.  Submitted values replace the invited values while the
    previous ones are preserved in the ``*_by_user`` columns.  When all
    accompanying-person fields are filled in, an ``Accompanying`` row is
    created and linked.  On success the view honours an optional ``next``
    redirect target; otherwise it redirects back to the declaration page.
    """
    declaration_string = request.args.get("declaration_string")
    form = NewDeclarationForm()
    # Name and e-mail become mandatory for a declaration submission.
    form.first_name.validators.append(InputRequired())
    form.last_name.validators.append(InputRequired())
    form.email.validators.append(InputRequired())
    # NOTE(review): the select choices are fed a Divisions query joined
    # through the participants' event — presumably WTForms iterates it
    # lazily; confirm the join actually restricts to this event.
    division = Divisions.query\
        .join(Event, Divisions.event_id == Event.id)\
        .join(Participants, Participants.event_id == Event.id)
    form.division.choices = division
    print(form.division.data)
    if form.validate_on_submit():
        participant = Participants.query.filter_by(declaration_string=declaration_string).first()
        # Guard first: the original code assigned to ``participant`` fields
        # before checking it, so an unknown declaration_string raised
        # AttributeError (HTTP 500) instead of simply redirecting.
        if participant:
            if form.first_name.data:
                # BUGFIX: was a self-assignment
                # (``first_name_by_user = first_name_by_user``); preserve the
                # previous value exactly like the last_name/email branches.
                participant.first_name_by_user = participant.first_name
                participant.first_name = form.first_name.data
            if form.last_name.data:
                participant.last_name_by_user = participant.last_name
                participant.last_name = form.last_name.data
            if form.email.data:
                participant.email_by_user = participant.email
                participant.email = form.email.data
            if form.personal_data.data:
                participant.personal_data = form.personal_data.data
            if form.car_park.data:
                participant.car_park = form.car_park.data
            if form.division.data:
                participant.division_id = Divisions.query.filter_by(division_name=form.division.data).first().id
            acc = None
            if form.accompanying_person_first_name.data != "" and \
                    form.accompanying_person_last_name.data != "" and \
                    form.accompanying_person_email.data != "":
                # All three accompanying fields supplied: create the companion.
                acc = Accompanying()
                acc.first_name = form.accompanying_person_first_name.data
                acc.last_name = form.accompanying_person_last_name.data
                acc.invitation_string = unique_str(Accompanying, 64, field="invitation_string")
                acc.email = form.accompanying_person_email.data
                acc.event_id = participant.event_id
            else:
                # NOTE(review): attribute name looks misspelled
                # (``participants_accomapnying_id``) — verify it matches the
                # Participants model before renaming.
                participant.participants_accomapnying_id = False
            participant.declarated = True
            if acc:
                participant.accompanying = acc
            db.session.add(participant)
            db.session.commit()
            # TODO: invitation mail currently disabled.
            # send_invitation_mail(particip=participant, form=form)
        next_url = request.args.get("next")
        if next_url:
            return redirect(next_url)
    return redirect(url_for("declarate.get", declaration_string=declaration_string))
# | invite-me/invite.me | src/declarate/create/views.py | views.py | py | 3,651 | python | en | code | 0 | github-code | 50 |
8652550275 | import sys
sys.stdin = open("input3.txt", "r")
from pandas import DataFrame
# 기본 남 북 동 서
# 0 1 3 0 0
# 4 1 5 4 2 5 4 0 5 1 5 3 3 4 1
# 2 3 1 2 2
# 3 0 2 4 5
def move(go, dice):
    """Return the dice face list after rolling one step in direction *go*.

    ``go``: 1 = east, 2 = west, 3 = south, anything else = north (the
    problem's direction encoding).  ``dice`` is a list of six face values;
    a new list is returned and the input is left untouched.  The driver
    code below treats index 1 as the bottom face and index 3 as the top.
    """
    # Each tuple maps new-face-index -> old-face-index for a single roll.
    roll = {
        1: (0, 5, 2, 4, 1, 3),  # east
        2: (0, 4, 2, 5, 3, 1),  # west
        3: (1, 2, 3, 0, 4, 5),  # south
    }
    order = roll.get(go, (3, 0, 1, 2, 4, 5))  # default covers north
    return [dice[i] for i in order]
# BOJ 14499 driver: roll a dice over an N x M board following K commands.
N, M, x, y, K = map(int, input().split())
IN = [list(map(int, input().split())) for _ in range(N)]
DIR = list(map(int, input().split()))
dice = [0] * 6  # six face values, all start at 0
r, c = x, y
drc = [0, (0, 1), (0, -1), (-1, 0), (1, 0)]  # index 0 unused; 1=E, 2=W, 3=S, 4=N
for i in range(K):
    go = DIR[i]
    nr, nc = r + drc[go][0], c + drc[go][1]
    # Only act when (nr, nc) stays on the board; out-of-range commands are ignored.
    if 0 <= nr < N and 0 <= nc < M:
        # Roll the dice in the commanded direction.
        dice = move(go, dice)
        # Destination cell holds a nonzero number:
        if IN[nr][nc]:
            # copy the cell's number onto the dice bottom face and zero the cell
            dice[1] = IN[nr][nc]
            IN[nr][nc] = 0
        # Destination cell holds 0:
        else:
            # copy the dice's bottom face onto the cell
            IN[nr][nc] = dice[1]
        r, c = nr, nc
# Print the number on the dice's top face.
# NOTE(review): BOJ 14499 asks for the top face after *every* valid move;
# this prints only once after the loop — verify against the problem statement.
print(dice[3])
| TValgoStudy/algo_study | 쌔피맨조/다은/BOJ14499/BOJ14499.py | BOJ14499.py | py | 1,670 | python | ko | code | 3 | github-code | 50 |
5965945056 | import numpy as np
import json
def compute_bounding_box_dimensions(vertices):
    """Return the axis-aligned bounding-box extents of a vertex array.

    *vertices* is an (N, 3) array-like; the result is the per-axis
    ``max - min``, i.e. width/height/depth of the tight bounding box.
    """
    # np.ptp ("peak to peak") computes max - min along the axis in one call.
    return np.ptp(vertices, axis=0)
def load_obj(file_path):
    """Parse a Wavefront OBJ file and return its vertices as an (N, 3) array.

    Only geometric-vertex (``v``) records are read; normals (``vn``),
    texture coordinates (``vt``), faces, and comments are skipped because
    their keywords differ from the exact token ``v``.

    The OBJ spec allows an optional fourth (w) component on ``v`` lines;
    the original code unpacked *all* trailing tokens into x, y, z and
    crashed on such files.  Slicing ``tokens[1:4]`` accepts them while
    behaving identically for plain ``v x y z`` records.
    """
    vertices = []
    with open(file_path, 'r') as obj_file:
        for line in obj_file:
            tokens = line.strip().split()
            if tokens and tokens[0] == 'v':
                # Take only x, y, z; tolerate an optional trailing w component.
                x, y, z = map(float, tokens[1:4])
                vertices.append((x, y, z))
    return np.array(vertices)
if __name__ == "__main__":
    # Path of the OBJ model to measure; replace with your own file.
    obj_file_path = 'textured_output.obj'
    # Load the model's vertices.
    model_vertices = load_obj(obj_file_path)
    # Per-axis extents of the axis-aligned bounding box.
    dimensions = compute_bounding_box_dimensions(model_vertices)
    # Print the raw (model-unit) dimensions.
    print(f"Width: {dimensions[0]}, Height: {dimensions[1]}, Depth: {dimensions[2]}")
    # Presumably the model is in metres: *100 converts to centimetres and
    # "cbm" is the raw volume in cubic metres — TODO confirm units.
    dimensions_dict = {"width": round(dimensions[0]*100, 2), "height": round(dimensions[1]*100, 2), "depth": round(dimensions[2]*100, 2), "cbm": round(dimensions[0]*dimensions[1]*dimensions[2], 3)}
    # Convert the dictionary to a JSON string for console output.
    dimensions_json = json.dumps(dimensions_dict)
    # Print the JSON representation.
    print(dimensions_json)
    # Persist the dimensions to a JSON file next to the script.
    with open('output.json', 'w') as json_file:
        json.dump(dimensions_dict, json_file, indent=2)
| puiyu11/Hackathon | bounding_box_dimensions.py | bounding_box_dimensions.py | py | 1,534 | python | en | code | 0 | github-code | 50 |
70344464474 | # -*- coding: utf-8 -*-
"""
Created on Tue May 12 14:56:26 2020
@author: jvan1
"""
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
def bostonCoefficient():
    """Fit OLS on the Boston housing data and name the feature whose
    coefficient has the largest absolute value.

    NOTE(review): ``datasets.load_boston`` was removed in scikit-learn 1.2;
    this function requires an older scikit-learn release.
    """
    boston = datasets.load_boston()
    model = LinearRegression().fit(boston.data, boston.target)
    # Compute the coefficient magnitudes once and reuse the list for both
    # the maximum and its position.
    magnitudes = [abs(c) for c in model.coef_]
    max_coefficient = max(magnitudes)
    name = boston.feature_names[magnitudes.index(max_coefficient)]
    return '%s is the feature with the largest coefficient which is %f'%(name,max_coefficient)
def irisCluster():
    """Show the k-means elbow plot (inertia vs. cluster count) for Iris."""
    iris = datasets.load_iris()
    cluster_counts = list(range(1, 20))
    inertias = []
    # Fit one KMeans model per candidate cluster count and record its inertia.
    for k in cluster_counts:
        fitted = KMeans(k).fit(iris.data, iris.target)
        inertias.append(fitted.inertia_)
    plt.plot(cluster_counts, inertias)
    plt.xticks(cluster_counts)
    plt.xlabel('Number of Clusters')
    plt.ylabel('Inertia')
    plt.title('Elbow Heuristics For Iris Dataset')
    plt.show()
if __name__=='__main__':
print(bostonCoefficient())
irisCluster()
print('The Iris data set looks to have 3 clusters as the slope of inertia massively changes in magnitude between 3 and 4 clusters showing that predictive power is not helped much by adding additional clusters after the third') | Vansantj/FE595-SKLearn | sklearn_introduction.py | sklearn_introduction.py | py | 1,325 | python | en | code | 0 | github-code | 50 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.