seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2946357970 | import heapq,copy,collections
from typing import List,Optional
from collections import deque
class ListNode:
    """Singly linked list node (LeetCode-style)."""
    def __init__(self, val = 0, next = None):
        self.val = val    # payload value
        self.next = next  # reference to the next node, or None for the tail
class Solution:
    # Sort List: given the head of a linked list, sort it in ascending order
    # and return the sorted list. (comment translated from Chinese)
    def sortList(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """Top-down merge sort: split at the midpoint, sort halves, merge."""
        def mergeTwoList(head1: Optional[ListNode], head2: Optional[ListNode]) -> Optional[ListNode]:
            # Merge two sorted lists. While both are non-empty the smaller
            # value is *copied* into a fresh node; whichever list is left
            # over is linked in as-is (original nodes, not copies).
            dummpy = ListNode()
            head = dummpy
            while head1 and head2:
                if head1.val < head2.val:
                    head.next = ListNode(head1.val)
                    head1 = head1.next
                else:
                    head.next = ListNode(head2.val)
                    head2 = head2.next
                head = head.next
            if head1:
                head.next = head1
            if head2:
                head.next = head2
            return dummpy.next
        # Lists of length 0 or 1 are already sorted.
        if not head or not head.next:
            return head
        # Slow/fast pointers locate the midpoint; fast starts at
        # head.next.next (two nodes ahead of slow).
        slow = head
        fast = head.next.next
        while slow and fast:
            if not fast.next:
                break
            fast = fast.next.next
            slow = slow.next
        mid_node = slow.next
        slow.next = None  # cut the list in two before recursing
        return mergeTwoList(self.sortList(head), self.sortList(mid_node))

    # Intersection of Two Linked Lists: return the first node shared by the
    # two lists, or None if they never intersect. (translated from Chinese)
    def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> Optional[ListNode]:
        """Length-difference technique: advance the longer list's pointer
        first, then walk both in lockstep until the pointers coincide."""
        ka = 0
        kb = 0
        tmpA = headA
        tmpB = headB
        # Measure both list lengths.
        while tmpA:
            ka += 1
            tmpA = tmpA.next
        while tmpB:
            kb += 1
            tmpB = tmpB.next
        tmpA = headA
        tmpB = headB
        # Skip the surplus nodes of the longer list.
        while ka > kb and tmpA:
            tmpA = tmpA.next
            ka -= 1
        while ka < kb and tmpB:
            tmpB = tmpB.next
            kb -= 1
        # Lockstep walk; ListNode defines no __eq__, so == compares identity.
        while tmpA and tmpB:
            if tmpA == tmpB:
                return tmpA
            tmpA = tmpA.next
            tmpB = tmpB.next
        return None

    # Linked List Cycle II: return the node where the cycle begins, or None
    # if there is no cycle. (translated from Chinese)
    def detectCycle(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """Floyd's tortoise-and-hare: after slow/fast meet inside the cycle,
        restarting slow from the head makes them meet at the cycle entry."""
        if not head or not head.next:
            return None
        slow = head
        fast = head
        while slow and fast:
            if not fast.next:
                return None  # fast reached the tail: acyclic list
            slow = slow.next
            fast = fast.next.next
            if slow == fast:
                break  # pointers met somewhere inside the cycle
        slow = head
        # Second phase: both advance one step; if fast is None here the
        # first loop fell through without a meeting, so this is skipped.
        while slow and fast:
            if slow == fast:
                return slow  # cycle entry node
            slow = slow.next
            fast = fast.next
        return None

    # Reverse Linked List: reverse the list and return the new head.
    # (translated from Chinese)
    def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """Iterative head-insertion: repeatedly unlink the node after `tail`
        and splice it onto the front of the list."""
        if not head or not head.next:
            return head
        tail = head  # the original head becomes the tail and never moves
        while tail.next:
            cur_node = tail.next
            tail.next = cur_node.next
            cur_node.next = head
            head = cur_node
        return head
| gpj10054211/guoDeveloper | listnode.py | listnode.py | py | 3,590 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Optional",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
... |
30148633270 | import logging
import time
from datetime import datetime
import pytz
from flask import Flask
from flask import json
from github import Github
import commands
import envvariables
from sendToRegression import bucket, administrative_issue, close
# Logging goes both to the console (basicConfig) and to log.txt.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("FlaskRest")
handler = logging.FileHandler(filename='log.txt')
handler.setLevel(logging.INFO)
logger.addHandler(handler)
# Static configuration: org/repo names, polling schedule, allowed senders.
props = json.load(open("properties.json", 'r'))
app = Flask(__name__)
def run():
    """Poll the configured GitHub repository forever.

    Every `props["schedule"]` seconds, scan the open pull requests of the
    configured org/repo and dispatch any issue comment starting with
    "cihu" found on PRs that have not been bucketed yet.
    """
    while True:
        logging.info('Polling started: %s', datetime.now(pytz.timezone('Europe/Paris')))
        # A new API client is built on every polling cycle.
        g = Github(envvariables.github)
        org = g.get_organization(props["org"])
        repo = org.get_repo(props["repo"])
        prs = repo.get_pulls("open")
        for pr in prs:
            # Only PRs without a bucket/gauntlet label are still actionable.
            if check_for_labels(pr):
                comments = pr.get_issue_comments()
                for comment in comments:
                    # Bot commands are issue comments starting with "cihu".
                    if comment.body[0:4] == "cihu":
                        handle_command(comment, pr)
        time.sleep(props["schedule"])
def check_for_labels(pr):
    """Return True when the pull request carries none of the triage labels.

    A PR already labelled "bucket-a", "bucket-b" or "gauntlet" has been
    processed before and must be skipped by the polling loop.
    """
    triage_labels = {"bucket-a", "bucket-b", "gauntlet"}
    return all(label["name"] not in triage_labels for label in pr.raw_data["labels"])
def handle_command(comment, pr):
    """Dispatch a "cihu:<verb>[:<argument>]" bot command from a PR comment.

    Only comments from users listed in props["allowedSender"] are honoured.
    Supported verbs: the bucketing command (takes a bucket argument),
    "review" (with the administrative-issue sub-command), and "close".
    """
    allowed_senders = props["allowedSender"]
    if comment.user.login not in allowed_senders:
        return
    params = str(comment.body).split(":")
    if len(params) < 2:
        # Malformed command ("cihu" with no verb): ignore it instead of
        # killing the polling loop with an IndexError.
        return
    if params[1] == commands.BUCKETING_COMMAND:
        # Bucketing requires an explicit bucket argument.
        if len(params) > 2:
            bucket(pr.number, params[2])
    elif params[1] == "review":
        if len(params) > 2 and params[2] == commands.ADMINISTRATIVE_ISSUE_COMMAND:
            administrative_issue(pr.number)
    elif params[1] == "close":
        close(pr.number)
if __name__ == '__main__':
    # Script entry point: start the (blocking) polling loop.
    run()
| peterkungl/bucketservice | FlaskRest.py | FlaskRest.py | py | 1,635 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.FileH... |
4999987358 | import asyncio
import json
import random
import re
import requests
from discord import Intents
from discord import Colour
from discord import Embed
from discord.ext import commands
from discord.utils import get
from environment_variables import (
DISCORD,
REDDIT,
OPTION_FLAGS
)
from links import (
hummer_links,
tesla_link,
prius_link,
honk_links
)
from compliments import (
compliment_base,
compliment_subject,
compliment_descriptor,
compliment_ending
)
##################################
# DECLARATION #
##################################
# Declare the bot
# Declare the bot; commands are invoked with a '!' prefix.
bot = commands.Bot(
    command_prefix = '!',
    intents = Intents.all()
)
# Remove the {syntax}help option if required
if OPTION_FLAGS["REMOVE_HELP"]:
    bot.remove_command("help")
# Initialize the required queue(s)
reddit_queue = []  # cache of fetched Reddit posts, drained by the reddit command
# Initialize synchronization lock(s)
lock = asyncio.Lock()  # serialises concurrent RGB command invocations
##################################
# AUXILLIARY FUNCTIONS #
##################################
def administrator(roles):
    """Return True when at least one role in `roles` grants administrator rights."""
    for role in roles:
        if role.permissions.administrator:
            return True
    return False
async def handle_EV(ctx, message):
    """
    Reply with brutal honesty whenever an EV is praised.

    Arguments:
        ctx:     The message/context object to reply through
        message: Lower-cased message text to scan

    Returns:
        N/A

    Raises:
        N/A
    """
    # Look for a praised EV brand in the message text
    mentions_tesla = re.search(".*tesla.*", message)
    mentions_prius = re.search(".*prius.*", message)
    is_positive = re.search(".*((best)|(great)|(fantastic)|(love)).*", message)

    # Only respond when an EV is mentioned with positive sentiment;
    # Tesla takes precedence when both brands appear.
    if is_positive and (mentions_tesla or mentions_prius):
        counter_link = tesla_link if mentions_tesla else prius_link
        await ctx.reply(f"{random.choice(hummer_links)}\n{counter_link}")
def get_posts(client, secret, username, password, queue, subreddit, max_posts):
    """
    Populate the given queue with a series of posts from a specified
    subreddit up to a maximum of 'max_posts' using the API constructed
    from the arguments provided

    Arguments:
        client (str): The client ID associated with the bot
        secret (str): The secret token associated with the bot
        username (str): The username of the Reddit account
        password (str): The passsword of the Reddit account
        queue (List): The queue to store posts into
        subreddit (str): The desired subreddit
        max_posts (int): The maximum amount of posts allowed

    Returns:
        N/A

    Raises:
        N/A
    """
    # Get authorization
    authorization = requests.auth.HTTPBasicAuth(client, secret)
    # Specify the login method
    # wth it's associated data
    data = {
        "grant_type": "password",
        "username": username,
        "password": password
    }
    # Setup our headers info
    headers = {"User-Agent": "Thanatos/0.0.1"}
    # Send our request for an OAuth token (re-authenticates on every call)
    response = requests.post(
        "https://www.reddit.com/api/v1/access_token",
        auth = authorization,
        data = data,
        headers = headers
    )
    # Get the access token value
    access_token = response.json()["access_token"]
    # Define headers and get the headers
    headers = {
        **headers,
        **{"Authorization": f"bearer {access_token}"}
    }
    requests.get("https://oauth.reddit.com/api/v1/me", headers = headers)
    # Get posts based on headers and parameters specified
    # (top posts of the past year, up to 250 per request)
    parameters = {"limit": 250, 't': "year"}
    response = requests.get(
        f"https://oauth.reddit.com/r/{subreddit}/top",
        headers = headers,
        params = parameters
    )
    # Parse the given posts; stickied (pinned) posts are skipped.
    # NOTE(review): `response_data` is assigned but never used.
    response_data = []
    for post in response.json()["data"]["children"]:
        if post["data"]["stickied"] == True:
            continue
        queue.append({
            "title": post["data"]["title"],
            "selftext": post["data"]["selftext"],
            "ups": post["data"]["ups"],
            "downs": post["data"]["downs"],
            "url": post["data"]["url"],
            "thumbnail": post["data"]["thumbnail"]
        })
        # Stop as soon as the queue reaches the requested size.
        if len(queue) >= max_posts:
            return
##################################
# EVENT HANDLING #
##################################
@bot.event
async def on_ready():
    # Fired once the bot has connected and logged in; log the identity.
    print(f"Logged in as: {bot.user.name} (ID {bot.user.id})")
@bot.event
async def on_typing(channel, user, when):
    # Typing events are currently ignored; the guard below only filters
    # out the bot's own typing indicator.
    # Disregard input made by self
    if user == bot.user:
        return
@bot.event
async def on_message(message):
    """Handle every incoming message: run the EV honesty check on its
    text, then forward it to the command processor."""
    # Disregard input made by self
    if message.author == bot.user:
        return
    # Occasionally (roughly 2 in 11 messages) pick a playful status line.
    # NOTE(review): `selection` is never used afterwards -- presumably it
    # was meant to be passed to bot.change_presence(); confirm before
    # wiring it up.
    if random.randint(0, 10) > 8:
        selection = random.choice([
            "with the fabric of reality",
            "the souls of mortals",
            "with fire",
            "with something he shouldn't",
            "Untitled Goose Game",
            "with explosions",
            "I use Arch BTW",
            "👍 Hitchiker Simulator 2022",
            f"with {message.author.name}"
        ])
    # Check for string input in the message
    # (e.g. not a raw media type alone)
    if len(message.content) > 0:
        # Act on the message as required
        await handle_EV(message, message.content.lower())
    # Try to process the message as a command
    try:
        await bot.process_commands(message)
    except Exception as exc:
        # BUG FIX: the previous handler printed the undefined name
        # `exception`, which raised a NameError whenever command
        # processing failed and masked the real error.
        print(exc)
##################################
# COMMAND CALLS #
##################################
@bot.command()
async def puppet(ctx):
    """
    Talk through the bot using
    the corrosponding terminal

    Owner-only: reads lines from the hosting terminal and relays each one
    to the invoking channel until an empty line is entered.

    Arguments:
        N/A

    Returns:
        N/A

    Raises:
        N/A
    """
    # Verify the author of the command call
    if (ctx.author.id != DISCORD["OWNER_ID"]):
        return
    # Hide the original command call
    await ctx.message.delete()
    # Take input repeatedly from the
    # terminal until no input is given
    # NOTE(review): input() blocks inside an async handler, so the whole
    # event loop stalls while waiting for terminal input -- confirm this
    # is intentional for this owner-only debug command.
    while True:
        response = input("Message: ")
        if (response != ""):
            await ctx.send(response)
        else:
            print("__END__\n")
            return
@bot.command()
async def peptalk(ctx):
    """
    Generate and reply back with a peptalk,
    sending it to another user if called as
    a reply to a message that user made

    Arguments:
        N/A

    Returns:
        N/A

    Raises:
        N/A
    """
    # Generate random response from the four compliment fragment lists
    response = ''.join([
        random.choice(compliment_base),
        random.choice(compliment_subject),
        random.choice(compliment_descriptor),
        random.choice(compliment_ending)
    ])
    # When invoked as a reply, retarget the pep talk at the message that
    # was replied to. (Removed an unused local that saved ctx.message.)
    if ctx.message.reference != None:
        ctx.message = await ctx.channel.fetch_message(ctx.message.reference.message_id)
    # Send message
    await ctx.message.reply(response)
@bot.command()
async def puptalk(ctx):
    """
    Run an API call to get a random dog

    Arguments:
        N/A

    Returns:
        N/A

    Raises:
        N/A
    """
    # random.dog returns a JSON document whose "url" field points at a
    # random dog picture; relay that link back to the caller.
    api_response = requests.get("https://random.dog/woof.json")
    payload = api_response.json()
    await ctx.message.reply(payload["url"])
@bot.command()
async def cattalk(ctx):
    """
    Run an API call to get a random cat

    Arguments:
        (tag): The filter to parse results from

    Returns:
        N/A

    Raises:
        N/A
    """
    # Handle the optional tags of the user input:
    # "!cattalk" is 8 characters, so content[9:] is everything after
    # "!cattalk " when any tag text was supplied.
    url_base = "https://cataas.com/cat"
    url_tags = ""
    if len(ctx.message.content) > 8:
        url_tags = f"/{ctx.message.content[9:]}"
    # Send out the request and handle responding
    # according to the request response
    response = requests.get(f"{url_base}{url_tags}?json=true")
    if response.status_code == 404:
        await ctx.message.reply(f"Sorry, no results for '{ctx.message.content[9:]}' tags")
    else:
        # NOTE(review): [4:] appears to strip a "/cat" prefix from the
        # returned url before re-joining it to url_base -- confirm against
        # the cataas.com response format.
        await ctx.message.reply(f"{url_base}{response.json()['url'][4:]}")
@bot.command()
async def fire(ctx):
    """
    Run an API call to arcgis.com
    to check for fires in a given area

    Arguments:
        (county): The name of a county specified in
                  the command call (e.g. '!fire butte').
                  If not specified, all counties in the
                  state are queried, and the first three
                  returned are used in the text-response.

    Returns:
        N/A

    Raises:
        N/A
    """
    # Define the argument filter for county from the command call;
    # the default "1%3D1" is URL-encoded "1=1" (match everything).
    filter_county = "1%3D1"
    if len(ctx.message.content) >= 6:
        # "!fire " is 6 characters; everything after it is the county name.
        filter_county = f"irwin_POOCounty%20%3D%20'{ctx.message.content.upper()[6:]}'"
    # Define the spatial filter for the request
    # to look specifically in California
    filter_state = [
        "geometry=-138.176%2C31.172%2C-100.471%2C43.363",
        "geometryType=esriGeometryEnvelope",
        "inSR=4326",
        "spatialRel=esriSpatialRelIntersects",
        "outSR=4326",
        "returnGeometry=False"
    ]
    # Define the basic request information and the
    # desired response format
    request_base = '/'.join([
        f"https://services3.arcgis.com",
        f"T4QMspbfLg3qTGWY",
        f"arcgis",
        f"rest",
        f"services",
        f"CY_WildlandFire_Perimeters_ToDate",
        f"FeatureServer",
        f"0",
        f"query?where={filter_county}&outFields="
    ])
    request_format = f"f=json"
    # Define the requested information
    # for each event returned
    request_fields = [
        "poly_Acres_AutoCalc",
        "irwin_FireCause",
        "irwin_IncidentName",
        "irwin_IncidentShortDescription",
        "irwin_PrimaryFuelModel",
        "irwin_UniqueFireIdentifier",
        "irwin_PercentContained",
        "irwin_POOCounty"
    ]
    # Make the request to the API
    response = requests.get(
        request_base
        + ','.join(request_fields) + '&'
        + '&'.join(filter_state) + '&'
        + request_format
    )
    # Evaluate response JSON data; reply with at most three events.
    reply_amount = 0
    for item in response.json()['features']:
        # Iterate through each event found
        # NOTE(review): this assumes every value in `item` is a dict of
        # event attributes (e.g. the "attributes" entry) -- confirm the
        # feature layout, since other entries would raise a KeyError.
        for event, attributes in item.items():
            # Check only 'big' events with incident descriptions
            # (which are typically locations and whatnot)
            if attributes['irwin_IncidentShortDescription'] == None:
                continue
            output = '\n'.join([
                f"\n---------------------------------------------------\n",
                f"**Incident Name:** {attributes['irwin_IncidentName']}",
                f"**Unique Fire ID:** {attributes['irwin_UniqueFireIdentifier']}",
                f"**County:** {attributes['irwin_POOCounty']}",
                f"**Description:** {attributes['irwin_IncidentShortDescription']}",
                f"**Primary Fuel:** {attributes['irwin_PrimaryFuelModel']}",
                f"**Percent Contained:** {attributes['irwin_PercentContained']}%",
                f"**Acres Affected:** {round(attributes['poly_Acres_AutoCalc'], 2)}",
                f"**Fire Cause:** {attributes['irwin_FireCause']}"
            ])
            await ctx.send(output)
            reply_amount += 1
            if reply_amount >= 3:
                return
    if reply_amount == 0:
        await ctx.message.reply(f"Sorry, no results for '{ctx.message.content[6:]}'")
@bot.command()
async def reddit(ctx):
    """
    Respond to the user with a Reddit post

    Arguments:
        N/A

    Returns:
        N/A

    Raises:
        N/A
    """
    # Check if the module-level queue needs to be repopulated; refill it
    # with up to 250 top posts from r/memes and shuffle for variety.
    if len(reddit_queue) == 0:
        get_posts(
            client = REDDIT["CLIENT_ID"],
            secret = REDDIT["SECRET_TOKEN"],
            username = REDDIT["USERNAME"],
            password = REDDIT["PASSWORD"],
            queue = reddit_queue,
            subreddit = "memes",
            max_posts = 250
        )
        random.shuffle(reddit_queue)
    # Setup the base message from the last queued post
    embed = Embed(
        title = f"{reddit_queue[-1]['title']}",
        url = reddit_queue[-1]['url'],
        colour = Colour.from_rgb(*[random.randint(0, 255) for _ in range(3)])
    )
    # Setup the extra options for the message
    embed.set_footer(text = f"-- r/memes")
    embed.set_image(url = reddit_queue[-1]['url'])
    # Prepare the response and then pop from the queue
    # before sending the message to the calling user
    reddit_queue.pop()
    await ctx.reply(embed = embed)
@bot.command()
async def honk(ctx):
    """
    Honk

    Arguments:
        N/A

    Returns:
        N/A

    Raises:
        N/A
    """
    # Pick a random goose picture to unleash
    picture = random.choice(honk_links)
    # React twice so they know he's angry -- REALLY angry
    for reaction in ('\N{ANGER SYMBOL}', '\N{SWAN}'):
        await ctx.message.add_reaction(reaction)
    # Release the kraken
    await ctx.send(f"**HONK**\n {picture}")
@bot.command()
async def uwu(ctx):
    """
    Oh no...

    Arguments:
        N/A

    Returns:
        N/A

    Raises:
        N/A
    """
    # Candidate exasperated replies and reactions
    replies = [
        "I\'m going to be 100\% upset at you for that.",
        "Do you wish to invoke the wrath of Thanatos, mortal?!",
        "Silence, mortal!",
        "No.",
        "Could you just *not* do that?",
        "Stop that, you stop that this instant!",
        "DO NOT. DO THIS.",
        "Why must you bring \'him\' back?!",
        "Oh no...he\'s back,",
        "Here he comes again...",
        "Ugh.",
        "No...no no no. *No*.",
        "Why are you like this, mortal?",
        "How DARE you?"
    ]
    reactions = [
        '\N{THUMBS DOWN SIGN}',
        '\N{ANGER SYMBOL}'
    ]
    # Draw the reply first, then the reaction (order preserved for the RNG)
    response = random.choice(replies)
    emoji = random.choice(reactions)
    # Respond to the user command call appropriately
    await ctx.message.add_reaction(emoji)
    await ctx.message.reply(response)
@bot.command()
async def RGB(ctx):
    """
    Briefly flash the colours in the RGB role of the server

    Arguments:
        N/A

    Returns:
        N/A

    Raises:
        N/A
    """
    # Using the RGB call lock to handle multiple callers
    # (only one flash sequence runs at a time per process)
    async with lock:
        # Get the server's RGB role if it exists
        role = get(ctx.guild.roles, name = "RGB")
        if role == None:
            return
        # Only proceed if the caller is an admin or has the RGB role
        if role in ctx.author.roles or administrator(ctx.author.roles):
            # Remember the current colour so it can be restored afterwards
            default = tuple(value for value in (role.colour).to_rgb())
            for i in range(0, 20):
                # Get the new RGB value for the role
                colour = tuple(random.randint(0, 255) for _ in range(3))
                # Re-assign the role's colour and sleep for a brief period
                await role.edit(colour = Colour.from_rgb(*colour))
                await asyncio.sleep(1)
            # Re-apply the old colour to the role
            await role.edit(colour = Colour.from_rgb(*default))
@bot.command()
async def set_activity(ctx):
    """
    Change the bot's displayed "Playing ..." activity status (owner only)

    Arguments:
        (message): A string message to display as the activity

    Returns:
        N/A

    Raises:
        N/A
    """
    # Check for caller authorization
    if (ctx.author.id != DISCORD["OWNER_ID"]):
        return
    # Get any additional arguments from the command caller:
    # "!set_activity" is 13 characters, so content[14:] is everything
    # after "!set_activity ".
    if len(ctx.message.content) > 13:
        arguments = ctx.message.content[14:]
    else:
        arguments = ""
    # BUG FIX: this module only imports individual names from `discord`,
    # so the previous bare `discord.Game(...)` reference raised a
    # NameError at runtime.
    from discord import Game
    await bot.change_presence(activity = Game(name = arguments))
@bot.command()
async def announcement(ctx):
    """
    Publish the given message as an embed in the #announcements channel
    (owner or server administrators only)

    Arguments:
        (message): A post to publish in the announcements channel

    Returns:
        N/A

    Raises:
        N/A
    """
    # Check for caller authorization
    if (ctx.author.id != DISCORD["OWNER_ID"] and not administrator(ctx.author.roles)):
        return
    # Get the server's announcements channel if it exists
    channel = get(ctx.guild.channels, name = "announcements")
    if channel == None:
        return
    # Setup the base message; content[14:] is the text after the
    # "!announcement " prefix
    embed = Embed(
        description = f"{ctx.message.content[14:]}",
        colour = Colour.from_rgb(*[random.randint(0, 255) for _ in range(3)])
    )
    # Setup the optional flaires for the message
    embed.set_footer(text = "-- Sent via Thanatos")
    embed.set_author(
        name = ctx.message.author.name,
        icon_url = ctx.author.avatar_url
    )
    # Send the message
    await channel.send(embed = embed)
##################################
#         INITIALIZATION         #
##################################
if __name__ == "__main__":
    # Blocking call: runs the bot until the process is stopped.
    bot.run(DISCORD["TOKEN"])
{
"api_name": "discord.ext.commands.Bot",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "discord.Intents.all",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "dis... |
40795182425 | import asyncio
from concurrent.futures import ThreadPoolExecutor
import nest_asyncio
from discord import Message, File
from ImageGenerator import ImageGenerator
from wiezenlibrary.Game import Game
# Worker pool of 10 threads (not referenced elsewhere in this module).
_executor = ThreadPoolExecutor(10)
# Patch asyncio so event loops can be re-entered while already running.
nest_asyncio.apply()
class DiscordWiezen(Game):
    """Discord front-end for the Wiezen card game.

    Extends the library `Game` with Discord-specific output: player hands
    and the table state are rendered to images and sent as files.
    """
    def __init__(self, bot, parent):
        # `parent` is notified when this game stops (see stop()).
        self.stop_notifier = parent
        super().__init__()

    def stop(self):
        # Delegate the actual teardown to the owning object.
        self.stop_notifier.stop(self)

    def update_table_images(self):
        # An earlier implementation deleted the previously-sent table
        # messages first; kept commented-out for reference.
        # msg: Message
        # for msg in self.table_messages.values():
        #     if msg:
        #         # loop = asyncio.get_event_loop()
        #         asyncio.ensure_future(
        #             msg.delete()
        #         )
        self.send_tables()

    def show_cards(self, players: list):
        """Render each player's hand to an image and send it to them."""
        img_gen = ImageGenerator(1)
        for player in players:
            img_gen.hand_to_image(player)
            img_file = File(img_gen.get_output('hand').strip())
            # loop = asyncio.get_event_loop()
            player.send_message("Hier zijn uwer kaarten")
            player.send_message(img_file, is_file=True)

    def send_to(self, players: list, message: str or File, img=False):
        """Send `message` to every player; wrap it in a File when img=True."""
        # NOTE(review): the annotation `str or File` evaluates to `str`;
        # it is documentation-only and has no runtime effect.
        for player in players:
            if img:
                file = File(message)
                self.sendMsg(file, player)
            else:
                player.send_message(message)

    def sendMsg(self, file, player):
        # Remember the sent message per player so it can be replaced later.
        msg = player.send_message(file, is_file=True)
        self.table_messages[player] = msg

    def send_tables(self):
        """Render the current table state and broadcast it to all players."""
        ImageGenerator(1).generate_table(self.current_slag, self.players, self.teams)
        file = ImageGenerator(1).get_output('table').strip()
        self.send_to(self.players, file, img=True)
| FreekDS/De-Grote-Wiezen-Bot | bot/DiscordWiezen.py | DiscordWiezen.py | py | 1,751 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "concurrent.futures.ThreadPoolExecutor",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "nest_asyncio.apply",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "wiezenlibrary.Game.Game",
"line_number": 13,
"usage_type": "name"
},
{
"... |
20832781197 | from pocket_coffea.utils.configurator import Configurator
from pocket_coffea.lib.cut_definition import Cut
from pocket_coffea.lib.cut_functions import get_nObj_min, get_HLTsel, get_nBtagEq
from pocket_coffea.parameters.cuts import passthrough
from pocket_coffea.parameters.histograms import *
from pocket_coffea.parameters.btag import btag_variations
import workflow
from workflow import ttHbbBaseProcessor
from pocket_coffea.lib.columns_manager import ColOut
import cloudpickle
import custom_cut_functions
# Pickle the workflow and cut modules by value so remote (dask) workers do
# not need these source files on their own PYTHONPATH.
cloudpickle.register_pickle_by_value(workflow)
cloudpickle.register_pickle_by_value(custom_cut_functions)
from custom_cut_functions import *
import os
localdir = os.path.dirname(os.path.abspath(__file__))

# Loading default parameters
from pocket_coffea.parameters import defaults
default_parameters = defaults.get_default_parameters()
defaults.register_configuration_dir("config_dir", localdir+"/params")

# Layer the analysis-specific yaml files on top of the defaults.
parameters = defaults.merge_parameters_from_files(default_parameters,
                                                  f"{localdir}/params/object_preselection.yaml",
                                                  f"{localdir}/params/triggers.yaml",
                                                  update=True)

# Main analysis configuration: datasets, event selection, categories,
# weights/variations, histograms and per-event output columns.
cfg = Configurator(
    parameters = parameters,
    datasets = {
        "jsons": [f"{localdir}/datasets/backgrounds_MC_TTbb_dileptonic_redirector.json"
                  ],
        "filter" : {
            "samples": ["TTbbDiLeptonic"],
            "samples_exclude" : [],
            "year": ["2018"]
        }
    },
    workflow = ttHbbBaseProcessor,
    # Skim: at least one FatJet above 200 GeV and a dilepton trigger fired.
    skim = [get_nObj_min(1, 200., "FatJet"),
            get_HLTsel(primaryDatasets=["DoubleEle","EleMu","DoubleMu"])],
    preselections = [dilepton_presel,
                     get_nObj_min(2,25,"LeptonGood")],
    # Event categories by exact b-tagged jet multiplicity.
    categories = {
        "baseline": [passthrough],
        "1b" : [ get_nBtagEq(1, coll="BJetGood")],
        "2b" : [ get_nBtagEq(2, coll="BJetGood")],
        "3b" : [ get_nBtagEq(3, coll="BJetGood")],
        "4b" : [ get_nBtagEq(4, coll="BJetGood")]
    },
    weights = {
        "common": {
            "inclusive": ["genWeight","lumi","XS",
                          "pileup",
                          "sf_ele_reco", "sf_ele_id",
                          "sf_mu_id","sf_mu_iso",
                          "sf_btag", "sf_jet_puId",
                          ],
            "bycategory" : {
            }
        },
        "bysample": {
        }
    },
    # Systematic variations: vary the listed weights up/down.
    variations = {
        "weights": {
            "common": {
                "inclusive": [ "pileup",
                               "sf_ele_reco", "sf_ele_id",
                               "sf_mu_id", "sf_mu_iso", "sf_jet_puId",
                               ],
                "bycategory" : {
                }
            },
            "bysample": {
            }
        },
    },
    variables = {
        **ele_hists(coll="ElectronGood", pos=0),
        **muon_hists(coll="MuonGood", pos=0),
        **count_hist(name="nElectronGood", coll="ElectronGood",bins=3, start=0, stop=3),
        **count_hist(name="nMuonGood", coll="MuonGood",bins=3, start=0, stop=3),
        **count_hist(name="nJets", coll="JetGood",bins=8, start=0, stop=8),
        **count_hist(name="nBJets", coll="BJetGood",bins=8, start=0, stop=8),
        **jet_hists(coll="JetGood", pos=0),
        **jet_hists(coll="JetGood", pos=1),
        **jet_hists(coll="JetGood", pos=2),
        **jet_hists(coll="JetGood", pos=3),
        **jet_hists(coll="JetGood", pos=4),
        **jet_hists(name="bjet",coll="BJetGood", pos=0),
        **jet_hists(name="bjet",coll="BJetGood", pos=1),
        **jet_hists(name="bjet",coll="BJetGood", pos=2),
        **fatjet_hists(name="fatjet",coll="FatJetGood"),
        **fatjet_hists(name="bbfatjetTight",coll="BBFatJetGoodT"),
        **fatjet_hists(name="bbfatjetMedium",coll="BBFatJetGoodM"),
        **fatjet_hists(name="bbfatjetLoose",coll="BBFatJetGoodL"),
        # 2D plots
        "jet_eta_pt_leading": HistConf(
            [
                Axis(coll="JetGood", field="pt", pos=0, bins=40, start=0, stop=1000,
                     label="Leading jet $p_T$"),
                Axis(coll="JetGood", field="eta", pos=0, bins=40, start=-2.4, stop=2.4,
                     label="Leading jet $\eta$"),
            ]
        ),
        "jet_eta_pt_all": HistConf(
            [
                Axis(coll="JetGood", field="pt", bins=40, start=0, stop=1000,
                     label="Leading jet $p_T$"),
                Axis(coll="JetGood", field="eta", bins=40, start=-2.4, stop=2.4,
                     label="Leading jet $\eta$")
            ]
        ),
    },
    # Per-event arrays exported for the baseline category only.
    columns = {
        "common": {},
        "bysample": {
            "TTbbDiLeptonic": {
                "bycategory": {
                    "baseline": [
                        ColOut("JetGood", ["eta","pt","phi","btagDeepFlavB"]),
                        ColOut("FatJetGood", ["eta", "pt", "phi", "mass", "msoftdrop", "tau1", "tau2", "tau3", "tau4", "btagDDBvLV2", "deepTagMD_ZHbbvsQCD", "deepTagMD_ZHccvsQCD", "deepTagMD_HbbvsQCD", "deepTagMD_bbvsLight", "btagHbb"]),
                        ColOut("LeptonGood",["eta","pt","phi","pdgId"]),
                        ColOut("BJetGood", ["eta","pt","phi","btagDeepFlavB"]),
                        ColOut("BBFatJetGoodT", ["eta", "pt", "phi", "mass", "msoftdrop", "tau1", "tau2", "tau3", "tau4", "btagDDBvLV2", "deepTagMD_ZHbbvsQCD", "deepTagMD_ZHccvsQCD", "deepTagMD_HbbvsQCD", "deepTagMD_bbvsLight", "btagHbb"]),
                        ColOut("BBFatJetGoodM", ["eta", "pt", "phi", "mass", "msoftdrop", "tau1", "tau2", "tau3", "tau4", "btagDDBvLV2", "deepTagMD_ZHbbvsQCD", "deepTagMD_ZHccvsQCD", "deepTagMD_HbbvsQCD", "deepTagMD_bbvsLight", "btagHbb"]),
                        ColOut("BBFatJetGoodL", ["eta", "pt", "phi", "mass", "msoftdrop", "tau1", "tau2", "tau3", "tau4", "btagDDBvLV2", "deepTagMD_ZHbbvsQCD", "deepTagMD_ZHccvsQCD", "deepTagMD_HbbvsQCD", "deepTagMD_bbvsLight", "btagHbb"])
                    ]
                }
            }
        }
    }
)

# Batch-submission settings for the dask/lxplus executor.
run_options = {
    "executor"       : "dask/lxplus",
    "env"            : "singularity",
    "workers"        : 1,
    "scaleout"       : 50,
    "worker_image"   : "/cvmfs/unpacked.cern.ch/gitlab-registry.cern.ch/cms-analysis/general/pocketcoffea:lxplus-cc7-latest",
    "queue"          : "microcentury",
    "walltime"       : "00:40:00",
    "mem_per_worker" : "4GB", # GB
    "disk_per_worker" : "1GB", # GB
    "exclusive"      : False,
    "chunk"          : 400000,
    "retries"        : 50,
    "treereduction"  : 20,
    "adapt"          : False,
    "skipbadfiles"   : 10
}
| ryanm124/AnalysisConfigs | configs/ttHbb/example_config.py | example_config.py | py | 6,758 | python | en | code | null | github-code | 36 | [
{
"api_name": "cloudpickle.register_pickle_by_value",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cloudpickle.register_pickle_by_value",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 18,
"usage_type": "call"
},
... |
73578590183 | # coding: utf-8
_all_ = [ 'processing', 'processing_outputs' ]
import os
import sys
parent_dir = os.path.abspath(__file__ + 3 * '/..')
sys.path.insert(0, parent_dir)
import inclusion
from inclusion.config import main
from inclusion.utils import utils
from inclusion.condor.job_writer import JobWriter
import re
import argparse
def produce_trigger_outputs_sample(args, sample, ext):
    """
    Produces all outputs of the submitTriggerEff task for one sample.

    Returns the list of expected output file paths (one per input ROOT
    file) under ``args.outdir``.

    Limitation: As soon as one file is not produced, luigi
    reruns everything.
    """
    assert(ext in ('root', 'txt'))
    extension = '.' + ext
    t = []
    # Matches the numbered suffix of the input files, e.g. "..._output_12.root"
    exp = re.compile('.+output(_[0-9]{1,5}).root')

    inputs, _ = utils.get_root_inputs(sample, args.indir)
    # BUG FIX: this previously read `os.path.join(args.outdir, proc)` with
    # `proc` undefined in this scope (a NameError at runtime); the output
    # folder is keyed by the sample being processed.
    folder = os.path.join( args.outdir, sample )
    for inp in inputs:
        number = exp.search(inp)
        proc_folder = os.path.dirname(inp).split('/')[-1]
        basename = args.tprefix + '_' + proc_folder + number.group(1)
        basename += args.subtag + extension
        t.append( os.path.join(folder, basename) )
    return t
@utils.set_pure_input_namespace
def produce_trigger_outputs(args, ext='root'):
    """
    Produces all outputs of the submitTriggerEff task.
    Returns two lists of expected output paths: (data, MC).

    Limitation: As soon as one file is not produced, luigi
    reruns everything.
    """
    # Collect the expected outputs separately for data and MC samples.
    tdata, tmc = ([] for _ in range(2))
    for proc in args.data_vals:
        tdata.extend( produce_trigger_outputs_sample(args, proc, ext) )
    for proc in args.mc_vals:
        tmc.extend( produce_trigger_outputs_sample(args, proc, ext) )
    return tdata, tmc
@utils.set_pure_input_namespace
def processing_outputs(args):
    """Build the condor job-writer output descriptors for data and MC.

    Returns (data JobWriter outputs, MC JobWriter outputs,
    data (key, value) tuples, MC (key, value) tuples).
    """
    # Folder prefix depends on the processing mode.
    if args.mode == 'histos':
        name = 'Histos'
    elif args.mode == 'counts':
        name = 'Counts'
    else:
        raise ValueError('Mode {} is not supported.'.format(args.mode))

    # Pair each dataset key with its sample value.
    _data_tup = tuple((k,v) for k,v in zip(args.data_keys,args.data_vals))
    _mc_tup = tuple((k,v) for k,v in zip(args.mc_keys,args.mc_vals))

    data_folders = [ name + '_' + v for v in args.data_vals ]
    mc_folders = [ name + '_' + v for v in args.mc_vals ]
    job_opt = dict(localdir=args.localdir, tag=args.tag)
    return ( JobWriter.define_output( data_folders=data_folders, **job_opt),
             JobWriter.define_output( data_folders=mc_folders, **job_opt),
             _data_tup, _mc_tup )
@utils.set_pure_input_namespace
def processing(args):
    """Write the HTCondor shell/submission files for every data and MC
    process, one job per input ROOT file."""
    outs_data, outs_mc, _data_procs, _mc_procs = processing_outputs(args)
    # unite Data and MC lists
    outs_job = outs_data[0] + outs_mc[0]
    outs_submit = outs_data[1] + outs_mc[1]
    outs_check = outs_data[2] + outs_mc[2]
    outs_log = outs_data[3] + outs_mc[3]
    _all_processes = _data_procs + _mc_procs
    for i, (kproc, vproc) in enumerate(_all_processes):
        filelist, _ = utils.get_root_inputs(vproc, args.indir)

        #### Write shell executable (python scripts must be wrapped in shell files to run on HTCondor)
        # '${1}' is the per-job input file, filled from the condor queue below.
        pars = {'outdir'        : args.outdir,
                'dataset'       : kproc,
                'sample'        : vproc,
                'isdata'        : int(vproc in args.data_vals),
                'file'          : '${1}',
                'subtag'        : args.subtag,
                'channels'      : ' '.join(args.channels),
                'tprefix'       : args.tprefix,
                'configuration' : args.configuration}
        script = ('produce_trig_histos.py' if args.mode == 'histos'
                  else 'produce_trig_counts.py')
        comm = utils.build_script_command(name=script, sep=' ', **pars)
        if args.mode == 'histos':
            # Histogram mode needs extra binning/variable arguments.
            pars1 = {'binedges_fname'   : args.binedges_filename,
                     'intersection_str' : args.intersection_str,
                     'variables'        : ' '.join(args.variables,),
                     'nocut_dummy_str'  : args.nocut_dummy_str}
            comm += utils.build_script_command(name=None, sep=' ', **pars1)

        jw = JobWriter()
        jw.write_shell(filename=outs_job[i], command=comm, localdir=args.localdir)
        jw.add_string('echo "Process {} done in mode {}."'.format(vproc,args.mode))

        #### Write submission file
        jw.write_condor(filename=outs_submit[i],
                        real_exec=utils.build_script_path(script),
                        shell_exec=outs_job[i],
                        outfile=outs_check[i],
                        logfile=outs_log[i],
                        queue=main.queue,
                        machine='llrt3condor')

        # One queue entry (i.e. one condor job) per input file.
        qlines = []
        for listname in filelist:
            qlines.append('  {}'.format( listname.replace('\n','') ))
        jw.write_queue( qvars=('filename',),
                        qlines=qlines )
# -- Parse options
# -- Parse options
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Command line parser')

    parser.add_argument('--binedges_dataset', dest='binedges_dataset', required=True, help='in directory')
    parser.add_argument('--localdir', dest='localdir', default=os.getcwd(), help='job out directory')
    parser.add_argument('--indir', dest='indir', required=True, help='in directory')
    parser.add_argument('--outdir', dest='outdir', required=True, help='out directory')
    parser.add_argument('--tag', dest='tag', required=True, help='tag')
    parser.add_argument('--subtag', dest='subtag', required=True, help='subtag')
    parser.add_argument('--tprefix', dest='tprefix', required=True, help='target prefix')
    parser.add_argument('--mc_processes', dest='mc_processes', required=True, nargs='+', type=str,
                        help='list of MC process names')
    parser.add_argument('--data_keys', dest='data_keys', required=True, nargs='+', type=str,
                        help='list of datasets')
    parser.add_argument('--data_vals', dest='data_vals', required=True, nargs='+', type=str,
                        help='list of datasets')
    parser.add_argument('--channels', dest='channels', required=True, nargs='+', type=str,
                        help='Select the channels over which the workflow will be run.' )
    parser.add_argument('--variables', dest='variables', required=True, nargs='+', type=str,
                        help='Select the variables over which the workflow will be run.' )
    parser.add_argument('--intersection_str', dest='intersection_str', required=False, default=main.inters_str,
                        help='String used to represent set intersection between triggers.')
    parser.add_argument('--nocut_dummy_str', dest='nocut_dummy_str', required=True,
                        help='Dummy string associated to trigger histograms were no cuts are applied.')
    parser.add_argument('--configuration', dest='configuration', required=True,
                        help='Name of the configuration module to use.')
    args = parser.parse_args()

    # BUG FIX: the script previously called `submitTriggerEff(args)`, a name
    # defined nowhere in this module (NameError on launch). `processing` is
    # the entry point defined above that writes the condor job files.
    processing( args )
| bfonta/inclusion | inclusion/condor/processing.py | processing.py | py | 6,926 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "sys.path.insert",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number... |
24778042796 | """
From https://brian2.readthedocs.io/en/stable/resources/tutorials/3-intro-to-brian-simulations.html
An experiment to inject current into a neuron and change the amplitude randomly every 10 ms. Model that using a Hodgkin-Huxley type neuron.
"""
from brian2 import *
import matplotlib.pyplot as plt

start_scope()

# Parameters: membrane area scales the per-area capacitance and conductances.
area = 20000 * umetre**2
Cm = 1 * ufarad * cm**-2 * area
gl = 5e-5 * siemens * cm**-2 * area
El = -65 * mV   # leak reversal potential
EK = -90 * mV   # potassium reversal potential
ENa = 50 * mV   # sodium reversal potential
g_na = 100 * msiemens * cm**-2 * area
g_kd = 30 * msiemens * cm**-2 * area
VT = -63 * mV   # shifts the gating-variable rate functions

# The model: Hodgkin-Huxley-type equations; I is the injected current,
# set externally via run_regularly below.
eqs_HH = '''
dv/dt = (gl*(El-v) - g_na*(m*m*m)*h*(v-ENa) - g_kd*(n*n*n*n)*(v-EK) + I)/Cm : volt
dm/dt = 0.32*(mV**-1)*(13.*mV-v+VT)/
(exp((13.*mV-v+VT)/(4.*mV))-1.)/ms*(1-m)-0.28*(mV**-1)*(v-VT-40.*mV)/
(exp((v-VT-40.*mV)/(5.*mV))-1.)/ms*m : 1
dn/dt = 0.032*(mV**-1)*(15.*mV-v+VT)/
(exp((15.*mV-v+VT)/(5.*mV))-1.)/ms*(1.-n)-.5*exp((10.*mV-v+VT)/(40.*mV))/ms*n : 1
dh/dt = 0.128*exp((17.*mV-v+VT)/(18.*mV))/ms*(1.-h)-4./(1+exp((40.*mV-v+VT)/(5.*mV)))/ms*h : 1
I : amp
'''
group = NeuronGroup(1, eqs_HH,
                    threshold='v > -40*mV',
                    refractory='v > -40*mV',
                    method='exponential_euler')
group.v = El
statemon = StateMonitor(group, 'v', record=True)
spikemon = SpikeMonitor(group, variables='v')
# we replace the loop with a run_regularly: pick a new random current
# amplitude every 10 ms directly inside the simulation.
group.run_regularly('I = rand()*50*nA', dt=10 * ms)
run(50 * ms)
plt.figure(figsize=(9, 4))
# we keep the loop just to draw the vertical lines marking each amplitude change
for l in range(5):
    plt.axvline(l * 10, ls='--', c='k')
plt.axhline(El / mV, ls='-', c='lightgray', lw=3)
plt.plot(statemon.t / ms, statemon.v[0] / mV, '-b')
plt.plot(spikemon.t / ms, spikemon.v / mV, 'ob')
plt.xlabel('Time (ms)')
plt.ylabel('v (mV)')
plt.show()
| seankmartin/NeuroModelling | hodgkin_huxley.py | hodgkin_huxley.py | py | 1,776 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axvline",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "... |
16147236974 | from typing import List
from app.movements.base import Special
from app.movements.constants import Attacks
from app.movements.utils import replace_values_string
from app.settings import BASSIC_ATTACK_ENERGY, PLAYER_ENERGY
class Fighter:
    """A playable character with a name, an energy pool and a set of specials."""

    def __init__(self, name, specials: List[Special]) -> None:
        self.name = name
        self.specials = specials
        self.energy = PLAYER_ENERGY

    def get_moves(self, move):
        """Resolve a raw input combination into a dict with 'name' and 'energy'.

        Specials take priority; otherwise the combination is expanded and
        charged basic-attack energy only when it contains a punch or kick.
        """
        special = self.__move_is_special(move)
        if special is not None:
            return special
        resolved = replace_values_string(move)
        is_attack = Attacks.P.value in resolved or Attacks.K.value in resolved
        return {
            "name": resolved,
            "energy": BASSIC_ATTACK_ENERGY if is_attack else 0,
        }

    def __move_is_special(self, combination):
        """Return the payload of the special matching *combination*, or None."""
        for candidate in self.specials:
            info = candidate(self.name)
            if info.combination == combination:
                return {
                    "name": info.name,
                    "energy": info.energy,
                }
        return None
{
"api_name": "typing.List",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "app.movements.base.Special",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "app.settings.PLAYER_ENERGY",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "app.m... |
18306677513 | import asyncio
from loguru import logger
from mipa.ext.commands import Bot
from mipac import (
Note,
NotificationFollowRequest,
LiteUser,
ClientManager,
NotificationFollow,
)
from catline.adapters import QueueStorageJSONAdapter, QueueStorageRedisAdapter
from catline.queue import IFQueueStorageAdapter
from src.config import config
from src.utils.common import get_name
from src.di_container import injector
INITIAL_EXTENSIONS = [
{'path': 'src.cogs.follow', 'is_enable': True},
{'path': 'src.cogs.reminder', 'is_enable': True},
{'path': 'src.cogs.avatar_fix', 'is_enable': config.features.notfound_fixer},
]
async def follow_user(user: LiteUser, client: ClientManager):
    """Follow *user*, then send them a greeting note visible only to them."""
    await user.api.follow.action.add()
    # 'specified' visibility + visible_user_ids effectively makes this a DM.
    await client.note.action.send(
        visibility='specified',
        visible_user_ids=[user.id],
        content=f'{user.api.action.get_mention()} さん、よろしくね!',
    )
STRIKE = {}
class Akari(Bot):
    """MiPA bot that auto-accepts follow requests and follows users back."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    async def connect_channel(self):
        # Subscribe to the 'main' and 'global' streaming channels.
        await self.router.connect_channel(['main', 'global'])

    async def setup_hook(self) -> None:
        # Load only the cogs flagged as enabled in INITIAL_EXTENSIONS.
        for cog in INITIAL_EXTENSIONS:
            if cog['is_enable']:
                await self.load_extension(cog['path'])

    async def on_reconnect(self, ws):
        # Re-subscribe to channels after the websocket connection is lost.
        logger.warning('サーバーとの接続をロストしました。再接続します。')
        await self.connect_channel()

    async def on_ready(self, ws):
        logger.success(f'Connected {get_name(self.user)}')
        await self.connect_channel()

    async def on_note(self, note: Note):
        # Log every note seen on the subscribed timelines.
        logger.info(f'{get_name(note.author)}: {note.content}')

    async def on_follow_request(self, follow_request: NotificationFollowRequest):
        # Accept the incoming request, then follow the requester back.
        logger.info(f'{get_name(follow_request.user)}さんからフォローリクエストが届きました')
        await follow_request.user.api.follow.request.action.accept()
        logger.success('フォローリクエストを承認しました')
        await follow_user(follow_request.user, self.client)

    async def on_user_followed(self, follow: NotificationFollow):
        # Follow back anyone who follows us directly (no request flow).
        logger.info(f'{get_name(follow.user)}さんからフォローされました')
        await follow_user(follow.user, self.client)
async def main():
    """Wire up dependency injection and start the bot."""
    bot = Akari()
    # Choose the queue-storage backend from config: JSON file vs Redis.
    # NOTE(review): the JSON branch binds the *class* while the Redis branch
    # binds an *instance* — looks asymmetric; confirm the injector accepts both.
    injector.binder.bind(IFQueueStorageAdapter, QueueStorageJSONAdapter if config.job_queue.type == 'json' else QueueStorageRedisAdapter(**config.redis.to_dict))
    await bot.start(config.url, config.token)


if __name__ == '__main__':
    asyncio.run(main())
| TeamBlackCrystal/akari | main.py | main.py | py | 2,637 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "src.config.config.features",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "src.config.config",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "mipac.LiteUser",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "mipa... |
def read_msh(file, flag_plot):
    """Read a Gmsh mesh from ./mesh/, enforce CCW triangle orientation and
    return a simple attribute container; optionally plot the mesh."""
    import numpy as np
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    # custom functions
    from read_gmsh_V1 import read_gmsh
    from elarea import elarea

    # Bare container for the outputs.
    class structtype():
        pass

    out = structtype()

    # Parse the mesh file (first-order mesh assumed).
    msh = read_gmsh("mesh/" + file)
    itri = 1  # triangle-element slot in the msh object
    ilin = 0  # line-element slot in the msh object

    maxel = int(msh.maxel[itri])
    maxnp = int(msh.maxnp)
    nop = np.array(msh.nop[itri]) - 1  # shift to 0-based indexing
    cord = msh.cord[:, 0:2]
    cordx = cord[:, 0]
    cordy = cord[:, 1]
    physs = np.array(msh.phys_group[itri])   # triangle material indices
    physsl = np.array(msh.phys_group[ilin])  # line material indices
    nodel = (msh.nodel[itri])[itri]          # nodes per element
    nopl = np.array(msh.nop[ilin]) - 1       # line-element connectivity

    # Physical-group ids as defined in the gmsh file.
    out.AIR = 1000
    out.IRON1 = 1001
    out.IRON2 = 1004
    out.COIL1 = 1002
    out.COIL1_neg = 1012
    out.COIL2 = 1003
    out.COIL2_neg = 1013
    out.DIR = 2000
    out.DIR_disp = 2001

    # Dirichlet boundary nodes.
    indd = np.unique(nopl[physsl == out.DIR, :])

    # Enforce counter-clockwise node ordering: swap the first two nodes of
    # any triangle with negative signed area.
    for el in range(0, maxel):
        corde = cord[nop[el, :], :]
        if elarea(corde) < 0:
            nop[el, :] = nop[el, [1, 0, 2]]

    # Air-gap line elements (physical group 3000).
    noplb = nopl[np.where(physsl == 3000)]
    maxlb = np.size(noplb, 0)

    # Pack the outputs.
    out.msh = msh
    out.maxel = maxel
    out.maxnp = maxnp
    out.nop = nop
    out.cord = cord
    out.cordx = cordx
    out.cordy = cordy
    out.physs = physs
    out.physsl = physsl
    out.nodel = nodel
    out.nopl = nopl
    out.indd = indd

    if flag_plot:
        cmap = mpl.cm.jet
        plt.figure()
        # Same draw order and colours as before, just table-driven.
        regions = [
            (out.AIR, cmap(100, 100)),
            (out.IRON1, cmap(1, 1)),
            (out.IRON2, cmap(1, 1)),
            (out.COIL1, cmap(150, 150)),
            (out.COIL1_neg, cmap(150, 150)),
            (out.COIL2, cmap(200, 200)),
            (out.COIL2_neg, cmap(200, 200)),
        ]
        for group_id, color in regions:
            plt.triplot(cordx, cordy, nop[np.where(physs == group_id)],
                        lw=1.0, color=color)
        # Mark the Dirichlet nodes.
        plt.plot(cordx[indd], cordy[indd], 'bo')
        plt.axis('equal')
        plt.xlabel('X (m)')
        plt.ylabel('Y (m)')
        plt.show()

    return out
{
"api_name": "read_gmsh_V1.read_gmsh",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"lin... |
17883182675 | from PySide2.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QSpacerItem, QSizePolicy, QPushButton
from PySide2.QtCore import QSize, QCoreApplication
class PMReportWidget(QWidget):
    """Report tab: a 30px-tall tool bar with a right-aligned 'open in
    browser' button, above an (initially empty) result area."""

    def __init__(self):
        super().__init__()
        _translate = QCoreApplication.translate
        self.setObjectName("tab_report")
        # Root vertical layout: tool bar on top, result area below.
        self.verticalLayout_2 = QVBoxLayout(self)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # Tool-bar container, height capped at 30px.
        self.widget_2 = QWidget(self)
        self.widget_2.setMaximumSize(QSize(16777215, 30))
        self.widget_2.setObjectName("widget_2")
        self.horizontalLayout_5 = QHBoxLayout(self.widget_2)
        self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout_5.setObjectName("horizontalLayout_5")
        self.horizontalLayout_4 = QHBoxLayout()
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        # Expanding spacer pushes the button to the right edge.
        spacerItem1 = QSpacerItem(
            40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(spacerItem1)
        self.pushButton_browser_open = QPushButton(self.widget_2)
        self.pushButton_browser_open.setMinimumSize(QSize(80, 0))
        self.pushButton_browser_open.setObjectName("pushButton_browser_open")
        self.horizontalLayout_4.addWidget(self.pushButton_browser_open)
        self.horizontalLayout_5.addLayout(self.horizontalLayout_4)
        self.verticalLayout_2.addWidget(self.widget_2)
        # Result area below the tool bar (populated elsewhere).
        self.horizontalLayout_result = QHBoxLayout()
        self.horizontalLayout_result.setObjectName("horizontalLayout_result")
        spacerItem2 = QSpacerItem(
            20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
        self.horizontalLayout_result.addItem(spacerItem2)
        self.verticalLayout_2.addLayout(self.horizontalLayout_result)
        self.pushButton_browser_open.setText(_translate("MainWindow", "浏览器打开"))
| pyminer/pyminer | pyminer/lib/ui/widgets/reportwidget.py | reportwidget.py | py | 1,899 | python | en | code | 77 | github-code | 36 | [
{
"api_name": "PySide2.QtWidgets.QWidget",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.QCoreApplication.translate",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore.QCoreApplication",
"line_number": 8,
"usage_type": ... |
74796094503 | from flask import Flask, render_template, Response, jsonify, request
import settings
from flask import abort
app = Flask(__name__,
static_url_path='',
static_folder='static',
template_folder='templates')
log = settings.logging
@app.route('/')
def index():
    """Serve the single-page front end."""
    return render_template('index.html')


# In-memory fixture data; lives only for the process lifetime.
options = [
    {
        'id': 1,
        'title': u'Add new device',
        'description': u'More description for add device option',
        'status': True
    },
    {
        'id': 2,
        'title': u'View device',
        'description': u'More description for view device option',
        'status': False
    }
]


@app.route('/options/api/v1.0/options', methods=['GET'])
def get_options():
    """Return every option as JSON."""
    return jsonify({'options': options})
@app.route('/options/api/v1.0/options/<int:option_id>', methods=['GET'])
def get_task(option_id):
    """Return the single option with the given id as JSON, or 404 if absent."""
    match = next((opt for opt in options if opt['id'] == option_id), None)
    if match is None:
        abort(404)
    # Response key 'task' is kept as-is for API compatibility.
    return jsonify({'task': match})
if __name__ == '__main__':
    # Log startup BEFORE app.run(): Flask's dev server blocks until shutdown,
    # so a log call placed after it (as originally written) never runs while
    # the app is serving.
    log.debug("Started up cq app")
    app.run(host='0.0.0.0', port=80, threaded=True, debug=True)
    # HTTPS variant kept for reference:
    # app.run(host='0.0.0.0', port=443, threaded=True, ssl_context=(
    #     '/etc/letsencrypt/live/cq.jarzebak.eu/cert.pem', '/etc/letsencrypt/live/cq.jarzebak.eu/privkey.pem'))
| jarzab3/flask_docker | cq_iot/app.py | app.py | py | 1,346 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "settings.logging",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.jsonify"... |
5049725299 | import configparser
import time
from operator import attrgetter
from pathlib import Path
from typing import Dict, List, Optional, Union
import numpy as np
import torch
import tensorrt_llm
import tensorrt_llm.logger as logger
from tensorrt_llm._utils import pad_vocab_size, str_dtype_to_np
from tensorrt_llm.mapping import Mapping
from tensorrt_llm.models import GPTJForCausalLM
from tensorrt_llm.models.quantized.quant import get_dummy_quant_scales
from tensorrt_llm.quantization import QuantMode
def get_scaling_factors(
    model_path: Union[str, Path],
    num_layers: int,
    quant_mode: Optional[QuantMode] = None,
) -> Optional[Dict[str, List[int]]]:
    """ Get the scaling factors for GPT-J model

    Returns a dictionary of scaling factors for the selected layers of the
    GPT-J model.

    Args:
        model_path (str): Path to the quantized GPT-J model
        layers (list): List of layers to get the scaling factors for. If None,
            all layers are selected.

    Returns:
        dict: Dictionary of scaling factors for the selected layers of the
        GPT-J model.

        example:

            {
                'qkv_act': qkv_act_scale,
                'qkv_weights': qkv_weights_scale,
                'qkv_output' : qkv_outputs_scale,
                'dense_act': dense_act_scale,
                'dense_weights': dense_weights_scale,
                'fc_act': fc_act_scale,
                'fc_weights': fc_weights_scale,
                'proj_act': proj_act_scale,
                'proj_weights': proj_weights_scale,
            }
    """
    # No calibration file: fall back to dummy (all-ones) scales.
    if model_path is None:
        logger.warning(f"--quantized_fp8_model_path not specified. "
                       f"Initialize quantization scales automatically.")
        return get_dummy_quant_scales(num_layers)
    weight_dict = np.load(model_path)

    # yapf: disable
    scaling_factor = {
        'qkv_act': [],
        'qkv_weights': [],
        'qkv_output': [],
        'dense_act': [],
        'dense_weights': [],
        'fc_act': [],
        'fc_weights': [],
        'proj_act': [],
        'proj_weights': [],
    }

    for layer in range(num_layers):
        # QKV scales are the max over the separate q/k/v calibration entries.
        scaling_factor['qkv_act'].append(max(
            weight_dict[f'_np:layers:{layer}:attention:qkv:q:activation_scaling_factor'].item(),
            weight_dict[f'_np:layers:{layer}:attention:qkv:k:activation_scaling_factor'].item(),
            weight_dict[f'_np:layers:{layer}:attention:qkv:v:activation_scaling_factor'].item()
            ))
        scaling_factor['qkv_weights'].append(max(
            weight_dict[f'_np:layers:{layer}:attention:qkv:q:weights_scaling_factor'].item(),
            weight_dict[f'_np:layers:{layer}:attention:qkv:k:weights_scaling_factor'].item(),
            weight_dict[f'_np:layers:{layer}:attention:qkv:v:weights_scaling_factor'].item()
            ))
        if quant_mode is not None and quant_mode.has_fp8_kv_cache():
            # Not calibrarting KV cache.
            scaling_factor['qkv_output'].append(1.0)
        # NOTE(review): 'qkv_output' stays empty unless fp8 KV cache is
        # enabled, which trips the length assert below — confirm callers
        # always pass an fp8-KV quant_mode here.
        scaling_factor['dense_act'].append(weight_dict[f'_np:layers:{layer}:attention:dense:activation_scaling_factor'].item())
        scaling_factor['dense_weights'].append(weight_dict[f'_np:layers:{layer}:attention:dense:weights_scaling_factor'].item())
        scaling_factor['fc_act'].append(weight_dict[f'_np:layers:{layer}:mlp:fc:activation_scaling_factor'].item())
        scaling_factor['fc_weights'].append(weight_dict[f'_np:layers:{layer}:mlp:fc:weights_scaling_factor'].item())
        scaling_factor['proj_act'].append(weight_dict[f'_np:layers:{layer}:mlp:proj:activation_scaling_factor'].item())
        scaling_factor['proj_weights'].append(weight_dict[f'_np:layers:{layer}:mlp:proj:weights_scaling_factor'].item())
    # yapf: enable
    for k, v in scaling_factor.items():
        assert len(v) == num_layers, \
            f'Expect scaling factor {k} of length {num_layers}, got {len(v)}'

    return scaling_factor
def gen_suffix(rank, use_smooth_quant, quant_per_channel):
    """Build the weight-file suffix for a TP rank and quantization mode.

    Plain: '<rank>.bin'; SmoothQuant prepends 'int8.' and, when per-channel
    scaling is on as well, 'int8.col.'.
    """
    parts = []
    if use_smooth_quant:
        parts.append("int8.")
        if quant_per_channel:
            parts.append("col.")
    parts.append(f"{rank}.bin")
    return "".join(parts)
def extract_layer_idx(name):
    """Return the first dot-separated component of *name* made only of
    digits (as a string), or None when there is none."""
    return next((part for part in name.split('.') if part.isdigit()), None)
def split(v, tp_size, idx, dim=0):
    """Return tensor-parallel shard *idx* of array *v* cut into *tp_size*
    equal pieces along *dim*. 1-D and 2-D arrays only; other ranks -> None."""
    if tp_size == 1:
        return v
    rank = len(v.shape)
    if rank == 1:
        chunk = np.split(v, tp_size)[idx]
    elif rank == 2:
        chunk = np.split(v, tp_size, axis=dim)[idx]
    else:
        return None
    return np.ascontiguousarray(chunk)
def parse_config(ini_file):
    """Read GPT model hyper-parameters from a FasterTransformer-style
    config.ini and return them as a fixed-order tuple."""
    cfg = configparser.ConfigParser()
    cfg.read(ini_file)
    sec = 'gpt'
    n_embd = cfg.getint(sec, 'n_embd')
    n_head = cfg.getint(sec, 'n_head')
    n_layer = cfg.getint(sec, 'n_layer')
    n_positions = cfg.getint(sec, 'n_positions')
    vocab_size = cfg.getint(sec, 'vocab_size')
    do_layer_norm_before = cfg.getboolean(sec, 'do_layer_norm_before',
                                          fallback=True)
    rotary_pct = cfg.getfloat(sec, 'rotary_pct', fallback=0.0)
    hidden_act = cfg.get(sec, 'activation_function')
    bias = cfg.getboolean(sec, 'bias', fallback=True)
    inter_size = cfg.getint(sec, 'intermediate_size', fallback=None)
    dtype = cfg.get(sec, 'storage_dtype', fallback='float32')
    if inter_size is None:
        # GPT convention: FFN width defaults to 4x the hidden size.
        inter_size = 4 * n_embd
    multi_query_mode = cfg.getboolean(sec, 'multi_query_mode', fallback=False)
    prompt_num_tasks = cfg.getint(sec, 'prompt_num_tasks', fallback=0)
    prompt_max_vocab_size = cfg.getint(sec, 'prompt_max_vocab_size',
                                       fallback=0)
    return (n_embd, n_head, n_layer, n_positions, vocab_size,
            do_layer_norm_before, hidden_act, rotary_pct, bias, inter_size,
            multi_query_mode, dtype, prompt_num_tasks, prompt_max_vocab_size)
def load_from_bin_gpt_j(tensorrt_llm_gpt_j: GPTJForCausalLM,
                        dir_path,
                        rank=0,
                        tensor_parallel=1,
                        dtype='float32',
                        use_parallel_embedding=False,
                        sharding_dim=0,
                        share_embedding_table=False,
                        scaling_factors=None):
    """Populate a TensorRT-LLM GPT-J model from FasterTransformer-style raw
    *.bin weight files in *dir_path*, honouring the model's quant_mode
    (SmoothQuant, INT4/INT8 weight-only, INT8 KV cache, FP8 Q/DQ) and
    slicing per-rank shards for tensor parallelism."""
    tensorrt_llm.logger.info('Loading weights from bin...')
    tik = time.time()

    quant_mode = getattr(tensorrt_llm_gpt_j, 'quant_mode', QuantMode(0))
    if quant_mode.is_int8_weight_only():
        plugin_weight_only_quant_type = torch.int8
    elif quant_mode.is_int4_weight_only():
        plugin_weight_only_quant_type = torch.quint4x2
    # Hyper-parameters come from the companion config.ini.
    n_embd, n_head, n_layer, n_positions, vocab_size, do_layer_norm_before, hidden_act, rotary_pct, bias, inter_size, multi_query_mode, *_ = parse_config(
        Path(dir_path) / 'config.ini')
    np_dtype = str_dtype_to_np(dtype)

    def fromfile(dir_path, name, shape=None, dtype=None):
        # Read one raw tensor file; returns None when the file is absent.
        dtype = np_dtype if dtype is None else dtype
        p = dir_path + '/' + name
        if Path(p).exists():
            t = np.fromfile(p, dtype=dtype)
            if shape is not None:
                t = t.reshape(shape)
            return t
        return None

    def set_smoothquant_scale_factors(module,
                                      pre_scale_weight,
                                      dir_path,
                                      basename,
                                      shape,
                                      per_tok_dyn,
                                      per_channel,
                                      is_qkv=False,
                                      rank=None):
        # Load the SmoothQuant calibration scales for one GEMM module.
        suffix = "bin"
        if per_channel:
            if rank is not None:
                suffix = f"{rank}." + suffix
            suffix = "col." + suffix
        col_shape = shape if (per_channel or is_qkv) else [1, 1]
        if per_tok_dyn:
            # Per-token dynamic: input scale is identity, only weight scales load.
            if pre_scale_weight is not None:
                pre_scale_weight.value = np.array([1.0], dtype=np.float32)
            t = fromfile(dir_path, f"{basename}scale_w_quant_orig.{suffix}",
                         col_shape, np.float32)
            module.per_channel_scale.value = t
        else:
            t = fromfile(dir_path, f"{basename}scale_x_orig_quant.bin", [1],
                         np.float32)
            pre_scale_weight.value = t
            t = fromfile(dir_path, f"{basename}scale_y_accum_quant.{suffix}",
                         col_shape, np.float32)
            module.per_channel_scale.value = t
            t = fromfile(dir_path, f"{basename}scale_y_quant_orig.bin", [1, 1],
                         np.float32)
            module.act_scale.value = t

    # Do we use SmoothQuant?
    use_smooth_quant = quant_mode.has_act_and_weight_quant()
    # Do we use quantization per token?
    quant_per_token_dyn = quant_mode.has_per_token_dynamic_scaling()
    # Do we use quantization per channel?
    quant_per_channel = quant_mode.has_per_channel_scaling()
    # Do we use INT4/INT8 weight-only?
    use_weight_only = quant_mode.is_weight_only()
    # Int8 KV cache
    use_int8_kv_cache = quant_mode.has_int8_kv_cache()
    # Enable FP8 Gemm
    enable_fp8_qdq = quant_mode.has_fp8_qdq()

    def sq_trick(x):
        # SmoothQuant weights travel as int8 reinterpreted as float32 words.
        return x.view(np.float32) if use_smooth_quant else x

    # Debug
    suffix = gen_suffix(rank, use_smooth_quant, quant_per_channel)
    # The type of weights.
    w_type = np_dtype if not use_smooth_quant else np.int8

    # pe = fromfile(dir_path, 'model.wpe.bin', [n_positions, n_embd])
    # if pe is not None:
    #     tensorrt_llm_gpt_j.embedding.position_embedding.weight.value = (pe)
    vocab_embedding_weight = fromfile(dir_path, 'model.wte.bin',
                                      [vocab_size, n_embd])
    if not use_parallel_embedding:
        tensorrt_llm_gpt_j.embedding.weight.value = vocab_embedding_weight
    else:
        if sharding_dim == 0:
            if vocab_size % tensor_parallel != 0:
                # padding
                vocab_size_padded = pad_vocab_size(
                    tensorrt_llm_gpt_j.embedding.num_embeddings,
                    tensor_parallel)
                pad_width = vocab_size_padded - vocab_size
                vocab_embedding_weight = np.pad(vocab_embedding_weight,
                                                ((0, pad_width), (0, 0)),
                                                'constant',
                                                constant_values=0)
        tensorrt_llm_gpt_j.embedding.weight.value = np.ascontiguousarray(
            split(vocab_embedding_weight,
                  tensor_parallel,
                  rank,
                  dim=sharding_dim))

    if do_layer_norm_before:
        tensorrt_llm_gpt_j.ln_f.bias.value = (fromfile(
            dir_path, 'model.final_layernorm.bias.bin'))
        tensorrt_llm_gpt_j.ln_f.weight.value = (fromfile(
            dir_path, 'model.final_layernorm.weight.bin'))

    # share input embedding
    if not share_embedding_table:
        lm_head_weight = fromfile(dir_path, 'model.lm_head.weight.bin',
                                  [vocab_size, n_embd])
        lm_head_bias = fromfile(dir_path, 'model.lm_head.bias.bin',
                                [vocab_size])
        if lm_head_weight is None:
            # Fall back to tying the head to the input embedding.
            lm_head_weight = fromfile(dir_path, 'model.wte.bin',
                                      [vocab_size, n_embd])
        if vocab_size % tensor_parallel != 0:
            # padding
            vocab_size_padded = tensorrt_llm_gpt_j.lm_head.out_features * tensor_parallel
            pad_width = vocab_size_padded - vocab_size
            lm_head_weight = np.pad(lm_head_weight, ((0, pad_width), (0, 0)),
                                    'constant',
                                    constant_values=0)
        tensorrt_llm_gpt_j.lm_head.weight.value = np.ascontiguousarray(
            split(lm_head_weight, tensor_parallel, rank))
        tensorrt_llm_gpt_j.lm_head.bias.value = np.ascontiguousarray(
            split(lm_head_bias, tensor_parallel, rank))

    fake_fp8_sf_dt = np.float32
    # Per-layer weights: layernorm, QKV, attention dense, MLP fc/proj.
    for i in range(n_layer):
        c_attn_out_dim = (3 * n_embd //
                          tensor_parallel) if not multi_query_mode else (
                              n_embd // tensor_parallel +
                              (n_embd // n_head) * 2)
        tensorrt_llm_gpt_j.layers[i].input_layernorm.weight.value = (fromfile(
            dir_path, 'model.layers.' + str(i) + '.input_layernorm.weight.bin'))
        tensorrt_llm_gpt_j.layers[i].input_layernorm.bias.value = (fromfile(
            dir_path, 'model.layers.' + str(i) + '.input_layernorm.bias.bin'))
        # Fused QKV projection.
        t = fromfile(
            dir_path, 'model.layers.' + str(i) +
            '.attention.query_key_value.weight.' + suffix,
            [n_embd, c_attn_out_dim], w_type)
        if t is not None:
            dst = tensorrt_llm_gpt_j.layers[i].attention.qkv.weight
            if use_smooth_quant:
                dst.value = sq_trick(
                    np.ascontiguousarray(np.transpose(t, [1, 0])))
                set_smoothquant_scale_factors(
                    tensorrt_llm_gpt_j.layers[i].attention.qkv,
                    tensorrt_llm_gpt_j.layers[i].input_layernorm.scale_to_int,
                    dir_path,
                    'model.layers.' + str(i) + '.attention.query_key_value.',
                    [1, c_attn_out_dim],
                    quant_per_token_dyn,
                    quant_per_channel,
                    rank=rank,
                    is_qkv=True)
            elif use_weight_only:
                processed_torch_weights, torch_weight_scales = torch.ops.fastertransformer.symmetric_quantize_last_axis_of_batched_matrix(
                    torch.tensor(t), plugin_weight_only_quant_type)
                dst.value = processed_torch_weights.numpy()
                scales = tensorrt_llm_gpt_j.layers[
                    i].attention.qkv.per_channel_scale
                scales.value = torch_weight_scales.numpy()
            else:
                dst.value = np.ascontiguousarray(np.transpose(t, [1, 0]))
        if enable_fp8_qdq:
            # FP8 static scales come from the calibration dict, not from files.
            tensorrt_llm_gpt_j.layers[
                i].attention.qkv.activation_scaling_factor.value = np.array(
                    [scaling_factors['qkv_act'][i]], dtype=fake_fp8_sf_dt)
            tensorrt_llm_gpt_j.layers[
                i].attention.qkv.weights_scaling_factor.value = np.array(
                    [scaling_factors['qkv_weights'][i]], dtype=fake_fp8_sf_dt)
            tensorrt_llm_gpt_j.layers[
                i].attention.kv_orig_quant_scale.value = np.array(
                    [scaling_factors['qkv_output'][i]], dtype=np.float32)
            tensorrt_llm_gpt_j.layers[
                i].attention.kv_quant_orig_scale.value = np.array(
                    [1.0 / scaling_factors['qkv_output'][i]], dtype=np.float32)

        # Attention output projection.
        dst = tensorrt_llm_gpt_j.layers[i].attention.dense.weight
        t = fromfile(
            dir_path,
            'model.layers.' + str(i) + '.attention.dense.weight.' + suffix,
            [n_embd // tensor_parallel, n_embd], w_type)
        if use_smooth_quant:
            dst.value = sq_trick(np.ascontiguousarray(np.transpose(t, [1, 0])))
            dense_scale = getattr(tensorrt_llm_gpt_j.layers[i].attention,
                                  "quantization_scaling_factor", None)
            set_smoothquant_scale_factors(
                tensorrt_llm_gpt_j.layers[i].attention.dense, dense_scale,
                dir_path, 'model.layers.' + str(i) + '.attention.dense.',
                [1, n_embd], quant_per_token_dyn, quant_per_channel)
            # change it to the real smoother if dense layer is applied smooth quant
            tensorrt_llm_gpt_j.layers[
                i].attention.dense.smoother.value = np.ones(
                    [1, n_embd // tensor_parallel], dtype=np.float32)
        elif use_weight_only:
            processed_torch_weights, torch_weight_scales = torch.ops.fastertransformer.symmetric_quantize_last_axis_of_batched_matrix(
                torch.tensor(t), plugin_weight_only_quant_type)
            dst.value = processed_torch_weights.numpy()
            scales = tensorrt_llm_gpt_j.layers[
                i].attention.dense.per_channel_scale
            scales.value = torch_weight_scales.numpy()
        else:
            dst.value = np.ascontiguousarray(np.transpose(t, [1, 0]))
        if enable_fp8_qdq:
            tensorrt_llm_gpt_j.layers[
                i].attention.dense.activation_scaling_factor.value = np.array(
                    [scaling_factors['dense_act'][i]], dtype=fake_fp8_sf_dt)
            tensorrt_llm_gpt_j.layers[
                i].attention.dense.weights_scaling_factor.value = np.array(
                    [scaling_factors['dense_weights'][i]], dtype=fake_fp8_sf_dt)

        # MLP up-projection (fc).
        t = fromfile(
            dir_path,
            'model.layers.' + str(i) + '.mlp.dense_h_to_4h.weight.' + suffix,
            [n_embd, inter_size // tensor_parallel], w_type)
        if use_smooth_quant:
            tensorrt_llm_gpt_j.layers[i].mlp.fc.weight.value = sq_trick(
                np.ascontiguousarray(np.transpose(t, [1, 0])))
            set_smoothquant_scale_factors(
                tensorrt_llm_gpt_j.layers[i].mlp.fc,
                tensorrt_llm_gpt_j.layers[i].post_layernorm.scale_to_int,
                dir_path,
                'model.layers.' + str(i) + '.mlp.dense_h_to_4h.',
                [1, inter_size // tensor_parallel],
                quant_per_token_dyn,
                quant_per_channel,
                rank=rank)
        elif use_weight_only:
            dst = tensorrt_llm_gpt_j.layers[i].mlp.fc.weight
            processed_torch_weights, torch_weight_scales = torch.ops.fastertransformer.symmetric_quantize_last_axis_of_batched_matrix(
                torch.tensor(t), plugin_weight_only_quant_type)
            dst.value = processed_torch_weights.numpy()
            scales = tensorrt_llm_gpt_j.layers[i].mlp.fc.per_channel_scale
            scales.value = torch_weight_scales.numpy()
        else:
            tensorrt_llm_gpt_j.layers[
                i].mlp.fc.weight.value = np.ascontiguousarray(
                    np.transpose(t, [1, 0]))
        if bias:
            tensorrt_llm_gpt_j.layers[i].mlp.fc.bias.value = fromfile(
                dir_path, 'model.layers.' + str(i) +
                '.mlp.dense_h_to_4h.bias.' + str(rank) + '.bin')
        if enable_fp8_qdq:
            tensorrt_llm_gpt_j.layers[
                i].mlp.fc.activation_scaling_factor.value = np.array(
                    [scaling_factors['fc_act'][i]], dtype=fake_fp8_sf_dt)
            tensorrt_llm_gpt_j.layers[
                i].mlp.fc.weights_scaling_factor.value = np.array(
                    [scaling_factors['fc_weights'][i]], dtype=fake_fp8_sf_dt)

        # MLP down-projection (proj).
        t = fromfile(
            dir_path,
            'model.layers.' + str(i) + '.mlp.dense_4h_to_h.weight.' + suffix,
            [inter_size // tensor_parallel, n_embd], w_type)
        if use_smooth_quant:
            tensorrt_llm_gpt_j.layers[i].mlp.proj.weight.value = sq_trick(
                np.ascontiguousarray(np.transpose(t, [1, 0])))
            proj_scale = getattr(tensorrt_llm_gpt_j.layers[i].mlp,
                                 "quantization_scaling_factor", None)
            set_smoothquant_scale_factors(
                tensorrt_llm_gpt_j.layers[i].mlp.proj, proj_scale, dir_path,
                'model.layers.' + str(i) + '.mlp.dense_4h_to_h.', [1, n_embd],
                quant_per_token_dyn, quant_per_channel)
            # change it to the real smoother if proj layer is applied smooth quant
            tensorrt_llm_gpt_j.layers[i].mlp.proj.smoother.value = np.ones(
                [1, inter_size // tensor_parallel], dtype=np.float32)
        elif use_weight_only:
            dst = tensorrt_llm_gpt_j.layers[i].mlp.proj.weight
            processed_torch_weights, torch_weight_scales = torch.ops.fastertransformer.symmetric_quantize_last_axis_of_batched_matrix(
                torch.tensor(t), plugin_weight_only_quant_type)
            dst.value = processed_torch_weights.numpy()
            scales = tensorrt_llm_gpt_j.layers[i].mlp.proj.per_channel_scale
            scales.value = torch_weight_scales.numpy()
        else:
            tensorrt_llm_gpt_j.layers[i].mlp.proj.weight.value = (
                np.ascontiguousarray(np.transpose(t, [1, 0])))
        if bias:
            tensorrt_llm_gpt_j.layers[i].mlp.proj.bias.value = fromfile(
                dir_path,
                'model.layers.' + str(i) + '.mlp.dense_4h_to_h.bias.bin')

        if use_int8_kv_cache:
            # KV-cache quantization scale read from the calibration sidecar file.
            t = fromfile(
                dir_path, 'model.layers.' + str(i) +
                '.attention.query_key_value.scale_y_quant_orig.bin', [1],
                np.float32)
            tensorrt_llm_gpt_j.layers[
                i].attention.kv_orig_quant_scale.value = 1.0 / t
            tensorrt_llm_gpt_j.layers[i].attention.kv_quant_orig_scale.value = t

        if enable_fp8_qdq:
            tensorrt_llm_gpt_j.layers[
                i].mlp.proj.activation_scaling_factor.value = np.array(
                    [scaling_factors['proj_act'][i]], dtype=fake_fp8_sf_dt)
            tensorrt_llm_gpt_j.layers[
                i].mlp.proj.weights_scaling_factor.value = np.array(
                    [scaling_factors['proj_weights'][i]], dtype=fake_fp8_sf_dt)

    tok = time.time()
    t = time.strftime('%H:%M:%S', time.gmtime(tok - tik))
    tensorrt_llm.logger.info(f'Weights loaded. Total time: {t}')
def load_from_hf_gpt_j(tensorrt_llm_gpt_j: GPTJForCausalLM,
                       hf_gpt_j,
                       fp16=False,
                       scaling_factors=None):
    """Copy weights from a HuggingFace GPT-J checkpoint into a TensorRT-LLM
    model, optionally applying INT8/INT4 weight-only quantization and FP8
    static scaling factors."""
    # HF attribute names and their index-aligned TRT-LLM counterparts.
    hf_model_gptj_block_names = [
        "ln_1.weight",
        "ln_1.bias",
        "mlp.fc_in.weight",
        "mlp.fc_in.bias",
        "mlp.fc_out.weight",
        "mlp.fc_out.bias",
    ]
    tensorrt_llm_model_gptj_block_names = [
        "input_layernorm.weight",
        "input_layernorm.bias",
        "mlp.fc.weight",
        "mlp.fc.bias",
        "mlp.proj.weight",
        "mlp.proj.bias",
    ]
    quant_mode = getattr(tensorrt_llm_gpt_j, 'quant_mode', QuantMode(0))
    if quant_mode.is_int8_weight_only():
        plugin_weight_only_quant_type = torch.int8
    elif quant_mode.is_int4_weight_only():
        plugin_weight_only_quant_type = torch.quint4x2
    # Do we use INT4/INT8 weight-only?
    use_weight_only = quant_mode.is_weight_only()

    tensorrt_llm.logger.info('Loading weights from HF GPT-J...')
    tik = time.time()

    torch_dtype = torch.float16 if fp16 else torch.float32
    hf_gpt_j_state_dict = hf_gpt_j.state_dict()

    # Token embedding table.
    v = hf_gpt_j_state_dict.get('transformer.wte.weight')
    tensorrt_llm_gpt_j.embedding.weight.value = v.to(torch_dtype).cpu().numpy()
    n_layer = hf_gpt_j.config.n_layer

    for layer_idx in range(n_layer):
        prefix = "transformer.h." + str(layer_idx) + "."
        # Layer norms plus MLP weights/biases via the index-aligned tables;
        # idx 2 == mlp.fc.weight, idx 4 == mlp.proj.weight.
        for idx, hf_attr in enumerate(hf_model_gptj_block_names):
            v = hf_gpt_j_state_dict.get(prefix + hf_attr)
            layer = attrgetter(tensorrt_llm_model_gptj_block_names[idx])(
                tensorrt_llm_gpt_j.layers[layer_idx])
            if idx == 2 and scaling_factors:
                tensorrt_llm_gpt_j.layers[
                    layer_idx].mlp.fc.activation_scaling_factor.value = np.array(
                        [scaling_factors['fc_act'][layer_idx]],
                        dtype=np.float32)
                tensorrt_llm_gpt_j.layers[
                    layer_idx].mlp.fc.weights_scaling_factor.value = np.array(
                        [scaling_factors['fc_weights'][layer_idx]],
                        dtype=np.float32)
            elif idx == 4 and scaling_factors:
                tensorrt_llm_gpt_j.layers[
                    layer_idx].mlp.proj.activation_scaling_factor.value = np.array(
                        [scaling_factors['proj_act'][layer_idx]],
                        dtype=np.float32)
                tensorrt_llm_gpt_j.layers[
                    layer_idx].mlp.proj.weights_scaling_factor.value = np.array(
                        [scaling_factors['proj_weights'][layer_idx]],
                        dtype=np.float32)
            if use_weight_only and (idx == 2 or idx == 4):
                # Only the two MLP weight matrices get weight-only quantized.
                processed_torch_weights, torch_weight_scales = \
                    torch.ops.fastertransformer.symmetric_quantize_last_axis_of_batched_matrix(
                    v.transpose(0, 1).contiguous(), plugin_weight_only_quant_type
                )
                layer.value = processed_torch_weights.numpy()
                if idx == 2:
                    scales = tensorrt_llm_gpt_j.layers[
                        layer_idx].mlp.fc.per_channel_scale
                elif idx == 4:
                    scales = tensorrt_llm_gpt_j.layers[
                        layer_idx].mlp.proj.per_channel_scale
                scales.value = torch_weight_scales.numpy()
            else:
                setattr(layer, 'value', v.to(torch_dtype).cpu().numpy())

        # Attention QKV Linear
        # concatenate the Q, K, V layers weights.
        q_weights = hf_gpt_j_state_dict.get(prefix + "attn.q_proj.weight")
        k_weights = hf_gpt_j_state_dict.get(prefix + "attn.k_proj.weight")
        v_weights = hf_gpt_j_state_dict.get(prefix + "attn.v_proj.weight")
        qkv_weights = torch.cat((q_weights, k_weights, v_weights))
        layer = attrgetter("attention.qkv.weight")(
            tensorrt_llm_gpt_j.layers[layer_idx])
        if use_weight_only:
            processed_torch_weights, torch_weight_scales = \
                torch.ops.fastertransformer.symmetric_quantize_last_axis_of_batched_matrix(
                qkv_weights.transpose(0, 1).contiguous(), plugin_weight_only_quant_type)
            layer.value = processed_torch_weights.numpy()
            scales = tensorrt_llm_gpt_j.layers[
                layer_idx].attention.qkv.per_channel_scale
            scales.value = torch_weight_scales.numpy()
        else:
            setattr(layer, "value", qkv_weights.to(torch_dtype).cpu().numpy())

        if scaling_factors:
            tensorrt_llm_gpt_j.layers[
                layer_idx].attention.qkv.activation_scaling_factor.value = np.array(
                    [scaling_factors['qkv_act'][layer_idx]], dtype=np.float32)
            tensorrt_llm_gpt_j.layers[
                layer_idx].attention.qkv.weights_scaling_factor.value = np.array(
                    [scaling_factors['qkv_weights'][layer_idx]],
                    dtype=np.float32)

        if quant_mode.has_fp8_kv_cache():
            if scaling_factors:
                tensorrt_llm_gpt_j.layers[
                    layer_idx].attention.kv_orig_quant_scale.value = np.array(
                        [scaling_factors['qkv_output'][layer_idx]],
                        dtype=np.float32)
                tensorrt_llm_gpt_j.layers[
                    layer_idx].attention.kv_quant_orig_scale.value = np.array(
                        [1.0 / scaling_factors['qkv_output'][layer_idx]],
                        dtype=np.float32)

        # Attention Dense (out_proj) Linear
        v = hf_gpt_j_state_dict.get(prefix + "attn.out_proj.weight")
        layer = attrgetter("attention.dense.weight")(
            tensorrt_llm_gpt_j.layers[layer_idx])
        if use_weight_only:
            processed_torch_weights, torch_weight_scales = \
                torch.ops.fastertransformer.symmetric_quantize_last_axis_of_batched_matrix(
                v.transpose(0, 1).contiguous(), plugin_weight_only_quant_type)
            layer.value = processed_torch_weights.numpy()
            scales = tensorrt_llm_gpt_j.layers[
                layer_idx].attention.dense.per_channel_scale
            scales.value = torch_weight_scales.numpy()
        else:
            setattr(layer, "value", v.to(torch_dtype).cpu().numpy())

        if scaling_factors:
            tensorrt_llm_gpt_j.layers[
                layer_idx].attention.dense.activation_scaling_factor.value = np.array(
                    [scaling_factors['dense_act'][layer_idx]], dtype=np.float32)
            tensorrt_llm_gpt_j.layers[
                layer_idx].attention.dense.weights_scaling_factor.value = np.array(
                    [scaling_factors['dense_weights'][layer_idx]],
                    dtype=np.float32)

    # Final layer norm and LM head.
    v = hf_gpt_j_state_dict.get('transformer.ln_f.weight')
    tensorrt_llm_gpt_j.ln_f.weight.value = v.to(torch_dtype).cpu().numpy()

    v = hf_gpt_j_state_dict.get('transformer.ln_f.bias')
    tensorrt_llm_gpt_j.ln_f.bias.value = v.to(torch_dtype).cpu().numpy()

    v = hf_gpt_j_state_dict.get('lm_head.weight')
    tensorrt_llm_gpt_j.lm_head.weight.value = v.to(torch_dtype).cpu().numpy()

    v = hf_gpt_j_state_dict.get('lm_head.bias')
    tensorrt_llm_gpt_j.lm_head.bias.value = v.to(torch_dtype).cpu().numpy()

    tok = time.time()
    t = time.strftime('%H:%M:%S', time.gmtime(tok - tik))
    tensorrt_llm.logger.info(f'Weights loaded. Total time: {t}')
def load_from_awq_gpt_j(tensorrt_llm_gpt_j: GPTJForCausalLM,
                        awq_gpt_j,
                        config,
                        mapping=Mapping(),
                        fp16=False,
                        group_size=128,
                        ft_model_dir=None):
    """Load AWQ (4-bit group-quantized) GPT-J weights into a TRT-LLM model.

    Args:
        tensorrt_llm_gpt_j: target TRT-LLM GPTJForCausalLM whose parameter
            `.value` fields are assigned in place.
        awq_gpt_j: dict-like state dict produced by AWQ quantization; holds
            weights plus `*_quantizer._amax` and
            `*input_quantizer._pre_quant_scale` tensors.
        config: model config dict; only "n_layer" is read here.
        mapping: tensor-parallel mapping (tp_size / tp_rank used for splits).
            NOTE(review): mutable default `Mapping()` is shared across calls —
            assumed stateless; confirm.
        fp16: store non-quantized tensors as float16 instead of float32.
        group_size: AWQ quantization group size along the K dimension.
        ft_model_dir: FasterTransformer export dir; required only when the
            model uses an INT8 KV cache (scale files are read from it).
    """
    # AWQ attribute names that are copied verbatim (bias / layernorm tensors),
    # paired index-for-index with the TRT-LLM attribute names below.
    awq_gptj_block_names = [
        "ln_1.weight",
        "ln_1.bias",
        "mlp.fc_in.bias",
        "mlp.fc_out.bias",
    ]
    tensorrt_llm_model_gptj_block_names = [
        "input_layernorm.weight",
        "input_layernorm.bias",
        "mlp.fc.bias",
        "mlp.proj.bias",
    ]
    # Read a flat binary tensor from `dir_path/name`; None when absent.
    def fromfile(dir_path, name, shape=None, dtype=None):
        p = dir_path + '/' + name
        if Path(p).exists():
            t = np.fromfile(p, dtype=dtype)
            if shape is not None:
                t = t.reshape(shape)
            return t
        return None
    quant_mode = getattr(tensorrt_llm_gpt_j, 'quant_mode', QuantMode(0))
    # Int8 KV cache
    use_int8_kv_cache = quant_mode.has_int8_kv_cache()
    packer = torch.ops.fastertransformer.pack_int8_tensor_to_packed_int4
    preprocessor = torch.ops.fastertransformer.preprocess_weights_for_mixed_gemm
    tensorrt_llm.logger.info('Loading weights from AWQ GPT-J...')
    tik = time.time()
    torch_dtype = torch.float16 if fp16 else torch.float32
    # Quantize fp weight to int4 with per-group `scale`, then pack two int4
    # values per int8 and preprocess the layout for the mixed-GEMM kernels.
    def AWQ_quantize_pack_preprocess(weight, scale):
        scale = scale.repeat_interleave(group_size, dim=0)
        weight = weight / scale
        qweight_int8 = torch.clamp(torch.round(weight.cuda()).char(), -8, 7)
        int4_weight = packer(qweight_int8.cpu())
        int4_weight = preprocessor(int4_weight, torch.quint4x2)
        return int4_weight.view(torch.int8).cpu().numpy()
    # Quantize one linear layer and assign qweight / scale / pre_quant_scale.
    # `tp_dim` selects which axis is sharded for tensor parallelism.
    def process_and_assign_weight(awq_gpt_j, mPrefix, mOp, tp_dim=0):
        weight = awq_gpt_j[mPrefix + ".weight"].T.contiguous()
        [k, n] = weight.shape
        weight = weight.split(weight.shape[tp_dim] // mapping.tp_size,
                              dim=tp_dim)[mapping.tp_rank]
        amax = awq_gpt_j[mPrefix + ".weight_quantizer._amax"].reshape(
            (n, int(k / group_size))).T.contiguous()
        amax = amax.split(amax.shape[tp_dim] // mapping.tp_size,
                          dim=tp_dim)[mapping.tp_rank]
        pre_quant_scale = awq_gpt_j[
            mPrefix + ".input_quantizer._pre_quant_scale"].reshape((1, k))
        # Only a K-dim (row) shard splits the activation scale as well.
        if tp_dim == 0:
            pre_quant_scale = pre_quant_scale.split(k // mapping.tp_size,
                                                    dim=1)[mapping.tp_rank]
        # int4 symmetric range is [-8, 7], hence amax / 8.
        scale = amax / 8.0
        mOp.qweight.value = AWQ_quantize_pack_preprocess(weight, scale)
        mOp.scale.value = scale.to(torch_dtype).cpu().numpy()
        mOp.pre_quant_scale.value = pre_quant_scale.to(
            torch_dtype).cpu().numpy()
    # Undo the AWQ activation smoothing baked into `weight`.
    def deSmooth(weight, pre_quant_scale):
        [k, n] = weight.shape
        pre_quant_scale = pre_quant_scale.repeat(
            (n, 1)).transpose(1, 0).contiguous()
        weight = weight * pre_quant_scale
        return weight
    # Re-apply smoothing with a (different) scale.
    def reSmooth(weight, pre_quant_scale):
        [k, n] = weight.shape
        pre_quant_scale = pre_quant_scale.repeat(
            (n, 1)).transpose(1, 0).contiguous()
        weight = weight / pre_quant_scale
        return weight
    # Recompute the per-group int4 scale (amax/8) from the smoothed weight.
    def get_scale(weight):
        weight = weight.T.contiguous()
        [n, k] = weight.shape
        weight = weight.reshape(n, int(k / group_size), group_size)
        weight = torch.abs(weight.reshape(-1, group_size))
        amax, idx = weight.max(1)
        amax = amax.reshape(n, int(k / group_size)).T.contiguous()
        return amax / 8
    # Swap a layer's smoothing scale for the shared (averaged) one and
    # return the re-smoothed weight plus its refreshed quant scale.
    def reSmooth_and_get_scale(weight, pre_quant_scale, avg_pre_quant_scale):
        weight = deSmooth(weight, pre_quant_scale)
        weight = reSmooth(weight, avg_pre_quant_scale)
        scale = get_scale(weight)
        return weight, scale
    # Fuse Q, K, V into one linear: they must share a single pre_quant_scale,
    # so each is re-smoothed with the average of the three scales first.
    def process_and_assign_qkv_weight(awq_gpt_j, prefix, mOp):
        q_weight = awq_gpt_j[prefix + "attn.q_proj.weight"].T.contiguous()
        k_weight = awq_gpt_j[prefix + "attn.k_proj.weight"].T.contiguous()
        v_weight = awq_gpt_j[prefix + "attn.v_proj.weight"].T.contiguous()
        k = q_weight.shape[0]
        q_weight = q_weight.split(q_weight.shape[1] // mapping.tp_size,
                                  dim=1)[mapping.tp_rank]
        k_weight = k_weight.split(k_weight.shape[1] // mapping.tp_size,
                                  dim=1)[mapping.tp_rank]
        v_weight = v_weight.split(v_weight.shape[1] // mapping.tp_size,
                                  dim=1)[mapping.tp_rank]
        q_pre_quant_scale = awq_gpt_j[
            prefix + "attn.q_proj.input_quantizer._pre_quant_scale"].reshape(
                (1, k))
        k_pre_quant_scale = awq_gpt_j[
            prefix + "attn.k_proj.input_quantizer._pre_quant_scale"].reshape(
                (1, k))
        v_pre_quant_scale = awq_gpt_j[
            prefix + "attn.v_proj.input_quantizer._pre_quant_scale"].reshape(
                (1, k))
        qkv_pre_quant_scale = (q_pre_quant_scale + k_pre_quant_scale +
                               v_pre_quant_scale) / 3.0
        q_weight, q_scale = reSmooth_and_get_scale(q_weight, q_pre_quant_scale,
                                                   qkv_pre_quant_scale)
        k_weight, k_scale = reSmooth_and_get_scale(k_weight, k_pre_quant_scale,
                                                   qkv_pre_quant_scale)
        v_weight, v_scale = reSmooth_and_get_scale(v_weight, v_pre_quant_scale,
                                                   qkv_pre_quant_scale)
        qkv_weights = torch.cat((q_weight, k_weight, v_weight), dim=1)
        qkv_scale = torch.cat((q_scale, k_scale, v_scale), dim=1)
        mOp.pre_quant_scale.value = qkv_pre_quant_scale.to(
            torch_dtype).cpu().numpy()
        mOp.qweight.value = AWQ_quantize_pack_preprocess(qkv_weights, qkv_scale)
        mOp.scale.value = qkv_scale.to(torch_dtype).cpu().numpy()
    # Check if we need to pad the vocab so it is divisible by 64.
    v = awq_gpt_j.get('transformer.wte.weight')
    [vocab_size, k] = v.shape
    pad_vocab = False
    pad_vocab_size = vocab_size
    if vocab_size % 64 != 0:
        pad_vocab = True
        pad_vocab_size = int((vocab_size + 63) / 64) * 64
    if pad_vocab:
        new_v = torch.zeros([pad_vocab_size, k])
        new_v[:vocab_size, :] = v
        v = new_v
    tensorrt_llm_gpt_j.embedding.weight.value = v.to(torch_dtype).cpu().numpy()
    n_layer = config["n_layer"]
    for layer_idx in range(n_layer):
        prefix = "transformer.h." + str(layer_idx) + "."
        tensorrt_llm.logger.info(f'Process weights in layer: {layer_idx}')
        # Copy bias / layernorm tensors one-to-one by the paired name lists.
        for idx, awq_attr in enumerate(awq_gptj_block_names):
            v = awq_gpt_j[prefix + awq_attr]
            # NOTE(review): these two branches use mapping.rank while the
            # quantized splits above use mapping.tp_rank — confirm they are
            # interchangeable for this Mapping configuration.
            if awq_attr == "mlp.fc_in.bias":
                v = v.split(v.shape[0] // mapping.tp_size, dim=0)[mapping.rank]
            elif awq_attr == "mlp.fc_out.bias":
                v = torch.zeros_like(v) if mapping.rank != 0 else v
            layer = attrgetter(tensorrt_llm_model_gptj_block_names[idx])(
                tensorrt_llm_gpt_j.layers[layer_idx])
            setattr(layer, 'value', v.to(torch_dtype).cpu().numpy())
        # Attention QKV Linear
        # concatenate the Q, K, V layers weights.
        process_and_assign_qkv_weight(
            awq_gpt_j, prefix,
            tensorrt_llm_gpt_j.layers[layer_idx].attention.qkv)
        # Attention Dense (out_proj) Linear
        mPrefix = prefix + "attn.out_proj"
        mOp = tensorrt_llm_gpt_j.layers[layer_idx].attention.dense
        process_and_assign_weight(awq_gpt_j, mPrefix, mOp, 0)
        # MLP Dense (mlp.fc) Linear
        mPrefix = prefix + "mlp.fc_in"
        mOp = tensorrt_llm_gpt_j.layers[layer_idx].mlp.fc
        process_and_assign_weight(awq_gpt_j, mPrefix, mOp, 1)
        # MLP Dense (mlp.proj) Linear
        mPrefix = prefix + "mlp.fc_out"
        mOp = tensorrt_llm_gpt_j.layers[layer_idx].mlp.proj
        process_and_assign_weight(awq_gpt_j, mPrefix, mOp, 0)
        # INT8 KV cache scales come from the FasterTransformer export dir.
        if use_int8_kv_cache:
            assert ft_model_dir, "You must pass --ft_model_dir to tell TRT-LLM where to look for scales of INT8 kv cache."
            t = fromfile(
                ft_model_dir, 'model.layers.' + str(layer_idx) +
                '.attention.query_key_value.scale_y_quant_orig.bin', [1],
                np.float32)
            assert t is not None, f"{ft_model_dir} does not contain model.layers.{layer_idx}.attention.query_key_value.scale_y_quant_orig.bin"
            tensorrt_llm_gpt_j.layers[
                layer_idx].attention.kv_orig_quant_scale.value = 1.0 / t
            tensorrt_llm_gpt_j.layers[
                layer_idx].attention.kv_quant_orig_scale.value = t
    v = awq_gpt_j['transformer.ln_f.weight']
    tensorrt_llm_gpt_j.ln_f.weight.value = v.to(torch_dtype).cpu().numpy()
    v = awq_gpt_j['transformer.ln_f.bias']
    tensorrt_llm_gpt_j.ln_f.bias.value = v.to(torch_dtype).cpu().numpy()
    # lm_head: pad weight/amax/bias to pad_vocab_size before quantizing,
    # otherwise quantize it like any other column-parallel linear.
    if pad_vocab:
        weight = awq_gpt_j['lm_head.weight']
        [vocab_size, k] = weight.shape
        new_weight = torch.zeros([pad_vocab_size, k])
        new_weight[:vocab_size, :] = weight
        new_weight = new_weight.T.contiguous()
        new_weight = new_weight.split(new_weight.shape[1] // mapping.tp_size,
                                      dim=1)[mapping.tp_rank]
        amax = awq_gpt_j['lm_head.weight_quantizer._amax'].reshape(
            [vocab_size, int(k / group_size)])
        # Padding rows use amax=1 so their (all-zero) weights quantize cleanly.
        new_amax = torch.ones([pad_vocab_size, int(k / group_size)])
        new_amax[:vocab_size, :] = amax
        new_amax = new_amax.T.contiguous()
        new_amax = new_amax.split(new_amax.shape[1] // mapping.tp_size,
                                  dim=1)[mapping.tp_rank]
        new_scale = new_amax / 8
        tensorrt_llm_gpt_j.lm_head.qweight.value = AWQ_quantize_pack_preprocess(
            new_weight, new_scale)
        tensorrt_llm_gpt_j.lm_head.scale.value = new_scale.to(
            torch_dtype).cpu().numpy()
        tensorrt_llm_gpt_j.lm_head.pre_quant_scale.value = awq_gpt_j[
            'lm_head.input_quantizer._pre_quant_scale'].to(
                torch_dtype).cpu().numpy()
        bias = awq_gpt_j['lm_head.bias']
        new_bias = torch.zeros([pad_vocab_size])
        new_bias[:vocab_size] = bias
        new_bias = new_bias.split(pad_vocab_size // mapping.tp_size,
                                  dim=0)[mapping.tp_rank]
        tensorrt_llm_gpt_j.lm_head.bias.value = new_bias.to(
            torch_dtype).cpu().numpy()
    else:
        mPrefix = "lm_head"
        mOp = tensorrt_llm_gpt_j.lm_head
        process_and_assign_weight(awq_gpt_j, mPrefix, mOp, 1)
        v = awq_gpt_j['lm_head.bias']
        tensorrt_llm_gpt_j.lm_head.bias.value = v.to(torch_dtype).cpu().numpy()
    tok = time.time()
    t = time.strftime('%H:%M:%S', time.gmtime(tok - tik))
    tensorrt_llm.logger.info(f'Weights loaded. Total time: {t}')
| NVIDIA/TensorRT-LLM | examples/gptj/weight.py | weight.py | py | 40,329 | python | en | code | 3,328 | github-code | 36 | [
{
"api_name": "typing.Union",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "tensorrt_llm.quantization.Q... |
75076600744 | import os
import time
import json
"""
This script gathers timing data from running all combinations of inputs to ./main.
It writes the output to a file called "data.json", which can be processed and changed
into other report formats using processData.py in the reports folder.
"""
data_json = {}
testAmount = 0  # total number of ./main invocations, printed at the end
Generations = ["100", "1000", "10000"]
GridSize = ["256", "512", "1024"]
OptimiseLevel = ["all", "levelOne", "levelTwo", "levelThree"]
ImplementationTypes = ["SerialOptimized", "Parallel", "GPU"]
Patterns = [
    "PatternFiles/13enginecordership.txt",
    "PatternFiles/Frothing_puffer.txt",
    "PatternFiles/Gosper_glider_gun.txt",
    "PatternFiles/Period144Oscillator.txt"]
for a in ImplementationTypes:
    data_json[a] = {}
    # Enter the sibling directory for this implementation; Parallel and GPU
    # also swap in their own make targets.
    if a == "SerialOptimized":
        os.chdir("../" + a)
    if a == "Parallel":
        os.chdir("../" + a)
        OptimiseLevel = ["para", "para1", "para2", "para3"]
    if a == "GPU":
        os.chdir("../" + a)
        OptimiseLevel = ["gpu"]
    for i in OptimiseLevel:
        data_json[a][i] = {}
        os.system("make " + i)
        for j in Generations:
            data_json[a][i][j] = {}
            for k in GridSize:
                data_json[a][i][j][k] = {}
                for l in Patterns:
                    title = l.split("/")[1]
                    data_json[a][i][j][k][title[:-4]] = {}
                    # Run each configuration 6 times and record wall-clock time.
                    for m in range(0, 6):
                        start_time = time.time()
                        os.system("./main " + l + ' ' + k + ' ' +
                                  k + ' ' + j + ' ' + 'NoVis')
                        end_time = time.time() - start_time
                        testAmount = testAmount + 1
                        data_json[a][i][j][k][title[:-4]][m] = str(end_time)
    # Effectively a no-op (we are already inside directory `a`); kept so the
    # working directory is well-defined after the loop body.
    os.chdir("../" + a)
# Note: written into the last implementation's directory (cwd at this point).
with open("data.json", 'w') as dataFile:
    json.dump(data_json, dataFile)
print(testAmount)
| DaveR27/Game-of-Life | DataGathering/GatherData.py | GatherData.py | py | 1,945 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 33,
"us... |
36324286850 | import json
import smtplib, ssl
import os
from email.message import EmailMessage
import db_functions
from datetime import datetime
## Helper functions Start
def elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message):
    """Build a Lex ElicitSlot response: re-prompt the user for one slot."""
    dialog_action = {
        'type': 'ElicitSlot',
        'intentName': intent_name,
        'slots': slots,
        'slotToElicit': slot_to_elicit,
        'message': message,
    }
    return {'sessionAttributes': session_attributes, 'dialogAction': dialog_action}
def close(session_attributes, fulfillment_state, message):
    """Build a Lex Close response that ends the conversation."""
    dialog_action = {
        'type': 'Close',
        'fulfillmentState': fulfillment_state,
        'message': message,
    }
    return {
        'sessionAttributes': session_attributes,
        'dialogAction': dialog_action,
    }
def delegate(session_attributes, slots):
    """Build a Lex Delegate response: let Lex pick the next dialog step."""
    return {
        'sessionAttributes': session_attributes,
        'dialogAction': {'type': 'Delegate', 'slots': slots},
    }
## Helper functions End
#Function for Raising Request
def RaiseRequest(intent_request):
    """Handle the RaiseRequest intent.

    During the DialogCodeHook pass, validate the slots and either re-prompt
    for a bad email or delegate back to Lex. During fulfillment, send the
    acknowledgment email via sendEmail() and close with the ticket number.

    Fixes over the original: the bare `except:` is narrowed, and a failure
    status reported by sendEmail (s[0] != 'True') no longer produces the
    success message.
    """
    userEmail = intent_request['currentIntent']['slots']['userEmail']
    ticketSubject = intent_request['currentIntent']['slots']['ticketSub']
    ticketBody = intent_request['currentIntent']['slots']['ticketBody']
    session_attributes = intent_request['sessionAttributes']
    mainusername = session_attributes['mainuser']
    if intent_request['invocationSource'] == 'DialogCodeHook':
        # Validation pass: clear and re-elicit any invalid slot.
        validation_result = Slot_Validation(intent_request['currentIntent']['slots'])
        if not validation_result['isValid']:
            slots = intent_request['currentIntent']['slots']
            slots[validation_result['violatedSlot']] = None
            return elicit_slot(
                session_attributes,
                intent_request['currentIntent']['name'],
                slots,
                validation_result['violatedSlot'],
                validation_result['message']
            )
        return delegate(session_attributes, intent_request['currentIntent']['slots'])
    # Fulfillment pass: send the acknowledgment email and report the ticket id.
    try:
        s = sendEmail(userEmail, mainusername, ticketSubject, ticketBody)
    except Exception:
        s = None
    if s is None or s[0] != 'True':
        # Exception raised, or sendEmail reported failure via its status flag.
        return close(
            session_attributes,
            'Fulfilled',
            {
                'contentType': 'PlainText',
                'content': 'Error Raised'
            }
        )
    return close(
        session_attributes,
        'Fulfilled',
        {
            'contentType': 'PlainText',
            'content': 'Ticket #{}, has been raised and will be resolved soon. Please check your mail for further info.'.format(s[1])
        }
    )
def AutoWelcomeMessage(intent_request):
    """Remember the user's name in session attributes, then delegate to Lex."""
    session_attributes = intent_request['sessionAttributes']
    session_attributes['mainuser'] = intent_request['currentIntent']['slots']['userName']
    return delegate(session_attributes, intent_request['currentIntent']['slots'])
def Slot_Validation(slot):
    """Validate slots: the sender's own address may not be used as userEmail."""
    user_Email = slot['userEmail']
    if user_Email != os.environ['SENDER_EMAIL']:
        return {'isValid': True}
    return build_validation_result(
        False,
        'userEmail',
        'This email ID {} is not valid. Please provide valid email ID'.format(user_Email)
    )
def build_validation_result(isvalid, violated_slot, message_content):
    """Package a slot-validation verdict for the dialog handlers."""
    message = {'contentType': 'PlainText', 'content': message_content}
    return {'isValid': isvalid, 'violatedSlot': violated_slot, 'message': message}
#Send Email Function
def sendEmail(r_email,r_username,ticketSubject,ticketBody):
    """Send the ticket acknowledgment email and record the ticket in the DB.

    Args:
        r_email: recipient (the user's) email address.
        r_username: user's display name for the greeting.
        ticketSubject: subject line fragment for the ticket.
        ticketBody: the user's query text.
    Returns:
        A two-element list [status, ticket_id] where status is the string
        'True' on success or 'False' when login/insert/send raised.
    """
    msg=EmailMessage()
    # SMTP endpoint and credentials come from the Lambda environment.
    host=os.environ['SMTPHOST']
    port=os.environ['SMTPPORT']
    sender_email=os.environ['SENDER_EMAIL']
    sender_password=os.environ['SENDER_PASSWORD']
    # Generate a unique ticket id: one greater than the current max id.
    # NOTE(review): `id` shadows the builtin; read-then-increment is not
    # atomic, so concurrent invocations could collide — confirm acceptable.
    id_tuple=db_functions.query_record('select max(id) from requests_tab',1)
    if(id_tuple[0]==None):
        id=0
    else:
        id=id_tuple[0]
    id=id+1
    context=ssl.create_default_context()
    message_to_user="""
    Hi """+r_username+""",
    Thanks for writing to us. We have received your Query. One of our representative will reply to you shortly.
    Your Query: """+ticketBody+"""
    Thanks,
    Kraftcache Team
    Note: This mail is an acknowledgment for your ticket raised with our Chatbot"""
    msg.set_content(message_to_user)
    msg['Subject']='Ticket #'+str(id)+' - ' + ticketSubject
    msg['From']=sender_email
    msg['To']=r_email
    msg['Bcc']=os.environ['BCC']
    # Replies go back to the user, not to the sender mailbox.
    msg['Reply-To']=r_email
    # Send the mail; the DB row is inserted before send_message, so a failed
    # send still leaves an OPEN record (status flags the failure to callers).
    with smtplib.SMTP_SSL(host,port,context=context) as server:
        try:
            server.login(sender_email,sender_password)
            db_functions.insert_records(id,r_username,r_email,ticketSubject,ticketBody,'OPEN',datetime.now())
            server.send_message(msg)
            # NOTE(review): close() inside the `with` block is presumably
            # redundant — the context manager tears the session down on exit.
            server.close()
            status=['True',id]
        except:
            status=['False',id]
    return status
def lambda_handler(event, context):
    """Route the incoming Lex event to the matching intent handler."""
    handlers = {
        'RaiseRequest': RaiseRequest,
        'AutoWelcomeMessage': AutoWelcomeMessage,
    }
    handler = handlers.get(event['currentIntent']['name'])
    if handler is not None:
        return handler(event)
{
"api_name": "os.environ",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "email.message.EmailMessage",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "os.envir... |
14231634112 | #!/usr/bin/env python3
import fire
import logging
import os, sys, traceback
from IsoNet.util.dict2attr import Arg,check_parse,idx2list
from fire import core
from IsoNet.util.metadata import MetaData,Label,Item
class ISONET:
"""
ISONET: Train on tomograms and restore missing-wedge\n
for detail description, run one of the following commands:
isonet.py prepare_star -h
isonet.py prepare_subtomo_star -h
isonet.py deconv -h
isonet.py make_mask -h
isonet.py extract -h
isonet.py refine -h
isonet.py predict -h
isonet.py resize -h
isonet.py gui -h
"""
#log_file = "log.txt"
def prepare_star(self,folder_name, output_star='tomograms.star',pixel_size = 10.0, defocus = 0.0, number_subtomos = 100):
"""
\nThis command generates a tomograms.star file from a folder containing only tomogram files (.mrc or .rec).\n
isonet.py prepare_star folder_name [--output_star] [--pixel_size] [--defocus] [--number_subtomos]
:param folder_name: (None) directory containing tomogram(s). Usually 1-5 tomograms are sufficient.
:param output_star: (tomograms.star) star file similar to that from "relion". You can modify this file manually or with gui.
:param pixel_size: (10) pixel size in angstroms. Usually you want to bin your tomograms to about 10A pixel size.
Too large or too small pixel sizes are not recommended, since the target resolution on Z-axis of corrected tomograms should be about 30A.
:param defocus: (0.0) defocus in Angstrom. Only need for ctf deconvolution. For phase plate data, you can leave defocus 0.
If you have multiple tomograms with different defocus, please modify them in star file or with gui.
:param number_subtomos: (100) Number of subtomograms to be extracted in later processes.
If you want to extract different number of subtomograms in different tomograms, you can modify them in the star file generated with this command or with gui.
"""
md = MetaData()
md.addLabels('rlnIndex','rlnMicrographName','rlnPixelSize','rlnDefocus','rlnNumberSubtomo','rlnMaskBoundary')
tomo_list = sorted(os.listdir(folder_name))
i = 0
for tomo in tomo_list:
if tomo[-4:] == '.rec' or tomo[-4:] == '.mrc':
i+=1
it = Item()
md.addItem(it)
md._setItemValue(it,Label('rlnIndex'),str(i))
md._setItemValue(it,Label('rlnMicrographName'),os.path.join(folder_name,tomo))
md._setItemValue(it,Label('rlnPixelSize'),pixel_size)
md._setItemValue(it,Label('rlnDefocus'),defocus)
md._setItemValue(it,Label('rlnNumberSubtomo'),number_subtomos)
md._setItemValue(it,Label('rlnMaskBoundary'),None)
md.write(output_star)
def prepare_subtomo_star(self, folder_name, output_star='subtomo.star', pixel_size: float=10.0, cube_size = None):
"""
\nThis command generates a subtomo star file from a folder containing only subtomogram files (.mrc).
This command is usually not necessary in the traditional workflow, because "isonet.py extract" will generate this subtomo.star for you.\n
isonet.py prepare_subtomo_star folder_name [--output_star] [--cube_size]
:param folder_name: (None) directory containing subtomogram(s).
:param output_star: (subtomo.star) output star file for subtomograms, will be used as input in refinement.
:param pixel_size: (10) The pixel size in angstrom of your subtomograms.
:param cube_size: (None) This is the size of the cubic volumes used for training. This values should be smaller than the size of subtomogram.
And the cube_size should be divisible by 8. If this value isn't set, cube_size is automatically determined as int(subtomo_size / 1.5 + 1)//16 * 16
"""
#TODO check folder valid, logging
if not os.path.isdir(folder_name):
print("the folder does not exist")
import mrcfile
md = MetaData()
md.addLabels('rlnSubtomoIndex','rlnImageName','rlnCubeSize','rlnCropSize','rlnPixelSize')
subtomo_list = sorted(os.listdir(folder_name))
for i,subtomo in enumerate(subtomo_list):
subtomo_name = os.path.join(folder_name,subtomo)
try:
with mrcfile.open(subtomo_name, mode='r', permissive=True) as s:
crop_size = s.header.nx
except:
print("Warning: Can not process the subtomogram: {}!".format(subtomo_name))
continue
if cube_size is not None:
cube_size = int(cube_size)
if cube_size >= crop_size:
cube_size = int(crop_size / 1.5 + 1)//16 * 16
print("Warning: Cube size should be smaller than the size of subtomogram volume! Using cube size {}!".format(cube_size))
else:
cube_size = int(crop_size / 1.5 + 1)//16 * 16
it = Item()
md.addItem(it)
md._setItemValue(it,Label('rlnSubtomoIndex'),str(i+1))
md._setItemValue(it,Label('rlnImageName'),subtomo_name)
md._setItemValue(it,Label('rlnCubeSize'),cube_size)
md._setItemValue(it,Label('rlnCropSize'),crop_size)
md._setItemValue(it,Label('rlnPixelSize'),pixel_size)
# f.write(str(i+1)+' ' + os.path.join(folder_name,tomo) + '\n')
md.write(output_star)
    def deconv(self, star_file: str,
               deconv_folder:str="./deconv",
               voltage: float=300.0,
               cs: float=2.7,
               snrfalloff: float=None,
               deconvstrength: float=None,
               highpassnyquist: float=0.02,
               chunk_size: int=None,
               overlap_rate: float= 0.25,
               ncpu:int=4,
               tomo_idx: str=None):
        """
        \nCTF deconvolution for the tomograms.\n
        isonet.py deconv star_file [--deconv_folder] [--snrfalloff] [--deconvstrength] [--highpassnyquist] [--overlap_rate] [--ncpu] [--tomo_idx]
        This step is recommended because it enhances low resolution information for a better contrast. No need to do deconvolution for phase plate data.
        :param deconv_folder: (./deconv) Folder created to save deconvoluted tomograms.
        :param star_file: (None) Star file for tomograms.
        :param voltage: (300.0) Acceleration voltage in kV.
        :param cs: (2.7) Spherical aberration in mm.
        :param snrfalloff: (1.0) SNR fall rate with the frequency. High values means losing more high frequency.
        If this value is not set, the program will look for the parameter in the star file.
        If this value is not set and not found in star file, the default value 1.0 will be used.
        :param deconvstrength: (1.0) Strength of the deconvolution.
        If this value is not set, the program will look for the parameter in the star file.
        If this value is not set and not found in star file, the default value 1.0 will be used.
        :param highpassnyquist: (0.02) Highpass filter for at very low frequency. We suggest to keep this default value.
        :param chunk_size: (None) When your computer has enough memory, please keep the chunk_size as the default value: None . Otherwise, you can let the program crop the tomogram into multiple chunks for multiprocessing and assembly them into one. The chunk_size defines the size of individual chunk. This option may induce artifacts along edges of chunks. When that happen, you may use larger overlap_rate.
        :param overlap_rate: (None) The overlapping rate for adjecent chunks.
        :param ncpu: (4) Number of cpus to use.
        :param tomo_idx: (None) If this value is set, process only the tomograms listed in this index. e.g. 1,2,4 or 5-10,15,16
        """
        from IsoNet.util.deconvolution import deconv_one
        logging.basicConfig(format='%(asctime)s, %(levelname)-8s %(message)s',
        datefmt="%m-%d %H:%M:%S",level=logging.INFO,handlers=[logging.StreamHandler(sys.stdout)])
        logging.info('\n######Isonet starts ctf deconvolve######\n')
        try:
            md = MetaData()
            md.read(star_file)
            # First run on this star file: seed the deconv columns with defaults.
            if not 'rlnSnrFalloff' in md.getLabels():
                md.addLabels('rlnSnrFalloff','rlnDeconvStrength','rlnDeconvTomoName')
                for it in md:
                    md._setItemValue(it,Label('rlnSnrFalloff'),1.0)
                    md._setItemValue(it,Label('rlnDeconvStrength'),1.0)
                    md._setItemValue(it,Label('rlnDeconvTomoName'),None)
            if not os.path.isdir(deconv_folder):
                os.mkdir(deconv_folder)
            tomo_idx = idx2list(tomo_idx)
            for it in md:
                if tomo_idx is None or str(it.rlnIndex) in tomo_idx:
                    # Command-line values override the star-file columns.
                    if snrfalloff is not None:
                        md._setItemValue(it,Label('rlnSnrFalloff'), snrfalloff)
                    if deconvstrength is not None:
                        md._setItemValue(it,Label('rlnDeconvStrength'),deconvstrength)
                    tomo_file = it.rlnMicrographName
                    base_name = os.path.basename(tomo_file)
                    deconv_tomo_name = '{}/{}'.format(deconv_folder,base_name)
                    # Defocus is stored in the star file in Angstrom*1e4 units,
                    # hence the /10000.0 — TODO confirm against the star schema.
                    deconv_one(it.rlnMicrographName,deconv_tomo_name,voltage=voltage,cs=cs,defocus=it.rlnDefocus/10000.0, pixel_size=it.rlnPixelSize,snrfalloff=it.rlnSnrFalloff, deconvstrength=it.rlnDeconvStrength,highpassnyquist=highpassnyquist,chunk_size=chunk_size,overlap_rate=overlap_rate,ncpu=ncpu)
                    md._setItemValue(it,Label('rlnDeconvTomoName'),deconv_tomo_name)
            # Persist updated columns (including the new deconv tomo names).
            md.write(star_file)
            logging.info('\n######Isonet done ctf deconvolve######\n')
        except Exception:
            # Append the traceback to log.txt and log it instead of crashing.
            error_text = traceback.format_exc()
            f =open('log.txt','a+')
            f.write(error_text)
            f.close()
            logging.error(error_text)
    def make_mask(self,star_file,
                  mask_folder: str = 'mask',
                  patch_size: int=4,
                  mask_boundary: str=None,
                  density_percentage: int=None,
                  std_percentage: int=None,
                  use_deconv_tomo:bool=True,
                  z_crop:float=None,
                  tomo_idx=None):
        """
        \ngenerate a mask that include sample area and exclude "empty" area of the tomogram. The masks do not need to be precise. In general, the number of subtomograms (a value in star file) should be lesser if you masked out larger area. \n
        isonet.py make_mask star_file [--mask_folder] [--patch_size] [--density_percentage] [--std_percentage] [--use_deconv_tomo] [--tomo_idx]
        :param star_file: path to the tomogram or tomogram folder
        :param mask_folder: path and name of the mask to save as
        :param patch_size: (4) The size of the box from which the max-filter and std-filter are calculated.
        :param density_percentage: (50) The approximate percentage of pixels to keep based on their local pixel density.
        If this value is not set, the program will look for the parameter in the star file.
        If this value is not set and not found in star file, the default value 50 will be used.
        :param std_percentage: (50) The approximate percentage of pixels to keep based on their local standard deviation.
        If this value is not set, the program will look for the parameter in the star file.
        If this value is not set and not found in star file, the default value 50 will be used.
        :param use_deconv_tomo: (True) If CTF deconvolved tomogram is found in tomogram.star, use that tomogram instead.
        :param z_crop: If exclude the top and bottom regions of tomograms along z axis. For example, "--z_crop 0.2" will mask out the top 20% and bottom 20% region along z axis.
        :param tomo_idx: (None) If this value is set, process only the tomograms listed in this index. e.g. 1,2,4 or 5-10,15,16
        """
        from IsoNet.bin.make_mask import make_mask
        logging.basicConfig(format='%(asctime)s, %(levelname)-8s %(message)s',
        datefmt="%m-%d %H:%M:%S",level=logging.INFO,handlers=[logging.StreamHandler(sys.stdout)])
        logging.info('\n######Isonet starts making mask######\n')
        try:
            if not os.path.isdir(mask_folder):
                os.mkdir(mask_folder)
            # write star percentile threshold
            md = MetaData()
            md.read(star_file)
            # First run on this star file: seed the mask columns with defaults.
            if not 'rlnMaskDensityPercentage' in md.getLabels():
                md.addLabels('rlnMaskDensityPercentage','rlnMaskStdPercentage','rlnMaskName')
                for it in md:
                    md._setItemValue(it,Label('rlnMaskDensityPercentage'),50)
                    md._setItemValue(it,Label('rlnMaskStdPercentage'),50)
                    md._setItemValue(it,Label('rlnMaskName'),None)
            tomo_idx = idx2list(tomo_idx)
            for it in md:
                if tomo_idx is None or str(it.rlnIndex) in tomo_idx:
                    # Command-line values override the star-file columns.
                    if density_percentage is not None:
                        md._setItemValue(it,Label('rlnMaskDensityPercentage'),density_percentage)
                    if std_percentage is not None:
                        md._setItemValue(it,Label('rlnMaskStdPercentage'),std_percentage)
                    # Prefer the CTF-deconvolved tomogram when available.
                    if use_deconv_tomo and "rlnDeconvTomoName" in md.getLabels() and it.rlnDeconvTomoName not in [None,'None']:
                        tomo_file = it.rlnDeconvTomoName
                    else:
                        tomo_file = it.rlnMicrographName
                    tomo_root_name = os.path.splitext(os.path.basename(tomo_file))[0]
                    if os.path.isfile(tomo_file):
                        logging.info('make_mask: {}| dir_to_save: {}| percentage: {}| window_scale: {}'.format(tomo_file,
                        mask_folder, it.rlnMaskDensityPercentage, patch_size))
                        #if mask_boundary is None:
                        # Per-tomogram boundary from the star file wins over
                        # the command-line `mask_boundary` argument.
                        if "rlnMaskBoundary" in md.getLabels() and it.rlnMaskBoundary not in [None, "None"]:
                            mask_boundary = it.rlnMaskBoundary
                        else:
                            mask_boundary = None
                        mask_out_name = '{}/{}_mask.mrc'.format(mask_folder,tomo_root_name)
                        make_mask(tomo_file,
                                  mask_out_name,
                                  mask_boundary=mask_boundary,
                                  side=patch_size,
                                  density_percentage=it.rlnMaskDensityPercentage,
                                  std_percentage=it.rlnMaskStdPercentage,
                                  surface = z_crop)
                        md._setItemValue(it,Label('rlnMaskName'),mask_out_name)
            # Persist updated columns (including the new mask names).
            md.write(star_file)
            logging.info('\n######Isonet done making mask######\n')
        except Exception:
            # Append the traceback to log.txt and log it instead of crashing.
            error_text = traceback.format_exc()
            f =open('log.txt','a+')
            f.write(error_text)
            f.close()
            logging.error(error_text)
    def extract(self,
                star_file: str,
                use_deconv_tomo: bool = True,
                subtomo_folder: str = "subtomo",
                subtomo_star: str = "subtomo.star",
                cube_size: int = 64,
                crop_size: int = None,
                log_level: str="info",
                tomo_idx = None
                ):
        """
        \nExtract subtomograms\n
        isonet.py extract star_file [--subtomo_folder] [--subtomo_star] [--cube_size] [--use_deconv_tomo] [--tomo_idx]
        :param star_file: tomogram star file
        :param subtomo_folder: (subtomo) folder for output subtomograms.
        :param subtomo_star: (subtomo.star) star file for output subtomograms.
        :param cube_size: (64) Size of cubes for training, should be divisible by 8, eg. 32, 64. The actual sizes of extracted subtomograms are this value adds 16.
        :param crop_size: (None) The size of subtomogram, should be larger then the cube_size The default value is 16+cube_size.
        :param log_level: ("info") level of the output, either "info" or "debug"
        :param use_deconv_tomo: (True) If CTF deconvolved tomogram is found in tomogram.star, use that tomogram instead.
        """
        # Capture all keyword arguments as an attribute-style namespace that
        # is handed to extract_subtomos.
        d = locals()
        d_args = Arg(d)
        if d_args.log_level == "debug":
            logging.basicConfig(format='%(asctime)s, %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s'
            ,datefmt="%H:%M:%S",level=logging.DEBUG,handlers=[logging.StreamHandler(sys.stdout)])
        else:
            logging.basicConfig(format='%(asctime)s, %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s'
            ,datefmt="%m-%d %H:%M:%S",level=logging.INFO,handlers=[logging.StreamHandler(sys.stdout)])
        logging.info("\n######Isonet starts extracting subtomograms######\n")
        try:
            # A pre-existing output folder is deleted and recreated.
            if os.path.isdir(subtomo_folder):
                logging.warning("subtomo directory exists, the current directory will be overwritten")
                import shutil
                shutil.rmtree(subtomo_folder)
            os.mkdir(subtomo_folder)
            from IsoNet.preprocessing.prepare import extract_subtomos
            # Default crop size leaves a 16-voxel margin around the cube.
            if crop_size is None:
                d_args.crop_size = cube_size + 16
            else:
                d_args.crop_size = crop_size
            d_args.subtomo_dir = subtomo_folder
            d_args.tomo_idx = idx2list(tomo_idx)
            extract_subtomos(d_args)
            logging.info("\n######Isonet done extracting subtomograms######\n")
        except Exception:
            # Append the traceback to log.txt and log it instead of crashing.
            error_text = traceback.format_exc()
            f =open('log.txt','a+')
            f.write(error_text)
            f.close()
            logging.error(error_text)
    def refine(self,
               subtomo_star: str,
               gpuID: str = None,
               iterations: int = None,
               data_dir: str = None,
               pretrained_model: str = None,
               log_level: str = None,
               result_dir: str='results',
               remove_intermediate: bool =False,
               select_subtomo_number: int = None,
               preprocessing_ncpus: int = 16,
               continue_from: str=None,
               epochs: int = 10,
               batch_size: int = None,
               steps_per_epoch: int = None,
               noise_level:  tuple=(0.05,0.10,0.15,0.20),
               noise_start_iter: tuple=(11,16,21,26),
               noise_mode: str = None,
               noise_dir: str = None,
               learning_rate: float = None,
               drop_out: float = 0.3,
               convs_per_depth: int = 3,
               kernel: tuple = (3,3,3),
               pool: tuple = None,
               unet_depth: int = 3,
               filter_base: int = None,
               batch_normalization: bool = True,
               normalize_percentile: bool = True,
               ):
        """
        \ntrain neural network to correct missing wedge\n
        isonet.py refine subtomo_star [--iterations] [--gpuID] [--preprocessing_ncpus] [--batch_size] [--steps_per_epoch] [--noise_start_iter] [--noise_level]...
        :param subtomo_star: (None) star file containing subtomogram(s).
        :param gpuID: (0,1,2,3) The ID of gpu to be used during the training. e.g 0,1,2,3.
        :param pretrained_model: (None) A trained neural network model in ".h5" format to start with.
        :param iterations: (30) Number of training iterations.
        :param data_dir: (data) Temporary folder to save the generated data used for training.
        :param log_level: (info) debug level, could be 'info' or 'debug'
        :param continue_from: (None) A Json file to continue from. That json file is generated at each iteration of refine.
        :param result_dir: ('results') The name of directory to save refined neural network models and subtomograms
        :param preprocessing_ncpus: (16) Number of cpu for preprocessing.
        ************************Training settings************************
        :param epochs: (10) Number of epoch for each iteraction.
        :param batch_size: (None) Size of the minibatch.If None, batch_size will be the max(2 * number_of_gpu,4). batch_size should be divisible by the number of gpu.
        :param steps_per_epoch: (None) Step per epoch. If not defined, the default value will be min(num_of_subtomograms * 6 / batch_size , 200)
        ************************Denoise settings************************
        :param noise_level: (0.05,0.1,0.15,0.2) Level of noise STD(added noise)/STD(data) after the iteration defined in noise_start_iter.
        :param noise_start_iter: (11,16,21,26) Iteration that start to add noise of corresponding noise level.
        :param noise_mode: (None) Filter names when generating noise volumes, can be 'ramp', 'hamming' and 'noFilter'
        :param noise_dir: (None) Directory for generated noise volumes. If set to None, the Noise volumes should appear in results/training_noise
        ************************Network settings************************
        :param drop_out: (0.3) Drop out rate to reduce overfitting.
        :param learning_rate: (0.0004) learning rate for network training.
        :param convs_per_depth: (3) Number of convolution layer for each depth.
        :param kernel: (3,3,3) Kernel for convolution
        :param unet_depth: (3) Depth of UNet.
        :param filter_base: (64) The base number of channels after convolution.
        :param batch_normalization: (True) Use Batch Normalization layer
        :param pool: (False) Use pooling layer instead of stride convolution layer.
        :param normalize_percentile: (True) Normalize the 5 percent and 95 percent pixel intensity to 0 and 1 respectively. If this is set to False, normalize the input to 0 mean and 1 standard dievation.
        """
        from IsoNet.bin.refine import run
        # Capture all arguments as a namespace for run().
        # NOTE(review): locals() here also contains `run` (imported just
        # above) — presumably Arg/run ignore the extra key; confirm.
        d = locals()
        d_args = Arg(d)
        # Record the exact command line in log.txt for reproducibility.
        with open('log.txt','a+') as f:
            f.write(' '.join(sys.argv[0:]) + '\n')
        run(d_args)
def predict(self, star_file: str, model: str, output_dir: str='./corrected_tomos', gpuID: str = None, cube_size:int=64,
crop_size:int=96,use_deconv_tomo=True, batch_size:int=None,normalize_percentile: bool=True,log_level: str="info", tomo_idx=None):
"""
\nPredict tomograms using trained model\n
isonet.py predict star_file model [--gpuID] [--output_dir] [--cube_size] [--crop_size] [--batch_size] [--tomo_idx]
:param star_file: star for tomograms.
:param output_dir: file_name of output predicted tomograms
:param model: path to trained network model .h5
:param gpuID: (0,1,2,3) The gpuID to used during the training. e.g 0,1,2,3.
:param cube_size: (64) The tomogram is divided into cubes to predict due to the memory limitation of GPUs.
:param crop_size: (96) The side-length of cubes cropping from tomogram in an overlapping patch strategy, make this value larger if you see the patchy artifacts
:param batch_size: The batch size of the cubes grouped into for network predicting, the default parameter is four times number of gpu
:param normalize_percentile: (True) if normalize the tomograms by percentile. Should be the same with that in refine parameter.
:param log_level: ("debug") level of message to be displayed, could be 'info' or 'debug'
:param tomo_idx: (None) If this value is set, process only the tomograms listed in this index. e.g. 1,2,4 or 5-10,15,16
:param use_deconv_tomo: (True) If CTF deconvolved tomogram is found in tomogram.star, use that tomogram instead.
:raises: AttributeError, KeyError
"""
d = locals()
d_args = Arg(d)
from IsoNet.bin.predict import predict
if d_args.log_level == "debug":
logging.basicConfig(format='%(asctime)s, %(levelname)-8s %(message)s',
datefmt="%m-%d %H:%M:%S",level=logging.DEBUG,handlers=[logging.StreamHandler(sys.stdout)])
else:
logging.basicConfig(format='%(asctime)s, %(levelname)-8s %(message)s',
datefmt="%m-%d %H:%M:%S",level=logging.INFO,handlers=[logging.StreamHandler(sys.stdout)])
try:
predict(d_args)
except:
error_text = traceback.format_exc()
f =open('log.txt','a+')
f.write(error_text)
f.close()
logging.error(error_text)
def resize(self, star_file:str, apix: float=15, out_folder="tomograms_resized"):
'''
This function rescale the tomograms to a given pixelsize
'''
md = MetaData()
md.read(star_file)
#print(md._data[0].rlnPixelSize)
from scipy.ndimage import zoom
#from skimage.transform import rescale
#import numpy as np
import mrcfile
if not os.path.isdir(out_folder):
os.makedirs(out_folder)
for item in md._data:
ori_apix = item.rlnPixelSize
tomo_name = item.rlnMicrographName
zoom_factor = float(ori_apix)/apix
new_tomo_name = "{}/{}".format(out_folder,os.path.basename(tomo_name))
with mrcfile.open(tomo_name) as mrc:
data = mrc.data
print("scaling: {}".format(tomo_name))
new_data = zoom(data, zoom_factor,order=3, prefilter=False)
#new_data = rescale(data, zoom_factor,order=3, anti_aliasing = True)
#new_data = new_data.astype(np.float32)
with mrcfile.new(new_tomo_name,overwrite=True) as mrc:
mrc.set_data(new_data)
mrc.voxel_size = apix
item.rlnPixelSize = apix
print(new_tomo_name)
item.rlnMicrographName = new_tomo_name
print(item.rlnMicrographName)
md.write(os.path.splitext(star_file)[0] + "_resized.star")
print("scale_finished")
    def check(self):
        """Sanity-check the installation by importing the core dependencies."""
        # Importing these verifies that the IsoNet package itself and its
        # third-party requirements (scikit-image, PyQt5, tqdm) are importable.
        from IsoNet.bin.predict import predict
        from IsoNet.bin.refine import run
        import skimage
        import PyQt5
        import tqdm
        print('IsoNet --version 0.2 installed')
    def gui(self):
        """
        \nGraphic User Interface\n
        """
        # Imported lazily so the Qt dependencies are only required when the
        # GUI is actually launched.
        import IsoNet.gui.Isonet_star_app as app
        app.main()
def Display(lines, out):
    """Join *lines* with newlines and write them to *out* in a single call,
    terminated by a trailing newline."""
    joined = "\n".join(lines)
    out.write(joined + "\n")
def pool_process(p_func, chunks_list, ncpu):
    """Apply *p_func* to every element of *chunks_list* in a process pool.

    :param p_func: picklable callable applied to each chunk.
    :param chunks_list: iterable of argument values, one per task.
    :param ncpu: number of worker processes.
    :return: list of results in input order.
    """
    from multiprocessing import Pool
    with Pool(ncpu, maxtasksperchild=1000) as p:
        # Pool.map preserves input order and blocks until all tasks finish.
        results = p.map(p_func, chunks_list)
    # Fix: results were previously computed and then silently discarded
    # (the old ``# return results`` comment showed the original intent).
    return results
if __name__ == "__main__":
    # Override core.Display so output is routed through the local Display
    # helper (core is presumably fire.core — confirm against the imports
    # at the top of the file).
    core.Display = Display
    # logging.basicConfig(format='%(asctime)s, %(levelname)-8s %(message)s',datefmt="%m-%d %H:%M:%S",level=logging.INFO)
    # Validate the raw command line before handing it to Fire for dispatch.
    if len(sys.argv) > 1:
        check_parse(sys.argv[1:])
    fire.Fire(ISONET)
| IsoNet-cryoET/IsoNet | bin/isonet.py | isonet.py | py | 26,881 | python | en | code | 49 | github-code | 36 | [
{
"api_name": "IsoNet.util.metadata.MetaData",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "IsoNet.util.metadata.Item",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "Is... |
34181491873 | import json
import logging
import traceback
import warnings
from datetime import datetime
from collections import OrderedDict
from typing import Dict, Callable, Optional, Union, List, Any, Type, Sequence
from qiskit.providers.backend import BackendV1 as Backend
from qiskit.providers.provider import ProviderV1 as Provider
from qiskit.providers.exceptions import QiskitBackendNotFoundError
from qiskit.providers.providerutils import filter_backends
from qiskit.providers.models import (
PulseBackendConfiguration,
QasmBackendConfiguration,
)
from qiskit_ibm_provider.proxies import ProxyConfiguration
from qiskit_ibm_provider.utils.hgp import to_instance_format, from_instance_format
from qiskit_ibm_provider.utils.backend_decoder import configuration_from_server_data
from qiskit_ibm_runtime import ibm_backend
from .utils.utils import validate_job_tags
from .accounts import AccountManager, Account, ChannelType
from .api.clients import AuthClient, VersionClient
from .api.clients.runtime import RuntimeClient
from .api.exceptions import RequestsApiError
from .constants import QISKIT_IBM_RUNTIME_API_URL
from .exceptions import IBMNotAuthorizedError, IBMInputValueError, IBMAccountError
from .exceptions import (
IBMRuntimeError,
RuntimeProgramNotFound,
RuntimeJobNotFound,
)
from .hub_group_project import HubGroupProject # pylint: disable=cyclic-import
from .utils.result_decoder import ResultDecoder
from .runtime_job import RuntimeJob
from .utils import RuntimeDecoder, to_python_identifier
from .api.client_parameters import ClientParameters
from .runtime_options import RuntimeOptions
from .ibm_backend import IBMBackend
# Module-level logger.
logger = logging.getLogger(__name__)
# Key of the runtime service inside the auth client's "services" URL mapping.
SERVICE_NAME = "runtime"
class QiskitRuntimeService(Provider):
"""Class for interacting with the Qiskit Runtime service.
Qiskit Runtime is a new architecture offered by IBM Quantum that
streamlines computations requiring many iterations. These experiments will
execute significantly faster within its improved hybrid quantum/classical
process.
A sample workflow of using the runtime service::
from qiskit_ibm_runtime import QiskitRuntimeService, Session, Sampler, Estimator, Options
from qiskit.test.reference_circuits import ReferenceCircuits
from qiskit.circuit.library import RealAmplitudes
from qiskit.quantum_info import SparsePauliOp
# Initialize account.
service = QiskitRuntimeService()
# Set options, which can be overwritten at job level.
options = Options(optimization_level=1)
# Prepare inputs.
bell = ReferenceCircuits.bell()
psi = RealAmplitudes(num_qubits=2, reps=2)
H1 = SparsePauliOp.from_list([("II", 1), ("IZ", 2), ("XI", 3)])
theta = [0, 1, 1, 2, 3, 5]
with Session(service=service, backend="ibmq_qasm_simulator") as session:
# Submit a request to the Sampler primitive within the session.
sampler = Sampler(session=session, options=options)
job = sampler.run(circuits=bell)
print(f"Sampler results: {job.result()}")
# Submit a request to the Estimator primitive within the session.
estimator = Estimator(session=session, options=options)
job = estimator.run(
circuits=[psi], observables=[H1], parameter_values=[theta]
)
print(f"Estimator results: {job.result()}")
The example above uses the dedicated :class:`~qiskit_ibm_runtime.Sampler`
and :class:`~qiskit_ibm_runtime.Estimator` classes. You can also
use the :meth:`run` method directly to invoke a Qiskit Runtime program.
If the program has any interim results, you can use the ``callback``
parameter of the :meth:`run` method to stream the interim results.
Alternatively, you can use the :meth:`RuntimeJob.stream_results` method to stream
the results at a later time, but before the job finishes.
The :meth:`run` method returns a
:class:`RuntimeJob` object. You can use its
methods to perform tasks like checking job status, getting job result, and
canceling job.
"""
global_service = None
    def __init__(
        self,
        channel: Optional[ChannelType] = None,
        token: Optional[str] = None,
        url: Optional[str] = None,
        filename: Optional[str] = None,
        name: Optional[str] = None,
        instance: Optional[str] = None,
        proxies: Optional[dict] = None,
        verify: Optional[bool] = None,
        channel_strategy: Optional[str] = None,
    ) -> None:
        """QiskitRuntimeService constructor

        An account is selected in the following order:

            - Account with the input `name`, if specified.
            - Default account for the `channel` type, if `channel` is specified but `token` is not.
            - Account defined by the input `channel` and `token`, if specified.
            - Account defined by the `default_channel` if defined in filename
            - Account defined by the environment variables, if defined.
            - Default account for the ``ibm_cloud`` account, if one is available.
            - Default account for the ``ibm_quantum`` account, if one is available.

        `instance`, `proxies`, and `verify` can be used to overwrite corresponding
        values in the loaded account.

        Args:
            channel: Channel type. ``ibm_cloud`` or ``ibm_quantum``.
            token: IBM Cloud API key or IBM Quantum API token.
            url: The API URL.
                Defaults to https://cloud.ibm.com (ibm_cloud) or
                https://auth.quantum-computing.ibm.com/api (ibm_quantum).
            filename: Full path of the file where the account is created.
                Default: _DEFAULT_ACCOUNT_CONFIG_JSON_FILE
            name: Name of the account to load.
            instance: The service instance to use.
                For ``ibm_cloud`` runtime, this is the Cloud Resource Name (CRN) or the service name.
                For ``ibm_quantum`` runtime, this is the hub/group/project in that format.
            proxies: Proxy configuration. Supported optional keys are
                ``urls`` (a dictionary mapping protocol or protocol and host to the URL of the proxy,
                documented at https://docs.python-requests.org/en/latest/api/#requests.Session.proxies),
                ``username_ntlm``, ``password_ntlm`` (username and password to enable NTLM user
                authentication)
            verify: Whether to verify the server's TLS certificate.
            channel_strategy: Error mitigation strategy.

        Returns:
            An instance of QiskitRuntimeService.

        Raises:
            IBMInputValueError: If an input is invalid.
        """
        super().__init__()
        # Resolve the account (precedence rules above) and derive the
        # low-level client connection parameters from it.
        self._account = self._discover_account(
            token=token,
            url=url,
            instance=instance,
            channel=channel,
            filename=filename,
            name=name,
            proxies=ProxyConfiguration(**proxies) if proxies else None,
            verify=verify,
            channel_strategy=channel_strategy,
        )
        self._client_params = ClientParameters(
            channel=self._account.channel,
            token=self._account.token,
            url=self._account.url,
            instance=self._account.instance,
            proxies=self._account.proxies,
            verify=self._account.verify,
        )
        # An explicit constructor argument wins over the strategy saved
        # with the account.
        self._channel_strategy = channel_strategy or self._account.channel_strategy
        self._channel = self._account.channel
        # Caches: backend objects keyed by name, and their raw configurations.
        self._backends: Dict[str, "ibm_backend.IBMBackend"] = {}
        self._backend_configs: Dict[str, Any] = {}
        if self._channel == "ibm_cloud":
            self._api_client = RuntimeClient(self._client_params)
            # TODO: We can make the backend discovery lazy
            self._backends = self._discover_cloud_backends()
            QiskitRuntimeService.global_service = self
            self._validate_channel_strategy()
            return
        else:
            # ibm_quantum channel: authenticate first, then repoint the
            # client parameters at the authenticated runtime URL and token.
            auth_client = self._authenticate_ibm_quantum_account(self._client_params)
            # Update client parameters to use authenticated values.
            self._client_params.url = auth_client.current_service_urls()["services"]["runtime"]
            if self._client_params.url == "https://api.de.quantum-computing.ibm.com/runtime":
                warnings.warn(
                    "Features in versions of qiskit-ibm-runtime greater than and including "
                    "0.13.0 may not be supported in this environment"
                )
            self._client_params.token = auth_client.current_access_token()
            self._api_client = RuntimeClient(self._client_params)
            self._hgps = self._initialize_hgps(auth_client)
            # Register every backend name reachable through any h/g/p;
            # the backend objects themselves are built lazily in backends().
            for hgp in self._hgps.values():
                for backend_name in hgp.backends:
                    if backend_name not in self._backends:
                        self._backends[backend_name] = None
            self._current_instance = self._account.instance
            if not self._current_instance:
                self._current_instance = self._get_hgp().name
                logger.info("Default instance: %s", self._current_instance)
            QiskitRuntimeService.global_service = self
        # TODO - it'd be nice to allow some kind of autocomplete, but `service.ibmq_foo`
        # just seems wrong since backends are not runtime service instances.
        # self._discover_backends()
def _discover_account(
self,
token: Optional[str] = None,
url: Optional[str] = None,
instance: Optional[str] = None,
channel: Optional[ChannelType] = None,
filename: Optional[str] = None,
name: Optional[str] = None,
proxies: Optional[ProxyConfiguration] = None,
verify: Optional[bool] = None,
channel_strategy: Optional[str] = None,
) -> Account:
"""Discover account."""
account = None
verify_ = verify or True
if channel_strategy:
if channel_strategy not in ["q-ctrl", "default"]:
raise ValueError(f"{channel_strategy} is not a valid channel strategy.")
if channel and channel != "ibm_cloud":
raise ValueError(
f"The channel strategy {channel_strategy} is "
"only supported on the ibm_cloud channel."
)
if name:
if filename:
if any([channel, token, url]):
logger.warning(
"Loading account from file %s with name %s. Any input "
"'channel', 'token' or 'url' are ignored.",
filename,
name,
)
else:
if any([channel, token, url]):
logger.warning(
"Loading account with name %s. Any input "
"'channel', 'token' or 'url' are ignored.",
name,
)
account = AccountManager.get(filename=filename, name=name)
elif channel:
if channel and channel not in ["ibm_cloud", "ibm_quantum"]:
raise ValueError("'channel' can only be 'ibm_cloud' or 'ibm_quantum'")
if token:
account = Account.create_account(
channel=channel,
token=token,
url=url,
instance=instance,
proxies=proxies,
verify=verify_,
channel_strategy=channel_strategy,
)
else:
if url:
logger.warning("Loading default %s account. Input 'url' is ignored.", channel)
account = AccountManager.get(filename=filename, name=name, channel=channel)
elif any([token, url]):
# Let's not infer based on these attributes as they may change in the future.
raise ValueError(
"'channel' is required if 'token', or 'url' is specified but 'name' is not."
)
# channel is not defined yet, get it from the AccountManager
if account is None:
account = AccountManager.get(filename=filename)
if instance:
account.instance = instance
if proxies:
account.proxies = proxies
if verify is not None:
account.verify = verify
# resolve CRN if needed
self._resolve_crn(account)
# ensure account is valid, fail early if not
account.validate()
return account
def _validate_channel_strategy(self) -> None:
"""Raise an error if the passed in channel_strategy and
instance do not match.
"""
qctrl_enabled = self._api_client.is_qctrl_enabled()
if self._channel_strategy == "q-ctrl":
if not qctrl_enabled:
raise IBMNotAuthorizedError(
"The instance passed in is not compatible with Q-CTRL channel strategy. "
"Please switch to or create an instance with the Q-CTRL strategy enabled. "
"See https://cloud.ibm.com/docs/quantum-computing?"
"topic=quantum-computing-get-started for more information"
)
else:
if qctrl_enabled:
raise IBMNotAuthorizedError(
"The instance passed in is only compatible with Q-CTRL performance "
"management strategy. "
"To use this instance, set channel_strategy='q-ctrl'."
)
def _discover_cloud_backends(self) -> Dict[str, "ibm_backend.IBMBackend"]:
"""Return the remote backends available for this service instance.
Returns:
A dict of the remote backend instances, keyed by backend name.
"""
ret = OrderedDict() # type: ignore[var-annotated]
backends_list = self._api_client.list_backends(channel_strategy=self._channel_strategy)
for backend_name in backends_list:
raw_config = self._api_client.backend_configuration(backend_name=backend_name)
config = configuration_from_server_data(
raw_config=raw_config, instance=self._account.instance
)
if not config:
continue
ret[config.backend_name] = ibm_backend.IBMBackend(
configuration=config,
service=self,
api_client=self._api_client,
)
return ret
    def _resolve_crn(self, account: Account) -> None:
        """Resolve the account's CRN in place (delegates to ``Account.resolve_crn``)."""
        account.resolve_crn()
def _authenticate_ibm_quantum_account(self, client_params: ClientParameters) -> AuthClient:
"""Authenticate against IBM Quantum and populate the hub/group/projects.
Args:
client_params: Parameters used for server connection.
Raises:
IBMInputValueError: If the URL specified is not a valid IBM Quantum authentication URL.
IBMNotAuthorizedError: If the account is not authorized to use runtime.
Returns:
Authentication client.
"""
version_info = self._check_api_version(client_params)
# Check the URL is a valid authentication URL.
if not version_info["new_api"] or "api-auth" not in version_info:
raise IBMInputValueError(
"The URL specified ({}) is not an IBM Quantum authentication URL. "
"Valid authentication URL: {}.".format(
client_params.url, QISKIT_IBM_RUNTIME_API_URL
)
)
auth_client = AuthClient(client_params)
service_urls = auth_client.current_service_urls()
if not service_urls.get("services", {}).get(SERVICE_NAME):
raise IBMNotAuthorizedError(
"This account is not authorized to use ``ibm_quantum`` runtime service."
)
return auth_client
    def _initialize_hgps(
        self,
        auth_client: AuthClient,
    ) -> Dict:
        """Authenticate against IBM Quantum and populate the hub/group/projects.

        Args:
            auth_client: Authentication data.

        Raises:
            IBMInputValueError: If the URL specified is not a valid IBM Quantum authentication URL.
            IBMAccountError: If no hub/group/project could be found for this account.
            IBMInputValueError: If instance parameter is not found in hgps.

        Returns:
            The hub/group/projects for this account.
        """
        # pylint: disable=unsubscriptable-object
        hgps: OrderedDict[str, HubGroupProject] = OrderedDict()
        service_urls = auth_client.current_service_urls()
        user_hubs = auth_client.user_hubs()
        for hub_info in user_hubs:
            # Build credentials.
            hgp_params = ClientParameters(
                channel=self._account.channel,
                token=auth_client.current_access_token(),
                url=service_urls["services"]["runtime"],
                instance=to_instance_format(
                    hub_info["hub"], hub_info["group"], hub_info["project"]
                ),
                proxies=self._account.proxies,
                verify=self._account.verify,
            )
            # Build the hgp.
            try:
                hgp = HubGroupProject(
                    client_params=hgp_params, instance=hgp_params.instance, service=self
                )
                hgps[hgp.name] = hgp
            except Exception:  # pylint: disable=broad-except
                # Catch-all for errors instantiating the hgp.
                # A failing hgp is logged and skipped rather than aborting the
                # whole account initialization.
                logger.warning(
                    "Unable to instantiate hub/group/project for %s: %s",
                    hub_info,
                    traceback.format_exc(),
                )
        if not hgps:
            raise IBMAccountError(
                "No hub/group/project that supports Qiskit Runtime could "
                "be found for this account."
            )
        # Move open hgp to end of the list
        # NOTE(review): popitem(last=False) removes the FIRST-inserted entry
        # and re-inserting appends it at the end — this assumes user_hubs()
        # lists the open-access hgp first; confirm with AuthClient.
        if len(hgps) > 1:
            open_key, open_val = hgps.popitem(last=False)
            hgps[open_key] = open_val
        default_hgp = self._account.instance
        if default_hgp:
            if default_hgp in hgps:
                # Move user selected hgp to front of the list
                hgps.move_to_end(default_hgp, last=False)
            else:
                raise IBMInputValueError(
                    f"Hub/group/project {default_hgp} could not be found for this account."
                )
        return hgps
@staticmethod
def _check_api_version(params: ClientParameters) -> Dict[str, Union[bool, str]]:
"""Check the version of the remote server in a set of client parameters.
Args:
params: Parameters used for server connection.
Returns:
A dictionary with version information.
"""
version_finder = VersionClient(url=params.url, **params.connection_parameters())
return version_finder.version()
def _get_hgp(
self,
instance: Optional[str] = None,
backend_name: Optional[Any] = None,
) -> HubGroupProject:
"""Return an instance of `HubGroupProject`.
This function also allows to find the `HubGroupProject` that contains a backend
`backend_name`.
Args:
instance: The hub/group/project to use.
backend_name: Name of the IBM Quantum backend.
Returns:
An instance of `HubGroupProject` that matches the specified criteria or the default.
Raises:
IBMInputValueError: If no hub/group/project matches the specified criteria,
or if the input value is in an incorrect format.
QiskitBackendNotFoundError: If backend cannot be found.
"""
if instance:
_ = from_instance_format(instance) # Verify format
if instance not in self._hgps:
raise IBMInputValueError(
f"Hub/group/project {instance} " "could not be found for this account."
)
if backend_name and not self._hgps[instance].has_backend(backend_name):
raise QiskitBackendNotFoundError(
f"Backend {backend_name} cannot be found in " f"hub/group/project {instance}"
)
return self._hgps[instance]
if not backend_name:
return list(self._hgps.values())[0]
for hgp in self._hgps.values():
if hgp.has_backend(backend_name):
return hgp
error_message = (
f"Backend {backend_name} cannot be found in any " f"hub/group/project for this account."
)
if not isinstance(backend_name, str):
error_message += (
f" {backend_name} is of type {type(backend_name)} but should "
f"instead be initialized through the {self}."
)
raise QiskitBackendNotFoundError(error_message)
def _discover_backends(self) -> None:
"""Discovers the remote backends for this account, if not already known."""
for backend in self._backends.values():
backend_name = to_python_identifier(backend.name)
# Append _ if duplicate
while backend_name in self.__dict__:
backend_name += "_"
setattr(self, backend_name, backend)
    # pylint: disable=arguments-differ
    def backends(
        self,
        name: Optional[str] = None,
        min_num_qubits: Optional[int] = None,
        instance: Optional[str] = None,
        filters: Optional[Callable[[List["ibm_backend.IBMBackend"]], bool]] = None,
        **kwargs: Any,
    ) -> List["ibm_backend.IBMBackend"]:
        """Return all backends accessible via this account, subject to optional filtering.

        Args:
            name: Backend name to filter by.
            min_num_qubits: Minimum number of qubits the backend has to have.
            instance: This is only supported for ``ibm_quantum`` runtime and is in the
                hub/group/project format.
            filters: More complex filters, such as lambda functions.
                For example::

                    QiskitRuntimeService.backends(
                        filters=lambda b: b.max_shots > 50000)
                    QiskitRuntimeService.backends(
                        filters=lambda x: ("rz" in x.basis_gates )
            **kwargs: Simple filters that require a specific value for an attribute in
                backend configuration or status.
                Examples::

                    # Get the operational real backends
                    QiskitRuntimeService.backends(simulator=False, operational=True)

                    # Get the backends with at least 127 qubits
                    QiskitRuntimeService.backends(min_num_qubits=127)

                    # Get the backends that support OpenPulse
                    QiskitRuntimeService.backends(open_pulse=True)

                For the full list of backend attributes, see the `IBMBackend` class documentation
                <https://docs.quantum.ibm.com/api/qiskit/providers_models>

        Returns:
            The list of available backends that match the filter.

        Raises:
            IBMInputValueError: If an input is invalid.
            QiskitBackendNotFoundError: If the backend is not in any instance.
        """
        # TODO filter out input_allowed not having runtime
        backends: List[IBMBackend] = []
        instance_filter = instance if instance else self._account.instance
        if self._channel == "ibm_quantum":
            if name:
                # Single named backend: (re)build its cached object if it is
                # missing or was cached under a different instance.
                if name not in self._backends:
                    raise QiskitBackendNotFoundError("No backend matches the criteria.")
                if not self._backends[name] or instance != self._backends[name]._instance:
                    self._set_backend_config(name)
                    self._backends[name] = self._create_backend_obj(
                        self._backend_configs[name],
                        instance,
                    )
                if self._backends[name]:
                    backends.append(self._backends[name])
            elif instance_filter:
                # All backends belonging to one hub/group/project.
                hgp = self._get_hgp(instance=instance_filter)
                for backend_name in hgp.backends:
                    if (
                        not self._backends[backend_name]
                        or instance_filter != self._backends[backend_name]._instance
                    ):
                        self._set_backend_config(backend_name, instance_filter)
                        self._backends[backend_name] = self._create_backend_obj(
                            self._backend_configs[backend_name], instance_filter
                        )
                    if self._backends[backend_name]:
                        backends.append(self._backends[backend_name])
            else:
                # No name or instance: materialize every known backend lazily.
                for backend_name, backend_config in self._backends.items():
                    if not backend_config:
                        self._set_backend_config(backend_name)
                        self._backends[backend_name] = self._create_backend_obj(
                            self._backend_configs[backend_name]
                        )
                    if self._backends[backend_name]:
                        backends.append(self._backends[backend_name])
        else:
            if instance:
                raise IBMInputValueError(
                    "The 'instance' keyword is only supported for ``ibm_quantum`` runtime."
                )
            backends = list(self._backends.values())
        if name:
            kwargs["backend_name"] = name
        if min_num_qubits:
            backends = list(
                filter(lambda b: b.configuration().n_qubits >= min_num_qubits, backends)
            )
        return filter_backends(backends, filters=filters, **kwargs)
def _set_backend_config(self, backend_name: str, instance: Optional[str] = None) -> None:
"""Retrieve backend configuration and add to backend_configs.
Args:
backend_name: backend name that will be returned.
instance: the current h/g/p.
"""
if backend_name not in self._backend_configs:
raw_config = self._api_client.backend_configuration(backend_name)
config = configuration_from_server_data(raw_config=raw_config, instance=instance)
self._backend_configs[backend_name] = config
def _create_backend_obj(
self,
config: Union[QasmBackendConfiguration, PulseBackendConfiguration],
instance: Optional[str] = None,
) -> IBMBackend:
"""Given a backend configuration return the backend object.
Args:
config: backend configuration.
instance: the current h/g/p.
Returns:
A backend object.
Raises:
QiskitBackendNotFoundError: if the backend is not in the hgp passed in.
"""
if config:
if not instance:
for hgp in list(self._hgps.values()):
if config.backend_name in hgp.backends:
instance = to_instance_format(hgp._hub, hgp._group, hgp._project)
break
elif config.backend_name not in self._get_hgp(instance=instance).backends:
raise QiskitBackendNotFoundError(
f"Backend {config.backend_name} is not in "
f"{instance}: please try a different hub/group/project."
)
return ibm_backend.IBMBackend(
instance=instance,
configuration=config,
service=self,
api_client=self._api_client,
)
return None
    def active_account(self) -> Optional[Dict[str, str]]:
        """Return the IBM Quantum account currently in use for the session.

        Returns:
            A dictionary with information about the account currently in the session.
        """
        # Serialized (plain-dict) form of the account resolved in __init__.
        return self._account.to_saved_format()
    @staticmethod
    def delete_account(
        filename: Optional[str] = None,
        name: Optional[str] = None,
        channel: Optional[ChannelType] = None,
    ) -> bool:
        """Delete a saved account from disk.

        Args:
            filename: Name of file from which to delete the account.
            name: Name of the saved account to delete.
            channel: Channel type of the default account to delete.
                Ignored if account name is provided.

        Returns:
            True if the account was deleted.
            False if no account was found.
        """
        # Pure delegation to the on-disk account store.
        return AccountManager.delete(filename=filename, name=name, channel=channel)
@staticmethod
def save_account(
token: Optional[str] = None,
url: Optional[str] = None,
instance: Optional[str] = None,
channel: Optional[ChannelType] = None,
filename: Optional[str] = None,
name: Optional[str] = None,
proxies: Optional[dict] = None,
verify: Optional[bool] = None,
overwrite: Optional[bool] = False,
channel_strategy: Optional[str] = None,
set_as_default: Optional[bool] = None,
) -> None:
"""Save the account to disk for future use.
Args:
token: IBM Cloud API key or IBM Quantum API token.
url: The API URL.
Defaults to https://cloud.ibm.com (ibm_cloud) or
https://auth.quantum-computing.ibm.com/api (ibm_quantum).
instance: The CRN (ibm_cloud) or hub/group/project (ibm_quantum).
channel: Channel type. `ibm_cloud` or `ibm_quantum`.
filename: Full path of the file where the account is saved.
name: Name of the account to save.
proxies: Proxy configuration. Supported optional keys are
``urls`` (a dictionary mapping protocol or protocol and host to the URL of the proxy,
documented at https://docs.python-requests.org/en/latest/api/#requests.Session.proxies),
``username_ntlm``, ``password_ntlm`` (username and password to enable NTLM user
authentication)
verify: Verify the server's TLS certificate.
overwrite: ``True`` if the existing account is to be overwritten.
channel_strategy: Error mitigation strategy.
set_as_default: If ``True``, the account is saved in filename,
as the default account.
"""
AccountManager.save(
token=token,
url=url,
instance=instance,
channel=channel,
filename=filename,
name=name,
proxies=ProxyConfiguration(**proxies) if proxies else None,
verify=verify,
overwrite=overwrite,
channel_strategy=channel_strategy,
set_as_default=set_as_default,
)
@staticmethod
def saved_accounts(
default: Optional[bool] = None,
channel: Optional[ChannelType] = None,
filename: Optional[str] = None,
name: Optional[str] = None,
) -> dict:
"""List the accounts saved on disk.
Args:
default: If set to True, only default accounts are returned.
channel: Channel type. `ibm_cloud` or `ibm_quantum`.
filename: Name of file whose accounts are returned.
name: If set, only accounts with the given name are returned.
Returns:
A dictionary with information about the accounts saved on disk.
Raises:
ValueError: If an invalid account is found on disk.
"""
return dict(
map(
lambda kv: (kv[0], Account.to_saved_format(kv[1])),
AccountManager.list(
default=default, channel=channel, filename=filename, name=name
).items(),
),
)
def backend(
self,
name: str = None,
instance: Optional[str] = None,
) -> Backend:
"""Return a single backend matching the specified filtering.
Args:
name: Name of the backend.
instance: This is only supported for ``ibm_quantum`` runtime and is in the
hub/group/project format. If an instance is not given, among the providers
with access to the backend, a premium provider will be prioritized.
For users without access to a premium provider, the default open provider will be used.
Returns:
Backend: A backend matching the filtering.
Raises:
QiskitBackendNotFoundError: if no backend could be found.
"""
# pylint: disable=arguments-differ, line-too-long
backends = self.backends(name, instance=instance)
if not backends:
cloud_msg_url = ""
if self._channel == "ibm_cloud":
cloud_msg_url = (
" Learn more about available backends here "
"https://cloud.ibm.com/docs/quantum-computing?topic=quantum-computing-choose-backend "
)
raise QiskitBackendNotFoundError("No backend matches the criteria." + cloud_msg_url)
return backends[0]
    def get_backend(self, name: str = None, **kwargs: Any) -> Backend:
        """Return a single backend; provider-interface alias for :meth:`backend`."""
        return self.backend(name, **kwargs)
def run(
    self,
    program_id: str,
    inputs: Dict,
    options: Optional[Union[RuntimeOptions, Dict]] = None,
    callback: Optional[Callable] = None,
    result_decoder: Optional[Union[Type[ResultDecoder], Sequence[Type[ResultDecoder]]]] = None,
    session_id: Optional[str] = None,
    start_session: Optional[bool] = False,
) -> RuntimeJob:
    """Execute the runtime program.

    Args:
        program_id: Program ID.
        inputs: Program input parameters. These input values are passed
            to the runtime program.
        options: Runtime options that control the execution environment.
            See :class:`RuntimeOptions` for all available options.
        callback: Callback function to be invoked for any interim results and final result.
            The callback function will receive 2 positional parameters:
                1. Job ID
                2. Job result.
        result_decoder: A :class:`ResultDecoder` subclass used to decode job results.
            If more than one decoder is specified, the first is used for interim results and
            the second final results. If not specified, a program-specific decoder or the default
            ``ResultDecoder`` is used.
        session_id: Job ID of the first job in a runtime session.
        start_session: Set to True to explicitly start a runtime session. Defaults to False.

    Returns:
        A ``RuntimeJob`` instance representing the execution.

    Raises:
        IBMInputValueError: If input is invalid.
        RuntimeProgramNotFound: If the program cannot be found.
        IBMRuntimeError: An error occurred running the program.
    """
    # Normalize ``options`` into a RuntimeOptions instance and validate it
    # against the active channel.
    qrt_options: RuntimeOptions = options
    if options is None:
        qrt_options = RuntimeOptions()
    elif isinstance(options, Dict):
        qrt_options = RuntimeOptions(**options)
    qrt_options.validate(channel=self.channel)
    hgp_name = None
    if self._channel == "ibm_quantum":
        # Find the right hgp
        hgp = self._get_hgp(instance=qrt_options.instance, backend_name=qrt_options.backend)
        hgp_name = hgp.name
        if hgp_name != self._current_instance:
            self._current_instance = hgp_name
            logger.info("Instance selected: %s", self._current_instance)
        # Warn when the backend is up but not accepting jobs right now.
        backend = self.backend(name=qrt_options.backend, instance=hgp_name)
        status = backend.status()
        if status.operational is True and status.status_msg != "active":
            warnings.warn(
                f"The backend {backend.name} currently has a status of {status.status_msg}."
            )
    try:
        # Submit the program run; "default" channel strategy is sent as None.
        response = self._api_client.program_run(
            program_id=program_id,
            backend_name=qrt_options.backend,
            params=inputs,
            image=qrt_options.image,
            hgp=hgp_name,
            log_level=qrt_options.log_level,
            session_id=session_id,
            job_tags=qrt_options.job_tags,
            max_execution_time=qrt_options.max_execution_time,
            start_session=start_session,
            session_time=qrt_options.session_time,
            channel_strategy=None
            if self._channel_strategy == "default"
            else self._channel_strategy,
        )
        if self._channel == "ibm_quantum":
            # Surface server-side warnings attached to the response.
            messages = response.get("messages")
            if messages:
                warning_message = messages[0].get("data")
                warnings.warn(warning_message)
    except RequestsApiError as ex:
        if ex.status_code == 404:
            raise RuntimeProgramNotFound(f"Program not found: {ex.message}") from None
        raise IBMRuntimeError(f"Failed to run program: {ex}") from None
    # Prefer the backend the server actually assigned, if reported.
    backend = (
        self.backend(name=response["backend"], instance=hgp_name)
        if response["backend"]
        else qrt_options.backend
    )
    job = RuntimeJob(
        backend=backend,
        api_client=self._api_client,
        client_params=self._client_params,
        job_id=response["id"],
        program_id=program_id,
        user_callback=callback,
        result_decoder=result_decoder,
        image=qrt_options.image,
        service=self,
    )
    return job
def job(self, job_id: str) -> RuntimeJob:
    """Retrieve a runtime job.

    Args:
        job_id: Job ID.

    Returns:
        Runtime job retrieved.

    Raises:
        RuntimeJobNotFound: If the job doesn't exist.
        IBMRuntimeError: If the request failed.
    """
    try:
        response = self._api_client.job_get(job_id, exclude_params=True)
    except RequestsApiError as ex:
        if ex.status_code == 404:
            raise RuntimeJobNotFound(f"Job not found: {ex.message}") from None
        # BUG FIX: the original message said "Failed to delete job" — a
        # copy-paste from delete_job(); this method retrieves a job.
        raise IBMRuntimeError(f"Failed to retrieve job: {ex}") from None
    return self._decode_job(response)
def jobs(
    self,
    limit: Optional[int] = 10,
    skip: int = 0,
    backend_name: Optional[str] = None,
    pending: bool = None,
    program_id: str = None,
    instance: Optional[str] = None,
    job_tags: Optional[List[str]] = None,
    session_id: Optional[str] = None,
    created_after: Optional[datetime] = None,
    created_before: Optional[datetime] = None,
    descending: bool = True,
) -> List[RuntimeJob]:
    """Retrieve all runtime jobs, subject to optional filtering.

    Args:
        limit: Number of jobs to retrieve. ``None`` means no limit.
        skip: Starting index for the job retrieval.
        backend_name: Name of the backend to retrieve jobs from.
        pending: Filter by job pending state. If ``True``, 'QUEUED' and 'RUNNING'
            jobs are included. If ``False``, 'DONE', 'CANCELLED' and 'ERROR' jobs
            are included.
        program_id: Filter by Program ID.
        instance: This is only supported for ``ibm_quantum`` runtime and is in the
            hub/group/project format.
        job_tags: Filter by tags assigned to jobs. Matched jobs are associated with all tags.
        session_id: Job ID of the first job in a runtime session.
        created_after: Filter by the given start date, in local time. This is used to
            find jobs whose creation dates are after (greater than or equal to) this
            local date/time.
        created_before: Filter by the given end date, in local time. This is used to
            find jobs whose creation dates are before (less than or equal to) this
            local date/time.
        descending: If ``True``, return the jobs in descending order of the job
            creation date (i.e. newest first) until the limit is reached.

    Returns:
        A list of runtime jobs.

    Raises:
        IBMInputValueError: If an input value is invalid.
    """
    hub = group = project = None
    if instance:
        if self._channel == "ibm_cloud":
            raise IBMInputValueError(
                "The 'instance' keyword is only supported for ``ibm_quantum`` runtime."
            )
        hub, group, project = from_instance_format(instance)
    if job_tags:
        validate_job_tags(job_tags)
    job_responses = []  # type: List[Dict[str, Any]]
    current_page_limit = limit or 20
    offset = skip
    # Page through the server results until the limit (or the end) is hit.
    while True:
        jobs_response = self._api_client.jobs_get(
            limit=current_page_limit,
            skip=offset,
            backend_name=backend_name,
            pending=pending,
            program_id=program_id,
            hub=hub,
            group=group,
            project=project,
            job_tags=job_tags,
            session_id=session_id,
            created_after=created_after,
            created_before=created_before,
            descending=descending,
        )
        job_page = jobs_response["jobs"]
        # ROBUSTNESS FIX: if the server returns an empty page, stop.
        # Previously an empty page with an inconsistent ``count`` (e.g.
        # skip > count) left both exit conditions unsatisfied and the
        # loop spun forever re-requesting the same offset.
        if not job_page:
            break
        # count is the total number of jobs that would be returned if
        # there was no limit or skip
        count = jobs_response["count"]
        job_responses += job_page
        if len(job_responses) == count - skip:
            # Stop if there are no more jobs returned by the server.
            break
        if limit:
            if len(job_responses) >= limit:
                # Stop if we have reached the limit.
                break
            current_page_limit = limit - len(job_responses)
        else:
            current_page_limit = 20
        offset += len(job_page)
    return [self._decode_job(job) for job in job_responses]
def delete_job(self, job_id: str) -> None:
    """Delete a runtime job.

    Note that this operation cannot be reversed.

    Args:
        job_id: ID of the job to delete.

    Raises:
        RuntimeJobNotFound: If the job doesn't exist.
        IBMRuntimeError: If the request failed.
    """
    try:
        self._api_client.job_delete(job_id)
    except RequestsApiError as ex:
        # Translate the transport-level error into the public exceptions.
        not_found = ex.status_code == 404
        if not_found:
            raise RuntimeJobNotFound(f"Job not found: {ex.message}") from None
        raise IBMRuntimeError(f"Failed to delete job: {ex}") from None
def _decode_job(self, raw_data: Dict) -> RuntimeJob:
    """Decode job data received from the server.

    Args:
        raw_data: Raw job data received from the server.

    Returns:
        Decoded job data.
    """
    instance = None
    if self._channel == "ibm_quantum":
        hub = raw_data.get("hub")
        group = raw_data.get("group")
        project = raw_data.get("project")
        if all([hub, group, project]):
            instance = to_instance_format(hub, group, project)
    # Try to find the right backend
    try:
        if "backend" in raw_data:
            backend = self.backend(raw_data["backend"], instance=instance)
        else:
            backend = None
    except QiskitBackendNotFoundError:
        # The backend named by the job no longer exists: fall back to a
        # placeholder "retired backend" object.
        backend = ibm_backend.IBMRetiredBackend.from_name(
            backend_name=raw_data["backend"],
            api=None,
        )
    # ``params`` may arrive as a list (take the first element), a dict, or
    # an already-serialized JSON string; normalize to a string before
    # decoding with the runtime-aware decoder.
    params = raw_data.get("params", {})
    if isinstance(params, list):
        if len(params) > 0:
            params = params[0]
        else:
            params = {}
    if not isinstance(params, str):
        params = json.dumps(params)
    decoded = json.loads(params, cls=RuntimeDecoder)
    return RuntimeJob(
        backend=backend,
        api_client=self._api_client,
        client_params=self._client_params,
        service=self,
        job_id=raw_data["id"],
        program_id=raw_data.get("program", {}).get("id", ""),
        params=decoded,
        creation_date=raw_data.get("created", None),
        session_id=raw_data.get("session_id"),
        tags=raw_data.get("tags"),
    )
def least_busy(
    self,
    min_num_qubits: Optional[int] = None,
    instance: Optional[str] = None,
    filters: Optional[Callable[[List["ibm_backend.IBMBackend"]], bool]] = None,
    **kwargs: Any,
) -> ibm_backend.IBMBackend:
    """Return the least busy available backend.

    Args:
        min_num_qubits: Minimum number of qubits the backend has to have.
        instance: This is only supported for ``ibm_quantum`` runtime and is in the
            hub/group/project format.
        filters: Filters can be defined as for the :meth:`backends` method.
            An example to get the operational backends with 5 qubits::

                QiskitRuntimeService.least_busy(n_qubits=5, operational=True)

    Returns:
        The backend with the fewest number of pending jobs.

    Raises:
        QiskitBackendNotFoundError: If no backend matches the criteria.
    """
    backends = self.backends(
        min_num_qubits=min_num_qubits, instance=instance, filters=filters, **kwargs
    )
    candidates = []
    for back in backends:
        backend_status = back.status()
        if not backend_status.operational or backend_status.status_msg != "active":
            continue
        # PERF FIX: remember pending_jobs from this status() call. The
        # original re-queried ``b.status()`` inside the ``min`` key, which
        # issued a second status request per candidate backend.
        candidates.append((back, backend_status.pending_jobs))
    if not candidates:
        raise QiskitBackendNotFoundError("No backend matches the criteria.")
    return min(candidates, key=lambda pair: pair[1])[0]
def instances(self) -> List[str]:
    """Return the IBM Quantum instances list currently in use for the session.

    Returns:
        A list with instances currently in the session.
    """
    # Instances only exist on the ibm_quantum channel; ibm_cloud has none.
    if self._channel != "ibm_quantum":
        return []
    return list(self._hgps.keys())
@property
def channel(self) -> str:
    """Return the channel type used.

    Returns:
        The channel type used (``ibm_cloud`` or ``ibm_quantum``).
    """
    return self._channel
@property
def runtime(self):  # type:ignore
    """Return self for compatibility with IBMQ provider.

    Returns:
        self
    """
    return self
def __repr__(self) -> str:
    """Concise representation: the class name in angle brackets."""
    return f"<{type(self).__name__}>"
| Qiskit/qiskit-ibm-runtime | qiskit_ibm_runtime/qiskit_runtime_service.py | qiskit_runtime_service.py | py | 47,557 | python | en | code | 106 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "qiskit.providers.provider.ProviderV1",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 107,
"usage_type": "name"
},
{
"api_name"... |
import os
from dotenv import load_dotenv

# Default number of gunicorn worker processes when none is configured.
DEFAULT_GUNICORN_WORKERS = 4
# Path of the dotenv file holding environment configuration.
DEFAULT_CONFIG_PATH = ".env"
# JWT token lifetimes.
ACCESS_TOKEN_EXPIRE_MINUTES = 30  # 30 minutes
REFRESH_TOKEN_EXPIRE_MINUTES = 60 * 24 * 7  # 7 days
# JWT signing algorithm (HMAC-SHA256).
ALGORITHM = "HS256"
# Populate os.environ from the .env file before reading the secrets below.
load_dotenv(DEFAULT_CONFIG_PATH)
# Indexing (not .get) is deliberate: fail fast with KeyError when the
# signing secrets are missing from the environment.
JWT_SECRET_KEY = os.environ["JWT_SECRET_KEY"]
JWT_REFRESH_SECRET_KEY = os.environ["JWT_REFRESH_SECRET_KEY"]
| IslomK/family_budget | family_budget/core/const.py | const.py | py | 364 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
}
] |
12633123289 | #!/usr/bin/env python
# coding: utf-8
# In[3]:
# load libraries
import numpy as np
import scipy.sparse as sp
import cplex as cp
# In[4]:
def mixed_integer_linear_programming(direction, A, senses, b, c, l, u, types):
    """Solve a mixed-integer linear program with CPLEX.

    Args:
        direction: "maximize" or "minimize".
        A: sparse constraint-coefficient matrix (constraints x variables).
        senses: per-constraint sense characters (e.g. "E", "L", "G").
        b: right-hand-side values, one per constraint.
        c: objective coefficients, one per variable.
        l, u: per-variable lower/upper bounds.
        types: per-variable type characters (e.g. "B" for binary).

    Returns:
        Tuple ``(x_star, obj_star)`` — optimal variable values and the
        optimal objective value.
    """
    # create an empty optimization problem
    prob = cp.Cplex()
    # add decision variables to the problem including their coefficients in objective and ranges
    prob.variables.add(obj = c.tolist(), lb = l.tolist(), ub = u.tolist(), types = types.tolist())
    # define problem type
    if direction == "maximize":
        prob.objective.set_sense(prob.objective.sense.maximize)
    else:
        prob.objective.set_sense(prob.objective.sense.minimize)
    # add constraints to the problem including their directions and right-hand side values
    prob.linear_constraints.add(senses = senses.tolist(), rhs = b.tolist())
    # add coefficients for each constraint
    row_indices, col_indices = A.nonzero()
    prob.linear_constraints.set_coefficients(zip(row_indices.tolist(), col_indices.tolist(), A.data.tolist()))
    # solve the problem
    # NOTE(review): printing the full LP text is debug output; consider
    # removing it for large models.
    print(prob.write_as_string())
    prob.solve()
    # check the solution status
    print(prob.solution.get_status())
    print(prob.solution.status[prob.solution.get_status()])
    # get the solution
    x_star = prob.solution.get_values()
    obj_star = prob.solution.get_objective_value()
    return(x_star, obj_star)
# In[7]:
def coin_distribution_problem(coins_file, M):
    """Distribute the coins listed in ``coins_file`` evenly among M children.

    Formulated as a binary program: decision variable x[n, m] = 1 when coin
    n goes to child m. Each child must receive exactly total/M in value and
    each coin must be assigned to exactly one child.

    Returns:
        An (N, M) 0/1 matrix of the assignment (N = number of coins).
    """
    coins = np.loadtxt(coins_file)
    N = coins.shape[0] # number of coins
    # number of decision variables = number of coins * number of children
    E = M * N
    # number of constraints = number of coins + number of children
    V = M + N
    # money per child = total money / number of children (exact division —
    # the problem is infeasible when the total is not evenly divisible)
    P = np.sum(coins) / M
    print(P)
    c = np.repeat(1, E)
    # First M constraints: each child's coins sum to P; next N: each coin
    # is used exactly once.
    b = np.concatenate((np.repeat(P, M), np.repeat(1, N)))
    l = np.repeat(0, E)
    u = np.repeat(1, E)
    senses = np.repeat("E", V)
    types = np.repeat("B", E)
    # Sparse constraint matrix: coin values for the per-child rows, ones for
    # the per-coin rows. NOTE(review): variables appear to be laid out
    # coin-major (x[n*M + m]) — the reshape(N, M).T.flatten() below maps the
    # per-child rows onto that layout; confirm against the return reshape.
    aij = np.concatenate((np.tile(coins, M), np.repeat(1, E)))
    row = np.concatenate((np.repeat(range(M), N), M + np.repeat(range(N), M)))
    col = np.concatenate((np.array(range(E)).reshape(N, M).T.flatten(), range(E)))
    A = sp.csr_matrix((aij, (row, col)), shape = (V, E))
    X_star, obj_star = mixed_integer_linear_programming("maximize", A, senses, b, c, l, u, types)
    return(np.array(X_star).reshape(N, M))
# In[8]:
# Demo: split the coins listed in coins.txt evenly between 2 children.
X_star = coin_distribution_problem("coins.txt", 2)
print(X_star)
# In[ ]:
| berdogan20/Operations-Research-Problems | TheCoinDistributionProblem/Solution.py | Solution.py | py | 2,514 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cplex.Cplex",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.repeat",
"line_number":... |
27177310215 | #region libraries
import cv2
import numpy as np
#endregion
#region process
def process(img_path, template_path):
    """Find every occurrence of a template image inside a source image.

    Loads both images, runs normalized cross-correlation template matching
    on the grayscale source, draws a green rectangle around each match with
    a score of at least 0.9, and shows the annotated image in a window.

    Args:
        img_path: path of the image to search in.
        template_path: path of the template (pattern) image.
    """
    source = cv2.imread(img_path)
    gray = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
    # Load the template directly as grayscale (flag 0).
    template = cv2.imread(template_path, 0)
    height, width = template.shape
    # Per-pixel match scores via normalized cross-correlation.
    scores = cv2.matchTemplate(gray, template, cv2.TM_CCOEFF_NORMED)
    threshold = 0.9
    rows, cols = np.where(scores >= threshold)
    for x, y in zip(cols, rows):
        source = cv2.rectangle(source, (x, y), (x + width, y + height), (0, 255, 0), 1)
    cv2.imshow('result', source)
#endregion
# Run the demo on the bundled sample images.
process('images/image.jpg', 'images/pattern.jpg') # You can call the function and enter its input arguments to perform the operation
# Keep the result window open until a key is pressed, then close all windows.
cv2.waitKey(0)
cv2.destroyAllWindows()
{
"api_name": "cv2.imread",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_n... |
19262571802 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from datetime import datetime, timedelta
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.worker_result import WorkerResult
from pokemongo_bot import inventory
from pokemongo_bot.item_list import Item
from pokemongo_bot.human_behaviour import sleep, action_delay
class HealPokemon(BaseTask):
    """Revive dead pokemon and heal injured ones using the revives and
    potions currently in the inventory."""

    SUPPORTED_TASK_API_VERSION = 1

    # HP restored by each potion type (item id -> amount).
    # BUG FIX: the original kept two parallel lists (potions=[103,102,101],
    # heals=[200,50,20]) and later indexed ``heals[potion_id - 101]``, which
    # mapped a normal potion (101) to 200 HP instead of 20.
    HEAL_AMOUNTS = {101: 20, 102: 50, 103: 200}

    def __init__(self, bot, config):
        super(HealPokemon, self).__init__(bot, config)
        self.bot = bot
        self.config = config
        self.enabled = self.config.get("enabled", False)
        self.revive_pokemon = self.config.get("revive", True)
        self.heal_pokemon = self.config.get("heal", True)
        self.next_update = None
        self.to_heal = []
        # Remember previous warnings so the log is not spammed every tick.
        self.warned_about_no_revives = False
        self.warned_about_no_potions = False

    def work(self):
        """Scan the inventory and revive/heal pokemon as configured."""
        if not self.enabled:
            return WorkerResult.SUCCESS
        # Check for pokemon to heal or revive
        to_revive = []
        self.to_heal = []
        pokemons = inventory.pokemons().all()
        pokemons.sort(key=lambda p: p.hp)
        for pokemon in pokemons:
            if pokemon.hp < 1.0:
                self.logger.info("Dead: %s (%s CP| %s/%s )" % (pokemon.name, pokemon.cp, pokemon.hp, pokemon.hp_max))
                to_revive.append(pokemon)
            elif pokemon.hp < pokemon.hp_max:
                self.logger.info("Heal: %s (%s CP| %s/%s )" % (pokemon.name, pokemon.cp, pokemon.hp, pokemon.hp_max))
                self.to_heal.append(pokemon)
        if len(self.to_heal) == 0 and len(to_revive) == 0:
            # BUG FIX: _should_print is a method; the original referenced it
            # without calling it, so the condition was always truthy.
            if self._should_print():
                self.next_update = datetime.now() + timedelta(seconds=120)
            return WorkerResult.SUCCESS
        # Okay, start reviving pokemons
        # Check revives and potions
        revives = inventory.items().get(Item.ITEM_REVIVE.value).count
        max_revives = inventory.items().get(Item.ITEM_MAX_REVIVE.value).count
        normal = inventory.items().get(Item.ITEM_POTION.value).count
        super_p = inventory.items().get(Item.ITEM_SUPER_POTION.value).count
        hyper = inventory.items().get(Item.ITEM_HYPER_POTION.value).count
        max_p = inventory.items().get(Item.ITEM_MAX_POTION.value).count
        self.logger.info("Healing %s pokemon" % len(self.to_heal))
        self.logger.info("Reviving %s pokemon" % len(to_revive))
        if self.revive_pokemon:
            if len(to_revive) > 0 and revives == 0 and max_revives == 0:
                if not self.warned_about_no_revives:
                    self.logger.info("No revives left! Can't revive %s pokemons." % len(to_revive))
                    self.warned_about_no_revives = True
            elif len(to_revive) > 0:
                self.logger.info("Reviving %s pokemon..." % len(to_revive))
                self.warned_about_no_revives = False
                for pokemon in to_revive:
                    self._revive_pokemon(pokemon)
        if self.heal_pokemon:
            if len(self.to_heal) > 0 and (normal + super_p + hyper + max_p) == 0:
                if not self.warned_about_no_potions:
                    self.logger.info("No potions left! Can't heal %s pokemon" % len(self.to_heal))
                    self.warned_about_no_potions = True
            elif len(self.to_heal) > 0:
                self.logger.info("Healing %s pokemon" % len(self.to_heal))
                self.warned_about_no_potions = False
                for pokemon in self.to_heal:
                    self._heal_pokemon(pokemon)
        # BUG FIX: call _should_print() (see above).
        if self._should_print():
            self.next_update = datetime.now() + timedelta(seconds=120)
        self.logger.info("Done healing/reviving pokemon")

    def _revive_pokemon(self, pokemon):
        """Use a (max) revive on a dead pokemon.

        A normal revive restores only half HP, so the pokemon is queued in
        ``self.to_heal`` for follow-up healing. Returns True on success.
        """
        item = Item.ITEM_REVIVE.value
        amount = inventory.items().get(item).count
        if amount == 0:
            self.logger.info("No normal revives left, using MAX revive!")
            item = Item.ITEM_MAX_REVIVE.value
            amount = inventory.items().get(item).count
        if amount > 0:
            response_dict_revive = self.bot.api.use_item_revive(item_id=item, pokemon_id=pokemon.unique_id)
            action_delay(2, 3)
            if response_dict_revive:
                result = response_dict_revive.get('responses', {}).get('USE_ITEM_REVIVE', {}).get('result', 0)
                revive_item = inventory.items().get(item)
                # Remove the revive from the iventory
                revive_item.remove(1)
                # BUG FIX: compare ints with == instead of identity (`is`).
                if result == 1:  # Request success
                    self.emit_event(
                        'revived_pokemon',
                        formatted='Revived {name}.',
                        data={
                            'name': pokemon.name
                        }
                    )
                    if item == Item.ITEM_REVIVE.value:
                        pokemon.hp = int(pokemon.hp_max / 2)
                        self.to_heal.append(pokemon)
                    else:
                        # Set pokemon as revived
                        pokemon.hp = pokemon.hp_max
                    return True
                else:
                    self.emit_event(
                        'revived_pokemon',
                        level='error',
                        formatted='Failed to revive {name}!',
                        data={
                            'name': pokemon.name
                        }
                    )
                    return False

    def _heal_pokemon(self, pokemon):
        """Heal an injured pokemon, strongest adequate potions first."""
        if pokemon.hp == 0:
            self.logger.info("Can't heal a dead %s" % pokemon.name)
            return False
        max_p = inventory.items().get(Item.ITEM_MAX_POTION.value).count

        def hp_to_restore(pokemon):
            # Re-read the pokemon from the inventory so server-side HP
            # updates from previous potion uses are reflected.
            pokemon = inventory.pokemons().get_from_unique_id(pokemon.unique_id)
            return pokemon.hp_max - pokemon.hp

        if hp_to_restore(pokemon) > 200 and max_p > 0:
            # We should use a MAX Potion
            self._use_potion(Item.ITEM_MAX_POTION.value, pokemon)
            pokemon.hp = pokemon.hp_max
            return True
        # Spend the strongest potions while the missing HP still exceeds
        # what they restore, so no healing is wasted.
        for item_id in (103, 102, 101):
            max_heal = self.HEAL_AMOUNTS[item_id]
            if inventory.items().get(item_id).count > 0:
                while hp_to_restore(pokemon) > max_heal:
                    if inventory.items().get(item_id).count == 0:
                        break
                    action_delay(2, 3)
                    if self._use_potion(item_id, pokemon):
                        pokemon.hp = min(pokemon.hp + max_heal, pokemon.hp_max)
                    else:
                        break
        # Top off the remaining HP with the weakest potions first, escalating
        # when a potion type runs out or fails.
        potion_id = 101  # Normals first
        while hp_to_restore(pokemon) > 0:
            action_delay(2, 4)
            if inventory.items().get(potion_id).count > 0:
                if potion_id == 104:
                    self.logger.info("Using MAX potion to heal a %s" % pokemon.name)
                if self._use_potion(potion_id, pokemon):
                    if potion_id == 104:
                        pokemon.hp = pokemon.hp_max
                    else:
                        # BUG FIX: use the correct per-potion heal amount
                        # (the original indexed the inverted ``heals`` list).
                        pokemon.hp = min(
                            pokemon.hp + self.HEAL_AMOUNTS[potion_id],
                            pokemon.hp_max,
                        )
                else:
                    if potion_id < 104:
                        self.logger.info("Failed with potion %s. Trying next." % potion_id)
                        potion_id += 1
                    else:
                        self.logger.info("Failed with MAX potion. Done.")
                        break
            elif potion_id < 104:
                potion_id += 1
            else:
                self.logger.info("Can't heal a %s" % pokemon.name)
                break

    def _use_potion(self, potion_id, pokemon):
        """Use one potion of ``potion_id`` on ``pokemon``.

        Returns True when the server accepted the request (or the pokemon
        is already at full HP), False otherwise.
        """
        if pokemon.hp >= pokemon.hp_max:
            # Already at MAX health
            return True
        potion_count = inventory.items().get(potion_id).count
        healing = 0
        if potion_count == 0:
            return False
        if potion_id == 101:
            self.logger.info("Healing with a normal potion we have %s left." % (potion_count - 1))
            healing = 20
        if potion_id == 102:
            self.logger.info("Healing with a Super potion we have %s left." % (potion_count - 1))
            healing = 50
        if potion_id == 103:
            self.logger.info("Healing with a HYper potion we have %s left." % (potion_count - 1))
            healing = 200
        if potion_id == 104:
            self.logger.info("Healing with a MAX potion we have %s left." % (potion_count - 1))
            healing = pokemon.hp_max - pokemon.hp
        response_dict_potion = self.bot.api.use_item_potion(item_id=potion_id, pokemon_id=pokemon.unique_id)
        # Select potion
        sleep(2)
        if response_dict_potion:
            result = response_dict_potion.get('responses', {}).get('USE_ITEM_POTION', {}).get('result', 0)
            # BUG FIX: compare ints with == / in, not identity (`is`).
            if result in (0, 1):  # Request success
                potion_item = inventory.items().get(potion_id)
                # Remove the potion from the iventory
                potion_item.remove(1)
                self.emit_event(
                    'healing_pokemon',
                    formatted='Healing {name} ({hp} -> {hp_new}/{hp_max}).',
                    data={
                        'name': pokemon.name,
                        'hp': pokemon.hp,
                        'hp_new': pokemon.hp + healing,
                        'hp_max': pokemon.hp_max
                    }
                )
                return True
            elif result == 3:
                # ERROR_CANNOT_USE
                pokemon.hp = pokemon.hp_max
                self.logger.info("Can't use this to heal the %s" % pokemon.name)
                return False
            else:
                self.logger.info("Result was: %s" % result)
                self.emit_event(
                    'healing_pokemon',
                    level='error',
                    formatted='Failed to heal {name} ({hp} -> {hp_new}/{hp_max})!',
                    data={
                        'name': pokemon.name,
                        'hp': pokemon.hp,
                        'hp_new': pokemon.hp + healing,
                        'hp_max': pokemon.hp_max
                    }
                )
                return False

    def _should_print(self):
        """
        Returns a value indicating whether the pokemon should be displayed.
        :return: True if the stats should be displayed; otherwise, False.
        :rtype: bool
        """
        return self.next_update is None or datetime.now() >= self.next_update
| PokemonGoF/PokemonGo-Bot | pokemongo_bot/cell_workers/heal_pokemon.py | heal_pokemon.py | py | 11,496 | python | en | code | 3,815 | github-code | 36 | [
{
"api_name": "pokemongo_bot.base_task.BaseTask",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pokemongo_bot.worker_result.WorkerResult.SUCCESS",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "pokemongo_bot.worker_result.WorkerResult",
"line_numbe... |
20220878757 | import pathlib
prj_path = str(pathlib.Path(__file__).parent.parent.parent.resolve())
from advent_of_code.lib import parse as aoc_parse
from advent_of_code.lib import aoc
@aoc.pretty_solution(1)
def part1(data):
    """Day 2 part 1: product of final horizontal position and depth.

    ``data`` is a sequence of (direction, amount) pairs; 'forward' moves
    horizontally, 'down' increases depth, 'up' decreases it.
    """
    horizontal = 0
    depth = 0
    for direction, amount in data:
        if direction == 'forward':
            horizontal += amount
        elif direction == 'up':
            depth -= amount
        elif direction == 'down':
            depth += amount
    return horizontal * depth
@aoc.pretty_solution(2)
def part2(data):
    """Day 2 part 2: same commands, but 'up'/'down' adjust aim and
    'forward' moves horizontally while diving by aim * amount."""
    horizontal, depth, aim = 0, 0, 0
    for direction, amount in data:
        if direction == "forward":
            horizontal += amount
            depth += aim * amount
        elif direction == "down":
            aim += amount
        elif direction == "up":
            aim -= amount
    return horizontal * depth
def main():
    """Parse the day 2 input file and return both puzzle answers."""
    def parse_line(line):
        # Each line looks like "forward 5": keep the word, int() the number.
        direction, amount = line.split()
        return direction, int(amount)
    data = aoc_parse.map_input_lines(prj_path + '/year2021/input/day02.txt', parse_line)
    return part1(data), part2(data)
if __name__ == "__main__":
main()
| Perruccio/advent-of-code | advent_of_code/year2021/solutions/day02.py | day02.py | py | 1,006 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "advent_of_code.lib.aoc.pretty_solution",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "advent_of_code.lib.aoc",
"line_number": 8,
"usage_type": "name"
},
{
"api_name"... |
2251372103 | from typing import List
import mlflow
import pandas as pd
import tensorflow as tf
from keras_preprocessing.image import ImageDataGenerator
from zenml.steps import BaseParameters, Output, step
class EvaluateClassifierConfig(BaseParameters):
    """Trainer params"""
    # (height, width, channels) expected by the evaluation generator.
    # NOTE(review): annotated List[int] but the default is a tuple — confirm
    # the BaseParameters layer coerces it as intended.
    input_shape: List[int] = (224, 224, 3)
    # Evaluation batch size for the test generator.
    batch_size: int = 4
@step(enable_cache=False, experiment_tracker="local_mlflow_tracker")
def evaluate_classifier(
    config: EvaluateClassifierConfig, model: tf.keras.Model, test_df: pd.DataFrame
) -> Output(test_acc=float):
    """Evaluate a trained image classifier on the held-out test set.

    Builds a Keras image generator over ``test_df`` (which must provide
    'Filepath' and 'Label' columns), runs ``model.evaluate``, logs the
    accuracy to MLflow and returns it.
    """
    # Test data generator
    test_generator = ImageDataGenerator()
    test_images = test_generator.flow_from_dataframe(
        dataframe=test_df,
        x_col="Filepath",
        y_col="Label",
        target_size=(config.input_shape[0], config.input_shape[1]),
        color_mode="rgb",
        class_mode="categorical",
        batch_size=config.batch_size,
        shuffle=False,  # deterministic order for evaluation
    )
    # results = [loss, metric, ...] per the model's compiled metrics.
    # NOTE(review): assumes accuracy is the first compiled metric — confirm.
    results = model.evaluate(test_images, verbose=1)
    mlflow.log_metric("Test accuracy", results[1])
    print("Model performance on Test Set:")
    print("Accuracy on Test Set: {:.2f}".format(results[1]))
    return results[1]
| thbinder/mlops_sea_animal_classification | src/domain/steps/mlflow_evaluator.py | mlflow_evaluator.py | py | 1,170 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "zenml.steps.BaseParameters",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pandas.D... |
6752387766 | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QDialog
from PyQt5.QtGui import QDoubleValidator
from PyQt5.QtCore import pyqtSlot, QDate
from warehouse.views.editregtuff import Ui_Dialog
from supplyer.controllers.supplyercontroller import SupplyerController
from stuff.controllers.stuffcontroller import StuffController
from warehouse.controllers.warehousecontroller import WarehouseController
from supplyer.modules.selectstuffModule import SelectstuffModule
from lib.utils.messagebox import MessageBox
import user
import datetime
import re
class EditRegStuffModule(QDialog, Ui_Dialog):
def __init__(self, spid=None, paperno=None, papertype=0, autoid=None, parent=None):
    """Edit dialog for a registered-stuff (check-in) record.

    Args:
        spid: supplyer id, used to look up candidate producers.
        paperno: paper number of the parent document.
        papertype: type code of the parent document.
        autoid: primary key of an existing check-in record; None means
            the dialog creates a new record.
        parent: Qt parent widget.
    """
    super(EditRegStuffModule, self).__init__(parent)
    self.setupUi(self)
    # Permission gate: power key '28' controls access to this dialog.
    # NOTE(review): close() inside __init__ may not stop a caller from
    # show()-ing the dialog afterwards — confirm intended behavior; also,
    # a missing '28' key makes the next line raise KeyError.
    if '28' not in user.powers:
        self.close()
    if user.powers['28'] == 0:
        self.close()
    # 3-bit mask; bit 1 grants edit rights (accept/cancel buttons).
    self.power = '{:03b}'.format(user.powers['28'])
    if self.power[1] == '0':
        self.pushButton_accept.setVisible(False)
        self.pushButton_cancel.setVisible(False)
    self.ori_detail = dict()  # record as loaded from the database
    self.new_detail = dict()  # pending changes keyed by field name
    self.SC = SupplyerController()
    self.WC = WarehouseController()
    self.SFC = StuffController()
    self.spid = spid
    self.autoid = autoid
    self.paperno = paperno
    self.papertype = papertype
    self.get_detail()
    self.set_amount_validator()
    self.get_producer_list()
    self.get_location_list()
def get_detail(self):
    """Load the existing check-in record (self.autoid) into the form.

    Without an autoid the dialog is in "new record" mode and the stuff
    picker button stays enabled; otherwise the record is fetched and the
    widgets are populated from it.
    """
    if not self.autoid:
        self.toolButton_more.setEnabled(True)
        return
    self.toolButton_more.setEnabled(False)
    key_dict = {
        'autoid': self.autoid
    }
    res = self.WC.get_stuffcheckinlist(
        False, *VALUES_TUPLE_CHECK_IN_LIST, **key_dict
    )
    # A primary-key lookup must yield exactly one row.
    if len(res) != 1:
        return
    self.ori_detail = res[0]
    self.lineEdit_stuff.setText(
        self.ori_detail['stuffid'] + ' ' + self.ori_detail['stuffname']
    )
    self.label_spec.setText(self.ori_detail['spec'])
    self.label_package.setText(self.ori_detail['package'])
    self.lineEdit_amount.setText(str(self.ori_detail['amount']))
    self.label_unit.setText(self.ori_detail['unit'])
    self.lineEdit_batchno.setText(self.ori_detail['batchno'])
    self.lineEdit_mbatchno.setText(self.ori_detail['mbatchno'])
    self.dateEdit_makedate.setDate(self.ori_detail['makedate'])
    self.dateEdit_invaliddate.setDate(self.ori_detail['expireddate'])
def set_amount_validator(self):
    """Restrict the amount field to non-negative numbers (3 decimals)."""
    validator = QDoubleValidator()
    validator.setNotation(QDoubleValidator.StandardNotation)
    validator.setBottom(0)
    validator.setDecimals(3)
    self.lineEdit_amount.setValidator(validator)
def get_location_list(self):
    """Fill the location combo box with all known storage positions and
    select the record's current position (or blank for a new record)."""
    locations = self.WC.get_stuffcheckinlist(
        True, *VALUES_TUPLE_LOCATION
    ).distinct()
    if locations:
        self.comboBox_location.addItems(locations)
    current = self.ori_detail['position'] if self.ori_detail else ""
    self.comboBox_location.setCurrentText(current)
def get_producer_list(self, sdid=None):
    """Fill the producer combo box for the current stuff/supplyer pair.

    Args:
        sdid: stuff-dictionary id; when None it is resolved from the
            loaded record's stuffid (requires self.autoid).
    """
    if not (self.autoid or sdid):
        return
    if sdid is None:
        # Resolve the stuff-dictionary id from the stored record.
        candidates = self.SFC.get_stuffdict(
            True, *VALUES_TUPLE_SDID, **{'stuffid': self.ori_detail['stuffid']}
        )
        if candidates:
            sdid = candidates[0]
    if not (sdid and self.spid):
        return
    producers = self.SC.get_stuffsupplyer(
        True, *VALUES_TUPLE_PRODUCER, **{'sdid': sdid, 'spid': self.spid}
    )
    if producers:
        self.comboBox_producer.addItems(producers)
        if self.ori_detail:
            self.comboBox_producer.setCurrentText(
                self.ori_detail['producer']
            )
@pyqtSlot(str)
def on_lineEdit_amount_textChanged(self, p_str):
    """Track edits to the amount field in new_detail ('amount'/'piamount')."""
    reverted = 'amount' in self.ori_detail and p_str == self.ori_detail['amount']
    if reverted:
        # Value matches the original again: drop the pending change.
        # 'piamount' is only dropped when 'amount' was pending, mirroring
        # the original sequential-delete behavior.
        if 'amount' in self.new_detail:
            del self.new_detail['amount']
            self.new_detail.pop('piamount', None)
    else:
        self.new_detail['amount'] = p_str
        self.new_detail['piamount'] = p_str
@pyqtSlot(str)
def on_comboBox_producer_currentTextChanged(self, p_str):
    """Track producer selection changes in new_detail."""
    if 'producer' in self.ori_detail and p_str == self.ori_detail['producer']:
        # Back to the original value: no pending change.
        self.new_detail.pop('producer', None)
    else:
        self.new_detail['producer'] = p_str
@pyqtSlot(str)
def on_comboBox_location_currentTextChanged(self, p_str):
    """Track storage-location changes in new_detail ('position')."""
    if 'position' in self.ori_detail and p_str == self.ori_detail['position']:
        # Back to the original value: no pending change.
        self.new_detail.pop('position', None)
    else:
        self.new_detail['position'] = p_str
@pyqtSlot(str)
def on_lineEdit_batchno_textChanged(self, p_str):
    """Track batch-number edits in new_detail ('batchno')."""
    if 'batchno' in self.ori_detail and p_str == self.ori_detail['batchno']:
        # Back to the original value: no pending change.
        self.new_detail.pop('batchno', None)
    else:
        self.new_detail['batchno'] = p_str
@pyqtSlot(str)
def on_lineEdit_mbatchno_textChanged(self, p_str):
    """Track manufacturer batch-number edits in new_detail ('mbatchno')."""
    if 'mbatchno' in self.ori_detail and p_str == self.ori_detail['mbatchno']:
        # Back to the original value: no pending change.
        self.new_detail.pop('mbatchno', None)
    else:
        self.new_detail['mbatchno'] = p_str
@pyqtSlot(QDate)
def on_dateEdit_makedate_dateChanged(self, q_date):
    """Record a makedate change in ``new_detail`` as a ``datetime.date``;
    drop the pending change when the widget matches the original date."""
    try:
        original = self.ori_detail['makedate']
    except KeyError:
        # No original date recorded: any input counts as a change.
        self.new_detail['makedate'] = q_date.toPyDate()
        return
    if type(original) is str:
        # String originals cannot be compared to QDate; always record.
        self.new_detail['makedate'] = q_date.toPyDate()
        return
    if q_date != QDate(original):
        self.new_detail['makedate'] = q_date.toPyDate()
    else:
        # Date restored to the original: discard any pending edit.
        self.new_detail.pop('makedate', None)
@pyqtSlot(QDate)
def on_dateEdit_invaliddate_dateChanged(self, q_date):
    """Record an expireddate change in ``new_detail`` as a ``datetime.date``;
    drop the pending change when the widget matches the original date."""
    try:
        original = self.ori_detail['expireddate']
    except KeyError:
        # No original date recorded: any input counts as a change.
        self.new_detail['expireddate'] = q_date.toPyDate()
        return
    if type(original) is str:
        # String originals cannot be compared to QDate; always record.
        self.new_detail['expireddate'] = q_date.toPyDate()
        return
    if q_date != QDate(original):
        self.new_detail['expireddate'] = q_date.toPyDate()
    else:
        # Date restored to the original: discard any pending edit.
        self.new_detail.pop('expireddate', None)
@pyqtSlot()
def on_toolButton_more_clicked(self):
    """Open the stuff-selection dialog; the chosen stuff id is delivered
    to :meth:`set_stuff` via the dialog's ``selected`` signal."""
    detail = SelectstuffModule(self.spid, self)
    detail.selected.connect(self.set_stuff)
    detail.show()
def set_stuff(self, p_int):
    """Load stuff record *p_int* and populate the form widgets and
    ``new_detail`` from it.

    Sets makedate to today and the expiry date to today plus the stuff's
    shelf life, then moves focus to the batchno field and refreshes the
    producer list.
    """
    key_dict = {'autoid': p_int}
    res = self.SFC.get_stuffdict(False, *VALUES_TUPLE_STUFF, **key_dict)
    if not len(res):
        # Unknown id: leave the form untouched.
        return
    stuff = res[0]
    self.lineEdit_stuff.setText(stuff['stuffid'] + stuff['stuffname'])
    self.label_spec.setText(stuff['spec'])
    self.label_package.setText(stuff['package'])
    self.label_unit.setText(stuff['spunit'])
    self.dateEdit_makedate.setDate(user.now_date)
    # Expiry defaults to manufacture date + declared shelf life (days).
    self.dateEdit_invaliddate.setDate(
        user.now_date + datetime.timedelta(days=stuff['expireddays'])
    )
    # Note: 'unit' is intentionally filled from the stuff's 'spunit' field.
    self.new_detail['stuffid'] = stuff['stuffid']
    self.new_detail['stuffname'] = stuff['stuffname']
    self.new_detail['spec'] = stuff['spec']
    self.new_detail['package'] = stuff['package']
    self.new_detail['unit'] = stuff['spunit']
    self.new_detail['stufftype'] = stuff['stufftype']
    self.lineEdit_batchno.setFocus()
    self.get_producer_list(p_int)
@pyqtSlot()
def on_pushButton_accept_clicked(self):
    """Validate the form, persist any pending changes and accept the dialog.

    Shows a message box and aborts when stuff, amount or batchno are
    missing; otherwise augments ``new_detail`` with supplier/paper data
    and writes it through ``self.WC.update_stuffcheckinlist``.
    """
    text = ''
    if self.lineEdit_stuff.text() == '':
        text = "物料不能为空!\n"
    if self.lineEdit_amount.text() in ('', '0'):
        text += "到货数量不能为空!\n"
    if self.lineEdit_batchno.text() == '':
        text += "进厂批号不能为空!\n"
    if len(text) > 0:
        # At least one required field is empty: report and stay open.
        message = MessageBox(
            self, text="以下信息填写错误",
            informative=text
        )
        message.show()
        return
    if len(self.new_detail):
        if self.spid:
            key_dict_supplyer = {'autoid': self.spid}
            supplyer_list = self.SC.get_supply(
                False, *VALUES_TUPLE_SUPPLYER, **key_dict_supplyer
            )
            if len(supplyer_list):
                supplyer = supplyer_list[0]
                self.new_detail['supid'] = supplyer['supid']
                self.new_detail['supname'] = supplyer['supname']
        self.new_detail['paperno'] = self.paperno
        self.new_detail['papertype'] = self.papertype
        self.new_detail['checkindate'] = user.now_date
        # NOTE(review): indentation was lost in extraction; the update call
        # is assumed to run only when there are pending changes, and the
        # dialog is assumed to close either way — confirm against the repo.
        res = self.WC.update_stuffcheckinlist(self.autoid, **self.new_detail)
    self.accept()
@pyqtSlot()
def on_pushButton_cancel_clicked(self):
    """Discard any pending edits and close the dialog."""
    self.close()
# Field tuples passed as *values selectors to the controller query helpers
# (get_stuffdict / get_stuffsupplyer / get_supply).
VALUES_TUPLE_SDID = ('autoid',)
VALUES_TUPLE_PRODUCER = ('producer',)
VALUES_TUPLE_LOCATION = ('position',)
VALUES_TUPLE_SUPPLYER = ('supid','supname')
# Columns of a check-in list row.
VALUES_TUPLE_CHECK_IN_LIST = (
    "autoid", "stuffid", "stuffname", "spec", "package", "producer", "batchno",
    "mbatchno", "unit", "amount", "makedate", "expireddate", "position", "supid"
)
# Columns fetched for a stuff record when populating the form.
VALUES_TUPLE_STUFF = (
    'stuffid', 'stuffname', 'stufftype', 'spec', 'package', 'spunit',
    "expireddays"
)
| zxcvbnmz0x/gmpsystem | warehouse/modules/editregstuffmodule.py | editregstuffmodule.py | py | 10,777 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "warehouse.views.editregtuff.Ui_Dialog",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "user.powers",
"line_number": 27,
"usage_type": "attribute"
},
{
"ap... |
18909907329 | from prettytable import PrettyTable
class Database:
    """Thin MySQL-backed store for a graphics-card shop.

    Opens a connection to a local MySQL server, creates the database and
    the ``products`` / ``admins`` tables on first use, and exposes simple
    CRUD and query helpers. Query values are bound as parameters instead
    of being interpolated into SQL strings (the original code was
    SQL-injectable everywhere user input reached a query).
    """

    def __init__(self, database_name):
        import mysql.connector as m1
        self.var = "w"
        self.conn = m1.connect(host="localhost", user="root", password="utkarsh")
        self.cursor = self.conn.cursor()
        # Identifiers (database names) cannot be bound as query parameters,
        # so %-formatting is kept here; database_name must be trusted.
        self.cursor.execute("CREATE DATABASE IF NOT EXISTS %s" % database_name)
        self.cursor.execute("USE %s" % database_name)
        self.cursor.execute(
            "CREATE TABLE IF NOT EXISTS products (product_id INTEGER unsigned primary key auto_increment, product_name VARCHAR(50) not null,quantity INTEGER unsigned NOT NULL,Company enum('asus','msi','asrock','gigabyte','inno3d') not null ,gpu_company enum('nvidea','amd') not null,price INTEGER unsigned NOT NULL,vram_gb INTEGER unsigned NOT NULL)"
        )
        self.cursor.execute(
            "CREATE TABLE IF NOT EXISTS admins(admin_id integer primary key auto_increment,admin_name VARCHAR(20) unique not null,pas VARCHAR(20) not null)"
        )
        # Column headers used when pretty-printing product rows.
        self.producttabletemplate = [
            "product id",
            "product name",
            "quantity",
            "graphics card seller",
            "gpu company",
            "price",
            "vram",
        ]
        try:
            # Seed the default admin; on re-runs the unique constraint makes
            # this fail, which is expected — catch only database errors
            # instead of swallowing every exception.
            self.cursor.execute(
                "INSERT INTO admins (admin_name,pas) VALUES('admin','1234')"
            )
            self.conn.commit()
        except m1.Error:
            pass

    def make_database_table(self, table):
        """Render the rows currently pending on the cursor as a table."""
        table = PrettyTable(table)
        for row in self.cursor:
            table.add_row(row)
        print(table)

    def add_admin(self, adminname, password):
        """Insert a new admin account; reports failure (e.g. name taken)."""
        try:
            self.cursor.execute(
                "INSERT INTO admins (admin_name,pas) VALUES(%s,%s)",
                (adminname, password),
            )
            self.conn.commit()
        except Exception:
            print("something wrong happened.try again,maybe user name taken already")

    def delete(self, id):
        """Delete the product with primary key *id*."""
        self.cursor.execute("DELETE FROM products where product_id=%s", (id,))
        self.conn.commit()

    def insert(self):
        """Interactively add a product; silently does nothing when a
        product with the same name already exists (original behaviour)."""
        productname = input("enter product name")
        self.cursor.execute(
            "SELECT * FROM products WHERE product_name = %s", (productname,)
        )
        # fetchall() drains the cursor; an empty result means the name is new.
        if not self.cursor.fetchall():
            quantity = int(input("enter quantity of product"))
            Company = input(
                "enter company name ('asus','msi','asrock','gigabyte','inno3d')"
            )
            gpu_company = input("enter gpu_company('nvidea','amd')")
            price = input("enter price of product")
            vram = int(input("input vram: "))
            self.cursor.execute(
                "INSERT INTO products(product_name,quantity,Company,gpu_company,price,vram_gb) values(%s,%s,%s,%s,%s,%s)",
                (productname, quantity, Company, gpu_company, price, vram),
            )
            self.conn.commit()

    def showproductsbyvram(self, vram):
        """List products with exactly *vram* GB of VRAM."""
        self.cursor.execute("select * from products where vram_gb = %s", (vram,))
        self.make_database_table(self.producttabletemplate)

    def showproductsbygpu_company(self, gpu):
        """List products by GPU vendor ('nvidea' or 'amd')."""
        if gpu not in ["nvidea", "amd"]:
            print("no such gpu company available:")
            return
        self.cursor.execute(
            "select * from products where gpu_company = %s", (gpu,)
        )
        self.make_database_table(self.producttabletemplate)

    def showproductsbyseller(self, comp):
        """List products from one board partner."""
        if comp not in ("asus", "msi", "asrock", "gigabyte", "inno3d"):
            print("no such seller available")
            return
        self.cursor.execute("select * from products where Company = %s", (comp,))
        self.make_database_table(self.producttabletemplate)

    def showproductsbybudget(self, maximum_price):
        """List products strictly cheaper than *maximum_price*."""
        self.cursor.execute(
            "select * from products where price < %s", (maximum_price,)
        )
        self.make_database_table(self.producttabletemplate)

    def showproductsrecords(self):
        """List every product."""
        self.cursor.execute("select * from products")
        self.make_database_table(self.producttabletemplate)

    def showproductsrecordsbyname(self, name):
        """List products whose name matches *name*, case-insensitively."""
        self.cursor.execute(
            "select * from products where LOWER(product_name) = %s",
            (name.lower(),),
        )
        self.make_database_table(self.producttabletemplate)

    def updateprice(self, id):
        """Prompt for and store a new price for product *id*."""
        price = input("enter price of product")
        self.cursor.execute(
            "UPDATE products SET price=%s WHERE product_id=%s", (price, id)
        )
        self.conn.commit()

    def updatequantity(self, id):
        """Prompt for and store a new quantity for product *id*."""
        quantity = input("enter quantity of product")
        self.cursor.execute(
            "UPDATE products SET quantity=%s WHERE product_id=%s", (quantity, id)
        )
        self.conn.commit()
if __name__ == "__main__":
    # Manual smoke test; requires a local MySQL server with the
    # credentials hard-coded in Database.__init__.
    db = Database("graphics_shop")
    db.showproductsbybudget(2000000)
    # db.showproductsrecords()
    # db.updateprice(1)
    # db.updatequantity(1)
| Codineer/shop-management-sytem | database.py | database.py | py | 5,264 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mysql.connector.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "mysql.connector",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "prettytable.PrettyTable",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "{'m1': ... |
8196850520 | from django.contrib import admin
from .models import User
class UserAdmin(admin.ModelAdmin):
    """Admin options for the custom User model."""
    # Columns shown in the changelist view.
    list_display = (
        'pk', 'role', 'username', 'email',
        'first_name', 'last_name',
    )
    # Fields searched by the admin search box.
    search_fields = ('username', 'email',)
    # Sidebar filters.
    list_filter = ('email', 'username')
# Register the custom User model with its tailored admin options.
admin.site.register(User, UserAdmin)
| lojiver/foodgram-project | backend/foodgram/users/admin.py | admin.py | py | 323 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 15,
"usage_type": "call"
},... |
39479422820 | from django.contrib.auth.models import Group, User
from datetime import datetime
from django.utils import timezone
from schedule.periods import Day
from datetime import timedelta
from apps.policies.models import SchedulePolicyRule
from apps.services.models import Service
"""def get_current_events_users(calendar):
now = timezone.make_aware(datetime.now(), timezone.get_current_timezone())
result = []
day = Day(calendar.events.all(), now)
for o in day.get_occurrences():
if o.start <= now <= o.end:
usernames = o.event.title.split(',')
print usernames
for username in usernames:
result.append(User.objects.get(username=username.strip()))
return result
"""
def get_current_events_users(calendar):
    """Return the users on call right now according to *calendar*.

    An event title is a comma-separated list of user names and/or group
    names; group entries expand to every member, tagged with the group
    they came from via ``user.came_from_group``.
    """
    now = timezone.now()
    on_call = []
    for occurrence in Day(calendar.events.all(), now).get_occurrences():
        if not (occurrence.start <= now <= occurrence.end):
            continue
        for raw_name in occurrence.event.title.split(','):
            name = raw_name.strip()
            if Group.objects.filter(name=name).exists():
                for member in User.objects.filter(groups__name=name):
                    member.came_from_group = name
                    on_call.append(member)
            else:
                on_call.append(User.objects.get(username=name))
    return on_call
def get_events_users_inbetween(calendar, since, until):
    """Collect the users on call between *since* and *until*.

    Walks the schedule day by day, expanding comma-separated event titles
    into users (group names expand to every member). Each user appears at
    most once in the result; every entry carries the occurrence start/end
    and the user's email.
    """
    delta = until - since
    result = {}
    added_users = []
    for i in range(delta.days + 1):
        that_day = since + timedelta(days=i)
        if not timezone.is_aware(that_day):
            that_day = timezone.make_aware(that_day, timezone.get_current_timezone())
        day = Day(calendar.events.all(), that_day)
        for o in day.get_occurrences():
            if o.start <= that_day <= o.end:
                items = o.event.title.split(',')
                for item in items:
                    username = item.strip()
                    # .exists() for consistency with get_current_events_users
                    # (and to avoid evaluating the whole queryset).
                    if Group.objects.filter(name=username).exists():
                        for user in User.objects.filter(groups__name=username):
                            if user not in added_users:
                                # BUG FIX: key by the member's username, not
                                # the group name — previously every member of
                                # a group overwrote the same dict entry, so
                                # only the last member was reported.
                                result[user.username] = {
                                    "start": o.start,
                                    "person": user.username,
                                    "end": o.end,
                                    "email": user.email
                                }
                                added_users.append(user)
                    else:
                        if username not in result:
                            user_instance = User.objects.get(username=username)
                            result[username] = {
                                "start": o.start,
                                "person": username,
                                "end": o.end,
                                "email": user_instance.email
                            }
    return result.values()
def get_escalation_for_service(service):
    """Return the de-duplicated list of users to escalate to for *service*.

    Empty when the service has notifications disabled. Otherwise walks the
    service's schedule-policy rules, adding (in rule order) the users on
    call on the rule's schedule, the rule's direct user, and the members
    of the rule's group. Leftover debug ``print`` calls were removed.
    """
    result = []
    if service.notifications_disabled:
        return result
    rules = SchedulePolicyRule.get_rules_for_service(service)
    for item in rules:
        if item.schedule:
            current_events_users = get_current_events_users(item.schedule)
            for user in current_events_users:
                if user not in result:
                    result.append(user)
        if item.user_id:
            if item.user_id not in result:
                result.append(item.user_id)
        if item.group_id:
            for user in item.group_id.user_set.all():
                if user not in result:
                    result.append(user)
    return result
def services_where_user_is_on_call(user):
    """Return the services whose policy targets *user* directly, or whose
    schedule event titles contain the user (string match)."""
    # Imported locally in the original — kept that way to avoid touching
    # module import order.
    from django.db.models import Q
    services = Service.objects.filter(
        Q(policy__rules__user_id=user) | Q(policy__rules__schedule__event__title__icontains=user)
    )
    return services
| openduty/openduty | apps/incidents/escalation_helper.py | escalation_helper.py | py | 4,082 | python | en | code | 121 | github-code | 36 | [
{
"api_name": "django.utils.timezone.now",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "schedule.periods.Day",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "... |
15371935865 | from RsSmw import *
import json
try:
    with open ("config.json") as config_f:
        RsSmw.assert_minimum_version('5.0.44')
        config = json.load(config_f)
        IP_ADDRESS_GENERATOR = config["IP_ADDRESS_GENERATOR"]
        PORT = config["PORT"]
        CONNECTION_TYPE = config["CONNECTION_TYPE"]
        TRACE_FILE = config["TRACE_FILE"]
        MEASURE_TIME = config["MEASURE_TIME"]
        resource = f'TCPIP::{IP_ADDRESS_GENERATOR}::{PORT}::{CONNECTION_TYPE}'  # Resource string for the device
        generator = RsSmw(resource, True, True, "SelectVisa='socket'")
        # Redundant: the with-statement already closes the file on exit.
        config_f.close()
except FileNotFoundError:
    # "Brak pliku konfiguracyjnego." = "Configuration file missing." (Polish)
    print("Brak pliku konfiguracyjnego.")
    exit()
def com_check():
    """Enable per-command status checking on the global ``generator`` and
    select hardware instance A (RF out A)."""
    # Driver's instrument status checking ( SYST:ERR? ) after each command (default value is True):
    generator.utilities.instrument_status_checking = True
    # The generator object uses the global HW instance one - RF out A
    generator.repcap_hwInstance_set(repcap.HwInstance.InstA)
def meas_prep(set: bool, mode: enums.FreqMode, amplitude: int, freq: int):
    """Configure the generator output, print its PEP level and identity,
    then close the session.

    Annotation fixed: ``set`` was annotated ``True`` (a value, not a type).
    The parameter name shadows the builtin ``set`` but is kept so keyword
    callers keep working.
    """
    generator.output.state.set_value(set)
    generator.source.frequency.set_mode(mode)
    generator.source.power.level.immediate.set_amplitude(amplitude)
    generator.source.frequency.fixed.set_value(freq)
    print(f'Channel 1 PEP level: {generator.source.power.get_pep()} dBm')
    # Direct SCPI interface:
    response = generator.utilities.query_str('*IDN?')
    print(f'Direct SCPI response on *IDN?: {response}')
    # Closes the session here, so this function can only be called once.
    generator.close()
if __name__ == "__main__":
    com_check()
    # CW mode, -20 dBm output level, 23 GHz carrier.
    meas_prep(True, enums.FreqMode.CW, -20, 23E9)
exit() | mgarczyk/channel-sounder-5g | generator.py | generator.py | py | 1,625 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "RsSmw.assert_minimum_version",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 7,
"usage_type": "call"
}
] |
43284653684 | import setuptools
from pathlib import Path
# Long description shown on PyPI.
with open("README.md", "r") as file:
    long_description = file.read()

with open("requirements.txt") as file:
    # splitlines() + filter: the old split("\n") left an empty string for
    # the file's trailing newline, producing an invalid requirement entry.
    REQUIREMENTS = [line for line in file.read().splitlines() if line]
# Package metadata; sources live under src/ (src-layout).
setuptools.setup(
    name="port_env",
    version="0.0.3",
    author="Moist-Cat",
    author_email="moistanonpy@gmail.com",
    description="Make environments portable",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Moist-Cat/port_env",
    scripts=["port_env"],
    install_requires=REQUIREMENTS,
    include_package_data=True,
    package_dir={"":"src"},
    packages=setuptools.find_packages(where="src"),
    python_requires=">=3",
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: POSIX :: Linux",
        "License :: OSI Approved :: MIT License",
    ],
)
| Moist-Cat/port_env | setup.py | setup.py | py | 898 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 23,
"usage_type": "call"
}
] |
41310446545 | from nltk import CFG
from nltk import ChartParser # parse_cfg, ChartParser
from random import choice
import re
from enum import Enum, auto
from argparse import ArgumentParser
from os import listdir
from os.path import isfile, join
import os
# Absolute directory of this module; name segments live two levels up.
this_dir = os.path.dirname(os.path.abspath(__file__))
name_segment_folder = join(this_dir, "../../name-segments/")
class EnumAutoName(Enum):
    # An enum where auto() will default to the enum name
    def _generate_next_value_(name, start, count, last_values):
        """Make ``auto()`` use the member's own name as its value."""
        return name
    def __str__(self):
        """Render a member as its value (the member name, via auto())."""
        return self.value
def StringToEnum(s):
    """Resolve *s* to a ``Name.NameBank`` member; raise ValueError if absent."""
    known = [member.name for member in Name.NameBank]
    if s not in known:
        raise ValueError
    return Name.NameBank[s]
def get_files_from_path(path, extension_filter=".txt"):
    """Return file names directly under *path* whose joined path contains
    *extension_filter*.

    Note: this is a substring test on the full path (not a strict suffix
    check), matching the original behaviour.
    """
    matches = []
    for entry in listdir(path):
        candidate = join(path, entry)
        if extension_filter in candidate and isfile(candidate):
            matches.append(entry)
    return matches
def get_available_namebanks_and_syllables():
    """Merge the namebank and syllable dicts (syllables win on clashes)."""
    merged = dict(get_available_namebanks())
    merged.update(get_available_syllables())
    return merged
def get_available_syllables(where="syllables"):
    """Scan the *where* subfolder of the name-segment directory and return
    a dict of capitalized base names (gender and index suffixes stripped)
    mapped to fresh ``auto()`` markers."""
    folder = join(name_segment_folder, where)
    available = {}
    for filename in get_files_from_path(folder):
        base = filename.replace(".txt", "").replace("-female", "").replace("-male", "")
        for digit in range(10):
            base = base.replace(f"-{digit}", "")
        available[base.capitalize()] = auto()
    return available
def get_available_namebanks(where="forenames"):
    """Scan the *where* subfolder of the name-segment directory and return
    a dict of capitalized base names (gender suffixes stripped) mapped to
    fresh ``auto()`` markers."""
    folder = join(name_segment_folder, where)
    available = {}
    for filename in get_files_from_path(folder):
        base = filename.replace(".txt", "").replace("-female", "").replace("-male", "")
        available[base.capitalize()] = auto()
    return available
def get_available_origins(where="nouns"):
    """Scan the *where* subfolder of the name-segment directory and return
    a dict of capitalized base names (gender suffixes stripped) mapped to
    fresh ``auto()`` markers."""
    folder = join(name_segment_folder, where)
    available = {}
    for filename in get_files_from_path(folder):
        base = filename.replace(".txt", "").replace("-female", "").replace("-male", "")
        available[base.capitalize()] = auto()
    return available
class Name:
    """Configuration object describing how a character name is generated."""
    class NameOrder(EnumAutoName):
        # Surname-first vs forename-first conventions, or a single part.
        Eastern = auto()
        Forename_Only = auto()
        Surname_Only = auto()
        Western = "Western"
    # NameBank members are discovered from the files on disk.
    namebank_values = get_available_namebanks_and_syllables()
    NameBank = EnumAutoName('NameBank', namebank_values)
    origin_values = get_available_origins()
    Origin = EnumAutoName('Origin', origin_values)
    class NameType(EnumAutoName):
        Forename = auto()
        Surname = auto()
    # NOTE(review): this nested class silently shadows the file-derived
    # `Origin` enum assigned above — only the hard-coded members below are
    # ever visible as Name.Origin. Confirm which definition is intended.
    class Origin(EnumAutoName):
        Aquatic = auto()
        Desert = auto()
        Mountain = auto()
        Tundra = auto()
        Urban = auto()
        Forest = auto()
        Air = auto()
    def __init__(self):
        # Gender flags control which segment files are consulted.
        self.gender_male = False
        self.gender_female = False
        self.gender_neutral = False
        # Whether to prepend a title/position prefix.
        self.has_position = False
        self.order = Name.NameOrder.Western
class FileFetcher():
    """Resolves name-segment file names from a Name configuration."""
    def __init__(self):
        pass
    def get_gender_endings(self, config, always_neutral=False):
        """Return file-name gender suffixes ("male"/"female"/"") enabled in
        *config*. Falls back to neutral (and mutates *config*) when no
        gender is selected."""
        e = []
        if config.gender_male:
            e.append("male")
        if config.gender_female:
            e.append("female")
        if config.gender_neutral or always_neutral:
            e.append("")
        if len(e) == 0:
            # Side effect: records the fallback on the config object.
            print("No Gender Selection. Defaulting to gender neutral")
            config.gender_neutral = True
            e.append("")
        return e
    def get_position_files(self, config):
        """Return the positions/title file paths for every enabled gender."""
        ges = self.get_gender_endings(config)
        pt = []
        for g in ges:
            g = f"-{g}" if g != "" else g
            pt.append(f'prefixes/positions{g}.txt')
        return pt
    def SyllableLength(self, namebank):
        """Count the distinct syllable files (ignoring gender variants)
        available for *namebank*."""
        path = join(name_segment_folder,"syllables")
        onlyfiles = get_files_from_path(path)
        unique_syllables = {}
        for f in onlyfiles:
            # Substring match: any syllable file whose name contains the bank.
            if namebank in f:
                f = f.replace("-female","")
                f = f.replace("-male","")
                unique_syllables[f] = True
        return len(unique_syllables.items())
class Grammar:
    """Incrementally builds an NLTK-style CFG for name generation.

    Productions are accumulated in ``self.obj`` as {LHS: [rhs tokens]};
    tokens wrapped as "['path.txt']" are later expanded into word
    alternatives by ``resolve_grammar``. ``write`` serializes the grammar
    to a file and returns its absolute path.
    """
    def __init__(self, config):
        self.config = config
        self.obj = {}
        self.root = "S"
        self.ff = FileFetcher()
    def initialize(self):
        """Install the root production and the basic terminals."""
        self.obj[self.root]= ["PRE", "CORE", "POST"]
        self.basic_tokens()
    def basic_tokens(self):
        # Space terminal and the literal word "of" (as two characters).
        self.obj["SPC"] = ["' '"]
        self.obj["OF"] = ["'o'", "'f'"]
    def define_position(self, config, optional=False):
        """Add title-prefix and origin-postfix productions."""
        # Prefix
        positions = self.ff.get_position_files(config)
        positions = [f"['{p}']" for p in positions]
        self.obj["PRE"] = ["TITLE", "SPC"]
        if optional:
            self.obj["PRE"].append(None)
        self.obj["TITLE"] = positions
        # Postfix
        origin = config.origin.name.lower()
        self.obj["POST"] = ["SPC", "OF", "SPC", "WHERE"]
        if optional:
            self.obj["POST"].append(None)
        # TODO: Allow multiple origins
        self.obj["WHERE"] = [f"['postfixes/{origin}.txt']",]
    def setNameOrder(self, order):
        """Set the CORE production according to the requested name order."""
        if order == Name.NameOrder.Western:
            self.obj["CORE"] = ["FORENAME", "SPC", "SURNAME"]
        elif order == Name.NameOrder.Eastern:
            self.obj["CORE"] = ["SURNAME", "SPC", "FORENAME"]
        elif order == Name.NameOrder.Forename_Only:
            self.obj["CORE"] = ["FORENAME"]
        elif order == Name.NameOrder.Surname_Only:
            # BUG FIX: this branch previously emitted ["FORENAME"], so
            # Surname_Only produced a forename instead of a surname.
            self.obj["CORE"] = ["SURNAME"]
        else:
            print("Unimplemented Name Order: ", order, ". Defaulting to Western")
            self.setNameOrder(Name.NameOrder.Western)
    def getNamesFromSyllables(self, config, name_type):
        """Build a name as a sequence of per-index syllable file tokens."""
        ges = self.ff.get_gender_endings(config)
        namebank = config.namebank.name.lower()
        name_type = name_type.name.upper()
        # TODO: Check compatibile with namebanks
        syls = self.ff.SyllableLength(namebank)
        self.obj[name_type] = []
        for x in range(syls):
            self.obj[name_type].append(f"SYLLABLE{x}")
        for x in range(syls):
            pt = []
            for g in ges:
                g = f"-{g}" if g != "" else g
                f = f'syllables/{namebank}{g}-{x}.txt'
                if os.path.exists(join(name_segment_folder, f)):
                    pt.append(f)
                else:
                    print(f"Warn/Err: No syllable file found: {f}. May produce bad name.")
            self.obj[f"SYLLABLE{x}"] = [pt]
    def getNamesFromBank(self, config, name_type):
        """Build a name from a whole-name bank file per enabled gender."""
        ges = self.ff.get_gender_endings(config)
        namebank = config.namebank.name.lower()
        name_type = name_type.name.upper()
        pt = []
        for g in ges:
            g = f"-{g}" if g != "" else g
            # TODO: s shouldnt be there.
            pt.append(f'{name_type.lower()}s/{namebank}{g}.txt')
        self.obj[name_type] = [pt]
    def constructName(self, config, name_type):
        """Build a compound name (adjective + noun) from the origin banks."""
        origin = config.origin.name.lower()
        name_type = name_type.name.upper()
        self.obj[name_type] = ["ADJ", "NOUN"]
        self.buildAdjBank(config)
        self.buildNounBank(config)
    def buildAdjBank(self, config):
        origin = config.origin.name.lower()
        pt = []
        # TODO: Dodginess/Alignment. John Bloodsword seems more evil than John Goldheart
        pt.append(f"['adjectives/{origin}.txt']")
        self.obj["ADJ"] = pt
    def buildNounBank(self, config):
        origin = config.origin.name.lower()
        pt = []
        # TODO: Dodginess/Alignment. John Poisonblood seems more evil than John Goldheart
        pt.append(f"['nouns/{origin}.txt']")
        self.obj["NOUN"] = pt
    def write(self, dir="", filename="custom.grammar"):
        """Serialize the grammar to *dir*/*filename*; return its abs path.

        Leftover debug prints (the filename banner) were removed.
        """
        # TODO: order carefully
        s = ""
        for key, value in self.obj.items():
            s += f"{key} -> "
            for i, v in enumerate(value):
                sep = " "
                if v is None:
                    # None marks an optional alternative: emit a bare "|".
                    v = " | "
                s += f"{v}{sep}"
            s += "\n"
        self.string_repr = s
        filename = os.path.join(dir, filename)
        with open(filename, "w") as f:
            f.write(s)
        filename = os.path.abspath(filename)
        return filename
    def __str__(self):
        if hasattr(self, "string_repr"):
            return self.string_repr
        else:
            return "Not Finalized"
def define_grammar(config, where=""):
    """Assemble a Grammar from *config*, write it under *where* and return
    the written file's absolute path."""
    grammar = Grammar(config)
    grammar.initialize()
    if config.has_position:
        grammar.define_position(config)
    grammar.setNameOrder(config.order)
    # Prefer Forenames to be syllable generated
    if grammar.ff.SyllableLength(config.namebank.name.lower()) > 0:
        grammar.getNamesFromSyllables(config, Name.NameType.Forename)
    else:
        grammar.getNamesFromBank(config, Name.NameType.Forename)
    # TODO: Use namebank for Surnames
    grammar.constructName(config, Name.NameType.Surname)
    return grammar.write(where)
def resolve_grammar(G):
    """Expand every "['relative/path.txt']" token in grammar text *G* into a
    "|"-separated list of quoted terminals read from that file."""
    def file_contents(s):
        # Regex-sub callback: s is the match; group(1) is the file path.
        global name_segment_folder
        filename = join(name_segment_folder,str(s.group(1)))
        try:
            terms = open(filename).readlines()
            s = ""
            for i, t in enumerate(terms):
                t = t.replace("\n","")
                # Allow Commenting
                if "#" not in t:
                    seperator = "|" if i > 0 else ""
                    s += f"{seperator} '{t}' "
        except FileNotFoundError:
            # Missing segment file: substitute nothing and warn.
            print("Warn/Err: File doesn't exist:", filename, ". May produce bad names.")
            s = ""
        return s
    G = re.sub(r"\[\'([a-zA-Z\-\.\/0-9]*)\'\]", file_contents, G)
    return G
def generate_name(G, ):
    """Parse the resolved grammar text *G* and randomly derive one name,
    returned in title case."""
    grammar = CFG.fromstring(G)
    parser = ChartParser(grammar)
    gr = parser.grammar()
    tokens = produce(gr, gr.start())
    name = ''.join(tokens)
    return name.title()
def produce(grammar, symbol):
    """Randomly expand *symbol* using *grammar* and return the resulting
    list of terminal strings (recursive leftmost derivation)."""
    chosen = choice(grammar.productions(lhs = symbol))
    words = []
    for sym in chosen.rhs():
        if isinstance(sym, str):
            words.append(sym)
        else:
            # Non-terminal: recurse and splice in its expansion.
            words.extend(produce(grammar, sym))
    return words
def generate(args, where=""):
    """Build a Name config from parsed CLI *args*, write the grammar under
    *where*, derive one name and return it (printing when verbose)."""
    config = Name()
    # Titles are always enabled; the CLI has no flag for them.
    config.has_position = True
    config.origin = args.origin
    config.namebank = args.namebank
    config.order = args.order
    config.gender_male = args.gender_male
    config.gender_female = args.gender_female
    config.gender_neutral = args.gender_neutral
    grammar_file = define_grammar(config, where)
    G = resolve_grammar(open(grammar_file).read())
    name = generate_name(G)
    if args.verbose:
        print("Your Character:", name)
    return name
def parse_args():
    """Parse the command line into a namespace (verbose is forced on)."""
    ap = ArgumentParser(description="Generate a character name")
    # Gender
    ap.add_argument('--gender-male', action="store_true")
    ap.add_argument('--gender-female', action="store_true")
    ap.add_argument('--gender-neutral', action="store_true")
    # Origins
    ap.add_argument('--order', type=Name.NameOrder, choices=list(Name.NameOrder), nargs="?", default=Name.NameOrder.Western)
    ap.add_argument('--origin', type=Name.Origin, choices=list(Name.Origin), nargs="?", default=Name.Origin.Mountain)
    ap.add_argument('--namebank', type=Name.NameBank, choices=Name.NameBank, nargs="?", default=Name.NameBank.Dwarf)
    args = ap.parse_args()
    # CLI use is always chatty; library callers can build args themselves.
    args.verbose = True
    return args
if __name__ == "__main__":
    # CLI entry point: parse options and print one generated name.
    a = parse_args()
    generate(a)
| Mimic-Tools/name-generation | src/name_generation/generate.py | generate.py | py | 12,855 | python | en | code | 12 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line... |
28483046981 | import numpy as np
import cv2
import sys
import argparse
# Creating the parser
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True,
    help="path to the model used to make the prediction and generate the class activation maps")
# Parsing the arguments
args = vars(ap.parse_args())
# Imported after arg parsing so `--help` stays fast (these pull in the model stack).
from utilities.gapModels import MobileNetGAP
from utilities.classifier import Classifier
from utilities.helpers import *
print("⏳" + BLUE + " Loading model ... " + END)
model = MobileNetGAP(path=args["model"])
clf = Classifier(model, name='mobilenet')
print("💾" + BLUE + " Model loaded." + END)
def addContours(input_img, output_img, draw_bounding_box=True, draw_contours=False, threshold=100):
    """
    >>> Work In Progress <<<
    Detects the bounding boxes and/or contours in the input image and adds them to the output image
    Returns the modified output_img (drawn in place)
    >>> Work In Progress <<<
    """
    # Convert image to gray
    gray = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
    # Threshold the image
    _, threshed_img = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)
    # Get the external contours
    # NOTE(review): 3-value unpacking assumes the OpenCV 3.x findContours
    # return shape; OpenCV 4 returns (contours, hierarchy) — confirm the
    # pinned cv2 version before reuse.
    _, contours, _ = cv2.findContours(threshed_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if draw_contours:
        # Draw the contours (green, width 5)
        cv2.drawContours(output_img , contours, -1, (0, 255, 0), 5)
    if draw_bounding_box:
        # Draw the bounding boxes (blue, width 3)
        for c in contours:
            # Get the bounding rectangle
            x, y, w, h = cv2.boundingRect(c)
            # Draw it
            cv2.rectangle(output_img, (x, y), (x + w, y + h), (255, 0, 0), 3)
    return output_img
def show_detection(img, prediction):
    """Draw detection indicators on *img* in place and return it: a white
    border when *prediction* exceeds 0.6, plus a top probability bar whose
    filled width is proportional to *prediction*."""
    height, width, _ = img.shape
    middle = (int(height // 2), int(width // 2))
    if prediction > 0.6:
        # Strong detection: flash a thick white frame around the image.
        cv2.rectangle(img, (10, 10), (width - 10, height - 10), (255, 255, 255), thickness=40)
    # Dark background strip, then the filled probability bar on top of it.
    cv2.rectangle(img, (0, 0), (width, 40), (56, 38, 50), thickness=-1)
    cv2.rectangle(img, (0, 0), (int(width * prediction), 40), (118, 230, 0), thickness=-1)
    return img
# Capture from the default webcam and overlay CAMs until 'q' is pressed.
cap = cv2.VideoCapture(0)
while 1:
    ret, img = cap.read()
    # Get the cam and prediction made by the model
    cam, prediction = clf.cam(img, class_number=1)
    # Detect the contours and or bounding boxes in the cam
    # img = addContours(input_img=cam, output_img=img, draw_bounding_box=True, draw_contours=False, threshold=100)
    # Add the cam to the original image
    img = cv2.addWeighted(cam, 0.5, img, 0.8, 0)
    # Indicators of the probability of presence of a human
    img = show_detection(img, prediction[1])
    cv2.imshow('img',img)
    # ~30 ms per frame; 'q' quits.
    if cv2.waitKey(30) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| Selim78/real-time-human-detection | webcam_cam.py | webcam_cam.py | py | 2,632 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "utilities.gapModels.MobileNetGAP",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "utilities.classifier.Classifier",
"line_number": 20,
"usage_type": "call"
},
... |
20427901161 | # Written by P. Xydi, Feb 2022
######################################
# Import libraries
######################################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.cm as cm
color_1 = cm.get_cmap("Set2")(2)  # Set2 palette, index 2 — primary plot color
color_2 = cm.get_cmap("Set2")(1)  # Set2 palette, index 1 — secondary plot color
from sklearn.metrics import ConfusionMatrixDisplay
######################################
def sentence_distribution(dataset, label = 'training', to_plot = False):
    '''
    Counts (and optionally plots the distribution of) sentences per sample
    in a given dataset.
    INPUTS:
    - dataset: list, list of samples (each sample is a list of sentences)
    - label: str, used in the title of the output plot
    - to_plot: bool, show a histogram of sentences-per-sample when True
    OUTPUT:
    - total number of sentences across all samples
      (docstring fixed: it previously claimed the per-sample list was
      returned, but the function returns np.sum of it)
    '''
    ######################################
    # Number of sentences in each sample.
    nbr_sents = []
    for i in range(len(dataset)):
        nbr_sents.append(len(dataset[i]))

    if to_plot:
        # Plot the sentence distibution
        # Barplot and font specifications
        barplot_specs = {"color": color_1, "alpha": 0.7, "edgecolor": "grey"}
        label_specs = {"fontsize": 12}
        title_specs = {"fontsize": 14, "fontweight": "bold", "y": 1.03}
        plt.figure(figsize=(8,4))
        plt.hist(nbr_sents, bins = 20, **barplot_specs)
        plt.xlabel('Nbr of sentences per sample', **label_specs)
        plt.ylabel('Nbr of samples',**label_specs)
        plt.title('Distribution of sentences in {} set'.format(label),**title_specs)
        plt.show()

    return np.sum(nbr_sents)
######################################
def plot_token_distribution(dataset, label):
    '''
    Plots the distribution of tokens in the sentences of a
    given dataset.
    INPUTS:
    - dataset: list, list of samples (each sample is a list of sentences;
      each sentence a list of tokens)
    - label: str, used in the title of the output plot
    OUTPUT:
    - histogram of the distribution of tokens in dataset (shown)
    - nbr_tokens: list, number of tokens per sentence in dataset
    '''
    ######################################
    # Create empty list
    nbr_tokens = []
    # Count tokens in sentences and append to list
    for i in range(len(dataset)):
        for j in range(len(dataset[i])):
            nbr_tokens.append(len(dataset[i][j]))
    # Plot the sentence distibution
    # Barplot and font specifications
    barplot_specs = {"color": "mediumpurple", "alpha": 0.7, "edgecolor": "grey"}
    label_specs = {"fontsize": 12}
    title_specs = {"fontsize": 14, "fontweight": "bold", "y": 1.03}
    plt.figure(figsize=(8,4))
    plt.hist(nbr_tokens, bins = 20, **barplot_specs)
    plt.xlabel('Nbr of tokens per sentence', **label_specs)
    plt.ylabel('Nbr of sentences',**label_specs)
    plt.title('Distribution of tokens in {} set'.format(label),**title_specs)
    plt.show()
    return nbr_tokens
######################################
def target_sample_distribution(labels):
    """
    Plots the distribution of samples in target variable (categorical)
    on a log-scaled y axis.
    Input:
    - labels : list, list of target values
    """
    ######################################
    # Counts per category, sorted by frequency.
    w = pd.value_counts(labels)
    # Barplot and font specifications
    barplot_specs = {"color": color_2, "alpha": 0.7, "edgecolor": "grey"}
    label_specs = {"fontsize": 12}
    title_specs = {"fontsize": 14, "fontweight": "bold", "y": 1.02}
    plt.figure(figsize=(8,4.5))
    sns.barplot(x=w.index,y=w.values, **barplot_specs);
    plt.ylabel('Counts',**label_specs);
    plt.xticks(rotation=45)
    # Log scale keeps rare classes visible next to dominant ones.
    plt.yscale('log')
    plt.title('Sample distribution in target variable',**title_specs);
######################################
def plot_loss_accuracy_curves(history):
    """Plot training/validation loss (left) and accuracy (right) curves
    from a Keras-style History object (uses history.history dict)."""
    ######################################
    title_specs = {"fontsize": 16}
    label_specs = {"fontsize": 14}
    fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))
    # Plot loss values
    ax1.set_title('Validation loss: {:.4f}'.format(history.history['val_loss'][-1]))
    ax1.plot(history.history['loss'], color =color_1, label='training set')
    ax1.plot(history.history['val_loss'], color =color_2, label='validation set')
    ax1.set_xlabel('Epochs',**label_specs)
    ax1.set_ylabel('Loss',**label_specs)
    ax1.set_ylim([0,None])
    ax1.legend()
    # plot accuracy values
    ax2.set_title('Validation accuracy: {:.2f}%'.format(history.history['val_accuracy'][-1]*100))
    ax2.plot(history.history['accuracy'], color =color_1, label='training set')
    ax2.plot(history.history['val_accuracy'], color =color_2, label='validation set')
    ax2.set_xlabel('Epochs',**label_specs)
    ax2.set_ylabel('Accuracy',**label_specs)
    ax2.set_ylim([None,1])
    ax2.legend()
    plt.tight_layout()
######################################
def plot_confusion_matrix(y, y_pred, labels, suptitle):
    """Draw raw-count and row-normalized confusion matrices side by side."""
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))
    title_specs = {"fontsize": 14, "fontweight": "bold", "y": 1.03}
    plt.suptitle(suptitle, **title_specs)
    # (axis, subplot title, normalize mode, cell value format)
    variants = [
        (ax1, "Confusion Matrix (counts)", None, 'd'),
        (ax2, "Confusion Matrix (ratios)", "true", '.1g'),
    ]
    for ax, title, normalize, fmt in variants:
        ax.set_title(title, y=1.02)
        # normalize=None is the sklearn default, i.e. raw counts.
        ConfusionMatrixDisplay.from_predictions(
            y,
            y_pred,
            normalize=normalize,
            display_labels=labels,
            cmap=plt.cm.Blues,
            values_format=fmt,
            ax=ax,
        )
        ax.set_xticklabels(labels=labels, rotation=90)
    plt.tight_layout()
{
"api_name": "matplotlib.cm.get_cmap",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.cm",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.cm.get_cmap",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotli... |
8228541339 | from models.pointnet import PointNetDenseCls
import torch
import torch.nn as nn
import torch.nn.functional as F
import hydra
import os
from datasets import kpnet
import logging
from itertools import combinations
import numpy as np
from tqdm import tqdm
def pdist(vectors):
    """Pairwise squared Euclidean distances between the rows of `vectors`.

    Uses the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b so the whole
    (n, n) matrix needs only one matrix multiply.
    """
    squared_norms = vectors.pow(2).sum(dim=1)
    cross_terms = vectors.mm(vectors.t())
    return squared_norms.view(1, -1) + squared_norms.view(-1, 1) - 2 * cross_terms
class AverageMeter(object):
    """Tracks the latest value and a running (count-weighted) average.

    Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
class PairSelector:
    """
    Strategy interface for mining embedding pairs for a contrastive loss.

    Implementations return the indices of positive pairs and negative pairs:
    ``return positive_pairs, negative_pairs``
    """

    def __init__(self):
        pass

    def get_pairs(self, embeddings, labels):
        """Return (positive_pairs, negative_pairs) index tensors."""
        raise NotImplementedError
# reference: https://github.com/adambielski/siamese-triplet
class HardNegativePairSelector(PairSelector):
    """
    Creates all possible positive pairs. For negative pairs, pairs with smallest distance are taken into consideration,
    matching the number of positive pairs.
    """

    def __init__(self, cpu=True):
        # cpu=True moves embeddings to the CPU before the O(n^2) distance
        # computation (avoids holding the full matrix on the GPU).
        super(HardNegativePairSelector, self).__init__()
        self.cpu = cpu

    def get_pairs(self, embeddings, labels):
        # Returns (positive_pairs, top_negative_pairs), each a LongTensor of
        # shape (k, 2) holding row indices into `embeddings`.
        if self.cpu:
            embeddings = embeddings.cpu()
        distance_matrix = pdist(embeddings)

        labels = labels.cpu().data.numpy()
        # Every unordered index pair (i, j), i < j.
        all_pairs = np.array(list(combinations(range(len(labels)), 2)))
        all_pairs = torch.LongTensor(all_pairs)
        # The numpy boolean mask's .nonzero() tuple indexes the first dim of
        # the LongTensor: same-label pairs are positives, the rest negatives.
        positive_pairs = all_pairs[(
            labels[all_pairs[:, 0]] == labels[all_pairs[:, 1]]).nonzero()]
        negative_pairs = all_pairs[(
            labels[all_pairs[:, 0]] != labels[all_pairs[:, 1]]).nonzero()]

        negative_distances = distance_matrix[negative_pairs[:,
                                                            0], negative_pairs[:, 1]]
        negative_distances = negative_distances.cpu().data.numpy()
        # Hard negatives: the len(positive_pairs) closest different-label pairs.
        # NOTE(review): np.argpartition needs len(positive_pairs) <
        # len(negative_distances); a batch where positives outnumber negatives
        # would raise here — confirm upstream batching guarantees this.
        top_negatives = np.argpartition(negative_distances, len(positive_pairs))[
            :len(positive_pairs)]
        top_negative_pairs = negative_pairs[torch.LongTensor(top_negatives)]

        return positive_pairs, top_negative_pairs
# reference: https://github.com/adambielski/siamese-triplet
class OnlineContrastiveLoss(nn.Module):
    """
    Online Contrastive loss
    Takes a batch of embeddings and corresponding labels.
    Pairs are generated using pair_selector object that take embeddings and targets and return indices of positive
    and negative pairs
    """

    def __init__(self, margin, pair_selector, mean_distance=None):
        # margin: fixed hinge margin for negatives when no mean_distance
        #   table is supplied.
        # pair_selector: PairSelector producing (positive, negative) pairs.
        # mean_distance: optional per-label-pair target-distance table; only
        #   element [0] is used and it is moved to the GPU unconditionally.
        super(OnlineContrastiveLoss, self).__init__()
        self.margin = margin
        self.pair_selector = pair_selector
        if mean_distance is not None:
            self.mean_distance = mean_distance[0].cuda()
        else:
            self.mean_distance = None

    def forward(self, embeddings, target):
        positive_pairs, negative_pairs = self.pair_selector.get_pairs(
            embeddings, target)
        if embeddings.is_cuda:
            positive_pairs = positive_pairs.cuda()
            negative_pairs = negative_pairs.cuda()
        # Positives: pull same-label embeddings together (squared distance).
        positive_loss = (embeddings[positive_pairs[:, 0]] -
                         embeddings[positive_pairs[:, 1]]).pow(2).sum(1)

        labels_1 = tuple(target[negative_pairs[:, 0]].tolist())
        labels_2 = tuple(target[negative_pairs[:, 1]].tolist())
        # NOTE(review): indexing mean_distance with this tuple-of-tuples is
        # numpy/torch advanced indexing over a 2-D table keyed by label ids —
        # assumes mean_distance is (num_labels, num_labels); confirm.
        label_pair = (labels_1, labels_2)
        if self.mean_distance is not None:
            # Negatives: hinge on the learned per-label-pair mean distance.
            negative_loss = F.relu(
                self.mean_distance[label_pair] - ((embeddings[negative_pairs[:, 0]] - embeddings[negative_pairs[:, 1]]).pow(2).sum(
                    1) + 1e-6).sqrt()).pow(2)
        else:
            # Negatives: standard hinge on the fixed margin (1e-6 guards the
            # sqrt gradient at zero distance).
            negative_loss = F.relu(
                self.margin - ((embeddings[negative_pairs[:, 0]] - embeddings[negative_pairs[:, 1]]).pow(2).sum(
                    1) + 1e-6).sqrt()).pow(2)
        loss = torch.cat([positive_loss, negative_loss], dim=0)
        return loss.mean()
@hydra.main(config_path='config', config_name='config')
def main(cfg):
    """Train PointNet keypoint embeddings with an online contrastive loss.

    Expects ``cfg`` to provide batch_size, num_workers and max_epoch; saves a
    checkpoint ``epoch{N}.pth`` after every epoch. Requires CUDA.
    """
    logger = logging.getLogger(__name__)
    train_dataset = kpnet.KeypointDataset(cfg)
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=cfg.batch_size, shuffle=True, num_workers=cfg.num_workers, drop_last=True)
    model = PointNetDenseCls(feature_transform=True, cfg=cfg).cuda()
    logger.info('Start training on 3D embeddings')
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=1e-3
    )
    criterion = OnlineContrastiveLoss(1., HardNegativePairSelector())
    meter = AverageMeter()
    for epoch in range(cfg.max_epoch + 1):
        train_iter = tqdm(train_dataloader)
        # Training
        meter.reset()
        model.train()
        for pc, kp_idxs in train_iter:
            pc, kp_idxs = pc.cuda(), kp_idxs.cuda()
            outputs = model(pc.transpose(1, 2))
            # Gather one embedding per visible keypoint; the keypoint slot
            # index doubles as its class label for the contrastive loss.
            # FIX: the inner loop previously reused `i`, shadowing the outer
            # enumerate counter — use a distinct name per loop level.
            embeddings = []
            labels = []
            for b in range(cfg.batch_size):
                embedding_model = outputs[b]
                keypoints = kp_idxs[b]
                for idx in range(len(keypoints)):
                    kp_idx = keypoints[idx]
                    if kp_idx < 0:
                        # A negative index marks a missing/invisible keypoint.
                        continue
                    embeddings.append(embedding_model[kp_idx])
                    labels.append(idx)
            embeddings = torch.stack(embeddings)
            labels = torch.tensor(labels).cuda()
            loss = criterion(embeddings, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_iter.set_postfix(loss=loss.item())
            meter.update(loss.item())
        logger.info(
            f'Epoch: {epoch}, Average Train loss: {meter.avg}'
        )
        torch.save(model.state_dict(), f'epoch{epoch}.pth')
# Hydra injects the config (config/config.yaml) when run as a script.
if __name__ == '__main__':
    main()
| qq456cvb/SemanticTransfer | train_emb.py | train_emb.py | py | 6,581 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "torch.t",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"li... |
27433129284 | import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits import mplot3d
import math
class Interpolator:
    """Inverse-distance-weighted interpolator over (action, quality) knots.

    Shepard-style interpolation where a knot's effective distance is inflated
    both by a smoothing term ``c * (q_max - q_i)`` (low-quality knots count as
    farther away) and a small ``e`` that prevents division by zero.
    """

    def __init__(self):
        DEFAULT_STEP = 1  # 10 ** 5
        self.c = 0.9  # Smoothing factor
        self.e = 0.1  # sys.float_info.epsilon  # Really small number
        self.actions = []    # knot positions (2-D action vectors)
        self.qualities = []  # knot quality values, parallel to self.actions
        self.knots_count = 0
        self.step = DEFAULT_STEP

    def distance(self, chosen_action, i, q_max):
        """Quality-biased squared distance from chosen_action to knot i."""
        return np.linalg.norm(np.subtract(chosen_action, self.actions[i])) ** 2 + self.c * (
            q_max - self.qualities[i]) + self.e

    def wsum(self, chosen_action):
        """Numerator: quality-weighted sum of inverse distances."""
        q_max = max(self.qualities)
        return sum(self.qualities[i] / self.distance(chosen_action, i, q_max)
                   for i in range(self.knots_count))

    def norm(self, chosen_action):
        """Denominator: plain sum of inverse distances."""
        q_max = max(self.qualities)
        return sum(1 / self.distance(chosen_action, i, q_max)
                   for i in range(self.knots_count))

    def get_quality(self, action):
        """Interpolated quality at ``action``; 0 when the ratio is NaN."""
        value = self.wsum(action) / self.norm(action)
        return 0 if math.isnan(value) else value

    def update_function_2(self, action, quality, update_action=True):
        """Gradient-style update of knot qualities (and optionally positions)
        toward a newly observed (action, quality) sample.
        """
        q = np.array(self.qualities)
        knot_count = len(q)
        optimal_action = action
        action = np.array(self.actions)
        Q_new = quality
        num = 0
        den = 0
        deriv_q = []
        deriv_u0 = []
        deriv_u1 = []
        for it in range(0, knot_count):
            weight = np.linalg.norm(optimal_action - action[it]) + self.c * (q.max() - q[it] + self.e)
            den = den + (1.0 / weight)
            num = num + (q[it] / weight)
            # NOTE(review): derivatives use the *running* partial sums num/den,
            # not the final totals — preserved as-is, but verify intent.
            deriv_q.append((den * (weight + q[it] * self.c) - num * self.c) / pow((weight * den), 2))
            deriv_u0.append(((num - den * q[it]) * 2 * (action[it][0] - optimal_action[0])) / (pow(weight * den, 2)))
            deriv_u1.append(((num - den * q[it]) * 2 * (action[it][1] - optimal_action[1])) / (pow(weight * den, 2)))
        Q_dash = num / den
        error = Q_new - Q_dash
        for it in range(0, knot_count):
            q[it] = q[it] + error * deriv_q[it]
            action[it][0] = action[it][0] + error * deriv_u0[it]
            action[it][1] = action[it][1] + error * deriv_u1[it]
        if update_action:
            self.actions = action
        self.qualities = q

    def update_function(self, action, quality, update_action=False):
        """Nudge every knot quality toward ``quality`` observed at ``action``,
        weighted by inverse squared (biased) distance.

        ``update_action`` is accepted for interface parity with
        update_function_2 but is currently unused.
        """
        knot_count = len(self.qualities)
        # Normalize self.qualities to a flat python list
        # (isinstance replaces the previous `type(x) ==` comparisons).
        if isinstance(self.qualities, np.ndarray):
            self.qualities = self.qualities.tolist()
        if isinstance(self.qualities[0], list):
            self.qualities = [e[0] for e in self.qualities]
        # q_max includes the incoming sample so distances stay non-negative.
        q_max = max(self.qualities + [float(quality)])
        for it in range(0, knot_count):
            self.qualities[it] += self.e * \
                (quality - self.qualities[it]) \
                / self.distance(action, it, q_max) ** 2

    def set_u(self, actions):
        """Set the knot actions and refresh the knot count."""
        self.actions = actions
        self.knots_count = len(self.actions)

    def set_q(self, qualities):
        """Set the knot quality values."""
        self.qualities = qualities

    def set_step(self, step):
        self.step = step

    def get_u(self):
        return self.actions

    def get_q(self):
        return self.qualities
# Manual smoke test: seed a 5x5 grid of action knots with fixed qualities,
# render them, apply one update, render again. Requires the project-local
# OutputVisualizer and OpenCV; not run on import.
if __name__ == "__main__":
    from output_visualizer import OutputVisualizer
    import cv2
    u = []
    interpolator = Interpolator()
    # 5x5 grid of 2-D actions over [-1, 1] x [-1, 1] with step 0.5.
    for i in np.arange(-1, 1.1, 0.5):
        for j in np.arange(-1, 1.1, 0.5):
            u.append(np.array([i, j]))
    # Hard-coded example qualities, one per grid knot (25 values).
    q = [0.04448929, 0.5086165, 0.76275706, -0.2851543, 0.39455223,
         -0.19585085, -0.52812827, 0.25080782, 0.4987614, 0.26595366,
         -0.3598364, 0.41622806, 0.10484912, -0.11532316, -0.11455766,
         -0.14297369, -0.04747943, 0.19820265, 0.5723205, 0.13500524,
         -0.24156858, 0.15854892, 0.22840545, 0.35542938, -0.5061423]
    visualizer = OutputVisualizer()
    # Render knots as rows of [x, y, quality].
    visualizer.render(np.append(u, [[e] for e in q], axis=1))
    cv2.waitKey(3000)
    interpolator.set_q(q)
    interpolator.set_u(u)
    # for _ in range(5):
    #     interpolator.update_function_2(np.array([0, 0]), 2) # , update_action=False)
    # interpolator.update_function(np.array([-1, 0]), 2)#, update_action=False)
    interpolator.update_function(np.array([-0.5, 1.0]), -0.6402964293956757) # , update_action=False)
    q = interpolator.get_q()
    u = interpolator.get_u()
    # Render again to eyeball the effect of the update.
    visualizer.render(np.append(u, [[e] for e in q], axis=1))
    cv2.waitKey(3000)
    # print(interpolator.get_quality(np.array([0.75, 0])))
    '''
fig = plt.figure()
ax = plt.axes() # projection="3d")
X = []
Y = []
Z = []
for throttle in np.arange(-1, 1.1, 0.1):
for steering in np.arange(-1, 1.1, 0.1):
X.append(throttle)
Y.append(steering)
Z.append(interpolator.get_quality(np.array([throttle, steering])))
    '''
    # ax.plot_trisurf(np.array(X), np.array(Y), np.array(Z), cmap=cm.bwr)
    # throttles = [a[0] for a in u]
    # steerings = [a[1] for a in u]
    # ax.plot_trisurf(np.array(throttles), np.array(steerings), np.array(q))
    # interpolator.update_function(np.array([1, 0]), 20)
    # interpolator.update_function(np.array([1, 0]), 20)
    '''
X = []
Y = []
Z = []
for throttle in np.arange(-1, 1.1, 0.1):
for steering in np.arange(-1, 1.1, 0.1):
X.append(throttle)
Y.append(steering)
Z.append(interpolator.get_quality(np.array([throttle, steering])))
ax.plot_trisurf(np.array(X), np.array(Y), np.array(Z), cmap=cm.bwr)
    '''
    '''
u = interpolator.get_u()
q = np.reshape(interpolator.get_q(), (-1, 5))
throttles = np.reshape([a[0] for a in u], (-1, 5))
steerings = np.reshape([a[1] for a in u], (-1, 5))
ax.contourf(np.array(throttles), np.array(steerings), np.array(q), cmap=cm.bwr)
plt.show()
    '''
| TimoLoomets/FSTT_dynamics | interpolator.py | interpolator.py | py | 6,172 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.linalg.norm",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "numpy.subtract",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "math.isnan",
"... |
25107333860 | from Actors.Actor import Actor
from Actors.Maze import Maze
from Actors.Direction import Direction
from Util.Timer import Timer
import pyrr
import math
import numpy
class Pacman(Actor):
    """Player-controlled Pacman actor with a chewing-jaw animation."""

    # assume position is 2d vector (x cell, z cell)
    def __init__(self, position: list, direction: Direction, speed: float, id: str):
        super().__init__(position, direction, speed, id)
        self.animation = self.__PacmanAnimation()
        self.updateTimer = Timer()
        self.updateTimer.restart()
        self.isMoving = False

    def update(self, maze: Maze):
        """Advance along the facing direction, honoring walls and queued turns."""
        deltaTime = self.updateTimer.getElapsedTime()
        distance = deltaTime * self.speed
        self.updateTimer.restart()
        if self.canApplyTurnSignal(maze):
            self.applyTurnSignal()
        currentCellPos = Maze.worldCoordsToCellCoords(self.position)
        # Stop at the cell center when the next cell in this direction is blocked.
        if not maze.isVacantSpotInSpecifiedDirection(currentCellPos, self.currectDirection) and maze.isTheMiddleOfTheCell(self.position, self.currectDirection):
            self.isMoving = False
            return
        self.position += self.frontVector * pyrr.Vector3([distance, distance, distance])
        self.isMoving = True

    def notify(self, message: str):
        """Handle ``turn/<key>`` messages (WASD keys) by queueing a turn signal.

        FIX: unrecognized keys previously left ``directionValue`` unbound and
        raised NameError; such messages are now ignored.
        """
        tokens = message.split('/')
        if tokens[0] == "turn":
            keyToDirection = {
                'a': Direction.LEFT,
                'w': Direction.UP,
                'd': Direction.RIGHT,
                's': Direction.DOWN,
            }
            key = tokens[1][0]
            if key in keyToDirection:
                self.setTurnSignal(keyToDirection[key])

    def getLowerThenUpperJawRotations(self):
        """Delegate to the animation; returns (lowerJawDeg, upperJawDeg)."""
        return self.animation.getLowerThenUpperJawRotations()

    class __PacmanAnimation:
        """Oscillates jaw angles between ~10 and ``amplitude`` degrees."""

        def __init__(self):
            self.animationPeriod = 300  # time units for a full amplitude sweep
            self.amplitude = 60.0       # max jaw opening, degrees
            self.openMouse = True
            self.currentRotationLowerJaw = 0.0
            self.currentRotationUpperJaw = 0.0
            self.timer = Timer()
            self.timer.restart()

        def getLowerThenUpperJawRotations(self):
            deltaTime = self.timer.getElapsedTime()
            delta_x_degrees = (deltaTime * self.amplitude) / self.animationPeriod
            # Reverse sweep direction at the closed/open extremes.
            if self.currentRotationLowerJaw < 10.0:
                self.openMouse = True
            elif self.currentRotationLowerJaw > self.amplitude:
                self.openMouse = False
            if self.openMouse:
                self.currentRotationLowerJaw += delta_x_degrees
            else:
                self.currentRotationLowerJaw -= delta_x_degrees
            # Upper jaw mirrors the lower jaw.
            self.currentRotationUpperJaw = -self.currentRotationLowerJaw
            self.timer.restart()
            return self.currentRotationLowerJaw, self.currentRotationUpperJaw
| VolodymyrVakhniuk/Pacman | src/Actors/Pacman.py | Pacman.py | py | 3,050 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "Actors.Actor.Actor",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "Actors.Direction.Direction",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "Util.Timer.Timer",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "Actors.... |
26388489404 | import os
import pickle
import types
import shutil
import time
import multiprocessing as mtp
# Example of the message-package schema exchanged between cells (see cellComm).
packageSample={"mode":"join", "from":"id123", "to":["id234", "id789"], "time":"20181012122123","type":"unknown", "tag":["dog", "white"], "dataSet":"image object"}
class cellComm(object):
    """File-based messaging between "cells".

    A package is a pickled dict dropped as ``package.pkl`` into a peer cell's
    directory; arrival of a valid package wakes that cell's scripts in a
    separate process.
    """

    def __init__(self):
        self.package = "package.pkl"
        self.pklTemp = "temp.pkl"
        self.commScript = "comm.py"
        self.cellScript = "cell.py"
        self.database = "cell.db"
        self.cwdir = os.path.dirname(__file__)
        self.pwdir = os.path.dirname(self.cwdir)

    # check if any file received to awake cell
    def listen(self):
        """Wake the cell's body when a valid (dict) package has arrived."""
        if os.path.exists(self.package):
            with open(self.package, "rb") as pfile:
                data = pickle.load(pfile)
            if isinstance(data, dict):
                self.awakeCell()
            os.remove(self.package)

    # return data passed on
    def recv(self):
        """Return the received package dict, or False for invalid payloads."""
        if os.path.exists(self.package):
            with open(self.package, "rb") as pfile:
                self.data = pickle.load(pfile)
            if isinstance(self.data, dict):
                return self.data
            else:
                os.remove(self.package)
                return False

    # send() function accepts packaged dictionary object
    def send(self, packageObject):
        """Copy the pickled package to every destination in its "to" list and
        wake each destination's comm script."""
        self.packageObject = packageObject
        if isinstance(self.packageObject, dict):
            with open(self.pklTemp, "wb") as temp:
                pickle.dump(self.packageObject, temp)
            for item in self.packageObject["to"]:
                shutil.copy(self.pklTemp, (item + r"/" + self.package))
                self.awakeComm(item + r"/" + self.commScript)
            os.remove(self.pklTemp)

    @staticmethod
    def _runScript(path):
        # Python 3 replacement for the removed execfile builtin: execute the
        # script file in a fresh namespace inside the child process.
        with open(path) as f:
            code = f.read()
        exec(code, {"__name__": "__main__", "__file__": path})

    # awake "comm.py" of target cell to activate communication
    def awakeComm(self, desObject):
        # FIX: Process(target=execfile(path)) ran the script in the *parent*
        # and passed its None result as target (and execfile is Python 2 only).
        # Pass a callable plus args so the script runs in the child process.
        p = mtp.Process(target=self._runScript, args=(desObject,))
        p.daemon = False
        p.start()

    # awake "cell.py" of target cell to activate it's body
    def awakeCell(self):
        p = mtp.Process(target=self._runScript, args=(self.cellScript,))
        p.daemon = False
        p.start()

    def run(self):
        self.listen()
| babyproject/scripts | learn_python_commA.py | learn_python_commA.py | py | 2,162 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numb... |
33040434381 | import asyncio
import dataclasses
import time
from secrets import token_bytes
from typing import Callable, Dict, List, Optional, Tuple, Set
from blspy import AugSchemeMPL, G2Element
from chiabip158 import PyBIP158
import chia.server.ws_connection as ws
from chia.consensus.block_creation import create_unfinished_block
from chia.consensus.block_record import BlockRecord
from chia.consensus.pot_iterations import calculate_ip_iters, calculate_iterations_quality, calculate_sp_iters
from chia.full_node.bundle_tools import best_solution_generator_from_template, simple_solution_generator
from chia.full_node.full_node import FullNode
from chia.full_node.mempool_check_conditions import get_puzzle_and_solution_for_coin
from chia.full_node.signage_point import SignagePoint
from chia.protocols import farmer_protocol, full_node_protocol, introducer_protocol, timelord_protocol, wallet_protocol
from chia.protocols.full_node_protocol import RejectBlock, RejectBlocks
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.protocols.wallet_protocol import PuzzleSolutionResponse, RejectHeaderBlocks, RejectHeaderRequest
from chia.server.outbound_message import Message, make_msg
from chia.types.blockchain_format.coin import Coin, hash_coin_list
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.program import Program
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.coin_record import CoinRecord
from chia.types.end_of_slot_bundle import EndOfSubSlotBundle
from chia.types.full_block import FullBlock
from chia.types.generator_types import BlockGenerator
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
from chia.types.mempool_item import MempoolItem
from chia.types.peer_info import PeerInfo
from chia.types.unfinished_block import UnfinishedBlock
from chia.util.api_decorators import api_request, peer_required, bytes_required, execute_task
from chia.util.generator_tools import get_block_header
from chia.util.hash import std_hash
from chia.util.ints import uint8, uint32, uint64, uint128
from chia.util.merkle_set import MerkleSet
class FullNodeAPI:
full_node: FullNode
def __init__(self, full_node) -> None:
    # Thin protocol/API layer around the FullNode implementation.
    self.full_node = full_node
def _set_state_changed_callback(self, callback: Callable):
    # Forward state-change notifications to the wrapped node.
    self.full_node.state_changed_callback = callback
@property
def server(self):
    # Convenience accessor for the underlying node's server.
    return self.full_node.server
@property
def log(self):
    # Convenience accessor for the underlying node's logger.
    return self.full_node.log
@property
def api_ready(self):
    # True once the wrapped node has finished initialization.
    return self.full_node.initialized
@peer_required
@api_request
async def request_peers(self, _request: full_node_protocol.RequestPeers, peer: ws.WSChiaConnection):
    """Answer a peer-list request using the node's peer manager."""
    port = peer.peer_server_port
    if port is None:
        return None
    peers_manager = self.full_node.full_node_peers
    if peers_manager is None:
        return None
    return await peers_manager.request_peers(PeerInfo(peer.peer_host, port))
@peer_required
@api_request
async def respond_peers(
    self, request: full_node_protocol.RespondPeers, peer: ws.WSChiaConnection
) -> Optional[Message]:
    # Ingest a peer list received from another full node; True marks the
    # source as a full-node peer (vs. an introducer).
    self.log.debug(f"Received {len(request.peer_list)} peers")
    if self.full_node.full_node_peers is not None:
        await self.full_node.full_node_peers.respond_peers(request, peer.get_peer_info(), True)
    return None
@peer_required
@api_request
async def respond_peers_introducer(
    self, request: full_node_protocol.RespondPeersIntroducer, peer: ws.WSChiaConnection
) -> Optional[Message]:
    """Ingest a peer list from an introducer, then drop the connection."""
    self.log.debug(f"Received {len(request.peer_list)} peers from introducer")
    peers_manager = self.full_node.full_node_peers
    if peers_manager is not None:
        # False marks the source as an introducer, not a full-node peer.
        await peers_manager.respond_peers(request, peer.get_peer_info(), False)
    # Introducer connections are one-shot: close once the list is consumed.
    await peer.close()
    return None
@execute_task
@peer_required
@api_request
async def new_peak(self, request: full_node_protocol.NewPeak, peer: ws.WSChiaConnection) -> Optional[Message]:
    """
    A peer notifies us that they have added a new peak to their blockchain. If we don't have it,
    we can ask for it.
    """
    # this semaphore limits the number of tasks that can call new_peak() at
    # the same time, since it can be expensive
    async with self.full_node.new_peak_sem:
        # All real processing is delegated to the node implementation.
        return await self.full_node.new_peak(request, peer)
@peer_required
@api_request
async def new_transaction(
    self, transaction: full_node_protocol.NewTransaction, peer: ws.WSChiaConnection
) -> Optional[Message]:
    """
    A peer notifies us of a new transaction.
    Requests a full transaction if we haven't seen it previously, and if the fees are enough.
    """
    # Ignore if syncing
    if self.full_node.sync_store.get_sync_mode():
        return None
    if not (await self.full_node.synced()):
        return None
    # Transactions are rejected until the network's configured freeze end.
    if int(time.time()) <= self.full_node.constants.INITIAL_FREEZE_END_TIMESTAMP:
        return None

    # Ignore if already seen
    if self.full_node.mempool_manager.seen(transaction.transaction_id):
        return None

    if self.full_node.mempool_manager.is_fee_enough(transaction.fees, transaction.cost):
        # If there's current pending request just add this peer to the set of peers that have this tx
        if transaction.transaction_id in self.full_node.full_node_store.pending_tx_request:
            if transaction.transaction_id in self.full_node.full_node_store.peers_with_tx:
                current_set = self.full_node.full_node_store.peers_with_tx[transaction.transaction_id]
                if peer.peer_node_id in current_set:
                    return None
                current_set.add(peer.peer_node_id)
                return None
            else:
                new_set = set()
                new_set.add(peer.peer_node_id)
                self.full_node.full_node_store.peers_with_tx[transaction.transaction_id] = new_set
                return None

        # First advertisement of this tx: record this peer as the pending
        # requestee and spawn a background fetch-with-retry task.
        self.full_node.full_node_store.pending_tx_request[transaction.transaction_id] = peer.peer_node_id
        new_set = set()
        new_set.add(peer.peer_node_id)
        self.full_node.full_node_store.peers_with_tx[transaction.transaction_id] = new_set

        async def tx_request_and_timeout(full_node: FullNode, transaction_id, task_id):
            # Poll advertising peers one at a time (5s apart) until the tx is
            # in the mempool, no peers remain, or 10 attempts have been made.
            counter = 0
            try:
                while True:
                    # Limit to asking 10 peers, it's possible that this tx got included on chain already
                    # Highly unlikely 10 peers that advertised a tx don't respond to a request
                    if counter == 10:
                        break
                    if transaction_id not in full_node.full_node_store.peers_with_tx:
                        break
                    peers_with_tx: Set = full_node.full_node_store.peers_with_tx[transaction_id]
                    if len(peers_with_tx) == 0:
                        break
                    peer_id = peers_with_tx.pop()
                    assert full_node.server is not None
                    if peer_id not in full_node.server.all_connections:
                        continue
                    peer = full_node.server.all_connections[peer_id]
                    request_tx = full_node_protocol.RequestTransaction(transaction.transaction_id)
                    msg = make_msg(ProtocolMessageTypes.request_transaction, request_tx)
                    await peer.send_message(msg)
                    await asyncio.sleep(5)
                    counter += 1
                    if full_node.mempool_manager.seen(transaction_id):
                        break
            except asyncio.CancelledError:
                pass
            finally:
                # Always Cleanup
                if transaction_id in full_node.full_node_store.peers_with_tx:
                    full_node.full_node_store.peers_with_tx.pop(transaction_id)
                if transaction_id in full_node.full_node_store.pending_tx_request:
                    full_node.full_node_store.pending_tx_request.pop(transaction_id)
                if task_id in full_node.full_node_store.tx_fetch_tasks:
                    full_node.full_node_store.tx_fetch_tasks.pop(task_id)

        task_id = token_bytes()
        fetch_task = asyncio.create_task(
            tx_request_and_timeout(self.full_node, transaction.transaction_id, task_id)
        )
        self.full_node.full_node_store.tx_fetch_tasks[task_id] = fetch_task
        return None
    return None
@api_request
async def request_transaction(self, request: full_node_protocol.RequestTransaction) -> Optional[Message]:
    """Serve a full SpendBundle from our mempool, if present and synced."""
    # While syncing, our mempool is not authoritative — stay silent.
    if self.full_node.sync_store.get_sync_mode():
        return None
    bundle = self.full_node.mempool_manager.get_spendbundle(request.transaction_id)
    if bundle is None:
        return None
    return make_msg(
        ProtocolMessageTypes.respond_transaction,
        full_node_protocol.RespondTransaction(bundle),
    )
@peer_required
@api_request
@bytes_required
async def respond_transaction(
    self,
    tx: full_node_protocol.RespondTransaction,
    peer: ws.WSChiaConnection,
    tx_bytes: bytes = b"",
    test: bool = False,
) -> Optional[Message]:
    """
    Receives a full transaction from peer.
    If tx is added to mempool, send tx_id to others. (new_transaction)
    """
    assert tx_bytes != b""
    # The canonical transaction id is the hash of the serialized bundle.
    spend_name = std_hash(tx_bytes)
    # Clear fetch bookkeeping so the tx_request_and_timeout task stops
    # asking other peers for this transaction.
    if spend_name in self.full_node.full_node_store.pending_tx_request:
        self.full_node.full_node_store.pending_tx_request.pop(spend_name)
    if spend_name in self.full_node.full_node_store.peers_with_tx:
        self.full_node.full_node_store.peers_with_tx.pop(spend_name)
    await self.full_node.respond_transaction(tx.transaction, spend_name, peer, test)
    return None
@api_request
async def request_proof_of_weight(self, request: full_node_protocol.RequestProofOfWeight) -> Optional[Message]:
    # Build (or reuse) a weight proof for the requested peak and serve it.
    if self.full_node.weight_proof_handler is None:
        return None
    if not self.full_node.blockchain.contains_block(request.tip):
        self.log.error(f"got weight proof request for unknown peak {request.tip}")
        return None
    if request.tip in self.full_node.pow_creation:
        # Another request is already computing this proof; wait for it and
        # then fetch the (now cached) result.
        event = self.full_node.pow_creation[request.tip]
        await event.wait()
        wp = await self.full_node.weight_proof_handler.get_proof_of_weight(request.tip)
    else:
        event = asyncio.Event()
        self.full_node.pow_creation[request.tip] = event
        wp = await self.full_node.weight_proof_handler.get_proof_of_weight(request.tip)
        event.set()
    tips = list(self.full_node.pow_creation.keys())

    if len(tips) > 4:
        # Remove old from cache
        for i in range(0, 4):
            self.full_node.pow_creation.pop(tips[i])

    if wp is None:
        self.log.error(f"failed creating weight proof for peak {request.tip}")
        return None

    # Serialization of wp is slow
    # Reuse the last serialized message when the same tip is requested again.
    if (
        self.full_node.full_node_store.serialized_wp_message_tip is not None
        and self.full_node.full_node_store.serialized_wp_message_tip == request.tip
    ):
        return self.full_node.full_node_store.serialized_wp_message
    message = make_msg(
        ProtocolMessageTypes.respond_proof_of_weight, full_node_protocol.RespondProofOfWeight(wp, request.tip)
    )
    self.full_node.full_node_store.serialized_wp_message_tip = request.tip
    self.full_node.full_node_store.serialized_wp_message = message
    return message
@api_request
async def respond_proof_of_weight(self, request: full_node_protocol.RespondProofOfWeight) -> Optional[Message]:
    # Unsolicited/late weight proofs are not processed here; just log.
    self.log.warning("Received proof of weight too late.")
    return None
@api_request
async def request_block(self, request: full_node_protocol.RequestBlock) -> Optional[Message]:
    """Serve one full block by height, or a RejectBlock if unavailable."""

    def _reject() -> Message:
        return make_msg(ProtocolMessageTypes.reject_block, RejectBlock(request.height))

    if not self.full_node.blockchain.contains_height(request.height):
        return _reject()
    header_hash = self.full_node.blockchain.height_to_hash(request.height)
    block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
    if block is None:
        return _reject()
    # Strip the generator when the requester does not want transactions.
    if not request.include_transaction_block and block.transactions_generator is not None:
        block = dataclasses.replace(block, transactions_generator=None)
    return make_msg(ProtocolMessageTypes.respond_block, full_node_protocol.RespondBlock(block))
@api_request
async def request_blocks(self, request: full_node_protocol.RequestBlocks) -> Optional[Message]:
    # Serve up to 32 consecutive blocks, either as objects (transactions
    # stripped) or as pre-serialized bytes (include_transaction_block=True).
    if request.end_height < request.start_height or request.end_height - request.start_height > 32:
        reject = RejectBlocks(request.start_height, request.end_height)
        msg: Message = make_msg(ProtocolMessageTypes.reject_blocks, reject)
        return msg
    # Reject the whole range if any height is missing from our chain.
    for i in range(request.start_height, request.end_height + 1):
        if not self.full_node.blockchain.contains_height(uint32(i)):
            reject = RejectBlocks(request.start_height, request.end_height)
            msg = make_msg(ProtocolMessageTypes.reject_blocks, reject)
            return msg

    if not request.include_transaction_block:
        blocks: List[FullBlock] = []
        for i in range(request.start_height, request.end_height + 1):
            block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(
                self.full_node.blockchain.height_to_hash(uint32(i))
            )
            if block is None:
                reject = RejectBlocks(request.start_height, request.end_height)
                msg = make_msg(ProtocolMessageTypes.reject_blocks, reject)
                return msg
            # Strip transaction generators when the peer only wants headers.
            block = dataclasses.replace(block, transactions_generator=None)
            blocks.append(block)
        msg = make_msg(
            ProtocolMessageTypes.respond_blocks,
            full_node_protocol.RespondBlocks(request.start_height, request.end_height, blocks),
        )
    else:
        blocks_bytes: List[bytes] = []
        for i in range(request.start_height, request.end_height + 1):
            block_bytes: Optional[bytes] = await self.full_node.block_store.get_full_block_bytes(
                self.full_node.blockchain.height_to_hash(uint32(i))
            )
            if block_bytes is None:
                reject = RejectBlocks(request.start_height, request.end_height)
                msg = make_msg(ProtocolMessageTypes.reject_blocks, reject)
                return msg
            blocks_bytes.append(block_bytes)

        # Hand-assemble the RespondBlocks wire format (uint32 start, uint32
        # end, 4-byte big-endian count, then raw block bytes) to avoid
        # deserializing and re-serializing each block.
        respond_blocks_manually_streamed: bytes = (
            bytes(uint32(request.start_height))
            + bytes(uint32(request.end_height))
            + len(blocks_bytes).to_bytes(4, "big", signed=False)
        )
        for block_bytes in blocks_bytes:
            respond_blocks_manually_streamed += block_bytes
        msg = make_msg(ProtocolMessageTypes.respond_blocks, respond_blocks_manually_streamed)

    return msg
@api_request
async def reject_block(self, request: full_node_protocol.RejectBlock):
    # Peer could not serve the block we asked for; nothing to do but log.
    self.log.debug(f"reject_block {request.height}")
@api_request
async def reject_blocks(self, request: full_node_protocol.RejectBlocks):
    # Peer could not serve the requested block range; log only.
    self.log.debug(f"reject_blocks {request.start_height} {request.end_height}")
@api_request
async def respond_blocks(self, request: full_node_protocol.RespondBlocks) -> None:
    # Blocks arriving outside an active request flow are dropped.
    self.log.warning("Received unsolicited/late blocks")
    return None
@api_request
@peer_required
async def respond_block(
    self,
    respond_block: full_node_protocol.RespondBlock,
    peer: ws.WSChiaConnection,
) -> Optional[Message]:
    """
    Receive a full block from a peer full node (or ourselves).
    """
    # Unsolicited blocks are dropped; solicited ones are consumed elsewhere.
    self.log.warning(f"Received unsolicited/late block from peer {peer.get_peer_info()}")
    return None
@api_request
async def new_unfinished_block(
    self, new_unfinished_block: full_node_protocol.NewUnfinishedBlock
) -> Optional[Message]:
    # Fetch an advertised unfinished block unless we already have it or a
    # fetch for it is in flight.
    # Ignore if syncing
    if self.full_node.sync_store.get_sync_mode():
        return None
    block_hash = new_unfinished_block.unfinished_reward_hash
    if self.full_node.full_node_store.get_unfinished_block(block_hash) is not None:
        return None

    # This prevents us from downloading the same block from many peers
    if block_hash in self.full_node.full_node_store.requesting_unfinished_blocks:
        return None

    msg = make_msg(
        ProtocolMessageTypes.request_unfinished_block,
        full_node_protocol.RequestUnfinishedBlock(block_hash),
    )
    self.full_node.full_node_store.requesting_unfinished_blocks.add(block_hash)

    # However, we want to eventually download from other peers, if this peer does not respond
    # Todo: keep track of who it was
    async def eventually_clear():
        # After 5s, allow the block to be requested again from another peer.
        await asyncio.sleep(5)
        if block_hash in self.full_node.full_node_store.requesting_unfinished_blocks:
            self.full_node.full_node_store.requesting_unfinished_blocks.remove(block_hash)

    asyncio.create_task(eventually_clear())

    return msg
@api_request
async def request_unfinished_block(
    self, request_unfinished_block: full_node_protocol.RequestUnfinishedBlock
) -> Optional[Message]:
    """Serve a cached unfinished block to the requesting peer, if we hold one for that reward hash."""
    cached: Optional[UnfinishedBlock] = self.full_node.full_node_store.get_unfinished_block(
        request_unfinished_block.unfinished_reward_hash
    )
    if cached is None:
        return None
    return make_msg(
        ProtocolMessageTypes.respond_unfinished_block,
        full_node_protocol.RespondUnfinishedBlock(cached),
    )
@peer_required
@api_request
async def respond_unfinished_block(
    self,
    respond_unfinished_block: full_node_protocol.RespondUnfinishedBlock,
    peer: ws.WSChiaConnection,
) -> Optional[Message]:
    """Forward a received unfinished block to the full node for validation, unless we are syncing."""
    if not self.full_node.sync_store.get_sync_mode():
        await self.full_node.respond_unfinished_block(respond_unfinished_block, peer)
    return None
@api_request
@peer_required
async def new_signage_point_or_end_of_sub_slot(
    self, new_sp: full_node_protocol.NewSignagePointOrEndOfSubSlot, peer: ws.WSChiaConnection
) -> Optional[Message]:
    """A peer advertised a signage point or end-of-sub-slot; request it, first
    catching up on any sub-slots we are missing (up to 30 hops back)."""
    # Ignore if syncing
    if self.full_node.sync_store.get_sync_mode():
        return None
    # Already have this exact signage point: nothing to request.
    if (
        self.full_node.full_node_store.get_signage_point_by_index(
            new_sp.challenge_hash,
            new_sp.index_from_challenge,
            new_sp.last_rc_infusion,
        )
        is not None
    ):
        return None
    # A newer signage point supersedes this one.
    if self.full_node.full_node_store.have_newer_signage_point(
        new_sp.challenge_hash, new_sp.index_from_challenge, new_sp.last_rc_infusion
    ):
        return None

    if new_sp.index_from_challenge == 0 and new_sp.prev_challenge_hash is not None:
        if self.full_node.full_node_store.get_sub_slot(new_sp.prev_challenge_hash) is None:
            # We are missing ancestor sub-slots. Walk backwards, collecting
            # end-of-sub-slot bundles until we hit one we know (or genesis).
            collected_eos = []
            challenge_hash_to_request = new_sp.challenge_hash
            last_rc = new_sp.last_rc_infusion
            num_non_empty_sub_slots_seen = 0
            for _ in range(30):
                # More than a few non-empty sub-slots back means our chains diverge.
                if num_non_empty_sub_slots_seen >= 3:
                    self.log.debug("Diverged from peer. Don't have the same blocks")
                    return None
                # If this is an end of sub slot, and we don't have the prev, request the prev instead
                # We want to catch up to the latest slot so we can receive signage points
                full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
                    challenge_hash_to_request, uint8(0), last_rc
                )
                response = await peer.request_signage_point_or_end_of_sub_slot(full_node_request, timeout=10)
                if not isinstance(response, full_node_protocol.RespondEndOfSubSlot):
                    self.full_node.log.debug(f"Invalid response for slot {response}")
                    return None
                collected_eos.append(response)
                # Stop once the chain connects to a sub-slot we have, or to genesis;
                # then apply the collected bundles oldest-first.
                if (
                    self.full_node.full_node_store.get_sub_slot(
                        response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
                    )
                    is not None
                    or response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
                    == self.full_node.constants.GENESIS_CHALLENGE
                ):
                    for eos in reversed(collected_eos):
                        await self.respond_end_of_sub_slot(eos, peer)
                    return None
                # Differing CC/RC iteration counts indicate a sub-slot containing blocks.
                if (
                    response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.number_of_iterations
                    != response.end_of_slot_bundle.reward_chain.end_of_slot_vdf.number_of_iterations
                ):
                    num_non_empty_sub_slots_seen += 1
                challenge_hash_to_request = (
                    response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
                )
                last_rc = response.end_of_slot_bundle.reward_chain.end_of_slot_vdf.challenge
            self.full_node.log.warning("Failed to catch up in sub-slots")
            return None

    if new_sp.index_from_challenge > 0:
        if (
            new_sp.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE
            and self.full_node.full_node_store.get_sub_slot(new_sp.challenge_hash) is None
        ):
            # If this is a normal signage point, and we don't have the end of sub slot, request the end of sub slot
            full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
                new_sp.challenge_hash, uint8(0), new_sp.last_rc_infusion
            )
            return make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request)

    # Otherwise (we have the prev or the end of sub slot), request it normally
    full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
        new_sp.challenge_hash, new_sp.index_from_challenge, new_sp.last_rc_infusion
    )
    return make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request)
@api_request
async def request_signage_point_or_end_of_sub_slot(
    self, request: full_node_protocol.RequestSignagePointOrEndOfSubSlot
) -> Optional[Message]:
    """Serve a cached end-of-sub-slot bundle (index 0) or a signage point (index > 0) to a peer."""
    if request.index_from_challenge == 0:
        # Index 0 means the peer wants the end-of-sub-slot bundle itself.
        sub_slot: Optional[Tuple[EndOfSubSlotBundle, int, uint128]] = self.full_node.full_node_store.get_sub_slot(
            request.challenge_hash
        )
        if sub_slot is not None:
            return make_msg(
                ProtocolMessageTypes.respond_end_of_sub_slot,
                full_node_protocol.RespondEndOfSubSlot(sub_slot[0]),
            )
    else:
        if self.full_node.full_node_store.get_sub_slot(request.challenge_hash) is None:
            # Genesis challenge has no sub-slot bundle, so missing it is expected.
            if request.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE:
                self.log.info(f"Don't have challenge hash {request.challenge_hash}")

        sp: Optional[SignagePoint] = self.full_node.full_node_store.get_signage_point_by_index(
            request.challenge_hash,
            request.index_from_challenge,
            request.last_rc_infusion,
        )
        if sp is not None:
            # A stored non-zero-index signage point always has all four VDF fields.
            assert (
                sp.cc_vdf is not None
                and sp.cc_proof is not None
                and sp.rc_vdf is not None
                and sp.rc_proof is not None
            )
            full_node_response = full_node_protocol.RespondSignagePoint(
                request.index_from_challenge,
                sp.cc_vdf,
                sp.cc_proof,
                sp.rc_vdf,
                sp.rc_proof,
            )
            return make_msg(ProtocolMessageTypes.respond_signage_point, full_node_response)
        else:
            self.log.info(f"Don't have signage point {request}")
    return None
@peer_required
@api_request
async def respond_signage_point(
    self, request: full_node_protocol.RespondSignagePoint, peer: ws.WSChiaConnection
) -> Optional[Message]:
    """Validate and store a received signage point, then run post-processing
    (broadcast etc.) if it was new. Serialized under the timelord lock."""
    if self.full_node.sync_store.get_sync_mode():
        return None
    async with self.full_node.timelord_lock:
        # Already have signage point
        if self.full_node.full_node_store.have_newer_signage_point(
            request.challenge_chain_vdf.challenge,
            request.index_from_challenge,
            request.reward_chain_vdf.challenge,
        ):
            return None
        existing_sp = self.full_node.full_node_store.get_signage_point(
            request.challenge_chain_vdf.output.get_hash()
        )
        # Exact duplicate (same CC output and RC VDF): nothing to do.
        if existing_sp is not None and existing_sp.rc_vdf == request.reward_chain_vdf:
            return None
        peak = self.full_node.blockchain.get_peak()
        if peak is not None and peak.height > self.full_node.constants.MAX_SUB_SLOT_BLOCKS:
            # Normal operation: derive iterations and the peak's infusion sub-slot.
            next_sub_slot_iters = self.full_node.blockchain.get_next_slot_iters(peak.header_hash, True)
            sub_slots_for_peak = await self.full_node.blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
            assert sub_slots_for_peak is not None
            ip_sub_slot: Optional[EndOfSubSlotBundle] = sub_slots_for_peak[1]
        else:
            # Early chain: fall back to starting constants.
            sub_slot_iters = self.full_node.constants.SUB_SLOT_ITERS_STARTING
            next_sub_slot_iters = sub_slot_iters
            ip_sub_slot = None

        added = self.full_node.full_node_store.new_signage_point(
            request.index_from_challenge,
            self.full_node.blockchain,
            self.full_node.blockchain.get_peak(),
            next_sub_slot_iters,
            SignagePoint(
                request.challenge_chain_vdf,
                request.challenge_chain_proof,
                request.reward_chain_vdf,
                request.reward_chain_proof,
            ),
        )

        if added:
            await self.full_node.signage_point_post_processing(request, peer, ip_sub_slot)
        else:
            self.log.debug(
                f"Signage point {request.index_from_challenge} not added, CC challenge: "
                f"{request.challenge_chain_vdf.challenge}, RC challenge: {request.reward_chain_vdf.challenge}"
            )

        return None
@peer_required
@api_request
async def respond_end_of_sub_slot(
    self, request: full_node_protocol.RespondEndOfSubSlot, peer: ws.WSChiaConnection
) -> Optional[Message]:
    """Hand an end-of-sub-slot bundle to the full node for processing; ignored while syncing."""
    if self.full_node.sync_store.get_sync_mode():
        return None
    response_msg, _added = await self.full_node.respond_end_of_sub_slot(request, peer)
    return response_msg
@peer_required
@api_request
async def request_mempool_transactions(
    self,
    request: full_node_protocol.RequestMempoolTransactions,
    peer: ws.WSChiaConnection,
) -> Optional[Message]:
    """Send the peer every mempool transaction not already covered by its BIP158 filter."""
    peer_filter = PyBIP158(bytearray(request.filter))
    missing: List[MempoolItem] = await self.full_node.mempool_manager.get_items_not_in_filter(peer_filter)
    for mempool_item in missing:
        reply = make_msg(
            ProtocolMessageTypes.respond_transaction,
            full_node_protocol.RespondTransaction(mempool_item.spend_bundle),
        )
        await peer.send_message(reply)
    return None
# FARMER PROTOCOL
@api_request
@peer_required
async def declare_proof_of_space(
    self, request: farmer_protocol.DeclareProofOfSpace, peer: ws.WSChiaConnection
) -> Optional[Message]:
    """
    Creates a block body and header, with the proof of space, coinbase, and fee targets provided
    by the farmer, and sends the hash of the header data back to the farmer.

    Also stores the candidate block (and a transaction-free backup) so that
    signed_values() can later complete it with the farmer's signatures.
    """
    if self.full_node.sync_store.get_sync_mode():
        return None

    async with self.full_node.timelord_lock:
        # The farmer's proof must correspond to a signage point we have stored.
        sp_vdfs: Optional[SignagePoint] = self.full_node.full_node_store.get_signage_point(
            request.challenge_chain_sp
        )

        if sp_vdfs is None:
            self.log.warning(f"Received proof of space for an unknown signage point {request.challenge_chain_sp}")
            return None
        if request.signage_point_index > 0:
            assert sp_vdfs.rc_vdf is not None
            # The reward-chain SP must match our current view, otherwise the proof is stale.
            if sp_vdfs.rc_vdf.output.get_hash() != request.reward_chain_sp:
                self.log.debug(
                    f"Received proof of space for a potentially old signage point {request.challenge_chain_sp}. "
                    f"Current sp: {sp_vdfs.rc_vdf.output.get_hash()}"
                )
                return None

        if request.signage_point_index == 0:
            cc_challenge_hash: bytes32 = request.challenge_chain_sp
        else:
            assert sp_vdfs.cc_vdf is not None
            cc_challenge_hash = sp_vdfs.cc_vdf.challenge

        pos_sub_slot: Optional[Tuple[EndOfSubSlotBundle, int, uint128]] = None
        if request.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE:
            # Checks that the proof of space is a response to a recent challenge and valid SP
            pos_sub_slot = self.full_node.full_node_store.get_sub_slot(cc_challenge_hash)
            if pos_sub_slot is None:
                self.log.warning(f"Received proof of space for an unknown sub slot: {request}")
                return None
            total_iters_pos_slot: uint128 = pos_sub_slot[2]
        else:
            total_iters_pos_slot = uint128(0)

        assert cc_challenge_hash == request.challenge_hash

        # Now we know that the proof of space has a signage point either:
        # 1. In the previous sub-slot of the peak (overflow)
        # 2. In the same sub-slot as the peak
        # 3. In a future sub-slot that we already know of

        # Checks that the proof of space is valid
        quality_string: Optional[bytes32] = request.proof_of_space.verify_and_get_quality_string(
            self.full_node.constants, cc_challenge_hash, request.challenge_chain_sp
        )
        assert quality_string is not None and len(quality_string) == 32

        # Grab best transactions from Mempool for given tip target
        aggregate_signature: G2Element = G2Element()
        block_generator: Optional[BlockGenerator] = None
        additions: Optional[List[Coin]] = []
        removals: Optional[List[Coin]] = []
        async with self.full_node.blockchain.lock:
            peak: Optional[BlockRecord] = self.full_node.blockchain.get_peak()
            if peak is not None:
                # Finds the last transaction block before this one
                curr_l_tb: BlockRecord = peak
                while not curr_l_tb.is_transaction_block:
                    curr_l_tb = self.full_node.blockchain.block_record(curr_l_tb.prev_hash)
                try:
                    mempool_bundle = await self.full_node.mempool_manager.create_bundle_from_mempool(
                        curr_l_tb.header_hash
                    )
                except Exception as e:
                    # Best effort: a failed bundle means we farm a block without transactions.
                    self.full_node.log.error(f"Error making spend bundle {e} peak: {peak}")
                    mempool_bundle = None
                if mempool_bundle is not None:
                    spend_bundle = mempool_bundle[0]
                    additions = mempool_bundle[1]
                    removals = mempool_bundle[2]
                    self.full_node.log.info(f"Add rem: {len(additions)} {len(removals)}")
                    aggregate_signature = spend_bundle.aggregated_signature
                    if self.full_node.full_node_store.previous_generator is not None:
                        self.log.info(
                            f"Using previous generator for height "
                            f"{self.full_node.full_node_store.previous_generator}"
                        )
                        block_generator = best_solution_generator_from_template(
                            self.full_node.full_node_store.previous_generator, spend_bundle
                        )
                    else:
                        block_generator = simple_solution_generator(spend_bundle)

        def get_plot_sig(to_sign, _) -> G2Element:
            # The farmer pre-signed both signage-point hashes; anything else gets
            # an empty (identity) signature.
            if to_sign == request.challenge_chain_sp:
                return request.challenge_chain_sp_signature
            elif to_sign == request.reward_chain_sp:
                return request.reward_chain_sp_signature
            return G2Element()

        def get_pool_sig(_1, _2) -> Optional[G2Element]:
            return request.pool_signature

        prev_b: Optional[BlockRecord] = self.full_node.blockchain.get_peak()

        # Finds the previous block from the signage point, ensuring that the reward chain VDF is correct
        if prev_b is not None:
            if request.signage_point_index == 0:
                if pos_sub_slot is None:
                    self.log.warning("Pos sub slot is None")
                    return None
                rc_challenge = pos_sub_slot[0].reward_chain.end_of_slot_vdf.challenge
            else:
                assert sp_vdfs.rc_vdf is not None
                rc_challenge = sp_vdfs.rc_vdf.challenge

            # Backtrack through empty sub-slots
            for eos, _, _ in reversed(self.full_node.full_node_store.finished_sub_slots):
                if eos is not None and eos.reward_chain.get_hash() == rc_challenge:
                    rc_challenge = eos.reward_chain.end_of_slot_vdf.challenge

            found = False
            attempts = 0
            # Walk back at most 10 blocks looking for the block whose infusion
            # matches the reward-chain challenge of our signage point.
            while prev_b is not None and attempts < 10:
                if prev_b.reward_infusion_new_challenge == rc_challenge:
                    found = True
                    break
                if prev_b.finished_reward_slot_hashes is not None and len(prev_b.finished_reward_slot_hashes) > 0:
                    if prev_b.finished_reward_slot_hashes[-1] == rc_challenge:
                        # This block includes a sub-slot which is where our SP vdf starts. Go back one more
                        # to find the prev block
                        prev_b = self.full_node.blockchain.try_block_record(prev_b.prev_hash)
                        found = True
                        break
                prev_b = self.full_node.blockchain.try_block_record(prev_b.prev_hash)
                attempts += 1
            if not found:
                self.log.warning("Did not find a previous block with the correct reward chain hash")
                return None

        try:
            finished_sub_slots: Optional[
                List[EndOfSubSlotBundle]
            ] = self.full_node.full_node_store.get_finished_sub_slots(
                self.full_node.blockchain, prev_b, cc_challenge_hash
            )
            if finished_sub_slots is None:
                return None

            if (
                len(finished_sub_slots) > 0
                and pos_sub_slot is not None
                and finished_sub_slots[-1] != pos_sub_slot[0]
            ):
                self.log.error("Have different sub-slots than is required to farm this block")
                return None
        except ValueError as e:
            self.log.warning(f"Value Error: {e}")
            return None

        if prev_b is None:
            # Genesis block: pre-farm reward targets come from the constants.
            pool_target = PoolTarget(
                self.full_node.constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH,
                uint32(0),
            )
            farmer_ph = self.full_node.constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH
        else:
            farmer_ph = request.farmer_puzzle_hash
            # Pool-contract plots pay to the contract puzzle hash; otherwise the
            # farmer must have supplied an explicit pool target.
            if request.proof_of_space.pool_contract_puzzle_hash is not None and request.pool_target is None:
                pool_target = PoolTarget(request.proof_of_space.pool_contract_puzzle_hash, uint32(0))
            else:
                assert request.pool_target is not None
                pool_target = request.pool_target

        if peak is None or peak.height <= self.full_node.constants.MAX_SUB_SLOT_BLOCKS:
            difficulty = self.full_node.constants.DIFFICULTY_STARTING
            sub_slot_iters = self.full_node.constants.SUB_SLOT_ITERS_STARTING
        else:
            difficulty = uint64(peak.weight - self.full_node.blockchain.block_record(peak.prev_hash).weight)
            sub_slot_iters = peak.sub_slot_iters
            # Apply any difficulty / iterations adjustments from finished sub-slots.
            for sub_slot in finished_sub_slots:
                if sub_slot.challenge_chain.new_difficulty is not None:
                    difficulty = sub_slot.challenge_chain.new_difficulty
                if sub_slot.challenge_chain.new_sub_slot_iters is not None:
                    sub_slot_iters = sub_slot.challenge_chain.new_sub_slot_iters

        required_iters: uint64 = calculate_iterations_quality(
            self.full_node.constants.DIFFICULTY_CONSTANT_FACTOR,
            quality_string,
            request.proof_of_space.size,
            difficulty,
            request.challenge_chain_sp,
        )
        sp_iters: uint64 = calculate_sp_iters(self.full_node.constants, sub_slot_iters, request.signage_point_index)
        ip_iters: uint64 = calculate_ip_iters(
            self.full_node.constants,
            sub_slot_iters,
            request.signage_point_index,
            required_iters,
        )

        # The block's timestamp must be greater than the previous transaction block's timestamp
        timestamp = uint64(int(time.time()))
        curr: Optional[BlockRecord] = prev_b
        while curr is not None and not curr.is_transaction_block and curr.height != 0:
            curr = self.full_node.blockchain.try_block_record(curr.prev_hash)
        if curr is not None:
            assert curr.timestamp is not None
            if timestamp <= curr.timestamp:
                timestamp = uint64(int(curr.timestamp + 1))

        self.log.info("Starting to make the unfinished block")
        unfinished_block: UnfinishedBlock = create_unfinished_block(
            self.full_node.constants,
            total_iters_pos_slot,
            sub_slot_iters,
            request.signage_point_index,
            sp_iters,
            ip_iters,
            request.proof_of_space,
            cc_challenge_hash,
            farmer_ph,
            pool_target,
            get_plot_sig,
            get_pool_sig,
            sp_vdfs,
            timestamp,
            self.full_node.blockchain,
            b"",
            block_generator,
            aggregate_signature,
            additions,
            removals,
            prev_b,
            finished_sub_slots,
        )
        self.log.info("Made the unfinished block")
        if prev_b is not None:
            height: uint32 = uint32(prev_b.height + 1)
        else:
            height = uint32(0)
        self.full_node.full_node_store.add_candidate_block(quality_string, height, unfinished_block)

        foliage_sb_data_hash = unfinished_block.foliage.foliage_block_data.get_hash()
        if unfinished_block.is_transaction_block():
            foliage_transaction_block_hash = unfinished_block.foliage.foliage_transaction_block_hash
        else:
            # Non-transaction blocks carry a zeroed placeholder hash.
            foliage_transaction_block_hash = bytes([0] * 32)

        message = farmer_protocol.RequestSignedValues(
            quality_string,
            foliage_sb_data_hash,
            foliage_transaction_block_hash,
        )
        await peer.send_message(make_msg(ProtocolMessageTypes.request_signed_values, message))

        # Adds backup in case the first one fails
        if unfinished_block.is_transaction_block() and unfinished_block.transactions_generator is not None:
            # Same block, but with no generator/transactions, so it can still be
            # farmed if the transaction-carrying candidate fails validation.
            unfinished_block_backup = create_unfinished_block(
                self.full_node.constants,
                total_iters_pos_slot,
                sub_slot_iters,
                request.signage_point_index,
                sp_iters,
                ip_iters,
                request.proof_of_space,
                cc_challenge_hash,
                farmer_ph,
                pool_target,
                get_plot_sig,
                get_pool_sig,
                sp_vdfs,
                timestamp,
                self.full_node.blockchain,
                b"",
                None,
                G2Element(),
                None,
                None,
                prev_b,
                finished_sub_slots,
            )

            self.full_node.full_node_store.add_candidate_block(
                quality_string, height, unfinished_block_backup, backup=True
            )
    return None
@api_request
@peer_required
async def signed_values(
    self, farmer_request: farmer_protocol.SignedValues, peer: ws.WSChiaConnection
) -> Optional[Message]:
    """
    Signature of header hash, by the harvester. This is enough to create an unfinished
    block, which only needs a Proof of Time to be finished. If the signature is valid,
    we call the unfinished_block routine.

    On failure, falls back to the transaction-free backup candidate stored by
    declare_proof_of_space() and asks the farmer to sign that instead.
    """
    candidate_tuple: Optional[Tuple[uint32, UnfinishedBlock]] = self.full_node.full_node_store.get_candidate_block(
        farmer_request.quality_string
    )

    if candidate_tuple is None:
        self.log.warning(f"Quality string {farmer_request.quality_string} not found in database")
        return None
    height, candidate = candidate_tuple

    # Verify the farmer's signature over the foliage block data.
    if not AugSchemeMPL.verify(
        candidate.reward_chain_block.proof_of_space.plot_public_key,
        candidate.foliage.foliage_block_data.get_hash(),
        farmer_request.foliage_block_data_signature,
    ):
        self.log.warning("Signature not valid. There might be a collision in plots. Ignore this during tests.")
        return None

    # Splice the farmer's signatures into the stored candidate.
    fsb2 = dataclasses.replace(
        candidate.foliage,
        foliage_block_data_signature=farmer_request.foliage_block_data_signature,
    )
    if candidate.is_transaction_block():
        fsb2 = dataclasses.replace(
            fsb2, foliage_transaction_block_signature=farmer_request.foliage_transaction_block_signature
        )

    new_candidate = dataclasses.replace(candidate, foliage=fsb2)
    if not self.full_node.has_valid_pool_sig(new_candidate):
        self.log.warning("Trying to make a pre-farm block but height is not 0")
        return None

    # Propagate to ourselves (which validates and does further propagations)
    request = full_node_protocol.RespondUnfinishedBlock(new_candidate)
    try:
        await self.full_node.respond_unfinished_block(request, None, True)
    except Exception as e:
        # If we have an error with this block, try making an empty block
        self.full_node.log.error(f"Error farming block {e} {request}")
        candidate_tuple = self.full_node.full_node_store.get_candidate_block(
            farmer_request.quality_string, backup=True
        )
        if candidate_tuple is not None:
            height, unfinished_block = candidate_tuple
            # Promote the backup to the primary candidate and request signatures for it.
            self.full_node.full_node_store.add_candidate_block(
                farmer_request.quality_string, height, unfinished_block, False
            )
            message = farmer_protocol.RequestSignedValues(
                farmer_request.quality_string,
                unfinished_block.foliage.foliage_block_data.get_hash(),
                unfinished_block.foliage.foliage_transaction_block_hash,
            )
            await peer.send_message(make_msg(ProtocolMessageTypes.request_signed_values, message))
    return None
# TIMELORD PROTOCOL
@peer_required
@api_request
async def new_infusion_point_vdf(
    self, request: timelord_protocol.NewInfusionPointVDF, peer: ws.WSChiaConnection
) -> Optional[Message]:
    """Handle an infusion-point VDF from the timelord; processed under the timelord lock."""
    if self.full_node.sync_store.get_sync_mode():
        return None
    # Lookup unfinished blocks
    async with self.full_node.timelord_lock:
        return await self.full_node.new_infusion_point_vdf(request, peer)
@peer_required
@api_request
async def new_signage_point_vdf(
    self, request: timelord_protocol.NewSignagePointVDF, peer: ws.WSChiaConnection
) -> None:
    """Repackage a timelord signage-point VDF as a full-node message and process it locally."""
    if self.full_node.sync_store.get_sync_mode():
        return None

    wrapped = full_node_protocol.RespondSignagePoint(
        request.index_from_challenge,
        request.challenge_chain_sp_vdf,
        request.challenge_chain_sp_proof,
        request.reward_chain_sp_vdf,
        request.reward_chain_sp_proof,
    )
    await self.respond_signage_point(wrapped, peer)
@peer_required
@api_request
async def new_end_of_sub_slot_vdf(
    self, request: timelord_protocol.NewEndOfSubSlotVDF, peer: ws.WSChiaConnection
) -> Optional[Message]:
    """Process an end-of-sub-slot bundle from the timelord; on failure, re-send the peak."""
    if self.full_node.sync_store.get_sync_mode():
        return None
    cc_hash = request.end_of_sub_slot_bundle.challenge_chain.get_hash()
    if self.full_node.full_node_store.get_sub_slot(cc_hash) is not None:
        # Already known; nothing to do.
        return None
    # Calls our own internal message to handle the end of sub slot, and potentially broadcasts to other peers.
    full_node_message = full_node_protocol.RespondEndOfSubSlot(request.end_of_sub_slot_bundle)
    msg, added = await self.full_node.respond_end_of_sub_slot(full_node_message, peer)
    if added:
        return msg
    self.log.error(
        f"Was not able to add end of sub-slot: "
        f"{request.end_of_sub_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge}. "
        f"Re-sending new-peak to timelord"
    )
    await self.full_node.send_peak_to_timelords(peer=peer)
    return None
@api_request
async def request_block_header(self, request: wallet_protocol.RequestBlockHeader) -> Optional[Message]:
    """Return the header block at the requested height, or a rejection if the height is unknown."""
    header_hash = self.full_node.blockchain.height_to_hash(request.height)
    if header_hash is None:
        return make_msg(ProtocolMessageTypes.reject_header_request, RejectHeaderRequest(request.height))
    block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
    if block is None:
        return None
    tx_removals, tx_additions = await self.full_node.blockchain.get_tx_removals_and_additions(block)
    header_block = get_block_header(block, tx_additions, tx_removals)
    return make_msg(
        ProtocolMessageTypes.respond_block_header,
        wallet_protocol.RespondBlockHeader(header_block),
    )
@api_request
async def request_additions(self, request: wallet_protocol.RequestAdditions) -> Optional[Message]:
    """Serve the coins added in a given block to a wallet, optionally with Merkle
    inclusion/exclusion proofs for requested puzzle hashes."""
    block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(request.header_hash)

    # We lock so that the coin store does not get modified
    if (
        block is None
        or block.is_transaction_block() is False
        or self.full_node.blockchain.height_to_hash(block.height) != request.header_hash
    ):
        reject = wallet_protocol.RejectAdditionsRequest(request.height, request.header_hash)
        msg = make_msg(ProtocolMessageTypes.reject_additions_request, reject)
        return msg

    assert block is not None and block.foliage_transaction_block is not None

    # Note: this might return bad data if there is a reorg in this time
    additions = await self.full_node.coin_store.get_coins_added_at_height(block.height)

    # Re-check chain membership to narrow the reorg window.
    if self.full_node.blockchain.height_to_hash(block.height) != request.header_hash:
        raise ValueError(f"Block {block.header_hash} no longer in chain")

    # Group added coins by puzzle hash.
    puzzlehash_coins_map: Dict[bytes32, List[Coin]] = {}
    for coin_record in additions:
        if coin_record.coin.puzzle_hash in puzzlehash_coins_map:
            puzzlehash_coins_map[coin_record.coin.puzzle_hash].append(coin_record.coin)
        else:
            puzzlehash_coins_map[coin_record.coin.puzzle_hash] = [coin_record.coin]

    coins_map: List[Tuple[bytes32, List[Coin]]] = []
    proofs_map: List[Tuple[bytes32, bytes, Optional[bytes]]] = []

    if request.puzzle_hashes is None:
        # No filter: return everything, no proofs.
        for puzzle_hash, coins in puzzlehash_coins_map.items():
            coins_map.append((puzzle_hash, coins))
        response = wallet_protocol.RespondAdditions(block.height, block.header_hash, coins_map, None)
    else:
        # Create addition Merkle set
        addition_merkle_set = MerkleSet()
        # Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
        for puzzle, coins in puzzlehash_coins_map.items():
            addition_merkle_set.add_already_hashed(puzzle)
            addition_merkle_set.add_already_hashed(hash_coin_list(coins))

        assert addition_merkle_set.get_root() == block.foliage_transaction_block.additions_root
        for puzzle_hash in request.puzzle_hashes:
            result, proof = addition_merkle_set.is_included_already_hashed(puzzle_hash)
            if puzzle_hash in puzzlehash_coins_map:
                # Inclusion: prove both the puzzle hash and its coin-list hash.
                coins_map.append((puzzle_hash, puzzlehash_coins_map[puzzle_hash]))
                hash_coin_str = hash_coin_list(puzzlehash_coins_map[puzzle_hash])
                result_2, proof_2 = addition_merkle_set.is_included_already_hashed(hash_coin_str)
                assert result
                assert result_2
                proofs_map.append((puzzle_hash, proof, proof_2))
            else:
                # Exclusion: empty coin list plus a non-inclusion proof.
                coins_map.append((puzzle_hash, []))
                assert not result
                proofs_map.append((puzzle_hash, proof, None))
        response = wallet_protocol.RespondAdditions(block.height, block.header_hash, coins_map, proofs_map)
    msg = make_msg(ProtocolMessageTypes.respond_additions, response)
    return msg
@api_request
async def request_removals(self, request: wallet_protocol.RequestRemovals) -> Optional[Message]:
    """Serve the coins removed in a given block to a wallet, optionally with
    Merkle inclusion/exclusion proofs for requested coin names.

    Rejects the request when the block is unknown, is not a transaction block,
    or is not part of the main chain at the requested height.
    """
    block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(request.header_hash)
    # Bug fix: get_peak_height() can be None on a fresh/syncing node, and
    # comparing an int against None raises TypeError. Treat "no peak" as a
    # rejection instead of crashing.
    peak_height = self.full_node.blockchain.get_peak_height()
    # We lock so that the coin store does not get modified
    if (
        block is None
        or block.is_transaction_block() is False
        or block.height != request.height
        or peak_height is None
        or block.height > peak_height
        or self.full_node.blockchain.height_to_hash(block.height) != request.header_hash
    ):
        reject = wallet_protocol.RejectRemovalsRequest(request.height, request.header_hash)
        msg = make_msg(ProtocolMessageTypes.reject_removals_request, reject)
        return msg

    assert block is not None and block.foliage_transaction_block is not None

    # Note: this might return bad data if there is a reorg in this time
    all_removals: List[CoinRecord] = await self.full_node.coin_store.get_coins_removed_at_height(block.height)

    # Re-check chain membership to narrow the reorg window.
    if self.full_node.blockchain.height_to_hash(block.height) != request.header_hash:
        raise ValueError(f"Block {block.header_hash} no longer in chain")

    all_removals_dict: Dict[bytes32, Coin] = {}
    for coin_record in all_removals:
        all_removals_dict[coin_record.coin.name()] = coin_record.coin

    coins_map: List[Tuple[bytes32, Optional[Coin]]] = []
    proofs_map: List[Tuple[bytes32, bytes]] = []

    # If there are no transactions, respond with empty lists
    if block.transactions_generator is None:
        proofs: Optional[List]
        if request.coin_names is None:
            proofs = None
        else:
            proofs = []
        response = wallet_protocol.RespondRemovals(block.height, block.header_hash, [], proofs)
    elif request.coin_names is None or len(request.coin_names) == 0:
        # No filter: return every removal, without proofs.
        for removed_name, removed_coin in all_removals_dict.items():
            coins_map.append((removed_name, removed_coin))
        response = wallet_protocol.RespondRemovals(block.height, block.header_hash, coins_map, None)
    else:
        assert block.transactions_generator
        removal_merkle_set = MerkleSet()
        for removed_name, removed_coin in all_removals_dict.items():
            removal_merkle_set.add_already_hashed(removed_name)
        assert removal_merkle_set.get_root() == block.foliage_transaction_block.removals_root
        for coin_name in request.coin_names:
            result, proof = removal_merkle_set.is_included_already_hashed(coin_name)
            proofs_map.append((coin_name, proof))
            if coin_name in all_removals_dict:
                removed_coin = all_removals_dict[coin_name]
                coins_map.append((coin_name, removed_coin))
                assert result
            else:
                # Not removed in this block: exclusion proof with a None coin.
                coins_map.append((coin_name, None))
                assert not result
        response = wallet_protocol.RespondRemovals(block.height, block.header_hash, coins_map, proofs_map)

    msg = make_msg(ProtocolMessageTypes.respond_removals, response)
    return msg
@api_request
async def send_transaction(self, request: wallet_protocol.SendTransaction) -> Optional[Message]:
    """Submit a wallet transaction to the mempool and ack with its inclusion status."""
    spend_name = request.transaction.name()
    status, error = await self.full_node.respond_transaction(request.transaction, spend_name)
    error_name = None if error is None else error.name
    already_in_mempool = self.full_node.mempool_manager.get_spendbundle(spend_name) is not None
    if status != MempoolInclusionStatus.SUCCESS and already_in_mempool:
        # Failed/pending now, but it previously succeeded (still in mempool):
        # idempotent re-submission, so report SUCCESS.
        ack = wallet_protocol.TransactionAck(spend_name, uint8(MempoolInclusionStatus.SUCCESS.value), None)
    else:
        ack = wallet_protocol.TransactionAck(spend_name, uint8(status.value), error_name)
    return make_msg(ProtocolMessageTypes.transaction_ack, ack)
@api_request
async def request_puzzle_solution(self, request: wallet_protocol.RequestPuzzleSolution) -> Optional[Message]:
    """Return the puzzle and solution for a coin spent at the given height, or a rejection.

    Rejects when the coin is unknown, was not spent at that height, or the
    spending block cannot be found / carries no generator.
    """
    coin_name = request.coin_name
    height = request.height
    coin_record = await self.full_node.coin_store.get_coin_record(coin_name)
    reject = wallet_protocol.RejectPuzzleSolution(coin_name, height)
    reject_msg = make_msg(ProtocolMessageTypes.reject_puzzle_solution, reject)
    if coin_record is None or coin_record.spent_block_index != height:
        return reject_msg

    header_hash = self.full_node.blockchain.height_to_hash(height)
    # Bug fix: height_to_hash can return None (height reorged away or unknown);
    # reject instead of passing None into the block store.
    if header_hash is None:
        return reject_msg
    block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)

    if block is None or block.transactions_generator is None:
        return reject_msg

    block_generator: Optional[BlockGenerator] = await self.full_node.blockchain.get_block_generator(block)
    assert block_generator is not None
    error, puzzle, solution = get_puzzle_and_solution_for_coin(
        block_generator, coin_name, self.full_node.constants.MAX_BLOCK_COST_CLVM
    )

    if error is not None:
        return reject_msg

    pz = Program.to(puzzle)
    sol = Program.to(solution)

    wrapper = PuzzleSolutionResponse(coin_name, height, pz, sol)
    response = wallet_protocol.RespondPuzzleSolution(wrapper)
    response_msg = make_msg(ProtocolMessageTypes.respond_puzzle_solution, response)
    return response_msg
@api_request
async def request_header_blocks(self, request: wallet_protocol.RequestHeaderBlocks) -> Optional[Message]:
    """Return header blocks for a range of at most 33 heights, or a rejection if any height is missing."""
    if request.end_height < request.start_height or request.end_height - request.start_height > 32:
        return None

    heights = range(request.start_height, request.end_height + 1)
    if not all(self.full_node.blockchain.contains_height(uint32(h)) for h in heights):
        reject = RejectHeaderBlocks(request.start_height, request.end_height)
        return make_msg(ProtocolMessageTypes.reject_header_blocks, reject)

    header_hashes = [self.full_node.blockchain.height_to_hash(uint32(h)) for h in heights]
    blocks: List[FullBlock] = await self.full_node.block_store.get_blocks_by_hash(header_hashes)
    header_blocks = []
    for block in blocks:
        added_records = await self.full_node.coin_store.get_coins_added_at_height(block.height)
        removed_records = await self.full_node.coin_store.get_coins_removed_at_height(block.height)
        # Coinbase coins are excluded from the additions reported to wallets here.
        added_coins = [record.coin for record in added_records if not record.coinbase]
        removal_names = [record.coin.name() for record in removed_records]
        header_blocks.append(get_block_header(block, added_coins, removal_names))

    return make_msg(
        ProtocolMessageTypes.respond_header_blocks,
        wallet_protocol.RespondHeaderBlocks(request.start_height, request.end_height, header_blocks),
    )
@api_request
async def respond_compact_proof_of_time(self, request: timelord_protocol.RespondCompactProofOfTime):
    """Forward a compact proof of time from the timelord to the full node; ignored while syncing."""
    if self.full_node.sync_store.get_sync_mode():
        return None
    await self.full_node.respond_compact_proof_of_time(request)
@execute_task
@peer_required
@api_request
async def new_compact_vdf(self, request: full_node_protocol.NewCompactVDF, peer: ws.WSChiaConnection):
    """Handle a peer's announcement of a compact VDF, bounded by a semaphore; ignored while syncing."""
    if self.full_node.sync_store.get_sync_mode():
        return None

    # this semaphore will only allow a limited number of tasks call
    # new_compact_vdf() at a time, since it can be expensive
    async with self.full_node.compact_vdf_sem:
        await self.full_node.new_compact_vdf(request, peer)
@peer_required
@api_request
async def request_compact_vdf(self, request: full_node_protocol.RequestCompactVDF, peer: ws.WSChiaConnection):
    """Serve a peer's request for a compact VDF, unless this node is syncing."""
    if not self.full_node.sync_store.get_sync_mode():
        await self.full_node.request_compact_vdf(request, peer)
@peer_required
@api_request
async def respond_compact_vdf(self, request: full_node_protocol.RespondCompactVDF, peer: ws.WSChiaConnection):
    """Process a peer's compact VDF response, unless this node is syncing."""
    if not self.full_node.sync_store.get_sync_mode():
        await self.full_node.respond_compact_vdf(request, peer)
| snight1983/chia-rosechain | chia/full_node/full_node_api.py | full_node_api.py | py | 61,930 | python | en | code | 369 | github-code | 36 | [
{
"api_name": "chia.full_node.full_node.FullNode",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "chia.protocols.full_node_protocol.RequestPeers",
"line_number": 65,
"usage_type": "attr... |
import tbapy
import pyperclip

# NOTE(review): this TBA read key is committed in source; consider moving it
# to an environment variable.
TBA_AUTH_KEY = 'KiFI9IObf1xbtTKuLzSu6clL006qHK1Lh5Xy65i1zSDutDcvsYJWwliU1svWKVzX'

event = input("Enter the event key: ")


def getEventMatchTeams(key):
    """Return the qualification-match alliances for a TBA event.

    The result is ordered by match number; each entry is a dict with 'red'
    and 'blue' lists of team numbers ('frc' prefix stripped).
    """
    tba = tbapy.TBA(TBA_AUTH_KEY)
    matches = tba.event_matches(key, simple=True)
    # Keep only qualification matches, recording the match number plus the
    # team keys of each alliance.
    alliances = []
    for match in matches:
        if match['comp_level'] != 'qm':
            continue
        entry = {
            'match_num': match['match_number'],
            'red': match['alliances']['red']['team_keys'],
            'blue': match['alliances']['blue']['team_keys'],
        }
        # Strip the 'frc' prefix from each of the three team keys per side.
        for color in ('red', 'blue'):
            keys = entry[color]
            keys[0] = keys[0][3:]
            keys[1] = keys[1][3:]
            keys[2] = keys[2][3:]
        alliances.append(entry)
    alliances.sort(key=lambda a: a['match_num'])
    # The match number was only needed for sorting.
    for entry in alliances:
        del entry['match_num']
    return alliances


teams = getEventMatchTeams(event)
print("\n" + str(teams))
pyperclip.copy(str(teams))
input("\n\033[1m\033[92mCopied to clipboard!\033[0m\n\nPress any key to continue.")
{
"api_name": "tbapy.TBA",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pyperclip.copy",
"line_number": 33,
"usage_type": "call"
}
] |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# Disable Chrome's "controlled by automated software" info bar.
from selenium.webdriver.chrome.options import Options

chrome_options = Options()
chrome_options.add_argument("--disable-infobars")

# NOTE(review): `chrome_options=` and `find_element_by_name` are deprecated
# in Selenium 4 (`options=` / `find_element(By.NAME, ...)`) — confirm the
# pinned selenium version before modernizing.
driver = webdriver.Chrome(executable_path="/usr/bin/chromedriver", chrome_options=chrome_options)
driver.get("https://www.baidu.com/")

search_box = driver.find_element_by_name("wd")
search_box.send_keys("lufei")
search_box.send_keys(Keys.RETURN)
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 10,
"usage_type": "name"
},
{
... |
44648975893 | #!/usr/bin/env python
# coding: utf-8
from copy import deepcopy
import pandas as pd
import numpy as np
pd.set_option("display.max_colwidth", None)
def run_adult_experiments_trees_taxonomies(
    name_output_dir="output",
    type_experiment="one_at_time",
    type_criterion="divergence_criterion",
    min_support_tree=0.1,
    min_sup_divergences=[0.01, 0.02, 0.03, 0.04, 0.05, 0.1, 0.15, 0.2],
    metric="d_outcome",
    verbose=False,
    ouput_folder_dir=".",
    minimal_gain = 0,
):
    """Run tree-based divergence experiments on the adult-income taxonomy data.

    For every minimum support in `min_sup_divergences`, extracts divergent
    pattern sets with and without generalization, and records per setting the
    number of patterns ("FP"), the maximum divergence ("max") and wall-clock
    timings. Results are written as JSON under
    `ouput_folder_dir/name_output_dir/<dataset>/<criterion>/stree_<sup>/<metric>/`.

    NOTE(review): `min_sup_divergences` uses a mutable default list, and the
    `metric` parameter is unconditionally overwritten below — both look
    unintended; confirm before relying on these parameters.
    """
    # Collected statistics: out[info][gen_type][min_sup] -> value.
    info_list = ["FP", "max"]
    type_gens = ["with_gen", "without_gen"]
    out = {k: {} for k in info_list}
    for i in info_list:
        out[i] = {k: {} for k in type_gens}
    # # Dataset
    # NOTE(review): this line is a no-op (`None if x == None else x` returns
    # x unchanged for every input); possibly `minimal_gain == 0` was
    # intended — confirm.
    minimal_gain = None if minimal_gain == None else minimal_gain
    dataset_name = "adult_income_taxonomy"
    import os
    import pandas as pd
    filename_d = os.path.join(
        os.path.curdir, "datasets", "ACSPUMS", "adult_dataset_income_tax.csv"
    )
    dfI = pd.read_csv(filename_d)
    attributes = list(dfI.columns.drop("income"))
    continuous_attributes = ["AGEP", "WKHP"]
    # NOTE(review): overwrites the `metric` parameter (see docstring).
    metric = "d_outcome"
    target = "income"
    dfI = dfI[attributes + [target]]
    import os
    # # Tree divergence - FPR
    all_time_results = {}
    time_results = {"with_gen": {}, "without_gen": {}}
    df_analyze = dfI.copy()
    import time
    start_time_tree = time.time()
    from tree_discretization_ranking import TreeDiscretization_ranking
    tree_discr = TreeDiscretization_ranking()
    # ## Extract tree
    # The discretization tree is induced once and reused for every support
    # threshold below.
    generalization_dict, discretizations = tree_discr.get_tree_discretization(
        df_analyze,
        type_splitting=type_experiment,
        min_support=min_support_tree,
        metric=metric,
        continuous_attributes=list(continuous_attributes),
        storeTree=True,
        type_criterion=type_criterion,
        minimal_gain=minimal_gain,
        target_col=target
        # minimal_gain = 0.0015
    )
    time_results["tree_time"] = time.time() - start_time_tree
    import json
    # Merge the learned generalizations with the hand-written taxonomies.
    with open(os.path.join(os.path.curdir, "datasets", "ACSPUMS", "adult_taxonomies.json"), "r") as fp:
        generalization_dict_tax = json.load(fp)
    generalization_dict_all = deepcopy(generalization_dict)
    generalization_dict_all.update(generalization_dict_tax)
    for min_sup_divergence in min_sup_divergences:
        if verbose:
            print(min_sup_divergence, end=" ")
        # ## Extract divergence - 1 function
        for apply_generalization in [False, True]:
            if apply_generalization == False:
                type_gen = "without_gen"
            else:
                type_gen = "with_gen"
            from utils_extract_divergence_generalized_ranking import (
                extract_divergence_generalized,
            )
            # Overlap between patterns is only allowed in the
            # "all_attributes" mode; in that mode the non-generalized run
            # is skipped entirely.
            allow_overalp = (
                True if type_experiment == "all_attributes" else False
            )
            if (allow_overalp) and (apply_generalization is False):
                continue
            start_time_divergence = time.time()
            FP_fm = extract_divergence_generalized(
                df_analyze,
                discretizations,
                generalization_dict,
                continuous_attributes,
                min_sup_divergence=min_sup_divergence,
                apply_generalization=apply_generalization,
                target_name=target,
                FPM_type="fpgrowth",
                metrics_divergence=[metric],
                allow_overalp=allow_overalp,
                type_experiment=type_experiment,
            )
            time_results[type_gen][min_sup_divergence] = (
                time.time() - start_time_divergence
            )
            if verbose:
                print(f"({round( time.time() - start_time_divergence,2)})", end = " ")
            from divexplorer_generalized.FP_Divergence import FP_Divergence
            fp_i = FP_Divergence(FP_fm, metric)
            FP_fm = fp_i.getDivergence(th_redundancy=0)
            # Record pattern count and highest divergence for this setting.
            out["FP"][type_gen][
                float(min_sup_divergence)
            ] = FP_fm.shape[0]
            out["max"][type_gen][
                float(min_sup_divergence)
            ] = max(FP_fm[metric])
        all_time_results = time_results
        if verbose:
            print()
    # Persist timings and statistics as JSON files.
    outputdir = os.path.join(
        ouput_folder_dir,
        name_output_dir,
        dataset_name,
        type_criterion,
        f"stree_{min_support_tree}",
        metric,
    )
    from pathlib import Path
    Path(outputdir).mkdir(parents=True, exist_ok=True)
    import json
    filename = os.path.join(
        outputdir,
        f"info_time.json",
    )
    with open(
        filename,
        "w",
    ) as fp:
        json.dump(all_time_results, fp)
    for i in info_list:
        output = out[i]
        filename = os.path.join(
            outputdir,
            f"info_ALL_{i}.json",
        )
        with open(
            filename,
            "w",
        ) as fp:
            json.dump(output, fp)
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--name_output_dir",
        default="output_res",
        help="specify the name of the output folder",
    )
    parser.add_argument(
        "--type_criterion",
        type=str,
        default="divergence_criterion",
        help='specify the experiment type among ["divergence_criterion", "entropy"]',
    )
    parser.add_argument(
        "--min_sup_tree",
        type=float,
        default=0.1,
        help="specify the minimum support for the tree induction",
    )
    parser.add_argument(
        "--show_fig",
        action="store_true",
        help="specify show_fig to show the tree graph.",
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="specify verbose to print working in progress status.",
    )
    parser.add_argument(
        "--no_compute_divergence",
        action="store_true",
        help="specify no_compute_divergence to not compute the divergence scores.",
    )
    parser.add_argument(
        "--type_experiments",
        nargs="*",
        type=str,
        default=[
            "one_at_time",
            "all_attributes",
        ],  # , "all_attributes_continuous"], #"",
        help="specify the types of experiments to evaluate among ['one_at_time', 'all_attributes', 'all_attributes_continuous']",
    )
    parser.add_argument(
        "--min_sup_divs",
        nargs="*",
        type=float,
        default=[
            0.01,
            0.02,
            0.03,
            0.04,
            0.05,
            0.1,
            0.15,
            0.2,
            0.25,
            0.3,
            0.35,
        ],
        help="specify a list of min supports of interest, with values from 0 to 1",
    )
    parser.add_argument(
        "--metrics",
        nargs="*",
        type=str,
        default=["d_outcome"],  # , "d_fnr", "d_error"]
        help="specify a list of metric of interest, ['d_fpr', 'd_fnr', 'd_error', 'd_accuracy', 'd_outcome']",
    )
    parser.add_argument(
        "--minimal_gain",
        type=float,
        default=0.0,
        help="specify the minimal_gain for the tree induction",
    )
    args = parser.parse_args()

    # BUG FIX: run_adult_experiments_trees_taxonomies() takes
    # `type_experiment` and `metric` (singular) and has no `show_fig`,
    # `metrics`, `type_experiments` or `no_compute_divergence` parameters;
    # the original call passed those names and crashed with a TypeError at
    # startup. Run the experiment once per (experiment type, metric)
    # combination instead. `--show_fig` and `--no_compute_divergence` are
    # still accepted for CLI compatibility but are not supported by the
    # runner function and are ignored here.
    for type_experiment in args.type_experiments:
        for metric in args.metrics:
            run_adult_experiments_trees_taxonomies(
                type_criterion=args.type_criterion,
                name_output_dir=args.name_output_dir,
                type_experiment=type_experiment,
                min_support_tree=args.min_sup_tree,
                min_sup_divergences=args.min_sup_divs,
                metric=metric,
                verbose=args.verbose,
                minimal_gain=args.minimal_gain,
            )
| elianap/h-divexplorer | experiments_adult_trees_taxonomies.py | experiments_adult_trees_taxonomies.py | py | 7,907 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pandas.set_option",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number... |
650843683 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from pymongo import MongoClient
class AqdyPipeline(object):
    """Scrapy pipeline that numbers scraped play-link items sequentially.

    A MongoDB connection is opened per spider run; the actual insert is
    currently commented out, so items are only counted and passed through.
    """

    def process_item(self, item, spider):
        record = {}
        record["_id"] = self.count
        record["play_pic_url"] = item["play_pic_url"]
        record["xfplay_link"] = item["xfplay_link"]
        # Insert into the database (currently disabled).
        # self.collection.insert(record)
        self.count += 1
        return item

    def open_spider(self, spider):
        self.client = MongoClient()
        self.collection = self.client["pySpider"]["lusi"]
        self.count = 1
        print("数据库以连接...")

    def close_spider(self, spider):
        self.client.close()
        print("数据库连接关闭")
| jihongzhu/python- | aqdy/aqdy/pipelines.py | pipelines.py | py | 887 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 21,
"usage_type": "call"
}
] |
74470689064 | import os
import datetime
import glob
import urllib.request
import tqdm
import gzip
import pandas as pd
import re
import utils
import random
from time import gmtime, strftime
from multiprocessing import Process
config = __import__('0_config')
def clean_row(row):
    """Decode a raw bytes line from the EDGAR dump, dropping invalid bytes."""
    return row.decode('utf-8', 'ignore').strip()


def compute_year_quarter(START_YEAR, START_QUARTER):
    """List every (year, quarter) pair from the given start up to today.

    Quarters of the current year that have not happened yet, and quarters
    before START_QUARTER in the first year, are excluded.
    """
    today = datetime.datetime.now()
    current_year = today.year
    current_quarter = (today.month - 1) // 3 + 1
    pairs = [(y, q) for y in range(START_YEAR, current_year + 1) for q in range(1, 4 + 1)]
    # We didn't reach the fourth quarter yet: trim the trailing quarters.
    if current_quarter < 4:
        pairs = pairs[0:-(4 - current_quarter)]
    # The first report may not fall in the first quarter of START_YEAR.
    if START_QUARTER > 1:
        pairs = pairs[START_QUARTER - 1:]
    return pairs
def download_files(year_x_quarter, URL_INDEX_PATTERN, DATA_GZ_FOLDER, START_YEAR):
    """Download the quarterly EDGAR index archives not yet present on disk.

    `year_x_quarter` is mutated in place: pairs whose archive already exists
    are removed before downloading. Returns True when at least one new
    archive was fetched.
    """
    if not os.path.isdir(DATA_GZ_FOLDER):
        os.makedirs(DATA_GZ_FOLDER)
    # Don't download already downloaded files.
    # `idx` maps a (year, quarter) to its position in the original, fully
    # populated list; `id` compensates for entries deleted in earlier
    # iterations, since each deletion shifts later indices left.
    for id, gz_file in enumerate(glob.glob(DATA_GZ_FOLDER + '/*.gz')):
        y, q = [int(x) for x in gz_file[gz_file.rfind('/') + 1: gz_file.rfind('.')].split('_')]
        idx = (y - START_YEAR) * 4 + q - 1
        idx -= id  # Removing an element in the list will translate all indices
        assert (y,q) == year_x_quarter[idx]
        del year_x_quarter[idx]
    new_data = False
    # Download GZ files for whatever is left.
    for y, q in tqdm.tqdm(year_x_quarter, desc='Downloading company\' indices'):
        url = URL_INDEX_PATTERN.format(quarter=q, year=y)
        filename = os.path.join(DATA_GZ_FOLDER, '{year}_{quarter}.gz'.format(year=y, quarter=q))
        urllib.request.urlretrieve(url, filename)
        new_data = True
    return new_data
def read_data(DATA_GZ_FOLDER, DATA_PD_FOLDER, PDF_MERGE_FILE):
    """Parse every downloaded EDGAR index archive into Pandas DataFrames.

    Each quarter is cached as a pickled DataFrame in DATA_PD_FOLDER; the
    merged frame over all quarters is cached at PDF_MERGE_FILE. Returns
    (per-quarter frames, merged frame, column names).
    """
    extra_keys = ['year', 'quarter']
    if not os.path.isdir(DATA_PD_FOLDER):
        os.makedirs(DATA_PD_FOLDER)
    pdfs = {}
    keys = None
    pattern = re.compile(" *")
    # If every quarter came from cache, the merged pickle is still valid.
    can_safely_load_pdf_merge = True
    for file in tqdm.tqdm(glob.glob(DATA_GZ_FOLDER + '/*.gz'), desc='Processing company indices'):
        y, q = [int(x) for x in file[file.rfind('/')+1:file.rfind('.')].split('_')]
        if y not in pdfs:
            pdfs[y] = {}
        if q not in pdfs[y]:
            pdfs[y][q] = []
        filename = os.path.join(DATA_PD_FOLDER, '{year}_{quarter}.pd'.format(year=y, quarter=q))
        # Read dataframe or process GZ file it if not exists
        if os.path.isfile(filename):
            pdfs[y][q] = pd.read_pickle(filename)
            if keys is None:
                keys = list(pdfs[y][q].columns.values)
        else:
            can_safely_load_pdf_merge = False
            # Read file
            with gzip.open(file, 'r') as fp:
                # Skip the preamble; line 9 is the header row, line 10 a rule.
                for i in range(8):
                    next(fp)
                if keys is None:
                    keys = re.split(pattern, clean_row(fp.readline())) + extra_keys
                else:
                    next(fp)
                next(fp)
                # Raw data: columns are separated by runs of 2+ spaces, so
                # scan each line right-to-left and split on such runs.
                for row in fp:
                    row = clean_row(row)
                    attributes = []
                    attribute = ''
                    spaces = 0
                    for c in row[::-1]:
                        if c == ' ':
                            spaces += 1
                        else:
                            spaces = 0
                        if spaces < 2:
                            attribute += c
                        elif attribute != ' ':
                            attributes.append(attribute[::-1].strip())
                            attribute = ''
                            spaces = 0
                    if attribute != '':
                        attributes.append(attribute[::-1].strip())
                    attributes = attributes[::-1]
                    # If too many fields were produced, the company name was
                    # split by accident: re-merge leading fields.
                    if len(attributes) >= (len(keys) - len(extra_keys)):
                        while len(attributes) > (len(keys) - len(extra_keys)):
                            attributes[0] += ' ' + attributes[1]
                            del attributes[1]
                        pdfs[y][q].append(attributes)
            # Transform to Pandas dataframe
            pdfs[y][q] = pd.DataFrame.from_records([t + [y, q] for t in pdfs[y][q]], columns=keys)
            pdfs[y][q].to_pickle(os.path.join(DATA_PD_FOLDER, '{year}_{quarter}.pd'.format(year=y, quarter=q)))
    pdfs_merged = None
    if not can_safely_load_pdf_merge:
        pdfs_merged = pd.DataFrame([], columns=keys)
        # NOTE(review): the loop variable shadows the `pdfs` dict; harmless
        # only because the dict is not used afterwards — confirm.
        for pdfs in tqdm.tqdm([pdfs[y][q] for y in sorted(pdfs.keys()) for q in sorted(pdfs[y].keys())], desc='Combining Pandas DataFrames'):
            pdfs_merged = pdfs_merged.append(pdfs, ignore_index=True)
        pdfs_merged.to_pickle(PDF_MERGE_FILE)
    else:
        pdfs_merged = pd.read_pickle(PDF_MERGE_FILE)
    return pdfs, pdfs_merged, keys
def download_annual_reports(pdfs_10k, DATA_AR_FOLDER, NAME_FILE_PER_CIK, URL_ROOT, LOG_FILE):
    """Download every 10-K filing listed in `pdfs_10k` into per-CIK folders.

    Also writes, once per CIK, a file listing the company names seen for
    that CIK. Downloads run sequentially, or fanned out over
    `config.NUM_CORES` worker processes when `config.MULTITHREADING` is set.
    """
    if not os.path.isdir(DATA_AR_FOLDER):
        os.makedirs(DATA_AR_FOLDER)
    # Create CIK folders and also the file containing all the related names
    for cik, pdf in tqdm.tqdm(pdfs_10k.groupby('CIK'), desc='Creating company folders'):
        company_names = pdf['Company Name'].unique()
        folder = os.path.join(DATA_AR_FOLDER, cik)
        if not os.path.isdir(folder):
            os.mkdir(folder)
        name_file = os.path.join(folder, NAME_FILE_PER_CIK)
        if not os.path.exists(name_file):
            with open(name_file, 'a', encoding='utf-8') as fp:
                for company_name in company_names:
                    fp.write(company_name + '\n')
    # Download all annual reports
    if config.MULTITHREADING:
        print('Downloading company\' annual reports')
        whole_entries = [row for idx, row in pdfs_10k.iterrows()]
        rows = utils.chunks(whole_entries, 1 + int(len(whole_entries) / config.NUM_CORES))
        random.shuffle(rows) # Better separate work load
        del whole_entries # High memory consumption
        procs = []
        for i in range(config.NUM_CORES):
            procs.append(Process(target=_download_annual_reports_process, args=(DATA_AR_FOLDER, LOG_FILE, URL_ROOT, rows[i])))
            procs[-1].start()
        for p in procs:
            p.join()
    else:
        for idx, row in tqdm.tqdm(pdfs_10k.iterrows(), desc='Downloading company\' annual reports'):
            _download_annual_reports(DATA_AR_FOLDER, LOG_FILE, URL_ROOT, row)


def _download_annual_reports_process(DATA_AR_FOLDER, LOG_FILE, URL_ROOT, rows):
    # Worker-process entry point: download this worker's chunk of rows.
    for row in rows:
        _download_annual_reports(DATA_AR_FOLDER, LOG_FILE, URL_ROOT, row)


def _download_annual_reports(DATA_AR_FOLDER, LOG_FILE, URL_ROOT, row):
    # Download a single filing; on failure, log it and delete any partial
    # file so a later run retries the download.
    folder = os.path.join(DATA_AR_FOLDER, row['CIK'])
    url = URL_ROOT + row['File Name']
    filename = os.path.join(folder, url[url.rfind('/') + 1:])
    if not os.path.exists(filename):
        try:
            urllib.request.urlretrieve(url, filename)
        # NOTE(review): bare except also swallows KeyboardInterrupt —
        # consider `except Exception`.
        except:
            with open(LOG_FILE, 'a') as fp:
                fp.write('{}: {}, {} couldn\'t be downloaded\n'.format(strftime("%d-%m-%Y %H:%M:%S", gmtime()), url, filename))
            if os.path.exists(filename):
                os.remove(filename)
if __name__ == "__main__":
    # Deterministic chunk shuffling across runs; start from a fresh log.
    random.seed(config.SEED)
    if os.path.exists(config.LOG_FILE):
        os.remove(config.LOG_FILE)
    # Compute indices for the years and quarters
    year_x_quarter = compute_year_quarter(config.START_YEAR, config.START_QUARTER)
    # Download all indices related to the determined years and quarters
    need_to_process = download_files(year_x_quarter, config.URL_INDEX_PATTERN, config.DATA_GZ_FOLDER, config.START_YEAR)
    # If nothing has changed, load the final dataframe
    if need_to_process or not os.path.exists(config.PDF_MERGE_10K_FILE):
        # Process the data
        pdfs, pdfs_merge, keys = read_data(config.DATA_GZ_FOLDER, config.DATA_PD_FOLDER, config.PDF_MERGE_FILE)
        # Filter only 10k annual reports
        pdfs_10k = pdfs_merge[(pdfs_merge['Form Type'] == config.FORM_TYPE)]
        pdfs_10k.to_pickle(config.PDF_MERGE_10K_FILE)
    else:
        pdfs_10k = pd.read_pickle(config.PDF_MERGE_10K_FILE)
    # Download annual reports
    download_annual_reports(pdfs_10k, config.DATA_AR_FOLDER, config.NAME_FILE_PER_CIK, config.URL_ROOT, config.LOG_FILE)
| Diego999/Risk-Analysis-using-Topic-Models-on-Annual-Reports | 1_download_data.py | 1_download_data.py | py | 8,603 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
28915101395 | import copy
from pathlib import Path
from collections import defaultdict
import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset
try:
import datasets as hf_datasets
except ImportError:
pass
def get_texts_df(dir_path):
    """Read every *.txt file in `dir_path` into a DataFrame of (id, text).

    Rows are ordered by file stem; trailing whitespace is stripped from
    each text.
    """
    txt_files = sorted(
        (p for p in dir_path.iterdir() if p.is_file() and p.suffix == ".txt"),
        key=lambda p: p.stem,
    )
    rows = []
    for p in txt_files:
        with open(p) as handle:
            rows.append((p.stem, handle.read().rstrip()))
    return pd.DataFrame(rows, columns=["id", "text"])


def get_dfs(path):
    """Load (train texts, train labels, test texts) from the dataset root."""
    root = Path(path)
    test_texts = get_texts_df(root / "test")
    train_df = pd.read_csv(root / "train.csv")
    train_texts = get_texts_df(root / "train")
    return train_texts, train_df, test_texts
def get_block_dataset(df, text_column, tokenizer, max_len, seed):
    """Shuffle `df`, tokenize `text_column`, and repack the whole corpus into
    fixed-length token blocks (for masked/causal LM pre-training).
    """
    if max_len is None:
        max_len = tokenizer.model_max_length
    dataset = hf_datasets.Dataset.from_pandas(df)
    dataset = dataset.shuffle(seed)
    def tokenize(examples):
        # Tokenize without special tokens; a single SEP token is appended to
        # each document as a separator before concatenation.
        tokenized = tokenizer(
            examples[text_column], add_special_tokens=False, return_attention_mask=False
        )
        for tokens in tokenized.input_ids:
            tokens.append(tokenizer.sep_token_id)
        return tokenized
    dataset = dataset.map(
        tokenize,
        batched=True,
        remove_columns=dataset.column_names,
        desc="Tokenizing examples...",
    )
    def blockify(examples):
        # Concatenate all documents in the batch, then cut into chunks of
        # max_len - 2 tokens so each block can receive its own special
        # tokens; a trailing remainder shorter than a full block is dropped.
        all = []
        for sub in examples["input_ids"]:
            all.extend(sub)
        sub_max = max_len - 2
        block_starts = range(0, len(all) - sub_max + 1, sub_max)
        blocks = [
            tokenizer.build_inputs_with_special_tokens(all[i : i + sub_max])
            for i in block_starts
        ]
        examples = {"input_ids": blocks}
        return examples
    dataset = dataset.map(
        blockify,
        batched=True,
        desc="Chunking examples...",
    )
    return dataset
def split_offsets(line):
    """Return (start, end) character offsets for each whitespace-split word."""
    offsets = []
    cursor = 0
    for word in line.split():
        start = line.index(word, cursor)
        cursor = start + len(word)
        offsets.append((start, cursor))
    return offsets
# Discourse element types, in the order used to derive label ids.
labels = [
    "Lead",
    "Position",
    "Evidence",
    "Claim",
    "Concluding Statement",
    "Counterclaim",
    "Rebuttal",
]


def get_label_ids(labels):
    """Build BIO-style label mappings.

    Id 0 is "outside" ((None, 0)); each type gets a begin id (odd, flag
    True) and an inside id (even, flag False).
    """
    label_to_id = {(None, 0): 0}
    id_to_label = {0: (None, 0)}
    for index, name in enumerate(labels):
        begin_id = 2 * index + 1
        inside_id = 2 * index + 2
        label_to_id[(name, True)] = begin_id
        label_to_id[(name, False)] = inside_id
        id_to_label[begin_id] = (name, True)
        id_to_label[inside_id] = (name, False)
    return label_to_id, id_to_label


label_to_id, id_to_label = get_label_ids(labels)
max_labels = len(label_to_id)
def get_answer_dict(df):
    """Group ground-truth spans by text id: id -> sorted [(word_ids, type)]."""
    answers = defaultdict(list)
    for row in df.itertuples():
        word_ids = [int(w) for w in row.predictionstring.split()]
        answers[row.id].append((word_ids, row.discourse_type))
    for spans in answers.values():
        spans.sort()
    return answers


def get_clean_answers(answers):
    """Return a deep copy of `answers` with overlapping spans trimmed.

    When a span starts at or before the previous kept span's last word,
    either the previous span loses its last word or the current span loses
    its first (single-word overlaps may be dropped entirely).
    """
    answers = copy.deepcopy(answers)
    for spans in answers.values():
        prev_words = [-1]
        kept = []
        for words, label in spans:
            if prev_words[-1] >= words[0]:
                if len(prev_words) == 1:
                    if len(words) == 1:
                        continue
                    words.pop(0)
                else:
                    # Shrinking prev_words also mutates the entry already in
                    # `kept`, which is the intent.
                    prev_words.pop()
            kept.append((words, label))
            prev_words = words
        spans.clear()
        spans.extend(kept)
    return answers


def check_answer_dict(answers):
    """True iff every span is non-empty and word ids strictly increase."""
    for spans in answers.values():
        last = -1
        for words, _ in spans:
            if not words:
                return False
            for word_id in words:
                if last >= word_id:
                    return False
                last = word_id
    return True
def get_word_dict(texts):
    """Map each text id to the word offsets of its text."""
    return {row.id: split_offsets(row.text) for row in texts.itertuples()}


def overlap(a, b, c, d):
    """True iff the half-open intervals [a, b) and [c, d) intersect."""
    return a < d and c < b


def intersect_ranges(ranges, items):
    """For each of the sorted, non-overlapping `ranges`, collect the indices
    of the sorted, non-overlapping `items` that fall into it.

    A single cursor sweeps `items` once across all ranges.
    """
    groups = []
    cursor = 0
    for start, stop in ranges:
        members = []
        while cursor < len(items) and items[cursor][0] < stop:
            if start < items[cursor][1]:
                members.append(cursor)
            cursor += 1
        groups.append(members)
    return groups
def get_target(token_offsets, answers, word_offsets, overflow_to_sample):
    """Build the (num_chunks, seq_len) tensor of per-token label ids.

    `token_offsets` holds per-token character spans for each tokenized
    chunk; `overflow_to_sample` maps chunks back to their source sample.
    For each answer span, the first overlapped token (across all chunks of
    the sample) receives the "begin" id and the rest the "inside" id.
    """
    # Character span of each answer: first word's start to last word's end.
    answer_ranges = [
        [(word_offset[words[0]][0], word_offset[words[-1]][1]) for words, _ in answer]
        for answer, word_offset in zip(answers, word_offsets)
    ]
    # (sample, answer) pairs whose begin token was already emitted.
    answer_seen = set()
    target = torch.zeros(token_offsets.size()[:-1], dtype=torch.long)
    for i, token_offset in enumerate(token_offsets.tolist()):
        j = overflow_to_sample[i].item()
        answer_tokens = intersect_ranges(answer_ranges[j], token_offset)
        for k, answer_token in enumerate(answer_tokens):
            label = answers[j][k][1]
            label1 = label_to_id[(label, False)]
            # Begin label only for the first chunk that touches this answer.
            label0 = label_to_id[(label, True)] if (j, k) not in answer_seen else label1
            target[i, answer_token[0:1]] = label0
            target[i, answer_token[1:]] = label1
            if len(answer_token) > 0:
                answer_seen.add((j, k))
    return target
class FeedbackDataset(Dataset):
    """Torch dataset over essays with their labeled discourse spans.

    `__getitem__` returns (text, answer, word_offsets); tokenization (with
    overflow chunks) and target construction happen in the collate fn.
    """

    def __init__(self, texts, df, tokenizer, max_len, stride, pad_to_multiple_of):
        self.texts = texts
        # Ground-truth spans per text id, with overlapping spans trimmed.
        self.answers = get_answer_dict(df)
        self.answers = get_clean_answers(self.answers)
        self.words = get_word_dict(texts)
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.stride = stride
        self.pad_to_multiple_of = pad_to_multiple_of

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        text = self.texts.loc[idx, "text"]
        text_id = self.texts.loc[idx, "id"]
        answer = self.answers[text_id]
        words = self.words[text_id]
        return text, answer, words

    def get_collate_fn(self):
        """Return a collate fn producing (batch_size, inputs, target, words, answers)."""
        def collate_fn(examples):
            text, answer, words = [list(a) for a in zip(*examples)]
            # Long texts overflow into extra stride-overlapped chunks;
            # offsets are kept to align tokens with answer spans.
            inputs = self.tokenizer(
                text,
                add_special_tokens=True,
                padding=True,
                truncation=True,
                return_overflowing_tokens=True,
                return_offsets_mapping=True,
                max_length=self.max_len,
                stride=self.stride,
                return_tensors="pt",
                pad_to_multiple_of=self.pad_to_multiple_of,
            )
            target = get_target(
                inputs.offset_mapping,
                answer,
                words,
                inputs.overflow_to_sample_mapping,
            )
            return len(examples), inputs, target, words, answer
        return collate_fn
def get_matches(preds, golds):
pred_sets = [set(pred) for pred in preds]
gold_sets = [set(gold) for gold in golds]
seen = set()
matches = []
for i, pred_set in enumerate(pred_sets):
for j, gold_set in enumerate(gold_sets):
if j in seen:
continue
intersection = len(pred_set.intersection(gold_set))
if intersection <= 0.5 * len(gold_set):
continue
if intersection <= 0.5 * len(pred_set):
continue
seen.add(j)
matches.append((i, j))
break
return matches
def _score_single(tp, fp, fn, pred_words, pred_labels, answer_words, answer_labels):
matches = get_matches(pred_words, answer_words)
for l in pred_labels:
fp[l] += 1
for l in answer_labels:
fn[l] += 1
for i, j in matches:
l = pred_labels[i]
if l != answer_labels[j]:
continue
tp[l] += 1
fp[l] -= 1
fn[l] -= 1
def score(preds_batch, words_batch, answers_batch):
return score_words(pred_to_words(preds_batch, words_batch), answers_batch)
def score_words(preds_batch, answers_batch):
tp = defaultdict(int)
fp = defaultdict(int)
fn = defaultdict(int)
for preds, answers in zip(preds_batch, answers_batch):
pred_words, pred_labels = zip(*preds) if preds else ([], [])
answer_words, answer_labels = zip(*answers)
_score_single(tp, fp, fn, pred_words, pred_labels, answer_words, answer_labels)
return {l: (tp[l], fp[l], fn[l]) for l in labels}
def pred_to_words(preds_batch, words_batch):
pred_words_batch = []
for preds, words in zip(preds_batch, words_batch):
if not preds:
pred_words_batch.append([])
continue
pred_ranges, pred_labels = zip(*preds)
pred_words = intersect_ranges(pred_ranges, words)
pred_words = [(a, b) for a, b in list(zip(pred_words, pred_labels)) if a]
pred_words_batch.append(pred_words)
return pred_words_batch
def _confusion_matrix_single(
    matrix, pred_words, pred_labels, answer_words, answer_labels, to_id
):
    """Add one document's matched/unmatched span counts to `matrix`."""
    matches = get_matches(pred_words, answer_words)
    matched_pred = [False] * len(pred_labels)
    matched_answer = [False] * len(answer_labels)
    for i, j in matches:
        matched_pred[i] = True
        matched_answer[j] = True
        matrix[to_id[pred_labels[i]], to_id[answer_labels[j]]] += 1
    # Unmatched predictions land in the "unmatched" (None) column, unmatched
    # answers in the "unmatched" row.
    for hit, label in zip(matched_pred, pred_labels):
        if not hit:
            matrix[to_id[label], to_id[None]] += 1
    for hit, label in zip(matched_answer, answer_labels):
        if not hit:
            matrix[to_id[None], to_id[label]] += 1


def confusion_matrix_words(preds_batch, answers_batch):
    """Confusion matrix (DataFrame) between predicted and gold span labels."""
    full_labels = labels + [None]
    to_id = {l: i for i, l in enumerate(full_labels)}
    matrix = np.zeros((len(full_labels), len(full_labels)), dtype=np.int64)
    for preds, answers in zip(preds_batch, answers_batch):
        pred_words, pred_labels = zip(*preds) if preds else ([], [])
        answer_words, answer_labels = zip(*answers)
        _confusion_matrix_single(
            matrix, pred_words, pred_labels, answer_words, answer_labels, to_id
        )
    full_labels[-1] = "Unmatched"
    return pd.DataFrame(matrix, index=full_labels, columns=full_labels)


def confusion_matrix(preds_batch, words_batch, answers_batch):
    """Confusion matrix for character-range predictions."""
    return confusion_matrix_words(
        pred_to_words(preds_batch, words_batch), answers_batch
    )
| jeffdshen/kaggle-public | feedback/datasets.py | datasets.py | py | 10,868 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datasets.Dataset.from_p... |
18190854190 | from csv import DictReader, DictWriter
from io import StringIO
import functools
import tempfile
import os
# helper to map from column names in the CSV dump to the schema
# helper to map from column names in the CSV dump to the schema
dumpNameMapping = {
    '_id': 'mongo_id',
    'admin': 'admin',
    'profile.adult': 'adult',
    'status.completedProfile': 'completed',
    'status.admitted': 'admitted',
    'verified': 'verified',
    'timestamp': 'timestamp',
    'email': 'email',
    'profile.name': 'name',
    'profile.school': 'school',
    'profile.graduationYear': 'gradYear',
    'profile.gender': 'gender',
    'profile.description': 'description',
    'profile.essay': 'essay'
}


def get_applicants_from_csv(csv_bytes):
    """Yield applicant dicts (schema column names) from an uploaded CSV dump.

    'true'/'false' strings are converted to 1/0; other values pass through.
    The upload is written to a temporary file first: parsing it purely in
    memory misbehaved, apparently due to character encodings.
    """
    with tempfile.TemporaryDirectory() as tempdir:
        dump_path = os.path.join(tempdir, 'dump.csv')
        csv_bytes.save(dump_path)
        for row in DictReader(open(dump_path)):
            applicant = {}
            for dump_key, schema_key in dumpNameMapping.items():
                value = row[dump_key]
                if value == 'true':
                    applicant[schema_key] = 1
                elif value == 'false':
                    applicant[schema_key] = 0
                else:
                    applicant[schema_key] = value
            yield applicant
def insert_applicant(cursor, applicant):
    """Insert one applicant dict (keys = schema column names) via `cursor`."""
    column_order = [
        'mongo_id',
        'admin',
        'adult',
        'completed',
        'admitted',
        'verified',
        'timestamp',
        'email',
        'name',
        'school',
        'gradYear',
        'gender',
        'description',
        'essay'
    ]
    values = [applicant[column] for column in column_order]
    cursor.execute('''
    INSERT INTO Applicants (
        mongo_id,
        admin,
        adult,
        completed,
        admitted,
        verified,
        timestamp,
        email,
        name,
        school,
        gradYear,
        gender,
        description,
        essay
    ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
    ''', values)
def create_csv(conn):
    """Render completed applicants with their average vote rating as CSV text.

    The rating is the mean of all votes for the applicant, shown with three
    significant digits (0 when there are no votes).
    """
    c = conn.cursor()
    c.execute('''
    SELECT name, email, mongo_id FROM Applicants WHERE completed=1
    ''')
    applicants = c.fetchall() or []
    summary = []
    for applicant in applicants:
        c.execute('''
        SELECT rating
        FROM Votes
        WHERE
            app_id=?
        ''', (applicant['mongo_id'],))
        votes = c.fetchall()
        if votes:
            average = sum(v['rating'] for v in votes) / len(votes)
            average = float('{:.3}'.format(average))
        else:
            average = 0
        summary.append({
            'name': applicant['name'],
            'email': applicant['email'],
            'mongo_id': applicant['mongo_id'],
            'rating': average,
            'votes': len(votes)
        })
    buf = StringIO()
    writer = DictWriter(buf, fieldnames=['mongo_id', 'name', 'email', 'rating', 'votes'])
    writer.writeheader()
    for entry in summary:
        writer.writerow(entry)
    csv_text = buf.getvalue()
    buf.close()
    return csv_text
| compsoc-edinburgh/htb20-voter | app/data.py | data.py | py | 3,307 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tempfile.TemporaryDirectory",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "csv.DictReader"... |
31690737543 | import json
import os
from typing import TextIO
from ctranslate2.converters.transformers import TransformersConverter
def model_converter(model, model_output):
    """Convert an OpenAI Whisper checkpoint to a CTranslate2 model.

    `model` is the size suffix (e.g. "base", "large-v2"); conversion
    failures are printed rather than raised.
    """
    converter = TransformersConverter("openai/whisper-" + model)
    try:
        # float16 quantization; do not force overwriting an existing output.
        converter.convert(model_output, None, "float16", False)
    except Exception as exc:
        print(exc)
def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = '.'):
assert seconds >= 0, "non-negative timestamp expected"
milliseconds = round(seconds * 1000.0)
hours = milliseconds // 3_600_000
milliseconds -= hours * 3_600_000
minutes = milliseconds // 60_000
milliseconds -= minutes * 60_000
seconds = milliseconds // 1_000
milliseconds -= seconds * 1_000
hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
class ResultWriter:
extension: str
def __init__(self, output_dir: str):
self.output_dir = output_dir
def __call__(self, result: dict, audio_path: str):
audio_basename = os.path.basename(audio_path)
output_path = os.path.join(self.output_dir, audio_basename + "." + self.extension)
with open(output_path, "w", encoding="utf-8") as f:
self.write_result(result, file=f)
def write_result(self, result: dict, file: TextIO):
raise NotImplementedError
class WriteTXT(ResultWriter):
extension: str = "txt"
def write_result(self, result: dict, file: TextIO):
for segment in result["segments"]:
print(segment.text.strip(), file=file, flush=True)
class WriteVTT(ResultWriter):
extension: str = "vtt"
def write_result(self, result: dict, file: TextIO):
print("WEBVTT\n", file=file)
for segment in result["segments"]:
print(
f"{format_timestamp(segment.start)} --> {format_timestamp(segment.end)}\n"
f"{segment.text.strip().replace('-->', '->')}\n",
file=file,
flush=True,
)
class WriteSRT(ResultWriter):
    """SubRip (.srt) writer: numbered cues with comma decimal timestamps."""
    extension: str = "srt"
    def write_result(self, result: dict, file: TextIO):
        # SRT cues are 1-indexed.
        for index, seg in enumerate(result["segments"], start=1):
            start = format_timestamp(seg.start, always_include_hours=True, decimal_marker=',')
            end = format_timestamp(seg.end, always_include_hours=True, decimal_marker=',')
            # '-->' inside the text would be parsed as a timing line.
            text = seg.text.strip().replace('-->', '->')
            cue = f"{index}\n{start} --> {end}\n{text}\n"
            print(cue, file=file, flush=True)
class WriteTSV(ResultWriter):
    """
    Write a transcript to a file in TSV (tab-separated values) format containing lines like:
    <start time in integer milliseconds>\t<end time in integer milliseconds>\t<transcript text>
    Using integer milliseconds as start and end times means there's no chance of interference from
    an environment setting a language encoding that causes the decimal in a floating point number
    to appear as a comma; also is faster and more efficient to parse & store, e.g., in C++.
    """
    extension: str = "tsv"
    def write_result(self, result: dict, file: TextIO):
        # Header row.
        print("start", "end", "text", sep="\t", file=file)
        for segment in result["segments"]:
            # Seconds -> integer milliseconds (see class docstring for why).
            print(round(1000 * segment.start), file=file, end="\t")
            print(round(1000 * segment.end), file=file, end="\t")
            # Literal tabs inside the text would corrupt the column structure.
            print(segment.text.strip().replace("\t", " "), file=file, flush=True)
class WriteJSON(ResultWriter):
    """JSON writer: dumps the whole result dict verbatim."""
    extension: str = "json"
    def write_result(self, result: dict, file: TextIO):
        # NOTE(review): assumes *result* contains only JSON-serializable
        # values by the time this is called — confirm against the caller.
        json.dump(result, file)
| ahmetoner/whisper-asr-webservice | app/faster_whisper/utils.py | utils.py | py | 3,802 | python | en | code | 1,105 | github-code | 36 | [
{
"api_name": "ctranslate2.converters.transformers.TransformersConverter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 38,
"usage_type": "attribute"
},
{... |
7537133333 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Jan 10, 2013
@author: daniel
This utility file provides a method for moving information between Sugar
instances.
'''
from collections import defaultdict
from sugarcrm import Sugarcrm
from code import interact
from sugarcrm.sugarentry import SugarEntry
com_url = 'http://<host>:<port>/service/v4_1/rest.php'
com_usr = '<user>'
com_pwd = '<pass>'
pro_url = 'http://<host>:<port>/service/v4_1/rest.php'
pro_usr = '<user>'
pro_pwd = '<pass>'
modules = ['Accounts', 'Contacts', 'Opportunities', 'Leads', 'Notes',
'Prospects', 'Tasks']
cache = defaultdict(dict)
# Fill in user values from one system to the other here.
# The mapping is From -> To. Ie: Old System -> New System.
cache['Users']['1'] = '1'
# This map holds fields that need to pull from other cached values.
relate = {
'Contacts': {
'account_id': 'Accounts',
'assigned_user_id': 'Users'
},
'Opportunities': {
'account_id': 'Accounts',
'assigned_user_id': 'Users'
},
'Leads': {
'account_id': 'Accounts',
'assigned_user_id': 'Users',
'contact_id': 'Contacts',
'opportunity_id': 'Opportunities',
},
'Prospects': {
'assigned_user_id': 'Users',
'lead_id': 'Leads'
},
'Tasks': {
'assigned_user_id': 'Users',
'contact_id': 'Contacts'
},
'Calls': {
'assigned_user_id': 'Users',
'contact_id': 'Contacts'
},
'Notes': {
'account_id': 'Accounts',
'assigned_user_id': 'Users',
'contact_id': 'Contacts',
'lead_id': 'Leads',
'opportunity_id': 'Opportunities'
},
'Accounts': {
'assigned_user_id': 'Users'
}
}
SPro = Sugarcrm(pro_url, pro_usr, pro_pwd)
SCom = Sugarcrm(com_url, com_usr, com_pwd)
# A second lookup, this one for required module level connections that
# must be generated.
mod_links = {
'Tasks': [SCom.modules['Leads'],
SCom.modules['Notes'],
SCom.modules['Opportunities'],
SCom.modules['Accounts']],
'Notes': [SCom.modules['Opportunities'],
SCom.modules['Leads'], ]
}
def makeProEntry(oldEntry, oldID = None):
    """Copy *oldEntry* into the destination (SPro) Sugar instance.

    Field values are copied verbatim, except:
      * 'id' is never copied; the source id is remembered so the old->new
        mapping can be recorded in the module-level ``cache``;
      * fields listed in ``relate`` for this module are remapped through the
        cached id translations (missing mappings become '').
    Module-level relationships listed in ``mod_links`` are recreated for
    related entries that have already been migrated.
    """
    module = oldEntry._module
    mod_name = module._name
    newEntry = SugarEntry(SPro, mod_name)
    for field in module._fields.keys():
        if field == 'id':
            # Remember the source id; the destination assigns a fresh one on save().
            oldID = oldEntry[field]
            continue
        if field in relate[mod_name]:
            # Translate foreign keys via previously migrated records.
            ref_mod = relate[mod_name][field]
            newEntry[field] = cache[ref_mod].get(oldEntry[field], '')
            continue
        newEntry[field] = oldEntry[field]
    newEntry.save()
    # Recreate module-to-module links for related entries already migrated.
    for relmod in mod_links.get(mod_name, []):
        for relentry in oldEntry.get_related(relmod):
            if relentry['id'] in cache[relmod._name]:
                newrelentry = SPro.newSugarEntry(relmod._name)
                newrelentry['id'] = cache[relmod._name][relentry['id']]
                newEntry.relate(newrelentry)
    if oldID is not None:
        # Record the id mapping so later migrations can resolve references.
        cache[mod_name][oldID] = newEntry['id']
if __name__ == '__main__':
interact(local = globals())
| gddc/python_webservices_library | sugarcrm/utils/S2S.py | S2S.py | py | 2,952 | python | en | code | 46 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sugarcrm.Sugarcrm",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "sugarcrm.Sugarcrm",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "sugarcrm.... |
24288441205 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 15 20:50:05 2022
@author: Yifang
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import traceAnalysis as Ananlysis
import SPADdemod
def getSignalTrace (filename, traceType='Constant',HighFreqRemoval=True,getBinTrace=False,bin_window=20):
    '''Load and demodulate a SPAD photon-count trace.

    traceType: 'Freq' (frequency-modulated dual channel), 'Constant'
    (single continuous channel) or 'TimeDiv' (time-division dual channel).
    Returns the single trace (optionally binned) for 'Constant', or a
    (Red, Green, Signal) tuple for the two dual-channel modes.
    '''
    trace=Ananlysis.Read_trace (filename,mode="SPAD")
    if HighFreqRemoval==True:
        # Remove content above 1 kHz (sampling rate is 9938.4 Hz).
        trace=Ananlysis.butter_filter(trace, btype='low', cutoff=1000, fs=9938.4, order=10)
    if traceType=='Constant':
        if getBinTrace==True:
            # NOTE(review): get_bin_trace is called twice and the first result
            # (with color='m') is immediately overwritten — confirm whether the
            # duplicate call is intentional (e.g. a plotting side effect).
            trace_binned=Ananlysis.get_bin_trace(trace,bin_window=bin_window,color='m')
            trace_binned=Ananlysis.get_bin_trace(trace,bin_window=bin_window)
            return trace_binned
        else:
            return trace
    if traceType=='Freq':
        #Red,Green= SPADdemod.DemodFreqShift (trace,fc_g=1000,fc_r=2000,fs=9938.4)
        Red,Green= SPADdemod.DemodFreqShift_bandpass (trace,fc_g=1009,fc_r=1609,fs=9938.4)
        #Red=Ananlysis.butter_filter(Red, btype='low', cutoff=200, fs=9938.4, order=10)
        #Green=Ananlysis.butter_filter(Green, btype='low', cutoff=200, fs=9938.4, order=10)
        Signal=Ananlysis.getSignal_subtract(Red,Green,fs=9938.4)
        return Red,Green,Signal
    if traceType=='TimeDiv':
        #need to be modified for different time division traces
        # Envelope extraction separates the interleaved red/green samples.
        lmin,lmax=SPADdemod.hl_envelopes_max(trace, dmin=2, dmax=2, split=True)
        fig, ax = plt.subplots(figsize=(12, 3))
        ax.plot(lmax,trace[lmax], color='r')
        ax.plot(lmin,trace[lmin], color='g')
        x_green, Green=SPADdemod.Interpolate_timeDiv (lmin,trace)
        x_red, Red=SPADdemod.Interpolate_timeDiv (lmax,trace)
        Signal=Ananlysis.getSignal_subtract(Red,Green,fs=9938.4)
        fig, ax = plt.subplots(figsize=(12, 3))
        ax=Ananlysis.plot_trace(Signal,ax, label="Signal")
        return Red,Green,Signal
#%%
# Sampling Frequency
fs = 9938.4
#dpath= "C:/SPAD/SPADData/20220611/1516996_Freq_2022_6_11_16_8_21"
dpath="D:/SPAD/SPADData/20220913/1534725_HPC_50g_2022_9_13_16_3_57"
#%%
filename=Ananlysis.Set_filename (dpath,"traceValue.csv")
#Red,Green,Signal=getSignalTrace (filename,traceType='TimeDiv',HighFreqRemoval=True,getBinTrace=False)
Signal_raw=getSignalTrace (filename,traceType='Constant',HighFreqRemoval=True,getBinTrace=False,bin_window=100)
#%%
import traceAnalysis as Ananlysis
bin_window=100
Signal_bin=Ananlysis.get_bin_trace(Signal_raw,bin_window=bin_window)
fig, ax = plt.subplots(figsize=(12, 2.5))
Ananlysis.plot_trace(Signal_bin,ax, fs=99.38, label="Binned to 100Hz",color="b")
#%%
import traceAnalysis as Ananlysis
bin_window=200
Red_bin=Ananlysis.get_bin_trace(Red,bin_window=bin_window)
Green_bin=Ananlysis.get_bin_trace(Green,bin_window=bin_window)
#%%
fig, ax = plt.subplots(figsize=(12, 2.5))
Ananlysis.plot_trace(Red_bin[0:500],ax, fs=49.7, label="Binned to 50Hz",color="r")
#ax.set_xlim([0, 0.1])
fig, ax = plt.subplots(figsize=(12, 2.5))
Ananlysis.plot_trace(Green_bin[0:500],ax, fs=49.7, label="Binned to 50Hz",color="g")
#ax.set_xlim([0, 0.1])
#%%
'''unmixing time division'''
lmin,lg=SPADdemod.hl_envelopes_max(Green, dmin=4, dmax=7, split=True)
lmin,lr=SPADdemod.hl_envelopes_max(Red, dmin=4, dmax=7, split=True)
fig, ax = plt.subplots(figsize=(12, 3))
ax.plot(lg,Green[lg], color='g')
ax.plot(lr,Red[lr], color='r')
x_red, Red=SPADdemod.Interpolate_timeDiv (lr,Red)
x_green, Green=SPADdemod.Interpolate_timeDiv (lg,Green)
#%%
Signal=Ananlysis.getSignal_subtract(Red,Green,fs=49.7)
#%%
fig, (ax0, ax1,ax2) = plt.subplots(nrows=3)
ax0=Ananlysis.plot_trace(Green,ax0, fs=49.7, label="Green Signal 200Hz",color='g')
ax1=Ananlysis.plot_trace(Red,ax1, fs=49.7, label="Red Signal 200Hz", color='r')
ax2=Ananlysis.plot_trace(Signal,ax2, fs=49.7, label="Substract Signal 200Hz", color='b')
fig.tight_layout()
#%%
Signal=Ananlysis.butter_filter(Signal, btype='low', cutoff=100, fs=9938.4, order=5)
fig, ax = plt.subplots(figsize=(12, 2.5))
Ananlysis.plot_trace(Signal,ax, fs=9938.4, label="100Hz Low pass")
#%%
'''temporal bin dual channel'''
bin_window=200
Green_bin=Ananlysis.get_bin_trace(Green,bin_window=bin_window)
Red_bin=Ananlysis.get_bin_trace(Red,bin_window=bin_window)
Signal_binned=Ananlysis.get_bin_trace(Signal,bin_window=bin_window)
fig, (ax0, ax1,ax2) = plt.subplots(nrows=3)
ax0=Ananlysis.plot_trace(Green_bin,ax0, fs=99.384/2, label="Green Signal Binned 50Hz",color='g')
ax1=Ananlysis.plot_trace(Red_bin,ax1, fs=99.384/2, label="Red Signal Binned 50Hz", color='r')
ax2=Ananlysis.plot_trace(Signal_binned,ax2, fs=99.384/2, label="Substract Signal Binned 50Hz", color='b')
fig.tight_layout()
#%%
fs=200
fig, ax = plt.subplots(figsize=(8, 2))
powerSpectrum, freqenciesFound, time, imageAxis = ax.specgram(Signal,Fs=fs,NFFT=1024, detrend='linear',vmin=-130)
ax.set_xlabel('Time (Second)')
ax.set_ylabel('Frequency')
ax.set_ylim([0, 100])
#%%
signal1,signal2=Ananlysis.getICA (Red_bin,Green_bin)
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(signal1,ax, label="Signal1")
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(signal2,ax, label="Signal2")
#%%
fig, (ax0, ax1) = plt.subplots(nrows=2)
ax0=Ananlysis.plot_trace(signal1,ax0, fs=99.384/2, label="signal1 Signal Binned 50Hz",color='g')
ax1=Ananlysis.plot_trace(signal2,ax1, fs=99.384/2, label="signal2 Signal Binned 50Hz", color='r')
fig.tight_layout()
#%%
'''temporal bin'''
bin_window=200
signal1_bin=Ananlysis.get_bin_trace(signal1,bin_window=bin_window)
signal2_bin=Ananlysis.get_bin_trace(signal2,bin_window=bin_window)
fig, (ax0, ax1) = plt.subplots(nrows=2)
ax0=Ananlysis.plot_trace(signal1_bin,ax0, fs=99.384/2, label="signal1 Signal Binned 50Hz",color='r')
ax1=Ananlysis.plot_trace(signal2_bin,ax1, fs=99.384/2, label="signal2 Signal Binned 50Hz", color='g')
fig.tight_layout()
#%%
Red,Green,Signal = getSignalTrace (filename, traceType='TimeDiv',HighFreqRemoval=False,getBinTrace=False)
#%%
# Plot the spectrogram
fig, ax = plt.subplots(figsize=(8, 2))
powerSpectrum, freqenciesFound, time, imageAxis = ax.specgram(signal2_bin, Fs=fs/200,NFFT=1024, detrend='linear',vmin=-130)
ax.set_xlabel('Time (Second)')
ax.set_ylabel('Frequency')
ax.set_ylim([0, 250])
fig.colorbar(imageAxis,ax=ax)
#%%
Ananlysis.PSD_plot (Signal,fs=9938.4/200,method="welch",color='tab:blue',linewidth=1)
fig=Ananlysis.plot_PSD_bands (Signal,fs=9938.4)
#%%
fig=Ananlysis.plot_PSD_bands (trace_binned,fs=9938.4/20)
#%% Low pass filter
'''Get trend and detrend'''
# trace_trend=Ananlysis.butter_filter(trace_clean, btype='low', cutoff=10, fs=9938.4, order=5)
# trace_detrend = Ananlysis.get_detrend(trace_binned)
#%%
'''USE FASTICE method'''
#Red,Green,signal1, signal2 = FreqShift_getICA (trace_clean,fc_g=1000,fc_r=2000,fs=9938.4)
#%%
'''PHOTOMETRY DATA ANALYSIS'''
dpath= "C:/SPAD/SPADData/20220616"
filename=Ananlysis.Set_filename (dpath,csv_filename="1516995_cont-2022-06-16-145825.csv")
Green,Red=Ananlysis.Read_trace (filename,mode="photometry")
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(Green,ax, fs=130, label="GCamp6 Raw")
Gcamp=Ananlysis.butter_filter(Green, btype='low', cutoff=10, fs=130, order=5)
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(Gcamp,ax, fs=130, label="GCamp6 10Hz lowpass")
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(Red,ax, fs=130, label="Isospestic Raw", color='m')
Iso=Ananlysis.butter_filter(Red, btype='low', cutoff=10, fs=130, order=5)
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(Iso,ax, fs=130, label="Isospestic 10Hz lowpass", color='m')
#%%
sig=Ananlysis.getSignal_subtract(Red,Green,fs=130)
sig=Ananlysis.butter_filter(sig, btype='low', cutoff=20, fs=130, order=5)
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(sig,ax, fs=130, label="Isospestic")
#%%
Signal=Ananlysis.getSignal_subtract(Red,Green,fs=130)
#%%
signal1,signal2=Ananlysis.getICA (Red,Green)
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(signal1,ax, label="Signal1")
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(signal2,ax, label="Signal2")
#%%
Signal=Ananlysis.butter_filter(Signal, btype='low', cutoff=20, fs=130, order=10)
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(Signal,ax, fs=130, label="trace")
| MattNolanLab/SPAD_in_vivo | SPAD_Python/mainAnalysis.py | mainAnalysis.py | py | 8,561 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "traceAnalysis.Read_trace",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "traceAnalysis.butter_filter",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "traceAnalysis.get_bin_trace",
"line_number": 20,
"usage_type": "call"
},
{
"... |
9495049897 | ### Add fixed time effects and controls for infection levels and national lockdown
# Initial imports
import pandas as pd
import statsmodels.api as sm
import numpy as np
from scipy import stats
import statsmodels.formula.api as smf
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns
#Import household control data
data = pd.read_csv('household_regression_data_weeks.csv')
# Calculate the national lockdown variable for each week
# (mean of the per-household indicator = share of households under lockdown that week).
lockdown_weekly = data.groupby('Week')['National_lockdown'].mean().reset_index()
#Import the y_weekly data
y_weekly = pd.read_csv('y_weekly_inf.csv')
#Merge on the national lockdown control and save the file
y_weekly = y_weekly.merge(lockdown_weekly, on='Week')
y_weekly.to_csv('y_weekly_full.csv')
# Fit the OLS regression model: participation on treatment with week,
# lagged-infection and national-lockdown controls.
model = smf.ols(formula='Y ~ Treatment + Week + Student_infection_lag + National_lockdown', data=y_weekly).fit()
print(model.summary())
# Plot the residuals against the fitted values (lowess line highlights trends).
sns.residplot(x=model.predict(), y=y_weekly['Y'], lowess=True, line_kws={'color': 'red'})
plt.title('Residual Plot')
plt.xlabel('Predicted Values')
plt.ylabel('Residuals')
plt.show()
# Plot the real data and model over time
plt.plot(y_weekly['Week'], y_weekly['Y'], label='Real Data')
plt.plot(y_weekly['Week'], model.predict(), label='Model')
# Dashed marker at week 10 — presumably the treatment onset; confirm with the study design.
plt.axvline(x=10, linestyle='--', color='black')
plt.title('Real Data vs. Model Over Time')
plt.xlabel('Week of Study')
plt.ylabel('% Participation')
plt.ylim((0,1))
plt.legend()
plt.show()
| rg522/psych_owner | aggregate_model_4.py | aggregate_model_4.py | py | 1,484 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "statsmodels.formula.api.ols",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "statsmode... |
18370027108 | from flask import Flask, request, render_template
import json
import pickle
import nltk
import string
import re
#from nltk.classify import NaiveBayesClassifier
app = Flask(__name__)
#preprocess the text
def preprocess(sentence):
    """Turn raw text into a bag-of-words feature dict for the classifier.

    Lowercases, removes newlines, drops English stopwords and punctuation,
    Snowball-stems the remaining tokens, and maps each stem to True (the
    feature-dict shape NLTK classifiers expect).
    """
    # NOTE(review): downloading corpora on every request is slow; the calls are
    # idempotent, so this is safe, but doing it once at startup would be better.
    nltk.download('stopwords')
    nltk.download('punkt')
    def build_bow_features(words):
        # NLTK classifiers take {feature_name: True} dicts.
        return {word: True for word in words}
    sentence = sentence.lower()
    sentence = sentence.replace('\n', '')
    useless = nltk.corpus.stopwords.words("english") + list(string.punctuation)
    wordlist = [word for word in nltk.word_tokenize(sentence) if word not in useless]
    # Build the stemmer once instead of once per token.
    stemmer = nltk.stem.SnowballStemmer('english')
    stemmed_words = [stemmer.stem(word) for word in wordlist]
    # Debug print of the feature dict removed: it spammed stdout on every request.
    return build_bow_features(stemmed_words)
#load the trained model and do prediction
def predict (txt):
    """Classify a bag-of-words feature dict with the module-level ``model``.

    Returns the model's raw label; the values 0 and 4 are interpreted as
    negative/positive by ``submit_txt``.
    """
    prediction = model.classify(txt)
    return prediction
#return the prediction
def submit_txt(txt):
    """Classify *txt* and map the model's numeric label to a display string."""
    features = preprocess(txt)
    label = predict(features)
    # Sentiment140-style labels: 4 = positive, 0 = negative.
    label_names = {4: 'Positive', 0: 'Negative'}
    return label_names.get(label, 'FAIL')
@app.route('/', methods=['GET', 'POST'])
def index():
    """Serve the UI; on a 'submit_txt' POST, return the sentiment as plain text."""
    if request.method == 'POST':
        details = request.form
        if details['form_type'] == 'submit_txt':
            return submit_txt(details['txt'])
    return render_template('interface.html')
if __name__ == '__main__':
model = pickle.load(open('SentimentAnalysisModel2.pkl', 'rb'))
app.run(host='0.0.0.0')
| AnasE17/SentimentAnalysis | app.py | app.py | py | 1,434 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",... |
4157086617 | import os
import glob
import imageio.v2 as imageio
from wand.image import Image
import PySimpleGUI as sg
from moviepy.editor import ImageSequenceClip
# Function to create an MP4 movie from images.
def create_mp4(input_folder, output_path, fps):
    """Assemble the images in *input_folder* (sorted by filename) into an MP4 at *fps*."""
    # Get a sorted list of image paths.
    image_paths = sorted(
        [
            os.path.join(input_folder, file)
            for file in os.listdir(input_folder)
            if file.lower().endswith((".png", ".jpg", ".jpeg"))
        ]
    )
    # Read in images.
    images = [imageio.imread(path) for path in image_paths]
    if images:
        # Create and write video file; silently does nothing when no images matched.
        clip = ImageSequenceClip(images, fps=fps)
        clip.write_videofile(output_path, codec="mpeg4")
# Function to create a GIF from images.
def create_gif(input_folder, output_path, duration, loop):
    """Assemble the images in *input_folder* (sorted by filename) into an animated GIF."""
    # Ensure output path ends with .gif.
    if not output_path.lower().endswith(".gif"):
        output_path += ".gif"
    # Get a sorted list of image paths.
    image_paths = sorted(
        [
            os.path.join(input_folder, file)
            for file in os.listdir(input_folder)
            if file.lower().endswith((".png", ".jpg", ".jpeg", ".tiff", ".exr"))
        ]
    )
    # Read in images.
    images = [imageio.imread(path) for path in image_paths]
    if images:
        # Create and write GIF file.
        # NOTE(review): the GUI labels *duration* "delay time in ms", while
        # imageio's `duration` is per-frame display time — confirm the units
        # match the installed imageio version.
        imageio.mimsave(output_path, images, duration=duration, loop=loop)
# Function to process images with various effects.
def process_images(
    input_folder,
    output_folder,
    dither,
    num_colors,
    pixelate,
    pixelate_factor,
    resize,
    width,
    height,
    rotate,
    angle,
    blur,
    radius,
    mirror,
):
    """Apply the enabled effects to every supported image in *input_folder*.

    Each boolean flag enables one ImageMagick (wand) operation; results are
    written to *output_folder* under the original file names.  Returns True
    when at least one operation was applied to at least one image.
    """
    processing_done = False
    for img_path in glob.glob(os.path.join(input_folder, "*")):
        if img_path.lower().endswith((".png", ".jpg", ".jpeg", ".tiff", ".exr")):
            # Apply requested image processing operations.
            with Image(filename=img_path) as img:
                if dither:
                    img.quantize(number_colors=int(num_colors), dither="riemersma")
                    processing_done = True
                if pixelate:
                    # Pixelate by shrinking, then scaling back to (near) original size.
                    img.resize(
                        int(img.width // pixelate_factor),
                        int(img.height // pixelate_factor),
                    )
                    img.resize(
                        img.width * pixelate_factor, img.height * pixelate_factor
                    )
                    processing_done = True
                if resize:
                    img.resize(width, height)
                    processing_done = True
                if rotate:
                    img.rotate(angle)
                    processing_done = True
                if blur:
                    img.gaussian_blur(radius=radius)
                    processing_done = True
                if mirror:
                    # flop() mirrors horizontally.
                    img.flop()
                    processing_done = True
                # The image is saved even when no effect was enabled (plain copy).
                img.save(
                    filename=os.path.join(output_folder, os.path.basename(img_path))
                )
    return processing_done
# Set up PySimpleGUI.
sg.theme("DarkBlue")
# Define the layout of the GUI.
layout = [
[
sg.Text(
"Process Images bakes input folder images to output folder with selected effects"
)
],
[
sg.Text("Input Folder", size=(15, 1)),
sg.InputText(key="-IN-"),
sg.FolderBrowse(),
],
[
sg.Text("Output Folder", size=(15, 1)),
sg.InputText(key="-OUT-"),
sg.FolderBrowse(),
],
[
sg.Checkbox("Dither", key="-DITHER-"),
sg.InputText(key="-NUM_COLORS-", default_text="256"),
],
[
sg.Checkbox("Pixelate", key="-PIXELATE-"),
sg.InputText(key="-PIXELATE_FACTOR-", default_text="1"),
],
[
sg.Checkbox("Resize", key="-RESIZE-"),
sg.InputText(key="-WIDTH-", default_text="512"),
sg.InputText(key="-HEIGHT-", default_text="512"),
],
[
sg.Checkbox("Rotate", key="-ROTATE-"),
sg.InputText(key="-ANGLE-", default_text="90"),
],
[sg.Checkbox("Blur", key="-BLUR-"), sg.InputText(key="-RADIUS-", default_text="0")],
[sg.Checkbox("Mirror", key="-MIRROR-", default=False)],
[
sg.Text("MP4 output path"),
sg.InputText(key="-MP4-"),
sg.FileSaveAs(file_types=(("MP4 Files", "*.mp4"),)),
],
[sg.Text("MP4 FPS"), sg.InputText(key="-FPS-", default_text="24")],
[
sg.Text("GIF output path"),
sg.InputText(key="-GIF-"),
sg.FileSaveAs(file_types=(("GIF Files", "*.gif"),)),
],
[sg.Text("GIF delay time in ms"), sg.InputText(key="-DURATION-", default_text="5")],
[sg.Text("GIF loop count"), sg.InputText(key="-LOOP-", default_text="0")],
[
sg.Button("Process Images"),
sg.Button("Create MP4"),
sg.Button("Create GIF"),
sg.Button("Exit"),
],
]
# Define icon path.
icon_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "data_set_icon_246682.ico"
)
# Create the GUI window.
window = sg.Window("ImageChef", layout, background_color="", icon=icon_path)
# Main event loop.
while True:
event, values = window.read()
if event in (None, "Exit"):
break
elif event == "Process Images":
try:
pixelate_factor = int(values["-PIXELATE_FACTOR-"])
width = int(values["-WIDTH-"])
height = int(values["-HEIGHT-"])
angle = float(values["-ANGLE-"])
radius = float(values["-RADIUS-"])
if not values["-IN-"] or not os.path.isdir(values["-IN-"]):
raise ValueError("Invalid or non-existent input folder")
if not values["-OUT-"]:
sg.popup(
"No output folder specified, processed images will be saved in the input folder."
)
except ValueError as e:
sg.popup(f"Invalid input: {e}")
continue
processing_done = process_images(
values["-IN-"],
values["-OUT-"],
values["-DITHER-"],
values["-NUM_COLORS-"],
values["-PIXELATE-"],
pixelate_factor,
values["-RESIZE-"],
width,
height,
values["-ROTATE-"],
angle,
values["-BLUR-"],
radius,
values["-MIRROR-"],
)
if processing_done:
sg.popup("Processing Done")
else:
sg.popup("No Processing Done")
elif event == "Create MP4":
try:
fps = int(values["-FPS-"])
if not values["-IN-"] or not os.path.isdir(values["-IN-"]):
raise ValueError("Invalid or non-existent input folder")
if not values["-MP4-"]:
raise ValueError("Output path for MP4 is empty")
create_mp4(values["-IN-"], values["-MP4-"], fps)
except ValueError as e:
sg.popup(f"Invalid input for MP4 creation: {e}")
elif event == "Create GIF":
try:
duration = float(values["-DURATION-"])
loop = int(values["-LOOP-"])
if not values["-OUT-"] or not os.path.isdir(values["-OUT-"]):
raise ValueError("Invalid or non-existent output folder")
if not values["-GIF-"]:
raise ValueError("Output path for GIF is empty")
create_gif(values["-OUT-"], values["-GIF-"], duration, loop)
except ValueError as e:
sg.popup(f"Invalid input for GIF creation: {e}")
window.close()
| avyaktam/ImageChef | ImageChef.py | ImageChef.py | py | 7,866 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "imageio.v2.imread",
"line_nu... |
37712754399 | from django.shortcuts import render
from custom_model_field_app.forms import PersonForm
# Create your views here.
def customview(request):
    """Render the Person form; on POST, validate the submitted data and save it.

    Fix: the original constructed an unbound ``PersonForm()`` and called
    ``is_valid()`` on it — an unbound form is never valid, so ``save()``
    could never run.  The form must be bound to ``request.POST``.
    """
    if request.method == 'POST':
        # Bind the submitted data so validation sees it.
        form = PersonForm(request.POST)
        if form.is_valid():
            form.save()
    else:
        form = PersonForm()
    return render(request, 'custom.html', {'form': form})
{
"api_name": "custom_model_field_app.forms.PersonForm",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 15,
"usage_type": "call"
}
] |
74853194983 | from langchain.chat_models import ChatVertexAI
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.schema import HumanMessage, SystemMessage
chat = ChatVertexAI()
# Demo 1: send a fixed system+human message list straight to the chat model.
messages = [
    SystemMessage(content="You are a helpful assistant that answer questions."),
    HumanMessage(content="Why is the sky blue?"),
]
print(chat(messages))
template = """
You are a helpful assistant that answer questions.
"""
# Demo 2: build the same conversation through prompt templates, so the human
# turn can be parameterized with {text}.
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages(
    [system_message_prompt, human_message_prompt]
)
# get a chat completion from the formatted messages
resp = chat(chat_prompt.format_prompt(text="why is the sky blue.").to_messages())
print(resp)
| GoogleCloudPlatform/solutions-genai-llm-workshop | LAB001-2-ChatModel/0-run.py | 0-run.py | py | 927 | python | en | code | 55 | github-code | 36 | [
{
"api_name": "langchain.chat_models.ChatVertexAI",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "langchain.schema.SystemMessage",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "langchain.schema.HumanMessage",
"line_number": 13,
"usage_type": "call"
... |
42779512033 | from fastexcel import read_excel
from openpyxl import load_workbook
from xlrd import open_workbook
def pyxl_read(test_file_path: str):
    """Benchmark helper: iterate every cell value of every sheet via openpyxl.

    Fix: the original created ``rows = ws.iter_rows()`` and immediately
    overwrote it with ``ws.values`` — the first assignment was dead code.
    """
    wb = load_workbook(test_file_path, read_only=True, keep_links=False, data_only=True)
    for ws in wb:
        # ws.values yields each row as a tuple of cell values.
        for row in ws.values:
            for value in row:
                value  # touch each value so the read work is actually performed
def xlrd_read(test_file_path: str):
    """Benchmark helper: iterate every cell value of every sheet via xlrd."""
    workbook = open_workbook(test_file_path)
    for sheet in workbook.sheets():
        for row_idx in range(sheet.nrows):
            for cell_value in sheet.row_values(row_idx):
                cell_value  # touch each value so the read work is actually performed
def fastexcel_read(test_file_path: str):
    """Benchmark helper: load every sheet with fastexcel and materialize it as Arrow."""
    reader = read_excel(test_file_path)
    for sheet_name in reader.sheet_names:
        sheet = reader.load_sheet_by_name(sheet_name)
        # to_arrow() forces the sheet to actually be read/converted.
        sheet.to_arrow()
| ToucanToco/fastexcel | python/tests/benchmarks/readers.py | readers.py | py | 787 | python | en | code | 16 | github-code | 36 | [
{
"api_name": "openpyxl.load_workbook",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "xlrd.open_workbook",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "fastexcel.read_excel",
"line_number": 25,
"usage_type": "call"
}
] |
28281049495 | import argparse
import pathlib
import itertools
import sys
import urllib
import docker
import tqdm
from compose import config
version = "0.8.0"
def _resolve_name(args, service):
if args.use_service_image_name_as_filename:
return urllib.parse.quote(service["image"], safe="")
return service["name"]
def save(args, client, service, print):
    """Dump the docker image backing *service* to ``<output>/<name>.tar``.

    Exits the process when the image is missing locally or its name matches
    more than one local image.  Existing archives are skipped unless
    ``--overwrite`` was given.  *print* is injected so the caller can route
    messages through the progress bar.
    """
    image = service["image"]
    real_images = [i for i in client.images.list() if image in i.tags]
    if not real_images:
        print("{}: missed (pull, build or specify precisely image name)".format(image))
        sys.exit(1)
    if len(real_images) > 1:
        # Ambiguous name: show every tag of every candidate image.
        names = ", ".join(set(itertools.chain.from_iterable(i.tags for i in real_images)))
        print("{}: specify image name more precisely (candidates: {})".format(image, names))
        sys.exit(1)
    path = args.output / "{}.tar".format(_resolve_name(args, service))
    if path.exists() and not args.overwrite:
        print("{} skip ({} already exists)".format(image, path))
        return
    print("{} saving...".format(image))
    args.output.mkdir(parents=True, exist_ok=True)
    with path.open("wb") as f:
        # Image.save() streams the tar archive in chunks.
        for chunk in real_images[0].save():
            f.write(chunk)
def load(args, client, service, print):
    """Load ``<input>/<name>.tar`` into docker and tag it with the service's image name."""
    print("{} loading...".format(service["image"]))
    path = args.input / "{}.tar".format(_resolve_name(args, service))
    with path.open("rb") as f:
        # images.load() returns a list; each archive produced by save() holds one image.
        i, *_ = client.images.load(f)
    i.tag(service["image"])
def parse_args():
    """Build and evaluate the command line interface.

    Two required sub-commands: ``save`` (dump service images to tar files)
    and ``load`` (import them back and re-tag).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", default=False, action="store_true", help="show version")
    parser.add_argument("--timeout", default=60, type=int,
                        help="docker connection timeout [default: %(default)s]")
    parser.add_argument("--use-service-image-name-as-filename", default=False, action="store_true",
                        help="Support legacy naming behavior")
    parser.add_argument("-f", "--file", default=None, type=pathlib.Path,
                        help="specify an alternate compose file")
    sub_commands = parser.add_subparsers(dest="command")
    sub_commands.required = True
    save_parser = sub_commands.add_parser("save")
    save_parser.set_defaults(function=save)
    save_parser.add_argument("-o", "--output", type=pathlib.Path, default=".",
                             help="output directory [default: %(default)s]")
    save_parser.add_argument("--overwrite", action="store_true", default=False,
                             help="overwrite if exist [default: %(default)s]")
    load_parser = sub_commands.add_parser("load")
    load_parser.set_defaults(function=load)
    load_parser.add_argument("-i", "--input", type=pathlib.Path, default=".",
                             help="input directory [default: %(default)s]")
    return parser.parse_args()
def gen_services(path):
    """Yield fully-resolved service dicts from the compose file at *path*.

    Raises RuntimeError for any service without an explicit 'image' field,
    since the transfer tool needs a concrete image name to save/load.
    """
    parent = str(path.parent)
    # Honor a .env file next to the compose file, as docker-compose would.
    env = config.environment.Environment.from_env_file(parent)
    details = config.find(parent, [path.name], env)
    resolved = config.load(details)
    for s in resolved.services:
        if "image" not in s:
            raise RuntimeError("Service {!r} have no 'image' field".format(s["name"]))
        yield s
def main():
    """CLI entry point: locate the compose file, then run save/load per unique image."""
    args = parse_args()
    if args.version:
        print(version)
        return
    # Fall back to the conventional compose file names when -f is not given.
    if args.file is None:
        files = ["docker-compose.yml", "docker-compose.yaml"]
    else:
        files = [args.file]
    for file in files:
        path = pathlib.Path(file)
        if not path.exists():
            continue
        path = path.resolve()
        services = list(gen_services(path))
        break
    else:
        # No candidate file existed.
        raise RuntimeError("Files does not exists {!r}".format(files))
    client = docker.from_env(timeout=args.timeout)
    # Process each image only once, even when several services share it.
    viewed = set()
    with tqdm.tqdm(total=len(services)) as pbar:
        services.sort(key=lambda s: s["name"])
        for service in services:
            if service["image"] not in viewed:
                # args.function is save() or load(), chosen by the sub-command;
                # messages go through pbar.write to avoid clobbering the bar.
                args.function(args, client, service, print=pbar.write)
                viewed.add(service["image"])
            pbar.update(1)
| pohmelie/docker-compose-transfer | docker_compose_transfer/__init__.py | __init__.py | py | 3,985 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "urllib.parse.quote",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "itertools.chain.from_it... |
19542054640 | import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# Change default path
# sys.path.append('PROGMOD_')
# Function for formatting file to array of specific variables
# data = txt file
# sh_index = state history index, i.e. controller or environment
# var_index = variable index, i.e. timestamp, joint_angle etc...
def txt_to_variable_array(data, sh_index, var_index):
    """Extract one variable series from a '|'/'/'/'&'-delimited log string.

    *data* is split into state-history sections on '|'; section *sh_index* is
    split into variable fields on '/'; field *var_index* holds '&'-separated
    numeric samples.  Empty entries (e.g. from doubled '&') are discarded.

    Returns ``(values, strings, raw_field)``: the samples as floats, the same
    samples as their original strings, and the unsplit field text.
    """
    section = data.split("|")[sh_index]
    field = section.split("/")[var_index]
    # Drop blank entries in one pass instead of collecting indices and
    # popping them in reverse (the original's two-step removal).
    tokens = [tok for tok in field.split("&") if tok != ""]
    values = [float(tok) for tok in tokens]
    return (values, tokens, field)
def plotxy (x, y, y2, xlab, ylab, xunit, yunit):
    """Plot two series against *x* ("Original" vs "Improved") with unit-suffixed ticks."""
    # PLOT XY DATA
    plt.plot(x, y, "ro-", color="blue", label="Original")
    plt.plot(x, y2, "ro-", color="red", label="Improved")
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    # SET UNITS: append the unit string to every axis tick label.
    plt.gca().xaxis.set_major_formatter(ticker.FormatStrFormatter(f'%.1f {xunit}'))
    plt.gca().yaxis.set_major_formatter(ticker.FormatStrFormatter(f'%.1f {yunit}'))
    plt.grid()
    plt.legend()
    plt.show()
# Main
def main():
    """Configure serif fonts and plot the message-count comparison."""
    # Changing default matplotlib font:
    # https://jonathansoma.com/lede/data-studio/matplotlib/changing-fonts-in-matplotlib/
    matplotlib.rcParams['font.serif'] = "Palatino Linotype"  # Change default serif font
    matplotlib.rcParams['font.family'] = "serif"  # Set default family to serif
    process_counts = [2, 5, 10, 15, 17, 20, 22, 25, 30, 32]
    messages_original = [3, 24, 99, 224, 288, 399, 483, 624, 899, 1023]
    messages_improved = [4, 13, 28, 43, 49, 58, 64, 73, 88, 94]
    plotxy(process_counts, messages_original, messages_improved,
           "No. of processes", "No. of messages", "", "")
if __name__ == "__main__":
main() | Andreas691667/P1LeaderElection_Group2 | UML & Graphs/plot.py | plot.py | py | 2,036 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matpl... |
35398321598 | from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from pants.backend.core.tasks.check_exclusives import ExclusivesMapping
from pants.backend.jvm.tasks.jvm_task import JvmTask
from pants.base.exceptions import TaskError
from pants.util.dirutil import safe_mkdtemp, safe_rmtree
from pants_test.task_test_base import TaskTestBase
class DummyJvmTask(JvmTask):
    """Minimal concrete JvmTask: execute() is a no-op, used only to exercise
    the JvmTask base-class behavior in the tests below."""
    def execute(self):
        pass
class JvmTaskTest(TaskTestBase):
    """Test some base functionality in JvmTask."""

    @classmethod
    def task_type(cls):
        # The task under test is the no-op DummyJvmTask defined above.
        return DummyJvmTask

    def setUp(self):
        super(JvmTaskTest, self).setUp()
        self.workdir = safe_mkdtemp()
        # t1/t2 share exclusives value 'a'; t3 uses the conflicting value 'b'.
        self.t1 = self.make_target('t1', exclusives={'foo': 'a'})
        self.t2 = self.make_target('t2', exclusives={'foo': 'a'})
        self.t3 = self.make_target('t3', exclusives={'foo': 'b'})
        # Force exclusive propagation on the targets.
        self.t1.get_all_exclusives()
        self.t2.get_all_exclusives()
        self.t3.get_all_exclusives()
        context = self.context(target_roots=[self.t1, self.t2, self.t3])
        # Create the exclusives mapping.
        exclusives_mapping = ExclusivesMapping(context)
        exclusives_mapping.add_conflict('foo', ['a', 'b'])
        exclusives_mapping._populate_target_maps(context.targets())
        context.products.safe_create_data('exclusives_groups', lambda: exclusives_mapping)
        self.task = self.create_task(context, self.workdir)

    def tearDown(self):
        super(JvmTaskTest, self).tearDown()
        # Remove the scratch workdir created in setUp.
        safe_rmtree(self.workdir)

    def test_get_base_classpath_for_compatible_targets(self):
        # Compatible exclusives ('a' and 'a') must not raise.
        self.task.get_base_classpath_for_compatible_targets([self.t1, self.t2])

    def test_get_base_classpath_for_incompatible_targets(self):
        # Conflicting exclusives ('a' vs 'b') must raise TaskError.
        with self.assertRaises(TaskError):
            self.task.get_base_classpath_for_compatible_targets([self.t1, self.t3])
| fakeNetflix/square-repo-pants | tests/python/pants_test/tasks/test_jvm_task.py | test_jvm_task.py | py | 1,897 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pants.backend.jvm.tasks.jvm_task.JvmTask",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pants_test.task_test_base.TaskTestBase",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "pants.util.dirutil.safe_mkdtemp",
"line_number": 26,
"usa... |
26469668124 | from ...key import Address
from ...common import Int, concatBytes, _hint
from ...hint import MNFT_COLLECTION_POLICY, MNFT_COLLECTION_REGISTER_FORM, MNFT_MINT_FORM, MNFT_NFT_ID, MNFT_SIGNER, MNFT_SIGNERS
class CollectionRegisterForm:
    """Form describing a new NFT collection registration."""

    def __init__(self, target, symbol, name, royalty, uri, whites):
        assert royalty >= 0 and royalty < 100, 'Invalid royalty; CollectionRegisterForm'
        self.hint = _hint(MNFT_COLLECTION_REGISTER_FORM)
        self.target = Address(target)
        self.symbol = symbol
        self.name = name
        self.royalty = Int(royalty)
        self.uri = uri
        # White-listed accounts, each wrapped as an Address.
        self.whites = [Address(white) for white in whites]

    def bytes(self):
        """Canonical byte form: target || symbol || name || royalty || uri || whitelist."""
        white_bytes = b''.join(white.bytes() for white in self.whites)
        return concatBytes(
            self.target.bytes(),
            self.symbol.encode(),
            self.name.encode(),
            self.royalty.bytes(),
            self.uri.encode(),
            white_bytes,
        )

    def dict(self):
        """JSON-serializable dict form of the register form."""
        return {
            '_hint': self.hint.hint,
            'target': self.target.address,
            'symbol': self.symbol,
            'name': self.name,
            'royalty': self.royalty.value,
            'uri': self.uri,
            'whites': [white.address for white in self.whites],
        }
class CollectionPolicy:
    """Collection-level policy: display name, royalty percentage, uri and whitelist."""

    def __init__(self, name, royalty, uri, whites):
        assert royalty >= 0 and royalty < 100, 'Invalid royalty; CollectionPolicy'
        self.hint = _hint(MNFT_COLLECTION_POLICY)
        self.name = name
        self.royalty = Int(royalty)
        self.uri = uri
        self.whites = [Address(white) for white in whites]

    def bytes(self):
        """Canonical byte form: name || royalty || uri || concatenated whitelist."""
        white_bytes = b''.join(white.bytes() for white in self.whites)
        return concatBytes(self.name.encode(), self.royalty.bytes(), self.uri.encode(), white_bytes)

    def dict(self):
        """JSON-serializable dict form of the policy."""
        return {
            '_hint': self.hint.hint,
            'name': self.name,
            'royalty': self.royalty.value,
            'uri': self.uri,
            'whites': [white.address for white in self.whites],
        }
class NFTSigner:
    """One signer entry: account, royalty-share percentage and signed flag."""

    def __init__(self, account, share, signed):
        assert share >= 0 and share <= 100, 'Invalid share; NFTSigner'
        self.hint = _hint(MNFT_SIGNER)
        self.account = Address(account)
        self.share = Int(share)
        self.signed = signed

    def bytes(self):
        """Canonical byte form: account || share || one flag byte (1 if signed)."""
        signed_flag = bytes([1]) if self.signed else bytes([0])
        return concatBytes(self.account.bytes(), self.share.bytes(), signed_flag)

    def dict(self):
        """JSON-serializable dict form of the signer."""
        return {
            '_hint': self.hint.hint,
            'account': self.account.address,
            'share': self.share.value,
            'signed': self.signed,
        }
class NFTSigners:
    """Group of NFTSigner entries together with the total share percentage."""

    def __init__(self, total, signers):
        assert total >= 0 and total <= 100, 'Invalid total share; NFTSigners'
        self.hint = _hint(MNFT_SIGNERS)
        self.total = Int(total)
        self.signers = signers

    def bytes(self):
        """Canonical byte form: total || each signer's bytes in order."""
        parts = bytearray()
        for signer in self.signers:
            parts += signer.bytes()
        return concatBytes(self.total.bytes(), parts)

    def dict(self):
        """JSON-serializable dict form of the signer group."""
        return {
            '_hint': self.hint.hint,
            'total': self.total.value,
            'signers': [signer.dict() for signer in self.signers],
        }
class MintForm:
    """Form describing a single NFT to mint (content hash, uri, creators, copyrighters)."""

    def __init__(self, hash, uri, creators, copyrighters):
        self.hint = _hint(MNFT_MINT_FORM)
        self.hash = hash
        self.uri = uri
        self.creators = creators
        self.copyrighters = copyrighters

    def bytes(self):
        """Canonical byte form: hash || uri || creators || copyrighters."""
        return concatBytes(
            self.hash.encode(),
            self.uri.encode(),
            self.creators.bytes(),
            self.copyrighters.bytes(),
        )

    def dict(self):
        """JSON-serializable dict form of the mint form."""
        return {
            '_hint': self.hint.hint,
            'hash': self.hash,
            'uri': self.uri,
            'creators': self.creators.dict(),
            'copyrighters': self.copyrighters.dict(),
        }
class NFTID:
    """Identifier of one NFT: collection symbol plus a positive index."""

    def __init__(self, collection, idx):
        assert idx.value > 0, 'idx must be over zero; NFTID'
        self.hint = _hint(MNFT_NFT_ID)
        self.collection = collection
        self.idx = idx

    def bytes(self):
        """Canonical byte form: collection || idx."""
        return concatBytes(self.collection.encode(), self.idx.bytes())

    def dict(self):
        """JSON-serializable dict form of the id."""
        return {
            '_hint': self.hint.hint,
            'collection': self.collection,
            'idx': self.idx.value,
        }
| ProtoconNet/mitum-py-util | src/mitumc/operation/nft/base.py | base.py | py | 5,368 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "common._hint",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "hint.MNFT_COLLECTION_REGISTER_FORM",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "key.Address",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "common.... |
73885161703 | from PyQt5 import QtCore
from PyQt5.QtCore import QObject, QThreadPool, pyqtSignal
from PyQt5.QtWidgets import QWidget, QScrollArea
from cvstudio.util import GUIUtilities
from cvstudio.view.widgets import ImageButton
from cvstudio.view.widgets.loading_dialog import QLoadingDialog
from cvstudio.view.widgets.response_grid import ResponseGridLayout, GridCard
from cvstudio.view.wizard import ModelWizard
class ModelsGridWidget(QWidget, QObject):
    """Grid of model cards; currently shows only the 'new model' button."""
    # Emitted when the user clicks the new-item button.
    new_item_action = pyqtSignal()

    def __init__(self, parent=None):
        super(ModelsGridWidget, self).__init__(parent)
        self.grid_layout = ResponseGridLayout()
        self.grid_layout.setAlignment(QtCore.Qt.AlignTop)
        self.grid_layout.cols = 8
        self.setLayout(self.grid_layout)
        self._entries = None

    def build_new_button(self):
        """Create the grid card holding the 'new model' image button."""
        new_item_widget: GridCard = GridCard(with_actions=False, with_title=False)
        btn_new_item = ImageButton(GUIUtilities.get_icon("new_folder.png"))
        # Re-emit the button click as this widget's new_item_action signal.
        btn_new_item.clicked.connect(lambda: self.new_item_action.emit())
        new_item_widget.body = btn_new_item
        return new_item_widget

    def bind(self):
        """Rebuild the card list (currently just the new-item button) and refresh."""
        cards_list = []
        new_item_button = self.build_new_button()
        cards_list.append(new_item_button)
        self.grid_layout.widgets = cards_list
        super(ModelsGridWidget, self).update()
class ModelsTabWidget(QScrollArea):
    """Scrollable tab hosting the models grid; opens the model wizard on 'new'."""

    def __init__(self, parent=None):
        super(ModelsTabWidget, self).__init__(parent)
        self.setCursor(QtCore.Qt.PointingHandCursor)
        self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
        self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.data_grid = ModelsGridWidget()
        self.data_grid.new_item_action.connect(self.data_grid_new_item_action_slot)
        self.setWidget(self.data_grid)
        self.setWidgetResizable(True)
        # Thread pool and loading dialog are created here but not yet used by load().
        self._thread_pool = QThreadPool()
        self._loading_dialog = QLoadingDialog()
        self.load()

    def data_grid_new_item_action_slot(self):
        """Open the 'new model' wizard modally."""
        new_model_wizard = ModelWizard()
        new_model_wizard.exec_()

    def load(self):
        """(Re)bind the grid contents."""
        self.data_grid.bind()
| haruiz/CvStudio | cvstudio/view/widgets/tab_models.py | tab_models.py | py | 2,168 | python | en | code | 34 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QObject",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "... |
2028059224 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 24 06:56:45 2018
@author: Javier Alejandro Acevedo Barroso
"""
import numpy as np
import matplotlib.pyplot as plt
x_obs = np.array([-2.0,1.3,0.4,5.0,0.1, -4.7, 3.0, -3.5,-1.1])
y_obs = np.array([ -1.931, 2.38, 1.88, -24.22, 3.31, -21.9, -5.18, -12.23, 0.822])
sigma_y_obs = ([ 2.63, 6.23, -1.461, 1.376, -4.72, 1.313, -4.886, -1.091, 0.8054])
plt.errorbar(x_obs, y_obs, yerr=sigma_y_obs, fmt='o')
def model(x, a, b, c):
    """Quadratic model y = a*x^2 + b*x + c (works elementwise on numpy arrays)."""
    quadratic_term = a * x * x
    linear_term = b * x
    return quadratic_term + linear_term + c

def loglikelihood(x_obs, y_obs, sigma_y_obs, a, b, c):
    """Gaussian log-likelihood (up to an additive constant) of the data under model()."""
    residuals = (y_obs - model(x_obs, a, b, c)) / sigma_y_obs
    return -0.5 * np.sum(residuals ** 2)
def logprior(a, b, c):
    """Improper flat prior: every (a, b, c) has log-density 0.

    A bounded uniform prior (finite box in a, b, c) used to live here but is
    disabled, so the posterior is proportional to the likelihood alone.
    """
    return 0
N= 50000
lista_a = [np.random.random()]
lista_b = [np.random.random()]
lista_c = [np.random.random()]
logposterior = [loglikelihood(x_obs, y_obs, sigma_y_obs, lista_a[0], lista_b[0], lista_c[0]) + logprior(lista_a[0], lista_b[0], lista_c[0])]
sigma_delta_a = 0.2
sigma_delta_b = 1
sigma_delta_c = 1.0
# Metropolis-Hastings sampling loop over (a, b, c).
for i in range(1, N):
    propuesta_a = lista_a[i-1] + np.random.normal(loc=0.0, scale=sigma_delta_a)
    propuesta_b = lista_b[i-1] + np.random.normal(loc=0.0, scale=sigma_delta_b)
    # BUG FIX: the proposal for c must start from the previous c sample;
    # it was mistakenly proposed from lista_b[i-1].
    propuesta_c = lista_c[i-1] + np.random.normal(loc=0.0, scale=sigma_delta_c)
    logposterior_viejo = loglikelihood(x_obs, y_obs, sigma_y_obs, lista_a[i-1], lista_b[i-1], lista_c[i-1]) + logprior(lista_a[i-1], lista_b[i-1], lista_c[i-1])
    logposterior_nuevo = loglikelihood(x_obs, y_obs, sigma_y_obs, propuesta_a, propuesta_b, propuesta_c) + logprior(propuesta_a, propuesta_b, propuesta_c)
    # Acceptance probability, capped at 1.
    r = min(1, np.exp(logposterior_nuevo - logposterior_viejo))
    alpha = np.random.random()
    if alpha < r:
        # Accept: record the proposed point.
        lista_a.append(propuesta_a)
        lista_b.append(propuesta_b)
        lista_c.append(propuesta_c)
        logposterior.append(logposterior_nuevo)
    else:
        # Reject: repeat the previous sample.
        lista_a.append(lista_a[i-1])
        lista_b.append(lista_b[i-1])
        lista_c.append(lista_c[i-1])
        logposterior.append(logposterior_viejo)
lista_a = np.array(lista_a)
lista_b = np.array(lista_b)
lista_c = np.array(lista_c)
logposterior = np.array(logposterior)
realx = np.linspace(-5,5,100)
#rta = (lista_a.argmax(),lista_b.argmax(),lista_c.argmax())
rta = (lista_a.mean(),lista_b.mean(),lista_c.mean())
plt.plot(realx, model(realx,rta[0],rta[1],rta[2]))
plt.title("a = %.3f b = %.3f c = %.3f" % (rta))
h2 = plt.figure()
#plt.plot(lista_a[100:], label='pendiente')
#plt.plot(lista_b[100:], label='intercepto')
#plt.plot(lista_c[100:], label='c')
#plt.plot(logposterior[100:], label='loglikelihood')
#plt.legend()
h2 = plt.figure()
plt.plot(lista_a, lista_b, alpha=0.5)
plt.scatter(lista_a, lista_b, alpha=0.4, c=np.exp(logposterior))
plt.colorbar()
h2 = plt.figure()
plt.plot(lista_b, lista_c, alpha=0.5)
plt.scatter(lista_a, lista_b, alpha=0.4, c=np.exp(logposterior))
plt.colorbar()
h2 = plt.figure()
plt.plot(lista_a, lista_c, alpha=0.5)
plt.scatter(lista_a, lista_b, alpha=0.4, c=np.exp(logposterior))
plt.colorbar() | ClarkGuilty/2018 | metodosComputacionales2/JavierAcevedo_Ejercicio6.py | JavierAcevedo_Ejercicio6.py | py | 3,157 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.errorbar",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot"... |
20867944972 | import numpy as np
from utils import plot_output
# Defining parameters
N = 500  # grid size in pixels per side
L = 5 # Topological charge number
# Complex transmission function of the plate (filled below).
A3 = np.zeros((N, N), dtype='complex_')
# Constructing SPP
x = np.array([i for i in range(N)])
y = np.array([i for i in range(N)])
X, Y = np.meshgrid(x, y)
# Azimuth angle and radius measured from the grid centre (N/2, N/2).
theta = np.arctan2((X - N/2), (Y - N/2))
r = np.sqrt((X - N/2) * (X - N/2) + (Y - N/2) * (Y - N/2))
A1 = L * (theta + np.pi)  # spiral phase ramp: L full turns over the azimuth
A2 = np.fmod(A1, 2*np.pi)  # wrap the phase into one 2*pi period
# Binarize the phase: exp(i*pi) for the first half-period, exp(0) otherwise.
for p in range(N):
    for q in range(N):
        if np.fmod(A2[p, q], 2*np.pi) <= np.pi:
            A3[p, q] = np.exp(1j * np.pi)
        else:
            A3[p, q] = np.exp(1j * 0)
A3[r > 30] = 0  # zero everything outside a radius of 30 pixels
plot_output(A3, N, angle=True, spiral=True)
{
"api_name": "numpy.zeros",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number":... |
72639783783 | # https://pypi.org/project/RPi.bme280/
import smbus2
import bme280
import syslog
import threading
def logmsg(level, msg):
    """Write msg to syslog at the given level, tagged with the current thread name."""
    # threading.current_thread().name replaces the deprecated
    # currentThread().getName() aliases (removed-in-progress since Python 3.10).
    syslog.syslog(level, 'station: {}: {}'.format(threading.current_thread().name, msg))
def logdbg(msg):
    # Convenience wrapper: debug-level syslog entry.
    logmsg(syslog.LOG_DEBUG, msg)
def loginf(msg):
    # Convenience wrapper: info-level syslog entry.
    logmsg(syslog.LOG_INFO, msg)
def logerr(msg):
    # Convenience wrapper: error-level syslog entry.
    logmsg(syslog.LOG_ERR, msg)
port = 1
address = 0x76
bus = smbus2.SMBus(port)
calibration_params = bme280.load_calibration_params(bus, address)
import numpy as np
import time as tm
import os
#with open('station.log', 'w') as f:
# pass
target_t_f = 85.
t_safe = 90.
from importlib import reload
cadence = 5 # seconds
last_time_s = None
last_error = None
while True:
data = bme280.sample(bus, address, calibration_params)
t_f = data.temperature * 9. / 5 + 32.
with open('status', 'r') as f:
status = f.readline()
output = '{:30} {:10s} {:5.2f} {:8.2f} {:5.2f}'.format(str(data.timestamp).replace(' ','_'), status, t_f, data.pressure, data.humidity)
with open('station.log', 'a') as f:
#f.write('{} error={:.3f} dt={:6.3f}s p={:.3f} i={:.3f} d={:.3f} pid={:.3f}'.format(output, error, dt, p, i, d, pid))
f.write('{}\n'.format(output))
time_s = tm.mktime(data.timestamp.timetuple())
if t_f > t_safe:
print('temperature {:.2f} > safe temperature {:.2f}; safe mode'.format(t_f, t_safe))
os.system('./safe')
#elif t_f < target_t_f:
# os.system('./on')
#else:
# os.system('./off')
else:
error = t_f - target_t_f
kp = 1
ki = kp / 300 # 5-minute integration time
kd = kp * 300 # 5-minute derivative time
p = kp * error
if last_time_s:
dt = time_s - last_time_s
i += ki * 0.5 * (error + last_error) * dt
d = kd * (error - last_error) / dt
else:
dt = 0
i = 0
d = 0
# now instead of comparing t_f to target_t_f, compare p+i+d to zero
last_time_s = time_s
last_error = error
pid = p + i + d
full_output = '{} error={:.3f} dt={:6.3f}s p={:.3f} i={:.3f} d={:.3f} pid={:.3f}'.format(output, error, dt, p, i, d, pid)
print(full_output)
loginf(full_output)
if pid > 0:
try:
os.system('./off')
except Exception as e:
logerr(str(e.args[0]))
with open('station.e', 'a') as ferr:
ferr.write('{} {}\n'.format(str(e), full_output))
elif pid < 0:
try:
os.system('./on')
except Exception as e:
logerr(str(e.args[0]))
with open('station.e', 'a') as ferr:
ferr.write('{} {}\n'.format(str(e), full_output))
else:
logerr('error {} not a number'.format(pid))
raise ValueError('error not a number')
tm.sleep(cadence)
| chkvch/fermentation_station | station.py | station.py | py | 2,558 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "syslog.syslog",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "threading.currentThread",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "syslog.LOG_DEBUG",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "syslog.LOG_... |
4124871021 | import abc
import numpy as np
from nn.utils.label_mapper import LabelMapper
from datetime import datetime
class LearningAlgorithmTypes(object):
    """String constants naming the supported learning algorithms."""
    SGD = "stochastic gradient descent"
class LearningAlgorithmFactory(object):
    """Factory mapping a LearningAlgorithmTypes value to a learner instance."""

    @staticmethod
    def create_learning_algorithm_from_type(learning_algorithm_type):
        """Return a new learner for the given type; raise for unknown types."""
        if learning_algorithm_type != LearningAlgorithmTypes.SGD:
            raise NotImplementedError("Requested learning algorithm type not yet implemented")
        return SGD()
class AbstractLearningAlgorithm(object):
    """Interface for learning algorithms; concrete classes implement learn()."""
    # Python 2 style ABC declaration; has no enforcement effect on Python 3.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def learn(self, *args, **kwargs):
        # Signature is defined by the concrete subclass.
        raise NotImplementedError()
class SGD(AbstractLearningAlgorithm):
    """Mini-batch stochastic gradient descent.

    Shuffles the training set each epoch, splits it into mini-batches, and
    steps the network's weights and biases opposite the cost gradient.
    """

    def __init__(self):
        super(SGD, self).__init__()

    def learn(self, network, training_data_set, number_of_epochs, learning_rate, size_of_batch, **kwargs):
        """Train `network` in place.

        :param network: object exposing weights, biases, neuron, cost_computer
        :param training_data_set: dataset with a mutable data_instances list
        :param number_of_epochs: number of full passes over the data
        :param learning_rate: gradient step scale
        :param size_of_batch: approximate mini-batch size
        """
        for epoch in range(number_of_epochs):
            print("Epoch: " + str(epoch) + " Start time: " + str(datetime.now()))
            np.random.shuffle(training_data_set.data_instances)
            # '//' keeps the section count an int under Python 3 true division.
            number_of_batches = len(training_data_set.data_instances) // size_of_batch
            for batch in np.array_split(training_data_set.data_instances, number_of_batches):
                self.__update_weights_and_bias(network, batch, learning_rate)

    def __update_weights_and_bias(self, network, batch, learning_rate):
        """Apply one gradient step averaged over `batch`."""
        number_of_training_instances = len(batch)
        # Gradient accumulators: real lists (not lazy Python 3 map objects),
        # so that indexed assignment in __back_propagate works.
        updated_biases = [np.zeros(layer_biases.shape) for layer_biases in network.biases]
        updated_weights = [np.zeros(layer_weights.shape) for layer_weights in network.weights]
        for data_instance in batch:
            # Partial derivatives of the cost w.r.t. each weight and bias:
            # these are the gradient's components for one sample.
            delta_biases, delta_weights = self.__back_propagate(network, data_instance.features_values_vector,
                                                                data_instance.label)
            # Accumulate per-sample gradients over the batch.
            updated_biases = [acc + delta for acc, delta in zip(updated_biases, delta_biases)]
            updated_weights = [acc + delta for acc, delta in zip(updated_weights, delta_weights)]
        # Step opposite the gradient, averaged over the batch size.
        network.weights = [current_weight - (learning_rate / number_of_training_instances) * new_weight
                           for current_weight, new_weight in zip(network.weights, updated_weights)]
        network.biases = [current_bias - (learning_rate / number_of_training_instances) * new_bias
                          for current_bias, new_bias in zip(network.biases, updated_biases)]

    def __back_propagate(self, network, output_vector, expected_output_label):
        """Return (bias_gradients, weight_gradients) for a single training sample."""
        last_layer = -1
        updated_biases = [np.zeros(layer_biases.shape) for layer_biases in network.biases]
        updated_weights = [np.zeros(layer_weights.shape) for layer_weights in network.weights]
        # Forward pass, recording each layer's pre-activation input and output.
        output_vectors_by_layer = [output_vector.reshape(1, len(output_vector))]
        input_vectors_by_layer = []
        for bias, weights in zip(network.biases, network.weights):
            next_layer_input = np.dot(output_vector, weights) + bias.T
            input_vectors_by_layer.append(next_layer_input)
            output_vector = network.neuron.compute(next_layer_input)
            output_vectors_by_layer.append(output_vector)
        # Output-layer delta: cost derivative times activation derivative.
        delta = (network.cost_computer.compute_cost_derivative(
            output_vector=output_vectors_by_layer[last_layer],
            expected_output_vector=LabelMapper().map_label_to_vector(expected_output_label))
            * network.neuron.compute_derivative(input_vectors_by_layer[last_layer]))
        updated_biases[last_layer] = delta.T
        updated_weights[last_layer] = np.dot(output_vectors_by_layer[last_layer - 1].T, delta)
        # Propagate the delta backwards (range replaces Python 2's xrange).
        for layer_index in range(2, network.number_of_layers):
            z = input_vectors_by_layer[-layer_index]
            sp = network.neuron.compute_derivative(z)
            delta = np.dot(delta, network.weights[-layer_index + 1].T) * sp
            updated_biases[-layer_index] = delta.T
            updated_weights[-layer_index] = np.dot(output_vectors_by_layer[-layer_index - 1].T, delta)
        return updated_biases, updated_weights
| ADozois/ML_Challenge | nn/models/learning/learning_algorithms.py | learning_algorithms.py | py | 4,743 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "abc.ABCMeta",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "abc.abstractmethod",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "dateti... |
26533716957 | '''
Fig1. Cascade Matrix heatmap
'''
import numpy as np
import matplotlib.pyplot as plt
# from numba import jit
import matplotlib.colors as colors
# @jit(nopython = True)
def random_cascade_matrix(mu_L, mu_U, sigma_L, sigma_U, gamma, N = 250):
    """Draw an N x N random cascade interaction matrix.

    Lower-triangle entry (i, j) gets mean mu_L/N and spread sigma_L/sqrt(N);
    the mirrored upper entry (j, i) gets mean mu_U/N, spread sigma_U/sqrt(N),
    with its noise correlated to the lower entry via coefficient gamma.
    """
    J = np.zeros((N, N))
    for row in range(N):
        for col in range(row):
            z1 = np.random.normal(0, 1)
            z2 = np.random.normal(0, 1)
            J[row, col] = mu_L/N + sigma_L*z1/np.sqrt(N)
            J[col, row] = mu_U/N + sigma_U*(gamma*z1 + np.sqrt(1 - gamma**2)*z2)/np.sqrt(N)
    return J
fig, axs = plt.subplots(1, 3, figsize = (10, 3))
plt.rc('text', usetex=True)
plt.rcParams.update({'font.size': 12})
# for display
def block_matrix(mu, sizes):
    """Expand the block-mean matrix mu into a dense matrix.

    Block (i, j) of shape (sizes[i], sizes[j]) is filled with mu[i, j];
    used purely for the cartoon display.
    """
    N = np.sum(sizes)
    # Starting offset of each block along either axis.
    offsets = np.hstack((np.array([0]), np.cumsum(sizes)[:-1]))
    J = np.zeros((N, N))
    n_blocks = np.size(sizes)
    for bi in range(n_blocks):
        for bj in range(n_blocks):
            J[offsets[bi]:offsets[bi] + sizes[bi],
              offsets[bj]:offsets[bj] + sizes[bj]] = mu[bi, bj]
    return J
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
    """Return a new colormap that uses only the [minval, maxval] band of cmap."""
    samples = cmap(np.linspace(minval, maxval, n))
    label = 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval)
    return colors.LinearSegmentedColormap.from_list(label, samples)
arr = np.linspace(0, 50, 100).reshape((10, 10))
cmap = plt.get_cmap('viridis')
new_cmap = truncate_colormap(cmap, 0.12, 0.98)
mu1 = np.random.uniform(0, 1, 16).reshape((4, 4))
sizes1 = np.array([5, 4, 7, 3])
mu_cascade = np.array([[1, 2, 2, 2], [0, 1, 2, 2], [0, 0, 1, 2], [0, 0, 0, 1]])
sizes_cascade = np.array([7, 5, 6, 4])
J_cascade = block_matrix(mu_cascade, sizes_cascade)
J1 = block_matrix(mu1, sizes1)
axs[0].matshow(J1, cmap = "viridis")
axs[1].matshow(J_cascade, cmap = new_cmap)
axs[2].matshow(random_cascade_matrix(-1, 1, 0.0, 0.0, 0.0, 500), cmap = new_cmap)
# print(random_cascade_matrix)
# for i in range(3):
# Turn off tick labels
axs[0].set_xticklabels([])
axs[0].set_xticks([])
axs[1].set_xticklabels([])
axs[1].set_xticks([])
axs[2].set_xticklabels([])
axs[2].set_xticks([])
axs[0].get_yaxis().set_visible(False)
axs[1].get_yaxis().set_visible(False)
axs[2].get_yaxis().set_visible(False)
axs[0].set_xlabel("Block Structured")
axs[1].set_xlabel("Cascade Model, Finite B")
axs[2].set_xlabel("Cascade Model, infinite B")
plt.tight_layout()
# panel labels
axs[0].annotate('(a)', xy=(0.05, 0.05), xycoords='axes fraction', zorder = 10, ma = 'center', bbox=dict(facecolor='white', alpha=0.6, boxstyle='round'))
axs[1].annotate('(b)', xy=(0.05, 0.05), xycoords='axes fraction', zorder = 10, ma = 'center', bbox=dict(facecolor='white', alpha=0.6, boxstyle='round'))
axs[2].annotate('(c)', xy=(0.05, 0.05), xycoords='axes fraction', zorder = 10, ma = 'center', bbox=dict(facecolor='white', alpha=0.6, boxstyle='round'))
fig.tight_layout()
plt.savefig("Interaction Matrix Cartoon.pdf") | LylePoley/Cascade-Model | Figures/Fig1.py | Fig1.py | py | 2,924 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.normal... |
15982937093 | import numpy as np
from matplotlib.pyplot import *
from scipy import interpolate
dat=np.loadtxt("/home/davidvartanyan/presupernova.dat")
rad=dat[:,2]
rho1=dat[:,4]
i=0
while rad[i]/10**9 < 1:
i+=1
xlim([rad[0],rad[i]])
#loglog(rad[0:i],rho1[0:i],'k')
npoints=1000
radmin=rad[0]
radmax=10**9
radius=np.linspace(0.1,
radmax,npoints)
dr=radius[1]-radius[0]
tck = interpolate.splrep(rad,rho1)
rho2= interpolate.splev(radius,tck,der=0)
#xlabel('radius [$cm$]')
#ylabel('Density [$g/cm^3$]')
#show()
ggrav = 6.67e-8  # Newton's gravitational constant in cgs units

#######################################
# function definitions

def tov_RHS(x, rho, z):
    """RHS of the spherically-symmetric Poisson equation as two first-order ODEs.

    phi' = z and z' = 4*pi*G*rho - 2*z/x; returned as array([phi', z']).
    """
    dphi_dr = z
    dz_dr = 4*np.pi*ggrav*rho - 2*z/x
    return np.array([dphi_dr, dz_dr])
def tov_integrate_FE(rad, dr, rho, z, phi):
    """Advance the state (phi, z) one forward-Euler step of size dr at radius rad."""
    state = np.array([phi, z], dtype=float)
    # forward Euler integrator
    stepped = state + dr*tov_RHS(rad, rho, z)
    # Unpack back into the (phi, z) ordering used by the caller.
    return (stepped[0], stepped[1])
#######################################
# set up variables
z1f = np.zeros(npoints)
phif = np.zeros(npoints)
# set up boundary values
z1f[0] = 0.0
phif[0] = 0.0
for n in range(npoints-1):
(phif[n+1],z1f[n+1]) = tov_integrate_FE(radius[n],
dr,rho2[n],z1f[n],phif[n])
dm=4*np.pi*radius**2*rho2*dr
M=np.sum(dm)
phiBC2=-ggrav*M/radius[npoints-1]
phiana=2./3*np.pi*ggrav*rho2*(radius**2-3*((10**9.)**2))
phifin=phif+phiBC2-phif[npoints-1]
#p1,=loglog(radius,-phifin)
#p2,=loglog(radius,-phiana)
xlabel('radius[cm]')
#ylabel('Phi')
#legend([p1,p2], ['Numerical Potential', 'Analytical Potential'])
ylabel('Error')
loglog(radius,np.abs((phiana-phifin)/phiana))
show()
#radmax2=np.asarray([10**9]*len(radius))
#origin=np.asarray([4.2118*10**20]*len(radius))
#plot(radius,phif+origin)
#print .67*np.pi*ggrav*rho2*(radius**2-3*radmax2**2)
#print -0.67*np.pi*ggrav*rho2*(radius**2-3*radmax2**2) | dvartany/ay190 | ws12/ws12.py | ws12.py | py | 2,041 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.loadtxt",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate.splrep",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "scipy.interpolat... |
8868610859 | import logging
from aiogram import Dispatcher, types
from aiogram.dispatcher import FSMContext
import aiogram.utils.markdown as fmt
from aiogram.types.message import ContentType
from .. import userchoice
def check_none_name(name):
    """Return name unchanged, or an empty string when name is None.

    Telegram users may lack a first or last name; this normalizes None to ''.
    """
    return name if name is not None else ''
async def starting_message(message: types.Message, state: FSMContext):
    """
    Handle /start: reset any FSM state, register/refresh the user's record,
    and send the greeting with a link to the Elo-rating article.

    :param message: incoming message
    :param state: FSM state context
    """
    await state.finish()
    first_name = check_none_name(message.from_user.first_name)
    last_name = check_none_name(message.from_user.last_name)
    # NOTE(review): check_name() presumably creates/updates the stored user
    # record — confirm against userchoice.UserChoice.
    user = userchoice.UserChoice(message.from_user.id, first_name + ' ' + last_name)
    user.check_name()
    logging.info(f'Пользователь {message.from_user.first_name} {message.from_user.last_name} залогинился')
    await message.answer("Привет! Этот бот сравнивает позволяет сравнить фамилии по "
                         "<a href='https://ru.wikipedia.org/wiki/"
                         "%D0%A0%D0%B5%D0%B9%D1%82%D0%B8%D0%BD%D0%B3_%D0%AD%D0%BB%D0%BE'>рейтингу Эло.</a> "
                         "Для перечня команд набери /help", parse_mode=types.ParseMode.HTML)
async def helping_message(message: types.Message):
    """
    Handle /help: list the available bot commands.

    :param message: incoming message
    """
    await message.answer(fmt.text("Я знаю следующие команды:", "/rate - выбрать более смешного из пары",
                                  "/rating - показать список лидеров", sep='\n'))
async def wrong_command_message(message: types.Message):
    """
    Fallback handler: log the unrecognized input and point the user to /help.

    :param message: incoming message
    """
    logging.info(f'Пользователь {message.from_user.first_name} {message.from_user.last_name} пишет {message.text}')
    await message.answer("Вы ввели неверную команду. Для того, чтобы узнать, "
                         "какие команды можно использовать, наберите /help")
def register_handlers_common(dp: Dispatcher):
    """
    Register the common message handlers on the dispatcher.

    :param dp: dispatcher
    """
    dp.register_message_handler(starting_message, commands="start", state="*")
    dp.register_message_handler(helping_message, commands="help")
    # Catch-all handler (any content type) — registered last so it only fires
    # when no other handler matched.
    dp.register_message_handler(wrong_command_message, content_types=ContentType.ANY)
| KFeyn/naming_bot | app/handlers/common.py | common.py | py | 2,897 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "aiogram.types.Message",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "aiogram.dispatcher.FSMContext",
"line_number": 18,
"usage_type": "name"
},
{
"api_name":... |
36289889662 | import os
import os.path
import shutil
import tarfile
import hashlib
import argparse
import fnmatch
import sys
STR_EMPTY = ''
STR_SLASH = '/'
STR_POINT = '.'
STR_TAB = '\t'
STR_EOL = '\n'
STR_CAT_EXT = '.cat'
STR_TAR_EXT = '.tar'
STR_GZ_EXT = '.tar.gz'
STR_BZ2_EXT = '.tar.bz2'
STR_DIR_LIST = 'DIR_LIST'
STR_DIR = 'DIR'
STR_DIR_END = 'DIR_END'
STR_FILE = 'FILE'
STR_FILE_END = 'FILE_END'
STR_DIR_LIST_END = 'DIR_LIST_END'
STR_HASH_LIST = 'HASH_LIST'
STR_HASH = 'HASH'
STR_HASH_LIST_END = 'HASH_LIST_END'
def calc_hash(path): # IOError
    """Return the hex SHA-256 digest of the file at `path`.

    Raises IOError if the file cannot be opened/read.
    """
    h = hashlib.sha256()
    with open(path, 'rb') as f: # IOError
        # Read in 64 KiB chunks instead of h.block_size (64 bytes) —
        # same digest, far fewer read calls on large files.
        for block in iter(lambda: f.read(65536), b''):
            h.update(block)
    return h.hexdigest()
class CatalogFormatError(Exception):
    """Raised when a catalog file does not follow the expected DIR_LIST/HASH_LIST format."""
    pass
class HashNameError(Exception):
    """Raised by hash_name() when a FileInfo is a directory or lacks hash/size."""
    pass
class FileInfo():
    """Per-path catalog record: directory flag plus lazily-filled hash and size."""

    def __init__(self, is_dir):
        self.isDir = is_dir
        # Selection flag toggled by include()/include_hierarchy().
        self.marked = False
        # Hash and size stay unset (STR_EMPTY / -1) until computed right
        # before checking or archiving the file.
        self.hash = STR_EMPTY
        self.size = -1
def hash_name(info): # HashNameError
    """Return the '<sha256>.<size>' member name for a regular-file record.

    Raises HashNameError for directories or records whose hash/size is unset.
    """
    not_hashable = info.isDir or info.hash == STR_EMPTY or info.size == -1
    if not_hashable:
        raise HashNameError()
    return info.hash + STR_POINT + str(info.size)
class FileList(): # OSError, IOError, CatalogFormatError
    """Catalogue of files and directories.

    ``self.dict`` maps a relative path (always starting with '/') to a
    FileInfo record.  The catalogue can be filled from a directory tree
    (read_dir_list), filtered (include / include_hierarchy / exclude)
    and serialized to / from a catalogue file (save / load).
    """
    def __init__(self):
        self.dict = {}
    def _get_dir_list(self, root_dir, rel_dir=STR_EMPTY): # OSError
        """Recursively scan root_dir and record every dir/file found.

        Keys are paths relative to root_dir.  Directory mtimes are
        recorded immediately; file mtime/size/hash are filled in later,
        right before checking/archiving (see sh_create).
        """
        if rel_dir == STR_EMPTY:
            # top-level call: start from an empty catalogue
            self.dict.clear()
        current_dir = root_dir + rel_dir
        current_dir_list = os.listdir(current_dir) # OSError
        for f in current_dir_list:
            full_path = current_dir + STR_SLASH + f
            rel_path = rel_dir + STR_SLASH + f
            if os.path.isdir(full_path): # OSError
                path_info = FileInfo(True)
                path_info.mtime = int(os.path.getmtime(full_path)) # OSError
                self.dict[rel_path] = path_info
                self._get_dir_list(root_dir, rel_path)
            elif os.path.isfile(full_path): # OSError
                path_info = FileInfo(False)
                # read mtime, size and hash directly before file checking / archiving
                self.dict[rel_path] = path_info
    def read_dir_list(self, source_path):
        """Fill the catalogue from source_path, printing read errors."""
        try:
            self._get_dir_list(source_path)
        except IOError as e:
            print('ERROR: Can not read: ' + e.filename)
            return
    def _unmark_all(self):
        """Clear the 'marked' flag on every record."""
        for key in self.dict:
            self.dict[key].marked = False
    # include only matched files/folders
    # use for "find"
    def include(self, pattern_list):
        """Keep only entries matching at least one fnmatch pattern.

        An empty/None pattern list means "keep everything".
        """
        if (pattern_list is not None) and (len(pattern_list) > 0):
            # unmark all records
            self._unmark_all()
            # mark included
            for pattern in pattern_list:
                for key in self.dict:
                    if fnmatch.fnmatch(key, pattern):
                        self.dict[key].marked = True
            # remove not marked (not included)
            key_list = list(self.dict.keys())
            for key in key_list:
                if not self.dict[key].marked:
                    del self.dict[key]
    # include not only matched files/folders but also all parent folders for matched files/folders
    # use for "create" and "restore"
    def include_hierarchy(self, pattern_list):
        """Like include(), but also keep every parent folder of a match."""
        if (pattern_list is not None) and (len(pattern_list) > 0):
            # unmark all records
            self._unmark_all()
            # mark included
            key_list = list(self.dict.keys())
            for pattern in pattern_list:
                for key in key_list:
                    if fnmatch.fnmatch(key, pattern):
                        self.dict[key].marked = True
                        # mark folders with marked files/folders
                        # NOTE(review): self.dict[d] raises KeyError if a parent
                        # folder is missing from the catalogue - callers appear to
                        # rely on complete listings (see fix_hierarchy); confirm
                        d = os.path.dirname(key)
                        while d != STR_SLASH:
                            self.dict[d].marked = True
                            d = os.path.dirname(d)
            # remove not marked (not included)
            key_list = list(self.dict.keys())
            for key in key_list:
                if not self.dict[key].marked:
                    del self.dict[key]
    # check and if not exist all parent folders for files/folders in list
    def fix_hierarchy(self):
        """Re-create missing parent-directory records for all entries."""
        key_list = list(self.dict.keys())
        for key in key_list:
            d = os.path.dirname(key)
            while d != STR_SLASH:
                # NOTE(review): membership is tested against the pre-scan
                # key_list, so a parent added inside this loop may be
                # re-created on a later iteration (harmless overwrite)
                if d not in key_list:
                    # built as FileInfo(False) and then flipped to a
                    # directory record below
                    path_info = FileInfo(False)
                    path_info.marked = False # for 'include'
                    path_info.isDir = True
                    path_info.mtime = self.dict[key].mtime
                    self.dict[d] = path_info
                d = os.path.dirname(d)
    def exclude(self, pattern_list):
        """Drop entries matching at least one fnmatch pattern."""
        if (pattern_list is not None) and (len(pattern_list) > 0):
            for pattern in pattern_list:
                key_list = list(self.dict.keys())
                for key in key_list:
                    if fnmatch.fnmatch(key, pattern):
                        del self.dict[key]
    def save(self, file_object): # IOError
        """Write the catalogue section (DIR_LIST ... DIR_LIST_END).

        One marker/field per line; entries sorted by path.
        """
        # file_object = open('file.name', mode='w', encoding='utf-8')
        file_object.write(STR_DIR_LIST + STR_EOL)
        key_list = list(self.dict.keys())
        key_list.sort()
        for key in key_list:
            if self.dict[key].isDir:
                file_object.write(STR_DIR + STR_EOL)
                file_object.write(key + STR_EOL)
                file_object.write(str(self.dict[key].mtime) + STR_EOL)
                file_object.write(STR_DIR_END + STR_EOL)
            else:
                file_object.write(STR_FILE + STR_EOL)
                file_object.write(key + STR_EOL)
                file_object.write(str(self.dict[key].mtime) + STR_EOL)
                file_object.write(str(self.dict[key].size) + STR_EOL)
                file_object.write(self.dict[key].hash + STR_EOL)
                file_object.write(STR_FILE_END + STR_EOL)
        file_object.write(STR_DIR_LIST_END + STR_EOL)
    def load(self, file_object): # IOError, CatalogFormatError
        """Parse the DIR_LIST section written by save().

        Implemented as a line-driven state machine; any line that does
        not match the expected state raises CatalogFormatError.
        Parsing stops (returns) at DIR_LIST_END, leaving any following
        sections (e.g. HASH_LIST) unread.
        """
        # file_object = open('file.name', mode='r', encoding='utf-8')
        # parser states
        wait_list = 0
        wait_dir_file = 1
        wait_path = 2
        wait_mtime = 3
        wait_size = 4
        wait_hash = 5
        wait_dir_end = 6
        wait_file_end = 7
        self.dict.clear()
        file_object.seek(0, os.SEEK_SET)
        state = wait_list
        info_is_dir = False
        info_path = STR_EMPTY
        info_mtime = -1
        info_size = -1
        info_hash = STR_EMPTY
        for s in file_object:
            line = s.strip()
            if (state == wait_list) and (line == STR_DIR_LIST):
                state = wait_dir_file
            elif ((state == wait_dir_file) and
                  ((line == STR_DIR) or (line == STR_FILE) or (line == STR_DIR_LIST_END))):
                if line == STR_DIR:
                    info_is_dir = True
                    state = wait_path
                elif line == STR_FILE:
                    info_is_dir = False
                    state = wait_path
                elif line == STR_DIR_LIST_END:
                    return
            elif state == wait_path:
                info_path = line
                state = wait_mtime
            elif state == wait_mtime:
                # NOTE(review): int() may raise ValueError on a corrupt line,
                # which is not converted to CatalogFormatError - confirm
                info_mtime = int(line)
                if info_is_dir:
                    state = wait_dir_end
                else:
                    state = wait_size
            elif state == wait_size:
                info_size = int(line)
                state = wait_hash
            elif state == wait_hash:
                info_hash = line
                state = wait_file_end
            elif (state == wait_dir_end) and (line == STR_DIR_END):
                self.dict[info_path] = FileInfo(True)
                self.dict[info_path].mtime = info_mtime
                info_is_dir = False
                state = wait_dir_file
            elif (state == wait_file_end) and (line == STR_FILE_END):
                self.dict[info_path] = FileInfo(False)
                self.dict[info_path].mtime = info_mtime
                self.dict[info_path].size = info_size
                self.dict[info_path].hash = info_hash
                state = wait_dir_file
            else:
                raise CatalogFormatError() # CatalogFormatError
    def load_file(self, file_name):
        """Open file_name and load the catalogue, printing errors."""
        try:
            file_object = open(file_name, mode='r', encoding='utf-8')
            try:
                self.load(file_object)
            except IOError:
                print('ERROR: Can not read reference catalogue file!')
                return
            except CatalogFormatError:
                print('ERROR: Reference catalogue is damaged!')
                return
            finally:
                file_object.close()
        except IOError:
            print('ERROR: Can not open reference catalogue file!')
# HashList.dict maps:
#   key   = "<sha256 hash>.<file size>"  (built by hash_name())
#   value = basename of the archive whose volumes store that content
class HashList(): # IOError, CatalogFormatError
    """Maps "<hash>.<size>" keys (see hash_name) to the name of the
    archive that stores the corresponding file content."""
    def __init__(self):
        self.dict = {}
    def save(self, file_object): # IOError
        """Write the HASH_LIST section (tab-separated records, sorted)."""
        # file_object = open('file.name', mode='w', encoding='utf-8')
        file_object.write(STR_HASH_LIST + STR_EOL)
        key_list = list(self.dict.keys())
        key_list.sort()
        for key in key_list:
            file_object.write(STR_HASH + STR_TAB + key + STR_TAB + self.dict[key] + STR_EOL)
        file_object.write(STR_HASH_LIST_END + STR_EOL)
    def load(self, file_object): # IOError, CatalogFormatError
        """Parse the HASH_LIST section.

        Lines before the HASH_LIST marker (i.e. the DIR_LIST section of
        a combined catalogue file) are silently skipped.
        """
        # file_object = open('file.name', mode='r', encoding='utf-8')
        # parser states
        wait_list = 0
        wait_hash = 1
        self.dict.clear()
        file_object.seek(0, os.SEEK_SET)
        state = wait_list
        for s in file_object:
            line = s.strip()
            if (state == wait_list) and (line == STR_HASH_LIST):
                state = wait_hash
            elif state == wait_hash:
                if line == STR_HASH_LIST_END:
                    return
                else:
                    # expected record: HASH \t <key> \t <archive name>
                    lst = line.split(STR_TAB)
                    if (len(lst) == 3) and (lst[0] == STR_HASH):
                        self.dict[lst[1]] = lst[2]
                    else:
                        raise CatalogFormatError()
    def load_file(self, file_name):
        """Open file_name and load the hash list, printing errors."""
        try:
            file_object = open(file_name, mode='r', encoding='utf-8')
            try:
                self.load(file_object)
            except IOError:
                print('ERROR: Can not read reference catalogue file!')
                return
            except CatalogFormatError:
                print('ERROR: Reference catalogue is damaged!')
                return
            finally:
                file_object.close()
        except IOError:
            print('ERROR: Can not open reference catalogue file!')
# not correct for unicode file names
class TarFileWriter: # OSError, IOError, tarfile.TarError
    """Writes files into a multi-volume tar archive.

    Volumes are named "<name>.<part number><ext>" and limited to
    MaxPartSize bytes (rounded down to a whole tar RECORDSIZE).  A file
    that does not fit into the current volume is split: a member with
    the same name is stored in consecutive volumes and reassembled by
    TarFileReader.extract().
    """
    def __init__(self, name, max_part_size, arch_type='tar'):
        self.TarName = name
        self.PartNumber = 0
        self.PartSize = 0
        self.PartFile = None
        self.Closed = True
        # round the volume limit down to a whole tar record
        self.MaxPartSize = (max_part_size // tarfile.RECORDSIZE) * tarfile.RECORDSIZE
        self.Type = arch_type.lower()
        # BUGFIX: compare the normalized (lower-cased) type.  The original
        # lower-cased arch_type into self.Type but compared the raw value,
        # so e.g. 'TAR' was rejected with IOError.
        if self.Type == 'tar':
            self.Ext = STR_TAR_EXT
            self.Mode = 'w:'
        elif self.Type == 'gz':
            self.Ext = STR_GZ_EXT
            self.Mode = 'w:gz'
        elif self.Type == 'bz2':
            self.Ext = STR_BZ2_EXT
            self.Mode = 'w:bz2'
        else:
            raise IOError()
    def close(self): # IOError
        """Close the currently open volume, if any."""
        if not self.Closed:
            self.PartFile.close()
            self.PartFile = None
            self.Closed = True
    def __new_part(self): # IOError
        """Close the current volume and open the next one."""
        self.close()
        self.PartNumber += 1
        self.PartFile = tarfile.open(self.TarName + STR_POINT + str(self.PartNumber) + self.Ext, self.Mode)
        self.PartSize = 0
        self.Closed = False
    def add(self, file_path, tar_name): # OSError, IOError, tarfile.TarError
        """Append the file at file_path under member name tar_name,
        splitting it across volumes when it does not fit.

        :raises OSError, IOError, tarfile.TarError: on read/write failures
        """
        if self.Closed:
            self.__new_part()
        # prepare file object
        file_size = os.path.getsize(file_path) # OSError
        file_tar_info = self.PartFile.gettarinfo(file_path) # tarfile.TarError
        file_tar_info.name = tar_name
        with open(file_path, 'rb') as file_object: # IOError
            # copy file to tar; 3*BLOCKSIZE reserves the member header plus
            # the two finishing zero blocks of the tar format
            while (self.PartSize + file_size + 3*tarfile.BLOCKSIZE) > self.MaxPartSize:
                file_size_to_save = self.MaxPartSize - self.PartSize - 3*tarfile.BLOCKSIZE
                file_tar_info.size = file_size_to_save
                self.PartFile.addfile(file_tar_info, file_object) # tarfile.TarError
                self.PartSize = self.PartSize + tarfile.BLOCKSIZE + file_size_to_save
                assert (self.PartSize + 2*tarfile.BLOCKSIZE) == self.MaxPartSize
                self.__new_part()
                file_size -= file_size_to_save
            file_tar_info.size = file_size
            self.PartFile.addfile(file_tar_info, file_object) # tarfile.TarError
            # recalculate PartSize (member data is padded to BLOCKSIZE)
            self.PartSize = self.PartSize + tarfile.BLOCKSIZE + (file_size // tarfile.BLOCKSIZE) * tarfile.BLOCKSIZE
            if (file_size % tarfile.BLOCKSIZE) > 0:
                self.PartSize += tarfile.BLOCKSIZE
            assert (self.PartSize + 2*tarfile.BLOCKSIZE) <= self.MaxPartSize
            # start a fresh volume next time if this one is effectively full
            if (self.PartSize + 3*tarfile.BLOCKSIZE) >= self.MaxPartSize:
                self.close()
# not correct for unicode file names
class TarFileReader: # KeyError, IOError, tarfile.TarError
    """Reads members back from a multi-volume tar archive created by
    TarFileWriter.

    Compression is auto-detected from the extension of the first volume
    ("<name>.1.tar", ".tar.gz" or ".tar.bz2").  A member split across
    volumes is reassembled by concatenating the pieces stored under the
    same name in consecutive volumes.
    """
    def __init__(self, name):
        self.TarName = name
        self.PartNumber = 0
        self.PartFile = None
        self.Closed = True
        if os.path.isfile(name + '.1' + STR_TAR_EXT):
            self.Ext = STR_TAR_EXT
        elif os.path.isfile(name + '.1' + STR_GZ_EXT):
            self.Ext = STR_GZ_EXT
        elif os.path.isfile(name + '.1' + STR_BZ2_EXT):
            self.Ext = STR_BZ2_EXT
        else:
            raise IOError()
    def close(self): # IOError
        """Close the currently open volume, if any."""
        if not self.Closed:
            self.PartFile.close()
            self.PartFile = None
            self.Closed = True
    def __next_part(self): # IOError
        """Open the next volume (raises IOError when there is none)."""
        self.close()
        self.PartNumber += 1
        self.PartFile = tarfile.open(self.TarName + STR_POINT + str(self.PartNumber) + self.Ext)
        # BUGFIX: mark the reader as open.  The original left Closed True,
        # so close() never actually closed the volume (file handle leak).
        self.Closed = False
    def extract(self, tar_name, file_path): # KeyError, IOError, tarfile.TarError
        """Extract member tar_name into file_path, reassembling pieces
        that continue in later volumes.

        :raises KeyError: when no volume contains tar_name
        """
        self.PartNumber = 0
        # find the first volume that contains this member
        found = False
        no_file = False
        while not (found or no_file):
            try:
                self.__next_part()
                file_tar_info = self.PartFile.getmember(tar_name)
                found = True
            except IOError:
                # ran out of volumes
                no_file = True
            except KeyError:
                # not in this volume - try the next one
                pass
        if found:
            with open(file_path, 'wb') as file_object: # IOError
                while found:
                    # copy this volume's piece into the output file
                    tar_buffer = self.PartFile.extractfile(file_tar_info) # tarfile.TarError
                    file_size = file_tar_info.size
                    while file_size > 0:
                        if file_size > tarfile.BLOCKSIZE:
                            file_size_to_save = tarfile.BLOCKSIZE
                        else:
                            file_size_to_save = file_size
                        # the member stream is bounded, so the final read
                        # returns only the remaining bytes
                        file_object.write(tar_buffer.read(tarfile.BLOCKSIZE)) # IOError, tarfile.TarError
                        file_size = file_size - file_size_to_save
                    tar_buffer.close() # tarfile.TarError
                    # check whether the member continues in the next volume
                    try:
                        self.__next_part()
                        file_tar_info = self.PartFile.getmember(tar_name) # tarfile.TarError
                    except IOError:
                        found = False
                    except KeyError:
                        found = False
        else:
            raise KeyError()
def sh_create(sh_args):
    """Handler for the "create" sub-command.

    Backs up sh_args.source into a (possibly multi-volume) tar archive
    inside sh_args.repository, writing a catalogue file alongside.
    With a reference catalogue, files with unchanged mtime and size
    reuse the reference hash and are not re-archived (differential
    backup).
    """
    # check source
    if not os.path.isdir(sh_args.source):
        print('ERROR: Source not found!')
        return
    # check repository
    if not os.path.isdir(sh_args.repository):
        print('ERROR: Repository not found!')
        return
    # check if files with backup name exist
    # NOTE(review): only the .cat file is checked; stale .tar volumes with
    # the same basename are not detected - confirm intended
    if os.path.isfile(sh_args.repository + STR_SLASH + sh_args.name + STR_CAT_EXT):
        print('ERROR: Such archive already exists!')
        return
    # create empty reference and hash lists
    reference_list = FileList()
    hash_list = HashList()
    # load reference and hash lists
    if sh_args.reference is not None:
        # check if reference file exists
        ref_path = sh_args.repository + '/' + sh_args.reference + STR_CAT_EXT
        if not os.path.isfile(ref_path):
            print('ERROR: Reference not found!')
            return
        reference_list.load_file(ref_path)
        hash_list.load_file(ref_path)
    # create list of files/dirs in source destination
    source_list = FileList()
    source_list.read_dir_list(sh_args.source)
    # include / exclude files / dirs
    source_list.include_hierarchy(sh_args.include)
    source_list.exclude(sh_args.exclude)
    # compression
    compr = 'tar'
    if sh_args.compression is not None:
        compr = sh_args.compression
    # create TarFileWriter
    writer = TarFileWriter(sh_args.repository + STR_SLASH + sh_args.name, sh_args.size, compr)
    # check files and if new/changed add to archive
    c_all = 0
    c_new = 0
    size_all = 0
    size_new = 0
    key_list = list(source_list.dict)
    key_list.sort()
    for file_name in key_list:
        file_path = sh_args.source + file_name
        if not source_list.dict[file_name].isDir:
            # retry loop: repeats until the file is processed, ignored or
            # the whole run is aborted
            ok = False
            while not ok:
                try:
                    # get date and size
                    source_list.dict[file_name].mtime = int(os.path.getmtime(file_path))
                    source_list.dict[file_name].size = os.path.getsize(file_path)
                    # check if such file is in reference
                    if (not sh_args.recalculate) and (file_name in reference_list.dict) and \
                            (not reference_list.dict[file_name].isDir) and \
                            (source_list.dict[file_name].mtime == reference_list.dict[file_name].mtime) and \
                            (source_list.dict[file_name].size == reference_list.dict[file_name].size):
                        source_list.dict[file_name].hash = reference_list.dict[file_name].hash
                    else:
                        # calculate hash
                        source_list.dict[file_name].hash = calc_hash(file_path)
                    # add file to archive; identical content (same hash+size)
                    # is stored only once
                    tar_name = hash_name(source_list.dict[file_name])
                    if tar_name not in hash_list.dict:
                        hash_list.dict[tar_name] = sh_args.name
                        writer.add(sh_args.source + file_name, tar_name)
                        c_new += 1
                        size_new = size_new + source_list.dict[file_name].size
                    size_all = size_all + source_list.dict[file_name].size
                    ok = True
                except (OSError, IOError) as e:
                    print('ERROR: Can not read: ' + e.filename)
                    if sh_args.ignore:
                        answer = 'i'
                    else:
                        answer = input('Abort (a) / Ignore (i) / Retry (other): ')
                    if answer == 'a':
                        writer.close()
                        return
                    elif answer == 'i':
                        # drop the unreadable file from the catalogue
                        del source_list.dict[file_name]
                        ok = True
                except tarfile.TarError:
                    print('ERROR: Can not write files to archive!')
                    answer = input('Abort (a) / Retry (other): ')
                    if answer == 'a':
                        writer.close()
                        return
            c_all += 1
            if not sh_args.quiet:
                # progress line, rewritten in place with \r
                sys.stdout.write("\rFiles (New/All): %s / %s, Size (New/All): %.02f Mb / %.02f Mb" % (
                    c_new, c_all, size_new/1024.0/1024.0, size_all/1024.0/1024.0))
                sys.stdout.flush()
    # close TarFileWriter
    writer.close()
    if not sh_args.quiet:
        sys.stdout.write(STR_EOL)
        sys.stdout.flush()
    # save catalogue (DIR_LIST section followed by HASH_LIST section)
    try:
        file_object = open(sh_args.repository + STR_SLASH + sh_args.name + STR_CAT_EXT,
                           mode='w', encoding='utf-8')
        try:
            source_list.save(file_object)
            hash_list.save(file_object)
        except IOError:
            print('ERROR: Can not create catalogue file!')
            return
        finally:
            file_object.close()
    except IOError:
        print('ERROR: Can not create catalogue file!')
def sh_find(sh_args):
    """Handler for the "find" sub-command.

    Lists entries of every catalogue in the repository whose file name
    matches "<name mask>.cat", filtered by the include/exclude masks.
    """
    # check repository
    if not os.path.isdir(sh_args.repository):
        print('ERROR: Repository not found!\n')
        return
    # get matching catalogue files, sorted by name
    # (the original filtered by repeated del cat_list[cat_list.index(key)],
    # which is O(n^2); a comprehension does one pass)
    cat_list = [f for f in sorted(os.listdir(sh_args.repository))
                if fnmatch.fnmatch(f, sh_args.name + STR_CAT_EXT)]
    # check if something found
    if len(cat_list) == 0:
        print('ERROR: No catalogue found!\n')
        return
    # looking for patterns in all catalogues
    for cat in cat_list:
        # loading catalogue
        file_list = FileList()
        file_list.load_file(sh_args.repository + STR_SLASH + cat)
        # include / exclude files / dirs
        file_list.include(sh_args.include)
        file_list.exclude(sh_args.exclude)
        # print matching files and dirs, sorted by path
        for key in sorted(file_list.dict.keys()):
            print(cat + ': ' + key)
def sh_restore(sh_args):
    """Handler for the "restore" sub-command.

    Re-creates the directory tree described by the named catalogue
    inside sh_args.destination, extracting only files that are missing
    or whose mtime/size/hash differ.  With --delete, files/dirs not in
    the catalogue are removed afterwards.
    """
    # check repository
    if not os.path.isdir(sh_args.repository):
        print('ERROR: Repository not found!\n')
        return
    # check existence of catalogue file
    if not os.path.isfile(sh_args.repository + STR_SLASH + sh_args.name + STR_CAT_EXT):
        print('ERROR: Catalogue not found!\n')
        return
    # check destination existence
    if not os.path.isdir(sh_args.destination):
        print('ERROR: Destination not found!\n')
        return
    # read FileList and HashList from catalogue
    source_list = FileList()
    hash_list = HashList()
    source_list.load_file(sh_args.repository + STR_SLASH + sh_args.name + STR_CAT_EXT)
    hash_list.load_file(sh_args.repository + STR_SLASH + sh_args.name + STR_CAT_EXT)
    # include / exclude files / dirs
    source_list.fix_hierarchy()
    source_list.include_hierarchy(sh_args.include)
    source_list.exclude(sh_args.exclude)
    # create not existing dirs and extract new or changed files
    c_all = 0
    c_new = 0
    size_all = 0
    size_new = 0
    key_list = list(source_list.dict)
    key_list.sort()
    for file_name in key_list:
        file_path = sh_args.destination + file_name
        # make directory (for a file entry: its parent directory)
        if source_list.dict[file_name].isDir:
            file_dir = file_path
        else:
            (file_dir, stub) = os.path.split(file_path)
        ok = False
        while not ok:
            try:
                # a plain file in the way of a directory is removed first
                if os.path.isfile(file_dir):
                    os.remove(file_dir)
                if not os.path.isdir(file_dir):
                    os.makedirs(file_dir)
                ok = True
            except OSError as e:
                print('ERROR: Can not create directory: ' + e.filename)
                if sh_args.ignore:
                    answer = 'i'
                else:
                    answer = input('Abort (a) / Ignore (i) / Retry (other): ')
                if answer == 'a':
                    return
                elif answer == 'i':
                    ok = True
        # restore file
        if not source_list.dict[file_name].isDir:
            hash_key = hash_name(source_list.dict[file_name])
            backup_file = hash_list.dict[hash_key]
            ok = False
            while not ok:
                try:
                    # check if such file exists and is already identical
                    # NOTE(review): if TarFileReader() raises here on the first
                    # attempt, 'reader' is unbound and the finally clause below
                    # raises NameError - pre-existing bug, confirm/fix upstream
                    reader = TarFileReader(sh_args.repository + STR_SLASH + backup_file)
                    if os.path.isfile(file_path) and \
                            (source_list.dict[file_name].mtime == int(os.path.getmtime(file_path))) and \
                            (source_list.dict[file_name].size == os.path.getsize(file_path)) and \
                            (source_list.dict[file_name].hash == calc_hash(file_path)):
                        pass
                    else:
                        # a directory in the way of the file is removed first
                        if os.path.isdir(file_path):
                            shutil.rmtree(file_path)
                        reader.extract(hash_key, file_path)
                        c_new += 1
                        size_new = size_new + source_list.dict[file_name].size
                    ok = True
                except (OSError, IOError) as e:
                    print('ERROR: Can not restore file: ' + e.filename)
                    if sh_args.ignore:
                        answer = 'i'
                    else:
                        answer = input('Abort (a) / Ignore (i) / Retry (other): ')
                    if answer == 'a':
                        return
                    elif answer == 'i':
                        ok = True
                finally:
                    reader.close()
        c_all += 1
        # NOTE(review): directory records carry size == -1 (see FileInfo),
        # so size_all is decremented for every directory - confirm
        size_all = size_all + source_list.dict[file_name].size
        # set time (applied to directories as well as files)
        ok = False
        while not ok:
            try:
                os.utime(file_path, (source_list.dict[file_name].mtime,
                                     source_list.dict[file_name].mtime))
                ok = True
            except OSError as e:
                print('ERROR: Can not update time for: ' + e.filename)
                if sh_args.ignore:
                    answer = 'i'
                else:
                    answer = input('Abort (a) / Ignore (i) / Retry (other): ')
                if answer == 'a':
                    return
                elif answer == 'i':
                    ok = True
        # progress line, rewritten in place with \r
        sys.stdout.write("\rFiles (New/All): %s / %s, Size (New/All): %.02f Mb / %.02f Mb" % (
            c_new, c_all, size_new/1024.0/1024.0, size_all/1024.0/1024.0))
        sys.stdout.flush()
    sys.stdout.write(STR_EOL)
    sys.stdout.flush()
    # get FileList for destination
    if sh_args.delete:
        destination_list = FileList()
        destination_list.read_dir_list(sh_args.destination)
        # remove old files
        key_list = list(destination_list.dict.keys())
        key_list.sort()
        for file_name in key_list:
            file_path = sh_args.destination + file_name
            if (not destination_list.dict[file_name].isDir) and \
                    (file_name not in source_list.dict):
                ok = False
                while not ok:
                    try:
                        os.remove(file_path)
                        ok = True
                    except OSError as e:
                        print('ERROR: Can not delete file: ' + e.filename)
                        if sh_args.ignore:
                            answer = 'i'
                        else:
                            answer = input('Abort (a) / Ignore (i) / Retry (other): ')
                        if answer == 'a':
                            return
                        elif answer == 'i':
                            ok = True
        # remove old dirs (after files, so rmtree finds them empty or gone)
        key_list = list(destination_list.dict.keys())
        key_list.sort()
        for file_name in key_list:
            file_path = sh_args.destination + file_name
            if destination_list.dict[file_name].isDir and \
                    (file_name not in source_list.dict):
                ok = False
                while not ok:
                    try:
                        if os.path.isdir(file_path):
                            shutil.rmtree(file_path)
                        ok = True
                    except OSError as e:
                        print('ERROR: Can not delete directory: ' + e.filename)
                        if sh_args.ignore:
                            answer = 'i'
                        else:
                            answer = input('Abort (a) / Ignore (i) / Retry (other): ')
                        if answer == 'a':
                            return
                        elif answer == 'i':
                            ok = True
# source - the directory being archived
# destination - the directory files are extracted into
# repository - the directory where the archive is stored
# name - archive name (no extension, no path; the archive must be inside [repository])
# reference - path/name of the reference catalogue, including extension
# slice - one archive volume; the number precedes the extension, e.g. [backup].3.tar
# catalogue - the archive catalogue - [backup].cat
parser = argparse.ArgumentParser(description='version 0.6.2')
subparsers = parser.add_subparsers()
parser_create = subparsers.add_parser('create') #
parser_create.add_argument('source', help='Directory tree that will be backed up.') # dir
parser_create.add_argument('repository', help='Directory in which backup will be stored.') # dir
parser_create.add_argument('name', help='Basename for backup.') # name
parser_create.add_argument('-r', '--reference',
                           help='Reference basename for differential backup. '
                                'Reference catalog should be stored in the same repository.') # path
parser_create.add_argument('-s', '--size', type=int, default=1024*1024*1020, help='Size of one slice.')
parser_create.add_argument('-i', '--include', nargs='*',
                           help='Mask list. Files/Dirs matching at least one mask will be included in backup. '
                                'If no mask specified all Files/Dirs will be included.')
parser_create.add_argument('-e', '--exclude', nargs='*',
                           help='Mask list. Files/Dirs matching at least one mask will be excluded from backup.')
parser_create.add_argument('-q', '--quiet', action='store_true',
                           help='Nothing is displayed if operation succeeds.') # !!!
parser_create.add_argument('-g', '--ignore', action='store_true', help='Ignore all errors.')
parser_create.add_argument('-c', '--compression', help="'tar'-default, 'gz' or 'bz2'")
parser_create.add_argument('-a', '--recalculate', action='store_true',
                           help="Recalculate all hashes again. Don't use hashes from reference.")
parser_create.set_defaults(func=sh_create)
parser_find = subparsers.add_parser('find') # simple regular expressions
parser_find.add_argument('repository', help='Directory in which backup is stored.') # dir
parser_find.add_argument('name', help='Mask for backup basename. '
                                      'Several backups can be looked thorough.') # name pattern (without ext)
parser_find.add_argument('-i', '--include', nargs='*',
                         help='Mask list. Files/Dirs matching at least one mask will be shown. '
                              'If no mask specified all Files/Dirs will be shown.')
parser_find.add_argument('-e', '--exclude', nargs='*',
                         help='Mask list. Files/Dirs matching at least one mask will not be shown.')
parser_find.set_defaults(func=sh_find)
parser_restore = subparsers.add_parser('restore') # restore backup
parser_restore.add_argument('repository', help='Directory in which backup is stored.') # dir
parser_restore.add_argument('name', help='Basename for backup to be restored.') # name
parser_restore.add_argument('destination', help='Directory which will be restored.') # dir
parser_restore.add_argument('-i', '--include', nargs='*',
                            help='Mask list. Files/Dirs matching at least one mask will be restored. '
                                 'If no mask specified all Files/Dirs will be restored.')
parser_restore.add_argument('-e', '--exclude', nargs='*',
                            help='Mask list. Files/Dirs matching at least one mask will not be restored.')
parser_restore.add_argument('-d', '--delete', action='store_true',
                            help='Delete Files/Dirs not existing in backup.')
parser_restore.add_argument('-g', '--ignore', action='store_true', help='Ignore all errors.')
parser_restore.set_defaults(func=sh_restore)
# dispatch to the sub-command handler chosen above
args = parser.parse_args()
args.func(args)
# // integer division: the result is an integer (fractional part discarded)
# % modulo
# tar file format
# 1 file info - BLOCKSIZE (512)
# 1 file data - filled by zeros to BLOCKSIZE (512)
# 2 file info - BLOCKSIZE (512)
# 2 file data - filled by zeros to BLOCKSIZE (512)
# N file info - BLOCKSIZE (512)
# N file data - filled by zeros to BLOCKSIZE (512)
# two finishing zero blocks - BLOCKSIZE * 2 (512 * 2)
# filled by zeros to RECORDSIZE (BLOCKSIZE * 20) (512 * 20)
# tarfile.BLOCKSIZE = 512
# tarfile.RECORDSIZE = BLOCKSIZE * 20
| 2e8/siddar | siddar.py | siddar.py | py | 34,249 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "hashlib.sha256",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
43506783122 | #!/usr/bin/env python3
"""This modual holds the class created for task 3"""
import numpy as np
import matplotlib.pyplot as plt
class Neuron:
    """
    Neuron - a single neuron performing binary classification.
    nx = the number of input features to the neuron
    """
    def __init__(self, nx):
        if not isinstance(nx, int):
            raise TypeError("nx must be an integer")
        if nx < 1:
            raise ValueError("nx must be a positive integer")
        self.__W = np.random.randn(1, nx)  # weights, shape (1, nx)
        self.__b = 0  # bias
        self.__A = 0  # activated output of the last forward pass
    @property
    def W(self):
        # weights vector (read-only)
        return self.__W
    @property
    def b(self):
        # bias (read-only)
        return self.__b
    @property
    def A(self):
        # activated output (read-only)
        return self.__A
    def forward_prop(self, X):
        """
        Calculates the forward propagation
        and updates the private attribute A
        """
        # X numpy array size = (nx - , m - )
        Z = np.dot(self.W, X) + self.b
        # Z = the (weight*activation)+bias for all data in the set
        A = 1/(1 + np.exp(-1 * Z))
        # applying the sigmoid function to Z (3brown1blue need to rewatch)
        self.__A = A
        return self.__A
    def cost(self, Y, A):
        """Calculates the cost of the model using logistic regression"""
        m = Y.shape[1]
        # 1.0000001 avoids taking log(0) when A == 1
        loss = -1 * (Y * np.log(A) + (1 - Y) * np.log(1.0000001 - A))
        cost = (1/m) * np.sum(loss)
        return cost
    def evaluate(self, X, Y):
        """Evaluates the neuron's predictions"""
        predict = self.forward_prop(X)
        # threshold activations at 0.5 to get hard 0/1 labels
        predict = np.where(predict < 0.5, 0, 1)
        return predict, self.cost(Y, self.__A)
    def gradient_descent(self, X, Y, A, alpha=0.05):
        """
        - Calculates one pass of gradient descent on the neuron
        - Gradient descent updates the weights and biases in place
        """
        m = Y.shape[1]
        W = self.__W
        b = self.__b
        Dz = A - Y
        Dw = (1/m) * (Dz @ X.T)
        Db = (1/m) * np.sum(Dz)
        self.__W = W - alpha * Dw
        self.__b = b - alpha * Db
    def train(
            self, X, Y,
            iterations=5000, alpha=0.05, verbose=True,
            graph=True, step=100):
        """Trains a neuron; optionally prints/plots the cost every `step`
        iterations.  Returns the final evaluation (predictions, cost)."""
        c = self.cost
        if not isinstance(iterations, int):
            raise TypeError("iterations must be an integer")
        if iterations < 0:
            raise ValueError("iterations must be a positive integer")
        if not isinstance(alpha, float):
            raise TypeError("alpha must be a float")
        if alpha < 0:
            raise ValueError("alpha must be positive")
        if verbose or graph:
            if not isinstance(step, int):
                raise TypeError("step must be an integer")
            if step <= 0 or step > iterations:
                raise ValueError("step must be positive and <= iterations")
        it = []
        cost = []
        for i in range(iterations + 1):
            A = self.forward_prop(X)
            # record/print cost on every `step`-th iteration and the last one
            if step == 0 or i % step == 0 or i == iterations:
                if verbose:
                    print("Cost after {} iterations: {}".format(i, c(Y, A)))
                if graph:
                    it.append(i)
                    cost.append(self.cost(Y, A))
            self.gradient_descent(X, Y, A, alpha)
        it = np.array(it)
        cost = np.array(cost)
        if graph:
            plt.plot(it, cost)
            plt.xlabel("iteration")
            plt.ylabel("cost")
            plt.title("Training Cost")
            plt.show()
        return self.evaluate(X, Y)
| chriswill88/holbertonschool-machine_learning | supervised_learning/0x00-binary_classification/7-neuron.py | 7-neuron.py | py | 3,565 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.random.randn",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "numpy.dot",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_... |
7805060084 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os, sys, datetime, json
from core import info_collection
from conf import settings
# import check python version model
from plugins.detector import check_version
class ArgvHandler(object):
    """Dispatches the command line to the matching handler method.

    argv[1] names the method to invoke ("collect" or "report");
    anything else prints the help text.
    """
    def __init__(self, argvs):
        self.argvs = argvs
        self.parse_argv()
    def parse_argv(self):
        """Invoke the method named by argv[1], or print help."""
        if len(self.argvs) > 1:
            if hasattr(self, self.argvs[1]):
                func = getattr(self, self.argvs[1])
                func()
            else:
                self.help()
        else:
            self.help()
    def help(self):
        """Print the list of supported sub-commands."""
        msg = '''
        collect
        report
        '''
        print(msg)
    def collect(self):
        """Collect local system information and print it."""
        obj = info_collection.InfoCollection()
        asset_data = obj.collect()
        print(asset_data)
    def __load_asset_id(self):
        '''
        Load the locally cached asset id.
        :return: asset_id as int, or None when no valid id is cached
        '''
        asset_id_file = settings.Params["asset_id"]
        if os.path.isfile(asset_id_file):
            asset_id = open(asset_id_file).read().strip()
            # BUGFIX: validate the string *before* converting; the original
            # called int() first and then str.isdigit() on the resulting
            # int, which raised AttributeError.
            if asset_id.isdigit():
                return int(asset_id)
        return None
    def __import_model(self, cmd1, cmd2):
        '''
        Import the version-specific urllib modules by dotted name.
        :param cmd1: "urllib.request" (py3) or "urllib2" (py2)
        :param cmd2: "urllib.parse" (py3) or "urllib" (py2)
        :return: (request module, urlencode module)
        '''
        # BUGFIX: plain __import__("urllib.request") returns the top-level
        # "urllib" package, so attributes like Request/urlopen did not
        # resolve; fromlist=[''] makes __import__ return the leaf module.
        cmd_urllib1 = __import__(cmd1, fromlist=[''])
        cmd_urllib2 = __import__(cmd2, fromlist=[''])
        return cmd_urllib1, cmd_urllib2
    def __deal_urllib(self, cmd_urllib1, cmd_urllib2, url, data=None, pyversion=None, method=None):
        '''
        Perform a GET or POST request with the version-specific urllib.
        :param cmd_urllib1: urllib request module (urllib.request / urllib2)
        :param cmd_urllib2: urlencode module (urllib.parse / urllib)
        :param url: antOps server url
        :param data: system info data (dict, POST only)
        :param pyversion: python major version, 2 or 3 (POST only)
        :param method: "get" or "post"
        :return: server callback (parsed JSON for POST, raw body for GET)
        '''
        if method == "get":
            req = cmd_urllib1.Request(url)
            # BUGFIX: the function is urlopen(), not urlOpen()
            res_data = cmd_urllib1.urlopen(req, timeout=settings.Params["request_timeout"])
            callback = res_data.read()
            print("--->server response: ", callback)
            return callback
        elif method == "post":
            # BUGFIX: dropped encoding='utf-8' - py2 urllib.urlencode does
            # not accept that keyword and py3 does not need it here
            data_encode = cmd_urllib2.urlencode(data)
            if pyversion == 3:
                # POST data must be bytes on python3
                req = cmd_urllib1.Request(url=url, data=bytes(data_encode, encoding='utf-8'))
                res_data = cmd_urllib1.urlopen(req, timeout=settings.Params["request_timeout"])
                callback = str(res_data.read(), encoding='utf-8')
                # BUGFIX: parse the JSON body so report() can index the
                # response as a dict (the py2 branch already parsed it)
                callback = json.loads(callback)
            elif pyversion == 2:
                req = cmd_urllib1.Request(url=url, data=data_encode)
                res_data = cmd_urllib1.urlopen(req, timeout=settings.Params["request_timeout"])
                # BUGFIX: json.loads() parses a string; json.load() expects
                # a file object
                callback = json.loads(res_data.read())
            print("\033[31;1m[%s]:[%s]\033[0m response:\n%s" % (method, url, callback))
            return callback
    def __submit_data(self, url, data, method):
        '''
        Submit data to the antOps server (python2/python3 compatible).
        :param url: key into settings.Params["urls"]
        :param data: system info data (dict)
        :param method: "get" or "post"
        :return: server callback info
        '''
        if url in settings.Params["urls"]:
            if type(settings.Params["port"]) is int:
                url = "http://%s:%s%s" % (
                    settings.Params["server"], settings.Params["port"], settings.Params["urls"][url])
            else:
                url = "http://%s/%s" % (settings.Params["server"], settings.Params["urls"][url])
            print("Connectins.. \n \033[32;2m[%s] \033[0m, it may take a minute..." % url)
            # resolve the right urllib flavour once, up front
            pversion = check_version.check_python()
            if pversion == 3:
                cmd_urllib1, cmd_urllib2 = self.__import_model("urllib.request", "urllib.parse")
            else:
                cmd_urllib1, cmd_urllib2 = self.__import_model("urllib2", "urllib")
            if method == "get":
                # append the data as query-string arguments
                args = "&".join("%s=%s" % (k, v) for k, v in data.items())
                url_with_args = "%s?%s" % (url, args)
                try:
                    return self.__deal_urllib(cmd_urllib1, cmd_urllib2, url_with_args, method="get")
                except Exception as e:
                    # CLI boundary: report the actual error and stop
                    sys.exit("\033[31;1m%s\033[0m" % e)
            elif method == "post":
                try:
                    # BUGFIX: forward method="post" (the original passed
                    # method="get" here, so POSTs never happened)
                    return self.__deal_urllib(
                        cmd_urllib1, cmd_urllib2, url, data=data, pyversion=pversion, method="post")
                except Exception as e:
                    sys.exit("\033[31;1m%s\033[0m" % e)
        else:
            raise KeyError
    def __update_asset_id(self, asset_id):
        """Persist the server-assigned asset id to the local cache file."""
        asset_id_file = settings.Params["asset_id"]
        f = open(asset_id_file, "w")
        f.write(str(asset_id))
        f.close()
    def report(self):
        """Collect system info and submit it to the antOps server."""
        obj = info_collection.InfoCollection()
        asset_data = obj.collect()
        asset_id = self.__load_asset_id() # load from asset_id file
        if asset_id:
            asset_data["asset_id"] = asset_id
            post_url = "asset_update"
        else:
            asset_data["asset_id"] = None
            post_url = "asset_report"
        data = {"asset_data": json.dumps(asset_data)}
        response = self.__submit_data(post_url, data, method="post")
        if "asset_id" in response:
            self.__update_asset_id(response["asset_id"])
        self.log_record(response)
    def log_record(self, log_mesg):
        '''
        Append server callback messages to the agent log file.
        :param log_mesg: server callback info (dict with optional
            "info" / "error" / "warning" message lists)
        :return: None
        '''
        f = open(settings.Params["log_file"], "a")
        if type(log_mesg) is dict:
            # one line per message: "<timestamp>\t<LEVEL>\t<message>"
            for level in ("info", "error", "warning"):
                for msg in log_mesg.get(level, []):
                    log_format = "%s\t%s\t%s\n" % (
                        datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S"), level.upper(), msg)
                    f.write(log_format)
f.close() | szlyunnan/AntOpsv2 | antOpsClient/core/antMain.py | antMain.py | py | 7,879 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "core.info_collection.InfoCollection",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "core.info_collection",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "conf.settings.Params",
"line_number": 43,
"usage_type": "attribute"
},
{
... |
37840139507 | import logging
from django.utils.translation import ugettext_lazy as _
from mayan.apps.acls.classes import ModelPermission
from mayan.apps.common.apps import MayanAppConfig
from mayan.apps.common.menus import (
menu_object, menu_return, menu_secondary, menu_setup
)
from mayan.apps.events.classes import EventModelRegistry, ModelEventType
from mayan.apps.navigation.classes import SourceColumn
from .classes import CredentialBackend
from .events import event_credential_edited, event_credential_used
from .links import (
link_credential_backend_selection, link_credential_delete,
link_credential_edit, link_credential_list,
link_credential_setup
)
from .permissions import (
permission_credential_delete, permission_credential_edit,
permission_credential_use, permission_credential_view
)
logger = logging.getLogger(name=__name__)
class CredentialsApp(MayanAppConfig):
"""Mayan app config for the credentials app: registers the StoredCredential
model with the event, ACL, list-column, and menu subsystems at startup."""
app_namespace = 'credentials'
app_url = 'credentials'
has_rest_api = True
has_tests = True
name = 'mayan.apps.credentials'
verbose_name = _('Credentials')
def ready(self):
"""Wire up StoredCredential once Django's app registry is ready."""
super().ready()
CredentialBackend.load_modules()
StoredCredential = self.get_model(model_name='StoredCredential')
# Audit events this model can emit.
EventModelRegistry.register(model=StoredCredential)
ModelEventType.register(
model=StoredCredential, event_types=(
event_credential_edited, event_credential_used
)
)
# Columns shown in credential list views.
SourceColumn(
attribute='label', is_identifier=True, is_sortable=True,
source=StoredCredential
)
SourceColumn(
attribute='internal_name', include_label=True, is_sortable=True,
source=StoredCredential
)
SourceColumn(
attribute='get_backend_class_label', include_label=True,
source=StoredCredential
)
# Per-object ACL permissions.
ModelPermission.register(
model=StoredCredential, permissions=(
permission_credential_delete, permission_credential_edit,
permission_credential_view, permission_credential_use
)
)
# Navigation: per-object, return, secondary, and setup menus.
menu_object.bind_links(
links=(link_credential_delete, link_credential_edit),
sources=(StoredCredential,)
)
menu_return.bind_links(
links=(link_credential_list,), sources=(
StoredCredential,
'credentials:stored_credential_backend_selection',
'credentials:stored_credential_create',
'credentials:stored_credential_list'
)
)
menu_secondary.bind_links(
links=(link_credential_backend_selection,), sources=(
StoredCredential,
'credentials:stored_credential_backend_selection',
'credentials:stored_credential_create',
'credentials:stored_credential_list'
)
)
menu_setup.bind_links(
links=(link_credential_setup,)
)
| salmabader/mayan-edms | mayan/apps/credentials/apps.py | apps.py | py | 3,033 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "mayan.apps.common.apps.MayanAppConfig",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 34,
"usage_type": "call"
... |
17585811312 | import struct
import typing as t
from pathlib import Path
from starwhale import Link, GrayscaleImage
_TItem = t.Generator[t.Dict[str, t.Any], None, None]
def iter_mnist_item() -> _TItem:
"""Yield MNIST test-set samples as {'img': GrayscaleImage, 'label': int}
by parsing the raw IDX image/label files under ../data."""
root_dir = Path(__file__).parent.parent / "data"
with (root_dir / "t10k-images-idx3-ubyte").open("rb") as data_file, (
root_dir / "t10k-labels-idx1-ubyte"
).open("rb") as label_file:
# IDX headers (big-endian): images = magic, count, rows, cols;
# labels = magic, count. The magic numbers are discarded.
_, data_number, height, width = struct.unpack(">IIII", data_file.read(16))
_, label_number = struct.unpack(">II", label_file.read(8))
print(
f">data({data_file.name}) split data:{data_number}, label:{label_number} group"
)
image_size = height * width
# Both files are read sequentially in lockstep, one record per sample.
for i in range(0, min(data_number, label_number)):
_data = data_file.read(image_size)
_label = struct.unpack(">B", label_file.read(1))[0]
yield {
"img": GrayscaleImage(
_data,
display_name=f"{i}",
shape=(height, width, 1),
),
"label": _label,
}
class LinkRawDatasetProcessExecutor:
"""Yield MNIST test samples whose image bytes stay remote: each
GrayscaleImage carries a Link (s3 uri + byte offset + size) into the raw
IDX image file instead of inline pixel data."""
_endpoint = "10.131.0.1:9000"
_bucket = "users"
def __iter__(self) -> _TItem:
root_dir = Path(__file__).parent.parent / "data"
with (root_dir / "t10k-labels-idx1-ubyte").open("rb") as label_file:
_, label_number = struct.unpack(">II", label_file.read(8))
# Skip the 16-byte IDX image header; each image is 28*28 bytes.
offset = 16
image_size = 28 * 28
uri = f"s3://{self._endpoint}/{self._bucket}/dataset/mnist/t10k-images-idx3-ubyte"
for i in range(label_number):
_data = GrayscaleImage(
link=Link(
f"{uri}",
offset=offset,
size=image_size,
),
display_name=f"{i}",
shape=(28, 28, 1),
)
_label = struct.unpack(">B", label_file.read(1))[0]
yield {"img": _data, "label": _label}
offset += image_size
| star-whale/starwhale | example/mnist/mnist/dataset.py | dataset.py | py | 2,105 | python | en | code | 171 | github-code | 36 | [
{
"api_name": "typing.Generator",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "typing.Dict",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "typing.Any",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
... |
7707679326 | import os
import sys
import time
import copy
import random
from reprint import output
MAX_oo = 65535
MIN_MAX = 65280
MIN_oo = -65535
'''
print("1111111",end="")
print("\r222222",end="")
╳〇
─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛
├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻
┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋
═║╒╓╔╕╖╗è]╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╳
╔ ╗╝╚ ╬ ═ ╓ ╩ ┠ ┨┯ ┷┏ ┓┗ ┛┳⊥﹃﹄┌
'''
turn = 0
max_win = 0
min_win = 0
Max=" 〇"
Min=" ╳ "
empty=" "
symbols=[Max, Min, empty]
alpha=[MIN_oo,MIN_oo]
Chess_Board=[[2]*5, [2]*5, [2]*5, [2]*5, [2]*5]
def check_win(chess_board, role):
    """Return True if `role` occupies an entire row, column, or diagonal.

    Generalized from the fixed 5x5 board to any square board (the board
    size is taken from len(chess_board)), which is backward compatible.

    :param chess_board: square 2D list; cells hold a player id or 2 (empty)
    :param role: player id to test (0 or 1)
    :return: bool
    """
    size = len(chess_board)
    for i in range(size):
        # Row i or column i fully owned by `role`.
        if all(chess_board[i][j] == role for j in range(size)):
            return True
        if all(chess_board[j][i] == role for j in range(size)):
            return True
    # Main and anti diagonals.
    if all(chess_board[i][i] == role for i in range(size)):
        return True
    if all(chess_board[i][size - 1 - i] == role for i in range(size)):
        return True
    return False
def search(chess_board, role):
    """Count the lines (rows, columns, diagonals) still winnable by `role`.

    A line is "open" when every cell is either `role`'s piece or empty (2).
    Generalized from the fixed 5x5 board to any square board, which is
    backward compatible.

    :param chess_board: square 2D list; cells hold a player id or 2 (empty)
    :param role: player id (0 or 1)
    :return: int number of open lines (max is 2*size + 2)
    """
    size = len(chess_board)

    def open_line(cells):
        # True (counts as 1) when no opposing piece blocks the line.
        return all(c == role or c == 2 for c in cells)

    value = 0
    for i in range(size):
        value += open_line(chess_board[i])
        value += open_line(chess_board[j][i] for j in range(size))
    value += open_line(chess_board[k][k] for k in range(size))
    value += open_line(chess_board[k][size - 1 - k] for k in range(size))
    return value
def getGuess(chess_board, role):
    """Static evaluation of the board from `role`'s perspective.

    Returns MAX_oo / MIN_oo for a decided game; otherwise the difference
    between the open-line counts of `role` and the opponent.
    """
    enemy = (role + 1) % 2
    if check_win(chess_board, role):
        return MAX_oo
    if check_win(chess_board, enemy):
        return MIN_oo
    return search(chess_board, role) - search(chess_board, enemy)
def MinMax(role):
    """Play one ply for `role` on the global Chess_Board.

    Expands every move for `role` (MAX layer), evaluates each against the
    opponent's best reply (MIN layer) with alpha-beta pruning, and commits
    the move with the best worst-case score to the global board.

    :return: 1 if the committed move wins for `role`, MIN_MAX if the board
             is full (draw), 0 otherwise.
    Side effects: rebinds the global Chess_Board, updates alpha[role], and
    sleeps briefly so the rendered game is watchable.
    """
    global Chess_Board
    # MAX layer: every board reachable by placing one of `role`'s pieces.
    open_list = []
    for i in range(5):
        for j in range(5):
            if Chess_Board[i][j] == 2:
                new_chess_board = copy.deepcopy(Chess_Board)
                new_chess_board[i][j] = role
                open_list.append([new_chess_board, MAX_oo])
    if len(open_list) == 0:
        # No empty square left: draw sentinel.
        return MIN_MAX
    # MIN layer: the opponent replies; keep the minimum evaluation (beta)
    # and prune a candidate as soon as beta <= alpha[role].
    for index, min_node in enumerate(open_list):
        alpha_beta_cut = False
        new_Chess_board = min_node[0]
        beta = min_node[1]
        for min_i in range(5):
            for min_j in range(5):
                if new_Chess_board[min_i][min_j] == 2:
                    min_chess_board = copy.deepcopy(new_Chess_board)
                    min_chess_board[min_i][min_j] = (role + 1) % 2
                    guess = getGuess(min_chess_board, role)
                    beta = min(beta, guess)
                    open_list[index][1] = beta
                    if beta <= alpha[role]:
                        alpha_beta_cut = True
                        break
            if alpha_beta_cut:
                break
        if alpha_beta_cut:
            continue
        alpha[role] = max(alpha[role], beta)
    # Commit the candidate with the highest worst-case score.
    open_list.sort(key=lambda x: x[1], reverse=True)
    status = open_list[0]
    Chess_Board = status[0]
    time.sleep(0.5)
    # Fixed: check_win returns a bool, so the old `== MAX_oo` comparison was
    # always False and a winning move was never reported to the caller.
    return 1 if check_win(Chess_Board, role) else 0
# Board renderer template: odd indices are cell rows filled via str.format,
# even indices are static grid borders (11 lines total, matching initial_len).
output_list=[
"┌───┬───┬───┬───┬───┐",
"│{}│{}│{}│{}│{}│",
"├───┼───┼───┼───┼───┤",
"│{}│{}│{}│{}│{}│",
"├───┼───┼───┼───┼───┤",
"│{}│{}│{}│{}│{}│",
"├───┼───┼───┼───┼───┤",
"│{}│{}│{}│{}│{}│",
"├───┼───┼───┼───┼───┤",
"│{}│{}│{}│{}│{}│",
"└───┴───┴───┴───┴───┘"
]
# Main loop: redraw the board in place with reprint, then alternate Max
# (role 0) and Min (role 1) MinMax moves until a win flag or draw sentinel.
with output(output_type='list', initial_len=11) as out:
while True:
#with output(output_type='list', initial_len=11) as out:
for index,value in enumerate(output_list):
if index%2 == 1:
# Map cell values (0/1/2) to the display glyphs in `symbols`.
vals = [symbols[x] for x in Chess_Board[index//2]]
out[index]=value.format(*vals)
else:
out[index]=value
if max_win != 0 or min_win != 0:
break
turn_win = MinMax(turn)
# Only the player who just moved can have a non-zero result this turn.
max_win = turn_win if turn==0 else 0
min_win = turn_win if turn==1 else 0
turn = (turn+1)%2
# Result banners: 1 signals a win for the mover, MIN_MAX signals a draw.
if max_win==1:
print("Max win!!!")
if min_win==1:
print("Min win!!!")
if min_win==MIN_MAX or max_win==MIN_MAX:
print("It ends in a draw!!!") | Mecheal-helloworld/Python-shell | demo/MIN_MAX.py | MIN_MAX.py | py | 4,714 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "copy.deepcopy",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "reprint.output",
"line_... |
39780727033 | import os
import sys
import numpy as np
import pickle
from matplotlib import pyplot as plt
from tqdm import tqdm
ZOOMIN_BUFFER = 1.0
def compute_epoch(result):
    """Fractional epoch of a checkpoint: completed epochs plus the progress
    fraction inside the current one."""
    fraction = result['step_within_epoch'] / result['epoch_length']
    return result['epoch'] + fraction
def compute_avg_acc(result, standard_or_own_domain):
    """Zero-shot top-1 accuracy (as a percentage) averaged over all domains
    for the chosen prompt type ('standard' or 'own_domain')."""
    accs = result['zeroshot_top1_acc_as_percentage'][standard_or_own_domain + '_text_template']
    per_domain = [accs[domain] for domain in sorted(accs)]
    return np.mean(per_domain)
#returns epoch_list, acc_list
def grab_data(results, standard_or_own_domain):
    """Return (epoch_list, acc_list) for all checkpoints, sorted by their
    fractional epoch."""
    pairs = sorted(
        [(compute_epoch(results[key]), results[key]) for key in sorted(results.keys())]
    )
    epoch_list, result_list = list(zip(*pairs))
    acc_list = [compute_avg_acc(entry, standard_or_own_domain) for entry in result_list]
    return epoch_list, acc_list
#will make 4 plots, toggling between zoomin vs zoomout and standard-prompt vs own-domain-prompt
#will put a gold star at the highest point of all the lines, with text of its y-value
#will put a grey dotted line at the starting value of the first sequence in results_list, without any label in the legend
#will try to put the legend below the plot
#zoomout will be organic. zoomin will just change ylim[0] to be the grey-line value minus some buffer
#will always plot accuracy as percentage, averaged across all domains
#will always plot x-axis as epochs
def make_plots(results_list, color_list, marker_list, linestyle_list, label_list, plot_prefix):
"""Plot accuracy-vs-epoch curves and save four PNGs under plot_prefix:
{standard, own_domain} prompt x {zoomout, zoomin}. A grey dashed line marks
the first point of the first curve and a gold star marks the global best."""
os.makedirs(os.path.dirname(plot_prefix), exist_ok=True)
for standard_or_own_domain in ['standard', 'own_domain']:
plt.clf()
plt.figure(figsize=[14.4, 4.8])
best_x = None
best_y = float('-inf')
baseline_y = None
for results, color, marker, linestyle, label in zip(results_list, color_list, marker_list, linestyle_list, label_list):
epoch_list, acc_list = grab_data(results, standard_or_own_domain)
plt.plot(epoch_list, acc_list, color=color, marker=marker, linestyle=linestyle, label=label)
# Baseline = starting accuracy of the first sequence plotted.
if baseline_y is None:
baseline_y = acc_list[0]
if max(acc_list) > best_y:
best_y = max(acc_list)
best_x = epoch_list[np.argmax(acc_list)]
# Unlabeled grey dashed baseline across the full x range, then the star.
plt.plot(plt.xlim(), [baseline_y, baseline_y], linestyle='dashed', color='0.5')
plt.scatter([best_x], [best_y], s=320, marker='*', color='gold')
plt.text(best_x, best_y, '%.1f%%'%(best_y))
# Shrink the axes so the legend fits to the right of the plot area.
ax = plt.gca()
box = ax.get_position()
ax.set_position([box.x0, box.y0, 0.7 * box.width, box.height])
plt.legend(framealpha=1, bbox_to_anchor=(1,0.5), loc='center left')
plt.title('(' + standard_or_own_domain + ' prompt)')
plt.xlabel('epochs')
plt.ylabel('zero-shot accuracy (%)')
plt.savefig(plot_prefix + '-' + standard_or_own_domain + '-zoomout.png')
# Zoom-in keeps the same figure but clamps y around [baseline, best].
plt.ylim((baseline_y - ZOOMIN_BUFFER, best_y + ZOOMIN_BUFFER))
plt.savefig(plot_prefix + '-' + standard_or_own_domain + '-zoomin.png')
plt.clf()
| kjmillerCURIS/vislang-domain-exploration | clip_finetuning_plot_utils.py | clip_finetuning_plot_utils.py | py | 3,093 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.mean",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 3... |
40806340636 | """
Problem 30: Digit fifth powers
https://projecteuler.net/problem=30
Surprisingly there are only three numbers that can be written
as the sum of fourth powers of their digits:
1634 = 1^4 + 6^4 + 3^4 + 4^4
8208 = 8^4 + 2^4 + 0^4 + 8^4
9474 = 9^4 + 4^4 + 7^4 + 4^4
As 1 = 1^4 is not a sum it is not included.
The sum of these numbers is 1634 + 8208 + 9474 = 19316.
Find the sum of all the numbers that can be written as the sum of fifth powers of their digits.
"""
import pytest
@pytest.mark.parametrize('test_input_number,expected_result', [
(1634, True),
(8208, True),
(9474, True),
(1234, False),
(42, False),
(9876, False),
(3827, False),
(2, False),
])
def test_can_be_written_as_sum_of_nth_power(test_input_number, expected_result):
"""The three known fourth-power digit sums (1634, 8208, 9474) are accepted
and other numbers are rejected."""
# arrange
from src.p030_digit_fifth_powers import can_be_written_as_sum_of_nth_power
# act
actual_result = can_be_written_as_sum_of_nth_power(test_input_number, 4)
# assert
assert actual_result == expected_result
def test_get_numbers_that_can_be_written_as_sum_of_nth_power():
"""Scanning up to 1e5 with power 4 yields exactly the three known numbers."""
# arrange
from src.p030_digit_fifth_powers import get_numbers_that_can_be_written_as_sum_of_nth_power
# act
actual_result_iter = get_numbers_that_can_be_written_as_sum_of_nth_power(4, int(1e5))
# assert
expected_result = [1634, 8208, 9474]
assert list(actual_result_iter) == expected_result
| FranzDiebold/project-euler-solutions | test/test_p030_digit_fifth_powers.py | test_p030_digit_fifth_powers.py | py | 1,404 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "src.p030_digit_fifth_powers.can_be_written_as_sum_of_nth_power",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 21,
"usage_type": "at... |
28775606066 | #!/usr/bin/env python3.7
# Soubor: view.py
# Datum: 25.03.2019 13:11
# Autor: Marek Nožka, nozka <@t> spseol <d.t> cz
# Licence: GNU/GPL
############################################################################
from . import app, socketio
from flask import (render_template,
# Markup,
# request,
flash,
# redirect,
# session
)
import threading
import serial
serial = serial.Serial('/dev/ttyUSB0')
############################################################################
def read_loop():
"""Background loop: forward every line read from the serial port to all
Socket.IO clients as an 'input' event. Blocks forever on readline()."""
while True:
cislo = serial.readline()
print('@@@@@@@@@@@', cislo)
# bytes -> str; assumes the device emits ASCII lines -- TODO confirm.
socketio.emit('input', {'data': cislo.decode('ascii')})
# Use the public threading API instead of the private _start_new_thread
# helper; daemon=True keeps the reader from blocking interpreter shutdown.
threading.Thread(target=read_loop, daemon=True).start()
############################################################################
@app.route('/')
def index():
"""Render the landing page (base.html) with a flashed greeting."""
flash('ahoj')
return render_template('base.html')
@socketio.on('ahoj')
def ahoj(data=None):
"""Debug handler for the 'ahoj' Socket.IO event: just print the payload."""
print(data)
@socketio.on('connected')
def connected(data):
    """Log that a Socket.IO client reported a successful connection."""
    print('** Connected **')  # fixed typo in the log message ("Conected")
| MarrekNozka/socketio-experiment | webface/routes.py | routes.py | py | 1,116 | python | de | code | 0 | github-code | 36 | [
{
"api_name": "serial.Serial",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "serial.readline",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "threading._start_new_thread",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "flask.flash... |
16539479262 | import json
from common.variables import *
def send_msg(socket, msg):
    """JSON-encode `msg` and send it through `socket` using the project-wide
    ENCODING constant."""
    payload = json.dumps(msg).encode(ENCODING)
    socket.send(payload)
def get_msg(client):
    """Receive one JSON message from `client` and return it as a dict.

    :raises ValueError: if the decoded payload is not a dict.
    """
    raw = client.recv(MAX_PACKAGE_LENGTH).decode(ENCODING)
    response = json.loads(raw)
    if not isinstance(response, dict):
        raise ValueError
    return response
| MariaAfanaseva/app | HW_3_Afanaseva_Maria/common/utils.py | utils.py | py | 401 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.dumps",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 13,
"usage_type": "call"
}
] |
26745748297 | """
Workhorse file to perform analysis on data taken by Andor Cameras using CSPY
Author : Juan Bohorquez
Created on : 06/04/2021
Last Modified : 06/04/2021
"""
import h5py
import os
import numpy as np
import warnings
from typing import Tuple
from HamamatsuH5 import HMROI
def load_data(
results_file: h5py.File,
roi: HMROI
) -> np.array:
"""
Loads data from an Andor camera into a numpy array
results are indexed as follows
> results = array[iterations,measurements,shots,horizontal_pixels, vertical_pixels]
Args:
results_file: h5file object corresponding to results.hdf5 file
roi: region of interest from which to extract pixel data
Returns:
5D numpy array holding all of the data taken by the hamamatsu during the experiment
indexed [iteration,measurement,shot,horizontal_pixel,vertical_pixel]
"""
num_its = len(results_file['iterations'])
# NOTE(review): the +1 presumably turns a highest-index value into a count
# -- confirm against how measurementsPerIteration is written.
measurements = results_file['settings/experiment/measurementsPerIteration'][()] + 1
shots_per_measurement = 1
# Pre-allocate [iteration, measurement, shot, rows, cols] for the ROI window.
andr_pix = np.zeros(
(num_its, measurements, shots_per_measurement, roi.bottom - roi.top, roi.right - roi.left,),
dtype=int
)
for iteration, i_group in results_file['experiments/0/iterations'].items():
# print(f"iteration : {iteration} : {type(iteration)}")
for measurement, m_tup in enumerate(i_group['measurements'].items()):
m_group = m_tup[1]
# print(f"\tmeasurement : {measurement} : {type(measurement)}")
for shot, s_group in m_group['data/Andor_1026/shots'].items():
try:
# print(f"\t\tshot : {shot} : {type(shot)}")
andr_pix[int(iteration), int(measurement), int(shot)] = s_group[()][roi.slice]
except IndexError as e:
# Out-of-range indices are reported but do not abort the load.
warnings.warn(
f"{e}\n iteration : {iteration} measurement : {measurement} shot {shot}"
)
continue
except ValueError as ve:
# Shape mismatch between the shot data and the ROI window.
warnings.warn(
f"{ve}\n iteration : {iteration} measurement : {measurement} shot {shot}"
)
return andr_pix | JuanBohorquez3/Hybrid_H5 | H5_python3/AndorH5.py | AndorH5.py | py | 2,231 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "h5py.File",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "HamamatsuH5.HMROI",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"lin... |
4104421635 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 23 11:27:48 2018
@author: anup
"""
from elasticsearch import Elasticsearch
from bs4 import BeautifulSoup as BS
import glob
from preprocess_class import EsPreProcessor
import warnings
from html_processing import *
warnings.filterwarnings('ignore')
from preprocessor_collection import *
def es_index_create(files_location, # location of html files
index_1_params, # name of index 1
pre_processor,
headers_list): # preprocessor
"""Index the header-delimited sections of every HTML file under
files_location into Elasticsearch index index_1_params[0] (doc type
index_1_params[1]); returns a mapping of ES doc id -> section key."""
file_list = glob.glob(files_location + '/*.html')
file_names = [filename.split("/")[-1].split('.')[0] for filename in file_list]
# create index in elasticsearch with necessary field limit
es = Elasticsearch() # initialize elasticsearch
doc = {"settings": {"index.mapping.total_fields.limit": 10000}} # setting the field limit
es.indices.create(index = index_1_params[0], body = doc)
es_doc_id = 0
es_doc_id_content_dict = {}
for file_no in range(len(file_list)):
with open(file_list[file_no]) as f:
temp_html_file = [line.rstrip() for line in f]
html_file = ''
html_strip_file = ''
# NOTE(review): html_strip_file joins lines with NO separator, so words
# can fuse across line breaks -- confirm the header extractor tolerates it.
for line in temp_html_file:
html_file += (line + '\n')
html_strip_file += (line)
html = html_strip_file
# extract contents under the headers
section_dict_headers_contents = header_content_extraction(html,headers_list,file_names[file_no])
# assembling contents for the index
section_dict_1 = {**section_dict_headers_contents}
for key, value in section_dict_1.items():
section_dict_1[key] = EsPreProcessor.es_preprocessor_manager(value, pre_processor).es_pre_processed_corpus
for key, value in section_dict_1.items():
es_doc_id += 1
# NOTE(review): eval() on section keys is dangerous if keys derive from
# document text -- verify keys are trusted repr strings.
es_doc_id_content_dict[str(es_doc_id)] = eval(key)
es_update_dict = {}
es_update_dict['content'] = value
es.index(index=index_1_params[0], doc_type=index_1_params[1], id=es_doc_id, body = es_update_dict)
return es_doc_id_content_dict
def es_search_processor(es_search_doctype,
                        es_search_index,
                        es_search_body):
    """Run `es_search_body` against the given Elasticsearch index/doctype on
    the default local node and return the raw response dict."""
    client = Elasticsearch()
    return client.search(
        index=es_search_index,
        doc_type=es_search_doctype,
        body=es_search_body,
    )
| anupkhalam/es_xd_standalone | html_indexer.py | html_indexer.py | py | 2,752 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "preproc... |
31068607886 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 15:00
# @Author : cold
# @File : python_mysql.py
from configparser import ConfigParser
import os
class MySQLConfig(ConfigParser):
"""ConfigParser specialised for a my.cnf-style [mysqld] section.

Loads option values from an existing file (or built-in defaults when the
file is missing), mirrors every option both as an instance attribute and
in the mysql_vars dict that save() writes back."""
def __init__(self, config, **kwargs):
# ConfigParser.__init__(self,allow_no_value=True)
super(MySQLConfig, self).__init__(allow_no_value=True)
self.config = config
self.mysql_vars = {}
if os.path.exists(self.config):
self.read(self.config)
self.get_mysqld_vars()
else:
self.get_default_vars()
# Keyword arguments override anything read from the file / defaults.
self.set_mysqld_vars(kwargs)
def set_mysqld_vars(self, kwargs):
# Mirror each option as an attribute (setattr accepts names like
# 'log-bin' that are not valid identifiers) and record it for save().
for k, v in kwargs.items():
setattr(self,k,v)
self.mysql_vars[k] = str(v)
def get_mysqld_vars(self):
# Pull every option of the [mysqld] section into mysql_vars/attributes.
rst = {}
options = self.options('mysqld')
for o in options:
rst[o] = self.get('mysqld', o)
self.set_mysqld_vars(rst)
def get_default_vars(self):
# Fallback option set used when the config file does not exist yet.
default = {
'port':'3306',
'socket': '/tmp/mysql.sock',
'log-bin':'mysql-bin',
'basedir': '/usr/local/mysql',
'datadir':'/data/mysql',
'binlog_format':'mixed',
'server-id':'1',
'user':'mysql',
}
self.set_mysqld_vars(default)
def set_vars(self,k,v):
# Direct mysql_vars entry without the attribute mirror; v may be None
# for valueless options (allow_no_value=True supports them).
self.mysql_vars[k] = v
def save(self):
"""Write all collected options back to the [mysqld] section on disk."""
if not self.has_section('mysqld'):
self.add_section('mysqld')
for k,v in self.mysql_vars.items():
# print(k,v)
self.set('mysqld', k ,v)
with open(self.config,'w') as fd:
# print(fd)
self.write(fd)
if __name__ == '__main__':
# Manual smoke test: load/override options, add a valueless flag, persist.
mc = MySQLConfig(r'C:\Users\cold\Desktop\my3.cnf', mx=1360)
mc.set_vars('skip-grant1', None)
mc.save()
print(mc.port)
print(mc.socket) | liangtaos/mysqlmanage | python_mysql.py | python_mysql.py | py | 1,876 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
}
] |
28757254981 | import psycopg2
import psycopg2.pool
from psycopg2.extras import execute_values
import pandas.io.sql as psql
class Dbconnection:
"""Thin wrapper around a psycopg2 threaded connection pool bound to one
PostgreSQL schema (via search_path).

NOTE(review): several methods appear inconsistent with the switch from a
single connection to a pool (flagged inline); verify before use."""
def __init__(self, schema, database, user, password, dbhost, dbport):
self._properties = dict(
database=database,
user=user,
password=password,
host=dbhost,
port=dbport,
options=f'-c search_path={schema}'
)
# Pool of exactly one connection (min=1, max=1).
self._pool = psycopg2.pool.ThreadedConnectionPool(1,1,**self._properties)
# self._conn = psycopg2.connect(**self._properties)
#@property
def conn(self):
# Borrow a connection from the pool; callers must return it with putconn.
return self._pool.getconn()
def close(self):
# NOTE(review): self._conn is never assigned (the line is commented out
# in __init__), so this raises AttributeError; probably should release
# pool connections instead -- confirm intended semantics.
if self._conn and not self._conn.closed:
self._conn.close()
def commit(self):
# Commit della connection a DB (altrimenti le modifiche effettuate non vengono applicate sul database)
# NOTE(review): `conn` is a method, not a property (decorator commented
# out), so self.conn.commit() operates on the bound method object and
# fails; even fixed, it would commit a freshly borrowed connection.
self.conn.commit()
def rollback(self):
# Rollback to clean wrong DB modifications
# NOTE(review): same method-vs-property issue as commit() above.
self.conn.rollback()
def read(self, sql, idTable):
"""
:param sql: read sql to execute
:param idTable: the id to filter rows in the select table
:return: a dataframe of the selected rows, -1 otherwise
"""
connection = None
try:
connection = self.conn()
cursor = connection.cursor()
if idTable!=None:
cursor.execute(sql,[idTable])
else:
cursor.execute(sql)
return cursor.fetchall()
except Exception as e:
print(e)
return(-1)
finally:
# NOTE(review): the connection is closed and THEN returned to the
# pool, leaving a dead connection in the pool -- confirm; putconn
# alone is the usual pattern.
if connection :
connection.close()
self._pool.putconn(connection)
def insert(self, sql, dframe,return_id = False):
"""
:param sql: insert query to execute
:param dframe: data_frame to insert in the database
Columns order and types must be coherent with the input SQL
:param return_id: Bool if you want the inserted ID back
:return: the inserted ID
"""
connection = None
id_out = -1
try:
connection = self.conn()
cursor = connection.cursor()
values_list = [tuple(x) for x in dframe.values]
# Execute multiple insert
execute_values(cursor, sql, values_list)
# If main table retrieve autoincrement ID
if return_id:
id_out = cursor.fetchone()[0]
connection.commit()
return id_out
except Exception as e:
print(e)
return(-1)
finally:
# NOTE(review): same close-then-putconn issue as read() above.
if connection:
connection.close()
self._pool.putconn(connection)
def update(self, sql, idTable):
"""
:param sql: update_sql query
:param idTable: id to select records to update
:return: None
"""
# NOTE(review): self.conn is a method here, so .cursor() fails; the
# borrowed connection is also never committed or returned to the pool.
with self.conn.cursor() as c:
c.execute(sql, (idTable,))
def remove(self, delete_sql, idTable):
"""
:param delete_sql: delete sql to execute
:param idTable: the id of the rows to delete
"""
# NOTE(review): same method-vs-property / missing commit issue as update().
with self.conn.cursor() as c:
c.execute(delete_sql, (idTable,))
| csipiemonte/unlockpa-unlockbotrasa | code_actions/db/dbconnection.py | dbconnection.py | py | 3,262 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "psycopg2.pool.ThreadedConnectionPool",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "psycopg2.pool",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "psycopg2.extras.execute_values",
"line_number": 75,
"usage_type": "call"
}
] |
6200752155 | from __future__ import print_function
import torch.nn as nn
from torch.nn.utils.weight_norm import weight_norm
from utils import plot_variance
class FCNet(nn.Module):
    """Simple non-linear fully connected network.

    Builds one Linear+activation block per consecutive pair in `dims`.
    Unless `var_analysis` is set, each Linear is weight-normalized. With
    `var_analysis`, four extra same-width layers are appended and forward()
    plots per-layer activation variance while in training mode.
    """

    def __init__(
        self, dims, activation=nn.ReLU, relu_init=False, var_analysis=False, name=""
    ):
        super(FCNet, self).__init__()
        self.name = name
        self.var_analysis = var_analysis
        if var_analysis:
            # Fixed: build a new list instead of `dims += ...`, which mutated
            # the caller's list in place.
            dims = list(dims) + [dims[-1]] * 4
        layers = []
        for i in range(len(dims) - 2):
            in_dim = dims[i]
            out_dim = dims[i + 1]
            layers.append(
                nn.Sequential(
                    nn.Linear(in_dim, out_dim)
                    if var_analysis
                    else weight_norm(nn.Linear(in_dim, out_dim), dim=None),
                    activation(),
                )
            )
        # Final layer also carries an activation (note: output is activated).
        layers.append(
            nn.Sequential(
                nn.Linear(dims[-2], dims[-1])
                if var_analysis
                else weight_norm(nn.Linear(dims[-2], dims[-1]), dim=None),
                activation(),
            )
        )
        self.main = nn.ModuleList(layers)
        if relu_init:
            self.init_weights()

    def init_weights(self):
        """Kaiming-normal initialisation of all weight matrices (ReLU gain)."""
        for name, p in self.main.named_parameters():
            if "weight" in name:
                nn.init.kaiming_normal_(p.data, nonlinearity="relu")
        return

    def forward(self, x):
        for idx, layer in enumerate(self.main):
            x = layer(x)
            if self.var_analysis and self.training:
                # plot_variance comes from the project-level utils module.
                plot_variance(x.cpu(), self.name + " layer " + str(idx))
        return x
return x
if __name__ == "__main__":
fc1 = FCNet([10, 20, 10])
print(fc1)
print("============")
fc2 = FCNet([10, 20])
print(fc2)
| cliziam/VQA_project_Demo | demo-vqa-webcam/fc.py | fc.py | py | 1,847 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_n... |
1511337021 | import pandas as pd
import datetime
def load_analysis(analysis_id, data, metadata_record, projects, es, framework):
"""Load one analysis into Elasticsearch: its dataframes, its metadata
record (with cell_count filled in), any labels not yet present, and its
project memberships."""
load_data(data, analysis_id, es, framework)
# cell_count = number of rows in the framework's per-cell metrics table.
if framework == 'scp':
metadata_record['cell_count'] = data['annotation_metrics'].shape[0]
elif framework == 'mondrian':
metadata_record['cell_count'] = data['hmmcopy_metrics'].shape[0]
else:
raise Exception(f"Unknown framework, expected 'scp' or 'mondrian', but got '{framework}'")
es.load_record(metadata_record, analysis_id, es.ANALYSIS_ENTRY_INDEX)
missing_labels = es.get_missing_labels()
for label in missing_labels:
es.add_label(label)
es.add_analysis_to_projects(analysis_id, projects)
def clean_analysis(analysis_id, es):
"""Remove every trace of an analysis: its data indices, its metadata
record, and its project links."""
clean_data(analysis_id, es)
es.delete_record_by_id(es.ANALYSIS_ENTRY_INDEX, analysis_id)
es.remove_analysis_from_projects(analysis_id)
def clean_data(analysis_id, es):
    """Delete every per-analysis data index (qc, segs, bins, gc_bias)."""
    # Only the keys are needed here; the loader callables are irrelevant,
    # so iterate the dict directly instead of unpacking .items().
    for data_type in GET_DATA:
        es.delete_index(f"{analysis_id.lower()}_{data_type}")
def process_analysis_entry(analysis_id, library_id, sample_id, description, metadata):
    """Build the analysis-entry record: a copy of `metadata` overlaid with
    the fixed dashboard fields and a fresh ISO timestamp."""
    record = dict(metadata)
    record.update(
        timestamp=datetime.datetime.now().isoformat(),
        dashboard_id=analysis_id,
        jira_id=analysis_id,
        dashboard_type="single",
        library_id=library_id,
        sample_id=sample_id,
        description=description,
    )
    return record
def load_analysis_entry(analysis_id, library_id, sample_id, description, metadata, es):
"""Build the analysis-entry record and index it under `analysis_id`."""
record = process_analysis_entry(analysis_id, library_id, sample_id, description, metadata)
es.load_record(record, analysis_id, es.ANALYSIS_ENTRY_INDEX)
def load_data(data, analysis_id, es, framework):
"""Extract each data type's dataframe and load it into its own
Elasticsearch index, named "<analysis_id>_<data_type>" (lower-cased)."""
for data_type, get_data in GET_DATA.items():
df = get_data(data, framework)
es.load_df(df, f"{analysis_id.lower()}_{data_type}")
def get_qc_data(hmmcopy_data, framework=None):
    """Per-cell QC metrics table for the given framework.

    'scp' uses annotation_metrics as-is; 'mondrian' uses hmmcopy_metrics with
    columns renamed to match. Adds percent_unmapped_reads and converts the
    is_contaminated boolean to the strings 'true'/'false'. Note: mutates the
    dataframe held in `hmmcopy_data`.
    """
    if framework == 'scp':
        metrics = hmmcopy_data['annotation_metrics']
    elif framework == 'mondrian':
        metrics = hmmcopy_data['hmmcopy_metrics']
        metrics.rename(
            columns={'clustering_order': 'order', 'condition': 'experimental_condition'},
            inplace=True,
        )
    else:
        raise Exception(f"Unknown framework, expected 'scp' or 'mondrian', but got '{framework}'")
    metrics['percent_unmapped_reads'] = metrics["unmapped_reads"] / metrics["total_reads"]
    metrics['is_contaminated'] = metrics['is_contaminated'].apply(
        lambda flag: {True: 'true', False: 'false'}[flag]
    )
    return metrics
def get_segs_data(hmmcopy_data, framework=None):
    """Copy of the HMMcopy segments table plus a zero-padded `chrom_number`
    column usable as a sort key."""
    segs = hmmcopy_data['hmmcopy_segs'].copy()
    segs['chrom_number'] = create_chrom_number(segs['chr'])
    return segs
def get_bins_data(hmmcopy_data, framework=None):
    """Copy of the HMMcopy reads (bins) table plus a zero-padded
    `chrom_number` column usable as a sort key."""
    bins = hmmcopy_data['hmmcopy_reads'].copy()
    bins['chrom_number'] = create_chrom_number(bins['chr'])
    return bins
def get_gc_bias_data(hmmcopy_data, framework=None):
    """Reshape the wide per-cell GC metrics (columns '0'..'100') into long
    format with columns [cell_id, gc_percent, value].

    Uses DataFrame.melt instead of the original per-column append loop:
    DataFrame.append was removed in pandas 2.0 and the loop was quadratic.
    """
    data = hmmcopy_data['gc_metrics']
    gc_cols = [str(n) for n in range(101)]
    gc_bias_df = data.melt(
        id_vars=['cell_id'],
        value_vars=gc_cols,
        var_name='gc_percent',
        value_name='value',
    )
    # melt yields the column names as strings; the original emitted ints.
    gc_bias_df['gc_percent'] = gc_bias_df['gc_percent'].astype(int)
    return gc_bias_df[['cell_id', 'gc_percent', 'value']]
GET_DATA = {
f"qc": get_qc_data,
f"segs": get_segs_data,
f"bins": get_bins_data,
f"gc_bias": get_gc_bias_data,
}
# Zero-pad single-digit chromosome names ("1" -> "01") so lexicographic
# sorting matches numeric order; "X"/"Y" and double digits pass through.
chr_prefixed = {str(a): '0' + str(a) for a in range(1, 10)}
def create_chrom_number(chromosomes):
"""Return a Series of sortable chromosome labels for the given `chr` Series."""
# Series.map with .get leaves values absent from chr_prefixed unchanged.
chrom_number = chromosomes.map(lambda a: chr_prefixed.get(a, a))
return chrom_number
| shahcompbio/alhenaloader | alhenaloader/load.py | load.py | py | 3,730 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 95,
"usage_type": "call"
}
] |
73894898984 | __author__ = "Sebastian Heinlein <devel@glatzor.de>"
import datetime
import glob
import gzip
import locale
import logging
import os
import re
import subprocess
import tempfile
import time
import traceback
import uuid
import apt
import apt_pkg
from defer import inline_callbacks, return_value
from defer.utils import dbus_deferred_method
import dbus
from gi.repository import GObject
import lsb_release
import packagekit.enums as pk_enums
# for optional plugin support
try:
import pkg_resources
except ImportError:
pkg_resources = None
from aptdaemon import policykit1
import aptdaemon.core
from aptdaemon.core import APTDAEMON_TRANSACTION_DBUS_INTERFACE
import aptdaemon.enums as aptd_enums
from aptdaemon.errors import TransactionFailed, TransactionCancelled
from aptdaemon.progress import DaemonAcquireProgress
import aptdaemon.worker
import aptdaemon.networking
# Initialize GObject thread support before any mainloop usage.
GObject.threads_init()
pklog = logging.getLogger("AptDaemon.PackageKit")
# Check if update-manager-core is installed to get aware of the
# latest distro releases
try:
    from UpdateManager.Core.MetaRelease import MetaReleaseCore
except ImportError:
    META_RELEASE_SUPPORT = False
else:
    META_RELEASE_SUPPORT = True
# Xapian database is optionally used to speed up package description search
XAPIAN_DB_PATH = os.environ.get("AXI_DB_PATH", "/var/lib/apt-xapian-index")
XAPIAN_DB = XAPIAN_DB_PATH + "/index"
XAPIAN_DB_VALUES = XAPIAN_DB_PATH + "/values"
# Only enabled below if the xapian module imports and the index is readable.
XAPIAN_SUPPORT = False
try:
    import xapian
except ImportError:
    pass
else:
    if os.access(XAPIAN_DB, os.R_OK):
        pklog.debug("Use XAPIAN for the search")
        XAPIAN_SUPPORT = True
# Regular expressions to detect bug numbers in changelogs according to the
# Debian Policy Chapter 4.4. For details see the footnote 16:
# http://www.debian.org/doc/debian-policy/footnotes.html#f16
MATCH_BUG_CLOSES_DEBIAN = r"closes:\s*(?:bug)?\#?\s?\d+(?:,\s*(?:bug)?\#?\s?\d+)*"
MATCH_BUG_NUMBERS = r"\#?\s?(\d+)"
# URL pointing to a bug in the Debian bug tracker
HREF_BUG_DEBIAN = "http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=%s"
MATCH_BUG_CLOSES_UBUNTU = r"lp:\s+\#\d+(?:,\s*\#\d+)*"
HREF_BUG_UBUNTU = "https://bugs.launchpad.net/bugs/%s"
# Regular expression to find cve references.
# FIX: use a raw string (non-raw "\d" is an invalid escape sequence) and
# allow four OR MORE digits in the sequence number — CVE identifiers
# assigned since 2014 may have more than four digits (CVE-YYYY-NNNN...).
MATCH_CVE = r"CVE-\d{4}-\d{4,}"
HREF_CVE = "http://web.nvd.nist.gov/view/vuln/detail?vulnId=%s"
# Map Debian sections to the PackageKit group name space
# Sections that have no close PackageKit equivalent fall back to broad
# groups (e.g. "base"/"embedded" -> SYSTEM, "unknown"/"alien" -> UNKNOWN).
SECTION_GROUP_MAP = {
    "admin" : pk_enums.GROUP_ADMIN_TOOLS,
    "base" : pk_enums.GROUP_SYSTEM,
    "comm" : pk_enums.GROUP_COMMUNICATION,
    "devel" : pk_enums.GROUP_PROGRAMMING,
    "doc" : pk_enums.GROUP_DOCUMENTATION,
    "editors" : pk_enums.GROUP_PUBLISHING,
    "electronics" : pk_enums.GROUP_ELECTRONICS,
    "embedded" : pk_enums.GROUP_SYSTEM,
    "games" : pk_enums.GROUP_GAMES,
    "gnome" : pk_enums.GROUP_DESKTOP_GNOME,
    "graphics" : pk_enums.GROUP_GRAPHICS,
    "hamradio" : pk_enums.GROUP_COMMUNICATION,
    "interpreters" : pk_enums.GROUP_PROGRAMMING,
    "kde" : pk_enums.GROUP_DESKTOP_KDE,
    "libdevel" : pk_enums.GROUP_PROGRAMMING,
    "libs" : pk_enums.GROUP_SYSTEM,
    "mail" : pk_enums.GROUP_INTERNET,
    "math" : pk_enums.GROUP_SCIENCE,
    "misc" : pk_enums.GROUP_OTHER,
    "net" : pk_enums.GROUP_NETWORK,
    "news" : pk_enums.GROUP_INTERNET,
    "oldlibs" : pk_enums.GROUP_LEGACY,
    "otherosfs" : pk_enums.GROUP_SYSTEM,
    "perl" : pk_enums.GROUP_PROGRAMMING,
    "python" : pk_enums.GROUP_PROGRAMMING,
    "science" : pk_enums.GROUP_SCIENCE,
    "shells" : pk_enums.GROUP_SYSTEM,
    "sound" : pk_enums.GROUP_MULTIMEDIA,
    "tex" : pk_enums.GROUP_PUBLISHING,
    "text" : pk_enums.GROUP_PUBLISHING,
    "utils" : pk_enums.GROUP_ACCESSORIES,
    "web" : pk_enums.GROUP_INTERNET,
    "x11" : pk_enums.GROUP_DESKTOP_OTHER,
    "unknown" : pk_enums.GROUP_UNKNOWN,
    "alien" : pk_enums.GROUP_UNKNOWN,
    "translations" : pk_enums.GROUP_LOCALIZATION,
    "metapackages" : pk_enums.GROUP_COLLECTIONS }
# Well-known PackageKit D-Bus names mirrored by this compatibility layer.
PACKAGEKIT_DBUS_INTERFACE = "org.freedesktop.PackageKit"
PACKAGEKIT_DBUS_SERVICE = "org.freedesktop.PackageKit"
PACKAGEKIT_DBUS_PATH = "/org/freedesktop/PackageKit"
PACKAGEKIT_TRANS_DBUS_INTERFACE = "org.freedesktop.PackageKit.Transaction"
PACKAGEKIT_TRANS_DBUS_SERVICE = "org.freedesktop.PackageKit.Transaction"
# Translate aptdaemon exit status enums to PackageKit exit enums.
# FIX: the EXIT_FAILED key was listed twice in the original literal; in a
# dict literal the duplicate silently overwrites the first entry.
MAP_EXIT_ENUM = {
    aptd_enums.EXIT_SUCCESS: pk_enums.EXIT_SUCCESS,
    aptd_enums.EXIT_CANCELLED: pk_enums.EXIT_CANCELLED,
    aptd_enums.EXIT_FAILED: pk_enums.EXIT_FAILED,
    aptd_enums.EXIT_PREVIOUS_FAILED: pk_enums.EXIT_FAILED,
}
# Translate aptdaemon transaction status enums to PackageKit status enums.
# FIX: STATUS_RUNNING was listed twice in the original literal; duplicate
# dict keys silently overwrite each other.
MAP_STATUS_ENUM = {
    aptd_enums.STATUS_WAITING: pk_enums.STATUS_WAIT,
    aptd_enums.STATUS_RUNNING: pk_enums.STATUS_RUNNING,
    aptd_enums.STATUS_CANCELLING: pk_enums.STATUS_CANCEL,
    aptd_enums.STATUS_CLEANING_UP: pk_enums.STATUS_CLEANUP,
    aptd_enums.STATUS_COMMITTING: pk_enums.STATUS_COMMIT,
    aptd_enums.STATUS_DOWNLOADING: pk_enums.STATUS_DOWNLOAD,
    aptd_enums.STATUS_DOWNLOADING_REPO: pk_enums.STATUS_DOWNLOAD_REPOSITORY,
    aptd_enums.STATUS_FINISHED: pk_enums.STATUS_FINISHED,
    aptd_enums.STATUS_LOADING_CACHE: pk_enums.STATUS_LOADING_CACHE,
    aptd_enums.STATUS_RESOLVING_DEP: pk_enums.STATUS_DEP_RESOLVE,
    aptd_enums.STATUS_WAITING_LOCK: pk_enums.STATUS_WAITING_FOR_LOCK,
    # PackageKit has no equivalents for medium/config-file prompts.
    aptd_enums.STATUS_WAITING_MEDIUM: pk_enums.STATUS_UNKNOWN,
    aptd_enums.STATUS_WAITING_CONFIG_FILE_PROMPT: pk_enums.STATUS_UNKNOWN,
}
# Translate aptdaemon error enums to the closest PackageKit error enum.
MAP_ERROR_ENUM = {
    aptd_enums.ERROR_CACHE_BROKEN: pk_enums.ERROR_NO_CACHE,
    aptd_enums.ERROR_DEP_RESOLUTION_FAILED:
        pk_enums.ERROR_DEP_RESOLUTION_FAILED,
    aptd_enums.ERROR_INCOMPLETE_INSTALL: pk_enums.ERROR_NO_CACHE,
    aptd_enums.ERROR_INVALID_PACKAGE_FILE:
        pk_enums.ERROR_PACKAGE_CORRUPT,
    aptd_enums.ERROR_KEY_NOT_INSTALLED: pk_enums.ERROR_GPG_FAILURE,
    aptd_enums.ERROR_KEY_NOT_REMOVED: pk_enums.ERROR_GPG_FAILURE,
    aptd_enums.ERROR_NOT_REMOVE_ESSENTIAL_PACKAGE:
        pk_enums.ERROR_PACKAGE_FAILED_TO_REMOVE,
    aptd_enums.ERROR_NO_CACHE: pk_enums.ERROR_NO_CACHE,
    aptd_enums.ERROR_NO_LOCK: pk_enums.ERROR_CANNOT_GET_LOCK,
    aptd_enums.ERROR_NO_PACKAGE: pk_enums.ERROR_PACKAGE_NOT_FOUND,
    aptd_enums.ERROR_PACKAGE_ALREADY_INSTALLED:
        pk_enums.ERROR_PACKAGE_ALREADY_INSTALLED,
    aptd_enums.ERROR_PACKAGE_DOWNLOAD_FAILED:
        pk_enums.ERROR_PACKAGE_DOWNLOAD_FAILED,
    aptd_enums.ERROR_PACKAGE_MANAGER_FAILED:
        pk_enums.ERROR_TRANSACTION_ERROR,
    aptd_enums.ERROR_PACKAGE_NOT_INSTALLED:
        pk_enums.ERROR_PACKAGE_NOT_INSTALLED,
    aptd_enums.ERROR_PACKAGE_UNAUTHENTICATED:
        pk_enums.ERROR_BAD_GPG_SIGNATURE,
    aptd_enums.ERROR_PACKAGE_UPTODATE:
        pk_enums.ERROR_NO_PACKAGES_TO_UPDATE,
    aptd_enums.ERROR_REPO_DOWNLOAD_FAILED:
        pk_enums.ERROR_REPO_NOT_AVAILABLE,
    aptd_enums.ERROR_UNREADABLE_PACKAGE_FILE:
        pk_enums.ERROR_INVALID_PACKAGE_FILE,
    aptd_enums.ERROR_SYSTEM_ALREADY_UPTODATE:
        pk_enums.ERROR_NO_PACKAGES_TO_UPDATE,
    }
# Translate aptdaemon per-package progress enums to PackageKit info enums.
MAP_PACKAGE_ENUM = {
    aptd_enums.PKG_CONFIGURING: pk_enums.INFO_INSTALLING,
    aptd_enums.PKG_DISAPPEARING: pk_enums.INFO_UNKNOWN,
    aptd_enums.PKG_INSTALLED: pk_enums.INFO_FINISHED,
    aptd_enums.PKG_INSTALLING: pk_enums.INFO_INSTALLING,
    aptd_enums.PKG_PREPARING_INSTALL: pk_enums.INFO_PREPARING,
    aptd_enums.PKG_PREPARING_PURGE: pk_enums.INFO_PREPARING,
    aptd_enums.PKG_PREPARING_REMOVE: pk_enums.INFO_PREPARING,
    aptd_enums.PKG_PURGED: pk_enums.INFO_FINISHED,
    aptd_enums.PKG_PURGING: pk_enums.INFO_REMOVING,
    aptd_enums.PKG_REMOVED: pk_enums.INFO_FINISHED,
    aptd_enums.PKG_REMOVING: pk_enums.INFO_REMOVING,
    aptd_enums.PKG_RUNNING_TRIGGER: pk_enums.INFO_CLEANUP,
    aptd_enums.PKG_UNKNOWN: pk_enums.INFO_UNKNOWN,
    aptd_enums.PKG_UNPACKING: pk_enums.INFO_DECOMPRESSING,
    aptd_enums.PKG_UPGRADING: pk_enums.INFO_UPDATING,
    }
class PackageKit(aptdaemon.core.DBusObject):

    """Provides a limited set of the PackageKit system D-Bus API."""

    # Roles of the PackageKit API implemented by this compatibility layer.
    # FIX: the original listed ROLE_INSTALL_PACKAGES twice and omitted
    # ROLE_REMOVE_PACKAGES although package removal is implemented.
    SUPPORTED_ROLES = [pk_enums.ROLE_REFRESH_CACHE,
                       pk_enums.ROLE_UPDATE_SYSTEM,
                       pk_enums.ROLE_SIMULATE_UPDATE_PACKAGES,
                       pk_enums.ROLE_UPDATE_PACKAGES,
                       pk_enums.ROLE_SIMULATE_REMOVE_PACKAGES,
                       pk_enums.ROLE_REMOVE_PACKAGES,
                       pk_enums.ROLE_SIMULATE_INSTALL_PACKAGES,
                       pk_enums.ROLE_INSTALL_PACKAGES,
                       pk_enums.ROLE_GET_DISTRO_UPGRADES,
                       pk_enums.ROLE_GET_UPDATES,
                       pk_enums.ROLE_GET_UPDATE_DETAIL,
                       pk_enums.ROLE_GET_PACKAGES,
                       pk_enums.ROLE_GET_DETAILS,
                       pk_enums.ROLE_GET_DEPENDS,
                       pk_enums.ROLE_GET_REQUIRES,
                       pk_enums.ROLE_SEARCH_NAME,
                       pk_enums.ROLE_SEARCH_DETAILS,
                       pk_enums.ROLE_SEARCH_GROUP,
                       pk_enums.ROLE_SEARCH_FILE,
                       pk_enums.ROLE_WHAT_PROVIDES,
                       pk_enums.ROLE_DOWNLOAD_PACKAGES]

    # Package filters this backend claims to support.
    SUPPORTED_FILTERS = [pk_enums.FILTER_INSTALLED,
                         pk_enums.FILTER_NOT_INSTALLED,
                         pk_enums.FILTER_FREE,
                         pk_enums.FILTER_NOT_FREE,
                         pk_enums.FILTER_GUI,
                         pk_enums.FILTER_NOT_GUI,
                         pk_enums.FILTER_COLLECTIONS,
                         pk_enums.FILTER_NOT_COLLECTIONS,
                         pk_enums.FILTER_SUPPORTED,
                         pk_enums.FILTER_NOT_SUPPORTED,
                         pk_enums.FILTER_NEWEST]

    def __init__(self, queue, connect=True, bus=None):
        """Initialize a new PackageKit compatibility layer.

        Keyword arguments:
        connect -- if the daemon should connect to the D-Bus (default is True)
        bus -- the D-Bus to connect to (defaults to the system bus)
        """
        pklog.info("Initializing PackageKit compat layer")
        bus_name = None
        bus_path = None
        if connect:
            if bus is None:
                bus = dbus.SystemBus()
            self.bus = bus
            bus_path = PACKAGEKIT_DBUS_PATH
            bus_name = dbus.service.BusName(PACKAGEKIT_DBUS_SERVICE, self.bus)
        aptdaemon.core.DBusObject.__init__(self, bus_name, bus_path)
        # Delayed UpdatesChanged bookkeeping, see _check_updates_changed().
        self._updates_changed_timeout_id = None
        self._updates_changed = False
        self.queue = queue
        self.queue.worker.connect("transaction-done",
                                  self._on_transaction_done)
        self.queue.connect("queue-changed", self._on_queue_changed)
        self._distro_id = None
        self.netmon = aptdaemon.networking.get_network_monitor()
        self.netmon.connect("network-state-changed",
                            self._on_network_state_changed)
        self.netmon.get_network_state()

    # SIGNALS
    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_DBUS_INTERFACE,
                         signature="")
    def RestartSchedule(self):
        """A system restart has been scheduled."""
        pass

    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_DBUS_INTERFACE,
                         signature="as")
    def TransactionListChanged(self, transactions):
        """The transaction list has changed, because either a transaction
        has finished or a new transaction created.

        :param transactions: A list of transaction ID's.
        :type transactions: as
        """
        pklog.debug("Emitting TransactionListChanged signal: %s", transactions)

    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_DBUS_INTERFACE,
                         signature="")
    def UpdatesChanged(self):
        """This signal is emitted when the number of updates has changed."""
        pklog.debug("Emitting UpdatesChanged signal")

    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_DBUS_INTERFACE,
                         signature="")
    def RepoListChanged(self):
        """This signal is emitted when the repository list has changed."""
        pass

    # pylint: disable-msg=C0103,C0322
    # FIX: Changed() was defined twice in the original class body; the first
    # (empty) definition was silently replaced by this one, so only a single
    # definition is kept.
    @dbus.service.signal(dbus_interface=PACKAGEKIT_DBUS_INTERFACE,
                         signature="")
    def Changed(self):
        """This signal is emitted when a property on the interface changes."""
        pklog.debug("Emitting PackageKit Changed()")

    # METHODS
    # pylint: disable-msg=C0103,C0322
    @dbus.service.method(PACKAGEKIT_DBUS_INTERFACE,
                         in_signature="s", out_signature="s")
    def CanAuthorize(self, action_id):
        """Allows a client to find out if it would be allowed to authorize
        an action.

        :param action_id: The action ID, e.g.
            org.freedesktop.packagekit.system-network-proxy-configure
        :returns: The result, either yes, no or interactive.
        """
        #FIXME: We need to map packagekit and aptdaemon polices
        return "interactive"

    # pylint: disable-msg=C0103,C0322
    @dbus.service.method(PACKAGEKIT_DBUS_INTERFACE,
                         in_signature="s", out_signature="")
    def StateHasChanged(self, reason):
        """This method suggests to PackageKit that the package backend state
        may have changed. This allows plugins to the native package manager
        to suggest that PackageKit drops it's caches.

        :param reason:
            The reason of the state change. Valid reasons are resume or
            posttrans. Resume is given a lower priority than posttrans.
        """
        pklog.debug("StateHasChanged() was called: %s", reason)
        self._updates_changed = True
        if reason == "cache-update":
            self._check_updates_changed(timeout=30)
        elif reason == "resume":
            self._check_updates_changed(timeout=180)

    # pylint: disable-msg=C0103,C0322
    @dbus_deferred_method(PACKAGEKIT_DBUS_INTERFACE,
                          in_signature="", out_signature="s",
                          sender_keyword="sender")
    def GetTid(self, sender):
        """Gets a new transaction ID from the daemon.

        :returns: The tid, e.g. 45_dafeca_checkpoint32
        """
        return self._get_tid(sender)

    @inline_callbacks
    def _get_tid(self, sender):
        # Resolve the caller's credentials and create a fresh PackageKit
        # transaction object bound to them.
        pid, uid, cmdline = \
            yield policykit1.get_proc_info_from_dbus_name(sender, self.bus)
        pktrans = PackageKitTransaction(pid, uid, cmdline, self.queue, sender)
        return_value(pktrans.tid)

    # pylint: disable-msg=C0103,C0322
    @dbus.service.method(PACKAGEKIT_DBUS_INTERFACE,
                         in_signature="", out_signature="as")
    def GetTransactionList(self):
        """Gets the transaction list of any transactions that are in
        progress.

        :returns: A list of transaction ID's
        """
        pklog.debug("GetTransactionList() was called")
        return self._get_transaction_list()

    # HELPERS
    def _get_properties(self, iface):
        """Helper to get the properties of a D-Bus interface."""
        if iface == PACKAGEKIT_DBUS_INTERFACE:
            return {# Claim that we are a stable version
                    "VersionMajor": dbus.UInt32(6),
                    "VersionMinor": dbus.UInt32(18),
                    "VersionMicro": dbus.UInt32(0),
                    "BackendName": dbus.String("aptdaemon"),
                    "BackendDescription": dbus.String("Compatibility layer"),
                    "BackendAuthor": dbus.String(__author__),
                    "Filters": dbus.String(";".join(self.SUPPORTED_FILTERS)),
                    "Groups": dbus.String(";".join(SECTION_GROUP_MAP.values())),
                    "Roles": dbus.String(";".join(self.SUPPORTED_ROLES)),
                    "Locked": dbus.Boolean(False),
                    "NetworkState": dbus.String(self.netmon.state),
                    "DistroId": dbus.String(self._get_distro_id()),
                    }
        else:
            return {}

    def _get_distro_id(self):
        """Return information about the distribution as "id;codename;arch".

        The value is computed once and cached in self._distro_id.
        """
        if self._distro_id is None:
            info = lsb_release.get_distro_information()
            # FIX: strip the trailing newline emitted by dpkg; the original
            # embedded it into the DistroId string.  universal_newlines also
            # makes the output a text string on Python 3.
            arch = subprocess.Popen(["dpkg", "--print-architecture"],
                                    stdout=subprocess.PIPE,
                                    universal_newlines=True
                                    ).communicate()[0].strip()
            try:
                self._distro_id = "%s;%s;%s" % (info["ID"], info["CODENAME"],
                                                arch)
            except KeyError:
                self._distro_id = "unknown;unknown;%s" % arch
        return self._distro_id

    def _on_network_state_changed(self, mon, state):
        # NOTE(review): _get_properties() exposes this value as
        # "NetworkState" while the change is announced as "Network" —
        # confirm which key PackageKit clients expect.
        self.Changed()
        self.PropertiesChanged(PACKAGEKIT_DBUS_INTERFACE,
                               {"Network": state}, [])

    def _on_queue_changed(self, queue):
        # Mirror aptdaemon queue changes onto the PackageKit signal set.
        self.TransactionListChanged(self._get_transaction_list())
        self._check_updates_changed()

    def _get_transaction_list(self):
        """Return the tids of all queued/running PackageKit transactions."""
        pk_transactions = []
        for trans in self.queue.items:
            # We currently only emit PackageKit transaction
            #FIXME: Should we use MergedTransaction for all transactions and
            #       ROLE_UNKOWN for aptdaemon only transactions?
            try:
                pk_transactions.append(trans.pktrans.tid)
            except AttributeError:
                pass
        try:
            pk_transactions.append(self.queue.worker.trans.pktrans.tid)
        except AttributeError:
            pass
        return pk_transactions

    def _on_transaction_done(self, worker, trans):
        # If a cache modifing transaction is completed schedule an
        # UpdatesChanged signal
        if trans.role in (aptd_enums.ROLE_INSTALL_FILE,
                          aptd_enums.ROLE_INSTALL_PACKAGES,
                          aptd_enums.ROLE_REMOVE_PACKAGES,
                          aptd_enums.ROLE_UPGRADE_PACKAGES,
                          aptd_enums.ROLE_COMMIT_PACKAGES,
                          aptd_enums.ROLE_UPGRADE_SYSTEM,
                          aptd_enums.ROLE_FIX_BROKEN_DEPENDS):
            self._updates_changed = True
            self._check_updates_changed()
        elif trans.role == aptd_enums.ROLE_UPDATE_CACHE:
            self._updates_changed = True
            self._check_updates_changed(timeout=30)

    def _check_updates_changed(self, timeout=60):
        """After the queue was processed schedule a delayed UpdatesChanged
        signal if required.
        """
        if not self.queue.items and self._updates_changed:
            if self._updates_changed_timeout_id:
                # If we already have a scheduled UpdatesChanged signal
                # delay it even further
                pklog.debug("UpdatesChanged signal re-scheduled")
                GObject.source_remove(self._updates_changed_timeout_id)
            else:
                pklog.debug("UpdatesChanged signal scheduled")
            self._updates_changed_timeout_id = \
                GObject.timeout_add_seconds(timeout,
                                            self._delayed_updates_changed)

    def _delayed_updates_changed(self):
        """Emit the UpdatesChanged signal and clear the timeout."""
        self.UpdatesChanged()
        self._updates_changed_timeout_id = None
        self._updates_changed = False
        # Returning False removes the GObject timeout source.
        return False
class MergedTransaction(aptdaemon.core.Transaction):

    """Overlay of an Aptdaemon transaction which also provides the
    PackageKit object and its interfaces.

    The property setters are overridden so that every state change of the
    aptdaemon transaction is mirrored onto the attached PackageKit
    transaction object (``self.pktrans``).
    """

    def __init__(self, pktrans, role, queue, connect=True,
                 bus=None, packages=None, kwargs=None):
        # Reuse the PackageKit transaction's identity: its tid (without the
        # leading "/") and the caller's pid/uid/cmdline/sender.
        aptdaemon.core.Transaction.__init__(self, pktrans.tid[1:], role, queue,
                                            pktrans.pid, pktrans.uid,
                                            pktrans.cmdline, pktrans.sender,
                                            connect, bus, packages, kwargs)
        self.pktrans = pktrans
        self.run_time = 0

    def _set_status(self, enum):
        """Forward status changes translated to the PackageKit enum."""
        aptdaemon.core.Transaction._set_status(self, enum)
        self.pktrans.status = get_pk_status_enum(enum)
    status = property(aptdaemon.core.Transaction._get_status, _set_status)

    def _set_progress(self, percent):
        """Forward overall progress to the PackageKit transaction."""
        aptdaemon.core.Transaction._set_progress(self, percent)
        self.pktrans.percentage = self._progress
    progress = property(aptdaemon.core.Transaction._get_progress, _set_progress)

    def _set_progress_details(self, details):
        # Presumably details[4] is the download speed and details[5] the
        # estimated remaining time -- TODO confirm against
        # aptdaemon.core.Transaction.progress_details.
        aptdaemon.core.Transaction._set_progress_details(self, details)
        self.pktrans.speed = int(details[4])
        self.pktrans.remaining_time = int(details[5])
        self.pktrans.elapsed_time = int(time.time() - self.pktrans.start_time)
    progress_details = property(aptdaemon.core.Transaction._get_progress_details,
                                _set_progress_details)

    def _set_progress_package(self, progress):
        """Forward per-package progress as a PackageKit Package signal."""
        aptdaemon.core.Transaction._set_progress_package(self, progress)
        pkg_name, enum = progress
        self.emit_package(get_pk_package_enum(enum),
                          get_pk_package_id(pkg_name),
                          "")
    progress_package = property(aptdaemon.core.Transaction._get_progress_package,
                                _set_progress_package)

    def _set_exit(self, enum):
        """Forward the exit status translated to the PackageKit enum."""
        aptdaemon.core.Transaction._set_exit(self, enum)
        self.pktrans.exit = get_pk_exit_enum(enum)
    exit = property(aptdaemon.core.Transaction._get_exit, _set_exit)

    def _set_error(self, excep):
        # _error_property[1] is used as the error details string --
        # TODO confirm against aptdaemon.core.Transaction.
        aptdaemon.core.Transaction._set_error(self, excep)
        self.pktrans.ErrorCode(get_pk_error_enum(excep.code),
                               self._error_property[1])
    error = property(aptdaemon.core.Transaction._get_error, _set_error)

    def _remove_from_connection_no_raise(self):
        """Remove the D-Bus export and tear down the PackageKit object."""
        aptdaemon.core.Transaction._remove_from_connection_no_raise(self)
        self.pktrans.Destroy()
        try:
            self.pktrans.remove_from_connection()
        except LookupError as error:
            pklog.debug("remove_from_connection() raised LookupError: %s",
                        error)
        finally:
            # Break the reference cycle between the two transaction objects.
            self.pktrans.trans = None
            self.pktrans = None
        return False

    def emit_details(self, package_id, license, group, detail, url, size):
        """Forward a Details signal to the PackageKit transaction."""
        self.pktrans.Details(package_id, license, group, detail, url, size)

    def emit_files(self, id, file_list):
        """Forward a Files signal to the PackageKit transaction."""
        self.pktrans.Files(id, file_list)

    def emit_package(self, info, id, summary):
        """Forward a Package signal and remember the last package id."""
        self.pktrans.Package(info, id, summary)
        self.pktrans.last_package = id

    def emit_update_detail(self, package_id, updates, obsoletes, vendor_url,
                           bugzilla_url, cve_url, restart, update_text,
                           changelog, state, issued, updated):
        """Forward an UpdateDetail signal to the PackageKit transaction."""
        self.pktrans.UpdateDetail(package_id, updates, obsoletes, vendor_url,
                                  bugzilla_url, cve_url, restart, update_text,
                                  changelog, state, issued, updated)
class PackageKitTransaction(aptdaemon.core.DBusObject):
"""Provides a PackageKit transaction object."""
def __init__(self, pid, uid, cmdline, queue, sender,
connect=True, bus=None):
pklog.info("Initializing PackageKit transaction")
bus_name = None
bus_path = None
self.tid = "/%s" % uuid.uuid4().get_hex()
if connect == True:
if bus is None:
bus = dbus.SystemBus()
self.bus = bus
bus_path = self.tid
bus_name = dbus.service.BusName(PACKAGEKIT_DBUS_SERVICE, bus)
aptdaemon.core.DBusObject.__init__(self, bus_name, bus_path)
self.queue = queue
self.hints = {}
self.start_time = time.time()
self._elapsed_time = 0
self._remaining_time = 0
self._speed = 0
self._caller_active = True
self._allow_cancel = False
self._percentage = 0
self._subpercentage = 0
self._status = pk_enums.STATUS_SETUP
self._last_package = ""
self.uid = uid
self.pid = pid
self.cmdline = cmdline
self.role = pk_enums.ROLE_UNKNOWN
self.sender = sender
self.trans = None
@property
def allow_cancel(self):
return self._allow_cancel
@allow_cancel.setter
def allow_cancel(self, value):
self._allow_cancel = dbus.Boolean(value)
self.PropertiesChanged(PACKAGEKIT_TRANS_DBUS_INTERFACE,
{"AllowCancel": self._allow_cancel}, [])
self.Changed()
@property
def last_package(self):
return self._last_package
@last_package.setter
def last_package(self, value):
self._last_package = dbus.String(value)
self.PropertiesChanged(PACKAGEKIT_TRANS_DBUS_INTERFACE,
{"LastPackage": self._last_package}, [])
self.Changed()
@property
def caller_active(self):
return self._caller_active
@caller_active.setter
def caller_active(self, value):
self._caller_active = dbus.Boolean(value)
self.PropertiesChanged(PACKAGEKIT_TRANS_DBUS_INTERFACE,
{"CallerActive": self._caller_active}, [])
self.Changed()
@property
def percentage(self):
return self._percentage
@percentage.setter
def percentage(self, progress):
self._percentage = dbus.UInt32(progress)
self.PropertiesChanged(PACKAGEKIT_TRANS_DBUS_INTERFACE,
{"Percentage": self._percentage}, [])
self.Changed()
@property
def subpercentage(self):
return self._subpercentage
@subpercentage.setter
def subpercentage(self, progress):
self._subpercentage = dbus.UInt32(progress)
self.PropertiesChanged(PACKAGEKIT_TRANS_DBUS_INTERFACE,
{"SubPercentage": self._subpercentage}, [])
self.Changed()
@property
def status(self):
return self._status
@status.setter
def status(self, enum):
self._status = dbus.String(enum)
self.PropertiesChanged(PACKAGEKIT_TRANS_DBUS_INTERFACE,
{"Status": self._status}, [])
self.Changed()
@property
def elapsed_time(self):
return self._elapsed_time
@elapsed_time.setter
def elapsed_time(self, ela):
self._elpased_time = dbus.UInt32(ela)
self.PropertiesChanged(PACKAGEKIT_TRANS_DBUS_INTERFACE,
{"ElapsedTime": self._elapsed_time}, [])
self.Changed()
@property
def remaining_time(self):
return self._remaining_time
@remaining_time.setter
def remaining_time(self, value):
self._elpased_time = dbus.UInt32(value)
self.PropertiesChanged(PACKAGEKIT_TRANS_DBUS_INTERFACE,
{"RemainingTime": self._remaining_time}, [])
self.Changed()
@property
def speed(self):
return self._speed
@speed.setter
def speed(self, speed):
self._speed = dbus.UInt32(speed)
self.PropertiesChanged(PACKAGEKIT_TRANS_DBUS_INTERFACE,
{"AllowCancel": self._speed}, [])
self.Changed()
@property
def exit(self):
return self._exit
@exit.setter
def exit(self, enum):
self._exit = exit
self.run_time = int((time.time() - self.start_time) * 1000)
# The time could go backwards ...
if self.run_time < 0:
self.run_time = 0
if enum == pk_enums.EXIT_CANCELLED:
self.ErrorCode(pk_enums.ERROR_TRANSACTION_CANCELLED, "")
self.status = pk_enums.STATUS_FINISHED
self.Finished(enum, self.run_time)
    # SIGNALS
    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         signature="ssbsusus")
    def Transaction(self, old_tid, timespec, succeeded, role, duration, data,
                    uid, cmdline):
        """This signal is sent when more details are required about a
        specific transaction.

        :param old_tid: The transaction ID of the old transaction.
        :param timespec: The timespec of the old transaction in ISO8601 format.
        :param succeeded: If the transaction succeeded.
        :param role: The role enumerated type.
        :param duration: The duration of the transaction in milliseconds.
        :param data: Any data associated with the transaction.
        :param uid: The user ID of the user that scheduled the action.
        :param cmdline: The command line of the tool that scheduled the action,
            e.g. /usr/bin/gpk-application.
        """
        pass
    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         signature="ss")
    def ErrorCode(self, code, details):
        """This signal is used to report errors back to the session program.
        Errors should only be sent on fatal abort.

        :param code: Enumerated type, e.g. no-network.
        :param details: Long description or error, e.g. "failed to connect"
        :type code: s
        :type details: s
        """
        pass
    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         signature="")
    def Changed(self):
        """This signal is emitted when a property on the interface changes."""
        pklog.debug("Emitting PackageKitTransaction Changed()")
    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         signature="")
    def Destroy(self):
        """This signal is sent when the transaction has been destroyed
        and is no longer available for use."""
        pklog.debug("Emmitting Destroy()")
    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         signature="su")
    def Finished(self, exit, runtime):
        """This signal is used to signal that the transaction has finished.

        :param exit: The PkExitEnum describing the exit status of the
            transaction.
        :param runtime: The amount of time in milliseconds that the
            transaction ran for.
        :type exit: s
        :type runtime: u
        """
        pklog.debug("Emitting Finished: %s, %s", exit, runtime)
        pass
    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         signature="ssssst")
    def Details(self, package_id, license, group, detail, url, size):
        """This signal allows the backend to convey more details about the
        package.

        :param package_id: The package ID
        :param license:
            The license string, e.g. GPLv2+ or BSD and (MPLv1.1 or GPLv2+).
            More details about the correct way to format licensing strings
            can be found on the Fedora packaging wiki.
        :param group:
            The enumerated package group description
        :param detail:
            The multi-line package description. If formatting is required,
            then markdown syntax should be used, e.g. This is **critically**
            important
        :param url:
            The upstream project homepage
        :param size:
            The size of the package in bytes. This should be the size of the
            entire package file, not the size of the files installed on the
            system. If the package is not installed, and already downloaded
            and present in the package manager cache, then this value should
            be set to zero.
        """
        pklog.debug("Emmitting Details signal for %s", package_id)
    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         signature="ss")
    def Files(self, package_id, file_list):
        """This signal is used to push file lists from the backend to the
        session.

        :param package_id:
            The Package ID that called the method.
        :param file_list:
            The file list, with each file separated with ;.
        """
        pklog.debug("Emitting Files signal: %s, %s", package_id, file_list)
    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         signature="ssssssssssss")
    def UpdateDetail(self, package_id, updates, obsoletes, vendor_url,
                     bugzilla_url, cve_url, restart, update_text, changelog,
                     state, issued, updated):
        """This signal is sent when more details are required about a
        specific update.

        :param package_id: The package ID
        :param updates:
            A list of package_id's that are to be updated, separated by
            &. This odd delimiter was chosen as \t is already being used
            in the spawned backends, and & is a banned character in a
            package_id.
        :param obsoletes:
            A list of package_id's that are to be obsoleted, separated by &
        :param vendor_url:
            A URL with more details on the update, e.g. a page with more
            information on the update. The format of this command should
            be http://www.foo.org/page.html?4567;Update to SELinux
        :param bugzilla_url:
            A bugzilla URL with more details on the update. If no URL is
            available then this field should be left empty.
        :param cve_url:
            A CVE URL with more details on the security advisory.
        :param restart:
            A valid restart type, e.g. system.
        :param update_text:
            The update text describing the update. If formatting is required,
            then markdown syntax should be used, e.g. This is **critically**
            important.
        :param changelog:
            The ChangeLog text describing the changes since the last version.
        :param state:
            The state of the update, e.g. stable or testing.
        :param issued:
            The ISO8601 encoded date that the update was issued.
        :param updated:
            The ISO8601 encoded date that the update was updated.
        """
        pklog.debug("Emmitting UpdateDetail signal for %s", package_id)
    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         signature="sss")
    def Package(self, info, package_id, summary):
        """This signal allows the backend to communicate packages to the
        session.

        If updating, as packages are updated then emit them to the screen.
        This allows a summary to be presented after the transaction.
        When returning results from a search always return installed
        before available for the same package name.

        :param info: A valid info string enumerated type
        :param package_id: This identifier is of the form
            name;version;arch;data in a single string and is meant to
            represent a single package unique across all local and remote
            data stores. For a remote, not-installed package the data
            field should be set as the repository identifier or repository
            name. The data field for an installed package must be prefixed
            with installed as this is used to identify which packages are
            installable or installed in the client tools. As a special
            extension, if the package manager is able to track which
            repository a package was originally installed from, then the data
            field can be set to installed:REPO-NAME which allows the frontend
            client to advise the user of the package origin. The data field
            for a non-installed local package must be local as this signifies
            a repository name is not available and that package resides
            locally on the client system rather than in any specific
            repository.
        :param summary: The one line package summary, e.g. Clipart for
            OpenOffice
        """
        # Only log a short prefix of the summary to keep the debug log terse.
        pklog.debug("Emmitting Package signal: info=%s id=%s summary='%s'",
                    info, package_id, summary[:10])
    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         signature="sss")
    def DistroUpgrade(self, type, name, summary):
        """This signal allows the backend to communicate distribution upgrades
        to the session.

        :param type: A valid upgrade string enumerated type, e.g. stable
            or unstable
        :param name: The short name of the distribution, e.g. Fedora Core
            10 RC1
        :param summary: The multi-line description of the release
        :type type: s
        :type name: s
        :type summary: s
        """
        pass
    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         signature="ss")
    def RequireRestart(self, restart_type, package_id):
        """This signal is sent when the session client should notify the user
        that a restart is required to get all changes into effect.

        :param restart_type:
            One of system, application or session
        :param package_id:
            The Package ID of the package triggering the restart
        """
        pklog.debug("Emitting RequireRestart signal: %s, %s",
                    restart_type, package_id)
# METHODS
# pylint: disable-msg=C0103,C0322
@dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="as", out_signature="")
def SetHints(self, hints):
"""This method allows the calling session to set transaction hints
for the package manager which can change as the transaction runs.
This method can be sent before the transaction has been run or
whilst it is running. There is no limit to the number of times
this method can be sent, although some backends may only use the
values that were set before the transaction was started.
Each parameter value is optional.
:param hints: The values as an array of strings, for example
['locale=en_GB.utf8','interactive=false','cache-age=3600']
"""
for hint in hints:
key, value = hint.split("=", 1)
if key not in ["locale", "idle", "background", "interactive",
"cache-age", "frontend-socket"]:
raise Exception("Invalid option %s" % key)
self.hints[key] = value
# pylint: disable-msg=C0103,C0322
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="", out_signature="",
sender_keyword="sender")
def Cancel(self, sender):
"""This method cancels a transaction that is already running."""
if self.trans:
return self.trans._cancel(sender)
    # pylint: disable-msg=C0103,C0322
    @dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                          in_signature="asbb", out_signature="",
                          sender_keyword="sender")
    def RemovePackages(self, package_ids, allow_deps, autoremove, sender):
        """This method removes packages from the local system.

        This method typically emits Progress, Status and Error and Package.
        Package enumerated types should be downloading, updating, installing
        or removing.

        :param package_ids: An array of package IDs.
        :param allow_deps:
            Either true or false. If true allow other packages to be removed
            with the package, but false should cause the script to abort if
            other packages are dependent on the package.
        :param autoremove:
            Either true or false. This option is only really interesting on
            embedded devices with a limited amount of flash storage. It
            suggests to the packagekit backend that dependencies installed at
            the same time as the package should also be removed if they are
            not required by anything else. For instance, if you install
            OpenOffice, it might download libneon as a dependency. When
            auto_remove is set to true, and you remove OpenOffice then
            libneon will also get removed automatically.
        """
        pklog.debug("RemovePackages() was called")
        self.role = pk_enums.ROLE_REMOVE_PACKAGES
        # The actual work happens in the deferred helper below.
        return self._remove_packages(package_ids, allow_deps, autoremove,
                                     sender)
@inline_callbacks
def _remove_packages(self, package_ids, allow_deps, autoremove, sender):
self.trans = self._get_merged_trans(aptd_enums.ROLE_REMOVE_PACKAGES,
pkg_ids=package_ids,
pkg_type=aptd_enums.PKGS_REMOVE)
yield self.trans._set_property(APTDAEMON_TRANSACTION_DBUS_INTERFACE,
"RemoveObsoletedDepends", autoremove,
sender)
try:
yield self.trans._simulate(sender)
except aptdameon.errors.TransactionFailed:
raise StopIteration
for pkgs in self.trans.depends:
if pkgs:
error_code = packagekit.errors.ERROR_DEP_RESOLUTION_FAILED
self.trans.pktrans.ErrorCode(error_code,
"Would change additional packages")
self.trans.pktrans.exit = pk_enums.EXIT_FAILED
yield self.trans._run(sender)
    # pylint: disable-msg=C0103,C0322
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="asb", out_signature="",
                         sender_keyword="sender")
    def SimulateRemovePackages(self, package_ids, autoremove, sender):
        """This method simulates a package removal emitting packages
        required to be installed, removed, updated, reinstalled,
        downgraded, obsoleted or untrusted. The latter is used to present
        the user untrusted packages that are about to be installed.
        This method typically emits Error and Package.
        :param package_ids: An array of package IDs.
        :param autoremove:
            Either true or false. This option is only really interesting on
            embedded devices with a limited amount of flash storage. It
            suggests to the packagekit backend that dependencies installed at
            the same time as the package should also be removed if they are not
            required by anything else. For instance, if you install OpenOffice,
            it might download libneon as a dependency. When auto_remove is set
            to true, and you remove OpenOffice then libneon will also get
            removed automatically.
        """
        pklog.debug("SimulateRemovePackages() was called")
        # Defer the actual simulation to the main loop; the method itself
        # returns immediately (plain dbus.service.method, not deferred).
        GObject.idle_add(defer_idle, self._simulate_remove_packages,
                         package_ids, autoremove, sender)
    @inline_callbacks
    def _simulate_remove_packages(self, package_ids, autoremove, sender):
        """Deferred worker for SimulateRemovePackages(): set up a removal
        transaction, simulate it and emit the resulting Package signals.
        """
        self.role = pk_enums.ROLE_SIMULATE_REMOVE_PACKAGES
        self.status = pk_enums.STATUS_DEP_RESOLVE
        self.trans = self._get_merged_trans(aptd_enums.ROLE_REMOVE_PACKAGES,
                                            pkg_ids=package_ids,
                                            pkg_type=aptd_enums.PKGS_REMOVE)
        yield self.trans._set_property(APTDAEMON_TRANSACTION_DBUS_INTERFACE,
                                       "RemoveObsoletedDepends", autoremove,
                                       sender)
        yield self._simulate_and_emit_packages(sender)
    # pylint: disable-msg=C0103,C0322
    @dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                          in_signature="bas", out_signature="",
                          sender_keyword="sender")
    def UpdatePackages(self, only_trusted, package_ids, sender):
        """This method updates existing packages on the local system.
        The installer should always update extra packages automatically
        to fulfil dependencies.
        This method typically emits Progress, Status and Error and Package.
        :param only_trusted:
            If the transaction is only allowed to install trusted packages.
            Unsigned packages should not be installed if this parameter is
            TRUE. If this method can only install trusted packages, and
            the packages are unsigned, then the backend will send a
            ErrorCode(missing-gpg-signature). On receiving this error, the
            client may choose to retry with only_trusted FALSE after
            gaining further authentication.
        :param package_ids: An array of package IDs.
        """
        pklog.debug("UpdatePackages() was called")
        # The role is set by _update_packages().
        return self._update_packages(only_trusted, package_ids, sender)
    @inline_callbacks
    def _update_packages(self, only_trusted, package_ids, sender):
        """Deferred worker for UpdatePackages(): run an aptdaemon upgrade
        transaction for the given package IDs.
        """
        self.role = pk_enums.ROLE_UPDATE_PACKAGES
        self.trans = self._get_merged_trans(aptd_enums.ROLE_UPGRADE_PACKAGES,
                                            pkg_ids=package_ids,
                                            pkg_type=aptd_enums.PKGS_UPGRADE)
        # PackageKit's only_trusted maps to the inverse of aptdaemon's
        # AllowUnauthenticated property.
        yield self.trans._set_property(APTDAEMON_TRANSACTION_DBUS_INTERFACE,
                                       "AllowUnauthenticated", not only_trusted,
                                       sender)
        yield self.trans._run(sender)
    # pylint: disable-msg=C0103,C0322
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="as", out_signature="",
                         sender_keyword="sender")
    def SimulateUpdatePackages(self, package_ids, sender):
        """This method simulates a package update emitting packages
        required to be installed, removed, updated, reinstalled,
        downgraded, obsoleted or untrusted. The latter is used to present
        the user untrusted packages that are about to be installed.
        This method typically emits Error and Package.
        :param package_ids: An array of package IDs.
        """
        pklog.debug("SimulateUpdatePackages() was called")
        self.role = pk_enums.ROLE_SIMULATE_UPDATE_PACKAGES
        # Defer the actual simulation to the main loop.
        GObject.idle_add(defer_idle, self._simulate_update_packages,
                         package_ids, sender)
    @inline_callbacks
    def _simulate_update_packages(self, package_ids, sender):
        """Deferred worker for SimulateUpdatePackages(): set up an upgrade
        transaction, simulate it and emit the resulting Package signals.
        """
        self.status = pk_enums.STATUS_RUNNING
        self.trans = self._get_merged_trans(aptd_enums.ROLE_UPGRADE_PACKAGES,
                                            pkg_ids=package_ids,
                                            pkg_type=aptd_enums.PKGS_UPGRADE)
        # Simulations never install anything, so unauthenticated packages
        # are acceptable here.
        yield self.trans._set_property(APTDAEMON_TRANSACTION_DBUS_INTERFACE,
                                       "AllowUnauthenticated", True, sender)
        yield self._simulate_and_emit_packages(sender)
    # pylint: disable-msg=C0103,C0322
    @dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                          in_signature="bas", out_signature="",
                          sender_keyword="sender")
    def InstallPackages(self, only_trusted, package_ids, sender):
        """This method installs new packages on the local system.
        The installer should always install extra packages automatically
        as the user could call GetDepends prior to the install if a
        confirmation is required in the UI.
        This method typically emits Progress, Status and Error and Package.
        Package enumerated types should be downloading, updating,
        installing or removing.
        :param only_trusted:
            If the transaction is only allowed to install trusted packages.
            Unsigned packages should not be installed if this parameter is
            TRUE. If this method can only install trusted packages, and
            the packages are unsigned, then the backend will send a
            ErrorCode(missing-gpg-signature). On receiving this error, the
            client may choose to retry with only_trusted FALSE after
            gaining further authentication.
        :param package_ids: An array of package IDs.
        """
        pklog.debug("InstallPackages() was called")
        self.role = pk_enums.ROLE_INSTALL_PACKAGES
        return self._install_packages(only_trusted, package_ids, sender)
    @inline_callbacks
    def _install_packages(self, only_trusted, package_ids, sender):
        """Deferred worker for InstallPackages(): run an aptdaemon install
        transaction for the given package IDs.
        """
        self.trans = self._get_merged_trans(aptd_enums.ROLE_INSTALL_PACKAGES,
                                            pkg_ids=package_ids,
                                            pkg_type=aptd_enums.PKGS_INSTALL)
        # PackageKit's only_trusted maps to the inverse of aptdaemon's
        # AllowUnauthenticated property.
        yield self.trans._set_property(APTDAEMON_TRANSACTION_DBUS_INTERFACE,
                                       "AllowUnauthenticated", not only_trusted,
                                       sender)
        yield self.trans._run(sender)
    # pylint: disable-msg=C0103,C0322
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="as", out_signature="",
                         sender_keyword="sender")
    def SimulateInstallPackages(self, package_ids, sender):
        """This method simulates a package installation emitting packages
        required to be installed, removed, updated, reinstalled, downgraded,
        obsoleted or untrusted. The latter is used to present the user
        untrusted packages that are about to be installed.
        This method typically emits Error and Package.
        :param package_ids: An array of package IDs.
        """
        pklog.debug("SimulateInstallPackages() was called")
        self.role = pk_enums.ROLE_SIMULATE_INSTALL_PACKAGES
        # Defer the actual simulation to the main loop.
        GObject.idle_add(defer_idle, self._simulate_install_packages,
                         package_ids, sender)
    @inline_callbacks
    def _simulate_install_packages(self, package_ids, sender):
        """Deferred worker for SimulateInstallPackages(): set up an install
        transaction, simulate it and emit the resulting Package signals.
        """
        self.status = pk_enums.STATUS_RUNNING
        self.trans = self._get_merged_trans(aptd_enums.ROLE_INSTALL_PACKAGES,
                                            pkg_ids=package_ids,
                                            pkg_type=aptd_enums.PKGS_INSTALL)
        # Simulations never install anything, so unauthenticated packages
        # are acceptable here.
        yield self.trans._set_property(APTDAEMON_TRANSACTION_DBUS_INTERFACE,
                                       "AllowUnauthenticated", True, sender)
        yield self._simulate_and_emit_packages(sender)
@inline_callbacks
def _simulate_and_emit_packages(self, sender, update_info=None):
try:
yield self.trans._simulate(sender)
except:
raise StopIteration
for pkg in self.trans.depends[aptd_enums.PKGS_INSTALL]:
self.Package(pk_enums.INFO_INSTALLING,
get_pk_package_id(pkg), "")
for pkg in self.trans.depends[aptd_enums.PKGS_REINSTALL]:
self.Package(pk_enums.INFO_REINSTALLING,
get_pk_package_id(pkg, "installed"), "")
for pkg in self.trans.depends[aptd_enums.PKGS_REMOVE]:
self.Package(pk_enums.INFO_REMOVING,
get_pk_package_id(pkg, "installed"), "")
for pkg in self.trans.depends[aptd_enums.PKGS_PURGE]:
self.Package(pk_enums.INFO_REMOVING,
get_pk_package_id(pkg, "installed"), "")
for pkg in self.trans.depends[aptd_enums.PKGS_UPGRADE]:
self.Package(update_info or pk_enums.INFO_UPDATING,
get_pk_package_id(pkg, None), "")
for pkg in self.trans.depends[aptd_enums.PKGS_DOWNGRADE]:
self.Package(pk_enums.INFO_DOWNGRADING,
get_pk_package_id(pkg), "")
for pkg in self.trans.depends[aptd_enums.PKGS_KEEP]:
self.Package(pk_enums.INFO_BLOCKED,
get_pk_package_id(pkg), "")
self.status = pk_enums.STATUS_FINISHED
self.Finished(pk_enums.EXIT_SUCCESS, 0)
    # pylint: disable-msg=C0103,C0322
    @dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                          in_signature="b", out_signature="",
                          sender_keyword="sender")
    def RefreshCache(self, force, sender):
        """This method should fetch updated meta-data for all enabled
        repositories.
        When fetching each software source, ensure to emit RepoDetail for
        the current source to give the user interface some extra details.
        Be sure to have the "enabled" field set to true, otherwise you
        wouldn't be fetching that source.
        This method typically emits Progress, Error and RepoDetail.
        :param force: If the caches should be cleaned and reloaded even if
            there is valid, up to date data.
        """
        pklog.debug("RefreshCache() was called")
        self.role = pk_enums.ROLE_REFRESH_CACHE
        return self._refresh_cache(force, sender)
    @inline_callbacks
    def _refresh_cache(self, force, sender):
        """Deferred worker for RefreshCache(): run an aptdaemon cache
        update transaction covering all sources.

        NOTE(review): the force flag is accepted but not used here - the
        underlying transaction always refreshes; confirm this is intended.
        """
        self.trans = self._get_merged_trans(aptd_enums.ROLE_UPDATE_CACHE,
                                            kwargs={"sources_list": None})
        yield self.trans._run(sender)
    # pylint: disable-msg=C0103,C0322
    @dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                          in_signature="b", out_signature="",
                          sender_keyword="sender")
    def UpdateSystem(self, only_trusted, sender):
        """This method updates all packages on the system to their newest
        versions.
        The installer should update all the updateable packages on the
        system, including automatically installing any new packages that
        are needed for dependencies.
        :param only_trusted:
            If the transaction is only allowed to install trusted packages.
            Unsigned packages should not be installed if this parameter is
            TRUE. If this method can only install trusted packages, and
            the packages are unsigned, then the backend will send a
            ErrorCode(missing-gpg-signature). On receiving this error, the
            client may choose to retry with only_trusted FALSE after
            gaining further authentication.
        """
        pklog.debug("UpdateSystem() was called")
        return self._update_system(only_trusted, sender)
    @inline_callbacks
    def _update_system(self, only_trusted, sender):
        """Deferred worker for UpdateSystem(): run a full (non-safe-mode)
        aptdaemon system upgrade.
        """
        self.role = pk_enums.ROLE_UPDATE_SYSTEM
        # safe_mode=False allows installing/removing packages during the
        # upgrade (full dist-upgrade semantics).
        self.trans = self._get_merged_trans(aptd_enums.ROLE_UPGRADE_SYSTEM,
                                            kwargs={"safe_mode": False})
        yield self.trans._set_property(APTDAEMON_TRANSACTION_DBUS_INTERFACE,
                                       "AllowUnauthenticated", not only_trusted,
                                       sender)
        yield self.trans._run(sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="as", out_signature="",
sender_keyword="sender")
def GetUpdateDetail(self, package_ids, sender):
"""This method returns details about a specific update.
This method typically emits UpdateDetail and Error
:param package_ids: An array of package IDs.
"""
pklog.debug("GetUpdateDetail() was called")
self.role = pk_enums.ROLE_GET_UPDATE_DETAIL
kwargs = {"package_ids": package_ids}
return self._run_query(kwargs, sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="s", out_signature="",
sender_keyword="sender")
def GetUpdates(self, filter, sender):
"""This method should return a list of packages that are installed
and are upgradable. It should only return the newest update for
each installed package.
This method typically emits Progress, Error and Package.
:param filter: A correct filter, e.g. none or installed;~devel
"""
pklog.debug("GetUpdates() was called")
self.role = pk_enums.ROLE_GET_UPDATES
kwargs = {"filters": filter.split(";")}
return self._run_query(kwargs, sender)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="", out_signature="",
                         sender_keyword="sender")
    def GetDistroUpgrades(self, sender):
        """This method should return a list of distribution upgrades that are
        available. It should not return updates, only major upgrades.
        This method typically emits DistroUpgrade, Error
        """
        pklog.debug("GetDistroUpgrades() was called")
        self.role = pk_enums.ROLE_GET_DISTRO_UPGRADES
        self.status = pk_enums.STATUS_RUNNING
        # Kick off the meta-release download from the main loop.
        GObject.idle_add(defer_idle, self._get_distro_upgrades)
def _get_distro_upgrades(self):
#FIXME: Should go into the worker after the threading branch is merged
# It allows to run a nested loop until the download is finished
self.allow_cancel = False
self.percentage = 101
self.status = pk_enums.STATUS_DOWNLOAD_UPDATEINFO
if META_RELEASE_SUPPORT == False:
self.ErrorCode(pk_enums.ERROR_INTERNAL_ERROR,
"Please make sure that update-manager-core is"
"correctly installed.")
self.exit = pk_enums.EXIT_FAILED
return
#FIXME Evil to start the download during init
meta_release = GMetaRelease()
meta_release.connect("download-done",
self._on_distro_upgrade_download_done)
def _on_distro_upgrade_download_done(self, meta_release):
#FIXME: Add support for description
if meta_release.new_dist != None:
self.DistroUpgrade("stable",
"%s %s" % (meta_release.new_dist.name,
meta_release.new_dist.version),
"The latest stable release")
self.exit = pk_enums.EXIT_SUCCESS
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="sas", out_signature="",
sender_keyword="sender")
def Resolve(self, filter, packages, sender):
"""This method turns a single package name into a package_id suitable
for the other methods.
If the package is a fully formed package_id, then this should be
treated as an exact package match. This is useful to find the summary
or installed status of a package_id returned from other methods.
This method typically emits Error and Package.
Package enumerated types should be available or installed.
:param filter: A correct filter, e.g. none or installed;~devel
:param packages:
An array of package names, e.g. scribus-clipart. The package
names are case sensitive, so for instance: Resolve('Packagekit')
would not match PackageKit. As a special case, if Resolve() is
called with a name prefixed with @ then this should be treated as
a category, for example: @web-development. In this instance, a
meta-package should be emitted, for example:
web-development;;;meta with the correct installed status and
summary for the category.
"""
pklog.debug("Resolve() was called")
self.role = pk_enums.ROLE_RESOLVE
kwargs = {"filters": filter.split(";"), "packages": packages}
return self._run_query(kwargs, sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="s", out_signature="",
sender_keyword="sender")
def GetPackages(self, filter, sender):
"""This method returns all the packages without a search term.
This method typically emits Progress, Error and Package.
Package enumerated types should be available or installed.
:param filter: A correct filter, e.g. none or installed;~devel
"""
pklog.debug("GetPackages() was called")
self.role = pk_enums.ROLE_GET_PACKAGES
kwargs = {"filters": filter.split(";")}
return self._run_query(kwargs, sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="as", out_signature="",
sender_keyword="sender")
def GetDetails(self, package_ids, sender):
"""This method should return all the details about a specific
package_id.
This method typically emits Progress, Status and Error and Details.
:param package_ids: An array of package IDs.
"""
pklog.debug("GetDetails() was called")
self.role = pk_enums.ROLE_GET_DETAILS
kwargs = {"package_ids": package_ids}
return self._run_query(kwargs, sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="as", out_signature="",
sender_keyword="sender")
def GetFiles(self, package_ids, sender):
"""This method should return the file list of the package_id.
This method typically emits Progress, Status and Error and Files.
:param package_ids: An array of package IDs.
"""
pklog.debug("GetFiles() was called")
self.role = pk_enums.ROLE_GET_FILES
kwargs = {"package_ids": package_ids}
return self._run_query(kwargs, sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="sas", out_signature="",
sender_keyword="sender")
def SearchFiles(self, filter, values, sender):
"""This method searches for files on the local system and files in
available packages.
This should search for files. This should allow an application to
find out what package owns a file on the system.
This method typically emits Progress, Error and Package.
Package enumerated types should be available or installed.
:param filter: A correct filter, e.g. none or installed;~devel
:param values:
A filename or fully qualified path and filename on the system.
If the search term begins with a / it will be assumed the entire
path has been given and only packages that contain this exact
path and filename will be returned. If the search term does not
start with / then it should be treated as a single filename,
which can be in any directory. The search is case sensitive,
and should not be escaped or surrounded in quotes.
"""
pklog.debug("SearchFiles() was called")
self.role = pk_enums.ROLE_SEARCH_FILE
kwargs = {"filters": filter.split(";"),
"values": values}
return self._run_query(kwargs, sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="sas", out_signature="",
sender_keyword="sender")
def SearchDetails(self, filter, values, sender):
"""This method allows deeper searching than SearchName().
Do not refresh the package cache. This should be fast. This is very
similar to search-name. This should search as much data as possible,
including, if possible repo names, package summaries, descriptions,
licenses and URLs.
Try to emit installed before available packages first, as it allows
the client program to perform the GUI filtering and matching whilst
the daemon is running the transaction.
If the backend includes installed and available versions of the same
package when searching then the available version will have to be
filtered in the backend.
This method typically emits Progress, Error and Package.
Package enumerated types should be available or installed.
:param filter: A correct filter, e.g. none or installed;~devel
:param values:
A single word search term with no wildcard chars. The search term
can contain many words separated by spaces. In this case, the
search operator is AND. For example, search of gnome power should
returns gnome-power-manager but not gnomesword or powertop.
The search should not be treated as case sensitive.
"""
pklog.debug("SearchDetails() was called")
self.role = pk_enums.ROLE_SEARCH_DETAILS
kwargs = {"filters": filter.split(";"),
"values": values}
return self._run_query(kwargs, sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="sas", out_signature="",
sender_keyword="sender")
def SearchGroups(self, filter, values, sender):
"""This method returns packages from a given group enumerated type.
Do not refresh the package cache. This should be fast.
Try to emit installed before available packages first, as it
allows the client program to perform the GUI filtering and matching
whilst the daemon is running the transaction.
If the backend includes installed and available versions of the same
package when searching then the available version will have to be
filtered in the backend.
This method typically emits Progress, Error and Package.
Package enumerated types should be available or installed.
:param filter: A correct filter, e.g. none or installed;~devel
:param values:
An enumerated group type, or unknown. The search cannot contain
spaces. The following recommendations are made below: If the values
strings are prefixed with category: then the request is treated
as a 'category search', for example: category:web-development.
Note: the old nomenclature for a 'category search' suggested using
a @ prefix for the values options. This is still supported, and
backends should continue to support category searches like
@web-development. If the values strings are prefixed with
repo: then the request is treated as a 'repository search', for
example: repo:fedora-debuginfo. In this instance all packages that
were either installed from, or can be installed from the
fedora-debuginfo source would be returned.
"""
pklog.debug("SearchGroups() was called")
self.role = pk_enums.ROLE_SEARCH_GROUP
kwargs = {"filters": filter.split(";"),
"values": values}
return self._run_query(kwargs, sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="sas", out_signature="",
sender_keyword="sender")
def SearchNames(self, filter, values, sender):
"""This method searches the package database by package name.
Try to emit installed before available packages first, as it
allows the client program to perform the GUI filtering and matching
whilst the daemon is running the transaction.
If the backend includes installed and available versions of the same
package when searching then the available version will have to be
filtered in the backend.
The search methods should return all results in all repositories.
This may mean that multiple versions of package are returned. If this
is not what is wanted by the client program, then the newest filter
should be used.
This method typically emits Progress, Error and Package.
Package enumerated types should be available or installed.
:param filter: A correct filter, e.g. none or installed;~devel
:param values:
A single word search term with no wildcard chars. The search term
can contain many words separated by spaces. In this case, the
search operator is AND. For example, search of gnome power should
returns gnome-power-manager but not gnomesword or powertop.
The search should not be treated as case sensitive.
"""
pklog.debug("SearchNames() was called")
self.role = pk_enums.ROLE_SEARCH_NAME
kwargs = {"filters": filter.split(";"),
"values": values}
return self._run_query(kwargs, sender)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="s", out_signature="",
                         sender_keyword="sender")
    def AcceptEula(self, eula_id, sender):
        """This method allows the user to accept a end user licence agreement.
        Not implemented: asynchronously reports ERROR_NOT_SUPPORTED.
        :param eula_id: A valid EULA ID
        """
        self.role = pk_enums.ROLE_ACCEPT_EULA
        GObject.idle_add(self._fail_not_implemented)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="bas", out_signature="",
sender_keyword="sender")
def DownloadPackages(self, store_in_cache, package_ids, sender):
"""This method downloads packages into a temporary directory.
This method should emit one Files signal for each package that
is downloaded, with the file list set as the name of the complete
downloaded file and directory, so for example:
DownloadPackages('hal;0.1.2;i386;fedora',
'hal-info;2009-09-07;no-arch;updates') should send two signals,
e.g. Files('hal;0.1.2;i386;fedora', '/tmp/hal-0.1.2.i386.rpm')
and Files('hal-info;2009-09-07;no-arch;updates',
'/tmp/hal-info-2009-09-07.noarch.rpm').
:param store_in_cache:
If the downloaded files should be stored in the system package
cache rather than copied into a newly created directory. See the
developer docs for more details on how this is supposed to work.
:param package_ids: An array of package IDs.
"""
self.role = pk_enums.ROLE_DOWNLOAD_PACKAGES
kwargs = {"store_in_cache": store_in_cache,
"package_ids": package_ids}
return self._run_query(kwargs, sender)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="u", out_signature="",
                         sender_keyword="sender")
    def GetOldTransactions(self, number, sender):
        """This method allows a client to view details for old transactions.
        Not implemented: asynchronously reports ERROR_NOT_SUPPORTED.
        :param number:
            The number of past transactions, or 0 for all known transactions.
        """
        self.role = pk_enums.ROLE_GET_OLD_TRANSACTIONS
        GObject.idle_add(self._fail_not_implemented)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="s", out_signature="",
                         sender_keyword="sender")
    def GetRepoList(self, filter, sender):
        """This method returns the list of repositories used in the system.
        This method should emit RepoDetail.
        Not implemented: asynchronously reports ERROR_NOT_SUPPORTED.
        :param filter: A correct filter, e.g. none or installed;~devel
        """
        self.role = pk_enums.ROLE_GET_REPO_LIST
        GObject.idle_add(self._fail_not_implemented)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="as", out_signature="",
                         sender_keyword="sender")
    def SimulateInstallFiles(self, full_paths, sender):
        """This method simulates a package file installation emitting packages
        required to be installed, removed, updated, reinstalled, downgraded,
        obsoleted or untrusted. The latter is used to present the user
        untrusted packages that are about to be installed.
        This method typically emits Error and Package.
        Not implemented: asynchronously reports ERROR_NOT_SUPPORTED.
        :param full_paths:
            An array of full path and filenames to packages.
        """
        self.role = pk_enums.ROLE_SIMULATE_INSTALL_FILES
        GObject.idle_add(self._fail_not_implemented)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="bas", out_signature="",
                         sender_keyword="sender")
    def InstallFiles(self, only_trusted, full_paths, sender):
        """This method installs local package files onto the local system.
        The installer should always install extra dependent packages
        automatically.
        This method typically emits Progress, Status and Error and Package.
        Package enumerated types should be downloading, updating, installing
        or removing.
        Not implemented: asynchronously reports ERROR_NOT_SUPPORTED.
        :param only_trusted:
            If the transaction is only allowed to install trusted files.
            Unsigned files should not be installed if this parameter is TRUE.
            If this method can only install trusted files, and the files
            are unsigned, then the backend will send a
            ErrorCode(missing-gpg-signature). On receiving this error, the
            client may choose to retry with only_trusted FALSE after gaining
            further authentication.
        :param full_paths: An array of full path and filenames to packages.
        """
        self.role = pk_enums.ROLE_INSTALL_FILES
        GObject.idle_add(self._fail_not_implemented)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="sss", out_signature="",
                         sender_keyword="sender")
    def InstallSignature(self, sig_type, key_id, package_id, sender):
        """This method allows us to install new security keys.
        Not implemented: asynchronously reports ERROR_NOT_SUPPORTED.
        :param sig_type: A key type, e.g. gpg
        :param key_id: A key ID, e.g. BB7576AC
        :param package_id:
            A PackageID for the package that the user is trying to install
        """
        self.role = pk_enums.ROLE_INSTALL_SIGNATURE
        GObject.idle_add(self._fail_not_implemented)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="sss", out_signature="",
                         sender_keyword="sender")
    def RepoSetData(self, repo_id, parameter, value, sender):
        """This method allows arbitrary data to be passed to the repository
        handler.
        Not implemented: asynchronously reports ERROR_NOT_SUPPORTED.
        :param repo_id:
            A repository identifier, e.g. fedora-development-debuginfo
        :param parameter:
            The backend specific value, e.g. set-download-url.
        :param value:
            The backend specific value, e.g. http://foo.bar.org/baz.
        """
        self.role = pk_enums.ROLE_REPO_SET_DATA
        GObject.idle_add(self._fail_not_implemented)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="sb", out_signature="",
                         sender_keyword="sender")
    def RepoEnable(self, repo_id, enabled, sender):
        """This method enables the repository specified.
        Not implemented: asynchronously reports ERROR_NOT_SUPPORTED.
        :param repo_id:
            A repository identifier, e.g. fedora-development-debuginfo
        :param enabled: true if enabled, false if disabled.
        """
        self.role = pk_enums.ROLE_REPO_ENABLE
        GObject.idle_add(self._fail_not_implemented)
@dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="s", out_signature="",
sender_keyword="sender")
def Rollback(self, transaction_id, sender):
"""This method rolls back the package database to a previous transaction.
:param transaction_id: A valid transaction ID.
"""
self.role = pk_enums.ROLE_GET_CATEGORIES
GObject.idle_add(self._fail_not_implemented)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="ssas", out_signature="",
sender_keyword="sender")
def WhatProvides(self, filter, type, values, sender):
"""This method returns packages that provide the supplied attributes.
This method is useful for finding out what package(s) provide a
modalias or GStreamer codec string.
This method typically emits Progress, Status and Error and Package.
Package enumerated types should be available or installed.
:param filter:
A correct filter, e.g. none or installed;~devel
:param type:
A PkProvideType, e.g. PK_PROVIDES_ENUM_CODEC.
:param values:
The data to send to the backend to get the packages. Note: This
is backend specific.
"""
self.role = pk_enums.ROLE_WHAT_PROVIDES
kwargs = {"filters": filter.split(";"),
"type": type,
"values": values}
return self._run_query(kwargs, sender)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="", out_signature="",
                         sender_keyword="sender")
    def GetCategories(self, sender):
        """This method return the collection categories.
        Not implemented: asynchronously reports ERROR_NOT_SUPPORTED.
        """
        self.role = pk_enums.ROLE_GET_CATEGORIES
        GObject.idle_add(self._fail_not_implemented)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="sasb", out_signature="",
sender_keyword="sender")
def GetRequires(self, filter, package_ids, recursive, sender):
"""This method returns packages that depend on this package. This is
useful to know, as if package_id is being removed, we can warn the
user what else would be removed.
This method typically emits Progress, Status and Error and Package.
Package enumerated types should be available or installed.
:param filter: A correct filter, e.g. none or installed;~devel
:param package_ids: An array of package IDs.
:param recursive:
Either true or false. If yes then the requirements should be
returned for all packages returned. This means if
gnome-power-manager depends on NetworkManager and NetworkManager
depends on HAL, then GetRequires on HAL should return both
gnome-power-manager and NetworkManager.
"""
self.role = pk_enums.ROLE_GET_REQUIRES
kwargs = {"filters": filter.split(";"),
"package_ids": package_ids,
"recursive": recursive}
return self._run_query(kwargs, sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="sasb", out_signature="",
sender_keyword="sender")
def GetDepends(self, filter, package_ids, recursive, sender):
"""This method returns packages that this package depends on.
This method typically emits Progress, Status and Error and Package.
Package enumerated types should be available or installed.
:param filter: A correct filter, e.g. none or installed;~devel
:param package_ids: An array of package IDs.
:param recursive:
Either true or false. If yes then the requirements should be
returned for all packages returned. This means if
gnome-power-manager depends on NetworkManager and NetworkManager
depends on HAL, then GetDepends on gnome-power-manager should
return both HAL and NetworkManager.
"""
self.role = pk_enums.ROLE_GET_DEPENDS
kwargs = {"filters": filter.split(";"),
"package_ids": package_ids,
"recursive": recursive}
return self._run_query(kwargs, sender)
# HELPERS
    def _fail_not_implemented(self):
        """Report the transaction as not supported and mark it failed.

        Returns False so it can be scheduled via GObject.idle_add and
        runs only once.
        """
        self.ErrorCode(pk_enums.ERROR_NOT_SUPPORTED, "")
        self.exit = pk_enums.EXIT_FAILED
        return False
def _get_properties(self, iface):
"""Helper to get the properties of a D-Bus interface."""
if iface == PACKAGEKIT_TRANS_DBUS_INTERFACE:
return {"Role": dbus.String(self.role),
"Status": dbus.String(self.status),
"LastPackage": dbus.String(self.last_package),
"Uid": dbus.UInt32(self.uid),
"Percentage": dbus.UInt32(self.percentage),
"Subpercentage": dbus.UInt32(self.subpercentage),
"AllowCancel": dbus.Boolean(self.allow_cancel),
"CallerActive": dbus.Boolean(self.caller_active),
"ElapsedTime": dbus.UInt32(self.elapsed_time),
"RemainingTime": dbus.UInt32(self.remaining_time),
"Speed": dbus.UInt32(self.speed)
}
else:
return {}
    @inline_callbacks
    def _run_query(self, kwargs, sender):
        """Run a read-only PackageKit query as an aptdaemon transaction.

        :param kwargs: Keyword arguments handed to the query worker.
        :param sender: The D-Bus sender of the request.
        """
        self.trans = self._get_merged_trans(aptd_enums.ROLE_PK_QUERY,
                                            kwargs=kwargs)
        yield self.trans._run(sender)
def _get_merged_trans(self, role, pkg_ids=None, pkg_type=None, kwargs=None):
    """Create a MergedTransaction for the given aptdaemon role.

    :param role: The aptdaemon role of the transaction.
    :param pkg_ids: Optional PackageKit package ids to operate on.
    :param pkg_type: Index into the aptdaemon packages lists that the
        given ids belong to.  # assumes the six lists follow aptdaemon's
        # package-operation order - TODO confirm
    :param kwargs: Optional keyword arguments for query transactions.
    :raises Exception: If this PackageKit transaction was already run.
    """
    if pkg_ids:
        # aptdaemon expects one list per operation type; only the list
        # selected by pkg_type is populated.
        packages = [[], [], [], [], [], []]
        packages[pkg_type] = [get_aptd_package_id(pkg) for pkg in pkg_ids]
    else:
        packages = None
    if self.trans:
        # A PackageKit transaction object is single use only.
        raise Exception("%s: Transaction may only run once." % \
            pk_enums.ERROR_TRANSACTION_FAILED)
    trans = MergedTransaction(self, role, self.queue,
                              packages=packages, kwargs=kwargs)
    try:
        trans._set_locale(self.hints["locale"])
    except (KeyError, ValueError):
        # If the locale isn't valid or supported a ValueError will be raised
        pass
    try:
        trans._set_debconf(self.hints["frontend-socket"])
    except KeyError:
        # No debconf frontend socket was provided in the hints.
        pass
    # Keep the transaction referenced until it gets queued.
    self.queue.limbo[trans.tid] = trans
    return trans
class PackageKitWorker(aptdaemon.worker.AptWorker):
    """Process PackageKit Query transactions."""
    # BUGFIX: the docstring used to come after this assignment, which made
    # it a dead string statement instead of the class docstring.
    # Loaded "what_provides" plugins, lazily initialized by _init_plugins().
    _plugins = None
def query(self, trans):
    """Dispatch a PackageKit query transaction to the matching handler."""
    if trans.role != aptd_enums.ROLE_PK_QUERY:
        raise TransactionFailed(aptd_enums.ERROR_UNKNOWN,
                                "The transaction doesn't seem to be "
                                "a query")
    # Map each supported PackageKit role to its worker method.
    handlers = {
        pk_enums.ROLE_RESOLVE: self.resolve,
        pk_enums.ROLE_GET_UPDATES: self.get_updates,
        pk_enums.ROLE_GET_UPDATE_DETAIL: self.get_update_detail,
        pk_enums.ROLE_GET_PACKAGES: self.get_packages,
        pk_enums.ROLE_GET_FILES: self.get_files,
        pk_enums.ROLE_SEARCH_NAME: self.search_names,
        pk_enums.ROLE_SEARCH_GROUP: self.search_groups,
        pk_enums.ROLE_SEARCH_DETAILS: self.search_details,
        pk_enums.ROLE_SEARCH_FILE: self.search_files,
        pk_enums.ROLE_GET_DEPENDS: self.get_depends,
        pk_enums.ROLE_GET_REQUIRES: self.get_requires,
        pk_enums.ROLE_GET_DETAILS: self.get_details,
        pk_enums.ROLE_DOWNLOAD_PACKAGES: self.download_packages,
        pk_enums.ROLE_WHAT_PROVIDES: self.what_provides,
    }
    try:
        handler = handlers[trans.pktrans.role]
    except KeyError:
        raise TransactionFailed(aptd_enums.ERROR_UNKNOWN,
                                "Role %s isn't supported",
                                trans.pktrans.role)
    handler(trans, **trans.kwargs)
def search_files(self, trans, filters, values):
    """Implement org.freedesktop.PackageKit.Transaction.SearchFiles()

    Works only for installed files if apt-file isn't installed.
    """
    trans.progress = 101
    result_names = set()
    # Optionally make use of apt-file's Contents cache to search for not
    # installed files. But still search for installed files additionally
    # to make sure that we provide up-to-date results
    if (os.path.exists("/usr/bin/apt-file") and
            pk_enums.FILTER_INSTALLED not in filters):
        #FIXME: Make use of rapt-file on Debian if the network is available
        #FIXME: Show a warning to the user if the apt-file cache is several
        #       weeks old
        pklog.debug("Using apt-file")
        filenames_regex = []
        for filename in values:
            # BUGFIX: escape the file name so regex meta characters
            # (e.g. "+" in "c++") are matched literally.
            if filename.startswith("/"):
                pattern = "^%s$" % re.escape(filename[1:])
            else:
                pattern = "\/%s$" % re.escape(filename)
            filenames_regex.append(pattern)
        cmd = ["/usr/bin/apt-file", "--regexp", "--non-interactive",
               "--package-only", "find", "|".join(filenames_regex)]
        pklog.debug("Calling: %s" % cmd)
        apt_file = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
        stdout, stderr = apt_file.communicate()
        if apt_file.returncode == 0:
            #FIXME: Actually we should check if the file is part of the
            #       candidate, e.g. if unstable and experimental are
            #       enabled and a file would only be part of the
            #       experimental version
            result_names.update(stdout.split())
            self._emit_visible_packages_by_name(trans, filters,
                                                result_names)
        else:
            # BUGFIX: ERROR_INTERNAL_ERROR was referenced without its
            # module prefix which raised a NameError on this error path.
            raise TransactionFailed(aptd_enums.ERROR_INTERNAL_ERROR,
                                    "%s %s" % (stdout, stderr))
    # Search for installed files
    filenames_regex = []
    for filename in values:
        if filename.startswith("/"):
            pattern = "^%s$" % re.escape(filename)
        else:
            pattern = ".*\/%s$" % re.escape(filename)
        filenames_regex.append(pattern)
    files_pattern = re.compile("|".join(filenames_regex))
    for pkg in self._iterate_packages():
        if pkg.name in result_names:
            continue
        for installed_file in self._get_installed_files(pkg):
            if files_pattern.match(installed_file):
                self._emit_visible_package(trans, filters, pkg)
                break
def search_groups(self, trans, filters, values):
    """Implement org.freedesktop.PackageKit.Transaction.SearchGroups()"""
    #FIXME: Handle repo and category search
    trans.progress = 101
    # Emit every package whose PackageKit group is among the requested ones.
    matching = (pkg for pkg in self._iterate_packages()
                if self._get_package_group(pkg) in values)
    for pkg in matching:
        self._emit_visible_package(trans, filters, pkg)
def search_names(self, trans, filters, values):
    """Implement org.freedesktop.PackageKit.Transaction.SearchNames()"""
    pklog.info("Searching for package name: %s" % values)
    trans.progress = 101
    for pkg_name in self._cache.keys():
        # A package matches only if every search term occurs in its name.
        if all(term in pkg_name for term in values):
            self._emit_all_visible_pkg_versions(trans, filters,
                                                self._cache[pkg_name])
def search_details(self, trans, filters, values):
    """Implement org.freedesktop.PackageKit.Transaction.SearchDetails()

    Uses the xapian index when available, otherwise falls back to a
    linear scan of the apt cache descriptions.
    """
    trans.progress = 101
    if XAPIAN_SUPPORT:
        search_flags = (xapian.QueryParser.FLAG_BOOLEAN |
                        xapian.QueryParser.FLAG_PHRASE |
                        xapian.QueryParser.FLAG_LOVEHATE |
                        xapian.QueryParser.FLAG_BOOLEAN_ANY_CASE)
        pklog.debug("Performing xapian db based search")
        db = xapian.Database(XAPIAN_DB)
        parser = xapian.QueryParser()
        parser.set_default_op(xapian.Query.OP_AND)
        query = parser.parse_query(u" ".join(values), search_flags)
        enquire = xapian.Enquire(db)
        enquire.set_query(query)
        # BUGFIX: the match set was computed twice (the first result was
        # bound to an unused variable); run the query only once.
        for match in enquire.get_mset(0, 1000):
            pkg_name = match.document.get_data()
            if pkg_name in self._cache:
                self._emit_visible_package(trans, filters,
                                           self._cache[pkg_name])
    else:
        pklog.debug("Performing apt cache based search")
        needles = [val.lower() for val in values]
        for pkg in self._iterate_packages():
            txt = pkg.name
            try:
                txt += pkg.candidate.raw_description.lower()
                txt += pkg.candidate._translated_records.long_desc.lower()
            except AttributeError:
                # No candidate or no description records available.
                pass
            if all(needle in txt for needle in needles):
                self._emit_visible_package(trans, filters, pkg)
def get_updates(self, trans, filters):
    """Only report updates which can be installed safely: Which can depend
    on the installation of additional packages but which don't require
    the removal of already installed packages or block any other update.
    """
    def succeeds_security_update(pkg):
        """
        Return True if an update succeeds a previous security update

        An example would be a package with version 1.1 in the security
        archive and 1.1.1 in the archive of proposed updates or the
        same version in both archives.
        """
        for version in pkg.versions:
            # Only check versions between the installed and the candidate
            # NOTE(review): this skips versions that are <= installed AND
            # > candidate; since the candidate normally sorts >= installed
            # the condition is almost never true - confirm the comparison
            # logic actually matches the comment above.
            if (pkg.installed and
                apt_pkg.version_compare(version.version,
                                        pkg.installed.version) <= 0 and
                apt_pkg.version_compare(version.version,
                                        pkg.candidate.version) > 0):
                continue
            for origin in version.origins:
                if origin.origin in ["Debian", "Ubuntu"] and \
                   (origin.archive.endswith("-security") or \
                    origin.label == "Debian-Security") and \
                   origin.trusted:
                    return True
        return False
    #FIXME: Implment the basename filter
    pklog.info("Get updates()")
    self.cancellable = False
    self.progress = 101
    # Start with a safe upgrade
    self._cache.upgrade(dist_upgrade=True)
    for pkg in self._iterate_packages():
        if not pkg.is_upgradable:
            continue
        # This may occur on pinned packages which have been updated to
        # later version than the pinned one
        if not pkg.candidate.origins:
            continue
        if not pkg.marked_upgrade:
            #FIXME: Would be nice to all show why
            self._emit_package(trans, pkg, pk_enums.INFO_BLOCKED,
                               force_candidate=True)
            continue
        # The update can be safely installed
        info = pk_enums.INFO_NORMAL
        # Detect the nature of the upgrade (e.g. security, enhancement)
        candidate_origin = pkg.candidate.origins[0]
        archive = candidate_origin.archive
        origin = candidate_origin.origin
        trusted = candidate_origin.trusted
        label = candidate_origin.label
        if origin in ["Debian", "Ubuntu"] and trusted == True:
            if archive.endswith("-security") or label == "Debian-Security":
                info = pk_enums.INFO_SECURITY
            elif succeeds_security_update(pkg):
                pklog.debug("Update of %s succeeds a security update. "
                            "Raising its priority." % pkg.name)
                info = pk_enums.INFO_SECURITY
            elif archive.endswith("-backports"):
                info = pk_enums.INFO_ENHANCEMENT
            elif archive.endswith("-updates"):
                info = pk_enums.INFO_BUGFIX
        if origin in ["Backports.org archive"] and trusted == True:
            info = pk_enums.INFO_ENHANCEMENT
        self._emit_package(trans, pkg, info, force_candidate=True)
    self._emit_require_restart(trans)
def _emit_require_restart(self, trans):
    """Emit RequireRestart when a system restart is pending."""
    if not self.is_reboot_required():
        return
    trans.pktrans.RequireRestart(pk_enums.RESTART_SYSTEM, "")
def get_update_detail(self, trans, package_ids):
    """
    Implement the {backend}-get-update-details functionality

    Emits an UpdateDetail signal (changelog, closed bug URLs, CVE URLs
    and restart requirement) for every given package id.
    """
    def get_bug_urls(changelog):
        """
        Create a list of urls pointing to closed bugs in the changelog
        """
        urls = []
        for r in re.findall(MATCH_BUG_CLOSES_DEBIAN, changelog,
                            re.IGNORECASE | re.MULTILINE):
            urls.extend([HREF_BUG_DEBIAN % bug for bug in
                         re.findall(MATCH_BUG_NUMBERS, r)])
        for r in re.findall(MATCH_BUG_CLOSES_UBUNTU, changelog,
                            re.IGNORECASE | re.MULTILINE):
            urls.extend([HREF_BUG_UBUNTU % bug for bug in
                         re.findall(MATCH_BUG_NUMBERS, r)])
        return urls
    def get_cve_urls(changelog):
        """
        Create a list of urls pointing to cves referred in the changelog
        """
        return [HREF_CVE % c for c in
                re.findall(MATCH_CVE, changelog, re.MULTILINE)]
    pklog.info("Get update details of %s" % package_ids)
    trans.progress = 0
    trans.cancellable = False
    trans.pktrans.status = pk_enums.STATUS_DOWNLOAD_CHANGELOG
    total = len(package_ids)
    count = 1
    old_locale = locale.getlocale(locale.LC_TIME)
    # The changelog timestamps use English month/day names.
    locale.setlocale(locale.LC_TIME, "C")
    try:
        for pkg_id in package_ids:
            self._iterate_mainloop()
            trans.progress = count * 100 / total
            count += 1
            pkg = self._get_package_by_id(pkg_id)
            # FIXME add some real data
            if pkg.installed.origins:
                installed_origin = pkg.installed.origins[0].label
            else:
                installed_origin = ""
            updates = "%s;%s;%s;%s" % (pkg.name, pkg.installed.version,
                                       pkg.installed.architecture,
                                       installed_origin)
            obsoletes = ""
            vendor_url = ""
            restart = "none"
            update_text = u""
            state = ""
            issued = ""
            updated = ""
            #FIXME: make this more configurable. E.g. a dbus update requires
            #       a reboot on Ubuntu but not on Debian
            if pkg.name.startswith("linux-image-") or \
               pkg.name in ["libc6", "dbus"]:
                # BUGFIX: "==" made this a no-op comparison; the update has
                # to be *marked* as requiring a system restart.
                restart = pk_enums.RESTART_SYSTEM
            changelog_dir = apt_pkg.config.find_dir("Dir::Cache::Changelogs")
            if changelog_dir == "/":
                changelog_dir = os.path.join(apt_pkg.config.find_dir("Dir::"
                                                                     "Cache"),
                                             "Changelogs")
            filename = os.path.join(changelog_dir,
                                    "%s_%s.gz" % (pkg.name,
                                                  pkg.candidate.version))
            changelog_raw = ""
            if os.path.exists(filename):
                pklog.debug("Reading changelog from cache")
                changelog_file = gzip.open(filename, "rb")
                try:
                    changelog_raw = changelog_file.read().decode("UTF-8")
                except (IOError, UnicodeDecodeError):
                    # A broken cache entry: fall back to downloading below.
                    pass
                finally:
                    changelog_file.close()
            if not changelog_raw:
                pklog.debug("Downloading changelog")
                changelog_raw = pkg.get_changelog()
                # The internal download error string of python-apt ist not
                # provided as unicode object
                if not isinstance(changelog_raw, unicode):
                    changelog_raw = changelog_raw.decode("UTF-8")
                # Cache the fetched changelog
                if not os.path.exists(changelog_dir):
                    os.makedirs(changelog_dir)
                # Remove old cached changelogs
                pattern = os.path.join(changelog_dir, "%s_*" % pkg.name)
                for old_changelog in glob.glob(pattern):
                    os.remove(os.path.join(changelog_dir, old_changelog))
                changelog_file = gzip.open(filename, mode="wb")
                try:
                    changelog_file.write(changelog_raw.encode("UTF-8"))
                finally:
                    changelog_file.close()
            # Convert the changelog to markdown syntax
            changelog = u""
            for line in changelog_raw.split("\n"):
                if line == "":
                    changelog += " \n"
                else:
                    changelog += u" %s \n" % line
                if line.startswith(pkg.candidate.source_name):
                    match = re.match(r"(?P<source>.+) \((?P<version>.*)\) "
                                     "(?P<dist>.+); urgency=(?P<urgency>.+)",
                                     line)
                    # BUGFIX: guard against a malformed header line instead
                    # of crashing with AttributeError on match.group().
                    if match:
                        update_text += u"%s\n%s\n\n" % (
                            match.group("version"),
                            "=" * len(match.group("version")))
                elif line.startswith(" "):
                    update_text += u" %s \n" % line
                elif line.startswith(" --"):
                    #FIXME: Add %z for the time zone - requires Python 2.6
                    update_text += u" \n"
                    match = re.match("^ -- (?P<maintainer>.+) (?P<mail><.+>) "
                                     "(?P<date>.+) (?P<offset>[-\+][0-9]+)$",
                                     line)
                    if not match:
                        continue
                    try:
                        date = datetime.datetime.strptime(match.group("date"),
                                                          "%a, %d %b %Y "
                                                          "%H:%M:%S")
                    except ValueError:
                        continue
                    issued = date.isoformat()
                    if not updated:
                        updated = date.isoformat()
            if issued == updated:
                updated = ""
            bugzilla_url = ";;".join(get_bug_urls(changelog))
            cve_url = ";;".join(get_cve_urls(changelog))
            trans.emit_update_detail(pkg_id, updates, obsoletes, vendor_url,
                                     bugzilla_url, cve_url, restart,
                                     update_text, changelog,
                                     state, issued, updated)
    finally:
        # BUGFIX: restore the locale even when fetching a changelog fails.
        locale.setlocale(locale.LC_TIME, old_locale)
def get_details(self, trans, package_ids):
    """Implement org.freedesktop.PackageKit.Transaction.GetDetails()"""
    trans.progress = 101
    for pkg_id in package_ids:
        version = self._get_version_by_id(pkg_id)
        #FIXME: We need more fine grained license information!
        origins = version.origins
        free_origin = (origins and
                       origins[0].component in ["main", "universe"] and
                       origins[0].origin in ["Debian", "Ubuntu"])
        license_name = "free" if free_origin else "unknown"
        group = self._get_package_group(version.package)
        trans.emit_details(pkg_id, license_name, group,
                           version.description, version.homepage,
                           version.size)
def get_packages(self, trans, filters):
    """Implement org.freedesktop.PackageKit.Transaction.GetPackages()"""
    pklog.info("Get all packages")
    self.progress = 101
    for pkg in self._iterate_packages():
        # _emit_visible_package applies the filter check before emitting.
        self._emit_visible_package(trans, filters, pkg)
def resolve(self, trans, filters, packages):
    """Implement org.freedesktop.PackageKit.Transaction.Resolve()

    Each entry of *packages* may be either a full PackageKit package id
    or a plain package name.
    """
    pklog.info("Resolve()")
    trans.status = aptd_enums.STATUS_QUERY
    trans.progress = 101
    self.cancellable = False
    for name_raw in packages:
        #FIXME: Python-apt doesn't allow unicode as key. See #542965
        name = str(name_raw)
        try:
            # Check if the name is a valid package id
            version = self._get_version_by_id(name)
        except ValueError:
            pass
        else:
            # BUGFIX: the visibility helper is called _is_package_visible;
            # the old name _package_is_visible raised an AttributeError.
            if self._is_package_visible(version.package, filters):
                self._emit_pkg_version(trans, version)
            continue
        # The name seems to be a normal name
        try:
            self._emit_visible_package(trans, filters, self._cache[name])
        except KeyError:
            raise TransactionFailed(aptd_enums.ERROR_NO_PACKAGE,
                                    "Package name %s could not be "
                                    "resolved.", name)
def get_depends(self, trans, filters, package_ids, recursive):
    """Emit all dependencies of the given package ids.

    Doesn't support recursive dependency resolution.
    """
    def emit_blocked_dependency(base_dependency, pkg=None, filters=""):
        """Send a blocked package signal for the given
        apt.package.BaseDependency.
        """
        # BUGFIX: use the pk_enums prefix like the rest of the module;
        # the bare FILTER_* names were undefined.
        if pk_enums.FILTER_INSTALLED in filters:
            return
        if pkg:
            summary = pkg.candidate.summary
            try:
                filters.remove(pk_enums.FILTER_NOT_INSTALLED)
            except ValueError:
                pass
            if not self._is_package_visible(pkg, filters):
                return
        else:
            summary = u""
        if base_dependency.relation:
            version = "%s%s" % (base_dependency.relation,
                                base_dependency.version)
        else:
            version = base_dependency.version
        # BUGFIX: argument order aligned with _emit_pkg_version's
        # trans.emit_package(info, id, summary) calls.
        trans.emit_package(pk_enums.INFO_BLOCKED,
                           "%s;%s;;" % (base_dependency.name, version),
                           summary)
    def check_dependency(pkg, base_dep):
        """Check if the given apt.package.Package can satisfy the
        BaseDependency and emit the corresponding package signals.
        """
        if not self._is_package_visible(pkg, filters):
            return
        if base_dep.version:
            satisfied = False
            # Sort the version list to check the installed
            # and candidate before the other ones
            ver_list = list(pkg.versions)
            if pkg.installed:
                ver_list.remove(pkg.installed)
                ver_list.insert(0, pkg.installed)
            if pkg.candidate:
                ver_list.remove(pkg.candidate)
                ver_list.insert(0, pkg.candidate)
            for dep_ver in ver_list:
                if apt_pkg.check_dep(dep_ver.version,
                                     base_dep.relation,
                                     base_dep.version):
                    self._emit_pkg_version(trans, dep_ver)
                    satisfied = True
                    break
            if not satisfied:
                emit_blocked_dependency(base_dep, pkg, filters)
        else:
            self._emit_package(trans, pkg)
    # Setup the transaction
    # BUGFIX: log the filters parameter, not the builtin filter function.
    pklog.info("Get depends (%s,%s,%s)" % (filters, package_ids, recursive))
    self.status = aptd_enums.STATUS_RESOLVING_DEP
    trans.progress = 101
    self.cancellable = True
    dependency_types = ["PreDepends", "Depends"]
    if apt_pkg.config["APT::Install-Recommends"]:
        dependency_types.append("Recommends")
    for id in package_ids:
        version = self._get_version_by_id(id)
        for dependency in version.get_dependencies(*dependency_types):
            # Walk through all or_dependencies
            for base_dep in dependency.or_dependencies:
                if self._cache.is_virtual_package(base_dep.name):
                    # Check each provider of a virtual package
                    for provider in \
                            self._cache.get_providing_packages(base_dep.name):
                        check_dependency(provider, base_dep)
                elif base_dep.name in self._cache:
                    check_dependency(self._cache[base_dep.name], base_dep)
                else:
                    # BUGFIX: trans was wrongly passed as the
                    # base_dependency argument.
                    emit_blocked_dependency(base_dep, filters=filters)
def get_requires(self, trans, filters, package_ids, recursive):
    """Emit all packages which depend on the given ids.

    Recursive searching is not supported.
    """
    # BUGFIX: log the filters parameter, not the builtin filter function.
    pklog.info("Get requires (%s,%s,%s)" % (filters, package_ids,
                                            recursive))
    self.status = aptd_enums.STATUS_RESOLVING_DEP
    self.progress = 101
    self.cancellable = True
    for id in package_ids:
        version = self._get_version_by_id(id)
        for pkg in self._iterate_packages():
            if not self._is_package_visible(pkg, filters):
                continue
            if pkg.is_installed:
                pkg_ver = pkg.installed
            elif pkg.candidate:
                pkg_ver = pkg.candidate
            else:
                # BUGFIX: neither installed nor available - pkg_ver would
                # be unbound (NameError) below; nothing to check here.
                continue
            for dependency in pkg_ver.dependencies:
                satisfied = False
                for base_dep in dependency.or_dependencies:
                    if version.package.name == base_dep.name or \
                       base_dep.name in version.provides:
                        satisfied = True
                        break
                if satisfied:
                    self._emit_package(trans, pkg)
                    break
def download_packages(self, trans, store_in_cache, package_ids):
    """Implement the DownloadPackages functionality.

    The store_in_cache parameter gets ignored.
    :param store_in_cache: If True download into apt's archive directory,
        otherwise into a fresh temporary directory.
    :param package_ids: PackageKit ids of the versions to download.
    """
    def get_download_details(ids):
        """Calculate the start and end point of a package download
        progress.
        """
        total = 0
        downloaded = 0
        versions = []
        # Check if all ids are valid and calculate the total download size
        for id in ids:
            pkg_ver = self._get_version_by_id(id)
            if not pkg_ver.downloadable:
                raise TransactionFailed(aptd_enums.ERROR_PACKAGE_DOWNLOAD_FAILED,
                                        "package %s isn't downloadable" % id)
            total += pkg_ver.size
            versions.append((id, pkg_ver))
        # NOTE(review): if every version reported size 0 this would divide
        # by zero - presumably impossible for real packages, but confirm.
        for id, ver in versions:
            start = downloaded * 100 / total
            end = start + ver.size * 100 / total
            yield id, ver, start, end
            downloaded += ver.size
    pklog.info("Downloading packages: %s" % package_ids)
    trans.status = aptd_enums.STATUS_DOWNLOADING
    trans.cancellable = True
    trans.progress = 10
    # Check the destination directory
    if store_in_cache:
        dest = apt_pkg.config.find_dir("Dir::Cache::archives")
    else:
        dest = tempfile.mkdtemp(prefix="aptdaemon-download")
    if not os.path.isdir(dest) or not os.access(dest, os.W_OK):
        raise TransactionFailed(aptd_enums.ERROR_INTERNAL_ERROR,
                                "The directory '%s' is not writable" % dest)
    # Start the download
    for id, ver, start, end in get_download_details(package_ids):
        progress = DaemonAcquireProgress(trans, start, end)
        self._emit_pkg_version(trans, ver, pk_enums.INFO_DOWNLOADING)
        try:
            ver.fetch_binary(dest, progress)
        except Exception as error:
            raise TransactionFailed(aptd_enums.ERROR_PACKAGE_DOWNLOAD_FAILED,
                                    str(error))
        else:
            # Report the path of the downloaded deb to the client.
            trans.emit_files(id,
                             os.path.join(dest,
                                          os.path.basename(ver.filename)))
            self._emit_pkg_version(trans, ver, pk_enums.INFO_FINISHED)
def get_files(self, trans, package_ids):
    """Emit the Files signal with the files shipped by each package.

    Apt only supports this for installed packages.
    """
    for pkg_id in package_ids:
        pkg = self._get_package_by_id(pkg_id)
        trans.emit_files(pkg_id, ";".join(self._get_installed_files(pkg)))
def what_provides(self, trans, filters, type, values):
    """Emit the packages that provide the given search items, as
    determined by the registered "what_provides" plugins.

    :param type: The PackageKit provides type (e.g. codec, font).
    :raises TransactionFailed: If no plugin supports the given type and
        the type isn't PROVIDES_ANY.
    """
    self._init_plugins()
    supported_type = False
    # run plugins
    for plugin in self._plugins.get("what_provides", []):
        pklog.debug("calling what_provides plugin %s %s" % (str(plugin), str(filters)))
        for search_item in values:
            try:
                for package in plugin(self._cache, type, search_item):
                    self._emit_visible_package(trans, filters, package)
                    # The plugin yielded a result, so it handles this type.
                    supported_type = True
            except NotImplementedError:
                pass # keep supported_type as False
    if not supported_type and type != pk_enums.PROVIDES_ANY:
        # none of the plugins felt responsible for this type
        raise TransactionFailed(aptd_enums.ERROR_NOT_SUPPORTED,
                                "Query type '%s' is not supported" % type)
# Helpers
def _get_id_from_version(self, version):
    """Return the PackageKit package id ("name;version;arch;origin")
    of an apt.package.Version instance."""
    if version.origins:
        origin = version.origins[0].label
    else:
        origin = ""
    # NOTE(review): native/"all" packages keep their full cache name while
    # foreign-architecture names drop the ":arch" suffix (the architecture
    # travels in its own id field) - confirm this matches the PackageKit
    # id convention.
    if version.architecture == apt_pkg.config.find("APT::Architecture") or \
       version.architecture == "all":
        name = version.package.name
    else:
        name = version.package.name.split(":")[0]
    id = "%s;%s;%s;%s" % (name, version.version,
                          version.architecture, origin)
    return id
def _emit_package(self, trans, pkg, info=None, force_candidate=False):
    """Send the Package signal for a given apt package."""
    prefer_candidate = (not pkg.is_installed or force_candidate) \
                       and pkg.candidate
    if prefer_candidate:
        self._emit_pkg_version(trans, pkg.candidate, info)
    elif pkg.is_installed:
        self._emit_pkg_version(trans, pkg.installed, info)
    else:
        pklog.debug("Package %s hasn't got any version." % pkg.name)
def _emit_pkg_version(self, trans, version, info=None):
    """Emit the Package signal of the given apt.package.Version."""
    pkg_id = self._get_id_from_version(version)
    section = version.section.split("/")[-1]
    if not info:
        # Derive the info enum from the install state and whether the
        # package is a metapackage (collection).
        is_collection = (section == "metapackages")
        if version == version.package.installed:
            info = (pk_enums.INFO_COLLECTION_INSTALLED if is_collection
                    else pk_enums.INFO_INSTALLED)
        else:
            info = (pk_enums.INFO_COLLECTION_AVAILABLE if is_collection
                    else pk_enums.INFO_AVAILABLE)
    trans.emit_package(info, pkg_id, version.summary)
def _emit_all_visible_pkg_versions(self, trans, filters, pkg):
    """Emit all available versions of a package."""
    if not self._is_package_visible(pkg, filters):
        return
    if pk_enums.FILTER_NEWEST in filters:
        # Only the most recent version: prefer the candidate version.
        if pkg.candidate:
            self._emit_pkg_version(trans, pkg.candidate)
        elif pkg.installed:
            self._emit_pkg_version(trans, pkg.installed)
    else:
        for version in pkg.versions:
            self._emit_pkg_version(trans, version)
def _emit_visible_package(self, trans, filters, pkg, info=None):
    """Emit the package if it passes the given filters."""
    if not self._is_package_visible(pkg, filters):
        return
    self._emit_package(trans, pkg, info)
def _emit_visible_packages(self, trans, filters, pkgs, info=None):
    """Emit every given package that passes the filters."""
    for pkg in pkgs:
        # Delegate the filter check and emission to the single-package
        # helper.
        self._emit_visible_package(trans, filters, pkg, info)
def _emit_visible_packages_by_name(self, trans, filters, pkgs, info=None):
    """Find the packages with the given names, then filter and emit
    them.
    """
    for name_raw in pkgs:
        #FIXME: Python-apt doesn't allow unicode as key. See #542965
        name = str(name_raw)
        # Use "in" instead of dict.has_key() (removed in Python 3).
        if name in self._cache and \
           self._is_package_visible(self._cache[name], filters):
            self._emit_package(trans, self._cache[name], info)
def _is_package_visible(self, pkg, filters):
    """Return True if the package passes all given PackageKit filters
    and should be shown in the user interface.
    """
    if filters == [pk_enums.FILTER_NONE]:
        return True
    # Map each known filter to the predicate a visible package must
    # satisfy; unknown filters are ignored, as before.
    checks = {
        pk_enums.FILTER_INSTALLED: lambda: pkg.is_installed,
        pk_enums.FILTER_NOT_INSTALLED: lambda: not pkg.is_installed,
        pk_enums.FILTER_SUPPORTED:
            lambda: self._is_package_supported(pkg),
        pk_enums.FILTER_NOT_SUPPORTED:
            lambda: not self._is_package_supported(pkg),
        pk_enums.FILTER_FREE: lambda: self._is_package_free(pkg),
        # Mirrors the original logic: "not free" is established via
        # _is_package_not_free(), not by negating _is_package_free().
        pk_enums.FILTER_NOT_FREE:
            lambda: self._is_package_not_free(pkg),
        pk_enums.FILTER_GUI: lambda: self._has_package_gui(pkg),
        pk_enums.FILTER_NOT_GUI: lambda: not self._has_package_gui(pkg),
        pk_enums.FILTER_COLLECTIONS:
            lambda: self._is_package_collection(pkg),
        pk_enums.FILTER_NOT_COLLECTIONS:
            lambda: not self._is_package_collection(pkg),
        pk_enums.FILTER_DEVELOPMENT:
            lambda: self._is_package_devel(pkg),
        pk_enums.FILTER_NOT_DEVELOPMENT:
            lambda: not self._is_package_devel(pkg),
    }
    for filter_name in filters:
        check = checks.get(filter_name)
        if check is not None and not check():
            return False
    return True
def _is_package_not_free(self, pkg):
    """
    Return True if we can be sure that the package's license isn't any
    free one
    """
    if not pkg.candidate:
        return False
    origins = pkg.candidate.origins
    # BUGFIX: "candidate[0]" was an undefined name (NameError); the
    # component of the first origin is what has to be checked.
    return (origins and
            ((origins[0].origin == "Ubuntu" and
              origins[0].component in ["multiverse", "restricted"]) or
             (origins[0].origin == "Debian" and
              origins[0].component in ["contrib", "non-free"])) and
            origins[0].trusted)
def _is_package_collection(self, pkg):
    """Return True if the package is a metapackage."""
    return pkg.section.split("/")[-1] == "metapackages"
def _is_package_free(self, pkg):
    """
    Return True if we can be sure that the package has got a free license
    """
    if not pkg.candidate:
        return False
    origins = pkg.candidate.origins
    # BUGFIX: "candidate[0]" was an undefined name (NameError); the
    # component of the first origin is what has to be checked.
    return (origins and
            ((origins[0].origin == "Ubuntu" and
              origins[0].component in ["main", "universe"]) or
             (origins[0].origin == "Debian" and
              origins[0].component == "main")) and
            origins[0].trusted)
def _has_package_gui(self, pkg):
    """Return True if the package's section suggests a graphical app."""
    #FIXME: should go to a modified Package class
    #FIXME: take application data into account. perhaps checking for
    #       property in the xapian database
    gui_sections = ("x11", "gnome", "kde")
    return pkg.section.split("/")[-1].lower() in gui_sections
def _is_package_devel(self, pkg):
    """Return True if the package looks like a development package."""
    #FIXME: should go to a modified Package class
    if pkg.name.endswith(("-dev", "-dbg")):
        return True
    return pkg.section.split("/")[-1].lower() in ("devel", "libdevel")
def _is_package_supported(self, pkg):
    """Return True if the package comes from a supported (trusted
    Ubuntu main/restricted or Debian main) origin."""
    if not pkg.candidate:
        return False
    origins = pkg.candidate.origins
    # BUGFIX: "candidate[0]" was an undefined name (NameError); the
    # component of the first origin is what has to be checked.
    return (origins and
            ((origins[0].origin == "Ubuntu" and
              origins[0].component in ["main", "restricted"]) or
             (origins[0].origin == "Debian" and
              origins[0].component == "main")) and
            origins[0].trusted)
def _get_package_by_id(self, id):
    """Return the apt.package.Package corresponding to the given
    package id.

    If the package isn't available error out.
    :raises TransactionFailed: If the id cannot be resolved.
    """
    # Resolving the version also validates name, version and arch.
    version = self._get_version_by_id(id)
    return version.package
def _get_version_by_id(self, id):
    """Return the apt.package.Version corresponding to the given
    package id.

    :param id: A PackageKit id of the form "name;version;arch;data".
    :raises TransactionFailed: If the package or version isn't available.
    """
    # maxsplit=3 keeps any additional ";" inside the data field intact.
    name, version_string, arch, data = id.split(";", 3)
    if arch and arch != apt_pkg.config.find("APT::Architecture") and \
       arch != "all":
        name += ":%s" % arch
    try:
        pkg = self._cache[name]
    except KeyError:
        raise TransactionFailed(aptd_enums.ERROR_NO_PACKAGE,
                                "There isn't any package named %s",
                                name)
    #FIXME:This requires a not yet released fix in python-apt
    # Catch Exception (not a bare except) so KeyboardInterrupt and
    # SystemExit still propagate while older python-apt error types are
    # handled.
    try:
        version = pkg.versions[version_string]
    except Exception:
        raise TransactionFailed(aptd_enums.ERROR_NO_PACKAGE,
                                "Version %s doesn't exist",
                                version_string)
    if version.architecture != arch:
        # BUGFIX: argument order matched to the message placeholders
        # ("Version <version> of <name> ... for architecture <arch>").
        raise TransactionFailed(aptd_enums.ERROR_NO_PACKAGE,
                                "Version %s of %s isn't available "
                                "for architecture %s",
                                version.version, pkg.name, arch)
    return version
def _get_installed_files(self, pkg):
    """
    Return the list of unicode names of the files which have
    been installed by the package

    This method should be obsolete by the apt.package.Package.installedFiles
    attribute as soon as the consolidate branch of python-apt gets merged
    """
    path = os.path.join(apt_pkg.config["Dir"],
                        "var/lib/dpkg/info/%s.list" % pkg.name)
    try:
        # Context manager guarantees the file handle is closed even if
        # reading or decoding fails; also stop shadowing the builtin
        # "list".
        with open(path) as list_file:
            return list_file.read().decode().split("\n")
    except (IOError, OSError, UnicodeDecodeError):
        # Not installed or unreadable: report no files (best effort).
        return []
def _get_package_group(self, pkg):
    """
    Return the packagekit group corresponding to the package's section
    """
    section = pkg.section.split("/")[-1]
    # Use a try/except lookup instead of dict.has_key() (removed in
    # Python 3).
    try:
        return SECTION_GROUP_MAP[section]
    except KeyError:
        pklog.debug("Unknown package section %s of %s" % (pkg.section,
                                                          pkg.name))
        return pk_enums.GROUP_UNKNOWN
def _init_plugins(self):
    """Initialize PackageKit apt backend plugins.
    Do nothing if plugins are already initialized.
    """
    if self._plugins is not None:
        return
    if not pkg_resources:
        # setuptools/pkg_resources isn't available: no plugin support.
        return
    self._plugins = {} # plugin_name -> [plugin_fn1, ...]
    # just look in standard Python paths for now
    dists, errors = pkg_resources.working_set.find_plugins(pkg_resources.Environment())
    for dist in dists:
        pkg_resources.working_set.add(dist)
    for plugin_name in ["what_provides"]:
        for entry_point in pkg_resources.iter_entry_points(
                "packagekit.apt.plugins", plugin_name):
            try:
                plugin = entry_point.load()
            except Exception as e:
                # A broken plugin must not break the whole backend.
                pklog.warning("Failed to load %s from plugin %s: %s" % (
                    plugin_name, str(entry_point.dist), str(e)))
                continue
            pklog.debug("Loaded %s from plugin %s" % (
                plugin_name, str(entry_point.dist)))
            self._plugins.setdefault(plugin_name, []).append(plugin)
def _apply_changes(self, trans, fetch_range=(15, 50),
                   install_range=(50, 90)):
    """Apply changes and emit RequireRestart accordingly."""
    aptdaemon.worker.AptWorker._apply_changes(self, trans,
                                              fetch_range,
                                              install_range)
    # Only PackageKit transactions (which carry a pktrans attribute) that
    # upgraded packages, directly or via dependencies, may require a
    # restart.
    if (hasattr(trans, "pktrans") and
        (trans.role == aptd_enums.ROLE_UPGRADE_SYSTEM or
         trans.packages[aptd_enums.PKGS_UPGRADE] or
         trans.depends[aptd_enums.PKGS_UPGRADE])):
        self._emit_require_restart(trans)
if META_RELEASE_SUPPORT:
    class GMetaRelease(GObject.GObject, MetaReleaseCore):
        """MetaReleaseCore wrapper which emits a GObject "download-done"
        signal once the meta-release information has been downloaded."""
        __gsignals__ = {"download-done": (GObject.SignalFlags.RUN_FIRST,
                                          None,
                                          ())}
        def __init__(self):
            GObject.GObject.__init__(self)
            # assumes the two False flags disable the development and
            # proposed release handling of MetaReleaseCore - TODO confirm
            MetaReleaseCore.__init__(self, False, False)
        def download(self):
            MetaReleaseCore.download(self)
            self.emit("download-done")
def get_pk_exit_enum(enum):
    """Map an aptdaemon exit status to the PackageKit equivalent."""
    return MAP_EXIT_ENUM.get(enum, pk_enums.EXIT_UNKNOWN)
def get_pk_status_enum(enum):
    """Map an aptdaemon status to the PackageKit equivalent."""
    return MAP_STATUS_ENUM.get(enum, pk_enums.STATUS_UNKNOWN)
def get_pk_package_enum(enum):
    """Map an aptdaemon package info value to the PackageKit equivalent."""
    return MAP_PACKAGE_ENUM.get(enum, pk_enums.INFO_UNKNOWN)
def get_pk_error_enum(enum):
    """Map an aptdaemon error code to the PackageKit equivalent."""
    return MAP_ERROR_ENUM.get(enum, pk_enums.ERROR_UNKNOWN)
def get_aptd_package_id(pk_id):
    """Convert a PackageKit Package ID to the apt syntax.
    e.g. xterm;235;i386;installed to xterm:i386=235
    """
    name, version, arch, data = pk_id.split(";")
    pkg_id = name
    native_arch = apt_pkg.config.find("APT::Architecture")
    if arch not in (native_arch, "all"):
        pkg_id += ":%s" % arch
    if version:
        pkg_id += "=%s" % version
    return pkg_id
def get_pk_package_id(pk_id, data=""):
    """Convert an AptDaemon package ID to the PackageKit syntax.
    e.g. xterm:i386=235 to xterm;235;i386;installed
    """
    #FIXME add arch support
    name, version, release = \
        aptdaemon.worker.AptWorker._split_package_id(pk_id)
    try:
        name, arch = name.split(":", 1)
    except ValueError:
        # No ":arch" suffix: a native architecture package.
        arch = ""
    if version is None:
        version = ""
    if release is None:
        release = ""
    # An explicit data argument wins over the release field.
    return "%s;%s;%s;%s" % (name, version, arch, data or release)
def defer_idle(func, *args):
    """Invoke *func* with *args* once and return False so a GLib idle
    source which scheduled this callback is removed after the call."""
    func(*args)
    return False
if __name__ == '__main__':
main()
# vim: ts=4 et sts=4
| thnguyn2/ECE_527_MP | mp4/SD_card/partition1/usr/share/pyshared/aptdaemon/pkcompat.py | pkcompat.py | py | 126,545 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gi.repository.GObject.threads_init",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "gi.repository.GObject",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 42,
"usage_type": "call"
},
{
"api_na... |
70863160103 | import json
from datetime import datetime
from dataclasses import dataclass
from tabulate import tabulate
import requests
from exceptions import WrongCommandFormat
from tools import \
format_task, \
http_response_to_str, \
FORMATTED_TASK_COLUMNS
from config import URL, HELP_MSG
@dataclass
class HandlerReturn:
    """Result returned by every CLI command handler."""

    # Human-readable HTTP status text of the server call.
    # NOTE(review): handlers also pass None despite the str annotation
    # (e.g. help_handler) — the annotations are effectively Optional[str].
    http_response: str
    # Text to show the user, or None when there is nothing to print.
    handler_response: str
def get_all_tasks(command):
    """Handle the "list all tasks" command.

    *command* is the user input split into words; only the command word
    itself is allowed.
    """
    if len(command) != 1:
        raise WrongCommandFormat(reason='количество слов в команде не равно единице.')

    response = requests.get(url=URL + 'all/')
    tasks = [format_task(task) for task in json.loads(response.json())]
    table = '\n' + tabulate(tasks, headers=FORMATTED_TASK_COLUMNS)
    return HandlerReturn(
        http_response=http_response_to_str(response=response),
        handler_response=table,
    )
def create_new_task(command):
    """Handle "create task": command must be [cmd, title, text]."""
    if len(command) != 3:
        raise WrongCommandFormat(reason='количество слов в команде не равно трём.')

    _, title, text = command
    response = requests.post(url=URL + 'new/',
                             json={'title': title, 'text': text})
    return HandlerReturn(
        http_response=http_response_to_str(response=response),
        handler_response=None,
    )
def get_task_by_id(command):
    """Handle "show task by primary key": command must be [cmd, pk]."""
    if len(command) != 2:
        raise WrongCommandFormat(reason='количество слов в команде не равно двум.')

    response = requests.get(url=URL + 'get/', json={'pk': command[1]})
    try:
        raw_tasks = json.loads(response.json())
    except json.decoder.JSONDecodeError:
        # Server returned no JSON payload — treat as "no task found".
        raw_tasks = []

    formatted = [format_task(task) for task in raw_tasks]
    if formatted:
        text = '\n' + tabulate(formatted, headers=FORMATTED_TASK_COLUMNS)
    else:
        text = '-'
    return HandlerReturn(
        http_response=http_response_to_str(response=response),
        handler_response=text,
    )
def complete_task_by_id(command):
    """Handle "mark task completed": command must be [cmd, pk].

    The completion date is stamped client-side as dd-mm-YYYY.
    """
    if len(command) != 2:
        raise WrongCommandFormat(reason='количество слов в команде не равно двум.')

    payload = {
        'pk': command[1],
        'completion_date': datetime.now().strftime('%d-%m-%Y'),
    }
    response = requests.post(url=URL + 'complete/', json=payload)
    return HandlerReturn(
        http_response=http_response_to_str(response=response),
        handler_response=None,
    )
def delete_task_by_id(command):
    """Handle "delete task": command must be [cmd, pk]."""
    if len(command) != 2:
        raise WrongCommandFormat(reason='количество слов в команде не равно двум.')

    response = requests.post(url=URL + 'delete/', json={'pk': command[1]})
    return HandlerReturn(
        http_response=http_response_to_str(response=response),
        handler_response=None,
    )
def help_handler(command):
    """Handle "help": return the static help text without any HTTP call."""
    if len(command) != 1:
        raise WrongCommandFormat(reason='количество слов в команде не равно единице.')

    return HandlerReturn(http_response=None, handler_response=HELP_MSG)
| yabifurkator/appvelox_task | client/handlers.py | handlers.py | py | 3,325 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dataclasses.dataclass",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "exceptions.WrongCommandFormat",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "config.URL",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "reques... |
2152603533 | import calendar
import unittest
from datetime import date, datetime, timedelta
from codenotes import parse_args
from codenotes.util.args import date_args_empty, dates_to_search
class TestDateArgsNeededEmpty(unittest.TestCase):
    """date_args_empty() must be True only when neither search text nor a
    date flag was given on the command line."""

    def test_no_args(self):
        args = parse_args(["task", "search"])
        self.assertTrue(date_args_empty(args))

    def test_only_date(self):
        args = parse_args(["task", "search", "--today"])
        self.assertFalse(date_args_empty(args))

    def test_only_text(self):
        args = parse_args(["task", "search", "New", "task", "added"])
        self.assertFalse(date_args_empty(args))

    def test_text_and_date(self):
        args = parse_args(["task", "search", "New", "task", "added", "--today"])
        self.assertFalse(date_args_empty(args))
class TestDateToSearch(unittest.TestCase):
    """dates_to_search() maps a single-day flag to a date and a range flag
    (--month/--week) to a [first_day, last_day] list."""

    def test_today(self):
        search_date = datetime.now().date()
        args = parse_args(["task", "search", "--today"])
        self.assertEqual(dates_to_search(args), search_date)

    def test_yesterday(self):
        search_date = datetime.now().date() - timedelta(days=1)
        args = parse_args(["task", "search", "--yesterday"])
        self.assertEqual(dates_to_search(args), search_date)

    def test_month(self):
        now = datetime.now()
        # monthrange() returns (weekday_of_first_day, number_of_days).
        num_days = calendar.monthrange(now.year, now.month)[1]
        days = [date(now.year, now.month, 1), date(now.year, now.month, num_days)]
        args = parse_args(["task", "search", "--month"])
        self.assertListEqual(dates_to_search(args), days)

    def test_week(self):
        now = datetime.now().date()
        # weekday() is 0 for Monday, so this is the Monday of the current week.
        first_day = now - timedelta(days=now.weekday())
        last_day = first_day + timedelta(days=6)
        days = [first_day, last_day]
        args = parse_args(["task", "search", "--week"])
        self.assertListEqual(dates_to_search(args), days)
if __name__ == "__main__":
unittest.main()
| EGAMAGZ/codenotes | tests/util/test_args.py | test_args.py | py | 1,944 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "codenotes.parse_args",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "codenotes.util.args.date_args_empty",
"line_number": 13,
"usage_type": "call"
},
{
"ap... |
16185556027 | import string
import sqlalchemy.sql as sasql
from ..util import random_string, sha256_hash
from ..adapter.repository import UserRepo, UserFollowRepo
from .exception import UsecaseException, NotFoundException
class UserUsecase:
    """Business logic for user accounts, follow relations and verify codes."""

    # Pending verification codes keyed by "<type>_<mobile|email>".
    # NOTE(review): per-process in-memory stores — codes are lost on restart
    # and not shared between workers.
    _mobile_verify_codes = {}
    _email_verify_codes = {}

    def __init__(self, config: dict, user_repo: UserRepo,
                 user_follow_repo: UserFollowRepo):
        self.config = config
        self.user_repo = user_repo
        self.user_follow_repo = user_follow_repo

    async def create_user(self, **data):
        """Create a user; enforces username uniqueness and salts the password."""
        if (await self.info_by_username(data['username'])) is not None:
            raise UsecaseException("用户名重复")

        data['salt'] = random_string(64)
        data['password'] = sha256_hash(data['password'], data['salt'])

        return await self.user_repo.create(**data)

    async def modify_user(self, id, **data):
        """Modify user fields; enforces uniqueness of username/mobile/email.

        A new password is re-hashed with the user's stored salt.
        """
        if data.get('username') is not None and (await self.info_by_username(data['username'])) is not None:
            raise UsecaseException("用户名重复")
        if data.get('mobile') is not None and (await self.info_by_mobile(data['mobile'])) is not None:
            raise UsecaseException("手机重复")
        if data.get('email') is not None and (await self.info_by_email(data['email'])) is not None:
            raise UsecaseException("邮箱重复")

        if data.get('password') is not None:
            # Bug fix: info() is a coroutine and must be awaited; previously
            # the un-awaited coroutine object was subscripted with ['salt'],
            # which raises TypeError at runtime.
            user = await self.info(id)
            data['password'] = sha256_hash(data['password'], user['salt'])

        return await self.user_repo.modify(id, **data)

    async def info(self, id):
        """Return the user record for *id*; None for a None id.

        Raises:
            NotFoundException: if no user exists with this id.
        """
        if id is None:
            return None

        user = await self.user_repo.info(id)
        if user is None:
            raise NotFoundException('用户未找到')

        return user

    async def info_by_username(self, username):
        """Return the user record matching *username*, or None."""
        return await self.user_repo.info(username, 'username')

    async def info_by_mobile(self, mobile):
        """Return the user record matching *mobile*, or None."""
        return await self.user_repo.info(mobile, 'mobile')

    async def info_by_email(self, email):
        """Return the user record matching *email*, or None."""
        return await self.user_repo.info(email, 'email')

    async def infos(self, ids):
        """Return user records for a list of ids."""
        return await self.user_repo.infos(ids)

    async def list(self, *, limit=None, offset=None):
        """Return a page of users."""
        return await self.user_repo.list(limit=limit, offset=offset)

    async def follow(self, follower_id, following_id):
        """Create a follow relation between two users."""
        return await self.user_follow_repo.create(
            follower_id=follower_id, following_id=following_id)

    async def unfollow(self, follower_id, following_id):
        """Delete the follow relation between two users, if present."""
        await self.user_follow_repo.execute(
            sasql.delete(self.user_follow_repo.table).
            where(sasql.and_(
                self.user_follow_repo.table.c.follower_id == follower_id,
                self.user_follow_repo.table.c.following_id == following_id)))

    async def following(self, user_id, limit=None, offset=None):
        """List users that *user_id* follows, most recent follow first."""
        from_ = self.user_repo.table.join(
            self.user_follow_repo.table,
            self.user_follow_repo.table.c.following_id == self.user_repo.table.c.id)
        where = self.user_follow_repo.table.c.follower_id == user_id
        order_by = self.user_follow_repo.table.c.id.desc()

        return await self.user_repo.list(
            from_=from_, where=where, order_by=order_by, limit=limit,
            offset=offset)

    async def follower(self, user_id, limit=None, offset=None):
        """List users who follow *user_id*, most recent follow first."""
        from_ = self.user_repo.table.join(
            self.user_follow_repo.table,
            self.user_follow_repo.table.c.follower_id == self.user_repo.table.c.id)
        where = self.user_follow_repo.table.c.following_id == user_id
        order_by = self.user_follow_repo.table.c.id.desc()

        return await self.user_repo.list(
            from_=from_, where=where, order_by=order_by, limit=limit,
            offset=offset)

    async def is_following_users(self, follower_id, following_ids):
        """For each id in *following_ids*, tell whether *follower_id* follows it.

        Returns a list of booleans aligned with *following_ids*; None entries
        always yield False.
        """
        valid_ids = [v for v in following_ids if v is not None]
        if valid_ids:
            # NOTE(review): the IN clause uses the original following_ids
            # (possibly containing None) rather than valid_ids; None never
            # matches a row, so the result is unchanged.
            result = await self.user_follow_repo.execute(
                self.user_follow_repo.table.select()
                .where(sasql.and_(
                    self.user_follow_repo.table.c.follower_id == follower_id,
                    self.user_follow_repo.table.c.following_id.in_(following_ids))))
            d = {v['following_id']: True for v in await result.fetchall()}
        else:
            d = {}

        return [d.get(v, False) for v in following_ids]

    async def send_mobile_verify_code(self, type, mobile):
        """Generate (or reuse a pending) 6-digit code for (type, mobile)."""
        key = '{}_{}'.format(type, mobile)
        code = self._mobile_verify_codes.get(key)
        if code is None:
            code = random_string(6, string.digits)

        # Simulated send; a real SMS provider API should be called here.
        self._mobile_verify_codes[key] = code

        return code

    async def check_mobile_verify_code(self, type, mobile, code):
        """Check a mobile code; a successful check consumes it."""
        key = '{}_{}'.format(type, mobile)
        sended = self._mobile_verify_codes.get(key)
        if sended is None or sended != code:
            return False

        del self._mobile_verify_codes[key]

        return True

    async def send_email_verify_code(self, type, email):
        """Generate (or reuse a pending) 6-digit code for (type, email)."""
        key = '{}_{}'.format(type, email)
        code = self._email_verify_codes.get(key)
        if code is None:
            code = random_string(6, string.digits)

        self._email_verify_codes[key] = code
        # TODO call a third-party API to send the verification email.

        return code

    async def check_email_verify_code(self, type, email, code):
        """Check an email code; a successful check consumes it."""
        key = '{}_{}'.format(type, email)
        sended = self._email_verify_codes.get(key)
        if sended is None or sended != code:
            return False

        del self._email_verify_codes[key]

        return True
| jaggerwang/sanic-in-practice | weiguan/usecase/user.py | user.py | py | 5,796 | python | en | code | 42 | github-code | 36 | [
{
"api_name": "adapter.repository.UserRepo",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "adapter.repository.UserFollowRepo",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "exception.UsecaseException",
"line_number": 22,
"usage_type": "call"
},
... |
43168841119 | import settings
import os
import click
import inspect
import sys
from configure import db as dbs
from apps import app
from CustomerException import ParameterError
from apps.API.models import (
Model,
Device,
DeviceService,
DeviceServiceData
)
from asyncpg import create_pool
from utils.table_util import CreateTable
###########################################
########对于非orm项目可以取消注释下############
########面两行用于全局引用数据库连 ############
########接池 ############
###########################################
# @app.listener('before_server_start')
# async def register_db(app, loop):
# conn = "postgres://{user}:{password}@{host}:{port}/{database}".format(
# user=settings.CONFIG.DB_USER, password=settings.CONFIG.DB_PASSWORD,
# host=settings.CONFIG.DB_HOST, port=settings.CONFIG.DB_PORT,
# database=settings.CONFIG.DB_DATABASE
# )
# app.settings['pool'] = await create_pool(
# dsn=conn,
# min_size=10,
# max_size=10,
# max_queries=50000,
# max_inactive_connection_lifetime=300,
# loop=loop
# )
# @app.listener('after_server_stop')
# async def close_connection(app, loop):
# pool = app.settings['pool']
# async with pool.acquire() as conn:
# await conn.close()
@app.listener('before_server_start')
async def register_db(app, loop):
    # Intentionally empty: the ORM configures its own connection.  See the
    # commented-out asyncpg pool setup above for the non-ORM variant.
    pass
@click.group()
def run():
    # Root click command group; sub-commands are attached via run.add_command below.
    pass
@click.command()
@click.argument('db')
def init(db):
    """Create database tables for every model class defined in apps.API.models."""
    # Inspecting the module yields every class it references, including
    # imported ones; the str(obj) check keeps only classes actually defined
    # in apps.API.models.
    try:
        if db != 'db':
            raise ParameterError("Parameter Error, Please use 'db'!")
        __import__('apps.API.models')
        models_module = sys.modules['apps.API.models']
        for _name, klass in inspect.getmembers(models_module, inspect.isclass):
            if 'apps.API.models' in str(klass):
                sys.stdout.write('.')
                sys.stdout.flush()
                CreateTable(klass)
        sys.stdout.write('OK')
        sys.stdout.flush()
    except ParameterError as e:
        print(e)
@click.command()
def shell():
    # Open an interactive IPython session with the models module preloaded.
    os.system('ipython -i -m "apps.models"')
@click.command()
def runserver():
    # Serve the Sanic application on all interfaces with 4 worker processes.
    app.run(host="0.0.0.0", port=8001, workers=4)
run.add_command(init)
run.add_command(shell)
run.add_command(runserver)
if __name__ == "__main__":
# app.settings.ACCESS_LOG = False
run()
| DemonXD/template_sanic_project | manager.py | manager.py | py | 2,643 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "apps.app.listener",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "apps.app",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "click.group",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "sys.modules",
"line_number... |
16248087762 | from .base import Scoring
from math import pi
import torch
__all__ = ["ComplEx"]
class ComplEx(Scoring):
    """ComplEx scoring function.

    Each embedding stores the real part in the first half of its last
    dimension and the imaginary part in the second half.  The score of a
    triple is the real part of the trilinear Hermitian product
    Re(<h, r, conj(t)>), computed half-by-half with plain tensor arithmetic
    and summed over the embedding dimension.
    """

    def __init__(self):
        super().__init__()

    def __call__(self, head, relation, tail, mode, **kwargs):
        """Compute the score of given facts (heads, relations, tails).

        Parameters
        ----------
        head: Embeddings of heads.
        relation: Embeddings of relations.
        tail: Embeddings of tails.
        mode: head-batch or tail-batch.
        """
        head_re, head_im = torch.chunk(head, 2, dim=2)
        rel_re, rel_im = torch.chunk(relation, 2, dim=2)
        tail_re, tail_im = torch.chunk(tail, 2, dim=2)

        if mode == "head-batch":
            # Combine relation and tail first, then project onto the head.
            tmp_re = rel_re * tail_re + rel_im * tail_im
            tmp_im = rel_re * tail_im - rel_im * tail_re
            score = head_re * tmp_re + head_im * tmp_im
        else:
            # Combine head and relation first, then project onto the tail.
            tmp_re = head_re * rel_re - head_im * rel_im
            tmp_im = head_re * rel_im + head_im * rel_re
            score = tmp_re * tail_re + tmp_im * tail_im

        return score.sum(dim=2)
| raphaelsty/ckb | ckb/scoring/complex.py | complex.py | py | 2,492 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "base.Scoring",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.chunk",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.chunk",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.chunk",
"line_number":... |
13413427634 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from matplotlib.lines import Line2D
import pandas as pd
##############################################################
# Change these lines to apply on your custom datasets
##############################################################
features = 'scraping/test_data_evaluation/r-mac_features.npy'
dataset = 'scraping/test_dataset.csv'
landmark_colors = {'holstentor': 'b', 'rathaus': 'tab:purple', 'sternschanze': 'g', 'michaelis': 'tab:olive',
'elbphilharmonie': 'tab:orange', 'random': 'tab:brown'}
class_list = ['holstentor', 'rathaus', 'sternschanze', 'michaelis', 'elbphilharmonie', 'random']
save = False
##############################################################
# End of hardcoded parameters
##############################################################
def vis_pca_features(pca_result: np.array, query_id: int = None, answer_id: np.array = None, save_name: str = None,
                     title: str = ''):
    """
    Plot the downprojected features vectors in 2D
    :param pca_result: The 2D projected version of the vectors
    :param query_id: Index of image that was used as query
    :param answer_id: Array of indices of retrieved images
    :param save_name: Filename to save the plot
    :param title: Title of the plot
    :return:
    """
    # NOTE(review): relies on the module globals `test_data`, `class_list`
    # and `landmark_colors`, which are assigned further down in this script —
    # the function must not be called before those assignments run.
    fig, ax = plt.subplots(1)
    for l in class_list:
        idxs = test_data[test_data.landmark == l].index.values  # find all images of that class
        ax.scatter(pca_result[idxs, 0], pca_result[idxs, 1], label=l,
                   color=landmark_colors[l])
        ax.scatter(np.average(pca_result[idxs, 0]), np.average(pca_result[idxs, 1]), label=l, marker='*',
                   color=landmark_colors[l])  # plot the class average as star marker
    if query_id is not None:
        ax.scatter(pca_result[query_id, 0], pca_result[query_id, 1], color='r', marker='x', label='query')
    if answer_id is not None:
        # answer_id[1:] skips the first retrieved index — presumably the
        # query image itself; verify with the caller.
        ax.scatter(pca_result[answer_id[1:], 0], pca_result[answer_id[1:], 1], color='r', marker=2, label='answer')
    # summarize legend entries of same landmark
    handles, labels = plt.gca().get_legend_handles_labels()
    labels, ids = np.unique(labels, return_index=True)
    handles = [handles[i] for i in ids]
    # insert legend dummy for average star
    avg_patch = Line2D([0], [0], marker='*', color='grey', label='Class average', markersize=9, linestyle='None')
    handles.insert(-1, avg_patch)
    plt.legend(handles, np.insert(labels, -1, 'Class average'), loc='best')
    ax.set_xticks([])
    ax.set_yticks([])
    plt.title(title)
    if save_name:
        plt.savefig(save_name + '.pdf')
    plt.show()
print("Loading features..")
features = np.load(features)
test_data = pd.read_csv(dataset)
class_order = test_data.landmark.values
print("Projecting features..")
pca = PCA(n_components=2)
pca_result = pca.fit_transform(features)
savepath = 'vis_pca_features.pdf' if save else None
vis_pca_features(pca_result, save_name=savepath)
| MiriUll/multimodal_ABSA_Elbphilharmonie | pca_vis_img_features.py | pca_vis_img_features.py | py | 3,060 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "numpy.a... |
21119755777 | from typing import Counter, List
class Solution:
    """LeetCode 2363: merge two weighted item lists."""

    def mergeSimilarItems(self, items1: List[List[int]], items2: List[List[int]]) -> List[List[int]]:
        """Sum the weights of items sharing a value across both lists and
        return [value, weight] pairs sorted ascending by value.

        The accumulator was renamed from `map`, which shadowed the builtin.
        """
        totals = Counter()
        for value, weight in items1:
            totals[value] += weight
        for value, weight in items2:
            totals[value] += weight
        return sorted([value, weight] for value, weight in totals.items())
if __name__ == '__main__':
items1 = [[1,1],[4,5],[3,8]]
items2 = [[3,1],[1,5]]
items1 = [[1,1],[3,2],[2,3]]
items2 = [[2,1],[3,2],[1,3]]
items1 = [[1,3],[2,2]]
items2 = [[7,1],[2,2],[1,4]]
rtn = Solution().mergeSimilarItems(items1, items2)
print(rtn)
| plattanus/leetcodeDAY | python/2363. 合并相似的物品.py | 2363. 合并相似的物品.py | py | 625 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing.Counter",
"line_number": 6,
"usage_type": "call"
}
] |
36493183781 | """archetypal StructureInformation."""
import collections
from validator_collection import validators
from archetypal.template.constructions.base_construction import ConstructionBase
from archetypal.template.materials.opaque_material import OpaqueMaterial
class MassRatio(object):
    """Handles the properties of the mass ratio for building template structure."""

    __slots__ = ("_high_load_ratio", "_material", "_normal_ratio")

    def __init__(self, HighLoadRatio=None, Material=None, NormalRatio=None, **kwargs):
        """Initialize a MassRatio object with parameters.

        Args:
            HighLoadRatio (float): structural mass per floor area under high
                load [kg/m2].
            Material (OpaqueMaterial): the structural material.
            NormalRatio (float): structural mass per floor area under normal
                load [kg/m2].
        """
        self.HighLoadRatio = HighLoadRatio
        self.Material = Material
        self.NormalRatio = NormalRatio

    @property
    def HighLoadRatio(self):
        """Get or set the high load ratio [kg/m2]."""
        return self._high_load_ratio

    @HighLoadRatio.setter
    def HighLoadRatio(self, value):
        # validators.float coerces to float and rejects negative values.
        self._high_load_ratio = validators.float(value, minimum=0)

    @property
    def Material(self):
        """Get or set the structure OpaqueMaterial."""
        return self._material

    @Material.setter
    def Material(self, value):
        assert isinstance(
            value, OpaqueMaterial
        ), f"Material must be of type OpaqueMaterial, not {type(value)}"
        self._material = value

    @property
    def NormalRatio(self):
        """Get or set the normal load ratio [kg/m2]."""
        return self._normal_ratio

    @NormalRatio.setter
    def NormalRatio(self, value):
        self._normal_ratio = validators.float(value, minimum=0)

    def __hash__(self):
        """Return the hash value of self."""
        # NOTE(review): the hash depends on mutable properties; avoid mutating
        # an instance after using it as a dict key or set member.
        return hash(self.__key__())

    def __key__(self):
        """Get a tuple of attributes. Useful for hashing and comparing."""
        return (
            self.HighLoadRatio,
            self.Material,
            self.NormalRatio,
        )

    def __eq__(self, other):
        """Assert self is equivalent to other."""
        if not isinstance(other, MassRatio):
            return NotImplemented
        else:
            return self.__key__() == other.__key__()

    def __iter__(self):
        """Iterate over attributes. Yields tuple of (keys, value)."""
        for k, v in self.mapping().items():
            yield k, v

    def to_dict(self):
        """Return MassRatio dictionary representation."""
        # The material is serialized as a "$ref" to its id, matching the
        # template-library JSON format used by from_dict on the owner class.
        return collections.OrderedDict(
            HighLoadRatio=self.HighLoadRatio,
            Material={"$ref": str(self.Material.id)},
            NormalRatio=self.NormalRatio,
        )

    def mapping(self):
        """Get a dict based on the object properties, useful for dict repr."""
        return dict(
            HighLoadRatio=self.HighLoadRatio,
            Material=self.Material,
            NormalRatio=self.NormalRatio,
        )

    def get_unique(self):
        """Return the first of all the created objects that is equivalent to self."""
        return self

    @classmethod
    def generic(cls):
        """Create generic MassRatio object."""
        # Values from the BostonTemplateLibrary reference data set.
        mat = OpaqueMaterial(
            Name="Steel General",
            Conductivity=45.3,
            SpecificHeat=500,
            SolarAbsorptance=0.4,
            ThermalEmittance=0.9,
            VisibleAbsorptance=0.4,
            Roughness="Rough",
            Cost=0,
            Density=7830,
            MoistureDiffusionResistance=50,
            EmbodiedCarbon=1.37,
            EmbodiedEnergy=20.1,
            TransportCarbon=0.067,
            TransportDistance=500,
            TransportEnergy=0.94,
            SubstitutionRatePattern=[1],
            SubstitutionTimestep=100,
            DataSource="BostonTemplateLibrary.json",
        )
        return cls(HighLoadRatio=305, Material=mat, NormalRatio=305)

    def duplicate(self):
        """Get copy of self."""
        return self.__copy__()

    def __copy__(self):
        """Create a copy of self."""
        # Shallow copy: the Material instance is shared, not duplicated.
        return self.__class__(self.HighLoadRatio, self.Material, self.NormalRatio)
class StructureInformation(ConstructionBase):
    """Building Structure settings.

    .. image:: ../images/template/constructions-structure.png
    """

    # Registry of every instance created; get_unique-style deduplication
    # elsewhere in the package relies on this.
    _CREATED_OBJECTS = []

    __slots__ = ("_mass_ratios",)

    def __init__(self, Name, MassRatios, **kwargs):
        """Initialize object.

        Args:
            MassRatios (list of MassRatio): MassRatio object.
            **kwargs: keywords passed to the ConstructionBase constructor.
        """
        super(StructureInformation, self).__init__(Name, **kwargs)
        self.MassRatios = MassRatios

        # Only at the end append self to _CREATED_OBJECTS
        self._CREATED_OBJECTS.append(self)

    @property
    def MassRatios(self):
        """Get or set the list of MassRatios."""
        return self._mass_ratios

    @MassRatios.setter
    def MassRatios(self, value):
        assert isinstance(value, list), "mass_ratio must be of a list of MassRatio"
        self._mass_ratios = value

    @classmethod
    def from_dict(cls, data, materials, **kwargs):
        """Create StructureInformation from a dictionary.

        Args:
            data (dict): A python dictionary.
            materials (dict): A dictionary of python OpaqueMaterials with their id as
                keys.
            **kwargs: keywords passed to parent constructors.
        """
        # Resolve each MassRatio's "$ref" material id against the given
        # materials lookup (the inverse of MassRatio.to_dict).
        mass_ratio_ref = data.pop("MassRatios")
        mass_ratios = [
            MassRatio(
                HighLoadRatio=massratio["HighLoadRatio"],
                Material=materials[massratio["Material"]["$ref"]],
                NormalRatio=massratio["NormalRatio"],
            )
            for massratio in mass_ratio_ref
        ]
        _id = data.pop("$id")
        return cls(MassRatios=mass_ratios, id=_id, **data, **kwargs)

    def to_dict(self):
        """Return StructureInformation dictionary representation."""
        self.validate()  # Validate object before trying to get json format

        data_dict = collections.OrderedDict()

        data_dict["$id"] = str(self.id)
        data_dict["MassRatios"] = [mass.to_dict() for mass in self.MassRatios]
        data_dict["AssemblyCarbon"] = self.AssemblyCarbon
        data_dict["AssemblyCost"] = self.AssemblyCost
        data_dict["AssemblyEnergy"] = self.AssemblyEnergy
        data_dict["DisassemblyCarbon"] = self.DisassemblyCarbon
        data_dict["DisassemblyEnergy"] = self.DisassemblyEnergy
        data_dict["Category"] = self.Category
        data_dict["Comments"] = validators.string(self.Comments, allow_empty=True)
        data_dict["DataSource"] = self.DataSource
        data_dict["Name"] = self.Name

        return data_dict

    def validate(self):
        """Validate object and fill in missing values."""
        return self

    def mapping(self, validate=False):
        """Get a dict based on the object properties, useful for dict repr.

        Args:
            validate (bool): If True, try to validate object before returning the
                mapping.
        """
        if validate:
            self.validate()

        return dict(
            MassRatios=self.MassRatios,
            AssemblyCarbon=self.AssemblyCarbon,
            AssemblyCost=self.AssemblyCost,
            AssemblyEnergy=self.AssemblyEnergy,
            DisassemblyCarbon=self.DisassemblyCarbon,
            DisassemblyEnergy=self.DisassemblyEnergy,
            Category=self.Category,
            Comments=self.Comments,
            DataSource=self.DataSource,
            Name=self.Name,
        )

    def duplicate(self):
        """Get copy of self."""
        return self.__copy__()

    def __hash__(self):
        """Return the hash value of self."""
        # Unlike MassRatio, instances hash by id, not by attribute values.
        return hash(self.id)

    def __eq__(self, other):
        """Assert self is equivalent to other."""
        if not isinstance(other, StructureInformation):
            return NotImplemented
        else:
            return all(
                [
                    self.AssemblyCarbon == other.AssemblyCarbon,
                    self.AssemblyCost == other.AssemblyCost,
                    self.AssemblyEnergy == other.AssemblyEnergy,
                    self.DisassemblyCarbon == other.DisassemblyCarbon,
                    self.DisassemblyEnergy == other.DisassemblyEnergy,
                    self.MassRatios == other.MassRatios,
                ]
            )

    def __copy__(self):
        """Create a copy of self."""
        return self.__class__(**self.mapping(validate=False))

    @property
    def children(self):
        # Materials referenced by this structure, one per MassRatio.
        return tuple(m.Material for m in self.MassRatios)
| samuelduchesne/archetypal | archetypal/template/structure.py | structure.py | py | 8,696 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "validator_collection.validators.float",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "validator_collection.validators",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "archetypal.template.materials.opaque_material.OpaqueMaterial",
"line_nu... |
4399694937 | #!/usr/bin/env python
# coding: utf-8
from codecs import open # to use a consistent encoding
from os import path
from subprocess import check_output
from setuptools import setup, find_packages
def get_version():
    """Return the project version from ``git describe``, or "?" on failure.

    Narrowed the handler from a bare ``except:`` (which also swallowed
    SystemExit/KeyboardInterrupt) to ``except Exception``.
    """
    cmd = "git describe"
    try:
        result = check_output(
            cmd.split(),
        ).decode('utf-8').strip()
    except Exception:
        result = "?"
    return result
def get_long_description():
    """Read README.md (UTF-8) from the directory containing this file."""
    here = path.abspath(path.dirname(__file__))
    readme = path.join(here, "README.md")
    with open(readme, encoding="utf-8") as f:
        return f.read()
setup(
name="swmclient",
version=get_version(),
description="Python bindings for swm-core user REST API",
long_description=get_long_description(),
long_description_content_type="text/markdown",
url="https://github.com/openworkload/swm-python-client",
author="Taras Shapovalov",
author_email="taras@iclouds.net",
packages=find_packages(),
license="BSD",
include_package_data=True,
install_requires=["httpx"],
python_requires=">=3.9, <4",
platforms="Linux, Mac OS X, Windows",
keywords=[
"HPC",
"High Performance Computing",
"Cloud Computing",
"Open Workload",
"Sky Port"
],
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3 :: Only",
],
project_urls={
"Bug Reports": "https://github.com/openworkload/swm-python-client/issues",
"Source": "https://github.com/openworkload/swm-python-client",
},
)
| openworkload/swm-python-client | setup.py | setup.py | py | 1,828 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "subprocess.check_output",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
... |
8735286229 | import requests
def linkCheck(linksFound):
    """Probe each URL with a GET and split them into working and broken lists.

    Returns (goodLinks, badLinks); a link counts as good only on HTTP 200 —
    any other status (including redirects) is reported as broken.
    """
    goodLinks = []
    badLinks = []
    for url in linksFound:
        status = requests.get(url).status_code
        if status == 200:
            print(url + " <<<<<<<<<< 200")
            goodLinks.append(url)
        else:
            badLinks.append(url)
            print(url + " <<<<<<<<<< link broken. Status: " + str(status))
    return goodLinks, badLinks
| zipinel/Selenium_and_BeautifulSoup | Base/linkChecker.py | linkChecker.py | py | 467 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
}
] |
2037492885 | #!/usr/bin/env python3
import argparse
import datetime
import importlib
import re
import site
import traceback
from pathlib import Path
import yaml
SECRET_FILENAME = "secrets.yaml"
SECRET_REGEX = re.compile(r"!secret\s(\w+)")
def main():
    """Run every waste-collection source's TEST_CASES and report entry counts."""
    parser = argparse.ArgumentParser(description="Test sources.")
    parser.add_argument(
        "-s", "--source", action="append", help="Test given source file"
    )
    parser.add_argument(
        "-l", "--list", action="store_true", help="List retrieved entries"
    )
    parser.add_argument(
        "-i", "--icon", action="store_true", help="Show waste type icon"
    )
    parser.add_argument("--sorted", action="store_true", help="Sort output by date")
    parser.add_argument("--weekday", action="store_true", help="Show weekday")
    parser.add_argument(
        "-t",
        "--traceback",
        action="store_true",
        help="Print exception information and stack trace",
    )
    args = parser.parse_args()

    # read secrets.yaml
    secrets = {}
    try:
        with open(SECRET_FILENAME) as stream:
            try:
                secrets = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                print(exc)
    except FileNotFoundError:
        # ignore missing secrets.yaml
        pass

    package_dir = Path(__file__).resolve().parents[2]
    source_dir = package_dir / "waste_collection_schedule" / "source"

    # add module directory to path
    site.addsitedir(str(package_dir))

    if args.source is not None:
        files = args.source
    else:
        # All source modules except the package initializer.
        files = filter(
            lambda x: x != "__init__",
            map(lambda x: x.stem, source_dir.glob("*.py")),
        )

    for f in sorted(files):
        # iterate through all *.py files in waste_collection_schedule/source
        print(f"Testing source {f} ...")
        module = importlib.import_module(f"waste_collection_schedule.source.{f}")

        # get all names within module
        names = set(dir(module))

        # test if all mandatory names exist
        assert "TITLE" in names
        assert "DESCRIPTION" in names
        assert "URL" in names
        assert "TEST_CASES" in names

        # run through all test-cases
        for name, tc in module.TEST_CASES.items():
            # replace secrets in arguments
            replace_secret(secrets, tc)

            # create source
            try:
                source = module.Source(**tc)
                result = source.fetch()
                count = len(result)
                if count > 0:
                    print(
                        f" found {bcolors.OKGREEN}{count}{bcolors.ENDC} entries for {name}"
                    )
                else:
                    print(
                        f" found {bcolors.WARNING}0{bcolors.ENDC} entries for {name}"
                    )
                # test if source is returning the correct date format
                if (
                    len(
                        list(
                            filter(lambda x: type(x.date) is not datetime.date, result)
                        )
                    )
                    > 0
                ):
                    print(
                        f"{bcolors.FAIL} ERROR: source returns invalid date format (datetime.datetime instead of datetime.date?){bcolors.ENDC}"
                    )
                if args.list:
                    result = (
                        sorted(result, key=lambda x: x.date) if args.sorted else result
                    )
                    for x in result:
                        icon_str = f" [{x.icon}]" if args.icon else ""
                        weekday_str = x.date.strftime("%a ") if args.weekday else ""
                        print(
                            f" {x.date.isoformat()} {weekday_str}: {x.type}{icon_str}"
                        )
            except KeyboardInterrupt:
                # Abort the whole run on Ctrl-C instead of treating it as a failure.
                exit()
            except Exception as exc:
                # Broad catch is deliberate: one failing source must not stop
                # the remaining test cases.
                print(f" {name} {bcolors.FAIL}failed{bcolors.ENDC}: {exc}")
                if args.traceback:
                    print(indent(traceback.format_exc(), 4))
def replace_secret(secrets, d):
    """Recursively replace ``!secret <id>`` string values in *d* in place,
    using the given *secrets* mapping.  Unknown ids are reported and left
    unchanged."""
    for key, value in d.items():
        if isinstance(value, dict):
            replace_secret(secrets, value)
            continue
        if not isinstance(value, str):
            continue
        match = SECRET_REGEX.fullmatch(value)
        if match is None:
            continue
        secret_id = match.group(1)
        if secret_id in secrets:
            d[key] = secrets[secret_id]
        else:
            print(f"identifier '{secret_id}' not found in {SECRET_FILENAME}")
def indent(s, count):
    """Return ``s`` with every line prefixed by ``count`` spaces."""
    prefix = " " * count  # renamed: the local previously shadowed the function name
    return "\n".join(prefix + line for line in s.split("\n"))
class bcolors:
    """ANSI escape sequences for coloring terminal output."""

    HEADER = "\033[95m"  # bright magenta
    OKBLUE = "\033[94m"
    OKCYAN = "\033[96m"
    OKGREEN = "\033[92m"
    WARNING = "\033[93m"  # yellow
    FAIL = "\033[91m"  # red
    ENDC = "\033[0m"  # reset all attributes
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
if __name__ == "__main__":
    # Script entry point (main() is defined earlier in this file).
    main()
| geNAZt/home-assistant | custom_components/waste_collection_schedule/waste_collection_schedule/test/test_sources.py | test_sources.py | py | 5,022 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "yaml.safe_load",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "yaml.YAMLError",
... |
26852296512 | from draw_rectangle import print_bbox
def get_tokens(txt: str):
    """Split ``txt`` into whitespace-separated tokens.

    Returns [] for the literal string "nan" (pandas missing value rendered via
    str()) and for blank/whitespace-only strings.
    """
    # `not txt.strip()` replaces `len(txt.strip()) == 0` (truthiness idiom).
    if txt == "nan" or not txt.strip():
        return []
    return txt.split()
import Levenshtein
def calculate_distance(data, findtxt):
    """Levenshtein distance between two strings.

    Returns a large sentinel (1000000) when either argument is not a string or
    ``findtxt`` is empty (e.g. NaN cells coming from the OCR dataframe), so
    such pairs never rank as good matches.
    """
    # isinstance() instead of `type(x) == str` comparisons.
    if isinstance(data, str) and isinstance(findtxt, str) and len(findtxt) > 0:
        return Levenshtein.distance(data, findtxt)
    return 1000000
import re
def get_path(url):
    """Split a gs:// URL into its bucket and object name.

    Returns a dict with keys "bucket" and "file". The bucket group is greedy,
    so everything up to the last '/' lands in "bucket". Raises AttributeError
    when the URL does not match the gs://<bucket>/<file> pattern.
    """
    gs_pattern = r'gs:\/\/(?P<bucket>.*)\/(?P<file>.*)'
    parsed = re.match(gs_pattern, url)
    return parsed.groupdict()
# Quick smoke test of get_path on a sample bucket URL (result discarded).
test = "gs://gcs-public-data--labeled-patents/us_084.pdf"
get_path(test)
def label_file(sample, features, ocrdf):
    """Compute Levenshtein distances from every label token to every OCR word.

    For each feature in ``features``, the label text in ``sample`` is
    tokenized; features whose label tokenizes to nothing are dropped.

    Returns a tuple (distance_matrix, tokens, token_counts, kept_features)
    where the matrix has one row per label token and one column per row of
    ``ocrdf`` (distances against ocrdf["text"]).
    """
    tokenized = [get_tokens(str(sample[feature])) for feature in features]
    counts = [len(toks) for toks in tokenized]
    kept_features = []
    kept_counts = []
    for feature, count in zip(features, counts):
        if count > 0:
            kept_features.append(feature)
            kept_counts.append(count)
    search_tokens = [token for toks in tokenized for token in toks]
    distance_rows = []
    for token in search_tokens:
        distances = ocrdf.apply(lambda row: calculate_distance(row["text"], token), axis=1)
        distance_rows.append(distances.to_numpy())
    return np.array(distance_rows), search_tokens, kept_counts, kept_features
def getx(features, tokens, lens, best_variation):
    """Flatten the best per-label variation into three parallel lists.

    Returns (row_indices, label_names, target_tokens): the OCR dataframe row
    index chosen for each token, the feature (label) name repeated per token,
    and the label token text itself.
    """
    row_indices = []
    label_names = []
    target_tokens = []
    offset = 0
    for label_idx, n_tokens in enumerate(lens):
        # Column 3 of each candidate row holds the OCR dataframe row index.
        row_indices.extend(best_variation[label_idx][0, :, 3])
        label_names.extend([features[label_idx]] * n_tokens)
        target_tokens.extend(tokens[offset:offset + n_tokens])
        offset += n_tokens
    return row_indices, label_names, target_tokens
pass  # stray no-op left over from editing
labels_file = "data/patents_dataset.xlsx"
import numpy as np
import pandas as pd
#import xlrd
from tqdm import tqdm
import itertools
import os.path
# Ground-truth labels: one spreadsheet row per patent document.
df = pd.read_excel(labels_file, sheet_name=0)
# for each file
for i in tqdm(range(df.shape[0])):
    try:
        sample = df.iloc[i]
        # NOTE(review): if get_path raises here, file_name is unbound and the
        # bare `except` below would itself fail with NameError — confirm inputs.
        file_name = get_path(sample[0])["file"]
        annotation_path = "annotation/" + file_name + ".csv"
        # Skip documents that were already annotated.
        if os.path.exists(annotation_path):
            continue
        features = sample.keys()[3:]
        ocrdf = pd.read_csv("data/" + file_name + ".csv")
        data, tokens, lens, features = label_file(sample, features, ocrdf)
        print(tokens)
        # Normalized token center coordinates; row 0 presumably holds the page
        # width/height — TODO confirm against the OCR CSV layout.
        ocrdf["x"] = (ocrdf.loc[:, "left"] + ocrdf.loc[:, "width"] / 2) / ocrdf.loc[0, "width"]
        ocrdf["y"] = (ocrdf.loc[:, "top"] + ocrdf.loc[:, "height"] / 2) / ocrdf.loc[0, "height"]
        # consider that words in the same line are closer
        # than in different lines
        ocrdf["x"] = ocrdf["x"]/4
        positions = ocrdf.loc[:, ["x", "y"]].to_numpy()
        myData = data.T
        # Keep the top_n OCR words with the lowest edit distance per label token.
        top_n = 4
        top_lev = np.argsort(myData, axis=0)[:top_n]
        top_lev_values = np.sort(myData, axis=0)[:top_n]
        top_postions = ocrdf.loc[top_lev.flatten(), ["x", "y"]]
        top_postions["lev"] = top_lev_values.flatten()
        top_postions["pos"] = top_lev.flatten()
        # Shape: (top_n candidates, n label tokens, {x, y, lev, pos}).
        tokens_matrix = top_postions.to_numpy().reshape(top_lev.shape[0], top_lev.shape[1], 4)
        labels_best_results = []
        labels_best_results_indexes = []
        labels_best_scores = []
        pos = 0
        # for l as length of one of the labels
        # for each label
        for l in lens:
            cluster_matrix = tokens_matrix[:, pos:pos + l, :]
            # (topn candidates, n_tokens current label, {x y lev pos})
            tokens_vars = np.transpose(cluster_matrix, axes=(1, 0, 2))
            # ( n_tokens current label,topn candidates, {x y lev pos})
            postions_scores = []
            variations = []
            for variation in itertools.product(*tokens_vars):
                # for each combination of candidate tokens for this label
                npvariation = np.array(variation)
                deviations = np.std(npvariation[:, :2], axis=0)
                deviation = np.sqrt(np.sum(np.power(deviations, 2)))  # mean distance from the center
                levenstein = np.sum(npvariation[:, 2:3], axis=0)  # summed Levenshtein distance
                # score of the combination (lower is better)
                score = np.exp(levenstein) * (deviation + 1)
                postions_scores.append(score)
                variations.append(npvariation)
            postions_scores = np.array(postions_scores)
            variations = np.array(variations)
            # Shortlist the 3 best-scoring candidate combinations per label.
            best_variations_indexes = np.argsort(postions_scores, axis=0)[:3]
            best_variations_indexes_scores = postions_scores[best_variations_indexes]
            labels_best_scores.append(best_variations_indexes_scores)
            labels_best_results_indexes.append(best_variations_indexes)
            labels_best_results.append(variations[best_variations_indexes])
            pos += l
        labels_best_results_indexes = np.array(labels_best_results_indexes)
        viable_variations = []
        viable_scores = []
        # Cross-product over the per-label shortlists.
        lists = [list(range(labels_best_results[0].shape[0])) for _ in range(len(labels_best_results))]
        combinations = [x for x in itertools.product(*lists)]
        print("len(combinations)", len(combinations))
        for i, variation_indexes in enumerate(combinations):
            all_labels_variation = [labels_best_results[j][k] for j, k in enumerate(variation_indexes)]
            all_labels_scores = [labels_best_scores[j][k] for j, k in enumerate(variation_indexes)]
            variation_score = np.sum(all_labels_scores)
            #join together all position for all labels of a combination
            variation_tokens = []
            for label_candidate in all_labels_variation:
                variation_tokens.extend(label_candidate[0, :, 3])
            #if no repeated tokens in more than one label
            #it is a valid option
            if np.max(np.unique(variation_tokens, return_counts=True)[1]) == 1:
                viable_variations.append(all_labels_variation)
                viable_scores.append(variation_score)
        print("number of evaluated variations",len(viable_variations))
        best_vatiation_index = np.argmin(viable_scores)
        print("best variation index", best_vatiation_index)
        best_variation = viable_variations[best_vatiation_index]
        print(best_variation)
        all_best_tokens_index, all_best_tokens_value, all_best_tokens_target_token = getx(features,tokens,lens,best_variation)
        # Write the chosen label/token assignment back onto the OCR rows.
        # NOTE(review): pandas .at with a list of labels is unusual — .loc is
        # the documented accessor for multi-row assignment; confirm behavior.
        ocrdf.at[all_best_tokens_index,"label"] = all_best_tokens_value
        ocrdf.at[all_best_tokens_index,"target"] = all_best_tokens_target_token
        ocrdf["right"] = ocrdf["left"] + ocrdf["width"]
        ocrdf["bottom"] = ocrdf["top"] + ocrdf["height"]
        # One bounding box per label: extremes over its assigned tokens.
        tops = ocrdf.groupby(by=["label"], dropna=True)["top"].min()
        bottoms = ocrdf.groupby(by=["label"], dropna=True)["bottom"].max()
        lefts = ocrdf.groupby(by=["label"], dropna=True)["left"].min()
        rights = ocrdf.groupby(by=["label"], dropna=True)["right"].max()
        dfx = pd.merge(lefts, rights, right_index=True, left_index=True)
        dfx = pd.merge(dfx, tops, right_index=True, left_index=True)
        dfx = pd.merge(dfx, bottoms, right_index=True, left_index=True)
        print_bbox("pdf/" + file_name, dfx, "img/" + file_name + ".png")
        ocrdf.to_csv(annotation_path)
        # break  # stop at the first file
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # `except Exception` would be safer.
        print("error on",file_name)
{
"api_name": "Levenshtein.distance",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line... |
412173435 | import nltk
def init_wfst(tokens, grammar):
    """Build the initial (n+1)x(n+1) chart for a CYK-style parse.

    Arguments:
    ---------
    tokens (list):
        Words of the input sentence
    grammar:
        CFG whose lexical productions seed the chart

    Returns the chart where cell [i][i+1] lists the LHS nonterminal of every
    production rewriting to tokens[i]; every other cell is None.
    """
    n = len(tokens)
    chart = [[None] * (n + 1) for _ in range(n + 1)]
    for pos, word in enumerate(tokens):
        chart[pos][pos + 1] = [prod.lhs() for prod in grammar.productions(rhs=word)]
    return chart
def complete_wfst(wfst, tokens, grammar, trace=False):
    """Fill the upper-triangular chart cells via CYK-style combination.

    Arguments:
    ---------
    wfst:
        Chart whose span-1 cells are already populated (see init_wfst)
    tokens (list):
        Words of the input sentence
    grammar:
        CFG with binary productions; its rules drive the combination step
    trace (bool):
        Accepted for interface compatibility; currently unused.

    Returns the same chart with cell [start][end] holding the deduplicated
    nonterminals derivable over that span.
    """
    rhs_to_lhs = dict((p.rhs(), p.lhs()) for p in grammar.productions())
    n = len(tokens)
    for span in range(2, n + 1):
        for start in range(n + 1 - span):
            end = start + span
            found = []
            for mid in range(start + 1, end):
                left_cell, right_cell = wfst[start][mid], wfst[mid][end]
                for left_nt in left_cell:
                    for right_nt in right_cell:
                        if left_nt and right_nt and (left_nt, right_nt) in rhs_to_lhs:
                            found.append(rhs_to_lhs[(left_nt, right_nt)])
            wfst[start][end] = list(set(found))
    return wfst
def display(wfst, tokens):
    """Pretty-print the chart as a table, one row per start position.

    Arguments:
    ---------
    wfst:
        Chart produced by init_wfst/complete_wfst
    tokens (list):
        Words of the input sentence (unused; kept for interface parity)

    Empty cells are rendered as '.'.
    """
    header = ' '.join(("%-4d" % col) for col in range(1, len(wfst)))
    print('\nWFST ' + header)
    for row in range(len(wfst) - 1):
        print("%d " % row, end=" ")
        for col in range(1, len(wfst)):
            print("%-4s" % (wfst[row][col] or '.'), end=" ")
        print()
# MAIN FUNCTION
# Groucho Marx's structurally ambiguous sentence, as a CFG
# (unused below; kept for reference).
groucho_grammar1 = nltk.CFG.fromstring("""
S -> NP VP
PP -> P NP
NP -> Det N | Det N PP | 'I'
VP -> V NP | VP PP
Det -> 'an' | 'my'
N -> 'elephant' | 'pajamas'
V -> 'shot'
P -> 'in'
""")
# Same grammar with the ternary NP rule binarized via X, as required by the
# binary (CYK-style) combination step in complete_wfst.
groucho_grammar2 = nltk.CFG.fromstring("""
S -> NP VP
PP -> P NP
NP -> Det N | Det X | 'I'
X -> N PP
VP -> V NP | VP PP
Det -> 'an' | 'my'
N -> 'elephant' | 'pajamas'
V -> 'shot'
P -> 'in'
""")
tokens = "I shot an elephant in my pajamas".split()
initial_wfst = init_wfst(tokens, groucho_grammar2)
print('Displaying Initial Chart Parser Table for Groucho Grammar...')
display(initial_wfst, tokens)
final_wfst = complete_wfst(initial_wfst, tokens, groucho_grammar2)
print('Displaying Complete Chart Parser Table for Groucho Grammar...')
display(final_wfst, tokens)
| aashishyadavally/MS_AI_Coursework | CS6900/Assignment06/homework6_1.py | homework6_1.py | py | 2,755 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.CFG.fromstring",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "nltk.CFG",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "nltk.CFG.fromstring",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "nltk.CFG",
... |
30382090301 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 7 13:32:28 2018
@author: jon
"""
#import sys
#from pyuvdata import UVData
from pynfft import NFFT
import numpy as np
import matplotlib.pyplot as plt
from scipy import constants
from mslib import MS_jon
def singleFrequency():
    """Form and show a dirty image from one channel of the measurement set.

    Reads "simkat64-default.ms", scales the (u, v) coordinates of the first
    frequency channel into image pixel units, runs an adjoint NFFT on the
    first polarization's visibilities and displays the result.
    """
    imsize = (256, 256)
    # arcseconds per pixel; the v axis is flipped because it is empirically
    # right — the axes in the ms are not really standardized.
    cell = np.radians(np.asarray([0.5, -0.5]) / 3600.0)

    ms = MS_jon()
    ms.read_ms("simkat64-default.ms")

    # 4 polarizations are XX, XY, YX and YY; an intensity image would be XX + YY.
    wavelengths = ms.freq_array[0, 0] / constants.c
    uvw_scaled = np.dot(ms.uvw_array, np.diag(np.repeat(wavelengths, 3)))
    uv = np.multiply(uvw_scaled[:, 0:2], cell)  # drop the w component

    plan = NFFT(imsize, uv.shape[0])
    plan.x = uv.flatten()
    plan.precompute()
    plan.f = ms.data_array[:, :, 0, 0]
    dirty = plan.adjoint() / uv.shape[0]
    plt.imshow(np.flipud(np.transpose(np.real(dirty))))
def allFrequencies():
    """Form and show a dirty intensity image using every frequency channel.

    Stacks the wavelength-scaled (u, v) coordinates and the XX + YY
    visibilities of all channels, runs a single adjoint NFFT over the combined
    set, displays the dirty image and prints its peak. Returns 0.
    """
    imsize = (256, 256)
    # arcseconds per pixel; the v axis is flipped because it is empirically
    # right — the axes in the ms are not really standardized.
    cell = np.radians(np.asarray([0.5, -0.5]) / 3600.0)

    ms = MS_jon()
    ms.read_ms("simkat64-default.ms")
    wavelengths = ms.freq_array[0] / constants.c
    n_rows = ms.uvw_array.shape[0]

    uv = np.zeros((n_rows * wavelengths.size, 2))
    vis = np.zeros(n_rows * wavelengths.size, dtype=np.complex128)
    for chan in range(wavelengths.size):
        lo = chan * n_rows
        hi = lo + n_rows
        scaled = np.dot(ms.uvw_array, np.diag(np.repeat(wavelengths[chan], 3)))
        # skip the w component
        uv[lo:hi] = scaled[:, 0:2]
        # add the XX and YY polarizations to get an intensity
        vis[lo:hi] = ms.data_array[:, 0, chan, 0] + ms.data_array[:, 0, chan, 3]

    uv = np.multiply(uv, cell)
    plan = NFFT(imsize, uv.shape[0])
    plan.x = uv.flatten()
    plan.precompute()
    plan.f = vis
    dirty = plan.adjoint() / uv.shape[0] / 2
    plt.imshow(np.real(dirty))
    print(np.max(np.real(dirty)))
    return 0
# Script entry point: image all frequency channels when the module is run.
allFrequencies()
{
"api_name": "numpy.asarray",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "mslib.MS_jon",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "scipy.constants.c",
"lin... |
39498051069 | from __future__ import absolute_import
__author__ = "Angelo Ziletti"
__copyright__ = "Angelo Ziletti"
__maintainer__ = "Angelo Ziletti"
__email__ = "ziletti@fhi-berlin.mpg.de"
__date__ = "14/08/18"
import unittest
from ai4materials.models.clustering import design_matrix_to_clustering
import numpy as np
import sklearn.manifold
np.random.seed(42)
class TestClustering(unittest.TestCase):
    """Unit tests for ai4materials.models.clustering.design_matrix_to_clustering."""

    def setUp(self):
        pass

    def test_design_matrix_to_clustering(self):
        n_samples = 100
        n_dim = 5
        design_matrix = np.random.rand(n_samples, n_dim)
        # pre-selected method, no user-defined parameters, no probabilities
        labels, labels_prob, clustering = design_matrix_to_clustering(design_matrix, clustering_method='kmeans')
        self.assertIsInstance(labels, np.ndarray)
        self.assertIs(labels_prob, None)
        # gaussian_mixture also returns per-sample probabilities, all in [0, 1]
        labels, labels_prob, clustering = design_matrix_to_clustering(design_matrix,
                                                                      clustering_method='gaussian_mixture')
        self.assertIsInstance(labels, np.ndarray)
        self.assertGreaterEqual(np.amin(labels_prob), 0.0)
        self.assertLessEqual(np.amax(labels_prob), 1.0)
        # user-defined parameters must be forwarded to the clustering estimator
        n_clusters = 4
        labels, labels_prob, clustering = design_matrix_to_clustering(design_matrix, clustering_method='kmeans',
                                                                      clustering_params={'n_clusters': n_clusters})
        actual_n_clusters = clustering.get_params()['n_clusters']
        self.assertEqual(actual_n_clusters, n_clusters)
        self.assertIsInstance(labels, np.ndarray)
        # a pre-built clustering object can be passed directly.
        # BUGFIX: the module only imports sklearn.manifold, so accessing
        # sklearn.cluster as an attribute relied on it being loaded as a side
        # effect; import DBSCAN explicitly to avoid an AttributeError.
        from sklearn.cluster import DBSCAN
        dbscan = DBSCAN(eps=0.5, min_samples=50, leaf_size=10)
        clustering_labels, prob_labels, clustering = design_matrix_to_clustering(design_matrix, clustering_class=dbscan)
        self.assertIsInstance(clustering_labels, np.ndarray)
if __name__ == '__main__':
    # Run the suite with verbose output when executed as a script.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestClustering)
    unittest.TextTestRunner(verbosity=2).run(suite)
| angeloziletti/ai4materials | tests/test_clustering.py | test_clustering.py | py | 2,367 | python | en | code | 36 | github-code | 36 | [
{
"api_name": "numpy.random.seed",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.rand... |
25050897663 | # coding: utf-8
from typing import Any, Dict, List, Optional, Tuple, Union
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from matplotlib.colors import Colormap
from matplotlib.figure import Figure as mplFigure
from plotly.graph_objects import Trace
from plotly.graph_objs import Figure as plotlyFigure
from plotly.subplots import make_subplots
# <=== Utility functions for Both Plotting modules ===
def get_colorList(
    n: int, cmap: Optional[Union[str, Colormap]] = None, style: str = "matplotlib"
) -> List[Tuple[float, float, float, float]]:
    """Sample ``n`` colors from a matplotlib colormap.

    See `Choosing Colormaps in Matplotlib <https://matplotlib.org/stable/tutorials/colors/colormaps.html>` for details.

    Args:
        n (int) : Number of colors to sample.
        cmap (Optional[Union[str,Colormap]], optional) : A ``Colormap`` object or a
            color map name; ``None`` uses matplotlib's current default. Defaults to ``None``.
        style (str) : Output format — ``"matplotlib"`` yields RGBA tuples,
            ``"plotly"`` (or ``"rgba"``) yields ``"rgba(r,g,b,a)"`` strings
            with integer 0-255 channels and a float alpha.

    Returns:
        List[Tuple[float,float,float,float]]: ``n`` colors sampled at
        ``(i+1)/n`` — the colormap's low end (position 0) is never returned.
    """
    colormap = plt.get_cmap(name=cmap)
    sampled = [colormap((idx + 1) / n) for idx in range(n)]
    if style in ["plotly", "rgba"]:
        as_strings = []
        for rgba in sampled:
            channels = [str(int(c * 255)) for c in rgba[:3]] + [str(rgba[3])]
            as_strings.append("rgba({})".format(",".join(channels)))
        sampled = as_strings
    return sampled
def subplots_create(
    nrows: int = 1,
    ncols: int = 1,
    sharex: Union[bool, str] = False,
    sharey: Union[bool, str] = False,
    style: str = "matplotlib",
    **kwargs,
) -> Union[Tuple[mplFigure, Axes], plotlyFigure]:
    """Create a subplot grid for the requested plotting backend.

    Args:
        nrows (int, optional) : Number of rows of the subplot grid. Defaults to ``1``.
        ncols (int, optional) : Number of columns of the subplot grid. Defaults to ``1``.
        sharex (Union[bool,str], optional) : Controls sharing of properties among x-axes. Defaults to ``False``.
        sharey (Union[bool,str], optional) : Controls sharing of properties among y-axes. Defaults to ``False``.
        style (str, optional) : ``"plotly"`` returns a plotly ``Figure``; any
            other value falls back to matplotlib. Defaults to ``"matplotlib"``.

    Returns:
        Union[Tuple[mplFigure,Axes],plotlyFigure]: a plotly ``Figure`` for
        ``style == "plotly"``; otherwise matplotlib's ``(figure, axes)`` pair.
    """
    if style != "plotly":
        return plt.subplots(nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey, **kwargs)
    return make_subplots(rows=nrows, cols=ncols, shared_xaxes=sharex, shared_yaxes=sharey, **kwargs)
# === Utility functions for Both Plotting modules ===>
# <=== Utility functions for "plotly" ===
def trace_transition(from_fig: plotlyFigure, to_fig: plotlyFigure, row: int = 1, col: int = 1) -> plotlyFigure:
    """Copy every trace of ``from_fig`` into one subplot cell of ``to_fig``.

    Args:
        from_fig (Figure) : Figure (e.g. built by ``plotly.express``) whose traces are moved.
        to_fig (Figure) : Destination figure with a subplot grid.
        row (int, optional) : Target subplot row. Defaults to ``1``.
        col (int, optional) : Target subplot column. Defaults to ``1``.

    Returns:
        Figure: ``to_fig`` now containing ``from_fig``'s traces.
    """

    def _move(trace: Trace):
        """Attach one trace to the destination cell."""
        # Group moved traces per cell so their legend entries toggle together.
        trace.legendgroup = f"{col}-{row}"
        to_fig.add_trace(trace=trace, row=row, col=col)

    from_fig.for_each_trace(fn=_move)
    return to_fig
# === Utility functions for "plotly" ===>
# <=== Utility functions for "matplotlib" ===
# === Utility functions for "matplotlib" ===>
| iwasakishuto/TeiLab-BasicLaboratoryWork-in-LifeScienceExperiments | teilab/utils/plot_utils.py | plot_utils.py | py | 5,241 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Optional",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.colors.Colormap",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.py... |
22377527144 | #!/usr/bin/env python3
import nibabel as nib
from nibabel import processing
import numpy as np
import scipy
import matplotlib.pyplot as plt
import matplotlib
from scipy import ndimage
from scipy.interpolate import RegularGridInterpolator
from scipy import optimize
import os, glob
import json
import time
import shutil
def calc_center_of_mass(img_data, affine):
    """Center of mass of a 3D image, mapped into world (xyz) coordinates.

    Returns a length-4 homogeneous vector [x, y, z, 1] obtained by applying
    ``affine`` to the intensity-weighted voxel-space center of mass.
    """
    com_i, com_j, com_k = ndimage.center_of_mass(img_data)
    return np.matmul(affine, np.array([com_i, com_j, com_k, 1]))
def calc_affine(original_affine, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z, center_of_mass = None):
    """Build a 4x4 rigid-body transform from translations and Euler angles.

    Rotations are applied in order x, then y, then z, pivoting about
    ``center_of_mass`` (xyz) when given instead of the origin; the translation
    column is then set to (trans_x, trans_y, trans_z).

    Parameters
    ----------
    original_affine : unused; kept for interface compatibility
    trans_x, trans_y, trans_z : float
        Translations along x/y/z.
    rot_x, rot_y, rot_z : float
        Rotations (radians) about x/y/z.
    center_of_mass : array-like or None
        Rotation center in world coordinates (first three entries used).

    Returns
    -------
    np.ndarray, shape (4, 4)
    """
    mat_rot_x = np.eye(4)
    mat_rot_y = np.eye(4)
    mat_rot_z = np.eye(4)

    # Shift the center of mass to the origin before rotating so rotations
    # pivot about the COM rather than the scanner origin.
    temp_COM_mat = np.eye(4)
    if center_of_mass is not None:
        temp_COM_mat[0, 3] = -1 * center_of_mass[0]
        temp_COM_mat[1, 3] = -1 * center_of_mass[1]
        temp_COM_mat[2, 3] = -1 * center_of_mass[2]

    # Rotation about x.
    mat_rot_x[1, 1] = np.cos(rot_x)
    mat_rot_x[2, 2] = np.cos(rot_x)
    mat_rot_x[1, 2] = -np.sin(rot_x)
    mat_rot_x[2, 1] = np.sin(rot_x)

    # Rotation about y.
    mat_rot_y[0, 0] = np.cos(rot_y)
    mat_rot_y[2, 2] = np.cos(rot_y)
    mat_rot_y[2, 0] = -np.sin(rot_y)
    mat_rot_y[0, 2] = np.sin(rot_y)

    # Rotation about z. BUGFIX: the original wrote these entries into
    # mat_rot_y, clobbering the y rotation and leaving mat_rot_z identity.
    mat_rot_z[0, 0] = np.cos(rot_z)
    mat_rot_z[1, 1] = np.cos(rot_z)
    mat_rot_z[0, 1] = -np.sin(rot_z)
    mat_rot_z[1, 0] = np.sin(rot_z)

    # Compose: COM shift, Rx, Ry, Rz, then undo the COM shift.
    new_affine = np.matmul(mat_rot_x, temp_COM_mat)
    new_affine = np.matmul(mat_rot_y, new_affine)
    new_affine = np.matmul(mat_rot_z, new_affine)
    new_affine = np.matmul(np.linalg.inv(temp_COM_mat), new_affine)

    # NOTE(review): overwriting (rather than adding to) the translation column
    # discards the COM compensation computed above, so center_of_mass only
    # changes which translation the optimizer must find — confirm intent.
    new_affine[0, 3] = trans_x
    new_affine[1, 3] = trans_y
    new_affine[2, 3] = trans_z
    return new_affine
def grab_orig_inds_xyz_mat(image_data, affine):
    """World coordinates and values for every voxel of a 3D image.

    Returns a tuple (world_xyz, values): ``world_xyz`` has shape (4, n_voxels)
    in homogeneous coordinates (affine applied to each ijk index), and
    ``values`` is a flattened copy of the image data in matching order.
    """
    n_voxels = int(np.prod(image_data.shape))
    ijk = np.indices(image_data.shape).reshape(3, n_voxels)
    homogeneous = np.vstack([ijk, np.ones((1, n_voxels))])
    world_xyz = np.matmul(affine, homogeneous)
    return world_xyz, image_data.flatten().copy()
def get_new_xyzs(transformation, original_xyzs):
    """Apply a 4x4 homogeneous transform to a (4, N) coordinate matrix."""
    return transformation @ original_xyzs
def grab_image_vals(img_data, img_affine, inds, interp_method = 'linear'):
    """Sample a 3D image at world (xyz, homogeneous) coordinates.

    ``inds`` is a (4, N) matrix of homogeneous world coordinates; they are
    mapped to voxel indices via the inverse of ``img_affine`` and interpolated
    with the given method. Out-of-bounds points yield NaN.
    """
    axes = tuple(np.arange(img_data.shape[dim]) for dim in range(3))
    sampler = RegularGridInterpolator(axes, img_data, method=interp_method, bounds_error=False)
    ijk = np.matmul(np.linalg.inv(img_affine), inds)
    return sampler(ijk[0:3, :].transpose())
def make_alignment_images(full_registered_nifti_path, localizers_arr_path, output_figures_folder, close_figures = True):
    '''Make plots to show overlap between some full volumetric nifti and localizers

    This function makes overlays to show the alignment between the image
    represented by full_registered_nifti_path and the images within
    localizers_arr_path. The overlays are saved to output_figures_folder.

    Parameters
    ----------
    full_registered_nifti_path : str
        Path to full 3d nifti image, probably an image that
        is registered to the localizers
    localizers_arr_path : list
        List of paths to localizer images that will be used
        to generate overlays
    output_figures_folder : str
        Path to the folder to store overlays. This will be
        created if it doesn't already exist
    close_figures : bool, optional
        Close each matplotlib figure after it is saved (default True)

    Returns
    -------
    None
        NOTE(review): slice_specific_corrs is initialized but never populated
        or returned, although the original docstring promised per-slice
        correlations — confirm whether they were meant to be computed.
    '''
    # Load the full volume and build a linear interpolator over voxel indices.
    full_img = nib.load(full_registered_nifti_path)
    full_data = full_img.get_fdata()
    full_affine = full_img.affine
    i = np.arange(0,full_data.shape[0])
    j = np.arange(0,full_data.shape[1])
    k = np.arange(0,full_data.shape[2])
    interp = RegularGridInterpolator((i, j, k), full_data, method = 'linear', bounds_error = False)
    slice_specific_corrs = []  # never filled; see NOTE in docstring
    if os.path.exists(output_figures_folder) == False:
        os.makedirs(output_figures_folder)
    for i, temp_localizer in enumerate(localizers_arr_path):
        temp_loc_img = nib.load(temp_localizer)
        temp_loc_data = temp_loc_img.get_fdata()
        # Slices are taken along the thinnest axis (the localizer stack axis).
        smallest_dim = np.argmin(temp_loc_data.shape)
        num_slices = temp_loc_data.shape[smallest_dim]
        if num_slices > 4:
            # Sample 3 interior slices, avoiding the stack edges.
            slices = np.round(np.linspace(0, num_slices, 6)[1:-2]).astype(int)
        else:
            slices = np.linspace(0,num_slices - 1,num_slices).astype(int)
        # World (xyz) coordinates for every localizer voxel, reshaped to 4D.
        flattened_inds_vals = grab_orig_inds_xyz_mat(temp_loc_data, temp_loc_img.affine)
        reshaped_inds = flattened_inds_vals[0].reshape((4, temp_loc_data.shape[0], temp_loc_data.shape[1], temp_loc_data.shape[2]))
        for temp_slice_num in slices:
            # Pull the coordinate plane and pixel data for this slice.
            if smallest_dim == 0:
                temp_slice = reshaped_inds[:,temp_slice_num,...]
                temp_slice_data = temp_loc_data[temp_slice_num,:,:]
            elif smallest_dim == 1:
                temp_slice = reshaped_inds[:,:,temp_slice_num,...]
                temp_slice_data = temp_loc_data[:,temp_slice_num,:]
            elif smallest_dim == 2:
                temp_slice = reshaped_inds[:,:,:,temp_slice_num]
                temp_slice_data = temp_loc_data[:,:,temp_slice_num]
            else:
                raise ValueError('Error: localizer should be 3d image')
            flattened_slice_inds = temp_slice.reshape((temp_slice.shape[0], int(temp_slice.shape[1]*temp_slice.shape[2])))
            #Find values in the full 3d image that correspond to the
            #current slice in the current localizer image
            inds_xyz_to_ijk = np.matmul(np.linalg.inv(full_affine), flattened_slice_inds)
            out_vals = interp(inds_xyz_to_ijk[0:3,:].transpose())
            out_vals = out_vals.reshape((temp_slice.shape[1], temp_slice.shape[2]))
            #Plot: resampled full volume, the localizer slice, and their
            #z-scored difference, each with the same red reference grid.
            plt.figure(dpi=200, figsize=(4,10))
            plt.subplot(3,1,1)
            plt.imshow(out_vals)
            plt.xticks(np.arange(0,out_vals.shape[0],25), labels='')
            plt.yticks(np.arange(0,out_vals.shape[1],25), labels='')
            plt.gca().grid(color='red', linestyle='-.', linewidth=1)
            plt.title('Full Volumetric Img.')
            plt.subplot(3,1,2)
            plt.imshow(temp_slice_data)
            plt.title('Localizer Img.')
            plt.xticks(np.arange(0,out_vals.shape[0],25), labels='')
            plt.yticks(np.arange(0,out_vals.shape[1],25), labels='')
            plt.gca().grid(color='red', linestyle='-.', linewidth=1)
            plt.subplot(3,1,3)
            # Difference of z-scored images so intensity scales are comparable.
            plt.imshow((temp_slice_data - np.nanmean(temp_slice_data))/np.nanstd(temp_slice_data) - (out_vals - np.nanmean(out_vals))/np.nanstd(out_vals))
            plt.title('Difference')
            plt.xticks(np.arange(0,out_vals.shape[0],25), labels='')
            plt.yticks(np.arange(0,out_vals.shape[1],25), labels='')
            plt.gca().grid(color='red', linestyle='-.', linewidth=1)
            plt.tight_layout()
            plt.savefig(os.path.join(output_figures_folder, 'localizer_{}_slice_{}.png'.format(i, temp_slice_num)), bbox_inches='tight')
            if close_figures:
                plt.close()
    return
def calc_loss(af_vals, localizer_imgs, localizer_vals, reference_data, reference_affine, center_of_mass, xyz_s_list):
    """Registration loss: 1 - Pearson correlation.

    Transforms each localizer's voxel coordinates by the candidate rigid-body
    parameters ``af_vals`` (tx, ty, tz, rx, ry, rz), samples the reference
    volume at the transformed points and correlates those samples against the
    localizer intensities (NaN reference samples are excluded).
    """
    transformed_xyz = []
    for idx, loc_img in enumerate(localizer_imgs):
        candidate = calc_affine(
            loc_img.affine,
            af_vals[0], af_vals[1], af_vals[2],
            af_vals[3], af_vals[4], af_vals[5],
            center_of_mass=center_of_mass,
        )
        transformed_xyz.append(get_new_xyzs(candidate, xyz_s_list[idx]))
    stacked_xyz = np.hstack(transformed_xyz)
    reference_vals = grab_image_vals(reference_data, reference_affine, stacked_xyz, interp_method='linear')
    valid = ~np.isnan(reference_vals)
    return 1 - np.corrcoef(reference_vals[valid], localizer_vals[valid])[0, 1]
def calc_localizer_val_bins(localizer_vals):
    """Discretize intensity values into histogram bin indices.

    Bin width follows Scott's rule, 3.49*std*N^(-1/3) — per page 151 of the
    Jenkinson registration paper this is optimal for the unimodal case.

    Returns an array of the same shape as ``localizer_vals`` holding the bin
    index (0-based float) of each value; the top edge is inclusive so the
    maximum lands in the last bin.
    """
    std = np.std(localizer_vals)
    bin_widths = 3.49 * std * np.power(localizer_vals.shape[0], -1 / 3)
    num_bins = int((np.max(localizer_vals) - np.min(localizer_vals)) / bin_widths)
    # BUGFIX: the original indexed np.histogram()[0] (the *counts*) as if they
    # were bin edges, producing meaningless thresholds. Use the edges instead.
    _, bin_edges = np.histogram(localizer_vals, num_bins)
    binned_data = np.zeros(localizer_vals.shape)
    for i in range(len(bin_edges) - 1):
        in_bin = (localizer_vals >= bin_edges[i]) & (localizer_vals < bin_edges[i + 1])
        binned_data[in_bin] = i
    binned_data[localizer_vals >= bin_edges[-1]] = len(bin_edges) - 2
    return binned_data
def calc_corr_ratio_loss(af_vals, localizer_imgs, localizer_vals, reference_data, reference_affine, mask_data, center_of_mass, xyz_s_list, make_plot = False, image_output_path = None):
    """Correlation-ratio loss between (binned) localizer values and a reference volume.

    Transforms each localizer's voxel coordinates by the candidate rigid-body
    parameters ``af_vals``, samples the reference volume (linear) and the mask
    (nearest) at those points, and computes the correlation ratio of the
    masked, non-NaN reference samples grouped by localizer value. Lower is
    better. Optionally produces a diagnostic scatter plot.
    """
    # Transform every localizer's voxel coordinates by the candidate params.
    new_xyz_s_list = []
    for i, temp_img in enumerate(localizer_imgs):
        transform = calc_affine(localizer_imgs[i].affine, af_vals[0], af_vals[1], af_vals[2],
                                af_vals[3], af_vals[4], af_vals[5], center_of_mass=center_of_mass)
        new_xyz_s_list.append(get_new_xyzs(transform, xyz_s_list[i]))
    new_xyz_s_arr = np.hstack(new_xyz_s_list)
    reference_vals = grab_image_vals(reference_data, reference_affine, new_xyz_s_arr, interp_method='linear')
    mask_vals = grab_image_vals(mask_data, reference_affine, new_xyz_s_arr, interp_method='nearest')
    keep = (np.isnan(reference_vals) == False) * (mask_vals > 0.5)
    good_ref = reference_vals[keep]
    good_loc = localizer_vals[keep]
    num_good = good_loc.shape[0]
    # BUGFIX: the original counted occurrences within the *unique* value array,
    # so every n_k was 1 and the category weights were wrong; count within
    # good_loc via return_counts instead.
    unique_loc_vals, loc_counts = np.unique(good_loc, return_counts=True)
    corr_ratio = 0
    for bin_val, n_k in zip(unique_loc_vals, loc_counts):
        corr_ratio += (n_k / num_good) * np.var(good_ref[good_loc == bin_val])
    corr_ratio = corr_ratio / np.var(good_ref)
    # The original also computed `1 - corr_ratio` and immediately discarded it;
    # the loss actually returned is the within-bin variance ratio itself.
    loss = corr_ratio
    if make_plot:
        make_corr_ratio_loss_plot(unique_loc_vals, good_loc, good_ref, output_image_path=image_output_path, close_image=False)
    return loss
def make_corr_ratio_loss_plot(unique_loc_vals, good_loc, good_ref, output_image_path = None, close_image = True):
    """Scatter signed log10 deviations of reference values per localizer bin.

    For each bin, reference samples are centered and plotted as
    sign(dev)*log10(|dev|) against a horizontal jitter inside [bin, bin + 1).
    Even- and odd-numbered bins go to separate scatter calls (two colors).
    Optionally saves the figure and closes it.
    """
    plt.figure(dpi=100)
    even_devs, even_jitter = [], []
    odd_devs, odd_jitter = [], []
    for bin_idx in range(unique_loc_vals.shape[0]):
        ref_in_bin = good_ref[good_loc == unique_loc_vals[bin_idx]]
        # Sign-preserving log10 of the absolute deviation from the bin mean.
        signed_log_dev = np.log10(np.absolute(ref_in_bin - np.mean(ref_in_bin))) * np.sign(ref_in_bin - np.mean(ref_in_bin))
        jitter = np.random.uniform(low=bin_idx, high=bin_idx + 1, size=signed_log_dev.shape)
        if np.mod(bin_idx, 2) == 0:
            even_devs.append(signed_log_dev)
            even_jitter.append(jitter)
        else:
            odd_devs.append(signed_log_dev)
            odd_jitter.append(jitter)
    plt.scatter(np.hstack(even_jitter), np.hstack(even_devs), s=0.05)
    plt.scatter(np.hstack(odd_jitter), np.hstack(odd_devs), s=0.05)
    plt.xlabel('Bin Number')
    plt.ylabel('sign(Deviation)*log10(absolute(Deviation))')
    plt.axhline(2.5, linestyle='--', color='grey', linewidth=1)
    plt.axhline(-2.5, linestyle='--', color='grey', linewidth=1)
    if output_image_path is not None:
        plt.savefig(output_image_path)
    if close_image:
        plt.close()
    return
def make_readme(affine_readme_path):
    """Write a README explaining the registration outputs.

    Describes the folder contents and shows how to apply transform_mat.npy to
    move an image from reference space into localizer space.
    """
    readme_parts = [
        'This folder contains registration results from a high res anatomical template to a localizer thats presumed to be in MRS voxel space.\n',
        'The details of what images were registered can be found in registration_summary.json and figures showing the quality of the registration can be found in the figures folder.\n',
        'The new copy of the reference image (now aligned to the localizer) is found at reference_img_aligned_to_localizer.nii.gz\n\n',
        'How to use transform_mat.npy file:\n\n\n',
        'import nibabel as nib\nimport numpy as np\npath_to_image_in_reference_space = ""\ntemp_img = nib.load(path_to_image_in_reference_space)\n',
        'transform_mat = np.load("transform_mat.npy")\ntemp_img.affine = np.matmul(transform_mat, temp_img.affine)\n',
        'nib.save(temp_img, "/some/new/path/for/image/now/in/localizer/space/img.nii.gz")',
    ]
    with open(affine_readme_path, 'w') as readme_file:
        readme_file.write(''.join(readme_parts))
    return
def localizer_alignment_anat_update_osprey(anat_files_dict, registration_output_folder, localizer_paths):
'''Registers anat reference image to the localizer image(s)
Parameters
----------
anat_files_dict : dict
Has (at minimum) key 'files_nii' and optionally 'files_seg' that will be
registered to the localizer image. After registration,
this path will be reset to be the path to the new image
following registration.
registration_output_folder : str
Path to the folder that will be created to store registration
results. This will be subject/ses and in certain cases run
specific.
localizer_paths : list of strings
The paths to the localizer image or images to be registered.
You will only have multiple entries in this list if axial
images were stored in different images than sagital or coronal.
Returns
-------
anat_files_dict : dict
The same dictionary as before, but now the 'files_nii' key
has been updated to point to the registered image
'''
# Set up the output tree (figures/ subfolder) and write the how-to readme.
output_folder = registration_output_folder
if os.path.exists(os.path.join(output_folder, 'figures')) == False:
os.makedirs(os.path.join(output_folder, 'figures'))
make_readme(os.path.join(output_folder, 'readme.txt'))
#Load the reference image
reference_path = anat_files_dict['files_nii'][0]
reference_img = nib.load(reference_path)
reference_data = reference_img.get_fdata()
#Load and dilate brain mask by 10 iterations ... this keeps the scalp in registration but not neck
#USING MASK SEEMED TO HURT REGISTRATION FOR LOWRES LOCALIZERS SO AM EXCLUDING THIS FOR NOW
#mask_data = nib.load(brain_mask_path).get_fdata()
#mask_data = ndimage.binary_dilation(mask_data, iterations = 10)
#mask_data = mask_data.astype(float) + 1
# A smoothed copy (10 units passed to nibabel's smooth_image) drives the
# first, coarse optimization stage; the raw data drives the refinement stage.
reference_data_10mm_smoothing = processing.smooth_image(reference_img, 10).get_fdata()
reference_com = calc_center_of_mass(reference_data, reference_img.affine)
mask_data = np.ones(reference_data.shape) #we arent using a mask right now so this is just a dummy mask
#reference_com = None
# Collect, per localizer image, the coordinates of its voxels (via
# grab_orig_inds_xyz_mat) and the matching intensity values.
localizer_imgs = []
xyz_s_list = []
vals = []
localizer_sizes = []
for i, temp_path in enumerate(localizer_paths):
localizer_imgs.append(nib.load(temp_path))
temp_xyz, temp_vals = grab_orig_inds_xyz_mat(localizer_imgs[i].get_fdata(), localizer_imgs[i].affine)
xyz_s_list.append(temp_xyz)
vals.append(temp_vals)
localizer_sizes.append(localizer_imgs[i].get_fdata().size)
xyz_s_arr = np.hstack(xyz_s_list)
localizer_vals = np.hstack(vals)
localizer_vals = calc_localizer_val_bins(localizer_vals) #NOW THESE ARE BINS
# Sample the reference image at the localizer voxel positions; values that
# fall outside the reference volume come back as NaN and are dropped.
reference_vals = grab_image_vals(reference_data, reference_img.affine, xyz_s_arr, interp_method = 'linear')
# NOTE(review): good_ref/good_loc are computed here (and again after the
# optimization below) but never read in this function -- confirm before removing.
good_ref = reference_vals[np.isnan(reference_vals) == False]
good_loc = localizer_vals[np.isnan(reference_vals) == False]
print('Original Ref/Localizer Correlation Ratio (0 is best, 1 is worst):')
original_corr = calc_corr_ratio_loss([0,0,0,0,0,0], localizer_imgs, localizer_vals, reference_data, reference_img.affine, mask_data, reference_com, xyz_s_list, make_plot = True, image_output_path = os.path.join(registration_output_folder, 'figures', 'corr_ratio_pre_registration.png'))
print(original_corr)
# Rigid-body search space: presumably 3 translations (+-100) and 3
# rotations (+-1.5, units unclear from here) -- TODO confirm in calc_affine.
bounds_10mm = [[-100,100],[-100,100],[-100,100],[-1.5,1.5],[-1.5,1.5],[-1.5,1.5]]
tic = time.perf_counter()
options = {'maxfun':5000, 'maxiter':50}
# Stage 1: optimize against the smoothed reference (coarse alignment);
# Stage 2: restart from that solution against the unsmoothed reference.
results_tnc_10mm = optimize.minimize(calc_corr_ratio_loss, [0,0,0,0,0,0], args=(localizer_imgs, localizer_vals, reference_data_10mm_smoothing, reference_img.affine, mask_data, reference_com, xyz_s_list),
method='TNC', jac=None, bounds=bounds_10mm, options=options)
results_tnc_00mm = optimize.minimize(calc_corr_ratio_loss, results_tnc_10mm.x, args=(localizer_imgs, localizer_vals, reference_data, reference_img.affine, mask_data, reference_com, xyz_s_list),
method='TNC', jac=None, bounds=bounds_10mm, options=options)
toc = time.perf_counter()
print(f"Ran optimization in {toc - tic:0.4f} seconds")
###Illustrate the performance of the new transformation
affine_transforms = []
new_xyz_s_list = []
for i, temp_img in enumerate(localizer_imgs):
affine_transforms.append(calc_affine(localizer_imgs[i].affine, results_tnc_00mm.x[0], results_tnc_00mm.x[1], results_tnc_00mm.x[2], results_tnc_00mm.x[3], results_tnc_00mm.x[4], results_tnc_00mm.x[5], reference_com)) #transform to apply
new_xyz_s_list.append(get_new_xyzs(affine_transforms[i], xyz_s_list[i]))
new_xyz_s_arr = np.hstack(new_xyz_s_list)
reference_vals = grab_image_vals(reference_data, reference_img.affine, new_xyz_s_arr, interp_method = 'linear')
good_ref = reference_vals[np.isnan(reference_vals) == False]
good_loc = localizer_vals[np.isnan(reference_vals) == False]
registered_corr = calc_corr_ratio_loss(results_tnc_00mm.x, localizer_imgs, localizer_vals, reference_data, reference_img.affine, mask_data, reference_com, xyz_s_list, make_plot = True, image_output_path = os.path.join(registration_output_folder, 'figures', 'corr_ratio_post_registration.png'))
# Lower loss is better (0 best, 1 worst per the print above): keep the fitted
# transform only if it improved on the identity, otherwise fall back to
# np.eye(4) and reuse the pre-registration figure as the post-registration one.
if original_corr > registered_corr:
inv_affine = calc_affine(np.eye(4), results_tnc_00mm.x[0], results_tnc_00mm.x[1], results_tnc_00mm.x[2], results_tnc_00mm.x[3], results_tnc_00mm.x[4], results_tnc_00mm.x[5], reference_com)
inv_affine = np.linalg.inv(inv_affine)
else:
inv_affine = np.eye(4)
registered_corr = original_corr
shutil.copyfile(os.path.join(registration_output_folder, 'figures', 'corr_ratio_pre_registration.png'), os.path.join(registration_output_folder, 'figures', 'corr_ratio_post_registration.png'))
print('Registered Ref/Localizer Correlation (0 is best, 1 is worst):')
print(registered_corr)
# NOTE(review): the following if/else repeats the block above (minus the
# figure copy). After the first block runs, registered_corr <= original_corr
# always holds, so this recomputes the same inv_affine or re-assigns the
# identity -- redundant but behavior-neutral; confirm and consider removing.
if original_corr > registered_corr:
inv_affine = calc_affine(np.eye(4), results_tnc_00mm.x[0], results_tnc_00mm.x[1], results_tnc_00mm.x[2], results_tnc_00mm.x[3], results_tnc_00mm.x[4], results_tnc_00mm.x[5], reference_com)
inv_affine = np.linalg.inv(inv_affine)
else:
inv_affine = np.eye(4)
registered_corr = original_corr
# Bake the chosen transform into the reference image's affine and save the
# realigned image plus the raw transform matrix for reuse with other images.
new_affine = np.matmul(inv_affine, reference_img.affine)
new_img = nib.nifti1.Nifti1Image(reference_data, new_affine)
registered_output_image_name = os.path.join(output_folder, 'reference_img_aligned_to_localizer.nii.gz')
nib.save(new_img, registered_output_image_name)
np.save(os.path.join(output_folder, 'transform_mat.npy'), inv_affine) #This can be used with other images to update their affines
# Optionally realign the segmentation image with the same affine and point
# the dict at the new copy (nested-list structure mirrors the input shape).
if 'files_seg' in anat_files_dict.keys():
new_seg_img = nib.load(anat_files_dict['files_seg'][0][0])
new_seg_img = nib.Nifti1Image(new_seg_img.get_fdata(), new_affine)
print('Saving new segmentation image.')
nib.save(new_seg_img, os.path.join(output_folder, 'reference_seg_aligned_to_localizer.nii.gz'))
anat_files_dict['files_seg'] = [[os.path.join(output_folder, 'reference_seg_aligned_to_localizer.nii.gz')]]
# QC figures plus a JSON summary of what was registered and how well.
make_alignment_images(registered_output_image_name, localizer_paths, os.path.join(output_folder, 'figures'))
registration_dict = {"reference_img" : reference_path,
"localizer_imgs": localizer_paths,
"reference_localizer_corr_ratio_pre_registration" : np.round(original_corr, 8),
"reference_localizer_corr_ratio_post_registration" : np.round(registered_corr, 8)}
with open(os.path.join(output_folder, 'registration_summary.json'), 'w') as f:
f.write(json.dumps(registration_dict, indent = 6))
anat_files_dict['files_nii'] = [registered_output_image_name]
return anat_files_dict | erikglee/OSPREY_Containerization | code/localizer_alignment.py | localizer_alignment.py | py | 21,949 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.ndimage.center_of_mass",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.matmul",... |
13030737473 | import sys
from PySide6.QtCore import Qt, QTimer, QSettings, QThread, QRegularExpression
from PySide6.QtGui import QIcon, QAction, QPixmap, QIntValidator, QRegularExpressionValidator
from PySide6.QtWidgets import QApplication, QSystemTrayIcon, QMenu, \
QLabel, QWidgetAction, QWidget, QHBoxLayout, QMessageBox, QFormLayout, QLineEdit, QPushButton
import qdarktheme
from modules.flask_thread import FlaskThread
from modules.flask_factory import create_flask_app
import resources
__version__ = "0.1.1"
class IconLabel(QWidget):
    """Menu-header widget: a small life-buoy icon followed by a text label."""

    # Gap (in px) inserted between the icon and the text; negative pulls
    # the label closer to the icon.
    HorizontalSpacing = -2

    def __init__(self, text):
        """Build the icon+text row for the given caption string."""
        super().__init__()
        row = QHBoxLayout()
        row.setContentsMargins(2, 0, 0, 0)
        self.setLayout(row)
        icon = QLabel()
        icon.setPixmap(QPixmap(":/icons/feather/life-buoy.svg").scaledToWidth(15))
        icon.setMaximumWidth(20)
        icon.setMaximumHeight(25)
        row.addWidget(icon)
        row.addSpacing(self.HorizontalSpacing)
        caption = QLabel(text)
        caption.setStyleSheet("QLabel {background: rgba(41.000, 42.000, 45.000, 1.000)}")
        row.addWidget(caption)
class ButtonLabel(QWidget):
    """Menu row holding a checkable ON/OFF toggle button.

    The button shows red when unchecked (OFF) and green when checked (ON);
    clicking it flips the label text to match the checked state.
    """

    horizontal_spacing = 0

    def __init__(self):
        """Create the toggle button (initially unchecked, labelled OFF)."""
        super().__init__()
        style = """
QPushButton {
background-color: darkred;
}
QPushButton:checked {
background-color: green;
}
"""
        row = QHBoxLayout()
        row.setContentsMargins(1, 5, 8, 5)
        self.setLayout(row)
        self.button = QPushButton()
        self.button.setText("OFF")
        self.button.setCheckable(True)
        self.button.setMinimumWidth(60)
        self.button.setStyleSheet(style)
        row.addWidget(self.button)
        self.button.clicked.connect(self.on_click)

    def on_click(self):
        """Sync the button caption with its checked state."""
        self.button.setText("ON" if self.button.isChecked() else "OFF")
class LabelEdit(QWidget):
    """Settings-menu row: a caption plus a QLineEdit bound to one QSettings key.

    The edit is pre-filled from the stored value and every keystroke writes
    straight back to QSettings, so changes persist immediately.
    """

    HorizontalSpacing = 2

    def __init__(self, label_txt, key, settings):
        """Build the row.

        Parameters
        ----------
        label_txt : str
            Caption shown next to the edit field.
        key : str
            QSettings key this edit reads from and writes to.
        settings : QSettings
            Shared settings store.
        """
        super().__init__()
        self.key = key
        self.settings = settings
        value = self.settings.value(key)
        layout = QFormLayout()
        layout.setContentsMargins(5, 2, 5, 2)
        self.setLayout(layout)
        label = QLabel(label_txt)
        label.setMinimumWidth(70)
        self.edit = QLineEdit()
        self.edit.setValidator(self.get_validator())
        self.edit.setText(value)
        self.edit.setMaximumWidth(70)
        layout.addRow(label, self.edit)
        self.edit.textChanged.connect(self.on_change)

    def get_validator(self):
        """Return the input validator for this key.

        The 'institution' field must match three capitals followed by four
        digits (e.g. ABC1234); every other field is integer-only.
        (Fix: removed a leftover debug print(self.key) that wrote to stdout
        each time a row was constructed.)
        """
        if self.key == "institution":
            rx = QRegularExpression()
            rx.setPattern("[A-Z]{3}\\d{4}")
            return QRegularExpressionValidator(rx, self)
        return QIntValidator()

    def on_change(self):
        """Persist the current edit text under this row's settings key."""
        self.settings.setValue(self.key, self.edit.text())
class SysTrayApp:
    """System-tray front end for the GetMod Flask relay.

    Builds the tray icon and its context menu (version header, ON/OFF
    toggle, settings sub-menu, Exit), persists configuration via QSettings,
    and starts/stops the Flask server on a background QThread.

    Note: constructing this class enters the Qt event loop and blocks until
    the application exits.
    """

    # HTTP status code -> human-readable description, passed to the Flask
    # app so it can report the target server's response in plain text.
    status_desc = {
        100: 'Continue',
        101: 'Switching Protocols',
        102: 'Processing',
        200: 'OK',
        201: 'Created',
        202: 'Accepted',
        203: 'Non-authoritative Information',
        204: 'No Content',
        205: 'Reset Content',
        206: 'Partial Content',
        207: 'Multi-Status',
        208: 'Already Reported',
        226: 'IM Used',
        300: 'Multiple Choices',
        301: 'Moved Permanently',
        302: 'Found',
        303: 'See Other',
        304: 'Not Modified',
        305: 'Use Proxy',
        307: 'Temporary Redirect',
        308: 'Permanent Redirect',
        400: 'Bad Request',
        401: 'Unauthorized',
        402: 'Payment Required',
        403: 'Forbidden',
        404: 'Not Found',
        405: 'Method Not Allowed',
        406: 'Not Acceptable',
        407: 'Proxy Authentication Required',
        408: 'Request Timeout',
        409: 'Conflict',
        410: 'Gone',
        411: 'Length Required',
        412: 'Precondition Failed',
        413: 'Payload Too Large',
        414: 'Request-URI Too Long',
        415: 'Unsupported Media Type',
        416: 'Requested Range Not Satisfiable',
        417: 'Expectation Failed',
        418: 'Im a teapot',
        421: 'Misdirected Request',
        422: 'Unprocessable Entity',
        423: 'Locked',
        424: 'Failed Dependency',
        426: 'Upgrade Required',
        428: 'Precondition Required',
        429: 'Too Many Requests',
        431: 'Request Header Fields Too Large',
        444: 'Connection Closed Without Response',
        451: 'Unavailable For Legal Reasons',
        499: 'Client Closed Request',
        500: 'Internal Server Error',
        501: 'Not Implemented',
        502: 'Bad Gateway',
        503: 'Service Unavailable',
        504: 'Gateway Timeout',
        505: 'HTTP Version Not Supported',
        506: 'Variant Also Negotiates',
        507: 'Insufficient Storage',
        508: 'Loop Detected',
        510: 'Not Extended',
        511: 'Network Authentication Required',
        599: 'Network Connect Timeout Error'
    }

    def __init__(self, app):
        """Build the tray UI for the given QApplication and run the event loop."""
        self.app = app
        # Persistent settings store (platform-native backend).
        self.settings = QSettings("Region Västerbotten", "getmod")
        self.thread = QThread()  # replaced by a FlaskThread in start_flask()
        self.timer = QTimer()
        self.timer.timeout.connect(self.check_flask_status)
        self.tray = QSystemTrayIcon(QIcon(":/icons/feather/life-buoy.svg"), self.app)
        self.menu = QMenu()
        self.menu.setMinimumWidth(80)
        self.menu.setContentsMargins(10, 2, 2, 2)
        # Header row: icon plus app name/version.
        header = IconLabel("- GetMod " + __version__ + " -")
        header.setStyleSheet("margin-left: 0px; margin-top: 0px; margin-bottom: 5px")
        header_action = QWidgetAction(self.menu)
        header_action.setDefaultWidget(header)
        self.menu.addAction(header_action)
        # ON/OFF toggle that starts/terminates the Flask server thread.
        self.action_onoff = ButtonLabel()
        action_onoff = QWidgetAction(self.menu)
        action_onoff.setDefaultWidget(self.action_onoff)
        self.menu.addAction(action_onoff)
        self.action_onoff.button.clicked.connect(self.onoff_clicked)
        # Settings sub-menu: each row binds one QSettings key to an edit field.
        self.submenu_settings = self.menu.addMenu("Settings")
        self.submenu_settings.setMaximumWidth(200)
        listen = LabelEdit("Listen port", "listen_port", self.settings)
        listen_action = QWidgetAction(self.submenu_settings)
        listen_action.setDefaultWidget(listen)
        self.submenu_settings.addAction(listen_action)
        target = LabelEdit("Target port", "target_port", self.settings)
        target_action = QWidgetAction(self.submenu_settings)
        target_action.setDefaultWidget(target)
        self.submenu_settings.addAction(target_action)
        apikey = LabelEdit("API key", "apikey", self.settings)
        apikey_action = QWidgetAction(self.submenu_settings)
        apikey_action.setDefaultWidget(apikey)
        self.submenu_settings.addAction(apikey_action)
        # (local-name typo 'instutution' fixed; settings key unchanged)
        institution = LabelEdit("Institution", "institution", self.settings)
        institution_action = QWidgetAction(self.submenu_settings)
        institution_action.setDefaultWidget(institution)
        self.submenu_settings.addAction(institution_action)
        self.action_exit = QAction("Exit")
        self.action_exit.triggered.connect(self.exit)
        self.menu.addAction(self.action_exit)
        self.tray.setToolTip("GetMod - get request modifier")
        self.tray.setContextMenu(self.menu)
        self.tray.setVisible(True)
        self.tray.show()
        self.app.setStyleSheet(qdarktheme.load_stylesheet())
        # Blocks here until the Qt event loop exits, then terminates the process.
        sys.exit(self.app.exec())

    def exit(self):
        """Stop the Flask thread, hide the tray icon and quit the application."""
        self.thread.terminate()
        self.thread.wait()
        self.tray.hide()
        self.app.exit()

    def check_flask_status(self):
        """Warn the user if the Flask server thread failed to start."""
        if not self.thread.isRunning():
            # Bug fix: the original called self.action_onoff.setChecked(False),
            # but ButtonLabel is a plain QWidget with no setChecked(); the
            # checkable widget is the inner QPushButton. Also reset the caption
            # so the button text matches the unchecked state (mirrors on_click).
            self.action_onoff.button.setChecked(False)
            self.action_onoff.button.setText("OFF")
            msgBox = QMessageBox()
            msgBox.setIcon(QMessageBox.Critical)
            msgBox.setWindowIcon(QIcon(":/icons/feather/life-buoy.svg"))
            msgBox.setWindowTitle("Critical Error")
            msgBox.setText("Houston, the flask server did not start!")
            msgBox.exec()

    def onoff_clicked(self):
        """Toggle handler: start Flask when checked, terminate it otherwise."""
        if self.action_onoff.button.isChecked():
            self.start_flask()
        else:
            self.thread.terminate()
            print("Flask off!")

    def start_flask(self):
        """Create the Flask app from current settings and run it on a new thread."""
        apikey = self.settings.value('apikey')
        institution = self.settings.value('institution')
        listen_port = self.settings.value('listen_port')
        target_port = self.settings.value('target_port')
        flask_app = create_flask_app(apikey, institution, target_port, self.status_desc)
        self.thread = FlaskThread(flask_app, listen_port)
        self.thread.start()
        # One-off health check after 1s (singleShot is a static QTimer method).
        self.timer.singleShot(1000, self.check_flask_status)
# @staticmethod
# def create_flask_app(apikey, institution, relay_port, status_desc):
#
# site_relay = "http://localhost:" + str(relay_port)
#
# def args2str(args):
# dlist = list()
# for key, value in args.items():
# _str = f"{key}={value}"
# dlist.append(_str)
#
# return "&".join(dlist)
#
# flask_app = Flask(__name__)
#
# # @flask_app.route('/', defaults={'path': ''})
# @flask_app.route('/<path:path>', methods=['GET'])
# def proxy(path):
#
# def get_mod_path(request: flask.request, apikey, institution):
# args = request.args.to_dict()
# request_path = request.path
# outdata = {}
# outdata['apikey'] = apikey
# outdata['institution'] = institution
#
# for key in args:
# value = args[key]
# if key == "request":
# if value.startswith('BAM<'):
# new_key1 = "path"
# new_value1 = "file:///" + value.lstrip('BAM<')
# new_key2 = "filetype"
# new_value2 = "bam"
#
# outdata[new_key1] = new_value1.replace('\\', "/")
# outdata[new_key2] = new_value2
# request_path = "open"
# else:
# outdata[key] = value
# request_path = "search"
#
# return request_path, args2str(outdata)
#
# def error_response(e, site_relay, new_path):
# return f"<html><head></head><body><h1>Communication error!</h1>" \
# f"<p>Exception msg: {e}</p>" \
# f"<p>Target (host:port): {site_relay}</p>" \
# f"<p>Get request: {new_path}</p>" \
# f"</body></html>"
#
# if request.method == "GET" and request.path != "/favicon.ico":
#
# req_path, argstr = get_mod_path(request, apikey, institution)
#
# encoded_argstr = parse.quote(argstr, safe='&=')
#
# encoded_request = f'{site_relay}/{req_path}?{encoded_argstr}'
#
# print(encoded_request)
# print(argstr)
#
# try:
# ret = requests.get(encoded_request, timeout=10)
#
# status = int(ret.status_code)
#
# if status in range(200, 300):
# header = "Success!"
# else:
# header = "Problem!"
#
# return f"<html><head></head><body><h1>{header}</h1>" \
# f"<p>Target status code: {ret.status_code} {status_desc[status]}</p>" \
# f"<p>Target (host:port): {site_relay}</p>" \
# f"<p>Get request: {encoded_argstr}</p>" \
# "</body></html>"
#
# except requests.exceptions.HTTPError as errh:
# e = "Http Error: " + str(errh)
# return error_response(e, site_relay, encoded_argstr)
#
# except requests.exceptions.ConnectionError as errc:
# e = "Error Connecting: " + str(errc)
# return error_response(e, site_relay, encoded_argstr)
#
# except requests.exceptions.Timeout as errt:
# e = "Error Connecting: " + str(errt)
# return error_response(e, site_relay, encoded_argstr)
#
# except requests.exceptions.RequestException as err:
# e = "Error Connecting: " + str(err)
# return error_response(e, site_relay, encoded_argstr)
#
# return f"<html><head></head><body><h1>Something's wrong!</h1>" \
# f"<p>No errors detected but no valid response from target either ... </p>" \
# f"</body></html>"
#
# return flask_app
# Script entry point: build the Qt application and hand control to the tray app.
if __name__ == "__main__":
app = QApplication(sys.argv)
# NOTE(review): SysTrayApp.__init__ already calls sys.exit(self.app.exec()),
# so construction blocks until the app quits and the sys.exit below is
# normally unreachable -- consider moving the event-loop start out of __init__.
tray = SysTrayApp(app)
sys.exit(app.exec())
| gmc-norr/getmod | getmod.py | getmod.py | py | 13,431 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PySide6.QtWidgets.QWidget",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "PySide6.QtWidgets.QHBoxLayout",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "PySide6.QtWidgets.QLabel",
"line_number": 27,
"usage_type": "call"
},
{
"... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.