seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
23090428642 | # Created by Louis LAC 2019
from lxml.etree import Element, SubElement, tostring
from datetime import date
from pathlib import Path
class XMLTree:
    """Builds a GEDI-format XML annotation tree for one image.

    Parameters
    ----------
    image_path : str or Path
        Path to the annotated image; only its file name is stored in the XML.
    width, height : int
        Image dimensions, written as string attributes.
    user_name : str
        Annotator name recorded on the USER element.
    date : datetime.date, optional
        Annotation date. Defaults to *today at call time*. The previous
        default ``date=date.today()`` was evaluated once at import time,
        so every tree created by a long-running process carried a stale date.
    """

    def __init__(self, image_path, width, height, user_name="bipbip", date=None):
        if date is None:
            # Imported locally because the `date` parameter shadows the
            # module-level `date` class inside this scope.
            import datetime
            date = datetime.date.today()
        self.plant_count = 0
        self.image_path = Path(image_path)
        self.tree = Element("GEDI")
        dl_document = SubElement(self.tree, "DL_DOCUMENT")
        user = SubElement(self.tree, "USER")
        user.attrib["name"] = user_name
        user.attrib["date"] = str(date)
        dl_document.attrib["src"] = self.image_path.name
        dl_document.attrib["docTag"] = "xml"
        dl_document.attrib["width"] = str(width)
        dl_document.attrib["height"] = str(height)

    def add_mask(self, name, crop_type="PlanteInteret"):
        """Append a MASQUE_ZONE element with an auto-incremented id."""
        dl_document = self.tree.find("DL_DOCUMENT")
        mask = SubElement(dl_document, "MASQUE_ZONE")
        mask.attrib["id"] = str(self.plant_count)
        mask.attrib["type"] = crop_type
        mask.attrib["name"] = str(name)
        self.plant_count += 1

    def save(self, save_dir=""):
        """Write the tree into `save_dir` as `<image stem>.xml`."""
        path = Path(save_dir) / self.image_path.with_suffix(".xml").name
        path.write_text(tostring(self.tree, encoding='unicode', pretty_print=True))
| laclouis5/darknet2 | my_xml_toolbox.py | my_xml_toolbox.py | py | 1,254 | python | en | code | 1 | github-code | 50 |
15078139825 | from setuptools import setup, find_packages
# Read the package version from omark/__init__.py without importing the package.
name = 'omark'
__version__ = None
with open('{:s}/__init__.py'.format(name), 'rt') as fp:
    for line in fp:
        if line.startswith('__version__'):
            # Executes the `__version__ = '...'` assignment in module scope.
            exec(line.rstrip())

with open("README.md", "rt") as fh:
    readme = fh.read()

requirements = ['biopython', 'ete3', 'omamer>=0.2.2', 'matplotlib', 'jinja2']

desc = 'OMArk - Proteome quality assessment based on OMAmer placements'

setup(
    name=name,
    version=__version__,
    author='Yannis Nevers',
    # setuptools expects `author_email`; the previous `email` keyword was an
    # unknown option and was silently dropped from the package metadata.
    author_email='yannis.nevers@unil.ch',
    url='https://github.com/DessimozLab/omark',
    description=desc,
    long_description=readme,
    long_description_content_type="text/markdown",
    packages=find_packages(),
    package_data={'omark': ['assets/*.txt']},
    include_package_data=True,
    install_requires=requirements,
    python_requires=">=3.9",
    scripts=['bin/omark'])
| DessimozLab/OMArk | setup.py | setup.py | py | 901 | python | en | code | 18 | github-code | 50 |
17836444378 | ##
# @file 002.py
# @brief Finds sum of even Fibonacci numbers
# @author Deeno Burgan
# @version 1
# @date 2016-06-28
def sumEvenFibonacci(aLimit):
    """Return the sum of the even Fibonacci numbers that are <= aLimit.

    The original version tested the limit *before* advancing, so an even
    Fibonacci number exceeding the limit could still be added (e.g. a
    limit of 7 yielded 10 because 8 was counted). The limit is now
    checked on the value actually being accumulated.
    """
    previous, current = 1, 1
    total = 0
    while True:
        # Advance to the next Fibonacci number.
        previous, current = current, previous + current
        if current > aLimit:
            break
        if current % 2 == 0:
            total += current
    return total
# Script entry point: solve Project Euler problem 2 for the 4,000,000 limit.
if __name__ == "__main__":
    limit = 4000000
    answer = sumEvenFibonacci(limit)
    print("Sum of even Fibonacci numbers less than or equal to 4000000: ", answer)
| DrakeThane/misc-challenge-solutions | ProjectEuler/Python/002.py | 002.py | py | 511 | python | en | code | 0 | github-code | 50 |
71936543835 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 31 00:02:50 2017
@author: apple
"""
"""
Given a singly linked list, determine if it is a palindrome.
Follow up:
Could you do it in O(n) time and O(1) space?
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    """Check whether a singly linked list is a palindrome.

    Runs in O(n) time and O(1) extra space: the second half of the list
    is reversed in place, then values are compared from both ends.
    """

    def isPalindrome(self, head):
        """
        :type head: ListNode
        :rtype: bool
        """
        # Empty and single-node lists are trivially palindromes.
        if head is None or head.next is None:
            return True

        # Slow/fast pointers: `slow` stops at the end of the first half.
        slow, fast = head, head.next
        while fast is not None and fast.next is not None:
            slow = slow.next
            fast = fast.next.next

        # Reverse the second half in place; `prev` becomes its new head.
        prev, node = None, slow.next
        while node is not None:
            node.next, prev, node = prev, node, node.next

        # Walk inward from both ends and compare values.
        left, right = head, prev
        while left is not None and right is not None:
            if left.val != right.val:
                return False
            left = left.next
            right = right.next
        return True
| cgxabc/Online-Judge-Programming-Exercise | Leetcode/PalindromeLinkedList.py | PalindromeLinkedList.py | py | 1,393 | python | en | code | 0 | github-code | 50 |
10297880784 | #!/usr/local/bin/python3
# Python Challenge - 19
# http://www.pythonchallenge.com/pc/hex/bin.html
# Username: butter; Password: fly
# Keyword: sorry, idiot
import base64
import wave
import audioop
def main():
    '''
    Hint: please! Photo is a map of India

    <!--
    From: leopold.moz@pythonchallenge.com
    Subject: what do you mean by "open the attachment?"
    Mime-version: 1.0
    Content-type: Multipart/mixed; boundary="===============1295515792=="

    It is so much easier for you, youngsters.
    Maybe my computer is out of order.
    I have a real work to do and I must know what's inside!

    --===============1295515792==
    Content-type: audio/x-wav; name="indian.wav"
    Content-transfer-encoding: base64

    <moved to indian.wav>
    --===============1295515792==--
    -->

    sorry.html (www.pythonchallenge.com/pc/hex/.html) ->
    -"what are you apologizing for?"
    '''
    # Decode base64 encoding, write to a wav file?
    with open('./indian_chall_19/indian.txt', 'r') as rawtext:
        data = rawtext.read()

    # The mail attachment is base64 text; decode it back to raw WAV bytes.
    decoded_bytes = base64.b64decode(data)
    temp = open('./indian_chall_19/temp.wav', 'wb')
    temp.write(decoded_bytes)
    temp.close()

    temp_wav = wave.open('./indian_chall_19/temp.wav', 'rb')
    indian = wave.open('./indian_chall_19/indian.wav', 'wb')
    bigindian = wave.open('./indian_chall_19/bigindian.wav', 'wb')

    # getparams() returns a 6-field tuple describing the audio stream.
    nchannels, sampwidth, framerate, nframes, comptype, compname = \
        temp_wav.getparams()
    print('Number audio channels (1 mono, 2 stereo): {}\nSample width in bits:'
          ' {}\nSample freq (frame rate): {}\nNumber audio frames: {}\n'
          'Compression type (NONE is only supported type): {}\nHuman-readable '
          'compression type: {}'.format(nchannels, sampwidth, framerate,
                                        nframes, comptype, compname))

    frames = temp_wav.readframes(nframes)
    # Swap the byte order of every sample (little endian <-> big endian).
    frames_swap = audioop.byteswap(frames, sampwidth)

    # Write both byte orders out with identical WAV headers.
    indian.setparams(
        (nchannels, sampwidth, framerate, nframes, comptype, compname))
    bigindian.setparams(
        (nchannels, sampwidth, framerate, nframes, comptype, compname))
    indian.writeframes(frames)  # little endian -> 'sorry'
    bigindian.writeframes(frames_swap)
    # big endian -> 'you are an idiot, ah ah ah ah ah ah ah'

    temp_wav.close()
    indian.close()
    bigindian.close()
    return 0
# Run the challenge solver when executed as a script.
if __name__ == '__main__':
    main()
| HKuz/PythonChallenge | Challenges/chall_19.py | chall_19.py | py | 2,447 | python | en | code | 0 | github-code | 50 |
6566014115 | from typing import Any, Dict
from django.db.models.query import QuerySet
from django.shortcuts import render, redirect
from django.views.generic import UpdateView, DetailView, CreateView, DeleteView, ListView
from django.views.generic.edit import UpdateView, DeleteView
from rest_framework.views import APIView
from django.urls import reverse, reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework.viewsets import ModelViewSet
from django.contrib.auth.models import User
# from .models import Newblog
from django.http import JsonResponse
from .models import *
from .forms import *
from .serializer import *
from django.db.models import Q
from django.shortcuts import render, redirect, get_object_or_404
from .forms import EditCommentForm
from .models import Comment
# Create your views here.
# Shared navigation menu passed into every template context below.
menu = [
    {'title': 'Decode Blog', 'url': 'decode_blog:home'},
]
# Reworked search views
def search_action(request):
    """Render the profile page, filtering posts by the ``search`` parameter."""
    query = request.GET.get('search')
    if query:
        posts = NewBlog.objects.filter(Q(name__icontains=query) | Q(description__icontains=query))
    else:
        posts = NewBlog.objects.all()
    context = {
        'posts': posts,
        'menu': menu,
    }
    return render(request, "authe/profile.html", context)
def search_h(request):
    """Render the home page, filtering blogs by the ``search_h`` parameter."""
    term = request.GET.get('search_h')
    # QuerySets are lazy, so the unused branch triggers no query.
    queryset = NewBlog.objects.all()
    if term:
        queryset = NewBlog.objects.filter(Q(name__icontains=term) | Q(description__icontains=term))
    return render(request, 'decode_blog/home.html', {
        'newblogs': queryset,
    })
def home(request):
    """Render the landing page with all blogs and all categories."""
    context = {
        'menu': menu,
        'title': 'Главная страница',
        'newblogs': NewBlog.objects.all(),
        'categories': Category.objects.all(),
    }
    return render(request, 'decode_blog/home.html', context=context)
class DecodeAddBlog(CreateView):
    """Create a new blog post, then return to the author's profile."""

    form_class = AddBlogForm
    template_name = 'decode_blog/newblog.html'
    success_url = reverse_lazy('authe:profile')

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx.update(title='Новый продукт', menu=menu)
        return ctx
class EditBlog(UpdateView):
    """Edit an existing blog post identified by the ``blog_id`` URL kwarg.

    The previous ``form_valid`` override called ``form.save()`` and then
    ``super().form_valid(form)``, which saves the form again — two UPDATE
    queries per edit. ``UpdateView`` already saves a valid form, so the
    override is removed.
    """

    model = NewBlog
    form_class = AddBlogForm
    template_name = 'decode_blog/edit_blog.html'
    success_url = reverse_lazy('decode_blog:home')

    def get_object(self, queryset=None):
        # 404 on a missing blog (consistent with delete_blog) instead of
        # an unhandled NewBlog.DoesNotExist -> 500.
        return get_object_or_404(NewBlog, pk=self.kwargs['blog_id'])
def delete_blog(request, blog_id):
    """Delete the blog post with the given id and return to the home page.

    ``get_object_or_404`` raises ``Http404`` — never ``NewBlog.DoesNotExist`` —
    so the previous ``except NewBlog.DoesNotExist`` branch was unreachable
    dead code and has been removed; a missing blog now yields a regular 404.
    """
    blog = get_object_or_404(NewBlog, pk=blog_id)
    blog.delete()
    return redirect('decode_blog:home')
class BlogDetail(DetailView):
    """Show one blog post together with its comments and the category list."""

    model = NewBlog
    template_name = 'decode_blog/comment.html'
    pk_url_kwarg = 'blog_id'
    context_object_name = 'newblog'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({
            'title': 'Обзор блога',
            'comments': Comment.objects.filter(blog_id=self.kwargs['blog_id']),
            'categories': Category.objects.all(),
            'menu': menu,
        })
        return context
class BlogViewSet(ModelViewSet):
    """REST API for blog posts, with extra list actions."""

    queryset = NewBlog.objects.all()
    serializer_class = BlogSerializer

    @action(methods=['get'], detail=False)  # list only blog names
    def blogs(self, request):
        blogs = NewBlog.objects.all()
        return Response([blog.name for blog in blogs])

    @action(methods=['get'], detail=False)
    def genre_filter(self, request):
        """Filter blogs by the ``categ_id`` query parameter."""
        categ = NewBlog.objects.filter(categ_id=self.request.query_params.get('categ_id'))
        # Bug fix: the filtered queryset must be serialized — the previous
        # code passed the NewBlog *class* and discarded `categ`.
        serializer = BlogSerializer(categ, many=True)
        return Response(serializer.data)

    def delete(self, request, *args, **kwargs):
        """Delete a blog by pk, reporting errors in the response body."""
        pk = kwargs.get('pk', None)
        if not pk:
            return Response({'error': 'Method "DELETE" not allowed.'})
        try:
            instance = NewBlog.objects.get(pk=pk)
            instance.delete()
        except NewBlog.DoesNotExist:
            # Narrowed from a bare `except`, which also hid real errors.
            return Response({'error': 'Object does not exists.'})
        return Response({'status': 'Blog was deleted.'})
class AddComment(LoginRequiredMixin, CreateView):
    """Create a comment; only available to authenticated users."""

    form_class = CommentForm
    template_name = 'decode_blog/Add-comment.html'
    success_url = reverse_lazy('decode_blog:home')  # redirect target after creation

    def form_valid(self, form):
        # Attach the comment to the current user.
        form.instance.user = self.request.user
        # Tie the comment to the blog named in the URL, when present.
        blog_id = self.kwargs.get('blog_id')
        if blog_id:
            form.instance.blog_id = blog_id
        return super().form_valid(form)

    def get_context_data(self, **kwargs):
        data = super().get_context_data(**kwargs)
        data['title'] = 'Новый комментарий'
        data['menu'] = menu
        return data
class CategoriesBlog(ListView):
    """List view whose context exposes every blog category."""

    model = NewBlog
    template_name = 'decode_blog/home.html'
    context_object_name = 'categories'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        # Extra context consumed by the shared home template.
        ctx['title'] = 'Категории'
        ctx['menu'] = menu
        ctx['categories'] = Category.objects.all()
        return ctx
def site_category(request, category_id):
    """Render the home template restricted to a single category."""
    context = {
        'newblogs': NewBlog.objects.filter(category_id=category_id),
        'categories': Category.objects.all(),
        'menu': menu,
        'title': 'Статьи',
        'category_id': category_id,
    }
    return render(request, 'decode_blog/home.html', context=context)
# class ShowComment(DetailView):
# model = Comment # Изменено на модель Comment, чтобы отображать комментарии
# template_name = 'decode_blog/comment.html'
# pk_url_kwarg = 'comment_id'
# def get_queryset(self):
# queryset = super().get_queryset()
# if not self.request.user.is_authenticated:
# return queryset.none()
# return queryset.filter(blog_id=self.blog_id)
# def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# context['title'] = 'Обзор комментариев'
# context['blog'] = NewBlog.objects.get(id=self.blog_id)
# context['menu'] = menu
# return context
# class CategoriesBlog(ListView):
# model = NewBlog
# template_name = 'decode_blog/home.html'
# context_object_name = 'categories'
# def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# context['title'] = 'Категории'
# context['menu'] = menu
# context['categories'] = Category.objects.all()
# return context
class CreateComment(APIView):
    """Create a comment on a blog from POSTed form data."""

    def post(self, request):
        """Persist the comment and redirect to the blog's comment page."""
        message_text = request.data['comment-text']
        blog_id = request.data['blog_id']
        newblog = NewBlog.objects.get(id=blog_id)
        user = User.objects.get(id=request.user.id)
        # objects.create() already saves the row; the previous extra
        # .save() call issued a second, redundant write.
        Comment.objects.create(text=message_text, blog=newblog, user=user)
        return redirect('decode_blog:comments-blog', blog_id)
def edit_comment(request, comment_id):
    """Edit an existing comment, then return to its blog's comment page."""
    comment = get_object_or_404(Comment, id=comment_id)
    if request.method != 'POST':
        # Initial GET: show the form pre-filled with the current text.
        form = EditCommentForm(instance=comment)
    else:
        form = EditCommentForm(request.POST, instance=comment)
        if form.is_valid():
            form.save()
            return redirect('decode_blog:comments-blog', blog_id=comment.blog.id)
    # Invalid POST falls through and re-renders the bound form with errors.
    return render(request, 'decode_blog/edit_comment.html', {'form': form, 'comment': comment})
| Abylai-Yessim/decodeblog | decode_blog/views.py | views.py | py | 8,507 | python | en | code | 1 | github-code | 50 |
22923828907 | # !/usr/bin/python3
# coding:utf-8
# author:panli
import xlutils
import xlrd
import os
from xlutils.copy import copy
def base_dir(filename=None):
    """Return *filename* resolved relative to this script's directory.

    The previous implementation crashed on a bare ``base_dir()`` call
    because ``os.path.join`` rejects ``None``; ``None`` now yields the
    script directory itself.
    """
    return os.path.join(os.path.dirname(__file__), filename or '')
# Open the source workbook (read-only) and inspect its first sheet.
work = xlrd.open_workbook(base_dir('api.xls'))
sheet = work.sheet_by_index(0)
print(sheet.nrows)
print(sheet.cell_value(4, 2))

# xlrd workbooks cannot be modified; xlutils.copy() produces a writable clone.
old_content = copy(work)
ws = old_content.get_sheet(0)
ws.write(3, 2, '哈哈哈哈')  # overwrite row 3, column 2
old_content.save(base_dir('excel.xls'))
| 17621606077pl/Test_Api | script/Excel表格的数据读取/Test_Red_excel.py | Test_Red_excel.py | py | 464 | python | en | code | 0 | github-code | 50 |
34655029164 | _author_ = 'jake'
_project_ = 'leetcode'
# https://leetcode.com/problems/split-a-string-in-balanced-strings/
# Balanced strings are those who have equal quantity of 'L' and 'R' characters.
# Given a balanced string s split it in the maximum amount of balanced strings.
# Return the maximum amount of splitted balanced strings.
# Keep a running net balance of "L" - "R" when iterating over s.
# Update the result whenever the balance is zero.
# Time - O(n)
# Space - O(1)
class Solution(object):
    """Count the maximum number of balanced 'L'/'R' substrings.

    Track a running depth (+1 per 'L', -1 per any other character);
    each return to zero closes one balanced substring.
    """

    def balancedStringSplit(self, s):
        """
        :type s: str
        :rtype: int
        """
        splits = 0
        depth = 0
        for ch in s:
            depth += 1 if ch == "L" else -1
            # bool is an int: adds 1 exactly when the depth hits zero.
            splits += depth == 0
        return splits
| jakehoare/leetcode | python_1001_to_2000/1221_Split_a_String_in_Balanced_Strings.py | 1221_Split_a_String_in_Balanced_Strings.py | py | 786 | python | en | code | 49 | github-code | 50 |
38390782722 | # function to print alternate uppaer and lowercase letters
def myfunc(a):
    """Return *a* with alternating case: even indices upper, odd lower.

    Uses ``str.join`` over a generator instead of repeated ``+=`` string
    concatenation, which is quadratic in the worst case.
    """
    return ''.join(
        ch.upper() if i % 2 == 0 else ch.lower()
        for i, ch in enumerate(a)
    )
# Demo: even-indexed characters uppercased -> "ShIvAnK".
print(myfunc("shivank"))

# map & filter functions
def checkeven(n):
    """Return True if *n* is even, False otherwise.

    The original returned ``None`` (not ``False``) for odd numbers, so
    ``map(checkeven, ...)`` produced a mix of ``True`` and ``None``.
    An explicit boolean keeps the output consistent; ``filter`` behavior
    is unchanged since ``None`` and ``False`` are both falsy.
    """
    return n % 2 == 0
mylist = [1, 2, 3, 4, 5, 6]
# map() keeps one result per element; filter() keeps only truthy elements.
print(list(map(checkeven, mylist)))
print(list(filter(checkeven, mylist)))

# lambda function
# for checkeven function
# NOTE(review): this bare lambda is created and immediately discarded — it
# only illustrates the syntax; the call below builds its own copy.
lambda num: num % 2 == 0
print(list(filter(lambda num: num % 2 == 0, mylist)))
| shivankgoyal790/DailyTasks | practice/prac2.py | prac2.py | py | 581 | python | en | code | 0 | github-code | 50 |
31040661772 | import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from functools import partial
from rdkit import Chem
from torch.distributions import Categorical
__all__ = ['DGMG']
class MoleculeEnv(object):
    """MDP environment for generating molecules.

    Parameters
    ----------
    atom_types : list
        E.g. ['C', 'N']
    bond_types : list
        E.g. [Chem.rdchem.BondType.SINGLE, Chem.rdchem.BondType.DOUBLE,
        Chem.rdchem.BondType.TRIPLE, Chem.rdchem.BondType.AROMATIC]
    """
    def __init__(self, atom_types, bond_types):
        super(MoleculeEnv, self).__init__()

        self.atom_types = atom_types
        self.bond_types = bond_types

        # Inverse mappings: atom/bond type -> contiguous integer id,
        # used when encoding decision sequences.
        self.atom_type_to_id = dict()
        self.bond_type_to_id = dict()

        for id, a_type in enumerate(atom_types):
            self.atom_type_to_id[a_type] = id

        for id, b_type in enumerate(bond_types):
            self.bond_type_to_id[b_type] = id

    def get_decision_sequence(self, mol, atom_order):
        """Extract a decision sequence with which DGMG can generate the
        molecule with a specified atom order.

        Parameters
        ----------
        mol : Chem.rdchem.Mol
        atom_order : list
            Specifies a mapping between the original atom
            indices and the new atom indices. In particular,
            atom_order[i] is re-labeled as i.

        Returns
        -------
        decisions : list
            decisions[i] is a 2-tuple (i, j)
            - If i = 0, j specifies either the type of the atom to add
              self.atom_types[j] or termination with j = len(self.atom_types)
            - If i = 1, j specifies either the type of the bond to add
              self.bond_types[j] or termination with j = len(self.bond_types)
            - If i = 2, j specifies the destination atom id for the bond to add.
              With the formulation of DGMG, j must be created before the decision.
        """
        decisions = []
        # Maps original atom index -> index in the generation order.
        old2new = dict()

        for new_id, old_id in enumerate(atom_order):
            atom = mol.GetAtomWithIdx(old_id)
            a_type = atom.GetSymbol()
            decisions.append((0, self.atom_type_to_id[a_type]))
            for bond in atom.GetBonds():
                u = bond.GetBeginAtomIdx()
                v = bond.GetEndAtomIdx()
                # Orient the bond so that u is the atom being added.
                if v == old_id:
                    u, v = v, u
                # Only emit bonds whose other endpoint was generated earlier.
                if v in old2new:
                    decisions.append((1, self.bond_type_to_id[bond.GetBondType()]))
                    decisions.append((2, old2new[v]))
            # Signal "stop adding bonds" for this atom.
            decisions.append((1, len(self.bond_types)))
            old2new[old_id] = new_id
        # Signal "stop adding atoms".
        decisions.append((0, len(self.atom_types)))
        return decisions

    def reset(self, rdkit_mol=False):
        """Setup for generating a new molecule

        Parameters
        ----------
        rdkit_mol : bool
            Whether to keep a Chem.rdchem.Mol object so
            that we know what molecule is being generated
        """
        self.dgl_graph = dgl.graph(([], []), idtype=torch.int32)
        # If there are some features for nodes and edges,
        # zero tensors will be set for those of new nodes and edges.
        self.dgl_graph.set_n_initializer(dgl.frame.zero_initializer)
        self.dgl_graph.set_e_initializer(dgl.frame.zero_initializer)

        self.mol = None
        if rdkit_mol:
            # RWMol is a molecule class that is intended to be edited.
            self.mol = Chem.RWMol(Chem.MolFromSmiles(''))

    def num_atoms(self):
        """Get the number of atoms for the current molecule.

        Returns
        -------
        int
        """
        return self.dgl_graph.num_nodes()

    def add_atom(self, type):
        """Add an atom of the specified type.

        Parameters
        ----------
        type : int
            Should be in the range of [0, len(self.atom_types) - 1]
        """
        self.dgl_graph.add_nodes(1)
        if self.mol is not None:
            self.mol.AddAtom(Chem.Atom(self.atom_types[type]))

    def add_bond(self, u, v, type, bi_direction=True):
        """Add a bond of the specified type between atom u and v.

        Parameters
        ----------
        u : int
            Index for the first atom
        v : int
            Index for the second atom
        type : int
            Index for the bond type
        bi_direction : bool
            Whether to add edges for both directions in the DGLGraph.
            If not, we will only add the edge (u, v).
        """
        if bi_direction:
            self.dgl_graph.add_edges([u, v], [v, u])
        else:
            self.dgl_graph.add_edge(u, v)

        # The RDKit molecule stores the bond once, regardless of direction.
        if self.mol is not None:
            self.mol.AddBond(u, v, self.bond_types[type])

    def get_current_smiles(self):
        """Get the generated molecule in SMILES

        Returns
        -------
        s : str
            SMILES
        """
        assert self.mol is not None, 'Expect a Chem.rdchem.Mol object initialized.'
        s = Chem.MolToSmiles(self.mol)
        return s
class GraphEmbed(nn.Module):
    """Compute a molecule representation out of atom representations.

    A gated sum over per-atom features: each atom contributes its
    projected representation weighted by a learned sigmoid gate.

    Parameters
    ----------
    node_hidden_size : int
        Size of atom representation
    """

    def __init__(self, node_hidden_size):
        super(GraphEmbed, self).__init__()

        # The paper uses a graph representation twice the node size.
        self.graph_hidden_size = 2 * node_hidden_size

        # Per-atom scalar gate in (0, 1).
        self.node_gating = nn.Sequential(
            nn.Linear(node_hidden_size, 1),
            nn.Sigmoid(),
        )
        self.node_to_graph = nn.Linear(node_hidden_size, self.graph_hidden_size)

    def forward(self, g):
        """Return a (1, graph_hidden_size) float32 representation of ``g``.

        Parameters
        ----------
        g : DGLGraph
            Current molecule graph
        """
        if g.num_nodes() == 0:
            # An empty molecule maps to the zero vector.
            return torch.zeros(1, self.graph_hidden_size)

        # Atom features are stored under 'hv' in ndata.
        node_feats = g.ndata['hv']
        gates = self.node_gating(node_feats)
        projected = self.node_to_graph(node_feats)
        return (gates * projected).sum(0, keepdim=True)
class GraphProp(nn.Module):
    """Perform message passing over a molecule graph and update its atom representations.

    Parameters
    ----------
    num_prop_rounds : int
        Number of message passing rounds for each time
    node_hidden_size : int
        Size of atom representation
    edge_hidden_size : int
        Size of bond representation
    """
    def __init__(self, num_prop_rounds, node_hidden_size, edge_hidden_size):
        super(GraphProp, self).__init__()

        self.num_prop_rounds = num_prop_rounds
        # Setting from the paper
        self.node_activation_hidden_size = 2 * node_hidden_size

        message_funcs = []
        # Plain callables (one partial per round); not registered as modules.
        self.reduce_funcs = []
        node_update_funcs = []

        for t in range(num_prop_rounds):
            # input being [hv, hu, xuv]
            message_funcs.append(nn.Linear(2 * node_hidden_size + edge_hidden_size,
                                           self.node_activation_hidden_size))
            self.reduce_funcs.append(partial(self.dgmg_reduce, round=t))
            node_update_funcs.append(
                nn.GRUCell(self.node_activation_hidden_size,
                           node_hidden_size))

        # Wrapped in ModuleList so the parameters are registered per round.
        self.message_funcs = nn.ModuleList(message_funcs)
        self.node_update_funcs = nn.ModuleList(node_update_funcs)

    def dgmg_msg(self, edges):
        """For an edge u->v, send a message concat([h_u, x_uv])

        Parameters
        ----------
        edges : batch of edges

        Returns
        -------
        dict
            Dictionary containing messages for the edge batch,
            with the messages being tensors of shape (B, F1),
            B for the number of edges and F1 for the message size.
        """
        return {'m': torch.cat([edges.src['hv'],
                                edges.data['he']],
                               dim=1)}

    def dgmg_reduce(self, nodes, round):
        """Aggregate messages.

        Parameters
        ----------
        nodes : batch of nodes
        round : int
            Update round

        Returns
        -------
        dict
            Dictionary containing aggregated messages for each node
            in the batch, with the messages being tensors of shape
            (B, F2), B for the number of nodes and F2 for the aggregated
            message size
        """
        hv_old = nodes.data['hv']
        m = nodes.mailbox['m']
        # Make copies of original atom representations to match the
        # number of messages.
        message = torch.cat([
            hv_old.unsqueeze(1).expand(-1, m.size(1), -1), m], dim=2)
        # Sum the transformed messages over each node's incoming edges.
        node_activation = (self.message_funcs[round](message)).sum(1)

        return {'a': node_activation}

    def forward(self, g):
        """Run all rounds of message passing, updating g.ndata['hv'] in place.

        Parameters
        ----------
        g : DGLGraph
        """
        if g.num_edges() == 0:
            # No bonds yet, so there is nothing to propagate.
            return
        else:
            for t in range(self.num_prop_rounds):
                g.update_all(message_func=self.dgmg_msg,
                             reduce_func=self.reduce_funcs[t])
                # GRU update: aggregated activation 'a' drives the new 'hv'.
                g.ndata['hv'] = self.node_update_funcs[t](
                    g.ndata['a'], g.ndata['hv'])
class AddNode(nn.Module):
    """Stop or add an atom of a particular type.

    Parameters
    ----------
    env : MoleculeEnv
        Environment for generating molecules
    graph_embed_func : callable taking g as input
        Function for computing molecule representation
    node_hidden_size : int
        Size of atom representation
    dropout : float
        Probability for dropout
    """
    def __init__(self, env, graph_embed_func, node_hidden_size, dropout):
        super(AddNode, self).__init__()

        self.env = env
        n_node_types = len(env.atom_types)

        self.graph_op = {'embed': graph_embed_func}

        # Action index n_node_types is reserved for "stop adding atoms".
        self.stop = n_node_types
        # Maps the graph embedding to logits over (atom types + stop).
        self.add_node = nn.Sequential(
            nn.Linear(graph_embed_func.graph_hidden_size, graph_embed_func.graph_hidden_size),
            nn.Dropout(p=dropout),
            nn.Linear(graph_embed_func.graph_hidden_size, n_node_types + 1)
        )

        # If to add a node, initialize its hv
        self.node_type_embed = nn.Embedding(n_node_types, node_hidden_size)
        self.initialize_hv = nn.Linear(node_hidden_size + \
                                       graph_embed_func.graph_hidden_size,
                                       node_hidden_size)

        # Zero activation used until the first message passing round.
        self.init_node_activation = torch.zeros(1, 2 * node_hidden_size)
        self.dropout = nn.Dropout(p=dropout)

    def _initialize_node_repr(self, g, node_type, graph_embed):
        """Initialize atom representation

        Parameters
        ----------
        g : DGLGraph
        node_type : int
            Index for the type of the new atom
        graph_embed : tensor of dtype float32
            Molecule representation
        """
        num_nodes = g.num_nodes()
        # New atom features combine its type embedding with the graph state.
        hv_init = torch.cat([
            self.node_type_embed(torch.LongTensor([node_type])),
            graph_embed], dim=1)
        hv_init = self.dropout(hv_init)
        hv_init = self.initialize_hv(hv_init)
        g.nodes[num_nodes - 1].data['hv'] = hv_init
        g.nodes[num_nodes - 1].data['a'] = self.init_node_activation

    def prepare_log_prob(self, compute_log_prob):
        """Setup for returning log likelihood

        Parameters
        ----------
        compute_log_prob : bool
            Whether to compute log likelihood
        """
        if compute_log_prob:
            self.log_prob = []
        self.compute_log_prob = compute_log_prob

    def forward(self, action=None):
        """
        Parameters
        ----------
        action : None or int
            If None, a new action will be sampled. If not None,
            teacher forcing will be used to enforce the decision of the
            corresponding action.

        Returns
        -------
        stop : bool
            Whether we stop adding new atoms
        """
        g = self.env.dgl_graph

        graph_embed = self.graph_op['embed'](g)

        logits = self.add_node(graph_embed).view(1, -1)
        probs = F.softmax(logits, dim=1)

        if action is None:
            # Sample an action when not teacher forcing.
            action = Categorical(probs).sample().item()
        stop = bool(action == self.stop)

        if not stop:
            self.env.add_atom(action)
            self._initialize_node_repr(g, action, graph_embed)

        if self.compute_log_prob:
            sample_log_prob = F.log_softmax(logits, dim=1)[:, action: action + 1]
            self.log_prob.append(sample_log_prob)

        return stop
class AddEdge(nn.Module):
    """Stop or add a bond of a particular type.

    Parameters
    ----------
    env : MoleculeEnv
        Environment for generating molecules
    graph_embed_func : callable taking g as input
        Function for computing molecule representation
    node_hidden_size : int
        Size of atom representation
    dropout : float
        Probability for dropout
    """
    def __init__(self, env, graph_embed_func, node_hidden_size, dropout):
        super(AddEdge, self).__init__()

        self.env = env
        n_bond_types = len(env.bond_types)
        # Action index n_bond_types is reserved for "stop adding bonds".
        self.stop = n_bond_types

        self.graph_op = {'embed': graph_embed_func}
        # Maps [graph embedding, newest-atom embedding] to logits over
        # (bond types + stop).
        self.add_edge = nn.Sequential(
            nn.Linear(graph_embed_func.graph_hidden_size + node_hidden_size,
                      graph_embed_func.graph_hidden_size + node_hidden_size),
            nn.Dropout(p=dropout),
            nn.Linear(graph_embed_func.graph_hidden_size + node_hidden_size, n_bond_types + 1)
        )

    def prepare_log_prob(self, compute_log_prob):
        """Setup for returning log likelihood

        Parameters
        ----------
        compute_log_prob : bool
            Whether to compute log likelihood
        """
        if compute_log_prob:
            self.log_prob = []
        self.compute_log_prob = compute_log_prob

    def forward(self, action=None):
        """
        Parameters
        ----------
        action : None or int
            If None, a new action will be sampled. If not None,
            teacher forcing will be used to enforce the decision of the
            corresponding action.

        Returns
        -------
        stop : bool
            Whether we stop adding new bonds
        action : int
            The type for the new bond
        """
        g = self.env.dgl_graph

        graph_embed = self.graph_op['embed'](g)
        # The bond always starts from the most recently added atom.
        src_embed = g.nodes[g.num_nodes() - 1].data['hv']

        logits = self.add_edge(
            torch.cat([graph_embed, src_embed], dim=1))
        probs = F.softmax(logits, dim=1)

        if action is None:
            # Sample an action when not teacher forcing.
            action = Categorical(probs).sample().item()
        stop = bool(action == self.stop)

        if self.compute_log_prob:
            sample_log_prob = F.log_softmax(logits, dim=1)[:, action: action + 1]
            self.log_prob.append(sample_log_prob)

        return stop, action
class ChooseDestAndUpdate(nn.Module):
    """Choose the atom to connect for the new bond.

    Parameters
    ----------
    env : MoleculeEnv
        Environment for generating molecules
    graph_prop_func : callable taking g as input
        Function for performing message passing
        and updating atom representations
    node_hidden_size : int
        Size of atom representation
    dropout : float
        Probability for dropout
    """
    def __init__(self, env, graph_prop_func, node_hidden_size, dropout):
        super(ChooseDestAndUpdate, self).__init__()

        self.env = env
        n_bond_types = len(self.env.bond_types)
        # To be used for one-hot encoding of bond type
        self.bond_embedding = torch.eye(n_bond_types)

        self.graph_op = {'prop': graph_prop_func}
        # Scores each candidate destination given
        # [dest embedding, source embedding, bond one-hot].
        self.choose_dest = nn.Sequential(
            nn.Linear(2 * node_hidden_size + n_bond_types, 2 * node_hidden_size + n_bond_types),
            nn.Dropout(p=dropout),
            nn.Linear(2 * node_hidden_size + n_bond_types, 1)
        )

    def _initialize_edge_repr(self, g, src_list, dest_list, edge_embed):
        """Initialize bond representation

        Parameters
        ----------
        g : DGLGraph
        src_list : list of int
            source atoms for new bonds
        dest_list : list of int
            destination atoms for new bonds
        edge_embed : 2D tensor of dtype float32
            Embeddings for the new bonds
        """
        # Both directions of the bond share the same one-hot feature.
        g.edges[src_list, dest_list].data['he'] = edge_embed.expand(len(src_list), -1)

    def prepare_log_prob(self, compute_log_prob):
        """Setup for returning log likelihood

        Parameters
        ----------
        compute_log_prob : bool
            Whether to compute log likelihood
        """
        if compute_log_prob:
            self.log_prob = []
        self.compute_log_prob = compute_log_prob

    def forward(self, bond_type, dest):
        """
        Parameters
        ----------
        bond_type : int
            The type for the new bond
        dest : int or None
            If None, a new action will be sampled. If not None,
            teacher forcing will be used to enforce the decision of the
            corresponding action.
        """
        g = self.env.dgl_graph

        # The bond source is the most recently added atom; all earlier
        # atoms are candidate destinations.
        src = g.num_nodes() - 1
        possible_dests = range(src)

        src_embed_expand = g.nodes[src].data['hv'].expand(src, -1)
        possible_dests_embed = g.nodes[possible_dests].data['hv']
        # One-hot row for the chosen bond type.
        edge_embed = self.bond_embedding[bond_type: bond_type + 1]

        dests_scores = self.choose_dest(
            torch.cat([possible_dests_embed,
                       src_embed_expand,
                       edge_embed.expand(src, -1)], dim=1)).view(1, -1)
        dests_probs = F.softmax(dests_scores, dim=1)

        if dest is None:
            # Sample a destination when not teacher forcing.
            dest = Categorical(dests_probs).sample().item()

        if not g.has_edges_between(src, dest):
            # For undirected graphs, we add edges for both directions
            # so that we can perform graph propagation.
            src_list = [src, dest]
            dest_list = [dest, src]
            self.env.add_bond(src, dest, bond_type)
            self._initialize_edge_repr(g, src_list, dest_list, edge_embed)

            # Perform message passing when new bonds are added.
            self.graph_op['prop'](g)

        if self.compute_log_prob:
            # With a single candidate the choice is forced (prob 1, log 0),
            # so nothing is recorded.
            if dests_probs.nelement() > 1:
                self.log_prob.append(
                    F.log_softmax(dests_scores, dim=1)[:, dest: dest + 1])
def weights_init(m):
    """Initialize weights for models.

    Code from https://gist.github.com/jeasinema/ed9236ce743c8efaf30fa2ff732749f5

    Usage:
        model = Model()
        model.apply(weight_init)
    """
    if isinstance(m, nn.Linear):
        # Xavier for the weight matrix, normal noise for the bias.
        init.xavier_normal_(m.weight.data)
        init.normal_(m.bias.data)
        return
    if isinstance(m, nn.GRUCell):
        for param in m.parameters():
            # Orthogonal init for matrices, normal init for vectors.
            chosen = init.orthogonal_ if param.dim() >= 2 else init.normal_
            chosen(param.data)
def dgmg_message_weight_init(m):
    """Weight initialization for graph propagation module

    These are suggested by the author. This should only be used for
    the message passing functions, i.e. fe's in the paper.
    """
    def _init_linear(layer):
        # Only Linear layers are expected inside the message functions.
        if not isinstance(layer, nn.Linear):
            raise ValueError('Expected the input to be of type nn.Linear!')
        init.normal_(layer.weight.data, std=1./10)
        init.normal_(layer.bias.data, std=1./10)

    if isinstance(m, nn.ModuleList):
        for sub in m:
            sub.apply(_init_linear)
    else:
        m.apply(_init_linear)
class DGMG(nn.Module):
"""DGMG model
`Learning Deep Generative Models of Graphs <https://arxiv.org/abs/1803.03324>`__
Users only need to initialize an instance of this class.
Parameters
----------
atom_types : list
E.g. ['C', 'N'].
bond_types : list
E.g. [Chem.rdchem.BondType.SINGLE, Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE, Chem.rdchem.BondType.AROMATIC].
node_hidden_size : int
Size of atom representation. Default to 128.
num_prop_rounds : int
Number of message passing rounds for each time. Default to 2.
dropout : float
Probability for dropout. Default to 0.2.
"""
def __init__(self, atom_types, bond_types, node_hidden_size=128,
num_prop_rounds=2, dropout=0.2):
super(DGMG, self).__init__()
self.env = MoleculeEnv(atom_types, bond_types)
# Graph embedding module
self.graph_embed = GraphEmbed(node_hidden_size)
# Graph propagation module
# For one-hot encoding, edge_hidden_size is just the number of bond types
self.graph_prop = GraphProp(num_prop_rounds, node_hidden_size, len(self.env.bond_types))
# Actions
self.add_node_agent = AddNode(
self.env, self.graph_embed, node_hidden_size, dropout)
self.add_edge_agent = AddEdge(
self.env, self.graph_embed, node_hidden_size, dropout)
self.choose_dest_agent = ChooseDestAndUpdate(
self.env, self.graph_prop, node_hidden_size, dropout)
# Weight initialization
self.init_weights()
def init_weights(self):
"""Initialize model weights"""
self.graph_embed.apply(weights_init)
self.graph_prop.apply(weights_init)
self.add_node_agent.apply(weights_init)
self.add_edge_agent.apply(weights_init)
self.choose_dest_agent.apply(weights_init)
self.graph_prop.message_funcs.apply(dgmg_message_weight_init)
def count_step(self):
"""Increment the step by 1."""
self.step_count += 1
def prepare_log_prob(self, compute_log_prob):
"""Setup for returning log likelihood
Parameters
----------
compute_log_prob : bool
Whether to compute log likelihood
"""
self.compute_log_prob = compute_log_prob
self.add_node_agent.prepare_log_prob(compute_log_prob)
self.add_edge_agent.prepare_log_prob(compute_log_prob)
self.choose_dest_agent.prepare_log_prob(compute_log_prob)
def add_node_and_update(self, a=None):
"""Decide if to add a new atom.
If a new atom should be added, update the graph.
Parameters
----------
a : None or int
If None, a new action will be sampled. If not None,
teacher forcing will be used to enforce the decision of the
corresponding action.
"""
self.count_step()
return self.add_node_agent(a)
def add_edge_or_not(self, a=None):
"""Decide if to add a new bond.
Parameters
----------
a : None or int
If None, a new action will be sampled. If not None,
teacher forcing will be used to enforce the decision of the
corresponding action.
"""
self.count_step()
return self.add_edge_agent(a)
def choose_dest_and_update(self, bond_type, a=None):
"""Choose destination and connect it to the latest atom.
Add edges for both directions and update the graph.
Parameters
----------
bond_type : int
The type of the new bond to add
a : None or int
If None, a new action will be sampled. If not None,
teacher forcing will be used to enforce the decision of the
corresponding action.
"""
self.count_step()
self.choose_dest_agent(bond_type, a)
def get_log_prob(self):
"""Compute the log likelihood for the decision sequence,
typically corresponding to the generation of a molecule.
Returns
-------
torch.tensor consisting of a float only
"""
return torch.cat(self.add_node_agent.log_prob).sum()\
+ torch.cat(self.add_edge_agent.log_prob).sum()\
+ torch.cat(self.choose_dest_agent.log_prob).sum()
def teacher_forcing(self, actions):
"""Generate a molecule according to a sequence of actions.
Parameters
----------
actions : list of 2-tuples of int
actions[t] gives (i, j), the action to execute by DGMG at timestep t.
- If i = 0, j specifies either the type of the atom to add or termination
- If i = 1, j specifies either the type of the bond to add or termination
- If i = 2, j specifies the destination atom id for the bond to add.
With the formulation of DGMG, j must be created before the decision.
"""
stop_node = self.add_node_and_update(a=actions[self.step_count][1])
while not stop_node:
# A new atom was just added.
stop_edge, bond_type = self.add_edge_or_not(a=actions[self.step_count][1])
while not stop_edge:
# A new bond is to be added.
self.choose_dest_and_update(bond_type, a=actions[self.step_count][1])
stop_edge, bond_type = self.add_edge_or_not(a=actions[self.step_count][1])
stop_node = self.add_node_and_update(a=actions[self.step_count][1])
def rollout(self, max_num_steps):
"""Sample a molecule from the distribution learned by DGMG."""
stop_node = self.add_node_and_update()
while (not stop_node) and (self.step_count <= max_num_steps):
stop_edge, bond_type = self.add_edge_or_not()
if self.env.num_atoms() == 1:
stop_edge = True
while (not stop_edge) and (self.step_count <= max_num_steps):
self.choose_dest_and_update(bond_type)
stop_edge, bond_type = self.add_edge_or_not()
stop_node = self.add_node_and_update()
def forward(self, actions=None, rdkit_mol=False, compute_log_prob=False, max_num_steps=400):
"""
Parameters
----------
actions : list of 2-tuples or None.
If actions are not None, generate a molecule according to actions.
Otherwise, a molecule will be generated based on sampled actions.
rdkit_mol : bool
Whether to maintain a Chem.rdchem.Mol object. This brings extra
computational cost, but is necessary if we are interested in
learning the generated molecule.
compute_log_prob : bool
Whether to compute log likelihood
max_num_steps : int
Maximum number of steps allowed. This only comes into effect
during inference and prevents the model from not stopping.
Returns
-------
torch.tensor consisting of a float only, optional
The log likelihood for the actions taken
str, optional
The generated molecule in the form of SMILES
"""
# Initialize an empty molecule
self.step_count = 0
self.env.reset(rdkit_mol=rdkit_mol)
self.prepare_log_prob(compute_log_prob)
if actions is not None:
# A sequence of decisions is given, use teacher forcing
self.teacher_forcing(actions)
else:
# Sample a molecule from the distribution learned by DGMG
self.rollout(max_num_steps)
if compute_log_prob and rdkit_mol:
return self.get_log_prob(), self.env.get_current_smiles()
if compute_log_prob:
return self.get_log_prob()
if rdkit_mol:
return self.env.get_current_smiles()
| awslabs/dgl-lifesci | python/dgllife/model/model_zoo/dgmg.py | dgmg.py | py | 28,034 | python | en | code | 641 | github-code | 50 |
13210120428 | def _all_():
import os
import time
import sys
from colorama import Fore
from baner import baner
os.system("clear")
baner()
time.sleep(0.3)
print(Fore.YELLOW + "\t\t\t [" + Fore.GREEN + "1" + Fore.YELLOW + "]" + Fore.BLACK + " ~ "+Fore.CYAN + "Obtain Target Programming Plugins And Information")
time.sleep(0.3)
print(Fore.YELLOW + "\t\t\t [" + Fore.GREEN + "2" + Fore.YELLOW + "]" + Fore.BLACK + " ~ "+Fore.CYAN + "Obtain Sites On The Server")
time.sleep(0.5)
#################### GET NUMBER ####################
try:
number = int(input(Fore.LIGHTWHITE_EX + "\n[!] ~ Enter Your Number" + Fore.RED + " >>>" + Fore.GREEN + " "))
except:
time.sleep(1)
print(Fore.RED + "[-] Your Input Is Not Number !!!")
sys.exit()
if number == 1:
import builtwith
builtwith._all_()
# _Test_Fore_Back_The_Meno
if number == 2:
import site_on_server
site_on_server._all_()
# _Test_Fore_Back_The_Meno
else:
time.sleep(1)
print(Fore.RED + "[-] Your Number Is Not Found !!!")
sys.exit()
_all_()
| trabit373/info | web_tools.py | web_tools.py | py | 1,164 | python | en | code | 0 | github-code | 50 |
28832846886 | from pathlib import Path
from pyshacl import validate
import httpx
from config import *
def main():
# get the validator
r = httpx.get(
"https://raw.githubusercontent.com/surroundaustralia/ogcldapi-profile/master/validator.shacl.ttl",
follow_redirects=True,
)
assert r.status_code == 200
# for all datasets...
warning_datasets = {} # format {dataset_filename: warning_msg}
invalid_datasets = {} # format {dataset_filename: error_msg}
datasets_dir = Path(__file__).parent.parent / "data"
for f in datasets_dir.glob("**/*"):
# ...validate each file
if f.name.endswith(".ttl"):
try:
v = validate(str(f), shacl_graph=r.text, shacl_graph_format="ttl")
if not v[0]:
if "Severity: sh:Violation" in v[2]:
invalid_datasets[f.name] = v[2]
elif "Severity: sh:Warning" in v[2]:
warning_datasets[f.name] = v[2]
# syntax errors crash the validate() function
except Exception as e:
invalid_datasets[f.name] = e
# check to see if we have any invalid datasets
if len(warning_datasets.keys()) > 0 and SHOW_WARNINGS:
print("Warning datasets:\n")
for dataset, warning in warning_datasets.items():
print(f"- {dataset}:")
print(warning)
print("-----")
# check to see if we have any invalid datasets
if len(invalid_datasets.keys()) > 0:
print("Invalid datasets:\n")
for dataset, error in invalid_datasets.items():
print(f"- {dataset}:")
print(error)
print("-----")
if WARNINGS_INVALID:
assert len(warning_datasets.keys()) == 0, "Warning datasets: {}".format(
", ".join([str(x) for x in warning_datasets.keys()])
)
assert len(invalid_datasets.keys()) == 0, "Invalid datasets: {}".format(
", ".join([str(x) for x in invalid_datasets.keys()])
)
if __name__ == "__main__":
main()
| surroundaustralia/surround-prez-features | scripts/validate.py | validate.py | py | 2,078 | python | en | code | 0 | github-code | 50 |
39871151590 |
from __future__ import print_function
# Communication to TensorFlow server via gRPC
from grpc.beta import implementations
import tensorflow as tf
# TensorFlow serving stuff to send messages
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from utils import TweetMapper
import tensorflow as tf
tf.app.flags.DEFINE_string('server', 'localhost:9000',
'PredictionService host:port')
FLAGS = tf.app.flags.FLAGS
def main(_):
host, port = FLAGS.server.split(':')
channel = implementations.insecure_channel(host, int(port))
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
tweet_mapper = TweetMapper()
# Send request
data = tweet_mapper.vectorize(["Hoy fue un dia de mierda","Me encanta jugar en el parque"])
# See prediction_service.proto for gRPC request/response details.
request = predict_pb2.PredictRequest()
request.model_spec.name = 'sim'
request.inputs['tweets'].CopyFrom(
tf.contrib.util.make_tensor_proto(data))
result = stub.Predict(request, 10.0) # 10 secs timeout
print(result)
if __name__ == '__main__':
tf.app.run()
| si-m/fproject_serv | only_client.py | only_client.py | py | 1,175 | python | en | code | 0 | github-code | 50 |
1889731564 | import os
from Outlier import *
NUM_station = 164
NUM_peroid = 24
NUM_peroid_time = 24*60*60 / NUM_peroid
TravelingMatrix = list()
for O in range(0,NUM_station):
O_List = list()
for D in range(0,NUM_station):
D_List = list()
O_List.append(D_List)
TravelingMatrix.append(O_List)
Normal_Day_List = list()
inFile = open('C:/Users/SyuShengWei/Desktop/project/NormalDay.txt','r')
Data_List = inFile.readlines()
for DataLine in Data_List:
theLine = DataLine.strip('\n')
Normal_Day_List.append(theLine)
for filename in os.listdir('C:/Users/SyuShengWei/Desktop/project/RegularForm'):
the_day = filename.strip('.csv')
if the_day not in Normal_Day_List : continue
else:
os.chdir('C:/Users/SyuShengWei/Desktop/project/RegularForm')
inFile = open(filename,'r')
titleLine = inFile.readline()
Data_List = inFile.readlines()
for DataLine in Data_List:
theLine = DataLine.strip('\n')
Line_Info = theLine.split(',')
o_index = int(Line_Info[4])
d_index = int(Line_Info[5])
#peroid_time = int(Line_Info[1])
#peroid_index = int(peroid_time/NUM_peroid_time)
traveling_time = int(Line_Info[10])
TravelingMatrix[o_index][d_index].append(traveling_time)
AverageMatrix = list()
for O in range(0,NUM_station):
O_List = list()
for D in range(0,NUM_station):
D_List = 0
O_List.append(D_List)
AverageMatrix.append(O_List)
for O in range(0,NUM_station):
for D in range(0,NUM_station):
if len(TravelingMatrix[O][D]) == 0 :
continue
else :
Test_List = TravelingMatrix[O][D]
OutLierResult = outlier(Test_List,3)
total_value = 0
total_num = 0
for index in range(0,len(Test_List)):
if index not in OutLierResult[0] and index not in OutLierResult[1]:
total_value += Test_List[index]
total_num +=1
average_value = round(total_value / total_num,3)
AverageMatrix[O][D] = average_value
outFile = open('C:/Users/SyuShengWei/Desktop/project/Traveling_Time_Average.txt','a')
for O in range(0,NUM_station):
for D in range(0,NUM_station):
if AverageMatrix[O][D] == 0 : outFile.write('non')
else: outFile.write(str(AverageMatrix[O][D]))
if D != NUM_station -1 : outFile.write(' ')
else : outFile.write('\n') | shengwei-ClassNotes/YoubikeProject | Traveling_Time_Average.py | Traveling_Time_Average.py | py | 2,201 | python | en | code | 0 | github-code | 50 |
72617590236 | from tkinter import *
import tkinter.ttk as ttk
"""
This module holds only varibles needed for styling the main application and does not contain any code beyond varibles
colors are in Hex
red = "#cf2823"
light_grey="#f8f9fa"
mid_grey = "#3b3d3d"
dark_grey = "#292b2c"
green = "#19b019"
blue = "#0275d8"
-fg "#a6a6a6"
-bg "#464646"
-disabledbg "#2e2e2e"
-disabledfg "#999999"
-selectbg "#414141"
-selectfg "#a6a6a6"
-window "#373737"
-focuscolor "#bebebe"
-checklight "#e6e6e6"
"""
# background
bg_red = "#cf2823"
bg_light_grey="#f2f2f2"
bg_mid_grey = "#414141" #
bg_dark_grey = "#373737"
bg_green = "#19b019"
bg_blue = "#0275d8"
# foreground
fg_red = "#cf2823"
fg_light_grey="#f2f2f2"
fg_mid_grey = "#3b3d3d"
fg_dark_grey = "#373737"
fg_green = "#19b019"
fg_blue = "#0275d8"
# font family
font_family_= ""
font_family_= ""
# font size
font_size_5 = 5
font_size_10 = 10
font_size_25 = 25
# text position
txt_pos_east = E
txt_pos_west = W
txt_pos_center = CENTER
ttk_pos_nsew = "nsew"
nil_val = 0
num_val_5= 5
num_val_10= 10
num_val_15= 15
num_val_20= 20
num_val_25= 25 | DaveTanton/TheProject | SWACG_stylesheet.py | SWACG_stylesheet.py | py | 1,277 | python | en | code | 0 | github-code | 50 |
42717220130 | from transformers import AutoProcessor, FlaxWav2Vec2Model
from datasets import load_dataset
import soundfile as sf
processor = AutoProcessor.from_pretrained("facebook/wav2vec2-large-lv60")
model = FlaxWav2Vec2Model.from_pretrained("facebook/wav2vec2-large-lv60")
def map_to_array(batch):
speech, _ = sf.read(batch["file"])
batch["speech"] = speech
return batch
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
ds = ds.map(map_to_array)
input_values = processor(
ds["speech"][0], sampling_rate=16_000, return_tensors="np"
).input_values # Batch size 1
hidden_states = model(input_values).last_hidden_state
print(hidden_states)
| nepyope/Stable-Giffusion | tests/test_wav2vec2.py | test_wav2vec2.py | py | 692 | python | en | code | 0 | github-code | 50 |
38731500890 | 'Getting Information from IMDb library '
import imdb
ia = imdb.IMDb()
name = 'the mummy'
search = ia.search_movie(name)
# print(search)
# for i in search:
# print(i)
the_mummy = ia.get_movie('0120616')
# for director in the_mummy['directors']:
# print(director['name'])
# print('Genre:')
# for genre in the_mummy['genres']:
# print(genre)
people = ia.search_person('Will Smith')
for person in people:
print(person.personID, person['name'])
| BekBrace/IMDB-LIBRARY-FOR-MOVIES---PYTHON | main.py | main.py | py | 460 | python | en | code | 3 | github-code | 50 |
37061971259 | #May 23 2020
#Python Basics CourseWork University of Miuchigan
#7.6. The Accumulator Pattern
#1.1 Write code to create a list of integers from 0 through 52 and assign that list to the variable numbers.
#You should use a special Python function – do not type out the whole list yourself. HINT: You can do this in one line of code!
numbers = range(0,53)
#You passed: 100.0% of the tests
#1.2Count the number of characters in string str1. Do not use len(). Save the number in variable numbs.
str1 = "I like nonsense, it wakes up the brain cells. Fantasy is a necessary ingredient in living."
numbs = 0
for string in str1:
numbs += 1
print(numbs)
#You passed: 100.0% of the tests
#1.3 Create a list of numbers 0 through 40 and assign this list to the variable numbers. Then, accumulate the total of the list’s values and assign that sum to the variable sum1.
numbers = list(range(0,41))
sum1 = 0
for s in numbers:
sum1 += s
print(sum1)
#You passed: 100.0% of the tests
#7.7. Traversal and the for Loop: By Index
fruits = ['apple', 'pear', 'apricot', 'cherry', 'peach']
for n in range(5):
print(n, fruits[n])
fruits = ['apple', 'pear', 'apricot', 'cherry', 'peach']
for n in range(len(fruits)):
print(n, fruits[n])
fruits = ['apple', 'pear', 'apricot', 'cherry', 'peach']
for fruit in fruits:
print(fruit)
#moreiter-6-1: How many times is the letter p printed by the following statements?
s = "python"
for idx in range(len(s)):
print(s[idx % 2])
#D. 3 ✔️ idx % 2 is 0 whenever idx is even
import luther.jpg
img = image.Image("luther.jpg")
print(img.getWidth())
print(img.getHeight())
p = img.getPixel(45, 55)
print(p.getRed(), p.getGreen(), p.getBlue())
| CoralieHelm/Introduction-To-Python-Basics-Course-University-of-Michigan | 7_6_Accumulator_Pattern.py | 7_6_Accumulator_Pattern.py | py | 1,710 | python | en | code | 1 | github-code | 50 |
9000778955 | import csv
import re
from bs4 import BeautifulSoup
import requests
url = 'http://anglicismdictionary.ru/Slovar'
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 YaBrowser/23.1.4.779 Yowser/2.5 Safari/537.36"
}
req = requests.get(url=url,
headers=headers
)
src = req.text
with open('pages/main.html', 'w', encoding='utf-8') as file:
file.write(src)
# Считываем скешированный файл для получения элементов со ссылками
with open('main.html', 'r', encoding='utf-8') as file:
src = file.read()
soup = BeautifulSoup(src, 'lxml')
rows_letters = soup.find(class_='content').find('table').find('tbody').findAll('tr')
# Получение словаря ссылок
letters_urls = {}
for row in rows_letters:
prefix = 'http://anglicismdictionary.ru'
temp_letters_urls = {item.find('a')['href'][1:]: f"{prefix + item.find('a')['href']}" for item in row.findAll('td') if item.find('a')}
letters_urls: dict = letters_urls | temp_letters_urls
# Заголовок csv файла в итоговый файл
with open('common.csv', 'w', encoding='utf-8-sig', newline='') as file:
writer = csv.writer(file, delimiter=';')
writer.writerow(
(
'Word',
'Origin',
'Description',
'Explanation'
)
)
error_count = 0
pattern = re.compile(r"\s*([А-ЯЁA-Z].*?)\s?\((.*?)\)\.?.+?(\b[а-яё].*?)?(\b[А-ЯЁA-Z].*)", flags=re.MULTILINE)
for k, v in letters_urls.items():
src = requests.get(v, headers).text
# Кеширование страницы
with open(f'pages/{k}_page.html', 'w', encoding='utf-8-sig') as file:
file.write(src)
with open(f'pages/{k}_page.html', 'r', encoding='utf-8-sig') as file:
data = file.read()
# Парсинг каждой страницы
soup = BeautifulSoup(data, 'lxml')
articles = soup.find(class_='content').findAll('p')
for article in articles:
article_str = article.get_text()
try:
article_tuple = pattern.match(article_str).groups() # Парсинг по паттерну
except AttributeError as er:
print(f'Ошибка {er}-{error_count} в: {article_str}')
error_count += 1
# Запись в итоговый файл
with open('common.csv', 'a', encoding='utf-8-sig', newline='') as file:
writer = csv.writer(file, delimiter=';')
writer.writerow(article_tuple)
| TimurSuvorov/YandexAliceSkill | WebScrapingForProject/SiteScraper.py | SiteScraper.py | py | 2,775 | python | en | code | 0 | github-code | 50 |
21799016133 | # _____ _ _____
# | __ \ | | / ____|
# | | | | ___ _ __ __ _ ___ ___ | | __ | (___ ___ _ __
# | | | |/ _ \| '_ \ / _` |/ _ \ / _ \| |/ / \___ \ / _ \| '_ \
# | |__| | (_) | | | | (_| | (_) | (_) | < ____) | (_) | | | |
# |_____/ \___/|_| |_|\__, |\___/ \___/|_|\_\ |_____/ \___/|_| |_|
# __/ |
# |___/
import numpy as np
from scipy.stats import chi2, f, t
from Multivar import MultivariateData
def two_population_mean_comparison(multivardata1: MultivariateData, multivardata2: MultivariateData, test_only=False, alpha=0.05):
"""
Compare means between Multivariate Data from two populations
params:
multivardata1: MultivariateData from first population
multivardata2: MultivariateData from second population
alpha: 1-significant level
return_constant: return const if true
bool
result:
float: f-statistic value
tuple: (int, int)
returns: miscalaeneous parameters
"""
assert isinstance(multivardata1, MultivariateData)
assert isinstance(multivardata2, MultivariateData)
assert multivardata1.p == multivardata2.p, f"Dimension Error: {multivardata1.p} != {multivardata2.p}"
results = {}
significance = 1-alpha
n1 = multivardata1.n
results['n1'] = n1
n2 = multivardata2.n
results['n2'] = n2
p = multivardata1.p
results['p'] = p
mean1 = multivardata1.mean_vector
results['mean1'] = mean1
mean2 = multivardata2.mean_vector
results['mean2'] = mean2
mean_diff = mean1 - mean2
cov1 = multivardata1.covariance_matrix
results['cov1'] = cov1
cov2 = multivardata2.covariance_matrix
results['cov2'] = cov2
s_p = ((n1-1)*cov1 + (n2-1)*cov2) / (n1 + n2 - 2)
results['s_p'] = s_p
t_sqrd = ((n1 * n2) / (n1 + n2)) * \
np.matmul(np.matmul(mean_diff, np.linalg.inv(s_p)), mean_diff)
const = (n1 + n2 - p - 1) / (p*(n1 + n2 - 2))
f_statistic = const * t_sqrd
results['f-statistic'] = f_statistic
deg_free = (p, n1 + n2 - p - 1)
results['df'] = deg_free
c_sqrd = 1/const * f.ppf(significance, deg_free[0], deg_free[1])
results['c_sqrd'] = c_sqrd
if test_only:
print(f"---------------------HOTELLING'S T^2 TEST----------------------")
print(
f"Null Hypothesis:\n Mean vector {mean1}\n is equal to {mean2}")
print(f"Distribution: F{deg_free}")
print(f"F statistic: {f_statistic}")
p_value = 1 - f.cdf(f_statistic, deg_free[0], deg_free[1])
print(f"Significance: {significance*100}%")
print(f"P-value: {p_value}")
if p_value < alpha:
print(f"Conclusion: REJECT the null hypothesis")
else:
print(f"Conclusion: DO NOT reject the null hypothesis")
print(f"---------------------------------------------------------------")
return
return results
def ellipsoid_info(m1: MultivariateData, m2: MultivariateData, alpha=0.05):
"""
returns ellipsoid information for two multivariate data samples from separate populations
params:
m1: MultivariateData from first population
m2: MultivariateData from second population
alpha: 1-significance level
return:
dict: contains axis and length information. Degree of freedom is derived from mean_comparison
Example {
"axis": (floats...),
"length": (floats...)
}
"""
params = two_population_mean_comparison(
m1, m2, test_only=False, alpha=alpha)
n1 = params['n1']
n2 = params['n2']
s_p = params['s_p']
c_sqrd = params['c_sqrd']
result = {}
significance = 1-alpha
eigenvalues, eigenvectors = np.linalg.eig(s_p)
for i, lmbda in enumerate(eigenvalues):
conf_half_len = np.sqrt(lmbda) * np.sqrt((1/n1 + 1/n2) * c_sqrd)
conf_axe_abs = conf_half_len * eigenvectors[i]
result[i] = {
"axis": conf_axe_abs,
"length": conf_half_len * 2
}
return result
def component_means_confidence_interval(m1: MultivariateData, m2: MultivariateData, is_bonferroni=False, alpha=0.05):
"""
returns lower and upperbounds of component means
params:
m1: MultivariateData from first population
m2: MultivariateData from second population
is_bonferroni: use bonferroni method if true, standard method using sqrt of c_sqrt if false.
alpha: 1-significance level
return:
dict: lower and upperbounds of features
Example {
"feature1": {
"ub": float,
"lb": float,
},
"feature2": {...},
...
}
"""
result = {}
params = two_population_mean_comparison(
m1, m2, test_only=False, alpha=alpha)
c = np.sqrt(params['c_sqrd'])
p = params['p']
n1 = params['n1']
n2 = params['n2']
s_p = params['s_p']
mean1 = params['mean1']
mean2 = params['mean2']
mean_diff = mean1 - mean2
if not is_bonferroni:
for i in range(p):
ci = {
'ub': mean_diff[i] + c * np.sqrt((1/n1 + 1/n2) * s_p[i, i]),
'lb': mean_diff[i] - c * np.sqrt((1/n1 + 1/n2) * s_p[i, i])
}
result[f"feature{i+1}"] = ci
else:
for i in range(p):
ci = {
'ub': mean_diff[i] + t.ppf(1 - alpha/(2*p), n1+n2-2) * np.sqrt((1/n1 + 1/n2) * s_p[i, i]),
'lb': mean_diff[i] - t.ppf(1 - alpha/(2*p), n1+n2-2) * np.sqrt((1/n1 + 1/n2) * s_p[i, i])
}
result[f"feature{i+1}"] = ci
return result
def two_population_profile_analysis(m1: MultivariateData, m2: MultivariateData, method="parallel", alpha=0.05):
"""
conduct profile analysis between two multivariate data derived from two populations
params:
m1: multivariate data from population1
m2: multivariate data from population2
method: "parallel" or "coincident"(also means flat)
str
alpha: 1 - significance
"""
def __stats_calc(c_mat, mean_difference, n1, n2, s_p):
c_matXmean_diff = np.matmul(c_mat, mean_difference)
if c_mat.shape != (p,):
middle_term = np.linalg.inv(
(1/n1 + 1/n2)*np.matmul(np.matmul(c_mat, s_p), np.transpose(c_mat)))
t_statistic = np.matmul(np.matmul(np.transpose(
c_matXmean_diff), middle_term), c_matXmean_diff)
df = (p-1, n1+n2-p)
d_sqrd = ((n1 + n2 - 2) * (p-1) / (n1 + n2 - p)) * \
f.ppf(significance, df[0], df[1])
else: # when middle term is constant
middle_term = 1 / \
((1/n1 + 1/n2)*np.matmul(np.matmul(c_mat, s_p), np.transpose(c_mat)))
t_statistic = c_matXmean_diff * middle_term * c_matXmean_diff
df = (1, n1 + n2 - 2)
d_sqrd = f.ppf(significance, df[0], df[1])
return t_statistic, df, d_sqrd
def __get_parallel_c_matrix(p):
minus_one_matrix = np.delete(
np.hstack((np.zeros((p, 1)), -np.identity(p))), -1, 1)
identity_matrix = np.identity(p)
return np.delete(identity_matrix + minus_one_matrix, -1, 0)
params = two_population_mean_comparison(
m1, m2, test_only=False, alpha=alpha)
p = params['p']
n1 = params['n1']
n2 = params['n2']
s_p = params['s_p']
mean1 = params['mean1']
mean2 = params['mean2']
mean_diff = mean1 - mean2
significance = 1-alpha
if method == "parallel":
c_matrix = __get_parallel_c_matrix(p)
t_statistic, df, d_sqrd = __stats_calc(
c_matrix, mean_diff, n1, n2, s_p)
elif method == "coincident":
c_matrix = np.transpose(np.ones(p))
t_statistic, df, d_sqrd = __stats_calc(
c_matrix, mean_diff, n1, n2, s_p)
print(f"------------------------PROFILE ANALYSIS-------------------------")
print(f"C-matrix: \n{c_matrix}")
print(f"Mean vector | pop1: {mean1}")
print(f"Mean vector | pop2: {mean2}")
print(
f"Null Hypothesis:\n {np.matmul(c_matrix, mean1)} is equal to\n {np.matmul(c_matrix, mean2)}")
print(f"Distribution: F{df}")
print(f"T^2 Statistic: {t_statistic}")
print(f"d^2: {d_sqrd}")
print(f"Significance: {significance*100}%")
if t_statistic > d_sqrd:
print(f"Conclusion: REJECT the null hypothesis")
else:
print(f"Conclusion: DO NOT reject the null hypothesis")
print(f"-----------------------------------------------------------------")
return
if __name__ == "__main__":
import pprint
import pandas as pd
turtle_df = pd.read_csv(
'turtle.dat',
header=None,
index_col=False,
delim_whitespace=True)
turtle_df.columns = ['x1', 'x2', 'x3', 'gender']
fem = MultivariateData(
turtle_df[turtle_df['gender'] == 'female'].iloc[:, 0:3])
mal = MultivariateData(
turtle_df[turtle_df['gender'] == 'male'].iloc[:, 0:3])
two_population_profile_analysis(fem, mal, method="coincident")
| donny-son/multivariate-data | MultivarFunctions.py | MultivarFunctions.py | py | 9,199 | python | en | code | 0 | github-code | 50 |
30280139586 | """
Project: AdaBoost Implementation
Authors: przewnic
Date: 01.2021
"""
# Implementation of class helping reading and manipulating
# and checking data
import csv
from Person import Person
import re
class MalformedData(Exception):
def __init__(self, msg, row=None):
super().__init__(msg)
self.row = row
class Database():
""" Database for reading and processing data from file """
def __init__(self, people=[]):
self.people = people
self.class_values = []
def load_from_file(self, path):
"""
:para: path
"""
with open(path, newline="") as file:
""" fieldnames=['age', 'sex', 'cp', 'trestbps',
'chol', 'fbs', 'restecg', 'thalach',
'exang', 'oldpeak', 'slope', 'ca',
'thal', 'num']
"""
reader = csv.reader(file)
self.people = self.form_data(reader)
self.class_values = self.get_class_values()
def form_data(self, reader):
"""
:param: reader: iterable
"""
result = []
regnumber = re.compile(r'\?')
try:
for row in reader:
if regnumber.search(' '.join(row)) is None:
for _ in range(14):
row[_] = float(row[_])
if row[13] > 1.0:
row[13] = 1.0
result.append(
Person(
row[0], row[1], row[2],
row[3], row[4], row[5],
row[6], row[7], row[8],
row[9], row[10], row[11],
row[12], row[13]
)
)
return result
except IndexError:
raise MalformedData(f"Missing column in file", row)
def get_class_values(self):
values = set()
for person in self.people:
values.add(person.num)
return list(values)
def get_len(self):
return len(self.people)
def get_X(self):
X = []
for person in self.people:
X.append(person.get_atributes())
return X
def get_y(self):
y = []
for person in self.people:
y.append(person.predicted_value())
return y
def sort(self, attribute):
"""
:param: attribute: sorting based on index of attribute
"""
self.people.sort(key=lambda person: person.x[attribute])
def get_sub_db(self, indices):
"""
Creates a subset of database for testing and learning
:param: indices: a list of indices
"""
sub_people = []
for idx in indices:
sub_people.append(self.people[idx])
return Database(sub_people)
def __str__(self):
people = f""
for person in self.people:
people += f"{person}\n"
return people
if __name__ == "__main__":
try:
db = Database()
db.load_from_file("data/processed.cleveland.data")
print(db)
db.sort(0)
print(db)
except MalformedData as e:
print(
f"Error: {e}\n"
f"Row: {e.row}"
)
| przewnic/AdaBoost | Database.py | Database.py | py | 3,477 | python | en | code | 0 | github-code | 50 |
30968179718 | import os, sqlite3
import discord
from discord.ext import commands
bot = commands.Bot(command_prefix='!', intents=discord.Intents.all())
@bot.event
async def on_ready():
pass
# print("Бот готов к работе")
#
# global base, cur
# base = sqlite3.connect('Бот.db')
# cur = base.cursor()
# if base:
# print("Database connected...OK")
@bot.command()
async def test(ctx):
await ctx.send('Бот в онлайне!')
@bot.command()
async def инфо(ctx,arg=None):
author = ctx.message.author
if arg == None:
await ctx.send(f'{author.mention}\nВведите: \n!инфо общая\n!инфо команды')
elif arg == 'общая':
await ctx.send(f'{author.mention}\nЯ контролёр и слежу за порядком в чате. У тебя 3 жизни. 3 мата - БАН!')
elif arg == 'команды':
await ctx.send(f'{author.mention}\n!test - Бот онлайн?\n !статус - мои жизни и предупреждения')
else:
await ctx.send(f'{author.mention}\nТакой команды нет... Введи !команды чтобы узнать список доступных команд.')
bot.run(os.getenv('TOKEN'))
| AlexeyOskilko/discord_bot | bot/botrun.py | botrun.py | py | 1,242 | python | ru | code | 0 | github-code | 50 |
36635598069 | #!env python -*- python-indent:4 -*-
import cgi
import datetime
import feedgenerator
import json
import os
import re
import twitter
CONFIG = os.path.join(os.path.dirname(__file__), 'config.json')
def main():
with open(CONFIG, 'r') as f:
config = json.loads(f.read())
with open(config['user_file'], 'r') as f:
screen_names = f.readlines()
api = twitter.Api(
consumer_key=config['consumer_key'],
consumer_secret=config['consumer_secret'],
access_token_key=config['access_token_key'],
access_token_secret=config['access_token_secret']
)
for user in (x.strip() for x in screen_names):
feed = feedgenerator.Rss201rev2Feed(
title=u"Tweets for %s" % user,
link=u"http://twitter.com/%s" % user,
description=u"Tweets for %s" % user,
language=u"en")
statuses = api.GetUserTimeline(screen_name=user)
for status in statuses:
pubdate = datetime.datetime.strptime(
status.created_at, '%a %b %d %H:%M:%S +0000 %Y')
link = 'http://twitter.com/%s/status/%s' % (user, status.id)
text = FilterStatus(status.text)
feed.add_item(
description=Linkify(text, (x.url for x in status.urls)),
unique_id=link,
link=link,
title=text,
pubdate=pubdate)
with open('%s/%s.rss' % (config['feed_directory'], user), 'w') as f:
feed.write(f, 'utf-8')
def Linkify(text, urls):
for url in urls:
text = text.replace(
url, '<a href="%s">%s</a>' % (url, url))
return text
def FilterStatus(text):
    """Normalize a tweet's text for feed display.

    Replies ("@a @b rest") become "@: rest (@a @b)"; retweets ("RT @x rest")
    become "RT: rest (@x)"; anything else is returned unchanged.

    Fix: uses str.partition instead of split(' ', 1), so a tweet that is a
    bare mention with no trailing text (e.g. "@solo") no longer raises
    ValueError while unpacking.
    """
    if text.startswith('@'):
        users = []
        # Peel leading @mentions off the front; partition yields '' when
        # there is no space left, which cleanly terminates the loop.
        while text.startswith('@'):
            user, _, text = text.partition(' ')
            users.append(user)
        return "@: %s (%s)" % (text, ' '.join(users))
    if text.startswith('RT '):
        m = re.search(r'^RT (@\S+)\s*(.+)$', text)
        if m:
            # Drop the trailing ':' from the retweeted user's handle.
            return 'RT: %s (%s)' % (m.group(2), m.group(1).replace(':', ''))
        return text
    return text
if __name__ == '__main__':
main()
| Packetslave/twitter_to_rss | t2r.py | t2r.py | py | 2,147 | python | en | code | 0 | github-code | 50 |
def selection_sort_comparisons(arr: list):
    """Selection-sort *arr* in place and return the number of element comparisons."""
    count = 0
    n = len(arr)
    for pos in range(n):
        smallest = pos
        # Scan the unsorted tail for the minimum, counting each comparison.
        for candidate in range(pos + 1, n):
            count += 1
            if arr[candidate] < arr[smallest]:
                smallest = candidate
        arr[smallest], arr[pos] = arr[pos], arr[smallest]
    return count
def insertion_sort_comparisons(arr: list):
    """Insertion-sort *arr* in place and return the number of element comparisons.

    Fix: the original incremented the counter once more after the inner loop
    unconditionally, which overcounts by one whenever the loop stopped because
    ``j`` reached 0 (no array comparison was actually performed there). Here the
    counter is incremented exactly once per ``arr[j] < arr[j-1]`` test.
    """
    comparisons = 0
    for i in range(1, len(arr)):
        j = i
        while j > 0:
            comparisons += 1  # one arr[j] < arr[j-1] test
            if arr[j] < arr[j - 1]:
                arr[j], arr[j - 1] = arr[j - 1], arr[j]
                j -= 1
            else:
                break
    return comparisons
def merge_sort_comparisons(arr):
    """Merge-sort *arr* in place, returning the total comparison count."""
    if len(arr) <= 1:
        return 0
    middle = len(arr) // 2
    first, second = arr[:middle], arr[middle:]
    # Sort both halves recursively, then merge them back into arr.
    total = merge_sort_comparisons(first)
    total += merge_sort_comparisons(second)
    total += merge_comparisons(first, second, arr)
    return total
def merge_comparisons(left, right, arr):
    """Merge the sorted lists *left* and *right* into *arr*, counting comparisons."""
    count = 0
    li = ri = out = 0
    # Take the smaller head element until one side is exhausted.
    while li < len(left) and ri < len(right):
        count += 1
        if left[li] < right[ri]:
            arr[out] = left[li]
            li += 1
        else:
            arr[out] = right[ri]
            ri += 1
        out += 1
    # Copy whichever side still has elements (no comparisons needed).
    for value in left[li:]:
        arr[out] = value
        out += 1
    for value in right[ri:]:
        arr[out] = value
        out += 1
    return count
def shell_sort_comparisons(arr: list):
    """Shell-sort *arr* in place (Knuth gap sequence) and return the comparison count.

    Fixes two defects in the original:
    - the inner loop decremented ``j`` by 1 instead of by the gap ``h``, so the
      gapped passes did not perform true h-sorted insertion;
    - the counter was bumped once more after the loop unconditionally, which
      overcounts whenever the loop stopped because ``j`` dropped below the gap.
    """
    comparisons = 0
    gap = 1
    # Knuth sequence: 1, 4, 13, 40, ...
    while gap < len(arr) / 3:
        gap = 3 * gap + 1
    while gap >= 1:
        for i in range(gap, len(arr)):
            j = i
            while j >= gap:
                comparisons += 1  # one arr[j] < arr[j-gap] test
                if arr[j] < arr[j - gap]:
                    arr[j], arr[j - gap] = arr[j - gap], arr[j]
                    j -= gap
                else:
                    break
        gap //= 3
    return comparisons
| YuraBD/sorting_algs_compare | sort_comparisons_count.py | sort_comparisons_count.py | py | 1,854 | python | en | code | 0 | github-code | 50 |
72056399516 | from __future__ import division, print_function
import os
import argparse
import configparser
import logging
definitions = [
# model type default help
('model', (str, 'unet', "Model: unet, dilated-unet, dilated-densenet")),
('features', (int, 64, "Number of features maps after first convolutional layer.")),
('depth', (int, 4, "Number of downsampled convolutional blocks.")),
('temperature', (float, 1.0, "Temperature of final softmax layer in model.")),
('padding', (str, 'same', "Padding in convolutional layers. Either `same' or `valid'.")),
('dropout', (float, 0.0, "Rate for dropout of activation units.")),
('classes', (str, 'inner', "One of `inner' (endocardium), `outer' (epicardium), or `both'.")),
('batchnorm', {'default': False, 'action': 'store_true',
'help': "Apply batch normalization before nonlinearities."}),
# loss
('loss', (str, 'pixel', "Loss function: `pixel' for pixel-wise cross entropy, `dice' for dice coefficient.")),
('loss-weights', {'type': float, 'nargs': '+', 'default': [0.1, 0.9],
'help': "When using dice or jaccard loss, how much to weight each output class."}),
# training
('epochs', (int, 20, "Number of epochs to train.")),
('batch-size', (int, 32, "Mini-batch size for training.")),
('validation-split', (float, 0.2, "Percentage of training data to hold out for validation.")),
('optimizer', (str, 'adam', "Optimizer: sgd, rmsprop, adagrad, adadelta, adam, adamax, or nadam.")),
('learning-rate', (float, None, "Optimizer learning rate.")),
('momentum', (float, None, "Momentum for SGD optimizer.")),
('decay', (float, None, "Learning rate decay (not applicable for nadam).")),
('shuffle_train_val', {'default': False, 'action': 'store_true',
'help': "Shuffle images before splitting into train vs. val."}),
('shuffle', {'default': False, 'action': 'store_true',
'help': "Shuffle images before each training epoch."}),
('seed', (int, None, "Seed for numpy RandomState")),
# files
('datadir', (str, '.', "Directory containing patientXX/ directories.")),
('outdir', (str, '.', "Directory to write output data.")),
('outfile', (str, 'weights-final.hdf5', "File to write final model weights.")),
('load-weights', (str, '', "Load model weights from specified file to initialize training.")),
('checkpoint', {'default': False, 'action': 'store_true',
'help': "Write model weights after each epoch if validation accuracy improves."}),
# augmentation
('augment-training', {'default': False, 'action': 'store_true',
'help': "Whether to apply image augmentation to training set."}),
('augment-validation', {'default': False, 'action': 'store_true',
'help': "Whether to apply image augmentation to validation set."}),
('rotation-range', (float, 180, "Rotation range (0-180 degrees)")),
('width-shift-range', (float, 0.1, "Width shift range, as a float fraction of the width")),
('height-shift-range', (float, 0.1, "Height shift range, as a float fraction of the height")),
('shear-range', (float, 0.1, "Shear intensity (in radians)")),
('zoom-range', (float, 0.05, "Amount of zoom. If a scalar z, zoom in [1-z, 1+z]. Can also pass a pair of floats as the zoom range.")),
('fill-mode', (str, 'nearest', "Points outside boundaries are filled according to mode: constant, nearest, reflect, or wrap")),
('alpha', (float, 500, "Random elastic distortion: magnitude of distortion")),
('sigma', (float, 20, "Random elastic distortion: length scale")),
('normalize', {'default': False, 'action': 'store_true',
'help': "Subtract mean and divide by std dev from each image."}),
]
noninitialized = {
'learning_rate': 'getfloat',
'momentum': 'getfloat',
'decay': 'getfloat',
'seed': 'getint',
}
def update_from_configfile(args, default, config, section, key):
    """Copy one option from *config* into the *args* namespace.

    Empty/missing file values are ignored, and a value already changed on the
    command line (i.e. different from *default*) always wins over the file.
    """
    raw = config.get(section, key)
    if raw is None or raw == '':
        return
    # Command-line arguments override config file values.
    if getattr(args, key) != default:
        return
    # configparser stores strings; coerce to the type of the default value.
    # (bool must be tested before int, since bool is an int subclass.)
    parsed = raw
    if isinstance(default, bool):
        parsed = config.getboolean(section, key)
    elif isinstance(default, int):
        parsed = config.getint(section, key)
    elif isinstance(default, float):
        parsed = config.getfloat(section, key)
    elif isinstance(default, str):
        parsed = raw
    elif isinstance(default, list):
        # special case (HACK): loss-weights is a whitespace-separated float list
        parsed = [float(token) for token in raw.split()]
    elif default is None:
        # options with no initial value: look up the right getter by name
        parsed = getattr(config, noninitialized[key])(section, key)
    setattr(args, key, parsed)
def parse_arguments():
    """Build the CLI parser from the module-level `definitions` table, parse
    sys.argv, overlay values from an optional config file (command-line flags
    take precedence), log the final option set, and return the namespace."""
    parser = argparse.ArgumentParser(
        description="Train U-Net to segment right ventricles from cardiac "
        "MRI images.")
    # Each entry is either (type, default, help) or a ready-made kwargs dict.
    for argname, kwargs in definitions:
        d = kwargs
        if isinstance(kwargs, tuple):
            d = dict(zip(['type', 'default', 'help'], kwargs))
        parser.add_argument('--' + argname, **d)
    # allow user to input configuration file
    parser.add_argument(
        'configfile', nargs='?', type=str, help="Load options from config "
        "file (command line arguments take precedence).")
    args = parser.parse_args()
    if args.configfile:
        logging.info("Loading options from config file: {}".format(args.configfile))
        config = configparser.ConfigParser(
            inline_comment_prefixes=['#', ';'], allow_no_value=True)
        config.read(args.configfile)
        for section in config:
            for key in config[section]:
                if key not in args:
                    raise Exception("Unknown option {} in config file.".format(key))
                update_from_configfile(args, parser.get_default(key),
                                       config, section, key)
    for k,v in vars(args).items():
        logging.info("{:20s} = {}".format(k, v))
    return args
| chuckyee/cardiac-segmentation | rvseg/opts.py | opts.py | py | 6,752 | python | en | code | 274 | github-code | 50 |
14402608190 | import config
from db import Database
from flask_cors import CORS
from flask import Flask, request, jsonify
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
dsn=config.SENTRY_DSN,
integrations=[FlaskIntegration()],
# Set traces_sample_rate to 1.0 to capture 100%
# of transactions for performance monitoring.
# We recommend adjusting this value in production.
traces_sample_rate=1.0
)
db = Database('logs.db')
app = Flask(__name__)
CORS(app)
@app.route('/session', methods=['POST'])
def session():
    """Register a new client logging session (id, app version, user agent)."""
    if request.method != 'POST':
        return jsonify(success=False)
    payload = request.get_json()
    agent = request.headers.get('User-Agent')
    db.add_session(payload['session_id'], payload['version'], agent)
    return jsonify(success=True)
@app.route('/snapshot', methods=['POST'])
def snapshot():
    """Attach a state snapshot to an existing logging session."""
    if request.method != 'POST':
        return jsonify(success=False)
    payload = request.get_json()
    db.add_snapshot(payload['session_id'], payload['snapshot'])
    return jsonify(success=True)
if __name__ == '__main__':
app.run() | frnsys/half_earth | logserver/main.py | main.py | py | 1,122 | python | en | code | 16 | github-code | 50 |
27600870040 | from collections import deque
# Read the 3x3 board as nine tokens; "x" marks the blank tile.
a = list(input().split())
# S: packed start state; T: packed goal state (1..8 with the blank last).
S = 0; T = 123456789
for i in range(9):
    if a[i] == "x":
        # Encode the blank as digit 9 so every state fits in one integer.
        S = S * 10 + 9
    else:
        S = S * 10 + ord(a[i]) - ord('0')
# Adjacency of cell indices on the 3x3 grid (row-major order).
d = {0: [1, 3], 1: [0, 2, 4], 2: [1, 5], 3: [0, 4, 6], 4: [1, 3, 5, 7], 5: [2, 4, 8], 6: [3, 7], 7: [4, 6, 8], 8: [5, 7]}
# BFS distance from S to each discovered state.
dis = {S: 0}
def calc(a):
    """Pack the first nine entries of *a* into a single base-10 integer state."""
    state = 0
    for idx in range(9):
        state = state * 10 + a[idx]
    return state
def bfs(S, T):
    """Breadth-first search from packed state S toward T.

    States are 9-digit integers (digit 9 encodes the blank); the global `d`
    maps a blank index to its neighbouring cell indices and the global `dis`
    collects the distance of every discovered state.
    """
    queue = deque([S])
    while queue:
        state = queue[0]
        if state == T:
            break
        queue.popleft()
        # Unpack the integer back into a list of its digits (most significant
        # first). NOTE(review): states here never contain digit 0, so the
        # loop always recovers all nine digits.
        digits = []
        rest = state
        while rest:
            digits.append(rest % 10)
            rest //= 10
        digits.reverse()
        blank = digits.index(9)
        for neighbor in d[blank]:
            # Slide the neighbouring tile into the blank, record the new
            # state if unseen, then undo the move.
            digits[blank], digits[neighbor] = digits[neighbor], digits[blank]
            nxt = calc(digits)
            if nxt not in dis:
                dis[nxt] = dis[state] + 1
                queue.append(nxt)
            digits[neighbor], digits[blank] = digits[blank], digits[neighbor]
print(dis[T] if T in dis else -1) | Nickel-Angel/ACM-and-OI | AcWing/845.py | 845.py | py | 978 | python | en | code | 0 | github-code | 50 |
38021515778 | """
*packageName :
* fileName : 2789.블랙잭(2)
* author : ipeac
* date : 2022-09-29
* description :
* ===========================================================
* DATE AUTHOR NOTE
* -----------------------------------------------------------
* 2022-09-29 ipeac 최초 생성
"""
from itertools import combinations
n, m = map(int, input().split())
# Pick 3 cards whose sum is as close to the target m as possible without
# exceeding it. Order is irrelevant for a sum, so combinations suffice —
# the original used permutations and evaluated every triple 3! = 6 times.
cards = list(map(int, input().split()))
best_gap = 10e9  # smallest (m - sum) seen so far
best_sum = 0     # the sum achieving best_gap
for triple in combinations(cards, 3):
    total = sum(triple)
    if total > m:
        continue
    gap = m - total
    if gap < best_gap:
        best_gap = gap
        best_sum = total
print(best_sum)
| guqtls14/python-algorism-study | 박상준/완전탐색&백트래킹/2789.블랙잭(2).py | 2789.블랙잭(2).py | py | 731 | python | en | code | 0 | github-code | 50 |
29302273478 | from datetime import datetime, timedelta
def round_time(dt=None, date_delta=timedelta(minutes=1), to="average"):
    """
    Round a datetime object to a multiple of a timedelta.

    dt : datetime to round; defaults to now.
    date_delta : rounding granularity; defaults to one minute.
    to : "up", "down", or anything else for round-to-nearest (default).
    from: http://stackoverflow.com/questions/3463930/how-to-round-the-minute-of-a-datetime-object-python
    """
    if dt is None:
        dt = datetime.now()
    step = date_delta.total_seconds()
    elapsed = (dt - dt.min).seconds
    aligned = elapsed % step == 0 and dt.microsecond == 0
    if aligned or to not in ("up", "down"):
        # Nearest multiple; when already aligned this is exact.
        target = (elapsed + step / 2) // step * step
    elif to == "up":
        target = (elapsed + dt.microsecond / 1000000 + step) // step * step
    else:  # "down"
        target = elapsed // step * step
    return dt + timedelta(0, target - elapsed, -dt.microsecond)
| theresnotime/wm-revert-counter | SOTime.py | SOTime.py | py | 1,159 | python | en | code | 0 | github-code | 50 |
73415623836 | import pandas as pd
import psycopg2 as pg
import numpy as np
import os
from urllib.parse import quote
from sqlalchemy import create_engine
import json
import requests
USERNAME = "ckan"
PASSWORD = "ckan"
DB = "datastore"
IPADDR = "192.168.10.47"
engine = create_engine(f"postgresql://{USERNAME}:%s@{IPADDR}/{DB}" % quote(f'{PASSWORD}'))
def getID(dataset):
    """Return the id of the first resource of CKAN package *dataset*.

    NOTE(review): the bearer token is hard-coded into the source; it should
    be moved to configuration/environment rather than committed here.
    """
    r = requests.get(f"http://192.168.10.47:8080/api/3/action/package_show?id={dataset}",
                    headers={'Authorization': 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOjE2 \
                     ODYyMDI0MTgsImp0aSI6IlVLaGFsSWU1eG1xUjZNR2o3bGQ4amVkTnEzeHB1X2QyZ05hS1VUV \
                     ENVNkJRVTZaM3VsX295MkVicE1FRzBkSnNKNHluZTFFM0lVRUlaTlVPIn0.-2GI8s-h4MZrZ\
                     2MeOtkZvqyWyEw54d5m_p7vLboEekY'})
    data = r.json()
    # First resource of the package is the datastore table id.
    result = data['result']['resources'][0]['id']
    return result
note = pd.read_sql(f"""
SELECT
cols.data_type,cols."column_name" cols,
(SELECT
pg_catalog.col_description (C.OID, cols.ordinal_position::INT)
FROM
pg_catalog.pg_class C
WHERE
C.OID = (SELECT ('"' || cols.TABLE_NAME || '"')::REGCLASS::OID)
AND C.relname = cols.TABLE_NAME) AS "comment"
FROM
information_schema.COLUMNS cols
WHERE
cols.table_catalog = '{DB}'
AND cols.TABLE_NAME = '{getID('test')}'
AND cols.table_schema = 'public';
""", engine)
# Loop over every column of the table and store its metadata as a JSON
# blob in the PostgreSQL column comment.
TABLE_ID = getID("test")  # resource id is loop-invariant; fetch once, not per row
for row in note.itertuples():
    meta = json.dumps({
        "notes": row.comment,
        "type_override": row.data_type,
        "label": row.cols.upper()
    })
    # Values are interpolated directly into the SQL text; escape embedded
    # single quotes (SQL doubles them) so a quote inside the JSON cannot
    # break the statement.
    escaped = meta.replace("'", "''")
    with engine.begin() as conn:
        conn.execute(f'comment on column public."{TABLE_ID}".{row.cols} IS '+"'"+escaped+"';")
    print("Update Comment:", row.cols)
9122095153 | from allennlp.common.checks import ConfigurationError
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
from overrides import overrides
import torch
class PytorchSeq2VecWrapper(Seq2VecEncoder):
    """Copy of ``allennlp.modules.seq2vec_encoders.PytorchSeq2VecWrapper`` that adds support to return the final
    hidden state for all layers and also for LSTMs to return the state-memory tuple instead of just the state.
    """
    def __init__(self,
                 module: torch.nn.modules.RNNBase,
                 return_all_layers: bool = False,
                 return_all_hidden_states: bool = False) -> None:
        """Wrap *module* (a batch-first RNN); the two flags select what state is returned."""
        # Seq2VecEncoders cannot be stateful.
        super(PytorchSeq2VecWrapper, self).__init__(stateful=False)
        self._module = module
        self._return_all_layers = return_all_layers
        self._return_all_hidden_states = return_all_hidden_states
        # Set by forward() (masked path only) to the padded per-step outputs.
        self.last_layer_output = None
        if not getattr(self._module, 'batch_first', True):
            raise ConfigurationError("Our encoder semantics assumes batch is always first!")
    @property
    def _bidirectional(self):
        # Plain RNN cells without the attribute are treated as unidirectional.
        return getattr(self._module, 'bidirectional', False)
    def _num_directions(self):
        # 2 when bidirectional, else 1.
        return 2 if self._bidirectional else 1
    @property
    def _num_layers(self):
        return getattr(self._module, 'num_layers', 1)
    @overrides
    def get_input_dim(self) -> int:
        return self._module.input_size
    @overrides
    def get_output_dim(self) -> int:
        # hidden_size per direction; multiplied by num_layers when all
        # layers' states are concatenated into the output.
        output_dim = self._module.hidden_size * self._num_directions()
        if self._return_all_layers:
            output_dim *= self._num_layers
        return output_dim
    @overrides
    def forward(self,  # pylint: disable=arguments-differ
                inputs: torch.Tensor,
                mask: torch.Tensor,
                hidden_state: torch.Tensor = None) -> torch.Tensor:
        """Encode *inputs* (batch, seq, dim) to a fixed-size vector per instance.

        When `return_all_hidden_states` is set the result carries a leading
        state dimension (2 for LSTM state+memory, else 1).
        """
        if mask is None:
            if self._return_all_layers:
                final_states = self._module(inputs, hidden_state)[1]  # FIXME: needs reshape?
            else:
                # If a mask isn't passed, there is no padding in the batch of instances, so we can just
                # return the last sequence output as the state. This doesn't work in the case of
                # variable length sequences, as the last state for each element of the batch won't be
                # at the end of the max sequence length, so we have to use the state of the RNN below.
                # FIXME: take 1 instead of 0, to get (state, memory) if desired.
                final_states = self._module(inputs, hidden_state)[0][:, -self._num_directions():, :]
            if self._return_all_hidden_states:
                if isinstance(final_states, tuple):
                    return torch.stack(final_states)
                else:
                    return final_states.unsqueeze(0)
            elif isinstance(final_states, tuple):
                return final_states[0]
            else:
                return final_states
        batch_size, total_sequence_length = mask.size()
        # Sort by length, pack, run the RNN, and remember how to undo the sort.
        packed_sequence_output, final_states, restoration_indices, = \
            self.sort_and_run_forward(self._module, inputs, mask, hidden_state)
        unpacked_sequence_output = torch.nn.utils.rnn.pad_packed_sequence(packed_sequence_output, batch_first=True)[0]
        self.last_layer_output = self._restore_order_and_shape_sequence_output(batch_size,
                                                                              total_sequence_length,
                                                                              restoration_indices,
                                                                              unpacked_sequence_output)
        # Deal with the fact the LSTM state is a tuple of (state, memory).
        # For consistency, we always add one dimension to the state and later decide if to drop it.
        if isinstance(final_states, tuple) and self._return_all_hidden_states:
            final_states = torch.stack(final_states)
        else:
            if isinstance(final_states, tuple):
                final_states = final_states[0]
            final_states = final_states.unsqueeze(0)
        return self._restore_order_and_shape(batch_size, restoration_indices, final_states)
    def _restore_order_and_shape(self,
                                 batch_size: int,
                                 restoration_indices: torch.LongTensor,
                                 state: torch.Tensor) -> torch.Tensor:
        """Undo the length-sort on *state*, pad back skipped rows, and flatten
        the layer/direction dimensions into the final encoding dimension."""
        # `state_len` is 2 if it's an LSTM and `self._return_all_hidden_states` is true.
        state_len, num_layers_times_directions, num_valid, encoding_dim = state.size()
        # Add back invalid rows.
        if num_valid < batch_size:
            # batch size is the third dimension here, because PyTorch returns RNN state, which is possibly a tuple,
            # as a tensor of shape (num_layers * num_directions, batch_size, hidden_size)
            zeros = state.new_zeros(state_len,
                                    num_layers_times_directions,
                                    batch_size - num_valid,
                                    encoding_dim)
            state = torch.cat([state, zeros], 2)
        # Restore the original indices and return the final state of the
        # top layer. PyTorch's recurrent layers return state in the form
        # (num_layers * num_directions, batch_size, hidden_size) regardless
        # of the 'batch_first' flag, so we transpose, extract the relevant
        # layer state (both forward and backward if using bidirectional layers)
        # and we combine the hidden states in the last dimension, just after the batch size.
        # now of shape: (state_len, batch_size, num_layers * num_directions, hidden_size).
        unsorted_state = state.transpose(1, 2).index_select(1, restoration_indices)
        if not self._return_all_layers:
            # Extract the last hidden vector, including both forward and backward states
            # if the cell is bidirectional.
            unsorted_state = unsorted_state[:, :, -self._num_directions():, :]
        if self._return_all_hidden_states:
            return unsorted_state.contiguous().view([state_len, -1, self.get_output_dim()])
        else:
            return unsorted_state[0].contiguous().view([-1, self.get_output_dim()])
    @staticmethod
    def _restore_order_and_shape_sequence_output(batch_size: int,
                                                 total_sequence_length: int,
                                                 restoration_indices: torch.LongTensor,
                                                 sequence_output: torch.Tensor) -> torch.Tensor:
        """Undo the length-sort on the padded per-step outputs and re-pad to
        the full batch size and sequence length."""
        num_valid = sequence_output.size(0)
        # Add back invalid rows.
        if num_valid < batch_size:
            _, length, output_dim = sequence_output.size()
            zeros = sequence_output.new_zeros(batch_size - num_valid, length, output_dim)
            sequence_output = torch.cat([sequence_output, zeros], 0)
        # It's possible to need to pass sequences which are padded to longer than the
        # max length of the sequence to a Seq2SeqEncoder. However, packing and unpacking
        # the sequences mean that the returned tensor won't include these dimensions, because
        # the RNN did not need to process them. We add them back on in the form of zeros here.
        sequence_length_difference = total_sequence_length - sequence_output.size(1)
        if sequence_length_difference > 0:
            zeros = sequence_output.new_zeros(batch_size, sequence_length_difference, sequence_output.size(-1))
            sequence_output = torch.cat([sequence_output, zeros], 1)
        # Restore the original indices and return the sequence.
        return sequence_output.index_select(0, restoration_indices)
| mmazab/LifeQA | lqa_framework/modules/pytorch_seq2vec_wrapper.py | pytorch_seq2vec_wrapper.py | py | 7,917 | python | en | code | 10 | github-code | 50 |
33540866164 | from models.Spread.SpreadNet import train_test, SpreadNet
from models.Spread.DenseSpreadNet import DenseSpreadNet
import matplotlib.pyplot as plt
import numpy as np
import pickle
import os
import torch
# Locations of the thesis data set and output root.
path = '/media/rico/Data/TU/thesis'
file = '{}/data/ASCAD_0.h5'.format(path)
ranks_x = []
ranks_y = []
# Parameters
use_hw = False
spread_factor = 6
runs = 3
train_size = 10
epochs = 10
batch_size = 1
lr = 0.00001
sub_key_index = 0
attack_size = 100
rank_step = 2
# Select the number of classes to use depending on hw
n_classes = 9 if use_hw else 256
type_network = ''
network = None
# Save the ranks to a file
dir_name = '{}_SF{}_E{}_BZ{}_LR1E-5/train{}'.format(
    'HW' if use_hw else 'ID',
    spread_factor,
    epochs,
    batch_size,
    train_size
)
# Create the output directory up front so torch.save() inside the loop
# cannot fail; previously it was only created after the loop, right
# before pickling the ranks.
os.makedirs('{}/runs/{}'.format(path, dir_name), exist_ok=True)
for i in range(runs):
    # Choose which network to use
    network = SpreadNet(spread_factor=spread_factor, input_shape=700, out_shape=n_classes)
    # network = DenseSpreadNet(spread_factor=spread_factor, input_shape=700, out_shape=n_classes)
    x, y = train_test(file, sub_key_index=sub_key_index, network=network, train_size=train_size, epochs=epochs,
                      batch_size=batch_size, lr=lr,
                      use_hw=use_hw,
                      attack_size=attack_size,
                      rank_step=rank_step)
    ranks_x.append(np.array(x))
    ranks_y.append(np.array(y))
    if isinstance(network, DenseSpreadNet):
        type_network = 'Dense-spread hidden network'
    elif isinstance(network, SpreadNet):
        type_network = 'Spread Non Relu network'
    else:
        type_network = '?? network'
    model_save_file = '{}/runs/{}/model_r{}_{}.pt'.format(path, dir_name, i, type_network)
    print(model_save_file)
    torch.save(network.state_dict(), model_save_file)
# Build the plot title AFTER the runs: type_network is only determined
# inside the loop, so formatting it beforehand (as the original did)
# always produced an empty network type in the plots.
title = 'Torch {} \nSpread factor {}\nTrain size {}, batch {}, lr {}, epochs {}, Type {}'.format(
    type_network,
    spread_factor,
    train_size,
    batch_size,
    lr,
    epochs,
    'HW' if use_hw else 'ID')
x_save_file = '{}/runs/{}/x_{}.r'.format(path, dir_name, type_network)
y_save_file = '{}/runs/{}/y_{}.r'.format(path, dir_name, type_network)
os.makedirs(os.path.dirname(x_save_file), exist_ok=True)
with open(x_save_file, 'wb') as f:
    pickle.dump(ranks_x, f)
with open(y_save_file, 'wb') as f:
    pickle.dump(ranks_y, f)
# Plot the per-run rank curves.
plt.title('Performance of {}'.format(title))
plt.xlabel('number of traces')
plt.ylabel('rank')
plt.grid(True)
for i in range(runs):
    plt.plot(ranks_x[i], ranks_y[i])
plt.figure()
# Show a figure with the mean of the runs
rank_avg_y = np.mean(ranks_y, axis=0)
plt.title('Performance of {}'.format(title))
plt.xlabel('number of traces')
plt.ylabel('rank')
plt.grid(True)
plt.plot(ranks_x[0], rank_avg_y, label='mean')
plt.legend()
plt.show()
plt.figure()
| klikooo/thesis-src | old/runner.py | runner.py | py | 2,794 | python | en | code | 0 | github-code | 50 |
8042202796 | # Rather than compute all possible permutations of the string, we instead opt to do a frequency analysis
# Simply check the frequency of a given string, and see if its frequency matches with any other string in the dictionary
import sys
from collections import Counter
def setup():
    """Return the dictionary path: argv[1] when supplied, else the default file."""
    if len(sys.argv) < 2:
        print("No file passed. Using path-to-dict.txt...")
        return "path-to-dict.txt"
    return sys.argv[1]
def divideIntoWords(dictionary):
    """Read the open file *dictionary* and return its whitespace-separated words."""
    return dictionary.read().split()
def getInput():
    """Prompt the user for a string; terminate the program on empty input."""
    text = input("Enter a string: ")
    if not text:
        print("Error: No input found")
        print("Exiting... ")
        sys.exit(0)
    return text
def dictMe(string):
    """Return a case-insensitive character-frequency map for *string*.

    Each lower-cased character maps to the number of times it appears.
    Runs in O(n) for a string of length n; uses collections.Counter
    instead of the hand-rolled accumulation loop.
    """
    return dict(Counter(string.lower()))
# Calling the functions
def main():
    """Drive the anagram finder: index the dictionary once, then answer
    queries in a loop (getInput() exits the process on empty input)."""
    f = setup()
    dictionary = open(f, "r")
    words = divideIntoWords(dictionary)
    # Deduplicate the word list.
    s = set(words)
    # For each word in the dictionary, generate the frequency of the word
    # Then, store this in a mapping for ease of access
    # This is done so we don't need to create the frequency analysis for every iteration.
    # We can just do this in one-step, in the offline process
    wordFrequencies = {}
    for word in s:
        wordFrequencies[word] = (dictMe(word))
    user = getInput()
    while user:
        userFrequency = dictMe(user)
        toSort = set() # Set chosen to remove duplicates
        # Two words are anagrams iff their character frequencies match.
        for element in s:
            if wordFrequencies[element] == userFrequency:
                toSort.add(element)
        if toSort:
            toSort = list(toSort) # Change to list to sort
            toSort.sort() # Sort
            for element in toSort:
                print(element, end = " ")
            print()
        else:
            print("-")
        user = getInput()
    dictionary.close()
| HaarisKhan/interesting | anagram.py | anagram.py | py | 2,313 | python | en | code | 0 | github-code | 50 |
23230876266 | #!/usr/bin/python27/bin/python
#--*-- coding: utf-8 --*--
# Liszt 2014-3-4
import rsa
import os
import stat
from Crypto.Cipher import AES
from Crypto import Random
import base64
def create_rsa_file(pubfile, prifile):
    """Generate a 1024-bit RSA key pair, write both keys in PKCS#1 form to
    *pubfile*/*prifile*, mark both files read-only, and return (pub, pri).

    NOTE(review): 1024-bit RSA is considered weak by current standards;
    consider 2048 bits or more if these keys protect anything sensitive.
    """
    pubkey, prikey = rsa.newkeys(1024)
    pub = pubkey.save_pkcs1()
    publicfile = open(pubfile, 'w+')
    publicfile.write(pub)
    publicfile.close()
    pri = prikey.save_pkcs1()
    privatefile = open(prifile, 'w+')
    privatefile.write(pri)
    privatefile.close()
    # Make both key files read-only so they are not accidentally overwritten.
    os.chmod(pubfile, stat.S_IREAD)
    os.chmod(prifile, stat.S_IREAD)
    return pubkey, prikey
class encoder(object):
    """RSA encrypt/decrypt helper bound to a local private key and a remote
    public key, both loaded from PKCS#1 PEM files."""
    def __init__(self, PRIVATE_KEY_FILE, REMOTE_KEY_FILE):
        # Our private key decrypts incoming messages; the remote party's
        # public key encrypts outgoing ones.
        self.PRIVATE_KEY = self.__get_private_key_in_file__(PRIVATE_KEY_FILE)
        self.REMOTE_KEY = self.__get_public_key_in_file__(REMOTE_KEY_FILE)
    def encode(self, MESSAGE):
        """Encrypt MESSAGE for the remote party."""
        return rsa.encrypt(MESSAGE, self.REMOTE_KEY)
    def decode(self, MESSAGE):
        """Decrypt MESSAGE with our private key."""
        return rsa.decrypt(MESSAGE, self.PRIVATE_KEY)
    def __get_public_key_in_file__(self, pubfile):
        # Load a PKCS#1 public key from disk.
        with open(pubfile) as publicfile:
            p = publicfile.read()
            pubkey = rsa.PublicKey.load_pkcs1(p)
        return pubkey
    def __get_private_key_in_file__(self, prifile):
        # Load a PKCS#1 private key from disk.
        with open(prifile) as privatefile:
            p = privatefile.read()
            prikey = rsa.PrivateKey.load_pkcs1(p)
        return prikey
class AES_encoder(object):
    """AES-256-CBC helper with a random per-message IV; ciphertext is
    base64-encoded with the IV prepended."""
    def __init__(self, key=None):
        # bs: block/key size in bytes; a missing key is replaced by 32
        # random bytes, and short keys are padded up to 32.
        self.bs = 32
        if key is None:
            key = Random.new().read(self.bs)
        if len(key) >= 32:
            self.key = key[:32]
        else:
            self.key = self._pad(key)
    def encode(self, raw):
        """Pad *raw*, encrypt under a fresh IV, and return base64(iv + ciphertext)."""
        raw = self._pad(raw)
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return base64.b64encode(iv + cipher.encrypt(raw))
    def decode(self, enc):
        """Reverse encode(): base64-decode, split off the IV, decrypt, unpad."""
        enc = base64.b64decode(enc)
        iv = enc[:AES.block_size]
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return self._unpad(cipher.decrypt(enc[AES.block_size:]))
    def _pad(self, s):
        # PKCS#7-style padding up to a multiple of self.bs.
        return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)
    def _unpad(self, s):
        # Strip the padding: the last character encodes the pad length.
        return s[:-ord(s[len(s)-1:])]
| amwuqd/whisper | server/encoder.py | encoder.py | py | 2,266 | python | en | code | 0 | github-code | 50 |
6072183993 | from celery import shared_task
from webapp.models import PDFOperationOrder, ImageOperationOrder, MultipleFile
from PIL import Image
import pytesseract
import sys
from django.utils import timezone
from pdf2image import convert_from_bytes
import os, shutil
import cv2
import numpy as np
from PyPDF2 import PdfFileMerger
from django.core.files.storage import FileSystemStorage
import fitz
pytesseract.pytesseract.tesseract_cmd = "C:\\Program Files (x86)\\Tesseract-OCR\\tesseract.exe"
@shared_task(bind=True)
def images_to_pdf_task(self, order_id):
    """Celery task: OCR every image of the order into a one-page PDF via
    Tesseract and merge the pages; returns the merged file's path."""
    order = ImageOperationOrder.objects.get(pk=order_id)
    fs = FileSystemStorage()
    dir_name = "images_to_pdf"+str(order_id)
    # Working directory for the intermediate per-page PDFs.
    try:
        os.mkdir(fs.path("temp/"+dir_name))
    except Exception as e:
        print(e)
    images = order.images.all()
    set_task_state(self, "EXTRACTING PAGES", 1, total=2)
    for page, image in enumerate(images):
        absolute_file_path = image.file.file
        # image_to_pdf_or_hocr returns the raw bytes of a searchable PDF page.
        pdf = pytesseract.image_to_pdf_or_hocr(Image.open(absolute_file_path))
        fs = FileSystemStorage()
        fn = fs.path("temp/"+dir_name)+"/page_"+str(page)+".pdf"
        with open(fn, "wb") as file:
            file.write(pdf)
    set_task_state(self, "MERGING PDFS", 2, total=2)
    result_path = merge_pdfs(dir_name, images.count())
    return result_path
@shared_task(bind=True)
def pdf_to_pngs(self, order_id):
    """Celery task: render every page of the order's PDF to PNG (optionally
    background-cleaned), zip the result into processed/, then clean temp/."""
    set_task_state(self, "STARTED", 1)
    order = PDFOperationOrder.objects.get(pk=order_id)
    fs = FileSystemStorage()
    set_task_state(self, "EXTRACTING PAGES", 3)
    pdffile_path = order.file.file
    pdffile = fitz.open(pdffile_path)
    page_count = pdffile.pageCount
    # Base name of the upload without its extension.
    uploaded_file_name = "".join(order.file.name.split("/")[-1].split(".")[:1])
    make_temp_directories(uploaded_file_name)
    #Convert pages to images and store in the temp/dir_name_raw directory
    set_task_state(self, "CONVERTING...", 3)
    convert_pdf_to_images(self, uploaded_file_name, pdffile)
    dir_name = fs.path("temp/"+uploaded_file_name+"_raw")
    if order.remove_bg:
        for i in range(0, page_count):
            filename = fs.path("temp/"+uploaded_file_name+"_raw")+"/page_"+str(i)+".png"
            info = "page " + str(i+1) + " of " + str(page_count) + " pages"
            set_task_state(self, "REMOVING BACKGROUND", 4, info=info)
            new_image = remove_background(uploaded_file_name, filename, i)
            if new_image:
                filename = new_image
        # Zip the background-removed copies instead of the raw renders.
        dir_name = fs.path("temp/"+uploaded_file_name+"_nbg")
    output_dir_name = uploaded_file_name
    zip_directory(output_dir_name, dir_name)
    remove_temp_dirs()
@shared_task(bind=True)
def static_pdf_to_selectable_pdf(self, order_id):
    """Celery task: turn a scanned (image-only) PDF into a text-selectable
    one by delegating to process(); returns the order id."""
    set_task_state(self, "STARTED", 1)
    order = PDFOperationOrder.objects.get(pk=order_id)
    process(self, order)
    return order_id
def process(self, order):
    """Render the order's PDF to page images, optionally clean their
    backgrounds, OCR each page back to PDF, and merge into processed/."""
    fs = FileSystemStorage()
    set_task_state(self, "GETTING PAGES", 2)
    # Base name of the upload without its extension.
    dir_name = "".join(order.file.name.split("/")[-1].split(".")[:1])
    make_temp_directories(dir_name)
    set_task_state(self, "EXTRACTING PAGES", 3)
    pdffile_path = order.file.file
    pdffile = fitz.open(pdffile_path)
    page_count = pdffile.pageCount
    # Convert pages to images and store in the temp directory
    convert_pdf_to_images(self, dir_name, pdffile)
    set_task_state(self, "ANALYZING THE PAGES", 3)
    for i in range(0, page_count):
        filename = fs.path("temp/"+dir_name+"_raw")+"/page_"+str(i)+".png"
        if order.remove_bg:
            info = "page " + str(i+1) + " of " + str(page_count) + " pages"
            set_task_state(self, "REMOVING BACKGROUND", 4, info=info)
            new_image = remove_background(dir_name, filename, i)
            if new_image:
                filename = new_image
        # Convert to pdf
        convert_to_pdf(self, dir_name, filename, page_number=i)
    # merge pdfs
    set_task_state(self, "MERGING PDFS", 5)
    merge_pdfs(dir_name, page_count)
def convert_pdf_to_images(self, dir_name, pdffile):
    """Write every page of *pdffile* as temp/<dir_name>_raw/page_<i>.png,
    reporting progress through set_task_state after each page."""
    fs = FileSystemStorage()
    for i in range(pdffile.pageCount):
        filename = fs.path("temp/"+dir_name+"_raw")+"/page_"+str(i)+".png"
        page = pdffile.loadPage(i)
        pix = page.getPixmap()
        pix.writePNG(filename)
        info = str(i+1) + " pages found"
        set_task_state(self, "EXTRACTING PAGES", 3, info=info)
def convert_to_pdf(self, dir_name, filename, page_number):
    """OCR the page image *filename* into temp/<dir_name>/page_<n>.pdf."""
    pdf = pytesseract.image_to_pdf_or_hocr(Image.open(filename))
    fs = FileSystemStorage()
    fn = fs.path("temp/"+dir_name)+"/page_"+str(page_number)+".pdf"
    with open(fn, "wb") as file:
        file.write(pdf)
def remove_background(dir_name, filename, page):
    """Suppress the page background/watermark via morphological filtering and
    Otsu thresholding; writes the cleaned page to temp/<dir_name>_nbg/ and
    returns its path, or None when thresholding produced no usable pixels."""
    # Load the image
    img = cv2.imread(filename)
    # Convert the image to grayscale
    gr = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Make a copy of the grayscale image
    bg = gr.copy()
    # Apply morphological transformations
    for i in range(5):
        kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                            (2 * i + 1, 2 * i + 1))
        bg = cv2.morphologyEx(bg, cv2.MORPH_CLOSE, kernel2)
        bg = cv2.morphologyEx(bg, cv2.MORPH_OPEN, kernel2)
    # Subtract the grayscale image from its processed copy
    dif = cv2.subtract(bg, gr)
    # Apply thresholding
    bw = cv2.threshold(dif, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    dark = cv2.threshold(bg, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    # Extract pixels in the dark region
    darkpix = gr[np.where(dark > 0)]
    # Threshold the dark region to get the darker pixels inside it
    darkpix = cv2.threshold(darkpix, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    try:
        # Paste the extracted darker pixels in the watermark region
        bw[np.where(dark > 0)] = darkpix.T
    except AttributeError:
        # thresholding can yield None for an empty dark region; signal failure
        return
    fs = FileSystemStorage()
    new_name = fs.path("temp/"+dir_name+"_nbg")+"/page_"+str(page)+".png"
    cv2.imwrite(new_name, bw)
    return new_name
def merge_pdfs(dir_name, page_count):
    """Concatenate temp/<dir_name>/page_0..page_<count-1>.pdf into
    processed/<dir_name>.pdf and return the output path."""
    merger = PdfFileMerger()
    fs = FileSystemStorage()
    pdfs = []
    for i in range(0, page_count):
        filename = fs.path("temp/"+dir_name)+"/page_"+str(i)+".pdf"
        pdfs.append(filename)
    for pdf in pdfs:
        merger.append(pdf)
    result_path = fs.path("processed")+"/"+dir_name+".pdf"
    merger.write(result_path)
    merger.close()
    return result_path
def zip_directory(output_dir_name, input_dir_name):
    """Archive *input_dir_name* as processed/<output_dir_name>.zip."""
    archive_base = FileSystemStorage().path("processed")+"/"+output_dir_name
    shutil.make_archive(archive_base, 'zip', input_dir_name)
# TODO: check for idle celery state before wiping shared temp directories
def remove_temp_dirs():
    """Best-effort removal of every sub-directory under temp/; failures are
    printed and skipped so one locked directory does not abort cleanup."""
    fs = FileSystemStorage()
    temp_root = fs.path("temp")
    for entry in os.listdir(temp_root):
        try:
            shutil.rmtree(fs.path("temp/"+entry))
        except Exception as e:
            print(e)
def make_temp_directories(dir_name):
    """Create the three temp working directories used by one conversion job:
    temp/<dir_name>, temp/<dir_name>_raw and temp/<dir_name>_nbg."""
    fs = FileSystemStorage()
    for suffix in ("", "_raw", "_nbg"):
        os.mkdir(fs.path("temp/"+dir_name+suffix))
def set_task_state(task, message, current, total=5, info=""):
    """Publish progress for a Celery *task*: state name plus a meta dict
    holding the current step (as a string), the total step count and an
    optional free-form info message."""
    progress_meta = {
        "current": str(current),
        "total": total,
        "info": info,
    }
    task.update_state(state=message, meta=progress_meta)
@shared_task(bind=True)
def task_merge_pdfs(self, order_id):
    """Celery task: merge every PDF attached to the MultipleFile order
    *order_id* into a single processed/<order_id>.pdf and return its path.

    Raises MultipleFile.DoesNotExist when the order id is unknown.
    """
    merger = PdfFileMerger()
    fs = FileSystemStorage()
    order = MultipleFile.objects.get(id=order_id)
    # Resolve the stored file fields to absolute paths before merging.
    pdfs = []
    for file in order.files.all():
        filename = fs.path(str(file.file))
        pdfs.append(filename)
    for pdf in pdfs:
        merger.append(pdf)
    new_name = order_id
    result_path = fs.path("processed")+"/"+str(new_name)+".pdf"
    merger.write(result_path)
    merger.close()
    # NOTE(review): leftover debug print — consider removing or logging.
    print(result_path)
    return result_path
| dodziraynard/digitaleye | webapp/tasks.py | tasks.py | py | 7,285 | python | en | code | 0 | github-code | 50 |
def week_day_name(index):
    """Map a 0-based day index (0 = Sunday .. 6 = Saturday) to its name."""
    day_names = ("Sunday", "Monday", "Tuesday", "Wednesday",
                 "Thursday", "Friday", "Saturday")
    return day_names[index]

print(week_day_name(5))
def is_leap_year(year):
    """Return True for Gregorian leap years: divisible by 4 but not by 100,
    or divisible by 400.

    Bug fix: the original returned 29 for leap years and None otherwise, so
    callers computing ``28 + int(is_leap_year(year))`` got 57 for leap years
    and a TypeError for common years. Returning a bool makes that expression
    yield 29/28 as intended.
    """
    return (year % 100 != 0 and year % 4 == 0) or year % 400 == 0
def days_in_month(month, year):
    """Return the number of days in *month* (1-12) of *year*.

    Bug fixes versus the original:
    - February now handles leap years inline (the old code added
      ``int(is_leap_year(year))`` where that helper returned 29/None,
      producing 57 days or a TypeError).
    - August (month 8) was excluded by both parity tests
      (``month<8`` and ``month>8``) and fell through to 30 days; the
      31-day months are Jan/Mar/May/Jul (odd, below August) and
      Aug/Oct/Dec (even, from August on).
    """
    if month == 2:
        leap = (year % 100 != 0 and year % 4 == 0) or year % 400 == 0
        return 29 if leap else 28
    if (month < 8 and month % 2 != 0) or (month >= 8 and month % 2 == 0):
        return 31
    return 30
# Print the length of every month of a sample year on one tab-separated line.
# Bug fix: the original called days_in_month(n) with an undefined name `n`,
# omitted the required year argument, and used the invalid print keyword
# `ends=` instead of `end=`. 2023 is used to match the sample date below.
m = 1
while m <= 12:
    print(days_in_month(m, 2023), end="\t")
    m += 1
print("\nleap ")
def date_value(day, month, year):
    """Return the ordinal day number of the given date, counting day 1 as
    January 1st of year 1 (Gregorian leap rules).

    Bug fix: the original returned right after the whole-years term, so the
    month/day accumulation below was unreachable dead code, and it printed a
    literal debug f-string on every loop iteration.
    """
    y = year - 1
    # Days contributed by all fully elapsed years.
    value = y * 365 + y // 4 - y // 100 + y // 400
    # Days contributed by the fully elapsed months of the current year.
    m = 1
    while m < month:
        value += days_in_month(m, year)
        m += 1
    return value + day
def week_Day_name(date, month, year):
    """Return the week-day name of the given calendar date, using
    1/1/2004 as the reference date.

    Bug fix: the original referenced several undefined names
    (``data_value``, ``Ref_data_input_Data``, ``get_Week_Day_name``) and
    the final call used a non-existent ``date_to_week_day``; it could
    never run.

    NOTE(review): week_day_name() maps index 0 to Sunday, so the anchor
    offset may need adjusting once the reference weekday is confirmed.
    """
    ref_date = date_value(1, 1, 2004)
    input_date = date_value(date, month, year)
    diff = (input_date - ref_date) % 7
    return week_day_name(diff)

print(week_Day_name(21, 9, 2023))
from bnp_assembly.simulation.contig_simulation import simulate_contigs_from_genome
import pytest
from bionumpy.datatypes import SequenceEntry
import numpy as np
@pytest.fixture
def genome():
    """Two-chromosome toy genome used by the contig-simulation tests."""
    return SequenceEntry.from_entry_tuples([
        ["chr1", "ACTGACTGACTG"],
        ["chr2", "GGGGGGGGGGGGGGGGGGG"]
    ])
def test_simulate_contigs(genome):
    """Splitting a 2-chromosome genome at 4 points yields 6 contigs, each at
    least min_contig_size long, with consistent alignment intervals."""
    rng = np.random.default_rng(1)
    simulated = simulate_contigs_from_genome(genome, 4, min_contig_size=3, rng=rng)
    assert len(simulated.contigs) == 6
    assert np.all(simulated.contigs.sequence.shape[1] >= 3)
    print(simulated.alignment)
    alignment = simulated.alignment
    # Each alignment interval must span the same length on scaffold and contig.
    assert np.all(alignment.scaffold_end - alignment.scaffold_start == alignment.contig_end - alignment.contig_start)
| knutdrand/bnp_assembly | tests/test_contig_simulation.py | test_contig_simulation.py | py | 761 | python | en | code | 0 | github-code | 50 |
"""
Ball in a Cup environment for policy search experiments.
Builds off code written with Johannes Silberbauer and Michael Lutter.
Also builds of experiment design of Pascal Klink.
"""
import multiprocessing
from dataclasses import asdict, dataclass
from enum import Enum
from pathlib import Path
import mujoco_py
import numpy as np
from tqdm import tqdm
from utils import NullContext, VideoRenderStream
class BicType(Enum):
    """Cup geometry variant; selects which MuJoCo model XML is loaded."""
    cone = 1
    cylinder = 2
def point_line_dist(x0, x1, x):
    """Distance from point *x* to the infinite line through *x0* and *x1*
    (https://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html).
    """
    direction = x1 - x0
    offset = x0 - x
    return np.linalg.norm(np.cross(direction, offset)) / np.linalg.norm(direction)
def position_along_axis(x0, x1, x):
    """Parameter t of the orthogonal projection of *x* onto the axis
    through *x0* and *x1*: t == 0 means x0, t == 1 means x1."""
    axis = x1 - x0
    return np.dot(x - x0, axis) / (np.linalg.norm(axis) ** 2)
def cylinder_contains(x0, x1, r, x):
    """True iff point *x* lies inside the finite cylinder whose axis runs
    from *x0* to *x1* and whose radius is *r*."""
    radial_dist = point_line_dist(x0, x1, x)
    axial_pos = position_along_axis(x0, x1, x)
    return (radial_dist <= r) and (0 <= axial_pos <= 1)
def cone_contains(x_tip, x_base, r, x):
    """True iff point *x* lies inside the cone with tip at *x_tip* and base
    circle of radius *r* centered at *x_base*."""
    radial_dist = point_line_dist(x_tip, x_base, x)
    axial_pos = position_along_axis(x_tip, x_base, x)
    return (radial_dist <= axial_pos * r) and (0 <= axial_pos <= 1)
def wam_collision_geom_names():
    """Names of the WAM arm geoms that are checked for ball contacts."""
    link_prefixes = (
        "base_link_convex",
        "shoulder_link_convex_decomposition_p1",
        "shoulder_link_convex_decomposition_p2",
        "shoulder_link_convex_decomposition_p3",
        "shoulder_pitch_link_convex",
        "upper_arm_link_convex_decomposition_p1",
        "upper_arm_link_convex_decomposition_p2",
        "elbow_link_convex",
        "elbow_link_cylinder",
    )
    return [prefix + "_geom" for prefix in link_prefixes]
def ball_in_cup_xml_path(type_: BicType):
    """Resolve the MuJoCo model XML file for the requested cup geometry.

    Raises ValueError for an unknown BicType and asserts that the file
    actually exists on disk.
    """
    asset_root = Path(__file__).parent.resolve() / "robot_descriptions" / "wam"
    if type_ == BicType.cone:
        xml_file = asset_root / "ball_in_cup_cone.xml"
    elif type_ == BicType.cylinder:
        xml_file = asset_root / "ball_in_cup_cylinder.xml"
    else:
        raise ValueError("Cannot load ball-in-cup xml. " f"Unknown type {type_}")
    assert xml_file.is_file()
    return xml_file
class BallInCupSimTrace(object):
    """A data class for keeping track of relevant states during the
    simulation."""
    # Per-step buffers: one row per simulation step.
    ball_positions: np.ndarray
    goal_positions: np.ndarray
    goal_final_positions: np.ndarray
    cup_center_top_positions: np.ndarray
    cup_center_bottom_positions: np.ndarray
    joint_positions: np.ndarray
    joint_velocities: np.ndarray
    torques: np.ndarray
    # Episode outcome flags and counters.
    error: bool
    constraint_violation: bool
    n_not_executed_steps: int
    n_cool_down_steps: int
    # NOTE(review): annotated str, but BallInCupSim passes a BicType enum
    # here — confirm the intended type (compute_success compares strings).
    type_: str
    def __init__(self, n_steps: int, type_: BicType):
        """Initialize the trace and all its underlying buffers
        to hold data for the specified number of steps."""
        self.ball_positions = np.zeros((n_steps, 3))
        self.goal_positions = np.zeros((n_steps, 3))
        self.goal_final_positions = np.zeros((n_steps, 3))
        self.cup_center_top_positions = np.zeros((n_steps, 3))
        self.cup_center_bottom_positions = np.zeros((n_steps, 3))
        self.joint_positions = np.zeros((n_steps, 4))
        self.joint_velocities = np.zeros((n_steps, 4))
        self.torques = np.zeros((n_steps, 4))
        self.error = False
        self.constraint_violation = False
        self.n_not_executed_steps = 0
        self.n_cool_down_steps = 0
        self.type_ = type_
    def subset(self, last_index):
        """Return a copy of the trace containing all buffer values up
        until the specified step index."""
        other = BallInCupSimTrace(last_index, self.type_)
        # Note: the buffers below are numpy views into this trace, not copies.
        other.ball_positions = self.ball_positions[:last_index]
        other.goal_positions = self.goal_positions[:last_index]
        other.goal_final_positions = self.goal_final_positions[:last_index]
        other.cup_center_top_positions = self.cup_center_top_positions[:last_index]
        other.cup_center_bottom_positions = self.cup_center_bottom_positions[
            :last_index
        ]
        other.joint_positions = self.joint_positions[:last_index]
        other.joint_velocities = self.joint_velocities[:last_index]
        other.torques = self.torques[:last_index]
        other.error = self.error
        other.constraint_violation = self.constraint_violation
        other.n_not_executed_steps = self.ball_positions.shape[0] - last_index
        other.n_cool_down_steps = self.n_cool_down_steps
        return other
    def copy(self):
        """Return a deep copy of this trace (numpy buffers are copied)."""
        numpy_attrs_names = [
            "ball_positions",
            "goal_positions",
            "goal_final_positions",
            "cup_center_top_positions",
            "cup_center_bottom_positions",
            "joint_positions",
            "joint_velocities",
            "torques",
        ]
        immutable_types_attrs_names = [
            "error",
            "constraint_violation",
            "n_not_executed_steps",
            "n_cool_down_steps",
            "type_",
        ]
        result = BallInCupSimTrace(self.ball_positions.shape[0], self.type_)
        # call numpy copy on numpy arrays
        for attr in numpy_attrs_names:
            setattr(result, attr, np.copy(getattr(self, attr)))
        # simply set attributes on immutable types
        for attr in immutable_types_attrs_names:
            setattr(result, attr, getattr(self, attr))
        return result
class BallInCupSim:
    """MuJoCo wrapper that handles running ball-in-cup simulations."""
    def __init__(self, type_=BicType.cylinder):
        # load MuJoCo model
        self.type_ = type_
        self.xml_path = ball_in_cup_xml_path(type_)
        self.model = mujoco_py.load_model_from_path(str(self.xml_path))
        self.n_sub_steps = 4
        self.sim = self._init_sim()
        # time steps (note: mujoco-py does nsubsteps MuJoCo steps at each call to step;
        # this produces an effectively larger step when viewed from mujoco-py)
        self.dt = self.sim.model.opt.timestep
        self.effective_dt = self.dt * self.n_sub_steps
        # store some common handles
        self.ball_id = self.sim.model.body_name2id("ball")
        self.goal_id = self.sim.model.site_name2id("cup_goal")
        self.goal_final_id = self.sim.model.site_name2id("cup_goal_final")
        self.cup_center_top_id = self.sim.model.site_name2id("cup_center_top")
        self.cup_center_bottom_id = self.sim.model.site_name2id("cup_center_bottom")
        self.ball_col_id = self.sim.model.geom_name2id("ball_geom")
        self.robot_col_ids = [
            self.sim.model.geom_name2id(name) for name in wam_collision_geom_names()
        ]
        # PD controller parameters
        self.p_gains = np.array([200.0, 300.0, 100.0, 100.0])
        self.d_gains = np.array([7, 15, 5, 2.5])
    def _init_sim(self):
        """Create a fresh MjSim for the loaded model."""
        return mujoco_py.MjSim(self.model, nsubsteps=self.n_sub_steps)
    def reset(self, q, recreate_sim=False):
        """Reset the simulation to the specified WAM joint states."""
        if recreate_sim:
            self.sim = self._init_sim()
        sim_state = self.sim.get_state()
        sim_state.qpos[:4] = q
        sim_state.qpos[4:] = 0.0  # reset string
        sim_state.qvel[:] = 0.0  # zero all velocities
        self.sim.set_state(sim_state)
        self.sim.forward()
    def get_joint_states(self):
        """Retrieve WAM joint positions and velocities."""
        return (self.sim.data.qpos[:4].copy(), self.sim.data.qvel[:4].copy())
    def set_named_joint_pos(self, joint_name, pos):
        """Modifies the joint positions of the given named joint."""
        # get_joint_qpos_addr returns an int for scalar joints and an
        # (start, end) tuple for multi-dof joints (e.g. free joints).
        index = self.sim.model.get_joint_qpos_addr(joint_name)
        sim_state = self.sim.get_state()
        if isinstance(index, int):
            sim_state.qpos[index] = pos
        else:
            sim_state.qpos[index[0] : index[1]] = pos
        self.sim.set_state(sim_state)
    def get_task_state(self):
        """Get the state of the task defined by the tuple of ball, goal and
        cup positions."""
        return (
            self.sim.data.body_xpos[self.ball_id],
            self.sim.data.site_xpos[self.goal_id],
            self.sim.data.site_xpos[self.goal_final_id],
            self.sim.data.site_xpos[self.cup_center_bottom_id],
            self.sim.data.site_xpos[self.cup_center_top_id],
        )
    def update_trace(self, idx_step, trace):
        """Write current joint states and tracked task states to the specified trace index."""
        (
            trace.ball_positions[idx_step],
            trace.goal_positions[idx_step],
            trace.goal_final_positions[idx_step],
            trace.cup_center_bottom_positions[idx_step],
            trace.cup_center_top_positions[idx_step],
        ) = self.get_task_state()
        (
            trace.joint_positions[idx_step],
            trace.joint_velocities[idx_step],
        ) = self.get_joint_states()
    def render_camera_img(self, frame_size):
        """Render a default camera view of the scene."""
        return np.flipud(
            self.sim.render(
                width=frame_size[0],
                height=frame_size[1],
                camera_name="side",
                depth=False,
            )
        ).copy(order="C")
    def check_ball_robot_collision(self):
        """Test whether there's a collision between the ball and any
        other part of the robot."""
        for idx_contact in range(self.sim.data.ncon):
            contact = self.sim.data.contact[idx_contact]
            # Check both geom orderings of the contact pair.
            collision = (
                contact.geom1 in self.robot_col_ids
                and contact.geom2 == self.ball_col_id
            )
            collision_trans = (
                contact.geom1 == self.ball_col_id
                and contact.geom2 in self.robot_col_ids
            )
            if collision or collision_trans:
                return True
        return False
    def stabilize_current_pos(self):
        """Runs the PD controller for multiple steps. The set-point is
        the current joint state and zero velocities."""
        q0 = self.get_joint_states()[0]
        qd0 = np.zeros_like(q0)
        for idx_step in range(500):
            self.set_setpoint(q0, qd0)
            self.step()
    def execute_trajectory(
        self,
        qs,
        qds,
        stabilize_current_pos=True,
        cool_down_final_pos=True,
        verbose=False,
        video_writer=None,
        overwrite_jnt_states=None,
    ):
        """Execute a trajectory of desired WAM joint positions and
        velocities using PD control.

        Returns a BallInCupSimTrace; on a MuJoCo error or a ball-robot
        collision the trace is truncated to the steps executed so far.
        """
        # Optionally hold the final set-point for 350 extra steps so the
        # system settles before the trace ends.
        n_steps = qs.shape[0] + (350 if cool_down_final_pos else 0)
        steps_per_video_frame = (
            int((1 / self.effective_dt) / video_writer.fps)
            if video_writer is not None
            else 1
        )
        # if needed stabilize the current position before
        # executing the trajectory
        if stabilize_current_pos:
            self.stabilize_current_pos()
        # output buffers
        trace = BallInCupSimTrace(n_steps, self.type_)
        pbar = tqdm(
            range(n_steps), desc="Simulating", unit="Steps", disable=(not verbose)
        )
        for idx_step in pbar:
            # retrieve system states
            self.update_trace(idx_step, trace)
            # apply controls (clamp to the last trajectory point during cool-down)
            idx_trj = min(qs.shape[0] - 1, idx_step)
            trace.n_cool_down_steps = max(idx_step - idx_trj, 0)
            qd_set = qds[idx_trj] if qds is not None else None
            trace.torques[idx_step] = self.set_setpoint(qs[idx_trj], qd_set)
            # overwrite requested joint states
            if overwrite_jnt_states is not None:
                for joint_name, states in overwrite_jnt_states.items():
                    self.set_named_joint_pos(
                        joint_name, states[min(states.shape[0] - 1, idx_trj)]
                    )
            # advance simulation, in the case of an error return trace
            # (e.g. divergent simulation) recorded so far
            try:
                self.step()
            except mujoco_py.builder.MujocoException as ex:
                trace.error = True
                if verbose:
                    pbar.write(
                        "Simulation at {}s encountered and error ({}). "
                        "Stopping early.".format(idx_step * self.effective_dt, repr(ex))
                    )
                return trace.subset(idx_step)
            # also end the simulation if the ball hits the robot
            if self.check_ball_robot_collision():
                trace.constraint_violation = True
                if verbose:
                    pbar.write(
                        "Simulation at {}s violated constraints (ball hit robot). "
                        "Stopping early.".format(idx_step * self.effective_dt)
                    )
                return trace.subset(idx_step)
            # render a video frame if needed and requested
            if video_writer is not None and (idx_step % steps_per_video_frame == 0):
                video_writer.write(self.render_camera_img(video_writer.frame_size))
        return trace
    def set_setpoint(self, q, qd):
        """Set joint torques using PD control. Note that if you don't
        specify a desired velocity only the P-part of the controller
        is active."""
        q_current, qd_current = self.get_joint_states()
        tau_d = np.zeros_like(q) if qd is None else self.d_gains * (qd - qd_current)
        tau = self.p_gains * (q - q_current) + tau_d
        self.set_torques(tau)
        return tau
    def set_torques(self, tau):
        """Apply the given torques to the four WAM joints."""
        self.sim.data.qfrc_applied[:4] = tau
    def step(self):
        """Advance the simulation by one (effective) time step."""
        self.sim.step()
    def replay(
        self,
        trace,
        video_writer,
        true_ball_positions=None,
        show_trace_cup_position=False,
    ):
        """Render a video of the joint and ball positions in the trace. This does not
        render the string position since it's not well-defined from the ball position.
        Note that the input positions should be relative to the MuJoCo world frame.
        """
        n_steps = trace.joint_positions.shape[0]
        steps_per_video_frame = int((1 / self.effective_dt) / video_writer.fps)
        ball_pred_q_index = self.sim.model.get_joint_qpos_addr("ball_pred_jnt")
        ball_true_q_index = self.sim.model.get_joint_qpos_addr("ball_true_jnt")
        cup_pred_q_index = self.sim.model.get_joint_qpos_addr("cup_pred_jnt")
        identity_quaternion = np.array([1.0, 0.0, 0.0, 0.0])
        for idx_step in range(n_steps):
            sim_state = self.sim.get_state()
            # Positions may be torch tensors (detach/clone) or numpy arrays (copy).
            jnt_pos = trace.joint_positions[idx_step, ...]
            jnt_pos = (
                jnt_pos.detach().clone()
                if not isinstance(jnt_pos, np.ndarray)
                else jnt_pos.copy()
            )
            sim_state.qpos[:4] = jnt_pos
            sim_state.qpos[4:] = 0.0  # reset string
            ball_pos = trace.ball_positions[idx_step, ...]
            ball_pos = (
                ball_pos.detach().clone()
                if not isinstance(ball_pos, np.ndarray)
                else ball_pos.copy()
            )
            # Free joints take a 7-dof qpos: position + unit quaternion.
            ball_pos_pred = np.concatenate((ball_pos, identity_quaternion))
            sim_state.qpos[ball_pred_q_index[0] : ball_pred_q_index[1]] = ball_pos_pred
            if true_ball_positions is not None:
                ball_pos_true = true_ball_positions[idx_step, ...]
                world_to_ball_true = np.concatenate(
                    (ball_pos_true, identity_quaternion)
                )
                sim_state.qpos[
                    ball_true_q_index[0] : ball_true_q_index[1]
                ] = world_to_ball_true
            if show_trace_cup_position:
                cup_pos_pred = trace.cup_center_bottom_positions[idx_step, ...]
                world_to_cup_pred = np.concatenate((cup_pos_pred, identity_quaternion))
                sim_state.qpos[
                    cup_pred_q_index[0] : cup_pred_q_index[1]
                ] = world_to_cup_pred
            self.sim.set_state(sim_state)
            self.sim.forward()
            # render a video frame if needed and requested
            if video_writer is not None and (idx_step % steps_per_video_frame == 0):
                video_writer.write(self.render_camera_img(video_writer.frame_size))
def evaluate_trajectory(q0, trj, trj_kwargs, sim_init_kwargs):
    """Run one trajectory on a fresh simulation starting at joint state *q0*
    and return the recorded trace."""
    init_kwargs = {} if sim_init_kwargs is None else sim_init_kwargs
    sim = BallInCupSim(**init_kwargs)
    sim.reset(q0)
    return sim.execute_trajectory(*trj, **trj_kwargs)
class BallInCupParallelTrajectoryEvaluator:
    """A class for handling the evaluation of multiple trajectories using
    Python's multiprocessing tools."""
    def __init__(self, q0):
        # Initial joint configuration shared by every evaluated trajectory.
        self.q0 = q0
    def eval(self, trajectories, trajectory_exe_kwargs, sim_init_kwargs):
        """Simulate all trajectories in a worker pool and return their traces
        in the same order as *trajectories*."""
        n_tasks = (
            len(trajectories)
            if isinstance(trajectories, list)
            else trajectories.shape[0]
        )
        pool = multiprocessing.Pool(multiprocessing.cpu_count())
        results = []
        for idx_task in range(n_tasks):
            results.append(
                pool.apply_async(
                    func=evaluate_trajectory,
                    args=(
                        self.q0,
                        trajectories[idx_task],
                        trajectory_exe_kwargs,
                        sim_init_kwargs,
                    ),
                )
            )
        # Blocks until every worker finished; exceptions re-raise here.
        results = [r.get() for r in results]
        pool.close()
        return results
def _test_trajectory(dt, t_end):
ts = np.arange(int(t_end / dt)) * dt
# construct position trajectories
max_pos = 1.8
pos_mod = np.linspace(0.6, max_pos / 2, ts.shape[0])
freq_mod = np.linspace(0.3, 0.6, ts.shape[0])
qs = np.zeros((ts.shape[0], 4))
qs[:, 0] = pos_mod * (np.sin(ts * 2.0 * np.pi * freq_mod))
qs[:, 3] = 1.57
return qs, None
def compute_state_reward_dipole_potential(trace: BallInCupSimTrace, **kwargs):
    """Computes reward for ball positions based on a potential function
    similar to the magnetic scalar potential of a dipole placed at
    the bottom of the cup.

    Required kwargs: ``dipole_eps`` (numerical stabilizer), ``dipole_beta``
    (potential scale) and ``min_weight`` (mix between best-over-time and
    final-step terms).
    """
    eps, beta = kwargs["dipole_eps"], kwargs["dipole_beta"]
    min_weight = kwargs["min_weight"]
    # Unit cup axis per step, pointing from cup bottom to cup top.
    m = trace.cup_center_top_positions - trace.cup_center_bottom_positions
    m /= np.linalg.norm(m, axis=-1)[..., None]
    # Dipole-like potential evaluated relative to the cup top...
    rm = trace.ball_positions - trace.cup_center_top_positions
    pot_m = (rm * m).sum(-1) / (np.linalg.norm(rm, axis=-1) ** 2 + eps)
    # ...and relative to the cup bottom.
    rl = trace.ball_positions - trace.cup_center_bottom_positions
    pot_l = (rl * m).sum(-1) / (np.linalg.norm(rl, axis=-1) ** 2 + eps)
    # Mix the best potential over the trajectory (top) with the final-step
    # potential (bottom) and squash through exp.
    return np.exp(
        min_weight * np.max(pot_m * beta) + (1.0 - min_weight) * (pot_l * beta)[-1]
    )
def compute_state_reward_euclidean(trace, **kwargs):
    """Reward based on the euclidean distance of the ball to the goal sites:
    mixes the minimum distance to the goal over the trajectory with the
    final distance to the final-goal site (weight: ``min_weight``)."""
    weight = kwargs["min_weight"]
    to_goal = np.linalg.norm(trace.goal_positions - trace.ball_positions, axis=-1)
    to_final_goal = np.linalg.norm(
        trace.goal_final_positions - trace.ball_positions, axis=-1
    )
    mixed_dist = weight * np.min(to_goal) + (1 - weight) * to_final_goal[-1]
    return np.exp(-2.0 * mixed_dist)
def compute_state_reward(trace, type_, reward_kwargs):
    """Dispatch the task-state reward computation to the function selected
    by *type_* ('dipole_potential' or 'euclidean'); raises KeyError for
    unknown types."""
    dispatch = {
        "dipole_potential": compute_state_reward_dipole_potential,
        "euclidean": compute_state_reward_euclidean,
    }
    reward_fn = dispatch[type_]
    return reward_fn(trace, **reward_kwargs)
def compute_joint_velocity_penalty(trace):
    """Unweighted penalty: mean (over steps) of the squared joint-velocity
    norm, padding unexecuted steps with the last recorded velocity."""
    vel = trace.joint_velocities
    n_pad = trace.n_not_executed_steps
    if n_pad > 0:
        # Repeat the final velocity as if it had been applied until the end
        # of the trajectory.
        # INSPECT: This is taken from https://github.com/psclklnk/self-paced-rl - check
        # whether this is necessary.
        padding = np.tile(vel[-1], n_pad).reshape(n_pad, vel.shape[-1])
        vel = np.concatenate((vel, padding))
    return np.mean(np.sum(np.square(vel), axis=1), axis=0)
def compute_joint_position_penalty(trace):
    """Unweighted penalty: mean squared deviation of the joints from their
    initial configuration."""
    deviation = trace.joint_positions - trace.joint_positions[0]
    return np.mean(np.square(deviation).sum(-1))
def compute_ball_velocity_penalty(trace, dt=1.0 / 500.0):
    """Unweighted penalty: mean squared norm of the finite-difference ball
    velocity.

    Parameters
    ----------
    trace : trace object with a (T, 3) ``ball_positions`` array.
    dt : float, optional
        Time step between consecutive ball positions. Defaults to the
        previously hard-coded 1/500 s (resolving the old TODO); pass the
        simulation's ``effective_dt`` to match the actual step size.
    """
    velocities = np.diff(trace.ball_positions, axis=0) / dt
    return np.mean((velocities * velocities).sum(-1))
def compute_success(trace, cup_inner_radius):
    """Compute whether the last ball position is inside the shape formed
    by the inner sides of the cup.

    Bug fix: traces produced by BallInCupSim carry a BicType enum in
    ``type_`` while this function compared against plain strings, so the
    cylinder/cone branches could never trigger and the distance fallback
    was always used. The type is now normalized to its member name first;
    plain string values keep working.
    """
    type_name = trace.type_.name if isinstance(trace.type_, Enum) else trace.type_
    if type_name == "cylinder":
        return cylinder_contains(
            trace.cup_center_bottom_positions[-1],
            trace.cup_center_top_positions[-1],
            cup_inner_radius,
            trace.ball_positions[-1],
        )
    elif type_name == "cone":
        return cone_contains(
            trace.cup_center_bottom_positions[-1],
            trace.cup_center_top_positions[-1],
            cup_inner_radius,
            trace.ball_positions[-1],
        )
    else:
        # if the cup shape is unknown threshold the distance to the bottom of the cup
        ball_to_goal = np.linalg.norm(
            trace.cup_center_bottom_positions[-1] - trace.ball_positions[-1]
        )
        return ball_to_goal < cup_inner_radius
@dataclass
class BallInCupRewardParams:
    """Parameters for the reward / success function of ball-in-cup."""
    # Selects the state reward: 'dipole_potential' or 'euclidean'.
    state_reward_type: str
    # Weights of the three penalty terms subtracted from the state reward.
    joint_velocity_penalty_factor: float
    joint_position_penalty_factor: float
    ball_velocity_penalty_factor: float
    # Radius used by compute_success to decide whether the ball is in the cup.
    cup_inner_radius: float
    # Parameters forwarded to the dipole-potential state reward.
    reward_dipole_eps: float
    reward_dipole_beta: float
    reward_min_weight: float
    def state_reward_kwargs(self):
        """Kwargs dict expected by the state reward functions."""
        return {
            "dipole_eps": self.reward_dipole_eps,
            "dipole_beta": self.reward_dipole_beta,
            "min_weight": self.reward_min_weight,
        }
    def as_dict(self):
        """Plain-dict view of all parameters (via dataclasses.asdict)."""
        return asdict(self)
def compute_reward(trace, params):
    """Compute the scalar reward and binary success flag for one trace:
    state reward minus weighted joint-position, joint-velocity and
    ball-velocity penalties."""
    state_reward = compute_state_reward(
        trace, params.state_reward_type, params.state_reward_kwargs()
    )
    total_penalty = (
        params.joint_position_penalty_factor * compute_joint_position_penalty(trace)
        + params.joint_velocity_penalty_factor * compute_joint_velocity_penalty(trace)
        + params.ball_velocity_penalty_factor * compute_ball_velocity_penalty(trace)
    )
    success = compute_success(trace, params.cup_inner_radius)
    return state_reward - total_penalty, success
def compute_rewards(traces, params):
    """Compute rewards and success flags for every trace in *traces*,
    returned as two parallel lists."""
    pairs = [compute_reward(trace, params) for trace in traces]
    rewards = [reward for reward, _ in pairs]
    success_flags = [success for _, success in pairs]
    return rewards, success_flags
if __name__ == "__main__":
    # Demo: 20 s trajectories on the cylinder-cup model.
    t_end = 20
    dt = BallInCupSim(BicType.cylinder).effective_dt
    q0 = np.array([0.0, 0.0, 0.0, 1.5707])
    # run multiple simulations in parallel
    evaluator = BallInCupParallelTrajectoryEvaluator(q0)
    trajectories = [_test_trajectory(dt, t_end) for _ in range(12)]
    traces = evaluator.eval(
        trajectories=trajectories,
        trajectory_exe_kwargs=dict(
            stabilize_current_pos=True, verbose=False, video_writer=None
        ),
        sim_init_kwargs=dict(type_=BicType.cylinder),
    )
    # run a single simulation (optionally rendering a video)
    # render_video = False
    render_video = True
    video_render_ctx = (
        VideoRenderStream("ball-in-cup_TEMP.mp4", Path(__file__).parent.resolve())
        if render_video
        else NullContext()
    )
    with video_render_ctx as video_stream:
        exe_params = dict(
            stabilize_current_pos=True, verbose=False, video_writer=video_stream
        )
        trace = evaluate_trajectory(
            q0=q0,
            trj=_test_trajectory(dt, t_end),
            trj_kwargs=exe_params,
            sim_init_kwargs=dict(type_=BicType.cylinder),
        )
    print("Done.")
| JoeMWatson/monte-carlo-posterior-policy-iteration | policy_search/ball_in_a_cup.py | ball_in_a_cup.py | py | 25,185 | python | en | code | 4 | github-code | 50 |
from termcolor import colored
import json
import torch
from utils import *
import matplotlib.pyplot as plt
# load the data from interaction_history.json
def load_json(json_file : str) -> dict:
    """Parse the JSON file at *json_file* and return its contents."""
    with open(json_file, "r") as handle:
        return json.load(handle)
def get_colored_word(guessed_word : str, feedback : list) -> str:
    """Colorize *guessed_word* character by character from Wordle feedback.

    Arguments:
        `guessed_word`: 5-character string, typically the model output
        converted with get_word().
        `feedback`: list the same length as `guessed_word` with entries from
        {-1, 0, 1}: 1 -> green (correct spot), 0 -> yellow (wrong spot),
        -1 -> red (absent). Any other value raises ValueError.
    """
    fb_to_color = {1: 'green', 0: 'yellow', -1: 'red'}
    pieces = []
    for fb, ch in zip(feedback, guessed_word):
        if fb not in fb_to_color:
            raise ValueError
        pieces.append(colored(ch, fb_to_color[fb]))
    return "".join(pieces)
def get_colored_turn(turns : dict) -> str:
    """Render a sequence of attempts as colored words joined by ' => '.

    Arguments:
        `turns`: dict of up to 6 attempts; every value must provide a
        'feedback' list and a 'guessed_word' string, e.g.
        '0': {'feedback': [1, 1, 0, -1, -1], 'guessed_word': 'hello'}.
    """
    colored_words = [
        get_colored_word(turn_val['guessed_word'], turn_val['feedback'])
        for turn_val in turns.values()
    ]
    return " => ".join(colored_words)
def print_epoch_turns(one_epoch_interaction : dict) -> None:
    """Print every word of one training epoch with its colorized guess
    history, one line per word.

    Arguments:
        `one_epoch_interaction`: maps each correct word to its dict of
        turns (each turn holding 'feedback' and 'guessed_word'), i.e. the
        model's interactions over the whole training dataset for one
        epoch. Useful for spotting patterns in the AI's learning.
    """
    for correct_word, turns in one_epoch_interaction.items():
        colored_turn = get_colored_turn(turns)
        print(f"{correct_word} : {colored_turn}")
def print_word_over_epochs(interaction_history : dict, correct_word : str) -> None:
    """Print the evolution of the AI's guess sequences for *correct_word*
    across training epochs, one colorized line per epoch.

    Arguments:
        `interaction_history`: dict produced by train(), keyed by epoch
        number as a string; each value maps words to their turn dicts.
        `correct_word`: the word tracked through the epochs.

    Bug fix: the original hard-coded ``range(100)`` and raised KeyError for
    histories with fewer (or more) epochs; now every epoch actually present
    is printed in numeric order.
    """
    for epoch_key in sorted(interaction_history, key=int):
        turns = interaction_history[epoch_key][correct_word]
        print(get_colored_turn(turns))
def accuracy_on_output(one_epoch_interaction : dict) -> float:
    """Percentage of words that were guessed correctly at some attempt
    during one epoch.

    Note: the model keeps learning between interactions within an epoch, so
    this figure can be skewed; prefer the dedicated accuracy measure in
    metrics.py / accuracy_on_dataset().
    """
    n_correct = 0.
    n_words = 0.
    for correct_word, turns in one_epoch_interaction.items():
        n_words += 1
        if any(turn['guessed_word'] == correct_word for turn in turns.values()):
            n_correct += 1
    return round(100. * n_correct / n_words, 3)
def accuracy_on_dataset(model_path : str, wordlist_path : str, dataset_name : str, k : int = 3) -> tuple:
    """
    Given a model path, wordlist path, and the dataset name from {'train', 'test', 'val'},
    finds the accuracy on the given dataset.
    Arguments:
        `model_path`: This needs to be the full path to the model. Usually models are stored in
        the models/ subdirectory.
        `wordlist_path`: This needs to be the full path to the word list. Usually the word list is
        stored in data/ subdirectory.
        `dataset_name`: The default split on the loaded wordlist will be [0.8, 0.05, 0.15] for
        {'train', 'val', 'test'}. The dataset_name specifies which dataset to use to find this accuracy.
        `k`: The number of words to track in beam search. Increasing this number makes search slower.
    Return:
        `results`: A dict, storing the attempts that the model made for each word in the specified dataset.
        `accuracy`: A float multiplied by 100 to give % of accuracy
    """
    # NOTE(review): the docstring promises splits [0.8, 0.05, 0.15] but the
    # test fraction here is 0 — confirm which split is intended.
    splits = [0.8, 0.05, 0]
    dataset = get_dataset(wordlist_path)
    datasets = get_split_dataset(dataset, splits)
    mask_tree = get_mask_tree(wordlist_path)
    model = torch.load(model_path)
    model.eval()
    acc, count = 0., 0.
    results = {word : {} for word, label in datasets[dataset_name]}
    i = 0
    N = len(datasets[dataset_name])
    for correct_word, label in datasets[dataset_name]:
        i += 1
        print(f"word: {correct_word} {i}/{N}", end='\r')
        features = get_default_features()
        # Up to 6 guesses per word, feeding the feedback back into the features.
        for attempt in range(6):
            output = model(features)
            guessed_word = get_word_beam_search(output, mask_tree, k)
            feedback = get_feedback(guessed_word, correct_word)
            features = get_updated_features(features, feedback, guessed_word)
            results[correct_word][attempt] = {
                'feedback': feedback,
                'guessed_word': guessed_word,
            }
            if guessed_word == correct_word:
                acc += 1
                break
        count += 1
    acc = round(100. * acc / count, 3) if count else 0
    return results, acc
def get_in_vocab(interaction_results : dict, words_set : set) -> float:
    """Percentage of the model's guesses that belong to *words_set*.

    Arguments:
        `interaction_results`: the model's interaction history (word ->
        dict of turns, each holding a 'guessed_word').
        `words_set`: vocabulary to check the guesses against.
    """
    n_in_vocab = 0.
    n_guesses = 0.
    for turns in interaction_results.values():
        for turn in turns.values():
            n_guesses += 1
            if turn['guessed_word'] in words_set:
                n_in_vocab += 1
    return round(100 * n_in_vocab / n_guesses, 4) if n_guesses else 0
def show_guess_distribution(results : dict):
    """
    Finds the number of attempts taken to guess each word.
    Then creates a distribution to display the number of guesses taken
    Parameters:
        `results`: A dictionary which has the attempt history for each word.
        This comes as an output from accuracy_on_dataset()
    """
    # Buckets 1..6 count solved words by attempt; bucket 7 counts failures.
    guess_count = { i : 0 for i in range(1, 8) }
    for correct_word, attempts in results.items():
        if len(attempts) == 6:
            # A full 6-attempt history is a success only if the last guess hit.
            if correct_word == attempts[5]['guessed_word']:
                count_attempts = 6
            else:
                count_attempts = 7
        else:
            # Fewer than 6 attempts means the word was solved early.
            count_attempts = len(attempts)
        guess_count[count_attempts] += 1
    # Average number of guesses over the solved words only (bucket 7 excluded).
    _sum = 0.
    total_count = 0.
    for guess, count in guess_count.items():
        if guess == 7:
            continue
        _sum += count * guess
        total_count += count
    avg = round(_sum / total_count, 3)
    print(guess_count)
    labels = [str(i) for i in guess_count.keys()]
    labels[-1] = "Could Not Guess"
    bars = list(guess_count.values())
    plt.bar(labels, bars, color="green")
    plt.title(f"Average Score : {avg}")
    plt.show()
    plt.close()
def k_variation_beam_search(model_name : str) -> None:
    """Report train accuracy and guess distributions for several beam
    widths k of the beam-search decoder.

    Bug fix: the original looped over ks but never passed k to
    accuracy_on_dataset, so every run silently used the default beam width
    (k=3) and the comparison was meaningless.
    """
    print(model_name)
    ks = [1, 3, 5, 10]
    results, acc = {}, {}
    for k in ks:
        results[k], acc[k] = accuracy_on_dataset(
            model_name, "data/official.txt", "train", k=k
        )
    for k in ks:
        print(f"Accuracy for k = {k}: {acc[k]}%")
        show_guess_distribution(results[k])
        print("")
def print_model_statistics(model_name : str) -> None:
    """
    Print accuracy and in-vocabulary statistics for a model on the
    train/val/test splits of the official word list (usual split is
    [0.8, 0.05, 0.15]).

    Arguments:
        `model_name`: Full path to the model file, usually under models/.
    """
    print(model_name)
    splits = ('train', 'val', 'test')
    results, acc = {}, {}
    for split in splits:
        results[split], acc[split] = accuracy_on_dataset(model_name, "data/official.txt", split)
    print(f"Train accuracy: {acc['train']}%")
    print(f"validation accuracy: {acc['val']}%")
    print(f"Test accuracy: {acc['test']}%")
    word_set = get_wordset("data/official.txt")
    in_vocab = {split: get_in_vocab(results[split], word_set) for split in splits}
    print(f"Words guessed in vocab(train): {in_vocab['train']}%")
    print(f"Words guessed in vocab(val): {in_vocab['val']}%")
    print(f"Words guessed in vocab(test): {in_vocab['test']}%")
    print("")
if __name__ == "__main__":
    # Earlier checkpoint evaluations (naive/bigger/biggest variants and
    # beam-width sweeps via k_variation_beam_search) were removed as dead
    # commented-out code; see version control history for the full list.
    print_model_statistics("models/100epoch_bigger_train")
import csv
import yaml
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from xlsxwriter import Workbook
from rectpack import newPacker, PackingBin
# Possible optimizing algorithims:
# https://github.com/secnot/rectpack/blob/master/rectpack/maxrects.py
# - MaxRects
# - MaxRectsBl
# - MaxRectsBssf <- Default
# - MaxRectsBaf <- Which I see as more aligning optimized
# - MaxRectsBlsf
# from rectpack.maxrects import MaxRectsBaf as algorithm
# https://github.com/secnot/rectpack/blob/master/rectpack/guillotine.py
# Guillotine algorithms GUILLOTINE-RECT-SPLIT, Selecting one
# Axis split, and one selection criteria.
# GuillotineBssfSas
# GuillotineBssfLas
# GuillotineBssfSlas
# GuillotineBssfLlas
# GuillotineBssfMaxas
# GuillotineBssfMinas
# GuillotineBlsfSas
# GuillotineBlsfLas
# GuillotineBlsfSlas
# GuillotineBlsfLlas
# GuillotineBlsfMaxas
# GuillotineBlsfMinas
# GuillotineBafSas
# GuillotineBafLas
# GuillotineBafSlas
# GuillotineBafLlas
# GuillotineBafMaxas
# GuillotineBafMinas
from rectpack.guillotine import GuillotineBssfSlas as algorithm
def read_file(file, rects=None, mul=1, extra_name='', equivalences=None):
    """Parse a semicolon-separated CSV of panel pieces into `rects`.

    Each CSV row describes one piece: column 0 is the height, 1 the width,
    3 the material, 5 a comma-separated list of piece names, and 12-15 the
    four edge bands ("cantos"). Material and canto names are remapped
    through `equivalences` when present.

    :param file: path to the input CSV (';'-delimited, '|'-quoted)
    :param rects: dict to accumulate into; a fresh dict when omitted
    :param mul: quantity multiplier stored per piece (must be > 0)
    :param extra_name: prefix prepended (with '%') to every piece name
    :param equivalences: material/canto name remapping table
    :return: `rects`, a mapping material -> piece name -> piece attributes
    """
    assert mul > 0
    # Bug fix: mutable default arguments are shared across calls, so a second
    # read_file() call kept the previous run's data; create fresh dicts.
    if rects is None:
        rects = {}
    if equivalences is None:
        equivalences = {}
    with open(file, newline='') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=';', quotechar='|')
        for row in spamreader:
            if row[3] in equivalences:
                material = equivalences[row[3]]
            else:
                material = row[3]
            if material not in rects:
                rects[material] = {}
            pre_name = extra_name + '%'
            name = ', '.join([pre_name + a for a in row[5].split(', ')])
            assert name not in rects[material]
            rects[material][name] = {}
            height = float(row[0])
            rects[material][name]['height'] = height
            width = float(row[1])
            rects[material][name]['width'] = width
            rects[material][name]['mul'] = mul
            rects[material][name]['cantos'] = ['none', 'none', 'none', 'none']
            for j in range(4):
                canto = row[12 + j]
                if canto in equivalences:
                    rects[material][name]['cantos'][j] = equivalences[canto]
                else:
                    rects[material][name]['cantos'][j] = canto
            # Pieces with any edge banding must be large enough to band.
            if rects[material][name]['cantos'] != ['', '', '', '']:
                assert width >= 120, name
                assert height >= 120, name
    return rects
def rect_pack(pieces, material, count=2):
    """Pack all pieces of one material into stock-board bins with rectpack.

    :param pieces: mapping of piece-name -> {'width', 'height', 'mul', ...}
                   as produced by read_file()
    :param material: material label, embedded in each bin id
    :param count: bins of each size to offer per size class; None means
                  one pass with unlimited bins of each size
    :return: the packed rectpack packer
    """
    # Saw blade thickness added to each piece so cuts don't eat into pieces.
    ESPESOR_SIERRA = 5
    # Board sizes: full board (1830x2600) and half board (1830x1300).
    # NOTE(review): range(2) only yields div 1 and 2, so the 'quarter'
    # branch below is unreachable -- confirm whether range(3) was intended.
    BIN_SIZES = [(1830, 2600 // (2**j), (2**j)) for j in range(2)]
    # PackingBin = Enum(["BNF", "BFF", "BBF", "Global"])
    packer = newPacker(bin_algo=PackingBin.BBF, pack_algo=algorithm,
                       rotation=True)
    if count is None:
        loops = 1
    else:
        loops = count
    for ll in range(loops):
        for w, h, div in BIN_SIZES:
            name = ''
            if div == 1:
                name = 'whole'
            elif div == 2:
                name = 'half'
            elif div == 4:
                name = 'quarter'
            if count is None:
                # Unlimited bins of this size, all sharing one bid.
                packer.add_bin(w, h, count=float("inf"),
                               bid=f'{material}_{name}_{ll}')
            else:
                packer.add_bin(w, h, count=1,
                               bid=f'{material}_{name}_{ll}')
    for piece in pieces.keys():
        for name in piece.split(', '):
            for q in range(pieces[piece]['mul']):
                w = pieces[piece]['width'] + ESPESOR_SIERRA
                h = pieces[piece]['height'] + ESPESOR_SIERRA
                # NOTE(review): q == 0 and q == 1 both add a rect with the
                # bare name (duplicate rid when mul >= 2) -- confirm whether
                # the condition should be q > 0.
                if q > 1:
                    packer.add_rect(w, h, f'{q}_{name}')
                else:
                    packer.add_rect(w, h, name)
    packer.pack()
    return packer
def analyse_packer(packer):
    """Return the number of bins used by a packer (highest bin index + 1)."""
    return max(rect[0] for rect in packer.rect_list()) + 1
def init_subplot(bins, offset, size, material):
    """Create the (offset+1)-th of `bins` side-by-side subplots and draw the
    bin outline.

    NOTE(review): relies on the module-level `fig` created in __main__ --
    this function only works after the script's figure has been set up.

    :param bins: total number of subplots in the figure
    :param offset: zero-based index of this subplot
    :param size: (width, height) of the bin
    :param material: subplot title (the bin id)
    :return: the matplotlib axes for the new subplot
    """
    fig.add_subplot(1, bins, offset + 1).title.set_text(material)
    ax = plt.gca()
    # Plotting the extents expands the axes to cover the whole bin.
    ax.plot(*size)
    ax.add_patch(Rectangle((0, 0), *size,
                           edgecolor='blue',
                           facecolor='none',
                           linewidth=2,
                           ))
    return ax
def plot_packers(packers):
    """Plot every bin of every packer, drawing all rectangles in one randomly
    chosen color (a single color is shared by the whole figure)."""
    from PIL.ImageColor import colormap
    from random import choice
    # Colors too close to white would be invisible on the white background.
    exclude_colors = ['snow', 'lavender', 'lavenderblush', 'lightgrey',
                      'blanchedalmond', 'ghostwhite', 'mintcream', 'ivory',
                      'white']
    while True:
        rnd_color = choice(list(colormap.keys()))
        if rnd_color not in exclude_colors:
            break
    print(rnd_color)
    # Amount of total bins
    bins = 0
    for packer in packers:
        bins += len(packer)
    offset = 0
    for packer in packers:
        for abin in packer:
            ax = init_subplot(bins, offset,
                              (abin.width, abin.height),
                              abin.bid)
            offset += 1
            for rect in abin:
                x = rect.x
                y = rect.y
                w = rect.width
                h = rect.height
                ax.add_patch(Rectangle((x, y), w, h,
                                       edgecolor='white',
                                       facecolor=rnd_color,
                                       linewidth=1))
def plot_packer(bins, offset, packer):
    """Plot one packer's bins, coloring rectangles per object.

    The object key is the part of the rect name before the first '%'
    (the `extra_name` prefix added by read_file), so all pieces of the
    same object share one randomly chosen color.

    :param bins: total number of subplots in the whole figure
    :param offset: subplot index at which this packer starts
    :param packer: a packed rectpack packer
    :return: the next free subplot index
    """
    from PIL.ImageColor import colormap
    from random import choice
    # Colors too close to white would be invisible on the white background.
    exclude_colors = ['snow', 'lavender', 'lavenderblush', 'lightgrey',
                      'blanchedalmond', 'ghostwhite', 'mintcream', 'ivory',
                      'white']
    last_b = 0
    ax = init_subplot(bins, offset,
                      (packer[last_b].width, packer[last_b].height),
                      packer[last_b].bid)
    offset_rt = offset
    colors = {}
    for rect in packer.rect_list():
        b, x, y, w, h, name = rect
        # Object prefix before '%' selects (or creates) the object color.
        obj = name.split('%')[0]
        if obj not in colors:
            while True:
                rnd_color = choice(list(colormap.keys()))
                if rnd_color not in exclude_colors:
                    break
            colors[obj] = rnd_color
        # rect_list() is grouped by bin, so a larger b means a new bin.
        if b > last_b:
            offset_rt += 1
            ax = init_subplot(bins, offset_rt,
                              (packer[b].width, packer[b].height),
                              packer[b].bid)
            last_b = b
        ax.add_patch(Rectangle((x, y), w, h,
                               edgecolor='white',
                               facecolor=colors[obj],
                               linewidth=1,
                               ))
    return offset_rt + 1
def write_excel(workbook, material, rects, cm):
    """Write one worksheet per material with the cut list (Baumann-style).

    :param workbook: an open xlsxwriter Workbook
    :param material: material name, used as the worksheet name
    :param rects: piece dict for this material (name -> attributes)
    :param cm: when True, convert dimensions from mm to cm
    """
    worksheet = workbook.add_worksheet(material)
    # Baumann like
    header = ["Cantidad", "Altura", "Anchura", "ID", "Material", "Rota",
              "canto_1", "canto_2", "canto_3", "canto_4", "Nombre"]
    # write_header
    row = 0
    for col, item in enumerate(header):
        worksheet.write(row, col, item)
    row_offset = 1
    col_offset = 0
    for row, rect in enumerate(rects.items()):
        name = rect[0]
        data_dict = rect[1]
        if cm:
            # NOTE(review): int() truncates before dividing, dropping any
            # sub-millimeter fraction -- confirm this is intended.
            height = int(data_dict['height']) / 10
            width = int(data_dict['width']) / 10
        else:
            height = data_dict['height']
            width = data_dict['width']
        mul = data_dict['mul']
        # One row covers every alias in the comma-separated name.
        qty = mul * len(name.split(', '))
        cantos = data_dict['cantos']
        # Normalize so a lone canto appears in the first slot of its pair.
        # NOTE(review): this mutates the shared list inside `rects` in place.
        if cantos[0] == '' and cantos[1] != '':
            aux = cantos[1]
            cantos[1] = cantos[0]
            cantos[0] = aux
        if cantos[2] == '' and cantos[3] != '':
            aux = cantos[3]
            cantos[3] = cantos[2]
            cantos[2] = aux
        items_to_write = (qty, height, width, row, material, 1, *cantos, name)
        for col, item in enumerate(items_to_write):
            worksheet.write(row + row_offset, col + col_offset, item)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--file", type=str, default=None,
                        help="Input file.")
    parser.add_argument("--qty", type=int, default=None,
                        help="Multiple.")
    parser.add_argument("--yaml", type=str, default=None,
                        help="Yaml conf.")
    parser.add_argument("--excel", action='store_true',
                        help="Export Excel file.")
    parser.add_argument("--cm", action='store_true',
                        help="Use centimeters in exported Excel file.")
    parser.add_argument("--by_obj", action='store_true',
                        help="Plot by objects in different colors.")
    args = parser.parse_args()
    # Either a single CSV (--file, optional --qty) or a YAML manifest
    # (--yaml) describing several CSVs plus name equivalences -- not both.
    if args.yaml is None:
        assert args.file is not None
        qty = args.qty or 1
        rects = read_file(args.file, mul=qty)
    else:
        assert args.file is None
        assert args.qty is None
        with open(args.yaml, "r") as stream:
            try:
                yaml_conf = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                print(exc)
        rects = {}
        equivalences = {}
        for conf in yaml_conf.keys():
            element = yaml_conf[conf]
            if conf == 'equivalences':
                # Each entry is a single-key mapping: alias -> canonical name.
                for k in element:
                    mat = list(k)[0]
                    assert mat not in equivalences
                    equivalences[mat] = k[mat]
            else:
                # Any other top-level key is an object: its name prefixes the
                # piece names and its entries give the CSV path and quantity.
                qty = 1
                for k in element:
                    if 'path' in k:
                        file = k['path']
                    if 'qty' in k:
                        qty = k['qty']
                rects = read_file(file, rects=rects, mul=qty,
                                  extra_name=conf, equivalences=equivalences)
    fig = plt.figure()
    packers = []
    bins = 0
    # Get total of used bins to set the following subplots
    # In addition, if asked, one excel is generated for each material
    if args.excel:
        workbook = Workbook('placas.xlsx')
    for i, material in enumerate(rects.keys()):
        packer = rect_pack(rects[material], material)
        packers.append(packer)
        bins += analyse_packer(packer)
        if args.excel:
            write_excel(workbook, material, rects[material], args.cm)
    if args.excel:
        workbook.close()
    if args.by_obj:
        offset = 0
        for i, material in enumerate(rects.keys()):
            offset = plot_packer(bins, offset, packers[i])
    else:
        plot_packers(packers)
    plt.show()
| feede7/MonchoCut | monchocut.py | monchocut.py | py | 10,697 | python | en | code | 0 | github-code | 50 |
from django.urls import path
from .views import *
# URL routes for the app; route names are used for reverse()/template lookups.
urlpatterns = [
    path('', index, name="home"), # the root route is named "home"
    path('about/', about, name="about"),
    path('post/<int:post_id>/', show_post, name="post"), # post_id comes from get_absolute_url
    # when the show_post page is opened
    path('category/<int:cat_id>/', show_category, name="category")
]
import sys
import os
import Tkinter
import tkMessageBox
# Root window of the attendance GUI (Python 2 Tkinter).
top=Tkinter.Tk()
def register():
    '''Capture a new face dataset, extract embeddings and retrain the model.

    SECURITY NOTE(review): os.system runs these commands through the shell;
    the commands are hard-coded here, but prefer subprocess.run([...]) if
    any argument ever becomes user-controlled.
    '''
    os.system('python dataSetGenerator.py')
    os.system('python extract_embeddings.py --dataset dataset --embeddings output/embeddings.pickle --detector face_detection_model --embedding-model openface_nn4.small2.v1.t7')
    os.system('python train_model.py --embeddings output/embeddings.pickle --recognizer output/recognizer.pickle --le output/le.pickle')
def identify():
    '''Run live face recognition on the video stream with the trained model.'''
    os.system('python recognize_video.py --detector face_detection_model --embedding-model openface_nn4.small2.v1.t7 --recognizer output/recognizer.pickle --le output/le.pickle')
# Wire the two workflows to buttons and start the event loop.
B1=Tkinter.Button(top,text="Register",command= register)
B2=Tkinter.Button(top,text="Identify",command = identify)
B1.grid(row=0, column=0)
B2.grid(row=1, column=1)
top.mainloop()
from helpers import *
d = get_aoc_data(day=21)
# One Parser per scrambling instruction of the puzzle input. Calling a parser
# with a line returns truthiness on match; the captured <int>/<str> fields are
# then read by unpacking the parser object itself (see helpers.Parser --
# behavior assumed from usage in solve(), TODO confirm).
swap_pos = Parser('swap position <int> with position <int>')
swap_let = Parser('swap letter <str> with letter <str>')
reverse_pos = Parser('reverse positions <int> through <int>')
rotate_left = Parser('rotate left <int> step<>')
rotate_right = Parser('rotate right <int> step<>')
move_pos = Parser('move position <int> to position <int>')
rotate_pos = Parser('rotate based on position of letter <>')
def solve(lines, unscrambled):
    """Apply the scrambling instructions in `lines` to `unscrambled`.

    :param lines: iterable of instruction strings (one operation per line)
    :param unscrambled: the starting password string
    :return: the scrambled password as a string
    """
    current = list(unscrambled)
    for l in lines:
        if swap_pos(l):
            # Swap the characters at two positions.
            a, b = swap_pos
            current[a], current[b] = current[b], current[a]
        elif swap_let(l):
            # Swap two characters, located by value.
            a, b = swap_let
            a = current.index(a)
            b = current.index(b)
            current[a], current[b] = current[b], current[a]
        elif reverse_pos(l):
            # Reverse the inclusive span [a, b].
            a, b = reverse_pos
            current[a:b + 1] = current[a:b + 1][::-1]
        elif rotate_left(l):
            shift, _ = rotate_left
            current = current[shift:] + current[:shift]
        elif rotate_right(l):
            shift, _ = rotate_right
            # Guard: current[-0:] would be the whole list, so skip shift == 0.
            if shift:
                current = current[-shift:] + current[:-shift]
        elif move_pos(l):
            # Remove the character at a and reinsert it at position b.
            a, b = move_pos
            c = current.pop(a)
            current[b:b] = [c]
        elif rotate_pos(l):
            # Rotate right once, plus once per index of the letter, plus one
            # extra rotation when the index is at least 4.
            a, = rotate_pos
            idx = current.index(a)
            current = current[-1:] + current[:-1]
            if idx:
                current = current[-idx:] + current[:-idx]
            if idx >= 4:
                current = current[-1:] + current[:-1]
        else:
            print('unknown command', l)
    return ''.join(current)
def part1():
    """Scramble the puzzle's starting password and print the result."""
    scrambled = solve(d.lines, 'abcdefgh')
    print('answer is', scrambled)
def part2():
    """Brute-force the unscrambled password: try every permutation of the
    target until scrambling it reproduces 'fbgdceah'."""
    candidates = list(permutations('fbgdceah'))
    total = len(candidates)
    print('Attempting to crack the password,', total, 'permutations')
    for idx, candidate in enumerate(candidates):
        if not idx % 1000:
            print('\rtried {:.2f} %... '.format(idx / total * 100), end='')
        if solve(d.lines, candidate) != 'fbgdceah':
            continue
        print('\rtried {:.2f} %... '.format(idx / total * 100), end='')
        print(' CRACKED!!!\nAnswer is ', ''.join(candidate))
        break
import ops1d as ops
#hyperparameter processing
from operator import itemgetter
import torch.nn as nn
class Model(nn.Module):
    """NAS network: an input convolution, a sequence of Cells built from a
    flat hyperparameter dict, global average pooling and a linear classifier.

    Hyperparameter keys are flat strings such as 'cell_0_...'; _build_dict
    regroups them per cell id.
    """
    def __init__(self, input_size, output_size, hyperparameters):
        """
        :param input_size: input shape; only input_size[0] (channels) is used
        :param output_size: number of output classes for the final Linear
        :param hyperparameters: flat dict including 'channels' and per-cell keys
        """
        super(Model,self).__init__()
        self.hyperparameters = hyperparameters
        self.channels = hyperparameters["channels"]
        self.cells = nn.ModuleList()
        self.build_cells(hyperparameters)
        self.in_conv = ops.StdConv(input_size[0], self.channels)
        self.gap = ops.AdaAvgPool()
        self.fc = nn.Linear(self.channels, output_size)
    def _build_dict(self,parameters : dict, keyword : str):
        """Group flat '<keyword>_<id>_<key>' parameters into {id: {key: value}}.

        NOTE(review): the id is read from a single character at a fixed
        offset, so this only supports single-digit ids -- confirm that no
        configuration uses 10 or more cells.
        """
        _dictionary = dict()
        keyword_length = len(keyword)
        id_index = keyword_length + 1
        for parameter in parameters:
            if parameter[:keyword_length] == keyword:
                cell_id = int(parameter[id_index])
                operation_key = parameter[id_index + 2 : ]
                operation_value = parameters[ parameter ]
                if cell_id in _dictionary.keys():
                    _dictionary[ cell_id ][ operation_key ] = operation_value
                else: #if dictionary doesnt exist, make it
                    _dictionary[ cell_id ] = { operation_key : operation_value }
        return _dictionary
    def build_cells(self, parameters):
        """Instantiate one Cell per 'cell' id found in the parameter dict."""
        cell_dictionary = self._build_dict(parameters, "cell")
        for i in cell_dictionary:
            self.cells.append(Cell(cell_dictionary[i],self.channels))
    def forward(self,x):
        """Run input conv, all cells in order, pooling and the classifier."""
        x = self.in_conv(x)
        for i in self.cells:
            x = i(x)
        #print("Size of x after cells: ", x.size())
        x = self.gap(x)
        #print("Size of x after gap: ", x.size())
        #print("Size of dense input: ", self.channels)
        x = self.fc(x.squeeze())
        return x
class Ops(nn.Module):
    """One operation node of a Cell.

    Built from a per-op parameter dict: 'type' selects the operation,
    'input_*' keys name the predecessor ops whose outputs feed this node
    (summed with StdAdd when there is more than one), and any remaining
    keys are passed to the operation's constructor.
    """
    def __init__(self, parameters, channels):
        super(Ops,self).__init__()
        self.args = {}
        self.channels = channels
        self.multicompute = False
        self.input = []
        for i in parameters:
            if i == "type":
                self.operation = self.get_operation(parameters[i])
            elif i[:-2] == "input":
                # Keys shaped like 'input_0'; deduplicate predecessor ids.
                if parameters[i] not in self.input:
                    self.input.append(parameters[i])
            else:
                self.args[i] = parameters[i]
        self.compute = nn.ModuleList()
        if len(self.input) > 1:
            # Multiple predecessors: sum them before the main operation.
            self.compute.append(ops.StdAdd())
            self.multicompute = True
        self.compute.append(self.operation(**self.args))
    def get_required(self) -> list:
        """Return the ids of the ops whose outputs this node consumes."""
        return self.input
    def get_operation(self, op_key):
        """Map an operation name to an ops1d class, filling self.args with
        the fixed constructor arguments that operation needs.

        NOTE(review): this mutates self.args while the parameter loop in
        __init__ is still filling it; a later parameter with the same key
        would overwrite these values -- confirm key sets never collide.
        """
        if op_key == "StdConv":
            operation = ops.StdConv
            self.args["C_in"] = self.args["C_out"] = self.channels
        elif op_key == "Conv3":
            operation = ops.ConvBranch
            self.args["kernel_size"] = 3
            self.args["stride"] = 1
            self.args["padding"] = 0
            self.args["separable"] = False
            self.args["C_in"] = self.args["C_out"] = self.channels
        elif op_key == "Conv5":
            operation = ops.ConvBranch
            self.args["kernel_size"] = 5
            self.args["stride"] = 1
            self.args["padding"] = 0
            self.args["separable"] = False
            self.args["C_in"] = self.args["C_out"] = self.channels
        elif op_key == "MaxPool":
            operation = ops.Pool
            self.args["pool_type"] = "max"
            self.args["kernel_size"] = 3
        elif op_key == "AvgPool":
            operation = ops.Pool
            self.args["pool_type"] = "avg"
            self.args["kernel_size"] = 3
        elif op_key == "FactorizedReduce":
            operation = ops.FactorizedReduce
            self.args["C_in"] = self.args["C_out"] = self.channels
        elif op_key == "":
            # Fallback for an empty type string.
            operation = ops.StdConv
        return operation
    def forward( self, x ):
        """Apply StdAdd (when multi-input, x is a tuple) then the operation."""
        for count,i in enumerate(self.compute):
            if self.multicompute and count == 0:
                x = i(*x)
            else:
                x = i(x)
        return x
    def process( self, x : list):
        """Select this node's required inputs from the list of all outputs
        produced so far and run forward on them."""
        return self.forward(itemgetter( *self.get_required() )( x ))
class Cell(nn.Module):
    """
    Contains a series of operations and information links.

    Ops are built from flat 'op(s)_<id>_<key>' parameters, topologically
    ordered by their declared inputs, and executed as a small DAG whose
    output is the op indexed by parameters['num_ops'].
    """
    def __init__(self,parameters,channels):
        super(Cell, self).__init__()
        self.ops_id = [] #numerical identifier for each operation
        self.ops = []
        self.channels = channels
        self.inputs = []
        # Index (into the outputs list) of the op whose result the cell returns.
        self.output_operation = parameters["num_ops"]
        self.build_ops(parameters)
        self.compute_order = nn.ModuleList()
        self.compute_order.extend(self.calculate_compute_order())
    def _build_dict(self,parameters : dict, keyword : str):
        """Group flat parameters into {op_id: {key: value}}.

        NOTE(review): near-duplicate of Model._build_dict but with
        id_index = len(keyword) + 2 (matches keys like 'ops_0_type');
        single-digit ids only -- confirm the key format and op count.
        """
        _dictionary = dict()
        keyword_length = len(keyword)
        id_index = keyword_length + 2
        for parameter in parameters:
            if parameter[:keyword_length] == keyword:
                cell_id = int(parameter[id_index])
                operation_key = parameter[id_index + 2 : ]
                operation_value = parameters[ parameter ]
                if cell_id in _dictionary.keys():
                    _dictionary[ cell_id ][ operation_key ] = operation_value
                else: #if dictionary doesnt exist, make it
                    _dictionary[ cell_id ] = { operation_key : operation_value }
        return _dictionary
    def build_ops(self, parameters):
        """Instantiate one Ops node per id found under the 'op' keyword."""
        ops_dictionary = self._build_dict(parameters, "op")
        for i in ops_dictionary:
            self.ops.append(Ops(ops_dictionary[i], self.channels))
            self.ops_id.append(i)
    def calculate_compute_order(self):
        """Topologically sort the ops so each runs after its inputs.

        NOTE(review): an op whose dependencies can never be satisfied makes
        this loop re-append already-scheduled ops and spin forever -- the
        configuration is assumed to always form a valid DAG; confirm.
        """
        compute_order = []
        #zero is the cell inputs, the first operation is op 1
        current_ops_done = [0]
        while len(compute_order) < len(self.ops):
            for i in self.ops:
                if set(i.get_required()).issubset(current_ops_done):
                    compute_order.append(i)
                    current_ops_done.append(self.ops_id[self.ops.index(i)])
        return compute_order
    def forward(self, x):
        """Run the ops in dependency order; outputs[0] is the cell input and
        outputs[k] is the result of op k."""
        outputs = [x]
        for op in self.compute_order:
            outputs.append(op.process(outputs))
        return outputs[self.output_operation]
#!/usr/bin/python
from json import dumps
from time import sleep
from requests import get, post, exceptions
from .base import get_num_pages
class RundeckApi(object):
    '''
    This class provides multiple functions to manage projects, jobs or executions from
    Rundeck.
    To do it so, it uses Rundeck API endpoints, but also a connector to Rundeck database
    and complements deleting executions' data from workflow tables.
    '''

    def __init__(self, url, headers, db_conn, log, chunk_size=200, keep_time='30d', ssl=False, search_time=60, del_time=300):
        '''Store connection settings and tuning parameters.

        :param url: base URL of the Rundeck API
        :param headers: HTTP headers (auth token, content types) sent with every request
        :param db_conn: database connector exposing open/query/apply/close
        :param log: logger exposing write(message[, level])
        :param chunk_size: executions fetched/deleted per cycle
        :param keep_time: Rundeck olderFilter value, e.g. '30d'
        :param ssl: verify TLS certificates when True
        :param search_time: timeout in seconds for GET requests
        :param del_time: timeout in seconds for POST (deletion) requests
        '''
        self._url = url
        self._headers = headers
        self._db = db_conn
        self._log = log
        self._chunk_size = chunk_size
        self._keep_time = keep_time
        self._check_ssl = ssl
        self._search_time = search_time
        self._del_time = del_time

    def __get(self, endpoint, parameters=''):
        '''GET request against a Rundeck API endpoint.

        :return: (True, response) on HTTP success, else (False, error message/exception)
        '''
        status = False
        data = ''
        if not endpoint:
            return False, 'No valid endpoint.'
        try:
            response = get(endpoint, params=parameters, headers=self._headers,
                           verify=self._check_ssl, timeout=self._search_time)
            if response.ok:
                status = True
                data = response
            else:
                status = False
                data = 'Failing accessing API endpoint with http code: {0}'.format(
                    response.status_code)
        except exceptions.RequestException as exception:
            data = exception
        return status, data

    def __post(self, endpoint, parameters=''):
        '''POST request against a Rundeck API endpoint.

        :return: (True, response) on HTTP success, else (False, error message/exception)
        '''
        status = False
        data = ''
        if not endpoint:
            return False, 'No valid endpoint.'
        try:
            response = post(endpoint, data=parameters, headers=self._headers,
                            verify=self._check_ssl, timeout=self._del_time)
            if response.ok:
                status = True
                data = response
            else:
                status = False
                data = 'Failing accessing API endpoint with http code: {0}'.format(
                    response.status_code)
        except exceptions.RequestException as exception:
            data = exception
        return status, data

    def __delete_executions_data(self, identifier, executions, page, retries=5, backoff=5, unoptimized=False):
        '''Delete one chunk of executions (API) plus their workflow rows (DB).

        Retries with a fixed backoff only when both the API deletion and the
        DB cleanup failed; a partial failure aborts immediately.
        '''
        n_retries = 0
        interval = [page * self._chunk_size, page * self._chunk_size + len(executions)]
        msg = '[{0}]: Deleting range {1} to {2}'.format(identifier, interval[0], interval[1])
        self._log.write(msg)
        for _ in range(0, retries):
            n_retries += 1
            workflows, steps, err_wf = self.get_workflow_ids(executions)
            if not workflows or not steps:
                return False, err_wf
            msg = '[{0}] Removing following executions -> {1}'.format(identifier, executions)
            self._log.write(msg, 1)
            msg = '[{0}] Removing following workflows -> {1}'.format(identifier, workflows)
            self._log.write(msg, 1)
            msg = '[{0}] Removing following workflow steps -> {1}'.format(identifier, steps)
            self._log.write(msg, 1)
            status_exec, _ = self.delete_executions(executions)
            status_wf, _ = self.delete_workflows(workflows, steps, unoptimized)
            if status_exec and status_wf:
                break
            elif not (status_exec or status_wf) and n_retries <= retries:
                sleep(backoff)
                # Bug fix: report the attempt number, not the retry limit.
                msg = '[{0}] #{1} try not succeeded. Trying again in {2} seconds.'.format(identifier, n_retries, backoff)
                self._log.write(msg, 1)
                continue
            else:
                msg = '[{0}]: Error deleting executions.'.format(identifier)
                return False, msg
        return True, ''

    def parse_json_response(self, response, filter_by='', appender=''):
        '''Extract data from a JSON API response.

        :param response: object exposing json(); invalid JSON returns False
        :param filter_by: optional top-level key to descend into first
        :param appender: optional key collected from each list item (or read
                         from the single object)
        :return: parsed data, or False when the body is not valid JSON
        '''
        parsed_response = []
        try:
            res = response.json()
        except ValueError:
            return False
        res = res[filter_by] if filter_by else res
        if isinstance(res, list):
            for data in res:
                if appender:
                    parsed_response.append(data[appender])
                else:
                    parsed_response.append(data)
        else:
            parsed_response = res[appender] if appender else res
        return parsed_response

    def get_projects(self, only_names=True):
        '''Retrieve info about existing projects.

        :return: (True, names-or-records) on success, else (False, error message)
        '''
        endpoint = '{0}/projects'.format(self._url)
        status = False
        data = ''
        status, response = self.__get(endpoint)
        if only_names and status:
            status = True
            data = self.parse_json_response(response, None, 'name')
        elif status and not only_names:
            status = True
            data = self.parse_json_response(response)
        else:
            status = False
            data = response
        if not data:
            data = 'Error parsing JSON response.'
        return status, data

    def get_jobs_by_project(self, project_name, only_ids=True):
        '''Retrieve info about all jobs of a project.

        :return: (True, ids-or-records) on success, else (False, error message)
        '''
        endpoint = '{0}/project/{1}/jobs'.format(self._url, project_name)
        status = False
        data = ''
        status, response = self.__get(endpoint)
        if only_ids and status:
            status = True
            data = self.parse_json_response(response, None, 'id')
        elif status and not only_ids:
            status = True
            data = self.parse_json_response(response)
        else:
            status = False
            data = response
        if not data:
            data = 'Error parsing JSON response.'
        return status, data

    def get_executions(self, identifier, page, jobs=True, only_ids=True, running=False):
        '''Get a chunk of executions by job id or project name.

        Job queries page with max/offset; project queries use olderFilter
        only (no offset), which is fine for the deletion loop because each
        deleted chunk shrinks the filtered window.

        :return: (True, ids-or-records) on success, else (False, error message)
        '''
        status = False
        search_type = 'job' if jobs else 'project'
        endpoint = '{0}/{1}/{2}/executions'.format(
            self._url, search_type, identifier)
        if jobs:
            parameters = {
                'max': self._chunk_size,
                'offset': page * self._chunk_size,
            }
        else:
            parameters = {
                'max': self._chunk_size,
                'olderFilter': str(self._keep_time)
            }
        if running:
            endpoint = '{0}/running'.format(endpoint)
        status, response = self.__get(endpoint, parameters)
        if only_ids and status:
            status = True
            data = self.parse_json_response(response, 'executions', 'id')
        elif status and not only_ids:
            status = True
            data = self.parse_json_response(response, 'executions')
        else:
            status = False
            data = response
        if not data:
            data = 'Error parsing JSON response.'
        return status, data

    def get_total_executions(self, identifier, jobs=True):
        '''Count executions older than the retention window by job or project.

        :return: (True, total) on success, else (False, error message)
        '''
        status = False
        search_type = 'job' if jobs else 'project'
        endpoint = '{0}/{1}/{2}/executions'.format(
            self._url, search_type, identifier)
        parameters = {
            'olderFilter': str(self._keep_time),
            'max': 1
        }
        status, response = self.__get(endpoint, parameters)
        if status:
            status = True
            data = self.parse_json_response(response, 'paging', 'total')
        else:
            status = False
            data = response if response else 'Error parsing JSON response.'
        return status, data

    def get_workflow_ids(self, executions_ids):
        '''Resolve workflow and workflow-step ids for a set of executions.

        :return: (workflow_ids, workflow_step_ids, '') -- both comma-separated strings
        '''
        self._db.open()
        # Convert execution list to a comma-separated string
        executions_ids = ','.join(map(str, executions_ids))
        workflow_ids = ''
        workflow_step_ids = ''
        # Return workflow IDs
        workflow_stmt = 'SELECT workflow_id FROM execution WHERE id IN ({0})'.format(
            executions_ids)
        query_res = self._db.query(workflow_stmt)
        for workflow_id in query_res:
            workflow_ids = '{0},{1}'.format(workflow_ids, str(workflow_id[0]))
        workflow_ids = workflow_ids.strip(',')
        # Return workflow step IDs
        if workflow_ids:
            workflow_step_stmt = 'SELECT workflow_step_id FROM workflow_workflow_step WHERE workflow_commands_id IN ({0})'.format(
                workflow_ids)
            query_res = self._db.query(workflow_step_stmt)
            for workflow_step_id in query_res:
                workflow_step_ids = '{0},{1}'.format(workflow_step_ids, str(workflow_step_id[0]))
            workflow_step_ids = workflow_step_ids.strip(',')
        self._db.close()
        return workflow_ids, workflow_step_ids, ''

    def delete_executions(self, executions_ids):
        '''Bulk-delete Rundeck executions through the API.

        :return: (True, '') when every execution was deleted, else (False, error message)
        '''
        endpoint = '{0}/executions/delete'.format(self._url)
        data = dumps(executions_ids)
        status = False
        msg = ''
        status, response = self.__post(endpoint, data)
        if status:
            all_succeeded = self.parse_json_response(response, 'allsuccessful')
            if all_succeeded:
                status = True
            else:
                status = False
                # Bug fix: report the partial failure instead of an empty message.
                msg = 'Not all executions were deleted.'
        else:
            # Bug fix: propagate the HTTP/connection error instead of ''.
            msg = response
        return status, msg

    def delete_workflows(self, workflow_ids, workflow_step_ids, unoptimized=False):
        '''Bulk-delete rows from the Rundeck workflow tables.

        :param unoptimized: also clear the workflow_workflow_step link table
                            (needed when ON DELETE CASCADE is not in place)
        '''
        self._db.open()
        if workflow_ids and unoptimized:
            work_workflow_delete = 'DELETE FROM workflow_workflow_step WHERE workflow_commands_id IN ({0})'.format(
                workflow_ids)
            self._db.query(work_workflow_delete)
        if workflow_step_ids:
            workflow_step_delete = 'DELETE FROM workflow_step WHERE id IN ({0})'.format(
                workflow_step_ids)
            self._db.query(workflow_step_delete)
        if workflow_ids:
            workflow_delete = 'DELETE FROM workflow WHERE id IN ({0})'.format(
                workflow_ids)
            self._db.query(workflow_delete)
        self._db.apply()
        self._db.close()
        return True, ''

    def clean_project_executions(self, project, retries=5, backoff=5, unoptimized=False):
        '''Clean executions older than the retention window for one project.

        :return: (True, total deleted) on success, else (False, error message)
        '''
        status, total = self.get_total_executions(project, False)
        pages = 0
        if not status:
            msg = "[{0}]: Error returning executions counter.".format(project)
            return False, msg
        else:
            if total > 0:
                msg = "[{0}]: There are {1} executions to delete.".format(project, total)
                self._log.write(msg)
                pages = get_num_pages(total, self._chunk_size)
                msg = "Processing deleting in {0} cycles.".format(pages)
                self._log.write(msg)
            else:
                msg = "[{0}]: No available executions for deleting.".format(project)
                self._log.write(msg)
        for page in range(0, pages):
            status, executions = self.get_executions(project, page, False)
            if status:
                success, msg = self.__delete_executions_data(project, executions, page, retries, backoff, unoptimized)
                if not success:
                    return False, msg
            else:
                msg = '[{0}]: Error getting executions.'.format(project)
                return False, msg
        return True, total

    def clean_job_executions(self, job, retries=5, backoff=5, unoptimized=False):
        '''Clean executions older than the retention window for one job.

        :return: (True, total deleted) on success, else (False, error message)
        '''
        status, total = self.get_total_executions(job)
        pages = 0
        if not status:
            msg = "[{0}]: Error returning executions counter.".format(job)
            return False, msg
        else:
            if total > 0:
                msg = "[{0}]: There are {1} executions to delete.".format(job, total)
                self._log.write(msg)
                pages = get_num_pages(total, self._chunk_size)
                msg = "Processing deleting in {0} cycles.".format(pages)
                self._log.write(msg)
            else:
                msg = "[{0}]: No available executions for deleting.".format(job)
                self._log.write(msg)
        for page in range(0, pages):
            # Bug fix: get_executions returns a (status, data) pair; the old
            # code passed the whole tuple on as the executions list.
            status, executions = self.get_executions(job, page)
            if status:
                success, msg = self.__delete_executions_data(job, executions, page, retries, backoff, unoptimized)
                if not success:
                    return False, msg
            else:
                msg = '[{0}]: Error getting executions.'.format(job)
                return False, msg
        return True, total

    def clean_executions(self, project=None, project_order=True, retries=5, backoff=5, unoptimized=False):
        '''Clean all executions data older than the retention window.

        :param project: optional project name (or list of names) to restrict to
        :param project_order: clean whole projects when True, else job by job
        :return: (True, '') on success, else (False, error message)
        '''
        stats_total = 0
        if project:
            # Bug fix: a bare string used to be iterated character by
            # character (matching nothing); accept a name or a list of names.
            projects = project if isinstance(project, (list, tuple)) else [project]
        else:
            status, projects = self.get_projects()
            if not status:
                return status, projects
        for proj in projects:
            if project_order:
                status, data = self.clean_project_executions(proj, retries, backoff, unoptimized)
                # Bug fix: a failed cleanup used to fall through to int(data)
                # with an error string, raising ValueError.
                if not status:
                    self._log.write(data, 4)
                    return False, data
            else:
                # Bug fix: jobs were looked up with the raw `project` argument
                # instead of the current project.
                status, jobs = self.get_jobs_by_project(proj)
                if not status:
                    return False, jobs
                # Bug fix: per-project total now sums every job instead of
                # keeping only the last job's count.
                data = 0
                for job in jobs:
                    status, deleted = self.clean_job_executions(job, retries, backoff, unoptimized)
                    if not status:
                        self._log.write(deleted, 4)
                        # Bug fix: always return a (status, message) pair.
                        return False, deleted
                    data += int(deleted)
            msg = '[{0}] statistics: {1} old executions deleted.'.format(proj, int(data))
            self._log.write(msg)
            stats_total += int(data)
        msg = 'Global statistics: {0} old executions deleted.'.format(stats_total)
        self._log.write(msg)
        return True, ''

    def list_executions(self, project=None, job=None, only_running=False):
        '''Log executions per project, optionally filtered by job name.

        :param project: optional project name (or list of names); all projects when None
        :param job: optional job name used to filter the logged executions
        :param only_running: list only currently running executions
        :return: (True, '') on success, else (False, error message)
        '''
        filter_job = True if job else False
        # Bug fix: the old code queried the *job name* as if it were a
        # project; executions are always fetched per project and the job
        # filter is applied per execution below.
        if project:
            data = project if isinstance(project, (list, tuple)) else [project]
        else:
            status, data = self.get_projects()
            if not status:
                return False, data
        for row in data:
            status, executions = self.get_executions(row, 0, False, False, only_running)
            if not status:
                err_msg = '[{0}] Error getting executions.'.format(row)
                return False, err_msg
            for ex in executions:
                if filter_job and ex['job']['name'] == job:
                    msg = '[{0}] - \'{1}\' {2}'.format(ex['project'], ex['job']['name'], ex['status'])
                    self._log.write(msg)
                elif not filter_job:
                    msg = '[{0}] - \'{1}\' is {2}'.format(ex['project'], ex['job']['name'], ex['status'])
                    self._log.write(msg)
        return True, ''
import glob
import pdb
import os
from scipy.misc import imread
import matplotlib.pyplot as plt
import numpy as np
from huMoments import huMoments
from computeMHI import computeMHI
def normalized_euclidean(testMoments, trainMoments, variance, i):
    """Variance-normalized Euclidean distance between the i-th training
    moment vector and the test moment vector (both treated as columns)."""
    delta = trainMoments[i, :].reshape(-1, 1) - testMoments
    scaled = (delta ** 2) / variance
    return np.sqrt(scaled.sum())
def showNearestMHI(testMoments, trainMoments, train_dir,K):
    """Recompute and return the MHIs of the K nearest training sequences.

    Distances are variance-normalized Euclidean distances over Hu-moment
    vectors; variances are taken per feature across the training set.

    :param testMoments: Hu-moment vector of the query sequence
    :param trainMoments: (num_sequences, num_features) training moments
    :param train_dir: array of sequence directory paths, aligned with trainMoments
    :param K: number of nearest neighbours to return
    :return: (480, 640, K+1) array of MHIs -- index 0 is the nearest match
             (frame size 480x640 is assumed here; TODO confirm it matches
             the dataset)
    """
    train_set_len = trainMoments.shape[0]
    # Per-feature variance across the training set (NaN-safe), as a column.
    variance = np.nanvar(trainMoments, axis=0).reshape(-1, 1)
    # variance=variance.reshape(20, 1)
    distance = np.zeros((1, train_set_len))
    testMoments = np.reshape(testMoments, (-1, 1))
    # variance=variance.reshape(20, 1)
    nearest_MHI=np.zeros((480,640,K+1))
    for i in range(0, train_set_len):
        distance[:,i] = normalized_euclidean(testMoments,trainMoments,variance,i)
    sorted_index = np.argsort(distance)
    #predictedAction = np.int(trainLabels[sorted_index][0])
    #sorted directory
    shape = train_dir.shape
    sorted_directory=train_dir[sorted_index].reshape(shape)
    #find k image
    # Recompute the MHI of each of the K+1 closest sequences from disk.
    # NOTE(review): np.str was removed in NumPy 1.24; this needs plain str()
    # on modern NumPy.
    for i in range (0,K+1):
        print("compute MHI for ", i ,"Nearest ")
        nearest_MHI[:,:,i]= computeMHI(np.str(sorted_directory[i,:][0]))
    return nearest_MHI
if __name__ == "__main__":
    # GET ALLMHI
    # Collect every action sequence directory, then show the K nearest MHIs
    # for one query sequence.
    basedir = './'
    actions = ['botharms', 'crouch', 'leftarmup', 'punch', 'rightkick']
    direct_list = []
    # NOTE(review): row/col appear unused below -- frame size is hard-coded
    # inside showNearestMHI instead.
    row = 480
    col = 640
    K=4
    for actionnum in range(len(actions)):
        subdirname = basedir + actions[actionnum] + '/'
        subdir = os.listdir(subdirname)
        subdir = np.sort(subdir)
        #print(subdir)
        # NOTE(review): 'dir' shadows the builtin of the same name.
        for dir in subdir:
            #print(dir)
            dir_sub = subdirname + dir
            #print(dir_sub)
            tmp = np.array(dir_sub)
            direct_list = np.r_[direct_list, tmp]
    # NOTE(review): the [1:] slice drops the first collected directory --
    # verify this is intentional and not an off-by-one.
    direct_list=np.asarray(direct_list[1:]).reshape(-1,1)
    #choose TRAIN
    trainMoments= np.load('huVectors.npy')
    trainMoments=np.array(trainMoments)
    # choose TEST
    testMoments = np.load('botharms-up-p1-1_Vector.npy', allow_pickle=True)
    Near_MHI = showNearestMHI(testMoments, trainMoments, direct_list, K)
    # plot the original
    figure=plt.figure(frameon=False)
    figure.suptitle("Nearest MHI for botharms-up-p1-1")
    # Slice 0 is the query's own MHI; slices 1..K are its nearest neighbors.
    for i in range(0, K + 1):
        if i == 0:
            im = Near_MHI[:, :, 0]
            ax = plt.subplot(2, 3, i + 1)
            ax.set_title("origin image")
            ax.imshow(im, cmap='gray')
        else:
            im = Near_MHI[:, :, i]
            ax = plt.subplot(2, 3, i + 1)
            ax.set_title("Nearest Neighbor" + str(i))
            ax.imshow(im, cmap='gray')
    figure.savefig("Nearest Neibor for botharm-up-p1-1")
    plt.show()
'''
# choose TEST
MHI=computeMHI('./crouch/crouch-p2-1')
testMoments = np.array(huMoments(MHI))
#testMoments = np.load('crouch-up-p1-1_Vector.npy', allow_pickle=True)
Near_MHI = showNearestMHI(testMoments, trainMoments, direct_list, K)
# plot the original
figure2 = plt.figure(frameon=False)
figure2.suptitle("Nearest Neighbor for crouch-up-p2-1")
#figure2.set_cmap('jet')
for i in range(0, K + 1):
if i == 0:
im = Near_MHI[:, :, 0]
ax = plt.subplot(2, 3, i + 1)
ax.set_title("origin image")
ax.imshow(im,cmap='gray')
else:
im = Near_MHI[:, :, i]
ax = plt.subplot(2, 3, i + 1)
ax.set_title("Nearest Neighbor" + str(i))
ax.imshow(im,cmap='gray' )
figure2.savefig("Nearest Neibor for crouch-p2-1")
plt.show()
'''
'''
# choose TEST
testMoments = np.load('leftarmup-p1-1_Vector.npy', allow_pickle=True)
Near_MHI = showNearestMHI(testMoments, trainMoments, direct_list, K)
# plot the original
figure = plt.figure(frameon=False)
figure.suptitle("Nearest MHI for leftarm-up-p1-1")
for i in range(0, K + 1):
if i == 0:
im = Near_MHI[:, :, 0]
ax = plt.subplot(2, 3, i + 1)
ax.set_title("origin image")
ax.imshow(im, cmap='gray')
else:
im = Near_MHI[:, :, i]
ax = plt.subplot(2, 3, i + 1)
ax.set_title("Nearest Neighbor" + str(i))
ax.imshow(im, cmap='gray')
figure.savefig("Nearest Neibor for leftarmup-p1-2")
plt.show()
'''
| HUILIHUANG413/CS6476-HM5 | showNearestMHIs.py | showNearestMHIs.py | py | 4,488 | python | en | code | 0 | github-code | 50 |
def addtition(*args):
    """Print the sum of every positional argument.

    (The original spelling of the name is kept so existing callers keep working.)
    """
    print(sum(args))


addtition(10, 20, 30)
addtition(1, 2, 3, 4, 5)
def myFun(*argv):
    """Print each positional argument on a line of its own."""
    for value in argv:
        print(value)


myFun('Hello', 'Welcome', 'to', 'Python course')
def myFun(arg1, *argv):
    """Print the first argument, then each remaining argument on its own line."""
    print("First argument :", arg1)
    for extra in argv:
        print("Next argument through *argv :", extra)


myFun('Hello', 'Welcome', 'to', 'Python course')
| fnabiyevuz/Advanced-Foundations-of-Python-Programming-2022-Training | Module 3 : Args and Kwargs/args.py | args.py | py | 440 | python | en | code | 0 | github-code | 50 |
13034576408 | #Chris Kopacz
#Python Exercises from Github
#Level 2, question 6
#created: 24 June 2017
"""
Question 6
Level 2
Question:
Write a program that calculates and prints the value according to the given formula:
Q = sqrt[(2*C*D)/H]
Following are the fixed values of C and H:
C=50 H=30
D is the variable whose values chould be input to your program in a comma-
separated sequence.
Example:
Let us assume the following comma-separated input sequence is given to the program:
100,150,180
The output of the program should be:
18,22,24
"""
def aFunc(d):
    """Return Q = int(sqrt((2 * C * d) / H)) with the fixed constants C = 50
    and H = 30, truncated to an integer as the exercise requires."""
    C, H = 50, 30
    return int(((2 * C * d) / H) ** .5)
#==========
def main():
    """Read a comma-separated list of integers from stdin, apply aFunc to
    each, and print the results joined by commas (e.g. "18,22,24")."""
    user_in = input('Enter integer values as a comma-separated string:\n')
    # join handles the separators directly, replacing the original's quadratic
    # string concatenation plus rstrip(',') (whose loop variable also shadowed
    # the builtin 'iter').
    print(','.join(str(aFunc(int(token))) for token in user_in.split(',')))


if __name__ == "__main__":
    main()
| chriskopacz/python_practice | Problems/lev2/lev2_q6.py | lev2_q6.py | py | 984 | python | en | code | 0 | github-code | 50 |
# GCode Speed conversion
# Adjusts for the gantry speed decrease on the curves
#
# F10 (10mm/s) is the standard line speed
LinSpeed = "F10\n"
# F75 (75mm/s) is the standard Arc speed
ArcSpeed = "F75\n"

# Reading File INPUT GCODE FILE HERE
# A context manager guarantees the handle is closed (the original left both
# files open and reused the same variable for input and output).
with open('sketch_w_offset_3_DXF_Relativepos.gcode', 'r') as gcode_in:
    Lines = gcode_in.readlines()

# Checks lines for large linear (>0 before decimal) moves and does speed adjustment
output_Lines = []
for line in Lines:
    # NOTE(review): line.find('.') returns -1 when no '.' is present, which
    # silently reads the second-to-last character -- confirm every long line
    # in the input contains two decimal points.
    if len(line) > 6 and (int(line[line.find('.') - 1]) > 0 or int(line[line.find('.', 10) - 1]) > 0):
        # Wrap the large move with the slow linear speed, restoring arc speed after.
        output_Lines.append(LinSpeed + line + ArcSpeed)
    else:
        output_Lines.append(line)

# writing saved lines to file
with open('adjusted_speed_output.gcode', 'w') as gcode_out:
    gcode_out.writelines(output_Lines)
| strombolini/gcodespeedadj | Gcode_Speed_Adj.py | Gcode_Speed_Adj.py | py | 850 | python | en | code | 0 | github-code | 50 |
877648767 | # coding: utf-8
# more examples: https://github.com/python-telegram-bot/python-telegram-bot/blob/master/examples/README.md
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from assistant.assistant import Assistant
# NOTE(review): bot secret committed to source; load it from an environment
# variable instead.
TG_TOKEN = "569776209:AAGlS4OT7jFw3oMtQ9781anRKwLtgCKKbNA"
# Populated in the __main__ block before polling starts; handlers read it.
assistant = None
def idle_main(bot, update):
    """Reply to a free-text message with the 5 nearest clusters and the 5
    nearest question/answer pairs found by the module-level assistant."""
    text = update.message.text
    chat_id = update.message.chat_id
    for cluster in assistant.get_nearest_clusters(text, 5):
        bot.sendMessage(chat_id, text="Cluster {}: {}, confidence: {}\n\n".format(
            cluster['index'], cluster['name'], cluster['confidence']))
    for question in assistant.get_nearest_questions(text, 5):
        bot.sendMessage(chat_id, text="Question {},\nAnswer: {},\nConfidence: {}\n\n".format(
            question['question'], question['answer'], question['confidence']))
def slash_start(bot, update):
    """Handle the /start command by greeting the user in the current chat."""
    chat_id = update.message.chat_id
    bot.sendMessage(chat_id, text="Hi!")
def main():
    """Wire up the command and message handlers, then poll Telegram for
    updates until the process is interrupted."""
    updater = Updater(TG_TOKEN)
    dispatcher = updater.dispatcher
    dispatcher.add_handler(CommandHandler("start", slash_start), group=0)
    dispatcher.add_handler(MessageHandler(Filters.text, idle_main))
    updater.start_polling()
    updater.idle()
if __name__ == '__main__':
    # Build the Assistant once at startup so the module-level global is ready
    # before any handler fires.
    assistant = Assistant()
    main()
41371964573 | import xml.dom.minidom
def ParseNode(node):
    """Recursively convert a DOM element into a nested dict.

    A leaf element (zero or one child) yields its text nodeValue (or None for
    an empty element); anything else yields a dict of localName -> parsed
    child. Note: siblings sharing a localName overwrite each other.
    """
    if node.firstChild is node.lastChild:
        # Leaf: the original crashed with AttributeError on empty elements
        # (firstChild is None); return None for those instead.
        return node.firstChild.nodeValue if node.firstChild is not None else None
    else:
        dct2 = dict()
        for child in node.childNodes:
            keyName = child.localName
            if keyName is None:
                # Skip text/whitespace nodes between elements.
                continue
            dct2[keyName] = ParseNode(child)
        return dct2
# Parse a sample annotation file into a dict of element name -> parsed subtree.
dom1=xml.dom.minidom.parse('q:/tmp/2008_000002.xml')
root=dom1.documentElement
dct = dict()
for node in root.childNodes:
    if node.nodeType == 3 and node.firstChild is None:
        # Text node (nodeType 3).
        if node.localName is None:
            # Whitespace/indentation node -- skip it.
            continue
    elif node.nodeType == 1:
        # Element node (nodeType 1): recurse into it.
        dct[node.localName] = ParseNode(node)
# NOTE(review): dct is built but never printed; this prints the raw child list.
print(root.childNodes)
| nxp-mcuxpresso/FineDataset | xmltest.py | xmltest.py | py | 767 | python | en | code | 0 | github-code | 50 |
28116330407 | """
To do random stuff
@author José Antonio García-Díaz <joseantonio.garcia8@um.es>
@author Rafael Valencia-Garcia <valencia@um.es>
"""
import os
import sys
import config
import argparse
import pandas as pd
import numpy as np
import pickle
import re
from pathlib import Path
import transformers
import shap
from tqdm import tqdm
from dlsdatasets.DatasetResolver import DatasetResolver
from dlsmodels.ModelResolver import ModelResolver
from features.FeatureResolver import FeatureResolver
from utils.Parser import DefaultParser
import tensorflow
def main ():
    """Ad-hoc SHAP exploration: load a dataset and its features, explain the
    best classical model with Deep/Kernel explainers, then (apparently
    unreachably, after sys.exit below) a HuggingFace pipeline.

    NOTE(review): exploratory code; see inline review notes for latent bugs.
    """
    tensorflow.compat.v1.disable_eager_execution ()
    # var parser
    parser = DefaultParser (description = 'To do random stuff')
    # @var model_resolver ModelResolver
    model_resolver = ModelResolver ()
    # Add model
    parser.add_argument ('--model',
        dest = 'model',
        default = model_resolver.get_default_choice (),
        help = 'Select the family of algorithms to evaluate',
        choices = model_resolver.get_choices ()
    )
    # @var choices List of list
    choices = FeatureResolver.get_feature_combinations (['lf', 'se', 'be', 'we', 'ne', 'cf', 'bf', 'pr'])
    # Add features
    parser.add_argument ('--features',
        dest = 'features',
        default = 'all',
        help = 'Select the family or features to evaluate',
        choices = ['all'] + ['-'.join (choice) for choice in choices]
    )
    # @var args Get arguments
    args = parser.parse_args ()
    # @var dataset_resolver DatasetResolver
    dataset_resolver = DatasetResolver ()
    # @var dataset Dataset This is the custom dataset for evaluation purposes
    dataset = dataset_resolver.get (args.dataset, args.corpus, args.task, False)
    dataset.filename = dataset.get_working_dir (args.task, 'dataset.csv')
    # @var df The original dataframe (in my case, dataset.csv)
    df = dataset.get ()
    # @var model Model
    model = model_resolver.get (args.model)
    model.set_dataset (dataset)
    model.is_merged (dataset.is_merged)
    # @var task_type String
    task_type = dataset.get_task_type ()
    # @var feature_resolver FeatureResolver
    feature_resolver = FeatureResolver (dataset)
    # @var feature_combinations List
    # NOTE(review): get_feature_combinations is called unqualified here but as
    # FeatureResolver.get_feature_combinations above -- this likely raises
    # NameError when --features=all; confirm.
    feature_combinations = get_feature_combinations (model.get_available_features ()) if args.features == 'all' else [args.features.split ('-')]
    # Load all the available features
    for features in feature_combinations:
        # Indicate which features we are loading
        print ("loading features...")
        for feature_set in features:
            # @var feature_file String
            feature_file = feature_resolver.get_suggested_cache_file (feature_set, task_type)
            # @var features_cache String The file where the features are stored
            features_cache = dataset.get_working_dir (args.task, feature_file)
            # If the features are not found, get the default one
            # NOTE(review): Path does not accept a cache_file keyword -- this
            # looks like it would raise TypeError; confirm intended call.
            if not Path (features_cache, cache_file = "").is_file ():
                features_cache = dataset.get_working_dir (args.task, feature_set + '.csv')
            # Indicate what features are loaded
            print ("\t" + features_cache)
            if not Path (features_cache).is_file ():
                print ("skip...")
                continue
            # Set features
            model.set_features (feature_set, feature_resolver.get (feature_set, cache_file = features_cache))
        # @var feature_combination Tuple
        feature_combination = model.get_feature_combinations ()
        # @var feature_key String
        feature_key = '-'.join (feature_combination)
        # Predict this feature set
        best_model = model.get_best_model (feature_key)
        # @var train_df DataFrame Get training split
        train_df = dataset.get_split (df, 'train')
        # @var test_df DataFrame Get test split
        test_df = dataset.get_split (df, 'test')
        # @var transformer -- the linguistic-features transformer
        # NOTE(review): the next assignment rebinds the loop variable 'features'.
        transformer = model.get_features ('lf')
        features = pd.DataFrame (transformer.transform (df))
        x_train = features[features.index.isin (train_df.index)].reindex (train_df.index)
        x_test = features[features.index.isin (test_df.index)].reindex (test_df.index)
        # @var background
        background = dataset.get ()['label']
        # explain the model on two sample inputs
        explainer = shap.DeepExplainer (best_model, x_train)
        # @var x_train_labels
        x_train_labels = df.loc[x_train.index]['label']
        # @var shap_values
        shap_values = explainer.shap_values (x_train.head (200).values)
        # @var data
        data = shap.force_plot (explainer.expected_value[0], shap_values[0], x_test)
        shap.save_html (dataset.get_working_dir (args.task, 'test.html'), data)
    # Prediction wrapper so KernelExplainer can call the best model.
    def f (X):
        return best_model.predict (X)
    explainer = shap.KernelExplainer (f, x_train.iloc[:50, :])
    shap_values = explainer.shap_values (x_test.iloc[0].values, nsamples = 500)
    data = shap.force_plot (explainer.expected_value, shap_values[1], x_train_labels.iloc[0])
    shap.save_html (dataset.get_working_dir (args.task, 'test2.html'), data)
    """
    data = shap.summary_plot (shap_values, x_test)
    shap.save_html (dataset.get_working_dir (args.task, 'test2.html'), data)
    """
    """
    vals = np.abs (shap_values).mean (0)
    feature_importance = pd.DataFrame(list (zip (features.columns, sum (vals))), columns=['col_name','feature_importance_vals'])
    feature_importance.sort_values (by = ['feature_importance_vals'], ascending = False, inplace = True)
    print (feature_importance.head (10))
    """
    sys.exit ()
    # NOTE(review): everything below sys.exit () appears unreachable -- confirm
    # whether this transformer-pipeline section is dead experimentation.
    # Clear session
    model.clear_session ();
    # @var feature_combinations List
    feature_combinations = get_feature_combinations (model.get_available_features ()) if args.features == 'all' else [args.features.split ('-')]
    # @var huggingface_model String
    huggingface_model = dataset.get_working_dir (dataset.task, 'models', 'bert', 'bert-finetunning')
    # load a transformers pipeline model
    model = transformers.pipeline ('sentiment-analysis', model = huggingface_model, return_all_scores = True)
    # explain the model on two sample inputs
    explainer = shap.Explainer (model)
    shap_values = explainer (df.head (10)['tweet_clean_lowercase'])
    # visualize the first prediction's explanation for the POSITIVE output class
    # Exception: In v0.20 force_plot now requires the base value as the first parameter!
    # Try shap.force_plot(explainer.expected_value, shap_values) or
    # for multi-output models try
    # shap.force_plot(explainer.expected_value[0], shap_values[0]).
    print (df.head (10)['tweet_clean_lowercase'])
    # data = shap.force_plot (shap_values[0, :, 1])
    # shap.save_html (dataset.get_working_dir (args.task, 'test.html'), data)
    data = shap.plots.text (shap_values[0])
    print (data)
    shap.save_html (dataset.get_working_dir (args.task, 'test-2.html'), data)


if __name__ == "__main__":
    main ()
| Smolky/LREV-Hope-Speech-Detection-in-Spanish-2022 | code/stuff.py | stuff.py | py | 7,635 | python | en | code | 2 | github-code | 50 |
5499351098 | from __future__ import print_function
import os
import re
import shutil
import sys
import ec2rlcore.constants
try:
import requests
except ImportError as ie: # pragma: no cover
print("ERROR:\tMissing Python module 'requests'.")
print("\tPlease install this module and rerun ec2rl")
sys.exit(1)
def get_distro():
    """
    Return the running Linux distribution.

    Checks distribution release files in a fixed priority order
    (/etc/system-release, /etc/SuSE-release, /etc/SUSE-brand,
    /etc/lsb-release, /etc/issue, /etc/os-release) and matches their
    contents against known patterns. Falls back to "unknown" or an
    "unknown for <file>" marker when a file exists but does not match.

    Returns:
        distro (str): the detected Linux distribution
    """
    distro = "unknown"
    alami_regex = re.compile(r"^Amazon Linux AMI release \d{4}\.\d{2}")
    alami2_regex = re.compile(r"^Amazon Linux (release \d \(Karoo\)|release \d.* \(\d{4}\.\d{2}\)|2)")
    al2023_regex = re.compile(r"^Amazon Linux release 2023 \(Amazon Linux\)")
    rhel_regex = re.compile(r"^Red Hat Enterprise Linux*")
    # Amazon Linux & RHEL
    if os.path.isfile("/etc/system-release"):
        with open("/etc/system-release", "r") as fp:
            # This file is a single line
            distro_str = fp.readline()
            if re.match(alami_regex, distro_str):
                distro = ec2rlcore.constants.DISTRO_ALAMI
            elif re.match(alami2_regex, distro_str):
                distro = ec2rlcore.constants.DISTRO_ALAMI2
            elif re.match(al2023_regex, distro_str):
                distro = ec2rlcore.constants.DISTRO_AL2023
            elif re.match(rhel_regex, distro_str) or \
                    re.match(r"^CentOS.*release (\d+)\.(\d+)", distro_str):
                # CentOS is treated as RHEL for module applicability purposes.
                distro = ec2rlcore.constants.DISTRO_RHEL
            else:
                distro = "unknown for /etc/system-release"
    # SUSE
    # /etc/SuSE-release is deprecated
    elif os.path.isfile("/etc/SuSE-release"):
        with open("/etc/SuSE-release", "r") as fp:
            # This file is a single line
            distro_str = fp.readline()
            regex = re.compile(r"^SUSE Linux Enterprise Server \d{2}")
            if re.match(regex, distro_str):
                distro = ec2rlcore.constants.DISTRO_SUSE
            else:
                distro = "unknown for /etc/SuSE-release"
    elif os.path.isfile("/etc/SUSE-brand"):
        with open("/etc/SUSE-brand", "r") as fp:
            distro_str = fp.readline()
            regex = re.compile(r"SLE")
            if re.match(regex,distro_str):
                distro = ec2rlcore.constants.DISTRO_SUSE
            else:
                distro = "unknown for /etc/SUSE-brand"
    # Ubuntu
    elif os.path.isfile("/etc/lsb-release"):
        with open("/etc/lsb-release", "r") as fp:
            # This file is many lines in length
            lines = fp.readlines()
            distro = "unknown for /etc/lsb-release"
            for line in lines:
                if re.match(r"DISTRIB_ID=Ubuntu", line):
                    distro = ec2rlcore.constants.DISTRO_UBUNTU
                    break
    # Older Amazon Linux & RHEL
    elif os.path.isfile("/etc/issue"):
        with open("/etc/issue", "r") as fp:
            distro_str = fp.readline()
            if re.match(alami_regex, distro_str):
                distro = ec2rlcore.constants.DISTRO_ALAMI
            elif re.match(rhel_regex, distro_str) or re.match(r"^CentOS release \d\.\d+", distro_str):
                distro = ec2rlcore.constants.DISTRO_RHEL
            else:
                distro = "unknown for /etc/issue"
    # Amazon Linux & SUSE
    # /etc/os-release will be replacing /etc/SuSE-release in the future
    elif os.path.isfile("/etc/os-release"):
        with open("/etc/os-release", "r") as fp:
            lines = fp.readlines()
            distro = "unknown for /etc/os-release"
            for line in lines:
                if re.match(r"^PRETTY_NAME=\"SUSE Linux Enterprise Server \d{2}", line):
                    distro = ec2rlcore.constants.DISTRO_SUSE
                    break
                elif re.match(r"^PRETTY_NAME=\"Amazon Linux AMI \d{4}\.\d{2}", line):
                    distro = ec2rlcore.constants.DISTRO_ALAMI
                    break
    return distro
def check_root():
    """Return whether the effective user ID is 0 (root)."""
    # The original checked os.getegid() (the effective *group* ID), which does
    # not match the function's name or docstring; root privilege is determined
    # by the effective user ID.
    return os.geteuid() == 0
def verify_metadata():
    """Return whether the system can access the EC2 meta data and user data.

    Tries IMDSv1 first; a 401 response means IMDSv2 is enforced, in which
    case a session token is obtained and the request retried with it.
    """
    try:
        resp = requests.get("http://169.254.169.254/latest/meta-data/instance-id").status_code
        if resp == 200:
            return True
        elif resp == 401:
            # IMDSv2: fetch a session token, then retry with the token header.
            token = (
                requests.put(
                    "http://169.254.169.254/latest/api/token",
                    headers={'X-aws-ec2-metadata-token-ttl-seconds': '21600'},
                    verify=False
                )
            ).text
            return requests.get("http://169.254.169.254/latest/meta-data/instance-id",
                                headers={'X-aws-ec2-metadata-token': token}).status_code == 200
        else:
            return False
    except requests.exceptions.ConnectionError:
        # No route to the metadata service (e.g. not on EC2).
        return False
def is_an_instance():
    """
    Return whether the running system is an EC2 instance based on criteria in AWS EC2 documentation.

    Nitro systems are detected via the DMI asset tag; otherwise the Xen-era
    /sys/hypervisor/uuid "ec2" prefix is checked and confirmed against the
    instance-identity document (with an IMDSv2 token retry on 401).

    AWS EC2 documentation: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify_ec2_instances.html
    """
    sys_hypervisor_uuid = "/sys/hypervisor/uuid"
    try:
        if is_nitro():
            return True
        else:
            with open(sys_hypervisor_uuid) as uuid_file:
                if not uuid_file.readline().startswith("ec2"):
                    return False
            resp = requests.get(
                "http://169.254.169.254/latest/dynamic/instance-identity/document").status_code
            if resp == 200:
                return True
            elif resp == 401:
                # IMDSv2: obtain a session token and retry with it.
                token = (
                    requests.put(
                        "http://169.254.169.254/latest/api/token",
                        headers={
                            'X-aws-ec2-metadata-token-ttl-seconds': '21600'},
                        verify=False
                    )
                ).text
                return requests.get("http://169.254.169.254/latest/dynamic/instance-identity/document",
                                    headers={'X-aws-ec2-metadata-token': token}).status_code == 200
            else:
                return False
    except (IOError, OSError, requests.RequestException):
        # Python2: IOError
        # Python3: OSError -> FileNotFoundError
        return False
# This is shutil.which() from Python 3.5.2
# Replicating it here allows ec2rl to utilize which() in both python 2 & 3
# (vendored verbatim; prefer shutil.which directly on Python 3-only code).
def which(cmd, mode=os.F_OK | os.X_OK, path=None):  # pragma: no cover
    """
    Given a command, mode, and a PATH string, return the path which
    conforms to the given mode on the PATH, or None if there is no such
    file.

    `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
    of os.environ.get("PATH"), or can be overridden with a custom search
    path.
    """
    # Check that a given file can be accessed with the correct mode.
    # Additionally check that `file` is not a directory, as on Windows
    # directories pass the os.access check.
    def _access_check(fn, mode):
        return (os.path.exists(fn) and os.access(fn, mode)
                and not os.path.isdir(fn))

    # If we"re given a path with a directory part, look it up directly rather
    # than referring to PATH directories. This includes checking relative to the
    # current directory, e.g. ./script
    if os.path.dirname(cmd):
        if _access_check(cmd, mode):
            return cmd
        return None

    if path is None:
        path = os.environ.get("PATH", os.defpath)
    if not path:
        return None
    path = path.split(os.pathsep)

    if sys.platform == "win32":
        # The current directory takes precedence on Windows.
        if os.curdir not in path:
            path.insert(0, os.curdir)

        # PATHEXT is necessary to check on Windows.
        pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
        # See if the given file matches any of the expected path extensions.
        # This will allow us to short circuit when given "python.exe".
        # If it does match, only test that one, otherwise we have to try
        # others.
        if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
            files = [cmd]
        else:
            files = [cmd + ext for ext in pathext]
    else:
        # On other platforms you don't have things like PATHEXT to tell you
        # what file suffixes are executable, so just pass on cmd as-is.
        files = [cmd]

    seen = set()
    for directory in path:
        normdir = os.path.normcase(directory)
        if normdir not in seen:
            seen.add(normdir)
            for thefile in files:
                name = os.path.join(directory, thefile)
                if _access_check(name, mode):
                    return name
    return None
def get_net_driver():
    """
    Return the name of the driver for the first alphabetically ordered non-virtual network interface.

    Returns:
        driver_name (str): name of driver (e.g. ixgbevf), or "Unknown" when no
        physical interface is found or sysfs cannot be read.
    """
    try:
        net_device_list = list()
        # Add all non-virtual interfaces to the list and sort it
        for device in os.listdir("/sys/class/net"):
            if "virtual" not in os.path.abspath(os.readlink("/sys/class/net/{}".format(device))):
                net_device_list.append(device)
        if len(net_device_list) > 0:
            net_device_list = sorted(net_device_list)
            # readlink returns a path (e.g. ../../../../module/xen_netfront) so split the string
            # and return the last piece
            driver_name = os.readlink("/sys/class/net/{}/device/driver/module".format(net_device_list[0]))\
                .split("/")[-1]
        # Return an error if the list is somehow empty (didn't find any network devices)
        else:
            driver_name = "Unknown"
    # Catch OSError in Python2 and FileNotFoundError in Python3 (inherits from OSError)
    except OSError:
        driver_name = "Unknown"
    return driver_name
def get_virt_type():
    """
    Return the virtualization type as determined from the instance meta-data.

    Raises:
        PrediagConnectionError: when the metadata service is unreachable.

    Returns:
        profile (str): virtualization type (e.g. default-pv or default-hvm),
        "nitro" on Nitro systems, or "ERROR" on an unexpected HTTP status.
    """
    try:
        # This path is only exposed on Nitro instance types.
        if is_nitro():
            profile = "nitro"
        else:
            profile_request = requests.get("http://169.254.169.254/latest/meta-data/profile")
            if profile_request.status_code == 200:
                profile = profile_request.text
            elif profile_request.status_code == 401:
                # IMDSv2: obtain a session token and retry with it.
                token=(
                    requests.put(
                        "http://169.254.169.254/latest/api/token",
                        headers={'X-aws-ec2-metadata-token-ttl-seconds': '21600'},
                        verify=False
                    )
                ).text
                profile = requests.get(
                    "http://169.254.169.254/latest/meta-data/profile",
                    headers={'X-aws-ec2-metadata-token': token}
                ).text
            else:
                profile = "ERROR"
    except requests.exceptions.ConnectionError:
        raise PrediagConnectionError("Failed to connect to AWS EC2 metadata service.")
    return profile
def is_nitro():
    """
    Returns if the virtualization type is nitro as determined by /sys/devices/virtual/dmi/id/board_asset_tag.

    Also returns true for bare metal instances as well, due to being part of the
    nitro ecosystem, even though they technically do not have the nitro hypervisor.
    """
    asset_tag_path = "/sys/devices/virtual/dmi/id/board_asset_tag"
    try:
        with open(asset_tag_path) as tag_file:
            # Nitro-family hosts expose the instance ID ("i-...") as the tag.
            return tag_file.readline().startswith("i-")
    except (IOError, OSError):
        # Python2: IOError; Python3: OSError -> FileNotFoundError.
        return False
def print_indent(str_arg, level=0):
    """Print str_arg indented two spaces per level."""
    indent = "  " * level
    print("{}{}".format(indent, str_arg))
def backup(path_to_backup, backed_files, backup_dir):
    """
    Given a path, path_to_backup, copy it to backup_dir, update the backed_files dict, and return the path of the
    new backup copy. If the path has already been backed up then return the existing backup path and exit
    immediately without copying.

    PrediagDestinationPathExistsError is raised if the backup destination for a directory already exists. This
    check is a pre-screen for shutil.copytree which raises FileExistsError if the destination path exists.

    This function is intended for use within Python-based remediation modules.

    Parameters:
        path_to_backup (str): path to the file to back up
        backed_files (dict): original path of backed up files (keys) and
        the path to the backup copy of the file (values)
        backup_dir (str): path to the directory containing backup file copies

    Returns:
        backup_location_path (str): path to the backup copy of the file
    """
    # If a backup copy of the file already exists, do not perform another, redundant backup operation.
    if path_to_backup in backed_files:
        return backed_files[path_to_backup]

    # The backup mirrors the original absolute path underneath backup_dir.
    backup_location_path = "".join([backup_dir, path_to_backup])

    if os.path.exists(backup_location_path):
        raise PrediagDestinationPathExistsError(backup_location_path)

    if os.path.isdir(path_to_backup):
        if not os.path.exists(backup_dir):
            # Restrictive mode: backups may contain sensitive system files.
            os.makedirs(backup_dir, mode=0o0700)
        _do_backup_restore(source_path=path_to_backup,
                           source_path_is_dir=True,
                           destination_path=backup_location_path,
                           backed_files=backed_files)
    elif os.path.isfile(path_to_backup):
        # Recreate the parent directory structure inside backup_dir first.
        backup_location_parent_path = "".join([backup_dir, os.path.dirname(path_to_backup)])
        if not os.path.exists(backup_location_parent_path):
            os.makedirs(backup_location_parent_path, mode=0o0700)
        _do_backup_restore(source_path=path_to_backup,
                           source_path_is_dir=False,
                           destination_path=backup_location_path,
                           backed_files=backed_files)
    else:
        raise PrediagInvalidPathError(path_to_backup)
    backed_files[path_to_backup] = backup_location_path
    return backup_location_path
def restore(restoration_file_path, backed_files):
    """
    Given the path of a file to restore, restoration_file_path, look up the location of its backup copy and
    copy the backup to that location.

    This function is intended for use within Python-based remediation modules.

    Parameters:
        restoration_file_path (str): path to the file backup to be restored
        backed_files (dict): names of backed up files (keys) and the path to the backup copy of the file (values)

    Returns:
        (bool): whether the operation was successful
    """
    if restoration_file_path not in backed_files:
        # Nothing to restore: this path was never backed up.
        return False

    backup_copy_path = backed_files[restoration_file_path]
    if not os.path.exists(backup_copy_path):
        # The recorded backup copy is missing from disk.
        raise PrediagInvalidPathError(backup_copy_path)
    _do_backup_restore(source_path=backup_copy_path,
                       source_path_is_dir=os.path.isdir(backup_copy_path),
                       destination_path=restoration_file_path,
                       backed_files=backed_files)
    return True
def _do_backup_restore(source_path, source_path_is_dir, destination_path, backed_files):
    """
    Copy source_path to destination_path, preserving ownership. A regular file at destination_path
    will be overwritten. shutil.copytree will not copy over an existing directory at destination_path and
    FileExistsError will be raised.

    Parameters:
        source_path (str): file path to be copied
        source_path_is_dir (bool): whether source_path is a directory
        destination_path (str): where source_path should be copied to
        backed_files (dict): names of backed up files (keys) and the path to the backup copy of the file (values)

    Raises:
        PrediagArgumentError: when one or more arguments are missing or of the wrong type.

    Returns:
        True (bool): if the operation was successful
    """
    # Validate all arguments up front and report every bad one at once.
    args_valid = True
    bad_args = list()
    if not source_path:
        print("Invalid source_path arg!")
        args_valid = False
        # NOTE(review): "souce_path" is misspelled in the reported arg name.
        bad_args.append("souce_path")
    if not isinstance(source_path_is_dir, bool):
        print("Invalid source_path_is_dir arg!")
        args_valid = False
        bad_args.append("source_path_is_dir")
    if not destination_path:
        print("Invalid destination_path arg!")
        args_valid = False
        bad_args.append("destination_path")
    if not isinstance(backed_files, dict):
        print("Invalid backed_files arg!")
        args_valid = False
        bad_args.append("backed_files")
    if not args_valid:
        raise PrediagArgumentError(bad_args)
    if source_path_is_dir:
        shutil.copytree(source_path, destination_path)
        # NOTE(review): seen_paths mixes realpath strings (for directories) and
        # "<st_dev><st_ino>" keys (for entries) -- confirm the membership tests
        # below behave as intended for both kinds of key.
        seen_paths = set()
        for root, dirs, files in os.walk(destination_path, followlinks=True):
            # Check if this root has already been visited (avoids symlink-induced infinite looping)
            realroot = os.path.realpath(root)
            os.chown(realroot, os.stat(source_path).st_uid, os.stat(source_path).st_gid)
            seen_paths.add(realroot)
            for file_name in files:
                full_file_path = os.path.join(realroot, file_name)
                real_file_path = os.path.realpath(full_file_path)
                # Device + inode uniquely identify the underlying file.
                this_path_key = "{}{}".format(str(os.stat(real_file_path).st_dev), str(os.stat(real_file_path).st_ino))
                if this_path_key in seen_paths and os.path.islink(full_file_path):
                    print_indent("Skipping previously seen symlink target: {} -> {}".format(full_file_path,
                                                                                            real_file_path),
                                 level=1)
                    continue
                else:
                    seen_paths.add(this_path_key)
                # Preserve the original owner/group on the copied file.
                original_stat = os.stat(os.path.join(source_path, file_name))
                os.chown(full_file_path, original_stat.st_uid, original_stat.st_gid)
            for dir_name in dirs:
                full_dir_path = os.path.join(realroot, dir_name)
                real_dir_path = os.path.realpath(full_dir_path)
                this_path_key = "{}{}".format(str(os.stat(real_dir_path).st_dev), str(os.stat(real_dir_path).st_ino))
                if this_path_key in seen_paths and os.path.islink(full_dir_path):
                    print_indent("Skipping previously seen symlink target: {} -> {}".format(full_dir_path,
                                                                                            real_dir_path),
                                 level=1)
                    continue
                else:
                    seen_paths.add(this_path_key)
                original_stat = os.stat(os.path.join(source_path, dir_name))
                os.chown(full_dir_path, original_stat.st_uid, original_stat.st_gid)
        return True
    else:
        # Single file: copy2 preserves metadata; restore ownership explicitly.
        shutil.copy2(source_path, destination_path)
        os.chown(destination_path, os.stat(source_path).st_uid, os.stat(source_path).st_gid)
        return True
def get_config_dict(module_name):
    """
    Create and return dictionary with all the necessary variables for module execution.

    BACKUP_DIR: directory containing file backups. When run via ec2rl, this is a subdirectory inside LOG_DIR.
    LOG_DIR: directory containing ec2rl logs else a default location if not running through ec2rl.
    BACKED_FILES: dict containing "original file path":"back up file copy path" key:pair values.
    REMEDIATE: controls whether remediation is to be attempted. The default is to only perform detection.
    SUDO: whether the module is being executed as root/with sudo privileges.
    DISTRO: the detected distribution of Linux running on the system.
    NOT_AN_INSTANCE: True if running on anything but an EC2 instance.

    Environment variables set by ec2rl (EC2RL_DISTRO, notaninstance, remediate,
    EC2RL_GATHEREDDIR, EC2RL_LOGDIR) override the defaults when present.

    Parameters:
        module_name (str): name of the module requesting the configuration dict.

    Returns:
        sys_config_dict (dict): variable name and variable value pairs usable inside Python ec2rl modules.
    """
    sys_config_dict = {"BACKUP_DIR": "/var/tmp/ec2rl_{}/backup".format(module_name),
                       "LOG_DIR": "/var/tmp/ec2rl_{}".format(module_name),
                       "BACKED_FILES": dict(),
                       "REMEDIATE": False,
                       "SUDO": check_root(),
                       "NOT_AN_INSTANCE": False}
    try:
        sys_config_dict["DISTRO"] = os.environ["EC2RL_DISTRO"]
    except KeyError:
        # Not running under ec2rl: detect the distro directly.
        sys_config_dict["DISTRO"] = get_distro()
    try:
        if os.environ["notaninstance"] == "True":
            sys_config_dict["NOT_AN_INSTANCE"] = True
    except KeyError:
        sys_config_dict["NOT_AN_INSTANCE"] = is_an_instance()
    try:
        if os.environ["remediate"] == "True":
            sys_config_dict["REMEDIATE"] = True
    except KeyError:
        # Keep default of False
        pass
    try:
        sys_config_dict["BACKUP_DIR"] = os.path.join(os.environ["EC2RL_GATHEREDDIR"], module_name)
    except KeyError:
        # Keep default
        pass
    try:
        sys_config_dict["LOG_DIR"] = os.path.join(os.environ["EC2RL_LOGDIR"], module_name)
    except KeyError:
        # Keep default
        pass
    return sys_config_dict
class PrediagError(Exception):
    """Base class for all prediag exceptions; catch this to handle any of them."""
    pass
class PrediagConnectionError(PrediagError):
    """A Requests ConnectionError occurred."""
    def __init__(self, error_message, *args):
        super(PrediagConnectionError, self).__init__(
            "Connection error: {}".format(error_message), *args)
class PrediagArgumentError(PrediagError):
    """One or more arguments were missing or invalid."""
    def __init__(self, arg_list, *args):
        super(PrediagArgumentError, self).__init__(
            "Missing or invalid args: {}!".format(", ".join(arg_list)), *args)
class PrediagDestinationPathExistsError(PrediagError):
    """The destination directory for the backup copy already exists."""
    def __init__(self, path_str, *args):
        super(PrediagDestinationPathExistsError, self).__init__(
            "Backup copy path already exists: {}".format(path_str), *args)
class PrediagInvalidPathError(PrediagError):
    """Raised when a given path is neither a file nor a directory."""
    def __init__(self, path_str, *args):
        super(PrediagInvalidPathError, self).__init__(
            "Invalid path. Not a file or directory: {}".format(path_str), *args)
| awslabs/aws-ec2rescue-linux | ec2rlcore/prediag.py | prediag.py | py | 23,046 | python | en | code | 164 | github-code | 50 |
72138032796 | """ This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>."""
# #
from pvlib import location
from aiana.anti_bug_testing.tester_class import Tester
def run_sim_settings_test(**kwargs):
    """
    Run the anti-bug test suite over a battery of simulation settings.

    The sim settings attributes 'study_name' and 'sub_study_name'
    are changed during the test anyway.
    NOTE for parallelization == 'None' also other settings are changed
    (resolution, hours... to reduce sim duration)
    **kwargs:
        mode (optional): Defaults to 'test'
        (which means 1. simulate 2. check and plot difference to reference)
        alternative: 'create_reference'
        open_oct_viewer (bool, optional): to check scene creation for each set.
        Viewer needs to be closed manually. Defaults to False.
        default_settings (Settings, optional): Defaults to None (constructing
        automatically based on setting files)
        run_simulation (bool, optional): Defaults to True, can be set False,
        if only viewing is of interest.
    """
    # Resolution and albedo sweeps with a fresh Tester.
    testerObj = Tester(**kwargs)
    testerObj.test_listItems_separately("spatial_resolution", [0.5, 0.2])
    testerObj.test_listItems_separately("ground_albedo", [0.05, 0.5])
    # #
    #testerObj = Tester(run_simulation=False, open_oct_viewer=True)
    testerObj.test_listItems_separately(
        "hour_for_sceneInspection", [8, 10, 16])
    # #
    # Date sweeps (one value each: 2020-03-01).
    testerObj.test_listItems_separately("year", [2020])
    testerObj.test_listItems_separately("month", [3])
    testerObj.test_listItems_separately("day", [1])
    # to reduce sim duration without GPU and to test hours and temporal
    # resolution (time_step_in_minutes), change default settings, to which
    # test_listItems_separately will fall back after each tested listItem:
    testerObj.default_settings.sim.hours = [14]
    testerObj.default_settings.sim.time_step_in_minutes = 60
    testerObj.default_settings.sim.spatial_resolution = 0.2
    testerObj.test_listItems_separately("parallelization", [
        'None',
        # 'multiCore', not working at the moment
        # 'GPU' # (standard)
    ])
    # #
    # reset: discard the reduced-duration defaults set above
    testerObj = Tester(**kwargs)
    # #
    # testerObj = Tester(mode='create_reference')
    # Irradiance aggregation and raytrace accuracy sweeps.
    testerObj.test_listItems_separately("aggregate_irradiance_perTimeOfDay", [
        'False', 'over_the_week', 'over_the_month'
    ])
    testerObj.test_listItems_separately("irradiance_aggfunc", ['min', 'max'])
    testerObj.test_listItems_separately("rtraceAccuracy", [
        'std', 'accurate', 'hq', 'good_no_interp', 'acc_no_interp'])
    # #
    #from aiana.anti_bug_testing.tester_class import Tester
    #testerObj = Tester(mode='create_reference')
    # to test and to shorten test weather data size
    # NOTE(review): the lines below mutate `testerObj.settings` while the rest
    # of this function uses `testerObj.default_settings` — confirm the
    # attribute choice is intentional.
    testerObj.settings.sim.date_range_to_calc_TMY = '2022-01-01/2022-12-31'
    testerObj.settings.sim.apv_location = location.Location(
        48.533, 9.717, altitude=750, tz='Europe/Berlin', name='Widderstall')
    testerObj.test_with_current_settings_then_reset_to_DefaultSettings(
        'location-Widderstall')
# #
# view settings
if __name__ == '__main__':
    # Quick manual check of the view settings: build the scene for inspection
    # without running a simulation, in reference-creation mode.
    from aiana.anti_bug_testing.tester_class import Tester
    testerObj = Tester(open_oct_viewer=True, run_simulation=False,
                       mode='create_reference')
    testerObj.test_listItems_separately('accelerad_img_height', [400])
    # #
    testerObj.test_inverted_bool_settings(['use_acceleradRT_view'])
| IEK-5/aiana | aiana/anti_bug_testing/testing_of_sim_and_view_settings.py | testing_of_sim_and_view_settings.py | py | 3,982 | python | en | code | 5 | github-code | 50 |
26283692708 | import os
import secrets
import string
from datetime import timezone, timedelta, datetime
import openai
import requests
import sshtunnel
from cryptography.fernet import Fernet
from flask import abort, session
from app import config
def verify_session(session):
    """Return the access token stored in the session; abort with 400 when absent."""
    if "tokens" in session:
        return session["tokens"].get("access_token")
    abort(400)
def fetch_user_data(access_token):
    """Fetch the current user's profile from the ME endpoint.

    Aborts the request with the upstream status code on any non-200 response.
    """
    res = requests.get(config.ME_URL,
                       headers={"Authorization": f"Bearer {access_token}"})
    if res.status_code != 200:
        abort(res.status_code)
    return res.json()
def generate_state():
    """Return a random 16-character OAuth state string (uppercase letters + digits)."""
    alphabet = string.ascii_uppercase + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(16))
def prepare_auth_payload(state, scope, show_dialog=False):
    """Build the query payload for the authorization redirect.

    `show_dialog` is only included when truthy, matching the provider's API.
    """
    payload = dict(
        client_id=config.CLIENT_ID,
        response_type="code",
        redirect_uri=config.REDIRECT_URI,
        state=state,
        scope=scope,
    )
    if show_dialog:
        payload["show_dialog"] = True
    return payload
def request_tokens(payload, client_id, client_secret):
    """POST the payload to the token endpoint using HTTP basic auth.

    Returns (token_data, None) on success, or (None, status_code) on failure.
    """
    res = requests.post(config.TOKEN_URL, auth=(client_id, client_secret),
                        data=payload)
    body = res.json()
    if body.get("error") or res.status_code != 200:
        return None, res.status_code
    return body, None
def convert_utc_to_est(utc_time):
    """Tag a naive datetime as UTC and convert it to a fixed UTC-4 offset.

    NOTE: the offset is a constant -4 hours (Eastern daylight time), not a
    DST-aware Eastern zone.
    """
    eastern = timezone(timedelta(hours=-4))
    return utc_time.replace(tzinfo=timezone.utc).astimezone(eastern)
def load_key_from_env():
    """Read the Fernet key from the CRYPT_KEY environment variable, as bytes."""
    key = os.environ["CRYPT_KEY"]
    return key.encode()
def encrypt_data(data):
    """Encrypt a string with the app-wide Fernet key; returns ciphertext bytes."""
    cipher = Fernet(load_key_from_env())
    return cipher.encrypt(data.encode())
def decrypt_data(encrypted_data):
    """Decrypt Fernet ciphertext produced by `encrypt_data` back to a string."""
    cipher = Fernet(load_key_from_env())
    return cipher.decrypt(encrypted_data).decode()
def is_api_key_valid(api_key):
    """Check whether an OpenAI API key is usable by issuing a minimal chat request.

    Parameters:
        api_key (str): candidate OpenAI API key.
    Returns:
        bool: True when the test completion succeeds, False on any API error.
    """
    openai.api_key = api_key
    try:
        openai.chat.completions.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Hello!"},
            ],
        )
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being reported as an invalid key.
        return False
    return True
def get_tunnel():
    """Open and start an SSH tunnel to the MySQL host defined in `config`.

    Returns:
        sshtunnel.SSHTunnelForwarder: an already-started tunnel; the caller is
        responsible for calling `tunnel.stop()` when done.
    """
    tunnel = sshtunnel.SSHTunnelForwarder(
        (config.SSH_HOST),
        ssh_username=config.SSH_USER,
        ssh_password=config.SSH_PASS,
        # Forward to the MySQL default port on the remote SQL host.
        remote_bind_address=(
            config.SQL_HOSTNAME, 3306)
    )
    tunnel.start()
    return tunnel
def refresh_tokens():
    """Refresh the access token stored in the Flask session.

    Returns True when the session was updated with fresh tokens, False when no
    tokens are present or the refresh request failed.
    """
    if 'tokens' not in session:
        return False

    payload = {
        'grant_type': 'refresh_token',
        'refresh_token': session['tokens'].get('refresh_token'),
    }
    res_data, error = request_tokens(payload, config.CLIENT_ID, config.CLIENT_SECRET)
    if error:
        return False

    # Providers may omit a new refresh token; keep the existing one then.
    expiry = datetime.now() + timedelta(seconds=res_data.get('expires_in'))
    session['tokens'].update({
        'access_token': res_data.get('access_token'),
        'refresh_token': res_data.get('refresh_token',
                                      session['tokens']['refresh_token']),
        'expiry_time': expiry.isoformat(),
    })
    return True
| kpister/prompt-linter | data/scraping/repos/rawcsav~SpotifyFlask/app~util~session_utils.py | app~util~session_utils.py | py | 3,375 | python | en | code | 0 | github-code | 50 |
19364547340 | # -*-coding:utf-8 -*-
"""
@project: self
@author: Administrator
@file: 1_first_steps.py
@time: 2020-04-09 14:04:02
# code is far away from bugs with the god animal protecting
I love animals. They taste delicious.
┏┓ ┏┓
┏┛┻━━━┛┻┓
┃ ☃ ┃
┃ ┳┛ ┗┳ ┃
┃ ┻ ┃
┗━┓ ┏━┛
┃ ┗━━━┓
┃ 神兽保佑 ┣┓
┃ 永无BUG。 ┏┛
┗┓┓┏━┳┓┏┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
"""
from fastapi import FastAPI
app = FastAPI()
@app.get("/")
async def root():
    """Root path operation: return a hello-world greeting payload."""
    return {"message": "Hello World"}
| shuqian2017/FastApi | FastApi_demo/1_first_steps.py | 1_first_steps.py | py | 865 | python | en | code | 0 | github-code | 50 |
22199212732 | import unittest
import capnp
try:
from capnp import _capnp_test
except ImportError:
_capnp_test = None
# pylint: disable=c-extension-no-member
@unittest.skipUnless(_capnp_test, '_capnp_test unavailable')
class VoidTest(unittest.TestCase):
    """Checks for the capnp VOID singleton and its VoidType class."""

    def test_void_type(self):
        # ``VoidType`` is not declared as subclass of ``type`` for now.
        self.assertFalse(issubclass(capnp.VoidType, type))

    def test_void_object_singleton(self):
        # Constructing VoidType must always hand back the same VOID object.
        self.assertIs(capnp.VOID, capnp.VoidType())

    def test_void(self):
        # VOID round-trips through the C++ test helper and is falsy.
        self.assertIs(capnp.VOID, _capnp_test.takeVoid(capnp.VOID))
        self.assertFalse(bool(capnp.VOID))
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| clchiou/garage | python/g1/third-party/capnp/tests/test_capnp_void.py | test_capnp_void.py | py | 695 | python | en | code | 3 | github-code | 50 |
43000751473 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    """LeetCode 2: add two non-negative numbers stored as reversed-digit lists."""

    def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:
        """Return a new list holding the digit-by-digit sum of l1 and l2."""
        dummy = ListNode()          # sentinel head simplifies appending
        tail = dummy
        carry = 0
        while l1 is not None or l2 is not None or carry != 0:
            total = carry
            if l1 is not None:
                total += l1.val
                l1 = l1.next
            if l2 is not None:
                total += l2.val
                l2 = l2.next
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return dummy.next
| pragyagautam02/DSA_StandardProblems | addTwoNoLinkedList.py | addTwoNoLinkedList.py | py | 887 | python | en | code | 3 | github-code | 50 |
12390160014 | import sys
assert sys.version_info >= (3, 5) # make sure we have Python 3.5+
import os
from pyspark.sql import SparkSession, functions, types
def _write_grouped_count(trip, column):
    """Group trips by `column`, count rows, sort by that column, and write the
    result as a headered CSV to `<outputs>/<column>` (module-level `outputs`).
    """
    counts = trip.groupBy(trip[column]).count().orderBy(column)
    counts.write.mode('overwrite').option("header", True).csv(path=outputs + '/' + column)


def yearly_trip_count(trip):
    """Get trip count of every year."""
    _write_grouped_count(trip, 'year')


def monthly_trip_count(trip):
    """Get trip count of every month."""
    _write_grouped_count(trip, 'month')


def weekday_trip_count(trip):
    """Get trip count of every weekday."""
    _write_grouped_count(trip, 'weekday')


def daily_trip_count(trip):
    """Get trip count of every day."""
    _write_grouped_count(trip, 'date')


def hourly_trip_count(trip):
    """Get trip count of every hour."""
    _write_grouped_count(trip, 'hour')
def main(inputs, outputs):
    """Read the trip parquet data and emit one trip-count report per period."""
    trip = spark.read.parquet(inputs)
    # Each report groups by a different time column and writes its own CSV.
    yearly_trip_count(trip)
    monthly_trip_count(trip)
    weekday_trip_count(trip)
    daily_trip_count(trip)
    hourly_trip_count(trip)
if __name__ == '__main__':
    # CLI: query_time.py <input parquet path> <output directory>
    inputs = sys.argv[1]
    outputs = sys.argv[2]
    spark = SparkSession.builder.appName('analyse time').getOrCreate()
    assert spark.version >= '3.0' # make sure we have Spark 3.0+
    spark.sparkContext.setLogLevel('WARN')
    sc = spark.sparkContext
    main(inputs, outputs)
41897910651 | from bs4 import BeautifulSoup
import requests
import re
import gspread
from oauth2client.service_account import ServiceAccountCredentials
# OAuth scopes required to read and write Google Sheets/Drive.
scope = ["https://spreadsheets.google.com/feeds",'https://www.googleapis.com/auth/spreadsheets',"https://www.googleapis.com/auth/drive.file","https://www.googleapis.com/auth/drive"]
# Service-account credentials are expected in creds.json next to this script.
creds = ServiceAccountCredentials.from_json_keyfile_name("creds.json", scope)
client = gspread.authorize(creds)
# Open the first worksheet of the "News" spreadsheet and clear previously
# scraped rows (rows 2-200), keeping the header row intact.
sheet = client.open("News").sheet1
sheet.delete_rows(2,200)
# Walk the paginated crime-news article list and push each article into the sheet.
# NOTE(review): `page` is never set to 0, so this loop only terminates when a
# request or parse raises — confirm whether an explicit stop condition is needed.
page = 1
while page != 0:
    url = 'https://bangaloremirror.indiatimes.com/bangalore/crime/articlelist/msid-20970789,curpg-' + str(page) + '.cms'
    code = requests.get(url)
    soup = BeautifulSoup(code.text, 'lxml')
    # Article links live inside the list container and match the crime-section path.
    links = soup.find('div', {'class': 'ipl_lnews containerDiv clearfix'}).findAll('a', href=re.compile(
        "(/bangalore/crime/)+([A-Za-z0-9_.()])"))
    for link in links:
        # NOTE(review): `ab` is reset to 2 on every iteration, so each row is
        # inserted at row 2 and articles end up in reverse order — verify intended.
        ab = 2
        Headline = link.text
        img = link.img.get('data-src')
        img_url = 'https://bangaloremirror.indiatimes.com' + img
        Link = 'https://bangaloremirror.indiatimes.com' + link['href']
        # Fetch the article page itself to extract the body text.
        url = Link
        code = requests.get(url)
        soup = BeautifulSoup(code.text, 'lxml')
        news = soup.find('div', {'class': 'Normal'})
        # Strip the first three embedded <script> tags from the article body.
        extra1 = news.script
        extra1.decompose()
        extra2 = news.script
        extra2.decompose()
        extra3 = news.script
        extra3.decompose()
        news_discription = news.text
        view_more = '\n                    View More\n                '
        # Skip pagination controls that match the same link pattern.
        if Headline == 'Next' or Headline == 'Prev' or Headline == view_more:
            continue
        insertRow = [Headline, Link, img_url, news_discription]
        sheet.insert_row(insertRow, ab)
        ab = ab + 1
    page = page + 1
| vineetdsat/Banglore_Mirror_Clone | News_scrapper.py | News_scrapper.py | py | 1,812 | python | en | code | 0 | github-code | 50 |
8123074878 | from django.views.generic import CreateView, DeleteView, ListView, UpdateView, DetailView, FormView
from .models import *
from django.urls import reverse_lazy, reverse
from .forms import *
from web.users.auth import auth_test
from django.contrib.auth.mixins import UserPassesTestMixin
from django.contrib import messages
from web.users.statics import BEGELEIDER
from web.profiles.models import Profile
from web.forms.statics import URGENTIE_AANVRAAG, FORMS_BY_SLUG
from web.forms.views import GenericUpdateFormView, GenericCreateFormView
import sendgrid
from sendgrid.helpers.mail import Mail
from django.contrib.sites.shortcuts import get_current_site
from django.conf import settings
from django.template.loader import render_to_string
from web.organizations.models import Organization
from django.http import Http404
class UserCaseList(UserPassesTestMixin, ListView):
    """List the cases attached to the signed-in counsellor's profile."""

    model = Case
    template_name_suffix = '_list_page'

    def test_func(self):
        # Only counsellors (BEGELEIDER) that have a profile may view the list.
        user = self.request.user
        return auth_test(user, BEGELEIDER) and hasattr(user, 'profile')

    def get_queryset(self):
        # Restrict the listing to cases linked to this user's profile.
        return self.request.user.profile.cases.all()

    def get_context_data(self, *, object_list=None, **kwargs):
        return super().get_context_data(object_list=object_list, **kwargs)
class CaseDetailView(UserPassesTestMixin, DetailView):
    """Show a single case; restricted to counsellors with a profile."""

    model = Case
    template_name_suffix = '_page'

    def test_func(self):
        user = self.request.user
        return auth_test(user, BEGELEIDER) and hasattr(user, 'profile')
class CaseCreateView(UserPassesTestMixin, CreateView):
    """Create a case and attach it to the current counsellor's profile."""

    model = Case
    form_class = CaseForm
    template_name_suffix = '_create_form'
    success_url = reverse_lazy('cases_by_profile')

    def test_func(self):
        user = self.request.user
        return auth_test(user, BEGELEIDER) and hasattr(user, 'profile')

    def form_valid(self, form):
        # Save first so the new case can be linked to the user's profile.
        case = form.save(commit=True)
        self.request.user.profile.cases.add(case)
        messages.add_message(self.request, messages.INFO,
                             "De cliënt '%s' is aangemaakt." % case.client_name)
        return super().form_valid(form)
class CaseUpdateView(UserPassesTestMixin, UpdateView):
    """Edit an existing case and flash a confirmation message."""

    model = Case
    form_class = CaseForm
    template_name_suffix = '_update_form'
    success_url = reverse_lazy('cases_by_profile')

    def test_func(self):
        user = self.request.user
        return auth_test(user, BEGELEIDER) and hasattr(user, 'profile')

    def form_valid(self, form):
        messages.add_message(self.request, messages.INFO,
                             "De cliënt '%s' is aangepast." % self.object.client_name)
        return super().form_valid(form)
class CaseDeleteView(UserPassesTestMixin, DeleteView):
    """Delete a case and flash a confirmation message.

    Access is limited to counsellors (BEGELEIDER) that have a profile.
    """
    model = Case
    form_class = CaseForm
    template_name_suffix = '_delete_form'
    success_url = reverse_lazy('cases_by_profile')

    def test_func(self):
        return auth_test(self.request.user, BEGELEIDER) and hasattr(self.request.user, 'profile')

    def delete(self, request, *args, **kwargs):
        # Bug fix: the original called super().delete(self, request, ...),
        # passing `self` twice through the already-bound method and shifting
        # every argument (request received `self`).
        response = super().delete(request, *args, **kwargs)
        messages.add_message(self.request, messages.INFO,
                             "De cliënt '%s' is verwijderd." % self.object.client_name)
        return response
class GenericCaseUpdateFormView(GenericUpdateFormView):
    """Update one named sub-form (selected by slug) of a case."""

    model = Case
    template_name = 'forms/generic_form.html'
    success_url = reverse_lazy('cases_by_profile')
    form_class = CaseGenericModelForm

    def get_success_url(self):
        # Honour an explicit POSTed `next` target; otherwise stay on this sub-form.
        next_url = self.request.POST.get('next')
        if next_url:
            return next_url
        return reverse('update_case',
                       kwargs={'pk': self.object.id, 'slug': self.kwargs.get('slug')})

    def get_discard_url(self):
        return reverse('case', kwargs={'pk': self.object.id})

    def form_invalid(self, form):
        return super().form_invalid(form)

    def form_valid(self, form):
        response = super().form_valid(form)
        messages.add_message(self.request, messages.INFO, "De gegevens zijn aangepast.")
        return response
class GenericCaseCreateFormView(GenericCreateFormView):
    """Create a case from one named sub-form (slug) and link it to the user's profile."""
    model = Case
    template_name = 'forms/generic_form.html'
    success_url = reverse_lazy('cases_by_profile')
    form_class = CaseGenericModelForm

    def get_success_url(self):
        # After creation, continue editing the same sub-form of the new case.
        return reverse('update_case', kwargs={'pk': self.object.id, 'slug': self.kwargs.get('slug')})

    def get_discard_url(self):
        return reverse('cases_by_profile')

    def form_valid(self, form):
        # NOTE(review): super().form_valid() normally saves the form already;
        # the second form.save() below re-saves the same instance — confirm
        # intended (self.object could likely be used instead).
        response = super().form_valid(form)
        case = form.save(commit=True)
        self.request.user.profile.cases.add(case)
        messages.add_message(self.request, messages.INFO, "De cliënt '%s' is aangemaakt." % case.client_name)
        return response
class SendCaseView(UpdateView):
    """Email a case's (restricted) data to every organization with a main email."""
    model = Case
    template_name = 'cases/send.html'
    form_class = SendCaseForm

    def get_success_url(self):
        return reverse('case', kwargs={'pk': self.object.id})

    def get_context_data(self, **kwargs):
        kwargs.update(self.kwargs)
        # The slug selects which form definition describes this send action;
        # an unknown slug is a 404.
        form_context = FORMS_BY_SLUG.get(self.kwargs.get('slug'))
        if not form_context:
            raise Http404
        kwargs.update(form_context)
        kwargs.update({
            'organization_list': Organization.objects.filter(main_email__isnull=False),
            'object': self.object,
        })
        return super().get_context_data(**kwargs)

    def form_valid(self, form):
        # Send one email per organization, rendering only the fields that
        # organization is allowed to see (field_restrictions).
        organization_list = Organization.objects.filter(main_email__isnull=False)
        for organization in organization_list:
            body = render_to_string('cases/mail/case.txt', {
                'case': self.object.to_dict(organization.field_restrictions)
            })
            current_site = get_current_site(self.request)
            sg = sendgrid.SendGridAPIClient(settings.SENDGRID_KEY)
            email = Mail(
                from_email='noreply@%s' % current_site.domain,
                to_emails=organization.main_email,
                subject='Omslagroute - %s' % self.kwargs.get('title'),
                plain_text_content=body
            )
            sg.send(email)
            # Flash one confirmation per recipient organization.
            messages.add_message(
                self.request, messages.INFO, "De cliëntgegevens van '%s', zijn gestuurd naar '%s'." % (
                    self.object.client_name,
                    organization.main_email
                )
            )
        return super().form_valid(form)
| tickHub/omslagroute | app/web/cases/views.py | views.py | py | 6,633 | python | en | code | 0 | github-code | 50 |
16139686534 | # not sure why one symbol has one cdf
# optimized for cnn not our case
# 2021.09.23
# x
import torchac
import numpy as np
import torch
class Arithmetic():
    """Arithmetic coding of integer symbol grids via torchac.

    One static CDF (estimated from symbol frequencies in ``fit``) is tiled and
    reused for every symbol position.
    """
    def __init__(self):
        # prob[i]: empirical probability of symbol value i (filled by fit()).
        # cdf: cumulative distribution, starting at 0 and ending at exactly 1.
        self.prob = None
        self.cdf = [0, ]

    def fit(self, idx):
        """Estimate symbol probabilities and the CDF from a training array.

        NOTE(review): ``prob`` is sized by the number of *distinct* values but
        indexed by the value itself — this assumes symbols form the contiguous
        range 0..K-1; confirm for sparse symbol sets.
        """
        self.prob = np.zeros((len(np.unique(idx))))
        idx = idx.reshape(-1).astype('int16')
        for i in range(len(idx)):
            self.prob[idx[i]] += 1.
        self.prob /= len(idx)
        # Accumulate probabilities; the final entry is forced to exactly 1 to
        # absorb floating-point rounding.
        for i in range(len(self.prob)):
            self.cdf.append(self.cdf[-1]+self.prob[i])
        self.cdf[-1] = 1
        return self

    def gen_cdf(self, S):
        """Tile the shared CDF to a tensor of shape (S[0], S[1], S[2], S[3], len(cdf))."""
        output_cdf = []
        for k in range(S[0]):
            for c in range(S[1]):
                for i in range(S[2]):
                    for j in range(S[3]):
                        output_cdf.append(self.cdf)
        output_cdf = np.array(output_cdf).reshape(S[0],S[1],S[2],S[3],len(self.cdf))
        return torch.from_numpy(output_cdf)

    def encode(self, idx):
        """Arithmetic-encode a 4-D int array; returns (byte_stream, moved-axis shape)."""
        # Move the last axis to position 1 (assumed channel-last input — TODO confirm).
        idx = np.moveaxis(idx, -1, 1).astype('int16')
        S = idx.shape
        byte_stream = torchac.encode_float_cdf(self.gen_cdf(S),
                                               torch.from_numpy(idx),
                                               check_input_bounds=True)
        return byte_stream, S

    def decode(self, byte_stream, S):
        """Inverse of ``encode``: byte stream + shape S -> original-layout array."""
        sym_out = torchac.decode_float_cdf(self.gen_cdf(S),
                                           byte_stream)
        idx = sym_out.numpy()
        # Undo the axis move performed in encode().
        idx = np.moveaxis(idx, 1, -1)
        return idx

    def check(self, sym, sym_out):
        """Assert that the decoded array matches the original exactly."""
        assert np.sum(np.abs(sym-sym_out)) == 0, 'Error!'
if __name__ == "__main__":
    # Round-trip smoke test on random 8-bit symbols, then report bits/symbol.
    a = np.random.randint(0, 256, (2, 100, 100, 1))
    ac = Arithmetic().fit(a)
    b, s = ac.encode(a)
    d = ac.decode(b,s)
    ac.check(a, d)
    print('Avg', len(b)*8/(2*100*100))  # average bits per encoded symbol
9376405140 | from obgraph import Graph as OBGraph
from graph_kmer_index.kmer_finder import DenseKmerFinder
from graph_kmer_index import kmer_hash_to_sequence
from kivs import Graph, KmerFinder, hash_kmer
from os.path import exists
import pytest
def hash_all(arr):
    """Hash every k-mer string in `arr` with `hash_kmer`; k is the length of the first entry."""
    if not arr:
        return []
    k = len(arr[0])
    return [hash_kmer(kmer, k) for kmer in arr]
def compare_kmer_node_lists(kmers_a, nodes_a, kmers_b, nodes_b, k, print_lists=False):
    """Assert that two (kmer, node) result lists agree.

    Checks that both lists have equal lengths and that the multiset of kmers is
    identical on both sides. Mismatching kmers are printed (as sequences)
    before the final assertions to ease debugging.
    """
    assert len(kmers_a) == len(kmers_b)
    assert len(nodes_a) == len(nodes_b)
    assert len(kmers_a) == len(nodes_a)

    # Count occurrences of every kmer on each side.
    counts_a = {}
    for kmer in kmers_a:
        counts_a[kmer] = counts_a.get(kmer, 0) + 1
    counts_b = {}
    for kmer in kmers_b:
        counts_b[kmer] = counts_b.get(kmer, 0) + 1

    all_keys = set(counts_a.keys())
    all_keys.update(counts_b.keys())

    # Report kmers whose counts differ before asserting.
    for key in all_keys:
        a = counts_a.get(key, 0)
        b = counts_b.get(key, 0)
        if a != b and a > 0:
            print(kmer_hash_to_sequence(key, k), a, b)

    # Print entire kmer lists
    if print_lists:
        for i in range(max(len(kmers_a), len(kmers_b))):
            if i < len(kmers_a) and i < len(kmers_b):
                print(f'{kmer_hash_to_sequence(kmers_a[i], k)}, {nodes_a[i]}\t{kmer_hash_to_sequence(kmers_b[i], k)}, {nodes_b[i]}')
            elif i < len(kmers_a):
                print(f'{kmer_hash_to_sequence(kmers_a[i], k)}, {nodes_a[i]}')
            elif i < len(kmers_b):
                print(f'{" " * k}, \t{kmer_hash_to_sequence(kmers_b[i], k)}, {nodes_b[i]}')

    for key in all_keys:
        # Bug fix: use .get(..., 0) so a kmer present on only one side fails
        # the assertion instead of raising KeyError as the original did.
        assert counts_a.get(key, 0) == counts_b.get(key, 0)
# The last tests here are quite large. The graph they share looks like this:
# A G G C T
# ACTGA - G - C - T - A - G - ACTGA
#
@pytest.mark.parametrize("nodes,edges,ref,k,max_var", [
(["ACTGACTGACTG", "ACTAGTC"], [[1], []], [0, 1], 3, 4),
(["ACTGACTGACTG", "ACTGCA"], [[1], []], [0, 1], 4, 4),
(["ACTAG", "GAC", "TGA", "CTGAGT"], [[1], [2], [3], []], [0, 1, 2, 3], 3, 4),
(["ACTAG", "G", "A", "GTACTCA"], [[1, 2], [3], [3], []], [0, 1, 3], 3, 4),
(["ACTAGATTTAGGCTA" * 4, "G", "A", "GTACTCA" * 10], [[1, 2], [3], [3], []], [0, 1, 3], 3, 4),
(["ACTAGATTTAGGCTA" * 4, "GTACTAA" * 12, "ATGACTA" * 12, "GTACTCA" * 10], [[1, 2], [3], [3], []], [0, 1, 3], 3, 4),
(["AGTAGA", "G", "CT", "ACTA", "G", "A", "TCATA"], [[1, 2], [3], [3], [4, 5], [6], [6], []],
[0, 1, 3, 4, 6], 3, 4),
(["AGTAGA", "G", "CT", "ACTA", "G", "A", "TCATA"], [[1, 2], [3], [3], [4, 5], [6], [6], []],
[0, 1, 3, 4, 6], 4, 4),
(["AGTAGA", "G", "CT", "ACTA", "G", "A", "TCATA"], [[1, 2], [3], [3], [4, 5], [6], [6], []],
[0, 1, 3, 4, 6], 5, 4),
(["AGTAGAACTGACTTCAGGTACTTA" * 10] * 7, [[1, 2], [3], [3], [4, 5], [6], [6], []],
[0, 1, 3, 4, 6], 5, 4),
(["ACTGA", "G", "A", "C", "G", "T", "G", "A", "C", "G", "T", "ACTGA"],
[[1, 2], [3, 4], [3, 4], [5, 6], [5, 6], [7, 8], [7, 8], [9, 10], [9, 10], [11], [11], []],
[0, 1, 3, 5, 7, 9, 11], 4, 250),
(["ACTGA", "G", "A", "C", "G", "T", "G", "A", "C", "G", "T", "ACTGA"],
[[1, 2], [3, 4], [3, 4], [5, 6], [5, 6], [7, 8], [7, 8], [9, 10], [9, 10], [11], [11], []],
[0, 1, 3, 5, 7, 9, 11], 6, 2),
(["ACTGA", "G", "A", "C", "G", "T", "G", "A", "C", "G", "T", "ACTGA"],
[[1, 2], [3, 4], [3, 4], [5, 6], [5, 6], [7, 8], [7, 8], [9, 10], [9, 10], [11], [11], []],
[0, 1, 3, 5, 7, 9, 11], 6, 4),
(["ACTGA", "G", "A", "C", "G", "T", "G", "A", "C", "G", "T", "ACTGA"],
[[1, 2], [3, 4], [3, 4], [5, 6], [5, 6], [7, 8], [7, 8], [9, 10], [9, 10], [11], [11], []],
[0, 1, 3, 5, 7, 9, 11], 6, 6)
])
def test_kmer_index_against_kage(nodes, edges, ref, k, max_var):
    """Cross-check KIVS's KmerFinder against kage's DenseKmerFinder.

    Builds the same graph in both libraries and requires both to report the
    same multiset of (kmer, node) hits.
    """
    graph = Graph.from_sequence_edge_lists(nodes, edges, ref=ref)
    kmer_finder = KmerFinder(graph, k, reverse_kmers=True)
    res_kmers, res_nodes = kmer_finder.find(max_variant_nodes=max_var, include_spanning_nodes=True)

    # Translate the 0-indexed fixture into obgraph's 1-indexed dictionaries.
    ob_node_sequences = {}
    ob_edges = {}
    ob_linear_ref_nodes = []
    for i in range(len(nodes)):
        ob_node_sequences[i + 1] = nodes[i]
        if len(edges[i]) > 0:
            ob_edges[i + 1] = []
            for edge in edges[i]:
                ob_edges[i + 1].append(edge + 1)
    ob_linear_ref_nodes = [i + 1 for i in ref]

    obgraph = OBGraph.from_dicts(
        node_sequences=ob_node_sequences,
        edges=ob_edges,
        linear_ref_nodes=ob_linear_ref_nodes
    )
    finder = DenseKmerFinder(obgraph, k=k, max_variant_nodes=max_var)
    finder.find()
    ob_kmers, ob_nodes = finder.get_found_kmers_and_nodes()

    compare_kmer_node_lists(res_kmers, res_nodes, ob_kmers, ob_nodes, k)
# The last test here is quite large. The graph looks like this (* means a node with an empty sequence):
# * A * C
# / \ / \ / \ / \
# AC - G - * - * - G - T - * - A - GT
#
@pytest.mark.parametrize("nodes,edges,ref,k,max_var,expected_nodes,expected_kmers", [
(["ACTG", "ACTA"], [[1], []], [0, 1], 3, 4,
[0, 0, 0, 1, 0, 1, 1, 1],
hash_all(['ACT', 'CTG', 'TGA', 'TGA', 'GAC', 'GAC', 'ACT', 'CTA'])),
(["ACT", "", "A", "GCA"], [[1, 2], [3], [3], []], [0, 1], 3, 4,
[0, 0, 2, 0, 2, 3, 2, 3, 3, 0, 1, 3, 0, 1, 3],
hash_all(['ACT', 'CTA', 'CTA', 'TAG', 'TAG', 'TAG', 'AGC', 'AGC', 'GCA', 'CTG', 'CTG', 'CTG',
'TGC', 'TGC', 'TGC'])),
(["AC", "G", "", "", "", "A", "G", "T", "", "", "A", "C", "GT"],
[[1, 2], [3], [3], [4, 5], [6], [6], [7, 8], [9], [9], [10, 11], [12], [12], []],
[0, 1, 3, 4, 6, 7, 9, 10, 12], 3, 10,
[0, 1, 0, 2, 3, 5, 0, 2, 3, 4, 6,
0, 1, 3, 5, 0, 1, 3, 4, 6,
0, 2, 3, 5, 6, 0, 2, 3, 4, 6, 7,
0, 2, 3, 4, 6, 8, 9, 10,
0, 2, 3, 4, 6, 8, 9, 11,
1, 3, 5, 6, 1, 3, 4, 6, 7,
1, 3, 4, 6, 8, 9, 10,
1, 3, 4, 6, 8, 9, 11,
5, 6, 7, 5, 6, 8, 9, 10,
5, 6, 8, 9, 11, 6, 7, 9, 10,
6, 7, 9, 11, 6, 8, 9, 10, 12,
6, 8, 9, 11, 12, 7, 9, 10, 12,
7, 9, 11, 12, 10, 12, 11, 12],
hash_all(['ACG', 'ACG', 'ACA', 'ACA', 'ACA', 'ACA', 'ACG', 'ACG', 'ACG', 'ACG', 'ACG',
'CGA', 'CGA', 'CGA', 'CGA', 'CGG', 'CGG', 'CGG', 'CGG', 'CGG',
'CAG', 'CAG', 'CAG', 'CAG', 'CAG', 'CGT', 'CGT', 'CGT', 'CGT', 'CGT', 'CGT',
'CGA', 'CGA', 'CGA', 'CGA', 'CGA', 'CGA', 'CGA', 'CGA',
'CGC', 'CGC', 'CGC', 'CGC', 'CGC', 'CGC', 'CGC', 'CGC',
'GAG', 'GAG', 'GAG', 'GAG', 'GGT', 'GGT', 'GGT', 'GGT', 'GGT',
'GGA', 'GGA', 'GGA', 'GGA', 'GGA', 'GGA', 'GGA',
'GGC', 'GGC', 'GGC', 'GGC', 'GGC', 'GGC', 'GGC',
'AGT', 'AGT', 'AGT', 'AGA', 'AGA', 'AGA', 'AGA', 'AGA',
'AGC', 'AGC', 'AGC', 'AGC', 'AGC', 'GTA', 'GTA', 'GTA', 'GTA',
'GTC', 'GTC', 'GTC', 'GTC', 'GAG', 'GAG', 'GAG', 'GAG', 'GAG',
'GCG', 'GCG', 'GCG', 'GCG', 'GCG', 'TAG', 'TAG', 'TAG', 'TAG',
'TCG', 'TCG', 'TCG', 'TCG', 'AGT', 'AGT', 'CGT', 'CGT']))
])
def test_kmer_empty_nodes(nodes, edges, ref, k, max_var, expected_nodes, expected_kmers):
    """Graphs containing empty-sequence nodes must yield the expected kmer/node pairs."""
    graph = Graph.from_sequence_edge_lists(nodes, edges, ref=ref)
    kmer_finder = KmerFinder(graph, k)
    res_kmers, res_nodes = kmer_finder.find(max_variant_nodes=max_var, include_spanning_nodes=True)
    compare_kmer_node_lists(res_kmers, res_nodes, expected_kmers, expected_nodes, k)
@pytest.mark.slow
@pytest.mark.parametrize("file,k,max_var", [
("tests/data/example_graph.npz", 4, 4),
("tests/data/example_graph.npz", 6, 6),
("tests/data/example_graph.npz", 8, 8),
("tests/data/example_graph.npz", 12, 12),
("tests/data/example_graph.npz", 16, 16),
("tests/data/example_graph.npz", 24, 24),
("tests/data/example_graph.npz", 31, 31)
])
def test_obgraph_against_kage_big_graph(file, k, max_var):
    """Compare KIVS results against kage on a large on-disk graph.

    The (slow) kage reference results are cached in a per-(k, max_var) text
    file of "kmer node" lines; the cache is created on first run.
    """
    obgraph = OBGraph.from_file(file)
    graph = Graph.from_obgraph(obgraph)
    kmer_finder = KmerFinder(graph, k, reverse_kmers=True)
    res_kmers, res_nodes = kmer_finder.find(max_variant_nodes=max_var, include_spanning_nodes=True)
    fname = f'tests/data/kage_results_{k}mer_{max_var}var.txt'
    ob_kmers, ob_nodes = [], []
    if exists(fname):
        # Load the cached kage reference results.
        f = open(fname, "r")
        lines = f.readlines()
        for l in lines:
            seg = l.split(" ")
            if len(seg) == 2:
                ob_kmers.append(int(seg[0]))
                ob_nodes.append(int(seg[1]))
        f.close()
    else:
        # Compute the reference with kage and write the cache for next time.
        finder = DenseKmerFinder(obgraph, k=k, max_variant_nodes=max_var)
        finder.find()
        ob_kmers, ob_nodes = finder.get_found_kmers_and_nodes()
        f = open(fname, "w")
        for i in range(len(ob_kmers)):
            f.write(f'{str(ob_kmers[i])} {str(ob_nodes[i])}\n')
        f.close()
    compare_kmer_node_lists(res_kmers, res_nodes, ob_kmers, ob_nodes, k)
| ZinderAsh/python-kivs | tests/test_kivs.py | test_kivs.py | py | 9,505 | python | en | code | 0 | github-code | 50 |
27801632767 | from string import punctuation
from collections import Counter
from nltk.corpus import stopwords
from pandas import read_csv
# load doc into memory
def load_doc(filename, classes=2):
    """Load the tweet TSV into a shuffled DataFrame.

    Rows whose tweet is 'Not Available' are dropped; with classes=2 the
    'neutral' sentiment rows are dropped as well. Rows are returned in random
    order with a fresh 0-based index.
    """
    frame = read_csv(filename, delimiter='\t', header=None,
                     names=["id1", "id2", "sentiment", "tweet"])
    frame = frame.drop(frame[(frame['tweet'] == 'Not Available')].index)
    if classes == 2:
        frame = frame.drop(frame[(frame['sentiment'] == 'neutral')].index)
    return frame.sample(frac=1).reset_index(drop=True)
# turn a sentence into clean tokens
def clean_doc(doc):
    """Turn a raw tweet into clean lowercase tokens.

    Pipeline: lowercase -> whitespace split -> strip punctuation -> keep
    alphabetic words only -> drop English stop words -> drop words of length
    2 or less.
    """
    table = str.maketrans('', '', punctuation)
    stop_words = set(stopwords.words('english'))
    words = doc.lower().split()
    words = (w.translate(table) for w in words)
    words = (w for w in words if w.isalpha())
    words = (w for w in words if w not in stop_words)
    return [w for w in words if len(w) > 2]
# load doc and add to vocab
def add_doc_to_vocab(doc, vocab):
    """Tokenize one tweet and add its tokens to the running vocabulary Counter."""
    vocab.update(clean_doc(doc))
# load all docs in a directory
def create_vocab(filename, test_ratio, vocab):
    """Build the vocabulary from the training split of the tweet file.

    Only the first (1 - test_ratio) fraction of the shuffled rows contributes,
    keeping the held-out test portion out of the vocabulary.
    """
    raw_tweets = load_doc(filename, classes=2)
    train_size = int(len(raw_tweets) * (1 - test_ratio))
    for tweet in raw_tweets["tweet"][:train_size]:
        add_doc_to_vocab(tweet, vocab)
# save list to file
def save_list(lines, filename):
    """Write the tokens to `filename`, one per line.

    Uses a context manager so the file is closed even if writing fails
    (the original leaked the handle on error).
    """
    with open(filename, 'w') as file:
        file.write('\n'.join(lines))
# Build the vocabulary counter over the training split of the tweet dataset.
vocab = Counter()
# Reserve 10% of the rows as a held-out test split.
test_ratio = 0.1
create_vocab('../database/downloadedB.csv', test_ratio, vocab)
# print the size of the vocab
print(len(vocab))
# print the top words in the vocab
print(vocab.most_common(50))
# Keep only tokens seen at least `min_occurane` times (sic) to trim rare noise.
min_occurane = 10
tokens = [k for k, c in vocab.items() if c >= min_occurane]
print(len(tokens))
# save tokens to a vocabulary file
save_list(tokens, '../models/vocab.txt')
| aiir-team/code_TSA_SVC_luxembourg | models/create_vocab.py | create_vocab.py | py | 2,347 | python | en | code | 0 | github-code | 50 |
7964192802 | from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from api.llm import router as llm
from api.recipes import router as recipes
import os
# Route outbound HTTP(S) traffic through the corporate proxy.
# NOTE(review): hard-coded proxy URL — consider making this configurable.
os.environ['http_proxy'] = "http://proxy.mei.co.jp:8080"
os.environ['https_proxy'] = "http://proxy.mei.co.jp:8080"
def create_app() -> FastAPI:
    """Build the FastAPI application with the LLM and recipe routers mounted."""
    application = FastAPI()
    for router in (llm, recipes):
        application.include_router(router)
    return application
app = create_app()
# Allow any origin/method/header so a separately hosted frontend can call the API.
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is very
# permissive — confirm this is acceptable for the deployment environment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.get("/")
def read_root():
    """Health-check endpoint: confirms the RecipeChatGPT service is running."""
    return {"RecipeChatGPT": "Running"}
| harukary/gpt_recipe_app | server/api/main.py | main.py | py | 645 | python | en | code | 0 | github-code | 50 |
1687857896 | import os
import re
from xml.dom.minidom import parse
from urllib import request
import codecs
import markdown
import argparse
def parse_args():
    """Build and apply the command-line parser.

    Returns:
        argparse.Namespace with `changed_files`: path to a file listing all
        files changed in the current PR.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--changed_files",
        type=str,
        default="./changed_files.txt",
        help="file contains all changed files in current PR",
    )
    return parser.parse_args()
def md2html(file_path):
    """Render a Markdown file to HTML, written next to it as `<file_path>.html`.

    Uses context managers so both files are closed even if rendering fails
    (the original leaked both file handles).
    """
    with codecs.open(file_path, mode="r", encoding="utf-8") as input_file:
        text = input_file.read()
    html = markdown.markdown(text)
    with codecs.open(f"{file_path}.html", mode="w", encoding="utf-8") as output_file:
        output_file.write(html)
def exec_linkchecker(file_path):
link_checker_cmd = f"linkchecker {file_path}.html --verbose --output=xml --check-extern > {file_path}.xml"
os.system(link_checker_cmd)
def parse_linkchecker_result(file_path):
dom = parse(f"{file_path}.xml")
data = dom.documentElement
urldatas = data.getElementsByTagName('urldata')
total_url = []
for item in urldatas:
real_url = item.getElementsByTagName('realurl')[0].childNodes[0].data
if real_url.startswith("http"):
total_url.append(real_url)
total_url = list(set(total_url))
return total_url
def http_link_check(link, file_path):
status_flag = False
for _ in range(3):
try:
with request.urlopen(link, timeout=10) as res:
status = res.status
if res.status in [200, 401, 403]:
print('{} check passed'.format(link))
status_flag = True
status = "[200]"
break
except Exception as e:
print('{} check failed'.format(link))
print("Error as follows:")
status = f"[{e}]"
print(e)
else:
print('{} check failed, The reasons for the failure are as follows:\n{}'.format(link, status))
result = f"{status}:{file_path}:{link}"
return status_flag, result
def relative_link_check(file_path):
file_dir = os.path.dirname(os.path.abspath(file_path))
dead_links = []
with open(file_path, "r") as file:
data = file.read()
regex = r"\[.*?\]\((.*?)\)"
link_list = re.findall(regex, data)
reg_a_label = r'<a name="(.*?)"> *</a>'
a_label_list = re.findall(reg_a_label, data)
relative_links = []
a_label_links = []
for link in link_list:
if link.startswith("http") is False:
if "#" in link:
a_label_links.append(link)
else:
relative_links.append(link)
relative_files = [f"{file_dir}/{link}" for link in relative_links]
for i, file in enumerate(relative_files):
if os.path.exists(file) is False:
dead_links.append(f"[404 Not Found]:{file_path}:{relative_links[i]}")
else:
print(f"{relative_files[i]} check passed")
for i, link in enumerate(a_label_links):
file_name, a_label_name = link.split("#")
if file_name:
file = f"{file_dir}/{file_name}"
if os.path.exists(file) is False:
dead_links.append(f"[404 Not Found]:{file_path}:{a_label_links[i]}")
a_labels = []
else:
with open(f"{file_dir}/{file_name}", "r") as f:
a_labels = re.findall(reg_a_label, f.read())
else:
a_labels = a_label_list
if a_label_name not in a_labels:
dead_links.append(f"[404 Not Found]:{file_path}:{a_label_links[i]}")
else:
print(f"{a_label_links[i]} check passed")
for i in dead_links:
print(i)
return dead_links
def main():
all_dead_links = []
with open(args.changed_files, "r") as f:
file_list = [file.strip() for file in f.readlines()]
os.system("rm -rf dead_links.txt")
for single_file in file_list:
if os.path.exists(single_file) is False:
continue
if single_file.endswith(".md") or single_file.endswith(".rst"):
md2html(single_file)
print(single_file)
exec_linkchecker(single_file)
all_urls = parse_linkchecker_result(single_file)
for link in all_urls:
flag, msg = http_link_check(link, single_file)
if not flag:
all_dead_links.append(msg)
relative_dead_links = relative_link_check(single_file)
all_dead_links.extend(relative_dead_links)
if all_dead_links:
with open("dead_links.txt", "a") as f:
for link in all_dead_links:
f.write(f"{link}\n")
with open("dead_links.txt", "r") as f:
print("All dead links:")
print(f.read())
exit(8)
else:
print("All links check passed")
if __name__ == '__main__':
args = parse_args()
main()
| PaddlePaddle/continuous_integration | inference/inference_test_utils/check_deadlink.py | check_deadlink.py | py | 4,957 | python | en | code | 16 | github-code | 50 |
39208801038 | #prompt for a string containing ints separated by spaces
user = input("Enter Values Separated by Spaces: ")
#load values into a list
values = user.split()
count = 0
#find and print elements that appear in list only once (elements must be printed in order in which they occur in original list)
#don't use list or string functions or methods (except .split() method)
#don't use "for x in y" iterator; use "for x in range(n)"
#HINT: loop through each element of list, them loop through list to look for a match
# don't match element to itself
for i in range(len(values)):
for j in range(len(values)):
if i != j and values[i] == values[j]:
break
else:
print(values[i], sep = " ", end = " ")
print()
print("Unique Elements: ", end = " ")
| izzyward02/IFSC1202 | 08.10 Number of Unique Elements.py | 08.10 Number of Unique Elements.py | py | 771 | python | en | code | 0 | github-code | 50 |
37237450679 |
"""
Adapted OpenFlow 1.0 L2 learning switch implementation.
Original SimpleSwitch class from: https://github.com/faucetsdn/ryu/blob/master/ryu/app/simple_switch.py
Was customised to allow RYU controller to classify flows with PCNN
Original copyright notice:
# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import tensorflow as tf
import numpy as np
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, MaxPooling2D, Dropout
from keras.models import Sequential
import time
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.lib.mac import haddr_to_bin
from ryu.lib.packet import ethernet, packet
from ryu.lib.packet import ipv4
from ryu.lib.packet import ipv6
from ryu.lib.packet import tcp
from ryu.lib.packet import udp
from ryu.lib.packet import ether_types
class SimpleSwitch(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(SimpleSwitch, self).__init__(*args, **kwargs)
self.hashmap = dict({}) #holds pims for each flow up until enough pkts are received for classification,
#ideally entries in the hashmap would be deleted after a certain amount of time to save space
self.mac_to_port = {}
self.classifier = self.getClassifier()
#Build and compile 2D CNN with the optimised parameters
def getClassifier(self):
inputShape = (12,12,1)
model = Sequential()
model.add(Conv2D(86, kernel_size=3, strides=1, activation='tanh', input_shape=inputShape, padding='same')) # C1
model.add(MaxPooling2D(pool_size=(2, 2))) # S2
model.add(Conv2D(86*2, kernel_size=5, strides=1, activation='tanh', input_shape=inputShape, padding='same')) # C3
model.add(MaxPooling2D(pool_size=(2, 2)))#S4
model.add(Flatten()) # Flatten
for i in range(2):
model.add(Dense(217, activation='relu'))
model.add(Dropout(0.05379819490458496))
model.add(Dense(66, activation='relu'))
model.add(Dropout(0.05379819490458496))
model.add(Dense(13, activation='softmax'))
model.load_weights('Optimisedss6Weights3.h5')
return model
#Treats flows as bidirecitonal so adds 2 flow entries for forward and backward directions
def add_flow(self, datapath, ip_src, ip_dst, ip_proto, srcPort, dstPort, inPort, outPort):
ofproto = datapath.ofproto
idleTimeout = 5
hardTimeout = 15
forwardMatch = datapath.ofproto_parser.OFPMatch(
dl_type=ether_types.ETH_TYPE_IP,
nw_proto=ip_proto, nw_src=ip_src, nw_dst=ip_dst, tp_src=srcPort, tp_dst=dstPort)
backwardMatch = datapath.ofproto_parser.OFPMatch(
dl_type=ether_types.ETH_TYPE_IP,
nw_proto=ip_proto, nw_src=ip_dst, nw_dst=ip_src, tp_src=srcPort, tp_dst=dstPort)
forwardAction = [datapath.ofproto_parser.OFPActionOutput(outPort)]
backwardAction = [datapath.ofproto_parser.OFPActionOutput(inPort)]
#Update switch flow table for forward and backward directions of flow
modf = datapath.ofproto_parser.OFPFlowMod(
datapath=datapath, match=forwardMatch, cookie=0,
command=ofproto.OFPFC_ADD, idle_timeout=idleTimeout, hard_timeout=hardTimeout,
priority=ofproto.OFP_DEFAULT_PRIORITY,
flags=ofproto.OFPFF_SEND_FLOW_REM, actions=forwardAction)
datapath.send_msg(modf)
modb = datapath.ofproto_parser.OFPFlowMod(
datapath=datapath, match=backwardMatch, cookie=0,
command=ofproto.OFPFC_ADD, idle_timeout=idleTimeout, hard_timeout=hardTimeout,
priority=ofproto.OFP_DEFAULT_PRIORITY,
flags=ofproto.OFPFF_SEND_FLOW_REM, actions=backwardAction)
datapath.send_msg(modb)
#Calculate packet feature vector values and normalise them
def preprocessPIM(self, pim):
maxLen = -1
maxIAT = -1
lastIAT = 0
rowCounter = 0
newPim = np.zeros((12,12)) #make a new pim padded with zeroes
for X in pim: #calculate features
if(rowCounter == 0):
iat = 0
else:
iat = X[2] - lastIAT
lastIAT = X[2]
if(X[1] > maxLen):
maxLen = X[1]
if(iat > maxIAT):
maxIAT = iat
newPim[rowCounter+3][4] = X[0]
newPim[rowCounter+3][5] = X[1]
newPim[rowCounter+3][6] = iat
rowCounter+=1
for i in range(6): #normalise values
newPim[i+3][5] /= maxLen
newPim[i+3][6] /= maxIAT
return newPim
def classifyPIM(self, pim, classifStartTime):
predTime = time.time_ns()
prediction = self.classifier.predict(pim.reshape(1, 12, 12, 1)) #classify pim
endTime = time.time_ns()
preprocessTime = predTime - classifStartTime
predictionTime = endTime-predTime
#prediction.argmax()
fileData = "\n" + str(preprocessTime) + "," + str(predictionTime)
recordFile = open('SDNrecord.txt', 'a') #write information to file for analysis
recordFile.write(fileData)
recordFile.close()
#Adds packet to pim hashmap and returns 1 if flow is to be added to flow table else returns 0
def addPktToHashMap(self, direction, length, arrivalTime, key):
pktCounter = self.hashmap[key][0]
pktCounter+=1
if(pktCounter == 6): #if enough packets acquired do classification
print("CLASSIFICATION TIME!")
startTime = time.time_ns()
pim = self.hashmap[key][1]
pim.append([direction, length, arrivalTime])
finalPim = self.preprocessPIM(pim) #process and classify pim
self.classifyPIM(finalPim, startTime)
self.hashmap.pop(key) #remove pim entry from hashmap
return 1 #return 1 as enough packets of this flow have been received
else: #else if not enough packets received yet, append this packet to pim in hashmap
self.hashmap[key][0] = pktCounter
self.hashmap[key][1].append([direction, length, arrivalTime])
return 0
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) #only acknowledge packet in messages after switch-controller handshake
def _packet_in_handler(self, ev):
msg = ev.msg #data structure containing object of received packet
datapath = msg.datapath #data structure containing object respresenting switch
ofproto = datapath.ofproto #agreed openflow protocol
pkt = packet.Packet(msg.data)
eth = pkt.get_protocol(ethernet.ethernet)
ip = pkt.get_protocol(ipv4.ipv4)
tp = pkt.get_protocol(tcp.tcp)
proto = ipv4.tcp
length = len(pkt)
arrivalTime = time.time_ns()
if(tp == None):
if(pkt.get_protocol(udp.udp) != None):
tp = pkt.get_protocol(udp.udp)
proto = ipv4.udp
else: #if no udp or tcp header ignore this packet
return
#to figure out the packet direction, 2 keys are tested, with ip source and dest ports swapped
hashKey1 = str(proto) + ip.src + ip.dst + str(tp.src_port) + str(tp.dst_port) + "" #make key to store flow packets in the hashmap
hashKey2 = str(proto) + ip.dst + ip.src + str(tp.src_port) + str(tp.dst_port) + ""
saveToFlowTable = 0
if(hashKey1 in self.hashmap):
saveToFlowTable = self.addPktToHashMap(1, length, arrivalTime, hashKey1)
elif hashKey2 in self.hashmap:
saveToFlowTable = self.addPktToHashMap(0, length, arrivalTime, hashKey2)
else:
self.hashmap[hashKey1] = [1, [[1, length, arrivalTime]]] #create new pim entry in hashmap
#Now find which ethernet port to send packet
if eth.ethertype == ether_types.ETH_TYPE_LLDP:
# ignore lldp packet
return
dst = eth.dst
src = eth.src
dpid = datapath.id #get switch id
self.mac_to_port.setdefault(dpid, {})
self.logger.info("packet in %s %s %s %s", dpid, src, dst, msg.in_port)
#learn flow 5 tuple to avoid FLOODing pkt to all ports next time
self.mac_to_port[dpid][src] = msg.in_port
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst] #get output port for this packet if its known
else:
out_port = ofproto.OFPP_FLOOD #otherwise send it out to all ports
actions = [datapath.ofproto_parser.OFPActionOutput(out_port)]
# if enough packets received for classification and the output port is known, install a flow in flow table to avoid packet_in next time
if saveToFlowTable and out_port != ofproto.OFPP_FLOOD:
self.add_flow(datapath, ip.src, ip.dst, ip.proto, tp.src_port, tp.dst_port, msg.in_port, out_port)
#Then tell switch to send packet to output port (or all ports in the case of FLOOD action)
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
out = datapath.ofproto_parser.OFPPacketOut(
datapath=datapath, buffer_id=msg.buffer_id, in_port=msg.in_port,
actions=actions, data=data)
datapath.send_msg(out)
#Handle different events
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def _port_status_handler(self, ev):
msg = ev.msg
reason = msg.reason
port_no = msg.desc.port_no
ofproto = msg.datapath.ofproto
if reason == ofproto.OFPPR_ADD:
self.logger.info("port added %s", port_no)
elif reason == ofproto.OFPPR_DELETE:
self.logger.info("port deleted %s", port_no)
elif reason == ofproto.OFPPR_MODIFY:
self.logger.info("port modified %s", port_no)
else:
self.logger.info("Illeagal port state %s %s", port_no, reason) | Khalido2/DL-on-Network-Classification | Source Files/custom_controller.py | custom_controller.py | py | 11,023 | python | en | code | 0 | github-code | 50 |
27209996783 | # -*- coding: UTF-8 -*-
# @Time : 2022/11/12 20:22
# @Author : Ranshi
# @File : main.py
# @Doc : 剑指 Offer II 071. 按权重生成随机数
import bisect
from random import random
class Solution:
def __init__(self, w: list[int]):
self.check_table = []
sum = 0
for v in w:
sum += v
self.check_table.append(sum)
self.total = sum
def pickIndex(self) -> int:
num = random() * self.total
return bisect.bisect_left(self.check_table, num)
print(Solution([1, 3]).pickIndex())
| Zranshi/leetcode | sword-means-offer-2/071/main.py | main.py | py | 571 | python | en | code | 0 | github-code | 50 |
23951895685 | def example(Simulator):
import csdl
from csdl import Model, GraphRepresentation
import numpy as np
class ErrorScalarIncorrectOrder(Model):
def define(self):
scalar = self.declare_variable('scalar', val=1.)
expanded_scalar = csdl.expand((2, 3), scalar)
self.register_output('expanded_scalar', expanded_scalar)
rep = GraphRepresentation(ErrorScalarIncorrectOrder())
sim = Simulator(rep)
sim.run()
| LSDOlab/csdl | csdl/examples/invalid/ex_expand_scalar_incorrect_order.py | ex_expand_scalar_incorrect_order.py | py | 491 | python | en | code | 5 | github-code | 50 |
2557158298 | try:
from .apic_access_module.dnaapicem import *
except:
from apic_access_module.dnaapicem import *
import pprint
def apic_get_device_config(networkDeviceId):
try:
config = get(api='api/v1/network-device/' + networkDeviceId + '/config', ver='v1')
except (BaseException, Timeout) as e:
return (False, str(e))
if isinstance(config, dict) and 'errorCode' in config.keys():
return (False, "DNA-C error: %s. Message: %s. Detail: %s" % (config['errorCode'], config['message'], config['detail']))
return (True, config)
if __name__ == "__main__":
pp = pprint.PrettyPrinter(indent=4)
networkDeviceId = '28ff0a0a-c383-4257-a047-54eb5fdf67d5'
pp.pprint(apic_get_device_config(networkDeviceId))
| oborys/DNAC-Monitoring-App | api/api_requests/get_device_config.py | get_device_config.py | py | 747 | python | en | code | 6 | github-code | 50 |
33968605753 | import os
import argparse
import yaml
from files.configured_attributes_60 import ConfiguredAttributes60
from files.site_security_59 import SiteSecurity59
from files.simple_98 import Simple98
from files.condor_mapfile import CondorMapfile
from files.pc_config_50 import PCConfig50
from files.simple_condor_98 import SimpleCondor98
from files.supported_vo_users import SupportedVOUsers
from files.supplemental_config import SupplementalConfig
from files.timezone import TimeZone
from helpers.generic_helpers import get_lightweight_component
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--site_config', help="Compiled Site Level Configuration YAML file")
parser.add_argument('--execution_id', help="Execution ID of lightweight component")
parser.add_argument('--output_dir', help="Output directory")
args = parser.parse_args()
return {
'augmented_site_level_config_file': args.site_config,
'execution_id': args.execution_id,
'output_dir': args.output_dir
}
if __name__ == "__main__":
args = parse_args()
execution_id = args['execution_id']
augmented_site_level_config_file = args['augmented_site_level_config_file']
output_dir = args['output_dir']
augmented_site_level_config = yaml.safe_load(open(augmented_site_level_config_file, 'r'))
# Files present in container by default at /etc/condor-ce/config.d/: 01-ce-auth.conf 01-ce-router.conf
# 01-common-auth.conf 02-ce-condor.conf 03-ce-shared-port.conf 03-managed-fork.conf
# Custom config files /etc/condor-ce
site_security_59 = SiteSecurity59("{output_dir}/59_site_security.conf".format(output_dir=output_dir),
augmented_site_level_config, execution_id)
site_security_59.generate_output_file()
configured_attributes_60 = ConfiguredAttributes60("{output_dir}/60_configured_attributes.conf"
.format(output_dir=output_dir), augmented_site_level_config,
execution_id)
configured_attributes_60.generate_output_file()
simple_98 = Simple98("{output_dir}/98_simple.conf".format(output_dir=output_dir),
augmented_site_level_config, execution_id)
simple_98.generate_output_file()
condor_mapfile = CondorMapfile("{output_dir}/condor_mapfile".format(output_dir=output_dir),
augmented_site_level_config, execution_id)
condor_mapfile.generate_output_file()
# Custom config files /etc/condor
pc_config_50 = PCConfig50("{output_dir}/50_PC.conf".format(output_dir=output_dir),
augmented_site_level_config, execution_id)
pc_config_50.generate_output_file()
simple_condor_98 = SimpleCondor98("{output_dir}/98_simple_condor.conf".format(output_dir=output_dir),
augmented_site_level_config, execution_id)
simple_condor_98.generate_output_file()
timezone = TimeZone("{output_dir}/timezone".format(output_dir=output_dir), augmented_site_level_config, execution_id)
timezone.generate_output_file()
supported_vo_users = SupportedVOUsers("{output_dir}/supported_vo_users.conf".format(output_dir=output_dir),
augmented_site_level_config, execution_id)
supported_vo_users.generate_output_file()
lc = get_lightweight_component(augmented_site_level_config, execution_id)
if os.path.exists('{output_dir}/supplemental_mapfile'.format(output_dir=output_dir)):
os.remove('{output_dir}/supplemental_mapfile'.format(output_dir=output_dir))
components = lc.get('supplemental_config', [])
if not (components is None or len(components) ==0):
for component in components:
supplemental_config = SupplementalConfig(output_dir, augmented_site_level_config, execution_id, component)
supplemental_config.generate_output_file() | simple-framework/simple_htcondor_ce | sh/pre_config/main.py | main.py | py | 4,006 | python | en | code | 3 | github-code | 50 |
22034033280 | import chellow.scenario
from chellow.models import Session, Contract
sess = None
try:
sess = Session()
db_id = Contract.get_non_core_by_name(sess, 'aahedc').id
finally:
if sess is not None:
sess.close()
create_future_func = chellow.scenario.make_create_future_func_simple(
'aahedc', ['aahedc_gbp_per_gsp_kwh'])
def hh(supply_source):
bill = supply_source.supplier_bill
rate_set = supply_source.supplier_rate_sets['aahedc-rate']
try:
supply_source.caches['aahedc']
except KeyError:
supply_source.caches['aahedc'] = {}
try:
future_funcs = supply_source.caches['future_funcs']
except KeyError:
future_funcs = {}
supply_source.caches['future_funcs'] = future_funcs
try:
future_funcs[db_id]
except KeyError:
future_funcs[db_id] = {
'start_date': None, 'func': create_future_func(1, 0)}
for hh in supply_source.hh_data:
bill['aahedc-gsp-kwh'] += hh['gsp-kwh']
rate = supply_source.hh_rate(
db_id, hh['start-date'], 'aahedc_gbp_per_gsp_kwh')
rate_set.add(rate)
bill['aahedc-gbp'] += hh['gsp-kwh'] * rate
| JuviAndaya/chellow | chellow/aahedc.py | aahedc.py | py | 1,218 | python | en | code | null | github-code | 50 |
105546141 | #!python3.8
cases = int(input())
inform, total, rabbits, rats, flogs = (), 0, 0, 0, 0
while cases:
inform = (input().split())
if inform[1] == 'C':
rabbits += int(inform[0])
elif inform[1] == 'R':
rats += int(inform[0])
elif inform[1] == 'S':
flogs += int(inform[0])
total += int(inform[0])
cases -= 1
print(f'''Total: {total} cobaias
Total de coelhos: {rabbits}
Total de ratos: {rats}
Total de sapos: {flogs}
Percentual de coelhos: {rabbits*100/total:.2f} %
Percentual de ratos: {rats*100/total:.2f} %
Percentual de sapos: {flogs*100/total:.2f} %''')
| certainlyWrong/Solu-Questions | beecrowd/1094.py | 1094.py | py | 615 | python | en | code | 0 | github-code | 50 |
7382485717 | import qml
from glob import glob
import pdb
from xyz2mol import *
from qml.utils import alchemy
import pandas as pd
from rdkit import Chem
import ast
from drfp import DrfpEncoder
from gzip_regressor import regress, cross_val_and_fit_kernel_ridge, predict_kernel_ridge_regression
from smiles_tokenizer import tokenize
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
import random
from sklearn.model_selection import GridSearchCV
from sklearn.kernel_ridge import KernelRidge
random.seed(42)
def xyz2smiles(path):
atoms, charge, coordinates = read_xyz_file(path)
return atoms, charge, coordinates
def side_2_smiles(PATHS):
SMILES = []
for path in PATHS:
atoms, charge, coordinates = xyz2smiles(path)
print(atoms)
if len(atoms) == 1:
atoms = [alchemy.ELEMENT_NAME[atoms[0]]]
#coordinates = [coordinates]
SMILES.append(atoms[0])
else:
mol = xyz2mol(atoms, coordinates)[0]
SMILES.append(Chem.MolToSmiles(mol))
return SMILES
def rxn_smiles(reac_SMILES,prod_SMILES):
left = "{}.{}>>".format(reac_SMILES[0],reac_SMILES[1])
right = "{}.{}".format(prod_SMILES[0],prod_SMILES[1])
return left + right
def learning_curve(X_train, X_test, y_train, y_test):
#gammas, lambdas = np.logspace(-3, 3, 7), [1e-8]
gammas, lambdas = np.logspace(-1, 4, 15), [1e-8, 1e-6]
errors = []
for n in N_train:
best_alpha, best_gamma, best_lambda_, best_score = cross_val_and_fit_kernel_ridge(X_train[:n], y_train[:n],5, gammas, lambdas)
print("n = {}, best_gamma = {}, best_lambda = {}, best_score = {}".format(n, best_gamma, best_lambda_, best_score))
test_preds = predict_kernel_ridge_regression(X_train[:n], X_test, best_alpha, best_gamma)
# compute MAE
mae = np.mean(np.abs(test_preds - y_test))
#print n and mae
print("n = {}, MAE = {}".format(n, mae))
errors.append(mae)
return errors
#read pandas dataframe
if __name__ == "__main__":
PREPROCESS,REGRESSION = False,True
PLOT = True
if PREPROCESS:
reactions_SN2 = pd.read_csv("/home/jan/projects/testing_zip/molzip_react/data/SN2-20/reactions.csv")
REACTANTS, PRODUCTS, Y = reactions_SN2["reactant"].values, reactions_SN2["product"].values, reactions_SN2["rxn_nrj"].values
REACT_SMILES = []
for reacts, products, y in zip(REACTANTS, PRODUCTS, Y):
reacts, products = ast.literal_eval(reacts), ast.literal_eval(products)
reactant_SMILES,products_SMILES = side_2_smiles(reacts), side_2_smiles(products)
reac_smi = rxn_smiles(reactant_SMILES,products_SMILES)
print(reac_smi)
REACT_SMILES.append(reac_smi)
#pdb.set_trace()
REACT_SMILES = np.array(REACT_SMILES)
fps, mapping = DrfpEncoder.encode(REACT_SMILES, mapping=True, n_folded_length=512)
np.savez_compressed("react_SN2-20.npz", fps=fps, mapping=mapping, REACT_SMILES=REACT_SMILES, Y=Y)
REACTION_PANDAS = pd.DataFrame({"REACT_SMILES":REACT_SMILES, "rxn_nrj":Y})
#save the dataframe
REACTION_PANDAS.to_csv("reaction_SN2-20.csv")
if REGRESSION:
data = np.load("react_SN2-20.npz", allow_pickle=True)
fps, mapping, REACT_SMILES, Y = data["fps"], data["mapping"], data["REACT_SMILES"], data["Y"]
fps = fps.astype(str)
FPS_single_smiles = np.array([''.join(row) for row in fps])
X_train, X_test,FPS_train, FPS_test, y_train, y_test = train_test_split(REACT_SMILES,FPS_single_smiles, Y, test_size=0.10, random_state=42)
N_train = [2**i for i in range(5, 11)]
N_train.append(len(X_train))
KNN, KRR = False, False
errors_KNN = []
if KNN:
try:
data = np.load("learning_curve_KNN.npz", allow_pickle=True)
errors_KNN, N_train = data["errors_KNN"], data["N_train"]
except:
k = 5
for n in N_train:
test_preds = np.mean(regress(X_train[:n], y_train[:n], X_test, k), axis=1)
# compute MAE
mae = np.mean(np.abs(test_preds - y_test))
#print n and mae
print("n = {}, MAE = {}".format(n, mae))
errors_KNN.append(mae)
np.savez_compressed("learning_curve_KNN.npz", errors_KNN=errors_KNN, N_train=N_train)
if KRR:
print("GZIP REGRESSION")
#check if learning curve exists
try:
data = np.load("learning_curve_KRR.npz", allow_pickle=True)
error_REACT_SMILES, error_FPS, N_train = data["error_REACT_SMILES"], data["error_FPS"], data["N_train"]
except:
error_REACT_SMILES = learning_curve(X_train, X_test, y_train, y_test)
error_FPS= learning_curve(FPS_train, FPS_test, y_train, y_test)
#save the learning curve
np.savez_compressed("learning_curve_KRR.npz", error_REACT_SMILES=error_REACT_SMILES, error_FPS=error_FPS, N_train=N_train)
if PLOT:
data = np.load("learning_curve_KRR.npz", allow_pickle=True)
error_REACT_SMILES, error_FPS, N_train = data["error_REACT_SMILES"], data["error_FPS"], data["N_train"]
data = np.load("learning_curve_KNN.npz", allow_pickle=True)
errors_KNN, N_train = data["errors_KNN"], data["N_train"]
import seaborn as sns
import matplotlib.pyplot as plt
# Set the theme style
sns.set_style("whitegrid")
fig, ax = plt.subplots()
#set xticks
# Set both axis to log scale
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xticks(N_train)
ax.set_xticklabels(N_train)
#set yticks
ax.set_yticks([0.1, 1, 10, 100])
ax.set_yticklabels([0.1, 1, 10, 100])
# Set color palette
palette = sns.color_palette("Set2")
# Plot the data with markers and enhanced line width
ax.plot(N_train, error_FPS, marker='o', linewidth=2, color=palette[0], label="FPS-KRR")
ax.plot(N_train, error_REACT_SMILES, marker='s', linewidth=2, color=palette[1], label="REACT_SMILES-KRR")
ax.plot(N_train, errors_KNN, marker='^', linewidth=2, color=palette[2], label="REACT_SMILES-KNN")
# Set labels with enhanced font sizes
ax.set_xlabel("$N$", fontsize=21)
ax.set_ylabel("MAE [kcal/mol]", fontsize=21)
# Improve legend appearance
ax.legend(loc='center right', shadow=False, frameon=False, fontsize=12)
#make the legend transparent
#make tight layout
plt.tight_layout()
# Save the figure
fig.savefig("./figures/learning_curve.png")
| daenuprobst/molzip | drafts/molzip_react/react.py | react.py | py | 7,082 | python | en | code | 49 | github-code | 50 |
71741632796 | import json
from datetime import datetime
from typing import Dict, Optional
import requests
def get_data(url: str, headers: Dict[str, str]) -> Optional[str]:
"""GET HTTP Request to URL using custom Headers.
Args:
url (str): API URL.
headers (Dict[str, str]): API Headers.
Returns:
Optional[str]: API Response as JSON string or None.
"""
resp = requests.get(url, headers=headers)
if resp.status_code == 200:
data = resp.json()
data["extracted_at"] = datetime.utcnow()
return json.dumps(data, ensure_ascii=False, default=str)
print("---< Error >---")
print(f"URL: {url}")
print(f"Status Code: {resp.status_code}")
print(f"Content: {resp.content}")
return None
| avcaliani/esports-analytics | web-crawlers/brawl-stars/utils/http.py | http.py | py | 760 | python | en | code | 0 | github-code | 50 |
17085988097 | plates = {"4A2 3000": "František Novák",
"6P5 4747": "Jana Pilná",
"3B7 3652": "Jaroslav Sečkár",
"1P5 5269": "Marta Nováková",
"37E 1252": "Martina Matušková",
"2A5 2241": "Jan Král"
}
print("SPZtku z plzenskeho kraje maji: ")
for key, value in plates.items():
if key[1] == "P":
print(value) | petrazborilova1/intenzivnikurzpython | bonus.py | bonus.py | py | 376 | python | en | code | 0 | github-code | 50 |
70420427035 | #reciprocle of no
try:
lst=input("enter the elements of list:").split()
out=[]
for i in range(len(lst)):
out.append(1/i)
print(out)
except ZeroDivisionError:
print("error occured !!")
except EOFError:
print("error ocured !!")
except KeyboardInterrupt:
print("error occured !!")
else:
print("your code is running successfully")
| FlowerbeanAnsh/python_problems_2 | exceptions_reciprocle.py | exceptions_reciprocle.py | py | 388 | python | en | code | 0 | github-code | 50 |
4603204578 | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import os
from dotenv import load_dotenv
# Replace these values with your actual PostgreSQL credentials
POSTGRES_USERNAME = "postgres"
POSTGRES_PASSWORD = "jnjdeploy1"
POSTGRES_HOSTNAME = "192.168.1.101:5434"
POSTGRES_DB_NAME = "opusKanbanDB"
POSTGRES_DB_CONNECT = "postgresql+psycopg2"
load_dotenv(".env")
# Create the connection string
SQLALCHEMY_DATABASE_URL = os.environ["POSTGRES_URL_DOCKER"]
# SQLALCHEMY_DATABASE_URL = os.environ["POSTGRES_URL_LOCAL"]
engine = create_engine(
SQLALCHEMY_DATABASE_URL, connect_args={}, future=True
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine, future=True)
Base = declarative_base()
# DB Utilities
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close() | JacobSima/trello-backend-fastapi | db/db_setup.py | db_setup.py | py | 941 | python | en | code | 0 | github-code | 50 |
22832549140 | from PIL import Image
import os
def CaptchaParse(img):
captcha=""
dirs=os.listdir("Chars")
img=img.convert('L')
pix=img.load()
for y in range(1,44):
for x in range(1,179):
if pix[x,y-1]==255 and pix[x,y]==0 and pix[x,y+1]==255:
pix[x,y]=255
if pix[x-1,y]==255 and pix[x,y]==0 and pix[x+1,y]==255:
pix[x,y]=255
if pix[x,y]!=255 and pix[x,y]!=0:
pix[x,y]=255
for j in range(30,181,30):
ch=img.crop((j-30,12,j,44))
pix1=ch.load()
matches={}
for i in dirs:
match=0
black=0
pixx=0
im2=Image.open("Chars/"+i)
im2=im2.convert('L')
pix2=im2.load()
for y in range(0,32):
for x in range(0,30):
## if pix1[x,y]==pix2[x,y] and pix2[x,y]==(0,0,0):
## match+=1
## if pix2[x,y]==(0,0,0):
## black+=1
## if pix1[x,y]==(0,0,0):
## pixx+=1
if pix1[x,y]==pix2[x,y] and pix2[x,y]==0:
match+=1
if pix2[x,y]==0:
black+=1
if pix1[x,y]==0:
pixx+=1
if float(match)/float(black)>=0.80:
perc=float(match)/float(black)
matches.update({perc:i[0].upper()})
try:
captcha+=matches[max(matches.keys())]
except ValueError:
captcha+="0"
## img.save("testcaptcha\\"+captcha+".png")
return captcha
##img=Image.open("2.png")
##print CaptchaParse(img)
| shubhodeep9/go-MyVIT | api/login/CaptchaVtopBeta/parser.py | parser.py | py | 1,713 | python | en | code | 3 | github-code | 50 |
20913164162 | # Birthday Json
# https://www.practicepython.org/exercise/2017/02/06/34-birthday-json.html
import json;
# ~ birthdates = {"Britney Spears": "02.12.81", "Selena Gomez":"22.07.87", "Arnold Schwarzenegger":"30.07.47"};
# ~ with open("exercise 34 info.json", "r") as f:
# ~ info = json.load(f)
# ~ print(info);
# ~ for k,v in info.items():
# ~ print(k,v);
# ~ info_about_me = {
# ~ "Nicole Scherzinger": "29.06.78",
# ~ "Brad Pitt": "18.12.63"}
# ~ # update file!
# ~ with open("exercise 34 info.json", "r+") as f:
# ~ data = json.load(f)
# ~ data.update(info_about_me)
# ~ f.seek(0)
# ~ json.dump(data, f)
# ~ # open and print
# ~ with open("exercise 34 info.json", "r") as f:
# ~ info = json.load(f)
# ~ print(info);
with open("exercise 34 info.json", "r") as f:
info = json.load(f)
def birthDate(D):
print("Welcome to the birthday dictionary. We know the birthdays of:\n");
# ~ with open("exercise 34 info.json", "r") as f:
# ~ info = json.load(f)
for i in D.keys():
print(i);
while True:
ask_user = input("Add B.D. or check B.D or All List. Enter 'add' or 'check' or 'alllist'. Here: ");
if ask_user == "add":
tempDict = {};
FNandLN = input("Enter First Name and Last Name 'Brad Pitt' Here: ");
Bdate = input("Enter birthday 'dd.mm.yy' Here: ");
tempDict = {FNandLN:Bdate}
print(f"{FNandLN} added with {Bdate}");
# ~ updateDict(D,tempDict)
with open("exercise 34 info.json", "r+") as f:
data = json.load(f)
data.update(tempDict)
f.seek(0)
json.dump(data, f)
tempDict = {};
elif ask_user == "check":
with open("exercise 34 info.json", "r") as f:
info = json.load(f);
# ~ print(" ".join(i for i in info.keys()));
print("\nWho's birthday do you want to look up?");
UI = input(" Enter: ");
print(f"{UI}'s birthday is {info[UI]}.");
elif ask_user == "alllist":
with open("exercise 34 info.json", "r") as f:
info = json.load(f);
for i in info.keys():
print(i);
else:
print("Enter 'add' or 'check' please!");
# ~ for i in D.keys():
# ~ print(i);
def updateDict(the_file, update_dict):
# update file!
with open(the_file, "r+") as f:
data = json.load(f)
data.update(update_dict)
f.seek(0)
json.dump(data, f)
return the_file;
birthDate(info);
| kristaps-m/practicepython.org-solutions | solutions_and_used_files/34.py | 34.py | py | 2,731 | python | en | code | 0 | github-code | 50 |
27759558518 | import numpy as np
import pandas as pd
import pyodbc as pyodbc
import face_recognition as face_recognition
from PIL import Image #Python Imaging Library
import io
import cv2
from matplotlib import pyplot as plt
import imutils
import easyocr
import pytesseract
pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'
auth_id = 1
cnxn = pyodbc.connect(driver='{SQL Server}', server='DESKTOP-9KO479H\SQLEXPRESS', database='facerecognition',trusted_connection='yes')
cursor = cnxn.cursor()
cursor.execute("SELECT vAuthImage from dr_vh_auth where authentication_id = ?",(auth_id))
for row in cursor:
print("***********************************")
print("copying vehicle image from dr_vh_auth ")
driver_authentication_image = row[0]
driver_authentication_image_data = driver_authentication_image
da_image = Image.open(io.BytesIO(driver_authentication_image_data))
da_image.save("vehicleToAuthenticate.jpg")
# Read the image file
im = cv2.imread('vehicleToAuthenticate.jpg')
imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
plt.imshow(cv2.cvtColor(imgray, cv2.COLOR_BGR2RGB))
#plt.show()
bfilter = cv2.bilateralFilter(imgray, 11, 17, 17) #Noise reduction
#canny_edge = cv2.Canny(bfilter, 30, 200) #Edge detection
#plt.imshow(cv2.cvtColor(canny_edge, cv2.COLOR_BGR2RGB))
#plt.show()
ret, thresh = cv2.threshold(imgray, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours=sorted(contours, key = cv2.contourArea, reverse = True)[:50]
#cv2.drawContours(im, contours, -1, (0,255,0), 3)
#plt.imshow(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))
#plt.show()
location = None
for contour in contours:
approx = cv2.approxPolyDP(contour, 10, True)
if len(approx) == 4:
location = approx
break
#print(location)
mask = np.zeros(imgray.shape, np.uint8)
new_image = cv2.drawContours(mask, [location], 0,255, -1)
new_image = cv2.bitwise_and(im, im, mask=mask)
plt.imshow(cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB))
#plt.show()
(x,y) = np.where(mask==255)
(x1, y1) = (np.min(x), np.min(y))
(x2, y2) = (np.max(x), np.max(y))
cropped_image = imgray[x1:x2+1, y1:y2+1]
cropped_image = cv2.bilateralFilter(cropped_image, 10, 10, 10)
(thresh, cropped_image) = cv2.threshold(cropped_image, 150, 180, cv2.THRESH_BINARY)
plt.imshow(cv2.cvtColor(cropped_image, cv2.COLOR_BGR2RGB))
plt.show()
print("*************2**********************")
text = pytesseract.image_to_string(cropped_image,config='--psm 1')
print("*************3**********************", text)
reader = easyocr.Reader(['en'])
result = reader.readtext(cropped_image,paragraph="False")
print("*************4**********************", result)
print(" cc" , result[0][1])
cursor.execute("UPDATE dr_vh_auth SET vAuthImage_Status = ? , vNum = ? WHERE authentication_id = ?", ("Success",result[0][1] ,auth_id))
cnxn.commit()
cnxn.close() | DAB103-2021/dab_capstone | user_authentication/vehicle_number.py | vehicle_number.py | py | 2,912 | python | en | code | 0 | github-code | 50 |
34203590673 | from __future__ import print_function
from abc import abstractmethod
import math
import random
import copy
from matplotlib import pyplot
class ComputationalNode(object):
@abstractmethod
def forward(self, x): # x is an array of scalars
pass
@abstractmethod
def backward(self, dz): # dz is a scalar
pass
class MultiplyNode(ComputationalNode):
def __init__(self):
self.x = [0., 0.] # x[0] is input, x[1] is weight
def forward(self, x):
self.x = x
return self.x[0] * self.x[1]
def backward(self, dz):
return [dz * self.x[1], dz * self.x[0]]
class SumNode(ComputationalNode):
def __init__(self):
self.x = [] # x is in an array of inputs
def forward(self, x):
self.x = x
return sum(self.x)
def backward(self, dz):
return [dz for xx in self.x]
class SigmoidNode(ComputationalNode):
def __init__(self):
self.x = 0. # x is an input
def forward(self, x):
self.x = x
return self._sigmoid(self.x)
def backward(self, dz):
return dz * self._sigmoid(self.x) * (1. - self._sigmoid(self.x))
def _sigmoid(self, x):
return 1. / (1. + math.exp(-x))
class ReluNode(ComputationalNode):
def __init__(self):
self.x = 0. # x is an input
def forward(self, x):
self.x = x
return self._relu(self.x)
def backward(self, dz):
return dz * (1. if self.x > 0. else 0.)
def _relu(self, x):
return max(0., x)
class NeuronNode(ComputationalNode):
def __init__(self, n_inputs, activation):
self.n_inputs = n_inputs
self.multiply_nodes = [] # for inputs and weights
self.sum_node = SumNode() # for sum of inputs*weights
for n in range(n_inputs): # collect inputs and corresponding weights
mn = MultiplyNode()
mn.x = [1., random.gauss(0., 0.1)] # init input weights
self.multiply_nodes.append(mn)
mn = MultiplyNode() # init bias node
mn.x = [1., random.gauss(0., 0.01)] # init bias weight
self.multiply_nodes.append(mn)
if activation == 'sigmoid':
self.activation_node = SigmoidNode()
elif activation == 'relu':
self.activation_node = ReluNode()
else:
raise RuntimeError('Unknown activation function "{0}".'.format(activation))
self.previous_deltas = [0.] * (self.n_inputs + 1)
self.gradients = []
def forward(self, x): # x is a vector of inputs
x = copy.copy(x)
x.append(1.) # for bias
for_sum = []
for i, xx in enumerate(x):
inp = [x[i], self.multiply_nodes[i].x[1]]
for_sum.append(self.multiply_nodes[i].forward(inp))
summed = self.sum_node.forward(for_sum)
summed_act = self.activation_node.forward(summed)
return summed_act
def backward(self, dz):
dw = []
b = dz[0] if type(dz[0]) == float else sum(dz)
b = self.activation_node.backward(b)
b = self.sum_node.backward(b)
for i, bb in enumerate(b):
dw.append(self.multiply_nodes[i].backward(bb)[1])
self.gradients.append(dw)
return dw
def update_weights(self, learning_rate, momentum):
for i, multiply_node in enumerate(self.multiply_nodes):
mean_gradient = sum([grad[i] for grad in self.gradients]) / len(self.gradients)
delta = learning_rate*mean_gradient + momentum*self.previous_deltas[i]
self.previous_deltas[i] = delta
self.multiply_nodes[i].x[1] -= delta
self.gradients = []
class NeuralLayer(ComputationalNode):
def __init__(self, n_inputs, n_neurons, activation):
self.n_inputs = n_inputs
self.n_neurons = n_neurons
self.activation = activation
self.neurons = []
# construct layer
for _ in range(n_neurons):
neuron = NeuronNode(n_inputs, activation)
self.neurons.append(neuron)
def forward(self, x): # x is a vector of "n_inputs" elements
layer_output = []
for neuron in self.neurons:
neuron_output = neuron.forward(x)
layer_output.append(neuron_output)
return layer_output
def backward(self, dz): # dz is a vector of "n_neurons" elements
b = []
for idx, neuron in enumerate(self.neurons):
neuron_dz = [d[idx] for d in dz]
neuron_dz = neuron.backward(neuron_dz)
b.append(neuron_dz[:-1])
return b # b is a vector of "n_neurons" elements
def update_weights(self, learning_rate, momentum):
for neuron in self.neurons:
neuron.update_weights(learning_rate, momentum)
class NeuralNetwork(ComputationalNode):
def __init__(self):
# construct neural network
self.layers = []
def add(self, layer):
self.layers.append(layer)
def forward(self, x): # x is a vector which is an input for neural net
prev_layer_output = None
for idx, layer in enumerate(self.layers):
if idx == 0: # input layer
prev_layer_output = layer.forward(x)
else:
prev_layer_output = layer.forward(prev_layer_output)
return prev_layer_output # actually an output from last layer
def backward(self, dz):
next_layer_dz = None
for idx, layer in enumerate(self.layers[::-1]):
if idx == 0:
next_layer_dz = layer.backward(dz)
else:
next_layer_dz = layer.backward(next_layer_dz)
return next_layer_dz
def update_weights(self, learning_rate, momentum):
for layer in self.layers:
layer.update_weights(learning_rate, momentum)
def fit(self, X, Y, learning_rate, momentum, nb_epochs, shuffle=False, verbose=0):
assert len(X) == len(Y)
hist = []
for epoch in range(nb_epochs):
if shuffle:
random.seed(epoch)
random.shuffle(X)
random.seed(epoch)
random.shuffle(Y)
total_loss = 0.0
for x, y in zip(X, Y):
# forward pass to compute output
pred = self.forward(x)
# compute loss
grad = 0.0
for o, t in zip(pred, y):
total_loss += (t - o) ** 2.
grad += -(t - o)
# backward pass to compute gradients
self.backward([[grad]])
# update weights with computed gradients
self.update_weights(learning_rate, momentum)
hist.append(total_loss)
if verbose == 1:
print('Epoch {0}: loss {1}'.format(epoch + 1, total_loss))
print('Loss: {0}'.format(total_loss))
return hist
def predict(self, x):
return self.forward(x)
if __name__ == '__main__':
nn = NeuralNetwork()
nn.add(NeuralLayer(2, 2, 'sigmoid'))
nn.add(NeuralLayer(2, 1, 'sigmoid'))
# XOR example
# obucavajuci skup
X = [[0., 0.],
[1., 0.],
[0., 1.],
[1., 1.]]
Y = [[0.],
[1.],
[1.],
[0.]]
# obucavanje neuronske mreze
history = nn.fit(X, Y, learning_rate=0.1, momentum=0.3, nb_epochs=10000, shuffle=True, verbose=0)
# provera da li radi
print(nn.predict([0., 0.]))
print(nn.predict([1., 0.]))
print(nn.predict([0., 1.]))
print(nn.predict([1., 1.]))
# plotovanje funkcije greske
pyplot.plot(history)
pyplot.show() | ftn-ai-lab/ori-2022-e2 | 06-ann-comp-graph/src/solutions/ann_comp_graph.py | ann_comp_graph.py | py | 7,942 | python | en | code | 4 | github-code | 50 |
40560485609 | class Produto:
def __init__(self, id, cod_barra, descricao, fornecedor, valor_venda, valor_custo):
self.id = id
self.cod_barra = cod_barra
self.descricao = descricao
self.fornecedor = fornecedor
self.valor_venda = valor_venda
self.valor_custo = valor_custo
def retornar_lista_produto(self):
return [self.id, self.cod_barra, self.fornecedor, self.valor_venda, self.valor_custo]
| rafael2044/projeto_sistema_gest-o_estoque_venda | Projeto/Geral/Classes/Produto.py | Produto.py | py | 463 | python | pt | code | 0 | github-code | 50 |
25681455505 | from modules.agents import REGISTRY as agent_REGISTRY
from components.action_selectors import REGISTRY as action_REGISTRY
import torch as th
from modules.agents.thgc_agent import THGCAgent
group = 2
# This multi-agent controller shares parameters between agents
class BasicMAC:
def __init__(self, scheme, groups, args):
self.n_agents = args.n_agents
self.args = args
input_shape = self._get_input_shape(scheme)
# print(input_shape)
self.thgc = THGCAgent(input_shape, self.args)
self._build_agents(input_shape, self.args)
self.agent_output_type = args.agent_output_type
self.grouping_method = args.method1 # for location based grouping or args.method2 for health based grouping
self.action_selector = action_REGISTRY[args.action_selector](args)
self.hidden_states = None
def select_actions(self, ep_batch, t_ep, t_env, bs=slice(None), test_mode=False):
# Only select actions for the selected batch elements in bs
avail_actions = ep_batch["avail_actions"][:, t_ep]
agent_outputs = self.forward(ep_batch, t_ep, test_mode=test_mode)
chosen_actions = self.action_selector.select_action(agent_outputs[bs], avail_actions[bs], t_env, test_mode=test_mode)
return chosen_actions
'''
for t in range(batch.max_seq_length):
agent_outs = self.mac.forward(batch, t=t)
mac_out.append(agent_outs)
'''
# look at 't' above
def forward(self, ep_batch, t, test_mode=False):
batch_input = self._build_inputs(ep_batch, t)
agents_groups = self._build_grps_wise_inp(batch_input, ep_batch[self.grouping_method][:, t, ...], ep_batch.device)
adj_mat = ep_batch["adj_matrix"][:, t, ...]
avail_actions = ep_batch["avail_actions"][:, t]
agent_wise_input = list(th.unbind(agents_groups), dim=0)
for agent_group in agents_groups:
for agent in agent_group:
agent_inputs = agent_wise_input[agent]
h,self.hidden_states = self.agent(agent_inputs, self.hidden_states, adj_mat,intra=True, first=True)
list1 = []
'''Check whether below stacking is needed or not.'''
for k in h:
list1.append(k)
hg = th.stack(list1, dim=0) # stacking all agents inputs
for agent in agent_group:
agent_inputs = agent_wise_input[agent]
z,self.hidden_states = self.agent(hg, self.hidden_states, adj_mat,intra=True,GAT_intra=True) # after mlp layer
h_,self.hidden_states = self.agent(h, self.hidden_states, adj_mat,intra=True) # after mlp layer
H = th.stack((h_, z),dim=0) # stacking along row
V = th.sum(H,dim=1) # do aggregate sum taking along a row (representing a group)
Vg = th.stack((k for k in V), dim=0)
Qg,self.hidden_states = self.agent(Vg, self.hidden_states, adj_mat,inter=True,GAT_intra=True)
Ql,self.hidden_states = self.agent(V, self.hidden_states, adj_mat,inter=True)
agent_outs = th.stack((Ql, Qg),dim=0)#, self.hidden_states = self.agent(agent_inputs, self.hidden_states)
# Softmax the agent outputs if they're policy logits
if self.agent_output_type == "pi_logits":
if getattr(self.args, "mask_before_softmax", True):
# Make the logits for unavailable actions very negative to minimise their affect on the softmax
reshaped_avail_actions = avail_actions.reshape(ep_batch.batch_size * self.n_agents, -1)
agent_outs[reshaped_avail_actions == 0] = -1e10
agent_outs = th.nn.functional.softmax(agent_outs, dim=-1)
if not test_mode:
# Epsilon floor
epsilon_action_num = agent_outs.size(-1)
if getattr(self.args, "mask_before_softmax", True):
# With probability epsilon, we will pick an available action uniformly
epsilon_action_num = reshaped_avail_actions.sum(dim=1, keepdim=True).float()
agent_outs = ((1 - self.action_selector.epsilon) * agent_outs
+ th.ones_like(agent_outs) * self.action_selector.epsilon/epsilon_action_num)
if getattr(self.args, "mask_before_softmax", True):
# Zero out the unavailable actions
agent_outs[reshaped_avail_actions == 0] = 0.0
return agent_outs.view(ep_batch.batch_size, self.n_agents, -1)
def init_hidden(self, batch_size):
self.hidden_states = self.agent.init_hidden().unsqueeze(0).expand(batch_size, self.n_agents, -1) # bav
def parameters(self):
return list(self.agent.parameters())+self.gat_params()
def load_state(self, other_mac):
self.agent.load_state_dict(other_mac.agent.state_dict())
self.load_gat_state(other_mac)
def cuda(self):
self.agent.cuda()
self.gat_cuda()
def save_models(self, path):
th.save(self.agent.state_dict(), "{}/agent.th".format(path))
self.save_gat(path)
def load_models(self, path):
self.agent.load_state_dict(th.load("{}/agent.th".format(path), map_location=lambda storage, loc: storage))
self.gat_load(path)
def _build_agents(self, input_shape):
self.agent = agent_REGISTRY[self.args.agent](input_shape, self.args)
def _build_inputs(self, batch, t):
# Assumes homogenous agents with flat observations.
# Other MACs might want to e.g. delegate building inputs to each agent
bs = batch.batch_size
inputs = []
inputs.append(batch["obs"][:, t]) # b1av
if self.args.obs_last_action:
if t == 0:
inputs.append(th.zeros_like(batch["actions_onehot"][:, t]))
else:
inputs.append(batch["actions_onehot"][:, t-1])
if self.args.obs_agent_id:
inputs.append(th.eye(self.n_agents, device=batch.device).unsqueeze(0).expand(bs, -1, -1))
# inputs = th.cat([x.reshape(bs*self.n_agents, -1) for x in inputs], dim=1)
inputs = th.cat([x.reshape(bs*self.n_agents, -1) for x in inputs], dim=1)
return inputs
def _get_input_shape(self, scheme):
input_shape = scheme["obs"]["vshape"]
if self.args.obs_last_action:
input_shape += scheme["actions_onehot"]["vshape"][0]
if self.args.obs_agent_id:
input_shape += self.n_agents
return input_shape
def _build_grps_wise_inp(self, batch, batch_size, groups, device):
# batch_size is 32
inp_obs = batch.reshape(batch_size, self.n_agents, -1).to(device=device)
agent_inputs = []
for group in groups:
for agent in group:
inps = [inp_obs[:,k,:] for k in agent]
agent_inputs.append(inps)
return th.tensor(agent_inputs)
| AYUSH-ISHAN/Type-Based_Heirarchial_MARL_SC2 | controllers/basic_controller.py | basic_controller.py | py | 6,986 | python | en | code | 2 | github-code | 50 |
28976019445 | from state import State
from decode import decodeInstruction, decodeToIntList, decodeAll
from encode import encodeInstructionMap, encodeList, encodeListPowerForm
from instruction import AddI, SubI, HaltI
from program import Program
encodeInput = [SubI(1, 1, 2),
AddI(0, 0),
SubI(2, 3, 4),
AddI(0, 2),
HaltI()]
# encodeInput = [SubI(1, 1, 6),
# SubI(2, 2, 4),
# AddI(0, 3),
# AddI(3, 1),
# SubI(3, 5, 0),
# AddI(2, 4),
# HaltI()
# ]
# encodeInput = [SubI(0, 1, 4),
# AddI(1, 2),
# SubI(0, 3, 4),
# AddI(2, 0),
# SubI(1, 4, 5),
# SubI(2, 6, 7),
# AddI(0, 5),
# HaltI()]
# encode(encodeInput)
# decode(decodeInput)
# decode(encode(encodeInput))
# print(encodeList([3])) # 8
# print(encodeList([1, 3])) # 34
# print(encodeList([2, 1, 3])) # 276
# print(decodeToIntList(8)) # [3]
# print(decodeToIntList(34)) # [1, 3]
# print(decodeToIntList(276)) # [2, 1 ,3]
decodeInput = 2**216*833 # 786432
def encode(arr):
earr = encodeInstructionMap(arr)
print(earr)
efarr = encodeList(earr)
print(encodeListPowerForm(earr))
return efarr
def decode(n):
darr = decodeToIntList(n)
print(darr)
dfarr = decodeAll(darr)
print(list(map(str, dfarr)))
return dfarr
def runProgram(state, instructions):
program = Program(state, instructions)
program.execute(10)
nl = '\n'
print(f"{nl.join(str(x) for x in encodeInput)}")
runProgram(State(0, [0, 1, 2]), encodeInput)
# encode(encodeInput)
# print(encodeInstructionMap(encodeInput))
# decodeAll([1144, 448])
| Unevilicorn/register_machine | main.py | main.py | py | 1,782 | python | en | code | 1 | github-code | 50 |
28673897588 | # Rearrange an array such that arr[i] = i
# Given an array of elements of length N, ranging from 0 to N – 1. All elements may not be present in the array. If the element is not present then there will be -1 present in the array. Rearrange the array such that A[i] = i and if i is not present, display -1 at that place.
# Input : arr = {-1, -1, 6, 1, 9, 3, 2, -1, 4, -1}
# Output : [-1, 1, 2, 3, 4, -1, 6, -1, -1, 9]
arr = [-1, -1, 6, 1, 9, 3, 2, -1, 4, -1]
def rearrange(arr):
size = len(arr)
temp = []
for i in range(size):
temp.append(arr[i])
for i in range(size):
if i in temp:
arr[i] = i
else:
arr[i] = -1
print(arr)
rearrange(arr)
| rowince/Programme | tut73.py | tut73.py | py | 712 | python | en | code | 0 | github-code | 50 |
14611570193 | __doc__ = """Monitor Java Management eXtension (JMX) mbeans
Dispatches calls to a java server process to collect JMX values for a device.
"""
import logging
import sys
import os
import socket
import Globals
import zope
from twisted.internet.defer import Deferred
from twisted.web import xmlrpc
from twisted.internet.protocol import ProcessProtocol
from twisted.internet import defer, reactor, error
from Products.ZenCollector.daemon import CollectorDaemon
from Products.ZenCollector.interfaces import ICollectorPreferences,\
IDataService,\
IEventService,\
IScheduledTask
from Products.ZenCollector.tasks import SimpleTaskFactory,\
SimpleTaskSplitter,\
TaskStates
from Products.ZenEvents import Event
from Products.ZenHub.XmlRpcService import XmlRpcService
from Products.ZenUtils.NJobs import NJobs
from Products.ZenUtils.Utils import unused
from Products.ZenUtils.observable import ObservableMixin
import ZenPacks.zenoss.ZenJMX
from ZenPacks.zenoss.ZenJMX.services.ZenJMXConfigService import JMXDataSourceConfig
unused(JMXDataSourceConfig)
# Module-level logger for the zenjmx daemon.
log = logging.getLogger( "zen.zenjmx" )

# Default heartbeat interval: 5 minutes (assumed seconds, the usual
# Zenoss heartbeat unit -- confirm against the collector framework).
DEFAULT_HEARTBEAT_TIME = 5 * 60

# Template for warning events raised by this daemon; callers copy and
# augment it before sending.
WARNING_EVENT = dict(eventClass='/Status/JMX', component='JMX',
                     device=socket.getfqdn(), severity=Event.Warning)
class ZenJMXPreferences(object):
    """
    Configuration values for the zenjmx daemon.
    """
    zope.interface.implements(ICollectorPreferences)

    def __init__(self):
        """
        Construct a new ZenJMXPreferences instance and provide default
        values for needed attributes.
        """
        self.collectorName = "zenjmx"
        self.defaultRRDCreateCommand = None
        self.cycleInterval = 5 * 60 # seconds
        self.configCycleInterval = 20 # minutes
        self.options = None

        # the configurationService attribute is the fully qualified class-name
        # of our configuration service that runs within ZenHub
        self.configurationService = 'ZenPacks.zenoss.ZenJMX.services.ZenJMXConfigService'

    def buildOptions(self, parser):
        """
        Add zenjmx-specific command-line options to the daemon parser.

        @param parser: optparse-style parser supplied by the collector
        framework
        """
        parser.add_option('-j','--zenjmxjavaport',
                          dest='zenjmxjavaport',
                          default=9988,
                          type='int',
                          help='Port for zenjmxjava process; default 9988. '+\
                            'Tries 5 consecutive ports if there is a conflict',
                          )
        parser.add_option('--concurrentJMXCalls',
                          dest='concurrentJMXCalls',
                          action='store_true', default=False,
                          help='Enable concurrent calls to a JMX server'
                          )
        parser.add_option('--parallel', dest='parallel',
                          default=200, type='int',
                          help='Number of devices to collect from at one time'
                          )
        parser.add_option('--cycleInterval', dest='cycleInterval',
                          default=300, type='int',
                          help='Cycle time, in seconds, to run collection'
                          )
        parser.add_option('--portRange', dest='portRange',
                          default=5, type='int',
                          help='Number of ports to attempt when starting' +
                               'Java jmx client')
        parser.add_option('--javaheap',
                          dest="maxHeap",type="int", default=512,
                          help="Max heap, in MB, to use for java process")

    def postStartup(self):
        """Hook invoked by the collector framework after startup; no-op."""
        pass

    def getJavaClientArgs(self):
        """
        Assemble the extra command-line arguments for the Java zenjmx
        client from the parsed daemon options.

        @return: tuple of argument strings, or None when no relevant
                 options were set
        """
        # Build up from an empty tuple.  The previous implementation
        # started with None, so setting -v or --concurrentJMXCalls
        # without --configfile raised "TypeError: unsupported operand
        # type(s) for +: 'NoneType' and 'tuple'".
        args = ()
        if self.options.configfile:
            args = args + ('--configfile', self.options.configfile)
        if self.options.logseverity:
            args = args + ('-v', str(self.options.logseverity))
        if self.options.concurrentJMXCalls:
            args = args + ('-concurrentJMXCalls', )
        # Preserve the historical contract of returning None when empty.
        return args or None

    def getStartingPort(self):
        """Return the first port to try for the Java XML-RPC server."""
        return self.options.zenjmxjavaport

    def getAttemptedPortRange(self):
        """Return how many consecutive ports to try on port conflict."""
        return self.options.portRange
class IZenJMXJavaClient(zope.interface.Interface):
    """
    zope interface describing the local Java JMX client process wrapper;
    implementations are registered as named utilities (see
    ZenJMXJavaClientInitialization) and looked up by collection tasks.
    """

    # TCP port on which the Java XML-RPC server listens.
    listenPort = zope.interface.Attribute("listenPort")
class ZenJMXJavaClientImpl(ProcessProtocol):
    """
    Protocol to control the zenjmxjava process.

    Spawns the external Java process via the Twisted reactor, reports
    its startup success/failure through a Deferred, raises Zenoss events
    on unexpected exits, and (once healthy) restarts the process when it
    dies.
    """

    zope.interface.implements(IZenJMXJavaClient)

    def __init__(
        self,
        args,
        cycle=True,
        zenjmxjavaport=9988,
        maxHeap=512
        ):
        """
        Initializer

        @param args: argument list for zenjmx
        @type args: list of strings
        @param cycle: whether to run once or repeat
        @type cycle: boolean
        @param zenjmxjavaport: port on which java process
        will listen for queries
        @type zenjmxjavaport: int
        @param maxHeap: max JVM heap, in MB, exported via JVM_MAX_HEAP
        @type maxHeap: int
        """
        # Fires once the child process is confirmed up (or failed to start).
        self.deferred = Deferred()
        self.stopCalled = False
        self.process = None
        # Mirror the child's stdout/stderr onto our own streams.
        self.outReceived = sys.stdout.write
        self.errReceived = sys.stderr.write
        self.log = logging.getLogger('zen.ZenJMXJavaClient')
        self.args = args
        self.cycle = cycle
        self.listenPort = zenjmxjavaport
        self._maxHeap = maxHeap
        # Auto-restart is enabled only after a confirmed successful startup
        # (set externally by ZenJMXJavaClientInitialization._startJavaProc).
        self.restartEnabled = False
        self._eventService = zope.component.queryUtility(IEventService)
        self._preferences = zope.component.queryUtility(ICollectorPreferences,
                                                        'zenjmx')

    def processEnded(self, reason):
        """
        Twisted reactor function called when the process ends.

        @param reason: failure object describing why the process exited
        @type reason: twisted.python.failure.Failure
        """
        self.process = None
        if not self.stopCalled:
            # The child died on its own (not via stop()) -- raise a warning.
            procEndEvent = {
                'eventClass': '/Status/JMX',
                'summary': 'zenjmxjava ended unexpectedly: %s'\
                    % reason.getErrorMessage(),
                'severity': Event.Warning,
                'component': 'zenjmx',
                'device': self._preferences.options.monitor,
                }
            self._eventService.sendEvent(procEndEvent)
            self.log.warn('processEnded():zenjmxjava process ended %s'
                          % reason)
            if self.deferred:
                # Startup never completed; report the failure to whoever
                # is waiting on run().  Exit code 10 is the Java side's
                # signal for a web-server/port startup failure.
                msg = reason.getErrorMessage()
                exitCode = reason.value.exitCode
                if exitCode == 10:
                    msg = 'Could not start up Java web server, '+\
                          'possible port conflict'
                self.deferred.callback((exitCode,msg))
                self.deferred = None
            elif self.restartEnabled:
                # A previously-healthy process died; restart it shortly.
                self.log.info('processEnded():restarting zenjmxjava')
                reactor.callLater(1, self.run)

    def stop(self):
        """
        Twisted reactor function called when we are shutting down.
        Kills the child process and suppresses the restart logic.
        """
        import signal
        self.log.info('stop():stopping zenjmxjava')
        # Prevents processEnded() from treating this exit as a crash.
        self.stopCalled = True
        if not self.process:
            self.log.debug('stop():no zenjmxjava process to stop')
            return
        try:
            self.process.signalProcess(signal.SIGKILL)
        except error.ProcessExitedAlready:
            # The process beat us to it; nothing left to kill.
            self.log.info('stop():zenjmxjava process already exited')
            pass
        try:
            self.process.loseConnection()
        except Exception:
            # Best-effort cleanup of the process transport.
            pass
        self.process = None

    def connectionMade(self):
        """
        Called by Twisted when the child process has been spawned and
        its stdio pipes are connected.  Schedules a delayed callback
        that confirms startup and fires self.deferred.
        """
        self.log.debug('connectionMade():zenjmxjava started')

        def doCallback():
            """
            Confirm startup: fire the startup deferred and send a clear
            event if the process is still alive.
            """
            msg = \
                'doCallback(): callback on deferred zenjmxjava proc is up'
            self.log.debug(msg)
            if self.deferred:
                self.deferred.callback((True,'zenjmx java started'))
            if self.process:
                procStartEvent = {
                    'eventClass': '/Status/JMX',
                    'summary': 'zenjmxjava started',
                    'severity': Event.Clear,
                    'component': 'zenjmx',
                    'device': self._preferences.options.monitor,
                    }
                self._eventService.sendEvent(procStartEvent)
            self.deferred = None

        if self.deferred:
            self.log.debug('connectionMade():scheduling callback')
            # give the java service a chance to startup
            reactor.callLater(3, doCallback)
        self.log.debug('connectionMade(): done')

    def run(self):
        """
        Spawn the zenjmxjava child process.

        @return: Deferred that fires with (True, msg) on successful
                 startup or (exitCode, msg) on failure
        """
        if self.stopCalled:
            return
        self.log.info('run():starting zenjmxjava')
        zenjmxjavacmd = os.path.join(ZenPacks.zenoss.ZenJMX.binDir,
                                     'zenjmxjava')
        if self.cycle:
            args = ('runjmxenabled', )
        else:
            # don't want to start up with jmx server to avoid port conflicts
            args = ('run', )
        args = args + ('-zenjmxjavaport',
                       str(self.listenPort))
        if self.args:
            args = args + self.args
        cmd = (zenjmxjavacmd, ) + args
        self.log.debug('run():spawn process %s' % (cmd, ))
        self.deferred = Deferred()
        # Pass the heap limit to the launcher script via the environment.
        env = dict(os.environ)
        env['JVM_MAX_HEAP'] = '-Xmx%sm'%self._maxHeap
        self.process = reactor.spawnProcess(self, zenjmxjavacmd, cmd,
                                            env=env)
        return self.deferred
DEFAULT_JMX_JAVA_CLIENT_NAME = 'zenjmxjavaclient'
class ZenJMXJavaClientInitialization(object):
    """
    Wrapper that continues to start the Java jmx client until
    successful, walking forward through a configured range of ports
    when startup fails with a port conflict.
    """

    def __init__(self,
                 registeredName=DEFAULT_JMX_JAVA_CLIENT_NAME):
        """
        @param registeredName: the name with which this client
                               will be registered as a utility
        """
        self._jmxClient = None
        self._clientName = registeredName

    def initialize(self):
        """
        Begin the first attempt to start the Java jmx client. Note that
        this method returns a Deferred that relies on the ZenJMXPreferences
        being present when it is finally executed. This is meant to be
        the Deferred that is given to the CollectorDaemon for
        initialization before the first JMX task is scheduled.

        @return the deferred that represents the loading of preferences
        and the initial attempt to start the Java jmx client
        @rtype defer.Deferred
        """

        def loadPrefs():
            """Pull startup parameters from the registered preferences."""
            log.debug( "Retrieving java client startup args")
            preferences = zope.component.queryUtility(ICollectorPreferences,
                                                      'zenjmx')
            self._args = preferences.getJavaClientArgs()
            self._cycle = preferences.options.cycle
            self._maxHeap = preferences.options.maxHeap
            self._startingPort = preferences.getStartingPort()
            self._rpcPort = self._startingPort
            self._attemptedPortRange = preferences.getAttemptedPortRange()

        def printProblem(result):
            """Log a fatal startup problem and exit the daemon."""
            log.error( str(result) )
            sys.exit(1)

        d = defer.maybeDeferred( loadPrefs )
        d.addCallback( self._startJavaProc )
        d.addErrback( printProblem )
        return d

    def _tryClientOnCurrentPort( self ):
        """
        Returns the Deferred for executing an attempt
        to start the java jmx client on the current port
        (self._rpcPort).  Also registers the client as a named
        utility so collection tasks can find it.
        """
        log.debug( 'Attempting java client startup on port %s',
                   self._rpcPort )
        self._jmxClient = ZenJMXJavaClientImpl( self._args, self._cycle, self._rpcPort, self._maxHeap )
        zope.component.provideUtility(
            self._jmxClient,
            IZenJMXJavaClient,
            self._clientName
            )
        return self._jmxClient.run()

    def _startJavaProc( self, result=None ):
        """
        Checks whether startup of the java jmx client was successful. If
        it was unsuccessful due to port conflict, increments the port and
        tries to start the client again (re-adding itself as callback,
        so retries recurse until the port range is exhausted).
        """

        # If the result is not None, that means this was called as a callback
        # after an attempt to start the client
        if result is not None:

            # If result[0] is True, the client process started
            if result[0] is True:
                log.debug( 'Java jmx client started' )
                # From now on the client restarts itself if it dies.
                self._jmxClient.restartEnabled = True
                deferred = defer.succeed( True )

            # If the result[0] is 10, there was a port conflict
            elif result[0] == 10:
                log.debug( 'Java client didn\'t start; port %s occupied',
                           self._rpcPort )
                if self._rpcPort < ( self._startingPort +
                                     self._attemptedPortRange ):
                    self._rpcPort += 1
                    deferred = self._tryClientOnCurrentPort()
                    deferred.addCallback( self._startJavaProc )
                else:
                    raise RuntimeError(
                        "ZenJMXJavaClient could not be started, check ports")
            else:
                #unknown error
                raise RuntimeError('ZenJMXJavaClient could not be started, '+\
                                   'check JVM type and version: %s' % result[1])

        # If there was no result passed in, then this is the first attempt
        # to start the client
        else:
            deferred = self._tryClientOnCurrentPort()
            deferred.addCallback( self._startJavaProc )

        return deferred
class ZenJMXTask(ObservableMixin):
"""
The scheduled task for all the jmx datasources on an individual device.
"""
zope.interface.implements(IScheduledTask)
    def __init__(self,
                 deviceId,
                 taskName,
                 scheduleIntervalSeconds,
                 taskConfig,
                 clientName=DEFAULT_JMX_JAVA_CLIENT_NAME ):
        """
        Initialize the per-device JMX collection task.

        @param deviceId: id of the device this task collects from
        @param taskName: unique name assigned by the task scheduler
        @param scheduleIntervalSeconds: interval from the device config;
               currently ignored in favor of the daemon-wide cycleInterval
        @param taskConfig: device configuration (provides manageIp and
               the JMX datasource definitions)
        @param clientName: utility name of the Java JMX client to use
        """
        super( ZenJMXTask, self ).__init__()
        self.name = taskName
        self.configId = deviceId
        self.state = TaskStates.STATE_IDLE

        self._taskConfig = taskConfig
        self._manageIp = self._taskConfig.manageIp

        # Look up collaborators from the zope component registry.
        self._dataService = zope.component.queryUtility( IDataService )
        self._eventService = zope.component.queryUtility( IEventService )
        self._preferences = zope.component.queryUtility( ICollectorPreferences,
                                                         'zenjmx' )
        self._client = zope.component.queryUtility( IZenJMXJavaClient,
                                                    clientName )

        # At this time, do not use the interval passed from the device
        # configuration. Use the value pulled from the daemon
        # configuration.
        unused( scheduleIntervalSeconds )
        self.interval = self._preferences.options.cycleInterval
def createEvent(self, errorMap, component=None):
"""
Given an event dictionary, copy it and return the event
@param errorMap: errorMap
@type errorMap: s dictionarytring
@param component: component name
@type component: string
@return: updated event
@rtype: dictionary
"""
event = errorMap.copy()
if component:
event['component'] = component
if event.get('datasourceId') and not event.get('eventKey'):
event['eventKey'] = event.get('datasourceId')
return event
    def sendEvent(self, event, **kw):
        """
        Forward an event dict (plus any keyword-argument overrides) to
        the registered event service.
        """
        self._eventService.sendEvent(event, **kw)
def _collectJMX(self, dsConfigList):
"""
Call Java JMX process to collect JMX values
@param dsConfigList: DataSource configuration
@type dsConfigList: list of JMXDataSourceConfig
@return: Twisted deferred object
@rtype: Twisted deferred object
"""
def toDict(config):
"""
Marshall the fields from the datasource into a dictionary and
ignore everything that is not a primitive
@param config: dictionary of results
@type config: string
@return: results from remote device
@rtype: dictionary
"""
vals = {}
for (key, val) in config.__dict__.items():
if key != 'rrdConfig' and type(val)\
in XmlRpcService.PRIMITIVES:
vals[key] = val
rrdConfigs = config.rrdConfig.values()
rrdConfigs.sort(lambda x, y: cmp(x.dataPointId,
y.dataPointId))
vals['dps'] = []
vals['dptypes'] = []
for rrdConfig in rrdConfigs:
vals['dps'].append(rrdConfig.dataPointId)
vals['dptypes'].append(rrdConfig.rrdType)
vals['connectionKey'] = config.getConnectionPropsKey()
return vals
def rpcCall():
"""
Communicate with our local JMX process to collect results.
This is a generator function
@param driver: generator
@type driver: string
"""
port = self._client.listenPort
xmlRpcProxy = xmlrpc.Proxy('http://localhost:%s/' % port)
d = xmlRpcProxy.callRemote('zenjmx.collect', configMaps)
d.addCallbacks( processResults , processRpcError)
return d
def processRpcError(error):
log.debug("Could not make XML RPC call for device %s; content of call: %s", self._taskConfig, configMaps)
self.sendEvent({}, severity=Event.Error,
eventClass='/Status/JMX',
summary='unexpected error: %s' % error.getErrorMessage(),
eventKey='unexpected_xmlrpc_error',
device=self.configId)
return error
def processResults(jmxResults):
"""
Given the results from JMX, store them or send events.
@param jmxResults: jmxResults
@type jmxResults: string
"""
#Send clear for RPC error
self.sendEvent({}, severity=Event.Clear,
eventClass='/Status/JMX',
summary='unexpected error cleared',
eventKey='unexpected_xmlrpc_error',
device=self.configId)
result = {}
hasConnectionError = False
hasUnexpectedError = False
for result in jmxResults:
log.debug("JMX result -> %s", result)
evtSummary = result.get('summary')
deviceId = result.get('device')
evt = self.createEvent(result)
if not evtSummary:
rrdPath = result.get('rrdPath')
dsId = result.get('datasourceId')
dpId = result.get('dpId')
value = result.get('value')
try:
self.storeRRD(deviceId, rrdPath, dsId, dpId, value)
except ValueError:
pass
self.sendEvent(evt,summary="Clear",severity=Event.Clear)
else:
# send event
log.debug('processResults(): '
+ 'jmx error, sending event for %s'
% result)
if evt.get("eventClass", "") == '/Status/JMX/Connection':
hasConnectionError = True
if evt.get("eventKey", "") == 'unexpected_error':
hasUnexpectedError = True
self.sendEvent(evt, severity=Event.Error)
if not hasConnectionError:
self.sendEvent({}, severity=Event.Clear,
eventClass='/Status/JMX/Connection',
summary='Connection is up',
eventKey=connectionComponentKey,
device=self.configId)
if not hasUnexpectedError:
self.sendEvent({}, severity=Event.Clear,
eventClass='/Status/JMX',
summary='Unexpected error cleared',
eventKey='unexpected_error',
device=self.configId)
return jmxResults
connectionComponentKey = ''
configMaps = []
for config in dsConfigList:
connectionComponentKey = config.getConnectionPropsKey()
configMaps.append(toDict(config))
log.info('collectJMX(): for %s %s' % (config.device,
connectionComponentKey))
return rpcCall()
def storeRRD(
self,
deviceId,
rrdPath,
dataSourceId,
dataPointId,
dpValue,
):
"""
Store a value into an RRD file
@param deviceId: name of the remote device
@type deviceId: string
@param dataSourceId: name of the data source
@type dataSourceId: string
@param dataPointId: name of the data point
@type dataPointId: string
@param dpValue: dpValue
@type dpValue: number
"""
deviceConfig = self._taskConfig
dsConfig = deviceConfig.findDataSource(dataSourceId)
if not dsConfig:
log.info(
'No data source config found for device %s datasource %s' \
% (deviceId, dataSourceId))
return
rrdConf = dsConfig.rrdConfig.get(dataPointId)
type = rrdConf.rrdType
if(type in ('COUNTER', 'DERIVE')):
try:
# cast to float first because long('100.0') will fail with a
# ValueError
dpValue = long(float(dpValue))
except (TypeError, ValueError):
log.warning("value %s not valid for derive or counter data points", dpValue)
else:
try:
dpValue = float(dpValue)
except (TypeError, ValueError):
log.warning("value %s not valid for data point", dpValue)
if not rrdConf:
log.info(
'No RRD config found for device %s datasource %s datapoint %s' \
% (deviceId, dataSourceId, dataPointId))
return
dpPath = '/'.join((rrdPath, rrdConf.dpName))
min = rrdConf.min
max = rrdConf.max
self._dataService.writeRRD(dpPath, dpValue, rrdConf.rrdType,
rrdConf.command, min=min, max=max)
def _finished(self, results):
for result in results:
log.debug("Finished with result %s" % str( result ) )
return results
def doTask(self):
log.debug("Scanning device %s [%s]", self.configId, self._manageIp)
d = self._collectCallback()
d.addBoth(self._finished)
# returning a Deferred will keep the framework from assuming the task
# is done until the Deferred actually completes
return d
def _collectCallback(self):
jobs = NJobs(self._preferences.options.parallel,
self._collectJMX,
self._taskConfig.jmxDataSourceConfigs.values())
deferred = jobs.start()
return deferred
def cleanup(self):
pass
def stopJavaJmxClients():
# Currently only starting/stopping one.
clientName = DEFAULT_JMX_JAVA_CLIENT_NAME
client = zope.component.queryUtility( IZenJMXJavaClient,
clientName )
if client is not None:
log.debug( 'Shutting down JMX Java client %s' % clientName )
client.stop()
if __name__ == '__main__':
myPreferences = ZenJMXPreferences()
initialization = ZenJMXJavaClientInitialization()
myTaskFactory = SimpleTaskFactory(ZenJMXTask)
myTaskSplitter = SimpleTaskSplitter(myTaskFactory)
daemon = CollectorDaemon(myPreferences, myTaskSplitter,
initializationCallback=initialization.initialize,
stoppingCallback=stopJavaJmxClients)
daemon.run()
| krull/docker-zenoss4 | init_fs/usr/local/zenoss/ZenPacks/ZenPacks.zenoss.ZenJMX-3.12.1.egg/ZenPacks/zenoss/ZenJMX/zenjmx.py | zenjmx.py | py | 24,888 | python | en | code | 4 | github-code | 50 |
22624281149 | import os
import pytest
from ansys.materials.manager.util.matml import MatmlReader, convert_matml_materials
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
class TestMatmlToMaterial:
def test_conversion_to_material_object(self):
"""read a xml file with steel and e-glass UD"""
xml_file_path = os.path.join(DIR_PATH, "..", "data", "steel_eglass_air.xml")
reader = MatmlReader(xml_file_path)
num_materials = reader.parse_matml()
assert num_materials == 3
materials = convert_matml_materials(reader.materials, reader.transfer_ids, 3)
steel = materials[2]
assert steel.material_id == 6
assigned_models = steel.models
assert len(assigned_models) == 18
assert steel.uuid == "636a7e55-fe81-4d04-9d98-a2cdd31e962a"
expected_results = {
"strain reference temperature": 0.0,
"density": 7850.0,
"young's modulus x direction": 200000000000.0,
"young's modulus y direction": 200000000000.0,
"young's modulus z direction": 200000000000.0,
"shear modulus xy": 76923076923.0769,
"shear modulus yz": 76923076923.0769,
"shear modulus xz": 76923076923.0769,
"poisson's ratio xy": 0.3,
"poisson's ratio yz": 0.3,
"poisson's ratio xz": 0.3,
"thermal expansion coefficient x direction": 1.2e-5,
"thermal expansion coefficient y direction": 1.2e-5,
"thermal expansion coefficient z direction": 1.2e-5,
"specific heat capacity": 434.0,
"thermal conductivity x direction": 60.5,
"thermal conductivity y direction": 60.5,
"thermal conductivity z direction": 60.5,
}
for name, expected_value in expected_results.items():
assigned_model = steel.get_model_by_name(name)
assert len(assigned_model) == 1
assert assigned_model[0].value == pytest.approx(expected_value)
eglass = materials[1]
assert eglass.material_id == 5
assigned_models = eglass.models
assert len(assigned_models) == 17
assert eglass.uuid == "a1f2e775-77fe-4ad6-a822-54d353e0ea0e"
expected_results = {
"strain reference temperature": 0.0,
"density": 2000.0,
"young's modulus x direction": 45000000000.0,
"young's modulus y direction": 10000000000.0,
"young's modulus z direction": 10000000000.0,
"shear modulus xy": 5000000000.0,
"shear modulus yz": 3846150000.0,
"shear modulus xz": 5000000000.0,
"poisson's ratio xy": 0.3,
"poisson's ratio yz": 0.4,
"poisson's ratio xz": 0.3,
"thermal conductivity x direction": 30.0,
"thermal conductivity y direction": 5.0,
"thermal conductivity z direction": 5.0,
"thermal expansion coefficient x direction": -1e-6,
"thermal expansion coefficient y direction": 1e-5,
"thermal expansion coefficient z direction": 1e-5,
}
for name, expected_value in expected_results.items():
assigned_model = eglass.get_model_by_name(name)
assert len(assigned_model) == 1
assert assigned_model[0].value == pytest.approx(expected_value)
air = materials[0]
assert air.material_id == 4
assigned_models = air.models
assert len(assigned_models) == 8
assert air.uuid == "370e7536-77c0-11ed-8eeb-6c6a77744180"
expected_results = {
"strain reference temperature": 0.0,
"density": 1.225,
"specific heat capacity": 1006.43,
"thermal conductivity x direction": 0.0242,
"thermal conductivity y direction": 0.0242,
"thermal conductivity z direction": 0.0242,
"viscosity": 1.7894e-05,
"speed of sound": 346.25,
}
for name, expected_value in expected_results.items():
assigned_model = air.get_model_by_name(name)
assert len(assigned_model) == 1
assert assigned_model[0].value == pytest.approx(expected_value)
| ansys/pymaterials-manager | tests/matml/test_matmal_to_material.py | test_matmal_to_material.py | py | 4,238 | python | en | code | 0 | github-code | 50 |
34655217464 | _author_ = 'jake'
_project_ = 'leetcode'
# https://leetcode.com/problems/deepest-leaves-sum/
# Given a binary tree, return the sum of values of its deepest leaves.
# Breadth-first search.
# For each layer, sum all the node values and find all nodes in the next layer.
# Repeat until the layer is empty and return the sum of nodes in the previous layer.
# Time - O(n)
# Space - O(n)
class Solution(object):
def deepestLeavesSum(self, root):
"""
:type root: TreeNode
:rtype: int
"""
nodes = [root]
while nodes:
level_sum = 0
new_nodes = []
for node in nodes:
level_sum += node.val
if node.left:
new_nodes.append(node.left)
if node.right:
new_nodes.append(node.right)
nodes = new_nodes
return level_sum
| jakehoare/leetcode | python_1001_to_2000/1302_Deepest_Leaves_Sum.py | 1302_Deepest_Leaves_Sum.py | py | 903 | python | en | code | 49 | github-code | 50 |
25125529236 | #!/usr/bin/env python3
import torch
import torch.nn.functional as F
from ai_old.util.etc import print_
from ai_old.trainer.base import BaseTrainer
from external.sg2.misc import print_module_summary
from ai_old.util.factory import build_model, build_model_from_exp
import external.sg2.misc as misc
from random import random
from torch.distributions.uniform import Uniform
import numpy as np
MAX_MAG = 1.5
N_MAGS = 8
class EncLerpTrainer(BaseTrainer):
def run_batch(self, batch, batch_idx, cur_step):
# prep_data
with torch.autograd.profiler.record_function('data_prep'):
phase_img = batch['img']
phase_img = (
phase_img.to(self.device).to(torch.float32) / 127.5 - 1
).split(self.batch_gpu)
phase_gender = batch['gender']
phase_gender = (
phase_gender.to(self.device).to(torch.float32).unsqueeze(1)
).split(self.batch_gpu)
phase_w = batch['w']
phase_w = (
phase_w.to(self.device).to(torch.float32)
).split(self.batch_gpu)
# run training phases
for phase in self.phases:
if batch_idx % phase.interval != 0:
continue
phase.init_gradient_accumulation()
# accumulate gradients over multiple rounds
for round_idx, (img, gender, w) in enumerate(zip(
phase_img,
phase_gender,
phase_w,
)):
sync = (round_idx == self.batch_size // \
(self.batch_gpu * self.num_gpus) - 1)
gain = phase.interval
self.loss.accumulate_gradients(
phase=phase.name,
img=img,
gender=gender,
w=w,
sync=sync,
gain=gain,
)
phase.update_params()
def run_model(self, img, gender, w, sync):
w_lerper = self.ddp_modules['w_lerper']
img_generator = self.ddp_modules['img_generator']
encoder = self.ddp_modules['encoder']
enc_lerper = self.ddp_modules['enc_lerper']
bs = img.shape[0]
mag = self.mag_sampler.sample((bs,)).to(self.device)
with torch.no_grad():
# w lerp
with misc.ddp_sync(w_lerper, sync):
guide_w = w_lerper(
w,
gender,
magnitude=MAX_MAG,
)
target_w = w_lerper(w, gender, magnitude=mag)
# generate
with misc.ddp_sync(img_generator, sync):
guide_img = img_generator(guide_w)
target_img = img_generator(target_w)
# encode
with misc.ddp_sync(encoder, sync):
enc = encoder(img)
guide_enc = encoder(guide_img)
target_enc = encoder(target_img)
# enc lerp
with misc.ddp_sync(enc_lerper, sync):
pred_enc = enc_lerper(enc.detach(), guide_enc.detach(), mag.detach())
# print(pred_enc.shape, target_enc.shape)
# print(torch.mean(torch.abs(pred_enc.detach() - target_enc.detach())))
return pred_enc, target_enc
def get_eval_fn(self):
def _eval_fn(model, batch, batch_size, device):
img = batch['img'].to(device).to(torch.float32) / 127.5 - 1
gender = batch['gender'].to(device).to(
torch.float32).unsqueeze(1)
w = batch['w'].to(device).to(torch.float32)
mag = self.mag_sampler.sample((batch_size,)).to(device)
# w lerp
guide_w = self.lerp_and_gen.f(
w,
gender,
magnitude=MAX_MAG,
)
target_w = self.lerp_and_gen.f(w, gender, magnitude=mag)
# generate
guide_img = self.lerp_and_gen.g(guide_w)
target_img = self.lerp_and_gen.g(target_w)
# encode
enc = self.ae.e(img)
guide_enc = self.ae.e(guide_img)
target_enc = self.ae.e(target_img)
# enc lerp
pred_enc = model(enc, guide_enc, mag)
return pred_enc, target_enc
return _eval_fn
def get_sample_fn(self):
def _sample_fn(model, batch, batch_size, device):
img = batch['img'].to(device).to(torch.float32) / 127.5 - 1
gender = batch['gender'].to(device).to(
torch.float32).unsqueeze(1)
w = batch['w'].to(device).to(torch.float32)
# w lerp
guide_w = self.lerp_and_gen.f(w, gender, magnitude=MAX_MAG)
# w to img
guide_img = self.lerp_and_gen.g(guide_w)
# encode
enc = self.ae.e(img)
guide_enc = self.ae.e(guide_img)
# enc lerp
pred_enc0 = model(
enc,
guide_enc,
torch.tensor((0.,), device=device).repeat(batch_size),
)
pred_enc1 = model(
enc,
guide_enc,
torch.tensor((0.5,), device=device).repeat(batch_size),
)
pred_enc2 = model(
enc,
guide_enc,
torch.tensor((1.,), device=device).repeat(batch_size),
)
pred_enc3 = model(
enc,
guide_enc,
torch.tensor((1.5,), device=device).repeat(batch_size),
)
# enc to img
img0 = self.ae.g(pred_enc0)
img1 = self.ae.g(pred_enc1)
img2 = self.ae.g(pred_enc2)
img3 = self.ae.g(pred_enc3)
# img0 = self.ae.g(enc)
# img1 = self.ae.g(guide_enc)
# img2 = self.ae.g(guide_enc)
# img3 = self.ae.g(guide_enc)
return img0, img1, img2, img3
return _sample_fn
def get_model_key_for_eval(self):
return 'model'
def _init_modules(self):
cfg = self.cfg
# main model
print_(self.rank, '[INFO] initializing model...')
self.model = build_model(
cfg,
cfg.model,
).train().requires_grad_(False).to(self.device)
# w lerp and gen
self.lerp_and_gen = build_model_from_exp(
cfg.trainer.w_lerp_exp,
'G',
return_cfg=False,
).eval().requires_grad_(False).to(self.device)
# autoencode
self.ae = build_model_from_exp(
cfg.trainer.ae_exp,
'G_ema',
return_cfg=False,
).eval().requires_grad_(False).to(self.device)
self.mag_sampler = Uniform(0., MAX_MAG)
# resume training
assert not cfg.resume
def _init_phases(self):
print_(self.rank, '[INFO] initializing training phases...')
self.phases = [self._build_loss_phase('main', self.model)]
def get_modules_for_save(self):
return [('model', self.model)]
def _get_modules_for_distribution(self):
return [
('w_lerper', self.lerp_and_gen.f, False),
('img_generator', self.lerp_and_gen.g, False),
('encoder', self.ae.e, False),
('enc_lerper', self.model, True),
]
def print_models(self):
enc = torch.empty([self.batch_gpu, 512, 4, 4], device=self.device)
mag = torch.ones([self.batch_gpu,], device=self.device)
_ = print_module_summary(self.model, [enc, enc, mag])
class FastEncLerpTrainer(EncLerpTrainer):
def run_batch(self, batch, batch_idx, cur_step):
# prep_data
with torch.autograd.profiler.record_function('data_prep'):
phase_target_enc = batch['target_enc']
phase_target_enc = (
phase_target_enc.to(self.device).to(torch.float32)
).split(self.batch_gpu)
# run training phases
for phase in self.phases:
if batch_idx % phase.interval != 0:
continue
phase.init_gradient_accumulation()
# accumulate gradients over multiple rounds
for round_idx, target_enc in enumerate(phase_target_enc):
sync = (round_idx == self.batch_size // \
(self.batch_gpu * self.num_gpus) - 1)
gain = phase.interval
self.loss.accumulate_gradients(
phase=phase.name,
target_enc=target_enc,
sync=sync,
gain=gain,
)
phase.update_params()
def run_model(self, target_enc, sync):
# target enc shape: (b, N_MAGS, c, h, w)
base_enc = target_enc[:, 0, :, :, :]
guide_enc = target_enc[:, N_MAGS - 1, :, :, :]
model = self.ddp_modules['model']
pred_enc = torch.zeros_like(target_enc)
with misc.ddp_sync(model, sync):
for i in range(N_MAGS):
pred_enc[:, i, :, :, :] = model(
base_enc.detach(),
guide_enc.detach(),
self.mags[i].detach(),
)
return pred_enc
def get_eval_fn(self):
def _eval_fn(model, batch, batch_size, device):
target_enc = batch['target_enc'].to(device).to(torch.float32)
base_enc = target_enc[:, 0, :, :, :]
guide_enc = target_enc[:, N_MAGS - 1, :, :, :]
pred_enc = torch.zeros_like(target_enc)
for i in range(N_MAGS):
pred_enc[:, i, :, :, :] = model(
base_enc,
guide_enc,
self.mags[i],
)
return pred_enc, target_enc
return _eval_fn
def get_sample_fn(self):
def _sample_fn(model, batch, batch_size, device):
target_enc = batch['target_enc'].to(device).to(torch.float32)
base_enc = target_enc[:, 0, :, :, :]
guide_enc = target_enc[:, N_MAGS - 1, :, :, :]
# enc lerp
pred_enc0 = model(
base_enc,
guide_enc,
torch.tensor((0.,), device=device).repeat(batch_size),
)
pred_enc1 = model(
base_enc,
guide_enc,
torch.tensor((0.5,), device=device).repeat(batch_size),
)
pred_enc2 = model(
base_enc,
guide_enc,
torch.tensor((1.,), device=device).repeat(batch_size),
)
pred_enc3 = model(
base_enc,
guide_enc,
torch.tensor((1.5,), device=device).repeat(batch_size),
)
# enc to img
img0 = self.ae_g(pred_enc0)
img1 = self.ae_g(pred_enc1)
img2 = self.ae_g(pred_enc2)
img3 = self.ae_g(pred_enc3)
return img0, img1, img2, img3
return _sample_fn
def _init_modules(self):
cfg = self.cfg
# main model
print_(self.rank, '[INFO] initializing model...')
self.model = build_model(
cfg,
cfg.model,
).train().requires_grad_(False).to(self.device)
# autoencode
self.ae_g = build_model_from_exp(
cfg.trainer.ae_exp,
'G_ema',
return_cfg=False,
).g.eval().requires_grad_(False).to(self.device)
# mags
self.mags = []
for mag in np.linspace(0., MAX_MAG, num=N_MAGS):
self.mags.append(torch.tensor(
(mag,),
device=self.device,
dtype=torch.float32,
).repeat(cfg.dataset.batch_size))
# resume training
assert not cfg.resume
def _get_modules_for_distribution(self):
return [
('model', self.model, True),
]
| calvinpelletier/ai_old | trainer/enc_lerp.py | enc_lerp.py | py | 11,978 | python | en | code | 0 | github-code | 50 |
25039519174 | from host.models import getHost
from host.models import HostinfoCommand, HostinfoException, Links
###############################################################################
class Command(HostinfoCommand):
description = "Delete a link to a host"
###########################################################################
def parseArgs(self, parser):
parser.add_argument("--everytag", help="Delete all links", action="store_true")
parser.add_argument("--tag", help="The link tag", nargs=1)
parser.add_argument("host", help="The host to delete the link from")
###########################################################################
def handle(self, namespace):
host = namespace.host.lower()
targhost = getHost(host)
if not targhost:
raise HostinfoException("Host %s doesn't exist" % host)
links = Links.objects.filter(hostid=targhost)
if namespace.everytag:
pass
if namespace.tag:
links = links.filter(tag=namespace.tag[0].lower())
for link in links:
link.delete()
return None, 0
# EOF
| dwagon/Hostinfo | hostinfo/host/commands/cmd_hostinfo_deletelink.py | cmd_hostinfo_deletelink.py | py | 1,155 | python | en | code | 10 | github-code | 50 |
19455619980 | #!/usr/bin/python
##############################################################
# Program name: NCAA Basketball Stats Scraper (Settings file)
# Version: 1.0
# By: Rodrigo Zamith
# License: MPL 2.0 (see LICENSE file in root folder)
# Additional thanks:
# Refer to http://stats.ncaa.org/team/inst_team_list?sport_code=MBB&division=1 in setting these variables
##############################################################
# Select year for parsing
academic_year = "2014" # Set the academic year (2012 refers to 2011-2012 season). As of writing, this can range from 2010 to 2013.
year_index = "11540" # Set the index that maps to the academic year. This may be obtained from looking at the team URLs on the list of available teams, for the given academic year. As of writing, the [academic_year, year_index] mappings are: [2013, 11220], [2012, 10740], [2011, 10440], and [2010, 10260]
# What do you want to do? (Note: Lower tiers need higher tiers, i.e., ind_game_stats requires map_players (Tier 2), which requires map_teams (Tier 1).)
map_teams = 1 # Create a team mapping (0 = no, 1 = yes) -- TIER 1
map_schedule = 1 # Create schedule mapping (0 = no, 1 = yes)
map_players = 1 # Create a player mapping (0 = no, 1 = yes)
summary_teams = 1 # Get summary statistics for each team (0 = no, 1 = yes)
summary_players = 1 # Get summary statistics for each player (0 = no, 1 = yes)
ind_game_stats = 1 # Get individual game statistics (0 = no, 1 = yes)
ind_player_stats = 1 # Get individual player statistics (0 = no, 1 = yes)
ind_team_stats = 1 # Get individual team statistics (a line per team, such that each game will have two lines (one for away team, one for home team)) (0 = no, 1 = yes)
# Where do you want to save the data?
team_mappingfile = "mappings/team_mappings.tsv" # Data file for team mappings
player_mappingfile = "mappings/player_mappings.tsv" # Data file for player mappings
schedule_mappingfile = "mappings/schedule_mappings.tsv" # Data file for schedule mappings
summary_player_data = "data/summary_player_data.tsv" # Data file for individual player summary statistics
summary_team_data = "data/summary_team_data.tsv" # Data file for team summary statistics
game_data = "data/game_data.tsv" # Data file for each game
player_data = "data/player_data.tsv" # Data file for each player
team_data = "data/team_data.tsv" # Data file for each team
#### The variables below could be set, but probably don't need any modification #####
debugmode = 1 # Output program steps (0 = off, 1 = on)
params = { } # Any POST parameters that need to be sent (default)
http_header = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:19.0) Gecko/20100101 Firefox/19.0",
"Accept": "text/plain, */*; q=0.01",
"Accept-Language": "en-US,en;q=0.5",
"DNT": "1",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"X-Requested-With": "XMLHttpRequest",
"Referer": "http://stats.ncaa.org/team/inst_team_list?sport_code=MBB&division=1",
"Connection": "keep-alive",
"Pragma": "no-cache",
"Cache-Control": "no-cache"
} # Variables from the HTTP header (default)
start_url = 'http://stats.ncaa.org/team/inst_team_list?sport_code=MBB&academic_year=' + str(academic_year) + "&division=1" # URL to start from (Change this for different years). You can get this URL from http://stats.ncaa.org/team/inst_team_list?sport_code=MBB&division=1. This URL is for the 2011-2012 season.
domain_base = 'http://stats.ncaa.org' # Base domain | rodzam/ncaab-stats-scraper | scrapersettings.py | scrapersettings.py | py | 3,570 | python | en | code | 37 | github-code | 50 |
39723995259 | class Class:
def method(self):
print("ihav")
def function():
print("I'm not")
instance = Class()
instance.method()
instance.method = function
class bird:
song = "fuck"
def sing(self):
print(self.song)
Bird = bird()
Bird.sing()
c.name | kingflyfly/python_study | 第7章-抽象/7.2.3.py | 7.2.3.py | py | 266 | python | en | code | 0 | github-code | 50 |
37705212126 | import pygame
class StartButton:
def __init__(self, msg, x, y, w, h, color, font, screen, action):
self.msg = msg
self.x = x
self.y = y
self.w = w
self.h = h
self.color = color
self.font = font
self.screen = screen
self.action = action # Button action
self.rect = pygame.Rect(x, y, w, h)
def draw(self):
pygame.draw.rect(self.screen, self.color, self.rect)
text_surf, text_rect = self.text_objects(self.msg, self.font)
text_rect.center = self.rect.center
self.screen.blit(text_surf, text_rect)
def handle_event(self, event):
if event.type == pygame.MOUSEBUTTONDOWN:
if self.rect.collidepoint(event.pos):
self.action()
def handle_key_event(self, event):
if event.key == pygame.K_RETURN:
self.action()
def text_objects(self, text, font):
text_surface = font.render(text, True, pygame.Color('black'))
return text_surface, text_surface.get_rect()
| SSM-and-etc/Uno_SE | Script/Lobby/button.py | button.py | py | 1,054 | python | en | code | 4 | github-code | 50 |
7185144510 | """
PyTorch implementation for meta-learning plasticity rules (v1.0)
Author: Navid Shervani-Tabar
Date : March 5, 2023, 18:44:17
"""
import os
import torch
import warnings
import argparse
import datetime
from torch import nn, optim
from random import randrange
from torch.nn.utils import _stateless
from torch.utils.data import DataLoader, RandomSampler
from utils import log, Plot, meta_stats
from optim import MyOptimizer, plasticity_rule
from dataset import EmnistDataset, DataProcess
warnings.simplefilter(action='ignore', category=UserWarning)
class MyModel(nn.Module):
    """
    Classifier model.

    Holds the layers and parameters of
    1) the classification (forward) network ('fc*' modules),
    2) the feedback alignment pathway ('fk*' modules), and
    3) the plasticity meta-parameters ('*_fwd').
    """
    def __init__(self, args):
        """
        Initialize MyModel object.

        Builds the forward and feedback pathways and the plasticity
        meta-parameters. The module naming convention is:
        - 'fc': parameters that are updated during adaptation,
        - 'fk': fixed parameters with `requires_grad=False`,
        - 'fwd': meta-learned parameters.
        The corresponding flags are assigned in `MetaLearner.load_model`.

        :param args: (argparse.Namespace) The command-line arguments.
        """
        super(MyModel, self).__init__()

        # -- layer widths: input, four hidden layers, and the output layer
        widths = (784, 170, 130, 100, 70, 47)

        # -- forward pathway
        for idx in range(5):
            self.add_module('fc{}'.format(idx + 1),
                            nn.Linear(widths[idx], widths[idx + 1], bias=False))

        # -- feedback pathway (fixed weights used for feedback alignment)
        for idx in range(5):
            self.add_module('fk{}'.format(idx + 1),
                            nn.Linear(widths[idx], widths[idx + 1], bias=False))

        # -- plasticity meta-params ('a' starts at args.a, the rest at zero)
        for name, init_val in zip('abcde', (args.a, 0., 0., 0., 0.)):
            self.register_parameter(name + '_fwd',
                                    nn.Parameter(torch.tensor(init_val).float()))

        # -- activation function
        self.Beta = 10
        self.sopl = nn.Softplus(beta=self.Beta)

        # -- container for the meta-params (populated by `load_model`)
        self.params_fwd = nn.ParameterList()

    def forward(self, x):
        """
        Perform the forward pass of the classification network.

        Each hidden layer is followed by a Softplus activation; the final
        layer is left linear (softmax is applied inside the loss function).

        :param x: (torch.Tensor) input images.
        :return: tuple: a tuple containing the input and the activations
            across the network layers, and the predicted output (logits).
        """
        activations = [x.squeeze(1)]
        for layer in (self.fc1, self.fc2, self.fc3, self.fc4):
            activations.append(self.sopl(layer(activations[-1])))

        return tuple(activations), self.fc5(activations[-1])
class MetaLearner:
"""
    Class for the meta-learning algorithm.

    The MetaLearner class defines the meta-learning procedure: an inner
    adaptation loop that updates the classifier with the plasticity rule, and
    an outer loop that meta-optimizes the plasticity meta-parameters.
"""
def __init__(self, metatrain_dataset, args):
"""
Initialize the Meta-learner.
:param metatrain_dataset: (DataLoader) The meta-training dataset.
:param args: (argparse.Namespace) The command-line arguments.
"""
# -- processor params
self.device = args.device
# -- data params
self.K = args.K
self.M = args.M
self.database = args.database
self.metatrain_dataset = metatrain_dataset
self.data_process = DataProcess(K=self.K, Q=args.Q, dim=args.dim, device=self.device)
# -- model params
self.model = self.load_model(args).to(self.device)
self.Theta = nn.ParameterList([*self.model.params_fwd])
self.fbk = args.fbk
# -- optimization params
self.lamb = args.lamb
self.loss_func = nn.CrossEntropyLoss()
self.OptimAdpt = MyOptimizer(plasticity_rule, args.vec, args.fbk)
self.OptimMeta = optim.Adam([{'params': self.model.params_fwd.parameters(), 'lr': args.lr_meta}])
# -- log params
self.res_dir = args.res_dir
self.plot = Plot(self.res_dir, len(self.Theta), args.avg_window)
def load_model(self, args):
"""
Load classifier model
Loads the classifier network and sets the adaptation, meta-learning,
and grad computation flags for its variables. For module naming conventions
see `__init__` method from `MyModel` class.
:param args: (argparse.Namespace) The command-line arguments.
:return: model with flags "meta_fwd", "adapt", and "requires_grad" set for
its parameters
"""
# -- init model
model = MyModel(args)
# -- learning flags
for key, val in model.named_parameters():
if 'fc' in key:
val.meta_fwd, val.adapt = False, True
elif 'fk' in key:
val.meta_fwd, val.adapt, val.requires_grad = False, False, False
elif 'fwd' in key:
val.meta_fwd, val.adapt = True, False
# -- meta-params
if val.meta_fwd is True:
model.params_fwd.append(val)
return model
@staticmethod
def weights_init(m):
"""
Initialize weight matrices.
The function initializes weight matrices by filling them with values based on
the Xavier initialization method proposed by Glorot et al. (2010). The method
scales the initial values of the weights based on the number of input and output
units to the layer.
* Glorot, Xavier, and Yoshua Bengio. "Understanding the difficulty of training
deep feedforward neural networks." In Proceedings of the thirteenth international
conference on artificial intelligence and statistics, pp. 249-256. JMLR Workshop
and Conference Proceedings, 2010.
:param m: modules in the model.
"""
classname = m.__class__.__name__
if classname.find('Linear') != -1:
# -- weights
init_range = torch.sqrt(torch.tensor(6.0 / (m.in_features + m.out_features)))
m.weight.data.uniform_(-init_range, init_range)
# -- bias
if m.bias is not None:
m.bias.data.uniform_(-init_range, init_range)
def reinitialize(self):
"""
Initialize module parameters.
Initializes and clones the model parameters, creating a separate copy
of the data in new memory. This duplication enables the modification
of the parameters using inplace operations, which allows updating the
parameters with a customized meta-learned optimizer.
:return: dict: module parameters
"""
# -- initialize weights
self.model.apply(self.weights_init)
# -- enforce symmetric feedbacks when backprop is training
if self.fbk == 'sym':
self.model.fk1.weight.data = self.model.fc1.weight.data
self.model.fk2.weight.data = self.model.fc2.weight.data
self.model.fk3.weight.data = self.model.fc3.weight.data
self.model.fk4.weight.data = self.model.fc4.weight.data
self.model.fk5.weight.data = self.model.fc5.weight.data
# -- clone module parameters
params = {key: val.clone() for key, val in dict(self.model.named_parameters()).items() if '.' in key}
# -- set adaptation flags for cloned parameters
for key in params:
params[key].adapt = dict(self.model.named_parameters())[key].adapt
return params
    def train(self):
        """
        Perform meta-training.
        This function iterates over episodes to meta-train the model. At each
        episode, it samples a task from the meta-training dataset, initializes
        the model parameters, and clones them. The meta-training data for each
        episode is processed and divided into training and query data. During
        adaptation, the model is updated using `self.OptimAdpt` function, one
        sample at a time, on the training data. In the meta-optimization loop,
        the model is evaluated using the query data, and the plasticity
        meta-parameters are then updated using the `self.OptimMeta` function.
        Accuracy, loss, and other meta statistics are computed and logged.
        :return: None
        """
        self.model.train()
        for eps, data in enumerate(self.metatrain_dataset):
            # -- initialize: fresh, cloned parameters for this episode
            params = self.reinitialize()
            # -- training data: split the episode batch into adaptation and query sets
            x_trn, y_trn, x_qry, y_qry = self.data_process(data, self.M)
            """ adaptation """
            # -- one inner-loop update per training sample
            for itr_adapt, (x, label) in enumerate(zip(x_trn, y_trn)):
                # -- predict with the cloned parameters (functional forward pass)
                y, logits = _stateless.functional_call(self.model, params, x.unsqueeze(0).unsqueeze(0))
                # -- update network params via the meta-learned plasticity rule
                self.OptimAdpt(params, logits, label, y, self.model.Beta, self.Theta)
            """ meta update """
            # -- predict on the full query batch with the adapted parameters
            y, logits = _stateless.functional_call(self.model, params, x_qry.unsqueeze(1))
            # -- L1 regularization over the forward plasticity meta-parameters
            l1_reg = None
            for T in self.model.params_fwd.parameters():
                if l1_reg is None:
                    l1_reg = T.norm(1)
                else:
                    l1_reg = l1_reg + T.norm(1)
            loss_meta = self.loss_func(logits, y_qry.ravel()) + l1_reg * self.lamb
            # -- compute and store meta stats (also writes files under res_dir)
            acc = meta_stats(logits, params, y_qry.ravel(), y, self.model.Beta, self.res_dir)
            # -- snapshot meta-params before the optimizer step, for logging below
            Theta = [p.detach().clone() for p in self.Theta]
            self.OptimMeta.zero_grad()
            loss_meta.backward()
            self.OptimMeta.step()
            # -- log meta-loss and the pre-step meta-parameter values
            log([loss_meta.item()], self.res_dir + '/loss_meta.txt')
            line = 'Train Episode: {}\tLoss: {:.6f}\tAccuracy: {:.3f}'.format(eps+1, loss_meta.item(), acc)
            for idx, param in enumerate(Theta):
                line += '\tMetaParam_{}: {:.6f}'.format(idx + 1, param.cpu().numpy())
            print(line)
            with open(self.res_dir + '/params.txt', 'a') as f:
                f.writelines(line+'\n')
            # -- refresh the output figures after every episode
            self.plot()
def parse_args():
    """
    Parse the command-line arguments for the meta-learning model.

    Groups: processor (GPU, seed), data (database, image size), meta-training
    (episodes, K/Q/M shot counts, learning rates), logging (result directory,
    plot smoothing), and model (plasticity-term indices, feedback type)
    parameters. After parsing, a time-stamped results directory is created,
    the compute device is selected, and the arguments are validated via
    ``check_args``.

    :return: argparse.Namespace: The validated input arguments for the
        meta-learning model.
    """
    desc = "Pytorch implementation of meta-learning model for discovering biologically plausible plasticity rules."
    parser = argparse.ArgumentParser(description=desc)
    # -- processor params
    parser.add_argument('--gpu_mode', type=int, default=1, help='Accelerate the script using GPU.')
    parser.add_argument('--seed', type=int, default=1, help='Random seed.')
    # -- data params
    parser.add_argument('--database', type=str, default='emnist', help='Meta-training database.')
    parser.add_argument('--dim', type=int, default=28, help='Dimension of the training data.')
    parser.add_argument('--test_name', type=str, default='',
                        help='Name of the folder at the secondary level in the hierarchy of the results directory '
                             'tree.')
    # -- meta-training params
    parser.add_argument('--episodes', type=int, default=600, help='Number of meta-training episodes.')
    parser.add_argument('--K', type=int, default=50, help='Number of training data points per class.')
    parser.add_argument('--Q', type=int, default=10, help='Number of query data points per class.')
    parser.add_argument('--M', type=int, default=5, help='Number of classes per task.')
    parser.add_argument('--lamb', type=float, default=0., help='Meta-loss regularization parameter.')
    parser.add_argument('--lr_meta', type=float, default=1e-3, help='Meta-optimization learning rate.')
    parser.add_argument('--a', type=float, default=1e-3,
                        help='Initial learning rate for the pseudo-gradient term at episode 0.')
    # -- log params
    parser.add_argument('--res', type=str, default='results', help='Result directory.')
    parser.add_argument('--avg_window', type=int, default=10,
                        help='The size of moving average window used in the output figures.')
    # -- model params
    parser.add_argument('--vec', nargs='*', default=[],
                        help='Index vector specifying the plasticity terms to be used for model training in '
                             'adaptation.')
    parser.add_argument('--fbk', type=str, default='sym',
                        help='Feedback connection type: 1) sym = Symmetric feedback; 2) fix = Fixed random feedback.')
    args = parser.parse_args()
    # -- storage settings: results go under <cwd>/<res>/<test_name>/<timestamp>_<rand>
    # NOTE(review): randrange(40) gives a small suffix space — two runs started in
    # the same second could collide on the same directory; confirm acceptable.
    s_dir = os.getcwd()
    args.res_dir = os.path.join(s_dir, args.res, args.test_name,
                                datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S_') + str(randrange(40)))
    os.makedirs(args.res_dir)
    # -- GPU settings: fall back to CPU when CUDA is unavailable
    args.device = torch.device('cuda' if (bool(args.gpu_mode) and torch.cuda.is_available()) else 'cpu')
    # -- evaluation flag: False during meta-training
    args.evl = False
    return check_args(args)
def check_args(args):
    """
    Validate the input arguments and persist them to disk.

    Prints a notice (without failing) when GPU mode was requested on a
    machine without CUDA support, then writes every argument as a
    ``name : value`` line to ``args.txt`` inside the results directory
    so the run configuration is reproducible.

    :param args: (argparse.Namespace) The command-line arguments.
    :return: (argparse.Namespace) The validated input arguments.
    """
    # -- GPU check: requested but unavailable -> run on CPU
    if bool(args.gpu_mode) and not torch.cuda.is_available():
        print('No GPUs on this device! Running on CPU.')

    # -- store settings for reproducibility
    with open(args.res_dir + '/args.txt', 'w') as fp:
        for name, value in vars(args).items():
            fp.write("{} : {}\n".format(name, value))

    return args
def main():
    """
    Entry point for meta-learning the plasticity rule.

    Parses the command-line arguments, builds the custom EMNIST dataset
    (K training and Q query samples per class), wraps it in a DataLoader
    whose sampler draws enough samples for the requested number of
    M-class episodes, and runs meta-training with a MetaLearner.

    :return: None
    """
    # -- arguments
    args = parse_args()

    # -- data: each DataLoader batch of M classes forms one episode's task
    emnist = EmnistDataset(K=args.K, Q=args.Q, dim=args.dim)
    task_sampler = RandomSampler(data_source=emnist, replacement=True, num_samples=args.episodes * args.M)
    episodes = DataLoader(dataset=emnist, sampler=task_sampler, batch_size=args.M, drop_last=True)

    # -- meta-train
    MetaLearner(episodes, args).train()
# -- run meta-training only when executed as a script
if __name__ == '__main__':
    main()
| NeuralDynamicsAndComputing/MetaLearning-Plasticity | main.py | main.py | py | 16,265 | python | en | code | 5 | github-code | 50 |
7182134406 | import io
import sys
import pickle
import numpy as np
import rdkit
from rdkit import Chem
from rdkit.Chem import rdDistGeom
np.set_printoptions(precision=4)
print(rdkit.__version__)
# Extract fragments from an SD file (path given as argv[1]) by cutting each
# molecule at its rotatable bonds, and store a pairwise atom-distance matrix
# per fragment (keyed by canonical SMILES) in fragments.pickle.
suppl = Chem.SDMolSupplier(sys.argv[1])

# SMARTS for rotatable bonds; compiled once since it is loop-invariant.
RotatableBond = Chem.MolFromSmarts('[!$(*#*)&!D1]-&!@[!$(*#*)&!D1]')

fragment_list = {}
for mol in suppl:
    if mol is None:
        continue
    # Cut the input molecule at every rotatable bond
    rwmol = Chem.RWMol(mol)
    for begin, end in mol.GetSubstructMatches(RotatableBond):
        rwmol.RemoveBond(begin, end)
        beginAtom = rwmol.GetAtomWithIdx(begin)
        endAtom = rwmol.GetAtomWithIdx(end)
        # Aromatic heteroatoms get an explicit H after the cut so that the
        # resulting fragment still sanitizes.
        if beginAtom.GetAtomicNum() != 6 and beginAtom.GetIsAromatic():
            beginAtom.SetNumExplicitHs(1)
            beginAtom.SetNoImplicit(True)
        if endAtom.GetAtomicNum() != 6 and endAtom.GetIsAromatic():
            endAtom.SetNumExplicitHs(1)
            endAtom.SetNoImplicit(True)
    try:
        fragments = Chem.rdmolops.GetMolFrags(rwmol.GetMol(), asMols=True)
    except Exception:
        # Skip molecules whose fragments cannot be sanitized
        continue
    # Generate a distance matrix from each fragment's 3D conformer
    for fragment in fragments:
        # Skip small fragments
        if fragment.GetNumHeavyAtoms() < 5:
            continue
        smiles = Chem.MolToSmiles(fragment)
        print(smiles)
        # Renumber atoms to the canonical SMILES output order so matrix
        # rows/columns correspond to the SMILES atom order.
        atomOrder = fragment.GetPropsAsDict(includePrivate=True, includeComputed=True)['_smilesAtomOutputOrder']
        fragment = Chem.RenumberAtoms(fragment, atomOrder)
        conf = fragment.GetConformer()
        N = fragment.GetNumAtoms()
        distMat = np.zeros((N, N))
        for i in range(N):
            for j in range(N):
                distMat[i, j] = (conf.GetAtomPosition(i) - conf.GetAtomPosition(j)).Length()
        # NOTE: fragments sharing a canonical SMILES overwrite each other;
        # only the last-seen geometry is kept.
        fragment_list[smiles] = distMat

# Save distance matrices with pickle
with open('fragments.pickle', mode='wb') as f:
    pickle.dump(fragment_list, f)
| n-yoshikawa/gsoc2019 | playground/fragment-rdkit.py | fragment-rdkit.py | py | 2,011 | python | en | code | 0 | github-code | 50 |
70895063514 | import argparse
from typing import Sequence
from pypi_search import __version__
def parse_args(argv: Sequence[str]) -> argparse.Namespace:
    """Parse the command-line arguments for pypi-search.

    :param argv: raw argument list (without the program name).
    :return: the parsed argument namespace.
    """
    parser = argparse.ArgumentParser(description='Search for PyPi packages')
    parser.add_argument('search', help='Package to show information on')
    parser.add_argument(
        '-d', '--description',
        action='store_true',
        help='Show package description',
    )
    parser.add_argument(
        '--version',
        action='version',
        version=f'%(prog)s {__version__}',
    )
    parser.add_argument(
        '-o', '--open',
        action='store_true',
        help='Open homepage in browser',
    )
    return parser.parse_args(argv)
| asadmoosvi/pypi-search | pypi_search/arg_parser.py | arg_parser.py | py | 701 | python | en | code | 12 | github-code | 50 |
16139837174 | # 2021.01.27
# @yifan & zhanxuan
# PCA transformation
#
# 2D PCA modified from https://blog.csdn.net/w450468524/article/details/54895477
#
import numpy as np
class myPCA2D():
    """2D PCA (2DPCA) transform operating on W x H image blocks.

    ``fit`` learns two kernels from the data:
      * ``K1`` (H, H): row kernel, eigenvectors of the row covariance,
      * ``K2`` (W, W): column kernel, eigenvectors of the column covariance,
    each sorted by descending eigenvalue.

    The forward transform of a block A is ``C = K2.T @ A @ K1`` and the
    inverse is ``A = K2 @ C @ K1.T``. Since all H (resp. W) eigenvectors
    are kept, the transform is lossless when the kernels are full rank.

    Modified from https://blog.csdn.net/w450468524/article/details/54895477
    """

    def __init__(self, n_components, H=None, W=None):
        # n_components is accepted for interface compatibility but unused:
        # all eigenvectors are retained.
        self.H = H    # block height; must be set before calling fit()
        self.W = W    # block width; must be set before calling fit()
        self.K1 = []  # (H, H) row kernel, filled by fit()
        self.K2 = []  # (W, W) column kernel, filled by fit()

    def fit(self, X):
        """Estimate the two PCA kernels from sample blocks.

        :param X: array reshapeable to (n_blocks, W, H).
        :return: self
        """
        X = np.asarray(X).reshape(-1, self.W, self.H)
        n = float(X.shape[0])
        # -- centered blocks; mean over the block axis
        diffs = X - X.mean(axis=0)

        # -- row covariance: (1/n) * sum_i diff_i.T @ diff_i, shape (H, H)
        cov_row = np.einsum('nwh,nwk->hk', diffs, diffs) / n
        row_eval, row_evec = np.linalg.eig(cov_row)
        order = np.argsort(row_eval)
        self.K1 = np.array(row_evec[:, order[:-self.H - 1:-1]])

        # -- column covariance: (1/n) * sum_i diff_i @ diff_i.T, shape (W, W)
        cov_col = np.einsum('nwh,nvh->wv', diffs, diffs) / n
        col_eval, col_evec = np.linalg.eig(cov_col)
        order = np.argsort(col_eval)
        self.K2 = np.array(col_evec[:, order[:-self.W - 1:-1]])
        return self

    def trans(self, X, inv=False):
        """Apply the (inverse) 2D PCA transform block-wise.

        :param X: array reshapeable to (n_blocks, W, H).
        :param inv: if True, apply the inverse transform.
        :return: transformed array with the same shape as the input.
        """
        X = np.asarray(X)
        S = X.shape
        X = X.reshape(-1, self.W, self.H)
        # -- batched matrix products via broadcasting (replaces Python loop)
        if inv:
            res = self.K2 @ X @ np.transpose(self.K1)
        else:
            res = np.transpose(self.K2) @ X @ self.K1
        return np.asarray(res).reshape(S)

    def transform(self, X):
        """Forward transform: C = K2.T @ A @ K1 for each block A."""
        return self.trans(X, inv=False)

    def inverse_transform(self, X):
        """Inverse transform: A = K2 @ C @ K1.T for each block C."""
        return self.trans(X, inv=True)
| yifan-fanyi/Func-Pool | myPCA2D.py | myPCA2D.py | py | 2,327 | python | en | code | 2 | github-code | 50 |
13435916339 | import collections
import os.path
from . import training
# -- file-system layout: all program output lives under `work_path`;
# -- training-specific paths are namespaced by `training.training_id`.
work_path = '_workspace'
"""str: Path to parent directory containing program output."""
extraction_path = os.path.join(work_path, 'features')
"""str: Path to the directory containing extracted feature vectors."""
scaler_path = os.path.join(extraction_path, 'scaler.p')
"""str: Path to the scaler file used for standardization."""
model_path = os.path.join(work_path, 'models', training.training_id)
"""str: Path to the output directory of saved models."""
log_path = os.path.join(work_path, 'logs', training.training_id)
"""str: Path to the directory of TensorBoard logs."""
history_path = os.path.join(log_path, 'history.csv')
"""str: Path to log file for training history."""
# NOTE(review): the '{}_{}' placeholders below are presumably filled with
# str.format at the call sites — confirm the expected arguments there.
predictions_path = os.path.join(
    work_path, 'predictions', training.training_id, '{}_{}_predictions.p')
"""str: Path to a model predictions file."""
results_path = os.path.join(
    work_path, 'results', training.training_id, '{}_{}_results.csv')
"""str: Path to the file containing results."""
Dataset = collections.namedtuple('Dataset', ['name', 'path', 'metadata_path'])
"""Data structure encapsulating information about a dataset."""
_root_dataset_path = ('/vol/vssp/AP_datasets/audio/audioset/'
                      'task4_dcase2017_audio/official_downloads')
"""str: Path to root directory containing input audio clips."""
training_set = Dataset(
    name='training',
    path=os.path.join(_root_dataset_path, 'training'),
    metadata_path='metadata/groundtruth_weak_label_training_set.csv',
)
"""Dataset instance for the training dataset."""
validation_set = Dataset(
    name='validation',
    path=os.path.join(_root_dataset_path, 'testing'),
    metadata_path='metadata/groundtruth_strong_label_testing_set.csv',
)
"""Dataset instance for the validation dataset.
Note:
    The validation set is called the 'testing' set in DCASE 2017.
"""
test_set = Dataset(
    name='test',
    path=os.path.join(_root_dataset_path, 'evaluation'),
    metadata_path='metadata/groundtruth_strong_label_evaluation_set.csv',
)
"""Dataset instance for the test dataset.
Note:
    The test set is called the 'evaluation' set in DCASE 2017.
"""
def to_dataset(name):
    """Return the Dataset instance corresponding to the given name.
    Args:
        name (str): Name of dataset ('training', 'validation' or 'test').
    Returns:
        The matching Dataset instance, or None if the name is unknown.
    """
    lookup = {
        'training': training_set,
        'validation': validation_set,
        'test': test_set,
    }
    return lookup.get(name)
| tqbl/gccaps | gccaps/config/paths.py | paths.py | py | 2,735 | python | en | code | 15 | github-code | 50 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.