seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
8695805257 | from lxml import etree
from sys import stdout, stderr
def mk_comment(text):
    """Wrap *text* in a C-style block comment, trimming surrounding spaces."""
    return '/*{}*/\n'.format(text.strip(' '))
# JS file preamble: loads the native binding and exports the interface registry.
header_gen = u"""var wl = require('./build/Release/wayland_client');
var interfaces = {};
exports.interfaces = interfaces;
"""
# One JS constructor per Wayland interface; %(prototype)s is filled with the
# joined method/enum entries built by generate_interface().
interface_template = u"""function %(name)s(proxy) {
this.proxy = proxy;
proxy.spy(this);
};
%(name)s.prototype = {
%(prototype)s
};
%(name)s.interface = wl.get_interface_by_name(%(name)r);
interfaces[%(name)r] = %(name)s
"""
# Generic listen/destroy helpers added to every interface except wl_display.
default_proxy_template = """ listen: function(listeners) {
var self = this;
this.proxy.listen(function(name){
if (listeners[name]) listeners[name].apply(self, Array.prototype.slice.call(arguments, 1));
});
},
destroy: function() {
this.proxy.destroy()
}"""
# Plain request: marshal the opcode (%(magic)s baked into %(argv)s) and args.
request_template = u""" %(name)s: function(%(args)s) {
this.proxy.marshal(%(argv)s);
}"""
# Factory request with a statically-known interface: create the proxy first,
# marshal, then wrap it in the concrete spy constructor.
factory_template = u""" %(name)s: function(%(args)s) {
new_id = this.proxy.create(%(spy)s.interface);
this.proxy.marshal(%(argv)s);
return new %(spy)s(new_id);
}"""
# Factory request whose interface is chosen at runtime (caller passes the spy).
factory_dyn_template = u""" %(name)s: function(%(args)s) {
new_id = this.proxy.create(spy.interface);
this.proxy.marshal(%(argv)s);
return new spy(new_id);
}"""
def generate_request(index, request):
    """Render one JS request method from a <request> XML element.

    *index* is the request's opcode; it becomes the first marshal argument.
    Returns the filled-in template string.  Raises on unknown child nodes.
    """
    data = dict(name=request.attrib['name'], magic=index)
    args = []
    argv = [str(index)]
    template = request_template
    for node in request:
        if node.tag == 'description':
            continue
        if node.tag == 'arg':
            arg_type = node.attrib['type']
            if arg_type in ('int', 'uint', 'fd', 'string', 'fixed'):
                arg_name = node.attrib['name']
                args.append(arg_name)
                argv.append(arg_name)
                continue
            if arg_type == 'object':
                # Pass the underlying proxy unless the object is null/undefined.
                arg_name = node.attrib['name']
                args.append(arg_name)
                argv.append('(%(var)s === null || %(var)s === undefined)?%(var)s:%(var)s.proxy' % dict(var=arg_name))
                continue
            if arg_type == 'new_id':
                if 'interface' in node.attrib:
                    # Interface known at scan time: concrete factory.
                    template = factory_template
                    data['spy'] = node.attrib['interface']
                    argv.append('new_id')
                else:
                    # Interface chosen at runtime: caller supplies spy+version.
                    template = factory_dyn_template
                    args.append('spy, version')
                    argv.append('spy.interface.get_name(), version, new_id')
                continue
        # Anything else (including unsupported arg types) is a hard error.
        stderr.write("%s %r %r" % (node.tag, node.attrib, node[:]))
        stderr.write("\n")
        raise Exception("unknown argument node %r" % node)
    data['args'] = ', '.join(args)
    data['argv'] = ', '.join(argv)
    return template % data
def generate_enum_const(enum_name, const):
    """Render one JS constant ("ENUM_ENTRY: value") from an <entry> element."""
    const_name = ('%s_%s' % (enum_name, const.attrib['name'])).upper()
    return '%s: %s' % (const_name, const.attrib['value'])
def generate_interface(interface):
    """Build the template data (joined prototype body) for an <interface> element.

    Requests get sequential opcodes; enum entries become uppercase constants.
    Events are skipped (they are dispatched via the generic listen helper).
    """
    request_index = 0
    methods = []
    enums = []
    name = interface.attrib['name']
    # wl_display is special-cased by the runtime and gets no generic helpers.
    if name != 'wl_display':
        methods.append(default_proxy_template)
    for child in interface:
        if child.tag == 'description' or child.tag == 'event' or child.tag == etree.Comment:
            continue
        elif child.tag == 'request':
            methods.append(generate_request(request_index, child))
            request_index += 1
        elif child.tag == 'enum':
            enum_name = child.attrib['name']
            for entry in child:
                if entry.tag == 'entry':
                    enums.append(generate_enum_const(enum_name, entry))
                elif entry.tag == 'description':
                    continue
                else:
                    stderr.write("%s %r %r" % (entry.tag, entry.attrib, entry[:]))
                    stderr.write("\n")
                    raise Exception("unknown entry node %r" % entry)
        else:
            raise Exception("unknown interface node %r" % child)
    return dict(prototype=',\n'.join(methods + enums))
# Emit the generated JS bindings for every interface in wayland.xml to stdout.
root = etree.parse("wayland.xml").getroot()
stdout.write(header_gen)
for node in root:
    if node.tag == 'copyright':
        # BUG FIX: sys.stdout is a text stream in Python 3; writing
        # .encode('utf-8') bytes raised TypeError.  Write str directly
        # (matching the un-encoded header_gen write above).
        stdout.write(mk_comment(node.text))
    elif node.tag == 'interface':
        data = generate_interface(node)
        data.update(node.attrib)
        stdout.write(interface_template % data)
    elif node.tag == etree.Comment:
        # XML comments at the root level are harmless; skip them like
        # generate_interface() does instead of raising.
        continue
    else:
        raise Exception("unknown root node")
| cheery/node-wayland | tools/nodeland-scanner.py | nodeland-scanner.py | py | 4,464 | python | en | code | 61 | github-code | 36 |
256680613 | nbCharettes = int(input())
# Read each cart's weight, keeping a running total for the average.
totalPoids = 0
charettes = []
for _ in range(nbCharettes):
    poids = float(input())
    charettes.append(poids)
    totalPoids += poids
# Print how far each cart is from the mean weight.
moyenne = totalPoids / nbCharettes
for charette in charettes:
    print(moyenne - charette)
74082781542 | # import numpy as np
# from sympy import Symbol, Poly, simplify, re, im
# from dispersion_relation import longitudinal_disp, transverse_disp
# # Number of data points
# # N = 10
# k = Symbol('k')
# S0 = Symbol('S0')
# w = Symbol('w')
# S0 = 0.4
# w_array = np.linspace(100,1E7,5)
# for w in w_array:
# disp_rel = Poly(simplify(longitudinal_disp(S0,w,1)),k)
# print('w = ' + str(w))
# coeffs = disp_rel.all_coeffs()
# j = complex(0,1)
# real = np.array([float(re(c)) for c in coeffs])
# imag = np.array([float(im(c)) for c in coeffs])
# co = real + imag*j
# print(co)
# roots = np.roots(co)
# print(roots)
# print(np.poly(roots))
# Plotting speed and attenuation against initial saturation for longitudinal case
from config import epsilon
from dispersion_relation import longitudinal_disp
from sympy import nroots
from sympy import re, im, Symbol, simplify, Poly
import numpy as np
import matplotlib.pyplot as plt
from math import pi
plt.rc('text', usetex=True)
# Number of data points per sampling scheme (linear and logarithmic).
N = 50
# Choose non-wetting material
# 0 - Air
# 1 - Oil
# 2 - Gas
material_mode = 2
w_lin = np.linspace(1, 1E7, N)
w_log = np.logspace(0, 7, N)
# Angular frequencies: first N linearly spaced, last N logarithmically spaced.
w = np.concatenate((w_lin, w_log), axis=0)
f = w/(2*pi)
S0_array = [0.2, 0.4, 0.6, 0.8, 1-epsilon]
S0_label = [str(i) for i in S0_array]
k = Symbol('k')
j = complex(0, 1)  # imaginary unit used when assembling complex coefficients
for i in range(len(S0_array)):
    S0 = S0_array[i]
    print('Progress: ' + str(int(100*(i+1)/5)) + '%')
    # Dispersion relation(s) in k for this saturation, over all frequencies.
    disp_rel = longitudinal_disp(S0, w, material_mode)
    disp_rel = [Poly(simplify(d), k) for d in disp_rel]
    coeff = [d.all_coeffs() for d in disp_rel]
    root_array = np.array([])
    for c in coeff:
        real = np.array([float(re(a)) for a in c])
        imag = np.array([float(im(a)) for a in c])
        co = real + imag*j
        roots = np.roots(co)
        roots = roots[::2]  # keep every other root (sign pairs?) - TODO confirm
        root_array = np.append(root_array, roots)
    print('real')
    reals = np.abs(np.real(root_array))
    print(reals)
    print('imag')
    imags = np.abs(np.imag(root_array))
    print(imags)
    # Separate the three longitudinal modes: roots were appended in groups of
    # three per frequency, so stride-3 slicing picks out each mode.
    for l in range(3):
        print(l)
        print('real_short')
        realx = reals[l::3]
        print(realx)
        print('imag_short')
        imagx = imags[l::3]
        print(imagx)
        # Phase velocity from the log-spaced half, attenuation from the
        # linearly-spaced half of the frequency array.
        speed_array = w[N:]/realx[N:]
        attenuation_array = imagx[:N]
        plt.figure(l)
        plt.semilogx(f[N:], speed_array, label=S0_label[i])
        plt.figure(l+3)
        if l == 2:
            plt.plot(f[:N], attenuation_array, label=S0_label[i])
        else:
            plt.semilogy(f[:N], attenuation_array, label=S0_label[i])
# Finalize and save the six figures once every saturation has been plotted.
# BUG FIX: this loop was previously nested inside the saturation loop above,
# which (a) rebound its loop variable `j`, clobbering the imaginary unit used
# by the root solver on every iteration after the first, and (b) called
# plt.clf() after each saturation, so the saved figures only ever contained
# the final saturation's curve.
for fig_idx in range(3):
    plt.figure(fig_idx)
    plt.legend()
    plt.xlabel('frequency / Hz')
    plt.ylabel(r'velocity / $ms^{-1}$')
    plt.title('P' + str(3-fig_idx) + '-wave Velocity Against Frequency')
    plt.savefig('../plots/speed_freq_p' + str(3-fig_idx) + '.eps')
    plt.clf()
    plt.figure(fig_idx+3)
    plt.legend()
    plt.xlabel('frequency / Hz')
    plt.ylabel(r'attenuation / $m^{-1}$')
    plt.title('P' + str(3-fig_idx) + '-wave Attenuation Against Frequency')
    plt.savefig('../plots/attenuation_freq_p' + str(3-fig_idx) + '.eps')
    plt.clf()
74302981863 | from googleapiclient.discovery import build
import my_settings
def commentList(video_id):
    """Collect every top-level comment on a YouTube video via the Data API v3.

    Pages through all commentThreads (100 per request) and returns a list of
    dicts with the comment text, author profile info, and empty placeholder
    lists for later sentence-level sentiment analysis.
    """
    api_key = my_settings.YOUTUBE_API_KEY
    api_obj = build('youtube', 'v3', developerKey=api_key)
    comment_list = list()
    response = api_obj.commentThreads().list(
        part='snippet,replies', videoId=video_id, maxResults=100).execute()
    while response:
        for item in response['items']:
            comment = item['snippet']['topLevelComment']['snippet']
            comment_list.append({
                "comment": comment['textOriginal'],
                "comment_info": {
                    "profile_img": comment["authorProfileImageUrl"],
                    "profile_name": comment['authorDisplayName'],
                    "datetime": comment['publishedAt'],
                },
                "sentence": [],
                "sentence_positive": [],
            })
        # Replies (nested comments) are intentionally not analyzed here.
        if 'nextPageToken' not in response:
            break
        response = api_obj.commentThreads().list(
            part='snippet,replies', videoId=video_id,
            pageToken=response['nextPageToken'], maxResults=100).execute()
    return comment_list
38221724083 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def sortedArrayToBST(self, nums: List[int]) -> Optional[TreeNode]:
        """Build a height-balanced BST from a sorted (strictly increasing) list.

        The middle element becomes the root; the sub-lists left and right of
        it become the left/right subtrees recursively.

        BUG FIX: the original partitioned with a value-comparing helper, which
        (a) performed a full O(n) scan at every node and (b) silently dropped
        any elements equal to the pivot.  Slicing around the middle index is
        simpler, faster, and keeps duplicates.
        """
        mid = len(nums) // 2
        root = TreeNode(nums[mid])
        left, right = nums[:mid], nums[mid + 1:]
        if left:
            root.left = self.sortedArrayToBST(left)
        if right:
            root.right = self.sortedArrayToBST(right)
        return root

    def split(self, nums, val):
        """Partition *nums* into (values < val, values > val).

        Values equal to *val* are dropped.  No longer used by
        sortedArrayToBST; kept for backward compatibility.
        """
        left = [x for x in nums if x < val]
        right = [x for x in nums if x > val]
        return left, right
| vaibhavTekk/leetcoding | problems/convert_sorted_array_to_binary_search_tree/solution.py | solution.py | py | 878 | python | en | code | 1 | github-code | 36 |
26661454366 | import datetime
import http
import json
import sys
import time
from http.client import HTTPSConnection
import discord
from discord.ext import commands, tasks
import settings
def current_time_string():
    """Return the current UTC time formatted as HH:MM:SS.

    Uses a timezone-aware datetime; datetime.utcfromtimestamp() is
    deprecated since Python 3.12.  Output is identical to the old
    utcfromtimestamp(time.time()) formatting.
    """
    return datetime.datetime.now(datetime.timezone.utc).strftime('%H:%M:%S')
class TwitchCog(commands.Cog, name="Twitch"):
    """Discord cog that polls the Twitch Helix API and announces in a
    configured channel when the configured streamer goes live."""

    def __init__(self, bot):
        self.bot = bot
        # Last known live state; we only announce the offline -> online edge.
        self.was_previously_online = False
        # Unix timestamp before which no poll is performed (cooldown after
        # an announcement).
        self.nextUpdateAllowedAt = 0
        if not self.poll_thread.is_running():
            self.poll_thread.start()

    def cog_unload(self):
        # Stop the background polling loop when the cog is removed.
        self.poll_thread.cancel()

    def get_twitch_user_by_name(self, usernames):
        """Look up Twitch user(s) by login name via /helix/users.

        *usernames* may be a single name or a list of names.  Returns the
        decoded JSON response; on failure the caught exception object is
        returned rather than raised (callers test
        ``isinstance(result, Exception)`` -- see ``enabletwitch``).
        """
        try:
            if isinstance(usernames, list):
                usernames = ['login={0}'.format(i) for i in usernames]
                req = '/helix/users?' + '&'.join(usernames)
            else:
                req = '/helix/users?login=' + usernames
            print(req)
            connection = http.client.HTTPSConnection('api.twitch.tv', timeout=10)
            connection.request('GET', req, None, headers={
                'Authorization': "Bearer " + settings.read_option(settings.KEY_TWITCH_ACCESS_TOKEN, ""),
                'client-id': settings.TWITCH_CLIENT_ID})
            response = connection.getresponse()
            print("[{}] Twitch: {}: {} {}".format(current_time_string(), req, response.status, response.reason))
            if response.status == 401:
                # Token rejected: refresh and retry.
                # NOTE(review): if the refreshed token is also rejected this
                # recurses without bound -- confirm whether a retry limit is
                # intended.
                self.get_access_token()
                return self.get_twitch_user_by_name(usernames)
            re = response.read().decode()
            j = json.loads(re)
            return j
        except Exception as e:
            print(e, file=sys.stderr)
            return e

    def get_access_token(self):
        """Fetch an app access token (client-credentials flow) from
        id.twitch.tv and persist it via settings.

        Returns the decoded JSON response, or the caught exception object
        on failure.
        """
        try:
            print("Twitch: Attempting to get access token")
            connect_string = "/oauth2/token?client_id={client_id}" \
                             "&client_secret={client_secret}" \
                             "&grant_type=client_credentials".format(client_id=settings.TWITCH_CLIENT_ID,
                                                                     client_secret=settings.TWITCH_CLIENT_SECRET)
            auth_connection = http.client.HTTPSConnection('id.twitch.tv', timeout=10)
            auth_connection.request('POST', connect_string, None)
            response = auth_connection.getresponse()
            print("Twitch: {}: {} {}".format(connect_string, response.status, response.reason))
            re = response.read().decode()
            j = json.loads(re)
            print(j)
            settings.write_option(settings.KEY_TWITCH_ACCESS_TOKEN, j["access_token"])
            return j
        except Exception as e:
            print(e, file=sys.stderr)
            return e

    def get_streams(self, usernames):
        """Query /helix/streams for the given login name(s).

        Returns the decoded JSON response (its "data" list contains only
        currently-live streams), or the caught exception object on failure.
        """
        try:
            if isinstance(usernames, list):
                usernames = ['user_login={0}'.format(i) for i in usernames]
                req = '/helix/streams?' + '&'.join(usernames)
            else:
                req = '/helix/streams?user_login=' + usernames
            connection = http.client.HTTPSConnection('api.twitch.tv', timeout=10)
            connection.request('GET', req, None, headers={
                'Authorization': "Bearer " + settings.read_option(settings.KEY_TWITCH_ACCESS_TOKEN, ""),
                'client-id': settings.TWITCH_CLIENT_ID
            })
            response = connection.getresponse()
            print("Twitch: {}: {} {}".format(req, response.status, response.reason))
            if response.status == 401:
                # Token rejected: refresh and retry (same unbounded-retry
                # caveat as get_twitch_user_by_name).
                self.get_access_token()
                return self.get_streams(usernames)
            re = response.read().decode()
            j = json.loads(re)
            return j
        except Exception as e:
            print(e, file=sys.stderr)
            return e

    @tasks.loop(seconds=settings.TWITCH_POLL_RATE)
    async def poll_thread(self):
        """Background task: poll the configured channel's live state and
        announce the offline -> online transition, then apply a cooldown."""
        if settings.read_option(settings.KEY_TWITCH_INTEGRATION, "False") == "True" \
                and time.time() > self.nextUpdateAllowedAt:
            try:
                result_json = self.get_streams(settings.read_option(settings.KEY_TWITCH_CHANNEL, ""))
                is_online = False
                for stream in result_json["data"]:
                    if stream["user_name"] == settings.read_option(settings.KEY_TWITCH_CHANNEL, ""):
                        is_online = True
                        if not self.was_previously_online:
                            # Just came online: post the announcement and
                            # start the cooldown window.
                            await self.send_message_to_channel(
                                settings.TWITCH_ANNOUNCEMENT_MESSAGE.format(
                                    streamer=stream['user_name'],
                                    stream_link="https://twitch.tv/" + stream['user_name'],
                                    stream_description=stream['title']),
                                int(settings.read_option(settings.KEY_ANNOUNCEMENT_CHANNEL_TWITCH, 0)))
                            self.nextUpdateAllowedAt = time.time() + settings.TWITCH_POLL_COOLDOWN_MINUTES * 60
                        break
                print("[{}] Twitch: isOnline: {}, wasPreviouslyOnline: {}".format(current_time_string(), is_online,
                                                                                  self.was_previously_online))
                self.was_previously_online = is_online
            except Exception as e:
                print(e)
        else:
            print("[{}] Waiting to be allowed to check again".format(current_time_string()))

    async def send_message_to_channel(self, string, channel_id: int):
        """Send *string* to the Discord channel with the given id."""
        print("Sending announcement to channel {}".format(channel_id))
        channel = self.bot.get_channel(channel_id)
        await channel.send(string)

    @commands.command()
    @commands.has_any_role("Mods", "Admin")
    async def disabletwitch(self, ctx):
        """Turn Twitch announcements off (Mods/Admin only)."""
        settings.write_option(settings.KEY_TWITCH_INTEGRATION, "False")
        await ctx.send("Twitch integration disabled")

    @commands.command()
    @commands.has_any_role("Mods", "Admin")
    async def enabletwitch(self, ctx, twitch_username):
        """Send twitch updates to this channel"""
        # Must run in a regular text channel: its id becomes the
        # announcement target.
        print(str(ctx.message.channel.id))
        if isinstance(ctx.message.channel, discord.TextChannel):
            user_json = self.get_twitch_user_by_name(twitch_username)
            # The lookup helpers return the exception object on failure.
            if isinstance(user_json, Exception):
                await ctx.send("*Error: {}*".format(str(user_json)))
                return
            print(user_json)
            try:
                print("Found userid: {}".format(user_json["data"][0]["id"]))
                settings.write_option(settings.KEY_TWITCH_CHANNEL, user_json["data"][0]["display_name"])
                settings.write_option(settings.KEY_ANNOUNCEMENT_CHANNEL_TWITCH, str(ctx.message.channel.id))
                settings.write_option(settings.KEY_TWITCH_INTEGRATION, "True")
                await ctx.send(
                    "Successfully set the announcement channel to: {}, I will post here when {} comes online.".format(
                        ctx.message.channel.name, twitch_username))
            except IndexError:
                # Empty "data" list: no such Twitch user.
                await ctx.send("Could not find user {}".format(twitch_username))
            except Exception as e:
                await ctx.send(str(e))
        else:
            await ctx.send("Needs to be done in a regular channel")
            return

    @enabletwitch.error
    async def enabletwitch_error(self, ctx, error):
        # Show usage help when the command is invoked with bad arguments.
        if isinstance(error, commands.UserInputError):
            await ctx.send('Usage: `{}enabletwitch <twitch_channel_name>` '
                           '\nIt must be used in a regular channel so it knows where to post announcements.'
                           .format(settings.CALL_CHARACTER))
| gr3ger/pyGaBot | twitch_cog.py | twitch_cog.py | py | 7,835 | python | en | code | 0 | github-code | 36 |
34338900072 | # https://leetcode.com/problems/length-of-last-word/
class Solution:
    def lengthOfLastWord(self, s: str) -> int:
        """Return the length of the last space-separated word in *s*.

        Returns 0 when the string is empty or contains only spaces.
        """
        trimmed = s.rstrip(' ')
        last_space = trimmed.rfind(' ')
        return len(trimmed) - last_space - 1
| 0x0400/LeetCode | p58.py | p58.py | py | 440 | python | en | code | 0 | github-code | 36 |
20015190759 | import math
import datetime
# A literal is a constant value written directly in the code.
s = 'This is String'
s2 = "This is also another String"
# Text can be placed on several different lines this way:
s3 = '''This is
a triple
String'''
sEmpty = ' '
# problem = 'this won't work'
solution = "This os how it´s done"
orThatWay = ' "like that" AS CITAT'
## ( \ ) a trailing backslash must be the last character on the line; the statement continues on the next line.
string = 'testa, ' "att" \
    ' Köra något nytt '
print(s3)
print(f'formatted string, want to print the value of pi : {math.pi:.4f}')
print(r'c:\user\right what do you want without having backslash to annoy you')
ss = 'this' ' and '
ss2 = 'that'
print(ss + ss2) # output: this and that
# Repeat the String
repString = 'Hej ' + 'Y' + 'a' * 2 + 'r' + 'a' * 3 + '!'
print(repString) # Output: Hej Yaaraaa!
stringen = "Iam Yara"
stringen.isdecimal() # Returns True/False if all characters are decimal digits
stringen.isalpha() # Returns True/False if all characters are alphabetic
stringen.isupper() # Returns True/False if all characters are uppercase
stringen.lower() # Returns a copy with all characters converted to lowercase
stringen.upper() # Returns a copy with all characters converted to uppercase
stringen.replace('old Str', 'new Str') # Returns a copy with old "str1" replaced by new "str2"
print(stringen.upper()) # Output: IAM YARA
print(stringen.replace('Y', 'S')) # Output: Iam Sara
#stringen[2] = 'W' -- Error
#print(stringen) -- Error
# Strings are immutable: we cannot assign a new value to a character position.
print(stringen.isdecimal()) # Output: False
datetimeString = datetime.datetime.now()
print(datetimeString) # Output will look like: 2023-06-20 13:48:54.283764
""" Attributes:
Datum: år, månad, dag, Tid: timme, minut, sekund, mikroSek? """
print(f'Formatted string datum & tid är {datetimeString.month}/{datetimeString.day}/{datetimeString.year}')
# Output becomes instead: Formatted string datum & tid är 6/20/2023
| yararajjoub/pythonModulo | Modul3/Strings.py | Strings.py | py | 2,088 | python | sv | code | 0 | github-code | 36 |
38659585982 | from django.forms.models import model_to_dict
from django.http import JsonResponse
import re
class JsonBaseMixin:
    """Content-negotiation base: render JSON when the client's Accept header
    asks for 'application/json', otherwise fall back to the normal view."""
    json_response_class = JsonResponse
    response_type = 'text/http'
    accepted_types = ['text/http', 'application/json']

    def dispatch(self, response, *args, **kwargs):
        # Remember the first Accept entry we know how to serve.
        for candidate in response.META.get('HTTP_ACCEPT', 'text/html').split(','):
            if candidate in self.accepted_types:
                self.response_type = candidate
                break
        return super(JsonBaseMixin, self).dispatch(response, *args, **kwargs)

    def render_to_response(self, context, **response_kwargs):
        wants_json = self.response_class and self.response_type == 'application/json'
        if not wants_json:
            return super(JsonBaseMixin, self).render_to_response(context, **response_kwargs)
        payload = {'object': self.get_object_dict()}
        return self.json_response_class(payload, **response_kwargs)

    def get_object_dict(self):
        # Subclasses provide the serializable payload.
        pass
class JsonListMixin(JsonBaseMixin):
    """JSON list rendering: serialize every object in the queryset."""
    fields = None
    exclude = None

    def get_object_dict(self):
        return list(map(self.serialize, self.get_queryset()))

    def serialize(self, obj):
        return model_to_dict(obj, fields=self.fields, exclude=self.exclude)
class JsonDetailMixin(JsonBaseMixin):
    """JSON detail rendering: serialize the single looked-up object."""
    fields = None
    exclude = None

    def get_object_dict(self):
        target = self.get_object()
        return self.serialize(target)

    def serialize(self, obj):
        return model_to_dict(obj, fields=self.fields, exclude=self.exclude)
class OrderableMixin(object):
    """Restrict the `order` GET parameter to a whitelist of field names."""
    orderable_fields = '__all__'

    def get_ordering(self):
        """Return the requested ordering if it is allowed, else the default.

        Accepts an optional leading '-' for descending order.
        """
        order = self.request.GET.get('order', self.ordering)
        if self.orderable_fields == '__all__':
            return order
        if not order:
            # BUG FIX: with no `order` param and self.ordering = None,
            # re.match(pattern, None) raised TypeError.
            return self.ordering
        m = re.match('-?([0-9a-zA-Z_]+)', order)
        if m and m.group(1) in self.orderable_fields:
            return order
        return self.ordering
class FilterMixin(object):
    """Filter the queryset from a whitelist of GET parameters.

    `allowed_filters` maps GET parameter names to queryset lookup names.
    """
    allowed_filters = {}

    def get_queryset(self):
        qs = super(FilterMixin, self).get_queryset()
        params = self.request.GET
        filters = {
            lookup: params.get(param)
            for param, lookup in self.allowed_filters.items()
            if param in params
        }
        return qs.filter(**filters)
| niklasmh/it1901-band-booking-project | json_views/views.py | views.py | py | 2,002 | python | en | code | 1 | github-code | 36 |
438511318 | # -*- coding: utf_8 -*-
#pytho3.5
import csv
import urllib.request
import lxml
from bs4 import BeautifulSoup
import re
import codecs
# -------------------- main --------------------
if __name__ == '__main__':
    # Scrape the pedigree ("blood") table for one horse from netkeiba.com and
    # collect every ancestor name found in it.
    #
    # The commented-out loops below were a template for batch-scraping
    # 2014-2016 race pages at Tokyo and Nakayama (all meetings, days, rounds),
    # kept for reference:
    # for year in [2014]:
    #     for place in [1,2,3,4,5,6,7,8,9,10]:
    #         for name in [1,2,3,4,5]:
    #             for number in [1, 2, 3, 4, 5, 6, 7, 8]:
    #                 for Round in [1,2,3,4,5,6,7,8,9,10, 11, 12]:
    #                     # zero-padded page id: year|place|meeting|day|round
    #                     file = "%04d%02d%02d%02d%02d" % (year, place, name, number, Round)
    link = 'http://db.netkeiba.com/horse/ped/2015104976/'
    URL = urllib.request.urlopen(link).read()
    soup = BeautifulSoup(URL, 'lxml')
    # BUG FIX: findAll() returns Tag objects; calling the str methods
    # .strip() / .decode() on a Tag crashed here (and str.decode() does not
    # exist in Python 3).  Extract the text before string-processing it.
    horse_name = soup.findAll("title")[0].get_text().strip()
    print(horse_name)
    # The pedigree grid: from <table class="blood_table detail"> to </table>.
    table = soup.findAll("table", {"class": "blood_table detail"})[0]
    # All <tr> rows of the table.
    rows = table.findAll("tr")
    # Prepare the CSV file for appending (UTF-8).
    csvFile = codecs.open("BloodKeibaData.csv", "a", "utf-8")
    writer = csv.writer(csvFile)
    try:
        csvRow = []
        for row in rows:
            # Ancestor names are the texts of the <a> anchors in each row.
            for cell in row.findAll(['a']):
                cell = cell.get_text().strip()
                if cell == '血統':  # skip the "pedigree" navigation link
                    continue
                if cell == '産駒':  # skip the "offspring" navigation link
                    continue
                csvRow.append(cell)
        # Writing was disabled in the original; left as-is.
        # writer.writerow(csvRow)
    finally:
        csvFile.close()
| Ryota819/Data | 競馬/20180501/keiba_blood_to_csv.py | keiba_blood_to_csv.py | py | 2,462 | python | en | code | 0 | github-code | 36 |
27379695129 | import os
import numpy as np
import tensorflow as tf
from flask import Flask, request, jsonify
from tensorflow.keras.preprocessing import image
import tensorflow_hub as hub
from PIL import Image
import io
import cv2
import uuid
import datetime
import random
import firebase_admin
from firebase_admin import credentials, firestore, storage
cred = credentials.Certificate("wearitsuw.json")
firebase_admin.initialize_app(cred)
db = firestore.client()
my_model = tf.keras.models.load_model('clothing-recognition.h5', custom_objects={'KerasLayer': hub.KerasLayer})
app = Flask(__name__)
def upload_image_to_storage(image, filename):
    """Upload a JPEG file-like object to the Firebase Storage bucket under
    *filename* and return its public download URL.

    *image* must be a seekable stream; it is rewound before uploading
    because predict() reads it before calling this helper.
    """
    bucket = storage.bucket('wearitsuw.appspot.com')
    blob = bucket.blob(filename)
    blob.content_type = 'image/jpeg'
    image.seek(0)  # rewind: the stream has already been consumed by the caller
    blob.upload_from_file(image, content_type='image/jpeg')
    blob.make_public()  # anyone with the URL can fetch the image
    return blob.public_url
def get_random_image(num_images):
    """Return *num_images* image URLs sampled (without replacement) from the
    'clothing' Firestore collection.

    Raises ValueError (from random.sample) if the collection holds fewer
    than *num_images* documents.
    """
    docs = db.collection('clothing').get()
    chosen = random.sample(docs, num_images)
    return [doc.get('imageUrl') for doc in chosen]
@app.route('/predict', methods=['POST'])
def predict():
    """Classify an uploaded clothing photo, detect its dominant color,
    upload the image to storage, persist the result to Firestore, and
    return it as JSON."""
    try:
        if 'file' not in request.files:
            return jsonify({'error': 'No file uploaded'})
        # Model class names, in the model's output order.
        # NOTE(review): the entries 'Sweter', 'Blazzer', 'Shirt (Kemeja)' and
        # 'Polo' have no matching key in label_mapping below, so predicting
        # any of them raises KeyError at label_mapping[prediction], which the
        # bare except turns into "Invalid Image" -- confirm the intended
        # spellings/ids.
        label = ['Coat', 'Sweter', 'Skirt', 'Polo', 'T-Shirt', 'Shorts', 'Hoodie', 'Jacket', 'Shirt (Kemeja)', 'Dress', 'Denim_Jacket', 'Pants', 'Jeans', 'Gym_Jacket', 'Blazzer']
        # Numeric ids stored alongside the string label.
        label_mapping = {
            'Blazer': 11,
            'Coat': 12,
            'Denim_Jacket': 13,
            'Dress': 14,
            'Gym_Jacket': 15,
            'Hoodie': 16,
            'Jacket': 17,
            'Jeans': 18,
            'Shorts': 19,
            'Pants': 20,
            'Shirt': 21,
            'Skirt': 22,
            'Sweater': 23,
            'T-Shirt': 24
        }
        # Numeric codes stored alongside the color name.
        color_mapping = {
            'Black': 0,
            'White': 255,
            'Red': 178,
            'Orange': 69,
            'Yellow': 510,
            'Green': 250,
            'Blue': 205,
            'Violet': 127
        }
        # Preprocess the upload for the classifier: 224x224, scaled to [0,1],
        # with a leading batch dimension.
        filenya = request.files['file']
        img = Image.open(io.BytesIO(filenya.read()))
        img = img.resize((224, 224))
        x = image.img_to_array(img)
        x /= 255
        x = np.expand_dims(x, axis=0)
        images = np.vstack([x])
        pred_arr = my_model.predict(images, batch_size=5)
        # Highest-scoring class index -> string label.
        # (This local `predict` shadows the view function's own name.)
        predict = np.argmax(pred_arr, axis=1)
        prediction = label[predict[0]]
        # Color heuristic: sample the single center pixel of the image.
        img_cv2 = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
        height, width, _ = img_cv2.shape
        x = width // 2
        y = height // 2
        color_code = img_cv2[y, x]
        red, green, blue = color_code[::-1]
        if red < 50 and green < 50 and blue < 50:
            color = "Black"
        elif red > 200 and green > 200 and blue > 200:
            color = "White"
        else:
            # Otherwise classify by hue (OpenCV hue range is 0-179).
            hsv = cv2.cvtColor(img_cv2, cv2.COLOR_BGR2HSV)
            hue_value = hsv[y, x, 0]
            # NOTE(review): hue == 170 falls in none of the ranges below and
            # leaves color = "Undefined", which has no color_mapping entry ->
            # KeyError -> "Invalid Image".
            color = "Undefined"
            if hue_value < 5 or hue_value > 170:
                color = "Red"
            elif 5 <= hue_value < 22:
                color = "Orange"
            elif 22 <= hue_value < 33:
                color = "Yellow"
            elif 33 <= hue_value < 78:
                color = "Green"
            elif 78 <= hue_value < 131:
                color = "Blue"
            elif 131 <= hue_value < 170:
                color = "Violet"
        # Upload the original file under a random name and persist the record.
        unique_id = str(uuid.uuid4())
        unique_filename = str(uuid.uuid4()) + '.jpg'
        image_url = upload_image_to_storage(filenya, unique_filename)
        response = {
            'string_label': prediction,
            'imageId': unique_id,
            'integer_label': label_mapping[prediction],
            'string_color': color,
            'integer_color': color_mapping[color],
            'imageUrl': image_url
        }
        doc_ref = db.collection('clothing').document(unique_id)
        doc_ref.set(response)
        return jsonify(response)
    except:
        # NOTE(review): bare except hides every failure (including the label
        # mismatches above) behind a generic message -- consider narrowing.
        return "Invalid Image"
@app.route('/recommendation', methods=['GET'])
def recommendation():
    """Return three randomly chosen clothing image URLs as JSON."""
    return jsonify({'image_urls': get_random_image(3)})
if __name__ == '__main__':
    # Development entry point: Flask's built-in server with the debugger on.
    # NOTE(review): debug=True must not be used in production.
    app.run(debug=True)
| nzwhd/wearit_cloud | app.py | app.py | py | 4,354 | python | en | code | 0 | github-code | 36 |
7575866797 | # liczby = [2, 6, 12, 14, 16, 77, 52, 1, 4, 9]
# min = 0
# for i in range(len(liczby)):
# if liczby[i] < liczby [min]:
# print("Znalazłem minimum")
# print("Nowa wartość minimum to: ", liczby[i])
# print("Nowy indeks minimum to: ", i)
# min = i
#
# liczby[1], liczby[min] = liczby[min], liczby[1]
# liczby = [2, 6, 12, 14, 16, 77, 52, 1, 4, 9]
# min = 0
#
#
# for i in range(1,len(liczby)):
# liczby = min[i]
# j = i - 1
# while j >=0 and min[j]>liczby:
# min[j + 1] = min[j]
# j = j - 1
# min[j + 1] = liczby
#
# print(liczby)
liczby = [2, 6, 12, 14, 16, 77, 52, 1, 4, 9]
min = 0
for i in range(len(liczby)):
min = i
for j in range(i, len(liczby)):
if liczby[j] < liczby[min]:
min = j
liczby[i], liczby[min] = liczby[min], liczby[i]
print(liczby)
| TworzeDoliny/bootcamp-08122018 | kolekcje/zadanie_12.py | zadanie_12.py | py | 881 | python | pl | code | 0 | github-code | 36 |
75273745385 | from weather import Weather_object
class Weather_CMD:
def __init__(self, city_id=None, output_mode='console'):
self.ANS_YES = ('1', 'true', 'y', 'yes', 'yo', 'yop')
self.object = Weather_object(city_id)
if city_id is not None:
self.object.city_id = city_id
else:
self.object.city_id = self.get_city_id()
self.data = self.get_readable_data()
self.output_mode = output_mode
return self.data
def ask_for_city(self):
user_city = input('Enter a name of city: ')
possible_cities = self.object.get_city_id(user_city)
new_city_id = False
for city in possible_cities:
answer = input('Is {0}[{1}, {2}] your city? '.format(city[0], city[1], city[2]))
if answer.rstrip().lstrip().lower() in self.ANS_YES:
new_city_id = city[3]
break
if new_city_id == False:
print('Your location is incorrect, check Syntax!')
return new_city_id
def get_city_id(self):
maybe_id = self.ask_for_city()
while maybe_id == False:
again = input('Do you want to enter a location again? ')
if again in self.ANS_YES:
maybe_id = self.ask_for_city()
else:
print('Found not city, exiting program!')
return False
return maybe_id
def get_readable_data(self):
self.object.raw_data_func()
data = self.object.get_readable_data()
if data == False:
print('A weather data for your city does not exist! Check city ID!')
return False
return data
console_data = Weather_CMD()
print(console_data)
| branislavblazek/notes | Python/projekty/weather/weather_cmd.py | weather_cmd.py | py | 1,784 | python | en | code | 0 | github-code | 36 |
18316291673 | import abc
import devtools
from typing import Any, AsyncIterator, Optional
import kubernetes_asyncio.watch
from servo.logging import logger
class BaseKubernetesHelper(abc.ABC):
@classmethod
@abc.abstractmethod
async def watch_args(cls, api_object: object) -> AsyncIterator[dict[str, Any]]:
...
@classmethod
@abc.abstractmethod
def is_ready(cls, api_object: object, event_type: Optional[str] = None) -> bool:
...
@classmethod
async def wait_until_deleted(cls, api_object: object) -> None:
async with cls.watch_args(api_object) as watch_args:
async with kubernetes_asyncio.watch.Watch().stream(**watch_args) as stream:
async for event in stream:
cls.log_watch_event(event)
if event["type"] == "DELETED":
stream.stop()
return
@classmethod
async def wait_until_ready(cls, api_object: object) -> None:
async with cls.watch_args(api_object) as watch_args:
async with kubernetes_asyncio.watch.Watch().stream(**watch_args) as stream:
async for event in stream:
cls.log_watch_event(event)
if cls.is_ready(event["object"], event["type"]):
stream.stop()
return
@classmethod
def log_watch_event(cls, event: dict[str, Any]) -> None:
event_type: str = event["type"]
obj: dict = event["object"].to_dict()
kind: str = obj.get("kind", "UNKNOWN")
metadata = obj.get("metadata", {})
name: str = metadata.get("name", "UNKNOWN")
namespace: str = metadata.get("namespace", "UNKNOWN")
logger.debug(
f"watch yielded event: {event_type} on kind {kind} {name}"
f" in namespace {namespace}"
)
logger.trace(devtools.pformat(obj))
| opsani/servox | servo/connectors/kubernetes_helpers/base.py | base.py | py | 1,920 | python | en | code | 6 | github-code | 36 |
36445667629 | """add latest updates
Revision ID: 9d2bc5f28130
Revises: 98b564f17b54
Create Date: 2021-02-20 18:07:38.182527
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9d2bc5f28130'
down_revision = '98b564f17b54'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('latest_updates',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updates', sa.JSON(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('latest_updates')
# ### end Alembic commands ###
| mhelmetag/mammoth | alembic/versions/9d2bc5f28130_add_latest_updates.py | 9d2bc5f28130_add_latest_updates.py | py | 898 | python | en | code | 1 | github-code | 36 |
11044632470 | import numpy as np
from scipy.special import expit as sig
import matplotlib.pyplot as plt
def sigmoid(arr):
return sig(arr)
def sigmoid_prime(arr):
arr = sigmoid(arr)
return arr*(1.0-arr)
def train(epoches,X,Y,weightHidden,weightOutput,lR):
for epoch in range(epoches):
hiddenIn = np.dot(X,weightHidden)
hiddenOut = sigmoid(hiddenIn)
outIn = np.dot(hiddenOut,weightOutput)
outOut = outIn
error = Y - outOut
dErrorByDW2 = np.dot(hiddenOut.T,error*lR)
weightOutput = weightOutput+dErrorByDW2
dErrorByDW1 = np.dot(X.T,np.dot(error*lR,weightOutput.T)*sigmoid_prime(hiddenIn))
weightHidden = weightHidden+dErrorByDW1
return weightHidden,weightOutput
def test(testData,weightsHidden,weightsOutput):
act_hidden = sigmoid(np.dot(testData, weightHidden))
return (np.dot(act_hidden, weightOutput))
"""
X = np.array([[0,0],[0,1],[1,0],
[0.1,0.2],[0.1,0.4],[0.4,0.9],
[0.9,0],[0.99,0.99],[0.97,0.89],
[0.3,0.3],[0.89,0.78],[0.12,0.56]])
Y = np.array([[0],[1],[1],
[0],[0],[1],
[1],[1],[1],
[0],[1],[1]])
inputLayerSize, hiddenNeuronsSize, outputSize = 2, 3, 1
"""
X = []
Y = []
for i in range(50,80):
X.append([i*1.0])
Y.append([(i*1.8)+32.0])
X = np.array(X)
Y = np.array(Y)
inputLayerSize, hiddenNeuronsSize, outputSize = 1, 4, 1
epoches = 100000
lR = 0.001
weightHidden = np.random.uniform(size=(inputLayerSize, hiddenNeuronsSize))
weightOutput = np.random.uniform(size=(hiddenNeuronsSize, outputSize))
#weightHidden , weightOutput = train(epoches,X,Y,weightHidden,weightOutput,lR)
for epoch in range(epoches):
hiddenIn = np.dot(X,weightHidden)
hiddenOut = sigmoid(hiddenIn)
outIn = np.dot(hiddenOut,weightOutput)
outOut = outIn
error = Y - outOut
dErrorByDW2 = np.dot(hiddenOut.T,error*lR)
weightOutput = weightOutput+dErrorByDW2
dErrorByDW1 = np.dot(X.T,np.dot(error*lR,weightOutput.T)*sigmoid_prime(hiddenIn))
weightHidden = weightHidden+dErrorByDW1
print (error)
output = test(np.array([69]),weightHidden,weightOutput)
print (output)
print ('expected ',(69*1.8)+32.0)
| mars-boy/deeplearning_handson | neuralnet.py | neuralnet.py | py | 2,134 | python | en | code | 0 | github-code | 36 |
#!/usr/bin/python3
import shamir
import sqlite3
import rsa_encrypt
import time
import base64
import settings
#Class to hold the database
class db:
    """Mutable record holding one database's name and key material."""
    # Class-level defaults; every instance shadows these in __init__.
    name = ""
    key = ""
    def __init__(self):
        # Fresh, empty credentials for each instance.
        self.name = self.key = ""
#add user secret to the secrets database
def add_secret(username, name, secret, currtime):
    """Insert-or-replace one user's secret (plus a timestamp) in secrets.db."""
    connection = sqlite3.connect(settings.DBdir + "secrets.db")
    cursor = connection.cursor()
    # Make sure the table exists before writing into it.
    cursor.execute(
        "CREATE TABLE IF NOT EXISTS secrets(id PRIMARY KEY, name, secret, timestamp DOUBLE)"
    )
    # REPLACE keeps at most one row per user id.
    cursor.execute(
        "REPLACE INTO secrets VALUES (?,?,?,?)",
        [username, name, str(secret), currtime],
    )
    connection.commit()
    connection.close()
    return
#Encrypt shares with db_keys and store them into their respective databases
def add_shares(username, shares, keys, currtime):
    """Encrypt each share with its database's public key and store it.

    Returns -1 when the number of shares does not match the number of
    configured databases (prevents losing or over-distributing shares).
    """
    db_keys = rsa_encrypt.get_keys(settings.DBS)
    if not len(shares) == len(settings.DBS):
        return -1
    for idx, db_name in enumerate(settings.DBS):
        connection = sqlite3.connect(settings.DBdir + db_name + ".db")
        cursor = connection.cursor()
        # Make sure the shares table exists.
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS enc_shares(id PRIMARY KEY, share, timestamp DOUBLE)"
        )
        # Serialize the share as "user:x:y:key" before encrypting it.
        plain = username + ":" + str(shares[idx][0]) + ":" + str(shares[idx][1]) + ":" + str(keys[idx])
        # Encrypt with this database's public key.
        public_key = db_keys[db_name].key
        encrypted = rsa_encrypt.encrypt_str(public_key, plain)
        # Insert-or-replace the encrypted share for this user.
        cursor.execute(
            "REPLACE INTO enc_shares VALUES(?, ?, ?)",
            [username, encrypted, currtime],
        )
        connection.commit()
        connection.close()
    return
#Generate the secrets for the sharing scheme to use
def gen_secrets(username, name, keys):
    """Create a Shamir secret + shares for a user and persist everything.

    Returns -1 when there are fewer databases or keys than shares required.
    """
    if (len(settings.DBS) < settings.TOTAL) or len(keys) < settings.TOTAL:
        return -1
    # Generate the secret and its TOTAL shares (THRESH needed to recover).
    secret, shares = shamir.make_random_shares(settings.THRESH, settings.TOTAL)
    # One timestamp shared by the secret and all of its shares.
    timestamp = time.time()
    add_secret(username, name, secret, timestamp)
    add_shares(username, shares, keys, timestamp)
    return
#add a user to the system given a username, name, and key list
def add_user(username, name, keys_list):
    """Register a user; rejects the request (-1) if any key is empty."""
    if any(k == "" for k in keys_list):
        return -1
    gen_secrets(username, name, keys_list)
    return
#if run as main
# NOTE(review): demo seeding below uses hard-coded "111111" PINs — test data only.
if __name__ == "__main__":
    #Exit if client node: only the auth node may seed users
    if not settings.ID == 'auth':
        print("run this on an auth node")
        exit(0)
    #Add test users
    add_user("r3k", "Ryan Kennedy", ["111111"] * settings.TOTAL)
add_user("hal", "Halston Sellentin", ["111111"] * settings.TOTAL ) | sellenth/crow | shamir/code/shamir_gen.py | shamir_gen.py | py | 3,462 | python | en | code | 2 | github-code | 36 |
from argparse import ArgumentParser
from screenshot_handler.screen_grabber import ScreenGrabber
def read_arguments():
    """Parse the capture script's command-line options."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument('-d', '--dest-folder', help='Destination folder for the images')
    arg_parser.add_argument('-m', '--monitor', help='Dimensions for cropped screenshots in the following format: TOPxLEFT_WIDTHxHEIGHT')
    return arg_parser.parse_args()
def screenshot_loop(screen_grabber):
    """Capture screenshots forever, naming them "0", "1", "2", ... (never returns)."""
    shot_number = 0
    while True:
        screen_grabber.grab_screenshot(str(shot_number))
        shot_number += 1
def main():
    """Entry point: wire the CLI arguments into a ScreenGrabber and start capturing."""
    args = read_arguments()
    grabber = ScreenGrabber(args.dest_folder, monitor=args.monitor)
    screenshot_loop(grabber)
# Script entry point.
if __name__ == '__main__':
    main()
| Luisetex/HexAI | grab_images.py | grab_images.py | py | 799 | python | en | code | 5 | github-code | 36 |
import sys
input = sys.stdin.readline
def update(ind):
    """Fenwick-tree point update: add 1 at index `ind` (uses globals n, BIT)."""
    i = ind
    while i <= n:
        BIT[i] += 1
        i += i & (-i)  # climb to the next node covering index i
def query(ind):
    """Fenwick-tree prefix sum over BIT[1..ind] (uses global BIT)."""
    total = 0
    i = ind
    while i > 0:
        total += BIT[i]
        i -= i & (-i)  # strip the lowest set bit
    return total
# Read n scores and compute each submission's rank at the moment it arrives,
# using a Fenwick tree indexed by the descending rank of each distinct score.
n = int(input())
key = {}
original = []
values = []
BIT = [0] * (n + 1)
for i in range(1, n + 1):
    score = int(input())
    original.append(score)
    values.append(score)
# Map each distinct score to a 1-based rank; the highest score gets rank 1.
values.sort(reverse=True)
for i in range(1, n + 1):
    if values[i - 1] not in key:
        key[values[i - 1]] = i
num = 0
for score in original:
    # Rank on arrival = (# earlier submissions with equal-or-better score) + 1.
    num += query(key[score]) + 1
    update(key[score])
# Average rank, truncated to two decimals via round-to-hundredths.
final = round(num / n * 100) / 100
# NOTE(review): hard-coded special case — appears to patch a rounding mismatch
# against one judge test case; verify before reusing this code elsewhere.
print(final) if final != 253.54 else print(248.94)
| AAZZAZRON/DMOJ-Solutions | ccc05s5.py | ccc05s5.py | py | 710 | python | en | code | 1 | github-code | 36 |
from telebot.types import Message
from loader import bot
from database.orm import User
from states.states import UserStates
from keyboards.inline.inline import get_language_keyboard
from utils.utils import get_user_state
from telebot.callback_data import CallbackData
ru_lang = CallbackData('ru_RU', prefix="search")
@bot.message_handler(commands=['language'])
def language_handler(message: Message):
    """Handle /language: require registration, then show the language keyboard."""
    print('language_handler func')
    chat_id = message.chat.id
    user_id = message.from_user.id
    # Unregistered users are told to /start first (message is user-facing Russian).
    if User.get_or_none(User.user_id == user_id) is None:
        bot.send_message(user_id, "Вы не зарегистрированы. Напишите /start")
        return
    # Move the chat into the language-selection state and stash context.
    bot.set_state(chat_id, UserStates.select_language)
    with bot.retrieve_data(chat_id) as data:
        data["user_id"] = user_id
        data["state"] = UserStates.select_language
    # bot.register_next_step_handler(message, process_select_language)
    bot.send_message(chat_id, "Select language", reply_markup=get_language_keyboard())
def process_select_language(message: Message, answer: str):
    """Validate the chosen language code and persist it for the user."""
    print('process_select_language func')
    chat_id = message.chat.id
    user_id = message.from_user.id
    language = answer
    # Only these two locales are supported.
    if language not in ('ru_RU', 'en_EN'):
        bot.send_message(chat_id, 'Incorrect language')
        bot.delete_state(chat_id)
        return
    account = User.get_or_none(User.user_id == user_id)
    if account:
        account.language = language
        account.save()
        bot.send_message(chat_id, 'Language saved!')
    # Always leave the selection state, even if the user record was missing.
    bot.delete_state(chat_id)
@bot.callback_query_handler(func=None, config=ru_lang.filter())
def callback_language_worker(call):
    """Route language-keyboard callbacks while the chat is selecting a language."""
    print('language callback_worker func')
    chat_id = call.message.chat.id
    state = get_user_state(bot, chat_id)
    # Only act when this chat is actually in the select_language state.
    if state == UserStates.select_language.name:
        process_select_language(call.message, call.data)
| makushatnik/travelbot | handlers/custom_handlers/language.py | language.py | py | 1,900 | python | en | code | 0 | github-code | 36 |
import numpy as np
import matplotlib.pyplot as plt
from fact.io import read_h5py
import pandas as pd
import plotting
import click
import matplotlib
if matplotlib.get_backend() == 'pgf':
from matplotlib.backends.backend_pgf import PdfPages
else:
from matplotlib.backends.backend_pdf import PdfPages
columns = [
'source_x_prediction',
'source_y_prediction',
'dragon_time',
'gammaness',
'concentration_cog',
'focal_length',
'alt_tel',
'az_tel'
]
@click.command()
@click.argument('outdir', type=click.Path(exists=True, dir_okay=True))
@click.argument('output', type=click.Path(exists=False, dir_okay=False))
def main(outdir, output):
runs = [
f'{outdir}/dl2_v0.5.1_LST-1.Run02113.h5',
f'{outdir}/dl2_v0.5.1_LST-1.Run02114.h5',
f'{outdir}/dl2_v0.5.1_LST-1.Run02115.h5',
f'{outdir}/dl2_v0.5.1_LST-1.Run02116.h5',
f'{outdir}/dl2_v0.5.1_LST-1.Run02117.h5',
f'{outdir}/dl2_v0.5.1_LST-1.Run02130.h5',
f'{outdir}/dl2_v0.5.1_LST-1.Run02131.h5',
f'{outdir}/dl2_v0.5.1_LST-1.Run02132.h5',
f'{outdir}/dl2_v0.5.1_LST-1.Run02133.h5'
]
df = pd.DataFrame()
for i, run in enumerate(runs):
df = pd.concat( [
df,
read_h5py(run, key = 'events', columns=columns)
],
ignore_index=True
)
df_runs = []
for i, run in enumerate(runs):
df_temp = read_h5py(run, key = 'events', columns=columns)
df_runs.append(df_temp)
figures = []
theta2_cut = 0.04
gammaness_threshold = 0.6
figures.append(plt.figure())
ax = figures[-1].add_subplot(1, 1, 1)
plotting.theta2(df, theta2_cut, gammaness_threshold, df, ax=ax, coord='mrk 421', n_offs=3)
ax.set_title('Mrk 421 coordinates, n_offs = 3')
figures.append(plt.figure())
ax = figures[-1].add_subplot(1, 1, 1)
plotting.theta2(df, theta2_cut, gammaness_threshold, df, ax=ax, coord='mrk 421', n_offs=5)
#ax.set_title('Mrk 421 coordinates, n_offs = 5')
#figures.append(plt.figure())
#ax = figures[-1].add_subplot(1, 1, 1)
#plotting.theta2(df, theta2_cut, gammaness_threshold, ax=ax, range=None)
#ax.set_title('Mrk 421 camera center')
#mrk 421 coordinates
#figures.append(plt.figure())
#ax = figures[-1].add_subplot(1, 1, 1)
#plotting.plot2D_runs(df_runs, runs, 'mrk 421', gammaness_threshold, ax)
#saving
with PdfPages(output) as pdf:
for fig in figures:
fig.tight_layout()
pdf.savefig(fig)
if __name__ == '__main__':
    main()
# encoding=utf-8
import config
import datetime
import threading
import time
# Daily task: clear the list of promoted site IPs kept in config
def clear_config_ip_days(interval):
    """Clear config's advertised-IP list, then reschedule this function to run
    again after `interval` seconds.

    Bug fix: the original Timer called ``clear_config_ip_days`` with no
    arguments, so the first rescheduled run raised TypeError (missing
    required ``interval``) and the daily task silently died.
    """
    config.url_ip_list.clear()
    threading.Timer(interval, clear_config_ip_days, args=(interval,)).start()
# Start a task that fires once per day at a fixed hour
def task_start_day_hour(task, which_hour=0, max_error=10, interval=86400):
    """Block until the current hour equals `which_hour`, then run `task` once.

    task       : callable invoked as task(interval)
    which_hour : hour of day (0-23) at which to fire
    max_error  : polling granularity in seconds (maximum start error)
    interval   : period in seconds, forwarded to the task
    """
    while True:
        # Poll the wall clock; fire as soon as the target hour arrives.
        if datetime.datetime.now().hour == which_hour:
            task(interval)
            return
        time.sleep(max_error)
| gaowenhao/AdProject | tools.py | tools.py | py | 757 | python | zh | code | 0 | github-code | 36 |
import copy
import json
import logging
import os
import pickle
import warnings
import numpy as np
from typing import Any, List, Optional, Text, Dict, Tuple
import rasa.utils.io
from rasa.core.domain import Domain
from rasa.core.featurizers import (
TrackerFeaturizer,
FullDialogueTrackerFeaturizer,
LabelTokenizerSingleStateFeaturizer,
MaxHistoryTrackerFeaturizer,
)
from rasa.core.policies.policy import Policy
from rasa.core.constants import DEFAULT_POLICY_PRIORITY
from rasa.core.trackers import DialogueStateTracker
from rasa.utils import train_utils
import tensorflow as tf
# avoid warning println on contrib import - remove for tf 2
tf.contrib._warning = None
logger = logging.getLogger(__name__)
class EmbeddingPolicy(Policy):
    """Transformer Embedding Dialogue Policy (TEDP)
    Transformer version of the REDP used in our paper https://arxiv.org/abs/1811.11707
    """
    SUPPORTS_ONLINE_TRAINING = True
    # default properties (DOC MARKER - don't remove)
    defaults = {
        # nn architecture
        # a list of hidden layers sizes before user embed layer
        # number of hidden layers is equal to the length of this list
        "hidden_layers_sizes_pre_dial": [],
        # a list of hidden layers sizes before bot embed layer
        # number of hidden layers is equal to the length of this list
        "hidden_layers_sizes_bot": [],
        # number of units in transformer
        "transformer_size": 128,
        # number of transformer layers
        "num_transformer_layers": 1,
        # type of positional encoding in transformer
        "pos_encoding": "timing",  # string 'timing' or 'emb'
        # max sequence length if pos_encoding='emb'
        "max_seq_length": 256,
        # number of attention heads in transformer
        "num_heads": 4,
        # training parameters
        # initial and final batch sizes:
        # batch size will be linearly increased for each epoch
        "batch_size": [8, 32],
        # how to create batches
        "batch_strategy": "balanced",  # string 'sequence' or 'balanced'
        # number of epochs
        "epochs": 1,
        # set random seed to any int to get reproducible results
        "random_seed": None,
        # embedding parameters
        # dimension size of embedding vectors
        "embed_dim": 20,
        # the number of incorrect labels, the algorithm will minimize
        # their similarity to the user input during training
        "num_neg": 20,
        # the type of the similarity
        "similarity_type": "auto",  # string 'auto' or 'cosine' or 'inner'
        # the type of the loss function
        "loss_type": "softmax",  # string 'softmax' or 'margin'
        # how similar the algorithm should try
        # to make embedding vectors for correct labels
        "mu_pos": 0.8,  # should be 0.0 < ... < 1.0 for 'cosine'
        # maximum negative similarity for incorrect labels
        "mu_neg": -0.2,  # should be -1.0 < ... < 1.0 for 'cosine'
        # flag if minimize only maximum similarity over incorrect labels
        "use_max_sim_neg": True,
        # scale loss inverse proportionally to confidence of correct prediction
        "scale_loss": True,
        # regularization
        # the scale of L2 regularization
        "C2": 0.001,
        # the scale of how important is to minimize the maximum similarity
        # between embeddings of different labels
        "C_emb": 0.8,
        # dropout rate for dial nn
        "droprate_a": 0.1,
        # dropout rate for bot nn
        "droprate_b": 0.0,
        # visualization of accuracy
        # how often calculate validation accuracy
        "evaluate_every_num_epochs": 20,  # small values may hurt performance
        # how many examples to use for hold out validation set
        "evaluate_on_num_examples": 0,  # large values may hurt performance
    }
    # end default properties (DOC MARKER - don't remove)
    @staticmethod
    def _standard_featurizer(max_history: Optional[int] = None) -> "TrackerFeaturizer":
        """Full-dialogue featurization unless a bounded history window is requested."""
        if max_history is None:
            return FullDialogueTrackerFeaturizer(LabelTokenizerSingleStateFeaturizer())
        else:
            return MaxHistoryTrackerFeaturizer(
                LabelTokenizerSingleStateFeaturizer(), max_history=max_history
            )
    def __init__(
        self,
        featurizer: Optional["TrackerFeaturizer"] = None,
        priority: int = DEFAULT_POLICY_PRIORITY,
        graph: Optional["tf.Graph"] = None,
        session: Optional["tf.Session"] = None,
        user_placeholder: Optional["tf.Tensor"] = None,
        bot_placeholder: Optional["tf.Tensor"] = None,
        similarity_all: Optional["tf.Tensor"] = None,
        pred_confidence: Optional["tf.Tensor"] = None,
        similarity: Optional["tf.Tensor"] = None,
        dial_embed: Optional["tf.Tensor"] = None,
        bot_embed: Optional["tf.Tensor"] = None,
        all_bot_embed: Optional["tf.Tensor"] = None,
        attention_weights: Optional["tf.Tensor"] = None,
        max_history: Optional[int] = None,
        **kwargs: Any,
    ) -> None:
        """Declare instant variables with default values"""
        if not featurizer:
            featurizer = self._standard_featurizer(max_history)
        super().__init__(featurizer, priority)
        self._load_params(**kwargs)
        # encode all label_ids with numbers
        self._encoded_all_label_ids = None
        # tf related instances
        self.graph = graph
        self.session = session
        self.a_in = user_placeholder
        self.b_in = bot_placeholder
        self.sim_all = similarity_all
        self.pred_confidence = pred_confidence
        self.sim = similarity
        # persisted embeddings
        self.dial_embed = dial_embed
        self.bot_embed = bot_embed
        self.all_bot_embed = all_bot_embed
        self.attention_weights = attention_weights
        # internal tf instances
        self._iterator = None
        self._train_op = None
        self._is_training = None
    # init helpers
    def _load_nn_architecture_params(self, config: Dict[Text, Any]) -> None:
        """Read network-architecture and training-schedule settings from config."""
        self.hidden_layers_sizes = {
            "pre_dial": config["hidden_layers_sizes_pre_dial"],
            "bot": config["hidden_layers_sizes_bot"],
        }
        self.pos_encoding = config["pos_encoding"]
        self.max_seq_length = config["max_seq_length"]
        self.num_heads = config["num_heads"]
        self.transformer_size = config["transformer_size"]
        self.num_transformer_layers = config["num_transformer_layers"]
        self.batch_size = config["batch_size"]
        self.batch_strategy = config["batch_strategy"]
        self.epochs = config["epochs"]
        self.random_seed = config["random_seed"]
    def _load_embedding_params(self, config: Dict[Text, Any]) -> None:
        """Read embedding/similarity settings; 'auto' similarity follows the loss type."""
        self.embed_dim = config["embed_dim"]
        self.num_neg = config["num_neg"]
        self.similarity_type = config["similarity_type"]
        self.loss_type = config["loss_type"]
        if self.similarity_type == "auto":
            if self.loss_type == "softmax":
                self.similarity_type = "inner"
            elif self.loss_type == "margin":
                self.similarity_type = "cosine"
        self.mu_pos = config["mu_pos"]
        self.mu_neg = config["mu_neg"]
        self.use_max_sim_neg = config["use_max_sim_neg"]
        self.scale_loss = config["scale_loss"]
    def _load_regularization_params(self, config: Dict[Text, Any]) -> None:
        """Read L2/embedding regularization scales and dropout rates."""
        self.C2 = config["C2"]
        self.C_emb = config["C_emb"]
        self.droprate = {"bot": config["droprate_b"], "dial": config["droprate_a"]}
    def _load_visual_params(self, config: Dict[Text, Any]) -> None:
        """Read validation-frequency settings; values < 1 mean evaluate only at the end."""
        self.evaluate_every_num_epochs = config["evaluate_every_num_epochs"]
        if self.evaluate_every_num_epochs < 1:
            self.evaluate_every_num_epochs = self.epochs
        self.evaluate_on_num_examples = config["evaluate_on_num_examples"]
    def _load_params(self, **kwargs: Dict[Text, Any]) -> None:
        """Merge kwargs over the class defaults and unpack all hyperparameter groups."""
        config = copy.deepcopy(self.defaults)
        config.update(kwargs)
        self._tf_config = train_utils.load_tf_config(config)
        self._load_nn_architecture_params(config)
        self._load_embedding_params(config)
        self._load_regularization_params(config)
        self._load_visual_params(config)
    # data helpers
    # noinspection PyPep8Naming
    @staticmethod
    def _label_ids_for_Y(data_Y: "np.ndarray") -> "np.ndarray":
        """Prepare Y data for training: extract label_ids."""
        return data_Y.argmax(axis=-1)
    # noinspection PyPep8Naming
    def _label_features_for_Y(self, label_ids: "np.ndarray") -> "np.ndarray":
        """Prepare Y data for training: features for label_ids."""
        if len(label_ids.shape) == 2:  # full dialogue featurizer is used
            return np.stack(
                [
                    np.stack(
                        [
                            self._encoded_all_label_ids[label_idx]
                            for label_idx in seq_label_ids
                        ]
                    )
                    for seq_label_ids in label_ids
                ]
            )
        else:  # max history featurizer is used
            return np.stack(
                [self._encoded_all_label_ids[label_idx] for label_idx in label_ids]
            )
    # noinspection PyPep8Naming
    def _create_session_data(
        self, data_X: "np.ndarray", data_Y: Optional["np.ndarray"] = None
    ) -> "train_utils.SessionDataType":
        """Combine all tf session related data into dict."""
        if data_Y is not None:
            # training time
            label_ids = self._label_ids_for_Y(data_Y)
            Y = self._label_features_for_Y(label_ids)
            # explicitly add last dimension to label_ids
            # to track correctly dynamic sequences
            label_ids = np.expand_dims(label_ids, -1)
        else:
            # prediction time
            label_ids = None
            Y = None
        return {
            "dialogue_features": [data_X],
            "bot_features": [Y],
            "action_ids": [label_ids],
        }
    def _create_tf_bot_embed(self, b_in: "tf.Tensor") -> "tf.Tensor":
        """Create embedding bot vector."""
        b = train_utils.create_tf_fnn(
            b_in,
            self.hidden_layers_sizes["bot"],
            self.droprate["bot"],
            self.C2,
            self._is_training,
            layer_name_suffix="bot",
        )
        return train_utils.create_tf_embed(
            b, self.embed_dim, self.C2, self.similarity_type, layer_name_suffix="bot"
        )
    def _create_tf_dial(self, a_in) -> Tuple["tf.Tensor", "tf.Tensor"]:
        """Create dialogue level embedding and mask."""
        # mask different length sequences
        # if there is at least one `-1` it should be masked
        mask = tf.sign(tf.reduce_max(self.a_in, -1) + 1)
        a = train_utils.create_tf_fnn(
            a_in,
            self.hidden_layers_sizes["pre_dial"],
            self.droprate["dial"],
            self.C2,
            self._is_training,
            layer_name_suffix="pre_dial",
        )
        self.attention_weights = {}
        hparams = train_utils.create_t2t_hparams(
            self.num_transformer_layers,
            self.transformer_size,
            self.num_heads,
            self.droprate["dial"],
            self.pos_encoding,
            self.max_seq_length,
            self._is_training,
        )
        a = train_utils.create_t2t_transformer_encoder(
            a, mask, self.attention_weights, hparams, self.C2, self._is_training
        )
        if isinstance(self.featurizer, MaxHistoryTrackerFeaturizer):
            # pick last label if max history featurizer is used
            a = a[:, -1:, :]
            mask = mask[:, -1:]
        dial_embed = train_utils.create_tf_embed(
            a, self.embed_dim, self.C2, self.similarity_type, layer_name_suffix="dial"
        )
        return dial_embed, mask
    def _build_tf_train_graph(self) -> Tuple["tf.Tensor", "tf.Tensor"]:
        """Bulid train graph using iterator."""
        # iterator returns a_in, b_in, action_ids
        self.a_in, self.b_in, _ = self._iterator.get_next()
        if isinstance(self.featurizer, MaxHistoryTrackerFeaturizer):
            # add time dimension if max history featurizer is used
            self.b_in = self.b_in[:, tf.newaxis, :]
        all_bot_raw = tf.constant(
            self._encoded_all_label_ids, dtype=tf.float32, name="all_bot_raw"
        )
        self.dial_embed, mask = self._create_tf_dial(self.a_in)
        self.bot_embed = self._create_tf_bot_embed(self.b_in)
        self.all_bot_embed = self._create_tf_bot_embed(all_bot_raw)
        return train_utils.calculate_loss_acc(
            self.dial_embed,
            self.bot_embed,
            self.b_in,
            self.all_bot_embed,
            all_bot_raw,
            self.num_neg,
            mask,
            self.loss_type,
            self.mu_pos,
            self.mu_neg,
            self.use_max_sim_neg,
            self.C_emb,
            self.scale_loss,
        )
    # prepare for prediction
    def _create_tf_placeholders(
        self, session_data: "train_utils.SessionDataType"
    ) -> None:
        """Create placeholders for prediction."""
        dialogue_len = None  # use dynamic time
        self.a_in = tf.placeholder(
            dtype=tf.float32,
            shape=(None, dialogue_len, session_data["dialogue_features"][0].shape[-1]),
            name="a",
        )
        self.b_in = tf.placeholder(
            dtype=tf.float32,
            shape=(None, dialogue_len, None, session_data["bot_features"][0].shape[-1]),
            name="b",
        )
    def _build_tf_pred_graph(
        self, session_data: "train_utils.SessionDataType"
    ) -> "tf.Tensor":
        """Rebuild tf graph for prediction."""
        self._create_tf_placeholders(session_data)
        self.dial_embed, mask = self._create_tf_dial(self.a_in)
        self.sim_all = train_utils.tf_raw_sim(
            self.dial_embed[:, :, tf.newaxis, :],
            self.all_bot_embed[tf.newaxis, tf.newaxis, :, :],
            mask,
        )
        self.bot_embed = self._create_tf_bot_embed(self.b_in)
        self.sim = train_utils.tf_raw_sim(
            self.dial_embed[:, :, tf.newaxis, :], self.bot_embed, mask
        )
        return train_utils.confidence_from_sim(self.sim_all, self.similarity_type)
    # training methods
    def train(
        self,
        training_trackers: List["DialogueStateTracker"],
        domain: "Domain",
        **kwargs: Any,
    ) -> None:
        """Train the policy on given training trackers."""
        logger.debug("Started training embedding policy.")
        # set numpy random seed
        np.random.seed(self.random_seed)
        # dealing with training data
        training_data = self.featurize_for_training(training_trackers, domain, **kwargs)
        # encode all label_ids with policies' featurizer
        state_featurizer = self.featurizer.state_featurizer
        self._encoded_all_label_ids = state_featurizer.create_encoded_all_actions(
            domain
        )
        # check if number of negatives is less than number of label_ids
        logger.debug(
            "Check if num_neg {} is smaller "
            "than number of label_ids {}, "
            "else set num_neg to the number of label_ids - 1"
            "".format(self.num_neg, domain.num_actions)
        )
        # noinspection PyAttributeOutsideInit
        self.num_neg = min(self.num_neg, domain.num_actions - 1)
        # extract actual training data to feed to tf session
        session_data = self._create_session_data(training_data.X, training_data.y)
        if self.evaluate_on_num_examples:
            session_data, eval_session_data = train_utils.train_val_split(
                session_data,
                self.evaluate_on_num_examples,
                self.random_seed,
                label_key="action_ids",
            )
        else:
            eval_session_data = None
        self.graph = tf.Graph()
        with self.graph.as_default():
            # set random seed in tf
            tf.set_random_seed(self.random_seed)
            # allows increasing batch size
            batch_size_in = tf.placeholder(tf.int64)
            (
                self._iterator,
                train_init_op,
                eval_init_op,
            ) = train_utils.create_iterator_init_datasets(
                session_data,
                eval_session_data,
                batch_size_in,
                self.batch_strategy,
                label_key="action_ids",
            )
            self._is_training = tf.placeholder_with_default(False, shape=())
            loss, acc = self._build_tf_train_graph()
            # define which optimizer to use
            self._train_op = tf.train.AdamOptimizer().minimize(loss)
            # train tensorflow graph
            self.session = tf.Session(config=self._tf_config)
            train_utils.train_tf_dataset(
                train_init_op,
                eval_init_op,
                batch_size_in,
                loss,
                acc,
                self._train_op,
                self.session,
                self._is_training,
                self.epochs,
                self.batch_size,
                self.evaluate_on_num_examples,
                self.evaluate_every_num_epochs,
            )
            # rebuild the graph for prediction
            self.pred_confidence = self._build_tf_pred_graph(session_data)
            self.attention_weights = train_utils.extract_attention(
                self.attention_weights
            )
    def continue_training(
        self,
        training_trackers: List["DialogueStateTracker"],
        domain: "Domain",
        **kwargs: Any,
    ) -> None:
        """Continue training an already trained policy."""
        batch_size = kwargs.get("batch_size", 5)
        epochs = kwargs.get("epochs", 50)
        with self.graph.as_default():
            for _ in range(epochs):
                training_data = self._training_data_for_continue_training(
                    batch_size, training_trackers, domain
                )
                session_data = self._create_session_data(
                    training_data.X, training_data.y
                )
                train_dataset = train_utils.create_tf_dataset(
                    session_data, batch_size, label_key="action_ids"
                )
                train_init_op = self._iterator.make_initializer(train_dataset)
                self.session.run(train_init_op)
                # fit to one extra example using updated trackers
                while True:
                    try:
                        self.session.run(
                            self._train_op, feed_dict={self._is_training: True}
                        )
                    except tf.errors.OutOfRangeError:
                        break
    def tf_feed_dict_for_prediction(
        self, tracker: "DialogueStateTracker", domain: "Domain"
    ) -> Dict["tf.Tensor", "np.ndarray"]:
        """Create feed dictionary for tf session."""
        # noinspection PyPep8Naming
        data_X = self.featurizer.create_X([tracker], domain)
        session_data = self._create_session_data(data_X)
        return {self.a_in: session_data["dialogue_features"][0]}
    def predict_action_probabilities(
        self, tracker: "DialogueStateTracker", domain: "Domain"
    ) -> List[float]:
        """Predict the next action the bot should take.
        Return the list of probabilities for the next actions.
        """
        if self.session is None:
            logger.error(
                "There is no trained tf.session: "
                "component is either not trained or "
                "didn't receive enough training data"
            )
            return [0.0] * domain.num_actions
        tf_feed_dict = self.tf_feed_dict_for_prediction(tracker, domain)
        confidence = self.session.run(self.pred_confidence, feed_dict=tf_feed_dict)
        # confidences of the last dialogue turn, one per action
        return confidence[0, -1, :].tolist()
    def persist(self, path: Text) -> None:
        """Persists the policy to a storage."""
        if self.session is None:
            warnings.warn(
                "Method `persist(...)` was called "
                "without a trained model present. "
                "Nothing to persist then!"
            )
            return
        self.featurizer.persist(path)
        meta = {"priority": self.priority}
        meta_file = os.path.join(path, "embedding_policy.json")
        rasa.utils.io.dump_obj_as_json_to_file(meta_file, meta)
        file_name = "tensorflow_embedding.ckpt"
        checkpoint = os.path.join(path, file_name)
        rasa.utils.io.create_directory_for_file(checkpoint)
        with self.graph.as_default():
            train_utils.persist_tensor("user_placeholder", self.a_in, self.graph)
            train_utils.persist_tensor("bot_placeholder", self.b_in, self.graph)
            train_utils.persist_tensor("similarity_all", self.sim_all, self.graph)
            train_utils.persist_tensor(
                "pred_confidence", self.pred_confidence, self.graph
            )
            train_utils.persist_tensor("similarity", self.sim, self.graph)
            train_utils.persist_tensor("dial_embed", self.dial_embed, self.graph)
            train_utils.persist_tensor("bot_embed", self.bot_embed, self.graph)
            train_utils.persist_tensor("all_bot_embed", self.all_bot_embed, self.graph)
            train_utils.persist_tensor(
                "attention_weights", self.attention_weights, self.graph
            )
            saver = tf.train.Saver()
            saver.save(self.session, checkpoint)
        with open(os.path.join(path, file_name + ".tf_config.pkl"), "wb") as f:
            pickle.dump(self._tf_config, f)
    @classmethod
    def load(cls, path: Text) -> "EmbeddingPolicy":
        """Loads a policy from the storage.
        **Needs to load its featurizer**
        """
        if not os.path.exists(path):
            raise Exception(
                "Failed to load dialogue model. Path '{}' "
                "doesn't exist".format(os.path.abspath(path))
            )
        featurizer = TrackerFeaturizer.load(path)
        file_name = "tensorflow_embedding.ckpt"
        checkpoint = os.path.join(path, file_name)
        if not os.path.exists(checkpoint + ".meta"):
            # no tf checkpoint was saved: return an untrained policy
            return cls(featurizer=featurizer)
        meta_file = os.path.join(path, "embedding_policy.json")
        meta = json.loads(rasa.utils.io.read_file(meta_file))
        with open(os.path.join(path, file_name + ".tf_config.pkl"), "rb") as f:
            _tf_config = pickle.load(f)
        graph = tf.Graph()
        with graph.as_default():
            session = tf.Session(config=_tf_config)
            saver = tf.train.import_meta_graph(checkpoint + ".meta")
            saver.restore(session, checkpoint)
            a_in = train_utils.load_tensor("user_placeholder")
            b_in = train_utils.load_tensor("bot_placeholder")
            sim_all = train_utils.load_tensor("similarity_all")
            pred_confidence = train_utils.load_tensor("pred_confidence")
            sim = train_utils.load_tensor("similarity")
            dial_embed = train_utils.load_tensor("dial_embed")
            bot_embed = train_utils.load_tensor("bot_embed")
            all_bot_embed = train_utils.load_tensor("all_bot_embed")
            attention_weights = train_utils.load_tensor("attention_weights")
        return cls(
            featurizer=featurizer,
            priority=meta["priority"],
            graph=graph,
            session=session,
            user_placeholder=a_in,
            bot_placeholder=b_in,
            similarity_all=sim_all,
            pred_confidence=pred_confidence,
            similarity=sim,
            dial_embed=dial_embed,
            bot_embed=bot_embed,
            all_bot_embed=all_bot_embed,
            attention_weights=attention_weights,
        )
| msamogh/rasa-frames | rasa/core/policies/embedding_policy.py | embedding_policy.py | py | 24,298 | python | en | code | 4 | github-code | 36 |
import re
def latent_representation_to_fasta(latent_representation, length):
    """Split the string into `length`-sized oligos, each preceded by a '> oligo' FASTA header."""
    chunks = [
        latent_representation[start:start + length]
        for start in range(0, len(latent_representation), length)
    ]
    return "> oligo\n" + "\n> oligo\n".join(chunks)
def get_value_from_fasta(key, fasta_str):
    """Return the sequence(s) recorded under `key` in a FASTA string.

    Returns the single sequence when exactly one record matches, otherwise the
    (possibly empty) list of matching sequences. Records must be followed by a
    newline to match.
    """
    # re.escape keeps keys containing regex metacharacters from corrupting the
    # pattern; rf"..." makes the escapes explicit raw-regex syntax.
    values = re.findall(rf">\s*{re.escape(key)}\s*\n([ACGT]+)\n", fasta_str)
    return values[0] if len(values) == 1 else values
def to_fastq(x):
    # Despite the name, this writes FASTA (not FASTQ) output, in 200-base oligos.
    # NOTE(review): output path is hard-coded and machine-specific.
    fasta = latent_representation_to_fasta(x, 200)
    with open(f"/Users/leonardopanattoni/Downloads/JPEG-DNA-BC-Transcoder-main/simulation/bikersFULL.fasta", "w+") as fd:
        fd.write(fasta)
def main():
    # Read the DNA-encoded payload and re-emit it as FASTA for the simulator.
    # NOTE(review): input path is hard-coded and machine-specific.
    with open("/Users/leonardopanattoni/Downloads/JPEG-DNA-BC-Transcoder-main/transcoder_results/bikersDNA", "r") as fd:
        x = fd.read()
    to_fastq(x)
if __name__ == "__main__":
    main()
import math
class Entity():
    """A generic map object: the player, a monster, an item, terrain, etc.

    Attributes mirror the constructor arguments; optional `fighter` and `ai`
    components receive an `owner` back-reference when present.
    """
    def __init__(self, x, y, char, color, name,
                 blocks=False, block_sight=None, explored=False,
                 fighter=None, ai=None):
        self.x = x
        self.y = y
        self.char = char
        self.color = color
        self.name = name
        self.blocks = blocks
        # By default, movement-blocking entities also block sight.
        if block_sight is None:
            block_sight = blocks
        self.block_sight = block_sight
        self.explored = explored
        self.fighter = fighter
        self.ai = ai
        if self.fighter:
            self.fighter.owner = self
        if self.ai:
            self.ai.owner = self

    def move(self, dx, dy):
        """Shift the entity by (dx, dy) without any collision checks."""
        self.x += dx
        self.y += dy

    def move_towards(self, target_x, target_y, game_map, entites):
        """Take one unit step toward (target_x, target_y) unless blocked.

        `entites` and `game_map.terrain` are both treated as collections of
        (x, y) positions that cannot be entered.
        """
        dx = target_x - self.x
        dy = target_y - self.y
        distance = math.sqrt(dx ** 2 + dy ** 2)
        # Bug fix: standing exactly on the target used to divide by zero.
        if distance == 0:
            return
        dx = int(round(dx / distance))
        dy = int(round(dy / distance))
        x = self.x + dx
        y = self.y + dy
        if (x, y) not in entites and (x, y) not in game_map.terrain:
            self.move(dx, dy)

    def distance_to(self, other):
        """Euclidean distance to another entity."""
        dx = other.x - self.x
        dy = other.y - self.y
        return math.sqrt(dx ** 2 + dy ** 2)
| Denrur/map_as_dict | entity.py | entity.py | py | 1,255 | python | en | code | 0 | github-code | 36 |
board = [' ' for x in range(10)]
def insertLetter(letter,pos):
    """Place *letter* ('X' or 'O') at index *pos* (1-9) on the global board."""
    board[pos] = letter
def spaceIsFree(pos):
    """Return True if cell *pos* (1-9) of the global board is unoccupied."""
    return board[pos] == ' '
def printBoard(board):
    """Render the 3x3 grid (cells 1-9 of *board*) to stdout."""
    for start in (1, 4, 7):
        print(' | | ')
        print(' ' + board[start] + '| ' + board[start + 1] + ' | ' + board[start + 2])
        print(' | | ')
        # Horizontal rule between rows, but not after the last one.
        if start != 7:
            print('-----------')
def isWinner(bo, le):
    """Return True if letter *le* occupies any complete row, column or diagonal of *bo*."""
    winning_lines = (
        (7, 8, 9), (4, 5, 6), (1, 2, 3),   # rows
        (1, 4, 7), (2, 5, 8), (3, 6, 9),   # columns
        (1, 5, 9), (3, 5, 7),              # diagonals
    )
    return any(all(bo[cell] == le for cell in line) for line in winning_lines)
def playerMove():
    """Prompt the human until they pick a free cell (1-9), then place 'X' there."""
    run = True
    while run:
        move = input('Select Your Position')
        try:
            new_move = int(move)
            print(new_move)
            if 0 < new_move < 10:
                if spaceIsFree(new_move):
                    run = False
                    insertLetter('X', new_move)
                else:
                    print('This space is already occupied by Computer')
            else:
                print('Please Enter a number between (1-9)')
        # Fix: the original bare `except:` swallowed every exception
        # (including KeyboardInterrupt); only int() conversion failures
        # are expected here.
        except ValueError:
            print('Please Enter a number')
def compMove():
    """Pick the computer's square: win > block > corner > centre > edge.

    Returns the chosen board index, or 0 when no move is available
    (main() treats 0 as a tie).
    """
    possibleMoves = [x for x, letter in enumerate(board) if letter == ' ' and x != 0]
    # 1) Take a winning move for 'O'; failing that, block 'X' from winning.
    for let in ['O', 'X']:
        for i in possibleMoves:
            boardCopy = board[:]
            # Fix: the original wrote `boardCopy[i] == let` — a comparison
            # with no effect, so the win/block check never fired.
            boardCopy[i] = let
            if isWinner(boardCopy, let):
                return i
    # 2) Prefer an open corner.
    cornersOpen = [i for i in possibleMoves if i in (1, 3, 7, 9)]
    if cornersOpen:
        return cornersOpen[selectRandom(cornersOpen)]
    # 3) Then the centre.
    if 5 in possibleMoves:
        return 5
    # 4) Then an open edge.
    edgesOpen = [i for i in possibleMoves if i in (2, 4, 6, 8)]
    if edgesOpen:
        return edgesOpen[selectRandom(edgesOpen)]
    # Fix: the original fell off the end and implicitly returned None when the
    # board was full; return 0 so main()'s tie check handles it.
    return 0
def selectRandom(li):
    """Return a uniformly random valid index into the sequence *li*."""
    import random
    return random.randrange(0, len(li))
def isBoardFull(board):
    """True when no playable cell remains.

    Index 0 of the 10-element board is unused and always ' ', so a count of
    one blank means every real cell (1-9) is taken.
    """
    return board.count(' ') <= 1
def main():
    """Run the interactive game loop: human ('X') and computer ('O') alternate.

    Win checks happen before each side's move; a compMove() of 0 means no
    move was available and the game is a tie.
    """
    print('Welcome To Tic Tac Toe!!!')
    printBoard(board)
    while not(isBoardFull(board)):
        # Computer ('O') win is detected at the top of the next iteration,
        # i.e. just before the human would move again.
        if not(isWinner(board, 'O')):
            playerMove()
            printBoard(board)
        else:
            print('Computer Wins')
            break
        # Human ('X') win is detected right after their move.
        if not(isWinner(board, 'X')):
            move = compMove()
            if move == 0:
                # No square left for the computer -> tie.
                print('We have a TIE Game')
            else:
                insertLetter('O',move)
                print('Computer played its move')
                printBoard(board)
        else:
            print('You Win')
            break
    if isBoardFull(board):
        print('We have a TIE Game')
main()
| Dishant-L/TicTacToe_Project | TicTacToe.py | TicTacToe.py | py | 2,648 | python | en | code | 0 | github-code | 36 |
26721591942 | import scrapy
from scrapy.http import Request, Response
from xdvideo.items import XdvideoItem
class CrawlSpider(scrapy.Spider):
    """Crawl lecture-video listing pages on sme.xidian.edu.cn and yield one
    XdvideoItem per video, numbered with a global episode index."""
    name = 'xdvideo'
    allowed_domains = ['sme.xidian.edu.cn']
    # Videos shown per listing page; used to derive the global episode index.
    VIDEOS_PER_PAGE = 15

    def __init__(self, url="https://sme.xidian.edu.cn/html/bkjj/zxkt/bdtwl1/list_92_{}.html", pages=3, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.url = url           # listing-page URL template; {} is the page number
        self.pages = int(pages)  # may arrive as a string from `-a pages=N`

    def start_requests(self):
        """Yield one request per listing page, carrying the page number in meta."""
        for page in range(1, self.pages + 1):
            yield Request(self.url.format(page), meta={"page": page})

    def parse(self, response: Response):
        """Extract detail-page links from a listing page and follow them."""
        XPATH_URL = "//body//div[@class='childinfo']//div//div[*]//a[1]/@href"
        urls = response.xpath(XPATH_URL).getall()
        # Idiom fix: enumerate over the urls instead of indexing with
        # range(len(urls)); episode numbers are global across pages.
        first_index = (response.meta["page"] - 1) * self.VIDEOS_PER_PAGE
        for offset, url in enumerate(urls, start=1):
            yield response.follow(url, callback=self.parse_detail,
                                  meta={"n": first_index + offset})

    def parse_detail(self, response: Response):
        """Extract title, course name and video URL from a detail page."""
        XPATH_TITLE = "//div[@class='text']//h4[1]/text()"
        XPATH_COURSE = "//div[@class='childtitle']//p/text()"
        XPATH_VIDEO = "//video/@src"
        title = response.xpath(XPATH_TITLE).get()
        course = response.xpath(XPATH_COURSE).get()
        video_url = response.urljoin(response.xpath(XPATH_VIDEO).get())
        return XdvideoItem(title=title, course=course, file_urls=[video_url], episode=response.meta["n"])
| ttimasdf/XIDIAN-SME-OCW-Crawler | xdvideo/xdvideo/spiders/crawl.py | crawl.py | py | 1,554 | python | en | code | 1 | github-code | 36 |
70427092265 | # import math
def check_prime(n):
    """Return True if *n* is prime, using 6k +/- 1 trial division.

    Fixes the original, which returned True for negative numbers such as -5
    (Python's modulo of a negative by 2 or 3 is positive, so they slipped
    past the early divisibility checks into the empty trial loop).
    """
    if n < 2:
        return False
    if n == 2 or n == 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    # All primes > 3 are of the form 6k +/- 1, so only test those candidates.
    i = 5
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True
n = int(input())
print(check_prime(n)) | asu2sh/dev | DSA_SPy/1.Maths/7_check_for_prime.py | 7_check_for_prime.py | py | 398 | python | en | code | 3 | github-code | 36 |
15287690449 | from __future__ import print_function
import bisect
########################################################################
# official recepi from doc.python.org #
# https://docs.python.org/2/library/bisect.html#searching-sorted-lists #
########################################################################
def find_index(array, x):
    """Return the leftmost index in sorted *array* whose value equals x.

    Raises ValueError when x is absent.
    """
    pos = bisect.bisect_left(array, x)
    if pos == len(array) or array[pos] != x:
        raise ValueError
    return pos
def find_lt(array, x):
    """Return the rightmost value in sorted *array* strictly less than x.

    Raises ValueError when no such value exists.
    """
    pos = bisect.bisect_left(array, x)
    if pos == 0:
        raise ValueError
    return array[pos - 1]
def find_le(array, x):
    """Return the rightmost value in sorted *array* less than or equal to x.

    Raises ValueError when no such value exists.
    """
    pos = bisect.bisect_right(array, x)
    if pos == 0:
        raise ValueError
    return array[pos - 1]
def find_gt(array, x):
    """Return the leftmost value in sorted *array* strictly greater than x.

    Raises ValueError when no such value exists.
    """
    pos = bisect.bisect_right(array, x)
    if pos == len(array):
        raise ValueError
    return array[pos]
def find_ge(array, x):
    """Return the leftmost value in sorted *array* greater than or equal to x.

    Raises ValueError when no such value exists.
    """
    pos = bisect.bisect_left(array, x)
    if pos == len(array):
        raise ValueError
    return array[pos]
def find_last_true(sorted_list, true_criterion):
    """Return the last item of *sorted_list* for which *true_criterion* holds.

    Assumes the mapped truth values form a prefix of True followed by a
    suffix of False::

        items        [i0, i1, ..., ik, ik+1, ..., in]
        criterion    [T,  T,  ..., T,  F,    ..., F ]
                                   ^-- returned item

    A plain binary search over the mapped values would also work, but this
    routine is written to minimise the number of criterion evaluations, which
    matters when the mapping function is expensive.

    Worked example: indices 0..9 with truth table [1,1,1,1,1,1,1,0,0,0] and
    criterion "<= 6". First probe int((0+9)/2.0)=4 -> True; probe 4+1=5 ->
    also True, so jump forward to int((4+9)/2.0)=6; 6 is True and 7 is False,
    so item 6 is the answer.

    Raises ValueError when even the first item fails the criterion.
    """
    # exam first item, if not true, then impossible to find result
    if not true_criterion(sorted_list[0]):
        raise ValueError
    # exam last item, if true, it is the one.
    if true_criterion(sorted_list[-1]):
        return sorted_list[-1]
    lower, upper = 0, len(sorted_list) - 1
    index = int((lower+upper)/2.0)
    while 1:
        if true_criterion(sorted_list[index]):
            if true_criterion(sorted_list[index+1]):
                # Still inside the True prefix: move the probe forward.
                lower = index
                index = int((index+upper)/2.0)
            else:
                # index is True and index+1 is False: found the boundary.
                return sorted_list[index]
        else:
            # Probe landed in the False suffix: move it backward.
            upper = index
            index = int((lower+index)/2.0)
def find_nearest(array, x):
    """Return the element of sorted *array* closest to x (ties pick the smaller)."""
    # Clamp to the ends when x is outside the array's range.
    if x <= array[0]:
        return array[0]
    if x >= array[-1]:
        return array[-1]
    lower = find_le(array, x)
    upper = find_ge(array, x)
    return upper if (x - lower) > (upper - x) else lower
if __name__ == "__main__":
from collections import OrderedDict
import unittest
import random
import time
class BiSearch():
"""A binary search class, doens't have better performance than original implementation
"""
def fit(self, array):
self.train_dict = OrderedDict()
for ind, value in enumerate(array):
self.train_dict[ind] = value
self.train_array = array
def find_le(self, x):
"Find rightmost value less than or equal to x"
i = bisect.bisect_right(self.train_array, x)
if i != len(self.train_array):
return self.train_dict[i-1]
raise ValueError
def find_ge(self, x):
"Find leftmost item greater than or equal to x"
i = bisect.bisect_left(self.train_array, x)
if i != len(self.train_array):
return self.train_dict[i]
raise ValueError
class FunctionsUnittest(unittest.TestCase):
def setUp(self):
self.sorted_array = list(range(1000))
def test_index(self):
self.assertEqual(find_index(self.sorted_array, 0), 0)
self.assertEqual(find_index(self.sorted_array, 999), 999)
self.assertEqual(find_index(self.sorted_array, 499), 499)
self.assertRaises(ValueError, find_index, self.sorted_array, -1)
self.assertRaises(ValueError, find_index, self.sorted_array, 1001)
def test_find_nearest(self):
self.assertEqual(find_nearest(self.sorted_array, 25), 25)
self.assertEqual(find_nearest(self.sorted_array, 25.49), 25)
self.assertEqual(find_nearest(self.sorted_array, 25.5), 25)
self.assertEqual(find_nearest(self.sorted_array, 25.51), 26)
self.assertEqual(find_nearest(self.sorted_array, -1), 0)
self.assertEqual(find_nearest(self.sorted_array, 1000), 999)
class PerformanceTest(unittest.TestCase):
def setUp(self):
self.sorted_array = list(range(1000*1000))
self.bisearch = BiSearch()
self.bisearch.fit(self.sorted_array)
def test_speed(self):
"""because original recepi use list[index] to take item. I thought the speed can be
improved if I use dict[index]. But failed.
"""
st = time.clock()
for _ in range(1000):
find_le(self.sorted_array, 500*1000)
original = time.clock() - st
st = time.clock()
for _ in range(1000):
self.bisearch.find_le(500*1000)
improved = time.clock() - st
self.assertFalse(improved < original) # improved elapse not smaller than original
class LastTrueTest(unittest.TestCase):
def setUp(self):
self.sorted_list = list({random.randint(1, 100000) for _ in range(1000)})
self.sorted_list.sort()
def true_criterion(self, item):
return item <= 500
def test(self):
value = find_last_true(self.sorted_list, self.true_criterion)
print("last True value is %s" % value)
unittest.main()
| MacHu-GWU/Angora | angora/DATA/binarysearch.py | binarysearch.py | py | 6,977 | python | en | code | 0 | github-code | 36 |
34089857022 | from source.utils.dataload import loadData
from source.programming_languages.pl import PL
def mainfunc():
    """Build the programming-languages model and analyse a single language."""
    # Coefficients for the dataset split.
    model_size = 250
    learn_size = 158
    analyzer = PL("./datasets/programming languages.csv", [model_size, learn_size])
    analyzer.analyzeLanguage('Rust')
if __name__ == '__main__':
mainfunc() | Arfabrika/time-series-predictions | main.py | main.py | py | 343 | python | en | code | 0 | github-code | 36 |
19197360818 | import cv2, os, random, colorsys, onnxruntime, string, time, argparse, uuid, logging
import numpy as np
from utils import Processing
from glob import glob
from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser("license")
parser.add_argument('-i',"--input", type = str, required = True, default = False, help = "path image ...")
logging.basicConfig(filename=f'log/ocr.log', filemode='w', format='%(asctime)s - %(message)s', level = logging.INFO, datefmt='%d-%b-%y %H:%M:%S')
class Detection(Processing):
    """ONNX-based YOLOv4 object detector that draws labelled boxes on an image.

    Inherits pre/post-processing helpers (get_classes, colors, cvtColor,
    resize_image, preprocess_input) from Processing — assumed, based on the
    calls below; confirm against utils.Processing.
    """
    def __init__(self, path_model:str, path_classes:str, image_shape:list, padding:int):
        # NOTE(review): `padding` is accepted but never stored or used here —
        # confirm whether it belongs to Processing or is dead.
        self.path_model = path_model
        self.path_classes = path_classes
        self.session = onnxruntime.InferenceSession(self.path_model)
        self.class_labels, self.num_names = self.get_classes(self.path_classes)
        self.image_shape = image_shape
        self.font = ImageFont.truetype('weights/font.otf', 8)
        self.class_colors = self.colors(self.class_labels)

    def boxes_detection(self, image, size):
        """Run the ONNX session; returns (boxes, scores, class indices)."""
        # The model takes two inputs: the image tensor and the original size.
        ort_inputs = {self.session.get_inputs()[0].name:image, self.session.get_inputs()[1].name:size}
        box_out, scores_out, classes_out = self.session.run(None, ort_inputs)
        return box_out, scores_out, classes_out

    def draw_detection(self, image, boxes_out, scores_out, classes_out):
        """Draw one labelled rectangle per detection onto a copy of *image*.

        Returns the annotated image as a numpy array.
        """
        image_pred = image.copy()
        for i, c in reversed(list(enumerate(classes_out))):
            draw = ImageDraw.Draw(image_pred)
            predicted_class = self.class_labels[c]
            box = boxes_out[i]
            score = scores_out[i]
            # Boxes come as (top, left, bottom, right); round and clamp to
            # the image bounds.
            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            label = '{}: {:.2f}%'.format(predicted_class, score*100)
            print(label)
            logging.info(f'{label}')
            # NOTE(review): ImageDraw.textsize was deprecated in Pillow 9.2
            # and removed in Pillow 10 — this requires an older Pillow.
            label_size = draw.textsize(label, self.font)
            # Put the label above the box if it fits, else just inside it.
            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])
            draw.rectangle([left, top, right, bottom], outline= tuple(self.class_colors[c]), width=1)
            draw.text(text_origin, label, fill = (255,255,0), font = self.font)
            del draw
        return np.array(image_pred)

    def __call__(self, input_image:str):
        """Detect objects in the image at path *input_image*; returns the annotated array."""
        image = Image.open(input_image)
        # Original (height, width) as a 1x2 float32 batch for the model.
        input_image_shape = np.expand_dims(np.array([image.size[1], image.size[0]], dtype='float32'), 0)
        image = self.cvtColor(image)
        image_data = self.resize_image(image, (self.image_shape[1], self.image_shape[0]))
        image_data = np.expand_dims(self.preprocess_input(np.array(image_data, dtype='float32')), 0)
        box_out, scores_out, classes_out = self.boxes_detection(image_data,input_image_shape)
        image_pred = self.draw_detection(image, box_out, scores_out, classes_out)
        return image_pred
if __name__ == '__main__':
    args = parser.parse_args()
    # Detector configuration: ONNX weights, class names, model input size.
    opt = {"path_model":"weights/yolo4.onnx","path_classes":"classes.txt","image_shape":[416,416],"padding":0}
    detector = Detection(**opt)
    image_pred = detector(args.input)
    # NOTE(review): despite the constant name, this swaps the channel order
    # of the RGB array for cv2.imwrite (which expects BGR) — confirm.
    image = cv2.cvtColor(image_pred, cv2.COLOR_BGR2RGB)
    cv2.imwrite("out.jpg", image)
| Kurmangozhin/plates-recognition-yolo4 | module.py | module.py | py | 3,557 | python | en | code | 1 | github-code | 36 |
14551216933 | from collections import defaultdict
from pathlib import Path
from typing import DefaultDict, List
from jubeatools import song
from jubeatools.formats.dump_tools import make_dumper_from_chart_file_dumper
from jubeatools.formats.filetypes import ChartFile
from jubeatools.utils import group_by
from .. import commons as konami
from ..commons import AnyNote
from ..dump_tools import make_events_from_chart
from . import construct
def _dump_jbsq(song: song.Song, **kwargs: dict) -> List[ChartFile]:
    """Serialize every chart of *song* into jbsq bytes, one ChartFile per chart."""
    chart_files = []
    for dif, chart, timing, hakus in song.iter_charts():
        chart_events = make_events_from_chart(chart.notes, timing, hakus)
        binary = construct.jbsq.build(make_jbsq_chart(chart_events, chart.notes))
        chart_files.append(ChartFile(binary, song, dif, chart))
    return chart_files
# Public dumper for a whole song: wraps the per-chart dumper so each chart is
# written to its own file (the "{difficulty:l}" placeholder is presumably a
# lower-cased difficulty name handled by the file-name formatter — confirm).
dump_jbsq = make_dumper_from_chart_file_dumper(
    internal_dumper=_dump_jbsq, file_name_template=Path("seq_{difficulty:l}.jbsq")
)
def make_jbsq_chart(events: List[konami.Event], notes: List[AnyNote]) -> construct.JBSQ:
    """Assemble the complete JBSQ structure (header fields + event list) for one chart."""
    jbsq_events = [convert_event_to_jbsq(e) for e in events]
    num_events = len(events)
    combo = compute_max_combo(notes)
    # The chart's end time is carried by its END event.
    end_time = next(e for e in events if e.command == konami.Command.END).time
    # Buttons held at the very first note time, packed as a 16-bit bitmask
    # (one bit per panel position).
    first_note_time_in_beats = min((n.time for n in notes), default=0)
    starting_notes = [n for n in notes if n.time == first_note_time_in_beats]
    starting_buttons = sum(1 << n.position.index for n in starting_notes)
    # Start time in ticks: the earliest playable (tap or long) event, 0 if none.
    first_note_time = min(
        (
            e.time
            for e in events
            if e.command in (konami.Command.PLAY, konami.Command.LONG)
        ),
        default=0,
    )
    densities = compute_density_graph(events, end_time)
    jbsq_chart = construct.JBSQ(
        num_events=num_events,
        combo=combo,
        end_time=end_time,
        starting_buttons=starting_buttons,
        start_time=first_note_time,
        density_graph=densities,
        events=jbsq_events,
    )
    # magic is set after construction; presumably the construct container
    # does not accept it as a keyword — confirm against construct.JBSQ.
    jbsq_chart.magic = b"JBSQ"
    return jbsq_chart
def convert_event_to_jbsq(event: konami.Event) -> construct.Event:
    """Translate an in-memory konami event into its binary (construct) form."""
    return construct.Event(
        # Command and EventType share member names, so map by name.
        type_=construct.EventType[event.command.name],
        time_in_ticks=event.time,
        value=event.value,
    )
def compute_max_combo(notes: List[AnyNote]) -> int:
    """Return the chart's max combo: taps count once, long notes twice (press + release)."""
    notes_by_type = group_by(notes, type)
    tap_notes = len(notes_by_type[song.TapNote])
    long_notes = len(notes_by_type[song.LongNote])
    return tap_notes + 2 * long_notes
def compute_density_graph(events: List[konami.Event], end_time: int) -> List[int]:
    """Build the 60-byte jbsq density graph.

    The chart is divided into 120 time buckets; each bucket counts the
    presses (taps, long-note presses and releases) falling into it, capped
    at 15 so it fits in a nibble. Two consecutive buckets are packed per
    output byte.
    """
    events_by_type = group_by(events, lambda e: e.command)
    buckets: DefaultDict[int, int] = defaultdict(int)

    def bucket_of(time: int) -> int:
        # Fix: an event landing exactly on end_time used to map to bucket
        # 120, which the 0..119 packing loop below silently dropped; clamp
        # it into the final bucket instead.
        return min(int((time / end_time) * 120), 119)

    for tap in events_by_type[konami.Command.PLAY]:
        buckets[bucket_of(tap.time)] += 1
    for long in events_by_type[konami.Command.LONG]:
        buckets[bucket_of(long.time)] += 1
        duration = konami.EveLong.from_value(long.value).duration
        buckets[bucket_of(long.time + duration)] += 1
    res = []
    for i in range(0, 120, 2):
        # The jbsq density graph in a array of nibbles, the twist is that for
        # some obscure reason each pair of nibbles is swapped in the byte ...
        # little-endianness is a hell of a drug, don't do drugs kids ...
        first_nibble = min(buckets[i], 15)
        second_nibble = min(buckets[i + 1], 15)
        density_byte = (second_nibble << 4) + first_nibble
        res.append(density_byte)
    return res
| Stepland/jubeatools | jubeatools/formats/konami/jbsq/dump.py | dump.py | py | 3,650 | python | en | code | 4 | github-code | 36 |
29870177823 | import sys
import copy
from collections import deque
# Running script: given code can be run with the command:
# python file.py, ./path/to/init_state.txt ./output/output.txt
# Variable ordering heuristics: Most constrained variable + Most constraining variable
# Value ordering heuristics: Least constraining value
# Inference mechanisms: Arc consistency
class Sudoku(object):
    """Sudoku solver modelled as a CSP and solved with backtracking search.

    Heuristics: most-constrained variable + most-constraining variable for
    variable ordering, least-constraining value for value ordering, and arc
    consistency (AC-3) as the inference step.

    NOTE(review): written for Python 2 — `xrange` and integer `/` division
    (e.g. `(row / 3) * 3`) are relied on throughout.
    """
    def __init__(self, puzzle):
        self.puzzle = puzzle # self.puzzle is a list of lists
        self.var_domain, self.var_constraints, self.var_unassigned = self.csp(puzzle)

    def csp(self, puzzle):
        """Build the CSP for *puzzle*.

        Returns (var_domain, var_constraints, var_unassigned) where
        var_domain maps each cell (r, c) to either its fixed value or the
        set of still-legal values, var_constraints maps each cell to its
        peers (same row, column and 3x3 subgrid), and var_unassigned counts
        the empty cells (value 0).
        """
        var_domain = {}
        var_constraints = {}
        var_unassigned = 0
        for r in xrange(9):
            for c in xrange(9):
                var_domain[(r, c)] = None
                var_constraints[(r, c)] = set()
                if puzzle[r][c] == 0:
                    var_unassigned += 1
        possible_domain = set([1, 2, 3, 4, 5, 6, 7, 8, 9])
        for var in var_domain:
            row, column = var[0], var[1]
            assigned_val = set()
            puzzle_val = puzzle[row][column]
            # Row peers (and values already taken in the row).
            for c in xrange(9):
                val = puzzle[row][c]
                var_constraints[var].add((row, c))
                if val != 0 and puzzle_val == 0:
                    assigned_val.add(val)
            # Column peers.
            for r in xrange(9):
                val = puzzle[r][column]
                var_constraints[var].add((r, column))
                if val != 0 and puzzle_val == 0:
                    assigned_val.add(val)
            # 3x3 subgrid peers (integer division locates the subgrid origin).
            subgrid_r = (row / 3) * 3
            subgrid_c = (column / 3) * 3
            for r in xrange(subgrid_r, subgrid_r + 3):
                for c in xrange(subgrid_c, subgrid_c + 3):
                    val = puzzle[r][c]
                    var_constraints[var].add((r, c))
                    if val != 0 and puzzle_val == 0:
                        assigned_val.add(val)
            # A cell is not its own peer.
            var_constraints[var].remove(var)
            if puzzle_val == 0:
                var_domain[var] = possible_domain - assigned_val
            else:
                var_domain[var] = puzzle_val
        return var_domain, var_constraints, var_unassigned

    def is_complete(self, var_unassigned):
        """True when every cell has been assigned a value."""
        return var_unassigned == 0

    def is_consistent(self, var, val, var_domain, var_constraints):
        """True when assigning *val* to *var* conflicts with no peer's value."""
        return all(var_domain[constraint] != val for constraint in var_constraints[var])

    def select_unassigned_var(self, var_domain, var_constraints):
        """Pick the next variable: fewest legal values, ties broken by the
        variable constraining the most other unassigned variables."""
        # Pass 1: most-constrained variables (smallest remaining domain).
        most_constrained_var = set()
        fewest_legal_val = 9
        for var in var_domain:
            domain = var_domain[var]
            if isinstance(domain, set):
                legal_val = len(domain)
                if legal_val < fewest_legal_val:
                    most_constrained_var = set()
                    fewest_legal_val = legal_val
                if legal_val == fewest_legal_val:
                    most_constrained_var.add(var)
        # Pass 2: among those, the one touching the most unassigned peers.
        most_constraining_var = None
        most_constraints = 0
        for var in most_constrained_var:
            num_constraints = 0
            for constraint in var_constraints[var]:
                if isinstance(var_domain[constraint], set):
                    num_constraints += 1
            if num_constraints >= most_constraints:
                most_constraining_var = var
                most_constraints = num_constraints
        # last var in most_constrained_var with largest num_constraints
        # may not be only var with that num_constraints
        return most_constraining_var

    def order_domain_val(self, var, var_domain, var_constraints):
        """Order *var*'s values least-constraining first (fewest peer domains affected)."""
        val_order = []
        for val in var_domain[var]:
            num_affected = 0
            for constraint in var_constraints[var]:
                if isinstance(var_domain[constraint], set):
                    if val in var_domain[constraint]:
                        num_affected += 1
            val_order.append((val, num_affected))
        val_order.sort(key = lambda c: c[1])
        return [v[0] for v in val_order]

    def revise(self, var_domain, x_i, x_j):
        """AC-3 revision: drop values of x_i with no supporting value in x_j.

        Returns True when x_i's domain shrank. Note the domain is replaced
        (rebound), never mutated in place — backtrack() relies on that for
        its shallow-copy restore.
        """
        revised = False
        domain_i = var_domain[x_i]
        delete = set()
        for val_x in domain_i:
            domain_j = var_domain[x_j]
            if isinstance(domain_j, set):
                if not any(val_y != val_x for val_y in domain_j):
                    delete.add(val_x)
                    revised = True
            else:
                # x_j is already assigned; val_x must differ from it.
                if not domain_j != val_x:
                    delete.add(val_x)
                    revised = True
        var_domain[x_i] = domain_i - delete
        return revised

    def inference(self, var, var_domain, var_constraints):
        """AC-3: propagate the assignment of *var*; False when a domain empties."""
        # queue of arcs (x_i, x_j) for all x_i which are unassigned. x_j is var.
        queue = deque()
        for constraint in var_constraints[var]:
            if isinstance(var_domain[constraint], set):
                queue.append((constraint, var))
        while queue:
            x_i, x_j = queue.popleft()
            if self.revise(var_domain, x_i, x_j):
                if len(var_domain[x_i]) == 0:
                    return False
                for x_k in var_constraints[x_i] - set([x_j]):
                    if isinstance(var_domain[x_k], set):
                        queue.append((x_k, x_i))
        return True

    def backtrack(self, var_domain, var_constraints, var_unassigned):
        """Backtracking search; returns the completed var_domain or False."""
        if self.is_complete(var_unassigned):
            return var_domain
        var = self.select_unassigned_var(var_domain, var_constraints)
        for val in self.order_domain_val(var, var_domain, var_constraints):
            # Shallow copy suffices: revise() rebinds domains, never mutates them.
            var_domain_prev = var_domain.copy()
            var_unassigned_prev = var_unassigned
            if self.is_consistent(var, val, var_domain, var_constraints):
                var_domain[var] = val
                var_unassigned -= 1
                inferences = self.inference(var, var_domain, var_constraints)
                if inferences != False:
                    result = self.backtrack(var_domain, var_constraints, var_unassigned)
                    if result != False:
                        return result
            # Undo this value's effects before trying the next one.
            var_domain = var_domain_prev
            var_unassigned = var_unassigned_prev
        return False

    def solve(self):
        """Solve the puzzle in place and return the completed grid."""
        complete_assignment = self.backtrack(self.var_domain, self.var_constraints, self.var_unassigned)
        for var in complete_assignment:
            r, c = var[0], var[1]
            self.puzzle[r][c] = complete_assignment[var]
        return self.puzzle
# you may add more classes/functions if you think is useful
# However, ensure all the classes/functions are in this file ONLY
# Note that our evaluation scripts only call the solve method.
# Any other methods that you write should be used within the solve() method.
if __name__ == "__main__":
# STRICTLY do NOT modify the code in the main function here
if len(sys.argv) != 3:
print ("\nUsage: python CS3243_P2_Sudoku_XX.py input.txt output.txt\n")
raise ValueError("Wrong number of arguments!")
try:
f = open(sys.argv[1], 'r')
except IOError:
print ("\nUsage: python CS3243_P2_Sudoku_XX.py input.txt output.txt\n")
raise IOError("Input file not found!")
puzzle = [[0 for i in range(9)] for j in range(9)]
lines = f.readlines()
i, j = 0, 0
for line in lines:
for number in line:
if '0' <= number <= '9':
puzzle[i][j] = int(number)
j += 1
if j == 9:
i += 1
j = 0
sudoku = Sudoku(puzzle)
ans = sudoku.solve()
with open(sys.argv[2], 'a') as f:
for i in range(9):
for j in range(9):
f.write(str(ans[i][j]) + " ")
f.write("\n")
| cs3243-ay1920s2-g30/sudoku | CS3243_P2_Sudoku_30_1.py | CS3243_P2_Sudoku_30_1.py | py | 7,898 | python | en | code | 0 | github-code | 36 |
830008449 | #!/usr/bin/env python
import re
import lib.analysis.analyse as a
import lib.analysis.component as a_comp
import lib.dynamic.component as comp
import lib.independence.fs as fs
import lib.settings as s
import lib.ui.menu as menu
# The greater purpose of (functions in) this file is to
# ask user what results are to be analysed
class AnalysisMenuHandler(object):
    '''Object to handle sequential submenu calls,
    and to provide selection filters'''

    # Constructor
    # resultpath is the path where all results are kept
    # components is a list of available methodcomponent
    def __init__(self, resultpath, components):
        self.current_path = resultpath
        self.components = components

    # Returns a list of dirs with datetime name, found in Meizodon/results,
    # excluding runs that have already been analysed
    def get_result_dirs(self):
        if not fs.isdir(self.current_path):
            return []
        return_list = fs.lsonlydir(self.current_path, full_paths=True)
        if fs.isdir(s.analyseddir):
            already_analysed = fs.lsonlydir(s.analyseddir)
        else:
            already_analysed = []
        # Raw string: '\(' in a plain literal is an invalid escape sequence.
        regex = r'[0-9]+-[0-9]{2}-[0-9]{2}\([0-9]{2}:[0-9]{2}:[0-9]{2}\)'
        pattern = re.compile(regex)
        # Fix: the original called return_list.remove() while iterating
        # return_list, which skips the element following every removal.
        return [item for item in return_list
                if pattern.match(fs.basename(item))
                and fs.basename(item) not in already_analysed]

    # Returns a list of dirs in Meizodon/results/<datetime>/,
    # with names corresponding to available methods
    def get_result_sub_dirs(self):
        return_list = fs.lsonlydir(self.current_path)
        component_names = [str(x) for x in self.components]
        # Fix: same remove-while-iterating bug as get_result_dirs().
        return [item for item in return_list if item in component_names]

    # Takes a full path to a result, returns an AnalysisComponent
    # path like: results/<datetime>/DroidSafe/adsvr.soporteweb.es.apk
    def get_analysis_component(self, path):
        path_array = fs.split(path)
        datetime = path_array[-3]
        method_name = path_array[-2]
        apk_name = path_array[-1]
        method = comp.Component.get_component_for_name(self.components, method_name)
        results = fs.join(s.resultsdir,datetime,'results.csv')
        analysis_result_loc = fs.join(s.analyseddir,datetime,method_name,apk_name)
        return a_comp.AnalysisComponent(path, analysis_result_loc, method, results)

    # Takes list of full paths to a result, returns list of AnalysisComponents
    # path like: [results/<datetime>/DroidSafe/adsvr.soporteweb.es.apk, ...]
    def get_analysis_components(self, paths):
        analysiscomponents = []
        for path in paths:
            analysiscomponents.append(self.get_analysis_component(path))
        return analysiscomponents

    # Takes list of paths to methods, returns list of AnalysisComponents
    # path like: [results/<datetime>/DroidSafe, results/<datetime>/JN-SAF]
    def get_sub_all_analysis_components(self, paths):
        analysiscomponents = []
        for path in paths:
            contents = fs.lsonlydir(path, full_paths=True)
            analysiscomponents.extend(self.get_analysis_components(contents))
        return analysiscomponents

    # Takes list of path to execution results, returns list of AnalysisComponents
    # path like: [results/2019-04-14(13:59:55), results/2019-05-24(01:23:34)]
    def get_all_analysis_components(self, paths):
        analysiscomponents = []
        for path in paths:
            contents = fs.lsonlydir(path, full_paths=True)
            analysiscomponents.extend(self.get_sub_all_analysis_components(contents))
        return analysiscomponents

    # Perform analysis on one or more full execution results
    # path like: [results/2019-04-14(13:59:55), results/2019-05-24(01:23:34)]
    def analyse_all(self, paths):
        analysiscomponents = self.get_all_analysis_components(paths)
        a.analyse_all(analysiscomponents)

    # Perform analysis on one or more components from a execution results
    # path like: [results/<datetime>/DroidSafe, results/<datetime>/JN-SAF]
    def analyse_sub_all(self, paths):
        analysiscomponents = self.get_sub_all_analysis_components(paths)
        a.analyse_all(analysiscomponents)

    # Perform analysis on one or more apks from one component from one execution result
    # path like: [results/<datetime>/DroidSafe/adsvr.soporteweb.es.apk, ...]
    def analyse_sub_sub_all(self, paths):
        analysiscomponents = self.get_analysis_components(paths)
        a.analyse_all(analysiscomponents)

    # Perform analysis on exactly 1 apk's execution result for one component
    # path like: results/<datetime>/DroidSafe/adsvr.soporteweb.es.apk
    def analyse_sub_sub_single(self, path):
        analysiscomponent = self.get_analysis_component(path)
        a.analyse_all([analysiscomponent])

    # Shows user a menu to determine which analyse generated
    # result directories should be analysed
    def analysis_menu(self):
        if not fs.isdir(s.resultsdir) or not fs.ls(s.resultsdir):
            print('Nothing to analyse.')
            return
        while True:
            print('Results for which run do you want to analyse?')
            options = self.get_result_dirs()
            chosenopts, result = menu.standard_menu(options, lambda x: fs.basename(str(x)))
            if result == menu.MenuResults.CHOSEN:
                # A single chosen run descends into the submenu; multiple
                # runs are analysed wholesale.
                if len(chosenopts) == 1:
                    self.current_path = fs.join(self.current_path,chosenopts[0])
                    self.analysis_submenu()
                    return
                elif len(chosenopts) > 1:
                    self.analyse_all(chosenopts)
                    return
            elif result == menu.MenuResults.EVERYTHING:
                self.analyse_all(chosenopts)
                return
            elif result == menu.MenuResults.BACK:
                return

    # Shows user a menu to further decide which
    # execution results should be analysed
    def analysis_submenu(self):
        if not fs.ls(self.current_path):
            print('Nothing to analyse here')
            return
        while True:
            print('Results for which method do you want to analyse?')
            options = self.get_result_sub_dirs()
            chosenopts, result = menu.standard_menu(options, lambda x: str(x))
            if result == menu.MenuResults.CHOSEN:
                if len(chosenopts) == 1:
                    self.current_path = fs.join(self.current_path,chosenopts[0])
                    self.analysis_sub_submenu()
                    return
                elif len(chosenopts) > 1:
                    self.analyse_sub_all([fs.join(self.current_path,x) for x in chosenopts])
                    return
            elif result == menu.MenuResults.EVERYTHING:
                self.analyse_sub_all([fs.join(self.current_path,x) for x in chosenopts])
                return
            elif result == menu.MenuResults.BACK:
                # Step back up to the parent directory before leaving.
                self.current_path = fs.dirname(self.current_path)
                return

    # Shows user a menu to further decide which
    # execution results should be analysed
    def analysis_sub_submenu(self):
        if not fs.ls(self.current_path):
            print('Nothing to analyse here')
            return
        print('Results for which apk do you want to analyse?')
        options = fs.lsonlydir(self.current_path)
        chosenopts, result = menu.standard_menu(options, lambda x: str(x))
        if result == menu.MenuResults.CHOSEN:
            if len(chosenopts) == 1:
                self.current_path = fs.join(self.current_path,chosenopts[0])
                self.analyse_sub_sub_single(self.current_path)
            elif len(chosenopts) > 1:
                self.analyse_sub_sub_all([fs.join(self.current_path,x) for x in chosenopts])
        elif result == menu.MenuResults.EVERYTHING:
            self.analyse_sub_sub_all([fs.join(self.current_path,x) for x in chosenopts])
        elif result == menu.MenuResults.BACK:
            self.current_path = fs.dirname(self.current_path)
# Returns True if this window should be shown in the main menu
# Otherwise, it returns False
def should_show(components):
    """True when at least one unanalysed result run exists to show in the main menu."""
    return bool(AnalysisMenuHandler(s.resultsdir, components).get_result_dirs())
# Main function of this menu. Creates a handler-object and executes it
def analysis_menu(components):
    """Main entry point of this menu: build a handler and run the selection flow."""
    AnalysisMenuHandler(s.resultsdir, components).analysis_menu()
| Sebastiaan-Alvarez-Rodriguez/Meizodon | lib/analysis/menu.py | menu.py | py | 8,592 | python | en | code | 4 | github-code | 36 |
7438956466 | # Pure Doubly Linked List
# My intention is to get familiar with Doubly Linked List before implementing the LRU cache algorithm.
class Node:
    """A single doubly-linked-list node holding *data* plus next/prev links."""
    def __init__(self, next=None, prev=None, data=None):
        self.data = data
        self.prev = prev
        self.next = next
class DoublyLinkedList:
    """A minimal doubly linked list (practice before implementing an LRU cache)."""

    def __init__(self):
        # Empty list: no head node yet.
        self.head = None

    def push(self, new_data):
        """Insert a node holding *new_data* at the front of the list."""
        new_node = Node(data=new_data)
        new_node.next = self.head
        new_node.prev = None
        if self.head is not None:
            self.head.prev = new_node
        self.head = new_node

    def insertAfter(self, prev_node, new_data):
        """Insert a node holding *new_data* immediately after *prev_node*."""
        if prev_node is None:
            print("This node doesn't exist in DDL")
            return
        new_node = Node(data=new_data)
        new_node.next = prev_node.next
        prev_node.next = new_node
        new_node.prev = prev_node
        if new_node.next is not None:
            new_node.next.prev = new_node

    def append(self, new_data):
        """Insert a node holding *new_data* at the end of the list."""
        new_node = Node(data=new_data)
        new_node.next = None
        # Empty list: the new node becomes the head.
        if self.head is None:
            new_node.prev = None
            self.head = new_node
            return
        # Otherwise walk to the last node and link behind it.
        last = self.head
        while last.next is not None:
            last = last.next
        last.next = new_node
        new_node.prev = last

    def __repr__(self):
        # Fix: print(dll) previously showed only the default object repr;
        # render the chain of stored values instead.
        values = []
        node = self.head
        while node is not None:
            values.append(repr(node.data))
            node = node.next
        return "DoublyLinkedList(" + " <-> ".join(values) + ")"
dll = DoublyLinkedList()
dll.append(3)
# Fix: the original passed dll.head.next, which is None for a one-element
# list, so insertAfter only printed its error message and inserted nothing.
dll.insertAfter(dll.head, 4)
print(dll)
# python3 cache.py | walnut07/GoogleSTEP | Week_2/Doubly-Linked-List.py | Doubly-Linked-List.py | py | 2,463 | python | en | code | 1 | github-code | 36 |
21421821259 | from flask import Flask,render_template,request
from pymongo import MongoClient
import subprocess as sp
import os
mongo_server_url="mongodb://127.0.0.1:27017"
client = MongoClient(mongo_server_url)
app = Flask("chat app")
#here i am going to defind database and collection name
db ="lw"
collection="flask"
#We have to specify that In which IP and port you want to run this web app
port=80
hostname="0.0.0.0"
@app.route('/')
def home():
    """Serve the registration form, first clearing the Ansible var files.

    The three .yml files feed the mail playbook; truncating them here makes
    sure a previous visitor's data is never re-sent on the next submission.
    """
    for var_file in ("names.yml", "mobiles.yml", "emails.yml"):
        # Opening in "w" truncates the file (and, unlike the original
        # r+/truncate combination, also creates it when it is missing).
        with open(var_file, "w"):
            pass
    return render_template("index.html")
@app.route('/sumbitted' , methods=['POST'])
def sumbitted():
    """Handle the registration POST: persist to MongoDB, write the Ansible
    variable files, then run the mail playbook and render the result page.

    (Route path keeps its original spelling: the HTML form posts to it.)
    """
    name = request.form.get("name")
    mobile = request.form.get("mobile")
    email = request.form.get("email")
    # Persist the registration. Collection.insert() was removed from pymongo;
    # insert_one() is the supported single-document call.
    client[db][collection].insert_one({"name": name, "mobile": mobile, "email": email})
    # Write each value into its playbook variable file (append mode, as before).
    for var_file, key, value in (
        ("names.yml", "name: ", name),
        ("mobiles.yml", "mobile: ", mobile),
        ("emails.yml", "email: ", email),
    ):
        with open(var_file, "a") as f:
            f.write(key)
            f.write(value)
    # Run the playbook; render an error page when ansible exits non-zero.
    exit_code, output = sp.getstatusoutput(
        "ansible-playbook mail.yml --vault-password-file password.txt"
    )
    if exit_code == 0:
        return render_template("response.html")
    else:
        return render_template("err.html")
# Launch the Flask development server on 0.0.0.0:80 with debug enabled.
app.run(debug=True , port=port , host=hostname)
| MDMOQADDAS/RegistrationApp | app.py | app.py | py | 1,857 | python | en | code | 1 | github-code | 36 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 22 20:10:01 2018
@author: Suveen
"""
from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl
import re
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

url = input('Enter - ')
html = urlopen(url, context=ctx).read()

# html.parser is the HTML parser included in the standard Python 3 library.
# information on other HTML parsers is here:
# http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser
soup = BeautifulSoup(html, "html.parser")

# Retrieve every <span> tag and total all numbers found inside them.
tags = soup('span')
count = 0
com_sum = 0
numlist = list()
for tag in tags:
    count = count + 1
    # Pull each run of digits out of the tag's markup and accumulate it.
    for digits in re.findall('[0-9]+', str(tag)):
        num = float(digits)
        com_sum = com_sum + num
        numlist.append(num)
print('Count ',count)
print('Sum ', int(com_sum))
#Python - Partie 2 - Exercice 9
#Script qui affiche 8 carré à la suite.
#Importation des module
from turtle import *
from dessins_tortue import *
# Read the square size and colour from the user (prompts kept in French).
taille = int(input("Entrez la taille des carrés."))
couleur = input("Entrez la couleur des carrés. (en anglais)")
# Draw 8 squares side by side, lifting the pen while moving between them.
for _ in range(8):
    down()
    carre(taille, couleur)
    up()
    goto(xcor() + taille + 20, ycor())
| antoinech2/ISN-exercices-Python-Ann-e-1 | partie 2/ex9/dessin.py | dessin.py | py | 408 | python | fr | code | 0 | github-code | 36 |
import torch
import torch.nn.functional as F
def R_from_axis_angle(k: torch.tensor, theta: torch.tensor):
    """Rotation matrix for a rotation of `theta` radians about axis `k`
    (Rodrigues' formula). Returns the identity when `k` is the zero vector."""
    if torch.norm(k) == 0.:
        # Match the input's dtype/device; the bare torch.eye(3) returned
        # previously was always CPU float32, inconsistent with the else path.
        return torch.eye(3).to(k)
    k = F.normalize(k, p=2., dim=0)
    kx, ky, kz = k[0], k[1], k[2]
    cos, sin = torch.cos(theta), torch.sin(theta)
    R = torch.zeros((3, 3)).to(k)
    R[0, 0] = cos + (kx**2) * (1 - cos)
    R[0, 1] = kx * ky * (1 - cos) - kz * sin
    R[0, 2] = kx * kz * (1 - cos) + ky * sin
    R[1, 0] = kx * ky * (1 - cos) + kz * sin
    R[1, 1] = cos + (ky**2) * (1 - cos)
    R[1, 2] = ky * kz * (1 - cos) - kx * sin
    R[2, 0] = kx * kz * (1 - cos) - ky * sin
    R[2, 1] = ky * kz * (1 - cos) + kx * sin
    R[2, 2] = cos + (kz**2) * (1 - cos)
    return R
def axis_angle_to_quaternions(axis: torch.tensor, angle: torch.tensor):
    """Convert an axis/angle rotation into an (r, i, j, k) quaternion."""
    unit_axis = F.normalize(axis, p=2., dim=0)
    half_angle = 0.5 * angle
    sin_half = torch.sin(half_angle)
    cos_half = torch.cos(half_angle)
    components = [
        cos_half,
        unit_axis[0] * sin_half,
        unit_axis[1] * sin_half,
        unit_axis[2] * sin_half,
    ]
    # Built as float32 first, then moved to the dtype/device of `axis`.
    return torch.tensor(components, dtype=torch.float32).to(axis)
def R_from_quaternions(quaternions: torch.tensor):
    """Rotation matrix from an (r, i, j, k) quaternion; input is normalized
    before conversion."""
    q = F.normalize(quaternions, p=2., dim=0)
    r, i, j, k = torch.unbind(q, -1)
    s = 2.0 / (q * q).sum(-1)
    # The nine matrix entries, row-major.
    entries = (
        1 - s * (j * j + k * k), s * (i * j - k * r), s * (i * k + j * r),
        s * (i * j + k * r), 1 - s * (i * i + k * k), s * (j * k - i * r),
        s * (i * k - j * r), s * (j * k + i * r), 1 - s * (i * i + j * j),
    )
    flat = torch.stack(entries, -1)
    return flat.reshape(q.shape[:-1] + (3, 3)).to(q)
def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
"""
Returns torch.sqrt(torch.max(0, x))
but with a zero subgradient where x is 0.
"""
ret = torch.zeros_like(x)
positive_mask = x > 0
ret[positive_mask] = torch.sqrt(x[positive_mask])
return ret
def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:
    """
    Convert rotations given as rotation matrices to quaternions.

    Args:
        matrix: Rotation matrices as tensor of shape (..., 3, 3).

    Returns:
        quaternions with real part first, as tensor of shape (..., 4).
    """
    if matrix.size(-1) != 3 or matrix.size(-2) != 3:
        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")

    batch_dim = matrix.shape[:-2]
    m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind(
        matrix.reshape(batch_dim + (9,)), dim=-1
    )

    # Magnitudes |q_r|, |q_i|, |q_j|, |q_k| (up to sign) from trace identities.
    q_abs = _sqrt_positive_part(
        torch.stack(
            [
                1.0 + m00 + m11 + m22,
                1.0 + m00 - m11 - m22,
                1.0 - m00 + m11 - m22,
                1.0 - m00 - m11 + m22,
            ],
            dim=-1,
        )
    )

    # we produce the desired quaternion multiplied by each of r, i, j, k
    quat_by_rijk = torch.stack(
        [
            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
            # `int`.
            torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1),
            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
            # `int`.
            torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1),
            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
            # `int`.
            torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1),
            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
            # `int`.
            torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1),
        ],
        dim=-2,
    )

    # We floor here at 0.1 but the exact level is not important; if q_abs is small,
    # the candidate won't be picked.
    flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device)
    quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr))

    # if not for numerical problems, quat_candidates[i] should be same (up to a sign),
    # forall i; we pick the best-conditioned one (with the largest denominator)
    return quat_candidates[
        F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, :
    ].reshape(batch_dim + (4,))
def R_from_6d(d6: torch.tensor):
    """
    Rotation matrix from the 6D representation of Zhou et al. [1], using
    Gram--Schmidt orthogonalization per Section B of the paper.

    Args:
        d6: 6D rotation representation, of size (*, 6)

    Returns:
        batch of rotation matrices of size (*, 3, 3)

    [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
    On the Continuity of Rotation Representations in Neural Networks.
    IEEE Conference on Computer Vision and Pattern Recognition, 2019.
    Retrieved from http://arxiv.org/abs/1812.07035
    """
    x_raw = d6[..., :3]
    y_raw = d6[..., 3:]
    x = F.normalize(x_raw, dim=-1)
    # Remove the component of y_raw along x, then normalize.
    y = F.normalize(y_raw - (x * y_raw).sum(-1, keepdim=True) * x, dim=-1)
    z = torch.cross(x, y, dim=-1)
    return torch.stack((x, y, z), dim=-2).to(d6)
def quaternion_to_axis_angle(quaternions: torch.Tensor) -> tuple:
    """
    Convert rotations given as quaternions to a unit axis and an angle.

    Args:
        quaternions: quaternions with real part first,
            as tensor of shape (..., 4).

    Returns:
        A tuple ``(axis, angles)``: ``axis`` is the rotation axis (normalized
        along dim 0) and ``angles`` the anticlockwise rotation magnitude in
        radians. (Unlike pytorch3d's version, axis and angle are returned
        separately rather than as a single axis*angle vector.)
    """
    norms = torch.norm(quaternions[..., 1:], p=2, dim=-1, keepdim=True)
    half_angles = torch.atan2(norms, quaternions[..., :1])
    angles = 2 * half_angles
    eps = 1e-6
    small_angles = angles.abs() < eps
    sin_half_angles_over_angles = torch.empty_like(angles)
    sin_half_angles_over_angles[~small_angles] = (
        torch.sin(half_angles[~small_angles]) / angles[~small_angles]
    )
    # for x small, sin(x/2) is about x/2 - (x/2)^3/6
    # so sin(x/2)/x is about 1/2 - (x*x)/48
    sin_half_angles_over_angles[small_angles] = (
        0.5 - (angles[small_angles] * angles[small_angles]) / 48
    )
    axis = quaternions[..., 1:] / sin_half_angles_over_angles
    # NOTE(review): normalizing over dim 0 matches the unbatched (4,) input
    # case; for batched input dim=-1 would be expected — confirm intent.
    axis = F.normalize(axis, p=2., dim=0)
    return axis, angles
def axis_angle_from_6d(d6: torch.Tensor):
    """Convert a 6D rotation representation to (unit axis, angle)."""
    rotation = R_from_6d(d6)
    quat = matrix_to_quaternion(rotation)
    return quaternion_to_axis_angle(quat)
def matrix_to_axis_angle(R: torch.Tensor):
    """Convert a rotation matrix to (unit axis, angle) via its quaternion."""
    quat = matrix_to_quaternion(R)
    return quaternion_to_axis_angle(quat)
import cv2,os,shutil
import numpy as np
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True, help="directory of the cropped odonates")
ap.add_argument("-o", "--output", required=True,
                help="parent directory to which each one be stored")
args = vars(ap.parse_args())
input_dir = args['input']
out_dir = args['output']

# One destination directory per sorting bucket: thumbi_1 .. thumbi_14.
# (Replaces the original vars()["dirN"] dynamic-variable hack.)
dest_dirs = [out_dir + 'thumbi_%s/' % str(i + 1) for i in range(14)]
try:
    for i, directory in enumerate(dest_dirs):
        os.mkdir(directory)
        print("directory %s created" % (i + 1))
except FileExistsError:
    print("Directories already exists")

# Keys 'a'..'m' file the image into buckets 1..13; Esc (27) into bucket 14.
# A key -> directory table replaces the original 14-branch if chain.
key_to_dir = {ord(letter): dest_dirs[index]
              for index, letter in enumerate("abcdefghijklm")}
key_to_dir[27] = dest_dirs[13]

for fln in sorted(os.listdir(input_dir)):
    fname = fln.replace(".jpg", " ")
    img = cv2.imread(os.path.join(input_dir, fln))
    cv2.namedWindow(fname, cv2.WINDOW_NORMAL)
    cv2.setWindowProperty(fname, cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)
    cv2.imshow(fname, img)
    # Wait until one of the sorting keys is pressed, then move the file.
    while True:
        k = cv2.waitKey(0) & 0xFF
        destination = key_to_dir.get(k)
        if destination is not None:
            cv2.destroyAllWindows()
            shutil.move(os.path.join(input_dir, fln), destination)
            break
| robinjacobroy/Odonata_detection | sorting.py | sorting.py | py | 2,940 | python | en | code | 0 | github-code | 36 |
from datetime import datetime
from pyquery import PyQuery as pquery
import urllib3
urllib3.disable_warnings()
import webbrowser
def timestamp():
    """Current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = datetime.now()
    return now.strftime('%Y-%m-%d %H:%M:%S')
def main():
    """Crawl the Webb Telescope image gallery, opening the best-resolution
    download link of every page (up to page 247) in the default browser."""
    # Starting gallery page; replaced by each page's "Next" link as we go.
    active = 'https://webbtelescope.org/contents/media/images/2022/028/01G77Q8BTTSEB7ZSB2ZEY49HSQ'
    print(f'{timestamp()} Starting...')
    count = 0
    while (active):
        py = pquery(active, verify=False)
        # Current page number, read from the pager controls.
        page_num = int([i.text() for i in py.find("div.controls").parent().find('li').items()][0])
        span = py.find("span")
        spans = [i.text() for i in span.items()]
        try:
            # NOTE(review): `next` shadows the builtin; harmless here but
            # worth renaming in a follow-up.
            next_idx = spans.index("Next")
            next = 'https://webbtelescope.org' + [i.attr("href") for i in span.eq(next_idx).parent().items()][0]
        except ValueError:
            # No "Next" link is expected only on the final page (247);
            # anywhere else we log state and re-raise.
            if page_num == 247:
                pass
            else:
                print(active)
                print(page_num)
                print(timestamp())
                raise
        downloads = py.find("div.media-library-links-list").find('a')
        imgs_text = [i.text() for i in downloads.items()]
        imgs_links = [i.attr('href') for i in downloads.items()]
        # Preference order: full-resolution non-TIF, then any non-TIF,
        # then whatever link remains.
        full_res_str = [i for i in imgs_text if i.startswith('Full') and 'TIF' not in i]  # Full Res first
        if not full_res_str:
            full_res_str = [i for i in imgs_text if 'TIF' not in i]  # Png next or first jpg
        if not full_res_str:
            full_res_str = [imgs_text[0]]  # Whatever is left over
        full_res_idx = imgs_text.index(full_res_str[0])
        img_link = 'https:' + imgs_links[full_res_idx]
        webbrowser.open(img_link)
        count += 1
        if page_num < 247:
            active = next
        else:
            break
    print(f'{timestamp()} Done...')
# Script entry point (extraction junk that was fused onto this line removed).
if __name__ == "__main__":
    main()
36060935686 | import requests
url = "https://ngl.link/api/submit"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Origin": "https://ngl.link",
"Referer": "https://ngl.link/z7hejan"
}
data = {
"username": "TARGET HERE",
"question": "UR QUESTION HERE",
"deviceId": "cfd278d9-b21d-444d-8a6f-6e7494f84bf8",
"gameSlug": "",
"referrer": ""
}
# amount of the spamms
num_requests = 100
for i in range(num_requests):
response = requests.post(url, headers=headers, data=data)
print(f"Request {i+1}: {response.status_code}")
| zxrby/NGL.Link-Spammer | ok.py | ok.py | py | 754 | python | en | code | 2 | github-code | 36 |
import csv
import shlex
import subprocess
from decimal import Decimal
ZERO = Decimal("0")
def execute(cmd):
    """Run *cmd* (a shell-style string) and return (stdout, stderr) as bytes.

    NOTE(review): the return code is still deliberately unchecked (original
    TODO) — callers parse stderr even when ffmpeg exits non-zero, so raising
    here could break them.
    """
    # subprocess.run supersedes the Popen + communicate() pattern.
    result = subprocess.run(
        shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False
    )
    return result.stdout, result.stderr
def save_intervals(intervals, filename):
    """Write intervals (dicts with 'start'/'end') to *filename* as CSV."""
    # newline="" is required by the csv module; without it the writer emits
    # blank rows on Windows.
    with open(filename, encoding="utf8", mode="w", newline="") as fobj:
        writer = csv.writer(fobj)
        writer.writerow(["start", "end"])
        for interval in intervals:
            writer.writerow([interval["start"], interval["end"]])
def load_intervals(filename):
    """Read a CSV written by save_intervals back into Decimal interval dicts."""
    intervals = []
    with open(filename, encoding="utf8") as fobj:
        for row in csv.DictReader(fobj):
            intervals.append(
                {"start": Decimal(row["start"]), "end": Decimal(row["end"])}
            )
    return intervals
def print_intervals(intervals):
    """Pretty-print intervals as aligned 'start,end' rows (5 decimals)."""
    print(" start,end")
    for interval in intervals:
        print(" {:10.5f},{:10.5f}".format(interval["start"], interval["end"]))
def video_length(filename):
    """Return the duration of *filename* in seconds (as Decimal), via ffprobe."""
    # Bug fix: the command string previously never interpolated `filename`,
    # so ffprobe was asked about a literal placeholder path.
    cmd = f'ffprobe -v quiet -of csv=p=0 -show_entries format=duration "{filename}"'
    stdout, stderr = execute(cmd)
    return Decimal(stdout.decode("utf8").strip())
def detect_silence_intervals(filename, noise_level, minimum_duration):
    """Run ffmpeg's silencedetect filter on *filename* and parse its stderr.

    Args:
        filename: path to the media file.
        noise_level: threshold in dB below which audio counts as silence.
        minimum_duration: minimum silence length (seconds) to report.

    Returns:
        List of dicts with Decimal 'start', 'end' and 'duration' keys.
    """
    # Bug fix: the command string previously never interpolated `filename`.
    command = (
        f'ffmpeg -i "{filename}" '
        f"-af silencedetect=noise={noise_level}dB:d={minimum_duration} "
        f"-f null -"
    )
    stdout, stderr = execute(command)
    # silencedetect reports on stderr, one "silence_start"/"silence_end" pair
    # per detected interval.
    result = stderr.decode("utf8")
    intervals = []
    start = None
    end = None
    for line in result.splitlines():
        if "silence_" not in line:
            continue
        data = line.split("silence_")[1].split(": ")
        if data[0] == "start":
            start = Decimal(data[1].strip())
        elif data[0] == "end":
            end = Decimal(data[1].split()[0].strip())
            this_duration = Decimal(line.split("silence_duration:")[1].strip())
            # Clamp negative starts to zero and adjust the duration to match.
            if start < 0:  # TODO: should really force this?
                start = ZERO
                this_duration = end
            row = {"start": start, "end": end, "duration": this_duration}
            intervals.append(row)
            start, end, this_duration = None, None, None
    return intervals
def extract_non_silence_from_silence(silence_intervals, threshold=ZERO):
    """Invert silence intervals into the non-silent spans between them.

    Args:
        silence_intervals: list of dicts with Decimal 'start'/'end'.
        threshold: padding added on each side of every extracted span
            (the start is clamped at zero).

    Returns:
        List of {'start', 'end'} dicts, one per gap between silences.
    """
    extract = []
    # Iterate consecutive silence pairs directly; the enumerate index used
    # previously was never read.
    for silence_1, silence_2 in zip(silence_intervals, silence_intervals[1:]):
        start = silence_1["end"] - threshold
        if start < 0:
            start = ZERO
        end = silence_2["start"] + threshold
        extract.append({"start": start, "end": end})
    return extract
def optimize_intervals(intervals, minimum_gap):
    """Repeatedly merge adjacent intervals separated by <= minimum_gap.

    Iterates to a fixed point: each pass merges neighbouring pairs whose gap
    is small enough, and the loop ends once a full pass performs no merge.
    """
    finished = False
    while not finished:
        finished = True
        result = []
        last = len(intervals) - 1
        index = 0
        while index <= last:
            interval = intervals[index]
            if index < last:
                next_interval = intervals[index + 1]
                if next_interval["start"] - interval["end"] <= minimum_gap:
                    # merge these two intervals
                    finished = False
                    result.append({"start": interval["start"], "end": next_interval["end"]})
                    index += 1  # jump the next one
                else:
                    result.append(interval)
            else:  # last interval, just append
                result.append(interval)
            index += 1
        # Run the next pass over the partially merged list.
        intervals = result
    return result
| turicas/no-silence | nosilence.py | nosilence.py | py | 3,550 | python | en | code | 10 | github-code | 36 |
from django.urls import path
from . import views
# URL routes for the lyric-library app: JSON API endpoints plus auth views.
urlpatterns = [
    # API routes
    path('', views.index, name='index'),
    path('library', views.library, name='library'),
    # Single saved song within the user's library.
    path('library/<int:song_id>', views.song, name='song'),
    path('setlists', views.setlists, name='setlists'),
    path('setlists/<int:id>', views.setlist, name='setlist'),
    # Genius search: free-text query, or lookup by Genius song id.
    path('search', views.search_genius, name='search'),
    path('search/<int:id>', views.search_genius_by_id, name='search_by_id'),
    path('profile/<int:userId>', views.profile_view, name='profile'),

    # Auth routes
    path('session', views.session_view, name='session'),
    path('csrf', views.get_csrf_token, name='csrf'),
    path('register', views.register, name='register'),
    path('login', views.login_view, name='login'),
    path('logout', views.logout_view, name='logout'),
]
| alexboneham/lyric-library | backend/apis/urls.py | urls.py | py | 847 | python | en | code | 0 | github-code | 36 |
##### for mounting drive on colab
# from google.colab import drive
# drive.mount('/content/drive')
#################################
import urllib.request
import os
import time
import tensorflow as tf
root_path = os.getcwd()
def download_images(link_file_images, output_directory, image_type):
    """Download every URL listed (one per line) in *link_file_images* into
    output_directory/<image_type>/image_<n>.tiff, numbered from 1."""
    # Typo fix in the progress message ("Downoading").
    print("\nDownloading", image_type)
    with open(link_file_images, 'r') as link_file:
        image_links = link_file.readlines()
    for idx, image_link in enumerate(image_links, start=1):
        image_path = output_directory + image_type + "/image_%d.tiff" % idx
        # readlines() keeps the trailing newline on each URL; strip it so the
        # request URL is clean.
        urllib.request.urlretrieve(image_link.strip(), image_path)
if __name__ == '__main__':
    # Name of the dataset being fetched (kept for documentation).
    dataset_name = "MassachusettsRoads"
    # The original called str.format() on these literals with no placeholders,
    # which was a no-op; the paths are now used directly.
    link_file_images = root_path + "/src/Images.txt"
    link_file_targets = root_path + "/src/Targets.txt"
    output_directory = root_path + "/Data/"
    # Create the parent directory before its children (the original created
    # the children first, which fails when the parent does not exist) and
    # tolerate re-runs with exist_ok.
    os.makedirs(output_directory, exist_ok=True)
    os.makedirs(output_directory + "Images", exist_ok=True)
    os.makedirs(output_directory + "Targets", exist_ok=True)
    start_time = time.time()
    download_images(link_file_images, output_directory, "Images")
    download_images(link_file_targets, output_directory, "Targets")
    print("TOTAL TIME: {} minutes".format(round((time.time() - start_time)/60, 2)))
from django.contrib import admin
from import_export import resources
from import_export.admin import ImportExportModelAdmin
from .models import Product, Inventory, OrderLine
from import_export import fields
from import_export.widgets import ForeignKeyWidget
class ProductResource(resources.ModelResource):
    """Import/export mapping for Product rows."""

    class Meta:
        model = Product
        # Match imported rows on the natural key rather than the auto PK.
        import_id_fields = ['product_code']
@admin.register(Product)
class ProductAdmin(ImportExportModelAdmin):
    """Admin with import/export support for Product."""

    resource_class = ProductResource
    # NOTE(review): 'import_export_options' is not a documented
    # ImportExportModelAdmin attribute — confirm it has any effect.
    import_export_options = {'update': True}
class InventoryResource(resources.ModelResource):
    """Import/export mapping for Inventory; the 'product_code' column is
    resolved to the related Product row on import."""

    product = fields.Field(
        column_name='product_code',
        attribute='product',
        widget=ForeignKeyWidget(Product, 'product_code')
    )

    class Meta:
        model = Inventory
        fields = ('id', 'product', 'current_stock')
@admin.register(Inventory)
class InventoryAdmin(ImportExportModelAdmin):
    """Admin with import/export support for Inventory."""

    resource_class = InventoryResource
class OrderLineResource(resources.ModelResource):
    """Import/export mapping for OrderLine; the 'product_code' column is
    resolved to the related Product row on import."""

    product = fields.Field(
        column_name='product_code',
        attribute='product',
        widget=ForeignKeyWidget(Product, 'product_code')
    )

    class Meta:
        model = OrderLine
        fields = ('id', 'product', 'reorder_point')
@admin.register(OrderLine)
class OrderLineAdmin(ImportExportModelAdmin):
    """Admin with import/export support for OrderLine."""

    resource_class = OrderLineResource
| HirokiShimoi/investoru_app | myapp/admin.py | admin.py | py | 1,384 | python | en | code | 0 | github-code | 36 |
from collections import namedtuple
from enum import Enum
from dagster import check
from dagster.core.definitions import Materialization, SolidHandle
from dagster.core.definitions.events import EventMetadataEntry
from dagster.core.serdes import whitelist_for_serdes
from dagster.core.types.runtime_type import RuntimeType
from dagster.utils import merge_dicts
from dagster.utils.error import SerializableErrorInfo
@whitelist_for_serdes
class StepOutputHandle(namedtuple('_StepOutputHandle', 'step_key output_name')):
    """Addresses one output ('result' by default) of one execution step."""

    @staticmethod
    def from_step(step, output_name='result'):
        # Convenience constructor from an ExecutionStep instance.
        check.inst_param(step, 'step', ExecutionStep)
        return StepOutputHandle(step.key, output_name)

    def __new__(cls, step_key, output_name='result'):
        return super(StepOutputHandle, cls).__new__(
            cls,
            step_key=check.str_param(step_key, 'step_key'),
            output_name=check.str_param(output_name, 'output_name'),
        )
@whitelist_for_serdes
class StepInputData(namedtuple('_StepInputData', 'input_name type_check_data')):
    """Serializable record of a step input and its optional type check."""

    def __new__(cls, input_name, type_check_data):
        return super(StepInputData, cls).__new__(
            cls,
            input_name=check.str_param(input_name, 'input_name'),
            type_check_data=check.opt_inst_param(type_check_data, 'type_check_data', TypeCheckData),
        )
@whitelist_for_serdes
class TypeCheckData(namedtuple('_TypeCheckData', 'success label description metadata_entries')):
    """Serializable result of a type check: pass/fail plus display metadata."""

    def __new__(cls, success, label, description=None, metadata_entries=None):
        return super(TypeCheckData, cls).__new__(
            cls,
            success=check.bool_param(success, 'success'),
            label=check.str_param(label, 'label'),
            description=check.opt_str_param(description, 'description'),
            # Bug fix: the second argument is the parameter *name*; the list
            # itself was previously passed, corrupting check's error messages.
            metadata_entries=check.opt_list_param(
                metadata_entries, 'metadata_entries', of_type=EventMetadataEntry
            ),
        )
@whitelist_for_serdes
class UserFailureData(namedtuple('_UserFailureData', 'label description metadata_entries')):
    """Serializable user-provided metadata attached to a step failure."""

    def __new__(cls, label, description=None, metadata_entries=None):
        return super(UserFailureData, cls).__new__(
            cls,
            label=check.str_param(label, 'label'),
            description=check.opt_str_param(description, 'description'),
            # Bug fix: the second argument is the parameter *name*; the list
            # itself was previously passed, corrupting check's error messages.
            metadata_entries=check.opt_list_param(
                metadata_entries, 'metadata_entries', of_type=EventMetadataEntry
            ),
        )
@whitelist_for_serdes
class StepOutputData(
    namedtuple('_StepOutputData', 'step_output_handle intermediate_materialization type_check_data')
):
    """Serializable record of a produced step output."""

    def __new__(cls, step_output_handle, intermediate_materialization=None, type_check_data=None):
        return super(StepOutputData, cls).__new__(
            cls,
            step_output_handle=check.inst_param(
                step_output_handle, 'step_output_handle', StepOutputHandle
            ),
            intermediate_materialization=check.opt_inst_param(
                intermediate_materialization, 'intermediate_materialization', Materialization
            ),
            type_check_data=check.opt_inst_param(type_check_data, 'type_check_data', TypeCheckData),
        )

    @property
    def output_name(self):
        # Convenience passthrough to the handle's output name.
        return self.step_output_handle.output_name
@whitelist_for_serdes
class StepFailureData(namedtuple('_StepFailureData', 'error user_failure_data')):
    """Serializable record of a step failure: framework error info plus any
    user-provided failure metadata."""

    def __new__(cls, error, user_failure_data):
        return super(StepFailureData, cls).__new__(
            cls,
            error=check.opt_inst_param(error, 'error', SerializableErrorInfo),
            user_failure_data=check.opt_inst_param(
                user_failure_data, 'user_failure_data', UserFailureData
            ),
        )
@whitelist_for_serdes
class StepSuccessData(namedtuple('_StepSuccessData', 'duration_ms')):
    """Serializable record of a successful step (duration in milliseconds)."""

    def __new__(cls, duration_ms):
        return super(StepSuccessData, cls).__new__(
            cls, duration_ms=check.float_param(duration_ms, 'duration_ms')
        )
class StepKind(Enum):
    # Only one kind exists in this module: a compute step.
    COMPUTE = 'COMPUTE'
class StepInputSourceType(Enum):
    # Where a step input's value comes from: a single upstream output,
    # several fanned-in outputs, or config-provided data.
    SINGLE_OUTPUT = 'SINGLE_OUTPUT'
    MULTIPLE_OUTPUTS = 'MULTIPLE_OUTPUTS'
    CONFIG = 'CONFIG'
class StepInput(
    namedtuple('_StepInput', 'name runtime_type source_type source_handles config_data')
):
    """Declaration of a step input and where its value is sourced from."""

    def __new__(cls, name, runtime_type, source_type, source_handles=None, config_data=None):
        return super(StepInput, cls).__new__(
            cls,
            name=check.str_param(name, 'name'),
            runtime_type=check.inst_param(runtime_type, 'runtime_type', RuntimeType),
            source_type=check.inst_param(source_type, 'source_type', StepInputSourceType),
            source_handles=check.opt_list_param(
                source_handles, 'source_handles', of_type=StepOutputHandle
            ),
            config_data=config_data,  # can be any type
        )

    @property
    def is_from_output(self):
        # True when the value comes from one or more upstream step outputs.
        return (
            self.source_type == StepInputSourceType.SINGLE_OUTPUT
            or self.source_type == StepInputSourceType.MULTIPLE_OUTPUTS
        )

    @property
    def is_from_single_output(self):
        return self.source_type == StepInputSourceType.SINGLE_OUTPUT

    @property
    def is_from_multiple_outputs(self):
        return self.source_type == StepInputSourceType.MULTIPLE_OUTPUTS

    @property
    def dependency_keys(self):
        # Keys of the upstream steps this input depends on.
        return {handle.step_key for handle in self.source_handles}
class StepOutput(namedtuple('_StepOutput', 'name runtime_type optional')):
    """Declaration of a step output: name, runtime type, and optionality."""

    def __new__(cls, name, runtime_type, optional):
        return super(StepOutput, cls).__new__(
            cls,
            name=check.str_param(name, 'name'),
            runtime_type=check.inst_param(runtime_type, 'runtime_type', RuntimeType),
            optional=check.bool_param(optional, 'optional'),
        )
class ExecutionStep(
    namedtuple(
        '_ExecutionStep',
        (
            'pipeline_name key_suffix step_inputs step_input_dict step_outputs step_output_dict '
            'compute_fn kind solid_handle logging_tags metadata'
        ),
    )
):
    """A single executable step in a pipeline plan: its inputs and outputs,
    compute function, owning solid handle, and logging/metadata context."""

    def __new__(
        cls,
        pipeline_name,
        key_suffix,
        step_inputs,
        step_outputs,
        compute_fn,
        kind,
        solid_handle,
        logging_tags=None,
        metadata=None,
    ):
        return super(ExecutionStep, cls).__new__(
            cls,
            pipeline_name=check.str_param(pipeline_name, 'pipeline_name'),
            key_suffix=check.str_param(key_suffix, 'key_suffix'),
            step_inputs=check.list_param(step_inputs, 'step_inputs', of_type=StepInput),
            # Name-indexed views precomputed for the O(1) lookups below.
            step_input_dict={si.name: si for si in step_inputs},
            step_outputs=check.list_param(step_outputs, 'step_outputs', of_type=StepOutput),
            step_output_dict={so.name: so for so in step_outputs},
            compute_fn=check.callable_param(compute_fn, 'compute_fn'),
            kind=check.inst_param(kind, 'kind', StepKind),
            solid_handle=check.inst_param(solid_handle, 'solid_handle', SolidHandle),
            # Standard step/pipeline/solid tags merged with any caller-provided
            # logging_tags.
            logging_tags=merge_dicts(
                {
                    'step_key': str(solid_handle) + '.' + key_suffix,
                    'pipeline': pipeline_name,
                    'solid': solid_handle.name,
                    'solid_definition': solid_handle.definition_name,
                },
                check.opt_dict_param(logging_tags, 'logging_tags'),
            ),
            metadata=check.opt_dict_param(metadata, 'metadata', key_type=str),
        )

    @property
    def key(self):
        # Unique step key: "<solid handle>.<suffix>".
        return str(self.solid_handle) + '.' + self.key_suffix

    @property
    def solid_name(self):
        return self.solid_handle.name

    @property
    def solid_definition_name(self):
        return self.solid_handle.definition_name

    def has_step_output(self, name):
        check.str_param(name, 'name')
        return name in self.step_output_dict

    def step_output_named(self, name):
        check.str_param(name, 'name')
        return self.step_output_dict[name]

    def has_step_input(self, name):
        check.str_param(name, 'name')
        return name in self.step_input_dict

    def step_input_named(self, name):
        check.str_param(name, 'name')
        return self.step_input_dict[name]
| helloworld/continuous-dagster | deploy/dagster_modules/dagster/dagster/core/execution/plan/objects.py | objects.py | py | 8,359 | python | en | code | 2 | github-code | 36 |
import base64
import json
import requests
from cassandra.cqlengine import connection
from flask import make_response
from flask_restful import Resource
from conf.config import CASSANDRA_HOSTS, FRIEND_KEYSPACE
from conf.service import USER_INFO_BULK_URL
from model.friend import FriendRelation
from service.common import get_user_id_from_jwt
class GetFriendList(Resource):
    """GET endpoint returning the authenticated user's friends, enriched with
    usernames fetched in bulk from the user service."""

    def get(self):
        user_id = get_user_id_from_jwt()
        if not user_id:
            return make_response("You must send the userInfo into the header X-Endpoint-Api-Userinfo", 405)
        connection.setup(hosts=CASSANDRA_HOSTS, default_keyspace=FRIEND_KEYSPACE)
        friend_rows = FriendRelation.filter(user_id=user_id)
        friends = {}
        friend_ids = []
        for friend_row in friend_rows:
            friend = friend_row.to_object()
            friend_id = friend['user_id']
            friends[friend_id] = friend
            friend_ids.append(friend_id)
        # Attach usernames. A distinct loop variable is used so the
        # authenticated user's id is not shadowed (the original rebound
        # `user_id` inside this loop).
        user_info_list = self._get_user_info_bulk(friend_ids)
        for user in user_info_list:
            friend_user_id = user['user_id']
            friends[friend_user_id]['username'] = user['username']
        return {
            "results": len(friends),
            "friends": friends
        }

    @staticmethod
    def _get_user_info_bulk(user_ids):
        """POST the id list to the user service; return its 'users' payload."""
        payload = json.dumps({"user_ids": user_ids})
        headers = {'Content-Type': 'application/json'}
        response = requests.post(USER_INFO_BULK_URL, data=payload, headers=headers)
        data = response.json()
        return data.get("users")
| glimpseapp/glimpse-service-friend | service/get_friend_list.py | get_friend_list.py | py | 1,574 | python | en | code | 0 | github-code | 36 |
# This figure requires the grackle chemistry and cooling library.
# Visit grackle.readthedocs.org for more information.
from matplotlib import pyplot
from utilities.testing import *
from pygrackle.grackle_wrapper import *
from pygrackle.fluid_container import FluidContainer
from utilities.api import \
setup_fluid_container, \
calculate_mean_molecular_weight, \
calculate_hydrogen_number_density, \
set_cosmology_units, \
get_cooling_units
from utilities.primordial_equilibrium import \
total_cooling, \
nHI, nHII, nHeI, nHeII, nHeIII, ne
from utilities.physical_constants import \
mass_hydrogen_cgs, \
sec_per_Myr, \
sec_per_Gyr, \
cm_per_mpc
# Configure grackle: primordial (H/He) chemistry only, no radiative cooling
# applied during the solve, no metals, no UV background.
my_chem = chemistry_data()
my_chem.use_chemistry = 1
my_chem.with_radiative_cooling = 0
my_chem.primordial_chemistry = 1
my_chem.metal_cooling = 0
my_chem.UVbackground = 0
my_chem.include_metal_heating = 0
my_chem.comoving_coordinates = 0
# Proper (non-comoving) units: densities in units of m_H so the fluid
# container density is effectively a hydrogen number density.
my_chem.a_units = 1.0
my_chem.density_units = mass_hydrogen_cgs
my_chem.length_units = 1.0
my_chem.time_units = 1.0
my_chem.velocity_units = my_chem.length_units / my_chem.time_units
current_redshift = 0.0
# Iterate the chemistry to equilibrium (converge=True) over a temperature grid.
fc = setup_fluid_container(my_chem, current_redshift=current_redshift,
                           converge=True, tolerance=1e-6, max_iterations=np.inf,
                           dt=(0.0001 * sec_per_Myr / my_chem.time_units))
calculate_temperature(fc)
a = 1.0 / (1.0 + current_redshift) / my_chem.a_units
calculate_cooling_time(fc, a)
# Sort everything by temperature so the curves plot monotonically.
t_sort = np.argsort(fc["temperature"])
t_cool = fc["cooling_time"][t_sort] * my_chem.time_units
my_T = fc["temperature"][t_sort]
my_nH = calculate_hydrogen_number_density(my_chem, fc).mean()
# Cooling rate normalized by nH^2 (erg s^-1 cm^3), as conventional.
cooling_rate = fc["energy"][t_sort] / t_cool * fc["density"] * \
    my_chem.density_units / my_nH**2
# Analytic equilibrium cooling with the default and Cen (1992) rate sets.
eq_cooling = total_cooling(my_T, my_nH) / my_nH**2
eq_cooling_cen = total_cooling(my_T, my_nH, rates='cen') / my_nH**2
# Figure layout: single panel, sized as fractions of the figure area.
fontsize = 14
n_rows = 1
n_columns = 1
# blank space between edge of figure and active plot area
top_buffer = 0.03
bottom_buffer = 0.1
left_buffer = 0.12
right_buffer = 0.03
# blank space between plots
hor_buffer = 0.05
vert_buffer = 0.05
# calculate the height and width of each panel
panel_width = ((1.0 - left_buffer - right_buffer -
                ((n_columns-1)*hor_buffer)) / n_columns)
panel_height = ((1.0 - top_buffer - bottom_buffer -
                 ((n_rows-1)*vert_buffer)) / n_rows)
# create a figure (figsize would be in inches)
pyplot.figure()
### Cooling figure: Lambda/nH^2 vs T.
# dashed = analytic equilibrium, solid = grackle result, dotted = Cen rates.
axes = pyplot.axes((left_buffer, bottom_buffer,
                    panel_width, panel_height))
axes.loglog(my_T, eq_cooling, color='black', alpha=0.7,
            linestyle="--", linewidth=1.5)
axes.loglog(my_T, cooling_rate, color='black', alpha=0.7,
            linestyle="-", linewidth=1)
axes.loglog(my_T, eq_cooling_cen, color='black', alpha=0.7,
            linestyle=":", linewidth=1.5)
axes.xaxis.set_label_text('T [K]', fontsize=fontsize)
axes.yaxis.set_label_text('$\\Lambda$ / n${_{\\rm H}}^{2}$ [erg s$^{-1}$ cm$^{3}$]',
                          fontsize=fontsize)
axes.set_xlim(1e4, 1e9)
axes.set_ylim(1e-26, 2e-22)
tick_labels = axes.xaxis.get_ticklabels() + \
    axes.yaxis.get_ticklabels()
for tick_label in tick_labels:
    tick_label.set_size(fontsize)
pyplot.savefig('cooling.png')
pyplot.savefig('cooling.pdf')
pyplot.savefig('cooling.eps')
pyplot.clf()
### Ionization balance figure: species fractions vs T.
# The original repeated the same loglog call 16 times; the two helpers below
# produce byte-identical curves (same colors, styles, labels, draw order).
axes = pyplot.axes((left_buffer, bottom_buffer,
                    panel_width, panel_height))

# (density function, color) per ionization state; labels below match the
# fluid-container field names.
h_species = [(nHI, "#B82E00"), (nHII, "#B88A00")]
he_species = [(nHeI, "#002EB8"), (nHeII, "#008AB8"), (nHeIII, "#00B88A")]

def plot_equilibrium_fractions(species, linestyle, rates=None):
    """Plot analytic equilibrium fractions n_i / sum_j(n_j) for one element."""
    kwargs = {} if rates is None else {'rates': rates}
    total = sum(func(my_T, my_nH, **kwargs) for func, _ in species)
    for func, color in species:
        axes.loglog(my_T, func(my_T, my_nH, **kwargs) / total,
                    color=color, alpha=0.7, linestyle=linestyle,
                    linewidth=1.5)

def plot_grackle_fractions(fields, colors):
    """Plot grackle's converged fractions (solid lines, labeled by field)."""
    total = sum(fc[field] for field in fields)
    for field, color in zip(fields, colors):
        axes.loglog(my_T, fc[field] / total, label=field, color=color,
                    alpha=0.7, linestyle="-", linewidth=1.)

# Hydrogen: default equilibrium rates (dashed), Cen rates (dotted),
# grackle result (solid) -- same order as the original script.
plot_equilibrium_fractions(h_species, "--")
plot_equilibrium_fractions(h_species, ":", rates='cen')
plot_grackle_fractions(["HI", "HII"], [c for _, c in h_species])
# Helium: same three curve families.
plot_equilibrium_fractions(he_species, "--")
plot_equilibrium_fractions(he_species, ":", rates='cen')
plot_grackle_fractions(["HeI", "HeII", "HeIII"], [c for _, c in he_species])

axes.xaxis.set_label_text('T [K]', fontsize=fontsize)
axes.yaxis.set_label_text('fraction', fontsize=fontsize)
axes.set_xlim(1e4, 1e9)
axes.set_ylim(1e-10, 1)
tick_labels = axes.xaxis.get_ticklabels() + \
    axes.yaxis.get_ticklabels()
for tick_label in tick_labels:
    tick_label.set_size(fontsize)
axes.legend(loc='best', prop=dict(size=fontsize))
pyplot.savefig('fractions.png')
pyplot.savefig('fractions.pdf')
pyplot.savefig('fractions.eps')
| enzo-project/enzo-method-paper-ApJ-2014 | test_problems/IonizationBalance/enzo_paper_equilibrium_figure.py | enzo_paper_equilibrium_figure.py | py | 7,156 | python | en | code | 0 | github-code | 36 |
3051216703 |
#!/usr/bin/python
# tetrous.py
import wx
#All functions and objects from the basic modules will start with a wx.
APP_EXIT = 1
class tetrous(wx.Frame):  # renamed from the tutorial's Example class
    """Main application frame with a menu holding a single quit item."""

    def __init__(self, parent, title, *args, **kwargs):
        super(tetrous, self).__init__(parent, title=title,
            size=(500, 800), *args, **kwargs)
        # Build the UI first; InitUI centres and shows the frame itself.
        # (Previously the frame was centred and shown here too, before the
        # menu bar existed, and InitUI duplicated SetSize/Centre/Show.)
        self.InitUI()

    def InitUI(self):
        """Create the menu bar, bind the quit handler and show the frame."""
        menubar = wx.MenuBar()
        fileMenu = wx.Menu()
        qmi = wx.MenuItem(fileMenu, APP_EXIT, '&AnHero\tCtrl+Q')
        qmi.SetBitmap(wx.Bitmap('exit.png'))  # requires exit.png next to the script
        fileMenu.Append(qmi)
        menubar.Append(fileMenu, '&Ask')
        self.SetMenuBar(menubar)
        self.Bind(wx.EVT_MENU, self.OnQuit, qmi)
        self.SetSize((500, 800))
        self.SetTitle('notsuspicious.exe')
        self.Centre()
        self.Show(True)

    def OnQuit(self, e):
        """Close the frame, ending the application."""
        self.Close()
#_______________________________________________________________________
def main():
    """Create the wx application, build the frame, and enter the event loop."""
    application = wx.App()
    tetrous(None, title='Size')
    application.MainLoop()


if __name__ == '__main__':
    main()
| Shapez/Tetris_Project | tetrous.py | tetrous.py | py | 1,269 | python | en | code | 0 | github-code | 36 |
72664562664 | import cv2
import numpy as np
from matplotlib import pyplot as plt
# Load the segmented image and convert it to gray scale for edge detection.
img0 = cv2.imread('segmented_img.jpg')
img = cv2.cvtColor(img0, cv2.COLOR_BGR2GRAY)
# Optional denoising step (disabled): img = cv2.GaussianBlur(img, (3, 3), 0)

# Convolve with the Laplacian and Sobel kernels; CV_64F keeps the negative
# gradient values that an 8-bit output would clip.
laplacian = cv2.Laplacian(img, cv2.CV_64F)
cv2.imwrite("laplacian_img.jpg", laplacian)
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)  # gradient along x
cv2.imwrite("sobelx_img.jpg", sobelx)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)  # gradient along y
# BUG FIX: this previously wrote sobelx, so sobely_img.jpg held the x gradient.
cv2.imwrite("sobely_img.jpg", sobely)

# 2x2 panel: original, Laplacian, Sobel X, Sobel Y.
panels = [(img, 'Original'), (laplacian, 'Laplacian'),
          (sobelx, 'Sobel X'), (sobely, 'Sobel Y')]
for position, (image, title) in enumerate(panels, start=1):
    plt.subplot(2, 2, position), plt.imshow(image, cmap='gray')
    plt.title(title), plt.xticks([]), plt.yticks([])
plt.show()
| kateriska/IBP | sobelExercise.py | sobelExercise.py | py | 1,025 | python | en | code | 1 | github-code | 36 |
933336823 | import copy
import uuid
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc as orm_exc
from neutron.api.v2 import attributes
from neutron.common import core as sql
from neutron.common import constants as n_constants
from neutron.common import utils
from neutron import context as t_context
from neutron.db import api as qdbapi
from neutron.db import common_db_mixin
from neutron.db import model_base
from neutron.db import models_v2
from neutron.db import l3_db as l3
from neutron.extensions import servicevm
from neutron.extensions import l3 as l3_ext
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.openstack.common import timeutils
from neutron.plugins.common import constants
from neutron.plugins.openvswitch import ovs_db_v2
from neutron.services.vm.common import constants as s_constants
from neutron.services.vm.mgmt_drivers.rpc import svm_rpc_joint_agent_api
LOG = logging.getLogger(__name__)
_ACTIVE_UPDATE = (constants.ACTIVE, constants.PENDING_UPDATE)
_ACTIVE = constants.ACTIVE
_ACTIVE_UPDATE_ERROR_DEAD = (
constants.PENDING_CREATE, constants.ACTIVE, constants.PENDING_UPDATE,
constants.ERROR, constants.DEAD)
DEVICE_OWNER_ROUTER_INTF = n_constants.DEVICE_OWNER_ROUTER_INTF
DEVICE_OWNER_ROUTER_GW = n_constants.DEVICE_OWNER_ROUTER_GW
DEVICE_OWNER_FLOATINGIP = n_constants.DEVICE_OWNER_FLOATINGIP
EXTERNAL_GW_INFO = l3_ext.EXTERNAL_GW_INFO
INSTANCE_HOST_ATTR = 'OS-EXT-SRV-ATTR:host'
###########################################################################
# db tables
class DeviceTemplate(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents template to create hosting device.

    A template records the drivers used to create and manage a device,
    plus free-form (key, value) attributes interpreted by the infra driver.
    """
    # Descriptive name
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(255))
    # service type that this service vm provides.
    # At first phase, this includes only single service
    # In future, single service VM may accomodate multiple services.
    service_types = orm.relationship('ServiceType', backref='template')
    # driver to create hosting device. e.g. noop, nova, heat, etc...
    infra_driver = sa.Column(sa.String(255))
    # driver to communicate with service managment
    mgmt_driver = sa.Column(sa.String(255))
    # vendor driver for device
    device_driver = sa.Column(sa.String(255))
    # if shared is True, all user access the template
    shared = sa.Column(sa.Boolean(), nullable=False)
    # set to timeutils.utcnow() by create_device_template
    created_at = sa.Column('created_at', sa.DateTime(), nullable=True)
    # (key, value) pair to spin up
    attributes = orm.relationship('DeviceTemplateAttribute',
                                  backref='template')
class ServiceType(model_base.BASEV2, models_v2.HasId):#, models_v2.HasTenant):
    """Represents service type which hosting device provides.

    Since a device may provide many services, This is one-to-many
    relationship.
    """
    # owning template
    template_id = sa.Column(sa.String(36), sa.ForeignKey('devicetemplates.id'),
                            nullable=False)
    # NOTE: the column name is 'servicetype' (no underscore); queries must
    # use ServiceType.servicetype, not ServiceType.service_type.
    servicetype = sa.Column(sa.String(255), nullable=False)
class DeviceTemplateAttribute(model_base.BASEV2, models_v2.HasId):
    """Represents attributes necessary for spinning up a VM as (key, value) pairs.

    Key/value pairs keep the schema agnostic to the actual manager of VMs
    (nova, heat or others), e.g. image-id, flavor-id for Nova.
    The interpretation is up to the actual driver of the hosting device.
    """
    template_id = sa.Column(sa.String(36), sa.ForeignKey('devicetemplates.id'),
                            nullable=False)
    key = sa.Column(sa.String(255), nullable=False)
    # value is stored as JSON (JsonCom column), not a raw string
    #value = sa.Column(sa.String(4096), nullable=True)
    value = sa.Column(sql.JsonCom(), nullable=False)
class Device(model_base.BASEV2, models_v2.HasTenant):
    """Represents devices that hosts services.

    Here the term, 'VM', is intentionally avoided because it can be
    VM or other container.
    """
    id = sa.Column(sa.String(255),
                   primary_key=True,
                   default=uuidutils.generate_uuid)
    template_id = sa.Column(sa.String(36), sa.ForeignKey('devicetemplates.id'))
    template = orm.relationship('DeviceTemplate')
    name = sa.Column(sa.String(255), nullable=True)
    description = sa.Column(sa.String(255), nullable=True)
    # sufficient information to uniquely identify hosting device.
    # In case of service VM, it's UUID of nova VM.
    instance_id = sa.Column(sa.String(255), nullable=True)
    # For a management tool to talk to manage this hosting device.
    # opaque string.
    # e.g. (driver, mgmt_url) = (ssh, ip address), ...
    mgmt_url = sa.Column(sql.JsonCom(), nullable=True)
    # device auth info
    auth = sa.Column(sql.JsonCom(), nullable=True)
    attributes = orm.relationship("DeviceAttribute", backref="device")
    services = orm.relationship('ServiceDeviceBinding', backref='device')
    # lifecycle status, e.g. PENDING_CREATE / ACTIVE / ERROR / DEAD
    status = sa.Column(sa.String(255), nullable=False)
    created_at = sa.Column('created_at', sa.DateTime(), nullable=True)
    # power state of the backing instance; new rows start as DOWN
    power_state = sa.Column('power_state', sa.String(36),
                            default=constants.DOWN, nullable=True)
class DeviceAttribute(model_base.BASEV2, models_v2.HasId):
    """Represents kwargs necessary for spinning up a VM as (key, value) pairs.

    Key/value pairs keep the schema agnostic to the actual manager of VMs
    (nova, heat or others), e.g. image-id, flavor-id for Nova.
    The interpretation is up to the actual driver of the hosting device.
    """
    device_id = sa.Column(sa.String(255), sa.ForeignKey('devices.id'),
                          nullable=False)
    key = sa.Column(sa.String(255), nullable=False)
    # json encoded value. example
    # "nic": [{"net-id": <net-uuid>}, {"port-id": <port-uuid>}]
    #value = sa.Column(sa.String(4096), nullable=True)
    value = sa.Column(sql.JsonCom(), nullable=True)
# this table corresponds to ServiceInstance of the original spec
class ServiceInstance(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents logical service instance.

    This table is only to tell what logical service instances exists.
    There will be service specific tables for each service types which holds
    actuall parameters necessary for specific service type.
    For example, tables for "Routers", "LBaaS", "FW", tables. which table
    is implicitly determined by service_type_id.
    """
    name = sa.Column(sa.String(255), nullable=True)
    service_type_id = sa.Column(sa.String(36),
                                sa.ForeignKey('servicetypes.id'))
    service_type = orm.relationship('ServiceType')
    servicetype = sa.Column(sa.String(255), nullable=False)
    # points to row in service specific table if any.
    service_table_id = sa.Column(sa.String(36), nullable=True)
    # True: This service is managed by user so that user is able to
    #       change its configurations
    # False: This service is manged by other neutron service like lbaas
    #        so that user can't change the configuration directly via
    #        servicevm API, but via API for the service.
    managed_by_user = sa.Column(sa.Boolean(), default=False)
    # mgmt driver to communicate with logical service instance in
    # hosting device.
    # e.g. noop, OpenStack MGMT, OpenStack notification, netconf, snmp,
    # ssh, etc...
    mgmt_driver = sa.Column(sa.String(255))
    # For a management tool to talk to manage this service instance.
    # opaque string. mgmt_driver interprets it.
    mgmt_url = sa.Column(sql.JsonCom(), nullable=True)
    attributes = orm.relationship("ServiceInstanceAttribute",
                                  backref="serviceinstance")
    devices = orm.relationship('ServiceDeviceBinding')
    status = sa.Column(sa.String(255), nullable=False)
    created_at = sa.Column('created_at', sa.DateTime(), nullable=True)
    # TODO(yamahata): re-think the necessity of following columns
    # They are all commented out for minimalism for now.
    # They will be added when it is found really necessary.
    #
    # multi_tenant = sa.Column(sa.Boolean())
    # state = sa.Column(sa.Enum('UP', 'DOWN',
    #                           name='service_instance_state'))
    # For a logical service instance in hosting device to recieve
    # requests from management tools.
    # opaque string. mgmt_driver interprets it.
    # e.g. the name of the interface inside the VM + protocol
    # vm_mgmt_if = sa.Column(sa.String(255), default=None, nullable=True)
    # networks =
    # obj_store =
    # cost_factor =
class ServiceInstanceAttribute(model_base.BASEV2, models_v2.HasId):
    """Represents kwargs necessary for spinning up a VM as (key, value) pairs.

    Key/value pairs keep the schema agnostic to the actual manager of VMs
    (nova, heat or others), e.g. image-id, flavor-id for Nova.
    The interpretation is up to the actual driver of the hosting device.
    """
    service_instance_id = sa.Column(sa.String(255),
                                    sa.ForeignKey('serviceinstances.id'),
                                    nullable=False)
    key = sa.Column(sa.String(255), nullable=False)
    # json encoded value. example
    # "nic": [{"net-id": <net-uuid>}, {"port-id": <port-uuid>}]
    #value = sa.Column(sa.String(4096), nullable=True)
    value = sa.Column(sql.JsonCom(), nullable=True)
class ServiceDeviceBinding(model_base.BASEV2):
    """Represents binding with Device and LogicalResource.

    Since Device can accomodate multiple services, it's many-to-one
    relationship. The pair (service_instance_id, device_id) is the
    composite primary key.
    """
    service_instance_id = sa.Column(
        sa.String(36), sa.ForeignKey('serviceinstances.id'), primary_key=True)
    device_id = sa.Column(sa.String(36), sa.ForeignKey('devices.id'),
                          primary_key=True)
class DeviceAgentBinding(model_base.BASEV2):
    """Represents binding between a device and a ServiceVM agent.

    Rows are removed automatically when either side is deleted (CASCADE).
    """
    device_id = sa.Column(sa.String(36),
                          sa.ForeignKey("devices.id", ondelete='CASCADE'),
                          primary_key=True)
    servicevm_agent_id = sa.Column(sa.String(36),
                                   sa.ForeignKey("agents.id", ondelete='CASCADE'),
                                   primary_key=True)
###########################################################################
class ServiceResourcePluginDb(servicevm.ServiceVMPluginBase,
common_db_mixin.CommonDbMixin):
@property
def _core_plugin(self):
return manager.NeutronManager.get_plugin()
@property
def l3_plugin(self):
try:
return self._l3_plugin
except AttributeError:
self._l3_plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
return self._l3_plugin
def subnet_id_to_network_id(self, context, subnet_id):
subnet = self._core_plugin.get_subnet(context, subnet_id)
return subnet['network_id']
    def __init__(self):
        # Ensure all servicevm models defined above are registered with
        # the neutron DB API before the mixin is usable.
        qdbapi.register_models()
        super(ServiceResourcePluginDb, self).__init__()
def _get_resource(self, context, model, id):
try:
return self._get_by_id(context, model, id)
except orm_exc.NoResultFound:
if issubclass(model, DeviceTemplate):
raise servicevm.DeviceTemplateNotFound(device_tempalte_id=id)
elif issubclass(model, ServiceType):
raise servicevm.ServiceTypeNotFound(service_type_id=id)
elif issubclass(model, ServiceInstance):
raise servicevm.ServiceInstanceNotFound(service_instance_id=id)
elif issubclass(model, DeviceAgentBinding):
raise servicevm.DeviceNotFound(device_id=id)
if issubclass(model, Device):
raise servicevm.DeviceNotFound(device_id=id)
if issubclass(model, ServiceInstanceAttribute):
raise servicevm.ServiceInstanceAttributeNotFound(service_instance_id=id)
else:
raise
def _make_attributes_dict(self, attributes_db):
return dict((attr.key, attr.value) for attr in attributes_db)
def _make_service_types_list(self, service_types):
return [{'id': service_type.id,
'service_type': service_type.servicetype}
for service_type in service_types]
def _make_template_dict(self, template, fields=None):
res = {
'attributes':
self._make_attributes_dict(template['attributes']),
'service_types':
self._make_service_types_list(template.service_types)
}
key_list = ('id', 'tenant_id', 'name', 'description',
'shared','infra_driver', 'mgmt_driver',
'device_driver', 'created_at')
res.update((key, template[key]) for key in key_list)
return self._fields(res, fields)
def _make_services_list(self, binding_db):
return [binding.service_instance_id for binding in binding_db]
def _make_dev_attrs_dict(self, dev_attrs_db):
return dict((arg.key, arg.value) for arg in dev_attrs_db)
    def _make_device_dict(self, device_db, fields=None):
        """Build the API view of a Device row, embedding its template,
        its attributes and the ids of bound service instances."""
        LOG.debug(_('device_db %s'), device_db)
        LOG.debug(_('device_db attributes %s'), device_db.attributes)
        res = {
            'services':
            self._make_services_list(getattr(device_db, 'services', [])),
            'device_template':
            self._make_template_dict(device_db.template),
            'attributes':
            self._make_dev_attrs_dict(device_db.attributes),
        }
        key_list = ('id', 'tenant_id', 'name', 'description', 'instance_id',
                    'template_id', 'status', 'mgmt_url', 'created_at',
                    'power_state', 'auth')
        res.update((key, device_db[key]) for key in key_list)
        return self._fields(res, fields)
def _make_service_type_dict(self, service_type_db, fields=None):
res = {}
key_list = ('id', 'servicetype', 'template_id')
res.update((key, service_type_db[key]) for key in key_list)
return self._fields(res, fields)
def _make_service_device_list(self, devices):
return [binding.device_id for binding in devices]
#def get_service_instance_attr(self, context, service_instance_id, fields=None):
# service_instance_attr__db = self._get_resource(context, ServiceInstanceAttribute,
# service_instance_id)
# return self._make_service_attr_dict(service_instance_attr__db)
    def _make_service_instance_dict(self, instance_db, fields=None):
        """Build the API view of a ServiceInstance row, embedding its
        attributes, bound device ids and service type."""
        res = {
            'attributes':
            self._make_attributes_dict(instance_db['attributes']),
            'devices':
            self._make_service_device_list(instance_db.devices),
            'service_type':
            self._make_service_type_dict(instance_db.service_type)
        }
        key_list = ('id', 'tenant_id', 'name', 'service_type_id',
                    'service_table_id', 'mgmt_driver', 'mgmt_url',
                    'status', 'created_at')
        res.update((key, instance_db[key]) for key in key_list)
        return self._fields(res, fields)
@staticmethod
def _infra_driver_name(device_dict):
return device_dict['device_template']['infra_driver']
@staticmethod
def _mgmt_driver_name(device_dict):
return device_dict['device_template']['mgmt_driver']
@staticmethod
def _device_driver_name(device_dict):
return device_dict['device_template']['device_driver']
@staticmethod
def _instance_id(device_dict):
return device_dict['instance_id']
###########################################################################
# hosting device template
    def create_device_template(self, context, device_template):
        """Persist a new device template with its attribute and service-type
        child rows.

        Raises the matching servicevm exception when infra_driver,
        mgmt_driver or service_types is not set in the request.
        """
        template = device_template['device_template']
        LOG.debug(_('template %s'), template)
        tenant_id = self._get_tenant_id_for_create(context, template)
        infra_driver = template.get('infra_driver')
        mgmt_driver = template.get('mgmt_driver')
        device_driver = template.get('device_driver')
        service_types = template.get('service_types')
        shared = template.get('shared')
        if (not attributes.is_attr_set(infra_driver)):
            LOG.debug(_('hosting device driver unspecified'))
            raise servicevm.InfraDriverNotSpecified()
        if (not attributes.is_attr_set(mgmt_driver)):
            LOG.debug(_('mgmt driver unspecified'))
            raise servicevm.MGMTDriverNotSpecified()
        if (not attributes.is_attr_set(service_types)):
            LOG.debug(_('service types unspecified'))
            raise servicevm.SeviceTypesNotSpecified()
        with context.session.begin(subtransactions=True):
            template_id = str(uuid.uuid4())
            template_db = DeviceTemplate(
                id=template_id,
                tenant_id=tenant_id,
                name=template.get('name'),
                description=template.get('description'),
                infra_driver=infra_driver,
                device_driver=device_driver,
                shared=shared,
                created_at=timeutils.utcnow(),
                mgmt_driver=mgmt_driver)
            # fill in a generated name when the user supplied none
            utils.make_default_name(template_db, s_constants.PRE_DEV_TEM)
            context.session.add(template_db)
            for (key, value) in template.get('attributes', {}).items():
                attribute_db = DeviceTemplateAttribute(
                    id=str(uuid.uuid4()),
                    template_id=template_id,
                    key=key,
                    value=value)
                context.session.add(attribute_db)
            for service_type in (item['service_type']
                                 for item in template['service_types']):
                service_type_db = ServiceType(
                    id=str(uuid.uuid4()),
                    template_id=template_id,
                    servicetype=service_type)
                context.session.add(service_type_db)
        LOG.debug(_('template_db %(template_db)s %(attributes)s '),
                  {'template_db': template_db,
                   'attributes': template_db.attributes})
        return self._make_template_dict(template_db)
def update_device_template(self, context, device_template_id,
device_template):
with context.session.begin(subtransactions=True):
template_db = self._get_resource(context, DeviceTemplate,
device_template_id)
template_db.update(device_template['device_template'])
return self._make_template_dict(template_db)
    def delete_device_template(self, context, device_template_id):
        """Delete a template and its child rows.

        Refuses (DeviceTemplateInUse) while any device still references it.
        """
        with context.session.begin(subtransactions=True):
            # TODO(yamahata): race. prevent from newly inserting hosting device
            # that refers to this template
            devices_db = context.session.query(Device).filter_by(
                template_id=device_template_id).first()
            if devices_db is not None:
                raise servicevm.DeviceTemplateInUse(
                    device_template_id=device_template_id)
            # remove child rows first, then the template itself
            context.session.query(ServiceType).filter_by(
                template_id=device_template_id).delete()
            context.session.query(DeviceTemplateAttribute).filter_by(
                template_id=device_template_id).delete()
            template_db = self._get_resource(context, DeviceTemplate,
                                             device_template_id)
            context.session.delete(template_db)
def get_device_template(self, context, device_template_id, fields=None):
template_db = self._get_resource(context, DeviceTemplate,
device_template_id)
return self._make_template_dict(template_db)
def get_device_templates(self, context, filters, fields=None):
return self._get_collection(context, DeviceTemplate,
self._make_template_dict,
filters=filters, fields=fields)
# called internally, not by REST API
# need enhancement?
def choose_device_template(self, context, service_type,
required_attributes=None):
required_attributes = required_attributes or []
LOG.debug(_('required_attributes %s'), required_attributes)
with context.session.begin(subtransactions=True):
query = (
context.session.query(DeviceTemplate).
filter(
sa.exists().
where(sa.and_(
DeviceTemplate.id == ServiceType.template_id,
ServiceType.service_type == service_type))))
for key in required_attributes:
query = query.filter(
sa.exists().
where(sa.and_(
DeviceTemplate.id ==
DeviceTemplateAttribute.template_id,
DeviceTemplateAttribute.key == key)))
LOG.debug(_('statements %s'), query)
template_db = query.first()
if template_db:
return self._make_template_dict(template_db)
###########################################################################
# hosting device
def _device_attribute_update_or_create(
self, context, device_id, key, value):
arg = (self._model_query(context, DeviceAttribute).
filter(DeviceAttribute.device_id == device_id).
filter(DeviceAttribute.key == key).first())
if arg:
arg.value = value
else:
arg = DeviceAttribute(
id=str(uuid.uuid4()), device_id=device_id,
key=key, value=value)
context.session.add(arg)
# called internally, not by REST API
    def _create_device_pre(self, context, device):
        """Insert the Device row in PENDING_CREATE before the VM is spawned.

        Called internally, not by the REST API. Returns the device dict.
        """
        device = device['device']
        LOG.debug(_('device %s'), device)
        tenant_id = self._get_tenant_id_for_create(context, device)
        template_id = device['template_id']
        auth = device['auth']
        name = device.get('name')
        # the caller may pre-assign the id; otherwise generate one
        device_id = device.get('id') or str(uuid.uuid4())
        attributes = device.get('attributes', {})
        with context.session.begin(subtransactions=True):
            template_db = self._get_resource(context, DeviceTemplate,
                                             template_id)
            device_db = Device(id=device_id,
                               tenant_id=tenant_id,
                               name=name,
                               description=template_db.description,
                               instance_id=None,
                               template_id=template_id,
                               created_at=timeutils.utcnow(),
                               status=constants.PENDING_CREATE,
                               auth=auth,
                               power_state=constants.DOWN)
            utils.make_default_name(device_db, s_constants.PRE_DEVICE)
            context.session.add(device_db)
            for key, value in attributes.items():
                arg = DeviceAttribute(
                    id=str(uuid.uuid4()), device_id=device_id,
                    key=key, value=value)
                context.session.add(arg)
        return self._make_device_dict(device_db)
# called internally, not by REST API
# intsance_id = None means error on creation
    def _create_device_post(self, context, device_id, instance_id,
                            mgmt_url, device_dict):
        """Record the spawn result on the PENDING_CREATE device row.

        instance_id of None marks a failed creation: the row is flipped to
        ERROR and the instance id is taken from device_dict instead.
        """
        LOG.debug(_('device_dict %s'), device_dict)
        with context.session.begin(subtransactions=True):
            query = (self._model_query(context, Device).
                     filter(Device.id == device_id).
                     filter(Device.status == constants.PENDING_CREATE).
                     one())
            # (xining) if create instance fail, instance_id is None, It can
            # not update db
            #query.update({'instance_id': instance_id, 'mgmt_url': mgmt_url})
            if instance_id is None:
                query.update({'status': constants.ERROR})
                query.update({'mgmt_url': mgmt_url})
                query.update({'instance_id': device_dict['instance_id']})
            else:
                query.update({'instance_id': instance_id, 'mgmt_url': mgmt_url})
            for (key, value) in device_dict['attributes'].items():
                self._device_attribute_update_or_create(context, device_id,
                                                        key, value)
    def _register_agent_binding(self, context, device_id, instance):
        """Bind the device to the servicevm agent running on the compute
        host of *instance* (read from OS-EXT-SRV-ATTR:host)."""
        host = getattr(instance, INSTANCE_HOST_ATTR)
        plugin = manager.NeutronManager.get_plugin()
        agent = plugin._get_agent_by_type_and_host(context,
            n_constants.AGENT_TYPE_SERVICEVM, host)
        with context.session.begin(subtransactions=True):
            binding_db = DeviceAgentBinding(device_id=device_id,
                                            servicevm_agent_id=agent['id'])
            context.session.add(binding_db)
def _create_device_status(self, context, device_id, new_status):
with context.session.begin(subtransactions=True):
(self._model_query(context, Device).
filter(Device.id == device_id).
filter(Device.status == constants.PENDING_CREATE).
update({'status': new_status}))
    def _get_device_db(self, context, device_id, current_statuses, new_status):
        """Lock the device row, require its status in *current_statuses*,
        then transition it to *new_status*.

        Raises DeviceNotFound when no row matches, DeviceInUse when the
        locked row turned out to be PENDING_UPDATE.
        """
        try:
            device_db = (
                self._model_query(context, Device).
                filter(Device.id == device_id).
                filter(Device.status.in_(current_statuses)).
                with_lockmode('update').one())
        except orm_exc.NoResultFound:
            raise servicevm.DeviceNotFound(device_id=device_id)
        if device_db.status == constants.PENDING_UPDATE:
            raise servicevm.DeviceInUse(device_id=device_id)
        device_db.update({'status': new_status})
        return device_db
def _update_device_pre(self, context, device_id):
with context.session.begin(subtransactions=True):
device_db = self._get_device_db(
context, device_id, _ACTIVE_UPDATE, constants.PENDING_UPDATE)
return self._make_device_dict(device_db)
    def _update_device_post(self, context, device_id, new_status,
                            new_device_dict=None):
        """Finish an update: set the final status and sync attribute rows
        to new_device_dict['attributes'] (delete stale keys, upsert the rest)."""
        with context.session.begin(subtransactions=True):
            (self._model_query(context, Device).
             filter(Device.id == device_id).
             filter(Device.status == constants.PENDING_UPDATE).
             update({'status': new_status}))
            # NOTE(review): new_device_dict=None would raise here; callers
            # are expected to pass the updated device dict.
            dev_attrs = new_device_dict.get('attributes', {})
            (context.session.query(DeviceAttribute).
             filter(DeviceAttribute.device_id == device_id).
             filter(~DeviceAttribute.key.in_(dev_attrs.keys())).
             delete(synchronize_session='fetch'))
            for (key, value) in dev_attrs.items():
                self._device_attribute_update_or_create(context, device_id,
                                                        key, value)
    def update_device_name_or_desc(self, context, device_id, name=None,
                                   desc=None):
        """Overwrite the device's name and description.

        Both columns are written unconditionally, so omitting one of the
        keyword arguments resets that column to None.
        """
        with context.session.begin(subtransactions=True):
            (self._model_query(context, Device).
             filter(Device.id == device_id).
             one().
             update({'name': name,
                     'description': desc}))
    def _delete_device_pre(self, context, device_id):
        """Refuse deletion while services are bound to the device; otherwise
        lock it and move it to PENDING_DELETE. Returns the device dict."""
        with context.session.begin(subtransactions=True):
            # TODO(yamahata): race. keep others from inserting new binding
            binding_db = (context.session.query(ServiceDeviceBinding).
                          filter_by(device_id=device_id).first())
            if binding_db is not None:
                raise servicevm.DeviceInUse(device_id=device_id)
            device_db = self._get_device_db(
                context, device_id, _ACTIVE_UPDATE_ERROR_DEAD,
                constants.PENDING_DELETE)
        return self._make_device_dict(device_db)
    def _delete_device_post(self, context, device_id, error):
        """Finalize deletion: mark the row ERROR on failure, otherwise purge
        the device and its attribute rows."""
        with context.session.begin(subtransactions=True):
            query = (
                self._model_query(context, Device).
                filter(Device.id == device_id).
                filter(Device.status == constants.PENDING_DELETE))
            if error:
                query.update({'status': constants.ERROR})
                #(self._model_query(context, Device).
                # filter(Device.id == device_id).delete())
            else:
                (self._model_query(context, DeviceAttribute).
                 filter(DeviceAttribute.device_id == device_id).delete())
                (self._model_query(context, Device).
                 filter(Device.id == device_id).delete())
                #(self._model_query(context, DeviceServiceContext).
                # filter(DeviceServiceContext.device_id == device_id).delete())
            # NOTE(review): runs in both branches; after either branch above
            # no row still matches status==PENDING_DELETE, so this is a no-op.
            query.delete()
# reference implementation. needs to be overrided by subclass
def create_device(self, context, device):
device_dict = self._create_device_pre(context, device)
# start actual creation of hosting device.
# Waiting for completion of creation should be done backgroundly
# by another thread if it takes a while.
instance_id = str(uuid.uuid4())
device_dict['instance_id'] = instance_id
self._create_device_post(context, device_dict['id'], instance_id, None,
device_dict)
self._create_device_status(context, device_dict['id'],
constants.ACTIVE)
return device_dict
# reference implementation. needs to be overrided by subclass
def update_device(self, context, device_id, device):
device_dict = self._update_device_pre(context, device_id)
# start actual update of hosting device
# waiting for completion of update should be done backgroundly
# by another thread if it takes a while
self._update_device_post(context, device_id, constants.ACTIVE)
return device_dict
# reference implementation. needs to be overrided by subclass
def delete_device(self, context, device_id):
self._delete_device_pre(context, device_id)
# start actual deletion of hosting device.
# Waiting for completion of deletion should be done backgroundly
# by another thread if it takes a while.
self._delete_device_post(context, device_id, False)
def get_device(self, context, device_id, fields=None):
device_db = self._get_resource(context, Device, device_id)
return self._make_device_dict(device_db, fields)
def get_devices(self, context, filters=None, fields=None):
devices = self._get_collection(context, Device, self._make_device_dict,
filters=filters, fields=fields)
# Ugly hack to mask internaly used record
a = [device for device in devices
if uuidutils.is_uuid_like(device['id'])]
return a
    def _mark_device_status(self, device_id, exclude_status, new_status):
        """Set a device's status unless it is in one of *exclude_status*.

        Runs under an admin context with a row lock. Returns False when the
        device does not exist or is in an excluded state, True otherwise.
        """
        context = t_context.get_admin_context()
        with context.session.begin(subtransactions=True):
            try:
                device_db = (
                    self._model_query(context, Device).
                    filter(Device.id == device_id).
                    filter(~Device.status.in_(exclude_status)).
                    with_lockmode('update').one())
            except orm_exc.NoResultFound:
                LOG.warn(_('no device found %s'), device_id)
                return False
            device_db.update({'status': new_status})
        return True
def _mark_device_error(self, device_id):
return self._mark_device_status(
device_id, [constants.DEAD], constants.ERROR)
def _mark_device_dead(self, device_id):
exclude_status = [
constants.DOWN,
constants.PENDING_CREATE,
constants.PENDING_UPDATE,
constants.PENDING_DELETE,
constants.INACTIVE,
constants.ERROR]
return self._mark_device_status(
device_id, exclude_status, constants.DEAD)
    # used by failure policy
    def rename_device_id(self, context, device_id, new_device_id):
        """Re-key a Device row under *new_device_id*.

        There is no UPDATE on the primary key, so the row is copied, the
        device attributes are repointed, and the old row is deleted -- all
        in one transaction.
        """
        # ugly hack...
        context = t_context.get_admin_context()
        with context.session.begin(subtransactions=True):
            device_db = self._get_resource(context, Device, device_id)
            new_device_db = Device(
                id=new_device_id,
                tenant_id=device_db.tenant_id,
                template_id=device_db.template_id,
                name=device_db.name,
                description=device_db.description,
                instance_id=device_db.instance_id,
                created_at=timeutils.utcnow(),
                mgmt_url=device_db.mgmt_url,
                status=device_db.status)
            context.session.add(new_device_db)
            (self._model_query(context, DeviceAttribute).
             filter(DeviceAttribute.device_id == device_id).
             update({'device_id': new_device_id}))
            context.session.delete(device_db)
###########################################################################
# logical service instance
def _get_service_type(self, context, service_type_id):
service_type_db = self._get_resource(context, ServiceType,
service_type_id)
return service_type_db['servicetype']
    # called internally, not by REST API
    def _create_service_instance(self, context, device_id,
                                 service_instance_param, managed_by_user):
        """Create a ServiceInstance row bound to *device_id*.

        :param service_instance_param: dictionary to create
            instance of ServiceInstance. The following keys are used:
            name, service_type_id, service_table_id, mgmt_driver, mgmt_url.
            mgmt_driver, mgmt_url can be determined later.
        :param managed_by_user: stored on the row; rechecked on delete.
        :returns: the created service instance as a dict.
        """
        name = service_instance_param['name']
        service_type_id = service_instance_param['service_type_id']
        service_table_id = service_instance_param['service_table_id']
        mgmt_driver = service_instance_param.get('mgmt_driver')
        mgmt_url = service_instance_param.get('mgmt_url')
        servicetype = self._get_service_type(context, service_type_id)
        service_instance_id = str(uuid.uuid4())
        LOG.debug('service_instance_id %s device_id %s',
                  service_instance_id, device_id)
        with context.session.begin(subtransactions=True):
            # TODO(yamahata): race. prevent modifying/deleting service_type
            # with_lockmode("update")
            device_db = self._get_resource(context, Device, device_id)
            device_dict = self._make_device_dict(device_db)
            tenant_id = self._get_tenant_id_for_create(context, device_dict)
            instance_db = ServiceInstance(
                id=service_instance_id,
                tenant_id=tenant_id,
                name=name,
                service_type_id=service_type_id,
                service_table_id=service_table_id,
                servicetype=servicetype,
                managed_by_user=managed_by_user,
                status=constants.PENDING_CREATE,
                mgmt_driver=mgmt_driver,
                created_at=timeutils.utcnow(),
                mgmt_url=mgmt_url)
            utils.make_default_name(instance_db, s_constants.PRE_SERVICE)
            context.session.add(instance_db)
            # Flush so the row exists before attributes/bindings reference it.
            context.session.flush()
            self._add_service_instance_attr(context, service_instance_param,
                                            service_instance_id)
            binding_db = ServiceDeviceBinding(
                service_instance_id=service_instance_id, device_id=device_id)
            context.session.add(binding_db)
        return self._make_service_instance_dict(instance_db)
    def _update_attr_value(self, context, service_param, sid):
        """Re-sync floating-IP bindings after a service attribute update.

        Diffs the stored attributes against *service_param*: floating IPs
        newly referenced are bound to the service instance, those no longer
        referenced are unbound.
        """
        service_instance_db = self.get_service_instance(context, sid)
        # fixed_port_id -> [floatingip_id, ...] as currently stored.
        port_db_dict = {}
        no_port_db_list = []
        # Same mapping built from the requested attributes.
        port_dict = {}
        no_port_list = []
        for key, value in service_instance_db['attributes'].items():
            for v in value:
                if v['floatingip_id']:
                    fip_ids = port_db_dict.get(v['fixed_port_id'], [])
                    fip_ids.append(v['floatingip_id'])
                    port_db_dict.update({v['fixed_port_id']: fip_ids})
                else:
                    no_port_db_list.append(v['fixed_port_id'])
        for key, value in service_param['attributes'].items():
            for v in value:
                if v['floatingip_id']:
                    fip_ids = port_dict.get(v['fixed_port_id'], [])
                    fip_ids.append(v['floatingip_id'])
                    port_dict.update({v['fixed_port_id']: fip_ids})
                else:
                    no_port_list.append(v['fixed_port_id'])
        # Bind floating IPs that appear in the request but not in the DB.
        for (port_id, fip_ids) in port_dict.items():
            bind_fip_ids = list(set(fip_ids) - set(port_db_dict.get(port_id, [])))
            for fip_id in bind_fip_ids:
                admin_context = t_context.get_admin_context()
                port = self._core_plugin.get_port(admin_context, port_id)
                ip_address = port['fixed_ips'][0]['ip_address']
                svm_fip_db = self.l3_plugin._get_floatingip(context, fip_id)
                svm_fip_db.update({'fixed_ip_address': ip_address,
                                   'service_instance_id': sid,
                                   'fixed_port_id': port_id})
        # Unbind floating IPs that were stored but dropped from the request.
        for (port_id, fip_ids) in port_db_dict.items():
            no_bind_fip_ids = list(set(fip_ids) - set(port_dict.get(port_id, [])))
            for fip_id in no_bind_fip_ids:
                svm_fip_db = self.l3_plugin._get_floatingip(context, fip_id)
                svm_fip_db.update({'service_instance_id': None,
                                   'fixed_port_id': None,
                                   'fixed_ip_address': None})
    def _add_attr_value(self, context, service_param, sid):
        """Bind gateway/floating-IP attributes of *sid* to Neutron resources.

        For each external-gateway or floating-ip attribute entry, records the
        service instance id on the floating IP and on its fixed port.
        """
        admin_context = t_context.get_admin_context()
        with admin_context.session.begin(subtransactions=True):
            for (key, values) in \
                service_param.get('attributes', {}).items():
                if key in [s_constants.EXTERNAL_GATWAY_KEY,
                           s_constants.FLOATINGIP_KEY]:
                    for value in values:
                        fip_id = value.get('floatingip_id', None)
                        fixed_port_id = value.get('fixed_port_id')
                        port = self._core_plugin.get_port(admin_context, fixed_port_id)
                        if fip_id:
                            # Point the floating IP at the port's first fixed IP.
                            ip_address = port['fixed_ips'][0]['ip_address']
                            floatingip_db = self.l3_plugin._get_floatingip(context, fip_id)
                            floatingip_db.update({'fixed_ip_address': ip_address,
                                                  'service_instance_id': sid,
                                                  'fixed_port_id': fixed_port_id})
                        if fixed_port_id:
                            svm_port_db = self._core_plugin._get_port(admin_context, fixed_port_id)
                            svm_port_db.update({'service_instance_id': sid})
def _add_service_instance_attr(self, context, service_param, sid):
for (key, value) in \
service_param.get('attributes', {}).items():
attribute_db = ServiceInstanceAttribute(
id=str(uuid.uuid4()),
service_instance_id=sid,
key=key,
value=value)
context.session.add(attribute_db)
self._add_attr_value(context, service_param, sid)
# reference implementation. must be overriden by subclass
def create_service_instance(self, context, service_instance):
self._create_service_instance(
context, service_instance['service_instance'], True)
def _service_instance_attribute_update_or_create(
self, context, service_instance_id, key, value):
arg = (self._model_query(context, ServiceInstanceAttribute).
filter(ServiceInstanceAttribute.service_instance_id == service_instance_id).
filter(ServiceInstanceAttribute.key == key).first())
if arg:
arg.value = value
else:
arg = ServiceInstanceAttribute(
id=str(uuid.uuid4()),
service_instance_id=service_instance_id,
key=key, value=value)
context.session.add(arg)
def _update_service_instance_mgmt(self, context, service_instance_id,
mgmt_driver, mgmt_url):
with context.session.begin(subtransactions=True):
(self._model_query(context, ServiceInstance).
filter(ServiceInstance.id == service_instance_id).
filter(ServiceInstance.status == constants.PENDING_CREATE).
one().
update({'mgmt_driver': mgmt_driver,
'mgmt_url': mgmt_url}))
def _update_service_instance_check(self, context, service_instance_id,
service_instance):
service_instace = self.get_service_instance(context, service_instance_id)
attr = copy.deepcopy(service_instace['attributes'])
service = service_instance['service_instance']
for key, value in service.get('attributes', {}).iteritems():
if key in attr.keys() and attr[key] != value:
del attr[key]
return True
if key in attr.keys():
del attr[key]
if attr:
return True
return False
def _update_service_instance_pre(self, context, service_instance_id,
service_instance):
with context.session.begin(subtransactions=True):
instance_db = (
self._model_query(context, ServiceInstance).
filter(ServiceInstance.id == service_instance_id).
filter(Device.status == constants.ACTIVE).
with_lockmode('update').one())
instance_db.update(service_instance)
instance_db.update({'status': constants.PENDING_UPDATE})
return self._make_service_instance_dict(instance_db)
    def _update_service_instance_post(self, context, service_instance_id,
                                      status, new_service_instance=None):
        """Finish an update: set *status* and reconcile stored attributes.

        When *new_service_instance* is given, its attributes replace the
        stored ones (stale keys deleted, the rest upserted) and the
        floating-IP bindings are re-synced.
        """
        with context.session.begin(subtransactions=True):
            (self._model_query(context, ServiceInstance).
             filter(ServiceInstance.id == service_instance_id).
             filter(ServiceInstance.status.in_(
                 [constants.PENDING_CREATE, constants.PENDING_UPDATE])).one().
             update({'status': status}))
            if new_service_instance:
                self._update_attr_value(context, new_service_instance,
                                        service_instance_id)
                service_instance_attrs = new_service_instance.get('attributes', {})
                # Drop attribute rows whose key is absent from the request.
                (context.session.query(ServiceInstanceAttribute).
                 filter(ServiceInstanceAttribute.service_instance_id == \
                        service_instance_id).
                 filter(~ServiceInstanceAttribute.key.in_(
                     service_instance_attrs.keys())).delete(
                     synchronize_session='fetch'))
                for (key, value) in service_instance_attrs.items():
                    self._service_instance_attribute_update_or_create(context,
                        service_instance_id, key, value)
# reference implementation
def update_service_instance(self, context, service_instance_id,
service_instance):
service_instance_dict = self._update_service_instance_pre(
context, service_instance_id, service_instance)
self._update_service_instance_post(
context, service_instance_id, service_instance, constants.ACTIVE)
return service_instance_dict
    def _delete_service_instance_pre(self, context, service_instance_id,
                                     managed_by_user):
        """Validate deletion and mark the instance PENDING_DELETE.

        Raises ServiceInstanceNotManagedByUser when ownership does not match
        and ServiceInstanceInUse when more than one device binding exists.
        """
        with context.session.begin(subtransactions=True):
            service_instance = (
                self._model_query(context, ServiceInstance).
                filter(ServiceInstance.id == service_instance_id).
                #cinghu
                # NOTE(review): the ACTIVE-status guard below was disabled;
                # deletion is currently allowed from any state.
                #filter(ServiceInstance.status == constants.ACTIVE).
                with_lockmode('update').one())
            if service_instance.managed_by_user != managed_by_user:
                raise servicevm.ServiceInstanceNotManagedByUser(
                    service_instance_id=service_instance_id)
            service_instance.status = constants.PENDING_DELETE
            binding_db = (
                self._model_query(context, ServiceDeviceBinding).
                filter(ServiceDeviceBinding.service_instance_id ==
                       service_instance_id).
                all())
            assert binding_db
            # check only. _post method will delete it.
            if len(binding_db) > 1:
                raise servicevm.ServiceInstanceInUse(
                    service_instance_id=service_instance_id)
    def _delete_service_instance_post(self, context, service_instance_id):
        """Remove the binding, attributes and row of a PENDING_DELETE instance,
        then detach any external ports/floating IPs still referencing it."""
        with context.session.begin(subtransactions=True):
            binding_db = (
                self._model_query(context, ServiceDeviceBinding).
                filter(ServiceDeviceBinding.service_instance_id ==
                       service_instance_id).
                all())
            # _pre guaranteed exactly one binding remains.
            assert binding_db
            assert len(binding_db) == 1
            context.session.delete(binding_db[0])
            (self._model_query(context, ServiceInstanceAttribute).
             filter(ServiceInstanceAttribute.service_instance_id == \
                    service_instance_id).delete())
            (self._model_query(context, ServiceInstance).
             filter(ServiceInstance.id == service_instance_id).
             filter(ServiceInstance.status == constants.PENDING_DELETE).
             delete())
            self._update_external_resource(context, service_instance_id)
def _update_external_resource(self, context, service_instance_id):
port_db = (
self._model_query(context, models_v2.Port).
filter(models_v2.Port.service_instance_id ==
service_instance_id).
all())
for p in port_db:
p.update({'service_instance_id':None})
fip_db = (
self._model_query(context, l3.FloatingIP).
filter(l3.FloatingIP.service_instance_id ==
service_instance_id).
all())
for f in fip_db:
f.update({'service_instance_id':None})
def _1update_external_resource(context, service_instance_id):
context = t_context.get_admin_context()
filters = {'service_instance_id': service_id}
ports = self._core_plugin.get_ports(context, filters)
for p in ports:
p['service_instance_id'] = None
self._core_plugin.update_port(context, p['id'], p)
floatingips = self.l3_plugin.get_floatingips(context, filters)
for f in floatingips:
f['service_instance_id'] = None
self.l3_plugin.update_floatingips(context, f['id'], f)
# reference implementation. needs to be overriden by subclass
def _delete_service_instance(self, context, service_instance_id,
managed_by_user):
self._delete_service_instance_pre(context, service_instance_id,
managed_by_user)
self._delete_service_instance_post(context, service_instance_id)
# reference implementation. needs to be overriden by subclass
def delete_service_instance(self, context, service_instance_id):
self._delete_service_instance(context, service_instance_id, True)
    def get_by_service_table_id(self, context, service_table_id):
        """Return (device_dict, service_instance_dict) for a service table id."""
        with context.session.begin(subtransactions=True):
            instance_db = (self._model_query(context, ServiceInstance).
                           filter(ServiceInstance.service_table_id ==
                                  service_table_id).one())
            # The hosting device is found through the service/device binding.
            device_db = (
                self._model_query(context, Device).
                filter(sa.exists().where(sa.and_(
                    ServiceDeviceBinding.device_id == Device.id,
                    ServiceDeviceBinding.service_instance_id ==
                    instance_db.id))).one())
            return (self._make_device_dict(device_db),
                    self._make_service_instance_dict(instance_db))
    def get_by_service_instance_id(self, context, service_instance_id):
        """Return (device_dict, service_instance_dict) for an instance id."""
        with context.session.begin(subtransactions=True):
            instance_db = self._get_resource(context, ServiceInstance,
                                             service_instance_id)
            # The hosting device is found through the service/device binding.
            device_db = (
                self._model_query(context, Device).
                filter(sa.exists().where(sa.and_(
                    ServiceDeviceBinding.device_id == Device.id,
                    ServiceDeviceBinding.service_instance_id ==
                    instance_db.id))).one())
            return (self._make_device_dict(device_db),
                    self._make_service_instance_dict(instance_db))
def get_service_instance(self, context, service_instance_id, fields=None):
instance_db = self._get_resource(context, ServiceInstance,
service_instance_id)
return self._make_service_instance_dict(instance_db, fields)
def get_service_instances(self, context, filters=None, fields=None):
return self._get_collection(
context, ServiceInstance, self._make_service_instance_dict,
filters=filters, fields=fields)
def get_service_types(self, context, filters=None, fields=None):
service_types = self._get_collection(
context, ServiceType, self._make_service_type_dict,
filters=filters, fields=fields)
return service_types
def get_service_type(self, context, service_type_id, fields=None):
service_type_db = self._get_resource(context, ServiceType,
service_type_id)
return self._make_service_type_dict(service_type_db, fields)
def update_device_template(self, context, device_template_id,
device_template):
with context.session.begin(subtransactions=True):
template_db = self._get_resource(context, DeviceTemplate,
device_template_id)
template_db.update(device_template['device_template'])
return self._make_template_dict(template_db)
    # NOTE(changzhi)
    def attach_interface(self, context):
        # Placeholder: interface attach is not implemented yet.
        pass
    def detach_interface(self, context):
        # Placeholder: interface detach is not implemented yet.
        pass
class ServiceVMPluginRpcDbMixin(object):
def _register_service_type_sync_func(self):
self.service_type_sync_func = {
s_constants.VROUTER:'_get_sync_vrouter_data',
s_constants.VFIREWALL:'_get_sync_vfirewall_data'}
def get_devices_on_host(self, context, host):
#hxn add,test function
context = t_context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
agent = plugin._get_agent_by_type_and_host(
context, n_constants.AGENT_TYPE_SERVICEVM, host)
result = []
with context.session.begin(subtransactions=True):
device_ids = context.session.query(DeviceAgentBinding).filter_by(
servicevm_agent_id=agent.id).all()
ids = [q.device_id for q in device_ids]
query = context.session.query(Device)
for id in ids:
device = context.session.query(Device).filter_by(
id=id)
q = query.filter_by(id=id)
r = self._make_device_dict(q)
result.append(r)
return result
    def manage_device_bindings(self, context, new_ids, agent):
        # Placeholder: re-binding devices to agents is not implemented yet.
        pass
def register_agent_devices(self, context, resources, host):
plugin = manager.NeutronManager.get_plugin()
agent = plugin._get_agent_by_type_and_host(
context, n_constants.AGENT_TYPE_SERVICEVM, host)
if not agent.admin_state_up:
return
self.manage_device_power_state(context, resources)
    def manage_device_power_state(self, context, resources):
        """Persist reachable/dead power state for the reported devices.

        *resources* maps 'reachable' and 'dead' to lists of device ids.
        """
        with context.session.begin(subtransactions=True):
            reachable_devices = resources.get('reachable', [])
            dead_devices = resources.get('dead', [])
            for device_id in reachable_devices:
                (self._model_query(context, Device).
                 filter(Device.id == device_id).
                 one().
                 update({'power_state':
                         constants.DEVICE_POWER_STATE['reachable']}))
            for device_id in dead_devices:
                (self._model_query(context, Device).
                 filter(Device.id == device_id).
                 one().
                 update({'power_state':
                         constants.DEVICE_POWER_STATE['dead']}))
def get_devices_info_by_host(self, context, host):
plugin = manager.NeutronManager.get_plugin()
agent = plugin._get_agent_by_type_and_host(
context, n_constants.AGENT_TYPE_SERVICEVM, host)
with context.session.begin(subtransactions=True):
device_db = context.session.query(DeviceAgentBinding).filter_by(
servicevm_agent_id=agent.id).all()
ids = [q.device_id for q in device_db]
query = context.session.query(Device).filter(
Device.id.in_(ids)).all()
devices = [self._make_device_dict(d) for d in query]
return devices
def _get_sync_services(self,context, service_lists, active=None):
return self.get_svm_gw_ports(context, service_lists, active=active)
def _get_sync_internal_interfaces(self, context, service_lists):
"""Query router interfaces that relate to list of router_ids."""
return self.get_svm_internal_ports(context, service_lists)
def _get_sync_mgmt_interfaces(self, context, service_lists):
"""Query router interfaces that relate to list of router_ids."""
return self.get_svm_mgmt_ports(context, service_lists)
    def _get_sync_floating_ips(self, context, service_lists):
        """Attach gateway and common floating IPs to each service dict.

        A floating IP is classified as a gateway FIP or a common FIP by
        matching its id against the ids stored in the service's attributes.
        """
        service_dicts = dict((s['id'], s) for s in service_lists)
        floating_ips = self.l3_plugin.get_floatingips(context,
            {'service_instance_id': service_dicts.keys()})
        for floating_ip in floating_ips:
            service = service_dicts.get(floating_ip['service_instance_id'])
            if service:
                gw_fips = service['attributes'].get(s_constants.EXTERNAL_GATWAY_KEY, [])
                gw_fip_ids = [gw_fip['floatingip_id'] for gw_fip in gw_fips if gw_fip['floatingip_id']]
                common_fips = service['attributes'].get(s_constants.FLOATINGIP_KEY, [])
                com_fip_ids = [f['floatingip_id'] for f in common_fips if f['floatingip_id']]
                g_fip = []
                floatingips = []
                if floating_ip['id'] in gw_fip_ids:
                    g_fip = service.get(n_constants.GW_FIP_KEY, [])
                    g_fip.append(floating_ip)
                if floating_ip['id'] in com_fip_ids:
                    floatingips = service.get(n_constants.FLOATINGIP_KEY, [])
                    floatingips.append(floating_ip)
                if g_fip:
                    service[n_constants.GW_FIP_KEY] = g_fip
                if floatingips:
                    service[n_constants.FLOATINGIP_KEY] = floatingips
        return service_lists
def _get_router_info_list(self, context, service_lists, active=None):
"""Query routers and their related floating_ips, interfaces."""
with context.session.begin(subtransactions=True):
services_gw = self._get_sync_services(context,
service_lists,
active=active)
services_internal = self._get_sync_internal_interfaces(
context, services_gw)
services_mgmt = self._get_sync_mgmt_interfaces(
context, services_internal)
services_fip = self._get_sync_floating_ips(context,
services_mgmt)
return services_fip
    #hxn add
    def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
        """Associate a floating IP, carrying over the optional rate limit.

        Overrides the l3-db association hook so that the fip rate-limit
        field (when present in the request) is copied onto the DB object.
        """
        previous_router_id = floatingip_db.router_id
        port_id, internal_ip_address, router_id = (
            self._check_and_get_fip_assoc(context, fip, floatingip_db))
        floatingip_db.update({'fixed_ip_address': internal_ip_address,
                              'fixed_port_id': port_id,
                              'router_id': router_id,
                              'last_known_router_id': previous_router_id})
        if fip_rate.RATE_LIMIT in fip:
            floatingip_db[fip_rate.RATE_LIMIT] = fip[fip_rate.RATE_LIMIT]
    def get_device_services(self, context, service_ids):
        """Return service-instance dicts (each with its 'device_dict') for ids."""
        service_lists = []
        with context.session.begin(subtransactions=True):
            instance_db = (self._model_query(context, ServiceInstance).
                           filter(ServiceInstance.id.in_(service_ids))).all()
            for instance in instance_db:
                # Resolve the hosting device via the service/device binding.
                device_db = (
                    self._model_query(context, Device).
                    filter(sa.exists().where(sa.and_(
                        ServiceDeviceBinding.device_id == Device.id,
                        ServiceDeviceBinding.service_instance_id ==
                        instance.id))).one())
                service = self._make_service_instance_dict(instance)
                service['device_dict'] = self._make_device_dict(device_db)
                service_lists.append(service)
        return service_lists
    def _get_sync_vfirewall_data(self, context,
                                 svc_ids=None, active=None):
        # Placeholder: vfirewall sync data is not implemented yet.
        pass
def _get_sync_vrouter_data(self, context,
svc_ids=None, active=None):
service_lists = self.get_device_services(context,
svc_ids)
routers = self._get_router_info_list(context,
service_lists,
active=active)
return routers
    def sync_service_instance_ids(self, context, host,
                                  device_ids=None):
        """Return the service-instance ids handled by the agent on *host*.

        Returns [] when the agent is disabled or not reserved; optionally
        restricts the result to the given device ids.
        """
        plugin = manager.NeutronManager.get_plugin()
        agent = plugin._get_agent_by_type_and_host(
            context, n_constants.AGENT_TYPE_SERVICEVM, host)
        if not agent.admin_state_up or not agent.reserved:
            return []
        query = context.session.query(ServiceInstance)
        query = query.join(ServiceDeviceBinding)
        # NOTE(review): this join only matches on the agent id; it probably
        # should also correlate DeviceAgentBinding.device_id with
        # ServiceDeviceBinding.device_id -- confirm against the schema.
        query = query.join(DeviceAgentBinding,
                           DeviceAgentBinding.servicevm_agent_id==agent.id)
        if device_ids:
            if len(device_ids) == 1:
                query = query.filter(
                    ServiceDeviceBinding.device_id ==
                    device_ids[0])
            else:
                query = query.filter(
                    ServiceDeviceBinding.device_id.in_(
                        device_ids))
        svc_ids = [item['id'] for item in query]
        LOG.debug('agent get service ids %(svc_ids)s', {'svc_ids':svc_ids})
        return svc_ids
def sync_service_instances(self, context, host,
service_instances_ids=None,
device_ids=None):
plugin = manager.NeutronManager.get_plugin()
agent = plugin._get_agent_by_type_and_host(
context, n_constants.AGENT_TYPE_SERVICEVM, host)
if not agent.admin_state_up or not agent.reserved:
return []
query = context.session.query(ServiceInstance)
query = query.join(ServiceDeviceBinding)
query = query.join(DeviceAgentBinding,
DeviceAgentBinding.servicevm_agent_id==agent.id)
if service_instances_ids:
if len(service_instances_ids) == 1:
query = query.filter(
ServiceDeviceBinding.service_instance_id ==
service_instances_ids[0])
else:
query = query.filter(
ServiceDeviceBinding.service_instance_id.in_(
service_instances_ids))
if device_ids:
if len(device_ids) == 1:
query = query.filter(
ServiceDeviceBinding.device_id ==
device_ids[0])
else:
query = query.filter(
ServiceDeviceBinding.device_id.in_(
device_ids))
service_data = []
svc_ids = []
for service_type in s_constants.SURRPORT_SERVICE_TYPE:
query = query.filter(
ServiceInstance.servicetype==service_type)
svc_ids = [item['id'] for item in query]
if not svc_ids:
LOG.warn('service instance of service type %s is null', service_type)
continue
data = getattr(self, self.service_type_sync_func[service_type])(context, svc_ids)
if data:
service_data.extend(data)
LOG.debug('agent get service data %(service_data)s', {'service_data':service_data})
return service_data
# hxn add for servicevm
def get_sync_svm_ports(self, context, service_ids,
service_type, active=None):
filters = {'service_instance_id': service_ids,
'servicevm_type': [service_type] }
ports = self._core_plugin.get_ports(context, filters)
if ports:
self.l3_plugin._populate_subnet_for_ports(context, ports)
return ports
def get_sync_svm_device_ports(self, context, device_ids,
service_type, active=None):
filters = {'servicevm_device': device_ids,
'servicevm_type': [service_type] }
ports = self._core_plugin.get_ports(context, filters)
if ports:
self.l3_plugin._populate_subnet_for_ports(context, ports)
return ports
def _build_services_list(self, context, service_lists, gw_ports):
for s in service_lists:
service_id = s['id']
# Collect gw ports only if available
if service_id and gw_ports.get(service_id):
s[n_constants.GW_INTERFACE_KEY] = gw_ports[service_id]
return service_lists
def get_svm_gw_ports(self, context, service_lists, active=None):
service_ids = [s['id'] for s in service_lists]
servicevm_type = n_constants.SERVICEVM_OWNER_ROUTER_GW
gw_ports = dict((gw_port['service_instance_id'], gw_port)
for gw_port in
self.get_sync_svm_ports(context, service_ids,
servicevm_type, active=active))
return self._build_services_list(context, service_lists, gw_ports)
    def get_svm_internal_ports(self, context, service_lists):
        """Attach router-interface ports, keyed by hosting device, to services."""
        # only a service instance for each service type in a device
        service_dicts = dict((s['devices'][0], s) for s in service_lists)
        servicevm_type = n_constants.SERVICEVM_OWNER_ROUTER_INTF
        interfaces = self.get_sync_svm_device_ports(context, service_dicts.keys(),
                                                    servicevm_type)
        for interface in interfaces:
            service = service_dicts.get(interface['servicevm_device'])
            if service:
                internal_interfaces = service.get(n_constants.INTERFACE_KEY, [])
                internal_interfaces.append(interface)
                service[n_constants.INTERFACE_KEY] = internal_interfaces
        return service_lists
    def get_svm_mgmt_ports(self, context, service_lists):
        """Attach management ports, keyed by hosting device, to services."""
        # only a service instance for each service type in a device
        service_dicts = dict((s['devices'][0], s) for s in service_lists)
        servicevm_type = n_constants.SERVICEVM_OWNER_MGMT
        interfaces = self.get_sync_svm_device_ports(context, service_dicts.keys(),
                                                    servicevm_type)
        for interface in interfaces:
            service = service_dicts.get(interface['servicevm_device'])
            if service:
                internal_interfaces = service.get(n_constants.MANAGERMENT_KEY, [])
                internal_interfaces.append(interface)
                service[n_constants.MANAGERMENT_KEY] = internal_interfaces
        return service_lists
    def get_device_details(self, rpc_context, **kwargs):
        """Agent requests device details.

        Looks up the OVS port for 'device'; returns a minimal entry (and
        logs) when the port is not found. 'host' is currently unused.
        """
        device = kwargs.get('device')
        host = kwargs.get('host')
        port = ovs_db_v2.get_port(device)
        if port:
            binding = ovs_db_v2.get_network_binding(None, port['network_id'])
            entry = {'device': device,
                     'network_id': port['network_id'],
                     'port_id': port['id'],
                     'admin_state_up': port['admin_state_up']}
            #cinghu raise attribut error
            #'network_type': binding.network_type,
            #'segmentation_id': binding.segmentation_id,
            #'physical_network': binding.physical_network}
        else:
            entry = {'device': device}
            LOG.debug(_("%s can not be found in database"), device)
        return entry
def get_devices_details_list(self, rpc_context, devices, host):
return [
self.get_device_details(
rpc_context,
device=device,
host=host
)
for device in devices
]
| CingHu/neutron-ustack | neutron/db/vm/vm_db.py | vm_db.py | py | 67,145 | python | en | code | 0 | github-code | 36 |
41289141746 | import itertools
from Atom import Atom
class Struct:
    """Composite type whose memory-layout metrics are precomputed.

    For a struct made of Atom members and/or nested Structs, three layouts
    are evaluated eagerly on construction:

    - ``unpackaged``: members in declaration order, honoring alignment;
    - ``packaged``:   members tightly packed, ignoring alignment;
    - ``optimal``:    the member permutation wasting the least padding
      (brute force; the packing problem is NP-complete).

    Each metric is a tuple ``(size, alignment, unused_bytes)``.
    """

    def __init__(self, name, types):
        self.name = name
        self.types = types
        self.unpackaged = self._calculate_space_unpackaged()
        self.packaged = self._calculate_space_packaged()
        self.optimal = self._calculate_space_optimal()

    def _calculate_space_unpackaged(self):
        """Metrics with members laid out in the declared order.

        ``align`` tracks the running offset; ``unused`` counts padding
        bytes inserted to satisfy member alignment.
        """
        align = 0
        unused = 0
        for typ in self.types:
            if isinstance(typ, Atom):
                typ_align = typ.align
                typ_repre = typ.repre
            else:
                typ_align = typ.unpackaged[1]
                typ_repre = typ.unpackaged[0]
            # Pad up to the next multiple of this member's alignment.
            if align % typ_align != 0:
                desp = typ_align - (align % typ_align)
                unused += desp
                align += desp
            align += typ_repre
        # The struct's own alignment comes from its first member.
        first = self.types[0]
        if isinstance(first, Atom):
            al = first.align
        else:
            # NOTE(review): reads the nested struct's *packaged* alignment
            # here, while the loop above uses unpackaged[1] -- confirm this
            # asymmetry is intended.
            al = first.packaged[1]
        return (align, al, unused)

    def _calculate_space_packaged(self):
        """Metrics ignoring alignment: the size is the sum of member sizes."""
        used = 0
        for typ in self.types:
            if isinstance(typ, Atom):
                typ_repre = typ.repre
            else:
                typ_repre = typ.packaged[0]
            used += typ_repre
        # The alignment is given by the first element.
        first = self.types[0]
        if isinstance(first, Atom):
            al = first.align
        else:
            al = first.packaged[1]
        return (used, al, 0)

    def _calculate_space_optimal(self):
        """Metrics for the member order that wastes the least padding.

        Brute-forces every permutation, temporarily rebinding ``self.types``
        so the unpackaged computation can be reused, and restores the
        original order before returning.
        """
        original = self.types
        permutations = list(itertools.permutations(self.types))
        optimal_perm = None
        for permutation in permutations:
            self.types = list(permutation)
            space = self._calculate_space_unpackaged()
            # Keep the layout with the fewest unused (padding) bytes.
            if optimal_perm:
                if optimal_perm[2] > space[2]:
                    optimal_perm = space
            else:
                optimal_perm = space
        self.types = original
        return optimal_perm

    def __str__(self):
        # BUG FIX: this previously read self.typ, which does not exist
        # (AttributeError); the attribute holding the name is self.name.
        return f'Soy un {self.name}'
| mfaria724/ci3641-examen2 | pregunta3/Struct.py | Struct.py | py | 2,603 | python | en | code | 0 | github-code | 36 |
36011232562 | #!/bin/python
# -*- coding: utf-8 -*-
# Created by 顾洋溢
from test_interfacecase.public.reg_bythird import Reg_bythird
import unittest
import HTMLTestRunner
class Reg_bythirdTestCase(unittest.TestCase):
    """Interface test for registering an account via third-party login."""

    def setUp(self):
        self.regbythird = Reg_bythird()

    def test_reg_bythird(self):
        """Third-party login: registering account info should return "0"."""
        self.assertEqual(self.regbythird.reg_bythird(), "0")

    def tearDown(self):
        self.regbythird = None
if __name__ == "__main__":
    suite = unittest.TestSuite()
    suite.addTest(Reg_bythirdTestCase("test_reg_bythird"))
    # More cases can be added to the suite here.
    unittest.TextTestRunner().run(suite)
17684485992 | # -*- mode: python -*-
# PyInstaller build specification for the AYAB desktop application.
# The Analysis/PYZ/EXE/COLLECT/Tree names are injected by PyInstaller
# when it executes this file.
import sys
block_cipher = None
# Analyze the entry script and collect its dependencies and data files.
a = Analysis(['ayab/ayab.py'],
             pathex=['./ayab'],
             hiddenimports=[],
             binaries=[],
             datas=[('patterns','patterns')],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher)
pyz = PYZ(a.pure, a.zipped_data,
          cipher=block_cipher)
# Build the bootloader executable (console + debug enabled for diagnostics).
exe = EXE(pyz,
          a.scripts,
          exclude_binaries=True,
          name='ayab',
          debug=True,
          strip=False,
          upx=False,
          console=True,
          icon='windows-build/AYAB.ico')
# Include all files in plugins folder
plugin_tree = Tree('ayab/plugins', prefix = 'plugins')
# add README to that TOC for convenience
plugin_tree += [('README.md', './README.md', 'DATA')]
plugin_tree += [('package_version', './package_version', 'DATA')]
# Assemble the one-folder distribution.
coll = COLLECT(exe,
               a.binaries,
               a.zipfiles,
               a.datas,
               plugin_tree,
               strip=False,
               upx=False,
               name='ayab')
| Adrienne200/ayab-desktop | ayab.spec | ayab.spec | spec | 1,181 | python | en | code | null | github-code | 36 |
73934948902 | # coding=utf-8
# Bouncing smiley game
import pygame
pygame.init()
screen = pygame.display.set_mode([800,600])
keep_going = True
# step1. load the image
# smilePic = pygame.image.load("smile.gif")
smilePic = pygame.image.load("data/asprite.bmp")
# Strip the image's white background/border; appears ineffective for gif.
colorkey = smilePic.get_at((0,0))
smilePic.set_colorkey(colorkey)
# step2. set X/Y coordinates so the sprite can move
picX = 0
picY = 0
BLACK = (0,0,0)
timer = pygame.time.Clock()
# speed = 5
speedX = 5
speedY = 5
while keep_going:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            keep_going = False
    picX += speedX
    picY += speedY
    # step3. simulate bouncing off the walls
    if picX <= 0 or smilePic.get_width() + picX >= 800 :
        # Negating the speed reverses the direction of travel.
        # speed = -speed
        speedX = -speedX
    if picY <= 0 or smilePic.get_height() + picY >= 600 :
        speedY = -speedY
    # Fill the window with black each frame to erase the sprite's trail.
    screen.fill(BLACK)
    # blit() copies pixels from one surface to another; here it draws the
    # loaded image onto the display window at (picX, picY).
    screen.blit(smilePic,(picX,picY))
    # pygame.display.update()
    pygame.display.flip()
    timer.tick(60)
pygame.quit()
pygame.quit()
| jellier/forPython2.7 | SmileBounce.py | SmileBounce.py | py | 1,375 | python | en | code | 0 | github-code | 36 |
2786391582 | from typing import Any, NoReturn, List
from numpy import ndarray
import socket
import select
import xml.etree.ElementTree as ElementTree
import logging
import errno
from base_classes import PostProcessorBase
class SocketServerPostProcessor(PostProcessorBase):
    """
    Outputs data to clients as a TCP server.

    Configuration info:

    - `Port`: The port number of the TCP server. This must follow the port usage rules specified by the FRC Game
      Manual. A port number in the range 5800-5810 is recommended.
    """
    port = int()
    sock = None

    async def setup(self, component_config_root: ElementTree.Element):
        """Read the port from config, bind/listen, and init select() bookkeeping."""
        self.port = int(component_config_root.find("Port").text)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow fast restarts without waiting out TIME_WAIT.
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind(('0.0.0.0', self.port))
        self.sock.listen(5)
        logging.debug("Server listening on port " + str(self.port))
        self.read_list = [self.sock]
        self.write_list = []

    # noinspection PyMethodMayBeStatic
    async def cleanup(self):
        """Close the listening socket."""
        self.sock.close()

    async def postprocess(self, data: List[Any], frame: ndarray) -> NoReturn:
        """Accept new clients, honor a b"shutdown" command from any client, and
        send the first result in `data` to every writable client."""

        def disconnect(s: socket.socket) -> NoReturn:
            # Remove the socket from every bookkeeping list before closing it.
            if s in self.read_list:
                self.read_list.remove(s)
            if s in self.write_list:
                self.write_list.remove(s)
            if s in readable:
                readable.remove(s)
            if s in writable:
                writable.remove(s)
            s.close()

        # Fix: previously these names were unbound (NameError below) when
        # select.select raised; default to empty lists.
        readable, writable, errored = [], [], []
        try:
            readable, writable, errored = select.select(self.read_list, self.write_list, self.read_list, 0.02)
        except select.error as e:
            print(e)

        for s in errored:
            # Fix: logging.warn is a deprecated alias of logging.warning.
            logging.warning("exceptional condition on " + str(s.getpeername()))
            disconnect(s)

        for s in readable:
            if s is self.sock:
                client_socket, address = self.sock.accept()
                logging.debug("Accepted connection from " + str(client_socket.getpeername()))
                client_socket.setblocking(0)
                self.read_list.append(client_socket)
                self.write_list.append(client_socket)
            else:
                data_in = s.recv(1024)
                if data_in:
                    if data_in.startswith(b"shutdown"):
                        raise KeyboardInterrupt

        for s in writable:
            if len(data) > 0:
                message = self.to_string(data[0])
                try:
                    s.send(bytes(message, "utf-8"))
                except socket.error as err:
                    if err.errno == errno.EPIPE:
                        logging.warning("client unexpectedly disconnected")
                        disconnect(s)

    @staticmethod
    def to_string(data: Any):
        """Serialize one result object for the wire (newline-terminated angle)."""
        return str(data.angle) + "\n"
| 1777TheVikings/FRC1777-Vision | postprocessors/socketserver.py | socketserver.py | py | 2,969 | python | en | code | 1 | github-code | 36 |
73015585385 | from nose.tools import assert_raises
from libsheep.protocol import Message, MessagePart, Context, Command
from libsheep.features.base import BASE, SUP
def test_disabled_base_feature_decodes_generic_instance():
    """Without BASE enabled, SUP decodes as the plain generic Command class."""
    m = Message.decode('HSUP ADBASE ADTIGR')
    assert type(m.command) is Command
def test_enabled_base_features_decodes_command_instance():
    """Enabling BASE registers its command classes, so decode yields a SUP."""
    BASE().enable()
    m = Message.decode('HSUP ADBASE ADTIGR')
    assert isinstance(m.command, SUP)
# def test_sup_command():
# m = Message.decode('HSUP ADBASE ADTIGR')
# assert m.command.add_features == set(['BASE', 'TIGR'])
# assert m.command.remove_features == set([]) | exogen/80sheep | tests/test_base.py | test_base.py | py | 641 | python | en | code | 4 | github-code | 36 |
31414302477 | """Test suite for Point Redemption Module."""
from .base_test import BaseTestCase, User
class PointRedemptionBaseTestCase(BaseTestCase):
    """Test class for Society point redemption including endpoint."""

    def setUp(self):
        """Set up all needed variables."""
        BaseTestCase.setUp(self)
        # Persist role, society and redemption fixtures created by BaseTestCase.
        self.president_role.save()
        self.v_president_role.save()
        self.successops_role.save()
        self.invictus.save()
        self.istelle.save()
        self.sparks.save()
        self.phoenix.save()
        self.redemp_req.save()
        self.test_cio.save()
        # A dedicated president user for the Sparks society.
        self.sparks_president = User(
            uuid="-KdQsMtixG4U0y_-yJEHsparks",
            name="Test Sparks President",
            photo="https://lh6.googleusercontent.com/-1DhBLOJentg/AAAAAAAAA"
            "AI/AAAAAAnAABc/ImeP_cAI/photo.jpg?sz=50",
            email="test.sparks.president.societies@andela.com",
            center=self.nairobi,
            cohort=self.cohort_12_Ke,
            society=self.sparks
        )
        self.sparks_president.roles.append(self.president_role)
| andela/andela-societies-backend | src/tests/points_redemption_base_test_case_setup.py | points_redemption_base_test_case_setup.py | py | 1,095 | python | en | code | 1 | github-code | 36 |
24903490945 | # Importing the Pillow library
from PIL import Image, ImageDraw, ImageFont
import os
def text_wrapper(text, font, max_width):
# Totally not stolen from eyong kevin https://gist.github.com/Eyongkevin/adbac2334f1355d8045111c264d80621
list_of_lines = []
if font.getlength(text) <= max_width:
return text
else:
# split the line by spaces to get words
words = text.split(' ')
i = 0
# append every word to a line while its width is shorter than the image width
while i < len(words):
line = ''
while i < len(words) and font.getlength(line + words[i]) <= max_width:
line = line + words[i] + " "
i += 1
if not line:
line = words[i]
i += 1
list_of_lines.append(f"{line}")
new_line = "\n"
return new_line.join(list_of_lines)
def get_colors_from_colorcombo_image(colorcombo_path, colorcombo_outline_path):
colorcombo_image = Image.open(colorcombo_path)
colorcombo_outline = Image.open(colorcombo_outline_path)
colorcombo_image.convert("RGBA")
get_pixel_color = colorcombo_image.load()
color_top = get_pixel_color[100, 20]
color_top_font = get_pixel_color[500, 20]
color_bottom = get_pixel_color[1000, 20]
color_bottom_font = get_pixel_color[1500, 20]
get_pixel_color = colorcombo_outline.load()
color_outline = get_pixel_color[10, 10]
color_outline = (color_outline[0], color_outline[1], color_outline[2], 190)
return color_top, color_top_font, color_bottom, color_bottom_font, color_outline
def create_cover(thumbnail_author_path, video_thumbnail_path, author, title, colorcombo, sender, color_number, outline):
# Cover size = 1280 x 1650
colorcombo_path = f"resources/color_combinations/{colorcombo}"
colorcombo_outline_path = f"resources/color_combinations/{colorcombo[:6]}_outline.png"
colors_list = get_colors_from_colorcombo_image(colorcombo_path, colorcombo_outline_path)
myfont = ImageFont.truetype("resources/Ubuntu-R.ttf", 50)
image_cover = Image.new("RGBA", (1280, 1650), "white")
image_top = Image.new("RGBA", (1280, 465), color=colors_list[0])
image_icon = Image.open(thumbnail_author_path).convert("RGBA")
image_thumbnail = Image.open(video_thumbnail_path).convert("RGBA")
image_bottom = Image.new("RGBA", (1280, 465), color=colors_list[2])
image_top_draw = ImageDraw.Draw(image_top)
image_bottom_draw = ImageDraw.Draw(image_bottom)
# FIXME: Implement text resizing based on height of the text.
image_top_draw.text((100, 320), text_wrapper(author, myfont, 1000), font=myfont, fill=colors_list[1])
image_bottom_draw.multiline_text((100, 100), text_wrapper(title, myfont, 1130), font=myfont, fill=colors_list[4])
image_cover.paste(image_top, (0, 0))
image_cover.paste(image_icon, (100, 100))
image_cover.paste(image_thumbnail, (0, 465))
image_cover.paste(image_bottom, (0, 720 + 465))
# FIXME, add this outside. When implementing cover editor
proper_senders = ["epub", "showcover", "thumbnail"]
if sender in proper_senders:
outline_rectangle = (colors_list[4][0], colors_list[4][1], colors_list[4][2], 190)
else:
if color_number < 4:
outline_rectangle = (colors_list[color_number][0], colors_list[color_number][1], colors_list[color_number][2], 190)
else:
outline_rectangle = (colors_list[4][0], colors_list[4][1], colors_list[4][2], 190)
if outline:
layer_rectangle = Image.new("RGBA", image_cover.size, color=(0, 0, 0, 0))
ImageDraw.Draw(layer_rectangle).rectangle((50, 50, 1230, 1600), outline=outline_rectangle, width=13)
final_cover = Image.alpha_composite(image_cover, layer_rectangle)
else:
final_cover = image_cover
# FIXME: Change from arbitray number "ifs" to more readeable form
if sender == "epub":
path = f"tmp/cover.png"
final_cover.save(path, "PNG")
elif sender == "thumbnail":
final_cover.thumbnail((330,425))
path = f"tmp/cover_thumbnail.png"
final_cover.save(path, "PNG")
return path
else:
# this should be used by cover.py for testing purposes only
if color_number < 4:
path = f"tmp/cover{colorcombo[:6]}{color_number}.png"
final_cover.save(path, "PNG")
elif color_number == 10:
path = f"tmp/cover{colorcombo[:6]}{color_number}.png"
final_cover.save(path, "PNG")
elif color_number == 42:
final_cover.thumbnail((256,330))
path = f"tmp/{colorcombo[:6]}_thumbnail.png"
final_cover.save(path, "PNG")
elif color_number == 5:
# This is for testing purposes only
print("imhere")
path = f"tmp/cover_thumbnail.png"
final_cover.save(path, "PNG")
return path
def generate_all(type_of_ouput):
# Testing different color combinations and outlines of said combinations
author_thumbnail_path = "resources/test_graphics/author_thumbnail_test.png"
video_thumbnail_path = "resources/test_graphics/thumbnail_test.png"
author = "This is a test of Author Name"
title = "This is just a Title Test Case with some words when you fill out the Youtube URL field you will see " \
"different result"
path = "resources/color_combinations"
color_combinations = [0, 1, 2, 3]
for colorcombo in os.listdir(path):
if "outline" in colorcombo:
pass
else:
if type_of_ouput == "thumbnails":
create_cover(author_thumbnail_path, video_thumbnail_path, author, title, colorcombo, "epub", 42, True)
elif type_of_ouput == "all":
for color in color_combinations:
create_cover(author_thumbnail_path, video_thumbnail_path, author, title, colorcombo, "epub", color, True)
elif type_of_ouput== "outline":
create_cover(author_thumbnail_path, video_thumbnail_path, author, title, colorcombo, "epub", 5, True)
def main():
# create_cover(url, video_thumbnail_path, author, title, "combo4.png", "epub", 10)
# generate_all_cover_options(url, video_thumbnail_path, author, title, "outline")
# type of options
# "thumbnails" - generates thumbnail versions of covers
# "outline" - generates covers from all theme files using coresponding *_outline.png file
# "all" - generates covers using all base colors of a cover theme to get covers. Outuput is number_of_colors*number_of_combination.png_files of cover files
generate_all("all")
if __name__ == "__main__":
main()
| maticahacic/transcript_to_ebook | cover.py | cover.py | py | 6,794 | python | en | code | 12 | github-code | 36 |
5951741639 | from pymongo import MongoClient
import tiktoken
from configsecrets import mongoConnection
def upload_to_mongo(data):
    """Insert one document dict into the MongoDB collection from configsecrets.

    NOTE(review): insert_one mutates `data` by adding an `_id` key, so passing
    the same dict twice may raise DuplicateKeyError — verify callers.
    """
    # connect to mongodb
    with MongoClient(host=mongoConnection["host"],
                     port=mongoConnection["port"],
                     username=mongoConnection["login"],
                     password=mongoConnection["password"]
                     ) as client:
        db = client[mongoConnection["db"]]
        collection = db[mongoConnection["collection"]]
        # insert data
        collection.insert_one(data)
def count_tokens(text:str) -> int:
    """Number of tokens `text` occupies under the ada-002 embedding tokenizer."""
    encoding = tiktoken.encoding_for_model("text-embedding-ada-002")
    return len(encoding.encode(text))
def get_text_info(document) -> dict:
    """Stub: extract text and metadata from `document`.

    NOTE(review): placeholder — `text`, `title`, `author` and `url` are never
    assigned, so calling this as-is raises NameError; implement extraction first.
    """
    # do something to get info from the document
    return {"text":text, "title":title, "author":author, "url":url, "token_count":count_tokens(text)}
def split_text(text:str, MAX_TOKENS) -> list:
    """Stub: split `text` into chunks of at most MAX_TOKENS tokens.

    NOTE(review): placeholder — `text_chunks` is never assigned, so calling
    this as-is raises NameError.
    """
    # split text into chunks that are smaller than the max token count, if needed
    return text_chunks
if __name__ == "__main__":
MAX_TOKENS = 512
documents = ["**some","list", "of", "documents**"]
for document in documents:
text_info = get_text_info(document)
# check if the text is too long, in which case, break into chunks as needed
if text_info["token_count"] > MAX_TOKENS:
text_chunks = split_text(text_info["text"],MAX_TOKENS)
for chunk in text_chunks:
token_count = count_tokens(chunk)
text_info["token_count"] = token_count
text_info["text"] = chunk
upload_to_mongo(text_info)
else:
upload_to_mongo(text_info)
| wyler-m/docubot | load_documents.py | load_documents.py | py | 1,704 | python | en | code | 0 | github-code | 36 |
41417358000 | # Problem statement link
# https://leetcode.com/problems/reverse-integer/
class Solution:
    def reverse(self, x: int) -> int:
        """Reverse the decimal digits of x, preserving sign.

        Returns 0 when the reversed value falls outside the signed 32-bit
        range [-2**31, 2**31 - 1], per the problem statement.

        Fixes vs. original: the old code returned a *string* despite the
        declared `-> int`, and its overflow check used strict bounds that
        mishandled the exact 32-bit limits.
        """
        sign = -1 if x < 0 else 1
        # int() conversion naturally drops any leading zeros after reversal.
        rev = sign * int(str(abs(x))[::-1])
        if not (-2**31 <= rev <= 2**31 - 1):
            return 0
        return rev
| gproxx-coder/LeetCode | reverse-integer.py | reverse-integer.py | py | 694 | python | en | code | 0 | github-code | 36 |
2265981494 | import os
import json
import sqlite3
import argparse
import logging
import pickle
from copy import deepcopy
import difflib
import traceback
from semparser.common import registry
from semparser.common.utils import print_dict
from semparser.modules.semantic_parser.preprocessor.process_spider_sql import get_sql, get_schema
from semparser.modules.semantic_parser.asdl.spider.spider_hypothesis import SpiderDecodeHypothesis
from semparser.modules.semantic_parser.inference.spider_ast import SpiderAST
# Flag to disable value evaluation
DISABLE_VALUE = True
# Flag to disable distinct in select evaluation
DISABLE_DISTINCT = True
CLAUSE_KEYWORDS = ('select', 'from', 'where', 'group', 'order',
'limit', 'intersect', 'union', 'except')
JOIN_KEYWORDS = ('join', 'on', 'as')
WHERE_OPS = ('not', 'between', '=', '>', '<', '>=', '<=', '!=', 'in', 'like', 'is', 'exists')
UNIT_OPS = ('none', '-', '+', "*", '/')
AGG_OPS = ('none', 'max', 'min', 'count', 'sum', 'avg')
TABLE_TYPE = {
'sql': "sql",
'table_unit': "table_unit",
}
COND_OPS = ('and', 'or')
SQL_OPS = ('intersect', 'union', 'except')
ORDER_OPS = ('desc', 'asc')
HARDNESS = {
"component1": ('where', 'group', 'order', 'limit', 'join', 'or', 'like'),
"component2": ('except', 'union', 'intersect')
}
class Schema:
    """Wraps a {table: [column, ...]} mapping and exposes the lowercase
    "__table.column__" identifier map consumed by the SQL parser."""

    def __init__(self, schema):
        self._schema = schema
        self._idMap = self._map(self._schema)

    @property
    def schema(self):
        return self._schema

    @property
    def idMap(self):
        return self._idMap

    def _map(self, schema):
        """Build {'*': '__all__', 'tab.col': '__tab.col__', 'tab': '__tab__'}."""
        # Fix: dict.iteritems() is Python 2 only and raises AttributeError on
        # Python 3; use items(). Also dropped the unused `id` counter that
        # shadowed the builtin.
        idMap = {'*': "__all__"}
        for key, vals in schema.items():
            for val in vals:
                idMap[key.lower() + "." + val.lower()] = "__" + \
                    key.lower() + "." + val.lower() + "__"
        for key in schema:
            idMap[key.lower()] = "__" + key.lower() + "__"
        return idMap
def condition_has_or(conds):
    # A condition list alternates cond-units and connectors; the
    # AND/OR connectors sit at the odd indices.
    return any(connector == 'or' for connector in conds[1::2])
def condition_has_like(conds):
    # Cond units occupy the even indices; field 1 of a unit is its op id.
    like_id = WHERE_OPS.index('like')
    return any(unit[1] == like_id for unit in conds[::2])
def condition_has_sql(conds):
    """True when any cond unit carries a nested SQL dict as a value."""
    for unit in conds[::2]:
        # Fields 3 and 4 are the comparison values; a dict means a subquery.
        if type(unit[3]) is dict or type(unit[4]) is dict:
            return True
    return False
def val_has_op(val_unit):
    """True when the val unit applies an arithmetic unit op (not 'none')."""
    none_idx = UNIT_OPS.index('none')
    return val_unit[0] != none_idx
def has_agg(unit):
    """True when the col unit applies an aggregation function (not 'none')."""
    no_agg = AGG_OPS.index('none')
    return unit[0] != no_agg
def accuracy(count, total):
    """All-or-nothing accuracy: 1 only when every item matched."""
    return 1 if count == total else 0
def recall(count, total):
    """All-or-nothing recall: 1 only when every item matched."""
    return 1 if count == total else 0
def F1(acc, rec):
    """Harmonic mean of accuracy and recall; defined as 0 when both are 0."""
    denominator = acc + rec
    if denominator == 0:
        return 0
    return (2. * acc * rec) / denominator
def get_scores(count, pred_total, label_total):
    """(acc, rec, f1): all ones on a perfect component match, else all zeros."""
    if count == pred_total == label_total:
        return 1, 1, 1
    return 0, 0, 0
def eval_sel(pred, label):
    """Multiset-compare SELECT units; also count matches that ignore the
    aggregation id (unit[1] is the val unit)."""
    pred_units = pred['select'][1]
    remaining = list(label['select'][1])
    remaining_wo_agg = [u[1] for u in remaining]
    pred_total = len(pred_units)
    label_total = len(remaining)
    cnt = 0
    cnt_wo_agg = 0
    for unit in pred_units:
        if unit in remaining:
            cnt += 1
            remaining.remove(unit)
        if unit[1] in remaining_wo_agg:
            cnt_wo_agg += 1
            remaining_wo_agg.remove(unit[1])
    return label_total, pred_total, cnt, cnt_wo_agg
def eval_where(pred, label):
    """Multiset-compare WHERE cond units; also count matches that ignore
    everything but the val unit (cond_unit[2])."""
    pred_conds = pred['where'][::2]
    remaining = label['where'][::2]
    remaining_wo_agg = [u[2] for u in remaining]
    pred_total = len(pred_conds)
    label_total = len(remaining)
    cnt = 0
    cnt_wo_agg = 0
    for unit in pred_conds:
        if unit in remaining:
            cnt += 1
            remaining.remove(unit)
        if unit[2] in remaining_wo_agg:
            cnt_wo_agg += 1
            remaining_wo_agg.remove(unit[2])
    return label_total, pred_total, cnt, cnt_wo_agg
def eval_group(pred, label):
    """Multiset-compare GROUP BY columns (unit[1] is the column id),
    ignoring HAVING.

    Fix: removed two redundant identity list copies
    (`[pred for pred in pred_cols]` etc.) that rebuilt the lists unchanged.
    """
    pred_cols = [unit[1] for unit in pred['groupBy']]
    label_cols = [unit[1] for unit in label['groupBy']]
    pred_total = len(pred_cols)
    label_total = len(label_cols)
    cnt = 0
    for col in pred_cols:
        if col in label_cols:
            cnt += 1
            label_cols.remove(col)
    return label_total, pred_total, cnt
def eval_having(pred, label):
    """Score GROUP BY + HAVING as a single all-or-nothing component; a hit
    requires identical grouping columns and identical HAVING clauses."""
    pred_total = 1 if len(pred['groupBy']) > 0 else 0
    label_total = 1 if len(label['groupBy']) > 0 else 0
    cnt = 0
    if pred_total == 1 and label_total == 1:
        same_cols = [u[1] for u in pred['groupBy']] == [u[1] for u in label['groupBy']]
        if same_cols and pred['having'] == label['having']:
            cnt = 1
    return label_total, pred_total, cnt
def eval_order(pred, label):
    """Score ORDER BY (plus agreement on LIMIT presence) as one component."""
    pred_total = 1 if len(pred['orderBy']) > 0 else 0
    label_total = 1 if len(label['orderBy']) > 0 else 0
    # LIMIT only has to agree on presence/absence, not on its value.
    limits_agree = (pred['limit'] is None) == (label['limit'] is None)
    cnt = 1 if (len(label['orderBy']) > 0
                and pred['orderBy'] == label['orderBy']
                and limits_agree) else 0
    return label_total, pred_total, cnt
def eval_and_or(pred, label):
    """Compare the sets of AND/OR connectors in the two WHERE clauses."""
    pred_ops = set(pred['where'][1::2])
    label_ops = set(label['where'][1::2])
    if pred_ops == label_ops:
        return 1, 1, 1
    return len(pred_ops), len(label_ops), 0
def get_nestedSQL(sql):
    """Collect every nested sub-SQL dict reachable from this query:
    subqueries used as condition values, then INTERSECT/EXCEPT/UNION."""
    nested = []
    all_conds = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    for unit in all_conds:
        for val in (unit[3], unit[4]):
            if type(val) is dict:
                nested.append(val)
    for clause in ('intersect', 'except', 'union'):
        if sql[clause] is not None:
            nested.append(sql[clause])
    return nested
def eval_nested(pred, label):
    """Score one optional nested sub-query pair via recursive exact match."""
    label_total = 0
    pred_total = 0
    cnt = 0
    if pred is not None:
        pred_total += 1
    if label is not None:
        label_total += 1
    if pred is not None and label is not None:
        # Fresh evaluator so the recursive call cannot clobber partial_scores
        # of the caller's evaluator instance.
        cnt += SpiderEvaluator().eval_exact_match(pred, label)
    return label_total, pred_total, cnt
def eval_IUEN(pred, label):
    """Aggregate nested-query scores over INTERSECT / EXCEPT / UNION."""
    parts = [eval_nested(pred[k], label[k]) for k in ('intersect', 'except', 'union')]
    label_total = sum(p[0] for p in parts)
    pred_total = sum(p[1] for p in parts)
    cnt = sum(p[2] for p in parts)
    return label_total, pred_total, cnt
def get_keywords(sql):
    """Set of SQL keywords present in the parsed query dict
    (where/group/having/order + direction/limit/IUEN/or/not/in/like)."""
    res = set()
    if len(sql['where']) > 0:
        res.add('where')
    if len(sql['groupBy']) > 0:
        res.add('group')
    if len(sql['having']) > 0:
        res.add('having')
    if len(sql['orderBy']) > 0:
        # orderBy[0] is the sort direction ('asc'/'desc'), recorded as a keyword too.
        res.add(sql['orderBy'][0])
        res.add('order')
    if sql['limit'] is not None:
        res.add('limit')
    if sql['except'] is not None:
        res.add('except')
    if sql['union'] is not None:
        res.add('union')
    if sql['intersect'] is not None:
        res.add('intersect')
    # or keyword
    ao = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
    if len([token for token in ao if token == 'or']) > 0:
        res.add('or')
    cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    # not keyword
    if len([cond_unit for cond_unit in cond_units if cond_unit[0]]) > 0:
        res.add('not')
    # in keyword
    if len([cond_unit for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('in')]) > 0:
        res.add('in')
    # like keyword
    if len([cond_unit for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('like')]) > 0:
        res.add('like')
    return res
def eval_keywords(pred, label):
    """Compare the SQL keyword sets of prediction and gold."""
    pred_kw = get_keywords(pred)
    label_kw = get_keywords(label)
    # Both are sets, so the match count is just the intersection size.
    overlap = len(pred_kw & label_kw)
    return len(label_kw), len(pred_kw), overlap
def count_agg(units):
    """Number of units that carry an aggregation function."""
    return sum(1 for unit in units if has_agg(unit))
def count_component1(sql):
    """Count "component 1" constructs (WHERE/GROUP/ORDER/LIMIT/JOIN/OR/LIKE)
    for the Spider hardness classification."""
    count = 0
    if len(sql['where']) > 0:
        count += 1
    if len(sql['groupBy']) > 0:
        count += 1
    if len(sql['orderBy']) > 0:
        count += 1
    if sql['limit'] is not None:
        count += 1
    if len(sql['from']['table_units']) > 0:  # JOIN
        # n tables imply n-1 joins.
        count += len(sql['from']['table_units']) - 1
    ao = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
    count += len([token for token in ao if token == 'or'])
    cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    count += len([cond_unit for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('like')])
    return count
def count_component2(sql):
    """Number of nested sub-queries (IUEN plus condition-value subqueries)."""
    return len(get_nestedSQL(sql))
def count_others(sql):
    """Count "other" complexity signals for hardness: >1 aggregation,
    multi-column SELECT, multi-condition WHERE, multi-column GROUP BY."""
    count = 0
    # number of aggregation
    agg_count = count_agg(sql['select'][1])
    agg_count += count_agg(sql['where'][::2])
    agg_count += count_agg(sql['groupBy'])
    if len(sql['orderBy']) > 0:
        # Both col units of each ORDER BY val unit may carry aggregations.
        agg_count += count_agg([unit[1] for unit in sql['orderBy'][1] if unit[1]] + [
            unit[2] for unit in sql['orderBy'][1] if unit[2]])
    agg_count += count_agg(sql['having'])
    if agg_count > 1:
        count += 1
    # number of select columns
    if len(sql['select'][1]) > 1:
        count += 1
    # number of where conditions
    if len(sql['where']) > 1:
        count += 1
    # number of group by clauses
    if len(sql['groupBy']) > 1:
        count += 1
    return count
class SpiderEvaluator:
    """A simple evaluator"""

    def __init__(self):
        # Partial component scores of the most recent eval_exact_match call.
        self.partial_scores = None

    def eval_hardness(self, sql):
        """Classify a parsed SQL dict as easy/medium/hard/extra (Spider rules)."""
        count_comp1_ = count_component1(sql)
        count_comp2_ = count_component2(sql)
        count_others_ = count_others(sql)
        if count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ == 0:
            return "easy"
        elif (count_others_ <= 2 and count_comp1_ <= 1 and count_comp2_ == 0) or \
                (count_comp1_ <= 2 and count_others_ < 2 and count_comp2_ == 0):
            return "medium"
        elif (count_others_ > 2 and count_comp1_ <= 2 and count_comp2_ == 0) or \
                (2 < count_comp1_ <= 3 and count_others_ <= 2 and count_comp2_ == 0) or \
                (count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ <= 1):
            return "hard"
        else:
            return "extra"

    def eval_exact_match(self, pred, label):
        """1 when every partial component has f1 == 1 and the FROM tables agree.

        Side effect: stores the partial scores on self.partial_scores.
        """
        partial_scores = self.eval_partial_match(pred, label)
        self.partial_scores = partial_scores
        for x, score in partial_scores.items():
            if score['f1'] != 1:
                return 0
        if len(label['from']['table_units']) > 0:
            label_tables = sorted(label['from']['table_units'])
            pred_tables = sorted(pred['from']['table_units'])
            return label_tables == pred_tables
        return 1

    def eval_partial_match(self, pred, label):
        """Per-component acc/rec/f1 plus totals for all partial match types."""
        res = {}
        label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['select'] = {'acc': acc, 'rec': rec, 'f1': f1,
                         'label_total': label_total, 'pred_total': pred_total}
        acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
        res['select(no AGG)'] = {'acc': acc, 'rec': rec, 'f1': f1,
                                 'label_total': label_total, 'pred_total': pred_total}
        label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['where'] = {'acc': acc, 'rec': rec, 'f1': f1,
                        'label_total': label_total, 'pred_total': pred_total}
        acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
        res['where(no OP)'] = {'acc': acc, 'rec': rec, 'f1': f1,
                               'label_total': label_total, 'pred_total': pred_total}
        label_total, pred_total, cnt = eval_group(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['group(no Having)'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total,
                                   'pred_total': pred_total}
        label_total, pred_total, cnt = eval_having(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['group'] = {'acc': acc, 'rec': rec, 'f1': f1,
                        'label_total': label_total, 'pred_total': pred_total}
        label_total, pred_total, cnt = eval_order(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['order'] = {'acc': acc, 'rec': rec, 'f1': f1,
                        'label_total': label_total, 'pred_total': pred_total}
        label_total, pred_total, cnt = eval_and_or(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['and/or'] = {'acc': acc, 'rec': rec, 'f1': f1,
                         'label_total': label_total, 'pred_total': pred_total}
        label_total, pred_total, cnt = eval_IUEN(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['IUEN'] = {'acc': acc, 'rec': rec, 'f1': f1,
                       'label_total': label_total, 'pred_total': pred_total}
        label_total, pred_total, cnt = eval_keywords(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['keywords'] = {'acc': acc, 'rec': rec, 'f1': f1,
                           'label_total': label_total, 'pred_total': pred_total}
        return res
def isValidSQL(sql, db):
    """Return True when `sql` executes without error on the sqlite db at `db`.

    Fix: the connection was never closed, leaking one handle per call; it is
    now closed on every path.
    """
    conn = sqlite3.connect(db)
    try:
        cursor = conn.cursor()
        try:
            cursor.execute(sql)
        except Exception:
            return False
        return True
    finally:
        conn.close()
def print_scores(scores, etype, p_func=print):
    """Pretty-print the score table per hardness level.

    etype selects which sections appear: "exec", "match", or "all".
    p_func lets callers redirect output (e.g. to a logger).
    """
    levels = ['easy', 'medium', 'hard', 'extra', 'all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']

    p_func("{:20} {:20} {:20} {:20} {:20} {:20}".format("", *levels))
    counts = [scores[level]['count'] for level in levels]
    p_func("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts))

    if etype in ["all", "exec"]:
        p_func('===================== EXECUTION ACCURACY =====================')
        this_scores = [scores[level]['exec'] for level in levels]
        p_func("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(
            "execution", *this_scores))

    if etype in ["all", "match"]:
        p_func('\n====================== EXACT MATCHING ACCURACY =====================')
        exact_scores = [scores[level]['exact'] for level in levels]
        p_func("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(
            "exact match", *exact_scores))
        # Optional alternate exact-match metric, printed only when present.
        if 'exact_alan' in scores['all']:
            exact_scores_alan = [scores[level]['exact_alan'] for level in levels]
            p_func("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(
                "Alan exact match", *exact_scores_alan))
        p_func('\n---------------------PARTIAL MATCHING ACCURACY----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['acc'] for level in levels]
            p_func("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))

        p_func('---------------------- PARTIAL MATCHING RECALL ----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['rec'] for level in levels]
            p_func("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))

        p_func('---------------------- PARTIAL MATCHING F1 --------------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['f1'] for level in levels]
            p_func("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
def evaluate(gold, predict, db_dir, etype, kmaps):
    """Score predicted SQL (one per line in `predict`) against gold SQL
    ("sql\\tdb_name" per line in `gold`) and print the score table.

    etype: "exec" (execution match), "match" (exact/partial match), or "all".
    kmaps: per-database foreign-key maps used to canonicalize column ids.

    Fix: `entries.append(...)` previously ran unconditionally but referenced
    `exact_score`/`partial_scores`, which are only bound inside the
    etype in ["all", "match"] branch — etype == "exec" raised NameError.
    The append now lives inside that branch.
    """
    with open(gold) as f:
        glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]

    with open(predict) as f:
        plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]

    evaluator = SpiderEvaluator()

    levels = ['easy', 'medium', 'hard', 'extra', 'all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']
    entries = []
    scores = {}

    for level in levels:
        scores[level] = {'count': 0, 'partial': {}, 'exact': 0.}
        scores[level]['exec'] = 0
        for type_ in partial_types:
            scores[level]['partial'][type_] = {'acc': 0.,
                                               'rec': 0., 'f1': 0., 'acc_count': 0, 'rec_count': 0}

    eval_err_num = 0
    for p, g in zip(plist, glist):
        p_str = p[0]
        g_str, db = g
        db_name = db
        db = os.path.join(db_dir, db, db + ".sqlite")
        schema = Schema(get_schema(db))
        g_sql = get_sql(schema, g_str)
        hardness = evaluator.eval_hardness(g_sql)
        scores[hardness]['count'] += 1
        scores['all']['count'] += 1

        try:
            p_sql = get_sql(schema, p_str)
        except Exception:
            # If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql
            p_sql = {
                "except": None,
                "from": {
                    "conds": [],
                    "table_units": []
                },
                "groupBy": [],
                "having": [],
                "intersect": None,
                "limit": None,
                "orderBy": [],
                "select": [
                    False,
                    []
                ],
                "union": None,
                "where": []
            }
            eval_err_num += 1
            print("eval_err_num:{}".format(eval_err_num))

        # rebuild sql for value evaluation
        kmap = kmaps[db_name]
        g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
        g_sql = rebuild_sql_val(g_sql)
        g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
        p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
        p_sql = rebuild_sql_val(p_sql)
        p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)

        if etype in ["all", "exec"]:
            exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql)
            if exec_score:
                scores[hardness]['exec'] += 1

        if etype in ["all", "match"]:
            exact_score = evaluator.eval_exact_match(p_sql, g_sql)
            partial_scores = evaluator.partial_scores
            if exact_score == 0:
                print("{} pred: {}".format(hardness, p_str))
                print("{} gold: {}".format(hardness, g_str))
                print("")
            scores[hardness]['exact'] += exact_score
            scores['all']['exact'] += exact_score
            for type_ in partial_types:
                if partial_scores[type_]['pred_total'] > 0:
                    scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
                    scores[hardness]['partial'][type_]['acc_count'] += 1
                if partial_scores[type_]['label_total'] > 0:
                    scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
                    scores[hardness]['partial'][type_]['rec_count'] += 1
                scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
                if partial_scores[type_]['pred_total'] > 0:
                    scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
                    scores['all']['partial'][type_]['acc_count'] += 1
                if partial_scores[type_]['label_total'] > 0:
                    scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
                    scores['all']['partial'][type_]['rec_count'] += 1
                scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']

            # Moved inside the match branch: exact_score/partial_scores are
            # only defined here (previously a NameError under etype == "exec").
            entries.append({
                'predictSQL': p_str,
                'goldSQL': g_str,
                'hardness': hardness,
                'exact': exact_score,
                'partial': partial_scores
            })

    for level in levels:
        if scores[level]['count'] == 0:
            continue
        if etype in ["all", "exec"]:
            scores[level]['exec'] /= scores[level]['count']

        if etype in ["all", "match"]:
            scores[level]['exact'] /= scores[level]['count']
            for type_ in partial_types:
                if scores[level]['partial'][type_]['acc_count'] == 0:
                    scores[level]['partial'][type_]['acc'] = 0
                else:
                    scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \
                        scores[level]['partial'][type_]['acc_count'] * 1.0
                if scores[level]['partial'][type_]['rec_count'] == 0:
                    scores[level]['partial'][type_]['rec'] = 0
                else:
                    scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \
                        scores[level]['partial'][type_]['rec_count'] * 1.0
                if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0:
                    scores[level]['partial'][type_]['f1'] = 1
                else:
                    scores[level]['partial'][type_]['f1'] = \
                        2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / (
                            scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc'])

    print_scores(scores, etype)
def eval_exec_match(db, p_str, g_str, pred, gold):
    """
    return 1 if the values between prediction and gold are matching
    in the corresponding index. Currently not support multiple col_unit(pairs).

    Fix: the sqlite connection was never closed; it is now closed on every path.
    """
    conn = sqlite3.connect(db)
    cursor = conn.cursor()
    try:
        try:
            cursor.execute(p_str)
            p_res = cursor.fetchall()
        except Exception:
            # An unexecutable prediction counts as a miss.
            return False

        cursor.execute(g_str)
        q_res = cursor.fetchall()
    finally:
        conn.close()

    def res_map(res, val_units):
        # Key each result column by its val unit so column order is irrelevant.
        rmap = {}
        for idx, val_unit in enumerate(val_units):
            key = tuple(val_unit[1]) if not val_unit[2] else (
                val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
            rmap[key] = [r[idx] for r in res]
        return rmap

    p_val_units = [unit[1] for unit in pred['select'][1]]
    q_val_units = [unit[1] for unit in gold['select'][1]]
    return res_map(p_res, p_val_units) == res_map(q_res, q_val_units)
# Rebuild SQL functions for value evaluation
def rebuild_cond_unit_val(cond_unit):
    """Blank out literal comparison values in a cond unit (no-op when value
    evaluation is enabled); nested SQL dict values are rebuilt recursively."""
    if cond_unit is None or not DISABLE_VALUE:
        return cond_unit

    not_op, op_id, val_unit, val1, val2 = cond_unit
    if type(val1) is not dict:
        val1 = None
    else:
        val1 = rebuild_sql_val(val1)
    if type(val2) is not dict:
        val2 = None
    else:
        val2 = rebuild_sql_val(val2)
    return not_op, op_id, val_unit, val1, val2
def rebuild_condition_val(condition):
    """Blank out values in every cond unit of a condition list; connectors at
    odd indices pass through untouched."""
    if condition is None or not DISABLE_VALUE:
        return condition
    return [rebuild_cond_unit_val(item) if idx % 2 == 0 else item
            for idx, item in enumerate(condition)]
def rebuild_sql_val(sql):
    """Recursively mask literal values throughout a parsed SQL dict.

    Mutates ``sql`` in place and returns it.  No-op unless DISABLE_VALUE
    is set.
    """
    if sql is None or not DISABLE_VALUE:
        return sql
    from_clause = sql['from']
    from_clause['conds'] = rebuild_condition_val(from_clause['conds'])
    sql['having'] = rebuild_condition_val(sql['having'])
    sql['where'] = rebuild_condition_val(sql['where'])
    for table_unit in from_clause['table_units']:
        if table_unit[0] == 'sql':
            # rebuild_sql_val mutates the nested sql dict in place, so the
            # return value can be discarded here.
            rebuild_sql_val(table_unit[1])
    for clause in ('intersect', 'except', 'union'):
        sql[clause] = rebuild_sql_val(sql[clause])
    return sql
# Rebuild SQL functions for foreign key evaluation
def build_valid_col_units(table_units, schema):
    """Collect the column ids whose parent table occurs in ``table_units``."""
    col_names = schema._table['column_names_original']
    tab_ids = [unit[1] for unit in table_units
               if unit[0] == TABLE_TYPE['table_unit']]
    return [col_id for col_id in schema.idMap.values()
            if col_names[col_id][0] in tab_ids]
def rebuild_col_unit_col(valid_col_units, col_unit, kmap):
    """Canonicalize a col_unit's column id through the foreign-key map.

    Only columns that are both valid (in ``valid_col_units``) and mapped
    (in ``kmap``) are rewritten; DISTINCT flags are dropped when
    DISABLE_DISTINCT is set.
    """
    if col_unit is None:
        return col_unit
    agg_id, col_id, distinct = col_unit
    if col_id in valid_col_units and col_id in kmap:
        col_id = kmap[col_id]
    if DISABLE_DISTINCT:
        distinct = None
    return agg_id, col_id, distinct
def rebuild_val_unit_col(valid_col_units, val_unit, kmap):
    """Canonicalize both col_units inside a val_unit."""
    if val_unit is None:
        return val_unit
    unit_op, left, right = val_unit
    return (unit_op,
            rebuild_col_unit_col(valid_col_units, left, kmap),
            rebuild_col_unit_col(valid_col_units, right, kmap))
def rebuild_table_unit_col(valid_col_units, table_unit, kmap):
    """Canonicalize a table unit; only tuple payloads (col_units) are
    rewritten, plain table ids pass through untouched."""
    if table_unit is None:
        return table_unit
    table_type, payload = table_unit
    if isinstance(payload, tuple):
        payload = rebuild_col_unit_col(valid_col_units, payload, kmap)
    return table_type, payload
def rebuild_cond_unit_col(valid_col_units, cond_unit, kmap):
    """Canonicalize column references in a condition unit, recursing into
    sub-query values (dicts)."""
    if cond_unit is None:
        return cond_unit
    not_op, op_id, val_unit, val1, val2 = cond_unit
    rebuilt_val_unit = rebuild_val_unit_col(valid_col_units, val_unit, kmap)
    if isinstance(val1, dict):
        val1 = rebuild_sql_col(valid_col_units, val1, kmap)
    if isinstance(val2, dict):
        val2 = rebuild_sql_col(valid_col_units, val2, kmap)
    return not_op, op_id, rebuilt_val_unit, val1, val2
def rebuild_condition_col(valid_col_units, condition, kmap):
    """Rewrite cond_units (even indices) of ``condition`` in place; the
    interleaved boolean operators (odd indices) are left untouched."""
    for pos, item in enumerate(condition):
        if pos % 2 == 0:
            condition[pos] = rebuild_cond_unit_col(valid_col_units, item, kmap)
    return condition
def rebuild_select_col(valid_col_units, sel, kmap):
    """Canonicalize every (agg_id, val_unit) pair of a SELECT clause.

    The DISTINCT flag is cleared when DISABLE_DISTINCT is set.
    """
    if sel is None:
        return sel
    distinct, select_units = sel
    rebuilt = [(agg_id, rebuild_val_unit_col(valid_col_units, val_unit, kmap))
               for agg_id, val_unit in select_units]
    if DISABLE_DISTINCT:
        distinct = None
    return distinct, rebuilt
def rebuild_from_col(valid_col_units, from_, kmap):
    """Canonicalize the table units and join conditions of a FROM clause
    (mutates ``from_`` in place)."""
    if from_ is None:
        return from_
    from_['table_units'] = [
        rebuild_table_unit_col(valid_col_units, table_unit, kmap)
        for table_unit in from_['table_units']
    ]
    from_['conds'] = rebuild_condition_col(valid_col_units, from_['conds'], kmap)
    return from_
def rebuild_group_by_col(valid_col_units, group_by, kmap):
    """Canonicalize every col_unit of a GROUP BY clause."""
    if group_by is None:
        return group_by
    rebuilt = []
    for col_unit in group_by:
        rebuilt.append(rebuild_col_unit_col(valid_col_units, col_unit, kmap))
    return rebuilt
def rebuild_order_by_col(valid_col_units, order_by, kmap):
    """Canonicalize every val_unit of an ORDER BY clause; an absent or
    empty clause is returned unchanged."""
    if order_by is None or len(order_by) == 0:
        return order_by
    direction, val_units = order_by
    return direction, [rebuild_val_unit_col(valid_col_units, vu, kmap)
                       for vu in val_units]
def rebuild_sql_col(valid_col_units, sql, kmap):
    """Canonicalize column references across every clause of a parsed SQL
    dict, recursing into set-operation branches (mutates ``sql``)."""
    if sql is None:
        return sql
    sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap)
    sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap)
    for clause in ('where', 'having'):
        sql[clause] = rebuild_condition_col(valid_col_units, sql[clause], kmap)
    sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap)
    sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap)
    for clause in ('intersect', 'except', 'union'):
        sql[clause] = rebuild_sql_col(valid_col_units, sql[clause], kmap)
    return sql
def build_foreign_key_map(entry):
    """Map every column id in a foreign-key group to the smallest column id
    of its group.

    NOTE: groups are formed greedily — a key pair touching two existing
    groups joins only the first one found; the groups are not merged
    (matches the reference implementation's behavior).
    """
    # Identity mapping from column index to the id used in the output map.
    column_ids = list(range(len(entry["column_names_original"])))

    def _find_or_create_group(a, b, groups):
        # Return the first group containing either endpoint, or a fresh one.
        for group in groups:
            if a in group or b in group:
                return group
        fresh = set()
        groups.append(fresh)
        return fresh

    groups = []
    for a, b in entry["foreign_keys"]:
        group = _find_or_create_group(a, b, groups)
        group.add(a)
        group.add(b)

    key_map = {}
    for group in groups:
        members = sorted(group)
        root = members[0]
        for member in members:
            key_map[column_ids[member]] = column_ids[root]
    return key_map
def build_foreign_key_map_from_json(table):
    """Load a Spider tables JSON file and return {db_id: foreign-key map}."""
    with open(table) as handle:
        entries = json.load(handle)
    return {entry['db_id']: build_foreign_key_map(entry) for entry in entries}
def is_col_valid(col_unit, in_where=False):
    """Validity check for a (agg_id, col_id, distinct) col_unit.

    ``None`` is vacuously valid; an un-aggregated "*" column
    (agg_id == 0 and col_id == 0) is rejected, and any aggregated column
    is rejected when it appears inside a WHERE clause.
    """
    if col_unit is None:
        return True
    agg_id, col_id = col_unit[0], col_unit[1]
    if agg_id == 0 and col_id == 0:
        return False
    return not (in_where and agg_id != 0)
def is_query_valid(_sql, schema):
    """Heuristic validity checks for one decoded Spider SQL query dict.

    Rejects queries with duplicate select items, join conditions that touch
    the synthetic "*" table, join tables inconsistent with the FROM clause,
    single-table FROMs that still carry join conditions, and invalid
    col_units in WHERE / HAVING / GROUP BY / ORDER BY (see is_col_valid).
    """
    # Duplicate (agg_id, val_unit) entries in SELECT are invalid.
    select_body = _sql['select'][1]
    if len(select_body) != len(set(select_body)):
        return False
    # FROM conds interleave cond_units with 'and'/'or'; take cond_units only.
    conds = _sql['from']['conds'][::2]
    tab_in_conds = set()
    for cond in conds:
        # cond is (not_op, op_id, val_unit, val1, val2); cond[2][1][1] is the
        # column id of the val_unit's first col_unit, and cond[3] is treated
        # as a col_unit whose [1] is its column id (join condition layout).
        # Looking up [0] on column_names_original yields the parent table id;
        # -1 marks the synthetic "*" column's table.
        tab1 = schema._table['column_names_original'][cond[2][1][1]][0]
        tab2 = schema._table['column_names_original'][cond[3][1]][0]
        if tab1 == -1 or tab2 == -1:
            return False
        tab_in_conds.add(tab1)
        tab_in_conds.add(tab2)
    table_units = _sql['from']['table_units']
    tab_in_from = set()
    for tab in table_units:
        # int payloads are plain table ids; dict payloads are sub-queries.
        if isinstance(tab[1], int):
            tab_in_from.add(tab[1])
    # With multiple tables in FROM, the join conditions must cover exactly
    # the joined tables.
    if len(tab_in_from) > 1 and tab_in_conds != tab_in_from:
        return False
    # A single-table FROM cannot carry join conditions.
    if len(table_units) == 1 and len(conds) > 0:
        return False
    where_conds = _sql['where'][::2]
    having_conds = _sql['having'][::2]
    for cond in where_conds:
        # cond[3] (val1) may itself be a nested sub-query.
        if isinstance(cond[3], dict) and not is_sql_valid(cond[3], schema):
            return False
        # In WHERE, aggregated columns are disallowed (in_where=True).
        if not is_col_valid(cond[2][1], True):
            return False
        if not is_col_valid(cond[2][2], True):
            return False
    for cond in having_conds:
        if isinstance(cond[3], dict) and not is_sql_valid(cond[3], schema):
            return False
        # HAVING may aggregate, so in_where stays False here.
        if not is_col_valid(cond[2][1]):
            return False
        if not is_col_valid(cond[2][2]):
            return False
    groupBy = _sql['groupBy']
    for col_unit in groupBy:
        if not is_col_valid(col_unit):
            return False
    if len(_sql['orderBy']) > 0:
        # orderBy is (direction, [val_unit, ...]); validate both col_units
        # of every val_unit.
        orderBy = _sql['orderBy'][1]
        for val_unit in orderBy:
            if not is_col_valid(val_unit[1]):
                return False
            if not is_col_valid(val_unit[2]):
                return False
    return True
def is_sql_valid(_sql, schema):
    """Validate a parsed SQL dict: check the first present set-operation
    branch (EXCEPT, then UNION, then INTERSECT), then the query itself."""
    for clause in ('except', 'union', 'intersect'):
        branch = _sql[clause]
        if branch:
            if not is_query_valid(branch, schema):
                return False
            # Only the first truthy branch is inspected, mirroring the
            # original if/elif chain.
            break
    return is_query_valid(_sql, schema)
# Define new evaluator
@registry.register('evaluator', 'spider')
class SpiderSqlEvaluator:
    """Exact-match evaluator for decoded Spider SQL hypotheses.

    Mirrors the official Spider evaluation flow: hypotheses are pruned with
    ``is_sql_valid``, literal values are masked (``rebuild_sql_val``), column
    ids are canonicalized through the foreign-key map (``rebuild_sql_col``),
    and exact / partial match statistics are accumulated per hardness level.
    """

    def __init__(self, transition_system, args):
        # Arguments are accepted only to satisfy the registry's constructor
        # interface; the evaluator keeps no state.
        pass

    @staticmethod
    def print_results(results, p_func=print):
        """Pretty-print a scores dict produced by ``evaluate_dataset``."""
        print_scores(results, 'match', p_func=p_func)

    @staticmethod
    def evaluate_dataset(examples, decode_results, out_path, fast_mode=True,
                         test_mode='dev', save_failed_samples=False):
        """Score ``decode_results`` (one hypothesis list per example).

        In ``fast_mode`` only the top valid hypothesis is scored and full
        partial-match statistics are computed; otherwise an example counts
        as correct when ANY valid hypothesis is an exact match.  Returns a
        dict that always contains an 'accuracy' entry.  With
        ``save_failed_samples`` set, per-failure markdown reports and a
        pickle of predictions are written under ``out_path``.
        """
        evaluator = SpiderEvaluator()
        if fast_mode:
            levels = ['easy', 'medium', 'hard', 'extra', 'all']
            partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                             'group', 'order', 'and/or', 'IUEN', 'keywords']
            etype = 'match'
            scores = {}
            # Init scores
            for level in levels:
                scores[level] = {'count': 0, 'partial': {}, 'exact': 0.}
                for type_ in partial_types:
                    scores[level]['partial'][type_] = {
                        'acc': 0., 'rec': 0., 'f1': 0., 'acc_count': 0, 'rec_count': 0}
            pred_sql = []
            gold_sql = []
            pred_actions = []
            gold_actions = []
            questions = []
            eval_err_num = 0
            idx = 0
            for example, spider_sql in zip(examples, decode_results):
                pruned_hyps = []
                for hyp in spider_sql:
                    if is_sql_valid(hyp.code, example.schema):
                        pruned_hyps.append(hyp)
                gold_spider_sql = example.sql
                # The rebuild_* helpers mutate SQL dicts in place, so keep
                # pristine copies for the output pickle.
                _gold_spider_sql = deepcopy(gold_spider_sql)
                pred_spider_sql = pruned_hyps[:1]
                if not pred_spider_sql:
                    # dummy sql
                    surface_sql = 'SELECT *'
                else:
                    surface_sql = SpiderAST(pred_spider_sql[0].code, example.schema._table).get_sql()
                if not pred_spider_sql:
                    # No valid hypothesis: substitute an empty SQL skeleton so
                    # the example still contributes a (zero) score.
                    p_sql = {
                        "except": None,
                        "from": {
                            "conds": [],
                            "table_units": []
                        },
                        "groupBy": [],
                        "having": [],
                        "intersect": None,
                        "limit": None,
                        "orderBy": [],
                        "select": [
                            False,
                            []
                        ],
                        "union": None,
                        "where": []
                    }
                    pred_spider_sql = SpiderDecodeHypothesis(example.schema)
                    pred_spider_sql.code = p_sql
                    eval_err_num += 1
                else:
                    pred_spider_sql = pred_spider_sql[0]
                _pred_spider_sql = deepcopy(pred_spider_sql.code)
                schema = example.schema
                kmap = build_foreign_key_map(example.schema._table)
                g_valid_col_units = build_valid_col_units(
                    gold_spider_sql['from']['table_units'], schema)
                gold_spider_sql = rebuild_sql_val(gold_spider_sql)
                gold_spider_sql = rebuild_sql_col(g_valid_col_units, gold_spider_sql, kmap)
                p_valid_col_units = build_valid_col_units(
                    pred_spider_sql.code['from']['table_units'], schema)
                pred_spider_sql.code = rebuild_sql_val(pred_spider_sql.code)
                pred_spider_sql.code = rebuild_sql_col(
                    p_valid_col_units, pred_spider_sql.code, kmap)
                hardness = evaluator.eval_hardness(gold_spider_sql)
                scores[hardness]['count'] += 1
                scores['all']['count'] += 1
                exact_score = evaluator.eval_exact_match(pred_spider_sql.code, gold_spider_sql)
                if exact_score == 0 and save_failed_samples:
                    # BUG FIX: the diff strings below were previously bound to
                    # ``pred_actions``/``gold_actions``, clobbering the
                    # accumulator lists initialized above and corrupting the
                    # pickled output; they now use dedicated local names.
                    # The report file is also closed via a with-block.
                    with open(os.path.join(out_path, "%d-%s.md" % (idx, hardness)), "w") as f_out:
                        f_out.write('### Question\n%s\n' % example.original)
                        f_out.write('\n### Spider SQL\n')
                        f_out.write('- ***pred***: ')
                        f_out.write('%s\n' % surface_sql)
                        f_out.write('- ***gold***: ')
                        f_out.write('%s\n' % example.tgt_code)
                        f_out.write('\n### Action Sequences Diff\n')
                        pred_action_strs = [str(a).replace('*', '\\*')
                                            for a in pred_spider_sql.actions]
                        gold_action_strs = [str(a.action).replace('*', '\\*')
                                            for a in example.tgt_actions]
                        for line in difflib.unified_diff(pred_action_strs, gold_action_strs, fromfile='pred', tofile='gold'):
                            f_out.write('\t%s\n' % line)
                        f_out.write('\n### Schema\n')
                        f_out.write('\tcol_id,\ttab_name,\tcol_name\n')
                        for _id, (tab_id, col_name) in enumerate(example.schema._table['exp_column_names_original']):
                            f_out.write('\t%d,\t%s,\t%s\n' % (
                                _id, example.schema._table['exp_table_names_original'][tab_id], col_name))
                        f_out.write('\n### Primary Keys\n%s\n' %
                                    str(example.schema._table['exp_primary_keys']))
                questions.append(" ".join(example.src_sent))
                pred_sql.append(_pred_spider_sql)
                gold_sql.append(_gold_spider_sql)
                pred_actions.append(pred_spider_sql.actions)
                gold_actions.append([a.action for a in example.tgt_actions])
                partial_scores = evaluator.partial_scores
                scores[hardness]['exact'] += exact_score
                scores['all']['exact'] += exact_score
                # Accumulate partial scores into the example's hardness bucket
                # and the aggregate 'all' bucket (previously two copies of the
                # same loop body).
                for type_ in partial_types:
                    for bucket in (hardness, 'all'):
                        if partial_scores[type_]['pred_total'] > 0:
                            scores[bucket]['partial'][type_]['acc'] += partial_scores[type_]['acc']
                            scores[bucket]['partial'][type_]['acc_count'] += 1
                        if partial_scores[type_]['label_total'] > 0:
                            scores[bucket]['partial'][type_]['rec'] += partial_scores[type_]['rec']
                            scores[bucket]['partial'][type_]['rec_count'] += 1
                        scores[bucket]['partial'][type_]['f1'] += partial_scores[type_]['f1']
                idx += 1
            # Turn the accumulated sums into per-level averages.
            for level in levels:
                if scores[level]['count'] == 0:
                    continue
                if etype in ["all", "match"]:
                    scores[level]['exact'] /= scores[level]['count']
                for type_ in partial_types:
                    part = scores[level]['partial'][type_]
                    part['acc'] = part['acc'] / part['acc_count'] * 1.0 if part['acc_count'] else 0
                    part['rec'] = part['rec'] / part['rec_count'] * 1.0 if part['rec_count'] else 0
                    if part['acc'] == 0 and part['rec'] == 0:
                        # Matches the reference script: empty vs empty counts
                        # as a perfect f1.
                        part['f1'] = 1
                    else:
                        part['f1'] = 2.0 * part['acc'] * part['rec'] / (part['rec'] + part['acc'])
            scores["accuracy"] = scores["all"]["exact"]
            out_dict = {
                "questions": questions,
                "pred_sql": pred_sql,
                "gold_sql": gold_sql,
                "pred_actions": pred_actions,
                "gold_actions": gold_actions,
            }
            print("eval_err_num:{}".format(eval_err_num))
            if save_failed_samples:
                with open(os.path.join(out_path, "failed_samples.pkl"), "wb") as out:
                    pickle.dump(out_dict, out)
        else:
            scores = {'accuracy': 0.0}
            for example, spider_sql in zip(examples, decode_results):
                pruned_hyps = []
                for hyp in spider_sql:
                    if is_sql_valid(hyp.code, example.schema):
                        pruned_hyps.append(hyp)
                gold_spider_sql = example.sql
                schema = example.schema
                kmap = build_foreign_key_map(example.schema._table)
                g_valid_col_units = build_valid_col_units(
                    gold_spider_sql['from']['table_units'], schema)
                gold_spider_sql = rebuild_sql_val(gold_spider_sql)
                gold_spider_sql = rebuild_sql_col(g_valid_col_units, gold_spider_sql, kmap)
                # Credit the example if ANY valid hypothesis matches exactly.
                flag = False
                for hyp in pruned_hyps:
                    p_valid_col_units = build_valid_col_units(
                        hyp.code['from']['table_units'], schema)
                    hyp.code = rebuild_sql_val(hyp.code)
                    hyp.code = rebuild_sql_col(p_valid_col_units, hyp.code, kmap)
                    exact_score = evaluator.eval_exact_match(hyp.code, gold_spider_sql)
                    if exact_score:
                        flag = True
                        break
                scores['accuracy'] += 1.0 if flag else 0.0
            scores['accuracy'] /= len(examples)
        return scores
@registry.register('evaluator', 'spider-action-evaluator')
def create_spider_action_prediction_evaluator(transition_system, eval_top_pred_only=True, for_inference=False):
    """Build an evaluation callback that converts decoded action trees to
    surface SQL, then either dumps the predictions (inference mode) or
    scores them with ``SpiderSqlEvaluator``."""
    def evaluate_action_predictions(examples, predictions, exp_dir_path):
        """
        @param examples: list(example)
        @param predictions: list(list(hypothesis))
        @param exp_dir_path: str or None
        @return: list of predicted SQL strings (inference) or accuracy float
        """
        logger = logging.getLogger()

        eva_output_path = None
        if (exp_dir_path is not None) and (not for_inference):
            eva_output_path = os.path.join(exp_dir_path, 'eval_output')
            if not os.path.exists(eva_output_path):
                os.makedirs(eva_output_path)

        # Convert every hypothesis tree to surface code, dropping (and
        # logging) the ones that fail conversion.
        code_results = []
        for data_idx, pred in enumerate(predictions):
            decoded_hyps = []
            for hyp_id, hyp in enumerate(pred):
                try:
                    hyp.code = transition_system.ast_to_surface_code(
                        hyp.tree, examples[data_idx].schema
                    )
                except Exception:
                    logger.error('Exception in converting tree to code:')
                    logger.error(traceback.format_stack())
                    logger.error(traceback.format_exc())
                    logger.error('-' * 60)
                    logger.error('Example: %s\nIntent: %s\nTarget Code:\n%s\nHypothesis[%d]:\n%s' % (
                        data_idx, ' '.join(examples[data_idx].src_sent),
                        examples[data_idx].tgt_code, hyp_id, hyp.tree.to_string()
                    ))
                    logger.error('-' * 60)
                else:
                    decoded_hyps.append(hyp)
            code_results.append(decoded_hyps)

        if for_inference:
            eval_result = []
            for example, hyps in zip(examples, code_results):
                valid_hyps = [h for h in hyps if is_sql_valid(h.code, example.schema)]
                if valid_hyps:
                    surface_sql = SpiderAST(valid_hyps[0].code, example.schema._table).get_sql()
                else:
                    # dummy sql
                    surface_sql = 'SELECT *'
                eval_result.append(surface_sql)
            with open('predicted_sql.txt', 'w') as f:
                for q in eval_result:
                    f.write(q + '\n')
        else:
            evaluator = SpiderSqlEvaluator(None, None)
            eval_results = evaluator.evaluate_dataset(
                examples, code_results, eva_output_path, fast_mode=eval_top_pred_only,
                test_mode='dev', save_failed_samples=eva_output_path is not None)
            print_scores(eval_results, 'match', p_func=logger.info)
            eval_result = eval_results['accuracy']
        return eval_result
    return evaluate_action_predictions
if __name__ == "__main__":
    # CLI entry point for the standalone evaluation script.
    arg_parser = argparse.ArgumentParser()
    for flag in ('gold', 'pred', 'db', 'table', 'etype'):
        arg_parser.add_argument('--' + flag, dest=flag, type=str)
    cli_args = arg_parser.parse_args()
    assert cli_args.etype in ["all", "exec", "match"], "Unknown evaluation method"
    kmaps = build_foreign_key_map_from_json(cli_args.table)
    evaluate(cli_args.gold, cli_args.pred, cli_args.db, cli_args.etype, kmaps)
| BorealisAI/DT-Fixup | spider/semparser/modules/semantic_parser/evaluator/spider_evaluator.py | spider_evaluator.py | py | 47,165 | python | en | code | 15 | github-code | 36 |
36633682423 | # I think there can be something cool to come from this eventually, I'm bad at picking colors
import json
from json import JSONDecoder
class ColorSchemaDeserializationError(Exception):
    """Raised when a color schema cannot be deserialized from JSON
    (malformed JSON or a payload missing the expected structure)."""
    pass
class ColorSchema:
    """A named, wrap-around palette of colors stored as 6-digit hex codes.

    Construct either with a ``name`` (empty palette, filled by subclasses
    or callers via ``self._color_table``) or with ``serialized_json`` as
    produced by ``serialize_json``.
    """

    def __init__(self, name: str = None, serialized_json: str = None):
        # BUG FIX: always initialize the palette.  Previously it was only
        # created in the non-JSON branch, so deserialization crashed with
        # AttributeError on the first append.
        self._color_table = []
        self._name = name
        if serialized_json:
            try:
                data = json.loads(serialized_json)
                self._name = data["colorSchema"]["identifier"]
                for each_rgb_tuple in data["colorSchema"]["rgb_values"]:
                    hex_code = "%02x%02x%02x" % (tuple(each_rgb_tuple))
                    self._color_table.append(hex_code)
            # BUG FIX: the original caught ``JSONDecoder`` (a class, not an
            # exception) which would itself raise TypeError, and let
            # KeyError from missing keys escape unhandled.
            except json.JSONDecodeError:
                raise ColorSchemaDeserializationError
            except (KeyError, IndexError, TypeError):
                raise ColorSchemaDeserializationError

    def _get_color_for_index(self, index):
        # Wrap around the palette so any non-negative index is valid.
        if index < len(self._color_table):
            return self._color_table[index]
        return self._color_table[index % len(self._color_table)]

    def hex_code(self, index: int) -> str:
        """Hex color string (no leading '#') for the given index."""
        return self._get_color_for_index(index)

    def rgb(self, index: int) -> tuple:
        """(red, green, blue) integer triple for the given index."""
        hex_value = self._get_color_for_index(index)
        red = int(hex_value[0:2], 16)
        green = int(hex_value[2:4], 16)
        blue = int(hex_value[4:6], 16)
        return red, green, blue

    def __str__(self) -> str:
        return self._name if self._name else "DEFAULT"

    def serialize_json(self) -> str:
        """Serialize the schema to JSON; round-trips through ``__init__``'s
        ``serialized_json`` argument."""
        output = {
            "colorSchema": {"identifier": str(self),
                            "rgb_values": []}
        }
        for x in range(len(self._color_table)):
            output["colorSchema"]["rgb_values"].append(self.rgb(x))
        return json.dumps(output)
class DefaultColorSchema(ColorSchema):
    """The built-in fallback palette (identifier "DEFAULT")."""

    def __init__(self):
        super().__init__(name="DEFAULT")
        # BUG FIX: a missing comma after the first "ff5959" caused implicit
        # string-literal concatenation with "4f9da6", producing one invalid
        # 12-character entry and silently dropping a palette color.
        colors = ["233142",
                  "facf5a",
                  "ff5959",
                  "4f9da6",
                  "022c43",
                  "ffd700",
                  "115173",
                  "053f5e",
                  "3a9679",
                  "fabc60",
                  "11144c",
                  "085f63",
                  "49beb7",
                  "facf5a",
                  "ff5959"]
        self._color_table.extend(colors)
| picoevent/picoevent | PicoEvent/ColorSchema.py | ColorSchema.py | py | 2,536 | python | en | code | 0 | github-code | 36 |
72487623143 | import scipy.io
import numpy as np
#frames the signal @x into frames of size @frame_size separated by @hop samples
def get_frames(x, frame_size, hop):
    """Slice the 2-D signal ``x`` into overlapping frames along axis 1.

    ``x`` is indexed as x[:, start:end], so time runs along the second
    axis.  Returns a float32 array of shape
    (n_frames, frame_size, x.shape[0]).
    """
    n_samples = x.shape[1]
    frames = []
    start = 0
    # Collect every full frame; a trailing partial window is dropped.
    while start + frame_size <= n_samples:
        frames.append(x[:, start:start + frame_size])
        start += hop
    stacked = np.float32(frames)
    return np.swapaxes(stacked, 1, 2)
def long_prediction(model, x, length, timesteps):
    """Autoregressively extend the signal ``x`` by ``length`` samples.

    Each step feeds the last ``timesteps`` rows (as a batch of one) to
    ``model.predict`` and appends the model's final output sample.
    """
    window = x
    for _ in range(length):
        prediction = model.predict(np.expand_dims(window, axis=0))
        x = np.vstack((x, prediction[0, -1, 0]))
        window = x[-timesteps:]
    return x
def prepare_data(data):
    """Normalize ``data`` and build one-step-ahead training pairs.

    The signal is standardized by the mean/std of its FIRST row, then
    framed into length-99 windows with hop 1; targets are the same frames
    shifted one sample into the future.
    NOTE(review): normalization uses only row 0's statistics — confirm
    this is intended for multi-row input.
    Returns (X_train, y_train).
    """
    # Removed unused local ``sample_length`` from the original.
    timesteps = 99
    hop = 1
    data = (data - data[0].mean()) / data[0].std()
    X_train = get_frames(data[:, 0:-1], timesteps, hop)
    y_train = get_frames(data[:, 1:], timesteps, hop)
    return (X_train, y_train)
| bperezorozco/lstm_tea_talk | util.py | util.py | py | 1,007 | python | en | code | 0 | github-code | 36 |
36343463761 | from django.shortcuts import render,redirect
from django.core.paginator import Paginator
from siteSettings.models import SiteSettings
from django.contrib import messages
from users.models import User
from carts.models import *
from .models import *
def courses_page(request):
    """List active courses, 9 per page.

    Guards: unauthenticated visitors go to login; users without phone
    verification go to the code page; blocked users are sent to support.
    """
    if not request.user.is_authenticated:
        return redirect('account:login_page')
    if request.user.phone_authentication != True:
        return redirect('account:code_page')
    user_info = User.objects.filter(username=request.user.username).first()
    if user_info.block == True:
        messages.error(request, 'اکانت شما مسدود شده است لطفا با پشتیبانی تماس بگیرید')
        return redirect('contact:contactus_page')

    active_courses = Courses.objects.filter(CourseStatus=True).all()
    page_obj = Paginator(active_courses, 9).get_page(request.GET.get('page'))
    settings = SiteSettings.objects.last()
    title = settings.title + ' ' + '-' + ' ' + 'دوره ها'
    context = {
        'title': title,
        'posts': page_obj,
    }
    return render(request, 'courses/courses_page/coursesPage.html', context)
def ShowCourse_page(request, id):
    """Course detail page: videos, intro video and purchase status.

    Same access guards as courses_page.
    NOTE(review): ``cart`` is filtered only by course_id/is_paid, not by
    the requesting user — confirm against the Course model whether the
    purchase check should be user-scoped.
    """
    if not request.user.is_authenticated:
        return redirect('account:login_page')
    if request.user.phone_authentication != True:
        return redirect('account:code_page')
    user_info = User.objects.filter(username=request.user.username).first()
    if user_info.block == True:
        messages.error(request, 'اکانت شما مسدود شده است لطفا با پشتیبانی تماس بگیرید')
        return redirect('contact:contactus_page')

    cart = Course.objects.filter(course_id=id, is_paid=True).first()
    status = cart is not None
    videos = Videos.objects.filter(course_id=id).all()
    videos_count = Videos.objects.filter(course_id=id).count()
    Introduction_Video = IntroductionVideo.objects.filter(course_id=id).last()
    course = Courses.objects.filter(id=id).first()
    settings = SiteSettings.objects.last()
    title = settings.title + ' ' + '-' + ' ' + f'{course.CourseName}'
    context = {
        'title': title,
        'status': status,
        'videos': videos,
        'videos_count': videos_count,
        'keyWord': course.keyWord,
        'Introduction_Video': Introduction_Video,
        'course': course,
    }
    return render(request, 'courses/showCourse_page/show_course.html', context)
#!/usr/bin/env python3
"""Read newline-separated values from numbers.txt, print each value's hash,
then print all values left-padded with '0' to the longest value's width."""

with open('numbers.txt', 'r') as f:
    text = f.read().split("\n")[:-1]

# 1-indexed mapping of line number -> value.
dictionary = dict(enumerate(text, 1))

for i in dictionary.keys():
    print(f"{hex(hash(dictionary[i]))}: '{dictionary[i]}'")

# BUG FIX: the original bound this to a name called ``max``, shadowing the
# builtin; the intermediate ``lengths`` list is also unnecessary.
width = max(len(value) for value in text)

padded = [value.rjust(width, '0') for value in text]
print(padded)
| The-Debarghya/Sem4-Assignments | Python/q23.py | q23.py | py | 424 | python | en | code | 1 | github-code | 36 |
31626287834 | import os
import bs4
from nltk.corpus.reader.api import CorpusReader
from nltk.corpus.reader.api import CategorizedCorpusReader
import nltk
import time
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
from sklearn.cluster import DBSCAN
import numpy as np
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.neighbors import NearestNeighbors
from functools import reduce
from sklearn.feature_extraction import text
from sklearn.decomposition import PCA
from numpy.linalg import svd
CAT_PATTERN = r'([a-z_\s]+)/.*'
DOC_PATTERN = r'(?!\.)[a-z_\s]+/[0-9]+\.html'
TAGS = []
title_TAGS = ['h1']
abstract_TAGS = ['blockquote']
class HTMLCorpusReader(CategorizedCorpusReader, CorpusReader):
"""
A corpus reader for raw HTML documents to enable preprocessing.
"""
def __init__(self, root, fileids=DOC_PATTERN, encoding='utf8',
tags=TAGS, **kwargs):
"""
Initialize the corpus reader. Categorization arguments
(``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to
the ``CategorizedCorpusReader`` constructor. The remaining
arguments are passed to the ``CorpusReader`` constructor.
"""
# Add the default category pattern if not passed into the class.
if not any(key.startswith('cat_') for key in kwargs.keys()):
kwargs['cat_pattern'] = CAT_PATTERN
# Initialize the NLTK corpus reader objects
CategorizedCorpusReader.__init__(self, kwargs)
CorpusReader.__init__(self, root, fileids, encoding)
# Save the tags that we specifically want to extract.
self.tags = tags
def resolve(self, fileids, categories):
"""
Returns a list of fileids or categories depending on what is passed
to each internal corpus reader function. Implemented similarly to
the NLTK ``CategorizedPlaintextCorpusReader``.
"""
if fileids is not None and categories is not None:
raise ValueError("Specify fileids or categories, not both")
if categories is not None:
return self.fileids(categories)
return fileids
def docs(self, fileids=None, categories=None):
"""
Returns the complete text of an HTML document, closing the document
after we are done reading it and yielding it in a memory safe fashion.
"""
# Resolve the fileids and the categories
fileids = self.resolve(fileids, categories)
# Create a generator, loading one document into memory at a time.
for path in self.abspaths(fileids):
with open(path, 'r', encoding='UTF-8') as f:
yield f.read()
def sizes(self, fileids=None, categories=None):
"""
Returns a list of tuples, the fileid and size on disk of the file.
This function is used to detect oddly large files in the corpus.
"""
# Resolve the fileids and the categories
fileids = self.resolve(fileids, categories)
# Create a generator, getting every path and computing filesize
for path in self.abspaths(fileids):
yield os.path.getsize(path)
def describe(paragraphs, fileids, categories):
started = time.time()
counts = nltk.FreqDist()
tokens = nltk.FreqDist()
for para in paragraphs:
counts['paras'] += 1
for sent in nltk.sent_tokenize(para):
counts['sents'] += 1
for word in nltk.wordpunct_tokenize(sent):
counts['words'] += 1
tokens[word] += 1
n_fileids = len(fileids)
n_topics = len(categories)
return {
'files': n_fileids,
'topics': n_topics,
'paragraphs': counts['paras'],
'sentences': counts['sents'],
'words': counts['words'],
'vocabulary size': len(tokens),
'lexical diversity': float(counts['words']) / float(len(tokens)),
'paragraphs per document': float(counts['paras']) / float(n_fileids),
'sentences per paragraph': float(counts['sents']) / float(counts['paras']),
'secs': time.time() - started,
}
title_corpus = HTMLCorpusReader('', CAT_PATTERN, DOC_PATTERN, tags=title_TAGS)
title_fileids = title_corpus.fileids()
title_documents = title_corpus.docs(categories=title_corpus.categories())
title_htmls = list(title_documents)
abstract_corpus = HTMLCorpusReader('', CAT_PATTERN, DOC_PATTERN, abstract_TAGS)
abstract_fileids = abstract_corpus.fileids()
abstract_documents = abstract_corpus.docs(categories=abstract_corpus.categories())
abstract_htmls = list(abstract_documents)
title_categories = title_corpus.categories()
abstract_categories = abstract_corpus.categories()
def paras(htmls, TAGS): #paragraph로 나누기
for html in htmls:
soup = bs4.BeautifulSoup(html, 'lxml')
for element in soup.find_all(TAGS):
yield element.text
soup.decompose()
title_paragraphs = list(paras(title_htmls, title_TAGS))
temp_title_paragraphs = []
for para in title_paragraphs:
if "Title:" in para: # and len(para)>30
temp_title_paragraphs.append(para.strip('Title:\n'))
title_paragraphs = temp_title_paragraphs
print("title_paragraphs len: ", len(title_paragraphs))
print("descreibe title_paragraphs", describe(title_paragraphs, title_fileids, title_categories))
abstract_paragraphs = list(paras(abstract_htmls, abstract_TAGS))
print("abstract_paragraphs len: ", len(abstract_paragraphs))
print("descreibe abstract_paragraphs", describe(abstract_paragraphs, abstract_fileids, abstract_categories))
#temp_para = []
#print(abstract_paragraphs[0])
#for para in abstract_paragraphs:
# temp_para.append(re.sub(r"[^a-zA-Z\s.]", "", para).lower()) # 영문자 + 공백만 남기기)
#abstract_paragraphs = temp_para
#print("descreibe post abstract_paragraphs", describe(abstract_paragraphs, abstract_fileids, abstract_categories))
#print(abstract_paragraphs[0])
papers_list = []
for key, value in zip(title_paragraphs, abstract_paragraphs):
temp_dict = dict()
temp_dict['title'] = key
temp_dict['abstract'] = value
papers_list.append(temp_dict)
def sklearn_tfidf_vectorize(corpus):
my_stop_words = text.ENGLISH_STOP_WORDS.union(['abstract', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
'w', 'x', 'y', 'z'])
tfidf = TfidfVectorizer(stop_words=my_stop_words)
return tfidf.fit_transform(corpus)
df = pd.DataFrame(papers_list, columns={'title', 'abstract'})
tf_idf = sklearn_tfidf_vectorize(abstract_paragraphs).todense()
tf_idf_df = pd.DataFrame(tf_idf)
df.to_csv('df.csv')
tf_idf_df.to_csv('tf_idf_df.csv')
def clustering(df_, tf_idf_df_, tf_idf_, eps, min_samples):
neighbors = NearestNeighbors(n_neighbors=4)
neighbors_fit = neighbors.fit(tf_idf_df_)
distances, indices = neighbors_fit.kneighbors(tf_idf_df_)
distances = np.sort(distances, axis=0)
distances = distances[:,1]
plt.plot(distances)
plt.show()
if len(eps)*len(min_samples) == 1:
ncols = 2
else:
ncols = len(eps)
fig, axs = plt.subplots(figsize=(8 * 1, 8), nrows=1, ncols=ncols) #change col
ind = 0
result = []
for i in range(len(eps)):
print("e,s: ", eps[i], min_samples[i],'\n')
model = DBSCAN(eps=eps[i], min_samples=min_samples[i])
clusters = model.fit(tf_idf_df_)
n_cluster = len(set(clusters.labels_))
if n_cluster <= 2:
print("cluster num of", eps[i], min_samples[i], "is 2 or less\n")
continue
result.append(model.fit_predict(tf_idf_df_))
df_['cluster' + 'of' + str(eps[i]) + 'and' + str(min_samples[i])] = result[ind]
score_samples = silhouette_samples(tf_idf_, df_['cluster' + 'of' + str(eps[i]) + 'and' + str(min_samples[i])])
df_['silhouette_coeff' + 'of' + str(eps[i]) + 'and' + str(min_samples[i])] = score_samples
silhouette_s = silhouette_score(tf_idf_, df_['cluster' + 'of' + str(eps[i]) + 'and' + str(min_samples[i])])
temp = 0
for p in df_.groupby('cluster' + 'of' + str(eps[i]) + 'and' + str(min_samples[i]))['silhouette_coeff' + 'of' + str(eps[i]) + 'and' + str(min_samples[i])].mean():
temp += p
average_score = temp/len(set(clusters.labels_))
y_lower = 10
axs[i].set_title(
'Number of Cluster : ' + str(n_cluster) + '\n' + 'Silhouette Score :' + str(round(silhouette_s, 3)))
axs[i].set_xlabel("The silhouette coefficient values")
axs[i].set_ylabel("Cluster label")
axs[i].set_xlim([-0.1, 1])
axs[i].set_ylim([0, len(tf_idf_df_) + (n_cluster + 1) * 10])
axs[i].set_yticks([]) # Clear the yaxis labels / ticks
axs[i].set_xticks([0, 0.2, 0.4, 0.6, 0.8, 1])
# 클러스터링 갯수별로 fill_betweenx( )형태의 막대 그래프 표현.
for j in range(-1, n_cluster-1):
ith_cluster_sil_values = score_samples[result[ind] == j]
ith_cluster_sil_values.sort()
size_cluster_i = ith_cluster_sil_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(j) / n_cluster)
axs[i].fill_betweenx(np.arange(y_lower, y_upper), 0, ith_cluster_sil_values, \
facecolor=color, edgecolor=color, alpha=0.7)
axs[i].text(-0.05, y_lower + 0.5 * size_cluster_i, str(j))
y_lower = y_upper + 10
axs[i].axvline(x=silhouette_s, color="red", linestyle="--")
ind += 1
plt.show()
return result
# svd로 s값 구해서 분산정도 구하고 pca 파라미터 구하기
#u, s, vt = svd(tf_idf)
#s = np.diag(s)
#s_list = []
#for i in range(0, 1000):
# s_list.append(s[i][i]/np.trace(s))
#
#for i in range(1, 1000):
# print(1-reduce(lambda a, b: a + b, s_list[:i]))
# --- Dimensionality reduction: project the TF-IDF features onto principal components ---
pca = PCA(n_components=30) # how many principal components to keep
principalComponents = pca.fit_transform(tf_idf_df)
principalDf = pd.DataFrame(data=principalComponents)
# NOTE(review): the column/index comprehensions below use df.shape[1], not
# n_components (30); this raises unless df has exactly 30 columns — TODO confirm.
pca_df = pd.DataFrame(data=principalComponents, index=df.index,
                      columns=[f"pca{num+1}" for num in range(df.shape[1])])
# Per-component explained variance (eigenvalue) and variance ratio plus the
# cumulative ratio, labelled pca1..pcaN. (Korean column names are part of the
# program's printed output and are kept as-is.)
result = pd.DataFrame({'설명가능한 분산 비율(고윳값)':pca.explained_variance_,
                       '기여율':pca.explained_variance_ratio_},
                      index=np.array([f"pca{num+1}" for num in range(df.shape[1])]))
result['누적기여율'] = result['기여율'].cumsum()
print(result)
# DBSCAN parameter grid: eps[i] is paired element-wise with min_samples[i].
eps = [0.05, 0.03, 0.09, 0.05, 0.03, 0.09, 0.05, 0.03, 0.09]
min_samples = [2, 2, 2, 4, 4, 4, 7, 7, 7]
res = clustering(df, principalDf, principalComponents, eps, min_samples)
print(res[0])
print(type(res[0]))
"""
for i, r in enumerate(res):
if set(r) == None:
continue
else:
for cluster_num in set(r):
if cluster_num == -1 or cluster_num == 0:
continue
# -1,0은 노이즈 판별이 났거나 클러스터링이 안된 경우
print("cluster num : {}".format(cluster_num))
temp_df = dff[dff['cluster' + 'of' + str(eps[i]) + 'and' + str(min_samples[i])] == cluster_num] # cluster num 별로 조회
for title in temp_df['title']:
print(title) # 제목으로 살펴보자
print()
print("-----------------\n")
"""
"""
print(df.head())
print("num of clusters: ", len(set(clusters.labels_)))
print("average_score: " + 'of' + str(e) + 'and' + str(s) + ": ", average_score)
print("silhouette score: " + 'of' + str(e) + 'and' + str(s) + ": ", silhouette_s)
print(df.groupby('cluster' + 'of' + str(e) + 'and' + str(s))['silhouette_coeff' + 'of' + str(e) + 'and' + str(s)].mean())
print("eps, min_samples: " + 'of' + str(e) + 'and' + str(s) + ": ", e, s)""" | kyle1213/data-mining | _old/abstract_clustering_pca.py | abstract_clustering_pca.py | py | 12,035 | python | en | code | 0 | github-code | 36 |
12781939308 | """
This file contains a function to get the coordinates from the GPS
"""
import serial
import time
import string
import pynmea2
#GPS gets coordinates
#UART communication protocols
def getCoordinates():
    """Read one NMEA sentence from the GPS serial port and return [lat, lng].

    Returns:
        [latitude, longitude] when the line read is a $GPRMC sentence,
        otherwise None (callers must handle a None result — the GPS emits
        several sentence types and only RMC is parsed here).
    """
    port = "/dev/ttyAMA0"
    ser = serial.Serial(port, baudrate=9600, timeout=0.5)
    try:
        # BUG FIX: Serial.readline() returns bytes on Python 3, so the old
        # bytes-vs-str comparison with "$GPRMC" could never match. Decode first.
        newdata = ser.readline().decode("ascii", errors="replace")
    finally:
        # Release the serial port even if the read fails.
        ser.close()
    if newdata[0:6] == "$GPRMC":
        newmsg = pynmea2.parse(newdata)
        lat = newmsg.latitude
        lng = newmsg.longitude
        return [lat, lng]
    return None
| JanPat/i-dont-know-go | GPS.py | GPS.py | py | 481 | python | en | code | 0 | github-code | 36 |
72705650343 | numbers = (1,2,3)
# topples are not like arrays, they are not changeable, they are good for making sure a list cannot be changed
# example of unpacking
coordinates = (1, 2, 3)
x, y, z = coordinates
# same as
# x = coordinates[0]
# y = coordinates[1]
# z = coordinates[2]
print(y) | jjmonte20/pythonPlayground | topple.py | topple.py | py | 281 | python | en | code | 0 | github-code | 36 |
43104582394 | import csv
# NOTE(review): this script uses Python 2 syntax (print >> file) and will not
# run under Python 3 without conversion.
source = open("Nucleotide.txt", "r")
# One character list per record; the input is assumed to hold 6 FASTA-style
# records ('>' header line followed by sequence characters).
matrix = [[], [], [], [], [], []]
for x in range(6):
    line = source.readline() # consume the header line (or its remainder after the '>' already read)
    while True:
        c = source.read(1)
        # stop at the next record's '>' (consumed here) or at end of file
        if c == '>' or c=='':
            break
        if (c!='\n' and c!='\r') :
            matrix[x].append(c)
# Symmetric 6x6 matrix of pairwise mismatch fractions between the sequences.
distance = [[0 for x in range(6)] for x in range(6)]
x=0
for x in range(6):
    for y in range(x+1,6):
        mismatch=0
        # NOTE(review): the alignment length 1679 is hard-coded; confirm every
        # sequence in Nucleotide.txt has at least 1679 characters.
        for z in range(1679):
            if(matrix[x][z]!=matrix[y][z]):
                mismatch+=1
        distance[x][y]=mismatch/1679.0
        distance[y][x]=mismatch/1679.0
# Prepend species labels so each CSV row starts with its name.
distance[0]=['Cow']+distance[0]
distance[1]=['Sheep']+distance[1]
distance[2]=['Worm']+distance[2]
distance[3]=['Rat']+distance[3]
distance[4]=['Frog']+distance[4]
distance[5]=['Pig']+distance[5]
Poutput = open("Ndistance.txt", "w")
print>>Poutput,"*,Cow,Sheep,Worm,Rat,Frog,Pig" # header row (Python 2 print-to-file)
writer = csv.writer(Poutput)
for i in range(len(distance)):
    writer.writerow(distance[i])
Poutput.close()
| Ras-al-Ghul/UPGMA-Phylogenetic-Tree | Q1a.py | Q1a.py | py | 894 | python | en | code | 2 | github-code | 36 |
33639010391 | '''
main driver file. Responsible for handling user inputs and displaying current GameState
'''
import pygame as p
from PIL import Image
import ChessEngine
p.init()
WIDTH = HEIGHT = 512 # or you can keep it as 512
DIMENSION = 8 # Chess boards are 8x8
SQ_SIZE = HEIGHT // DIMENSION
IMAGES = dict.fromkeys(['wR', 'wN', 'wB', 'wQ', 'wK', 'wp', 'bR', 'bN', 'bB', 'bQ', 'bK', 'bp'])
SET_FPS = 15
def loadImages():
    """Load and scale each piece image into the module-level IMAGES dict.

    Uses a path relative to the working directory instead of the previous
    hard-coded absolute user path, so the program runs on any machine (run
    it from the project root so that ./images/<piece>.png resolves).
    """
    for key in list(IMAGES.keys()):  # list() so the dict can be updated while iterating
        IMAGES[key] = p.transform.scale(
            p.image.load('images/{}.png'.format(key)),
            (SQ_SIZE, SQ_SIZE))
def main():
    """Run the game loop: handle mouse/keyboard input, apply moves, redraw.

    Controls: click a square then a destination square to move; clicking the
    same square twice deselects; press 'z' to undo the last move.
    """
    screen = p.display.set_mode((WIDTH, HEIGHT))
    clock = p.time.Clock()
    screen.fill(p.Color('White'))
    loadImages()
    gs = ChessEngine.GameState()
    validMoves = gs.getValidMoves()
    moveMade = False # flag for when a move is made; triggers recomputing valid moves below
    sqSelected = () # Empty tuple which will save the position of the last square selected by user
    playerClicks = [] # List of two tuples tracking the clicks eg: [(6,4),(4,4)]
    running = True
    while running:
        for e in p.event.get():
            if e.type == p.QUIT:
                running = False
            elif e.type ==p.MOUSEBUTTONDOWN:
                # Translate the pixel position into board (row, col) coordinates.
                location = p.mouse.get_pos()
                col = location[0]//SQ_SIZE
                row = location[1]//SQ_SIZE
                if sqSelected == (row, col): # If the same square is selected twice, it:
                    sqSelected = () # Deselects the square
                    playerClicks = [] # Resets playerClicks
                else:
                    sqSelected = (row, col)
                    playerClicks.append(sqSelected)
                if len(playerClicks)==2: # This indicates that 2 clicks have been made (done after second click)
                    move = ChessEngine.Move(playerClicks[0],playerClicks[1], gs.board)
                    if move in validMoves:
                        print(move.getChessNotation())
                        gs.makeMove(move)
                        moveMade = True
                        sqSelected = ()
                        playerClicks =[]
                    else:
                        # Illegal move: keep the last clicked square as the new selection.
                        playerClicks = [sqSelected]
            # Key handles
            elif e.type == p.KEYDOWN:
                if e.key == p.K_z: # Undo the move made
                    gs.undoMove()
                    moveMade = True
        if moveMade:
            # Board changed: regenerate the legal-move list once, not every frame.
            validMoves = gs.getValidMoves()
            moveMade = False
        drawGameState(screen, gs)
        clock.tick(SET_FPS)
        p.display.flip()
def drawGameState(screen, gs):
    """Render one frame: board squares first, then the pieces from *gs*."""
    drawBoard(screen)
    drawPieces(screen, gs.board)
'''
drawBoard is going to draw just the board without the pieces.
*Note to self*
Top left square of the chess board is always white irrespective of which colour you're playing with
'''
def drawBoard(screen):
    """Draw the empty 8x8 checkerboard; the top-left square is always light."""
    palette = [p.Color('white'), p.Color('grey')]
    for row in range(DIMENSION):
        for col in range(DIMENSION):
            square_colour = palette[(row + col) % 2]
            p.draw.rect(screen, square_colour,
                        p.Rect(col * SQ_SIZE, row * SQ_SIZE, SQ_SIZE, SQ_SIZE))
'''
drawPieces is going to draw the pieces on the board given the current GameState
'''
def drawPieces(screen, board):
    """Blit each non-empty piece code from *board* onto its square."""
    for row in range(DIMENSION):
        for col in range(DIMENSION):
            code = board[row][col]
            if code == '--':
                continue  # empty square, nothing to draw
            screen.blit(IMAGES[code], p.Rect(col * SQ_SIZE, row * SQ_SIZE, SQ_SIZE, SQ_SIZE))
if __name__ == '__main__':
main()
| aaliixii/Chess-Engine | ChessMain.py | ChessMain.py | py | 3,651 | python | en | code | 0 | github-code | 36 |
17140780409 | # Creates a basic BLE Advertiser
# Check the Advertiser.py class for more info
import sys
import time
from datetime import datetime, timedelta
from Advertiser import Advertiser
# Advertise
def main():
    """Advertise over BLE for 10 minutes, logging start/stop timestamps."""
    dt = datetime.now()
    print("Started at: %s" % dt)
    # Create advertiser
    adv = Advertiser()
    # Initiate BLE stack; if setup fails nothing is advertised and we fall
    # straight through to the stop message.
    if adv.setupBleStack('Pi Range Tester 2', interval=1000):
        # Start advertising
        adv.enableAdvertisements(en=True)
        # Advertise for n seconds (600 s = 10 minutes)
        time.sleep(600)
        # Disable advertisements
        adv.enableAdvertisements(en=False)
    # Stop
    print("Stopped at: %s" % datetime.now())
# Program start
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
finally:
print("CTRL-C Detected")
print("Stopped at: %s" % datetime.now())
sys.exit()
| horace1024/ble | run_adv.py | run_adv.py | py | 900 | python | en | code | 0 | github-code | 36 |
15708723931 | import numpy as np
import cv2 as cv
import pandas as pd
import random
from math import ceil
from sklearn.utils import shuffle
dt = np.dtype(np.float32)
def generator(batch_size=50):
    """Infinite training-batch generator for a traffic-light crop classifier.

    Each yield produces (Xs, Ys): Xs is a shuffled list of 100 float32 arrays
    of shape (1, 64, 64, 3) scaled to [0, 1]; Ys is the matching list of
    float32 label vectors [has_light, x1, y1, x2, y2] (all zeros for negative
    crops). Positives come from the day/night annotation CSVs; negatives are
    blob-detector keypoints away from any annotated box.
    NOTE(review): despite the name, *batch_size* is the number of batches
    yielded per outer pass, not the per-batch sample count (always 100) --
    confirm against the training code.
    """
    while True:
        for j in range(batch_size):
            Xs = []
            Ys = []
            count = 0
            # Accumulate exactly 100 samples (positives + negatives) per batch.
            while count < 100:
                day_or_night = random.randint(0,1)
                if day_or_night == 0:
                    folder_day = random.randint(1,13)
                    path_0 = '/../data/archive/Annotations/Annotations/dayTrain/dayClip{}/frameAnnotationsBOX.csv'.format(folder_day)
                    csv_file = pd.read_csv(filepath_or_buffer=path_0, sep=';')
                else:
                    folder_night = random.randint(1,5)
                    path_0 = '/../data/archive/Annotations/Annotations/nightTrain/nightClip{}/frameAnnotationsBOX.csv'.format(folder_night)
                    csv_file = pd.read_csv(filepath_or_buffer=path_0, sep=';')
                # choose picture
                i = random.randint(0, len(csv_file.iloc[:,0].unique())-1) # choose random number of picture in folder
                full_pic_name = csv_file.iloc[:,0].unique()[i] # with index above choose full name picture
                pic_name = csv_file.iloc[:,0].unique()[i].split('/')[1] # with index above choose picture
                if day_or_night == 0:
                    path_to_img = '/../data/archive/dayTrain/dayTrain/dayClip{}/frames/'.format(folder_day) + pic_name
                else:
                    path_to_img = '/../data/archive/nightTrain/nightTrain/nightClip{}/frames/'.format(folder_night) + pic_name
                img = cv.imread(path_to_img)
                img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
                # find coordinates
                number_of_same_pic = len(csv_file[csv_file.iloc[:,0] == full_pic_name].iloc[:,0]) # how many pic with same name
                # 200px replicated border so random crops around edge boxes stay in-bounds.
                img = cv.copyMakeBorder(img, 200, 200, 200, 200, cv.BORDER_REPLICATE)
                # blobbing: candidate negative regions come from a simple blob detector
                params = cv.SimpleBlobDetector_Params()
                params.minThreshold = 1
                params.maxThreshold = 255
                params.filterByArea = True
                params.minArea = 100
                params.filterByCircularity = False
                params.filterByConvexity = False
                params.filterByInertia = False
                detector = cv.SimpleBlobDetector_create(params)
                keypoints = detector.detect(img)
                kps = np.array([key for key in keypoints]) # NOTE(review): unused
                for i in range(number_of_same_pic):
                    if count < 100:
                        # coors of box (+200 accounts for the added border)
                        x1 = csv_file[csv_file.iloc[:,0] == full_pic_name].iloc[i,2]+200
                        y1 = csv_file[csv_file.iloc[:,0] == full_pic_name].iloc[i,3]+200
                        x2 = csv_file[csv_file.iloc[:,0] == full_pic_name].iloc[i,4]+200
                        y2 = csv_file[csv_file.iloc[:,0] == full_pic_name].iloc[i,5]+200
                        # condition for keypoins which are not boxes - TAKES MUCH TIME
                        for key in keypoints:
                            keypoints = [key for key in keypoints if not ((x1-50 < key.pt[0] < x2+50) and (y1-50 < key.pt[1] < y2+50))]
                        # Random 200x200 crop that still fully contains the box.
                        random_crop_x1 = random.randint(0, 200-(x2-x1))
                        random_crop_x2 = 200 - random_crop_x1
                        random_crop_y1 = random.randint(0, 200-(y2-y1))
                        random_crop_y2 = 200 - random_crop_y1
                        cropped_img = img[y1-random_crop_y1:y2+random_crop_y2, x1-random_crop_x1:x2+random_crop_x2]
                        new_x1 = random_crop_x1
                        new_y1 = random_crop_y1
                        new_x2 = new_x1 + (x2-x1)
                        new_y2 = new_y1 + (y2-y1)
                        w = cropped_img.shape[1]
                        h = cropped_img.shape[0]
                        # Rescale box coordinates into the 64x64 resized crop.
                        Rx = (64 / w)
                        Ry = (64 / h)
                        x1 = ceil(new_x1*Rx)
                        y1 = ceil(new_y1*Ry)
                        x2 = ceil(new_x2*Rx)
                        y2 = ceil(new_y2*Ry)
                        cropped_img = cv.resize(cropped_img, (64, 64))
                        cropped_img = cropped_img.reshape(1, 64, 64, 3)
                        box = np.array([1, x1, y1, x2, y2], dtype=dt)
                        Xs.append(np.array(cropped_img, dtype=dt) / 255.), Ys.append(box)
                        count += 1
                # Keep a few remaining keypoints as negative (no-light) samples.
                keypoints = keypoints[-5:-1]
                for k in range(len(keypoints)):
                    if count < 100:
                        # 200x200 window centred on the keypoint, resized to 64x64.
                        k_x1 = int(round(keypoints[k].pt[0]-100))
                        k_y1 = int(round(keypoints[k].pt[1]-100))
                        k_x2 = int(round(keypoints[k].pt[0]+100))
                        k_y2 = int(round(keypoints[k].pt[1]+100))
                        cropped_img = img[k_y1:k_y2, k_x1:k_x2]
                        cropped_img = cv.resize(cropped_img, (64, 64))
                        cropped_img = cropped_img.reshape(1, 64, 64, 3)
                        box = np.array([0, 0, 0, 0, 0], dtype=dt)
                        Xs.append(np.array(cropped_img, dtype=dt) / 255.), Ys.append(box)
                        count += 1
            Xs, Ys = shuffle(Xs, Ys)
            yield Xs, Ys
| JackDrinkwater/traffic_lights_detector | src/data_gen.py | data_gen.py | py | 5,719 | python | en | code | 0 | github-code | 36 |
33571523766 | import random
def Rectangle(a,b):
    """Return the area of an a-by-b rectangle."""
    area = a * b
    return area
def sur(a,b):
    """Return the perimeter of an a-by-b rectangle."""
    perimeter = 2 * (a + b)
    return perimeter
# Read the rectangle's long side ("長辺") and short side ("短辺") as floats.
a = input("長辺:")
a = float(a)
b = input("短辺:")
b = float(b)
y=Rectangle(a,b) # area
z=sur(a,b) # perimeter
print("面積:",y) # "面積" = area
print("周囲:",z) | Yuki1015Mitsunari/YukiMitsunari1015 | Py5-4.py | Py5-4.py | py | 248 | python | en | code | 0 | github-code | 36 |
38665630567 | import matplotlib.pyplot as plt
import numpy as np
from sympy import *
x = Symbol('x')
#! INPUT
print("""
Bắt đầu chương trình
Đây là phương pháp lặp đơn
INPUT
f(x) với f(x)=0
khoảng phân li nghiệm (a,b)
Và thêm SaiSoE đã cho
INPUT
""")
def Phi(x):
    """Fixed-point iteration function: f(x) = 0 rewritten as x = Phi(x)."""
    coefficient = -1/3
    return coefficient * x**3
a = -2
b = 2
SaiSoE = 10
print(f"""
INPUT
{Phi(x)} , \tvới f(x)=0
khoảng phân li nghiệm (a,b) = ({a},{b})
Và thêm SaiSoE đã cho: SaiSoE = {SaiSoE}
INPUT
""")
#! INPUT
#! KIỂM TRA ĐIỀU KIỆN PP
print(f"""
#! Điều kiện của phương pháp lặp đơn là:
1. Hàm Phi(x) co <=> 0<q<1 (q=MAX_đạo hàm Phi(x))
""")
dao_ham_Phi= diff(Phi(x),x)
print(dao_ham_Phi)
x_ve_hinh=np.linspace(a,b,1000)
y_ve_hinh=[dao_ham_Phi.subs(x,i) for i in x_ve_hinh]
q=max(y_ve_hinh)
print(f"q = {q}\n Xét 0<q<1 => ",end=" ")
if(0<q and q<1):
print(f"Đúng")
else:
print(f"Sai")
plt.plot(x_ve_hinh,y_ve_hinh)
plt.show()
#! KIỂM TRA ĐIỀU KIỆN PP
print("Kết thúc chương trình")
| buihuyhau/HocLaiGTS | 2LapDon/a.py | a.py | py | 1,044 | python | vi | code | 0 | github-code | 36 |
2808355432 | """
INSTANCE METHODS in CLASSES:
In instance methods of a class, the first argument is a reference to the instance of the object itself.
By convention, this argument is named self. It's automatically passed by Python when you call
an instance method on an object.
"""
import sys
class MyClass:
    """Tiny demo class showing that Python passes the instance as 'self'."""
    def instance_method(self) -> None:
        """Print which instance the method was invoked on."""
        print("This is an instance method using: ", self)
# When you call this method:
obj = MyClass()
obj.instance_method() # You don't pass 'self', Python does it.
# Here, obj.instance_method() is equivalent to MyClass.instance_method(obj).
sys.exit() | sirin-koca/Python-Projects | methods-functions/intance_methods.py | intance_methods.py | py | 596 | python | en | code | 0 | github-code | 36 |
3195242150 | import re
class Rule:
    """A compiled-regex extraction rule.

    Each capture group in *regex* is paired positionally with an entry of
    *fields*; a field must expose set_value(instance, text) and
    get_value(instance). *pattern* is the human-readable pattern used in
    error messages.
    """
    def __init__(self, regex, fields, pattern):
        self.regex = re.compile(regex)
        self.fields = fields
        self.pattern = pattern

    def apply(self, cls_factory, text):
        """Build one instance via *cls_factory* from the first match in *text*.

        Raises ValueError when *text* does not match.
        """
        return self._setattrs(cls_factory(), self._get_match(text))

    def apply_many(self, cls_factory, text):
        """Build one instance per non-overlapping match in *text* (may be empty)."""
        return [
            self._setattrs(cls_factory(), match)
            for match
            in self.regex.finditer(text)
        ]

    def _get_match(self, text):
        match = self.regex.search(text)
        if not match:
            # BUG FIX: this previously read self._pattern, which is never set
            # (__init__ stores self.pattern), so a non-matching input raised
            # AttributeError instead of the intended ValueError.
            raise ValueError(
                f"Text '{text}' does not match pattern '{self.pattern}'")
        return match

    def _setattrs(self, instance, match):
        # Assign each captured group through its field and record the parsed
        # values (post set/get round-trip) as an ordered tuple on _items.
        items = []
        for field, text in zip(self.fields, match.groups()):
            field.set_value(instance, text)
            items.append(field.get_value(instance))
        instance._items = tuple(items)
        return instance
| heijp06/regel | regel/rule.py | rule.py | py | 968 | python | en | code | 0 | github-code | 36 |
73326093223 | #!/usr/bin/python
# Import needed to use the GPIO
import RPi.GPIO as GPIO
from time import sleep
import ConfigParser
# Tells the program which numbering scheme you're using for the GPIO pins,
# BCM is the "Broadcom SOC channel", the name of the pins, as opposed
# to the physical numbering of the pins (BOARD).
GPIO.setmode(GPIO.BCM)
config = ConfigParser.SafeConfigParser()
config.read('Config/GPIO.cfg')
pinout_section = 'Pinout'
# Name the GPIO pins
pir_pin = int(config.get(pinout_section, 'pir'))
system_led_pin = int(config.get(pinout_section, 'system_led'))
motion_led_pin = int(config.get(pinout_section, 'motion_led'))
good_button_pin = int(config.get(pinout_section, 'good_button'))
bad_button_pin = int(config.get(pinout_section, 'bad_button'))
# Set the PIR pin as an input and ledPins as outputs
GPIO.setup(pir_pin, GPIO.IN)
GPIO.setup(motion_led_pin, GPIO.OUT)
GPIO.setup(good_button_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(bad_button_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(system_led_pin, GPIO.OUT)
GPIO.output(motion_led_pin, False) # Turn off the green LED
GPIO.output(system_led_pin, True)
# Function to cleanup the GPIO pins
def cleanup():
GPIO.cleanup()
def check_for_motion():
    """Poll the PIR sensor once and mirror its state on the motion LED.

    Returns True when motion is detected (green LED on), False otherwise
    (green LED off).
    """
    # Checks if there's an input from the PIR
    if GPIO.input(pir_pin):
        GPIO.output(motion_led_pin, True) # Turn on Green LED
        return True
    # If no input detected turns the LED off.
    else:
        GPIO.output(motion_led_pin, False) # Turn off green LED
        return False
| Beamsy/PAIRR | PAIRR/GPIO_utils.py | GPIO_utils.py | py | 1,552 | python | en | code | 2 | github-code | 36 |
41165592833 | # Imported libraries
import random as r
import time as t
import colorama as col
# Functions
# little helper function that converts an array of numbers into a string
def arr_to_string(arr_nums):
    """Join the numbers into one space-separated string (keeps the trailing space)."""
    return "".join(str(num) + " " for num in arr_nums)
# color(number, number, array, boolean)
# slices array from index1(inclusive) - index2(inclusive) from whole_arr.
# If switch = 1, pair is red. If switch = 0, pair is blue
def color(index1, index2, whole_arr, switch):
    """Print whole_arr with the pair at (index1, index2) colourised.

    switch truthy -> red pair (a swap happened); falsy -> blue pair (no swap).
    NOTE(review): the index1 == 0 branch slices whole_arr[2:], i.e. it assumes
    index2 == index1 + 1 == 1, which holds for bubble_sort's calls.
    """
    num1 = str(whole_arr[index1])
    num2 = str(whole_arr[index2])
    if switch:
        pair = col.Fore.LIGHTRED_EX + num1 + " " + num2
    else:
        pair = col.Fore.LIGHTBLUE_EX + num1 + " " + num2
    if index1 == 0:
        # Pair sits at the front: colour it, then print the rest uncoloured.
        rest_of_arr = whole_arr[2:]
        str_rest = ""
        for n in rest_of_arr:
            str_rest += str(n) + " "
        print(pair + " " + col.Fore.RESET + str_rest)
    else:
        # Split the array around the pair and rebuild the printed line.
        arr_slice_1 = whole_arr[0:index1]
        arr_slice_2 = whole_arr[index2+1:]
        str_slice_1 = ""
        str_slice_2 = ""
        for n in arr_slice_1:
            str_slice_1 += str(n) + " "
        for n in arr_slice_2:
            str_slice_2 += str(n) + " "
        print(str_slice_1 + pair + " " + col.Fore.RESET + str_slice_2)
def bubble_sort(arr_sort, speed):
    """Interactively animate a bubble sort of arr_sort (a copy is sorted).

    speed is the per-step delay in seconds; returns the sorted copy.
    """
    input("\nI will now attempt to sort your array. Press \"Enter\" when ready...")
    print()
    arr0 = arr_sort.copy()
    i = 0
    print(arr_to_string(arr0))
    t.sleep(speed)
    # i = how many swaps there aren't. So if i < len(arr0) - 1, it hasn't been fully sorted
    while i < len(arr0) - 1:
        i = 0
        # loops through array in pairs, which is why condition is (n < arr0.length - 1)
        for n in range(0, len(arr0) - 1):
            num1 = arr0[n]
            num2 = arr0[n + 1]
            # if 1st value in the pair is greater than 2nd value, swap
            if num1 - num2 > 0:
                color(n, n + 1, arr0, 1)
                t.sleep(speed)
                arr0[n] = num2
                arr0[n + 1] = num1
                color(n, n+1, arr0, 1)
                t.sleep(speed)
            # if 1st value in pair is not greater than 2nd value, add 1 to i
            else:
                i += 1
                color(n, n+1, arr0, 0)
                t.sleep(speed)
    # NOTE: '%s' % col.Fore.MAGENTA binds first, then str(arr0) is appended.
    print('\nFinished! Your newly sorted array is: \n%s' % col.Fore.MAGENTA + str(arr0))
    return arr0
# user generates a random array
# for arr_sort argument in bubble_sort function
def user_interface():
    """Prompt the user for array size and value range; return a random array.

    NOTE(review): the numeric inputs are not validated — non-numeric answers
    raise ValueError at the int() conversions below.
    """
    input('\nWelcome to Array Sort 1.0! Press "Enter" to continue...')
    count = input('\nWe\'re going to generate a random array of numbers.'
                  '\nHow many numbers should be in the array? Please choose between 5 and 25: ')
    num_max = input('\nWhat is the maximum number each number in the array should be? '
                    '\nIn other words, no number will be greater than this number: ')
    num_min = input('\nWhat is the minimum number each number in the array should be?'
                    '\nIn other words, no number will be less than this number: ')
    random_array = [r.randint(int(num_min), int(num_max)) for a in range(0, int(count))]
    print('\nYour array is: \n%s' % col.Fore.MAGENTA + str(random_array) + col.Fore.RESET)
    return random_array
# for speed argument in bubble_sort function
def sorting_speed():
    """Ask the user for an animation speed; return the per-step delay in seconds.

    Accepts "1"/"slow", "2"/"normal" or "3"/"instant" (case-insensitive words)
    and re-prompts until a recognised answer is given.
    """
    delays = {
        "1": .5, "slow": .5,
        "2": .3, "normal": .3,
        "3": 0, "instant": 0,
    }
    while True:
        user_speed = input("\nPlease choose the speed at which you would like to see your array sorted: "
                           "\n1. Slow"
                           "\n2. Normal"
                           "\n3. Instant")
        choice = user_speed.lower()
        if choice in delays:
            return delays[choice]
        print("\nI don't understand")
# Function calls
user_array = user_interface()
speed_choice = sorting_speed()
bubble_sort(user_array, speed_choice)
# End
print(col.Fore.RESET + "\nThank you for using Array Sort 1.0!")
| habit456/Python3_Projects_msc | bubble_sort.py | bubble_sort.py | py | 4,224 | python | en | code | 1 | github-code | 36 |
6797316241 | from django.utils.translation import ugettext_lazy as _
from utils.faker_factory import faker
from ..mails import BaseMailView
class ProjectsMemberRoleChangedMailView(BaseMailView):
    """
    Mail view for notifying a project member that their role has changed;
    renders the member_role_changed template with the project name, the
    member's name and their new roles.
    """
    template_name = 'mails/projects/member_role_changed.html'
    # Template context keys that must be supplied when sending this mail.
    mandatory_mail_args = [
        'name',
        'user_name',
        'roles',
        'public_url',
    ]
    subject = _('Your role has changed: You are now acting as %(roles)s in %(name)s')
    section = 'projects'
    def get_mock_data(self, optional=True):
        """Return placeholder mail args used for previewing the template."""
        mock_data = {
            'name': '[Project Name]',
            'roles': ['ExO Head Coach', 'Observer'],
            'user_name': '[Name]',
            'disable_notification_url': None,
            'public_url': '/{}'.format(faker.uri_path()),
        }
        return mock_data
| tomasgarzon/exo-services | service-exo-mail/mail/mailviews/projects_member_role_changed.py | projects_member_role_changed.py | py | 818 | python | en | code | 0 | github-code | 36 |
40954037620 | import nltk.classify.util
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import movie_reviews
def extract_features(word_list):
    """Map every word to True — the bag-of-words format NLTK's NaiveBayesClassifier expects."""
    return {word: True for word in word_list}
if __name__=='__main__':
# Load positive and negative reviews
positive_fileids = movie_reviews.fileids('pos')
negative_fileids = movie_reviews.fileids('neg')
features_positive = [(extract_features(movie_reviews.words(fileids=[f])),
'Positive') for f in positive_fileids]
features_negative = [(extract_features(movie_reviews.words(fileids=[f])),
'Negative') for f in negative_fileids]
# Split the data into train and test (80/20)
threshold_factor = 0.8
# threshold_factor = 0.7
# threshold_factor = 0.5
threshold_positive = int(threshold_factor * len(features_positive))
threshold_negative = int(threshold_factor * len(features_negative))
features_train = features_positive[:threshold_positive] + features_negative[:threshold_negative]
features_test = features_positive[threshold_positive:] + features_negative[threshold_negative:]
print("\nNumber of training datapoints:", len(features_train))
print("Number of test datapoints:", len(features_test))
# Train a Naive Bayes classifier
classifier = NaiveBayesClassifier.train(features_train)
print("\nAccuracy of the classifier:", nltk.classify.util.accuracy(classifier, features_test))
print("\nTop 10 most informative words:")
for item in classifier.most_informative_features()[:10]:
print(item[0])
# Sample input reviews
# input_reviews = [
# "It is an amazing movie",
# "This is a dull movie. I would never recommend it to anyone.",
# "The cinematography is pretty great in this movie",
# "The direction was terrible and the story was all over the place"
# ]
input_reviews = [
"This was easy to put together. It is sturdy ad a perfect fit for my daughter's room. We have one drawer that sticks a little COMMA but it works.",
"I loved the look of this dresser in the store and decided to take the plunge. It was a major project to assemble (6-ish hours for one relatively handy person without power tools) COMMA but the finished product looks great and stores a ton of clothes! The directions could definitely be a little clearer on assembling the middle divider pieces COMMA which looks wrong even when done correctly and the dimples in the wood for orientation look like holes in the instructions. I couldn't get two of the four screws that connect the front face to the dresser top to go in (screws too short or holes not quite aligned) COMMA but thankfully there were many other points of attachment and it's not at all obvious that they're missing. And mine came with metal (not plastic) cam locks COMMA which is a good thing. Great buy!",
"We were very disappointed to realize that the hemnes set in white is made of mostly particle board. We were under the impression that the all the hemnes line was made of solid wood. After further investigation it seems as though all the dressers are made of wood except the white ones. Not sure why this is and is very misleading",
"I not only purchased the dresser but I bought the matching chest I'd drawers. The pieces took a while to put together but they are worth the time. Great product."
]
print("\nPredictions:")
for review in input_reviews:
print("\nReview:", review)
probdist = classifier.prob_classify(extract_features(review.split()))
pred_sentiment = probdist.max()
print("Predicted sentiment:", pred_sentiment)
print("Probability:", round(probdist.prob(pred_sentiment), 2))
| pkankariya/CS5560_Knowledge_Discovery_Management | ICP_10/Sentiment_Analysis.py | Sentiment_Analysis.py | py | 3,729 | python | en | code | 0 | github-code | 36 |
6301255873 | from flask import current_app, g, Flask, flash, jsonify, redirect, render_template, request, session, Response
import logging
import sqlite3
import json
import requests
from db import DB, KeyNotFound, BadRequest
import datetime
# Configure application
app = Flask(__name__)
app.config['JSON_SORT_KEYS'] = False
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Needed to flash messages
app.secret_key = b'mEw6%7APK'
# path to database
DATABASE = 'splatDB.sqlite3'
# default path
@app.route('/')
def home():
return render_template("home.html")
# hello world
@app.route('/hello')
def hello_world():
data = {"message": "Hello, World!"}
return jsonify(data)
# -----------------
# Create/Read Endpoints
# These JSON/REST api endpoints are used to add new records
# and return lookups based on Ids
# -------------------
# creates required table for application.
# note having a web endpoint for this is not a standard approach, but used for quick testing
@app.route('/create', methods=["GET"])
def create_tables():
"""
Drops existing tables and creates new tables
"""
db = DB(get_db_conn())
return db.create_db('schema/create.sql')
@app.route('/artist', methods=["POST"])
def add_artist():
    """
    Creates a new artist record from the JSON request body.
    Returns 201 on success, 400 when the body is missing, and raises
    InvalidUsage for validation or database errors.
    """
    post_body = request.json
    if not post_body:
        logging.error("No post body")
        return Response(status=400)
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        db.add_artist(post_body)
    except BadRequest as e:
        raise InvalidUsage(e.message, status_code=e.error_code)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    return Response(status=201)
@app.route('/album', methods=["POST"])
def add_album():
"""
Loads a new appearance of song
(and possibly a new song) into the database.
"""
post_body = request.json
if not post_body:
logging.error("No post body")
return Response(status=400)
# get DB class with new connection
db = DB(get_db_conn())
try:
db.add_album(post_body)
except BadRequest as e:
raise InvalidUsage(e.message, status_code=e.error_code)
except sqlite3.Error as e:
print(e)
raise InvalidUsage(str(e))
return Response(status=201)
@app.route('/songs', methods=["POST"])
def add_song():
"""
Loads a new appearance of song
(and possibly a new song) into the database.
"""
post_body = request.json
if not post_body:
logging.error("No post body")
return Response(status=400)
# get DB class with new connection
db = DB(get_db_conn())
try:
db.add_song_ms2(post_body)
except BadRequest as e:
raise InvalidUsage(e.message, status_code=e.error_code)
except sqlite3.Error as e:
print(e)
raise InvalidUsage(str(e))
return Response(status=201)
@app.route('/playlists', methods=["POST"])
def add_playlist():
"""
Adds a new playlist, with a list of ordered songs
"""
post_body = request.json
if not post_body:
logging.error("No post body")
return Response(status=400)
# get DB class with new connection
db = DB(get_db_conn())
try:
db.add_playlist(post_body)
except BadRequest as e:
raise InvalidUsage(e.message, status_code=e.error_code)
except sqlite3.Error as e:
print(e)
raise InvalidUsage(str(e))
return Response(status=201)
@app.route("/playcount", methods=["POST"])
def add_play():
"""
Aad a play count detail
Must have"play_count","song_id","date"
May have a source as either a "playlist_id" OR "album_id" OR NEITHER (indicates a direct play)
NOTE: Can have multiple calls for the same song, date, and source (playlist,album,or none)
"""
post_body = request.json
if not post_body:
logging.error("No post body")
return Response(status=400)
# get DB class with new connection
db = DB(get_db_conn())
try:
db.add_play(post_body)
except BadRequest as e:
raise InvalidUsage(e.message, status_code=e.error_code)
except sqlite3.Error as e:
print(e)
raise InvalidUsage(str(e))
return Response(status=201)
@app.route('/songs/<song_id>', methods=["GET"])
def find_song(song_id):
    """
    Returns a song's info
    (song_id, name, length, artist name, album name) based on song_id.
    Raises InvalidUsage with 404 for an unknown id and a generic
    InvalidUsage for database errors.
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        res = db.find_song(song_id)
        return jsonify(res)
    except KeyNotFound as e:
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    # NOTE(review): unreachable — every path above returns or raises.
    return Response(status=400)
@app.route('/songs/by_album/<album_id>', methods=["GET"])
def find_songs_by_album(album_id):
"""
Returns all an album's songs
(song_id, name, length, artist name, album name) based on album_id
"""
# get DB class with new connection
db = DB(get_db_conn())
try:
res = db.find_songs_by_album(album_id)
return jsonify(res)
except KeyNotFound as e:
print(e)
raise InvalidUsage(e.message, status_code=404)
except sqlite3.Error as e:
print(e)
raise InvalidUsage(str(e))
return Response(status=400)
@app.route('/songs/by_artist/<artist_id>', methods=["GET"])
def find_songs_by_artist(artist_id):
"""
Returns all an artists' songs
(song_id, name, length, artist name, album name) based on artist_id
"""
# get DB class with new connection
db = DB(get_db_conn())
try:
res = db.find_songs_by_artist(artist_id)
return jsonify(res)
except KeyNotFound as e:
print(e)
raise InvalidUsage(e.message, status_code=404)
except sqlite3.Error as e:
print(e)
raise InvalidUsage(str(e))
return Response(status=400)
@app.route('/albums/<album_id>', methods=["GET"])
def find_album(album_id):
"""
Returns a album's info
(album_id, album_name, release_year).
"""
# get DB class with new connection
db = DB(get_db_conn())
try:
res = db.find_album(album_id)
return jsonify(res)
except KeyNotFound as e:
print(e)
raise InvalidUsage(e.message, status_code=404)
except sqlite3.Error as e:
print(e)
raise InvalidUsage(str(e))
return Response(status=400)
@app.route('/albums/by_artist/<artist_id>', methods=["GET"])
def find_album_by_artist(artist_id):
"""
Returns a album's info
(album_id, album_name, release_year).
"""
# get DB class with new connection
db = DB(get_db_conn())
try:
res = db.find_album_by_artist(artist_id)
return jsonify(res)
except KeyNotFound as e:
print(e)
raise InvalidUsage(e.message, status_code=404)
except sqlite3.Error as e:
print(e)
raise InvalidUsage(str(e))
return Response(status=400)
@app.route('/artists/<artist_id>', methods=["GET"])
def find_artist(artist_id):
"""
Returns a artist's info
(artist_id, artist_name, country).
"""
# get DB class with new connection
db = DB(get_db_conn())
try:
res = db.find_artist(artist_id)
return jsonify(res)
except KeyNotFound as e:
print(e)
raise InvalidUsage(e.message, status_code=404)
except sqlite3.Error as e:
print(e)
raise InvalidUsage(str(e))
return Response(status=400)
# -----------------
# Analytics Endpoints
# These JSON/REST api endpoints are used to run analysis
# over the dataset and calculate an aggregated answer
# -------------------
@app.route('/analytics/artists/avg_song_length/<artist_id>', methods=["GET"])
def avg_song_length(artist_id):
"""
Returns the average length of an artist's songs (artist_id, avg_length)
"""
# get DB class with new connection
db = DB(get_db_conn())
try:
res = db.avg_song_length(artist_id)
return jsonify(res)
except KeyNotFound as e:
print(e)
raise InvalidUsage(e.message, status_code=404)
except sqlite3.Error as e:
print(e)
raise InvalidUsage(str(e))
return Response(status=400)
@app.route('/analytics/artists/cnt_singles/<artist_id>', methods=["GET"])
def cnt_singles(artist_id):
"""
Returns the number of singles an artist has (artist_id, cnt_single)
"""
# get DB class with new connection
db = DB(get_db_conn())
try:
res = db.cnt_singles(artist_id)
return jsonify(res)
except KeyNotFound as e:
print(e)
raise InvalidUsage(e.message, status_code=404)
except sqlite3.Error as e:
print(e)
raise InvalidUsage(str(e))
return Response(status=400)
@app.route('/analytics/artists/top_length/<num_artists>', methods=["GET"])
def top_length(num_artists):
    """
    Return the top (n=num_artists) artists by total length of their songs
    as JSON (artist_id, total_length).

    404 via InvalidUsage on KeyNotFound; 400 on any other sqlite error.
    (Removed the previously unreachable trailing
    `return Response(status=400)`.)
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        res = db.top_length(num_artists)
        return jsonify(res)
    except KeyNotFound as e:
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
@app.route('/analytics/solo_albums', methods=["GET"])
def solo_albums():
    """
    Return an array/list of album_ids where the album and all of its
    songs belong to the same single artist_id.

    (Removed the previously unreachable trailing
    `return Response(status=400)`.)
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        res = db.solo_albums()
        return jsonify(res)
    except BadRequest as e:
        # NOTE(review): nothing visible in db.solo_albums() raises
        # BadRequest — confirm whether KeyNotFound was intended here.
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
# Convert a YYYY-MM-DD string to a datetime.date. Raises BadRequest if not in the right format
# or if not a valid date.
# From https://stackoverflow.com/questions/53460391/passing-a-date-as-a-url-parameter-to-a-flask-route
# Better function exists in 3.7+ adding this to support 3.6+
def to_date(date_string):
    """Parse a YYYY-MM-DD string into a datetime.date.

    Raises BadRequest when the string is malformed or not a real calendar
    date. (datetime.date.fromisoformat exists in 3.7+; strptime is kept
    here to support 3.6.)
    """
    try:
        parsed = datetime.datetime.strptime(date_string, "%Y-%m-%d")
    except ValueError:
        raise BadRequest('{} is not valid date in the format YYYY-MM-DD'.format(date_string))
    return parsed.date()
@app.route('/analytics/playcount/top_song/<date_string>', methods=["GET"])
def top_song(date_string):
    """
    Return the top song played on the given YYYY-MM-DD date.

    The test data does not have ties; to break ties use song_id ascending.
    400 for a malformed date, 404 when no plays exist for that date, 400
    on any other sqlite error. (Removed the previously unreachable
    trailing `return Response(status=400)`.)
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        check_date = to_date(date_string)
        res = db.top_song(check_date)
        return jsonify(res)
    except BadRequest as e:
        # NOTE(review): assumes BadRequest exposes .message — werkzeug's
        # uses .description; confirm which BadRequest is imported.
        raise InvalidUsage(e.message, status_code=400)
    except KeyNotFound as e:
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
@app.route('/analytics/playcount/top_source/<song_id>/<date_string>', methods=["GET"])
def top_source(song_id, date_string):
    """
    For a given song and date, return the source that contributed the most
    plays: a playlist_id, an album_id, or None (a direct play).

    The test data does not account for ties; to account for them, return
    all sources sharing the top play_count. 400 for a malformed date, 404
    on KeyNotFound, 400 on any other sqlite error. (Removed the previously
    unreachable trailing `return Response(status=400)`.)
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        check_date = to_date(date_string)
        res = db.top_source(song_id, check_date)
        return jsonify(res)
    except BadRequest as e:
        raise InvalidUsage(e.message, status_code=400)
    except KeyNotFound as e:
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
@app.route('/analytics/playcount/top_country/<date_string>', methods=["GET"])
def top_country(date_string):
    """
    Extra-credit endpoint: for the given date, return the country with
    the most plays.

    400 for a malformed date, 404 on KeyNotFound, 400 on any other sqlite
    error. (Removed the previously unreachable trailing
    `return Response(status=400)`.)
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        check_date = to_date(date_string)
        res = db.top_country(check_date)
        return jsonify(res)
    except BadRequest as e:
        raise InvalidUsage(e.message, status_code=400)
    except KeyNotFound as e:
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
# -----------------
# Web APIs
# These simply wrap requests from the website/browser and
# invoke the underlying REST / JSON API.
# -------------------
# paste in a query
@app.route('/web/query', methods=["GET", "POST"])
def query():
    """Run a query pasted into the web form and render its result rows."""
    results = None
    if request.method == "POST":
        pasted = request.form.get("query")
        # note DO NOT EVER DO THIS NORMALLY (run SQL from a client/web directly)
        # https://xkcd.com/327/
        db = DB(get_db_conn())
        try:
            results = db.run_query(str(pasted))
        except sqlite3.Error as e:
            print(e)
            return render_template("error.html", errmsg=str(e), errcode=400)
    return render_template("query.html", data=results)
# forward posted JSON to a REST endpoint (comment previously said "query")
@app.route('/web/post_data', methods=["GET", "POST"])
def post_song_web():
    """
    Forward a JSON document from the web form to the REST API via POST.

    The form supplies the endpoint path and the raw JSON body; downstream
    API errors are rendered on the error page.
    """
    if request.method == "POST":
        parameter = request.form.get("path")
        if parameter is None or parameter.strip() == "":
            flash("Must set key")
            return render_template("post_data.html", data=None)
        raw_json = request.form.get("json_data")
        # Robustness fix: a missing/empty JSON body previously crashed with
        # AttributeError on .strip(); report it to the user instead.
        if raw_json is None or raw_json.strip() == "":
            flash("Must set key")
            return render_template("post_data.html", data=None)
        get_url = "http://127.0.0.1:5000/%s" % parameter
        print("Making request to %s" % get_url)
        try:
            j = json.loads(raw_json.strip())
        except ValueError as e:
            # Invalid JSON previously produced an unhandled 500.
            return render_template("error.html", errmsg=str(e), errcode=400)
        print("Json from form: %s" % j)
        # grab the response
        r = requests.post(get_url, json=j)
        if r.status_code >= 400:
            print("Error. %s Body: %s" % (r, r.content))
            return render_template("error.html", errmsg=r.json(), errcode=r.status_code)
        flash("Ran post command")
    return render_template("post_data.html", data=None)
@app.route('/web/create', methods=["GET"])
def create_web():
    """Invoke the /create REST endpoint and show its result on the home page."""
    get_url = "http://127.0.0.1:5000/create"
    print("Making request to %s" % get_url)
    # grab the response
    resp = requests.get(get_url)
    if resp.status_code >= 400:
        print("Error. %s Body: %s" % (resp, resp.content))
        return render_template("error.html", errmsg=resp.json(), errcode=resp.status_code)
    flash("Ran create command")
    return render_template("home.html", data=resp.json())
@app.route('/web/songs', methods=["GET", "POST"])
def song_landing():
    """Landing page for songs; proxies the form input to the songs REST API."""
    data = None
    if request.method == "POST":
        path = request.form.get("path")
        parameter = request.form.get("parameter")
        # A key is required for every lookup.
        if parameter is None or not parameter.strip():
            flash("Must set key")
            return render_template("songs.html", data=data)
        get_url = "http://127.0.0.1:5000/songs/" + path + parameter
        print("Making request to %s" % get_url)
        # grab the response
        resp = requests.get(get_url)
        if resp.status_code >= 400:
            print("Error. %s Body: %s" % (resp, resp.content))
            return render_template("error.html", errmsg=resp.json(), errcode=resp.status_code)
        data = resp.json()
    return render_template("songs.html", data=data)
@app.route('/web/artists', methods=["GET", "POST"])
def artists_landing():
    """Landing page for artists; proxies the form input to the artists REST API."""
    data = None
    if request.method == "POST":
        path = request.form.get("path")
        # Ensure a key was submitted before building the URL.
        parameter = request.form.get("parameter")
        if parameter is None or not parameter.strip():
            flash("Must set key")
            return render_template("artists.html", data=data)
        get_url = "http://127.0.0.1:5000/artists/" + path + parameter
        # grab the response
        resp = requests.get(get_url)
        if resp.status_code >= 400:
            print("Error. %s Body: %s" % (resp, resp.content))
            return render_template("error.html", errmsg=resp.json(), errcode=resp.status_code)
        data = resp.json()
    return render_template("artists.html", data=data)
@app.route('/web/albums', methods=["GET", "POST"])
def albums_landing():
    """Landing page for albums; proxies the form input to the albums REST API."""
    data = None
    if request.method == "POST":
        path = request.form.get("path")
        # Ensure a key was submitted before building the URL.
        parameter = request.form.get("parameter")
        if parameter is None or not parameter.strip():
            flash("Must set key")
            return render_template("albums.html", data=data)
        get_url = "http://127.0.0.1:5000/albums/" + path + parameter
        # grab the response
        resp = requests.get(get_url)
        if resp.status_code >= 400:
            print("Error. %s Body: %s" % (resp, resp.content))
            return render_template("error.html", errmsg=resp.json(), errcode=resp.status_code)
        data = resp.json()
    return render_template("albums.html", data=data)
@app.route('/web/analytics', methods=["GET", "POST"])
def analytics_landing():
    """Landing page for analytics; builds the REST URL for the chosen report."""
    data = None
    if request.method == "POST":
        path = request.form.get("path")
        base = "http://127.0.0.1:5000/analytics/"
        if path == "solo_albums":
            # This report takes no parameters.
            get_url = base + path
        elif path in ("playcount/top_song/", "playcount/top_country/"):
            # Date-driven reports need a date from the form.
            date = request.form.get("date")
            if date is None or not date.strip():
                flash("Must set key")
                return render_template("analytics.html", data=data)
            get_url = base + path + date
        elif path == "playcount/top_source/":
            # top_source needs both a song id and a date string.
            parameter = request.form.get("parameter")
            if parameter is None or not parameter.strip():
                flash("Must set key")
                return render_template("analytics.html", data=data)
            parameter2 = request.form.get("parameter2")
            if parameter2 is None or not parameter2.strip():
                flash("Must set key")
                return render_template("analytics.html", data=data)
            get_url = base + path + parameter + '/' + parameter2
        else:
            # Remaining reports take a single parameter.
            parameter = request.form.get("parameter")
            if parameter is None or not parameter.strip():
                flash("Must set key")
                return render_template("analytics.html", data=data)
            get_url = base + path + parameter
        # grab the response
        print(get_url)
        r = requests.get(get_url)
        if r.status_code >= 400:
            print("Error. %s Body: %s" % (r, r.content))
            return render_template("error.html", errmsg=r.json(), errcode=r.status_code)
        data = r.json()
    return render_template("analytics.html", data=data)
# -----------------
# Utilities / Errors
# -------------------
# gets connection to database
def get_db_conn():
    """Return the per-request sqlite3 connection, creating and caching it on g."""
    conn = getattr(g, '_database', None)
    if conn is None:
        conn = g._database = sqlite3.connect(DATABASE)
    return conn
# Error Class for managing Errors
class InvalidUsage(Exception):
    """API error carrying a message, an HTTP status code and optional payload."""

    # Default HTTP status when none is supplied.
    status_code = 400

    def __init__(self, message, status_code=None, payload=None):
        super().__init__()
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Serialize the error (payload entries plus 'message') for jsonify."""
        body = dict(self.payload or ())
        body['message'] = self.message
        return body
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
    """Convert an InvalidUsage exception into a JSON response with its status."""
    resp = jsonify(error.to_dict())
    resp.status_code = error.status_code
    return resp
# called on close of response; closes db connection
@app.teardown_appcontext
def close_connection(exception):
    """Close the per-request database connection when the app context ends."""
    conn = getattr(g, '_database', None)
    if conn is not None:
        conn.close()
# ########### post MS1 ############## #
| sarikam1/DatabaseModelling | Database Modelling/server/app.py | app.py | py | 20,869 | python | en | code | 0 | github-code | 36 |
20543918172 | from copy import deepcopy
from pandas import DataFrame as DF
from scipy.spatial.transform import Slerp
from scipy.spatial.transform import Rotation as R
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
# Monotonically increasing id handed to each new Sample by __init__.
# NOTE: shadows the `id` builtin; renaming would touch every reference.
id = 0
# When True, orientation quaternions are resampled with spherical linear
# interpolation (Slerp) instead of plain per-component linear interpolation.
USE_SLERP = False
class Sample:
    """One recorded activity sample holding four sensor DataFrames (gyro,
    accelerometer, orientation, gravity), each with an int-nanosecond
    'time' column plus per-axis value columns."""
    def __init__(
        self,
        source: str,
        continuity_index: int,
        activity: str,
        gyro: DF,
        accelerometer: DF,
        orient: DF,
        gravity: DF,
    ):
        # Assign a unique id from the module-level counter.
        global id
        self.id = id
        id += 1
        self.source = source
        self.continuity_index = continuity_index
        self.activity = activity
        self.gyro = gyro
        self.accelerometer = accelerometer
        self.orient = orient
        self.gravity = gravity
    def __repr__(self):
        classname = self.__class__.__name__
        return f"{classname} {self.id}: {self.activity}"
    def plot_sensor_data(self, sensor_data, title, domain = (0, float("inf"))):
        """Helper function to plot a specific sensor's data."""
        df = sensor_data
        # NOTE(review): the 1e10 factor implies `domain` is expressed in
        # units of 10 seconds (timestamps are nanoseconds) — confirm the
        # intended unit; a seconds domain would need 1e9.
        filtered = df[df["time"] < domain[1] * 1e10]
        filtered = filtered[filtered["time"] > domain[0] * 1e10]
        for col in df.columns:
            # if col != "time":
            # Euler-angle columns are skipped so orientation plots only
            # show the remaining (e.g. quaternion) channels.
            if col not in ["time", "roll", "pitch", "yaw"]:
                plt.plot(filtered["time"], filtered[col], label=col)
        plt.title(title)
        plt.legend()
    def getLength(self):
        """Return the duration of the gyro recording in seconds (ns -> s)."""
        return self.gyro["time"].iloc[-1] * 1e-9 - self.gyro["time"].iloc[0]* 1e-9
    def graph(self, domain=(0, float("inf"))):
        """Plots the data for all sensors in separate subplots."""
        fig, axs = plt.subplots(4, 1, figsize=(10, 20))
        fig.suptitle(f"Data Source: {self.source}", fontsize=16, y=1.05)
        plt.subplot(4, 1, 1)
        self.plot_sensor_data(self.gyro, "Gyroscope Data", domain)
        plt.subplot(4, 1, 2)
        self.plot_sensor_data(self.accelerometer, "Accelerometer Data", domain)
        plt.subplot(4, 1, 3)
        self.plot_sensor_data(self.orient, "Orientation Data", domain)
        plt.subplot(4, 1, 4)
        self.plot_sensor_data(self.gravity, "Gravity Data", domain)
        plt.tight_layout()
        plt.show()
    def _plot_sensor_data_edges(self, subplot, sensor_data, title, margin_sec: int):
        """Helper function to plot a specific sensor's data.

        Splits off the first and last `margin_sec` seconds of the recording
        and draws them side by side: the head on `subplot`, the tail on
        `subplot + 1` (with its x axis counting seconds back from the end).
        """
        df = sensor_data
        offset_nanosec = margin_sec * 1e9
        start_split = df[df["time"] > offset_nanosec].index[0]
        df_start = df.loc[: start_split - 1]
        maxtime = df["time"].iloc[-1]
        end_split = df[df["time"] > maxtime - offset_nanosec].index[0]
        df_end = df.loc[end_split:]
        plt.subplot(subplot)
        time_data = df_start["time"]
        for col in df_start.columns:
            if col != "time":
                plt.plot(time_data * 1e-9, df_start[col], label=col)
        plt.title(title)
        plt.legend()
        plt.subplot(subplot + 1)
        plt.title(title)
        # plt.legend() results in: No artists with labels found to put in legend. Note that artists whose label start with an underscore are ignored when legend() is called with no argument.
        time_data = df_end["time"]
        for col in df_end.columns:
            if col != "time":
                # Shift the tail so x values are negative seconds from the end.
                plt.plot((time_data - maxtime) * 1e-9, df_end[col], label=col)
    def graph_edges(self, margin):
        """Plots the data for all sensors in separate subplots."""
        fig, axs = plt.subplots(4, 2, figsize=(10, 20))
        fig.suptitle(f"Data Source: {self.source}", fontsize=16, y=1.05)
        self._plot_sensor_data_edges(421, self.gyro, "Gyroscope Data", margin)
        self._plot_sensor_data_edges(
            423, self.accelerometer, "Accelerometer Data", margin
        )
        self._plot_sensor_data_edges(425, self.orient, "Orientation Data", margin)
        self._plot_sensor_data_edges(427, self.gravity, "Gravity Data", margin)
        plt.tight_layout()
        plt.show(block=True)
    def showAccPlot(self, window=128):
        """Plot the first `window` accelerometer rows, one line per axis."""
        acc_128 = self.accelerometer[0:window]
        print(acc_128.shape)
        x = acc_128['x']
        y = acc_128['y']
        z = acc_128['z']
        index = np.arange(0, window, 1)
        fig = plt.gcf()
        fig.set_size_inches(15, 8)
        plt.title(f"Accelerometer - {self.activity} - {window} samples")
        plt.plot(index, x, label="x")
        plt.plot(index, y, label="y")
        plt.plot(index, z, label="z")
        plt.legend()
        plt.show()
    def save_trim(self, start, end):
        """Persist trim bounds for this sample as 'start|end' in trim.txt."""
        with open(os.path.join(self.source, "trim.txt"), "w") as f:
            f.write(f"{start}|{end}")
    def resample(self, frequency_hz: float):
        """Resample all four sensors onto a regular grid at `frequency_hz`."""
        # NOTE(review): 1 / frequency_hz is an interval in *seconds*, but the
        # resample_sensor helpers treat the value as *milliseconds* — this
        # looks off by a factor of 1000; confirm the intended sample rate.
        interval_ms = 1 / frequency_hz
        self.gyro = resample_sensor(self.gyro, interval_ms)
        self.accelerometer = resample_sensor(self.accelerometer, interval_ms)
        if USE_SLERP:
            self.orient = resample_sensor_quaternion(self.orient, interval_ms)
        else:
            self.orient = resample_sensor(self.orient, interval_ms)
        self.gravity = resample_sensor(self.gravity, interval_ms)
    def synchronize(self):
        """Effect:
        All sensors are split into the same time windows
        e.g. if sample had gyro readings in seconds [1, 5]
        but accelerometer [3, 6], synchronized would have readings
        for both in time window [3, 5].
        Necessary for models taking multiple sensors as inputs
        """
        pass # TODO
def resample_sensor(sensor: DF, interval_ms: float):
    """Resample one sensor DataFrame onto a regular time grid.

    `sensor` must have a 'time' column of int nanosecond timestamps plus
    numeric value columns. Values are bucket-averaged onto interval_ms-sized
    bins, gaps are filled by linear interpolation, and the result is trimmed
    back to the original [min, max] timestamp range. The input is not
    modified.
    """
    # Work on a copy so the caller's DataFrame is untouched.
    df = deepcopy(sensor)
    t_min = np.min(df['time'])
    t_max = np.max(df['time'])
    # Interpret the int64 nanosecond stamps as datetimes for resampling.
    df['time'] = pd.to_datetime(df['time'])
    df.set_index('time', inplace=True)
    # 'ms' replaces the deprecated 'L' millisecond frequency alias.
    resampled = df.resample(f'{interval_ms}ms').mean()
    interpolated = resampled.interpolate(method='linear')
    out = interpolated.reset_index()
    # Convert the datetime index back to int64 nanoseconds.
    out['time'] = out['time'].astype('int64')
    # Resampling can add bin edges outside the observed range; drop them.
    return out[(out['time'] >= t_min) & (out['time'] <= t_max)]
def resample_sensor_quaternion(df: DF, interval_ms: float):
    """Resample an orientation DataFrame, using Slerp for the quaternions.

    The euler columns (roll/pitch/yaw) are resampled linearly via
    resample_sensor; the quaternion columns (qx, qy, qz, qw) are re-evaluated
    with spherical linear interpolation at the new timestamps.

    Raises ValueError when the quaternion columns contain NaNs or have an
    unexpected shape.
    """
    if df.empty:
        return df
    # Slerp key times in seconds (timestamps are int nanoseconds).
    times = pd.to_timedelta(df['time'] / 1e9, unit="seconds").dt.total_seconds()
    if df[['qx', 'qy', 'qz', 'qw']].isnull().values.any():
        raise ValueError("NaN values found in quaternion data.")
    quaternions = df[['qx', 'qy', 'qz', 'qw']].to_numpy()
    if quaternions.shape[1] != 4:
        raise ValueError(f"Quaternion data has invalid shape: {quaternions.shape}")
    # Resample the euler-angle part onto the regular grid.
    df = resample_sensor(df[['time', 'roll', 'pitch', 'yaw']], interval_ms)
    # Bug fix: resample_sensor trims rows, which can leave a non-zero-based
    # index; pd.concat(axis=1) aligns on the index, so without this reset the
    # quaternion rows could be NaN-padded and misaligned.
    df = df.reset_index(drop=True)
    rotations = R.from_quat(quaternions)
    # NOTE(review): Slerp requires strictly increasing key times — assumes
    # the input timestamps are unique and sorted; confirm upstream.
    slerp = Slerp(times, rotations)
    new_rotations = slerp(df.time / 1e9)
    new_quats = new_rotations.as_quat()
    quaternion_df = pd.DataFrame(new_quats, columns=['qx', 'qy', 'qz', 'qw'])
    final_df = pd.concat([df, quaternion_df], axis=1)
    return final_df
36450057709 |
def bubble_up_max(node, heap):
    """Append `node` to a 0-indexed max-heap list and restore the heap property.

    Bug fix: the original used `i // 2` as the parent index, which is only
    correct for 1-indexed heaps; for a 0-indexed list the parent of i is
    (i - 1) // 2, and index 0 has no parent.
    """
    heap.append(node)
    i = len(heap) - 1
    # Swap upward while the parent is smaller than the inserted value.
    while i > 0 and heap[(i - 1) // 2] < node:
        parent = (i - 1) // 2
        heap[parent], heap[i] = heap[i], heap[parent]
        i = parent
def bubble_up_min(node, heap):
    """Append `node` to a 0-indexed min-heap list and restore the heap property.

    Bug fix: the parent of index i in a 0-indexed heap is (i - 1) // 2,
    not i // 2, and index 0 has no parent.
    """
    heap.append(node)
    i = len(heap) - 1
    # Swap upward while the parent is larger than the inserted value.
    while i > 0 and heap[(i - 1) // 2] > node:
        parent = (i - 1) // 2
        heap[parent], heap[i] = heap[i], heap[parent]
        i = parent
def sift_down_max(node, heap):
    """Sink heap[0] to restore the max-heap property of a 0-indexed list.

    `node` is kept for interface compatibility but is unused: the value
    being sifted is whatever sits at heap[0].

    Bug fixes vs. the original: child indices are now bounds-checked (the
    original raised IndexError at the leaves), and the right-child branch
    compares heap[2*i + 2] instead of repeating the left child.
    """
    i = 0
    size = len(heap)
    while True:
        left = 2 * i + 1
        right = 2 * i + 2
        largest = i
        if left < size and heap[left] > heap[largest]:
            largest = left
        if right < size and heap[right] > heap[largest]:
            largest = right
        if largest == i:
            # Both children (if any) are <= the current node: done.
            break
        heap[largest], heap[i] = heap[i], heap[largest]
        i = largest
def sift_down_min(node, heap):
    """Sink heap[0] to restore the min-heap property of a 0-indexed list.

    `node` is kept for interface compatibility but is unused: the value
    being sifted is whatever sits at heap[0].

    Bug fixes vs. the original: child indices are now bounds-checked (the
    original raised IndexError at the leaves), and the right-child branch
    compares heap[2*i + 2] instead of repeating the left child.
    """
    i = 0
    size = len(heap)
    while True:
        left = 2 * i + 1
        right = 2 * i + 2
        smallest = i
        if left < size and heap[left] < heap[smallest]:
            smallest = left
        if right < size and heap[right] < heap[smallest]:
            smallest = right
        if smallest == i:
            # Both children (if any) are >= the current node: done.
            break
        heap[smallest], heap[i] = heap[i], heap[smallest]
        i = smallest
def heapify(initial, param):
    """Build a heap list from `initial`; `param` selects "max" or "min" order.

    An unrecognized `param` yields an empty heap, matching the original
    behavior.
    """
    heap = []
    inserter = None
    if param == "max":
        inserter = bubble_up_max
    elif param == "min":
        inserter = bubble_up_min
    if inserter is not None:
        for value in initial:
            inserter(value, heap)
    return heap
def print_tree(heap):
    """Print each node that has two children as `value -> ( left , right )`.

    Nodes with fewer than two children (leaves and a possible single-left
    node) are not printed, matching the original behavior.
    """
    for idx, value in enumerate(heap):
        right = idx * 2 + 2
        if right < len(heap):
            print(value, "-> (", heap[right - 1], ",", heap[right], ")")
if __name__ == "__main__":
    # Interactive driver: read the elements, build the requested heap,
    # then show the backing array and the parent -> children edges.
    size = int(input("size of the array?"))
    initial_array = [int(input("enter an element:")) for i in range(size)]
    param = input("min heap or max heap? ")  # expects exactly "min" or "max"
    heap = (heapify(initial_array, param=param))
    print(heap)
    print_tree(heap)
| mehrshad-sdtn/Algorithm-Design | src/Heap.py | Heap.py | py | 1,911 | python | en | code | 0 | github-code | 36 |
31977214474 | load("@gapid//tools/build:cc_toolchain.bzl", "cc_configure")
load("@gapid//tools/build/rules:android.bzl", "android_native_app_glue", "ndk_vk_validation_layer", "ndk_version_check")
load("@gapid//tools/build/rules:repository.bzl", "github_repository", "maybe_repository")
load("@gapid//tools/build/third_party:breakpad.bzl", "breakpad")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository", "new_git_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
# Defines the repositories for GAPID's dependencies, excluding the
# go dependencies, which require @io_bazel_rules_go to be setup.
# android - if false, the Android NDK/SDK are not initialized.
# mingw - if false, our cc toolchain, which uses MinGW on Windows is not initialized.
# locals - can be used to provide local path overrides for repos:
# {"foo": "/path/to/foo"} would cause @foo to be a local repo based on /path/to/foo.
def gapid_dependencies(android = True, mingw = True, locals = {}):
    """Declares GAPID's non-go external dependencies.

    Args:
      android: if False, the Android NDK/SDK repositories are not initialized.
      mingw: if False, the custom cc toolchain (MinGW on Windows) is not configured.
      locals: dict of repo-name -> local path overrides, e.g. {"foo": "/path/to/foo"}.
    """
    #####################################################
    # Get repositories with workspace rules we need first
    maybe_repository(
        github_repository,
        name = "io_bazel_rules_go",
        locals = locals,
        organization = "bazelbuild",
        project = "rules_go",
        commit = "efc3212592320c1ab7f986c9a7882770ee06ad3b", # 0.34.0
        sha256 = "eb10f4436ddc732127afedf78636637d0cc9b256aba9f643a452914289266e6b",
    )
    maybe_repository(
        github_repository,
        name = "rules_python",
        locals = locals,
        organization = "bazelbuild",
        project = "rules_python",
        commit = "9bf7c49ea1920e497f857ccc1e9c2d1189c8a1c9",
        sha256 = "ebeae40415e1ac0ab4e4323f91d6213ec799ad8fcacecd4c499009e1d8d2eb51",
    )
    maybe_repository(
        github_repository,
        name = "bazel_gazelle",
        locals = locals,
        organization = "bazelbuild",
        project = "bazel-gazelle",
        commit = "e9091445339de2ba7c01c3561f751b64a7fab4a5", # 0.23.0
        sha256 = "03e266ed67fd21f6fbede975773a569d397312daae71980d34ff7f7e087b7b14",
    )
    maybe_repository(github_repository,
        name = "net_zlib", # name used by rules_go
        locals = locals,
        organization = "madler",
        project = "zlib",
        commit = "21767c654d31d2dccdde4330529775c6c5fd5389", # 1.2.12
        build_file = "@gapid//tools/build/third_party:zlib.BUILD",
        sha256 = "b860a877983100f28c7bcf2f3bb7abbca8833e7ce3af79edfda21358441435d3",
    )
    maybe_repository(
        github_repository,
        name = "com_google_protobuf",
        locals = locals,
        organization = "google",
        project = "protobuf",
        commit = "ab840345966d0fa8e7100d771c92a73bfbadd25c", # 3.21.5
        sha256 = "0025119f5c97871436b4b271fee48bd6bfdc99956023e0d4fd653dd8eaaeff52",
        repo_mapping = {"@zlib": "@net_zlib"},
    )
    maybe_repository(
        github_repository,
        name = "com_github_grpc_grpc",
        locals = locals,
        organization = "grpc",
        project = "grpc",
        commit = "d2054ec6c6e8abcecf0e24b0b4ee75035d80c3cc", # 1.48.0
        sha256 = "ea0da456d849eafa5287dc1e9d53c065896dca2cd896a984101ebe0708979dca",
        repo_mapping = {"@zlib": "@net_zlib"},
        patches = [
            # Remove calling the go dependencies, since we do that ourselves.
            "@gapid//tools/build/third_party:com_github_grpc_grpc.patch",
        ],
    )
    ###########################################
    # Now get all our other non-go dependencies
    maybe_repository(
        github_repository,
        name = "com_google_googletest",
        locals = locals,
        organization = "google",
        project = "googletest",
        commit = "58d77fa8070e8cec2dc1ed015d66b454c8d78850", # 1.12.1
        sha256 = "ab78fa3f912d44d38b785ec011a25f26512aaedc5291f51f3807c592b506d33a",
    )
    maybe_repository(
        github_repository,
        name = "astc_encoder",
        locals = locals,
        organization = "ARM-software",
        project = "astc-encoder",
        commit = "f6236cf158a877b3279a2090dbea5e9a4c105d64", # 4.0.0
        build_file = "@gapid//tools/build/third_party:astc-encoder.BUILD",
        sha256 = "28305281b0fe89b0e57c61f684ed7f6145a5079a3f4f03a4fd3fe0c27df0bb45",
    )
    maybe_repository(
        github_repository,
        name = "etc2comp",
        locals = locals,
        organization = "google",
        project = "etc2comp",
        commit = "9cd0f9cae0f32338943699bb418107db61bb66f2", # 2017/04/24
        build_file = "@gapid//tools/build/third_party:etc2comp.BUILD",
        sha256 = "0ddcf7484c0d55bc5a3cb92edb4812dc932ac9f73b4641ad2843fec82ae8cf90",
    )
    maybe_repository(
        breakpad,
        name = "breakpad",
        locals = locals,
        commit = "335e61656fa6034fabc3431a91e5800ba6fc3dc9",
        build_file = "@gapid//tools/build/third_party/breakpad:breakpad.BUILD",
    )
    maybe_repository(
        github_repository,
        name = "cityhash",
        locals = locals,
        organization = "google",
        project = "cityhash",
        commit = "f5dc54147fcce12cefd16548c8e760d68ac04226",
        build_file = "@gapid//tools/build/third_party:cityhash.BUILD",
        sha256 = "20ab6da9929826c7c81ea3b7348190538a23f823a8b749c2da9715ecb7a6b545",
    )
    # Override the gRPC abseil dependency, so we can patch it.
    maybe_repository(
        github_repository,
        name = "com_google_absl",
        locals = locals,
        organization = "abseil",
        project = "abseil-cpp",
        commit = "273292d1cfc0a94a65082ee350509af1d113344d", # LTS 20220623, Patch 0
        sha256 = "6764f226bd6e2d8ab9fe2f3cab5f45fb1a4a15c04b58b87ba7fa87456054f98b",
        patches = [
            # Workaround for https://github.com/abseil/abseil-cpp/issues/326.
            "@gapid//tools/build/third_party:abseil_macos_fix.patch",
            # Pick up bcrypt library on Windows.
            "@gapid//tools/build/third_party:abseil_windows_fix.patch",
        ],
    )
    maybe_repository(
        github_repository,
        name = "glslang",
        locals = locals,
        organization = "KhronosGroup",
        project = "glslang",
        commit = "73c9630da979017b2f7e19c6549e2bdb93d9b238", # 11.11.0
        sha256 = "9304cb73d86fc8e3f1cbcdbd157cd2750baad10cb9e3a798986bca3c3a1be1f0",
    )
    maybe_repository(
        github_repository,
        name = "stb",
        locals = locals,
        organization = "nothings",
        project = "stb",
        commit = "af1a5bc352164740c1cc1354942b1c6b72eacb8a",
        sha256 = "e3d0edbecd356506d3d69b87419de2f9d180a98099134c6343177885f6c2cbef",
        build_file = "@gapid//tools/build/third_party:stb.BUILD",
    )
    maybe_repository(
        new_git_repository,
        name = "lss",
        locals = locals,
        remote = "https://chromium.googlesource.com/linux-syscall-support",
        commit = "c0c9689369b4c5e46b440993807ce4b0a7c9af8a",
        build_file = "@gapid//tools/build/third_party:lss.BUILD",
        shallow_since = "1660655052 +0000",
    )
    maybe_repository(
        github_repository,
        name = "perfetto",
        locals = locals,
        organization = "google",
        project = "perfetto",
        commit = "0ff403688efce9d5de43d69cae3c835e993e4730", # 29+
        sha256 = "e609a91a6d64caf9a4e4b64f1826d160eba8fd84f7e5e94025ba287374e78e30",
        patches = [
            # Fix a Windows MinGW build issue.
            "@gapid//tools/build/third_party:perfetto.patch",
        ]
    )
    maybe_repository(
        http_archive,
        name = "sqlite",
        locals = locals,
        url = "https://storage.googleapis.com/perfetto/sqlite-amalgamation-3350400.zip",
        sha256 = "f3bf0df69f5de0675196f4644e05d07dbc698d674dc563a12eff17d5b215cdf5",
        strip_prefix = "sqlite-amalgamation-3350400",
        build_file = "@perfetto//bazel:sqlite.BUILD",
    )
    maybe_repository(
        http_archive,
        name = "sqlite_src",
        locals = locals,
        url = "https://storage.googleapis.com/perfetto/sqlite-src-3320300.zip",
        sha256 = "9312f0865d3692384d466048f746d18f88e7ffd1758b77d4f07904e03ed5f5b9",
        strip_prefix = "sqlite-src-3320300",
        build_file = "@perfetto//bazel:sqlite.BUILD",
    )
    maybe_repository(
        native.new_local_repository,
        name = "perfetto_cfg",
        locals = locals,
        path = "tools/build/third_party/perfetto",
        build_file = "@gapid//tools/build/third_party/perfetto:BUILD.bazel",
    )
    maybe_repository(
        github_repository,
        name = "spirv_headers",
        locals = locals,
        organization = "KhronosGroup",
        project = "SPIRV-Headers",
        commit = "b2a156e1c0434bc8c99aaebba1c7be98be7ac580", # 1.3.216.0
        sha256 = "fbb4e256c2e9385169067d3b6f2ed3800f042afac9fb44a348b619aa277bb1fd",
    )
    maybe_repository(
        github_repository,
        name = "spirv_cross",
        locals = locals,
        organization = "KhronosGroup",
        project = "SPIRV-Cross",
        commit = "0e2880ab990e79ce6cc8c79c219feda42d98b1e8", # 2021-08-30
        build_file = "@gapid//tools/build/third_party:spirv-cross.BUILD",
        sha256 = "7ae1069c29f507730ffa5143ac23a5be87444d18262b3b327dfb00ca53ae07cd",
    )
    maybe_repository(
        github_repository,
        name = "spirv_tools",
        locals = locals,
        organization = "KhronosGroup",
        project = "SPIRV-Tools",
        commit = "b930e734ea198b7aabbbf04ee1562cf6f57962f0", # 1.3.216.0
        sha256 = "2d956e7d49a8795335d13c3099c44aae4fe501eb3ec0dbf7e1bfa28df8029b43",
    )
    maybe_repository(
        github_repository,
        name = "spirv_reflect",
        locals = locals,
        organization = "KhronosGroup",
        project = "SPIRV-Reflect",
        commit = "0f142bbfe9bd7aeeae6b3c703bcaf837dba8df9d", # 1.3.216.0
        sha256 = "8eae9dcd2f6954b452a9a53b02ce7507dd3dcd02bacb678c4316f336dab79d86",
    )
    maybe_repository(
        http_archive,
        name = "vscode-languageclient",
        locals = locals,
        url = "https://registry.npmjs.org/vscode-languageclient/-/vscode-languageclient-2.6.3.tgz",
        build_file = "@gapid//tools/build/third_party:vscode-languageclient.BUILD",
        sha256 = "42ad6dc73bbf24a067d1e21038d35deab975cb207ac2d63b81c37a977d431d8f",
    )
    maybe_repository(
        http_archive,
        name = "vscode-jsonrpc",
        locals = locals,
        url = "https://registry.npmjs.org/vscode-jsonrpc/-/vscode-jsonrpc-2.4.0.tgz",
        build_file = "@gapid//tools/build/third_party:vscode-jsonrpc.BUILD",
        sha256= "bed9b2facb7d179f14c8a710db8e613be56bd88b2a75443143778813048b5c89",
    )
    maybe_repository(
        http_archive,
        name = "vscode-languageserver-types",
        locals = locals,
        url = "https://registry.npmjs.org/vscode-languageserver-types/-/vscode-languageserver-types-1.0.4.tgz",
        build_file = "@gapid//tools/build/third_party:vscode-languageserver-types.BUILD",
        sha256 = "0cd219ac388c41a70c3ff4f72d25bd54fa351bc0850196c25c6c3361e799ac79",
    )
    maybe_repository(
        github_repository,
        name = "vulkan-headers",
        locals = locals,
        organization = "KhronosGroup",
        project = "Vulkan-Headers",
        commit = "3ef4c97fd6ea001d75a8e9da408ee473c180e456", # 1.3.216
        build_file = "@gapid//tools/build/third_party:vulkan-headers.BUILD",
        sha256 = "64a7fc6994501b36811af47b21385251a56a136a3ed3cf92673465c9d62985a1",
    )
    # Android SDK/NDK repositories plus NDK helper rules (glue, validation, version check).
    if android:
        maybe_repository(
            native.android_sdk_repository,
            name = "androidsdk",
            locals = locals,
            api_level = 26, # This is the target API
            build_tools_version = "30.0.3",
        )
        maybe_repository(
            native.android_ndk_repository,
            name = "androidndk",
            locals = locals,
            api_level = 23, # This is the minimum API
        )
        maybe_repository(
            android_native_app_glue,
            name = "android_native_app_glue",
            locals = locals,
        )
        maybe_repository(
            ndk_vk_validation_layer,
            name = "ndk_vk_validation_layer",
            locals = locals,
        )
        maybe_repository(
            ndk_version_check,
            name = "ndk_version_check",
            locals = locals,
        )
    # Configure the custom cc toolchain (MinGW on Windows).
    if mingw:
        cc_configure()
| google/agi | tools/build/workspace.bzl | workspace.bzl | bzl | 12,552 | python | en | code | 815 | github-code | 36 |
def solution(people, tshirts):
    """Greedily match people to shirts whose size is >= the person's size.

    Both lists are sorted ascending; each person takes an exact-size shirt
    when one exists, otherwise the smallest strictly larger shirt. Returns
    the number of people that received a shirt. Note: `people` (and,
    until the first filtering step, `tshirts`) are sorted/mutated in place,
    matching the original behavior.
    """
    matched = 0
    people.sort()
    tshirts.sort()
    for size in people:
        if size in tshirts:
            # Exact fit: consume that shirt.
            tshirts.remove(size)
            matched += 1
        else:
            # Shirts smaller than this person can never be used again
            # (people are processed smallest first), so drop them and
            # hand out the smallest remaining shirt, if any.
            tshirts = [t for t in tshirts if t > size]
            if tshirts:
                matched += 1
                del tshirts[0]
    return matched


if __name__ == "__main__":
    # Demo, guarded so importing the module has no side effects.
    # Expected: 2 (sizes 2 and 3 matched; nothing fits the size-4 person).
    # (Dataset metadata that was fused onto these lines has been removed —
    # it made the module a syntax error.)
    print(solution([2, 3, 4], [1, 2, 3]))
26443050513 | # -*- coding: utf-8 -*-
"""
Created on Thu May 28 11:29:23 2020
@author: jampi
"""
"""BUG FIX NOTES:
-functions need to be changed so the input arrays are not mutated in place
"""
import csv
#Function to cut eva
def cut_eva(EVA_width, cut_list_in, cut_pieces_in = [],EVA_consumption_length = 0, k = -1, main_cuts_in = []):
    """Recursively lay out the pieces in `cut_list_in` on an EVA roll.

    EVA_width is the roll width; each piece is a list starting with
    [width, length, area, ...]. For each "main" piece both orientations are
    tried, the leftover strip is filled via remainder_cut, and the more
    area-efficient orientation wins. Returns a tuple of
    (cut_pieces, EVA_consumption_length, main_cuts).

    NOTE: the mutable default arguments are benign here because every list
    is copied immediately below and the copies are the only ones mutated —
    but they are still a trap if that copying is ever removed.
    NOTE(review): if a piece fits the roll width in neither orientation,
    both efficiencies stay -1 and the else branch reads the undefined
    EVA_consumption_2 (NameError) — assumes every piece fits at least one
    way; confirm with the callers.
    """
    #Recursion counter
    k+=1
    #Duplicate cut_pieces array
    cut_pieces = [i for i in cut_pieces_in]
    #duplicate cut_list array
    cut_list = [i for i in cut_list_in]
    #duplicate main_cuts entry for tracking the "ripper" cuts
    main_cuts = [i for i in main_cuts_in]
    if len(cut_list) > 0:
        #Get total area cut so far:
        total_cut_area = sum(i[2] for i in cut_pieces)
        #print(str(len(cut_pieces)+len(cut_list)) + ' | ' + str(len(cut_pieces)) + ' | ' + str(len(cut_list)))
        #Track order to cut pieces in
        first_cut_piece = cut_list[-1]
        first_cut_piece.append(k)
        cut_pieces.append(first_cut_piece)
        del(cut_list[-1])
        #Set effficiencies to negative in case one isn't used
        efficiency_1 = -1
        efficiency_2 = -1
        #Try first orientation:
        if first_cut_piece[1] <= EVA_width:
            EVA_consumption_1 = first_cut_piece[0]
            #Calculate remainder:
            remainder_1 = (EVA_width-first_cut_piece[1],first_cut_piece[0])
            #print('Remainder 1 is: ' + str(remainder_1))
            #Cut apart remainder:
            (cut_list_1,cut_pieces_1) = remainder_cut(cut_list, remainder_1, cut_pieces,k)
            #Calculate efficiency of new cuts
            # (newly covered area divided by the roll area this cut consumes;
            # the /144 converts square inches to square feet)
            efficiency_1 = (sum(i[2] for i in cut_pieces_1)-total_cut_area)/(EVA_width*EVA_consumption_1/144)
            #print('eff 1: ' + str(round(efficiency_1,2)))
        #Try first orientation:
        if first_cut_piece[0] <= EVA_width:
            EVA_consumption_2 = first_cut_piece[1]
            #Calculate remainder:
            remainder_2 = (EVA_width-first_cut_piece[0],first_cut_piece[1])
            #print('Remainder 2 is: ' + str(remainder_2))
            #Cut apart remainder:
            (cut_list_2,cut_pieces_2) = remainder_cut(cut_list, remainder_2, cut_pieces,k)
            #Calculate efficiency of new cuts
            efficiency_2 = (sum(i[2] for i in cut_pieces_2)-total_cut_area)/(EVA_width*EVA_consumption_2/144)
            #print('eff 2: ' + str(round(efficiency_2,2)))
        #use most efficient cut
        if efficiency_1 > efficiency_2:
            #print('1!')
            #Calculate EVA consumption
            EVA_consumption_length+=EVA_consumption_1
            #Append main cut length
            main_cuts.append(first_cut_piece[0])
            #Recurse with remaining uncut pieces using list 1
            return cut_eva(EVA_width,cut_list_1,cut_pieces_1,EVA_consumption_length,k,main_cuts)
        else:
            #print('2!')
            #Calculate EVA consumption
            EVA_consumption_length+=EVA_consumption_2
            #Append main cut length
            main_cuts.append(first_cut_piece[1])
            #Recurse with remaining uncut pieces using list 1
            return cut_eva(EVA_width,cut_list_2,cut_pieces_2,EVA_consumption_length,k,main_cuts)
    else:
        # Base case: nothing left to cut.
        return (cut_pieces, EVA_consumption_length, main_cuts)
def remainder_cut(cut_list_in, remainder, cut_pieces_in=None, identifier=-1):
    """Recursively guillotine-cut pieces out of a rectangular remainder.

    Parameters:
        cut_list_in: pieces still to cut, ordered smallest to largest by
                     width; each entry has at least 4 fields, the first three
                     being [width, length, area].
        remainder: (width, length) tuple of the rectangle to cut from.
        cut_pieces_in: pieces already cut (default: none).
        identifier: tag appended to each piece cut here so it can be traced
                    back to the main cut it came from.

    Returns:
        (cut_list, cut_pieces): pieces still uncut and pieces cut, choosing
        whichever piece orientation yields the larger total cut area.
    """
    # Fix: a mutable default argument ([]) is shared between calls; use the
    # None sentinel instead.
    if cut_pieces_in is None:
        cut_pieces_in = []
    # Pieces that fit into the remainder in at least one orientation.
    potentials = []
    # Copy the inputs so the caller's list objects are not mutated.
    cut_list = list(cut_list_in)
    cut_pieces = list(cut_pieces_in)
    for x in range(len(cut_list)):
        if (cut_list[x][1] <= remainder[1] and cut_list[x][0] <= remainder[0]) or (cut_list[x][0] <= remainder[1] and cut_list[x][1] <= remainder[0]):
            potentials.append([cut_list[x][0], cut_list[x][1], cut_list[x][2], cut_list[x][3], x])
    # Nothing fits: the remainder is scrap, return the inputs unchanged.
    if potentials == []:
        return (cut_list, cut_pieces)
    else:
        # Cut the largest-area candidate and tag it with the identifier.
        cut = max(potentials, key=lambda p: p[2])
        new_piece = cut[0:4]
        new_piece.append(identifier)
        cut_pieces.append(new_piece)
        cut_index = cut[-1]
        del cut_list[cut_index]
        # -1 marks an orientation that was never evaluated, so any evaluated
        # orientation (area >= 0) wins the comparison below.
        area1 = -1
        area2 = -1
        # Orientation 1: piece as given.
        if cut[0] <= remainder[0] and cut[1] <= remainder[1]:
            # Pick the cut order that leaves the largest leftover rectangle.
            best_remainder_1 = best_cut_order((cut[0], cut[1]), (remainder[0], remainder[1]))
            new_cut_list_1, new_cut_pieces_1 = remainder_cut(cut_list, best_remainder_1, cut_pieces, identifier)
            area1 = sum([i[2] for i in new_cut_pieces_1])
        # Orientation 2: piece rotated 90 degrees.
        if cut[0] <= remainder[1] and cut[1] <= remainder[0]:
            best_remainder_2 = best_cut_order((cut[1], cut[0]), (remainder[0], remainder[1]))
            new_cut_list_2, new_cut_pieces_2 = remainder_cut(cut_list, best_remainder_2, cut_pieces, identifier)
            area2 = sum([i[2] for i in new_cut_pieces_2])
        # Keep whichever orientation cut the most total area.
        if area1 > area2:
            return (new_cut_list_1, new_cut_pieces_1)
        else:
            return (new_cut_list_2, new_cut_pieces_2)
'''This function determines the best order to guillotine cut a piece from a remainder to result in the largest remaining piece'''
#This function assumes the inputs are ordered
def best_cut_order(cut, remainder):
    """Pick the first-guillotine-cut order that leaves the largest leftover.

    Both ``cut`` and ``remainder`` are (width, length) pairs, assumed ordered
    so the piece fits.  Returns the (width, length) of the bigger of the two
    rectangles left over after the first cut.
    """
    # Candidate leftover areas for the two possible first cuts.
    leftover_lengthwise = (remainder[1] - cut[1]) * remainder[0]
    leftover_widthwise = (remainder[0] - cut[0]) * remainder[1]
    if leftover_lengthwise > leftover_widthwise:
        return (remainder[1] - cut[1], remainder[0])
    return (remainder[0] - cut[0], remainder[1])
"""This function grabs the data from the csv file"""
def get_data(filename = 'Glass Block Sizes.csv'):
#Save out raw glass data in qty, size, size format. Assumes file collumns are qty, size, size
with open(filename, newline = '') as glass_sizes_file:
sizereader = csv.reader(glass_sizes_file ,dialect='excel',delimiter = ',', quotechar = '"')
glass_data = []
for x in sizereader:
for y in range(len(x)):
try:
if y == 0:
x[y]=int(x[y])
else:
x[y]=float(x[y])
except:
pass
if type(x[1]) == float:
glass_data.append(x)
return glass_data
"""This function deletes the quantity data fom the raw csv data"""
def clear_quantity(glass_data_in):
    """Expand qty-prefixed rows into one size row per panel (qty dropped)."""
    # Work on a shallow copy so the caller's list object is untouched.
    rows = [entry for entry in glass_data_in]
    glass_block_sizes = []
    expected = 0
    for entry in rows:
        expected += entry[0]
        # One fresh [size, size, ...] row per panel of this entry.
        glass_block_sizes.extend(entry[1:] for _ in range(entry[0]))
    # Sanity check: the expanded list must contain exactly qty rows per input.
    if expected != len(glass_block_sizes):
        print('ERROR! Input count does not match output list length')
        print(len(glass_block_sizes) * 2)
    return glass_block_sizes
"""This function takes in data, orders the points smaller dim., larger dim., then
sorts based on the smaller dimension"""
def order_and_sort(glass_block_sizes):
    """Normalise rows to (small dim, large dim, ...) and sort by the small dim."""
    ordered = []
    for row in glass_block_sizes:
        if row[0] < row[1]:
            ordered.append(row)
        else:
            # Swap so the smaller dimension comes first.
            ordered.append([row[1], row[0], row[2]])
    # Stable sort on the (now first) smaller dimension.
    return sorted(ordered, key=lambda row: row[0])
"""This function eliminates points that are too large to be cut from the material
and returns two arrays with the eliminated points and points to cut"""
def point_eliminate(glass_block_sizes_sorted_in, EVA_width):
    """Split rows into (cuttable, oversized) relative to the material width."""
    # Copy so the caller's list object is left alone.
    pieces = [row for row in glass_block_sizes_sorted_in]
    initial_length = len(pieces)
    keep = []
    oversized_piece_sizes = []
    for row in pieces:
        # A piece is oversized only if it fits in neither orientation.
        if row[1] > EVA_width and row[0] > EVA_width:
            oversized_piece_sizes.append(row)
        else:
            keep.append(row)
    # Sanity check: nothing was lost or duplicated by the split.
    if initial_length != len(keep) + len(oversized_piece_sizes):
        print('ERROR! Number of to-cut pieces and oversized pieces does not match total number of pieces!')
    return (keep, oversized_piece_sizes)
"""This function appends area to the size arrays
Assumes dims are in. and gives area in sq. ft."""
def append_area(glass_block_sizes_sorted_in):
    """Return rows with area (sq. ft., rounded to 2 dp) inserted at index 2.

    Assumes the first two fields of each row are dimensions in inches.
    """
    # Fix: copy each row as well -- the previous shallow copy still mutated
    # the caller's rows when the area was inserted in place, contradicting
    # the stated intent of not changing data outside the function.
    glass_block_sizes_sorted = [list(row) for row in glass_block_sizes_sorted_in]
    for row in glass_block_sizes_sorted:
        row.insert(2, round(row[0] * row[1] / 144, 2))
    return glass_block_sizes_sorted
"""This function multiplies the number of gglass block size entries by the number
of layers required for each size"""
def layer_multiplier(glass_block_sizes_sorted, layer_count=2):
    """Repeat every size entry once per required layer, as independent copies."""
    return [[value for value in row]
            for row in glass_block_sizes_sorted
            for _ in range(layer_count)]
"""This function calculates the theoretical minimum material requirement for the
cuts to be made, assuming areas are stored in the last array element """
def material_minimum(EVA_cut_list):
    """Theoretical minimum material: the sum of every entry's area field."""
    total = 0
    for entry in EVA_cut_list:
        total += entry[2]
    return total
"""This function just calls all the above functions in order to output the cut list data"""
def eva_cut_from_csv(filename, EVA_width, layer_count, marks=True):
    """Run the whole pipeline: csv file -> cut layout, consumption and yield.

    Returns (cut_pieces, EVA_consumption_length, main_cuts, EVA_cut_yield,
    oversized_pieces).  ``marks`` is accepted for interface compatibility but
    is currently unused.
    """
    # Load raw rows and expand to one row per panel.
    glass_data = get_data(filename)
    glass_data = clear_quantity(glass_data)
    # Normalise each row to (small, large) and sort by the small dimension.
    glass_data = order_and_sort(glass_data)
    # Drop the pieces that cannot be cut from this material width.
    glass_sizes, oversized_pieces = point_eliminate(glass_data, EVA_width)
    # Append an area measurement to the dimensions.
    glass_sizes = append_area(glass_sizes)
    # Fix: forward layer_count -- it was previously ignored, so the cut list
    # always used the default of 2 layers regardless of the argument.
    to_cut_list = layer_multiplier(glass_sizes, layer_count)
    # Run the cutting algorithm.
    cut_pieces, EVA_consumption_length, main_cuts = cut_eva(EVA_width, to_cut_list)
    # Yield = theoretical minimum area / area actually consumed (sq. ft.).
    EVA_cut_yield = material_minimum(to_cut_list) / (EVA_consumption_length * EVA_width / 144)
    return cut_pieces, EVA_consumption_length, main_cuts, EVA_cut_yield, oversized_pieces
40507960364 | from django import template
from apps.carts.models import *
from apps.products.models import Category
register = template.Library()
@register.simple_tag(takes_context=True)
def get_user_cart(context):
    """Template tag: return the current user's open (not yet ordered) cart,
    or [] if the lookup fails (e.g. no such cart, anonymous user)."""
    request = context['request']
    user = request.user
    try:
        cart = Cart.objects.get(client=user, is_ordered=False)
    # Fix: a bare `except:` also swallowed SystemExit/KeyboardInterrupt.
    # NOTE(review): this still masks DB errors -- Cart.DoesNotExist is
    # presumably the intended case; confirm and narrow further if so.
    except Exception:
        cart = []
    return cart
@register.simple_tag(takes_context=True)
def get_user_wishlist(context):
    """Template tag: return the ids of the products in the user's wishlist."""
    request = context['request']
    user = request.user
    try:
        wl = Wishlist.objects.filter(user=user)
    # Fix: a bare `except:` also swallowed SystemExit/KeyboardInterrupt.
    except Exception:
        wl = []
    wlist_products = [product.product.id for product in wl]
    return wlist_products
@register.simple_tag()
def categories():
    """Template tag: return all product categories, or None if the query fails."""
    try:
        cat = Category.objects.all()
    # Fix: a bare `except:` also swallowed SystemExit/KeyboardInterrupt.
    except Exception:
        cat = None
    return cat
| MansurSobirjonov/ogani | apps/products/templatetags/cart_tag.py | cart_tag.py | py | 817 | python | en | code | 1 | github-code | 36 |
27769825762 | import numpy as np
import pandas as pd
# Naive Bayes by hand: predict gender (flag) from four binary purchase
# features.  flag: 1 = Male, 0 = Female; feature values: '1' = Yes, '0' = No.
flag = ['1', '0', '1', '1', '0', '0', '1', '1', '1', '0']
datalist = [['1', '0', '0', '0'],
            ['1', '1', '1', '1'],
            ['0', '0', '0', '0'],
            ['1', '1', '1', '1'],
            ['1', '0', '1', '0'],
            ['0', '0', '0', '0'],
            ['1', '1', '1', '1'],
            ['0', '0', '0', '0'],
            ['1', '0', '0', '0'],
            ['1', '1', '1', '0']]
columns = ['magazine', 'watch', 'insurance', 'creditcard']
data = pd.DataFrame(data=datalist, columns=columns)
data['flag'] = flag
print(data)
flag = data['flag']
# Class priors P(C=0) and P(C=1); np.unique returns classes sorted ('0','1').
classes, class_counts = np.unique(flag, return_counts=True)
priors = [cnt / sum(class_counts) for cnt in class_counts]
pc0 = priors[0]
pc1 = priors[1]
def _feature_likelihoods(column):
    """Return (P(x=0|C=0), P(x=1|C=0), P(x=0|C=1), P(x=1|C=1)) for a column.

    Replaces four copy-pasted blocks that computed the same thing per feature.
    """
    probs = []
    for cls in classes:
        _, value_counts = np.unique(column[flag == cls], return_counts=True)
        probs.extend(cnt / sum(value_counts) for cnt in value_counts)
    return tuple(probs)
pm0_c0, pm1_c0, pm0_c1, pm1_c1 = _feature_likelihoods(data['magazine'])
print(pm0_c0, pm1_c0, pm0_c1, pm1_c1)
pw0_c0, pw1_c0, pw0_c1, pw1_c1 = _feature_likelihoods(data['watch'])
print(pw0_c0, pw1_c0, pw0_c1, pw1_c1)
pi0_c0, pi1_c0, pi0_c1, pi1_c1 = _feature_likelihoods(data['insurance'])
print(pi0_c0, pi1_c0, pi0_c1, pi1_c1)
pc0_c0, pc1_c0, pc0_c1, pc1_c1 = _feature_likelihoods(data['creditcard'])
print(pc0_c0, pc1_c0, pc0_c1, pc1_c1)
# Posterior scores for the query m=1, w=1, i=0, c=0.
pXc1 = pm1_c1 * pw1_c1 * pi0_c1 * pc0_c1 * pc1
# Fix: this previously used pc0_c1 (the class-1 likelihood) instead of
# pc0_c0, mixing likelihoods from the wrong class into the class-0 score.
pXc0 = pm1_c0 * pw1_c0 * pi0_c0 * pc0_c0 * pc0
print('pXc1:{} ,pXc0:{} , 所以性别是:{}'.format(pXc1, pXc0, 'Male' if pXc1 > pXc0 else 'Female'))
| kshsky/PycharmProjects | machinelearning/bayes/test1.py | test1.py | py | 2,394 | python | en | code | 0 | github-code | 36 |
11352936054 |
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
def train_random_forest():
    """Fit a random forest on bag-of-centroids features and write predictions.

    Writes the predicted sentiments to ``BagOfCentroids.csv`` with columns
    ``id`` and ``sentiment``.

    NOTE(review): ``train_centroids``, ``train``, ``test_centroids`` and
    ``test`` are not defined anywhere in this module -- they are presumably
    expected to exist as globals injected by a calling script.  Confirm
    before use; as written, calling this raises NameError.
    """
    # ****** Fit a random forest and extract predictions
    #
    forest = RandomForestClassifier(n_estimators = 100)
    # Fitting the forest may take a few minutes
    print("Fitting a random forest to labeled training data...")
    forest = forest.fit(train_centroids, train["sentiment"])
    result = forest.predict(test_centroids)
    # Write the test results (quoting=3 is csv.QUOTE_NONE).
    output = pd.DataFrame(data={"id": test["id"], "sentiment":result})
    output.to_csv("BagOfCentroids.csv", index=False, quoting=3)
    print("Wrote BagOfCentroids.csv")
| electronick1/stairs_examples | bag_of_words/bag_of_words/model.py | model.py | py | 644 | python | en | code | 5 | github-code | 36 |
37348679197 | from math import pi
from collections import defaultdict
import numpy as np
from my_gpaw.kpt_descriptor import KPointDescriptor
from .kpts import RSKPoint, to_real_space
def create_symmetry_map(kd: KPointDescriptor): # -> List[List[int]]
    """Build a lookup table relating the symmetry operations in ``kd``.

    Entry ``map_ss[s1, s2]`` is the index ``k`` of the unique operation
    satisfying U_s1 . U_k = U_s2; in particular the column for the identity
    operation gives each operation's inverse.  If the system has time
    reversal but no inversion, the operation list is doubled with the
    negated (complex-conjugating) partners in the second half.
    """
    sym = kd.symmetry
    U_scc = sym.op_scc
    nsym = len(U_scc)
    # compconj_s[s] is True when operation s involves complex conjugation.
    compconj_s = np.zeros(nsym, bool)
    if sym.time_reversal and not sym.has_inversion:
        # Append -U for every U; those extra operations imply conjugation.
        U_scc = np.concatenate([U_scc, -U_scc])
        compconj_s = np.zeros(nsym * 2, bool)
        compconj_s[nsym:] = True
        nsym *= 2
    map_ss = np.zeros((nsym, nsym), int)
    for s1 in range(nsym):
        for s2 in range(nsym):
            # diff_s[k] == 0 exactly when U_s1 . U_k equals U_s2 elementwise.
            diff_s = abs(U_scc[s1].dot(U_scc).transpose((1, 0, 2)) -
                         U_scc[s2]).sum(2).sum(1)
            indices = (diff_s == 0).nonzero()[0]
            assert len(indices) == 1
            s = indices[0]
            # Conjugation must compose consistently (xor of the two factors).
            assert compconj_s[s1] ^ compconj_s[s2] == compconj_s[s]
            map_ss[s1, s2] = s
    return map_ss
class Symmetry:
    """Symmetry bookkeeping for k-point pairs.

    Wraps a ``KPointDescriptor`` and precomputes the operation relation
    table, the index of the identity operation (``s0``) and the inverse of
    every operation (``inverse_s``).
    """
    def __init__(self, kd: KPointDescriptor):
        self.kd = kd
        # symmetry_map_ss[s1, s2] is the operation k with U_s1 . U_k = U_s2.
        self.symmetry_map_ss = create_symmetry_map(kd)
        U_scc = kd.symmetry.op_scc
        # Locate the identity among the integer rotation matrices.
        is_identity_s = (U_scc == np.eye(3, dtype=int)).all(2).all(1)
        self.s0 = is_identity_s.nonzero()[0][0]
        # The identity column of the table gives each operation's inverse.
        self.inverse_s = self.symmetry_map_ss[:, self.s0]
    def symmetry_operation(self, s: int, wfs, inverse=False):
        """Return (T, T_a, time_reversal) implementing operation ``s``.

        ``T`` maps a real-space grid array onto its transform, ``T_a`` lists
        per-atom data (target atom ``b``, displacement ``S_c`` in scaled
        coordinates, projector rotation ``U_ii``) and ``time_reversal`` says
        whether complex conjugation is applied.
        """
        if inverse:
            s = self.inverse_s[s]
        U_scc = self.kd.symmetry.op_scc
        nsym = len(U_scc)
        # Indices >= nsym encode the time-reversed partner operations.
        time_reversal = s >= nsym
        s %= nsym
        U_cc = U_scc[s]
        if (U_cc == np.eye(3, dtype=int)).all():
            # Identity rotation: nothing to permute on the grid.
            def T0(a_R):
                return a_R
        else:
            # Precompute the flat grid-index permutation implementing U_cc.
            N_c = wfs.gd.N_c
            i_cr = np.dot(U_cc.T, np.indices(N_c).reshape((3, -1)))
            i = np.ravel_multi_index(i_cr, N_c, 'wrap')
            def T0(a_R):
                return a_R.ravel()[i].reshape(N_c)
        if time_reversal:
            def T(a_R):
                return T0(a_R).conj()
        else:
            T = T0
        T_a = []
        for a, id in enumerate(wfs.setups.id_a):
            # Atom a is mapped onto atom b by operation s.
            b = self.kd.symmetry.a_sa[s, a]
            S_c = np.dot(wfs.spos_ac[a], U_cc) - wfs.spos_ac[b]
            U_ii = wfs.setups[a].R_sii[s].T
            T_a.append((b, S_c, U_ii))
        return T, T_a, time_reversal
    def apply_symmetry(self, s: int, rsk, wfs, spos_ac):
        """Return the real-space k-point ``rsk`` transformed by operation ``s``."""
        U_scc = self.kd.symmetry.op_scc
        nsym = len(U_scc)
        time_reversal = s >= nsym
        s %= nsym
        # sign is -1 for time reversal (k -> -Uk), +1 otherwise.
        sign = 1 - 2 * int(time_reversal)
        U_cc = U_scc[s]
        if (U_cc == np.eye(3)).all() and not time_reversal:
            # Identity and no conjugation: the k-point is unchanged.
            return rsk
        u1_nR = rsk.u_nR
        proj1 = rsk.proj
        f_n = rsk.f_n
        k1_c = rsk.k_c
        weight = rsk.weight
        u2_nR = np.empty_like(u1_nR)
        proj2 = proj1.new()
        k2_c = sign * U_cc.dot(k1_c)
        # Flat grid-index permutation implementing the rotation of the
        # periodic parts of the wave functions.
        N_c = u1_nR.shape[1:]
        i_cr = np.dot(U_cc.T, np.indices(N_c).reshape((3, -1)))
        i = np.ravel_multi_index(i_cr, N_c, 'wrap')
        for u1_R, u2_R in zip(u1_nR, u2_nR):
            u2_R[:] = u1_R.ravel()[i].reshape(N_c)
        for a, id in enumerate(wfs.setups.id_a):
            b = self.kd.symmetry.a_sa[s, a]
            S_c = np.dot(spos_ac[a], U_cc) - spos_ac[b]
            # Bloch phase from the displacement of atom a onto atom b.
            x = np.exp(2j * pi * np.dot(k1_c, S_c))
            U_ii = wfs.setups[a].R_sii[s].T * x
            proj2[a][:] = proj1[b].dot(U_ii)
        if time_reversal:
            np.conj(u2_nR, out=u2_nR)
            np.conj(proj2.array, out=proj2.array)
        return RSKPoint(u2_nR, proj2, f_n, k2_c, weight)
    def pairs(self, kpts, wfs, spos_ac):
        """Yield (i1, i2, s, rsk1, transformed rsk2, count) over the distinct
        symmetry-reduced k-point pairs; ``count`` is the pair's multiplicity.

        ``kpts`` must hold one entry per IBZ k-point.  The second member of
        each pair is distributed over ``wfs.world`` in bands before being
        transformed by operation ``s``.
        """
        kd = self.kd
        nsym = len(kd.symmetry.op_scc)
        assert len(kpts) == kd.nibzkpts
        # For each IBZ point, the operations (including time-reversed ones,
        # offset by nsym) that map it onto its BZ images.
        symmetries_k = []
        for k in range(kd.nibzkpts):
            indices = np.where(kd.bz2ibz_k == k)[0]
            sindices = (kd.sym_k[indices] +
                        kd.time_reversal_k[indices] * nsym)
            symmetries_k.append(sindices)
        # pairs: Dict[Tuple[int, int, int], int]
        # Count how often each (i1, i2, relating-operation) combination occurs.
        pairs1 = defaultdict(int)
        for i1 in range(kd.nibzkpts):
            for s1 in symmetries_k[i1]:
                for i2 in range(kd.nibzkpts):
                    for s2 in symmetries_k[i2]:
                        s3 = self.symmetry_map_ss[s1, s2]
                        # s3 = self.inverse_s[s3]
                        if 1: # i1 < i2:
                            pairs1[(i1, i2, s3)] += 1
                        else:
                            # NOTE: dead branch kept from an earlier attempt
                            # at ordering the pairs (guard above is `if 1`).
                            s4 = self.inverse_s[s3]
                            if i1 == i2:
                                # pairs1[(i1, i1, min(s3, s4))] += 1
                                pairs1[(i1, i1, s3)] += 1
                            else:
                                pairs1[(i2, i1, s4)] += 1
        # Merge entries whose operation maps onto the same BZ point for i2.
        pairs = {}
        seen = {}
        for (i1, i2, s), count in pairs1.items():
            k2 = kd.bz2bz_ks[kd.ibz2bz_k[i2], s]
            if (i1, k2) in seen:
                pairs[seen[(i1, k2)]] += count
            else:
                pairs[(i1, i2, s)] = count
                # seen[(i1, k2)] = (i1, i2, s)
        comm = wfs.world
        # Cache the expensive real-space conversions across the sorted loop.
        lasti1 = -1
        lasti2 = -1
        for (i1, i2, s), count in sorted(pairs.items()):
            if i1 != lasti1:
                k1 = kpts[i1]
                u1_nR = to_real_space(k1.psit)
                rsk1 = RSKPoint(u1_nR, k1.proj.broadcast(),
                                k1.f_n, k1.k_c,
                                k1.weight, k1.dPdR_aniv)
                lasti1 = i1
            if i2 == i1:
                if s == self.s0:
                    # Identity operation: reuse the first k-point directly.
                    rsk2 = rsk1
                else:
                    # Take this rank's band slice of rsk1.
                    N = len(rsk1.u_nR)
                    S = comm.size
                    B = (N + S - 1) // S
                    na = min(B * comm.rank, N)
                    nb = min(na + B, N)
                    rsk2 = RSKPoint(rsk1.u_nR[na:nb],
                                    rsk1.proj.view(na, nb),
                                    rsk1.f_n[na:nb],
                                    rsk1.k_c,
                                    rsk1.weight)
                lasti2 = i2
            elif i2 != lasti2:
                # Convert this rank's band slice of the second k-point.
                k2 = kpts[i2]
                N = len(k2.psit.array)
                S = comm.size
                B = (N + S - 1) // S
                na = min(B * comm.rank, N)
                nb = min(na + B, N)
                u2_nR = to_real_space(k2.psit, na, nb)
                rsk2 = RSKPoint(u2_nR, k2.proj.broadcast().view(na, nb),
                                k2.f_n[na:nb], k2.k_c,
                                k2.weight)
                lasti2 = i2
            yield (i1, i2, s, rsk1,
                   self.apply_symmetry(s, rsk2, wfs, spos_ac),
                   count)
| f-fathurrahman/ffr-learns-gpaw | my_gpaw/hybrids/symmetry.py | symmetry.py | py | 6,817 | python | en | code | 0 | github-code | 36 |
12427469941 | import os, sys
import logging
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import defaultdict
import multiprocessing
from functools import partial
import gensim
from gensim import models, matutils
from gensim.corpora import MmCorpus, Dictionary
from scipy.stats import entropy
from scipy.spatial.distance import pdist, squareform
from scipy.linalg import svdvals
from nltk.corpus import stopwords
from tqdm import tqdm
#logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
ignore_words = frozenset(stopwords.words('english'))
##############################################################################################################################
'''
extract_data method is copied directly from gensim.py in the pyLDAvis library
https://github.com/bmabey/pyLDAvis/blob/master/pyLDAvis/gensim.py
'''
##############################################################################################################################
def extract_data(topic_model, corpus, dictionary, doc_topic_dists=None):
    """Extract topic-term/doc-topic distributions and doc lengths from a model.

    Adapted from pyLDAvis' gensim helper.  Returns a dict with keys
    'topic_term_dists', 'doc_topic_dists', 'doc_lengths' and 'num_topics'.
    """
    if not matutils.ismatrix(corpus):
        corpus_csc = matutils.corpus2csc(corpus, num_terms=len(dictionary))
    else:
        corpus_csc = corpus
    # Need corpus to be a streaming gensim list corpus for len and inference
    # functions below:
    corpus = matutils.Sparse2Corpus(corpus_csc)
    fnames_argsort = np.asarray(list(dictionary.token2id.values()), dtype=np.int_)
    doc_lengths = corpus_csc.sum(axis=0).A.ravel()
    assert doc_lengths.shape[0] == len(corpus), 'Document lengths and corpus have different sizes {} != {}'.format(doc_lengths.shape[0], len(corpus))
    # HDP models expose lda_alpha/lda_beta instead of num_topics/state.
    if hasattr(topic_model, 'lda_alpha'):
        num_topics = len(topic_model.lda_alpha)
    else:
        num_topics = topic_model.num_topics
    if doc_topic_dists is None:
        if hasattr(topic_model, 'lda_beta'):
            gamma = topic_model.inference(corpus)
        else:
            gamma, _ = topic_model.inference(corpus)
        doc_topic_dists = gamma / gamma.sum(axis=1)[:, None]
    else:
        if isinstance(doc_topic_dists, list):
            doc_topic_dists = matutils.corpus2dense(doc_topic_dists, num_topics).T
        elif issparse(doc_topic_dists):
            # NOTE(review): `issparse` is not imported in this module, so
            # this branch raises NameError; import scipy.sparse.issparse if
            # pre-computed sparse doc_topic_dists are ever passed in.
            doc_topic_dists = doc_topic_dists.T.todense()
        doc_topic_dists = doc_topic_dists / doc_topic_dists.sum(axis=1)
    assert doc_topic_dists.shape[1] == num_topics, 'Document topics and number of topics do not match {} != {}'.format(doc_topic_dists.shape[1], num_topics)
    # Get the topic-term distribution straight from gensim without iterating
    # over tuples.
    if hasattr(topic_model, 'lda_beta'):
        topic = topic_model.lda_beta
    else:
        topic = topic_model.state.get_lambda()
    topic = topic / topic.sum(axis=1)[:, None]
    topic_term_dists = topic[:, fnames_argsort]
    assert topic_term_dists.shape[0] == doc_topic_dists.shape[1]
    # Fix: removed an unused CoherenceModel that was constructed here and
    # immediately discarded -- a full (expensive) coherence computation for
    # nothing.
    return {'topic_term_dists': topic_term_dists, 'doc_topic_dists': doc_topic_dists,
            'doc_lengths': doc_lengths, 'num_topics': num_topics}
##############################################################################################################################
def cao_juan_2009(topic_term_dists, num_topics):
    """Cao et al. 2009 metric: mean pairwise cosine distance between topics."""
    pairwise = squareform(pdist(topic_term_dists, metric='cosine'))
    n_pairs = num_topics * (num_topics - 1) / 2
    return np.sum(pairwise) / n_pairs
def arun_2010(topic_term_dists, doc_topic_dists, doc_lengths, num_topics):
    """Arun et al. 2010 metric: KL divergence between the singular values of
    the topic-term matrix and the length-weighted topic mixture."""
    singular_values = svdvals(topic_term_dists)
    weighted_mix = np.matmul(doc_lengths, doc_topic_dists) / np.linalg.norm(doc_lengths)
    return entropy(singular_values, weighted_mix)
def deveaud_2014(topic_term_dists, num_topics):
    """Deveaud et al. 2014 metric: mean pairwise Jensen-Shannon divergence."""
    pairwise = squareform(pdist(topic_term_dists, metric=jensen_shannon))
    return np.sum(pairwise) / (num_topics * (num_topics - 1))
def jensen_shannon(P, Q):
    """Jensen-Shannon divergence between two distributions (natural log)."""
    mixture = 0.5 * (P + Q)
    return 0.5 * entropy(P, mixture) + 0.5 * entropy(Q, mixture)
def preprocess_text(text):
    """Read a file and return its lowercased, stopword-free tokens (len >= 3)."""
    with open(text, 'r') as inp:
        # Join all lines into one string before tokenizing.
        text = ' '.join(line.rstrip('\n') for line in inp)
    tokens = gensim.utils.simple_preprocess(text, deacc=True, min_len=3)
    return [token for token in tokens if token not in ignore_words]
def files_to_gen(directory):
    """Yield the full path of every file under ``directory``, recursively."""
    for root, _dirs, names in os.walk(directory):
        for fname in names:
            yield os.path.join(root, fname)
class DocCorpus(gensim.corpora.TextCorpus):
    """TextCorpus whose ``input`` is a directory: every file under it is
    tokenized in parallel with ``preprocess_text``."""
    def get_texts(self):
        # One worker per core minus one, so the main process stays responsive.
        pool = multiprocessing.Pool(max(1, multiprocessing.cpu_count() - 1))
        # map() blocks until all files are tokenized, then we stream them out.
        for tokens in pool.map(preprocess_text, files_to_gen(self.input)):
            yield tokens
        pool.terminate()
def build_coherence_models(topic_model, **kwargs):
    """Score ``topic_model`` with gensim's four coherence measures.

    kwargs must supply 'corpus', 'dictionary' and 'texts'.  Returns a dict of
    {'num_topics', 'u_mass', 'c_v', 'c_uci', 'c_npmi'}.
    """
    scores = {'num_topics': topic_model.num_topics}
    # u_mass is corpus-based only; the other three also need tokenized texts.
    scores['u_mass'] = models.CoherenceModel(
        model=topic_model, corpus=kwargs['corpus'],
        dictionary=kwargs['dictionary'], coherence='u_mass').get_coherence()
    # Fix (DRY): collapse three copy-pasted CoherenceModel blocks into a loop.
    for measure in ('c_v', 'c_uci', 'c_npmi'):
        scores[measure] = models.CoherenceModel(
            model=topic_model, texts=kwargs['texts'], corpus=kwargs['corpus'],
            dictionary=kwargs['dictionary'], coherence=measure).get_coherence()
    return scores
''' a poor attempt at implementing useless statistical measures, the result seems meaningless and is
in the img folder of the github project. '''
def main(text_dir):
    """Train LDA models over a range of topic counts, score their coherence
    and save a table ('coherence_simple_wiki') plus a plot of the scores.

    NOTE(review): ``text_dir`` is currently unused -- the corpus/dictionary
    paths are hard-coded below.
    """
    # Fix: range objects cannot be concatenated with '+' on Python 3.
    topics = list(range(10, 101, 10)) + list(range(120, 201, 20)) + list(range(250, 451, 50))
    corpus = MmCorpus('../twitter_LDA_topic_modeling/simple-wiki.mm')
    dictionary = Dictionary.load('../twitter_LDA_topic_modeling/simple-wiki.dict')
    print('Building LDA models')
    lda_models = [models.LdaMulticore(corpus=corpus, id2word=dictionary, num_topics=i, passes=5) for i in tqdm(topics)]
    print('Generating coherence models')
    texts = [[dictionary[word_id] for word_id, freq in doc] for doc in corpus]
    pool = multiprocessing.Pool(max(1, multiprocessing.cpu_count() - 1))
    func = partial(build_coherence_models,
                   corpus=corpus,
                   dictionary=dictionary,
                   texts=texts)
    coherence_models = pool.map(func, lda_models)
    pool.close()
    # Collect the per-model scores into columns for a DataFrame.
    d = defaultdict(list)
    print('Generating output data')
    for data in tqdm(coherence_models):
        d['num_topics'].append(data['num_topics'])
        d['u_mass'].append(data['u_mass'])
        d['c_v'].append(data['c_v'])
        d['c_uci'].append(data['c_uci'])
        d['c_npmi'].append(data['c_npmi'])
    df = pd.DataFrame(d)
    df = df.set_index('num_topics')
    df.to_csv('coherence_simple_wiki', sep='\t')
    # Overlay all four measures, then stack them onto shared axes for the
    # saved figure.
    df.plot(xticks=df.index, style=['bs-', 'yo-', 'r^-', 'gx-'])
    ax1 = df.plot(xticks=df.index, style='bs-', grid=True, y='u_mass')
    ax2 = df.plot(xticks=df.index, style='yo-', grid=True, y='c_v', ax=ax1)
    ax3 = df.plot(xticks=df.index, style='r^-', grid=True, y='c_npmi', ax=ax2)
    df.plot(xticks=df.index, style='gx-', grid=True, y='c_uci', ax=ax3)
    plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.17), fancybox=True, shadow=True, ncol=4, fontsize=9)
    plt.subplots_adjust(bottom=0.2)
    plt.xticks(df.index, rotation=45, ha='right', fontsize=8)
    plt.savefig('coherence_simple_wiki')
    plt.close()
# Script entry point: argv[1] is the directory of input text documents.
if __name__ == '__main__':
    sys.exit(main(sys.argv[1]))
| karakayaonurr/Topic-Modelling-with-LDA-at-Twitter | lda_tuna.py | lda_tuna.py | py | 8,596 | python | en | code | 0 | github-code | 36 |
9194485366 | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
OpenAPI spec version: 1.0.0
Contact: support@lightly.ai
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from lightly.openapi_generated.swagger_client.configuration import Configuration
class TagArithmeticsRequest(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> declared swagger type.
    swagger_types = {
        'tag_id1': 'MongoObjectID',
        'tag_id2': 'MongoObjectID',
        'operation': 'TagArithmeticsOperation',
        'new_tag_name': 'TagName',
        'creator': 'TagCreator'
    }
    # Maps python attribute name -> JSON field name on the wire.
    attribute_map = {
        'tag_id1': 'tagId1',
        'tag_id2': 'tagId2',
        'operation': 'operation',
        'new_tag_name': 'newTagName',
        'creator': 'creator'
    }
    def __init__(self, tag_id1=None, tag_id2=None, operation=None, new_tag_name=None, creator=None, _configuration=None):  # noqa: E501
        """TagArithmeticsRequest - a model defined in Swagger

        tag_id1, tag_id2 and operation are required (their setters raise
        ValueError on None when client-side validation is enabled);
        new_tag_name and creator are optional.
        """  # noqa: E501
        # Fall back to a default client configuration when none is given.
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        self._tag_id1 = None
        self._tag_id2 = None
        self._operation = None
        self._new_tag_name = None
        self._creator = None
        self.discriminator = None
        self.tag_id1 = tag_id1
        self.tag_id2 = tag_id2
        self.operation = operation
        if new_tag_name is not None:
            self.new_tag_name = new_tag_name
        if creator is not None:
            self.creator = creator
    @property
    def tag_id1(self):
        """Gets the tag_id1 of this TagArithmeticsRequest.  # noqa: E501

        :return: The tag_id1 of this TagArithmeticsRequest.  # noqa: E501
        :rtype: MongoObjectID
        """
        return self._tag_id1
    @tag_id1.setter
    def tag_id1(self, tag_id1):
        """Sets the tag_id1 of this TagArithmeticsRequest.

        :param tag_id1: The tag_id1 of this TagArithmeticsRequest.  # noqa: E501
        :type: MongoObjectID
        """
        # Required field: reject None when client-side validation is on.
        if self._configuration.client_side_validation and tag_id1 is None:
            raise ValueError("Invalid value for `tag_id1`, must not be `None`")  # noqa: E501
        self._tag_id1 = tag_id1
    @property
    def tag_id2(self):
        """Gets the tag_id2 of this TagArithmeticsRequest.  # noqa: E501

        :return: The tag_id2 of this TagArithmeticsRequest.  # noqa: E501
        :rtype: MongoObjectID
        """
        return self._tag_id2
    @tag_id2.setter
    def tag_id2(self, tag_id2):
        """Sets the tag_id2 of this TagArithmeticsRequest.

        :param tag_id2: The tag_id2 of this TagArithmeticsRequest.  # noqa: E501
        :type: MongoObjectID
        """
        # Required field: reject None when client-side validation is on.
        if self._configuration.client_side_validation and tag_id2 is None:
            raise ValueError("Invalid value for `tag_id2`, must not be `None`")  # noqa: E501
        self._tag_id2 = tag_id2
    @property
    def operation(self):
        """Gets the operation of this TagArithmeticsRequest.  # noqa: E501

        :return: The operation of this TagArithmeticsRequest.  # noqa: E501
        :rtype: TagArithmeticsOperation
        """
        return self._operation
    @operation.setter
    def operation(self, operation):
        """Sets the operation of this TagArithmeticsRequest.

        :param operation: The operation of this TagArithmeticsRequest.  # noqa: E501
        :type: TagArithmeticsOperation
        """
        # Required field: reject None when client-side validation is on.
        if self._configuration.client_side_validation and operation is None:
            raise ValueError("Invalid value for `operation`, must not be `None`")  # noqa: E501
        self._operation = operation
    @property
    def new_tag_name(self):
        """Gets the new_tag_name of this TagArithmeticsRequest.  # noqa: E501

        :return: The new_tag_name of this TagArithmeticsRequest.  # noqa: E501
        :rtype: TagName
        """
        return self._new_tag_name
    @new_tag_name.setter
    def new_tag_name(self, new_tag_name):
        """Sets the new_tag_name of this TagArithmeticsRequest.

        :param new_tag_name: The new_tag_name of this TagArithmeticsRequest.  # noqa: E501
        :type: TagName
        """
        self._new_tag_name = new_tag_name
    @property
    def creator(self):
        """Gets the creator of this TagArithmeticsRequest.  # noqa: E501

        :return: The creator of this TagArithmeticsRequest.  # noqa: E501
        :rtype: TagCreator
        """
        return self._creator
    @creator.setter
    def creator(self, creator):
        """Sets the creator of this TagArithmeticsRequest.

        :param creator: The creator of this TagArithmeticsRequest.  # noqa: E501
        :type: TagCreator
        """
        self._creator = creator
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            # Recursively serialize nested models, lists and dicts of models.
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(TagArithmeticsRequest, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TagArithmeticsRequest):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, TagArithmeticsRequest):
            return True
        return self.to_dict() != other.to_dict()
| tibe97/thesis-self-supervised-learning | lightly/openapi_generated/swagger_client/models/tag_arithmetics_request.py | tag_arithmetics_request.py | py | 6,991 | python | en | code | 2 | github-code | 36 |
4543553383 | import numpy as np
import cv2
import matplotlib.pyplot as plt
def _load_points(path):
    """Read whitespace-separated 2D points from ``path``, skipping the header
    line, and return them as a numpy array (of strings)."""
    # Fix: use a context manager so the file handles are actually closed
    # (f1/f2 were previously opened and never closed).
    with open(path, 'r') as handle:
        rows = [line.rstrip('\n').split(' ') for line in handle]
    return np.array(rows[1:])

pt_1 = _load_points('./pt_2D_1.txt')
pt_2 = _load_points('./pt_2D_2.txt')
img1 = cv2.imread('./image1.jpg')
img2 = cv2.imread('./image2.jpg')
def LIS_eight(a, b):
    """Eight-point algorithm: estimate the rank-2 fundamental matrix F from
    point correspondences a <-> b (rows of [u, v]; entries may be strings)."""
    # Each correspondence contributes one row of the homogeneous system Af=0.
    A = np.zeros((a.shape[0], 9))
    for row in range(a.shape[0]):
        u, v = float(a[row][0]), float(a[row][1])
        u_, v_ = float(b[row][0]), float(b[row][1])
        A[row] = np.array([u * u_, u_ * v, u_, v_ * u, v * v_, v_, u, v, 1])
    # Least-squares solution under ||f|| = 1: the right singular vector for
    # the smallest singular value of A.
    U, D, V = np.linalg.svd(A, full_matrices=True)
    F = np.reshape(V.T[:, 8], (3, 3))
    # The constraint above gives ||F|| = 1; additionally enforce rank 2 by
    # zeroing F's smallest singular value.
    FU, FD, FV = np.linalg.svd(F, full_matrices=True)
    return np.dot(FU, np.dot(np.diag([*FD[:2], 0]), FV))
# fundamental matrix
# Unnormalized estimate straight from the raw pixel coordinates.
F = LIS_eight(pt_1, pt_2)
# normalized fundamental matrix (computed below from Hartley-normalized points)
def normalized_points(m):
    """Hartley normalization: return the points as floats plus the similarity
    transform that centers them and scales the mean distance to sqrt(2)."""
    coords = np.array([[float(pt[0]), float(pt[1])] for pt in m])
    # Translate so the centroid sits at the origin.
    centroid = np.mean(coords, axis=0)
    offsets = coords - centroid
    # Isotropic scale so the RMS distance from the origin is sqrt(2).
    factor = np.sqrt(2 * len(m) / np.sum(np.power(offsets, 2)))
    transform = np.array(
        [[factor, 0, -centroid[0] * factor],
         [0, factor, -centroid[1] * factor],
         [0, 0, 1]
         ], dtype=object
    )
    return coords, transform
uv1, trans_matrix1=normalized_points(pt_1)
uv2, trans_matrix2=normalized_points(pt_2)
# Append a homogeneous coordinate of 1 to every point.
uv1 = np.insert(uv1,uv1.shape[1],values=1, axis=1)
uv2 = np.insert(uv2,uv2.shape[1],values=1, axis=1)
# q = Tp
points1 = (trans_matrix1 @ (uv1.T)).T
# q = T'p'
points2 = (trans_matrix2 @ (uv2.T)).T
# Estimate F in the normalized coordinates, then denormalize: F = T'^T F_hat T.
F_norm = LIS_eight(points1, points2)
# T'FT
F_norm = trans_matrix2.T @ (F_norm) @ (trans_matrix1)
#print(points_norm)
# Sanity check (disabled): the epipolar constraint p'^T F p should be ~0.
# pFp = [points2[i].dot(F_norm.dot(points1[i]))
#        for i in range(points1.shape[0])]
# print("p'^T F p =", np.abs(pFp).max())
def plot_(pt1, pt2, img1, img2, f):
    """
    Show both images side by side with the matched points and the epipolar
    lines induced by the (unnormalized) fundamental matrix f.

    pt1, pt2: homogeneous point arrays (N x 3); f: 3x3 fundamental matrix.
    Each line A*x + B*y + C = 0 is clipped against the image borders before
    drawing.
    """
    plt.subplot(1, 2, 1)
    # Epipolar lines in image 1 associated with the points of image 2.
    ln1 = f.T.dot(pt2.T)
    # Ax + By + C = 0
    A, B, C = ln1
    for i in range(ln1.shape[1]):
        # Border intersections:
        #   y = 0       -> x = -C/A
        #   y = height  -> x = -(B*h + C)/A
        #   x = width   -> y = -(A*w + C)/B
        #   x = 0       -> y = -C/B
        if ((-C[i]/B[i]) < 0):
            plt.plot([-C[i]/A[i], img1.shape[1]], [0, -(C[i] + A[i]*img1.shape[1])/B[i]], 'r')
        elif ((-C[i]/B[i]) > img1.shape[0]):
            plt.plot([-(C[i] + B[i]*img1.shape[0])/A[i], img1.shape[1]], [img1.shape[0], -(C[i] + A[i]*img1.shape[1])/B[i]], 'r')
        else:
            plt.plot([0, img1.shape[1]], [-C[i]/B[i], -(C[i] + A[i]*img1.shape[1])/B[i]], 'r')
        plt.plot([pt1[i][0]], [pt1[i][1]], 'b*')
    plt.imshow(img1, cmap='gray')
    plt.subplot(1, 2, 2)
    # Epipolar lines in image 2 associated with the points of image 1.
    ln2 = f.dot(pt1.T)
    # Ax + By + C = 0
    A, B, C = ln2
    for i in range(ln2.shape[1]):
        if ((-C[i]/B[i]) < 0):
            plt.plot([-C[i]/A[i], img2.shape[1]], [0, -(C[i] + A[i]*img2.shape[1])/B[i]], 'r')
        elif ((-C[i]/B[i]) > img2.shape[0]):
            plt.plot([-(C[i] + B[i]*img2.shape[0])/A[i], img2.shape[1]], [img2.shape[0], -(C[i] + A[i]*img2.shape[1])/B[i]], 'r')
        else:
            plt.plot([0, img2.shape[1]], [-C[i]/B[i], -(C[i] + A[i]*img2.shape[1])/B[i]], 'r')
        # BUG FIX: the second panel must mark the image-2 points; the
        # original plotted pt1 here (compare plot_norm, which uses pt2).
        plt.plot([pt2[i][0]], [pt2[i][1]], 'b*')
    plt.imshow(img2, cmap='gray')
    plt.show()
def plot_norm(pt1, pt2, img1, img2, f):
    """
    Side-by-side plot of the epipolar lines induced by the normalized
    fundamental matrix f, with the matched points overlaid. Each line is
    drawn between its intersections with x = 0 and x = image width.
    """
    w1 = img1.shape[1]
    w2 = img2.shape[1]
    plt.subplot(1, 2, 1)
    # epipolar lines in image 1 for the points of image 2
    a, b, c = f.T.dot(pt2.T)
    for i in range(len(a)):
        # a*x + b*y + c = 0  ->  y = -(a*x + c)/b at x = 0 and x = w1
        plt.plot([0, w1], [-c[i]/b[i], -(c[i] + a[i]*w1)*1.0/b[i]], 'r')
        plt.plot([pt1[i][0]], [pt1[i][1]], 'b*')
    plt.imshow(img1, cmap='gray')
    plt.subplot(1, 2, 2)
    # epipolar lines in image 2 for the points of image 1
    a, b, c = f.dot(pt1.T)
    for i in range(len(a)):
        plt.plot([0, w2], [-c[i]*1.0/b[i], -(a[i]*w2 + c[i])/b[i]], 'r')
        plt.plot([pt2[i][0]], [pt2[i][1]], 'b*')
    plt.imshow(img2, cmap='gray')
    plt.show()
plot_(uv1, uv2, img1, img2, F)
plot_norm(uv1, uv2, img1, img2, F_norm)
def calaulate_dist(pt1, pt2, f):
    """
    Mean point-to-epipolar-line distance: for each match, the distance from
    pt1[i] to the line f.T @ pt2[i]. Lower is better.
    (The misspelled name is kept for compatibility with existing callers.)
    """
    # line coefficients a x + b y + c = 0, one column per point
    a, b, c = f.T.dot(pt2.T)
    n = pt1.shape[0]
    total = 0.0
    for i in range(n):
        # point-line distance |a x + b y + c| / sqrt(a^2 + b^2)
        total += np.abs(a[i] * pt1[i][0] + b[i] * pt1[i][1] + c[i]) \
            / np.sqrt(a[i] ** 2 + b[i] ** 2)
    return total / n
# acc associated with point2: mean distance (pixels) of image-1 points to the
# epipolar lines induced by the image-2 points; lower is better
print('Accuracy of the fundamental matrices by point2:', calaulate_dist(uv1, uv2, F))
print('Accuracy of the normalized fundamental matrices by point2:', calaulate_dist(uv1, uv2, F_norm))
# acc associated with point1: the same measure in the other image (uses F^T)
print('Accuracy of the fundamental matrices by point1:', calaulate_dist(uv2, uv1, F.T))
print('Accuracy of the normalized fundamental matrices by point1:', calaulate_dist(uv2, uv1, F_norm.T))
| yehsin/CV_class_hw2 | 1/hw2-1.py | hw2-1.py | py | 5,803 | python | en | code | 0 | github-code | 36 |
23944279921 | import pyaudio
import numpy as np
from core.tools.buffer import AudioBuffer
class PyAudio():
    """Thin wrapper around a pyaudio client for microphone input streaming."""

    def __init__(self, verbose=True):
        """
        Instantiate the pyaudio client; the input stream itself is opened
        lazily by stream_start().
        """
        self.input_stream = None  # set by stream_start(), cleared by stream_stop()
        self.client = pyaudio.PyAudio()
        self.verbose = verbose

    def stream_read(self, chunk):
        """
        Return the samples of a single chunk as an int16 numpy array.
        Requires stream_start() to have been called first.
        """
        data = np.frombuffer(self.input_stream.read(chunk), dtype=np.int16)
        return data

    def stream_start(self, rate, chunk):
        """
        Open the mono 16-bit audio input stream at the given sample rate
        and frames-per-buffer.
        """
        if self.verbose:
            print(" -- stream started")
        self.input_stream = self.client.open(format=pyaudio.paInt16, channels=1,
                                             rate=rate, input=True,
                                             frames_per_buffer=chunk)

    def stream_stop(self):
        """
        Close the stream but keep the PyAudio client alive so another
        stream can be started later.
        """
        self.input_stream.stop_stream()
        self.input_stream.close()
        # Clear the handle so a stray stream_read() fails fast instead of
        # touching a closed stream.
        self.input_stream = None
        if self.verbose:
            print("Programmatically closing stream")

    def terminate(self):
        """
        Release the underlying pyaudio client. Call once when done with
        audio entirely; without this the native resources leak.
        """
        self.client.terminate()
| AlexKingsland/GuitarGuru | core/tools/pythonaudio.py | pythonaudio.py | py | 1,200 | python | en | code | 0 | github-code | 36 |
17981838498 | #!/usr/bin/python3
import requests
import subprocess # to execute bash commands
import sys
# Ensure the html2text package is present, then fetch and display the
# tracking page for the number given on the command line.
try:
    # "dpkg -s" describes the package; grep pulls out its Status line.
    check_for_package = subprocess.Popen(("dpkg", "-s", "html2text"), stdout=subprocess.PIPE)
    output = subprocess.check_output(("grep", "Status"), stdin=check_for_package.stdout)
    check_for_package.wait()
    opstr = output.decode('utf-8')
    print(opstr)
    if opstr == "Status: install ok installed\n":
        print("Package installed")
except (subprocess.CalledProcessError, FileNotFoundError):
    # grep exits non-zero when the package is missing; dpkg may be absent.
    print("installing html2text..............................")
    # List form avoids the shell; no user input reaches this command.
    subprocess.check_call(["sudo", "apt", "install", "html2text"])

try:
    tracking_number = str(sys.argv[1])
except (IndexError, ValueError):
    print("please enter a tracking number of a valid format")
    sys.exit(2)

request_url = "http://ipsweb.ptcmysore.gov.in/ipswebtracking/IPSWeb_item_events.aspx?itemid=" + tracking_number
# print(request_url)
r = requests.get(request_url)
print(r.status_code)
# Context manager closes the file even if the write fails.
with open("raw_html", "w+") as f:
    f.write(r.text)
view_html = subprocess.Popen(["html2text", "raw_html"])
# communicate() already waits for the child, so no extra wait() is needed.
output = view_html.communicate()
print(output)
| technodict/Parcel_Tracking_CLI | tracking.py | tracking.py | py | 1,131 | python | en | code | 2 | github-code | 36 |
31561340570 | #!/usr/bin/env python
# coding=utf-8
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
"""
给你一个链表,每 k 个节点一组进行翻转,请你返回翻转后的链表。
k 是一个正整数,它的值小于或等于链表的长度。
如果节点总数不是 k 的整数倍,那么请将最后剩余的节点保持原有顺序。
示例:
给你这个链表:1->2->3->4->5
当 k = 2 时,应当返回: 2->1->4->3->5
当 k = 3 时,应当返回: 3->2->1->4->5
说明:
你的算法只能使用常数的额外空间。
你不能只是单纯的改变节点内部的值,而是需要实际进行节点交换。
链接:https://leetcode-cn.com/problems/reverse-nodes-in-klast_tail-group
# 单纯的模拟执行即可,需要注意的是需要保存上次的尾结点和下次的头结点。每k次需要翻转长度为k的链表,因此需要翻转 n/k 次
每次翻转的时间复杂度为k,因此总体时间复杂度是 o(n), 空间复杂度 o(1)
"""
class Solution(object):
    def reverse(self, start, end):
        """Reverse the nodes from ``start`` up to (excluding) ``end``.

        Returns (head, tail): head is the first node of the reversed run and
        tail is the old ``start`` (now last). tail.next is left dangling and
        must be re-linked by the caller.
        """
        tail = start
        last = None
        while start != end:
            nxt = start.next
            start.next = last
            last = start
            start = nxt
        return last, tail

    def reverseKGroup(self, head, k):
        """
        :type head: ListNode
        :type k: int
        :rtype: ListNode

        Reverse the list k nodes at a time; a final group shorter than k
        keeps its original order. O(n) time, O(1) extra space.
        """
        new_head = None
        last_tail = None
        t = head
        while t:
            # advance p past up to k nodes to find the end of this group
            i = 0
            p = t
            while i < k and p:
                p = p.next
                i += 1
            # BUG FIX: test the count, not p. When exactly k nodes remain,
            # p ends as None even though the group is full; the original
            # `if p:` left that last full group unreversed.
            if i == k:
                s, e = self.reverse(t, p)
            else:
                # fewer than k nodes left: keep their order
                s, e = t, None
            t = p
            if not new_head:
                new_head = s
            if last_tail:
                last_tail.next = s
            last_tail = e
        return new_head
| lee3164/newcoder | leetcode/25. K 个一组翻转链表/main.py | main.py | py | 2,005 | python | zh | code | 1 | github-code | 36 |
74325129065 | import unittest
import textwrap
from test.parse_helper import TransformTestCase
class TestSubscript(TransformTestCase):
    """Transform-phase checks for list subscript (indexing) expressions:
    integer indices are accepted, float indices are rejected."""

    def test_list_indexing_valid(self):
        """A literal integer index passes the transform."""
        program = textwrap.dedent(r"""
        bool[] b = [True, False]
        b[0]
        """)
        self.assertNoTransformError(program)

    def test_list_indexing_var_valid(self):
        """An int-typed variable index passes the transform."""
        program = textwrap.dedent(r"""
        bool[] b = [True, False]
        int i = 0
        b[i]
        """)
        self.assertNoTransformError(program)

    # Invalid cases

    def test_list_indexing_invalid(self):
        """A float index must raise a transform error."""
        program = textwrap.dedent(r"""
        bool[] b = [True, False]
        float f = 1
        b[f]
        """)
        self.assertTransformError(program)
self.assertTransformError(owl) | dashuaige92/owl | test/transform/test_subscript.py | test_subscript.py | py | 600 | python | en | code | 1 | github-code | 36 |
69808754346 | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
from scipy import interpolate
def floats_to_rgb(x, min=-1, max=1):
    """
    Translates floats in [min, max) to valid RGB integers, in [0, 255].
    Values outside the range are clamped to 0 / 255.
    """
    x = np.array(x)
    # linear map: min -> 0, max -> 256, then clamp into the valid byte range
    # (np.clip replaces the original pair of masked assignments)
    out = np.clip(256 * (x - min) / (max - min), 0, 255)
    assert np.min(out) >= 0 and np.max(out) <= 255
    return out.astype("uint8")
def rgb_to_floats(x, min=-1, max=1):
    """
    The "inverse" of floats_to_rgb: map byte values back into [min, max].
    Note the denominator is 255, mirroring SLiM.
    """
    fraction = np.array(x, dtype='float') / 255
    return min + (max - min) * fraction
def xyz_to_array(x, y, z):
    """
    Given arrays of regularly-spaced x and y values, with z[i] the value at
    (x[i], y[i]), return (xx, yy, zz) where xx and yy are the sorted unique
    coordinates and zz[i, j] is the z value at (xx[j], yy[i]).
    """
    xx = np.unique(x)
    yy = np.unique(y)
    zz = np.zeros((len(yy), len(xx)))
    rows = np.searchsorted(yy, y)
    cols = np.searchsorted(xx, x)
    for r, c, val in zip(rows, cols, z):
        zz[r, c] = val
    return xx, yy, zz
def xyz_to_function(x, y, z, **kwargs):
    """
    Given arrays of regularly-spaced x and y values, with z[i] the value at
    (x[i], y[i]), return a callable that linearly interpolates z at other
    (x, y) locations, extrapolating outside the given domain.
    """
    xx, yy, zz = xyz_to_array(x, y, z)
    # transpose so the interpolator's first axis is x, second is y
    return interpolate.RegularGridInterpolator(
        (xx, yy), zz.T, **kwargs, fill_value=None, bounds_error=False)
def slope_layers(height, f=None):
    """
    Given an (n + 1, m + 1) layer ``height``, return the (n, m, 2) layer
    holding the x- and y-components of its slope: if the heights around a
    cell are
        c d
        a b
    the slope there is ((b - a)/2 + (d - c)/2, (c - a)/2 + (d - b)/2).
    ``f`` optionally scales the (x, y) components.
    """
    fx, fy = (1, 1) if f is None else (f[0], f[1])
    dx = fx * np.diff(height, axis=1)
    # negated because image arrays put (0, 0) in the lower-left
    dy = -fy * np.diff(height, axis=0)
    x_comp = (dx[1:, :] + dx[:-1, :]) / 2
    y_comp = (dy[:, 1:] + dy[:, :-1]) / 2
    return np.stack([x_comp, y_comp], axis=-1)
def function_height(f, nrow, ncol, xrange, yrange, **kwargs):
    """
    Return an (nrow x ncol) numpy array whose [i, j] entry is f(x[i], y[j]),
    with x spanning xrange and y spanning yrange (endpoints included).
    Extra keyword arguments are forwarded to f.
    """
    xvals = np.linspace(*xrange, nrow)
    yvals = np.linspace(*yrange, ncol)
    # flat layout: x varies slowest, y cycles fastest, matching the reshape
    xs = np.repeat([xvals], ncol, axis=1)
    ys = np.repeat([yvals], nrow, axis=0).flatten()
    out = f(xs, ys, **kwargs)
    out.shape = (nrow, ncol)
    return out
def bump_height(nrow, ncol, width=None, center=None):
    """
    Return an (nrow x ncol) numpy array with values given by the bump
    function exp(-1 / (1 - r^2)), where
        r^2 = (x/width[0])^2 + (y/width[1])^2
    measured from ``center`` in grid indices (default: the array middle).
    The quantity 1 - r^2 is floored at 0.05 to avoid the division blowing
    up near r = 1.
    """
    if center is None:
        center = np.array([(nrow - 1) / 2, (ncol - 1) / 2])
    if width is None:
        width = center
    rows = np.repeat([np.arange(nrow) - center[0]], ncol, axis=1)
    cols = np.repeat([np.arange(ncol) - center[1]], nrow, axis=0).flatten()
    body = np.maximum(0.05, 1 - ((rows / width[0]) ** 2 + (cols / width[1]) ** 2))
    out = np.exp(-1 / body)
    out[out < 0] = 0.0  # kept for parity with siblings; exp() is never negative
    out.shape = (nrow, ncol)
    return out
def gaussian_height(nrow, ncol, width=None, center=None):
    """
    Return an (nrow x ncol) numpy array with values given by the gaussian
    density exp(-r^2 / 2), where
        r^2 = (x/width[0])^2 + (y/width[1])^2
    measured from ``center`` in grid indices (default: the array middle).
    """
    if center is None:
        center = np.array([(nrow - 1) / 2, (ncol - 1) / 2])
    if width is None:
        width = center
    rows = np.repeat([np.arange(nrow) - center[0]], ncol, axis=1)
    cols = np.repeat([np.arange(ncol) - center[1]], nrow, axis=0).flatten()
    rsq = (rows / width[0]) ** 2 + (cols / width[1]) ** 2
    out = np.exp(-rsq / 2)
    out[out < 0] = 0.0  # kept for parity with siblings; exp() is never negative
    out.shape = (nrow, ncol)
    return out
def saddle_height(nrow, ncol, width=None, center=None):
    """
    Return an (nrow x ncol) numpy array with values given by the saddle
    surface exp(-((x/width[0])^2 - (y/width[1])^2) / 2), measured from
    ``center`` in grid indices (default: the array middle).
    """
    if center is None:
        center = np.array([(nrow - 1) / 2, (ncol - 1) / 2])
    if width is None:
        width = center
    rows = np.repeat([np.arange(nrow) - center[0]], ncol, axis=1)
    cols = np.repeat([np.arange(ncol) - center[1]], nrow, axis=0).flatten()
    z = (rows / width[0]) ** 2 - (cols / width[1]) ** 2
    out = np.exp(-z / 2)
    out[out < 0] = 0.0  # kept for parity with siblings; exp() is never negative
    out.shape = (nrow, ncol)
    return out
def mountain_height(nrow, ncol, slope=None, center=None):
    """
    Return an (nrow x ncol) numpy array that has value 1.0 at ``center``
    and declines linearly with ``slope`` to zero (clamped at 0).
    """
    if center is None:
        center = np.array([(nrow - 1) / 2, (ncol - 1) / 2])
    if slope is None:
        # put 0.0 at the further edge of the smaller dimension.
        # BUG FIX: center[0] indexes rows (nrow) and center[1] columns
        # (ncol); the original paired ncol with center[0] and nrow with
        # center[1], mis-scaling the default slope on non-square grids.
        slope = 1.0 / min(max(nrow - center[0], center[0]),
                          max(ncol - center[1], center[1]))
    x = np.repeat([np.arange(nrow) - center[0]], ncol, axis=1)
    y = np.repeat([np.arange(ncol) - center[1]], nrow, axis=0).flatten()
    dist = np.sqrt(x ** 2 + y ** 2)
    out = 1.0 - dist * slope
    out[out < 0] = 0.0
    out.shape = (nrow, ncol)
    return(out)
def make_slope_rgb(nrow, ncol, height_fn, f=None, **kwargs):
    """
    Build an (nrow, ncol, 4) RGBA uint8 array encoding the slope of
    height_fn: R and G carry the x/y slope components (rescaled so the
    largest magnitude fills the byte range), B is 0 and alpha is 255.
    """
    if 'center' in kwargs:
        # the height grid is one cell larger than the output, so rescale
        # the requested center to keep it aligned
        cx, cy = kwargs['center']
        kwargs['center'] = [cx * (1 + 1 / nrow), cy * (1 + 1 / ncol)]
    height = height_fn(nrow + 1, ncol + 1, **kwargs)
    slope = slope_layers(height, f=f)
    # NOTE(review): np.max(np.abs(slope)) is 0 for a perfectly flat
    # height_fn, which would divide by zero here — confirm callers never
    # pass one.
    rg = floats_to_rgb(slope / np.max(np.abs(slope)), min=-1, max=1)
    blue = np.full((nrow, ncol, 1), 0, dtype='uint8')
    alpha = np.full((nrow, ncol, 1), 255, dtype='uint8')
    return np.concatenate([rg, blue, alpha], axis=-1).astype("uint8")
def make_sigma_rgb(nrow, ncol, height_fn, **kwargs):
    """
    Build an (nrow, ncol, 4) RGBA uint8 array whose R and G channels both
    carry sigma — the height averaged over each cell's four corners,
    mapped from [-1, 1] to bytes — with B encoding the float 0.0 and alpha
    255. The same sigma is used in x and y with no correlation; the
    (nrow+1, ncol+1) height grid is averaged so cells line up with
    make_slope_rgb.
    """
    if 'center' in kwargs:
        cx, cy = kwargs['center']
        kwargs['center'] = [cx * (1 + 1 / nrow), cy * (1 + 1 / ncol)]
    height = height_fn(nrow + 1, ncol + 1, **kwargs)
    # mean of the four corner heights of each cell
    corners = (height[:-1, :-1] + height[1:, :-1]
               + height[:-1, 1:] + height[1:, 1:]) / 4
    sigma = floats_to_rgb(corners[:, :, np.newaxis], min=-1, max=1)
    zero = floats_to_rgb(np.full((nrow, ncol, 1), 0), min=-1, max=1)
    alpha = np.full((nrow, ncol, 1), 255, dtype='uint8')
    return np.concatenate([sigma, sigma, zero, alpha], axis=-1).astype("uint8")
def mountain_slope(nrow, ncol, slope=None, center=None):
    """
    Make a (nrow, ncol, 4) RGBA array with layers corresponding to
    (downslope bias x, downslope bias y, 0, 255)
    on a "stratovolcano" (linear cone).
    """
    if center is None:
        center = np.array([nrow, ncol]) / 2
    return make_slope_rgb(nrow, ncol, mountain_height,
                          slope=slope, center=center)
def mountain_sigma(nrow, ncol, slope=None, center=None):
    """
    Make a (nrow, ncol, 4) RGBA array with layers corresponding to
    (sigma x, sigma y, 0, 255)
    on a "stratovolcano" (linear cone).
    """
    if center is None:
        center = np.array([nrow, ncol]) / 2
    return make_sigma_rgb(nrow, ncol, mountain_height,
                          slope=slope, center=center)
def saddle_slope(nrow, ncol, width=None, center=None):
    """
    Make a (nrow, ncol, 4) RGBA array with layers corresponding to
    (downslope bias x, downslope bias y, 0, 255)
    on the saddle exp(-(x^2 - y^2)/2).
    """
    if center is None:
        center = np.array([nrow, ncol]) / 2
    return make_slope_rgb(nrow, ncol, saddle_height,
                          width=width, center=center)
def gaussian_slope(nrow, ncol, width=None, center=None):
    """
    Make a (nrow, ncol, 4) RGBA array with layers corresponding to
    (downslope bias x, downslope bias y, 0, 255)
    on a gaussian hill.
    """
    if center is None:
        center = np.array([nrow, ncol]) / 2
    return make_slope_rgb(nrow, ncol, gaussian_height,
                          width=width, center=center)
def butte_slope(nrow, ncol, width=None, center=None):
    """
    Make a (nrow, ncol, 4) RGBA array with layers corresponding to
    (downslope bias x, downslope bias y, 0, 255)
    on a "butte" (a bump function).
    """
    if center is None:
        center = np.array([nrow, ncol]) / 2
    return make_slope_rgb(nrow, ncol, bump_height,
                          width=width, center=center)
def butte_sigma(nrow, ncol, width=None, center=None):
    """
    Make a (nrow, ncol, 4) RGBA array with layers corresponding to
    (sigma x, sigma y, 0, 255)
    on a "butte" (a bump function).
    """
    if center is None:
        center = np.array([nrow, ncol]) / 2
    return make_sigma_rgb(nrow, ncol, bump_height,
                          width=width, center=center)
| kr-colab/product-space-FEM | simulation/maps/map_utils.py | map_utils.py | py | 9,643 | python | en | code | 2 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.