seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
36255805376 | import boto3
import json
import os
dynamodb = boto3.resource('dynamodb')
client = boto3.client('dynamodb')
USERS_TABLE = dynamodb.Table(os.environ['USERS_TABLE'])
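# Soft delete: instead of removing the item, the handler below flags the user as inactive;
# the ConditionExpression makes the update fail when the userId does not exist.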
def delete_user_service(event, context):
try:
response = USERS_TABLE.update_item(
Key={
'userId': event['pathParameters']['id']
},
ConditionExpression='attribute_exists(userId)',
UpdateExpression='SET active = :active',
ExpressionAttributeValues={':active': False}
)
        print('[UPDATE RESPONSE]:', response)
return {
'statusCode': 200,
'body': json.dumps('user deleted.')
}
except Exception as e:
print("Error deleting user:")
print(e)
return {
'statusCode': 400,
'body': json.dumps('Error deleting the user')
} | Glendid/glendid-app-users | src/services/DeleteUser.py | DeleteUser.py | py | 875 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "boto3.resource",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_numbe... |
73084758907 | import pandas as pd
# import requests
import sys
import collections
# import urllib.request
import json
# url = 'http://loterias.caixa.gov.br/wps/portal/loterias/landing/lotofacil/!ut/p/a1/04_Sj9CPykssy0xPLMnMz0vMAfGjzOLNDH0MPAzcDbz8vTxNDRy9_Y2NQ13CDA0sTIEKIoEKnN0dPUzMfQwMDEwsjAw8XZw8XMwtfQ0MPM2I02-AAzgaENIfrh-FqsQ9wBmoxN_FydLAGAgNTKEK8DkRrACPGwpyQyMMMj0VAcySpRM!/dl5/d5/L2dBISEvZ0FBIS9nQSEh/pw/Z7_HGK818G0K85260Q5OIRSC42046/res/id=historicoHTML/c=cacheLevelPage/=/'
# url = 'https://servicebus2.caixa.gov.br/portaldeloterias/api/resultados?modalidade=Lotofácil'
# url = sys.argv[1]
file = sys.argv[1] #'resultados.json'
# r = requests.get(url)
# r.text
# r.text = r.text
r = open(file, encoding="utf8")
data = json.load(r)
# data = json.load(urllib.request.urlopen(url))
r_text = data['html'].replace('\\r\\n', '')
r_text = r_text.replace('"\r\n}','')
r_text = r_text.replace('{\r\n "html:','')
r_text
df = pd.read_html(r_text)
type(df)
type(df[0])
df1 = df
df = df[0].copy()
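# keep only rows where 'Bola1' is not NaN (NaN != NaN), dropping empty/summary rows from the scraped table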
df = df[df['Bola1'] == df['Bola1']]
df.head()
nr_pop = list(range(1,26))
nr_par = []
nr_impar = []
nr_primo = []
for n in nr_pop:
if n % 2 == 0:
nr_par.append(n)
elif n % 2 == 1:
nr_impar.append(n)
    # proper primality test (the original break/elif pattern also collected
    # composites such as 9, 15, 21 and 25, and never added 2)
    if n > 1 and all(n % i != 0 for i in range(2, n)):
        nr_primo.append(n)
comb = []
v_cont = []
for n in nr_pop:
v_cont.append([n, 0])
'''v01 = 0
v02 = 0
v03 = 0
v04 = 0
v05 = 0
v06 = 0
v07 = 0
v08 = 0
v09 = 0
v10 = 0
v11 = 0
v12 = 0
v13 = 0
v14 = 0
v15 = 0
v16 = 0
v17 = 0
v18 = 0
v19 = 0
v20 = 0
v21 = 0
v22 = 0
v23 = 0
v24 = 0
v25 = 0'''
cols = ['Bola1', 'Bola2', 'Bola3', 'Bola4', 'Bola5', 'Bola6', 'Bola7', 'Bola8', 'Bola9', 'Bola10', 'Bola11', 'Bola12', 'Bola13', 'Bola14', 'Bola15']
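# for each historical draw, count how many of the 15 balls are even, odd and prime, and tally per-number frequencies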
for idx, row in df.iterrows():
v_par = 0
v_impar = 0
v_primo = 0
for c in cols:
if row[c] in nr_par:
v_par += 1
elif row[c] in nr_impar:
v_impar += 1
if row[c] in nr_primo:
v_primo += 1
for n in nr_pop:
if row[c] == n:
v_cont[n-1][1] += 1
'''if row[c] == 1:
v01 += 1
elif row[c] == 2:
v02 += 1
elif row[c] == 3:
v03 += 1
elif row[c] == 4:
v04 += 1
elif row[c] == 5:
v05 += 1
elif row[c] == 6:
v06 += 1
elif row[c] == 7:
v07 += 1
elif row[c] == 8:
v08 += 1
elif row[c] == 9:
v09 += 1
elif row[c] == 10:
v10 += 1
elif row[c] == 11:
v11 += 1
elif row[c] == 12:
v12 += 1
elif row[c] == 13:
v13 += 1
elif row[c] == 14:
v14 += 1
elif row[c] == 15:
v15 += 1
elif row[c] == 16:
v16 += 1
elif row[c] == 17:
v17 += 1
elif row[c] == 18:
v18 += 1
elif row[c] == 19:
v19 += 1
elif row[c] == 20:
v20 += 1
elif row[c] == 21:
v21 += 1
elif row[c] == 22:
v22 += 1
elif row[c] == 23:
v23 += 1
elif row[c] == 24:
v24 += 1
elif row[c] == 25:
v25 += 1'''
comb.append(str(v_par) + 'p-' + str(v_impar) + 'i-' + str(v_primo) + 'np')
freq_nr = v_cont
freq_nr.sort(key=lambda tup: tup[1])
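# freq_nr is now sorted ascending by count, so freq_nr[0] is the least drawn number and freq_nr[-1] the most drawn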
counter_comb = collections.Counter(comb)
resultado = pd.DataFrame(counter_comb.items(), columns=['combination','frequency'])
resultado['p_freq'] = resultado.frequency / resultado.frequency.sum()
resultado.sort_values('p_freq', inplace=True)
print('''
O número mais frequente é: {}
O número menos frequente é: {}
A combinação mais frequente é {}, com a frequência de {}%.
'''.format(freq_nr[-1][0], freq_nr[0][0], resultado['combination'].values[-1], int(resultado['p_freq'].values[-1]*100*100)/100)
) | daklima/bootcamp-engdados-oct22 | A001/main.py | main.py | py | 3,954 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.read_html",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"l... |
37616517604 | from time import sleep
from signal import pause
from gpiozero import LED
from gpiozero import Button
from pygame import mixer
mixer.init()
placeholder = mixer.Sound('placeholder.wav')
ph_len = placeholder.get_length()
led = LED(25)
btn = Button(4)
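# main loop: arm on a button press, start the sequence on release, then play the clip and blink the LED for its length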
while True:
btn.wait_for_press()
print("Initialized")
btn.wait_for_release()
print("Starting Sequence")
# Light and Sound Sequence
placeholder.play()
led.blink(.5, .5, round(ph_len))
sleep(ph_len)
print("Sequence Complete")
| Aahil52/animatronics2022 | testscripts/soundandlightstest.py | soundandlightstest.py | py | 516 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.mixer.init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "pygame.mixer.Sound",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"li... |
30513158454 | import os
import datetime
from django.conf import settings
date = datetime.datetime.now()
filename_secrets_bx24 = os.path.join(settings.BASE_DIR, 'reports', 'report.txt')
class Report:
def __init__(self):
self.date = None
self.filename = None
self.fields = None
# self.encoding = 'cp1251'
self.encoding = 'utf8'
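    # create() opens the report file and writes the static HTML header plus the inline CSS used by the tables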
def create(self):
self.set_date()
self.forming_filename()
with open(self.filename, 'a+', encoding=self.encoding) as f:
html_tags = \
"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<style>
table {border-collapse: collapse;}
th {
border: 2px solid #dee2e6;
padding: 6px;
                text-align: center;
font-size: 14px;
font-family: sans-serif;
color: rgb(33, 37, 41);
max-width: 300px;
overflow: auto;
}
td {
border: 2px solid #dee2e6;
font-size: 12px;
font-weight: 400;
font-family: sans-serif;
white-space: nowrap;
color: rgb(33, 37, 41);
padding: 0 5px;
max-width: 300px;
overflow: auto;
}
.result td {
background-color: #cfe2ff;
border-bottom: 4px solid #74b0ec;
}
</style>
<!-- <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.2.1/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-iYQeCzEYFbKjA/T2uDLTpkwGzCiq6soy8tYaI1GyVh/UjpbCx/TYkiZhlZB6+fzT" crossorigin="anonymous"> -->
<title>Отчет</title>
</head>
<body>
<h1>Результат объединения контактов от
"""
html_tags += self.date.isoformat()
html_tags += """
</h1>
<table class="table">
"""
f.write(html_tags)
def add_fields(self, fields):
self.fields = fields
with open(self.filename, 'a', encoding=self.encoding) as f:
header_html = '<th>ID</th>\n'
for field in self.fields:
if field == 'ID':
continue
header_html += f'<th>{field}</th>\n'
header_html += f'<th>DEALS</th>\n'
f.write(f'''
<thead>
<tr>
{header_html}
</tr>
</thead>
''')
def add(self, old_contacts, id_contact_res, data_update, companies, deals={}):
with open(self.filename, 'a', encoding=self.encoding) as f:
html = ''
for _, contact in old_contacts.items():
html += f'''
<tr>
{self.get_row_html(contact, deals)}
</tr>
'''
res_contact = old_contacts.get(id_contact_res, {})
html += f"""
<tr class="result">
{self.get_row_res_html(res_contact, data_update, companies, deals)}
</tr>
"""
f.write(f'''
<tbody>
{html}
</tbody>
''')
def get_row_html(self, contact, deals):
id_contact = contact.get("ID", "")
html_row = f'<td>{contact.get("ID", "")}</td>\n'
for field, field_data in self.fields.items():
if field == 'ID':
continue
elif field_data['type'] == 'crm_multifield':
cell = ''
for item in contact.get(field, []):
cell += item.get('VALUE', '') or "–"
cell += '<br>'
html_row += f'<td>{cell}</td>\n'
else:
html_row += f'<td>{contact.get(field, "") or "–"}</td>\n'
deals_lst = deals.get(str(id_contact), [])
html_row += f'<td>{", ".join([str(i) for i in deals_lst])}</td>\n'
return html_row
def get_row_res_html(self, contact, data_update, companies, deals):
html_row = f'<td>{contact.get("ID", "")}</td>\n'
for field, field_data in self.fields.items():
if field == 'ID':
continue
elif field == 'COMPANY_ID' and not data_update.get(field, None) and companies:
html_row += f'<td>{companies[0]}</td>\n'
elif field in data_update and field_data['type'] == 'crm_multifield':
cell = ''
for item in data_update.get(field, []):
cell += item.get('VALUE', '') or "–"
cell += '<br>'
html_row += f'<td>{cell}</td>\n'
elif field in data_update:
html_row += f'<td>{data_update.get(field, "") or "–"}</td>\n'
elif field_data['type'] == 'crm_multifield':
cell = ''
for item in contact.get(field, []):
cell += item.get('VALUE', '') or "–"
cell += '<br>'
html_row += f'<td>{cell}</td>\n'
else:
html_row += f'<td>{contact.get(field, "") or "–"}</td>\n'
deals_lst = deals.get("summary", [])
html_row += f'<td>{", ".join([str(i) for i in deals_lst])}</td>\n'
return html_row
def closed(self):
with open(self.filename, 'a', encoding=self.encoding) as f:
html_tags = \
"""
</table>
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.2.1/dist/js/bootstrap.bundle.min.js" integrity="sha384-u1OknCvxWvY5kfmNBILK2hRnQC3Pr17a+RTT6rIHI7NnikvbZlHgTPOOmMi466C8" crossorigin="anonymous"></script>
</body>
</html>
"""
f.write(html_tags)
def forming_filename(self):
date_str = self.convert_date_to_str(self.date)
self.filename = os.path.join(settings.BASE_DIR, 'reports', f'report_{date_str}.html')
def set_date(self):
self.date = datetime.datetime.now()
@staticmethod
def convert_date_to_str(date):
return date.strftime("%d.%m.%Y_%H.%M")
| Oleg-Sl/Quorum_merge_contacts | merge_contacts/api_v1/service/report/report_to_html.py | report_to_html.py | py | 7,159 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
26596833071 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
# Import libraries
import matplotlib.pyplot as plt
import matplotlib.animation as animate
import matplotlib.lines as mlines
import agentframework
import csv
# Request input from user for number of heroes and enemies
print("Welcome to the hero agent game. \nPlease set the number of hero and agents by inputting into the console below...")
num_heroes = int(input("Set the number of heroes: "))
num_enemies = int(input("Set the number of enemies: "))
# Declare variables
heroes = []
enemies =[]
winners = []
environment = []
rowlist = []
# Declare figure for plot
fig = plt.figure(num= 1, figsize=(7, 5))
carry_on = True
# DECLARE FUNCTIONS
# Keeps animation running as long as carry_on = true
def gen_function():
global carry_on
a = 0
while carry_on:
yield a
a = a + 1
# Writes end environment to file end_environment.txt
def end_game():
print('End game function called')
with open('end_environment.txt', 'w') as e:
e.write("END ENVIRONMENT: \n")
for row in environment:
e.write(" ".join(str(value) for value in row) +"\n")
e.write("DOCUMENT END \n")
e.close()
with open('stores_record.txt', 'a') as s:
s.write("GAME STARTS with {} heroes and {} enemies: \n".format(num_heroes, num_enemies))
for hero in heroes:
s.write("Hero {} finishes with a total store of {}. \n".format(hero.identity, hero.store))
s.write("GAME ENDS \n")
s.close()
# Sets updates to figure per iteration
def update(frame_number):
# Creates figures and axes:
fig.clear() # Clears figure so that updated markers and environment can be applied at each iteration
axes = plt.gca() # Points to axis
# Sets ranges of axes:
axes.set_xlim([0,300])
axes.set_ylim([0,300])
# Adds environment and colour scale key:
plt.imshow(environment)
plt.colorbar(ax = axes, orientation= 'horizontal', extend = 'min', spacing = 'proportional', shrink = 0.5).set_label('Environment density')
# Plots and actions for heroes:
for hero in heroes: # Loops through heroes
print(hero) # Heroes prints location and store to console
global carry_on # Access to stopping condition variable
# Plots heroes according to status:
if hero.store >= 3000: # First hero to reach store of 3000 wins and is plotted as winner
winners.append(hero)
plt.scatter(winners[0].x, winners[0].y, marker="D", c= "Orange")
plt.text((winners[0].x + 25), (winners[0].y - 1), "{} is the winner!".format(winners[0].identity), fontsize=8, color='White', backgroundcolor='Black')
print("We have a winner! Hero {} wins with a store of {}".format(winners[0].identity, winners[0].store) + "\n Remaining heroes:" )
carry_on = False
end_game()
elif hero.store >= 2500: # Fast heroes plotted
plt.scatter(hero.x, hero.y, c= 'Purple', label='Fast')
plt.text((hero.x + 8), (hero.y - 1), str(hero.identity), fontsize=8, color='White')
elif hero.store >= 1000: # Medium heroes plotted
plt.scatter(hero.x, hero.y, c= 'Pink', label= 'Average')
plt.text((hero.x + 8), (hero.y - 1), str(hero.identity), fontsize=8, color='White')
elif hero.store < 1000: # Fast heroes plotted
plt.scatter(hero.x, hero.y, c= 'Grey', label= 'Slow')
plt.text((hero.x + 8), (hero.y - 1), str(hero.identity), fontsize=8, color='White')
# Actions for heroes (movement and sharing and eating of environment)
hero.move()
hero.eat()
hero.share_with_neighbours()
# Creates key for hero markers
enemy = mlines.Line2D([], [], color='Black', marker='x', linestyle='None', label='Enemy')
key_slow = mlines.Line2D([], [], color='Grey', marker='o', linestyle='None', label='Slow hero')
key_medium = mlines.Line2D([], [], color='Pink', marker='o', linestyle='None', label='Average hero')
key_fast = mlines.Line2D([], [], color='Purple', marker='o', linestyle='None', label='Fast hero')
plt.legend(handles=[key_slow, key_medium, key_fast, enemy], bbox_to_anchor=(1,1), bbox_transform=plt.gcf().transFigure, title='Agent key')
# Plots and actions for enemies
for enemy in enemies:
enemy.move()
enemy.eat()
plt.scatter(enemy.x, enemy.y, marker="x", c= 'Black')
for hero in heroes:
            enemy.eat_neighbours(hero) # enemy eats neighbouring heroes' stores
# Creates environment array from data file
f = open('environment.txt', newline='')
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
for row in reader:
for value in row:
rowlist.append(int(value))
environment.append(rowlist)
rowlist = []
f.close()
# Creates heroes (as many as inputted into console by user) and adds them to the heroes list
for identity in range(num_heroes):
heroes.append(agentframework.Agent(environment, heroes, (identity + 1), enemies))
# Creates enemies (as many as inputted into console by user) and adds them to enemies list
for identity in range(num_enemies):
enemies.append(agentframework.Agent(environment, heroes, (identity + 1), enemies))
# Animates plot
animation = animate.FuncAnimation(fig, update, interval=1, frames=gen_function, repeat=False,)
| emilyjcoups/Agent_Based_Model | model.py | model.py | py | 5,535 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matp... |
30023024204 | import os
import glob
import numpy as np
from scipy.stats import norm
import json
class WarpedSpace:
# takes a prior as a dict and returns a scipy normal distribution
@staticmethod
def create_distribution(prior):
means = []
stds = []
ranges = []
for key in sorted(prior.keys()):
mean = np.inf
while mean > prior[key]['range'][1] or mean < prior[key]['range'][0]:
mean = prior[key]['params']['mean'] + norm.rvs(0, prior[key].get('noise', 0))
means.append(mean)
stds.append(prior[key]['params']['std'][0])
ranges.append(prior[key]['range'])
return norm(np.array(means).reshape(1, -1), np.array(stds).reshape(1, -1))
# class that computes the warped space, and through the call funtion extrapolates up
# from warped space to function space - wraps around the original function
def __init__(self, dist, ranges, objective):
# ranges in numpy matrix, one row per dimension
# dist needs to implement elements of a scipy distribution, i.e. pdf, cdf, ppf etc.
self.dist = dist
self.param_ranges = np.zeros((len(ranges), 2))
for i, range_ in enumerate(ranges):
self.param_ranges[i, 0] = range_[0]
self.param_ranges[i, 1] = range_[1]
self.get_warped_ranges()
self.objective = objective
def get_warped_ranges(self):
# gives the coordinates in warped (0, 1)-space where the boundaries of the original space lie
# we want this boundary to be represented as such in the warped space too - thus, we warp the
# space again by minmax-scaling the warped space with these boundary values. Consequently,
# we get a limit on the warped space that (at largest) has the same boundaries as the original
        # space, and otherwise further enlarges the original search space. This makes it a truncated
        # gaussian even if the prior is set at the very edge
# in the case where the entire prior fits within the search space, we need boundaries for where
# numerical issues occur - i.e. not letting the algorithm go more than 8:ish standard deviations
# away for any dimension
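        # illustrative note (not in the original code): for a 1-D prior N(0.5, 0.1) on the range [0, 1],
        # cdf(0) is about 2.9e-7 and cdf(1) about 1 - 2.9e-7, so the warped boundaries span nearly the
        # whole unit interval; a prior centred near an edge yields a clearly truncated span instead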
self.boundaries = np.zeros(self.param_ranges.shape)
for i, range_ in enumerate(self.param_ranges.T):
self.boundaries[:, i] = self.dist.cdf(np.array(range_))
        # increment boundaries with the smallest possible value to avoid inverting back to infinity
self.boundaries[:, 0] = self.boundaries[:, 0] + 2e-16
self.boundaries[:, 1] = self.boundaries[:, 1] - 2e-16
def get_original_range(self, X):
        # input - an X in range (0, 1) regardless of the problem
        # this needs to be shrunk linearly to the range which is still allowed to be in range
        # Thus, we take the inverse cdf of (floor + X * (ceiling of w.s. - floor of w.s.))
X_scaled = np.zeros(X.shape)
for dim in range(X.shape[1]):
X_scaled[:, dim] = self.boundaries[dim, 0] + X[:, dim] * (self.boundaries[dim, 1] - self.boundaries[dim, 0])
# this probably won't work in higher dimensions
X_unwarped = self.dist.ppf(X_scaled)
for dim in range(X.shape[1]):
assert np.all(X_unwarped[:, dim] >= self.param_ranges[dim, 0])
assert np.all(X_unwarped[:, dim] <= self.param_ranges[dim, 1])
return X_unwarped
def __call__(self, X):
X_original = self.get_original_range(X)
return self.objective(X_original) | piboauthors/PiBO-Spearmint | spearmint/warping.py | warping.py | py | 3,633 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.inf",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "scipy.stats.norm.rvs",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "scipy.stats.norm",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "scipy.stats.norm... |
3910734213 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Fri 14 09:34:03 2018
@author: MariusD
"""
#Server
from flask import Flask, jsonify
server = Flask("phonebook")
phonebook={"Mum":"0173240", "Dad":"01717374", "Pepe":"01773849", "IE":"01"}
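# Example requests against the local dev server (hypothetical host/port, not part of the original file):
#   curl -X POST http://127.0.0.1:5000/add_contact/0151234/Alice
#   curl http://127.0.0.1:5000/get_number/Alice
#   curl -X DELETE http://127.0.0.1:5000/delete_contact/Alice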
# Add contact
@server.route("/add_contact/<number>/<name>", methods=["POST"])
def add_contact(number, name):
if name not in phonebook:
phonebook.update({name:number})
return jsonify("You added " + name + " the number is: " + number)
else:
return jsonify("The contact " + name + " is already in your phonebook.")
# Get a phone by name
@server.route("/get_number/<name>")
def get_number(name):
if name in phonebook:
        return jsonify(name + "'s phone number is: " + phonebook[name])
else:
return jsonify("You don't have a contact called " + name + " in your phonebook.")
# Delete a phone by name
@server.route("/delete_contact/<name>", methods=["DELETE"])
def delete_contact(name):
if name not in phonebook:
return jsonify("You don't have a contact called " + name + " in your phonebook.")
else:
del phonebook[name]
return jsonify("The contact "+ name + " has been deleted from your phonebook.")
# Update a phone by name
@server.route("/update_contact/<name>/<number>", methods=["PUT"])
def update_contact(name, number):
if name not in phonebook:
return jsonify("You don't have a contact called "+ name + " in your phonebook.")
else:
phonebook[name] = number
return jsonify("You just updated: " + name + "'s number to: " + number)
@server.route("/phonebook")
def get_phonebook():
return jsonify(phonebook)
server.run() | Mariusxz/Indidivdual_Assignment_3 | Individual-Assignment-3/Phonebook/Server.py | Server.py | py | 1,759 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_num... |
3238675482 | """Contains the class single_object.
Used to compute single thermal objects.
"""
from .. import solvers
from . import Object
import matplotlib.pyplot as plt
import numpy as np
class SingleObject:
"""Single_object class.
This class solves numerically the heat conduction equation for 1 dimension
of a single material(s). The class has 6 methods.
"""
def __init__(self, amb_temperature, materials=('Cu',), borders=(1, 11),
materials_order=(0,), dx=0.01, dt=0.1, file_name=None,
boundaries=(0, 0), initial_state=False,
materials_path=False, draw=['temperature'], draw_scale=None):
"""Thermal object initialization.
`amb_temperature` is the ambient temperature of the whole system.
`materials` is the list of strings of all the used materials present in
`material_path`. `borders` is a list of the points where there is a
change of material. `materials_order` is a list of the materials list
indexes that defines the material properties given by borders. `dx` and
`dt` are the space and time steps, respectively. `file_name` is the
file name where the temperature is saved. `boundaries` is a list of two
entries that define the boundary condition for temperature. If 0 the
boundary condition is insulation. `initial_state` is the initial state
        of the materials. True if there is an applied field and False if the
        field is absent. `materials_path` is the absolute path of the materials
database. If false, then the materials database is the standard
heatrapy database. `draw` is a list of strings representing the online
        plots. In this version only `'temperature'` can be plotted. If the list
is empty, then no drawing is performed. `draw_scale` is a list of two
values, representing the minimum and maximum temperature to be drawn.
If None, there are no limits.
"""
# check the validity of inputs
materials = tuple(materials)
borders = tuple(borders)
materials_order = tuple(materials_order)
boundaries = tuple(boundaries)
cond01 = isinstance(amb_temperature, float)
cond01 = cond01 or isinstance(amb_temperature, int)
cond02 = isinstance(materials, tuple)
cond03 = isinstance(borders, tuple)
cond04 = isinstance(materials_order, tuple)
cond05 = isinstance(dx, int) or isinstance(dx, float)
cond06 = isinstance(dt, int) or isinstance(dt, float)
cond07 = isinstance(file_name, str)
cond07 = cond07 or (file_name is None)
cond08 = isinstance(boundaries, tuple)
cond10 = isinstance(initial_state, bool)
if isinstance(draw, list):
cond15 = True
elif draw is None:
cond15 = True
else:
cond15 = False
if isinstance(draw_scale, list) or isinstance(draw_scale, tuple):
cond16 = (len(draw_scale) == 2)
elif draw_scale is None:
cond16 = True
else:
cond16 = False
condition = cond01 and cond02 and cond03 and cond04 and cond05
condition = condition and cond06 and cond07 and cond08
condition = condition and cond10
condition = condition and cond15 and cond16
if not condition:
raise ValueError
self.object = Object(amb_temperature, materials=materials,
borders=borders, materials_order=materials_order,
dx=dx, dt=dt, file_name=file_name,
boundaries=boundaries,
initial_state=initial_state,
materials_path=materials_path)
# initializes the plotting
self.draw = draw
self.draw_scale = draw_scale
for drawing in self.draw:
if drawing == 'temperature':
self.figure = plt.figure()
self.ax = self.figure.add_subplot(111)
temp = []
for i in range(len(self.object.temperature)):
temp.append(self.object.temperature[i][0])
if not self.draw_scale:
vmax = max(temp)
vmin = min(temp)
if vmax == vmin:
vmin = vmin - 0.1
vmax = vmax + 0.1
temp = np.array(temp)
x_plot = [self.object.dx*j for j in range(len(temp))]
self.online, = self.ax.plot(x_plot, temp)
self.ax.set_ylim([vmin, vmax])
else:
temp = np.array(temp)
x_plot = [self.object.dx*j for j in range(len(temp))]
self.online, = self.ax.plot(x_plot, temp)
self.ax.set_ylim(self.draw_scale)
self.ax.set_title('Temperature (K)')
self.ax.set_xlabel('x axis (m)')
self.ax.set_ylabel('temperature (K)')
plt.show(block=False)
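    # Hedged usage sketch (not part of the original source); assuming the bundled materials
    # database provides 'Cu', something like
    #     obj = SingleObject(293., materials=('Cu',), borders=(1, 11), materials_order=(0,),
    #                        dx=0.01, dt=0.1, file_name='output.txt', draw=[])
    #     obj.compute(10, 5)
    # would build a copper thermal object at 293 K and simulate 10 s, writing every 5 time steps.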
def show_figure(self, figure_type, draw_scale=None):
"""Plotting.
Initializes a specific live plotting. `figure_type` is a string
identifying the plotting. This version only allows the plotting of the
'temperature'. `draw_scale` defines the range of temperatures. If None,
this range is found automatically for every frame.
"""
# check the validity of inputs
if isinstance(draw_scale, list) or isinstance(draw_scale, tuple):
condition = (len(draw_scale) == 2)
elif draw_scale is None:
condition = True
else:
condition = False
condition = condition and isinstance(figure_type, str)
if not condition:
raise ValueError
self.draw_scale = draw_scale
if figure_type == 'temperature':
if figure_type not in self.draw:
self.draw.append(figure_type)
self.figure = plt.figure()
self.ax = self.figure.add_subplot(111)
temp = []
for i in range(len(self.object.temperature)):
temp.append(self.object.temperature[i][0])
if not self.draw_scale:
vmax = max(temp)
vmin = min(temp)
if vmax == vmin:
vmin = vmin - 0.1
vmax = vmax + 0.1
temp = np.array(temp)
x_plot = [self.object.dx*j for j in range(len(temp))]
self.online, = self.ax.plot(x_plot, temp)
self.ax.set_ylim([vmin, vmax])
else:
temp = np.array(temp)
x_plot = [self.object.dx*j for j in range(len(temp))]
self.online, = self.ax.plot(x_plot, temp)
self.ax.set_ylim(self.draw_scale)
self.ax.set_title('Temperature (K)')
self.ax.set_xlabel('x axis (m)')
self.ax.set_ylabel('temperature (K)')
plt.show(block=False)
def activate(self, initial_point, final_point):
"""Activation.
Activates the thermal object between `initial_point` to `final_point`.
"""
# check the validity of inputs
condition = isinstance(initial_point, int)
condition = condition and isinstance(final_point, int)
if not condition:
raise ValueError
self.object.activate(initial_point, final_point)
if self.draw:
for drawing in self.draw:
if drawing == 'temperature':
try:
temp = []
for i in range(len(self.object.temperature)):
temp.append(self.object.temperature[i][0])
if not self.draw_scale:
vmax = max(temp)
vmin = min(temp)
if vmax == vmin:
vmin = vmin - 0.1
vmax = vmax + 0.1
temp = np.array(temp)
self.online.set_ydata(temp)
self.ax.set_ylim([vmin, vmax])
else:
temp = np.array(temp)
self.online.set_ydata(temp)
self.figure.canvas.draw()
except:
pass
def deactivate(self, initial_point, final_point):
"""Deactivation.
Deactivates the thermal object between `initial_point` to
`final_point`.
"""
# check the validity of inputs
condition = isinstance(initial_point, int)
condition = condition and isinstance(final_point, int)
if not condition:
raise ValueError
self.object.deactivate(initial_point, final_point)
if self.draw:
for drawing in self.draw:
if drawing == 'temperature':
try:
temp = []
for i in range(len(self.object.temperature)):
temp.append(self.object.temperature[i][0])
if not self.draw_scale:
vmax = max(temp)
vmin = min(temp)
if vmax == vmin:
vmin = vmin - 0.1
vmax = vmax + 0.1
temp = np.array(temp)
self.online.set_ydata(temp)
self.ax.set_ylim([vmin, vmax])
else:
temp = np.array(temp)
self.online.set_ydata(temp)
self.figure.canvas.draw()
except:
pass
def change_power(self, power_type, power, initial_point, final_point):
"""Heat power source change.
        Changes the coefficients for the heat power sources by a value of power
from `initial_point` to `final_point`. `power_type` is a string that
represents the type of coefficient, i.e. 'Q' or 'Q0'.
"""
# check the validity of inputs
value = isinstance(initial_point, int)
if value and isinstance(final_point, int):
cond1 = True
else:
cond1 = False
cond2 = isinstance(power, int) or isinstance(power, float)
if isinstance(power_type, str):
if power_type == 'Q' or power_type == 'Q0':
cond3 = True
else:
cond3 = False
else:
cond3 = False
if not (cond1 and cond2 and cond3):
raise ValueError
if power_type == 'Q':
for j in range(initial_point, final_point):
self.object.Q[j] = power
if power_type == 'Q0':
for j in range(initial_point, final_point):
self.object.Q0[j] = power
def change_boundaries(self, boundaries):
"""Boundary change.
Changes the `boundaries` variable.
"""
# check the validity of inputs
if isinstance(boundaries, tuple):
if len(boundaries) == 2:
condition = True
else:
condition = False
else:
condition = False
if not condition:
raise ValueError
self.object.boundaries = boundaries
def compute(self, time_interval, write_interval, solver='explicit_k(x)',
verbose=True):
"""Compute the thermal process.
Computes the system for time_interval seconds, and writes into the
`file_name` file every `write_interval` time steps. Four different
solvers can be used: `'explicit_general'`, `'explicit_k(x)'`,
`'implicit_general'`, and `'implicit_k(x)'`. If `verbose = True`, then
        the progress of the computation is shown.
"""
# check the validity of inputs
cond1 = isinstance(time_interval, float)
cond1 = cond1 or isinstance(time_interval, int)
cond2 = isinstance(write_interval, int)
if isinstance(solver, str):
all_solvers = ['implicit_general', 'implicit_k(x)',
'explicit_k(x)', 'explicit_general']
if solver in all_solvers:
cond3 = True
else:
cond3 = False
else:
cond3 = False
cond4 = isinstance(verbose, bool)
condition = cond1 and cond2 and cond3 and cond4
if not condition:
raise ValueError
# number of time steps for the given timeInterval
nt = int(time_interval / self.object.dt)
# number of time steps counting from the last writing process
nw = 0
# computes
for j in range(nt):
# updates the time_passed
self.object.time_passed = self.object.time_passed + self.object.dt
            # defines the material properties according to the state list
for i in range(1, self.object.num_points - 1):
if self.object.state[i] is True:
value = self.object.materials_index[i]
self.object.rho[i] = self.object.materials[value].rhoa(
self.object.temperature[i][0])
self.object.Cp[i] = self.object.materials[value].cpa(
self.object.temperature[i][0])
self.object.k[i] = self.object.materials[value].ka(
self.object.temperature[i][0])
if self.object.state[i] is False:
value = self.object.materials_index[i]
self.object.rho[i] = self.object.materials[value].rho0(
self.object.temperature[i][0])
self.object.Cp[i] = self.object.materials[value].cp0(
self.object.temperature[i][0])
self.object.k[i] = self.object.materials[value].k0(
self.object.temperature[i][0])
# SOLVERS
# implicit k constant
if solver == 'implicit_general':
value = solvers.implicit_general(self.object)
self.object.temperature, self.object.lheat = value
# implicit k dependent on x
if solver == 'implicit_k(x)':
value = solvers.implicit_k(self.object)
self.object.temperature, self.object.lheat = value
# explicit k constant
if solver == 'explicit_general':
value = solvers.explicit_general(self.object)
self.object.temperature, self.object.lheat = value
# explicit k dependent on x
if solver == 'explicit_k(x)':
value = solvers.explicit_k(self.object)
self.object.temperature, self.object.lheat = value
nw = nw + 1
if self.draw:
for drawing in self.draw:
if drawing == 'temperature':
try:
value = nw + 1 == write_interval
if value or j == 0 or j == nt - 1:
temp = []
for i in range(len(self.object.temperature)):
temp.append(self.object.temperature[i][0])
if not self.draw_scale:
vmax = max(temp)
vmin = min(temp)
if vmax == vmin:
vmin = vmin - 0.1
vmax = vmax + 0.1
temp = np.array(temp)
self.online.set_ydata(temp)
self.ax.set_ylim([vmin, vmax])
else:
temp = np.array(temp)
self.online.set_ydata(temp)
self.figure.canvas.draw()
except:
pass
# writes the temperature to file_name file ...
# if the number of time steps is verified
if self.object.file_name:
if nw == write_interval or j == 0 or j == nt - 1:
line = '%f,' % self.object.time_passed
for i in self.object.temperature:
new_line = '%f,' % i[1]
line = line + new_line
line = line[:-1] + '\n'
f = open(self.object.file_name, 'a')
f.write(line)
f.close()
if nw == write_interval:
nw = 0
if verbose:
                print('progress:', int(100*j/nt), '%', end="\r")
if verbose:
print('Finished simulation')
| djsilva99/heatrapy | heatrapy/dimension_1/objects/single.py | single.py | py | 17,265 | python | en | code | 51 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.array",... |
9002196810 | from ast import parse
import pathlib
import configparser
import shutil
import enum
from sre_constants import CATEGORY
output = pathlib.Path("./.out")
shutil.rmtree(output, ignore_errors=True)
output.mkdir(exist_ok=True)
book_counter = 0
cfg = configparser.ConfigParser()
cfg.read(".input.ini", encoding="utf-8")
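# .input.ini is expected to provide a [books] section whose multi-line 'titles' value lists one book title per line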
with open(f".out/books.lua", encoding="utf-8", mode="w") as definition:
with open(f".out/books.txt", encoding="utf-8", mode="w") as text:
definition.write(
"""
--
-- Please, do not delete this comment.
-- This file is generated with ZoMeGen by DiRaven
-- https://github.com/diraven/zomegen
--
""".strip()
+ "\n\n"
)
titles = cfg["books"]["titles"]
print(titles)
for title in titles.strip().splitlines():
book_counter += 1
definition.write(
f"""
-- {title}
item book_ukrlit_{book_counter}
{{
DisplayCategory = Literature,
DisplayName = book_ukrlit_{book_counter},
Type = Literature,
Icon = Book,
Weight = 0.5,
UnhappyChange = -40,
StressChange = -40,
BoredomChange = -50,
FatigueChange = +5,
StaticModel = Book,
WorldStaticModel = BookClosedGround,
}}
""".strip()
+ "\n\n"
)
# Write text.
text.write(
f"""
DisplayName_book_ukrlit_{book_counter} = "{title}",
""".strip()
+ "\n"
)
| diraven/zomegen | books/__main__.py | __main__.py | py | 1,440 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 15,
"usage_type": "call"
}
] |
25969778046 | import requests
from bs4 import BeautifulSoup
import pandas as pd
#程式欢迎语
print("**欢迎来到UCAR爬虫程式**")
print("\n")
#将资料存入建立好的list
titles = []
date = []
url_list = []
clicks =[]
replys = []
#自定义查询关键字及日期区间
x = str(input("请输入想爬取的关键字:"))
print("日期格式输入范例(YYYYMMDD):20200715")
print("\n")
start_date = int(input("请输入想爬取的起始日期:")) #格式YYYYMMDD:20180101
end_date = int(input("请输入想爬取的结束日期:")) #格式YYYYMMDD:20191231
fake_request_url= "https://forum.u-car.com.tw/forum/list?keywords=" + x
print("\n")
print("爬取中...请稍后")
#抓使用者输入之关键字所有网页
z = 0
while(True):
z += 1
real_request_url = fake_request_url + "&page=" + str(z)
#print(real_request_url)
response = requests.get(real_request_url)
response_text = response.text
soup = BeautifulSoup(response_text, "html.parser")
#print(soup)
#判断这一页目录有没有文章(有就接下一步,没有就break)
#值得注意的是,这里判断每个目录页是否有文章存在的标签是选择.writer而不是用.title的原因为:在该目录页没有文章时还是有其他的.title标签,若选择.title做判断会导致无穷循环程式无法结束,所以选择.writer为判断依据
#当然也可以选择其他判断标签,这里就以.writer为例
if soup.select(".writer"):
pass
else:
break
#将所有div class="cell_topic"的内容爬下来储存到变数soup.find1(是个list)
soup_find1 = soup.find_all('div', 'cell_topic')
#抓发文日期
#循环soup_find1这个list
for i in range(len(soup_find1)):
#第一页且list前两项0,1是置顶广告,所以continue
if (z == 1 and i <= 1):
continue
b = soup_find1[i].find('div', 'postby margin-right-10').find('p').text
#print(b)
re_b = b[:10]
#print(re_b)
re_b_b = int(re_b.replace('/', ''))
#print(re_b_b)
#print(re_b)
#判断发文日期是否符合使用者需求并丢到list
if (start_date <= re_b_b and re_b_b <= end_date):
pass
else:
continue
date.append(re_b)
#print(re_b)
#抓网址
url = soup_find1[i].find('div', 'title').find('a')
#print(url)
a = 'https://forum.u-car.com.tw'
if url is not None:
url_list.append(a + url.get('href'))
else:
url_list.append("(本文已被删除)")
#print(a + url.get('href'))
#抓标题
c = soup_find1[i].find('div', 'title').find('a')
#print(c)
if c is not None:
titles.append(c.text)
else:
titles.append('(本文已被删除)')
print(c.text)
#抓点阅数
click_count = soup_find1[i].find('div', 'cell_topic_view').find('p')
if click_count is not None:
clicks.append(click_count.text)
else:
clicks.append("0")
#print(click_count.text)
#抓回复数
replys_count = soup_find1[i].find('div', 'cell_topic_chats').find('p')
if click_count is not None:
replys.append(replys_count.text)
else:
replys.append("0")
#print(replys_count.text)
#print('46行循环结束')
print("\n")
print("转档中...请稍后")
#转为DataFrame
df = pd.DataFrame(
{
'标题' : titles,
'点阅数' : clicks,
'回复数' : replys,
'发文日期' : date,
'文章连结' : url_list
}
)
#另存为csv
df.to_csv( "UCAR_" + x +"回传结果.csv", index = False, encoding = "utf_8_sig")
#程式结束
len_titles = len(titles)
print("本次共爬出 {} 篇文章".format(len_titles))
print("\n")
end = input("请输入任意键结束程式:")
| weiweibro87777/UCAR_web-crawler | ucar.py | ucar.py | py | 3,533 | python | zh | code | 1 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 111,
"usage_type": "call"
}
] |
5782054342 | from django.contrib import admin
from .models import Listing
# Register the Listing model and customize its admin list view attributes.
class ListingAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'is_published', 'price', 'list_date', 'realtor') # display items
list_display_links = ('id', 'title', 'realtor') #clickable display items
list_filter = ('realtor',) # filter display items
list_editable = ('is_published', 'price') # editable list item
search_fields = ('title', 'address', 'city', 'description', 'state', 'zipcode') # search panels filter items
list_per_page = 3 # display list per page
admin.site.register(Listing, ListingAdmin)
| MonadWizard/django_HouseSellRealEstate_project | listings/admin.py | admin.py | py | 671 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 14,
"usage_type": "call"
},... |
5073225844 | import urllib, urllib.request
from datetime import datetime
def get_data(province_id): # Отримання тестових даних із WEB-сторінки
url = 'https://www.star.nesdis.noaa.gov/smcd/emb/vci/VH/get_TS_admin.php?country=UKR&provinceID={}&year1=1981&year2=2020&type=Mean'.format(province_id)
# Відкриття WEB-сторінки можна зробити наступним чином:
webpage = urllib.request.urlopen(url)
text = webpage.read()
# Отримати поточну дату і час
now = datetime.now()
# Згенерувати строку з поточою датою і часом та необхідним форматуванням можна за допомогою методу strftime
date_and_time_time = now.strftime("%d.%m.%Y_%H^%M^%S")
# Створити новий файл за допомоги функції open
out = open('D:\\AD\\' + 'NOAA_ID' + str(province_id) + '-' + date_and_time_time + '.csv', 'wb')
# Після відкриття у змінній text міститься текст із WEB-сторінки, який тепер можна записати у файл
out.write(text)
out.close()
import pandas as pd
def make_header(filepath):
headers = ['Year', 'Week', 'SMN', 'SMT', 'VCI', 'TCI', 'VHI', 'empty']
dataframe = pd.read_csv(filepath, header=1, names=headers)
    dataframe = dataframe.drop(dataframe.loc[dataframe['VHI'] == -1].index)  # drop rows flagged with VHI == -1
return dataframe
| DJeik7/lab2 | Ad1.py | Ad1.py | py | 1,504 | python | uk | code | 0 | github-code | 6 | [
{
"api_name": "urllib.request.urlopen",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "dateti... |
23156107935 | from flask import Flask
from main import main
from mypage import mypage
from challengedetail import challengedetail
from flask import Flask, render_template, jsonify, request, session, redirect, url_for
from db import db
app = Flask(__name__)
# JWT 토큰을 만들 때 필요한 비밀문자열입니다. 아무거나 입력해도 괜찮습니다.
# 이 문자열은 서버만 알고있기 때문에, 내 서버에서만 토큰을 인코딩(=만들기)/디코딩(=풀기) 할 수 있습니다.
SECRET_KEY = 'SPARTA'
# JWT 패키지를 사용합니다. (설치해야할 패키지 이름: PyJWT)
import jwt
# 토큰에 만료시간을 줘야하기 때문에, datetime 모듈도 사용합니다.
import datetime
# 회원가입 시엔, 비밀번호를 암호화하여 DB에 저장해두는 게 좋습니다.
# 그렇지 않으면, 개발자(=나)가 회원들의 비밀번호를 볼 수 있으니까요.^^;
import hashlib
app.register_blueprint(main)
app.register_blueprint(mypage)
app.register_blueprint(challengedetail)
#################################
## HTML을 주는 부분 ##
#################################
@app.route('/')
def main():
token_receive = request.cookies.get('mytoken')
try:
payload = jwt.decode(token_receive, SECRET_KEY, algorithms=['HS256'])
user_info = db.user.find_one({"user_id": payload['id']})
return render_template('main.html', nickname=user_info["nick"])
except jwt.ExpiredSignatureError:
return redirect(url_for("login", msg="로그인 시간이 만료되었습니다."))
except jwt.exceptions.DecodeError:
return redirect(url_for("login", msg="로그인 정보가 존재하지 않습니다."))
@app.route('/mypage/')
def mypage():
token_receive = request.cookies.get('mytoken')
try:
payload = jwt.decode(token_receive, SECRET_KEY, algorithms=['HS256'])
user_info = db.user.find_one({"user_id": payload['id']})
return render_template('mypage.html', nickname=user_info["nick"])
except jwt.ExpiredSignatureError:
return redirect(url_for("login", msg="로그인 시간이 만료되었습니다."))
except jwt.exceptions.DecodeError:
return redirect(url_for("login", msg="로그인 정보가 존재하지 않습니다."))
@app.route('/challengedetail/')
def challengedetail():
token_receive = request.cookies.get('mytoken')
try:
payload = jwt.decode(token_receive, SECRET_KEY, algorithms=['HS256'])
user_info = db.user.find_one({"user_id": payload['id']})
return render_template('challengedetail.html', nickname=user_info["nick"])
except jwt.ExpiredSignatureError:
return redirect(url_for("login", msg="로그인 시간이 만료되었습니다."))
except jwt.exceptions.DecodeError:
return redirect(url_for("login", msg="로그인 정보가 존재하지 않습니다."))
@app.route('/login')
def login():
msg = request.args.get("msg")
return render_template('login.html', msg=msg)
@app.route('/signup')
def register():
return render_template('signup.html')
#################################
## 로그인을 위한 API ##
#################################
# [회원가입 API]
# id, pw, nickname을 받아서, mongoDB에 저장합니다.
# 저장하기 전에, pw를 sha256 방법(=단방향 암호화. 풀어볼 수 없음)으로 암호화해서 저장합니다.
@app.route('/api/register', methods=['POST'])
def api_register():
id_receive = request.form['id_give']
pw_receive = request.form['pw_give']
nickname_receive = request.form['nickname_give']
pw_hash = hashlib.sha256(pw_receive.encode('utf-8')).hexdigest()
db.user.insert_one({'user_id': id_receive, 'password': pw_hash, 'nick': nickname_receive})
return jsonify({'result': 'success'})
# [로그인 API]
# id, pw를 받아서 맞춰보고, 토큰을 만들어 발급합니다.
@app.route('/api/login', methods=['POST'])
def api_login():
id_receive = request.form['id_give']
pw_receive = request.form['pw_give']
print(id_receive)
print(pw_receive)
# 회원가입 때와 같은 방법으로 pw를 암호화합니다.
pw_hash = hashlib.sha256(pw_receive.encode('utf-8')).hexdigest()
# id, 암호화된pw을 가지고 해당 유저를 찾습니다.
result = db.user.find_one({'user_id': id_receive, 'password': pw_hash})
print(result)
# 찾으면 JWT 토큰을 만들어 발급합니다.
if result is not None:
# JWT 토큰에는, payload와 시크릿키가 필요합니다.
# 시크릿키가 있어야 토큰을 디코딩(=풀기) 해서 payload 값을 볼 수 있습니다.
# 아래에선 id와 exp를 담았습니다. 즉, JWT 토큰을 풀면 유저ID 값을 알 수 있습니다.
# exp에는 만료시간을 넣어줍니다. 만료시간이 지나면, 시크릿키로 토큰을 풀 때 만료되었다고 에러가 납니다.
payload = {
'id': id_receive,
'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=78)
}
token = jwt.encode(payload, SECRET_KEY, algorithm='HS256')
# token을 줍니다.
return jsonify({'result': 'success', 'token': token})
# 찾지 못하면
else:
return jsonify({'result': 'fail', 'msg': '아이디/비밀번호가 일치하지 않습니다.'})
# [유저 정보 확인 API]
# 로그인된 유저만 call 할 수 있는 API입니다.
# 유효한 토큰을 줘야 올바른 결과를 얻어갈 수 있습니다.
# (그렇지 않으면 남의 장바구니라든가, 정보를 누구나 볼 수 있겠죠?)
@app.route('/api/nick', methods=['GET'])
def api_valid():
token_receive = request.cookies.get('mytoken')
print("받은 토큰 값")
print(token_receive)
# try / catch 문?
# try 아래를 실행했다가, 에러가 있으면 except 구분으로 가란 얘기입니다.
try:
# token을 시크릿키로 디코딩합니다.
# 보실 수 있도록 payload를 print 해두었습니다. 우리가 로그인 시 넣은 그 payload와 같은 것이 나옵니다.
payload = jwt.decode(token_receive, SECRET_KEY, algorithms=['HS256'])
print(payload)
# payload 안에 id가 들어있습니다. 이 id로 유저정보를 찾습니다.
# 여기에선 그 예로 닉네임을 보내주겠습니다.
userinfo = db.user.find_one({'user_id': payload['id']}, {'_id': 0})
return jsonify({'result': 'success', 'nickname': userinfo['nick']})
except jwt.ExpiredSignatureError:
# 위를 실행했는데 만료시간이 지났으면 에러가 납니다.
return jsonify({'result': 'fail', 'msg': '로그인 시간이 만료되었습니다.'})
except jwt.exceptions.DecodeError:
return jsonify({'result': 'fail', 'msg': '로그인 정보가 존재하지 않습니다.'})
if __name__ == '__main__':
app.run('0.0.0.0', port=5000, debug=True) | cchloe0927/Mallenge | app.py | app.py | py | 6,984 | python | ko | code | 2 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "main.main",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "mypage.mypage",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "challengedetail.challenged... |
10423084443 | from __future__ import annotations
import dataclasses
from typing import TYPE_CHECKING
from unittest.mock import MagicMock, PropertyMock
import pytest
from randovania.exporter import pickup_exporter
from randovania.game_description import default_database
from randovania.game_description.assignment import PickupTarget
from randovania.game_description.resources.pickup_index import PickupIndex
from randovania.games.dread.exporter.patch_data_factory import (
DreadAcquiredMemo,
DreadPatchDataFactory,
get_resources_for_details,
)
from randovania.games.dread.layout.dread_cosmetic_patches import DreadCosmeticPatches, DreadMissileCosmeticType
from randovania.games.game import RandovaniaGame
from randovania.generator.pickup_pool import pickup_creator
from randovania.interface_common.players_configuration import PlayersConfiguration
from randovania.layout.base.ammo_pickup_state import AmmoPickupState
from randovania.layout.base.pickup_model import PickupModelStyle
from randovania.layout.base.standard_pickup_state import StandardPickupState
from randovania.layout.layout_description import LayoutDescription
from randovania.lib import json_lib
if TYPE_CHECKING:
from randovania.layout.preset import Preset
@pytest.mark.parametrize(
("rdvgame_filename", "expected_results_filename", "num_of_players"),
[
("starter_preset.rdvgame", "starter_preset.json", 1), # starter preset
("crazy_settings.rdvgame", "crazy_settings.json", 1), # crazy settings
("dread_dread_multiworld.rdvgame", "dread_dread_multiworld_expected.json", 2), # dread-dread multi
("dread_prime1_multiworld.rdvgame", "dread_prime1_multiworld_expected.json", 2), # dread-prime1 multi
("elevator_rando.rdvgame", "elevator_rando.json", 1), # elevator_rando multi
],
)
def test_create_patch_data(test_files_dir, rdvgame_filename, expected_results_filename, num_of_players, mocker):
# Setup
rdvgame = test_files_dir.joinpath("log_files", "dread", rdvgame_filename)
players_config = PlayersConfiguration(0, {i: f"Player {i + 1}" for i in range(num_of_players)})
description = LayoutDescription.from_file(rdvgame)
cosmetic_patches = DreadCosmeticPatches()
mocker.patch(
"randovania.layout.layout_description.LayoutDescription.shareable_word_hash",
new_callable=PropertyMock,
return_value="Words Hash",
)
mocker.patch(
"randovania.layout.layout_description.LayoutDescription.shareable_hash",
new_callable=PropertyMock,
return_value="$$$$$",
)
# Run
data = DreadPatchDataFactory(description, players_config, cosmetic_patches).create_data()
# Expected Result
expected_results_path = test_files_dir.joinpath("patcher_data", "dread", expected_results_filename)
expected_data = json_lib.read_path(expected_results_path)
# Uncomment to easily view diff of failed test
# json_lib.write_path(expected_results_path, data); assert False
assert data == expected_data
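# Helper: returns a copy of the preset with the Power Bomb Tank ammo state's requires_main_item flag set to `locked`.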
def _preset_with_locked_pb(preset: Preset, locked: bool):
pickup_database = default_database.pickup_database_for_game(RandovaniaGame.METROID_DREAD)
preset = dataclasses.replace(
preset,
configuration=dataclasses.replace(
preset.configuration,
ammo_configuration=preset.configuration.ammo_pickup_configuration.replace_state_for_ammo(
pickup_database.ammo_pickups["Power Bomb Tank"],
AmmoPickupState(requires_main_item=locked),
),
),
)
return preset
@pytest.mark.parametrize("locked", [False, True])
def test_pickup_data_for_pb_expansion(locked, dread_game_description, preset_manager):
pickup_database = default_database.pickup_database_for_game(RandovaniaGame.METROID_DREAD)
resource_db = dread_game_description.resource_database
# Setup
pickup = pickup_creator.create_ammo_pickup(
pickup_database.ammo_pickups["Power Bomb Tank"],
[2],
locked,
resource_db,
)
creator = pickup_exporter.PickupExporterSolo(DreadAcquiredMemo.with_expansion_text(), RandovaniaGame.METROID_DREAD)
# Run
details = creator.export(PickupIndex(0), PickupTarget(pickup, 0), pickup, PickupModelStyle.ALL_VISIBLE)
result = get_resources_for_details(details.original_pickup, details.conditional_resources, details.other_player)
# Assert
assert result == [
[
{"item_id": "ITEM_WEAPON_POWER_BOMB_MAX", "quantity": 2},
]
if locked
else [
{"item_id": "ITEM_WEAPON_POWER_BOMB_MAX", "quantity": 2},
{"item_id": "ITEM_WEAPON_POWER_BOMB", "quantity": 1},
]
]
@pytest.mark.parametrize("locked", [False, True])
def test_pickup_data_for_main_pb(locked, dread_game_description, preset_manager):
pickup_database = default_database.pickup_database_for_game(RandovaniaGame.METROID_DREAD)
resource_db = dread_game_description.resource_database
# Setup
pickup = pickup_creator.create_standard_pickup(
pickup_database.standard_pickups["Power Bomb"],
StandardPickupState(included_ammo=(3,)),
resource_database=resource_db,
ammo=pickup_database.ammo_pickups["Power Bomb Tank"],
ammo_requires_main_item=locked,
)
creator = pickup_exporter.PickupExporterSolo(DreadAcquiredMemo.with_expansion_text(), RandovaniaGame.METROID_DREAD)
# Run
details = creator.export(PickupIndex(0), PickupTarget(pickup, 0), pickup, PickupModelStyle.ALL_VISIBLE)
result = get_resources_for_details(details.original_pickup, details.conditional_resources, details.other_player)
# Assert
assert result == [
[
{"item_id": "ITEM_WEAPON_POWER_BOMB", "quantity": 1},
{"item_id": "ITEM_WEAPON_POWER_BOMB_MAX", "quantity": 3},
]
]
def test_pickup_data_for_recolored_missiles(dread_game_description, preset_manager):
pickup_database = default_database.pickup_database_for_game(RandovaniaGame.METROID_DREAD)
resource_db = dread_game_description.resource_database
preset = preset_manager.default_preset_for_game(RandovaniaGame.METROID_DREAD).get_preset()
description = MagicMock(spec=LayoutDescription)
description.all_patches = {0: MagicMock()}
description.get_preset.return_value = preset
description.get_seed_for_player.return_value = 1000
cosmetics = DreadCosmeticPatches(missile_cosmetic=DreadMissileCosmeticType.PRIDE)
# Setup
pickup = pickup_creator.create_ammo_pickup(
pickup_database.ammo_pickups["Missile Tank"], (2,), False, resource_database=resource_db
)
factory = DreadPatchDataFactory(description, PlayersConfiguration(0, {0: "Dread"}), cosmetics)
creator = pickup_exporter.PickupExporterSolo(DreadAcquiredMemo.with_expansion_text(), RandovaniaGame.METROID_DREAD)
# Run
details = creator.export(PickupIndex(0), PickupTarget(pickup, 0), pickup, PickupModelStyle.ALL_VISIBLE)
result = factory._pickup_detail_for_target(details)
# Assert
assert result == {
"pickup_type": "actor",
"caption": "Missile Tank acquired.\nMissile capacity increased by 2.",
"resources": [[{"item_id": "ITEM_WEAPON_MISSILE_MAX", "quantity": 2}]],
"pickup_actor": {"scenario": "s010_cave", "layer": "default", "actor": "ItemSphere_ChargeBeam"},
"model": ["item_missiletank_green"],
"map_icon": {
"icon_id": "item_missiletank",
"original_actor": {"actor": "powerup_chargebeam", "layer": "default", "scenario": "s010_cave"},
},
}
def test_pickup_data_for_a_major(dread_game_description, preset_manager):
pickup_database = default_database.pickup_database_for_game(RandovaniaGame.METROID_DREAD)
resource_db = dread_game_description.resource_database
preset = preset_manager.default_preset_for_game(RandovaniaGame.METROID_DREAD).get_preset()
description = MagicMock(spec=LayoutDescription)
description.all_patches = {0: MagicMock()}
description.get_preset.return_value = preset
description.get_seed_for_player.return_value = 1000
# Setup
pickup = pickup_creator.create_standard_pickup(
pickup_database.standard_pickups["Speed Booster"],
StandardPickupState(),
resource_database=resource_db,
ammo=None,
ammo_requires_main_item=False,
)
factory = DreadPatchDataFactory(description, PlayersConfiguration(0, {0: "Dread"}), MagicMock())
creator = pickup_exporter.PickupExporterSolo(DreadAcquiredMemo.with_expansion_text(), RandovaniaGame.METROID_DREAD)
# Run
details = creator.export(PickupIndex(0), PickupTarget(pickup, 0), pickup, PickupModelStyle.ALL_VISIBLE)
result = factory._pickup_detail_for_target(details)
# Assert
assert result == {
"pickup_type": "actor",
"caption": "Speed Booster acquired.",
"resources": [[{"item_id": "ITEM_SPEED_BOOSTER", "quantity": 1}]],
"pickup_actor": {"scenario": "s010_cave", "layer": "default", "actor": "ItemSphere_ChargeBeam"},
"model": ["powerup_speedbooster"],
"map_icon": {
"icon_id": "powerup_speedbooster",
"original_actor": {"actor": "powerup_chargebeam", "layer": "default", "scenario": "s010_cave"},
},
}
@pytest.fixture()
def _setup_and_teardown_for_wrong_custom_spawn():
# modify the default start to have no collision_camera (asset_id) and no vanilla
# actor name for a start point
game_desc = default_database.game_description_for(RandovaniaGame.METROID_DREAD)
region = game_desc.region_list.region_with_name("Artaria")
area = region.area_by_name("Intro Room")
node = area.node_with_name("Start Point")
modified_node = dataclasses.replace(node, extra={})
area.nodes.remove(node)
area.nodes.append(modified_node)
asset_id = area.extra["asset_id"]
del area.extra["asset_id"]
yield
area.nodes.remove(modified_node)
area.nodes.append(node)
area.extra["asset_id"] = asset_id
@pytest.mark.usefixtures("_setup_and_teardown_for_wrong_custom_spawn")
def test_create_patch_with_wrong_custom_spawn(test_files_dir, mocker):
# test for a not createable spawn point
file = test_files_dir.joinpath("log_files", "dread", "starter_preset.rdvgame")
description = LayoutDescription.from_file(file)
players_config = PlayersConfiguration(0, {0: "Dread"})
cosmetic_patches = DreadCosmeticPatches()
mocker.patch(
"randovania.layout.layout_description.LayoutDescription.shareable_word_hash",
new_callable=PropertyMock,
return_value="Words Hash",
)
mocker.patch(
"randovania.layout.layout_description.LayoutDescription.shareable_hash",
new_callable=PropertyMock,
return_value="$$$$$",
)
patcher = DreadPatchDataFactory(description, players_config, cosmetic_patches)
with pytest.raises(
KeyError,
match="Artaria/Intro Room/Start Point has neither a start_point_actor_name nor the "
"area has a collision_camera_name for a custom start point",
):
patcher.create_data()
@pytest.fixture()
def _setup_and_teardown_for_custom_spawn():
# modify a node to be a valid start point without a vanilla spawn
game_desc = default_database.game_description_for(RandovaniaGame.METROID_DREAD)
region = game_desc.region_list.region_with_name("Artaria")
area = region.area_by_name("Charge Tutorial")
node = area.node_with_name("Start Point")
modified_node = dataclasses.replace(node, valid_starting_location=True)
area.nodes.remove(node)
area.nodes.append(modified_node)
yield
area.nodes.remove(modified_node)
area.nodes.append(node)
@pytest.mark.usefixtures("_setup_and_teardown_for_custom_spawn")
def test_create_patch_with_custom_spawn(test_files_dir, mocker):
# test for custom spawn point referenced by starting location and teleporters
file = test_files_dir.joinpath("log_files", "dread", "custom_start.rdvgame")
description = LayoutDescription.from_file(file)
players_config = PlayersConfiguration(0, {0: "Dread"})
cosmetic_patches = DreadCosmeticPatches()
mocker.patch(
"randovania.layout.layout_description.LayoutDescription.shareable_word_hash",
new_callable=PropertyMock,
return_value="Words Hash",
)
mocker.patch(
"randovania.layout.layout_description.LayoutDescription.shareable_hash",
new_callable=PropertyMock,
return_value="$$$$$",
)
data = DreadPatchDataFactory(description, players_config, cosmetic_patches).create_data()
# Expected Result
expected_data = test_files_dir.read_json("patcher_data", "dread", "custom_start.json")
# Update the file
# json_lib.write_path(test_files_dir.joinpath("patcher_data", "dread", "custom_start.json"), data); assert False
assert data == expected_data
| randovania/randovania | test/games/dread/exporter/test_dread_patch_data_factory.py | test_dread_patch_data_factory.py | py | 12,949 | python | en | code | 165 | github-code | 6 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "randovania.interface_common.players_configuration.PlayersConfiguration",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "randovania.layout.layout_description.LayoutDescriptio... |
13415300522 | # -*- coding: utf-8 -*-
"""
@author: Taoting
Convert a COCO-format JSON file into labelme-format annotation JSON
"""
import json
import cv2
import numpy as np
import os
# Use one labelme-format JSON file as a reference, since much of the information is identical and does not need to be modified.
def reference_labelme_json():
ref_json_path = './bin/25.json'
data=json.load(open(ref_json_path))
return data
def labelme_shapes(data,data_ref,id):
shapes = []
    label_num = {'box':0}  # modify according to your own data
for ann in data['annotations']:
if id == ann['image_id']:
shape = {}
class_name = [i['name'] for i in data['categories'] if i['id'] == ann['category_id']]
            # the label numbering for each class starts from _1
label_num[class_name[0]] += 1
shape['label'] = class_name[0]
shape['points'] = []
# ~ print(ann['segmentation'])
if not type(ann['segmentation']) == list:
continue
else:
                x = ann['segmentation'][0][::2]  # odd-positioned values are the x coordinates
                y = ann['segmentation'][0][1::2]  # even-positioned values are the y coordinates
for j in range(len(x)):
shape['points'].append([x[j], y[j]])
shape['shape_type'] = data_ref['shapes'][0]['shape_type']
shape['flags'] = data_ref['shapes'][0]['flags']
shapes.append(shape)
return shapes
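# Illustrative note (added): labelme_shapes() above assumes COCO annotation entries shaped
# roughly like the example below; all values here are made up.
# example_annotation = {
#     'image_id': 1,
#     'category_id': 1,
#     'segmentation': [[10.0, 20.0, 30.0, 20.0, 30.0, 40.0, 10.0, 40.0]],  # flat x,y polygon
# }
# For such an entry it would produce one labelme shape with 'label', 'points' (a list of
# [x, y] pairs), plus 'shape_type' and 'flags' copied from the reference json.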
def Coco2labelme(json_path,data_ref):
with open(json_path,'r') as fp:
        data = json.load(fp)  # load the json file
for img in data['images']:
id = img['id']
data_labelme={}
data_labelme['version'] = data_ref['version']
data_labelme['flags'] = data_ref['flags']
data_labelme['shapes'] = labelme_shapes(data,data_ref,id)
data_labelme['imagePath'] = img['file_name']
data_labelme['imageData'] = None
# ~ data_labelme['imageData'] = data_ref['imageData']
data_labelme['imageHeight'] = img['height']
data_labelme['imageWidth'] = img['width']
file_name = data_labelme['imagePath']
        # save the json file
json.dump(data_labelme,open('./%s.json' % file_name.split('.')[0],'w'),indent=4)
return data_labelme
if __name__ == '__main__':
root_dir = '/home/eason/PackSeg/'
json_list = os.listdir(root_dir)
    # the reference json
data_ref = reference_labelme_json()
for json_path in json_list:
if json_path.split('.')[-1] == 'json':
            print('Current file: ', json_path)
data_labelme= Coco2labelme(os.path.join(root_dir,json_path), data_ref)
#file_name = data_labelme['imagePath']
            # save the json file
#json.dump(data_labelme,open('./%s.json' % file_name.split('.')[0],'w'),indent=4)
| Tommy-Bie/Logistics-Package-Separation-Software | DatasetUtils/coco2labelme.py | coco2labelme.py | py | 2,887 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 70,
... |
9658653690 | import matplotlib.pyplot as plt
import scipy.signal as ss
import numpy as np
import math
window_size = 55
'------------------------------------------------------------------------------'
# description: computes and returns the derivative of the signal.
# function name: FUNC_diff
# parameters:
#    ecg_filtered - numpy 1D array
# returns:
#    diff - numpy 1D array
def FUNC_diff(ecg_filtered):
N = len(ecg_filtered)
    '''DERIVATIVE--------------------------------------------------------------'''
#diff = np.diff(ecg_filtered)
diff = (2*ecg_filtered[5:N]+ecg_filtered[4:N-1]-ecg_filtered[2:N-3]-2*ecg_filtered[1:N-4])/8;
    '''The resulting vector is 5 samples shorter'''
return diff
# description: computes and returns the square of the signal.
# function name: FUNC_sqr
# parameters:
#    diff - numpy 1D array
# returns:
#    sqr - numpy 1D array
def FUNC_sqr(diff):
    '''SQUARE------------------------------------------------------------------'''
sqr = diff**2
return sqr
# description: integrates the signal over a moving window and returns it.
# function name: FUNC_signal_integration
# parameters:
#    sqr - numpy 1D array
#    fs - integer
# returns:
#    ecgmf - numpy 1D array
def FUNC_signal_integration(sqr, fs):
dt = 1/float(fs)
    '''MOVING-WINDOW INTEGRATION----------------------------------------------'''
    '''Create the window'''
window = np.ones(window_size)
    '''Integration'''
temp = ss.lfilter(window,1,sqr)
ecgmf = ss.medfilt(temp,9)
ecgmf = ecgmf*dt
    '''Remove the filter delay'''
delay = math.ceil(len(window) / 2)
ecgmf = ecgmf[delay:len(ecgmf)]
return ecgmf
# description: finds and returns the approximate QRS boundaries
#       needed to locate the R wave (and the S wave).
# function name: FUNC_prepare_for_maxima_search
# parameters:
#    ecgmf - numpy 1D array
# returns:
#    left_T - numpy 1D array
#    right_T - numpy 1D array
def FUNC_prepare_for_maxima_search(ecgmf):
    '''Find the highest amplitude'''
max_A = max(ecgmf)
    '''Build the array to be searched'''
threshold = 0.2
region_poz = ecgmf>(threshold*max_A)
region_poz = region_poz*1
region_poz = np.array([region_poz])
    '''Pad with a zero'''
region_poz_LR = np.insert(region_poz, 0, 0)
region_poz_RL = np.append(region_poz, 0)
    '''SEARCH FOR MAXIMA------------------------------------------------------'''
deltaLR = np.diff(region_poz_LR)
deltaRL = np.diff(region_poz_RL)
    '''Determine the segment boundaries'''
left = np.where(deltaLR==1);
right = np.where(deltaRL==-1);
left_T = np.transpose(left)
right_T = np.transpose(right)
return left_T, right_T
# description: finds and returns the QRS values based on the approximate QRS boundaries.
# function name: FUNC_find_qrs_values
# parameters:
#    left_T - numpy 1D array
#    right_T - numpy 1D array
#    ecg_filtered - numpy 1D array
#    ecgmf - numpy 1D array
# returns:
#    qrs_left_values - numpy 1D array
#    qrs_right_values - numpy 1D array
#    qrs_left_values_ecgmf - numpy 1D array
#    qrs_right_values_ecgmf - numpy 1D array
def FUNC_find_qrs_values(left_T, right_T, ecg_filtered, ecgmf):
qrs_left_values = np.empty(len(left_T))
qrs_right_values = np.empty(len(left_T))
qrs_left_values_ecgmf = np.empty(len(left_T))
qrs_right_values_ecgmf = np.empty(len(left_T))
ecg_filtered[12:len(ecg_filtered)]
for i in range(0,len(left_T)):
qrs_left_values[i] = ecg_filtered[left_T[i]]
qrs_right_values[i] = ecg_filtered[right_T[i]]
qrs_left_values_ecgmf[i] = ecgmf[left_T[i]]
qrs_right_values_ecgmf[i] = ecgmf[right_T[i]]
return qrs_left_values, qrs_right_values, qrs_left_values_ecgmf, qrs_right_values_ecgmf
# description: finds local maxima within the approximate QRS boundaries.
#       Returns the values and indices of the maxima found.
# function name: FUNC_find_max
# parameters:
#    left_T - numpy 1D array
#    right_T - numpy 1D array
#    ecg_filtered - numpy 1D array
# returns:
#    max_value - numpy 1D array
#    max_index - numpy 1D array
def FUNC_find_max(left_T, right_T, ecg_filtered):
max_index = np.empty(len(left_T))
max_value = np.empty(len(left_T))
    #trim the start of the original signal to align its indices with the differentiated and integrated signal
    #ecg_filtered = ecg_filtered[18:len(ecg_filtered)]
    #the signal is also shortened at the end during integration
for i in range(0,len(left_T)):
start = int(left_T[i])
end = int(right_T[i])
max_value[i] = ecg_filtered[start]
max_index[i] = start
for j in range(start,end):
if ecg_filtered[j] > max_value[i]:
max_value[i] = ecg_filtered[j]
max_index[i] = j
#max_index[i] = np.argmax(ecg_filtered[left_T[i]:right_T[i]])
#max_index[i] = max_index[i]+left_T[i]
#max_value[i] = ecg_filtered[max_index[i]]
'''for i in range(0,len(left_T)):
max_index[i] = np.argmax(ecg_filtered[left_T[i]:right_T[i]])
max_index[i] = max_index[i]+left_T[i]
max_value[i] = ecg_filtered[max_index[i]]'''
return max_value, max_index
# description: finds local minima within the approximate QRS boundaries.
#       Returns the values and indices of the minima found.
# function name: FUNC_find_min
# parameters:
#    left_T - numpy 1D array
#    right_T - numpy 1D array
#    ecg_filtered - numpy 1D array
# returns:
#    min_value - numpy 1D array
#    min_index - numpy 1D array
def FUNC_find_min(left_T, right_T, ecg_filtered):
min_index = np.empty(len(left_T))
min_value = np.empty(len(left_T))
for i in range(0,len(left_T)):
start = int(left_T[i])
end = int(right_T[i])
min_value[i] = ecg_filtered[start]
min_index[i] = start
for j in range(start,end):
if ecg_filtered[j] < min_value[i]:
min_value[i] = ecg_filtered[j]
min_index[i] = j
'''
for i in range(0,len(left_T)):
min_index[i] = np.argmin(ecg_filtered[left_T[i]:right_T[i]])
min_index[i] = min_index[i]+left_T[i]
min_value[i] = ecg_filtered[min_index[i]]'''
return min_value, min_index
# description: R wave detection. Returns the values and indices of the detected R waves.
# function name: FUNC_r_detection
# parameters:
#    ecg_filtered - numpy 1D array
#    fs - integer
# returns:
#    r_value - numpy 1D array
#    r_index - numpy 1D array
def FUNC_r_detection(ecg_filtered, fs):
    stateDict = {'Done': 1,
                 'Error': -1,
                 'Signal too short': -2,
                 'Incorrect input': -3}
try:
if not len(ecg_filtered):
stateFlag = stateDict['Incorrect input']
return stateFlag, [[], []]
        diff = FUNC_diff(ecg_filtered)
        sqr = FUNC_sqr(diff)
        # FUNC_signal_integration expects the sampling frequency, not the sampling period
        ecgmf = FUNC_signal_integration(sqr, fs)
left_T, right_T = FUNC_prepare_for_maxima_search(ecgmf)
r_value, r_index = FUNC_find_max(left_T, right_T, ecg_filtered)
stateFlag = stateDict['Done']
return stateFlag, [r_value, r_index]
except Exception as e:
print(f'Module R_PEAKS failed: {e}')
stateFlag = stateDict['Error']
return stateFlag, [[], []]
# description: S wave detection, assuming the S wave is a local minimum.
#       Returns the values and indices of the detected S waves.
# function name: FUNC_s_detection
# parameters:
#    ecg_filtered - numpy 1D array
#    fs - integer
# returns:
#    s_value - numpy 1D array
#    s_index - numpy 1D array
def FUNC_s_detection(ecg_filtered, fs):
    diff = FUNC_diff(ecg_filtered)
    sqr = FUNC_sqr(diff)
    # FUNC_signal_integration expects the sampling frequency, not the sampling period
    ecgmf = FUNC_signal_integration(sqr, fs)
left_T, right_T = FUNC_prepare_for_maxima_search(ecgmf)
s_value, s_index = FUNC_find_min(left_T, right_T, ecg_filtered)
return s_value, s_index
# description: Plots the individual signals with the detected points marked on them
# function name: PRINT_all
# parameters:
#    ecg_filtered - numpy 1D array
#    fs - integer
def PRINT_all(ecg_filtered, fs):
diff = FUNC_diff(ecg_filtered)
sqr = FUNC_sqr(diff)
ecgmf = FUNC_signal_integration(sqr, fs)
left_T, right_T = FUNC_prepare_for_maxima_search(ecgmf)
r_value, r_index = FUNC_find_max(left_T, right_T, ecg_filtered)
s_value, s_index = FUNC_find_min(left_T, right_T, ecg_filtered)
qrs_left_values, qrs_right_values, qrs_left_values_ekgmf, qrs_right_values_ekgmf = FUNC_find_qrs_values(left_T, right_T, ecg_filtered, ecgmf)
plt.figure(1)
    'Approximate QRS boundaries marked on the filtered signal'
plt.subplot(411)
plt.plot(ecg_filtered)
plt.plot(left_T,qrs_left_values, marker='o', color='g', ls='')
plt.plot(right_T,qrs_right_values, marker='o', color='y', ls='')
plt.ylabel('ekg_filtered')
plt.subplot(412)
plt.plot(diff)
plt.ylabel('diff')
plt.subplot(413)
plt.plot(sqr)
plt.ylabel('sqr')
    'Approximate QRS boundaries marked on the integrated signal'
plt.subplot(414)
plt.plot(ecgmf)
plt.plot(left_T,qrs_left_values_ekgmf, marker='o', color='g', ls='')
plt.plot(right_T,qrs_right_values_ekgmf, marker='o', color='y', ls='')
plt.ylabel('ecgmf')
plt.figure(2)
plt.plot(ecg_filtered)
plt.plot(r_index,r_value, marker='x', color='r', ls='')
plt.plot(s_index,s_value, marker='x', color='b', ls='')
plt.plot(left_T,qrs_left_values, marker='o', color='g', ls='')
plt.plot(right_T,qrs_right_values, marker='o', color='y', ls='')
    plt.xlabel('R and S waves and approximate QRS boundaries')
plt.figure(3)
plt.plot(ecg_filtered)
plt.plot(r_index,r_value, marker='x', color='r', ls='')
    plt.xlabel('R waves')
plt.show()
# description: Marks the detected R waves on the ECG signal
# function name: PRINT_r
# parameters:
#    ecg_filtered - numpy 1D array
#    r_index - numpy 1D array
#    r_value - numpy 1D array
def PRINT_r(ecg_filtered, r_index, r_value):
plt.figure(4)
plt.plot(ecg_filtered)
plt.plot(r_index,r_value, marker='x', color='r', ls='')
    plt.xlabel('R waves')
plt.show()
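# Minimal usage sketch (added for illustration; the sampling frequency and the synthetic
# signal below are assumptions, not taken from the original project):
# import numpy as np
# fs = 360                                  # sampling frequency in Hz (assumed)
# t = np.arange(0, 10, 1.0 / fs)
# ecg_filtered = np.sin(2 * np.pi * t)      # stand-in for a filtered ECG signal
# flag, (r_value, r_index) = FUNC_r_detection(ecg_filtered, fs)
# s_value, s_index = FUNC_s_detection(ecg_filtered, fs)
# PRINT_r(ecg_filtered, r_index, r_value)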
| sebastianczuma/r_peaks | R_PEAKS_old.py | R_PEAKS_old.py | py | 9,657 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "numpy.ones",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "scipy.signal.lfilter",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "scipy.signal",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "scipy.signal.medfilt",
... |
22093759735 | import pandas as pd
from sklearn import svm
import statistics
data = pd.read_csv('cleaned_LaptopDataset.csv')
t = statistics.median(data['latest_price'])
h = []
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
for x in data.latest_price:
if (x >= t):
h.append(1)
else:
h.append(0)
data['latest_price'] = h
for col in data:
data[col] = le.fit_transform(data[col])
########Train-test Dataset#######
x = data.drop('latest_price', axis=1)
y = data['latest_price']
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
######## SVM Using Linear Kernel #######
classifiere = svm.SVC(kernel="linear", C=1, gamma=1)
classifiere.fit(x_train, y_train)
predictions = classifiere.predict(x_test)
########SVM Accuracy#######
from sklearn.metrics import accuracy_score
acc = accuracy_score(y_test, predictions)
print("accuracy of SVM " , acc*100,"%")
| mohamedezzeldeenhassanmohamed/Data-Mining-Project | svm.py | svm.py | py | 1,007 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "statistics.median",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sk... |
14475582891 | from django import forms
from django.forms import modelformset_factory
from dashboard.forms.educator_account_form import EducatorAccountForm
from dashboard.models.educator_model import Educator
class EducatorForm(forms.ModelForm):
class Meta:
model = Educator
fields = ['photo', 'name', 'title', 'email', 'about_me']
def __init__(self, *args, accounts, educator_accounts, educator_not_accounts, **kwargs):
super().__init__(*args, **kwargs)
self.accounts = accounts
self.EducatorAccountFormset = modelformset_factory(model=EducatorAccountForm.Meta.model,
form=EducatorAccountForm,
extra=len(educator_not_accounts),
validate_max=True,
max_num=len(accounts),
can_delete=True)
self.accounts_formset = self.EducatorAccountFormset(args[0],
form_kwargs={'accounts': accounts},
queryset=educator_accounts,
initial=educator_not_accounts)
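# Hypothetical usage sketch (added; the view-side names below are assumptions, not part of this module):
# form = EducatorForm(
#     request.POST or None,
#     instance=educator,
#     accounts=accounts,
#     educator_accounts=educator_accounts_queryset,
#     educator_not_accounts=educator_not_accounts_initial,
# )
# if form.is_valid() and form.accounts_formset.is_valid():
#     ...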
| EslamTK/Students-Performance-System | dashboard/forms/educator_form.py | educator_form.py | py | 1,367 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "dashboard.models.educator_model.Educator",
"line_number": 10,
"usage_type": "name"
},
{
"a... |
44633342253 | from typing import List
from torch import optim
from torch.optim.optimizer import Optimizer
from torch_geometric.data.data import Data
from src.dataset import citeSeer
from src.model import GAT
import torch
import torch.nn.functional as F
from torch_geometric.data import Dataset
EPOCH = 200
# --- dataloader
'''
The whole graph is used, so a dataloader is not needed.
'''
citeSeer_ds = citeSeer('data')
citeseer = citeSeer_ds[0]
# --- Model
gat_model = GAT(citeSeer_ds.num_features,
num_layers = 2,
hidden_dim = [64],
num_heads = [8],
output_dim = citeSeer_ds.num_classes)
# --- Setting
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
gat_model.to(device)
citeseer = citeseer.to(device)
optimizer = torch.optim.Adam(gat_model.parameters(), lr = 0.005, weight_decay = 5e-4)
criterion = F.nll_loss
# --- training
def train(data : Dataset) -> None:
gat_model.train()
optimizer.zero_grad()
out = gat_model(data.x, data.edge_index)
loss = criterion(out[data.train_mask], data.y[data.train_mask])
loss.backward()
optimizer.step()
@torch.no_grad()
def test(data : Dataset) -> dict:
gat_model.eval()
out= gat_model(data.x, data.edge_index)
acc_dic = {}
for name, mask in data('train_mask', 'test_mask', 'val_mask'):
acc = float((out[mask].argmax(-1) == data.y[mask]).sum() / mask.sum())
acc_dic[name[:-5]] = acc
return acc_dic
for epoch in range(EPOCH):
train(citeseer)
acc_dic = test(citeseer)
print(f"Epoch : {epoch+1:03d}, Train : {acc_dic['train']:.4f}, Test : {acc_dic['test']:.4f}") | February24-Lee/gnn_research | test_gat_exmaple.py | test_gat_exmaple.py | py | 1,645 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "src.dataset.citeSeer",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "src.model.GAT",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_availab... |
36076011155 |
# -*- coding: utf-8 -*-
from flask import Flask, render_template, request,jsonify, redirect,url_for
from json import dumps
import celery , sys
from celeryconfig import appcelery
from Buscador import tasks
import time, json
app = Flask(__name__)
@app.route('/datos', methods=['GET', 'POST'])
def recibirInformacion():
resultados=[]
expedientes=[]
expedientes=convertirAstring(request.args.get('expedientes'))
informacion = tratamientosDatos(expedientes,request.args.get('array'))
resultados=informacion.unirVectores()
return jsonify(resultados)
def convertirAstring(argumentos):
argumentos=argumentos.replace(' ','')
argumentos=argumentos.replace('"','')
argumentos=argumentos.replace(']','')
argumentos=argumentos.replace('[','')
argumentos=argumentos.split(',',argumentos.count(','))
return argumentos
class tratamientosDatos():
def __init__(self, resultados_ncbi, resultados_array):
self.datos_ncbi=resultados_ncbi[:]
self.resultados_array=resultados_array
def almacenar_datos_visualizacion_array(self):
visualizacion_array=[]
for i in appcelery.AsyncResult(self.resultados_array).get()['experiments']['experiment']:
visualizacion_array.append({'id': i['id'],
'accession': i['accession'],
'name': i['name'], 'releasedate': i['releasedate'],
'description': i['description'][0]['text'],'bd': 'arrayexpress', 'descarga': "null" })
return visualizacion_array
def almacenar_datos_visualizacion_ncbi(self):
tam_list=len(self.datos_ncbi)
visualizacion_ncbi=[]
for j in range(tam_list):
i=appcelery.AsyncResult(self.datos_ncbi[j])
identificador=i.get()['result']['uids'][0]
visualizacion_ncbi.append({'id': identificador,
'accession':i.get()['result'][identificador]['accession'],
'name': i.get()['result'][identificador]['title'],
'releasedate': i.get()['result'][identificador]['pdat'],
'description': i.get()['result'][identificador]['summary'],
'bd': 'ncbi_gds', 'descarga': i.get()['result'][identificador]['ftplink'] })
return visualizacion_ncbi
def unirVectores(self):
vector_ncbi=[]
vector_array=[]
vector_ncbi=self.almacenar_datos_visualizacion_ncbi()
vector_array=self.almacenar_datos_visualizacion_array()
for i in vector_array:
vector_ncbi.append(i)
return vector_ncbi
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80, debug=True)
| AntonioAlcM/tfg_ugr | backend/tratamientoDatos.py | tratamientoDatos.py | py | 2,620 | python | pt | code | 1 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "flask.reque... |
2500816506 | from bokeh.layouts import column
from bokeh.models.widgets import RadioButtonGroup,Select, Div, Button,PreText
from bokeh.models import TextInput, RadioGroup
from bokeh.plotting import curdoc
button_group = RadioButtonGroup(labels=["Physical parameters", "Geometric parameters", "Initial conditions"], active=1)
## Add the elements inside each radio button
slope = TextInput(value='25', title="slope:")
###### Drop down menus
Coefficient = Select(title="Coefficient:", value="chezy", options=["Manning", "chezy"])
Coefficient_value1 = TextInput(value=" ", title="chezy:" )
Coefficient_value2 = TextInput(value=" ", title="Manning:" )
#### Add geometric parameters
length = TextInput(value=" ", title="Length:")
width = TextInput(value=" ", title="Width:")
#LABELS = ["km", "m"]
#radio_group = RadioGroup(labels=LABELS, active=0)
#### Add ginitial conditiions
depth = TextInput(value=" ", title="Depth(m):")
Q = TextInput(value=" ", title="Discharge(m3/sec):")
texto = PreText(text="""Please click here""",width=500, height=100)
layout = ()
def button_group_change(active):
ch = active
if ch == 0:
slope.visible=True
Coefficient.visible=True
Coefficient_value1.visible=True
Coefficient_value2.visible=True
length.visible=False
width.visible=False
depth.visible=False
Q.visible=False
# layout= column(slope, Coefficient,Coefficient_value1, Coefficient_value2)
elif ch == 1:
length.visible=True
width.visible=True
slope.visible=False
Coefficient.visible=False
Coefficient_value1.visible=False
Coefficient_value2.visible=False
depth.visible=False
Q.visible=False
# layout= column(length, width)
elif ch == 2:
depth.visible=True
Q.visible=True
slope.visible=False
Coefficient.visible=False
Coefficient_value1.visible=False
Coefficient_value2.visible=False
length.visible=False
width.visible=False
# layout= column(length, width )
texto.text ='text' #str(layout)
#curdoc().add_root(column(button_group,slope, Coefficient))
#curdoc().add_root(row(button_group,length))
layout = column(slope, Coefficient, Coefficient_value1, Coefficient_value2, length, width, depth, Q)
button_group.on_click(button_group_change)
###show
curdoc().add_root(column(texto,button_group,layout))
#bokeh serve --show BWC.py
| sduarte09/Module5 | Exercise/Group5/BWC.py | BWC.py | py | 2,455 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "bokeh.models.widgets.RadioButtonGroup",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "bokeh.models.TextInput",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "bokeh.models.widgets.Select",
"line_number": 14,
"usage_type": "call"
},
... |
18732949012 | import json
import numpy as np
import util
class AudfprintAligner:
matches = {}
def __init__(self, matchfile):
with open(matchfile) as f:
            for x, ys in json.load(f).items():
                for y, m in ys.items():
m = m[0]
if "Matched" in m:
d = float(self.between(m, "Matched ", " s "))
t1 = float(self.between(m, " at ", " s "))
f1 = self.between(m, " in ", " to ")
t2 = float(self.between(m, " time ", " s "))
f2 = self.between(m, " in ", " with ", 1)
if f1 not in self.matches:
self.matches[f1] = {}
self.matches[f1][f2] = [t1, t2, d]
def between(self, string, s1, s2, index=0):
string = string.split(s1)[index+1]
return string[:string.find(s2)]
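    # Illustrative example (added): the parser above expects audfprint report lines roughly of
    # the form below (the exact wording is an assumption about audfprint's output):
    #   m = "Matched 5.2 s starting at 3.1 s in query.mp3 to time 48.7 s in ref.mp3 with 21 of 24 common hashes"
    #   self.between(m, "Matched ", " s ")      -> "5.2"     (matched duration d)
    #   self.between(m, " at ", " s ")          -> "3.1"     (t1 in the query file)
    #   self.between(m, " in ", " with ", 1)    -> "ref.mp3" (the reference file f2)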
def get_alignment_points(self, file, reffile):
if file in self.matches:
if reffile in self.matches[file]:
t1, t2, d = self.matches[file][reffile]
filedur = util.get_duration(file)
refdur = util.get_duration(reffile)
delta_start = t2-t1
delta_end = delta_start+filedur #assume slope 1
return [delta_start, delta_end], 1
return None, 0 | grateful-dead-live/meta-alignment | audfprint_aligner.py | audfprint_aligner.py | py | 1,407 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "util.get_duration",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "util.get_duration",
"line_number": 33,
"usage_type": "call"
}
] |
31472363916 | # Code adapted from https://www.codeproject.com/Articles/5297227/Deep-Learning-for-Fashion-Classification
# import tensorflow.keras as keras
import os
import matplotlib.pyplot as plt
import matplotlib.image as img
import tensorflow as tf
import keras
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=config))
data = '/home/lunet/conce/Downloads/Codeproject/DeepFashion Custom/DeepFashion/Train'
os.chdir(data)
batch_size = 3
def DataLoad(shape, preprocessing):
"""Create the training and validation datasets for a given image shape."""
img_data = ImageDataGenerator(
preprocessing_function=preprocessing,
horizontal_flip=True,
validation_split=0.1,
)
height, width = shape
train_dataset = img_data.flow_from_directory(
os.getcwd(),
target_size=(height, width),
classes=['Blazer', 'Blouse', 'Cardigan', 'Dress', 'Jacket',
'Jeans', 'Jumpsuit', 'Romper', 'Shorts', 'Skirts', 'Sweater', 'Sweatpants'
, 'Tank', 'Tee', 'Top'],
batch_size=batch_size,
subset='training',
)
val_dataset = img_data.flow_from_directory(
os.getcwd(),
target_size=(height, width),
classes=['Blazer', 'Blouse', 'Cardigan', 'Dress', 'Jacket',
'Jeans', 'Jumpsuit', 'Romper', 'Shorts', 'Skirts', 'Sweater',
'Sweatpants', 'Tank', 'Tee', 'Top'],
batch_size=batch_size,
subset='validation'
)
return train_dataset, val_dataset
vgg16 = keras.applications.vgg16
conv_model = vgg16.VGG16(weights='imagenet', include_top=False)
conv_model.summary()
train_dataset, val_dataset = DataLoad((224, 224), preprocessing=vgg16.preprocess_input)
# Grab one batch from the training generator (originally intended for plotting images with labels in a notebook)
X_train, y_train = next(train_dataset)
# Load ImageNet weights of this network, to be used during the transfer learning
conv_model = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
# flatten the output of the convolutional part:
x = keras.layers.Flatten()(conv_model.output)
# three hidden layers
x = keras.layers.Dense(100, activation='relu')(x)
x = keras.layers.Dense(100, activation='relu')(x)
x = keras.layers.Dense(100, activation='relu')(x)
# final softmax layer with 15 categories
predictions = keras.layers.Dense(15, activation='softmax')(x)
# creating the full model:
full_model = keras.models.Model(inputs=conv_model.input, outputs=predictions)
full_model.summary()
for layer in conv_model.layers:
layer.trainable = False
full_model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adamax(lr=0.001), metrics=['acc'])
history = full_model.fit_generator(
train_dataset,
validation_data=val_dataset,
workers=0,
epochs=7,
)
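# Optional follow-up (added suggestion, not in the original script): persist the fine-tuned
# network so it can be reloaded for inference later; the file name is an arbitrary choice.
# full_model.save('fashion_vgg16_finetuned.h5')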
def plot_history(history, yrange):
"""Plot loss and accuracy as a function of the epoch, for the training and validation datasets.
"""
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
# Get number of epochs
epochs = range(len(acc))
# Plot training and validation accuracy per epoch
plt.plot(epochs, acc)
plt.plot(epochs, val_acc)
plt.title('Training and validation accuracy')
plt.ylim(yrange)
# Plot training and validation loss per epoch
plt.figure()
plt.plot(epochs, loss)
plt.plot(epochs, val_loss)
plt.title('Training and validation loss')
plt.show()
plot_history(history, yrange=(0.9, 1))
test_data = '/home/lunet/conce/Downloads/Codeproject/DeepFashion Custom/DeepFashion/Train'
test_datagen = ImageDataGenerator()
test_generator = test_datagen.flow_from_directory(test_data, target_size=(224, 224), batch_size=3, class_mode='categorical')
# X_test, y_test = next(test_generator)
test_results = full_model.evaluate(test_generator)
print("test loss, test acc:", test_results) | nnanna217/msc-image-search | func/my_samples/cp_fashion-classifier.py | cp_fashion-classifier.py | py | 4,162 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tensorflow.compat.v1.ConfigProto",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1.keras.backend.set_session",
"line_number": 15,
"usage_type":... |
10713768489 | from scipy import stats, signal
from collections import defaultdict
import numpy as np
from tqdm.notebook import tqdm
import pandas as pd
from src import config
from src.FFT import FFTAnalysis as FFT
def _extractTimeDomainFeatures(sig):
''' Extracts time domain features from one vibration signal'''
# Get time features
features = dict()
if sig is not None:
rmsVal = np.sqrt(np.square(sig).mean())
sigMax = sig.max()
absSig = np.abs(sig)
absMean = absSig.mean()
features = {
'mean': sig.mean(),
'min': sig.min(),
'max': sigMax,
'std': np.std(sig),
'skew': stats.skew(sig),
'kurt': stats.kurtosis(sig),
'rms': rmsVal,
'p2p': sigMax - sig.min(),
'crestF': sigMax / rmsVal,
'impulseF': absSig.max() / absMean,
'shapeF': rmsVal / absMean
}
return features
def _getSubBand(FFTfreqs, FFTlevels, band):
''''Extract portion of the FFT corresponding to the given frequency band '''
idxKeep = (FFTfreqs >= band[0]) & (FFTfreqs < band[1])
freqs = FFTfreqs[idxKeep]
levels = FFTlevels[idxKeep]
return freqs, levels
def _extractPeaks(freqs, levels, distance):
''' Extracts peaks from an FFT '''
peakIdx, _ = signal.find_peaks(levels, distance = distance)
peakLevels = levels[peakIdx]
peakFreqs = freqs[peakIdx]
return peakFreqs, peakLevels
def _getTopPeaks(levels, freqs, noPeaks):
''' Extract top <n> peaks from an FFT '''
# Sort peak indices from highest to lowest level
sortIdx = np.argsort(levels)
sortIdx = np.flip(sortIdx)
# Grab top <n> peaks
levels = levels[sortIdx[0:noPeaks]]
freqs = freqs[sortIdx[0:noPeaks]]
return freqs, levels
def _extractFrequencyDomainFeatures(sig, FFTsettings,
noPeaks = config.FBAND_PEAKS,
peakDist = config.PEAK_DIST,
fs = config.Fs,
fBands = config.FBANDS):
''' Extracts frequency domain features from one vibration signal'''
FFTfreqs, FFTlevels = None, None
features = defaultdict()
if sig is not None:
FFTfreqs, FFTlevels = FFT(sig, config.Fs, FFTsettings)
# Split in bands
for bandNo, band in enumerate(fBands):
freqs, levels = _getSubBand(FFTfreqs, FFTlevels, band)
freqs, levels = _extractPeaks(freqs, levels, peakDist)
freqs, levels = _getTopPeaks(levels, freqs, noPeaks)
# Add peaks from current band to the dictionary with the features
for peakNo in range(noPeaks):
featName = f'band_{bandNo + 1}_peak_{peakNo+1}_level'
features[featName] = levels[peakNo]
featName = f'band_{bandNo + 1}_peak_{peakNo+1}_freq'
features[featName] = freqs[peakNo]
return features
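# Illustrative note (added): assuming, e.g., config.FBANDS = [(0, 500), (500, 2000)] and
# config.FBAND_PEAKS = 2 (the real values live in src/config.py), the dict returned above
# would hold keys such as:
#   band_1_peak_1_level, band_1_peak_1_freq, band_1_peak_2_level, band_1_peak_2_freq,
#   band_2_peak_1_level, ...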
def _extractFeatures(sig, FFTsettings):
''''Extracts time- and frequency-domain features from one vibration signal'''
feats = _extractTimeDomainFeatures(sig)
freqFeats = _extractFrequencyDomainFeatures(sig, FFTsettings)
feats.update(freqFeats)
return feats
def extractDatasetFeatures(df, FFTsettings = config.FFT_SETTINGS):
''' Extracts features from the entire dataset '''
# Extract features from every experiment
driveFeats, fanFeats = [], []
for idx, record in tqdm(df.iterrows(), total = df.shape[0]):
FFTsettings['HPCutoffFrequency'] = 20 * record['MotorSpeed_rpm'] / 60
FFTsettings['LPCutoffFrequency'] = 10 * record['MotorSpeed_rpm'] / 60
driveFeats.append(_extractFeatures(record['DriveVibs'], FFTsettings))
fanFeats.append(_extractFeatures(record['FanVibs'], FFTsettings))
# Make dataframes with the extracted features
dfDrive = pd.DataFrame(driveFeats)
dfFan = pd.DataFrame(fanFeats)
# Add corresponding labels
dfDrive['label'] = df['DriveLabel']
dfFan['label'] = df['FanLabel']
# Remove rows with missing records for fan-end bearing
dfFan.dropna(axis = 0, how = 'any', inplace = True)
return dfDrive, dfFan
| Miltos-90/Bearing_Fault_Classification | src/feature_extraction.py | feature_extraction.py | py | 4,432 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.sqrt",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 28,
... |
40678810913 | import argparse
import json
import os
import platform
import subprocess
from typing import List
HOST_MAGMA_ROOT = '../../../.'
def main() -> None:
""" Run main"""
args = _parse_args()
if args.mount:
_run(['up', '-d', 'test'])
_run(['exec', 'test', 'bash'])
_down(args)
elif args.lint:
_run(['up', '-d', 'test'])
_run(['exec', 'test', 'make', 'lint'])
_down(args)
elif args.precommit:
_run(['up', '-d', 'test'])
_run(['exec', 'test', 'make', 'precommit'])
_down(args)
elif args.coverage:
_run(['up', '-d', 'test'])
_run(['exec', 'test', 'make', 'cover'])
_down(args)
elif args.tests:
_run(['up', '-d', 'test'])
_run(['exec', 'test', 'make', 'test'])
_down(args)
elif args.health:
# _set_mac_env_vars is needed to override LOG_DRIVER for mac
_set_mac_env_vars()
_run(['-f', 'docker-compose.yml', '-f', 'docker-compose.override.yml', '-f', 'docker-compose.health.override.yml', 'up', '-d'])
_run_health()
_down(args)
elif args.git:
print(json.dumps(_run_get_git_vars(), indent=4, sort_keys=True))
else:
_run(['build'] + _get_default_build_args(args))
_down(args)
def _run(cmd: List[str]) -> None:
""" Run the required docker compose command """
cmd = ['docker', 'compose', '--compatibility'] + cmd
print("Running '%s'..." % ' '.join(cmd))
try:
subprocess.run(cmd, check=True) # noqa: S603
except subprocess.CalledProcessError as err:
exit(err.returncode)
def _down(args: argparse.Namespace) -> None:
if args.down:
_run(['down'])
def _get_default_build_args(args: argparse.Namespace) -> List[str]:
ret = []
git_info = _run_get_git_vars()
for arg, val in git_info.items():
ret.append("--build-arg")
ret.append("{0}={1}".format(arg, val))
if args.nocache:
ret.append('--no-cache')
return ret
def _run_get_git_vars():
try:
cmd = "tools/get_version_info.sh"
cmd_res = \
subprocess.run(cmd, check=True, capture_output=True) # noqa: S603
except subprocess.CalledProcessError as err:
print("Error _run_get_git_vars")
exit(err.returncode)
return json.loads(cmd_res.stdout)
def _run_health():
try:
cmd = "tools/docker_ps_healthcheck.sh"
subprocess.run(cmd, check=True) # noqa: S603
except subprocess.CalledProcessError as err:
print("Error _run_health")
exit(err.returncode)
def _set_mac_env_vars():
if (platform.system().lower() == "darwin"):
os.environ['LOG_DRIVER'] = "json-file"
def _parse_args() -> argparse.Namespace:
""" Parse the command line args """
# There are multiple ways to invoke finer-grained control over which
# images are built.
#
# (1) How many images to build
#
# all: all images
# default: images required for minimum functionality
# - excluding metrics images
# - including postgres, proxy, etc
#
# (2) Of the core orc8r images, which modules to build
#
# Defaults to all modules, but can be further specified by targeting a
# deployment type.
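    # Example invocations (illustrative, using only the flags defined below):
    #   ./build.py                  # build the images
    #   ./build.py --nocache        # rebuild without Docker layer caching
    #   ./build.py --tests --down   # run unit tests, then tear the containers down
    #   ./build.py --git            # print the git version info as JSON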
parser = argparse.ArgumentParser(description='Orc8r build tool')
# Run something
parser.add_argument(
'--tests', '-t',
action='store_true',
help='Run unit tests',
)
parser.add_argument(
'--mount', '-m',
action='store_true',
help='Mount the source code and create a bash shell',
)
parser.add_argument(
'--precommit', '-c',
action='store_true',
help='Mount the source code and run pre-commit checks',
)
parser.add_argument(
'--coverage', '-o',
action='store_true',
help='Generate test coverage statistics',
)
parser.add_argument(
'--lint', '-l',
action='store_true',
help='Run lint test',
)
parser.add_argument(
'--health', '-e',
action='store_true',
help='Run health test',
)
# Run something
parser.add_argument(
'--git', '-g',
action='store_true',
help='Get git info',
)
# How to do it
parser.add_argument(
'--nocache', '-n',
action='store_true',
help='Build the images with no Docker layer caching',
)
parser.add_argument(
'--down', '-down',
action='store_true',
default=False,
        help='Tear down the containers after running',
)
return parser.parse_args()
if __name__ == '__main__':
main()
| magma/magma | feg/gateway/docker/build.py | build.py | py | 4,696 | python | en | code | 1,605 | github-code | 6 | [
{
"api_name": "json.dumps",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "subprocess.run",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "subprocess.CalledProcessError",... |
13300386404 | #!/usr/bin/python3
import _thread
import re, time, cv2, serial
'''
ServoController interfaces with the arduino board to control the servo motor over
USB serial coms
'''
class ServoController:
def __init__(self):
self.ser = serial.Serial('com3', 9600, timeout=0.5)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.ser.close()
def servo_read_position(self):
self.ser.write(b'r')
pos = str(self.ser.readline())
        # First read is always empty, sometimes the first several,
        # so recursively call until numbers come back from the arduino
while pos == "b''":
pos = self.servo_read_position()
print("pos_remote_read: " + str(pos))
m = re.search('\d+', str(pos))
pos = int(m.group(0))
return pos
def servo_set_position(self, pos):
print("ad pos: " +str(pos))
self.ser.write(bytes(str(pos), 'utf8'))
self.ser.write(b'\n')
print("pos_remote: " + str(self.ser.readline()))
'''
CameraController controls camera rotation and motion detection
Currently written to sweep the camera, wait and detect motion, only continue sweeping once no motion detected
'''
class CameraController:
def camera_frame_grab(self):
ret, frame = self.cap.read()
cv2.imshow(self.frameName, frame)
cv2.waitKey(1)
def camera_rotate(self, start, stop, step_delay=0.2):
        # If the stop angle is bigger than the start angle, increment; otherwise decrement
# If the stop angle is smaller, it cannot be negative, if it's bigger,
# then it can't be bigger than 180
if start <= stop:
if stop < 25:
stop = 25
direction = 1
else:
if stop > 165:
stop = 165
direction = -1
for pos in range(start, stop, direction):
self.camera_frame_grab()
self.sc.servo_set_position(pos)
time.sleep(step_delay)
def camera_sweep(self):
self.camera_rotate(45, 180)
self.camera_rotate(180, 45)
def camera_left(self, degrees):
pos = self.sc.servo_read_position()
if pos <= 180:
self.camera_rotate(pos, pos + degrees)
def camera_right(self, degrees):
pos = self.sc.servo_read_position()
if pos > 0:
self.camera_rotate(pos, pos - degrees)
def detect_motion(self, duration):
start_time = time.time()
while (True):
ret, frame = self.cap.read()
if self.motionSense:
end_time = time.time()
                # Keep detecting motion for `duration` seconds; stop once the time has passed and no motion is detected
if(end_time - start_time) > duration and count == 1:
print("Finished recording " + str(duration))
break
fgmask = self.fgbg.apply(frame)
# Generate and prepare the threshold image, and find contours with it.
ret, thresh = cv2.threshold(fgmask, 127, 255, 0)
thresh = cv2.dilate(thresh, None, iterations=2)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Count contours with large areas, use them for movement simple detection
count = 1
for c in contours:
if cv2.contourArea(c) < 500:
continue
count += count
# optionally draw bounding boxes around the detected contours
if self.drawRect:
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imshow(self.frameName, frame)
k = cv2.waitKey(60) & 0xff
if k == 27:
break
def __init__(self, camera, servo=0, drawRect=0, motionSense=1):
if servo:
self.sc = ServoController()
self.drawRect = drawRect
self.motionSense = motionSense
self.frameName = "Camera " + str(camera)
self.cap = cv2.VideoCapture(camera)
self.fgbg = cv2.createBackgroundSubtractorMOG2()
ret, frame = self.cap.read()
cv2.imshow(self.frameName, frame)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
# When everything done, release the capture and serial port
self.cap.release()
cv2.destroyAllWindows()
def dynamic_camera(cam):
with CameraController(cam, 1) as cc:
while(True):
cc.camera_right(25)
cc.detect_motion(20)
cc.camera_right(25)
cc.detect_motion(20)
cc.camera_right(25)
cc.detect_motion(20)
cc.camera_right(25)
cc.detect_motion(20)
cc.camera_right(25)
cc.detect_motion(20)
cc.camera_right(25)
cc.detect_motion(20)
cc.camera_left(25)
cc.detect_motion(20)
cc.camera_left(25)
cc.detect_motion(20)
cc.camera_left(25)
cc.detect_motion(20)
cc.camera_left(25)
cc.detect_motion(20)
cc.camera_left(25)
cc.detect_motion(20)
cc.camera_left(25)
cc.detect_motion(20)
cc.camera_left(25)
cc.detect_motion(20)
cc.camera_left(25)
cc.detect_motion(20)
def static_camera(cam):
print("Static Cam - no motion detection")
with CameraController(cam, drawRect=1, motionSense=0) as cc:
while (True):
cc.detect_motion(20)
if __name__ == "__main__":
try:
_thread.start_new_thread(dynamic_camera, (1, ))
_thread.start_new_thread(static_camera, (0, ))
except:
print("Error: unable to start thread")
while 1:
pass | bradys/cat-cam | Cat_Cam.py | Cat_Cam.py | py | 6,213 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "serial.Serial",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 5... |
20651798683 | import math
import random
import numpy as np
from itertools import combinations
from copy import deepcopy
class Node:
def __init__(self):
self.parent = None
self.state = []
self.children = []
self.fully_expanded = False
self.Q = 0
self.N = 0
def __str__(self):
return f"node state: {self.state}, Q: {self.Q}, N: {self.N}, fully expanded: {self.fully_expanded}"
def ripple(v_sum, f_sum, f_leaves):
return f_leaves - (f_sum - v_sum) * (f_leaves / f_sum) if f_sum != 0 else 0
def distance(v1, v2):
return np.sqrt(np.sum(np.power(v1 - v2, 2)))
def ps(df, v, f, selections):
a = np.copy(f)
for selection in selections:
v_sum = df.loc[selection, 'real'].sum()
f_sum = df.loc[selection, 'predict'].sum()
a[selection] = ripple(v_sum, f_sum, df.loc[selection, 'predict'])
score = max(1 - distance(v, a) / distance(v, f), 0)
return score
def gps(v, f, selections):
a, b = [], []
for selection in selections:
selection_v = v[selection]
selection_f = f[selection]
with np.errstate(divide='ignore', invalid='ignore'):
selection_a = f[selection] * (selection_v.sum() / selection_f.sum())
selection_a = np.nan_to_num(selection_a)
a.extend(np.abs(selection_v - selection_a))
b.extend(np.abs(selection_v - selection_f))
selection = np.logical_or.reduce(selections)
non_selection_v = v[~selection]
non_selection_f = f[~selection]
a = np.mean(a)
b = np.mean(b)
c = np.nan_to_num(np.mean(np.abs(non_selection_v - non_selection_f)))
score = 1 - ((a + c) / (b + c))
return score
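# Worked example (added for illustration, not part of the original module): for
# v = np.array([10., 0., 0.]), f = np.array([5., 5., 5.]) and a single selection mask
# np.array([True, False, False]), the selected leaf is rescaled to match v's sum, so the
# error inside the selection vanishes while the error outside stays, giving
# gps(v, f, [mask]) == 1 - (0 + 5) / (5 + 5) == 0.5; higher scores mean the selection
# explains more of the forecast error.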
def get_unqiue_elements(df, cuboid):
return {tuple(row) for row in df[cuboid].values}
def get_element_mask(df, cuboid, combination):
return [np.logical_and.reduce([df[d] == e for d, e in zip(cuboid, c)]) for c in combination]
def ucb(node, C=math.sqrt(2.0)):
best_child = None
max_score = -1
for child in node.children:
if child.N > 0 and not child.fully_expanded:
left = child.Q
right = C * math.sqrt(math.log(node.N) / child.N)
score = left + right
if score > max_score:
best_child = child
max_score = score
return best_child
def init_children(node, elements):
children = [e for e in elements if e not in set(node.state)]
for c in children:
child = Node()
child.state = node.state + [c]
child.parent = node
node.children.append(child)
def get_initial_scores(df, elements, cuboid, v, f, scoring):
element_scores = dict()
for leaf in elements:
selections = get_element_mask(df, cuboid, [leaf])
if scoring == 'ps':
element_scores[leaf] = ps(df.copy(), v, f, selections)
else:
element_scores[leaf] = gps(v, f, selections)
return element_scores
def sublist(lst1, lst2):
return set(lst1) <= set(lst2)
def selection(node, elements):
while len(node.state) < len(elements):
if len(node.children) == 0: # First time to search this node.
init_children(node, elements)
return node
q_max = 0
all_visit = True
for child in node.children:
q_max = max(q_max, child.Q)
if child.N == 0: # Not all children have been visited.
all_visit = False
if not all_visit and random.random() > q_max:
return node # Expand current node
child_node = ucb(node) # Select the best path got go deeper into the tree.
if child_node is None: # If all children are already fully expanded.
if all_visit:
node.fully_expanded = True
if node.parent is None:
return node # The tree is fully exanded.
node = node.parent # Continue again with parent node.
else:
return node # Expand current node.
else:
node = child_node
node.fully_expanded = True
return node
def expand(node, element_scores):
best_child = None
max_score = -1
for child in node.children:
if child.N == 0:
score = element_scores[child.state[-1]]
if score > max_score:
max_score = score
best_child = child
return best_child
def evaluate(df, selected_node, cuboid, v, f, scoring):
selections = get_element_mask(df, cuboid, selected_node.state)
if scoring == 'ps':
score = ps(df.copy(), v, f, selections)
else:
score = gps(v, f, selections)
return score
def backup(node, new_q):
while node is not None:
node.N += 1
node.Q = max(node.Q, new_q)
node = node.parent
def MCTS(df, elements, cuboid, v, f, pt, m, scoring):
root = Node()
max_q = -1
best_selection = Node()
element_scores = get_initial_scores(df, elements, cuboid, v, f, scoring)
for i in range(m):
node = selection(root, elements)
if not node.fully_expanded:
node = expand(node, element_scores)
if root.fully_expanded:
break
new_q = evaluate(df, node, cuboid, v, f, scoring)
backup(node, new_q)
if new_q > max_q:
max_q = root.Q
best_selection = deepcopy(node)
elif (new_q == max_q) and not sublist(node.state, best_selection.state) and len(node.state) < len(
best_selection.state):
max_q = root.Q
best_selection = deepcopy(node)
if max_q >= pt:
break
return best_selection.state, max_q
def hierarchical_pruning(elements, layer, cuboid, candidate_set):
previous_layer_candidates = [candidate for candidate in candidate_set if candidate['layer'] == layer - 1]
parent_selections = [cand['elements'] for cand in previous_layer_candidates if set(cand['cuboid']) < set(cuboid)]
for parent_selection in parent_selections:
elements = [e for e in elements if np.any([set(pe) < set(e) for pe in parent_selection])]
return elements
def get_best_candidate(candidate_set):
# Sort by score, layer, number of elements
sorted_cands = sorted(candidate_set, key=lambda c: (c['score'], -c['layer'], -len(c['elements'])), reverse=True)
return sorted_cands[0]
def hotspot(df, dimensions, pt=0.67, m=200, scoring='gps', debug=False):
assert scoring in ['ps', 'gps'], "Supported scoring is 'ps' and 'gps'."
# Hierarcical pruning does not seem to work well when using gps scoring
use_pruning = scoring != 'gps'
v = df['real'].values
f = df['predict'].values
candidate_set = []
for layer in range(1, len(dimensions) + 1):
if debug:
print('Layer:', layer)
cuboids = [list(c) for c in combinations(dimensions, layer)]
for cuboid in cuboids:
if debug:
print('Cuboid:', cuboid)
elements = get_unqiue_elements(df, cuboid)
# if debug: print('Elements:', elements)
if use_pruning and layer > 1:
elements = hierarchical_pruning(elements, layer, cuboid, candidate_set)
# if debug: print('Filtered elements:', elements)
selected_set, score = MCTS(df, elements, cuboid, v, f, pt, m, scoring)
if debug:
print('Best subset:', selected_set, 'score', score)
candidate = {
'layer': layer,
'cuboid': cuboid,
'score': score,
'elements': np.array(selected_set)
}
if candidate['score'] >= pt:
return candidate
candidate_set.append(candidate)
return get_best_candidate(candidate_set)
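# Minimal usage sketch (added; the column and dimension names below are illustrative only):
# import pandas as pd
# df = pd.DataFrame({
#     'dc':      ['a', 'a', 'b', 'b'],
#     'device':  ['x', 'y', 'x', 'y'],
#     'real':    [100, 10, 10, 10],
#     'predict': [ 50, 10, 10, 10],
# })
# candidate = hotspot(df, dimensions=['dc', 'device'], pt=0.67, m=50, scoring='gps')
# # -> dict with 'layer', 'cuboid', 'score' and the selected 'elements'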
| shaido987/riskloc | algorithms/hotspot.py | hotspot.py | py | 8,100 | python | en | code | 93 | github-code | 6 | [
{
"api_name": "numpy.sqrt",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 30,
... |
5991002670 | from collections import OrderedDict
from itertools import chain
from .types import Vsn, MatrixID, PacketClass
from .patches import patch
from .cache import from_page, get_page
from .sources import version_urls
from .parsers import pre_versions, pre_packets, rel_version, rel_packets
from .parsers import first_heading
__all__ = ('version_packet_ids',)
# Returns matrix with matrix[version][packet_class] = matrix_id
@from_page(dep=(first_heading,pre_versions,pre_packets,rel_version,rel_packets),
rdoc='Recalculate the packet ID matrix. Give if the version_urls dict\n'
'or the code of version_packet_ids() have been changed.', doc_order=-2)
def version_packet_ids():
"""Return a dict mapping `Vsn' instances to dicts mapping `PacketClass'
instances to `MatrixID' instances, giving the matrix of packet IDs as
they vary across packets and across protocol versions."""
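    # Shape sketch (added; the concrete names and numbers are illustrative only):
    #   matrix[Vsn('1.13', 393)][PacketClass('Handshake', 'Handshaking', 'Server')]
    #       -> MatrixID(id=0x00, base_ver=Vsn('1.12.2', 340), changed=False, url=..., html=...)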
used_patches = set()
packet_classes = {}
matrix = OrderedDict()
prev_v = None
for v, url in reversed(version_urls.items()):
with get_page(url) as page:
heading = first_heading(page)
if heading == 'Pre-release protocol':
vdiff = pre_versions(page, v)
if (v, vdiff) in patch:
used_patches.add((v, vdiff))
vdiff = patch[v, vdiff]
from_v, to_v = vdiff
assert v == to_v, '%r != %r' % (v, to_v)
matrix[v] = {}
matrix.move_to_end(v, last=False)
seen_names = {}
all_pre_packets = pre_packets(page, v)
if (v, None) in patch:
all_pre_packets = chain(all_pre_packets, patch[(v, None)])
used_patches.add((v, None))
for packet in all_pre_packets:
if (v, packet) in patch:
used_patches.add((v, packet))
packet = packet.patch(patch[v, packet])
if packet is None: continue
assert packet.name not in seen_names, \
'[%s] Duplicate packet name:\n%s\n%s' % \
(v.name, seen_names[packet.name], packet)
seen_names[packet.name] = packet
packet_class = PacketClass(
name=packet.name, state=packet.state, bound=packet.bound)
if packet.name not in packet_classes:
packet_classes[packet.name] = packet_class
assert packet_class == packet_classes[packet.name], \
'[%s] %r != %r' % (v.name, packet_class, packet_classes[packet.name])
if packet.old_id is None:
assert packet_class not in matrix[from_v], \
'[%s] %r in matrix[%r]' % (v.name, packet_class, from_v)
else:
if packet_class not in matrix[from_v]:
msg = '[%s] [0x%02X] %r not in matrix[%r]' % (
v.name, packet.old_id, packet_class, from_v)
for from_pcls, from_mid in matrix[from_v].items():
if (from_pcls.state, from_pcls.bound, from_mid.id) \
== (packet_class.state, packet_class.bound, packet.old_id):
msg += '\n(however, matrix[%r][%r].id == 0x%02X)' % (
from_v, from_pcls, packet.old_id)
break
raise AssertionError(msg)
assert packet.old_id == matrix[from_v][packet_class].id, \
'[%s] 0x%02X != matrix[%r][%r].id == 0x%02X' % (
v.name, packet.old_id, from_v, packet_class,
matrix[from_v][packet_class].id)
if packet.url is not None:
url = packet.url
elif not packet.changed and from_v and packet_class in matrix[from_v]:
url = matrix[from_v][packet_class].url
else:
url = None
if packet.new_id is not None:
matrix[v][packet_class] = MatrixID(
id=packet.new_id, base_ver=from_v,
changed=packet.changed, html=packet.html, url=url)
for packet_class, id in matrix[from_v].items():
if packet_class.name in seen_names: continue
matrix[v][packet_class] = id._replace(
base_ver=from_v, changed=False)
elif heading == 'Protocol':
rel_v = rel_version(page)
if rel_v.name is None:
rel_v = Vsn(v.name, rel_v.protocol)
assert v == rel_v, '%r != %r' % (v, rel_v)
matrix[v] = {}
seen_names = {}
all_rel_packets = rel_packets(page, v)
if (v, None) in patch:
all_rel_packets = chain(all_rel_packets, patch[(v, None)])
used_patches.add((v, None))
for packet in all_rel_packets:
if (v, packet) in patch:
used_patches.add((v, packet))
packet = packet.patch(patch[v, packet])
if packet is None: continue
assert packet.name not in seen_names, \
'[%s] Duplicate packet name:\n%s\n%s.' \
% (v.name, seen_names[packet.name], packet)
seen_names[packet.name] = packet
packet_class = PacketClass(
name=packet.name, state=packet.state, bound=packet.bound)
if packet.name not in packet_classes:
packet_classes[packet.name] = packet_class
assert packet_classes[packet.name] == packet_class, \
'[%s] %r != %r' % (v.name,
packet_classes[packet.name], packet_class)
matrix[v][packet_class] = MatrixID(
id=packet.id, base_ver=v, changed=False, url=packet.url,
html=packet.html)
else:
raise AssertionError('Unrecognised article title: %r' % heading)
state_bound_ids = {}
for packet_class, matrix_id in matrix[v].items():
key = (packet_class.state, packet_class.bound, matrix_id.id)
assert key not in state_bound_ids, '[%s] Duplicate packet ID: ' \
'%s is used by packets %r and %r.' % (v.name,
'(%s, %s, 0x%02X)' % key, state_bound_ids[key], packet_class.name)
state_bound_ids[key] = packet_class.name
unused_patches = set(k for k in patch.keys() if k[0] == v and k not in used_patches)
if unused_patches:
raise AssertionError('Unused patches:\n'
+ '\n'.join('%r -> %r' % (p, patch[p]) for p in unused_patches))
prev_v = v
unused_patches = set(k for k in patch.keys() if k not in used_patches)
if unused_patches:
raise AssertionError('Unused patches:\n'
+ '\n'.join('%s -> %s' % (p, patch[p]) for p in unused_patches))
return matrix
| joodicator/mc-dev-data | mcdevdata/matrix.py | matrix.py | py | 7,498 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "collections.OrderedDict",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sources.version_urls.items",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sources.version_urls",
"line_number": 26,
"usage_type": "name"
},
{
"api_name"... |
14872333572 | import torch
from torch import nn
import torch.nn.functional as F
from torch import optim
from torchvision import datasets, transforms, models
from workspace_utils import active_session
from collections import OrderedDict
import numpy as np
from PIL import Image
import argparse
import json
parser = argparse.ArgumentParser(description='Inference for classification')
parser.add_argument('-i','--image_path',type=str, metavar='', required=True, help='path to image to predict e.g. flowers/test/class/image')
parser.add_argument('-t','--top_k', type=int, metavar='', default=1, help='print out the top K classes along with associated probabilities')
parser.add_argument('-c','--category_names', type=str, metavar='', default='cat_to_name.json', help='load a JSON file that maps the class values to other category names')
parser.add_argument('-g','--gpu',action="store_true", default=False, help='use the GPU for inference')
args = parser.parse_args()
with open(args.category_names, 'r') as f:
cat_to_name = json.load(f)
# a function that loads a checkpoint and rebuilds the model
def load_checkpoint(filepath):
    # Load the saved file
    checkpoint = torch.load(filepath)
architecture = checkpoint['architecture']
# Download pretrained model
model = getattr(models, architecture)(pretrained=True);
# Freeze parameters so we don't backprop through them
for param in model.parameters():
param.requires_grad = False
hidden_units = checkpoint['hidden_units']
model.class_to_idx = checkpoint['class_to_idx']
model.classifier = checkpoint['classifier']
model.load_state_dict(checkpoint['state_dict'])
#optimizer = optim.Adam(model.classifier.parameters())
#optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epochs = checkpoint['epochs']
return model
model = load_checkpoint('checkpoint.pth')
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
im = Image.open(image)
size = 256,256
im.thumbnail(size)
#Crop
left = (256-224)/2
top = (256-224)/2
right = (left + 224)
bottom = (top + 224)
im = im.crop((left, top, right, bottom))
np_image = np.array(im)
np_image = np_image / 255
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
np_image = (np_image - mean) / std
np_image = np_image.transpose(2, 0, 1)
return np_image
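# Quick sanity note (added): process_image returns a channels-first float array of shape
# (3, 224, 224), ready for torch.from_numpy; e.g.
# process_image('flowers/test/1/image_06743.jpg').shape == (3, 224, 224)
# (the example path is an assumption about the dataset layout, not taken from this script).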
# Reversing idx to class
idx_to_class = {}
for key, value in model.class_to_idx.items():
idx_to_class[value] = key
def predict(image_path, model, topk):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
    # Use the GPU if requested and available, otherwise fall back to the CPU
    device = torch.device("cuda:0" if args.gpu and torch.cuda.is_available() else "cpu")
# TODO: Implement the code to predict the class from an image file
#preprocess image
image = process_image(image_path)
image = torch.from_numpy(np.array([image])).float()
# turn off dropout
model.eval()
#Load image and the model to cpu or gpu
model.to(device)
image = image.to(device)
logps = model.forward(image)
ps = torch.exp(logps)
top_p, top_class = ps.topk(topk, dim=1)
top_class = np.array(top_class)[0]
top_p = np.array(top_p.detach())[0]
# Mapping index to class
top_classes = []
for i in range(len(top_class)):
top_classes.append(idx_to_class[top_class[i]])
# Mapping class to flower name
flower_names = []
for i in range(len(top_classes)):
flower_names.append(cat_to_name[top_classes[i]])
return top_p, flower_names
probs, classes = predict(args.image_path, model, args.top_k)
print(f"class Probability: {probs}")
print(f"flower name: {classes}")
| OmarMohy/Image-Classifier-with-Deep-Learning | predict.py | predict.py | py | 3,922 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_n... |
40160808434 | import openpyxl
import os
from setting import get_file_path, get_file_name
file_path = get_file_path()
file_name = get_file_name()
# Change the working directory to the configured path
os.chdir(file_path)
# Load the Excel workbook
wb = openpyxl.load_workbook(file_name)
# Get the first worksheet of the workbook
sheet = wb.worksheets[0]
etf_all = dict()
# Consolidate the holdings of every ETF
for columnNum in range(1, sheet.max_column + 1, 3):
for rowNum in range(3, sheet.max_row + 1):
if (sheet.cell(rowNum, columnNum).value == None):
break
if (etf_all.get(sheet.cell(rowNum, columnNum).value) == None):
etf_all[sheet.cell(rowNum, columnNum).value] = {
'name' : sheet.cell(rowNum, columnNum + 1).value,
'content' : [sheet.cell(1, columnNum).value]
}
else:
etf_all.get(sheet.cell(rowNum, columnNum).value)['content'].append(sheet.cell(1, columnNum).value)
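# Sort the stock codes by how many ETFs hold them, most widely held first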
sorted_list = sorted(etf_all.items(), key=lambda x:len(x[1]['content']), reverse=True)
# Write the results to a new sheet
new_sheet = wb.create_sheet('result')
row = 1
column = 1
for t in sorted_list:
new_sheet.cell(row, column).value = t[0]
new_sheet.cell(row, column + 1).value = t[1]['name']
new_sheet.cell(row, column + 2).value = len(t[1]['content'])
new_sheet.cell(row, column + 3).value = ','.join(str(etf_id) for etf_id in t[1]['content'])
row = row + 1
# Save the workbook
wb.save(file_name)
| ShengUei/Stock | etf_analysis.py | etf_analysis.py | py | 1,419 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "setting.get_file_path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "setting.get_file_name",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "openpyxl.load_workb... |
37169471952 | from flask import request
from flask_restx import Resource
from ..service.auth_service import Auth
from ..util.decorator import admin_token_required
from ..service.user_service import save_new_user, get_a_user
from ..util.dto import AuthDto
api = AuthDto.api
user_auth = AuthDto.user_auth
user_token = AuthDto.user_token
@api.route('/register')
class UserRegister(Resource):
"""
User Register Resource
"""
@api.doc('user register')
@api.expect(user_auth, validate=True)
def post(self):
# get the post data
data = request.json
return save_new_user(data=data)
@api.route('/login')
class UserLogin(Resource):
"""
User Login Resource
"""
@api.doc('user login')
@api.expect(user_auth, validate=True)
def post(self):
# get the post data
post_data = request.json
return Auth.login_user(data=post_data)
@api.route('/logout')
class LogoutAPI(Resource):
"""
Logout Resource
"""
@api.doc('logout a user')
@api.doc(params={'Authorization': {'in': 'header', 'description': 'An authorization token'}})
def post(self):
"""
logout user
"""
# get auth token
auth_header = request.headers.get('Authorization')
return Auth.logout_user(data=auth_header)
@api.route('/force-logout/<id>')
class ForceLogoutAPI(Resource):
"""
Force Logout Resource
"""
@api.doc('force logout a user')
@api.doc(params={'Authorization': {'in': 'header', 'description': 'An authorization token'}})
@api.response(404, 'User not found.')
@admin_token_required
def post(self, id):
"""
force logout a user and blacklist all the tokens
"""
        user = get_a_user(id)
if not user:
api.abort(404)
return Auth.force_logout_user(user)
@api.route('/expire-token')
class ExpireTokenAPI(Resource):
"""
Expire Token Resource
"""
@api.doc('expire a user token')
@api.doc(params={'Authorization': {'in': 'header', 'description': 'An authorization token'}})
@api.expect(user_token, validate=True)
@admin_token_required
def post(self):
"""
expire a token passed in post body, admin only authorized
"""
token = request.json['token']
return Auth.expire_token(token)
| miteshnath/flask-admin-jwt | app/main/controller/auth_controller.py | auth_controller.py | py | 2,367 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "util.dto.AuthDto.api",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "util.dto.AuthDto",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "util.dto.AuthDto.user_auth",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name... |
811036516 | '''Process Restricted Friend Requests
https://leetcode.com/problems/process-restricted-friend-requests/
You are given an integer n indicating the number of people in a network. Each person is labeled from 0 to n - 1.
You are also given a 0-indexed 2D integer array restrictions, where restrictions[i] = [xi, yi] means that person xi
and person yi cannot become friends, either directly or indirectly through other people.
Initially, no one is friends with each other. You are given a list of friend requests as a 0-indexed 2D integer array
requests, where requests[j] = [uj, vj] is a friend request between person uj and person vj.
A friend request is successful if uj and vj can be friends. Each friend request is processed in the given order (i.e.,
requests[j] occurs before requests[j + 1]), and upon a successful request, uj and vj become direct friends for all
future friend requests.
Return a boolean array result, where each result[j] is true if the jth friend request is successful or false if it is
not.
Note: If uj and vj are already direct friends, the request is still successful.
Example 1:
Input: n = 3, restrictions = [[0,1]], requests = [[0,2],[2,1]]
Output: [true,false]
Explanation:
Request 0: Person 0 and person 2 can be friends, so they become direct friends.
Request 1: Person 2 and person 1 cannot be friends since person 0 and person 1 would be indirect friends (1--2--0).
'''
from collections import defaultdict, deque
from typing import List
class Solution:
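    # Approach: keep the friendship graph built so far; for each request (u, v), BFS from u and
    # from v to collect their current friend components, then accept the request only if no pair
    # across the two components appears in the restriction map.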
def friendRequests(self, n: int, restrictions: List[List[int]], requests: List[List[int]]) -> List[bool]:
def bfs(node):
q = deque([node])
visited = set()
visited.add(node)
relations = set()
while q:
node = q.popleft()
relations.add(node)
for neighbor in friend_mapping[node]:
if neighbor not in visited:
visited.add(neighbor)
q.append(neighbor)
return relations
banned_mapping = defaultdict(set)
for u, v in restrictions:
banned_mapping[u].add(v)
banned_mapping[v].add(u)
result = [False] * len(requests)
friend_mapping = defaultdict(set)
for index, req in enumerate(requests):
u, v = req[0], req[1]
if v in banned_mapping[u]:
result[index] = False
else:
set1 = bfs(u)
set2 = bfs(v)
banned = False
for key in set1:
for bannedId in banned_mapping[key]:
if bannedId in set2:
result[index] = False
banned = True
if not banned:
result[index] = True
friend_mapping[u].add(v)
friend_mapping[v].add(u)
return result | Saima-Chaity/Leetcode | Graph/Process Restricted Friend Requests.py | Process Restricted Friend Requests.py | py | 2,945 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 54,
"usage_type": "call"
}
] |
74281107067 | from typing import Any, List
from fastapi import APIRouter, HTTPException, Depends
from apps.auth.model import User
from apps.bank.cruds import invoice
from apps.bank.schemas.invoice import InvoiceUpdate, InvoiceView, InvoiceCreate, InvoiceViewFull
from core.security import current_user_is_banker, get_current_user
router = APIRouter(prefix='/invoices', tags=['Invoices'])
@router.get('/', response_model=List[InvoiceViewFull],
dependencies=[Depends(current_user_is_banker)])
async def list_invoices(skip: int = 0, limit: int = 100) -> Any:
results = await invoice.get_list(skip=skip, limit=limit)
return results
@router.get('/my', response_model=List[InvoiceViewFull])
async def list_my_invoices(user: User = Depends(get_current_user),
skip: int = 0, limit: int = 100) -> Any:
results = await invoice.get_list(user=user, skip=skip, limit=limit)
return results
@router.get('/{obj_id}', response_model=InvoiceViewFull)
async def get_invoice(obj_id: int, user: User = Depends(get_current_user)) -> Any:
result = await invoice.get(id=obj_id, user=user)
if not result:
raise HTTPException(status_code=404, detail='Invoice not found!')
return result
@router.post('/create', response_model=InvoiceView, status_code=201)
async def create_invoice(item: InvoiceCreate, user: User = Depends(get_current_user)) -> Any:
result = await invoice.create_invoice(obj_in=item, user=user)
return result
@router.put('/{obj_id}', response_model=InvoiceView)
async def update_invoice(obj_id: int, item: InvoiceUpdate,
user: User = Depends(get_current_user)) -> Any:
obj_db = await invoice.get(id=obj_id, user=user)
if not obj_db:
raise HTTPException(status_code=404, detail='Invoice not found!')
result = await invoice.update_invoice(obj_db=obj_db, obj_in=item, user=user)
return result
@router.delete('/{obj_id}')
async def delete_invoice(obj_id: int, user: User = Depends(get_current_user)) -> Any:
result = await invoice.remove(id=obj_id, user=user)
return result
| MojsaKirill/CRUD | app/api/api_v1/endpoints/invoices.py | invoices.py | py | 2,101 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "apps.bank.cruds.invoice.get_list",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "apps.bank.cruds.invoice",
"line_number": 16,
"usage_type": "name"
},
{
"api_na... |
17007776735 | import requests
import bs4
import urllib
def spider(max_pages):
for page in range(1, max_pages + 1):
query = urllib.parse.urlencode({'query':u'대선후보'})
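        # Note: the urlencode result above is unused; the URL below hard-codes what appears to be
        # the EUC-KR percent-encoded form of the same search term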
url = 'http://news.naver.com/main/search/search.nhn?query=' + '%B4%EB%BC%B1%C8%C4%BA%B8'
source_code = requests.get(url)
plain_text = source_code.text
soup = bs4.BeautifulSoup(plain_text, 'html.parser')
content = soup.find(id='search_div')
for result in content.select('ul > li > div'):
print('############# Title')
print(result.a.text)
print('############# Content')
print(result.p.text)
spider(1)
| masonHong/INU-Study | C Team(Hong, Heo)/Crowaling/Practice 1.py | Practice 1.py | py | 668 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "urllib.parse.urlencode",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup"... |
23468677797 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tapp', '0005_comment_end_i'),
]
operations = [
migrations.AlterModelOptions(
name='essay',
options={'permissions': (('view_essay', 'View essay'),)},
),
]
| rihakd/textAnalyticsDjango | TA/tapp/migrations/0006_auto_20151119_1941.py | 0006_auto_20151119_1941.py | py | 386 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterModelOptions",
"line_number": 14,
"usage_type": "call"
... |
8694936867 | from django.urls import path
from . import views
urlpatterns = [
path("", views.index, name="index"),
path("login", views.login_view, name="login"),
path("logout", views.logout_view, name="logout"),
path("register", views.register, name="register"),
path("newListing", views.newForm, name="new"),
path("createListing", views.createNewListing, name="create"),
path("<str:title>Listing", views.listing_view, name="listing"),
path("Watchlist", views.watchlist, name="watchlist"),
path("AddToWatchlist/<str:title>", views.add_to_watchlist, name="add"),
path("RemoveFromWatchlist/<str:title>", views.remove_from_watchlist, name="remove"),
path("placeBid/<str:title>", views.place_bid, name="bid"),
path("closeListing/<str:title>", views.close_listing, name="close"),
path("postComment/<str:title>", views.post_comment, name="postComment"),
path("categories", views.categories_view, name="categories"),
path("showCategory/<str:category>", views.single_category_view, name="showCategory")
]
| SHorne41/Project-2-Commerce | auctions/urls.py | urls.py | py | 1,046 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
72112922749 | import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
# import snntorch
import pandas as pd
import tqdm
import argparse
from . import p_snu_layer
class SNN_Net(torch.nn.Module):
def __init__(self, inputs_num = 4, hidden_num = 4, outputs_num = 3 ,l_tau = 0.8,num_time = 100, batch_size = 80 ,soft = False, rec = False, power = False, gpu = True):
super().__init__()
self.num_time = num_time
self.batch_size = batch_size
self.rec = rec
self.power = power
#parametr
# self.neuron_0 = 4
# self.neuron_1 = 24
# self.neuron_2 = 24
# self.neuron_3 = 4
#my #hidden num = 24
# self.l1 = p_snu_layer.P_SNU(inputs_num, hidden_num, l_tau = l_tau, soft = soft, gpu = gpu)
# self.l2 = p_snu_layer.P_SNU(hidden_num, hidden_num, l_tau = l_tau, soft = soft, gpu = gpu)
# self.l3 = p_snu_layer.P_SNU(hidden_num, hidden_num, l_tau = l_tau, soft = soft, gpu = gpu)
# self.l4 = p_snu_layer.P_SNU(hidden_num, outputs_num, l_tau = l_tau, soft = soft, gpu = gpu)
#my2 hidden num = 4
self.l1 = p_snu_layer.P_SNU(inputs_num, hidden_num, l_tau = l_tau, soft = soft, gpu = gpu)
self.l2 = p_snu_layer.P_SNU(hidden_num, hidden_num, l_tau = l_tau, soft = soft, gpu = gpu)
self.l3 = p_snu_layer.P_SNU(hidden_num, outputs_num, l_tau = l_tau, soft = soft, gpu = gpu)
# for 1 layer test
# self.l4 = p_snu_layer.P_SNU(inputs_num, outputs_num, l_tau = l_tau, soft = soft, gpu = gpu)
def reset_state(self):
self.l1.reset_state()
self.l2.reset_state()
self.l3.reset_state()
# self.l4.reset_state()
def forward(self,x,y):
# y = torch.tensor(y)
losse = None
accuracy = None
        sum_out = None  # accumulate the spike counts over the time steps
        out_list = []  # output spikes of each sample (120x4) over the 100 ms time steps, kept in time order
out_total_list = []
membrane_out = torch.empty(100,3)
mem1_out = torch.empty(100,4)
mem2_out = torch.empty(100,4)
spikes_ = torch.empty(100,4)
self.reset_state()
        for time in range(self.num_time):  # num_time defaults to 100
# spike_encoded_neuron = x[time]
# target_ = torch.reshape(y[time],(1,3))
# spike_encoded_neuron = torch.reshape(x[time],(4,1))
#4→4→3(network)
spike_encoded_neuron = x[time]
h1,mem1,u1 = self.l1(spike_encoded_neuron)
h2,mem2,u2 = self.l2(h1)
# out,mem = self.l3(h2)
# 1 layer test 4→3(network)
            # check the membrane potential and the input/output spikes
# out,thresh,spike = self.l4(spike_encoded_neuron)
#normal
# out = self.l4(spike_encoded_neuron)
# sum_out = out if sum_out is None else sum_out + out
mem1_out[time] = mem1
mem2_out[time] = mem2
# membrane_out[time] = out
spikes_[time] = h1
        # check the output
# return sum_out,y
        # for batch training:
        # criterion = nn.CrossEntropyLoss()
        # losse = criterion(sum_out,y)
        # accuracy:
# predicted_label = torch.argmax(sum_out)
# accuracy = 1 if predicted_label == y else 0
        return spikes_,mem1_out,mem2_out
        # unreachable leftovers from earlier experiments:
        # return mem_out
        # return sum_out,losse,accuracy
| GTAKAGI/PSNN | snn_model/network.py | network.py | py | 3,693 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.empty",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "torch.empty",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "torch.empty",
"line_number"... |
32170276466 | #
# demo.py
#
import argparse
import os
import numpy as np
import time
import torch
from modeling.deeplab import *
from dataloaders import custom_transforms as tr
from PIL import Image
from torchvision import transforms
from dataloaders.utils import *
from torchvision.utils import make_grid, save_image
torch.set_printoptions(profile="full")
def main():
parser = argparse.ArgumentParser(description="PyTorch DeeplabV3Plus Training")
parser.add_argument('--in-path', type=str, default=r'D:\PT\archive\Testingset10class\dataset\结果\test_96_label',
help='image to test')
parser.add_argument('--out-path', type=str, default=r'D:\PT\archive\Testingset10class\dataset\结果\96', help='mask image to save')
parser.add_argument('--backbone', type=str, default='mobilenet',
choices=['resnet', 'xception', 'drn', 'mobilenet'],
help='backbone name (default: resnet)')
parser.add_argument('--ckpt', type=str, default=r'D:\PT\超分辨率语义分割\模型保存\10_class\dsrl\128/model_best.pth.tar',
help='saved model')
parser.add_argument('--out-stride', type=int, default=16,
help='network output stride (default: 8)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--gpu-ids', type=str, default='0',
help='use which gpu to train, must be a \
comma-separated list of integers only (default=0)')
parser.add_argument('--dataset', type=str, default='rockdataset',
choices=['pascal', 'coco', 'cityscapes', 'rockdataset'],
help='dataset name (default: pascal)')
parser.add_argument('--crop-size', type=int, default=96,
help='crop image size')
parser.add_argument('--num_classes', type=int, default=11,
help='crop image size')
parser.add_argument('--sync-bn', type=bool, default=None,
help='whether to use sync bn (default: auto)')
parser.add_argument('--freeze-bn', type=bool, default=False,
help='whether to freeze bn parameters (default: False)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
try:
args.gpu_ids = [int(s) for s in args.gpu_ids.split(',')]
except ValueError:
raise ValueError('Argument --gpu_ids must be a comma-separated list of integers only')
if args.sync_bn is None:
if args.cuda and len(args.gpu_ids) > 1:
args.sync_bn = True
else:
args.sync_bn = False
composed_transforms = transforms.Compose([
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])
for name in os.listdir(args.in_path):
image = Image.open(args.in_path + "/" + name).convert('RGB')
# image = Image.open(args.in_path).convert('RGB')
target = Image.open(args.in_path + "/" + name)
sample = {'image': image, 'label': target}
tensor_in = composed_transforms(sample)['label'].unsqueeze(0)
print(tensor_in.shape)
grid_image = make_grid(decode_seg_map_sequence(tensor_in.detach().cpu().numpy()),
3, normalize=False, range=(0, 255))
save_image(grid_image, args.out_path + "/" + "{}_label.png".format(name[0:-4]))
# save_image(grid_image, args.out_path)
# print("type(grid) is: ", type(grid_image))
# print("grid_image.shape is: ", grid_image.shape)
print("image save in in_path.")
if __name__ == "__main__":
main()
# python demo.py --in-path your_file --out-path your_dst_file
| AlisitaWeb/SSRN | ceshi_label.py | ceshi_label.py | py | 3,929 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 60,
"usage_type": "name"
},
{
"api... |
16986332004 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator
from numpy.lib.shape_base import split
import math
import cmath
def get_MSI (matrix, f, tau, iterCnt): #method of simple iterations
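    # Fixed-point iteration x_{k+1} = (I - tau*A) x_k + tau*f; it converges when the
    # spectral radius of (I - tau*A) is less than 1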
n = np.size(f)
B = np.diagflat([1] * n) - tau * matrix
f = tau * f
x = f
for i in range (iterCnt):
x = B.dot(x) + f
return x
def get_MJ (matrix, f, iterCnt): # method of Jacobi
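    # Jacobi-style sweep: each component of uNext is computed from the previous iterate u only,
    # unlike Gauss-Seidel below, which reuses components already updated in the current sweep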
u = f
n = np.size(f)
for cnt in range(iterCnt):
uNext = np.zeros(n)
for i in range (n):
            uNext[i] = u[i] + (f[i] - matrix[i].dot(u)) / matrix[i][i]
u = uNext
return u
def get_MZ (matrix, f, iterCnt):
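    # Gauss-Seidel iteration: components already updated in this sweep (j < i) are taken
    # from uNext, the remaining ones (j > i) from the previous iterate u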
u = f
n = np.size(f)
answ = np.zeros((n, iterCnt))
for cnt in range(iterCnt):
uNext = np.zeros(n)
for i in range (n):
            uNext[i] = (f[i] -
                matrix[i][:i].dot(uNext[:i]) -
                matrix[i][i + 1:].dot(u[i + 1:])) / matrix[i][i]
u = uNext
answ[:, cnt] = uNext
return answ[:, -1]
eigenValuesFilename = "eigenValues15.dat"
# Problem input
N = 15 # number of grid points
h = 1 / (N - 1)
matrix = np.zeros (((N - 2) * (N - 2), N * N))
# build the matrix
def elem (i, j):
return N * j + i
for j in range (1, N - 1):
for i in range (1, N - 1):
        eqID = (j - 1) * (N - 2) + i - 1 # equationID - index of the row
if (i > 1):
matrix[eqID][elem(i - 1, j)] = -1
if (j > 1):
matrix[eqID][elem(i, j - 1)] = -1
if (i < N - 2):
matrix[eqID][elem(i + 1, j)] = -1
if (j < N - 2):
matrix[eqID][elem(i, j + 1)] = -1
matrix[eqID][elem(i, j)] = 4
print ("half")
# Remove empty columns
zeroColumns = []
for i in range (N * N - 1, -1, -1):
if i % N == 0 or \
i < N or \
i >= N * (N - 1) or \
(i + 1) % N == 0:
zeroColumns.append (i)
otherColumns = [x for x in range (N * N - 1) if x not in zeroColumns]
matrix = matrix[:, otherColumns]
print ("matrix is done")
# plt.spy(matrix)
# plt.show()
# compute the eigenvalues
# eigenValues = np.linalg.eigvals(matrix)
# print ("eighenvalues are computed")
# with open (eigenValuesFilename, "w") as file:
# for val in eigenValues:
# file.write(str(val) + "\n")
minVal = 1e9
maxVal = -1e9
with open(eigenValuesFilename, 'r') as file:
for line in file:
val = cmath.polar(complex(line))[0]
if (val > maxVal):
maxVal = val
elif val < minVal:
minVal = val
# Build the right-hand-side vector
v = np.zeros((N - 2) * (N - 2))
v[(N // 2) * (N - 2) + N // 2 - 1] = h**2
# v[N * N // 3 : N * N // 2] += h ** 2
# Solve the system with different methods
# u = get_MSI(matrix, v, 2 / (maxVal + minVal), 100)
# u = get_MSI(matrix, v, 1, 100)
# print (matrix)
# u = get_MJ(matrix, v, 0)
u = get_MZ(matrix, v, 10)
# u = np.linalg.solve(matrix, v)
# Fill matrix Z for plotting
Z = np.zeros((N, N))
for (uVal, oldID) in zip(u, otherColumns):
Z[oldID % N][oldID // N] = uVal
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
w = v = np.linspace(0, 1, N)
X, Y = np.meshgrid(w, v)
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
ax.zaxis.set_major_locator(LinearLocator(10))
# A StrMethodFormatter is used automatically
# ax.zaxis.set_major_formatter('{x:.02f}')
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.savefig("pic.png")
# plt.show()
| Nevtod/Labs | ComputingMath/lab1/Computing_math.py | Computing_math.py | py | 3,874 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.size",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.diagflat",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.size",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number":... |
11547913275 | # This file is part of RADAR.
# Copyright (C) 2019 Cole Daubenspeck
#
# RADAR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RADAR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RADAR. If not, see <https://www.gnu.org/licenses/>.
import re
from typing import Tuple
# used if anything wants to sort/use numeric values instead of strings
VALUE_STANDARD_CONVERSIONS = {
"unknown": 0,
"low": 1,
"medium": 2,
"high": 3,
"very-high": 4
}
# format
# device_type_string
# - list of services (if it might not appear in consistent ports) (THESE ARE REGULAR EXPRESSIONS!)
# - list of ports that correspond to hosts of that type (prefered unless service tends to run on different ports)
HOST_TYPE = {
"webserver": {
"value": "high",
"service_names": [
"werkzeug",
"httpd",
"nginx",
"apache"
],
"ports": [
80,
443,
3000, # node webserver
8000,
8443
]
},
"database": {
"value": "very-high",
"service_names": [
],
"ports": [
1433, # mssql
3306, # mysql
6379, # redis
27017, # mongo
]
},
"fileserver": {
"value": "high",
"service_names": [
],
"ports": [
21,
990
]
},
"mailserver": {
"value": "medium",
"service_names": [
],
"ports": [
25, # smtp ports
468,
587,
2525,
110, # pop3 ports
993,
143, # imap ports
995
]
},
"ics": { # industrial control system
"value": "very-high",
"service_names": [
"modbus"
],
"ports": [
502
]
},
"domain_controller": {
"value": "very-high",
"service_names": [
],
"ports": [
88 # kerberos
]
}
}
def get_info(target: dict) -> Tuple[str, str]:
""" For a given target, returns information about the priority and best-guess type of host
arguments:
target: a dictionary that conforms to RADAR target specifications
returns:
a tuple of strings (priority, type). First string is the value of the device (e.g. "high"), second is the type of device (e.g. "webserver").
Multiple device types will be seperated with a semicolon (e.g. 'webserver;database').
"""
services = target.get("services")
if not services: # no running services, we don't care
return "unknown", "generic"
device_value = "unknown"
device_type = ""
global HOST_TYPE
global VALUE_STANDARD_CONVERSIONS
# for every service on the target
for service in services:
port = int(service.get("port"))
name = service.get("service")
# check if any of the host types matches the target...
for host_type, details in HOST_TYPE.items():
# skip checking the type if it's already flagged (e.g. it has multiple services related to being a webserver)
if host_type in device_type:
continue
type_value = details.get("value")
# by seeing if the port is in one of the lists
if port in details.get("ports"):
device_value = device_value if VALUE_STANDARD_CONVERSIONS[type_value] < VALUE_STANDARD_CONVERSIONS[device_value] else type_value
device_type += f";{host_type}"
# or by seeing if any of the patterns matches
else:
for check_names in details.get("service_names", []):
if re.search(check_names, name):
device_value = device_value if VALUE_STANDARD_CONVERSIONS[type_value] < VALUE_STANDARD_CONVERSIONS[device_value] else type_value
device_type += f";{host_type}"
break
return device_value, device_type[1:] or "unknown"
| Sevaarcen/RADAR | cyber_radar/helpers/target_prioritizer.py | target_prioritizer.py | py | 4,715 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "re.search",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 109,
"usage_type": "name"
}
] |
650459657 | #! /bin/python
import os
import sys
import json
import luigi
import nifty.tools as nt
import elf.skeleton.io as skelio
from elf.skeleton import skeletonize as skel_impl, get_method_names
import cluster_tools.utils.volume_utils as vu
import cluster_tools.utils.function_utils as fu
from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask
from cluster_tools.utils.task_utils import DummyTask
#
# skeletonize tasks
#
class SkeletonizeBase(luigi.Task):
""" Skeletonize base class
"""
task_name = 'skeletonize'
src_file = os.path.abspath(__file__)
allow_retry = False
# input and output volumes
input_path = luigi.Parameter()
input_key = luigi.Parameter()
morphology_path = luigi.Parameter()
morphology_key = luigi.Parameter()
output_path = luigi.Parameter()
output_key = luigi.Parameter()
number_of_labels = luigi.IntParameter()
resolution = luigi.ListParameter()
size_threshold = luigi.IntParameter(default=None)
method = luigi.Parameter(default='thinning')
dependency = luigi.TaskParameter(default=DummyTask())
methods = get_method_names()
# expose skeletonization parameter if we support more parameter
@staticmethod
def default_task_config():
# we use this to get also get the common default config
config = LocalTask.default_task_config()
config.update({'chunk_len': 1000, 'method_kwargs': {}})
return config
def requires(self):
return self.dependency
def _prepare_output(self, config):
# make the blocking
block_len = min(self.number_of_labels, config.get('chunk_len', 1000))
block_list = vu.blocks_in_volume((self.number_of_labels,),
(block_len,))
n_jobs = min(len(block_list), self.max_jobs)
# require output dataset
with vu.file_reader(self.output_path) as f:
f.require_dataset(self.output_key, shape=(self.number_of_labels,),
chunks=(1,), compression='gzip', dtype='uint64')
# update the config
config.update({'number_of_labels': self.number_of_labels,
'block_len': block_len})
return config, n_jobs, block_list
def run_impl(self):
assert self.method in self.methods,\
"Method %s is not supported, must be one of %s" % (self.method, str(self.methods))
# TODO support roi
# get the global config and init configs
shebang, block_shape, _, _ = self.global_config_values()
self.init(shebang)
# load the skeletonize config
# update the config with input and output paths and keys
config = self.get_task_config()
config.update({'input_path': self.input_path, 'input_key': self.input_key,
'morphology_path': self.morphology_path,
'morphology_key': self.morphology_key,
'output_path': self.output_path, 'output_key': self.output_key,
'resolution': self.resolution, 'size_threshold': self.size_threshold,
'method': self.method})
config, n_jobs, block_list = self._prepare_output(config)
# prime and run the jobs
self.prepare_jobs(n_jobs, block_list, config)
self.submit_jobs(n_jobs)
# wait till jobs finish and check for job success
self.wait_for_jobs()
self.check_jobs(n_jobs)
class SkeletonizeLocal(SkeletonizeBase, LocalTask):
"""
skeletonize on local machine
"""
pass
class SkeletonizeSlurm(SkeletonizeBase, SlurmTask):
"""
skeletonize on slurm cluster
"""
pass
class SkeletonizeLSF(SkeletonizeBase, LSFTask):
"""
skeletonize on lsf cluster
"""
pass
#
# Implementation
#
# not parallelized for now
def _skeletonize_id_block(blocking, block_id, ds_in, ds_out,
sizes, bb_min, bb_max, resolution, size_threshold,
method):
fu.log("start processing block %i" % block_id)
block = blocking.getBlock(block_id)
id_begin, id_end = block.begin[0], block.end[0]
# we don't compute the skeleton for id 0, which is reserved for the ignore label
id_begin = 1 if id_begin == 0 else id_begin
# we increase the bounding box with a small halo, otherwise there
# semms to be boundary inconsistencies
halo = (2, 2, 2)
shape = ds_in.shape
# skeletonize ids in range and serialize skeletons
for seg_id in range(id_begin, id_end):
if size_threshold is not None:
if sizes[seg_id] < size_threshold:
continue
bb = tuple(slice(max(int(mi - ha), 0),
min(int(ma + ha), sh)) for mi, ma, sh, ha in zip(bb_min[seg_id],
bb_max[seg_id],
shape, halo))
fu.log("skeletonize id %i from bb %s" % (seg_id, str(bb)))
obj = ds_in[bb] == seg_id
# try to skeletonize the object, skip if any exception is thrown
try:
nodes, edges = skel_impl(obj, resolution=resolution, method=method)
except Exception:
continue
offsets = [b.start * res for b, res in zip(bb, resolution)]
skelio.write_n5(ds_out, seg_id, nodes, edges, offsets)
fu.log_block_success(block_id)
def skeletonize(job_id, config_path):
fu.log("start processing job %i" % job_id)
fu.log("reading config from %s" % config_path)
with open(config_path, 'r') as f:
config = json.load(f)
# read the input cofig
input_path = config['input_path']
input_key = config['input_key']
morphology_path = config['morphology_path']
morphology_key = config['morphology_key']
output_path = config['output_path']
output_key = config['output_key']
size_threshold = config['size_threshold']
resolution = config['resolution']
method = config['method']
# morphology feature-columns
# 0 = label-id
# 1 = pixel size
# 2:5 = center of mass
# 5:8 = min coordinate
# 8:11 = max coordinate
with vu.file_reader(morphology_path) as f:
morpho = f[morphology_key][:]
sizes = morpho[:, 1].astype('uint64')
bb_min = morpho[:, 5:8].astype('uint64')
bb_max = morpho[:, 8:11].astype('uint64') + 1
block_list = config['block_list']
block_len = config['block_len']
n_labels = config['number_of_labels']
blocking = nt.blocking([0], [n_labels], [block_len])
# skeletonize this id block
with vu.file_reader(input_path, 'r') as f_in, vu.file_reader(output_path) as f_out:
ds_in = f_in[input_key]
ds_out = f_out[output_key]
for block_id in block_list:
_skeletonize_id_block(blocking, block_id, ds_in, ds_out,
sizes, bb_min, bb_max, resolution, size_threshold,
method)
# log success
fu.log_job_success(job_id)
if __name__ == '__main__':
path = sys.argv[1]
assert os.path.exists(path), path
job_id = int(os.path.split(path)[1].split('.')[0].split('_')[-1])
skeletonize(job_id, path)
| constantinpape/cluster_tools | cluster_tools/skeletons/skeletonize.py | skeletonize.py | py | 7,301 | python | en | code | 32 | github-code | 6 | [
{
"api_name": "luigi.Task",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "luigi.Parameter",
"l... |
38217727284 |
from utils import pickle_load
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import collections
def show_results(res_paths):
results = {}
for path in res_paths:
result = pickle_load(path)
for k, v in result.items():
if k not in results.keys():
results[k] = result[k]
results = collections.OrderedDict(sorted(results.items()))
fig, ax = plt.subplots(figsize=(9, 5.5))
colors = cm.Dark2(np.linspace(0, 1, len(results)))
count = 0
for k, res in results.items():
mean, std = np.nanmean(res, axis=0), np.nanstd(res, axis=0)
# ax.errorbar(np.arange(mean.shape[0]), mean, yerr=std, color=colors[count], label=k, fmt='-o')
plt.plot(np.arange(mean.shape[0]) + 1, mean, '-o', color=colors[count], label=k)
count += 1
print(np.array_str(mean[8:], precision=3))
print("Average precision of %s for future prediction: %f" % (k, mean[8:].mean()))
# Now add the legend with some customizations.
legend = ax.legend(loc='upper right')
ax.set_xlabel("time step")
ax.set_ylabel("average precision")
plt.axvline(x=8.5, color='r', linestyle='--')
plt.text(3, 0.1, 'tracking', fontsize=18, color='grey')
plt.text(11, 0.1, 'prediction', fontsize=18, color='grey')
plt.show()
def show_best(filename, metric, k=1):
def line_to_list(line):
exclude_next_line = lambda x: x[:-1] if x.endswith('\n') else x
        # materialize the map object so the result can be iterated more than once (Python 3)
        entries = list(map(exclude_next_line, line.split(',')))
        return entries
items = []
def print_dict(dic, attrs=None):
if attrs is None:
attrs = ['omega', 'noise_var', 'extent', metric, metric + ' mean']
if 'keep_motion' in dic and dic['keep_motion']:
attrs += ['window_size', 'initial_motion_factor', 'keep_motion_factor']
if 'blur_spatially' in dic and dic['blur_spatially']:
attrs += ['blur_extent', 'blur_var']
for k, v in dic.items():
if attrs is not None and k not in attrs:
continue
print("{}: {}".format(k, v))
with open(filename, 'r') as f:
line = f.readline()
#print(line)
attrs = line_to_list(line)
for i, line in enumerate(f):
#print(line)
values = line_to_list(line)
#print(values)
dict_ = {k: v for (k, v) in zip(attrs, values)}
items.append(dict_)
#print(items[0])
items = sorted(items, key=lambda item: item[metric + ' mean'])
if metric == 'f1_score' or metric == 'average_precision':
items = items[::-1]
for i in range(k):
print("------- {}th best ------- ".format(i+1))
print_dict(items[i])
| stomachacheGE/bofmp | tracking/scripts/show_best_parameter.py | show_best_parameter.py | py | 2,753 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "utils.pickle_load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "... |
73549612348 | import pandas as pd
import numpy as np
import io
import requests
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn import metrics
from beta_encoder import BetaEncoder
import category_encoders as ce
from utils import *
import csv
import xgboost as xgb
def run_ls_experiments():
print("Loading Data")
df = load_data()
continuous = ['company_size', 'interested_desks']
categorical = ['industry','location', 'lead_source']
#columns:
print("continuous columns: ",continuous)
print("categorical columns: ",categorical)
# plot increasing dimensionality va computation time
sample_sizes = [2000,
4000,
6000,
8000,
10000,
12000,
14000,
16000,
18000,
20000,
22000,
24000,
26000,
28000,
30000,
32000,
34000,
36000,
38000,
40000,
42000,
44000,
46000,
48000,
50000]
sample_sizes = [42000,
44000,
46000,
48000,
50000]
results = [['model','Encoder','Accuracy','STD','Training Time','Sparsity','Dimensions','sample_size']]
for sample_size in sample_sizes:
print("")
print("----------------------")
print("Sample Size: ",sample_size)
print("----------------------")
if not sample_size < len(df):
sample_size = len(df)
sample = df.sample(sample_size)
X = sample[continuous+categorical]
y = sample[['converted']]
successes = y.sum()[0]
alpha_prior = float(successes / len(y))
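        # Use the overall conversion rate as the prior mean of the Beta target encoder (alpha + beta = 1)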
model = xgb.XGBClassifier(n_jobs=4) #[GradientBoostingClassifier(max_depth=8, n_estimators=64)]
#BetaEncoder (mean)
print("Beta Encoder (mean) Results:")
acc, std, time, sparsity, dimensions = cv_lead_scoring_classification(model, X, y, continuous, categorical, encoder=BetaEncoder(alpha=alpha_prior, beta=1-alpha_prior))
results.append([type(model), 'BetaEncoder (m)', acc, std, time, sparsity, dimensions,sample_size])
#OneHotEncoder
print("OneHotEncoder Results:")
acc, std, time, sparsity, dimensions = cv_lead_scoring_classification(model, X, y, continuous, categorical, encoder=OneHotEncoder(handle_unknown='ignore', sparse=False))
results.append([type(model), 'OneHotEncoder', acc, std, time, sparsity, dimensions,sample_size])
file = 'lead_scoring_experiments_comp_time_2.csv'
with open(file, "w") as output:
writer = csv.writer(output, lineterminator='\n')
writer.writerows(results)
try:
upload_file(file)
except:
print("File Not Uploaded")
def load_data():
df = pd.read_csv('lead_scoring_1mil.csv')
df = df.fillna('null')
industries = df.industry.str.split(',', n=-1, expand=True)
df['industry'] = industries[0]
#training_df['sector'] = industries[1]
return df
if __name__ == '__main__':
run_ls_experiments()
| aslakey/CBM_Encoding | lead_scoring_computation_time.py | lead_scoring_computation_time.py | py | 3,159 | python | en | code | 18 | github-code | 6 | [
{
"api_name": "xgboost.XGBClassifier",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "beta_encoder.BetaEncoder",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.OneHotEncoder",
"line_number": 89,
"usage_type": "call"
},
{
... |
35379120975 | import math
import boto3
from aws_cdk import (
core,
aws_ec2 as ec2,
aws_ecs as ecs,
aws_cloudwatch as cw
)
from cdklocust.locust_container import locustContainer
class CdklocustStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
self.get_cdk_context()
self.vpc=vpc
#ECS cluster for the loadgen
self.loadgen_cluster = ecs.Cluster(
self, "Loadgen-Cluster",
vpc=self.vpc
)
#Just using base ENI count, not caring about having ENI trunking turned on
client = boto3.client('ec2')
response = client.describe_instance_types(InstanceTypes=[self.ecs_instance_type])
eni_per_instance = response['InstanceTypes'][0]['NetworkInfo']['MaximumNetworkInterfaces']
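        # Assuming awsvpc networking, every task needs its own ENI and the instance's primary ENI
        # is reserved, so size the ASG to hold all workers plus the master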
number_of_instances = math.ceil((self.number_of_workers + 1) / (eni_per_instance-1))
self.loadgen_cluster.add_capacity("AsgSpot",
max_capacity=number_of_instances * 2,
min_capacity=number_of_instances,
instance_type=ec2.InstanceType(self.ecs_instance_type),
spot_price="0.07",
spot_instance_draining=True
)
#cloudmap for service discovery so workers can lookup mast via dns
self.loadgen_cluster.add_default_cloud_map_namespace(name = self.cloudmap_namespace)
#Create a graph widget to track reservation metrics for our cluster
ecs_widget = cw.GraphWidget(
left=[self.loadgen_cluster.metric_cpu_reservation()],
right=[self.loadgen_cluster.metric_memory_reservation()],
title="ECS - CPU and Memory Reservation",
)
#CloudWatch dashboard to monitor our stuff
self.dashboard = cw.Dashboard(self, "Locustdashboard")
self.dashboard.add_widgets(ecs_widget)
if not self.distributed_locust:
role = "standalone"
locustContainer(self, "locust" + role, self.vpc, self.loadgen_cluster, role, self.target_url)
else:
role = "master"
master_construct = locustContainer(self, "locust" + role, self.vpc,
self.loadgen_cluster, role, self.target_url)
lb_widget = cw.GraphWidget(
left=[master_construct.lb.metric_active_connection_count(),
master_construct.lb.metric_target_response_time()],
right=[master_construct.lb.metric_request_count()],
title="Load Balancer")
self.dashboard.add_widgets(lb_widget)
role = "worker"
worker_construct = locustContainer(self, "locust" + role, self.vpc,
self.loadgen_cluster, role, self.target_url,
self.number_of_workers)
worker_construct.node.add_dependency(master_construct)
def get_cdk_context(self):
# grab stuff from context
self.number_of_workers = int(self.node.try_get_context("number_of_workers"))
self.ecs_instance_type = self.node.try_get_context("ecs_instance_type")
self.vpc_cidr = self.node.try_get_context("vpc_cidr")
self.distributed_locust = self.node.try_get_context("distributed_locust")
self.cloudmap_namespace = self.node.try_get_context("cloudmap_namespace")
self.target_url = self.node.try_get_context("target_url")
| tynooo/cdklocust | cdklocust/cdklocust_stack.py | cdklocust_stack.py | py | 3,687 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "aws_cdk.core.Stack",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "aws_cdk.core",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "aws_cdk.core.Construct",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "aws_... |
75132007548 | from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split # model_selection: classes and functions for splitting data during model selection
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression, SGDRegressor, LogisticRegression # linear regression
# "externals" means external / externally provided extensions
from sklearn.externals import joblib # saving and loading models ==> https://blog.csdn.net/YZXnuaa/article/details/80694372
from sklearn.metrics import mean_squared_error, classification_report # mean-squared-error evaluation
import pandas as pd
import numpy as np
# Predict house prices with linear regression
# (With a small dataset you can use LinearRegression, i.e. the normal-equation form of least squares, which finds the
# error minimum directly; the computation is very expensive, so it is not recommended for large datasets)
# This is similar to the high-school method of solving for k and b in a linear equation
def mylinear():
    # Load the data
    lb = load_boston()
    # Numericalize the features
    # Split the dataset
    x_train, x_test, y_train, y_test = train_test_split(lb.data, lb.target, test_size=0.25)
    # Standardization
    """The feature and target arrays have different dimensions, so one scaler cannot standardize both; create a separate scaler for each."""
std_x = StandardScaler()
x_train = std_x.fit_transform(x_train)
x_test = std_x.transform(x_test)
    # For the target values
    std_y = StandardScaler()
    y_train = std_y.fit_transform(y_train.reshape(-1, 1))
    y_test = std_y.transform(y_test.reshape(-1, 1))
    # Build the model
lr = LinearRegression()
lr.fit(x_train, y_train)
print(lr.coef_)
    # Save the model; it can be loaded elsewhere to continue training or to make predictions
    joblib.dump(lr, './通过joblib保存线性回归模型.pkl')
    # Evaluate the predictions
    y_predict = lr.predict(x_test)
    # The quality of a linear regression model is judged by its mean squared error
    print("产生的均方误差为:", mean_squared_error(y_test, y_predict))
    # Transform the data back to its scale before standardization
    y_predict_orign = std_y.inverse_transform(y_predict)
    print(y_predict, "==================", y_predict_orign) # ==> resulting mean squared error: 0.42782165417388635
# Predict house prices with gradient descent; recommended when the dataset is large
def sgd_regression():
    # The lines below are copied directly from the function above
    # Load the data
    lb = load_boston()
    # Numericalize the features
    # Split the dataset
    x_train, x_test, y_train, y_test = train_test_split(lb.data, lb.target, test_size=0.25)
    # Standardization
    """The feature and target arrays have different dimensions, so one scaler cannot standardize both; create a separate scaler for each."""
std_x = StandardScaler()
x_train = std_x.fit_transform(x_train)
x_test = std_x.transform(x_test)
    # For the target values
    std_y = StandardScaler()
    y_train = std_y.fit_transform(y_train.reshape(-1, 1))
    y_test = std_y.transform(y_test.reshape(-1, 1))
    # Build the model (SGDRegressor estimates the parameters with stochastic gradient descent)
    sgd = SGDRegressor()
    # Predict and evaluate
    sgd.fit(x_train, y_train)
    y_predict = sgd.predict(x_test)
    print(mean_squared_error(y_test, y_predict)) # ==> resulting mean squared error: 0.21901470588593194
# "Ridge regression" here: actually logistic regression
# It can only solve binary classification problems and is a discriminative model (as opposed to the generative models
# covered earlier, which have to derive certain probabilities from historical data)
# Though I feel the discriminative-vs-generative framing here is still not quite right
# How logistic regression solves a binary classification problem ==> https://blog.csdn.net/weixin_39445556/article/details/83930186
# First fit a regression curve, then pick a reasonable value on the y axis as the threshold (cut-off); solutions whose y
# is above the threshold belong to one class, those below it to the other
# The threshold can be chosen from the proportion one class makes up of the whole dataset
def LingHuiGui():
    # Predicting cancer
    # Read the data
column = ['Sample code number','Clump Thickness', 'Uniformity of Cell Size','Uniformity of Cell Shape','Marginal Adhesion', 'Single Epithelial Cell Size','Bare Nuclei','Bland Chromatin','Normal Nucleoli','Mitoses','Class']
    data = pd.read_csv(r"E:\chrome下载的东西\breast-cancer-wisconsin.data", names=column) # this way of specifying column names is worth learning
print(data)
    # Handle missing values
data = data.replace("?", np.nan)
data = data.dropna()
    # Split the data  # column here is the list of label names defined above
x_train, x_test, y_train, y_test = train_test_split(data[column[1:10]], data[column[10]], test_size=0.25)
    # Standardize (normalize) the data
std_x = StandardScaler()
x_train = std_x.fit_transform(x_train)
x_test = std_x.transform(x_test)
    # The logistic regression used here is a binary classifier, meant for two-class problems
# std_y = StandardScaler()
# y_train = std_y.fit_transform(y_train)
# y_test = std_y.transform(y_test)
    # Build the model
    lg = LogisticRegression(C=1.0) # C is the regularization strength (regularization fights overfitting by reducing model complexity)
lg.fit(x_train, y_train)
y_predict = lg.predict(x_test)
    # Evaluate the error
print("精确率:", lg.score(x_test, y_test))
    # Map the class values 2 and 4 to the names benign (良性) and malignant (恶性)
print("召回率", classification_report(y_test, y_predict, labels=[2, 4], target_names=["良性", "恶性"]))
print("加油!")
# mylinear()
# sgd_regression()
LingHuiGui()
| hahahei957/NewProject_Opencv2 | 机器学习/19_线性回归.py | 19_线性回归.py | py | 5,679 | python | zh | code | 0 | github-code | 6 | [
{
"api_name": "sklearn.datasets.load_boston",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 28,
"usage_ty... |
10422637903 | from __future__ import annotations
import dataclasses
from typing import TYPE_CHECKING
from randovania.bitpacking import bitpacking
from randovania.bitpacking.bitpacking import BitPackDecoder, BitPackValue
from randovania.game_description import default_database
if TYPE_CHECKING:
from collections.abc import Iterator
from randovania.game_description.pickup.ammo_pickup import AmmoPickupDefinition
@dataclasses.dataclass(frozen=True)
class AmmoPickupState(BitPackValue):
ammo_count: tuple[int, ...] = (0,)
pickup_count: int = 0
requires_main_item: bool = True
def check_consistency(self, ammo: AmmoPickupDefinition):
db = default_database.resource_database_for(ammo.game)
if len(self.ammo_count) != len(ammo.items):
raise ValueError(f"Ammo state has {len(self.ammo_count)} ammo counts, expected {len(ammo.items)}")
for count, ammo_name in zip(self.ammo_count, ammo.items):
ammo_item = db.get_item(ammo_name)
minimum_count = -ammo_item.max_capacity if ammo.allows_negative else 0
if not (minimum_count <= count <= ammo_item.max_capacity):
raise ValueError(
f"Ammo count for item {ammo_name} of value {count} is not "
f"in range [{minimum_count}, {ammo_item.max_capacity}]."
)
if self.pickup_count < 0:
raise ValueError(f"Pickup count must be at least 0, got {self.pickup_count}")
def bit_pack_encode(self, metadata) -> Iterator[tuple[int, int]]:
ammo: AmmoPickupDefinition = metadata["ammo"]
db = default_database.resource_database_for(ammo.game)
for count, ammo_name in zip(self.ammo_count, ammo.items):
ammo_item = db.get_item(ammo_name)
yield from bitpacking.encode_int_with_limits(
abs(count),
(ammo_item.max_capacity // 2, ammo_item.max_capacity + 1),
)
if ammo.allows_negative:
yield from bitpacking.encode_bool(count < 0) # Negative?
yield from bitpacking.encode_big_int(self.pickup_count)
if ammo.unlocked_by is not None:
yield from bitpacking.encode_bool(self.requires_main_item)
@classmethod
def bit_pack_unpack(cls, decoder: BitPackDecoder, metadata) -> AmmoPickupState:
ammo: AmmoPickupDefinition = metadata["ammo"]
db = default_database.resource_database_for(ammo.game)
# Ammo Count
ammo_count = []
for ammo_name in ammo.items:
ammo_item = db.get_item(ammo_name)
count = bitpacking.decode_int_with_limits(
decoder,
(ammo_item.max_capacity // 2, ammo_item.max_capacity + 1),
)
if ammo.allows_negative and bitpacking.decode_bool(decoder): # Negative?
count *= -1
ammo_count.append(count)
# Pickup Count
pickup_count = bitpacking.decode_big_int(decoder)
# Require Main Item
requires_main_item = True
if ammo.unlocked_by is not None:
requires_main_item = bitpacking.decode_bool(decoder)
return cls(
ammo_count=tuple(ammo_count),
pickup_count=pickup_count,
requires_main_item=requires_main_item,
)
@property
def as_json(self) -> dict:
result: dict = {}
for field in dataclasses.fields(self):
value = getattr(self, field.name)
result[field.name] = value
result["ammo_count"] = list(result["ammo_count"])
return result
@classmethod
def from_json(cls, value: dict) -> AmmoPickupState:
kwargs = {}
for field in dataclasses.fields(cls):
if field.name in value:
kwargs[field.name] = value[field.name]
if "ammo_count" in kwargs:
kwargs["ammo_count"] = tuple(kwargs["ammo_count"])
return cls(**kwargs)
| randovania/randovania | randovania/layout/base/ammo_pickup_state.py | ammo_pickup_state.py | py | 3,973 | python | en | code | 165 | github-code | 6 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "randovania.bitpacking.bitpacking.BitPackValue",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.pickup.ammo_pickup.AmmoPickupDefinition",
"lin... |
8600407692 | from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from .routers import post, user, auth, vote
############################################
#models.Base.metadata.create_all(bind=engine)
app = FastAPI()
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(post.router)
app.include_router(user.router)
app.include_router(auth.router)
app.include_router(vote.router)
@app.get("/")
def root():
return {"message": "Hello World"}
"""
my_posts = [{"title": "title of post 1", "content": "content of post 1", "id":1},
{"title": "favorite foods", "content": "I like pizza", "id":2}]
def find_post(id):
for p in my_posts:
if p["id"] == id:
return p
def find_index_post(id):
for i, p in enumerate(my_posts):
if p['id'] == id:
return i
"""
"""
#Path Operations
@app.get("/")
#IT IS THE DECORATOR: IT CREATES THE ENDPOINT OF THE FUNCTION. It is called on the already created FastAPI INSTANCE (app)
#Within the brackets we have the PATH, the path that we must access from the URL
#the get method: is one of the possible HTTP methods
def root():
    1) async is optional (so we can delete it)
    2) the name of the function should be as descriptive as possible
    3) RETURN: the message that is sent back to the user
return {"message": "Hello World"}
"""
| Mattia921/example-fastapi | app/main.py | main.py | py | 1,524 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "fastapi.middleware.cors.CORSMiddleware",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "routers.post.router",
"line_number": 21,
"usage_type": "attribute"
},
{
... |
2055718392 | # USAGE
# python knn.py --dataset ../../SolutionDL4CV/SB_code/datasets/animals
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from pyimagesearch.preprocessing import SimplePreprocessor
from pyimagesearch.datasets import SimpleDatasetLoader
from imutils import paths
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--dataset', required=True, help='path to input dataset')
ap.add_argument('-k', '--neighbors', type=int, default=1, help='# of nearest neighbors for classification')
ap.add_argument('-j', '--jobs', type=int, default=-1, help='# of jobs for k-NN distance (-1 uses all available cores)')
args = vars(ap.parse_args())
print('[INFO] loading images...')
img_paths = list(paths.list_images(args['dataset']))
sp = SimplePreprocessor(32, 32)
sdl = SimpleDatasetLoader(preprocessors=[sp])
data, labels = sdl.load(img_paths, verbose=500)
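# Flatten each 32x32x3 image into a single 3072-dimensional feature vector for the k-NN classifier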
data = data.reshape((len(data), -1))
print(f'[INFO] feature matrix: {data.nbytes/(1024*1000.0):.1f}MB')
# Encode the labels as integer
le = LabelEncoder()
labels = le.fit_transform(labels)
# Split to train and test set
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.25, random_state=42)
print("[INFO] evaluating k-NN classifier...")
model = KNeighborsClassifier(n_neighbors=args['neighbors'], n_jobs=args['jobs'])
model.fit(X_train, y_train)
print(classification_report(y_test, model.predict(X_test), target_names=le.classes_)) | lykhahaha/Mine | StarterBundle/chapter07-first_image_classifier/knn.py | knn.py | py | 1,583 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "imutils.paths.list_images",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "imutils.paths",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pyima... |
4709876834 | import numpy as np
import matplotlib.pyplot as plt
import activation_functions as acfunc
inp_a = np.arange(-1.0, 1.0, 0.2)
inp_b = np.arange(-1.0, 1.0, 0.2)
outputs = np.zeros((10, 10))
weight_a = 2.5
weight_b = 3
bias = 0.1
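# For every (input_a, input_b) grid point compute the weighted sum u and pass it through the sigmoid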
for i in range(10):
for j in range(10):
u_single = inp_a[i] * weight_a + inp_b[j] * weight_b + bias
outputs[i][j] = acfunc.sigmoid_func(u_single)
plt.imshow(outputs, "gray", vmin=0.0, vmax=1.0)
plt.colorbar()
plt.show()
| tsubamon55/pyailesson | single_neuron.py | single_neuron.py | py | 471 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.arange",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "activation_functions.sigmoid_func"... |
18849850963 | import os
import random
import math
import seaborn
import matplotlib.pyplot as plt
num_train_samples = 1
threshold = 0.25
dtw_window = 50
# thresholds: 0.15, 0.2, ...
def read_gesture(path):
with open(path, "r") as file:
lines = [line.rstrip() for line in file]
gesture = [[float(value) for value in data.split(',')] for data in lines]
return gesture
labels = ['circle_ccw', 'circle_cw', 'heart_cw', 'square_ccw', 'triangle_cw', 'junk']
paths = os.listdir('gestures')
circle_ccw = [('circle_ccw', read_gesture('gestures/' + path)) for path in paths if path.startswith('circle_ccw')]
circle_cw = [('circle_cw', read_gesture('gestures/' + path)) for path in paths if path.startswith('circle_cw')]
heart_cw = [('heart_cw', read_gesture('gestures/' + path)) for path in paths if path.startswith('heart_cw')]
square_ccw = [('square_ccw', read_gesture('gestures/' + path)) for path in paths if path.startswith('square_ccw')]
triangle_cw = [('triangle_cw', read_gesture('gestures/' + path)) for path in paths if path.startswith('triangle_cw')]
junk = [('junk', read_gesture('gestures/' + path)) for path in paths if path.startswith('junk')]
def fir_lowpass_first(a):
q = 0.95
b = [a[0]]
for i in range(1, len(a)):
x = (1.0 - q) * a[i - 1][0] + q * a[i][0]
y = (1.0 - q) * a[i - 1][1] + q * a[i][1]
z = (1.0 - q) * a[i - 1][2] + q * a[i][2]
b.append([x, y, z])
return b
def calc_distance(a, b) -> float:
ax = a[0]
ay = a[1]
az = a[2]
bx = b[0]
by = b[1]
bz = b[2]
dir = (ax * bx + ay * by + az * bz) / (normalize(ax, ay, az) * normalize(bx, by, bz) + 0.0000001)
return (1.0 - 0.5 * dir) * normalize(ax - bx, ay - by, az - bz)
def normalize(x, y, z) -> float:
return math.sqrt(x * x + y * y + z * z)
def calc_dtw(a, b) -> float:
a = fir_lowpass_first(a)
b = fir_lowpass_first(b)
dtw = [[0.0 for _ in range(50)] for _ in range(50)]
dtw[0][0] = calc_distance(a[0], b[0])
for i in range(1, 50):
dtw[i][0] = calc_distance(a[i], b[0]) + dtw[i - 1][0]
dtw[0][i] = calc_distance(a[0], b[i]) + dtw[0][i - 1]
for i in range(1, 50):
for j in range(1, 50):
dtw[i][j] = calc_distance(a[i], b[j]) + min(dtw[i - 1][j], dtw[i][j - 1], dtw[i - 1][j - 1])
i = 49
j = 49
distance = [0.0 for _ in range(100)]
length = 0
while i > 0 and j > 0:
if dtw[i - 1][j] <= dtw[i][j - 1] and dtw[i - 1][j] <= dtw[i - 1][j - 1] and (j - i) <= dtw_window:
distance[length] = dtw[i][j] - dtw[i - 1][j]
i -= 1
elif dtw[i][j - 1] < dtw[i - 1][j - 1] and (i - j) <= dtw_window:
distance[length] = dtw[i][j] - dtw[i][j - 1]
j -= 1
else:
distance[length] = dtw[i][j] - dtw[i - 1][j - 1]
i -= 1
j -= 1
length += 1
while i > 0:
distance[length] = dtw[i][0] - dtw[i - 1][0]
i -= 1
length += 1
while j > 0:
distance[length] = dtw[0][j] - dtw[0][j - 1]
j -= 1
length += 1
distance[length] = dtw[0][0]
length += 1
mean = 0.0
for i in range(length):
mean += distance[i]
mean = mean / float(length)
return mean
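# A rough sketch of how calc_dtw feeds the classification loop below (illustrative only,
# not part of the original script; `probe`, `reference` and `reference_label` are
# placeholder names for two 50-sample [x, y, z] gestures and the reference's label):
#
#     mean_dist = calc_dtw(probe, reference)
#     predicted = 'junk' if mean_dist > threshold else reference_label
#
# i.e. the mean per-step DTW distance of the best match is compared against
# `threshold`, and gestures whose best match is still too far away are rejected as junk.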
confusion_matrix = {}
num_trails = {}
for true_label in labels:
confusion_matrix[true_label] = {}
num_trails[true_label] = 0.0
for predicted_label in labels:
confusion_matrix[true_label][predicted_label] = 0.0
for _ in range(25):
random.shuffle(circle_ccw)
random.shuffle(circle_cw)
random.shuffle(heart_cw)
random.shuffle(square_ccw)
random.shuffle(triangle_cw)
circle_ccw_train = circle_ccw[:num_train_samples]
circle_ccw_test = circle_ccw[num_train_samples:]
circle_cw_train = circle_cw[:num_train_samples]
circle_cw_test = circle_cw[num_train_samples:]
heart_cw_train = heart_cw[:num_train_samples]
heart_cw_test = heart_cw[num_train_samples:]
square_ccw_train = square_ccw[:num_train_samples]
square_ccw_test = square_ccw[num_train_samples:]
triangle_cw_train = triangle_cw[:num_train_samples]
triangle_cw_test = triangle_cw[num_train_samples:]
train = circle_ccw_train + circle_cw_train + heart_cw_train + square_ccw_train + triangle_cw_train
test = circle_ccw_test + circle_cw_test + heart_cw_test + square_ccw_test + triangle_cw_test + junk
for (predicted_label, gesture) in test:
means = [(a, calc_dtw(gesture, tr)) for (a, tr) in train]
means.sort(key=lambda x:x[1])
true_label = means[0][0]
mean = means[0][1]
if mean > threshold:
true_label = 'junk'
confusion_matrix[true_label][predicted_label] += 1.0
num_trails[predicted_label] += 1.0
cf_plot = [[0 for _ in labels] for _ in labels]
for (i, true_label) in enumerate(['circle_ccw', 'circle_cw', 'heart_cw', 'square_ccw', 'triangle_cw', 'junk']):
for (j, predicted_label) in enumerate(['circle_ccw', 'circle_cw', 'heart_cw', 'square_ccw', 'triangle_cw', 'junk']):
cf_plot[j][i] = confusion_matrix[true_label][predicted_label] / num_trails[predicted_label]
plt.tick_params(labeltop=True, labelbottom=False)
seaborn.heatmap(cf_plot, cmap='rocket_r', annot=True, vmin=0.0, vmax=1.0, xticklabels=labels, yticklabels=labels, cbar=False)
plt.show()
| xrgman/ColorMatchingBracelet | arduino/GestureRecorder/evaluate.py | evaluate.py | py | 5,392 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "os.listdir",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_numbe... |
42174278814 | # import transformers
# import datasets
# from pprint import pprint
# # with pipeline
# model = transformers.AutoModelForSequenceClassification.from_pretrained("") # load model from local directory
# tokenizer = transformers.AutoTokenizer.from_pretrained("TurkuNLP/bert-base-finnish-cased-v1")
# test_pipe = transformers.pipeline(task="text-classification", model=model, tokenizer=tokenizer, function_to_apply="sigmoid", top_k=None) # return_all_scores=True is deprecated
# test = [""] # add examples to test
# results = test_pipe(test)
# for zipped in zip(test, results):
# pprint(zipped)
import transformers
import torch
import numpy as np
import argparse
from pprint import PrettyPrinter
import json
import datasets
import pandas as pd
import csv
""" This script is meant for looking at multi-label predictions for raw text data and saving the probabilities with id. """
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--model', required=True,
help="the model name")
parser.add_argument('--data', required=True,
help="the file name of the raw text to use for predictions")
parser.add_argument('--tokenizer', required=True,
help="the tokenizer to use for tokenizing new text")
parser.add_argument('--filename', required=True,
help="the file name to give file resulting from the predictions")
args = parser.parse_args()
print(args)
pprint = PrettyPrinter(compact=True).pprint
# read the data in
data = args.data
if ".json" in data:
with open(data, 'r') as json_file:
json_list = list(json_file)
lines = [json.loads(jline) for jline in json_list]
# use pandas to look at each column
df=pd.DataFrame(lines)
# # TODO might have to change this depending on the data type
# elif ".tsv" in data:
# with open(data, "rt", encoding="utf-8") as f:
# lines = f.readlines()
# lines = lines[1:]
# for i in range(len(lines)):
# lines[i] = lines[i].replace("\n", "")
# lines[i] = lines[i].split("\t")
# assert len(lines[i]) == 3
# df=pd.DataFrame(lines, columns = ['id', 'label', 'text'])
elif ".tsv" in data:
with open(data, "rt", encoding="utf-8") as f:
lines = f.readlines()
lines = lines[1:]
for i in range(len(lines)):
lines[i] = lines[i].replace("\n", "")
lines[i] = lines[i].split("\t")
assert len(lines[i]) == 2
df=pd.DataFrame(lines, columns = ['label', 'text'])
# instantiate model, this is pretty simple
model=transformers.AutoModelForSequenceClassification.from_pretrained(args.model)
trainer = transformers.Trainer(
model=model
)
tokenizer = transformers.AutoTokenizer.from_pretrained(args.tokenizer)
def tokenize(example):
return tokenizer(
example["text"],
padding='max_length', # this got it to work, data_collator could have helped as well?
max_length=512,
truncation=True,
)
dataset = datasets.Dataset.from_pandas(df)
#map all the examples
dataset = dataset.map(tokenize)
labels = dataset["label"]
# NOTE: the test-set labels would have to be mapped to the upper-level labels to make the results easily readable
dataset = dataset.remove_columns("label")
texts = dataset["text"]
#ids = dataset["id"]
# see how the labels are predicted
test_pred = trainer.predict(dataset)
predictions = test_pred.predictions # these are the logits
sigmoid = torch.nn.Sigmoid()
probs = sigmoid(torch.Tensor(predictions))
probs = probs.numpy()
unique_labels = ["IN", "NA", "HI", "LY", "IP", "SP", "ID", "OP", "QA_NEW"] # upper labels plus qa_new
with open(args.filename, 'w') as outfile:
header = ["text", "gold_labels", *unique_labels] #maybe I should put the text last
writer = csv.writer(outfile, delimiter="\t")
writer.writerow(header)
for i in range(len(texts)):
text = texts[i]
gold = labels[i]
line = [text, gold]
pred_list = [str(val) for val in probs[i]]
line = [*line, *pred_list]
writer.writerow(line) | TurkuNLP/register-qa | predict.py | predict.py | py | 4,037 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pprint.PrettyPrinter",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "pandas.DataFr... |
14478011852 | '''
Loss functions.
'''
import copy
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import utils
class NLLLoss(nn.Module):
"""Self-Defined NLLLoss Function
Args:
weight: Tensor (num_class, )
"""
def __init__(self, weight):
super(NLLLoss, self).__init__()
self.weight = weight
def forward(self, prob, target):
"""
Args:
prob: (N, C)
target : (N, )
"""
N = target.size(0)
C = prob.size(1)
weight = Variable(self.weight).view((1, -1))
weight = weight.expand(N, C) # (N, C)
if prob.is_cuda:
weight = weight.cuda()
prob = weight * prob
one_hot = torch.zeros((N, C))
if prob.is_cuda:
one_hot = one_hot.cuda()
one_hot.scatter_(1, target.data.view((-1,1)), 1)
one_hot = one_hot.type(torch.ByteTensor)
one_hot = Variable(one_hot)
if prob.is_cuda:
one_hot = one_hot.cuda()
loss = torch.masked_select(prob, one_hot)
return -torch.sum(loss)
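# A minimal usage sketch for NLLLoss (illustrative only; the sizes and tensor names
# below are assumptions, not taken from the original training code):
#
#     weight = torch.ones(5)                           # uniform weights over C = 5 classes
#     criterion = NLLLoss(weight)
#     prob = Variable(torch.randn(3, 5))               # (N, C) scores for N = 3 samples
#     target = Variable(torch.LongTensor([0, 2, 4]))   # (N,) class indices
#     loss = criterion(prob, target)                   # negative sum of the selected, weighted entries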
class GANLoss(nn.Module):
"""Reward-Refined NLLLoss Function for adversial training of Generator"""
def __init__(self):
super(GANLoss, self).__init__()
def forward_reinforce(self, prob, target, reward, cuda=False):
"""
Forward function used in the SeqGAN implementation.
Args:
prob: (N, C), torch Variable
target : (N, ), torch Variable
reward : (N, ), torch Variable
"""
N = target.size(0)
C = prob.size(1)
one_hot = torch.zeros((N, C))
if cuda:
one_hot = one_hot.cuda()
one_hot.scatter_(1, target.data.view((-1,1)), 1)
one_hot = one_hot.type(torch.ByteTensor)
one_hot = Variable(one_hot)
if cuda:
one_hot = one_hot.cuda()
loss = torch.masked_select(prob, one_hot)
loss = loss * reward
loss = -torch.sum(loss)
return loss
def forward_reward(self, i, samples, prob, rewards, BATCH_SIZE, g_sequence_len, VOCAB_SIZE, cuda=False):
"""
Returns what is used to get the gradient contribution of the i-th term of the batch.
"""
conditional_proba = Variable(torch.zeros(BATCH_SIZE, VOCAB_SIZE))
if cuda:
conditional_proba = conditional_proba.cuda()
for j in range(BATCH_SIZE):
conditional_proba[j, int(samples[j, i])] = 1
conditional_proba[j, :] = - (rewards[j]/BATCH_SIZE * conditional_proba[j, :])
return conditional_proba
def forward_reward_grads(self, samples, prob, rewards, g, BATCH_SIZE, g_sequence_len, VOCAB_SIZE, cuda=False):
"""
Returns a list of gradient contribution of every term in the batch
"""
conditional_proba = Variable(torch.zeros(BATCH_SIZE, g_sequence_len, VOCAB_SIZE))
batch_grads = []
if cuda:
conditional_proba = conditional_proba.cuda()
for j in range(BATCH_SIZE):
for i in range(g_sequence_len):
conditional_proba[j, i, int(samples[j, i])] = 1
conditional_proba[j, :, :] = - (rewards[j] * conditional_proba[j, :, :])
for j in range(BATCH_SIZE):
j_grads = []
# since we want to isolate each contribution, we have to zero the generator's gradients here.
g.zero_grad()
prob[j, :, :].backward(conditional_proba[j, :, :], retain_graph=True)
for p in g.parameters():
j_grads.append(p.grad.clone())
batch_grads.append(j_grads)
return batch_grads
class VarianceLoss(nn.Module):
"""Loss for the control variate annex network"""
def __init__(self):
super(VarianceLoss, self).__init__()
def forward(self, grad, cuda = False):
"""
Used to get the gradient of the variance.
"""
bs = len(grad)
ref = 0
for j in range(bs):
for i in range(len(grad[j])):
ref += torch.sum(grad[j][i]**2).item()
total_loss = np.array([ref/bs])
total_loss = Variable(torch.Tensor(total_loss), requires_grad=True)
if cuda:
total_loss = total_loss.cuda()
return total_loss
def forward_variance(self, grad, cuda=False):
"""
Used to get the variance of one single parameter.
In this case, we look at the last layer, then take the variance of the first parameter of this last layer in main.py
"""
bs = len(grad)
n_layers = len(grad[0])
square_term = torch.zeros((grad[0][n_layers-1].size()))
normal_term = torch.zeros((grad[0][n_layers-1].size()))
if cuda:
square_term = square_term.cuda()
normal_term = normal_term.cuda()
for j in range(bs):
square_term = torch.add(square_term, grad[j][n_layers-1]**2)
normal_term = torch.add(normal_term, grad[j][n_layers-1])
square_term /= bs
normal_term /= bs
normal_term = normal_term ** 2
return square_term - normal_term
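# For reference, forward_variance applies the usual identity Var[g] = E[g^2] - (E[g])^2
# elementwise to the last layer's gradients, with both expectations estimated over the
# batch of size bs: square_term estimates E[g^2] and normal_term (after squaring)
# estimates (E[g])^2.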
| TalkToTheGAN/REGAN | loss.py | loss.py | py | 5,235 | python | en | code | 42 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
... |
36920706174 | #-------------------------------------------------------------------------
# Python script that connects to a Microsoft Azure Event Hub resource and
# reads all available messages, while also writing a checkpoint of what it
# has read so that messages are not repeated the next time the program
# starts.
#
# Instructions for use.
# 1-. Have Python 3.4 or later.
# 2-. Have the "pip" package installer.
# 3-. Run the command "pip install azure-eventhub"
#
# Author: Noé Amador Campos Castillo.
# E-mail: noecampos@tec.mx
#--------------------------------------------------------------------------
import os
import sys
import json
import signal
import logging
import asyncio
import functools
from azure.eventprocessorhost import (
AbstractEventProcessor,
AzureStorageCheckpointLeaseManager,
EventHubConfig,
EventProcessorHost,
EPHOptions)
class EventProcessor(AbstractEventProcessor):
# Constructor for an Event Processor
def __init__(self, params=None):
super().__init__(params)
self._msg_counter = 0
# Function that can be overridden
# Used to initialize an Event Processor
async def open_async(self, context):
print("Connection established {}".format(context.partition_id))
# Function that can be overridden
# Used to stop the Event Processor.
async def close_async(self, context, reason):
print("Connection closed (reason {}, id {}, offset {}, sq_number {})".format(
reason,
context.partition_id,
context.offset,
context.sequence_number))
# Function that can be overridden
"""
Called when the EPH receives a new batch of events.
This is where the actions to perform are programmed.
Parameters:
context = Information about the partition
messages = The batch of events to process
"""
async def process_events_async(self, context, messages):
# For each event...
for Event in messages:
# Print the sequence number
print("Mensaje: {}".format(Event.sequence_number))
# Parse the JSON received in the event message
parsedMessage = json.loads(Event.body_as_str())
# Print it in a more readable way
print(json.dumps(parsedMessage, indent=2, sort_keys=True))
# Leave a checkpoint for the received event
await context.checkpoint_async()
# Function that can be overridden
"""
Called whenever the client experiences an error while receiving events.
The Event Processor Host recovers by resuming from where it left off.
(Unless the program was killed.)
Parameters:
context = Information about the partition
messages = The batch of events to process
"""
async def process_error_async(self, context, error):
print("Event Processor Error {!r}".format(error))
# Receive events for two minutes and then shut the host down
async def wait_and_close(host):
await asyncio.sleep(60)
await host.close_async()
# Connect and receive messages
try:
# Returns an asynchronous loop
ephLoop = asyncio.get_event_loop()
# Name of the Storage Account
stgName = "-"
# Storage account key
stgKey = "-"
# Name of the Blob
blobName = "-"
# Name of the Event Hubs namespace
ehNamespace = "-"
# Name of the Event Hub
ehName = "-"
# Name of the Event Hub SAS Policy
SASUser = "-"
# Key of the Event Hub SAS Policy
SASKey = "-"
"""
Configuración del Event Hub
Párametros:
sb_name = Nombre del namespace de Event Hubs
eh_name = Nombre del Event Hub
policy = Nombre del SAS Policy
key = Llave de la SAS Policy
"""
ehConfig = EventHubConfig(ehNamespace, ehName, SASUser, SASKey)
# Default options
ehOptions = EPHOptions()
# Set some options
ehOptions.release_pump_on_timeout = True
ehOptions.debug_trace = False
"""
Configuración del Storage
Párametros:
storage_account_name = Nombre del storage
storage_account_key = Llave del storage
lease_container_name = Nombre del contenedor
"""
stgManager = AzureStorageCheckpointLeaseManager(
stgName, stgKey, blobName)
# Event Hub Processor host
ehHost = EventProcessorHost(
EventProcessor,
ehConfig,
stgManager,
ep_params = ["param1", "param2"],
eph_options = ehOptions,
loop = ephLoop)
# Prepare the procedures to run in the loop
ephTasks = asyncio.gather(
ehHost.open_async(),
wait_and_close(ehHost))
# Run the loop
ephLoop.run_until_complete(ephTasks)
# In case keyboard exceptions occur
except KeyboardInterrupt:
# Cancel the tasks and the loop
for task in asyncio.Task.all_tasks():
task.cancel()
ephLoop.run_forever()
ephTasks.exception()
# Close the loop
finally:
ephLoop.stop()
| NoeCampos22/Ejercicio_Azure_Databricks | Mini-Ejercicios/1_Enviar_Recibir_Eventos_EventHub/EPH.py | EPH.py | py | 5,351 | python | es | code | 0 | github-code | 6 | [
{
"api_name": "azure.eventprocessorhost.AbstractEventProcessor",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": ... |
34668621923 | from django.urls import path
from django.conf.urls import url
from . import views
app_name = 'choistick'
urlpatterns = [
path('', views.index, name='index'),
path('map/', views.map, name='map'),
path('join/', views.signup, name='join'),
path('pick/', views.pick, name='pick'),
path('warn/', views.warn, name='warn'),
# url(r'^login/$', views.signin, name='login'),
url(r'^login/$', views.signin, name='login'),
] | jaemin8852/Search_Location | choistick/urls.py | urls.py | py | 421 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
71570292029 | from django import forms
from django.core.exceptions import ValidationError
from semester.models import Semester, CourseOffered, CourseDistribution, DistributedSectionDetail
from tempus_dominus.widgets import TimePicker, DatePicker
from django.contrib import messages
from django.shortcuts import redirect
class SemesterForm(forms.ModelForm):
start_effect_date = forms.DateField(
widget=DatePicker(
options={
'collapse': False,
'format': 'L',
},
attrs={
'append': 'fa fa-calendar',
'icon_toggle': True,
'input_toggle': True,
'placeholder': 'Start Effect Date',
'required': 'true',
'autocomplete': 'off',
},
)
)
end_effect_date = forms.DateField(
widget=DatePicker(
options={
'collapse': False,
'format': 'L',
},
attrs={
'append': 'fa fa-calendar',
'icon_toggle': True,
'input_toggle': True,
'placeholder': 'End Effect Date',
'required': 'true',
'autocomplete': 'off',
},
)
)
class Meta:
model = Semester
fields = ('name', 'year', 'start_effect_date', 'end_effect_date')
class CourseOfferingForm(forms.ModelForm):
class Meta:
model = CourseOffered
fields = '__all__'
class CourseDistributionForm(forms.ModelForm):
starting_id = forms.CharField()
ending_id = forms.CharField()
class Meta:
model = CourseDistribution
fields = ('offered', 'section', 'teacher', 'parent_dist', 'starting_id', 'ending_id')
def clean(self):
cd = self.cleaned_data
qs = CourseDistribution.objects.filter(offered=cd.get('offered'), section=cd.get('section'))
if qs:
for obj in qs:
try:
ms = DistributedSectionDetail.objects.get(distribution=obj)
if ms.starting_id == "*" or cd.get('starting_id') == "*":
raise ValidationError("Course already distributed to Section.")
else:
if not (cd.get('ending_id') < ms.starting_id or cd.get('starting_id') > ms.ending_id):
raise ValidationError("Course already distributed to Section.")
# raise ValidationError("Course already distributed to Section.")
except:
return redirect("/distribution/create/")
parent = cd.get('parent_dist')
if parent:
while parent.parent_dist != parent:
cd['parent_dist'] = parent.parent_dist
parent = cd.get('parent_dist')
if parent and cd.get('teacher') != parent.teacher:
raise ValidationError("Merged sections has different teacher")
return cd
def save(self, commit=True):
cd = self.cleaned_data
instance = super(CourseDistributionForm, self).save(commit=True)
if not instance.parent_dist:
instance.parent_dist = instance
instance.save()
DistributedSectionDetail.objects.update(
distribution=instance,
starting_id=cd.get('starting_id'),
ending_id=cd.get('ending_id')
)
return instance
class CourseDistributionUpdateForm(forms.ModelForm):
class Meta:
model = CourseDistribution
fields = ('offered', 'section', 'teacher', 'parent_dist')
def clean(self):
cd = self.cleaned_data
parent = cd.get('parent_dist')
if parent:
while parent.parent_dist != parent:
cd['parent_dist'] = parent.parent_dist
parent = cd.get('parent_dist')
if parent and cd.get('teacher') != parent.teacher:
raise ValidationError("Merged sections has different teacher")
return cd
| Emad-ahmed/luRoutine | semester/forms.py | forms.py | py | 4,085 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.forms.DateField",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.... |
17137130653 | import requests
import re
from bs4 import BeautifulSoup
from openpyxl import load_workbook
DIRECTORY_URL = "https://directory.tufts.edu/searchresults.cgi"
WORKBOOK_NAME = "DirectoryResults_2017-2018.xlsx"
NAME_SHEET = "DirectoryResults"
# This script works on Excel Sheets with a single column in the A column of
# a unique listing of names. It searches for every name in the tufts directory,
# then strips out a department if it is found and places it in the B column next
# to the search query.
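# For example (illustrative values only), a sheet where A2 holds "Jane Doe" would end
# up with B2 set to something like "Computer Science" after the run, assuming the
# directory page returns a department link for that name.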
def getDirectoryPage(name):
rawPage = requests.post(DIRECTORY_URL, data={"type": "Faculty", "search": name})
soup = BeautifulSoup(rawPage.text, "html.parser")
for child in soup.find_all(href=re.compile("department.cgi")):
return child.contents[0].strip()
dataBook = load_workbook(WORKBOOK_NAME)
nameSheet = dataBook[NAME_SHEET]
for index in range(2, nameSheet.max_row + 1):
currentName = nameSheet["A{0}".format(index)].value
affiliation = getDirectoryPage(currentName)
nameSheet["B{0}".format(index)] = affiliation
# Rudimentary backup every 100 entries
if index % 100 == 0:
print("Progress: {:.2%}".format(index / (nameSheet.max_row + 1)))
dataBook.save(WORKBOOK_NAME)
dataBook.save(WORKBOOK_NAME)
| jGowgiel/fec-donation-aggregator | scripts/DirectoryScrape.py | DirectoryScrape.py | py | 1,261 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.post",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "openpyxl.load_workbook",
... |
40182196672 | from anomaly_detection import Amean
from multiprocessing import Process, Queue
from database import DataBase
from datetime import datetime, timedelta
import time
import traceback
class AnomalyDomain (Process):
# initialize data
def __init__(self, name, host) :
super(AnomalyDomain, self).__init__()
self.name = name
self.db = DataBase(host=host, database="anomaly", collection="data")
self.last_update = ""
self.last_value = 0
self.timeline = []
for i in range(24) :
hour_detection = Amean()
self.timeline.append(hour_detection)
self.timeline_weekend = []
for i in range(24) :
hour_detection = Amean()
self.timeline_weekend.append(hour_detection)
return
def __predict (self, hour=0, current=0, angle=0, date="") :
"""predict and return value for new data point"""
date = datetime.strptime(date, "%Y-%m-%d")
code = self.timeline[hour].predict(current=current, angle=angle)
if code == '' :
return 10
else :
return code
def __log (self, msg) :
with open("log.txt","a") as f :
f.write(msg + '\n')
return
def __check_new_data (self, name) :
"""check if there is new data in repo
if yes, return all new data"""
# check if repo is null (start-date = null)
if self.last_update == "" :
start_date = self.db.get_start_date(name=name)
if start_date != '' :
self.last_update = start_date
# check last update
# go to database and get last_update, then update data in anomaly class (this class)
db_last_update = self.db.get_last_update(name=name)
print("db_last_update: ",db_last_update)
if db_last_update == '' or not db_last_update:
return []
else :
db_last_update = datetime.strptime(db_last_update, "%Y-%m-%d %H")
last_update = datetime.strptime(self.last_update, "%Y-%m-%d %H")
result = []
while last_update < db_last_update :
print("db_last_update: ", name," ", db_last_update)
last_update += timedelta(seconds=3600)
print("check last update :", last_update)
date = last_update.strftime("%Y-%m-%d")
hour = last_update.hour
data_value = self.db.get_data_by_hour(name=name, date=date, hour=hour)
self.__log(date + ' ' + str(hour) + ' ' + str(data_value))
data = {'angle':float(data_value)-float(self.last_value),
'current':data_value,
'date':date,
'hour':hour}
result.append(data)
self.last_value = data_value
self.last_update = datetime.strftime(last_update, '%Y-%m-%d %H')
return result
def __save_result (self, name, date, result) :
self.db.insert_result(name=name, date=date, value=result)
return
#========================= RUN ==============================
# Run process method
# start per process by calling run()
def run(self) :
name = self.name
try :
while True :
time.sleep(10)
data = self.__check_new_data (name)
# data :
# [] : no new data
# [{'date': ..., 'hour': ..., 'current': ..., 'angle': ...}] : list of new hourly data points
print("--------------AnomalyDomain is running1--------------")
if data != [] :
print("--------------AnomalyDomain is running2--------------")
# predict new data
for hour_data in data :
result_prediction = self.__predict(hour=hour_data['hour'],
current=hour_data['current'],
angle=hour_data['angle'],
date=hour_data["date"])
# save result to db
self.__save_result(name=name,
date=hour_data['date']+' '+str(hour_data['hour']),
result=result_prediction)
#continue waiting
except Exception as e:
with open("log.txt","a") as f :
f.write(str(e) + '\n')
traceback.print_exc() | DUCQUAN7850/warning_service_master | warning_service-master/anomaly_domain.py | anomaly_domain.py | py | 4,642 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "multiprocessing.Process",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "database.DataBase",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "anomaly_detection.Amean",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "anom... |
21806540252 | from jetracer.nvidia_racecar import NvidiaRacecar
import time
import sys
from multiprocessing import Process, Value
import zmq
import Jetson.GPIO as GPIO
pinrun = 'DAP4_SCLK' #12
pinbouton = 'SPI2_SCK' #13
pinau = 'SPI2_CS1' #16
autrepin = 'SPI2_CS0' #18
GPIO.setmode(GPIO.TEGRA_SOC)
GPIO.setup(pinrun, GPIO.OUT)
GPIO.output(pinrun, 0)
GPIO.setup(pinau, GPIO.OUT)
GPIO.output(pinau, 0)
GPIO.setup(pinbouton, GPIO.IN)
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:5555")
currentlyRunning = Value('b', False)
def RunScript(script, data):
#print("running " + script)
GPIO.output('DAP4_SCLK', 1)
data.value = True
exec(script)
print(f"currentlyRunning from process {currentlyRunning.value}")
print("Script terminé avec succès")
data.value = False
print(f"currentlyRunning from process {currentlyRunning.value}")
GPIO.output('DAP4_SCLK', 0)
def BumperChock(data):
print("Detection pare-chocs")
if currentlyRunning.value:
runThread.terminate() # sends a SIGTERM
currentlyRunning.value = False;
car.throttle = 0.001
car.throttle = 0
GPIO.output(pinrun, 0)
GPIO.output(pinau, 1)
data.value = False
car = NvidiaRacecar()
car.steering_gain = -0.65
car.steering_offset = -0.25
if car.steering_offset != -0.25 : exit()
print("Car ready")
GPIO.add_event_detect(pinbouton, GPIO.FALLING, callback=lambda x: BumperChock(currentlyRunning), bouncetime=10)
while True:
try:
print("en attente recv...")
message = socket.recv()
GPIO.output(pinau, 0)
socket.send(b"OK")
message = message.decode("utf-8")
#print("Received request: %s" % message)
f = open("/KDesir_Tests/logging.txt", "a")
t = time.strftime('%d/%m/%Y-%H:%M:%S', time.localtime()) + ","
log = message.replace("\n", "\n" + t)
f.write(t + log + "\n")
f.close()
#print(message)
if "ArretUrgence" in message:
runThread.terminate() # sends a SIGTERM
#socket.send(b"AU_Done")
print("Arrêt d'urgence déclenché")
currentlyRunning.value = False;
raise
else:
print(f"currentlyRunning from main script {currentlyRunning.value}")
if not currentlyRunning.value:
print(f"currentlyRunning {currentlyRunning.value}")
runThread=Process(target=RunScript,args=(message, currentlyRunning))
runThread.start()
else:
print("Impossible d'exécuter le script car un autre est déjà en cours")
except Exception as e:
print(e)
car.throttle = 0.001
car.throttle = 0
GPIO.output(pinrun, 0)
GPIO.output(pinau, 1)
#finally:
# GPIO.cleanup()
sys.exit("Fin du programme")
| SpaceLabsfr/BlockApp | serveur-blockapp.py | serveur-blockapp.py | py | 2,553 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "Jetson.GPIO.setmode",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "Jetson.GPIO",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "Jetson.GPIO.TEGRA_SOC",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "Jetson.GPI... |
41774127803 | import hearts.model.game as m
from hearts.game_master import GameMaster
import logging
class GameBackend(object):
def __init__(self, player_svc):
self._next_game_id = 1
self._game_masters = {}
self._players = {}
self._player_mapping = {}
self._player_svc = player_svc
self.logger = logging.getLogger(__name__)
def create_game(self, players):
game_id = self._next_game_id
self._next_game_id += 1
model = m.HeartsGame()
model.start()
master = GameMaster(model, game_id)
self._game_masters[game_id] = master
master.add_observer(self)
self._players[game_id] = list(players)
for idx, player_id in enumerate(players):
self._player_mapping[player_id] = (game_id, idx)
return game_id
def get_game_master(self, game_id):
return self._game_masters[game_id]
def try_get_player_game(self, player_id):
data = self._player_mapping.get(player_id)
if data is None:
return None
return data[0]
def try_get_game_info(self, player_id):
data = self._player_mapping.get(player_id)
if data is None:
return None
return data
def is_in_game(self, player_id):
return player_id in self._player_mapping
def on_game_finished(self, game_id):
self.logger.info("Game %d has finished.", game_id)
self._destruct_game(game_id)
def on_game_abandoned(self, game_id):
self.logger.info("Game %d has been abandoned.", game_id)
self._destruct_game(game_id)
def _destruct_game(self, game_id):
for player in self._players[game_id]:
del self._player_mapping[player]
self._player_svc.remove_player(player)
del self._players[game_id]
del self._game_masters[game_id] | MHeasell/hearts-server | hearts/game_backend.py | game_backend.py | py | 1,880 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "hearts.model.game.HeartsGame",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "hearts.model.game",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "hear... |
27456097150 | from pathlib import Path
from sphinx.directives import SphinxDirective
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.logging import getLogger
import yaml
import json
logger = getLogger(__name__)
class PyConfig(SphinxDirective):
has_content = True
def run(self):
if self.content:
data = "\n".join(self.content)
data = json.dumps(yaml.safe_load(data), indent=2)
return [nodes.raw("", f'<py-config type="json">\n{data}\n</py-config>\n', format="html")]
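# Roughly, PyConfig takes a YAML body and re-emits it as JSON inside a <py-config> tag.
# Assuming the class is registered under the directive name "py-config" (the
# registration is not shown in this file), usage in reST might look like:
#
#   .. py-config::
#
#       packages:
#         - numpy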
class PyRepl(SphinxDirective):
has_content = True
option_spec = {
"auto-generate": directives.unchanged,
"output": directives.unchanged,
"src": directives.path,
}
def run(self):
attrs: str = ""
code: str = ""
for key, value in self.options.items():
attrs += f' {key}="{value}"'
if self.content:
code = "\n".join(self.content)
py_repl = f'''
<py-repl {attrs}>
{code}
</py-repl>
'''
return [nodes.raw("", py_repl, format="html")]
class PyScript(SphinxDirective):
has_content = True
option_spec = {
"file": directives.path,
"output": directives.unchanged
}
def run(self):
if "file" in self.options:
path = self.env.relfn2path(self.options['file'])[1]
try:
with open(path, 'r') as f:
code = f.read()
self.env.note_dependency(path)
except (FileNotFoundError, Exception) as err:
logger.warn('reading error: %s, %s', path, err)
return []
elif self.content:
code = "\n".join(self.content)
else:
raise self.error("Must provide either content or the 'file' option")
return [nodes.raw("", f"<py-script>\n{code}\n</py-script>\n", format="html")]
class PyTerminal(SphinxDirective):
option_spec = {
"auto": directives.flag,
"false": directives.flag
}
def run(self):
attrs: str = ""
for key, _ in self.options.items():
attrs += f' {key}'
py_terminal = f'''
<py-terminal {attrs}></py-terminal>
'''
return [nodes.raw("", py_terminal, format="html")]
| yoblee/docs | sphext/sphinx_pyscript/pys_directives/__init__.py | __init__.py | py | 2,360 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sphinx.util.logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sphinx.directives.SphinxDirective",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 19,
"usage_type": "call"
},
{
"api_nam... |
31329871933 | #%%
import pandas as pd
import numpy as np
import datetime as dt
import xarray as xr
import cftime
import dask
from glob import glob
#%%
'''SUBSET RH DATA'''
data = pd.read_csv("preprocessing/inputdata/AMF_US-MBP_BASE_HH_2-5.csv",
skiprows = 2,
na_values = -9999)
data['TIMESTAMP_START'] = pd.to_datetime(data.TIMESTAMP_START, format = '%Y%m%d%H%M')
data['TIMESTAMP_END'] = pd.to_datetime(data.TIMESTAMP_END, format = '%Y%m%d%H%M')
data = data.rename(columns = {'TIMESTAMP_START':'TIMESTAMP'})
'''
#TRY EDI Data
data = pd.read_csv('preprocessing/inputdata/BogLake_Met_data_30min.csv', skiprows = 1, sep = ",",
names=["TIMESTAMP", "Air_TempC_Avg", "RH", "Soil_TempC_5cm", "Soil_TempC_10cm", "Soil_TempC_20cm",
"Soil_TempC_30cm", "Soil_TempC_40cm", "Soil_TempC_50cm", "Soil_TempC_100cm", "Soil_TempC_200cm",
"WS_Tot", "WindDir_D", "WindDir_SD", "PAR_Den_Avg"],
parse_dates = ['TIMESTAMP'],
na_values = {'RH':['NA',]})
'''
# Subset to the right variables
RH = data[['TIMESTAMP', 'RH']]
# Sort into Year-Month combos
RH['Year'] = RH.TIMESTAMP.dt.year
RH['Month'] = RH.TIMESTAMP.dt.month
RH['Year-Month'] = RH.TIMESTAMP.dt.strftime('%Y') + '-' + RH.TIMESTAMP.dt.strftime('%m')
#Subset to proper years
RH = RH[(RH.Year > 2010) & (RH.Year < 2018)]
#Patch long NA period in August 2017 with data from the NADP site in the EDI data
patch_data = pd.read_csv('preprocessing/inputdata/NADP_Met_data_30min.csv', skiprows = 1, sep = ",",
names=["TIMESTAMP", "Air_TempC_Avg", "RH", "Soil_TempC_Avg", "WS_Tot", "WindDir_D", "WindDir_SD", "PAR_Den_Avg", "Soil_VWC_Avg"],
parse_dates = ['TIMESTAMP'],
na_values = {'RH':['NA',]})
RH_patch = patch_data[['TIMESTAMP', 'RH']]
RH_patch = RH_patch[(RH_patch.TIMESTAMP.dt.year == 2017) & (RH_patch.TIMESTAMP.dt.month == 8)]
# %%
'''OPEN ALL NCDF DATA'''
vals = set(RH['Year-Month'])
for val in vals:
#Open sample data set
test = xr.open_mfdataset(val + '.nc', decode_times = False)
#Select RH data:
if val == '2017-08':
test_RH = RH_patch
else:
test_RH = RH[RH['Year-Month'] == val]
#Subset netcdf spatially
test2 = test.drop_sel(lon = 1)
#If leap Feb and leap year remove Feb29 data
r = test_RH['RH']
if (val.endswith('02')) & (len(r) > len(test2.time)):
#remove Feb 29
r = r[:len(test2.time)]
#print(val + ": " + str(min(r)))
#print("NEW" + val + ": " + str(min(r)))
# Interpolate
r3 = pd.Series(r).interpolate(method = "linear")
print("NAN" + val + ": " + str(any(np.isnan(r3))))
#Reshape
r4 = np.reshape(list(r3),(-1,1,1))
#Add RH Data
test2['RH'] = xr.DataArray(r4,
dims = ['time', 'lat', 'lon'],
attrs = {'FillValue': np.NaN,
'long_name': 'relative humidity at the lowest atm level (RH)',
'units': '%' })
#Write sample
test2.to_netcdf('preprocessing/forcings-modified/' + val + '.nc')
# %%
'''CHECK'''
#Open file to check
dat = xr.open_mfdataset('preprocessing/forcings-modified/2017-08.nc')
# %%
| mwdjones/clm_frost | preprocessing/forcings/Add_RH_to_Forcings.py | Add_RH_to_Forcings.py | py | 3,217 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv... |
23435779102 | from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
import sys
import json
import mainwindow, mystock, recepe, compute
class MyMainWindow(mainwindow.Ui_MainWindow):
def setupUi(self, mw, database):
super().setupUi(mw)
self.tabWidget = QtWidgets.QTabWidget()
mw.setCentralWidget(self.tabWidget)
self.ms = QtWidgets.QWidget()
self.mystock = mystock.MyStock()
self.mystock.setupUi(self.ms, database["bernard"]["stock"])
self.tabWidget.addTab(self.ms, "STOCK")
self.mr = QtWidgets.QWidget()
self.myRecepe = recepe.Ui_TabRecepe()
self.myRecepe.setupUi(self.mr)
self.tabWidget.addTab(self.mr, "Recepe")
self.mc = QtWidgets.QWidget()
self.myCompute = compute.Ui_TabCompute()
self.myCompute.setupUi(self.mc)
self.tabWidget.addTab(self.mc, "Compute")
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
mw = QtWidgets.QMainWindow()
ui = MyMainWindow()
ui.setupUi(mw)
mw.show()
sys.exit(app.exec_()) | bernard169/open-breware | mymainwindow.py | mymainwindow.py | py | 1,084 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "mainwindow.Ui_MainWindow",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets.QTabWidget",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 10,
"usage_type": "name"
},
{
"api_name"... |
30367868751 | from numpy import linspace, sin
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool
from enable.api import ComponentEditor
from traits.api import Enum, HasTraits, Instance
from traitsui.api import Item, Group, View
class PlotEditor(HasTraits):
plot = Instance(Plot)
plot_type = Enum("scatter", "line")
orientation = Enum("horizontal", "vertical")
traits_view = View(
Item("orientation", label="Orientation"),
Item("plot", editor=ComponentEditor(), show_label=False),
width=500,
height=500,
resizable=True,
)
def __init__(self, *args, **kw):
HasTraits.__init__(self, *args, **kw)
# Create the data and the PlotData object
x = linspace(-14, 14, 100)
y = sin(x) * x ** 3
plotdata = ArrayPlotData(x=x, y=y)
# Create the scatter plot
plot = Plot(plotdata)
plot.plot(("x", "y"), type=self.plot_type, color="blue")
plot.tools.append(PanTool(plot))
plot.tools.append(ZoomTool(plot))
self.plot = plot
def _orientation_changed(self):
if self.orientation == "vertical":
self.plot.orientation = "v"
else:
self.plot.orientation = "h"
self.plot.request_redraw()
# ===============================================================================
# demo object that is used by the demo.py application.
# ===============================================================================
class Demo(HasTraits):
# Scatter plot.
scatter_plot = Instance(PlotEditor)
# Line plot.
line_plot = Instance(PlotEditor)
traits_view = View(
Group(Item("@scatter_plot", show_label=False), label="Scatter"),
Group(Item("@line_plot", show_label=False), label="Line"),
title="Chaco Plot",
resizable=True,
)
def __init__(self, *args, **kws):
super(Demo, self).__init__(*args, **kws)
# Hook up the ranges.
self.scatter_plot.plot.range2d = self.line_plot.plot.range2d
def _scatter_plot_default(self):
return PlotEditor(plot_type="scatter")
def _line_plot_default(self):
return PlotEditor(plot_type="line")
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
| enthought/chaco | examples/tutorials/scipy2008/ploteditor.py | ploteditor.py | py | 2,299 | python | en | code | 286 | github-code | 6 | [
{
"api_name": "traits.api.HasTraits",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "traits.api.Instance",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "chaco.api.Plot",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "traits.ap... |
5223556496 | import os
import time
import requests
import functools
from concurrent.futures import ThreadPoolExecutor
import click
import yaml
def _get_connection_urls(workload_yaml):
with open(workload_yaml) as f:
workload = yaml.safe_load(f)
uris = workload.get("EnvironmentDetails", {}).get("MongosyncConnectionURIs")
if not uris:
raise Exception(
f"This actor requires setting EnvironmentDetails: MongosyncConnectionURIs to use this script"
)
return uris
def poll(workload_yaml, predicate, key):
"""
Wait for all mongosyncs to reach a certain state (e.g. predicate returns False)
based on a value returned by the /progress endpoint
"""
connection_urls = _get_connection_urls(workload_yaml)
def get_progress():
res = requests.get(f"{url}/api/v1/progress")
return res.json()["progress"][key]
for url in connection_urls:
info = get_progress()
while predicate(info):
time.sleep(1)
print(f"Polling {url} for {key}, current value = {info}", flush=True)
info = get_progress()
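# For example (hypothetical workload file name), waiting until every mongosync reports
# the change event application phase could look like:
#
#     poll("workload.yml", lambda info: info != "change event application", "info")
#
# which mirrors how the poll_for_cea command below drives this helper.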
def _change_one_mongosync_state(route, body, url):
"""
Change state of a given mongosync running at the provided url
"""
resp = requests.post(f"{url}{route}", json=body)
print(resp.json(), flush=True)
success = resp.json()["success"]
if not success:
raise Exception(f"State change failed at route {route}")
return success
def change_state(workload_yaml, route, body):
"""
Helper function to change state of mongosync. This must
send all requests in parallel, as some commands block until
all instances receive them
"""
connection_urls = _get_connection_urls(workload_yaml)
fn = functools.partial(_change_one_mongosync_state, route, body)
with ThreadPoolExecutor() as executor:
futures = []
# Using executor.map swallows exceptions from the task,
# using .submit and then accessing the future's .result
# will cause exceptions to be rethrown
for url in connection_urls:
futures.append(executor.submit(fn, url))
for f in futures:
f.result()
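# Similarly (hypothetical workload file name), committing the migration on every
# mongosync at once could look like:
#
#     change_state("workload.yml", "/api/v1/commit", {})
#
# which is exactly what the commit command below does.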
@click.group(name="MongosyncActor", context_settings=dict(help_option_names=["-h", "--help"]))
def cli():
pass
@cli.command(
"start",
help=("Issue /start to all mongosync processes"),
)
@click.argument("workload_yaml", nargs=1)
def start(workload_yaml):
change_state(workload_yaml, "/api/v1/start", {"Source": "cluster0", "Destination": "cluster1"})
@cli.command(
"poll_for_cea",
help=("Poll all available instances for the CEA stage"),
)
@click.argument("workload_yaml", nargs=1)
def poll_for_cea(workload_yaml):
poll(workload_yaml, lambda x: x != "change event application", "info")
@cli.command(
"poll_for_commit_point",
help=("Wait till all the instances canCommit = true and lagTimeSeconds < 120"),
)
@click.argument("workload_yaml", nargs=1)
def poll_for_commit_point(workload_yaml):
poll(workload_yaml, lambda x: bool(x) == False, "canCommit") or poll(
workload_yaml, lambda x: int(x) > 120, "lagTimeSeconds"
)
@cli.command(
"drain_writes",
help=("Wait till all writes have been drained to the destination cluster"),
)
@click.argument("workload_yaml", nargs=1)
def drain_writes(workload_yaml):
poll(workload_yaml, lambda x: int(x) > 5, "lagTimeSeconds")
@cli.command(
"commit",
help=("Commit the migration"),
)
@click.argument("workload_yaml", nargs=1)
def commit(workload_yaml):
change_state(workload_yaml, "/api/v1/commit", {})
@cli.command(
"wait_for_commit",
help=("Wait until all mongosyncs are finished commiting the migration"),
)
@click.argument("workload_yaml", nargs=1)
def wait_for_commit(workload_yaml):
poll(workload_yaml, lambda x: x != "COMMITTED", "state")
@cli.command(
"pause",
help=("Pause the migration"),
)
@click.argument("workload_yaml", nargs=1)
def pause(workload_yaml):
change_state(workload_yaml, "/api/v1/pause", {})
@cli.command(
"resume",
help=("Resume the migration"),
)
@click.argument("workload_yaml", nargs=1)
def resume(workload_yaml):
change_state(workload_yaml, "/api/v1/resume", {})
if __name__ == "__main__":
cli()
| mongodb/genny | src/cast_python/src/mongosync_actor.py | mongosync_actor.py | py | 4,300 | python | en | code | 42 | github-code | 6 | [
{
"api_name": "yaml.safe_load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_numb... |
1448273356 | """This file runs the model inference. Here's the command:
python run_inference.py -i trainval/images/image_000000001.jpg -m model/model.pt"""
# import the necessary packages
import argparse
import cv2
import numpy as np
from PIL import Image
import torch
from torchvision import transforms
import config
from utils import get_model_instance_segmentation
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input_image", required=True,
help="path to input image")
ap.add_argument("-m", "--model", required=True,
help="path to trained pytorch model")
ap.add_argument("-c", "--confidence", type=float, default=0.85,
help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
model_path = args["model"]
input_image = args["input_image"]
confidence = args["confidence"]
# classes which our model will detect, and the color of the bounding box drawn for each
CLASSES=["Background","Person","Car"]
# reading the image with Pillow and converting it into a numpy array
img = Image.open(input_image)
open_cv_image = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
# pytorch will work on the suitable device, whether it's CPU or GPU
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# getting the model instance and loading the pytorch model
model = get_model_instance_segmentation(config.num_classes)
model.load_state_dict(torch.load(model_path))
# move model to the right device
model.to(device)
model.eval()
trans =transforms.Compose([transforms.ToTensor()])
img = trans(img).cuda()
# getting all the detections generated by the trained model
detections = model([img])
# separating out all the bounding boxes, labels, and scores we get
_bboxes, _labels, _scores = detections[0]['boxes'], detections[0]['labels'], detections[0]['scores']
# loop over the detections
for i in range(0, len(_bboxes)):
# extract the confidence (i.e., probability) associated with the
# prediction
pred_confidence = _scores[i]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence
if pred_confidence > confidence:
# extract the index of the class label from the detections,
# then compute the (x, y)-coordinates of the bounding box
# for the object
idx = int(_labels[i])
box = _bboxes[i].detach().cpu().numpy()
(startX, startY, endX, endY) = box.astype("int")
# display the prediction to our terminal
label = "{}: {:.2f}%".format(CLASSES[idx], pred_confidence * 100)
print("[INFO] {}".format(label))
# draw the bounding box and label on the image
cv2.rectangle(open_cv_image, (startX, startY), (endX, endY),
(0,0,255) if idx==1 else (0,255,0), 1)
y = startY - 15 if startY - 15 > 15 else startY + 15
cv2.putText(open_cv_image, label, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255) if idx==1 else (0,255,0), 2)
# show the output image
cv2.imshow("output", open_cv_image)
cv2.waitKey(0)
| Pradhunmya/pytorch_faster_rcnn | run_inference.py | run_inference.py | py | 3,143 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "cv2.cvtColor",
"... |
9591325686 | from rest_framework.authentication import TokenAuthentication
from rest_framework.exceptions import AuthenticationFailed
from .models import AuthToken
from utils.exceptions import *
def expire_token(user):
try:
for auth_token in user.auth_tokens.all():
auth_token.delete()
except AuthToken.DoesNotExist:
pass
def get_auth_token_by(raise_exception=True, only_deleted=False, **kwargs):
key = kwargs.get('key')
if only_deleted:
auth_token = AuthToken.objects.deleted_only().filter(**kwargs).first()
else:
auth_token = AuthToken.objects.filter(key=key).first()
if not auth_token and raise_exception:
raise ObjectNotFound
return auth_token
def create_token(user):
auth_token = AuthToken.objects.create(user=user)
return auth_token.key
def token_expire_handler(auth_token):
if auth_token.is_expired:
auth_token = create_token(user=auth_token.user)
return auth_token.is_expired, auth_token
class ExpiringTokenAuthentication(TokenAuthentication):
def authenticate_credentials(self, key):
try:
auth_token = AuthToken.objects.get(key=key)
except AuthToken.DoesNotExist:
raise AuthenticationFailed
is_expired, auth_token = token_expire_handler(auth_token)
if is_expired:
raise AuthenticationFailed
return auth_token.user, auth_token
| danghh-1998/django_rest_boilerplate | auth_tokens/services.py | services.py | py | 1,418 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "models.AuthToken.DoesNotExist",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "models.AuthToken",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "models.AuthToken.objects.deleted_only",
"line_number": 19,
"usage_type": "call"
},
... |
8898957294 | import telebot
import config
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
import json
from telebot import types
import pyowm
owm = pyowm.OWM('c8548689b28b1916f78403fb9c92e4f3', language='ru')
bot = telebot.TeleBot(config.TOKEN)
authenticator = IAMAuthenticator('9n-ZTrznhrAKV0YAJIWIM-fwico0pbNeHp9Wek67nt6V')
language_translator = LanguageTranslatorV3(
version='2018-05-01',
authenticator=authenticator
)
language_translator.set_service_url('https://api.eu-gb.language-translator.watson.cloud.ibm.com/instances'
'/1bec2e12-6251-4b94-8d80-fcead8ec6d68')
languages = language_translator.list_languages().get_result()
user_dict = {}
# Build a class for storing the variables (what the user enters)!
class User:
def __init__(self, name):
self.name = name
self.application = None
self.writer_lang = None
self.phrases = None
self.place = None
# Handle /start and /help
@bot.message_handler(commands=['start', 'help'])
def command_start(message):
chat_id = message.chat.id
text = message.text
msg = bot.send_message(chat_id,
"Здравствуйте, я полезный бот! Мое предназначение помогать в тех функциях которые в меня "
"встроенные! "
"В них входит перевод текста на необходимый Вам язык! Подскажите как я могу к Вам обращатся?")
bot.register_next_step_handler(msg, process_name_step)
# The bot offers its services/capabilities!
@bot.message_handler(content_types=['text'])
def process_name_step(message):
chat_id = message.chat.id
name = message.text
user = User(name)
user_dict[chat_id] = user
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('Переводчик', 'Погода', 'Список покупок')
msg = bot.send_message(chat_id, 'Приятно познакомится ' + user.name + '! Выберите приложение, которым хотите '
'воспользоваться!', reply_markup=markup)
bot.register_next_step_handler(msg, how_can_i_help)
# This action runs when an application is chosen!
@bot.message_handler(content_types=['text'])
def how_can_i_help(message):
chat_id = message.chat.id
application = message.text
if application == '/start' or application == '/help':
return command_start
if application == u'Переводчик':
user = user_dict[chat_id]
user.application = application
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('английский', 'русский', 'испанский', 'немецкий', 'итальянский', 'французский', 'китайский',
'Погода')
msg = bot.send_message(chat_id, "Укажите на каком языке будете писать (если такого языка нет в выпадающем "
"списке, напишите этот язык в сообщении!): ", reply_markup=markup)
bot.register_next_step_handler(msg, translater_func1)
elif application == u'Погода':
user = user_dict[chat_id]
user.application = application
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('Переводчик', 'Погода')
msg = bot.send_message(chat_id, "Введите в каком городе/стране?: ", reply_markup=markup)
bot.register_next_step_handler(msg, weather_bot)
elif application == u'Список покупок':
pass
# The text translation application starts here!
@bot.message_handler(content_types=['text'])
def translater_func1(message):
global lang1  # Create the lang1 variable (the language the text will be written in)
try:  # Use a try/except block so we can tell the buttons apart from the /start command
chat_id = message.chat.id
writer_lang = message.text
if writer_lang == u'Погода':  # Button-press condition: if it is the weather button, run the code below!
user = user_dict[chat_id]
user.writer_lang = writer_lang
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('Переводчик')
msg = bot.send_message(chat_id, "Введите в каком городе/стране?: ", reply_markup=markup) # Спрашиваем в
# каком городе проверить и при этом крепим кнопку переводчика!
bot.register_next_step_handler(msg, weather_bot)
# The conditions below handle choosing the language with the buttons! If one of the buttons below was chosen:
if writer_lang == u'английский':
lang1 = 'en'
elif writer_lang == u'русский':
lang1 = 'ru'
elif writer_lang == u'украинский':
lang1 = 'uk'
elif writer_lang == u'испанский':
lang1 = 'es'
elif writer_lang == u'немецкий':
lang1 = 'de'
elif writer_lang == u'итальянский':
lang1 = 'it'
elif writer_lang == u'французский':
lang1 = 'fr'
elif writer_lang == u'китайский':
lang1 = 'zh'
user = user_dict[chat_id]
user.writer_lang = writer_lang
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('английский', 'русский', 'испанский', 'немецкий', 'итальянский', 'французский', 'китайский')
# At the same time, ask which language to translate into and attach buttons with the corresponding languages
msg = bot.send_message(chat_id, "На какой язык переводить (если такого языка нет в выпадающем "
"списке, напишите этот язык в сообщении!): ", reply_markup=markup)
bot.register_next_step_handler(msg, translater_func2)
except:
if writer_lang == '/start' or writer_lang == '/help':  # If the start command was typed, go back to the beginning of the code!
msg = bot.send_message(chat_id,
"Здравствуйте, я полезный бот! Мое предназначение помогать в тех функциях которые "
"в меня "
"встроенные! "
"В них входит перевод текста на необходимый Вам язык! Подскажите как я могу к Вам "
"обращатся?")
bot.register_next_step_handler(msg, command_start)
else:
msg = bot.send_message(chat_id, "Oooops!")
bot.register_next_step_handler(msg, translater_func1)
def translater_func2(message):
global lang  # Create a global variable for the language the text will be translated into!
try:
chat_id = message.chat.id
translation_lang = message.text
if translation_lang == u'Погода':  # Button-press condition: if it is the weather button, run the code below!
user = user_dict[chat_id]
user.translation_lang = translation_lang
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('Переводчик')
msg = bot.send_message(chat_id, "Введите в каком городе/стране?: ", reply_markup=markup) # Спрашиваем в
# каком городе проверить и при этом крепим кнопку переводчика!
bot.register_next_step_handler(msg, weather_bot)
# Conditions for choosing the language to translate into!
if translation_lang == u'английский':
lang = 'en'
elif translation_lang == u'русский':
lang = 'ru'
elif translation_lang == u'украинский':
lang = 'uk'
elif translation_lang == u'испанский':
lang = 'es'
elif translation_lang == u'немецкий':
lang = 'de'
elif translation_lang == u'итальянский':
lang = 'it'
elif translation_lang == u'французский':
lang = 'fr'
elif translation_lang == u'китайский':
lang = 'zh'
user = user_dict[chat_id]
user.translation_lang = translation_lang
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('Смена языков для перевода', 'Смена языка для перевода', 'Погода')
msg = bot.send_message(chat_id, "Введите фразу для перевода : ", reply_markup=markup) # После определения
# языков просим ввести текст для перевода. А так же крепим кнопки!
bot.register_next_step_handler(msg, translate_phrases)
except:
        if translation_lang == '/start' or translation_lang == '/help':  # If the /start command was sent, go back to the beginning
msg = bot.send_message(chat_id,
"Здравствуйте, я полезный бот! Мое предназначение помогать в тех функциях которые "
"в меня "
"встроенные! "
"В них входит перевод текста на необходимый Вам язык! Подскажите как я могу к Вам "
"обращатся?")
bot.register_next_step_handler(msg, command_start)
else:
msg = bot.send_message(chat_id, 'Oooops')
bot.register_next_step_handler(msg, translater_func2)
def translate_phrases(message):
#global translation
chat_id = message.chat.id
phrases = message.text
    # Loop: if one of the buttons was pressed, the text is not translated; instead we switch apps or change languages
    while phrases == u'Погода' or phrases == u'Смена языков для перевода' or phrases == u'Смена языка для перевода':
        # These conditions run when one of the buttons was pressed:
if phrases == u'Погода':
user = user_dict[chat_id]
user.phrases = phrases
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('Переводчик')
msg = bot.send_message(chat_id, "Введите в каком городе/стране?: ", reply_markup=markup)
            bot.register_next_step_handler(msg, weather_bot)
            break
elif phrases == u'Смена языков для перевода':
user = user_dict[chat_id]
user.phrases = phrases
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('английский', 'русский', 'испанский', 'немецкий', 'итальянский', 'французский', 'китайский',
'Погода')
msg = bot.send_message(chat_id, "Укажите на каком языке будете писать (если такого языка нет в выпадающем "
"списке, напишите этот язык в сообщении!): ", reply_markup=markup)
            bot.register_next_step_handler(msg, translater_func1)
            break
elif phrases == u'Смена языка для перевода':
user = user_dict[chat_id]
user.phrases = phrases
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('английский', 'русский', 'испанский', 'немецкий', 'итальянский', 'французский', 'китайский',
'Погода')
msg = bot.send_message(chat_id, "На какой язык переводить (если такого языка нет в выпадающем "
"списке, напишите этот язык в сообщении!): ", reply_markup=markup)
bot.register_next_step_handler(msg, translater_func2)
break
else:
translation = language_translator.translate(
phrases,
source=lang1, target=lang).get_result()
msg = bot.send_message(chat_id, json.dumps(translation, indent=2, ensure_ascii=False))
bot.register_next_step_handler(msg, translate_phrases)
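# A minimal sketch (not part of the original file) of how the `language_translator`
# client used above is typically constructed with the IBM Watson SDK; the API key,
# service URL and version date below are placeholders, not values from this project.
#
#   from ibm_watson import LanguageTranslatorV3
#   from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
#
#   authenticator = IAMAuthenticator('<your-ibm-cloud-api-key>')
#   language_translator = LanguageTranslatorV3(version='2018-05-01', authenticator=authenticator)
#   language_translator.set_service_url('<your-service-url>')
#   print(language_translator.translate('Hello', source='en', target='ru').get_result())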
# The weather app starts here
@bot.message_handler(content_types=['text'])
def weather_bot(message):
global place
try:
chat_id = message.chat.id
place = message.text
user = user_dict[chat_id]
user.place = place
observation = owm.weather_at_place(place)
w = observation.get_weather()
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('Переводчик')
temp = w.get_temperature('celsius')["temp"]
temp = round(temp)
msg = bot.send_message(chat_id,
'В городе ' + place + ' сейчас ' + w.get_detailed_status() + ' Температура в этом '
'городе: ' + str(
temp), reply_markup=markup)
if temp < 10 and temp >= 0:
msg = bot.send_message(chat_id, 'Сейчас пипец как холодно, одевайся как танк!')
elif temp >= 10 and temp < 20:
msg = bot.send_message(chat_id, 'Тепло конечно, но загорать еще рано!')
elif temp >= 20 and temp < 25:
msg = bot.send_message(chat_id, 'Ну еще чуть чуть и загорать можно идти!')
elif temp > 25:
msg = bot.send_message(chat_id, 'Можно смело загорать!')
else:
msg = bot.send_message(chat_id, 'Снеговики наступааааают!!!')
bot.register_next_step_handler(msg, weather_bot)
except:
if place == u'Переводчик':
user = user_dict[chat_id]
user.place = place
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('английский', 'русский', 'испанский', 'немецкий', 'итальянский', 'французский', 'китайский',
'Погода')
msg = bot.send_message(chat_id, 'Да, давайте переводить. Укажите на каком языке будете писать!', reply_markup=markup)
bot.register_next_step_handler(msg, translater_func1)
elif place == '/start' or place == '/help':
msg = bot.send_message(chat_id,
"Здравствуйте, я полезный бот! Мое предназначение помогать в тех функциях которые в меня "
"встроенные! "
"В них входит перевод текста на необходимый Вам язык! Подскажите как я могу к Вам обращатся?")
bot.register_next_step_handler(msg, command_start)
else:
msg = bot.send_message(chat_id, 'Такого города нет... Уточните пожалуйста название!')
bot.register_next_step_handler(msg, weather_bot)
bot.polling()
while True:
pass
| IgorSopronyuk/translate_IBM_bot | translater_IBM_bot.py | translater_IBM_bot.py | py | 17,771 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "pyowm.OWM",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "telebot.TeleBot",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "config.TOKEN",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "ibm_cloud_sdk_core.authent... |
4391450776 | from flask import Flask, render_template, request
import sqlite3
app = Flask(__name__)
@app.route('/',methods = ['POST', 'GET'])
def home():
if request.method == 'GET':
return render_template('index.html')
@app.route('/thankyou',methods = ['POST', 'GET'])
def thankyou():
if request.method == 'GET':
return render_template('thankyou.html')
elif request.method == 'POST':
emailid = request.form.get('eid')
conn = sqlite3.connect("emailist")
cur=conn.cursor()
cur.execute("SELECT * from emails_table")
print(cur.fetchall())
#cur.execute('INSERT INTO emails_table (email) VALUES (?), ("t@iron.com")')
#cur.execute("INSERT INTO movie VALUES(%s,%s)",(movID,Name))
cur.execute("INSERT INTO emails_table (email) VALUES (?)", (emailid,))
conn.commit()
conn.close()
return render_template('thankyou.html')
if __name__ == '__main__':
app.run(debug=True)
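# The INSERT statement above assumes an existing `emails_table`; a one-time setup
# sketch (an assumption, not part of this file) could look like:
#
#   import sqlite3
#   conn = sqlite3.connect("emailist")
#   conn.execute("CREATE TABLE IF NOT EXISTS emails_table (email TEXT)")
#   conn.commit()
#   conn.close()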
| senthil-kumar-n/Subscribe_email | subscribe.py | subscribe.py | py | 1,004 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "flask.render_templat... |
20918248522 | import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity, linear_kernel
def GetMoviesByDescription(movieName):
movie_list=[]
csv_path = "cleaned data/movies.csv"
df = pd.read_csv(csv_path)
df['description'] = df['description'].fillna('')
#create the matrix
tf = TfidfVectorizer(analyzer='word',ngram_range=(1, 2),min_df=0, stop_words='english')
tfidf_matrix = tf.fit_transform(df['description'])
    #calculate the Cosine Similarity Score
cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)
md = df.reset_index()
titles = df['title']
indices = pd.Series(df.index, index=df['title'])
try:
idx = indices[movieName]
sim_scores = list(enumerate(cosine_sim[idx]))
sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
sim_scores = sim_scores[1:31]
movie_indices = [i[0] for i in sim_scores]
recdf = titles.iloc[movie_indices]
count = 0
for index, value in recdf.items():
count = count + 1
movie_list.append(value)
if(count == 8):
break
except:
movie_list.append("No Recommendation available.")
return movie_list
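# Hypothetical usage sketch (not in the original module): the argument must match a
# title in cleaned data/movies.csv exactly, otherwise the except branch returns
# "No Recommendation available.".
#
#   for title in GetMoviesByDescription("Toy Story (1995)"):
#       print(title)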
def GetMoviesByUserRating(movieName):
movies = pd.read_csv("raw data/movies.csv")
ratings = pd.read_csv("raw data/ratings.csv")
movievsuser = ratings.pivot(index='movieId',columns='userId',values='rating')
movievsuser.fillna(0, inplace=True)
ratingsByMovie = ratings.groupby('movieId')['rating'].agg('count')
ratingsByUser = ratings.groupby('userId')['rating'].agg('count')
movievsuser = movievsuser.loc[ratingsByMovie[ratingsByMovie > 40].index,:]
csr_data = csr_matrix(movievsuser.values)
movievsuser.reset_index(inplace=True)
# Using KNN algorithm to predict similarity with cosine distance
knn = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=20, n_jobs=-1)
knn.fit(csr_data)
reccomendCount = 8
listing = []
movieList = movies[movies['title'].str.contains(movieName)]
if len(movieList):
movie_id= movieList.iloc[0]['movieId']
movie_id = movievsuser[movievsuser['movieId'] == movie_id].index[0]
distances , indices = knn.kneighbors(csr_data[movie_id],n_neighbors=reccomendCount+1)
recommendedMovies = sorted(list(zip(indices.squeeze().tolist(),distances.squeeze().tolist())),key=lambda x: x[1])[:0:-1]
recMoviesList = []
for val in recommendedMovies:
movie_id = movievsuser.iloc[val[0]]['movieId']
idx = movies[movies['movieId'] == movie_id].index
recMoviesList.append({'Title':movies.iloc[idx]['title'].values[0],'Distance':val[1]})
df = pd.DataFrame(recMoviesList,index=range(1,reccomendCount+1))
df['Distance'] = pd.to_numeric(df['Distance'])
df= df.sort_values('Distance')
listing = df['Title']
return listing
def GetMoviesByGenre(movieName):
file_to_load = "raw data/movies.csv"
ratings = pd.read_csv("raw data/ratings.csv")
data = pd.read_csv(file_to_load)
    data['genres'] = data['genres'].str.replace('|', ' ', regex=False)  # '|' is a literal separator, not a regex
genre_data=data[['title','genres']]
genre_data=genre_data.set_index('title')
#convert genre column to array
cv = CountVectorizer()
X = cv.fit_transform(genre_data["genres"]).toarray()
similarities = cosine_similarity(X)
movie_index = data.loc[data['title'].str.contains(movieName)].index[0]
similarity_values = pd.Series(similarities[movie_index])
#We converted list into series in order to preserve the actual indexes of dataset even after sorting
similarity_values.sort_values(ascending=False)
similar_movie_indexes = list(similarity_values.sort_values(ascending=False).index)
#Remove the already watched movie from index list
similar_movie_indexes.remove(movie_index)
movie_list=[]
for i in range(8):
movie_list.append(genre_data.index[similar_movie_indexes[i]])
return movie_list
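# Hypothetical usage sketch (not in the original module): unlike the description-based
# recommender, this helper matches on a title substring via str.contains, so a partial
# name is enough.
#
#   print(GetMoviesByGenre("Jumanji"))  # up to 8 titles with the closest genre vectors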
def GetPredictionsForMovie(moviename):
complete_df = pd.read_csv("cleaned data/complete_df_with_predictions.csv")
mv = complete_df.loc[complete_df['title'].str.contains(moviename),['title']]
movie = mv.head(1)
names = movie.to_numpy()
name = names[0][0]
#based on all the users in the dataframe and their predictions what is the average rating the movie will get
movie_rating=round((complete_df.loc[complete_df['title']==name,['predicted rating']].values).mean(),2)
#from data already available what is the average of the movie
movie_gavg=round((complete_df.loc[complete_df['title']==name,['MAvg']].values).mean(),2)
percdiff = round(((movie_rating-movie_gavg)/movie_gavg*100),2)
summary = {'Predicted Rating': movie_rating, 'Actual Rating': movie_gavg ,"Percentage Difference%":percdiff}
return summary
def GetPredictions(moviename, userid):
complete_df = pd.read_csv("cleaned data/complete_df_with_predictions.csv")
#print(complete_df.head())
userid=int(userid)
try:
mv = complete_df.loc[complete_df['title'].str.contains(moviename),['title']]
movie = mv.head(1)
names = movie.to_numpy()
name = names[0][0]
#print(name)
#based on users past ratings what is the prediction for a particular movie
pred_rating=round(complete_df.loc[(complete_df['user']==userid) & (complete_df['title']==name),['predicted rating']].values[0][0],2)
#from data already available what is the average of the movie
user_rating=round(complete_df.loc[(complete_df['user']==userid) & (complete_df['title']==name),['rating']].values[0][0],2)
percdiff = round(((pred_rating-user_rating)/user_rating*100),2)
summary = {'Predicted Rating': pred_rating, 'Actual Rating': user_rating ,"Percentage Difference%":percdiff}
return summary
except:
pred_rating=0
user_rating=0
percdiff = 0
#based on all the users in the dataframe and their predictions what is the average rating a user gives
user_rating=round((complete_df.loc[complete_df['user']==userid,['predicted rating']].values).mean(),2)
#from data already available what is the average of the movie
user_uavg=round((complete_df.loc[complete_df['user']==userid,['UAvg']].values).mean(),2)
percdiff = round(((user_rating-user_uavg)/user_uavg*100),2)
print ("this user has not rated this movie")
summary = {'Status': "this user has not rated this movie, showing a prediction of what this user is likely to predict",'Predicted Rating': user_rating, 'Actual Rating': user_uavg ,"Percentage Difference%":percdiff}
return summary
| InsiyaKanjee/Project4 | initdb.py | initdb.py | py | 7,073 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.pairwise.linear_kernel",
"line_number": 20,
"usage_type":... |
24150609056 | import requests
import random
from utils.others import get_atitle, get_genre, get_t_from_u, get_urls
from utils.anilist import Anilist
from utils.techzapi import TechZApi
def get_genre_html(li):
x = """<a>{}</a>"""
html = ""
for i in li:
html += x.format(i.strip())
return html
def get_eps_html(data=None, api: TechZApi = None, anime=None):
if not data:
anime = api.gogo_search(anime)[0].get("id").strip()
data = api.gogo_anime(anime).get("episodes")
x = """<a class="ep-btn" href="{}">{}</a>"""
html = ""
pos = 1
for i in data:
i = i.replace("-episode-", "/")
html += x.format(f"/episode/{i}", str(pos))
pos += 1
if api:
return html, anime
return html
def get_eps_html2(data):
x = """<a class="ep-btn" href="{}">{}</a>"""
html = ""
pos = 1
for i in data:
i = i.replace("-episode-", "/")
html += x.format(f"/episode/{i}", str(pos))
pos += 1
return html
ANIME_POS = """<a href="{}"><div class="poster la-anime"> <div id="shadow1" class="shadow"> <div class="dubb">{}</div><div class="dubb dubb2">{}</div></div><div id="shadow2" class="shadow"> <img class="lzy_img" src="https://cdn.jsdelivr.net/gh/TechShreyash/AnimeDex@main/static/img/loading.gif" data-src="{}"> </div><div class="la-details"> <h3>{}</h3> <div id="extra"> <span>{}</span> <span class="dot"></span> <span>{}</span> </div></div></div></a>"""
ANIME_POS2 = """<a href="{}"><div class="poster la-anime"> <div id="shadow1" class="shadow"> <div class="dubb">{}</div></div><div id="shadow2" class="shadow"> <img class="lzy_img" src="https://cdn.jsdelivr.net/gh/TechShreyash/AnimeDex@main/static/img/loading.gif" data-src="{}"> </div><div class="la-details"> <h3>{}</h3> <div id="extra"> <span>{}</span> </div></div></div></a>"""
def animeRecHtml(data):
if not data:
return "Not Available"
if len(data) == 0:
return "Not Available"
html = ""
for i in data.get("recommendations").get("edges"):
i = i.get("node").get("mediaRecommendation")
img = i.get("coverImage")
if img:
img = img.get("medium").replace("small", "medium")
else:
img = i.get("bannerImage")
title = get_atitle(i.get("title"))
url = get_urls(title)
x = ANIME_POS.format(
url,
str(i.get("meanScore")).strip() + " / 100",
"Ep " + str(i.get("episodes")).strip(),
img,
title,
i.get("format"),
i.get("status"),
)
if x not in html:
html += x
return html
def animeRecHtml2(data):
if not data:
return "Not Available"
if len(data) == 0:
return "Not Available"
html = ""
for i in data:
i = i.get("node").get("mediaRecommendation")
img = i.get("coverImage")
if img:
img = img.get("medium").replace("small", "medium")
else:
img = i.get("bannerImage")
title = get_atitle(i.get("title"))
url = get_urls(title)
x = ANIME_POS.format(
url,
str(i.get("meanScore")).strip() + " / 100",
"Ep " + str(i.get("episodes")).strip(),
img,
title,
i.get("format"),
i.get("status"),
)
if x not in html:
html += x
return html
def get_trending_html(data):
html = ""
for id, i in data:
try:
img = i[5]
title = i[0]
url = get_urls(id)
x = ANIME_POS.format(url, i[1], "Ep " + str(i[2]), img, title, i[3], i[4])
html += x
except:
pass
return html
def get_search_html(data):
html = ""
for i in data:
if "dub" in i.get("id").lower():
d = "DUB"
else:
d = "SUB"
x = ANIME_POS2.format(
"/anime/" + i.get("id"),
d,
i.get("img"),
i.get("title"),
"Released: " + i.get("year"),
)
html += x
return html
def get_recent_html(data):
html = ""
for i in data:
url = i.get("id").split("-episode-")[0]
x = ANIME_POS.format(
f"/anime/{url}",
i.get("lang"),
"Ep " + str(i.get("episode")),
i.get("img"),
i.get("title"),
f"Latest {i.get('lang')}",
"HD",
)
html += x
return html
def get_selector_btns(url, current, episodes):
if episodes < 2:
return ""
selector = ""
if current == 1:
x = """<a class="btns" href="usrl"><button class="sbtn inline-flex text-white bg-indigo-500 border-0 py-2 px-6 focus:outline-none hover:bg-indigo-600 rounded text-lg ">Episode NEXT<i style="margin-left:10px; margin-right: auto;" class="fa fa-arrow-circle-right"></i></button></a>"""
selector += x.replace("usrl", url + str(current + 1)).replace(
"NEXT", str(current + 1)
)
elif current == episodes:
x = """<a class="btns" href="usrl"><button class="sbtn inline-flex text-white bg-indigo-500 border-0 py-2 px-6 focus:outline-none hover:bg-indigo-600 rounded text-lg "><i class="fa fa-arrow-circle-left"></i>Episode PREV</button></a>"""
selector += x.replace("usrl", url + str(current - 1)).replace(
"PREV", str(current - 1)
)
else:
x = """<a class="btns" href="usrl"><button class="sbtn inline-flex text-white bg-indigo-500 border-0 py-2 px-6 focus:outline-none hover:bg-indigo-600 rounded text-lg "><i class="fa fa-arrow-circle-left"></i>Episode PREV</button></a>"""
selector += x.replace("usrl", url + str(current - 1)).replace(
"PREV", str(current - 1)
)
x = """<a class="btns" href="usrl"><button class="sbtn inline-flex text-white bg-indigo-500 border-0 py-2 px-6 focus:outline-none hover:bg-indigo-600 rounded text-lg ">Episode NEXT<i style="margin-left:10px; margin-right: auto;" class="fa fa-arrow-circle-right"></i></button></a>"""
selector += x.replace("usrl", url + str(current + 1)).replace(
"NEXT", str(current + 1)
)
return selector
SLIDER_HTML = """<div class="mySlides fade"> <div class="data-slider"> <p class="spotlight">{}</p><h1>{}</h1> <div class="extra1"> <span class="year"><i class="fa fa-play-circle"></i>{}</span> <span class="year year2"><i class="fa fa-calendar"></i>{}</span> <span class="cbox cbox1">{}</span> <span class="cbox cbox2">HD</span> </div><p class="small-synop">{}</p><div id="watchh"> <a href="{}" class="watch-btn"> <i class="fa fa-play-circle"></i> Watch Now </a> <a href="{}" class="watch-btn watch-btn2"> <i class="fa fa-info-circle"></i> Details<i class="fa fa-angle-right"></i> </a> </div></div><div class="shado"> <a href="{}"></a> </div><img src="{}"> </div>"""
def slider_gen():
data = Anilist().trending()
random.shuffle(data)
html = ""
pos = 1
for i in data:
img = i.get("bannerImage")
if not img:
img = (
i.get("coverImage")
.get("medium")
.replace("small", "large")
.replace("medium", "large")
)
title = get_atitle(i.get("title"))
url = get_urls(title)
temp = SLIDER_HTML.format(
f"#{pos} Spotlight",
title,
i.get("type"),
i.get("status"),
get_genre(i.get("genres")),
i.get("description"),
url.replace("/anime/", "/episode/") + "/1",
url,
url,
img,
)
html += temp
pos += 1
return html
def episodeHtml(episode, title, dl=True):
isSub = episode.get("SUB")
isDub = episode.get("DUB")
DL = episode.get("DL")
sub = dub = dlsub = dldub = ""
defa = 0
s, d = 1, 1
if isSub:
for i in isSub:
if defa == 0:
defa = f"/embed?url={i}&title={title}"
sub += f"""<div class="sitem"> <a class="sobtn sactive" onclick="selectServer(this)" data-value="/embed?url={i}&title={title}">Server{s}</a> </div>"""
else:
sub += f"""<div class="sitem"> <a class="sobtn" onclick="selectServer(this)" data-value="/embed?url={i}&title={title}">Server{s}</a> </div>"""
s += 1
if isDub:
for i in isDub:
if defa == 0:
defa = f"/embed?url={i}&title={title}"
dub += f"""<div class="sitem"> <a class="sobtn sactive" onclick="selectServer(this)" data-value="/embed?url={i}&title={title}">Server{d}</a> </div>"""
else:
dub += f"""<div class="sitem"> <a class="sobtn" onclick="selectServer(this)" data-value="/embed?url={i}&title={title}">Server{d}</a> </div>"""
d += 1
if DL:
link = DL.get("SUB")
if link:
for n, l in link.items():
dlsub += f"""<div class="sitem"> <a class="sobtn download" target="_blank" href="{l}"><i class="fa fa-download"></i>{n}</a> </div>"""
link = DL.get("DUB")
if link:
for n, l in link.items():
dldub += f"""<div class="sitem"> <a class="sobtn download" target="_blank" href="{l}"><i class="fa fa-download"></i>{n}</a> </div>"""
if sub != "":
t4 = f"""<div class="server"> <div class="stitle"> <i class="fa fa-closed-captioning"></i>SUB: </div><div class="slist">{sub}</div></div>"""
else:
t4 = ""
if dub != "":
t5 = f""" <div class="server sd"> <div class="stitle"> <i class="fa fa-microphone-alt"></i>DUB: </div><div class="slist">{dub}</div></div>"""
else:
t5 = ""
if dlsub != "":
t6 = f""" <div class="server"> <div class="stitle"> <i class="fa fa-closed-captioning"></i>SUB: </div><div class="slist">{dlsub}</div></div>"""
else:
t6 = ""
if dldub != "":
t7 = f""" <div class="server sd"> <div class="stitle"> <i class="fa fa-microphone-alt"></i>DUB: </div><div class="slist">{dldub}</div></div>"""
else:
t7 = ""
t8 = f"""<a id="showdl" onclick="showDownload()"><i class="fa fa-download"></i>Download</a><div id="dldiv" class="dldiv"><h4 id="download">Download Links:</h4>{t6}{t7}</div>"""
html = t4 + t5
if dl:
html += t8
return html, defa
| TechShreyash/AnimeDex | utils/html_gen.py | html_gen.py | py | 10,463 | python | en | code | 186 | github-code | 6 | [
{
"api_name": "utils.techzapi.TechZApi",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "utils.others.get_atitle",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "utils.others.get_urls",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": ... |
16021983077 | import numpy as np
import matplotlib.pyplot as plt
from cartoplot import cartoplot
import imageio
from netCDF4 import Dataset
import pickle
def get(string):
"""
"Lakes":0,
"Oceans":1,
"Okhotsk":2,
"Bering":3,
"Hudson":4,
"St Lawrence":5,
"Baffin":6,
"Greenland":7,
"Barents":8,
"Kara":9,
"Laptev":10,
"East Siberian":11,
"Chukchi":12,
"Beaufort":13,
"Canadian Archipelago":14,
"Central Arctic":15,
"Land":20,
"Coast":21}
"""
path_grid = "/home/robbie/Dropbox/Data/grid.nc"
if string == 'lon':
grid_data = Dataset(path_grid)
lon = np.array(grid_data.variables["lon"])
return(lon)
elif string == 'lat':
grid_data = Dataset(path_grid)
lat = np.array(grid_data.variables["lat"])
return(lat)
elif string == 'mask':
im = imageio.imread('J_Mask.tif')
mask = np.flipud(np.array(im))
return(mask)
def EASE():
"""
"Lakes":0,
"Oceans":1,
"Okhotsk":2,
"Bering":3,
"Hudson":4,
"St Lawrence":5,
"Baffin":6,
"Greenland":7,
"Barents":8,
"Kara":9,
"Laptev":10,
"East Siberian":11,
"Chukchi":12,
"Beaufort":13,
"Canadian Archipelago":14,
"Central Arctic":15,
"Land":20,
"Coast":21}
"""
mask = pickle.load( open( "/home/robbie/Dropbox/Code/mask_348x348.p", "rb" ) )
return(mask)
def OSISAF():
"""
"Lakes":0,
"Oceans":1,
"Okhotsk":2,
"Bering":3,
"Hudson":4,
"St Lawrence":5,
"Baffin":6,
"Greenland":7,
"Barents":8,
"Kara":9,
"Laptev":10,
"East Siberian":11,
"Chukchi":12,
"Beaufort":13,
"Canadian Archipelago":14,
"Central Arctic":15,
"Land":20,
"Coast":21}
"""
mask = pickle.load( open( "/home/robbie/custom_modules/mask_1120x760.p", "rb" ) )
return(mask)
def plot(region_string):
regions_dict = {"Lakes":0,
"Oceans":1,
"Okhotsk":2,
"Bering":3,
"Hudson":4,
"St Lawrence":5,
"Baffin":6,
"Greenland":7,
"Barents":8,
"Kara":9,
"Laptev":10,
"East Siberian":11,
"Chukchi":12,
"Beaufort":13,
"Canadian Archipelago":14,
"Central Arctic":15,
"Land":20,
"Coast":21}
code = regions_dict[region_string]
fig = plt.figure(figsize=(10, 8))
cartoplot(get('lon'), get('lat'), get('mask'),color_scale=(code+1,code-1))
print(code)
plt.show()
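# Hypothetical usage sketch (not part of the original module); the region name must be
# one of the keys listed in the docstrings above.
#
#   plot("Beaufort")      # highlight the Beaufort Sea cells of the mask
#   ease_mask = EASE()    # 348x348 EASE-grid region mask loaded from the pickle file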
| robbiemallett/custom_modules | mask.py | mask.py | py | 3,596 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "netCDF4.Dataset",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_n... |
19240250728 | import tensorflow as tf
import tensorflow_datasets as tfds
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)
def load_celeba_dataset(args, shuffle_files=False, batch_size=128):
ds_train, ds_test = tfds.load(name='celeb_a', split=['train', 'test'], data_dir=args.data_dir,
batch_size=batch_size, download=True, shuffle_files=shuffle_files)
return ds_train, ds_test
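# Minimal usage sketch (an assumption, not part of the original module): `args` only
# needs a `data_dir` attribute telling tensorflow_datasets where to store CelebA.
#
#   from types import SimpleNamespace
#
#   args = SimpleNamespace(data_dir="/tmp/tfds")
#   ds_train, ds_test = load_celeba_dataset(args, shuffle_files=True, batch_size=64)
#   for batch in ds_train.take(1):
#       print(batch["image"].shape, batch["attributes"]["Male"].shape)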
| UCSC-REAL/fair-eval | celeba/experiments/data.py | data.py | py | 452 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "tensorflow.compat.v1.ConfigProto",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1.Session",
"line_number": 7,
"usage_type": "call"
},
{
... |
2893376277 | # -*- encoding: UTF-8 -*-
from django.http import Http404
from django.db.models.loading import get_model
from django.contrib.staticfiles.storage import staticfiles_storage
from django.contrib.admin.views.decorators import staff_member_required
from django.core.urlresolvers import reverse
from django.shortcuts import render
from django.forms.widgets import Select
import models
def detail(request, app, cls, slug):
    """ generic view that returns the rendered content of a CMS model directly """
model = get_model(app, cls)
if model and issubclass(model, models.CMSModel):
return model.get_response(request, slug)
raise Http404
@staff_member_required
def imagechooser(request, app, cls):
model = get_model(app, cls)
datas = {
'tinymce_path': staticfiles_storage.url('tiny_mce'),
'chosen_path': staticfiles_storage.url('chosen'),
'admin_path': staticfiles_storage.url('admin')
}
if model and issubclass(model, models.CMSModel):
if getattr(model.CMSMeta, 'image_model', None):
images = [('', '----')]
for fileitem in model.CMSMeta.image_model.objects.all().order_by('title'):
if fileitem.file.name.lower().endswith(('.jpg', '.jpeg', '.gif', '.png')):
images.append((fileitem.get_absolute_url(), fileitem.title))
datas['select_files'] = Select(choices=images, attrs={'class': 'chosen-single', 'style': 'width:200px'}).render('file', '')
#datas['form_upload'] = None
# gestion upload if any
# send result back
return render(request, 'imagechooser.html', datas)
@staff_member_required
def tinymcejs(request, app, cls):
datas = {
'tinymce_path': staticfiles_storage.url('tiny_mce'),
'imagechooser_path': reverse('picocms-imagechooser', args=(app, cls))
}
return render(request, 'tiny_mce_src.js', datas, content_type='application/javascript')
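# A hypothetical urls.py sketch (not part of this file) showing how these views could be
# wired up; 'picocms-imagechooser' is the name reversed in tinymcejs above, the other
# route names and URL patterns are placeholders.
#
#   from django.conf.urls import url
#   from picocms import views
#
#   urlpatterns = [
#       url(r'^imagechooser/(?P<app>\w+)/(?P<cls>\w+)/$', views.imagechooser, name='picocms-imagechooser'),
#       url(r'^tinymce/(?P<app>\w+)/(?P<cls>\w+)\.js$', views.tinymcejs, name='picocms-tinymce'),
#       url(r'^(?P<app>\w+)/(?P<cls>\w+)/(?P<slug>[-\w]+)/$', views.detail, name='picocms-detail'),
#   ]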
| revolunet/django-picocms | picocms/views.py | views.py | py | 1,926 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "django.db.models.loading.get_model",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "models.CMSModel",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "django.http.Http404",
"line_number": 17,
"usage_type": "name"
},
{
"api_n... |
8352755533 | from __future__ import absolute_import, unicode_literals
import base64
import json
import random
import warnings
import websocket
from c8 import constants
from c8.api import APIWrapper
from c8.apikeys import APIKeys
from c8.c8ql import C8QL
from c8.collection import StandardCollection
from c8.exceptions import (
CollectionCreateError,
CollectionDeleteError,
CollectionFindError,
CollectionListError,
CollectionPropertiesError,
EventCreateError,
EventGetError,
FabricCreateError,
FabricDeleteError,
FabricGetMetadataError,
FabricListError,
FabricPropertiesError,
FabricSetMetadataError,
FabricUpdateMetadataError,
GetAPIKeys,
GetDcDetailError,
GetDcListError,
GetLocalDcError,
GraphCreateError,
GraphDeleteError,
GraphListError,
RestqlCreateError,
RestqlCursorError,
RestqlDeleteError,
RestqlExecuteError,
RestqlImportError,
RestqlListError,
RestqlUpdateError,
RestqlValidationError,
ServerConnectionError,
ServerVersionError,
SpotRegionAssignError,
SpotRegionUpdateError,
StreamAppGetSampleError,
StreamCommunicationError,
StreamConnectionError,
StreamCreateError,
StreamDeleteError,
StreamListError,
StreamPermissionError,
)
from c8.executor import AsyncExecutor, BatchExecutor, DefaultExecutor
from c8.graph import Graph
from c8.keyvalue import KV
from c8.request import Request
from c8.search import Search
from c8.stream_apps import StreamApps
from c8.stream_collection import StreamCollection
__all__ = [
"StandardFabric",
"AsyncFabric",
"BatchFabric",
]
ENDPOINT = "/streams"
def raise_timeout(signum, frame):
raise TimeoutError
class Fabric(APIWrapper):
"""Base class for Fabric API wrappers.
:param connection: HTTP connection.
:type connection: c8.connection.Connection
:param executor: API executor.
:type executor: c8.executor.Executor
"""
def enum(**enums):
return type("Enum", (), enums)
SPOT_CREATION_TYPES = enum(
AUTOMATIC="automatic", NONE="none", SPOT_REGION="spot_region"
)
def __init__(self, connection, executor):
self.url = connection.url
self.header = connection.headers
self.stream_port = constants.STREAM_PORT
super(Fabric, self).__init__(connection, executor)
def __getitem__(self, name):
"""Return the collection API wrapper.
:param name: Collection name.
:type name: str | unicode
:returns: Collection API wrapper.
:rtype: c8.collection.StandardCollection
"""
return self.collection(name)
@property
def name(self):
"""Return fabric name.
:returns: Fabric name.
:rtype: str | unicode
"""
return self.fabric_name
@property
def c8ql(self):
"""Return C8QL (C8Db Query Language) API wrapper.
:returns: C8QL API wrapper.
:rtype: c8.c8ql.C8QL
"""
return C8QL(self._conn, self._executor)
@property
def key_value(self):
"""Return KV (Key Value) API wrapper.
:returns: KV API wrapper.
:rtype: c8.keyvalue.KV
"""
return KV(self._conn, self._executor)
def on_change(self, collection, callback, timeout=60):
"""Execute given input function on receiving a change.
:param collection: Collection name(s) regex to listen for
:type collection: str
:param timeout: timeout value
:type timeout: int
:param callback: Function to execute on a change
:type callback: function
"""
if not callback:
raise ValueError("You must specify a callback function")
if not collection:
raise ValueError(
"You must specify a collection on which realtime "
"data is to be watched!"
)
namespace = constants.STREAM_LOCAL_NS_PREFIX + self.fabric_name
subscription_name = "%s-%s-subscription-%s" % (
self.tenant_name,
self.fabric_name,
str(random.randint(1, 1000)),
)
url = self.url.split("//")[1].split(":")[0]
topic = "wss://{}/_ws/ws/v2/consumer/persistent/{}/{}/{}/{}".format(
url, self.tenant_name, namespace, collection, subscription_name
)
ws = websocket.create_connection(topic, header=self.header, timeout=timeout)
try:
# "pyC8 Realtime: Begin monitoring realtime updates for " + topic
while True:
msg = json.loads(ws.recv())
data = base64.b64decode(msg["payload"])
ws.send(json.dumps({"messageId": msg["messageId"]}))
callback(data)
except websocket.WebSocketTimeoutException:
pass
except Exception as e:
raise Exception(e)
finally:
ws.close()
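    # Minimal usage sketch (an assumption, not part of the SDK source; `fabric` stands
    # for a connected StandardFabric handle): the callback receives the raw payload
    # bytes of every change published on the collection's local stream.
    #
    #   def on_document_change(payload):
    #       print(payload)
    #
    #   fabric.on_change("employees", on_document_change, timeout=120)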
def properties(self):
"""Return fabric properties.
:returns: Fabric properties.
:rtype: dict
:raise c8.exceptions.FabricPropertiesError: If retrieval fails.
"""
request = Request(
method="get",
endpoint="/database/current",
)
def response_handler(resp):
if not resp.is_success:
raise FabricPropertiesError(resp, request)
result = resp.body["result"]
result["system"] = result.pop("isSystem")
return result
return self._execute(request, response_handler)
def update_spot_region(self, tenant, fabric, new_dc):
"""Updates spot primary region for the geo-fabric
:param tenant: tenant name
:type tenant: str
:param fabric: fabric name
:type fabric: str
:param new_dc: New spot region
:type new_dc: str
        :returns: True if request successful, False otherwise
:rtype: bool
:raise c8.exceptions.SpotRegionUpdateError: If updation fails.
"""
request = Request(
method="put", endpoint="/_fabric/{}/database/{}".format(fabric, new_dc)
)
def response_handler(resp):
if not resp.is_success:
raise SpotRegionUpdateError(resp, request)
return True
return self._execute(request, response_handler)
def fabrics_detail(self):
request = Request(method="get", endpoint="/database/user")
def response_handler(resp):
if not resp.is_success:
raise FabricListError(resp, request)
return [
{"name": col["name"], "options": col["options"]}
for col in map(dict, resp.body["result"])
]
return self._execute(request, response_handler)
def version(self):
"""Return C8Db server version.
:returns: Server version.
:rtype: str | unicode
:raise c8.exceptions.ServerVersionError: If retrieval fails.
"""
request = Request(method="get", endpoint="/version", params={"details": False})
def response_handler(resp):
if not resp.is_success:
raise ServerVersionError(resp, request)
return resp.body["version"]
return self._execute(request, response_handler)
def ping(self):
"""Ping the C8Db server by sending a test request.
:returns: Response code from server.
:rtype: int
:raise c8.exceptions.ServerConnectionError: If ping fails.
"""
request = Request(
method="get",
endpoint="/collection",
)
def response_handler(resp):
code = resp.status_code
if code in {401, 403}:
raise ServerConnectionError("bad username and/or password")
if not resp.is_success:
raise ServerConnectionError(resp.error_message or "bad server response")
return code
return self._execute(request, response_handler)
#########################
# Datacenter Management #
#########################
def dclist(self, detail=False):
"""Return the list of names of Datacenters
        :param detail: Return a detailed list of DCs if set to True, else only DC names
:type: boolean
:returns: DC List.
:rtype: [str | unicode ]
:raise c8.exceptions.GetDcListError: If retrieval fails.
"""
properties = self.properties()
if not detail:
return properties["options"]["dcList"].split(",")
tenant_name = properties["options"]["tenant"]
request = Request(
method="get", endpoint="/datacenter/_tenant/{}".format(tenant_name)
)
def response_handler(resp):
if not resp.is_success:
raise GetDcListError(resp, request)
dc_list = resp.body[0]["dcInfo"]
for dc in dc_list:
if dc["name"] not in properties["options"]["dcList"]:
dc_list.remove(dc)
return dc_list
return self._execute(request, response_handler, custom_prefix="")
    def localdc(self, detail=True):
        """Fetch data for the local/regional data center.
:param detail: Details of local DC if set to true else only DC name.
:type: boolean
:returns: Local DC details.
:rtype: str | dict
:raise c8.exceptions.GetLocalDcError: If retrieval fails.
"""
request = Request(method="get", endpoint="/datacenter/local")
def response_handler(resp):
if not resp.is_success:
raise GetLocalDcError(resp, request)
if detail:
return resp.body
return resp.body["name"]
return self._execute(request, response_handler, custom_prefix="")
def get_dc_detail(self, dc):
"""Fetch data for data center, identified by dc-name
:param dc: DC name
:type: str
:returns: DC details.
:rtype: dict
:raise c8.exceptions.GetDcDetailError: If retrieval fails.
"""
request = Request(method="get", endpoint="/datacenter/{}".format(dc))
def response_handler(resp):
if not resp.is_success:
raise GetDcDetailError(resp, request)
return resp.body
return self._execute(request, response_handler, custom_prefix="")
def dclist_all(self):
"""Fetch data about all the data centers
:returns: DC List.
:rtype: [str | unicode ]
:raise c8.exceptions.GetDcListError: If retrieval fails.
"""
request = Request(method="get", endpoint="/datacenter/all")
def response_handler(resp):
if not resp.is_success:
raise GetDcListError(resp, request)
return resp.body
return self._execute(request, response_handler, custom_prefix="")
def assign_dc_spot(self, dc, spot_region=False):
"""Assigns spot region of a fed
:param dc: dc name
:type dc: str
:param spot_region: If True, makes the region a spot region
:type spot_region: bool
:returns: True if request successful, False otherwise
:rtype: bool
:raise c8.exceptions.SpotRegionAssignError: If assignment fails.
"""
data = json.dumps(spot_region)
request = Request(method="put", endpoint="/datacenter/{}/{}".format(dc, data))
def response_handler(resp):
if not resp.is_success:
raise SpotRegionAssignError(resp, request)
return True
return self._execute(request, response_handler, custom_prefix="")
#######################
# Fabric Management #
#######################
def fabrics(self):
"""Return the names all fabrics.
:returns: Fabric names.
:rtype: [str | unicode]
:raise c8.exceptions.FabricListError: If retrieval fails.
"""
request = Request(method="get", endpoint="/database")
def response_handler(resp):
if not resp.is_success:
raise FabricListError(resp, request)
return resp.body["result"]
return self._execute(request, response_handler)
def has_fabric(self, name):
"""Check if a fabric exists.
:param name: Fabric name.
:type name: str | unicode
:returns: True if fabric exists, False otherwise.
:rtype: bool
"""
return name in self.fabrics()
def create_fabric(
self,
name,
spot_dc=None,
users=None,
dclist=None,
spot_creation_type=SPOT_CREATION_TYPES.AUTOMATIC,
):
"""Create a new fabric.
:param name: Fabric name.
:type name: str | unicode
:param spot_creation_type: Specifying the mode of creating geo-fabric.
If you use AUTOMATIC, a random spot region
will be assigned by the system. If you
specify NONE, a geo-fabric is created
without the spot properties. If you specify
SPOT_REGION,pass the corresponding spot
region in the spot_dc parameter.
        :type spot_creation_type: Enum containing spot region creation types
        :param spot_dc: Spot Region name, if spot_creation_type is set to
            SPOT_REGION
        :type spot_dc: str
:param users: List of users with access to the new fabric
:type users: [str | unicode]
:param dclist: list of strings of datacenters
:type dclist: [str | unicode]
:returns: True if fabric was created successfully.
:rtype: bool
:raise c8.exceptions.FabricCreateError: If create fails.
"""
data = {"name": name}
if users is not None:
data["users"] = users
options = {}
dcl = ""
if dclist:
# Process dclist param (type list) to build up comma-separated
# string of DCs
for dc in dclist:
if len(dcl) > 0:
dcl += ","
dcl += dc
options["dcList"] = dcl
if spot_creation_type == self.SPOT_CREATION_TYPES.NONE:
options["spotDc"] = ""
elif spot_creation_type == self.SPOT_CREATION_TYPES.SPOT_REGION and spot_dc:
options["spotDc"] = spot_dc
data["options"] = options
request = Request(method="post", endpoint="/database", data=data)
def response_handler(resp):
if not resp.is_success:
raise FabricCreateError(resp, request)
return True
return self._execute(request, response_handler)
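    # Minimal usage sketch (an assumption; `fabric` is a connected StandardFabric handle
    # and the fabric/datacenter names are placeholders):
    #
    #   fabric.create_fabric(
    #       "demo_fabric",
    #       dclist=["dc-a", "dc-b"],
    #       spot_creation_type=fabric.SPOT_CREATION_TYPES.AUTOMATIC,
    #   )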
def get_fabric_metadata(self):
"""Fetch information about a GeoFabric.
:returns: Fabric information.
:rtype: dict
:raise c8.exceptions.FabricGetMetadataError: If retrieval fails.
"""
request = Request(method="get", endpoint="/database/metadata")
def response_handler(resp):
if not resp.is_success:
raise FabricGetMetadataError(resp, request)
return resp.body["result"]
return self._execute(request, response_handler)
def set_fabric_metadata(self, metadata):
"""Set the GeoFabric Metadata.
:param metadata: Fabric metadata.
:type metadata: dict
:returns: True if metadata was set successfully.
:rtype: bool
:raise c8.exceptions.FabricSetMetadataError: If set fails.
"""
data = {"metadata": metadata}
request = Request(method="put", endpoint="/database/metadata", data=data)
def response_handler(resp):
if not resp.is_success:
raise FabricSetMetadataError(resp, request)
return resp.body["result"]
return self._execute(request, response_handler)
    def update_fabric_metadata(self, metadata):
        """Modify the GeoFabric metadata.
:param metadata: Fabric metadata.
:type metadata: dict
:returns: True if metadata was set successfully.
:rtype: bool
:raise c8.exceptions.FabricUpdateMetadataError: If update fails.
"""
data = {"metadata": metadata}
request = Request(method="patch", endpoint="/database/metadata", data=data)
def response_handler(resp):
if not resp.is_success:
raise FabricUpdateMetadataError(resp, request)
return resp.body["result"]
return self._execute(request, response_handler)
def delete_fabric(self, name, ignore_missing=False):
"""Delete the fabric.
:param name: Fabric name.
:type name: str | unicode
:param ignore_missing: Do not raise an exception on missing fabric.
:type ignore_missing: bool
:returns: True if fabric was deleted successfully, False if fabric
was not found and **ignore_missing** was set to True.
:rtype: bool
:raise c8.exceptions.FabricDeleteError: If delete fails.
"""
request = Request(method="delete", endpoint="/database/{}".format(name))
def response_handler(resp):
if resp.error_code == 1228 and ignore_missing:
return False
if not resp.is_success:
raise FabricDeleteError(resp, request)
return resp.body["result"]
return self._execute(request, response_handler)
#########################
# Collection Management #
#########################
def collection(self, name):
"""Return the standard collection API wrapper.
:param name: Collection name.
:type name: str | unicode
:returns: Standard collection API wrapper.
:rtype: c8.collection.StandardCollection
"""
if self.has_collection(name):
return StandardCollection(self._conn, self._executor, name)
else:
raise CollectionFindError("Collection not found")
def has_collection(self, name):
"""Check if collection exists in the fabric.
:param name: Collection name.
:type name: str | unicode
:returns: True if collection exists, False otherwise.
:rtype: bool
"""
return any(col["name"] == name for col in self.collections())
def collections(self, collectionModel=None):
"""Return the collections in the fabric.
:returns: Collections in the fabric and their details.
:rtype: [dict]
:raise c8.exceptions.CollectionListError: If retrieval fails.
"""
request = Request(method="get", endpoint="/collection")
def response_handler(resp):
if not resp.is_success:
raise CollectionListError(resp, request)
if collectionModel is not None:
docs = [
col
for col in map(dict, resp.body["result"])
if col["collectionModel"] == collectionModel
]
else:
docs = [col for col in map(dict, resp.body["result"])]
collections = []
for col in docs:
c = {
"id": col["id"],
"name": col["name"],
"system": col["isSystem"],
"type": StandardCollection.types[col["type"]],
"status": StandardCollection.statuses[col["status"]],
"collectionModel": col["collectionModel"],
}
if "isSpot" in col.keys():
c["isSpot"] = col["isSpot"]
else:
c["strongConsistency"] = col.get("strongConsistency", False)
collections.append(c)
return collections
return self._execute(request, response_handler)
def create_collection(
self,
name,
sync=False,
edge=False,
user_keys=True,
key_increment=None,
key_offset=None,
key_generator="traditional",
shard_fields=None,
index_bucket_count=None,
sync_replication=None,
enforce_replication_factor=None,
strong_consistency=None,
local_collection=False,
is_system=False,
stream=False,
*,
spot_collection=None,
):
"""Create a new collection.
:param name: Collection name.
:type name: str | unicode
:param sync: If set to True, document operations via the collection
will block until synchronized to disk by default.
:type sync: bool
:param edge: If set to True, an edge collection is created.
:type edge: bool
:param key_generator: Used for generating document keys. Allowed values
are "traditional" or "autoincrement".
:type key_generator: str | unicode
:param user_keys: If set to True, users are allowed to supply document
keys. If set to False, the key generator is solely responsible for
supplying the key values.
:type user_keys: bool
:param key_increment: Key increment value. Applies only when value of
**key_generator** is set to "autoincrement".
:type key_increment: int
:param key_offset: Key offset value. Applies only when value of
**key_generator** is set to "autoincrement".
:type key_offset: int
:param shard_fields: Field(s) used to determine the target shard.
:type shard_fields: [str | unicode]
:param index_bucket_count: Number of buckets into which indexes using
hash tables are split. The default is 16, and this number has to be
a power of 2 and less than or equal to 1024. For large collections,
one should increase this to avoid long pauses when the hash table
has to be initially built or re-sized, since buckets are re-sized
individually and can be initially built in parallel. For instance,
64 may be a sensible value for 100 million documents.
:type index_bucket_count: int
:param sync_replication: If set to True, server reports success only
when collection is created in all replicas. You can set this to
False for faster server response, and if full replication is not a
concern.
:type sync_replication: bool
:param enforce_replication_factor: Check if there are enough replicas
available at creation time, or halt the operation.
:type enforce_replication_factor: bool
:param strong_consistency: If True, strong consistency is enabled
:type strong_consistency: bool
:param is_system: If True, able to create system collections
:type is_system: bool
:param stream: If True, create a local stream for collection.
:type stream: bool
:param spot_collection: If True, it is a spot collection.
Deprecated. Use If strong_consistency instead.
:type spot_collection: bool
:returns: Standard collection API wrapper.
:rtype: c8.collection.StandardCollection
:raise c8.exceptions.CollectionCreateError: If create fails.
"""
# Newer versions of GDN has renamed the isSpot property to strongConsistency.
# We are using a keyword only argument to keep the backward compatibility of the SDK
if spot_collection is not None and strong_consistency is not None:
            raise TypeError("create_collection received both spot_collection and strong_consistency")
elif spot_collection is not None:
warnings.simplefilter("once", DeprecationWarning)
warnings.warn("spot_collection is deprecated. Use strong_consistency instead.", DeprecationWarning, 2)
warnings.simplefilter("default", DeprecationWarning)
isSpot = spot_collection
elif strong_consistency is not None:
isSpot = strong_consistency
else:
isSpot = False
key_options = {"type": key_generator, "allowUserKeys": user_keys}
if key_increment is not None:
key_options["increment"] = key_increment
if key_offset is not None:
key_options["offset"] = key_offset
if spot_collection and local_collection:
return "Collection can either be spot or local"
else:
# Both strong_consistency and isSpot is added to the request body.
# Correct value will be picked by the GDN depending on the supported property.
data = {
"name": name,
"waitForSync": sync,
"keyOptions": key_options,
"type": 3 if edge else 2,
"isSpot": isSpot,
"strongConsistency": isSpot,
"isLocal": local_collection,
"isSystem": is_system,
"stream": stream,
}
if shard_fields is not None:
data["shardKeys"] = shard_fields
if index_bucket_count is not None:
data["indexBuckets"] = index_bucket_count
params = {}
if sync_replication is not None:
params["waitForSyncReplication"] = sync_replication
if enforce_replication_factor is not None:
params["enforceReplicationFactor"] = enforce_replication_factor
request = Request(
method="post", endpoint="/collection", params=params, data=data
)
def response_handler(resp):
if resp.is_success:
return self.collection(name)
raise CollectionCreateError(resp, request)
return self._execute(request, response_handler)
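    # Minimal usage sketch (an assumption; `fabric` is a connected StandardFabric handle):
    #
    #   employees = fabric.create_collection("employees", sync=True, stream=True)
    #   employees.insert({"_key": "1", "name": "Jean"})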
def update_collection_properties(
self, collection_name, has_stream=None, wait_for_sync=None
):
"""Changes the properties of a collection.
Note: except for waitForSync and hasStream, collection properties cannot be changed once a collection is created.
:param collection_name: Collection name.
:type collection_name: str | unicode
:param has_stream: True if creating a live collection stream.
:type has_stream: bool
:param wait_for_sync: True if all data must be synced to storage before operation returns.
:type wait_for_sync: bool
"""
data = {}
if has_stream is not None:
data["hasStream"] = has_stream
if wait_for_sync is not None:
data["waitForSync"] = wait_for_sync
request = Request(
method="put",
endpoint="/collection/{}/properties".format(collection_name),
data=data,
)
def response_handler(resp):
if resp.is_success:
return resp.body
raise CollectionPropertiesError(resp, request)
return self._execute(request, response_handler)
def delete_collection(self, name, ignore_missing=False, system=None):
"""Delete the collection.
:param name: Collection name.
:type name: str | unicode
:param ignore_missing: Do not raise an exception on missing collection.
:type ignore_missing: bool
:param system: Whether the collection is a system collection.
:type system: bool
:returns: True if collection was deleted successfully, False if
collection was not found and **ignore_missing** was set to True.
:rtype: bool
:raise c8.exceptions.CollectionDeleteError: If delete fails.
"""
params = {}
if system is not None:
params["isSystem"] = system
request = Request(
method="delete", endpoint="/collection/{}".format(name), params=params
)
def response_handler(resp):
if resp.error_code == 1203 and ignore_missing:
return False
if not resp.is_success:
raise CollectionDeleteError(resp, request)
return True
return self._execute(request, response_handler)
####################
# Graph Management #
####################
def graph(self, name):
"""Return the graph API wrapper.
:param name: Graph name.
:type name: str | unicode
:returns: Graph API wrapper.
:rtype: c8.graph.Graph
"""
return Graph(self._conn, self._executor, name)
def has_graph(self, name):
"""Check if a graph exists in the fabric.
:param name: Graph name.
:type name: str | unicode
:returns: True if graph exists, False otherwise.
:rtype: bool
"""
for graph in self.graphs():
if graph["name"] == name:
return True
return False
def graphs(self):
"""List all graphs in the fabric.
:returns: Graphs in the fabric.
:rtype: [dict]
:raise c8.exceptions.GraphListError: If retrieval fails.
"""
request = Request(method="get", endpoint="/graph")
def response_handler(resp):
if not resp.is_success:
raise GraphListError(resp, request)
return [
{
"id": body["_id"],
"name": body["_key"],
"revision": body["_rev"],
"orphan_collections": body["orphanCollections"],
"edge_definitions": [
{
"edge_collection": definition["collection"],
"from_vertex_collections": definition["from"],
"to_vertex_collections": definition["to"],
}
for definition in body["edgeDefinitions"]
],
"shard_count": body.get("numberOfShards"),
"replication_factor": body.get("replicationFactor"),
}
for body in resp.body["graphs"]
]
return self._execute(request, response_handler)
def create_graph(
self, name, edge_definitions=None, orphan_collections=None, shard_count=None
):
"""Create a new graph.
:param name: Graph name.
:type name: str | unicode
:param edge_definitions: List of edge definitions, where each edge
definition entry is a dictionary with fields "edge_collection",
"from_vertex_collections" and "to_vertex_collections" (see below
for example).
:type edge_definitions: [dict]
:param orphan_collections: Names of additional vertex collections that
are not in edge definitions.
:type orphan_collections: [str | unicode]
:param shard_count: Number of shards used for every collection in the
graph. To use this, parameter **smart** must be set to True and
every vertex in the graph must have the smart field. This number
cannot be modified later once set. Applies only to enterprise
version of C8Db.
:type shard_count: int
:returns: Graph API wrapper.
:rtype: c8.graph.Graph
:raise c8.exceptions.GraphCreateError: If create fails.
Here is an example entry for parameter **edge_definitions**:
.. code-block:: python
{
'edge_collection': 'teach',
'from_vertex_collections': ['teachers'],
'to_vertex_collections': ['lectures']
}
"""
data = {"name": name}
if edge_definitions is not None:
data["edgeDefinitions"] = [
{
"collection": definition["edge_collection"],
"from": definition["from_vertex_collections"],
"to": definition["to_vertex_collections"],
}
for definition in edge_definitions
]
if orphan_collections is not None:
data["orphanCollections"] = orphan_collections
if shard_count is not None: # pragma: no cover
data["numberOfShards"] = shard_count
request = Request(method="post", endpoint="/graph", data=data)
def response_handler(resp):
if resp.is_success:
return Graph(self._conn, self._executor, name)
raise GraphCreateError(resp, request)
return self._execute(request, response_handler)
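    # Minimal usage sketch (an assumption; the graph and collection names are placeholders):
    #
    #   graph = fabric.create_graph(
    #       "school",
    #       edge_definitions=[{
    #           "edge_collection": "teach",
    #           "from_vertex_collections": ["teachers"],
    #           "to_vertex_collections": ["lectures"],
    #       }],
    #   )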
def delete_graph(self, name, ignore_missing=False, drop_collections=None):
"""Drop the graph of the given name from the fabric.
:param name: Graph name.
:type name: str | unicode
:param ignore_missing: Do not raise an exception on missing graph.
:type ignore_missing: bool
:param drop_collections: Drop the collections of the graph also. This
is only if they are not in use by other graphs.
:type drop_collections: bool
:returns: True if graph was deleted successfully, False if graph was not
found and **ignore_missing** was set to True.
:rtype: bool
:raise c8.exceptions.GraphDeleteError: If delete fails.
"""
params = {}
if drop_collections is not None:
params["dropCollections"] = drop_collections
request = Request(
method="delete", endpoint="/graph/{}".format(name), params=params
)
def response_handler(resp):
if resp.error_code == 1924 and ignore_missing:
return False
if not resp.is_success:
raise GraphDeleteError(resp, request)
return True
return self._execute(request, response_handler)
########################
# Async Job Management #
########################
# Pratik: APIs not supported in documentation. Waiting for verification
# def async_jobs(self, status, count=None):
# """Return IDs of async jobs with given status.
#
# :param status: Job status (e.g. "pending", "done").
# :type status: str | unicode
# :param count: Max number of job IDs to return.
# :type count: int
# :returns: List of job IDs.
# :rtype: [str | unicode]
# :raise c8.exceptions.AsyncJobListError: If retrieval fails.
# """
# params = {}
# if count is not None:
# params['count'] = count
#
# request = Request(
# method='get',
# endpoint='/job/{}'.format(status),
# params=params
# )
#
# def response_handler(resp):
# if resp.is_success:
# return resp.body
# raise AsyncJobListError(resp, request)
#
# return self._execute(request, response_handler)
#
# def clear_async_jobs(self, threshold=None):
# """Clear async job results from the server.
#
# Async jobs that are still queued or running are not stopped.
#
# :param threshold: If specified, only the job results created prior to
# the threshold (a unix timestamp) are deleted. Otherwise, all job
# results are deleted.
# :type threshold: int
# :returns: True if job results were cleared successfully.
# :rtype: bool
# :raise c8.exceptions.AsyncJobClearError: If operation fails.
# """
# if threshold is None:
# url = '/job/all'
# params = None
# else:
# url = '/job/expired'
# params = {'stamp': threshold}
#
# request = Request(
# method='delete',
# endpoint=url,
# params=params
# )
#
# def response_handler(resp):
# if resp.is_success:
# return True
# raise AsyncJobClearError(resp, request)
#
# return self._execute(request, response_handler)
########################
# Streams Management #
########################
def stream(self, operation_timeout_seconds=30):
"""Return the stream collection API wrapper.
:returns: stream collection API wrapper.
:rtype: c8.stream_collection.StreamCollection
"""
return StreamCollection(
self,
self._conn,
self._executor,
self.url,
self.stream_port,
operation_timeout_seconds,
)
def streams(self, local=None):
"""Get list of all streams under given fabric
:returns: List of streams under given fabric.
:rtype: json
:raise c8.exceptions.StreamListError: If retrieving streams fails.
"""
if local is False:
url_endpoint = "/streams?global=true"
elif local is True:
url_endpoint = "/streams?global=false"
elif local is None:
url_endpoint = "/streams"
request = Request(method="get", endpoint=url_endpoint)
def response_handler(resp):
code = resp.status_code
if resp.is_success:
return [
{
"name": col["topic"],
"topic": col["topic"],
"local": col["local"],
"db": col["db"],
"tenant": col["tenant"],
"type": StreamCollection.types[col["type"]],
"status": "terminated"
if "terminated" in col
else "active", # noqa
}
for col in map(dict, resp.body["result"])
]
elif code == 403:
raise StreamPermissionError(resp, request)
raise StreamListError(resp, request)
return self._execute(request, response_handler)
def has_stream(self, stream, isCollectionStream=False, local=False):
"""Check if the list of streams has a stream with the given name.
:param stream: The name of the stream for which to check in the list
of all streams.
:type stream: str | unicode
:returns: True=stream found; False=stream not found.
:rtype: bool
"""
if isCollectionStream is False:
if local is False and "c8globals" not in stream:
stream = "c8globals." + stream
elif local is True and "c8locals" not in stream:
stream = "c8locals." + stream
return any(mystream["name"] == stream for mystream in self.streams(local=local))
def create_stream(self, stream, local=False):
"""
Create the stream under the given fabric
:param stream: name of stream
:param local: Operate on a local stream instead of a global one.
:returns: 200, OK if operation successful
:raise: c8.exceptions.StreamCreateError: If creating streams fails.
"""
if local is True:
endpoint = "{}/{}?global=False".format(ENDPOINT, stream)
elif local is False:
endpoint = "{}/{}?global=True".format(ENDPOINT, stream)
request = Request(method="post", endpoint=endpoint)
def response_handler(resp):
code = resp.status_code
if resp.is_success:
return resp.body["result"]
elif code == 502:
raise StreamCommunicationError(resp, request)
raise StreamCreateError(resp, request)
return self._execute(request, response_handler)
def delete_stream(self, stream, force=False):
"""
Delete the streams under the given fabric
:param stream: name of stream
:param force:
:returns: 200, OK if operation successful
:raise: c8.exceptions.StreamDeleteError: If deleting streams fails.
"""
endpoint = f"{ENDPOINT}/{stream}"
if force:
endpoint = endpoint + "?force=true"
request = Request(method="delete", endpoint=endpoint)
def response_handler(resp):
code = resp.status_code
if resp.is_success:
return True
elif code == 403:
raise StreamPermissionError(resp, request)
elif code == 412:
raise StreamDeleteError(resp, request)
raise StreamConnectionError(resp, request)
return self._execute(request, response_handler)
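    # Illustrative stream lifecycle sketch (comments only, not executed). It assumes
    # `fabric` is an already connected Fabric instance obtained from the package's
    # client, and that delete_stream expects the prefixed global name -- both of which
    # are assumptions, not guarantees from this module.
    #
    #   fabric.create_stream("quotes", local=False)
    #   if fabric.has_stream("quotes", local=False):
    #       print(fabric.streams(local=False))
    #   fabric.delete_stream("c8globals.quotes", force=True)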
#####################
# Restql Management #
#####################
def save_restql(self, data):
"""Save restql by name.
:param data: data to be used for restql POST API
:type data: dict
:returns: Results of restql API
:rtype: dict
:raise c8.exceptions.RestqlCreateError: if restql operation failed
"""
query_name = data["query"]["name"]
if " " in query_name:
raise RestqlValidationError("White Spaces not allowed in Query " "Name")
request = Request(method="post", endpoint="/restql", data=data)
def response_handler(resp):
if not resp.is_success:
raise RestqlCreateError(resp, request)
return resp.body["result"]
return self._execute(request, response_handler)
def import_restql(self, queries, details=False):
"""Import custom queries.
:param queries: queries to be imported
:type queries: [dict]
:param details: Whether to include details
:type details: bool
:returns: Results of importing restql
:rtype: dict
:raise c8.exceptions.RestqlImportError: if restql operation failed
"""
data = {"queries": queries, "details": details}
request = Request(method="post", endpoint="/restql/import", data=data)
def response_handler(resp):
if not resp.is_success:
raise RestqlImportError(resp, request)
return resp.body["result"]
return self._execute(request, response_handler)
def execute_restql(self, name, data=None):
"""Execute restql by name.
:param name: restql name
:type name: str | unicode
:param data: restql data (optional)
:type data: dict
:returns: Results of execute restql
:rtype: dict
:raise c8.exceptions.RestqlExecuteError: if restql execution failed
"""
if data is None or not ("bindVars" in data or "batchSize" in data):
data = {}
request = Request(
method="post", data=data, endpoint="/restql/execute/{}".format(name)
)
def response_handler(resp):
if not resp.is_success:
raise RestqlExecuteError(resp, request)
return resp.body
return self._execute(request, response_handler)
def read_next_batch_restql(self, id):
"""Read next batch from query worker cursor.
:param id: the cursor-identifier
:type id: int
:returns: Results of execute restql
:rtype: dict
:raise c8.exceptions.RestqlCursorError: if fetch next batch failed
"""
request = Request(method="put", endpoint="/restql/fetch/{}".format(id))
def response_handler(resp):
if not resp.is_success:
raise RestqlCursorError(resp, request)
return resp.body
return self._execute(request, response_handler)
def get_all_restql(self):
"""Get all restql associated for user.
:returns: Details of all restql
:rtype: list
:raise c8.exceptions.RestqlListError: if getting restql failed
"""
request = Request(method="get", endpoint="/restql/user")
def response_handler(resp):
if not resp.is_success:
raise RestqlListError(resp, request)
return resp.body["result"]
return self._execute(request, response_handler)
def update_restql(self, name, data):
"""Update restql by name.
:param name: name of restql
:type name: str | unicode
:param data: restql data
:type data: dict
:returns: True if restql is updated
:rtype: bool
:raise c8.exceptions.RestqlUpdateError: if query update failed
"""
request = Request(method="put", data=data, endpoint="/restql/" + name)
def response_handler(resp):
if not resp.is_success:
raise RestqlUpdateError(resp, request)
return True
return self._execute(request, response_handler)
def delete_restql(self, name):
"""Delete restql by name.
:param name: restql name
:type name: str | unicode
:returns: True if restql is deleted
:rtype: bool
:raise c8.exceptions.RestqlDeleteError: if restql deletion failed
"""
request = Request(method="delete", endpoint="/restql/" + name)
def response_handler(resp):
if not resp.is_success:
raise RestqlDeleteError(resp, request)
return True
return self._execute(request, response_handler)
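    # Illustrative RESTQL lifecycle sketch (comments only). `fabric` is assumed to be a
    # connected Fabric instance; apart from query["name"], which save_restql checks above,
    # the payload keys ("value") and the query text are assumptions for illustration.
    #
    #   fabric.save_restql({"query": {"name": "allPoints", "value": "FOR d IN points RETURN d"}})
    #   result = fabric.execute_restql("allPoints", data={"bindVars": {}, "batchSize": 10})
    #   fabric.update_restql("allPoints", {"query": {"value": "FOR d IN points LIMIT 5 RETURN d"}})
    #   fabric.delete_restql("allPoints")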
########################
# Events #
########################
def create_event(self, payload):
"""Create an event.
:param payload: Payload to create event
:type payload: dict
:returns: Dictionary containing the event id
:rtype: dict
:raise c8.exceptions.EventCreateError: if event creation failed
Here is an example entry for parameter **payload**:
.. code-block:: python
{
"action": "string",
"attributes": {},
"description": "string",
"details": "string",
"entityName": "string",
"entityType": "string",
"status": "string"
}
"""
request = Request(method="post", endpoint="/events", data=payload)
def response_handler(resp):
if not resp.is_success:
raise EventCreateError(resp, request)
return resp.body
return self._execute(request, response_handler)
def delete_event(self, eventIds):
"""Delete an event/s.
:param eventIds: The event id for which you want to fetch the event details
:type eventId: list of strings(event Ids)
:returns: List containig all the information of existing events
:rtype: list
:raise c8.exceptions.EventDeleteError: if event creation failed
"""
data = json.dumps((eventIds))
request = Request(method="delete", endpoint="/events", data=data)
def response_handler(resp):
if not resp.is_success:
raise EventGetError(resp, request)
return True
return self._execute(request, response_handler)
def get_all_events(self):
"""Create an event.
:returns: List containig all the information of existing events
:rtype: list
:raise c8.exceptions.EventGetError: if event creation failed
"""
request = Request(method="get", endpoint="/events/tenant")
def response_handler(resp):
if not resp.is_success:
raise EventGetError(resp, request)
return resp.body
return self._execute(request, response_handler)
def get_event_by_Id(self, eventId):
"""Create an event.
:param eventId: The event id for which you want to fetch the event details
:returns: List containig all the information of existing events
:rtype: list
:raise c8.exceptions.EventGetError: if event creation failed
"""
request = Request(method="get", endpoint="/events/" + str(eventId))
def response_handler(resp):
if not resp.is_success:
raise EventGetError(resp, request)
return resp.body
return self._execute(request, response_handler)
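    # Illustrative events sketch (comments only). `fabric` is assumed to be a connected
    # Fabric instance; the payload fields mirror the create_event docstring above, and the
    # key used to read the new event's id ("_key") is an assumption.
    #
    #   event = fabric.create_event({
    #       "action": "create",
    #       "attributes": {},
    #       "description": "example event",
    #       "details": "",
    #       "entityName": "users",
    #       "entityType": "collection",
    #       "status": "OK",
    #   })
    #   print(fabric.get_all_events())
    #   fabric.delete_event([event["_key"]])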
########################
# Stream Apps #
########################
def stream_app(self, name):
return StreamApps(self._conn, self._executor, name)
def validate_stream_app(self, data):
"""validates a stream app by given data
:param data: stream app defination string
"""
body = {"definition": data}
req = Request(
method="post", endpoint="/streamapps/validate", data=json.dumps(body)
)
def response_handler(resp):
if resp.is_success is True:
return True
return False
return self._execute(req, response_handler)
def retrieve_stream_app(self):
"""retrieves all the stream apps of a fabric"""
req = Request(
method="get",
endpoint="/streamapps",
)
def response_handler(resp):
if resp.is_success is True:
return resp.body
return False
return self._execute(req, response_handler)
def get_samples_stream_app(self):
"""gets samples for stream apps"""
req = Request(
method="get",
endpoint="/streamapps/samples",
)
def response_handler(resp):
if resp.is_success is not True:
raise StreamAppGetSampleError(resp, req)
return resp.body["streamAppSample"]
return self._execute(req, response_handler)
def create_stream_app(self, data, dclist=[]):
"""Creates a stream application by given data
:param data: stream app definition
:param dclist: regions where stream app has to be deployed
"""
# create request body
req_body = {"definition": data, "regions": dclist}
# create request
req = Request(method="post", endpoint="/streamapps", data=json.dumps(req_body))
# create response handler
def response_handler(resp):
if resp.is_success is True:
return True
return False
# call api
return self._execute(req, response_handler)
########################
# APIKeys #
########################
def api_keys(self, keyid):
"""Return the API keys API wrapper.
:param keyid: API Key id
:type kaeyid: string | unicode
:returns: API keys API wrapper.
:rtype: c8.stream_collection.StreamCollection
"""
return APIKeys(self._conn, self._executor, keyid)
def list_all_api_keys(self):
"""List the API keys.
:returns:list.
:raise c8.exceptions.GetAPIKeys: If request fails
"""
request = Request(
method="get",
endpoint="/key",
)
# create response handler
def response_handler(resp):
if not resp.is_success:
raise GetAPIKeys(resp, request)
else:
return resp.body["result"]
return self._execute(request, response_handler, custom_prefix="/_api")
##############################
# Search, View and Analyzers #
##############################
def search(self):
"""Returns the Search APIWrapper
:returns: Search API Wrapper
:rtype: c8.search.Search
"""
return Search(self._conn, self._executor)
class StandardFabric(Fabric):
"""Standard fabric API wrapper.
:param connection: HTTP connection.
:type connection: c8.connection.Connection
"""
def __init__(self, connection):
super(StandardFabric, self).__init__(
connection=connection, executor=DefaultExecutor(connection)
)
def __repr__(self):
return "<StandardFabric {}>".format(self.name)
def begin_async_execution(self, return_result=True):
"""Begin async execution.
:param return_result: If set to True, API executions return instances
of :class:`c8.job.AsyncJob`, which you can use to retrieve
results from server once available. If set to False, API executions
return None and no results are stored on server.
:type return_result: bool
:returns: Fabric API wrapper built specifically for async execution.
:rtype: c8.fabric.AsyncFabric
"""
return AsyncFabric(self._conn, return_result)
def begin_batch_execution(self, return_result=True):
"""Begin batch execution.
:param return_result: If set to True, API executions return instances
of :class:`c8.job.BatchJob` that are populated with results on
commit. If set to False, API executions return None and no results
are tracked client-side.
:type return_result: bool
:returns: Fabric API wrapper built specifically for batch execution.
:rtype: c8.fabric.BatchFabric
"""
return BatchFabric(self._conn, return_result)
class AsyncFabric(Fabric):
"""Fabric API wrapper tailored specifically for async execution.
See :func:`c8.fabric.StandardFabric.begin_async_execution`.
:param connection: HTTP connection.
:type connection: c8.connection.Connection
:param return_result: If set to True, API executions return instances of
:class:`c8.job.AsyncJob`, which you can use to retrieve results
from server once available. If set to False, API executions return None
and no results are stored on server.
:type return_result: bool
"""
def __init__(self, connection, return_result):
super(AsyncFabric, self).__init__(
connection=connection, executor=AsyncExecutor(connection, return_result)
)
def __repr__(self):
return "<AsyncFabric {}>".format(self.name)
class BatchFabric(Fabric):
"""Fabric API wrapper tailored specifically for batch execution.
See :func:`c8.fabric.StandardFabric.begin_batch_execution`.
:param connection: HTTP connection.
:type connection: c8.connection.Connection
:param return_result: If set to True, API executions return instances of
:class:`c8.job.BatchJob` that are populated with results on commit.
If set to False, API executions return None and no results are tracked
client-side.
:type return_result: bool
"""
def __init__(self, connection, return_result):
super(BatchFabric, self).__init__(
connection=connection, executor=BatchExecutor(connection, return_result)
)
def __repr__(self):
return "<BatchFabric {}>".format(self.name)
def __enter__(self):
return self
def __exit__(self, exception, *_):
if exception is None:
self._executor.commit()
def queued_jobs(self):
"""Return the queued batch jobs.
:returns: Queued batch jobs or None if **return_result** parameter was
set to False during initialization.
:rtype: [c8.job.BatchJob] | None
"""
return self._executor.jobs
def commit(self):
"""Execute the queued requests in a single batch API request.
If **return_result** parameter was set to True during initialization,
:class:`c8.job.BatchJob` instances are populated with results.
:returns: Batch jobs, or None if **return_result** parameter was set to
False during initialization.
:rtype: [c8.job.BatchJob] | None
:raise c8.exceptions.BatchStateError: If batch state is invalid
(e.g. batch was already committed or the response size did not
match expected).
:raise c8.exceptions.BatchExecuteError: If commit fails.
"""
return self._executor.commit()
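if __name__ == "__main__":
    # Illustrative batch-execution sketch. `build_fabric` is a stand-in for however the
    # surrounding package constructs a connected StandardFabric -- it is NOT part of this
    # module, so running this block as-is will raise NotImplementedError on purpose.
    def build_fabric():
        raise NotImplementedError("construct a StandardFabric from a c8 connection here")

    fabric = build_fabric()
    # Queue two API calls client-side; __exit__ commits them as a single batch request.
    with fabric.begin_batch_execution(return_result=True) as batch:
        streams_job = batch.streams()
        events_job = batch.get_all_events()
    # After the implicit commit the queued BatchJob instances carry their results.
    print(batch.queued_jobs())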
| Macrometacorp/pyC8 | c8/fabric.py | fabric.py | py | 56,104 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "c8.api.APIWrapper",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "c8.constants.STREAM_PORT",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "c8.constants",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "c8.c8ql.... |
12900539476 | from fastapi import APIRouter
from pydantic import BaseModel
from starlette.requests import Request
from ozz_backend import app_logger
from ozz_backend.persistence_layer import User
router = APIRouter(
prefix="/user",
tags=["user"],
# dependencies=[Depends(get_token_header)],
)
class UserOngoingOut(BaseModel):
user_id: str
mission_id: int
quest_id: int
@router.get('/test')
def test_api():
app_logger.info('test')
return {'test'}
@router.get('/user-ongoing', response_model=UserOngoingOut)
def get_ongoing_info(request: Request, user_id: int, mission_id: int):
app_logger.info(f'[{request.method}] {request.url}: {request.client.host}:{request.client.port}')
result = User.get_user_ongoing_info(user_id, mission_id)
ongoing = UserOngoingOut(user_id=result.user_id, mission_id=result.mission_id, quest_id=result.quest_id)
return ongoing
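if __name__ == "__main__":
    # Local-run sketch (illustrative): mount this router on a throwaway FastAPI app and
    # serve it with uvicorn. The real application object lives elsewhere in ozz_backend,
    # so the app, host and port below are assumptions for manual testing only.
    import uvicorn
    from fastapi import FastAPI

    test_app = FastAPI()
    test_app.include_router(router)
    uvicorn.run(test_app, host="127.0.0.1", port=8000)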
| honeybeeveloper/plat_back | ozz_backend/api/user.py | user.py | py | 895 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "ozz_backend.app_logger.info",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "ozz_b... |
13160016876 | from django.shortcuts import render
from subscribers.models import subscriberForm
from subscribe.settings import EMAIL_HOST_USER
from django.core.mail import send_mail,BadHeaderError
# Create your views here.
def index(request):
form=subscriberForm()
return render(request,"index.html",{"form":form})
def subscribe(request):
if request.method=="GET":
form=subscriberForm(request.GET)
if form.is_valid():
""" saving data in our database """
form.save()
""" using this cleaned data must, to get value of email"""
senderEmail=form.cleaned_data['Email']
subject="Welcome to "+senderEmail
"""here your message will be, also you send templates etc"""
message="Thanking you for subscribing me, you get every updates within a seconds..."
""" receiver email address"""
recipient=str(senderEmail)
if subject and message and recipient :
try:
""" send email is function in django to use this to send mail"""
""" if you want send bulk email add more email in [recipients]"""
send_mail(subject,message,EMAIL_HOST_USER,[recipient],fail_silently=False)
except BadHeaderError :
""" bad header error means prevent from header injection apply by hackers"""
return render(request,"subscribe.html",{'message':'Invalid header found.','class':'text-danger'})
return render(request,"subscribe.html",{'message':'Thanking you For Subscribing..','class':'text-success'})
return render(request,"subscribe.html",{"message":"Make sure all fields are entered and valid.",'class':'text-info'}) | pawankushwah850/Emailsubscriber | subscribers/views.py | views.py | py | 1,862 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "subscribers.models.subscriberForm",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "subscribers.models.subscriberForm",
"line_number": 14,
"usage_type": "call"
},
... |
44496456290 | import json
import logging
import os
import random
import time
from datetime import datetime
from uuid import uuid4
import paho.mqtt.client as mqtt
# MQTT broker details
BROKER_ADDRESS = os.getenv("BROKER_HOST")
BROKER_PORT = 1883
# Configuring file handler for logging
log_file = f"{__file__}.log"
# Logging setup
logging.basicConfig(
filename=log_file,
filemode="w",
format="%(asctime)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
# Creating unique sensor IDs for each sensor
temp_sensor_id = str(uuid4())
hum_sensor_id = str(uuid4())
# Simulated sensor data generation for temperature
def generate_temperature_data() -> dict:
"""
Generate random temperature data.
Returns:
dict: Generated sensor data.
"""
temperature = round(20 + (30 * random.random()), 2)
timestamp = datetime.utcnow().isoformat() # ISO8601 format
data = {
"sensor_id": temp_sensor_id,
"topic": "temperature",
"value": temperature,
"timestamp": timestamp,
}
return data
def generate_humidity_data() -> dict:
"""
Generate random humidity data.
Returns:
dict: Generated sensor data.
"""
humidity = round(40 + (60 * random.random()), 2)
timestamp = datetime.utcnow().isoformat()
data = {
"sensor_id": hum_sensor_id,
"topic": "humidity",
"value": humidity,
"timestamp": timestamp,
}
return data
def on_publish(client, userdata, mid):
"""
MQTT on_publish callback function.
Args:
client: The MQTT client instance.
userdata: User data.
mid: Message ID.
"""
logger.info(f"Message Published: {mid}")
def on_connect(client, userdata, flags, rc):
"""
MQTT on_connect callback function.
Args:
client: The MQTT client instance.
userdata: User data.
flags: Flags.
rc: Return code.
"""
if rc == 0:
logger.info("Connected to Mosquitto MQTT Broker!")
else:
logger.error(f"Failed to connect, return code: {rc}")
# Create MQTT client instance
client = mqtt.Client()
client.on_connect = on_connect
client.on_publish = on_publish
# Connect to broker
client.connect(BROKER_ADDRESS, port=BROKER_PORT)
# Start the MQTT loop
client.loop_start()
try:
while True:
sensor_data_temp = generate_temperature_data()
sensor_data_hum = generate_humidity_data()
temperature_payload = json.dumps(sensor_data_temp)
humidity_payload = json.dumps(sensor_data_hum)
# Publishing the topics
client.publish("sensors/temperature", temperature_payload)
client.publish("sensors/humidity", humidity_payload)
        time.sleep(15)  # Publish every 15 seconds
except KeyboardInterrupt:
logger.info("Publisher stopped.")
client.loop_stop()
client.disconnect()
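# Companion subscriber sketch (comments only). It would live in a separate script; the
# wildcard topic below is an assumption based on the "sensors/..." topics published above.
#
#   def on_message(client, userdata, message):
#       logger.info(f"{message.topic}: {message.payload.decode()}")
#
#   sub = mqtt.Client()
#   sub.on_message = on_message
#   sub.connect(BROKER_ADDRESS, port=BROKER_PORT)
#   sub.subscribe("sensors/#")
#   sub.loop_forever()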
| SudeepKumarS/mqtt-sensor-api | mqtt-publisher/mqtt_publisher.py | mqtt_publisher.py | py | 2,912 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.getenv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
... |
15143757328 | from atelier_4_ex1 import gen_list_random_int
import matplotlib.pyplot as plt
import numpy as np
import time ,random
def extract_elements_list(list_in_which_to_choose,int_nbr_of_element_to_extract=10):
list_in_which_to_choose_length,mix_length = len(list_in_which_to_choose),0
mixList = list()
while mix_length < int_nbr_of_element_to_extract :
random_ = gen_list_random_int(0,list_in_which_to_choose_length)
if random_ not in mixList :
mixList.append(random_)
mix_length += 1
else :
continue
return [ list_in_which_to_choose[elem] for elem in mixList ]
# Test de votre code
# def extract_elements_list2(list_in_which_to_choose,int_nbr_of_element_to_extract=10):
# list_in_which_to_choose_length,mix_length = len(list_in_which_to_choose),0
# mixList = list()
# while mix_length < int_nbr_of_element_to_extract :
# random_ = gen_list_random_int(0,list_in_which_to_choose_length)
# mixList.append(random_)
# mix_length += 1
# return [ list_in_which_to_choose[elem] for elem in mixList ]
# print(extract_elements_list( [ i for i in range(1,11)],4))
def pref_mix(func1,func2,lst,num=100):
result = ([],[])
for elem in lst :
data1 ,data2= [],[]
nb_elements = int(elem / 2)
for index in range(num) :
lst_elem = list(range(elem))
# first function
start = time.perf_counter()
func1(lst_elem,nb_elements)
end = time.perf_counter() - start
data1.append(end)
start = time.perf_counter()
func2(lst_elem,nb_elements)
end = time.perf_counter() - start
data2.append(end)
result[0].append(sum(data1)/len(data1))
result[1].append(sum(data2)/len(data2))
return result
list_test = [500,1000,2500,5000,7500]
result = pref_mix(extract_elements_list,random.sample, list_test ,100)
print(result)
# Here we describe the abscissas (x-axis values)
# Between 0 and 5 in 10 points
fig, ax = plt.subplots()
# Drawing of the curves: the first parameter
# corresponds to the x-axis points, the
# second corresponds to the y-axis points,
# and the optional third parameter lets you
# choose the colour and the marker
ax.plot(list_test,result[0], 'bo-',label='extract_elements_list')
ax.plot(list_test,result[1], 'r*-',label='random.sample')
ax.set(xlabel='number of elements', ylabel='time (s)',
       title='average execution time for extract_elements_list and random.sample')
ax.legend(loc='upper center', shadow=True, fontsize='x-large')
#fig.savefig("test.png")
plt.show()
| K-Ilyas/python | atelier_4/atelier_4_ex4.py | atelier_4_ex4.py | py | 2,594 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "atelier_4_ex1.gen_list_random_int",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": ... |
71855094268 | import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import datasets
from sklearn import linear_model
import matplotlib.pyplot as plt
def sigmoid(z):
return 1/(1+np.exp(-z))
def costfunction(X, y, w):
cost = 0
size = y.shape[0]
for i in range(size):
if y[i] == 1:
cost -= np.log(sigmoid(X[i]*w))
else:
cost -= np.log(1 - sigmoid(X[i]*w))
return cost / size
def gradAscent(traindata,label,iter,alpha,step,lamda=0.001):
dataMat=np.mat(traindata)
    labelMat=np.mat(label).transpose()  # column vector (m,1) so shapes match sigmoid(dataMat*weights)
m,n=np.shape(dataMat)
weights=np.ones((n,1))
weights=np.mat(weights)
for k in range(iter):
temp=costfunction(dataMat,labelMat,weights)
weights=weights-alpha*((dataMat.transpose())*(sigmoid(dataMat*weights)-labelMat)+lamda*weights)
if k%200==0:
print("Loss is: ",temp,weights.transpose())
        if (k % step == 0 and k != 0):  # decay the learning rate every 'step' iterations
alpha=alpha/5
return weights
def preprocessing(x_train,x_test):
sc=StandardScaler()
sc.fit(x_train)
x_train_scaled=sc.transform(x_train)
x_test_scaled=sc.transform(x_test)
return x_train_scaled,x_test_scaled
def split(ratio):
Data = datasets.load_iris()
#Data = datasets.load_wine() #for Dataset wine
x = Data.data
y=Data.target
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size = ratio, random_state = 0)
return x_train,x_test,y_train,y_test
def plot(X,Y):
x_min, x_max = X[:, 0].min() - .2, X[:, 0].max() + .2
y_min, y_max = X[:, 1].min() - .2, X[:, 1].max() + .2
h = .02
logreg =linear_model.LogisticRegression(C=1e5, solver='lbfgs', multi_class='multinomial')
logreg.fit(X,Y)
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.show()
if __name__=='__main__':
x_train,x_test,y_train,y_test=split(0.3)
x_train_scaled,x_test_scaled=preprocessing(x_train,x_test)
#logreg=linear_model.LogisticRegression(C=1e4) #for ovr
logreg=linear_model.LogisticRegression(C=1e4,multi_class='multinomial',solver='lbfgs') #ovm
logreg.fit(x_train_scaled,y_train)
print("Accuracy:",logreg.score(x_test_scaled,y_test))
plot(x_train_scaled[:,:2],y_train)
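    # Illustrative call of the hand-rolled gradAscent above (assumption: it expects binary
    # labels, so only iris classes 0 and 1 are kept; the hyper-parameters are arbitrary).
    binary_mask = y_train < 2
    w = gradAscent(x_train_scaled[binary_mask], y_train[binary_mask], iter=1000, alpha=0.01, step=500)
    print("Hand-rolled LR weights:", w.transpose())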
| Fred199683/Logistic-Regression | LR.py | LR.py | py | 2,556 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.exp",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.mat",
"line_number": 19,
"... |
29756907883 | # Author: Sirui Feng
'''
This file splits each review on periods and conjunctions.
'''
import re
import json
from textblob import TextBlob
from textblob.sentiments import NaiveBayesAnalyzer
import csv
from word_stemmer import word_stemmer
public_utilities_path = 'data/public_utilities.json'
def split_period(review):
'''
Splits sentences on periods.
'''
p = re.compile(r'[^\s\.][^\.\n]+')
sentences = p.findall(review)
return sentences
def split_conjunctions(sentence):
'''
Splits each sentence on conjuctions.
'''
conjuctions = [';', 'for', 'and', 'nor', 'but', 'or', 'yet', 'so']
clause = re.split('; | and | nor | but | or | yet | so | although | despite | though | however | on the other hand | in contrast ', sentence)
clause = [x.strip() for x in clause]
clause = [x for x in clause if len(x) != 0]
return clause
def gen_sentences():
'''
    Reads in the sentences and splits on periods and conjunctions.
'''
with open(public_utilities_path) as datafile:
with open('data/full_data.csv', 'w') as outfile:
writer = csv.DictWriter(outfile, fieldnames = ['review_id', \
'business_id', 'user_id', 'stars', 'blob_polarity', 'review', \
'label'])
writer.writeheader()
i=0
for line in datafile:
i += 1
print(i)
row = json.loads(line)
review = row['text']
review = review.lower()
#split only on periods
sentences = split_period(review)
for s in sentences:
blob = TextBlob(s, analyzer = NaiveBayesAnalyzer())
polarity = blob.polarity
#s = word_stemmer(s)
writer.writerow({'review_id':row['review_id'], \
'business_id': row['business_id'], \
'user_id':row['user_id'], 'stars':row['stars'], \
'blob_polarity': polarity, 'review': s})
gen_sentences() | vi-tnguyen/textinsighters | gen_sentences.py | gen_sentences.py | py | 1,757 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "re.compile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "csv.DictWriter",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 48... |
40449109187 | import argparse
import json
EXAMPLE_USAGE = """
Example Usage via RLlib CLI:
rllib rollout /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN
--env CartPole-v0 --steps 1000000 --out rollouts.pkl
Example Usage via executable:
./rollout.py /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN
--env CartPole-v0 --steps 1000000 --out rollouts.pkl
"""
def create_parser(parser_creator = None):
#parser = argparse.ArgumentParser("Ray training with custom IG environment")
## parser for rollouts
parser_creator = parser_creator or argparse.ArgumentParser
parser = parser_creator(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Roll out a reinforcement learning agent "
"given a checkpoint.",
epilog=EXAMPLE_USAGE)
parser.add_argument(
"--checkpoint", default='' ,type=str, help="Checkpoint from which to roll out.")
required_named = parser.add_argument_group("required named arguments")
required_named.add_argument(
"--run",
type=str,
required=True,
help="The algorithm or model to train. This may refer to the name "
"of a built-on algorithm (e.g. RLLib's DQN or PPO), or a "
"user-defined trainable function or class registered in the "
"tune registry.")
required_named.add_argument(
"--env", type=str, help="The gym environment to use.")
parser.add_argument(
"--no-render",
default=False,
action="store_const",
const=True,
help="Suppress rendering of the environment.")
parser.add_argument(
"--monitor",
default=False,
action="store_true",
help="Wrap environment in gym Monitor to record video. NOTE: This "
"option is deprecated: Use `--video-dir [some dir]` instead.")
parser.add_argument(
"--video-dir",
type=str,
default=None,
help="Specifies the directory into which videos of all episode "
"rollouts will be stored.")
parser.add_argument(
"--steps",
default=20000,
help="Number of timesteps to roll out (overwritten by --episodes).")
parser.add_argument(
"--episodes",
default=0,
help="Number of complete episodes to roll out (overrides --steps).")
parser.add_argument("--out", default=None, help="Output filename.")
parser.add_argument(
"--config",
default="{}",
type=json.loads,
help="Algorithm-specific configuration (e.g. env, hyperparams). "
"Gets merged with loaded configuration from checkpoint file and "
"`evaluation_config` settings therein.")
parser.add_argument(
"--save-info",
default=False,
action="store_true",
help="Save the info field generated by the step() method, "
"as well as the action, observations, rewards and done fields.")
parser.add_argument(
"--use-shelve",
default=False,
action="store_true",
help="Save rollouts into a python shelf file (will save each episode "
"as it is generated). An output filename must be set using --out.")
parser.add_argument(
"--track-progress",
default=False,
action="store_true",
help="Write progress to a temporary file (updated "
"after each episode). An output filename must be set using --out; "
"the progress file will live in the same folder.")
# save and restore file management
parser.add_argument(
"--policy-dir", type=str, help="folder name of the policy.", default="")
parser.add_argument(
"--experiment", type=str, help="chosen experiment to reload.", default="")
parser.add_argument(
"--ncheckpoint", type=str, help="chosen checkpoint to reload.", default="")
parser.add_argument(
"--heuristic-policy", type=bool, help="chosen checkpoint to reload.", default=False)
parser.add_argument(
"--static-targets", type=bool, help="chosen checkpoint to reload.", default=False)
parser.add_argument(
"--video_dir", type=str, help="chosen folder to save video.", default="")
parser.add_argument(
"--horizon", type=int, help="limit of timesteps.", default=40)
### Old arguments needs a cleanup
parser.add_argument("--scenario", type=str, default="simple_spread_assigned",
choices=['simple', 'simple_speaker_listener',
'simple_crypto', 'simple_push',
'simple_tag', 'simple_spread', 'simple_adversary', 'simple_spread_assigned',
'matlab_simple_spread_assigned','matlab_simple_spread_assigned_hardcoll', 'matlab_simple_spread_assigned_checkpoints'],
help="name of the scenario script")
parser.add_argument("--max-episode-len", type=int, default=100,
help="maximum episode length")
parser.add_argument("--num-episodes", type=int, default=60000,
help="number of episodes")
parser.add_argument("--num-adversaries", type=int, default=0,
help="number of adversaries")
parser.add_argument("--good-policy", type=str, default="maddpg",
help="policy for good agents")
parser.add_argument("--adv-policy", type=str, default="maddpg",
help="policy of adversaries")
# Core training parameters
parser.add_argument("--lr", type=float, default=1e-3,
help="learning rate for Adam optimizer")
parser.add_argument("--gamma", type=float, default=0.99,
help="discount factor")
# NOTE: 1 iteration = sample_batch_size * num_workers timesteps * num_envs_per_worker
parser.add_argument("--sample-batch-size", type=int, default=25,
help="number of data points sampled /update /worker")
parser.add_argument("--train-batch-size", type=int, default=1024,
help="number of data points /update")
parser.add_argument("--n-step", type=int, default=1,
help="length of multistep value backup")
parser.add_argument("--num-units", type=int, default=128,
help="number of units in the mlp")
parser.add_argument("--replay-buffer", type=int, default=1000000,
help="size of replay buffer in training")
parser.add_argument("--seed", type=int, default=100,
help="initialization seed for the network weights")
# Checkpoint
parser.add_argument("--checkpoint-freq", type=int, default = 10, #75,
help="save model once every time this many iterations are completed")
parser.add_argument("--local-dir", type=str, default="./ray_results",
help="path to save checkpoints")
parser.add_argument("--restore", type=str, default=None,
help="directory in which training state and model are loaded")
parser.add_argument("--in-evaluation", type=bool, default=False, help="trigger evaluation procedure")
# Parallelism
#parser.add_argument("--num-workers", type=int, default=0)
#parser.add_argument("--num-envs-per-worker", type=int, default=1)
#parser.add_argument("--num-gpus", type=int, default=0)
parser.add_argument("--num-workers", type=int, default=0) #0
parser.add_argument("--num-envs-per-worker", type=int, default=1) #1
parser.add_argument("--num-gpus", type=int, default=0) #0
#parser.add_argument("--num-cpus-per-worker", type=int, default=1)
parser.add_argument("--num-gpus-per-worker", type=int, default=0) #0
# From the ppo
parser.add_argument("--stop-iters", type=int, default=100)
parser.add_argument("--stop-timesteps", type=int, default=160000000)
# parser.add_argument("--stop-reward", type=float, default=7.99)
# For rollouts
parser.add_argument("--stop-iters-rollout", type=int, default=1)
parser.add_argument("--nagents", type=int, default=1)
parser.add_argument("--ntargets", type=int, default=1)
parser.add_argument("--nrobots", type=int, default=1)
# mode of hand-engineered comm. policy (-1 no hand-engineered)
parser.add_argument("--mode", type=int, default=-1)
parser.add_argument("--test", type=int, default=0, choices = [0,1], help="whether we want to test the policy or not")
parser.add_argument("--test-env", type=int, default=0, choices = [0,1], help="whether we want to act in the test environment or not")
parser.add_argument("--deterministic", type=int, default=1, choices=[0, 1],
help="enable exploration or not during execution")
return parser | tud-amr/AC-LCP | utils/parse_args_rollout.py | parse_args_rollout.py | py | 8,847 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "argparse.RawDescriptionHelpFormatter",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 67,
"usage_type": "attribute"
}
] |
40483417494 | import tkinter, threading
from tkinter import ttk
from interface.onglets.onglets_map import OngletsMap
from interface.onglets.onglets_packets import OngletsPackets
from interface.onglets.onglets_personnage import OngletsPersonnage
from interface.onglets.onglets_sorts import OngletsSorts
import time
class MainInterface(threading.Thread):
def __init__(self):
threading.Thread(None,self.launch).start()
while True:
time.sleep(1)
            if getattr(self, "ongletsSorts", None):  # wait until the GUI thread has created the tabs
break
def set_character(self, character):
self.character = character
self.ongletsMap.set_character(character)
self.ongletsSorts.set_character(character)
self.ongletsPersonnage.set_character(character)
threading.Thread(None,self.character_statue).start()
def character_statue(self):
en_mouvement = tkinter.Label(self.main, bg="red", text = "En mouvement")
en_mouvement.place(relx=0.05, rely=0.05, relwidth=0.08, relheight=0.04)
en_recolte = tkinter.Label(self.main, bg="red", text = "En recolte")
en_recolte.place(relx=0.05, rely=0.10, relwidth=0.08, relheight=0.04)
en_combat = tkinter.Label(self.main, bg="red", text = "En combat")
en_combat.place(relx=0.05, rely=0.15, relwidth=0.08, relheight=0.04)
while True:
time.sleep(1)
if self.character.deplacement.ismouving:
en_mouvement.configure(bg = "Green")
else:
en_mouvement.configure(bg = "Red")
if self.character.isharvest:
en_recolte.configure(bg = "Green")
else:
en_recolte.configure(bg = "red")
if self.character.isfighting:
en_combat.configure(bg = "Green")
else:
en_combat.configure(bg = "red")
def launch(self):
self.main = tkinter.Tk()
self.main.title("LeafBot")
self.main.geometry('1200x900')
self.create_notebook()
self.main.mainloop()
def create_notebook(self):
self.onglets = tkinter.ttk.Notebook(self.main)
self.onglets.pack()
self.onglets.place(relx=0.15, rely=0.05, relwidth=0.83, relheight=0.83)
self.ongletsPackets = OngletsPackets(self.onglets)
self.ongletsPersonnage = OngletsPersonnage(self.onglets)
self.ongletsMap = OngletsMap(self.onglets)
self.ongletsSorts = OngletsSorts(self.onglets)
def base_start(self,character):
self.vita = tkinter.Label(self.main, bg="red", text = character.vie_actuelle +" / " + character.vie_max)
self.vita.pack()
self.vita.place(relx=0.20, rely=0.90, relwidth=0.08, relheight=0.08)
self.energie = tkinter.Label(self.main, bg="yellow", text = character.ennergie_actuelle +" / " + character.ennergie_max)
self.energie.pack()
self.energie.place(relx=0.40, rely=0.90, relwidth=0.08, relheight=0.08)
self.xp = tkinter.Label(self.main,bg="deep sky blue", text = character.xp_actuelle +" / " + character.xp_fin)
self.xp.pack()
self.xp.place(relx=0.60, rely=0.90, relwidth=0.1, relheight=0.08)
self.kamas = tkinter.Label(self.main, bg="orange", text = character.kamas)
self.kamas.pack()
self.kamas.place(relx=0.80, rely=0.90, relwidth=0.08, relheight=0.08)
if __name__ == "__main__":
MainInterface()
| Azzary/LeafMITM | interface/main_interface.py | main_interface.py | py | 3,458 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "threading.Thread",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
... |
11356022056 | from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import argodb as argo
import research_tools as research
plt.ion()
plt.close('all')
dirtopo = '/datawork/fsi2/mars/DATA/BATHY/ETOPO2'
topofile = 'etopo2.nc'
dirtopo = '/net/alpha/exports/sciences/data/BATHYMETRIE/BATHYMETRIE'
topofile = 'ETOPO2v2c_f4.nc'
dirtile = '/net/libra/local/tmp/1/herry/tiles'
itile = 50
figsize = (9, 7)
reso = 0.5
argodic = research.read_argo_filter(itile)
minlon, maxlon, minlat, maxlat = argodic['LONMIN_NO_M'], argodic['LONMAX_NO_M'], argodic['LATMIN_NO_M'], argodic['LATMAX_NO_M']
lon = np.arange(reso*np.floor(minlon/reso), reso*np.floor(maxlon/reso)+reso, reso)
lat = np.arange(reso*np.floor(minlat/reso), reso*np.floor(maxlat/reso)+reso, reso)
#lon_deg, lat_deg = define_grid(minlon, maxlon, minlat, maxlat, reso_deg)
with Dataset('%s/%s' % (dirtopo, topofile)) as nc:
z = nc.variables['z'][:,:]
dl0 = 1/30. # 1/30deg for etopo
lontopo = np.arange(-180, 180+dl0, dl0)
lattopo = np.arange(-90, 90+dl0, dl0)
def get_idx_of_box(lontopo, lattopo, cell):
minlon, maxlon, minlat, maxlat = cell
ilon = [i for i, x in enumerate(lontopo) if (x>=minlon) and (x<=maxlon)]
    jlat = [j for j, x in enumerate(lattopo) if (x>=minlat) and (x<=maxlat)]
    return ilon[0], ilon[-1], jlat[0], jlat[-1]
domain = [minlon, maxlon, minlat, maxlat]
i0, i1, j0, j1 = get_idx_of_box(lontopo, lattopo, domain)
def average_topo_on_box(depth, cell):
""" average high resolution depth array on cell """
i0, i1, j0, j1 = get_idx_of_box(lontopo, lattopo, cell)
return np.mean(depth[j0:j1, i0:i1].ravel())
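# Usage note (illustrative): average the ~1/30-degree ETOPO depths over a single
# 0.5-degree cell, e.g. the cell sitting at the south-west corner of the tile:
#   average_topo_on_box(z, [minlon, minlon + 0.5, minlat, minlat + 0.5])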
def box(cell, d=0):
x1, x2, y1, y2 = cell
plt.plot([x1-d, x1-d, x2+d, x2+d, x1-d],
[y1-d, y2+d, y2+d, y1-d, y1-d], 'k')
plt.figure(figsize=figsize)
plt.imshow(z[j0:j1, i0:i1],
origin='lower', extent=[minlon, maxlon, minlat, maxlat])
plt.axis('tight')
plt.colorbar()
reso = 0.5
lon = np.arange(minlon, maxlon, reso)
lat = np.arange(minlat, maxlat, reso)
nlon = len(lon)
nlat = len(lat)
bathy = np.zeros((nlat, nlon))
for j in range(nlat-1):
for i in range(nlon-1):
reso2 = reso*0.5
gridcell = [lon[i]-reso2, lon[i]+reso2, lat[j]-reso2, lat[j]+reso2]
box(gridcell)
get_idx_of_box(lontopo, lattopo, gridcell)
bathy[j, i] = average_topo_on_box(z, gridcell)
msk = bathy < 0
fig, ax = plt.subplots(2,1)
divider = make_axes_locatable(ax[0])
ax_cb = divider.new_horizontal(size="4%", pad=0.2)
im = ax[0].imshow(bathy,
origin='lower', interpolation='nearest',
extent=[minlon, maxlon, minlat, maxlat])
ax[0].set_title('tile #%03i' % itile)
fig.add_axes(ax_cb)
fig.colorbar(im, cax=ax_cb)
divider = make_axes_locatable(ax[1])
ax_cb = divider.new_horizontal(size="4%", pad=0.2)
ax[1].imshow(msk,
origin='lower', interpolation='nearest',
extent=[minlon, maxlon, minlat, maxlat])
| pvthinker/pargopy | pargopy_v0/define_landmask.py | define_landmask.py | py | 2,984 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotl... |
70818525628 | #import Library
import speech_recognition as sr
# Initialize recognizer class
r = sr.Recognizer()
# Reading Audio file as source
# listening the audio file and store in audio_text variable
# The path should be correct
with sr.AudioFile('Sample.wav') as source:
audio = r.listen(source)
# Using exception handling in case the api could not be accessed successfully.
try:
# using google speech recognition
text = r.recognize_google(audio)
    print('Converting speech into text successfully!')
print(text)
except:
print('Could not access API, please run it again.') | CHAODENG/Project4 | SpeechToText.py | SpeechToText.py | py | 632 | python | en | code | 0 | github-code | 6 | [
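# Variation sketch (comments only): the same recognizer can transcribe live microphone
# input instead of a WAV file (this needs PyAudio, which this script does not use).
#   with sr.Microphone() as mic_source:
#       mic_audio = r.listen(mic_source)
#       print(r.recognize_google(mic_audio))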
{
"api_name": "speech_recognition.Recognizer",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "speech_recognition.AudioFile",
"line_number": 11,
"usage_type": "call"
}
] |
18677212205 | import argparse
import logging
import random
import sys
import time
from copy import deepcopy
import numpy as np
import torch
from scipy.stats import kendalltau
from datasets.dataloader import get_dataloader
from models.cell_operations import NAS_BENCH_201
from models.supernet import Supernet201
from utils import obtain_accuracy, AverageMeter, set_seed, run_func, time_record
parser = argparse.ArgumentParser("Train 201 Supernet")
# dataset
parser.add_argument("--data_root", type=str, default='./dataset/', help="The path to dataset")
parser.add_argument("--dataset", type=str, default='cifar10', help="Dataset.")
parser.add_argument("--search_space_name", type=str, default='nas-bench-201', help="The search space name.")
parser.add_argument("--num_classes", type=int, default=10, help="Dataset Classes")
# supernet
parser.add_argument("--max_nodes", type=int, default=4, help="The maximum number of nodes.")
parser.add_argument("--channel", type=int, default=16, help="The number of channels.")
parser.add_argument("--num_cells", type=int, default=5, help="The number of cells in one stage.")
# training settings
parser.add_argument("--exp_name", type=str, default='debug_baseline', help='exp_name for saving results')
parser.add_argument("--method", type=str, default='spos', choices=['spos', 'fairnas', 'sumnas'])
parser.add_argument("--lr", type=float, default=0.05, help="Learning rate")
parser.add_argument("--inner_lr", type=float, default=0.05, help="Learning rate")
parser.add_argument("--momentum", type=float, default=0.9, help="Momentum")
parser.add_argument("--wd", type=float, default=2.5e-4, help="Weight decay")
parser.add_argument("--epochs", type=int, default=250, help="Training epochs")
parser.add_argument("--gpu_id", type=int, default=0, help="Training GPU")
parser.add_argument("--train_batch_size", type=int, default=256, help="Train batch size")
parser.add_argument("--valid_batch_size", type=int, default=512, help="Valid batch size")
parser.add_argument("--print_freq", type=int, default=50, help="print frequency when training")
parser.add_argument("--rank_print_freq", type=int, default=100, help="print frequency when ranking")
parser.add_argument("--seed", type=int, default=0, help="manual seed")
parser.add_argument("--debug", default=False, action='store_true', help="for debug")
args = parser.parse_args()
args.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
args.ckpt_path = 'checkpoints/%s.pt' % args.exp_name
args.pred_path = 'results/%s.npy' % args.exp_name
if args.debug:
args.epochs = 5
# logging config
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
logging.info(args)
set_seed(args.seed)
def mix_grad(grad_list, weight_list):
"""
calc weighted average of gradient
"""
mixed_grad = []
for g_list in zip(*grad_list):
g_list = torch.stack([weight_list[i] * g_list[i] for i in range(len(weight_list))])
mixed_grad.append(torch.sum(g_list, dim=0))
return mixed_grad
def apply_grad(model, grad):
"""
assign gradient to model(nn.Module) instance. return the norm of gradient
"""
for p, g in zip(model.parameters(), grad):
if p.grad is None:
p.grad = g
else:
p.grad += g
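# Toy illustration (comments only, independent of the supernet) of the two helpers above:
# mix_grad averages per-parameter gradients with the given weights and apply_grad writes
# the result into a module's .grad fields.
#   toy = torch.nn.Linear(4, 2)
#   g1 = [torch.ones_like(p) for p in toy.parameters()]
#   g2 = [torch.zeros_like(p) for p in toy.parameters()]
#   mixed = mix_grad([g1, g2], torch.tensor([0.5, 0.5]))
#   apply_grad(toy, mixed)   # every entry of p.grad is now 0.5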
def train(epoch, train_loader, model, criterion, optimizer, inner_optimizer=None):
train_loss = AverageMeter()
train_top1 = AverageMeter()
train_top5 = AverageMeter()
model.train()
path_list = []
num_candidate_ops = 5
candidate_ops = list(range(5))
candidate_edges = 6
for step, (inputs, targets) in enumerate(train_loader):
inputs = inputs.to(args.device)
targets = targets.to(args.device)
if args.method == 'spos':
# randomly sample an arch
sampled_arch = [
random.choice(candidate_ops) for _ in range(candidate_edges)
]
optimizer.zero_grad()
logits = model(inputs, sampled_arch)
loss = criterion(logits, targets)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
optimizer.step()
elif args.method == 'fairnas':
# shuffle the ops to get sub-models with strict fairness
for _ in range(candidate_edges):
random.shuffle(candidate_ops)
path_list.append(deepcopy(candidate_ops))
# inner loop
optimizer.zero_grad()
for _path_id in range(num_candidate_ops):
sampled_arch = [_operations[_path_id] for _operations in path_list]
logits = model(inputs, sampled_arch)
loss = criterion(logits, targets)
loss.backward()
# record training metrics
prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
train_loss.update(loss.item(), inputs.size(0))
train_top1.update(prec1.item(), inputs.size(0))
train_top5.update(prec5.item(), inputs.size(0))
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
optimizer.step()
elif args.method == 'sumnas':
# record the supernet weights
weights_before = deepcopy(model.state_dict())
grad_list = []
# shuffle the ops to get sub-models fairly
for _ in range(candidate_edges):
random.shuffle(candidate_ops)
path_list.append(deepcopy(candidate_ops))
# inner loop
for _path_id in range(num_candidate_ops):
sampled_arch = [_operations[_path_id] for _operations in path_list]
# inner optimization
for _step in range(args.adaption_steps):
inner_optimizer.zero_grad()
logits = model(inputs, sampled_arch)
loss = criterion(logits, targets)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
inner_optimizer.step()
# record training metrics
prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
train_loss.update(loss.item(), inputs.size(0))
train_top1.update(prec1.item(), inputs.size(0))
train_top5.update(prec5.item(), inputs.size(0))
# record reptile gradient
outer_grad = []
weights_after = deepcopy(model.state_dict())
for p_0, p_T in zip(weights_before.items(), weights_after.items()):
outer_grad.append(-(p_T[1] - p_0[1]).detach())
grad_list.append(outer_grad)
model.load_state_dict(weights_before)
# outer loop
optimizer.zero_grad()
weight = torch.ones(len(grad_list)) / len(grad_list)
grad = mix_grad(grad_list, weight)
apply_grad(model, grad)
optimizer.step()
else:
raise ValueError('Wrong training method for the supernet: %s' % args.method)
# record
prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
train_loss.update(loss.item(), inputs.size(0))
train_top1.update(prec1.item(), inputs.size(0))
train_top5.update(prec5.item(), inputs.size(0))
if step % args.print_freq == 0 or step + 1 == len(train_loader):
logging.info('[Training] Epoch %03d/%03d, step %03d/%03d, loss: %.3f, top1: %.3f, top5: %.3f'
% (epoch, args.epochs, step, len(train_loader), train_loss.avg, train_top1.avg, train_top5.avg))
return train_loss.avg, train_top1.avg, train_top5.avg
def valid(valid_loader, model, criterion):
val_loss, val_top1, val_top5 = AverageMeter(), AverageMeter(), AverageMeter()
model.eval()
with torch.no_grad():
for step, (val_inputs, val_targets) in enumerate(valid_loader):
val_inputs = val_inputs.to(args.device)
val_targets = val_targets.to(args.device)
# randomly sample an arch
candidate_ops = range(5)
candidate_edges = 6
sampled_arch = [
random.choice(candidate_ops) for _ in range(candidate_edges)
]
# prediction
logits = model(val_inputs, sampled_arch)
loss = criterion(logits, val_targets)
# record
prec1, prec5 = obtain_accuracy(
logits.data, val_targets.data, topk=(1, 5)
)
val_loss.update(loss.item(), val_inputs.size(0))
val_top1.update(prec1.item(), val_inputs.size(0))
val_top5.update(prec5.item(), val_inputs.size(0))
return val_loss.avg, val_top1.avg, val_top5.avg
def valid_specific_path(valid_loader, model, sampled_arch, criterion, device):
val_loss, val_top1, val_top5 = AverageMeter(), AverageMeter(), AverageMeter()
model.eval()
with torch.no_grad():
for step, (val_inputs, val_targets) in enumerate(valid_loader):
val_inputs = val_inputs.to(device)
val_targets = val_targets.to(device)
# prediction
logits = model(val_inputs, sampled_arch)
loss = criterion(logits, val_targets)
# record
prec1, prec5 = obtain_accuracy(
logits.data, val_targets.data, topk=(1, 5)
)
val_loss.update(loss.item(), val_inputs.size(0))
val_top1.update(prec1.item(), val_inputs.size(0))
val_top5.update(prec5.item(), val_inputs.size(0))
return val_loss.avg, val_top1.avg, val_top5.avg
def rank_supernet(valid_loader, model, criterion):
logging.info('---------- Start to rank on NAS-Bench-201 ----------')
nasbench201 = np.load('./dataset/nasbench201/nasbench201_dict.npy', allow_pickle=True).item()
if args.debug:
new_dict = {}
for i in range(5):
new_dict[str(i)] = deepcopy(nasbench201[str(i)])
nasbench201 = deepcopy(new_dict)
nasbench201_len = len(nasbench201)
tmp_pred = []
tmp_target = []
prediction = {}
for step, item in enumerate(nasbench201):
model_id = int(item)
operation = nasbench201[item]['operation']
target = nasbench201[item]['cifar10_test']
val_loss, val_top1, val_top5 = valid_specific_path(valid_loader, model, operation, criterion, args.device)
tmp_pred.append(val_top1)
tmp_target.append(target)
prediction[model_id] = {'id': model_id, 'model_gene': operation, 'pred': val_top1, 'target': target}
if step % args.rank_print_freq == 0 or (step + 1) == nasbench201_len:
logging.info("model_id: %d gene: %s loss: %.3f top1: %.3f target: %.3f"
% (model_id, str(operation), val_loss, val_top1, target))
logging.info("Evaluated: %05d\tWaiting: %05d\tCurrent Kendall's Tau: %.5f" %
(len(tmp_pred), nasbench201_len-len(tmp_pred), kendalltau(tmp_pred, tmp_target)[0]))
# save predictions
print('\n')
np.save(args.pred_path, prediction)
logging.info('Finish ranking and save predictions to : %s' % args.pred_path)
final_ranking = kendalltau(tmp_pred, tmp_target)[0]
logging.info("Final_pred: %05d\tFinal_target: %05d\tFinal_Kendall's Tau: %.5f" %
(len(tmp_pred), len(tmp_target), final_ranking))
return final_ranking
def main():
# time record
train_start = time.time()
# dataloader
train_loader, valid_loader = get_dataloader(args, model=None, dataset=args.dataset)
# supernet
model = Supernet201(
C=args.channel, N=args.num_cells, max_nodes=args.max_nodes,
num_classes=args.num_classes, search_space=NAS_BENCH_201
).to(args.device)
# training settings
optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.wd)
if args.method == 'sumnas':
args.adaption_steps = 2
optimizer = torch.optim.SGD(model.parameters(), 1.0, weight_decay=4e-5)
inner_optimizer = torch.optim.SGD(model.parameters(), 0.05, momentum=0.9)
else:
inner_optimizer = None
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs)
criterion = torch.nn.CrossEntropyLoss()
best_val_top1 = 0.0
logging.info('---------- Start to train supernet ----------')
for epoch in range(args.epochs):
# train supernet
train_loss, train_top1, cnn_top5 = train(epoch, train_loader, model, criterion, optimizer, inner_optimizer)
logging.info(
"[Epoch: %s/%s] train_loss=%.3f, train_top1=%.3f, train_top5=%.3f" %
(epoch, args.epochs, train_loss, train_top1, cnn_top5)
)
# valid supernet
val_loss, val_top1, val_top5 = valid(valid_loader, model, criterion)
logging.info(
"[Validation], val_loss=%.3f, val_top1=%.3f, val_top5=%.3f, best_top1=%.3f" %
(val_loss, val_top1, val_top5, best_val_top1)
)
if best_val_top1 < val_top1:
best_val_top1 = val_top1
# save latest checkpoint
torch.save(model.state_dict(), args.ckpt_path)
logging.info('Save latest checkpoint to %s' % args.ckpt_path)
# scheduler step
scheduler.step()
print('\n')
# time record
supernet_training_elapse = time_record(train_start, prefix='Supernet training')
print('\n')
# load best supernet weights
latest_pretrained_weights = torch.load(args.ckpt_path)
model.load_state_dict(latest_pretrained_weights)
model.eval()
# ranking supernet
final_ranking = rank_supernet(valid_loader, model, criterion)
# write results
with open('./results/ranking.txt', 'a') as f:
f.write("EXP: %s \t Seed: %s \t Kendall' Tau: %.6f \t Training_Elapse: %s \n"
% (args.exp_name, args.seed, final_ranking, supernet_training_elapse))
if __name__ == "__main__":
run_func(args, main)
| ShunLu91/PA-DA | nasbench201/train_baselines_201.py | train_baselines_201.py | py | 14,254 | python | en | code | 29 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "torch... |
71718726268 | from . import Funktioner
import datetime
import MyModules.GUIclasses2 as GUI
import numpy as np
import os
from . import FileOps
#fix 14.04.10, simlk. Changed "centering error", which should make the test more forgiving at small distances - at large distances it has no effect.
# Last edit: 2012-01-09 fixed mtl test. Unified, 2012-04-23: fixed possible None return val in TestStretch
#Rejection criteria reflect a model variance for measurement of a stretch, and should correspond to a variance of half the parameter used in the test.
# Test is really - for two meas:
#par=prec=reject_par/2.....
#|diff|<2*sqrt(var_model(d,par)) - always linear in par.
#No more Centering err/ constant 'avoid zero' term!
#Thus the var-models are artificially close to zero. Instead a global minimum is defined (0.3 mm for now)!
GLOBAL_MIN_DEV=0.3 #twice precision on mean
def MTL_var_model_linear(dist,parameter):
dist=dist/1000.0
return (dist*parameter)**2
def MTL_var_model(dist,parameter):
dist=dist/1000.0
DLIM=0.2 #km
c_err=0 #divided by two below because 'precision' is (defined to be) half of 'reject par'
if dist<DLIM:
FKLIN=(np.sqrt(DLIM)*parameter-c_err*0.5)/DLIM
return (FKLIN*dist+c_err*0.5)**2
else:
return (parameter**2*dist)
def MGL_var_model(dist,parameter):
dist=dist/1000.0
c_err=0.0 #divided by two below because 'precision' is (defined to be) half of 'reject par'
return (np.sqrt(dist)*parameter+c_err*0.5)**2 #add a centering err....
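# Worked example: with the default MGL rejection parameter of 2.0 ne the test precision is
# 1.0 ne, so for a 1000 m stretch MGL_var_model(1000, 1.0) == 1.0 mm**2 and two measurements
# of the stretch are accepted when |diff| < 2*sqrt(1.0) = 2.0 mm (cf. GetMaxDev below).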
class FBreject(object):
def __init__(self,database,program="MGL",parameter=2.0,unit="ne"):
if program=="MGL":
self.var_model=MGL_var_model
else:
if unit=="ne":
self.var_model=MTL_var_model
else:
self.var_model=MTL_var_model_linear
self.unit=unit
self.parameter=parameter
        self.precision=parameter*0.5 #this is the corresponding 'precision'
self.initialized=False
self.found=False
self.wasok=False
self.database=database
self.initialized=True
def GetData(self):
data=""
for key in list(self.database.keys()):
s=self.database[key]
data+="%s->%s: dist: %.2f m\n" %(key[0],key[1],s.dist)
for i in range(len(s.hdiffs)):
data+="dh: %.4f m tid: %s j-side: %s\n" %(s.hdiffs[i],s.times[i].isoformat().replace("T",","),s.jpages[i])
return data
def GetDatabase(self):
return self.database
def TestStretch(self,start,end,hdiff): #returns foundstretch,testresult,#found,msg
self.found=False
self.wasok=False
msg=""
key_back=(end,start)
key_forward=(start,end)
nforward=0
nback=0
hdiffs_all=np.empty((0,))
dists=[]
if key_back in self.database:
s_back=self.database[key_back]
nback+=len(s_back.hdiffs)
if nback>0:
dists.append(s_back.dist)
hdiffs_all=np.append(hdiffs_all,np.array(s_back.hdiffs)*-1.0)
if key_forward in self.database:
s_forward=self.database[key_forward]
nforward+=len(s_forward.hdiffs)
if nforward>0:
dists.append(s_forward.dist)
hdiffs_all=np.append(hdiffs_all,np.array(s_forward.hdiffs))
msg+="%s->%s er tidligere m\u00E5lt %d gang(e), og %d gang(e) i modsat retning.\n" %(start,end,nforward,nback)
nall=len(hdiffs_all)
if len(hdiffs_all)>0:
d=np.mean(dists)
norm_d=np.sqrt(d/1e3)
msg+="Afstand: %.2f m\n" %d
if len(hdiffs_all)>1:
raw_mean=np.mean(hdiffs_all)
raw_std=np.std(hdiffs_all,ddof=1)
raw_prec=raw_std/np.sqrt(len(hdiffs_all))
raw_max_diff=hdiffs_all.max()-hdiffs_all.min()
msg+="hdiff_middel: %.4f m, max-diff: %.2f mm (%.2f ne)\n" %(raw_mean,raw_max_diff*1000,raw_max_diff*1e3/norm_d)
msg+="std_dev: %.2f mm, std_dev(middel): %.2f mm (%.2f ne)\n" %(raw_std*1000,raw_prec*1000,raw_prec*1e3/norm_d)
msg+="\nEfter inds\u00E6ttelse af ny m\u00E5ling:\n"
hdiffs_new=np.append(hdiffs_all,[hdiff])
new_mean=np.mean(hdiffs_new)
new_std=np.std(hdiffs_new,ddof=1)
new_prec=new_std/np.sqrt(len(hdiffs_new))
new_max_diff=hdiffs_new.max()-hdiffs_new.min()
msg+="hdiff_middel: %.4f m, max-diff: %.2f mm (%.2f ne)\n" %(new_mean,new_max_diff*1000,new_max_diff*1e3/norm_d)
msg+="std_dev: %.2f mm, std_dev(middel): %.2f mm (%.2f ne)\n" %(new_std*1000,new_prec*1000,new_prec*1e3/norm_d)
msg+="\nForkastelsesparameter: %.3f %s." %(self.parameter,self.unit)
max_dev=self.GetMaxDev(d) #in mm!!
if len(hdiffs_new)==2:
msg+=" Vil acceptere |diff|<%.2f mm" %(2*max_dev)
isok=(new_prec*1e3<=max_dev)
self.found=True
self.wasok=isok
if isok:
msg+="\nDen samlede standardafvigelse p\u00E5 middel er OK.\n"
else:
msg+="\nDen samlede standarafvigelse p\u00E5 middel er IKKE OK\n"
msg+="Foretag flere m\u00E5linger!\n"
if len(hdiffs_all)>1 and new_prec>raw_prec: #or something more fancy
msg+="Den nye m\u00E5ling er tilsyneladende en outlier og kan evt. omm\u00E5les!\n"
isok=False
return True,isok,len(hdiffs_all),msg
else:
msg="%s->%s er ikke m\u00E5lt tidligere" %(start,end)
self.found=False
self.wasok=True
return True,True,0,msg
def GetMaxDev(self,dist): #max dev in mm!
return max(np.sqrt(self.var_model(dist,self.precision)),GLOBAL_MIN_DEV*0.5)
def InsertStretch(self,start,end,hdiff,dist,dato,tid,jside=""):
if not self.initialized:
            return True #we haven't done anything
data=self.database
try:
start=start.strip()
end=end.strip()
key=(start,end)
m,h=Funktioner.GetTime(tid)
day,month,year=Funktioner.GetDate(dato)
date=datetime.datetime(year,month,day,h,m)
if key in data:
data[key].AddStretch(hdiff,dist,date,jside)
else:
data[key]=Stretch()
data[key].AddStretch(hdiff,dist,date,jside)
except Exception as msg:
print(repr(msg))
return False
else:
return True
def OutlierAnalysis(self):
data=self.database.copy()
msg=""
noutliers=0
nbad=0
keys=list(data.keys())
for key_forward in keys:
l_msg="%s->%s:" %key_forward
key_back=(key_forward[1],key_forward[0])
            if key_forward not in data: #could happen since we delete stuff below
continue
s_forward=data[key_forward]
hdiffs_all=np.array(s_forward.hdiffs)
nforward=len(s_forward.hdiffs)
dists=[s_forward.dist]
nback=0
if key_back in data:
s_back=data[key_back]
nback=len(s_back.hdiffs)
if nback>0:
dists.append(s_back.dist)
hdiffs_all=np.append(hdiffs_all,np.array(s_back.hdiffs)*-1.0)
d=np.mean(dists)
l_msg+=" m\u00E5lt %d gange frem og %d gange tilbage." %(nforward,nback)
report=False
if len(hdiffs_all)>1:
std_dev=np.std(hdiffs_all,ddof=1)
m=np.mean(hdiffs_all)
#same test as above#
prec=std_dev/np.sqrt(len(hdiffs_all))
max_dev=self.GetMaxDev(d) #in mm
#print max_dev,prec
is_ok=(prec*1e3<=max_dev)
if not is_ok:
nbad+=1
report=True
l_msg+="\nForkastelseskriterie IKKE overholdt."
l_msg+="\nTilladt fejl p\u00E5 middel: %.2f mm, aktuel fejl: %.2f mm" %(max_dev,prec*1e3)
if len(hdiffs_all)>2:
dh=np.fabs(hdiffs_all-m)
outlier_limit=1.5*std_dev
if len(hdiffs_all)==3:
outlier_limit=1.1*std_dev
I=np.where(np.fabs(dh)>outlier_limit)[0]
if I.size>0:
report=True
l_msg+="\nOutliere:"
for i in I:
noutliers+=1
if i>nforward-1:
i-=nforward
s=s_back
else:
s=s_forward
l_msg+="\nHdiff: %.4f m, m\u00E5lt %s, journalside: %s" %(s.hdiffs[i],s.times[i].isoformat().replace("T"," "),s.jpages[i])
hdiffs_new=np.delete(hdiffs_all,i)
new_prec=np.std(hdiffs_new,ddof=1)/np.sqrt(len(hdiffs_new))
l_msg+="\nFejl p\u00E5 middel: %.2f mm, fejl p\u00E5 middel uden denne m\u00E5ling: %.2f mm" %(prec*1e3,new_prec*1e3)
if report:
msg+="\n"+"*"*60+"\n"+l_msg
#Finally delete that entry#
del data[key_forward]
if nback>0:
del data[key_back]
nprob=noutliers+nbad
if nprob==0:
return True,"Ingen problemer fundet"
lmsg="%*s %d\n" %(-42,"#overtr\u00E6delser af forkastelseskriterie:",nbad)
lmsg+="%*s %d\n" %(-42,"#outliere:",noutliers)
return False,lmsg+msg
def IsInitialized(self):
return self.initialized
def GetNumber(self):
return len(self.database)
def Disconnect(self):
pass
def GetPlotData(program="MGL",parameter=2.0,unit="ne"):
if program=="MGL":
var_model=MGL_var_model
else:
if unit=="ne":
var_model=MTL_var_model
else:
var_model=MTL_var_model_linear
dists=np.arange(0,1500,10)
    precision=0.5*parameter #since parameter is 'reject-parameter' and we define precision as half of that - man :-)
out=2*np.sqrt([var_model(x,precision) for x in dists])
return np.column_stack((dists,out))
def GetGlobalMinLine(program="MGL"):
dists=[0,400.0]
hs=[GLOBAL_MIN_DEV,GLOBAL_MIN_DEV]
return np.column_stack((dists,hs))
class Stretch(object):
def __init__(self):
self.hdiffs=[]
self.dist=0
self.times=[]
self.jpages=[]
def AddStretch(self,hdiff,dist,date,jpage=""):
n=float(len(self.hdiffs))+1
self.dist=self.dist*(n-1)/n+dist/n
self.hdiffs.append(hdiff)
self.times.append(date)
self.jpages.append(jpage)
def MakeRejectData(resfiles):
data=dict()
nstrk=0
nerrors=0
for file in resfiles:
heads=FileOps.Hoveder(file)
for head in heads:
try:
key=(head[0],head[1])
hdiff=float(head[5])
dist=float(head[4])
jside=head[6]
tid=head[3]
dato=head[2]
m,h=Funktioner.GetTime(tid)
day,month,year=Funktioner.GetDate(dato)
date=datetime.datetime(year,month,day,h,m)
except Exception as msg:
print(repr(msg),head)
nerrors+=1
else:
if key in data:
data[key].AddStretch(hdiff,dist,date,jside)
else:
data[key]=Stretch()
data[key].AddStretch(hdiff,dist,date,jside)
nstrk+=1
return data,nerrors | SDFIdk/nivprogs | MyModules/FBtest.py | FBtest.py | py | 9,922 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.sqrt",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 8... |
74535524027 | from django.conf.urls import url
from . import views
app_name = 'api'
urlpatterns = [
url(r'^device/',views.device,name='api_device'),
url(r'^light/',views.light,name='api_light'),
url(r'^temperature/',views.temperature,name='api_temperature'),
url(r'^humidity/',views.humidity,name='api_humidity'),
url(r'^dirt_humidity/',views.dirt_humidity,name='api_dirt_humidity'),
url(r'^fertilization/',views.fertilization,name='api_fertilization'),
url(r'^water/',views.water,name='api_water'),
url(r'^schedule/',views.schedule,name='api_schedule'),
url(r'^user/',views.user,name='api_user'),
url(r'^.*', views.noSuchApi, name='api_no_such_api'),
] | CreeperSan/Graduation-Project | Web/field/api/urls.py | urls.py | py | 699 | python | en | code | 50 | github-code | 6 | [
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.c... |
72067319869 | import numpy as np
import cv2
def compute_perspective_transform(corner_points,width,height,image):
""" Compute the transformation matrix
@ corner_points : 4 corner points selected from the image
@ height, width : size of the image
"""
# Create an array out of the 4 corner points
corner_points_array = np.float32(corner_points)
# Create an array with the parameters (the dimensions) required to build the matrix
img_params = np.float32([[0,0],[width,0],[0,height],[width,height]])
# Compute and return the transformation matrix
matrix = cv2.getPerspectiveTransform(corner_points_array,img_params)
img_transformed = cv2.warpPerspective(image,matrix,(width,height))
return matrix,img_transformed
def compute_point_perspective_transformation(matrix,list_downoids):
""" Apply the perspective transformation to every ground point which have been detected on the main frame.
@ matrix : the 3x3 matrix
@ list_downoids : list that contains the points to transform
return : list containing all the new points
"""
# Compute the new coordinates of our points
list_points_to_detect = np.float32(list_downoids).reshape(-1, 1, 2)
transformed_points = cv2.perspectiveTransform(list_points_to_detect, matrix)
# Loop over the points and add them to the list that will be returned
transformed_points_list = list()
for i in range(0,transformed_points.shape[0]):
transformed_points_list.append([transformed_points[i][0][0],transformed_points[i][0][1]])
return transformed_points_list
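# Illustrative usage sketch (corner points, frame and sizes below are assumptions, not from this module):
#   corner_points = [(50, 60), (600, 70), (40, 400), (610, 420)]   # 4 points picked on the camera frame
#   matrix, warped = compute_perspective_transform(corner_points, 800, 600, frame)
#   ground_points = [[320, 400], [100, 380]]                        # detected feet positions in the frame
#   bird_view_points = compute_point_perspective_transformation(matrix, ground_points)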
| basileroth75/covid-social-distancing-detection | src/bird_view_transfo_functions.py | bird_view_transfo_functions.py | py | 1,517 | python | en | code | 123 | github-code | 6 | [
{
"api_name": "numpy.float32",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.getPerspectiveTransform",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.warpPersp... |
40205551139 | # encoding: utf-8
"""
GraphicInterface.py
Displays the op amp calculator
Dario Marroquin 18269 (dariomarroquin)
Pablo Ruiz 18259 (PingMaster99)
Version 1.0
Updated March 4, 2020
"""
from tkinter import *
from CalculationsModule import *
import matplotlib.pyplot as plt
import numpy as np
# Constants
TITLE_SIZE = 15
def calculate():
"""
Performs the op amp calculator calculations
"""
plt.clf()
inverter = int(opa.get()) == 1
point = vin.get()
try:
point = float(point)
except ValueError:
vin.delete(0, END)
point = None
# Needed data
populate_calculations()
function, result, real_value = calculate_opamp_function(point, inverter)
spline_result, spline_print = calculate_opamp_spline(point)
error = calculate_error(point, result, inverter)
spline_error = calculate_error(point, spline_result, inverter)
# Error comparison
print("Error mínimo cuadrado:", error, "%\nError trazadores cúbicos: ", spline_error, "%\n\nTrazadores:\n",
spline_print, "\n\n")
if type(result) is not str:
        result = str(round(result, 4))
if type(error) is not str:
error = str(round(error, 4)) + " %"
if function[0] > 0:
a0 = "+ " + str(round(function[0], 4))
elif function[0] < 0:
a0 = "- " + str(round(function[0], 4))[1:]
else:
a0 = ""
result_funcion["text"] = f"{round(function[1], 4)} * Vin {a0}"
result_ev["text"] = result
result_err["text"] = error
x_1 = np.linspace(0, 20)
y_1 = x_1 * real_value
y_2 = x_1 * function[1] + function[0]
# Results graph
plt.plot(x_1, y_1, label="Teórico")
plt.plot(x_1, y_2, label="Experimental")
plt.legend()
plt.title("Función teórica y experimental")
plt.xlabel("Vin")
plt.ylabel("Vout")
plt.show()
"""
GUI window with grid layout
"""
window = Tk()
window.columnconfigure(0, minsize=100)
window.columnconfigure(1, minsize=100)
window.columnconfigure(2, minsize=100)
window.columnconfigure(3, minsize=100)
window.columnconfigure(4, minsize=100)
window.columnconfigure(5, minsize=100)
window.columnconfigure(6, minsize=100)
window.columnconfigure(7, minsize=50)
window.rowconfigure(0, minsize=30)
window.rowconfigure(1, minsize=30)
window.rowconfigure(2, minsize=30)
window.rowconfigure(3, minsize=30)
window.rowconfigure(4, minsize=30)
window.rowconfigure(5, minsize=30)
window.rowconfigure(6, minsize=30)
window.rowconfigure(7, minsize=30)
"""
Titles
"""
title = Label(window, text="Calculadora de Op amps", bg="#595358", fg="white")
title.config(font=("Arial", 20))
title.grid(column=0, row=0, columnspan=8, sticky="we")
"""
Input
"""
vin = Entry(window, font="Arial 20")
vin.grid(row=1, column=4)
vin_title = Label(window, text="Vin", bg="#3891A6", fg="BLACK")
vin_title.config(font=("Arial", TITLE_SIZE))
vin_title.grid(row=1, column=3)
"""
RadioButton
"""
opa = StringVar(window, True)
# Dictionary to create multiple buttons
radio = {"Opamp Amplificador Inversor": True,
"Opamp Amplificador no inversor": False,
}
# Loop is used to create multiple Radiobuttons
# rather than creating each button separately
for (text, value) in radio.items():
Radiobutton(window, text=text, variable=opa, value=value).grid(columnspan=2, pady=(1, 0))
"""
Buttons
"""
calculate_button = Button(window, text="Calcular", padx=20, pady=10, command=calculate, bg="#99c24d")
calculate_button.config(font=("Arial", 15))
calculate_button.grid(row=2, column=6)
"""
Results
"""
result_funcion = Label(window)
result_funcion.grid(row=2, column=4)
rsf_title = Label(window, text="Función", bg="#3891A6", fg="BLACK")
rsf_title.config(font=("Arial", TITLE_SIZE))
rsf_title.grid(row=2, column=3)
result_ev = Label(window)
result_ev.grid(row=3, column=4)
rsev_title = Label(window, text="Vout", bg="#3891A6", fg="BLACK")
rsev_title.config(font=("Arial", TITLE_SIZE))
rsev_title.grid(row=3, column=3)
result_err = Label(window)
result_err.grid(row=4, column=4)
rserr_title = Label(window, text="Error (%)", bg="#3891A6", fg="BLACK")
rserr_title.config(font=("Arial", TITLE_SIZE))
rserr_title.grid(row=4, column=3)
"""
Circuit picture
"""
photo = PhotoImage(file=r"./OPAMPS.png")
image = Button(window, image=photo, padx=0, pady=0)
image.config(height=200, width=500)
image.grid(row=6, column=1, columnspan=5, pady=(0, 20))
"""
Window display
"""
window.geometry("980x500")
window.config(bg="#B2CEDE")
window.mainloop()
| PingMaster99/MNOpampCalculator | GraphicInterface.py | GraphicInterface.py | py | 4,699 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyp... |
23660254288 | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
""" A Python logging library with super powers """
import sys
import textwrap
from os import getcwd, path as p
from argparse import RawTextHelpFormatter, ArgumentParser
from pickle import dump, load
from io import open
from functools import partial, lru_cache
from signal import signal, SIGINT
import pygogo as gogo
from dateutil.parser import parse as parse_date
from chakula import tail, __version__
from chakula.formatter import PLACEHOLDERS, Formatter
try:
from redisworks import Root as OldRoot
except ImportError:
OldRoot = object
DEF_TIME_FMT = '%Y/%m/%d %H:%M:%S'
DEF_INTERVAL = '300s'
CURDIR = p.basename(getcwd())
LOGFILE = '%s.log' % CURDIR
FIELDS = sorted(PLACEHOLDERS)
logger = gogo.Gogo(__name__, monolog=True).logger
examples = r'''
Format specifiers must have one the following forms:
%%(placeholder)[flags]s
{placeholder:flags}
Examples:
%(prog)s <url>
echo '<url>' | %(prog)s --reverse
%(prog)s -s pubdate -s title -s author <url1> <url2> <url3>
%(prog)s --interval 60s --newer "2011/12/20 23:50:12" <url>
%(prog)s --format '%%(timestamp)-30s %%(title)s\n' <url>
%(prog)s --format '%%(title)s was written on %%(pubdate)s\n' <url>
%(prog)s --format '{timestamp:<30} {title} {author}\n' <url>
%(prog)s --format '{timestamp:<20} {pubdate:^30} {author:>30}\n' <url>
%(prog)s --time-format '%%Y/%%m/%%d %%H:%%M:%%S' <url>
%(prog)s --time-format 'Day of the year: %%j Month: %%b' <url>
Useful flags in this context are:
%%(placeholder)-10s - left align and pad
%%(placeholder)10s - right align and pad
{placeholder:<10} - left align and pad
{placeholder:>10} - right align and pad
{placeholder:^10} - center align and pad
'''
available = textwrap.wrap('Available fields: {}'.format(', '.join(FIELDS)))
epilog = [textwrap.dedent(examples)] + available
def timespec(value):
"""Parse the 'timespec' option:
>>> timespec(1)
1
>>> timespec('5m')
300
>>> timespec('1h')
3600
"""
try:
return int(value)
except ValueError:
multiply = {'s': 1, 'm': 60, 'h': 3600}
suffix = value[-1]
msg = 'invalid timespec value {} - hint: 60, 60s, 1m, 1h'
if suffix in multiply:
try:
v = int(value[:-1])
return v * multiply[suffix]
except ValueError:
                raise ValueError(msg.format(value))
else:
raise ValueError(msg.format(value))
parser = ArgumentParser(
description='description: Tails 1 or more rss feeds',
prog='chakula',
usage='%(prog)s [options] <url> [<url> ...]',
formatter_class=RawTextHelpFormatter,
epilog='\n'.join(epilog),
)
parser.add_argument(
dest='urls', nargs='*', default=[sys.stdin],
help='The urls to tail (default: reads from stdin).')
i_help = 'Number of seconds between polling (default: {}).'
parser.add_argument(
'-i', '--interval', action='store', help=i_help.format(DEF_INTERVAL),
type=timespec, default=DEF_INTERVAL)
parser.add_argument(
'-N', '--iterations', action='store', type=int,
help='Number of times to poll before quiting (default: inf).')
parser.add_argument(
'-I', '--initial', action='store', type=int,
help='Number of entries to show (default: all)')
parser.add_argument(
'-n', '--newer', metavar='DATE', action='store',
    help='Only show entries newer than this date.')
parser.add_argument(
'-s', '--show', metavar='FIELD', choices=FIELDS, action='append',
help='Entry field to display (default: title).', default=[])
t_help = "The date/time format (default: 'YYYY/MM/DD HH:MM:SS')."
parser.add_argument(
'-t', '--time-format', metavar='FORMAT', action='store',
default=DEF_TIME_FMT, help=t_help)
parser.add_argument(
'-F', '--format', action='store',
help='The output format (overrides other format options).')
parser.add_argument(
'-c', '--cache', action='store',
help='File path to store feed information across multiple runs.')
parser.add_argument(
'-r', '--reverse', action='store_true',
help='Show entries in reverse order.')
parser.add_argument(
'-f', '--fail', action='store_true', help='Exit on error.')
parser.add_argument(
'-u', '--unique', action='store_true', help='Skip duplicate entries.')
parser.add_argument(
'-H', '--heading', action='store_true', help='Show field headings.')
parser.add_argument(
'-v', '--version', help="Show version and exit.", action='store_true',
default=False)
parser.add_argument(
'-V', '--verbose', help='Increase output verbosity.', action='store_true',
default=False)
class Root(OldRoot):
def __init__(self, conn, return_object=True, *args, **kwargs):
super(Root, self).__init__(*args, **kwargs)
self.red = conn
self.return_object = return_object
self.setup()
get_root = lru_cache(maxsize=8)(lambda conn: Root(conn))
def sigint_handler(signal=None, frame=None):
logger.info('\nquitting...\n')
sys.exit(0)
def update_cache(path, extra, redis=False):
if redis:
root = get_root(path)
try:
items = extra.__dict__['_registry'].evaluated_items
except AttributeError:
root.extra = extra
else:
root.extra = items['root.extra']
return root.red
else:
with open(path, 'wb') as f:
dump(extra, f)
return path
def load_extra(path, redis=False):
if redis:
root = get_root(path)
extra = root.extra or {}
for k, v in extra.items():
v['updated'] = tuple(v.get('updated') or [])
v['modified'] = tuple(v.get('modified') or [])
else:
try:
with open(path, 'rb') as f:
extra = load(f)
except FileNotFoundError:
extra = {}
return extra
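# Illustrative round trip for the pickle-backed cache (the file name below is an assumption):
#   update_cache('feeds.cache', {'http://example.com/feed': {'updated': (), 'modified': ()}})
#   extra = load_extra('feeds.cache')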
def run():
"""CLI runner"""
args = parser.parse_args()
kwargs = {'monolog': True, 'verbose': args.verbose}
logger = gogo.Gogo(__name__, **kwargs).get_logger('run')
signal(SIGINT, sigint_handler)
if args.version:
logger.info('chakula v%s' % __version__)
exit(0)
if args.newer:
newer = parse_date(args.newer).timetuple()
logger.debug('showing entries newer than %s', newer)
else:
newer = None
if args.format:
fmt = args.format.replace('\\n', '\n')
formatter = Formatter(fmt, args.time_format)
else:
show = args.show or ['title']
pargs = (show, args.time_format, args.heading)
formatter = Formatter.from_fields(*pargs)
logger.debug('using format: %r', formatter.fmt)
logger.debug('using time format: %r', formatter.time_fmt)
info = {
'seen': set() if args.unique else None, 'newer': newer,
'reverse': args.reverse, 'iterations': args.iterations,
'interval': args.interval, 'formatter': formatter,
'initial': args.initial, 'logger': logger, 'fail': args.fail}
first = args.urls[0]
if hasattr(first, 'isatty') and first.isatty(): # called with no args
# This doesn't work for scripttest though
parser.print_help()
sys.exit(0)
    elif hasattr(first, 'read'): # piped into stdin
urls = first.read().splitlines()
else:
urls = args.urls
if args.cache:
extra = load_extra(args.cache)
info['tail_handler'] = partial(update_cache, args.cache)
else:
extra = {}
tail(urls, extra=extra, **info)
sys.exit(0)
if __name__ == '__main__':
run()
| reubano/chakula | chakula/main.py | main.py | py | 7,603 | python | en | code | null | github-code | 6 | [
{
"api_name": "redisworks.Root",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "os.path.basename",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "os.getcwd",
"line_number... |
28470996419 | import os
import sys
from lockdoors import main
from lockdoors import sanitize
from lockdoors import shrts
from pathlib import Path
from datetime import datetime
from time import sleep
#VAR
yes = set(['yes', 'y', 'ye', 'Y'])
no = set(['no', 'n', 'nop', 'N'])
cwd = os.getcwd()
null = ""
###Cheatsheets
def revsh():
shrts.clscprilo()
print("\033[91mHere is the list of the files :\033[90m")
print("\033[92m")
os.system(" find " + shrts.getinstalldir() + "/REVERSE/CHEATSHEETS/ -type f")
print("\033[90m")
shrts.okrev()
#Tools
def radar2():
radar2.title = "Radar 2 : unix-like reverse engineering framework"
tool_dir = "/REVERSE/Tools/radar2"
shrts.prilogspc()
os.system("git clone https://github.com/radare/radare2.git " + shrts.getinstalldir() + tool_dir + null)
shrts.clscprilo()
print("\033[92m Radar2 Downlaoded successfully \033[90m")
shrts.spc()
print("\033[92m Check " + shrts.getinstalldir() + tool_dir +" Folder\033[90m")
shrts.okrev()
def virustotal():
virustotal.title = "VirusTotal tools"
tool_dir = "/REVERSE/Tools/virustotal"
if os.path.exists('/usr/local/bin/virustotal'):
shrts.prilogspc()
os.system("git clone https://github.com/botherder/virustotal.git " + shrts.getinstalldir() + tool_dir + null)
shrts.prilogspc()
print("\033[92m " + virustotal.title + "\033[90m")
shrts.spc()
        key = sanitize.bash_escape_restrictor(input("\033[92mEnter the VirusTotal API key ? : \033[90m"))
outp = sanitize.bash_escape_restrictor(input("\033[92mEnter directory containing files to scan ? : \033[90m"))
os.system("python2 " + shrts.getinstalldir() + tool_dir + "/vt.py --key "+key+" " +outp)
shrts.okrev()
else:
shrts.prilogspc()
print("\033[92m " + virustotal.title + "\033[90m")
shrts.spc()
print("\033[91mDownloading ...\033[0m")
shrts.spc()
os.system("git clone https://github.com/botherder/virustotal.git " + shrts.getinstalldir() + tool_dir + null)
shrts.prilogspc()
print("\033[92m " + virustotal.title + "\033[90m")
shrts.spc()
shrts.prilogspc()
print("\033[91mInstalling ...\033[0m.")
shrts.spc()
os.system("""echo "#!/bin/bash" > /usr/local/bin/virustotal""")
os.system("""echo "#Dev : Sofiane Hamlaoui" >> /usr/local/bin/virustotal""")
os.system("echo python2 " + shrts.getinstalldir() + tool_dir + "/vt.py >> /usr/local/bin/virustotal")
os.system("chmod +x /usr/local/bin/virustotal")
print(("You can now use " + "\033[91m" + virustotal.title + "\033[90m" + " from Lockdoor [\033[92m Lockdoor \033[90m ]" ))
shrts.okrev()
def miasm():
miasm.title = "miasm : Reverse engineering framework"
tool_dir = "/REVERSE/Tools/miasm"
shrts.prilogspc()
os.system("git clone https://github.com/cea-sec/miasm.git " + shrts.getinstalldir() + tool_dir + null)
shrts.prilogspc()
os.system("cd " +shrts.getinstalldir() + tool_dir + " && python2 setup.py build")
os.system("cd " +shrts.getinstalldir() + tool_dir + " && python2 setup.py install")
shrts.spc()
print("\033[92m Miasm Downlaoded successfully \033[90m")
shrts.spc()
print("\033[92m Check " + shrts.getinstalldir() + tool_dir +" Folder\033[90m")
shrts.okrev()
def mirror():
mirror.title = "mirror : reverses the bytes of a file"
tool_dir = "/REVERSE/Tools/mirror"
shrts.prilogspc()
os.system("git clone https://github.com/guelfoweb/mirror.git " + shrts.getinstalldir() + tool_dir + null)
shrts.clr()
shrts.prilogspc()
print("\033[92m Mirror Downlaoded successfully \033[90m")
shrts.spc()
print("\033[92m Check " + shrts.getinstalldir() + tool_dir +" Folder\033[90m")
shrts.okrev()
def Dnspy():
Dnspy.title = "Dnspy : reverses the bytes of a file"
tool_dir = "/REVERSE/Tools/Dnspy"
shrts.prilogspc()
os.system("git clone https://github.com/0xd4d/dnSpy.git " + shrts.getinstalldir() + tool_dir + null)
shrts.clr()
shrts.prilogspc()
print("\033[92m Dnspy Downlaoded successfully \033[90m")
shrts.spc()
print("\033[92m Check " + shrts.getinstalldir() + tool_dir +" Folder\033[90m")
shrts.okrev()
def angrio():
angrio.title = "angrio : a python framework for analyzing binaries"
tool_dir = "/REVERSE/Tools/angrio"
shrts.prilogspc()
print("\033[92m Installing \033[90m")
shrts.spc()
os.system("pip install angr ")
shrts.clr()
shrts.prilogspc()
print("\033[92m Dnspy Downlaoded successfully \033[90m")
shrts.spc()
print("\033[92m Check Angr.io docs to learn more about the tool \033[90m")
print("\033[92m https://github.com/angr/angr-doc \033[90m")
shrts.okrev()
def dllrunner():
dllrunner.title = "Dllrunner : a smart DLL execution script for malware analysis"
tool_dir = "/REVERSE/Tools/dllrunner"
shrts.prilogspc()
os.system("git clone https://github.com/Neo23x0/DLLRunner " + shrts.getinstalldir() + tool_dir + null)
shrts.clr()
shrts.prilogspc()
print("\033[92m Dllrunner Downlaoded successfully \033[90m")
shrts.spc()
print("\033[92m Check "+ shrts.getinstalldir() + tool_dir + " Folder\033[90m")
shrts.okrev()
def yara():
yara.title = "YARA : a tool to identify and classify malwares "
tool_dir = "/REVERSE/Tools/yara"
shrts.prilogspc()
print("\033[92m Installing \033[90m")
shrts.spc()
os.system("pip install yara-python")
shrts.clr()
shrts.prilogspc()
print("\033[92m YARA Downlaoded successfully \033[90m")
shrts.spc()
print("\033[92m Check YARA Docs to learn more about the tool\033[90m")
print("\033[92m https://yara.readthedocs.io/en/latest/\033[90m")
shrts.okrev()
#Menu
def menu():
shrts.clscprilo()
print("""\033[94m
[ REVERSE ENGINEERING ]
Make A Choice :\033[90m
\033[91m -[!]----- Tools ------[!]-\033[90m
\033[93m1) Radar2
2) Virustotal
3) Miasm
4) Mirror
5) Dnspy
6) Angrio
7) DLLRunner
8) Yara\033[90m
\033[91m-[!]----- Cheatsheets ------[!]-\033[90m
\033[93m 9) Reverse Engineering Cheatsheets\033[90m
------------------------
\033[94mb) Back to ROOT MENU
q) Leave Lockdoor\033[94m
""")
choice = input("\033[92mLockdoor@ReverseEngineering~# \033[0m")
os.system('clear')
if choice == "1":
radar2()
elif choice == "2":
virustotal()
elif choice == "3":
miasm()
elif choice == "4":
mirror()
elif choice == "5":
Dnspy()
elif choice == "6":
angrio()
elif choice == "7":
dllrunner()
elif choice == "8":
yara()
elif choice == "9":
revsh()
elif choice == "b":
main.menu()
elif choice == "q":
shrts.prilogspc()
now = datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
print(" \033[91m-[!]- LOCKDOOR IS EXITING -[!]-\033[0m")
shrts.spc()
print(" \033[91m-[!]- EXITING AT " + dt_string + " -[!]-\033[0m")
sys.exit()
elif choice == "":
menu()
else:
menu()
| SofianeHamlaoui/Lockdoor-Framework | lockdoors/reverse.py | reverse.py | py | 7,496 | python | en | code | 1,248 | github-code | 6 | [
{
"api_name": "os.getcwd",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "lockdoors.shrts.clscprilo",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "lockdoors.shrts",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "os.system",
"... |
18230626408 | # Tim Marder
# SoftDev1 pd06
# K#13 -- Echo Echo Echo
# 2018-09-28
from flask import Flask, render_template, request
app = Flask(__name__) #create instance of class Flask
@app.route("/") #assign fxn to route
def hello_world():
return render_template("home.html")
@app.route("/auth", methods = ["GET", "POST"])
def authenticate():
print(app)
print(request)
print(request.args)
print(request.headers)
return render_template("auth.html",
first = request.form['first'],
last = request.form['last'],
request = request.method)
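# Example request (illustrative values); auth.html is rendered from the posted form fields:
#   curl -X POST -d "first=Ada" -d "last=Lovelace" http://127.0.0.1:5000/auth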
if __name__ == "__main__":
app.debug = True
app.run()
| TimMarder/SoftDev-Office | 13_formation/app.py | app.py | py | 701 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "flask.request.args... |
33390551810 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import gzip
from collections import defaultdict
import math
import scipy.optimize
import numpy
import string
import random
from sklearn import linear_model
import sklearn
# In[2]:
# This will suppress any warnings, comment out if you'd like to preserve them
import warnings
warnings.filterwarnings("ignore")
# In[3]:
# Check formatting of submissions
def assertFloat(x):
assert type(float(x)) == float
def assertFloatList(items, N):
assert len(items) == N
assert [type(float(x)) for x in items] == [float]*N
# In[4]:
answers = {}
# In[5]:
f = open("spoilers.json.gz", 'r')
# In[6]:
dataset = []
for l in f:
d = eval(l)
dataset.append(d)
# In[7]:
f.close()
# In[8]:
# A few utility data structures
reviewsPerUser = defaultdict(list)
reviewsPerItem = defaultdict(list)
for d in dataset:
u,i = d['user_id'],d['book_id']
reviewsPerUser[u].append(d)
reviewsPerItem[i].append(d)
# Sort reviews per user by timestamp
for u in reviewsPerUser:
reviewsPerUser[u].sort(key=lambda x: x['timestamp'])
# Same for reviews per item
for i in reviewsPerItem:
reviewsPerItem[i].sort(key=lambda x: x['timestamp'])
# In[9]:
# E.g. reviews for this user are sorted from earliest to most recent
[d['timestamp'] for d in reviewsPerUser['b0d7e561ca59e313b728dc30a5b1862e']]
# In[10]:
### 1
# In[11]:
def MSE(y, ypred):
return sum([(a-b)**2 for (a,b) in zip(y,ypred)]) / len(y)
# In[12]:
# (a)
y = []
y_pred = []
for u in reviewsPerUser:
cur = []
reviews = reviewsPerUser[u]
for i in range(0, len(reviews) - 1):
cur.append(reviews[i]['rating'])
if len(cur) == 0:
continue
y_pred.append(sum(cur)/len(cur))
y.append(reviews[-1]['rating'])
answers['Q1a'] = MSE(y, y_pred)
assertFloat(answers['Q1a'])
# In[13]:
# (b)
y = []
y_pred = []
for u in reviewsPerItem:
cur = []
reviews = reviewsPerItem[u]
for i in range(0, len(reviews) - 1):
cur.append(reviews[i]['rating'])
if len(cur) == 0:
continue
y_pred.append(sum(cur)/len(cur))
y.append(reviews[-1]['rating'])
answers['Q1b'] = MSE(y, y_pred)
assertFloat(answers['Q1b'])
# In[14]:
### 2
answers['Q2'] = []
for N in [1,2,3]:
y = []
y_pred = []
for u in reviewsPerUser:
cur = []
reviews = reviewsPerUser[u]
for i in range(0, len(reviews) - 1):
cur.append(reviews[i]['rating'])
if len(cur) == 0:
continue
if len(cur) < N:
cur_new = cur
if len(cur) >= N:
cur_new = cur[-N:]
y_pred.append(sum(cur_new)/len(cur_new))
y.append(reviews[-1]['rating'])
answers['Q2'].append(MSE(y,y_pred))
# In[15]:
assertFloatList(answers['Q2'], 3)
# In[16]:
answers
# In[17]:
### 3a
# In[18]:
def feature3(N, u): # For a user u and a window size of N
cur = []
reviews = reviewsPerUser[u]
for i in range(0, len(reviews) - 1):
cur.append(reviews[i]['rating'])
feat = [1]
for n in range(1, N + 1):
feat.append(cur[-n])
return feat
# In[19]:
answers['Q3a'] = [feature3(2,dataset[0]['user_id']), feature3(3,dataset[0]['user_id'])]
# In[20]:
assert len(answers['Q3a']) == 2
assert len(answers['Q3a'][0]) == 3
assert len(answers['Q3a'][1]) == 4
# In[21]:
### 3b
answers['Q3b'] = []
def feat(N, u):
feat = [1]
data = reviewsPerUser[u]
for d in data[-N-1:-1]:
feat.append(d['rating'])
return feat
for N in [1,2,3]:
X = []
y = []
for u,data in reviewsPerUser.items():
if len(data) <= N:
continue
else:
X.append(feat(N,u))
y.append(data[-1]['rating'])
model = sklearn.linear_model.LinearRegression(fit_intercept=False)
model.fit(X, y)
y_pred = model.predict(X)
mse = MSE(y, y_pred)
answers['Q3b'].append(mse)
assertFloatList(answers['Q3b'], 3)
answers
# In[22]:
### 4a
globalAverage = [d['rating'] for d in dataset]
globalAverage = sum(globalAverage) / len(globalAverage)
def featureMeanValue(N, u): # For a user u and a window size of N
feat = [1]
data = reviewsPerUser[u]
if len(data) < N + 1:
if len(data) < 2:
for j in range(N):
feat.append(globalAverage)
elif len(data) >= 2:
rate = [review['rating'] for review in data[:-1]]
avg = sum(rate)/len(rate)
for i in range(len(data)-1):
feat.append(data[-i-2]['rating'])
for i in range(N-len(data)+1):
feat.append(avg)
else:
for i in range(N):
feat.append(data[-i-2]['rating'])
return feat
def featureMissingValue(N, u):
feat = [1]
data = reviewsPerUser[u]
if len(data) < N + 1:
if len(data) < 2:
for j in range(N):
feat.append(1)
feat.append(0)
elif len(data) >= 2:
for i in range(len(data)-1):
feat.append(0)
feat.append(data[- i - 2]['rating'])
for i in range(N + 1-len(data)):
feat.append(1)
feat.append(0)
else:
for i in range(N):
feat.append(0)
feat.append(data[-i-2]['rating'])
return feat
answers['Q4a'] = [featureMeanValue(10, dataset[0]['user_id']), featureMissingValue(10, dataset[0]['user_id'])]
answers
# In[23]:
answers['Q4b'] = []
for featFunc in [featureMeanValue, featureMissingValue]:
X = []
y = []
for user,rating in reviewsPerUser.items():
if len(rating) < 1:
continue
else:
X.append(featFunc(10,user))
y.append(rating[-1]['rating'])
model = linear_model.LinearRegression()
model.fit(X,y)
y_pred = model.predict(X)
mse = MSE(y, y_pred)
answers['Q4b'].append(mse)
# In[24]:
answers['Q4b']
# In[25]:
### 5
#(a)
def feature5(sentence):
feat = [1]
feat.append(len(sentence))
    feat.append(sentence.count('!')) # number of '!' characters
feat.append(sum(i.isupper() for i in sentence))
return feat
X = []
y = []
for d in dataset:
for spoiler,sentence in d['review_sentences']:
X.append(feature5(sentence))
y.append(spoiler)
# In[26]:
answers['Q5a'] = X[0]
# In[27]:
###5(b)
mod = sklearn.linear_model.LogisticRegression( class_weight='balanced', C=1)
mod.fit(X,y)
predictions = mod.predict(X)
TP = sum([(p and l) for (p,l) in zip(predictions, y)])
FP = sum([(p and not l) for (p,l) in zip(predictions, y)])
TN = sum([(not p and not l) for (p,l) in zip(predictions, y)])
FN = sum([(not p and l) for (p,l) in zip(predictions, y)])
TPR = TP / (TP + FN)
TNR = TN / (TN + FP)
BER = 1 - 1/2 * (TPR + TNR)
answers['Q5b'] = [TP, TN, FP, FN, BER]
# In[28]:
assert len(answers['Q5a']) == 4
assertFloatList(answers['Q5b'], 5)
# In[29]:
### 6
def feature6(review):
review = review['review_sentences']
feat = [1]
for i in range(0, 5):
feat.append(review[i][0])
feat.append(len(review[5][1]))
    feat.append(review[5][1].count('!')) # number of '!' characters in the sixth sentence
feat.append(sum(i.isupper() for i in review[5][1]))
return feat
# In[30]:
y = []
X = []
for d in dataset:
sentences = d['review_sentences']
if len(sentences) < 6: continue
X.append(feature6(d))
y.append(sentences[5][0])
# In[31]:
answers['Q6a'] = feature6(dataset[0])
answers
# In[32]:
answers['Q6a'] = X[0]
answers
# In[33]:
mod = sklearn.linear_model.LogisticRegression(class_weight='balanced', C = 1)
mod.fit(X,y)
predictions = mod.predict(X)
TP = sum([(p and l) for (p,l) in zip(predictions, y)])
FP = sum([(p and not l) for (p,l) in zip(predictions, y)])
TN = sum([(not p and not l) for (p,l) in zip(predictions, y)])
FN = sum([(not p and l) for (p,l) in zip(predictions, y)])
TPR = TP / (TP + FN)
TNR = TN / (TN + FP)
BER = 1 - 1/2 * (TPR + TNR)
answers['Q6b'] = BER
# In[34]:
assert len(answers['Q6a']) == 9
assertFloat(answers['Q6b'])
answers
# In[35]:
### 7
# In[36]:
# 50/25/25% train/valid/test split
Xtrain, Xvalid, Xtest = X[:len(X)//2], X[len(X)//2:(3*len(X))//4], X[(3*len(X))//4:]
ytrain, yvalid, ytest = y[:len(X)//2], y[len(X)//2:(3*len(X))//4], y[(3*len(X))//4:]
# In[37]:
def pipeline(reg, bers, BER_test):
mod = linear_model.LogisticRegression(class_weight='balanced', C=reg)
# 50/25/25% train/valid/test split
Xtrain, Xvalid, Xtest = X[:len(X)//2], X[len(X)//2:(3*len(X))//4], X[(3*len(X))//4:]
ytrain, yvalid, ytest = y[:len(X)//2], y[len(X)//2:(3*len(X))//4], y[(3*len(X))//4:]
mod.fit(Xtrain,ytrain)
ypredValid = mod.predict(Xvalid)
ypredTest = mod.predict(Xtest)
# validation
TP = sum([(a and b) for (a,b) in zip(yvalid, ypredValid)])
TN = sum([(not a and not b) for (a,b) in zip(yvalid, ypredValid)])
FP = sum([(not a and b) for (a,b) in zip(yvalid, ypredValid)])
FN = sum([(a and not b) for (a,b) in zip(yvalid, ypredValid)])
TPR = TP / (TP + FN)
TNR = TN / (TN + FP)
BER = 1 - 0.5*(TPR + TNR)
print("C = " + str(reg) + "; validation BER = " + str(BER))
bers = bers.append(BER)
# test
TP = sum([(a and b) for (a,b) in zip(ytest, ypredTest)])
TN = sum([(not a and not b) for (a,b) in zip(ytest, ypredTest)])
FP = sum([(not a and b) for (a,b) in zip(ytest, ypredTest)])
FN = sum([(a and not b) for (a,b) in zip(ytest, ypredTest)])
TPR = TP / (TP + FN)
TNR = TN / (TN + FP)
BER = 1 - 0.5*(TPR + TNR)
BER_test = BER_test.append(BER)
return mod
# In[38]:
bers = []
BER_test = []
for c in [0.01, 0.1, 1, 10, 100]:
pipeline(c, bers, BER_test)
bers
BER_test
# In[39]:
bestC = 0.1
ber = 0.21299572460563176
answers['Q7'] = bers + [bestC] + [ber]
assertFloatList(answers['Q7'], 7)
answers
# In[40]:
### 8
def Jaccard(s1, s2):
numer = len(s1.intersection(s2))
denom = len(s1.union(s2))
if denom == 0:
return 0
return numer / denom
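# Quick sanity check (illustrative values):
#   Jaccard({1, 2, 3}, {2, 3, 4}) == 2 / 4 == 0.5   (2 shared users out of 4 distinct users)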
# In[41]:
# 75/25% train/test split
dataTrain = dataset[:15000]
dataTest = dataset[15000:]
# In[42]:
# A few utilities
itemAverages = defaultdict(list)
ratingMean = []
for d in dataTrain:
itemAverages[d['book_id']].append(d['rating'])
ratingMean.append(d['rating'])
for i in itemAverages:
itemAverages[i] = sum(itemAverages[i]) / len(itemAverages[i])
ratingMean = sum(ratingMean) / len(ratingMean)
# In[43]:
reviewsPerUser = defaultdict(list)
usersPerItem = defaultdict(set)
for d in dataTrain:
u,i = d['user_id'], d['book_id']
reviewsPerUser[u].append(d)
usersPerItem[i].add(u)
# In[44]:
# From my HW2 solution, welcome to reuse
def predictRating(user,item):
ratings = []
similarities = []
for d in reviewsPerUser[user]:
i2 = d['book_id']
if i2 == item: continue
ratings.append(d['rating'] - itemAverages[i2])
similarities.append(Jaccard(usersPerItem[item],usersPerItem[i2]))
if (sum(similarities) > 0):
weightedRatings = [(x*y) for x,y in zip(ratings,similarities)]
return itemAverages[item] + sum(weightedRatings) / sum(similarities)
else:
# User hasn't rated any similar items
if item in itemAverages:
return itemAverages[item]
else:
return ratingMean
# In[45]:
predictions = [predictRating(d['user_id'], d['book_id']) for d in dataTest]
labels = [d['rating'] for d in dataTest]
# In[46]:
answers["Q8"] = MSE(predictions, labels)
assertFloat(answers["Q8"])
# In[ ]:
# In[56]:
### 9
item = [d['book_id'] for d in dataTrain]
data0, rating0 = [], []
for d in dataTest:
num = item.count(d['book_id'])
if num == 0:
data0.append([d['user_id'], d['book_id']])
rating0.append(d['rating'])
pred0 = [predictRating(u, i) for u, i in data0]
mse0 = MSE(pred0, rating0)
mse0
# In[57]:
data1, rating1 = [],[]
for d in dataTest:
num = item.count(d['book_id'])
if 1 <= num <= 5:
data1.append([d['user_id'], d['book_id']])
rating1.append(d['rating'])
pred1 = [predictRating(u, i) for u, i in data1]
mse1to5= MSE(pred1, rating1)
mse1to5
# In[58]:
data5, rating5 = [], []
for d in dataTest:
num = item.count(d['book_id'])
if num > 5:
data5.append([d['user_id'], d['book_id']])
rating5.append(d['rating'])
pred5 = [predictRating(u, i) for u, i in data5]
mse5 = MSE(pred5, rating5)
mse5
# In[ ]:
# In[50]:
answers["Q9"] = [mse0, mse1to5, mse5]
assertFloatList(answers["Q9"], 3)
answers
# In[51]:
### 10
# In[52]:
userAverages = defaultdict(list)
for d in dataTrain:
userAverages[d['user_id']].append(d['rating'])
for i in userAverages:
userAverages[i] = sum(userAverages[i]) / len(userAverages[i])
def predictRating(user,item):
ratings = []
similarities = []
for d in reviewsPerUser[user]:
i2 = d['book_id']
if i2 == item: continue
ratings.append(d['rating'] - itemAverages[i2])
similarities.append(Jaccard(usersPerItem[item],usersPerItem[i2]))
if (sum(similarities) > 0):
weightedRatings = [(x*y) for x,y in zip(ratings,similarities)]
return itemAverages[item] + sum(weightedRatings) / sum(similarities)
else:
# User hasn't rated any similar items
if item in itemAverages:
return itemAverages[item]
else:
# return RatingMean
if user in userAverages:
return userAverages[user]
else:
return ratingMean
item = [d['book_id'] for d in dataTrain]
data10, rating10 = [], []
for d in dataTest:
num = item.count(d['book_id'])
if num == 0:
data10.append([d['user_id'], d['book_id']])
rating10.append(d['rating'])
pred10 = [predictRating(u, i) for u, i in data10]
mse10 = MSE(pred10, rating10)
mse10
# In[59]:
answers["Q10"] = ("To improve the prediction function for unseen items, we can modify the predictRating function. Since previously the predictRating only use itemAverages for prediction function, we can add the userAverage to specify the condition and make mse smaller, inside of just categorize data into ratingMean. We can see that the mse become smaller for unseen data.", mse10)
assert type(answers["Q10"][0]) == str
assertFloat(answers["Q10"][1])
# In[60]:
answers
# In[55]:
f = open("answers_midterm.txt", 'w')
f.write(str(answers) + '\n')
f.close()
| vivianchen04/Master-Projects | WebMining&RecommenderSystems/midterm.py | midterm.py | py | 14,655 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 70,
"usage_type": "call"
},
{
"api_name"... |
9878964651 | #!/usr/bin/python
# disk monitor
import logging as l
l.basicConfig(filename='disk_log.txt',filemode='a',level=l.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%c')
# modes
# r -> read -> you can only read the file.
# a -> append -> you can only append the contents to the file.
# w -> write -> you can write to the file.
# -> if you dont have a file a new file will be created.
# -> if you have a file with data,the file gets truncated to zero.
disk_size = int(raw_input("please enter your disk size:"))
if disk_size < 60:
l.info("Your disk looks healthy at {}.".format(disk_size))
elif disk_size < 80:
l.warning("Buddy!! your disk is getting fat - {}.".format(disk_size))
elif disk_size < 90:
l.error("Buddy!! you disk is feeling sick - {}.".format(disk_size))
elif disk_size < 99:
l.critical("Buddy!! you disk is dead - {}.".format(disk_size)) | tuxfux-hlp-notes/python-batches | batch-68/14-logging/third.py | third.py | py | 900 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "logging.basicConfig",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "logging.warning",
... |
71077185467 | import Gmail_API_Lib
import Track_API_Lib
import Slack_API_Lib
import importlib
import json
import csv
import lovely_logger as log
import datetime
import time
late_checkin_alert_hour = 21
unclean_property_alert_hour = 14
regular_check_interval_minutes = 15
check_checkin_interval_minutes = 15
reload = 1#dummy variable to make the library re-save
#All times are in local time (EST)
late_checkins_time = datetime.datetime.now() - datetime.timedelta(days = 1) #Init
alert_checkins_time = datetime.datetime.now() - datetime.timedelta(days = 1) #Init
check_for_cleans_time = datetime.datetime.now() - datetime.timedelta(days = 1) #Init
set_cleans_time = datetime.datetime.now() - datetime.timedelta(days = 1) #Init
regular_interval_check_time = datetime.datetime.now() - datetime.timedelta(days = 1) #Init
check_checkin_time = datetime.datetime.now() - datetime.timedelta(days = 1) #Init
last_email_subject_read_file_cleaner = 'C:\\Users\\Bailey\\Documents\\Cozi\\Automations\\Track Automations\\email subject logs\\Last_Email_Read_Cleaner.txt'
last_email_subject_read_file_UMC = 'C:\\Users\\Bailey\\Documents\\Cozi\\Automations\\Track Automations\\email subject logs\\Last_Email_Read_UMC.txt' #universal Master Code
log.init('C:\\Users\\Bailey\\Documents\\Cozi\\Automations\\Track Automations\\Daily_Checks_Log')
try:
while (1):
today = datetime.datetime.now()
current_hour = today.hour
if (check_checkin_time + datetime.timedelta(minutes = check_checkin_interval_minutes) < today): #Updates todays reservations every 15 minutes.
log.info("Updating todays checkins")
todays_checkins = Track_API_Lib.get_todays_arrivals_units_and_names()
check_checkin_time = today
#General checks at regular intervals
if (regular_interval_check_time + datetime.timedelta(minutes = regular_check_interval_minutes)) < today: #Check every hour for Universal Master Code
log.info('Getting messages from Gmail')
msg_info = Gmail_API_Lib.get_gmail_subjects_and_dates() #E-mail subjects and dates
log.info('Got messages from Gmail')
#Alert for Master Code usage
log.info('Starting UMC Check')
UMC_check = Gmail_API_Lib.check_universal_master_code(msg_info) #Checks to see if the universal master code. Sends a Slack notification if so.
log.info('Completed UMC Check')
log.info('Starting New Checkins check')
new_checkins = Gmail_API_Lib.check_for_checkins(msg_info, todays_checkins) #Already strips non-PC properties and notifies CS team in Slack
if (len(new_checkins) > 0):
new_alerts = Gmail_API_Lib.alert_checkin()
Track_API_Lib.note_checkins(new_checkins)
regular_interval_check_time = today
if (current_hour == 13 or current_hour == late_checkin_alert_hour): #check for late checkins at 12pm and 8pm CST (Check twice to ensure there aren't more than 500 messages in inbox)
if ((late_checkins_time + datetime.timedelta(hours = 1)) <= today):
log.info('Checking for late checkins')
msg_info = Gmail_API_Lib.get_gmail_subjects_and_dates() #E-mail subjects and dates
log.info('Got messages from Gmail')
log.info('Getting todays checkins')
todays_checkins = Track_API_Lib.get_todays_arrivals_units_and_names()
log.info('Processing missing checkins')
missing_checkins = Gmail_API_Lib.check_for_checkins(msg_info, todays_checkins) #Already strips non-PC properties
log.info('Processing missing checkins')
late_checkins_time = today #subtract an hour to ensure the execution time doesn't keep creeping up over time.
if (current_hour == late_checkin_alert_hour and missing_checkins != None): #Alert for late checkins at 8pm CST. MUST BE SAME HOUR AS IF STATEMENT ABOVE OR THIS WONT TRIGGER
if ((alert_checkins_time + datetime.timedelta(hours = 1)) <= today):
log.info('Alerting for late checkins')
late_checkins = Gmail_API_Lib.report_late_checkins()
log.info('Sending Slack notifications')
Slack_API_Lib.send_guest_late_checkin_alert(late_checkins)
log.info('Posting notes to reservations in Track')
Track_API_Lib.note_late_checkins(late_checkins)
alert_checkins_time = today
log.info('Completed late checkins')
if ((current_hour >= 7 and current_hour <= 21) or current_hour == 3): #Check between 7am EST and 8pm EST and again at 3am EST
if ((check_for_cleans_time + datetime.timedelta(hours = 1)) <= today): #Checks every hour. Need to keep file updated with properties that have PC locks
log.info('Checking for cleaned properties')
#Set cleaned property statuses in Track
msg_info = Gmail_API_Lib.get_gmail_subjects_and_dates() #E-mail subjects and dates
log.info('Got messages from Gmail')
cleaned_units = Gmail_API_Lib.check_for_cleaners(msg_info) #Need to ensure Point Central has people properly labeled
inspected_units = Gmail_API_Lib.check_for_inspectors(msg_info) #Figure out what to do with Inspected Units
ready_units = Track_API_Lib.add_clean_and_inspected(cleaned_units, inspected_units)
log.info("Updating clean properties")
if (ready_units != None):
res = Track_API_Lib.set_unit_clean_status(ready_units, 1) #Sets units to clean. 1 sets status to clean
log.info("Updating clean combo properties")
res = Track_API_Lib.set_combo_properties_clean_status() #Sets combo properties to clean. Need to manually keep this list up to date. Is it necessary?
log.info('Set unit statuses')
check_for_cleans_time = today
if (current_hour == unclean_property_alert_hour): #Check at ~3pm EST (2pm CST) and alert
if ((set_cleans_time + datetime.timedelta(hours = 1)) < today):
#Alert for non-clean units
log.info('Checking for unclean properties to alert')
msg_info = Gmail_API_Lib.get_gmail_subjects_and_dates() #E-mail subjects and dates
log.info('Got messages from Gmail')
todays_checkins = Track_API_Lib.get_todays_arrivals_units_and_names()
check_for_clean = Gmail_API_Lib.remove_non_PC_properties(todays_checkins) #Removes non PC Properties from the clean check
unclean_units = Track_API_Lib.check_unclean_units(check_for_clean) #Need to cross reference the unit name as well
#Handle combo units based on what Track says
log.info('Sending Slack alerts if any')
for unit in unclean_units:
last_access = Gmail_API_Lib.last_cleaner(msg_info, unit['unit_name'])
res = Slack_API_Lib.send_slack_message('automated-alerts',"UNCLEAN CHECKIN POSSIBLE! " + last_access)
set_cleans_time = today
time.sleep(60)
except Exception as e:
Slack_API_Lib.send_slack_message("automation-errors", "Error with the Daily Checks code. Need to restart")
print(e)
#Check Track for unit clean status, and set to Clean if a cleaner has been there. (For combos, both units must be Clean, then the Combo can be Clean)
#Check email subjects for owners, then verify it is used during an owner stay. If not...? How about if the unit is blocked? Still notify? | mammalwithashell/scott-heyman-gcp-functions | Daily_Checks_v1.0.py | Daily_Checks_v1.0.py | py | 7,843 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datet... |
70514119229 | from collections import defaultdict
from github import Github
def get_git_skills(username):
g = Github()
user = g.get_user(username)
tags = defaultdict()
languages = defaultdict(int)
for repo in user.get_repos():
# new_repo_languages = repo.get_languages()
# for lang in new_repo_languages:
# languages[lang] += new_repo_languages[lang]
new_repo_topics = repo.get_topics()
for topic in new_repo_topics:
print (topic)
print(languages)
return sorted(languages.items(), key=lambda x: x[1], reverse=True)
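# Illustrative call (the username is an assumption). With the language counting above commented out,
# this currently just prints each repo's topics and returns an empty list:
#   get_git_skills('octocat')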
| HackRU/teamRU | src/matching/git_skill_finder.py | git_skill_finder.py | py | 593 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "github.Github",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 10,
"usage_type": "call"
}
] |
37122760097 | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from fanza.items import ImageItem
from fanza.common import download_image
from fanza.image.image_helper import handle_image_item
from scrapy.exceptions import DropItem
from scrapy import Spider
from time import sleep
from socket import timeout
from urllib.request import ProxyHandler, build_opener
from urllib.error import URLError, HTTPError
from os.path import isdir, isfile
from os import makedirs
class AvbookImagePipeline:
def __init__(self) -> None:
self.opener = None
def open_spider(self, spider: Spider):
img_download_proxy = spider.settings['IMAGE_DOWNLOAD_PROXY']
self.opener = build_opener(ProxyHandler({'https': img_download_proxy, 'http': img_download_proxy}))
self.img_fail = spider.settings['IMAGE_FAIL_FILE']
self.failed = set()
async def process_item(self, item, spider: Spider):
if not isinstance(item, ImageItem):
return item
img_dir, img_des, prefix = handle_image_item(item, spider)
if not isdir(img_dir):
makedirs(img_dir)
if not item.isUpdate and isfile(img_des):
            spider.logger.debug('already exists: %s %s', prefix, item.imageName)
return
retry = 0
delay = 1
retry_limit = spider.settings['RETRY_LIMIT']
while True:
try:
download_image(self.opener, item.url, img_des)
break
except (URLError, HTTPError, timeout):
if retry > retry_limit:
spider.logger.exception("download image error, url: %s", item.url)
if item.subDir not in self.failed:
self.failed.add(item.subDir)
                        with open(self.img_fail, 'w', encoding='utf-8') as f:
                            f.writelines(f'{failed_dir}\n' for failed_dir in self.failed)
                    raise DropItem(f'download error happened\titem: {item}')
sleep(delay)
retry += 1
delay *= 2
spider.logger.debug('retry download image: retry\t%s url\t%s', retry, item.url)
spider.logger.info('save img:\t%s %s', prefix, item.imageName)
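        # Note on the retry loop above: failed downloads are retried with exponential backoff
        # (sleep 1s, 2s, 4s, ...) until RETRY_LIMIT is exceeded, at which point the item is dropped.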
class SuccessResponsePipeline:
def close_spider(self, spider: Spider):
if spider.name != 'movie_detail' and spider.name != 'movie_image':
return
spider.logger.info('------------------------------------save failed------------------------------------')
failed = spider.processed - spider.successed
with open('failed.txt', 'w', encoding='utf-8') as f:
for failed_id in failed:
f.write(failed_id + '\n')
| takiya562/Adult_video_scrapy | fanza/pipelines.py | pipelines.py | py | 2,876 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "scrapy.Spider",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "urllib.request.build_opener",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "urllib.request.ProxyHandler",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": ... |
12461550259 | from preprocess import *
import os
import argparse
from csv import writer
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Process pcap file and integer data.")
parser.add_argument("-pcap", nargs="+", help="The pcap file. Multiple pcaps can be added when separated by a space.")
parser.add_argument("-protocol", help ="The application layer protocol (ex: HTTP)")
args = parser.parse_args()
columns=["src_ip", "dst_ip", "src_port", "dst_port", "t_proto", "dsfield", "ip_flags", "length", "d_proto", "payload"]
output_prefix = os.getcwd() + "/output"
if not os.path.exists(output_prefix):
os.makedirs(output_prefix)
filecount = 0
ext = str(filecount) + ".csv"
filename = (output_prefix + "/" + str(args.protocol))
with open(filename + ext, "w", newline='') as my_csv:
csv_writer = writer(my_csv)
csv_writer.writerow(columns)
total = 0
oldtotal = 0
for f in args.pcap:
total += parsePacket(filename + ext, f, str(args.protocol))
if (oldtotal + 100000 <= total):
filecount += 1
oldtotal = total
ext = str(filecount) + ".csv"
with open(filename + ext, "w", newline='') as my_csv:
csv_writer = writer(my_csv)
csv_writer.writerow(columns)
print("Number of packets processed: %d" % total)
| mayakapoor/palm | src/preprocessing/main.py | main.py | py | 1,391 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_n... |
24502646621 | from utils import *
from fastapi import FastAPI, Query, Path, Body, Cookie, Header
from pydantic import BaseModel, Required, Field, HttpUrl
app = FastAPI()
@app.get('/')
def read_root():
return {'Hello': 'World'}
# Examples with path params
class ModelName(str, Enum):
name1 = 'Phelipe'
name2 = 'Marcos'
name3 = 'Fran'
@app.get('/items/{item_id}')
def read_item(item_id: int, q: Union[str, None] = None):
return {'item_id': item_id, 'q': q}
@app.get('/names/{name}')
def get_name(name: ModelName):
response = {'model_name': name}
if name.value == 'Phelipe':
response.update({'message': 'Bad name.'})
else:
        response.update({'message': 'Cool name.'})
return response
# Example with query params
fake_items_db = [{'item_name': 'Foo'}, {'item_name': 'Bar'}, {'item_name': 'Baz'}]
_lmt = len(fake_items_db) - 1
@app.get('/items/')
async def read_item(skip: int = 0, limit: int = _lmt):
return fake_items_db[skip : skip + limit]
# Example with an optional query param
@app.get('/cats/')
async def get_cat(name: str = None):
cats = [
'zoe',
'zulke',
'zara',
'miuda',
'frajola',
'cruel',
'mafalda',
'jade',
'maria',
]
if name:
if name in cats:
return {'Valor aleatorio': False, 'Nome do gato': name}
    # random.choice avoids the off-by-one IndexError of randint(0, len(cats))
    return {'Valor aleatorio': True, 'Nome do gato': random.choice(cats)}
# Example with a Pydantic model
class Item(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
@app.post('/items_s/')
async def create_item(item: Item):
if item.tax:
price = item.price + item.price * item.tax
item.price = price
return item
"""
Example using Query as a default value, with validators and documentation overrides
Old way - async def create_dog(name: str, age: int, description: Union[None, str] = None):
"""
class Dog(BaseModel):
level: int
name: str
@app.post('/dogs/')
async def create_dog(
brothers: Union[List[Dog], None],
age: int,
name: str = Query(
default=..., title='Nome', description='Esse é um nome', regex='^love'
),
description: Union[None, str] = Query(
default=None,
title='Descrição',
description='Essa é uma descrição',
min_length=5,
max_length=10,
deprecated=True,
),
hidden_query: Union[str, None] = Query(default=None, include_in_schema=False),
):
dog = {'name': name, 'age': age, 'brothers': {}}
if description:
dog.update({'description': description})
if brothers:
list(map(lambda d: dog['brothers'].update({d.name: d.level}), brothers))
return dog
"""
Example using FastAPI's Path to validate path parameters
"""
@app.get('/memories/{person_id}')
def create_memories(
*, person_id: int = Path(..., title='Uma pessoa existente.', gt=0, le=1000)
):
people = {1: {'name': 'amourir'}, 2: {'name': 'joão'}}
if person_id in people.keys():
return people.get(person_id)
return {}
"""
Example using Body with multiple parameters
"""
class City(BaseModel):
name: str
country: int
@app.post('/cities/{country_id}')
def create_cities(
*,
country_id: int = Path(..., title='Id de um país existente.', gt=1, le=5),
city: City = Body(..., embed=False),
person_name: str = Body(..., regex='^mar', embed=False)
):
countries = {1: 'Brazil', 2: 'Russia', 3: 'Senegal', 4: 'Marrocos', 5: 'Irã'}
city_item = {'name': city.name}
country_name = countries.get(country_id)
city_item.update({'country': country_name})
return city_item
"""
Example using Body and Field
"""
class PersonalComputer(BaseModel):
name: str = Field(
regex='^pc', default=None, description='Nome do pc', max_length=10
)
serie: int = Field(gt=1, description='Passe uma série válida')
@app.put('/pcs/{pc_id}')
async def update_pc(
*,
pc_id: int = Path(..., description='Passe um válido.'),
pc: PersonalComputer = Body(..., embed=True, description='Uma maquina')
):
if pc_id // 2 == 0:
return {'message': 'Inválido'}
return pc
"""
Example with nested BaseModel subtypes
"""
class Image(BaseModel):
url: HttpUrl
size: int
class Product(BaseModel):
name: str = Field(..., example='Produto base')
price: float
images: list[Union[Image, None]] = None
class Store(BaseModel):
products: Union[list[Product], None]
name: str = Field(..., description='Nome da loja')
class Config:
schema_extra = {
'example': {
'products': [
{
'name': 'Computador',
'price': 12.4,
'images': [{'url': 'http://test.com', 'size': 1}],
}
],
'name': 'Loja dos fundos',
}
}
@app.post('/products/')
def create_products(store: Store = Body(..., embed=True)):
return store
@app.get('/products/{product_id}')
def retrieve_products(*, product_id: UUID = Path(..., description='Produto existente')):
return {}
"""
Example with Cookie and Header
"""
@app.get('/params/')
def request_params(
user_agent: str = Header(default=None),
ads_id: Union[str, None] = Cookie(default=None),
):
return {'cookie': ads_id, 'user_agent': user_agent}
| williamelias/Fast-Api-Quiz | code/app/main.py | main.py | py | 5,446 | python | pt | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "fastapi.Query",
... |
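One quick way to exercise the endpoints above is FastAPI's TestClient; a minimal sketch, assuming the repo's utils module is importable alongside code/app/main.py:
from fastapi.testclient import TestClient
from main import app  # the file above is code/app/main.py in the repo

client = TestClient(app)

# POST /items_s/ adds the tax to the price before echoing the item back
resp = client.post('/items_s/', json={'name': 'Book', 'price': 10.0, 'tax': 0.1})
assert resp.status_code == 200
print(resp.json())  # expected: {'name': 'Book', 'description': None, 'price': 11.0, 'tax': 0.1}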
34993256569 | import requests
from bs4 import BeautifulSoup
def extract_teok_jobs(keyword):
results = []
url = f"https://remoteok.com/remote-{keyword}-jobs"
    response = requests.get(url, headers={"User-Agent": "Kimchi"})
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, "html.parser")
jobs = soup.find_all('tr', class_="job")
for job_section in jobs:
job_posts = job_section.find_all('td', class_="company")
for post in job_posts:
anchors = post.find_all('a')
anchor = anchors[0]
link = anchor['href']
title = anchor.find("h2")
organization = post.find_all('span', class_="companyLink")
orga = organization[0]
company = orga.find('h3')
location = post.find_all('div', class_="location")[0]
if company:
company = company.string.strip()
if title:
title = title.string.strip()
if location:
location = location.string
job_data = {
'link': f"https://remoteok.com{link}",
'company': company.replace(",", " "),
'location': location.replace(",", " "),
'position': title,
}
results.append(job_data)
return results | hoseel/job-scrapper | extractors/teok.py | teok.py | py | 1,444 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 11,
"usage_type": "call"
}
] |
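A possible way to drive extract_teok_jobs and persist the results; the CSV output step below is an assumption, not part of the file above:
import csv
from extractors.teok import extract_teok_jobs

jobs = extract_teok_jobs("python")  # scrapes https://remoteok.com/remote-python-jobs
with open("python_jobs.csv", "w", newline="", encoding="utf-8") as f:
    out = csv.writer(f)
    out.writerow(["link", "company", "location", "position"])
    for job in jobs:
        out.writerow([job["link"], job["company"], job["location"], job["position"]])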