seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
11737234773 | from copy import deepcopy
from zope.interface import implements
from Globals import InitializeClass
from AccessControl import ClassSecurityInfo
from Acquisition import aq_base, aq_inner, aq_parent, aq_chain
from OFS.PropertyManager import PropertyManager
from Products.CMFCore.utils import getToolByName
from Products.CMFCore.utils import SimpleItemWithProperties
from Products.CPSSchemas.Layout import CPSLayout
from Products.CPSSchemas.interfaces import IWidget
class IndirectWidget(SimpleItemWithProperties, object):
"""See documentation in CPSSchemas/doc/indirect_widget
current implementation actually makes a copy of the base widget and store
it as a volatile attribute. This is good enough for now, we'll see what
profiling tells us.
This is not an adapter because:
- making an adapter persistent is probably funky
- flexibility does not seem to be needed there
The current implementation uses dirty acquisition hacks. Namely, it stores
the wrapped parent in a volatile attribute, to perform lookup on it.
The reason is that both in @property or in __getattr__, aq_chain is None,
but we need to avoid constructing the worker widget each time a simple
attribute is being looked up, and the Layout class does lots of them.
An alternative would have been to use zope.proxy, but it's not clear
without further study whether this goes well with acquisition, although
rewriting in ZTK style would certainly have to be tried this way first.
"""
implements(IWidget)
meta_type = 'Indirect Widget'
security = ClassSecurityInfo()
_properties = (dict(id='base_widget_rpath', type='string', mode='w',
label='Relative path of base widget'),
dict(id='is_parent_indirect', type='boolean', mode='w',
label="Is the worker widget aq parent the indirect's"),
)
_v_worker = (None, None) # actualy, worker + base widget
_v_parent = (None,)
is_parent_indirect = True
def __init__(self, wid, **kw):
self._setId(wid)
@classmethod
def localProps(cls):
"""Properties that are always local to the indirected widget."""
return (p['id'] for p in cls._properties)
def getWorkerWidget(self):
worker, base = self._v_worker
if worker is None:
self.makeWorkerWidget()
worker, base = self._v_worker
# place in right aq context
# _v_parent is more volatile that _v_worker because it gets
# written over with each traversal, and in particular each request.
# This is important: if we store the worker with its aq chain,
# an expired request with no URL or RESPONSE can be acquired
# from the widget in subsequent requests.
if self.is_parent_indirect: # avoid and/or pattern to avoid bool(self)
parent = self._v_parent[0]
else:
parent = aq_parent(aq_inner(base))
return worker.__of__(parent)
@property
def title(self):
return self.getProperty('title', None) or self.getWorkerWidget().title
# def title_or_id(self):
# return self.getWorkerWidget().title or self.getId()
def clear(self):
try:
delattr(self, '_v_worker')
except AttributeError:
pass
def getTemplateWidget(self):
utool = getToolByName(self._v_parent[0], 'portal_url')
portal = utool.getPortalObject()
return portal.unrestrictedTraverse(self.base_widget_rpath)
def makeWorkerWidget(self):
# using _v_parent to avoid loops in __getattr__
base = self.getTemplateWidget()
worker = deepcopy(aq_base(base))
# update worker properties, by creating them if needed
props_upd = {}
worker_base_props = set(p['id'] for p in worker._properties)
for p in self._properties:
pid = p['id']
if pid in self.localProps():
continue
if pid in worker_base_props:
# in some very special case, we may have complex objects
# (vocabulary...) They need to be unwrapped first.
props_upd[pid] = aq_base(self.getProperty(pid))
else:
worker.manage_addProperty(pid, self.getProperty(pid),
p['type'])
worker.manage_changeProperties(**props_upd)
# fix worker widget id
worker._setId(self.getId())
# store in volatile var, without any aq wrapping (tuple hack)
self._v_worker = (worker, base)
security.declarePublic('getWidgetId')
def getWidgetId(self):
"""Get this widget's id."""
zid = self.getId()
try:
# Inside a FolderWithPrefixedIds.
# method on parent used in makeWorkerWidget: avoid aq loops
return getattr(self._v_parent[0], 'getIdUnprefixed')(zid)
except AttributeError:
# Standalone
return zid
def isHidden(self):
# some buggy old template widgets may have been there and got through
# if upgraded before fix for #2394
return self.fields == ('?',)
#
# All other attributes from Widget API are indirected to worker widget
#
def __of__(self, parent):
"""Zope2 trick so that we can carry on aq chain from __getattr__
"""
# tuple hack to store original aq (includes request container)
if isinstance(parent, CPSLayout): # see #2430, avoid infinite loops
self._v_parent = (parent,)
return SimpleItemWithProperties.__of__(self, parent)
def __getattr__(self, k):
"""This is called if normal python attr lookup fails, but before aq.
In this method, the instance is never wrapped by acquisition.
"""
if k in self.forwarded_attributes:
try:
return getattr(self.getWorkerWidget(), k)
except AttributeError:
pass
if k.startswith('_'):
raise AttributeError(k) # XXX maybe some have to get through
assert self._v_parent[0] is not None
return getattr(self._v_parent[0], k)
forwarded_attributes = frozenset([
'has_input_area', 'label_edit', 'hidden_empty', 'required',
'label', 'help', 'is_i18n', 'fieldset', 'prepare', 'validate',
'render', 'getHtmlWidgetId', 'getModeFromLayoutMode',
'getProperty', 'getFileInfo',
'isReadOnly', 'getCssClass', 'getJavaScriptCode'])
forwarded_properties = frozenset([
'label_edit', 'hidden_empty', 'required',
'label', 'help', 'is_i18n', 'fieldset'])
def valid_property_id(self, pid):
"""Allow adding properties on attributes that are forwarded.
"""
if pid in self.forwarded_properties:
# at least it is well formed, just check if already there
return not pid in self.__dict__ # avoid getattr, of course
return PropertyManager.valid_property_id(self, pid)
InitializeClass(IndirectWidget)
| nuxeo-cps/products--CPSSchemas | widgets/indirect.py | indirect.py | py | 7,146 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Products.CMFCore.utils.SimpleItemWithProperties",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "zope.interface.implements",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "Products.CPSSchemas.interfaces.IWidget",
"line_number": 38,
"us... |
19931655401 | import typing
from .datatype import Datatype
def parse_int(v: typing.Any) -> int:
"""Parse the value `v` to an int.
This function fixes parsing values like "100.1" to int by rounding.
Raises
------
ValueError
When the value `v` could not be parsed
Parameters
----------
v : int, float, str, any
The value to parse
Returns
-------
int
The converted int
"""
return int(float(v))
def format_int(v: typing.Any, f: typing.Optional[str]="") -> str:
"""Format the given value to an int.
Parameters
----------
v : any
The value to format
f : str
The format specification
Returns
-------
str
The formatted value or an emtpy string if it is not formattable
"""
try:
v = parse_int(v)
except ValueError:
return ""
return ("{" + f + "}").format(v)
int_type = Datatype("int", format_int, parse_int)
int_type.default_parse = 0
def parse_hex(v: typing.Any) -> int:
"""Parse the given value to an integer on the base of 16.
Raises
------
ValueError
When the value `v` could not be parsed
Parameters
----------
v : int, float, str, any
If int or float are given, the number is returned as an int, if a
string is given it is treated as a hex number (values after the decimal
separator are ignored), everything else will be tried to convert to a
16 base int
Returns
-------
int
The converted int
"""
if isinstance(v, (int, float)):
return int(v)
elif isinstance(v, str):
v = v.split(".")
return int(v[0], base=16)
else:
return int(v, base=16)
def format_hex(v: typing.Any, f: typing.Optional[str]="") -> str:
"""Format the given value to a hex number.
Parameters
----------
v : any
The value to format
f : str
The format specification
Returns
-------
str
The formatted value or an empty string if it is not formattable
"""
f = list(Datatype.split_format_spec(f))
# alternative form, this will make 0x<number>
f[3] = "#"
# convert to hex
f[8] = "x"
# remove precision, raises error otherwise
f[7] = ""
try:
v = parse_int(v)
except ValueError:
return ""
return Datatype.join_format_spec(f).format(v)
hex_int_type = Datatype("hex", format_hex, parse_hex)
hex_int_type.default_parse = 0
def float_np(n: int) -> Datatype:
"""Get a float datatype that rounds to `n` digits.
Parameters
----------
n : int
The number of digits after the decimal separator
Returns
-------
Datatype
The datatype
"""
def format_float_np(v: typing.Any, f: typing.Optional[str]="") -> str:
"""Format the given value to a float.
Parameters
----------
v : any
The value to format
f : str
The format specification
Returns
-------
str
The formatted value or an emtpy string if it is not formattable
"""
try:
v = float(v)
except ValueError:
return ""
f = list(Datatype.split_format_spec(f))
if f[7] == "":
f[7] = str(n)
f[8] = "f"
f = Datatype.join_format_spec(f)
return f.format(v)
dt = Datatype("float_{}p".format(n), format_float_np, float)
dt.default_parse = "0.00"
return dt | miile7/pylo-project | pylo/default_datatypes.py | default_datatypes.py | py | 3,582 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.Any",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "typing.Any",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "datatype.Datatype... |
19842328575 | import os
import re
from io import open
import torch
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = []
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
class Corpus(object):
def __init__(self, path):
self.dictionary = Dictionary()
self.train = self.tokenize(os.path.join(path, 'train.txt'))
self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
self.test = self.tokenize(os.path.join(path, 'test.txt'))
def process_line(self, line):
return re.sub(r"[^a-zA-Z0-9,\.!?<>\- ]+", '', line).replace('<unk>','').lower()
def tokenize(self, path):
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r', encoding="utf8") as f:
for line in f:
line = self.process_line(line)
words = line.split() + ['<eos>']
if words[0]=='<eos>':
continue
for word in words:
self.dictionary.add_word(word)
# Tokenize file content
with open(path, 'r', encoding="utf8") as f:
ids_list = []
for line in f:
line = self.process_line(line)
words = line.split() + ['<eos>']
ids = []
if words[0]=='<eos>':
continue
for word in words:
ids.append(self.dictionary.word2idx[word])
ids_list.append(torch.tensor(ids).type(torch.int64))
ids = torch.cat(ids_list)
return ids
| TheMarvelousWhale/NTU-CE4045-NLP | Assignment2/part_1/data_fnn.py | data_fnn.py | py | 1,860 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 2... |
16937190382 | import re
import pandas as pd
import requests
from bs4 import BeautifulSoup as bs
base = "https://www.dictionary.com/browse/"
csv = pd.read_csv("words.csv")
meaning = []
pronounce = []
password = []
words = csv["Word"]
for word in words:
url = base + word
try:
read = requests.get(url,allow_redirects=True)
LK = read.url
print(LK)
Link = requests.get(LK)
link = bs(Link.content,'html.parser')
except:
pro=""
final = ""
try:
WO = link.find("h1",attrs={"data-first-headword":"true"})
wo = WO.text
except:
wo=""
try:
PRO = link.find("span",attrs={"class":"pron-spell-content css-7iphl0 evh0tcl1"})
pro = PRO.text
except:
pro = ""
try:
M1 = link.find("div",attrs={"value":"1"})
m1 = M1.text + ", "
except:
m1 = ""
try:
M2 = link.find("div",attrs={"value":"2"})
m2 = M2.text + ", "
except:
m2 = ""
try:
M3 = link.find("div",attrs={"value":"3"})
m3 = M3.text + ", "
except:
m3 = ""
try:
M4 = link.find("div",attrs={"value":"4"})
m4 = M4.text
except:
m4=""
final = m1+ m2 +m3 + m4
print(final)
print(wo)
print(pro)
pronounce.append(pro)
meaning.append(final)
password.append(wo)
print("Done")
csv["Meaning"] = meaning
csv["Pronunciation"] = pronounce
csv["password"] = password
csv.to_csv("meaning.csv")
print("CSV created!!")
| priyakaur/portifolio | fetch_meaning.py | fetch_meaning.py | py | 1,586 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"lin... |
29403941645 | """Handlers tests."""
from django.conf import settings
from django.db.models.signals import post_save
from django.test import override_settings
from test_plus.test import TestCase
import responses
import rovercode_web
@override_settings(SUBSCRIPTION_SERVICE_HOST='http://test.test')
class TestHandlers(TestCase):
"""Test signal handlers."""
def setUp(self):
"""Initialize the tests."""
post_save.connect(
rovercode_web.users.signals.handlers.create_new_user,
sender=settings.AUTH_USER_MODEL,
dispatch_uid='new_user'
)
@responses.activate
def test_user_create_error(self):
"""Test external user create failure."""
responses.add(
responses.POST,
'http://test.test/api/v1/customer/',
status=503
)
self.make_user()
self.assertEqual(len(responses.calls), 2)
@responses.activate
def test_external_user_create(self):
"""Test external user created on new user."""
responses.add(
responses.POST,
'http://test.test/api/v1/customer/',
status=200
)
user = self.make_user()
self.assertEqual(len(responses.calls), 2)
# Updating a user should do nothing
user.email = 'test@example.com'
user.save()
self.assertEqual(len(responses.calls), 2)
| rovercode/rovercode-web | rovercode_web/users/tests/test_handlers.py | test_handlers.py | py | 1,401 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "test_plus.test.TestCase",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.db.models.signals.post_save.connect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.post_save",
"line_number": 18,
"usage_type": ... |
72529053224 | from django.shortcuts import render_to_response, render
from django.contrib.auth.decorators import login_required
from models import Court
from notification.models import Notification
# Create your views here.
@login_required
def all(request):
'''
To list all the court
:param request:
:return:
'''
courts = Court.objects.all()
user = request.user.profile
notifications = Notification.objects.filter(user = request.user, viewed = False).order_by('time').reverse()
args = {}
args['courts'] = courts
args['profile'] = user
args['notifications'] = notifications
return render(request, 'all-court.html', args)
| Championzb/TenniSoda | court/views.py | views.py | py | 662 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.Court.objects.all",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.Court.objects",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "models.Court",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "notif... |
6939711390 | from bokeh.plotting import figure, show
x = [1, 2, 3, 4, 5]
y1 = [6, 7, 2, 4, 5]
y2 = [2, 3, 4, 5, 6]
y3 = [4, 5, 5, 7, 2]
p = figure(title="Multiple bars example")
# top defines the single y-coord for each bar; stated more clearly, height
# bottom defines y-intercept, i.e. the 0 value where the lowest data pt starts
# width is fatness of each bar
p.vbar(x=x, top=y1, width=0.5, bottom=0, color="red")
p.vbar(x=x, top=y2, width=0.7, bottom=1, color="blue")
p.vbar(x=x, top=y3, width=0.9, bottom=2, color="green")
show(p)
| marnatgon/Senior-Design | software/example/bokeh/2-custom-render/bar.py | bar.py | py | 525 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bokeh.plotting.figure",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.show",
"line_number": 14,
"usage_type": "call"
}
] |
73360095144 | # django imports
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.db.models.signals import post_save
from django.db.models.signals import pre_save
from django.db.models.signals import pre_delete
# lfs imports
from lfs.caching.utils import clear_cache
from lfs.cart.models import Cart
from lfs.catalog.models import Category
from lfs.catalog.models import Product
from lfs.catalog.models import StaticBlock
from lfs.core.models import Shop
from lfs.core.signals import cart_changed
from lfs.core.signals import product_changed
from lfs.core.signals import category_changed
from lfs.core.signals import topseller_changed
from lfs.marketing.models import Topseller
from lfs.order.models import OrderItem
from lfs.page.models import Page
from lfs.shipping.models import ShippingMethod
# reviews imports
from reviews.signals import review_added
# Cart
def cart_changed_listener(sender, **kwargs):
update_cart_cache(sender)
cart_changed.connect(cart_changed_listener)
def cart_deleted_listender(sender, instance, **kwargs):
update_cart_cache(instance)
pre_delete.connect(cart_deleted_listender, sender=Cart)
# Category
def category_deleted_listender(sender, instance, **kwargs):
update_category_cache(instance)
pre_delete.connect(category_deleted_listender, sender=Category)
def category_saved_listener(sender, instance, **kwargs):
update_category_cache(instance)
pre_save.connect(category_saved_listener, sender=Category)
def category_changed_listener(sender, **kwargs):
update_category_cache(sender)
category_changed.connect(category_changed_listener)
# OrderItem
def order_item_listener(sender, instance, **kwargs):
"""Deletes topseller after an OrderItem has been updated. Topseller are
calculated automatically on base of OrderItems, hence we have to take of
that.
"""
cache.delete("topseller")
try:
for category in instance.product.get_categories(with_parents=True):
cache.delete("topseller-%s" % category.id)
except:
pass # fail silently
pre_delete.connect(order_item_listener, sender=OrderItem)
post_save.connect(order_item_listener, sender=OrderItem)
# Page
def page_saved_listener(sender, instance, **kwargs):
cache.delete("page-%s" % instance.slug)
cache.delete("pages")
post_save.connect(page_saved_listener, sender=Page)
# Product
def product_changed_listener(sender, **kwargs):
update_product_cache(sender)
product_changed.connect(product_changed_listener)
def product_saved_listener(sender, instance, **kwargs):
# update_product_cache(instance)
update_category_cache(instance)
post_save.connect(product_saved_listener, sender=Product)
# Shipping Method
def shipping_method_saved_listener(sender, instance, **kwargs):
cache.delete("shipping-delivery-time")
cache.delete("shipping-delivery-time-cart")
post_save.connect(shipping_method_saved_listener, sender=ShippingMethod)
# Shop
def shop_saved_listener(sender, instance, **kwargs):
cache.delete("shop-%s" % instance.id)
post_save.connect(shop_saved_listener, sender=Shop)
# Static blocks
def static_blocks_saved_listener(sender, instance, **kwargs):
update_static_block_cache(instance)
post_save.connect(static_blocks_saved_listener, sender=StaticBlock)
# Topseller
def topseller_changed_listener(sender, **kwargs):
update_topseller_cache(sender)
topseller_changed.connect(topseller_changed_listener)
def topseller_saved_listener(sender, instance, **kwargs):
update_topseller_cache(instance)
post_save.connect(topseller_saved_listener, sender=Topseller)
def review_added_listener(sender, **kwargs):
ctype = ContentType.objects.get_for_id(sender.content_type_id)
product = ctype.get_object_for_this_type(pk=sender.content_id)
update_product_cache(product)
review_added.connect(review_added_listener)
#####
def update_category_cache(instance):
# NOTE: ATM, we clear the whole cache if a category has been changed.
# Otherwise is lasts to long when the a category has a lot of products
# (1000s) and the shop admin changes a category.
clear_cache()
return
cache.delete("category-breadcrumbs-%s" % instance.slug)
cache.delete("category-products-%s" % instance.slug)
cache.delete("category-all-products-%s" % instance.slug)
cache.delete("category-categories-%s" % instance.slug)
for category in Category.objects.all():
cache.delete("categories-portlet-%s" % category.slug)
cache.delete("category-%s" % instance.id)
cache.delete("category-%s" % instance.slug)
cache.delete("category-all-children-%s" % instance.id)
cache.delete("category-children-%s" % instance.id)
cache.delete("category-parents-%s" % instance.id)
cache.delete("category-products-%s" % instance.id)
cache.delete("category-all-products-%s" % instance.id)
# Note: As this is called "pre-saved" newly created categories don't have
# the many-to-many attribute "products", hence we have to take care of it
# here.
try:
for product in instance.products.all():
update_product_cache(product)
except ValueError:
pass
def update_product_cache(instance):
# If the instance is a product with variant or a variant we have to
# delete also the parent and all other variants
if instance.is_variant():
parent = instance.parent
else:
parent = instance
cache.delete("product-%s" % parent.id)
cache.delete("product-%s" % parent.slug)
cache.delete("product-inline-%s" % parent.id)
cache.delete("product-images-%s" % parent.id)
cache.delete("related-products-%s" % parent.id)
cache.delete("manage-properties-variants-%s" % parent.id)
cache.delete("product-categories-%s-False" % parent.id)
cache.delete("product-categories-%s-True" % parent.id)
cache.delete("product-navigation-%s" % parent.slug)
try:
c = cache.get("shipping-delivery-time")
del c["product-%s" % parent.slug]
cache.set("shipping-delivery-time", c)
except (KeyError, TypeError):
pass
for variant in parent.get_variants():
cache.delete("product-%s" % variant.id)
cache.delete("product-%s" % parent.slug)
cache.delete("product-inline-%s" % variant.id)
cache.delete("product-images-%s" % variant.id)
cache.delete("related-products-%s" % variant.id)
cache.delete("product-categories-%s-False" % variant.id)
cache.delete("product-categories-%s-True" % variant.id)
cache.delete("product-navigation-%s" % variant.slug)
cache.delete("product-shipping-%s" % variant.slug)
def update_cart_cache(instance):
"""Deletes all cart relevant caches.
"""
cache.delete("cart-%s" % instance.user)
cache.delete("cart-%s" % instance.session)
cache.delete("cart-items-%s" % instance.id)
cache.delete("cart-costs-True-%s" % instance.id)
cache.delete("cart-costs-False-%s" % instance.id)
cache.delete("shipping-delivery-time-cart")
cache.delete("shipping-delivery-time")
def update_static_block_cache(instance):
"""Deletes all static block relevant caches.
"""
cache.delete("static-block-%s" % instance.id)
for category in instance.categories.all():
cache.delete("category-inline-%s" % category.slug)
def update_topseller_cache(topseller):
"""Deletes all topseller relevant caches.
"""
cache.delete("topseller")
product = topseller.product
for category in product.get_categories(with_parents=True):
cache.delete("topseller-%s" % category.id) | django-lfs/lfs | caching/listeners.py | listeners.py | py | 7,661 | python | en | code | 23 | github-code | 36 | [
{
"api_name": "lfs.core.signals.cart_changed.connect",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "lfs.core.signals.cart_changed",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.db.models.signals.pre_delete.connect",
"line_number": 34,
"usa... |
21158916889 | from colorsys import rgb_to_yiq
import speedtest
s = speedtest.Speedtest()
bytes_num = 1000000
dws = round(s.download()/bytes_num, 2)
ups = round(s.upload()/bytes_num, 2)
print(f' download {dws}')
print(f' download {ups}') | jesus-sanchez5/Kali_pruebas | Programas_prueba/python/pruebaVelocidad.py | pruebaVelocidad.py | py | 227 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "speedtest.Speedtest",
"line_number": 4,
"usage_type": "call"
}
] |
7838997057 | from django.conf.urls import url
from django.contrib.auth.views import LoginView, LogoutView
from .views import *
urlpatterns = [
url(r'^login/',
LoginView.as_view(template_name='management/login.html'), name='login'),
url(r'^logout/',
LogoutView.as_view(template_name='management/logout.html',
next_page=reverse_lazy('management:login')), name='logout'),
url(r'^categories/(?:(?P<parent_id>\d+)/)?$', CategoriesListView.as_view(), name='categories'),
url(r'^categories/add/(?:(?P<parent_id>\d+)/)?$', CategoryCreate.as_view(), name='add_category'),
url(r'^categories/edit/(?P<pk>\d+)/$', CategoryUpdate.as_view(), name='edit_category'),
url(r'^dishes/(?P<category_id>\d+)/$', DishesListView.as_view(), name='dishes'),
url(r'^dishes/add/(?:(?P<category_id>\d+)/)?$', DishCreate.as_view(), name='add_dish'),
url(r'^dishes/edit/(?P<pk>\d+)/$', DishUpdate.as_view(), name='edit_dish'),
url(r'^orders/$', OrdersListView.as_view(), name='orders'),
url(r'^orders/edit/(?P<pk>\d+)/$', OrderUpdate.as_view(), name='edit_order'),
url(r'^$', index, name='index'),
] | robbydrive/RestaurantsAPI | management/urls.py | urls.py | py | 1,143 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.LoginView.as_view",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.LoginView",
"line_number": 7,
"usage_type": "nam... |
74791822182 | import os
import csv
import logging
from collections import defaultdict
logger = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO
)
# ROOT_PATH = "/Midgard/home/martinig/thesis-src"
ROOT_PATH = "/home/martin/Documents/Education/Master/thesis/project/thesis-src"
MODEL_NAME = "BlindGST"
DATA_SPLIT = "dev"
def nested_dict():
return defaultdict(nested_dict)
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--root_path",
type=str,
default=ROOT_PATH)
parser.add_argument("--model_name",
type=str,
default=MODEL_NAME)
parser.add_argument("--data_split",
type=str,
help="dev or test",
default=DATA_SPLIT)
args = parser.parse_args()
jobs_path = f"{args.root_path}/jobs"
out_path = f"{args.root_path}/results/{args.data_split}"
model_name = args.model_name
results_path = f"{jobs_path}/{model_name}"
logger.info(f"Collecting results from:\t{results_path}")
logger.info(f"Outputing results into:\t{out_path}")
results = nested_dict()
# Read results from all dev_results.md or (test)_results.md files for all jobs run
for path, dirs, files in os.walk(results_path):
for filename in files:
if filename == f"{args.data_split}_results.md":
path_folders = path.split("/")
filepath = os.path.join(path, filename)
with open(filepath, "r") as f:
lines = f.readlines()
result_values = lines[-1].strip().split("|")
if path_folders[-1] == "bert_best_head":
dataset = path_folders[-2]
submodel = "bert_best_head"
outfile_path = f"{out_path}/{dataset}/bert_best_head.csv"
results[dataset][submodel] = {
"ACC": result_values[0],
"SIM": result_values[1],
"FL": result_values[2],
"J": result_values[3],
"BLEU": result_values[4]
}
logger.info(f"Read results from:\t{dataset}/{submodel}")
if "top-" in path_folders[-1]:
sim_strategy = path_folders[-1]
lexicon = path_folders[-2]
common_words_dir = path_folders[-3]
dataset = path_folders[-5]
submodel = "word_embeddings"
results[dataset][submodel][common_words_dir][lexicon][sim_strategy] = {
"ACC": result_values[0],
"SIM": result_values[1],
"FL": result_values[2],
"J": result_values[3],
"BLEU": result_values[4]
}
logger.info(f"Read results from:\t{dataset}/{submodel}/{common_words_dir}/{lexicon}/{sim_strategy}")
# Aggregate results into csv files
for dataset in results.keys():
dataset_dir = f"{out_path}/{dataset}"
os.makedirs(dataset_dir, exist_ok=True)
for submodel in results[dataset].keys():
if submodel == "word_embeddings":
# jigsaw/word_embeddings/remove_common_words/abusive/top-1/results.md
submodel_dir = f"{dataset_dir}/{submodel}"
os.makedirs(submodel_dir, exist_ok=True)
for common_words_model in results[dataset][submodel].keys():
out_filepath = f"{dataset_dir}/{submodel}/{common_words_model}.csv"
logger.info(f"Writting:\t{out_filepath}")
with open(out_filepath, "w") as f:
writer = csv.writer(f)
writer.writerow(["Lexicon", "K", "ACC", "SIM", "FL", "J", "BLEU"])
for lexicon in sorted(results[dataset][submodel][common_words_model].keys()):
for sim_strategy in sorted(results[dataset][submodel][common_words_model][lexicon].keys()):
k = sim_strategy[-1] # top-9, top-7, ...
metrics = results[dataset][submodel][common_words_model][lexicon][sim_strategy].values()
writer.writerow([lexicon] + [k] + list(metrics))
if submodel == "bert_best_head":
# jigsaw/bert_best_head/results.md
out_filepath = f"{dataset_dir}/{submodel}.csv"
logger.info(f"Writting:\t{out_filepath}")
with open(out_filepath, "w") as f:
writer = csv.writer(f)
writer.writerow(["ACC", "SIM", "FL", "J", "BLEU"])
writer.writerow(results[dataset][submodel].values())
| martinigoyanes/LexiconGST | src/postprocessing/collect_results.py | collect_results.py | py | 5,189 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "collections.def... |
11863065064 | # Orbit propagator class to encapsulate solve
import numpy as np
import matplotlib.pyplot as plt
## for newer scipy.integrate
# import scipy.integrate as ode
## for older scipy.integrate (in the videos)
from scipy.integrate import ode
from mpl_toolkits.mplot3d import Axes3D
import planetary_data as pd
class OrbitPropagator:
    """Numerically propagate a two-body orbit and visualise the trajectory.

    Parameters
    ----------
    r0 : sequence of 3 floats -- initial position vector (km)
    v0 : sequence of 3 floats -- initial velocity vector (km/s)
    tf : float -- propagation end time (s)
    cb : dict -- central-body data; must supply 'mu' (km^3/s^2) and 'radius' (km)
    """
    def __init__(self, r0, v0, tf, cb=pd.earth):
        self.r0 = r0
        self.v0 = v0
        self.tf = tf
        self.cb = cb
        # placeholders; overwritten by propagate_orbit()
        self.ys = np.zeros((1, 6))   # state history [rx ry rz vx vy vz]
        self.ts = np.zeros((1, 1))   # time stamps
        self.rs = r0                 # position history
    def diff_eq(self, t, y):
        """Two-body equations of motion.

        Returns d/dt of the state [rx, ry, rz, vx, vy, vz].
        """
        rx, ry, rz, vx, vy, vz = y
        r = np.array([rx, ry, rz])
        norm_r = np.linalg.norm(r)
        # Newtonian point-mass gravity toward the central body
        ax, ay, az = -r * self.cb['mu'] / norm_r**3
        return [vx, vy, vz, ax, ay, az]
    def propagate_orbit(self):
        """Integrate the orbit with the legacy scipy `ode` interface (lsoda).

        Uses ~100 s steps; results land in self.ts, self.ys and self.rs.
        (A commented-out solve_ivp variant was removed; the legacy `ode`
        interface is kept because the newer API misbehaved here.)
        """
        dt = 100.0                           # nominal time step (s)
        n_steps = int(np.ceil(self.tf/dt))   # total number of steps
        dt = self.tf/n_steps                 # exact step so n_steps*dt == tf
        # pre-allocate memory for the outputs
        self.ys = np.zeros((n_steps, 6))
        self.ts = np.zeros((n_steps, 1))
        # initial condition: concatenated position and velocity lists
        y0 = self.r0 + self.v0
        self.ys[0] = np.array(y0)
        n = 1  # time step index
        # initialize solver
        solver = ode(self.diff_eq)
        solver.set_integrator('lsoda')
        solver.set_initial_value(y0, 0)
        # propagate orbit until tf or integrator failure
        while solver.successful() and n < n_steps:
            solver.integrate(solver.t + dt)
            self.ts[n] = solver.t
            self.ys[n] = solver.y
            n += 1
        # keep the position columns for plotting
        self.rs = self.ys[:, :3]
    def plot3D(self, show_plot=True, save_plot=False, plot_title='Default'):
        """Plot the stored trajectory around the central body.

        Fix: the original referenced an undefined local ``rs``; the
        trajectory lives in ``self.rs`` (set by propagate_orbit()).
        """
        rs = self.rs
        plt.style.use('dark_background')
        fig = plt.figure(figsize=(12, 6))
        ax = fig.add_subplot(111, projection='3d')
        # plot trajectory and initial position
        ax.plot(rs[:, 0], rs[:, 1], rs[:, 2], 'w', label='trajectory')
        ax.plot([rs[0, 0]], [rs[0, 1]], [rs[0, 2]], 'wo', label='Initial position')
        # plot central body as a sphere
        _u, _v = np.mgrid[0:2*np.pi:40j, 0:np.pi:20j]
        _x = self.cb['radius']*np.cos(_u)*np.sin(_v)
        _y = self.cb['radius']*np.sin(_u)*np.sin(_v)
        _z = self.cb['radius']*np.cos(_v)
        ax.plot_surface(_x, _y, _z, cmap='Blues')
        # plot x,y,z axes, twice the body radius long
        l = self.cb['radius']*2
        x, y, z = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
        u, v, w = [[l, 0, 0], [0, l, 0], [0, 0, l]]
        ax.quiver(x, y, z, u, v, w, color='k')
        # equal limits on all axes so the orbit is not distorted
        max_val = np.max(np.abs(rs))
        ax.set_xlim([-max_val, max_val])
        ax.set_ylim([-max_val, max_val])
        ax.set_zlim([-max_val, max_val])
        ax.set_xlabel('X (km)')
        ax.set_ylabel('Y (km)')
        ax.set_zlabel('Z (km)')
        ax.set_title(plot_title)
        plt.legend()
        if show_plot:
            plt.show()
        if save_plot:
            plt.savefig(plot_title+'.png', dpi=300, edgecolor='none')
| stevespreiz/orbit-solver | src/OrbitPropagator.py | OrbitPropagator.py | py | 3,754 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "planetary_data.earth",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"... |
14147429202 | import argparse
import re
# Named shortcut specs that --platoon accepts in place of a full platoon string.
PLATOON_PRESETS= {
    # scenario 1: 4 AVs with human cars inbetween, some of which are sensing cars used to collect metrics on
    'scenario1': 'human#sensor human*5 (human#sensor human*5 av human*5)*4 human#sensor human*5 human#sensor',
}
def parse_args():
    """Build the CLI parser and return parsed args, with --platoon expanded
    into an explicit vehicle-type list by platoon_parse()."""
    parser = argparse.ArgumentParser()
    platoon_help = ('Platoon of vehicles following the leader. Can contain either "human"s or "av"s. '
                    '"(av human*2)*2" can be used as a shortcut for "av human human av human human". '
                    'Vehicle tags can be passed with hashtags, eg "av#tag" "human#tag*3". '
                    'Available presets: "scenario1".')
    parser.add_argument('--platoon', type=str, default='av human*5', help=platoon_help)
    parser.add_argument('--gui', default=False, action='store_true',
                        help='Run in gui mode so that the platoon\'s progress can be watched during simulation. '
                             'Has not been tested.')
    parser.add_argument('--launch_file', type=str, default='followerstopper_with_accel_dynamics.launch',
                        help='Launch file that would be used to start the ROS nodes to support a single autonomous vehicle.')
    parser.add_argument('--accel_node_names', type=str, default='',
                        help='Acceleration dynamics ROS node "package type name" separated by spaces, like "accel accel accel",'
                             'if this is not included in the launch file provided.')
    parsed = parser.parse_args()
    # normalise the platoon spec into an explicit list of vehicle types
    parsed.platoon = platoon_parse(parsed.platoon)
    return parsed
# From trajectory-training
def platoon_parse(platoon):
    """Expand a platoon specification string into a list of vehicle types.

    Supports named presets (see PLATOON_PRESETS), repetition ("human*5"),
    grouping ("(av human*2)*3") and hashtag tags ("human#sensor").

    Returns a list whose entries are 'av' or 'idm' (for humans).
    Raises ValueError for any other vehicle type.
    """
    if platoon in PLATOON_PRESETS:
        print(f'Setting scenario preset "{platoon}"')
        platoon = PLATOON_PRESETS[platoon]
    # replace "(subplatoon)*n" with subplatoon repeated n times
    replace1 = lambda match: ' '.join([match.group(1)] * int(match.group(2)))
    platoon = re.sub(r'\(([a-z0-9\s\*\#]+)\)\*([0-9]+)', replace1, platoon)
    # parse "veh#tag1...#tagk*n" tokens into (veh, '#tag1...#tagk', n)
    platoon_lst = re.findall(r'([a-z]+)((?:\#[a-z]+)*)(?:\*?([0-9]+))?', platoon)
    # spawn vehicles
    vehicles = []
    for vtype, vtags, vcount in platoon_lst:
        # NOTE(review): vtags is parsed but not used (the original built a
        # per-iteration `tags` list and discarded it); wire it through here
        # if per-vehicle tags are ever needed.
        for _ in range(int(vcount) if vcount else 1):
            if vtype == 'av':
                vehicles.append('av')
            elif vtype == 'human':
                vehicles.append('idm')
            else:
                raise ValueError(f'Unknown vehicle type: {vtype}. Allowed types are "human" and "av".')
    return vehicles
| sarahbhaskaran/cosim | scripts/args.py | args.py | py | 2,504 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 38,
"usage_type": "call"
}
] |
43157704514 | #!/usr/bin/python3
#-*- coding:utf-8 -*-
import sys
import pygame
from pygame.locals import *
# window size and basic pygame setup
SCREEN_WIDTH, SCREEN_HEIGHT = 480, 700
pygame.init()
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption("Plane Flight")
# static resources: background, looping music, two animation frames
bk_img = pygame.image.load("resources/image/background.png")
bk_music = pygame.mixer.Sound("resources/music/game_music.ogg")
plane_img1 = pygame.image.load("resources/image/plane_ok_1.png")
plane_img2 = pygame.image.load("resources/image/plane_ok_2.png")
# start the plane centred horizontally, two thirds of the way down
plane_pos = [SCREEN_WIDTH // 2, SCREEN_HEIGHT * 2 // 3]
bk_music.play(-1)
tick = 0
clock = pygame.time.Clock()
# per-arrow-key speed: 5 px/frame while held, 0 when released
offset = {K_LEFT: 0, K_RIGHT: 0, K_UP: 0, K_DOWN: 0}
while True:
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        # only track the four arrow keys; the original stored every key,
        # silently growing the dict on unrelated key presses/releases
        if event.type == KEYDOWN and event.key in offset:
            offset[event.key] = 5
        elif event.type == KEYUP and event.key in offset:
            offset[event.key] = 0
    # net displacement per frame from opposing key pairs
    x_off = offset[K_RIGHT] - offset[K_LEFT]
    y_off = offset[K_DOWN] - offset[K_UP]
    plane_pos[0] += x_off
    plane_pos[1] += y_off
    screen.blit(bk_img, (0, 0))
    # alternate the two frames every 8 ticks for a simple animation
    if tick % 16 < 8:
        screen.blit(plane_img1, plane_pos)
    else:
        screen.blit(plane_img2, plane_pos)
    tick += 1
    pygame.display.update()
    clock.tick(30)  # cap at 30 FPS
| minskeyguo/mylib | python-edu/plane/02-plane.py | 02-plane.py | py | 1,311 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pygame.displa... |
71485638184 | import json
from .lock import f_lock
from .config import read_config
config = read_config()  # module-level configuration, loaded once at import time
def update_dict(key=None, target=4, goal=4, flag=1):
    """Atomically update one entry of bin3D_list.json under the file lock.

    If *key* is None, pick the first key whose value differs from *goal*;
    if every entry already equals *goal*, return None. The chosen (or
    given) key is set to *flag*, the file is rewritten, and the key is
    returned.

    NOTE(review): *target* is unused; kept for backward compatibility.
    """
    pwd = config["pwd"]
    with f_lock(f"{pwd}/bin3D_list.json") as json_file:
        json_dict = json.load(json_file)
        if key is None:
            for key in json_dict.keys():
                if json_dict[key] != goal:
                    break
            else:
                return None
        json_dict[key] = flag
        # Rewrite while still holding the lock so concurrent callers cannot
        # interleave between our read and write (the original wrote after
        # the `with f_lock` block exited -- a read-modify-write race).
        with open(f"{pwd}/bin3D_list.json", 'w+') as out_file:
            out_file.write(json.dumps(json_dict, indent=4))
    return key
def find_exclusion_list(file_name):
    """Return the list of variable names to exclude for *file_name*.

    The tag is taken as the second underscore-delimited token of the name
    (file_name is assumed to be a string containing an underscore).
    Every tag currently maps to an empty exclusion list; the commented
    entry records the variables previously excluded for other tags.
    """
    tag = file_name.split('_')[1]
    if tag in ('CORE', 'CLOUD'):
        return []
    # exc_list = ['QI', 'NI', 'QS', 'NS', 'QG', 'NG']
    return []
| lorenghoh/bin3D2zarr | src/lib/handler.py | handler.py | py | 887 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "config.read_config",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "lock.f_lock",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number... |
16537362389 | import json
import pickle
import math
import os
import torch
import numpy as np
from pycocoevalcap.eval import COCOEvalCap
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
from build_vocab import Vocabulary
from adaptiveModel import Encoder2Decoder
from cocoapi2.PythonAPI.pycocotools.coco import COCO
from data_load import collate_fn, CocoDataset, get_loader, CocoEvalLoader
from evaluation import predict_captions, coco_metrics
from utils import to_var
def train_model(image_dir, caption_path, val_caption_path, vocab_path, learning_rate, num_epochs, lrd, lrd_every, alpha,
                beta, clip, logger_step, model_path, crop_size, batch_size, num_workers, cnn_learning_rate, shuffle,
                eval_size, evaluation_result_root, max_steps=None):
    """Train the adaptive-attention captioning model and evaluate every epoch.

    Loads the vocabulary from *vocab_path*, trains Encoder2Decoder on the
    COCO-style data under *image_dir*/*caption_path*, saves a checkpoint
    per epoch to *model_path*, and early-stops when the CIDEr score has
    not improved for 6 consecutive epochs.  *max_steps* optionally caps
    the number of batches per epoch (useful for smoke tests).
    """
    cider_scores = []
    best_epoch = 0
    best_cider_score = 0
    with open(vocab_path, 'rb') as f:
        vocab = pickle.load(f)
    # Image Preprocessing (random crop/flip augmentation + ImageNet normalisation)
    transform = transforms.Compose([
        transforms.RandomCrop(crop_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406),
                             (0.229, 0.224, 0.225))])
    data_loader = get_loader(image_dir, caption_path, vocab, transform, batch_size, shuffle=shuffle, num_workers=num_workers)
    adaptive = Encoder2Decoder(256, len(vocab), 512)
    # Constructing CNN parameters for optimization, only fine-tuning higher layers
    cnn_subs = list(adaptive.encoder.resnet_conv.children())[5:]
    cnn_params = [list(sub_module.parameters()) for sub_module in cnn_subs]
    cnn_params = [item for sublist in cnn_params for item in sublist]
    cnn_optimizer = torch.optim.Adam(cnn_params, lr=cnn_learning_rate,
                                     betas=(alpha, beta))
    # non-CNN parameters: encoder affine layers + the whole decoder
    params = list(adaptive.encoder.affine_a.parameters()) + list(adaptive.encoder.affine_b.parameters()) \
             + list(adaptive.decoder.parameters())
    start_epoch = 1
    LMcriterion = nn.CrossEntropyLoss()
    # Change to GPU mode if available
    if torch.cuda.is_available():
        adaptive.cuda()
        LMcriterion.cuda()
    num_steps = len(data_loader)
    for epoch in range(start_epoch, num_epochs + 1):
        # Learning-rate decay: halve every lrd_every epochs once past epoch lrd.
        if epoch > lrd:
            frac = float(epoch - lrd) / lrd_every
            decay_factor = math.pow(0.5, frac)
            # NOTE(review): this multiplies the decay-start epoch `lrd`, not the
            # current learning_rate -- looks like a bug (expected
            # learning_rate * decay_factor); confirm against the original paper code.
            learning_rate = lrd * decay_factor
            print(f'Learning Rate Epoch {epoch}: {"{0:.6f}".format(learning_rate)}')
        optimizer = torch.optim.Adam(params, lr=learning_rate, betas=(alpha, beta))
        print(f'Training for Epoch {epoch}')
        for i, (images, captions, lengths, _, _) in enumerate(data_loader):
            if max_steps is not None:
                if i > max_steps:
                    break
            images = to_var(images)
            captions = to_var(captions)
            # targets are the captions shifted by one token (teacher forcing)
            lengths = [cap_len - 1 for cap_len in lengths]
            targets = pack_padded_sequence(captions[:, 1:], lengths, batch_first=True)[0]
            adaptive.train()
            adaptive.zero_grad()
            packed_scores = adaptive(images, captions, lengths)
            loss = LMcriterion(packed_scores[0], targets)
            loss.backward()
            # clip LSTM gradients elementwise to stabilise training
            for p in adaptive.decoder.LSTM.parameters():
                p.data.clamp_(-clip, clip)
            optimizer.step()
            # CNN fine-tuning only kicks in after epoch 20
            if epoch > 20:
                cnn_optimizer.step()
            if i % logger_step == 0:
                print(f'Epoch {epoch}/{num_epochs}, Step {i}/{num_steps}, CrossEntropy Loss: {loss.item()}, Perplexity: {np.exp(loss.item())}')
        torch.save(adaptive.state_dict(), os.path.join(model_path, f'adaptive-{epoch}.pkl'))
        print('Start Epoch Evaluation')
        # Evaluate Model after epoch
        epoch_score = evaluate_epoch(adaptive, image_dir, vocab, crop_size, val_caption_path, num_workers, eval_size, evaluation_result_root, epoch)
        cider_scores.append(epoch_score)
        print(f'Epoch {epoch}/{num_epochs}: CIDEr Score {epoch_score}')
        if epoch_score > best_cider_score:
            best_cider_score = epoch_score
            best_epoch = epoch
        # early stopping: no CIDEr improvement within the last 6 epochs
        if len(cider_scores) > 5:
            last_6 = cider_scores[-6:]
            last_6_max = max(last_6)
            if last_6_max != best_cider_score:
                print('No improvements in the last 6 epochs')
                print(f'Model of best epoch #: {best_epoch} with CIDEr score {best_cider_score}')
                break
def evaluate_epoch(model, image_dir, vocab, crop_size, val_caption_path, num_workers, eval_size, evaluation_result_root, epoch):
    """Predict captions on the validation set and return the CIDEr score.

    Writes the raw predictions to
    '<evaluation_result_root>/evaluate-<epoch>.json' before scoring.
    """
    # deterministic preprocessing (resize only, no augmentation)
    transform = transforms.Compose([
        transforms.Resize((crop_size, crop_size)),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406),
                             (0.229, 0.224, 0.225))])
    eval_data_loader = torch.utils.data.DataLoader(
        CocoEvalLoader(image_dir, val_caption_path, transform),
        batch_size=eval_size,
        shuffle=False, num_workers=num_workers,
        drop_last=False)
    result_json = predict_captions(model, vocab, eval_data_loader)
    # use a context manager so the file is flushed and closed
    # (the original passed a bare open() handle to json.dump and leaked it)
    with open(evaluation_result_root + f'/evaluate-{epoch}.json', 'w') as result_file:
        json.dump(result_json, result_file)
    return coco_metrics(val_caption_path, evaluation_result_root, 'CIDEr')
| b-feldmann/ImcaptionNet | train.py | train.py | py | 5,421 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pickle.load",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.transforms.Compose",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.transforms",
"line_number": 35,
"usage_type": "name"
},
... |
70806732583 | from bs4 import BeautifulSoup
import requests
import time
import json
import wikipedia
import html2text
API_URL = "https://{}.fandom.com/api.php"
def timeit(fn):
    """Decorator: run *fn* 10 times, print the mean wall-clock duration.

    Fixes over the original: the wrapped function's return value (from
    the last run) is returned instead of being discarded, and
    functools.wraps preserves the wrapped function's metadata.
    """
    import functools

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        durations = []
        result = None
        for _ in range(10):
            start = time.time()
            result = fn(*args, **kwargs)
            durations.append(time.time() - start)
        print("average time", sum(durations) / len(durations))
        return result
    return wrapper
def update_fandom(name: str = None):
    """Point the module-level API_URL at a specific fandom wiki, or at Wikipedia.

    With a wiki *name*, fills the subdomain placeholder of API_URL; with no
    name, switches to the English Wikipedia API endpoint.

    NOTE(review): after a first call with a name, API_URL no longer contains
    the '{}' placeholder, so a second name is silently ignored -- confirm
    intended single-call usage.
    """
    global API_URL
    if name is not None:  # `!= None` replaced with the identity check idiom
        API_URL = API_URL.format(name.lower())
    else:
        API_URL = "http://en.wikipedia.org/w/api.php"
def _fandom_request(params):
    """Synchronous (blocking) GET against the module's API_URL.

    Returns the decoded JSON payload.  (A stray debug print of the
    request URL was removed, along with the superfluous parentheses
    around the return expression.)
    """
    response = requests.get(API_URL, params=params)
    return response.json()
class Page:
    """A single wiki page, addressed by numeric pageid or by title."""
    def __init__(self, pageid: int = None, page_name=None):
        '''Assigning the title or the pageid'''
        self.pageid = pageid
        #page_name ie the Query String ie https://hello.fandom.com/api.php
        self.page_name = page_name
    def __repr__(self):
        # NOTE(review): self.title is never assigned anywhere in this class,
        # so repr() raises AttributeError -- confirm the intended attribute.
        return f"Page(Id:{self.pageid},Title:{self.title})"
    @property
    def all_content(self):
        # Fetch the rendered page HTML via the MediaWiki 'parse' action and
        # extract the text of the first paragraphs.
        SEARCH_PARAMS = {
            "action": "parse",
            "format": "json",
            "pageid": self.pageid,
            "contentformat": 'application/json'
        }
        raw_response = _fandom_request(SEARCH_PARAMS)["parse"]["text"]["*"]
        '''THE PROBLEM STARTS FROM HERE PARSING IS NOT DONE CORRECTLY HERE MORE EFFICIENT WORK TO DO'''
        '''⛔⛔⛔⛔⛔⛔⛔⛔❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌⛔⛔⛔⛔⛔⛔⛔⛔'''
        '''content=BeautifulSoup(raw_response,"lxml")
        raw_response="\n".join((i.text for i in content.findAll(name=)))
        text_maker = html2text.HTML2Text()
        text_maker.ignore_links = True
        text_maker.ignore_images=True
        text_maker.ignore_emphasis=True
        text_maker.ignore_tables=True
        text = text_maker.handle(raw_response)
        return text'''
        content = BeautifulSoup(raw_response, "lxml")
        paragraphs = content.find_all(
            "p", limit=10,
        ) # returns list of all <p> inside <div> class_=mw-parser-output we only need 1st(and 2nd if the content is less and even 3rd can be added if needed)
        # strip citation markers like [1], which live in <sup> tags
        for par in paragraphs:
            for tags in par.find_all("sup"):
                tags.decompose()
        main_content=""
        # concatenate all non-trivial paragraphs into one text blob
        for para in paragraphs:
            para_text=para.text
            if len(para_text) <= 1:
                pass
            else:
                main_content+="\n"+para_text
        # (dead length-capping loop removed from active code by the original
        # author; kept here verbatim as commented history)
        #for paragraph in
        #
        #   1:]: # O(1) is more better than searching the whole list
        #    if len(text_content) > 2048 or len(text_content +
        #                                       paragraph.text) > 2048:
        #       break # breaks the loop if more than 1024 char for Embed
        #   else:
        #       text_content += "\n" + paragraph.text
        return main_content
    @property
    def images(self):
        # NOTE(review): stub -- PARSED_DATA is not defined anywhere on this
        # class, so accessing this property raises AttributeError.
        self.PARSED_DATA
class Search:
    """Stateful helper around the fandom/MediaWiki search endpoints.

    Remembers the parameters of the most recent ``search`` call in
    ``last_params`` (None until the first search is issued).
    """
    def __init__(self):
        self.last_params = None
    def __str__(self):
        return f"Last Parameters were: {self.last_params}"
    def __eq__(self, value):
        # Two Search objects compare equal when their last query matches.
        return self.last_params == value.last_params
    def show(self):
        # Debug helper: dump the most recent request parameters to stdout.
        print(self.last_params)
    def search(self, query: str, limit=5):
        """Full-text search; return a mapping of page id -> page title."""
        params = {
            "action": "query",
            "format": "json",
            "list": "search",
            "srprop": "",
            "srsearch": query,
            "srlimit": limit,
            "srinfo": ""
        }
        self.last_params = params
        results = {}
        for entry in _fandom_request(params)["query"]["search"]:
            results[entry["pageid"]] = entry["title"]
        return results
    def open_search(self, item: str):
        """OpenSearch lookup; return a mapping of title -> URL."""
        params = {
            "action": "opensearch",
            "format": "json",
            "search": item,
        }
        # Response array indices: 0 = echoed query, 1 = titles, 3 = URLs
        payload = _fandom_request(params)
        return dict(zip(payload[1], payload[3]))
class FandomExceptions(Exception):
    """Base exception for errors raised by this fandom client module."""
# page=Page(pageid=150086)
# print(page.all_content)
if __name__ =="__main__":
    # Interactive smoke test: read a query, point the module at Wikipedia,
    # search for the query, and print the first matching page's text
    # together with the elapsed wall-clock time.
    sex=input("Enter Name")
    print(type(sex))
    update_fandom()
    item=Search()
    time1=time.time()
    # take the pageid of the first search hit
    a=list(item.search(sex).keys())[0]
    page=Page(pageid=a)
    print(page.all_content)
    print(time.time()-time1)
    #print(wikipedia.page(pageid=2616).content)
| Unic-X/Kala-Bot | commands/Fandom/fandom.py | fandom.py | py | 4,901 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number... |
34173082997 | #!/usr/bin/env python
__author__ = 'xinya'
from bleu.bleu import Bleu
from meteor.meteor import Meteor
from rouge.rouge import Rouge
from cider.cider import Cider
from collections import defaultdict
from argparse import ArgumentParser
import codecs
from pdb import set_trace
import sys
import numpy as np
# Python 2 relic: force the default string encoding to UTF-8 so the scorers
# can handle non-ASCII tokens (reload/setdefaultencoding do not exist on
# Python 3 -- this module is Python 2 only).
reload(sys)
sys.setdefaultencoding('utf-8')
class QGEvalCap:
    """Wraps the pycocoevalcap scorers (BLEU 1-4, METEOR, ROUGE-L) for
    question-generation evaluation.

    gts: dict mapping a source sentence to its list of reference questions.
    res: dict mapping a source sentence to a one-item list with the prediction.
    """
    def __init__(self, gts, res):
        self.gts = gts
        self.res = res
    def evaluate(self):
        """Run every scorer over (gts, res); print each metric and return the
        corpus-level scores as a flat list (BLEU 1-4, METEOR, ROUGE-L)."""
        output = []
        scorers = [
            (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
            (Meteor(),"METEOR"),
            (Rouge(), "ROUGE_L"),
            # (Cider(), "CIDEr")
        ]
        bleus = []
        # =================================================
        # Compute scores
        # =================================================
        for scorer, method in scorers:
            # print 'computing %s score...'%(scorer.method())
            score, scores = scorer.compute_score(self.gts, self.res)
            # set_trace()
            # Bleu returns a list of 4 scores; the others return one scalar.
            if type(method) == list:
                bleus = scores
                # side effect: per-sentence BLEU scores are dumped to bleus.npz
                np.savez("bleus.npz", bleus=bleus)
                for sc, scs, m in zip(score, scores, method):
                    print("%s: %0.5f"%(m, sc))
                    output.append(sc)
            else:
                print( "%s: %0.5f"%(method, score))
                output.append(score)
        return output
def eval(out_file, src_file, tgt_file, isDIn = False, num_pairs = 500):
    """
    Given filenames, calculate the metric scores for a prediction file.

    out_file: predictions, one per line, aligned with src/tgt.
    src_file: tokenized source sentences, one per line.
    tgt_file: tokenized reference questions, one per line.
    isDIn, num_pairs: unused; kept for backward compatibility.

    Returns the list of corpus-level scores from QGEvalCap.evaluate().
    """
    infile = [line.strip() for line in codecs.open(src_file, 'r', encoding='utf-8').readlines()]
    tgts = [line.strip() for line in codecs.open(tgt_file, 'r', encoding='utf-8').readlines()]
    output = [line.strip() for line in codecs.open(out_file, 'r', encoding='utf-8').readlines()]
    # only score as many triples as every file provides
    aclen = min(len(infile), len(tgts), len(output))
    output = output[:aclen]
    pairs = []
    for inline, tgt in zip(infile[:aclen], tgts[:aclen]):
        pairs.append({'tokenized_sentence': inline, 'tokenized_question': tgt})
    print(len(pairs), len(output))
    for idx, pair in enumerate(pairs):
        pair['prediction'] = output[idx]
    ## eval
    # (the original re-imported QGEvalCap from this very module -- a
    # circular self-import; the class defined above is used directly)
    from json import encoder
    encoder.FLOAT_REPR = lambda o: format(o, '.4f')
    # res: source -> [prediction]; gts: source -> [reference, ...]
    res = defaultdict(lambda: [])
    gts = defaultdict(lambda: [])
    for pair in pairs[:]:
        key = pair['tokenized_sentence']
        res[key] = [pair['prediction']]
        ## gts
        gts[key].append(pair['tokenized_question'])
    QGEval = QGEvalCap(gts, res)
    return QGEval.evaluate()
if __name__ == "__main__":
    # Command-line entry point: score a prediction file against references.
    parser = ArgumentParser()
    parser.add_argument('-out', '--out_file', dest='out_file',
                        default='./output/pred.txt', help='output file to compare')
    parser.add_argument('-src', '--src_file', dest='src_file',
                        default='../data/processed/src-test.txt', help='src file')
    parser.add_argument('-tgt', '--tgt_file', dest='tgt_file',
                        default='../data/processed/tgt-test.txt', help='target file')
    cli_args = parser.parse_args()
    print("scores: \n")
    eval(cli_args.out_file, cli_args.src_file, cli_args.tgt_file)
| zpeide/transfer_qg | metric/qgevalcap/eval.py | eval.py | py | 3,487 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "sys.setdefaultencoding",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "bleu.bleu.Bleu",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "meteor.meteor.Meteor",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "rouge.roug... |
12340279038 | # treatment of routes for search
from pathlib import Path
from flask import Blueprint, \
make_response, \
render_template, \
request, \
session
from backend.collect import container_images_cache, \
update_status
from backend.config import config
from frontend.misc import is_htmx
# properties a result list may be sorted by; the first entry is the default
SORTABLE_BY = ['name', 'created', 'tag']
# sort direction name -> reverse flag for sorted(); first key is the default
SORT_ORDERS = {'up': False, 'down': True}
# page size used by the paginated (infinite-scroll) result view
RESULTS_PER_PAGE = 10
# take name for blueprint from file for flawless copy&paste
blueprint = Blueprint(Path(__file__).stem, __name__)
def _resolve_sort_param(request, session, name, allowed, default):
    """Resolve sort parameter *name* from request args or session.

    A valid value in the request wins and is persisted to the session;
    otherwise an existing (truthy) session value is reused; failing that,
    *default* is persisted and returned.
    """
    value = request.args.get(name)
    if value in allowed:
        session[name] = value
        return value
    if session.get(name):
        return session[name]
    session[name] = default
    return default


def process_search_request(request=None, session=None, search_string: str = ''):
    """
    Prepare and execute a search over the cached container images.

    :param request: current request; args may carry sort_by/sort_order/page,
                    form may carry a fresh 'search' string
    :param session: session used to persist the sort settings
    :param search_string: substring to match against image names
    :return: (search_string, paginated results, total result count,
              current page, total pages)
    """
    search_results = dict()
    # 'sort_by' selects the property to sort by, 'sort_order' the direction;
    # both come from the request, fall back to the session, then to defaults.
    # (This replaces two hand-rolled copies of the same branching logic.)
    sort_by = _resolve_sort_param(request, session, 'sort_by',
                                  SORTABLE_BY, SORTABLE_BY[0])
    sort_order = _resolve_sort_param(request, session, 'sort_order',
                                     list(SORT_ORDERS.keys()), list(SORT_ORDERS.keys())[0])
    # clean up search string (a posted form value replaces the URL one)
    if request.form.get('search') or request.form.get('search') == '':
        # to be refined
        search_string = request.form['search'].strip().lower()
    # add matching container_images to search results (case-insensitive)
    for name, container_image in container_images_cache.index_by_name.items():
        if search_string.lower() in name.lower():
            search_results.update({name: container_image})
    # sort by the chosen property and direction
    search_results_sorted = dict(sorted(search_results.items(),
                                        key=lambda x: x[1][sort_by],
                                        reverse=SORT_ORDERS.get(sort_order)))
    # take only sorted values as list
    search_results_list = list(search_results_sorted.values())
    # count number of results
    search_results_count = len(search_results_list)
    # paginate_search_results() delivers page-related information and paginated list of results
    search_results_list_paginated, page, pages_count = paginate_search_results(request, search_results_list)
    return search_string, search_results_list_paginated, search_results_count, page, pages_count
def paginate_search_results(request=None, search_results: list = None):
    """
    Cut a search result list into page-sized pieces.

    :param request: current request; 'page' may be supplied as a query arg
    :param search_results: full, sorted list of results
    :return: (results for the requested page, page number, total page count)
    """
    # Ceil-divide so that an exact multiple of RESULTS_PER_PAGE no longer
    # yields a trailing empty page (the original computed len // SIZE + 1);
    # an empty result list still counts as a single page.
    pages_count = max(1, -(-len(search_results) // RESULTS_PER_PAGE))
    if not request.args.get('page'):
        page = 1
    else:
        try:
            page = int(request.args['page'])
        except ValueError:
            # non-numeric page argument falls back to the first page
            page = 1
    search_results_paginated = search_results[(page - 1) * RESULTS_PER_PAGE:page * RESULTS_PER_PAGE]
    return search_results_paginated, page, pages_count
@blueprint.route('/<part1>/<part2>/<part3>/<part4>/<part5>/', methods=['GET'])
@blueprint.route('/<part1>/<part2>/<part3>/<part4>/<part5>', methods=['GET'])
@blueprint.route('/<part1>/<part2>/<part3>/<part4>/', methods=['GET'])
@blueprint.route('/<part1>/<part2>/<part3>/<part4>', methods=['GET'])
@blueprint.route('/<part1>/<part2>/<part3>/', methods=['GET'])
@blueprint.route('/<part1>/<part2>/<part3>', methods=['GET'])
@blueprint.route('/<part1>/<part2>/', methods=['GET'])
@blueprint.route('/<part1>/<part2>', methods=['GET'])
@blueprint.route('/<part1>/', methods=['GET'])
@blueprint.route('/<search_string>', methods=['GET', 'POST'])
@blueprint.route('/', methods=['GET', 'POST'])
def search(search_string: str = '', part1: str = '', part2: str = '', part3: str = '', part4: str = '',
           part5: str = ''):
    """
    Handle search requests, arriving either as a single search string or as
    a slash-separated path split across the <part1>..<part5> segments.
    """
    # Rebuild a path-like search string when it arrived split across segments.
    if not search_string:
        segments = [part1, part2, part3, part4, part5]
        search_string = '/'.join(segment for segment in segments if segment)
        # route matching strips a trailing '/'; restore it when the request
        # URL carried one and at least one segment is non-empty
        if any(segments) and request.base_url.endswith('/'):
            search_string += '/'
    # run the search and split the result into page pieces
    (search_string,
     search_results,
     search_results_count,
     page,
     pages_count) = process_search_request(request, session, search_string)
    # 'sort' and 'scroll' render only the result fragment (sorting and
    # paginated infinite scroll via htmx); everything else gets the full index
    if request.args.get('mode') in ['sort', 'scroll']:
        template = '/search/results.html'
    else:
        template = '/search/index.html'
    rendered = render_template(template,
                               is_htmx=is_htmx(),
                               gitlab_url=config.api.url,
                               page=page,
                               pages_count=pages_count,
                               search_string=search_string,
                               search_results=search_results,
                               search_results_count=search_results_count,
                               session=session,
                               SORTABLE_BY=SORTABLE_BY,
                               SORT_ORDERS=SORT_ORDERS,
                               update_status=update_status)
    # wrap in a Response so the htmx header below can update browser history
    response = make_response(rendered)
    response.headers['HX-Push-Url'] = f'/search/{search_string}'
    return response
| HenriWahl/gitlab-container-registry-hub | frontend/search.py | search.py | py | 6,841 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "flask.request.arg... |
29142750716 | # Assignment Collector/Grader - a Django app for collecting and grading code
# Copyright (C) 2010,2011,2012 Anthony Rossi <anro@acm.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collector.models import Course, CourseForm, JavaAssignment, JavaAssignmentForm, JavaSubmission
from grader.models import JavaGrade
from django.contrib import admin
from django import forms
class CourseAdmin(admin.ModelAdmin):
    """Admin configuration for Course: owner stamping and per-user visibility."""
    form = CourseForm
    fieldsets = (
        ('Course Information', {
            'fields': ('course_num', 'course_title',)
        }),
        ('Course Description', {
            # 'classes': ('collapse',),
            'fields': ('description',)
        }),
        ('Course Passkey', {
            'fields': ('passkey',)
        }),
        ('Year and Term', {
            'fields':('year', 'term',)
        }),
        ('Owner', {
            'classes':('collapse',),
            'fields':('creator',)
        }),
    )
    list_display = ('__unicode__', 'course_num', 'term', 'year', )
    list_filter = ('year', 'term', 'course_num',)
    # list_display_links = ('__unicode__', 'course_num', 'term', 'year', )
    search_fields = ('^course_num', )
    def save_model(self, request, obj, form, change):
        """Stamp the current user as creator when the Course is first created."""
        if not change:  # creating, not updating (was `change == False`)
            obj.creator = request.user
        obj.save()
    def save_formset(self, request, form, formset, change):
        """Stamp the current user as creator on newly created inline objects."""
        instances = formset.save(commit=False)
        for instance in instances:
            if not change:  # creating, not updating
                instance.creator = request.user
            instance.save()
    def queryset(self, request):
        """Superusers see every Course; others only the ones they created."""
        qs = super(CourseAdmin, self).queryset(request)
        if request.user.is_superuser:
            return qs
        return qs.filter(creator=request.user)
class AssignmentAdmin(admin.ModelAdmin):
    """Admin configuration for JavaAssignment, including a gradesheet action."""
    form = JavaAssignmentForm
    fieldsets = (
        ('Assignment Information', {
            'fields': ('course', 'name', 'start_date', 'due_date', )
        }),
        ('Submission Settings', {
            'fields': ('max_submissions', 'allow_late',)
        }),
        ('Assignment Instructions', {
            # 'classes': ('collapse',),
            'fields': ('instructions',)
        }),
        ('Assignment Passkey', {
            'fields': ('passkey',)
        }),
        ('Test File', {
            'fields':('test_file', )
        }),
        ('Advanced', {
            'classes': ('collapse',),
            'fields':('java_cmd', 'javac_cmd', 'options', 'watchdog_wait', 'creator')
        }),
    )
    list_display = ('__unicode__', 'course', 'due_date', )
    list_filter = ('course', 'due_date')
    search_fields = ('name', )
    actions = ['display_grades']
    def save_model(self, request, obj, form, change):
        """Stamp the current user as creator when the Assignment is first created."""
        if not change:  # creating, not updating (was `change == False`)
            obj.creator = request.user
        obj.save()
    def save_formset(self, request, form, formset, change):
        """Stamp the current user as creator on newly created inline objects."""
        instances = formset.save(commit=False)
        for instance in instances:
            if not change:  # creating, not updating
                instance.creator = request.user
            instance.save()
    def queryset(self, request):
        """Superusers see every Assignment; others only the ones they created."""
        qs = super(AssignmentAdmin, self).queryset(request)
        if request.user.is_superuser:
            return qs
        return qs.filter(creator=request.user)
    def display_grades(self, request, queryset):
        """Render a gradesheet: each student's best (then newest) submission
        per started assignment, with due-date warnings where relevant."""
        from django.shortcuts import render_to_response
        import datetime
        grades = []
        # one timestamp for all comparisons (the original called now() three
        # times per assignment, allowing inconsistent comparisons)
        now = datetime.datetime.now()
        for assn in queryset.order_by('course'):
            # Only include assignments that have started; nothing can have
            # been submitted to the others yet.
            if assn.start_date < now:
                warning = ""
                # all distinct student names with submissions to this assignment
                names = assn.javasubmission_set.values_list('last_name', 'first_name').distinct()
                # best-scoring, then newest, submission for each name
                submissions = []
                for name in names:
                    submissions.append(JavaSubmission.objects.filter(last_name=name[0], first_name=name[1], assignment=assn).order_by('-javagrade__tests_passed', '-submission_time')[0])
                # warn when grades are retrieved before the due date
                if now < assn.due_date:
                    warning = "These grades are preliminary. The assignment is not due yet."
                # warn when late submissions are allowed and the due date has passed
                if assn.allow_late and assn.due_date < now:
                    warning = "Submissions after the due date are allowed."
                grades.append([assn, submissions, warning])
        return render_to_response('grades.html', {'grades':grades,})
    display_grades.short_description = "Generate gradesheet"
class SubmissionAdmin(admin.ModelAdmin):
    # Submissions are created by students through the collector, so every
    # informative field is exposed read-only in the admin.
    fieldsets = (
        ('Submission Information', {
            'fields': ('first_name', 'last_name', 'submission_time', 'assignment', )
        }),
        ('Submitted File', {
            'fields':('file', )
        }),
        # ('Grade', {
        #     'fields':('javagrade',)
        # }),
    )
    list_display = ('__unicode__', 'last_name', 'first_name', 'assignment', 'submission_time', 'javagrade')
    list_filter = ('assignment', 'assignment__course', 'submission_time', 'last_name', 'submission_number')
    readonly_fields = ('first_name', 'last_name', 'assignment', 'submission_time', 'javagrade')
# hook the customised admin classes into Django's admin site
admin.site.register(Course, CourseAdmin)
admin.site.register(JavaAssignment, AssignmentAdmin)
admin.site.register(JavaSubmission, SubmissionAdmin) | rossica/assignmentcollectorgrader | collector/admin.py | admin.py | py | 7,481 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "collector.models.CourseForm",
"line_number": 25,
"usage_type": "name"
},
{
... |
38033762926 | import asyncio
import inspect
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import List
import pytest
from typing_extensions import Annotated, TypedDict
from pydantic import BaseModel, Extra, Field, ValidationError, validate_arguments
from pydantic.decorator import ValidatedFunction
from pydantic.errors import ConfigError
# marker for tests that exercise Python >= 3.8-only behaviour
skip_pre_38 = pytest.mark.skipif(sys.version_info < (3, 8), reason='testing >= 3.8 behaviour only')
def test_args():
    """validate_arguments accepts equivalent positional calling styles and
    rejects missing, mistyped, extra-positional, unknown-keyword and
    duplicate-keyword arguments with the expected error payloads."""
    @validate_arguments
    def foo(a: int, b: int):
        return f'{a}, {b}'
    # several equivalent positional calling conventions all validate
    assert foo(1, 2) == '1, 2'
    assert foo(*[1, 2]) == '1, 2'
    assert foo(*(1, 2)) == '1, 2'
    assert foo(*[1], 2) == '1, 2'
    # both arguments missing -> one 'field required' error per field
    with pytest.raises(ValidationError) as exc_info:
        foo()
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'field required', 'type': 'value_error.missing'},
        {'loc': ('b',), 'msg': 'field required', 'type': 'value_error.missing'},
    ]
    # non-integer value for b -> type error on that field only
    with pytest.raises(ValidationError) as exc_info:
        foo(1, 'x')
    assert exc_info.value.errors() == [
        {'loc': ('b',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
    ]
    # too many positional arguments
    with pytest.raises(ValidationError) as exc_info:
        foo(1, 2, 3)
    assert exc_info.value.errors() == [
        {'loc': ('args',), 'msg': '2 positional arguments expected but 3 given', 'type': 'type_error'}
    ]
    # unknown keyword argument
    with pytest.raises(ValidationError) as exc_info:
        foo(1, 2, apple=3)
    assert exc_info.value.errors() == [
        {'loc': ('kwargs',), 'msg': "unexpected keyword argument: 'apple'", 'type': 'type_error'}
    ]
    # keyword duplicating a positional argument
    with pytest.raises(ValidationError) as exc_info:
        foo(1, 2, a=3)
    assert exc_info.value.errors() == [
        {'loc': ('v__duplicate_kwargs',), 'msg': "multiple values for argument: 'a'", 'type': 'type_error'}
    ]
    # keywords duplicating both positional arguments
    with pytest.raises(ValidationError) as exc_info:
        foo(1, 2, a=3, b=4)
    assert exc_info.value.errors() == [
        {'loc': ('v__duplicate_kwargs',), 'msg': "multiple values for arguments: 'a', 'b'", 'type': 'type_error'}
    ]
def test_wrap():
    """The decorator preserves function metadata (doc, name, qualname) and exposes
    the ValidatedFunction machinery via .vd, .raw_function and .model."""
    @validate_arguments
    def foo_bar(a: int, b: int):
        """This is the foo_bar method."""
        return f'{a}, {b}'
    assert foo_bar.__doc__ == 'This is the foo_bar method.'
    assert foo_bar.__name__ == 'foo_bar'
    assert foo_bar.__module__ == 'tests.test_decorator'
    assert foo_bar.__qualname__ == 'test_wrap.<locals>.foo_bar'
    assert isinstance(foo_bar.vd, ValidatedFunction)
    assert callable(foo_bar.raw_function)
    assert foo_bar.vd.arg_mapping == {0: 'a', 1: 'b'}
    assert foo_bar.vd.positional_only_args == set()
    assert issubclass(foo_bar.model, BaseModel)
    assert foo_bar.model.__fields__.keys() == {'a', 'b', 'args', 'kwargs', 'v__duplicate_kwargs'}
    assert foo_bar.model.__name__ == 'FooBar'
    assert foo_bar.model.schema()['title'] == 'FooBar'
    assert repr(inspect.signature(foo_bar)) == '<Signature (a: int, b: int)>'
def test_kwargs():
    """Keyword-only parameters: positional calls are rejected with both a
    missing-field error per param and a positional-arguments error."""
    @validate_arguments
    def foo(*, a: int, b: int):
        return a + b
    assert foo.model.__fields__.keys() == {'a', 'b', 'args', 'kwargs'}
    assert foo(a=1, b=3) == 4
    with pytest.raises(ValidationError) as exc_info:
        foo(a=1, b='x')
    assert exc_info.value.errors() == [
        {'loc': ('b',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
    ]
    with pytest.raises(ValidationError) as exc_info:
        foo(1, 'x')
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'field required', 'type': 'value_error.missing'},
        {'loc': ('b',), 'msg': 'field required', 'type': 'value_error.missing'},
        {'loc': ('args',), 'msg': '0 positional arguments expected but 2 given', 'type': 'type_error'},
    ]
def test_untyped():
    """Un-annotated parameters are accepted as-is (no coercion)."""
    @validate_arguments
    def foo(a, b, c='x', *, d='y'):
        return ', '.join(str(arg) for arg in [a, b, c, d])
    assert foo(1, 2) == '1, 2, x, y'
    assert foo(1, {'x': 2}, c='3', d='4') == "1, {'x': 2}, 3, 4"
@pytest.mark.parametrize('validated', (True, False))
def test_var_args_kwargs(validated):
    """*args/**kwargs handling behaves identically with and without the
    decorator, including names that shadow 'args'/'kwargs' as keywords."""
    def foo(a, b, *args, d=3, **kwargs):
        return f'a={a!r}, b={b!r}, args={args!r}, d={d!r}, kwargs={kwargs!r}'
    if validated:
        foo = validate_arguments(foo)
    assert foo(1, 2) == 'a=1, b=2, args=(), d=3, kwargs={}'
    assert foo(1, 2, 3, d=4) == 'a=1, b=2, args=(3,), d=4, kwargs={}'
    assert foo(*[1, 2, 3], d=4) == 'a=1, b=2, args=(3,), d=4, kwargs={}'
    assert foo(1, 2, args=(10, 11)) == "a=1, b=2, args=(), d=3, kwargs={'args': (10, 11)}"
    assert foo(1, 2, 3, args=(10, 11)) == "a=1, b=2, args=(3,), d=3, kwargs={'args': (10, 11)}"
    assert foo(1, 2, 3, e=10) == "a=1, b=2, args=(3,), d=3, kwargs={'e': 10}"
    assert foo(1, 2, kwargs=4) == "a=1, b=2, args=(), d=3, kwargs={'kwargs': 4}"
    assert foo(1, 2, kwargs=4, e=5) == "a=1, b=2, args=(), d=3, kwargs={'kwargs': 4, 'e': 5}"
def test_field_can_provide_factory() -> None:
    """A Field(default_factory=...) default is applied when the arg is omitted."""
    @validate_arguments
    def foo(a: int, b: int = Field(default_factory=lambda: 99), *args: int) -> int:
        """mypy is happy with this"""
        return a + b + sum(args)
    assert foo(3) == 102
    assert foo(1, 2, 3) == 6
def test_annotated_field_can_provide_factory() -> None:
    """A default_factory supplied through Annotated[...] metadata also works."""
    @validate_arguments
    def foo2(a: int, b: Annotated[int, Field(default_factory=lambda: 99)], *args: int) -> int:
        """mypy reports Incompatible default for argument "b" if we don't supply ANY as default"""
        return a + b + sum(args)
    assert foo2(1) == 100
@skip_pre_38
def test_positional_only(create_module):
    """Positional-only params (PEP 570 '/') passed by keyword raise a dedicated
    v__positional_only error; the module is built via create_module because the
    '/' syntax is a SyntaxError before 3.8."""
    module = create_module(
        # language=Python
        """
from pydantic import validate_arguments
@validate_arguments
def foo(a, b, /, c=None):
    return f'{a}, {b}, {c}'
"""
    )
    assert module.foo(1, 2) == '1, 2, None'
    assert module.foo(1, 2, 44) == '1, 2, 44'
    assert module.foo(1, 2, c=44) == '1, 2, 44'
    with pytest.raises(ValidationError) as exc_info:
        module.foo(1, b=2)
    assert exc_info.value.errors() == [
        {
            'loc': ('v__positional_only',),
            'msg': "positional-only argument passed as keyword argument: 'b'",
            'type': 'type_error',
        }
    ]
    with pytest.raises(ValidationError) as exc_info:
        module.foo(a=1, b=2)
    assert exc_info.value.errors() == [
        {
            'loc': ('v__positional_only',),
            'msg': "positional-only arguments passed as keyword arguments: 'a', 'b'",
            'type': 'type_error',
        }
    ]
def test_args_name():
    """Parameters literally named 'args'/'kwargs' force the internal fields to
    be renamed to v__args / v__kwargs, and errors are reported under those."""
    @validate_arguments
    def foo(args: int, kwargs: int):
        return f'args={args!r}, kwargs={kwargs!r}'
    assert foo.model.__fields__.keys() == {'args', 'kwargs', 'v__args', 'v__kwargs', 'v__duplicate_kwargs'}
    assert foo(1, 2) == 'args=1, kwargs=2'
    with pytest.raises(ValidationError) as exc_info:
        foo(1, 2, apple=4)
    assert exc_info.value.errors() == [
        {'loc': ('v__kwargs',), 'msg': "unexpected keyword argument: 'apple'", 'type': 'type_error'}
    ]
    with pytest.raises(ValidationError) as exc_info:
        foo(1, 2, apple=4, banana=5)
    assert exc_info.value.errors() == [
        {'loc': ('v__kwargs',), 'msg': "unexpected keyword arguments: 'apple', 'banana'", 'type': 'type_error'}
    ]
    with pytest.raises(ValidationError) as exc_info:
        foo(1, 2, 3)
    assert exc_info.value.errors() == [
        {'loc': ('v__args',), 'msg': '2 positional arguments expected but 3 given', 'type': 'type_error'}
    ]
def test_v_args():
    """The reserved internal field names may not be used as parameter names;
    each one raises ConfigError at decoration time."""
    with pytest.raises(
        ConfigError, match='"v__args", "v__kwargs", "v__positional_only" and "v__duplicate_kwargs" are not permitted'
    ):
        @validate_arguments
        def foo1(v__args: int):
            pass
    with pytest.raises(
        ConfigError, match='"v__args", "v__kwargs", "v__positional_only" and "v__duplicate_kwargs" are not permitted'
    ):
        @validate_arguments
        def foo2(v__kwargs: int):
            pass
    with pytest.raises(
        ConfigError, match='"v__args", "v__kwargs", "v__positional_only" and "v__duplicate_kwargs" are not permitted'
    ):
        @validate_arguments
        def foo3(v__positional_only: int):
            pass
    with pytest.raises(
        ConfigError, match='"v__args", "v__kwargs", "v__positional_only" and "v__duplicate_kwargs" are not permitted'
    ):
        @validate_arguments
        def foo4(v__duplicate_kwargs: int):
            pass
def test_async():
    """Decorating an async function keeps it awaitable; validation errors are
    raised when the coroutine is run, not when it is created."""
    @validate_arguments
    async def foo(a, b):
        return f'a={a} b={b}'
    async def run():
        v = await foo(1, 2)
        assert v == 'a=1 b=2'
    loop = asyncio.get_event_loop_policy().get_event_loop()
    loop.run_until_complete(run())
    with pytest.raises(ValidationError) as exc_info:
        loop.run_until_complete(foo('x'))
    assert exc_info.value.errors() == [{'loc': ('b',), 'msg': 'field required', 'type': 'value_error.missing'}]
def test_string_annotation():
    """Forward-reference (string) annotations are resolved and enforced."""
    @validate_arguments
    def foo(a: 'List[int]', b: 'Path'):
        return f'a={a!r} b={b!r}'
    assert foo([1, 2, 3], '/')
    with pytest.raises(ValidationError) as exc_info:
        foo(['x'])
    assert exc_info.value.errors() == [
        {'loc': ('a', 0), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
        {'loc': ('b',), 'msg': 'field required', 'type': 'value_error.missing'},
    ]
def test_item_method():
    """Decorating an instance method: self is passed through untouched and the
    remaining arguments are validated."""
    class X:
        def __init__(self, v):
            self.v = v
        @validate_arguments
        def foo(self, a: int, b: int):
            assert self.v == a
            return f'{a}, {b}'
    x = X(4)
    assert x.foo(4, 2) == '4, 2'
    assert x.foo(*[4, 2]) == '4, 2'
    with pytest.raises(ValidationError) as exc_info:
        x.foo()
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'field required', 'type': 'value_error.missing'},
        {'loc': ('b',), 'msg': 'field required', 'type': 'value_error.missing'},
    ]
def test_class_method():
    """validate_arguments stacked under @classmethod: cls is passed through and
    the remaining arguments are validated."""
    class X:
        @classmethod
        @validate_arguments
        def foo(cls, a: int, b: int):
            assert cls == X
            return f'{a}, {b}'
    x = X()
    assert x.foo(4, 2) == '4, 2'
    assert x.foo(*[4, 2]) == '4, 2'
    with pytest.raises(ValidationError) as exc_info:
        x.foo()
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'field required', 'type': 'value_error.missing'},
        {'loc': ('b',), 'msg': 'field required', 'type': 'value_error.missing'},
    ]
def test_config_title():
    """A config dict passed to the decorator sets the generated model's title."""
    @validate_arguments(config=dict(title='Testing'))
    def foo(a: int, b: int):
        return f'{a}, {b}'
    assert foo(1, 2) == '1, 2'
    assert foo(1, b=2) == '1, 2'
    assert foo.model.schema()['title'] == 'Testing'
def test_config_title_cls():
    """The config may also be given as a class instead of a dict."""
    class Config:
        title = 'Testing'
    @validate_arguments(config=Config)
    def foo(a: int, b: int):
        return f'{a}, {b}'
    assert foo(1, 2) == '1, 2'
    assert foo(1, b=2) == '1, 2'
    assert foo.model.schema()['title'] == 'Testing'
def test_config_fields():
    """Setting 'fields' in the config is rejected with ConfigError."""
    with pytest.raises(ConfigError, match='Setting the "fields" and "alias_generator" property on custom Config for @'):
        @validate_arguments(config=dict(fields={'b': 'bang'}))
        def foo(a: int, b: int):
            return f'{a}, {b}'
def test_config_arbitrary_types_allowed():
    """arbitrary_types_allowed permits non-pydantic parameter types; passing a
    value of the wrong type raises type_error.arbitrary_type."""
    class EggBox:
        def __str__(self) -> str:
            return 'EggBox()'
    @validate_arguments(config=dict(arbitrary_types_allowed=True))
    def foo(a: int, b: EggBox):
        return f'{a}, {b}'
    assert foo(1, EggBox()) == '1, EggBox()'
    with pytest.raises(ValidationError) as exc_info:
        assert foo(1, 2) == '1, 2'
    assert exc_info.value.errors() == [
        {
            'loc': ('b',),
            'msg': 'instance of EggBox expected',
            'type': 'type_error.arbitrary_type',
            'ctx': {'expected_arbitrary_type': 'EggBox'},
        },
    ]
def test_validate(mocker):
    """func.validate() validates arguments without calling the function body."""
    stub = mocker.stub(name='on_something_stub')
    @validate_arguments
    def func(s: str, count: int, *, separator: bytes = b''):
        stub(s, count, separator)
    func.validate('qwe', 2)
    with pytest.raises(ValidationError):
        func.validate(['qwe'], 2)
    stub.assert_not_called()
def test_validate_all():
    """With validate_all=True, defaults (here a unix timestamp from a
    default_factory) are run through validation too, coercing to datetime."""
    @validate_arguments(config=dict(validate_all=True))
    def foo(dt: datetime = Field(default_factory=lambda: 946684800)):
        return dt
    assert foo() == datetime(2000, 1, 1, tzinfo=timezone.utc)
    assert foo(0) == datetime(1970, 1, 1, tzinfo=timezone.utc)
@skip_pre_38
def test_validate_all_positional(create_module):
    """Same as test_validate_all but with a positional-only parameter, so the
    module is compiled from source (PEP 570 syntax needs Python 3.8+)."""
    module = create_module(
        # language=Python
        """
from datetime import datetime
from pydantic import Field, validate_arguments
@validate_arguments(config=dict(validate_all=True))
def foo(dt: datetime = Field(default_factory=lambda: 946684800), /):
    return dt
"""
    )
    assert module.foo() == datetime(2000, 1, 1, tzinfo=timezone.utc)
    assert module.foo(0) == datetime(1970, 1, 1, tzinfo=timezone.utc)
def test_validate_extra():
    """Extra keys in a TypedDict argument are kept with Extra.allow and
    silently dropped with Extra.ignore."""
    class TypedTest(TypedDict):
        y: str
    @validate_arguments(config={'extra': Extra.allow})
    def test(other: TypedTest):
        return other
    assert test(other={'y': 'b', 'z': 'a'}) == {'y': 'b', 'z': 'a'}
    @validate_arguments(config={'extra': Extra.ignore})
    def test(other: TypedTest):
        return other
    assert test(other={'y': 'b', 'z': 'a'}) == {'y': 'b'}
| merlinepedra25/PYDANTIC | tests/test_decorator.py | test_decorator.py | py | 13,616 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pytest.mark.skipif",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sys.version_info",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pydantic.va... |
34405116951 | import time
import re
import codecs
import io
#import urllib2 as ul
import requests
import xml.etree.ElementTree as ET
from konlpy.tag import Kkma
from konlpy.utils import pprint
import zipfile
def LoadDB_2020(src='oro'):
    """Load and clean every line of every file in ../scraped/<src>/<src>.zip.

    Lines that cannot be decoded as UTF-8 are printed and skipped; empty lines
    (after Clean) are dropped from the result.
    """
    dat = []
    z = zipfile.ZipFile('../scraped/{}/{}.zip'.format(src,src))
    for j in z.filelist:
        with z.open(j) as f:
            for q in f.readlines():
                try:
                    dat.append(Clean(codecs.decode(q,encoding='utf-8')))
                except UnicodeDecodeError:
                    # best-effort: show the undecodable raw line and keep going
                    print(q)
    return [i for i in dat if len(i) > 0]
def LoadSGFs(path = '../scraped/sgfs/sgfs.zip'):
    """Extract move lists from the SGF archive.

    Returns a list of games; each game is a list of move tokens like 'B[pd]'
    (comments 'C[]' stripped, tokens truncated to 5 chars), keeping only games
    whose first move is Black's.

    NOTE(review): the *path* parameter is currently ignored — the archive path
    is hard-coded below; confirm before relying on it.
    """
    #import csv, io, sys, zipfile
    games = []
    #bad_games = []
    z = zipfile.ZipFile('../scraped/sgfs/sgfs.zip')
    for i in z.filelist:
        h = z.open(i, 'r')
        game_string = io.TextIOWrapper(h).read()
        games.append(game_string)
    # '(null)' marks games the scraper failed to fetch
    good_games = [j for i,j in enumerate(games) if j!='(null)']
    # drop the two SGF header nodes before the first move
    moves = [j.split(';')[2:] for j in good_games]
    hmoves = []
    for i in moves:
        try:
            if len(i[0]) > 2:
                if i[0][:2]=='B[':
                    hmoves.append([j.replace('C[]','')[:5] for j in i if j[0] in ['B', 'W']])
        except IndexError:
            # games with no moves at all are skipped
            #print(i)
            pass
    return hmoves
def ReadSent(pageno, src='./han/'):
    """Read page file <src>/<pageno>.d and split it into sentences.

    '?' and '!' are normalised to '.' so a single split yields sentences;
    each sentence has its whitespace collapsed to single spaces.

    Fix: the file handle was previously opened and never closed — now managed
    with a context manager.
    """
    with codecs.open(src + str(pageno) + '.d', encoding='utf-8') as f:
        dat = f.read()
    dat = Clean(dat)
    dat = dat.replace('?', '.')
    dat = dat.replace('!', '.')
    return [' '.join(i.split()) for i in dat.split('.')]
def Clean(sentence, comma=0):
    """Replace markup-like punctuation in *sentence* with spaces and strip.

    With comma=0 (default) commas are kept but non-breaking spaces ('\\xa0')
    are removed; with any other value commas are removed too, and '\\xa0' is
    left alone.
    """
    if comma == 0:
        junk = ('\n', '_', '-', '(', ')', '"', '\'', '...',
                '[', ']', '<', '>', '\r', '\xa0')
    else:
        junk = ('\n', '_', '-', '(', ')', '"', '\'', '...',
                '[', ']', '<', '>', ',', '\r')
    for token in junk:
        sentence = sentence.replace(token, ' ')
    return sentence.strip()
def LoadDB(src='han'):
    """Load all sentences for one corpus, or for all three with src='all'.

    For a single corpus, ./lists/<src>.list names the page files to read via
    ReadSent(). For src='all' the han, dat and oro corpora are concatenated.

    Fixes: the list file was opened without being closed (now a context
    manager), and the 'all' branch loaded 'dat' twice while never loading
    'han' (the default corpus).
    """
    if src != 'all':
        sent1 = []
        with open('./lists/' + src + '.list') as f:
            for line in f:
                # each list entry looks like '<pageno>.d'
                for sentence in ReadSent(int(line.split('.')[0]), src='./' + src + '/'):
                    sent1.append(sentence)
    else:
        sent1 = LoadDB(src='han')   # was LoadDB(src='dat') twice — 'han' was never loaded
        sent1 += LoadDB(src='dat')
        sent1 += LoadDB(src='oro')
    return sent1
def WCount(sent, kkma, write=0, n=2000, name='konlp.wlist'):
    """Count morpheme frequencies over the sentences in *sent*.

    Args:
        sent: iterable of sentence strings.
        kkma: tagger object providing .morphs(sentence) -> list of morphemes.
        write: when 1, write the top *n* morphemes to ./output/<name>.
        n: number of morphemes to write.
        name: output file name.

    Returns:
        (counts dict, morphemes sorted by descending frequency).

    Fixes: the output file was never closed (now a context manager) and the
    try/except KeyError counting is replaced with dict.get.
    """
    mywords = {}
    for sentence in sent:
        for morph in kkma.morphs(sentence):
            mywords[morph] = mywords.get(morph, 0) + 1
    # sort ascending then reverse() to keep the original tie ordering
    freq = sorted(mywords.keys(), key=lambda x: mywords[x])
    freq.reverse()
    if write == 1:
        with codecs.open('./output/' + name, 'w', encoding='utf-8') as dat:
            for i in freq[:n]:
                try:
                    dat.write(i)
                    dat.write('\n')
                except UnicodeDecodeError:
                    continue
    return mywords, freq
def WCountPos(sent, kkma, write=0, n=2000, name='konlp.wlist'):
    """Count token frequencies with part-of-speech tags.

    Args:
        sent: iterable of sentence strings.
        kkma: tagger object providing .pos(sentence) -> list of (token, tag).
        write: when 1, write 'token, tag, count' lines to ./output/<name>.
        n: number of tokens to write.
        name: output file name.

    Returns:
        (counts dict, tokens sorted by descending frequency).

    The tag recorded for a token is the tag of its *first* occurrence, as in
    the original implementation. Fix: the output file was never closed — now
    managed with a context manager.
    """
    mywords = {}
    mypos = {}
    for sentence in sent:
        for token, tag in kkma.pos(sentence):
            if token in mywords:
                mywords[token] += 1
            else:
                mywords[token] = 1
                mypos[token] = tag  # first-seen tag wins
    # sort ascending then reverse() to keep the original tie ordering
    freq = sorted(mywords.keys(), key=lambda x: mywords[x])
    freq.reverse()
    if write == 1:
        with codecs.open('./output/' + name, 'w', encoding='utf-8') as dat:
            for i in freq[:n]:
                try:
                    dat.write(i)
                    dat.write(', ')
                    dat.write(mypos[i])
                    dat.write(', ')
                    dat.write(str(mywords[i]))
                    dat.write('\n')
                except KeyError:
                    continue
    return mywords, freq
#sent = LoadDB(src='all')
#print len(sent)
#sent = LoadDB(src='dat')
#print (len(sent))
#kkma=Kkma()
# morphs = []
# pos = []
# for i in sent[:10]:
# #print i
# #pprint(kkma.nouns(i))
# #pprint(kkma.pos(i))
# morphs+=kkma.morphs(i)
# pos+=kkma.pos(i)
# print len(morphs),len(set(morphs))
# print len(pos),len(set(pos))
# for i in pos:
# print i[0],i[1]
# k=31
# n=len(sent)/k
# for i in range(k)[2:]:
# a=time.time()
# morphs,freq=WCountPos(sent[n*i:min([n*(i+1),len(sent)-1])],kkma,write=1,n=-1,name='han_'+str(i)+'.list')
# b=time.time()
# print (b-a)/60.,' minutes passed ',i
# k=42
# n=len(sent)/k
# for i in range(k):
# a=time.time()
# morphs,freq=WCountPos(sent[n*i:min([n*(i+1),len(sent)-1])],kkma,write=1,n=-1,name='oro_'+str(i)+'.list')
# b=time.time()
# print (b-a)/60.,' minutes passed ',i
# k=77
# n=len(sent)/k
# for i in range(k)[14:]:
# a=time.time()
# morphs,freq=WCountPos(sent[n*i:min([n*(i+1),len(sent)-1])],kkma,write=1,n=-1,name='dat_'+str(i)+'.list')
# b=time.time()
# print ((b-a)/60.,' minutes passed ',i)
# #for i in freq[:10]:
# #print i
| nborggren/BadukNews | src/BadukCorpus.py | BadukCorpus.py | py | 4,965 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "zipfile.ZipFile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "codecs.decode",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "zipfile.ZipFile",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "io.TextIOWrapper",
... |
43302752004 | import py
import sys
from rpython.rtyper.lltypesystem import lltype
from rpython.rlib import rawstorage
from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage,\
raw_storage_setitem, raw_storage_getitem, AlignmentError,\
raw_storage_setitem_unaligned, raw_storage_getitem_unaligned
from rpython.rtyper.test.tool import BaseRtypingTest
from rpython.translator.c.test.test_genc import compile
def test_untranslated_storage():
    """Untranslated raw storage: aligned reads/writes of Signed and Float work
    round-trip; misaligned access (offset 3) raises AlignmentError."""
    r = alloc_raw_storage(37)
    raw_storage_setitem(r, 8, 1<<30)
    res = raw_storage_getitem(lltype.Signed, r, 8)
    assert res == 1<<30
    # a Float write at the same offset overwrites the Signed value
    raw_storage_setitem(r, 8, 3.14)
    res = raw_storage_getitem(lltype.Float, r, 8)
    assert res == 3.14
    py.test.raises(AlignmentError, raw_storage_getitem, lltype.Signed, r, 3)
    py.test.raises(AlignmentError, raw_storage_setitem, r, 3, 42.5)
    free_raw_storage(r)
def test_untranslated_storage_unaligned(monkeypatch):
    """The *_unaligned accessors work at a misaligned offset even when the
    platform is forced (via monkeypatch) to claim misalignment is unsafe."""
    monkeypatch.setattr(rawstorage, 'misaligned_is_fine', False)
    r = alloc_raw_storage(15)
    raw_storage_setitem_unaligned(r, 3, 1<<30)
    res = raw_storage_getitem_unaligned(lltype.Signed, r, 3)
    assert res == 1<<30
    raw_storage_setitem_unaligned(r, 3, 3.14)
    res = raw_storage_getitem_unaligned(lltype.Float, r, 3)
    assert res == 3.14
    free_raw_storage(r)
class TestRawStorage(BaseRtypingTest):
    """Run the raw-storage operations through the RPython type annotator and
    interpreter (self.interpret) rather than directly in CPython."""
    def test_storage_int(self):
        """Aligned Signed round-trip survives RTyping."""
        def f(i):
            r = alloc_raw_storage(24)
            raw_storage_setitem(r, 8, i)
            res = raw_storage_getitem(lltype.Signed, r, 8)
            free_raw_storage(r)
            return res
        x = self.interpret(f, [1<<30])
        assert x == 1 << 30
    def test_storage_float_unaligned(self, monkeypatch):
        """Unaligned Float round-trip survives RTyping with misalignment
        support forced off."""
        def f(v):
            r = alloc_raw_storage(24)
            raw_storage_setitem_unaligned(r, 3, v)
            res = raw_storage_getitem_unaligned(lltype.Float, r, 3)
            free_raw_storage(r)
            return res
        monkeypatch.setattr(rawstorage, 'misaligned_is_fine', False)
        x = self.interpret(f, [3.14])
        assert x == 3.14
class TestCBackend(object):
    """Compile the raw-storage operations to C (via compile()) and run the
    resulting binaries; functions return a 'mismatch' flag, so 0 means pass."""
    def test_backend_int(self):
        """Aligned Signed round-trip in compiled C code."""
        def f(i):
            r = alloc_raw_storage(24)
            raw_storage_setitem(r, 8, i)
            res = raw_storage_getitem(lltype.Signed, r, 8)
            free_raw_storage(r)
            return res != i
        fc = compile(f, [int])
        x = fc(-sys.maxint // 3)
        assert x == 0
    def test_backend_float_unaligned(self, monkeypatch):
        """Unaligned Float round-trip in compiled C code; monkeypatch=None is
        used by the test below to exercise the misaligned_is_fine=True path."""
        def f(v):
            r = alloc_raw_storage(24)
            raw_storage_setitem_unaligned(r, 3, v)
            res = raw_storage_getitem_unaligned(lltype.Float, r, 3)
            free_raw_storage(r)
            return res != v
        if monkeypatch is not None:
            monkeypatch.setattr(rawstorage, 'misaligned_is_fine', False)
        fc = compile(f, [float])
        x = fc(-3.14)
        assert x == 0
    def test_backend_float_unaligned_allow_misalign(self):
        """Same as above but without forcing misaligned_is_fine off."""
        self.test_backend_float_unaligned(monkeypatch=None)
| mozillazg/pypy | rpython/rlib/test/test_rawstorage.py | test_rawstorage.py | py | 3,046 | python | en | code | 430 | github-code | 36 | [
{
"api_name": "rpython.rlib.rawstorage.alloc_raw_storage",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "rpython.rlib.rawstorage.raw_storage_setitem",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "rpython.rlib.rawstorage.raw_storage_getitem",
"line_num... |
7405420240 | import numpy as np
import operator
import matplotlib.pyplot as plt
import os
def createDataSet():
    """Return the toy 4-sample training set: a (4, 2) feature array and the
    matching class labels."""
    samples = np.array([[1.0, 1.1],
                        [1.0, 1.0],
                        [0, 0],
                        [0, 0.1]])
    sample_labels = ['A', 'A', 'B', 'B']
    return samples, sample_labels
def classify0(inX, dataSet, labels, k):
    """k-nearest-neighbour classifier.

    Args:
        inX: feature vector to classify (sequence of length d).
        dataSet: (n, d) array of training samples.
        labels: length-n sequence of class labels for dataSet rows.
        k: number of nearest neighbours to vote.

    Returns:
        The majority label among the k nearest training samples (Euclidean
        distance). Ties keep first-seen order, as in the original.

    Improvements: numpy broadcasting replaces np.tile, and the intermediate
    arrays / full sort of the vote dict are folded into a single max().
    """
    # broadcasting subtracts inX from every row; no need to tile it n times
    diffMat = np.asarray(inX) - dataSet
    distances = np.sqrt((diffMat ** 2).sum(axis=1))
    # indices of the k closest training samples
    nearest = distances.argsort()[:k]
    classCount = {}
    for idx in nearest:
        voteIlabel = labels[idx]
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    # max() returns the first maximum in insertion order — same tie behaviour
    # as the original sorted(..., reverse=True)[0]
    return max(classCount.items(), key=operator.itemgetter(1))[0]
def file2matrix(filename):
    """Parse a tab-separated data file into features and labels.

    Each line holds three numeric feature columns followed by an integer
    class label.

    Returns:
        (numpy array of shape (n, 3), list of n int labels).

    Fixes: the file was opened without being closed (now a context manager)
    and the manual index counter is replaced by enumerate().
    """
    with open(filename) as fr:
        arrayOLines = fr.readlines()
    returnMat = np.zeros((len(arrayOLines), 3))
    classlabelVector = []
    for index, line in enumerate(arrayOLines):
        listFromLine = line.strip().split('\t')
        returnMat[index, :] = listFromLine[0:3]   # numpy coerces the strings to float
        classlabelVector.append(int(listFromLine[-1]))
    return returnMat, classlabelVector
def autoNorm(dataSet):
    """Min-max normalise each column of *dataSet* to [0, 1].

    Returns:
        (normalised array, per-column ranges, per-column minimums).

    Fixes: a dead np.zeros allocation that was immediately overwritten is
    removed, and np.tile is replaced with broadcasting. NOTE: as in the
    original, a constant column (range 0) produces a divide-by-zero.
    """
    minVals = dataSet.min(0)
    maxVals = dataSet.max(0)
    ranges = maxVals - minVals
    # broadcasting applies the per-column min/range to every row
    normDataSet = (dataSet - minVals) / ranges
    return normDataSet, ranges, minVals
def datingClassTest():
    """Hold-out evaluation of classify0 on the dating data set: the first
    hoRatio fraction of rows is classified against the rest, and the error
    rate is printed."""
    hoRatio = 0.10 # fraction of the rows held out as test data
    datingDataMat, datingLabels =file2matrix('datingTestSet2.txt') # step 1: parse the raw file
    normMat, ranges, minVals = autoNorm(datingDataMat) # step 2: rescale features to [0, 1]
    m = normMat.shape[0]
    numTestVece = int(m*hoRatio)
    errorCount = 0.0
    for i in range(numTestVece):
        # classify row i against all rows not in the test slice
        classifierResult = classify0(normMat[i,:],normMat[numTestVece:m,:],datingLabels[numTestVece:m],3)
        print("the classifierResult came back with: %d, the real answer is: %d"%(classifierResult,datingLabels[i]))
        if (classifierResult != datingLabels[i]):
            errorCount += 1.0
    print("the total error rate is: %f"%(errorCount/float(numTestVece)))
def classifyperson():
    """Interactively classify one person: prompt for three feature values on
    stdin, normalise them with the training data's ranges, and print the
    predicted like-level."""
    resultList = ['not at all', 'in small doses', 'in large doses']
    percentTats = float(input("percentage of time spent playing video games?"))
    ffMiles = float(input("frequent flier miles earned per year?"))
    iceCream = float(input("liters of ice cream consumed per year?"))
    datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)
    inArr = np.array([ffMiles, percentTats, iceCream])
    # normalise the query point with the same min/range as the training data
    classifierResult = classify0((inArr - minVals)/ranges, normMat, datingLabels, 3)
    # labels are 1-based, resultList is 0-based
    print("You will probably like this person:",resultList[classifierResult - 1])
def img2vector(filename):
    """Read a 32x32 text image of '0'/'1' characters into a (1, 1024) vector.

    Fix: the file handle was previously never closed — now managed with a
    context manager.
    """
    returnVect = np.zeros((1, 1024))
    with open(filename) as fr:
        for i in range(32):
            lineStr = fr.readline()
            for j in range(32):
                returnVect[0, 32 * i + j] = int(lineStr[j])
    return returnVect
def handwritingClassTest():
    """Classify the digit images in testDigits/ against trainingDigits/ with
    classify0 and print the error count and rate. File names encode the true
    class as '<digit>_<index>.txt'."""
    hwLabels = []
    trainingFileList = os.listdir('trainingDigits') # names of all training image files
    m = len(trainingFileList)
    trainingMat = np.zeros((m,1024))
    for i in range(m):
        fileNameStr = trainingFileList[i]
        fileStr = fileNameStr.split('.')[0]
        classNumStr = int(fileStr.split('_')[0])  # true digit from the file name
        hwLabels.append(classNumStr)
        trainingMat[i,:] = img2vector('trainingDigits/%s' % fileNameStr)
    testFileList = os.listdir('testDigits')
    errorCount = 0.0
    mTest = len(testFileList)
    for i in range(mTest):
        fileNameStr = testFileList[i]
        fileStr = fileNameStr.split('.')[0]
        classNumStr = int(fileStr.split('_')[0])
        vectorUnderTest = img2vector('testDigits/%s' % fileNameStr)
        classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)
        print("the classifier came back with: %d, the real answer is: %d" % (classifierResult, classNumStr))
        if (classifierResult != classNumStr): errorCount += 1.0
    print("\nthe total number of errors is: %d" % errorCount)
    print("\nthe total error rate is: %f" % (errorCount/float(mTest)))
# Simple interactive driver: each number selects one of the demos above.
select = int(input("请输入你要选择的操作:"))
if select == 1:
    # classify a point against the toy data set
    group, labels = createDataSet()
    print(classify0([0.5, 0.5], group, labels, 3))
elif select == 2:
    # scatter-plot the dating data, coloured/sized by label
    datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
    fig = plt.figure()
    ax = fig.add_subplot(111) # add_subplot(xyz): split into x rows, y cols, use cell z
    ax.scatter(datingDataMat[:, 0], datingDataMat[:, 1], 15.0 * np.array(datingLabels), 15.0 * np.array(datingLabels))
    plt.show()
elif select == 3:
    # just run the normalisation step
    datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)
elif select == 4:
    datingClassTest()
elif select == 5:
    # show the first two rows of one digit image vector
    testVector = img2vector('testDigits/0_13.txt')
    print(testVector[0,0:31])
    print(testVector[0,32:63])
elif select == 6:
    classifyperson()
else:
    handwritingClassTest()
{
"api_name": "numpy.array",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "operator.itemgetter",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_num... |
74957408425 | #! /usr/bin/env python
from __future__ import print_function
import os
import sys
import argparse
from subprocess import CalledProcessError
from scripting.conda import install_python
from scripting.contexts import cd, cdtemp, homebrew_hidden, setenv
from scripting.unix import system, which, check_output
from scripting.prompting import prompt, status, success
from scripting.git import git_clone_or_update
from scripting.termcolors import blue
RECIPES = ['boost-headers', 'libiconv', 'libxml2', # 'ncurses',
'chasm', 'cca-babel', 'cca-spec-babel', 'ccaffeine',
'cca-bocca', ]
def build_env(which_gfortran=None, which_python=None):
    """Return a minimal environment dict for conda builds.

    PATH puts the directory of *which_python* first, followed by the usual
    system directories.

    NOTE: *which_gfortran* is currently unused — the compiler wiring
    (CC/CXX/FC and COMPILER) is disabled; the dead commented-out code that
    previously set it up has been removed.
    """
    which_python = which_python or which('python')
    env = {
        'PATH': os.pathsep.join([
            os.path.dirname(which_python),
            '/usr/bin', '/bin', '/usr/sbin', '/etc', '/usr/lib', ]),
    }
    return env
def conda_build_output(recipe, which_conda=None):
    """Return the path of the package file `conda build` would produce for
    *recipe*, via `conda build --output` (no build is performed)."""
    conda = which_conda or which('conda')
    path_to_file = check_output([conda, 'build', recipe, '--output'])
    return path_to_file.strip()
def conda_build(recipes, dir='.', batch_mode=False, which_conda=None):
    """Build each recipe with `conda build` inside *dir*.

    Runs with a stripped-down PATH and with homebrew hidden to keep the
    builds reproducible. Stops at the first failing recipe.

    Returns:
        List of package file paths for the recipes that built successfully.
    """
    conda = which_conda or which('conda')
    # minimal PATH: system directories only, no user tools
    env = {
        'PATH': os.pathsep.join(['/usr/bin', '/bin', '/usr/sbin', '/etc',
                                 '/usr/lib', ]), }
    files_to_upload = []
    with cd(dir):
        with homebrew_hidden(prompt=not batch_mode):
            with setenv(env, verbose=True):
                status('pwd: {dir}'.format(dir=dir))
                system([conda, 'clean', '--all'])
                for recipe in recipes:
                    try:
                        system([conda, 'build', '-c', 'conda-forge',
                                '-c', 'csdms/channel/dev', recipe])
                    except CalledProcessError:
                        # abort the remaining recipes on the first failure
                        break
                    else:
                        files_to_upload.append(
                            conda_build_output(recipe, which_conda=conda))
    return files_to_upload
def conda_upload(files):
    """Upload each built package file to the Anaconda cloud (csdms/nightly).

    Failed uploads are skipped; successes are reported and collected.

    Returns:
        List of file names that uploaded successfully.

    Fix: the `uploaded` list was never appended to, so this function always
    returned an empty list and main() never reported any uploads.
    """
    uploaded = []
    for fname in files:
        try:
            system(['anaconda', 'upload', '--no-progress', '--force',
                    '--channel', 'nightly', '--user', 'csdms', fname])
        except CalledProcessError:
            pass
        else:
            uploaded.append(fname)  # was missing: nothing was ever recorded
            success('uploaded: {fname}'.format(fname=fname))
    return uploaded
def install_internal_python(prefix, batch_mode=False):
    """Install a private conda Python (with build tooling) under *prefix*,
    after prompting the user.

    Returns:
        Path to the installed python executable, or None if the user declined.
    """
    if prompt('ok to install python in {prefix}'.format(prefix=prefix),
              batch_mode=batch_mode):
        python_prefix = install_python(prefix=prefix,
                                       packages=('conda-build=1.17',
                                                 'anaconda-client',
                                                 'jinja2'))
        which_python = os.path.join(python_prefix, 'bin', 'python')
    else:
        which_python = None
    return which_python
def main():
    """Build the CSDMS software stack and optionally upload the packages.

    Parses the command line, sets up (or installs) a Python, fetches the
    recipes if no --recipe-dir was given, builds everything in a temporary
    cache directory, and uploads successful builds on confirmation.

    Returns:
        Number of recipes that failed to build (0 on full success).

    Fix: `print(...) and sys.exit(1)` never exited — print() returns None,
    so the `and` short-circuits before sys.exit is reached. The missing-tool
    checks now print and exit as two separate statements.
    """
    parser = argparse.ArgumentParser(
        description='Build the CSDMS software stack',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('recipe', nargs='*',
                        help='recipes to build')
    parser.add_argument('--cache-dir', default=os.path.abspath('.'),
                        help='location for temporary build files')
    parser.add_argument('-b', '--batch', action='store_true',
                        help='Run in batch mode')
    parser.add_argument('--which-gfortran', default=which('gfortran'),
                        help='Path to gfortran')
    parser.add_argument('--which-python', default='internal',
                        help='Path to python')
    parser.add_argument('--recipe-dir', default=None,
                        help="location of recipes")
    args = parser.parse_args()
    recipes = args.recipe or RECIPES
    recipe_dir = args.recipe_dir
    if recipe_dir is not None:
        recipe_dir = os.path.abspath(recipe_dir)
    which_gfortran = args.which_gfortran
    # accept a prefix directory as well as a direct path to the binary
    if which_gfortran is not None and os.path.isdir(which_gfortran):
        which_gfortran = os.path.join(which_gfortran, 'bin', 'gfortran')
    with cdtemp(dir=args.cache_dir, cleanup=False):
        which_python = args.which_python or which('python')
        if which_python == 'internal':
            which_python = install_internal_python(os.path.abspath('.'),
                                                   batch_mode=args.batch)
        if which_python is None:
            # BUG FIX: was `print(...) and sys.exit(1)`, which never exited
            print('Missing Python', file=sys.stderr)
            sys.exit(1)
        if which_gfortran is None:
            print('Missing gfortran', file=sys.stderr)
            sys.exit(1)
        if recipe_dir is None:
            # no recipes supplied: fetch the canonical recipe repository
            git_clone_or_update('https://github.com/csdms/csdms-stack',
                                dir='csdms-stack')
            recipe_dir = os.path.abspath(os.path.join('csdms-stack',
                                                      'conda-recipes'))
        with setenv(build_env(which_python=which_python), verbose=True):
            files = conda_build(recipes, dir=recipe_dir, batch_mode=args.batch)
        for fname in files:
            success('created: {fname}'.format(fname=fname))
        if len(files) > 0 and prompt('ok to upload to Anaconda cloud',
                                     batch_mode=args.batch):
            for fname in conda_upload(files):
                success('uploaded: {fname}'.format(fname=fname))
    return len(recipes) - len(files)
# Script entry point: exit status is the number of recipes that failed to build.
if __name__ == '__main__':
    sys.exit(main())
| csdms/csdms-stack | build-stack.py | build-stack.py | py | 7,372 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scripting.unix.which",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.pathsep.join",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.pathsep",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname"... |
41865321011 | import sys
import copy
import tempfile
import os.path
import filecmp
import shutil
import functools
from album import Album, ParseError, SaveError
TEST_CASE_DIR = "test_cases/DyphalGenerator_Album_save"
def create_file(name, dir_name):
    """Create an empty file called *name* inside directory *dir_name*."""
    open(os.path.join(dir_name, name), "w").close()
def create_file_ro(name, dir_name):
    """Create a read-only (mode 0444) file named *name* in *dir_name*
    containing the line "foo".

    The restrictive umask masks out all write/execute bits at creation time
    (0o666 & ~0o333 == 0o444).

    Fix: the process umask is now restored in a `finally` block so an
    exception while opening or writing can no longer leave the process with
    the temporary 0o333 umask; the file handle is also managed with `with`.
    """
    umask = os.umask(0o333)
    try:
        with open(os.path.join(dir_name, name), "w") as f:
            print("foo", file=f)
    finally:
        os.umask(umask)  # restore even if the open/write fails
def main():
    """Run the album-serialization test suite.

    An optional first argument of "-v" or "-vv" raises the verbosity.
    Exits with status 1 if any test fails.
    """
    testsTotal = 0
    testsFailed = 0
    verbosity = 0
    if 2 <= len(sys.argv):
        if "-v" == sys.argv[1]:
            verbosity = 1
        elif "-vv" == sys.argv[1]:
            verbosity = 2
    print("Testing album serialization.")
    def test_save_success(description, data, save_name_album, save_name_web, compare_name_album, compare_name_web, pre_func):
        """Saves an album, compares the results against expected data, and
        reports success or failure depending on whether they matched.
        Arguments:
        description: A description of the test case, at most 55 characters.
        data: A dictionary containing album data.
        save_name_album: The name under which to save the album, relative to
            a temporary directory.
        save_name_web: The name under which the web JSON will be saved,
            relative to a temporary directory.
        compare_name_album: The name of the canonical album file, relative to
            the test case directory.
        compare_name_web: The name of the canonical web JSON file, relative
            to the test case directory.
        pre_func: A function to execute before the album is saved. The name
            of the temporary directory will be passed in as an argument.
        """
        print("  Testing %s... " % (description), end="")
        nonlocal testsTotal, testsFailed
        testsTotal += 1
        try:
            with tempfile.TemporaryDirectory() as temp_dir:
                if None is not pre_func:
                    pre_func(temp_dir)
                Album.save(os.path.join(temp_dir, save_name_album), data)
                # Byte-compare the saved album against the canonical copy.
                if not filecmp.cmp(os.path.join(temp_dir, save_name_album), os.path.join(TEST_CASE_DIR, compare_name_album), shallow=False):
                    print("FAILED!")
                    testsFailed += 1
                    # Keep the failing output around for manual inspection.
                    shutil.copy(os.path.join(temp_dir, save_name_album), "/tmp/album.dyphal")
                    if 1 <= verbosity:
                        with open(os.path.join(temp_dir, save_name_album), "r") as f:
                            for line in f.readlines():
                                print(line)
                elif not filecmp.cmp(os.path.join(temp_dir, save_name_web), os.path.join(TEST_CASE_DIR, compare_name_web), shallow=False):
                    print("FAILED!")
                    testsFailed += 1
                    if 1 <= verbosity:
                        with open(os.path.join(temp_dir, save_name_web), "r") as f:
                            for line in f.readlines():
                                print(line)
                else:
                    print("passed.")
        except (Exception) as ex:
            # Any exception during a success-case save is a test failure.
            print("FAILED!")
            testsFailed += 1
            if 1 <= verbosity:
                print(ex)
    def test_save_failure(description, data, save_name_album, save_name_web, exceptions, pre_func):
        """Saves an album and verifies that an expected exception was thrown.
        Arguments:
        description: A description of the test case, at most 55 characters.
        data: A dictionary containing album data.
        save_name_album: The name under which to save the album, relative to
            a temporary directory.
        save_name_web: The name under which the web JSON will be saved,
            relative to a temporary directory.
        exceptions: A tuple of exceptions that indicate save failure.
        pre_func: A function to execute before the album is saved. The name
            of the temporary directory will be passed in as an argument.
        """
        print("  Testing %s... " % (description), end="")
        nonlocal testsTotal, testsFailed
        testsTotal += 1
        try:
            with tempfile.TemporaryDirectory() as temp_dir:
                if None is not pre_func:
                    pre_func(temp_dir)
                Album.save(os.path.join(temp_dir, save_name_album), data)
        except exceptions as ex:
            # The expected exception type was raised: the test passes.
            print("passed.")
            if 2 <= verbosity:
                print(ex)
        except (Exception) as ex:
            # Wrong exception type.
            print("FAILED!")
            testsFailed += 1
            if 1 <= verbosity:
                print(ex)
        else:
            # No exception at all when one was required.
            print("FAILED!")
            testsFailed += 1
    # Album data shared by all of the test cases below.
    template = {
        "title": "Test Album with an unnecessarily verbose title",
        "description": "This album is designed to test Dyphal. It has photos with a mix of different caption types and date formats, a photo with an odd aspect ratio, a low-resolution photo, and a photo with a bunch of unusual characters in its name.",
        "footer": "Copyright © \"Rennie deGraaf\" 2005-2017. <script>All rights \nreserved.<script>",
        "photos": [
            {
                "name": "img_0357.jpg",
                "thumbnail": "thumbnails/img_0357.thumbnail.jpg",
                "orientation": "horizontal",
                "path": "%7E/Projects/PhotoAlbum/trunk/test/img_0357.jpg"
            },
            {
                "name": "img_2235.jpg",
                "thumbnail": "thumbnails/img_2235.thumbnail.jpg",
                "orientation": "vertical",
                "path": "%7E/Projects/PhotoAlbum/trunk/test/img_2235.jpg"
            }
        ],
        "metadataDir": "metadata/",
        "captionFields": [
            "Description",
            "Location"
        ],
        "propertyFields": [
            "File name",
            "File size"
        ],
        "photoResolution": [
            1024,
            768
        ]
    }
    test_save_success("save to non-existing album with no suffix", template, "album", "album.json", "album.dyphal", "album.json", None)
    test_save_success("save to non-existing album with suffix", template, "album.dyphal", "album.json", "album.dyphal", "album.json", None)
    test_save_success("save to pre-existing album", template, "album.dyphal", "album.json", "album.dyphal", "album.json", functools.partial(create_file, "album.dyphal"))
    test_save_success("save to pre-existing web json", template, "album.dyphal", "album.json", "album.dyphal", "album.json", functools.partial(create_file, "album.json"))
    test_save_failure("save to non-writable album", template, "album.dyphal", "album.json", (SaveError), functools.partial(create_file_ro, "album.dyphal"))
    test_save_failure("save to non-writable web json", template, "album.dyphal", "album.json", (SaveError), functools.partial(create_file_ro, "album.json"))
    if 0 != testsFailed:
        print("ERROR: %d of %d tests failed!" % (testsFailed, testsTotal))
        exit(1)
if __name__ == '__main__':
    # Run the test suite when executed as a script.
    main()
| rdegraaf/dyphal | test/test_DyphalGenerator_Album_save.py | test_DyphalGenerator_Album_save.py | py | 7,238 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.path.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "os.path.umask",
"line... |
24788463789 | from collections import defaultdict
class TrieNode:
    """Single node of the palindrome-pair trie."""
    def __init__(self):
        # Index of the word terminating at this node; -1 when no word ends here.
        self.word = -1
        # Child nodes keyed by character; defaultdict creates them lazily.
        self.children = defaultdict(TrieNode)
        # Indices of words whose not-yet-consumed prefix below this node is
        # itself a palindrome (recorded during insert).
        self.palindrome_word = []
class Trie:
    """Trie over *reversed* words, used to find palindrome pairs.

    Each word is inserted reversed; searching another word forward then
    matches candidate partners whose concatenation can form a palindrome.
    """
    def __init__(self):
        self.root = TrieNode()
    @staticmethod
    def is_palindrome(word: str) -> bool:
        # word[::] is just a copy; a string equals its reverse iff palindrome.
        return word[::] == word[::-1]
    def insert(self, word: str, index: int) -> None:
        """Insert *word* in reverse order, tagging nodes with *index*.

        While descending, whenever the still-unconsumed prefix of the
        original word is a palindrome, record *index* on the current node
        so shorter partner words can complete a palindrome.
        """
        node = self.root
        for i, w in enumerate(reversed(word)):
            if self.is_palindrome(word[:len(word) - i]):
                node.palindrome_word.append(index)
            node = node.children[w]
        node.word = index
    def search(self, word: str, index: int) -> list[list[int]]:
        """Return [index, j] pairs where words[index] + words[j] is a palindrome."""
        node = self.root
        result = []
        # Case 1: while walking down, a stored word ends mid-way and the
        # remainder of *word* is itself a palindrome.
        while word:
            if node.word != -1:
                if self.is_palindrome(word):
                    result.append([index, node.word])
            if word[0] not in node.children:
                return result
            node = node.children[word[0]]
            word = word[1:]
        # Case 2: exact match with a different word (mutual reverses).
        if node.word != -1 and node.word != index:
            result.append([index, node.word])
        # Case 3: longer stored words whose extra tail is a palindrome.
        for palindrome in node.palindrome_word:
            result.append([index, palindrome])
        return result
    def print_trie(self) -> None:
        """Debug helper: DFS dump of (edge char, word index, palindrome ids)."""
        # Iterative DFS over the trie.
        node = self.root
        stack = [(node,"")]
        visited = []
        while stack:
            new_node, val = stack.pop()
            if new_node not in visited:
                visited.append(new_node)
                print(val, new_node.word, new_node.palindrome_word)
                for ke in new_node.children.keys():
                    stack.append((new_node.children[ke], ke))
from typing import List
class Solution:
    """LeetCode 336: find all pairs (i, j), i != j, such that
    words[i] + words[j] is a palindrome.

    A brute-force pairwise check is O(n^2 * k) and exceeds the time limit,
    so the words are indexed in a reversed-word trie instead.
    """

    def palindromePairs(self, words: List[str]) -> List[List[int]]:
        trie = Trie()
        for position, entry in enumerate(words):
            trie.insert(entry, position)
        pairs: List[List[int]] = []
        for position, entry in enumerate(words):
            pairs.extend(trie.search(entry, position))
        return pairs
# Quick manual smoke test of the trie-based solution.
sol = Solution()
print(sol.palindromePairs(["abcd", "dcba", "lls", "s", "sssll"]))
# print(sol.palindromePairs(["a",""]))
| inhyeokJeon/AALGGO | Python/LeetCode/trie/336_palindrome_pair.py | 336_palindrome_pair.py | py | 2,814 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 81,
"usage_type": "name"
}
] |
31194615398 | import time
import requests
# Takes in a schedule dictionary and sends instructions at specified times
# Exits when mode is changed to manual
def send_instructions(schedule):
    """Poll the clock and POST each scheduled state change to the service.

    *schedule* maps "HH:MM" strings to the state value to post at that
    time. Loops until the remote mode becomes "manual"; both the mode and
    the schedule are re-fetched from the server on every iteration.

    NOTE(review): while the current time matches a scheduled minute, the
    POST repeats on every loop pass (throttled only by the two GETs
    below) — confirm repeated posting is acceptable for the endpoint.
    """
    manual = False
    while not manual:
        for scheduled_time in schedule.keys():
            # Compare current hour and minute against the "HH:MM" key.
            if int(time.strftime('%H')) == int(scheduled_time.split(':')[0]) and int(time.strftime('%M')) == int(scheduled_time.split(':')[1]):
                new_state = schedule[scheduled_time]
                print('Posting... ' + str(new_state))
                requests.post(
                    'https://blind-control-299118.ue.r.appspot.com/flip', json={'new_state': new_state})
                print('Posted.')
        # Exit once the remote mode flips to manual; refresh the schedule.
        manual = requests.get(
            'https://blind-control-299118.ue.r.appspot.com/get-mode').json()['mode'] == 'manual'
        schedule = requests.get(
            'https://blind-control-299118.ue.r.appspot.com/get-schedule').json()
    return
def create_schedule(request):
    """Cloud Function HTTP entry point.

    Args:
        request (flask.Request): HTTP request whose JSON body is the
            schedule ("HH:MM" -> state) to execute.
    Returns:
        A plain success string once the scheduling loop has exited.
    """
    schedule = request.get_json()
    send_instructions(schedule)
    return 'Success!'
| apangasa/hackumass-blindcontrol | cloudFxns/scheduler.py | scheduler.py | py | 1,384 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "time.strftime",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_num... |
21052645591 | import csv
import datetime as dt
import os
import numpy as np
from matplotlib import pyplot
from pandas import datetime
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.statespace.sarimax import SARIMAX
from prepare_historical_data import PREDICTION_END_YEAR, months, DATA_FOLDER, PYTHON_LANGUAGE, R_LANGUAGE, \
CSV_FILE_SUFFIX
SARIMAX_ = "sarimax"
PREDICTIONS_FOLDER = "results"
def write_to_csv(dates, predictions, language, model):
    """Persist (date, prediction) rows as CSV under PREDICTIONS_FOLDER.

    The output file is named "<language>_<model>_predictions.csv".
    Always returns True.
    """
    out_name = language + "_" + model + "_predictions.csv"
    out_path = os.path.join(PREDICTIONS_FOLDER, out_name)
    with open(out_path, 'w', newline='') as csv_file:
        csv.writer(csv_file).writerows(zip(dates, predictions))
    return True
def create_predictions_folder():
    """Ensure the output folder for prediction artifacts exists.

    Uses ``exist_ok=True`` instead of the previous check-then-create
    pair, which could raise FileExistsError if the folder appeared
    between the ``os.path.exists`` check and ``os.makedirs``.
    """
    os.makedirs(PREDICTIONS_FOLDER, exist_ok=True)
def RMSE(actual, pred):
    """Root-mean-square error between two equal-length sequences.

    Computed directly with numpy: sqrt(mean((actual - pred)**2)).
    This gives the same value as sklearn's ``mean_squared_error`` +
    ``np.sqrt`` while avoiding a heavyweight sklearn import for a
    one-line computation.
    """
    actual_arr = np.asarray(actual, dtype=float)
    pred_arr = np.asarray(pred, dtype=float)
    return np.sqrt(np.mean((actual_arr - pred_arr) ** 2))
def parser(x):
    """Convert a 'YYYY-MM' date string into a datetime (for read_csv)."""
    year_month_format = '%Y-%m'
    return datetime.strptime(x, year_month_format)
def get_future_date_list(start_year=2019):
    """Build month-start dates from *start_year* through PREDICTION_END_YEAR.

    Args:
        start_year: first year of the forecast horizon. Defaults to 2019,
            which was previously hard-coded.

    Returns:
        A list of ``datetime.date`` objects, one per month, in order.

    The previous leftover debug ``print`` of the whole list was removed.
    """
    future_dates = [str(year) + "-" + month
                    for year in range(start_year, PREDICTION_END_YEAR + 1)
                    for month in months]
    return [dt.datetime.strptime(date, '%Y-%m').date() for date in future_dates]
def run_arima(language):
    """Fit an ARIMA(1,1,1) model to a language's repo counts and forecast.

    Loads the historical monthly series from DATA_FOLDER, holds out the
    last 12 months as a test set, saves a plot and a CSV of the future
    predictions, prints the test-set RMSE, and returns the forecast.
    """
    create_predictions_folder()
    series = read_csv(os.path.join(DATA_FOLDER, language + CSV_FILE_SUFFIX), header=0, parse_dates=[0], index_col=0,
                      squeeze=True,
                      date_parser=parser)
    data = series.values.tolist()
    # Hold out the final 12 months for evaluation.
    train, test = data[:-12], data[-12:]
    model = ARIMA(train, order=(1, 1, 1))
    model_fit = model.fit(disp=False,
                          start_params=[1, .1, .1])  # , start_params=[np.mean(data), .1, np.mean(data)]
    dates_list = get_future_date_list()
    # In-sample predictions over the held-out year, then 60 future months;
    # the first 12 forecast months are dropped below.
    test_pred = model_fit.predict(len(train) + 1, len(data) - 1, typ='levels')
    future_pred = model_fit.predict(len(data), len(data) + 59, typ='levels')
    # NOTE(review): after this slice future_pred has 48 values; verify it
    # matches len(dates_list) (depends on PREDICTION_END_YEAR) or the
    # plot/CSV calls below will misalign.
    future_pred = future_pred[12:]
    pyplot.figure()
    pyplot.title("Predictions based on ARIMA model : " + language + " repositories")
    pyplot.plot(series, label='Historical data')
    pyplot.plot(series.keys().tolist(), [None for i in range(len(train))] + test_pred.tolist(),
                label='Predictions - Test data')
    pyplot.plot(dates_list, future_pred, label='Predictions - 2019 to 2023')
    pyplot.legend()
    pyplot.savefig(os.path.join(PREDICTIONS_FOLDER, language + "_predictions_arima.png"))
    rmse = RMSE(test, test_pred)
    print('ARIMA RMSE: %.3f' % rmse + " for " + language + " repos test set")
    # str(date_)[:-3] trims "-DD", leaving "YYYY-MM" for the CSV.
    write_to_csv([str(date_)[:-3] for date_ in dates_list], future_pred, language, "arima")
    return future_pred
def run_sarimax(language):
    """Fit a seasonal SARIMAX model to a language's repo counts and forecast.

    Same pipeline as run_arima but with a SARIMAX(2,1,4)(1,1,1,12) model:
    loads the series, holds out the last 12 months, saves a plot and a
    CSV of the forecast, prints the test-set RMSE, returns the forecast.
    """
    create_predictions_folder()
    series = read_csv(os.path.join(DATA_FOLDER, language + CSV_FILE_SUFFIX), header=0, parse_dates=[0], index_col=0,
                      squeeze=True,
                      date_parser=parser)
    data = series.values.tolist()
    # Hold out the final 12 months for evaluation.
    train, test = data[:-12], data[-12:]
    model_fit = SARIMAX(train, order=(2, 1, 4), seasonal_order=(1, 1, 1, 12)).fit()
    dates_list = get_future_date_list()
    print(len(dates_list))
    # NOTE(review): test_pred spans len(train)+1..len(data) (inclusive),
    # one step later than run_arima's range — confirm the offset is intended.
    test_pred = model_fit.predict(len(train) + 1, len(data), dynamic=True)
    future_pred = model_fit.predict(len(data), len(data) + 59, dynamic=True)
    pyplot.figure()
    pyplot.title("Predictions based on SARIMAX model : " + language + " repositories")
    pyplot.plot(series, label='Historical data')
    pyplot.plot(series.keys().tolist(), [None for i in range(len(train))] + test_pred.tolist(),
                label='Predictions - Test data')
    pyplot.plot(dates_list, future_pred, label='Predictions - 2019 to 2023')
    pyplot.legend()
    pyplot.savefig(os.path.join(PREDICTIONS_FOLDER, language + "_predictions_SARIMAX.png"))
    rmse = RMSE(test, test_pred)
    print('SARIMAX RMSE: %.3f' % rmse + " for " + language + " repos test set")
    # str(date_)[:-3] trims "-DD", leaving "YYYY-MM" for the CSV.
    write_to_csv([str(date_)[:-3] for date_ in dates_list], future_pred, language, SARIMAX_)
    return future_pred
if __name__ == '__main__':
    # Generate forecasts for both tracked languages with each model.
    run_arima(PYTHON_LANGUAGE)
    run_arima(R_LANGUAGE)
    run_sarimax(PYTHON_LANGUAGE)
    run_sarimax(R_LANGUAGE)
| chaitanyacsss/github_repository_growth_forecast | arima_predictions.py | arima_predictions.py | py | 4,334 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "csv.writer",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_numbe... |
14566840458 | from datetime import date
from django.apps import apps
from django.db import models
from lxml import etree
import requests
from .product_forms import FORMS
class IsbnPool(models.Model):
    """A contiguous range of ISBN-13 suffixes sharing a common prefix."""
    PURPOSE_GENERAL = 'GENERAL'
    PURPOSE_WL = 'WL'
    PURPOSE_CHOICES = (
        (PURPOSE_WL, 'Wolne Lektury'),
        (PURPOSE_GENERAL, 'Ogólne'),
    )
    # Leading digits shared by every ISBN in the pool.
    prefix = models.CharField(max_length=10)
    # Inclusive suffix range managed by this pool.
    suffix_from = models.IntegerField()
    suffix_to = models.IntegerField()
    # Offset mapping a suffix to its record id in the BN (e-isbn.pl) service.
    ref_from = models.IntegerField()
    purpose = models.CharField(max_length=8, choices=PURPOSE_CHOICES)
    def __str__(self):
        # Human-readable mask of the pool, e.g. "978-83-12345-XXXX-X".
        return '-'.join((
            self.prefix[:3],
            self.prefix[3:5],
            self.prefix[5:],
            'X' * (12 - len(self.prefix)),
            'X'
        ))
    @staticmethod
    def check_digit(prefix12):
        """Return the ISBN-13 check digit for a 12-digit string.

        Equivalent to the standard formula
        (10 - (sum_even + 3 * sum_odd)) % 10, because 7 ≡ -3 (mod 10).
        """
        digits = [int(d) for d in prefix12]
        return str((-sum(digits[0::2]) + 7 * sum(digits[1::2])) % 10)
    def get_code(self, suffix, dashes=False):
        """Return the full 13-digit ISBN for *suffix*, optionally dashed."""
        suffix_length = 12 - len(self.prefix)
        # Zero-pad the suffix so prefix + suffix is exactly 12 digits.
        suffix_str = f'{suffix:0{suffix_length}d}'
        prefix12 = self.prefix + suffix_str
        check_digit = self.check_digit(prefix12)
        if dashes:
            isbn = '-'.join((
                self.prefix[:3],
                self.prefix[3:5],
                self.prefix[5:],
                suffix_str,
                check_digit
            ))
        else:
            isbn = ''.join((
                prefix12, check_digit
            ))
        return isbn
    @property
    def size(self):
        # Total ISBN count in the pool (inclusive range).
        return self.suffix_to - self.suffix_from + 1
    @property
    def entries(self):
        # Number of ISBNs already allocated from this pool.
        return self.isbn_set.count()
    @property
    def fill_percentage(self):
        return 100 * self.entries / self.size
    def bn_record_id_for(self, suffix):
        """Map a pool suffix to its record id on e-isbn.pl."""
        return self.ref_from + suffix
    def import_all_bn_data(self):
        """Fetch BN ONIX data for every suffix in the pool (network-heavy)."""
        for suffix in range(self.suffix_from, self.suffix_to + 1):
            print(suffix)
            self.import_bn_data_for(suffix)
    def import_bn_data_for(self, suffix):
        """Download the ONIX record for *suffix* and cache it on the Isbn row."""
        record_id = self.bn_record_id_for(suffix)
        content = requests.get(
            f'https://e-isbn.pl/IsbnWeb/record/export_onix.xml?record_id={record_id}').content
        elem = etree.fromstring(content)
        product = elem.find('{http://ns.editeur.org/onix/3.0/reference}Product')
        if product is not None:
            # Create the Isbn row lazily; only bn_data is updated here.
            isbn, created = self.isbn_set.get_or_create(
                suffix=suffix
            )
            isbn.bn_data = etree.tostring(product, pretty_print=True, encoding='unicode')
            isbn.save(update_fields=['bn_data'])
class Isbn(models.Model):
    """A single ISBN allocated from an IsbnPool, optionally bound to a
    catalogue book and a publication format.

    Fixes over the previous revision:
    - ``assign`` no longer crashes with TypeError when the pool has no
      entries yet (``Max('suffix')`` aggregates to None on an empty set).
    - the two bare ``except:`` clauses in ``import_from_documents`` were
      narrowed to ``except Exception:`` so KeyboardInterrupt/SystemExit
      are no longer swallowed.
    """
    pool = models.ForeignKey(IsbnPool, models.PROTECT)
    suffix = models.IntegerField()
    datestamp = models.DateField(blank=True, null=True)
    book = models.ForeignKey(
        'catalogue.Book', models.PROTECT, null=True, blank=True
    )
    form = models.CharField(
        max_length=32, choices=[
            (form, form)
            for form, config in FORMS
        ], blank=True
    )
    # Raw ONIX XML fetched from e-isbn.pl, if any.
    bn_data = models.TextField(blank=True)
    wl_data = models.TextField(blank=True)
    notes = models.TextField(blank=True)
    class Meta:
        ordering = ['pool', 'suffix']
        unique_together = ['pool', 'suffix']
    def __str__(self):
        return self.get_code(True)
    def get_code(self, dashes=True):
        """Return the full ISBN-13 string, dash-separated by default."""
        return self.pool.get_code(self.suffix, dashes=dashes)
    @classmethod
    def get_for_book(cls, book, form):
        """Return the ISBN for (book, form), allocating a new one if missing."""
        isbn = cls.objects.filter(book=book, form=form).first()
        if isbn is None:
            return cls.assign(book, form)
        return isbn
    @classmethod
    def assign(cls, book, form):
        """Allocate the next free suffix from the Wolne Lektury pool."""
        pool = IsbnPool.objects.filter(purpose=IsbnPool.PURPOSE_WL).first()
        max_suffix = pool.isbn_set.aggregate(s=models.Max('suffix'))['s']
        # Max() is None on an empty pool; start at the pool's first suffix.
        suffix = pool.suffix_from if max_suffix is None else max_suffix + 1
        assert suffix <= pool.suffix_to
        return pool.isbn_set.create(
            book=book, form=form, suffix=suffix, datestamp=date.today()
        )
    @classmethod
    def formats_from_document(cls, document):
        """Return (form, isbn-string-or-None) pairs relevant for *document*."""
        # This is a document
        meta = document.wldocument(librarian2=True).meta
        # A document with parts is a parent; only parent-capable forms apply.
        is_parent = len(meta.parts)
        formats = []
        for form, config in FORMS:
            if config.book and (not is_parent or config.parent):
                formats.append((
                    form,
                    getattr(meta, f'isbn_{form}')
                ))
        return formats
    @classmethod
    def import_from_documents(cls):
        """Backfill Isbn rows from ISBN identifiers found in documents.

        Best-effort: documents without a catalogue book or with unparsable
        metadata are skipped; conflicting book/form assignments are
        recorded in ``notes`` instead of overwriting existing data.
        """
        Book = apps.get_model('documents', 'Book')
        for book in Book.objects.all():
            try:
                catalogue_book = book.catalogue_book
                if catalogue_book is None:
                    continue
            except Exception:
                # Narrowed from a bare except; skip books without a link.
                continue
            try:
                meta = book.wldocument(publishable=False, librarian2=True).meta
            except Exception:
                # Narrowed from a bare except; skip unparsable documents.
                continue
            for form in ('html', 'txt', 'pdf', 'epub', 'mobi'):
                isbn = getattr(meta, f'isbn_{form}')
                if isbn is not None:
                    # Identifier looks like "ISBN-978-83-...-<suffix>-<check>".
                    parts = isbn.split('-')
                    assert parts[0] == 'ISBN'
                    suffix = int(parts[-2])
                    prefix = ''.join(parts[1:-2])
                    pool = IsbnPool.objects.get(prefix=prefix)
                    isbn, created = pool.isbn_set.get_or_create(
                        suffix=suffix,
                    )
                    add_note = False
                    if isbn.book is None:
                        isbn.book = catalogue_book
                    elif isbn.book != catalogue_book:
                        add_note = True
                    if not isbn.form:
                        isbn.form = form
                    elif isbn.form != form:
                        add_note = True
                    if add_note:
                        isbn.notes += '\n\n' + catalogue_book.slug + ' ' + form
                    isbn.save(update_fields=['book', 'form', 'notes'])
| fnp/redakcja | src/isbn/models.py | models.py | py | 6,085 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": ... |
20244314041 | #!/usr/bin/env python
# Download individual checksum files for Electron zip files from S3,
# concatenate them, and upload to GitHub.
from __future__ import print_function
import argparse
import sys
from lib.config import s3_config
from lib.util import boto_path_dirs
sys.path.extend(boto_path_dirs())
from boto.s3.connection import S3Connection
def main():
    """Print the concatenated per-file SHA-256 checksums for a release.

    Reads every ``*.sha256sum`` object under the release's temporary S3
    prefix and writes their contents to stdout.

    Returns the process exit status: 1 if the configured bucket does not
    exist, 0 otherwise.
    """
    args = parse_args()
    bucket_name, access_key, secret_key = s3_config()
    s3 = S3Connection(access_key, secret_key)
    bucket = s3.get_bucket(bucket_name)
    if bucket is None:
        print('S3 bucket "{}" does not exist!'.format(bucket_name), file=sys.stderr)
        return 1
    # One checksum object is stored per zip under this version's tmp prefix.
    prefix = 'atom-shell/tmp/{0}/'.format(args.version)
    shasums = [s3_object.get_contents_as_string().strip()
               for s3_object in bucket.list(prefix, delimiter='/')
               if s3_object.key.endswith('.sha256sum')]
    print('\n'.join(shasums))
    return 0
def parse_args():
    """Build and apply the command-line parser for this script."""
    arg_parser = argparse.ArgumentParser(
        description='Upload SHASUMS files to GitHub')
    arg_parser.add_argument('-v', '--version', required=True,
                            help='Specify the version')
    return arg_parser.parse_args()
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| brave/muon | script/merge-electron-checksums.py | merge-electron-checksums.py | py | 1,189 | python | en | code | 970 | github-code | 36 | [
{
"api_name": "sys.path.extend",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "lib.util.boto_path_dirs",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "lib.config.s3_c... |
6994313010 | from lib.cuckoo.common.abstracts import Signature
class MemoryAvailable(Signature):
    """Cuckoo signature flagging memory-size queries, a common VM-detection
    trick (sandboxes often expose little RAM)."""
    name = "antivm_memory_available"
    description = "Checks amount of memory in system, this can be used to detect virtual machines that have a low amount of memory available"
    severity = 1
    categories = ["anti-vm"]
    authors = ["Kevin Ross"]
    minimum = "2.0"
    ttp = ["T1082"]
    # Only these memory-query APIs are inspected by on_call.
    filter_apinames = [
        "GlobalMemoryStatusEx", "GetPhysicallyInstalledSystemMemory",
    ]
    # Common benign applications that legitimately query memory size;
    # calls originating from these processes are ignored.
    safelistprocs = [
        "iexplore.exe",
        "firefox.exe",
        "chrome.exe",
        "safari.exe",
        "acrord32.exe",
        "acrord64.exe",
        "wordview.exe",
        "winword.exe",
        "excel.exe",
        "powerpnt.exe",
        "outlook.exe",
        "mspub.exe"
    ]
    def on_call(self, call, process):
        # Mark the API call unless it came from a safelisted application.
        if process["process_name"].lower() not in self.safelistprocs:
            self.mark_call()
        return self.has_marks()
| cuckoosandbox/community | modules/signatures/windows/antivm_memory_available.py | antivm_memory_available.py | py | 954 | python | en | code | 312 | github-code | 36 | [
{
"api_name": "lib.cuckoo.common.abstracts.Signature",
"line_number": 3,
"usage_type": "name"
}
] |
35220926762 | import json
import requests
from bs4 import BeautifulSoup
URL = 'https://www.zr.ru/news/'
HEADERS = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:93.0) Gecko/20100101 Firefox/93.0', 'accept': '*/*'}
HOST = 'https://www.zr.ru'
# Fetch data from the server
def get_html(url, params=None):
    """Perform a GET request against *url* and return the response object.

    *params* carries optional query parameters (e.g. the page number);
    the caller inspects ``.status_code`` / ``.text`` on the result.
    """
    return requests.get(url, headers=HEADERS, params=params)
# Collect news data from the site
def get_content(html):
    """Parse the news index page and dump the first ten articles to
    db_zr.json (title, link, summary, picture, author, date)."""
    soup = BeautifulSoup(html, 'html.parser')
    items = soup.find_all('article', class_='story-short')
    news = []
    for item in items[:10]:
        # Fetch author/date from the article's own page.
        details = get_article_content(HOST + item.find('a', class_='link').get('href'))
        # Limited to ten posts; pagination would be needed to collect more articles
        news.append({
            'title': item.find('a', class_='link').get_text(strip=True),
            'link': HOST + item.find('a', class_='link').get('href'),
            'article': item.find('div', class_='articles__item-desc').get_text(strip=True),
            'picture': HOST + item.find('img').get('src'),
            'autor': details['autor'],
            'date': details['date']
        })
    print(news)
    with open("db_zr.json", "w") as jfile:
        json.dump(news, jfile, indent=4, ensure_ascii=False)
# Collect data from inside an article page
def get_article_content(article_url):
    """Fetch an article page and extract its author and publication date."""
    html = get_html(article_url)
    soup = BeautifulSoup(html.text, 'html.parser')
    items = soup.find('body', class_='zr')
    return {
        'autor': items.find('span', class_='link_pink').get_text(strip=True),
        'date': items.find('div', class_='info__date').get_text(strip=True)
    }
# Main function: parse the first page of the site
def parse():
    """Entry point: download the news index and extract its content."""
    html = get_html(URL)
    # Verify the page responded successfully before parsing.
    if html.status_code == 200:
        get_content(html.text)
    else:
        print('Error')
parse()
| dimedrolex/parser-news-zr | www-zr-ru.py | www-zr-ru.py | py | 2,503 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"lin... |
17928875089 | # -*- coding: utf-8 -*-
import datetime
from django.utils.decorators import method_decorator
from django.conf import settings
from django.contrib.auth.decorators import permission_required
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse
from django.db.models import Count
from django.db.models import Q
from django.contrib import messages
from django.forms.models import model_to_dict
from django.http import HttpResponse, HttpResponseRedirect
from django.http import Http404, HttpResponsePermanentRedirect
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, render
from django.template.loader import render_to_string
from django.views.decorators.http import require_GET, require_POST
from django.views.decorators.http import require_http_methods
from django.utils.translation import ugettext_lazy as _
from django.views.generic import View
from uuslug import slugify
from tcms.core.utils import DataTableResult
from tcms.management.models import EnvGroup
from tcms.search import remove_from_request_path
from tcms.search.order import order_plan_queryset
from tcms.testcases.forms import SearchCaseForm, QuickSearchCaseForm
from tcms.testcases.models import TestCaseStatus
from tcms.testcases.models import TestCase, TestCasePlan
from tcms.testcases.views import get_selected_testcases
from tcms.testcases.views import printable as testcases_printable
from tcms.testplans.forms import ClonePlanForm
from tcms.testplans.forms import EditPlanForm
from tcms.testplans.forms import NewPlanForm
from tcms.testplans.forms import SearchPlanForm
from tcms.testplans.models import TestPlan
from tcms.testruns.models import TestRun, TestCaseRun
def update_plan_email_settings(test_plan, form):
    """Copy the notification flags from a validated form onto the plan's
    emailing settings and persist them."""
    notification_fields = (
        'notify_on_plan_update',
        'notify_on_case_update',
        'auto_to_plan_owner',
        'auto_to_plan_author',
        'auto_to_case_owner',
        'auto_to_case_default_tester',
    )
    for field_name in notification_fields:
        setattr(test_plan.emailing, field_name, form.cleaned_data[field_name])
    test_plan.emailing.save()
# _____________________________________________________________________________
# view functions
@require_http_methods(['GET', 'POST'])
@permission_required('testplans.add_testplan')
def new(request, template_name='plan/new.html'):
    """Create a new test plan.

    GET renders an empty form; a valid POST creates the plan (plus
    optional environment groups) and redirects to the plan page; an
    invalid POST re-renders the bound form with its errors.
    """
    # If the form has been submitted...
    if request.method == 'POST':
        # A form bound to the POST data
        form = NewPlanForm(request.POST)
        form.populate(product_id=request.POST.get('product'))
        if form.is_valid():
            test_plan = TestPlan.objects.create(
                product=form.cleaned_data['product'],
                author=request.user,
                owner=request.user,
                product_version=form.cleaned_data['product_version'],
                type=form.cleaned_data['type'],
                name=form.cleaned_data['name'],
                create_date=datetime.datetime.now(),
                extra_link=form.cleaned_data['extra_link'],
                parent=form.cleaned_data['parent'],
                text=form.cleaned_data['text'],
            )
            # Add test plan environment groups
            if request.user.has_perm('testplans.add_envplanmap'):
                if request.POST.get('env_group'):
                    env_groups = EnvGroup.objects.filter(
                        id__in=request.POST.getlist('env_group')
                    )
                    for env_group in env_groups:
                        test_plan.add_env_group(env_group=env_group)
            # create emailing settings to avoid Issue #181 on MySQL
            test_plan.emailing.save()
            return HttpResponseRedirect(
                reverse('test_plan_url_short', args=[test_plan.plan_id, ])
            )
    else:
        form = NewPlanForm()
    context_data = {
        'form': form,
    }
    return render(request, template_name, context_data)
@require_GET
def get_all(request, template_name='plan/all.html'):
    """Display all test plans matching the current search.

    Supports three render modes via the ``t`` GET parameter: ``ajax``
    (JSON list), ``html`` with ``f=preview`` (preview template), and the
    default full page. Fix: the attribute list copied onto each JSON
    object contained ``'num_cases'`` twice; the duplicate is removed
    (the repeated assignment was harmless but misleading).
    """
    # TODO: this function now only performs a forward feature, no queries
    # need here. All of it will be removed in the future.
    # If it's not a search the page will be blank
    tps = TestPlan.objects.none()
    query_result = False
    order_by = request.GET.get('order_by', 'create_date')
    asc = bool(request.GET.get('asc', None))
    # if it's a search request the page will be filled
    if request.GET:
        search_form = SearchPlanForm(request.GET)
        search_form.populate(product_id=request.GET.get('product'))
        if search_form.is_valid():
            query_result = True
            # build a QuerySet:
            tps = TestPlan.list(search_form.cleaned_data)
            tps = tps.select_related('author', 'type', 'product')
            # We want to get the number of cases and runs, without doing
            # lots of per-test queries.
            #
            # Ideally we would get the case/run counts using m2m field tricks
            # in the ORM
            # Unfortunately, Django's select_related only works on ForeignKey
            # relationships, not on ManyToManyField attributes
            # See http://code.djangoproject.com/ticket/6432
            # SQLAlchemy can handle this kind of thing in several ways.
            # Unfortunately we're using Django
            # The cleanest way I can find to get it into one query is to
            # use QuerySet.extra()
            # See http://docs.djangoproject.com/en/dev/ref/models/querysets
            tps = tps.annotate(num_cases=Count('case', distinct=True),
                               num_runs=Count('run', distinct=True),
                               num_children=Count('child_set', distinct=True))
            tps = order_plan_queryset(tps, order_by, asc)
    else:
        # Set search active plans only by default
        search_form = SearchPlanForm(initial={'is_active': True})
    if request.GET.get('action') == 'clone_case':
        template_name = 'case/clone_select_plan.html'
        tps = tps.order_by('name')
    if request.GET.get('t') == 'ajax':
        results = []
        for obj in tps:
            dict_obj = model_to_dict(obj, fields=('name', 'parent', 'is_active'))
            # Copy the annotated counts alongside the model fields.
            for attr in ['pk', 'num_cases', 'num_runs', 'num_children']:
                dict_obj[attr] = getattr(obj, attr)
            dict_obj['plan_url'] = reverse('test_plan_url_short', args=[obj.pk])
            results.append(dict_obj)
        return JsonResponse(results, safe=False)
    if request.GET.get('t') == 'html':
        if request.GET.get('f') == 'preview':
            template_name = 'plan/preview.html'
    query_url = remove_from_request_path(request, 'order_by')
    if asc:
        query_url = remove_from_request_path(query_url, 'asc')
    else:
        query_url = '%s&asc=True' % query_url
    page_type = request.GET.get('page_type', 'pagination')
    query_url_page_type = remove_from_request_path(request, 'page_type')
    if query_url_page_type:
        query_url_page_type = remove_from_request_path(query_url_page_type, 'page')
    context_data = {
        'test_plans': tps,
        'query_result': query_result,
        'search_plan_form': search_form,
        'query_url': query_url,
        'query_url_page_type': query_url_page_type,
        'page_type': page_type
    }
    return render(request, template_name, context_data)
def _group_counts(queryset, group_field):
    """Group *queryset* by *group_field* and return {value: row count}.

    Shared implementation for the three ``get_number_of_*`` helpers below,
    which previously triplicated the same values/annotate aggregation.
    Count('pk') counts rows per group, which matches the original
    per-function counts (the grouped field is never NULL after the
    ``__in`` filters applied by the callers).
    """
    counted = queryset.values(group_field).annotate(
        total_count=Count('pk')).order_by('-%s' % group_field)
    return {row[group_field]: row['total_count'] for row in counted}


def get_number_of_plans_cases(plan_ids):
    """Get the number of cases related to each plan

    Arguments:
    - plan_ids: a tuple or list of TestPlans' id

    Return value:
    A dict mapping plan_id to its total case count.
    """
    return _group_counts(TestCasePlan.objects.filter(plan__in=plan_ids), 'plan')


def get_number_of_plans_runs(plan_ids):
    """Get the number of runs related to each plan

    Arguments:
    - plan_ids: a tuple or list of TestPlans' id

    Return value:
    A dict mapping plan_id to its total run count.
    """
    return _group_counts(TestRun.objects.filter(plan__in=plan_ids), 'plan')


def get_number_of_children_plans(plan_ids):
    """Get the number of children plans related to each plan

    Arguments:
    - plan_ids: a tuple or list of TestPlans' id

    Return value:
    A dict mapping parent plan_id to its total child count.
    """
    return _group_counts(TestPlan.objects.filter(parent__in=plan_ids), 'parent')
def calculate_stats_for_testplans(plans):
    """Attach per-plan case/run/children counts to each TestPlan.

    Arguments:
    - plans: the queryset (or sequence) of TestPlans

    Return value:
    The same plans, each carrying ``cal_cases_count``, ``cal_runs_count``
    and ``cal_children_count`` attributes (``cal`` = calculated).
    """
    plan_ids = [plan.pk for plan in plans]
    cases_counts = get_number_of_plans_cases(plan_ids)
    runs_counts = get_number_of_plans_runs(plan_ids)
    children_counts = get_number_of_children_plans(plan_ids)
    # Plans absent from a count dict simply had no related rows.
    for plan in plans:
        plan.cal_cases_count = cases_counts.get(plan.pk, 0)
        plan.cal_runs_count = runs_counts.get(plan.pk, 0)
        plan.cal_children_count = children_counts.get(plan.pk, 0)
    return plans
@require_GET
def ajax_search(request, template_name='plan/common/json_plans.txt'):
    """Display all testplans.

    Returns the JSON payload consumed by the plans DataTable.  Without any
    GET parameters the result set is empty.
    """
    # If it's not a search the page will be blank
    test_plans = TestPlan.objects.none()
    # if it's a search request the request will be full
    if request.GET:
        search_form = SearchPlanForm(request.GET)
        search_form.populate(product_id=request.GET.get('product'))
        if search_form.is_valid():
            # Determine whether the query targets the user's own plans and
            # adjust the result accordingly.
            author = request.GET.get('author__email__startswith')
            if author and len(search_form.changed_data) == 1:
                # Only the author field was used: when it matches the logged-in
                # user, include plans they author OR own.  Otherwise the result
                # stays empty (no fallback search is performed in this branch).
                if request.user.is_authenticated:
                    if author == request.user.username or author == request.user.email:
                        query = Q(author__email__startswith=author) | \
                            Q(owner__email__startswith=author)
                        test_plans = TestPlan.objects.filter(query).distinct()
            else:
                test_plans = TestPlan.list(search_form.cleaned_data)
                test_plans = test_plans.select_related('author', 'owner', 'type', 'product')
    # columnIndexNameMap is required for correct sorting behavior, 5 should
    # be product, but we use run.build.product
    column_names = [
        'plan_id',
        'name',
        'author__username',
        'owner__username',
        'product',
        'product_version',
        'type',
        'num_cases',
        'num_runs',
        ''
    ]
    return ajax_response(request, test_plans, column_names, template_name)
def ajax_response(request, queryset, column_names, template_name):
    """Render a DataTables-style JSON response for plan search results."""
    table_result = DataTableResult(request.GET, queryset, column_names)
    response_data = table_result.get_response_data()
    # Enrich the page of plans with the calculated counters the template shows.
    response_data['querySet'] = calculate_stats_for_testplans(response_data['querySet'])

    # todo: prepare the JSON with the response, consider using :
    # from django.template.defaultfilters import escapejs
    rendered = render_to_string(template_name, response_data, request=request)
    return HttpResponse(rendered, content_type='application/json')
def get(request, plan_id, slug=None, template_name='plan/get.html'):
    """Display the plan details."""
    try:
        test_plan = TestPlan.objects.select_related().get(plan_id=plan_id)
    except ObjectDoesNotExist:
        raise Http404

    # Without a slug, permanently redirect to the canonical slugged URL.
    if slug is None:
        return HttpResponsePermanentRedirect(reverse('test_plan_url',
                                                     args=[plan_id, slugify(test_plan.name)]))

    # Partition the plan's cases into runnable (CONFIRMED) vs. under review.
    confirm_status_name = 'CONFIRMED'
    test_plan.run_case = test_plan.case.filter(case_status__name=confirm_status_name)
    test_plan.review_case = test_plan.case.exclude(case_status__name=confirm_status_name)

    return render(request, template_name, {'test_plan': test_plan})
@require_http_methods(['GET', 'POST'])
@permission_required('testruns.change_testrun')
def choose_run(request, plan_id):
    """Choose one run to add cases.

    GET renders a page listing the plan's runs and the selected cases.
    POST adds the selected cases to every chosen run (skipping cases a run
    already contains) and increases each run's estimated time by the total
    estimated time of the selected cases.
    """
    if request.method == 'GET':
        try:
            test_plan = TestPlan.objects.get(pk=int(plan_id))
        except ObjectDoesNotExist:
            raise Http404
        test_runs = TestRun.objects.filter(plan=plan_id).values('pk',
                                                                'summary',
                                                                'build__name',
                                                                'manager__username')
        # Ready to write cases to test plan
        test_cases = get_selected_testcases(request).values('pk', 'summary',
                                                            'author__username',
                                                            'create_date',
                                                            'category__name',
                                                            'priority__value', )
        context_data = {
            'plan_id': plan_id,
            'plan': test_plan,
            'test_runs': test_runs.iterator(),
            'test_cases': test_cases.iterator(),
        }
        return render(request, 'plan/choose_testrun.html', context_data)

    # Add cases to runs
    if request.method == 'POST':
        chosen_test_run_ids = request.POST.getlist('testrun_ids')
        to_be_added_cases = TestCase.objects.filter(pk__in=request.POST.getlist('case_ids'))

        # The total estimated time of the selected cases is the same for every
        # chosen run, so compute it once instead of re-summing inside the loop.
        estimated_time = datetime.timedelta(0)
        for case in to_be_added_cases:
            estimated_time += case.estimated_time

        # counter used as a flag that runs or cases were selected
        # in the form, regardless of whether or not they were actually added;
        # used to produce an error message if user clicked the Update button
        # without selecting anything on the screen
        cases_selected = 0
        for test_run_id in chosen_test_run_ids:
            test_run = get_object_or_404(TestRun, run_id=test_run_id)
            # Materialize the existing case ids once, so the membership test
            # below doesn't evaluate the queryset for every candidate case.
            existing_cases = set(
                TestCaseRun.objects.filter(run=test_run_id).values_list('case', flat=True))
            for test_case in to_be_added_cases:
                cases_selected += 1
                if test_case.case_id not in existing_cases:
                    test_run.add_case_run(case=test_case)

            # The run's estimate grows even when some cases already existed,
            # matching the original behavior.
            test_run.estimated_time = test_run.estimated_time + estimated_time
            test_run.save()

        if not cases_selected:
            messages.add_message(request,
                                 messages.ERROR,
                                 _('Select at least one TestRun and one TestCase'))

        return HttpResponseRedirect(reverse('test_plan_url_short', args=[plan_id]))
@require_http_methods(['GET', 'POST'])
@permission_required('testplans.change_testplan')
def edit(request, plan_id, template_name='plan/edit.html'):
    """Edit test plan view.

    GET renders the edit form pre-filled from the plan and its email
    settings; POST validates the form, saves the changes (subject to
    per-field permissions) and redirects to the plan detail page.
    """
    try:
        test_plan = TestPlan.objects.select_related().get(plan_id=plan_id)
    except ObjectDoesNotExist:
        raise Http404
    # If the form is submitted
    if request.method == "POST":
        form = EditPlanForm(request.POST)
        form.populate(product_id=request.POST.get('product'))
        # FIXME: Error handle
        if form.is_valid():
            if request.user.has_perm('testplans.change_testplan'):
                test_plan.name = form.cleaned_data['name']
                test_plan.parent = form.cleaned_data['parent']
                test_plan.product = form.cleaned_data['product']
                test_plan.product_version = form.cleaned_data['product_version']
                test_plan.type = form.cleaned_data['type']
                test_plan.is_active = form.cleaned_data['is_active']
                test_plan.extra_link = form.cleaned_data['extra_link']
                test_plan.owner = form.cleaned_data['owner']
                # IMPORTANT! tp.current_user is an instance attribute,
                # added so that in post_save, current logged-in user info
                # can be accessed.
                # Instance attribute is usually not a desirable solution.
                test_plan.current_user = request.user
                test_plan.text = form.cleaned_data['text']
                test_plan.save()
            if request.user.has_perm('testplans.change_envplanmap'):
                # Replace the plan's environment groups with the submitted set.
                test_plan.clear_env_groups()
                if request.POST.get('env_group'):
                    env_groups = EnvGroup.objects.filter(
                        id__in=request.POST.getlist('env_group'))
                    for env_group in env_groups:
                        test_plan.add_env_group(env_group=env_group)
            # Update plan email settings
            update_plan_email_settings(test_plan, form)
            return HttpResponseRedirect(
                reverse('test_plan_url', args=[plan_id, slugify(test_plan.name)]))
    else:
        # Generate a blank form
        # Temporary use one environment group in this case
        # NOTE(review): only the first env group is shown in the form even if
        # the plan has several.
        if test_plan.env_group.all():
            for env_group in test_plan.env_group.all():
                env_group_id = env_group.id
                break
        else:
            env_group_id = None
        form = EditPlanForm(initial={
            'name': test_plan.name,
            'product': test_plan.product_id,
            'product_version': test_plan.product_version_id,
            'type': test_plan.type_id,
            'text': test_plan.text,
            'parent': test_plan.parent_id,
            'env_group': env_group_id,
            'is_active': test_plan.is_active,
            'extra_link': test_plan.extra_link,
            'owner': test_plan.owner,
            'auto_to_plan_owner': test_plan.emailing.auto_to_plan_owner,
            'auto_to_plan_author': test_plan.emailing.auto_to_plan_author,
            'auto_to_case_owner': test_plan.emailing.auto_to_case_owner,
            'auto_to_case_default_tester': test_plan.emailing.auto_to_case_default_tester,
            'notify_on_plan_update': test_plan.emailing.notify_on_plan_update,
            'notify_on_case_update': test_plan.emailing.notify_on_case_update,
        })
        form.populate(product_id=test_plan.product_id)
    context_data = {
        'test_plan': test_plan,
        'form': form,
    }
    return render(request, template_name, context_data)
@require_POST
@permission_required('testplans.add_testplan')
def clone(request):
    """Clone testplan.

    Validates the clone form and creates a copy of the selected plan,
    optionally linking or copying its cases and environment groups, then
    redirects to the new plan.  On validation errors the clone form is
    re-rendered with the current plan.
    """
    if 'plan' not in request.POST:
        messages.add_message(request,
                             messages.ERROR,
                             _('TestPlan is required'))
        # redirect back where we came from
        return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
    plan_id = request.POST.get('plan')
    test_plan = get_object_or_404(TestPlan, pk=int(plan_id))
    post_data = request.POST.copy()
    # Default the clone's name when the user did not supply one.
    if not request.POST.get('name'):
        post_data['name'] = test_plan.make_cloned_name()
    clone_form = ClonePlanForm(post_data)
    clone_form.populate(product_id=request.POST.get('product_id'))
    # if required values are missing we are still going to show
    # the form below, otherwise clone & redirect
    if clone_form.is_valid():
        clone_options = clone_form.cleaned_data
        # Create new test plan.
        new_name = clone_options['name']
        clone_params = dict(
            # Cloned plan properties
            new_name=new_name,
            product=clone_options['product'],
            version=clone_options['product_version'],
            set_parent=clone_options['set_parent'],
            # Related data
            copy_environment_group=clone_options['copy_environment_group'],
            # Link or copy cases
            link_cases=clone_options['link_testcases'],
            copy_cases=clone_options['copy_testcases'],
            default_component_initial_owner=request.user,
        )
        # Unless the user chose to keep the original author, the clone is
        # authored by the requesting user.
        assign_me_as_plan_author = not clone_options['keep_orignal_author']
        if assign_me_as_plan_author:
            clone_params['new_original_author'] = request.user
        assign_me_as_copied_case_author = \
            clone_options['copy_testcases'] and \
            not clone_options['maintain_case_orignal_author']
        if assign_me_as_copied_case_author:
            clone_params['new_case_author'] = request.user
        assign_me_as_copied_case_default_tester = \
            clone_options['copy_testcases'] and \
            not clone_options['keep_case_default_tester']
        if assign_me_as_copied_case_default_tester:
            clone_params['new_case_default_tester'] = request.user
        cloned_plan = test_plan.clone(**clone_params)
        return HttpResponseRedirect(
            reverse('test_plan_url_short', args=[cloned_plan.plan_id]))
    # clone form wasn't valid
    context_data = {
        'test_plan': test_plan,
        'clone_form': clone_form,
    }
    return render(request, 'plan/clone.html', context_data)
def attachment(request, plan_id, template_name='plan/attachment.html'):
    """Manage attached files"""
    test_plan = get_object_or_404(TestPlan, plan_id=plan_id)
    # 'limit' is the upload size cap shown/enforced by the template.
    return render(request, template_name, {
        'test_plan': test_plan,
        'limit': settings.FILE_UPLOAD_MAX_SIZE,
    })
class ReorderCasesView(View):
    """Reorder cases"""

    http_method_names = ['post']

    def post(self, request, plan_id):
        if 'case' not in request.POST:
            return JsonResponse({
                'rc': 1,
                'response': 'At least one case is required to re-order.'
            })

        case_ids = [int(pk) for pk in request.POST.getlist('case')]
        linked_cases = TestCasePlan.objects.filter(case_id__in=case_ids,
                                                   plan=plan_id).only('case_id')
        for linked_case in linked_cases:
            # Sort keys are spaced by 10, following the submitted order.
            linked_case.sortkey = (case_ids.index(linked_case.case_id) + 1) * 10
            linked_case.save()

        return JsonResponse({'rc': 0, 'response': 'ok'})
@method_decorator(permission_required('testplans.change_testplan'), name='dispatch')
class UpdateParentView(View):
    """Updates TestPlan.parent. Called from the front-end."""

    http_method_names = ['post']

    def post(self, request):
        parent_id = int(request.POST.get('parent_id'))
        # A parent_id of 0 means "detach the children from any parent".
        parent_id = None if parent_id == 0 else parent_id

        for child_pk in request.POST.getlist('child_ids[]'):
            child = get_object_or_404(TestPlan, pk=int(child_pk))
            child.parent_id = parent_id
            child.save()

        return JsonResponse({'rc': 0, 'response': 'ok'})
class LinkCasesView(View):
    """Link cases to plan"""

    @method_decorator(permission_required('testcases.add_testcaseplan'))
    def post(self, request, plan_id):
        plan = get_object_or_404(TestPlan.objects.only('pk'), pk=int(plan_id))

        selected_ids = [int(pk) for pk in request.POST.getlist('case')]
        for case in TestCase.objects.filter(case_id__in=selected_ids).only('pk'):
            plan.add_case(case)

        return HttpResponseRedirect(reverse('test_plan_url', args=[plan_id, slugify(plan.name)]))
class LinkCasesSearchView(View):
    """Search cases for linking to plan"""

    template_name = 'plan/search_case.html'

    def get(self, request, plan_id):
        # Show the search forms, pre-filled with the plan's product/version
        # and restricted to CONFIRMED cases by default.
        plan = get_object_or_404(TestPlan, pk=int(plan_id))
        normal_form = SearchCaseForm(initial={
            'product': plan.product_id,
            'product_version': plan.product_version_id,
            'case_status_id': TestCaseStatus.get_CONFIRMED()
        })
        quick_form = QuickSearchCaseForm()
        return render(self.request, self.template_name, {
            'search_form': normal_form,
            'quick_form': quick_form,
            'test_plan': plan,
        })

    def post(self, request, plan_id):
        # Run either the quick or the normal search and list matching cases
        # that are not already linked to the plan.
        plan = get_object_or_404(TestPlan, pk=int(plan_id))
        search_mode = request.POST.get('search_mode')
        if search_mode == 'quick':
            # Bind the submitted data to the quick form; keep the normal
            # form empty so the template can still render both.
            form = quick_form = QuickSearchCaseForm(request.POST)
            normal_form = SearchCaseForm()
        else:
            form = normal_form = SearchCaseForm(request.POST)
            form.populate(product_id=request.POST.get('product'))
            quick_form = QuickSearchCaseForm()
        cases = []
        if form.is_valid():
            cases = TestCase.list(form.cleaned_data)
            cases = cases.select_related(
                'author', 'default_tester', 'case_status', 'priority'
            ).only(
                'pk', 'summary', 'create_date', 'author__email',
                'default_tester__email', 'case_status__name',
                'priority__value'
            ).exclude(
                # Hide cases that are already part of the plan.
                case_id__in=plan.case.values_list('case_id', flat=True))
        context = {
            'test_plan': plan,
            'test_cases': cases,
            'search_form': normal_form,
            'quick_form': quick_form,
            'search_mode': search_mode
        }
        return render(request, self.template_name, context=context)
class DeleteCasesView(View):
    """Delete selected cases from plan"""

    def post(self, request, plan_id):
        plan = get_object_or_404(TestPlan.objects.only('pk'), pk=int(plan_id))

        if 'case' not in request.POST:
            return JsonResponse({
                'rc': 1,
                'response': 'At least one case is required to delete.'
            })

        # Unlink each selected case from the plan (cases themselves survive).
        for case in get_selected_testcases(request).only('pk'):
            plan.delete_case(case=case)

        return JsonResponse({'rc': 0, 'response': 'ok'})
@require_POST
def printable(request):
    """Create the printable copy for plan"""
    plan_pk = request.POST.get('plan', 0)

    # Validate the request: a plan id must be given and must exist.
    error = None
    if not plan_pk:
        error = _('At least one test plan is required for print')
    else:
        try:
            TestPlan.objects.get(pk=plan_pk)
        except (ValueError, TestPlan.DoesNotExist):
            error = _('Test Plan "%s" does not exist') % plan_pk

    if error is not None:
        messages.add_message(request, messages.ERROR, error)
        return HttpResponseRedirect(reverse('core-views-index'))

    # rendering is actually handled by testcases.views.printable()
    return testcases_printable(request)
| jgesim/kiwitcms | tcms/testplans/views.py | views.py | py | 28,207 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "tcms.testplans.forms.NewPlanForm",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "tcms.testplans.models.TestPlan.objects.create",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "tcms.testplans.models.TestPlan.objects",
"line_number": 72,
... |
2920450419 | #coding:utf-8
import urllib
import http.cookiejar
import json
class Qqpush:
pushurl='https://wx.scjtqs.com/qq/push/pushMsg'
def push(self,qq,token,data):
url = self.pushurl+"?token="+token
post={}
post['qq']=qq
post['content']=[{"msgtype":"text","text":data}]
postdata=bytes(json.dumps(post),'utf8')
header = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:32.0) Gecko/20100101 Firefox/32.0",
'Content-Type': 'application/json',
}
req = urllib.request.Request(url=url, data=postdata, headers=header)
cj = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
r = opener.open(req)
response = r.read().decode('utf-8')
jsonret=json.loads(response)
return jsonret | scjtqs2/fqsign | utils/qqpush.py | qqpush.py | py | 861 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.dumps",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "urllib.request.Request",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "http.cookiejar.... |
15573580960 | import argparse
import json
import os
import cv2
import imageio
import numpy as np
import pims
def _get_box(annot_box):
    """Convert an annotation box dict (x, y, width, height) to int (x1, y1, x2, y2)."""
    left, top = annot_box["x"], annot_box["y"]
    width, height = annot_box["width"], annot_box["height"]
    return (int(left), int(top), int(left + width), int(top + height))
def extract_crop_from_image(image, box):
    """Cut box = (x1, y1, x2, y2) out of image and pad it to a square with white."""
    x1, y1, x2, y2 = box
    crop = image[y1:y2, x1:x2]
    h, w = crop.shape[:2]

    # Distribute the height/width difference evenly over the two sides
    # (the extra pixel, if any, goes to the second side).
    diff = abs(h - w)
    first, second = diff // 2, diff - diff // 2
    if h > w:
        pad_spec = ((0, 0), (first, second), (0, 0))
    elif w > h:
        pad_spec = ((first, second), (0, 0), (0, 0))
    else:
        pad_spec = ((0, 0), (0, 0), (0, 0))

    return np.pad(crop, pad_spec, mode="constant", constant_values=255)
def draw_box_on_image(image, box, color=(255, 0, 0), thickness=5):
    """Draw a rectangle given as (x1, y1, x2, y2) onto image via cv2."""
    x1, y1, x2, y2 = map(int, box)
    return cv2.rectangle(image, (x1, y1), (x2, y2), color=color, thickness=thickness)
def draw_border(image, color=(255, 0, 0), thickness=5):
    """Draw a colored border along the outer edge of image."""
    height, width = image.shape[:2]
    return draw_box_on_image(image, [0, 0, width - 1, height - 1],
                             color=color, thickness=thickness)
def scale_im_height(image, H):
    """Resize image to height H while preserving the aspect ratio."""
    im_H, im_W = image.shape[:2]
    new_W = int(1.0 * H * im_W / im_H)
    return cv2.resize(image, (new_W, H))
def visualize_query_set(video_reader, qset, save_height=640):
    """Render one visual-queries set to images.

    Returns (rt_ims, vc_im): the response-track frames (boxes drawn, resized
    to save_height) extended with the plain frames up to the query frame,
    and the visual-crop image topped with a text header.
    """
    qf_fno = qset["query_frame"]
    vc_fno = qset["visual_crop"]["frame_number"]
    last_fno = max(qf_fno, vc_fno)  # NOTE(review): computed but never used
    vc_box = _get_box(qset["visual_crop"])
    rt_fnos = [rf["frame_number"] for rf in qset["response_track"]]
    rt_boxes = [_get_box(rf) for rf in qset["response_track"]]
    oW = qset["visual_crop"]["original_width"]  # NOTE(review): unused; only oH is used
    oH = qset["visual_crop"]["original_height"]
    # Visualize visual crop
    vc_frame = np.copy(video_reader[vc_fno])
    ## Scale up to original resolution
    vc_frame = scale_im_height(vc_frame, oH)
    vc_im = extract_crop_from_image(vc_frame, vc_box)
    ## Add text header with the object title above the crop
    tx_height = 50
    ob_title = qset["object_title"]
    tx_im = get_text_box(ob_title, (tx_height, vc_im.shape[1]))
    vc_im = np.concatenate([tx_im, vc_im], axis=0)
    # Visualize frames in the response track
    rt_ims = []
    for rf_fno, rf_box in zip(rt_fnos, rt_boxes):
        rf_frame = np.copy(video_reader[rf_fno])
        ## Scale up to original resolution
        rf_frame = scale_im_height(rf_frame, oH)
        rf_im = draw_box_on_image(rf_frame, rf_box, color=(0, 255, 0), thickness=8)
        rf_im = draw_border(rf_im, color=(0, 255, 0), thickness=15)
        ## Scale down to save height
        rf_im = scale_im_height(rf_im, save_height)
        rt_ims.append(rf_im)
    # Visualize frames after the response track till the query frame
    post_rt_ims = []
    for i in range(rt_fnos[-1], qf_fno):
        # Concatenate vc_plot_im to the right
        frame = scale_im_height(video_reader[i], save_height)
        post_rt_ims.append(frame)
    rt_ims = rt_ims + post_rt_ims
    return rt_ims, vc_im
def get_mp4_writer(path, fps, output_params=None):
    """Create an imageio h264 mp4 writer.

    Arguments:
    - path: output file path
    - fps: frames per second
    - output_params: extra ffmpeg CLI arguments; defaults to CRF-31 encoding.
    """
    # Avoid a mutable default argument; build the default list per call.
    if output_params is None:
        output_params = ["-crf", "31"]
    writer = imageio.get_writer(
        path,
        codec="h264",
        fps=fps,
        quality=None,
        pixelformat="yuv420p",
        bitrate=0,  # Setting bitrate to 0 is required to activate -crf
        output_params=output_params,
    )
    return writer
def save_video(frames, path, fps):
    """Encode the given frames to an mp4 file at the requested fps."""
    out = get_mp4_writer(path, fps)
    for image in frames:
        out.append_data(image)
    out.close()
def get_text_box(text, shape, fg_color=(255, 255, 255), bg_color=(0, 0, 0)):
    """Render text on a solid-color canvas of the given (height, width) shape."""
    canvas = np.zeros((*shape, 3), dtype=np.uint8)
    canvas[:, :] = bg_color
    canvas = cv2.putText(
        canvas,
        text,
        (2, shape[0] - 10),  # anchor the baseline near the bottom-left corner
        cv2.FONT_HERSHEY_SIMPLEX,
        1.0,  # font scale
        fg_color,
        2,  # line thickness
        cv2.LINE_AA,
    )
    return canvas
def visualize_annotation(clip_path, rt_save_path, crop_save_path, qset):
    """
    Visualizes an annotation from the visual-queries task: writes the
    response-track video to rt_save_path and the crop to crop_save_path.
    """
    reader = pims.Video(clip_path)
    response_frames, crop_image = visualize_query_set(reader, qset)
    save_video(response_frames, rt_save_path, 5.0)
    imageio.imwrite(crop_save_path, crop_image)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--annot-path", type=str, required=True)
    parser.add_argument("--clips-root", type=str, required=True)
    parser.add_argument("--vis-save-root", type=str, default="./videos")
    args = parser.parse_args()
    with open(args.annot_path, "r") as fp:
        annotations = json.load(fp)
    os.makedirs(args.vis_save_root, exist_ok=True)
    # Walk videos -> clips -> annotations -> query sets, rendering each
    # valid query set to a response-track mp4 plus a visual-crop png.
    for v in annotations["videos"]:
        for c in v["clips"]:
            cuid = c["clip_uid"]
            for a_idx, a in enumerate(c["annotations"]):
                for qset_id, qset in a["query_sets"].items():
                    if not qset["is_valid"]:
                        continue
                    # NOTE(review): qf_fno and rt_last_fno are computed here
                    # but not used below.
                    qf_fno = qset["query_frame"]
                    rt_last_fno = max(
                        [rf["frame_number"] for rf in qset["response_track"]]
                    )
                    rtsp = f"{args.vis_save_root}/{cuid}_{a_idx:05d}_{qset_id}_rt.mp4"
                    csp = f"{args.vis_save_root}/{cuid}_{a_idx:05d}_{qset_id}_crop.png"
                    clip_path = f"{args.clips_root}/{cuid}.mp4"
                    # Skip query sets whose source clip is not on disk.
                    if not os.path.isfile(clip_path):
                        print(f"======> Clip {clip_path} is missing...")
                        continue
                    visualize_annotation(clip_path, rtsp, csp, qset)
| EGO4D/episodic-memory | VQ2D/visualizations/visualize_annotations.py | visualize_annotations.py | py | 5,766 | python | en | code | 80 | github-code | 36 | [
{
"api_name": "numpy.pad",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 65... |
10895202904 | from __future__ import unicode_literals
import six
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.

    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.

    This was taken from the Django source code.
    """
    # On Python 3 the class is returned untouched.
    if not six.PY2:
        return klass
    if '__str__' not in klass.__dict__:
        raise ValueError("@python_2_unicode_compatible cannot be applied "
                         "to %s because it doesn't define __str__()." %
                         klass.__name__)
    # Move the text-returning __str__ to __unicode__ and make __str__
    # return UTF-8 bytes, per Python 2 conventions.
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """ Forces the argument s into either a str object (Python 3) or a unicode
    object (Python 2). Taken and modified from the Django source code. """
    # Already text: nothing to do.
    if isinstance(s, six.text_type):
        return s
    # Native byte string: decode it.
    if isinstance(s, six.string_types[0]):
        return s.decode(encoding, errors)
    if six.PY3:
        if isinstance(s, bytes):
            return six.text_type(s, encoding, errors)
        return six.text_type(s)
    # Python 2 non-string objects: prefer their __unicode__, otherwise
    # go through bytes() and decode.
    if hasattr(s, '__unicode__'):
        return six.text_type(s)
    return six.text_type(bytes(s), encoding, errors)
| akloster/table-cleaner | table_cleaner/utils.py | utils.py | py | 1,509 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "six.PY2",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "six.text_type",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "six.string_types",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "six.PY3",
"... |
6527845396 | from django.shortcuts import render,get_object_or_404
from .models import Post,Category
from markdown import markdown
from django.views.generic import ListView
from comment.forms import CommentForm
from django.http import HttpResponse
# def index(request):
# post_list = Post.objects.all()
# return render(request,'blog/index.html',context={'post_list':post_list})
# # return HttpResponse('<h1>hello world</h1>')
class IndexView(ListView):
    """List all posts on the blog index page."""

    model = Post
    # Bug fix: the template path used dots ('blog.index.html'); Django's
    # template loaders expect a directory separator, matching the
    # 'blog/index.html' path used by the archives and category views below.
    template_name = 'blog/index.html'
    # Context variable name holding the post list; must match the name used
    # in the template.
    context_object_name = 'post_list'
def detail(request, pk):
    """Display a single post, rendering its Markdown body and its comments."""
    post = get_object_or_404(Post, pk=pk)
    post.increase_views()
    # Render the Markdown body with tables/fenced-code extras, syntax
    # highlighting and a table of contents.
    post.body = markdown(post.body, extensions=[
        'markdown.extensions.extra',
        'markdown.extensions.codehilite',
        'markdown.extensions.toc',
    ])
    context = {
        'post': post,
        'form': CommentForm(),
        'comment_list': post.comment_set.all(),
    }
    return render(request, 'blog/detail.html', context=context)
def archives(request, year, month):
    """List posts created in the given year and month."""
    posts = Post.objects.filter(created_time__year=year,
                                created_time__month=month)
    return render(request, 'blog/index.html', context={'post_list': posts})
def category(request, pk):
    """List posts belonging to the given category."""
    cate = get_object_or_404(Category, pk=pk)
    posts = Post.objects.filter(category=cate)
    return render(request, 'blog/index.html', context={'post_list': posts})
| Sunnysunflowers/danjo | blogproject/blog/views.py | views.py | py | 1,581 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.views.generic.ListView",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "models.Post",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 21,
"usage_type": "call"
},
{
"api_... |
40395330066 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
# Data preprocessing
data = pd.read_csv("data.csv")
# Converting Pandas dataframe to numpy array (column vectors)
X = data.x.values.reshape(-1, 1)
Y = data.y.values.reshape(-1, 1)
# Add bias
m = X.shape[0]  # sample count
bias = np.ones((m, 1))
X_train = np.append(bias, X, axis=1)
# Add x^2 values: the design matrix columns become [1, x, x^2] for a
# degree-2 polynomial fit.
x2 = X**2
X_train = np.append(X_train, x2, axis=1)
Y_train = np.array(Y)
# Initial Variables: all-zero parameters, learning rate and iteration budget.
theta = np.array([[0, 0, 0]]).reshape(-1, 1)
alpha = 0.0001
iterations = 100000
# Cost function
def cost_function(X, Y, B):
    """Mean squared error cost J = sum((X.B - Y)^2) / (2m)."""
    residuals = X.dot(B) - Y
    samples = Y.shape[0]
    return np.sum(residuals ** 2) / (2 * samples)
# Sanity check: cost with the initial all-zero parameters.
print("Initial cost", cost_function(X_train, Y_train, theta))
# Gradient Descent Algorithm
def gradient_descent(X, Y, theta, alpha, iterations):
    """Run batch gradient descent; return final theta and per-iteration costs."""
    m = len(Y)
    cost_history = [0] * iterations
    for step in range(iterations):
        # Prediction error for the current parameters.
        loss = X.dot(theta) - Y
        # Move against the gradient of the MSE cost, scaled by alpha.
        theta = theta - alpha * (X.T.dot(loss) / m)
        cost_history[step] = cost_function(X, Y, theta)
    return theta, cost_history
# 100000 Iterations of batch gradient descent.
newTheta, cost_history = gradient_descent(
    X_train, Y_train, theta, alpha, iterations)
print("New theta", newTheta)
print("Final Cost", cost_history[-1])
# Check that gradient descent decreases the cost on each iteration.
plt.plot(range(iterations), cost_history)
plt.title("Gradient Descent")
plt.xlabel('Iteration')
plt.ylabel('Cost')
plt.show()
# Predicted Values from the fitted parameters.
Y_pred = X_train.dot(newTheta)
# Model Evaluation
def rmse(Y, Y_pred):
    """Root mean squared error between Y and Y_pred (column vectors)."""
    squared_errors = (Y - Y_pred) ** 2
    # Builtin sum over a 2D array sums the rows, yielding a length-1 array.
    return np.sqrt(sum(squared_errors) / Y.shape[0])
def r2_score(Y, Y_pred):
    """Coefficient of determination R^2 = 1 - SS_res / SS_tot."""
    mean_y = np.mean(Y)
    ss_res = sum((Y - Y_pred) ** 2)
    ss_tot = sum((Y - mean_y) ** 2)
    return 1 - (ss_res / ss_tot)
# Print Scores
print("RMSE = ", rmse(Y_train, Y_pred))
print("R2 Score = ", r2_score(Y_train, Y_pred))
# Visualization
# Plotting Regression Line (predicted values against the raw x column)
plt.plot(X, Y_pred, color='#c93e4e', label='Regression Line')
# Plotting Scatter Points (the observed data)
plt.scatter(X, Y, c='#54a774', label='Scatter Plot')
plt.xlabel('Size')
plt.ylabel('Price')
plt.legend()
plt.show()
| arnakoguzhan/machine-learning | 3-polynomial-regression/plr_from_scratch_GD.py | plr_from_scratch_GD.py | py | 2,424 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_numbe... |
74569463783 | import sys, random, string, poplib
from PyQt5 import QtCore, uic
from PyQt5.QtWidgets import QApplication, QComboBox, \
QPushButton, QLineEdit, QLabel
def on_cross_pushbutton_clicked():
    """Run the selected crossover method and display the offspring segments."""
    if method_combo_box.currentText() == "Corte Simples":
        offsprings = simple_cut_crossover()
        # Simple cut produces only two segments per child; hide the third label.
        son1_label_3.setVisible(False)
        son2_label_3.setVisible(False)
    else:
        offsprings = pmx_crossover()
        son1_label_3.setVisible(True)
        son2_label_3.setVisible(True)
    # Offspring tuple layout: (son1 seg1, son1 seg2, son1 seg3,
    #                          son2 seg1, son2 seg2, son2 seg3).
    son1_label_1.setText(offsprings[0])
    son1_label_2.setText(offsprings[1])
    son1_label_3.setText(offsprings[2])
    son2_label_1.setText(offsprings[3])
    son2_label_2.setText(offsprings[4])
    son2_label_3.setText(offsprings[5])
def on_method_combobox_current_text_changed():
    """Reset the parent chromosome inputs to defaults for the chosen method."""
    if method_combo_box.currentText() == "PMX":
        # PMX operates on letter permutations.
        mask, father_default, mother_default = 'AAAAAAAAAA', 'ABCDEFGHIJ', 'JHIGFEDCBA'
    else:
        # Simple cut operates on binary strings.
        mask, father_default, mother_default = 'BBBBBBBBBB', '0000000000', '1111111111'
    father_line_edit.setInputMask(mask)
    father_line_edit.setText(father_default)
    mother_line_edit.setInputMask(mask)
    mother_line_edit.setText(mother_default)
    print('combo box changed')
def pmx_crossover():
    """Partially-mapped crossover (PMX) on the two parent chromosomes from the UI.

    Picks two random cut points, swaps the middle segments between parents
    and then repairs duplicated genes in the outer segments via the mapping
    defined by the swapped segments.
    """
    cromossomopai = father_line_edit.text()
    cromossomomae = mother_line_edit.text()
    # Two random cut points delimiting the middle segment.
    corte1 = random.randint(0, len(cromossomopai) - 2)
    corte2 = random.randint(corte1 + 1, len(cromossomomae) - 1)
    cortepai = ''
    cortemae = ''
    corte1pai = ''
    corte1mae = ''
    corte2pai = ''
    corte2mae = ''
    # Split both parents into head / middle / tail segments.
    for i in range(0, corte1):
        cortepai = cortepai + (cromossomopai[i])
        corte1mae = corte1mae + (cromossomomae[i])
    for i in range(corte1, corte2):
        cortemae = cortemae + (cromossomomae[i])
        corte1pai = corte1pai + (cromossomopai[i])
    for i in range(corte2, len(cromossomopai)):
        corte2pai = corte2pai + (cromossomopai[i])
        corte2mae = corte2mae + (cromossomomae[i])
    # Swap repeated characters (PMX repair step).
    # NOTE(review): str.replace replaces ALL occurrences — this is only safe
    # when each gene appears at most once per chromosome (permutations), as
    # enforced by the UI defaults; verify before reusing elsewhere.
    for i in range(0, len(cortemae)):
        for j in range(0, len(cortepai)):
            if (cortemae[i] == cortepai[j]):
                cortepai = cortepai.replace(cortepai[j], corte1pai[i])
            if (corte1pai[i] == corte1mae[j]):
                corte1mae = corte1mae.replace(corte1mae[j], cortemae[i])
        for h in range(0, len(corte2pai)):
            if (cortemae[i] == corte2pai[h]):
                corte2pai = corte2pai.replace(corte2pai[h], corte1pai[i])
            if (corte1pai[i] == corte2mae[h]):
                corte2mae = corte2mae.replace(corte2mae[h], cortemae[i])
    # This function returns 6 values.  Their order must match the son label
    # assignments in on_cross_pushbutton_clicked.
    return cortepai, cortemae, corte2pai, corte1mae, corte1pai, corte2mae
def simple_cut_crossover():
    """Single-point crossover on the two parent chromosomes from the UI."""
    father = father_line_edit.text()
    mother = mother_line_edit.text()
    # Random cut position: genes before it come from one parent, after it
    # from the other.
    cut = random.randint(0, len(father) - 1)
    father_head, father_tail = father[:cut], father[cut:]
    mother_head, mother_tail = mother[:cut], mother[cut:]
    # Offspring 1 = father head + mother tail; offspring 2 = mother head +
    # father tail; third segment slots are unused for this method.
    return father_head, mother_tail, '', mother_head, father_tail, ''
if __name__ == "__main__":
    QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
    app = QApplication(sys.argv)
    # Loading widgets elements from ui file
    window = uic.loadUi("crossover_operation.ui")
    window.show()
    # Getting widgets elements (bound at module level so the handlers above
    # can reach them as globals)
    father_line_edit = window.findChild(QLineEdit, 'fatherLineEdit')
    mother_line_edit = window.findChild(QLineEdit, 'motherLineEdit')
    son1_label_1 = window.findChild(QLabel, 'son1Label1')
    son1_label_2 = window.findChild(QLabel, 'son1Label2')
    son1_label_3 = window.findChild(QLabel, 'son1Label3')
    son2_label_1 = window.findChild(QLabel, 'son2Label1')
    son2_label_2 = window.findChild(QLabel, 'son2Label2')
    son2_label_3 = window.findChild(QLabel, 'son2Label3')
    method_combo_box = window.findChild(QComboBox, 'methodComboBox')
    cross_push_button = window.findChild(QPushButton, 'crossPushButton')
    # Connecting signals to the handlers defined above
    cross_push_button.clicked.connect(on_cross_pushbutton_clicked)
    method_combo_box.currentTextChanged.connect(on_method_combobox_current_text_changed)
    sys.exit(app.exec_())
| gabbarco/IA-Projects-2022 | 7_crossover_operation/crossover_operation.py | crossover_operation.py | py | 4,508 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "random.randint",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QCoreApplic... |
2654931278 | import matplotlib.pyplot as graph
from usrfuncs import *
from time import *
def desmos(FUNC, X_MIN, X_MAX):
    """Plot each function string FUNC[c] over the interval [X_MIN[c], X_MAX[c]].

    Each function is sampled at 1000 evenly spaced points and added to a
    single matplotlib figure, which is shown unless every interval was
    rejected. Returns 0.

    NOTE(review): the user expression is evaluated with eval() and sees the
    loop variable `x` by name, so `x` must not be renamed; `streplace` comes
    from usrfuncs — presumably it rewrites the user's syntax into valid
    Python; confirm.
    """
    error = 0
    # All three input lists must have the same length.
    if (len(FUNC) != len(X_MIN)) or (len(FUNC) != len(X_MAX)) or (len(X_MIN) != len(X_MAX)):
        exit('Недопустимые вводные данные!')
    for c in range(len(FUNC)):
        t = time()
        # Skip empty or inverted intervals, but remember the failure so the
        # figure is suppressed only if *every* function was skipped.
        if (X_MAX[c] <= X_MIN[c]):
            print('Недопустимый интервал сканирования для %s-й функи, пропускаем её построение.' % (c + 1))
            error += 1
            continue
        X, Y = [], []; x, percents = X_MIN[c], 0
        while X_MIN[c] <= x <= X_MAX[c]:
            X.append(x)
            Y.append(eval(streplace(FUNC[c], 'there')))
            # 1000 steps * 0.1 = 100% progress shown on one updating line.
            percents += 0.1
            print('Построение ' + str(c + 1) + '-й функи: ' + f'{round(percents, 2)}' + '%\r', end = '', flush = True)
            x += (X_MAX[c] - X_MIN[c]) / 10 ** 3
        graph.plot(X, Y)
        print('Построение ' + str(c + 1) + '-й функи завершено за %s сек.' % round((time() - t), 3))
    if error != len(FUNC):
        graph.title('График фунок: ' + str(FUNC))
        graph.show()
    return 0
# Interactive driver: collect the functions and their intervals, then plot.
try:
    count = int(input('Количество исследуемых фунок: '))
    FUNC, X_MIN, X_MAX = [], [], []
    for c in range(count):
        FUNC.append(str(input('Введите %s-ю функу: ' % (c + 1))))
        X_MIN.append(float(input('Введите начало области определения %s-й функи: ' % (c + 1))))
        X_MAX.append(float(input('Введите конец области определения %s-й функи: ' % (c + 1))))
    desmos(FUNC, X_MIN, X_MAX)
except KeyboardInterrupt:
    # Ctrl-C plots whatever was entered so far.
    # NOTE(review): if the interrupt arrives before FUNC is assigned, this
    # handler itself raises NameError — confirm whether that is acceptable.
    desmos(FUNC, X_MIN, X_MAX)
except OverflowError:
    exit('При вычислении невозможно обработать слишком большие числа ...')
except ValueError as error:
    # Distinguish math-domain failures (e.g. sqrt of a negative) from other
    # value errors such as malformed numeric input.
    if str(error) == 'math domain error':
        exit('Заданная область определения выходит за рамки определенной области определения для заданной функи ...')
    exit('Произошла ошибка при вычислении ...')
except NameError:
    exit('Поддерживается только следующие функции и константы: pi, e, pf(x, m), relat(x) и другие стандартные функции ...')
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matp... |
19839810739 | import torch
import torch.nn as nn
def test_reflectionPad(padding):
m = nn.ReflectionPad2d(padding)
input = torch.arange(16, dtype=torch.float).reshape(1, 1, 4, 4)
out = m(input)
return out
if __name__ == '__main__':
    print(test_reflectionPad(1))
    # Demo: compare explicit ReflectionPad2d + unpadded 1x1 conv against a
    # conv built with built-in (zero) padding but identical weights. The
    # printed max abs difference shows the two padding schemes disagree.
    x = torch.arange(4, dtype=torch.float).reshape(1, 1, 2, 2)
    conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=1)
    pad = nn.ReflectionPad2d(padding=1)
    out = pad(x)
    out = conv(out)
    conv_pad = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=1, stride=1, padding=1)
    # Copy the weights so only the padding strategy differs.
    conv_pad.load_state_dict(conv.state_dict())
    out_conv_pad = conv_pad(x)
    print((out - out_conv_pad).abs().max())
| AnhVietPham/Deep-Learning | DL-Pytorch/padding/main.py | main.py | py | 696 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.ReflectionPad2d",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.arange",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_n... |
4004966963 | #!/usr/local/bin/python
"""
File allowing to create all kind of useful files like saving targets's primers pair in bed file or
save target object into file for example.
"""
import dill
from config import *
def create_fasta_file(targets):
    """Write all target sequences to the configured FASTA file.

    :param targets: iterable of target objects.
    :return: nothing; (re)writes the file at ``targets_sequences_file_path``
        with one ``>chromosome:position`` header per target sequence.
    """
    with open(targets_sequences_file_path, "w") as fasta:
        for t in targets:
            header = ">" + t.noChromosome + ":" + t.mutPosition
            fasta.write(header + "\n" + t.sequence + "\n")
def create_fasta_file_primer(primer_seq):
    """Write a single primer sequence to the BLAST input FASTA file.

    :param primer_seq: primer sequence string.
    :return: nothing; (re)writes ``primer_to_blast_file_path`` with one
        record named ``primer``.
    """
    with open(primer_to_blast_file_path, "w") as fasta:
        fasta.write(">primer\n%s\n" % primer_seq)
def create_bed_file(targets):
    """Write chromosomal ranges of each target's best primers pair to a BED file.

    Targets without any primers pair are skipped and do not advance the
    record counter.

    :param targets: iterable of target objects.
    :return: nothing; (re)writes ``chrom_file_path``.
    """
    counter = 1
    with open(chrom_file_path, "w") as bed:
        for target in targets:
            if not target.all_primers_pairs:
                continue
            best = target.all_primers_pairs[0]
            bed.write("#%s\n#bioImp: %s\n#varID: %s\n%s\t"
                      % (counter, target.bio_imp, target.var_key, best.no_chromosome))
            # TFGR holds the range positions; each is tab-terminated.
            bed.write("".join(str(position) + "\t" for position in best.TFGR))
            bed.write("\n")
            counter += 1
def create_bed_file_chromosome(chromosome):
    """Write a BED file from a list of primers pairs (multiplex helper).

    :param chromosome: iterable of primers pair objects.
    :return: nothing; (re)writes ``chrom_file_path`` with one chromosomal
        range per primers pair.
    """
    with open(chrom_file_path, "w") as bed:
        for counter, pair in enumerate(chromosome, start=1):
            target = pair.left_primer.target
            bed.write("#%s\n#mutation position: %s\n" % (counter, target.mutation_pos))
            bed.write("%s\t%s\t%s\n" % (target.no_chromosome, pair.TFGR[0], pair.TFGR[1]))
def save_targets_into_file(targets):
    """Serialize targets so primers pairs need not be regenerated later.

    :param targets: iterable of target objects.
    :return: nothing; writes each target sequentially (dill) to
        ``serialisation_file_path``.
    """
    with open(serialisation_file_path, "wb") as out_stream:
        for t in targets:
            dill.dump(t, out_stream)
def load_targets_from_file(numberOfTargets):
    """Load previously saved targets from ``serialisation_file_path``.

    :param numberOfTargets: how many target objects the file contains.
    :return: list of the deserialized target objects, in saved order.
    """
    with open(serialisation_file_path, 'rb') as f:
        return [dill.load(f) for _ in range(numberOfTargets)]
| gloubsi/oncodna_primers_design | code/fileCreation.py | fileCreation.py | py | 3,515 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dill.dump",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "dill.load",
"line_number": 100,
"usage_type": "call"
}
] |
10848555614 | from datetime import datetime
from uuid import UUID
import uuid
from fastapi import HTTPException
from starlette.responses import Response
from starlette import status
from http import HTTPStatus
from ..app import app
from ..api.schemas import CreateOrderSchema, GetOrderSchema, GetOrdersSchema, Status
# In-memory order store shared by all endpoints; contents are lost on restart.
ORDERS = []
# Example order payload kept for reference; not used by the endpoints below.
orders = {
    'id': 'ff0f1355-e821-4178-9567-550dec27a373',
    'status': 'delivered',
    'created': datetime.utcnow(),
    'order': [
        {
            'product': 'cappuccino',
            'size': 'medium',
            'quantity': 1
        }
    ]
}
@app.get('/orders', response_model=GetOrdersSchema)
def get_orders():
    """List every order currently held in the in-memory store."""
    payload = {'orders': ORDERS}
    return payload
@app.post('/orders', status_code=status.HTTP_201_CREATED, response_model=GetOrderSchema)
def create_order(order_detail: CreateOrderSchema):
    """Create a new order with a fresh UUID and 'created' status; 201.

    Bug fix: the response_model was GetOrdersSchema (the list wrapper with an
    'orders' key), but this endpoint returns a single order dict, so response
    validation could never succeed. It now uses GetOrderSchema.
    """
    order = order_detail.dict()
    order['id'] = uuid.uuid4()
    order['created'] = datetime.utcnow()
    order['status'] = 'created'
    ORDERS.append(order)
    return order
@app.get('/orders/{order_id}')
def get_order(order_id: UUID):
    """Fetch a single order by UUID, or respond 404 if it does not exist."""
    # Linear scan of the in-memory store, comparing UUIDs.
    matching = next((o for o in ORDERS if o['id'] == order_id), None)
    if matching is not None:
        return matching
    # No match: surface a 404 with a descriptive message.
    raise HTTPException(
        status_code=404,
        detail=f'Order with ID {order_id} not found'
    )
@app.put('/orders/{order_id}')
def update_order(order_id: UUID, order_details: CreateOrderSchema):
    """Replace an order's details in place; 404 when the id is unknown.

    Bug fix: the route was registered as 'orders/{order_id}' without the
    leading slash, so requests to /orders/{order_id} could never reach it.
    """
    for order in ORDERS:
        if order['id'] == order_id:
            order.update(order_details.dict())
            return order
    raise HTTPException(
        status_code=404,
        detail=f'Order with ID {order_id} not found'
    )
@app.delete(
    '/orders/{order_id}',
    status_code=status.HTTP_204_NO_CONTENT,
    # Overriding response_class suppresses the default JSON body rendering.
    response_class=Response
)
def delete_order(order_id: UUID):
    """Remove an order from the store, answering 204; 404 when unknown."""
    for position, order in enumerate(ORDERS):
        if order['id'] == order_id:
            ORDERS.pop(position)
            return Response(status_code=HTTPStatus.NO_CONTENT.value)
    raise HTTPException(
        status_code=404,
        detail=f"Order with ID {order_id} not found."
    )
@app.post('/orders/{order_id}/cancel')
def cancel_order(order_id: UUID):
    """Mark an order as cancelled; 404 when the id is unknown.

    Bug fix: the status update used '==' (a comparison whose result was
    discarded) instead of '=', so the order's status was never changed.
    """
    for order in ORDERS:
        if order['id'] == order_id:
            order['status'] = Status.cancelled.value
            return order
    raise HTTPException(
        status_code=404,
        detail=f"Order with ID {order_id} not found."
    )
@app.post('/orders/{order_id}/pay')
def pay_order(order_id: UUID):
    """Mark an order as paid (moves it to the 'progress' status); 404 when
    the id is unknown."""
    target = next((o for o in ORDERS if o['id'] == order_id), None)
    if target is None:
        raise HTTPException(
            status_code=404,
            detail=f"Order with ID {order_id} not found."
        )
    target['status'] = Status.progress.value
    return target
{
"api_name": "datetime.datetime.utcnow",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "app.app.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "app.app",
... |
13898975323 | import numpy as np
import functools
from functools import total_ordering
import bisect
import matplotlib.pyplot as plt
# Ryan Filgas
# AI Fall 2022
# Board size (number of queens/columns) and the best possible score:
# C(8, 2) = 28 mutually non-attacking queen pairs.
STATESIZE = 8
MAXFITNESS = 28
# Allow for sorting of individuals
@functools.total_ordering
class member:
    """A GA individual: a board `position` and its integer `fitness`.

    Ordering compares by fitness only (functools.total_ordering derives the
    remaining comparisons from __lt__ and __eq__), so individuals can be
    kept in a sorted pool via bisect.insort.
    """
    def __init__(self, fitness, position):
        self.fitness, self.position = fitness, position
    def __lt__(self, other):
        return (self.fitness) < (other.fitness)
    def __eq__(self, other):
        # Bug fix: the original compared the nonexistent attribute
        # `self.fitnessn`, so any equality check raised AttributeError.
        return (self.fitness) == (other.fitness)
    def copy(self):
        """Shallow copy: the position array is shared, not duplicated."""
        return member(self.fitness, self.position)
#Check pairs of queens that aren't attacking.
def fitness(queens):
    """Return MAXFITNESS minus the number of attacking queen pairs.

    Two queens attack each other when they share a row (equal values) or a
    diagonal (column distance equals value distance). A perfect 8-queens
    board scores MAXFITNESS (28 = C(8, 2)) non-attacking pairs.

    Fix: removed the unused local `num` from the original implementation.
    """
    n = len(queens)
    attacking = sum(
        1
        for i in range(n)
        for j in range(i + 1, n)
        if queens[i] == queens[j] or (j - i) == abs(queens[j] - queens[i])
    )
    return MAXFITNESS - attacking
# Generate a population for start.
def generate_pop(population):
    """Create `population` random boards (values 1..STATESIZE) and return
    them as a fitness-sorted pool of `member`s together with the summed
    initial fitness: (sorted_pool, total_fitness)."""
    pool = list()
    fitness_total = 0
    boards = np.random.randint(1, high=(STATESIZE+1), size=(population, STATESIZE), dtype=int)
    for board in boards:
        score = fitness(board)
        fitness_total += score
        bisect.insort(pool, member(score, board))
    return pool, fitness_total
# create children using crossover and mutation
def generate_successors(child1, child2, MutationPct):
    """Produce two children via single-point crossover plus optional mutation.

    Each child independently mutates with probability MutationPct: one gene
    is replaced by a random value, then clamped into [1, STATESIZE].
    Returns two new `member` objects with freshly computed fitness.

    NOTE(review): `max` shadows the builtin here; left unchanged to keep the
    exact RNG call sequence intact.
    """
    max = len(child1.position)
    # Crossover point in [1, max-2] so both parents contribute genes.
    crossover = np.random.randint(1, high=max-1, dtype=int)
    new1 = np.array(list(child1.position[0:crossover]) + list(child2.position[crossover:max]))
    new2 = np.array(list(child2.position[0:crossover]) + list(child1.position[crossover:max]))
    # mutate children randomly
    if np.random.choice([0,1],1, p=(1-MutationPct, MutationPct))[0] == 1:
        mutate = np.random.randint(0, high=max, dtype=int)
        new1[mutate] = np.random.randint(0, high=max+1, dtype=int)
        # Clamp the mutated gene into the valid board range [1, STATESIZE].
        if new1[mutate] <= 0:
            new1[mutate] = 1
        if new1[mutate] >= STATESIZE:
            new1[mutate] = STATESIZE
    if np.random.choice([0,1],1, p=(1-MutationPct, MutationPct))[0] == 1:
        mutate = np.random.randint(0, high=max, dtype=int)
        new2[mutate] = np.random.randint(0, high=max+1, dtype=int)
        if new2[mutate] <= 0:
            new2[mutate] = 1
        if new2[mutate] >= STATESIZE:
            new2[mutate] = STATESIZE
    # Create member objects with the new specimens
    mutated1 = member(fitness(new1), new1)
    mutated2 = member(fitness(new2), new2)
    return mutated1, mutated2
def group_fitness(population):
    """Return (average fitness, total fitness) for a pool of individuals.

    Fix: the original used locals named `sum` and `max`, shadowing the
    builtins; the accumulation now uses builtin sum() and len().
    """
    total = sum(individual.fitness for individual in population)
    return total / len(population), total
def run_game(PopulationSize, NumIterations, MutationPct):
    """Run the genetic algorithm for NumIterations generations.

    Each generation: compute selection probabilities proportional to
    fitness, draw parent pairs, and produce a full replacement population of
    children via generate_successors. Tracks the per-generation average
    fitness and the best individual of each generation.

    Returns (startingFitness, endingFitness, childStart, childEnd,
    finalRound, average_fitness, top2List, lastChild).
    """
    sorted_pool, initial_fitness = generate_pop(PopulationSize)
    # Best individual of the initial population (np.max uses member ordering).
    childStart = np.max(sorted_pool).copy()
    startingFitness = initial_fitness/PopulationSize
    average_fitness = []
    top2List = []
    for i in range(NumIterations):
        new_pop = []
        avg_fitness, total_fitness = group_fitness(sorted_pool)
        average_fitness.append(avg_fitness)
        # Get probabilities and select parents
        probabilities = [(k.fitness/total_fitness) for k in sorted_pool]
        ##################
        pop_control = int(PopulationSize/2)
        for j in range(pop_control):
            # Set to arbitrarily large number so loop begins.
            parent1_idx, parent2_idx = 999999999, 999999999
            # Assign indexes based on distribution. They must not choose the same individual.
            # As the array will be one smaller after the first index is popped off, we have to check
            # that the next thing popped off doesn;t fall offf the array.
            while parent2_idx == parent1_idx or parent2_idx >= PopulationSize-1 or parent1_idx >= PopulationSize:
                parent1_idx = int(np.random.choice(PopulationSize, 1, replace=False, p=probabilities))
                parent2_idx = int(np.random.choice(PopulationSize, 1, replace=False, p=probabilities))
            # Retrieve parents
            parent1 = sorted_pool[parent1_idx]
            parent2 = sorted_pool[parent2_idx]
            #generate children
            child_a, child_b = generate_successors(parent1, parent2, MutationPct)
            new_pop.append(child_a)
            new_pop.append(child_b)
        # Full generational replacement: children replace the old pool.
        sorted_pool = new_pop
        new_pop = []
        top2List.append(np.max(sorted_pool))
    childEnd = np.max(sorted_pool).fitness
    finalRound = NumIterations
    lastChild = np.max(sorted_pool)
    endingFitness = average_fitness[len(average_fitness)-1]
    return startingFitness, endingFitness, childStart, childEnd, finalRound, average_fitness, top2List, lastChild
# Experiment parameters: pool size, generations, per-child mutation chance.
PopulationSize = 10
NumIterations= 100
MutationPct = .05
startingFitness, endingFitness, childStart, childEnd, finalRound, averageFitness, top2List, lastChild = run_game(PopulationSize, NumIterations, MutationPct)
print("\n")
print("Starting Fitness: ", startingFitness)
print("Ending Fitness: ", endingFitness)
print("Fitness of best start: ", childStart.fitness)
print("Fitness of best end: ", childEnd)
print("Final Round Was: ", len(averageFitness))
print("\n\n")
# Gather samples
# Take the per-generation best at 8 points spread across the run.
check = [top2List[int(len(top2List)/i)-1] for i in range(1,9)]
#Print the first best, the middle 80% and the final best.
print(list(top2List[0].position))
print([list(i.position) for i in check])
print(list(lastChild.position))
# Plot the average-fitness curve over the generations.
plt.plot(np.arange(len(averageFitness)),averageFitness)
plt.title('Average Fitness (Goal: 28) \nPopulation: ' + str(PopulationSize) + '\nMutation Pct: ' + str(MutationPct))
plt.xlabel('Iterations')
plt.ylabel('Non-Attacking Queen Pairs')
plt.show()
{
"api_name": "functools.total_ordering",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "... |
2185921772 | import ping3
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.schedulers.base import STATE_RUNNING
import myhome.tgbot
import myhome.core
class Cron:
    """Scheduler wrapper: runs configured home-automation jobs and a ping
    based presence monitor that triggers back-home / leave-home command sets.
    """
    def __init__(self):
        # Presence starts as "at home"; leave_home_count_down debounces
        # missed pings before declaring the user away.
        self.presence = True
        self.leave_home_count_down = 0
        self.leave_home_count_down_max = myhome.config['monitor']['leave_home_count_down_max']
        self.scheduler = AsyncIOScheduler(
            job_defaults={'misfire_grace_time': 60},
            timezone=myhome.config.get('timezone', 'Asia/Tokyo')
        )

    def run(self):
        """Register the monitor job (if enabled) and every enabled schedule
        entry, then start the scheduler."""
        if myhome.config['monitor']['enabled']:
            # Ping check runs every `interval` seconds between 08:00-23:59.
            self.scheduler.add_job(
                self.back_home_cron,
                'cron',
                hour='8-23',
                second=f'*/{myhome.config["monitor"]["interval"]}',
            )
        for each_schedule in myhome.config.get('schedule'):
            if not each_schedule['enabled']:
                continue
            # 'group' runs a command group, 'single' one command; both get
            # the Telegram send_message callback for reporting.
            if each_schedule['type'] == 'group':
                self.scheduler.add_job(
                    myhome.core.run_group_command, 'cron', (each_schedule['command'], myhome.tgbot.send_message,),
                    day_of_week=each_schedule['day_of_week'],
                    hour=each_schedule['hour'],
                    minute=each_schedule['minute']
                )
            elif each_schedule['type'] == 'single':
                self.scheduler.add_job(
                    myhome.core.run_single_command, 'cron', (each_schedule['command'], myhome.tgbot.send_message,),
                    day_of_week=each_schedule['day_of_week'],
                    hour=each_schedule['hour'],
                    minute=each_schedule['minute']
                )
        self.scheduler.start()

    async def back_home_cron(self):
        """Presence probe: ping the monitored IP and dispatch accordingly."""
        if ping3.ping(myhome.config['monitor']['ip_addr'], timeout=1):
            await self.back_home()
        else:
            await self.leave_home()

    async def back_home(self):
        """Handle a successful ping: reset the away counter and, on a
        transition from away to home, run the configured on_commands."""
        if self.leave_home_count_down > 0:
            self.leave_home_count_down = 0
        if self.presence:
            # still at home
            return
        self.presence = True
        myhome.tgbot.send_message('back_home: executing back home commands')
        for each_schedule in myhome.config['monitor']['on_commands']:
            if not each_schedule['enabled']:
                continue
            if each_schedule['type'] == 'group':
                await myhome.core.run_group_command(each_schedule['command'])
            elif each_schedule['type'] == 'single':
                await myhome.core.run_single_command(each_schedule['command'])
        return

    async def leave_home(self):
        """Handle a failed ping: count consecutive misses and, once the
        debounce threshold is exceeded while at home, run off_commands."""
        if not self.presence:
            # still not at home
            return
        if self.leave_home_count_down < self.leave_home_count_down_max:
            self.leave_home_count_down += 1
            return
        self.presence = False
        myhome.tgbot.send_message('leave_home: executing leave home commands')
        for each_schedule in myhome.config['monitor']['off_commands']:
            if not each_schedule['enabled']:
                continue
            if each_schedule['type'] == 'group':
                await myhome.core.run_group_command(each_schedule['command'])
            elif each_schedule['type'] == 'single':
                await myhome.core.run_single_command(each_schedule['command'])
        return

    def is_enabled(self):
        """True while the scheduler is in the RUNNING state."""
        return self.scheduler.state == STATE_RUNNING

    async def enable(self, logging_function=None):
        """Resume the (paused) scheduler, optionally logging via the given
        awaitable callback."""
        if callable(logging_function):
            await logging_function('enabling cron')
        self.scheduler.resume()

    async def disable(self, logging_function=None):
        """Pause the scheduler, optionally logging via the given awaitable
        callback."""
        if callable(logging_function):
            await logging_function('disabling cron')
        self.scheduler.pause()
| hldh214/myhome | myhome/cron.py | cron.py | py | 3,864 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "myhome.tgbot.config",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "myhome.tgbot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "apscheduler.schedulers.asyncio.AsyncIOScheduler",
"line_number": 14,
"usage_type": "call"
},
{
... |
5030099218 | # Desenvolva uma lógica que leia o peso e a altura de uma pessoa, calcule seu Índice de
# Massa Corporal (IMC) e mostre seu status, de acordo com a tabela abaixo:
# - IMC abaixo de 18,5: Abaixo do Peso
# - Entre 18,5 e 25: Peso Ideal
# - 25 até 30: Sobrepeso
# - 30 até 40: Obesidade
# - Acima de 40: Obesidade Mórbida
from datetime import date
# Read the user's data, compute the Body Mass Index (BMI) and classify it.
nome = input('Insira seu nome: ')
idade = int(input('Insira sua idade: '))
altura = float(input('Insira sua altura: '))
peso = float(input('Insira seu peso: '))
ano_atual = date.today().year
ano_nascimento = ano_atual - idade
# BMI = weight / height^2
imc = peso / altura ** 2
print(f'{nome} tem {idade} anos, {altura}m e {peso:.2f}kg, logo seu IMC é {imc:.2f}\nComo estamos em {ano_atual}, {nome} nasceu em {ano_nascimento}')
# Classification: below 18.5 underweight; 18.5-25 ideal; 25-30 overweight;
# 30-40 obese; above 40 morbidly obese.
if imc < 18.5:
    print('Você está abaixo do peso: ')
else:
    for limite, mensagem in ((25, 'Você está com um peso ideal'),
                             (30, 'Você está com sobrepeso'),
                             (40, 'Você está em um estado de obesidade')):
        if imc <= limite:
            print(mensagem)
            break
    else:
        print("Você está em um estado de obesidade mórbida")
| hdtorrad/Estudos-Python3 | Só exercícios/ex043-IMC.py | ex043-IMC.py | py | 981 | python | pt | code | 1 | github-code | 36 | [
{
"api_name": "datetime.date.today",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 14,
"usage_type": "name"
}
] |
73198221223 | import functools
import json
import sys
import traceback
from flask import jsonify
import sql_graph
from backend.bq import BackendBQClient
from backend.validation import ValidationError
def _convert_bq_obj_to_rf(obj):
    """Translate one BigQuery object row into a React Flow node dict.

    The JSON payload's coordinates/width/height move into the dedicated
    React Flow 'position'/'style' fields; every remaining payload key is
    merged into the node's 'data'. 'parentNode' is present only when the row
    has a parent object id.
    """
    payload = json.loads(obj["object_data"])
    node = {"id": obj["object_id"]}
    if obj["parent_object_id"] is not None:
        node["parentNode"] = obj["parent_object_id"]
    x_pos, y_pos = payload.pop("coordinates")[0:2]
    node["data"] = {
        "pythonType": obj["object_type"],
        "domain": obj["subject_area"],
        "target_table": obj["target_table_name"],
    }
    node["position"] = {"x": x_pos, "y": y_pos}
    node["style"] = {
        "width": payload.pop("width"),
        "height": payload.pop("height"),
    }
    # Whatever the payload still carries becomes extra node data.
    node["data"].update(payload)
    return node
def _convert_bq_conn_to_rf(conn):
    """Translate one BigQuery connection row into a React Flow edge dict,
    with the edge id formed as '<source>-<target>'."""
    source_id = conn["source_object_id"]
    target_id = conn["target_object_id"]
    return {
        "id": source_id + "-" + target_id,
        "source": source_id,
        "target": target_id,
        "data": json.loads(conn["connection_data"]),
    }
def _sort_by_type(objects):
    """Sort nodes in place so tables come first, then containers, then
    everything else (stable within each group); returns the same list."""
    def _rank(node):
        python_type = node["data"]["pythonType"].lower()
        if python_type.endswith("table"):
            return 0
        if python_type.endswith("container"):
            return 1
        return 2
    objects.sort(key=_rank)
    return objects
def bq_workflow(gcp_project, object_query_function, connection_query_function,
                query_args):
    """Run the named BackendBQClient query methods and convert the results
    into the React Flow payload {'objects': [...], 'connections': [...]}."""
    client = BackendBQClient(gcp_project)
    raw_objects = getattr(client, object_query_function)(**query_args)
    raw_connections = getattr(client, connection_query_function)(**query_args)
    nodes = [_convert_bq_obj_to_rf(o) for o in raw_objects]
    edges = [_convert_bq_conn_to_rf(c) for c in raw_connections]
    return {
        "objects": _sort_by_type(nodes),
        "connections": edges,
    }
def on_demand_workflow(gcp_project, datetime, domains, physical):
    """Build the lineage graph for the given project snapshot on demand,
    filter it to the requested domains, and serialize it for React Flow."""
    queries = sql_graph.GrizzlyLoader(gcp_project, datetime).filter_queries_by_domain_list(domains)
    lineage_graph = sql_graph.Graph(queries)
    return sql_graph.ReactFlowSerializer(lineage_graph, physical).serialize()
def response_code_handler(func):
    """Decorator mapping a handler's result and exceptions to HTTP codes:
    200 on success (JSON body), 400 for validation errors, 500 otherwise.
    Parsing and unexpected errors are logged with a full traceback."""
    @functools.wraps(func)
    def wrapper():
        def _log_traceback():
            print(traceback.format_exc(), file=sys.stderr)
        try:
            return jsonify(func()), 200
        except ValidationError as err:
            return str(err), 400
        except sql_graph.exceptions.ParsingError as err:
            _log_traceback()
            return "An error has occurred during parsing: " + str(err), 500
        except Exception as err:
            _log_traceback()
            return "Unexpected error has occurred: " + str(err), 500
    return wrapper
| google/grizzly | grizzly_data_lineage/backend/utils.py | utils.py | py | 2,755 | python | en | code | 51 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "backend.bq.BackendBQClient",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "sql_graph.GrizzlyLoa... |
37068047547 | from django.shortcuts import render,HttpResponseRedirect,reverse,redirect
from django.contrib.auth import authenticate,login,logout
from .forms import login_form,addressform
from .models import guestuser
# Create your views here.
def user_login(request):
    """Authenticate a user. GET renders the login form; POST attempts the
    login and redirects to the posted 'url' (or the products page)."""
    if request.method != 'POST':
        return render(request, "account/login.html", {'loginform': login_form()})
    loginform = login_form(request.POST)
    username = request.POST['username']
    password = request.POST['password']
    url = request.POST['url']
    user = authenticate(username=username, password=password)
    # remove guest_id: any guest-session marker is dropped on a real login
    # attempt.
    if request.session.get('guest_id'):
        del request.session['guest_id']
    print(user)
    if user is None:
        # Failed authentication: re-render the form with the posted data.
        return render(request, "account/login.html", {'loginform': loginform})
    login(request, user)
    if url:
        return redirect(url)
    return HttpResponseRedirect(reverse('products'))
def user_logout(request):
    """Log the current user out, then send them back to the login page."""
    logout(request)
    login_url = reverse('login')
    return HttpResponseRedirect(login_url)
def guestgrap(request):
    """Capture a guest shopper's email: create (or fetch) the guest record,
    stash its id in the session and continue to checkout. GET renders the
    login form instead."""
    if request.method == 'POST':
        loginform = login_form(request.POST)
        email = request.POST['email']
        # NOTE(review): request.POST values are never None, so this guard is
        # always true for posted forms — confirm whether it was meant to
        # test for an empty string.
        if email is not None:
            guest, _created = guestuser.objects.get_or_create(email=email)
            request.session['guest_id'] = guest.id
            return HttpResponseRedirect(reverse('checkout'))
    else:
        loginform = login_form()
        return render(request, "account/login.html", {'loginform': loginform})
def save_address(request):
    """Persist a shipping/billing address posted by the user.

    On success the new address id is stored in the session under the
    'shipping' or 'billing' key and the user is redirected to the posted
    'url' if given, otherwise to checkout.

    NOTE(review): when the form is invalid, or on a non-POST request, this
    view falls through and implicitly returns None, which Django turns into
    a server error — confirm whether an explicit error response is needed.
    """
    if request.method=='POST':
        form = addressform(request.POST)
        if form.is_valid():
            new_address=form.save()
            # Remember the saved address under the matching session key.
            if new_address.address_type=='shipping':
                request.session['shipping']=new_address.id
            else:
                request.session['billing']=new_address.id
            if request.POST['url']:
                return redirect(request.POST['url'])
            else:
                return HttpResponseRedirect(reverse('checkout'))
| MohamedHany2002/online-shop | account/views.py | views.py | py | 2,121 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "forms.login_form",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 19,
"usage_type": "call"
},
{
"api_na... |
1020567975 | # *********************************************************************************************************************
# league_of_legends_api.py
# import cogs.helper.api.league_of_legends_api as lol_api
# *********************************************************************************************************************
import os
from dotenv import load_dotenv
from riotwatcher import LolWatcher, ApiError
# get riot_lol_key from .env file
load_dotenv()
LOL_KEY = os.getenv('RIOT_LOL_KEY')  # None when the variable is not set
lol_watcher = LolWatcher(LOL_KEY)  # shared Riot API client for this module
default_region = 'na1'  # platform routing value used when none is supplied
def get_version(region=default_region):
    """Return the Data Dragon version list for the given region (newest
    first, per the Riot API)."""
    versions = lol_watcher.data_dragon.versions_for_region(region)
    return versions
def get_champion_list(champions_version=None):
    """Return Data Dragon champion data for the given version.

    Bug fix: the default was ``get_version(default_region)``, which Python
    evaluates once at import time — issuing a network request on import and
    freezing the version for the process lifetime. The default is now None
    and the current version is fetched lazily on each call.
    """
    if champions_version is None:
        champions_version = get_version(default_region)
    return lol_watcher.data_dragon.champions(champions_version)
# def get_summoner_match_history_20(summoner_id):
# http://ddragon.leagueoflegends.com/cdn/12.11.1/data/en_US/champion.json
def champion_string_formatting(champion):
    """Normalize a user-typed champion name or nickname to Riot's internal id.

    Known aliases (Wukong, J4, MF, ...) map through a lookup table; KogMaw
    and RekSai keep their internal capital letter; anything else is
    title-cased with spaces, dots, apostrophes and stray quotes removed.

    Bug fix: the Heimer/Donger alias previously returned the misspelled id
    'Herimerdinger'; it now returns 'Heimerdinger'.
    """
    aliases = {
        'Wukong': 'MonkeyKing',
        'JarvanIv': 'JarvanIV',
        'J4': 'JarvanIV',
        'Mf': 'MissFortune',
        'Ez': 'Ezreal',
        'Heimer': 'Heimerdinger',
        'Donger': 'Heimerdinger',
        'Cass': 'Cassiopeia',
        'Tk': 'TahmKench',
        'Tf': 'TwistedFate',
        'Asol': 'AurelionSol',
        'Cait': 'Caitlyn',
        'RenataGlasc': 'Renata',
    }
    check = champion.replace("'", '').lower().title().replace(' ', '').strip('"')
    if check == 'Kogmaw' or check == 'Reksai':
        # Preserve the internal capital from the apostrophe split
        # (kog'maw -> KogMaw, rek'sai -> RekSai).
        return champion.lower().title().replace(' ', '').replace("'", '').strip('"')
    if check in aliases:
        return aliases[check]
    return champion.replace('.', ' ').replace("'", '').lower().title().replace(' ', '').strip('"')
def champion_url_by_name(champ_name):
    """Build the leagueoflegends.com champion page URL from a display name.

    Apostrophes and spaces become hyphens, dots are dropped, and the slug is
    lowercased (e.g. "Kai'Sa" -> .../champions/kai-sa/).
    """
    slug = champ_name.replace("'", '-').replace(" ", '-').replace('.', '').lower()
    return f"https://www.leagueoflegends.com/en-us/champions/{slug}/"
| nartgnoh/BeeBot.py | cogs/helper/api/league_of_legends_api.py | league_of_legends_api.py | py | 2,024 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "riotwatcher.LolWatcher",
"line_number": 14,
"usage_type": "call"
}
] |
17448094011 | from lumicube.standard_library import *
import opensimplex
# Enter the address or hostname of your lumicube
cube = None
if isRunningOnCube():
    # connect locally if running locally
    cube = LumiCube();
else:
    # connect to my remote cube if not running locally (eg from my Mac)
    cube = LumiCube("cube.local")
# Generate a lava lamp effect using OpenSimplex noise.
# Non-deterministic seed: each run produces a different noise field.
opensimplex.random_seed()
def lava_colour(x, y, z, t):
    """Map a voxel coordinate and time step to an HSV colour whose hue comes
    from 4-D OpenSimplex noise (spatial scale 0.10, temporal speed 0.05).

    Pure-Python replacement for the daemon-specific noise_4d helper, using
    the opensimplex library instead.
    """
    space_scale = 0.10
    time_scale = 0.05
    hue = opensimplex.noise4(space_scale * x, space_scale * y, space_scale * z, time_scale * t)
    return hsv_colour(hue, 1, 1)
def paint_cube(t):
    """Colour every pixel on the cube's three addressable faces (where at
    least one coordinate equals 8) for animation time step t."""
    surface = {
        (x, y, z): lava_colour(x, y, z, t)
        for x in range(9)
        for y in range(9)
        for z in range(9)
        if x == 8 or y == 8 or z == 8
    }
    cube.display.set_3d(surface)
# Main animation loop: repaint at roughly 30 FPS, advancing the noise time.
t = 0
while True:
    paint_cube(t)
    time.sleep(1/30)
    t += 1
| paultough/lumicube | lava.py | lava.py | py | 1,210 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "opensimplex.random_seed",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "opensimplex.noise4",
"line_number": 28,
"usage_type": "call"
}
] |
24711363173 | # KLIB - variables
# wykys 2021
from pathlib import Path
# Filesystem locations: the personal KiCad library checkout (klib), the
# KiCad nightly installation, and the per-user KiCad 6.99 configuration.
PATH_KLIB = f'{Path.home()}/projects/klib'
PATH_KICAD = '/usr/share/kicad-nightly'
PATH_KICAD_CONFIG = f'{Path.home()}/.config/kicad/6.99'
PATH_KICAD_COMMON = f'{PATH_KICAD_CONFIG}/kicad_common.json'
PATH_FP_LIB_TABLE = f'{PATH_KICAD_CONFIG}/fp-lib-table'
PATH_SYM_LIB_TABLE = f'{PATH_KICAD_CONFIG}/sym-lib-table'
class KlibVars(object):
    """Holds the environment-variable name/path pairs for one KiCad library
    set: symbols, 3-D models and footprints."""

    def __init__(self) -> None:
        # Empty strings mean "not configured"; such groups are skipped by
        # __dict__().
        self.name_symbols = ''
        self.name_models3d = ''
        self.name_footprints = ''
        self.dir_symbols = ''
        self.dir_models3d = ''
        self.dir_footprints = ''

    def set_symbols(self, name: str, value: str) -> None:
        """Register the symbol-library variable name and its directory."""
        self.name_symbols, self.dir_symbols = name, value

    def set_models3d(self, name: str, value: str) -> None:
        """Register the 3-D-model variable name and its directory."""
        self.name_models3d, self.dir_models3d = name, value

    def set_footprints(self, name: str, value: str) -> None:
        """Register the footprint-library variable name and its directory."""
        self.name_footprints, self.dir_footprints = name, value

    def __dict__(self) -> dict:
        """Return the configured {env-var name: directory} pairs, skipping
        groups where either half is still empty.

        NOTE(review): shadowing ``__dict__`` with a method is unusual and
        breaks ``vars(obj)``; callers invoke it explicitly as
        ``obj.__dict__()`` — confirm this is intentional.
        """
        pairs = (
            (self.name_symbols, self.dir_symbols),
            (self.name_models3d, self.dir_models3d),
            (self.name_footprints, self.dir_footprints),
        )
        return {name: path for name, path in pairs if name != '' and path != ''}
# Pre-built variable sets: KLIB points at the personal library checkout,
# KICAD at the stock libraries shipped with KiCad nightly.
KLIB = KlibVars()
KLIB.set_symbols('KLIB_SYMBOL_DIR', f'{PATH_KLIB}/symbols')
KLIB.set_models3d('KLIB_3DMODEL_DIR', f'{PATH_KLIB}/3dmodels')
KLIB.set_footprints('KLIB_FOOTPRINT_DIR', f'{PATH_KLIB}/footprints')
KICAD = KlibVars()
KICAD.set_symbols('KICAD6_SYMBOL_DIR', f'{PATH_KICAD}/symbols')
KICAD.set_models3d('KICAD6_3DMODEL_DIR', f'{PATH_KICAD}/3dmodels')
KICAD.set_footprints('KICAD6_FOOTPRINT_DIR', f'{PATH_KICAD}/footprints')
| wykys/klib | scripts/klib_vars.py | klib_vars.py | py | 1,892 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "pathlib.Path.home",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "pathlib.Path.home",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"lin... |
22349362845 | import tqdm
from tensorboardX import SummaryWriter
import cv2
import numpy as np
import os
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from models.lbs import batch_rodrigues
from utils import misc
from pytorch3d.io import save_obj
from opt_params import OptParams
def process_visuals(visuals):
    """Assemble every visual in the dict into one CPU image tensor: the
    visuals are concatenated side by side (width), clamped to [0, 1], and
    the batch items are then stacked vertically (height)."""
    panels = []
    for key in list(visuals.keys()):
        panels += misc.prepare_visual(visuals, key, preprocessing_op=None)
    grid = torch.cat(panels, 3)  # cat w.r.t. width
    grid = grid.clamp(0, 1)
    grid = torch.cat(grid.split(1, 0), 2)[0]  # cat batch dim in lines w.r.t. height
    return grid.cpu()
def tensor2image(tensor):
    """Convert a CHW float tensor in [0, 1] to an HWC uint8 numpy image."""
    arr = tensor.detach().cpu().numpy() * 255.
    # Same effect as np.maximum(np.minimum(arr, 255), 0).
    arr = np.clip(arr, 0, 255)
    return arr.transpose(1, 2, 0).astype(np.uint8).copy()
class Runner:
    def __init__(
        self,
        dataset,
        losses,
        smplx_model,
        device,
        save_path,
        cut_flame_head,
        loss_weights,
        train_rotation,
        train_pose,
        train_shape,
        checkpoint_path
    ):
        """Set up the fitting runner: store the dataset, losses and SMPL-X
        model, build the optimizable-parameter container and allocate fixed
        identity-rotation eye/hand poses for every frame.

        NOTE(review): the eye and hand poses are identity matrices repeated
        per frame — presumably kept fixed during fitting; confirm.
        """
        self.dataset = dataset
        self.losses = losses
        self.smplx = smplx_model
        # Container holding the parameters that will actually be optimized.
        self.opt_params = OptParams(
            device,
            dataset,
            train_rotation,
            train_pose,
            train_shape,
            checkpoint_path
        )
        self.device = device
        self.save_path = save_path
        # Identity 3x3 rotations per joint, repeated for every frame:
        # 2 eye joints and 15 joints per hand.
        self.eye_pose = torch.eye(3, requires_grad=False, device=self.device).unsqueeze(0).repeat(self.dataset.nimages, 2, 1, 1)
        self.left_hand_pose = torch.eye(3, requires_grad=False, device=self.device).unsqueeze(0).repeat(self.dataset.nimages, 15, 1, 1)
        self.right_hand_pose = torch.eye(3, requires_grad=False, device=self.device).unsqueeze(0).repeat(self.dataset.nimages, 15, 1, 1)
        self.cut_flame_head = cut_flame_head
        self.loss_weights = loss_weights
        # Assumes dataset.images is (N, C, H, W) with square images — TODO confirm.
        self.img_size = dataset.images.shape[2]
        os.makedirs(os.path.join(save_path, 'mesh'), exist_ok=True)
        os.makedirs(os.path.join(save_path, 'opt_params'), exist_ok=True)
@torch.no_grad()
def get_visuals(self, lm_pred, lm_gt, label, visuals, batch):
    """Render ground-truth and predicted landmark stickmen into `visuals`."""
    # Pick the drawing routine that matches the landmark type.
    if label == 'openpose_face':
        draw = misc.draw_stickman
    elif label == 'openpose_body':
        draw = misc.draw_stickman_body
    else:
        draw = misc.draw_stickman_fa
    name = 'target_stickman' + str(label)
    image_size = batch['img'].shape[1]
    half = image_size / 2
    # Landmarks are mapped from pixel coordinates into [-1, 1] before drawing.
    visuals[name] = draw(lm_gt / half - 1, image_size, images=batch['img'])
    visuals['pred_' + name] = draw(lm_pred / half - 1, image_size, images=batch['img'])
    return visuals
def fit(self, epochs, lr, max_iter, tol=1e-9):
    """Optimise SMPL-X parameters with L-BFGS over all frames jointly.

    :param epochs: number of optimisation epochs (0 disables optimisation)
    :param lr: L-BFGS learning rate
    :param max_iter: max L-BFGS iterations per optimizer.step call
    :param tol: gradient/change tolerance for L-BFGS termination
    """
    # Batch size equals the dataset size, so every frame is in one batch.
    dataloader = DataLoader(
        self.dataset,
        batch_size=self.dataset.nimages,
        shuffle=False,
        num_workers=0
    )
    for batch_idx, batch in enumerate(dataloader):
        if epochs > 0:
            param_lst = self.opt_params.get_train_params_list()
            # need to create optimizer for every batch so that data for prev batches is not changed
            optimizer = optim.LBFGS(
                param_lst,
                lr=lr, max_iter=max_iter,
                line_search_fn='strong_wolfe',
                tolerance_grad=tol, tolerance_change=tol)
        frame_id_0 = batch['frame_ids'][0]
        # One TensorBoard run per batch, named after its first frame id.
        writer = SummaryWriter(log_dir=os.path.join(self.save_path, f'logs/{frame_id_0:06d}'))
        step = 0
        for epoch in range(0, epochs):
            tq = tqdm.tqdm(ncols=100)
            tq.set_description(f'Epoch {epoch}')
            tq.refresh()

            def closure():
                # L-BFGS may call this several times per optimizer.step.
                nonlocal step, optimizer, tq, epoch, writer

                def log(loss_str, label, value_gpu):
                    # Append the value to the textual summary and log the scalar.
                    loss_value_cpu = value_gpu.detach().cpu().numpy()
                    loss_str += f'\n\t{label}: {loss_value_cpu:.1E} '
                    writer.add_scalar(os.path.join('train', label), loss_value_cpu, step)
                    return loss_str, loss_value_cpu

                if torch.is_grad_enabled():
                    optimizer.zero_grad()
                pred = self.forward(batch)
                writer.add_scalar(os.path.join('train', 'epoch'), epoch, step)
                loss_str = 'losses:'
                step += 1
                loss_values = []
                visuals = {}
                for loss in self.losses:
                    loss_value, lm_gt, lm_pred = loss.compute(pred, batch, img_size=self.img_size, weight=self.loss_weights[loss.label])
                    loss_str, _ = log(loss_str, loss.label, loss_value)
                    loss_values.append(loss_value)
                    # Only landmark-style losses produce stickman visuals.
                    if loss.label == 'openpose_face' or loss.label == 'openpose_body' or loss.label == 'fa_kpts':
                        visuals = self.get_visuals(lm_pred, lm_gt, loss.label, visuals, batch)
                if step % 10 == 0:
                    # Periodically dump mesh/params and image summaries.
                    self.dump_results(pred, step, epoch, batch_idx)
                    visual_list = process_visuals(visuals)
                    writer.add_image(f'images', visual_list, step)
                total_loss = torch.stack(loss_values).sum()
                if step % 20 == 0:
                    print(total_loss)
                loss_str, loss_value_cpu = log(loss_str, 'total_loss', total_loss)
                total_loss.backward()
                tq.update()
                tq.set_postfix({'loss': f'{loss_value_cpu:.3E}'})
                tq.refresh()
                return total_loss

            optimizer.step(closure)
            tq.close()
def dump_results(self, pred, step, epoch, batch_idx):
    """Save the current mesh (.obj) and optimisation parameters to disk."""
    tag = f'{epoch}_{step}_{batch_idx}'
    save_obj(os.path.join(self.save_path, 'mesh', f'mesh{tag}.obj'),
             pred['verts_world'][0], pred['faces_world'])
    self.opt_params.dump_json_dict(
        os.path.join(self.save_path, 'opt_params', f'opt_params_{tag}'))
def obtain_global_matx(self, batch, ds_ids):
    """Build per-frame global rotation (with scale) and translation.

    Returns ([b, 1, 3, 3] rotation*scale, [b, 1, 3] translation)
    where b = len(ds_ids).
    """
    b = len(ds_ids)
    # Isotropic scale as a diagonal matrix, broadcast over the batch.
    scale_mtx = (torch.eye(3, device=self.device)
                 * self.opt_params.global_scale.repeat(3)).repeat(b, 1, 1)
    rot = batch_rodrigues(self.opt_params.global_rot.repeat(b, 1))
    global_rot = torch.bmm(rot, scale_mtx)  # [b, 3, 3]
    trans = self.opt_params.global_trans.repeat(b, 1).reshape(b, 3)  # [b, 3]
    return global_rot.unsqueeze(1), trans.unsqueeze(1)
def forward(self, batch):
    """Pose the SMPL-X body for every frame in `batch` and map to camera space.

    Returns a dict with world-space mesh (`verts_world`/`faces_world`),
    camera-space vertices/joints/landmarks and the shared shape betas.
    """
    ds_ids = batch['frame_ids']
    b = len(ds_ids)
    # Per-frame trainable parameters, selected by dataset frame id.
    pose_jaw_rotmtx = torch.index_select(self.opt_params.pose_jaw, 0, ds_ids)
    pose_body_rotmtx = torch.index_select(self.opt_params.pose_body, 0, ds_ids)
    face_expression = torch.index_select(self.opt_params.face_expression, 0, ds_ids)
    # One shared shape vector, broadcast over the batch.
    betas = self.opt_params.beta.reshape(1, -1).repeat(b, 1)
    global_rotmtx, global_trans = self.obtain_global_matx(batch, ds_ids)
    # use default pose
    eye_pose = torch.index_select(self.eye_pose, 0, ds_ids)
    left_hand_pose = torch.index_select(self.left_hand_pose, 0, ds_ids)
    right_hand_pose = torch.index_select(self.right_hand_pose, 0, ds_ids)
    verts, landmarks, joints = self.smplx(shape_params=betas, expression_params=face_expression, global_pose=global_rotmtx, body_pose=pose_body_rotmtx, jaw_pose=pose_jaw_rotmtx, eye_pose=eye_pose, left_hand_pose=left_hand_pose, right_hand_pose=right_hand_pose)
    # Add translation
    verts += global_trans
    joints += global_trans
    landmarks += global_trans
    # Change axis to be in NEUS space
    # NOTE(review): the slice `[:, :, :0]` selects ZERO elements, so these
    # three statements are no-ops; negating the x axis would require
    # `[:, :, 0]` — confirm whether the no-op is intended.
    verts[:, :, :0] = -verts[:, :, :0]
    joints[:, :, :0] = -joints[:, :, :0]
    landmarks[:, :, :0] = -landmarks[:, :, :0]
    result = {}
    if self.cut_flame_head:
        # Keep only the FLAME head region of the SMPL-X mesh.
        flame_verts, flame_faces = self.smplx.cut_flame_head(verts)
        result['verts_world'] = flame_verts
        result['faces_world'] = flame_faces
    else:
        result['verts_world'] = verts
        result['faces_world'] = self.smplx.faces_tensor
    # get extrinsics
    extrinsics_rot = batch['extrinsics_rvec'].unsqueeze(1)
    extrinsics_trans = batch['extrinsics_tvec'].unsqueeze(1)
    # world to camera transform
    joints = torch.matmul(extrinsics_rot.repeat(1, joints.shape[1], 1, 1), joints.unsqueeze(-1)).squeeze(-1)
    verts = torch.matmul(extrinsics_rot.repeat(1, verts.shape[1], 1, 1), verts.unsqueeze(-1)).squeeze(-1)
    landmarks = torch.matmul(extrinsics_rot.repeat(1, landmarks.shape[1], 1, 1), landmarks.unsqueeze(-1)).squeeze(-1)
    Jtr = joints + extrinsics_trans
    verts_trans = verts + extrinsics_trans
    landmarks += extrinsics_trans
    result['verts_extrinsics'] = verts  #[bs, 10475, 3]
    result['verts'] = verts_trans
    result['Jtr'] = Jtr  #[166, 3]
    result['betas'] = self.opt_params.beta
    result['face_kpt'] = landmarks
return result | SamsungLabs/NeuralHaircut | src/multiview_optimization/runner.py | runner.py | py | 10,207 | python | en | code | 453 | github-code | 36 | [
{
"api_name": "utils.misc.prepare_visual",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "utils.misc",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_... |
38191760011 | """
GIW 2020-21
Práctica 07
Grupo 05
Autores: XX, YY, ZZ,
(Nombres completos de los autores) declaramos que esta solución es fruto exclusivamente
de nuestro trabajo personal. No hemos sido ayudados por ninguna otra persona ni hemos
obtenido la solución de fuentes externas, y tampoco hemos compartido nuestra solución
con nadie. Declaramos además que no hemos realizado de manera deshonesta ninguna otra
actividad que pueda mejorar nuestros resultados ni perjudicar los resultados de los demás.
"""
from mongoengine import Document, EmbeddedDocument, EmbeddedDocumentField, ComplexDateTimeField
from mongoengine import StringField, IntField, FloatField, ListField, ReferenceField, ValidationError
from mongoengine import connect
# Connect mongoengine to the local MongoDB database used by the documents below.
connect("giw_mongoengine")

# Spanish DNI control letters, indexed by (dni_number % 23).
letras = ['T', 'R', 'W', 'A', 'G', 'M', 'Y', 'F', 'P', 'D', 'X', 'B', 'N', 'J', 'Z',
          'S', 'Q', 'V', 'H', 'L', 'C', 'K', 'E']
class Producto(Document):
    """Product with an EAN-13 barcode and category information."""
    codigo_barras = StringField(primary_key=True, min_length=13, max_length=13, regex="^[0-9]*$")
    nombre = StringField(required=True, min_length=2)
    categoria_principal = IntField(required=True, min_value=0)
    categorias_secundarias = ListField(IntField(min_value=0))

    def clean(self):
        """Validate category consistency and the EAN-13 check digit.

        Raises ValidationError when the main category is not the first
        secondary category, or when the barcode checksum is invalid.
        """
        self.validate(clean=False)
        # BUG FIX: the original condition used `== 0 | ...`, which Python
        # parses as a chained comparison with bitwise OR, and it also raised
        # IndexError on an empty list. Use a boolean `or` with an explicit
        # emptiness check instead.
        if (not self.categorias_secundarias
                or self.categoria_principal != self.categorias_secundarias[0]):
            raise ValidationError("La categoría principal no coincide con la primera categoría "
                                  "en la lista de categorías secundarias!")
        # EAN-13 checksum: digits at even indices weigh 1, odd indices weigh 3;
        # the final (13th) digit must make the weighted sum a multiple of 10.
        cifra_ultima = int(self.codigo_barras[12])
        suma = 0
        for i in range(12):
            peso = 1 if i % 2 == 0 else 3
            suma += int(self.codigo_barras[i]) * peso
        if (suma + cifra_ultima) % 10 != 0:
            raise ValidationError("Formato del código de barra invalido!")
class Linea(EmbeddedDocument):
    """Single order line: quantity, unit price, product name and line total."""
    num_items = IntField(required=True, min_value=0)
    precio_item = FloatField(required=True, min_value=0)
    name = StringField(required=True, min_length=2)
    total = FloatField(required=True, min_value=0)
    ref = ReferenceField(Producto, required=True)

    def clean(self):
        """Check total == num_items * precio_item and that names match the ref."""
        self.validate(clean=False)
        if self.num_items * self.precio_item != self.total:
            raise ValidationError("El precio total no coincide con el precio del"
                                  "producto multiplicado por la cantiad!")
        if self.name != self.ref.nombre:
            raise ValidationError("El nombre del producto no coincide con el de la "
                                  "referencia al producto!")
class Pedido(Document):
    """Order document: grand total, date and a list of order lines."""
    total = FloatField(required=True, min_value=0)
    fecha = ComplexDateTimeField(required=True)
    lineas = ListField(EmbeddedDocumentField(Linea), required=True)

    def clean(self):
        """Check the grand total and reject duplicated product names."""
        self.validate(clean=False)
        if sum(linea.total for linea in self.lineas) != self.total:
            raise ValidationError("El precio total no coincide con el precio "
                                  "total sumando los precios de los productos!")
        nombres_vistos = set()
        for linea in self.lineas:
            if linea.name in nombres_vistos:
                raise ValidationError("Existen 2 lineas de pedidos con el mismo nombre de producto!")
            nombres_vistos.add(linea.name)
class Tarjeta(EmbeddedDocument):
    """Credit card embedded in a user: holder name, number, expiry and CCV."""
    nombre = StringField(required=True, min_length=2, regex="[A-Z]+[a-z]")
    numero = StringField(primary_key=True, min_length=16, max_length=16, regex="^[0-9]+$")
    # Expiry month restricted to the twelve zero-padded values.
    mes = StringField(required=True, min_length=2, max_length=2,
                      choices=['01', '02', '03', '04', '05', '06', '07',
                               '08', '09', '10', '11', '12'])
    año = StringField(required=True, min_length=2, max_length=2, regex="^[0-9]+$")
    ccv = StringField(required=True, min_length=3, max_length=3, regex="^[0-9]+$")
class Usuario(Document):
    """User keyed by DNI, holding cards and references to orders."""
    dni = StringField(primary_key=True, min_length=9, max_length=9)
    nombre = StringField(required=True, min_length=2, regex="[A-Z]+[a-z]")
    apellido1 = StringField(required=True, min_length=2, regex="[A-Z]+[a-z]")
    apellido2 = StringField(min_length=2, regex="[A-Z]+[a-z]")
    f_nac = StringField(required=True, min_length=10, max_length=10,
                        regex="^([12][0-9][0-9][0-9])-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])$")
    tarjetas = ListField(EmbeddedDocumentField(Tarjeta))
    pedidos = ListField(ReferenceField(Pedido, reverse_delete_rule=4))

    def clean(self):
        """Validate the DNI: eight digits plus the matching control letter."""
        self.validate(clean=False)
        err = "Formato del dni invalido!"
        cuerpo, letra_dni = self.dni[:8], self.dni[8]
        if not letra_dni.isalpha():
            raise ValidationError(err)
        for caracter in cuerpo:
            if not caracter.isnumeric():
                raise ValidationError(err)
        # The control letter is determined by the numeric part modulo 23.
        if letra_dni != letras[int(cuerpo) % 23]:
            raise ValidationError(err)
def insertar():
    """Populate the database with sample products, orders, cards and users."""
    # Barcodes below carry valid EAN-13 check digits.
    productos = [
        Producto(codigo_barras="1000000000009", nombre="Galletas",
                 categoria_principal=1, categorias_secundarias=[1, 2]),
        Producto(codigo_barras="2000000000008", nombre="Chocolate",
                 categoria_principal=2, categorias_secundarias=[2, 3, 4]),
        Producto(codigo_barras="3000000000007", nombre="Almendras",
                 categoria_principal=3, categorias_secundarias=[3]),
        Producto(codigo_barras="4006381333931", nombre="Leche",
                 categoria_principal=4, categorias_secundarias=[4, 5, 6, 7]),
    ]
    # Each line total equals num_items * precio_item, as Linea.clean requires.
    lineas = [
        Linea(num_items=1, precio_item=2.5, name="Galletas", total=2.5, ref=productos[0]),
        Linea(num_items=2, precio_item=3.5, name="Chocolate", total=7.0, ref=productos[1]),
        Linea(num_items=3, precio_item=4.5, name="Almendras", total=13.5, ref=productos[2]),
        Linea(num_items=4, precio_item=5.5, name="Leche", total=22, ref=productos[3])
    ]
    pedidos = [
        Pedido(total=9.5, fecha="2020,11,26", lineas=[lineas[0], lineas[1]]),
        Pedido(total=20.5, fecha="2020,11,26", lineas=[lineas[1], lineas[2]]),
        Pedido(total=35.5, fecha="2020,11,26", lineas=[lineas[2], lineas[3]]),
        Pedido(total=24.5, fecha="2020,11,26", lineas=[lineas[3], lineas[0]])
    ]
    tarjetas = [
        Tarjeta(nombre="Dale1", numero="1234567891234567", mes="01", año="02", ccv="034"),
        Tarjeta(nombre="Dale2", numero="1234567891234568", mes="02", año="03", ccv="044"),
        Tarjeta(nombre="Joesh1", numero="1234567891234569", mes="03", año="04", ccv="054"),
        Tarjeta(nombre="Joesh2", numero="1234567891234510", mes="04", año="05", ccv="064")
    ]
    personas = [
        Usuario(dni="65068806N", nombre="Dale", apellido1="Valencia",
                apellido2="Calicdan", f_nac="1993-04-02",
                tarjetas=[tarjetas[0], tarjetas[1]], pedidos=[pedidos[0], pedidos[1]]),
        Usuario(dni="65068807J", nombre="Josh", apellido1="Lopez",
                apellido2="Victor", f_nac="1990-02-16",
                tarjetas=[tarjetas[2], tarjetas[3]], pedidos=[pedidos[2], pedidos[3]])
    ]
    # Save products first so order lines reference persisted documents,
    # then orders, then the users that reference the orders.
    for producto in productos:
        producto.save()
    for pedido in pedidos:
        pedido.save()
    for persona in personas:
        persona.save()


insertar()
| dalevale/GIW2020-21 | practica8.py | practica8.py | py | 7,670 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "mongoengine.connect",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "mongoengine.Document",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "mongoengine.StringField",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "mong... |
42296323474 | import threading
from typing import ContextManager, Optional
from liquidctl.driver.kraken3 import KrakenX3
from liquidctl.driver.hydro_platinum import HydroPlatinum
from .sensor import Sensor
from .log import LogManager
class AIODeviceSensor(Sensor, ContextManager):
    """Base temperature sensor wrapping a liquidctl AIO cooler driver.

    On construction it probes the device with a connect/disconnect round
    trip; all later device access is serialised behind a lock and wrapped
    in its own connect/disconnect pair.
    """

    is_valid: bool          # True once the initial connect probe succeeded
    device: Optional[any]   # underlying liquidctl driver instance (or None)
    device_name: str        # human-readable driver description
    sensor_name: str        # short name used in log messages

    def __init__(self) -> None:
        super().__init__()
        self._lock = threading.Lock()
        self.is_valid = False
        self.current_temp = 0.0
        # self.sensor_name = device_name
        if not hasattr(self, "device"):
            self.device = None
        if self.device:
            # Probe once: connect and immediately disconnect to confirm access.
            try:
                self.device.connect()
                self.is_valid = True
                self.device.disconnect()
                LogManager.logger.info(f"AIO device initialized {repr({'device': self.sensor_name})}")
            except BaseException:
                self.is_valid = False
                LogManager.logger.exception(f"Error in initializing AIO device {repr({'device': self.sensor_name})}")

    def __enter__(self):
        # Yield self only when a device is present; None otherwise.
        if self.device:
            return self
        else:
            return None

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.is_valid:
            # NOTE(review): disconnect is also performed after every
            # _safe_call_aio_function call — presumably a second disconnect
            # here is harmless; confirm against the liquidctl driver.
            self.device.disconnect()
            del self.device
            self.is_valid = False
            LogManager.logger.debug(f"AIO Device disconnected and reference removed {repr({'device': self.sensor_name})}")
        return None

    def get_temperature(self) -> float:
        # Subclasses implement the device-specific temperature read.
        raise NotImplementedError()

    def get_signature(self) -> list:
        # Subclasses return identifying fields for this sensor instance.
        raise NotImplementedError()

    def _safe_call_aio_function(self, function):
        # Connect, run `function`, then always disconnect and release the lock.
        self._lock.acquire()
        try:
            self.device.connect()
            result = function()
        except Exception:
            raise
        finally:
            self.device.disconnect()
            self._lock.release()
        return result
class KrakenX3Sensor(AIODeviceSensor):
    """Liquid-temperature sensor backed by an NZXT Kraken X3 AIO cooler."""

    def __init__(self, device: KrakenX3):
        self.device = device
        self.device_name = device.description
        self.sensor_name = "Kraken X3"
        super().__init__()

    def get_temperature(self) -> float:
        """Read the liquid temperature; returns 0.0 when invalid or on error."""
        self.current_temp = 0.0
        if self.is_valid:
            try:
                raw = self._safe_call_aio_function(lambda: self.device._read())
                whole, tenths = int(raw[15]), int(raw[16])
                # Bytes 15/16 hold the integer part and the tenths part.
                if 0 <= whole <= 100 and 0 <= tenths <= 90:
                    self.current_temp = whole + tenths / 10
                    LogManager.logger.trace(f"Getting sensor temperature {repr({'sensor': self.sensor_name, 'temperature': round(self.current_temp, 1)})}")
                else:
                    LogManager.logger.warning(f"Invalid sensor data {repr({'sensor': self.sensor_name, 'part 1': whole, 'part 2': tenths})}")
            except BaseException:
                LogManager.logger.exception(f"Unexpected error in getting sensor data {repr({'sensor': self.sensor_name})}")
        return self.current_temp

    def get_signature(self) -> list:
        return [__class__.__name__, self.device_name, self.device.product_id, self.sensor_name]
class HydroPlatinumSensor(AIODeviceSensor):
    """Liquid-temperature sensor backed by a Corsair Hydro Platinum AIO."""
    # Details: https://github.com/liquidctl/liquidctl/blob/main/liquidctl/driver/hydro_platinum.py

    def __init__(self, device: HydroPlatinum):
        self.device = device
        self.device_name = device.description
        device_prefix = "Corsair Hydro "
        self.sensor_name = self.device_name.split(device_prefix, 1)[1]
        super().__init__()

    def get_temperature(self) -> float:
        """Read the liquid temperature; returns 0.0 when invalid or on error."""
        self.current_temp = 0.0
        if self.is_valid:
            try:
                raw = self._safe_call_aio_function(lambda: self.device._send_command(0b00, 0xff))
                whole, frac = int(raw[8]), int(raw[7])
                # Byte 8 is the integer part, byte 7 the fractional part (/255).
                if 0 <= whole <= 100 and 0 <= frac <= 255:
                    self.current_temp = whole + frac / 255
                    LogManager.logger.trace(f"Getting sensor temperature {repr({'sensor': self.sensor_name, 'temperature': round(self.current_temp, 1)})}")
                else:
                    LogManager.logger.warning(f"Invalid sensor data {repr({'sensor': self.sensor_name, 'part 1': whole, 'part 2': frac})}")
            except ValueError as verr:
                LogManager.logger.error(f"Problem in getting sensor data {repr({'sensor': self.sensor_name, 'error': repr(verr)})}")
            except BaseException:
                LogManager.logger.exception(f"Unexpected error in getting sensor data {repr({'sensor': self.sensor_name})}")
        return self.current_temp

    def get_signature(self) -> list:
        return [__class__.__name__, self.device_name, self.device.product_id, self.sensor_name]
| maclarsson/cfancontrol | cfancontrol/devicesensor.py | devicesensor.py | py | 4,944 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "sensor.Sensor",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.ContextManager",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "threading.Lock",
... |
69844963625 | import cv2
from tensorflow.keras.models import load_model
from keras_preprocessing import image
import numpy as np
import cv2
from pygame import mixer
import os
IMG_SIZE = 250  # side length of the square ROI fed to the model
song_iter = 0  # index of the currently selected song
start_flag = False  # set once playback has been started
next_flag = True  # debounce flag for the 'next' gesture
# NOTE(review): hard-coded absolute model path — portability hazard.
model = load_model('C:/Users/ANUJ/Desktop/gesture-recognition/model/gesture_model.h5')
# Label
labels = ["unpause","none","pause","next","start","stop"]
#Starting the mixer
mixer.init()
#list of songs
songs = os.listdir("../songs")
# Setting the volume
mixer.music.set_volume(0.3)
def rescale_frame(frame, percent=150):
    """Return `frame` resized to `percent`%% of its original dimensions."""
    new_w = int(frame.shape[1] * percent / 100)
    new_h = int(frame.shape[0] * percent / 100)
    return cv2.resize(frame, (new_w, new_h), interpolation=cv2.INTER_AREA)
def play(iter):
    """Load and start the song at position `iter`, wrapping modulo the playlist.

    BUG FIX: the original ignored the `iter` argument and read the global
    `song_iter` instead; the parameter is now used. Existing call sites pass
    `song_iter`, so their behavior is unchanged. (The parameter name shadows
    the `iter` builtin but is kept for interface compatibility.)
    """
    index = int(iter % len(songs))
    print(index)
    # Loading the song
    mixer.music.load("C:/Users/ANUJ/Desktop/gesture-recognition/songs/" + songs[index])
    mixer.music.play()
# Capture frames from the default webcam (device 0) and control the mixer
# with the gesture recognised inside the green ROI.
cap = cv2.VideoCapture(0)
while(True):
    _, init_frame = cap.read()
    frame = rescale_frame(init_frame)
    # Drawing the ROI
    # The increment/decrement by 1 is to compensate for the bounding box
    cv2.rectangle(frame,(100,100),(550,550),(0,255,0),2)
    # Extracting the ROI
    roi = frame[100:550,100:550]
    # Resizing the ROI so it can be fed to the model for prediction
    roi = cv2.resize(roi, (IMG_SIZE, IMG_SIZE))
    roi_grey = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    #image_grey_final = cv2.threshold(roi_grey,120,255,cv2.THRESH_BINARY)[1]
    # Adaptive threshold keeps the hand silhouette under varying lighting.
    image_grey_final = cv2.adaptiveThreshold(roi_grey,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,115,1)
    cv2.imshow('threshold',image_grey_final)
    image_array = image.img_to_array(image_grey_final)
    image_array = image_array.reshape((1,250,250,1))
    image_array = image_array/255.
    # Batch of 1
    prediction = model.predict(image_array)
    # NOTE(review): `labels` order must match the model's output classes — verify.
    result = labels[np.argmax(prediction)]
    if result == 'start' and start_flag==False:
        # First 'start' gesture begins playback of the first song.
        mixer.music.load("C:/Users/ANUJ/Desktop/gesture-recognition/songs/" + songs[0])
        mixer.music.play()
        start_flag = True
    elif result == 'pause':
        mixer.music.pause()
    elif result == 'unpause':
        mixer.music.unpause()
    elif result == 'next' and next_flag == False:
        # Advance the playlist; next_flag debounces repeated detections
        # until a 'stop' gesture re-arms it.
        song_iter = song_iter + 1
        play(song_iter)
        next_flag = True
    elif result == 'stop':
        mixer.music.stop()
        next_flag = False
    # Displaying the predictions
    cv2.putText(frame, result, (10, 120), cv2.FONT_HERSHEY_PLAIN, 2, (255,0,255), 3)
    cv2.imshow('frame',frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| anuj1501/Gesture-music-controller | src_files/tester.py | tester.py | py | 2,854 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.init",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "os.... |
74205980903 | from . import utils
import torch
from torch import nn, autograd
import torch.nn.functional as F
from typing import Union, List, Optional, Tuple
from dataclasses import dataclass
from warnings import warn
from functools import reduce
from tqdm import tqdm
import numpy as np
import svox2.csrc as _C
# _C = utils._get_c_extension()
@dataclass
class RenderOptions:
    """
    Rendering options, see comments
    """

    empty_space_brightness: float = 1.0  # [0, 1], the background color black-white
    step_size: float = 0.5  # Step size, in normalized voxels (not used for svox1)
    sigma_thresh: float = 1e-10  # Voxels with sigmas < this are ignored, in [0, 1]
    stop_thresh: float = 1e-7  # Stops rendering if the remaining light intensity/termination, in [0, 1]
    last_sample_opaque: bool = False  # Make the last sample opaque (for forward-facing)
    near_clip: float = 0.0
    use_spheric_clip: bool = False

    def _to_cpp(self):
        """
        Generate object to pass to C++
        """
        # Copy each option field onto the pybind RenderOptions struct.
        opt = _C.RenderOptions()
        opt.empty_space_brightness = self.empty_space_brightness
        opt.step_size = self.step_size
        opt.sigma_thresh = self.sigma_thresh
        opt.stop_thresh = self.stop_thresh
        opt.near_clip = self.near_clip
        opt.use_spheric_clip = self.use_spheric_clip
        opt.last_sample_opaque = self.last_sample_opaque
        return opt
@dataclass
class Rays:
    """A batch of rays as parallel (N, 3) origin and direction tensors."""

    origins: torch.Tensor
    dirs: torch.Tensor

    def _to_cpp(self):
        """
        Generate object to pass to C++
        """
        spec = _C.RaysSpec()
        spec.origins = self.origins
        spec.dirs = self.dirs
        return spec

    def __getitem__(self, key):
        # Indexing/slicing applies to both tensors consistently.
        return Rays(self.origins[key], self.dirs[key])

    @property
    def is_cuda(self) -> bool:
        # True only when both tensors live on the GPU.
        return self.origins.is_cuda and self.dirs.is_cuda
@dataclass
class Camera:
    """Pinhole camera (OpenCV convention) with helpers to generate rays."""

    c2w: torch.Tensor  # OpenCV
    fx: float = 1111.11
    fy: Optional[float] = None
    cx: Optional[float] = None
    cy: Optional[float] = None
    width: int = 800
    height: int = 800

    @property
    def fx_val(self):
        return self.fx

    @property
    def fy_val(self):
        # fy defaults to fx when not given.
        return self.fx if self.fy is None else self.fy

    @property
    def cx_val(self):
        # Principal point defaults to the image centre.
        return self.width * 0.5 if self.cx is None else self.cx

    @property
    def cy_val(self):
        return self.height * 0.5 if self.cy is None else self.cy

    def _to_cpp(self):
        """
        Generate object to pass to C++
        """
        spec = _C.CameraSpec()
        spec.c2w = self.c2w
        spec.fx = self.fx_val
        spec.fy = self.fy_val
        spec.cx = self.cx_val
        spec.cy = self.cy_val
        spec.width = self.width
        spec.height = self.height
        return spec

    @property
    def is_cuda(self) -> bool:
        return self.c2w.is_cuda

    def gen_rays_(self):
        """
        Generate the rays for this camera
        :return: (origins (H*W, 3), dirs (H*W, 3))
        """
        origins = self.c2w[None, :3, 3].expand(self.height * self.width, -1).contiguous()
        yy, xx = torch.meshgrid(
            torch.arange(self.height, dtype=torch.float64, device=self.c2w.device) + 0.5,
            torch.arange(self.width, dtype=torch.float64, device=self.c2w.device) + 0.5,
        )
        xx = (xx - self.cx_val) / self.fx_val
        yy = (yy - self.cy_val) / self.fy_val
        zz = torch.ones_like(xx)
        dirs = torch.stack((xx, yy, zz), dim=-1)  # OpenCV
        del xx, yy, zz
        dirs /= torch.norm(dirs, dim=-1, keepdim=True)
        dirs = dirs.reshape(-1, 3, 1)
        # Rotate camera-frame directions into the world frame.
        dirs = (self.c2w[None, :3, :3].double() @ dirs)[..., 0]
        dirs = dirs.reshape(-1, 3).float()
        return origins, dirs

    def gen_rays(self) -> "Rays":
        """
        Generate the rays for this camera as a Rays object.
        :return: Rays(origins (H*W, 3), dirs (H*W, 3))

        FIX: previously this duplicated the entire body of gen_rays_;
        it now delegates so the two implementations cannot drift apart.
        """
        return Rays(*self.gen_rays_())

    def create_rays_with_index(self, index):
        """Generate rays only for the given flat pixel indices (row-major)."""
        origins = self.c2w[None, :3, 3].expand(len(index), -1).contiguous()
        yy = torch.floor_divide(index, self.width)
        xx = index - yy * self.width
        xx = (xx + 0.5 - self.cx_val) / self.fx_val
        yy = (yy + 0.5 - self.cy_val) / self.fy_val
        zz = torch.ones_like(xx)
        dirs = torch.stack((xx, yy, zz), dim=-1).double()
        del xx, yy, zz
        dirs /= torch.norm(dirs, dim=-1, keepdim=True)
        dirs = dirs.reshape(-1, 3, 1)
        dirs = (self.c2w[None, :3, :3].double() @ dirs)[..., 0]
        dirs = dirs.reshape(-1, 3).float()
        return origins, dirs
@dataclass
class qCamera:
    """Camera variant parameterised by a tensor `qtensor` instead of a c2w matrix.

    NOTE(review): `create_rays_with_index` reads `self.c2w`, which this class
    never defines — calling it raises AttributeError. It appears to have been
    copied from `Camera` without being adapted to `qtensor`; confirm intent.
    """

    qtensor: torch.Tensor  # OpenCV
    fx: float = 1111.11
    fy: Optional[float] = None
    cx: Optional[float] = None
    cy: Optional[float] = None
    width: int = 800
    height: int = 800

    @property
    def fx_val(self):
        return self.fx

    @property
    def fy_val(self):
        # fy defaults to fx when not given.
        return self.fx if self.fy is None else self.fy

    @property
    def cx_val(self):
        # Principal point defaults to the image centre.
        return self.width * 0.5 if self.cx is None else self.cx

    @property
    def cy_val(self):
        return self.height * 0.5 if self.cy is None else self.cy

    def create_rays_with_index(self, index):
        # Generate rays for the given flat pixel indices (row-major).
        origins = self.c2w[None, :3, 3].expand(len(index), -1).contiguous()
        yy = torch.floor_divide(index, self.width)
        xx = index - yy*self.width
        xx = (xx + 0.5 - self.cx_val) / self.fx_val
        yy = (yy + 0.5 - self.cy_val) / self.fy_val
        zz = torch.ones_like(xx)
        dirs = torch.stack((xx, yy, zz), dim=-1).double()
        del xx, yy, zz
        dirs /= torch.norm(dirs, dim=-1, keepdim=True)
        dirs = dirs.reshape(-1, 3, 1)
        dirs = (self.c2w[None, :3, :3].double() @ dirs)[..., 0]
        dirs = dirs.reshape(-1, 3).float()
        return origins, dirs
# BEGIN Differentiable CUDA functions with custom gradient
class _SampleGridAutogradFunction(autograd.Function):
    """Autograd wrapper around the CUDA sparse-grid sampling kernel."""

    @staticmethod
    def forward(
        ctx,
        data_density: torch.Tensor,
        data_sh: torch.Tensor,
        grid,
        points: torch.Tensor,
        want_colors: bool,
    ):
        # data_density / data_sh are passed so autograd can track gradients
        # for them; the kernel itself reads the data through `grid`.
        assert not points.requires_grad, "Point gradient not supported"
        out_density, out_sh = _C.sample_grid(grid, points, want_colors)
        ctx.save_for_backward(points)
        ctx.grid = grid
        ctx.want_colors = want_colors
        return out_density, out_sh

    @staticmethod
    def backward(ctx, grad_out_density, grad_out_sh):
        (points,) = ctx.saved_tensors
        grad_density_grid = torch.zeros_like(ctx.grid.density_data.data)
        grad_sh_grid = torch.zeros_like(ctx.grid.sh_data.data)
        # Scatter output gradients back into the grid tensors in-place.
        _C.sample_grid_backward(
            ctx.grid,
            points,
            grad_out_density.contiguous(),
            grad_out_sh.contiguous(),
            grad_density_grid,
            grad_sh_grid,
            ctx.want_colors
        )
        if not ctx.needs_input_grad[0]:
            grad_density_grid = None
        if not ctx.needs_input_grad[1]:
            grad_sh_grid = None
        # One gradient per forward input: density, sh, grid, points, want_colors.
        return grad_density_grid, grad_sh_grid, None, None, None
class _VolumeRenderFunction(autograd.Function):
    """Autograd wrapper around the CUDA volume-rendering kernel."""

    @staticmethod
    def forward(
        ctx,
        data_density: torch.Tensor,
        data_sh: torch.Tensor,
        grid,
        rays,
        opt,
    ):
        cu_fn = _C.__dict__[f"volume_render_cuvol"]
        color = cu_fn(grid, rays, opt)
        # The rendered color is cached for use in the backward pass.
        ctx.save_for_backward(color)
        ctx.grid = grid
        ctx.rays = rays
        ctx.opt = opt
        return color

    @staticmethod
    def backward(ctx, grad_out):
        (color_cache,) = ctx.saved_tensors
        cu_fn = _C.__dict__[f"volume_render_cuvol_backward"]
        grad_density_grid = torch.zeros_like(ctx.grid.density_data.data)
        grad_sh_grid = torch.zeros_like(ctx.grid.sh_data.data)
        grad_holder = _C.GridOutputGrads()
        grad_holder.grad_density_out = grad_density_grid
        grad_holder.grad_sh_out = grad_sh_grid
        cu_fn(
            ctx.grid,
            ctx.rays,
            ctx.opt,
            grad_out.contiguous(),
            color_cache,
            grad_holder
        )
        # Drop kernel references so the graph does not keep them alive.
        ctx.grid = ctx.rays = ctx.opt = None
        if not ctx.needs_input_grad[0]:
            grad_density_grid = None
        if not ctx.needs_input_grad[1]:
            grad_sh_grid = None
        # One gradient per forward input: density, sh, grid, rays, opt.
        return grad_density_grid, grad_sh_grid, None, None, None
class _RenderFunctionPoseRGB(autograd.Function):
    """Render RGB and backpropagate to ray origins/directions (pose optimisation)."""

    @staticmethod
    def forward(
        ctx,
        origins: torch.Tensor,
        dirs: torch.Tensor,
        grid,
        opt
    ):
        color = torch.empty((origins.size(0), 3),
                            dtype=torch.float32,
                            device=origins.device)
        rays = Rays(origins, dirs)._to_cpp()
        log_trans = _C.render_pos_forward(grid, rays, opt, color)
        ctx.save_for_backward(color)
        ctx.origins = origins
        ctx.dirs = dirs
        ctx.grid = grid
        ctx.opt = opt
        ctx.rays = rays
        return color

    @staticmethod
    def backward(ctx, grad_color):
        (color_cache, ) = ctx.saved_tensors
        grad_origin = torch.zeros_like(ctx.origins.data)
        grad_dir = torch.zeros_like(ctx.dirs.data)
        grad_holder = _C.PoseGrads()
        grad_holder.grad_pose_origin_out = grad_origin
        grad_holder.grad_pose_direction_out = grad_dir
        _C.render_pos_backward(
            ctx.grid,
            ctx.rays,
            ctx.opt,
            grad_color.contiguous(),
            color_cache,
            grad_holder)
        ctx.grid = ctx.opt = None
        torch.cuda.synchronize()
        # BUG FIX: forward takes 4 inputs (origins, dirs, grid, opt) but the
        # original returned 5 gradients, which makes autograd raise
        # "returned an incorrect number of gradients". Return exactly 4.
        return grad_origin, grad_dir, None, None
class _RenderFunctionPoseRGBD(autograd.Function):
    """Render RGB + depth and backpropagate to ray origins/directions."""

    @staticmethod
    def forward(
        ctx,
        origins: torch.Tensor,
        dirs: torch.Tensor,
        grid,
        opt
    ):
        color = torch.empty((origins.size(0), 3),
                            dtype=torch.float32,
                            device=origins.device)
        depth = torch.empty((origins.size(0),),
                            dtype=torch.float32,
                            device=origins.device)
        rays = Rays(origins, dirs)._to_cpp()
        log_trans = _C.render_pos_forward_rgbd(grid, rays, opt, color, depth)
        ctx.save_for_backward(color, depth)
        ctx.origins = origins
        ctx.dirs = dirs
        ctx.grid = grid
        ctx.opt = opt
        ctx.rays = rays
        return color, depth

    @staticmethod
    def backward(ctx, grad_color, grad_depth):
        color_cache, depth_cache = ctx.saved_tensors
        grad_origin = torch.zeros_like(ctx.origins.data)
        grad_dir = torch.zeros_like(ctx.dirs.data)
        grad_holder = _C.PoseGrads()
        grad_holder.grad_pose_origin_out = grad_origin
        grad_holder.grad_pose_direction_out = grad_dir
        _C.render_pos_backward_rgbd(
            ctx.grid,
            ctx.rays,
            ctx.opt,
            grad_color.contiguous(),
            grad_depth.contiguous(),
            color_cache,
            depth_cache,
            grad_holder)
        ctx.grid = ctx.opt = None
        torch.cuda.synchronize()
        # BUG FIX: forward takes 4 inputs (origins, dirs, grid, opt) but the
        # original returned 5 gradients, which makes autograd raise
        # "returned an incorrect number of gradients". Return exactly 4.
        return grad_origin, grad_dir, None, None
class _TotalVariationFunction(autograd.Function):
    """Total-variation regulariser over the sparse grid data."""

    @staticmethod
    def forward(
        ctx,
        data: torch.Tensor,
        links: torch.Tensor,
        start_dim: int,
        end_dim: int,
        use_logalpha: bool,
        logalpha_delta: float,
        ignore_edge: bool,
    ):
        tv = _C.tv(links, data, start_dim, end_dim, use_logalpha, logalpha_delta, ignore_edge)
        ctx.save_for_backward(links, data)
        ctx.start_dim = start_dim
        ctx.end_dim = end_dim
        ctx.use_logalpha = use_logalpha
        ctx.logalpha_delta = logalpha_delta
        ctx.ignore_edge = ignore_edge
        return tv

    @staticmethod
    def backward(ctx, grad_out):
        links, data = ctx.saved_tensors
        grad_grid = torch.zeros_like(data)
        # The kernel accumulates the TV gradient into grad_grid in-place.
        _C.tv_grad(links, data, ctx.start_dim, ctx.end_dim, 1.0,
                   ctx.use_logalpha, ctx.logalpha_delta,
                   ctx.ignore_edge,
                   grad_grid)
        ctx.start_dim = ctx.end_dim = None
        if not ctx.needs_input_grad[0]:
            grad_grid = None
        # BUG FIX: forward takes 7 inputs but the original returned 8
        # gradients, which makes autograd raise "returned an incorrect
        # number of gradients". Return exactly 7 (grad for `data`, None
        # for the 6 non-differentiable arguments).
        return grad_grid, None, None, None, None, None, None
# END Differentiable CUDA functions with custom gradient
class SparseGrid(nn.Module):
"""
Main sparse grid data structure.
initially it will be a dense grid of resolution <reso>.
Only float32 is supported.
:param reso: int or List[int, int, int], resolution for resampled grid, as in the constructor
:param radius: float or List[float, float, float], the 1/2 side length of the grid, optionally in each direction
:param center: float or List[float, float, float], the center of the grid
:param sh_dim: int, number of SH components (must be square number)
:param use_z_order: bool, if true, stores the data initially in a Z-order curve if possible
:param device: torch.device, device to store the grid
"""
    def __init__(
        self,
        reso: Union[int, List[int], Tuple[int, int, int]] = 128,
        radius: Union[float, List[float]] = 1.0,
        center: Union[float, List[float]] = [0.0, 0.0, 0.0],  # NOTE(review): mutable default; never mutated here, but fragile
        sh_dim: int = 9,  # number of SH components; must be a square number
        use_z_order : bool=False,
        use_sphere_bound : bool=False,
        mlp_width : int = 16,  # NOTE(review): unused in this constructor body — verify elsewhere
        device: Union[torch.device, str] = "cpu",
    ):
        """Build an initially dense grid of resolution ``reso``.

        Allocates density (capacity x 1) and SH color (capacity x 3*sh_dim)
        parameters plus an int32 ``links`` index grid mapping voxel position
        to data row (-1 = empty).
        """
        super().__init__()
        assert (
            sh_dim >= 1 and sh_dim <= utils.MAX_SH_BASIS
        ), f"sh_dim 1-{utils.MAX_SH_BASIS} supported"
        self.sh_dim = sh_dim
        # Normalize reso to a 3-list.
        if isinstance(reso, int):
            reso = [reso] * 3
        else:
            assert (
                len(reso) == 3
            ), "reso must be an integer or indexable object of 3 ints"

        # Morton (Z-order) layout only works for cubic power-of-2 grids.
        if use_z_order and not (reso[0] == reso[1] and reso[0] == reso[2] and utils.is_pow2(reso[0])):
            print("Morton code requires a cube grid of power-of-2 size, ignoring...")
            use_z_order = False

        # Normalize radius/center to CPU float32 tensors of shape (3,).
        if isinstance(radius, float) or isinstance(radius, int):
            radius = [radius] * 3
        if isinstance(radius, torch.Tensor):
            radius = radius.to(device="cpu", dtype=torch.float32)
        else:
            radius = torch.tensor(radius, dtype=torch.float32, device="cpu")
        if isinstance(center, torch.Tensor):
            center = center.to(device="cpu", dtype=torch.float32)
        else:
            center = torch.tensor(center, dtype=torch.float32, device="cpu")

        self.radius: torch.Tensor = radius  # CPU
        self.center: torch.Tensor = center  # CPU
        # Affine world->unit-cube transform used by world2grid.
        self._offset = 0.5 * (1.0 - self.center / self.radius)
        self._scaling = 0.5 / self.radius

        # Total number of voxels.
        n3: int = reduce(lambda x, y: x * y, reso)
        if use_z_order:
            init_links = utils.gen_morton(reso[0], device=device, dtype=torch.int32).flatten()
        else:
            init_links = torch.arange(n3, device=device, dtype=torch.int32)

        if use_sphere_bound:
            # Keep only voxels whose center lies within (slightly outside)
            # the unit sphere; everything else becomes an empty link (-1).
            X = torch.arange(reso[0], dtype=torch.float32, device=device) - 0.5
            Y = torch.arange(reso[1], dtype=torch.float32, device=device) - 0.5
            Z = torch.arange(reso[2], dtype=torch.float32, device=device) - 0.5
            X, Y, Z = torch.meshgrid(X, Y, Z)
            points = torch.stack((X, Y, Z), dim=-1).view(-1, 3)
            gsz = torch.tensor(reso)
            # Map voxel-center coords into [-1, 1].
            roffset = 1.0 / gsz - 1.0
            rscaling = 2.0 / gsz
            points = torch.addcmul(
                roffset.to(device=points.device),
                points,
                rscaling.to(device=points.device),
            )

            norms = points.norm(dim=-1)
            # Tolerance of one voxel diagonal outside the sphere.
            mask = norms <= 1.0 + (3 ** 0.5) / gsz.max()

            self.capacity: int = mask.sum()  # NOTE(review): 0-dim tensor, not a Python int — confirm downstream usage
            data_mask = torch.zeros(n3, dtype=torch.int32, device=device)
            idxs = init_links[mask].long()
            data_mask[idxs] = 1
            # Compact surviving voxels into consecutive data rows.
            data_mask = torch.cumsum(data_mask, dim=0) - 1

            init_links[mask] = data_mask[idxs].int()
            init_links[~mask] = -1
        else:
            self.capacity = n3

        self.density_data = nn.Parameter(
            torch.zeros(self.capacity, 1, dtype=torch.float32, device=device)
        )
        # Three color channels, sh_dim SH coefficients each.
        self.sh_data = nn.Parameter(
            torch.zeros(
                self.capacity, self.sh_dim * 3, dtype=torch.float32, device=device
            )
        )

        self.register_buffer("links", init_links.view(reso))
        self.links: torch.Tensor
        self.opt = RenderOptions()
        self.sparse_grad_indexer: Optional[torch.Tensor] = None
        self.sparse_sh_grad_indexer: Optional[torch.Tensor] = None
        self.density_rms: Optional[torch.Tensor] = None
        self.sh_rms: Optional[torch.Tensor] = None

        if self.links.is_cuda and use_sphere_bound:
            self.accelerate()
@property
def data_dim(self):
"""
Get the number of channels in the data, including color + density
(similar to svox 1)
"""
return self.sh_data.size(1) + 1
@property
def shape(self):
return list(self.links.shape) + [self.data_dim]
def _fetch_links(self, links):
results_sigma = torch.zeros(
(links.size(0), 1), device=links.device, dtype=torch.float32
)
results_sh = torch.zeros(
(links.size(0), self.sh_data.size(1)),
device=links.device,
dtype=torch.float32,
)
mask = links >= 0
idxs = links[mask].long()
results_sigma[mask] = self.density_data[idxs]
results_sh[mask] = self.sh_data[idxs]
return results_sigma, results_sh
def _fetch_links_sigma(self, links):
results_sigma = torch.zeros(
(links.size(0), 1), device=links.device, dtype=torch.float32
)
mask = links >= 0
idxs = links[mask].long()
results_sigma[mask] = self.density_data[idxs]
return results_sigma
    def sample(self, points: torch.Tensor,
               use_kernel: bool = True,
               grid_coords: bool = False,
               want_colors: bool = True):
        """
        Grid sampling with trilinear interpolation.
        Behaves like torch.nn.functional.grid_sample
        with padding mode border and align_corners=False (better for multi-resolution).

        Any voxel with link < 0 (empty) is considered to have 0 values in all channels
        prior to interpolating.

        NOTE(review): in the PyTorch fallback path, ``points`` is clamped
        in-place; with grid_coords=True this mutates the caller's tensor.

        :param points: torch.Tensor, (N, 3)
        :param use_kernel: bool, if false uses pure PyTorch version even if on CUDA.
        :param grid_coords: bool, if true then uses grid coordinates ([-0.5, reso[i]-0.5 ] in each dimension);
                            more numerically exact for resampling
        :param want_colors: bool, if true (default) returns density and colors,
                            else returns density and a dummy tensor to be ignored
                            (much faster)

        :return: (density, color)
        """
        if use_kernel and self.links.is_cuda and _C is not None:
            # Fast path: fused CUDA kernel with a custom autograd function.
            assert points.is_cuda
            return _SampleGridAutogradFunction.apply(
                self.density_data, self.sh_data, self._to_cpp(grid_coords=grid_coords), points, want_colors
            )
        else:
            # Pure-PyTorch trilinear interpolation fallback.
            if not grid_coords:
                points = self.world2grid(points)
            # Border-padding semantics: clamp into the valid grid range.
            points.clamp_min_(0.0)
            for i in range(3):
                points[:, i].clamp_max_(self.links.size(i) - 1)
            l = points.to(torch.long)
            # Base voxel index must leave room for the +1 neighbor lookup.
            for i in range(3):
                l[:, i].clamp_max_(self.links.size(i) - 2)
            wb = points - l   # fractional offset: weight of the far corner
            wa = 1.0 - wb     # weight of the near corner
            lx, ly, lz = l.unbind(-1)
            # Links of the 8 corners surrounding each sample point.
            links000 = self.links[lx, ly, lz]
            links001 = self.links[lx, ly, lz + 1]
            links010 = self.links[lx, ly + 1, lz]
            links011 = self.links[lx, ly + 1, lz + 1]
            links100 = self.links[lx + 1, ly, lz]
            links101 = self.links[lx + 1, ly, lz + 1]
            links110 = self.links[lx + 1, ly + 1, lz]
            links111 = self.links[lx + 1, ly + 1, lz + 1]
            sigma000, rgb000 = self._fetch_links(links000)
            sigma001, rgb001 = self._fetch_links(links001)
            sigma010, rgb010 = self._fetch_links(links010)
            sigma011, rgb011 = self._fetch_links(links011)
            sigma100, rgb100 = self._fetch_links(links100)
            sigma101, rgb101 = self._fetch_links(links101)
            sigma110, rgb110 = self._fetch_links(links110)
            sigma111, rgb111 = self._fetch_links(links111)

            # Trilinear interpolation of density: lerp along z, then y, then x.
            c00 = sigma000 * wa[:, 2:] + sigma001 * wb[:, 2:]
            c01 = sigma010 * wa[:, 2:] + sigma011 * wb[:, 2:]
            c10 = sigma100 * wa[:, 2:] + sigma101 * wb[:, 2:]
            c11 = sigma110 * wa[:, 2:] + sigma111 * wb[:, 2:]
            c0 = c00 * wa[:, 1:2] + c01 * wb[:, 1:2]
            c1 = c10 * wa[:, 1:2] + c11 * wb[:, 1:2]
            samples_sigma = c0 * wa[:, :1] + c1 * wb[:, :1]

            if want_colors:
                # Same interpolation applied to all SH color channels at once.
                c00 = rgb000 * wa[:, 2:] + rgb001 * wb[:, 2:]
                c01 = rgb010 * wa[:, 2:] + rgb011 * wb[:, 2:]
                c10 = rgb100 * wa[:, 2:] + rgb101 * wb[:, 2:]
                c11 = rgb110 * wa[:, 2:] + rgb111 * wb[:, 2:]
                c0 = c00 * wa[:, 1:2] + c01 * wb[:, 1:2]
                c1 = c10 * wa[:, 1:2] + c11 * wb[:, 1:2]
                samples_rgb = c0 * wa[:, :1] + c1 * wb[:, :1]
            else:
                # Empty placeholder with the right dtype/device; to be ignored.
                samples_rgb = torch.empty_like(self.sh_data[:0])

            return samples_sigma, samples_rgb
def forward(self, points: torch.Tensor, use_kernel: bool = True):
return self.sample(points, use_kernel=use_kernel)
    def _volume_render_rgbd_pytorch(self, rays: Rays):
        """
        trilerp gradcheck version of RGBD

        Pure-PyTorch ray marcher returning (rgb (B, 3), depth (B,)).
        Grid parameters are frozen; gradients flow only through the rays.
        Slow — intended for gradient checking / debugging only.
        """
        # Only pose (ray) gradients are wanted here.
        self.sh_data.requires_grad_(False)
        self.density_data.requires_grad_(False)

        origins = self.world2grid(rays.origins)
        dirs = rays.dirs / torch.norm(rays.dirs, dim=-1, keepdim=True)
        viewdirs = dirs
        B = dirs.size(0)
        assert origins.size(0) == B
        gsz = self._grid_size()
        # Rescale directions into grid units; delta_scale converts step
        # lengths back into world units.
        dirs = dirs * (self._scaling * gsz).to(device=dirs.device)
        delta_scale = 1.0 / dirs.norm(dim=1)
        dirs = dirs * delta_scale.unsqueeze(-1)

        # Per-ray SH basis values for the view direction.
        sh_mult = utils.eval_sh_bases(self.sh_dim, viewdirs)
        invdirs = 1.0 / dirs

        gsz_cu = gsz.to(device=dirs.device)
        # Ray/AABB intersection (slab method) -> entry t and exit tmax.
        t1 = (-0.5 - origins) * invdirs
        t2 = (gsz_cu - 0.5 - origins) * invdirs

        t = torch.min(t1, t2)
        t[dirs == 0] = -1e9
        t = torch.max(t, dim=-1).values.clamp_min_(self.opt.near_clip)

        tmax = torch.max(t1, t2)
        tmax[dirs == 0] = 1e9
        tmax = torch.min(tmax, dim=-1).values

        log_light_intensity = torch.zeros(B, device=origins.device)
        out_rgb = torch.zeros((B, 3), device=origins.device)
        out_depth = torch.zeros((B, ), device=origins.device)
        good_indices = torch.arange(B, device=origins.device)

        # Drop rays that miss the grid entirely.
        mask = t <= tmax
        good_indices = good_indices[mask]
        origins = origins[mask]
        dirs = dirs[mask]

        del invdirs
        t = t[mask]
        sh_mult = sh_mult[mask]
        tmax = tmax[mask]

        # March all surviving rays in lockstep, compacting after each step.
        while good_indices.numel() > 0:
            pos = origins + t[:, None] * dirs
            pos = pos.clamp_min_(0.0)
            pos[:, 0] = torch.clamp_max(pos[:, 0].clone(), gsz_cu[0] - 1)
            pos[:, 1] = torch.clamp_max(pos[:, 1].clone(), gsz_cu[1] - 1)
            pos[:, 2] = torch.clamp_max(pos[:, 2].clone(), gsz_cu[2] - 1)
            l = pos.to(torch.long)
            l.clamp_min_(0)
            # Base voxel must leave room for the +1 neighbor.
            l[:, 0] = torch.clamp_max(l[:, 0].clone(), gsz_cu[0] - 2)
            l[:, 1] = torch.clamp_max(l[:, 1].clone(), gsz_cu[1] - 2)
            l[:, 2] = torch.clamp_max(l[:, 2].clone(), gsz_cu[2] - 2)
            pos -= l  # pos is now the in-voxel fractional offset

            # BEGIN CRAZY TRILERP
            lx, ly, lz = l.unbind(-1)
            links000 = self.links[lx, ly, lz]
            links001 = self.links[lx, ly, lz + 1]
            links010 = self.links[lx, ly + 1, lz]
            links011 = self.links[lx, ly + 1, lz + 1]
            links100 = self.links[lx + 1, ly, lz]
            links101 = self.links[lx + 1, ly, lz + 1]
            links110 = self.links[lx + 1, ly + 1, lz]
            links111 = self.links[lx + 1, ly + 1, lz + 1]

            sigma000, rgb000 = self._fetch_links(links000)
            sigma001, rgb001 = self._fetch_links(links001)
            sigma010, rgb010 = self._fetch_links(links010)
            sigma011, rgb011 = self._fetch_links(links011)
            sigma100, rgb100 = self._fetch_links(links100)
            sigma101, rgb101 = self._fetch_links(links101)
            sigma110, rgb110 = self._fetch_links(links110)
            sigma111, rgb111 = self._fetch_links(links111)

            wa, wb = 1.0 - pos, pos
            # Lerp density along z, then y, then x.
            c00 = sigma000 * wa[:, 2:] + sigma001 * wb[:, 2:]
            c01 = sigma010 * wa[:, 2:] + sigma011 * wb[:, 2:]
            c10 = sigma100 * wa[:, 2:] + sigma101 * wb[:, 2:]
            c11 = sigma110 * wa[:, 2:] + sigma111 * wb[:, 2:]
            c0 = c00 * wa[:, 1:2] + c01 * wb[:, 1:2]
            c1 = c10 * wa[:, 1:2] + c11 * wb[:, 1:2]
            sigma = c0 * wa[:, :1] + c1 * wb[:, :1]
            # Same lerp for all SH color channels.
            c00 = rgb000 * wa[:, 2:] + rgb001 * wb[:, 2:]
            c01 = rgb010 * wa[:, 2:] + rgb011 * wb[:, 2:]
            c10 = rgb100 * wa[:, 2:] + rgb101 * wb[:, 2:]
            c11 = rgb110 * wa[:, 2:] + rgb111 * wb[:, 2:]
            c0 = c00 * wa[:, 1:2] + c01 * wb[:, 1:2]
            c1 = c10 * wa[:, 1:2] + c11 * wb[:, 1:2]
            rgb = c0 * wa[:, :1] + c1 * wb[:, :1]
            # END CRAZY TRILERP

            # Volume rendering quadrature: alpha from density, weight from
            # accumulated log transmittance.
            log_att = (
                -self.opt.step_size
                * torch.relu(sigma[..., 0])
                * delta_scale[good_indices]
            )
            weight = torch.exp(log_light_intensity[good_indices]) * (
                1.0 - torch.exp(log_att)
            )
            # [B', 3, n_sh_coeffs]
            rgb_sh = rgb.reshape(-1, 3, self.sh_dim)
            # Evaluate SH -> RGB (with +0.5 offset) and clamp at 0.
            rgb = torch.clamp_min(
                torch.sum(sh_mult.unsqueeze(-2) * rgb_sh, dim=-1) + 0.5,
                0.0,
            )  # [B', 3]
            rgb = weight[:, None] * rgb[:, :3].clone()

            # Expected-termination depth contribution (world units).
            depth = weight * t * delta_scale[good_indices]
            out_rgb[good_indices] = out_rgb[good_indices] + rgb
            out_depth[good_indices] = out_depth[good_indices] + depth

            log_light_intensity[good_indices] = log_light_intensity[good_indices] + log_att
            t = t + self.opt.step_size

            # Compact: keep only rays still inside the grid.
            mask = t <= tmax
            good_indices = good_indices[mask]
            origins = origins[mask].clone()
            dirs = dirs[mask].clone()
            t = t[mask]
            sh_mult = sh_mult[mask]
            tmax = tmax[mask]

        # Composite the remaining transmittance onto a constant background.
        if self.opt.empty_space_brightness:
            out_rgb = out_rgb + (
                torch.exp(log_light_intensity).unsqueeze(-1)
                * self.opt.empty_space_brightness
            )

        return out_rgb, out_depth
    def _volume_render_depth_pytorch(self, rays: Rays):
        """
        trilerp gradcheck of DEPTH

        Pure-PyTorch expected-termination depth renderer, (B,) output.
        Density parameters are frozen; slow — for gradient checking only.

        NOTE(review): unlike the RGBD version, ray directions are NOT
        normalized or rescaled into grid units here — confirm callers
        supply grid-space directions.
        """
        self.density_data.requires_grad_(False)
        origins = self.world2grid(rays.origins)
        B = origins.size(0)
        gsz = self._grid_size()
        # Converts grid-space step lengths back to world units.
        delta_scale = 1./((self._scaling * gsz).to(device=rays.dirs.device))
        dirs = rays.dirs
        invdirs = 1.0 / dirs.clone().detach()
        gsz_cu = gsz.to(device=dirs.device)
        # Ray/AABB intersection (slab method) -> entry t and exit tmax.
        t1 = (-0.5 - origins) * invdirs
        t2 = (gsz_cu - 0.5 - origins) * invdirs

        t = torch.min(t1, t2)
        t[dirs == 0] = -1e9
        t = torch.max(t, dim=-1).values.clamp_min_(self.opt.near_clip)

        tmax = torch.max(t1, t2)
        tmax[dirs == 0] = 1e9
        tmax = torch.min(tmax, dim=-1).values
        log_light_intensity = torch.zeros(B, device=origins.device)
        out_depth = torch.zeros((B, ), device=origins.device)
        good_indices = torch.arange(B, device=origins.device)
        del invdirs

        # Drop rays that miss the grid entirely.
        mask = t <= tmax
        good_indices = good_indices[mask]
        origins = origins[mask]
        dirs = dirs[mask]
        t = t[mask]
        tmax = tmax[mask]
        del t1, t2

        # March surviving rays in lockstep, compacting after each step.
        while good_indices.numel() > 0:
            pos = origins + t[:, None] * dirs
            pos = pos.clamp_min_(0.0)
            pos[:, 0] = torch.clamp_max(pos[:, 0].clone(), gsz_cu[0] - 1)
            pos[:, 1] = torch.clamp_max(pos[:, 1].clone(), gsz_cu[1] - 1)
            pos[:, 2] = torch.clamp_max(pos[:, 2].clone(), gsz_cu[2] - 1)
            l = pos.to(torch.long)
            l.clamp_min_(0)
            # Base voxel must leave room for the +1 neighbor.
            l[:, 0] = torch.clamp_max(l[:, 0].clone(), gsz_cu[0] - 2)
            l[:, 1] = torch.clamp_max(l[:, 1].clone(), gsz_cu[1] - 2)
            l[:, 2] = torch.clamp_max(l[:, 2].clone(), gsz_cu[2] - 2)
            pos -= l  # in-voxel fractional offset

            # BEGIN CRAZY TRILERP
            lx, ly, lz = l.unbind(-1)
            del l
            links000 = self.links[lx, ly, lz]
            links001 = self.links[lx, ly, lz + 1]
            links010 = self.links[lx, ly + 1, lz]
            links011 = self.links[lx, ly + 1, lz + 1]
            links100 = self.links[lx + 1, ly, lz]
            links101 = self.links[lx + 1, ly, lz + 1]
            links110 = self.links[lx + 1, ly + 1, lz]
            links111 = self.links[lx + 1, ly + 1, lz + 1]

            # Density only — no color fetch in the depth renderer.
            sigma000 = self._fetch_links_sigma(links000)
            sigma001 = self._fetch_links_sigma(links001)
            sigma010 = self._fetch_links_sigma(links010)
            sigma011 = self._fetch_links_sigma(links011)
            sigma100 = self._fetch_links_sigma(links100)
            sigma101 = self._fetch_links_sigma(links101)
            sigma110 = self._fetch_links_sigma(links110)
            sigma111 = self._fetch_links_sigma(links111)

            wa, wb = 1.0 - pos, pos
            # Lerp density along z, then y, then x.
            c00 = sigma000 * wa[:, 2:] + sigma001 * wb[:, 2:]
            c01 = sigma010 * wa[:, 2:] + sigma011 * wb[:, 2:]
            c10 = sigma100 * wa[:, 2:] + sigma101 * wb[:, 2:]
            c11 = sigma110 * wa[:, 2:] + sigma111 * wb[:, 2:]
            c0 = c00 * wa[:, 1:2] + c01 * wb[:, 1:2]
            c1 = c10 * wa[:, 1:2] + c11 * wb[:, 1:2]
            sigma = c0 * wa[:, :1] + c1 * wb[:, :1]
            # END CRAZY TRILERP

            # Quadrature weight from accumulated log transmittance.
            log_att = (
                -self.opt.step_size
                * torch.relu(sigma[..., 0])
                * delta_scale[good_indices]
            )
            weight = torch.exp(log_light_intensity[good_indices]) * (
                1.0 - torch.exp(log_att)
            )

            # Expected-termination depth contribution (world units).
            depth = weight * t * delta_scale[good_indices]
            out_depth[good_indices] = out_depth[good_indices] + depth
            log_light_intensity[good_indices] = log_light_intensity[good_indices] + log_att
            t = t + self.opt.step_size

            # Compact: keep only rays still inside the grid.
            mask = t <= tmax
            good_indices = good_indices[mask].clone()
            origins = origins[mask].clone()
            dirs = dirs[mask].clone()
            t = t[mask]
            tmax = tmax[mask]
        return out_depth
def volume_render(
self, rays: Rays, use_kernel: bool = True,
return_raylen: bool=False
):
"""
Standard volume rendering. See grid.opt.* (RenderOptions) for configs.
:param rays: Rays, (origins (N, 3), dirs (N, 3))
:param use_kernel: bool, if false uses pure PyTorch version even if on CUDA.
:param return_raylen: bool, if true then only returns the length of the
ray-cube intersection and quits
:return: (N, 3), predicted RGB
"""
if use_kernel and self.links.is_cuda and _C is not None and not return_raylen:
assert rays.is_cuda
return _VolumeRenderFunction.apply(
self.density_data,
self.sh_data,
self._to_cpp(),
rays._to_cpp(),
self.opt._to_cpp(),
)
else:
warn("Using slow volume rendering, should only be used for debugging")
return self._volume_render_rgbd_pytorch(rays)
def volume_render_fused(
self,
rays: Rays,
rgb_gt: torch.Tensor,
beta_loss: float = 0.0,
sparsity_loss: float = 0.0
):
"""
Standard volume rendering with fused MSE gradient generation,
given a ground truth color for each pixel.
Will update the *.grad tensors for each parameter
You can then subtract the grad manually or use the optim_*_step methods
See grid.opt.* (RenderOptions) for configs.
:param rays: Rays, (origins (N, 3), dirs (N, 3))
:param rgb_gt: (N, 3), GT pixel colors, each channel in [0, 1]
:param beta_loss: float, weighting for beta loss to add to the gradient.
(fused into the backward pass).
This is average voer the rays in the batch.
Beta loss also from neural volumes:
[Lombardi et al., ToG 2019]
:return: (N, 3), predicted RGB
"""
assert (
_C is not None and self.sh_data.is_cuda
), "CUDA extension is currently required for fused"
assert rays.is_cuda
grad_density, grad_sh = self._get_data_grads()
rgb_out = torch.zeros_like(rgb_gt)
self.sparse_grad_indexer = torch.zeros((self.density_data.size(0),),
dtype=torch.bool, device=self.density_data.device)
grad_holder = _C.GridOutputGrads()
grad_holder.grad_density_out = grad_density
grad_holder.grad_sh_out = grad_sh
grad_holder.mask_out = self.sparse_grad_indexer
cu_fn = _C.__dict__[f"volume_render_cuvol_fused"]
# with utils.Timing("actual_render"):
cu_fn(
self._to_cpp(),
rays._to_cpp(),
self.opt._to_cpp(),
rgb_gt,
beta_loss,
sparsity_loss,
rgb_out,
grad_holder
)
self.sparse_sh_grad_indexer = self.sparse_grad_indexer.clone()
return rgb_out
def volume_render_fused_rgbd(
self,
rays: Rays,
rgb_gt: torch.Tensor,
depth_gt: torch.Tensor = None,
beta_loss: float = 0.0,
sparsity_loss: float = 0.0,
d_loss: float = 0.0,
):
assert rays.is_cuda
grad_density, grad_sh = self._get_data_grads()
self.sparse_grad_indexer = torch.zeros((self.density_data.size(0),),
dtype=torch.bool, device=self.density_data.device)
grad_holder = _C.GridOutputGrads()
grad_holder.grad_density_out = grad_density
grad_holder.grad_sh_out = grad_sh
grad_holder.mask_out = self.sparse_grad_indexer
rgb_out = torch.zeros_like(rgb_gt)
depth_out = torch.zeros_like(depth_gt)
_C.volume_render_fused_rgbd(
self._to_cpp(),
rays._to_cpp(),
self.opt._to_cpp(),
rgb_gt,
depth_gt,
beta_loss,
sparsity_loss,
d_loss,
rgb_out,
depth_out,
grad_holder)
self.sparse_intensity_grad_indexer = self.sparse_grad_indexer.clone()
return rgb_out, depth_out
def volume_render_image(
self, camera: Camera, use_kernel: bool = True,
batch_size : int = 5000,
return_raylen: bool=False
):
"""
Standard volume rendering (entire image version).
See grid.opt.* (RenderOptions) for configs.
:param camera: Camera
:param use_kernel: bool, if false uses pure PyTorch version even if on CUDA.
:return: (H, W, 3), predicted RGB image
"""
imrend_fn_name = f"volume_render_cuvol_image"
if imrend_fn_name in _C.__dict__ and not torch.is_grad_enabled() and not return_raylen:
# Use the fast image render kernel if available
cu_fn = _C.__dict__[imrend_fn_name]
return cu_fn(
self._to_cpp(),
camera._to_cpp(),
self.opt._to_cpp()
)
else:
# Manually generate rays for now
rays = camera.gen_rays()
all_rgb_out = []
for batch_start in range(0, camera.height * camera.width, batch_size):
rgb_out_part = self.volume_render(rays[batch_start:batch_start+batch_size],
use_kernel=use_kernel,
return_raylen=return_raylen)
all_rgb_out.append(rgb_out_part)
all_rgb_out = torch.cat(all_rgb_out, dim=0)
return all_rgb_out.view(camera.height, camera.width, -1)
def volume_render_pos_rgbd(self, rays):
self.origins, self.dirs = rays.origins, rays.dirs
return _RenderFunctionPoseRGBD.apply(
self.origins,
self.dirs,
self._to_cpp(),
self.opt._to_cpp())
def volume_render_pos_rgb(self, rays):
self.origins, self.dirs = rays.origins, rays.dirs
return _RenderFunctionPoseRGB.apply(
self.origins,
self.dirs,
self._to_cpp(),
self.opt._to_cpp())
def volume_render_depth(
self, rays: Rays, sigma_thresh: Optional[float] = None,
use_kernel: bool = True):
"""
Volumetric depth rendering for rays
:param rays: Rays, (origins (N, 3), dirs (N, 3))
:param sigma_thresh: Optional[float]. If None then finds the standard expected termination
(NOTE: this is the absolute length along the ray, not the z-depth as usually expected);
else then finds the first point where sigma strictly exceeds sigma_thresh
:return: (N,)
"""
if use_kernel and self.links.is_cuda and _C is not None:
if sigma_thresh is None:
return _C.volume_render_expected_term(
self._to_cpp(),
rays._to_cpp(),
self.opt._to_cpp())
else:
return _C.volume_render_sigma_thresh(
self._to_cpp(),
rays._to_cpp(),
self.opt._to_cpp(),
sigma_thresh)
else:
return self._volume_render_depth_pytorch(rays)
def volume_render_depth_image(self, camera: Camera, sigma_thresh: Optional[float] = None, batch_size: int = 5000):
"""
Volumetric depth rendering for full image
:param camera: Camera, a single camera
:param sigma_thresh: Optional[float]. If None then finds the standard expected termination
(NOTE: this is the absolute length along the ray, not the z-depth as usually expected);
else then finds the first point where sigma strictly exceeds sigma_thresh
:return: depth (H, W)
"""
rays = camera.gen_rays()
all_depths = []
for batch_start in range(0, camera.height * camera.width, batch_size):
depths = self.volume_render_depth(rays[batch_start: batch_start + batch_size], sigma_thresh)
all_depths.append(depths)
all_depth_out = torch.cat(all_depths, dim=0)
return all_depth_out.view(camera.height, camera.width)
    def resample(
        self,
        reso: Union[int, List[int]],
        sigma_thresh: float = 5.0,
        weight_thresh: float = 0.01,
        dilate: int = 2,
        cameras: Optional[List[Camera]] = None,
        use_z_order: bool=False,
        accelerate: bool=True,
        weight_render_stop_thresh: float = 0.2, # SHOOT, forgot to turn this off for main exps..
        max_elements:int=0
    ):
        """
        Resample and sparsify the grid; used to increase the resolution
        :param reso: int or List[int, int, int], resolution for resampled grid, as in the constructor
        :param sigma_thresh: float, threshold to apply on the sigma (if using sigma thresh i.e. cameras NOT given)
        :param weight_thresh: float, threshold to apply on the weights (if using weight thresh i.e. cameras given)
        :param dilate: int, if true applies dilation of size <dilate> to the 3D mask for nodes to keep in the grid
                             (keep neighbors in all 28 directions, including diagonals, of the desired nodes)
        :param cameras: Optional[List[Camera]], optional list of cameras in OpenCV convention (if given, uses weight thresholding)
        :param use_z_order: bool, if true, stores the data initially in a Z-order curve if possible
        :param accelerate: bool, if true (default), calls grid.accelerate() after resampling
                           to build distance transform table (only if on CUDA)
        :param weight_render_stop_thresh: float, stopping threshold for grid weight render in [0, 1];
                                          0.0 = no thresholding, 1.0 = hides everything.
                                          Useful for force-cutting off
                                          junk that contributes very little at the end of a ray
        :param max_elements: int, if nonzero, an upper bound on the number of elements in the
                             upsampled grid; we will adjust the threshold to match it
        """
        with torch.no_grad():
            device = self.links.device
            if isinstance(reso, int):
                reso = [reso] * 3
            else:
                assert (
                    len(reso) == 3
                ), "reso must be an integer or indexable object of 3 ints"

            # Morton layout only works for cubic power-of-2 grids.
            if use_z_order and not (reso[0] == reso[1] and reso[0] == reso[2] and utils.is_pow2(reso[0])):
                print("Morton code requires a cube grid of power-of-2 size, ignoring...")
                use_z_order = False

            self.capacity: int = reduce(lambda x, y: x * y, reso)
            curr_reso = self.links.shape
            dtype = torch.float32
            # Sample at the centers of the NEW voxels, expressed in OLD
            # grid coordinates.
            reso_facts = [0.5 * curr_reso[i] / reso[i] for i in range(3)]
            X = torch.linspace(
                reso_facts[0] - 0.5,
                curr_reso[0] - reso_facts[0] - 0.5,
                reso[0],
                dtype=dtype,
            )
            Y = torch.linspace(
                reso_facts[1] - 0.5,
                curr_reso[1] - reso_facts[1] - 0.5,
                reso[1],
                dtype=dtype,
            )
            Z = torch.linspace(
                reso_facts[2] - 0.5,
                curr_reso[2] - reso_facts[2] - 0.5,
                reso[2],
                dtype=dtype,
            )
            X, Y, Z = torch.meshgrid(X, Y, Z)
            points = torch.stack((X, Y, Z), dim=-1).view(-1, 3)

            if use_z_order:
                # Permute sample points into Morton order.
                morton = utils.gen_morton(reso[0], dtype=torch.long).view(-1)
                points[morton] = points.clone()
            points = points.to(device=device)

            use_weight_thresh = cameras is not None

            batch_size = 720720
            all_sample_vals_density = []
            # Pass 1: sample density only (colors skipped for speed).
            print('Pass 1/2 (density)')
            for i in tqdm(range(0, len(points), batch_size)):
                sample_vals_density, _ = self.sample(
                    points[i : i + batch_size],
                    grid_coords=True,
                    want_colors=False
                )
                sample_vals_density = sample_vals_density  # NOTE(review): no-op assignment (leftover)
                all_sample_vals_density.append(sample_vals_density)
            # Invalidate all optimizer/gradient state tied to the old layout.
            self.density_data.grad = None
            self.sh_data.grad = None
            self.sparse_grad_indexer = None
            self.sparse_sh_grad_indexer = None
            self.density_rms = None
            self.sh_rms = None

            sample_vals_density = torch.cat(
                    all_sample_vals_density, dim=0).view(reso)
            del all_sample_vals_density
            if use_weight_thresh:
                # Cameras given: keep voxels by max rendered weight.
                gsz = torch.tensor(reso)
                offset = (self._offset * gsz - 0.5).to(device=device)
                scaling = (self._scaling * gsz).to(device=device)
                max_wt_grid = torch.zeros(reso, dtype=torch.float32, device=device)
                print(" Grid weight render", sample_vals_density.shape)
                for i, cam in enumerate(cameras):
                    _C.grid_weight_render(
                        sample_vals_density, cam._to_cpp(),
                        0.5,
                        weight_render_stop_thresh,
                        False,
                        offset, scaling, max_wt_grid
                    )
                sample_vals_mask = max_wt_grid >= weight_thresh
                if max_elements > 0 and max_elements < max_wt_grid.numel() \
                                    and max_elements < torch.count_nonzero(sample_vals_mask):
                    # To bound the memory usage: raise the threshold to the
                    # weight of the max_elements-th largest voxel.
                    weight_thresh_bounded = torch.topk(max_wt_grid.view(-1),
                                     k=max_elements, sorted=False).values.min().item()
                    weight_thresh = max(weight_thresh, weight_thresh_bounded)
                    print(' Readjusted weight thresh to fit to memory:', weight_thresh)
                    sample_vals_mask = max_wt_grid >= weight_thresh
                del max_wt_grid
            else:
                # No cameras: keep voxels by raw density threshold.
                sample_vals_mask = sample_vals_density >= sigma_thresh
                if max_elements > 0 and max_elements < sample_vals_density.numel() \
                                    and max_elements < torch.count_nonzero(sample_vals_mask):
                    # To bound the memory usage (same topk trick as above).
                    sigma_thresh_bounded = torch.topk(sample_vals_density.view(-1),
                                     k=max_elements, sorted=False).values.min().item()
                    sigma_thresh = max(sigma_thresh, sigma_thresh_bounded)
                    print(' Readjusted sigma thresh to fit to memory:', sigma_thresh)
                    sample_vals_mask = sample_vals_density >= sigma_thresh

                if self.opt.last_sample_opaque:
                    # Don't delete the last z layer
                    sample_vals_mask[:, :, -1] = 1

            if dilate:
                # Grow the keep-mask so neighbors of kept voxels survive.
                for i in range(int(dilate)):
                    sample_vals_mask = _C.dilate(sample_vals_mask)
            sample_vals_mask = sample_vals_mask.view(-1)
            sample_vals_density = sample_vals_density.view(-1)
            sample_vals_density = sample_vals_density[sample_vals_mask]
            cnz = torch.count_nonzero(sample_vals_mask).item()

            # Now we can get the colors for the sparse points
            points = points[sample_vals_mask]
            print('Pass 2/2 (color), eval', cnz, 'sparse pts')
            all_sample_vals_sh = []
            for i in tqdm(range(0, len(points), batch_size)):
                _, sample_vals_sh = self.sample(
                    points[i : i + batch_size],
                    grid_coords=True,
                    want_colors=True
                )
                all_sample_vals_sh.append(sample_vals_sh)

            sample_vals_sh = torch.cat(all_sample_vals_sh, dim=0) if len(all_sample_vals_sh) else torch.empty_like(self.sh_data[:0])
            del self.density_data
            del self.sh_data
            del all_sample_vals_sh

            if use_z_order:
                # Invert the Morton permutation to build flat links.
                inv_morton = torch.empty_like(morton)
                inv_morton[morton] = torch.arange(morton.size(0), dtype=morton.dtype)
                inv_idx = inv_morton[sample_vals_mask]
                init_links = torch.full(
                    (sample_vals_mask.size(0),), fill_value=-1, dtype=torch.int32
                )
                init_links[inv_idx] = torch.arange(inv_idx.size(0), dtype=torch.int32)
            else:
                # Cumsum compacts kept voxels into consecutive data rows.
                init_links = (
                    torch.cumsum(sample_vals_mask.to(torch.int32), dim=-1).int() - 1
                )
                init_links[~sample_vals_mask] = -1

            self.capacity = cnz
            print(" New cap:", self.capacity)
            del sample_vals_mask
            print('density', sample_vals_density.shape, sample_vals_density.dtype)
            print('sh', sample_vals_sh.shape, sample_vals_sh.dtype)
            print('links', init_links.shape, init_links.dtype)
            self.density_data = nn.Parameter(sample_vals_density.view(-1, 1).to(device=device))
            self.sh_data = nn.Parameter(sample_vals_sh.to(device=device))
            self.links = init_links.view(reso).to(device=device)

            if accelerate and self.links.is_cuda:
                self.accelerate()
def resize(self, sh_dim: int):
"""
Modify the size of the data stored in the voxels. Called expand/shrink in svox 1.
:param sh_dim: new basis dimension, must be square number
"""
assert utils.isqrt(sh_dim) is not None, "sh_dim (SH) must be a square number"
assert (
sh_dim >= 1 and sh_dim <= utils.MAX_SH_BASIS
), f"sh_dim 1-{utils.MAX_SH_BASIS} supported"
old_sh_dim = self.sh_dim
self.sh_dim = sh_dim
device = self.sh_data.device
old_data = self.sh_data.data.cpu()
shrinking = sh_dim < old_sh_dim
sigma_arr = torch.tensor([0])
if shrinking:
shift = old_sh_dim
arr = torch.arange(sh_dim)
remap = torch.cat([arr, shift + arr, 2 * shift + arr])
else:
shift = sh_dim
arr = torch.arange(old_sh_dim)
remap = torch.cat([arr, shift + arr, 2 * shift + arr])
del self.sh_data
new_data = torch.zeros((old_data.size(0), 3 * sh_dim + 1), device="cpu")
if shrinking:
new_data[:] = old_data[..., remap]
else:
new_data[..., remap] = old_data
new_data = new_data.to(device=device)
self.sh_data = nn.Parameter(new_data)
self.sh_rms = None
def accelerate(self):
"""
Accelerate
"""
assert (
_C is not None and self.links.is_cuda
), "CUDA extension is currently required for accelerate"
_C.accel_dist_prop(self.links)
def world2grid(self, points):
"""
World coordinates to grid coordinates. Grid coordinates are
normalized to [0, n_voxels] in each side
:param points: (N, 3)
:return: (N, 3)
"""
gsz = self._grid_size()
offset = self._offset * gsz - 0.5
scaling = self._scaling * gsz
return torch.addcmul(
offset.to(device=points.device), points, scaling.to(device=points.device)
)
def grid2world(self, points):
"""
Grid coordinates to world coordinates. Grid coordinates are
normalized to [0, n_voxels] in each side
:param points: (N, 3)
:return: (N, 3)
"""
gsz = self._grid_size()
roffset = self.radius * (1.0 / gsz - 1.0) + self.center
rscaling = 2.0 * self.radius / gsz
return torch.addcmul(
roffset.to(device=points.device), points, rscaling.to(device=points.device)
)
def save(self, path: str, compress: bool = False):
"""
Save to a path
"""
save_fn = np.savez_compressed if compress else np.savez
data = {
"radius":self.radius.numpy(),
"center":self.center.numpy(),
"links":self.links.cpu().numpy(),
"density_data":self.density_data.data.cpu().numpy(),
"sh_data":self.sh_data.data.cpu().numpy().astype(np.float16),
}
save_fn(
path,
**data
)
@classmethod
def load(cls, path: str, device: Union[torch.device, str] = "cpu"):
"""
Load from path
"""
z = np.load(path)
if "data" in z.keys():
# Compatibility
all_data = z.f.data
sh_data = all_data[..., 1:]
density_data = all_data[..., :1]
else:
sh_data = z.f.sh_data
density_data = z.f.density_data
links = z.f.links
sh_dim = (sh_data.shape[1]) // 3
radius = z.f.radius.tolist() if "radius" in z.files else [1.0, 1.0, 1.0]
center = z.f.center.tolist() if "center" in z.files else [0.0, 0.0, 0.0]
grid = cls(
1,
radius=radius,
center=center,
sh_dim=sh_dim,
use_z_order=False,
device="cpu")
if sh_data.dtype != np.float32:
sh_data = sh_data.astype(np.float32)
if density_data.dtype != np.float32:
density_data = density_data.astype(np.float32)
sh_data = torch.from_numpy(sh_data).to(device=device)
density_data = torch.from_numpy(density_data).to(device=device)
grid.sh_data = nn.Parameter(sh_data)
grid.density_data = nn.Parameter(density_data)
grid.links = torch.from_numpy(links).to(device=device)
grid.capacity = grid.sh_data.size(0)
if grid.links.is_cuda:
grid.accelerate()
return grid
def tv(self, logalpha: bool=False, logalpha_delta: float=2.0):
"""
Compute total variation over sigma,
similar to Neural Volumes [Lombardi et al., ToG 2019]
:return: torch.Tensor, size scalar, the TV value (sum over channels,
mean over voxels)
"""
assert (
_C is not None and self.sh_data.is_cuda
), "CUDA extension is currently required for total variation"
assert not logalpha, "No longer supported"
return _TotalVariationFunction.apply(
self.density_data, self.links, 0, 1, logalpha, logalpha_delta, False)
def tv_color(self,
start_dim: int = 0, end_dim: Optional[int] = None,
logalpha: bool=False, logalpha_delta: float=2.0):
"""
Compute total variation on color
:param start_dim: int, first color channel dimension to compute TV over (inclusive).
Default 0.
:param end_dim: int, last color channel dimension to compute TV over (exclusive).
Default None = all dimensions until the end.
:return: torch.Tensor, size scalar, the TV value (sum over channels,
mean over voxels)
"""
assert (
_C is not None and self.sh_data.is_cuda
), "CUDA extension is currently required for total variation"
assert not logalpha, "No longer supported"
if end_dim is None:
end_dim = self.sh_data.size(1)
end_dim = end_dim + self.sh_data.size(1) if end_dim < 0 else end_dim
start_dim = start_dim + self.sh_data.size(1) if start_dim < 0 else start_dim
return _TotalVariationFunction.apply(
self.sh_data, self.links, start_dim, end_dim, logalpha, logalpha_delta, True)
    def inplace_tv_grad(self, grad: torch.Tensor,
                        scaling: float = 1.0,
                        sparse_frac: float = 0.01,
                        logalpha: bool=False, logalpha_delta: float=2.0,
                        contiguous: bool = True
                        ):
        """
        Add gradient of total variation for sigma as in Neural Volumes
        [Lombardi et al., ToG 2019]
        directly into the gradient tensor, multiplied by 'scaling'

        :param grad: CUDA tensor the TV gradient is accumulated into, in place
        :param sparse_frac: fraction of grid cells to sample; values < 1.0 take
            the sparse code path (see _get_rand_cells)
        :param contiguous: sample one contiguous window of cells rather than
            independent random cells
        """
        assert (
            _C is not None and self.density_data.is_cuda and grad.is_cuda
        ), "CUDA extension is currently required for total variation"
        assert not logalpha, "No longer supported"
        rand_cells = self._get_rand_cells(sparse_frac, contiguous=contiguous)
        if rand_cells is not None:
            if rand_cells.size(0) > 0:
                # Sparse path: accumulate TV gradient only on the sampled cells.
                # Positional args: channel range 0..1 (single density channel);
                # False presumably selects density (non-color) mode, mirroring tv().
                _C.tv_grad_sparse(self.links, self.density_data,
                                  rand_cells,
                                  self._get_sparse_grad_indexer(),
                                  0, 1, scaling,
                                  logalpha, logalpha_delta,
                                  False,
                                  self.opt.last_sample_opaque,
                                  grad)
        else:
            # Dense path: TV gradient over the full grid.
            _C.tv_grad(self.links, self.density_data, 0, 1, scaling,
                       logalpha, logalpha_delta,
                       False,
                       grad)
        # The sparse-grad indexer is consumed by the call above; reset it.
        self.sparse_grad_indexer : Optional[torch.Tensor] = None
    def inplace_tv_color_grad(
        self,
        grad: torch.Tensor,
        start_dim: int = 0,
        end_dim: Optional[int] = None,
        scaling: float = 1.0,
        sparse_frac: float = 0.01,
        logalpha: bool=False,
        logalpha_delta: float=2.0,
        contiguous: bool = True
    ):
        """
        Add gradient of total variation for color
        directly into the gradient tensor, multiplied by 'scaling'
        :param start_dim: int, first color channel dimension to compute TV over (inclusive).
        Default 0.
        :param end_dim: int, last color channel dimension to compute TV over (exclusive).
        Default None = all dimensions until the end.
        """
        assert (
            _C is not None and self.sh_data.is_cuda and grad.is_cuda
        ), "CUDA extension is currently required for total variation"
        assert not logalpha, "No longer supported"
        # Normalize omitted / negative channel bounds (same convention as tv_color).
        if end_dim is None:
            end_dim = self.sh_data.size(1)
        end_dim = end_dim + self.sh_data.size(1) if end_dim < 0 else end_dim
        start_dim = start_dim + self.sh_data.size(1) if start_dim < 0 else start_dim
        rand_cells = self._get_rand_cells(sparse_frac, contiguous=contiguous)
        if rand_cells is not None:
            if rand_cells.size(0) > 0:
                indexer = self._get_sparse_sh_grad_indexer()
                # with utils.Timing("actual_tv_color"):
                # NOTE(review): True = color mode; the following False presumably
                # takes the place of last_sample_opaque (the density variant passes
                # self.opt.last_sample_opaque here) -- confirm against the kernel.
                _C.tv_grad_sparse(self.links, self.sh_data,
                                  rand_cells,
                                  indexer,
                                  start_dim, end_dim, scaling,
                                  logalpha,
                                  logalpha_delta,
                                  True,
                                  False,
                                  grad)
        else:
            # Dense path over the full grid.
            _C.tv_grad(self.links, self.sh_data, start_dim, end_dim, scaling,
                       logalpha,
                       logalpha_delta,
                       True,
                       grad)
        # Consumed by the call above; reset for the next iteration.
        self.sparse_sh_grad_indexer = None
    def optim_density_step(self, lr: float, beta: float=0.9, epsilon: float = 1e-8):
        """Fused in-place RMSprop step on density_data via the CUDA extension.

        :param lr: learning rate
        :param beta: RMS decay coefficient
        :param epsilon: denominator stabilizer
        """
        assert (_C is not None and self.sh_data.is_cuda), "CUDA extension is currently required for optimizers"
        # Indexer restricts the update to recently-touched cells (empty => dense).
        indexer = self._maybe_convert_sparse_grad_indexer()
        # Lazily (re)allocate the RMS accumulator when missing or when the grid
        # was resized/resampled since the last step.
        if (self.density_rms is None
            or self.density_rms.shape != self.density_data.shape):
            del self.density_rms
            self.density_rms = torch.zeros_like(self.density_data.data) # FIXME init?
        # NOTE(review): the trailing -1e9 / lr arguments appear to be a clamp
        # minimum and a secondary learning rate -- confirm against the
        # _C.rmsprop_step kernel signature.
        _C.rmsprop_step(
            self.density_data.data,
            self.density_rms,
            self.density_data.grad,
            indexer,
            beta,
            lr,
            epsilon,
            -1e9,
            lr
        )
    def optim_sh_step(self, lr: float, beta: float=0.9, epsilon: float = 1e-8,
                      optim: str = 'rmsprop'):
        """Fused in-place RMSprop step on sh_data via the CUDA extension.

        :param lr: learning rate
        :param beta: RMS decay coefficient
        :param epsilon: denominator stabilizer
        :param optim: accepted but currently unused -- the implementation always
            runs RMSprop. TODO(review): honor or remove this parameter.
        """
        assert (_C is not None and self.sh_data.is_cuda), "CUDA extension is currently required for optimizers"
        # sh=True selects the SH-specific sparse indexer.
        indexer = self._maybe_convert_sparse_grad_indexer(sh=True)
        # Lazily (re)allocate the RMS accumulator on shape change.
        if self.sh_rms is None or self.sh_rms.shape != self.sh_data.shape:
            del self.sh_rms
            self.sh_rms = torch.zeros_like(self.sh_data.data) # FIXME init?
        # NOTE(review): trailing -1e9 / lr presumably clamp minimum and secondary
        # lr, matching optim_density_step -- confirm against the kernel.
        _C.rmsprop_step(
            self.sh_data.data,
            self.sh_rms,
            self.sh_data.grad,
            indexer,
            beta,
            lr,
            epsilon,
            -1e9,
            lr
        )
def __repr__(self):
return (
f"svox2.SparseGrid(sh_dim={self.sh_dim}, "
+ f"reso={list(self.links.shape)}, "
+ f"capacity:{self.sh_data.size(0)})"
)
    def _to_cpp(self, grid_coords: bool = False):
        """
        Generate object to pass to C++

        :param grid_coords: if True, emit an identity transform so positions are
            interpreted directly in grid coordinates; otherwise bake the
            world-to-grid offset/scaling into the spec.
        """
        gspec = _C.SparseGridSpec()
        gspec.density_data = self.density_data
        gspec.sh_data = self.sh_data
        gspec.links = self.links
        if grid_coords:
            # Identity transform; shapes/devices borrowed from self._offset.
            gspec._offset = torch.zeros_like(self._offset)
            gspec._scaling = torch.ones_like(self._offset)
        else:
            gsz = self._grid_size()
            # Convert normalized offset/scale to voxel units; the -0.5 shift
            # presumably aligns to voxel centers -- confirm against the kernels.
            gspec._offset = self._offset * gsz - 0.5
            gspec._scaling = self._scaling * gsz
        gspec.sh_dim = self.sh_dim
        return gspec
def _grid_size(self):
return torch.tensor(self.links.shape, device="cpu", dtype=torch.float32)
    def _get_data_grads(self):
        """Return [density grad buffer, sh grad buffer], allocating as needed.

        Frozen parameters (requires_grad=False) get a fresh zero tensor; trainable
        ones get their .grad buffer, (re)allocated when missing or shape-mismatched
        (e.g. after grid resampling).
        """
        ret = []
        for subitem in ["density_data", "sh_data"]:
            # Explicit __getattr__ call -- presumably deliberate to route through
            # nn.Module's parameter lookup; verify before replacing with getattr().
            param = self.__getattr__(subitem)
            if not param.requires_grad:
                ret.append(torch.zeros_like(param.data))
            else:
                if (
                    not hasattr(param, "grad")
                    or param.grad is None
                    or param.grad.shape != param.data.shape
                ):
                    if hasattr(param, "grad"):
                        del param.grad
                    param.grad = torch.zeros_like(param.data)
                ret.append(param.grad)
        return ret
def _get_sparse_grad_indexer(self):
indexer = self.sparse_grad_indexer
if indexer is None:
indexer = torch.empty((0,), dtype=torch.bool, device=self.density_data.device)
return indexer
def _get_sparse_sh_grad_indexer(self):
indexer = self.sparse_sh_grad_indexer
if indexer is None:
indexer = torch.empty((0,), dtype=torch.bool, device=self.density_data.device)
return indexer
def _maybe_convert_sparse_grad_indexer(self, sh=False):
"""
Automatically convert sparse grad indexer from mask to
indices, if it is efficient
"""
indexer = self.sparse_sh_grad_indexer if sh else self.sparse_grad_indexer
if indexer is None:
return torch.empty((), device=self.density_data.device)
if (
indexer.dtype == torch.bool and
torch.count_nonzero(indexer).item()
< indexer.size(0) // 8
):
# Highly sparse (use index)
indexer = torch.nonzero(indexer.flatten(), as_tuple=False).flatten()
return indexer
    def _get_rand_cells(self, sparse_frac: float, force: bool = False, contiguous:bool=True):
        """Sample a subset of flattened grid cell indices for sparse TV.

        Returns an int32 tensor of cell indices, or None when sparse_frac >= 1.0
        and force is False (meaning: use the dense path).
        """
        if sparse_frac < 1.0 or force:
            assert self.sparse_grad_indexer is None or self.sparse_grad_indexer.dtype == torch.bool, \
                   "please call sparse loss after rendering and before gradient updates"
            grid_size = self.links.size(0) * self.links.size(1) * self.links.size(2)
            # At least one cell, even for tiny fractions.
            sparse_num = max(int(sparse_frac * grid_size), 1)
            if contiguous:
                # One contiguous window starting at a random cell.
                start = np.random.randint(0, grid_size)
                arr = torch.arange(start, start + sparse_num, dtype=torch.int32, device=
                                   self.links.device)
                if start > grid_size - sparse_num:
                    # Window runs past the end: wrap the overflow around.
                    # grid_size - sparse_num - start is negative here, so the slice
                    # selects the last (start + sparse_num - grid_size) entries and
                    # maps them back to 0, 1, ... via the subtraction.
                    arr[grid_size - sparse_num - start:] -= grid_size
                return arr
            else:
                # Independent uniform samples (may contain duplicates).
                return torch.randint(0, grid_size, (sparse_num,), dtype=torch.int32, device=
                                     self.links.device)
return None | ysus33/RGB-D_Plenoxel_Mapping_Tracking | svox2/svox2.py | svox2.py | py | 65,491 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "svox2.csrc.RenderOptions",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "svox2.csrc",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.Tenso... |
3266365657 | # forms.py
from django import forms
from .models import Mensaje, Conversacion
from django.contrib.auth.models import User
class MensajeForm(forms.ModelForm):
    """ModelForm exposing only the text body of a Mensaje."""
    class Meta:
        model = Mensaje
        fields = ('contenido',)
class EnviarMensajeForm(forms.Form):
    """Plain form for typing a chat message, rendered as a single-row textarea."""
    contenido = forms.CharField(label="Mensaje", widget=forms.Textarea(attrs={'rows': 1, 'cols': 85}))
class ConversacionForm(forms.ModelForm):
    """Create/edit a Conversacion, choosing participants via checkboxes.

    Requires a ``user`` keyword argument; that user is pre-selected in the
    participant list.
    """
    participantes = forms.ModelMultipleChoiceField(queryset=User.objects.all(), widget=forms.CheckboxSelectMultiple)
    def __init__(self, *args, **kwargs):
        # ``user`` is mandatory: raises KeyError if the caller omits it.
        user = kwargs.pop('user')
        super().__init__(*args, **kwargs)
        # Re-evaluate the queryset per instantiation (the class-level one above is
        # built once at import time), then seed the selection with the current user.
        self.fields['participantes'].queryset = User.objects.all()
        self.fields['participantes'].initial = [user.id]
    class Meta:
        model = Conversacion
fields = ('participantes',) | arielgodoy/EntregafinalPython-Agodoy | chat/forms.py | forms.py | py | 863 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "models.Mensaje",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.forms.For... |
42903865352 | import glob
import cv2 as cv
# Squared-difference, normalized: for this method a LOWER score means a BETTER match.
method = cv.TM_SQDIFF_NORMED
# Path / Threshold # Thresholds were set manually, by careful examination of examples
# Each entry pairs the list of template image paths with its acceptance threshold.
template_blue_locks = ([img for img in glob.glob("./templates_heist/blue_lock/*.png")],0.05)
template_blue_keys = ([img for img in glob.glob("./templates_heist/blue_key/*.png")],0.07)
template_green_locks = ([img for img in glob.glob("./templates_heist/green_lock/*.png")],0.06)
template_green_keys = ([img for img in glob.glob("./templates_heist/green_key/*.png")],0.15)
template_red_locks = ([img for img in glob.glob("./templates_heist/red_lock/*.png")],0.07)
template_red_keys = ([img for img in glob.glob("./templates_heist/red_key/*.png")],0.22)
template_goal = ([img for img in glob.glob("./templates_heist/goal/*.png")],0.05)
def _has_template(img, template_spec):
    """Shared helper: True when any template in (paths, threshold) matches *img*.

    Replaces six copy-pasted wrappers with a single implementation.
    """
    found, _ = find_template(img, template_spec[0], template_spec[1])
    return found


def has_blue_locks(img):
    return _has_template(img, template_blue_locks)


def has_green_locks(img):
    return _has_template(img, template_green_locks)


def has_red_locks(img):
    return _has_template(img, template_red_locks)


def has_blue_keys(img):
    return _has_template(img, template_blue_keys)


def has_green_keys(img):
    return _has_template(img, template_green_keys)


def has_red_keys(img):
    return _has_template(img, template_red_keys)


def has_no_Keys_and_Doors(img):
    """True when the frame shows no locks and no keys of any color."""
    checks = (has_blue_locks, has_green_locks, has_red_locks,
              has_blue_keys, has_green_keys, has_red_keys)
    # Generator keeps the original short-circuit behavior.
    return not any(check(img) for check in checks)


def has_goal(img):
    return _has_template(img, template_goal)
# NOTE(review): module-level scratch state -- only red locks are ever appended
# and nothing visible in this file reads `templates`; looks like leftover debug
# code. Verify no external importer uses it before removing.
templates =[]
templates.append(template_red_locks)
def find_template(img, templates, threshold):
    '''
    For a given image finds the best matching template. If no good template is found this function returns False
    otherwise True
    :param img: The image that is to be examined
    :param templates: List of Templates locations
    :param threshold: Threshold that is used for the templates. If the error is bigger than the given treshhold
                      match will not be accepted
    :return: returns True if one good template match is found, False Otherwise. Returns the location of the best
             Matching Template in the picture for further use.
    '''
    best_matching = threshold
    best_min, best_max, best_minloc, best_maxloc = 0, 0, 0, 0
    # BUGFIX: initialize w/h so an empty template list cannot raise NameError
    # on the return statements below.
    w, h = 0, 0
    for next_template in templates:
        # NOTE(review): cv.COLOR_BGR2RGB is a cvtColor code, not an imread flag;
        # as an imread flag its value (4) means IMREAD_ANYCOLOR -- confirm intent.
        next_template = cv.imread(next_template, cv.COLOR_BGR2RGB)
        w, h, _ = next_template.shape[::-1]
        result = cv.matchTemplate(img, next_template, method)
        min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
        # TM_SQDIFF_NORMED: lower is better, so keep the smallest error seen.
        if min_val < best_matching:
            best_min = min_val
            best_max = max_val
            best_minloc = min_loc
            best_maxloc = max_loc
            best_matching = min_val
    # Unchanged best_matching => no candidate beat the threshold. (The float
    # equality works because best_matching is only ever replaced, never
    # recomputed, but a boolean "found" flag would be more robust.)
    if best_matching == threshold:
        return False, [img, best_min, best_max, best_minloc, best_maxloc, w, h]
    return True, [img, best_min, best_max, best_minloc, best_maxloc, w, h]
| neuroevolution-ai/ProcgenAutoencoder | data_generation/balance_data.py | balance_data.py | py | 3,236 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cv2.TM_SQDIFF_NORMED",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_num... |
34651163338 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from datetime import datetime
from xml.dom.minidom import Document
from xml.etree import cElementTree as cET
class TagBase:
    """Shared base for the XML element wrappers: writes values into tags as text."""

    def __init__(self):
        pass

    def setTextNode(self, tag, data):
        """Append str(data) to *tag* as a text node.

        Empty strings and None are skipped entirely; booleans (and their string
        forms 'True'/'False') are normalized to 1/0 first.
        """
        if data == '' or data is None:
            return
        if data is False or data == 'False':
            data = 0
        elif data is True or data == 'True':
            data = 1
        tag.appendChild(self.xml.createTextNode(str(data)))
class Sample(TagBase):
    """Wraps a detached <sample_id sample="..."> XML element."""

    def __init__(self, id_):
        doc = Document()
        element = doc.createElement("sample_id")
        element.setAttribute("sample", str(id_))
        self.xml = doc
        self.sample_id = element
class ProcessOrder(TagBase):
    """Builds a <process_order number="N"> element containing <process>,
    <data_type>, <curves> and a fixed-order <param> list.
    """
    def __init__(self, id_, ptype_, dtype_, curves, parameters):
        # parameters: mapping of capitalized keys ('Time_per_channel', ...) to
        # values; only keys present in the mapping get text content in their tag.
        self.parameters = parameters
        self.xml = Document()
        self.process_order = self.xml.createElement("process_order")
        self.process_order.setAttribute("number", str(id_))
        self.process = self.xml.createElement("process")
        self.data_type = self.xml.createElement("data_type")
        self.curves = self.xml.createElement("curves")
        self.param = self.xml.createElement("param")
        # TagBase.setTextNode writes a literal value; this class' own setTextNode
        # (below) instead looks values up in self.parameters -- hence the explicit
        # super() calls for the two literal fields.
        super(ProcessOrder, self).setTextNode(self.process, ptype_)
        super(ProcessOrder, self).setTextNode(self.data_type, dtype_)
        self.process_order.appendChild(self.process)
        self.process_order.appendChild(self.data_type)
        self.process_order.appendChild(self.curves)
        self.process_order.appendChild(self.param)
        for curve in curves:
            self.curves.appendChild(curve)
        # One element per known acquisition parameter; element name is the
        # lowercase form of the parameters-dict key.
        self.time_per_channel = self.xml.createElement("time_per_channel")
        self.beta_irradiation_time = self.xml.createElement("beta_irradiation_time")
        self.beta_dose = self.xml.createElement("beta_dose")
        self.external_irradiation = self.xml.createElement("external_irradiation")
        self.external_dose = self.xml.createElement("external_dose")
        self.preheating_temperature = self.xml.createElement("preheating_temperature")
        self.measuring_temperature = self.xml.createElement("measuring_temperature")
        self.preheating_rate = self.xml.createElement("preheating_rate")
        self.heating_rate = self.xml.createElement("heating_rate")
        self.light_source = self.xml.createElement("light_source")
        self.optical_power = self.xml.createElement("optical_power")
        self.electric_stimulation = self.xml.createElement("electric_stimulation")
        self.electric_frequency = self.xml.createElement("electric_frequency")
        self.time_beta_irradiation = self.xml.createElement("time_beta_irradiation")
        self.time_external_irradiation = self.xml.createElement("time_external_irradiation")
        self.time_measurement = self.xml.createElement("time_measurement")
        self.illumination_source = self.xml.createElement("illumination_source")
        self.illumination_power = self.xml.createElement("illumination_power")
        self.illumination_temperature = self.xml.createElement("illumination_temperature")
        # Fill and attach each parameter tag; the call order fixes the child
        # order inside <param>, which the loader reads back positionally.
        self.setTextNode(self.time_per_channel, 'Time_per_channel')
        self.setTextNode(self.beta_irradiation_time, 'Beta_irradiation_time')
        self.setTextNode(self.beta_dose, 'Beta_dose')
        self.setTextNode(self.external_irradiation, 'External_irradiation')
        self.setTextNode(self.external_dose, 'External_dose')
        self.setTextNode(self.preheating_temperature, 'Preheating_temperature')
        self.setTextNode(self.measuring_temperature, 'Measuring_temperature')
        self.setTextNode(self.preheating_rate, 'Preheating_rate')
        self.setTextNode(self.heating_rate, 'Heating_rate')
        self.setTextNode(self.light_source, 'Light_source')
        self.setTextNode(self.optical_power, 'Optical_power')
        self.setTextNode(self.electric_stimulation, 'Electric_stimulation')
        self.setTextNode(self.electric_frequency, 'Electric_frequency')
        self.setTextNode(self.time_beta_irradiation, 'Time_beta_irradiation')
        self.setTextNode(self.time_external_irradiation, 'Time_external_irradiation')
        self.setTextNode(self.time_measurement, 'Time_measurement')
        self.setTextNode(self.illumination_source, 'Illumination_source')
        self.setTextNode(self.illumination_power, 'Illumination_power')
        self.setTextNode(self.illumination_temperature, 'Illumination_temperature')
    def setTextNode(self, tag, key):
        """Override: look *key* up in self.parameters, write the value (with the
        same bool normalization as TagBase), and attach the tag to <param>.
        """
        if key in self.parameters:
            data = self.parameters[key]
            if data is False or data == 'False':
                data = 0
            if data is True or data == 'True':
                data = 1
            if data != '' and data is not None:
                tag.appendChild(self.xml.createTextNode(str(data)))
        # The tag is appended even when no value was written, keeping every
        # parameter slot present (placeholder order matters to the loader).
        self.param.appendChild(tag)
class Curve(TagBase):
    """Wraps a <curve_N> element holding optional signal and background ranges."""

    def __init__(self, number, signal_active, background_active, count_signal, low_signal,
                 high_signal, count_background, low_background, high_background):
        self.xml = Document()
        self.curve = self.xml.createElement("curve_" + str(number))
        if signal_active:
            self._add_channel("signal", count_signal, low_signal, high_signal)
        if background_active:
            self._add_channel("background", count_background, low_background, high_background)

    def _add_channel(self, kind, count, low, high):
        """Create count/low/high_<kind> children, fill them, attach them to the
        curve, and expose each as an attribute (e.g. self.count_signal)."""
        entries = []
        for prefix, value in (("count", count), ("low", low), ("high", high)):
            element = self.xml.createElement("%s_%s" % (prefix, kind))
            setattr(self, "%s_%s" % (prefix, kind), element)
            entries.append((element, value))
        for element, value in entries:
            self.setTextNode(element, value)
        for element, _ in entries:
            self.curve.appendChild(element)
class CreateRLF(TagBase):
    """Builds an <rlf> report document in memory: header fields plus a <rep>
    element that collects samples, process orders and curves. Supports
    previewing and saving to disk.
    """

    def __init__(self, samples_amount, name, owner, nitrogen_use, dose_rate,
                 external_dose_rate, protocol, status, reader_id, datecrea):
        self.xml = Document()
        self.rlf = self.xml.createElement("rlf")
        self.xml.appendChild(self.rlf)
        # Header elements; attach order below matters because LoadRLF.open()
        # reads the children back positionally.
        self.name = self.xml.createElement("name")
        self.status = self.xml.createElement("status")
        self.datecrea = self.xml.createElement("date_crea")
        self.datemod = self.xml.createElement("date_mod")
        self.owner = self.xml.createElement("owner")
        self.samples_amount = self.xml.createElement("samples_amount")
        self.reader_id = self.xml.createElement("reader_id")
        self.nitrogen_use = self.xml.createElement("N2_flow")
        self.dose_rate = self.xml.createElement("dose_rate")
        self.external_dose_rate = self.xml.createElement("external_dose_rate")
        self.protocol = self.xml.createElement("protocol")
        self.rep = self.xml.createElement("rep")
        datemod = datetime.now()
        # Creation date defaults to the modification timestamp when absent.
        if (not datecrea) or datecrea is None:
            datecrea = datemod
        self.setTextNode(self.samples_amount, samples_amount)
        self.setTextNode(self.name, name)
        self.setTextNode(self.owner, owner)
        self.setTextNode(self.nitrogen_use, nitrogen_use)
        self.setTextNode(self.dose_rate, dose_rate)
        self.setTextNode(self.external_dose_rate, external_dose_rate)
        self.setTextNode(self.protocol, protocol)
        self.setTextNode(self.reader_id, reader_id)
        self.setTextNode(self.datecrea, datecrea)
        self.setTextNode(self.status, status)
        self.setTextNode(self.datemod, datemod)
        self.rlf.appendChild(self.name)
        self.rlf.appendChild(self.status)
        self.rlf.appendChild(self.datecrea)
        self.rlf.appendChild(self.datemod)
        self.rlf.appendChild(self.owner)
        self.rlf.appendChild(self.samples_amount)
        self.rlf.appendChild(self.reader_id)
        self.rlf.appendChild(self.nitrogen_use)
        self.rlf.appendChild(self.dose_rate)
        self.rlf.appendChild(self.external_dose_rate)
        self.rlf.appendChild(self.protocol)
        self.rlf.appendChild(self.rep)

    def refreshDateMod(self):
        """Stamp <date_mod> with the current time."""
        self.datemod.firstChild.data = str(datetime.now())

    def createSample(self, id_):
        """Return the <sample_id> element for *id_*, keeping samples sorted.

        An existing sample with the same numeric id is returned unchanged;
        otherwise the new element is inserted in ascending id order.
        """
        self.refreshDateMod()
        samples_ids = self.xml.getElementsByTagName("rep")[0].getElementsByTagName('sample_id')
        new_sample = Sample(id_).sample_id
        if len(samples_ids) > 0:
            for sample_id in samples_ids:
                value = sample_id.attributes['sample'].value
                if int(value) == int(id_):
                    return sample_id
                if int(value) > int(id_):
                    # BUGFIX: was `self.seq.insertBefore(...)` -- `self.seq` is
                    # never defined anywhere, so out-of-order insertion raised
                    # AttributeError. The sample children live under self.rep.
                    self.rep.insertBefore(new_sample, sample_id)
                    return new_sample
        self.rep.appendChild(new_sample)
        return new_sample

    def createProcessOrder(self, sample_id, id_, ptype_, dtype_, curves, parameters):
        """Attach a new <process_order> to *sample_id* and return it."""
        self.refreshDateMod()
        process_order = ProcessOrder(id_, ptype_, dtype_, curves, parameters).process_order
        sample_id.appendChild(process_order)
        return process_order

    def createCurve(self, number, signal_active, background_active, count_signal, low_signal,
                    high_signal, count_background, low_background, high_background):
        """Build and return a detached <curve_N> element."""
        self.refreshDateMod()
        curve = Curve(number, signal_active, background_active, count_signal, low_signal,
                      high_signal, count_background, low_background, high_background).curve
        return curve

    def preview(self):
        """Return the document as pretty-printed XML text."""
        return self.xml.toprettyxml(indent=" ")

    def save(self, path, rewrite=False):
        """Write the document to *path* (.rlf or .xml).

        Returns True on success, False when the file exists and *rewrite* is
        falsy. Raises ValueError for bad paths/extensions or write failures.
        """
        if os.path.exists(path):
            if os.path.isfile(path):
                ext = os.path.splitext(path)[-1]
                if ext not in ('.xml', '.rlf',):
                    raise ValueError('Incorrect format, must be a rlf or xml file.')
                else:
                    if rewrite:
                        try:
                            # BUGFIX: use a context manager -- the original left
                            # the file handle open on every call.
                            with open(path, 'w') as document:
                                self.xml.writexml(document, addindent=' ', newl='\n', encoding='iso-8859-1')
                            return True
                        except Exception:
                            # Narrowed from a bare `except:` so KeyboardInterrupt /
                            # SystemExit are no longer swallowed.
                            raise ValueError('Error while writing.')
                    return False
            else:
                raise ValueError('Invalid file path.')
        else:
            dirname = os.path.dirname(path)
            if os.path.exists(dirname) or dirname == '':
                ext = os.path.splitext(path)[-1]
                if ext not in ('.xml', '.rlf',):
                    path += '.rlf'
                with open(path, 'w') as document:
                    self.xml.writexml(document, addindent=' ', newl='\n', encoding='iso-8859-1')
                return True
            else:
                raise ValueError('Directory "{0}" does not exist.'.format(dirname))
class LoadRLF:
    """Reads an .rlf XML report back into plain Python structures."""

    def __init__(self, path):
        """Validate *path* and parse it; raises ValueError on any problem."""
        if os.path.exists(path):
            if os.path.isfile(path):
                ext = os.path.splitext(path)[-1]
                if ext != '.rlf':
                    raise ValueError('Incorrect format, must be a rlf file')
            else:
                raise ValueError('The path is not valid, must be a rlf file')
        else:
            raise ValueError('Directory "{0}" does not exist.'.format(path))
        try:
            self.path = path
            # NOTE(review): `cET` is imported at file top from
            # xml.etree.cElementTree, removed in Python 3.9 -- that import
            # should be switched to xml.etree.ElementTree.
            self.tree = cET.parse(path)
            self.root = self.tree.getroot()
        except Exception:
            # Narrowed from a bare `except:`.
            raise ValueError('Error while reading')

    def build(self, root):
        """Recursively convert *root* into [element, children-or-text, attrib dict]."""
        # BUGFIX: Element.getchildren() was deprecated since 3.2 and removed in
        # Python 3.9; iterating the element yields the same child list.
        childs = list(root)
        keys = root.keys()
        data = {}
        for key in keys:
            data[key] = root.get(key)
        tree = [root, childs, data]
        if len(childs) == 0:
            # Leaves carry their text content instead of a child list.
            tree[1] = root.text
        else:
            for i in range(len(tree[1])):
                child = self.build(tree[1][i])
                tree[1][i] = child
        return tree

    def getCleanData(self, data):
        """Normalize missing values ('None' or None) to the empty string."""
        if data == 'None' or data is None:
            return ''
        return str(data)

    def open(self):
        """Return (general, table): header fields and the per-sample
        process-order commands. Raises RuntimeError when the header cannot
        be read.
        """
        general = {}
        try:
            tree = self.build(self.root)
            # Header children are read positionally; this must match the attach
            # order used by CreateRLF.__init__.
            general['name'] = self.getCleanData(tree[1][0][1])
            general['status'] = self.getCleanData(tree[1][1][1])
            general['creation_date'] = self.getCleanData(tree[1][2][1])
            general['modification_date'] = self.getCleanData(tree[1][3][1])
            general['owner'] = self.getCleanData(tree[1][4][1])
            general['samples_amount'] = self.getCleanData(tree[1][5][1])
            general['reader_id'] = self.getCleanData(tree[1][6][1])
            general['nitrogen_use'] = int(tree[1][7][1])
            general['dose_rate'] = float(tree[1][8][1])
            general['external_dose_rate'] = float(tree[1][9][1])
            general['protocol'] = self.getCleanData(tree[1][10][1])
        except Exception:
            raise RuntimeError("General data of the report can't be read.")
        table = []
        for rep in tree[1][11:]:
            for sample in rep[1]:
                tuple_ = ['', []]
                for process_order in sample[1]:
                    command = {
                        'process_name': self.getCleanData(process_order[1][0][1]),
                        'data_type': self.getCleanData(process_order[1][1][1]),
                        'process_order_id': int(process_order[2]['number'])
                    }
                    curves = {}
                    for curve in process_order[1][2][1]:
                        # FIX: read the tag name directly instead of parsing it
                        # out of the element's repr() string ("<Element 'curve_1'
                        # at 0x...>"), which is fragile implementation detail.
                        curve_num = curve[0].tag.split('_')[1]
                        curve_data = {}
                        for data in curve[1]:
                            data_key = data[0].tag
                            curve_data[data_key] = data[1]
                        curves[curve_num] = curve_data
                    parameters = {}
                    for param in process_order[1][3][1]:
                        parameter_key = param[0].tag
                        parameter_value = param[1]
                        parameters[parameter_key] = parameter_value
                    command['curves'] = curves
                    command['parameters'] = parameters
                    tuple_[1].append(command)
                sample_id = sample[2]['sample']
                tuple_[0] = sample_id
                table.append(tuple_)
        return general, table
| carlos-ferras/Sequence-ToolKit | model/handle_rlf.py | handle_rlf.py | py | 15,117 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "xml.dom.minidom.Document",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.Document",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.Document",
"line_number": 111,
"usage_type": "call"
},
{
"api_n... |
16823303848 | from uuid import uuid4
def randId():
    """Return a fresh random identifier: 32 lowercase hex characters."""
    token = uuid4()
    return token.hex
def loggedIn(session, LoggedIn):
    """Return the username for the session's login token, or False.

    False is returned when the session has no (non-None) 'user' token or when
    no LoggedIn row matches it.
    """
    token = session.get('user')
    if token is None:
        return False
    record = LoggedIn.query.filter_by(rand_id=str(token)).first()
    return record.username if record else False
def logoutUser(session, LoggedIn, db):
    """Delete the LoggedIn row matching the session token and clear the session."""
    token = session['user']
    # The rand_id column is matched against the utf-8 encoded token bytes.
    record = LoggedIn.query.filter_by(rand_id=str(token).encode('utf-8')).first()
    db.session.delete(record)
    db.session.commit()
    session.pop('user', None)
def split_skills(skills):
    """Split a comma-separated skills string into a list of strings."""
    return skills.split(',')
| billz96/Pycourses | helpers.py | helpers.py | py | 669 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "uuid.uuid4",
"line_number": 4,
"usage_type": "call"
}
] |
31113875208 | """
The main script that serves as the entry-point for all kinds of training experiments.
"""
from __future__ import annotations
import logging
from functools import partial
from typing import TYPE_CHECKING, Any, Callable, Mapping, Optional, Sequence, Tuple, Union, cast
import torch
from al.core.data.collators import JointBatchToTensorDataCollator
from al.core.models.waal.xai_model import WAALXAIModel
from al.core.training.query_strategies.factory import QueryStrategyFactory
from al.core.training.trainer import DALTrainer
from ignite.contrib.handlers import TensorboardLogger
from xai_torch.core.args import Arguments
from xai_torch.core.constants import DataKeys
from xai_torch.core.models.utilities.data_collators import BatchToTensorDataCollator
from xai_torch.core.models.xai_model import XAIModel
from xai_torch.core.training.utilities import reset_random_seeds
if TYPE_CHECKING:
from al.core.training.query_strategies.base import QueryStrategy
from xai_torch.core.args import Arguments
from xai_torch.core.data.data_modules.base import BaseDataModule
from al.core.data.active_learning_datamodule import ActiveLearningDataModule
from ignite.engine import Engine
from torch.utils.data import DataLoader
from xai_torch.core.training.constants import TrainingStage
logging.basicConfig(level=logging.INFO)
class WAALTrainer(DALTrainer):
    @classmethod
    def configure_running_avg_logging(cls, args: Arguments, engine: Engine, stage: TrainingStage):
        """Attach running-average loss metrics to *engine* for the given stage.

        Training tracks both WAAL losses ('task_loss', 'dsc_loss'); other stages
        track the plain 'loss' key. Metric names are '<stage>/<loss name>'.
        """
        from ignite.metrics import RunningAverage

        def output_transform(x: Any, index: int, name: str) -> Any:
            # Pull one loss out of the engine output, whatever container the
            # update function returned (dict by name, sequence by index, scalar).
            import numbers

            import torch

            if isinstance(x, Mapping):
                return x[name]
            elif isinstance(x, Sequence):
                return x[index]
            elif isinstance(x, (torch.Tensor, numbers.Number)):
                return x
            else:
                raise TypeError(
                    "Unhandled type of update_function's output. "
                    f"It should either mapping or sequence, but given {type(x)}"
                )

        # add loss as a running average metric
        # (epoch_bound=False keeps the average running across epoch boundaries;
        # partial() binds each metric to its own index/name pair.)
        if stage == TrainingStage.train:
            for i, n in enumerate([f"{step}_{DataKeys.LOSS}" for step in ["task", "dsc"]]):
                RunningAverage(
                    alpha=0.5, output_transform=partial(output_transform, index=i, name=n), epoch_bound=False
                ).attach(engine, f"{stage}/{n}")
        else:
            for i, n in enumerate([DataKeys.LOSS]):
                RunningAverage(
                    alpha=0.5, output_transform=partial(output_transform, index=i, name=n), epoch_bound=False
                ).attach(engine, f"{stage}/{n}")
    @classmethod
    def initialize_training_engine(
        cls,
        args: Arguments,
        model: WAALXAIModel,
        output_dir: str,
        device: Optional[Union[str, torch.device]] = torch.device("cpu"),
        scaler: Optional["torch.cuda.amp.GradScaler"] = None,
    ) -> Callable:
        """Build the ignite training Engine for WAAL.

        Each engine iteration runs two optimizer updates -- one for the task
        model ('task') and one for the discriminator ('dsc') -- and merges
        their loss dicts. The task model state is checkpointed once, at the
        iteration count steps_per_epoch * max_epochs.
        """
        if args.training_args.gradient_accumulation_steps <= 0:
            raise ValueError(
                "Gradient_accumulation_steps must be strictly positive. "
                "No gradient accumulation if the value set to one (default)."
            )

        from ignite.engine import Engine

        # get related arguments
        total_task_model_iterations = model.steps_per_epoch * args.training_args.max_epochs
        gradient_accumulation_steps = args.training_args.gradient_accumulation_steps
        non_blocking = args.training_args.non_blocking_tensor_conv
        if args.training_args.with_amp:
            try:
                from torch.cuda.amp import autocast
            except ImportError:
                raise ImportError("Please install torch>=1.6.0 to use amp_mode='amp'.")
            if scaler is None:
                from torch.cuda.amp.grad_scaler import GradScaler

                scaler = GradScaler(enabled=True)

        # NOTE(review): `autocast` is only imported when with_amp is True, but
        # update_model below uses it unconditionally -- with_amp=False would
        # raise NameError at the first training step. Confirm with_amp is
        # always set, or import/guard autocast unconditionally.
        def update_model(engine, model, batch, step="task"):
            """One forward/backward/step for the optimizer named *step*."""
            from xai_torch.core.constants import DataKeys

            # perform optimizers zero_grad() operation with gradient accumulation
            if (engine.state.iteration - 1) % gradient_accumulation_steps == 0:
                model.optimizers[step].zero_grad()

            with autocast(enabled=True):
                # forward pass
                model_output = model.torch_model.training_step(batch=batch, step=step)

                # make sure we get a dict from the model
                assert isinstance(model_output, dict), "Model must return an instance of dict."

                # get loss from the output dict
                loss = model_output[DataKeys.LOSS]

                # accumulate loss if required
                if gradient_accumulation_steps > 1:
                    loss = loss / gradient_accumulation_steps

            if scaler:
                # AMP path: scale, backward, and step only on accumulation boundaries.
                scaler.scale(loss).backward()
                # perform optimizer update for correct gradient accumulation step
                if engine.state.iteration % gradient_accumulation_steps == 0:
                    scaler.step(model.optimizers[step])
                    scaler.update()
            else:
                # backward pass
                loss.backward()
                # perform optimizer update for correct gradient accumulation step
                if engine.state.iteration % gradient_accumulation_steps == 0:
                    model.optimizers[step].step()

            # if on the go training evaluation is required, detach data from the graph
            if args.training_args.eval_training and step == "task":
                return_dict = {}
                for key, value in model_output.items():
                    if key == DataKeys.LOSS:
                        return_dict[key] = value.item()
                    elif isinstance(value, torch.Tensor):
                        return_dict[key] = value.detach()
                # Rename the generic loss key to 'task_loss' for the metrics.
                return_dict[f"{step}_{DataKeys.LOSS}"] = return_dict[DataKeys.LOSS]
                del return_dict[DataKeys.LOSS]
                return return_dict
            return {f"{step}_{DataKeys.LOSS}": model_output[DataKeys.LOSS].item()}

        def training_step(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
            """
            Define the model training update step
            """
            from ignite.utils import convert_tensor

            # setup model for training
            model.torch_model.train()

            # put batch to device
            batch = convert_tensor(batch, device=device, non_blocking=non_blocking)

            # we store the task model after n epochs * n labeled training iterations so that its not overfitted
            # this is only for testing the performance
            # call task model update
            if engine.state.iteration == total_task_model_iterations:
                from ignite.engine import Events
                from ignite.handlers.checkpoint import BaseSaveHandler, DiskSaver

                checkpoint = {}
                model.on_save_checkpoint(checkpoint)
                # if only to save weights, remove all other keys
                if args.training_args.model_checkpoint_config.save_weights_only:
                    for k in list(checkpoint.keys()):
                        if k not in ["model"]:
                            checkpoint.pop(k)
                model_checkpoint_config = args.training_args.model_checkpoint_config
                checkpoint_dir = output_dir / model_checkpoint_config.dir
                save_handler = DiskSaver(
                    checkpoint_dir,
                    require_empty=False,
                )
                from ignite.handlers import Checkpoint

                # Saved with prefix "task"; setup_test_engine_task globs for it.
                checkpoint_handler = Checkpoint(
                    checkpoint,
                    cast(Union[Callable, BaseSaveHandler], save_handler),
                    filename_prefix="task",
                )
                checkpoint_handler(engine)

            task_output = update_model(engine, model, batch, step="task")
            # dsc output
            dsc_output = update_model(engine, model, batch, step="dsc")
            # print("Losses: ", task_output["task_loss"], dsc_output["dsc_loss"])
            return {**task_output, **dsc_output}

        return Engine(training_step)
@classmethod
def setup_model(
cls,
args: Arguments,
datamodule: BaseDataModule,
tb_logger: TensorboardLogger,
summarize: bool = False,
stage: TrainingStage = TrainingStage.train,
) -> XAIModel:
"""
Initializes the model for training.
"""
from xai_torch.core.models.factory import ModelFactory
# setup model
model = ModelFactory.create(args, datamodule, tb_logger=tb_logger, wrapper_class=WAALXAIModel)
model.setup(stage=stage)
# generate model summary
if summarize:
model.summarize()
return model
@classmethod
def setup_training_engine(cls, args, model, train_dataloader, val_dataloader, output_dir, tb_logger, device):
pass
# setup training engine
training_engine = cls.initialize_training_engine(args=args, model=model, output_dir=output_dir, device=device)
validation_engine = None
if args.general_args.do_val:
# setup validation engine
validation_engine = cls.initialize_validation_engine(args=args, model=model, device=device)
# configure training and validation engines
cls.configure_training_engine(
args=args,
training_engine=training_engine,
model=model,
output_dir=output_dir,
tb_logger=tb_logger,
train_dataloader=train_dataloader,
validation_engine=validation_engine,
val_dataloader=val_dataloader,
)
# add training hooks from the model
model.add_training_hooks(training_engine)
return training_engine, validation_engine
@classmethod
def setup_test_engine_task(cls, args, model, test_dataloader, output_dir, tb_logger, device):
import glob
from ignite.handlers import Checkpoint
# setup training engine
test_engine = cls.initialize_test_engine(args=args, model=model, device=device)
# configure training and validation engines
cls.configure_test_engine(
args=args,
test_engine=test_engine,
model=model,
output_dir=output_dir,
tb_logger=tb_logger,
load_checkpoint=False,
)
checkpoint = {}
model_checkpoint_config = args.training_args.model_checkpoint_config
checkpoint_dir = output_dir / model_checkpoint_config.dir
list_checkpoints = glob.glob(str(checkpoint_dir) + "/*.pt")
test_checkpoint_path = [c for c in list_checkpoints if "task" in c][0]
model.on_save_checkpoint(checkpoint)
test_checkpoint = torch.load(test_checkpoint_path, map_location="cpu")
Checkpoint.load_objects(to_load=checkpoint, checkpoint=test_checkpoint)
cls.configure_test_tb_logger(args=args, test_engine=test_engine, model=model, tb_logger=tb_logger, tag="task")
return test_engine
    @classmethod
    def train(cls, local_rank, args: Arguments):
        """
        Run the full deep-active-learning loop: for each round, (re)train
        the model on the labelled pool, query new samples to label, test,
        and persist the round state so the run can be resumed.
        """
        import ignite.distributed as idist
        from xai_torch.core.training.utilities import initialize_training, setup_logging
        from xai_torch.utilities.logging_utils import DEFAULT_LOGGER_NAME, setup_logger

        # setup logging
        logger = setup_logger(DEFAULT_LOGGER_NAME, distributed_rank=local_rank, level=logging.INFO)
        # initialize training
        initialize_training(args)
        # initialize torch device (cpu or gpu)
        device = idist.device()
        # get device rank
        rank = idist.get_rank()
        # initialize logging directory and tensorboard logger
        output_dir, tb_logger = setup_logging(args)
        # setup datamodule
        datamodule: ActiveLearningDataModule = cls.setup_datamodule(args, rank=rank, stage=None)
        # setup model
        model = cls.setup_model(args, datamodule, tb_logger, summarize=True)
        # define active learning query strategy
        query_strategy: QueryStrategy = QueryStrategyFactory.create(
            datamodule=datamodule, model=model, device=device, args=args.al_args
        )
        # load active learning state (resumes a previous run when present)
        al_state = DALTrainer.load_round_state(0, datamodule, output_dir=output_dir)
        curr_round = al_state["curr_round"]
        if curr_round == args.al_args.n_rounds:
            logger.warning(
                "Active learning rounds have already been finished! Either increase the number of "
                f"max rounds (current={args.al_args.n_rounds}) "
                "OR reset the training from start."
            )
            exit()
        # reset seeds for training. This allows multiple experiments with same seed for dataset initialization but
        # different seeds for the active learning training process.
        reset_random_seeds(args.al_args.al_seed)
        while curr_round < args.al_args.n_rounds:
            from al.core.training.query_strategies.impl.ceal import CEAL

            logger.info(f"============== Running round={curr_round} of active learning ===========")
            # update tblogger dir (one tensorboard run per round, rank 0 only)
            tb_logger = None
            if rank == 0:
                from ignite.contrib.handlers import TensorboardLogger

                tb_logger = TensorboardLogger(output_dir / str(curr_round))
            # print labels summary
            datamodule.print_label_summary()
            # Reset model for re-training
            if args.al_args.reset_model:
                model = cls.setup_model(args, datamodule, tb_logger, summarize=False)
            else:
                # Reset only optimizers and schedulers
                model._opt_sch_handler.setup_opt_sch()
            # get train dataloader for labelled data
            joint_dataloader = datamodule.get_joint_dataset_loader(
                collate_fn=JointBatchToTensorDataCollator(datamodule._collate_fns.train)
            )
            # setup gamma ratio (labelled/unlabelled balance used during training)
            args.al_args.training_args.gamma_ratio = len(datamodule.labeled_indices) / len(datamodule.unlabeled_indices)
            # get validation data loader
            val_dataloader = datamodule.val_dataloader()
            # setup training engine
            training_engine, _ = cls.setup_training_engine(
                args=args,
                model=model,
                train_dataloader=joint_dataloader,
                val_dataloader=val_dataloader,
                output_dir=output_dir / str(curr_round),  # append round number to output_dir
                tb_logger=tb_logger,
                device=device,
            )
            training_engine.logger = logger

            # NOTE: The test engine has already updated the model state with state of last/best
            # checkpoint which will be used for querying of the next round.
            def perform_query():
                # select the next batch of samples to label (timed for tensorboard)
                import timeit

                # reset the querying strategy
                query_strategy.reset(model)
                # update the labeled pool
                start = timeit.default_timer()
                n_query_samples = int(args.al_args.n_query_ratio * datamodule.pool_size)
                if isinstance(query_strategy, CEAL):
                    # CEAL additionally needs the current round index
                    query_indices = query_strategy.query(n_samples=n_query_samples, round=curr_round)
                else:
                    query_indices = query_strategy.query(n_samples=n_query_samples)
                stop = timeit.default_timer()
                tb_logger.writer.add_scalar("query_time", stop - start, curr_round)
                datamodule.update_dataset_labels(query_indices)

            def test_model():
                # after the training, the test engine automatically loads the 'best' model to continue the rounds.
                test_dataloader = datamodule.test_dataloader()
                # perform testing on task model which is trained for only set number of epochs on labeled dataset
                test_engine_task = cls.setup_test_engine_task(
                    args=args,
                    model=model,
                    test_dataloader=test_dataloader,
                    output_dir=output_dir / str(curr_round),
                    tb_logger=tb_logger,
                    device=device,
                )
                test_engine_task.logger = logger
                test_engine_task.run(test_dataloader)
                # perform testing on task model which is trained for complete iterations ran by joint dataloader
                test_engine = cls.setup_test_engine(
                    args=args,
                    model=model,
                    test_dataloader=test_dataloader,
                    output_dir=output_dir / str(curr_round),
                    tb_logger=tb_logger,
                    device=device,
                )
                test_engine.logger = logger
                test_engine.run(test_dataloader)

            resume_epoch = training_engine.state.epoch
            if not (training_engine._is_done(training_engine.state) or resume_epoch >= args.training_args.max_epochs):
                # run training
                training_engine.run(joint_dataloader, max_epochs=args.training_args.max_epochs)
                # perform query before testing
                perform_query()
                # test model
                test_model()
            else:
                # if we are resuming from last checkpoint and training is already finished
                logger.info(
                    "Training has already been finished! Either increase the number of "
                    f"epochs (current={args.training_args.max_epochs}) >= {resume_epoch} "
                    "OR reset the training from start."
                )
                # perform query before testing
                perform_query()
                # test model
                test_model()
            # save active learning query state for next round
            DALTrainer.save_round_state(curr_round + 1, datamodule, output_dir=output_dir)
            if rank == 0:
                # close tb logger
                tb_logger.close()
            curr_round += 1
| saifullah3396/doc_al | src/al/core/training/waal_trainer.py | waal_trainer.py | py | 18,690 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "logging.basicConfig",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "al.core.tr... |
41978072202 | import time
from flask import Blueprint, jsonify, request
import requests
import part2.health_check as health_check
from dbBrokerManager.config import async_session, engine, BaseBroker
from dbBrokerManager.AsyncDAL import DAL
import datetime
import asyncio
server = Blueprint("broker_manager_Read_Only", __name__)
brokers = list() # List of brokers in the network
topics_lock = True
class Broker:
    """Represents a single broker node and its last known health status."""

    def __init__(self, address, broker_id, isReadOnly=False):
        # base URL of the broker, e.g. "http://10.0.0.1:5000"
        self.address = address
        # brokers start out assumed healthy until a check says otherwise
        self.isAlive = True
        self.brokerID = broker_id
        self.isReadOnly = isReadOnly

    def declareDead(self):
        """Mark this broker as unreachable."""
        self.isAlive = False

    def declareAlive(self):
        """Mark this broker as reachable."""
        self.isAlive = True

    async def checkHealth(self):
        """Probe the broker's /status endpoint and update isAlive accordingly."""
        try:
            r = requests.get(f"{self.address}/status")
            response = r.json()
            if response["status"] == "success" and response["message"] == "broker running":
                self.declareAlive()
            else:
                self.declareDead()
        except Exception:
            # was a bare `except:`, which also swallowed KeyboardInterrupt /
            # SystemExit; any request/parse error simply marks the broker dead
            self.declareDead()
lastBrokerCheckTime = datetime.datetime.utcnow()
async def checkForNewBrokers():
    """Re-discover live brokers on the network and return them as a fresh list."""
    discovered = list()
    ip, ports = health_check.doSearchJob(0)
    for idx, port in enumerate(ports):
        # broker ids are simply their position in discovery order
        discovered.append(Broker(f"http://{ip}:{port}", idx))
    print(ports)
    return discovered
async def getServerAddress(broker_id):
    """
    Return the base URL of the broker for `broker_id`, or None if it is down.

    Also refreshes the broker list when the last discovery is older than 10s.
    """
    global topics_lock
    # NOTE(review): this flag is not an atomic lock; two coroutines could both
    # observe True and proceed — confirm whether real mutual exclusion is needed
    while topics_lock == False:
        # NOTE(review): time.sleep blocks the event loop inside an async def;
        # asyncio.sleep would be the non-blocking equivalent
        time.sleep(1)
    topics_lock = False
    global brokers, lastBrokerCheckTime
    print(lastBrokerCheckTime)
    print(datetime.datetime.utcnow() - lastBrokerCheckTime)
    # re-discover brokers at most every 10 seconds
    if datetime.datetime.utcnow() - lastBrokerCheckTime > datetime.timedelta(seconds=10):
        brokers = await checkForNewBrokers()
        lastBrokerCheckTime = datetime.datetime.utcnow()
    address = None
    await brokers[broker_id].checkHealth()
    if brokers[broker_id].isAlive:
        address = brokers[broker_id].address
    else:
        brokers[broker_id].declareDead()
    # if address is None:
    #     for replica in brokersReplica[broker_id]:
    #         replica.checkHealth()
    #         if replica.isAlive:
    #             address = replica.address
    #         else:
    #             replica.declareDead()
    topics_lock = True
    return address
@server.before_app_first_request
async def setUpBrokerManager():
    # This function sets up the broker manager by backing up things from server and setting up brokers in the network and read only copies of broker manager.
    """
    One-time initialization before the first request: create the DB schema
    and block until at least one broker is discovered on the network.
    """
    global brokers, topics_lock
    topics_lock = True
    brokers = list()
    async with engine.begin() as conn:
        # await conn.run_sync(Base.metadata.drop_all)
        await conn.run_sync(BaseBroker.metadata.create_all, checkfirst=True)
    # busy-poll discovery until at least one broker responds
    while len(brokers) == 0:
        ip, ports = health_check.doSearchJob(0)
        print(ports)
        for id, port in enumerate(ports):
            brokers.append(Broker(f"http://{ip}:{port}", id))
    print(brokers)
@server.route("/")
def index():
return "<h1>Welcome to the Broker Manager Copy!</h1>"
@server.route("/status")
def status():
return jsonify({"status": "success", "message": "Broker Manager Copy running"})
@server.route("/topics", methods=["POST"])
async def create_topic():
topic_name = request.json["topic_name"]
brokers_list = list()
tempVar = await getServerAddress(0)
for broker_id in range(len(brokers)):
address = await getServerAddress(broker_id)
print(address)
if address is None:
return jsonify({"status": "failure", "message": f"Data Lost due to complete broker failure"})
params = {"topic_name": topic_name, "partition_id": broker_id}
r = requests.post(f"{address}/topics", json=params)
response = r.json()
if response["status"] == "failure":
return jsonify({"status": "failure", "message": f"Topic '{topic_name}' could not be created"})
brokers_list.append(broker_id)
return jsonify({"status": "success", "message": f"Topic '{topic_name}' created successfully", "brokers_list": brokers_list})
@server.route("/producer/produce", methods=["POST"])
async def enqueue():
topic_name = request.json["topic_name"]
producer_id = request.json["producer_id"]
log_message = request.json["log_message"]
partition_id = request.json["partition_id"]
address = await getServerAddress(partition_id)
if address is None:
return jsonify({"status": "failure", "message": f"Data Lost due to complete broker failure"})
params = {"topic_name": topic_name, "producer_id": producer_id, "log_message": log_message, "partition_id": partition_id}
r = requests.post(f"{address}/producer/produce", json=params)
response = r.json()
if response["status"] == "failure":
return jsonify({"status": "failure", "message": f"Message production failed"})
return jsonify({"status": "success"})
@server.route("/consumer/consume", methods=["GET"])
async def dequeue():
topic_name = request.json["topic_name"]
consumer_id = request.json["consumer_id"]
partitions = request.json["partitions"]
consumerFront = request.json["consumer_fronts"]
log_message = None
minTime = datetime.datetime.utcnow()
minbrokerID = None
for broker_id in partitions:
address = await getServerAddress(broker_id)
if address is None:
return jsonify({"status": "failure", "message": f"Data Lost due to complete broker failure"})
query = None
params = {"topic_name": topic_name, "partition_id": broker_id, "consumer_front": consumerFront[str(broker_id)]}
r = requests.get(f"{address}/consumer/consume", json=params)
query = r.json()
if query["status"] == "failure":
continue
if minTime > datetime.datetime.strptime(query["timestamp"], '%a, %d %b %Y %H:%M:%S %Z'):
minTime = datetime.datetime.strptime(query["timestamp"], '%a, %d %b %Y %H:%M:%S %Z')
log_message = query["log_message"]
minbrokerID = broker_id
if log_message is None:
return jsonify({"status": "failure", "message": f"No message to consume"})
return jsonify({"status": "success", "log_message": log_message, "broker_id": minbrokerID})
@server.route("/size", methods=["GET"])
async def size():
topic_name = request.json["topic_name"]
consumer_id = request.json["consumer_id"]
partitions = request.json["partitions"]
consumerFront = request.json["consumer_fronts"]
consumer_size = 0
for broker_id in partitions:
address = await getServerAddress(broker_id)
if address is None:
return jsonify({"status": "failure", "message": f"Data Lost due to complete broker failure"})
query = None
params = {"topic_name": topic_name, "partition_id": broker_id, "consumer_front": consumerFront[str(broker_id)]}
r = requests.get(f"{address}/size", json=params)
query = r.json()
if query["status"] == "failure":
return jsonify({"status": "failure", "message": f"Size query failed"})
consumer_size += query["size"]
return jsonify({"status": "success", "size": consumer_size}) | DistributedSystemsGroup-IITKGP/Assignment-2 | BrokerManagerReadOnly.py | BrokerManagerReadOnly.py | py | 7,333 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "datetime.dateti... |
24952050763 | from selenium import webdriver
import time
import json
import os
from selenium.webdriver.common.by import By
import subprocess
import difflib
import re
from urllib.parse import unquote
from colorama import *
class Voltaire:
    """Automates the Projet Voltaire spelling test using answers scraped from module files."""

    def __init__(self):
        # suppress the noisy chromedriver logging switch
        options = webdriver.ChromeOptions()
        options.add_experimental_option('excludeSwitches', ['enable-logging'])
        # NOTE(review): the chromedriver subprocess handle is never waited on or terminated
        ch_output = subprocess.Popen(["chromedriver"])
        self.driver = webdriver.Chrome(options=options)
        self.driver.implicitly_wait(1)

    def Header(self):
        """Clear the console, print the banner and prompt for the test parameters."""
        os.system('cls')
        print(f"""
{Fore.GREEN}__ __ _ _ {Fore.RED}_{Fore.GREEN}
\ \ / /__| | |_ __ _{Fore.RED}(_){Fore.GREEN}_ _ ___
\ V / {Fore.WHITE}_{Fore.GREEN} \ | _/ {Fore.WHITE}_{Fore.GREEN}` | | '_/ -_)
\_/\___/_|\__\__,_|_|_| \___|{Fore.RESET} [{Fore.RED}solved{Fore.RESET}]
""")
        self.par = input(f' [{Fore.GREEN}*{Fore.RESET}] Supérieur / Excellence ? => ')
        self.nb = input(f' [{Fore.GREEN}*{Fore.RESET}] Test number => ')
        try:
            int(self.nb)
            pass
        except ValueError:
            # non-numeric test number: warn and re-prompt (recursive retry)
            print(f' [{Fore.RED}!{Fore.RESET}] INT is required !')
            time.sleep(3.5)
            self.Header()

    def solver(self):
        """Load the known answers for the chosen module and auto-answer the test in a loop."""
        data_filename = f"Module{self.nb}.txt"
        directory = f"./src/{self.par}/"
        reponses = []
        for filename in os.listdir(directory):  # Credit => https://github.com/sylvain-reynaud/projet-voltaire-solver
            if filename.endswith(data_filename):
                with open(directory + filename, 'r', encoding="utf-8") as f:
                    data = f.read()
                try:
                    # extract the serialized ArrayList payload embedded in the file
                    data = data[data.index("[\"java.util.ArrayList"):data.index("]")] + "]"
                    data = data.replace("\\", "\\\\")
                    reponses += json.loads(data)
                except:
                    pass
        # keep only entries containing an escaped '<' (the highlighted answers)
        reponses = [x for x in reponses if "\\x3C" in x]
        input(f' [{Fore.GREEN}*{Fore.RESET}] Ready ? (Enter to start)')
        print(f' [{Fore.GREEN}*{Fore.RESET}] Test {self.nb} Started with 1 attempt every 5 seconds !')
        while True:
            try:
                time.sleep(5)
                items = self.driver.find_elements(By.XPATH, "/html/body/div[5]/div[2]/div[3]/div/div[1]/div/div/div[2]/div")
                for item in items:
                    phrase = item.text
                    # fuzzy-match the on-screen sentence against the known answers
                    possibilites = difflib.get_close_matches(phrase, reponses)
                    if len(possibilites) != 0:
                        toPrint = re.sub(r"<B>(.*)<\/B>", fr"{Fore.GREEN}\1{Fore.RESET}", unquote(possibilites[0].replace("\\x", "%")))
                        print(f' [{Fore.GREEN}*{Fore.RESET}]', toPrint)
                    else:
                        print(f' [{Fore.GREEN}*{Fore.RESET}] Il n\'y a pas de faute')
                    self.driver.find_element(By.XPATH, "/html/body/div[5]/div[2]/div[3]/div/div[2]/div[1]/div[1]/div/button").click()
                self.driver.find_element(By.XPATH, "/html/body/div[5]/div[2]/div[3]/div/div[3]/div[2]/button").click()
            except:
                print(f' [{Fore.RED}*{Fore.RESET}] Attempt Failed !')
# Script entry point: build the solver, prompt for test parameters, then run.
Voltaire_solved = Voltaire()
Voltaire_solved.Header()
Voltaire_solved.solver()
| Sshinx/Voltaire-is-Over | Voltaire.py | Voltaire.py | py | 3,406 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "subprocess.Popen",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "... |
40713013689 | from datetime import datetime
import pytz
import requests
from config import NO_IMG_URL, TIMEZONE
def convert_timezone(
    time=None, format="%Y-%m-%dT%H:%M:%SZ", ori_timezone=None, desire_timezone=TIMEZONE
):
    """
    Parse a timestamp string and convert it between timezones.

    :param time: timestamp string to parse
    :param format: strptime format of `time` (ISO/Zulu style by default).
        NOTE: the name shadows the builtin `format`; kept for backward
        compatibility with keyword callers.
    :param ori_timezone: pytz name of the timezone `time` is expressed in
    :param desire_timezone: pytz name of the target timezone
    :return: timezone-aware datetime in `desire_timezone`
    """
    # BUG FIX: the `format` argument used to be ignored (the strptime
    # pattern was hard-coded), so any non-default format raised ValueError.
    date_time = datetime.strptime(time, format)
    ori_timezone = pytz.timezone(ori_timezone)
    # attach the origin timezone, then convert via UTC to the target one
    date_time = ori_timezone.localize(date_time)
    desired_timezone = pytz.timezone(desire_timezone)
    desire_time = pytz.utc.normalize(date_time).astimezone(desired_timezone)
    return desire_time
def check_img_access(img_url=""):
    """Return `img_url` if it answers HTTP 200, otherwise the NO_IMG_URL fallback."""
    response = requests.get(img_url)
    if response.status_code != 200:
        return NO_IMG_URL
    return img_url
| timho102003/newsfriend | util.py | util.py | py | 689 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "config.TIMEZONE",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pytz.tim... |
14824615177 | from pathlib import Path
import csv
import random
from faker import Faker
fake = Faker()

p = Path('.')
# Find all files in folder (recursively collect every CSV under the cwd)
fileslist = list(p.glob('**/*.csv'))
# Set the folder for the anonymized files (assumed to already exist)
outfolder = 'anon'
for file in fileslist:
    # fresh random ids per file; randid2 doubles as the output file name
    randid = random.randint(10000, 99999)
    randid2 = random.randint(10000, 99999)
    # random future date 10-14 years ahead, rendered as DD-MM-YYYY
    a = fake.date_between(start_date='+10y', end_date='+14y')
    randdate = f'{str(a.day).zfill(2)}-{str(a.month).zfill(2)}-{a.year}'
    readFile = open(file, 'r')
    writeFile = open(f'./{outfolder}/{randid2}.csv', 'w')
    print(file)
    for row in readFile:
        # Add for fields to be changed
        if 'Field1' in row:
            writeFile.write(f'Field1={randid}\n')
        elif 'Field2' in row:
            writeFile.write(f'Field2={randid2}\n')
        elif 'Date' in row:
            writeFile.write(f'Date={randdate}\n')
        else:
            writeFile.write(row)
    # Close and write new files
    # NOTE(review): files are not closed if an exception occurs mid-loop;
    # a `with` block would be safer
    readFile.close()
    writeFile.close() | carluri/pythonscripts | anonymize_csv.py | anonymize_csv.py | py | 994 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "faker.Faker",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_numb... |
7640727921 | #Before all this run the command pip install --user PyPDF2==1.26.0
#Shift + right click will bring up the powershell/cmd for the folder
#Ensure file is in the same folder and has the extension .pdf
import PyPDF2
import sys
import time
from tqdm import tqdm
pdfname = input("Enter the name of your file (example: Hello.pdf) :")
pdfname = pdfname.strip()
# refuse anything without a .pdf extension, then exit after a short pause
if pdfname.endswith(".pdf") == False:
    print("Enter a proper pdf name")
    print("Shutting down program...")
    time.sleep(5)
    sys.exit(0)
pdfFileObj = open(pdfname, 'rb')  # If you get FileNotFoundError check if the file name you mentioned is in the same folder/directory
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
print("Number of pages: " + str(pdfReader.numPages))
while True:
    ans = input("Do you want to extract from one page or all (o/a) :")
    if ans == "o":
        page = input("Which page number? :")
        # NOTE(review): no int/bounds validation; a bad page number raises here
        pageObj = pdfReader.getPage(int(page) - 1)
        print("Extracting text......")
        # append this page's text to the shared output file
        with open("extractedtext.txt", "a") as f:
            f.write(pageObj.extractText() + "\n")
        # cosmetic progress bar (fixed ~15s wait, not real progress)
        for i in tqdm(range(5)):
            time.sleep(3)
        print("DONE!")
        break
    elif ans == "a":
        print("Extracting text......")
        for x in range(0, pdfReader.numPages):
            pageObj = pdfReader.getPage(x)
            with open("extractedtext.txt", "a") as f:
                f.write(pageObj.extractText() + "\n")
        for i in tqdm(range(5)):
            time.sleep(3)
        print("DONE!")
        break
pdfFileObj.close() | anjannair/Automating-With-Python | PDFs/extracttext.py | extracttext.py | py | 1,524 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "time.sleep",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "PyPDF2.PdfFileReader",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number... |
26665766366 | from metrics import *
import numpy as np
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.vis_utils import plot_model
from keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import roc_auc_score
import random
import pickle
import json
import pathlib
MAX_NUM_WORDS = 200000
def evaluate_model(model, X_train, y_train, X_val, y_val):
    """
    Score a fitted model on the train and validation splits.

    Prints precision/recall/F1 for both splits (plus validation AUC) and
    returns (precision, recall, f1) for the validation split.
    """
    pred_train = model.predict(X_train)
    pred_val = model.predict(X_val)
    # train-split metrics
    prec_train = precision(y_train, pred_train)
    recall_train = recall(y_train, pred_train)
    f1_train = fbeta_score(y_train, pred_train)
    # validation-split metrics
    prec_val = precision(y_val, pred_val)
    recall_val = recall(y_val, pred_val)
    f1_val = fbeta_score(y_val, pred_val)
    auc = roc_auc_score(y_val, pred_val)
    val_results = "Validation Results: Prec - {:.2f} Recall - {:.2f} F1 - {:.2f} AUC - {:.2f}".format(
        prec_val, recall_val, f1_val, auc
    )
    train_results = "Train Results: Prec - {:.2f} Recall - {:.2f} F1 - {:.2f}".format(
        prec_train, recall_train, f1_train
    )
    print("{}\n{}".format(train_results, val_results))
    return prec_val, recall_val, f1_val
def save_model(model, history, filename,
               prec=None, recall=None, f1=None,
               epochs=None, batch_size=None, random_seed=None,
               class_weights=None, sample_type=None):
    """
    Persist a trained model plus its training artifacts.

    Under ../models/<filename>/: the .h5 weights, the pickled Keras history
    dict and an architecture diagram.  Under ../results/: a text summary of
    the run containing whichever metrics/hyper-parameters were provided.
    """
    pathlib.Path("../models/{}".format(filename)).mkdir(parents=True, exist_ok=True)
    model.save("../models/{}/{}.h5".format(filename, filename))
    with open("../models/{}/{}.txt".format(filename, filename), 'wb') as f:
        pickle.dump(history.history, f)
    with open("../results/{}.txt".format(filename), 'w+') as f:
        if prec is not None:
            f.write("Validation Set Precision: {}\n".format(prec))
        if recall is not None:
            f.write("Validation Set Recall : {}\n".format(recall))
        if f1 is not None:
            f.write("Validation Set F1 Score : {}\n".format(f1))
        if epochs is not None:
            f.write("Number epochs trained: {}\n".format(epochs))
        if batch_size is not None:
            f.write("Batch size: {}\n".format(batch_size))
        # the seed line is always written, even when random_seed is None
        f.write("Random seed: {}\n".format(random_seed))
        if class_weights is not None:
            f.write(json.dumps(class_weights))
            f.write("\n")
        if sample_type is not None:
            f.write("Sample type: {}\n".format(sample_type))
    plot_model(model, to_file="../models/{}/{}.png".format(filename, filename), show_shapes=True)
    return
def shuffle_arrays(arr1, arr2, seed=None):
    """
    Shuffle two equal-length arrays in unison and return the shuffled pair.

    A random seed is drawn when none is given, so repeated calls differ;
    passing `seed` makes the permutation reproducible.
    """
    if seed is None:
        seed = random.randint(0, 1000)
    # apply one shared permutation so rows stay aligned across both arrays
    order = np.arange(arr1.shape[0])
    np.random.seed(seed)
    np.random.shuffle(order)
    return arr1[order], arr2[order]
def is_digit(string):
    """Return True if `string` can be parsed as a float, else False."""
    # Sourced from https://stackoverflow.com/questions/354038/how-do-i-check-if-a-string-is-a-number-float
    try:
        float(string)
    except ValueError:
        return False
    return True
def collapse_embedding_array(array):
    """
    Merge the leading non-numeric tokens of a split embedding-file line into
    one (possibly multi-word) token, leaving the numeric coefficients as-is.
    E.g. ["new", "york", "1.0", "2.0"] -> ["new york", "1.0", "2.0"].
    """
    head = array[0]
    for pos in range(1, len(array)):
        token = array[pos]
        try:
            float(token)  # numeric -> the coefficient vector starts here
        except ValueError:
            head += " " + token
            continue
        return [head] + array[pos:]
    # no numeric token found: everything collapsed into the head word
    return [head]
def create_embedding_matrix(filepath, dictionary, embedding_dim):
    """
    Build an embedding matrix for a tokenizer vocabulary from a GloVe-style
    vectors file; words missing from the file keep all-zero rows.
    """
    vectors = {}
    with open(filepath, encoding="utf8") as f:
        for line in f:
            # multi-word tokens are re-joined before parsing coefficients
            parts = collapse_embedding_array(line.split())
            vectors[parts[0]] = np.asarray(parts[1:], dtype="float32")
    # row 0 is reserved (tokenizer indices start at 1), hence len+1 rows
    matrix = np.zeros((len(dictionary) + 1, embedding_dim))
    for word, idx in dictionary.items():
        vec = vectors.get(word)
        if vec is not None:
            matrix[idx] = vec
    return matrix
def recover_queries(tokenizer, X, y):
    """
    Translate tokenized queries back into word lists, keeping only the
    queries labelled positive (y == 1).  Unknown indices map to "UNKNOWN".
    """
    lookup = tokenizer.index_word
    recovered_queries = []
    for i, query in enumerate(X):
        if y[i] == 1:
            recovered_queries.append([lookup.get(idx, "UNKNOWN") for idx in query])
    return recovered_queries
def read_data_embeddings(max_input_length=10, vocab_size=MAX_NUM_WORDS):
    """
    Load the train/val splits and encode them as padded integer sequences
    suitable for an embedding layer.

    The tokenizer is fitted on train+val combined so both splits share one
    vocabulary.  Returns (X_train, y_train, X_val, y_val, tokenizer).
    """
    X_train, y_train, X_val, y_val = read_train_val_data()
    X = pd.concat((X_train, X_val))
    tokenizer = Tokenizer(num_words=vocab_size, split=' ')
    tokenizer.fit_on_texts(X[0].values)
    X_train = tokenizer.texts_to_sequences(X_train[0].values)
    # pad/truncate every sequence to exactly max_input_length tokens
    X_train = pad_sequences(X_train, maxlen=max_input_length)
    X_val = tokenizer.texts_to_sequences(X_val[0].values)
    X_val = pad_sequences(X_val, maxlen=max_input_length)
    y_train = y_train[0].values
    y_val = y_val[0].values
    return X_train, y_train, X_val, y_val, tokenizer
def read_bag_of_words(vocab_size=MAX_NUM_WORDS):
    """
    Load the train/val splits and encode them as bag-of-words matrices.

    The tokenizer is fitted on train+val combined so both splits share one
    vocabulary.  Returns (X_train, y_train, X_val, y_val, tokenizer).
    """
    X_train, y_train, X_val, y_val = read_train_val_data()
    tokenizer = Tokenizer(num_words=vocab_size, split=' ')
    tokenizer.fit_on_texts(pd.concat((X_train, X_val))[0].values)
    return (
        tokenizer.texts_to_matrix(X_train[0].values),
        y_train[0].values,
        tokenizer.texts_to_matrix(X_val[0].values),
        y_val[0].values,
        tokenizer,
    )
def validation_training_split(split=0.2):
    """
    Merge the three raw query datasets, shuffle them, and persist a
    train/validation split as CSVs under ../data.

    `split` is the fraction of examples reserved for validation.
    """
    kdd_data = pd.read_csv("../data/KDD_Cup_2005_Data.csv")
    kdd_x = kdd_data[["Query"]]
    kdd_y = kdd_data[["Label"]]
    google_trends_data = pd.read_csv("../data/Google_Trends_Search_Queries.csv")
    trends_x = google_trends_data[["Query"]]
    trends_y = google_trends_data[["Label"]]
    kdd_data_8k = pd.read_csv("../data/KDD_Cup_2005_Data_8k.csv")
    kdd_8k_x = kdd_data_8k[["Query"]]
    kdd_8k_y = kdd_data_8k[["Label"]]
    X = pd.concat((kdd_x, kdd_8k_x, trends_x))
    y = pd.concat((kdd_y, kdd_8k_y, trends_y))
    # shuffle features and labels with the same random permutation
    X, y = shuffle_arrays(X.values, y.values)
    num_val_examples = int(split * X.shape[0])
    x_train = X[:-num_val_examples]
    y_train = y[:-num_val_examples]
    x_val = X[-num_val_examples:]
    y_val = y[-num_val_examples:]
    pd.DataFrame(x_train).to_csv("../data/X_train.csv", header=None, index_label=False, index=False)
    pd.DataFrame(y_train).to_csv("../data/y_train.csv", header=None, index_label=False, index=False)
    pd.DataFrame(x_val).to_csv("../data/X_val.csv", header=None, index_label=False, index=False)
    pd.DataFrame(y_val).to_csv("../data/y_val.csv", header=None, index_label=False, index=False)
    return
def read_train_val_data():
    """Load the train/validation splits persisted by validation_training_split."""
    x_train = pd.read_csv("../data/X_train.csv", header=None)
    y_train = pd.read_csv("../data/y_train.csv", header=None)
    x_val = pd.read_csv("../data/X_val.csv", header=None)
    y_val = pd.read_csv("../data/y_val.csv", header=None)
    return x_train, y_train, x_val, y_val
| hpabst/CS680Project | src/utils.py | utils.py | py | 7,487 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "json.dumps",
... |
40072930878 | # 标准库
import io as _io
import os as _os
import sys as _sys
import imp as _imp
import codecs as _codecs
import traceback as _traceback
import pathlib as _pathlib
def enhance_init(work_dir=__file__, python_version_require=0, check_module_list=()):
    """
    Prepare the runtime environment for a script.

    Verifies the Python minor version, normalizes stdout encoding, switches
    the working directory to the script's folder, adds it to sys.path and
    verifies that the required third-party modules are importable.

    :param work_dir: path of the calling script (its parent becomes the cwd)
    :param python_version_require: minimum python 3.x minor version required
    :param check_module_list: third-party module names to verify, e.g.
        ("requests", "yaml", "lxml")
    """
    # ==========check python version==========
    # NOTE: only validates 3.x interpreters (assumes major version 3)
    if not (
        _sys.version_info[0] == 3 and _sys.version_info[1] >= python_version_require
    ):
        raise Exception(
            "!!!!!!!!!!!!!!Python版本错误!!!!!!!!!!!!!!\n请使用python3.%d及以上版本,而不是[python %s]"
            % (python_version_require, _sys.version)
        )
    # ==========environment initialization==========
    try:
        print("==========开始初始化==========")
    except UnicodeEncodeError:
        # force utf-8 output when the console encoding cannot print the banner
        # (may affect Tencent cloud-function log output)
        _sys.stdout = _codecs.getwriter("utf-8")(_sys.stdout.detach())
        print("==========开始初始化(utf-8输出)==========")
    script_abs_path = _pathlib.Path(work_dir).absolute().parent
    _os.chdir(script_abs_path)  # run relative to the script location
    if _os.name == "posix":
        # on linux, pin the timezone for consistent timestamps
        _os.environ["TZ"] = "Asia/Shanghai"
    _sys.path.append(script_abs_path)  # make sibling modules importable
    # ==========check third-party modules==========
    # BUG FIX: previously used `imp.find_module`; the `imp` module is
    # deprecated since Python 3.4 and removed in 3.12, which made this
    # check crash on modern interpreters.  Also: the default for
    # check_module_list was a mutable [] — now an immutable tuple.
    import importlib.util as _importlib_util

    try:
        for i in check_module_list:
            if _importlib_util.find_spec(i) is None:
                raise ImportError(f"No module named '{i}'")
    except ImportError as e:
        raise ImportError(
            f"""!!!!!!!!!!!!!!缺少第三方模块(依赖)!!!!!!!!!!!!!!
请使用pip3命令安装或者手动将依赖拖入文件夹
错误信息: [{e}]"""
        )
class Decorators:
    """Reusable function decorators."""

    @staticmethod
    def catch_exception(func):
        """
        Wrap `func` so any exception it raises is printed (full traceback)
        instead of propagating; the wrapper then returns None.
        """
        import functools

        # BUG FIX: the wrapper previously lost the wrapped function's
        # metadata (__name__, __doc__, ...); functools.wraps preserves it.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception:
                print(_traceback.format_exc())
                return None

        return wrapper
class FileOut:
    """
    Replacement for stdout/stderr that tees every print to the terminal,
    to an in-memory log string and (optionally) to a log file.

    start() swaps this class in for sys.stdout/sys.stderr;
    close() restores the originals.
    """

    # originals saved so write()/close() can still reach the real terminal
    stdout = _sys.stdout
    stderr = _sys.stderr
    # everything ever written is also accumulated here
    log: str = ""
    logFile: _io.TextIOWrapper = None

    @classmethod
    def set_file_out(cla, path: str = None):
        """
        Set the log output file.

        :params path: log file path; None/empty cancels file output.
        """
        # close the previous log file, if any
        if cla.logFile:
            cla.logFile.close()
            cla.logFile = None
        # switch to the new log file
        if path:
            try:
                path = _os.path.abspath(path)
                logDir = _os.path.dirname(path)
                if not _os.path.isdir(logDir):
                    _os.makedirs(logDir)
                cla.logFile = open(path, "w+", encoding="utf-8")
                # replay everything logged so far into the new file
                cla.logFile.write(cla.log)
                cla.logFile.flush()
                return
            except Exception as e:
                # NOTE(review): the leading `2` looks like a leftover
                # log-level argument for some earlier logging helper
                print(2, f"设置日志文件输出失败, 错误信息: [{e}]")
                cla.logFile = None
                return
        else:
            cla.logFile = None
            return

    @classmethod
    def start(cla):
        """Replace sys.stdout and sys.stderr with this class."""
        if type(_sys.stdout) != cla and type(_sys.stderr) != cla:
            _sys.stdout = cla
            _sys.stderr = cla
        else:
            print("sysout/syserr已被替换为FileOut")

    @classmethod
    def write(cla, str_):
        r"""
        :params str_: string coming from print
        (print(s) is equivalent to sys.stdout.write(s + "\n"))
        """
        str_ = str(str_)
        cla.log += str_
        if cla.logFile:
            cla.logFile.write(str_)
        cla.stdout.write(str_)
        cla.flush()

    @classmethod
    def flush(cla):
        """Flush both the terminal and the log-file buffers."""
        cla.stdout.flush()
        if cla.logFile:
            cla.logFile.flush()

    @classmethod
    def close(cla):
        """Close the log file, clear the in-memory log and restore stdout/stderr."""
        if cla.logFile:
            cla.logFile.close()
        cla.log = ""
        _sys.stdout = cla.stdout
        _sys.stderr = cla.stderr
| IceTiki/tikilib | enhance.py | enhance.py | py | 4,584 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.version_info",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sys.version",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "codecs.getwrit... |
10178404139 | import logging
import multiprocessing
import random
import signal
import sys
import time
import traceback
from typing import Callable, Dict, List, Optional
from pebble import ProcessPool, sighandler
from .client import Client
from .util import constants as C
from .util import helper
from .util.enums import State
class Consumer:
    """
    Faktory Consumer (Worker).
    It fetches units of work (jobs) from the server and executes them.
    It retrieves the jobs from the queues, and decides how to execute them
    based on the jobtype.
    A handler must be attached to each jobtype before the consumer is launched.
    Parameters
    ----------
    client : pyfaktory.Client
        Faktory client. Client `role` must be either 'consumer' or 'both'.
    queues : List[str], default ['default']
        The queues from which the consumer will fetch jobs. If you provide
        no `queues`, the consumer will process `default` queue.
    priority : {{'strict', 'uniform', 'weighted'}}, default 'uniform'
        Priority indicates in which queue order the jobs should be fetched
        first. With `strict` priority, the worker always fetches from the first
        queue and will only try to fetch from the next once the previous queue
        is empty. With `uniform`, each queue has an equal chance of being
        fetched first. With `weighted`, queues have a different probability
        of being fetched. These probabilities are specified with `weights`
        argument.
    weights : Optional[List[float]], default None
        Probability of the queues to be fetched. This parameter is required
        when `priority` is `weighted` (and ignored in other cases), and must
        have the same number of elements as `queues`.
    concurrency : int, default 4
        Number of jobs to run at the same time.
    grace_period : int, default 25
        Grace period between the beginning of a shutdown and its end.
        This period is used to give the job some time to finish, to stop them
        properly and to notify the server. This period should never be longer
        than 30 seconds.
    sentry_capture_exception : bool, default False
        If `True` capture exceptions using Sentry before failling jobs.
    """
    def __init__(self,
                 client: Client,
                 queues: List[str] = C.DEFAULT_QUEUES,
                 priority: str = 'uniform',
                 weights: Optional[List[float]] = None,
                 concurrency: int = 4,
                 grace_period: int = C.DEFAULT_GRACE_PERIOD,
                 sentry_capture_exception: bool = False) -> None:
        """Validate the configuration and set up the worker process pool."""
        self.logger = logging.getLogger(name='FaktoryConsumer')
        # A producer-only client cannot fetch/ack/fail jobs.
        if client.role == 'producer':
            raise ValueError(
                "Provided client is exclusively producer and can't act as a consumer"
            )
        self.client = client
        self.queues = queues
        if priority not in ['strict', 'uniform', 'weighted']:
            raise ValueError(
                f"Unexpected priority ({priority}), priority should be 'strict', 'uniform' or 'weighted'"
            )
        self.priority = priority
        # NOTE(review): `self.weights` is only assigned when priority is
        # 'weighted'; get_queues() only reads it in that case, so other
        # priorities simply never define the attribute.
        if self.priority == 'weighted':
            if weights is None:
                raise ValueError(
                    'Priority is weighted but weights are not provided')
            elif len(weights) != len(self.queues):
                raise ValueError('Weights and queues lengths mismatch')
            else:
                self.weights = weights
        self.concurrency = concurrency
        self.grace_period = grace_period
        self.sentry_capture_exception = sentry_capture_exception
        # Jobs execute in separate processes managed by pebble.
        self.pool = ProcessPool(max_workers=self.concurrency)
        self.job_handlers: Dict[str, Callable] = {}
        # Number of jobs currently scheduled but not yet completed; guarded
        # by the lock below because task_done() runs on a callback thread.
        self.pending_tasks_count = 0
        self.lock_pending_tasks_count = multiprocessing.Lock()
    @sighandler((signal.SIGTERM))
    def handle_sigterm(*_):
        # Translate SIGTERM into KeyboardInterrupt so the run() loop's
        # graceful-shutdown path handles both signals uniformly.
        raise KeyboardInterrupt
    def register(self, name: str, fn: Callable):
        """
        Register a handler for the given jobtype.
        It is expected that all jobtypes are registered upon process
        startup.
        """
        self.job_handlers[name] = fn
        self.logger.info(f"Registered handler for jobtype: {name}")
    def get_job_handler(self, name: str) -> Callable:
        """Return the handler registered for `name`, or raise ValueError."""
        try:
            return self.job_handlers[name]
        except KeyError:
            self.logger.error(f"'{name}' is not a registered job handler")
            # One could consider just failing the job and continue running,
            # but we are not doing it here because it is expected that all
            # jobtypes are registered upon process startup.
            raise ValueError(f"'{name}' has no registered handler")
    def get_queues(self) -> List[str]:
        """Return the queues in the order they should be fetched from.

        NOTE(review): the 'uniform' branch shuffles `self.queues` in place,
        mutating the list the caller passed to __init__ — confirm intended.
        """
        if self.priority == 'strict':
            return self.queues
        elif self.priority == 'uniform':
            random.shuffle(self.queues)
            return self.queues
        else:
            return helper.weighted_shuffle(self.queues, self.weights)
    def task_done(self, future):
        """Completion callback for a scheduled job.

        ACKs the job on success, FAILs it (optionally reporting to Sentry)
        when the handler raised, and always decrements the pending counter.
        """
        self.logger.info(f'Task done callback called for job {future.job_id}')
        try:
            result = future.result()
            self.logger.info(f'Task (job {future.job_id}) returned {result}')
            self.client._ack(jid=future.job_id)
        except Exception as err:
            if self.sentry_capture_exception:
                import sentry_sdk
                sentry_sdk.capture_exception(err)
            err_type, err_value, err_traceback = sys.exc_info()
            self.logger.info(
                f'Task (job {future.job_id}) raised {err_type}: {err_value}')
            self.logger.debug(f'Task (job {future.job_id}) backtrace: ',
                              traceback.format_tb(err_traceback))
            # future.backtrace holds the server-requested number of
            # backtrace lines (set in run() from the job payload).
            self.client._fail(jid=future.job_id,
                              errtype=err_type.__name__,
                              message=str(err_value),
                              backtrace=traceback.format_tb(
                                  err_traceback, limit=future.backtrace))
        finally:
            with self.lock_pending_tasks_count:
                self.pending_tasks_count -= 1
    def run(self):
        """
        Start the consumer.
        When this method is called, the fetching and execution of the jobs
        starts. The job handlers must have been registered beforehand.
        This method is blocking, it only stops in the event of an error
        (only main loop errors, errors that occur in the handlers cause the job
        to fail and are reported to Faktory Server) or when a signal is received
        (Ctrl-C or from the Faktory Web UI).
        At the beginning of the shutdown, the worker gives itself a grace period
        to stop properly and notify the last information to the Faktory server.
        If a second signal is received, this causes an immediate shutdown.
        """
        self.logger.info('Entering run loop..')
        # TODO: check this
        while self.client.state in [State.IDENTIFIED, State.QUIET]:
            try:
                # QUIET: keep running already-scheduled jobs, fetch no more.
                if self.client.state == State.QUIET:
                    self.logger.info(
                        f'State is {self.client.state}, not fetching further jobs'
                    )
                    time.sleep(self.client.beat_period)
                    continue
                # Only fetch when a worker slot is free.
                if self.pending_tasks_count < self.concurrency:
                    queues_tmp = self.get_queues()
                    self.logger.info(f'Fetching from queues: {queues_tmp}')
                    # If no jobs are found, _fetch will block for up
                    # to 2 seconds on the first queue provided.
                    job = self.client._fetch(queues_tmp)
                    if job:
                        # TODO: check
                        # timeout=job.get('reserve_for', None)
                        job_handler = self.get_job_handler(job.get('jobtype'))
                        future = self.pool.schedule(job_handler,
                                                    args=job.get('args'))
                        with self.lock_pending_tasks_count:
                            self.pending_tasks_count += 1
                        # Stash job metadata on the future so task_done()
                        # can ACK/FAIL the right jid.
                        future.job_id = job.get("jid")
                        future.backtrace = job.get("backtrace", 0)
                        future.add_done_callback(self.task_done)
                    else:
                        self.logger.debug('Queues are empty.')
                else:
                    # TODO: maybe use Event object instead of sleep
                    time.sleep(0.1)
            except KeyboardInterrupt:
                self.logger.info(
                    'First KeyboardInterrupt, stopping after grace period')
                break
            except Exception:
                self.logger.error(
                    f'Shutting down due to an error: {traceback.format_exc()}')
                break
        self.logger.info(f'Run loop exited, state is {self.client.state}')
        self.logger.info(f'Grace period of {self.grace_period} seconds...')
        self.logger.info(f'Press Ctrl-C again to stop immediately')
        try:
            # Give in-flight jobs up to grace_period seconds to finish.
            self.pool.close()
            self.pool.join(timeout=self.grace_period)
        except KeyboardInterrupt:
            self.logger.info('Second KeyboardInterrupt, stopping immediately')
        self.logger.info(f'End of the grace period. Stopping.')
        sys.exit(1)
        # TODO: set rss_kb (sys.getsizeof?)
| ghilesmeddour/faktory_worker_python | src/pyfaktory/consumer.py | consumer.py | py | 9,583 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "client.Client",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_num... |
74262373225 | import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from tqdm import tqdm
from transformers import ElectraForTokenClassification, ElectraConfig, WEIGHTS_NAME, CONFIG_NAME
from transformers import ElectraTokenizer
import time
import pandas as pd
import spacy
import re
import _pickle as cPickle
import numpy as np
from extract_hpi import get_hpi_output
import argparse
#load pickle file which is a label to id mapping required for predictions
with open(r"./label2id.pkl", "rb") as input_file:
label2id = cPickle.load(input_file)
_nlp = spacy.load('en_core_sci_lg', disable=['ner', 'parser', 'tagger', 'lemmatizer'])
_nlp.create_pipe('sentencizer')
_nlp.add_pipe('sentencizer')
tokenizer = ElectraTokenizer.from_pretrained('./electra/')
model = ElectraForTokenClassification.from_pretrained('./electra/')
def processing_hpi_output(data):
    """
    Clean each record's HPI text and flatten it into CoNLL-style lines.

    :param data: iterable of records, each a dict with a 'text' field
    :return: flat list of lines ('token _ _ ' entries with '-DOCSTART-',
        '<s>' and '</s>' separators) ready to be written to a file
    :rtype: list[str]
    """
    final = []
    for fileid, record in enumerate(data):
        # Collapse double line breaks into sentence ends, then strip all
        # remaining line breaks and non-word characters (\W also removes
        # punctuation) down to spaces.
        text = re.sub('[\r\n]{2}', '. ', record['text'])
        text = re.sub('\r|\n|\W', ' ', text)
        text = re.sub('\\n', '', text)
        text = re.sub('\r|\n', ' ', text)
        # NOTE(review): the two escaped literal-backslash substitutions
        # below look redundant after the replacements above — confirm they
        # still match anything in real inputs.
        esc = re.escape('.\\n\\n')
        text = re.sub(esc, '', text)
        esc2 = re.escape('\\n')
        text = re.sub(esc2, '', text)
        # Map token start offset -> token text (offsets keep spaCy order).
        tokens = {tok.idx: tok.text for tok in _nlp.tokenizer(text)}
        out = ['-DOCSTART- -X- O O', '<s> _ _ O']
        sentence_sep = '</s> _ _ O'
        # get tokens and tags for documents
        for idx, txt in tokens.items():
            if re.search('\n+|\s+', txt):
                out.append('<blank> _ _ ')
            else:
                out.append(txt + ' _ _ ')
        # split documents into sentences and apply HPI sectioning if required
        text = ' '.join(o.split(' ')[0] for o in out)
        doc = _nlp(text)
        sentences = [sent.text.strip() for sent in doc.sents]
        # Insert a sentence separator after each sentence; start=2 skips
        # the '-DOCSTART-' and '<s>' header lines.
        start = 2
        for sent in sentences:
            start += len(sent.split())
            out.insert(start, sentence_sep)
            start += 1
        out.append(sentence_sep)
        final += out
    return final
def get_essentials(filepath):
    """Scan a CoNLL-style file and build the label vocabularies.

    :param filepath: path to a file with one 'token ... id label' line per
        token, separated by '-DOCSTART-', '<s>'/'</s>' or blank lines
    :return: (label2id, num_labels, pad_label_id, id2label) where the pad
        key 0 maps to the ignore index -100
    """
    PAD_KEY = 0
    IGNORE_INDEX = -100
    with open(filepath, 'r') as handle:
        raw_lines = handle.readlines()
    sentences = []
    labels = []
    idx = []
    cur_tokens, cur_labels, cur_ids = [], [], []
    tag_vocab = set()
    for raw in raw_lines:
        is_boundary = ('-DOCSTART-' in raw or '</s>' in raw
                       or '<s>' in raw or not raw.rstrip())
        if is_boundary:
            # Flush the sentence collected so far.
            if cur_tokens and cur_labels:
                sentences.append(cur_tokens)
                labels.append(cur_labels)
                idx.append(cur_ids)
            if '-DOCSTART-' in raw:
                # Document start: seed the next sentence with a 'D' marker.
                cur_tokens = ['D']
                cur_labels = ['O']
                cur_ids = ['D{}'.format(raw.split()[-2])]
            else:
                cur_tokens, cur_labels, cur_ids = [], [], []
            continue
        fields = raw.split()
        cur_tokens.append(fields[0])
        cur_ids.append(fields[-2])
        cur_labels.append(fields[-1])
        tag_vocab.add(fields[-1])
    # label_map: tag -> integer id (set order, so not stable across runs)
    label2id = {tag: i for i, tag in enumerate(tag_vocab)}
    num_labels = len(label2id)
    label2id[PAD_KEY] = IGNORE_INDEX
    id2label = {v: k for k, v in label2id.items()}
    return label2id, num_labels, label2id[PAD_KEY], id2label
def prepare_data(filepath):
    """
    Read a CoNLL-style file and convert it to model-ready tensors.

    :param filepath: path to the tokenized text file (see get_essentials)
    :return: (data, tokenized_token, tokenized_sentences) — a TensorDataset
        of (input_ids, attention_masks), the source word for each subword,
        and the subword sequences themselves
    """
    pad_token = 0
    with open(filepath, 'r') as f:
        lines = f.readlines()
    sentences, sent = [], []
    for line in lines:
        # Sentence boundaries: document start, <s>/</s> markers, blank lines.
        if '-DOCSTART-' in line or '</s>' in line or '<s>' in line or line.rstrip() == '':
            if sent:
                sentences.append(sent)
            if '-DOCSTART-' in line:
                sent = ['D']
            else:
                sent = []
        else:
            sent.append(line.split()[0])
    # tokenize the sentences and save the start offset of each subwords
    tokenized_sentences, tokenized_token = [], []
    for sent in sentences:
        tokenized_sent, tokenized_tok = [], []
        for word in sent:
            tokenized_word = tokenizer.tokenize(word)
            tokenized_sent.extend(tokenized_word)
            # Repeat the source word once per subword so predictions can be
            # mapped back to words later.
            tokenized_tok.extend([word] * len(tokenized_word))
        # truncate the subword tokens longer than maxium sequence length
        # (512 — presumably the ELECTRA model's max input length; confirm)
        if len(tokenized_sent) > 512:
            tokenized_sent = tokenized_sent[: 512]
            tokenized_tok = tokenized_tok[: 512]
        tokenized_sentences.append(tokenized_sent)
        tokenized_token.append(tokenized_tok)
    input_ids, attention_masks = [], []
    for sent in tokenized_sentences:
        # get token's id and label's id
        input_id = tokenizer.convert_tokens_to_ids(sent)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to
        input_mask = [1] * len(input_id)
        # Zero-pad up to the sequence length (pad on right)
        padding_length = 512 - len(input_id)
        input_id += [pad_token] * padding_length
        input_mask += [0] * padding_length
        input_ids.append(input_id)
        attention_masks.append(input_mask)
    data = TensorDataset(torch.tensor(input_ids), torch.tensor(attention_masks))
    return data, tokenized_token, tokenized_sentences
def format_tags(predictions, tokenized_token, id2label):
    """Convert predicted label ids back to tag strings.

    :param predictions: per-document lists of predicted label ids
    :param tokenized_token: per-document lists of source words ('D' alone
        marks a document-start record)
    :param id2label: id -> tag-name mapping
    :return: (pred_tags, out, indices) — the flat tag list, the formatted
        'word TAG' lines (one empty line between documents), and the
        positions in `out` of spurious 'D O' lines that occur inside
        ordinary documents
    """
    pred_tags = []
    out = []
    indices = []
    position = 0
    for doc_preds, doc_tokens in zip(predictions, tokenized_token):
        is_docstart = (doc_tokens == ['D'])
        for label_id, word in zip(doc_preds, doc_tokens):
            tag = id2label[label_id]
            pred_tags.append(tag)
            line = '{} {}'.format(word, tag)
            out.append(line)
            # A 'D O' line inside a non-docstart document is a spurious
            # document marker; remember where it landed so the caller can
            # ignore it when splitting documents.
            if line == 'D O' and not is_docstart:
                indices.append(position)
            position += 1
        out.append('')
        position += 1
    return pred_tags, out, indices
def get_text_file(hpi_ip, col_name):
    """
    Run HPI extraction over the input dataframe and write the processed
    CoNLL-style lines to ./processed_file.txt.

    :param hpi_ip: input dataframe of clinical notes
    :param col_name: name of the column holding the note text

    NOTE(review): predict() below calls this with a single argument,
    which would raise a TypeError — confirm the intended call signature.
    """
    hpi_output = get_hpi_output(hpi_ip, col_name)
    processed_hpi_output = processing_hpi_output(hpi_output)
    with open('processed_file.txt', 'w') as f:
        f.writelines("%s\n" % o for o in processed_hpi_output)
def predict(file, col_name=None):
    """
    Load the saved model and run NER prediction over a CSV of notes.

    :param file: path to the input CSV
    :param col_name: name of the CSV column holding the note text;
        forwarded to the HPI extraction step (previously the
        get_text_file(...) call omitted this required argument, which
        raised a TypeError)
    :return: the input dataframe with a new 'labels' column
    :rtype: pandas.DataFrame
    """
    hpi_ip = pd.read_csv(file)
    # Bug fix: get_text_file requires the note column name as well.
    get_text_file(hpi_ip, col_name)
    te_data, tt, ts = prepare_data('./processed_file.txt')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    test_dataloader = DataLoader(te_data, sampler=SequentialSampler(te_data), batch_size=32)
    model.to(device)
    model.eval()
    test_loss = 0
    test_steps = 0
    predictions, pr_label = [], []
    start = time.time()
    for batch in tqdm(test_dataloader, desc='Prediction'):
        # move batch to gpu
        b_input_ids, b_input_mask = tuple(t.to(device) for t in batch)
        with torch.no_grad():
            # Forward pass
            outputs = model(b_input_ids, attention_mask=b_input_mask)
            logits = outputs.logits
        logits = logits.detach().cpu().numpy()
        predictions.extend([list(p) for p in np.argmax(logits, axis=2)])
        test_steps += 1
    if test_steps:  # avoid ZeroDivisionError on an empty dataloader
        test_loss /= test_steps
    test_time = time.time() - start
    id2label = {v: k for k, v in label2id.items()}
    pred_tags, out, indices = format_tags(predictions, tt, id2label)
    # Positions of document-start markers ('D O'); drop the spurious ones
    # flagged by format_tags so only genuine document boundaries remain.
    index_pos_list = [i for i in range(len(out)) if out[i] == 'D O']
    for i in tqdm(indices, desc='indices'):
        index_pos_list.pop(index_pos_list.index(i))
    # Slice the flat token/tag line list back into per-document chunks.
    final = []
    for i in range(len(index_pos_list)):
        start = index_pos_list[i]
        if i < len(index_pos_list) - 1:
            end = index_pos_list[i + 1]
            final.append(out[start:end])
        else:
            final.append(out[start:])
    # Unique raw tags predicted in each document.
    tags = []
    for i in tqdm(final):
        s = []
        for j in i:
            s.append(j.split(' ')[-1])
        tags.append(list(set(s)))
    labels = []
    for i in tqdm(tags, desc="Tags"):
        k = []
        for j in i:
            # Bug fix: the original condition `j != '' or j != 'O'` is
            # always True, so empty and 'O' tags leaked into the output.
            if j != '' and j != 'O':
                k.append(j.split('-')[-1])
        labels.append(k)
    hpi_ip['labels'] = pd.Series(labels)
    return hpi_ip
if __name__ == "__main__":
    # CLI: python run_and_predict.py -ipf in.csv -cn note_column -opf out.csv
    parser = argparse.ArgumentParser(description='Predict labels for clinical text')
    parser.add_argument('-ipf', '--input_file', dest='ipf', help='Path to input file', required=True)
    parser.add_argument('-cn', '--column_name', dest='cn', help='Name of the column containing the note', required=True)
    parser.add_argument('-opf', '--output_file', dest='opf', help='Path to output file', required=True)
    args = parser.parse_args()
    # NOTE(review): args.cn is parsed (and required) but never forwarded to
    # predict(), while get_text_file() needs a column name — confirm wiring.
    final_op = predict(args.ipf)
    final_op.to_csv(args.opf, index=False)
| lindvalllab/MLSym | inference/run_and_predict.py | run_and_predict.py | py | 9,557 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "_pickle.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "spacy.load",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "transformers.ElectraTokenizer.from_pretrained",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": ... |
import torch
import json

# Load the speaker mapping saved alongside the VITS model.
speakers = torch.load('../models/vits_ca/speakers.pth')
print(type(speakers))
# speakers_conversion.csv: one "source,target" pair per line.
conv = [line.strip().split(',') for line in open('speakers_conversion.csv').readlines()]
new_speakers = {}
for source, target in conv:
    id = speakers.get(source)
    # NOTE(review): truthiness here drops a legitimate id of 0 (or any falsy
    # value) — confirm whether `if id is not None:` was intended.
    if id:
        # NOTE(review): this stores the *source name*, not the looked-up id —
        # verify that target -> source (rather than target -> id) is the
        # mapping downstream consumers expect.
        new_speakers[target] = source
with open('speaker_ids.json', 'w') as out:
    json.dump(new_speakers, out)
print(new_speakers)
| projecte-aina/tts-api | scripts/change_model.py | change_model.py | py | 414 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "torch.load",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 13,
"usage_type": "call"
}
] |
#!/usr/bin/python3
import matplotlib.pyplot as plt
import numpy as np

# Plot y = x^2 over [-10, 10].
x = np.linspace(-10, 10, 100)  # 100 evenly spaced x coordinates in [-10, 10]
y = x ** 2                     # corresponding y coordinates
plt.plot(x, y)                 # draw the curve
plt.xlabel('x')                # x-axis label
plt.ylabel('y')                # y-axis label (bug fix: was plt.xlabel, which just overwrote 'x')
plt.title('y = x^2')           # figure title
plt.grid(True)                 # show grid lines
plt.show()                     # display the figure
{
"api_name": "numpy.linspace",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplo... |
26296087809 |
from PIL import Image
import numpy as np
import cv2
import pdb
import os
import time
import signal
import argparse
import json
import shutil
def split_data(path_, set_):
    """Copy .txt files from `path_` into ./train/ or ./test/ (CWD).

    For the "train" split every file is copied; for any other split only
    every 5th file (in sorted-name order) is copied, subsampling the set.
    """
    src_dir = path_ + "/"
    txt_files = sorted(f for f in os.listdir(src_dir) if f.endswith('.txt'))
    if set_ == "train":
        dst_dir, stride = "train/", 1
    else:
        dst_dir, stride = "test/", 5
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    for name in txt_files[::stride]:
        shutil.copyfile(src_dir + name, dst_dir + name)
def main():
    """CLI entry point: split every identity's event files into train/test."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='out_dir/Event_ReId/')
    params = parser.parse_args()
    # 33 identity folders, zero-padded to three digits (001..033).
    for i in range(33):
        i_d = "{0:03}".format(i+1)
        path = params.data_dir + str(i_d) + '/'
        sub_path = os.listdir(path)
        # NOTE(review): assumes each identity folder contains at least four
        # entries, and os.listdir order is arbitrary — confirm this matches
        # the dataset layout.
        for j in range(4):
            complete_path = os.path.join(path, sub_path[j])
            id_ = i+1
            # Every third identity (2, 5, 8, ...) is held out for testing.
            if id_ not in [2, 5, 8, 11, 14, 17, 20, 23, 26, 29, 32]:
                set_ = "train"
            else:
                set_ = "test"
            split_data(complete_path, set_)
if __name__ == "__main__":
main()
| IIT-PAVIS/ReId_without_Id | data/split_train_test.py | split_train_test.py | py | 1,418 | python | en | code | 16 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number... |
31474695571 | # encoding: utf-8
"""
Core
"""
import json
import pytz
from typing import Any, Dict
from base64 import b64decode
from datetime import datetime
from google.cloud import datastore
from jsonschema import validate
from jsonschema.exceptions import ValidationError, SchemaError
import config
import log
def current_datetime():
    """Return the current America/Sao_Paulo time as an ISO-8601 string."""
    sao_paulo = pytz.timezone("America/Sao_Paulo")
    return datetime.now(sao_paulo).isoformat()
def validate_schema(doc_value: json, schema_properties: json) -> bool:
    """
    Check a document against a JSON schema.

    Parameters
    ----------
    doc_value : json
        The document json to validate
    schema_properties : json
        Json schema with properties to validate doc_value

    Returns
    -------
    Boolean
        True when the document conforms to the schema, False on any
        validation or schema error.
    """
    try:
        validate(instance=doc_value, schema=schema_properties)
        return True
    except (ValidationError, SchemaError):
        return False
def validate_manifest(pubsub_message: Any) -> Dict[str, Any]:
    """
    Decode a base64 Pub/Sub payload and validate it against the manifest schema.

    Parameters
    ----------
    pubsub_message : str
        Base64-encoded Pub/Sub message data.

    Returns
    -------
    JSON Manifest, Dict
        The decoded manifest dict when it matches config.MANIFEST_SCHEMA,
        otherwise None.
    """
    json_manifest = json.loads(b64decode(pubsub_message).decode("utf-8"))
    # NOTE(review): debug print — consider routing through the log module.
    print(json_manifest)
    try:
        result = validate_schema(json_manifest, config.MANIFEST_SCHEMA)
        if result:
            return json_manifest
        return None
    except ValueError:  # Invalid JSON
        # NOTE(review): json.loads runs *outside* this try block, so a
        # malformed payload raises before reaching this handler — confirm
        # whether the decode should be moved inside the try.
        return None
def datastore_publish(json_manifest: json, id: str):
    """
    Upsert a manifest entity in Cloud Datastore.

    Bumps `meta.version` (1 for a new entity, previous+1 otherwise) and
    refreshes `meta.last_update` before saving.

    Parameters
    ----------
    json_manifest : json
        JSON with Manifest contract
    id : str
        Id of Manifest (used as the Datastore key name)

    Returns
    -------
    None
    """
    # Instantiates a client
    datastore_client = datastore.Client()
    # The kind for the new entity
    kind = "schema-repository"
    # The Cloud Datastore key for the new entity
    task_key = datastore_client.key(kind, id)
    # Verify if Document Alredy Exists and
    # update `version`` and `last_update`` values
    #
    task_result = datastore_client.get(task_key)
    # NOTE(review): assumes any existing entity has a manifest.meta.version
    # field — a KeyError here would mean a malformed stored entity.
    version = (
        int(task_result["manifest"]["meta"]["version"]) + 1
        if task_result is not None
        else 1
    )
    json_manifest["meta"]["version"] = version
    json_manifest["meta"]["last_update"] = current_datetime()
    # Prepares the new entity
    task = datastore.Entity(key=task_key)
    task["manifest"] = json_manifest
    # Saves the entity
    datastore_client.put(task)
    log.info(f"Saved {task.key.name}: {task['manifest']}")
| rogerjestefani/schema-publish | src/core.py | core.py | py | 2,605 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "pytz.timezone",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "jsonschema.vali... |
43194853510 | import torch
from HDGCN import HDGCN
from utils import DatasetLoader, accuracy
from torch.utils.data import DataLoader
from adabelief_pytorch import AdaBelief
import torch.nn.functional as F
#
# Settings.
#
torch.cuda.set_device(4)
learning_rate = 0.001
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#
# load datasets
#
batch_size = 32
bert_dim = 300
train_data = DatasetLoader('mr', set_name="train")
vocab = train_data.vocab
test_data = DatasetLoader('mr', set_name="test")
max_seq_len = train_data.nnodes
train_data_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
test_data_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=True)
nhid = 300
vote_dim = 100
nclass = train_data.nclass()
input_cols = ['node_embeddings', 'dependency_graph', 'polarity']
#
# Create capsule network.
#
network = HDGCN(nnodes=max_seq_len,
nfeat=bert_dim,
nhid=nhid,
nclass=nclass,
max_seq_len=max_seq_len,
device=device,
batch_size=batch_size,
vocab=vocab).to(device)
optimizer = AdaBelief(network.parameters(), lr=learning_rate, eps=1e-16, betas=(0.9, 0.999),
weight_decouple=True, rectify=False)
# Converts batches of class indices to classes of one-hot vectors.
# Converts batches of class indices to classes of one-hot vectors.
def to_one_hot(x, length):
    """Return a (batch, length) float tensor one-hot encoding of `x`."""
    batch = x.size(0)
    encoded = torch.zeros(batch, length)
    for row, cls in enumerate(x):
        encoded[row, cls] = 1.0
    return encoded
def test():
    """Evaluate the network on the test loader; return the accuracy score."""
    network.eval()
    test_loss = 0
    correct = 0
    total_samples = 0
    with torch.no_grad():
        for i_batch, sample_batched in enumerate(test_data_loader):
            # input_cols = ['node_embeddings', 'dependency_graph', 'polarity']
            inputs = [sample_batched[col].to(device) for col in input_cols]
            sentences = sample_batched['sentence']
            output = network(inputs[0], inputs[1], sentences, batch=i_batch, vis=False)
            # Target class = argmax of the one-hot polarity tensor.
            test_loss += float(F.nll_loss(output, inputs[2].max(1, keepdim=False)[1]).item())
            total_samples += len(output)
            correct += accuracy(output, inputs[2].max(1, keepdim=False)[1])
    # NOTE(review): both averages divide by the number of batches, not by
    # total_samples (which is accumulated but unused) — confirm `accuracy`
    # returns a per-batch score so this is the intended mean.
    test_loss /= (i_batch + 1)
    correct = correct.item()
    correct /= (i_batch + 1)
    return correct
def train(epoch):
    """Run one training epoch; print metrics and return the mean loss."""
    train_loss = 0.
    correct = 0.
    total_samples = 0
    network.train()
    for i_batch, sample_batched in enumerate(train_data_loader):
        inputs = [sample_batched[col].to(device) for col in input_cols]
        sentences = sample_batched['sentence']
        optimizer.zero_grad()
        # Ask the network to emit visualizations every 5th epoch.
        if epoch % 5 == 0:
            visualize = True
        else:
            visualize = False
        output = network(inputs[0], inputs[1], sentences, batch=i_batch, vis=visualize)
        # loss = network.loss(output, inputs[2])
        loss = F.nll_loss(output, inputs[2].max(1, keepdim=False)[1])
        loss.backward()
        train_loss += float(loss.item())
        optimizer.step()
        total_samples += len(output)
        correct += accuracy(output, inputs[2].max(1, keepdim=False)[1])
    # NOTE(review): averages are per batch (i_batch + 1), not per sample;
    # total_samples is accumulated but unused — confirm intent.
    correct = correct.item()
    correct /= (i_batch + 1)
    train_loss /= (i_batch + 1)
    print('Train Epoch: {} \t Loss: {:.6f}, \t Accuracy: {:.6f}'.format(epoch, train_loss, correct))
    return train_loss
num_epochs = 1000
best_correct = 0.0
corrects = []
# Main loop: train one epoch, evaluate, and track the best test accuracy
# seen so far (note: the model itself is not checkpointed anywhere).
for epoch in range(1, num_epochs + 1):
    print('training epochs: {}'.format(epoch))
    train_loss = train(epoch)
    correct = test()
    corrects.append(correct)
    if correct > best_correct:
        best_correct = correct
    print("best correct: {:.6f} \n".format(best_correct))
    print('>' * 100)
| MathIsAll/HDGCN-pytorch | main.py | main.py | py | 3,791 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "torch.cuda.set_device",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_avai... |
25809988661 | #!/usr/bin/env python3
import multiprocessing
import time
import os
from src.model.load_json import load_json_to_dict
from src.model.load_script import load_script_file
from src.model.shunt import Shunt
from src.tools.engine.code.github.clone import CodeClone
from src.tools.log4py.log4py import print_log
from src.tools.log4py.log4py import LogLevel
from src.tools.engine.soft.install_soft import SoftTool
from src.config.config import configration
def main_process():
    """Worker loop: load scripts from the configured folder and run each one.

    NOTE(review): the `break` at the end of the while body means the loop
    currently executes exactly once and the sleep below it is unreachable —
    confirm whether this is a leftover from debugging.
    """
    print_log(msg=f"Main Process Started: PID {os.getpid()}", level=LogLevel.INFO)
    while True:
        print_log("Main Process is running...")
        folder_path = configration.get("script_path")
        script_type=[".json", ".py", ".shell", "sh"]
        scripts = load_script_file(folder_path=folder_path, script_type=script_type)
        print_log(msg="Begin execute scripts.", level=LogLevel.DEBUG)
        for item in scripts:
            print_log(msg=f"Begin run script: {item}.", level=LogLevel.DEBUG)
            # Shunt dispatches the script to the right runner by extension.
            script_shunt = Shunt(file_path=item, folder_path=folder_path)
            script_shunt.operation()
            print("|")
            print("|")
            print("|")
            print_log(msg=f"Run script: {item} over.", level=LogLevel.DEBUG)
        break
        # NOTE(review): config key is spelled "mian_..." — confirm it matches
        # the actual configuration file.
        time.sleep(configration.get("mian_process_sleep_time", 300))  # pause between scans
def monitor_process():
    """Watchdog loop: restart the main process when it is no longer alive.

    NOTE(review): multiprocessing.active_children() only reports children of
    *this* process; when the monitor runs as a sibling of MainProcess (as in
    __main__ below) the main process never appears here until this monitor
    has restarted it itself. Consider passing the Process handle in and
    using .is_alive() instead — TODO confirm intent.
    """
    while True:
        # Check whether a live child named 'MainProcess' exists.
        main_process_alive = any(
            child.name == 'MainProcess'
            for child in multiprocessing.active_children()
        )
        if not main_process_alive:
            print_log(msg="Main Process down! Restarting...", level=LogLevel.ERROR)
            # Restart the main process.
            new_main_process = multiprocessing.Process(target=main_process, name='MainProcess')
            new_main_process.start()
        else:
            # Bug fix: the keyword was misspelled `mgs=`, which raised a
            # TypeError every time this branch executed.
            print_log(msg="Main Process is alive.", level=LogLevel.INFO)
        time.sleep(30)  # poll every 30 seconds
if __name__ == "__main__":
    # Start the worker process.
    main_proc = multiprocessing.Process(target=main_process, name='MainProcess')
    main_proc.start()
    # Start the watchdog that restarts the worker if it dies.
    monitor_proc = multiprocessing.Process(target=monitor_process)
    monitor_proc.start()
    main_proc.join()
    monitor_proc.join()
| MongoliaCavalry/BronzeMan | main.py | main.py | py | 2,464 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "src.tools.log4py.log4py.print_log",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.getpid",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "src.tools.log4py.log4py.LogLevel.INFO",
"line_number": 19,
"usage_type": "attribute"
},
{... |
27943726192 | import os, sys, statistics as stats
tags = []
#Outliner v1.0
#Copyright Richard Gustafsson
#Release: Oct 15 2019
def spc(i=1337):
    """Print an optional message followed by a blank separator line.

    The default value 1337 acts as a "no message" sentinel.
    """
    has_message = (i != 1337)
    if has_message:
        print(i)
    print()
# Load the column tags (one per line in tags.txt) that label each data
# column; exit immediately if the file is missing.
if os.path.isfile("tags.txt"):
    with open("tags.txt") as file:
        tags = [line.strip() for line in file]
    print("Tags loaded:")
    # Print the tags comma-separated, with no trailing comma on the last.
    for index in range(0, len(tags)):
        if index == len(tags)-1:
            print(tags[index])
        else:
            print(tags[index], end=", ")
    spc()
else:
    print("ERROR: Could not find file tags.txt in this directory")
    print("Make a file called tags.txt and write one tag per line")
    input("Press any key to continue...")
    sys.exit()
def clear():
    """Clear the terminal, using the right command for the platform."""
    command = "cls" if os.name == "nt" else "clear"
    os.system(command)
def separate(file):
    """Read a whitespace-separated data file.

    Returns (lines, columns): every line of the file with trailing
    whitespace stripped, and the number of whitespace-separated fields
    on the first line.
    """
    with open(file) as handle:
        column_count = len(handle.readline().split())
        handle.seek(0)  # rewind so the first row is included below
        stripped_lines = [row.rstrip() for row in handle]
    return stripped_lines, column_count
def makeNumber(i):
    """Best-effort integer conversion: int(i), or 0 when i is not an integer.

    Non-numeric strings, floats-as-strings ("3.7") and None all yield 0.
    """
    try:
        return int(i)
    except (TypeError, ValueError):
        # Bug fix: a bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; only conversion failures should fall back to 0.
        return 0
def tryPrint(n):
    """Print the nth tag from the global `tags` list as a section header.

    Falls back to an error marker when `n` is out of range (or is not a
    valid list index at all).
    """
    try:
        print(f"---{tags[n]}---")
    except (IndexError, TypeError):
        # Bug fix: a bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; only bad indices should produce the error marker.
        print("ERROR: UNKNOWN TAG")
def flattenData(fp):
    """Summarize each column of a whitespace-separated data file.

    For every column, prints its tag header (from the global `tags` list)
    followed by the column's sum, average and median.
    """
    spc()
    spc()
    data, dataPerRow = separate(fp)
    lineCount = 0
    for line in data:
        lineCount += 1
    # One pass per column (1-based).
    for i in range(1, dataPerRow+1):
        totalSum = 0
        medianList = []
        for line in data:
            tempLine = line.split()
            #if the list is still in range, continue
            try:
                totalSum += makeNumber(tempLine[i-1])
                medianList.append(makeNumber(tempLine[i-1]))
            except:
                # Row shorter than the header row: skip this cell.
                pass
        #Calculate the average and the median
        avg = totalSum / lineCount
        med = stats.median(medianList)
        #print the content
        tryPrint(i-1)
        print(f"Sum: {totalSum}, Average: {round(avg, 1)}, Median {med}")
    spc()
    spc()
    print("Ready for another round")
# Interactive loop: prompt for a data file name and summarize it.
while True:
    filePath = input("Name of data file (with extension)? ")
    if len(filePath) > 0:
        if os.path.isfile(filePath):
            flattenData(filePath)
        else:
            spc("ERROR: Not a file")
    else:
        spc("ERROR: Files must contain at least one character")
{
"api_name": "os.path.isfile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.name",
"line_number": 32,... |
19421956825 | # import the libraries
from datetime import timedelta
# The DAG object; we'll need this to instantiate a DAG
from airflow import DAG
# Operators; we need this to write tasks!
from airflow.operators.bash_operator import BashOperator
# This makes scheduling easy
from airflow.utils.dates import days_ago
#defining DAG arguments
# You can override them on a per-task basis during operator initialization
default_args = {
    'owner': 'Amarigh',
    'start_date': days_ago(0),            # first run anchored to "today"
    'email': ['amarigmustapha@gmail.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,                          # retry a failed task once...
    'retry_delay': timedelta(minutes=5),   # ...after five minutes
}
# Define the DAG: runs once per day.
dag = DAG(
    'ETL_server_access_dag',
    default_args=default_args,
    description='ETL server access processing',
    schedule_interval=timedelta(days=1),
)
# download: fetch the raw web-server access log.
download = BashOperator(
    task_id='Download',
    bash_command='wget https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DB0250EN-SkillsNetwork/labs/Apache%20Airflow/Build%20a%20DAG%20using%20Airflow/web-server-access-log.txt',
    dag=dag,
)

# extract: keep fields 1 and 4 of the '#'-delimited log.
extract = BashOperator(
    task_id='extract',
    bash_command=' cut -d"#" -f1,4 web-server-access-log.txt > extracted-data.txt ',
    dag=dag,
)

# transform: upper-case the extracted data and turn '#' delimiters into commas.
# Bug fix: the original command had no input (tr reads stdin, so the task
# would hang forever) and used the invalid classes [--lower--]/[--upper--].
transform = BashOperator(
    task_id='transform',
    bash_command='tr "[:lower:]" "[:upper:]" < extracted-data.txt | tr "#" "," > extracted-data.csv',
    dag=dag,
)

# load: compress the final CSV.
# Bug fix: filename typo ("extracted-dat.csv") pointed at a non-existent file.
load = BashOperator(
    task_id='load',
    bash_command='gzip extracted-data.csv',
    dag=dag,
)

# Pipeline order: download -> extract -> transform -> load.
download >> extract >> transform >> load
| Amarigh/Apache_Airflow_DAG | ETL_Server_Access_Log_Processing.py | ETL_Server_Access_Log_Processing.py | py | 1,513 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "airflow.utils.dates.days_ago",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "airflow.DAG",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.... |
8128511000 | #Run old_indexer first!
import sys
import whoosh.index as index
import whoosh.qparser as qparser
from whoosh.searching import Searcher
correct = 0
queries = 0
#Opens index
ix = index.open_dir("oldIndex")
#Opens test file (path given as the first CLI argument)
with open(sys.argv[1], 'r') as f:
    while True:
        #Reads next query/url pair
        line = f.readline()
        if not line:
            break
        #Loads query and expected answer (format: "query;url")
        query = line.split(';')[0]
        expected = line.split(';')[1].rstrip('\n')
        retrieved = []
        found = False
        queries += 1
        #Handles query
        qp = qparser.QueryParser("content", schema=ix.schema)
        q = qp.parse(query)
        #Searches index for query, checks top 3 URLs for expected URL
        with ix.searcher() as searcher:
            results = searcher.search(q)
            #Correctness metric: Is our expected URL in the top 3?
            # NOTE(review): assumes every query returns at least 3 hits —
            # fewer results would raise an IndexError here; confirm.
            for i in range(3):
                retrieved.append(results[i]['title'])
                if expected == retrieved[i]:
                    correct += 1
                    found = True
        print("Query {}:".format(queries), query)
        print("URL:", expected)
        if found:
            print("PASSED")
        else:
            print("FAILED")
        #Uncomment to print top 3 URLs from IR
        #print("{}\n{}\n{}\n".format(retrieved[0], retrieved[1], retrieved[2]))
# NOTE(review): an empty test file leaves queries == 0 and the accuracy
# division below raises ZeroDivisionError — confirm inputs are non-empty.
print("{} / {} queries matched.".format(correct, queries))
print("Accuracy: ", float(correct/queries))
| gale2307/Jarvis | old_ir_tester.py | old_ir_tester.py | py | 1,596 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "whoosh.index.open_dir",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "whoosh.index",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "whoosh.qparser.Query... |
24011947026 | # --------------------------------------------------------
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import torch
from torch import nn
import numpy as np
from core.utils import *
import torch.nn.functional as F
from torch.distributions import Normal
from core.network_arch import *
class PointTrajLatentNet(nn.Module):
    def __init__(
        self,
        input_dim=3,
        pointnet_nclusters=32,
        pointnet_radius=0.02,
        model_scale=1,
        output_model_scale=1,
        extra_latent=0,
        large_feature=False,
        feature_option=0,
        extra_pred_dim=2,
        group_norm=True,
        **kwargs
    ):
        """
        PointNet++ backbone that embeds a point cloud into a latent vector.

        The encoder consumes 3 + extra_latent input channels; fc_embedding
        projects the pooled feature to output_model_scale * 512 dims and
        fc_extra adds an auxiliary extra_pred_dim prediction head.
        """
        super(PointTrajLatentNet, self).__init__()
        # Note: the `input_dim` argument is ignored; channels come from
        # 3 + extra_latent.
        self.input_dim = 3 + extra_latent
        self.model_scale = model_scale
        self.encoder = create_encoder(model_scale, pointnet_radius, pointnet_nclusters, self.input_dim, feature_option, group_norm=group_norm)
        self.feature_option = feature_option
        self.fc_embedding = nn.Linear(int(self.model_scale * 512), int(output_model_scale * 512))
        self.extra_pred_dim = extra_pred_dim
        self.fc_extra = get_fc_feat_head(int(self.model_scale * 512), [256, 64], extra_pred_dim)
    def forward(
        self,
        pc,
        grasp=None,
        feature_2=False,
        train=True,
        traj_state=None,
        traj_inbatch_index=None,
        traj_time_batch=None,
        traj_latent=None,
        encode=True
    ):
        """Encode point cloud `pc`; return (latent z, pooled point feature).

        Only `pc` is used; the remaining keyword arguments are accepted for
        interface compatibility but ignored in this implementation.
        """
        input_features = pc
        extra = input_features
        # Inputs wider than 4096 carry 6 extra leading channels that are
        # stripped first — presumably state prefixes; confirm upstream format.
        if input_features.shape[-1] != 4096:
            input_features = input_features[...,6:]
        input_features = input_features[:, :self.input_dim].contiguous()
        # xyz coordinates for the PointNet++ grouping step.
        object_grasp_pc = input_features.transpose(1, -1)[..., :3].contiguous()
        point_feat = pointnet_encode(self.encoder, object_grasp_pc, input_features)
        z, extra = self.fc_embedding(point_feat), self.fc_extra(point_feat)
        # NOTE(review): `extra` is immediately overwritten with point_feat,
        # discarding the fc_extra output computed above — confirm which of
        # the two is the intended second return value.
        extra = point_feat
        return z, extra
class PointNetFeature(nn.Module):
    """Twin PointNet++ feature extractors: one for the policy, one for the critic.

    ``forward`` picks the encoder with ``feature_2`` (True selects the critic /
    value encoder) and returns ``(z, pc)`` -- the encoded feature plus the
    untouched input point cloud.
    """
    def __init__(
        self,
        input_dim=3,
        pointnet_nclusters=32,
        pointnet_radius=0.02,
        model_scale=1,
        extra_latent=0,
        split_feature=False,
        policy_extra_latent=-1,
        critic_extra_latent=-1,
        action_concat=False,
        feature_option=0,
        group_norm=True,
        **kwargs ):
        """
        poinet++ feature network
        """
        super(PointNetFeature, self).__init__()
        # Channel budgets: policy falls back to the base width, critic falls
        # back to the policy width; action concatenation adds 6 channels.
        base_dim = 3 + extra_latent
        policy_dim = 3 + policy_extra_latent if policy_extra_latent > 0 else base_dim
        critic_dim = 3 + critic_extra_latent if critic_extra_latent > 0 else policy_dim
        if action_concat:
            critic_dim = critic_dim + 6
        self.input_dim = base_dim
        self.policy_input_dim = policy_dim
        self.model_scale = model_scale
        # Keep encoder construction order (policy first) so parameter
        # registration and init RNG draws match the original layout.
        self.encoder = create_encoder(
            model_scale, pointnet_radius, pointnet_nclusters, self.policy_input_dim, feature_option, group_norm=group_norm )
        self.critic_input_dim = critic_dim
        self.value_encoder = create_encoder(
            model_scale, pointnet_radius, pointnet_nclusters, self.critic_input_dim, feature_option, group_norm=group_norm )
        self.feature_option = feature_option
    def forward(
        self,
        pc,
        grasp=None,
        concat_option="channel_wise",
        feature_2=False,
        train=True,
        traj_state=None,
        traj_inbatch_index=None,
        traj_time_batch=None,
        traj_latent=None ):
        """Encode ``pc`` with the policy (default) or critic encoder."""
        raw_input = pc
        feats = pc
        # Observations whose last dim is not 4096 carry 6 leading hand-point
        # channels that are stripped before encoding.
        if feats.shape[-1] != 4096:
            feats = feats[..., 6:]
        if feature_2:
            feats = feats[:, :self.critic_input_dim].contiguous()
            backbone = self.value_encoder
        else:
            feats = feats[:, :self.policy_input_dim].contiguous()
            backbone = self.encoder
        # First 3 channels are xyz coordinates for PointNet++ grouping.
        xyz = feats.transpose(1, -1)[..., :3].contiguous()
        z = pointnet_encode(backbone, xyz, feats)
        return z, raw_input
class STPointNetFeature(nn.Module):
    """Spatiotemporal PointNet feature: concatenates a per-trajectory global
    feature with a per-timestep local feature.
    """
    def __init__(
        self,
        input_dim=3,
        pointnet_nclusters=32,
        pointnet_radius=0.02,
        model_scale=1,
        extra_latent=0,
        feature_option=1,
        group_norm=True,
        **kwargs ):
        """
        spatiotemporal point network

        NOTE(review): ``input_dim`` is accepted but unused; the effective base
        dim is 4 (xyz + time channel) + extra_latent.
        """
        super(STPointNetFeature, self).__init__()
        # xyz + appended time channel, plus any extra latent channels.
        self.base_dim = 4 + extra_latent
        # traj_net=True makes create_encoder return (module list, output dim):
        # encoder[0] is the trajectory-global net, encoder[-1] the local net.
        self.encoder, self.output_dim = create_encoder(
            model_scale, pointnet_radius, pointnet_nclusters,
            self.base_dim, feature_option, traj_net=True, group_norm=group_norm )
        self.feature_option = feature_option
    def forward(
        self,
        pc,
        grasp=None,
        concat_option="channel_wise",
        feature_2=False,
        train=True,
        traj_state=None,
        traj_inbatch_index=None,
        traj_time_batch=None,
        traj_latent=None
    ):
        """Return ``(z, pc_vis)`` where ``z`` is [global | local] features per
        timestep and ``pc_vis`` the (hand-stripped) input used for visualization.

        :param traj_inbatch_index: per-timestep trajectory id used to group
            timesteps of the same trajectory.
        :param traj_time_batch: per-timestep scalar time, broadcast onto every
            point as an extra input channel.
        """
        input_features = pc
        if input_features.shape[-1] != 4096:
            input_features = input_features[...,6:] # ignore hand points
        input_features_vis = input_features
        # Broadcast the scalar timestep onto every point as a channel.
        traj_time_batch = traj_time_batch[...,None].expand(-1, -1, input_features.shape[2])
        input_features = torch.cat((input_features, traj_time_batch), dim=1)
        pc = input_features.transpose(1, -1)[..., :3].contiguous()
        global_feat = []
        for idx in torch.unique(traj_inbatch_index): # each traj pc separate process no speed up with batch
            index = torch.where(traj_inbatch_index == idx)
            size = len(index[0])
            # Flatten all timestep clouds of this trajectory into one cloud
            # and encode it once; repeat the result for every timestep.
            global_pc = input_features[index].transpose(1, -1).contiguous().view(-1, self.base_dim).T.contiguous()[None]
            global_feat_i = self.encoder[0](global_pc)[0].expand(size, -1)
            global_feat.append(global_feat_i)
        global_feat = torch.cat(global_feat, dim=0)
        input_features = input_features[:,:self.base_dim].contiguous()
        local_feat_1 = pointnet_encode(self.encoder[-1], pc, input_features) # each timestep pc
        # Final feature: [trajectory-global | per-timestep local].
        z = torch.cat((global_feat, local_feat_1), dim=1)
        return z, input_features_vis
class TrajSamplerNet(nn.Module):
    """Latent plan sampler: encodes the current observation and samples a
    trajectory latent through a conditional VAE head.
    """
    def __init__(
        self,
        num_inputs,
        num_actions,
        hidden_dim,
        action_space=None,
        extra_pred_dim=0,
        config=None,
        input_dim=3,
        **kwargs ):
        """
        latent plan sampler network

        NOTE(review): only ``config`` and ``**kwargs`` are used here; the other
        arguments exist for interface parity with sibling networks.
        """
        super(TrajSamplerNet, self).__init__()
        self.config = config
        self.setup_latent_sampler(**kwargs)
    def setup_latent_sampler(self, **kwargs):
        """Build the state encoder, VAE bottleneck and CVAE encoder/decoder."""
        config = self.config
        input_dim = config.traj_latent_size
        # NOTE(review): class is resolved via eval() from a config string;
        # fine for trusted configs, but a registry lookup would be safer.
        self.curr_state_encoder = eval(config.traj_vae_feature_extractor_class)(**kwargs)
        # (mu head, log-sigma head) pair -- see forward_bottleneck.
        self.sampler_bottleneck = create_bottleneck(config.policy_traj_latent_size, config.normal_vae_dim)
        self.cvae_encoder = get_fc_feat_head(input_dim + config.policy_traj_latent_size, [1024, 512, 512, 256, 256, 128], config.policy_traj_latent_size)
        self.cvae_decoder = get_fc_feat_head(input_dim + config.normal_vae_dim, [1024, 512, 512, 256, 256, 128], config.policy_traj_latent_size)
        self.apply(weights_init_)
    def forward(self,
                curr_point_state,
                exec_point_state=None,
                grasp=None,
                train=True,
                index_mask=None,
                extra_time=None,
                traj_latent=None,
                traj_time_batch=None,
                traj_inbatch_index=None,
                encode=True,
                vis=False):
        """Encode the current point state.

        Returns ``(latent, None, None, extra_pred)``; the two ``None`` slots
        keep the return arity expected by callers.
        """
        traj_sampler_latent, extra_feat_pred = self.curr_state_encoder(curr_point_state,
                                                traj_latent=traj_latent,
                                                traj_time_batch=traj_time_batch,
                                                traj_inbatch_index=traj_inbatch_index,
                                                encode=encode)
        return traj_sampler_latent, None, None, extra_feat_pred
    def forward_bottleneck(self, traj_feat, traj_inbatch_index=None, prev_latent=None, traj_latent=None):
        """Reparameterized sample from the (mu, log-sigma) bottleneck.

        NOTE(review): when ``traj_inbatch_index`` is given, the sample is drawn
        from the expanded per-timestep mu/sigma but the *unexpanded* mu/sigma
        are returned -- presumably for a per-trajectory KL term; confirm.
        """
        sampler_mu, sampler_logsigma = self.sampler_bottleneck[0](traj_feat), self.sampler_bottleneck[1](traj_feat)
        if traj_inbatch_index is not None:
            sampler_mu_, sampler_logsigma_ = sampler_mu[traj_inbatch_index], sampler_logsigma[traj_inbatch_index]
            sample = reparameterize(sampler_mu_, sampler_logsigma_)
        else:
            sample = reparameterize(sampler_mu, sampler_logsigma)
        return sample, sampler_mu, sampler_logsigma
    def conditional_sampler_vae_head(self, traj_feat, traj_inbatch_index=None, conditional_latent=None):
        """
        conditional vae forward pass

        With ``conditional_latent`` (training): encode condition+latent, sample
        via the bottleneck. Without it (inference): draw from a truncated
        Gaussian prior. Either way, decode back to a trajectory latent.
        """
        sampler_mu, sampler_logsigma = None, None
        if conditional_latent is not None:
            encoded_latent = self.cvae_encoder(torch.cat((traj_feat[traj_inbatch_index][:len(conditional_latent)], conditional_latent), dim=-1))
            sampled_encoded_latent, sampler_mu, sampler_logsigma = self.forward_bottleneck(encoded_latent)
        else:
            # Prior sample, truncated by the configured log-sigma clip; .cuda()
            # assumes GPU inference here.
            sampled_encoded_latent = sample_gaussian((max(traj_feat.shape[0], len(traj_inbatch_index)), self.config.normal_vae_dim), truncate_std=self.config.test_log_sigma_clip).cuda()
        decoded_latent = self.cvae_decoder(torch.cat((traj_feat[traj_inbatch_index], sampled_encoded_latent), dim=-1))
        return decoded_latent, sampler_mu, sampler_logsigma, sampled_encoded_latent
class TrajEmbeddingNet(nn.Module):
    """Latent plan embedding: encodes trajectory point states and pools them
    into one embedding per trajectory (see ``head``).
    """
    def __init__(
        self,
        feature_extractor_class,
        num_inputs,
        num_actions,
        hidden_dim,
        action_space=None,
        extra_pred_dim=0,
        config=None,
        input_dim=3,
        **kwargs
    ):
        """
        latent plan embedding network

        :param feature_extractor_class: name of the backbone class, resolved
            via ``eval`` -- assumes trusted config input.
        """
        super(TrajEmbeddingNet, self).__init__()
        # Side effect: the shared config object is mutated here so downstream
        # modules see the action dimensions.
        config.num_inputs = num_inputs
        config.action_dim = num_actions
        config.action_space = PandaTaskSpace6D()
        self.config = config
        self.traj_encoder = eval(feature_extractor_class)(**kwargs)
        self.fc_embedding = get_fc_feat_head(self.traj_encoder.output_dim, [512], config.traj_latent_size, end_with_act=True)
        self.traj_fc_embedding = nn.Linear(config.traj_latent_size, config.traj_latent_size)
        self.apply(weights_init_)
    def forward(self,
                traj_point_state=None,
                train=True,
                traj_state=None,
                traj_joint=None,
                traj_inbatch_index=None,
                traj_time=None,
                traj_goal=None,
                traj_action=None,
                traj_pose=None,
                vis=False,
                val=False,
                **kwargs):
        """Delegate to the backbone encoder; most arguments are interface
        placeholders and are not forwarded."""
        return self.traj_encoder( traj_point_state,
                        traj_state=traj_state,
                        traj_inbatch_index=traj_inbatch_index,
                        traj_time_batch=traj_time)
    def head(self, feat, traj_inbatch_index, val=False):
        """
        summarize local and global point features

        Max-pools the per-timestep embeddings of each trajectory into a single
        vector, then applies a final linear layer. Returns ``(traj_feat, None)``.
        """
        feat_embedding = self.fc_embedding(feat)
        traj_feat = []
        for idx in torch.unique(traj_inbatch_index):
            traj_idx = torch.where(traj_inbatch_index == idx)
            # .max(0) returns (values, indices); only the values are kept.
            global_feat_embedding = feat_embedding[traj_idx].max(0)
            traj_feat.append(global_feat_embedding[0])
        traj_feat = torch.stack(traj_feat, dim=0)
        traj_feat = self.traj_fc_embedding(traj_feat)
        return traj_feat, None
| liruiw/HCG | core/networks.py | networks.py | py | 11,936 | python | en | code | 13 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
72743725223 | import os
import pandas as pd
import numpy as np
import os
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.pipeline import make_pipeline, Pipeline
import platform
import sys
import sklearn
import tensorflow as tf
# Load the Dacon shopping competition data.
# NOTE(review): ``file_path`` is never used below -- the read_csv calls repeat
# the directory literally; consider joining against it instead.
file_path = 'H:/Study/Hackarthon/dacon/shopping/dataset/dataset'
train = pd.read_csv('H:/Study/Hackarthon/dacon/shopping/dataset/dataset/train.csv')
test = pd.read_csv('H:/Study/Hackarthon/dacon/shopping/dataset/dataset/test.csv')
sample_submission = pd.read_csv('H:/Study/Hackarthon/dacon/shopping/dataset/dataset/sample_submission.csv')
# Log environment/library versions for reproducibility.
print(f"-os:{platform.platform()}")
print(f"-python:{sys.version}")
print(f"-pandas:{pd.__version__}")
print(f"-numpy:{np.__version__}")
print(f"-sklearn: {sklearn.__version__}")
print(f'-tensorflow: {tf.__version__}')
# print(train.shape) (6255, 13)
# print(test.shape) (180, 12)
# print(sample_submission.shape) (180, 2)
# print(train.info())
#  #   Column        Non-Null Count  Dtype
# ---  ------        --------------  -----
#  0   id            6255 non-null   int64
#  1   Store         6255 non-null   int64
#  2   Date          6255 non-null   object
#  3   Temperature   6255 non-null   float64
#  4   Fuel_Price    6255 non-null   float64
#  5   Promotion1    2102 non-null   float64
#  6   Promotion2    1592 non-null   float64
#  7   Promotion3    1885 non-null   float64
#  8   Promotion4    1819 non-null   float64
#  9   Promotion5    2115 non-null   float64
#  10  Unemployment  6255 non-null   float64
#  11  IsHoliday     6255 non-null   bool
#  12  Weekly_Sales  6255 non-null   float64
# dtypes: bool(1), float64(9), int64(2), object(1)
# memory usage: 592.6+ KB
# None
# Promotion columns are mostly missing (see info above); treat a missing
# promotion as "no promotion" (0).
train = train.fillna(0)
# print(train.shape) (6255, 13)
def date_encoder(date):
    """Split a 'dd/mm/yyyy' string into a (day, month, year) tuple of ints."""
    parts = date.split('/')
    return int(parts[0]), int(parts[1]), int(parts[2])
# Expand Date into numeric features; Day is dropped again right away, so only
# Month and Year are actually used.
train['Day'] = train['Date'].apply(lambda x: date_encoder(x)[0])
train['Month'] = train['Date'].apply(lambda x: date_encoder(x)[1])
train['Year'] = train['Date'].apply(lambda x: date_encoder(x)[2])
train = train.drop(columns=['Day', 'Date'])
# Standardize the five promotion columns; the scaler fitted on train is reused
# for test below so both share the same statistics.
scaler = StandardScaler()
scaler.fit(train[['Promotion1', 'Promotion2','Promotion3', 'Promotion4', 'Promotion5']])
scaled = scaler.transform(train[['Promotion1','Promotion2','Promotion3','Promotion4','Promotion5']])
# NOTE(review): 'Scaled_promotion4' has a lowercase 'p' -- consistent between
# train and test, so it works, but it is a typo worth fixing everywhere.
train[['Scaled_Promotion1','Scaled_Promotion2',
'Scaled_Promotion3','Scaled_promotion4',
'Scaled_Promotion5']] = scaled
train = train.drop(columns=['Promotion1','Promotion2','Promotion3','Promotion4','Promotion5'])
# Apply the same missing-value and date treatment to the test set.
test = test.fillna(0)
test['Month']=test['Date'].apply(lambda x: date_encoder(x)[1])
test['Year'] = test['Date'].apply(lambda x:date_encoder(x)[2])
test = test.drop(columns=['Date'])
scaled = scaler.transform(test[['Promotion1','Promotion2','Promotion3','Promotion4','Promotion5']])
test[['Scaled_Promotion1','Scaled_Promotion2','Scaled_Promotion3','Scaled_promotion4','Scaled_Promotion5']] = scaled
test = test.drop(columns=['Promotion1','Promotion2','Promotion3','Promotion4','Promotion5'])
# NOTE(review): XGB/CatBoost and the CV helpers are imported but never used.
from sklearn.ensemble import RandomForestRegressor
from xgboost import XGBRegressor
from catboost import CatBoostRegressor
from sklearn.model_selection import train_test_split, KFold, cross_val_score, StratifiedKFold
# Pipeline scales all remaining features, then fits a random forest.
model = make_pipeline(StandardScaler(), RandomForestRegressor())
# model = RandomForestRegressor()
train = train.drop(columns=['id'])
test = test.drop(columns=['id'])
x_train = train.drop(columns=['Weekly_Sales'])
y_train = train['Weekly_Sales']
model.fit(x_train, y_train)
prediction = model.predict(test)
print('---------------------------예측된 데이터의 상위 10개의 값 확인 --------------------------------- \n')
print(prediction[:10])
# Write the submission file expected by the competition.
sample_submission['Weekly_Sales'] = prediction
# sample_submission.head()
sample_submission.to_csv('H:/Study/Hackarthon/dacon/shopping/dataset/dataset/sookook7.csv', index = False)
| KMLEE1989/Study | Dacon/shopping/shopping_1.py | shopping_1.py | py | 4,021 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "platform.platform",
... |
36236100422 | from django.shortcuts import render
from django.http import *
from django.contrib.auth import authenticate, login, logout
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# Create your views here.
# from django.db.models import Count
from .models import *
from .forms import *
def _paginate_blood_details(request, blood_details):
    # Return the requested page (10 items per page): first page for a
    # non-integer page number, last page when the request is out of range.
    page = request.GET.get('page', 1)
    paginator = Paginator(blood_details, 10)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)

def home(request):
    """Landing page.

    - Hospital users get their dashboard with incoming requests.
    - Authenticated receivers get the paginated blood listing plus their own
      requests.
    - Anonymous visitors get the paginated blood listing only.

    Bug fix: the anonymous EmptyPage branch previously assigned the fallback
    page to an unused ``users`` variable, so an out-of-range page raised a
    NameError when ``bloods`` was rendered; it now falls back to the last
    page like the authenticated branch (via the shared helper above).
    """
    if request.user.is_authenticated:
        hospitals = Hospital.objects.filter(user=request.user)
        if hospitals:
            current_hospital = hospitals[0]
            # All requests addressed to any hospital row sharing this
            # hospital's name (each blood group is a separate row).
            hospital_requests = Request.objects.filter(
                hospital__id__in=Hospital.objects.filter(name=current_hospital.name))
            return render(request, "hospital_home.html",
                          {"hospital_requests": hospital_requests, "hospitals": hospitals})
        else:
            requests = []
            blood_details = Hospital.objects.all().order_by("name")
            if Receiver.objects.filter(user=request.user):
                receiver = Receiver.objects.get(user=request.user)
                requests = Request.objects.filter(receiver=receiver)
            if blood_details:
                bloods = _paginate_blood_details(request, blood_details)
                return render(request, "home.html", {"blood_details": bloods, "requests": requests})
            else:
                return HttpResponse("No data available")
    else:
        blood_details = Hospital.objects.all().order_by("name")
        if blood_details:
            bloods = _paginate_blood_details(request, blood_details)
            return render(request, 'home.html', {'blood_details': bloods})
        else:
            return HttpResponse("No data available")
def hospital_registration(request):
    """Register a hospital account: create the auth User, then the linked
    Hospital profile. Re-renders the form with ``registered`` on success."""
    registered = False
    if request.method == 'POST':
        user_form = UserForm(data=request.POST)
        hospital_form = HospitalForm(data=request.POST)
        forms_ok = user_form.is_valid() and hospital_form.is_valid()
        if not forms_ok:
            print(user_form.errors, hospital_form.errors)
        else:
            user = user_form.save()
            # Hash the raw password captured by the form.
            user.set_password(user.password)
            user.save()
            hospital = hospital_form.save(commit=False)
            hospital.user = user
            if not (user_form.errors or hospital_form.errors):
                hospital.save()
                registered = True
    else:
        user_form = UserForm()
        hospital_form = HospitalForm()
    context = {
        'user_form': user_form,
        'hospital_form': hospital_form,
        'registered': registered,
    }
    return render(request, 'hospital_registration.html', context)
def receiver_registration(request):
    """Register a receiver account: create the auth User, then the linked
    Receiver profile. Re-renders the form with ``registered`` on success."""
    registered = False
    if request.method == 'POST':
        user_form = UserForm(data=request.POST)
        receiver_form = ReceiverForm(data=request.POST)
        forms_ok = user_form.is_valid() and receiver_form.is_valid()
        if not forms_ok:
            print(user_form.errors, receiver_form.errors)
        else:
            user = user_form.save()
            # Hash the raw password captured by the form.
            user.set_password(user.password)
            user.save()
            receiver = receiver_form.save(commit=False)
            receiver.user = user
            if not (user_form.errors or receiver_form.errors):
                receiver.save()
                registered = True
    else:
        user_form = UserForm()
        receiver_form = ReceiverForm()
    context = {
        'user_form': user_form,
        'receiver_form': receiver_form,
        'registered': registered,
    }
    return render(request, 'receiver_registration.html', context)
def user_login(request):
    """Authenticate and log a user in; renders the login form on GET."""
    if request.method != 'POST':
        return render(request, 'login.html', {})
    username = request.POST.get('username')
    password = request.POST.get('password')
    user = authenticate(username=username, password=password)
    if user is None:
        # NOTE(review): printing raw passwords is a security hazard --
        # consider removing this log line.
        print("Someone tried to login and failed.")
        print("They used username: {} and password: {}".format(username, password))
        return HttpResponse("Invalid login details given")
    if not user.is_active:
        return HttpResponse("Your account was inactive.")
    login(request, user)
    return HttpResponseRedirect(reverse('home'))
@login_required(login_url='login')
def user_logout(request):
    """End the current session and send the user back to the landing page."""
    logout(request)
    return redirect('home')
@login_required(login_url='login')
def add_blood_details(request):
    """Let a hospital user register an available blood group.

    Data-model note: each blood group is stored as a *separate* Hospital row
    cloned from the user's first Hospital profile (name/street/city/pin),
    with ``blood_group`` set -- presumably so the listing page can paginate
    per blood group; confirm before refactoring.
    """
    # NOTE(review): ``registered`` is never used below.
    registered = False
    if request.method == "POST":
        blood_detail_form = BloodDetailForm(data = request.POST)
        if blood_detail_form.is_valid():
            if Hospital.objects.filter(user = request.user):
                hospitals = Hospital.objects.filter(user = request.user)
                hospital = hospitals[0]
                # Clone the profile fields into a fresh row for this blood group.
                new_hospital = Hospital(user = request.user, name=hospital.name, street = hospital.street, city = hospital.city, pin = hospital.pin)
                blood_detail = blood_detail_form.cleaned_data.get("blood_group")
                print("bbg : ",blood_detail)
                # hospital = Hospital.objects.get(user = request.user)
                print("hospital : ",hospital.name)
                new_hospital.blood_group = blood_detail_form.cleaned_data.get("blood_group")
                new_hospital.save()
                return HttpResponseRedirect(reverse('home'))
            else:
                print("NO HOSPITAL FOUND WITH GIVEN USER")
                return HttpResponse("NO HOSPITAL FOUND WITH GIVEN USER")
        else:
            return HttpResponse("Invalid Form details")
    else:
        blood_detail_form = BloodDetailForm()
        return render(request,'blood_detail_form.html',{'blood_detail_form':blood_detail_form})
@login_required(login_url='login')
def send_request(request,pk):
    """Create a blood Request from the logged-in receiver to Hospital row ``pk``.

    A Hospital row represents one blood group at one hospital; the
    ``is_requested`` flag prevents a second request for the same row.
    NOTE(review): check-then-set on ``is_requested`` is not atomic, so two
    simultaneous requests could both pass the check -- consider
    ``select_for_update`` if this matters.
    """
    # NOTE(review): ``done`` is never used.
    done = False
    # new_receiver = Receiver.objects.get(user = request.user)
    if Receiver.objects.filter(user = request.user):
        new_receiver = Receiver.objects.get(user = request.user)
        new_hospital = Hospital.objects.get(pk = pk)
        if new_hospital.is_requested == True:
            return HttpResponse("Request already exists for this hospital and blood")
        else:
            new_hospital.is_requested = True
            new_hospital.save()
            new_request = Request(receiver = new_receiver,hospital = new_hospital)
            new_request.save()
            return HttpResponseRedirect(reverse('home'))
    else:
        return HttpResponse("Only receiver is authorised to request blood")
{
"api_name": "django.shortcuts.render",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.PageNotAnInteger",
"line_number": 34,
"usage_type": "name"
... |
38221038133 | from itertools import permutations
def is_Prime(number):
    """Return True if ``number`` is a prime, using trial division up to sqrt.

    Generalized guard: any value below 2 (0, 1 and all negatives) is not
    prime. The original only special-cased 0 and 1, and for negative input
    ``number ** 0.5`` produced a complex value, making ``int()`` raise.
    """
    if number < 2:
        return False
    # Trial division by every candidate up to floor(sqrt(number)).
    for i in range(2, int(number ** 0.5) + 1):
        if number % i == 0:
            return False
    return True
def solution(numbers):
    """Count the distinct primes that can be formed by concatenating any
    permutation of the digits in ``numbers`` (a digit string or sequence)."""
    digits = [str(d) for d in numbers]
    candidates = set()
    # Collect every distinct integer from every permutation of every length.
    for length in range(1, len(digits) + 1):
        for perm in permutations(digits, length):
            candidates.add(int(''.join(perm)))
    return sum(1 for value in candidates if is_Prime(value))
| kh-min7/Programmers | 42839(소수 찾기).py | 42839(소수 찾기).py | py | 597 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.permutations",
"line_number": 17,
"usage_type": "call"
}
] |
25162153371 | import json
import logging
import os
import requests
import uuid
from typing import Dict
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
class TestGCWebhook:
    """Integration tests for the Adyen gift-card proxy endpoints.

    Talks to a live environment (no mocking): requires network access and a
    reachable tenant/stage; TENANT and STAGE come from the environment with
    developer-friendly defaults.
    """
    def setup(self):
        # Pytest-style setup hook: build the service base URL once per test.
        self.tenant = os.getenv("TENANT", "rapha")
        self.stage = os.getenv("STAGE", "x")
        self.base_url = f'https://{self.tenant}.ps.{self.stage}.newstore.net'
        LOGGER.info(f'Setting up test with base url: {self.base_url}')
    def issue_gc(self, balance: Dict):
        """Issue a gift card with the given balance ({"amount", "currency"})
        and return the service response as a dict. Fails the test on a
        non-200 status."""
        ns_request = {
            "amount": {
                "value": balance.get("amount"),
                "currency": balance.get("currency")
            },
            # Static customer payload -- only the amount matters for these tests.
            "customer": {
                "id": "34683bfjbfjbdfkbfk",
                "name": "Jay DoubleYou",
                "email": "john_doe@ns.com"
            },
            # Random suffix so repeated runs don't collide on idempotence.
            "idempotence_key": "NewStore_Quajoo-" + uuid.uuid4().hex
        }
        r = requests.post(url=f'{self.base_url}/adyen_gift_card/gift_card/issue',
                          json=ns_request
                          )
        assert r.status_code == 200
        response = r.json()
        LOGGER.info(f"Issued gc: {json.dumps(response, indent=4)}")
        return r.json()
    def test_check_balance(self):
        # Issue a card, then verify the balance endpoint reports the same amount.
        balance = {"amount": 22.22, "currency": "USD"}
        card_issued = self.issue_gc(balance)
        r = requests.post(url=f'{self.base_url}/adyen_gift_card/gift_card/balance',
                          json=card_issued.get("identifier"))
        assert r.status_code == 200
        response = r.json()
        LOGGER.info(f"got gc balance: {json.dumps(response, indent=4)}")
        assert response.get("value") == balance.get("amount")
| NewStore/int-cinori | integrations/adyen_gift_card/tests/integration/test_gc_api.py | test_gc_api.py | py | 1,692 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_n... |
31256076584 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import openstack.connection
from ospurge.resources import cinder
from ospurge.tests import mock
class TestBackups(unittest.TestCase):
    """Unit tests for ospurge's cinder.Backups resource wrapper, with the
    OpenStack cloud connection fully mocked."""
    def setUp(self):
        # spec_set keeps the mock honest: only real Connection attributes exist.
        self.cloud = mock.Mock(spec_set=openstack.connection.Connection)
        self.creds_manager = mock.Mock(cloud=self.cloud)
    def test_list(self):
        # list() must delegate straight to the SDK call and return its result.
        self.assertIs(self.cloud.list_volume_backups.return_value,
                      cinder.Backups(self.creds_manager).list())
        self.cloud.list_volume_backups.assert_called_once_with()
    def test_delete(self):
        # delete() returns None and removes the backup by id.
        backup = mock.MagicMock()
        self.assertIsNone(cinder.Backups(self.creds_manager).delete(backup))
        self.cloud.delete_volume_backup.assert_called_once_with(backup['id'])
    def test_disable(self):
        # Backups cannot be disabled; the wrapper only logs a warning.
        backup = mock.MagicMock()
        with self.assertLogs(level='WARNING'):
            cinder.Backups(self.creds_manager).disable(backup)
    def test_to_string(self):
        backup = mock.MagicMock()
        self.assertIn("Volume Backup",
                      cinder.Backups(self.creds_manager).to_str(backup))
class TestSnapshots(unittest.TestCase):
    """Unit tests for ospurge's cinder.Snapshots resource wrapper, with the
    OpenStack cloud connection fully mocked."""
    def setUp(self):
        # spec_set keeps the mock honest: only real Connection attributes exist.
        self.cloud = mock.Mock(spec_set=openstack.connection.Connection)
        self.creds_manager = mock.Mock(cloud=self.cloud)
    def test_list(self):
        # list() must delegate straight to the SDK call and return its result.
        self.assertIs(self.cloud.list_volume_snapshots.return_value,
                      cinder.Snapshots(self.creds_manager).list())
        self.cloud.list_volume_snapshots.assert_called_once_with()
    def test_delete(self):
        # delete() returns None and removes the snapshot by id.
        snapshot = mock.MagicMock()
        self.assertIsNone(
            cinder.Snapshots(self.creds_manager).delete(snapshot))
        self.cloud.delete_volume_snapshot.assert_called_once_with(
            snapshot['id'])
    def test_disable(self):
        # Snapshots cannot be disabled; the wrapper only logs a warning.
        snapshot = mock.MagicMock()
        with self.assertLogs(level='WARNING'):
            cinder.Snapshots(self.creds_manager).disable(snapshot)
    def test_to_string(self):
        snapshot = mock.MagicMock()
        self.assertIn("Volume Snapshot ",
                      cinder.Snapshots(self.creds_manager).to_str(snapshot))
class TestVolumes(unittest.TestCase):
    """Unit tests for ospurge's cinder.Volumes resource wrapper, with the
    OpenStack cloud connection fully mocked."""
    def setUp(self):
        self.cloud = mock.Mock(spec_set=openstack.connection.Connection)
        # project_id drives the should_delete ownership check below.
        self.creds_manager = mock.Mock(cloud=self.cloud, project_id=42)
    def test_check_prerequisite(self):
        # With no snapshots, the prerequisite still fails because the mocked
        # list_servers() returns a truthy Mock (servers appear to exist).
        self.cloud.list_volume_snapshots.return_value = []
        self.assertEqual(
            False,
            cinder.Volumes(self.creds_manager).check_prerequisite()
        )
        self.cloud.list_volume_snapshots.assert_called_once_with()
        self.cloud.list_servers.assert_called_once_with()
    def test_list(self):
        self.assertIs(self.cloud.list_volumes.return_value,
                      cinder.Volumes(self.creds_manager).list())
        self.cloud.list_volumes.assert_called_once_with()
    def test_should_delete(self):
        # Only volumes owned by the manager's project (id 42) are deletable.
        self.assertEqual(
            False,
            cinder.Volumes(self.creds_manager).should_delete(
                {'os-vol-tenant-attr:tenant_id': 84})
        )
        self.assertEqual(
            True,
            cinder.Volumes(self.creds_manager).should_delete(
                {'os-vol-tenant-attr:tenant_id': 42})
        )
    def test_delete(self):
        volume = mock.MagicMock()
        self.assertIsNone(cinder.Volumes(self.creds_manager).delete(volume))
        self.cloud.delete_volume.assert_called_once_with(volume['id'])
    def test_disable(self):
        # disable() marks the volume read-only instead of removing it.
        volume = mock.MagicMock()
        cinder.Volumes(self.creds_manager).disable(volume)
        self.cloud.update_volume.assert_called_once_with(
            volume['id'],
            metadata={'readonly': 'true'}
        )
    def test_to_string(self):
        volume = mock.MagicMock()
        self.assertIn("Volume ",
                      cinder.Volumes(self.creds_manager).to_str(volume))
| nerdicbynature/ospurge | ospurge/tests/resources/test_cinder.py | test_cinder.py | py | 4,466 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "ospurge.tests.mock.Mock",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "ospurge.tests.mock",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "ope... |
import requests
# Minimal `requests` demo: fetch one page and inspect the response object.
## HTTP GET request
req = requests.get('https://beomi.github.io/beomi.github.io_old/')
## Get the HTML source
html = req.text
## Get the HTTP response headers
header = req.headers
## Get the HTTP status code (200: OK)
status = req.status_code
## Whether the request succeeded (True/False)
is_ok = req.ok
print(html)
print(header)
print(status)
print(is_ok)
{
"api_name": "requests.get",
"line_number": 4,
"usage_type": "call"
}
] |
#!/usr/bin/env python3
"""Convert font to images of letters.

Renders each Latin/Cyrillic letter and digit of a TrueType font to its own
PNG file in the output folder given on the command line.

Bug fix: ``output_folder`` was parsed from argv but never used -- files were
always written to a hard-coded ``letters/`` directory. Output now goes to the
requested folder (created if missing), and a usage error exits non-zero.
"""
import sys
import os

from PIL import Image, ImageFont, ImageDraw

LETTER_SIZE = 60

try:
    font_file = sys.argv[1]
    output_folder = sys.argv[2]
except IndexError:
    sys.stderr.write("Usage: {} [ttf file] [output folder]\n".format(sys.argv[0]))
    # Non-zero exit so callers/scripts can detect the usage error.
    sys.exit(1)

os.makedirs(output_folder, exist_ok=True)

# use a truetype font
font_name = os.path.basename(font_file).split(".")[0]
font = ImageFont.truetype(font_file, LETTER_SIZE - 2)
# Scratch image whose Draw object is used only to measure glyph sizes.
im = Image.new("RGB", (LETTER_SIZE, LETTER_SIZE))
draw = ImageDraw.Draw(im)

# NOTE(review): ``symbols`` is defined but never added to ``characters`` --
# confirm whether punctuation glyphs were meant to be rendered too.
symbols = [ord(c) for c in "!?.,'[]()"]
up_letters_cyr = list(range(ord("А"), ord("Я") + 1))
lo_letters_cyr = list(range(ord("а"), ord("я") + 1))
up_letters_lat = list(range(ord("A"), ord("Z") + 1))
lo_letters_lat = list(range(ord("a"), ord("z") + 1))
numbers = list(range(ord("0"), ord("9") + 1))
characters = up_letters_lat + lo_letters_lat \
             + up_letters_cyr + lo_letters_cyr \
             + numbers

for code in characters:
    # Size the canvas to the glyph, render black-on-white.
    w, h = draw.textsize(chr(code), font=font)
    im = Image.new("RGB", (w, h), color="#FFFFFF")
    draw = ImageDraw.Draw(im)
    char = chr(code)
    draw.text((0, 0), char, font=font, fill="#000000")
    # Suffix lowercase letters so they don't collide with uppercase on
    # case-insensitive filesystems.
    if char.islower():
        char += "_lo"
    im.save(os.path.join(output_folder, f"{font_name}_{char}.png"))
| kirilenkobm/floating_letters | font_to_letters.py | font_to_letters.py | py | 1,280 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_n... |
70996246505 | import json
from datetime import datetime, timedelta
import openpyxl
import requests
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import make_password
from django.contrib.auth.mixins import AccessMixin
from django.db.models import Sum, Q, Count, F
from django.http.response import JsonResponse
from django.shortcuts import render, redirect
from django.utils.safestring import mark_safe
from django.views.generic import TemplateView
from rest_framework.authentication import SessionAuthentication
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer
from account.functions import checkPhone, sendSmsOneContact, sendSmsOneContact_from_sms_to
from account.models import Plan, Invoice, Card, Company_default_poles, Company, Account, Company_type_choise
from board.models import LeadPoles, LeadAction, CategoryProduct, Region, Lead, District, Task, Product, SMSTemplate, \
SMS_template_choise, Payment_type, Shopping
from board.views import is_B2B, register_lead_send_sms
from goal.models import Goal
from main.models import Calendar, Objections, ObjectionWrite, Script, Debtors, ImportTemplate
class LeadMinSerializer(ModelSerializer):
    """Compact Lead representation (id/name/surname/price/phone) for
    lightweight list responses."""
    class Meta:
        model = Lead
        fields = ['id', 'name', 'surname', 'price', 'phone']
class SMSTemplateMinSerializer(ModelSerializer):
    """Compact SMSTemplate representation (id/name/type/date/text) for
    lightweight list responses."""
    class Meta:
        model = SMSTemplate
        fields = ['id', 'name', 'type', 'date', 'text']
def pretty_encrypt(string, length, character):
    """Insert ``character`` between consecutive ``length``-sized chunks of
    ``string`` (e.g. group a card number into blocks of four)."""
    chunks = [string[start:start + length] for start in range(0, len(string), length)]
    return character.join(chunks)
def ChartLead(request):
    """Build monthly counts for the current year's lead chart.

    Returns three 12-element lists: open leads (status <= 3) bucketed by
    creation date, and lost (status 4) / won (status 5) leads bucketed by
    finish date. Directors see the whole company; others only their own leads.
    """
    leads = []
    losed = []
    finished = []
    filter_kwargs = {}
    if request.user.is_director:
        filter_kwargs['created_user__company'] = request.user.company
    else:
        filter_kwargs['created_user'] = request.user
    for i in range(1, 13):
        # Current calendar year; December's upper bound rolls into next January.
        year = datetime.today().year
        if i == 12:
            month2 = 1
            year2 = year + 1
        else:
            month2 = i + 1
            year2 = year
        # Month window as naive datetime strings, e.g. '2023-3-01 00:00:00';
        # Django parses these for __gte/__lt lookups.
        gte = str(year) + '-' + str(i) + '-01 00:00:00'
        lte = str(year2) + '-' + str(month2) + '-01 00:00:00'
        count1 = Lead.objects.filter(date__gte=gte, date__lt=lte, status__lte=3, **filter_kwargs).count()
        count2 = Lead.objects.filter(finishedDate__gte=gte, finishedDate__lt=lte, status=4, **filter_kwargs).count()
        count3 = Lead.objects.filter(finishedDate__gte=gte, finishedDate__lt=lte, status=5, **filter_kwargs).count()
        leads.append(count1)
        losed.append(count2)
        finished.append(count3)
    return {
        'leads': leads,
        'losed': losed,
        'finished': finished,
    }
@api_view(['POST'])
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def update_or_create_card(request):
    """Create or update the company's payment card from the POSTed fields
    (name/number/expire/token/verify) and flash a success message.

    Returns {"message": "Success"} or, on any processing error (missing
    field, save failure), {"message": "Error"}.
    """
    try:
        data = request.data
        company = request.user.company
        card, created = Card.objects.get_or_create(company=company)
        card.name = data['name']
        card.number = data['number']
        card.expire = data['expire']
        card.token = data['token']
        # 'verify' arrives as the string flag 'true'/'false' from the client.
        card.active = data['verify'] == 'true'
        card.save()
        if created:
            messages.success(request, f'{card.name} muvaffaqqiyatli qo\'shildi!')
        else:
            messages.success(request, f'{card.name} muvaffaqqiyatli o\'zgartirildi!')
        return Response({"message": "Success"})
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; any data/save error still yields the same
        # error payload as before.
        return Response({"message": "Error"})
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def edit_company(request):
    """Update the current user's company profile from POST data and redirect
    back to the cabinet (also on any processing error).

    NOTE(review): these DRF decorators have no effect on a plain Django view
    without ``@api_view`` -- access control here relies on session middleware.
    """
    try:
        data = request.POST
        company = request.user.company
        company.name = data['name']
        company.phone = data['phone']
        company.manzil = data['manzil']
        company.creator = data['creator']
        company.about = data['about']
        company.type = data['company_type']
        company.save()
        messages.success(request, f'{company.name} muvaffaqqiyatli o\'zgartirildi!')
        return redirect('cabinet')
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; behavior otherwise unchanged.
        return redirect('cabinet')
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def edit_product(request, pk):
    """Show (GET) or apply (POST) edits to product ``pk``; any failure --
    missing product, bad price -- silently redirects to the product list.

    NOTE(review): these DRF decorators do nothing without ``@api_view``, and
    the bare ``except:`` clauses hide all errors, including programming bugs.
    """
    if request.method == "GET":
        try:
            product = Product.objects.get(id=pk)
            context = {
                "product": product
            }
            return render(request, 'edit_product.html', context)
        except:
            return redirect('products')
    else:
        try:
            product = Product.objects.get(id=pk)
            data = request.POST
            product.name = data['name']
            product.price = int(data['price'])
            product.comment = data['comment']
            product.save()
            return redirect('products')
        except:
            return redirect('products')
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def add_product(request):
    """Show the add-product form (GET) or create a product for the user's
    company (POST); any failure silently redirects to the product list.

    NOTE(review): the DRF decorators do nothing without ``@api_view``, and the
    bare ``except:`` hides all errors, including programming bugs.
    """
    if request.method == "GET":
        return render(request, 'add_product.html')
    else:
        try:
            data = request.POST
            Product.objects.create(
                company=request.user.company,
                name=data['name'],
                price=int(data['price']),
                comment=data['comment']
            )
            return redirect('products')
        except:
            return redirect('products')
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def add_payment_type(request):
    """GET: render the "add payment type" form; POST: create it."""
    if request.method == "GET":
        return render(request, 'add_payment_type.html')
    else:
        try:
            Payment_type.objects.create(
                company=request.user.company,
                name=request.POST['name'],
            )
            return redirect('products')
        except Exception:
            # Narrowed from a bare ``except:`` (missing ``name`` field).
            return redirect('products')
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def edit_payment_type(request, pk):
    """GET: render the edit form for payment type ``pk``; POST: rename it."""
    if request.method == "GET":
        try:
            payment_type = Payment_type.objects.get(id=pk)
            context = {
                "payment_type": payment_type
            }
            return render(request, 'edit_payment_type.html', context)
        except Exception:
            # Unknown id: back to the product list.
            return redirect('products')
    else:
        try:
            payment_type = Payment_type.objects.get(id=pk)
            payment_type.name = request.POST['name']
            payment_type.save()
            return redirect('products')
        except Exception:
            # Narrowed from a bare ``except:`` (bad id or missing field).
            return redirect('products')
class Register_class(TemplateView, AccessMixin):
    """Self-service company registration.

    A single POST creates the Company, its director Account, the default
    lead-board columns and a 14-day trial Invoice, then logs the new user
    in and redirects to the cabinet.
    """
    template_name = 'register.html'

    def post(self, *args, **kwargs):
        # NOTE(review): the whole handler sits under a bare ``except:`` that
        # redirects to 'register' on any failure, hiding the real error.
        try:
            data = self.request.POST
            # Username must be unused and the two passwords must match.
            if Account.objects.filter(username=data['login']).count() == 0:
                if data['password1'] == data['password2']:
                    # New companies start on the trial plan.
                    plan = Plan.objects.filter(is_trial=True).first()
                    company = Company.objects.create(
                        name=data['comp_name'],
                        phone=data['comp_tel'],
                        manzil=data['comp_manzil'],
                        creator=data['comp_creator'],
                        about=data['comp_about'],
                        type=data['company_type'],
                        plan=plan,
                        active=True
                    )
                    # The registering user becomes the company director.
                    user = Account.objects.create(
                        username=data['login'],
                        password=make_password(data['password1']),
                        first_name=data['firstname'],
                        last_name=data['lastname'],
                        company=company,
                        is_director=True
                    )
                    # Seed the default lead-board columns for the company.
                    for i in Company_default_poles:
                        LeadPoles.objects.create(
                            name=i[0],
                            status=i[1],
                            company=company
                        )
                    # 14-day trial invoice, active immediately.
                    Invoice.objects.create(company=company,
                                           start=datetime.now(),
                                           end=datetime.now() + timedelta(days=14),
                                           plan=company.plan,
                                           summa=company.plan.price,
                                           active=True)
                    messages.success(self.request,
                                     f"{user.username} ro'yxatdan o'tdi!!!\n Sizga 14 kunlik sinov muddasi bilan tizimdan foydalanish huquqi berildi.")
                    login(self.request, user)
                    return redirect('cabinet')
                else:
                    messages.error(self.request, "Parollar bir xil emas!")
            else:
                messages.error(self.request, f"Bu {data['login']} login tanlangan! Boshqa tanlang")
            # Validation failed: re-render the form with the posted values.
            context = self.get_context_data(*args, **kwargs)
            context['post'] = self.request.POST
            context['company_types'] = Company_type_choise
            return render(self.request, self.template_name, context)
        except:
            return redirect('register')

    def get_context_data(self, *args, **kwargs):
        context = super(Register_class, self).get_context_data(**kwargs)
        context['company_types'] = Company_type_choise
        return context
class Home_new_class(TemplateView, AccessMixin):
    """Dashboard: lead counters, debt totals, chart data and goal progress."""
    template_name = 'home_new.html'

    def get_context_data(self, *args, **kwargs):
        context = super(Home_new_class, self).get_context_data(**kwargs)
        context['home'] = 'active'
        context['users'] = Account.objects.filter(company=self.request.user.company)
        context['lead_poles'] = LeadPoles.objects.filter(company=self.request.user.company)
        mijoz = Lead.objects.filter(created_user__company=self.request.user.company)
        # Status buckets: 0-3 = in progress, 4 and 5 are terminal states.
        context['lead'] = mijoz.filter(status__gte=0, status__lte=3).count()
        context['lead0'] = mijoz.filter(status=4).count()
        context['lead1'] = mijoz.filter(status=5).count()
        context['chart'] = ChartLead(self.request)
        debt = mijoz.filter(debt__gt=0)
        context['debtor'] = debt.count()
        context['debtor_sum'] = debt.aggregate(Sum('debt'))['debt__sum']
        if context['debtor_sum'] is None:
            context['debtor_sum'] = 0
        # begin goal
        if self.request.user.is_director:
            accounts = Account.objects.filter(company=self.request.user.company)
            goal_rows = []  # renamed from ``list`` — it shadowed the builtin
            for a in accounts:
                lc = Lead.objects.filter(created_user=a).count()
                try:
                    goal = Goal.objects.get(user=a, oy=datetime.today().month, yil=datetime.today().year)
                    t = {
                        'name': a.first_name,
                        'surname': a.last_name,
                        'foiz': int((lc / goal.mijoz_soni) * 100)
                    }
                except Exception:
                    # No goal this month (or mijoz_soni == 0): show 0%.
                    t = {
                        'name': a.first_name,
                        'surname': a.last_name,
                        'foiz': 0
                    }
                goal_rows.append(t)
            context['acc'] = goal_rows
        else:
            lc = Lead.objects.filter(created_user=self.request.user).count()
            try:
                goal = Goal.objects.get(user=self.request.user, oy=datetime.today().month, yil=datetime.today().year)
                t = {
                    'name': self.request.user.first_name,
                    'surname': self.request.user.last_name,
                    'foiz': int((lc / goal.mijoz_soni) * 100)
                }
            except Exception:
                t = {
                    'name': self.request.user.first_name,
                    'surname': self.request.user.last_name,
                    'foiz': 0
                }
            context['a'] = t
        # end goal
        Query = Lead.objects.filter(created_user__company=self.request.user.company)
        context['leads_count'] = Query.count()
        context['leads_summa'] = Query.aggregate(Sum('price'))['price__sum']
        if context['leads_summa'] is None:
            context['leads_summa'] = 0
        context['active_leads_count'] = Query.filter(status__lt=4).count()
        context['active_leads_summa'] = Query.filter(status__lt=4).aggregate(Sum('price'))['price__sum']
        if context['active_leads_summa'] is None:
            context['active_leads_summa'] = 0
        return context

    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
class CalenApp(TemplateView, AccessMixin):
    """Calendar application page for the user's company."""
    template_name = 'apps-calendar.html'

    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['appcalendar'] = 'active'
        ctx['users'] = Lead.objects.filter(
            created_user__company=self.request.user.company)
        return ctx

    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
class Product_list_class(TemplateView, AccessMixin):
    """Catalogue page: the company's products and payment types."""
    template_name = 'product_list.html'

    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(**kwargs)
        company = self.request.user.company
        ctx['product_page'] = "active"
        ctx['products'] = Product.objects.filter(company=company)
        ctx['payment_types'] = Payment_type.objects.filter(company=company)
        return ctx

    def dispatch(self, request, *args, **kwargs):
        # Unauthenticated visitors go to the login page (not 403).
        if not request.user.is_authenticated:
            return redirect('login')
        return super().dispatch(request, *args, **kwargs)
def GetCalendar(request):
    """Return the caller's visible calendar entries as JSON.

    Directors see the whole company's calendar; everyone else only their own.
    """
    if request.user.is_director:
        events = Calendar.objects.filter(created_user__company=request.user.company)
    else:
        events = Calendar.objects.filter(created_user=request.user)
    payload = [
        {'id': e.id, 'color': e.color, 'event': e.event, 'date': e.date}
        for e in events
    ]
    return JsonResponse({"calendars": payload})
def AddEvent(request):
    """Create a calendar entry from POSTed fields, then return to the list."""
    if request.method == "POST":
        form = request.POST
        Calendar.objects.create(
            event=form['event'],
            date=form['date'],
            color=form['color'],
            created_user=request.user,
        )
        return redirect('calendar1')
class Etiroz(TemplateView, AccessMixin):
    """Objections ("e'tiroz") page: objections, prepared answers and script."""
    template_name = 'etiroz.html'

    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(**kwargs)
        company_scope = dict(create_user__company=self.request.user.company)
        ctx['etiroz'] = 'active'
        ctx['objections'] = Objections.objects.filter(**company_scope)
        ctx['objectionwrite'] = ObjectionWrite.objects.filter(**company_scope)
        try:
            ctx['ckeditor'] = Script.objects.filter(**company_scope).first()
        except:
            ctx['ckeditor'] = None
        return ctx

    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
class Target(TemplateView, AccessMixin):
    """Kanban-style page grouping leads into status buckets.

    Directors see all company leads; other users only their own.
    """
    template_name = 'target.html'

    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['target'] = 'active'
        if self.request.user.is_director:
            base = Lead.objects.filter(created_user__company=self.request.user.company)
        else:
            base = Lead.objects.filter(created_user=self.request.user)
        # Same four buckets and counts as before, built from one base queryset.
        buckets = {
            'lead': base.filter(status__gte=1, status__lte=4),
            'mijoz': base.filter(status=5),
            'lead0': base.filter(status=0),
            'promouter': base.filter(status=6),
        }
        for key, qs in buckets.items():
            ctx[key] = qs
            ctx[f'{key}_count'] = qs.count()
        return ctx

    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
class Clients(TemplateView, AccessMixin):
    """Client list: all company leads for directors, own leads otherwise."""
    template_name = 'clients.html'

    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['client'] = 'active'
        user = self.request.user
        if user.is_director:
            ctx['clients'] = Lead.objects.filter(created_user__company=user.company)
        else:
            ctx['clients'] = Lead.objects.filter(created_user=user)
        # Downloadable Excel template for the bulk-import feature.
        ctx['template_excel'] = ImportTemplate.objects.first()
        return ctx

    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
class Setting(TemplateView, AccessMixin):
    """Settings page: Telegram bot token and the company's user list."""
    template_name = 'setting.html'

    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['setting'] = 'active'
        ctx['token'] = self.request.user.company.tg_token
        ctx['users'] = Account.objects.filter(company=self.request.user.company)
        return ctx

    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
@login_required
def importLead(request):
    """GET: render the Excel import form; POST: bulk-import leads.

    Expects an .xlsx upload named ``leads`` whose first row is a header and
    whose columns are: name, surname, price, company, companyAddress, phone.
    Each imported lead is created with status=5 and gets a LeadAction entry.
    """
    if request.method == 'GET':
        return render(request, 'importLead.html')
    try:
        excel_file = request.FILES['leads']
        wb = openpyxl.load_workbook(excel_file)
        worksheet = wb.active
        user = request.user
        for index, row in enumerate(worksheet.iter_rows()):
            if index == 0:
                continue  # skip the header row (replaces the manual counter)
            lead = Lead.objects.create(
                name=row[0].value,
                status=5,
                surname=row[1].value,
                price=int(row[2].value),
                company=row[3].value,
                companyAddress=row[4].value,
                phone=row[5].value,
                created_user=user
            )
            LeadAction.objects.create(lead=lead, changer=user)
        messages.success(request, "Mijozlar muvaffaqqiyatli yuklandi")
    except Exception:
        # Narrowed from a bare ``except:``: bad workbook or malformed rows.
        messages.error(request, "Yuklashda xatolik")
    return redirect('clients')
# begin smstemplate
@api_view(['GET'])
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def sms_template_status_change(request):
    """Toggle an SMS template's ``active`` flag.

    Query params: ``pk`` (template id) and ``val`` (0 disables, anything
    else enables).  Responds {"status": 200} on success, 500 otherwise.
    """
    try:
        pk = int(request.GET.get("pk"))
        val = int(request.GET.get("val"))
        obj = SMSTemplate.objects.get(id=pk)
        # Direct boolean expression replaces the newV=True/if-val==0 dance.
        obj.active = val != 0
        obj.save()
        return Response({"status": 200})
    except Exception:
        # Narrowed from a bare ``except:`` (missing/non-int params, bad id).
        return Response({"status": 500})
@api_view(['GET'])
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def search_lead(request):
    """Search the company's leads by name/surname/company/phone substring.

    Returns at most 30 serialized matches; an empty list on any failure.
    """
    try:
        text = request.GET.get('text')
        leads = Lead.objects.filter(
            created_user__company=request.user.company
        ).filter(
            Q(name__icontains=text) |
            Q(surname__icontains=text) |
            Q(company__icontains=text) |
            Q(phone__icontains=text)
        )[:30]
        return Response(LeadMinSerializer(leads, many=True).data)
    except Exception:
        # Narrowed from a bare ``except:``; failures degrade to no results.
        return Response([])
@api_view(['POST'])
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def filter_lead(request):
    """Filter the company's leads by status list and/or board-column list.

    Body: ``status`` and ``poles`` as JSON-encoded lists of ids.
    """
    try:
        poles = json.loads(request.data.get('poles'))
        status = json.loads(request.data.get('status'))
        leads = Lead.objects \
            .filter(created_user__company=request.user.company) \
            .filter(Q(status__in=status) |
                    (Q(status=0) & Q(pole_id__in=poles)))
        # status=0 must be matched by column id so board leads stay included.
        return Response(LeadMinSerializer(leads, many=True).data)
    except Exception:
        # Narrowed from a bare ``except:`` (absent params / invalid JSON).
        return Response({"message": "error"}, status=500)
@api_view(['POST'])
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def save_sms_template(request):
    """Create or update an SMS template.

    With ``pk`` in the body an existing template is updated, otherwise a new
    active one is created for the user's company.  For the
    "Bayram va boshqalar" type the selected leads and the send date are
    stored as well.
    """
    try:
        name = request.data['name']
        text = request.data['smstext']
        smstype = request.data['sms_type']
        date = request.data['date']
        leads = json.loads(request.data['leads'])
        if request.data.get('pk'):
            pk = int(request.data.get('pk'))
            template = SMSTemplate.objects.get(id=pk)
            template.name = name
            template.text = text
            template.type = smstype
            template.save()
        else:
            template = SMSTemplate.objects.create(company=request.user.company,
                                                  name=name, text=text,
                                                  type=smstype,
                                                  active=True)
        if smstype == "Bayram va boshqalar":
            # Holiday-type templates target an explicit lead list on a date.
            template.leads.clear()
            template.leads.set(leads)
            template.date = date
            template.save()
        return Response({})
    except Exception:
        # Narrowed from a bare ``except:`` (missing field, bad JSON, bad pk).
        return Response({}, status=500)
@login_required()
def delete_sms_template(request, pk):
    """Delete one of the company's SMS templates and return to the SMS page.

    ``filter().delete()`` is a no-op when the template does not exist (or
    belongs to another company), replacing the old get() under a bare
    try/except.
    """
    SMSTemplate.objects.filter(company=request.user.company, id=pk).delete()
    return redirect("sms")
# end smstemplate
class Debt(TemplateView, AccessMixin):
    """Debtors page: company leads carrying an outstanding debt."""
    template_name = 'debt.html'

    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['debt'] = 'active'
        ctx['debtors'] = Lead.objects.filter(
            debt__gt=0, created_user__company=self.request.user.company)
        return ctx

    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
class Sms(TemplateView, AccessMixin):
    """SMS campaign page: lead filters, lead counts and saved templates."""
    template_name = 'sms.html'

    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(**kwargs)
        company = self.request.user.company
        company_leads = Lead.objects.filter(created_user__company=company)
        ctx['lead_count'] = company_leads.count()
        ctx['sms'] = 'active'
        ctx['illness'] = CategoryProduct.objects.all()
        # Only statuses > 0 are offered as filters.
        ctx['lead_status_types'] = [s for s in Lead.status_types if s[0] > 0]
        ctx['lead_poles'] = LeadPoles.objects.filter(company=company)
        ctx['leads'] = company_leads
        ctx['sms_templates'] = SMSTemplate.objects.filter(company=company)
        return ctx

    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
class NewSMSTemplate_class(TemplateView, AccessMixin):
    """Form page for creating a new SMS template."""
    template_name = 'newSmsTemplate.html'

    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['sms_templates'] = SMS_template_choise
        # Only statuses > 0 are offered as targeting options.
        ctx['lead_status_types'] = [s for s in Lead.status_types if s[0] > 0]
        ctx['lead_poles'] = LeadPoles.objects.filter(
            company=self.request.user.company)
        return ctx

    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
class EditSMSTemplate_class(TemplateView, AccessMixin):
    """Edit page for an existing SMS template.

    ``dispatch`` resolves the template (scoped to the user's company) and
    stores it on the instance before ``get_context_data`` serializes it,
    so the context methods may assume ``self.smstemplate`` is set.
    """
    template_name = 'editSMSTemplate.html'
    # Populated by dispatch(); non-None whenever the template renders.
    smstemplate = None

    def get_context_data(self, *args, **kwargs):
        context = super(EditSMSTemplate_class, self).get_context_data(**kwargs)
        context['sms_templates'] = SMS_template_choise
        # Only statuses > 0 are offered as targeting options.
        context['lead_status_types'] = [item for item in Lead.status_types if item[0] > 0]
        context['lead_poles'] = LeadPoles.objects.filter(company=self.request.user.company)
        context['current_template'] = self.smstemplate
        # JSON dumps are consumed by the page's JavaScript.
        context['current_leads_dumps'] = json.dumps(LeadMinSerializer(self.smstemplate.leads.all(), many=True).data)
        context['current_template_dumps'] = json.dumps(SMSTemplateMinSerializer(self.smstemplate).data)
        return context

    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        pk = kwargs['pk']
        try:
            # Company scoping prevents editing another company's template.
            self.smstemplate = SMSTemplate.objects.get(company=self.request.user.company, id=pk)
        except SMSTemplate.DoesNotExist:
            return redirect("sms")
        return super().dispatch(request, *args, **kwargs)
class Cabinet(TemplateView, AccessMixin):
    """Company cabinet: plan selection, card, invoices and balance info."""
    template_name = 'companyCabinet.html'

    def post(self, *args, **kwargs):
        """Switch the company to a new plan (takes effect next period)."""
        new_plan = int(self.request.POST['new_plan'])
        company = self.request.user.company
        company.plan_id = new_plan
        company.save()
        messages.success(self.request,
                         mark_safe(f'{company.name} tarifi {company.plan.name} ga o\'zgartirildi! Joriy tarifni amal '
                                   f'qilishi yakunlandan so\'ng <span style="color:#000">{company.plan.name}</span> tarif ishga tushadi.'))
        return redirect('cabinet')

    def get_context_data(self, *args, **kwargs):
        context = super(Cabinet, self).get_context_data(**kwargs)
        context['plans'] = Plan.objects.all()
        invoices = Invoice.objects.filter(company=self.request.user.company).order_by('-id')
        try:
            card = Card.objects.get(company=self.request.user.company)
        except Card.DoesNotExist:
            card = None
        context['card'] = card
        if card:
            # Masked display strings for the card number and expiry.
            context['number'] = pretty_encrypt(card.number, 4, ' ')
            context['expire'] = pretty_encrypt(card.expire, 2, '/')
        context['invoices'] = invoices
        context['active_invoice'] = invoices.filter(active=True).first()
        context['company_types'] = Company_type_choise
        try:
            # Amount still missing to cover the current plan's price.
            context['kam_summa'] = abs(self.request.user.company.balance - self.request.user.company.plan.price)
        except Exception:
            # Narrowed from a bare ``except:`` — e.g. the company has no plan.
            pass
        context['kabinet'] = "active"
        return context

    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
class CardAdd_or_edit(TemplateView, AccessMixin):
    """Page for attaching or editing the company's payment card.

    NOTE(review): ``post`` changes the company's *plan* and is identical to
    ``Cabinet.post`` — it looks copy-pasted; confirm that is intended for
    the card page.
    """
    template_name = 'add_card.html'

    def post(self, *args, **kwargs):
        new_plan = int(self.request.POST['new_plan'])
        company = self.request.user.company
        company.plan_id = new_plan
        company.save()
        messages.success(self.request,
                         mark_safe(f'{company.name} tarifi {company.plan.name} ga o\'zgartirildi! Joriy tarifni amal '
                                   f'qilishi yakunlandan so\'ng <span style="color:#000">{company.plan.name}</span> tarif ishga tushadi.'))
        return redirect('cabinet')

    def get_context_data(self, *args, **kwargs):
        context = super(CardAdd_or_edit, self).get_context_data(**kwargs)
        try:
            card = Card.objects.get(company=self.request.user.company)
        except Card.DoesNotExist:
            card = None
        if card is not None:
            context['card'] = card
        # Payme (Paycom) checkout configuration for the front-end widget.
        context['merchant_id'] = settings.PAYCOM_MERCHANT_ID
        context['paycom_is_test'] = settings.PAYCOM_IS_TEST
        return context

    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
def SmsGateway(request):
    """Send a bulk SMS to leads selected by status/board-column and/or by id.

    POST fields: ``sms`` (text), ``user_type`` (JSON list of status codes),
    ``pole_type`` (JSON list of board-column ids) and ``leads`` (explicit
    lead ids).  Dispatches through whichever provider the company has
    enabled (``sms_activated`` first, else ``smsto_activated``) and reports
    the sent/failed totals via messages.

    NOTE(review): a lead matched by both the status/pole filter and the
    explicit ``leads`` list is sent the SMS twice — confirm intended.
    """
    if request.method == 'POST':
        sms = request.POST['sms']
        company = request.user.company
        status_codes = request.POST.get('user_type')
        pole_list = request.POST.get('pole_type')
        # Empty string means "no filter of this kind was chosen".
        if status_codes == "":
            status = []
        else:
            status = json.loads(status_codes)
        if pole_list == "":
            poles = []
        else:
            poles = json.loads(pole_list)
        leads_id = request.POST.getlist('leads')
        leads = Lead.objects.filter(id__in=leads_id)
        success_send_count = 0
        error_send_count = 0
        # Leads selected by status, plus board (status=0) leads by column.
        Leads = Lead.objects \
            .filter(created_user__company=company) \
            .filter(Q(status__in=status) | (Q(status=0) & Q(
                pole_id__in=poles)))
        if company.sms_activated:
            for lead in Leads:
                # Primary provider requires a validated phone number.
                can, phone = checkPhone(lead.phone)
                if can:
                    result = sendSmsOneContact(company, phone, sms)
                    if result.status_code == 200:
                        success_send_count += 1
                    else:
                        error_send_count += 1
                else:
                    error_send_count += 1
            # Explicitly listed leads go through the same provider.
            for lead in leads:
                can, phone = checkPhone(lead.phone)
                if can:
                    result = sendSmsOneContact(company, phone, sms)
                    if result.status_code == 200:
                        success_send_count += 1
                    else:
                        error_send_count += 1
                else:
                    error_send_count += 1
        elif company.smsto_activated:
            # Fallback provider (sms.to) sends to the raw phone value.
            for lead in Leads:
                result = sendSmsOneContact_from_sms_to(company, lead.phone, sms)
                if result.status_code == 200:
                    success_send_count += 1
                else:
                    error_send_count += 1
            for lead in leads:
                result = sendSmsOneContact_from_sms_to(company, lead.phone, sms)
                if result.status_code == 200:
                    success_send_count += 1
                else:
                    error_send_count += 1
        if success_send_count > 0:
            messages.success(request, f"{success_send_count} ta sms jo'natildi!")
        if error_send_count > 0:
            messages.error(request, f"{error_send_count} ta sms jo'natilmadi!")
    return redirect('sms')
class Hodim(TemplateView, AccessMixin):
    """Employee list page; restricted to company directors."""
    template_name = 'hodim.html'

    def dispatch(self, request, *args, **kwargs):
        # Non-directors are bounced back to the home page.
        if not request.user.is_director:
            return redirect('home')
        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['hodim'] = 'active'
        ctx['users'] = Account.objects.filter(company=self.request.user.company)
        return ctx
def DeleteHodim(request):
    """Delete an employee account (?id=) and return to the employee list.

    Scoped to the requester's company so one company cannot delete another
    company's accounts, and a no-op for unknown ids — the previous
    ``Account.objects.get(...).delete()`` allowed both.
    """
    h_id = request.GET.get('id')
    Account.objects.filter(id=h_id, company=request.user.company).delete()
    return redirect('hodim')
def ObjectWrite(request):
    """Create an ObjectionWrite entry from POST data, then show the page."""
    if request.method == "POST":
        ObjectionWrite.objects.create(
            objection=request.POST['objection'],
            solution=request.POST['solution'],
            create_user=request.user,
        )
    # Both branches of the original ended here.
    return redirect('etiroz')
def Obj(request):
    """Create an Objections entry from POST data, then show the page."""
    if request.method == "POST":
        Objections.objects.create(
            objection=request.POST['objection'],
            solution=request.POST['solution'],
            create_user=request.user,
        )
    # Both branches of the original ended here.
    return redirect('etiroz')
def CalenEdit(request):
    """List upcoming calendar entries (today onward), oldest first."""
    upcoming = Calendar.objects.filter(date__gte=datetime.now()).order_by('date')
    return render(request, 'calenedit.html', {'calens': upcoming})
def CalenEditForm(request):
    """Render the edit form for the calendar entry given by ?id=."""
    context = {}
    if request.method == "GET":
        entry = Calendar.objects.get(id=request.GET.get('id'))
        context = {'calen': entry}
    return render(request, 'caleneditform.html', context)
def CalenDel(request):
    """Delete the calendar entry given by ?id= and return to the list.

    Uses ``filter().delete()`` so an unknown id is a no-op instead of
    raising Calendar.DoesNotExist, and always returns a redirect.
    """
    if request.method == "GET":
        Calendar.objects.filter(id=request.GET.get('id')).delete()
    return redirect('calenedit')
def Delete(request):
    """Delete an objection (?t=1) or an objection-write (?t=2) by ?id=."""
    if request.method != "GET":
        return redirect('etiroz')
    pk = request.GET.get('id')
    kind = request.GET.get('t')
    if kind == '1':
        Objections.objects.get(id=pk).delete()
    elif kind == '2':
        ObjectionWrite.objects.get(id=pk).delete()
    return redirect('etiroz')
def SaveEditCalen(request):
    """Apply POSTed edits (event/date/color) to an existing calendar entry."""
    if request.method == "POST":
        form = request.POST
        entry = Calendar.objects.get(id=form['id'])
        entry.event = form['event']
        entry.color = form['color']
        entry.date = form['date']
        entry.save()
        return redirect('calenedit')
def AddHodim(request):
    """Create a new employee account for the requester's company.

    Enforces the plan's ``max_worker_count`` when a plan is set; a failed
    creation (most likely a duplicate username) is reported via messages.
    Always returns to the employee list.
    """
    if request.method == "POST":
        r = request.POST
        fam = r['fam']
        ism = r['ism']
        username = r['username']
        password = r['password']
        try:
            current_count = Account.objects.filter(company=request.user.company).count()
            if request.user.company.plan.max_worker_count <= current_count:
                return redirect('hodim')
        except Exception:
            # Narrowed from a bare ``except:`` — company without a plan (or
            # plan without a limit) skips the quota check.
            pass
        try:
            Account.objects.create(username=username, password=make_password(password), first_name=ism, last_name=fam,
                                   company=request.user.company)
        except Exception:
            # Narrowed from a bare ``except:`` — unique-username violation.
            messages.error(request, "Bu username mavjud")
    return redirect('hodim')
def Edito(request):
    """Open the objections page with one entry (?id=, ?t=1|2) in edit mode."""
    if request.method == "GET":
        pk = request.GET.get('id')
        kind = request.GET.get('t')
        try:
            script = Script.objects.first()
        except:
            script = None
        if kind == '1':
            target = Objections.objects.get(id=pk)
        elif kind == '2':
            target = ObjectionWrite.objects.get(id=pk)
        else:
            return redirect('etiroz')
        context = {
            'objections': Objections.objects.filter(create_user__company=request.user.company),
            'objectionwrite': ObjectionWrite.objects.filter(create_user__company=request.user.company),
            'obj': target,
            'ckeditor': script,
            't': int(kind),
        }
        return render(request, 'etiroz.html', context)
def Save(request):
    """Persist edits to an objection (t=1) or objection-write (t=2)."""
    if request.method == "POST":
        form = request.POST
        # Dispatch table replaces the t == '1' / '2' if/elif chain.
        model = {'1': Objections, '2': ObjectionWrite}.get(form['t'])
        if model is not None:
            entry = model.objects.get(id=form['id'])
            entry.objection = form['objection']
            entry.solution = form['solution']
            entry.save()
        return redirect('etiroz')
def Ckeditor(request):
    """Save the company's sales-script text from the CKEditor widget.

    Updates the company's first Script row, or creates one when none
    exists.  Replaces the previous bare try/except, which relied on an
    AttributeError when ``first()`` returned None and could also mask a
    failed ``save()`` by creating a duplicate row.
    """
    if request.method == 'POST':
        text = request.POST['editor1']
        script = Script.objects.filter(create_user__company=request.user.company).first()
        if script is None:
            # First save for this company.
            Script.objects.create(text=text, create_user=request.user)
        else:
            script.text = text
            script.save()
        return redirect('etiroz')
    else:
        return redirect('etiroz')
def Edit(request):
    """Lead detail view.

    GET: render ``edit.html`` with the lead's data, its current SPIN step
    (the first of step1..step4 still unanswered, else 5), its notes, the
    company's products/payment types and the lead's purchases.
    POST: apply each submitted field individually; every assignment is
    wrapped in its own try/except so absent fields are simply skipped.
    """
    if not request.user.company.active:
        return redirect('cabinet')
    if request.method == 'GET':
        id = request.GET.get('id')
        lead = Lead.objects.get(id=id)
        # The "current" SPIN step is the first one without an answer.
        if lead.step1 is None:
            step = 1
        elif lead.step2 is None:
            step = 2
        elif lead.step3 is None:
            step = 3
        elif lead.step4 is None:
            step = 4
        else:
            step = 5
        try:
            user = {
                'id': lead.id,
                'first_name': lead.name,
                'last_name': lead.surname,
                'birthday': lead.birthday,
                'phone': lead.phone,
                'region': lead.district.region.name,
                'district': lead.district.name,
                'degree': lead.degr[lead.degree - 1][1],
                'abcxyz': lead.abcxyz,
                'step1': lead.step1,
                'step2': lead.step2,
                'step3': lead.step3,
                'step4': lead.step4,
                'step5': lead.step5,
                'note': lead.note,
            }
        except:
            # Fallback for leads without a district (or a bad degree index):
            # the same payload minus the region/district entries.
            user = {
                'id': lead.id,
                'first_name': lead.name,
                'last_name': lead.surname,
                'birthday': lead.birthday,
                'phone': lead.phone,
                'degree': lead.degr[lead.degree - 1][1],
                'abcxyz': lead.abcxyz,
                'step1': lead.step1,
                'step2': lead.step2,
                'step3': lead.step3,
                'step4': lead.step4,
                'step5': lead.step5,
                'note': lead.note,
            }
        context = {
            'userr': user,
            'step': step,
            'lead': lead,
            'region': Region.objects.all(),
            'district': District.objects.all(),
            'notes': LeadAction.objects.filter(lead_id=id),
            'products': Product.objects.filter(company=request.user.company),
            'payment_types': Payment_type.objects.filter(company=request.user.company),
            'shoppings': Shopping.objects.filter(lead=lead)
        }
        return render(request, 'edit.html', context)
    elif request.method == 'POST':
        id = int(request.POST['id'])
        u = Lead.objects.get(id=id)
        try:
            surname = request.POST['surname']
            u.surname = surname
        except:
            pass
        try:
            phone = request.POST['phone']
            u.phone = phone
        except:
            pass
        try:
            # NOTE(review): ``region`` does not appear to be a Lead field (it
            # is read via district.region above) — confirm this has an effect.
            region = request.POST['region']
            u.region = region
        except:
            pass
        try:
            # NOTE(review): assigns the raw POST string to the ``district``
            # FK; the ORM rejects that, the error is swallowed here, and the
            # real update happens below via ``district_id``.
            district = request.POST['district']
            u.district = district
        except:
            pass
        try:
            birthday = request.POST['birthday']
            u.birthday = birthday
        except:
            pass
        try:
            district = request.POST['district']
            u.district_id = district
        except:
            pass
        try:
            abc = request.POST['abc']
            u.abcxyz = abc
        except:
            pass
        try:
            notes = request.POST['notes']
            u.note = notes
        except:
            pass
        try:
            u.join_from = request.POST['join_from']
        except:
            pass
        try:
            u.phone2 = request.POST['phone2']
        except:
            pass
        u.save()
        return redirect('target')
def AddUser(request):
    """Register a new lead.

    GET: render the add-lead form with regions, districts, board columns,
    products and payment types.  POST: create the lead unless one with the
    same phone already exists in the company; B2B companies also store the
    lead's company name/address.  Sends the registration SMS and reports
    the result via messages.
    """
    if not request.user.company.active:
        return redirect('cabinet')
    u = request.user
    if request.method == "POST":
        r = request.POST
        ism = r['ism']
        fam = r['fam']
        phone = r['tel']
        phone2 = r['tel2']
        birthday = r['birth']
        dis = r['district']
        abc = r['abc']
        price = r['price']
        join_from = r['join_from']
        try:
            # Duplicate check: an existing lead with this phone aborts.
            Lead.objects.get(phone=phone, created_user__company=request.user.company)
            messages.add_message(request, messages.ERROR, f"{ism} avval ro'yxatdan o'tgan")
            return redirect('adduser')
        except:
            # NOTE(review): the bare except doubles as the DoesNotExist
            # ("phone is free") path — any other error also lands here.
            u = Lead.objects.create(name=ism,
                                    surname=fam,
                                    phone=phone,
                                    phone2=phone2,
                                    birthday=birthday,
                                    abcxyz=abc,
                                    district_id=dis,
                                    created_user=u,
                                    price=price,
                                    join_from=join_from,
                                    pole_id=int(r['lead_pole']))
            if is_B2B(request):
                # B2B leads also carry their own company info.
                u.company = r['com']
                u.companyAddress = r['comadd']
                u.save()
            u.save()
            register_lead_send_sms(u)
            messages.add_message(request, messages.SUCCESS, f"{phone} Qo'shildi")
            return redirect('target')
    else:
        context = {
            'region': Region.objects.all(),
            'lead_poles': LeadPoles.objects.filter(company=request.user.company),
            'district': District.objects.all(),
            'products': Product.objects.filter(company=request.user.company),
            'payment_types': Payment_type.objects.filter(company=request.user.company)
        }
        return render(request, 'adduser.html', context)
def Up(request):
    """Move lead ?id= to status ?s= and refresh its activity timestamp."""
    if not request.user.company.active:
        return redirect('cabinet')
    if request.method == 'GET':
        lead_id = int(request.GET.get('id'))
        new_status = int(request.GET.get('s'))
        lead = Lead.objects.get(id=lead_id)
        lead.status = new_status
        lead.date = datetime.now()
        lead.save()
        return redirect('target')
def Login(request):
    """Authenticate via POSTed username/password; render the form on GET."""
    if request.method != "POST":
        return render(request, 'login.html')
    user = authenticate(
        request,
        username=request.POST['username'],
        password=request.POST['password'],
    )
    if user is None:
        messages.error(request, 'Login yoki Parol noto`g`ri kiritildi!')
        return redirect('login')
    login(request, user)
    return redirect('home')
def Logout(request):
    """End the session and send the user back to the login page."""
    logout(request)
    messages.success(request, "Tizimdan chiqish muvaffaqiyatli yakunlandi!")
    return redirect('login')
def customhandler404(request, exception, template_name='404.html'):
    """Render the custom 404 page with the proper status code."""
    page = render(request, template_name)
    page.status_code = 404
    return page
def AddNotes(request):
    """Attach a colored note (LeadAction) to a lead, then reopen its page."""
    if not request.user.company.active:
        return redirect('cabinet')
    if request.method == "POST":
        form = request.POST
        lead_id = form['id']
        LeadAction.objects.create(
            note=form['note'],
            lead_id=lead_id,
            color=form['color'],
            changer=request.user,
        )
        return redirect('/edit/?id=' + str(lead_id))
def DebtHistory(request):
    """Show taken (debt=1) and repaid (debt=0) records for lead ?id=."""
    if not request.user.company.active:
        return redirect('cabinet')
    if request.method == "GET":
        lead_id = request.GET.get('id')
        scope = dict(user_id=lead_id,
                     user__created_user__company=request.user.company)
        context = {
            'olingan': Debtors.objects.filter(debt=1, **scope).order_by('-id'),
            'berilgan': Debtors.objects.filter(debt=0, **scope).order_by('-id'),
            'usr': lead_id,
        }
        return render(request, 'debthistory.html', context)
def AddDebt(request):
    """Record a debt movement for a lead: debt='1' borrows, anything else repays."""
    if not request.user.company.active:
        return redirect('cabinet')
    if request.method == "POST":
        form = request.POST
        lead_id = form['u_id']
        direction = form['debt']
        amount = int(form['summa'])
        lead = Lead.objects.get(id=lead_id)
        if direction == '1':
            lead.debt += amount
        else:
            lead.debt -= amount
        lead.save()
        Debtors.objects.create(user_id=lead_id, summa=amount, debt=direction,
                               create_user=request.user)
        return redirect('/debthistory/?id=' + lead_id)
def AddDebtor(request):
    """Register an initial debt for a lead, or render the selection form on GET."""
    if request.method != "POST":
        context = {
            'debtors': Lead.objects.filter(
                debt=0, created_user__company=request.user.company),
            'method': 'get',
        }
        return render(request, 'adddebtor.html', context)
    form = request.POST
    lead_id = form['debtor']
    amount = int(form['debt'])
    lead = Lead.objects.get(id=lead_id)
    lead.debt += amount
    lead.save()
    Debtors.objects.create(user_id=lead_id, summa=amount, debt=1,
                           create_user=request.user)
    return redirect('debt')
def EditSpin(request):
    """Update one of a lead's five pipeline step fields (step1..step5)."""
    if request.method != "POST":
        return redirect('target')
    form = request.POST
    lead_id = form['u_id']
    step = form['step']
    st = form['st']
    lead = Lead.objects.get(id=lead_id)
    # Only the five known step slots are updatable; other `st` values are
    # ignored but the lead is still saved, exactly as before.
    if st in ('1', '2', '3', '4', '5'):
        setattr(lead, 'step' + st, step)
    lead.save()
    return redirect('/edit/?id=' + lead_id)
def PostEvent(request):
    """Create a calendar event from a JSON request body."""
    payload = json.loads(request.body)
    Calendar.objects.create(
        user_id=payload['user'],
        event=payload['title'],
        date=payload['start'],
        color=payload['className'],
        created_user=request.user,
    )
    return JsonResponse({})
def DelEvent(request):
    """Delete the calendar event whose id is given as a query parameter."""
    Calendar.objects.get(id=request.GET.get('id')).delete()
    return JsonResponse({})
def EditEvent(request):
    """Update an existing calendar event from a JSON request body."""
    payload = json.loads(request.body)
    event = Calendar.objects.get(id=payload['id'])
    event.user_id = payload['user']
    event.event = payload['title']
    event.date = payload['start']
    event.color = payload['className']
    event.save()
    return JsonResponse({})
@login_required
def main_statistika(request):
    """Return dashboard statistics as JSON.

    GET params:
        pk    -- user id to filter by (0 = whole company; only honoured for directors)
        type  -- period: 1=today, 2=last 7 days, 3=last 30 days, 4=explicit range
        sana1/sana2 -- m/d/Y range bounds, required when type == 4

    Fixes: the bare ``except:`` is narrowed to ``except Exception`` so
    SystemExit/KeyboardInterrupt are no longer swallowed, and the local
    ``type`` no longer shadows the builtin.
    """
    try:
        user_pk = int(request.GET.get('pk'))
        stat_type = int(request.GET.get('type'))
        sana = datetime.today().date()
        if stat_type == 1:
            sana1 = datetime(sana.year, sana.month, sana.day)
            sana2 = datetime.fromordinal(sana.toordinal() + 1)
        elif stat_type == 2:
            sana1 = datetime.fromordinal(sana.toordinal() - 6)
            sana2 = datetime.fromordinal(sana.toordinal() + 1)
        elif stat_type == 3:
            sana1 = datetime.fromordinal(sana.toordinal() - 29)
            sana2 = datetime.fromordinal(sana.toordinal() + 1)
        elif stat_type == 4:
            # Explicit range; dates arrive as m/d/Y.
            dt1 = request.GET.get('sana1').split('/')
            dt2 = request.GET.get('sana2').split('/')
            sana1 = datetime(int(dt1[2]), int(dt1[0]), int(dt1[1]))
            sana2 = datetime(int(dt2[2]), int(dt2[0]), int(dt2[1]))
        else:
            return Response({"message": "type error"})
        user = request.user
        d_f_kwargs = {
            "date__gte": sana1,
            "date__lt": sana2,
        }
        # Per-employee lead count and turnover within the period.
        users_data = []
        for account in Account.objects.filter(company=user.company):
            q = Lead.objects.filter(created_user=account,
                                    created_user__company=user.company,
                                    **d_f_kwargs)
            summ = q.aggregate(Sum('price'))['price__sum']
            users_data.append({
                'id': account.id,
                'first_name': account.first_name,
                'last_name': account.last_name,
                'count': q.count(),
                'summ': summ or 0,  # aggregate returns None for empty sets
            })
        # Scope of the remaining queries: whole company (director, pk=0),
        # one chosen user (director, pk!=0) or the requester themselves.
        lead_filter_kw = {}
        lead_action_kw = {}
        if request.user.is_director:
            if user_pk == 0:
                lead_filter_kw['created_user__company'] = user.company
                lead_action_kw['changer__company'] = user.company
            else:
                lead_filter_kw['created_user_id'] = user_pk
                lead_action_kw['changer_id'] = user_pk
        else:
            lead_filter_kw['created_user'] = user
            lead_action_kw['changer'] = user
        Query = Lead.objects.filter(**lead_filter_kw, **d_f_kwargs)
        QAction = LeadAction.objects.filter(**lead_action_kw, **d_f_kwargs)
        TQ = Task.objects.filter(**lead_filter_kw, **d_f_kwargs)
        # Active leads grouped by pipeline pole, plus "lost" (newStatus=4)
        # actions grouped by their lead's pole.
        lead_query = Query.filter(status__lt=4).values('pole') \
            .annotate(count=Count(F('pole')), summa=Sum(F("price")))
        leadActionQuery = QAction.filter(newStatus=4).values('lead__pole', 'newStatus') \
            .annotate(count=Count(F('newStatus')), summa=Sum(F("lead__price")))
        leadPoles_data = []
        for pole in LeadPoles.objects.filter(company=user.company):
            dic = {
                "pole": pole.id,
                "count": 0,
                "summa": 0,
                "losed_count": 0,
                "losed_summa": 0,
            }
            for item in lead_query:
                if item['pole'] == pole.id:
                    dic["count"] = item['count']
                    dic["summa"] = item['summa']
            for item in leadActionQuery:
                if item['lead__pole'] == pole.id:
                    dic["losed_count"] = item['count']
                    dic["losed_summa"] = item['summa']
            leadPoles_data.append(dic)
        dt = {
            'losed': {
                'count': Query.filter(status=4).count(),
                'summa': Query.filter(status=4).aggregate(Sum('price'))['price__sum'],
            },
            'finished': {
                'count': Query.filter(status=5).count(),
                'summa': Query.filter(status=5).aggregate(Sum('finishedPrice'))['finishedPrice__sum'],
            },
            'task': {
                'register': TQ.filter(status=0).count(),
                'doing': TQ.filter(status=1).count(),
                'done': TQ.filter(status=2).count(),
                'deleted': TQ.filter(status=3).count(),
            },
            'lead_poles_data': leadPoles_data,
            'users': users_data,
        }
        return JsonResponse(dt)
    except Exception:
        # Narrowed from a bare `except:`; any processing error still yields
        # the same generic JSON error payload as before.
        return JsonResponse({"message": "error"})
def addtoken(request):
    """Store the Telegram bot token on the director's company.

    Bug fix: the token was set on ``a.company`` but the original called
    ``a.save()`` -- Django does not save related objects automatically, so
    the token was never persisted.  Save the company instead.
    """
    token = request.POST['token']
    director = Account.objects.get(company=request.user.company, is_director=True)
    director.company.tg_token = token
    director.company.save()
    return redirect('setting')
def addsms(request):
    """Configure Eskiz SMS credentials for the current company.

    Logs in to the Eskiz API; on success stores the auth token, marks SMS
    as activated and fetches the account balance.  The company row is saved
    in all cases (with sms_activated=False when login failed).
    """
    n = request.POST['nickname']
    c = request.POST['callback']
    email = request.POST['email']
    password = request.POST['password']
    company = Company.objects.get(id=request.user.company.id)
    company.sms_from = n
    company.sms_activated = False
    company.sms_callback_url = c
    company.sms_email = email
    company.sms_password = password
    # Authenticate against Eskiz to obtain a bearer token.
    response = requests.post('http://notify.eskiz.uz/api/auth/login', data={
        "email": email,
        "password": password,
    })
    if response.status_code == 200:
        company.sms_activated = True
        company.sms_token = response.json()['data']['token']
        # Fetch the current SMS balance with the fresh token.
        response2 = requests.get('http://notify.eskiz.uz/api/auth/user', headers={
            "Authorization": f"Bearer {company.sms_token}",
        })
        if response2.status_code == 200:
            company.sms_balans = response2.json()['data']['balance']
    company.save()
    return redirect('setting')
def addsmsto(request):
    """Configure SMS.to credentials for the current company and fetch a JWT."""
    client_id = request.POST['client_id']
    secret = request.POST['secret']
    sender_id = request.POST['sender_id']
    company = Company.objects.get(id=request.user.company.id)
    company.smsto_client_id = client_id
    company.smsto_secret = secret
    company.smsto_sender_id = sender_id
    # Exchange the credentials for a JWT access token.
    response = requests.post('https://auth.sms.to/oauth/token', data={
        "client_id": client_id,
        "secret": secret,
        "expires_in": company.smsto_expires_in,
    })
    # NOTE(review): smsto_activated is set to False here and never flipped to
    # True on success (unlike addsms) -- verify whether that is intended.
    company.smsto_activated = False
    if response.status_code == 200:
        company.smsto_token = response.json()['jwt']
    company.save()
    return redirect('setting')
def EditUser(request):
    """Update a lead's personal details from POSTed form fields."""
    form = request.POST
    lead = Lead.objects.get(id=form['id'])
    lead.name = form['ism']
    lead.surname = form['fam']
    lead.phone = form['phone']
    lead.birthday = form['date']
    lead.save()
    return redirect('/edit/?id=' + str(form['id']))
def GetRegion(request):
    """Return a region's districts as JSON: {"district": [{id, name}, ...]}."""
    region_id = request.GET.get('id')
    districts = [
        {'id': d.id, 'name': d.name}
        for d in District.objects.filter(region_id=region_id)
    ]
    return JsonResponse({'district': districts})
def GetHodim(request):
    """Return one employee's id / first / last name as JSON."""
    account = Account.objects.get(id=request.GET.get('id'))
    return JsonResponse({
        'user': {
            'id': account.id,
            'fam': account.last_name,
            'ism': account.first_name,
        }
    })
def EditHodim(request):
    """Edit an employee's name, username and password.

    The username must stay unique.  Fixes over the original: the bare
    ``except:`` (which also swallowed unrelated errors raised by ``save()``)
    is replaced with an explicit uniqueness query, and the check now excludes
    the account being edited so keeping one's own username is allowed.
    """
    form = request.POST
    account = Account.objects.get(id=form['id'])
    username = form['username']
    # Reject the edit if another account already uses this username.
    if Account.objects.filter(username=username).exclude(id=account.id).exists():
        messages.error(request, 'Loginni o`zgartiring')
        return redirect('setting')
    account.username = username
    account.first_name = form['ism']
    account.last_name = form['fam']
    account.password = make_password(form['password'])
    account.save()
    messages.success(request, 'Hodim taxrirlandi')
    return redirect('setting')
| fnabiyevuz/crm | main/views.py | views.py | py | 58,006 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "board.models.Lead",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 38,
"usage_type"... |
import matplotlib.pyplot as plt
import networkx as nx
from queue import PriorityQueue
import time
# Wall-clock timing for the whole run (printed and logged at the end).
start_time = time.time()
G = nx.Graph()
# Load weighted edges from CSV rows of the form "u,v,weight".
# NOTE(review): only every *other* line is read (step 2) and the handle is
# never closed -- presumably the file interleaves data rows with other
# lines; confirm against data.csv.
file = open("data.csv",'r')
lines = file.readlines()
edges = []
for row in range(0,len(lines[0:]),2):
    header = lines[row]
    header = header[:len(header)-1].split(',')  # drop trailing newline, split fields
    header[2] = int(header[2])  # the weight must be numeric
    edges.append(tuple(header))
#edges = [('a', 'b', 9), ('a', 'd', 2), ('a', 'c', 5), ('d', 'c', 4),
#         ('d', 'b', 6), ('d', 'e', 4), ('e', 'c', 5), ('e', 'b', 5)]
# Build and plot the input graph from (at most) the first 1000 edges.
G.add_weighted_edges_from(edges[:1000])
plt.figure(1)
nx.draw_networkx(G, with_labels = True)
plt.savefig("OriginalGraphPrim.png")
print("!---Original Graph---!")
print("Total number of edges: ", int(G.number_of_edges()))
class Prims:
    """Prim's minimum-spanning-tree algorithm over an edge list.

    Uses lazy deletion: a vertex may appear several times in the priority
    queue; relaxation is skipped once the vertex is already in `tree`.
    """

    def __init__(self):
        self.tree = []        # vertices added to the MST, in pop order
        self.table = {}       # vertex -> its MST parent ('-' if none yet)
        self.queTable = {}    # vertex -> best known connection weight
        self.que = PriorityQueue()

    def pickRoot(self, node):
        """Choose the vertex Prim's algorithm starts from."""
        self.rootNode = node

    def initializePriorityQueue(self, listOfNodes):
        """Seed the queue: root at priority 0, everything else at "infinity".

        NOTE(review): 10000 acts as infinity, so edge weights >= 10000 would
        break the relaxation -- confirm against the input data.
        """
        self.que.put((0,self.rootNode))
        for node in listOfNodes:
            if node != self.rootNode:
                self.que.put((10000,node))
                self.queTable[node] = 10000

    def initializeTable(self, listOfNodes):
        """Mark every vertex as having no MST parent yet."""
        for node in listOfNodes:
            self.table[node] = '-'

    def primsAlgo(self, listOfEdges, NoOfNodes):
        """Grow the MST until every vertex has been popped.

        `listOfEdges` holds (u, v, weight) tuples; every edge incident to the
        popped vertex may relax its other endpoint's queue priority.

        NOTE(review): popped vertices are appended to `tree` even when they
        are stale duplicates, so len(tree) can overcount -- verify the
        NoOfNodes termination check against graphs with many re-inserts.
        """
        while not self.que.empty():
            nextVertex = self.que.get()
            self.tree.append(nextVertex[1])
            for edge in listOfEdges:
                if nextVertex[1] in edge:
                    # Reduce the tuple to (other_endpoint, weight).
                    edge = list(edge)
                    edge.remove(nextVertex[1])
                    edge = tuple(edge)
                    if edge[0] not in self.tree:
                        if edge[1] < self.queTable[edge[0]]:
                            self.queTable[edge[0]] = edge[1]
                            self.table[edge[0]] = nextVertex[1]
                            self.que.put((edge[1],edge[0]))
            if(len(self.tree) == NoOfNodes):
                # All vertices reached: drain the remaining (stale) entries.
                while not self.que.empty():
                    nextVertex = self.que.get()

    def ShowMST(self):
        """Plot the resulting MST (parent edges from `table`) into MSTPrim.png."""
        MST = nx.Graph()
        edges = []
        for vertex in self.table.keys():
            if self.table[vertex] != '-':
                edges.append((vertex,self.table[vertex],self.queTable[vertex]))
        MST.add_weighted_edges_from(edges)
        plt.figure(2)
        nx.draw_spring(MST, with_labels = True)
        plt.savefig("MSTPrim.png")
        print("!---Minimum Spanning Tree---!")
        print("Total number of edges: ", int(MST.number_of_edges()))
# Run Prim's algorithm from root vertex '5' on the loaded graph and show the MST.
MSTPrims = Prims()
MSTPrims.pickRoot('5')
MSTPrims.initializePriorityQueue(list(G.nodes()))
MSTPrims.initializeTable(list(G.nodes()))
MSTPrims.primsAlgo(edges[:1000], int(G.number_of_nodes()))
MSTPrims.ShowMST()
plt.show()

# Compute the elapsed time once so the printed and logged values agree
# (the original called time.time() twice and recorded two different numbers).
elapsed = time.time() - start_time
print("\n\nTotal time taken for all processes: ")
print("--- %s seconds ---" % elapsed)
with open("time.csv", 'a') as timelog:  # context manager guarantees the file closes
    timelog.write(str(elapsed))
    timelog.write('\n')
{
"api_name": "time.time",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "networkx.Graph",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
3355703340 | from modules.base_classes import *
import modules.comp_map as cmp
def main():
    """Read rectangles and query points from stdin and return, for each point,
    how many rectangles contain it (via coordinate compression)."""
    rect_count = int(input())
    rectangles = [Rectangle(*map(int, input().split())) for _ in range(rect_count)]
    point_count = int(input())
    points = [Point2D(*map(int, input().split())) for _ in range(point_count)]
    xs, ys = cmp.fill_zipped_coord(rectangles)
    grid = cmp.fill_matrix(xs, ys, rectangles)
    return cmp.get_count(points, xs, ys, grid)
if __name__ == "__main__":
    # Entry point: print the per-point containment counts, space-separated.
    print(*main())
| B-S-B-Rabbit/algorithm_lab2 | compression_map.py | compression_map.py | py | 521 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "modules.comp_map.fill_zipped_coord",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "modules.comp_map",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "modules.comp_map.fill_matrix",
"line_number": 14,
"usage_type": "call"
},
{
"... |
import tweepy
import configparser
from datetime import datetime
from dateutil import tz
from threading import Thread

# Twitter credentials live in an ini file so they stay out of source control.
config = configparser.ConfigParser(interpolation=None)
config.read('twitterkeys.ini')

api_key = config["twitter"]["APIKey"]
api_key_secret = config["twitter"]["APIKeySecret"]
bearer_token = config["twitter"]["BearerToken"]
access_token = config["twitter"]["AccessToken"]
access_token_secret = config["twitter"]["AccessTokenSecret"]

auth = tweepy.OAuth1UserHandler(api_key, api_key_secret, access_token, access_token_secret)
client = tweepy.Client(bearer_token=bearer_token, consumer_key=api_key, consumer_secret=api_key_secret, access_token=access_token, access_token_secret=access_token_secret)
api = tweepy.API(auth)

# Only look at tweets posted since 07:30 today.
# NOTE(review): this datetime is naive local time -- confirm whether the API
# expects a timezone-aware start_time.
today = datetime.now()
today = today.replace(hour=7, minute=30)

try:
    tweets = client.get_users_tweets(id=3690023483, exclude="replies", start_time=today)
except Exception as exc:
    # Narrowed from a bare `except:` and now reports *why* the fetch failed.
    print("could not get:", exc)
class StockEntry():
    """One option entry parsed from a tweet.

    The option type is derived from the strike token's trailing letter:
    anything containing 'c' is a call, everything else a put.
    """

    def __init__(self, ticker, buy_type, buy_value, buy_price):
        self.ticker = ticker
        self.buy_price = buy_price
        self.buy_type = "call" if "c" in buy_type else "put"
        self.buy_value = buy_value
        self.filled = False
        self.closed = False
        self.option_amount = 0
        # Default 10% stop-loss band in both directions.
        self.stop_loss_upper = 0.1
        self.stop_loss_lower = 0.1

    def __str__(self):
        lines = [
            "Ticker: " + self.ticker,
            "Option type: " + self.buy_type,
            "Option value: " + self.buy_value,
            "Bought price: " + self.buy_price,
        ]
        return "\n".join(lines) + "\n"

    def set_loss(self, stop_loss):
        # stop_loss is a (lower, upper) pair.
        self.stop_loss_lower, self.stop_loss_upper = stop_loss

    def buy(self):
        print("Buying " + self.ticker + "...")
# Skip "LOTTO" entries unless explicitly enabled.
include_lotto = False
stock_entries = []
# Parse entry tweets: field positions words[1] (ticker), words[2]
# (strike+type, e.g. "150c") and words[4] (price) are assumed from the
# account's tweet format.
# NOTE(review): confirm these positions against live tweets.
for tweet in tweets.data:
    if "entry" in str(tweet):
        if "LOTTO" in str(tweet) and not include_lotto:
            pass
        else:
            words = str(tweet).split(" ")
            print(words)
            ticker = words[1]
            buy_type = words[2][-1]    # last char of the strike token: 'c' or 'p'
            buy_value = words[2][:-1]  # strike value without the type suffix
            buy_price = words[4]
            stock_entries.append(StockEntry(ticker, buy_type, buy_value, buy_price))
for entry in stock_entries:
    print(entry)
{
"api_name": "configparser.ConfigParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tweepy.OAuth1UserHandler",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tweepy.Client",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tweep... |
31471559038 | import itertools
import operator
import re
import dataclasses
from dataclasses import dataclass, field
from typing import List, Tuple, Iterable, Dict, Optional, Set
from robotoff import settings
from robotoff.ml.langid import DEFAULT_LANGUAGE_IDENTIFIER, LanguageIdentifier
from robotoff.products import ProductDataset
from robotoff.taxonomy import TaxonomyType, get_taxonomy
from robotoff.utils import get_logger, text_file_iter
from robotoff.utils.cache import CachedStore
from robotoff.utils.es import generate_msearch_body
from robotoff.utils.text import FR_NLP_CACHE
from robotoff.utils.types import JSONType
from spacy.util import get_lang_class
logger = get_logger(__name__)

# Characters that delimit individual ingredients within an ingredient list.
SPLITTER_CHAR = {"(", ")", ",", ";", "[", "]", "-", "{", "}"}

# Food additives (EXXX) may be mistaken from one another, because of their edit distance proximity
# Percentages ("12,5 %") and standalone digits are blanked before correction.
BLACKLIST_RE = re.compile(r"(?:\d+(?:[,.]\d+)?\s*%)|(?:[0-9])(?![\w-])")
# Stray punctuation the suggester should never see.
PUNCTUATION_BLACKLIST_RE = re.compile(r"[_•:]")
# E-number additive codes (e.g. "E150a"), not preceded by a word character.
E_BLACKLIST_RE = re.compile(r"(?<!\w)(?:E ?\d{3,5}[a-z]*)")

# (start, end) character span within an ingredient text.
OffsetType = Tuple[int, int]
class TokenLengthMismatchException(Exception):
    """Raised when a suggestion tokenizes to a different number of tokens than
    the original ingredient, so the correction cannot be aligned token-wise."""
    pass
def get_fr_known_tokens() -> Set[str]:
    """Load the set of known French tokens (ingredient tokens + generic FR tokens)."""
    known = set(text_file_iter(settings.INGREDIENT_TOKENS_PATH, comment=False))
    known |= set(text_file_iter(settings.FR_TOKENS_PATH, comment=False))
    return known
# Lazily-built, cached set of known French tokens (see get_fr_known_tokens).
FR_KNOWN_TOKENS = CachedStore(get_fr_known_tokens)
def extract_ingredients_from_taxonomy(lang: str):
    """Collect every lower-cased synonym for `lang` from the ingredient taxonomy."""
    taxonomy = get_taxonomy(TaxonomyType.ingredient.name)
    return {
        synonym.lower()
        for node in taxonomy.nodes.values()
        for synonym in node.get_synonyms(lang)
    }
def extract_tokens_ingredients_from_taxonomy(lang: str):
    """Tokenize every taxonomy synonym with the spaCy model for `lang` and
    return the set of distinct token strings."""
    nlp = get_lang_class(lang)()
    tokens = set()
    for doc in nlp.pipe(texts=extract_ingredients_from_taxonomy(lang)):
        tokens.update(token.orth_ for token in doc)
    return tokens
def dump_token_ingredients_from_taxonomy(lang: str):
    """Write the sorted taxonomy token set to INGREDIENT_TOKENS_PATH, one per line."""
    tokens = sorted(extract_tokens_ingredients_from_taxonomy(lang))
    with settings.INGREDIENT_TOKENS_PATH.open("w") as f:
        f.writelines(token + "\n" for token in tokens)
@dataclass
class Ingredients:
    """An ingredient list plus its normalized form and per-ingredient offsets.

    `offsets` holds (start, end) pairs valid in both `text` and `normalized`
    (normalization overwrites characters in place, preserving length).
    """

    text: str
    normalized: str
    offsets: List[OffsetType] = field(default_factory=list)

    def iter_normalized_ingredients(self) -> Iterable[str]:
        # Lazy view over the normalized segments.
        return (self.normalized[start:end] for start, end in self.offsets)

    def get_ingredient(self, index) -> str:
        return self.text[slice(*self.offsets[index])]

    def get_normalized_ingredient(self, index) -> str:
        return self.normalized[slice(*self.offsets[index])]

    def ingredient_count(self) -> int:
        return len(self.offsets)
@dataclass
class TermCorrection:
    """A single-token spelling correction within an ingredient text."""

    original: str       # token as found in the source text
    correction: str     # proposed replacement token
    start_offset: int   # character offsets of the original token in the full text
    end_offset: int
    is_valid: bool = True  # cleared by is_valid_correction filtering rules
@dataclass
class Correction:
    """A group of token corrections for one ingredient, with the suggester score."""

    term_corrections: List[TermCorrection]
    score: int  # score of the chosen Elasticsearch suggestion option
def normalize_ingredients(ingredient_text: str):
    """Blank out blacklisted fragments (E-numbers, percentages/digits, some
    punctuation) by overwriting them with spaces, preserving string length.

    Matches are removed one at a time and the text re-scanned, because
    blanking one match can expose a new one (the E-number pattern has a
    look-behind that a freshly-blanked character can satisfy).
    """
    normalized = ingredient_text
    for regex in (E_BLACKLIST_RE, BLACKLIST_RE, PUNCTUATION_BLACKLIST_RE):
        while True:
            match = regex.search(normalized)
            if match is None:
                break
            start, end = match.span()
            normalized = normalized[:start] + " " * (end - start) + normalized[end:]
    return normalized
def process_ingredients(ingredient_text: str) -> Ingredients:
    """Split an ingredient string on SPLITTER_CHAR and build an Ingredients
    object whose offsets delimit each ingredient segment; splitter characters
    become spaces in the normalized text (length is preserved)."""
    normalized = normalize_ingredients(ingredient_text)
    offsets = []
    out_chars = []
    segment_start = 0
    for position, char in enumerate(normalized):
        if char not in SPLITTER_CHAR:
            out_chars.append(char)
            continue
        # Close the current segment and start a new one after the splitter.
        offsets.append((segment_start, position))
        segment_start = position + 1
        out_chars.append(" ")
    if segment_start != len(normalized):
        offsets.append((segment_start, len(normalized)))
    return Ingredients(ingredient_text, "".join(out_chars), offsets)
def generate_corrections(
    client, ingredients_text: str, lang: Optional[str] = None, **kwargs
) -> List[Correction]:
    """Generate spelling corrections for a (French) ingredient list.

    When `lang` is None the text's language is detected first and non-French
    text is skipped.  Each ingredient segment is run through the
    Elasticsearch phrase suggester; suggestions whose token count does not
    match the original segment are discarded.
    """
    if lang is None:
        language_identifier: LanguageIdentifier = DEFAULT_LANGUAGE_IDENTIFIER.get()
        predicted_languages = language_identifier.predict(
            ingredients_text.lower(), threshold=0.5
        )
        if predicted_languages and predicted_languages[0].lang != "fr":
            # Not French: log and bail out (the suggester fields are FR-only).
            predicted_language = predicted_languages[0]
            logger.info(
                "Predicted language is not 'fr': {} "
                "(confidence: {})\n{}".format(
                    predicted_language.lang,
                    predicted_language.confidence,
                    ingredients_text,
                )
            )
            return []
    corrections = []
    ingredients: Ingredients = process_ingredients(ingredients_text)
    normalized_ingredients: Iterable[str] = ingredients.iter_normalized_ingredients()
    for idx, suggestions in enumerate(
        _suggest_batch(client, normalized_ingredients, **kwargs)
    ):
        offsets = ingredients.offsets[idx]
        normalized_ingredient = ingredients.get_normalized_ingredient(idx)
        options = suggestions["options"]
        if not options:
            continue
        # Only the best-scored suggestion option is considered.
        option = options[0]
        original_tokens = analyze(client, normalized_ingredient)
        suggestion_tokens = analyze(client, option["text"])
        try:
            term_corrections = format_corrections(
                original_tokens, suggestion_tokens, offsets[0]
            )
            # Flag individual token corrections that fail the validity rules.
            for term_correction in term_corrections:
                term_correction.is_valid = is_valid_correction(term_correction)
            corrections.append(Correction(term_corrections, option["score"]))
        except TokenLengthMismatchException:
            # logger.warning("The original text and the suggestions must have the same number "
            #                "of tokens: {} / {}".format(original_tokens, suggestion_tokens))
            continue
    return corrections
def is_valid_correction(
    correction: TermCorrection, plural: bool = True, original_known: bool = True
) -> bool:
    """Filter out corrections we do not trust: plural/singular flips,
    corrections of already-known tokens, and anything containing digits."""
    if plural and is_plural_correction(correction):
        return False
    if original_known and is_original_ingredient_known(correction.original):
        return False
    # Corrections containing digits are too risky to apply automatically.
    return not any(ch.isdigit() for ch in correction.correction)
def is_plural_correction(correction: TermCorrection) -> bool:
    """True when original and correction differ only by a trailing 's'
    (in either direction), case-insensitively."""
    source = correction.original.lower()
    target = correction.correction.lower()

    def drops_final_s(longer: str, shorter: str) -> bool:
        return (
            len(longer) > len(shorter)
            and longer.endswith("s")
            and shorter == longer[:-1]
        )

    return drops_final_s(source, target) or drops_final_s(target, source)
def is_original_ingredient_known(text: str):
    """True when every (French spaCy) token of `text` is in the known-token set."""
    nlp = FR_NLP_CACHE.get()
    known = FR_KNOWN_TOKENS.get()
    return all(token.lower_ in known for token in nlp(text))
def generate_corrected_text(corrections: List[TermCorrection], text: str):
    """Apply the valid corrections to `text`, left to right, and return the result.

    NOTE(review): mirrors the original behaviour of returning "" (not `text`)
    when there is no valid correction -- verify callers rely on that.
    """
    applied = sorted(
        (c for c in corrections if c.is_valid),
        key=operator.attrgetter("start_offset"),
    )
    if not applied:
        return ""
    pieces = []
    cursor = 0
    for correction in applied:
        pieces.append(text[cursor:correction.start_offset])
        pieces.append(correction.correction)
        cursor = correction.end_offset
    pieces.append(text[cursor:])
    return "".join(pieces)
def format_corrections(
    original_tokens: List[Dict], suggestion_tokens: List[Dict], offset: int = 0
) -> List[TermCorrection]:
    """Pair analyzer tokens from the original and suggested texts and build a
    TermCorrection for every token that changed, mirroring the original
    token's capitalization (UPPER / Title / lower).

    Raises TokenLengthMismatchException when the token counts differ.
    """
    if len(original_tokens) != len(suggestion_tokens):
        raise TokenLengthMismatchException()

    corrections: List[TermCorrection] = []
    for original, suggested in zip(original_tokens, suggestion_tokens):
        source = original["token"]
        target = suggested["token"]
        if source.lower() == target:
            continue
        # Reproduce the source token's casing on the suggestion.
        if source.isupper():
            target = target.upper()
        elif source.istitle():
            target = target.capitalize()
        corrections.append(
            TermCorrection(
                original=source,
                correction=target,
                start_offset=offset + original["start_offset"],
                end_offset=offset + original["end_offset"],
            )
        )
    return corrections
def suggest(text: str, client, **kwargs) -> Dict:
    """Run the spellchecker on `text` and return corrections plus corrected text."""
    corrections = generate_corrections(client, text, **kwargs)
    term_corrections = [
        tc for correction in corrections for tc in correction.term_corrections
    ]
    return {
        "corrections": [dataclasses.asdict(tc) for tc in term_corrections],
        "text": text,
        "corrected": generate_corrected_text(term_corrections, text),
    }
def analyze(client, ingredient_text: str):
    """Run the Elasticsearch standard tokenizer on `ingredient_text` and
    return the resulting token dicts."""
    response = client.indices.analyze(
        index=settings.ELASTICSEARCH_PRODUCT_INDEX,
        body={"tokenizer": "standard", "text": ingredient_text},
    )
    return response["tokens"]
def _suggest_batch(client, texts: Iterable[str], **kwargs) -> List[Dict]:
    """Run one msearch suggest query per text and return the suggester payloads.

    Failed sub-responses are logged and skipped.
    """
    suggester_name = "autocorrect"
    index_name = kwargs.pop("index_name", settings.ELASTICSEARCH_PRODUCT_INDEX)
    queries = (
        generate_suggest_query(text, name=suggester_name, **kwargs) for text in texts
    )
    body = generate_msearch_body(index_name, queries)
    response = client.msearch(body=body, doc_type=settings.ELASTICSEARCH_TYPE)
    suggestions = []
    for r in response["responses"]:
        if r["status"] != 200:
            # Bug fix: the error details live on the failed item `r`, not on
            # the top-level msearch `response`; the old lookup raised KeyError
            # on the very path meant to report the error.  Also use the module
            # logger instead of print(), consistent with the rest of the file.
            root_cause = r["error"]["root_cause"][0]
            logger.error(
                "Elasticsearch error: {} [{}]".format(
                    root_cause["reason"], root_cause["type"]
                )
            )
            continue
        suggestions.append(r["suggest"][suggester_name][0])
    return suggestions
def generate_suggest_query(
    text,
    confidence=1,
    size=1,
    min_word_length=4,
    suggest_mode="missing",
    name="autocorrect",
    reverse: bool = True,
):
    """Build an Elasticsearch phrase-suggester query body for `text`.

    With `reverse=True` a second direct generator on the reversed-field
    analyzer is added (pre/post `reverse` filters), which helps correct
    errors at the start of words.
    """
    direct_generators = [
        {
            "field": "ingredients_text_fr.trigram",
            "suggest_mode": suggest_mode,
            "min_word_length": min_word_length,
        }
    ]
    if reverse:
        direct_generators.append(
            {
                "field": "ingredients_text_fr.reverse",
                "suggest_mode": suggest_mode,
                "min_word_length": min_word_length,
                "pre_filter": "reverse",
                "post_filter": "reverse",
            }
        )
    phrase = {
        "confidence": confidence,
        "field": "ingredients_text_fr.trigram",
        "size": size,
        "gram_size": 3,
        "direct_generator": direct_generators,
        "smoothing": {"laplace": {"alpha": 0.5}},
    }
    return {"suggest": {"text": text, name: {"phrase": phrase}}}
def predict_insight(client, text: str, barcode: str, **kwargs) -> Optional[JSONType]:
    """Like `suggest`, but returns None when nothing needs correcting and
    includes product metadata (barcode, lang, index name) in the payload."""
    corrections = generate_corrections(client, text, **kwargs)
    if not corrections:
        return None
    term_corrections = [
        tc for correction in corrections for tc in correction.term_corrections
    ]
    return {
        "corrections": [dataclasses.asdict(tc) for tc in term_corrections],
        "text": text,
        "corrected": generate_corrected_text(term_corrections, text),
        "barcode": barcode,
        "lang": "fr",
        "index_name": kwargs.get("index_name", "product"),
    }
def generate_insights(
    client, max_errors: Optional[int] = None, **kwargs
) -> Iterable[JSONType]:
    """Stream spellcheck insights for French products in the JSONL dataset.

    Only products tagged France / lang "fr" with a non-empty
    ingredients_text_fr are considered; `max_errors`, when given, skips
    products whose `unknown_ingredients_n` exceeds it.
    """
    dataset = ProductDataset(settings.JSONL_DATASET_PATH)
    product_iter = (
        dataset.stream()
        .filter_by_country_tag("en:france")
        .filter_text_field("lang", "fr")
        .filter_nonempty_text_field("ingredients_text_fr")
        .iter()
    )
    if max_errors is not None:
        # Skip products with too many unknown ingredients.
        product_iter = (
            p
            for p in product_iter
            if int(p.get("unknown_ingredients_n", 0)) <= max_errors
        )
    for product in product_iter:
        text = product["ingredients_text_fr"]
        barcode = product["code"]
        insight = predict_insight(client, text, barcode=barcode, **kwargs)
        if insight:
            yield insight
{
"api_name": "robotoff.utils.get_logger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "re.compile",
"lin... |
22247301648 | import sys
import numpy as np
import matplotlib.pyplot as plt
fname = sys.argv[1] # get filename from argument
# Remaining CLI args are sample-name substrings selecting what to plot.
samples = []
for s in sys.argv[2:len(sys.argv)]:
    samples.append(s)
data = open(fname, "r") # Open file from BMG (export as table) and store in a list
file_stored = []
for i in data:
    file_stored.append(i.split(","))
data.close()
# Collect sample names (columns 1+2) and their raw series (columns 3..end).
# Rows before the " Time" header row are metadata and skipped.
Sample_names = []
y = []
check = False
for i in file_stored:
    if check:
        Sample_names.append(i[1] + i[2])
        y.append(i[3:len(i)-1] + [i[len(i)-1].split("\n")[0]])
    elif i[1] == " Time":
        check = True
        continue
for i in Sample_names:
    print(i)
# Parse the time header row into numbers.
# NOTE(review): st[1]*60 + st[3] -- presumably hours*60 + minutes; confirm
# against the BMG table-export format.
times = file_stored[1]
times = times[3:len(times)]
x = []
for i in times:
    st = i.split(" ")
    if st[3] == "":
        x.append(int(st[1])*60)
    else:
        x.append(int(st[1])*60 + int(st[3]))
x_int = []
for i in x:
    x_int.append(int(i))
y_int = []
for i in y:
    ySub_int = []
    for j in i:
        ySub_int.append(int(j))
    y_int.append(ySub_int)
# Deduplicate sample names and count replicates per unique sample id.
Sample_Ids = []
Sample_rep_n = []
for i in Sample_names:
    if not i in Sample_Ids:
        Sample_Ids.append(i)
        Sample_rep_n.append(Sample_names.count(i))
# Mean and standard deviation across replicates, per time point.
y_Av = []
y_Std = []
for i in Sample_Ids:
    reps = []
    reps_Av = []
    reps_Std = []
    for j in range(0,len(Sample_names)):
        if Sample_names[j] == i:
            reps.append(y_int[j])
    for r in range(0,len(reps[0])):
        avs = []
        for z in range(0,len(reps)):
            avs.append(float(reps[z][r]))
        reps_Av.append(np.average(avs))
        reps_Std.append(np.std(avs))
    y_Av.append(reps_Av)
    y_Std.append(reps_Std)
#### Average
# Select the sample ids whose name contains any requested substring.
sample_show = []
n = 0
leg = []
for i in Sample_Ids:
    for j in samples:
        if j in i:
            sample_show.append(n)
            leg.append(i)
    n += 1
# Plot mean curves with error bars; remember each line's colour (one entry
# per replicate) so the scatter below reuses it.
colors = []
for i in sample_show:
    a = plt.errorbar(x,y_Av[i],yerr=y_Std[i],capsize=2)
    for c in range(0,Sample_rep_n[i]):
        colors.append(a.lines[0].get_color())
plt.legend(leg)
# Overlay the individual replicate points in the matching colours.
sample_show_scatter = []
n = 0
for i in Sample_names:
    for j in samples:
        if j in i:
            sample_show_scatter.append(n)
    n += 1
m = 0
for i in sample_show_scatter:
    plt.scatter(x,y_int[i], facecolor = colors[m])
    m += 1
plt.show()
#### Raw Data
# sample_show = []
#
# n = 0
# for i in Sample_names:
#     for j in samples:
#         if j in i:
#             sample_show.append(n)
#     n += 1
#
# for i in sample_show:
#     plt.plot(x,y_int[i])
# plt.show()
{
"api_name": "sys.argv",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.average",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number"... |
11565107048 |
import morse, keras
import numpy as np
from scipy import signal
# Signal geometry: single-channel envelope sampled at 100 samples/second.
channels = 1
samples_per_sec = 100
# Detection windows are 5 s long.
max_seconds = 5
max_samples = max_seconds * samples_per_sec
# Translation windows are 5 s long as well.
trans_seconds = 5
trans_samples = trans_seconds * samples_per_sec
latent_dim = 100  # LSTM width for the (commented-out) seq2seq variant
# Decoder alphabet; '$' and '^' are special tokens -- presumably start/end
# markers (TODO confirm against the morse module).
TOKENS = "$^0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ "
num_decoder_tokens = len(TOKENS)
target_token_index = dict([(char, i) for i, char in enumerate(TOKENS)])
max_translation_length = 62
# Toggle between the LSTM head and the conv+dense head in make_trans_model.
use_lstm = True
# detection model
def make_model(input_shape = (max_samples,channels)):
    """Build the CW-detection CNN: five Conv1D blocks, global average pooling
    and a single sigmoid unit predicting signal presence.

    The five blocks were copy-pasted in the original and are now generated in
    a loop; each is Conv1D -> BatchNorm -> MaxPool -> ReLU, followed by
    Dropout at the listed rate (None = no dropout, as on the last block).
    The resulting layer graph is identical to the original.
    """
    input_layer = keras.layers.Input(input_shape)
    nf = 64  # filters per conv layer
    ks = 7   # kernel size

    x = input_layer
    for dropout_rate in (0.3, 0.2, 0.1, 0.1, None):
        x = keras.layers.Conv1D(filters=nf, kernel_size=ks, padding="same")(x)
        x = keras.layers.BatchNormalization()(x)
        x = keras.layers.MaxPooling1D()(x)
        x = keras.layers.ReLU()(x)
        if dropout_rate is not None:
            x = keras.layers.Dropout(dropout_rate)(x)

    gap = keras.layers.GlobalAveragePooling1D()(x)
    output_layer = keras.layers.Dense(1, activation="sigmoid")(gap)
    return keras.models.Model(inputs=input_layer, outputs=output_layer)
# translation model
# https://keras.io/examples/nlp/lstm_seq2seq/
def make_trans_model(input_shape = (trans_samples,channels)):
    """Build the CW-translation model: three Conv1D downsampling blocks, an
    LSTM (or conv+dense, depending on `use_lstm`) sequence head, and a
    per-timestep softmax over the decoder token alphabet.

    The three conv blocks were copy-pasted in the original and are now
    generated in a loop (Conv1D -> BatchNorm -> AveragePool -> ReLU ->
    Dropout); the layer graph is identical to the original.
    """
    input_layer = keras.layers.Input(input_shape)
    nf = 96  # filters per conv layer
    ks = 7   # kernel size

    x = input_layer
    for dropout_rate in (0.6, 0.4, 0.2):
        x = keras.layers.Conv1D(filters=nf, kernel_size=ks, padding="same")(x)
        x = keras.layers.BatchNormalization()(x)
        x = keras.layers.AveragePooling1D()(x)
        x = keras.layers.ReLU()(x)
        x = keras.layers.Dropout(dropout_rate)(x)

    if use_lstm:
        seq = keras.layers.LSTM(nf*2, return_sequences=True)(x)
    else:
        conv = keras.layers.Conv1D(filters=nf, kernel_size=ks, activation="relu", padding="same")(x)
        seq = keras.layers.TimeDistributed(keras.layers.Dense(nf*2, activation="relu"))(conv)

    # Skip connection: concatenate conv features with the sequence head.
    concat = keras.layers.Concatenate(axis=2)([x, seq])
    dense = keras.layers.TimeDistributed(keras.layers.Dense(num_decoder_tokens, activation="softmax"))(concat)
    return keras.models.Model(inputs=input_layer, outputs=dense)
#encoder_lstm = keras.layers.LSTM(latent_dim, return_sequences=True, dropout=0.1)(conv5)
#decoder_lstm = keras.layers.LSTM(num_decoder_tokens, dropout=0.1, return_sequences=True)(encoder_lstm)
#return keras.models.Model(inputs=input_layer, outputs=decoder_lstm)
#encoder_inputs = conv5
#encoder = keras.layers.LSTM(latent_dim, return_state=True, dropout=0.2)
#encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# We discard `encoder_outputs` and only keep the states.
#encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
#decoder_inputs = keras.Input(shape=(None, num_decoder_tokens))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
#decoder_lstm = keras.layers.LSTM(latent_dim, return_sequences=True, return_state=True)
#decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)
#decoder_dense = keras.layers.Dense(num_decoder_tokens, activation="softmax")
#decoder_outputs = decoder_dense(decoder_outputs)
#return keras.models.Model(inputs=[input_layer, decoder_inputs], outputs=decoder_outputs)
class DataGenerator(keras.utils.Sequence):
    'Generates detection data for Keras'

    def __init__(self, batch_size=256):
        'Initialization'
        # shape of one batch: (batch, time samples, channels)
        self.dim = (batch_size,max_samples,channels)
        self.batch_size = batch_size
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        # samples are synthesized on the fly, so the epoch length is arbitrary
        return 1000

    def __getitem__(self, index):
        'Generate one batch of data'
        # `index` is ignored: every batch is freshly generated random data
        x_train = []
        y_train = []
        for i in range(0,self.batch_size):
            msg, x, posns = morse.generate_detection_training_sample(max_samples)
            x = np.reshape(x, (-1,1))  # add the channel axis
            # binary label: a non-empty message not starting with '~'
            # (presumably the "no signal" marker — TODO confirm in morse module)
            y = len(msg) > 0 and msg[0] != '~'
            x_train.append(x)
            y_train.append(y)
        return np.array(x_train), np.array(y_train)

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        # nothing to do: data is generated per batch, there is no index set
class TranslationGenerator(keras.utils.Sequence):
    """Generates translation training data for Keras.

    Each sample pairs a synthesized signal window with a
    (max_translation_length, num_decoder_tokens) target matrix: one softmax
    bin per time slot, token 0 meaning "no symbol".
    """

    def __init__(self, batch_size=256):
        'Initialization'
        # shape of one batch: (batch, time samples, channels)
        self.dim = (batch_size, trans_samples, channels)
        self.batch_size = batch_size
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        # samples are synthesized on the fly, so the epoch length is arbitrary
        return 1000

    def __getitem__(self, index):
        'Generate one batch of data'
        x_train = []
        y_train = []
        # `index` is ignored: every batch is freshly generated random data
        for _ in range(0, self.batch_size):
            msg, x, posns = morse.generate_translation_training_sample(trans_samples)
            # posns holds one boundary per symbol plus the final end position
            assert len(posns) == len(msg) + 1
            x = np.reshape(x, (-1, 1))  # add the channel axis
            y = np.zeros((max_translation_length, num_decoder_tokens))
            # iterate over all bins
            # (renamed from `str`, which shadowed the builtin; the inner loop
            #  variable no longer reuses the outer batch index either)
            for i, char in enumerate(msg):
                if use_lstm:
                    # lstm, bin goes at end of symbol
                    pos = posns[i+1] / trans_samples * max_translation_length
                    ofs = int(round(pos)) + 1
                else:
                    # put bin smack dab in middle of the feature
                    pos = ((posns[i] + posns[i+1]) / trans_samples / 2) * max_translation_length
                    ofs = int(round(pos))
                # only label symbols fully inside the window
                if 0 < ofs < max_translation_length - 1 and posns[i] > 0 and posns[i+1] < trans_samples:
                    tti = target_token_index[char]
                    y[ofs, tti] = 1.0
            # set "no symbol" probability for bins
            for ofs in range(0, max_translation_length):
                y[ofs, 0] = max(0.0, 1.0 - np.sum(y[ofs]))
            x_train.append(x)
            y_train.append(y)
        return np.array(x_train), np.array(y_train)

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        # nothing to do: data is generated per batch, there is no index set
class CWDetectorTranslator:
    """Sliding-window CW detector + translator driven by the two pre-trained
    Keras models (detection and translation) defined above."""

    def __init__(self, sample_rate, overlap=0.5, wndsizes=[512,256,128]):
        # NOTE(review): mutable default `wndsizes` is shared across instances —
        # safe only because it is never mutated.
        self.sr = sample_rate
        self.overlap = overlap
        self.wndsizes = wndsizes  # FFT window sizes for the multi-scale spectrogram
        # samples consumed per hop; NOTE(review): the 128 presumably matches
        # the smallest FFT window size above — confirm before changing either
        self.nsamples = int((128 * (1-overlap) + 1) * max_samples / 2)
        # rolling buffer of the most recent raw samples
        self.wnd = np.zeros((self.nsamples * 8,))
        self.detections = []
        # pre-trained detection model (per-row "is there CW here?")
        detect_checkpoint_fn = "weights_detect.h5"
        self.detect_model = make_model()
        self.detect_model.load_weights(detect_checkpoint_fn)
        # pre-trained translation model (per-bin token probabilities)
        trans_checkpoint_fn = "weights_translate.h5"
        self.trans_model = make_trans_model()
        self.trans_model.load_weights(trans_checkpoint_fn)

    def clear(self):
        # forget all buffered audio
        self.wnd[:] = 0

    def add_samples(self, samples):
        """Append raw samples and recompute the multi-scale spectrogram."""
        # shift window by 1/2
        n = len(samples)
        self.wnd[0:-n] = self.wnd[n:]
        # add new samples
        self.wnd[-n:] = samples
        # convert to spectrogram at three different scales
        specs = []
        for nps in self.wndsizes:
            nov = int(nps * self.overlap)
            # raw samples needed to yield max_samples spectrogram columns
            wndsamp = max_samples * (nps-nov+1)
            w = self.wnd[-wndsamp:]
            frequencies, times, spectrogram = signal.spectrogram(w, fs=self.sr, nperseg=nps, noverlap=nov)
            specs.append(spectrogram[:, 0:max_samples])
        # stack the three scales along the frequency axis
        self.spec = np.concatenate(specs, axis=0)

    def detect(self):
        """Run the detection model over every frequency row of the spectrogram,
        then merge detections on adjacent rows."""
        xy = self.spec #[:, 0:max_samples]
        # normalize spectrogram (per-row min/max scaling)
        ymin = np.min(xy, axis=1)
        ymax = np.max(xy, axis=1)
        xy = (xy - ymin[:,None]) / (ymax - ymin + 1e-6)[:,None]
        #xy = (xy - np.min(xy)) / (np.max(xy) - np.min(xy) + 1e-6)
        self.xy = xy
        xy = np.reshape(xy, (xy.shape[0], xy.shape[1], 1))
        p = self.detect_model.predict(xy[:, 0:max_samples])
        # keep rows where the model reports a CW signal
        self.detections = np.argwhere(p > 0.5)
        # combine adjacent bins
        for i in range(0, len(self.detections)-1):
            y = self.detections[i][0]
            if y == self.detections[i+1][0] - 1:
                # average the two adjacent rows and invalidate the first one
                #self.xy[y+1] = np.maximum(self.xy[y], self.xy[y+1])
                self.xy[y+1] = (self.xy[y] + self.xy[y+1]) / 2
                self.detections[i] = (-1,-1)

    def translate(self):
        """Translate every surviving detection row; returns (row, text) pairs."""
        results = []
        for y,i in self.detections:
            if y >= 0:  # rows merged away in detect() are marked (-1,-1)
                row = self.xy[y]
                row = np.reshape(row, (1, row.shape[0], 1))
                t = self.trans_model.predict(row)[0]
                results.append((y, bins2msg(t)))
        return results
def bins2msg(t):
    """Decode a (bins, tokens) probability matrix into a string.

    For every time bin the most probable token wins; token index 0 means
    "no symbol" and is rendered as '.'.
    """
    # vectorized per-row argmax replaces the original per-bin Python loop
    best = np.argmax(t, axis=1)
    return ''.join(TOKENS[k] if k > 0 else '.' for k in best)
| sehugg/cwkeras | cwmodel.py | cwmodel.py | py | 10,643 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "keras.layers.Input",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "keras.layers.Conv1D",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "keras.layers... |
25299883898 | import pandas as pd
import numpy as np
from sqlalchemy import create_engine
from typing import List
# Open the patients SQLite database (relative to this script) and load the
# whole `patient` table into a DataFrame, closing the connection right after.
engine = create_engine('sqlite:///../data/data.db', echo=False)
con = engine.connect()
df = pd.read_sql('select * from patient', con=con)
con.close()
def detect_duplicates(df: pd.DataFrame) -> pd.DataFrame:
    """Return *df* with likely duplicate patient rows removed.

    Filters applied in order:
      1. rows whose ``patient_id`` already appeared (keep the first),
      2. rows whose full name cannot be built (null given name or surname),
      3. rows with a missing phone number,
      4. later rows reusing an already-seen phone number,
      5. rows sharing a full name together with postcode, address or suburb.
    """
    # keep only the first row per patient_id; copy so the column assignment
    # below does not trigger SettingWithCopyWarning on a filtered view
    df = df[~df.patient_id.duplicated()].copy()
    # build the full name and drop rows where it could not be built
    df['name'] = df['given_name'] + ' ' + df['surname']
    df = df[~df.name.isnull()]
    # Using phone number to detect duplicates:
    # first remove the rows with phone numbers being None
    df = df.dropna(subset=["phone_number"])
    # then remove patient ids corresponding to duplicated phone numbers
    df = df[~df.patient_id.isin(set(df[df.phone_number.duplicated()].patient_id.values))]

    def rm_gb(cols: List[str], data: pd.DataFrame) -> pd.DataFrame:
        '''Drop rows that collide on the given column combination.

        If more than one row shares the same values for *cols* (e.g. the
        same "name" and "postcode"), all but the first colliding row are
        removed.
        '''
        counts = data.groupby(cols).count()
        counts = counts[counts.patient_id > 1].reset_index()
        dup = data[data[cols[0]].isin(counts[cols[0]])]
        for col in cols[1:]:
            # bugfix: filter on the frame being narrowed (`dup`), not the
            # enclosing `df`, so the mask always aligns with dup's own index
            dup = dup[dup[col].isin(counts[col])]
        return data[~data.patient_id.isin(dup[dup.name.duplicated()].patient_id.values)]

    df = rm_gb(['name', 'postcode'], df)
    df = rm_gb(['name', 'address_1'], df)
    df = rm_gb(['name', 'suburb'], df)
    return df
df = detect_duplicates(df) | monkeyusage/duplicates | scripts/detect_duplicates.py | detect_duplicates.py | py | 1,699 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "typing.L... |
12279662576 | # third party
import torch
import torch.nn as nn
class IMDBDataset:
    """Minimal map-style dataset pairing encoded reviews with targets."""

    def __init__(self, reviews, targets):
        """
        Argument:
            reviews: a numpy array
            targets: a vector array
        """
        self.reviews = reviews
        self.target = targets

    def __len__(self):
        # dataset size equals the number of review rows
        return len(self.reviews)

    def __getitem__(self, index):
        # wrap the requested row and its label into torch tensors
        return {
            "review": torch.tensor(self.reviews[index, :], dtype=torch.long),
            "target": torch.tensor(self.target[index], dtype=torch.float),
        }
class LSTM(nn.Module):
    """Bidirectional LSTM classifier over frozen pre-trained embeddings."""

    def __init__(self, embedding_matrix):
        super(LSTM, self).__init__()
        # embedding table dimensions come straight from the matrix
        vocab_size, embed_dim = embedding_matrix.shape
        self.embedding = nn.Embedding(
            num_embeddings=vocab_size,
            embedding_dim=embed_dim,
        )
        self.embedding.weight = nn.Parameter(
            torch.tensor(embedding_matrix, dtype=torch.float32),
        )
        # frozen: we rely on the pre-trained word vectors as-is
        self.embedding.weight.requires_grad = False
        # 128 hidden units per direction -> 256-dim output per time step
        self.lstm = nn.LSTM(
            embed_dim,
            128,
            bidirectional=True,
            batch_first=True,
        )
        # mean pool (256) + max pool (256) concatenated -> 512 features
        self.out = nn.Linear(512, 2)

    def forward(self, x):
        embedded = self.embedding(x)
        hidden, _ = self.lstm(embedded)
        # pool over the sequence dimension, then fuse both summaries
        mean_pooled = torch.mean(hidden, 1)
        max_pooled, _ = torch.max(hidden, 1)
        features = torch.cat((mean_pooled, max_pooled), 1)
        # final prediction over the two classes
        return self.out(features)
| seedatnabeel/Data-IQ | src/models/nlp_models.py | nlp_models.py | py | 1,958 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "torch.tensor",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_num... |
7284886713 | #!/usr/bin/env python
# coding: utf-8
# # Integración con Python
#
# Esta sección pretende ser un compendio (esperemos que claro y ordenado) de todo el `Python`
# que hemos ido usando en el Capítulo 4.
#
# Esta sección también puede servir como repaso de los conceptos más aplicados que hemos introducido en dicho capítulo.
# ## Integrales iteradas con `Sympy`
#
# Mostramos, sobre un ejemplo, el cálculo de integrales iteradas con la ayuda de `Sympy`.
#
# En este caso vamos a calcular la integral
#
# $$
# \int_{1}^{2}\left[\int_{1}^{x} (2xy + 3y^2)dy \right] dx.
# $$
# Lo haremos de dos maneras diferentes:
#
# 1. integrando primero respecto a $y$ y luego respecto a $x$,
# 2. integrando primero respecto a $x$ y luego respecto a $y$.
# In[2]:
import sympy as sp
x, y = sp.symbols('x y', real=True) # declare x, y as real symbolic variables
f_expr = 2*x*y + 3*y**2
f = sp.Lambda((x,y), f_expr)
# iterated integral: inner dy from 1 to x, then outer dx from 1 to 2
res1 = sp.integrate(f_expr, (y, 1, x), (x, 1, 2))
display(res1)  # NOTE(review): display() comes from IPython/Jupyter, not plain Python
# same integral, this time through the sympy Lambda wrapper
res2 = sp.integrate(f(x,y), (y, 1, x), (x, 1, 2))
display(res2)
# ## Cálculo del área de una superficie utilizando ' Sympy'
#
# En este caso, debemos integrar la función $1$ en la región de que se trate.
#
# Como ejemplo, vamos a calcular la superficie de la región que mostramos a continuación:
#
# <img src="../../images/4.2.Region1.png" width="400"/>
# In[3]:
import sympy as sp
x, y = sp.symbols('x y', real=True) # declare x, y as real symbolic variables
# area of a region = double integral of the constant 1 over it
# res = sp.integrate(1, (y, sp.cos(x), sp.sin(x)), (x, 1, 2))
res = sp.integrate(1, (y, 0, sp.sin(x)), (x, 1, 2))
display(res)  # exact symbolic value; display() requires IPython/Jupyter
display(sp.N(res))  # numeric approximation
# ## Cálculo de volúmenes con ' Sympy'
#
# Para calcular el volumen entre el plano $XY$ y el gráfico de una función positiva, debemos integrar esa función en el dominio correspondiente.
#
# Como ejemplo, vamos a calcular (de dos maneras distintas, según el orden elegido para las variables) el volumen de la región sólida delimitada por el plano $z=f(x,y)=2–x–2y$ y los tres planos de coordenadas
#
# <img src="../../images/05_vol_iter.png" width="250"/>
#
# In[4]:
import sympy as sp
x, y = sp.symbols('x y', real=True)
# volume under the plane z = 2 - x - 2y over the first-octant triangle
f = sp.Lambda((x,y),2-x-2*y)
volumen = sp.integrate(f(x,y), (y, 0, (2-x)/2), (x, 0, 2))
display(volumen)  # NOTE(review): display() requires IPython/Jupyter
# same volume with the integration order swapped (dx first, then dy)
volumen2 = sp.integrate(f(x,y), (x, 0, 2-2*y), (y, 0, 1))
display(volumen2)
# ## Cálculo del valor medio con ' Sympy'
#
# Si queremos calcular el valor medio de una función de dos variables en una región plana, $R$, debemos aplicar la fórmula
#
# $$
# \textrm{Valor medio}=\dfrac{1}{\textrm{Área}\,(R)}\int_R\int f(x,y)\,dA.
# $$
#
# Calculamos a continuación el valor medio de $f(x,y)=\dfrac{1}{2}xy$ sobre la región del plano $R$ dada por un rectángulo con vértices $(0,0)$, $(4,0)$, $(4,3)$ y $(0,3)$. Para ello, observamos que el área de la región rectangular $R$ es $4\times 3=12$, y que los límites para $x$ e $y$ son $0\leq x\leq 4$ e $0\leq y\leq 3$, como se muestra en la figura
#
# <img src="../../images/05_valor_medio.png" width="350"/>
# In[5]:
import sympy as sp
x, y = sp.symbols('x y', real=True)
# mean value over R = (1/area(R)) * double integral of f; the 4x3 rectangle
# has area 12, hence the 1/12 factor folded into the integrand
f = sp.Lambda((x,y),1/12*1/2*x*y)
val_m = sp.integrate(f(x,y), (y, 0, 3), (x, 0, 4))
display(val_m)  # NOTE(review): display() requires IPython/Jupyter
# ## Integración con cambio de variable en `Sympy`
#
# Para realizar un cambio de variable debemos aplicar la fórmula
#
# $$
# \displaystyle \int_{R} \int G(x,y) \, dx \, dy = \int_{S} \int (G \circ \mathbf{F})(u,v) |\det \mathrm{J}\mathbf{F}(u,v)| \, du \, dv.
# $$
#
# Como ejemplo, vamos a integrar $\displaystyle \int_R \int 9xy \, dA$, siendo $R$ la región que mostramos en la siguiente figura
#
# <img src="../../images/5.4_Ejemplo_1.png" width="250"/>
#
# Consideramos el siguiente cambio de variable:
#
# $$
# \left\{\begin{array}{lcl}
# u &=& x + y \\
# v &=& x - 2y
# \end{array}\right.
# $$
# In[6]:
import sympy as sp
x, y, u, v = sp.symbols('x y u v', real=True) # declare the symbolic variables x, y, u, v
# Define the change of variables F and the integrand G as matrices
F = sp.Matrix([ 1/3*(2*u+v), 1/3*(u-v) ])
G = sp.Matrix([ 9*x*y ])
# Express the integrand in the new (u, v) variables
GoF_expr = G.subs(x,1/3*(2*u+v)).subs(y,1/3*(u-v))
# Determinant of the Jacobian of F (the change-of-variable factor)
det_jac_F = sp.det( F.jacobian([u,v]) )
# Integrate over the transformed region
res = sp.integrate(GoF_expr*abs(det_jac_F), (v, -4, 0), (u, 1, 4))
display('Valor de la integral: ')
display(sp.simplify(res[0]))  # res is a 1x1 Matrix, hence res[0]
# ## Integración con cambio a polares con ' Sympy'
#
# Como caso particular de especial interés, al cambiar de coordenadas cartesianas a polares, la fórmula anterior resulta
#
# $$
# \int_{R} \int G(x,y) \, dA = \int_{\alpha}^{\beta} \int_{g_1(\theta)}^{g_2(\theta)} G(r\cos(\theta),r\sin(\theta)) r \, dr \, d\theta.
# $$
#
# Vamos a aplicarlo a un ejemplo concreto, en el que calcularemos la integral $\displaystyle \int_R \int (x^2 + y) \, dA$, siendo
# $R$ la región anular mostrada en la siguiente figura:
#
# <img src="../../images/5.4_Ejemplo_2.png" width="300"/>
# In[7]:
import sympy as sp
x, y, r, th = sp.symbols('x y r th', real=True) # declare the symbolic variables x, y, r, th
# Define the polar change of variables F and the integrand G as matrices
F = sp.Matrix([ r*sp.cos(th), r*sp.sin(th) ])
G = sp.Matrix([ x**2 + y ])
# Express the integrand in polar coordinates
GoF_expr = G.subs(x,r*sp.cos(th)).subs(y,r*sp.sin(th))
# Jacobian determinant of the polar map (equals r)
det_jac_F = sp.det( F.jacobian([r,th]) )
# Integrate over the annulus 1 <= r <= sqrt(5), 0 <= theta <= 2*pi
res = sp.integrate(GoF_expr*abs(det_jac_F), (r, 1, sp.sqrt(5)), (th, 0, 2*sp.pi))
display('Valor de la integral: ')
display(sp.simplify(res[0]))  # res is a 1x1 Matrix, hence res[0]
| GCED-CM/JB-Calculo2-UDC | _build/jupyter_execute/capitulos/05/07.Integracion.py | 07.Integracion.py | py | 5,684 | python | es | code | 2 | github-code | 36 | [
{
"api_name": "sympy.symbols",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sympy.Lambda",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sympy.integrate",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sympy.integrate",
"lin... |
70222738664 | import telebot
from telebot import types
import sqlite3
bot = telebot.TeleBot('1835870307:AAHlXuytmI_rtPbjNLj3PzBU3oaeGe7yboY')
# Получение списка администраторов
def get_administrators():
    """Return the list of Telegram user ids with admin rights."""
    conn = get_db_connection()
    try:
        # admin_id is stored in the second column of the administrators table
        return [row[1] for row in conn.execute('SELECT * FROM administrators').fetchall()]
    finally:
        conn.close()  # the original leaked this connection on every call
def get_db_connection():
    """Open the bot's SQLite database with dict-like row access."""
    connection = sqlite3.connect('bot_db')
    connection.row_factory = sqlite3.Row
    return connection
def get_welcome_message():
    """Return the "about the company" text from the welcome table.

    Mirrors the original behaviour: if the table holds several rows the
    last one wins; an empty table yields ''.
    """
    conn = get_db_connection()
    try:
        text = ''
        for row in conn.execute('SELECT * FROM welcome'):
            text = ''.join(row)  # concatenate the row's text columns
        return text
    finally:
        conn.close()  # the original leaked this connection
def get_start_message():
    """Return the /start greeting (first row's first column), or None if unset."""
    conn = get_db_connection()
    try:
        row = conn.execute('SELECT * FROM start_message').fetchone()
        # the original returned the first row's first column and implicitly
        # None on an empty table — both behaviours are preserved
        return row[0] if row is not None else None
    finally:
        conn.close()  # the original leaked this connection
def send_new_admin(admin):
    """Insert *admin* (a Telegram user id) into the administrators table."""
    conn = sqlite3.connect('bot_db')
    conn.row_factory = sqlite3.Row
    try:
        # parameterized insert; the original's .fetchall() on an INSERT was a no-op
        conn.execute('INSERT INTO administrators(admin_id) VALUES (?) ', (admin, ))
        conn.commit()
    finally:
        conn.close()  # close even if the insert fails
def edit_start_message(message):
    # Step 1 of editing the /start greeting: ask the admin for the new text,
    # then hand control to edit_start_message_next_step.
    try:
        chat_id = message.chat.id
        text = message.text  # NOTE(review): unused
        msg = bot.send_message(chat_id, 'Напишите новый текст стартового сообщения')
        bot.register_next_step_handler(msg, edit_start_message_next_step)
    except:
        # NOTE(review): if message.chat.id itself raised, chat_id is unbound here
        bot.send_message(chat_id, 'Что-то пошло не так')
def edit_start_message_next_step(message):
    """Persist the new /start greeting text entered by the admin."""
    # bugfix: resolve chat_id before any fallible DB work so the except
    # branch can always answer the user (the original read it after
    # sqlite3.connect and would NameError if connect failed)
    chat_id = message.chat.id
    try:
        text = message.text
        conn = sqlite3.connect('bot_db')
        conn.execute('UPDATE start_message SET title = (?) WHERE ROWID=1', (text, ))
        conn.commit()
        bot.send_message(chat_id, 'Стартовое сообщение было успешно изменено')
        conn.close()
    except Exception:
        # broad guard kept: the bot should answer something even on failure
        bot.send_message(chat_id, 'Что-то пошло не так')
def edit_information_company(message):
try:
chat_id = message.chat.id
bot.send_message(chat_id, get_welcome_message())
msg = bot.send_message(chat_id, 'Напишите новый текст приветствия')
bot.send_message(chat_id, 'Для отмены напиши \"Отмена\" без кавычек')
bot.register_next_step_handler(msg, edit_information_company_next_step)
except:
bot.send_message(chat_id, 'Что-то пошло не так')
def edit_information_company_next_step(message):
try:
chat_id = message.chat.id
text = message.text
if text.lower() == 'отмена':
bot.send_message(chat_id, 'Действие отменено')
return
conn = sqlite3.connect('bot_db')
conn.row_factory = sqlite3.Row
conn.execute('UPDATE welcome SET title=?', (text,))
conn.commit()
conn.close()
bot.send_message(chat_id, 'Сообщение приветствия было успешно изменено')
except:
bot.send_message(chat_id, 'Что-то пошло не так')
def show_last_post():
    """Return a human-readable summary of the most recently added vacancy.

    Like the original, this assumes the `bot` table is non-empty (it is only
    called right after an insert); it crashes otherwise.
    """
    conn = get_db_connection()
    try:
        # fetch just the newest row instead of unpacking a one-row loop
        row = conn.execute('SELECT * FROM bot ORDER BY ID DESC LIMIT 1').fetchone()
    finally:
        conn.close()  # the original leaked this connection
    # columns: id, title, description, location, salary
    return f'Название вакансии: {row[1]}\n\n' \
           f'Описание вакансии: {row[2]}\n\n' \
           f'Локализация: {row[3]}\n\n' \
           f'Почасовая ставка: {row[4]}'
# Редактирование вакансии
id = int
def edit_offer(message):
try:
chat_id = message.chat.id
msg = bot.send_message(chat_id, 'Напиши ID поста для редактирования')
bot.send_message(chat_id, 'Для отмены напиши \"Отмена\" без кавычек')
bot.register_next_step_handler(msg, edit_offer_next_step)
except:
bot.send_message(chat_id, 'Что-то пошло не так')
def edit_offer_next_step(message):
    """Step 2 of vacancy editing: remember the id, then ask for a new title."""
    global id
    try:
        chat_id = message.chat.id
        # bugfix: handle cancellation BEFORE treating the input as an id —
        # the original first called show_offer('отмена') and stored the
        # cancel word in the global id
        if message.text.lower() == 'отмена':
            bot.send_message(chat_id, 'Действие отменено')
            return
        id = message.text
        bot.send_message(chat_id, show_offer(id))
        msg = bot.send_message(chat_id, 'Напиши новое название вакансии:')
        bot.register_next_step_handler(msg, edit_offer_title)
    except Exception:
        bot.send_message(chat_id, 'Какая-то ошибка')
def edit_offer_title(message):
try:
global id
chat_id = message.chat.id
title = message.text
bot.send_message(chat_id, 'Для отмены напиши \"Отмена\" без кавычек')
if title.lower() == 'отмена':
bot.send_message(chat_id, 'Действие отменено')
return
conn = sqlite3.connect('bot_db')
conn.row_factory = sqlite3.Row
conn.execute('UPDATE bot SET title=(?) WHERE id=(?)', (title, id))
conn.commit()
conn.close()
msg = bot.send_message(chat_id, 'Теперь введите описание')
bot.register_next_step_handler(msg, edit_offer_description)
except:
bot.send_message(message.chat.id, 'Что-то пошло не так')
def edit_offer_description(message):
try:
global id
chat_id = message.chat.id
text = message.text
bot.send_message(chat_id, 'Для отмены напиши \"Отмена\" без кавычек')
if text.lower() == 'отмена':
bot.send_message(chat_id, 'Действие отменено')
return
conn = sqlite3.connect('bot_db')
conn.row_factory = sqlite3.Row
conn.execute('UPDATE bot SET description=(?) WHERE id=(?)', (text, id))
conn.commit()
conn.close()
msg = bot.send_message(chat_id, 'Теперь введите местоположение работы')
bot.register_next_step_handler(msg, edit_offer_location)
except:
bot.send_message(message.chat.id, 'Что-то пошло не так')
def edit_offer_location(message):
try:
global id
chat_id = message.chat.id
text = message.text
bot.send_message(chat_id, 'Для отмены напиши \"Отмена\" без кавычек')
if text.lower() == 'отмена':
bot.send_message(chat_id, 'Действие отменено')
return
conn = sqlite3.connect('bot_db')
conn.row_factory = sqlite3.Row
conn.execute('UPDATE bot SET location=(?) WHERE id=(?)', (text, id))
conn.commit()
conn.close()
msg = bot.send_message(chat_id, 'Теперь введите почасову оплату')
bot.register_next_step_handler(msg, edit_offer_salary)
except:
bot.send_message(message.chat.id, 'Что-то пошло не так')
def edit_offer_salary(message):
try:
global id
chat_id = message.chat.id
text = message.text
if text.lower() == 'отмена':
bot.send_message(chat_id, 'Действие отменено')
return
conn = sqlite3.connect('bot_db')
conn.row_factory = sqlite3.Row
conn.execute('UPDATE bot SET salary=(?) WHERE id=(?)', (text, id))
conn.commit()
conn.close()
msg = bot.send_message(chat_id, 'Вакансия была успешно отредактирована')
id = int
except:
bot.send_message(message.chat.id, 'Что-то пошло не так')
# Удаление вакансии
def remove_offer(message):
try:
chat_id = message.chat.id
name = message.text
msg = bot.reply_to(message, 'Введите ID вакансии, которую хотите удалить')
bot.send_message(chat_id, 'Для отмены напиши \"Отмена\" без кавычек')
bot.register_next_step_handler(msg, remove_offer_next_step)
except:
bot.reply_to(message, 'Что-то пошло не так')
def remove_offer_next_step(message):
text = message.text
if text.lower() == 'отмена':
bot.send_message(message.chat.id, 'Действие отменено')
return
if not text.isdigit():
msg = bot.reply_to(message, 'Ошибка. ИД включает в себя только номерные знаки.')
bot.register_next_step_handler(msg, remove_offer_next_step)
return
try:
conn = sqlite3.connect('bot_db')
conn.row_factory = sqlite3.Row
conn.execute('DELETE FROM bot WHERE id = ?', (int(text),))
conn.commit()
conn.close()
msg = bot.reply_to(message, 'Вакансия была успешно удалена', reply_markup=admin_keyboard())
except:
msg = bot.reply_to(message, 'Какая-то ошибка')
# конец удаления вакансии
# Добавление новой вакансии
def add_new_offer(message):
try:
chat_id = message.chat.id
name = message.text
msg = bot.send_message(chat_id, 'Напишите название новой вакансии: ')
bot.send_message(chat_id, 'Для отмены напиши \"Отмена\" без кавычек')
bot.register_next_step_handler(msg, add_title)
except:
bot.send_message(message.chat.id, 'Что-то пошло не так')
def add_title(message):
try:
chat_id = message.chat.id
title = message.text
bot.send_message(chat_id, 'Для отмены напиши \"Отмена\" без кавычек')
if title.lower() == 'отмена':
bot.send_message(chat_id, 'Действие отменено')
return
conn = sqlite3.connect('bot_db')
conn.row_factory = sqlite3.Row
conn.execute('INSERT INTO bot(title) VALUES (?)', (title,))
conn.commit()
conn.close()
msg = bot.send_message(chat_id, 'Теперь введите описание')
bot.register_next_step_handler(msg, add_description)
except:
bot.send_message(message.chat.id, 'Что-то пошло не так')
def add_description(message):
try:
chat_id = message.chat.id
description = message.text
bot.send_message(chat_id, 'Для отмены напиши \"Отмена\" без кавычек')
if description.lower() == 'отмена':
bot.send_message(chat_id, 'Действие отменено')
return
conn = sqlite3.connect('bot_db')
conn.row_factory = sqlite3.Row
conn.execute('UPDATE bot SET description=(?) where id = (select MAX(id) from bot)', (description,))
conn.commit()
conn.close()
msg = bot.send_message(chat_id, 'Введите местоположение работы')
bot.register_next_step_handler(msg, add_location)
except:
bot.send_message(message.chat.id, 'Что-то пошло не так')
def add_location(message):
try:
chat_id = message.chat.id
location = message.text
bot.send_message(chat_id, 'Для отмены напиши \"Отмена\" без кавычек')
if location.lower() == 'отмена':
bot.send_message(chat_id, 'Действие отменено')
return
conn = sqlite3.connect('bot_db')
conn.row_factory = sqlite3.Row
conn.execute('UPDATE bot SET location=(?) where id = (select MAX(id) from bot)', (location,))
conn.commit()
conn.close()
msg = bot.send_message(chat_id, 'Напишите почасовую ставку для данной вакансии')
bot.register_next_step_handler(msg, add_salary)
except:
bot.send_message(message.chat.id, 'Что-то пошло не так')
def add_salary(message):
try:
chat_id = message.chat.id
salary = message.text
if salary.lower() == 'отмена':
bot.send_message(chat_id, 'Действие отменено')
return
conn = sqlite3.connect('bot_db')
conn.row_factory = sqlite3.Row
conn.execute('UPDATE bot SET salary=(?) where id = (select MAX(id) from bot)', (salary,))
conn.commit()
conn.close()
msg = bot.send_message(chat_id, 'Вакансия была успешно добавлена')
bot.reply_to(msg, show_last_post())
except:
bot.send_message(message.chat.id, 'Что-то пошло не так')
# Получение вакансии
def get_offer(id):
    # Fetch a single vacancy row by its primary key.
    # NOTE(review): `id` shadows the builtin, and the return type is mixed —
    # a sqlite3.Row when found, a plain string when not; callers must cope
    # with both.
    conn = get_db_connection()
    post = conn.execute('SELECT * FROM bot WHERE id = ?',
                        (id,)).fetchone()
    conn.close()
    if post is None:
        return 'Nie ma takiej oferty'
    return post
def show_offer(id):
    # Build a plain-text dump of one vacancy: all columns except the numeric
    # id, concatenated without separators, with a trailing newline per row.
    # NOTE(review): `list` shadows the builtin; kept as-is in this pass.
    try:
        list = []
        for i in get_db_connection().execute('SELECT * FROM bot WHERE id = ?', (int(id),)).fetchall():
            for j in range(len(i)):
                list.append(i[j])
            list.append('\n')
        text = ''
        # skip the first element (the id column)
        text = ''.join(list[1::])
        return text
    except:
        # NOTE(review): silently returns None on any error (non-numeric id,
        # DB failure) — callers then send None to Telegram
        return
# Добавление нового администратора
def add_admin(message):
try:
chat_id = message.chat.id
name = message.text
msg = bot.reply_to(message, 'Напишите ID аккаунта, который хотите добавить в администраторы: ')
bot.send_message(chat_id, 'Для отмены напиши \"Отмена\" без кавычек')
bot.register_next_step_handler(msg, process_age_step)
except Exception as e:
bot.reply_to(message, 'Что-то пошло не так')
def process_age_step(message):
try:
chat_id = message.chat.id
text = message.text
if text.lower() == 'отмена':
bot.send_message(chat_id, 'Действие отменено')
return
if not text.isdigit():
msg = bot.reply_to(message, 'Ошибка. ИД включает в себя только номерные знаки.')
bot.register_next_step_handler(msg, process_age_step)
return
try:
conn = sqlite3.connect('bot_db')
conn.row_factory = sqlite3.Row
conn.execute('INSERT INTO administrators(admin_id) VALUES (?)', (int(text),)).fetchall()
conn.commit()
conn.close()
msg = bot.reply_to(message, 'Администратор был успешно добавлен', reply_markup=admin_keyboard())
except:
msg = bot.reply_to(message, 'Какая-то ошибка')
except:
bot.reply_to(message, 'oooops')
# конец добавлние администратора
# Удаление администратора
def remove_admin(message):
try:
chat_id = message.chat.id
name = message.text
bot.send_message(message.chat.id, str(get_administrators()))
msg = bot.reply_to(message, 'Напишите ID аккаунта, который хотите удалить из администраторов: ')
bot.send_message(chat_id, 'Для отмены напиши \"Отмена\" без кавычек')
bot.register_next_step_handler(msg, remove_admin_next_step)
except Exception as e:
bot.reply_to(message, 'Что-то пошло не так %s' % (e))
def remove_admin_next_step(message):
    """Second step of admin removal: validate the entered id and delete it."""
    try:
        chat_id = message.chat.id
        text = message.text
        if text.lower() == 'отмена':
            bot.send_message(chat_id, 'Действие отменено')
            return
        if not text.isdigit():
            msg = bot.reply_to(message, 'Ошибка. ИД включает в себя только номерные знаки.')
            # bugfix: re-ask THIS step — the original re-registered the
            # add-admin step (process_age_step) here by mistake, silently
            # switching a removal into an addition
            bot.register_next_step_handler(msg, remove_admin_next_step)
            return
        try:
            conn = sqlite3.connect('bot_db')
            conn.row_factory = sqlite3.Row
            conn.execute('DELETE FROM administrators WHERE admin_id=?', (int(text),)).fetchall()
            conn.commit()
            conn.close()
            msg = bot.reply_to(message, 'Администратор был успешно удален', reply_markup=admin_keyboard())
        except Exception:
            msg = bot.reply_to(message, 'Какая-то ошибка')
    except Exception:
        bot.reply_to(message, 'ooooops')
# Конец удаление администраторе
def show_contact_list(message):
    """Send the admin the full contents of the contact-request table."""
    try:
        conn = sqlite3.connect('bot_db')
        conn.row_factory = sqlite3.Row
        chat_id = message.chat.id
        # render every row as its space-separated values plus a newline
        parts = []
        for row in conn.execute('SELECT * FROM contact'):
            for value in row:
                parts.append(str(value) + ' ')
            parts.append('\n')
        bot.send_message(chat_id, ''.join(parts))
        conn.close()
    except:
        bot.send_message(message.chat.id, 'Что-то пошло не так')
def contact(message):
    """Store a user's contact request and forward it to the first admin."""
    # bugfix: read the chat data first so chat_id is always bound when the
    # except branch runs (the original assigned it after sqlite3.connect)
    chat_id = message.chat.id
    text = message.text
    try:
        conn = sqlite3.connect('bot_db')
        conn.row_factory = sqlite3.Row
        # notify the first administrator about the new request
        bot.send_message(get_administrators()[0], 'Поступила новая заявка: \n' + text)
        conn.execute('INSERT INTO contact(text) VALUES (?)', (text,))
        conn.commit()
        conn.close()
        bot.send_message(chat_id, 'Ваша заявка была успешно отправлена')
    except Exception:
        # broad guard kept: the bot should answer something even on failure
        bot.send_message(chat_id, 'Что-то пошло не так')
def start_keyboard():
    """Build the main reply keyboard shown to regular users."""
    markup = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)
    markup.add(
        types.KeyboardButton(text='Информация о фирме'),
        types.KeyboardButton(text='Предложения по работе'),
        types.KeyboardButton(text='Сконтактироваться'),
    )
    return markup
def admin_keyboard():
    """Build the admin reply keyboard with all management actions."""
    markup = types.ReplyKeyboardMarkup(row_width=2, resize_keyboard=True)
    # button labels in the same order the original added them
    labels = [
        'Добавить новую вакансию',
        'Редактировать вакансию',
        'Удалить вакансию',
        'Добавить администратора',
        'Удалить администратора',
        'Список администраторов',
        'Редактировать информацию о фирме',
        'Показать список заявок',
        'Редактировать стартовое сообщение',
    ]
    markup.add(*(types.KeyboardButton(text=label) for label in labels))
    return markup
@bot.message_handler(commands=["start"])
def send_welcome(message):
bot.send_chat_action(message.chat.id, 'upload_photo')
bot.send_message(message.chat.id, get_start_message() , reply_markup=start_keyboard())
@bot.message_handler(commands=["adminmenu"])
def adminmenu(message):
if message.from_user.id in get_administrators():
bot.send_message(message.from_user.id, text='Добро пожаловать в меню администратора',
reply_markup=admin_keyboard())
else:
bot.send_message(message.from_user.id, text='У тебя нет прав для входа в это меню')
@bot.message_handler(content_types=['text'])
def get_text_message(message):
if message.text == 'Предложения по работе':
keyboard = types.InlineKeyboardMarkup()
for i in get_db_connection().execute('SELECT * FROM bot').fetchall():
keyboard.add(types.InlineKeyboardButton(text=i[1], callback_data=i[0]))
bot.send_message(message.from_user.id,
text='Список актуальных вакансий \n(нажми на интересующую тебя вакансию чтобы получить подробную информацию): ',
reply_markup=keyboard)
elif message.text == 'Информация о фирме':
bot.send_message(message.chat.id, text=get_welcome_message(), reply_markup=start_keyboard())
elif message.text == 'Сконтактироваться':
bot.register_next_step_handler(bot.send_message(message.chat.id,
'Введите свое Имя, номер телефона, и напишите какая вакансия вас интересует.'),
contact)
elif message.text == 'Список администраторов' and message.chat.id in get_administrators():
bot.send_message(message.chat.id, text=str(get_administrators()), reply_markup=admin_keyboard())
elif message.text == 'Добавить администратора' and message.chat.id in get_administrators():
add_admin(message)
elif message.text == 'Удалить администратора' and message.chat.id in get_administrators():
remove_admin(message)
elif message.text == 'Показать список заявок' and message.chat.id in get_administrators():
show_contact_list(message)
elif message.text == 'Добавить новую вакансию' and message.chat.id in get_administrators():
add_new_offer(message)
elif message.text == 'Редактировать информацию о фирме' and message.chat.id in get_administrators():
edit_information_company(message)
elif message.text == 'Редактировать стартовое сообщение' and message.chat.id in get_administrators():
edit_start_message(message)
elif message.text == 'Редактировать вакансию' and message.chat.id in get_administrators():
keyboard = types.InlineKeyboardMarkup()
for i in get_db_connection().execute('SELECT * FROM bot').fetchall():
keyboard.add(types.InlineKeyboardButton(text=f'id = {i[0]} // {i[1]}', callback_data=i[0]))
msg = bot.send_message(message.from_user.id, text='Cписок актуальных вакансий: ', reply_markup=keyboard)
edit_offer(message)
elif message.text == 'Удалить вакансию' and message.chat.id in get_administrators():
keyboard = types.InlineKeyboardMarkup()
for i in get_db_connection().execute('SELECT * FROM bot').fetchall():
keyboard.add(types.InlineKeyboardButton(text=f'id = {i[0]} // {i[1]}', callback_data=i[0]))
bot.send_message(message.from_user.id, text='Список актуальных вакансий: ', reply_markup=keyboard)
remove_offer(message)
else:
bot.send_message(message.from_user.id, 'Не понимаю вас...')
@bot.callback_query_handler(func=lambda call: True)
def callback_worker(call):
    """Handle inline-keyboard button presses.

    The vacancy-list keyboards put the vacancy's database id into
    ``callback_data``; look the row up and send its details to the chat.
    """
    # int() either succeeds or raises, so the original
    # `if type(toInt) == int:` guard was always true.  Handle the actual
    # failure mode (non-numeric callback_data) explicitly instead.
    try:
        offer_id = int(call.data)
    except (TypeError, ValueError):
        return  # callback_data is not a vacancy id; nothing to do
    offer = get_offer(offer_id)
    # Row layout (per the SELECT building the keyboard):
    # offer[1]=title, offer[2]=description, offer[3]=location, offer[4]=rate.
    details = f'Название вакансии: {offer[1]}\n\n' \
              f'Описание вакансии: {offer[2]}\n\n' \
              f'Локализация: {offer[3]}\n\n' \
              f'Почасовая ставка: {offer[4]}'
    bot.send_message(call.message.chat.id, details)
# Start the long-polling loop; none_stop=True keeps polling after network
# errors instead of exiting.  NOTE(review): newer pyTelegramBotAPI releases
# spell this parameter `non_stop` — confirm against the installed version.
bot.polling(none_stop=True)
| bygimen01/SerhiiBot | bot.py | bot.py | py | 24,230 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "telebot.TeleBot",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sqlite3.Row",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
... |
15500224657 | import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
"""使用scikit-learn提供的数值归一化"""
if __name__ == "__main__":
iris = datasets.load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=666)
standardScaler = StandardScaler()
standardScaler.fit(X_train)
print("特征均值 : {}".format(standardScaler.mean_)) # 各个特征的均值
print("特征方差 : {}".format(standardScaler.scale_)) # 各个特征标准差
X_train = standardScaler.transform(X_train) # 归一化处理
X_test = standardScaler.transform(X_test)
knn_clf = KNeighborsClassifier(n_neighbors=3)
knn_clf.fit(X_train, y_train)
print("归一化处理后的准确率 : {}".format(knn_clf.score(X_test, y_test)))
| ediltwwj/MachinelLearning | ModelTest/scalerTest.py | scalerTest.py | py | 975 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.datasets.load_iris",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 14,
"usage_type": "call"
},
{
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.