blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
421e7cdaf623fa7b8157f576be9ee78581a188c9 | 02bcb7bc8563cbc95ecd00689b58f4195e9329f3 | /nets/cifar_nets.py | 0712af08e7e874aee4328d5242a417cfa2b192c6 | [] | no_license | lovecodestudent/RRSVM_pytorch | 53a8d3503b4d8a31931d69549a2464385ddacd81 | 6c7fcec26fb2e176f6cde749a6de3829c981a019 | refs/heads/master | 2021-09-07T03:50:13.442248 | 2018-02-16T21:32:35 | 2018-02-16T21:32:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,828 | py | import torch.nn as nn
import torch.nn.functional as F
from RRSVM.RRSVM_v1 import RRSVM
from pt_utils.t_sets import getOutputSize
import torch
from torch.autograd import Variable
class BaseNet(nn.Module):
    """LeNet-style baseline CNN for 32x32 RGB (CIFAR) images, 10 output classes.

    The getOutputSize() calls only record intermediate feature-map sizes for
    inspection; they do not affect the forward computation.
    """

    def __init__(self, input_size=None):
        """Build the layers.

        :param input_size: torch.Size of a sample batch; defaults to a single
            3x32x32 image (torch.Size([1, 3, 32, 32])).
        """
        super(BaseNet, self).__init__()
        if not input_size:
            input_size = torch.Size([1, 3, 32, 32])
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv1_outputsize = getOutputSize(input_size, self.conv1)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.pool1_outputsize = getOutputSize(self.conv1_outputsize, self.pool1)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.conv2_outputsize = getOutputSize(self.pool1_outputsize, self.conv2)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.pool2_outputsize = getOutputSize(self.conv2_outputsize, self.pool2)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        # print() call form works under both Python 2 and Python 3
        # (the original bare print statement was Python-2 only).
        print("InitDone")

    def forward(self, x):
        """Two conv+pool stages, flatten to 16*5*5, then three FC layers."""
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
class RRSVMNetV1(nn.Module):
    """Variant of BaseNet that inserts an RRSVM layer after each conv layer.

    Identical to BaseNet otherwise: two conv+pool stages followed by three
    fully connected layers producing 10 class scores.
    """

    def __init__(self, input_size=None):
        """Build the layers.

        :param input_size: torch.Size of a sample batch; defaults to a single
            3x32x32 image (torch.Size([1, 3, 32, 32])).
        """
        super(RRSVMNetV1, self).__init__()
        if not input_size:
            input_size = torch.Size([1, 3, 32, 32])
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv1_outputsize = getOutputSize(input_size, self.conv1)
        # RRSVM over conv1's 28x28 feature maps (6 channels).
        self.conv1_RRSVM = RRSVM(6, 28)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.pool1_outputsize = getOutputSize(self.conv1_outputsize, self.pool1)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.conv2_outputsize = getOutputSize(self.pool1_outputsize, self.conv2)
        # RRSVM over conv2's 10x10 feature maps (16 channels).
        self.conv2_RRSVM = RRSVM(16, 10)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.pool2_outputsize = getOutputSize(self.conv2_outputsize, self.pool2)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        # print() call form works under both Python 2 and Python 3
        # (the original bare print statement was Python-2 only).
        print("InitDone")

    def forward(self, x):
        """conv -> RRSVM -> relu -> pool, twice; then flatten and three FC layers."""
        x = self.pool1(F.relu(self.conv1_RRSVM(self.conv1(x))))
        x = self.pool2(F.relu(self.conv2_RRSVM(self.conv2(x))))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
# class RRSVMNet(nn.Module):
# def __init__(self, input_size=None):
# super(RRSVMNet, self).__init__()
# if not input_size:
# input_size = torch.Size([1, 3, 32, 32])
#
# self.conv1 = nn.Conv2d(3, 6, 5)
# self.conv1_outputsize = getOutputSize(input_size, self.conv1)
# self.conv1_rrsvm = RRSVM(in_channels=6, gridsize=28)
#
# self.pool1 = nn.MaxPool2d(2, 2)
# self.pool1_outputsize = getOutputSize(self.conv1_outputsize, self.pool1)
# self.conv2 = nn.Conv2d(6, 16, 5)
# self.conv2_outputsize = getOutputSize(self.pool1_outputsize, self.conv2)
#
# self.conv3 = nn.Conv2d(16, 32, 3)
# self.conv3_outputsize = getOutputSize(self.conv2_outputsize, self.conv3)
#
# self.conv4 = nn.Conv2d(32, 64, 3)
# self.conv4_outputsize = getOutputSize(self.conv3_outputsize, self.conv4)
#
# self.RRSVM = RRSVM(in_channels=64, gridsize=6)
#
#
# self.fc1 = nn.Linear(16 * 5 * 5, 120)
# # self.fc1_outputsize = getOutputSize(self.conv2_outputsize.view(), self.fc1)
#
# self.fc2 = nn.Linear(120, 84)
# # self.fc2_outputsize = getOutputSize(self.fc1_outputsize, self.fc2)
#
# self.fc3 = nn.Linear(84, 10)
# # self.fc3_outputsize = getOutputSize(self.fc2_outputsize, self.fc3)
#
# print "InitDone"
#
# def forward(self, x):
# x = self.pool1(F.relu(self.conv1(x)))
# x = self.pool2(F.relu(self.conv2(x)))
# x = x.view(-1, 16 * 5 * 5)
# x = F.relu(self.fc1(x))
# x = F.relu(self.fc2(x))
# x = self.fc3(x)
# return x
if __name__ == '__main__':
    # Smoke test: push a few random 3-image batches through the RRSVM network.
    baseNet = RRSVMNetV1()
    for i in range(10):
        input_tensor = torch.FloatTensor(torch.randn([3, 3, 32, 32]))
        input_v = Variable(input_tensor, requires_grad=True)
        output = baseNet(input_v)
    # NOTE(review): indentation was lost in this copy; "DONE" is assumed to be
    # printed once after the loop — confirm against the original file.
    print("DONE")
| [
"zwei@adobe.com"
] | zwei@adobe.com |
5517731694153dd6d0907d6cfb449d2a3888c706 | 4e30749e4ec62929de303f7399915e8863e24c74 | /P148.py | a1f5e211375aad1d96d2553df8e02c24ad81627e | [] | no_license | aditi419/P148h | 8af7780494bb421e0847356b1b3e5403c82b74f5 | fedd92144aec375f5c41826ea75832224578f450 | refs/heads/main | 2023-07-15T16:51:39.427471 | 2021-08-28T16:02:27 | 2021-08-28T16:02:27 | 400,833,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | from tkinter import *
import random
root = Tk()
root.title('List')
root.geometry('400x400')

ListItems = Label(root)
RandomNum = Label(root)

# Items the user can be told to pack.
picnicItems = ['Picnic Blanket', 'Picnic Basket', 'Cooler', 'Chairs']
ListItems['text'] = 'Listed Items: ' + str(picnicItems)


def stuff():
    """Pick one random item index and tell the user to put that item in the bag."""
    # Bug fix: the original sampled from range(0, 3), so the last item
    # ('Chairs', index 3) could never be chosen. Sample over the whole list.
    randomNumbers = random.sample(range(0, len(picnicItems)), 1)
    RandomNum['text'] = 'Put Item Number' + str(randomNumbers) + "In Bag"


btn = Button(root, text='Which item to put in bag?', command=stuff, bg='orange', fg='black')
btn.place(relx=0.5, rely=0.5, anchor=CENTER)
ListItems.place(relx=0.5, rely=0.6, anchor=CENTER)
RandomNum.place(relx=0.5, rely=0.7, anchor=CENTER)

root.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
edd843a86dade9def9d2f7c90e31ddddc0266609 | ad0ec4a275f4733208f199d97f7ab4c15db51312 | /lib/django_dbtemplates-0.8.0-py2.6.egg/dbtemplates/admin.py | 1b0924bdf1a9832f5fc38e8bd930e232a879bee2 | [] | no_license | OKTago/Bore | 179f9409ae443a2e4bdff4a402aeca2d07eaaba4 | a5043ca6803a659f5d411e0c7fccee3ab98244aa | refs/heads/master | 2021-01-10T20:26:20.049364 | 2010-11-11T10:11:26 | 2010-11-11T10:11:26 | 1,071,075 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,272 | py | import posixpath
from django import forms
from django.contrib import admin
from django.utils.translation import ungettext, ugettext_lazy as _
from django.utils.safestring import mark_safe
from dbtemplates import settings
from dbtemplates.models import (Template, backend, remove_cached_template,
add_template_to_cache)
# Check if django-reversion is installed and use reversions' VersionAdmin
# as the base admin class if yes
if settings.USE_REVERSION:
from reversion.admin import VersionAdmin as TemplateModelAdmin
else:
from django.contrib.admin import ModelAdmin as TemplateModelAdmin
class CodeMirrorTextArea(forms.Textarea):
    """Textarea widget wiring the CodeMirror browser editor to the
    Template model's content field."""

    class Media:
        css = dict(screen=[posixpath.join(settings.MEDIA_PREFIX, 'css/editor.css')])
        js = [posixpath.join(settings.MEDIA_PREFIX, 'js/codemirror.js')]

    def render(self, name, value, attrs=None):
        # Plain textarea first, then a script that replaces it with CodeMirror.
        textarea_html = super(CodeMirrorTextArea, self).render(name, value, attrs)
        script_html = u"""
<script type="text/javascript">
  var editor = CodeMirror.fromTextArea('id_%(name)s', {
    path: "%(media_prefix)sjs/",
    parserfile: "parsedjango.js",
    stylesheet: "%(media_prefix)scss/django.css",
    continuousScanning: 500,
    height: "40.2em",
    tabMode: "shift",
    indentUnit: 4,
    lineNumbers: true
  });
</script>
""" % dict(media_prefix=settings.MEDIA_PREFIX, name=name)
        return mark_safe(u"".join([textarea_html, script_html]))
# Use the CodeMirror editor only when it is enabled in settings; otherwise
# fall back to Django's plain textarea widget.
TemplateContentTextArea = CodeMirrorTextArea if settings.USE_CODEMIRROR else forms.Textarea

# Help text for the content field: only meaningful when auto-population of
# empty templates is switched on.
if settings.AUTO_POPULATE_CONTENT:
    content_help_text = _("Leaving this empty causes Django to look for a "
        "template with the given name and populate this field with its content.")
else:
    content_help_text = ""
class TemplateAdminForm(forms.ModelForm):
    """Admin form rendering the template content in a wide 24-row editor."""

    content = forms.CharField(
        widget=TemplateContentTextArea({'rows': '24'}),
        help_text=content_help_text,
        required=False,
    )

    class Meta:
        model = Template
class TemplateAdmin(TemplateModelAdmin):
    """Admin for database-stored templates.

    When a dbtemplates cache backend is configured, two admin actions are
    exposed to invalidate or repopulate the cached copies of the selected
    templates.
    """

    form = TemplateAdminForm
    fieldsets = (
        (None, {
            'fields': ('name', 'content'),
            'classes': ('monospace',),
        }),
        (_('Advanced'), {
            'fields': (('sites'),),
        }),
        (_('Date/time'), {
            'fields': (('creation_date', 'last_changed'),),
            'classes': ('collapse',),
        }),
    )
    list_display = ('name', 'creation_date', 'last_changed', 'site_list')
    list_filter = ('sites',)
    search_fields = ('name', 'content')

    # The cache actions are only registered when a backend is active.
    if backend:
        actions = ['invalidate_cache', 'repopulate_cache']

    def invalidate_cache(self, request, queryset):
        """Admin action: drop the cached copy of every selected template."""
        if not backend:
            self.message_user(request, ("There is no active cache backend."))
            return
        for template in queryset:
            remove_cached_template(template)
        message = ungettext(
            "Cache of one template successfully invalidated.",
            "Cache of %(count)d templates successfully invalidated.",
            len(queryset))
        self.message_user(request, message % {'count': len(queryset)})
    invalidate_cache.short_description = _("Invalidate cache of selected templates")

    def repopulate_cache(self, request, queryset):
        """Admin action: re-add every selected template to the cache."""
        if not backend:
            self.message_user(request, ("There is no active cache backend."))
            return
        for template in queryset:
            add_template_to_cache(template)
        message = ungettext(
            "Cache successfully repopulated with one template.",
            "Cache successfully repopulated with %(count)d templates.",
            len(queryset))
        self.message_user(request, message % {'count': len(queryset)})
    repopulate_cache.short_description = _("Repopulate cache with selected templates")

    def site_list(self, template):
        """Comma-separated site names, shown in the change list."""
        return ", ".join(site.name for site in template.sites.all())
    site_list.short_description = _('sites')


admin.site.register(Template, TemplateAdmin)
| [
"marco.ferragina@oktago.com"
] | marco.ferragina@oktago.com |
9e6ce10d1dd0a0d4a8cdb718b8d5834908c38f81 | 1cb496cab9ddf375083bb13dfefb40757d15a5dc | /pyrigate/config.py | a7c15da434008b6c7bf0d486f968c0553e414e35 | [
"MIT"
] | permissive | AmerTm/pyrigate | 201c9d5ae38470c0eda6e14a7b26d958d43c611b | d9002f80a6b17078e99cec892f87bac0e6e0bb59 | refs/heads/master | 2020-08-20T11:41:55.614329 | 2019-08-14T21:30:49 | 2019-08-14T21:30:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,927 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Generic plant configuration class."""
import json
from pyrigate.validation import plant_configuration_schema
class PlantConfiguration(object):
    """Light-weight configuration class for plants.

    Wraps a schema-validated JSON mapping and exposes the common settings as
    read-only properties. Its main purpose is to provide sensible defaults
    when certain settings are not found: an instance created without a path
    is simply "invalid" and holds no settings.
    """

    def __init__(self, path=None):
        """Initialise a configuration, optionally reading it from *path*."""
        self._config = {}
        self.load(path)

    @classmethod
    def extension(cls):
        """File extension of configuration files."""
        return 'json'

    def load(self, path):
        """Load and validate a plant configuration from a JSON file.

        A falsy *path* leaves the configuration empty (and thus invalid).
        """
        if path:
            with open(path) as fh:
                self._config = \
                    plant_configuration_schema.validate(json.load(fh))

    @property
    def valid(self):
        """True once a non-empty configuration has been loaded or set."""
        return bool(self._config)

    @property
    def name(self):
        return self._config['name']

    @property
    def description(self):
        return self._config['description']

    @property
    def scheme(self):
        return self._config['scheme']

    @property
    def amount(self):
        return self._config['amount']

    @property
    def frequency(self):
        return self._config['frequency']

    @property
    def per(self):
        return self._config['per']

    @property
    def times(self):
        return self._config['times']

    def __getitem__(self, key):
        return self._config[key]

    def __setitem__(self, key, value):
        self._config[key] = value

    def __str__(self):
        # Bug fix: max() over an empty sequence raises ValueError, so an
        # empty/invalid configuration is rendered as an empty string.
        if not self._config:
            return ''
        result = []
        max_width = max(len(str(v)) for v in self._config.values())
        # '{{bold}}'/'{{reset}}' render as literal '{bold}'/'{reset}' markers.
        fmt = '{{bold}}{0:<20} {{reset}}{1:>{2}}'
        for k, v in self._config.items():
            line = fmt.format(k, str(v), max_width)
            result.append(line)
        return '\n'.join(result)
| [
"alexander.asp.bock@gmail.com"
] | alexander.asp.bock@gmail.com |
ad6c0f965ce7a6c341d00ced7b1dbf7ac4aa9582 | 091f397adba3eeb36557ff84f6b0845e2952ba48 | /roles/.venv/bin/ansible-test | 8821d1729640795f4b3a7966041cf003409217d9 | [
"MIT",
"BSD-3-Clause"
] | permissive | Alex-Mbulankende/testingshit | 0d6d07d0754704abb8a76cb6fe5e620ff96b47d5 | 6cbe5c8da2c030d1e3c989b86adf7b740ac65418 | refs/heads/master | 2022-03-12T04:11:00.763799 | 2019-12-01T05:31:19 | 2019-12-01T05:31:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | #!/Users/tjblogumas/blogumas/development/testingshit/roles/.venv/bin/python3
# PYTHON_ARGCOMPLETE_OK
"""Command line entry point for ansible-test."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
def main():
    """Main program entry point.

    Prefers the in-tree copy of ansible-test (when running from a source
    checkout) over any version that may already be installed.
    """
    ansible_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    source_root = os.path.join(ansible_root, 'test', 'lib')
    cli_module = os.path.join(source_root, 'ansible_test', '_internal', 'cli.py')

    if os.path.exists(cli_module):
        # Running from source: shadow any installed ansible-test.
        sys.path.insert(0, source_root)

    # noinspection PyProtectedMember
    from ansible_test._internal.cli import main as cli_main
    cli_main()


if __name__ == '__main__':
    main()
| [
"tj.blogumas@codeblogkc.com"
] | tj.blogumas@codeblogkc.com | |
1be0ca744558dc2bd73eb51acecfb4a29395a6ae | 0cb534f6625b7b1ef2e3779b30fae31777fae2af | /products/migrations/0012_auto_20201019_1008.py | f708662a024ef7e398618e4d80567c207c2d933d | [] | no_license | ducnguyen99/digital_world | 34f2496567ff64f6907f79897c74aeb9c15c87ad | ff5259ed0b4fce961f160567e0a97b3f201726d4 | refs/heads/master | 2022-12-31T21:43:26.415363 | 2020-10-22T11:16:33 | 2020-10-22T11:16:33 | 286,378,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | # Generated by Django 3.1 on 2020-10-19 10:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter Order.delivery_price to a choice-limited decimal."""

    dependencies = [
        ('products', '0011_auto_20201019_0944'),
    ]

    operations = [
        migrations.AlterField(
            model_name='order',
            name='delivery_price',
            field=models.DecimalField(
                blank=True,
                choices=[(0, 0), (5, 5), (12, 12)],
                decimal_places=2,
                default=0,
                max_digits=7,
                null=True,
            ),
        ),
    ]
| [
"nguyenhuungocduc@gmail.com"
] | nguyenhuungocduc@gmail.com |
53c8084f71dafc506e57810a71b1c84208a39d2e | 5c68846730c473da870176862b08847c289d711c | /I0320014_soal1_tugas6.py | 40f95957dead4be1c46bd0af36699e544acf7abf | [] | no_license | audreyalexandra/Audrey-Alexandra_I0320014_Wildan_Tugas6 | 0a55eb1d77d6574be7fcf9991d05654824c14077 | 56eaab5f1a7d4fee7f59f4f4cfc1b4ac329de08f | refs/heads/main | 2023-04-05T19:48:51.153369 | 2021-04-09T23:58:25 | 2021-04-09T23:58:25 | 355,900,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | i = 1
while i<=10:
print("Hello World")
i= i+1 | [
"audreyalexandra18@gmail.com"
] | audreyalexandra18@gmail.com |
27b97a688d3bb6fd3ac0f10b1ed784f008c2145f | fa4313083d646eaa0b0e071d6367e6f3feccebef | /replace_except_expr.py | 768036b62d1a9dd231eb56e3ce211ac45401648a | [] | no_license | Rosuav/ExceptExpr | 0be15eb05de52067f556a290a7518ad764379e1b | 09392a7de1dd301e70061d8b9e557f4f14fc3d2b | refs/heads/master | 2016-09-05T14:49:10.306855 | 2014-03-04T14:04:21 | 2014-03-04T14:04:21 | 16,904,403 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,830 | py | import ast
import sys
sys.path.append('pep-463-except-expr/Tools/parser')
sys.path.append('Tools/parser')
import find_except_expr
# If true, comment blocks will be inserted to explain the changes made.
# Otherwise, the translation will be "pure".
explain = True
# If omitted, expression->expression except blocks will be translated. This is
# not recommended for normal use - it leads to many translations which, while
# technically correct, are not really helpful. So it's better to not, except
# maybe to stress-test the translations.
find_except_expr.compare_key[ast.Expr] = lambda node: float("nan")
class Walker(find_except_expr.Walker):
    """AST walker that collects try/except statements eligible for PEP 463
    except-expression translation as (first_line, last_line, node) triples."""

    def __init__(self, *args):
        self.updates = []
        super().__init__(*args)

    def report(self, node, desc):
        # The except-expression form cannot express 'as name'; skip those.
        if node.handlers[0].name is not None:
            return
        # Record the span of source lines covered by this statement.
        # TODO: Make sure nothing else shares these lines. For now, this
        # just specifies the line numbers to be deleted.
        line_numbers = [n.lineno for n in ast.walk(node) if hasattr(n, 'lineno')]
        self.updates.append((min(line_numbers), max(line_numbers), node))
# TODO: This assumes all indentation is done with spaces. Support tabs.
# TODO: This assumes all indentation is done with spaces. Support tabs.
def indent(line):
    """Return the leading run of space bytes from *line* (a bytes object)."""
    stripped = line.lstrip(b' ')
    return b' ' * (len(line) - len(stripped))
def search(fn):
    """Rewrite eligible try/except statements in file *fn* in place,
    replacing each with a PEP 463 except-expression form.

    NOTE(review): ast.ExceptExp only exists on the PEP 463 patched Python
    build (see the sys.path additions at the top of this file) — this will
    not run on a stock interpreter.
    """
    with open(fn, "rb") as f:
        data = f.read()
    try:
        tree = ast.parse(data)
    except SyntaxError:
        return
    walker = Walker(fn)
    walker.visit(tree)
    if not walker.updates:
        return  # Nothing to change.
    # Sentinel entry so the pop loop below always has a "next" update.
    walker.updates.append((float("inf"), float("inf"), None))
    print("Updating", fn)
    with open(fn, "wb") as f:
        stop = -1
        for lineno, line in enumerate(data.split(b"\n")[:-1], 1):
            # Advance to the update whose span contains (or follows) lineno.
            while lineno > stop:
                start, stop, node = walker.updates.pop(0)
            if lineno < start:
                # Outside any update span: copy the line through unchanged.
                f.write(line + b"\n")
                continue
            if lineno == start:
                # Indent with the same number of spaces as the start line.
                sp = indent(line)
                if explain:
                    f.write(sp + b"# PEP 463 automated translation:\n")
                # Build the except-expression from the try body and handler.
                # This transformation works for Assign, AugAssign, and Expr.
                exceptexpr = ast.ExceptExp(
                    body=node.body[0].value,
                    etype=node.handlers[0].type or ast.Name("BaseException", ast.Load()),
                    value=node.handlers[0].body[0].value,
                )
                node = node.body[0]  # Should this do a deepcopy?
                node.value = exceptexpr
                # ASCII is assumed sufficient for the generated code; this is
                # only useful when find_except_expr's unparse is loaded.
                f.write(sp + find_except_expr.dump(node).encode('ascii') + b"\n")
                # Optionally echo the original first line as a comment.
                if explain:
                    f.write(sp + b'# ' + line[len(sp):] + b'\n')
            if lineno == stop:
                f.write(sp + b"# End PEP 463 translation\n")


if __name__ == "__main__":
    for fn in sys.argv[1:]:
        search(fn)
| [
"rosuav@gmail.com"
] | rosuav@gmail.com |
431bfc1d0f37c81c577f6d7e7ac0080fdb36555c | a71ed9977a7774a6e0fc06e83bd01647de46bdb5 | /apigateway/logic/rpe_predictor.py | ac8cf608a26d381efa87ea8ff557992857f1c9c6 | [] | no_license | biometrixtech/plans | 7ba4df97d84d7bf252a919e88fa9cbaa597fde0a | 4d7efb77baca15e8722c37d2cdc7dc84d878ae90 | refs/heads/master | 2022-12-25T00:11:54.022346 | 2020-08-14T14:23:44 | 2020-08-14T14:23:44 | 138,765,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,143 | py | import os
from models.movement_tags import Gender
from fathomapi.utils.xray import xray_recorder
from datastores.ml_model_datastore import MLModelsDatastore
class RPEPredictor(object):
    """Predicts a session RPE (rating of perceived exertion) from a heart-rate
    observation using the pre-trained heart-rate model."""

    def __init__(self):
        # Pre-trained model loaded once per predictor instance.
        self.model = MLModelsDatastore.get_hr_model()

    @xray_recorder.capture('logic.RPEPredictor.predict_rpe')
    def predict_rpe(self, hr, user_weight=60.0, user_age=20.0, vo2_max=40.0, gender=Gender.female):
        """Predict RPE for a single heart-rate reading.

        :param hr: observed heart rate (beats per minute)
        :param user_weight: body weight in kg
        :param user_age: age in years (used for the max heart-rate estimate)
        :param vo2_max: estimated VO2 max
        :param gender: models.movement_tags.Gender value
        :return: predicted RPE, clamped at >= 0 and rounded to one decimal
            (a fixed value of 4 during CI builds, where no model is loaded)
        """
        # The model encodes gender as 1.0 (female) / 0.0 (male); keep the
        # enum parameter intact instead of rebinding it to a float.
        gender_feature = 1.0 if gender == Gender.female else 0.0

        # Age-predicted maximum heart rate (207 - 0.7 * age).
        max_hr = 207 - .7 * user_age
        percent_max_hr = hr / max_hr

        features = [[gender_feature, user_weight, vo2_max, max_hr, percent_max_hr]]

        if os.environ.get('CODEBUILD_RUN', '') == 'TRUE':
            # CI has no model artifacts; return a fixed mid-range RPE.
            return 4

        predicted_rpe = self.model.predict(features)[0]
        if predicted_rpe < 0:
            predicted_rpe = 0
        return round(predicted_rpe, 1)
| [
"dipesh@biometrixtech.com"
] | dipesh@biometrixtech.com |
ed4e407b9e2ea18bea2534cf9cef0cfcc4442a0e | cbb5694845e59f2ba94f687075364f450150d0ad | /users/migrations/0007_alter_profile_bio.py | 760ebed2d27dea0ed0a2c7489d179f9f11f2b752 | [] | no_license | PhantomManYt/hackingtons_social-master | e1fafa81ec68275728e64d777d60613af071d746 | 4ee7747154aa9cfe5ff3b0fec5ba73821021ceb5 | refs/heads/main | 2023-07-13T08:30:15.930469 | 2021-08-20T04:40:16 | 2021-08-20T04:40:16 | 398,139,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # Generated by Django 3.2.6 on 2021-08-20 00:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Profile.bio an optional free-text field."""

    dependencies = [
        ('users', '0006_alter_profile_image'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='bio',
            field=models.TextField(blank=True, null=True),
        ),
    ]
| [
"64277260+PhantomManYt@users.noreply.github.com"
] | 64277260+PhantomManYt@users.noreply.github.com |
072c176eb09f3f0d1c12137a8e34e31477c5eac4 | a8e152cea157b18703c3dc368079b14db1296e5c | /apf/metrics/pyroscope/__init__.py | c0c090a73e2a9018bc5932ceb8b61bdeb11e2565 | [
"MIT"
] | permissive | alercebroker/APF | f2f148c376c0dd9ae27f573e8c84eadb78e813b0 | b2333f52b81486654a396254ee9f40a687f4b080 | refs/heads/main | 2023-08-08T12:27:19.397352 | 2023-07-21T16:01:03 | 2023-07-21T16:01:03 | 223,983,195 | 3 | 2 | MIT | 2023-07-21T16:01:04 | 2019-11-25T15:38:52 | Python | UTF-8 | Python | false | false | 29 | py | from .profile import profile
| [
"pedrogallardorobinson@gmail.com"
] | pedrogallardorobinson@gmail.com |
29e0124b661379789bc2c4f409a4f7bd2b152893 | 45d0f74916a2f63b6b73937a4210247bd17e295d | /kite_purchase.py | 8960c4fddeb0ab1ad3f26a8df71a97249444d59b | [] | no_license | Pradeep9497/manufacturingerp | 100167dd66f619c2b5dd9b66b8364414ffeb77a4 | 3de338cbf98109fe47aace21fcab0a823239457b | refs/heads/master | 2021-09-10T23:56:50.690416 | 2018-04-04T13:49:33 | 2018-04-04T13:49:33 | 114,199,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,787 | py | from openerp import tools
from openerp.osv import osv, fields
from openerp.tools.translate import _
import time
from datetime import date
import openerp.addons.decimal_precision as dp
from datetime import datetime
dt_time = time.strftime('%m/%d/%Y %H:%M:%S')
class kite_purchase(osv.osv):
    """Purchase indent (KG Purchase) raised against a department indent."""
    _name = "kite.purchase"
    _description = "KG Purchase"
    _order = "creation_date desc"

    def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
        """Functional field: amount_total = round(sum(qty * unit price))."""
        res = {}
        for order in self.browse(cr, uid, ids, context=context):
            res[order.id] = {
                'amount_total': 0.0,
            }
            line_total = 0
            for line in order.line_id:
                line_total += line.required_qty * line.unit_price
            res[order.id]['amount_total'] = round(line_total)
        return res

    _columns = {
        'name': fields.char('Purchase Indent No', size=128, readonly=True, states={'draft': [('readonly', False)]}),
        'supplier': fields.many2one('res.partner', 'Supplier', size=128),
        'ind_no': fields.char('Department Indent No', size=128, select=True, readonly=True, states={'draft': [('readonly', False)]}),
        'ind_date': fields.char('Department Indent Date', size=128, select=True, readonly=True, states={'draft': [('readonly', False)]}),
        'dept_name': fields.char('Department Name', size=128, select=True, readonly=True, states={'draft': [('readonly', False)]}),
        'dept_code': fields.char('Department code', size=128, select=True, readonly=True, states={'draft': [('readonly', False)]}),
        'creation_date': fields.datetime('Creation Date', readonly=True),
        'state': fields.selection([('draft', 'Draft'), ('cancel', 'Cancelled'), ('confirm', 'Confirmed')], 'Status'),
        'active': fields.boolean('Active'),
        'amount_total': fields.function(_amount_all, string='Total', store=True, multi="sums", help="The total amount"),
        'line_id': fields.one2many('kite.purchase.line', 'product_id', 'product_line', readonly=True, states={'draft': [('readonly', False)]}),
    }

    _defaults = {
        # NOTE(review): duplicate key kept from the original — the strftime
        # lambda below silently overrides this context_today default.
        'creation_date': fields.date.context_today,
        'state': 'draft',
        'active': True,
        'creation_date': lambda *a: time.strftime('%Y-%m-%d'),
    }

    def create(self, cr, uid, vals, context=None):
        """Assign the next 'kite.purchase' sequence number when none is given."""
        if vals.get('name', '/') == '/':
            vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'kite.purchase') or '/'
        return super(kite_purchase, self).create(cr, uid, vals, context=context)

    def confirm(self, cr, uid, ids, context=None):
        """Mark the first selected indent confirmed and send a notification mail."""
        purchase_plan_obj = self.pool.get('kite.purchase.planning')
        rec = self.browse(cr, uid, ids[0])
        rec.write({'state': 'confirm'})
        ir_mail_server = self.pool.get('ir.mail_server')
        msg = ir_mail_server.build_email(
            email_from=['radeepp0@gmail.com'],
            email_to=['prasanthnsn11@gmail.com'],
            subject='Test',
            body='Hi Nice',
            #~ email_cc = 'prasanth@gmail.com',
            object_id=ids and ('%s-%s' % (ids, 'purchase.indent')),
            subtype='html',
            subtype_alternative='plain')
        res = ir_mail_server.send_email(cr, uid, msg, mail_server_id=2, context=context)
        print("successs..............................................")
        return True


kite_purchase()
class kite_purchase_line(osv.osv):
    """Single line item of a purchase indent."""
    _name = "kite.purchase.line"
    _description = "KG Purchase line"

    _columns = {
        'product_name': fields.char('Product Name'),
        'product_code': fields.char('Product Code', size=128),
        'unit_price': fields.float('Unit Price'),
        'required_qty': fields.float('Required Qty'),
        'total_price': fields.float('Total price'),
        'product_id': fields.many2one('kite.purchase', 'Product line'),
        'pro_type': fields.selection([('direct', 'Direct'), ('fromcp', 'From purchase Plan')], 'Production Type'),
    }

    _defaults = {
        'pro_type': 'direct',
    }


kite_purchase_line()
| [
"noreply@github.com"
] | noreply@github.com |
a44c312b288d21db66156e2ee38ac70257256d20 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02918/s539072224.py | 2dbe24e4788b05696bc3160ba49b9b37d37af922 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | import sys
import numpy as np
input = lambda: sys.stdin.readline().rstrip()
INF = 10**9 + 1
def solve():
    """AtCoder ABC 140 D: maximise adjacent equal ("happy") pairs in S after
    at most K reversals of contiguous segments.

    Each reversal can increase the number of equal-neighbour pairs by at
    most 2 (one per segment boundary), and the count can never exceed N - 1,
    so the answer is simply min(N - 1, base + 2 * K) where base is the
    initial count. This replaces the original, much more convoluted greedy
    segment-merging pass (which also pulled in numpy unnecessarily).
    """
    N, K = map(int, input().split())
    S = input()
    # N == 1 falls out naturally: no pairs, min(0, 2K) == 0.
    base = sum(1 for a, b in zip(S, S[1:]) if a == b)
    print(min(N - 1, base + 2 * K))


if __name__ == '__main__':
    solve()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b66fee40edbe7f81bdb4c9bc16a49fc66944ec23 | cc6167caf1875d5cbad8cb83018c1ee60d9be665 | /Py07/to_upper.py | df13cc5b32cdb796efff747c16c08b2fae6fcfaa | [] | no_license | rcabezas29/Python-Basics | d1bc51c31f1d279f392b6b91b8b9ba36548b4270 | 00adb96b69b701a1ccb0f0d9b89ea1dd6f57b2a1 | refs/heads/main | 2023-06-16T13:59:25.469943 | 2021-07-06T14:01:33 | 2021-07-06T14:01:33 | 365,013,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | def to_upper(s):
a = s.lower()
return a.swapcase()
def main():
    """Demo: print the sample word in upper case."""
    print(to_upper("Hola"))


if __name__ == "__main__":
    main()
"noreply@github.com"
] | noreply@github.com |
efd021c0316156776876ce0abeeb3c3283a39a3d | eea3f04dc73d4536083c74cac4478835a31c4a94 | /chinese_song_generation/data_utils.py | 75424c8a5cc4c880bf635faf9ab900953138832f | [] | no_license | yscoder-github/news-generate | 15d5f9acecc92add201fb3c53aa211c0aa474e1f | 6b8a98375db984dea9edb4abff72191477bdb406 | refs/heads/master | 2023-05-26T19:58:00.797573 | 2019-07-18T01:30:36 | 2019-07-18T01:30:36 | 187,489,859 | 4 | 4 | null | 2023-05-22T22:14:54 | 2019-05-19T14:50:32 | Python | UTF-8 | Python | false | false | 5,906 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for downloading data from WMT, tokenizing, vocabularies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import jieba
from six.moves import urllib
from tensorflow.python.platform import gfile
# Special vocabulary symbols - we always put them at the start.
_PAD = b"_PAD"  # padding token for fixed-length batches
_GO = b"_GO"  # decoder start-of-sequence token
_EOS = b"_EOS"  # end-of-sequence token
_UNK = b"_UNK"  # out-of-vocabulary token
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]

# Fixed ids of the special symbols (their indices in _START_VOCAB).
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3

# Regular expressions used to tokenize.
_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")  # punctuation splitter (unused here; kept for callers)
_DIGIT_RE = re.compile(br"\d")  # matches single digits for 0-normalisation
def basic_tokenizer(sentence):
    """Very basic tokenizer: segment the sentence with jieba and return the
    tokens as a list of bytes objects."""
    words = []
    for fragment in jieba.cut(sentence.strip()):
        # jieba may yield str; normalise everything to bytes.
        if isinstance(fragment, str):
            words.append(str.encode(fragment))
        else:
            words.append(fragment)
    return words
def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,
                      tokenizer=None, normalize_digits=False):
    """Create a vocabulary file from data_path (one token per line, most
    frequent first, special symbols at the top), capped at
    max_vocabulary_size entries. A no-op when vocabulary_path exists."""
    if gfile.Exists(vocabulary_path):
        return
    print("Creating vocabulary %s from %s" % (vocabulary_path, data_path))
    vocab = {}
    with gfile.GFile(data_path, mode="rb") as f:
        counter = 0
        for line in f:
            counter += 1
            if counter % 100 == 0:
                print(" processing line %d" % counter)
            tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)
            for w in tokens:
                word = re.sub(_DIGIT_RE, b"0", w) if normalize_digits else w
                vocab[word] = vocab.get(word, 0) + 1
    vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
    print('>> Full Vocabulary Size :', len(vocab_list))
    if len(vocab_list) > max_vocabulary_size:
        vocab_list = vocab_list[:max_vocabulary_size]
    with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
        for w in vocab_list:
            vocab_file.write(w + b"\n")
def initialize_vocabulary(vocabulary_path):
    """Load a vocabulary file.

    Returns (vocab, rev_vocab): a token -> id dict and the id -> token list.
    Raises ValueError when the file does not exist.
    """
    if not gfile.Exists(vocabulary_path):
        raise ValueError("Vocabulary file %s not found.", vocabulary_path)
    rev_vocab = []
    with gfile.GFile(vocabulary_path, mode="rb") as f:
        rev_vocab.extend(f.readlines())
    rev_vocab = [line.strip() for line in rev_vocab]
    vocab = dict((x, y) for (y, x) in enumerate(rev_vocab))
    return vocab, rev_vocab
def sentence_to_token_ids(sentence, vocabulary, tokenizer=None, normalize_digits=False):
    """Convert a raw sentence into a list of token ids, mapping
    out-of-vocabulary tokens to UNK_ID."""
    words = tokenizer(sentence) if tokenizer else basic_tokenizer(sentence)
    if not normalize_digits:
        # NOTE(review): the non-normalised path decodes tokens to str before
        # the lookup while the normalised path looks up bytes — the two paths
        # assume differently-keyed vocabularies; confirm against callers.
        return [vocabulary.get(w.decode("utf8"), UNK_ID) for w in words]
    # Normalize digits by 0 before looking words up in the vocabulary.
    return [vocabulary.get(re.sub(_DIGIT_RE, b"0", w), UNK_ID) for w in words]
def data_to_token_ids(data_path, target_path, vocabulary_path,
                      tokenizer=None, normalize_digits=False):
  """Tokenize `data_path` into token ids and write them to `target_path`.

  Does nothing if `target_path` already exists.  Each output line holds
  the space-separated integer ids of the corresponding input line.
  """
  if gfile.Exists(target_path):
    return  # already tokenized -- nothing to do
  print("Tokenizing data in %s" % data_path)
  vocab, _ = initialize_vocabulary(vocabulary_path)
  with gfile.GFile(data_path, mode="rb") as source:
    with gfile.GFile(target_path, mode="w") as sink:
      for line_no, line in enumerate(source, start=1):
        if line_no % 100000 == 0:
          print("  tokenizing line %d" % line_no)
        ids = sentence_to_token_ids(line, vocab, tokenizer,
                                    normalize_digits)
        sink.write(" ".join(str(tok) for tok in ids) + "\n")
def prepare_custom_data(working_directory, train_enc, train_dec, test_enc, test_dec, enc_vocabulary_size, dec_vocabulary_size, tokenizer=None):
  """Build vocabularies and token-id files for a custom parallel corpus.

  Creates (if missing) the encoder/decoder vocabularies from the training
  data, then writes ".idsN" token-id versions of all four data files.

  Returns:
    A 6-tuple (enc_train_ids_path, dec_train_ids_path, enc_dev_ids_path,
    dec_dev_ids_path, enc_vocab_path, dec_vocab_path).
  """
  # Vocabularies of the requested sizes, derived from the training data.
  enc_vocab_path = os.path.join(working_directory, "vocab%d.enc" % enc_vocabulary_size)
  dec_vocab_path = os.path.join(working_directory, "vocab%d.dec" % dec_vocabulary_size)
  create_vocabulary(enc_vocab_path, train_enc, enc_vocabulary_size, tokenizer)
  create_vocabulary(dec_vocab_path, train_dec, dec_vocabulary_size, tokenizer)
  # Token-id files for the training data.
  enc_train_ids_path = "%s.ids%d" % (train_enc, enc_vocabulary_size)
  dec_train_ids_path = "%s.ids%d" % (train_dec, dec_vocabulary_size)
  data_to_token_ids(train_enc, enc_train_ids_path, enc_vocab_path, tokenizer)
  data_to_token_ids(train_dec, dec_train_ids_path, dec_vocab_path, tokenizer)
  # Token-id files for the development (test) data.
  enc_dev_ids_path = "%s.ids%d" % (test_enc, enc_vocabulary_size)
  dec_dev_ids_path = "%s.ids%d" % (test_dec, dec_vocabulary_size)
  data_to_token_ids(test_enc, enc_dev_ids_path, enc_vocab_path, tokenizer)
  data_to_token_ids(test_dec, dec_dev_ids_path, dec_vocab_path, tokenizer)
  return (enc_train_ids_path, dec_train_ids_path, enc_dev_ids_path, dec_dev_ids_path, enc_vocab_path, dec_vocab_path)
| [
"yscoder@foxmail.com"
] | yscoder@foxmail.com |
68458c655c1f3d9180eeeb19b34f722715887bfa | 79646b0fb15c2f609798ece7c2068af694447bc4 | /scripts/start1.py | 742d4083ab4888aa8a32d59870d339dd7abbf95f | [] | no_license | ankitsaini2609/Automation | ddc689ee2387bf0c09a067d1615b4ab83d6cbba7 | fd67c39edbfc5bea20ee1e3e8bd2d6b44da2329b | refs/heads/master | 2020-03-11T01:53:44.810857 | 2018-08-06T18:39:20 | 2018-08-06T18:39:20 | 129,704,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | import os,shutil,subprocess
# Copy the automation script into every subdirectory of the current working
# directory and run it there, continuing past individual failures.
path = os.getcwd()
for item in os.listdir(path):
    subdir = os.path.join(path, item)
    if not os.path.isdir(subdir):
        continue
    shutil.copyfile("/root/dataset/scripts/automate.sh",
                    os.path.join(subdir, "automate.sh"))
    try:
        # Run the script with cwd= instead of the previous os.chdir()
        # dance, so the process-wide working directory is never mutated.
        subprocess.check_output(["/bin/bash", "-c", "sh automate.sh"], cwd=subdir)
    except (subprocess.CalledProcessError, OSError) as err:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) and now reports what went wrong.
        print("Problem in subprocess:", err)
| [
"ankitsaini2609@gmail.com"
] | ankitsaini2609@gmail.com |
b5f48eaec9414d0da620433ba933b3f89f318c36 | 98decc08fe01b81af5e790288425a8dde1772a14 | /demo.py | 4d07446ef5f61896a581f5c816fb93949ff70cfa | [] | no_license | bcwingnut/apriori | 6f204ba90083ee3c9c4798bbfa1849aad8046150 | 2d5224478e02c2a1441d0429398c83e45447642e | refs/heads/master | 2020-03-09T05:59:03.319977 | 2018-06-22T12:38:59 | 2018-06-22T12:38:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | import apriori
# Train the model and write the frequent item groups to file.
#   support      = minimum number of lines containing a group of items
#                  needed to consider that the group appears frequently
#   a line of data is valid only if
#     min_set_size <= number of items in the line <= max_set_size
AP = apriori.APriori(data='./test_datasets/freqout.out',
                     out='./test_datasets/result.txt')
AP.find_frequent(support=50, min_set_size=2, max_set_size=5)
# Read previously trained data back from file.
#   wanted_items   = [items needing to be found]
#   detected_items = [items detected by other procedures]
# Disabled example usage (kept verbatim):
'''
AP.read_data(data_file='./test_datasets/result.txt')
frequent_list = AP.get_frequent_list_of(
    wanted_items=[36], detected_items=[38, 39])
print(frequent_list[36])
'''
"csh199883@gmail.com"
] | csh199883@gmail.com |
64b73e76e9a871dc1c4f0f5b76f1a170080ce545 | 74dfbefd05ff71903bc2d13979100ac9a903de8e | /trajectron/evaluation/evaluation.py | 92c45f2b1a62134dc68ff1e4319a7c872f56ba86 | [
"MIT",
"BSD-2-Clause"
] | permissive | zhuMingXu/social-nce-trajectron-plus-plus | 0de7c256f49d86591ecf56659788fd5da9276f63 | fb36669c52af964a8afa58304b9b407e394a3105 | refs/heads/master | 2023-06-19T23:41:43.608244 | 2021-07-22T21:27:54 | 2021-07-22T21:27:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,343 | py | import numpy as np
from scipy.interpolate import RectBivariateSpline
from scipy.ndimage import binary_dilation
from scipy.stats import gaussian_kde
from utils import prediction_output_to_trajectories
import visualization
from matplotlib import pyplot as plt
def compute_ade(predicted_trajs, gt_traj):
    """Average displacement error: mean Euclidean distance to the ground
    truth over the prediction horizon, flattened to a 1-D array over all
    batches/samples."""
    step_errors = np.linalg.norm(predicted_trajs - gt_traj, axis=-1)
    return step_errors.mean(axis=-1).flatten()
def compute_fde(predicted_trajs, gt_traj):
    """Final displacement error: Euclidean distance between each predicted
    endpoint and the ground-truth endpoint, flattened to a 1-D array."""
    endpoint_diff = predicted_trajs[:, :, -1] - gt_traj[-1]
    return np.linalg.norm(endpoint_diff, axis=-1).flatten()
def compute_kde_nll(predicted_trajs, gt_traj):
    """Negative log-likelihood of the ground truth under a Gaussian KDE
    fitted to the predicted samples, averaged over batches and timesteps.

    Args:
        predicted_trajs: array of shape (num_batches, num_samples, timesteps, 2).
        gt_traj: ground-truth trajectory of shape (timesteps, 2).

    Returns:
        The mean NLL; NaN if any per-timestep KDE fit is singular.
    """
    log_pdf_floor = -20  # clip very unlikely points to keep the sum bounded
    n_steps = gt_traj.shape[0]
    n_batches = predicted_trajs.shape[0]
    denom = n_steps * n_batches
    total = 0.
    for b in range(n_batches):
        for step in range(n_steps):
            try:
                fitted = gaussian_kde(predicted_trajs[b, :, step].T)
                logp = np.clip(fitted.logpdf(gt_traj[step].T),
                               a_min=log_pdf_floor, a_max=None)[0]
                total += logp / denom
            except np.linalg.LinAlgError:
                # Singular covariance: poison the accumulator (NaN absorbs
                # any later additions), matching the original behaviour.
                total = np.nan
    return -total
def compute_obs_violations(predicted_trajs, map):
    """Count how many predicted trajectories touch an obstacle on the map.

    Args:
        predicted_trajs: array of shape (num_trajs, timesteps, 2) in world frame.
        map: map object exposing `.data` (occupancy grid) and
            `.to_map_points()` (world -> map-pixel transform).

    Returns:
        The number of violating trajectories as a float.
    """
    # Inflate obstacles by 4 pixels, then build a bilinear interpolant over
    # the inflated grid (kx=ky=1 keeps it piecewise linear).
    inflated = binary_dilation(map.data.T, iterations=4)
    obstacle_field = RectBivariateSpline(range(map.data.shape[1]),
                                         range(map.data.shape[0]),
                                         inflated,
                                         kx=1, ky=1)
    num_trajs, num_steps = predicted_trajs.shape[0], predicted_trajs.shape[1]
    flat_points = map.to_map_points(predicted_trajs.reshape((-1, 2)))
    sampled = obstacle_field(flat_points[:, 0], flat_points[:, 1], grid=False)
    sampled = sampled.reshape((num_trajs, num_steps))
    # A trajectory violates if any of its points lies in inflated occupied space.
    return np.sum(sampled.max(axis=1) > 0, dtype=float)
def interpolate_traj(traj, num_interp=4):
    '''
    Insert `num_interp` linearly interpolated points between every pair of
    consecutive waypoints of each trajectory.

    Input:
        traj: array of shape (num_trajs, num_points, 2)
        num_interp: points inserted between each consecutive pair
    Returns:
        Array of shape (num_trajs, (num_points - 1) * (num_interp + 1) + 1, 2).
    '''
    n_trajs, n_pts = traj.shape[0], traj.shape[1]
    stride = num_interp + 1
    dense = np.zeros((n_trajs, (n_pts - 1) * stride + 1, 2))
    dense[:, :1, :] = traj[:, :1]
    for k in range(stride):
        w = (k + 1) / stride
        # Slot k+1, k+1+stride, ... gets the blend at fraction w of each segment.
        dense[:, k + 1::stride, :] = (1 - w) * traj[:, :-1] + w * traj[:, 1:]
    return dense
def compute_col(predicted_traj, predicted_trajs_all, thres=0.2, num_interp=4):
    '''
    Check, per (interpolated) time step, whether the primary agent collides
    with any other agent.

    Input:
        predicted_traj: (ph, 2) predicted trajectory of the primary agent
        predicted_trajs_all: (num_agents, ph, 2) predicted trajectories of
            all agents in the scene (including the primary agent)
        thres: distance below which two agents count as colliding
        num_interp: interpolation points inserted between consecutive steps
    Returns:
        Boolean array of length (ph - 1) * (num_interp + 1) + 1.
    '''
    # (Removed the dead local `ph` the original assigned but never used.)
    dense_all = interpolate_traj(predicted_trajs_all, num_interp)
    dense_ego = interpolate_traj(predicted_traj[None, :], num_interp)
    distances = np.linalg.norm(dense_all - dense_ego, axis=-1)
    # Drop the primary agent itself (zero distance at the first step).
    mask = distances[:, 0] > 0
    # NOTE(review): assumes at least one other agent survives the mask; an
    # empty selection would make `.min(axis=0)` raise (callers guard this).
    return distances[mask].min(axis=0) < thres
def compute_batch_statistics(prediction_output_dict,
                             dt,
                             max_hl,
                             ph,
                             node_type_enum,
                             kde=True,
                             obs=False,
                             map=None,
                             prune_ph_to_future=False,
                             best_of=False,
                             col=False):
    """Compute per-node error metrics (ADE/FDE, optional KDE NLL, map
    violations and collision rates) for a batch of predictions.

    Args:
        prediction_output_dict: raw model output, converted to trajectories
            via prediction_output_to_trajectories.
        dt, max_hl, ph: timestep, max history length and prediction horizon
            forwarded to the trajectory conversion.
        node_type_enum: iterable of node types used as result-dict keys.
        kde: if True, also compute the KDE negative log-likelihood.
        obs: if True, also count map-obstacle violations (requires `map`).
        map: map object for obs checks (shadows the builtin `map`).
        prune_ph_to_future: forwarded to the trajectory conversion.
        best_of: if True, keep only the best (min) ADE/FDE/KDE per node.
        col: if True, compute joint/cross/ground-truth collision indicators.

    Returns:
        dict: node_type -> metric name -> list of values.
    """
    (prediction_dict,
     _,
     futures_dict) = prediction_output_to_trajectories(prediction_output_dict,
                                                       dt,
                                                       max_hl,
                                                       ph,
                                                       prune_ph_to_future=prune_ph_to_future)
    batch_error_dict = dict()
    for node_type in node_type_enum:
        batch_error_dict[node_type] = {'ade': list(), 'fde': list(), 'col_joint': list(), 'col_truth': list(), 'col_cross': list(), 'kde': list(), 'obs_viols': list()}
    for t in prediction_dict.keys():
        if col:
            # Stack the first predicted sample of every agent in the scene
            # (index [0,0]) plus all ground-truth futures, for pairwise
            # collision checks below.
            prediction_joint = list()
            futures_joint = list()
            for node in prediction_dict[t].keys():
                prediction_joint.append(prediction_dict[t][node][0,0])
                futures_joint.append(futures_dict[t][node])
            prediction_joint = np.stack(prediction_joint, axis=0)
            futures_joint = np.stack(futures_joint, axis=0)
        for node in prediction_dict[t].keys():
            ade_errors = compute_ade(prediction_dict[t][node], futures_dict[t][node])
            fde_errors = compute_fde(prediction_dict[t][node], futures_dict[t][node])
            if col:
                # Neighbors = agents whose ground-truth start differs from
                # this node's (i.e. everyone but the node itself).
                idx_neighbors = abs(futures_joint[:, 0, 0] - futures_dict[t][node][None, 0, 0]) > 1e-8
                if idx_neighbors.sum() > 0:
                    num_interp = 4
                    # col_joint: prediction vs other predictions;
                    # col_cross: prediction vs others' ground truth;
                    # col_truth: ground truth vs others' ground truth.
                    col_joint = compute_col(prediction_dict[t][node][0,0], prediction_joint[idx_neighbors], num_interp=num_interp).astype(float)
                    col_cross = compute_col(prediction_dict[t][node][0,0], futures_joint[idx_neighbors], num_interp=num_interp).astype(float)
                    col_truth = compute_col(futures_dict[t][node], futures_joint[idx_neighbors], num_interp=num_interp)
                    # Mask out timesteps where even the ground truth collides.
                    col_joint[col_truth] = float('nan')
                    col_cross[col_truth] = float('nan')
                    col_truth = col_truth.astype(float)
                    if col_truth.any():
                        # skip frames where the ground truth observations lead to collisions
                        ade_errors[:] = float('nan')
                        fde_errors[:] = float('nan')
                else:
                    # No neighbors: emit all-NaN indicator arrays.
                    # NOTE(review): 56 matches (12 - 1) * (4 + 1) + 1, i.e. a
                    # prediction horizon of 12 with num_interp=4 -- confirm.
                    col_joint = np.array([float('nan')] * (56))
                    col_truth = np.array([float('nan')] * (56))
                    col_cross = np.array([float('nan')] * (56))
            else:
                col_joint = 0
                col_truth = 0
                col_cross = 0
            if kde:
                kde_ll = compute_kde_nll(prediction_dict[t][node], futures_dict[t][node])
            else:
                kde_ll = 0
            if obs:
                obs_viols = compute_obs_violations(prediction_dict[t][node], map)
            else:
                obs_viols = 0
            if best_of:
                ade_errors = np.min(ade_errors, keepdims=True)
                fde_errors = np.min(fde_errors, keepdims=True)
                kde_ll = np.min(kde_ll)
            batch_error_dict[node.type]['ade'].extend(list(ade_errors))
            batch_error_dict[node.type]['fde'].extend(list(fde_errors))
            batch_error_dict[node.type]['col_joint'].extend([col_joint])
            batch_error_dict[node.type]['col_truth'].extend([col_truth])
            batch_error_dict[node.type]['col_cross'].extend([col_cross])
            batch_error_dict[node.type]['kde'].extend([kde_ll])
            batch_error_dict[node.type]['obs_viols'].extend([obs_viols])
    return batch_error_dict
def log_batch_errors(batch_errors_list, log_writer, namespace, curr_iter, bar_plot=(), box_plot=()):
    """Aggregate per-batch metric values and log them to TensorBoard.

    Args:
        batch_errors_list: list of dicts as produced by
            compute_batch_statistics (node_type -> metric -> list of values).
        log_writer: TensorBoard SummaryWriter-like object.
        namespace: tag prefix segment (e.g. the evaluation dataset name).
        curr_iter: global step attached to every logged value.
        bar_plot: metrics for which a bar-plot figure is additionally logged.
        box_plot: metrics for which a box-plot figure is additionally logged.
            (Both defaults were mutable `[]`; they are only read here, but
            immutable `()` is the safe idiom and behaves identically for
            the `in` membership tests below.)
    """
    for node_type in batch_errors_list[0].keys():
        for metric in batch_errors_list[0][node_type].keys():
            # Flatten this metric's values across all batches.
            metric_batch_error = []
            for batch_errors in batch_errors_list:
                metric_batch_error.extend(batch_errors[node_type][metric])
            if len(metric_batch_error) > 0:
                log_writer.add_histogram(f"{node_type.name}/{namespace}/{metric}", metric_batch_error, curr_iter)
                log_writer.add_scalar(f"{node_type.name}/{namespace}/{metric}_mean", np.mean(metric_batch_error), curr_iter)
                log_writer.add_scalar(f"{node_type.name}/{namespace}/{metric}_median", np.median(metric_batch_error), curr_iter)
                if metric in bar_plot:
                    # NOTE: `pd` is a plain dict here, not the pandas module.
                    pd = {'dataset': [namespace] * len(metric_batch_error),
                          metric: metric_batch_error}
                    kde_barplot_fig, ax = plt.subplots(figsize=(5, 5))
                    visualization.visualization_utils.plot_barplots(ax, pd, 'dataset', metric)
                    log_writer.add_figure(f"{node_type.name}/{namespace}/{metric}_bar_plot", kde_barplot_fig, curr_iter)
                if metric in box_plot:
                    mse_fde_pd = {'dataset': [namespace] * len(metric_batch_error),
                                  metric: metric_batch_error}
                    fig, ax = plt.subplots(figsize=(5, 5))
                    visualization.visualization_utils.plot_boxplots(ax, mse_fde_pd, 'dataset', metric)
                    log_writer.add_figure(f"{node_type.name}/{namespace}/{metric}_box_plot", fig, curr_iter)
def print_batch_errors(batch_errors_list, namespace, curr_iter):
    """Print the mean and median of every metric, aggregated across batches."""
    for node_type in batch_errors_list[0].keys():
        for metric in batch_errors_list[0][node_type].keys():
            # Flatten this metric's values across all batches.
            values = [v for errors in batch_errors_list for v in errors[node_type][metric]]
            if values:
                prefix = f"{curr_iter}: {node_type.name}/{namespace}/{metric}"
                print(prefix + "_mean", np.mean(values))
                print(prefix + "_median", np.median(values))
| [
"yuejiang.liu@outlook.com"
] | yuejiang.liu@outlook.com |
696ed35bb22e3e88c3a56f39a9cc885a938a4dda | cf8636929ab18fab2ef2fc79c2b522213f57cea1 | /schedule/schedule3.py | 7c524d27dce04a4b266bca35e050f4ccfcdbe5c9 | [] | no_license | LuckyStrike-zhou/excel_helper | ad4b7a1eb8b13ed677795c2e8c1b07c4b3dd6d71 | c912e2247c4737000125c589c5c664a9fc4dfdc2 | refs/heads/main | 2023-02-23T05:59:57.868146 | 2021-01-11T14:10:02 | 2021-01-11T14:10:02 | 315,235,934 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | from tkinter import *
# Tiny tkinter app: type a number, press Submit, the label shows whether
# it is prime.
root = Tk()
label1 = Label(root, text="Enter Number")
E1 = Entry(root, bd=5)

def isPrime(lbl):
    """Read the number from E1 and display 'Prime' / 'Not prime' in lbl.

    Fixes over the original: 0, 1 and negative numbers were reported as
    prime (range(2, n) is empty, so the for/else fell through to 'Prime'),
    non-numeric input crashed the callback with ValueError, and trial
    division now stops at sqrt(n) instead of n - 1.
    """
    try:
        entry1 = int(E1.get())
    except ValueError:
        lbl.config(text="Not prime")  # non-numeric input is not a prime
        return
    if entry1 < 2:
        lbl.config(text="Not prime")
        return
    d = 2
    while d * d <= entry1:  # trial division up to sqrt(n) is sufficient
        if entry1 % d == 0:
            lbl.config(text="Not prime")
            return
        d += 1
    lbl["text"] = "Prime"

submit = Button(root, text="Submit", command=lambda: isPrime(label1))
label1.pack()
E1.pack()
submit.pack(side=BOTTOM)
root.mainloop()
| [
""
] | |
0276923d8773904d13396821fac157e851fd5120 | 76bf0ddf40494ca10ca96800a61487600ce4edc2 | /clientside/migrations/0002_auto_20210627_0717.py | 2c080727537165859fc1a06eedf811902bc48a7f | [] | no_license | albertbolt1/csrabledimageupload | bef0d4acb988cd4ba1165130dc7b054d0f7787f2 | ba6688ddcd30163fdbdbcad5643f5ac60e2a807f | refs/heads/main | 2023-06-29T06:20:23.308786 | 2021-07-11T15:12:32 | 2021-07-11T15:12:32 | 380,535,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | # Generated by Django 3.2.4 on 2021-06-27 01:47
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames the three image fields on
    # PlantDiseaseImage from `plantimageN` to `ImageN` (RenameField keeps
    # the underlying column data).

    dependencies = [
        ('clientside', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='plantdiseaseimage',
            old_name='plantimage1',
            new_name='Image1',
        ),
        migrations.RenameField(
            model_name='plantdiseaseimage',
            old_name='plantimage2',
            new_name='Image2',
        ),
        migrations.RenameField(
            model_name='plantdiseaseimage',
            old_name='plantimage3',
            new_name='Image3',
        ),
    ]
| [
"45144020+albertbolt1@users.noreply.github.com"
] | 45144020+albertbolt1@users.noreply.github.com |
fb2dac07af82c220e6e4a2f95942ed4fa435a178 | 6ffa236a008d1cd1dc70f2c8ea0544d20ec350ee | /aries_cloudagent/messaging/issue_credential/v1_0/messages/credential_stored.py | 59973aa3320b3ad20a261d63f724ad3d305ab2b3 | [
"Apache-2.0"
] | permissive | blockpass-identity-lab/aries-fl-demo | 99e849f782dd80e729e3fe24c3af2881c5c49dca | 310b748c1ac2e814ec6f97c46ddbb9985584e0fc | refs/heads/master | 2022-07-06T18:37:16.007582 | 2020-04-23T15:48:33 | 2020-04-23T15:48:33 | 221,698,330 | 5 | 0 | Apache-2.0 | 2021-02-26T02:40:03 | 2019-11-14T12:58:58 | Python | UTF-8 | Python | false | false | 911 | py | """A credential stored message."""
# from marshmallow import fields
from ....agent_message import AgentMessage, AgentMessageSchema
from ..message_types import CREDENTIAL_STORED
HANDLER_CLASS = (
"aries_cloudagent.messaging.issue_credential.v1_0.handlers."
"credential_stored_handler.CredentialStoredHandler"
)
class CredentialStored(AgentMessage):
    """Acknowledgement message: the issued credential has been stored."""

    class Meta:
        """Metadata wiring this message to its handler, schema and type."""

        handler_class = HANDLER_CLASS
        schema_class = "CredentialStoredSchema"
        message_type = CREDENTIAL_STORED

    def __init__(self, **kwargs):
        """Initialize the credential-stored message object."""
        super().__init__(**kwargs)
class CredentialStoredSchema(AgentMessageSchema):
    """Credential stored schema.

    Declares no fields of its own: the message is a bare acknowledgement,
    so only the base AgentMessageSchema fields are (de)serialized.
    """

    class Meta:
        """Schema metadata."""

        # Message class this schema (de)serializes.
        model_class = CredentialStored
"srklump@hotmail.com"
] | srklump@hotmail.com |
1b3dc5794b8d78ef65f79d30b8852f8c5138c541 | b3d00f85b47f6787461061ac955aba4b544e9976 | /trainset_input.py | 678c07722a99cfe6ed433ae7b58bf167a6c5020a | [
"MIT"
] | permissive | chciw/Adversary-about-RGBD-SOD | 94eb9f3863ac93ae42496fc21339a7fc41e1f906 | 19edc78b3abf45313f63212750aaedefb3b82de0 | refs/heads/main | 2023-07-22T08:11:28.283731 | 2021-08-24T02:17:44 | 2021-08-24T02:17:44 | 390,727,685 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,360 | py | """
Utilities for importing the nju2000 and nlpr dataset.
Each image in the dataset is a numpy array of shape (224, 224, 3), with the values
being unsigned integers (i.e., in the range 0,1,...,255).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import sys
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import cv2
version = sys.version_info
class TrainData(object):
    """
    Loader for the combined nju2000 + NLPR RGB-D saliency datasets.

    The training split holds 1500 + 500 = 2000 examples and the test split
    485 + 498 = 983 examples.  Each example is an RGB image (float32), a
    single-channel depth map (uint8) and a single-channel ground-truth
    saliency mask (uint8), all resized to 224 x 224.
    """

    def __init__(self):
        root = '/home/jackice/PycharmProjects/Adversary-about-RGBD-SOD-main/dataset/nju2000'
        root2 = '/home/jackice/PycharmProjects/Adversary-about-RGBD-SOD-main/dataset/NLPR'
        size = 224

        # Pre-allocated arrays sized for the fixed split sizes of the two
        # source datasets combined (see class docstring).
        train_img = np.zeros((2000, size, size, 3), dtype='float32')
        train_deep = np.zeros((2000, size, size, 1), dtype='uint8')
        train_labels = np.zeros((2000, size, size, 1), dtype='uint8')
        train_names = []
        test_img = np.zeros((983, size, size, 3), dtype='float32')
        test_deep = np.zeros((983, size, size, 1), dtype='uint8')
        test_labels = np.zeros((983, size, size, 1), dtype='uint8')
        test_names = []

        # The original four copy-pasted loading loops (dataset x split) are
        # factored into _load_split; call order and progress prints match
        # the original exactly.
        i = self._load_split(root + '/train/', size, train_img, train_deep, train_labels, train_names, 0)
        print(i)
        i = self._load_split(root2 + '/train/', size, train_img, train_deep, train_labels, train_names, i)
        print(i)
        i = self._load_split(root + '/test/', size, test_img, test_deep, test_labels, test_names, 0)
        print(i)
        i = self._load_split(root2 + '/test/', size, test_img, test_deep, test_labels, test_names, i)
        print(i)

        self.train_data = DataSubset(train_img, train_deep, train_labels, train_names)
        self.eval_data = DataSubset(test_img, test_deep, test_labels, test_names)

    @staticmethod
    def _load_split(split_dir, size, imgs, deeps, labels, names, start):
        """Load every (Img, deep, GT) triplet under `split_dir` into the
        given arrays beginning at index `start`; return the next free index.

        `split_dir` must contain sub-folders Img/ (RGB), deep/ (depth .png)
        and GT/ (saliency mask .png); files are matched by basename, and
        `.txt` files in Img/ are skipped.
        """
        img_dir = split_dir + 'Img/'
        deep_dir = split_dir + 'deep/'
        gt_dir = split_dir + 'GT/'
        i = start
        for _, _, files in os.walk(img_dir):
            for file in files:
                filename = os.path.splitext(file)[0]
                if os.path.splitext(file)[1] == '.txt':
                    continue
                imgs[i, :, :, :] = cv2.resize(cv2.imread(img_dir + file), (size, size),
                                              interpolation=cv2.INTER_CUBIC)
                deeps[i, :, :, 0] = cv2.resize(cv2.imread(deep_dir + filename + '.png', 0), (size, size),
                                               interpolation=cv2.INTER_CUBIC)
                labels[i, :, :, 0] = cv2.resize(cv2.imread(gt_dir + filename + '.png', 0), (size, size),
                                                interpolation=cv2.INTER_CUBIC)
                names.append(filename)
                i += 1
        return i
class DataSubset(object):
    """In-memory split (train or test) serving shuffled mini-batches.

    Holds RGB images `xs`, depth maps `ds`, ground-truth masks `ys` and the
    corresponding file basenames `names`; batches follow a random
    permutation that can be regenerated after each full pass.
    """
    def __init__(self, xs, ds,ys,names):
        self.xs = xs
        # Number of examples in this subset.
        self.n = xs.shape[0]
        self.ds=ds
        self.ys = ys
        self.names=names
        # Cursor into `cur_order` marking the start of the next batch.
        self.batch_start = 0
        # Random visiting order over the whole subset.
        self.cur_order = np.random.permutation(self.n)
    def get_next_batch(self, batch_size, multiple_passes=False, reshuffle_after_pass=True):
        """Return the next (images, depths, labels, names) batch.

        With multiple_passes=False, the final batch of the single pass may
        be smaller than batch_size, and a ValueError is raised once the
        pass is exhausted.  With multiple_passes=True, the subset wraps
        around indefinitely (reshuffling at each wrap when
        reshuffle_after_pass is True) and every batch has exactly
        batch_size examples.
        """
        if self.n < batch_size:
            raise ValueError('Batch size can be at most the dataset size')
        if not multiple_passes:
            # Single-pass mode: serve whatever remains, then fail.
            actual_batch_size = min(batch_size, self.n - self.batch_start)
            if actual_batch_size <= 0:
                raise ValueError('Pass through the dataset is complete.')
            batch_end = self.batch_start + actual_batch_size
            batch_xs = self.xs[self.cur_order[self.batch_start : batch_end], ...]
            batch_ds = self.ds[self.cur_order[self.batch_start: batch_end], ...]
            batch_ys = self.ys[self.cur_order[self.batch_start : batch_end], ...]
            batch_ns = []
            for idx in self.cur_order[self.batch_start: batch_end]:
                # print(idx)
                batch_ns.append(self.names[idx])
            self.batch_start += actual_batch_size
            return batch_xs, batch_ds,batch_ys,batch_ns
        # Multi-pass mode: wrap around (and optionally reshuffle) when fewer
        # than batch_size examples remain in the current pass.
        actual_batch_size = min(batch_size, self.n - self.batch_start)
        if actual_batch_size < batch_size:
            if reshuffle_after_pass:
                self.cur_order = np.random.permutation(self.n)
            self.batch_start = 0
        batch_end = self.batch_start + batch_size
        batch_xs = self.xs[self.cur_order[self.batch_start : batch_end], ...]
        batch_ds = self.ds[self.cur_order[self.batch_start: batch_end], ...]
        batch_ys = self.ys[self.cur_order[self.batch_start : batch_end], ...]
        batch_ns=[]
        for idx in self.cur_order[self.batch_start: batch_end]:
            # print(idx)
            batch_ns.append(self.names[idx])
        self.batch_start += batch_size
        return batch_xs, batch_ds,batch_ys,batch_ns
class AugmentedTrainData(object):
    """
    Data augmentation wrapper over a loaded dataset.

    Builds a TF1 graph that pads each image to 228x228, takes a random
    224x224 crop and randomly flips it left/right, separately for the RGB
    and depth inputs.

    Inputs to constructor
    =====================
    - raw_TrainData: the loaded nju2000 dataset, via the TrainData class
    - sess: current tensorflow session
    - model: current model (needed for input tensor)
      NOTE(review): `model` is never referenced in this class body.
    """
    def __init__(self, raw_TrainData, sess, model):
        assert isinstance(raw_TrainData, TrainData)
        self.image_size = 224
        # create augmentation computational graph for rgb
        self.x_input_placeholder = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
        # Pad by 4 px on each side, then crop back to the original size.
        padded = tf.map_fn(lambda img: tf.image.resize_image_with_crop_or_pad(
            img, self.image_size + 4, self.image_size + 4),
            self.x_input_placeholder)
        cropped = tf.map_fn(lambda img: tf.random_crop(img, [self.image_size,self.image_size,3]), padded)
        flipped = tf.map_fn(lambda img: tf.image.random_flip_left_right(img), cropped)
        self.augmented = flipped
        # create augmentation computational graph for d
        self.d_input_placeholder = tf.placeholder(tf.uint8, shape=[None, 224, 224, 1])
        padded_d = tf.map_fn(lambda img: tf.image.resize_image_with_crop_or_pad(
            img, self.image_size + 4, self.image_size + 4),
            self.d_input_placeholder)
        cropped_d = tf.map_fn(lambda img: tf.random_crop(img, [self.image_size,self.image_size,1]), padded_d)
        flipped_d = tf.map_fn(lambda img: tf.image.random_flip_left_right(img), cropped_d)
        self.augmented_d = flipped_d
        # NOTE(review): the RGB and depth pipelines use independent
        # tf.random_crop / random_flip ops, so the two modalities are NOT
        # cropped/flipped identically -- confirm this is intended for
        # paired RGB-D inputs.
        # self.label_names = raw_TrainData.label_names
        self.train_data = AugmentedDataSubset(raw_TrainData.train_data, sess,
                                              self.x_input_placeholder,
                                              self.d_input_placeholder,
                                              self.augmented,
                                              self.augmented_d)
        self.eval_data = AugmentedDataSubset(raw_TrainData.eval_data, sess,
                                             self.x_input_placeholder,
                                             self.d_input_placeholder,
                                             self.augmented,
                                             self.augmented_d)
class AugmentedDataSubset(object):
    """Wraps a DataSubset and runs its batches through the augmentation graph."""
    def __init__(self, raw_datasubset, sess, x_input_placeholder,d_input_placeholder,augmented,augmented_d):
        self.sess = sess
        self.raw_datasubset = raw_datasubset
        self.x_input_placeholder = x_input_placeholder
        self.d_input_placeholder = d_input_placeholder
        self.augmented = augmented
        self.augmented_d = augmented_d
    def get_next_batch(self, batch_size, multiple_passes=False, reshuffle_after_pass=True):
        """Fetch a raw batch and return ([augmented_rgb, augmented_depth], labels).

        NOTE(review): unlike DataSubset.get_next_batch, which returns
        (xs, ds, ys, names), this drops the names (raw_batch[3]) and returns
        a 2-tuple whose first element is the sess.run() result list.
        """
        raw_batch = self.raw_datasubset.get_next_batch(batch_size, multiple_passes,reshuffle_after_pass)
        # images = raw_batch[0].astype(np.float32)
        return self.sess.run([self.augmented,self.augmented_d],
                             feed_dict={self.x_input_placeholder:raw_batch[0],
                                        self.d_input_placeholder:raw_batch[1]}), raw_batch[2]
"noreply@github.com"
] | noreply@github.com |
8e003e0f09d1d1e726aac6086a86610b4f1d87c7 | 55df546c36e81bbd8786bb092e3be4cf2b2db065 | /example/python/permissions/can_get_all_acc_txs.py | f787d6ed964c1d4580bd3a52d4b578dbdf5c25cb | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | NickMavronick/iroha | d9dacb651475ac8e2edb0c8fd7af129e08097ca4 | 4839bae80fbf5184af9df9600552fab12af1c513 | refs/heads/master | 2020-06-16T00:26:30.674709 | 2019-07-05T04:35:33 | 2019-07-05T05:21:30 | 195,431,244 | 1 | 0 | Apache-2.0 | 2019-07-05T15:27:13 | 2019-07-05T15:27:13 | null | UTF-8 | Python | false | false | 812 | py | #
# Copyright Soramitsu Co., Ltd. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
import irohalib
import commons
import primitive_pb2
# Test fixture: an administrator in domain "first" and a plain user in
# domain "second"; transactions/queries below are built against the
# admin's Iroha handle.
admin = commons.new_user('admin@first')
alice = commons.new_user('alice@second')
iroha = irohalib.Iroha(admin['id'])
@commons.hex
def genesis_tx():
    """Build and sign the genesis transaction for this test case.

    Grants `alice` only the can_get_all_acc_txs permission so the test
    exercises exactly that capability; multidomain=True because admin and
    alice live in different domains.
    """
    test_permissions = [primitive_pb2.can_get_all_acc_txs]
    genesis_commands = commons.genesis_block(admin, alice, test_permissions, multidomain=True)
    tx = iroha.transaction(genesis_commands)
    irohalib.IrohaCrypto.sign_transaction(tx, admin['key'])
    return tx
@commons.hex
def account_transactions_query():
    """Query the admin's account transactions on behalf of alice.

    Signed with alice's key: permitted because the genesis block grants
    her can_get_all_acc_txs.
    """
    query = iroha.query('GetAccountTransactions', creator_account=alice['id'], account_id=admin['id'], page_size=10)
    irohalib.IrohaCrypto.sign_query(query, alice['key'])
    return query
| [
"andrei@soramitsu.co.jp"
] | andrei@soramitsu.co.jp |
49239cd741a705842914498b8d8adcf755414d87 | 462e53caefc202f1e48f7a3891b27dad6d4032f1 | /src/networkcloud/azext_networkcloud/aaz/latest/networkcloud/clustermanager/_create.py | 17de61aba3e388ebf787cfed6f192b7718b26b70 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | bragi92/azure-cli-extensions | a910f14a0613f5611b08ed34ce8d67c1ad05013e | e9aebbcbd3df15fd874a32babc40ae1a0ba23c1f | refs/heads/k8s-extension/public | 2023-08-04T13:22:05.747918 | 2023-07-28T15:45:27 | 2023-07-28T15:45:27 | 205,455,084 | 0 | 0 | MIT | 2019-08-30T20:50:25 | 2019-08-30T20:50:25 | null | UTF-8 | Python | false | false | 16,035 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"networkcloud clustermanager create",
is_experimental=True,
)
class Create(AAZCommand):
"""Create a new cluster manager or update properties of the cluster manager if it exists.
:example: Create or update cluster manager
az networkcloud clustermanager create --name "clusterManagerName" --location "location" --analytics-workspace-id "/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/microsoft.operationalInsights/workspaces/logAnalyticsWorkspaceName" --fabric-controller-id "/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.ManagedNetworkFabric/networkFabricControllers/fabricControllerName" --managed-resource-group-configuration name="my-managed-rg" --tags key1="myvalue1" key2="myvalue2" --resource-group "resourceGroupName"
"""
_aaz_info = {
"version": "2022-12-12-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.networkcloud/clustermanagers/{}", "2022-12-12-preview"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.cluster_manager_name = AAZStrArg(
options=["-n", "--name", "--cluster-manager-name"],
help="The name of the cluster manager.",
required=True,
fmt=AAZStrArgFormat(
pattern="^([a-zA-Z0-9][a-zA-Z0-9-_]{0,28}[a-zA-Z0-9])$",
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
# define Arg Group "ClusterManagerParameters"
_args_schema = cls._args_schema
_args_schema.location = AAZResourceLocationArg(
arg_group="ClusterManagerParameters",
help="The geo-location where the resource lives",
required=True,
fmt=AAZResourceLocationArgFormat(
resource_group_arg="resource_group",
),
)
_args_schema.tags = AAZDictArg(
options=["--tags"],
arg_group="ClusterManagerParameters",
help="Resource tags.",
)
tags = cls._args_schema.tags
tags.Element = AAZStrArg()
# define Arg Group "Properties"
_args_schema = cls._args_schema
_args_schema.analytics_workspace_id = AAZStrArg(
options=["--analytics-workspace-id"],
arg_group="Properties",
help="The resource ID of the Log Analytics workspace that is used for the logs collection.",
)
_args_schema.availability_zones = AAZListArg(
options=["--availability-zones"],
arg_group="Properties",
help="Field deprecated, this value will no longer influence the cluster manager allocation process and will be removed in a future version. The Azure availability zones within the region that will be used to support the cluster manager resource.",
)
_args_schema.fabric_controller_id = AAZStrArg(
options=["--fabric-controller-id"],
arg_group="Properties",
help="The resource ID of the fabric controller that has one to one mapping with the cluster manager.",
required=True,
)
_args_schema.managed_resource_group_configuration = AAZObjectArg(
options=["--managed-resource-group-configuration"],
arg_group="Properties",
help="The configuration of the managed resource group associated with the resource.",
)
_args_schema.vm_size = AAZStrArg(
options=["--vm-size"],
arg_group="Properties",
help="Field deprecated, this value will no longer influence the cluster manager allocation process and will be removed in a future version. The size of the Azure virtual machines to use for hosting the cluster manager resource.",
)
availability_zones = cls._args_schema.availability_zones
availability_zones.Element = AAZStrArg()
managed_resource_group_configuration = cls._args_schema.managed_resource_group_configuration
managed_resource_group_configuration.location = AAZStrArg(
options=["location"],
help="The location of the managed resource group. If not specified, the location of the parent resource is chosen.",
)
managed_resource_group_configuration.name = AAZStrArg(
options=["name"],
help="The name for the managed resource group. If not specified, the unique name is automatically generated.",
fmt=AAZStrArgFormat(
max_length=75,
),
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.ClusterManagersCreateOrUpdate(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class ClusterManagersCreateOrUpdate(AAZHttpOperation):
    # Auto-generated HTTP operation for
    # PUT Microsoft.NetworkCloud/clusterManagers/{clusterManagerName}.
    CLIENT_TYPE = "MgmtClient"

    def __call__(self, *args, **kwargs):
        # Send the PUT and hand back an LRO poller; 202/200/201 all poll via
        # the Azure-AsyncOperation header, anything else is an error.
        request = self.make_request()
        session = self.client.send_request(request=request, stream=False, **kwargs)
        if session.http_response.status_code in [202]:
            return self.client.build_lro_polling(
                self.ctx.args.no_wait,
                session,
                self.on_200_201,
                self.on_error,
                lro_options={"final-state-via": "azure-async-operation"},
                path_format_arguments=self.url_parameters,
            )
        if session.http_response.status_code in [200, 201]:
            return self.client.build_lro_polling(
                self.ctx.args.no_wait,
                session,
                self.on_200_201,
                self.on_error,
                lro_options={"final-state-via": "azure-async-operation"},
                path_format_arguments=self.url_parameters,
            )
        return self.on_error(session.http_response)

    @property
    def url(self):
        # Resource URL template; placeholders are filled from url_parameters.
        return self.client.format_url(
            "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/clusterManagers/{clusterManagerName}",
            **self.url_parameters
        )

    @property
    def method(self):
        return "PUT"

    @property
    def error_format(self):
        return "MgmtErrorFormat"

    @property
    def url_parameters(self):
        # Path parameters, all mandatory for this resource id.
        parameters = {
            **self.serialize_url_param(
                "clusterManagerName", self.ctx.args.cluster_manager_name,
                required=True,
            ),
            **self.serialize_url_param(
                "resourceGroupName", self.ctx.args.resource_group,
                required=True,
            ),
            **self.serialize_url_param(
                "subscriptionId", self.ctx.subscription_id,
                required=True,
            ),
        }
        return parameters

    @property
    def query_parameters(self):
        # Pinned service API version for this command build.
        parameters = {
            **self.serialize_query_param(
                "api-version", "2022-12-12-preview",
                required=True,
            ),
        }
        return parameters

    @property
    def header_parameters(self):
        parameters = {
            **self.serialize_header_param(
                "Content-Type", "application/json",
            ),
            **self.serialize_header_param(
                "Accept", "application/json",
            ),
        }
        return parameters

    @property
    def content(self):
        # Build the request body from the parsed CLI args (schema mirrors the
        # argument schema declared in _build_arguments_schema).
        _content_value, _builder = self.new_content_builder(
            self.ctx.args,
            typ=AAZObjectType,
            typ_kwargs={"flags": {"required": True, "client_flatten": True}}
        )
        _builder.set_prop("location", AAZStrType, ".location", typ_kwargs={"flags": {"required": True}})
        _builder.set_prop("properties", AAZObjectType, ".", typ_kwargs={"flags": {"required": True, "client_flatten": True}})
        _builder.set_prop("tags", AAZDictType, ".tags")
        properties = _builder.get(".properties")
        if properties is not None:
            properties.set_prop("analyticsWorkspaceId", AAZStrType, ".analytics_workspace_id")
            properties.set_prop("availabilityZones", AAZListType, ".availability_zones")
            properties.set_prop("fabricControllerId", AAZStrType, ".fabric_controller_id", typ_kwargs={"flags": {"required": True}})
            properties.set_prop("managedResourceGroupConfiguration", AAZObjectType, ".managed_resource_group_configuration")
            properties.set_prop("vmSize", AAZStrType, ".vm_size")
        availability_zones = _builder.get(".properties.availabilityZones")
        if availability_zones is not None:
            availability_zones.set_elements(AAZStrType, ".")
        managed_resource_group_configuration = _builder.get(".properties.managedResourceGroupConfiguration")
        if managed_resource_group_configuration is not None:
            managed_resource_group_configuration.set_prop("location", AAZStrType, ".location")
            managed_resource_group_configuration.set_prop("name", AAZStrType, ".name")
        tags = _builder.get(".tags")
        if tags is not None:
            tags.set_elements(AAZStrType, ".")
        return self.serialize_content(_content_value)

    def on_200_201(self, session):
        # Success callback: stash the deserialized resource for _output().
        data = self.deserialize_http_content(session)
        self.ctx.set_var(
            "instance",
            data,
            schema_builder=self._build_schema_on_200_201
        )

    # Lazily built, cached response schema shared by all instances.
    _schema_on_200_201 = None

    @classmethod
    def _build_schema_on_200_201(cls):
        # Build (once) the deserialization schema for 200/201 responses.
        if cls._schema_on_200_201 is not None:
            return cls._schema_on_200_201
        cls._schema_on_200_201 = AAZObjectType()
        _schema_on_200_201 = cls._schema_on_200_201
        _schema_on_200_201.id = AAZStrType(
            flags={"read_only": True},
        )
        _schema_on_200_201.location = AAZStrType(
            flags={"required": True},
        )
        _schema_on_200_201.name = AAZStrType(
            flags={"read_only": True},
        )
        _schema_on_200_201.properties = AAZObjectType(
            flags={"required": True, "client_flatten": True},
        )
        _schema_on_200_201.system_data = AAZObjectType(
            serialized_name="systemData",
            flags={"read_only": True},
        )
        _schema_on_200_201.tags = AAZDictType()
        _schema_on_200_201.type = AAZStrType(
            flags={"read_only": True},
        )
        properties = cls._schema_on_200_201.properties
        properties.analytics_workspace_id = AAZStrType(
            serialized_name="analyticsWorkspaceId",
        )
        properties.availability_zones = AAZListType(
            serialized_name="availabilityZones",
        )
        properties.cluster_versions = AAZListType(
            serialized_name="clusterVersions",
            flags={"read_only": True},
        )
        properties.detailed_status = AAZStrType(
            serialized_name="detailedStatus",
            flags={"read_only": True},
        )
        properties.detailed_status_message = AAZStrType(
            serialized_name="detailedStatusMessage",
            flags={"read_only": True},
        )
        properties.fabric_controller_id = AAZStrType(
            serialized_name="fabricControllerId",
            flags={"required": True},
        )
        properties.managed_resource_group_configuration = AAZObjectType(
            serialized_name="managedResourceGroupConfiguration",
        )
        properties.manager_extended_location = AAZObjectType(
            serialized_name="managerExtendedLocation",
        )
        properties.provisioning_state = AAZStrType(
            serialized_name="provisioningState",
            flags={"read_only": True},
        )
        properties.vm_size = AAZStrType(
            serialized_name="vmSize",
        )
        availability_zones = cls._schema_on_200_201.properties.availability_zones
        availability_zones.Element = AAZStrType()
        cluster_versions = cls._schema_on_200_201.properties.cluster_versions
        cluster_versions.Element = AAZObjectType()
        _element = cls._schema_on_200_201.properties.cluster_versions.Element
        _element.support_expiry_date = AAZStrType(
            serialized_name="supportExpiryDate",
            flags={"read_only": True},
        )
        _element.target_cluster_version = AAZStrType(
            serialized_name="targetClusterVersion",
            flags={"read_only": True},
        )
        managed_resource_group_configuration = cls._schema_on_200_201.properties.managed_resource_group_configuration
        managed_resource_group_configuration.location = AAZStrType()
        managed_resource_group_configuration.name = AAZStrType()
        manager_extended_location = cls._schema_on_200_201.properties.manager_extended_location
        manager_extended_location.name = AAZStrType(
            flags={"required": True},
        )
        manager_extended_location.type = AAZStrType(
            flags={"required": True},
        )
        system_data = cls._schema_on_200_201.system_data
        system_data.created_at = AAZStrType(
            serialized_name="createdAt",
        )
        system_data.created_by = AAZStrType(
            serialized_name="createdBy",
        )
        system_data.created_by_type = AAZStrType(
            serialized_name="createdByType",
        )
        system_data.last_modified_at = AAZStrType(
            serialized_name="lastModifiedAt",
        )
        system_data.last_modified_by = AAZStrType(
            serialized_name="lastModifiedBy",
        )
        system_data.last_modified_by_type = AAZStrType(
            serialized_name="lastModifiedByType",
        )
        tags = cls._schema_on_200_201.tags
        tags.Element = AAZStrType()
        return cls._schema_on_200_201
class _CreateHelper:
    """Helper class for Create"""


# Public API of this generated module.
__all__ = ["Create"]
| [
"noreply@github.com"
] | noreply@github.com |
def SJN():
    """Shortest Job Next simulation.

    Reads the job count and each job's CPU cycle length from stdin, runs the
    jobs shortest-first back to back from t=0, and prints the average
    turnaround time.
    """
    jobNum = int(input("Enter the number of job(s): "))
    job = []
    for x in range(jobNum):
        # Jobs are labelled A, B, C, ... in input order.
        # (The original recomputed and then dead-incremented the letter code;
        # the increment had no effect and is removed.)
        label = chr(ord("A") + x)
        job.append(int(input("Enter job {} CPU cycle: ".format(label))))
    # SJN: shortest job runs first.
    job.sort()
    total = totalSJN(job, jobNum)
    print("Average turnaround time: {}".format(total))
def totalSJN(job, jobNum):
    """Return the average turnaround time of *jobNum* jobs run back to back.

    Each job's turnaround equals the sum of all cycles up to and including
    itself (all jobs arrive at t=0). The original recomputed every prefix sum
    recursively (O(n^2)); a running prefix sum gives the same result in O(n).
    """
    total = 0
    elapsed = 0
    for cycle in job[:jobNum]:
        elapsed += cycle          # finish time of this job
        total += elapsed          # accumulate turnaround times
    return total / jobNum
def totalRecurse(job, x):
    """Return the sum of job[0] .. job[x] (the finish time of job *x*)."""
    # Equivalent to the original recursive accumulation, expressed as a slice sum.
    return sum(job[:x + 1])
def RR():
    # Round Robin simulation: reads job CPU cycles, arrival times and a time
    # quantum from stdin, then prints the average turnaround time
    # (finish time - arrival time, averaged over all jobs).
    jobNum = int(input("Enter the number of job(s): "))
    job = [None]*jobNum
    for x in range(jobNum):
        aUnicode = ord("A")+x
        job[x] = int(input("Enter job {} CPU cycle: ".format(chr(aUnicode))))
        aUnicode += 1  # NOTE(review): dead increment; aUnicode is recomputed each pass
    jobArrive = [None]*jobNum
    for x in range(jobNum):
        aUnicode = ord("A")+x
        jobArrive[x] = int(input("Enter job {} arrival time: ".format(chr(aUnicode))))
        aUnicode += 1
    timeQuantum = int(input("Enter time quantum: "))
    # Per-job finish time and completion flag; global clock accumulator.
    jobFinish = [0]*jobNum
    jobCheck = [False]*jobNum
    timeQuantumAccumulate = 0
    # NOTE(review): the scheduler cycles jobs in index order and ignores
    # arrival times when deciding who runs — confirm that is intended.
    while not all(jobCheck):
        for x in range(jobNum):
            if job[x] >= timeQuantum and jobCheck[x] == False:
                # Job consumes a full quantum.
                job[x] -= timeQuantum
                timeQuantumAccumulate += timeQuantum
                hold = timeQuantumAccumulate
                jobFinish[x] = hold
            elif job[x] < timeQuantum and jobCheck[x] == False:
                # Job finishes within this quantum.
                timeQuantumAccumulate += job[x]
                job[x] = 0
                hold = timeQuantumAccumulate
                jobFinish[x] = hold
            if job[x] == 0 and jobCheck[x] == False:
                jobCheck[x] = True
    total = 0
    for x in range(jobNum):
        total += jobFinish[x] - jobArrive[x]
    print("Average turnaround time: {}".format(total/jobNum))
# Top-level dispatcher: 1 runs the SJN simulation, 2 runs Round Robin.
choice = int(input("Enter 1 for SJN or 2 for Round Robin: "))
if choice == 1:
    SJN()
elif choice == 2:
    RR()
else:
    print("Invalid Input")
"58548850+IzzatHaikal@users.noreply.github.com"
] | 58548850+IzzatHaikal@users.noreply.github.com |
9e10fab873f443c80734eb13d6241d1f1ec1f36f | 1cc98b96c69e8d722455af46eaead8ca0193d03b | /action/commission.py | 350b6afe44e570ce82eb3a9832127c1b28312435 | [] | no_license | sesametian/commission | ae09ffbd46f36c692e321bbbf3adf4ed6b0bb56a | 6e22fe9be938b30d64f3080f2f4428468a6647e2 | refs/heads/master | 2020-07-05T16:24:26.116750 | 2019-08-16T09:58:40 | 2019-08-16T09:58:40 | 202,698,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,009 | py | from public.public_db import *
from public.public_excel import *
def b0_b1_b2_result(DB_zhenxuan_and_type_rate_amount, EX_resdict, DB_orderpaid):
    # Verify the commission rows for a B0 -> B1 -> B2 chain.
    # Each row ZTRA is (user_id, type, rate?, amount); type 0 = selection
    # income, 1 = level-1, 2 = level-2, 3 = "nurture" income.
    # Returns (IsPass, errorInf); errorInf is None when everything matched.
    IsPass = True
    errorInf = None
    for ZTRA in DB_zhenxuan_and_type_rate_amount:
        if ZTRA[0] == EX_resdict[1] and ZTRA[1] == 0:  # check the selection-income row
            print('EX_resdict[13]', EX_resdict[13])
            # NOTE(review): ZX_amount is only bound in this branch; if a row of
            # another type is encountered first, the branches below raise
            # UnboundLocalError — verify row ordering is guaranteed.
            ZX_amount = DB_orderpaid * (EX_resdict[13] / 100)
            if ZTRA[3] == ZX_amount:
                print("甄选收益正确")
            else:
                IsPass = False
                errorInf = "甄选收益错误"
                print(errorInf)
                return IsPass, errorInf
        elif ZTRA[0] == EX_resdict[6] and ZTRA[1] == 1:  # check the level-1 income row
            # print(ZX_amount * ((EX_resdict[14] / 10) * EX_resdict[4]))
            if ZTRA[3] == ZX_amount * ((EX_resdict[14] / 10) * EX_resdict[4]):
                print("一级收益正确")
            else:
                IsPass = False
                errorInf = "一级收益错误"
                return IsPass, errorInf
        elif ZTRA[0] == EX_resdict[9] and ZTRA[1] == 2:  # check the level-2 income row
            if ZTRA[3] == ZX_amount * (EX_resdict[15] / 10):
                print("二级收益正确")
            else:
                IsPass = False
                errorInf = "二级收益错误"
                return IsPass, errorInf
        elif ZTRA[0] == EX_resdict[11] and ZTRA[1] == 3:  # check the nurture income row (rounded)
            if ZTRA[3] == round(ZX_amount * (EX_resdict[16] / 10)):
                print("育成收益正确")
            else:
                IsPass = False
                errorInf = "育成收益错误"
                return IsPass, errorInf
    print('b0_b1_b2', IsPass)
    return IsPass, errorInf
def b0_b2_result(DB_zhenxuan_and_type_rate_amount, EX_resdict, DB_orderpaid):
    # Verify the commission rows for a B0 -> B2 chain (no level-2 row; the
    # upline nurture row uses type 3). Returns (IsPass, errorInf).
    IsPass = True
    errorInf = None
    for ZTRA in DB_zhenxuan_and_type_rate_amount:
        if ZTRA[0] == EX_resdict[1] and ZTRA[1] == 0:  # check the selection-income row
            # NOTE(review): ZX_amount is only bound here; see b0_b1_b2_result.
            ZX_amount = DB_orderpaid * (EX_resdict[11] / 100)
            if ZTRA[3] == ZX_amount:
                print("甄选收益正确")
            else:
                IsPass = False
                errorInf = "甄选收益错误"
                return IsPass, errorInf
        elif ZTRA[0] == EX_resdict[6] and EX_resdict[6] == get_recommend_id_for_saler_binding_mapping_by_user_id(
                EX_resdict[1]) \
                and ZTRA[1] == 1:  # check the level-1 income row (recommender must match binding)
            if ZTRA[3] == ZX_amount * ((EX_resdict[12] / 10) * EX_resdict[4]):
                print("一级收益正确")
            else:
                IsPass = False
                errorInf = "一级收益错误"
                return IsPass, errorInf
        elif ZTRA[0] == EX_resdict[9] and ZTRA[1] == 3:  # check the nurture income row (rounded)
            if ZTRA[3] == round(ZX_amount * (EX_resdict[13] / 10)):
                print("育成收益正确")
            else:
                IsPass = False
                errorInf = "育成收益错误"
                return IsPass, errorInf
    print('b0_b2', IsPass)
    return IsPass, errorInf
def b1_b1_b2_result(DB_zhenxuan_and_type_rate_amount, EX_resdict, DB_orderpaid):
    # Verify the commission rows for a B1 -> B1 -> B2 chain: selection,
    # level-1, level-2 and nurture income rows. Returns (IsPass, errorInf).
    IsPass = True
    errorInf = None
    for ZTRA in DB_zhenxuan_and_type_rate_amount:
        if ZTRA[0] == EX_resdict[1] and ZTRA[1] == 0:  # check the selection-income row
            # NOTE(review): ZX_amount is only bound here; see b0_b1_b2_result.
            ZX_amount = DB_orderpaid * (EX_resdict[13] / 100)
            if ZTRA[3] == ZX_amount:
                print("甄选收益正确")
            else:
                IsPass = False
                errorInf = "甄选收益错误"
                return IsPass, errorInf
        elif ZTRA[0] == EX_resdict[6] and ZTRA[1] == 1:  # check the level-1 income row
            if ZTRA[3] == ZX_amount * ((EX_resdict[14] / 10) * EX_resdict[4]):
                print("一级收益正确")
            else:
                IsPass = False
                errorInf = "一级收益错误"
                return IsPass, errorInf
        elif ZTRA[0] == EX_resdict[9] and ZTRA[1] == 2:  # check the level-2 income row
            if ZTRA[3] == ZX_amount * (EX_resdict[15] / 10):
                print("二级收益正确")
            else:
                IsPass = False
                errorInf = "二级收益错误"
                return IsPass, errorInf
        elif ZTRA[0] == EX_resdict[11] and ZTRA[1] == 3:  # check the nurture income row (rounded)
            if ZTRA[3] == round(ZX_amount * (EX_resdict[16] / 10)):
                print("育成收益正确")
            else:
                IsPass = False
                errorInf = "育成收益错误"
                return IsPass, errorInf
    print('b1-b1-b2', IsPass)
    return IsPass, errorInf
def b1_b2_result(DB_zhenxuan_and_type_rate_amount, EX_resdict, DB_orderpaid):
    # Verify the commission rows for a B1 -> B2 chain: selection, level-1 and
    # nurture income rows. Returns (IsPass, errorInf).
    IsPass = True
    errorInf = None
    for ZTRA in DB_zhenxuan_and_type_rate_amount:
        if ZTRA[0] == EX_resdict[1] and ZTRA[1] == 0:  # check the selection-income row
            # NOTE(review): ZX_amount is only bound here; see b0_b1_b2_result.
            ZX_amount = DB_orderpaid * (EX_resdict[11] / 100)
            if ZTRA[3] == ZX_amount:
                print("甄选收益正确")
            else:
                IsPass = False
                errorInf = "甄选收益错误"
                return IsPass, errorInf
        elif ZTRA[0] == EX_resdict[6] and ZTRA[1] == 1:  # check the level-1 income row
            #print('!!!!!!!!!!!!!!!!',ZX_amount)
            if ZTRA[3] == ZX_amount * ((EX_resdict[12] / 10) * EX_resdict[4]):
                print("一级收益正确")
            else:
                IsPass = False
                errorInf = "一级收益错误"
                return IsPass, errorInf
        elif ZTRA[0] == EX_resdict[9] and ZTRA[1] == 3:  # check the nurture income row (rounded)
            if ZTRA[3] == round(ZX_amount * (EX_resdict[13] / 10)):
                print("育成收益正确")
            else:
                IsPass = False
                errorInf = "育成收益错误"
                return IsPass, errorInf
    print('b1-b2', IsPass)
    return IsPass, errorInf
def b2_b1_result(DB_zhenxuan_and_type_rate_amount, EX_resdict, DB_orderpaid):
    # Verify the commission rows for a B2 -> B1 chain: selection income plus a
    # single level-1 income row for the bound recommender.
    # Returns (IsPass, errorInf).
    IsPass = True
    errorInf = None
    for ZTRA in DB_zhenxuan_and_type_rate_amount:
        if ZTRA[0] == EX_resdict[1] and ZTRA[1] == 0:  # check the selection-income row
            # NOTE(review): ZX_amount is only bound here; see b0_b1_b2_result.
            ZX_amount = DB_orderpaid * (EX_resdict[7] / 100)
            if ZTRA[3] == ZX_amount:
                print("甄选收益正确")
            else:
                IsPass = False
                errorInf = "甄选收益错误"
                print(errorInf)
                return IsPass, errorInf
        elif ZTRA[0] == EX_resdict[5] and get_recommend_id_for_saler_binding_mapping_by_user_id(EX_resdict[1]) == EX_resdict[5] \
                and ZTRA[1] == 1:  # check the level-1 income row (recommender must match binding)
            if ZTRA[3] == ZX_amount * ((EX_resdict[8] / 10) * EX_resdict[3]):
                print("一级收益正确")
            else:
                IsPass = False
                errorInf = "一级收益错误"
                return IsPass, errorInf
    print('b2-b1', IsPass)
    return IsPass, errorInf
| [
"hichcock_tian@hotmail.com"
] | hichcock_tian@hotmail.com |
8b3a97ebe43ae145f472de830429cf5e306e5269 | 5c902cfea2856b5b591a9e4de4ecf7d66d01c3a0 | /백준/기초1/수학1/나머지.py | 36861f45235b2a9988962ca407e259b38e24cc23 | [] | no_license | VIXXPARK/pythonAlgorithm | 9cbedf1e9dc387756bed1793081be90e77daf9e8 | 8675fc0e078d90620ecf9dae95c1ccd6bcd36d37 | refs/heads/main | 2023-05-29T10:41:51.900075 | 2021-06-17T23:28:51 | 2021-06-17T23:28:51 | 316,072,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | import sys
# Read A, B, C and print the four modular identities required by the problem:
# (A+B)%C, ((A%C)+(B%C))%C, (A*B)%C and ((A%C)*(B%C))%C.
a, b, c = map(int, sys.stdin.readline().rstrip().split())
print((a + b) % c)
print(((a % c) + (b % c)) % c)
print((a * b) % c)
print(((a % c) * (b % c)) % c)
"vixx170627@gmail.com"
] | vixx170627@gmail.com |
2d7098cb8174e3779d78a54cffcff3d299651034 | 5174346f6bd374cc8873a41ed336b7545756d753 | /examples/prompts/toolbar-prompt.py | ff31c5f2951a01c99352e655915d09e1f94ff7bc | [
"BSD-3-Clause"
] | permissive | calebstewart/python-prompt-toolkit | f06dd911399b75e9d4985b485a3e9897c04bf1d6 | 3f9f9a927b2d1a208e59af73e574825df2901e69 | refs/heads/master | 2022-07-02T16:23:24.682709 | 2020-05-14T22:45:14 | 2020-05-14T22:45:14 | 263,998,820 | 1 | 0 | null | 2020-05-14T18:51:02 | 2020-05-14T18:51:01 | null | UTF-8 | Python | false | false | 260 | py | #!/usr/bin/env python
"""
Prompt for user input as a toolbar which disappears after submission.
"""
from prompt_toolkit import prompt
if __name__ == "__main__":
    # Render the prompt inside the bottom toolbar; it disappears once the
    # user submits their input.
    answer = prompt(message="prompt$ ", prompt_in_toolbar=True)
    print(f"You said: {answer}")
| [
"caleb.stewart94@gmail.com"
] | caleb.stewart94@gmail.com |
4c2bdb7c3c1f0ffd2ca09b91c2b25d6b3bd6dc4c | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /PORMain/pirates/effects/DarkPortal.py | 0cf27ecf8deadbcfe7623641ee7db325b3b0db04 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,813 | py | from panda3d.core import TransparencyAttrib
# File: D (Python 2.4)
from direct.interval.IntervalGlobal import *
from PooledEffect import PooledEffect
from EffectController import EffectController
from otp.otpbase import OTPRender
import random
class DarkPortal(PooledEffect, EffectController):
    # Pooled "dark portal" visual effect: a flat textured model that scales
    # up, holds, then scales back down before returning itself to the pool.

    def __init__(self):
        PooledEffect.__init__(self)
        EffectController.__init__(self)
        # Animation tuning: seconds per scale phase, hold duration, and the
        # final scale applied relative to render.
        self.speed = 0.75
        self.holdTime = 2.5
        self.size = 40
        self.explosionSequence = 0
        self.explosion = loader.loadModel('models/effects/darkPortal')
        # Overlay-style render state: no depth test/write, unlit, unfogged.
        self.explosion.setDepthTest(0)
        self.setDepthWrite(0)
        self.explosion.setFogOff()
        self.explosion.setLightOff()
        self.explosion.setHpr(0, -90, 0)
        self.explosion.reparentTo(self)
        self.hide()
        # Visible only to the environment camera, hidden from the main one.
        self.explosion.hide(OTPRender.MainCameraBitmask)
        self.explosion.showThrough(OTPRender.EnviroCameraBitmask)
        self.explosion.setBin('shadow', 0)
        self.explosion.setTransparency(TransparencyAttrib.MAlpha)
        self.explosion.setDepthWrite(0)

    def createTrack(self, rate = 1):
        # Build the scale-up / hold / scale-down interval sequence into
        # self.track. NOTE(review): the *rate* parameter is unused here.
        self.explosion.setScale(1)
        self.explosion.setColorScale(1, 1, 1, 0.75)
        scaleUp = self.explosion.scaleInterval(self.speed, self.size, startScale = 0.0, blendType = 'easeIn', other = render)
        scaleDown = self.explosion.scaleInterval(self.speed, 0.0, startScale = self.size, blendType = 'easeIn', other = render)
        self.track = Sequence(Func(self.show), scaleUp, Wait(self.holdTime), scaleDown, Func(self.hide), Func(self.cleanUpEffect))

    def cleanUpEffect(self):
        # After cleanup, hand this instance back to the effect pool for reuse.
        EffectController.cleanUpEffect(self)
        self.checkInEffect(self)

    def destroy(self):
        EffectController.destroy(self)
        PooledEffect.destroy(self)
| [
"brandoncarden12345@gmail.com"
] | brandoncarden12345@gmail.com |
2cb377c43ca73e2cf72a65ab4b4636613e972e8c | b0038c62b3ac288d2dae54ac73fc7a65b1abaefe | /leetcode/medium/16-3sum-closest.py | 6962ac18e17fef931adb52e79be1ef33c07c0458 | [] | no_license | rebornbd/PROBLEM-SOLVE | a9acb5b4c3ad5a7df45fa515842ffb0bdad04512 | b0f20141748fc0937d9caea19ae5fa13b1f01bc7 | refs/heads/master | 2023-07-15T15:39:00.251292 | 2021-08-31T08:58:47 | 2021-08-31T08:58:47 | 400,992,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 852 | py | class Solution:
def threeSumClosest(self, nums, target):
    """Return the sum of three numbers in *nums* closest to *target*.

    Classic sort + two-pointer scan: O(n^2) time, O(1) extra space.
    Assumes len(nums) >= 3. Sorts *nums* in place (as before).
    """
    nums.sort()
    n = len(nums)
    best = float('inf')
    for i in range(n - 2):
        # Skip duplicate anchors; they cannot produce a new closest sum.
        if i > 0 and nums[i] == nums[i - 1]:
            continue
        lo, hi = i + 1, n - 1
        while lo < hi:
            # Renamed from 'sum', which shadowed the builtin.
            total = nums[i] + nums[lo] + nums[hi]
            if total == target:
                return total
            if total < target:
                lo += 1
            else:
                hi -= 1
            # Keep the candidate strictly closest to the target.
            if abs(target - total) < abs(target - best):
                best = total
    return best
# Ad-hoc smoke run: for this input the closest sum to 1 is 2 (-1 + 2 + 1).
solve = Solution()
nums = [-1,2,1,-4]
target = 1
res = solve.threeSumClosest(nums, target)
print(res)
| [
"dipta.dhar.bd@gmail.com"
] | dipta.dhar.bd@gmail.com |
328fb9e4369aa42b565a12b663af3028621e6923 | 579737178a1ca75756c85124cbe97fc1d2bded60 | /pyutil/pub.py | 0b22e21d4c121d0f02500420ab0c73069d032d80 | [] | no_license | jbester/python2-util | b183a7b4e430c691badca8352f2a2d1555deb5a7 | 02c1e14c3eb8dad7509df58c6cd8fca192778897 | refs/heads/master | 2021-12-29T07:20:05.311325 | 2011-12-30T21:29:32 | 2011-12-30T21:29:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,274 | py | ################################################################################
# File : pub.py
# Function : Publish/Subscribe Library
################################################################################
# Newest version can be obtained at http://www.freshlime.org
# Send comments or questions to code at freshlime dot org
################################################################################
# Copyright (c) 2011, J. Bester
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The name of the authors names of its contributors may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
import collections

# topic -> list of handler callables registered via listen().
__subscribers = collections.defaultdict(list)
# FIFO of pending (topic, args, kwargs) tuples awaiting dispatch.
__events = []
# True while emit() is draining the queue (prevents re-entrant draining and
# preserves global topic ordering).
__inprogress = False
def listen(topic, handler):
    """
    Listen to the topic specified.

    @param topic: topic to listen for
    @param handler: method, static method, or function to handle the topic;
        it is invoked with the arguments passed to emit() for this topic
    @return: None
    """
    global __subscribers
    # NOTE: a leftover debug print (which also used Python 2 print-statement
    # syntax) was removed here.
    __subscribers[topic].append(handler)
"""
Send a topic
Note: this function preserves global ordering of topics.
If you emit topic A which in turn emits topic B. All handlers
of A will be called prior to any processing of topic B
@param topic: topic name
@param *args: ordered arguments
@param **kw: keyword arguments
"""
global __inprogress, __events, __subscribers
__events.append( (topic, args, kw) )
# if an event sends an additional event queue it up
# this ensures global ordering of topics i.e.
# if you have an ordered set of three topics A, B, C
# all of A handlers are processed prior to B being processed
if __inprogress == False:
__inprogress = True
while len(__events) > 0:
evt = __events.pop(0)
(topic, args, kw) = evt
for listener in __subscribers[topic]:
try:
listener( *args, **kw )
except:
pass
__inprogress = False
| [
"jbester@gmail.com"
] | jbester@gmail.com |
06864ac65e787aa808340b9594fcc3a612d1f8c5 | 4e30e4f4a98710b0874593166f7d9caff7eb3320 | /Trabajo Final/Script.py | f690e14e5dfaa386eb5cecbdc4d3d5f62cfa8e33 | [] | no_license | majocubero/PF-3115-Laboratorios | 8e53ac7cbec6dde115c42225d056366f2606bbf3 | 42e6c45456d6c72028a3f338fa633c3be3c9e6d5 | refs/heads/master | 2022-11-17T09:09:17.448543 | 2020-06-30T23:49:45 | 2020-06-30T23:49:45 | 257,762,262 | 0 | 0 | null | 2020-06-16T23:29:59 | 2020-04-22T01:46:58 | Python | UTF-8 | Python | false | false | 2,833 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 27 18:43:02 2020
@author: osvaldo
"""
import pandas as pd
from sklearn.model_selection import train_test_split
#Todo lo comentado fue lo que se hizo para dividir los datos de manera balanceada
'''
#leyendo los datos
datos=pd.read_csv("/home/osvaldo/Documents/CURSOS/Machine Learning and Statistics/Trabajo Final /Datos/bank-full.csv", sep=";")
x=datos.drop(['y'], axis=1)
y=datos.y
# Dividiendo el data set en trainig set y test set
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0, stratify=y)
# Pruebas del balance de y
Training=pd.concat([X_train, y_train], axis=1)
Testing=pd.concat([X_test, y_test], axis=1)
X_train.to_csv(r'/home/osvaldo/Documents/CURSOS/Machine Learning and Statistics/Trabajo Final /Datos/X_train.csv')
y_train.to_csv(r'/home/osvaldo/Documents/CURSOS/Machine Learning and Statistics/Trabajo Final /Datos/y_train.csv')
X_test.to_csv(r'/home/osvaldo/Documents/CURSOS/Machine Learning and Statistics/Trabajo Final /Datos/X_test.csv')
y_test.to_csv(r'/home/osvaldo/Documents/CURSOS/Machine Learning and Statistics/Trabajo Final /Datos/y_test.csv')
Training.to_csv(r'/home/osvaldo/Documents/CURSOS/Machine Learning and Statistics/Trabajo Final /Datos/Training.csv', index = False)
Testing.to_csv(r'/home/osvaldo/Documents/CURSOS/Machine Learning and Statistics/Trabajo Final /Datos/Testing.csv', index = False)
# cambiando el -1 por 0 de la columna pdays
datos['pdays']= datos['pdays'].replace(-1,0)
'''
# Load the training split; all descriptive statistics below are computed on it.
Training=pd.read_csv("/home/osvaldo/Documents/CURSOS/Machine Learning and Statistics/Trabajo Final /Datos/Training.csv", sep=",")
# Helper for building acceptance cross-tabulations.
def tablas(columna, apredecir):
    """Cross-tabulate *columna* against the 'yes'/'no' target *apredecir*,
    append an acceptance-percentage column and sort it descending."""
    table = pd.crosstab(columna, apredecir)
    table['%_acep'] = table['yes'] / (table['yes'] + table['no']) * 100
    return table.sort_values(by='%_acep', ascending=False)
# NOTE(review): the tablas(...) results below are discarded (neither printed
# nor assigned) — presumably this was run line-by-line in a console/IDE.
# % acceptance by job type
tablas(Training.job,Training.y)
# % acceptance by marital status
tablas(Training.marital, Training.y)
# % acceptance by education level
tablas(Training.education, Training.y)
# % acceptance for people who defaulted in the past
tablas(Training.default,Training.y)
# % acceptance for people with a mortgage
tablas(Training.housing,Training.y)
# % acceptance for people with personal loans
tablas(Training.loan,Training.y)
# Tables of means per subscription outcome ('yes'/'no')
print(pd.crosstab(Training.y,Training.y, values= Training.balance, aggfunc='mean' ).round(2))
print(pd.crosstab(Training.y,Training.y, values= Training.duration, aggfunc='mean' ).round(2))
print(pd.crosstab(Training.y,Training.y, values= Training.campaign, aggfunc='mean' ).round(2))
print(pd.crosstab(Training.y,Training.y, values= Training.pdays, aggfunc='mean' ).round(2))
| [
"osvaldour@gmail.com"
] | osvaldour@gmail.com |
93048f8876fc96b4c7fd4bda1e6719756d628118 | 222d4f2dfb63a66b5de274b785cb92393a2e0fe9 | /after6pm_191113/04.py | 17381eb6ff56e9032c7c90fbf870c88dae44464e | [] | no_license | GyuReeKim/PycharmProjects | fd2584c3ff1369510a7f246f2089cefb77035d9d | dd4f0e15b4e72c68b054489c54f24fa0ba5b9ed3 | refs/heads/master | 2020-07-03T11:44:54.951147 | 2019-11-21T00:07:43 | 2019-11-21T00:07:43 | 201,894,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | # 연구소
# Wall placement (BOJ "Research Lab" scaffold); input is read from 04.txt.
import sys
sys.stdin = open('04.txt', 'r')
def f(i, j, lab):
    # TODO: unimplemented solver stub (wall placement / spread simulation
    # not written yet).
    pass
# Read the grid dimensions and the N x M lab grid.
N, M = map(int, input().split())
lab = [list(map(int, input().split())) for _ in range(N)]
# Debug output of the parsed grid.
print(lab)
f(0, 0, lab) | [
"starkim96@naver.com"
] | starkim96@naver.com |
b6eb1a9acaec58050efa63ac3b0227a9e160bd5e | e8601b69369d36a18ce2256c5d7e55ca87c8daf1 | /hello/userprofile/tests/test_views.py | bba4ab67c89dc9b6114b9ba727223778218d0c88 | [] | no_license | tdn1234/Django | 6f95be985bb0ef79ca8353c78a60fcfb79327305 | e76bda7972dd9d26600e15ff72a734bce85cea18 | refs/heads/master | 2021-01-10T02:51:48.009666 | 2015-11-13T11:02:26 | 2015-11-13T11:02:26 | 46,097,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,165 | py | from django.contrib.auth.models import User
from django.test import TestCase
from django.test.client import Client
from django.test.client import RequestFactory
class ViewUserTestCase(TestCase):
    """Access control for the /users/ listing page."""

    def setUp(self):
        self.factory = RequestFactory()
        self.client = Client()
        self.user = User.objects.create_user('john', 'lennon@gmail.com', '111111')

    def test_access_user_list(self):
        # Anonymous visitors are redirected to the login page.
        resp = self.client.get('/users/')
        self.assertEqual(resp.status_code, 302)

    def test_login_user(self):
        # An authenticated user can view the listing.
        self.client.login(username='john', password='111111')
        resp = self.client.get('/users/')
        self.assertEqual(resp.status_code, 200)

    def test_blank_page(self):
        # An out-of-range page number still renders normally.
        self.client.login(username='john', password='111111')
        resp = self.client.get('/users/?page=4000')
        self.assertEqual(resp.status_code, 200)
class ViewSearchTestCase(TestCase):
    """Behaviour of the /users/search/ endpoint."""

    def setUp(self):
        self.client = Client()
        self.user = User.objects.create_user('john', 'lennon@gmail.com', '111111')

    def test_access_user_list_non_login(self):
        # Anonymous visitors are redirected to the login page.
        resp = self.client.get('/users/search/')
        self.assertEqual(resp.status_code, 302)

    def test_access_user_list_login(self):
        # Without a search keyword the endpoint lists all users.
        self.client.login(username='john', password='111111')
        resp = self.client.get('/users/search/')
        self.assertEqual(resp.status_code, 200)

    def test_search_with_search_keyword(self):
        User.objects.create_user('adam', 'adam@gmail.com', '111111')
        self.client.login(username='john', password='111111')
        resp = self.client.get('/users/search/?s=adam')
        # Searching for "adam" returns adam but must not include john.
        self.assertContains(resp, 'adam')
        self.assertNotContains(resp, 'john')
        self.assertEqual(resp.status_code, 200)
| [
"tdn1234cntt@gmail.com"
] | tdn1234cntt@gmail.com |
55f0d2481754c912cb2c42f535a00f9435de0ad1 | 2e92d8d84c70636d1ccc45cfd323e87881c9bb97 | /distributed-crawler/bot.py | 0c2b0655115de6c1bb0272c5e9a0944a6791db50 | [] | no_license | cctvkill1/test | 258238ee28a747068a76391c4a99c6d6432fea68 | 622ab91273ba8a7fb20710754631795dc95ed5d8 | refs/heads/master | 2021-01-17T07:34:13.316543 | 2019-01-17T14:48:01 | 2019-01-17T14:48:01 | 23,092,358 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | # -*- coding: utf-8 -*-
import Queue

# Seed URL for the crawl.
initial_page = "http://www.renminribao.com"


def main():
    """Breadth-first crawl starting from initial_page.

    NOTE(review): store() and extract_urls() are not defined anywhere in this
    file — this is sketch/pseudocode until those are supplied.
    """
    url_queue = Queue.Queue()
    seen = set()
    seen.add(initial_page)  # fixed: set has no .insert()
    url_queue.put(initial_page)
    while True:  # keep crawling until the frontier is exhausted
        if url_queue.qsize() > 0:  # fixed: Queue exposes .qsize(), not .size()
            current_url = url_queue.get()  # take the next URL from the queue
            store(current_url)  # persist the page this URL points to
            for next_url in extract_urls(current_url):  # links found on the page
                if next_url not in seen:
                    seen.add(next_url)  # fixed: set has no .put()
                    url_queue.put(next_url)
        else:
            break


if __name__ == '__main__':
    # fixed: main() was called but never defined (NameError when run as a
    # script); the crawl loop now lives inside it instead of running at import.
    main()
| [
"1097422313@qq.com"
] | 1097422313@qq.com |
717c663e7b15e05259d4441cf607714ca2bcc424 | e0cf5c5c98661f39b990a1a871292ba307dfa82a | /VIPclient/python/portal.py | e45acc5ba35615b63b154abd9a2f499f4b1234f0 | [
"CC-BY-4.0"
] | permissive | CISI-INSERM/OpenDose-Computing | 1ad3551d82442047b997c29b22fb65f2ad434ffe | d32f5bdb2a58712ca9f05af6909f055b15cacda1 | refs/heads/master | 2020-03-25T14:36:26.358289 | 2019-12-23T10:51:32 | 2019-12-23T10:51:32 | 143,863,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,561 | py | import os
import argparse
import configparser
from gate import Gate
from gate_lab import GateLab
from gate_CL import GateCL
class Portal():
    """Entry point that reads a cfg file and launches the matching GATE runner."""

    def __init__(self):
        pass

    def parseParams(self):
        # Parse the command line; only --config/-C (path to a .cfg file) is defined.
        print("Portal :: parse params")
        parser = argparse.ArgumentParser()
        parser.add_argument("--config", "-C", help="path to the configuration file in cfg format", type=str)
        return parser.parse_args()

    # get init values from config file
    def parseConfig(self, config_path):
        # Flatten the [application]/[inputs]/[jobs] sections into a single dict.
        cfgparser = configparser.RawConfigParser()
        cfgparser.read(config_path)
        config = {}
        config['apiKey'] = cfgparser.get('application', 'apikey')
        config['gaterelease'] = cfgparser.get('application', 'gaterelease')
        config['application'] = cfgparser.get('application', 'name')
        config['CPUparam'] = cfgparser.get('application', 'CPUparam')
        config['gateinput'] = cfgparser.get('inputs', 'input')
        config['macfile'] = cfgparser.get('inputs', 'macfile')
        config['outputdir'] = cfgparser.get('inputs', 'outputdir')
        config['email'] = cfgparser.get('inputs', 'email')
        config['maxExecsNb'] = int(cfgparser.get('jobs', 'maxjobs'))
        # jobfile is resolved relative to the OPENDOSE_PATH environment variable.
        config['jobfile'] = os.environ['OPENDOSE_PATH'] + "/" + cfgparser.get('jobs', 'jobfile')
        return config

    def launchGate(self):
        # NOTE(review): this reads the module-level globals `config` and `args`
        # created in the __main__ block rather than parameters/attributes; the
        # second branch also tests args.type while the first tests
        # config['application'] — looks inconsistent. `batch` is never used.
        if config['application'] == "GateLab":
            batch = GateLab(config)
        elif args.type == "GateCL":
            batch = GateCL(config)
if __name__ == "__main__":
    print("OPENDOSE_PATH: ", os.environ['OPENDOSE_PATH'])
    portal = Portal()
    # `args` and `config` become module globals that Portal.launchGate() reads.
    args = portal.parseParams()
    config = portal.parseConfig(args.config)
portal.launchGate() | [
"daniel.salas@inserm.fr"
] | daniel.salas@inserm.fr |
eb3fc67560a4cb8799759fb9f9260b886aa4b1dc | 0d366c90d6f70c6c2252b45c1b581fe3732a7998 | /tkdemo/tkdemo/remember.py | 0af0e66d3e344a3d8483a2faaf6e39b9d229e9e9 | [] | no_license | borisbho/PythonPractice | d67f84b3bc0db8ae2711f4d61a9150fad61db646 | deb320174c4354eb1ba6121df9d1c89a5ccdd463 | refs/heads/master | 2020-06-03T11:49:20.615973 | 2019-06-12T11:53:44 | 2019-06-12T11:53:44 | 191,556,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,959 | py | from tkinter import Tk, Label, Frame, Button, Entry, Canvas, PhotoImage
import random
root = Tk()
root.title('Remember The GUI Year')
# Each question is a (event description, year) pair; the quiz asks
# "In what year was <description>?" and grades against the year.
question_1 = ("the start of the Revolutionary War", 1775)
question_2 = ("the United States Constitution signed", 1783)
question_3 = ("President Lincoln assassinated", 1865)
question_4 = ("Theodore Roosevelt's first day in office as President of the United States", 1901)
question_5 = ("the beginning of World War II", 1939)
question_6 = ("the first personal computer introduced", 1975)
question_7 = ("the Berlin Wall taken down", 1989)
question_8 = ("the seattle seahawks superbowl champions", 2013)
question_9 = ("the social media company facebook started", 2004)
question_10 = ("the company amazon started", 1994)
list_of_questions = [question_1, question_2, question_3, question_4, question_5, question_6, question_7, question_8, question_9, question_10]
# Randomize question order once per run.
random.shuffle(list_of_questions)
def doEntry():
    """Grade the typed year against the current question, update the score,
    and advance to the next question.

    Scoring: exact year = 10 points, within 5 years = 5, within 10 years = 2,
    otherwise 0. The running total is persisted in ``label_counter`` and the
    next question (1-based ``question_number``) is shown in ``label_title``.
    """
    global question_number
    score = int(label_counter['text'])
    year_input = int(entry_name.get())
    # question_number is 1-based; the current question is at index - 1.
    event_year = list_of_questions[question_number-1][1]
    difference = abs(year_input - event_year)
    if year_input == event_year:
        score += 10
        points = 10
    elif difference <= 5:
        score += 5
        points = 5
    elif difference <= 10:
        # BUG FIX: this tier used to award 1 point while telling the player
        # "2 Points"; the awarded score now matches the message.
        score += 2
        points = 2
    else:
        points = 0
    label_message.config(text=str(points) + ' Points | Score: ' + str(score) + '| Correct Answer: ' + str(event_year))
    # BUG FIX: the new total was computed but never written back, so the
    # score effectively reset to the label's value on every answer.
    label_counter.config(text=str(score))
    # BUG FIX: the question counter never advanced, so the same answer was
    # graded forever. Advance, and stop cleanly after the last question.
    question_number += 1
    if question_number <= len(list_of_questions):
        label_title.config(text = "In What Year was" + " " + list_of_questions[question_number - 1][0])
    else:
        label_title.config(text="Quiz complete! Final score: " + str(score))
# --- GUI construction --------------------------------------------------
# Widgets are module-level on purpose: doEntry() reads and updates them.
label_question_number = Label(
    text = '1',
    fg = '#000',
    font = "Arial 20 bold italic"
)
# 1-based counter of the question currently on display.
question_number = int(label_question_number['text'])
label_question_number.grid(column=0,row=0)
label_title = Label(
    # BUG FIX: the title used index ``question_number`` (the *second*
    # question) while doEntry grades index ``question_number - 1``, so the
    # first displayed question was never the one being graded.
    text ="In What Year was" + " " + list_of_questions[question_number - 1][0],
    fg = "#000",
    font = "Arial 20 bold italic"
)
label_title.grid(column=1, row=0)
# Running score display; doEntry reads and rewrites its text.
label_counter = Label(
    text='0',
    bg='dark blue',
    fg='#ff0',
    width=4,
    font = 'Arial 14 bold italic'
)
label_counter.grid(column=2, row =3)
btn_submit = Button(
    text='SUBMIT',
    bg='turquoise',
    fg='black',
    width=9,
    command = doEntry
)
btn_submit.grid(column=0,row=4)
frame_entry = Frame(root)
frame_entry.grid(column=0, row=3)
# Text box where the player types a year.
entry_name = Entry(frame_entry)
entry_name.pack(side='top')
# Feedback line: points awarded, running score and the correct answer.
label_message = Label(
    text ="",
    fg = "#000",
    font = "Arial 20 bold italic"
)
label_message.grid(column=1, row=2)
root.mainloop()
#question_number = int(label_question_number['text'])
# def doEntry():
# score = int(label_counter['text'])
# year_input = int(entry_name.get())
# event_year = list_of_questions[question_number-1][1]
# if year_input == event_year:
# score += 10
# label_message.config(text= '10 Points | Score: ' + str(score) + '| Correct Answer: ' + str(event_year))
# elif abs(year_input-event_year) <= 5:
# score += 5
# label_message.config(text='5 Points | Score: ' + str(score) + '| Correct Answer: ' + str(event_year))
# elif abs(year_input-event_year) > 5 and abs(year_input-event_year) <= 10:
# score += 1
# label_message.config(text='2 Points | Score: ' + str(score) + '| Correct Answer: ' + str(event_year))
# else:
# label_message.config(text='0 Points | Score: ' + str(score) + '| Correct Answer: ' + str(event_year))
# i = 9
# final_score = 0
# question_number = 1
# while i is not -1:
# try:
# random_int = 0
# year_input = int(entry_name.get())
# event_year = list_of_questions[random_int][1]
# label_title.config(text="In What Year was " + " " + list_of_questions[random_int][0])
# if year_input == event_year:
# final_score += 10
# label_message.config(text= '10 Points | Score: ' + str(final_score) + '| Correct Answer: ' + str(event_year))
# elif abs(year_input-event_year) <= 5:
# final_score += 5
# label_message.config(text='5 Points | Score: ' + str(final_score) + '| Correct Answer: ' + str(event_year))
# elif abs(year_input-event_year) > 5 and abs(year_input-event_year) <= 10:
# final_score += 1
# label_message.config(text='2 Points | Score: ' + str(final_score) + '| Correct Answer: ' + str(event_year))
# elif abs(year_input-event_year) > 10 and abs(year_input-event_year) <= 20:
# final_score += 1
# label_message.config(text='1 Points | Score: ' + str(final_score) + '| Correct Answer: ' + str(event_year))
# else:
# label_message.config(text='0 Points | Score: ' + str(final_score) + '| Correct Answer: ' + str(event_year))
# del list_of_questions[random_int]
# i -= 1
# question_number += 1
# random_int += 1
# entry_name.delete(0,'end')
# except ValueError:
# break
# if final_score == 100:
# label_message.config(text='Total Score: ' + str(final_score) + ', You did really good!\n')
# elif final_score < 100 and final_score >= 70:
# label_message.config(text='Total Score: ' + str(final_score) + ', You did alright!')
# else:
# label_message.config(text='Total Score: ' + str(final_score) + ', Youre Trash!') | [
"noreply@github.com"
] | noreply@github.com |
a1ad55236b217f8b3a31a24a32f154b5d7313462 | 607cdcb7dec0bd14052b78de281c6d3049c32b9b | /scraper/parser.py | c2f2cd306bf4c9d95dc682914b5b2ca5200830d0 | [
"BSD-3-Clause"
] | permissive | ea-sal/scraper | d81027cae8d39e232abd230821b1a1c98c9ed7de | c088dc3dc613fec94e297ac71302d2305b44b14c | refs/heads/master | 2022-09-23T16:35:45.178767 | 2020-05-26T15:36:23 | 2020-05-26T15:36:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,371 | py | from parsel import Selector
def get_features_from_item(item_url_responce) -> dict:
    """Scrape one catalogue page and return the item's attributes as a dict.

    :param item_url_responce: HTTP response object for the item page; must
        expose ``.url`` and ``.text``
    :return: dict with the item URL plus the fields scraped from the review
        table (title, country, genres, ratings, ...)
    """
    html = Selector(item_url_responce.text)
    # Every attribute shares the same table layout: a <b> label inside a
    # td.review cell, with the value two siblings further on.
    cell = '//td[@class="review"]/b[text()="{}"]/../following-sibling::*[2]'

    def text_of(label):
        # Plain-text value of the labelled cell.
        return html.xpath(cell.format(label) + '/text()').get()

    def link_of(label):
        # Value wrapped in a single <a> tag.
        return html.xpath(cell.format(label) + '/a/text()').get()

    def links_of(label):
        # All <a> values of the labelled cell.
        return html.xpath(cell.format(label) + '/a/text()').getall()

    features = {'URL': item_url_responce.url}
    features['title'] = text_of('Название (англ.)')
    features['country'] = text_of('Производство')
    features['genres'] = links_of('Жанр')
    features['audience'] = link_of('Целевая аудитория')
    features['season'] = link_of('Сезон')
    features['based_on'] = link_of('Основа')
    features['based_on_title'] = link_of('Снято по манге')
    features['producer'] = link_of('Режиссёр')
    features['original_author'] = link_of('Автор оригинала')
    # The numeric cells hold "value <something>" text; keep the first token
    # (and only the first three characters of the average score).
    features['avg_point'] = text_of('Средний балл').split(' ')[0][:3]
    features['voters_num'] = text_of('Проголосовало').split(' ')[0]
    features['place'] = text_of('Место в ').split(' ')[0]
    return features
| [
"easaltykova@avito.ru"
] | easaltykova@avito.ru |
e8dbf9e194c596ccb086a0b20048def936cb4f39 | d361ec226065d77a69e5179a5746cc1d76ede8de | /FBTserver/apps/account/urls.py | c7591c434ee49e33cbb22885e02a730e9fee36cc | [] | no_license | zhujinliang/FBTserver | e12c5af945c9f45b55aee1c32bcfcdcb0deda826 | ba739230a659f9e15f744b168ef6bbfd3e49fe2c | refs/heads/master | 2016-08-04T09:45:25.733153 | 2013-07-06T08:50:34 | 2013-07-06T08:50:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | from django.conf.urls import patterns, include, url
from FBTserver.apps.account.views import RegisterView
from FBTserver.apps.account.views import LoginView
from FBTserver.apps.account.views import LogoutView
from FBTserver.apps.account.views import SetPasswordView
urlpatterns = patterns('',
url(r'^register/$', RegisterView.as_view()),
url(r'^login/$', LoginView.as_view()),
url(r'^logout/$', LogoutView.as_view()),
url(r'^set-password/$', SetPasswordView.as_view()),
)
| [
"jzhu@redhat.com"
] | jzhu@redhat.com |
6238fb8273504cdc518dfb9eb499d99cd1154705 | 78ef02f8d148713a8ae6e96120761d5e3887098b | /RF_P/PROPMAT-master/matlab_to_propmat/loop.py | 1175a4ed95e43a536792648e23ba3ef470077668 | [] | no_license | DrikoldLun/RF_yORCA | 99bbd93c84be7182546feb5d5c384f35b41d55c0 | 63008599459bd5c9c4479e159c4e32af7b35067d | refs/heads/master | 2023-06-17T09:07:43.015052 | 2021-07-07T13:02:33 | 2021-07-07T13:02:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | import os
#stalst = os.listdir('../RF_data/event_data_Pcorrected0.1')
#parafile = 'paraRF.cfg'
#config = configparser.ConfigParser()
#config.read(parafile)
#event_path = config.get('path','event_path')
#stalst = os.listdir(event_path)
#stalst = ['CC06','EE04','CC07']
#stalst = ['CC04','CC05','WC03']
#stalst = ['CC05']
#stalst = ['CC02','CC04','CC05','CC07','CC08','WC03','WC04']
stalst = ['CC02','CC04','CC05','CC07','CC08','WC03','WC02']
#stalst = ['CC07','CC08']
if not os.path.exists('log'):
os.mkdir('log')
for sta in stalst:
runlog = 'log/run_'+sta+'_PCAstacktmp.log'
runerr = 'log/run_'+sta+'_PCAstacktmp.err'
os.system('nohup python -u propmat.py -S'+sta+' 1>> '+runlog+' 2>> '+runerr+' &')
#os.system('nohup python -u checkrotate.py -S'+sta+' -L paraRF.cfg 1>> '+runlog+' 2>> '+runerr+' &')
| [
"zhanglnju2015@gmail.com"
] | zhanglnju2015@gmail.com |
b695d8ad94119bbc9b73c478b2406f4ae32ae1be | 071078a168ab73e8f4fb3e47f5c1096b3e41d0ea | /Orders/views.py | e9a5666f559957776b49d9ea450e0c434eeeac72 | [] | no_license | rashad-farajullayev/drf-example | 8cc4745fd739cda2f8b41d526ba8b755979bf5c5 | 99db8850690dd17422292d27d5b774d5d0432332 | refs/heads/master | 2023-04-14T10:15:29.853733 | 2021-04-25T14:46:04 | 2021-04-25T14:46:04 | 361,368,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | from rest_framework import permissions
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from .models import Order
from .permissions import IsOwnerOfOrder
from .serializer import OrderSerializer, OrderDetailsSerializer
class OrderListView(APIView):
    """List a customer's orders (GET) or create a new one (POST)."""
    permission_classes = [permissions.IsAuthenticated, IsOwnerOfOrder]
    serializer_class = OrderDetailsSerializer

    def get(self, request, customer_id):
        """Return every order belonging to ``customer_id``.

        Object-level permissions are checked per order, so a user can only
        list orders they own (IsOwnerOfOrder raises 403 otherwise).
        """
        self.check_permissions(request)
        orders = Order.objects.filter(customer_id=customer_id)
        for order in orders:
            self.check_object_permissions(request, order)
        serializer = OrderDetailsSerializer(orders, many=True)
        return Response(serializer.data)

    def post(self, request, customer_id):
        """Create an order from the request body; 201 on success."""
        serializer = OrderDetailsSerializer(data=request.data)
        if serializer.is_valid():
            if serializer.save():
                return Response(serializer.data, status=status.HTTP_201_CREATED)
            else:
                return Response(serializer.errors, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        else:
            # BUG FIX: this Response was built but never returned, so invalid
            # input made the view return None and crash with a server error.
            # (415 is kept for backward compatibility, though 400 Bad Request
            # is the conventional status for validation failures.)
            return Response("Provided data does not appear to be valid", status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)
| [
"rashad.farajullayev@atltech.az"
] | rashad.farajullayev@atltech.az |
9c14ff371ae3c1b5705cf3b2b0e761044559e189 | 16af907b1a93c15a55b1bc711db4750ee4afd07c | /krow_package/Krow/__init__.py | c4e6c4665ce2984c3dd29670c8a466c15fbc2b0a | [] | no_license | KrowNetwork/Krowsearch | 03929e097248b5d8541ba6bab959c987d326e7e9 | 2477ea9b24f8cd8d17ee4a34da7ded903d4467aa | refs/heads/master | 2020-03-17T04:14:40.154484 | 2018-11-20T00:11:23 | 2018-11-20T00:11:23 | 133,267,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | from Krow.chain import Chain
from Krow.applicant import Applicant
from Krow.employer import Employer
from Krow.job import Job
from Krow.errors import *
| [
"siegel.tucker@gmail.com"
] | siegel.tucker@gmail.com |
45aad19c79479fd3824ea58eb7d7717279b0b008 | 6858cbebface7beec57e60b19621120da5020a48 | /ply/modimport.py | f82d08c44b979f0b39be6a4dfe34acf53fbfc6e1 | [] | no_license | ponyatov/PLAI | a68b712d9ef85a283e35f9688068b392d3d51cb2 | 6bb25422c68c4c7717b6f0d3ceb026a520e7a0a2 | refs/heads/master | 2020-09-17T01:52:52.066085 | 2017-03-28T07:07:30 | 2017-03-28T07:07:30 | 66,084,244 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | >>> import tokrules
>>> lexer = lex.lex(module=tokrules)
>>> lexer.input("3 + 4")
>>> lexer.token()
LexToken(NUMBER,3,1,1,0)
>>> lexer.token()
LexToken(PLUS,'+',1,2)
>>> lexer.token()
LexToken(NUMBER,4,1,4)
>>> lexer.token()
None
>>> | [
"dponyatov@gmail.com"
] | dponyatov@gmail.com |
524b26645d22e5350ca96393ae4a8f8c7410257e | 4c76dbfaa8f2ca33945e303be90b579c79bd4008 | /renesola/apps/freight/management/commands/build_angular_js.py | 50d8bdd16a9ceccc64c3c8823bb5058badf95821 | [] | no_license | fogcitymarathoner/renasola | 42c32243df4e4c1246d9a85cfb9251aed2264309 | 9089dcc0ffc57a76799f5e99244df644256e08ea | refs/heads/master | 2021-01-11T00:32:40.358446 | 2016-10-10T18:49:50 | 2016-10-10T18:49:50 | 70,517,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | __author__ = 'marc'
from django.core.management.base import BaseCommand
from django.conf import settings
import os
from renesola_lib.angular_helpers import build_js
class Command(BaseCommand):
    """
    Management command that rebuilds the generated Angular JavaScript by
    delegating to ``renesola_lib.angular_helpers.build_js``.

    Run as: ``python manage.py build_angular_js`` (takes no arguments).
    """
    args = ''  # no positional arguments
    help = ''  # TODO: add a short description for ``manage.py help``
    def handle(self, *args, **options):
        # All the actual work happens in the shared helper.
        build_js()
| [
"marc@fogtest.com"
] | marc@fogtest.com |
d54db077ad045ae5605a1a04b178f9fac106b3ab | 30a456e3012c663782d2a07a0ff67c377d63790d | /data/ass2json.py | 08561f2f6f0db1ff52593268932f24b680e40cf8 | [
"MIT"
] | permissive | akx/my-video | 41099725fd96f369a1e8e671667e2e7be3256f42 | b1135809f81a34026536d1a8532390dc5f1c7945 | refs/heads/master | 2021-01-24T10:30:53.608241 | 2016-10-01T18:30:34 | 2016-10-01T18:30:34 | 69,733,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py | import argparse
import re, json
import sys
# Matches ASS karaoke timing tags like "{\k25}"; group 1 is the whole tag,
# group 2 the syllable duration in centiseconds.
K_RE = re.compile(r'(\{\\k([0-9]+)\})')
def parse_time(dt):
    """Convert an ASS/SRT timestamp (``H:MM:SS.cc``, ``.`` or ``,`` before
    the centiseconds) into seconds as a float."""
    hours, minutes, seconds, centis = (float(int(part, 10)) for part in re.split('[:.,]', dt))
    return hours * 60 * 60 + minutes * 60 + seconds + centis / 100.0
def parse_ass(infp):
for line in infp:
if not line.startswith('Dialogue:'):
continue
line = line.split(',', 9)
start = parse_time(line[1])
end = parse_time(line[2])
parts = K_RE.split(line[-1])[1:]
word_durations = zip([int(s, 10) / 100.0 for s in parts[1::3]], [s.strip() for s in parts[2::3]])
for i, (dur, word) in enumerate(word_durations):
d = {
'time': round(start, 3),
'word': word,
}
if i == 0:
d['verse'] = True
yield d
start += dur
def main():
ap = argparse.ArgumentParser()
ap.add_argument('file', type=argparse.FileType())
ap.add_argument('-o', '--output', type=argparse.FileType('w'), default=None)
ap.add_argument('--indent', default=None, type=int)
args = ap.parse_args()
json.dump(
list(parse_ass(args.file)),
(args.output or sys.stdout),
indent=args.indent,
)
if __name__ == '__main__':
main()
| [
"akx@iki.fi"
] | akx@iki.fi |
3d9f9ad922536fe8a855676dc1949a8d29cbcb0e | ee149168125c6fe3718f525ccf204e36534dc2e4 | /services/db/posts.py | 54cc68b08bba23b039007126fbdd4269bed16c24 | [
"Apache-2.0"
] | permissive | vadimkorr/scheduled-posts-manager | 063d1bdb75d92f18ad461c652ac12e5f1a570e0d | 89fd5779afbd8cd7efdc7e663a2dfb00510304e6 | refs/heads/master | 2021-08-18T11:10:33.059173 | 2019-03-29T20:27:01 | 2019-03-29T20:27:01 | 157,437,694 | 0 | 0 | Apache-2.0 | 2021-06-10T21:00:23 | 2018-11-13T19:53:27 | Python | UTF-8 | Python | false | false | 965 | py | import boto3
import datetime
# Module-level DynamoDB handle shared by every helper below; region and
# endpoint come from the ambient AWS configuration.
dynamodb = boto3.resource(
    'dynamodb')  # , region_name="us-west-2") # , endpoint_url="http://localhost:8001")
postsTable = dynamodb.Table('posts')
def putPost(post):
    """Insert or overwrite one item; ``post`` must be a dict containing the table key ``id``."""
    postsTable.put_item(Item=post)
def getPost(id):
    """Fetch one post by primary key.

    Raises KeyError when no item with this id exists (the DynamoDB response
    then carries no 'Item' entry).
    """
    response = postsTable.get_item(Key={
        'id': id
    })
    return response['Item']
def removePost(id):
    """Delete the post with the given primary key (no-op if it does not exist)."""
    postsTable.delete_item(Key={
        'id': id
    })
# putPost({
# 'id': 'post1',
# 'user_id': 'user1',
# 'date_created': str(datetime.datetime.now())
# })
# print('getting item', getPost('post2'))
def getPosts():
    """Return every item in the posts table, following scan pagination."""
    response = postsTable.scan()
    items = list(response['Items'])
    # DynamoDB scans are paginated: keep fetching while the previous page
    # reported a LastEvaluatedKey.
    while response.get('LastEvaluatedKey'):
        response = postsTable.scan(
            ExclusiveStartKey=response['LastEvaluatedKey'])
        items += response['Items']
    return items
# Import-time smoke check: proves the table handle is reachable.
print(postsTable.creation_date_time)
| [
"korobeinikovv@mail.ru"
] | korobeinikovv@mail.ru |
3ce64f0f7cf14fcc36f5f71986bfe87885493f36 | 2bfd3d0d347bb53c49addb13baeb051e7d15f459 | /medilix/users/migrations/0013_auto_20170430_1051.py | 24f25ee3ac2256acb9d74a21d9c150505692e667 | [] | no_license | faradzh/medilix | ec2a6bd2a491c7de38e3935dbed9892f621f7212 | 6c41464fbc84c6850c7c8c24d4dbac52a938b071 | refs/heads/master | 2021-03-19T11:54:16.677656 | 2017-06-05T00:07:13 | 2017-06-05T00:07:13 | 85,694,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-30 10:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (do not hand-edit): replaces the previous
    # ``hospitals`` field on DoctorProfile with a many-to-many relation to
    # health.Hospital.
    dependencies = [
        ('health', '0002_auto_20170430_0922'),
        ('users', '0012_auto_20170430_1050'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='doctorprofile',
            name='hospitals',
        ),
        migrations.AddField(
            model_name='doctorprofile',
            name='hospitals',
            field=models.ManyToManyField(to='health.Hospital'),
        ),
    ]
| [
"faradj.musaev@gmail.com"
] | faradj.musaev@gmail.com |
1b3572d5cffbf8f576dfef7358f169283ebc397f | 1735d169e79c3b0d5390b46bfc03be4a225b92aa | /manual_tests/testSimulation.py | 3c796fa3ba4243efc0fd1c16c066b15c146e3e49 | [] | no_license | marcoaaguiar/yaocptool | c5750973ffc924e5ff9228f4f827ff161ecd075a | 820513fbff6d7690dfed807fff005325ccd2f452 | refs/heads/master | 2023-07-15T15:04:01.339000 | 2020-06-08T19:18:03 | 2020-06-08T19:18:03 | 104,109,025 | 14 | 0 | null | 2020-06-08T19:18:04 | 2017-09-19T17:56:42 | Python | UTF-8 | Python | false | false | 1,593 | py | from yaocptool.modelling import SystemModel
# create model
# DAE system with 2 states, 2 algebraic variables, 1 control, one parameter
# (p) and one time-varying parameter (theta).
model = SystemModel(name="dae_system")
x = model.create_state("x", 2)
y = model.create_algebraic_variable("y", 2)
u = model.create_control("u")
a = model.create_parameter("a")
b = model.create_theta("b")
model.include_equations(
    ode=[-a * x[0] + b * y[0], -x[1] + y[1] + u[0]],
    alg=[-y[0] - x[1]**2, -y[1] - x[0]**1],
)
x_0 = [1, 2]
# Baseline simulation: 9 steps over t = 0.5 .. 9 with constant control.
sim_result = model.simulate(x_0,
                            range(1, 10),
                            0.5,
                            u=1.0,
                            p=[1],
                            theta=dict(zip(range(0, 9), [0] * 9)))
# sim_result.plot([{'x': 'all'}, {'y': 'all'}, {'u': 'all'}])
# Include data at the end of the sim_result
copy_of_sim_result = sim_result.get_copy()
sim_result2 = model.simulate(x_0,
                             range(20, 30),
                             19,
                             u=1.0,
                             p=[1],
                             theta=dict(zip(range(0, 10), [0] * 10)))
copy_of_sim_result.extend(sim_result2)
# copy_of_sim_result.plot([{'x': 'all'}, {'y': 'all'}, {'u': 'all'}])
# Extending with a window that starts *before* the existing data exercises
# the out-of-order merge path of extend().
sim_result3 = model.simulate(x_0,
                             range(-20, -10),
                             -21,
                             u=1.0,
                             p=[1],
                             theta=dict(zip(range(0, 10), [0] * 10)))
copy_of_sim_result.extend(sim_result3)
copy_of_sim_result.plot([{"x": ["x_0", 1]}, {"y": "all"}, {"u": "all"}])
# sim_result.plot([{'x': 'all'}, {'y': 'all'}, {'u': 'all'}])
| [
"marcoaaguiar@gmail.com"
] | marcoaaguiar@gmail.com |
12421fad05503e978ebc58d3a49c3a9bd1325f54 | 876cf08f89a4ecf1c0479362586e5f78ca92e3a2 | /system-tools.py | 41abac388b09c42a12c2471bdf2f90bbf5b44352 | [] | no_license | TheTheseus/dsk-systemtool95 | 6969b4252a9297fc4ca169d412dbff0b7b438008 | 127baee28f52ad0b2895408f25178d00c206e8be | refs/heads/master | 2021-09-07T10:08:13.013639 | 2018-02-21T12:44:44 | 2018-02-21T12:44:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py | import sys
import os
import time
import requests
import redis
from flask import Flask, request
from flask_restful import Resource, Api
# Flask application plus its Flask-RESTful wrapper.
app = Flask(__name__)
api = Api(app)
# Redis connection configured from the environment variables injected for
# the "redis" service (Kubernetes-style *_SERVICE_HOST/PORT naming).
redis_db = redis.StrictRedis(host=str(os.environ['REDIS_SERVICE_HOST']), port=str(os.environ['REDIS_SERVICE_PORT']))
class SystemToolTime(Resource):
    """Proxy endpoint forwarding to the subscribed "system-time" microservice."""
    def get(self):
        ## SYSTEM_TIME is the name of the subscribed microservice that is been onboarded.
        ## The subscribed microservice should always be provided in caps while fetching from redis db.
        # SECURITY NOTE(review): eval() on data read from Redis executes it as
        # Python — anyone able to write the 'DSKM2102' key can run arbitrary
        # code in this process. Prefer storing JSON and using json.loads;
        # confirm the stored format before changing.
        ms_json = eval(redis_db.get('DSKM2102'))
        ## 10002 is the port on which the subscibed microservice "system-time" is running.
        url = str(ms_json["10002"])
        headers = {
            # NOTE(review): redis returns bytes here; requests accepts bytes
            # header values, but decoding explicitly would be cleaner.
            'API-KEY': redis_db.get('API-KEY')
        }
        resp = requests.request("GET", url, headers=headers)
        #resp = requests.get(url)
        return resp.json()
class SystemToolUpTime(Resource):
    """Proxy endpoint forwarding to the subscribed "system-uptime" microservice."""
    def get(self):
        ## SYSTEM_UPTIME is the name of the subscribed microservice that is been onboarded.
        ## The subscribed microservice should always be provided in caps while fetching from redis db.
        # SECURITY NOTE(review): eval() on Redis data executes it as Python —
        # see SystemToolTime; prefer JSON + json.loads.
        ms_json = eval(redis_db.get('DSKM2102UPTIME'))
        ## 10004 is the port on which the subscibed microservice "system-uptime" is running.
        url = str(ms_json["10004"])
        headers = {
            'API-KEY': redis_db.get('API-KEY')
        }
        resp = requests.request("GET", url, headers=headers)
        #resp = requests.get(url)
        return resp.json()
api.add_resource(SystemToolTime, '/systemtoolstime')
api.add_resource(SystemToolUpTime, '/systemtoolsuptime')
if __name__ == '__main__':
    # Optional first CLI argument overrides the listen port (default 10000).
    if(len(sys.argv) > 1):
        run_port = sys.argv[1]
    else:
        run_port = 10000
    # NOTE(review): debug=True enables the Werkzeug debugger on 0.0.0.0 —
    # do not run like this in production.
    app.run(host='0.0.0.0',port=int(run_port), debug=True)
| [
"noreply@github.com"
] | noreply@github.com |
69f41c9bc89099a762ed51e3431a4d30ecbb6e04 | 55582eae17ed06d174b4f2fb6aae77a3fecf7300 | /main.py | 84406cfee178dbea83341e47e34061efa20ed2d7 | [
"MIT"
] | permissive | EMBEDDIA/EMBEDDIA-summarization-service | a4c9c1bda4baf95d2b105192b24d0d4c29d08527 | 01607377082edb0a5169a3aebe04a6aaf9b78c90 | refs/heads/main | 2023-06-10T11:45:48.583738 | 2021-06-29T11:35:15 | 2021-06-29T11:35:15 | 379,161,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | py | from sklearn.metrics.pairwise import cosine_similarity
import networkx as nx
from sklearn.preprocessing import MinMaxScaler
from utils.encoders import SentenceBERT
def summarize(comments, n):
    """Pick the ``n`` most central sentences from ``comments``.

    Sentences are embedded with SentenceBERT, their min-max-rescaled cosine
    similarity matrix is treated as a weighted graph, and PageRank centrality
    decides the ranking. Returns the top-``n`` sentences, most central first.
    """
    encoder = SentenceBERT()
    vectors = encoder.encode_sentences(comments)
    # Pairwise cosine similarities between all sentence vectors.
    similarity = cosine_similarity(vectors)
    # Rescale all similarities into [0, 1] so edge weights are comparable.
    size = len(vectors)
    flat = similarity.flatten().reshape(-1, 1)
    similarity = MinMaxScaler(feature_range=(0, 1)).fit_transform(flat).reshape(size, size)
    # PageRank over the similarity graph; max_iter bounds convergence cycles.
    graph = nx.from_numpy_array(similarity)
    centrality = nx.pagerank(graph, alpha=0.85, max_iter=500)
    ranking = sorted(((centrality[i], sentence) for i, sentence in enumerate(comments)), reverse=True)
    return [sentence for _, sentence in ranking[:n]]
if __name__ == '__main__':
    # load example_comments.txt (assuming pre-tokenized comments on a sentence level and concatenated together)
    with open('example_comments.txt') as f:
        comments = [sent.strip() for sent in f.readlines()]
    # run main (result is discarded — this serves as a smoke test only)
    summarize(comments, n=2)
| [
"ales.zagar@hotmail.com"
] | ales.zagar@hotmail.com |
9b5c0769b5bff8e323791aacf80b06da0c16643b | 7af470bb3343cfbacb58aea800822384425471cc | /DCNN-Pytorch/main_autoencoder.py | 70b74f19a5ca651a2d73eeeffb897f682b4a3da3 | [
"Apache-2.0"
] | permissive | SiChiTong/deepracing | 751551a2a616b38b4786ee810466f8246ac1fb6a | 4272069ac249e910ae19f404e746176e4d630fa2 | refs/heads/main | 2023-06-13T09:08:04.029721 | 2021-01-04T18:36:05 | 2021-01-04T18:36:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,762 | py | import comet_ml
import torch
import torch.nn as NN
import torch.nn.functional as F
import torch.utils.data as data_utils
import deepracing_models.data_loading.proto_datasets as PD
from tqdm import tqdm as tqdm
import deepracing_models.nn_models.LossFunctions as loss_functions
import deepracing_models.nn_models.Models
import deepracing_models.nn_models.VariationalModels
import numpy as np
import torch.optim as optim
import pickle
from datetime import datetime
import os
import string
import argparse
import torchvision.transforms as transforms
import yaml
import shutil
import skimage
import skimage.io
import deepracing
from deepracing import trackNames
import deepracing.backend
import imageio
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
import deepracing_models.math_utils.bezier
import socket
import json
from comet_ml.api import API, APIExperiment
import cv2
import torchvision, torchvision.transforms as T
from deepracing_models.data_loading.image_transforms import GaussianBlur
from deepracing.raceline_utils import loadBoundary
from deepracing import searchForFile
import deepracing.path_utils.geometric as geometric
import deepracing_models.math_utils as mu
#torch.backends.cudnn.enabled = False
def run_epoch(experiment, encoder, optimizer, dataloader, recon_loss, loss_weights, use_tqdm = False, plot=False):
    """Train ``encoder`` for one epoch over ``dataloader``.

    Parameters
    ----------
    experiment : comet_ml.Experiment or None
        Kept for interface compatibility; per-step metric logging is
        currently disabled, so it is unused.
    encoder : torch.nn.Module
        Autoencoder; ``encoder(images)`` must return ``(latent, recon)``.
    optimizer : torch.optim.Optimizer
        Optimizer over ``encoder``'s parameters.
    dataloader : iterable
        Yields dicts with an "image" tensor of shape (B, C, H, W).
    recon_loss : callable
        Reconstruction loss module (e.g. MSELoss or BCELoss).
    loss_weights, plot
        Unused; kept so existing call sites keep working (the plotting
        code they fed was dead/commented and has been removed).
    use_tqdm : bool
        Show a tqdm progress bar with the running reconstruction loss.
    """
    # NOTE: dead accumulator variables (cum_loss, cum_param_loss, ...) that
    # were never read have been removed.
    if use_tqdm:
        t = tqdm(enumerate(dataloader), total=len(dataloader))
    else:
        t = enumerate(dataloader)
    encoder.train()
    # Single-device training for now: take device/dtype from the model itself.
    dev = next(encoder.parameters()).device
    dtype = next(encoder.parameters()).dtype
    for (i, imagedict) in t:
        images = imagedict["image"].type(dtype).to(device=dev)
        z, recon = encoder(images)
        loss = recon_loss(recon, images)
        optimizer.zero_grad()
        loss.backward(retain_graph=False)
        # Weight and bias updates.
        optimizer.step()
        if use_tqdm:
            t.set_postfix({"recon" : loss.item()})
def go():
    """Command-line training entry point for the convolutional autoencoder.

    Loads the dataset and model YAML configs, builds the model, optimizer and
    LMDB-backed image dataset(s), then trains for ``num_epochs`` epochs.
    Unless --debug is given, configs and per-epoch encoder/optimizer
    checkpoints are logged to a comet.ml experiment.
    """
    # --- CLI arguments ---------------------------------------------------
    parser = argparse.ArgumentParser(description="Train Image Curve Encoder")
    parser.add_argument("dataset_config_file", type=str, help="Dataset Configuration file to load")
    parser.add_argument("model_config_file", type=str, help="Model Configuration file to load")
    parser.add_argument("output_directory", type=str, help="Where to put the resulting model files")
    parser.add_argument("--debug", action="store_true", help="Don't actually push to comet, just testing")
    parser.add_argument("--plot", action="store_true", help="Plot images upon each iteration of the training loop")
    parser.add_argument("--models_to_disk", action="store_true", help="Save the model files to disk in addition to comet.ml")
    parser.add_argument("--tqdm", action="store_true", help="Display tqdm progress bar on each epoch")
    parser.add_argument("--gpu", type=int, default=None, help="Override the GPU index specified in the config file")
    args = parser.parse_args()
    dataset_config_file = args.dataset_config_file
    debug = args.debug
    plot = args.plot
    models_to_disk = args.models_to_disk
    use_tqdm = args.tqdm
    # --- YAML configuration ----------------------------------------------
    with open(dataset_config_file) as f:
        dataset_config = yaml.load(f, Loader = yaml.SafeLoader)
    config_file = args.model_config_file
    with open(config_file) as f:
        config = yaml.load(f, Loader = yaml.SafeLoader)
    print(dataset_config)
    image_size = dataset_config["image_size"]
    input_channels = config["input_channels"]
    batch_size = config["batch_size"]
    learning_rate = config["learning_rate"]
    momentum = config["momentum"]
    project_name = config["project_name"]
    manifold_channels = config["manifold_channels"]
    loss_weights = config["loss_weights"]
    use_float = config["use_float"]
    loss_func = config["loss_func"]
    # CLI --gpu overrides the config file value (and is written back so the
    # logged config reflects what was actually used).
    if args.gpu is not None:
        gpu = args.gpu
        config["gpu"] = gpu
    else:
        gpu = config["gpu"]
    num_epochs = config["num_epochs"]
    num_workers = config["num_workers"]
    print("Using config:\n%s" % (str(config)))
    # --- model, loss, optimizer ------------------------------------------
    encoder = deepracing_models.nn_models.VariationalModels.ConvolutionalAutoencoder(manifold_channels, input_channels)
    # print("encoder:\n%s" % (str(encoder)))
    if use_float:
        encoder = encoder.float()
    else:
        encoder = encoder.double()
    dtype = next(encoder.parameters()).dtype
    if loss_func=="mse":
        recon_loss = NN.MSELoss().type(dtype)
    elif loss_func=="bce":
        recon_loss = NN.BCELoss().type(dtype)
    else:
        raise ValueError("Unknown loss function: %s" %(loss_func,))
    dsets=[]
    alltags = set(dataset_config.get("tags",[]))
    if gpu>=0:
        print("moving stuff to GPU")
        # NOTE: ``device`` is assigned but not used below; tensors are moved
        # inside run_epoch based on the model's own device.
        device = torch.device("cuda:%d" % gpu)
        encoder = encoder.cuda(gpu)
        recon_loss = recon_loss.cuda(gpu)
    else:
        device = torch.device("cpu")
    optimizer = optim.SGD(encoder.parameters(), lr = learning_rate, momentum=momentum)
    image_size = dataset_config["image_size"]
    # --- datasets: one LMDB-backed ImageDataset per configured dataset ----
    for dataset in dataset_config["datasets"]:
        # Per-dataset config = global keys (minus "datasets") overridden by
        # the dataset's own entries.
        dlocal : dict = {k: dataset_config[k] for k in dataset_config.keys() if (not (k in ["datasets"]))}
        dlocal.update(dataset)
        print("Parsing database config: %s" %(str(dlocal)))
        root_folder = dlocal["root_folder"]
        dataset_tags = dlocal.get("tags", [])
        alltags = alltags.union(set(dataset_tags))
        image_folder = os.path.join(root_folder,"images")
        image_files = [f for f in os.listdir(image_folder) if os.path.isfile(os.path.join(image_folder,f)) and (os.path.splitext(f)[-1].lower() in {".jpg", ".png"})]
        # LMDB map size: 3 bytes/pixel + 12 bytes header per image, with 10%
        # headroom over the number of image files found.
        image_mapsize = int(float(np.prod(image_size)*3+12)*float(len(image_files))*1.1)
        image_lmdb_folder = os.path.join(image_folder,"image_lmdb")
        image_wrapper = deepracing.backend.ImageLMDBWrapper()
        image_wrapper.readDatabase( image_lmdb_folder , mapsize=image_mapsize )
        keys = image_wrapper.getKeys()
        current_dset = PD.ImageDataset(image_wrapper, keys=keys, image_size=image_size)
        dsets.append(current_dset)
        print("\n")
    if len(dsets)==1:
        dset = dsets[0]
    else:
        dset = torch.utils.data.ConcatDataset(dsets)
    dataloader = data_utils.DataLoader(dset, batch_size=batch_size,
                        shuffle=True, num_workers=num_workers, pin_memory=gpu>=0)
    print("Dataloader of of length %d" %(len(dataloader)))
    if debug:
        print("Using datasets:\n%s", (str(dataset_config)))
    # --- output directory / comet.ml experiment ---------------------------
    main_dir = args.output_directory
    if debug:
        output_directory = os.path.join(main_dir, "debug")
        os.makedirs(output_directory, exist_ok=True)
        experiment = None
    else:
        experiment = comet_ml.Experiment(workspace="electric-turtle", project_name=project_name)
        output_directory = os.path.join(main_dir, experiment.get_key())
        if os.path.isdir(output_directory) :
            raise FileExistsError("%s already exists, this should not happen." %(output_directory) )
        os.makedirs(output_directory)
        experiment.log_parameters(config)
        experiment.log_parameters(dataset_config)
        dsetsjson = json.dumps(dataset_config, indent=1)
        experiment.log_parameter("datasets",dsetsjson)
        experiment.log_text(dsetsjson)
        if len(alltags)>0:
            experiment.add_tags(list(alltags))
        experiment_config = {"experiment_key": experiment.get_key()}
        yaml.dump(experiment_config, stream=open(os.path.join(output_directory,"experiment_config.yaml"),"w"), Dumper=yaml.SafeDumper)
        yaml.dump(dataset_config, stream=open(os.path.join(output_directory,"dataset_config.yaml"), "w"), Dumper = yaml.SafeDumper)
        yaml.dump(config, stream=open(os.path.join(output_directory,"model_config.yaml"), "w"), Dumper = yaml.SafeDumper)
        experiment.log_asset(os.path.join(output_directory,"dataset_config.yaml"),file_name="datasets.yaml")
        experiment.log_asset(os.path.join(output_directory,"experiment_config.yaml"),file_name="experiment_config.yaml")
        experiment.log_asset(os.path.join(output_directory,"model_config.yaml"),file_name="model_config.yaml")
    # --- training loop -----------------------------------------------------
    i = 0
    if debug:
        for asdf in range(0,num_epochs):
            run_epoch(experiment, encoder, optimizer, dataloader, recon_loss, loss_weights, use_tqdm=True, plot=plot)
    else:
        encoderpostfix = "epoch_%d_encoder.pt"
        # NOTE: decoderpostfix is defined but never used below.
        decoderpostfix = "epoch_%d_decoder.pt"
        optimizerpostfix = "epoch_%d_optimizer.pt"
        with experiment.train():
            while i < num_epochs:
                time.sleep(2.0)
                postfix = i + 1
                # models_to_disk keeps one file per epoch; otherwise the same
                # two files are overwritten every epoch.
                if models_to_disk:
                    encoderfile = encoderpostfix % (postfix-1)
                    optimizerfile = optimizerpostfix % (postfix-1)
                else:
                    encoderfile = "encoder.pt"
                    optimizerfile = "optimizer.pt"
                print("Running Epoch Number %d" %(postfix))
                #dset.clearReaders()
                tick = time.time()
                run_epoch(experiment, encoder, optimizer, dataloader, recon_loss, loss_weights, use_tqdm=use_tqdm)
                tock = time.time()
                print("Finished epoch %d in %f seconds." % ( postfix , tock-tick ) )
                experiment.log_epoch_end(postfix)
                # Checkpoint model and optimizer state, then upload both to
                # comet.ml under epoch-numbered names.
                encoderout = os.path.join(output_directory,encoderfile)
                with open(encoderout,'wb') as f:
                    torch.save(encoder.state_dict(), f)
                with open(encoderout,'rb') as f:
                    experiment.log_asset(f,file_name=encoderpostfix %(postfix,) )
                optimizerout = os.path.join(output_directory, optimizerfile)
                with open(optimizerout,'wb') as f:
                    torch.save(optimizer.state_dict(), f)
                with open(optimizerout,'rb') as f:
                    experiment.log_asset(f,file_name=optimizerpostfix %(postfix,) )
                i = i + 1
# NOTE(review): mid-file import; conventionally this belongs at the top of the file.
import logging
# Script entry point: configure root logging, then run the training driver above.
if __name__ == '__main__':
    logging.basicConfig()
    go()
| [
"ttw2xk@virginia.edu"
] | ttw2xk@virginia.edu |
f4fa2a2f4e94d2575f4e90e294396d09f4467611 | 56d0a689a62c1d3090947816f424a3166799d000 | /app/util/util.py | c8af6460dcf1ceeef380ac9177e551c7a8d7c3cc | [] | no_license | jrnn/sholdr | 0df3a2ae0117d23d1b0f7a73d9eceaab5610ae57 | d2253e275de6654d8542208afde33830393f24c0 | refs/heads/master | 2020-03-18T04:49:27.915457 | 2018-06-26T08:51:36 | 2018-06-26T08:51:36 | 134,308,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,659 | py | """
This module contains generic utility functions that are needed here and
there.
The several 'apply'-something functions below seem trivial, but they are
needed for WTForm transformations (passed into a field as 'filters').
"""
import re
import uuid
def apply_lower(s):
    """
    Lowercase the given value when it supports .lower(); otherwise hand it
    back unchanged. Meant to be used as a WTForms field filter, so non-string
    values must pass through untouched.
    """
    can_lower = s is not None and hasattr(s, "lower")
    return s.lower() if can_lower else s
def apply_strip(s):
    """
    Strip surrounding whitespace from the given value when it supports
    .strip(); otherwise hand it back unchanged. Meant to be used as a
    WTForms field filter, so non-string values must pass through untouched.
    """
    can_strip = s is not None and hasattr(s, "strip")
    return s.strip() if can_strip else s
def apply_upper(s):
    """
    Uppercase the given value when it supports .upper(); otherwise hand it
    back unchanged. Meant to be used as a WTForms field filter, so non-string
    values must pass through untouched.
    """
    can_upper = s is not None and hasattr(s, "upper")
    return s.upper() if can_upper else s
def format_share_range(lower, upper, places=0):
    """
    Zero-pad both numbers on the left to *places* digits, insert periods as
    thousand separators, and join the two formatted numbers with an em dash.
    (Used primarily for titles of certificates.)
    """
    def grouped(n):
        # Pad to the requested width, then group digits in threes from the
        # right by reversing, slicing in steps of three, and reversing back.
        digits = "{:0>{width}d}".format(n, width=places)[::-1]
        return ".".join(digits[i:i + 3] for i in range(0, places, 3))[::-1]
    return "%s—%s" % (grouped(lower), grouped(upper))
def get_consecutive_ranges(ns):
    """
    Find all runs of consecutive numbers in a _SORTED_ integer list and
    return each run as a (first, last) tuple.
    """
    ranges = []
    if not ns:
        return ranges
    run_start = ns[0]
    prev = ns[0]
    for n in ns[1:]:
        if n != prev + 1:
            # Current run ends here; record it and start a new one.
            ranges.append((run_start, prev,))
            run_start = n
        prev = n
    ranges.append((run_start, prev,))
    return ranges
def get_uuid():
    """
    Return a random (v4) UUID as a 32-character lowercase hex string,
    i.e. the standard representation without the dashes.
    """
    # Improvement: UUID.hex is exactly the dash-free hex form, so the
    # previous str() + re.sub("-", "", ...) round-trip is unnecessary.
    return uuid.uuid4().hex
def is_within_range(t, ts):
    """
    Return True when the integer pair *t* = (a, b) fits entirely inside at
    least one of the (lower, upper) ranges in *ts*. *t* must be a tuple and
    *ts* a list of tuples.
    """
    start, end = t
    return any(low <= start and end <= high for low, high in ts)
def rs_to_dict(rs):
    """
    Translate every row of a ResultProxy into a dictionary keyed by the
    result's column names, and return the dictionaries as a list.
    """
    # Improvement: rs.keys() was previously called once per row even though
    # the column set is identical for every row; hoist it out of the loop.
    keys = rs.keys()
    return [{key: row[key] for key in keys} for row in rs]
| [
"juho.juurinen@gmail.com"
] | juho.juurinen@gmail.com |
fc3cf43a004aa546f7a758790863ae7c54f32330 | 9dd43be6dbb1e528f9799d65b443709da8f6ea5b | /build/gazebo_ros_demos/custom_plugin_tutorial/catkin_generated/pkg.develspace.context.pc.py | 9bff73921acb5e36eb9d81b53614e25888663ffb | [] | no_license | wangdwes/catkin | 03de2986113f9c938e6968052b40c911765980b0 | 918b2d369d9b2861048a57810ec6c7981cabf59a | refs/heads/master | 2021-01-01T17:48:31.308964 | 2014-10-24T16:19:22 | 2014-10-24T16:19:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context (from pkg.context.pc.in).
# The odd '"" != ""' conditionals are artifacts of empty template
# substitutions; do not edit this file by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;gazebo_ros".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "gazebo_tutorials"
# Absolute devel-space path baked in at generation time (machine specific).
PROJECT_SPACE_DIR = "/home/dawei/catkin-ws/devel"
PROJECT_VERSION = "0.0.0"
"wangdwes@gmail.com"
] | wangdwes@gmail.com |
d4b86ce8c2dcf80ee5ce28fd7f9795ce4bfad495 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /93o6y6WKFpQKoDg4T_18.py | 69dd497b116184ce41ce029c7571c5d85d204ef8 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py |
def sort_by_length(lst):
    """Return a new list with the items of *lst* ordered by ascending length."""
    return sorted(lst, key=lambda item: len(item))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
6064a370749e471e4cefe3193b1717f0000647d6 | af4f7d5522e362c207ddc5a06872923fdb38765a | /weixin/views.py | ea98ae4482d92af55be6ba149ba6387c3c05f91d | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | jixuduxing/django-ex | 288b823f7f0282675fc3f206ffd1d0a847f741fc | fab0a44045206ccb0038d372c70867d5dedbc0a8 | refs/heads/master | 2021-01-21T19:27:49.899007 | 2017-07-03T02:44:34 | 2017-07-03T02:44:34 | 92,139,106 | 0 | 0 | null | 2017-05-23T06:47:54 | 2017-05-23T06:47:54 | null | UTF-8 | Python | false | false | 5,438 | py | import os
from django.shortcuts import render
from django.conf import settings
from django.views.generic.base import View
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.template.loader import render_to_string
from django.utils.encoding import smart_str
import hashlib
import time
from .weimsg import WeiMsg
import xml.etree.ElementTree as ET
import traceback
import urllib2
import json
# TOKEN = "******"
API_KEY = 'bb0d3afc6f344cc1a5c177e73a9d3bea'
def paraseMsgXml(rootElem):
    """
    Flatten a WeChat message <xml> element into a dict that maps each direct
    child's tag to its smart_str-coerced text. Any root element that is not
    tagged 'xml' yields an empty dict.
    """
    if rootElem.tag != 'xml':
        return {}
    return {child.tag: smart_str(child.text) for child in rootElem}
def getReplyXml(msg, replyContent):
    """
    Build the WeChat text-reply XML for an incoming message dict: sender and
    receiver are swapped, CreateTime is the current UNIX timestamp, and the
    message type is fixed to 'text'.
    """
    template = "<xml><ToUserName><![CDATA[%s]]></ToUserName><FromUserName><![CDATA[%s]]></FromUserName><CreateTime>%s</CreateTime><MsgType><![CDATA[%s]]></MsgType><Content><![CDATA[%s]]></Content><FuncFlag>0</FuncFlag></xml>"
    fields = (msg['FromUserName'], msg['ToUserName'], str(int(time.time())), 'text', replyContent)
    return template % fields
def responseMsg(request):
    """
    Parse the raw POST body of an incoming WeChat message and return the
    reply XML: 'thankyou!' for the subscribe greeting 'Hello2BizUser',
    'Hello' for everything else.

    Raises KeyError when the posted XML has no <Content> element.
    """
    # Fix: HttpRequest.raw_post_data was removed in Django 1.6; request.body
    # carries the same payload and already works on the Django versions this
    # file targets (the Weixin.post handler in this module uses it).
    rawStr = smart_str(request.body)
    msg = paraseMsgXml(ET.fromstring(rawStr))
    if msg['Content'] == 'Hello2BizUser':
        replyContent = 'thankyou!'
    else:
        replyContent = 'Hello'
    return getReplyXml(msg, replyContent)
# Token shared with the WeChat admin console; both sides must use the same
# value for signature verification to succeed.
TOKEN = "jixuduxing"
def checkSignature(request):
    """
    Verify a WeChat server-validation (GET) request.

    WeChat computes SHA1 over the sorted concatenation of
    [token, timestamp, nonce]; when the hex digest equals the 'signature'
    query parameter, the view must echo the 'echostr' parameter back.

    Returns the echo string on success, otherwise None.
    """
    signature = request.GET.get("signature", None)
    timestamp = request.GET.get("timestamp", None)
    nonce = request.GET.get("nonce", None)
    echoStr = request.GET.get("echostr", None)
    # Fix: dropped the no-op `global TOKEN` declaration (the module-level
    # constant is only read here, never assigned).
    tmpList = [TOKEN, timestamp, nonce]
    tmpList.sort()
    tmpstr = "%s%s%s" % tuple(tmpList)
    # Fix: hashlib.sha1() requires bytes on Python 3 (passing a str raised
    # TypeError); encoding is a no-op change for the ASCII inputs involved,
    # so Python 2 behaviour is unchanged.
    digest = hashlib.sha1(tmpstr.encode("utf-8")).hexdigest()
    if digest == signature:
        return echoStr
    return None
@csrf_exempt
def handleRequest(request):
    """
    WeChat webhook entry point: GET requests are server-validation pings
    (answered with the echo string as plain text); POST requests carry user
    messages (answered with reply XML). Any other method yields None.
    """
    method = request.method
    if method == 'GET':
        return HttpResponse(checkSignature(request), content_type="text/plain")
    if method == 'POST':
        return HttpResponse(responseMsg(request), content_type="application/xml")
    return None
class Weixin(View):
    """
    Class-based WeChat webhook: GET handles server validation, POST answers
    user messages by forwarding them to the Tuling chat-bot API.
    """
    # Must match the token configured in the WeChat admin console.
    token = 'jixuduxing'
    def validate(self, request):
        """Check the WeChat signature of a server-validation request."""
        import logging
        signature = request.GET.get('signature', '')
        logging.debug('signature:'+signature)
        timestamp = request.GET.get('timestamp', '')
        logging.debug('timestamp:'+timestamp)
        nonce = request.GET.get('nonce', '')
        logging.debug('nonce:'+nonce)
        # NOTE(review): `list` shadows the builtin of the same name.
        list = [self.token, timestamp, nonce]
        list.sort()
        sha2 = hashlib.sha1()
        # NOTE(review): on Python 3 map() is lazy, so sha2.update never runs
        # and the digest is of the empty string; this line relies on
        # Python 2's eager map — confirm the runtime before upgrading.
        map(sha2.update, list)
        hashcode = sha2.hexdigest()
        logging.debug('hashcode:' + hashcode)
        if hashcode == signature:
            return True
        logging.debug('return False')
        return False
    @csrf_exempt
    def get(self, request):
        """Answer WeChat's validation ping by echoing 'echostr'."""
        import logging
        logging.debug('request:')
        # NOTE(review): both branches return the same echostr response, so
        # a failed validation is currently not rejected.
        if self.validate(request):
            logging.debug('echostr')
            return HttpResponse(request.GET.get('echostr', '2'))
        logging.debug('2')
        return HttpResponse(request.GET.get('echostr', '2'))
    @csrf_exempt
    def post(self,request):
        """
        Relay an incoming WeChat message to the Tuling bot API and render
        the bot's answer as reply XML. Any failure is logged and swallowed
        (the handler then implicitly returns None).
        """
        import logging
        try:
            logging.debug('post')
            logging.debug(repr(request))
            logging.debug(request.body)
            recv_msg = WeiMsg(request.body)
            raw_tulingURL = "http://www.tuling123.com/openapi/api?key=%s&%s&info=" % (API_KEY,recv_msg.from_user_name)
            tulingURL = "%s%s" % (raw_tulingURL,urllib2.quote(recv_msg.content))
            req=urllib2.Request(tulingURL)
            raw_json=urllib2.urlopen(req).read()
            hjson=json.loads(raw_json)
            # The Tuling response has 2 keys (code, text) for plain answers
            # and 3 keys when a link URL is included.
            length=len(hjson.keys())
            content=hjson['text'].encode('utf-8')
            if length==3:
                replyContent= "%s%s"%(content,hjson['url'].encode('utf-8'))
            elif length==2:
                replyContent= content
            else:
                return "please input again."
            # Swap sender/receiver for the reply and stamp the current time.
            context = {
                'toUser': recv_msg.from_user_name,
                'fromUser': recv_msg.to_user_name,
                'createTime': int(time.time()),
                'type': recv_msg.msg_type,
                'content': replyContent,
            }
            # context = {
            # 'toUser': recv_msg.from_user_name,
            # 'fromUser': recv_msg.to_user_name,
            # 'createTime': int(time.time()),
            # 'type': recv_msg.msg_type,
            # 'content': recv_msg.content,
            # }
            logging.debug(str(context) )
            rendered = render_to_string('reply_text.xml', context)
            return HttpResponse(rendered)
        except Exception as ex:
            logging.debug('something wrong:')
            logging.debug(ex)
            # raise PermissionDenied
def hello(request):
    """
    Minimal echo view: GET requests answer with the 'echostr' query
    parameter ('error' when absent); any other method answers
    "Hello world ! ".
    """
    if request.method == 'GET':
        rsp = request.GET.get('echostr', 'error')
    else:
        return HttpResponse("Hello world ! ")
    return HttpResponse(rsp)
    # Fix: removed an unreachable trailing `return render(...)` left over
    # from the project template -- it sat after an unconditional return and
    # referenced undefined names (hostname, database, PageView).
| [
"jixuduxing@gmail.com"
] | jixuduxing@gmail.com |
52d2a549bfa98b389a989407e8f89608d0b6d76c | 72e4271f6f6cbe98ae40807f0f22133de026e3fa | /LightBlog/comment/migrations/0001_initial.py | 5eeea8760b99f57c3cebd408eb87d7ce9f1293c8 | [] | no_license | QGtiger/LightBlog_backend | 7a2aa7e87ec83f44756152a53771cf8b36a93999 | 3be4c23a5cb5377e671031403a73e682df05a494 | refs/heads/master | 2020-08-03T01:27:01.430625 | 2020-05-06T03:58:17 | 2020-05-06T03:58:17 | 211,581,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | # Generated by Django 2.1.5 on 2019-04-23 12:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
# Auto-generated Django migration (2019-04-23): creates the Comment_reply
# model. Generated migrations should not be edited by hand; add a new
# migration instead.
class Migration(migrations.Migration):
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('article', '0006_auto_20190423_1211'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment_reply',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('reply_type', models.IntegerField(default=0)),
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                ('body', models.TextField()),
                ('reply_comment', models.IntegerField(default=0)),
                ('comment_reply', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comment_reply', to='article.Comment')),
                ('comment_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='commentator_reply', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest replies first by default.
                'ordering': ('-created',),
            },
        ),
    ]
| [
"qg6285@apac.arcsoft.corp"
] | qg6285@apac.arcsoft.corp |
cb7fb08c690282edfd833933070c697f756dcb10 | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/ADD/ADDmonoPhoton_MD_1_d_8_TuneCUETP8M1_13TeV_pythia8_cfi.py | 302e89726365a986e9049cc298156fb6aa79d2a4 | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 1,425 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
# CMSSW Pythia8 generator fragment: ADD (large extra dimensions) graviton +
# photon production (ffbar2Ggamma) at 13 TeV with the CUETP8M1 tune.
generator = cms.EDFilter("Pythia8GeneratorFilter",
    maxEventsToPrint = cms.untracked.int32(1),
    pythiaPylistVerbosity = cms.untracked.int32(1),
    filterEfficiency = cms.untracked.double(1.0),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    # Centre-of-mass energy in GeV (13 TeV).
    comEnergy = cms.double(13000.),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CUEP8M1SettingsBlock,
        processParameters = cms.vstring( ## see details on http://home.thep.lu.se/~torbjorn/php8135/ExtraDimensionalProcesses.php?filepath=files/
            'ExtraDimensionsLED:ffbar2Ggamma = on',
            'ExtraDimensionsLED:CutOffmode = 1',
            'ExtraDimensionsLED:t = 0.5',
            'ExtraDimensionsLED:n = 8',
            'ExtraDimensionsLED:MD = 1000.',
            'ExtraDimensionsLED:LambdaT = 1000.',
            '5000039:m0 = 1200.',
            '5000039:mWidth = 1000.',
            '5000039:mMin = 1.',
            '5000039:mMax = 13990.',
            'PhaseSpace:pTHatMin = 130.'
        ),
        parameterSets = cms.vstring('pythia8CommonSettings',
                                    'pythia8CUEP8M1Settings',
                                    'processParameters',)
    )
)
| [
"sheffield@physics.rutgers.edu"
] | sheffield@physics.rutgers.edu |
bc332131f0a6a057ee10e5475640eb28f9b9d4e2 | f846ffb38d1e4ec046afa9091363f1b560806a4b | /blog/home/migrations/0003_comment.py | 4e72789654e6e91e09b02177f1d73268b3b8b9ef | [
"MIT"
] | permissive | poemcoding/django_blog | 800990abf939b8f996bc94ee47e535b7a4e1d540 | 2a9746d075ff17cc3ffc62b233fd9f17997f6a9e | refs/heads/main | 2023-06-14T05:59:18.897694 | 2021-07-09T14:10:31 | 2021-07-09T14:10:31 | 376,488,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,128 | py | # Generated by Django 2.2 on 2021-06-29 14:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration (2021-06-29): creates the Comment model.
# Generated migrations should not be edited by hand; add a new migration
# instead.
class Migration(migrations.Migration):
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('home', '0002_article'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('article', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.Article')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Chinese admin labels (presumably "comment management" --
                # runtime strings, left untranslated).
                'verbose_name': '评论管理',
                'verbose_name_plural': '评论管理',
                'db_table': 'tb_comment',
            },
        ),
    ]
| [
"1270502322@qq.com"
] | 1270502322@qq.com |
85beef64fe4dbf5c85d8171398f3294ae72f2cd2 | d9a577a1e6b04413050f4259b0534cd91e412b86 | /src/blog/migrations/0004_auto_20190501_1639.py | 02ec0f400cb56e81d29e5903da26a67a866ddeed | [] | no_license | thinkcodeforlife/django-blog | fe344767fdd0ff5ddabc6274408b5099826152e1 | 0ea098157466e5d33763ee08a4b81e27af7d668a | refs/heads/master | 2020-05-24T08:20:40.982134 | 2019-05-17T10:38:23 | 2019-05-17T10:38:23 | 187,182,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | # Generated by Django 2.2 on 2019-05-01 13:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_blogpost_slug'),
]
operations = [
migrations.AlterField(
model_name='blogpost',
name='slug',
field=models.SlugField(unique=True),
),
]
| [
"thinkcodeforlife@users.noreply.github.com"
] | thinkcodeforlife@users.noreply.github.com |
9699d2d730841cae22d82c3afd27e8c48dcedfe4 | 241dab5ef171872d15485809b2616319ed7eec71 | /backend/scripts/data_scrape/eoddata_scrape_daily.py | 5f9c1f499227a581793821130aba27e5ca037a0f | [] | no_license | gitori90/Python_stocks_scrape_analysis | 2dca7d99e8d70e114c517262636b481b1a7abd41 | b9d471f107460626d576364e619358f48230da52 | refs/heads/master | 2022-11-20T10:04:54.607357 | 2020-07-23T10:14:12 | 2020-07-23T10:14:12 | 265,858,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,674 | py | import pandas as pd
import threading
import backend.scripts.data_scrape.path_finding_functions as path_functions
from backend.scripts.df_utils.data_frame_utilities import replace_all_nan
def load_sheet_to_dataframe(sheet_name, title):
    """
    Load one worksheet of the symbols workbook for *title* into a DataFrame.

    Aborts the process with a message when the symbols file cannot be
    located (preserving the original exit-on-failure behaviour).
    """
    try:
        symbols_file_name = path_functions.get_symbols_file_path(title)
    # Fix: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # catching Exception keeps the intended "abort with message" behaviour
    # for ordinary lookup errors only.
    except Exception:
        exit("No symbols file found.")
    return pd.read_excel(symbols_file_name, sheet_name=sheet_name)
def get_company_daily_data(filtered_web_page, symbol):
    """
    Scan a flat list of table cells for *symbol* and return its daily quote
    as [high, low, close, volume, value_change, percent_change], where the
    percent change is given the sign of the value change when both parse as
    numbers. When the symbol is not found, every field is the string 'nan'.
    """
    for idx, cell in enumerate(filtered_web_page):
        if cell.get_text() != symbol:
            continue
        # Fixed cell offsets relative to the symbol cell in the quotes table.
        high = filtered_web_page[idx + 2].get_text()
        low = filtered_web_page[idx + 3].get_text()
        close = filtered_web_page[idx + 4].get_text()
        volume = filtered_web_page[idx + 5].get_text()
        value_change = filtered_web_page[idx + 6].get_text()
        percent_change = filtered_web_page[idx + 8].get_text()
        try:
            percent_change = str(float(percent_change) * float(value_change)/abs(float(value_change)))
        except:
            # Non-numeric or zero value change: keep the raw scraped text.
            pass
        return [high, low, close, volume, value_change, percent_change]
    return ['nan'] * 6
def build_data_frame_from_rows(list_of_daily_data_rows):
    """
    Transpose per-company rows of [high, low, close, volume, value-change,
    percent-change] into named columns and wrap them in a DataFrame.
    """
    headers = ["High", "Low", "Close", "Volume", "Value-Change", "Percent-Change"]
    # Transpose: one column per position in the first row.
    width = len(list_of_daily_data_rows[0])
    columns = [
        [row[col] for row in list_of_daily_data_rows]
        for col in range(width)
    ]
    frame_data = {headers[i]: columns[i] for i in range(len(headers))}
    return pd.DataFrame(frame_data)
class ReadPartialDailyData(threading.Thread):
    """
    Worker thread that scrapes the quotes page for one sheet letter, merges
    the scraped daily figures into that letter's symbols DataFrame, and
    exposes the result via `loaded_partial_frame_list`.
    """
    def __init__(self, sheet_name, target_url, closing_url, title):
        threading.Thread.__init__(self)
        # Sheet letter; doubles as the page-name fragment in the URL.
        self.sheet_name = sheet_name
        self.closing_url = closing_url
        self.target_url = target_url
        self.title = title
        # Populated by run(); read by the caller after join().
        self.loaded_partial_frame_list = []
    def run(self):
        """Fetch, parse and merge one letter page (thread entry point)."""
        partial_daily_data_frames_list = []
        full_url = self.target_url + self.sheet_name + self.closing_url
        page = path_functions.use_requests_get(full_url)
        # BeautifulSoup-style filtering: quotes table -> its <td> cells.
        first_filter = page.find_all("table", class_="quotes")
        second_filter = first_filter[0].find_all("td")
        sheet_dataframe = load_sheet_to_dataframe(self.sheet_name, self.title)
        daily_data_rows = []
        for symbol in sheet_dataframe['Symbol']:
            daily_company_data_row = get_company_daily_data(second_filter, symbol)
            daily_data_rows.append(daily_company_data_row)
        new_daily_letter_frame = build_data_frame_from_rows(daily_data_rows)
        # Join on index: scraped rows are in the same order as the symbols.
        add_to_sheet_dataframe = sheet_dataframe.join(new_daily_letter_frame, how='outer')
        # Drop the index column carried over from the source Excel file.
        remove_redundant_column_dataframe = add_to_sheet_dataframe.drop(columns=["Unnamed: 0"])
        replaced_nan_dataframe = replace_all_nan(remove_redundant_column_dataframe)
        partial_daily_data_frames_list.append(replaced_nan_dataframe)
        self.loaded_partial_frame_list = partial_daily_data_frames_list
def read_daily_data(target_url, title, sheet_letter_list):
    """
    Scrape every per-letter quotes page concurrently (one worker thread per
    sheet letter) and return the collected DataFrames in letter order.
    """
    workers = [
        ReadPartialDailyData(letter, target_url, ".htm", title)
        for letter in sheet_letter_list
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    frames = []
    for worker in workers:
        frames.extend(worker.loaded_partial_frame_list)
    return frames
def write_daily_data_to_excel(today_site_data_list, site_title, sheet_letter_list):
    """
    Write one DataFrame per sheet letter into the site's daily-data workbook.
    Assumes today_site_data_list and sheet_letter_list are parallel lists.
    """
    file_path = path_functions.set_daily_data_file_path(site_title)
    writer = pd.ExcelWriter(file_path, engine='xlsxwriter')
    for i in range(len(sheet_letter_list)):
        sheet_dataframe = today_site_data_list[i]
        sheet_dataframe.to_excel(writer, sheet_name=sheet_letter_list[i], index=False)
    # NOTE(review): ExcelWriter.save() is the legacy pandas API -- confirm the
    # pinned pandas version before upgrading (newer versions use close()).
    writer.save()
def create_daily_data(section_root_list_url, site_title, sheet_letter_list):
    """
    Top-level driver: scrape today's data for every sheet letter, then
    persist the results to the site's daily Excel workbook.
    """
    today_site_data_list = read_daily_data(section_root_list_url, site_title, sheet_letter_list)
    write_daily_data_to_excel(today_site_data_list, site_title, sheet_letter_list)
| [
"orifreedman@gmail.com"
] | orifreedman@gmail.com |
fe5021e04b8f5a928fd508dbb912b1ca292dde6e | 041d13ee537f70b192e28ee2c5e64c0d1ed21e3f | /mutate.py | 11dafb10391d3b62b54f842b39edd6b82b563194 | [] | no_license | rafgro/vanilla_evo | 359694a2630331ef9f3cd159ef5458abed51201d | b7522bf882d08109a20d5ccb744d69601bcb381a | refs/heads/main | 2023-02-25T03:37:41.148399 | 2021-01-04T19:58:57 | 2021-01-04T19:58:57 | 316,687,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,312 | py | """
Mutation Module
Main Purpose
------------
Introduce primary source of variability within genomes
"""
from random import randint
from codon import Codon
def mutate(agenome, frequency_table):
    """ Change sequence of codons in a random way

    Parameters
    ----------
    agenome: Genome class
        Diploid genome to modify IN PLACE
    frequency_table: dictionary
        Should have frequencies in percentage of
        singles - single substitutions anywhere
        expansions - expansion at any side of the genome
        deletions - single deletions anywhere
    """
    # correcting frequency table
    # NOTE(review): this scales the CALLER's dict in place; calling mutate()
    # twice with the same table compounds the x1000 scaling.
    for k, v in frequency_table.items():
        frequency_table[k] *= 1000
    # calculations of local frequencies and actual events
    # NOTE(review): one shared dice_roll per position means the three event
    # types are correlated rather than independently sampled.
    no_of_substitutions = 0
    no_of_expansions = 0
    no_of_deletions = 0
    for _ in range(len(agenome.sequence_A)):
        dice_roll = randint(1, 100000)
        if dice_roll < frequency_table['singles']:
            no_of_substitutions += 1
        if dice_roll < frequency_table['expansions']:
            no_of_expansions += 1
        if dice_roll < frequency_table['deletions']:
            no_of_deletions += 1
    # single substitutions
    # NOTE(review): randint's upper bound is inclusive, so if min_length()
    # returns the sequence length this can index one past the end -- TODO
    # confirm min_length() semantics.
    for _ in range(no_of_substitutions):
        locus = randint(0, agenome.min_length())
        if randint(0, 1) == 0: # diploid substitution
            agenome.sequence_A[locus].mutate()
            agenome.sequence_B[locus].mutate()
        else: # haploid substituion
            if randint(0, 1) == 0:
                agenome.sequence_A[locus].mutate()
            else:
                agenome.sequence_B[locus].mutate()
    # single deletions
    # NOTE(review): loci are drawn against the pre-deletion length even as
    # earlier deletions shrink the sequences -- possible IndexError; verify.
    for _ in range(no_of_deletions):
        locus = randint(0, agenome.min_length())
        if randint(0, 1) == 0: # diploid deletion
            del agenome.sequence_A[locus]
            del agenome.sequence_B[locus]
        else: # haploid deletion
            if randint(0, 1) == 0:
                del agenome.sequence_A[locus]
            else:
                del agenome.sequence_B[locus]
    # expansions
    # Append a fresh codon (and an identical copy) to the end of each strand.
    for _ in range(no_of_expansions):
        expansion = Codon()
        expansion_copy = Codon(init=expansion.val)
        agenome.sequence_A.append(expansion)
        agenome.sequence_B.append(expansion_copy)
    # finish
| [
"rafal.grochala@op.pl"
] | rafal.grochala@op.pl |
5588811602468117dcf4c2c815b823cd9c66efd6 | 0bb474290e13814c2498c086780da5096453da05 | /abc151/C/main.py | de4737e84810f2b025becf5752de28655a3a7833 | [] | no_license | ddtkra/atcoder | 49b6205bf1bf6a50106b4ae94d2206a324f278e0 | eb57c144b5c2dbdd4abc432ecd8b1b3386244e30 | refs/heads/master | 2022-01-25T15:38:10.415959 | 2020-03-18T09:22:08 | 2020-03-18T09:22:08 | 208,825,724 | 1 | 0 | null | 2022-01-21T20:10:20 | 2019-09-16T14:51:01 | Python | UTF-8 | Python | false | false | 1,054 | py | #!/usr/bin/env python3
import sys
sys.setrecursionlimit(10000000)
INF = 1<<32
def solve(N: int, M: int, p: "List[int]", S: "List[str]"):
    """
    Count accepted problems and their penalties, then print "ac wa".

    solved[q] records whether problem q ever got an AC; penalties[q] counts
    the WAs submitted before that first AC. WAs after an AC, and WAs on
    never-accepted problems, do not count.
    """
    solved = [0] * (N + 1)
    penalties = [0] * (N + 1)
    for problem, verdict in zip(p, S):
        if verdict == 'AC':
            solved[problem] = 1
        elif not solved[problem]:
            penalties[problem] += 1
    ac = sum(solved[1:])
    wa = sum(pen for flag, pen in zip(solved[1:], penalties[1:]) if flag)
    print(ac, wa)
def main():
    """Read N, M, then M (p_i, S_i) pairs from stdin and delegate to solve()."""
    def iterate_tokens():
        # Yield whitespace-separated tokens across all of stdin.
        for line in sys.stdin:
            for word in line.split():
                yield word
    tokens = iterate_tokens()
    N = int(next(tokens))  # type: int
    M = int(next(tokens))  # type: int
    p = [int()] * (M)  # type: "List[int]"
    S = [str()] * (M)  # type: "List[str]"
    for i in range(M):
        p[i] = int(next(tokens))
        S[i] = next(tokens)
    solve(N, M, p, S)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"deritefully@gmail.com"
] | deritefully@gmail.com |
fdb50e78d8f3cd88a933c0267dc004b8cf101a43 | 424f489cb3bdbec53431bbcfbcb63f54ff030e15 | /apps/users/serializers.py | 919e4c3cad1951bfbbd0e7acebee7397bfe0a344 | [] | no_license | overproof9/curs-valutar | 9f93c8c3ba89dd95e54bd98fefe63c610cc089fa | db6019e6247ff54f703c30c7149b0de66e56d7eb | refs/heads/master | 2022-11-06T08:31:39.611894 | 2020-06-24T07:05:58 | 2020-06-24T07:05:58 | 274,331,809 | 0 | 0 | null | 2020-06-23T06:55:27 | 2020-06-23T06:55:26 | null | UTF-8 | Python | false | false | 297 | py | from django.contrib.auth.models import User
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
    """DRF serializer for Django's built-in User model."""
    # write_only: the password is accepted on input but never echoed back
    # in serialized output.
    password = serializers.CharField(write_only=True)
    class Meta:
        model = User
        fields = ("first_name", "last_name", "username", "password",)
"ordersone@gmail.com"
] | ordersone@gmail.com |
db3d0d50a7710a487c431dd03dac4101012f8f99 | 3e8be7dd89413314afa0d70cf2933d467e94946d | /solution.py | ad68b77ec4735b3be2c54cdd28774a61e14151e0 | [] | no_license | shubhra-3/Area-of-burnt-forst-prdiction | e4b709c8824c4477f7beb29cb35eacc38c400d8e | 7b31b990f006d4d34b011985e13fe96222f1c7eb | refs/heads/master | 2020-04-22T16:45:35.329814 | 2019-02-13T14:24:48 | 2019-02-13T14:24:48 | 170,519,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,289 | py |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv("train.csv")
#colnames=['Id','X','Y','month','day','5','6','7','8','9','10','11','12','13']
testset = pd.read_csv('test.csv')#, names=colnames, header=None)
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
dataset['area'] = (1+dataset['area']) # for 0 problem of log
dataset['area'] = np.log(dataset['area'])
X = dataset.iloc[:, :-1].values
Y = dataset.iloc[:, -1].values
test = testset.iloc[:, 1:-1].values
# Encoding categorical data
# Encoding the Independent Variable
labelencoder_X = LabelEncoder()
labelencoder_Xt = LabelEncoder()
#this sequence is important, since toarray() wont work if we have categorical features
# so we have weeks, then months in our final X, then rest other columns in sequence they apper in dataset
X[:, 2] = labelencoder_X.fit_transform(X[:, 2])
X[:, 3] = labelencoder_X.fit_transform(X[:, 3])
test[:, 2] = labelencoder_Xt.fit_transform(test[:, 2])
test[:, 3] = labelencoder_Xt.fit_transform(test[:, 3])
onehotencoder = OneHotEncoder(categorical_features = [2])
X = onehotencoder.fit_transform(X).toarray()
X = X[:, 1:]
onehotencoder = OneHotEncoder(categorical_features = [2])
test = onehotencoder.fit_transform(test).toarray()
test = test[:, 1:]
onehotencoder = OneHotEncoder(categorical_features = [12])
X = onehotencoder.fit_transform(X).toarray()
X = X[:, 1:]
onehotencoder = OneHotEncoder(categorical_features = [12])
test = onehotencoder.fit_transform(test).toarray()
test = test[:, 1:]
"""from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values='NaN',strategy='mean',axis=0)
imputer = imputer.fit(X[: ,1:3])
X[: ,1:3] =imputer.transform(X[: ,1:3])"""
"""from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X=LabelEncoder()
X[: ,0] = labelencoder_X.fit_transform(X[: ,0])
onehotencoder = OneHotEncoder(categorical_features = [0])
X = onehotencoder.fit_transform(X).toarray()
labelencoder_Y=LabelEncoder()
Y = labelencoder_Y.fit_transform(Y)"""
from sklearn.cross_validation import train_test_split
X_train ,X_test,Y_train,Y_test = train_test_split(X, Y, test_size=0, random_state=0)
"""from sklearn.preprocessing import StandardScaler
sc_X=StandardScaler()
X_train=sc_X.fit_transform(X_train)
X_test=sc_X.fit_transform(X_test)"""
import statsmodels.formula.api as sm
X = np.append(arr = np.ones((450 ,1)).astype(int), values = X , axis = 1)
X_opt = X[:, [0 ,1 ,2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,16,17,18,19,20,21,22,23,24,25,26]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#26
X_opt = X[:, [0 ,1 ,2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,16,17,18,19,20,21,22,23,24,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#4
X_opt = X[:, [0 ,1 ,2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,16,17,18,19,20,21,22,23,24,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#1
X_opt = X[:, [0 ,2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,16,17,18,19,20,21,22,23,24,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#7
X_opt = X[:, [0 ,2, 3, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15,16,17,18,19,20,21,22,23,24,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#18
X_opt = X[:, [0 ,2, 3, 5, 6, 8, 9,10, 11, 12, 13, 14, 15,16,17 ,19,20,21,22,23,24,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#10
X_opt = X[:, [0 ,2, 3, 5, 6, 8, 9, 11, 12, 13, 14, 15,16,17 ,19,20,21,22,23,24,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#11
X_opt = X[:, [0 ,2, 3, 5, 6, 8, 9, 12, 13, 14, 15,16,17 ,19,20,21,22,23,24,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#24
X_opt = X[:, [0 ,2, 3, 5, 6, 8, 9, 12, 13, 14, 15,16,17 ,19,20,21,22,23,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#13
X_opt = X[:, [0 ,2, 3, 5, 6, 8, 9, 12, 14, 15,16,17 ,19,20,21,22,23,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#19
X_opt = X[:, [0 ,2, 3, 5, 6, 8, 9, 12, 14, 15,16,17,20,21,22,23,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#12
X_opt = X[:, [0 ,2, 3, 5, 6, 8, 9, 14, 15,16,17,20,21,22,23,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#6
X_opt = X[:, [0 ,2, 3, 5, 8, 9, 14, 15,16,17,20,21,22,23,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#22
X_opt = X[:, [0 ,2, 3, 5, 8, 9, 14, 15,16,17,20,21,23,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#2
X_opt = X[:, [0, 3, 5, 8, 9, 14, 15,16,17,20,21,23,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#3
X_opt = X[:, [0 , 5, 8, 9, 14, 15,16,17,20,21,23,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#0
X_opt = X[:, [5, 8, 9, 14, 15,16,17,20,21,23,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#5
X_opt = X[:, [ 8, 9, 14, 15,16,17,20,21,23,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#14
X_opt = X[:, [ 8, 9, 15,16,17,20,21,23,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#15
X_opt = X[:, [ 8, 9,16,17,20,21,23,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#9
X_opt = X[:, [ 8,16,17,20,23,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
#20
X_opt = X[:, [ 8,16,17,23,25]]
regressor_OLS = sm.OLS( endog = Y ,exog = X_opt).fit()
regressor_OLS.summary()
# Train a plain LinearRegression on the five columns selected by the
# backward-elimination pass above.
X_trainopt = X_train[:, [ 8,16,17,23,25]]
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_trainopt , Y_train)
# Evaluate on the held-out split using the same column subset.
X_testopt = X_test[:, [ 8,16,17,23,25]]
Y_pred = regressor.predict(X_testopt)
from sklearn.metrics import mean_squared_error
from math import sqrt
# Undo the log transform on the first output column before scoring.
# NOTE(review): Y_test is NOT exp'd here, so rms compares exp(pred) against
# the (presumably still log-scaled) targets — confirm this is intended.
Y_pred[:, 0] = np.exp(Y_pred[:, 0])
rms = sqrt(mean_squared_error(Y_test, Y_pred))
# Score the competition test set with the same feature subset.
test_opt = test[:, [ 8,16,17,23,25]]
pred = regressor.predict(test_opt)
#pred_anti = [(np.exp(x)) for x in [i for i in pred]]
# Build the submission file: invert the log transform and shift by +1
# (inverse of the usual log(1 + area) target encoding).
# NOTE(review): the read_csv result is immediately overwritten by the
# DataFrame constructor on the next line — the read appears to be dead code.
solution = pd.read_csv('sampleSubmission.csv')
solution = pd.DataFrame({'area':pred, 'Id':testset['Id']})
solution['area']=np.exp(solution['area'])
solution['area']=1+solution['area']
solution.to_csv('sampleSubmission.csv',index = False, sep=',', header=True, columns=["Id","area"])
# Commented-out DecisionTree experiment kept for reference (dead code:
# this is a bare string literal, never executed).
"""
from sklearn.tree import DecisionTreeClassifier
regressorDT = DecisionTreeClassifier(random_state = 0)
regressorDT.fit(X_train , Y_train)
Y_predDT = regressorDT.predict(X_test)
rmsDT = sqrt(mean_squared_error(Y_test, Y_predDT))
"""
# Random-forest baseline on the full (unreduced) feature set.
from sklearn.ensemble import RandomForestClassifier
regressorRF = RandomForestClassifier(n_estimators = 100,random_state = 0)
regressorRF.fit(X_train , Y_train)
# Bug fix: the prediction was stored in Y_predDT while the RMS line read the
# undefined name Y_predRF, raising NameError. Use Y_predRF consistently.
Y_predRF = regressorRF.predict(X_test)
rmsRF = sqrt(mean_squared_error(Y_test, Y_predRF))
"noreply@github.com"
] | noreply@github.com |
e24d7c430d94b09680a7007fec6b40a7a58a2934 | 192dae6c34e098de419ddac360e2ff33ae7fd0eb | /rubbish/neighours/neighbour_api.py | 41ee4960fa4921a187ab31fe965b48ff1fcafdfd | [] | no_license | whelks-chance/wiserd3 | 33298d1344253e5cca26c984c265f4b15e475d72 | 1df72cc8e01f991dfb017d77df3e864418cd8ed9 | refs/heads/master | 2023-05-05T02:53:33.945769 | 2022-04-20T08:19:56 | 2022-04-20T08:19:56 | 38,719,496 | 3 | 1 | null | 2023-04-21T20:25:23 | 2015-07-07T23:04:36 | JavaScript | UTF-8 | Python | false | false | 10,629 | py | # coding=utf-8
import os
from copy import deepcopy
from openpyxl.reader.excel import load_workbook
# Configure Django before importing ORM modules: this script runs standalone
# (outside manage.py), so it must point at the project settings and call
# django.setup() itself before dataportal3.models can be imported.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wiserd3.settings')
import django
django.setup()
from django.db import connections
from dataportal3 import models
import json
import requests
from django.contrib.gis.geos import GEOSGeometry
from openpyxl import Workbook
class Neighbours():
    """Scrapes Neighbourhood Watch scheme data from the neighbourhoodalert
    API, accumulates the results, saves them to Excel workbooks, and can
    rebuild a merged GeoJSON file from a previously saved workbook.

    Python 2 code (print statements, unicode builtin).
    """

    def __init__(self):
        # IDs of schemes already collected, used to de-duplicate API results.
        self.area_ids = set()
        # One row per scheme: [id, name, postcode, members, households,
        # centroid JSON, WKT, convex-hull JSON] — see save() for the columns.
        self.table_as_arrays = []
        # Skeleton FeatureCollection filled in by read_xls_build_geojson().
        self.default_geojson = { "type": "FeatureCollection",
                                 "features": [
                                 ]
                               }
        # Template feature (geometry/properties are placeholders that get
        # replaced via deepcopy in read_xls_build_geojson()).
        self.default_feature = { "type": "Feature",
                                 "geometry": {
                                     "type": "Polygon",
                                     "coordinates": [
                                         [ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0],
                                           [100.0, 1.0], [100.0, 0.0] ]
                                     ]
                                 },
                                 "properties": {
                                     "prop0": "value0",
                                     "prop1": {"this": "that"}
                                 }
                               }

    def do_thing(self, lat='51.4791121', lng='-3.1779989'):
        """Fetch NHW schemes near (lat, lng) and append new ones to
        self.table_as_arrays, de-duplicating on nhw_scheme_id."""
        api_url = "https://api.neighbourhoodalert.co.uk/api/NHWNScheme/getLocalSchemes?lat={}&lng={}&numberPerPage=100&page=1".format(lat, lng)
        # Unused reference URL for the public postcode-search page.
        page_url = "http://www.ourwatch.org.uk/postcode-search/?postcode=cf101aa"
        # SECURITY NOTE(review): bearer token is hard-coded in source — it
        # should be moved to configuration/environment before sharing.
        res = requests.get(
            api_url,
            headers={
                'Authorization': 'Bearer AP__ApOeCABbigwm2NsBRQJlJr9dKBzbwTQch3OtrQGPOiL97ccoeZYir07kG9IgTtFnZNFNOHAuuXiOzbhKPC1YagS1EOL5eDy9JkyTZd_0lOlDCgzZwmhkzm9xzFeoLkMKHzncF3SLC4DLHAvH5JxUcWTawHWE1mHuHRyeBx-sfalfWMlrMPxJMiplao5vAxUnviBucnqmsSMC-pgylvqs1c4jNNisGzjF4GH3mfMPf1NOXwY6dDExr_xVNREDxnlVq5fpgII4n6LreZ5Qf4WMGeq6VGerwZElDBuZZ4xO0bjVns0nPd9m0MJr3jTqcnxLPbtyZLgm0XRzh45JxliEsmvofKIv1uJvYLcMjj6GeQ-q6EP4nj-gOqJ2rdYCTKuV3fJ0FRW2XIkpfwWXgPjxceWRIj-VSlsilLzrXJg'
            }
        )
        # print res.text
        json_res = json.loads(res.text)
        for a in json_res:
            print a['scheme_name']
            nhw_scheme_id = a['nhw_scheme_id']
            num_members = a['scheme_number_of_members']
            print nhw_scheme_id
            if nhw_scheme_id not in self.area_ids:
                self.area_ids.add(nhw_scheme_id)
                # The API returns bare coordinate lists; coerce them into
                # valid WKT so GEOSGeometry can parse them.
                wkt = a['area']
                if 'MULTI' not in wkt:
                    if 'LINESTRING' in wkt:
                        pass
                    else:
                        wkt = 'LINESTRING (' + wkt + ')'
                else:
                    # Flatten MULTI* geometries into a single LINESTRING by
                    # stripping parentheses and re-wrapping.
                    wkt = wkt.replace('(', '')
                    wkt = wkt.replace(')', '')
                    wkt = wkt.replace('MULTI', 'LINESTRING(')
                    wkt += ')'
                try:
                    geom = GEOSGeometry(wkt, srid=4326)
                    # print geom.json
                    # Convex hull turns the (possibly open) linestring into a
                    # polygon usable for area/centroid computations.
                    convex_hull = geom.convex_hull
                    # print convex_hull
                    print convex_hull.area
                    # print convex_hull.centroid
                    print convex_hull.json
                    self.table_as_arrays.append([
                        nhw_scheme_id,
                        a['scheme_name'],
                        a['postcode'],
                        num_members,
                        a['number_of_households'],
                        convex_hull.centroid.json,
                        wkt,
                        convex_hull.json
                    ])
                    print '\n\n'
                except Exception as err:
                    # Unparseable geometry: report and keep going.
                    print err, nhw_scheme_id
            else:
                print 'Repeated', nhw_scheme_id
        print len(json_res)
        print self.area_ids

    # NOTE(review): mutable default argument skip=[] is shared across calls;
    # harmless here only because skip is never mutated.
    def save(self, filename='neighbourhood_lists.xls', skip=[]):
        """Write the accumulated scheme rows to an Excel workbook, skipping
        any rows present in `skip`.

        NOTE(review): openpyxl writes xlsx-format data regardless of the
        .xls extension used in the default filename.
        """
        wb = Workbook()
        ws = wb.active
        # Header row.
        ws.cell(row=1, column=1).value = 'id'
        ws.cell(row=1, column=2).value = 'name'
        ws.cell(row=1, column=3).value = 'postcode'
        ws.cell(row=1, column=4).value = 'scheme_number_of_members'
        ws.cell(row=1, column=5).value = 'number_of_households'
        ws.cell(row=1, column=6).value = 'centroid'
        ws.cell(row=1, column=7).value = 'WKT'
        ws.cell(row=1, column=8).value = 'JSON'
        done_list = []
        for itr, school in enumerate(self.table_as_arrays):
            if school not in skip:
                # Data starts at row 3, leaving row 2 blank under the header.
                itr_offset = itr + 3
                # print type(table_as_arrays[itr][0][0])
                ws.cell(row=itr_offset, column=1).value = unicode(self.table_as_arrays[itr][0])
                ws.cell(row=itr_offset, column=2).value = unicode(self.table_as_arrays[itr][1])
                ws.cell(row=itr_offset, column=3).value = unicode(self.table_as_arrays[itr][2])
                ws.cell(row=itr_offset, column=4).value = unicode(self.table_as_arrays[itr][3])
                ws.cell(row=itr_offset, column=5).value = unicode(self.table_as_arrays[itr][4])
                ws.cell(row=itr_offset, column=6).value = unicode(self.table_as_arrays[itr][5])
                ws.cell(row=itr_offset, column=7).value = unicode(self.table_as_arrays[itr][6])
                ws.cell(row=itr_offset, column=8).value = unicode(self.table_as_arrays[itr][7])
                done_list.append(school)
            else:
                print 'Skipped {}'.format(school)
        # print 'done_list', done_list
        return wb.save(filename)

    def build_grid(self):
        """Walk a hard-coded list of Welsh postcode districts, look up one
        postcode per district in the local DB, and scrape schemes around its
        centroid, saving a workbook snapshot per district."""
        # Get all the postcodes
        # NOTE: the list contains duplicates across council areas; each entry
        # is processed independently (do_thing de-duplicates scheme IDs).
        districts = ['LL58', 'LL59', 'LL60', 'LL61', 'LL62', 'LL64', 'LL65', 'LL66', 'LL67', 'LL68', 'LL69', 'LL70', 'LL71', 'LL72', 'LL73', 'LL74', 'LL75', 'LL76', 'LL77', 'LL78',
                     'NP1', 'NP2', 'NP23', 'NP3',
                     'CF31', 'CF32', 'CF33', 'CF34', 'CF35', 'CF36',
                     'CF3', 'CF46', 'CF81', 'CF82', 'CF83', 'NP1', 'NP2',
                     'CF1', 'CF2', 'CF3', 'CF4', 'CF5', 'CF83',
                     'SA14', 'SA15', 'SA16', 'SA17', 'SA18', 'SA19', 'SA20', 'SA31', 'SA32', 'SA33', 'SA34', 'SA35', 'SA38', 'SA39', 'SA4', 'SA40', 'SA44', 'SA48', 'SA66',
                     'SA38', 'SA40', 'SA43', 'SA44', 'SA45', 'SA46', 'SA47', 'SA48', 'SY20', 'SY23', 'SY24', 'SY25',
                     'LL16', 'LL18', 'LL21', 'LL22', 'LL24', 'LL25', 'LL26', 'LL27', 'LL28', 'LL29', 'LL30', 'LL31', 'LL32', 'LL33', 'LL34', 'LL57',
                     'CH7', 'LL11', 'LL15', 'LL16', 'LL17', 'LL18', 'LL19', 'LL20', 'LL21', 'LL22',
                     'CH1', 'CH4', 'CH5', 'CH6', 'CH7', 'CH8', 'LL11', 'LL12', 'LL18', 'LL19',
                     'CF1', 'CF32', 'CF35', 'CF5', 'CF61', 'CF62', 'CF63', 'CF64', 'CF71',
                     'LL21', 'LL23', 'LL33', 'LL35', 'LL36', 'LL37', 'LL38', 'LL39', 'LL40', 'LL41', 'LL42', 'LL43', 'LL44', 'LL45', 'LL46', 'LL47', 'LL48', 'LL49', 'LL51', 'LL52', 'LL53', 'LL54', 'LL55', 'LL56', 'LL57', 'SY20',
                     'CF46', 'CF47', 'CF48',
                     'NP4', 'NP5', 'NP6', 'NP7',
                     'SA10', 'SA11', 'SA12', 'SA13', 'SA18', 'SA8', 'SA9',
                     'CF3', 'NP1', 'NP10', 'NP19', 'NP20', 'NP6', 'NP9',
                     'SA34', 'SA35', 'SA36', 'SA37', 'SA41', 'SA42', 'SA43', 'SA61', 'SA62', 'SA63', 'SA64', 'SA65', 'SA66', 'SA67', 'SA68', 'SA69', 'SA70', 'SA71', 'SA72', 'SA73',
                     'CF44', 'CF48', 'HR3', 'HR5', 'LD1', 'LD2', 'LD3', 'LD4', 'LD5', 'LD6', 'LD7', 'LD8', 'NP7', 'NP8', 'SA10', 'SA11', 'SA9', 'SY10', 'SY15', 'SY16', 'SY17', 'SY18', 'SY19', 'SY20', 'SY21', 'SY22', 'SY5',
                     'CF35', 'CF37', 'CF38', 'CF39', 'CF4', 'CF40', 'CF41', 'CF42', 'CF43', 'CF44', 'CF45', 'CF72',
                     'SA1', 'SA18', 'SA2', 'SA3', 'SA4', 'SA5', 'SA6', 'SA7',
                     'NP4', 'NP44', 'NP6',
                     'LL11', 'LL12', 'LL13', 'LL14', 'LL20', 'SY13', 'SY14']
        print len(districts)
        for pcode in districts:
            print pcode
            # Take the first postcode in the district as a representative
            # point for the API search.
            first_found = models.SpatialdataPostCode.objects.filter(label__startswith=pcode)[:1]
            if len(first_found):
                print first_found[0].label
                try:
                    # DB geometries are British National Grid (EPSG:27700);
                    # the centroid JSON coordinates are passed straight to
                    # the API as (lng, lat).
                    geom = GEOSGeometry(first_found[0].geom, srid=27700)
                    coords = json.loads(geom.centroid.json)['coordinates']
                    lat = coords[1]
                    lng = coords[0]
                    print lat, lng
                    print geom.area
                    print '\n'
                    self.do_thing(lat, lng)
                    # Snapshot everything collected so far per district.
                    self.save(filename='neighbourhood_lists3_{}.xls'.format(pcode))
                except Exception as e2:
                    print e2, pcode, 'DB Error'

    # Bounding box of the search area (north-west / south-east corners).
    # NOTE(review): these class attributes are never referenced in this file.
    nw_x = -5.60852
    nw_y = 53.48805

    se_x = -2.00000
    se_y = 51.13800

    def read_xls_build_geojson(self):
        """Rebuild a merged FeatureCollection from a previously saved
        workbook and write it to big.geojson."""
        wb2 = load_workbook('neighbourhood_lists3.xlsx')
        print wb2.get_sheet_names()
        ws = wb2[wb2.get_sheet_names()[0]]
        # Column H holds the convex-hull GeoJSON written by save().
        colJSON = ws['H']
        colA = ws['A']
        colB = ws['B']
        colC = ws['C']
        colD = ws['D']
        colE = ws['E']
        print len(colJSON)
        all_features = []
        # Rows 0-1 are the header and the blank spacer row (see save()).
        for itr in range(2, len(colJSON)):
            print colJSON[itr].value
            new_feature = deepcopy(self.default_feature)
            new_feature['geometry'] = json.loads(colJSON[itr].value)
            # Parse the geometry again only to compute its area.
            geom = GEOSGeometry(colJSON[itr].value)
            new_feature["properties"] = {
                "id": colA[itr].value,
                "name": colB[itr].value,
                "postcode": colC[itr].value,
                "scheme_number_of_members": colD[itr].value,
                "number_of_households": colE[itr].value,
                "area": geom.area
            }
            new_feature["crs"] = {
                'type': 'name',
                'properties': {
                    'name': 'WGS84'
                }
            }
            all_features.append(new_feature)
        self.default_geojson['features'] = all_features
        print self.default_geojson
        # NOTE(review): mode 'wr' is non-standard (effectively 'w' here) —
        # should be plain 'w'.
        with open('big.geojson', 'wr') as all_features_file:
            all_features_file.write(json.dumps(self.default_geojson, indent=4))
if __name__ == "__main__":
    # Current run mode: rebuild big.geojson from the saved workbook.
    # The scraping/saving steps are left commented for manual re-runs.
    nwapi = Neighbours()
    # nwapi.build_grid()
    # nwapi.save(filename='neighbourhood_lists3.xls')
    # nwapi.do_thing()
    nwapi.read_xls_build_geojson()
| [
"i.c.harvey@cs.cf.ac.uk"
] | i.c.harvey@cs.cf.ac.uk |
9eeb6493e7ffc4de7c553d77979a09da3caeaa1e | 8e1668e35a8df9968ab14d16db089b51dbe6dd51 | /python/algorithms/contests/four_divisors.py | 77b0e61ed7442f35a879a90753b56c9b384e7f7b | [] | no_license | Chalmiller/competitive_programming | f1ec0184d1ff247201522ab90ca8e66b3f326afc | b437080d1ba977c023baf08b7dc5c3946784e183 | refs/heads/master | 2021-03-24T05:11:59.383916 | 2020-08-24T22:07:41 | 2020-08-24T22:07:41 | 247,519,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | from typing import *
class Solution:
    """LeetCode 1390 — Four Divisors."""

    def sumFourDivisors(self, nums: List[int]) -> int:
        """Return the sum of divisors of every element of `nums` that has
        exactly four divisors; elements with any other divisor count
        contribute nothing.

        Improvement over the original: divisors are enumerated in pairs up
        to sqrt(num), O(sqrt n) per element instead of O(n) trial division.
        """
        total = 0
        for num in nums:
            divisors = self._collect_divisors(num)
            if len(divisors) == 4:
                total += sum(divisors)
        return total

    @staticmethod
    def _collect_divisors(num: int) -> set:
        """Collect divisors of `num`, stopping early once more than four
        are found (the caller only cares whether the count is exactly 4)."""
        divisors = set()
        d = 1
        while d * d <= num:
            if num % d == 0:
                divisors.add(d)
                divisors.add(num // d)  # paired divisor
                if len(divisors) > 4:
                    break  # already too many; no need to continue
            d += 1
        return divisors
# Ad-hoc smoke run on the LeetCode sample input (result is not printed,
# matching the original script's behavior).
sample_input = [21, 4, 7]
solver = Solution()
result = solver.sumFourDivisors(sample_input)
| [
"chalmiller1@gmail.com"
] | chalmiller1@gmail.com |
4d620495621fd8734bc2f5085f0814fab0602439 | db6d37fcf5545acd3dd9910674c0f43c90410e0a | /iterminal/controllers.py | 88f3f762f83c05c23a9cf06bbd3546a14f2d520a | [] | no_license | capalmer1013/i | 629bb44b4640fc91be883ca2e47c6a3d81f51a0b | 4e0bc895ad232cad7dfefefec35a67346da6794b | refs/heads/master | 2023-02-23T02:35:44.270400 | 2022-04-27T03:04:21 | 2022-04-27T03:04:21 | 86,883,795 | 0 | 0 | null | 2023-02-16T00:32:58 | 2017-04-01T04:29:44 | Python | UTF-8 | Python | false | false | 519 | py | import curses
from iterminal.constants import UP, DOWN, LEFT, RIGHT
def inputController(stdscr, p):
while True:
key = stdscr.getch()
#stdscr.addstr(0, 0, str(key))
dirDict = {curses.KEY_UP: UP, curses.KEY_DOWN: DOWN, curses.KEY_LEFT: LEFT, curses.KEY_RIGHT: RIGHT}
shootDict = {ord('w'): UP, ord('a'): LEFT, ord('s'): DOWN, ord('d'): RIGHT}
if key in dirDict.keys():
p.move(dirDict[key])
elif key in shootDict.keys():
p.shoot(shootDict[key]) | [
"capalmer1013@gmail.com"
] | capalmer1013@gmail.com |
ed8a2c4e242ca9537a384879948138caac9539c7 | 870e0facc200437912f3ac6c79ee6c1b810135a1 | /mdbtoSav.py | 9f5f93c497839c1f6d01414a502565933f4bd040 | [] | no_license | jhovannycanas/mdb_to_spss | 5565893a2decf2614e3503f23b9668b95cda43d9 | 7d7bf1707d61fd185ad81b2e80c7cca887d042bf | refs/heads/master | 2021-06-15T04:03:23.692339 | 2017-03-21T15:25:29 | 2017-03-21T15:25:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | __author__ = 'jhovanny'
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
#sys.setdefaultencoding("utf-8")
#reload(sys)
import savReaderWriter
import locale
import os
import collections
from obdc import *
varLabels = {'var1': 'This is variable 1',
'v2': 'This is v2!',
'bdate': 'dob'}
file="Agropecuario.sav"
#file="Hogares.sav"
preguntas, dicpreguntas, vartypes, varlabels, medicion, valuelabels= metadata("Agropecuario-ccc.mdb")
with savReaderWriter.SavWriter(file,preguntas,vartypes,valuelabels,varlabels,formats=None,missingValues=None,measureLevels = medicion, ioLocale='Spanish_Spain.1252') as sav:
pass
| [
"jhovannycanas@gmail.com"
] | jhovannycanas@gmail.com |
854a857b9eedc99be8a2332e23c37f43e09f4bc4 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /FjiriGn8gc5RE8Xm2_7.py | efeef575fedcd049a250bbc0cfb0345e324e582a | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | """
Write a function that takes `fuel` (liters), `fuel_usage` (liters/100km),
`passengers`, `air_con` (boolean) and returns maximum distance that car can
travel.
* `fuel` is the number of liters of fuel in the fuel tank.
* `fuel_usage` is basic fuel consumption per 100 km (with the driver inside only).
* Every additional passenger is increasing basic fuel consumption by 5%.
* If the air conditioner is ON `True`, its increasing total (not basic) fuel consumption by 10%.
### Examples
total_distance(70.0, 7.0, 0, False) ➞ 1000.0
total_distance(36.1, 8.6, 3, True) ➞ 331.8
total_distance(55.5, 5.5, 5, false) ➞ 807.3
### Notes
* `fuel` and `fuel_usage` are always greater than 1.
* `passengers` are always greater or equal to 0.
* Round your answer to the nearest tenth.
"""
def total_distance(fuel, fuel_usage, passengers, air_con):
air = 0
if air_con:
air = 1
return round((1000*fuel)/(fuel_usage*((0.05*passengers+1)*(air+10))),1)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
f62ec39eec2222c4bc33d3bddf25ea8511ed90fe | e63771573603a6eba9178e15770c427849bb4644 | /django/arzaq/settings.py | 9b2f8bff32e2ed8e61a18f74d06cdc1cfba5db31 | [] | no_license | sohepalslamat/WebApplaction-Arzaq | 44b581725802d0ab9c2ba42b5757ae4cee6a0ea5 | 15504ef3a4f4b5815099b8cd1f9e4489e2a859ec | refs/heads/master | 2023-02-02T11:59:40.123142 | 2019-12-05T20:43:24 | 2019-12-05T20:43:24 | 214,710,377 | 0 | 0 | null | 2019-10-12T20:37:57 | 2019-10-12T20:23:15 | JavaScript | UTF-8 | Python | false | false | 3,641 | py | """
Django settings for arzaq project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3!0s&at=ojwcq0(lh5ts5o!-01ch=9(zf@axps=9jhkdn2#amr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'main.apps.MainConfig',
'units.apps.UnitsConfig',
'items.apps.ItemsConfig',
'users.apps.UsersConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders', #CORS
'rest_framework',
'rest_framework.authtoken' #acces token
]
########## acces token && permissions
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated', )
}
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware', #CORS
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True #CORS
ROOT_URLCONF = 'arzaq.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'arzaq.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"sohep.alslamat91@gmail.com"
] | sohep.alslamat91@gmail.com |
44dd80d350dc467c7a172ab66eb9a3ecfda16c3f | af83851eabb687a9549dedfe131aa620de9b60d2 | /Leetcode/equal-Domino-rows.py | 4fa5ad984b2a556be8c10c02171d6c6304d471a1 | [] | no_license | LalitGsk/Programming-Exercises | e6a6607d72583f6668839e2325d96fb976e9e44f | f575e662f28ea13984304f2f0639dc1cb392e679 | refs/heads/master | 2020-07-26T18:44:54.516607 | 2020-07-09T04:44:40 | 2020-07-09T04:44:40 | 208,736,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 6 19:11:26 2019
@author: lalit
Minimum Domino Rotations For Equal Row
"""
class Solution:
def minDominoRotations(self, A: list[int], B: list[int]) -> int:
def check(x):
rotations_a = 0
rotations_b = 0
for i in range(len(A)):
if A[i] != x and B[i] != x:
return -1
elif A[i] != x:
rotations_a += 1
elif B[i] != x:
rotations_b += 1
return(min(rotations_a, rotations_b))
rotations = check(A[0])
if rotations != -1 or A[0] == B[0]:
return rotations
else:
return check(B[0]) | [
"noreply@github.com"
] | noreply@github.com |
f17025743fc841a91077662b31a3cb066d361be2 | a5e5d39f42f468d35f18aab3e78c3c090046b0df | /apps/contacts/forms.py | 72c512374bed6f2e74a37ac9c50a2a1151e9ee6e | [] | no_license | WayneLambert/portfolio | 66198dfc18b3f254e6bc726575903c3e8f570dc4 | 7e02165386e4784f81e15bae0325a77cf45f410d | refs/heads/main | 2023-02-04T18:08:13.559223 | 2023-01-29T14:13:59 | 2023-01-29T14:13:59 | 180,239,669 | 5 | 1 | null | 2023-02-04T07:07:10 | 2019-04-08T22:02:22 | JavaScript | UTF-8 | Python | false | false | 568 | py | from django import forms
from captcha.fields import ReCaptchaField
from captcha.widgets import ReCaptchaV3
from apps.contacts.models import Contact
class ContactForm(forms.ModelForm):
class Meta:
model = Contact
fields = ('first_name', 'last_name', 'email', 'message')
captcha = ReCaptchaField(
widget=ReCaptchaV3(
attrs={
'data-theme': 'light',
'data-size': 'invisible',
}
)
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
| [
"wayne.a.lambert@gmail.com"
] | wayne.a.lambert@gmail.com |
537b1e6af4b96fd09dba3bd4344c38fb66b9ca65 | d4e9a392d7465a5c10417364dd91cd5dd3c5d935 | /app/preprocess.py | d0fbfc80e4a532a5803e9d7632c2c1743c42d9e6 | [] | no_license | MaayanLab/harmonizome-ml | 045f866bac4683a23dd8a393e48f9f09bb08c35d | 5cebd194d771b1d7eabeb65a1c81ce0c78bf7a80 | refs/heads/master | 2020-03-21T13:26:26.132737 | 2020-03-05T22:46:38 | 2020-03-05T22:46:38 | 138,605,770 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | py | #!/usr/bin/env python
import os
import nbformat
from flask import render_template
from . import app
from .model import build_fields
from .runtime import ipynb_import_from_file
from .template.nbtemplate_parse import parse_fields
from .util import app_dir, globalContext
@app.template_filter('filter')
def reverse_filter(arr, attr, val):
def maybe_eval(v):
if callable(v):
return v()
return v
return [v
for v in arr
if maybe_eval(getattr(v, attr)) == val]
def main():
with app.test_request_context('/'):
for _, _, files in os.walk(app_dir + '/templates/ipynb/'):
for file in files:
file, ext = os.path.splitext(file)
if ext != '.ipynb':
continue
print('Building %s...' % (file))
nb = ipynb_import_from_file(
app_dir + '/templates/ipynb/%s.ipynb' % (file)
)
context = dict(
filename=file,
**globalContext,
**build_fields(),
)
fields = [field
for cell in nb.cells
for field in parse_fields(
cell['source'],
context,
)]
form_out = open(app_dir + '/templates/%s.html' % (file), 'w')
try:
if os.path.isfile(app_dir + '/templates/ipynb/%s.html' % (file)):
# Custom template
print(
render_template('ipynb/%s.html' % (file),
**context,
fields=fields,
),
file=form_out,
)
else:
# General template
print(
render_template('layout/ipynb.j2',
**context,
fields=fields,
),
file=form_out,
)
except Exception as e:
print(e)
finally:
form_out.close()
break
| [
"u8sand@gmail.com"
] | u8sand@gmail.com |
1fb7519779c1e12faf9d7ed6d7b1edb5f68fa6d2 | d7e9cdf4e92388b897066010c1b8d12d22ada267 | /python/code/sensors/BMP280_read.py | 26250e9d74b2c40170f237c4acc23743b1897c0f | [] | no_license | adavidson32/EOTG | 700c146944c98ea88f4288bed97e7ab7f738996a | 8ad02cfb0e71e972b7a8789ffaba2a60ab2d29ee | refs/heads/master | 2020-05-26T13:55:54.343848 | 2017-04-25T18:43:40 | 2017-04-25T18:43:40 | 85,002,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,560 | py | import logging
import Adafruit_GPIO.I2C as I2C
i2c = I2C()
# BMP280 default address.
BMP280_I2CADDR = 0x76
BMP280_CHIPID = 0xD0
# BMP280 Registers
BMP280_DIG_T1 = 0x88 # R Unsigned Calibration data (16 bits)
BMP280_DIG_T2 = 0x8A # R Signed Calibration data (16 bits)
BMP280_DIG_T3 = 0x8C # R Signed Calibration data (16 bits)
BMP280_DIG_P1 = 0x8E # R Unsigned Calibration data (16 bits)
BMP280_DIG_P2 = 0x90 # R Signed Calibration data (16 bits)
BMP280_DIG_P3 = 0x92 # R Signed Calibration data (16 bits)
BMP280_DIG_P4 = 0x94 # R Signed Calibration data (16 bits)
BMP280_DIG_P5 = 0x96 # R Signed Calibration data (16 bits)
BMP280_DIG_P6 = 0x98 # R Signed Calibration data (16 bits)
BMP280_DIG_P7 = 0x9A # R Signed Calibration data (16 bits)
BMP280_DIG_P8 = 0x9C # R Signed Calibration data (16 bits)
BMP280_DIG_P9 = 0x9E # R Signed Calibration data (16 bits)
BMP280_CONTROL = 0xF4
BMP280_RESET = 0xE0
BMP280_CONFIG = 0xF5
BMP280_PRESSUREDATA = 0xF7
BMP280_TEMPDATA = 0xFA
class BMP280(object):
def __init__(self, address=BMP280_I2CADDR, i2c=None, **kwargs):
self._logger = logging.getLogger('Adafruit_BMP.BMP280')
self._device = i2c.get_i2c_device(address, **kwargs)
chip_ip = self._device.readU8(BMP280_CHIPID)
print("Chip ID = ", str(chip_id))
#if chip_id != 0x58:
# raise Exception('Unsupported chip')
# Load calibration values.
self._load_calibration()
self._device.write8(BMP280_CONTROL, 0x3F)
def _load_calibration(self):
self.cal_t1 = int(self._device.readU16(BMP280_DIG_T1)) # UINT16
self.cal_t2 = int(self._device.readS16(BMP280_DIG_T2)) # INT16
self.cal_t3 = int(self._device.readS16(BMP280_DIG_T3)) # INT16
self.cal_p1 = int(self._device.readU16(BMP280_DIG_P1)) # UINT16
self.cal_p2 = int(self._device.readS16(BMP280_DIG_P2)) # INT16
self.cal_p3 = int(self._device.readS16(BMP280_DIG_P3)) # INT16
self.cal_p4 = int(self._device.readS16(BMP280_DIG_P4)) # INT16
self.cal_p5 = int(self._device.readS16(BMP280_DIG_P5)) # INT16
self.cal_p6 = int(self._device.readS16(BMP280_DIG_P6)) # INT16
self.cal_p7 = int(self._device.readS16(BMP280_DIG_P7)) # INT16
self.cal_p8 = int(self._device.readS16(BMP280_DIG_P8)) # INT16
self.cal_p9 = int(self._device.readS16(BMP280_DIG_P9)) # INT16
self._logger.debug('T1 = {0:6d}'.format(self.cal_t1))
self._logger.debug('T2 = {0:6d}'.format(self.cal_t2))
self._logger.debug('T3 = {0:6d}'.format(self.cal_t3))
self._logger.debug('P1 = {0:6d}'.format(self.cal_p1))
self._logger.debug('P2 = {0:6d}'.format(self.cal_p2))
self._logger.debug('P3 = {0:6d}'.format(self.cal_p3))
self._logger.debug('P4 = {0:6d}'.format(self.cal_p4))
self._logger.debug('P5 = {0:6d}'.format(self.cal_p5))
self._logger.debug('P6 = {0:6d}'.format(self.cal_p6))
self._logger.debug('P7 = {0:6d}'.format(self.cal_p7))
self._logger.debug('P8 = {0:6d}'.format(self.cal_p8))
self._logger.debug('P9 = {0:6d}'.format(self.cal_p9))
def _load_datasheet_calibration(self):
# Set calibration from values in the datasheet example. Useful for debugging the
# temp and pressure calculation accuracy.
self.cal_t1 = 27504
self.cal_t2 = 26435
self.cal_t3 = -1000
self.cal_p1 = 36477
self.cal_p2 = -10685
self.cal_p3 = 3024
self.cal_p4 = 2855
self.cal_p5 = 140
self.cal_p6 = -7
self.cal_p7 = 15500
self.cal_p8 = -14500
self.cal_p9 = 6000
def read_raw(self, register):
"""Reads the raw (uncompensated) temperature or pressure from the sensor."""
raw = self._device.readU16BE(register)
raw <<= 8
raw = raw | self._device.readU8(register + 2)
raw >>= 4
self._logger.debug('Raw value 0x{0:X} ({1})'.format(raw & 0xFFFF, raw))
return raw
def _compensate_temp(self, raw_temp):
""" Compensate temperature """
t1 = (((raw_temp >> 3) - (self.cal_t1 << 1)) *
(self.cal_t2)) >> 11
t2 = (((((raw_temp >> 4) - (self.cal_t1)) *
((raw_temp >> 4) - (self.cal_t1))) >> 12) *
(self.cal_t3)) >> 14
return t1 + t2
def read_temperature(self):
"""Gets the compensated temperature in degrees celsius."""
raw_temp = self.read_raw(BMP280_TEMPDATA)
compensated_temp = self._compensate_temp(raw_temp)
temp = float(((compensated_temp * 5 + 128) >> 8)) / 100
self._logger.debug('Calibrated temperature {0}'.format(temp))
return temp
def read_pressure(self):
"""Gets the compensated pressure in Pascals."""
raw_temp = self.read_raw(BMP280_PRESSUREDATA)
compensated_temp = self._compensate_temp(raw_temp)
p1 = compensated_temp - 128000
p2 = p1 * p1 * self.cal_p6
p2 += (p1 * self.cal_p6) << 17
p2 += self.cal_p4 << 35
p1 = ((p1 * p1 * self.cal_p3) >> 8) + ((p1 * self.cal_p2) << 12)
p1 = ((1 << 47) + p1) * (self.cal_p1) >> 33
if 0 == p1:
return 0
p = 1048576 - raw_temp
p = (((p << 31) - p2) * 3125) / p1
p1 = (self.cal_p9 * (p >> 13) * (p >> 13)) >> 25
p2 = (self.cal_p8 * p) >> 19
p = ((p + p1 + p2) >> 8) + ((self.cal_p7) << 4)
return float(p / 256)
def read_altitude(self, sealevel_pa=101325.0):
"""Calculates the altitude in meters."""
# Calculation taken straight from section 3.6 of the datasheet.
pressure = float(self.read_pressure())
altitude = 44330.0 * (1.0 - pow(pressure / sealevel_pa, (1.0 / 5.255)))
self._logger.debug('Altitude {0} m'.format(altitude))
return altitude
def read_sealevel_pressure(self, altitude_m=0.0):
"""Calculates the pressure at sealevel when given a known altitude in
meters. Returns a value in Pascals."""
pressure = float(self.read_pressure())
p0 = pressure / pow(1.0 - altitude_m / 44330.0, 5.255)
self._logger.debug('Sealevel pressure {0} Pa'.format(p0))
return p0
sensor = BMP280()
print('Temp = {0:0.2f} *C'.format(sensor.read_temperature()))
print('Pressure = {0:0.2f} Pa'.format(sensor.read_pressure()))
print('Altitude = {0:0.2f} m'.format(sensor.read_altitude()))
print('Sealevel Pressure = {0:0.2f} Pa'.format(sensor.read_sealevel_pressure()))
| [
"noreply@github.com"
] | noreply@github.com |
fa9ba509291b1804040cd413eb7052b6604d9ee3 | b62c0f6beef30a82170082e5538946e70d1d1c32 | /apps/Image/models.py | 543ef73759f5df826005f35c7bcdcb8e648d51fd | [] | no_license | IOSZYQ/LushuTools | a78153a8ea5321128614d453b6fe305795a650d9 | f05adc60917b47973a3f55b2f52b3b786438dfd0 | refs/heads/master | 2020-04-13T10:54:04.336775 | 2019-01-25T11:27:45 | 2019-01-25T11:27:45 | 163,156,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | from django.db import models
# Create your models here.
class Image(models.Model):
imageName = models.CharField(verbose_name='图片文件名', max_length=64, unique=True)
class Meta:
verbose_name = "图片"
verbose_name_plural = verbose_name
def __str__(self):
return str(self.imageName)
| [
"hualing_zyq@126.com"
] | hualing_zyq@126.com |
1112cd1cd75612f8e8bcc2c5a3a0903f36c25d3b | 06013cedb6e028397ef8be1798a2d15148ad350d | /lab_3/var_5/flask_server.py | 259936dcdf07ee7d76875c84a1ec7f22814aaff9 | [] | no_license | ShJL/IrfmLabs | 495f7d652380a8235a2291093c320196d1f30a6f | 36fc6b97fb9213ecdeb8851ef48b021d1ca98690 | refs/heads/master | 2020-04-22T23:19:13.414526 | 2019-04-08T20:45:03 | 2019-04-08T20:45:03 | 170,737,753 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,472 | py | import flask
import sqlite3
import db.sql_consts as sc
_OK = 200
_BAD_REQUEST = 400
_NOT_FOUND = 404
app = flask.Flask(__name__)
def _get_db():
db = getattr(flask.g, "_db_connection", None)
if db is None:
db = flask.g._db_connection = sqlite3.connect(sc.DATABASE)
db.row_factory = lambda c, r: {x[0]: r[i] for i, x in enumerate(c.description)}
return db
def _query_db(query, args=(), select=True):
db = _get_db().execute(query, args)
if select:
return db.fetchall()
_get_db().commit()
def _exists(obj_id):
return len(_query_db(f"select 1 from {sc.SQL_TABLE} where Id = ?", [obj_id])) == 1
def _response(method, oid, status):
return flask.jsonify({"method": method, "Id": oid, "status": status})
def _extend_params(args):
params = args.to_dict()
for field in sc.SQL_COLUMNS:
if field not in params:
params[field] = None
return params
def _placeholders(fields):
return tuple((f":{field}" for field in fields))
_SURVEY_PREFIX = "/api/survey"
@app.route("/api/")
def api_index():
return flask.render_template("index.html", api_prefix=_SURVEY_PREFIX)
@app.route(_SURVEY_PREFIX + "/", methods=["GET"])
def survey_index():
list_objs = _query_db(f"select * from {sc.SQL_TABLE}")
return flask.jsonify({"total": len(list_objs), "rows": list_objs})
@app.route(_SURVEY_PREFIX + "/<int:obj_id>", methods=["GET"])
def survey_show(obj_id):
if not _exists(obj_id):
flask.abort(_NOT_FOUND)
return flask.jsonify(
_query_db(f"select * from {sc.SQL_TABLE} where Id = ?", [obj_id])[0]
)
@app.route(_SURVEY_PREFIX, methods=["POST"])
@app.route(_SURVEY_PREFIX + "/<int:obj_id>", methods=["POST"])
def survey_create(obj_id=None):
args = _extend_params(flask.request.args)
if obj_id is not None:
args["Id"] = obj_id
_query_db(
f"""insert into {sc.SQL_TABLE}
({','.join(args.keys())})
values
({','.join(_placeholders(args.keys()))})""",
args,
False
)
if obj_id is None:
obj_id = _query_db(
f"select Id from {sc.SQL_TABLE} where rowid = last_insert_rowid()"
)[0]["Id"]
return _response("create", obj_id, _OK)
@app.route(_SURVEY_PREFIX + "/<int:obj_id>", methods=["PUT"])
def survey_update(obj_id):
if not _exists(obj_id):
flask.abort(_NOT_FOUND)
args = flask.request.args.to_dict()
args["Id"] = obj_id
_query_db(
f"""update {sc.SQL_TABLE}
set {','.join(map('='.join, zip(args.keys(), _placeholders(args.keys()))))}
where Id = :Id""",
args,
False
)
return _response("update", obj_id, _OK)
@app.route(_SURVEY_PREFIX + "/<int:obj_id>", methods=["DELETE"])
def survey_destroy(obj_id):
if not _exists(obj_id):
flask.abort(_NOT_FOUND)
_query_db(f"delete from {sc.SQL_TABLE} where Id = ?", [obj_id], False)
return _response("delete", obj_id, _OK)
@app.route(_SURVEY_PREFIX + "/number/<string:gender>", methods=["GET"])
def survey_number_of_people(gender):
if gender not in ("male", "female"):
flask.abort(_BAD_REQUEST)
return flask.jsonify(
_query_db(f"select count(*) as count from {sc.SQL_TABLE} where Gender = ?", [gender])[0]
)
@app.teardown_appcontext
def close_connection(exception):
db = flask.g.pop("_db_connection", None)
if db is not None:
db.commit()
db.close()
| [
"LDDD5@yandex.ru"
] | LDDD5@yandex.ru |
40c7a96a66c6ce84439222e54679cc51149bc0ba | a86293a2033c06410aa8ed19bcbce8ca55ea3c55 | /src/client_libraries/python/dynamics/customerinsights/api/models/cds_org_info.py | e414e4e2f31a4ed4afa9f160f9258d839d0aa435 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | ramotheonly/Dynamics365-CustomerInsights-Client-Libraries | a3ca28aa78d2b5509e65d9895ff4a0d42d05f611 | e00632f7972717b03e0fb1a9e2667e8f9444a0fe | refs/heads/main | 2023-08-02T08:09:04.063030 | 2021-09-28T22:42:15 | 2021-09-28T22:42:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,154 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CdsOrgInfo(Model):
    """The information for CDS Organization in BAP.

    Every property is optional and is read from *kwargs*.

    :param friendly_name: Gets the Cds Organization Friendly Name
    :type friendly_name: str
    :param url: Gets the Cds Organization Url
    :type url: str
    :param state: Gets the Cds Organization State
    :type state: str
    :param location: Gets region location of Cds Organization
    :type location: str
    :param environment_sku: Gets SKU of Cds Organization
    :type environment_sku: str
    :param expiration_time: Gets the expiration time of CDS Organization if
     the SKU is Trial
    :type expiration_time: datetime
    :param max_allowed_expiration_time: Gets the max allowed expiration time
     of CDS Organization if the SKU is Trial
    :type max_allowed_expiration_time: datetime
    """

    # msrest uses this map to (de)serialize instances to/from the wire format.
    _attribute_map = {
        'friendly_name': {'key': 'friendlyName', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'state': {'key': 'state', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'environment_sku': {'key': 'environmentSku', 'type': 'str'},
        'expiration_time': {'key': 'expirationTime', 'type': 'iso-8601'},
        'max_allowed_expiration_time': {'key': 'maxAllowedExpirationTime', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs):
        super(CdsOrgInfo, self).__init__(**kwargs)
        # Populate attributes in the same order as the serialization map;
        # anything missing from kwargs defaults to None.
        for name in (
            'friendly_name',
            'url',
            'state',
            'location',
            'environment_sku',
            'expiration_time',
            'max_allowed_expiration_time',
        ):
            setattr(self, name, kwargs.get(name, None))
| [
"michaelajohnston@mac.com"
] | michaelajohnston@mac.com |
a99dbdf037c0559627072edbf0cd2f7e24983bb2 | 01f77b70dfb8817a913414fd25d9ed44ba3cd1f4 | /oscar_invoices/urls.py | 1bc931c736f24795068621e2e1d47790be762a5e | [] | no_license | luiz158/django-oscar-invoices | ca2cf8b70347000399c5316532aca7e52d0f77a3 | 9cc3425410641a95832bda93155e4d2bfa95ac7e | refs/heads/master | 2023-07-02T22:21:03.318698 | 2020-10-06T16:01:02 | 2020-10-06T16:01:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | from django.urls import re_path
from . import views
app_name = "oscar_invoices"
urlpatterns = [
re_path(r"invoice/(?P<pk>\d+)/", views.InvoicePreviewView.as_view(), name="invoice"),
]
| [
"sasha@sasha0.ru"
] | sasha@sasha0.ru |
200e9ea914f49811117ea1217accd0bb9a5faa8e | 0845c7a5bcd8426dbaa20d146b75b5bc00c41f3a | /simulation/utils/road/sections/test/zebra_crossing_test.py | 163161ec81fc8648de27cfe904baa755dd9ca084 | [
"MIT"
] | permissive | GeSteine/kitcar-gazebo-simulation | b96b407e34b61b9084846e5fea68f14a9eec78fd | adfdadd83cbdb6d577c665076f831704bde95f1d | refs/heads/master | 2022-11-17T06:04:09.237815 | 2020-06-24T18:32:42 | 2020-06-24T18:32:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | import unittest
from simulation.utils.geometry import Point, Polygon, Transform
from simulation.utils.road.sections.zebra_crossing import ZebraCrossing
import simulation.utils.road.sections.type as road_section_type
from simulation.utils.road.config import Config
class ModuleTest(unittest.TestCase):
    """Unit tests for the ZebraCrossing road section."""

    def test_zebra_crossing(self):
        """The section reports its type and its frame spans the road width."""
        transform = Transform([1, 1], 0)
        length = 2
        crossing = ZebraCrossing(length=length, transform=transform)
        self.assertEqual(crossing.__class__.TYPE, road_section_type.ZEBRA_CROSSING)
        width = Config.road_width
        corners = [
            Point(0, -width),
            Point(0, width),
            Point(length, width),
            Point(length, -width),
        ]
        self.assertEqual(crossing.frame, transform * Polygon(corners))
if __name__ == "__main__":
unittest.main()
| [
"ufzev@student.kit.edu"
] | ufzev@student.kit.edu |
d7b9f75239b7b347072009145160460e4ebf3de9 | ac9bff6f5ae0d52acf3541f0002b69dd5ad8855a | /dataEncode.py | a2146304f52d41472f30d7870eba5c2621658f4d | [] | no_license | Dofolk/BiRen | 610f8daa978a97adacea165fc701532d5f127ed8 | 5fc76c3cdb435bbcde5537e51980c53e105e32b4 | refs/heads/master | 2023-07-08T15:01:31.835696 | 2019-07-05T00:57:54 | 2019-07-05T00:57:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,587 | py | import numpy as np
import os
import sys
import commands
fileName = sys.argv[1]
genome = sys.argv[2]
evolutionaryConservation = sys.argv[3]
# Check argv length
if len(sys.argv) != 4:
print "Miss required parameter"
print "Usage :"
print " python example genome.fa EvolutionaryConservation.bw"
os._exit(0)
else:
pass
# Check files exists
if os.path.exists("./"+fileName+".bed") and os.path.exists("./"+genome) and os.path.exists("./"+evolutionaryConservation):
pass
else:
print "Usage :"
print " python example genome.fa EvolutionaryConservation.bw"
os._exit(0)
# Check example.bed legality
# Each region should be larger than 200bp but not beyond 8000bp
checkObj = open("./"+fileName+".bed")
for line in checkObj:
line = line.strip("\r\n")
temps = line.split("\t")
start = int(temps[1])
end = int(temps[2])
length = end - start
if length < 200 and length <= 8000:
print "Line : "
print " "+line
print "Each region should be larger than 200bp but not beyond 8000bp."
os._exit(0)
checkObj.close()
# Build folder
outdir = "./"+fileName
if os.path.exists(outdir):
pass
else:
os.mkdir(outdir)
# Build 1k format bed
os.system("python transformTo1Kformat.py "+fileName)
print "Build 1k format bed Done."
# getfasta
os.system("bedtools getfasta -fi ./"+genome+" -bed "+fileName+"_1k.bed -fo "+fileName+"_1k.fa")
print fileName+" getfasta Done."
# One-hot transform
numSample = commands.getstatusoutput("wc -l "+fileName+"_1k.bed")[1].split(" ")[0]
os.system("python OneHotTransform.py "+fileName+"_1k "+numSample)
print "One-hot transform Done."
# DeepSEA Encode
os.system("luajit DeepSEA.lua --dataset "+fileName+"_1k")
print "DeepSEA encode Done."
# Add EvolutionaryConservation Score
os.system("python ExtractCoreRegion.py "+fileName)
os.system("bigWigAverageOverBed "+evolutionaryConservation+" "+fileName+"_200.bed "+fileName+".tab -bedOut="+fileName+".cons.bed")
numSample = commands.getstatusoutput("wc -l "+fileName+".cons.bed")[1].split(" ")[0]
os.system("python SortConsBed.py "+fileName+" "+numSample)
os.system("python AddConsToDeepSeaRes.py "+fileName)
os.system("python RNNDataFormat.py "+fileName)
os.system("rm "+fileName+".cons")
os.system("rm "+fileName+".cons.bed")
os.system("rm "+fileName+".tab")
os.system("rm "+fileName+"_1k.bed")
os.system("rm "+fileName+"_1k.fa")
os.system("rm "+fileName+"_1k.npy")
os.system("rm "+fileName+"_1k_deepsea.npy")
os.system("rm "+fileName+"_200.bed")
os.system("rm "+fileName+"_deepsea_cons.npy")
print "Add EvolutionaryConservation Score and RNN data format Done."
print "..."
print "Data Encode Done." | [
"noreply@github.com"
] | noreply@github.com |
759453a9942cf164858e0646369370b634ed8630 | 751300a23242cfe393f86ff489339ffc81319efc | /speaker_spotting/speaker_spotting_oracle_cluster2-dev.py | 9ee8cd16ac65ab6ad961b195a92ffb3714d90be2 | [] | no_license | yinruiqing/speaker_spotting | bc349791a59c6caa2a840fb39aa1d4c1221f99e9 | c2fbdcbf2885d9545abe8bf1e19b2c412b0680ee | refs/heads/master | 2021-05-04T14:39:33.213405 | 2018-05-03T10:28:11 | 2018-05-03T10:28:11 | 120,207,231 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 4,962 | py |
# coding: utf-8
# ```bash
# $ pip install pyannote.metrics==1.4.1
# $ pip install pyannote.db.odessa.ami==0.5.1
# ```
import clustering
import numpy as np
from pyannote.audio.features import Precomputed
precomputed = Precomputed('/vol/work1/bredin/speaker_spotting/embeddings')
from pyannote.database import get_protocol, FileFinder
protocol = get_protocol('AMI.SpeakerSpotting.MixHeadset', progress=True)
# enrolment consists in summing all relevant embeddings
def speaker_spotting_enrol(current_enrolment):
    """Build the target model for one enrolment.

    Sums every precomputed embedding that falls inside the enrolment's
    'enrol_with' timeline; returns an array of shape (1, dimension).
    """
    enrol_timeline = current_enrolment['enrol_with']
    file_embeddings = precomputed(current_enrolment)
    enrolled = file_embeddings.crop(enrol_timeline)
    return np.sum(enrolled, axis=0, keepdims=True)
# Build one target model per enrolment, keyed by model_id.  The key is
# popped so the remaining dict can be passed straight to
# speaker_spotting_enrol().
models = {}
for current_enrolment in protocol.development_enrolment():
    model_id = current_enrolment.pop('model_id')
    models[model_id] = speaker_spotting_enrol(current_enrolment)
# Annotation must be imported here: it is used in the loop below, but the
# file's `from pyannote.core import ...` line only appears later, so the
# original code raised NameError at runtime.
from pyannote.core import Annotation

# Merge the ground-truth annotation of every development file, keyed by uri.
REFERENCE = {}
for current_file in protocol.development():
    uri = current_file['uri']
    if uri not in REFERENCE:
        REFERENCE[uri] = Annotation(uri=uri)
    REFERENCE[uri].update(current_file['annotation'])
# Trials
from pyannote.core import SlidingWindow, SlidingWindowFeature
from pyannote.audio.embedding.utils import cdist
from pyannote.core import Annotation,Segment, Timeline
# trial consists in comparing each embedding to the target embedding
def speaker_spotting_try_system2(current_trial):
    """Speaker spotting system based on the oracle clustering system.

    Walks the trial's embedding windows in chronological order, feeds the
    oracle diarization segments seen so far into an online oracle
    clustering, and emits one detection score per window.  Returns the
    scores as a SlidingWindowFeature aligned with the embeddings.
    """
    # target model
    # record the model embedding vector
    # and model id
    model = {}
    model_id = current_trial['model_id']
    model_embedding = models[current_trial['model_id']]
    model['mid'] = model_id
    model['embedding'] = model_embedding
    # where to look for this target
    try_with = current_trial['try_with']
    # precomputed embedding
    embeddings = precomputed(current_trial)
    # annotation of current file, restricted to the trial region
    oracle_diarization = REFERENCE[current_trial['uri']].crop(current_trial['try_with'])
    # find index of first and last embedding fully included in 'try_with'
    indices = embeddings.sliding_window.crop(try_with, mode='strict')
    first, last = indices[0], indices[-1]
    onlineOracleClustering = clustering.OnlineOracleClustering(current_trial['uri'])
    start = embeddings.sliding_window[0].start
    data = np.zeros((len(embeddings.data), 1))
    for i, (window, _) in enumerate(embeddings):
        # make sure the current segment is in 'try_with'
        if i < first:
            start = window.end
            continue
        if i > last:
            break
        # everything between the trial start and the current window end
        so_far = Segment(start, window.end)
        current_annotation = oracle_diarization.crop(so_far)
        score = 0.
        for segment, _, label in current_annotation.itertracks(label=True):
            example = {}
            example['label'] = label
            example['segment'] = segment
            example['embedding'] = embeddings.crop(segment, mode='center')
            example['indice'] = [i]
            # compute the distance with model
            example['distances'] = {}
            example['distances'][model['mid']] = list(cdist(example['embedding'],
                                                            model['embedding'],
                                                            metric='cosine').flatten())
            # update the online oracle clustering
            # (sic: "upadateCluster" is the method's actual spelling)
            onlineOracleClustering.upadateCluster(example)
        if not onlineOracleClustering.empty():
            # compute the current score; presumably cosine distance lies in
            # [0, 2], so 2-min_dist maps smaller distance to higher score
            # -- TODO confirm against the clustering module.
            min_dist = min(onlineOracleClustering.modelDistance(model))
            score = max(score, 2-min_dist)
        data[i] = score
        start = window.end
    # transform scores to sliding window features (trial region only)
    data = data[first:last+1]
    sliding_window = SlidingWindow(start=embeddings.sliding_window[first].start,
                                   duration=embeddings.sliding_window.duration,
                                   step=embeddings.sliding_window.step)
    return SlidingWindowFeature(data, sliding_window)
# Depending on the value of the detection threshold, the alarm will be triggered with a different latency.
def process_score(scores):
    """Reduce a score curve to its strictly increasing maxima.

    Iterates (window, score) pairs and keeps only the points where the
    score exceeds every score seen before; each kept point is recorded as
    [window.end, score_value].  Returns the (possibly empty) list.
    """
    best = 0
    kept = []
    for window, score in scores:
        if not (score > best):
            continue
        best = score[0]
        kept.append([window.end, best])
    return kept
def process_trial(trial, scores):
    """Package one trial's thresholded score curve for serialization."""
    return {
        'uri': trial['uri'],
        'model_id': trial['model_id'],
        'scores': process_score(scores),
    }
# Run every development trial through the spotting system and dump the
# processed score curves to llss.txt as JSON.
llss = []
for current_trial in protocol.development_trial():
    # pop the ground truth so the system only ever sees trial inputs
    reference = current_trial.pop('reference')
    hypothesis = speaker_spotting_try_system2(current_trial)
    llss.append(process_trial(current_trial, hypothesis))
import simplejson as json
with open('llss.txt', 'w') as outfile:
    json.dump(llss, outfile)
| [
"yinruiqing110@gmail.com"
] | yinruiqing110@gmail.com |
b65a0d35db9f6025977c8d15e05fa76d1205eabb | ebfa76d74294fa64648146e6726cc69d3e25e23f | /parlai/mturk/tasks/light/light_chat_eval/worlds.py | 1ecac1421555897bf2765a369fcd6ba4428df1e3 | [
"MIT"
] | permissive | ritvik1512/ContrastiveLearning4Dialogue | 17b770a8c777aca9f5be273ff41f02c4530f8ff3 | 873c20d21ee810750179356c353d2cce244db028 | refs/heads/master | 2023-03-12T15:17:43.635546 | 2021-02-24T07:20:27 | 2021-02-24T07:20:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,029 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.mturk.core.worlds import MTurkOnboardWorld, MTurkTaskWorld
from parlai.mturk.core.agents import (
MTURK_DISCONNECT_MESSAGE,
RETURN_MESSAGE,
TIMEOUT_MESSAGE,
)
import time
def is_disconnected(act):
    """Return True when *act* carries one of the MTurk terminal messages
    (worker disconnected, returned the HIT, or timed out)."""
    if 'text' not in act:
        return False
    return act['text'] in (
        MTURK_DISCONNECT_MESSAGE,
        RETURN_MESSAGE,
        TIMEOUT_MESSAGE,
    )
class LightEvalTestWorld(MTurkOnboardWorld):
    """
    Task world that gives a pre-determined task as a test.
    Assigns a blocking qualification if the worker fails the test.

    The scripted test has three turns (pick the right gesture, the right
    utterance, then the right action).  Wrong answers accumulate in
    ``self.wrong``; more than 3 total sends the worker to ``block_loop``.
    """
    # All emote candidates offered on the first turn, each prefixed with
    # the "gesture " command keyword expected by the frontend.
    GESTURES = list(
        map(
            lambda x: 'gesture ' + x,
            [
                'applaud',
                'blush',
                'cry',
                'dance',
                'frown',
                'gasp',
                'grin',
                'groan',
                'growl',
                'yawn',
                'laugh',
                'nod',
                'nudge',
                'ponder',
                'pout',
                'scream',
                'shrug',
                'sigh',
                'smile',
                'stare',
                'wave',
                'wink',
            ],
        )
    )
    # Screen shown (repeatedly) to workers who failed the test.
    block_act = {'id': 'System', 'text': "FAILED", 'task_data': {'turn': 'FAILED'}}
    def block_loop(self):
        """Soft-block the worker and keep echoing the FAILED screen until
        they disconnect.  Always returns True."""
        print('Worker {} failed onboarding'.format(self.mturk_agent.worker_id))
        self.mturk_agent.observe(self.block_act)
        self.mturk_agent.mturk_manager.soft_block_worker(self.mturk_agent.worker_id)
        act = self.mturk_agent.act()
        while not is_disconnected(act):
            self.mturk_agent.observe(self.block_act)
            act = self.mturk_agent.act()
        return True
    def __init__(self, opt, mturk_agent):
        self.mturk_agent = mturk_agent
        self.opt = opt
        self.did_complete = False  # True only after all three turns pass
        self.wrong = 0  # cumulative wrong answers across all turns
        self.episodeDone = False
    def parley(self):
        """Run the whole three-turn onboarding test in a single call."""
        # ---- Turn 1: the worker must answer with 'gesture laugh' ----
        self.mturk_agent.update_agent_id('TestEmote')
        first_act = {
            'id': 'System',
            'text': 'FIRST_TURN',
            'task_data': {
                'wrong': 0,
                'turn': 'FIRST_TURN',
                'actions': self.GESTURES,
                'agent_id': 'Guard',
                'text': 'Bahahaha that\'s a great one! Where\'d you get that from?',
                'persona': 'I\'m a guard of the royal family. I have a loud laugh, '
                'and people hear it often as I love jokes. I stand up for '
                'rightousness, and have a short temper when it comes to '
                'insults against the king. Sometimes you need to knock '
                'some sense into people.',
                'base_name': 'Guard',
                'partner_name': 'Jester',
                'setting': 'You are in the servants\' quarters. Many people are '
                'sitting around waiting to be called for services. It\'s '
                'cozy, but not cramped. A chest is here. A Jester is here. '
                'You are carrying a spear.',
            },
        }
        self.mturk_agent.observe(first_act)
        act = self.mturk_agent.act()
        if is_disconnected(act):
            self.episodeDone = True
            return
        while act['text'] != 'gesture laugh':
            self.wrong += 1
            if self.wrong > 3:
                return self.block_loop()
            first_act['task_data']['wrong'] = self.wrong
            self.mturk_agent.observe(first_act)
            act = self.mturk_agent.act()
            if is_disconnected(act):
                self.episodeDone = True
                return
        # ---- Turn 2: the worker must pick correct_phrase from 20 candidates ----
        self.mturk_agent.update_agent_id('TestSpeech')
        correct_phrase = (
            'Now you better watch your tongue Jester. '
            'I won\'t have you badmouthing our king.'
        )
        second_act = {
            'id': 'System',
            'text': 'SECOND_TURN',
            'task_data': {
                'wrong': 0,
                'turn': 'SECOND_TURN',
                'curr_message_context': {'action': 'gesture frown'},
                'actions': [
                    'You think you can say whatever you want because we\'re alone?',
                    'Do you want to grab some tea?',
                    'What makes you think you can stand up to me, silly man? I have three times your strength. I have weapons to the teeth. What would make you think this was a good idea?', # NOQA
                    'Yeah that guy is something of a jerk',
                    'I just feel he doesn\'t have the best sense of humor...',
                    'Yeah landlubber, aye find this is a great hiding spot too.',
                    'If only you could say that to my face one more time. I\'ve missed you too much...', # NOQA
                    'One more beer for the gang? I feel like you would be the type to have plenty to drink.', # NOQA
                    'The servants quarters are pretty tightly packed aren\'t they?',
                    'I hate being an archer...',
                    correct_phrase,
                    'Once upon a time I lived for that king, but nowadays I feel like I could go without him. Thats why I\'m here in the servants quarters.', # NOQA
                    'Hey there little fella, do you think you can get me some food?',
                    'I know you want more than just some of our wares, I\'m selling everything.', # NOQA
                    'One more song! I know you know a few more of them!',
                    'If that isn\'t a good joke, I don\'t know what is? Hahahahaha',
                    'Three fort nights too late, I will not stand for this! You should have been here sooner!', # NOQA
                    'Aw sweetheart, I just want you to know how much I care.',
                    'I have no spells for you! My wizardry is just for me and my acolytes.', # NOQA
                    'How did you find out the kinds of jokes that the king likes so much?', # NOQA
                ],
            },
        }
        self.mturk_agent.observe(second_act)
        act = self.mturk_agent.act()
        if is_disconnected(act):
            self.episodeDone = True
            return
        while act['text'] != correct_phrase:
            self.wrong += 1
            if self.wrong > 3:
                return self.block_loop()
            second_act['task_data']['wrong'] = self.wrong
            self.mturk_agent.observe(second_act)
            act = self.mturk_agent.act()
            if is_disconnected(act):
                self.episodeDone = True
                return
        # ---- Turn 3: the worker must answer with 'hit Jester' ----
        self.mturk_agent.update_agent_id('TestAct')
        third_act = {
            'id': 'System',
            'text': 'THIRD_TURN',
            'task_data': {
                'wrong': 0,
                'turn': 'THIRD_TURN',
                'text': 'You gotta get your senses straight. Hyah! '
                'Consider this a warning...',
                'actions': [
                    'drop spear',
                    'wield spear',
                    'hug Jester',
                    'examine chest',
                    'get coins from chest',
                    'hit Jester',
                    'steal ball from Jester',
                ],
            },
        }
        self.mturk_agent.observe(third_act)
        act = self.mturk_agent.act()
        if is_disconnected(act):
            self.episodeDone = True
            return
        # NOTE(review): unlike the first two turns this uses `if`, not
        # `while`, so only one retry is enforced and the second answer is
        # never re-checked -- confirm whether that is intentional.
        if act['text'] != 'hit Jester':
            self.wrong += 1
            if self.wrong > 3:
                return self.block_loop()
            third_act['task_data']['wrong'] = self.wrong
            self.mturk_agent.observe(third_act)
            act = self.mturk_agent.act()
            if is_disconnected(act):
                self.episodeDone = True
                return
        # All three turns passed: show the final screen and finish.
        self.did_complete = True
        self.mturk_agent.observe(
            {
                'id': 'System',
                'text': 'FINAL_TURN',
                'task_data': {'turn': 'FINAL_TURN', 'wrong': 0},
            }
        )
        self.episodeDone = True
        time.sleep(3)
        return
class LightEvalTaskWorld(MTurkTaskWorld):
    """
    Task world steps the worker through a conversation, giving them cands to select from
    as if they are a retrieval model.

    Each parley shows the conversation context for the current turn plus
    the candidate responses; the worker's picks are collected in
    ``self.selections`` and compared against ``self.corrects``.
    """
    def __init__(self, opt, mturk_agents, sample, use_train, max_wrong):
        self.mturk_agent = mturk_agents[0]
        self.sample_acts = sample
        self.turn = 0
        self.episodeDone = False
        self.completed = False
        self.selections = []
        # NOTE(review): 'labels' entries contribute a single string while
        # 'eval_labels' entries contribute the whole list -- confirm the
        # eval comparison downstream expects that asymmetry.
        self.corrects = [
            ex['labels'][0] if 'labels' in ex else ex['eval_labels'] for ex in sample
        ]
        self.use_train = use_train
        self.max_wrong = max_wrong
    def extract_from_flag(self, text, flag):
        """Return the text following *flag* in *text* (e.g. the payload of
        a '_self_say ...' line)."""
        return text.split(flag)[1]
    def get_current_turn_context(self):
        """Build the System act shown for the current turn.

        Re-parses the flag-prefixed lines ('_setting_name', '_self_say',
        '_partner_act', ...) of every act up to and including the current
        one into setting/persona fields, a message history, and the sorted
        candidate list.
        """
        all_lines = []
        for act in self.sample_acts[: self.turn]:
            lines = act['text'].split('\n')
            # drop the trailing _self line; it belongs to the answer turn
            if lines[-1].startswith('_self'):
                lines = lines[:-1]
            all_lines += lines
        lines = all_lines + self.sample_acts[self.turn]['text'].split('\n')
        lines = list(filter(lambda x: len(x) > 0, lines))
        # Defaults used when the context does not provide a field.
        setting_name = 'Setting withheld'
        setting_desc = 'Setting description withheld'
        self_name = 'Character withheld'
        partner_name = 'Partner withheld'
        self_persona = 'Persona withheld'
        self_act = ''
        self_text = 'Spoken text withheld'
        messages = []
        self_message = {}
        partner_message = {}
        # Handle current turn context separately
        if lines[-1].startswith('_self'):
            self_line = lines[-1]
            lines = lines[:-1]
            # Extract current turn context
            if self_line.startswith('_self_say'):
                self_text = self.extract_from_flag(self_line, '_self_say')
            elif self_line.startswith('_self_act'):
                self_act = self.extract_from_flag(self_line, '_self_act')
            elif self_line.startswith('_self_emote'):
                self_act = self.extract_from_flag(self_line, '_self_emote')
        # Construct the rest of the context
        for line in lines:
            if line.startswith('_setting_name'):
                setting_name = self.extract_from_flag(line, '_setting_name')
            elif line.startswith('_setting_desc'):
                setting_desc = self.extract_from_flag(line, '_setting_desc')
            elif line.startswith('_partner_name'):
                partner_name = self.extract_from_flag(line, '_partner_name')
            elif line.startswith('_self_name'):
                self_name = self.extract_from_flag(line, '_self_name')
            elif line.startswith('_self_persona'):
                self_persona = self.extract_from_flag(line, '_self_persona')
            elif line.startswith('_partner'):
                # a partner line closes any pending self message
                if 'id' in self_message:
                    messages.append(self_message)
                    self_message = {}
                if line.startswith('_partner_say'):
                    partner_message['id'] = partner_name
                    partner_message['text'] = self.extract_from_flag(
                        line, '_partner_say'
                    )
                if line.startswith('_partner_act'):
                    partner_message['task_data'] = {
                        'action': self.extract_from_flag(line, '_partner_act')
                    }
                if line.startswith('_partner_emote'):
                    partner_message['task_data'] = {
                        'action': 'gesture '
                        + self.extract_from_flag(line, '_partner_emote')
                    }
            elif line.startswith('_self'):
                # a self line closes any pending partner message
                if 'id' in partner_message:
                    messages.append(partner_message)
                    partner_message = {}
                if line.startswith('_self_say'):
                    self_message['id'] = self_name
                    self_message['text'] = self.extract_from_flag(line, '_self_say')
                if line.startswith('_self_act'):
                    self_message['task_data'] = {
                        'action': self.extract_from_flag(line, '_self_act')
                    }
                if line.startswith('_self_emote'):
                    self_message['task_data'] = {
                        'action': 'gesture '
                        + self.extract_from_flag(line, '_self_emote')
                    }
        # flush the last pending partner message
        if 'id' in partner_message:
            messages.append(partner_message)
        act = {
            'id': 'System',
            'text': 'TASK_DATA',
            'task_data': {
                'actions': sorted(self.sample_acts[self.turn]['label_candidates']),
                'text': self_text,
                'curr_message_context': {'action': self_act},
                'agent_id': self_name,
                'base_name': self_name,
                'persona': self_persona,
                'partner_name': partner_name,
                'setting': setting_desc,
                'setting_name': setting_name,
                'messages': messages,
            },
        }
        return act
    def parley(self):
        """Show one turn, record the worker's selection, and on the final
        turn grade training workers (soft-block above max_wrong)."""
        self.mturk_agent.observe(self.get_current_turn_context())
        act = self.mturk_agent.act()
        if is_disconnected(act):
            self.episodeDone = True
            return
        self.selections.append(act['text'])
        self.turn += 1
        if self.turn == len(self.sample_acts):
            self.episodeDone = True
            self.completed = True
            wrong = 0
            if self.use_train:
                for i in range(len(self.selections)):
                    if self.selections[i] != self.corrects[i]:
                        wrong += 1
                if wrong > self.max_wrong:
                    self.completed = False
                    self.mturk_agent.mturk_manager.soft_block_worker(
                        self.mturk_agent.worker_id
                    )
                    print('Worker failed in train', self.mturk_agent.worker_id)
    def episode_done(self):
        return self.episodeDone
    def shutdown(self):
        self.mturk_agent.shutdown()
    def get_custom_task_data(self):
        # brings important data together for the task, to later be used for
        # creating the dataset. If data requires pickling, put it in a field
        # called 'needs-pickle'.
        return {
            'selections': self.selections,
            'corrects': self.corrects,
            'episode': self.sample_acts,
            'training': self.use_train,
        }
| [
"caihengyi@ict.ac.cn"
] | caihengyi@ict.ac.cn |
364b52dce6b0b45a45ebc5c83fd807abaf14be33 | f4be48866d7d2181d563939cee82671b893ddbb9 | /kubecd/tests/helm_test.py | 9f7ced88d45076beffcc4b869d72427b85b75dbd | [
"Apache-2.0"
] | permissive | cvega/kubecd | cd893639cf6e683caa6a94eea730cf956be3f0ed | 3e181d29caa4220c19dcd80ed44e5aaec1e6c955 | refs/heads/master | 2020-03-26T22:19:51.775077 | 2018-06-20T06:12:23 | 2018-06-20T06:12:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | from .. import helm as sut
def test_lookup_value():
    """lookup_value resolves dotted-string and list paths; misses give None."""
    assert sut.lookup_value('a.b', {'a': {'b': 'foo'}}) == 'foo'
    assert sut.lookup_value(['a', 'b'], {'a': {'b': 'foo'}}) == 'foo'
    assert sut.lookup_value(['b.a'], {'a': {'b': 'foo'}}) is None
def test_key_is_in_values():
    """Both list-form and dotted-string keys match nested values; a flat
    'imageTag' key must not satisfy the nested 'image.tag' path."""
    assert sut.key_is_in_values(['image', 'tag'], {'image': {'tag': 'foo'}})
    assert sut.key_is_in_values('image.tag', {'image': {'tag': 'foo'}})
    assert not sut.key_is_in_values(['image', 'tag'], {'imageTag': 'foo'})
    assert not sut.key_is_in_values(['image.tag'], {'imageTag': 'foo'})
| [
"stig@zedge.net"
] | stig@zedge.net |
03ea1e159edc4c6f2403ada2fec1c9b11f7da13f | 43bf5f016b72ae137e18d72bffaec4a3fb9f2fea | /E_learning/E_learning/settings.py | b38dfd995319549de6f4f97431e889e2f3c14753 | [] | no_license | oyewunmio/Django_works | 374a414543ee91093328993c287bcc7e4d900a5c | 99161bcf5542122cf0703af809e336d5e7888c54 | refs/heads/main | 2023-07-03T09:57:10.534592 | 2021-08-15T17:31:28 | 2021-08-15T17:31:28 | 389,153,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,498 | py | """
Django settings for E_learning project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-lt&pp+uz=)%u50cx^z=%(dp^gq10i4wa#y9vvoau(3gr_z%8)%'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'books.apps.BooksConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'E_learning.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Fixed: the original passed the literal string 'BASE', which built
        # the relative path "BASE/templates"; templates live under BASE_DIR.
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'E_learning.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'E_learning',
        'USER': 'postgres',
        'PASSWORD': '',
        'HOST': '127.0.0.1',
        'PORT': '5433',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'

MEDIA_URL = '/media/'
# Fixed: same 'BASE' literal bug as TEMPLATES['DIRS'] above.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
"42620838+oyewunmio@users.noreply.github.com"
] | 42620838+oyewunmio@users.noreply.github.com |
cd101dd7fed78764bf7b94b637a62e38eca94487 | ad1697f00a25fd249ccd4a175cb29f1cb89fd4d4 | /app/app.py | 90074be9ace0662badc3e0dd8f08509692e67531 | [] | no_license | sarathk-vmw/test18 | 427b0f7efc48d3ede0b7e703003757dcbed5f12d | 0e006d060dcebb65c39d83203b0960d32527e7f5 | refs/heads/master | 2021-01-11T06:58:26.602181 | 2017-08-20T07:00:48 | 2017-08-20T07:00:48 | 71,971,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | from flask import Flask
# Module-level WSGI application object.
app = Flask(__name__)
@app.route("/")
def index():
    """Root endpoint: plain-text greeting."""
    return "Welcome Docker"
if __name__ == "__main__":
    # NOTE(review): debug=True together with host="0.0.0.0" exposes the
    # Werkzeug debugger to the whole network -- acceptable only for local
    # Docker testing; confirm this never runs in production.
    app.run(debug=True, host="0.0.0.0", port=9000)
| [
"srt181@example.com"
] | srt181@example.com |
1ad40da9f025f979beaadbf21c9fdb2e06ead2ac | 03c1325893b502b7855f83287e02e7f14af4f1c7 | /projects/py/chapter14/amazon-dynamodb-part-iv.py | 33eb30bdf82b22ac3da8f477fa5ad77771e646da | [] | no_license | elgeish/Computing-with-Data | 8562a15a74df6f379296b84e393a358eebf3d3fc | 5547dc28c027e023783238be78eab216ec5204f4 | refs/heads/master | 2023-07-29T06:00:26.625191 | 2023-07-16T00:32:38 | 2023-07-16T00:32:38 | 145,339,359 | 15 | 24 | null | 2023-07-16T00:32:40 | 2018-08-19T21:38:09 | Java | UTF-8 | Python | false | false | 558 | py | from pprint import pprint
import boto3
from boto3.dynamodb.conditions import Key
# Query the 'reminder' table for all items whose 'timestamp' key equals the
# given epoch second (credentials/region come from the environment).
dynamodb = boto3.resource('dynamodb')
response = dynamodb.Table('reminder').query(
    KeyConditionExpression=Key('timestamp').eq(1511647270)
)
# NOTE(review): only the first result page is printed; a LastEvaluatedKey
# in the response would mean more items exist -- pagination not handled.
pprint(response['Items'])
## [{u'text': u'batch write example',
##   u'timestamp': Decimal('1511647270'),
##   u'ttl': Decimal('1511733670'),
##   u'userID': u'geish@voicera.ai'},
##  {u'text': u'another user',
##   u'timestamp': Decimal('1511647270'),
##   u'ttl': Decimal('1511733670'),
##   u'userID': u'user@example.com'}]
| [
"elgeish@gmail.com"
] | elgeish@gmail.com |
60521fbf55813b5e5554d270c2914a972760e620 | 75c3ce2153613a0ff754f51062beec325aa2bb26 | /xiaoEdaifa/trade/migrations/0004_auto_20190622_2200.py | e390f179f767c9839223e532be6374fe8b090dc2 | [] | no_license | chan3256995/vueproject | a3c600ea2880b694a53b6f346bcb840581a7d1fc | 681d5a943f8699750ced49b40097bb7f24c810aa | refs/heads/master | 2023-02-21T04:21:01.964410 | 2023-02-10T11:14:13 | 2023-02-10T11:14:13 | 198,947,244 | 0 | 0 | null | 2022-12-11T20:30:08 | 2019-07-26T04:39:25 | JavaScript | UTF-8 | Python | false | false | 410 | py | # Generated by Django 2.2.1 on 2019-06-22 14:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trade', '0003_auto_20190622_1042'),
]
operations = [
migrations.AlterField(
model_name='refundapply',
name='add_time',
field=models.BigIntegerField(default=1561212055.3240602),
),
]
| [
"801314902qq.com"
] | 801314902qq.com |
467121700a3bbae0045b9fa5e7aba2e67e68fe10 | ddab9365317ea104f1a20ebdc35a62e47b12c659 | /tests/test_aragon_d5_gas_analytics.py | dfd50ca9742b12b13bc70040de3ec61bc1c0262b | [] | no_license | d5-ai/aragon_d5_gas_anayltics | 5606957b331efb319455a2985b7e568558f0045a | 59e1db395d551b9d5371cc8ab937b4a6b4c508a9 | refs/heads/master | 2022-12-26T00:58:42.749449 | 2020-10-14T12:26:55 | 2020-10-14T12:26:55 | 303,974,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | from aragon_d5_gas_analytics import __version__
def test_version():
assert __version__ == '0.1.0'
| [
"araa@connect.ust.hk"
] | araa@connect.ust.hk |
a78e3bfd191120d9e43f6a701cd5118e8c862b15 | c9b41d89848a5e4019b033f7f0585c5eb02c89cc | /variables/amihud.py | 51e21b6f459bfb28c702f0f13fbdf2be5eabaf3a | [] | no_license | gaitkin/insider-predictions | 956a587d6d30df1a795964947521c943ae6c305a | 46e2711fb8b1cafd6865bba8852ca3bfca3060bb | refs/heads/main | 2023-05-26T19:01:07.318354 | 2021-06-07T13:34:22 | 2021-06-07T13:34:22 | 372,782,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,463 | py | #!/usr/bin/env python
# coding: utf-8
# Compute a per-stock Amihud illiquidity measure (|return| / volume,
# averaged over 30-day windows) and write the result to amihud.csv.
#Import libraries used
import pandas as pd
import numpy as np
import math
from datetime import datetime, timedelta
#Import closing/daily returns data CSV as dataframe
df_closing = pd.read_csv ("closingdata.csv")
df_daily_returns = pd.read_csv ("dailyreturns.csv")
#Select relevant columns used in closing dataset
df_closing = df_closing[["id","stamp","value"]]
#Merge both dataframes on (id, stamp)
df_merged = pd.merge(left=df_daily_returns, right=df_closing, how="left", left_on=["id","stamp"], right_on=["id","stamp"])
#Drop NA & 0 values (zero volume would divide by zero below)
df_merged = df_merged.dropna(subset = ["returns"])
df_merged.drop(df_merged[(df_merged["value"] == 0)].index , inplace=True)
#Create Amihud column with first step of calculations: |return| / volume
df_merged['amihud'] = df_merged.apply(lambda row: abs(row["returns"]) / row["value"], axis = 1)
#Transform stamp to datetime
df_merged["stamp"] = pd.to_datetime(df_merged["stamp"])
#Set stamp column as index (enables date-range .loc slicing below)
df_merged = df_merged.set_index(["stamp"])
delta_days = 30
start_date = datetime(2010, 1, 5)
now = datetime.now()
iterations = int((now - start_date).days / delta_days)
#Create empty dataframe
transactions = {
    "id": [],
    "stamp": [],
    "summy": [],
    "county": [],
}
# NOTE(review): these positions are taken from df_merged.columns but used
# to index the groupby() results below, whose column order may differ
# (groupby keys come first) -- verify against the actual CSV columns.
id_index = list(df_merged.columns).index("id")
amihud_index = list(df_merged.columns).index("amihud")
#Next step of Amihud calculations: per-window, per-id sum and count
for i in range(iterations):
    begin_transaction_date = (start_date + timedelta(days=delta_days * i)).strftime("%Y-%m-%d")
    end_transaction_date = (start_date + timedelta(days=delta_days * (i + 1))).strftime("%Y-%m-%d")
    # NOTE(review): pandas .loc date slicing is inclusive on both ends, so
    # each boundary day is counted in two consecutive windows -- confirm
    # this overlap is intended.
    df2 = df_merged.loc[
        begin_transaction_date: end_transaction_date
    ].groupby(["id"], as_index=False).sum()
    df3 = df_merged.loc[
        begin_transaction_date: end_transaction_date
    ].groupby(["id"], as_index=False).count()
    transactions["id"] += list(df2.iloc[:, id_index])
    transactions["stamp"] += [begin_transaction_date] * len(list(df2.iloc[:, id_index]))
    transactions["summy"] += list(df2.iloc[:, amihud_index])
    transactions["county"] += list(df3.iloc[:, amihud_index])
df_amihud = pd.DataFrame(transactions, columns = ["id", "stamp", "summy", "county"])
df_amihud = df_amihud.sort_values(by=['id', "stamp"])
#Final step of Amihud calculations: window average = sum / count
df_amihud['amihud'] = df_amihud.apply(lambda row: (row["summy"]) / row["county"], axis = 1)
#Save as CSV
df_amihud.to_csv ("amihud.csv", index = False)
| [
"noreply@github.com"
] | noreply@github.com |
166c22366b18b72c0e2b554c1b08c41c2df395ea | 3f1c663a877a0947dfb3fde38bb0d0082ecd26b1 | /pdml2arff.py | 69d64f3f75b336428cbeb37c27690df246b6166f | [] | no_license | KyungRul/Graduation-Thesis | 0e9495272d92f19409f989a1bcc1d576ab15c4b0 | 76272955b9d790d804b127d1ba14aa5d379b9bdf | refs/heads/master | 2020-07-28T15:23:36.404490 | 2020-04-30T07:20:30 | 2020-04-30T07:20:30 | 209,450,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,100 | py | #!/usr/bin/python
# change the path (above) to reflect where you have python installed
#
# this script will take a tshark generated pdml file and turn it
# into an arff formatted file, suitable for ingestment by weka
# here's how to create the pdml file from pcap:
# tshark -T pdml -r <infile> > <outfile>
# (adding -V gets you no more data)
# usage of this script: pdml2arff.py <outfile> (outfile is pdml from above)
# ./pdml2arff.py <input_file> -o <output_file(optional)> -n (convert all strings to numerics
import csv
from os import listdir, stat
import gc
class myDialect(csv.Dialect):
    """CSV dialect used for the ARFF data section.

    Non-numeric values are quoted (QUOTE_NONNUMERIC) so string attributes
    survive commas; lines are terminated with a bare newline.
    """
    delimiter = ','
    quotechar = '"'
    quoting = csv.QUOTE_NONNUMERIC  # quote everything that is not a number
    lineterminator = "\n"
    doublequote = False
    skipinitialspace = False
#
# Define a simple class to wrap functions
#
class PdmlConvert:
    """Convert a tshark-generated PDML (XML) capture into an ARFF file.

    Discovered field names accumulate in ``self.headers`` and one
    ``{field_name: value}`` dict per packet accumulates in ``self.results``;
    ``convert_file`` runs the whole parse-then-write pipeline.
    """

    def __init__(self, templateString, numbers_only=False):
        self.template = templateString    # ARFF header template ($RELATION / $ATTRIBUTES)
        self.numbers_only = numbers_only  # if True, values are converted to ints via str2num
        self.headers = []                 # attribute names, in order of first appearance
        self.results = []                 # one dict per parsed packet
        self.packet_count = 1             # id assigned to the next packet

    def __del__(self):
        # Drop the (potentially large) parse state and force a collection;
        # the batch driver creates one converter per input file.
        del self.template
        del self.numbers_only
        del self.headers
        del self.results
        del self.packet_count
        gc.collect()
        print("Objects deleted")

    #
    # convert the given input to ARFF format
    #
    def convert_file(self, input_file, **kwargs):
        """Parse *input_file* and write '<name>.arff' (or kwargs['output_file'])."""
        fname, ext = self.parse_filename(input_file)
        output_file = kwargs.get('output_file', fname + '.arff').replace('pdml', 'arff')
        self.parse_file(input_file)
        header = self.build_header(input_file)          # build the top section of output file
        self.write_to_file(header, output_file)         # write top section to output file
        self.append_array_of_dict_to_csv(output_file)   # append the data rows

    #
    # uses xml.dom.minidom to parse input xml file
    #
    def parse_file(self, file):
        """Parse every <packet> element of the PDML document in *file*."""
        from xml.dom import minidom
        # self.clean_file(file)  # only needed for inputs with unbalanced </proto> tags
        xmldoc = minidom.parse(file)
        for packet in xmldoc.getElementsByTagName('packet'):
            self.parse_packet(packet)

    def parse_packet(self, packet):
        """Turn one <packet> element into a result dict and store it."""
        id = self.packet_count
        self.packet_count += 1
        arf = self.create_arf(id)
        for proto in packet.getElementsByTagName('proto'):
            arf = self.parse_proto_into_arf(arf, proto)
        self.results.append(arf)

    def parse_proto_into_arf(self, arf, proto):
        """Merge every <field> of one <proto> element into *arf*."""
        proto_name = proto.getAttribute('name')
        for field in proto.getElementsByTagName('field'):
            arf = self.parse_field_into_arf(proto_name, arf, field)
        return arf

    def parse_field_into_arf(self, proto_name, arf, field):
        """Add a field (and any nested subfields) to *arf* if it has a value."""
        field_name = field.getAttribute('name')
        name = self.build_name(field_name, proto_name)  # dotted name: grand.parent.child
        arf = self.append_key_value(name, self.get_value_from_field(field), arf)
        # Some fields have children subfields with values of their own.
        for subfield in field.getElementsByTagName('field'):
            sf_name = subfield.getAttribute('name')
            name = self.build_name(sf_name, field.getAttribute('name'), proto_name)
            arf = self.append_key_value(name, self.get_value_from_field(subfield), arf)
        return arf

    def append_key_value(self, key, value, map):
        """Store key/value in *map*, registering new keys in self.headers.

        Empty values are skipped entirely (no header, no entry).
        """
        if value == '':
            return map
        if not key in self.headers:
            self.headers.append(key)
        map[key] = value
        return map

    def get_value_from_field(self, field):
        """Return the field's 'unmaskedvalue', else its 'value', else ''."""
        if field.hasAttribute('unmaskedvalue'):
            return field.getAttribute('unmaskedvalue')
        elif field.hasAttribute('value'):
            return field.getAttribute('value')
        else:
            return ''

    def build_name(self, name, parent, grand=''):
        """Build a dotted attribute name, avoiding duplicated prefixes."""
        ret = name
        if not str(name).startswith(parent):
            ret = parent + '.' + ret
        if not grand == '':
            if not ret.startswith(grand):
                ret = grand + '.' + ret
        return ret

    def create_arf(self, id):
        """Start a new result dict for packet *id* (registers 'packet_id')."""
        if not 'packet_id' in self.headers:
            self.headers.append('packet_id')
        return {'packet_id': id}

    #
    # Some input files contain an extra closing </proto> just before
    # </packet>.  Count opens/closes and drop any close with no matching
    # open; this rewrites *file* in place.
    #
    def clean_file(self, file):
        import re
        stack = 0
        output = []
        with open(file, 'r') as src:
            for line in src:
                if re.search('<proto', line):
                    stack += 1
                elif re.search('</proto>', line):
                    stack -= 1
                if stack >= 0:
                    output.append(line)
                else:
                    stack += 1
        # Fixed: both file handles used to be left open.
        with open(file, 'wb') as o:
            for line in output:
                o.write(line.encode('UTF-8', 'replace'))

    #
    # Appends self.results (one CSV row each, in self.headers order) to
    # the given filename; missing keys are emitted as '?'.
    #
    def append_array_of_dict_to_csv(self, filename):
        dialect = myDialect()
        with open(filename, 'a') as csvfile:  # fixed: handle was never closed
            csvw = csv.DictWriter(csvfile, self.headers, '?', dialect=dialect)
            for kvs in self.results:
                if self.numbers_only:
                    kvs = self.map2num(kvs)
                csvw.writerow(kvs)

    def write_to_file(self, text, filename):
        """Write *text* to *filename* (UTF-8, replacing unencodable chars)."""
        with open(filename, 'wb') as f:  # fixed: handle was never closed
            f.write(text.encode('UTF-8', 'replace'))

    def build_header(self, filename):
        """Fill the ARFF template: relation name plus one
        '@attribute <name> STRING' line per discovered header."""
        from string import Template
        text = Template(self.template)
        attr_str = ""
        for attr in self.headers:
            attr_str += "@attribute " + attr + " STRING" + "\n"
        return text.substitute(RELATION=filename, ATTRIBUTES=attr_str)

    #
    # Parse a filename into its base name and extension
    # returns [basename, ext] or raises 'Invalid Filename'
    #
    def parse_filename(self, name):
        import re
        r = re.search(r"(\S+)(\.\S{1,4})$", name)
        if r:
            return [r.group(1), r.group(2)]
        else:
            raise Exception('Invalid Filename')

    def map2num(self, m):
        """Return a copy of dict *m* with every value run through str2num.

        Fixed: previously used Python 2's dict.iteritems(), which raises
        AttributeError under Python 3.
        """
        result = {}
        for k, v in m.items():
            result[k] = self.str2num(v)
        return result

    #
    # Convert a string to a number by concatenating the ord() of each
    # character, e.g. 'ab' -> int('9798') == 9798.  Ints pass through.
    #
    def str2num(self, s):
        if type(s) is int:
            return s
        num = ''
        for letter in s:
            o = ord(letter)
            num += str(o)
        return int(num)

    def error_log(self, message):
        """Overwrite pdml.errors.log with *message*.

        Fixed: the file was opened in binary mode while writing str,
        which raises TypeError on Python 3; also close the handle.
        """
        with open('pdml.errors.log', 'w') as f:
            f.write(message)
# Template for the ARFF header; PdmlConvert.build_header substitutes
# $RELATION and $ATTRIBUTES via string.Template (lines starting with %
# are ARFF comments).
arff = '''
%
% This arff created by pdml2arff.py
% Written by Tim Stello with input from Charlie Fowler, spring 2013
% This script takes a pdml file created by tshark and converts it to arff
%
@relation $RELATION
%
%attributes
%
$ATTRIBUTES
%
@data
%
'''
#
# Main: this portion executes only when this file is executed
# from the command line. If you 'import' this file, this section
# will not execute
#
if __name__ == '__main__':
    # Batch mode: convert every Mirai-botnet PDML capture that does not
    # already have a matching ARFF file.  (The old single-file CLI mode
    # and the one-off clean_file pass were dead, commented-out code and
    # have been removed; `import sys` and `usage` were only used there.)
    numbers_only = False
    files = listdir('./RawData/iot-network-intrusion-dataset/Packets_pdml/')
    done_files = listdir("./RawData/iot-network-intrusion-dataset/Packets_arff")
    for file in files:
        if (file.replace('pdml', 'arff') not in done_files) and ('mirai' in file):
            print('{} starts'.format(file))
            # Fresh converter per file so headers/results do not leak
            # between captures.
            pdmlc = PdmlConvert(arff, numbers_only)
            pdmlc.convert_file('./RawData/iot-network-intrusion-dataset/Packets_pdml/' + file)
            del pdmlc
            print('{} finished'.format(file))
| [
"kyungrulbae@gmail.com"
] | kyungrulbae@gmail.com |
b4d713dbbbe9bdb10d591bc40c6f752200008f97 | a12d2d99d6bbb00bf4dabcab8c3efe5ea4714648 | /DistractedDriverMaskHandsAndFace.py | cb255bd777238c310b8c3e0f2709123ec5589192 | [] | no_license | devyhia/slim-backup | 01a702f65d4d5a2994cd2fac1ee225646e819fd9 | 26471cf66cf8e8ceff136a692ae6ba36fea90952 | refs/heads/master | 2021-01-12T00:14:25.456094 | 2017-10-17T09:54:35 | 2017-10-17T09:54:35 | 78,696,206 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py | import pickle
from glob import glob
import numpy as np
from PIL import Image
import Shared
from tqdm import tqdm
import os
# Load the pickled hand and face detection results.  Keys appear to be
# image paths (with trailing newlines) and values lists of bounding
# boxes — see the masking loop below.  Fixed: pickle files must be
# opened in binary mode ('rb'); text mode raises TypeError on Python 3.
# NOTE: pickle.load must only be used on trusted files.
with open('/home/devyhia/LendingAHand/result.pickle', 'rb') as f:
    hands = pickle.load(f)
with open('/home/devyhia/FaceDetection_CNN/result.pickle', 'rb') as f:
    faces = pickle.load(f)
def mask_image(img_path, boxes):
    """Black out everything outside the given bounding boxes.

    Boxes are (x0, y0, x1, y1) in pixels on a 1920x1080 frame; fractional
    coordinates are expanded outward (floor the origin, ceil the far
    corner).  With no boxes the image is returned untouched.
    """
    image = Image.open(img_path)
    if len(boxes) == 0:
        return image
    keep = np.zeros((1080, 1920, 3))
    for box in boxes:
        col_lo = int(np.floor(box[0]))
        row_lo = int(np.floor(box[1]))
        col_hi = int(np.ceil(box[2]))
        row_hi = int(np.ceil(box[3]))
        keep[row_lo:row_hi, col_lo:col_hi, :] = 1
    # Multiply the 0/1 mask into the pixel data; zeros turn black.
    return Image.fromarray(keep.astype(np.uint8) * image)
img_count = 0  # NOTE(review): never updated — appears to be a leftover
# For every image with face detections, combine its face and hand boxes
# and save a masked copy alongside the original.
for k in tqdm(faces.keys(), total=len(faces.keys()), desc="Masking Hands & Face"):
    img_path = k.replace('\n', '') # keys carry a trailing \n from the original listing
    if '.original.jpg' not in img_path:
        img_path = img_path.replace('.jpg', '.original.jpg')
    faceBoxes = faces[k]
    # The hands dict may key the same image with or without the
    # '.original' infix, so try both variants.
    pref = '.'.join(k.split('.')[:2])
    wOrig = pref + '.original.jpg\n'
    noOrig = pref + '.jpg\n'
    handBoxes = hands[wOrig] if wOrig in hands else hands[noOrig]
    boxes = faceBoxes + handBoxes
    save_path = img_path.replace('.original.jpg', '.hands_and_face.jpg')
    # if os.path.isfile(save_path): continue  # (skip already-masked images)
    masked_img = mask_image(img_path, boxes)
    masked_img.save(save_path)
| [
"devyhia@aucegypt.edu"
] | devyhia@aucegypt.edu |
ad796b01f49b7944d7c81a65fdb929ca1235c040 | 64ec8731553aa08c33373b212bbe431b1a23b97c | /test/util/util_spatial.py | 74e2b2692deec5adc94efe1ca8e6186db7ba6e48 | [
"BSD-3-Clause",
"MIT"
] | permissive | ChetanNathwani/pyrolite | 98947fde265b25beea839f24495d68bbdb726eed | 8de9c67855305115517418e127bf26de84ff062d | refs/heads/master | 2023-07-26T18:57:28.024540 | 2021-07-08T09:19:02 | 2021-07-08T09:19:02 | 367,300,779 | 0 | 0 | NOASSERTION | 2021-05-14T09:23:47 | 2021-05-14T08:35:50 | null | UTF-8 | Python | false | false | 7,785 | py | import unittest
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
try:
import cartopy.crs as ccrs
HAVE_CARTOPY = True
except ImportError:
HAVE_CARTOPY = False
from pyrolite.util.spatial import *
from pyrolite.util.math import isclose # nan-equalling isclose
class TestGreatCircleDistance(unittest.TestCase):
def setUp(self):
self.ps = zip(
np.array(
[
([0, 0], [0, 0]), # should be 0
([-170, 0], [170, 0]), # should be 20
([0, -90], [0, 90]), # should be 180
([-45, 0], [45.0, 0.0]), # should be 90
([-90, -90], [90.0, 90.0]), # should be 180
([-90, -45], [90.0, 45.0]), # should be 180, rotation of above
([-90, -0], [90.0, 0.0]), # should be 180, rotation of above
([-60, 20], [45.0, 15.0]),
([-87.0, 67.0], [34, 14]),
([-45, -45], [45.0, 45.0]),
([-45, -30], [45.0, 30.0]),
]
),
[0, 20, 180, 90, 180, 180, 180, None, None, None, None],
)
def test_default(self):
for ps, expect in self.ps:
with self.subTest(ps=ps, expect=expect):
distance = great_circle_distance(*ps)
distance_r = great_circle_distance(*ps[::-1])
self.assertTrue(isclose(distance, distance_r))
if (ps[0] == ps[1]).all():
self.assertTrue(np.isclose(distance, 0.0))
"""
ax = plt.subplot(111, projection=ccrs.Mollweide()) # ccrs.Orthographic(0, 0))
ax.figure.set_size_inches(8, 8)
ax.stock_img()
ax.plot(
*np.array([*ps]).T,
color="blue",
marker="o",
transform=ccrs.Geodetic()
)
ax.plot(*np.array([*ps]).T, color="gray", transform=ccrs.PlateCarree())
plt.text(
**np.array([*ps])[0] + [5, 5],
"{:2.0f}".format(distance),
horizontalalignment="left",
fontsize=10,
transform=ccrs.Geodetic()
)
plt.show()"""
def test_absolute(self):
for ps, expect in self.ps:
for absolute in [True, False]:
with self.subTest(ps=ps, expect=expect, absolute=absolute):
distance = great_circle_distance(*ps, absolute=absolute)
distance_r = great_circle_distance(*ps[::-1], absolute=absolute)
self.assertTrue(isclose(distance, distance_r))
if (ps[0] == ps[1]).all():
self.assertTrue(np.isclose(distance, 0.0))
def test_degrees(self):
for ps, expect in self.ps:
for degrees in [True, False]:
with self.subTest(ps=ps, expect=expect, degrees=degrees):
if not degrees:
ps = np.deg2rad(
ps
) # convert to radians to give sensible output
distance = great_circle_distance(*ps, degrees=degrees)
distance_r = great_circle_distance(*ps[::-1], degrees=degrees)
self.assertTrue(isclose(distance, distance_r))
if (ps[0] == ps[1]).all():
self.assertTrue(np.isclose(distance, 0.0))
if expect is not None:
self.assertTrue(isclose(distance, expect))
def test_Vicenty(self):
method = "vicenty"
for ps, expect in self.ps:
with self.subTest(ps=ps, expect=expect, method=method):
distance = great_circle_distance(*ps, method=method)
distance_r = great_circle_distance(*ps[::-1], method=method)
self.assertTrue(isclose(distance, distance_r))
if (ps[0] == ps[1]).all():
self.assertTrue(np.isclose(distance, 0.0))
if expect is not None:
self.assertTrue(isclose(distance, expect))
def test_haversine(self):
method = "haversine"
for ps, expect in self.ps:
with self.subTest(ps=ps, expect=expect, method=method):
distance = great_circle_distance(*ps, method=method)
distance_r = great_circle_distance(*ps[::-1], method=method)
self.assertTrue(isclose(distance, distance_r))
if (ps[0] == ps[1]).all():
self.assertTrue(np.isclose(distance, 0.0))
if expect is not None:
self.assertTrue(isclose(distance, expect))
def test_cosines(self):
method = "cosines"
for ps, expect in self.ps:
with self.subTest(ps=ps, expect=expect, method=method):
distance = great_circle_distance(*ps, method=method)
distance_r = great_circle_distance(*ps[::-1], method=method)
self.assertTrue(isclose(distance, distance_r))
if (ps[0] == ps[1]).all():
self.assertTrue(np.isclose(distance, 0.0))
if expect is not None:
self.assertTrue(isclose(distance, expect))
class TestPieceWise(unittest.TestCase):
def test_pieces(self):
x1, x2 = 0.0, 10.0
segment_ranges = [(x1, x2)]
for segments in [1, 2, 3]:
with self.subTest(segments=segments):
result = list(piecewise(segment_ranges, segments=segments))
self.assertTrue(len(result) == segments)
def test_multiple_ranges(self):
x1, x2 = 0.0, 10.0
segment_ranges = [(x1, x2), (x2, x1), (x1, x2)]
segments = 2
result = list(piecewise(segment_ranges, segments=segments))
self.assertTrue(len(result) == segments ** len(segment_ranges))
class TestSpatioTemporalSplit(unittest.TestCase):
def test_split(self):
x1, x2 = 0, 10
segments = 2
params = dict(age=(0, 10), lat=(-10, 10), lo=(-90, 90))
result = list(spatiotemporal_split(segments=segments, **params))
self.assertTrue([isinstance(item, dict) for item in result])
self.assertTrue(len(result) == segments ** len(params))
class TestNSEW2Bounds(unittest.TestCase):
def setUp(self):
self.params = {
k: v
for (k, v) in zip(
["west", "south", "east", "north"], np.random.randint(1, 10, 4)
)
}
def test_conversion(self):
result = NSEW_2_bounds(self.params)
self.assertTrue(isinstance(result, list))
def test_order(self):
order = ["minx", "maxx", "miny", "maxy"]
result = NSEW_2_bounds(self.params, order=order)
self.assertTrue(result[1] == self.params["east"])
class TestLevenshteinDistance(unittest.TestCase):
def test_string(self):
pairs = [
("bar", "car"),
("bart", "car"),
("Saturday", "Sunday"),
("kitten", "sitting"),
]
expect = [1, 2, 3, 3]
for pair, exp in zip(pairs, expect):
with self.subTest(pair=pair, exp=exp):
dist = levenshtein_distance(*pair)
self.assertTrue(dist == exp)
def test_list(self):
pairs = [
([1, 2, 3], [1, 2, 2]),
(["A", "B", "C"], ["A", "B"]),
(["A", "B", "C", "D"], ["A", "E", "C"]),
]
expect = [1, 1, 2]
for pair, exp in zip(pairs, expect):
with self.subTest(pair=pair, exp=exp):
dist = levenshtein_distance(*pair)
self.assertTrue(dist == exp)
if __name__ == "__main__":
unittest.main()
| [
"morgan.j.williams@hotmail.com"
] | morgan.j.williams@hotmail.com |
7f1f96006e29e9599c5b43b0d55227bd8c5c2601 | 7150cc28822dd224c68356a3e820cd4c5b63e877 | /Leetcode_easy/191. Number of 1 Bits/191_Number of 1 Bits.py | 5bad0096ed6ea6575b6a34b69a87f00aebaa6bb9 | [] | no_license | duxun93/LeetCodeProject | 7db0dfa629dc0b287b783437da76698719b84581 | d089a9ce2d7dd4cebb966e52283b21b4d81253d6 | refs/heads/master | 2020-04-08T04:48:33.851157 | 2019-01-04T12:55:39 | 2019-01-04T12:55:39 | 159,033,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | #https://leetcode.com/problems/number-of-1-bits/
#python 字符串是个神器,都变成字符串迭代就ok。
def hammingWeight(n):
s = str(n)
j = 0
for i in s :
if i == '1' :
j = j + 1
return j
print(hammingWeight(11111111111111111111111111111101)) | [
"1850175167@qq.com"
] | 1850175167@qq.com |
fb96fd228b595751b23398728c4cc3dcae170fee | 0d8c030beb0cf8d6ebc8b67f86619400f9a8300c | /unpacking_function_argument.py | 418457e19ff951995b7d6862f51b1d27da9e752a | [] | no_license | czach81/python_exercises | b50dd5c55883d2d1d2488340e866fd24ddcebb27 | d8b4a267502310019a4d4d669c02d7e38c8c5de2 | refs/heads/master | 2020-09-15T13:09:29.340334 | 2019-11-26T21:37:25 | 2019-11-26T21:37:25 | 223,454,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,570 | py | # you have a function that needs to take in a collection of data (elements unknown)
# one element or many elements
def greeting(*args): # *args packs any number of positional arguments into a tuple named args
    print('Hi ' + ' '.join(args)) # join the collected names with spaces; no asterisk when *using* args
    # print(args)  # would show the underlying tuple
greeting('Kristine', 'M', 'Hudgens') # three arguments, all packed into args
greeting('Tiffany', 'Hudgens')
#------------------------------------------------------------
#------------------------------------------------------------
def greeting(*names): # the parameter name is arbitrary, but *args is the Python convention
    print('Hi ' + ' '.join(names))
greeting('Kristine', 'M', 'Hudgens')
greeting('Tiffany', 'Hudgens')
#------------------------------------------------------------
#------------------------------------------------------------
#------------------------------------------------------------
def greeting(time_of_day, *args): # regular positional parameter first, then the var-args
    print(f"Hi {' '.join(args)}, I hope that you're having a good {time_of_day}") # f-string interpolates the joined names
# the {...} braces above are f-string interpolation, not a set/dict literal
greeting('Afternoon', 'Kristine', 'M', 'Hudgens')
greeting('Morning', 'Tiffany', 'Hudgens')
| [
"christopherpzach@gmail.com"
] | christopherpzach@gmail.com |
fe9b3528b6a13ece78a8b5bb615828732c4ea9f4 | 0df9864d526e1878a80e61cb03419e0d4361456c | /dodo-flask/app.py | 31250efed87ce2fad63c7ee3e89a2ce272ee6f56 | [] | no_license | Yuhemei/dodo | e7ba586717d5c8778e7242abe25f7a87776bc5bb | 87306e23160a72cb84c87c886c05e163a03bd28c | refs/heads/master | 2023-06-18T02:36:51.753534 | 2020-11-29T12:50:42 | 2020-11-29T12:50:42 | 291,392,200 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | from flask import Flask
from flask_cors import CORS
import config
from model import Category
# Single module-level Flask app with CORS enabled for all routes.
app = Flask(__name__)
CORS(app)
app.config['SECRET_KEY'] = config.flask_secret_key
# The wildcard imports below are executed for their side effects —
# presumably each views module registers its routes/handlers on `app`
# at import time (verify in views/*).
from views.auth import *
from views.category import *
from views.errors import *
from views.posts import *
from views.upload import *
from views.user import *
if __name__ == '__main__':
    # One-off seed data, kept for reference:
    # Category(name='java').save()
    # Category(name='css').save()
    # Category(name='python').save()
    # NOTE(review): debug=True is for local development only.
    app.run(debug=True)
"yuyongjin@csii.com.cn"
] | yuyongjin@csii.com.cn |
b92defed3b5e8993f941de86c1d080d39aa48810 | c73beb04d101ca8d98c9126b1c47b4f19cc35066 | /week1/calculator.py | f7f372af8b41c269b4a182934923f6716834ac12 | [] | no_license | fywest/python | a5ecf62e1f8cdf59c936da81b478c371f169aec4 | cd97438679d8e129b3cb75d76226b16e7e7850ac | refs/heads/master | 2022-12-13T06:15:04.021492 | 2019-05-28T19:21:18 | 2019-05-28T19:21:18 | 130,403,136 | 0 | 0 | null | 2022-12-08T05:08:55 | 2018-04-20T19:02:57 | Python | UTF-8 | Python | false | false | 929 | py | import sys
def compute_tax(taxable):
    """Return the income tax owed on *taxable* (salary minus the 3500
    standard deduction), using the pre-2018 Chinese bracket table.

    Each bracket applies its marginal rate to the whole taxable amount
    and subtracts a fixed quick-deduction, exactly as the original
    if/elif ladder did.  Non-positive taxable income owes 0.
    """
    # (lower bound of bracket, marginal rate, quick deduction)
    brackets = [
        (80000, 0.45, 13505),
        (55000, 0.35, 5505),
        (35000, 0.30, 2755),
        (9000, 0.25, 1005),
        (4500, 0.20, 555),
        (1500, 0.1, 105),
        (0, 0.03, 0),
    ]
    if taxable <= 0:
        return 0
    for lower, rate, deduction in brackets:
        if taxable > lower:
            return taxable * rate - deduction
    return 0


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("please input salary amount")
        exit(1)
    print(sys.argv[1])
    try:
        amount = int(sys.argv[1])
        tax = compute_tax(amount - 0 - 3500)
        print("{0:.2f}".format(tax))
        exit(0)
    except ValueError:
        # int() failed: the argument was not a whole number.
        print("Parameter Error")
        exit(1)
| [
"fywest2109@hotmail.com"
] | fywest2109@hotmail.com |
f1fa874c7bec4593ab87f840c26270883a4e970a | ee170fc4a6fe7180608c0182cbe7e1119fb1cf91 | /src/main/python/merge.py | 8611ff09c8fb1417b820292715286efb6ba9d4e5 | [] | no_license | rimamittal/petFinder | 7085b10e13b85ecd9af45562b263f9dfb9d6c1ae | 384000756b7597ab597fcc447321980a65e40615 | refs/heads/master | 2020-05-04T18:42:47.651880 | 2019-04-03T20:20:01 | 2019-04-03T20:20:01 | 179,363,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,470 | py | import pandas as pd
def merge(df, text_df, image_df):
    """Left-join text and image feature frames onto the base pet frame.

    Joins on df['PetID'] against each side's 'petid', drops the duplicated
    join keys, and fills every missing feature with 0.
    """
    combined = df.merge(text_df, how='left', left_on='PetID', right_on='petid')
    combined = combined.merge(image_df, how='left', left_on='PetID', right_on='petid')
    # The two 'petid' columns collide in the second merge and get the
    # _x/_y suffixes; neither is needed once joined.
    combined.drop(columns=['petid_x', 'petid_y'], inplace=True)
    combined.fillna(0, inplace=True)
    return combined
# Load the base train/test frames plus the precomputed text and
# first-photo image feature frames, join them, and persist the result.
train_df = pd.read_pickle("~/projects/petFinder/data/train.pkl")
test_df = pd.read_pickle("~/projects/petFinder/data/test.pkl")
train_text_df = pd.read_pickle("~/projects/petFinder/data/pet_text_data.pkl")
test_text_df = pd.read_pickle("~/projects/petFinder/data/pet_text_data_test.pkl")
train_image_df = pd.read_pickle("~/projects/petFinder/data/firstPhoto/pet_first_image_data_final.pkl")
test_image_df = pd.read_pickle("~/projects/petFinder/data/firstPhoto/pet_first_image_data_test_final.pkl")
# Alternative: all-photo image features instead of first-photo only.
# train_image_df = pd.read_pickle("~/projects/petFinder/data/pet_image_data_final.pkl")
# test_image_df = pd.read_pickle("~/projects/petFinder/data/pet_image_data_test_final.pkl")
complete_train_df = merge(train_df, train_text_df, train_image_df)
complete_test_df = merge(test_df, test_text_df, test_image_df)
# complete_train_df.to_pickle("../../../data/train_complete.pkl")
# complete_test_df.to_pickle("../../../data/test_complete.pkl")
complete_train_df.to_pickle("../../../data/firstPhoto/train_complete.pkl")
complete_test_df.to_pickle("../../../data/firstPhoto/test_complete.pkl")
| [
"Rima@NextrowLaptop-Rima.local"
] | Rima@NextrowLaptop-Rima.local |
9ad30ee9734df856d50edf0d943d9924d00ca67a | 1c8bcd2d8e129a92e3328f47d2a452814c033327 | /kaggle/otto-group-product-classification-challenge/script_30.py | 2250ea4fb9cf07c4c72a3fb83dcb6c31ab8ca81f | [
"MIT"
] | permissive | josepablocam/janus-public | 425334706f9a4519534779b7f089262cf5cf0dee | 4713092b27d02386bdb408213d8edc0dc5859eec | refs/heads/main | 2023-03-08T15:21:12.461762 | 2021-02-25T20:53:02 | 2021-02-25T20:53:02 | 314,606,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,253 | py | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfTransformer
import lightgbm as lgb
# Show wide frames in full when exploring interactively.
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 100)
# The 93 anonymised count features fit in int32; the target is a label string.
dtypes = {f"feat_{i}": "int32" for i in range(1, 94)}
dtypes["id"] = "int32"
dtypes["target"] = "string"
df_train = pd.read_csv(
    "/kaggle/input/otto-group-product-classification-challenge/train.csv",
    dtype=dtypes
).set_index("id")
# Label <-> ordinal lookup tables (LightGBM needs integer class ids).
class_to_order = dict()
order_to_class = dict()
for idx, col in enumerate(df_train.target.unique()):
    order_to_class[idx] = col
    class_to_order[col] = idx
df_train["target_ord"] = df_train["target"].map(class_to_order).astype("int16")
feature_columns = [
    col for col in df_train.columns if col.startswith("feat_")
]
target_column = ["target_ord"]
# Stratified hold-out split used for early stopping.
X_train, X_valid, y_train, y_valid = train_test_split(
    df_train[feature_columns], df_train[target_column],
    test_size=0.3, random_state=42,
    stratify=df_train[target_column]
)
# Augment the raw counts with their TF-IDF transform (fit on train only,
# then applied to the validation split).
tfidf = TfidfTransformer()
tfidf_feature_train = tfidf.fit_transform(X_train).toarray().astype("float32")
tfidf_feature_valid = tfidf.transform(X_valid).toarray().astype("float32")
X_train_tfidf = np.hstack((X_train.values, tfidf_feature_train))
X_valid_tfidf = np.hstack((X_valid.values, tfidf_feature_valid))
# Multiclass LightGBM parameters; the regularisation/leaf settings look
# like the output of a hyper-parameter search (TODO confirm provenance).
params = {
    'objective': "multiclass",
    'metric': {"multi_logloss"},
    'num_class': 9,
    'seed': 42,
    'lambda_l1': 0.0036682603550733813,
    'lambda_l2': 8.924549306063208,
    'num_leaves': 113,
    'feature_fraction': 0.48000000000000004,
    'bagging_fraction': 1.0,
    'bagging_freq': 0,
    'min_child_samples': 20
}
dataset_train = lgb.Dataset(X_train_tfidf, y_train)
dataset_valid = lgb.Dataset(X_valid_tfidf, y_valid)
# First fit: find the best iteration count via early stopping.
booster = lgb.train(
    params,
    dataset_train,
    feature_name=(
        [f"feat_{i}" for i in range(1, 94)]
        + [f"tfidf_{i}" for i in range(1, 94)]
    ),
    num_boost_round=500,
    valid_sets=dataset_valid,
    early_stopping_rounds=20,
)
best_iteration = booster.best_iteration
print(best_iteration)
# Notebook-style importance plot (trailing ';' suppresses the repr).
lgb.plot_importance(
    booster,
    max_num_features=30,
    figsize=(12, 10),
    dpi=300,
);
df_test = pd.read_csv(
    "/kaggle/input/otto-group-product-classification-challenge/test.csv",
    dtype=dtypes
).set_index("id")
# Refit the TF-IDF transform and the booster on the FULL training set,
# using the iteration count found above (no early stopping this time).
tfidf = TfidfTransformer()
tfidf_feature_train_all = tfidf.fit_transform(df_train[feature_columns]).toarray().astype("float32")
X_train_all_tfidf = np.hstack((df_train[feature_columns].values, tfidf_feature_train_all))
dataset_train_all = lgb.Dataset(X_train_all_tfidf, df_train[target_column])
booster = lgb.train(
    params,
    dataset_train_all,
    feature_name=(
        [f"feat_{i}" for i in range(1, 94)]
        + [f"tfidf_{i}" for i in range(1, 94)]
    ),
    num_boost_round=best_iteration,
)
# Bare expression — a notebook cell's display output, a no-op as a script.
df_test
tfidf_feature_test = tfidf.transform(df_test).toarray()
X_test_tfidf = np.hstack((df_test[feature_columns].values, tfidf_feature_test))
pred = booster.predict(X_test_tfidf)
# Spread the (n_samples, 9) probability matrix into one column per class.
for idx, col in order_to_class.items():
    df_test[col] = pred[:,idx]
df_test[[f"Class_{i}" for i in range(1, 10)]].to_csv('submission.csv', index=True)
| [
"jcamsan@mit.edu"
] | jcamsan@mit.edu |
50286763bdd321a0902f41cc5fa33b8f3e85b1bd | bcd22d1bb172982b513b5ff0463bd56d920321ef | /project/routes.py | 582f8ad4166148827f02ff04ec70d75e1f367605 | [
"MIT"
] | permissive | wanh23/bulletin-webvr | 49743e92de839a6a51e9c7671dba84c16c97af9d | 5f42b4af996d5d22f306d6a2be410b03b0cdaf9d | refs/heads/master | 2020-08-28T17:14:38.573921 | 2019-10-26T19:38:00 | 2019-10-26T19:38:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,755 | py | from flask import jsonify, request, render_template, url_for, flash, redirect
import numpy as np
from project import application, db
from project.db_models import Message
from profanity import profanity
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import json
from sqlalchemy import asc
# Maximum number of bulletin messages kept in the database at once; when
# full, the oldest message's slot is recycled (see submit()).
MAX_MESSAGES = 5
# SERVING WEBPAGES
@application.route("/")
def index():
    """Render the landing page."""
    return render_template("index.html")
@application.route("/rooms")
def rooms():
    """Render the room-selection page."""
    return render_template("rooms.html")
@application.route("/wellness")
def wellness():
    """Render the WebVR room (vr.html)."""
    return render_template("vr.html")
@application.route("/test")
def test():
    """Render test.html."""
    return render_template("test.html")
# SERVER-SIDE PROCESSING
@application.route("/getinput", methods = ['POST'])
def getinput():
    """Sync endpoint: the client POSTs its currently displayed messages
    (a JSON list of JSON-encoded message strings) and receives the
    additions/removals needed to match the database state.
    """
    if request.method == 'POST':
        try:
            values = request.get_json()
            messages = process_messages(values)
            (to_add, to_remove) = generate_return_lists(messages)
            return jsonify({"toAdd": to_add, "toRemove": to_remove})
        except Exception:
            # Never let the endpoint 500; the client treats this payload
            # as "sync failed".  (Was a bare `except:`, which would also
            # have swallowed SystemExit/KeyboardInterrupt; debug prints
            # and dead commented-out code removed.)
            return jsonify({"error": "error"})
#converts a list of message dicts into a convenient nested dict keyed by id
def messages_to_dict(messages):
    """Re-key a list of message dicts as {id: {"data", "xrot", "yrot"}}."""
    return {
        m["id"]: {"data": m["data"], "xrot": m["xrot"], "yrot": m["yrot"]}
        for m in messages
    }
@application.route("/submit", methods = ['POST'])
def submit():
    """Store a newly submitted message (if it passes moderation) and
    return the same add/remove sync lists as /getinput, plus whether
    the message was approved.
    """
    if request.method == 'POST':
        try:
            values = request.get_json()
            messages = process_messages(values["messages"])
            message_rejected = is_bad_message(values["message"])
            if not message_rejected:
                new_id = get_available_id()
                if new_id is False:
                    # Board is full: recycle the slot of the oldest row
                    # (smallest primary key), keeping its mes_id.
                    to_replace = Message.query.order_by(asc(Message.id)).first()
                    db.session.delete(to_replace)
                    new_message = Message(mes_id=to_replace.mes_id, content=values["message"], xrot=values["xrot"], yrot=values["yrot"])
                else:
                    # Free slot available: just add a new entry.
                    new_message = Message(mes_id=new_id, content=values["message"], xrot=values["xrot"], yrot=values["yrot"])
                db.session.add(new_message)
                db.session.commit()
            (to_add, to_remove) = generate_return_lists(messages)
            return jsonify({"toAdd": to_add, "toRemove": to_remove, "approved": not message_rejected})
        except Exception:
            # Same contract as /getinput: report failure without 500ing.
            # (Was a bare `except:`.)
            return jsonify({"error": "error"})
def get_available_id():
    """Return the lowest unused mes_id in 1..MAX_MESSAGES, or False when full.

    Always scans for the first gap. The previous shortcut
    (``len(ids) < MAX_MESSAGES -> len(ids) + 1``) assumed the used ids are
    contiguous starting at 1 and could hand out an id that is already taken
    whenever there was a gap in the sequence.
    """
    rows = Message.query.with_entities(Message.mes_id).all()
    # Set membership makes each probe O(1) instead of O(n).
    used = {row[0] for row in rows}
    for candidate in range(1, MAX_MESSAGES + 1):
        if candidate not in used:
            return candidate
    return False
# Parses message JSON.
def process_messages(m):
    """Decode each message-JSON string in *m* and index the results by id."""
    decoded = [json.loads(raw) for raw in m]
    return messages_to_dict(decoded)
# Decides what needs to be added or updated.
def generate_return_lists(messages):
    """Diff the database against the client's view of the message board.

    :param messages: {mes_id: {"data", "xrot", "yrot"}} -- the client's view.
    :return: (to_add, to_remove) -- message payloads the client must draw,
             and ids it must erase first because their content changed.
    """
    db_messages = Message.query.all()
    to_remove = []
    to_add = []
    for m in db_messages:
        payload = {"id": m.mes_id, "data": m.content,
                   "xrot": m.xrot, "yrot": m.yrot}
        if m.mes_id not in messages:
            # Message the client has never seen: just draw it.
            # (Debug prints of the diff removed.)
            to_add.append(payload)
        else:
            seen = messages[m.mes_id]
            # Existing id whose content or rotation changed in place:
            # erase the stale copy, then redraw the fresh one.
            if (m.content != seen["data"]
                    or m.xrot != seen["xrot"]
                    or m.yrot != seen["yrot"]):
                to_remove.append(m.mes_id)
                to_add.append(payload)
    return (to_add, to_remove)
# Message approval.
def is_bad_message(msg):
    """Return True when *msg* fails moderation.

    A message is rejected when it contains profanity or fuzzy-matches any
    banned phrase from phrases.json (token_set_ratio > 60 catches near-miss
    spellings).
    """
    if profanity.contains_profanity(msg):
        return True
    # Parse phrases.json once and memoize on the function itself: the old
    # code re-opened and re-parsed the file on every single call. Assumes
    # the file does not change while the server runs -- TODO confirm.
    phrases = getattr(is_bad_message, "_phrases", None)
    if phrases is None:
        with open('phrases.json') as f:
            phrases = json.load(f)
        is_bad_message._phrases = phrases
    return any(
        fuzz.token_set_ratio(word, msg) > 60
        for words in phrases.values()
        for word in words
    )
| [
"mykytasolonko@gmail.com"
] | mykytasolonko@gmail.com |
e0b2abc6fdc9f4a635de40691e9ad06c3da0a16b | 1525501b7361e36ec126d3c74707705442240cc3 | /main.py | 2d0237e955e154f97a8de4e8d28c28ade3150784 | [] | no_license | aclaughan/create_device_banners | ceccd118767541e3f40c2a1eb34a68e83887ace9 | 150072d13ea07623e0c13a84f802eab9836863d2 | refs/heads/master | 2023-04-09T15:39:46.490140 | 2021-04-25T11:13:24 | 2021-04-25T11:13:24 | 361,405,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,993 | py | import logging
# Send INFO-and-above records to app.log in the working directory.
logging.basicConfig(level=logging.INFO, filename="app.log")
def create_device_banners(hostname: str, model: str) -> str:
    """
    Create standard exec and login banners for a Cisco device.

    The exec banner is a one-row table of site code / hostname / model whose
    column widths come from ``segments``; the total segment space cannot be
    larger than 61 characters (63 inner columns once the two separators are
    counted). The login banner is a fixed legal notice boxed to the same
    width. The notice box is now assembled programmatically so its edges
    always line up -- the previous hand-padded literal drifted as soon as
    the text was edited.

    :param hostname: (str) The device hostname
    :param model: (str) The device model
    :return: (str) the config text containing the two banners.
    """
    logging.info(f"hostname: {hostname}, model: {model}")
    segments = [11, 36, 14]
    info = ['AT&T-C4', hostname, model]

    # "+---+----+" frame and the centred content row for the exec banner.
    frame = " +" + "+".join("-" * width for width in segments) + "+"
    e_ban = " |" + "|".join(
        f"{text:^{width}}" for text, width in zip(info, segments)
    ) + "|"
    logging.info(e_ban)

    # Legal notice: headers are centred, paragraph lines are left-aligned
    # with a one-space margin. `inner` is the column count between the
    # outer pipes (61 content columns + 2 separators = 63).
    inner = sum(segments) + len(segments) - 1
    notice = [
        ("", "center"),
        ("LEGAL NOTICE", "center"),
        ("", "center"),
        ("UNAUTHORISED ACCESS TO THIS DEVICE IS PROHIBITED", "center"),
        ("", "center"),
        ("You must have explicit, authorised permission to access or", "left"),
        ("configure this device. Unauthorised attempts and actions to", "left"),
        ("access or use this system may result in civil and/or criminal", "left"),
        ("penalties. All activities performed on this device are logged", "left"),
        ("and monitored.", "left"),
        ("", "center"),
    ]
    notice_frame = " +" + "-" * inner + "+"
    notice_rows = [
        " |" + (line.center(inner) if how == "center"
                else (" " + line).ljust(inner)) + "|"
        for line, how in notice
    ]
    login_body = "\n".join([notice_frame] + notice_rows + [notice_frame])

    return (
        "\nbanner exec ~\n"
        f"{frame}\n"
        f"{e_ban.upper()}\n"
        f"{frame}\n"
        "~\n"
        "!\n"
        "banner login ~\n"
        f"{login_body}\n"
        "~\n"
        "!\n"
    )
def main():
    """Demo entry point: print the banners for a sample ASR-9910 device."""
    banners = create_device_banners(hostname="na-ca-sfc-pe-01", model="asr-9910")
    print(banners)
# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
"git@claughan.com"
] | git@claughan.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.