blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0a009d9abecf09932989e971d0f48d7f11ffa8c9 | 27e2ff8b2d154537cd94c2347414cba41614080d | /pycbc/templates.py | 2503a38dfdf7b10192a7b9c3c07e5290539c5174 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | mchestr/pycbc | 27f66708c1c42ef11cfb57be6f73ab2a7162e5f4 | c215c1f177fe383ec6e797437fa2d5f4727eb9f3 | refs/heads/main | 2023-07-08T19:56:01.308717 | 2020-11-07T20:52:55 | 2020-11-07T20:52:55 | 310,798,838 | 0 | 0 | null | 2023-07-04T05:34:45 | 2020-11-07T08:18:58 | Python | UTF-8 | Python | false | false | 999 | py | import logging
import random
import arrow
import calendar
from jinja2 import Environment, PackageLoader, select_autoescape
from pycbc import jokes
log = logging.getLogger(__name__)
env = Environment(loader=PackageLoader('pycbc', 'templates'),
autoescape=select_autoescape())
def weekday(date):
    """Return the English weekday name (e.g. 'Monday') for the given date."""
    day_index = arrow.get(date).weekday()
    return calendar.day_name[day_index]
env.globals['weekday'] = weekday
def generate_email(config, user, branches, encrypted_token, template='fancy_email.jinja2'):
    """Render the notification e-mail body for *user* from a Jinja2 template.

    A random "special greeting" (either a generated joke or one of the user's
    own greetings) is picked on a best-effort basis; any failure is logged and
    the greeting is left empty.
    """
    tmpl = env.get_template(template)
    special_greeting = ''
    try:
        choices = [jokes.generate(config)]
        choices.extend(user.get('special_greetings', []))
        special_greeting = random.choice(choices)
    except Exception as exc:
        log.exception(exc)
    return tmpl.render(branches=branches, token=encrypted_token,
                       api_gateway=config['api_gateway'],
                       name=user.first_name, special=special_greeting)
| [
"mikeachester@gmail.com"
] | mikeachester@gmail.com |
4ea8b9138920af0be650f735d886ec58d19d6b14 | 14d09ed6a45ffcdfaf9962f8918886c923fd71a7 | /COMP1753/week5/L03 Decisions/05HelloNames.py | 862b6662cd3ef44e615dfcbb0831784264fefa27 | [] | no_license | morzen/Greenwhich1 | 7360954f938cfd869d77d5c1966abfe0e8f9545d | 74d51dbfffdd9d09981d2a81b24d5f0e9938c52a | refs/heads/master | 2020-04-01T21:17:24.614474 | 2019-10-06T12:25:46 | 2019-10-06T12:25:46 | 153,648,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | first_name = input("What is your first name? ")
last_name = input("What is your last name? ")

# Greet the module leader specially; everyone else gets a plain greeting.
if first_name == "Chris" and last_name == "Walshaw":
    print("Hello Chris Walshaw, COMP1753 module leader")
else:
    print(f"Hello {first_name} {last_name}")

print()
input("Press return to continue ...")
| [
"bm4904f@gre.ac.uk"
] | bm4904f@gre.ac.uk |
e1c47f72d7976c8b5798657afd4d830972fd1a31 | a6ec4056d7c013a0d386e45e57048596cb0cfcf6 | /learnpython/lists.py | 908b3cfedfa438000430b55c2e0372f49c977b12 | [] | no_license | EvanTheB/BitsAndBobs | 5fd7b91f0d5d5e0715e8a6fa5ed6925a97f5b018 | c726e9661fdaa534c46af53f4f55913eb5e3dc55 | refs/heads/master | 2020-03-30T18:59:26.303099 | 2015-03-21T23:47:15 | 2015-03-21T23:47:15 | 22,348,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | test = [1,'cat',2.0,25]
# --- lists: aliasing vs copying ---
print(test)
b = test*2        # new list containing copies of test's elements
c = [test]*2      # new list holding two references to the SAME `test` object
print(b)
print(c)
test.pop(3)       # mutating `test` shows up through c (aliased), not through b
print(b)
print(c)
#print(test.sort())
# --- dictionaries ---
print('dict')
testd = {}
testd['key'] = 'value'   # keys may be any hashable type...
testd[5] = 10            # ...strings and ints can coexist in one dict
print(testd)
print(testd.keys())
print(testd.values())
| [
"evanbenn@gmail.com"
] | evanbenn@gmail.com |
76b890dea7dc6256b628591d40653c1e7fa09250 | b0223d6e7c33540f767855082601782513fd243d | /models_base/resnet.py | b9c237fb4e5c9e7c7a0f8983bf629379ba4d6da0 | [] | no_license | piggy2008/pt_saliency_attention | 4d433d806cd4167eb6dbc6d04ef1d94b10e17eeb | 095a572a0b3d523d7d54eb79d33c6c81b7d1d09d | refs/heads/master | 2020-03-27T04:26:51.379343 | 2019-01-03T11:20:36 | 2019-01-03T11:20:36 | 145,940,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,267 | py | import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from lib.nn import SynchronizedBatchNorm2d
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
__all__ = ['ResNet', 'resnet18', 'resnet50', 'resnet101'] # resnet101 is coming soon!
model_urls = {
'resnet18': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet18-imagenet.pth',
'resnet50': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet50-imagenet.pth',
'resnet101': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet101-imagenet.pth'
}
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution with 1-pixel padding."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Residual block with two 3x3 convolutions (ResNet-18/34 style)."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = SynchronizedBatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = SynchronizedBatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes differ.
        identity = x if self.downsample is None else self.downsample(x)
        # Main branch: conv-bn-relu, conv-bn.
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += identity
        return self.relu(out)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50/101 style).

    With ``modelD=True`` and ``stride == 2``, the shortcut input is
    average-pooled before the projection (see ``forward``).
    """

    # Output channel count is planes * expansion.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, modelD=False):
        super(Bottleneck, self).__init__()
        # 1x1 reduction
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = SynchronizedBatchNorm2d(planes)
        # 3x3 spatial convolution; carries the stride when downsampling
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = SynchronizedBatchNorm2d(planes)
        # 1x1 expansion back to 4x channels
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = SynchronizedBatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.modelD = modelD

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # When modelD is set and this block downsamples, the shortcut is
            # 2x2 average-pooled before the projection instead of relying on a
            # strided 1x1 conv (presumably the ResNet-D trick -- confirm
            # against the intended reference implementation).
            if self.stride == 2 and self.modelD:
                x = F.avg_pool2d(x, 2, 2)
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet backbone with a "deep stem": three 3x3 convolutions
    (3 -> 64 -> 64 -> 128) replace the usual single 7x7 input convolution.
    """

    def __init__(self, block, layers, num_classes=1000):
        # The stem ends at 128 channels, so residual stages start from 128.
        self.inplanes = 128
        super(ResNet, self).__init__()
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = SynchronizedBatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = SynchronizedBatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = SynchronizedBatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; layers[i] is the block count of stage i.
        self.layer1 = self._make_layer_backup(block, 64, layers[0])
        self.layer2 = self._make_layer_backup(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer_backup(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer_backup(block, 512, layers[3], stride=2)
        # Classification head; the fixed 7x7 average pool assumes the usual
        # 224x224 input size -- confirm for other resolutions.
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style init for convs; unit gamma / zero beta for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer_backup(self, block, planes, blocks, stride=1):
        """Standard stage builder: strided 1x1 conv projection shortcut."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                SynchronizedBatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Alternate stage builder, currently unused by __init__: the
        projection shortcut is stride-1 and the first block is built with
        modelD=True, so downsampling of the shortcut happens via avg-pool
        inside the block.  NOTE(review): confirm which builder is intended
        to be active -- __init__ only calls _make_layer_backup.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=1, bias=False),
                SynchronizedBatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, True))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        # Deep stem
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        x = self.maxpool(x)
        # Residual stages
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # Head: pool, flatten, classify
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def resnet18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on Places
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        # strict=False for consistency with resnet50/resnet101 below: the
        # downloaded checkpoint may not exactly match this variant's head.
        model.load_state_dict(load_url(model_urls['resnet18']), strict=False)
    return model
'''
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on Places
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(load_url(model_urls['resnet34']))
return model
'''
def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on Places
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return model
    state = load_url(model_urls['resnet50'])
    model.load_state_dict(state, strict=False)
    return model
def resnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on Places
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if not pretrained:
        return model
    state = load_url(model_urls['resnet101'])
    model.load_state_dict(state, strict=False)
    return model
# def resnet152(pretrained=False, **kwargs):
# """Constructs a ResNet-152 model.
#
# Args:
# pretrained (bool): If True, returns a model pre-trained on Places
# """
# model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
# if pretrained:
# model.load_state_dict(load_url(model_urls['resnet152']))
# return model
def load_url(url, model_dir='./pretrained', map_location=None):
    """Download *url* into *model_dir* (once) and ``torch.load`` the cached file.

    Args:
        url: source URL; the final path component is used as the cache filename.
        model_dir: local cache directory, created if missing.
        map_location: forwarded to ``torch.load``.
    """
    # exist_ok avoids the check-then-create race (two processes downloading
    # concurrently) and also creates missing parent directories.
    os.makedirs(model_dir, exist_ok=True)
    filename = url.split('/')[-1]
    cached_file = os.path.join(model_dir, filename)
    if not os.path.exists(cached_file):
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        urlretrieve(url, cached_file)
    return torch.load(cached_file, map_location=map_location)
| [
"810656271@qq.com"
] | 810656271@qq.com |
aa9637a4166c3fd287284fd19525b83dafae4bd4 | 802954a472addc75274061ac11e0b4ac205c7464 | /coop_cms/migrations/0005_no_duplicate_in_nodes.py | 625a6845f8c9c7bb8a298710b9e2645905e43a66 | [] | no_license | credis/coop_cms | f8207bfd1d4c0322ef6659c756e4a90fabfb4e98 | aafbd58c8fc83340fcf76d3099432628f2ddc89c | refs/heads/master | 2021-01-23T13:48:42.861287 | 2013-09-12T13:39:04 | 2013-09-12T13:39:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,436 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: enforce one NavNode per (content_type, object_id) pair."""

    def forwards(self, orm):
        # Adding unique constraint on 'NavNode', fields ['object_id', 'content_type']
        db.create_unique('coop_cms_navnode', ['object_id', 'content_type_id'])

    def backwards(self, orm):
        # Removing unique constraint on 'NavNode', fields ['object_id', 'content_type']
        db.delete_unique('coop_cms_navnode', ['object_id', 'content_type_id'])

    # Frozen ORM snapshot used by South at migration time (auto-generated;
    # do not edit by hand).
    models = {
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'coop_cms.article': {
            'Meta': {'object_name': 'Article'},
            'content': ('django.db.models.fields.TextField', [], {'default': "u'Page content'"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'publication': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '100', 'blank': 'True'}),
            'title': ('django.db.models.fields.TextField', [], {'default': "u'Page title'"})
        },
        'coop_cms.document': {
            'Meta': {'object_name': 'Document'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'coop_cms.image': {
            'Meta': {'object_name': 'Image'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'coop_cms.link': {
            'Meta': {'object_name': 'Link'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
        },
        'coop_cms.navnode': {
            'Meta': {'unique_together': "(('content_type', 'object_id'),)", 'object_name': 'NavNode'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': "orm['coop_cms.NavNode']", 'null': 'True', 'blank': 'True'})
        },
        'coop_cms.navtree': {
            'Meta': {'object_name': 'NavTree'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'coop_cms.navtype': {
            'Meta': {'object_name': 'NavType'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'unique': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label_rule': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'search_field': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
        }
    }

    complete_apps = ['coop_cms']
| [
"contact@quinode.fr"
] | contact@quinode.fr |
4cd79181130987db75faf7e250e83b9863e339bb | 5d6dd782e0b29817b3c27d5d6984909152813444 | /dbbase/urls.py | 3d183271c6790a11b27359533230ad4817dbcaab | [] | no_license | smartslee/hospacc | 387d8a7e42e068080738e365045a23d6d8a1f222 | 5bd42a9e729f3c90ff4b87185167f64fe79aac01 | refs/heads/master | 2020-04-01T12:59:50.743213 | 2019-10-07T08:13:41 | 2019-10-07T08:13:41 | 153,232,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | from django.urls import path
from . import views
from .views import (HdbUpdateView,IndexView, SearchFormView,HdbCreateView,HdbDeleteView, HdbprintView)
# Route table for the hospital-database app; the `name=` values are used for
# reverse lookup from templates and redirects.
urlpatterns = [
    path('list/', views.index, name ='list'),
    # url(r'^dbedit/', views.hospdb_list, name ='edit'),
    path('input/', views.inputdb, name ='inputdbn'),
    path('', views.homep, name ='home'),
    path('dblistView/', views.IndexView.as_view(), name ='indexview'),
    path('<int:pk>/', views.HdbdetailView.as_view(), name="detail"),
    # NOTE(review): the literal "print(" / ")" makes this URL e.g. /print(3)/ --
    # confirm 'print/<int:pk>/' was not the intent.
    path('print(<int:pk>)/', views.HdbprintView.as_view(), name="print"),
    path('hdb/add/', views.HdbCreateView.as_view(), name="hdb_add"),
    path('update/<int:pk>/', HdbUpdateView.as_view(), name='update'),
    path('delete/<int:pk>/', HdbDeleteView.as_view(), name='delete'),
    #url(r'^list$',ProductListView.as_view(), name="ProductListView"),
    # url(r'^list/(?P<pk>\d+)/$',ProductDetailView.as_view(), name="ProductDetailview"),
    path('search',SearchFormView.as_view(),name='search'),
    path('login/', views.signin, name='login'),
    path('logout/', views.logout, name='logout'),
]
"you@example.com"
] | you@example.com |
a76256e5c53a0f726234358d2eeec7cce0cde04f | 06ab66fe85631fb8e0351245af483b3a8e98295b | /src/config/logger.py | a708dd302034317cdf2dbf836a63869ed4a63415 | [] | no_license | SeanCherngTW/toy-real-time-bidding-buyer | ed62d8e60f196bff06ad69765f7ae8e711b66ea1 | 82e09598649d2ffd4aecc6356257fa3c5a0504ea | refs/heads/main | 2023-06-12T18:19:07.445796 | 2021-07-05T14:16:40 | 2021-07-05T14:16:40 | 383,154,896 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py | import os
import logging
from os.path import exists
from logging import handlers
class DebugLog(object):
    """Set up a named logger writing to a rotating file and to the console.

    Expects ``ad_path_config`` with keys:
        model_name: logger name and log-file stem.
        log_file_path: directory prefix the log file name is appended to
            (must end with a path separator -- TODO confirm with callers).
        dst_dir: output directory created on construction.
    """

    def __init__(self, ad_path_config):
        self.model_name = ad_path_config['model_name']
        self.log_file_path = ad_path_config['log_file_path'] + self.model_name + ".log"
        self.dst_dir = ad_path_config['dst_dir']
        self.prepare_log_path()
        self.logger = self.logger_initialize()
        # Do not bubble records up to the root logger (avoids double output).
        self.logger.propagate = False

    def prepare_log_path(self):
        """Create the destination directory if it does not exist."""
        # makedirs(exist_ok=True) fixes two defects of the original
        # exists()+mkdir() pair: the check-then-create race, and the failure
        # when a parent directory is missing.
        os.makedirs(self.dst_dir, exist_ok=True)

    def logger_initialize(self):
        """Return the configured logger, attaching handlers exactly once."""
        logger = logging.getLogger(self.model_name)
        logger.setLevel(logging.INFO)
        # logging.getLogger returns a process-wide shared object, so building
        # DebugLog twice for the same model would stack duplicate handlers
        # (and duplicate every record) without this guard.
        if logger.handlers:
            return logger
        formatter = logging.Formatter(
            '[%(asctime)s] - [%(name)s] - [%(filename)s] - %(levelname)s - %(message)s'
        )
        fh = handlers.RotatingFileHandler(
            filename=self.log_file_path,
            backupCount=1,
            encoding="utf-8",
        )
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        ch.setFormatter(formatter)
        logger.addHandler(ch)
        return logger
| [
"seancherng.tw@gmail.com"
] | seancherng.tw@gmail.com |
d10e42952007e9088b3ced512aea139bd4f279eb | 72cab8442a4e267054ed2bbbaeb671b339ae929a | /axelrod/tests/strategies/test_verybad.py | 5fe7a9601bce664b9db85bf1270be25fffd6e29e | [
"MIT"
] | permissive | Epsiilon/Axelrod | 6e95cce2da48bf72db077ff66f1a0f728a8850d6 | bd774cad62673bb4eb73a6cacfc24c2038111d30 | refs/heads/master | 2020-05-24T09:48:52.250631 | 2017-05-07T16:43:33 | 2017-05-07T16:43:33 | 84,844,964 | 0 | 0 | null | 2017-03-13T15:47:25 | 2017-03-13T15:47:25 | null | UTF-8 | Python | false | false | 1,199 | py | """Tests for the VeryBad strategy."""
import axelrod
from .test_player import TestPlayer
C, D = axelrod.Actions.C, axelrod.Actions.D
class TestCooperator(TestPlayer):
    # NOTE(review): the class name looks copied from the Cooperator test
    # suite, but this suite exercises the VeryBad strategy -- consider
    # renaming to TestVeryBad.

    name = "VeryBad"
    player = axelrod.VeryBad
    expected_classifier = {
        'memory_depth': float('inf'),
        'stochastic': False,
        'makes_use_of': set(),
        'inspects_source': False,
        'manipulates_source': False,
        'manipulates_state': False
    }

    def test_strategy(self):
        """Check VeryBad's responses against scripted play histories."""
        # Starts by cooperating for the first 3 moves.
        self.first_play_test(C)
        self.responses_test([C], [C] * 2, [C] * 2)
        self.responses_test([C], [C] * 3, [D] * 3)
        # Cooperate if opponent's probable action to Defect
        self.responses_test([D], [C] * 13 + [D] * 7, [D] * 16 + [C] * 4)
        # Cooperate if opponent's probable action to Cooperate
        self.responses_test([C], [D] * 13 + [C] * 7, [C] * 12 + [D] * 8)
        # TitForTat if opponent's equally probable to Cooperate or Defect
        self.responses_test([D], [D] * 13 + [C] * 11, [C] * 12 + [D] * 12)
        self.responses_test([C], [D] * 13 + [C] * 11, [D] * 12 + [C] * 12)
| [
"janga1997@gmail.com"
] | janga1997@gmail.com |
fa3cbd3ad7cd94a52fabb8f93054a8f1f6d2c9bb | 5f61cdf65429f4436a2ab77ff1a66c453b9593eb | /RandomForestiris.py | c31b4f952397d5cf52b4289ffb6a1ba0787e6c53 | [
"Unlicense"
] | permissive | shva-ds/Sheela | 2fa4adf8e5ab9aba164fa22e5218e1896638d0d5 | eb67b0e5c2e112f55164a478ce74ab81feeb5b7a | refs/heads/main | 2023-06-09T12:15:55.682281 | 2021-06-20T11:51:36 | 2021-06-20T11:51:36 | 371,900,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,805 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 11 19:21:21 2021
@author: Sheela Vatsala
"""
# RandomForest with iris dataset with feature importance
#Import scikit-learn dataset library
from sklearn import datasets
#Load dataset
iris = datasets.load_iris()
# print the label species(setosa, versicolor,virginica)
print(iris.target_names)
# print the names of the four features
print(iris.feature_names)
# print the iris data (top 5 records)
print(iris.data[0:5])
# print the iris labels (0:setosa, 1:versicolor, 2:virginica)
print(iris.target)
# Creating a DataFrame of given iris dataset.
import pandas as pd
data=pd.DataFrame({
'sepal length':iris.data[:,0],
'sepal width':iris.data[:,1],
'petal length':iris.data[:,2],
'petal width':iris.data[:,3],
'species':iris.target
})
data.head()
# Import train_test_split function
from sklearn.model_selection import train_test_split
X=data[['sepal length', 'sepal width', 'petal length', 'petal width']] # Features
y=data['species'] # Labels
# Split dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# 70% training and 30% test
#Import Random Forest Model
from sklearn.ensemble import RandomForestClassifier
#Create a Gaussian Classifier
clf=RandomForestClassifier(n_estimators=100)
#Train the model using the training sets y_pred=clf.predict(X_test)
clf.fit(X_train,y_train)
y_pred=clf.predict(X_test)
#Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
# Model Accuracy, how often is the classifier correct?
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
clf.predict([[3, 5, 4, 2]])
array([2])
from sklearn.ensemble import RandomForestClassifier
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
max_depth=None, max_features='auto', max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=100, n_jobs=1,
oob_score=False, random_state=None, verbose=0,
warm_start=False)
#Create a Gaussian Classifier
clf=RandomForestClassifier(n_estimators=100)
#Train the model using the training sets y_pred=clf.predict(X_test)
clf.fit(X_train,y_train)
import pandas as pd
feature_imp = pd.Series(clf.feature_importances_,index=iris.feature_names).sort_values(ascending=False)
feature_imp
import matplotlib.pyplot as plt
import seaborn as sns
# Creating a bar plot
sns.barplot(x=feature_imp, y=feature_imp.index)
# Add labels to your graph
plt.xlabel('Feature Importance Score')
plt.ylabel('Features')
plt.title("Visualizing Important Features")
plt.legend()
plt.show()
# Import train_test_split function
from sklearn.model_selection import train_test_split
# Split dataset into features and labels
X=data[['petal length', 'petal width','sepal length']] # Removed feature "sepal length"
y=data['species']
# Split dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=5)
from sklearn.ensemble import RandomForestClassifier
#Create a Gaussian Classifier
clf=RandomForestClassifier(n_estimators=100)
#Train the model using the training sets y_pred=clf.predict(X_test)
clf.fit(X_train,y_train)
# prediction on test set
y_pred=clf.predict(X_test)
#Import scikit-learn metrics module for accuracy calculation
from sklearn.metrics import confusion_matrix, accuracy_score
# Model Accuracy, how often is the classifier correct?
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
| [
"noreply@github.com"
] | shva-ds.noreply@github.com |
b017183f68ca96610977efea57ace0d75e6dd0ca | 5c839d92516308a520f42c6c2ddc8528b2bbaaba | /rating.py | 1a588a8605f9ee066d657907dcb982e2f17f90de | [] | no_license | pobedinskiy/DenisPobedinskiyOlegovich_hw2 | 10e8beaa3d3aac630581016e7350e266588e2eb1 | c47c27dcfc80c075f1c6ac8e76935f8d1b2c9812 | refs/heads/master | 2023-07-28T02:34:24.929120 | 2021-09-12T11:19:18 | 2021-09-12T11:19:18 | 405,618,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22 | py | print('My new rating') | [
"dennis8860@gmail.com"
] | dennis8860@gmail.com |
e8a350bbecb3418f7688eee928a5f52857f03e26 | 032786fbdeb06b3e9cbaba3fc4d96eafdf54b823 | /app/recipes/migrations/0001_initial.py | 4f9947e069c86c20c560425b9c9ab6e1848eb469 | [] | no_license | nlicitra/ClassyAngular | 9754eb60ac553f9cf2ccca2974fb5b85fb112bb9 | a539f46a3750e7c7875bc36963f0678d16396d8e | refs/heads/master | 2021-01-10T06:22:52.228700 | 2016-01-08T15:43:31 | 2016-01-08T15:43:31 | 47,842,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import recipes.models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=64)),
('ingredients', recipes.models.ListField()),
],
options={
},
bases=(models.Model,),
),
]
| [
"nickerous@gmail.com"
] | nickerous@gmail.com |
e5a029fdccc0a4fb8e7f28a477ae67fc5a8ab5a5 | f4b6060e1f50cfed399bfaf1502d4b5b3aea2197 | /app/entity_routes.py | e258707f13106d7d3a13deb91ae2522ccf0d3573 | [] | no_license | dascalustefan/Scholarium_italy2019 | 56716120c196034d4193a367353d15bd6e36937c | 122e7ae4096059a1fb9b230db7a1f429049ca3f1 | refs/heads/master | 2020-05-17T09:31:52.385238 | 2019-04-28T06:55:55 | 2019-04-28T06:55:55 | 183,635,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,268 | py | from app import app
from cli import MultichainCLI
from flask import render_template, request, session
"""
This module maps the url's of the third party entity functionalities: verification of diploma validation and searching
for diplomas in blockchain.
"""
@app.route('/verify_diploma')
def verify_diploma():
    """
    Method that verifies if a diploma is valid. It lists all the transactions of the given asset and checks whether
    the cnp+diploma hash is found in the university+high authority multisignature address. It also checks if the
    diploma was revoked, i.e. the cnp+diploma payload was transferred to the burn address.

    Query-string args:
        diploma_hash (str): the hash of the diploma.
        asset_name (str): the name of the asset of the university.
        stud_cnp (str): the student id.

    Returns:
        (html): the entity page.
        (dict): response variable that contains a message that says whether the diploma is valid.
    """
    response = dict()
    diploma_hash = request.args.get("diploma_hash")
    asset_name = request.args.get("asset_name")
    stud_cnp = request.args.get("stud_cnp")
    asset_transactions = MultichainCLI.list_assets_transactions(asset_name)
    burn_address = MultichainCLI.get_burn_address()
    diploma_hash_exists = False
    diploma_revoked = False
    for transaction in asset_transactions:
        # Issuing/revoking transactions carry "<cnp><diploma_hash>" as their
        # first data payload; transactions without data are skipped.
        if len(transaction["data"]) > 0:
            if stud_cnp + diploma_hash == transaction["data"][0]:
                # A matching payload sent to the burn address is a revocation;
                # anywhere else it is the original issuance.
                if burn_address not in transaction["addresses"]:
                    diploma_hash_exists = True
                else:
                    diploma_revoked = True
    # Valid only if the diploma was issued and never revoked.
    if diploma_hash_exists and not diploma_revoked:
        response["valid"] = "Diploma is valid"
    else:
        response["valid"] = "Diploma is not valid"
    return render_template("entity.html", response=response)
@app.route('/search_diplomas')
def search_diplomas():
    """Method that searches for diplomas of a student, given its cnp (student id).

    It lists all the assets found in the blockchain, then all the transactions
    of every asset. If the student cnp is found in a transaction and the
    diploma was not revoked (transferred to the burn address), the diploma is
    added to the response.

    Query-string args:
        stud_cnp (str): the id of the student.

    Returns:
        (html): the entity page.
        (dict): response variable that contains all the valid diplomas with
            the corresponding asset name of the university that issued them.
    """
    response = dict()
    response["diplomas"] = dict()
    stud_cnp = request.args.get("stud_cnp")
    assets = MultichainCLI.list_assets()
    burn_address = MultichainCLI.get_burn_address()
    for asset in assets:
        asset_transactions = MultichainCLI.list_assets_transactions(asset["name"])
        for transaction in asset_transactions:
            if len(transaction["data"]) > 0:
                payload = transaction["data"][0]
                # Payload layout: first 4 chars are the cnp, the rest is the
                # diploma hash -- TODO confirm the cnp width with the issuer.
                if stud_cnp == payload[0:4]:
                    diploma_hash = payload[4:]
                    if burn_address not in transaction["addresses"]:
                        response["diplomas"][diploma_hash] = asset["name"]
                    else:
                        # pop with a default: a revocation may be listed
                        # before its issuing transaction, in which case the
                        # original bare pop() raised KeyError and broke the
                        # whole search.
                        response["diplomas"].pop(diploma_hash, None)
    return render_template("entity.html", response=response)
| [
"st3fandascalu@gmail.com"
] | st3fandascalu@gmail.com |
52a7f9771fffbcc34aea9ac7336c109151e3ff2d | b4cf5c62dffd36dfbee58d03569d7cf45c88aac4 | /tensorflow_graph_in_jupyter.py | d7eaf0bfa029b4bb687a0155425755385cbfa2be | [
"Apache-2.0"
] | permissive | yutiansut/handson-ml | 77a4d0a3555f6b6834a093275938ccaed0cb185d | 9fd997d97e8a2f4b47fdacf5da1158e24ddd929c | refs/heads/master | 2021-05-15T13:24:15.878598 | 2018-10-05T02:23:29 | 2018-10-05T02:23:29 | 107,095,782 | 3 | 0 | Apache-2.0 | 2020-06-27T02:06:28 | 2017-10-16T07:54:36 | Jupyter Notebook | UTF-8 | Python | false | false | 2,159 | py | from __future__ import absolute_import, division, print_function, unicode_literals
# This module defines the show_graph() function to visualize a TensorFlow graph within Jupyter.
# As far as I can tell, this code was originally written by Alex Mordvintsev at:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/deepdream/deepdream.ipynb
# The original code only worked on Chrome (because of the use of <link rel="import"...>, but the version below
# uses Polyfill (copied from this StackOverflow answer: https://stackoverflow.com/a/41463991/38626)
# so that it can work on other browsers as well.
import numpy as np
import tensorflow as tf
from IPython.display import clear_output, Image, display, HTML
def strip_consts(graph_def, max_const_size=32):
    """Strip large constant values from graph_def."""
    stripped = tf.GraphDef()
    for node in graph_def.node:
        copied = stripped.node.add()
        copied.MergeFrom(node)
        if copied.op != 'Const':
            continue
        # Replace oversized constant payloads with a short placeholder so the
        # serialized graph stays small.
        tensor = copied.attr['value'].tensor
        n_bytes = len(tensor.tensor_content)
        if n_bytes > max_const_size:
            tensor.tensor_content = b"<stripped %d bytes>" % n_bytes
    return stripped
def show_graph(graph_def, max_const_size=32):
    """Visualize a TensorFlow graph inline in Jupyter.

    Accepts either a Graph or a GraphDef; large constants are stripped via
    strip_consts() before the pbtxt is embedded in a TensorBoard widget,
    which is then wrapped in an <iframe srcdoc="..."> so it renders in-cell.
    """
    if hasattr(graph_def, 'as_graph_def'):
        graph_def = graph_def.as_graph_def()
    strip_def = strip_consts(graph_def, max_const_size=max_const_size)
    # A random id keeps multiple widgets on one page from colliding.
    code = """
<script src="//cdnjs.cloudflare.com/ajax/libs/polymer/0.3.3/platform.js"></script>
<script>
function load() {{
document.getElementById("{id}").pbtxt = {data};
}}
</script>
<link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
<div style="height:600px">
<tf-graph-basic id="{id}"></tf-graph-basic>
</div>
""".format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))
    iframe = """
<iframe seamless style="width:1200px;height:620px;border:0" srcdoc="{}"></iframe>
""".format(code.replace('"', '&quot;'))
    # BUGFIX: the replacement target must be the HTML entity &quot;.  The
    # previous replace('"', '"') was a no-op, so embedded double quotes
    # terminated the srcdoc attribute and broke the rendered widget.
    display(HTML(iframe))
| [
"aurelien.geron@gmail.com"
] | aurelien.geron@gmail.com |
aa16b99912de047624e2bd95c3b786e1a01aea29 | c39b8d4df920376c8d771f875cba2a676b332085 | /app_analyzer/models.py | 74af69195ec5ec0c8cf52c40c687d1bd2a91d91f | [] | no_license | kishorpawar/analyzer | e8e2d4dcc7197b9b1f1b445501e5b31ba72bfb39 | 4be14fd42ccbea605031f99eaa5574e6320d3d41 | refs/heads/master | 2020-05-18T10:03:48.629888 | 2014-06-16T18:28:58 | 2014-06-16T18:28:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,141 | py | from django.db import models
from phonenumber_field.modelfields import PhoneNumberField
from Analyzer import constants
# Create your models here.
class Personal(models.Model):
    """Applicant identity and contact details; one row per candidate."""
    first_name = models.CharField(max_length = 100)
    middle_name = models.CharField(max_length = 100, null = True)
    last_name = models.CharField(max_length = 100)
    date_of_birth = models.DateField()
    gender = models.CharField(max_length = 1, choices = constants.GENDER)
    # NOTE: "preffered" is a typo, but renaming would change the DB column.
    preffered_contact = PhoneNumberField()
    secondary_contact = PhoneNumberField(blank = True)
    email = models.EmailField(max_length = 100)
    citizenship = models.CharField(max_length = 100)
    passport_no = models.CharField(max_length = 300)

    class Meta:
        db_table = "an_personal_info"
class Admission(models.Model):
    """Program the candidate applied to, plus prior-school background."""
    candidate = models.ForeignKey(Personal)
    session = models.CharField(max_length = 50, choices = constants.SESSION)
    # year / year_gratuated are DateFields even though only the year is
    # meaningful -- presumably stored as Jan 1 of the year; confirm with callers.
    year = models.DateField()
    degree = models.CharField(max_length = 300, choices = constants.DEGREES)
    program = models.CharField(max_length = 300)
    specialization = models.CharField(max_length = 300, null = True)
    last_school = models.CharField(max_length = 300)
    address = models.CharField(max_length = 300, null = True)
    city = models.CharField(max_length = 100, null = True)
    year_gratuated = models.DateField()

    class Meta:
        db_table = "an_admission_info"
class Experience(models.Model):
    """A single work-experience entry for a candidate (duration unit is
    not stated in the code -- presumably months; confirm with callers)."""
    candidate = models.ForeignKey(Personal)
    job_title = models.CharField(max_length = 100)
    employer = models.CharField(max_length = 100)
    duration = models.SmallIntegerField()

    class Meta:
        db_table = "an_experience_info"
class TestScore(models.Model):
    """Standardized-test result for a candidate (test names/types come
    from the project's constants module)."""
    candidate = models.ForeignKey(Personal)
    test = models.CharField(max_length = 10, choices = constants.TESTS)
    test_type = models.CharField(max_length = 20, choices = constants.TEST_TYPES, null = True)
    score = models.FloatField()

    class Meta:
        db_table = "an_test_score"
"kishor@Darkhorse.(none)"
] | kishor@Darkhorse.(none) |
55bced985c65b5c53802b4f2816dfec33e4ca925 | 49a813240ff777deff3b0bdd93b73dab5f77bd92 | /algorithms self/linear-regression-code-self-written.py | e80109f8d35dfbd48438c55e400c651fb8055c4e | [] | no_license | avidliznr/Machine-learning-algorithms | 943a6ea7e7e2fa31a194222697238a749778ac67 | e210914da73bfeb7ad6f518797a5d57177272696 | refs/heads/master | 2021-06-25T18:24:58.188846 | 2020-11-19T17:45:25 | 2020-11-19T17:45:25 | 168,866,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,635 | py | from statistics import mean
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
style.use('fivethirtyeight')
import random
#xs = np.array([1,2,3,4,5,6,], dtype=np.float64)
#ys = np.array([5,4,6,5,6,7], dtype=np.float64)
def best_fit_slope_and_b(xs, ys):
    """Least-squares slope m and intercept b for the line y = m*x + b."""
    x_bar = mean(xs)
    y_bar = mean(ys)
    m = (x_bar * y_bar - mean(xs * ys)) / (x_bar * x_bar - mean(xs * xs))
    b = y_bar - m * x_bar
    return m, b
def create_dataset(hm, variance, step=2, corellation=False):
    """Generate `hm` noisy samples around a drifting baseline.

    `variance` bounds the uniform noise; `corellation` ('pos'/'neg'/False)
    makes the baseline climb or fall by `step` after each sample.  Returns
    (xs, ys) as float64 numpy arrays, xs simply being 0..hm-1.
    (Parameter name "corellation" is a historical typo kept for callers.)
    """
    baseline = 1
    ys = []
    for _ in range(hm):
        ys.append(baseline + random.randrange(-variance, variance))
        if corellation == 'pos':
            baseline += step
        elif corellation == 'neg':
            baseline -= step
    xs = list(range(len(ys)))
    return np.array(xs, dtype=np.float64), np.array(ys, dtype=np.float64)
#print(m,b)
def squared_error(ys_original, ys_line):
    """Sum of squared residuals between a candidate line and the data."""
    residuals = ys_line - ys_original
    return sum(residuals ** 2)


def coefficient_of_determination(ys_original, ys_line):
    """R-squared: 1 - SS_res / SS_tot, comparing the fit to a mean-only model."""
    baseline = [mean(ys_original) for _ in ys_original]
    ss_res = squared_error(ys_original, ys_line)
    ss_tot = squared_error(ys_original, baseline)
    return 1 - (ss_res / ss_tot)
# Demo driver: fit a line to synthetic positively-correlated data,
# report R^2 and plot the fit plus one sample prediction.
xs,ys = create_dataset(40,10,2,corellation='pos')
m,b = best_fit_slope_and_b(xs,ys)
# Predict a sample value at x = 8.
predict_x = 8
predict_y = m*predict_x +b
# End of sample value prediction.
regression_line = [(m*x) + b for x in xs]
#print(regression_line)
r_squared = coefficient_of_determination(ys,regression_line)
print(r_squared)
plt.plot(regression_line)
plt.scatter(predict_x,predict_y,color = 'g')
plt.scatter(xs,ys)
plt.show()
| [
"avidliznr@users.noreply.github.com"
] | avidliznr@users.noreply.github.com |
723d9f4e653edb3ee2f8f30e43fdbbd6773d9d52 | 3d4c7b9c179322e6bdb3c7a0c137919364806cb3 | /tests/align/align_test.py | 8e0f2a1cbe184a5e323550c1b7e668a1179807ef | [
"Apache-2.0"
] | permissive | flexflow/FlexFlow | 291282d27009924a427966e899d7c2fda9c20cec | b2ec6cb5d2b898db1ad4df32adf5699bc48aaac7 | refs/heads/inference | 2023-09-04T05:25:02.250225 | 2023-09-03T14:15:07 | 2023-09-03T14:15:07 | 160,988,469 | 1,139 | 186 | Apache-2.0 | 2023-09-14T17:56:24 | 2018-12-08T23:43:13 | C++ | UTF-8 | Python | false | false | 3,424 | py | from align_utils import TensorAlignmentData, align_tensors
import os
import sys
from typing import Callable
sys.path.append("./align/")
BASE_DIR = "tests/align/out"
param_weight_op = {'conv2d', 'embedding', 'view_embedding', 'linear'}
param_bias_op = {'conv2d', 'linear'}
no_grad_op = {"getitem"}
def prepend_dirname_fn(dirname: str) -> Callable[[str], str]:
    """Return a function that joins *dirname* onto a bare filename."""
    return lambda filename: os.path.join(dirname, filename)
# One pytest case per FlexFlow operator; each simply delegates to
# _test_operator with the operator's name (which is also the name of the
# directory under BASE_DIR holding that operator's dumped tensors).
def test_embedding():
    _test_operator('embedding')

def test_view_embedding():
    _test_operator('view_embedding')

def test_getitem():
    _test_operator('getitem')

def test_conv2d():
    _test_operator('conv2d')

def test_add():
    _test_operator('add')

def test_concat():
    _test_operator('concat')

def test_subtract():
    _test_operator('subtract')

def test_multiply():
    _test_operator('multiply')

def test_pool2d():
    _test_operator('pool2d')

def test_reducesum():
    _test_operator('reducesum')

def test_reshape():
    _test_operator('reshape')

def test_flat():
    _test_operator('flat')

def test_sin():
    _test_operator('sin')

def test_transpose():
    _test_operator('transpose')

def test_exp():
    _test_operator('exp')

def test_cos():
    _test_operator('cos')

def test_scalar_add():
    _test_operator('scalar_add')

def test_scalar_sub():
    _test_operator('scalar_sub')

def test_scalar_multiply():
    _test_operator('scalar_multiply')

def test_scalar_truediv():
    _test_operator('scalar_truediv')

def test_relu():
    _test_operator('relu')

def test_sigmoid():
    _test_operator('sigmoid')

def test_tanh():
    _test_operator('tanh')

def test_identity():
    _test_operator('identity')

def test_linear():
    _test_operator('linear')

# max/min alignment is currently disabled:
# def test_max():
#     _test_operator('max')

# def test_min():
#     _test_operator('min')

def test_gather():
    _test_operator('gather')
def _test_operator(operater_name):
    """Align FlexFlow vs. PyTorch tensors dumped for one operator.

    Compares the forward output (and, unless the op is in no_grad_op, the
    output gradient); additionally compares weight gradients for ops in
    param_weight_op and bias gradients for ops in param_bias_op.  The .pt
    files are expected under BASE_DIR/<operater_name>/.
    """
    out_dir = os.path.join(BASE_DIR, operater_name)
    expand = prepend_dirname_fn(out_dir)
    # Ops without gradients (e.g. getitem): only the forward output exists.
    if (operater_name in no_grad_op):
        align_tensors(
            [
                TensorAlignmentData(
                    operater_name + "_out",
                    expand("ff_out.pt"),
                    expand("torch_out.pt"),
                ),
            ]
        )
        return
    # test output (forward value and its gradient)
    align_tensors(
        [
            TensorAlignmentData(
                operater_name + "_out",
                expand("ff_out.pt"),
                expand("torch_out.pt"),
            ),
            TensorAlignmentData(
                operater_name + "_out_grad",
                expand("ff_out_grad.pt"),
                expand("torch_out_grad.pt"),
            ),
        ]
    )
    # test weight
    if (operater_name in param_weight_op):
        align_tensors(
            [
                TensorAlignmentData(
                    operater_name + "_weight_grad",
                    expand("ff_weight_grad.pt"),
                    expand("torch_weight_grad.pt"),
                ),
            ]
        )
    # test bias
    if (operater_name in param_bias_op):
        align_tensors(
            [
                TensorAlignmentData(
                    operater_name + "_bias_grad",
                    expand("ff_bias_grad.pt"),
                    expand("torch_bias_grad.pt")
                )
            ]
        )
| [
"noreply@github.com"
] | flexflow.noreply@github.com |
179db733a06211c1c9fc8e0c372729dfdb609d89 | 26233e55838c1a14c3140c8f0da5653955f9927d | /UserApp/views.py | bfa28f3beec9914c4731bd539a4ceb0d22d5b7f3 | [] | no_license | ShraddhaBurse/online_music_store | ad15182bc065144d2a8d4aa71b4624666152be65 | 0152f4f1595bff6ee13e0449fdebeb0028adcf5f | refs/heads/main | 2023-07-01T14:11:36.458246 | 2021-07-30T16:10:06 | 2021-07-30T16:10:06 | 391,122,187 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,891 | py | from django.http.response import HttpResponse
from django.shortcuts import render, redirect
from AdminApp.models import Category,Song
from UserApp.models import UserInfo
# Create your views here.
def home(request):
    """Render the landing page with every category and every song."""
    cats = Category.objects.all()
    songs = Song.objects.all()
    return render(request,"home.html",{"cats":cats,"songs":songs})
def ShowSong(request,sid):
    """Render the home page restricted to songs of one category (sid)."""
    cats = Category.objects.all()
    songs = Song.objects.filter(Category=sid)
    return render(request,"home.html",{"cats":cats,"songs":songs})
def PlaySong(request,sid):
    """Render the player page for the song with primary key *sid*."""
    cats=Category.objects.all()
    song=Song.objects.get(id=sid)
    return render(request,"playsong.html",{"song":song,"cats":cats,"sid":sid})
def SignUp(request):
    """Register a new user (POST) or show a fresh sign-up form (GET)."""
    if(request.method=="POST"):
        email = request.POST["email"]
        username = request.POST["uname"]
        password = request.POST["pwd"]
        if UserInfo.objects.filter(username=username).exists():
            return render(request,"SignUp.html",{"invalid":username+" already taken."})
        else:
            new_user=UserInfo()
            new_user.email=email
            new_user.username=username
            # SECURITY NOTE(review): the password is stored in plain text;
            # it should be hashed (e.g. django.contrib.auth) before saving.
            new_user.password=password
            new_user.save()
            return redirect(Login)
    else:
        # GET: drop any stale session before showing the form.
        request.session.clear()
        return render(request,"SignUp.html",{})
def Login(request):
    """Authenticate against UserInfo; on success store the username in the session."""
    if(request.method=="GET"):
        return render(request,"Login.html",{})
    else:
        username = request.POST["uname"]
        password = request.POST["pwd"]
        try:
            new_user = UserInfo.objects.get(username=username,password=password)
            request.session["uname"]=username
        except:
            # NOTE(review): this bare except silently swallows a failed login
            # (and any other error) -- the user is redirected home either way,
            # with no session set and no feedback.
            pass
        return redirect(home)
def Logout(request):
    """Clear the session and return to the home page."""
    request.session.clear()
    return redirect(home)
"virajholmukhe@gmail.com"
] | virajholmukhe@gmail.com |
f46c3cbdb857e7569be36953edcb048b226f17a6 | 6dc58c6202da59943f846c0f8c65346dd58e633d | /src/reverse_e/Collapse_tables2.py | f09f0165413fb91a3ceada31a5eebc89d70dc938 | [] | no_license | PaulCotney/TE_db_reporting | aeb91764f09f7af21dcc3384c29a78c416a161d2 | 3802e47b48e64144fa77878e02c484261bc8bb10 | refs/heads/master | 2020-04-06T11:08:22.696540 | 2019-03-07T15:21:04 | 2019-03-07T15:21:04 | 157,405,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,001 | py | import csv
import numpy as np
import pandas as pd
import sqlite3
import datetime
# Copied from ./Anwica/TransposableElement/TEprobeCounts/Step03_CollapseTable-allState.ipynb
database_file = '../TE_db.sqlite'
connection = sqlite3.connect(database_file)
connection.text_factory = str
source_df = None
# This is stupidly inefficient, but is temporary
def build_source_df():
    """Build one wide DataFrame: tmp_summary rows joined with per-sample calls.

    Reads the module-level sqlite `connection`.  Each tmp_summary row (first
    column assumed to be its raw id) becomes a row of the frame; one extra
    integer column is appended per distinct sample name, filled from the
    sample_call table (0 where no call exists).
    """
    cursor = connection.cursor()
    # Assign each distinct sample name a stable column position.
    sample_name_loc = dict()
    loc_sample_name = dict()
    pos = 0
    for sample_name in cursor.execute('select distinct name from sample'):
        sample_name_loc[sample_name[0]] = pos
        loc_sample_name[pos] = sample_name[0]
        pos = pos + 1
    row_counts = cursor.execute('select count(*) from tmp_summary').fetchone()[0]
    column_counts = len(sample_name_loc.keys())
    # calls[row, sample] holds the call value for that summary row / sample.
    calls = np.zeros((row_counts, column_counts), dtype=int)
    summary = []
    row_pos = {}
    row_count = 0
    for row in cursor.execute('SELECT * FROM tmp_summary'):
        # Drop column 0 (the raw id) from the stored row but remember where
        # that id landed so calls can be matched back to it.
        line = []
        for i in range(1, len(row)):
            line.append(row[i])
        summary.append(line)
        row_pos[row[0]] = row_count
        row_count += 1
    for row in cursor.execute('SELECT a.raw_id, b.name, a.value FROM sample_call a JOIN sample b WHERE a.sample_id = b.pid'):
        calls[row_pos[row[0]]][sample_name_loc[row[1]]] = row[2]
    # Append each sample's call value to its summary row, in column order.
    for summary_line_on in range(0, len(summary)):
        for sample_on in range(0, len(sample_name_loc)):
            summary[summary_line_on].append(calls[summary_line_on, sample_on])
    columns = ['my_id','side','chrom','pos','strand','ref_like_prefix','insertion_after_prefix',
               'insertion_before_suffix','ref_like_suffix','REF','TE']
    for i in range(len(loc_sample_name)):
        columns.append(loc_sample_name[i])
    return pd.DataFrame(summary, columns=columns)
def getPopulation():
    """Classify every known sample name into a population group.

    Returns a dict mapping sample name -> 'sanger_founder', 'unc_founder'
    or 'cc'.  CC sample names are read from 'cc_bwt.txt' (one BWT directory
    path per line; the path's basename is the sample name), skipping the
    samples listed in cc_avoid.

    Fixes vs. the previous version: the file handle is closed (with-block),
    lines are stripped so sample names no longer carry a trailing newline
    (which defeated the cc_avoid filter and made the keys unmatchable by
    collapse()), and the unused founder_bwt/cc_bwt constants are gone.
    """
    cc_avoid = ['CC039M3730_UNC_NYGC', 'CC019M6839_UNC_UNC', 'CC081M0332_UNC_UNC']
    sanger_founder = ["C57BL6JN", "129S1", "NOD", "NZOdna", "CASTdna", "PWKdna", "WSBdna"]
    unc_founder = ["AJ_F321", "C57BL6J_m003636A", "129S1_M157", "NOD_M146", "NZO_FNNN", "CAST_M090", "PWK_F6114",
                   "WSB_FNNN"]
    sample_population = {}
    for sample in sanger_founder:
        sample_population[sample] = 'sanger_founder'
    for sample in unc_founder:
        sample_population[sample] = 'unc_founder'
    with open('cc_bwt.txt', 'r') as bwt_list:
        for bwtfile in bwt_list:
            sample = bwtfile.strip().split("/")[-1]
            if not sample or sample in cc_avoid:
                continue
            sample_population[sample] = 'cc'
    return sample_population
# Builds dictionary with the values for chrom, pos, strand, ref_prefix and ref_suffix from Mappable_TEs and
# FinalMatrix_collapsed2.csv. The data for Mappable is in the db and is all that is needed.
def get_ref_dict():
    """Load (chromo, pos, strand) -> [ref_prefix, ref_suffix] from two CSVs.

    Mappable_TEs.csv is read first (prefix/suffix at columns 6-7);
    FinalMatrix_collapsed2.csv is read second (columns 5-6) and overwrites
    any overlapping keys.  NaNs are normalized to empty strings.
    """
    file1 = "Mappable_TEs.csv"
    file2 = "FinalMatrix_collapsed2.csv"
    ref_dict = {}
    df = pd.read_csv(filepath_or_buffer=file1, sep=',', dtype={'chromo': str, 'pos': int})
    df = df.replace(np.nan, '', regex=True)
    data = df.loc[:, ].values
    for d in data:
        [chromo, pos, strand] = d[0:3]
        [ref_prefix, ref_suffix] = d[6:8]
        ref_dict[(chromo, pos, strand)] = [ref_prefix, ref_suffix]
    # chromo,pos,strand,ref_te,state,before
    df = pd.read_csv(filepath_or_buffer=file2, sep=',', dtype={'chromo': str, 'pos': int})
    df = df.replace(np.nan, '', regex=True)
    data = df.loc[:, ].values
    for d in data:
        [chromo, pos, strand, _, _, ref_prefix, ref_suffix] = d[0:7]
        ref_dict[(chromo, pos, strand)] = [ref_prefix, ref_suffix]
    return ref_dict
# Removes the sequences without ref locations from the ref dictionary produced by get_ref_dict into the scratch file
def remove_not_found_refseq():
    """Rebuild tmp_summary from `raw`, then delete every location that has
    a reference context in get_ref_dict() -- i.e. keep only locations whose
    reference sequence was NOT found in the CSVs.

    NOTE(review): no connection.commit() is issued here; presumably a later
    commit (or autocommit mode) persists the deletes -- confirm.
    """
    connection.execute('DROP TABLE IF EXISTS tmp_summary')
    connection.execute('CREATE TABLE tmp_summary AS select * FROM raw')
    cursor = connection.cursor()
    ref_dict = get_ref_dict()
    delete_count = 0
    for key in ref_dict.keys():
        delete_count += 1
        cursor.execute('DELETE FROM tmp_summary WHERE chrom = ? AND pos = ? AND strand =?', (key[0], key[1], key[2]))
    print ("Removed [{} locations]".format(delete_count))
def get_counts_te(df):
    """Split *df* by its TE flag and accumulate per-position, per-sample counts.

    Returns (with_te, without_te, labels, sample_list, all_te_context,
    pos_to_TE_id, pos_to_side): two (position x sample) count matrices for
    TE==1 and TE==0 rows, plus lookup tables keyed by (chromo, pos, strand).

    NOTE(review): sample columns are taken as header[11:] but counts as
    d[13:] -- if the frame really has 11 metadata columns (as built by
    build_source_df) the first two samples are skipped; confirm the layout.
    """
    dtype = {'chromo': str, 'pos': int}
    # df_te = pd.read_sql_query("", connection)
    # df_te = df_te.replace(np.nan, '', regex=True)
    df_te = df[(df['TE'] == 1)]
    data = df_te.iloc[:, ].values
    ref_dict = get_ref_dict()
    # print ref_dict
    header = list(df_te)
    all_te_context = {}
    sample_list = header[11:]
    # We are building the following dictionaries below
    pos_dict = {}
    pos_to_TE_id = {}
    pos_to_side = {}
    for d in data:
        [my_id, side, chromo, pos, strand] = d[0:5]
        [prefix, te_start, te_end, suffix] = d[5:9]
        # A row is "original context" if its sequence has no lowercase bases.
        original_context = False if len(
            [i for i in prefix + te_start + te_end + suffix if i.islower()]) > 0 else True
        pos_to_TE_id[(chromo, pos, strand)] = my_id
        if original_context:
            pos_to_side[(chromo, pos, strand)] = side
        [ref_prefix, ref_suffix] = ref_dict[(chromo, pos, strand)]
        # Assign each distinct position a dense row id (pid) on first sight.
        pid = pos_dict.get((chromo, pos, strand), len(pos_dict))
        pos_dict[(chromo, pos, strand)] = pid
        all_te_context[(chromo, pos, strand)] = [ref_prefix, ref_suffix]
    #We now initialize the
    m, n = len(pos_dict), len(sample_list)
    with_te = np.zeros((m, n), dtype=int)
    without_te = np.zeros((m, n), dtype=int)
    # Populate the with_te with the counts from the samples
    for d in data:
        [chromo, pos, strand] = d[2:5]
        pid = pos_dict[(chromo, pos, strand)]
        sample_count = d[13:]
        for sid, c in enumerate(sample_count):
            with_te[pid][sid] += c
    labels = []
    pid_to_chromo = {v: k for k, v in pos_dict.iteritems()}
    for pid in pid_to_chromo:
        [chromo, pos, strand] = pid_to_chromo[pid]
        l = [chromo, pos, strand]
        labels.append(l)
    dtype = {'chromo': str, 'pos': int}
    # df_te = pd.read_csv(filepath_or_buffer="FinalMatrix_v3.csv", dtype=dtype, sep=',')
    # df_te = df_te.replace(np.nan, '', regex=True)
    df_no_te = df[(df['TE'] == 0)]
    data = df_no_te.iloc[:, ].values
    for d in data:
        [chromo, pos, strand] = d[2:5]
        sample_count = d[13:]
        pid = pos_dict[(chromo, pos, strand)]
        for sid, c in enumerate(sample_count):
            without_te[pid][sid] += c
    return with_te, without_te, labels, sample_list, all_te_context, pos_to_TE_id, pos_to_side
# Uses the interval and sample tables to determine the genotype.
def get_genotype(high_count_strain, chrom, pos, state):
    """Summarize the founder genotype(s) consistent with the strains carrying a TE.

    Queries the Intervals/Samples tables for genotypes covering (chrom, pos),
    skipping CC081 samples.  For 'private' state returns "strain(genotype)";
    otherwise returns the founder genotypes whose strains overlap
    high_count_strain, annotated "(consistent/total)" when only partial,
    joined with '|'.

    SECURITY NOTE(review): the query is built by string interpolation, not
    parameter binding -- fine for trusted internal data, but worth converting
    to '?' placeholders.
    """
    cursor = connection.cursor()
    chrom = "'%s'" % chrom
    query = """select S.name,I.genotype from Intervals I, Samples S where S.sid=I.sid
and I.chromosome = %s and I.start <= %d and I.end >= %d""" % (chrom, pos, pos)
    cursor.execute(query)
    slist = [i for i in cursor.fetchall() if i[0].find('CC081') < 0]
    founders = ['AA', 'BB', 'CC', 'DD', 'EE', 'FF', 'GG', 'HH']
    genotype_to_strain, strain_to_genotype = {}, {}
    for s, g in slist:
        # Strain id is the first five characters of the sample name.
        strain = s[:5]
        if g not in founders:
            continue
        genotype_to_strain[g] = genotype_to_strain.get(g, []) + [strain]
        strain_to_genotype[strain] = g
    geno_with_TE = set()
    for strain in high_count_strain:
        g = strain_to_genotype.get(strain, 'XX')
        geno_with_TE.add(g)
    fully_consistent_geno = []
    if state == 'private':
        return "%s(%s)" % (list(high_count_strain)[0], list(geno_with_TE)[0])
    geno_with_TE = geno_with_TE - {'XX'}
    for g in geno_with_TE:
        strain_list = genotype_to_strain[g]
        # Strains with this genotype that do NOT carry the TE.
        clist = set(strain_list) - set(high_count_strain)
        if len(clist) == 0:
            fully_consistent_geno.append(g)
        else:
            consistent = len(strain_list) - len(clist)
            total = len(strain_list)
            g = "%s(%d/%d)" % (g, consistent, total)
            fully_consistent_geno.append(g)
    return "|".join(sorted(fully_consistent_geno))
def collapse(df):
    """Collapse raw per-sample TE counts into calls and write the final CSV.

    For each position: samples with >4 TE-supporting reads are "high count";
    the sharing pattern across strains yields a state (private/questionable/
    fixed/shared) and a genotype summary.  collapsed_count encodes per sample:
    -1 = no evidence, 1 = TE only, 0 = non-TE only, 2 = both.
    Output goes to FinalMatrix_ALL_collapsed.csv.  (Python 2 module.)
    """
    with_te, without_te, labels, sample_list, all_te_context, pos_to_TE_id, pos_to_side = get_counts_te(df)
    (m, n) = with_te.shape
    collapsed_count = np.zeros((m, n), dtype=int) - 1
    sample_population = getPopulation()
    new_data = []
    # m = 10
    # change above line later
    for pid in range(m):
        [chromo, pos, strand] = labels[pid]
        my_id = pos_to_TE_id[(chromo, pos, strand)]
        side = pos_to_side[(chromo, pos, strand)]
        [ref_prefix, ref_suffix] = all_te_context[(chromo, pos, strand)]
        # print sample_count
        high_count = np.where(with_te[pid] > 4)[0]
        high_count_sample = [sample_list[sid] for sid in high_count]
        high_count_strain = set()
        non_cc = False
        for sample in high_count_sample:
            # Paul added as bandaid: unknown samples get an "Unknown" population.
            if sample not in sample_population.keys():
                sample_population[sample] = "Unknown"
            if sample_population[sample] == "cc":
                # CC strain id = first five characters of the sample name.
                strain = sample[:5]
            else:
                strain = sample
                non_cc = True
            high_count_strain.add(strain)
        if len(high_count_strain) == 1:
            state = 'questionable' if non_cc else 'private'
        elif len(high_count_sample) == len(sample_list):
            state = 'fixed'
        else:
            state = 'shared'
        geno = get_genotype(high_count_strain, chromo, pos, state)
        for sid in high_count:
            collapsed_count[pid][sid] = 1
        high_count_nonTE = np.where(without_te[pid] > 4)[0]
        for sid in high_count_nonTE:
            collapsed_count[pid][sid] += 1
        row_data = [my_id, side, chromo, pos, strand, state, ref_prefix, ref_suffix, geno] + list(
            collapsed_count[pid])
        new_data.append(row_data)
    print collapsed_count
    header = ['my_id', 'side', 'chromo', 'pos', 'strand', 'state', 'before', 'after', 'genotype']
    header = header + sample_list
    filename = "FinalMatrix_ALL_collapsed.csv"
    with open(filename, 'wb') as fp:
        a = csv.writer(fp, delimiter=',')
        a.writerows([header])
        for row_data in new_data:
            a.writerows([row_data])
            # print row_data
    print("Wrote file %s [%d lines]" % (filename, len(new_data)))
if __name__ == '__main__':
    # Pipeline: filter tmp_summary, build the joined frame, then collapse.
    remove_not_found_refseq()
    timestart = datetime.datetime.now()
    print "Begin\t%02d:%02d:%02d" % (timestart.time().hour, timestart.time().minute, timestart.time().second)
    # NOTE(review): timeloadfin is captured BEFORE build_source_df runs, so
    # `difference` (which is also never printed) does not measure the load.
    timeloadfin = datetime.datetime.now()
    source_df = build_source_df()
    difference = timeloadfin - timestart
    source_df.to_csv('test.csv')
    collapse(source_df)
| [
"paulcotn@email.unc.edu"
] | paulcotn@email.unc.edu |
84086e215703b8aa319c5a751799c69a075b8126 | a01b0d36896c50d975a3b43643a5f8b9b4c7a0ce | /N-Grams/trigrams.py | bf166442b9f255fe30780f7d3df756e0bbf086fe | [] | no_license | DaanvanderValk/CDA2 | 10bd3d3362817b9119c999adf3dd5ecd301af8c5 | adf10b20893f00e22ec08febaa44fb835e1d9125 | refs/heads/master | 2021-07-08T09:17:38.966583 | 2019-01-14T10:10:41 | 2019-01-14T10:10:41 | 133,715,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,089 | py | # -*- coding: utf-8 -*-
"""
Created on Wed May 30 12:29:30 2018
@author: sande
"""
from pandas import read_csv
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../')
from Data.datareader import *
#for trigrams, iterate over all sensors
#F_PU3, F_PU5, F_PU9 have been ignored because they contain only zeros in the dataset
for fields in ['L_T1', 'L_T2', 'L_T3', 'L_T4', 'L_T5','L_T6', 'L_T7', 'F_PU1','F_PU2', 'F_PU4', 'F_PU6', 'F_PU7', 'F_PU8', 'F_PU10','F_PU11', 'F_V2', 'S_V2', 'P_J269', 'P_J300', 'P_J256', 'P_J289', 'P_J415','P_J302', 'P_J306', 'P_J307', 'P_J317', 'P_J14', 'P_J422']:
print ("for sensor", fields)
series = read_3_series(fields)
#and for each sensor, discretize the data using different number of quantiles
for no_quantiles in [5,10,15,20]:
_, bins = pd.qcut(series, no_quantiles, retbins=True, duplicates = 'drop')
cut_data, bins = pd.qcut(series, no_quantiles, retbins=True, labels=range(len(bins)-1),duplicates = 'drop')
#create a matrix of dimensions of no_quantiles
occurrences_train = np.ones((no_quantiles,no_quantiles,no_quantiles))
sum = 0
#try for tri-grams
for i in range(0,len(cut_data)-5):
a = cut_data[i]
b = cut_data[i+1]
c = cut_data[i+2]
occurrences_train[a][b][c] = occurrences_train[a][b][c] + 1
#convert to probabilities
occurrences_train_prob = np.true_divide(occurrences_train, len(cut_data))
#For test data, evaluate the performance
series = read_4_series(fields)
#print (occurrences_train_prob)
_, bins = pd.qcut(series, no_quantiles, retbins=True, duplicates = 'drop')
#no_quantiles = len(bins)
cut_data, bins = pd.qcut(series, no_quantiles, retbins=True, labels=range(len(bins)-1), duplicates = 'drop')
for threshold in [0.0001, 0.0002, 0.0005, 0.001, 0.005]:
count = 0
true_positive_count = 0
for i in range(0,len(cut_data)-5):
a = cut_data[i]
b = cut_data[i+1]
c = cut_data[i+2]
if (occurrences_train_prob[a][b][c] < threshold):
#Count the number of tri grams which are under threshold
count = count + 1
if attack_at_time_04(cut_data.index[i+2]) :
#If there is an attack at the time of the third entry of tri grams, then it is true positive
true_positive_count = true_positive_count + 1
if ((count > 0) and (true_positive_count > 0)) :
print ("Precision: ",true_positive_count/count," for threshold: ", threshold," and number of percentiles: ",no_quantiles)
else:
print ("no true positives")
#once we find out the sensors, percentile and threshold which gives us the best result, we can print the time of attack as detected by our
#model to see which attacks have been identified correctly from those attacks mentioned in the png file provided with the dataset
no_quantiles = 5
threshold = 0.0002
#set to hold all unique attacks identified by our model
attacks = set()
true_positive_counts_of_all_sensors = 0
no_of_alarms = 0
for fields in ['L_T1', 'L_T2', 'L_T3', 'L_T4', 'L_T5','L_T6', 'L_T7', 'F_PU1','F_PU2', 'F_PU4', 'F_PU6', 'F_PU7', 'F_PU8', 'F_PU10','F_PU11', 'F_V2', 'S_V2', 'P_J269', 'P_J300', 'P_J256', 'P_J289', 'P_J415','P_J302', 'P_J306', 'P_J307', 'P_J317', 'P_J14', 'P_J422']:
series = read_3_series(fields)
print ("for sensor", fields)
_, bins = pd.qcut(series, no_quantiles, retbins=True, duplicates = 'drop')
cut_data, bins = pd.qcut(series, no_quantiles, retbins=True, labels=range(len(bins)-1),duplicates = 'drop')
#create a matrix of dimensions of no_quantiles
occurrences_train = np.ones((no_quantiles,no_quantiles,no_quantiles))
sum = 0
#try for tri-grams
for i in range(0,len(cut_data)-5):
a = cut_data[i]
b = cut_data[i+1]
c = cut_data[i+2]
occurrences_train[a][b][c] = occurrences_train[a][b][c] + 1
#convert to probabilities
occurrences_train_prob = np.true_divide(occurrences_train, len(cut_data))
series = read_4_series(fields)
_, bins = pd.qcut(series, no_quantiles, retbins=True, duplicates = 'drop')
cut_data, bins = pd.qcut(series, no_quantiles, retbins=True, labels=range(len(bins)-1), duplicates = 'drop')
count = 0
count_for_precision = 0
precision_sum = 0
true_positive_count = 0
for i in range(0,len(cut_data)-5):
a = cut_data[i]
b = cut_data[i+1]
c = cut_data[i+2]
if (occurrences_train_prob[a][b][c] < threshold):
count = count + 1
no_of_alarms = no_of_alarms + 1
if attack_at_time_04(cut_data.index[i+2]) :
attacks.add(get_attack_number_04(cut_data.index[i+2]))
true_positive_count = true_positive_count + 1
true_positive_counts_of_all_sensors = true_positive_counts_of_all_sensors + 1
if ((count > 0) and (true_positive_count > 0)) :
print ("Using best parameters and threshold we get Precision: ",true_positive_count/count," for threshold: ", threshold," and number of percentiles: ",no_quantiles)
else:
print ("no true positives")
#attacks detected as per https://github.com/DaanvanderValk/CDA2/blob/master/Data/BATADAL_dataset04_attacks.png
print ("Attacks detected ", attacks)
print ("Aggregated precision for effective sensors ", (true_positive_counts_of_all_sensors/no_of_alarms)) | [
"sandesh.mj2805@gmail.com"
] | sandesh.mj2805@gmail.com |
624a545054f495f9a230007aaf11c7383c585b3d | 22512fdcecc0de2ceb466cd5b4d90616c3b4589d | /Vrobo.py | 46ce84201b2de47af1427819f16540eed7f9b83d | [] | no_license | JoseMiracydeSouzaFilho/Samto | 06f4dfd3a8069c57df567b48618d3a7405076cb4 | 4fac239aa6b220e93c7d23b3e9ac30116cdefffd | refs/heads/master | 2020-12-30T16:02:04.169888 | 2017-05-13T20:13:23 | 2017-05-13T20:13:23 | 91,200,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,056 | py | # coding: utf-8
# encoding: win-1252
import os
from functools import partial
from tkinter import *
import threading
from time import gmtime, strftime
import socket
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import xml.sax
class CreatedHandler(FileSystemEventHandler):
    """Watchdog handler: parse every newly created TestRunReport*.xml file."""
    patterns = ["*.xml"]

    def on_created(self, event):
        # Fired by watchdog for every created path; directories are ignored.
        if event.is_directory:
            return
        filepath, ext = os.path.splitext(event.src_path)
        # Only real report files: name contains 'TestRunReport', extension is
        # .xml, and '-fixed' copies are skipped.
        if 'TestRunReport' in filepath:
            if ext == '.xml' and '-fixed' not in filepath:
                parser = xml.sax.make_parser()
                msg=''
                parser.setContentHandler(getreportXML(msg))
                parser.parse(event.src_path)
                print ('chamou getReport : ', filepath)
class getreportXML(xml.sax.handler.ContentHandler):
    """SAX handler that collects Passed/RunName/Start/Duration tag values
    into a comma-separated string and hands it to getMsgXLM() once the
    Duration field has been seen."""

    def __init__(self, msg):
        xml.sax.handler.ContentHandler.__init__(self)
        self.prefixo = ''          # indentation prefix for debug printing
        self.msg = msg             # accumulated "tag,value,..." string
        togleTagXML("0")           # start with the capture flag cleared

    # Called when a new opening tag is found.
    def startElement(self, tag, attr):
        self.prefixo += '  '
        if tag in ['Passed','RunName','Start','Duration']:
            print (self.prefixo +'TAG ->', tag)
            self.msg += tag + ','
            # Arm the flag so the next characters() call captures the value.
            togleTagXML("1")

    # Called when character data is found.
    def characters(self, txt):
        if txt.strip():
            if achouTag:
                # Strip the comma out of the Duration value so it does not
                # break the comma-separated message format.
                if "," in txt :
                    s0 = txt.split(",")[0]
                    s1 = txt.split(",")[1]
                    txt = s0 + s1
                self.msg+=txt + ','
                print (self.prefixo + 'TXT - >', txt)
                togleTagXML("0")
            # Duration is the last field of interest: ship the message.
            if 'Duration' in self.msg:
                print(" Verificar a ultima virgula na string do Handler : ", self.msg)
                getMsgXLM(self.msg)

    # Called when the end of a tag is found.
    def endElement(self, name):
        self.prefixo = self.prefixo[:-2]
def togleTagXML(x):
    """Set the module-level achouTag flag used by the SAX handler.

    An argument containing "1" arms the flag (the next characters() call
    captures its text); an argument containing "0" clears it.  The previous
    code used `elif "0":`, a constant-truthy condition that cleared the flag
    for ANY argument lacking "1" -- the membership test below is the
    intended check (identical behavior for the existing "0"/"1" callers).
    """
    global achouTag
    if "1" in x:
        achouTag = True
    elif "0" in x:
        achouTag = False
def getMsgXLM(msg):
    """Prepend session metadata to a parsed report string and send it.

    Relies on module globals (STATE, ESTADO_MANUT, ESTADO_ATIVO, SETUP,
    ENGTEST, PROJECT, BINARY, MODE) and envia_msg_Server(), all defined
    elsewhere in this module.
    """
    print ('Valor de State :',STATE)
    # Only transmit to the server when the client is not in maintenance state.
    if STATE != ESTADO_MANUT:
        msg = ESTADO_ATIVO + ',' + SETUP +','+ 'EngTest'+',' + ENGTEST + ',' + 'Project' + ',' + PROJECT + ',' + 'Binary'+','\
             + BINARY + ',' + msg + 'Mode of Operation' + ',' + MODE
        print ('String Final para Socket: ', msg)
        envia_msg_Server(msg)
class GuiPart(object):
    """Tkinter window for the robot client: session-entry form, mode
    radio buttons, a Maintenance menu entry and a read-only log pane."""

    def __init__(self, master, get_dataentry, get_maint):
        # get_dataentry / get_maint are callbacks supplied by ThreadedClient.
        # self.queue = queue
        self.master = master
        # Window basics and fonts.
        master.title('ROBO CLIENT')
        master.option_add('*Font', 'Arial 10')
        master.option_add('*EntryField.Entry.Font', 'Courier 10')
        master.option_add('*Listbox*Font', 'Courier 10')
        # Build the menu bar (File -> Open... / Maintenance).
        self.menubar = Menu(master)
        master.geometry("480x600+300+50")
        self.cmdmenu = Menu(self.menubar)
        self.cmdmenu.add_command(label='Open...', underline=0)
        self.cmdmenu.add('separator')
        self.cmdmenu.add_command(label='Maintenance', underline=0,
                                 background='white', activebackground='green',
                                 command= get_maint)
        self.menubar.add_cascade(label="File", menu=self.cmdmenu)
        master.config(menu=self.menubar)
        # Frame1: session-entry form (eng test, project, binary, mode).
        Frame1 = Frame(master,borderwidth=2,relief=GROOVE, highlightthickness=2, highlightbackground="#111")
        Label(Frame1, text = "ENG TEST :").grid(row=0,column=0,padx=5,pady=5,sticky=W)
        # HOST/PORT hold the server endpoint (IP_Server is a module global).
        self.HOST = StringVar()
        self.HOST.set(IP_Server)
        self.ENGTEST = StringVar()
        Entry(Frame1, textvariable= self.ENGTEST).grid(row=0,column=1,padx=5,pady=5)
        Label(Frame1, text = "PROJECT :").grid(row=1,column=0,padx=5,pady=5,sticky=W)
        self.PORT = IntVar()
        self.PORT.set(21000)
        self.PROJECT = StringVar()
        Entry(Frame1, textvariable=self.PROJECT).grid(row=1,column=1,padx=5,pady=5)
        Label(Frame1, text="BINARY :").grid(row=2, column=0, padx=5, pady=5, sticky=W)
        self.BINARY = StringVar()
        Entry(Frame1, textvariable=self.BINARY).grid(row=2, column=1, padx=1, pady=1)
        #Label(Frame1, text="MODE :").grid(row=3, column=0, padx=5, pady=5, sticky=W)
        # MODE: 1 = AUTOMATIC, 2 = MANUAL (radio buttons styled as toggles).
        self.MODE = IntVar()
        #Entry(Frame1, textvariable=self.MODE).grid(row=3, column=1, padx=5, pady=5)
        Radiobutton(Frame1,text=' AUTOMATIC ',value=1,variable=self.MODE,indicatoron=0).grid(row=0, column=2, padx=5, pady=5)
        Radiobutton(Frame1,text=' MANUAL ', value=2, variable=self.MODE,indicatoron=0).grid(row=1, column=2, padx=5, pady=5)
        self.MODE.set(1)
        Frame1.grid(row=0,column=0,padx=5,pady=5,sticky=W+E)
        # Frame3: read-only log pane fed by the server-reception thread.
        Frame3 = Frame(master, borderwidth=2, relief='sunken', highlightthickness=2, highlightbackground="#111")
        self.lbl_Robo = Label(Frame3, relief=RAISED, borderwidth=2, text=" LOG TRACKING from Server ")
        self.lbl_Robo.grid(row=0, column=0, padx=5, pady=5, sticky=W + E)
        self.Text_Robo = Text(Frame3, width=54, height=20, state=DISABLED)
        # Temporarily enable the widget to write the greeting, then lock it.
        self.Text_Robo.config(state=NORMAL)
        self.Text_Robo.insert(END, '-> Robo Client Ready .... \n')
        self.Text_Robo.yview_scroll(1, "pages")
        self.Text_Robo.config(state=DISABLED)
        self.Text_Robo.grid(row=1, column=0, padx=5, pady=5, sticky=W)
        scroll_r1 = Scrollbar(Frame3, command=self.Text_Robo.yview)
        self.Text_Robo.configure(yscrollcommand=scroll_r1.set)
        scroll_r1.grid(row=1, column=2, padx=5, pady=5, sticky=E + S + N)
        Frame3.grid(row=2, column=0, padx=5, pady=5)
        self.bt_start = Button(Frame1, text=' CONFIRM ', bg="#ECE82E", command=get_dataentry)
        self.bt_start.grid(row=2, column=2, rowspan=2, padx=5, pady=5)
class ThreadReception_Server(threading.Thread):
    """Background thread that listens on the server socket and reports
    START / ACK control messages on stdout."""

    def __init__(self, conn,text):
        threading.Thread.__init__(self)
        self.connexion = conn # server socket
        self.text = text      # log Text widget (currently unused here)

    def run(self):
        while True:
            try:
                # Receive a message from the server (blocking).
                message_rec = self.connexion.recv(1024).decode(encoding='UTF-8')
                if "START" in message_rec:
                    print ('Servidor Ativo')
                # NOTE(review): the else below pairs with the "ACK" test, so
                # ANY non-ACK message (including START) prints the re-send
                # warning; errors are silently swallowed, which can busy-loop
                # on a dead socket.
                if "ACK" in message_rec:
                    print ('Servidor Recebeu os Dados Corretamente')
                else :
                    print ('Servidor not Recebeu os Dados Corretamente - Reenviar ')
            except socket.error:
                pass
class ThreadedClient(object):
    def __init__(self, master,state):
        """Wire together the GUI, the server-connection thread and the
        persisted machine state.

        master: Tk root window; state: initial machine-state label.
        """
        self.master = master
        self.estado = state
        # Create the queue
        # self.queue = queue.Queue()
        # Set up the GUI part
        self.gui = GuiPart(self.master, self.get_data_entry,self.get_maint)
        self.robo = Conecta_Server(self.gui.Text_Robo,self.gui.lbl_Robo) # connection thread to the SAMTO server
        self.robo.start()
        self.get_laststate()
        if STATE == ESTADO_DISP:
            self.get_data_entry()
    def get_laststate(self):
        """Restore the last persisted machine state from the maintenance file.

        If the file does not exist yet it is created and seeded with the
        initial AVAILABLE state.  Updates the module-level STATE.
        """
        try:
            with open(path_manutfile,'r') as f: # read the last recorded state (last line)
                r = f.readlines()
                last_line = r [len(r)-1]
                self.estado = last_line
                global STATE
                STATE = self.estado
                print ('ultima linha ',self.estado)
                # NOTE(review): explicit close inside `with` is redundant.
                f.close()
        except IOError:
            # File missing: create it and write the initial state once.
            with open(path_manutfile,"a") as f: # first-time creation
                f.close()
            with open(path_manutfile, "a") as f:
                f.write(ESTADO_DISP + '\n')
                f.close()
            STATE = ESTADO_DISP
            print(' File manutencao criado e Dado inicial inserido', ESTADO_DISP)
def get_data_entry(self):
global ENGTEST, PROJECT, BINARY, MODE, STATE
STATE = ESTADO_DISP # tem que avaaliar se o estado anterior era manuten
envia_msg_Server(STATE) # Saiu de Manutençao e foi para disp ou Clicou n botao
with open(path_manutfile, "a") as f:
f.write(STATE + '\n')
f.close()
ENGTEST = self.gui.ENGTEST.get()
if ENGTEST =="":
ENGTEST = 'none'
PROJECT = self.gui.PROJECT.get()
if PROJECT =="":
PROJECT = 'none'
BINARY = self.gui.BINARY.get()
if BINARY =="":
BINARY = 'none'
MODE = str(self.gui.MODE.get())
if MODE == "1":
MODE = "AUTO"
else:
MODE = "MAN"
self.gui.Text_Robo.config(state=NORMAL)
self.gui.lbl_Robo.config(bg='#6EEC78')
self.gui.Text_Robo.insert (END," \n : : : : : : : : : : : : : : : : : : : : : : : : : : : : : : : : : : \n")
self.gui.Text_Robo.insert (END," Welcome , Session Started for : \n")
self.gui.Text_Robo.insert(END,ENGTEST + '\n')
self.gui.Text_Robo.insert(END,PROJECT + '\n')
self.gui.Text_Robo.insert(END,BINARY + '\n')
self.gui.Text_Robo.insert(END,MODE + '\n')
self.gui.Text_Robo.yview_scroll(1, "pages")
self.gui.Text_Robo.config(state=DISABLED)
# return [ENGTEST,PROJECT,BINARY,MODE]
def get_maint(self):
self.estado = ESTADO_MANUT
self.gui.Text_Robo.config(state=NORMAL)
self.gui.lbl_Robo.config(bg='#ECE82E')
self.gui.Text_Robo.insert(END," \n ################################################ \n")
self.gui.Text_Robo.insert(END," State of Maintenace was activated :")
self.gui.Text_Robo.insert(END," \n ################################################ \n")
self.gui.Text_Robo.yview_scroll(1, "pages")
self.gui.Text_Robo.config(state=DISABLED)
t1 = Toplevel(self.gui.master,borderwidth=5, bg='white')
t1.title(" Maintenance ")
t1.geometry("220x100+800+80")
Label(t1, text="Enter Maintenance code :").grid(row=0,column=0)
self.manutext = StringVar()
self.manutentry = Entry(t1,textvariable=self.manutext).grid(row=1,column=0)
self.mybutton = Button(t1,text = "OK",bg="#ECE82E",command = partial(self.sendmsg,self.estado)).grid(row=2,column=0, rowspan=2)
    def sendmsg(self,state): # send the maintenance state to the server
        """Report *state* (with setup, user, code and timestamp) to the SAMTO
        server and append the state to the local maintenance log file."""
        global STATE
        STATE = state
        code = self.manutext.get()
        print(' Digitou o codigo : ',code)
        # CSV-style message: state,setup,user,code,timestamp,duration
        msg = state + ',' + SETUP + ',' + ENGTEST + ',' + code + ',' + strftime("%Y-%m-%d %H:%M:%S", gmtime()) + ',' \
              + '1hora'
        envia_msg_Server(msg)
        with open(path_manutfile,"a") as f:
            f.write(state + '\n')
            f.close()
class Conecta_Server(threading.Thread):
    def __init__(self,text,lbl):
        """Keep references to the log Text widget and status Label so the
        connection thread can report progress in the GUI."""
        threading.Thread.__init__(self)
        self.text = text
        self.lbl = lbl
def run(self):
global CONECTA_SERVER
global SOCKET_ROBO
while 1:
if CONECTA_SERVER == False:
try:
server_Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_Socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
server_Socket.connect((IP_Server, PORT_SERVER))
# Conversa com (server ): Lança uma thread para pegar as messages
th_R = ThreadReception_Server(server_Socket,self.text)
th_R.start()
print("[+] Nova Thread Iniciada para Robo: "+IP_Server+":"+str(PORT_SERVER))
CONECTA_SERVER = True
SOCKET_ROBO = server_Socket
self.lbl.config(bg='#6EEC78')
self.text.config(state=NORMAL)
self.text.insert(END, ' ------------------------------------------------------------------------\n')
self.text.insert(END, 'SAMTO Server is Connected ....\n')
self.text.insert(END, ' ------------------------------------------------------------------------\n')
self.text.yview_scroll(1, "pages")
self.text.config(state=DISABLED)
except socket.error:
print('Error','The connection with server has failed .')
self.text.config(state=NORMAL)
self.text.insert(END, 'The connection with server has failed.... Trying \n')
self.text.yview_scroll(1, "pages")
self.text.config(state=DISABLED)
CONECTA_SERVER = False
class envia_msg_Server(object):
    """Fire-and-forget sender: instantiating this class pushes *message*
    to the SAMTO server socket as a side effect.

    Nothing is sent when no connection is established yet.
    """
    def __init__(self, message):
        self.msg = message
        if not CONECTA_SERVER:
            return
        try:
            SOCKET_ROBO.send(bytes(self.msg, "UTF8"))
        except socket.error:
            print ('Mensagem nao enviada para SERVER :', self.msg)
        else:
            print ('Mensagem enviada para SERVER :', self.msg)
if __name__ == "__main__":
    # Machine-state labels exchanged with the SAMTO server.
    ESTADO_ATIVO = "TESTING"
    ESTADO_MANUT = "MAINTENANCE"
    # [sic] misspelling kept on purpose: this exact string is part of the
    # wire protocol / log format -- do not "fix" it without checking the peer.
    ESTADO_DISP = "AVALIABLE"
    MESAGEM_LOG = 'LOG'
    #IP_Server = '105.112.146.197'
    IP_Server = '127.0.0.1'
    PORT_SERVER = 21000
    CONECTA_SERVER = False
    msgfinal = ''
    SETUP = 'ANRITSU_LTE_1_ATT'
    # Watch the test-results directory for newly created files (watchdog).
    event_handler = CreatedHandler()
    observer = Observer()
    pathline = "/home/jmiracy/RTDTests"
    path_manutfile = '/home/jmiracy/Samto/maintenance.txt'
    observer.schedule(event_handler, pathline, True)
    observer.start()
    # Build the Tk GUI and hand control to its event loop.
    root = Tk()
    client = ThreadedClient(root,ESTADO_DISP)
    root.mainloop()
"jmiracy@gmail.com"
] | jmiracy@gmail.com |
773bd8d5905ffdfbfc401c174598d1d6aa238f05 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/3/hby.py | d2745b4aff60063d0c31bd24fb6ac64149e7e987 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Python 2 module (print statements below).
    # lineRemaining is the token list after the 'hBY' keyword; a printable
    # payload is delimited by standalone '"' tokens at both ends.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print: drop the quote tokens and re-join with spaces
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # only the two quote tokens: print an empty line
            print
def main(fileName):
    # Interpret each line of the program file: lines whose first token is
    # the keyword 'hBY' are print statements; anything else aborts.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'hBY':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
    # Usage: python hby.py <program-file>
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
829a60803827790a24c17e21c99521fc7746dd54 | bae7e3b6cdfd6f354b79dbc849c1969a46aed586 | /hiAPP/plot_linkage_matrix.py | 62439c042f1b38aa4eb1a36072056960d65d5d01 | [
"MIT"
] | permissive | jmborr/LDRDSANS | 7f6b8ef44db3b93972ae9bff08a641067c19bae1 | b8081ecb78da46a530d61efd3cb6764f3b17b567 | refs/heads/master | 2021-07-24T23:49:38.271100 | 2017-11-05T22:36:40 | 2017-11-05T22:36:40 | 71,494,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,315 | py | # -*- coding: utf-8 -*-
"""
Matplotlib plot of the dendrogram associated with the linkage matrix.
Thanks to Jorn's Blog
<https://joernhees.de/blog/2015/08/26/scipy-hierarchical-clustering-and-dendrogram-tutorial/>
"""
# needed imports
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage
import numpy as np
import argparse
import sys
if __name__ == "__main__":
    # CLI: positional linkage-matrix file plus optional truncation depth.
    parser = argparse.ArgumentParser(description="Plots a dendogram from a scipy.cluster.hierarchy linkage matrix.")
    parser.add_argument("linkage", type=str, help="linkage matrix file, output from fpch2scph.py")
    parser.add_argument("--p", type=int, default=10, help="show only the last p merged clusters")
    args=parser.parse_args()
    # Z is the (n-1, 4) linkage matrix produced by scipy's hierarchical
    # clustering, loaded from the plain-text file given on the command line.
    Z=np.loadtxt(args.linkage)
    plt.title('Hierarchical Clustering Dendrogram (truncated)')
    plt.xlabel('sample index')
    plt.ylabel('RMSD (Angstroms)')
    dendrogram(
        Z,
        truncate_mode='lastp',  # show only the last p merged clusters
        p=args.p,  # show only the last p merged clusters
        show_leaf_counts=False,  # otherwise numbers in brackets are counts
        leaf_rotation=90.,
        leaf_font_size=12.,
        show_contracted=True,  # to get a distribution impression in truncated branches
    )
    plt.show()
    sys.exit(0)
| [
"borreguero@gmail.com"
] | borreguero@gmail.com |
e31565d46f8c52d5e3146ca06e87d779640dae00 | bb37a1929333502fc9de2cfeff82f0f2cdfa7dad | /listview/listview/settings.py | b813fe20c750a695c202568df36c603c0918fb62 | [] | no_license | Arnie09/Arnie09-NetajiSubhashEngineeringCollege-DesignLab2021-PracticalAssignment | 5580118a50ce2ea8e89e707058ce999fb67bf0eb | 94484595b49e9eaca55e209bbf113630c3bc1b77 | refs/heads/main | 2023-06-12T21:04:30.135138 | 2021-07-03T11:26:33 | 2021-07-03T11:26:33 | 382,597,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,301 | py | """
Django settings for listview project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# it from the environment (e.g. os.environ) before any real deployment.
SECRET_KEY = 'django-insecure-k9%cf+@o41$z^qmew0m@90(%+_z+v75i)5&zyi$gs6*7r18u&_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty while DEBUG is True (Django then only accepts localhost-style hosts).
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'list',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'listview.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'listview.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"arnab.chanda@thoughtworks.com"
] | arnab.chanda@thoughtworks.com |
6293f718af7c11f17a2c427bc21677b67e77f207 | 133cec887e1a01e19639f7d2a2c86195472abf0d | /OOPS/Inner_class_creation.py | a2c7527ad3d5c4acf996b5136a0d0e445fe0246c | [] | no_license | gunupati/Programs | 371fc066b646bea5599ab14397da4e7466735872 | c8be8ee8c9d999704e0cfb9111e14032637c8d87 | refs/heads/master | 2020-07-03T12:31:47.818573 | 2019-11-07T14:21:32 | 2019-11-07T14:21:32 | 201,905,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | class outer:
    def __init__(self):
        # Constructor of the outer class; runs when ``outer()`` is called.
        print('Outer class object creation')
    class inner:
        # Nested class: reached through the enclosing class or one of its
        # instances, e.g. ``o.inner()`` in the demo below.
        def __init__(self):
            print('inner class object creation')
        def m1(self):
            print("inner class method")
# Demo: build an outer object, create an inner object through it, and call
# the inner method.
o=outer()
i=o.inner()
i.m1()
"gunupatimahesh@gmail.com"
] | gunupatimahesh@gmail.com |
afd81c419a25a3e752cc0ae8170cabff14f2a825 | f7b2835087f0eab87952021d19f5f1e50ae340f7 | /gyojin/07_구구단.py | bcc8aea653163228c964fe62aae88dd7847f23eb | [] | no_license | zeroistfilm/week01 | c8c926a3fea1af70b33b6fad904aa91e8707284c | 606e1c47c61dcbc7f9fb7b38c1e53341e95879c1 | refs/heads/main | 2023-02-09T17:50:17.016172 | 2020-12-17T07:10:57 | 2020-12-17T07:10:57 | 320,431,489 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py |
# Read an integer from stdin and print its times table from 1 through 9.
x = int(input())
i = 1
while i < 10:
    print(f'{x} * {i} = {x * i}')
    i += 1
| [
"noreply@github.com"
] | zeroistfilm.noreply@github.com |
ce67d5e4cbc106774ba02c02cb38b2fa7b165403 | b01eee55884e21412a1812593996a0d9156e20bc | /cipp/x64assembler/instructions/push_reg.py | d3c6d03e68af5bf12c7f9965096d230b1733a50b | [] | no_license | JacquesLucke/cipp | 46bdb7eebaeb863f424c92542ea56b49b5f0fe2e | d4f38fd1fc84aed9cbf49b85bf6c4b96f2561f71 | refs/heads/master | 2021-10-27T18:29:23.288884 | 2019-04-18T15:36:52 | 2019-04-18T15:36:52 | 123,611,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | from .. bits import Bits
from .. block import Instruction
class PushRegInstr(Instruction):
def __init__(self, reg):
assert reg.size in (16, 64)
self.reg = reg
def toIntelSyntax(self):
return f"push {self.reg.name}"
def toMachineCode(self):
if self.reg.size == 64:
return self.toMachineCode_64()
elif self.reg.size == 16:
return self.toMachineCode_16()
else:
raise Exception()
def toMachineCode_64(self):
prefix = Bits.fromHex("" if self.reg.group == 0 else "41")
opcode = Bits.fromHexAndOffset("50", self.reg.number)
return prefix + opcode
def toMachineCode_16(self):
return Bits.fromHex("66") + self.toMachineCode_64()
| [
"mail@jlucke.com"
] | mail@jlucke.com |
dfd8591802490cdd7cc4e0e62c496a27cd8ed1c7 | dbfdc9c8978beb0fb7869c7d605735bbdc1a2459 | /aremind/apps/wbn_registration/migrations/0005_auto__add_field_wbuser_date_registered.py | 6a8cae0cca2f8cd3ffeb1c59cb8309fe4348c4c5 | [] | no_license | dimagi/WBNigeria | 1f1115068bbfeba570d740e1b3cd7950bc94788b | 29b277e2b23c37004c2ab6016bab86febf593f91 | refs/heads/master | 2021-01-25T09:53:08.688031 | 2013-08-13T23:17:39 | 2013-08-13T23:17:39 | 3,768,087 | 0 | 0 | null | 2013-02-26T02:13:56 | 2012-03-19T20:08:42 | Python | UTF-8 | Python | false | false | 3,518 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'WBUser.date_registered'
db.add_column('wbn_registration_wbuser', 'date_registered', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 3, 28, 15, 42, 47, 667000)), keep_default=False)
def backwards(self, orm):
# Deleting field 'WBUser.date_registered'
db.delete_column('wbn_registration_wbuser', 'date_registered')
models = {
'rapidsms.backend': {
'Meta': {'object_name': 'Backend'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
},
'rapidsms.connection': {
'Meta': {'unique_together': "(('backend', 'identity'),)", 'object_name': 'Connection'},
'backend': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rapidsms.Backend']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identity': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'rapidsms.contact': {
'Meta': {'object_name': 'Contact'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'pin': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'primary_backend': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'contact_primary'", 'null': 'True', 'to': "orm['rapidsms.Backend']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'})
},
'wbn_registration.wbuser': {
'Meta': {'object_name': 'WBUser'},
'connection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rapidsms.Connection']"}),
'date_registered': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_registered': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'location_code': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'survey_question_ans': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'want_more_surveys': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
}
}
complete_apps = ['wbn_registration']
| [
"adewinter@dimagi.com"
] | adewinter@dimagi.com |
0054ca5cde322d97a8151893ce49bbc4034e3353 | 130a98632d2ab4c171503b79e455b7aa27a1dda4 | /models/research/object_detection/models/ssd_feature_extractor_test.py | 29c43e376c6167b61a256eb0812ee4d3bcee3ed5 | [
"Apache-2.0",
"MIT"
] | permissive | aboerzel/German_License_Plate_Recognition | d7fc0314295f5cf0c9d7ae9c93a795e3ef1c5787 | 6fc53292b1d3ce3c0340ce724c2c11c77e663d27 | refs/heads/master | 2023-01-30T18:08:37.339542 | 2023-01-07T07:41:36 | 2023-01-07T07:41:36 | 245,586,430 | 34 | 12 | MIT | 2023-01-07T07:41:37 | 2020-03-07T07:16:51 | Python | UTF-8 | Python | false | false | 9,695 | py | # Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base test class SSDFeatureExtractors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
import tf_slim as slim
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import test_utils
class SsdFeatureExtractorTestBase(test_case.TestCase):
def _build_conv_hyperparams(self, add_batch_norm=True):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: RELU_6
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
if add_batch_norm:
batch_norm_proto = """
batch_norm {
scale: false
}
"""
conv_hyperparams_text_proto += batch_norm_proto
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def conv_hyperparams_fn(self):
with slim.arg_scope([]) as sc:
return sc
@abstractmethod
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
num_layers=6,
use_keras=False,
use_depthwise=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
num_layers: number of SSD layers.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
use_depthwise: Whether to use depthwise convolutions.
Returns:
an ssd_meta_arch.SSDFeatureExtractor or an
ssd_meta_arch.SSDKerasFeatureExtractor object.
"""
pass
def _create_features(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
use_depthwise=False,
num_layers=6,
use_keras=False):
kwargs = {}
if use_explicit_padding:
kwargs.update({'use_explicit_padding': use_explicit_padding})
if use_depthwise:
kwargs.update({'use_depthwise': use_depthwise})
if num_layers != 6:
kwargs.update({'num_layers': num_layers})
if use_keras:
kwargs.update({'use_keras': use_keras})
feature_extractor = self._create_feature_extractor(
depth_multiplier,
pad_to_multiple,
**kwargs)
return feature_extractor
  def _extract_features(self,
                        image_tensor,
                        feature_extractor,
                        use_keras=False):
    """Runs the extractor on a preprocessed image batch.

    Keras-based extractors are callables; slim-based ones expose an
    ``extract_features`` method.  Returns the list of feature maps.
    """
    if use_keras:
      feature_maps = feature_extractor(image_tensor)
    else:
      feature_maps = feature_extractor.extract_features(image_tensor)
    return feature_maps
def check_extract_features_returns_correct_shape(self,
batch_size,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shapes,
use_explicit_padding=False,
num_layers=6,
use_keras=False,
use_depthwise=False):
with test_utils.GraphContextOrNone() as g:
feature_extractor = self._create_features(
depth_multiplier,
pad_to_multiple,
use_explicit_padding=use_explicit_padding,
num_layers=num_layers,
use_keras=use_keras,
use_depthwise=use_depthwise)
def graph_fn(image_tensor):
return self._extract_features(
image_tensor,
feature_extractor,
use_keras=use_keras)
image_tensor = np.random.rand(batch_size, image_height, image_width,
3).astype(np.float32)
feature_maps = self.execute(graph_fn, [image_tensor], graph=g)
for feature_map, expected_shape in zip(
feature_maps, expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def check_extract_features_returns_correct_shapes_with_dynamic_inputs(
self,
batch_size,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shapes,
use_explicit_padding=False,
num_layers=6,
use_keras=False,
use_depthwise=False):
with test_utils.GraphContextOrNone() as g:
feature_extractor = self._create_features(
depth_multiplier,
pad_to_multiple,
use_explicit_padding=use_explicit_padding,
num_layers=num_layers,
use_keras=use_keras,
use_depthwise=use_depthwise)
def graph_fn(image_height, image_width):
image_tensor = tf.random_uniform([batch_size, image_height, image_width,
3], dtype=tf.float32)
return self._extract_features(
image_tensor,
feature_extractor,
use_keras=use_keras)
feature_maps = self.execute_cpu(graph_fn, [
np.array(image_height, dtype=np.int32),
np.array(image_width, dtype=np.int32)
], graph=g)
for feature_map, expected_shape in zip(
feature_maps, expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def check_extract_features_raises_error_with_invalid_image_size(
self,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
use_keras=False,
use_depthwise=False):
with test_utils.GraphContextOrNone() as g:
batch = 4
width = tf.random.uniform([], minval=image_width, maxval=image_width+1,
dtype=tf.int32)
height = tf.random.uniform([], minval=image_height, maxval=image_height+1,
dtype=tf.int32)
shape = tf.stack([batch, height, width, 3])
preprocessed_inputs = tf.random.uniform(shape)
feature_extractor = self._create_features(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
def graph_fn():
feature_maps = self._extract_features(
preprocessed_inputs,
feature_extractor,
use_keras=use_keras)
return feature_maps
if self.is_tf2():
with self.assertRaises(ValueError):
self.execute_cpu(graph_fn, [], graph=g)
else:
with self.assertRaises(tf.errors.InvalidArgumentError):
self.execute_cpu(graph_fn, [], graph=g)
def check_feature_extractor_variables_under_scope(self,
depth_multiplier,
pad_to_multiple,
scope_name,
use_keras=False,
use_depthwise=False):
variables = self.get_feature_extractor_variables(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
for variable in variables:
self.assertTrue(variable.name.startswith(scope_name))
def get_feature_extractor_variables(self,
depth_multiplier,
pad_to_multiple,
use_keras=False,
use_depthwise=False):
g = tf.Graph()
with g.as_default():
feature_extractor = self._create_features(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3))
self._extract_features(
preprocessed_inputs,
feature_extractor,
use_keras=use_keras)
return g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
| [
"andreas.boerzel@gmx.de"
] | andreas.boerzel@gmx.de |
c8ceb6e153e1b04a42f5ea879a910a36e7a62c50 | 0655ec7185943ba633fa2ec9118a0976b1b8d8ae | /all/fasle sanj.py | 0eade886c0b8dd6ae27bcaf25ee551b9505ce4d3 | [] | no_license | mohsenna38/opencv-all | be35bb2ebb4d830344adb26594e5b3270bdd0962 | f6ce87b9d4b5c611aaf27fcc0febd664f0cb7bb4 | refs/heads/master | 2020-04-18T09:47:40.900050 | 2019-02-18T21:18:45 | 2019-02-18T21:18:45 | 167,447,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | x1 = int(input("x1 yek ro bgu"))
print(x1)
y1 = int(input("y1 yek ro bgu"))
print(y1)
x2 = int(input("x2 yek ro bgu"))
print(x2)
y2 = int(input("y2 yek ro bgu"))
print(y2)
def jamfasle(x1, y1, x2, y2):
    """Return the Euclidean distance between (x1, y1) and (x2, y2).

    Bug fix: the original returned ``yy + xx ** 0.5`` -- exponentiation
    binds tighter than addition, so it computed dy**2 + |dx| instead of
    sqrt(dx**2 + dy**2).  The sum of squares must be parenthesised before
    taking the square root.
    """
    dx = x2 - x1
    dy = y2 - y1
    return (dx ** 2 + dy ** 2) ** 0.5
fasle = jamfasle(x1,y1,x2,y2)
print(fasle)
| [
"mohsennasiri8@gmail.com"
] | mohsennasiri8@gmail.com |
38d602e7c2e5cc1c2781c19e675a92a2426616e1 | 249811e5db6ad7dcccab926a09e1047e73aa875b | /cfg.py | cfd18f12f8f8dcf26a37eef2d710c0050c2d9bce | [
"MIT"
] | permissive | abhik375/Audio-Classification | 76e56c6b5f0a35176d67fc876952e6abb747c258 | 0c3540aea8fff1e28c48ad5d22f2f791cfe9a6b1 | refs/heads/main | 2023-03-28T04:30:54.851519 | 2021-04-01T20:00:35 | 2021-04-01T20:00:35 | 353,515,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | import os
#convolution mode
class Config:
    """Feature-extraction / model configuration.

    Derives the hop size (one tenth of a second worth of samples) and the
    on-disk model / pickle paths from *mode*.
    """
    def __init__(self, mode='conv', nfilt=26, nfeat=13, nfft=512, rate=16000):
        self.mode = mode
        self.nfilt = nfilt
        self.nfeat = nfeat
        self.nfft = nfft
        self.rate = rate
        self.step = int(rate / 10)  # samples per 100 ms step
        self.model_path = os.path.join('models', '{}.model'.format(mode))
        self.p_path = os.path.join('pickles', '{}.p'.format(mode))
"abhikmahato65@gmail.com"
] | abhikmahato65@gmail.com |
407b4abf04e3855afbac1ad0454b325e0d2d990a | 594055d2cf0ad9dbe9e06a219a7ddb07c4410459 | /Amazon/String/49-Group_Anagrams.py | 03f945a848e6f131bb80beef53cbdf3225ba3afa | [] | no_license | ankurgokhale05/LeetCode | 975ed3a496e039607773b5e94f1ff517fc01644d | 0f3aacf35a28e8b85db1be0a7f945f2d7ece0bfc | refs/heads/master | 2023-07-04T01:46:12.035053 | 2021-08-15T02:14:15 | 2021-08-15T02:14:15 | 275,960,404 | 0 | 2 | null | 2020-10-01T01:49:48 | 2020-06-30T00:55:13 | Python | UTF-8 | Python | false | false | 887 | py | '''
Method: To sort the string and compare each sorted string with the original string if same they are appended to same list
Time Complexity: O(NKlog K) where N is the length of strs, and K is the maximum length of a string in strs. The outer loop has complexity O(N) as we iterate through each string. Then, we sort each string in O(Klog K) time.
Space Complexity: O(NK), the total information content stored in ans
'''
class Solution:
    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
        """Group the words in *strs* into lists of mutual anagrams.

        Each word is bucketed under its sorted character sequence: two words
        are anagrams iff their sorted forms are equal.

        Fixes vs. the original: the parameter annotation said ``str`` instead
        of ``List[str]``, and the local name ``dict`` shadowed the builtin.

        Time:  O(N * K log K) for N words of maximum length K (each sorted).
        Space: O(N * K) for the buckets.
        """
        groups: Dict[str, List[str]] = {}
        for word in strs:
            key = "".join(sorted(word))
            # setdefault replaces the explicit membership test + two branches.
            groups.setdefault(key, []).append(word)
        return list(groups.values())
| [
"ankurgokhale@Ankurs-MBP.home"
] | ankurgokhale@Ankurs-MBP.home |
a67e65154da47a404819a48b3876530ac6a2f615 | 5a07afccf0445fb2e8404356d926d4facfbd69ee | /qloop/loop.py | 5aaa78a3f07ffcc42724688b347a197d0c94003e | [] | no_license | pfreixes/qloop | 10ef88d551a58d88d92612ef06b6d49f8b1bbfbf | f4af938630f298672ac8b17fb21396f4c4ab3c4e | refs/heads/master | 2020-09-15T15:20:02.856519 | 2017-06-15T21:36:38 | 2017-06-15T21:36:38 | 94,479,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,517 | py | """
Loop based on the default loop that implements a fair callback
scheduling based on queues. Each queue stands for a partition which
are isolated from one another. The user can decide to spawn a coroutine and
its future asynchronous resources into a specific partition using the new
loop method called `spawn`, for example:
>>> async def foo():
>>>     await asyncio.sleep(0)
>>>
>>> async def main(loop):
>>> loop.spawn(foo())
>>>
>>> loop = asyncio.get_event_loop()
>>> loop.run_until_complete(main(loop))
Future tasks and IO file descriptors created by spawned coroutines will be
allocated at the same partition, sharing the same reactor queue.
The different queues belonging to each partition are iterated in a round robin
way, meanwhile each queue is handled as a regular FIFO queue. If, and only if,
a queue runs out of callbacks the IO process is executed again, processing only
those events related to the file descriptors that belong to that specific
partition that owns that queue. The same applies to the scheduled calls.
"""
import collections
import heapq
import logging
from asyncio import Task
from asyncio import SelectorEventLoop
from asyncio import selectors
from asyncio.log import logger
from asyncio.base_events import _format_handle
from asyncio.base_events import _MIN_SCHEDULED_TIMER_HANDLES
from asyncio.base_events import _MIN_CANCELLED_TIMER_HANDLES_FRACTION
from asyncio import events
from asyncio.events import BaseDefaultEventLoopPolicy
__all__ = ("Loop", "EventLoopPolicy")
_ROOT_PARTITION = object()
def _find_partition(loop):
    """Return the partition that owns the current execution context.

    Strategy:
      1 - if called inside the scope of a task that carries a ``partition``
          attribute, use that task's partition;
      2 - otherwise fall back to the shared root partition.

    NOTE(review): ``Task.current_task`` is deprecated since Python 3.7 and
    removed in 3.9 (``asyncio.current_task()`` replaces it); this module
    targets the older asyncio API throughout.
    """
    try:
        return Task.current_task(loop=loop).partition
    except AttributeError:
        # No running task (current_task() returned None) or the task has no
        # partition attribute: out of a task scope, use the root partition.
        return _ROOT_PARTITION
class _Partition:
def __init__(self):
self.tasks = set()
self.readers = set()
self.writers = set()
self.handles = collections.deque()
self.scheduled = []
class Loop(SelectorEventLoop):
def __init__(self, selector=None):
self._partitions = {
_ROOT_PARTITION: _Partition()
}
self._p_to_process = set((_ROOT_PARTITION,))
self._task_factory = self._inherit_queue
super().__init__(selector)
    def spawn(self, coro, partition=None):
        """Run *coro* in a specific, isolated partition.

        If *partition* is not given the new task itself is used as the key of
        a brand-new partition, so every asynchronous resource the coroutine
        creates later (tasks, readers, writers, timers) is grouped under it.

        Return the task object wrapping *coro*.
        """
        task = Task(coro, loop=self)
        partition = partition if partition else task
        task.partition = partition
        if task._source_traceback:
            # Hide this frame from the task's creation traceback, mirroring
            # what BaseEventLoop.create_task does.
            del task._source_traceback[-1]
        try:
            self._partitions[partition].tasks.add(task)
        except KeyError:
            # First task of a fresh partition: create its bookkeeping bucket
            # and mark the partition as runnable.
            self._partitions[partition] = _Partition()
            self._partitions[partition].tasks.add(task)
            self._p_to_process.add(partition)
        return task
def _inherit_queue(self, coro):
"""Create a new task inheriting the partition
assigned to the current task.
If there is no current task, or the curren task
does not have any queue will be assinged to the
root one.
Return a task object
"""
task = Task(coro, loop=self)
task.partition = _find_partition(self)
if task._source_traceback:
del task._source_traceback[-1]
self._partitions[task.partition].add_task(task)
task.add_done_callback(
self._partitions[task.partition].remove_task,
task
)
return task
def _call_soon(self, callback, args):
# find the partition by following the next strategies
# 1 - The callback is a method realated to a task
# 2 - If not, usual strategy
try:
if isinstance(callback.__self__, Task):
partition = callback.__self__.partition
else:
partition = _find_partition(self)
except AttributeError:
partition = _find_partition(self)
handle = events.Handle(callback, args, self)
if handle._source_traceback:
del handle._source_traceback[-1]
self._partitions[partition].handles.append(handle)
return handle
def _add_reader(self, fd, callback, *args):
super()._add_reader(fd, callback, *args)
partition = _find_partition(self)
self._partitions[partition].readers.add(fd)
def _remove_reader(self, fd):
super()._remove_reader(fd)
partition = _find_partition(self)
self._partitions[partition].readers.remove(fd)
def _add_writer(self, fd, callback, *args):
super()._add_reader(fd, callback, *args)
partition = _find_partition(self)
self._partitions[partition].writers.add(fd)
def _remove_writer(self, fd):
super()._remove_writer(fd)
partition = _find_partition(self)
self._partitions[partition].writers.remove(fd)
def _process_events(self, event_list):
for key, mask in event_list:
fileobj, (reader, writer) = key.fileobj, key.data
for partition in self._p_to_process:
if fileobj in self._partitions[partition].readers or\
fileobj in self._partitions[partition].writers:
if mask & selectors.EVENT_READ and reader is not None:
if reader._cancelled:
self._remove_reader(fileobj)
else:
self._partitions[partition].handles.append(reader)
if mask & selectors.EVENT_WRITE and writer is not None:
if writer._cancelled:
self._remove_writer(fileobj)
else:
self._partitions[partition].handles.append(writer)
break
def call_at(self, when, callback, *args):
"""Like call_later(), but uses an absolute time.
Absolute time corresponds to the event loop's time() method.
"""
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_at')
timer = events.TimerHandle(when, callback, args, self)
if timer._source_traceback:
del timer._source_traceback[-1]
partition = _find_partition(self)
heapq.heappush(self._partitions[partition].scheduled, timer)
timer._scheduled = True
return timer
def _run_once(self):
"""Run one full iteration of the event loop.
This calls all currently ready callbacks, polls for I/O,
schedules the resulting callbacks, and finally schedules
'call_later' callbacks.
Basically a copy of the original one, but running ready
callbacks applying a round robin strategy between the differnet
partitions. Once a queue, if it had at least one callback, runs out
of callbacks the IO loop is requested again for its IO and time
handles.
"""
sched_count = sum(
[len(self._partitions[p].scheduled) for p in self._p_to_process])
if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
self._timer_cancelled_count / sched_count > _MIN_CANCELLED_TIMER_HANDLES_FRACTION): # noqa
for partition in self._p_to_process:
# Remove delayed calls that were cancelled if their number
# is too high
new_scheduled = []
for handle in self._partitions[partition].scheduled:
if handle._cancelled:
handle._scheduled = False
else:
new_scheduled.append(handle)
heapq.heapify(new_scheduled)
self._partitions[partition].scheduled = new_scheduled
self._timer_cancelled_count = 0
else:
for partition in self._p_to_process:
# Remove delayed calls that were cancelled from head of queue.
while self._partitions[partition].scheduled and\
self._partitions[partition].scheduled[0]._cancelled:
self._timer_cancelled_count -= 1
handle = heapq.heappop(
self._partitions[partition].scheduled
)
handle._scheduled = False
timeout = None
any_handles = any(
[bool(self._partitions[p].handles) for p in self._partitions]
)
any_scheduled = any(
[bool(self._partitions[p].scheduled) for p in self._p_to_process]
)
if any_handles or self._stopping:
timeout = 0
elif any_scheduled:
# Compute the desired timeout.
when = min(
[self._partitions[p].scheduled[0]._when for p in self._p_to_process] # noqa
)
timeout = max(0, when - self.time())
if self._debug and timeout != 0:
t0 = self.time()
event_list = self._selector.select(timeout)
dt = self.time() - t0
if dt >= 1.0:
level = logging.INFO
else:
level = logging.DEBUG
nevent = len(event_list)
if timeout is None:
logger.log(level, 'poll took %.3f ms: %s events',
dt * 1e3, nevent)
elif nevent:
logger.log(level,
'poll %.3f ms took %.3f ms: %s events',
timeout * 1e3, dt * 1e3, nevent)
elif dt >= 1.0:
logger.log(level,
'poll %.3f ms took %.3f ms: timeout',
timeout * 1e3, dt * 1e3)
else:
event_list = self._selector.select(timeout)
self._process_events(event_list)
# Handle 'later' callbacks that are ready.
end_time = self.time() + self._clock_resolution
for partition in self._p_to_process:
while self._partitions[partition].scheduled:
handle = self._partitions[partition].scheduled[0]
if handle._when >= end_time:
break
handle = heapq.heappop(self._partitions[partition].scheduled)
handle._scheduled = False
self._partitions[partition].handles.append(handle)
partitions = [
p for p in self._partitions if self._partitions[p].handles
]
ntodo = max(
[len(self._partitions[p].handles) for p in self._partitions]
)
cnt = 0
p_to_process = set()
handles_executed_per_partition = {p: 0 for p in self._partitions}
while not p_to_process and cnt < ntodo:
for partition in partitions:
try:
handle = self._partitions[partition].handles.popleft()
except IndexError:
if handles_executed_per_partition[partition] > 0:
p_to_process.add(partition)
continue
else:
handles_executed_per_partition[partition] += 1
if handle._cancelled:
continue
if self._debug:
try:
self._current_handle = handle
t0 = self.time()
handle._run()
dt = self.time() - t0
if dt >= self.slow_callback_duration:
logger.warning('Executing %s took %.3f seconds',
_format_handle(handle), dt)
finally:
self._current_handle = None
else:
handle._run()
cnt += 1
if p_to_process:
self._p_to_process = p_to_process
else:
# keep with the same ones, we didnt run the queues.
# FIXME : it can create starvation
pass
handle = None # Needed to break cycles when an exception occurs.
class EventLoopPolicy(BaseDefaultEventLoopPolicy):
_loop_factory = Loop
| [
"pau.freixes@skyscanner.net"
] | pau.freixes@skyscanner.net |
0e055d8bdca93c5d1b469427dacbba4a2a9d9dbe | 8ac4a3fd724a35b5122d8f05184985089dc8c06a | /main.py | b7b75a0d34c92aa44e2db713058b01442e5af4c8 | [] | no_license | fanpika/PythonScraping | 54c139e9828541e323a7662b884dc130922dc566 | ff12999c844a693380b99fdbcb5a459b4902c9a8 | refs/heads/main | 2023-02-05T23:34:15.786645 | 2020-12-19T13:59:36 | 2020-12-19T13:59:36 | 322,859,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | # -*- encoding: utf-8 -*-
import os
from indeed import get_jobs as get_indeed_jobs
from stackoverflow import get_jobs as get_so_jobs
from save import save_to_file
os.system('clear')
indeed_jobs = get_indeed_jobs()
# print(indeed_jobs)
so_jobs = get_so_jobs()
# print(so_jobs)
jobs = indeed_jobs + so_jobs
# print(jobs)
save_to_file(jobs)
# save_to_file([]) | [
"choihwang@gmail.com"
] | choihwang@gmail.com |
b0863e4ac2a46ab17c080627e50e262478533bc2 | 38b2cf51cbd5cdbb062bf1393dae12237dd4f071 | /斐波那契数列.py | 81303d91ceb73b31b4e07d0d6681fd610614c679 | [] | no_license | Wsssssss/target-offer | 4986ab6ec45a0dd123d4d578c041057df98f81ce | c595e5abee5244602adf9a285380bec9816e17e5 | refs/heads/master | 2020-06-17T03:33:46.318935 | 2019-09-16T15:48:57 | 2019-09-16T15:48:57 | 195,782,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | # 题目描述
# 大家都知道斐波那契数列,现在要求输入一个整数n,请你输出斐波那契数列的第n项(从0开始,第0项为0)n<=39
class Solution:
def Fibonacci(self, n):
# write code here
a = 0
b = 1
if n <= 1:
return n
if n == 2:
return 1
else:
for i in range(n):
a, b = b, a + b
return a
| [
"569467716@qq.com"
] | 569467716@qq.com |
a19341832df5aa7bd0970ac6ef6b9c9a7279c21a | 73b5d880fa06943c20ff0a9aee9d0c1d1eeebe10 | /tinyos-1.x/contrib/ucb/apps/LandmarkRouting/lossy.py | 404b3df55a95a17dbacc58e49ca3b896c54ce7b8 | [
"Intel"
] | permissive | x3ro/tinyos-legacy | 101d19f9e639f5a9d59d3edd4ed04b1f53221e63 | cdc0e7ba1cac505fcace33b974b2e0aca1ccc56a | refs/heads/master | 2021-01-16T19:20:21.744228 | 2015-06-30T20:23:05 | 2015-06-30T20:23:05 | 38,358,728 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | from simcore import *
if not sim.__driver.pluginManager.getPlugin("RadioModelGuiPlugin").isRegistered():
print "Please create radio model first using the Radio Model Plugin."
else:
pf = open('packet','w')
space = ' '
end = ' 0.0 0.0\n'
for i in motes:
for j in motes:
s = str(i.getID()) + space + str(j.getID()) + space
if i.getID() == j.getID():
continue
elif i.getID() == 1 or i.getID() == 0:
continue
elif j.getID() == 1 or j.getID() == 0:
continue
elif radio.getLossRate(i.getID(), j.getID()) < 1.0:
s += str(radio.getLossRate(i.getID(),j.getID())) + end
pf.write(s)
pf.flush()
pf.close()
| [
"lucas@x3ro.de"
] | lucas@x3ro.de |
5578900efa36b30cfa069bd50ea26a3a99f34f21 | 49756cfa9f803fd0a92518bad520f7faf1fb20a4 | /extract/create_wizard_rows.py | fbed97afdc83c9e75aff314e6472960f7ca8290d | [
"Apache-2.0"
] | permissive | bcgov/cas-ggircs-ciip-2018-extract | 84bd833600f4facea15d08184aae582d073af2e1 | 0e38aad60ea6a726c2f010ad6046cb9ce91db0c2 | refs/heads/develop | 2021-08-30T08:24:57.831794 | 2021-08-24T16:46:24 | 2021-08-24T16:46:24 | 202,811,489 | 0 | 0 | Apache-2.0 | 2021-08-24T16:46:25 | 2019-08-16T23:25:34 | Python | UTF-8 | Python | false | false | 1,129 | py | def insert(cursor, data, unique_slug):
# Idempotence check: if a row exists in ciip_application_wizard matching the unique_slug do not create it
cursor.execute(
'''
select form_id from ggircs_portal.ciip_application_wizard caw join ggircs_portal.form_json fj on caw.form_id=fj.id and fj.slug = %s;
''',
(unique_slug,)
)
res = cursor.fetchone()
if res is None:
statement = """
insert into ggircs_portal.ciip_application_wizard (form_id, form_position, is_active)
values ((select id from ggircs_portal.form_json where slug=%s), %s, false)
"""
cursor.execute((statement), data)
def create_2018_wizard_rows(cursor):
# Admin form
admin_data = (
"admin-2018",
0
)
insert(cursor, admin_data, 'admin-2018')
# Emission form
emission_data = (
"emission-2018",
1
)
insert(cursor, emission_data, 'emission-2018')
# Admin form
fuel_data = (
"fuel-2018",
2
)
insert(cursor, fuel_data, 'fuel-2018')
# Admin form
production_data = (
"production-2018",
3
)
insert(cursor, production_data, 'production-2018')
| [
"dylan@button.is"
] | dylan@button.is |
2ea1613cc3d1742d83136ec572bd256a812ef42a | c2e5a28c11b7e82ba9c92530c4a3ac12d59ad3b4 | /swarm/core/objectpath/utils/timeutils.py | 9ec09308558fb96f5541add19f23ba31790e44b4 | [] | no_license | waiverson/FitNesse_sample | 2109d14e20e012f81804ad155876f50c3e5c1f6c | d1e73915db7e2874d7ef87712110869ea00605b9 | refs/heads/master | 2021-01-17T23:17:11.268579 | 2018-01-05T08:02:38 | 2018-01-05T08:02:38 | 44,143,220 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,597 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of ObjectPath released under AGPL v3 license.
# Copyright (C) 2008-2010 Adrian Kalbarczyk
import datetime
import sys, os
try:
import pytz
TIMEZONE_CACHE={
"UTC":pytz.utc
}
except ImportError:
if os.isatty(sys.stdin.fileno()) and sys.stdout.isatty():
print("WARNING! pytz is not installed. Localized times are not supported.")
HOURS_IN_DAY=24
now=datetime.datetime.now
def round9_10(n):
i=int(n)
if n-i>0.9:
return i+1
return i
# TODO its 31 minuta, should be 31 minut - probably done
def age(date, reference=None, lang="en"):
if reference is None:
reference=now()
td=reference-date #TimeDelta
days=float(td.days)
langIsPL=lang=="pl"
if days:
years=round9_10(days/356)
if years:
if langIsPL:
return (years, years is 1 and "rok" or years<5 and "lata" or "lat")
else:
return (years, years is 1 and "year" or "years")
months=round9_10(days/30)
if months:
if langIsPL:
return (months, months is 1 and "miesiąc" or 1<months<5 and "miesiące" or "miesięcy")
else:
return (months, months is 1 and "month" or "months")
weeks=round9_10(days/7)
if weeks:
if langIsPL:
return (weeks, weeks is 1 and "tydzień" or weeks%10 in [0,1,5,6,7,8,9] and "tygodni" or "tygodnie")
else:
return (weeks, weeks is 1 and "week" or "weeks")
days=int(days)
if langIsPL:
return (days, days is 1 and "dzień" or "dni")
else:
return (days, days is 1 and "day" or "days")
seconds=float(td.seconds)
if seconds is not None:
hours=round9_10(seconds/3600)
if hours:
if langIsPL:
return (hours, hours is 1 and "godzina" or 1<hours<5 and "godziny" or "godzin")
else:
return (hours, hours is 1 and "hour" or "hours")
minutes=round9_10(seconds/60)
if minutes:
if langIsPL:
return (minutes, minutes is 1 and "minuta" or 1<minutes<5 and "minuty" or "minut")
else:
return (minutes, minutes is 1 and "minute" or "minutes")
seconds=int(seconds)
if langIsPL:
return (seconds, seconds is 1 and "sekunda" or 1<seconds<5 and "sekundy" or "sekund")
else:
return (seconds, seconds is 1 and "second" or "seconds")
# return (0,"seconds")
def date(d):
if d:
d=d[0]
t=type(d)
if t is datetime.datetime:
return datetime.date(d.year,d.month,d.day)
if t in (tuple,list):
return datetime.date(*d)
return datetime.date.today()
def date2list(d):
return [d.year,d.month,d.day]
def time(d):
if not d or not d[0]:
d=now()
else:
d=d[0]
t=type(d)
if t in (tuple,list):
return datetime.time(*d)
return datetime.time(d.hour,d.minute,d.second,d.microsecond)
def time2list(t):
return [t.hour,t.minute,t.second,t.microsecond]
def addTimes(fst,snd):
l1=time2list(fst)
l2=time2list(snd)
t=[l1[0]+l2[0],l1[1]+l2[1],l1[2]+l2[2],l1[3]+l2[3]]
t2=[]
one=0
ms=t[3]
if ms>=10000:
t2.append(ms-10000)
one=1
else:
t2.append(ms)
for i in (t[2],t[1]):
i=i+one
one=0
if i>=60:
t2.append(i-60)
one=1
# elif i==60:
# t2.append(0)
# one=1
else:
t2.append(i)
hour=t[0]+one
if hour>=HOURS_IN_DAY:
t2.append(hour-HOURS_IN_DAY)
else:
t2.append(hour)
return datetime.time(*reversed(t2))
def subTimes(fst,snd):
l1=time2list(fst)
l2=time2list(snd)
t=[l1[0]-l2[0],l1[1]-l2[1],l1[2]-l2[2],l1[3]-l2[3]]
t2=[]
one=0
ms=t[3]
if ms<0:
t2.append(10000+ms)
one=1
else:
t2.append(ms)
for i in (t[2],t[1]):
i=i-one
one=0
if i>=0:
t2.append(i)
else:
t2.append(60+i)
one=1
hour=t[0]-one
if hour<0:
t2.append(HOURS_IN_DAY+hour)
else:
t2.append(hour)
return datetime.time(*reversed(t2))
def dateTime(arg):
"""
d may be:
- datetime()
- [y,m,d,h[,m[,ms]]]
- [date(),time()]
- [[y,m,d],[h,m,s,ms]]
and permutations of above
"""
l=len(arg)
if l is 1:
dt=arg[0]
typed=type(dt)
if typed is datetime.datetime:
return dt
if typed in (tuple,list) and len(dt) in [5,6,7]:
return datetime.datetime(*dt)
if l is 2:
date=time=None
if type(arg[0]) is datetime.date:
d=arg[0]
date=[d.year,d.month,d.day]
if type(arg[0]) in (tuple,list):
date=arg[0]
if type(arg[1]) is datetime.time:
t=arg[1]
time=[t.hour,t.minute,t.second,t.microsecond]
if type(arg[1]) in (tuple,list):
time=arg[1]
return datetime.datetime(*date+time)
# dt - dateTime, tzName is e.g. 'Europe/Warsaw'
def UTC2local(dt,tzName="UTC"):
try:
if tzName in TIMEZONE_CACHE:
tz=TIMEZONE_CACHE[tzName]
else:
tz=TIMEZONE_CACHE[tzName]=pytz.timezone(tzName)
return TIMEZONE_CACHE["UTC"].localize(dt).astimezone(tz)
except Exception:
return dt
| [
"237476161@qq.com"
] | 237476161@qq.com |
c470d642b42855dd66e9d3b205461e6df341714c | c2e1ded98eb0478979c43d2da662668f0ed5e964 | /count.py | cc24cecc9b743ca0b972d7ad11c4fd5539c0e991 | [] | no_license | ntubiolin/HuaYan | 6d224bb4dc76fef35a4c006db8a04e060ed339c1 | ce611cfd71204515aa596ac10eb248c966b31235 | refs/heads/master | 2022-12-20T11:07:50.655421 | 2020-09-30T17:08:21 | 2020-09-30T17:08:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | """
八十卷《大方廣佛華嚴經》有幾個字呢?
"""
from utils import Lines_in_sutra
count = 0
for volume in range(1, 81):
readdata = Lines_in_sutra(volume)
for row in range(9, len(readdata)):
line = readdata[row]
count+=len(line)-1 # 去掉換行符
print("《大方廣佛華嚴經》總字數:{}字。".format(count)) | [
"r08942078@ntu.edu.tw"
] | r08942078@ntu.edu.tw |
43ba977cd9af1edca05f2fa5553e39f9b6ee6b0a | 9e627a5cba5a32eb48d86096ffc100753452efd6 | /settings/base.py | 3587621ce86030fa3e92a1e71fa2101457895788 | [] | no_license | Pysuper/CMS | 5c1c673b842d08ad91aa678c0bdd81a58434dfaf | db2407feac5e1bdd6ed327bc25985e326ece51e7 | refs/heads/master | 2023-01-20T03:30:32.207699 | 2020-11-05T08:53:53 | 2020-11-05T08:53:53 | 288,090,360 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,756 | py | import datetime
import os
import sys
from utils import log_theme
######################################## Django 基础配置 ########################################
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
SECRET_KEY = 'qwe_123123'
DEBUG = True
ALLOWED_HOSTS = ['0.0.0.0', '127.0.0.1', 'localhost']
INSTALLED_APPS = [
'simpleui',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'rest_framework_jwt',
'rest_framework.authtoken',
'users.apps.UsersConfig',
'voice.apps.VoiceConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'middleware.auth.AuthMiddleware' # 手动实现中间件
"middleware.auth.AuthMiddleWare",
]
ROOT_URLCONF = 'cms.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join('templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [ # 模板中间件
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cms.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': '127.0.0.1', # 数据库主机
'PORT': 3306, # 数据库端口
'USER': 'root', # 数据库用户名
'PASSWORD': 'root', # 数据库用户密码
'NAME': 'cms',
'OPTIONS': {
'read_default_file': os.path.dirname(os.path.abspath(__file__)) + '/my.cnf',
'init_command': "SET sql_mode='STRICT_TRANS_TABLES,"
"NO_ZERO_IN_DATE,NO_ZERO_DATE,"
"ERROR_FOR_DIVISION_BY_ZERO,"
"NO_AUTO_CREATE_USER'",
},
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False, # 是否禁用已经存在的日志器
'formatters': { # 日志信息显示的格式
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(lineno)d %(message)s'
},
'simple': {
'format': '[%(levelname)s] %(message)s'
},
},
'filters': { # 对日志进行过滤
'require_debug_true': { # django在debug模式下才输出日志
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': { # 日志处理方法
'console': { # 向终端中输出日志
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'file': { # 向文件中输出日志
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(BASE_DIR, "logs/cms.log"), # 日志文件的位置
'maxBytes': 300 * 1024 * 1024,
'backupCount': 10,
'formatter': 'verbose'
},
},
'loggers': { # 日志器
'django': { # 定义了一个名为django的日志器
'handlers': ['console', 'file'], # 可以同时向终端与文件中输出日志
'propagate': True, # 是否继续传递日志信息
'level': 'INFO', # 日志器接收的最低日志级别
},
}
}
AUTH_PASSWORD_VALIDATORS = [
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', },
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', },
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', },
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', },
]
CACHES = { # 可以使用不同的配置,实现读写分离
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://0.0.0.0:6379/0", # 安装redis的主机的 IP 和 端口
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"CONNECTION_POOL_KWARGS": {
"max_connections": 1000,
"encoding": 'utf-8'
},
"PASSWORD": "root" # redis密码
}
}
}
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
LOGIN_URL = '/login/'
STATIC_URL = '/static/'
# STATIC_ROOT = os.path.join(BASE_DIR, 'static') # TODO: uwsgi + Nginx时, 使用ROOT
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), ] # TODO: 使用runserver时候,使用DIRS
AUTH_USER_MODEL = 'users.User' # 指明使用自定义的用户模型类
CORS_ORIGIN_WHITELIST = (
'https://127.0.0.1:8001',
'https://localhost:8001',
'https://127.0.0.1:8001',
'http://127.0.0.1:8001',
'http://localhost:8001',
'http://192.168.43.230:8001'
)
CORS_ALLOW_CREDENTIALS = True # 允许携带cookie
######################################## DRF 信息配置 ########################################
REST_FRAMEWORK = {
'EXCEPTION_HANDLER': 'utils.exceptions.exception_handler', # 异常处理
'DEFAULT_PAGINATION_CLASS': 'utils.pagination.StandardResultsSetPagination', # 分页
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
}
REST_FRAMEWORK_EXTENSIONS = {
'DEFAULT_USE_CACHE': 'default', # 缓存存储
'DEFAULT_CACHE_RESPONSE_TIMEOUT': 60 * 60, # 缓存时间
}
JWT_AUTH = {
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=1), # 指明token的有效期
'JWT_RESPONSE_PAYLOAD_HANDLER': 'users.utils.jwt_response_payload_handler', # 指定使用的JWT返回的函数
}
AUTHENTICATION_BACKENDS = [
'users.utils.UsernameMobileAuthBackend', # JWT用户认证登录
'django.contrib.auth.backends.ModelBackend' # Admin用户登录
]
WHITE_REGEX_URL_LIST = [
"/",
"/favicon.ico",
"/user/logout/",
"/user/sms/",
"/user/register/",
"/user/image/code/",
"/user/login/sms/",
"/user/login/user/",
"/users/search/",
"/users/card/",
]
######################################## Simpleui 配置 ########################################
SIMPLEUI_HOME_INFO = False # 服务器信息
SIMPLEUI_ANALYSIS = False # 不收集分析信息
SIMPLEUI_STATIC_OFFLINE = True # 离线模式
# SIMPLEUI_LOGO = 'http://127.0.0.1:8899/favicon.ico' # LOGO
SIMPLEUI_ICON = {'Users': 'fab fa-apple', '任务信息': 'fas fa-user-tie'} # 自定义图标
# SIMPLEUI_CONFIG = {
# 'system_keep': False,
# 'menu_display': ['User'],
# 'dynamic': True,
# 'menus': [
# {
# 'app': 'User',
# 'name': 'User',
# 'icon': 'fa fa-audio-description',
# 'models': [
# {'name': '抱怨工况', 'icon': 'far fa-circle', 'url': 'audio/status/'},
# {'name': '抱怨描述', 'icon': 'far fa-circle', 'url': 'audio/description/'},
# {'name': '抱怨音频', 'icon': 'far fa-circle', 'url': 'audio/audio/'},
# {'name': '抱怨频率', 'icon': 'far fa-circle', 'url': 'audio/frequency/'},
# ]
# }
# ]
# }
######################################## 验证码、短信配置 ########################################
# 图片验证码中字体文件的路径
TTF_PATH = os.path.join(BASE_DIR, 'static/ttf/')
SMS_TEMPLATES = {
"register": "682844",
"login": "682844",
"update": "682844",
"more": ["682844", "682843", "682840"]
}
TENCENT_SMS_APPID = 1400407994
TENCENT_SMS_APPKEY = "0dd1c9e4004fe503700c08d4e4d5098e"
TENCENT_SMS_SIGN = "郑兴涛个人公众号"
######################################## QQ 登录配置 ########################################
QQ_CLIENT_ID = '1111087317' # ID
QQ_CLIENT_SECRET = 'jS5yEvAmur7pXGMp' # 密钥
QQ_REDIRECT_URI = 'http://www.xxxx.xxx/oauth_callback.html' # 回调域 | [
"182176453@qq.com"
] | 182176453@qq.com |
e10b5edbd1bf008072ca0b8acbd9a20b92405ab0 | a91a1752fe70dcb4f2cd26b88bdc281ff1bc5d34 | /upload_form/migrations/0001_initial.py | 9bb3eecc2633101177c1da5cf072e8ef2075319a | [
"MIT"
] | permissive | nineharker/AssetLibrary | f78cff0c41fe35a03176a141a3f46474f68b39ce | 02da9f7360cdd3f96ea425aebc701f01803a7c0c | refs/heads/master | 2022-11-28T16:39:05.145148 | 2020-02-21T06:51:03 | 2020-02-21T06:51:03 | 235,040,105 | 0 | 0 | MIT | 2022-11-22T05:20:25 | 2020-01-20T07:11:19 | Python | UTF-8 | Python | false | false | 721 | py | # Generated by Django 2.0.1 on 2018-05-29 04:19
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ImageFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('owner', models.CharField(max_length=20)),
('file_name', models.CharField(max_length=50)),
('upload_time', models.DateTimeField(default=datetime.datetime.now)),
('image', models.ImageField(upload_to='images')),
],
),
]
| [
"nineharker45@gmail.com"
] | nineharker45@gmail.com |
7e967e17a8cee14926f90a19aa87d873124a7167 | 8e20275fa88003747895bb85c055b8a2180c327d | /maximum_modular/my_toolbox.py | 00ba46ee3d8a62a9563e2add3b4cfac109c2bf80 | [] | no_license | jfasch/2021-02-22 | 859a5bc3a356a4c1fcf367c40b80f9d4db5580d0 | 6ccd3c3b8dd63556571377bb35d849aad88bd640 | refs/heads/main | 2023-03-13T04:47:49.800721 | 2021-02-24T11:08:11 | 2021-02-24T11:08:11 | 341,463,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | def maximum(l, r):
if l < r:
return r
else:
return l
| [
"jf@faschingbauer.co.at"
] | jf@faschingbauer.co.at |
a8513c2bcaf6ba990351727cca1efa8221a26ec3 | a4b2284d4f11c1f447c19b6ee22603c32f7bd70c | /part3.py | 92af42b9d0fb8c90cf3b5c3d736a09ecae4cee40 | [] | no_license | gagemm1/Challenges | 0c60927c56cdbd8a223e147de305284521d04cd7 | 9c24abe497dcf44f812af7523a77161d499ac467 | refs/heads/master | 2021-04-03T00:50:40.787784 | 2020-03-19T02:19:11 | 2020-03-19T02:19:11 | 248,343,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,352 | py | '''
Write an alternative implementation of the following method.
/// <summary>
/// Gets a sub-item summary for a given item number.
/// </summary>
/// <param name="itemNumber">The item number of the item to get the sub-item summary of.</param>
public SubItemSummary[] GetSubItemSummary(string itemNumber)
{
IEnumerable<Item> subItems = GetSubItems(itemNumber);
List<SubItemSummary> subItemSummary = new List<SubItemSummary>();
foreach (Item item in subItems)
{
IEnumerable<SubItemSummary> tempSummaries = TransformSubItems(item, item.GetSubItems());
subItemSummary.AddRange(tempSummaries);
}
return subItemSummary.ToArray();
}
'''
#since I'm doing this all in python, I'll try and recreate it in python as an alternative implementation
#(as far as I can understand what that function above is doing)
#it seems it's a function that is a 2d array that for each element in the array this function adds another something/anything to the end
#then returns the list
def appendSomething(arr):
#initialize var for the loop
i = 0
#start appending something to the list
while i < len(arr):
arr[i].append(something)
i += 1
#print(arr)
return arr
#below used in actual implementation of the method
#arr = [[0,1],[2,3],[4,5]]
#something = 'hi'
#appendSomething(arr) | [
"noreply@github.com"
] | gagemm1.noreply@github.com |
11d79e94a701387767477012e0356720f2af986e | 0cecc682fa62cd25003626618e4aa452f5b0fa45 | /SeleniumWithPython/webdriver.py | 18dea44fb9f9ce079205b9f2ae37635cf6fa7bf7 | [] | no_license | phanivarmabh/SeleniumSessions | c93d30c6ea1f5f941d16403481306284e2cbc254 | 3670398dc8f1940a7022ede4bb31f3de02480a17 | refs/heads/master | 2023-05-14T07:22:21.323304 | 2021-05-30T18:05:07 | 2021-05-30T18:05:07 | 372,282,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | from selenium import webdriver
from selenium.webdriver.common.by import By
import time
driver = webdriver.Chrome(executable_path="E:\\Selenium\\Drivers\\chromedriver.exe")
driver.get("http://www.google.com")
print(driver.title)
driver.find_element(By.NAME,'q').send_keys("naveen automationlabs")
optionsList = driver.find_elements(By.CSS_SELECTOR,'ul.erkvQe li span')
print(len(optionsList))
for ele in optionsList:
print(ele.text)
if ele.text == "naveen automationlabs youtube":
ele.click()
break
time.sleep(10)
driver.quit() | [
"phanivarmabh@gmail.com"
] | phanivarmabh@gmail.com |
c4f68e35862c14cbd809bdcfd72bda4d65fbdee8 | 9f8d8827cae42cea9b7d76880b9debd2abb67432 | /Deep_Metric_Learning_for_Image_Retrieval/web/static/program/cosface-resnet50-master/main.py | 55812dabd4e86ba3e179031b691a5d776ac3635f | [] | no_license | zhouhanyuxi/Machine-Learning-projects | 998234b84cf21990141144f3a591e78117384430 | 05227ebb263dc64b79f8841f822fe2c73953d4f7 | refs/heads/master | 2022-12-23T08:47:07.724022 | 2020-09-25T14:24:32 | 2020-09-25T14:24:32 | 298,232,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,665 | py | from __future__ import print_function
from __future__ import division
import argparse
import os
import time
import torch
import torch.utils.data
import torch.optim
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
import net
from dataset import ImageList
import lfw_eval
import layer
#os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
# Training settings
parser = argparse.ArgumentParser(description='PyTorch CosFace')
# DATA
parser.add_argument('--root_path', type=str, default='',
help='path to root path of images')
parser.add_argument('--database', type=str, default='CUB',
help='Which Database for train. (WebFace, VggFace2)')
parser.add_argument('--train_list', type=str, default=None,
help='path to training list')
parser.add_argument('--batch_size', type=int, default=512,
help='input batch size for training (default: 512)')
parser.add_argument('--is_gray', type=bool, default=False,
help='Transform input image to gray or not (default: False)')
# Network
parser.add_argument('--network', type=str, default='sphere20',
help='Which network for train. (sphere20, sphere64, LResNet50E_IR)')
# Classifier
parser.add_argument('--num_class', type=int, default=None,
help='number of people(class)')
parser.add_argument('--classifier_type', type=str, default='MCP',
help='Which classifier for train. (MCP, AL, L)')
# LR policy
parser.add_argument('--epochs', type=int, default=30,
help='number of epochs to train (default: 30)')
parser.add_argument('--lr', type=float, default=0.1,
help='learning rate (default: 0.1)')
parser.add_argument('--step_size', type=list, default=None,
help='lr decay step') # [15000, 22000, 26000][80000,120000,140000][100000, 140000, 160000]
parser.add_argument('--momentum', type=float, default=0.9,
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay', type=float, default=5e-4,
metavar='W', help='weight decay (default: 0.0005)')
# Common settings
parser.add_argument('--log_interval', type=int, default=100,
help='how many batches to wait before logging training status')
parser.add_argument('--save_path', type=str, default='checkpoint/',
help='path to save checkpoint')
parser.add_argument('--no_cuda', type=bool, default=False,
help='disables CUDA training')
parser.add_argument('--workers', type=int, default=4,
help='how many workers to load data')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
# Per-database defaults: training list, number of identities (classes), and
# the LR decay schedule (iteration counts at which the learning rate drops).
# NOTE: the original code compared strings with ``is``, which tests object
# identity rather than equality.  A value coming from the command line is not
# guaranteed to be the same interned object as the source literal, so the
# branches could silently fail to match; ``==`` is the correct comparison.
if args.database == 'CUB':  # default
    args.train_list = '/mnt/batch/tasks/shared/LS_root/mounts/clusters/yikai/code/Users/yili.lai/recognition/CUB_200_2011/image_list_train.txt'
    args.num_class = 200
    args.step_size = [220, 880, 1760]
elif args.database == 'VggFace2':
    args.train_list = 'replace-able'
    args.num_class = 8069
    args.step_size = [80000, 120000, 140000]
elif args.database == 'WebFace':
    args.train_list = '/home/wangyf/dataset/CASIA-WebFace/CASIA-WebFace-112X96.txt'
    args.num_class = 10572
    args.step_size = [16000, 24000]
else:
    raise ValueError("NOT SUPPORT DATABASE! ")
def main():
    """Build the backbone and classifier head, then run CosFace training.

    All configuration comes from the module-level ``args`` namespace and
    ``device``; a checkpoint is written after every epoch and immediately
    evaluated on LFW.
    """
    # --------------------------------------model----------------------------------------
    if args.network == 'sphere20':
        model = net.sphere(type=20, is_gray=args.is_gray)
        model_eval = net.sphere(type=20, is_gray=args.is_gray)
    elif args.network == 'sphere64':
        model = net.sphere(type=64, is_gray=args.is_gray)
        model_eval = net.sphere(type=64, is_gray=args.is_gray)
    elif args.network == 'LResNet50E_IR':
        model = net.LResNet50E_IR(is_gray=args.is_gray)
        model_eval = net.LResNet50E_IR(is_gray=args.is_gray)
    else:
        raise ValueError("NOT SUPPORT NETWORK! ")
    model = torch.nn.DataParallel(model).to(device)
    # Separate, non-DataParallel copy used only for checkpoint evaluation.
    model_eval = model_eval.to(device)
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    model.module.save(args.save_path + 'CosFace_0_checkpoint.pth')
    # 512 is dimension of feature.  Build only the requested classifier head:
    # the original indexed a dict literal, which eagerly instantiated all
    # three heads and moved two unused ones onto the device.
    if args.classifier_type == 'MCP':
        classifier = layer.MarginCosineProduct(512, args.num_class).to(device)
    elif args.classifier_type == 'AL':
        classifier = layer.AngleLinear(512, args.num_class).to(device)
    elif args.classifier_type == 'L':
        classifier = torch.nn.Linear(512, args.num_class, bias=False).to(device)
    else:
        raise ValueError("NOT SUPPORT CLASSIFIER! ")
    # ------------------------------------load image---------------------------------------
    if args.is_gray:
        train_transform = transforms.Compose([
            transforms.Grayscale(),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
            transforms.Normalize(mean=(0.5,), std=(0.5,))
        ])  # gray
    else:
        train_transform = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))  # range [0.0, 1.0] -> [-1.0,1.0]
        ])
    train_loader = torch.utils.data.DataLoader(
        ImageList(root=args.root_path, fileList=args.train_list,
                  transform=train_transform),
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True, drop_last=True)
    print('length of train Database: ' + str(len(train_loader.dataset)))
    print('Number of Identities: ' + str(args.num_class))
    # --------------------------------loss function and optimizer-----------------------------
    criterion = torch.nn.CrossEntropyLoss().to(device)
    optimizer = torch.optim.SGD([{'params': model.parameters()}, {'params': classifier.parameters()}],
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # ----------------------------------------train----------------------------------------
    for epoch in range(1, args.epochs + 1):
        train(train_loader, model, classifier, criterion, optimizer, epoch)
        model.module.save(args.save_path + 'CosFace_' + str(epoch) + '_checkpoint.pth')
        lfw_eval.eval(model_eval, args.save_path + 'CosFace_' + str(epoch) + '_checkpoint.pth', args.is_gray)
    print('Finished Training')
def train(train_loader, model, classifier, criterion, optimizer, epoch):
    """Run one training epoch.

    Parameters: the training data loader, the (DataParallel-wrapped)
    backbone, the margin/linear classifier head, the loss, the SGD
    optimizer and the 1-based epoch number.  An averaged loss is logged
    every ``args.log_interval`` batches.
    """
    model.train()
    print_with_time('Epoch {} start training'.format(epoch))
    time_curr = time.time()
    loss_display = 0.0
    for batch_idx, (data, target) in enumerate(train_loader, 1):
        # Global iteration counter drives the step-wise LR schedule.
        iteration = (epoch - 1) * len(train_loader) + batch_idx
        adjust_learning_rate(optimizer, iteration, args.step_size)
        data, target = data.to(device), target.to(device)
        # compute output; margin-based heads (MCP/AL) also need the labels.
        output = model(data)
        if isinstance(classifier, torch.nn.Linear):
            output = classifier(output)
        else:
            output = classifier(output, target)
        loss = criterion(output, target)
        loss_display += loss.item()
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            time_used = time.time() - time_curr
            loss_display /= args.log_interval
            # BUG FIX: these comparisons used ``is`` (identity), which only
            # worked by CPython string-interning accident; ``==`` is correct.
            if args.classifier_type == 'MCP':
                INFO = ' Margin: {:.4f}, Scale: {:.2f}'.format(classifier.m, classifier.s)
            elif args.classifier_type == 'AL':
                INFO = ' lambda: {:.4f}'.format(classifier.lamb)
            else:
                INFO = ''
            print_with_time(
                'Train Epoch: {} [{}/{} ({:.0f}%)]{}, Loss: {:.6f}, Elapsed time: {:.4f}s({} iters)'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader),
                    iteration, loss_display, time_used, args.log_interval) + INFO
            )
            time_curr = time.time()
            loss_display = 0.0
import numpy as np
import torch
# check if cuda is available for boost
cuda = torch.cuda.is_available()
def extract_embeddings(dataloader, model, embedding_dim=512):
    """Run *model* over every batch of *dataloader* and collect embeddings.

    Parameters
    ----------
    dataloader : iterable yielding ``(images, target)`` batches; must also
        expose ``dataloader.dataset`` so the outputs can be pre-allocated.
    model : network whose ``forward`` maps a batch of images to a tensor of
        shape ``(batch, embedding_dim)``.
    embedding_dim : int, size of one embedding vector.  Defaults to 512,
        matching the backbones used in this project (generalized from the
        previously hard-coded constant; fully backward compatible).

    Returns
    -------
    (embeddings, labels) : numpy arrays of shape ``(N, embedding_dim)``
        and ``(N,)`` where ``N = len(dataloader.dataset)``.
    """
    with torch.no_grad():
        model.eval()
        embeddings = np.zeros((len(dataloader.dataset), embedding_dim))
        labels = np.zeros(len(dataloader.dataset))
        k = 0
        for images, target in dataloader:
            if cuda:
                images = images.cuda()
            embeddings[k:k + len(images)] = model.forward(images).data.cpu().numpy()
            labels[k:k + len(images)] = target.numpy()
            k += len(images)
        return embeddings, labels
def print_with_time(string):
    """Print *string* prefixed with a human-readable local timestamp."""
    timestamp = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime())
    print("{}{}".format(timestamp, string))
def adjust_learning_rate(optimizer, iteration, step_size):
    """Sets the learning rate to the initial LR decayed by 10 each step size.

    *step_size* is the list of global iteration numbers at which decay
    happens; hitting the k-th milestone sets ``lr = args.lr * 0.1**(k+1)``
    on every parameter group.  Iterations between milestones leave the LR
    untouched.  (The original's dead ``else: pass`` branch was removed.)
    """
    if iteration in step_size:
        lr = args.lr * (0.1 ** (step_size.index(iteration) + 1))
        print_with_time('Adjust learning rate to {}'.format(lr))
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
# Script entry point: echo the fully-resolved configuration, then train.
if __name__ == '__main__':
    print(args)
    main()
| [
"1127282462@qq.com"
] | 1127282462@qq.com |
ebe28ba829d41375ea2277992a6c26a686369d1b | 1f6e695fb8919bf872908d93392dea90f33efea8 | /setup.py | 232b3200f11b70330c7a489847ea3ebb1303b8be | [
"MIT"
] | permissive | term1830/DSPT7-kt18 | 7791dfc60cd320c2510d5085a4687d423b881bc2 | c0f1dd1a581980529d4680cabce4d39f1d31da5e | refs/heads/master | 2022-12-10T19:30:45.014272 | 2020-09-04T20:09:19 | 2020-09-04T20:09:19 | 292,156,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | # setup.py file
from setuptools import find_packages, setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="mylambdatakt18", # the name that you will install via pip
version="1.0",
author="KT",
author_email="term1830@gmail.com",
description="Testing fuction",
long_description=long_description,
long_description_content_type="text/markdown", # required if using a md file for long desc
#license="MIT",
url="https://github.com/term1830/DSPT7-kt18",
#keywords="",
packages=find_packages() # ["my_lambdata"]
) | [
"term1830@gmail.com"
] | term1830@gmail.com |
8dd422f08e2656cb735a42627db80fb888ccf58a | 833221999949430d4e6ce458f65427bb0f23dfdd | /63/63.py | a75545bd17f637b91633561b9bb9f891b07b2cbf | [] | no_license | JohnBatmanMySlinky/project-euler | 1a3220f85192091c7815e9588b9b83d962af6eec | 69609da6d2c4bfbf96af434453e7c0a959454782 | refs/heads/master | 2023-03-07T06:13:38.994329 | 2021-02-07T22:45:35 | 2021-02-07T22:45:35 | 289,784,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | answer = 0
# Project Euler 63: count the n-digit positive integers that are also an
# nth power (``answer`` is initialised to 0 above this loop).
# For each exponent y, walk x upward while x**y still has <= y digits;
# once the digit count exceeds y it can never drop back, so the while
# loop terminates.  The bound 100 on y is a generous over-estimate.
for y in range(1,100):
    x = 1
    while len(str(x**y)) <= y:
        if len(str(x**y)) == y:
            print(str(x) + '^' + str(y) + ' = ' + str(x**y))
            answer += 1
        x += 1
print('answer: ' + str(answer))
| [
"johnmyslinski@Johns-MacBook-Air.local"
] | johnmyslinski@Johns-MacBook-Air.local |
86217b8ad98af74e230fccd5dd6b6ce138424d16 | 256504fc3f44df7b75777c02269845040646bf25 | /ServerTest.py | 98158771f329a28339abd82ee40c979239d855e0 | [] | no_license | tsp-project-next/server_test | d53e4d21638c9243f42c3498a695aad12bfd655f | fe7e091f580181ab8907d965a716aba110750dec | refs/heads/master | 2020-05-17T21:02:58.553867 | 2019-04-28T21:48:24 | 2019-04-28T21:48:24 | 183,962,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | import subprocess
import tempfile
import time
import os
clients = []
numClients = 200
counter = 0
for i in range(0, numClients):
output = tempfile.TemporaryFile()
#time.sleep(1)
process = subprocess.Popen(['java', '-jar', '/Users/connor/projectnext-gradle/build/libs/projectnext-gradle-1.0-SNAPSHOT.jar'], stdout = output)
clients.append((output, process, i))
for output, process, i in clients:
process.wait()
counter += 1
output.seek(0)
#print("Client " + str(i) + ":\n" + output.read().decode('utf-8'))
output.close()
if (counter == numClients):
print("There were " + str(counter) + " clients that successfully communicated 8 unique packet types with the server" | [
"noreply@github.com"
] | tsp-project-next.noreply@github.com |
cc14c80806c715c0f9c76cb2abbdbf8b40c418a9 | ba39a4c9cb54b7c183e4032a112fbeb2a222dc73 | /uploads/core/homepage_demo/lib/welding_defect_recognizer/Recognizer.py | 8f65d8a44bc4b1f2e5d00f0acf92695b306c020c | [] | no_license | LEEKYOUNGHWA/django_alba | 12b7a985319227cccdbed1b510491fb23421d66b | 46f91ccd4573e0661da96d2c36579f439340cf88 | refs/heads/master | 2021-01-19T12:21:45.245814 | 2017-12-11T07:23:52 | 2017-12-11T07:23:52 | 100,778,326 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,086 | py | import tensorflow as tf
import tensorflow.contrib.slim as slim
from uploads.core.homepage_demo.lib.welding_defect_recognizer.LeNet import LeNet
from uploads.core.homepage_demo.lib.welding_defect_recognizer.utils import *
class Recognizer(object):
    """Welding-defect classifier wrapping a pre-trained LeNet TF graph.

    NOTE(review): left unrestyled on purpose — TensorFlow 1.x graph
    construction and variable initialisation here are order-sensitive.
    """
    def __init__(self, sess, model_path):
        # Network input size (width, height) and the two classes (NG/OK).
        self.image_size = (120,48)
        self.num_class = 2
        # Region of interest in the raw frame; presumably
        # [x1, y1, x2, y2] pixel coordinates — TODO confirm against utils.
        self.roi = [11, 500, 2570, 1523]
        # One colour tuple per label index (used by draw); presumably BGR
        # for OpenCV: red for 'NG', green for 'OK' — verify.
        self.colors = [(0,0,255), (0,255,0)]
        self.label_to_class = ['NG', 'OK']
        self.sess = sess
        ## create network
        self.net = LeNet(self.image_size, self.num_class)
        self.images_T, self.prob_T = self.net.get_test_tensors()
        ## get model variables
        variables = slim.filter_variables(slim.get_model_variables(), include_patterns=['LeNet'])
        ## initialize variables
        self.sess.run(tf.variables_initializer(variables))
        ## load model
        restorer = tf.train.Saver(variables)
        restorer.restore(self.sess, model_path)
        ## transform params
        self.first_transform_params = {
            "to_gray": True,
            "roi": {
                "output_size": [320,128],
                "margin_ratio": 0.3,
            }
        }
        self.second_transform_params = {
            "color": {
                "to_bgr": False,
            },
            "output_size": [120,48],
        }
    def run(self, image):
        """Preprocess *image*, run the net, return the argmax label index
        (0 = 'NG', 1 = 'OK' per ``label_to_class``)."""
        image, roi = first_preprocessing(image, self.roi, self.first_transform_params)
        image = second_preprocessing(image, roi, self.second_transform_params)
        # image[None] adds a leading batch axis for the single image.
        prob = self.sess.run(self.prob_T, {self.images_T:image[None]})[0]
        return np.argmax(prob)
    def draw(self, image, label):
        """Return a copy of *image* annotated with the class name for *label*."""
        image = image.copy()
        class_name = self.label_to_class[label]
        # class_conf = '{:.2f}'.format(conf)
        color = self.colors[label]
        cv2.putText(image, class_name, (10,280), cv2.FONT_HERSHEY_PLAIN, 25, color, thickness=20)
        # cv2.putText(image, class_conf, (10,550), cv2.FONT_HERSHEY_PLAIN, 25, color, thickness=20)
        return image
| [
"ekfrl0526@naver.com"
] | ekfrl0526@naver.com |
fc49994cbf7356c6fd241ebfa3d48ca03c7d5983 | f0a5ad7b8aa39f51f233391fead0da3eabecc4ee | /.history/toolbox/tradaExtract_20191128085816.py | a1f0a049b0449f364b7c3a9c579677dbaf4a3ae4 | [] | no_license | OseiasBeu/webScrapping | e0a524847e55b24dbbd3d57bbe7fa43b4e101f48 | 1e72c7551aea355a891043baecfcbab8a89e719a | refs/heads/master | 2022-10-25T18:12:50.858653 | 2020-06-18T01:29:24 | 2020-06-18T01:29:24 | 224,681,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | from bs4 import BeautifulSoup
arr = [['#', 'clienteEstado', 'warehouseId', 'Pendentes', 'de', 'integrao'], ['1', 'SP', '2404', '48'], ['2', 'SP', '2462', '10'], ['3', 'SP', '7100', '7'], ['4', 'MG', 'BR19_A002', '6'], ['5', 'SP', 'BR19_A002', '6'], ['6', 'PE', 'BR19_A002', '5'], ['7', 'SP', '2444', '3'], ['8', 'MG', '7100', '2'], ['9', 'RJ', 'BR19_A002', '2'], ['10', 'BA', 'BR19_A002', '2'], ['11', 'MG', '0', '1'], ['12', 'SP', '7134', '1'], ['13', 'SP', '7136', '1'], ['14', 'SP', 'BR1F_A002', '1']]
soup = BeautifulSoup(arr).encode("utf-8")
print(arr) | [
"oseiasbeu@outlook.com"
] | oseiasbeu@outlook.com |
6171b8e1aaffc27ebb5b2e594409e8ce47552e37 | ae9d32213e4ab423965e4a7f3ba1e6abfea85817 | /PreplotCalculator.py | 93599d5d0fedb8bd01a8babfdb6fcdffc49ae537 | [] | no_license | syntaxnoob/SpawnerDistance | 9e8d68123a8eb6835cff33f991b12bb153fb0858 | a07767d5e9358bb2b1efde171ee4a5c297302933 | refs/heads/master | 2022-07-31T08:24:35.172896 | 2020-05-23T16:22:09 | 2020-05-23T16:22:09 | 263,573,361 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,143 | py | #!/bin/python3
import math
import pandas as pd
### Variabel ###
# spawner coordinates (Xcoordinate, Ycoordinate, Zcoordinate)
Spawners = [(370, 28, 886), (365, 37, 945), (359, 39, 917), (381, 42, 917),
(351, 44, 931), (362, 44, 891), (408, 44, 927), (429, 35, 897)]
Bigsum = 0
Distancelist = [] # List with Blockindex and Distances
Blocklist = [] # List with Blockindex and X/Y/Z coordinates
Sumlist = [] # List with Distances
Blockindex = -3 # Blockindex is the index for the searched block
maxdistance = 16 # Max distance from player to spawner
Xcoords = []
Ycoords = []
Zcoords = []
bestlist = [] # List of blockindexes
goedblok = [] # List of bestlist blocks
### Find Search area ###
for d in Spawners:
Xcoords.append(d[0])
Ycoords.append(d[1])
Zcoords.append(d[2])
Xcoords.sort()
Ycoords.sort()
Zcoords.sort()
minX = Xcoords[0]
minY = Ycoords[0]
minZ = Zcoords[0]
maxX = Xcoords[-1]
maxY = Ycoords[-1]
maxZ = Zcoords[-1]
# Could be optimized
### Brute force the shortest distance ###
for i in range(minX, maxX): # Xcoords Loop
Blockindex = Blockindex + 1
for j in range(minY, maxY): # Ycoords Loop
Blockindex = Blockindex + 1
for k in range(minZ, maxZ): # Zcoords Loop
Blockindex = Blockindex + 1
for l in range(0, 7):
# Pythagorean.
distance = math.sqrt(
math.pow((Spawners[l][0] - i), 2) + math.pow((Spawners[l][1] - j), 2) + math.pow((Spawners[l][2] - k), 2))
if (distance > maxdistance):
# Later used to calculate the amount of spawners that will be activated.
Bigsum = 1000000 + Bigsum
else: # Distance is allways positive
Bigsum = distance + Bigsum
Distancelist.append(Blockindex)
Distancelist.append(Bigsum)
Sumlist.append(Bigsum)
Blocklist.append(Blockindex)
Blocklist.append(i)
Blocklist.append(j)
Blocklist.append(k)
Bigsum = 0
Blockindex = Blockindex - 1
Blockindex = Blockindex - 1
Sumlist.sort()
print(Sumlist[0])
ID = (Distancelist.index(Sumlist[0]))
DI = Blocklist.index(ID)
print ("The block that is closest to all spawners is:", Blocklist[DI + 1], ",",
Blocklist[DI + 2], ",", Blocklist[DI + 3], ".", "And you activate:", round((7000000 - Distancelist[ID]) / 1000000), "Spawners.")
for i in range(len(Distancelist)):
if (Distancelist[i] > 1000000):
if (Distancelist[i] < 5000000):
bestlist.append(Distancelist[(i - 1)])
else:
continue
else:
continue
### Bestlist is GOED, niet aankomen ###
for v in range(len(bestlist)):
if(v == (len(bestlist) - 1)):
break
else:
for w in range(len(Blocklist)):
if (bestlist[v] == Blocklist[w]):
goedblok.append(Blocklist[(w + 1):(w + 4)])
break
else:
continue
print("blocks dat 3 spawners activeren: ", len(bestlist))
pd.DataFrame(goedblok).to_csv("3spawner.csv", index=False)
| [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
fb78400b698ffe504845b634c0e815b2a3ad8c63 | bcf8477e937b883af034d962688e04c05cefa79d | /biquge/content_parser.py | 97e9143aa164a82f3e957b66c340a89ce6cfdca1 | [] | no_license | chenglp1215/biquge_scrapy | 44247b1ec50948918470c4033b8496acb2728d58 | a0b2a32aa07e32b2d7f687ff37f9c8c45f620e68 | refs/heads/master | 2021-05-13T16:47:06.332665 | 2018-01-09T11:25:31 | 2018-01-09T11:25:31 | 116,802,527 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | # coding=utf-8
import copy
from bs4 import BeautifulSoup
class ModifyContent(object):
    """Wrap a scraped chapter page and extract its cleaned text.

    Python 2 era code: ``unicode`` and ``u""`` literals are used throughout.
    """
    def __init__(self, content):
        # NOTE(review): BeautifulSoup is called without an explicit parser,
        # so the parse result depends on which parser is installed.
        self.soup = BeautifulSoup(content)
        self.page_content = self.soup.find(id="content")
        self.page_title = self.soup.find(class_="bookname")
    @staticmethod
    def clear_attr(content, tag_name, attrs):
        """Delete each attribute in *attrs* from every *tag_name* tag."""
        for each in content(tag_name):
            for each_attr in attrs:
                del (each[each_attr])
    def del_tag(self, tag_name):
        """Remove every *tag_name* tag from the chapter body."""
        for each in self.page_content(tag_name):
            each.extract()
    def get_content(self):
        """Return the chapter text, one child node per line, skipping <br/>."""
        content = u""
        for each in self.page_content.children:
            if unicode(each).strip() == u"<br/>":
                continue
            else:
                content += u"%s\n" % unicode(each)
        return content
    def get_title(self):
        """Return the chapter title text from the page header."""
        return self.page_title.h1.text
"chenglongping@100tal.com"
] | chenglongping@100tal.com |
42d9f8b9b3fbd63a0d922f62a6dd5c190f2bb97c | 5dc190fec80026b183ff99f32a4ae0d6b790c9d7 | /story/intro.py | 571b7a6a17bc7e057317d5d0626106570b1c9130 | [] | no_license | calispotato/story | c625ea266abed66bec4bc2b068f771aadcc01367 | fdc8b4f7b8f5501e21bf275714193a44d644a523 | refs/heads/master | 2021-01-22T08:59:50.556096 | 2015-10-02T16:42:40 | 2015-10-02T16:42:40 | 42,484,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | """Introduction
Welcome to the relm of Yinfall, do you wish to begin your adventure?
"""
# Branch table read by the story engine: map each accepted answer to the
# next scene ("road") or to "quit".
on_y="road"
on_yes="road"
on_no="quit"
on_n="quit"
| [
"Blueroofgames@gmail.com"
] | Blueroofgames@gmail.com |
1c8f7d199cf024ccf032dcd7de0d772ff67d1700 | 730783fc2828753394eb038d169629a534819715 | /settings.py | ba1362fb6af4c27700bdeffa2aa588e4856e9e35 | [] | no_license | YaKalmar/awesome-telegram-bot | 1e1aa6144b0a7419b1bfbcb7b58b7e53f9b7640b | 852d36c6f636105a1c55446bc60fd2e2b0bd2a85 | refs/heads/master | 2021-07-07T01:04:10.310922 | 2017-10-03T11:06:44 | 2017-10-03T11:06:44 | 105,578,026 | 1 | 1 | null | 2017-10-03T11:06:45 | 2017-10-02T19:50:39 | Python | UTF-8 | Python | false | false | 139 | py | from os.path import join, dirname
from dotenv import load_dotenv
# Resolve the .env file sitting next to this settings module and load its
# KEY=VALUE pairs into the process environment (python-dotenv), so later
# os.getenv lookups see them.
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
| [
"panyushovpavel@local"
] | panyushovpavel@local |
def findOcurrences(text: str, first: str, second: str) -> list:
    """LeetCode 1078: return the words that immediately follow every
    occurrence of the bigram ``first second`` in *text*.
    """
    res = []
    words = text.split()
    # Stop two words early so words[i + 2] always exists.  (The original
    # also executed a useless ``i += 2``: for-loops rebind i each pass.)
    for i in range(len(words) - 2):
        if words[i] == first and words[i + 1] == second:
            res.append(words[i + 2])
    return res
"meihan.xue@ucdconnect.ie"
] | meihan.xue@ucdconnect.ie |
3b497b13bfb03c08d8605c64566caeff353afe1f | a1aadb13c35f2a3fb27078090e5a582a3ea462f1 | /devel/py-pyobjc-core/patches/patch-setup.py | f046aa0efda0c7712c4171148edac369e6c807f7 | [] | no_license | fidelix-project/pkgsrc | 702346ca3a74b3dced9de29b07d342154466d1bd | 8a6673aa3e19b8604d2077015dc4673304399afc | refs/heads/master | 2022-11-06T04:48:33.983672 | 2020-06-28T14:06:28 | 2020-06-28T14:06:28 | 273,759,036 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | $NetBSD: patch-setup.py,v 1.1 2019/11/18 17:05:06 adam Exp $
Do not add debug symbols.
Do not override compiler optimiztion flags.
--- setup.py.orig 2019-11-18 16:02:47.000000000 +0000
+++ setup.py
@@ -66,7 +66,6 @@ def get_sdk_level(sdk):
# CFLAGS for the objc._objc extension:
CFLAGS = [
- "-g",
"-fexceptions",
# Loads of warning flags
"-Wall",
@@ -137,7 +136,7 @@ if get_config_var("Py_DEBUG"):
elif isinstance(cfg_vars[k], str) and "-O3" in cfg_vars[k]:
cfg_vars[k] = cfg_vars[k].replace("-O3", "-O1 -g")
-else:
+elif False:
# Enable -O4, which enables link-time optimization with
# clang. This appears to have a positive effect on performance.
cfg_vars = get_config_vars()
| [
"adam@pkgsrc.org"
] | adam@pkgsrc.org |
ca46bb856d561d725345a0a14058c5877a4cac0e | 9adc810b07f7172a7d0341f0b38088b4f5829cf4 | /experiments/ashvin/icml2020/d4rl/test1.py | 99515aca2a2dba3519cd10dc424cb31a7cf4af19 | [
"MIT"
] | permissive | Asap7772/railrl_evalsawyer | 7ee9358b5277b9ddf2468f0c6d28beb92a5a0879 | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | refs/heads/main | 2023-05-29T10:00:50.126508 | 2021-06-18T03:08:12 | 2021-06-18T03:08:12 | 375,810,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,000 | py | """
AWR + SAC from demo experiment
"""
from rlkit.demos.source.hdf5_path_loader import HDF5PathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment, process_args
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy
if __name__ == "__main__":
variant = dict(
num_epochs=101,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=1024,
replay_buffer_size=int(2E6),
layer_size=256,
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
# num_gaussians=1,
),
qf_kwargs=dict(
hidden_sizes=[256, 256, ],
),
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=False,
alpha=0,
compute_bc=False,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=25000,
policy_weight_decay=1e-4,
q_weight_decay=0,
bc_loss_type="mse",
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=False,
reparam_weight=0.0,
awr_weight=0.0,
bc_weight=1.0,
post_bc_pretrain_hyperparams=dict(
bc_weight=0.0,
compute_bc=False,
),
reward_transform_kwargs=None, # r' = r + 1
terminal_transform_kwargs=None, # t = 0
),
launcher_config=dict(
num_exps_per_instance=1,
region='us-west-2',
),
path_loader_class=HDF5PathLoader,
path_loader_kwargs=dict(),
add_env_demos=False,
add_env_offpolicy_data=False,
# logger_variant=dict(
# tensorboard=True,
# ),
load_demos=False,
load_env_dataset_demos=True,
pretrain_policy=True,
pretrain_rl=True,
# save_pretrained_algorithm=True,
# snapshot_mode="all",
)
search_space = {
# 'env': ["pen-sparse-v0", "door-sparse-v0"],
'env': ["halfcheetah-mixed-v0", "walker2d-mixed-v0", "hopper-mixed-v0", ],
'trainer_kwargs.bc_loss_type': ["mle"],
'trainer_kwargs.awr_loss_type': ["mle"],
'seedid': range(3),
'trainer_kwargs.beta': [0.1, 0.3, 1.0, ],
'trainer_kwargs.reparam_weight': [0.0, ],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.bc_weight': [1.0, ],
'policy_kwargs.std_architecture': ["values", ],
# 'trainer_kwargs.clip_score': [0.5, ],
# 'trainer_kwargs.compute_bc': [True, ],
'trainer_kwargs.awr_use_mle_for_vf': [True, ],
'trainer_kwargs.awr_sample_actions': [False, ],
'trainer_kwargs.awr_min_q': [True, ],
'trainer_kwargs.q_weight_decay': [0, ],
'trainer_kwargs.reward_transform_kwargs': [None, ],
'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0), ],
# 'qf_kwargs.output_activation': [Clamp(max=0)],
# 'trainer_kwargs.train_bc_on_rl_buffer':[True],
# 'policy_kwargs.num_gaussians': [1, ],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, process_args)
| [
"alexanderkhazatsky@gmail.com"
] | alexanderkhazatsky@gmail.com |
32895883f473a363754d89be15e7b46457dd22b0 | dd14ceec07566eeb2538e5dc5eabf8b9d3d40074 | /music_controller/spotify/migrations/0003_alter_spotifytoken_expires_in.py | e676179ea07be9b301026b2ffdcc98826f784d6f | [] | no_license | PaddyCooper08/Block-party | c464bfea9ac43d50e915079deba370a73986daa9 | 5b47e4c2f0deb7043ffb3049a82c7e01d8ab18f6 | refs/heads/master | 2023-08-07T02:00:01.140402 | 2021-10-03T16:18:16 | 2021-10-03T16:18:16 | 400,838,872 | 1 | 0 | null | 2021-09-19T19:06:57 | 2021-08-28T16:25:29 | Python | UTF-8 | Python | false | false | 398 | py | # Generated by Django 3.2.7 on 2021-09-19 18:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('spotify', '0002_alter_spotifytoken_expires_in'),
]
operations = [
migrations.AlterField(
model_name='spotifytoken',
name='expires_in',
field=models.DateTimeField(),
),
]
| [
"1210@rgsg.co.uk"
] | 1210@rgsg.co.uk |
def load_file(filename):
    """Return the full text contents of *filename*."""
    with open(filename) as handle:
        contents = handle.read()
    return contents
def tail(f, n=20, offset=None):
    """Read the last *n* lines of file object *f*, skipping *offset*
    trailing lines, and return them joined with newlines as one string.

    (The original docstring described a ``(lines, has_more)`` tuple; that
    variant is the commented-out return below — the live code returns str.)

    Works by seeking back an estimated byte count from the end and growing
    the estimate until enough lines are visible or the file start is hit.
    NOTE(review): Python 2 era code — ``avg_line_length`` becomes a float
    after the first retry, which ``f.seek`` only accepted on Python 2.
    """
    avg_line_length = 74
    to_read = n + (offset or 0)
    while 1:
        try:
            # Seek to roughly to_read lines before EOF (whence=2 == end).
            f.seek(-(avg_line_length * to_read), 2)
        except IOError:
            # woops. apparently file is smaller than what we want
            # to step back, go to the beginning instead
            f.seek(0)
        pos = f.tell()
        lines = f.read().splitlines()
        if len(lines) >= to_read or pos == 0:
            return '\n'.join(lines[-to_read:offset and -offset or None])
##            return lines[-to_read:offset and -offset or None], \
##                   len(lines) > to_read or pos > 0
        avg_line_length *= 1.3
def dprint(string1, string2='', more=False):
    # Debug-print helper (Python 2 print statements).  Prints string1,
    # plus ": string2" when string2 is non-empty, between dashed rules.
    # With more=True the closing rule is suppressed so several dprint
    # calls can be visually grouped into one box.
    if str(string2) != '':
        string2 = ': ' + str(string2)
    print '------------'
    print str(string1) + str(string2)
    if more == False:
        print '------------'
| [
"pinchhazard@gmail.com"
] | pinchhazard@gmail.com |
8bc41e2481648668b551489ff9fef8b4a6aae3f5 | 9eb706ffe48d416db4205a319f1b54e29a9c0757 | /blog/models.py | 8539e09e46ca5a74a5ee8ed04feb5b05e3e491ea | [] | no_license | joaotavares09/blog | d939461bb0d7eeb28f086a1d7ffb2541e44fc7db | d85bc9b3bb0f8f46791e148670b1621300bd561b | refs/heads/master | 2020-12-11T16:14:20.360979 | 2020-01-20T21:46:43 | 2020-01-20T21:46:43 | 233,894,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | from django.db import models
from django.utils import timezone
class Post(models.Model):
    """A blog post with an optional photo; unpublished until publish() runs."""
    # Deleting the author user cascades to their posts.
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    # Stays NULL until the post is published.
    published_date = models.DateTimeField(blank=True, null=True)
    photo = models.ImageField(upload_to='imagens/', null=True, blank=True)
    def publish(self):
        """Stamp the post with the current time and persist it."""
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        """Human-readable representation (admin/shell): the post title."""
        return self.title
"jentavares09@gmail.com"
] | jentavares09@gmail.com |
0028010283ad48a85bea1db9f94dcd8429547af1 | d3e073ff82bc075208719540215db7a26bf199a5 | /eventex/subscriptions/migrations/0003_auto_20180122_0606.py | 1951d20d5ec4d923988626546b1431200b6daf61 | [
"MIT"
] | permissive | gustavo7lagoas/eventex_wttd | 59fb9076121ccbcf7309dd2b3ebb90a61376cb4c | 96ef7111341af391c8d97e4e0bc54fa4f668657c | refs/heads/master | 2019-08-20T02:06:50.675243 | 2018-03-31T08:05:42 | 2018-03-31T08:05:42 | 66,985,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2018-01-22 06:06
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('subscriptions', '0002_auto_20180119_0622'),
]
operations = [
migrations.AddField(
model_name='subscription',
name='uid',
field=models.UUIDField(default=uuid.uuid4, editable=False),
),
migrations.AlterField(
model_name='subscription',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| [
"gustavo7lagoas@gmail.com"
] | gustavo7lagoas@gmail.com |
e175b362960c39bd1374d19983d28efcda994c62 | 433d4d6b7693ad396cacf7a816c542630af6c530 | /Buổi 3/Shop1.py | facd59c16e9f4c5426afc1e17bf7556c56e10159 | [] | no_license | mrd198/NguyenThaiDuc-Funamental-C4E32 | 062a324b5db2b8ac5eb3603c9562a380c2948825 | a28da5c27e52f7e764434e4daedfe96ecfe71a53 | refs/heads/master | 2020-05-17T20:06:56.942234 | 2019-06-12T14:59:35 | 2019-06-12T14:59:35 | 183,936,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | items = ["T-Shirt", "Sweater"]
# Tiny interactive CRUD demo over an in-memory list (``items`` is
# initialised above).  Reads one command per pass:
#   R = read/list, C = create, U = update by index, D = delete by index.
while True:
    print("Welcome to our shop, what do you want (C, R, U, D)?: ",end='')
    n = (input())
    if n == "R":
        print("Our item:",end=' ')
        print(*items,sep=', ')
    elif n == "C":
        new_item = input("Enter new item:")
        items.append(new_item)
        print("Our items:", end ="")
        print(*items, sep=",")
    elif n == "U":
        # NOTE(review): the position is not validated — non-numeric input
        # raises ValueError and an out-of-range index raises IndexError.
        update_position = int(input("Update position?"))
        new__item = input("New item?")
        items[update_position] = new__item
        print("Our items:", end ="")
        print(*items, sep=",")
    elif n == "D":
        delete_position = int(input("Delete position?"))
        del items[delete_position]
        print("Our items:", end ="")
        print(*items, sep=",")
    else:
        print("Unknown data")
break | [
"50080807+mrd198@users.noreply.github.com"
] | 50080807+mrd198@users.noreply.github.com |
5022b105c714e2dc4421650a004f69e753e7f87b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_unbarring.py | 324d198051173b711ebc3f517ecffc2d0ffdcc48 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
#calss header
class _UNBARRING():
def __init__(self,):
self.name = "UNBARRING"
self.definitions = unbar
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['unbar']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
c6228a24ffa131bea6cb1d0dbcd6a0ed28883457 | 839b5fe977ab6986e9ba8d170a3d2e1401e2cbf5 | /src/hana_exporter/shaptools/shapcli.py | b584732cabe7d882614fb2ea0e7acb28895ed99a | [
"MIT"
] | permissive | redbearder/sapspa | d90d2a4f76fe2878bb734daef4b04dbbc162c96e | 924755a8ecf80571976df7fc24588cb1cacf37f0 | refs/heads/master | 2022-05-26T10:08:04.763714 | 2020-06-12T10:06:59 | 2020-06-12T10:06:59 | 187,381,550 | 62 | 10 | MIT | 2022-05-25T02:59:52 | 2019-05-18T16:37:16 | Vue | UTF-8 | Python | false | false | 13,679 | py | """
Code to expose some useful methods using the command line
:author: xarbulu
:organization: SUSE LLC
:contact: xarbulu@suse.com
:since: 2019-07-11
"""
import logging
import argparse
import json
from shaptools import hana
# CLI program name shown in argparse usage/help output.
PROG = 'shapcli'
# Bare message-only log format; DecodedFormatter additionally strips
# b'...' bytes-repr wrappers from formatted records.
LOGGING_FORMAT = '%(message)s'
class DecodedFormatter(logging.Formatter):
    """Formatter that strips the ``b'...'`` bytes-repr wrapper.

    When a bytes object was logged, its formatted text looks like
    ``b'some text'``; in that case only the text between the first pair
    of single quotes is returned, otherwise the message is unchanged.
    """
    def format(self, record):
        formatted = super(DecodedFormatter, self).format(record)
        if not formatted.startswith("b'"):
            return formatted
        # Keep only the text between the first pair of single quotes.
        return formatted.split("'")[1]
class ConfigData(object):
    """Container for the SAP HANA connection settings.

    Pulls the mandatory ``sid``, ``instance`` and ``password`` entries
    plus the optional ``remote`` host out of *data_dict*; a missing
    mandatory entry is logged and the ``KeyError`` re-raised.
    """
    def __init__(self, data_dict, logger):
        try:
            for mandatory_key in ('sid', 'instance', 'password'):
                setattr(self, mandatory_key, data_dict[mandatory_key])
            self.remote = data_dict.get('remote', None)
        except KeyError as err:
            logger.error(err)
            logger.error('Configuration file must have the sid, instance and password entries')
            raise
def setup_logger(level):
    """Attach a stream handler using the decoded formatter to the root
    logger, set its level, and return it.

    *level* may be a numeric logging level or a level name accepted by
    ``Logger.setLevel``.
    """
    root_logger = logging.getLogger()
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(DecodedFormatter(LOGGING_FORMAT))
    root_logger.addHandler(stream_handler)
    root_logger.setLevel(level=level)
    return root_logger
def parse_arguments():
    """Parse command line arguments.

    Returns the parser itself together with the parsed namespace so the
    caller can print usage information on invalid input.
    """
    parser = argparse.ArgumentParser(PROG)
    parser.add_argument(
        '-v', '--verbosity',
        help='Python logging level. Options: DEBUG, INFO, WARN, ERROR (INFO by default)')
    parser.add_argument(
        '-r', '--remote',
        help='Run the command in other machine using ssh')
    parser.add_argument(
        '-c', '--config',
        help='JSON configuration file with SAP HANA instance data (sid, instance and password)')
    parser.add_argument(
        '-s', '--sid', help='SAP HANA sid')
    parser.add_argument(
        '-i', '--instance', help='SAP HANA instance')
    parser.add_argument(
        '-p', '--password', help='SAP HANA password')
    # Two subcommand groups, each populated by its own helper below.
    # BUG FIX: corrected the user-facing typo "databse" -> "database".
    subcommands = parser.add_subparsers(
        title='subcommands', description='valid subcommands', help='additional help')
    hana_subparser = subcommands.add_parser(
        'hana', help='Commands to interact with SAP HANA database')
    sr_subparser = subcommands.add_parser(
        'sr', help='Commands to interact with SAP HANA system replication')
    parse_hana_arguments(hana_subparser)
    parse_sr_arguments(sr_subparser)
    args = parser.parse_args()
    return parser, args
def parse_hana_arguments(hana_subparser):
    """
    Register the 'hana' sub-subcommands on *hana_subparser*.
    """
    subcommands = hana_subparser.add_subparsers(
        title='hana', dest='hana', help='Commands to interact with SAP HANA databse')
    # Simple subcommands that take no extra options.
    for name, help_text in (
            ('is_running', 'Check if SAP HANA database is running'),
            ('version', 'Show SAP HANA database version'),
            ('start', 'Start SAP HANA database'),
            ('stop', 'Stop SAP HANA database'),
            ('info', 'Show SAP HANA database information'),
            ('kill', 'Kill all SAP HANA database processes'),
            ('overview', 'Show SAP HANA database overview'),
            ('landscape', 'Show SAP HANA database landscape'),
            ('uninstall', 'Uninstall SAP HANA database instance')):
        subcommands.add_parser(name, help=help_text)

    # Options shared by the subcommands that open a database connection.
    connection_options = (
        ('--key_name',
         'Keystore to connect to sap hana db '
         '(if this value is set user, password and database are omitted'),
        ('--user_name', 'User to connect to sap hana db'),
        ('--user_password', 'Password to connect to sap hana db'),
        ('--database', 'Database name to connect'))

    dummy = subcommands.add_parser('dummy', help='Get data from DUMMY table')
    for option_flag, help_text in connection_options:
        dummy.add_argument(option_flag, help=help_text)

    hdbsql = subcommands.add_parser('hdbsql', help='Run a sql command with hdbsql')
    for option_flag, help_text in connection_options:
        hdbsql.add_argument(option_flag, help=help_text)
    hdbsql.add_argument('--query', help='Query to execute')

    user_key = subcommands.add_parser('user', help='Create a new user key')
    for option_flag, help_text in (
            ('--key_name', 'Key name'),
            ('--environment', 'Database location (host:port)'),
            ('--user_name', 'User to connect to sap hana db'),
            ('--user_password', 'Password to connect to sap hana db'),
            ('--database', 'Database name to connect')):
        user_key.add_argument(option_flag, help=help_text, required=True)

    backup = subcommands.add_parser('backup', help='Create node backup')
    backup.add_argument('--name', help='Backup file name', required=True)
    backup.add_argument('--database', help='Database name to connect', required=True)
    backup.add_argument('--key_name', help='Key name')
    backup.add_argument('--user_name', help='User to connect to sap hana db')
    backup.add_argument('--user_password', help='Password to connect to sap hana db')
def parse_sr_arguments(sr_subparser):
    """
    Register the system replication ('sr') sub-subcommands on *sr_subparser*.

    The subcommands are described as a (name, help, options) table and
    registered in order, which keeps the help output identical.
    """
    subcommands = sr_subparser.add_subparsers(
        title='sr', dest='sr', help='Commands to interact with SAP HANA system replication')
    replication_subcommands = (
        ('state', 'Show SAP HANA system replication state',
         (('--sapcontrol', {'help': 'Run with sapcontrol', 'action': 'store_true'}),)),
        ('status', 'Show SAP HANAsystem replication status',
         (('--sapcontrol', {'help': 'Run with sapcontrol', 'action': 'store_true'}),)),
        ('disable',
         'Disable SAP HANA system replication (to be executed in Primary node)', ()),
        ('cleanup', 'Cleanup SAP HANA system replication',
         (('--force', {'help': 'Force the cleanup', 'action': 'store_true'}),)),
        ('takeover',
         'Perform a takeover operation (to be executed in Secondary node)', ()),
        ('enable', 'Enable SAP HANA system replication primary site',
         (('--name', {'help': 'Primary site name', 'required': True}),)),
        ('register', 'Register SAP HANA system replication secondary site',
         (('--name', {'help': 'Secondary site name', 'required': True}),
          ('--remote_host', {'help': 'Primary site hostname', 'required': True}),
          ('--remote_instance',
           {'help': 'Primary site SAP HANA instance number', 'required': True}),
          ('--replication_mode',
           {'help': 'System replication replication mode', 'default': 'sync'}),
          ('--operation_mode',
           {'help': 'System replication operation mode', 'default': 'logreplay'}))),
        ('unregister', 'Unegister SAP HANA system replication secondary site',
         (('--name', {'help': 'Primary site name', 'required': True}),)),
        ('copy_ssfs', 'Copy current node ssfs files to other host',
         (('--remote_host', {'help': 'Other host name', 'required': True}),
          ('--remote_password',
           {'help': 'Other host SAP HANA instance password (sid and instance must match '
                    'with the current host)', 'required': True}))))
    for name, help_text, options in replication_subcommands:
        cmd_parser = subcommands.add_parser(name, help=help_text)
        for option_flag, option_kwargs in options:
            cmd_parser.add_argument(option_flag, **option_kwargs)
# pylint:disable=W0212
def uninstall(hana_instance, logger):
    """
    Uninstall the SAP HANA database instance after interactive confirmation.

    Anything other than a literal 'y' on stdin cancels the operation.
    """
    logger.info(
        'This command will uninstall SAP HANA instance '
        'with sid %s and instance number %s (y/n): ',
        hana_instance.sid, hana_instance.inst)
    if input() != 'y':
        logger.info('Command execution canceled')
        return
    # The administration user name is derived from the instance sid.
    admin_user = hana.HanaInstance.HANAUSER.format(sid=hana_instance.sid)
    hana_instance.uninstall(admin_user, hana_instance._password)
def run_hdbsql(hana_instance, hana_args, cmd):
    """
    Execute the SQL statement *cmd* through hdbsql.

    The connection prefix comes from the instance, using the key name or
    user/password options carried in *hana_args*; the optional database
    name is added as a '-d' flag.
    """
    connect_prefix = hana_instance._hdbsql_connect(
        key_name=hana_args.key_name,
        user_name=hana_args.user_name,
        user_password=hana_args.user_password)
    database_option = '-d {} '.format(hana_args.database) if hana_args.database else ''
    full_cmd = '{hdbsql_cmd} {database}\\"{cmd}\\"'.format(
        hdbsql_cmd=connect_prefix, database=database_option, cmd=cmd)
    hana_instance._run_hana_command(full_cmd)
def run_hana_subcommands(hana_instance, hana_args, logger):
    """
    Dispatch and execute the selected 'hana' subcommand.

    Unknown subcommand names are silently ignored, matching the original
    if/elif chain.
    """
    def report_running_state():
        # Logged rather than returned so the CLI prints the state.
        logger.info('SAP HANA database running state: %s',
                    hana_instance.is_running())

    actions = {
        'is_running': report_running_state,
        'version': hana_instance.get_version,
        'start': hana_instance.start,
        'stop': hana_instance.stop,
        'info': lambda: hana_instance._run_hana_command('HDB info'),
        'kill': lambda: hana_instance._run_hana_command('HDB kill-9'),
        'overview': lambda: hana_instance._run_hana_command(
            'HDBSettings.sh systemOverview.py'),
        'landscape': lambda: hana_instance._run_hana_command(
            'HDBSettings.sh landscapeHostConfiguration.py'),
        'uninstall': lambda: uninstall(hana_instance, logger),
        'dummy': lambda: run_hdbsql(hana_instance, hana_args, 'SELECT * FROM DUMMY'),
        'hdbsql': lambda: run_hdbsql(hana_instance, hana_args, hana_args.query),
        'user': lambda: hana_instance.create_user_key(
            hana_args.key_name, hana_args.environment, hana_args.user_name,
            hana_args.user_password, hana_args.database),
        'backup': lambda: hana_instance.create_backup(
            hana_args.database, hana_args.name, hana_args.key_name,
            hana_args.user_name, hana_args.user_password),
    }
    action = actions.get(hana_args.hana)
    if action is not None:
        action()
def run_sr_subcommands(hana_instance, sr_args, logger):
    """
    Dispatch and execute the selected system replication ('sr') subcommand.

    *logger* is accepted for interface parity with run_hana_subcommands but
    is not used. Unknown subcommand names are silently ignored.
    """
    def show_state():
        suffix = ' --sapcontrol=1' if sr_args.sapcontrol else ''
        hana_instance._run_hana_command('hdbnsutil -sr_state{}'.format(suffix))

    def show_status():
        suffix = ' --sapcontrol=1' if sr_args.sapcontrol else ''
        # exception=False: the command's failure is not raised (matches the
        # original behaviour).
        hana_instance._run_hana_command(
            'HDBSettings.sh systemReplicationStatus.py{}'.format(suffix),
            exception=False)

    actions = {
        'state': show_state,
        'status': show_status,
        'disable': hana_instance.sr_disable_primary,
        'cleanup': lambda: hana_instance.sr_cleanup(sr_args.force),
        'takeover': lambda: hana_instance._run_hana_command('hdbnsutil -sr_takeover'),
        'enable': lambda: hana_instance.sr_enable_primary(sr_args.name),
        'register': lambda: hana_instance.sr_register_secondary(
            sr_args.name, sr_args.remote_host, sr_args.remote_instance,
            sr_args.replication_mode, sr_args.operation_mode),
        'unregister': lambda: hana_instance.sr_unregister_secondary(sr_args.name),
        'copy_ssfs': lambda: hana_instance.copy_ssfs_files(
            sr_args.remote_host, sr_args.remote_password),
    }
    action = actions.get(sr_args.sr)
    if action is not None:
        action()
def load_config_file(config_file, logger):
    """
    Read *config_file* and return its parsed JSON content.

    *logger* is accepted for interface compatibility with the other helpers
    but is not used here.
    """
    with open(config_file, 'r') as config_fp:
        return json.load(config_fp)
# pylint:disable=W0212
def run():
    """
    CLI entry point: parse the arguments, build the HanaInstance and
    dispatch to the selected subcommand handler.
    """
    parser, args = parse_arguments()
    logger = setup_logger(args.verbosity or logging.DEBUG)
    # -c/--config loads the connection data from a JSON file; otherwise the
    # sid/instance/password flags must all be present.
    if args.config:
        config = ConfigData(load_config_file(args.config, logger), logger)
    elif args.sid and args.instance and args.password:
        config = ConfigData(vars(args), logger)
    else:
        logger.info(
            'Configuration file or sid, instance and passwords parameters must be provided\n')
        parser.print_help()
        exit(1)
    # An explicit --remote flag overrides the configuration file entry.
    if args.remote:
        config.remote = args.remote
    try:
        hana_instance = hana.HanaInstance(
            config.sid, config.instance,
            config.password, remote_host=config.remote)
        if vars(args).get('hana'):
            run_hana_subcommands(hana_instance, args, logger)
        elif vars(args).get('sr'):
            run_sr_subcommands(hana_instance, args, logger)
        else:
            parser.print_help()
    except Exception as err:
        logger.error(err)
        exit(1)
# Run the CLI only when executed as a script (not on import).
if __name__ == "__main__":  # pragma: no cover
    run()
| [
"redbearder@gmail.com"
] | redbearder@gmail.com |
933a6403b9fcb0ceb52489beafbf2322a55457d1 | a4c4f93540b00dee9fc4f29423d6887ad38b9744 | /5chang_size.py | 24fc6882c537b5dff97ac855e281d343e04048ec | [] | no_license | yunruowu/chinesschess | 85dac9384482efe70bf6a8af5343c635de673037 | 0ef26c75118ae1a22567bea0a0b28d118294d30a | refs/heads/master | 2020-05-03T03:21:35.949036 | 2019-04-01T13:00:58 | 2019-04-01T13:00:58 | 178,396,029 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | import pygame
from pygame.locals import *
from sys import exit
import time
# Initial window size and the image that is tiled as the background.
SCREEN_SIZE = (640,480)
background_image = 'image/xia.jpg'
screen = pygame.display.set_mode(SCREEN_SIZE,RESIZABLE,32)
background = pygame.image.load(background_image).convert()
while True:
    # Block until the next event, then redraw the tiled background.
    event = pygame.event.wait()
    if event.type == QUIT:
        exit()
    if event.type == VIDEORESIZE:
        # Track the new window size; the blit loop below fills it.
        SCREEN_SIZE = event.size
        pygame.display.set_caption("Window resize to" + str(event.size))
        print("adsdsaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")  # NOTE(review): leftover debug print
    screen_width,screen_height = SCREEN_SIZE
    # Tile the background image to cover the whole window.
    for y in range(0,screen_height,background.get_height()):
        for x in range(0,screen_width,background.get_width()):
            # print("sssssssds")
            screen.blit(background,(x,y))
    #time.sleep(1)
    pygame.display.update()
"mcdxwan@outlook.com"
] | mcdxwan@outlook.com |
7b98da2a271da01e6a3ecf40650fe0c35e12806f | 10969f8af12e1d7bbef4f73bf28c914c38b304a1 | /myproject/webapp/migrations/0013_testrecord_stud_batch.py | c817b80ca8dce1e0a997ce152c8829f92ff8db76 | [] | no_license | rash296/mgserver | 3a4f63044a502f06fa73da2f1d1f035d2317648e | 579ac5bf1668d571605cd83ebfa7d3d4814e37a8 | refs/heads/master | 2021-01-01T04:06:53.552186 | 2016-04-15T06:40:17 | 2016-04-15T06:40:17 | 56,229,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the nullable integer field ``stud_batch`` to the TestRecord model."""
    dependencies = [
        ('webapp', '0012_auto_20160412_0938'),
    ]
    operations = [
        migrations.AddField(
            model_name='testrecord',
            name='stud_batch',
            field=models.IntegerField(null=True),
        ),
    ]
| [
"rashmiwilson296@gmail.com"
] | rashmiwilson296@gmail.com |
fce7541977411e876802edacdb39d8e9fba16e1d | 612151ec646f816d899a0b30103b1eaf83bb8a0e | /Defcom/Server/User.py | 436bc47d42e377388bd805164708c35c2f89a921 | [] | no_license | thavythach/csci4xx-applied-cryptography-DefCom | d464f64cc74aa7d23da1314e48e78e8b906871e1 | 66ffa2be63894e05716f51b08c6b7af76b9172b4 | refs/heads/master | 2021-08-28T12:32:28.718534 | 2017-12-12T08:37:02 | 2017-12-12T08:37:02 | 111,674,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py |
class User:
    """A registered account: user name, password and public key material."""

    def __init__(self, user_name, password, public_key, certificate):
        # Store the credentials and key material verbatim.
        self.user_name = user_name
        self.password = password
        self.public_key = public_key
        self.certificate = certificate

    def __str__(self):
        # Same text as the original concatenation, built with a format string.
        return "Username: {} password: {}".format(self.user_name, self.password)
"thavythach@gmail.com"
] | thavythach@gmail.com |
b18327335bf7381a5c62249466674a9b3869463d | 5c07caf3198bff925171e8629873c5f9e674fe8a | /python_env/bin/pip3 | 2ebf159cf9b24600356a62e3214fe11839cb307d | [] | no_license | aspyltsov65/CCMN | 598980928ee569fa8b47b21070ff0f630b95e329 | 69e91df5692b43ead7de84a574c336bc7b10cd24 | refs/heads/master | 2022-12-23T19:44:01.820312 | 2019-06-10T20:15:25 | 2019-06-10T20:15:25 | 182,294,663 | 2 | 2 | null | 2022-12-09T21:58:14 | 2019-04-19T16:31:46 | Python | UTF-8 | Python | false | false | 252 | #!/Users/apyltsov/Projects/CCMN/python_env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
# pip console-script shim: normalize argv[0] (strip any "-script.py(w)" or
# ".exe" wrapper suffix) before handing control to pip's CLI entry point.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"aspyltsova.fitu16@kubg.edu.ua"
] | aspyltsova.fitu16@kubg.edu.ua | |
979bb7bcb412033c7187ed519e81690a81801121 | d4ea02450749cb8db5d8d557a4c2616308b06a45 | /students/HarveyHerela/lesson05/mailroom3.py | ded807fd235fc9b1f38decbdc6b5946619ff43ca | [] | no_license | UWPCE-PythonCert-ClassRepos/Self_Paced-Online | 75421a5bdd6233379443fc310da866ebfcd049fe | e298b1151dab639659d8dfa56f47bcb43dd3438f | refs/heads/master | 2021-06-16T15:41:07.312247 | 2019-07-17T16:02:47 | 2019-07-17T16:02:47 | 115,212,391 | 13 | 160 | null | 2019-11-13T16:07:35 | 2017-12-23T17:52:41 | Python | UTF-8 | Python | false | false | 4,815 | py | #!/usr/bin/env python
import datetime
import os
# Seed data: each donor is a dict with a ("first", "last") name tuple and a
# list of past donation amounts.
donors = [{"name": ("Chase", "Zuma"), "donations": [25 * x for x in range(2)]},
          {"name": ("Marshall", "Rocky"), "donations": [30 * x for x in range(2, 4)]},
          {"name": ("Skye", "Rubble"), "donations": [45 * x for x in range(10, 13)]}]
def get_name(prompt):
    """Anything can be a name, so accept anything as a name.

    Typing 'list' prints every donor in the database and prompts again; any
    other input is stripped and returned as the name.
    """
    while True:
        print("Type 'list' for a list of names in the database, '~' to go back")
        answer = input(prompt).strip()
        if answer.lower() != "list":
            return answer
        for donor in donors:
            print("{0} {1}".format(*donor["name"]))
def send_thank_you():
    """Prompts for a donor name and donation amount, then prints a 'thank you' message to the screen.

    The donation is appended to an existing donor when the (first, last)
    name matches, otherwise a new donor entry is created.
    """
    firstname = get_name("First name: ")
    if firstname == "~":
        return
    lastname = get_name("Last name: ")
    if lastname == "~":
        return
    # Keep prompting until the user enters a number (or '~' to abort).
    # Bug fix: the original converted to float inside a
    # "while not amount.isnumeric()" loop, so re-evaluating the condition on
    # the float raised AttributeError on every valid input. Using float()
    # with try/except also accepts decimal amounts such as 12.50.
    while True:
        print("Donation amount, or '~' to go to main menu:")
        raw_amount = input("$ ").strip()
        if raw_amount == '~':
            return
        try:
            amount = float(raw_amount)
            break
        except ValueError:
            continue
    # Add the donation
    name = (firstname, lastname)
    for d in donors:
        # Bug fix: the original tested "name in d", which checks the dict
        # KEYS ("name"/"donations") and therefore never matched, creating a
        # duplicate donor entry for every donation.
        if d["name"] == name:
            # If it's an existing donor, add it to their donation list.
            d["donations"].append(amount)
            break
    else:
        # If it's a new donor, create a new entry.
        donors.append({"name": name, "donations": [amount]})
    thank_you_msg = "Thank you {firstname} {lastname} for your generous donation of ${amount:.2f}".format(
        firstname=firstname, lastname=lastname, amount=amount)
    print(thank_you_msg)
def get_total(donor_data):
    """Sort key: the total-given column (index 1 of a row built by create_row)."""
    return donor_data[1]
def create_row(donor_data):
    """Build a report row: [full name, total given, gift count, average gift]."""
    donations = donor_data["donations"]
    first, last = donor_data["name"]
    gift_count = len(donations)
    total_given = sum(donations)
    return ["{0} {1}".format(first, last), total_given, gift_count,
            total_given / gift_count]
def create_report():
    """Prints a list of all donors, sorted by total amount given."""
    # Build one row per donor and order by the total-given column.
    rows = sorted((create_row(donor) for donor in donors),
                  key=get_total, reverse=True)
    lines = ["Donor Name | Total Given | Num Gifts | Average Gift\n",
             "--------------------------------------------------------\n"]
    row_format = "{name:<14} ${total:>13.2f} {num:>9d} ${avg:>12.2f}\n"
    for fullname, total, num, avg in rows:
        lines.append(row_format.format(name=fullname, total=total, num=num, avg=avg))
    print("\n", "".join(lines))
def send_letters_everyone():
    """Create letters for everyone in the database, and write them to files.

    Letters are rendered from 'template.txt' (expects the placeholders
    {firstname}, {lastname}, {amount} and {total}) into a new directory
    named after the current timestamp. Each letter reports the donor's most
    recent donation and their running total.
    """
    num = 0
    # Timestamped output directory; ':' is replaced, presumably because it
    # is not portable in file names — TODO confirm.
    subdir = str(datetime.datetime.now())
    subdir = subdir.replace(':', '_')
    try:
        os.mkdir(subdir)
    except OSError as oserr:
        print(oserr)
        print("\nThere was an error creating the directory.\n")
        return
    for d in donors:
        filename = "./{dir}/{0}_{1}.txt".format(*d["name"], dir=subdir)
        try:
            with open(filename, 'w') as outfile, open("template.txt", 'r') as infile:
                donations = d["donations"]
                # Most recent donation (last list entry).
                last = donations[len(donations) - 1]
                total = 0
                for gift in donations:
                    total += gift
                name = d["name"]
                outfile.write(infile.read().format(firstname=name[0], lastname=name[1], amount=last, total=total))
        except FileNotFoundError as fnferr:
            print(fnferr)
            print("\nThere was an error reading the template file")
            return
        except KeyError as kerr:
            # Raised when the template references a placeholder that is not
            # supplied above.
            print("Key Error:", kerr, "is a bad key.")
            print("There was an error with the template {keys}. Verify the {keys} are correct.")
            return
        except ValueError as verr:
            # Raised by str.format on a malformed format specification.
            print("There was an error while reading the formatting strings.")
            return
        num += 1
    print(num, "letters created.")
if __name__ == "__main__":
    # Menu dispatch: each key maps to the action it triggers; "0" quits.
    choices = {
        "1": send_thank_you,
        "2": create_report,
        "3": send_letters_everyone}
    menu = ("\n\nPick an action by number:\n"
            "1: Send a 'thank you' note\n"
            "2: Create a report\n"
            "3: Send letters to everyone\n"
            "0: Quit")
    while True:
        print(menu)
        selection = input(":").strip()
        if selection == "0":
            break
        if selection in choices:
            choices[selection]()
| [
"herelah@uw.edu"
] | herelah@uw.edu |
f161dfd93a9ee6f9e40d4a3e791a7fd91f35b6f9 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_12261.py | 6048a7e7b0d30415153952be2d88ff3f533b7b1a | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | # weird django file upload error
enctype="multipart/form-data"
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
2520af0b5128fb372cc2fef73350890249d44869 | 5396a46275e52bfc972f05097e925742d5bbf2d1 | /_2016/eola/thumbnails.py | 9bc1e91212c3f71dae4f75496806adaf7814e43c | [
"MIT"
] | permissive | 3b1b/videos | 6ab0e4fe0fb07d15b5455f8726131a880437c42c | e841b1410fdda2d3bddb7cfa12ce070a3b66a026 | refs/heads/master | 2023-08-29T01:37:23.424512 | 2023-08-16T03:35:03 | 2023-08-16T03:35:03 | 325,873,493 | 4,601 | 1,868 | null | 2023-03-30T08:15:37 | 2020-12-31T21:07:33 | Python | UTF-8 | Python | false | false | 4,029 | py | from manim_imports_ext import *
from _2016.eola.chapter9 import Jennifer, You
class Chapter0(LinearTransformationScene):
CONFIG = {
"include_background_plane" : False,
"t_matrix" : [[3, 1], [2, -1]]
}
def construct(self):
self.setup()
self.plane.fade()
for mob in self.get_mobjects():
mob.set_stroke(width = 6)
self.apply_transposed_matrix(self.t_matrix, run_time = 0)
class Chapter1(Scene):
    """Thumbnail: an arrow, the text 'vs.', and a coordinate column vector."""
    def construct(self):
        arrow = Vector(2*UP+RIGHT)
        vs = OldTexText("vs.")
        array = Matrix([1, 2])
        array.set_color(TEAL)
        # Lay the three pieces out horizontally and scale to fit the frame.
        everyone = VMobject(arrow, vs, array)
        everyone.arrange(RIGHT, buff = 0.5)
        everyone.set_height(4)
        self.add(everyone)
class Chapter2(LinearTransformationScene):
    """Thumbnail: a gradient-colored field of vectors under the word 'Span'."""
    def construct(self):
        self.lock_in_faded_grid()
        # One vector per half-integer lattice point across the visible frame.
        vectors = VMobject(*[
            Vector([x, y])
            for x in np.arange(-int(FRAME_X_RADIUS)+0.5, int(FRAME_X_RADIUS)+0.5)
            for y in np.arange(-int(FRAME_Y_RADIUS)+0.5, int(FRAME_Y_RADIUS)+0.5)
        ])
        vectors.set_submobject_colors_by_gradient(PINK, BLUE_E)
        words = OldTexText("Span")
        words.scale(3)
        words.to_edge(UP)
        words.add_background_rectangle()
        self.add(vectors, words)
class Chapter3(Chapter0):
    """Same thumbnail as Chapter0 with a different transformation matrix."""
    CONFIG = {
        "t_matrix" : [[3, 0], [2, -1]]
    }
class Chapter4p1(Chapter0):
    """Same thumbnail as Chapter0 with a shear matrix."""
    CONFIG = {
        "t_matrix" : [[1, 0], [1, 1]]
    }
class Chapter4p2(Chapter0):
    """Same thumbnail as Chapter0 with yet another transformation matrix."""
    CONFIG = {
        "t_matrix" : [[1, 2], [-1, 1]]
    }
class Chapter5(LinearTransformationScene):
    """Thumbnail: the unit square (yellow fill) after the scene's transform."""
    def construct(self):
        self.plane.fade()
        self.add_unit_square()
        self.plane.set_stroke(width = 6)
        VMobject(self.i_hat, self.j_hat).set_stroke(width = 10)
        self.square.set_fill(YELLOW, opacity = 0.7)
        self.square.set_stroke(width = 0)
        # Apply the scene's default t_matrix instantly (still image).
        self.apply_transposed_matrix(self.t_matrix, run_time = 0)
class Chapter9(Scene):
    """Thumbnail: two characters give different coordinates for one vector."""
    def construct(self):
        you = You()
        jenny = Jennifer()
        you.change_mode("erm")
        jenny.change_mode("speaking")
        you.shift(LEFT)
        jenny.shift(2*RIGHT)
        vector = Vector([3, 2])
        vector.center().shift(2*DOWN)
        vector.set_stroke(width = 8)
        vector.tip.scale(2)
        # Each character's speech bubble holds their coordinate description
        # of the same vector.
        you.coords = Matrix([3, 2])
        jenny.coords = Matrix(["5/3", "1/3"])
        for pi in jenny, you:
            pi.bubble = pi.get_bubble(SpeechBubble, width = 3, height = 3)
            if pi is you:
                pi.bubble.shift(MED_SMALL_BUFF*RIGHT)
            else:
                pi.coords.scale(0.8)
                pi.bubble.shift(MED_SMALL_BUFF*LEFT)
            pi.bubble.add_content(pi.coords)
            pi.add(pi.bubble, pi.coords)
            pi.look_at(vector)
        self.add(you, jenny, vector)
class Chapter10(LinearTransformationScene):
    """Thumbnail: the equation A v = lambda v over a transformed grid."""
    CONFIG = {
        "foreground_plane_kwargs" : {
            "x_radius" : FRAME_WIDTH,
            "y_radius" : FRAME_HEIGHT,
            "secondary_line_ratio" : 1
        },
        "include_background_plane" : False,
    }
    def construct(self):
        v_tex = "\\vec{\\textbf{v}}"
        eq = OldTex("A", v_tex, "=", "\\lambda", v_tex)
        eq.set_color_by_tex(v_tex, YELLOW)
        eq.set_color_by_tex("\\lambda", MAROON_B)
        eq.scale(3)
        eq.add_background_rectangle()
        eq.shift(2*DOWN)
        title = OldTexText(
            "Eigen", "vectors \\\\",
            "Eigen", "values"
        , arg_separator = "")
        title.scale(2.5)
        title.to_edge(UP)
        # title.set_color_by_tex("Eigen", MAROON_B)
        title[0].set_color(YELLOW)
        title[2].set_color(MAROON_B)
        title.add_background_rectangle()
        # [-1, 1] is an eigenvector (eigenvalue 2) of the applied matrix.
        self.add_vector([-1, 1], color = YELLOW, animate = False)
        self.apply_transposed_matrix([[3, 0], [1, 2]])
        self.plane.fade()
        self.remove(self.j_hat)
        self.add(eq, title)
| [
"grant@3blue1brown.com"
] | grant@3blue1brown.com |
ccf55017fbc1f4207985eaa80f14722daf999f3e | 269feb0a04e10df899b7cf0d37c42fd295fd6ac0 | /5_三角形斜边长.2.py | 5c697de2b506b4189fc2744884b4f1700bf259d0 | [] | no_license | zhangxingxing12138/card | c0134951ded50b7cb8c129c28e07252f35796052 | 793de5c5546143b59f8fd169a4e0c2cea1a5b416 | refs/heads/master | 2020-03-23T11:45:29.070458 | 2018-10-16T00:15:01 | 2018-10-16T00:15:01 | 141,519,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | a=input("直角边a:")
b=input("直角边b:")
A=float(a)
B=float(b)
c=(A*A+B*B)**(1/2)
print(c)
| [
"you@example.com"
] | you@example.com |
6b9e7f9c11cceb46a46c9f7585e17c3c82e83ca1 | 42618fe7e1e5ede9579131726f6c204e75bd3fd8 | /bookB116/api/booking/__init__.py | 5fe66e968f97c39172e6634daa6e58a4ba1560f2 | [] | no_license | pkuphysu/bookB116-backend | df6d13a493bb86c39e257833db3bf71cbb7c2b2c | aa4af1e2afac0ab4a9c0838aa1b4cd7f160bef4c | refs/heads/master | 2023-08-22T12:32:56.500854 | 2021-10-07T16:25:42 | 2021-10-07T16:25:42 | 349,361,519 | 0 | 0 | null | 2021-05-02T01:30:10 | 2021-03-19T09:05:49 | Python | UTF-8 | Python | false | false | 121 | py | from flask import Blueprint
# Blueprint for the booking API; routes and CLI commands attach to it below.
bp = Blueprint('booking', __name__)
# Imported after bp exists to avoid a circular import; importing them
# registers handlers on bp as a side effect.
from . import routes  # noqa
from . import cli  # noqa
| [
"36528777+AllanChain@users.noreply.github.com"
] | 36528777+AllanChain@users.noreply.github.com |
5cc0139aa5321db4c991af5ca4902a1878f8d7f1 | ec1deb682fb96a1f937f2fca5f161aa951462876 | /unittestPython/part_1/name_function.py | 61209de86dc7aec85c8f1a819784981abebebc0c | [] | no_license | AnatoliKosarev/Python-beginner-course--Teclado- | 31d82f5e9a1f39e2970323bed9de1fd539990565 | fa91199938d6975b5874341585343566caaf3600 | refs/heads/main | 2023-06-30T12:14:33.779827 | 2021-07-24T11:16:19 | 2021-07-24T11:16:19 | 376,371,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | def get_formatted_name(first, last, middle=""): # middle name is optional
if middle:
full_name = f"{first} {middle} {last}"
else:
full_name = f"{first} {last}"
return full_name.title()
| [
"anatoli.kosarev@gmail.com"
] | anatoli.kosarev@gmail.com |
74fca2c41cfc395205bb1486f3f18e4ee709c657 | d8c123d71133e2ef3cd9c2818e1107238d2e6314 | /net.py | d216df7a0f6964b694a2382a89eabe6a073f63fd | [] | no_license | ljf61/XXXNet | 381df4f15e8060551aafd4ee19e359c1881c5a26 | be923d14d083a3aff43145361cd72d09d5499802 | refs/heads/master | 2020-03-12T23:01:16.645489 | 2018-05-04T09:04:45 | 2018-05-04T09:04:45 | 130,857,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | from net_parts import *
class AE(nn.Module):
    """
    Convolutional auto-encoder with an auxiliary pose-regression head.

    forward() returns (pose_xy, depth): two pose values regressed from the
    flattened bottleneck features, and a one-channel map decoded from the
    same features.
    """

    def __init__(self):
        super(AE, self).__init__()
        # Encoder: 6-channel input down to a (1024, 4, 7) bottleneck.
        self.encoder = nn.Sequential(
            inconv(6, 64),
            down1(64, 128),
            down1(128, 256),
            down2(256, 512),
            down2(512, 512),
            down2(512, 1024),
            encoder_out(1024, 1024))
        # Decoder: upsample the bottleneck back to a 1-channel output.
        self.decoder = nn.Sequential(
            only_up(1024, 512),
            only_up(512, 256),
            only_up(256, 128),
            only_up(128, 64),
            only_up(64, 32),
            outconv(32, 1))
        # Pose head: fully connected layers on the flattened bottleneck.
        self.poseRegress1 = nn.Sequential(
            fc(1024 * 4 * 7, 2048),
            fc(2048, 128),
            nn.Linear(128, 2))

    def forward(self, x):
        bottleneck = self.encoder(x)
        # Flatten the (N, 1024, 4, 7) feature map for the fully connected head.
        flat_features = bottleneck.view((-1, 1024 * 4 * 7))
        pose_xy = self.poseRegress1(flat_features)
        depth = self.decoder(bottleneck)
        return pose_xy, depth
"1451161@tongji.edu.cn"
] | 1451161@tongji.edu.cn |
db09f5e6aeb8defe8a7c9c365689f0ee46b07dc4 | 2fd0c65aa0f72133f773dac5d9a5c48fe9e26fac | /Dsz/PyScripts/Lib/dsz/mca/network/cmd/banner/errors.py | 3dffd24a2b423eab69b50b74ee3889931f22a361 | [] | no_license | FingerLeakers/DanderSpritz_docs | f5d2430e0b86b1b2f0684f02ddd4fa973a5a7364 | d96b6a71c039b329f9f81544f645857c75360e7f | refs/heads/master | 2021-01-25T13:05:51.732149 | 2018-03-08T01:22:49 | 2018-03-08T01:22:49 | 123,527,268 | 2 | 0 | null | 2018-03-02T03:48:31 | 2018-03-02T03:48:30 | null | UTF-8 | Python | false | false | 1,606 | py | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, Feb 6 2017, 23:53:20)
# [GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)]
# Embedded file name: errors.py
import mcl.status
ERR_SUCCESS = mcl.status.MCL_SUCCESS
ERR_INVALID_PARAM = mcl.status.framework.ERR_START
ERR_CALLBACK_FAILED = mcl.status.framework.ERR_START + 1
ERR_MARSHAL_FAILED = mcl.status.framework.ERR_START + 2
ERR_SOCKET_INIT_FAILURE = mcl.status.framework.ERR_START + 3
ERR_SOCKET_BIND_FAILURE = mcl.status.framework.ERR_START + 4
ERR_SOCKET_OPTION_FAILURE = mcl.status.framework.ERR_START + 5
ERR_CONNECT_FAILURE = mcl.status.framework.ERR_START + 6
ERR_SEND_FAILURE = mcl.status.framework.ERR_START + 7
ERR_PACKET_TOO_LARGE = mcl.status.framework.ERR_START + 8
ERR_RECV_ERROR = mcl.status.framework.ERR_START + 9
ERR_RECV_TIMEOUT = mcl.status.framework.ERR_START + 10
ERR_NOT_IMPLEMENTED = mcl.status.framework.ERR_START + 11
errorStrings = {ERR_INVALID_PARAM: 'Invalid parameter(s)',
ERR_CALLBACK_FAILED: 'Error making callback',
ERR_MARSHAL_FAILED: 'Marshaling data failed',
ERR_SOCKET_INIT_FAILURE: 'Socket initialization failed',
ERR_SOCKET_BIND_FAILURE: 'Failed to bind to given source port',
ERR_SOCKET_OPTION_FAILURE: 'Failed to set socket option',
ERR_CONNECT_FAILURE: 'Connect request failed',
ERR_SEND_FAILURE: 'Send failed',
ERR_PACKET_TOO_LARGE: 'The given packet is too large to send',
ERR_RECV_ERROR: 'Error receiving data',
ERR_RECV_TIMEOUT: 'Timeout waiting for data',
ERR_NOT_IMPLEMENTED: 'Not implemented on this platform'
} | [
"francisck@protonmail.ch"
] | francisck@protonmail.ch |
447fc54eea01a339401254a7ab9eea6548c5d5d1 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/intentions/PyInvertIfConditionIntentionTest/generalNoElseTry.py | 8071d065e802d90e83cc718813bbe0e7adcdde7c | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 207 | py | def func():
value = "not-none"
<caret>if value is None:
print("None")
return
try:
return int(value)
except ValueError:
raise RuntimeError("Value is not int") | [
"intellij-monorepo-bot-no-reply@jetbrains.com"
] | intellij-monorepo-bot-no-reply@jetbrains.com |
afd1982cb921757a335e6fe1ccdb27b036605403 | cb4c3bee18aa9f08fe5de3b2a6d40b2d97610555 | /labb2/LinkedQFile.py | 324ddfd1390021751bce063bdcabb98ad4755566 | [] | no_license | harofax/Tilda | c587a4b11c0a5c944e9ca350ff6e792ccf5508eb | 67c9597ebd1bf6821b6913945531c8829b76237d | refs/heads/master | 2020-08-01T10:13:28.657537 | 2019-10-11T08:55:17 | 2019-10-11T08:55:17 | 210,961,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | class Node:
def __init__(self, value):
self.value = value
self.next_node = None
class LinkedQ:
def __init__(self):
self.first = None
self.last = None
self.size = 0
def enqueue(self, x):
new_node = Node(x)
if self.last is not None:
self.last.next_node = new_node
else:
self.first = new_node
self.last = new_node
self.size += 1
def dequeue(self):
if self.first.next_node:
result = self.first
self.first = self.first.next_node
self.size -= 1
return result.value
else:
result = self.first
self.size -= 1
return result.value
def is_empty(self):
if self.size == 0:
return True
else:
return False
def print_queue(self):
current = self.first
printlist = []
while current:
printlist.append(current.value)
current = current.next_node
print(printlist)
def get_size(self):
return self.size | [
"daniel.kh555@gmail.com"
] | daniel.kh555@gmail.com |
d2128b6ddba0947aaaf966f992839c4ab1c0ca07 | 1380713fcb151c2448bf520725012f21ebcceccd | /gastronom/discount/migrations/0009_auto_20201210_1117.py | 77006becaa698ff0ac6cd8a3fff466f2d3571b9b | [] | no_license | oshevelo/octPY | 172b9b1dec49c179a4df9033fbc9f94d922211f3 | 6b7cff3876ef154b57c6ea89c4a59bd352711a7d | refs/heads/main | 2023-03-02T10:56:47.434512 | 2021-02-07T10:32:26 | 2021-02-07T10:32:26 | 302,440,968 | 9 | 12 | null | 2021-02-08T15:57:05 | 2020-10-08T19:23:50 | Python | UTF-8 | Python | false | false | 438 | py | # Generated by Django 2.2 on 2020-12-10 09:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('discount', '0008_merge_20201210_1116'),
]
operations = [
migrations.AlterField(
model_name='discountcart',
name='valid_date_start',
field=models.DateTimeField(auto_now_add=True, verbose_name='Active from'),
),
]
| [
"alexandr.pichkurov@gmail.com"
] | alexandr.pichkurov@gmail.com |
1fb7e441f0b01c1a959827aa4ff80c6bf4ced77a | a24cedf9dea47ba64fbf779a8c18f39bd9a196cf | /halo_roller/urls.py | a81861188bce43e27eb6deda9e1867e535029ead | [] | no_license | rkuykendall/halo-roller | e795d19e0d1beef8a91a9bf417ce2c4908b1666b | 73b5ec77fc4070c4bf9694ffe99497ab3abb39f7 | refs/heads/master | 2022-12-18T16:10:35.798073 | 2020-09-23T22:12:01 | 2020-09-23T22:12:01 | 297,977,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | """halo_roller URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
# Route table: Django admin UI plus the games app's urlconf mounted at the root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('games.urls')),
]
| [
"robert@rkuykendall.com"
] | robert@rkuykendall.com |
9cb0854377c33c661a9ce385afdb362fe2179a06 | 85be00c9255e364e53cb5b57f92005f02b0500ff | /Signature/migrations/0007_auto_20210110_0108.py | 22496435908bc37609ae639b4857ebc79685f545 | [] | no_license | Deemjan/Alfa2 | ba8827a16b6d230463f0385ababb7730394e9fc1 | dc6e272e33ebaa035e66db2867c76a8be8f9d95c | refs/heads/CodeRefactored | 2023-05-29T19:06:01.623491 | 2021-03-20T12:10:47 | 2021-03-20T12:10:47 | 326,788,452 | 0 | 1 | null | 2021-05-02T12:50:14 | 2021-01-04T19:33:38 | Python | UTF-8 | Python | false | false | 792 | py | # Generated by Django 3.1.2 on 2021-01-09 20:08
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration: refresh default timestamps on KeyTable.

    NOTE(review): the defaults below are the wall-clock datetimes captured
    when makemigrations ran (2021-01-09 / +30 days); they are fixed values,
    not evaluated per-row -- presumably application code overwrites them.
    Confirm before relying on these defaults.
    """

    dependencies = [
        ('Signature', '0006_auto_20210110_0106'),
    ]
    operations = [
        migrations.AlterField(
            model_name='keytable',
            name='dateOfCreation',
            field=models.DateTimeField(default=datetime.datetime(2021, 1, 9, 20, 8, 20, 502959, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='keytable',
            name='dateOfExpiration',
            field=models.DateTimeField(default=datetime.datetime(2021, 2, 8, 20, 8, 20, 502959, tzinfo=utc), verbose_name='Срок действия подписи'),
        ),
    ]
| [
"orda1500@gmail.com"
] | orda1500@gmail.com |
c5e7c256b27e7736e6f9494d088ef758f7a7134b | 19d66411a3d46cb59067ef356adba6b66894d089 | /music_163/save_as_json.py | b4ac2431ff1d3336805a11a937774ee92ad17ad3 | [] | no_license | microw/music-kg | de16e7b0847abf76ba2c63391d84b0d871f2f219 | 71f4ea2678d37b812e8011f1bd17616227d144f2 | refs/heads/master | 2020-04-08T02:46:04.840601 | 2018-10-22T09:52:31 | 2018-10-22T09:52:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,104 | py | #coding:utf-8
import json
import os
import codecs
def read_json():
    """Load the accumulated records file and parse it as a JSON array.

    The file holds comma-separated JSON objects (see add_json), so the raw
    text is wrapped in brackets before parsing.  Prints the first record
    and returns the parsed list.
    """
    path = '../test/data/savejson.json'
    with codecs.open(path, 'r', 'utf-8') as fh:
        wrapped = '[%s]' % fh.read()
    records = json.loads(wrapped)
    print(records[0])
    return records
def save_json(data):
    """Serialise *data* and overwrite the records file as a JSON array."""
    payload = '[%s]' % json.dumps(data, ensure_ascii=False)
    with codecs.open('../test/data/savejson.json', 'w') as out:
        out.write(payload)
def add_json(data):
    """Append *data* as one JSON object to the records file.

    Records are separated by ",\n" so read_json can wrap the whole file in
    brackets and parse it as an array.  Echoes the serialised record.
    """
    record = json.dumps(data, ensure_ascii=False)
    target = '../test/data/savejson.json'
    print(record)
    exists = os.path.exists(target)
    needs_separator = exists and os.path.getsize(target)
    mode = 'a' if exists else 'w'
    with codecs.open(target, mode, 'utf-8') as out:
        if needs_separator:
            out.write(',' + '\n' + record)
        else:
            out.write(record)
def save_entity(data, file):
    """Append one entity line to *file* (UTF-8), newline-separating records.

    A leading newline is written only when the file already exists and is
    non-empty, so the file never starts with a blank line.

    The original duplicated three near-identical branches (exists+non-empty,
    exists+empty, missing); append mode creates the file when missing, so a
    single write covers all cases.
    """
    prefix = '\n' if os.path.exists(file) and os.path.getsize(file) else ''
    with codecs.open(file, 'a', 'utf-8') as out:
        out.write(prefix + data)
def save_entity_album(data):
    """Append one album entity line to the fixed album entity file.

    Same simplification as save_entity: append mode creates the file when
    missing, and a newline separator is only needed when the file already
    has content (replaces the original triplicated branches).
    """
    file = '../test/data/entity_album.txt'
    prefix = '\n' if os.path.exists(file) and os.path.getsize(file) else ''
    with codecs.open(file, 'a', 'utf-8') as out:
        out.write(prefix + data)
def save_entity_music(data):
    """Append one music entity line to the fixed music entity file.

    Same simplification as save_entity: append mode creates the file when
    missing, and a newline separator is only needed when the file already
    has content (replaces the original triplicated branches).
    """
    file = '../test/data/entity_music.txt'
    prefix = '\n' if os.path.exists(file) and os.path.getsize(file) else ''
    with codecs.open(file, 'a', 'utf-8') as out:
        out.write(prefix + data)
if __name__ == '__main__':
    # Ad-hoc manual check: builds a sample record (unused) and re-reads the
    # current records file, printing its first entry.
    data = {
        'name': '朱彤',
        'shares': 100,
        'price': 542.23
    }
    read_json()
| [
"zhutonggo@gmail.com"
] | zhutonggo@gmail.com |
ce8e770da65270910d6cf73784a0b9f62fe3f47c | 071edd7e107b89fa5571e8a785116831f88d8c0a | /monitor/database/info/history.py | 22c8d7b4fa32f8731dcc717d08b2752e5683571e | [] | no_license | functor/planetlab-monitor | d28d8b798776a603079d8e762c66a5a6c0cdddb3 | d7297f18f600b3dcb99581937f6646283f9824eb | refs/heads/master | 2021-05-15T11:20:55.710311 | 2011-12-24T00:14:33 | 2011-12-24T00:14:33 | 108,300,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,574 | py | from elixir import Entity, Field, OneToMany, ManyToOne, ManyToMany
from elixir import options_defaults, using_options, setup_all
from elixir import String, Integer as Int, DateTime, Boolean
from elixir.ext.versioned import *
from datetime import datetime,timedelta
from monitor.database.dborm import mon_metadata, mon_session
__metadata__ = mon_metadata
__session__ = mon_session
# your data model
class HistoryNodeRecord(Entity):
hostname = Field(String(250),primary_key=True)
last_checked = Field(DateTime,default=datetime.now)
last_changed = Field(DateTime,default=datetime.now)
status = Field(String,default="unknown")
haspcu = Field(Boolean,default=False)
firewall = Field(Boolean,default=False)
plc_nodeid = Field(Int,default=1)
plc_siteid = Field(Int,default=1)
acts_as_versioned(ignore=['last_changed', 'last_checked'])
@classmethod
def by_hostname(cls, hostname):
return cls.query.filter_by(hostname=hostname).first()
class HistoryPCURecord(Entity):
    """Per-PCU status record (Elixir entity), versioned on change."""
    plc_pcuid = Field(Int,primary_key=True)
    last_checked = Field(DateTime,default=datetime.now)
    last_changed = Field(DateTime,default=datetime.now)
    status = Field(String,default="unknown")
    last_valid = Field(DateTime,default=None)
    valid = Field(String,default="unknown")
    # Versioning excludes the bookkeeping timestamps updated on every check.
    acts_as_versioned(ignore=['last_changed', 'last_checked'])
    @classmethod
    def by_pcuid(cls, pcuid):
        """Return the record for *pcuid*, or None if absent."""
        return cls.query.filter_by(pcuid=pcuid).first()
class HistorySiteRecord(Entity):
    """Per-site aggregate record (Elixir entity): node/slice counts, message
    state and penalty bookkeeping, versioned on change."""
    loginbase = Field(String(250),primary_key=True)
    plc_siteid = Field(Int,default=1)
    last_checked = Field(DateTime,default=datetime.now)
    last_changed = Field(DateTime,default=datetime.now)
    nodes_total = Field(Int,default=0)
    nodes_up = Field(Int,default=0)
    slices_total = Field(Int,default=0)
    slices_used = Field(Int,default=0)
    # all nodes offline and never-contact.
    new = Field(Boolean,default=False)
    enabled = Field(Boolean,default=False)
    status = Field(String,default="unknown")
    message_id = Field(Int, default=0)
    message_status = Field(String, default=None)
    message_queue = Field(String, default=None)
    message_created = Field(DateTime, default=None)
    #message_last_reply = Field(DateTime, default=None)
    penalty_level = Field(Int, default=0)
    penalty_applied = Field(Boolean, default=False)
    penalty_pause = Field(Boolean, default=False)
    penalty_pause_time = Field(DateTime, default=None)
    # Versioning excludes fields that churn without representing a real change.
    acts_as_versioned(ignore=['last_changed', 'last_checked', 'message_status', 'penalty_pause_time'])
    @classmethod
    def by_loginbase(cls, loginbase):
        """Return the record for *loginbase*, or None if absent."""
        return cls.query.filter_by(loginbase=loginbase).first()
| [
"soltesz@cs.princeton.edu"
] | soltesz@cs.princeton.edu |
971d6c7a8b93db04103d5493b66aab379de626ae | 2794764ddbe9daf666601014cb84e5ca7b6ca7c3 | /Account/urls.py | d1d10c86cebf2fd2a839bfcf8f84f540ce97c97e | [] | no_license | aydanaderi/goldoon | 5b7341f1b94cb607bcc7b895fe22a6affb817cd7 | 3f4cc6a526eae70f55833d0b07d5209b243aff20 | refs/heads/main | 2023-01-19T16:12:22.837854 | 2020-11-26T15:46:24 | 2020-11-26T15:46:24 | 311,077,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | from django.urls import path
from knox import views as knox_views
from . import views
# Account routes: registration/login via knox tokens, password management,
# and the profile view.
urlpatterns = [
    path('signup/', views.RegisterAPI.as_view(), name = 'register'),
    path('login/', views.LoginAPI.as_view(), name = 'login'),
    path('logout/', knox_views.LogoutView.as_view(), name = 'logout'),
    path('change_password/', views.ChangePasswordView.as_view(), name = 'change-password'),
    path('reset/', views.ResetPasswodView, name = 'Reset_Password'),
    path('<int:username_id>/reset/confirm/', views.ConfirmResetPasswodView , name = 'confirm_Reset_password'),
    path('profile/', views.ProfileView, name = 'profile'),
]
| [
"ayda.f.naderi@gmail.com"
] | ayda.f.naderi@gmail.com |
d2abb6f2ca0db30eff1b7c9cd045858a1b1837b6 | 46adba1fe06298743f018abd0096c753256ac03a | /src/procgraph_vehicles/cairo_map_display.py | d05071495667e74265c75eb29020abf49801f486 | [] | no_license | ajaycharan/vehicles | 9e0f21a2550c56e83303329c1bdf8c40bde5c0eb | 2cfe467422160f90bc76800216ac42c0f13f2c4d | refs/heads/master | 2021-05-28T21:03:35.466743 | 2013-07-19T06:59:51 | 2013-07-19T06:59:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,686 | py | from cairo_utils import cairo_pixels, cairo_text_align
from geometry import SE2_from_SE3, SE3
from procgraph import BadConfig, Block
from procgraph.block_utils import make_sure_dir_exists
from procgraph_images import posneg, scale, reshape2d
from vehicles_cairo import (cairo_save, cairo_transform,
vehicles_cairo_display_all, cairo_rototranslate, cairo_ref_frame)
import numpy as np
import os
import subprocess
class VehiclesCairoDisplay(Block):
    ''' Produces a top-down plot of a circular arena.

    procgraph block: renders each boot_obs input either into a multi-page
    PDF (config format='pdf', written to config file) or into an RGB frame
    emitted on the 'rgb' output (format='png').  An optional sidebar shows
    observations, commands and episode annotations.
    '''
    Block.alias('vehicles_cairo_display')
    Block.config('format', 'pdf|png', default='pdf')
    Block.config('file', 'Output file (pdf)', default=None)
    Block.output('rgb', 'RGB data (png)')
    Block.config('transparent', 'Outputs RGB with transparent bg',
                 default=False)
    Block.config('width', 'Image width in points.', default=600)
    Block.config('height', 'Image height in points.', default=600)
    Block.config('sidebar_width', default=200)
    # Sidebar options
    Block.config('display_sidebar', default=True)
    Block.config('trace', 'Trace the path', default=False)
    Block.config('plotting_params',
                 'Configuration to pass to vehicles_cairo_display_all()',
                 default={})
    Block.config('sidebar_params',
                 'Configuration to pass to create_sidebar()',
                 default={})
    Block.config('swf', 'Converts PDF to SWF using pdf2swf', default=True)
    Block.input('boot_obs', '')
    def get_shape(self):
        """Total canvas size: configured size plus sidebar width if shown."""
        w = self.config.width
        if self.config.display_sidebar:
            w += self.config.sidebar_width
        h = self.config.height
        return (w, h)
    def init(self):
        """Create the cairo surface for the configured output format."""
        self.format = self.config.format
        (w, h) = self.get_shape()
        self.total_width = w
        self.total_height = h
        self.frame = 0
        if self.format == 'pdf':
            self.init_pdf()
        elif self.format == 'png':
            self.init_png()
        else:
            raise BadConfig('Invalid format %r.' % self.format, self, 'format')
        self.count = 0
        self.fps = None
        self.t0 = None
        self.tmp_cr = None
    def init_pdf(self):
        """Open a PDF surface on a temporary '.active' file (renamed on finish)."""
        self.filename = self.config.file
        self.tmp_filename = self.filename + '.active'
        make_sure_dir_exists(self.filename)
        self.info("Creating file %r." % self.filename)
        import cairo
        self.surf = cairo.PDFSurface(self.tmp_filename, # @UndefinedVariable
                                     self.total_width,
                                     self.total_height)
    def init_png(self):
        """Create an ARGB image surface backed by a numpy array."""
        import cairo
        w, h = self.total_width, self.total_height
        # note (w,h) here and (h,w,h*4) below; I'm not sure but it works
        self.argb_data = np.empty((h, w, 4), dtype=np.uint8)
        self.argb_data.fill(255)
        self.surf = cairo.ImageSurface.create_for_data(# @UndefinedVariable
            self.argb_data,
            cairo.FORMAT_ARGB32, # @UndefinedVariable
            w, h, w * 4)
    def update(self):
        """Render one frame, keeping a running fps estimate from timestamps."""
        # Estimate fps
        if self.count == 0:
            self.t0 = self.get_input_timestamp(0)
        if self.count >= 1:
            delta = self.get_input_timestamp(0) - self.t0
            self.fps = 1.0 * self.count / delta
        self.count += 1
        if self.format == 'pdf':
            self.update_pdf()
        elif self.format == 'png':
            self.update_png()
        else:
            assert False
    def update_png(self):
        """Draw into the image surface and emit the frame on the rgb output."""
        import cairo
        cr = cairo.Context(self.surf) # @UndefinedVariable
        self.draw_everything(cr)
        self.surf.flush()
        if not self.config.transparent:
            rgb = self.argb_data[:, :, :3].copy()
            # fix red/blue inversion
            rgb[:, :, 0] = self.argb_data[:, :, 2]
            rgb[:, :, 2] = self.argb_data[:, :, 0]
            assert rgb.shape[2] == 3
        else:
            rgb = self.argb_data.copy()
            # fix red/blue inversion
            rgb[:, :, 0] = self.argb_data[:, :, 2]
            rgb[:, :, 2] = self.argb_data[:, :, 0]
            assert rgb.shape[2] == 4
        self.output.rgb = rgb
    def update_pdf(self):
        """Paint the background, draw the frame, and emit one PDF page."""
        import cairo
        # If I don't recreate it, it will crash
        cr = cairo.Context(self.surf) # @UndefinedVariable
        if not self.config.transparent:
            # Set white background
            bg_color = [1, 1, 1]
            cr.rectangle(0, 0, self.total_width, self.total_height)
            cr.set_source_rgb(bg_color[0], bg_color[1], bg_color[2])
            cr.fill()
        else:
            # Green screen :-)
            cr.rectangle(0, 0, self.total_width, self.total_height)
            cr.set_source_rgba(0, 1, 0, 0)
            cr.fill()
        self.draw_everything(cr)
        self.surf.flush()
        self.surf.show_page() # Free memory self.cr?
    def draw_everything(self, cr):
        """Unpack boot_obs and render the arena (plus optional sidebar)."""
        boot_obs = self.input.boot_obs
        if 'id_episode' in boot_obs:
            id_episode = boot_obs['id_episode'].item()
        else:
            id_episode = ''
        id_vehicle = boot_obs['id_robot'].item()
        if 'extra' in boot_obs:
            extra = boot_obs['extra'].item()
        else:
            extra = {}
        def extra_draw_world(cr):
            # Overlay servo targets on the world drawing, when present.
            if 'servonav' in extra:
                plot_servonave(cr, extra['servonav'])
            if 'servoing_poses' in extra:
                plot_servoing_poses(cr, extra['servoing_poses'])
        plotting_params = self.config.plotting_params
        plotting_params['extra_draw_world'] = extra_draw_world
        sidebar_params = self.config.sidebar_params
        # todo: check
        sim_state = extra['robot_state']
        observations_values = boot_obs['observations']
        commands = boot_obs['commands']
        commands_source = boot_obs['commands_source'].item()
        timestamp = boot_obs['time_from_episode_start'].item()
        with cairo_save(cr):
            if self.config.display_sidebar:
                padding = 0.03 * self.config.width
                map_width = self.config.width - 2 * padding
                map_height = self.config.height - 2 * padding
                cr.translate(padding, padding)
            else:
                map_width = self.config.width
                map_height = self.config.height
            with cairo_save(cr):
                cr.rectangle(0, 0, map_width, map_height)
                cr.clip()
                # TODO: implement trace
                vehicles_cairo_display_all(cr,
                                           map_width,
                                           map_height,
                                           sim_state,
                                           **plotting_params)
                if self.config.display_sidebar:
                    cr.set_line_width(1)
                    cr.set_source_rgb(0, 0, 0)
                    cr.rectangle(0, 0, map_width, map_height)
                    cr.stroke()
            if self.config.display_sidebar:
                with cairo_transform(cr, t=[self.config.width, 0]):
                    create_sidebar(cr, width=self.config.sidebar_width,
                                   height=self.config.height,
                                   sim_state=sim_state,
                                   id_vehicle=id_vehicle,
                                   id_episode=id_episode,
                                   timestamp=timestamp,
                                   observations_values=observations_values,
                                   commands_values=commands,
                                   commands_source=commands_source,
                                   **sidebar_params)
    def finish(self):
        """Finalize output (PDF only needs explicit closing)."""
        if self.format == 'pdf':
            self.finish_pdf()
    def finish_pdf(self):
        """Close the PDF, move it into place, and optionally convert to SWF."""
        self.surf.finish()
        if os.path.exists(self.filename):
            os.unlink(self.filename)
        if os.path.exists(self.tmp_filename):
            os.rename(self.tmp_filename, self.filename)
        if self.config.swf:
            if self.fps is None:
                self.error('Only one frame seen?')
            else:
                basename, _ = os.path.splitext(self.filename)
                swf = '%s.swf' % basename
                try:
                    command = ['pdf2swf',
                               # "-b", # --defaultviewer
                               # "-l", # --defaultloader
                               '-G', # flatten
                               '-s', 'framerate=%d' % self.fps,
                               self.filename,
                               '-o', swf]
                    self.info(' '.join(command))
                    subprocess.check_call(command)
                except Exception as e:
                    self.error('Could not convert to swf: %s' % e)
                    if os.path.exists(swf):
                        os.unlink(swf)
        self.info("Completed %r." % self.filename)
class VehiclesDisplay(VehiclesCairoDisplay):
    ''' Produces a top-down plot of a circular arena.

    Variant of VehiclesCairoDisplay that takes the simulation state
    directly on boot_obs and draws only the arena (no sidebar).
    '''
    Block.alias('vehicles_cairo_display_all')
    Block.config('format', 'pdf|png', default='pdf')
    Block.config('file', 'Output file (pdf)', default=None)
    Block.output('rgb', 'RGB data (png)')
    Block.config('transparent', 'Outputs RGB with transparent bg',
                 default=False)
    Block.config('width', 'Image width in points.', default=600)
    Block.config('height', 'Image height in points.', default=600)
    Block.config('trace', 'Trace the path', default=False)
    Block.config('plotting_params',
                 'Configuration to pass to vehicles_cairo_display_all()',
                 default={})
    Block.config('swf', 'Converts PDF to SWF using pdf2swf', default=True)
    Block.input('boot_obs')
    def get_shape(self):
        """No sidebar: canvas is exactly the configured width x height."""
        w = self.config.width
        h = self.config.height
        return (w, h)
    def draw_everything(self, cr):
        """Clip to the map rectangle and render the full simulation state."""
        sim_state = self.input.boot_obs
        map_width = self.config.width
        map_height = self.config.height
        plotting_params = self.config.plotting_params
        with cairo_save(cr):
            cr.rectangle(0, 0, map_width, map_height)
            cr.clip()
            # TODO: implement trace
            vehicles_cairo_display_all(cr,
                                       map_width,
                                       map_height,
                                       sim_state,
                                       **plotting_params)
def create_sidebar(cr, width, height, sim_state, id_vehicle, id_episode, # @UnusedVariable
                   timestamp, observations_values,
                   commands_values, commands_source,
                   bg_color=None,
                   show_observations=True,
                   show_commands=True,
                   show_annotations=True):
    """Draw the informational sidebar: observations image with a 0..1
    colorbar, commands image with a -1..+1 colorbar, and text annotations
    (vehicle, agent, episode, time).  Sections can be toggled via the
    show_* flags; bg_color (if given) fills the sidebar background.
    """
    # Commands may arrive as a 1-D vector; render it as a one-row image.
    if len(commands_values.shape) == 1:
        commands_values = np.array([commands_values.tolist()])
    commands_rgb = posneg(commands_values,
                          max_value=(+1), # not sure +1
                          nan_color=[1, 1, 1])
    observations_rgb = scale(reshape2d(observations_values), min_value=0,
                             nan_color=[1, 1, 1])
    import cairo
    if bg_color is not None:
        cr.rectangle(0, 0, width, height)
        cr.set_source_rgb(bg_color[0], bg_color[1], bg_color[2])
        cr.fill()
    fo = cairo.FontOptions() # @UndefinedVariable
    fo.set_hint_style(cairo.HINT_STYLE_FULL) # @UndefinedVariable
    fo.set_antialias(cairo.ANTIALIAS_GRAY) # @UndefinedVariable
    cr.set_font_options(fo)
    # M is the base unit for font sizes and spacing, derived from the width.
    # M = width / 20.0
    M = width / 15.0
    legend_font_size = M * 0.75
    details_font_size = M
    label_font = 'Mono'
    legend_font = 'Serif'
    cr.set_source_rgb(0, 0, 0)
    padding_fraction = 0.1
    padding = width * padding_fraction
    nvalues = 128
    bar_width = 0.4 * width
    bar_ratio = 0.15
    bar_height = bar_width * bar_ratio
    spacer = 0.05 * width
    # One-row gradient images used as the two colorbars.
    values = np.linspace(-1, +1, nvalues)
    values = np.vstack([values] * 1)
    colorbar_posneg = posneg(values)
    values = np.linspace(-1, +1, nvalues)
    values = np.vstack([values] * 1)
    colorbar_scale = scale(values)
    cr.translate(0, 2 * M)
    if show_observations:
        with cairo_transform(cr, t=[width / 2, 0]):
            cr.select_font_face(label_font)
            cr.set_font_size(M)
            cairo_text_align(cr, 'observations', halign='center')
        cr.translate(0, M * 0.8)
        with cairo_transform(cr, t=[padding, 0]):
            data_width = width - 2 * padding
            # Don't draw grid if there are many pixels
            if max(observations_rgb.shape[0], observations_rgb.shape[1]) > 15:
                grid_color = None
            else:
                grid_color = [1, .9, .9]
            last_height = cairo_pixels(cr, observations_rgb, width=data_width,
                                       # Force square
                                       height=data_width,
                                       grid_color=grid_color)
        cr.translate(0, last_height)
        cr.translate(0, spacer)
        with cairo_transform(cr, t=[width / 2, 0]):
            with cairo_transform(cr, t=[-bar_width / 2, 0]):
                last_height = cairo_pixels(cr, colorbar_scale,
                                           bar_width, height=bar_height,
                                           grid_color=None)
            cr.set_font_size(legend_font_size)
            cr.select_font_face(legend_font)
            # Colorbar end labels.
            with cairo_transform(cr, t=[0, bar_height / 2]):
                with cairo_transform(cr, t=[-bar_width / 2 - M / 2, 0]):
                    cairo_text_align(cr, '0', 'right', 'middle')
                with cairo_transform(cr, t=[+bar_width / 2 + M / 2, 0]):
                    cairo_text_align(cr, '1', 'left', 'middle')
        cr.translate(0, last_height + spacer * 3)
    if show_commands:
        with cairo_transform(cr, t=[width / 2, 0]):
            cr.select_font_face(label_font)
            cr.set_font_size(M)
            cairo_text_align(cr, 'commands', halign='center')
        cr.translate(0, M * 0.8)
        padding = padding * 2
        with cairo_transform(cr, t=[padding, 0]):
            data_width = width - 2 * padding
            last_height = cairo_pixels(cr, commands_rgb, data_width)
        cr.translate(0, last_height)
        cr.translate(0, spacer)
        with cairo_transform(cr, t=[width / 2, 0]):
            with cairo_transform(cr, t=[-bar_width / 2, 0]):
                last_height = cairo_pixels(cr, colorbar_posneg,
                                           bar_width, height=bar_width * bar_ratio,
                                           grid_color=None)
            cr.set_font_size(legend_font_size)
            cr.select_font_face(legend_font)
            # Colorbar end labels.
            with cairo_transform(cr, t=[0, bar_height / 2]):
                with cairo_transform(cr, t=[-bar_width / 2 - M / 2, 0]):
                    cairo_text_align(cr, '-1', 'right', 'middle')
                with cairo_transform(cr, t=[+bar_width / 2 + M / 2, 0]):
                    cairo_text_align(cr, '+1', 'left', 'middle')
        cr.translate(0, last_height + spacer * 2)
    if show_annotations:
        cr.translate(width / 10, 0)
        strings = ['vehicle: %s' % id_vehicle,
                   ' agent: %s' % commands_source,
                   'episode: %s' % id_episode,
                   ' time: %6.2f' % timestamp,
                   ]
        cr.select_font_face('Mono')
        # Fit the font size to the longest annotation line.
        max_len = max(len(x) for x in strings)
        padding = 5
        font_size = 1.6 * width / (max_len + padding)
        cr.set_font_size(font_size)
        line = details_font_size * 1.2
        for s in strings:
            with cairo_save(cr):
                cr.show_text(s)
                cr.stroke()
            cr.translate(0, line)
def plot_servoing_poses(cr, servoing_poses):
    """Draw the servoing goal pose as a reference frame."""
    # TODO
    goal_pose = SE3.from_yaml(servoing_poses['goal'])
    with cairo_rototranslate(cr, goal_pose):
        cairo_ref_frame(cr, l=0.5)
def plot_servonave(cr, servonav):
    """Draw every servonav waypoint as a grey reference frame."""
    grey = [.6, .6, .6]
    for loc in servonav['locations']:
        waypoint = SE2_from_SE3(SE3.from_yaml(loc['pose']))
        with cairo_rototranslate(cr, waypoint):
            cairo_ref_frame(cr, l=0.5, x_color=grey, y_color=grey)
| [
"andrea@cds.caltech.edu"
] | andrea@cds.caltech.edu |
97db2337e260c58a117da2a98c9bc3a3e9d2f9d1 | cc79daf3eb5294a4ba27cb9a18aaf311a19532e8 | /app/textract.py | 5d3fff514a495cfdb2d70043b2ec878a5cacbaac | [] | no_license | zhengxingliu/TextConvert-Serverless | beac854a49470add93f72abbaa1cc1ae35228b07 | 1c94ab34465df76080abe677f571f8b39cf5711b | refs/heads/master | 2021-05-17T06:58:28.818986 | 2020-03-28T01:34:47 | 2020-03-28T01:34:47 | 250,685,143 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,180 | py | from flask import render_template, request, session, url_for, redirect
from app import app
from app import attachment
import json, os, uuid, boto3, datetime
@app.route('/image/<postID>',methods=['GET'])
def image(postID):
    """Render the image page for *postID*.

    Looks up the uploaded filename in DynamoDB; if found, presigns an S3
    URL for the upload and fetches (or lazily triggers, via the
    'a3textract' Lambda) the Textract text result from S3.

    NOTE(review): the bare `except:` clauses swallow all errors (including
    credential/network failures), treating them as "no image"/"no text" --
    consider narrowing to the specific boto3 exceptions.
    """
    s3 = boto3.client('s3')
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table("a3Textract")
    try: # look for user uploaded image
        response = table.get_item(Key={'postID': postID})
        image = response['Item']['filename']
    except: # display nothing if no image has been uploaded
        image = ''
        url = ''
        text = ''
        return render_template('image/image.html', postID=postID, url=url, text=text)
    else: # get uploaded image from s3
        s3 = boto3.client('s3')
        url = s3.generate_presigned_url('get_object', Params={'Bucket': 'ece1779a3textract',
                                        'Key': 'upload-' + postID + '-' + str(image)},ExpiresIn=3600)
        try: # check if textract has been performed for current image
            s3name = "textract-" + postID + '-' + image.split('.')[0] + '.txt'
            client = boto3.resource('s3')
            obj = client.Object('ece1779a3textract', s3name)
            body = obj.get()['Body'].read()
            text = body.decode(encoding="utf-8")
        except: # otherwise perform textract for the first and only time
            print("performing textract")
            name = 'upload-' + postID + '-' + image
            print("source:", name)
            client = boto3.client('lambda')
            payload = {"file": name}
            response = client.invoke(
                FunctionName='a3textract',
                Payload=json.dumps(payload))
            s3name = "textract-" + postID + '-' + image.split('.')[0] + '.txt'
            print('newname:',s3name)
            client = boto3.resource('s3')
            try:
                obj = client.Object('ece1779a3textract', s3name)
                body = obj.get()['Body'].read()
                text = body.decode(encoding="utf-8")
            except:
                print("file not readable")
                text = ''
            else: print('read textract')
        else:
            print("textract already performed for this file")
        return render_template('image/image.html', postID=postID, url=url, text = text)
# check if uploaded file has allowed image extensions
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
def allowed_file(filename):
    """Return True if *filename* has a dot and an allowed image extension."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/image_submit<postID>',methods=['POST'])
def image_submit(postID):
    """Handle an image upload for a new or existing post.

    Validates the extension, stages the file under /tmp, creates or updates
    the DynamoDB record, uploads the file to S3 under
    'upload-<postID>-<filename>', then redirects to the image page.

    NOTE(review): a disallowed file skips straight to the redirect with no
    user feedback -- confirm that is intended.
    """
    file = request.files['file']
    # verify acceptable file format
    if allowed_file(file.filename) == True :
        path = os.path.join('/tmp', file.filename)
        file.save(path)
        # create empty post for new file
        if postID == 'new':
            postID = uuid.uuid4().hex
            timestamp = str(datetime.datetime.now())
            timestamp = timestamp.rsplit('.')[0]
            # make new entry in databse
            client = boto3.resource('dynamodb')
            table = client.Table("a3Textract")
            response = {"postID": postID,
                        "filename": file.filename,
                        "author": session['username'],
                        "timestamp": timestamp}
            table.put_item(Item=response)
        else:
            # if this is an existing post then update database
            client = boto3.resource('dynamodb')
            table = client.Table("a3Textract")
            timestamp = str(datetime.datetime.now())
            timestamp = timestamp.rsplit('.')[0]
            table.update_item(
                Key={'postID': postID},
                UpdateExpression='SET #attr1 = :val1, #attr2 = :val2',
                ExpressionAttributeNames={'#attr1': 'timestamp', '#attr2': "filename"},
                ExpressionAttributeValues={':val1': timestamp, ':val2': file.filename})
        # save image to bucket
        s3 = boto3.client('s3')
        name = 'upload-' + postID + '-' + file.filename
        with open(path, "rb") as f:
            s3.upload_fileobj(f, "ece1779a3textract", name)
        os.remove(path)
    return redirect(url_for("image", postID=postID))
@app.route('/image_delete/<postID>',methods=['POST'])
def image_delete(postID):
    """Delete a post: its DynamoDB record plus the uploaded image and the
    Textract text object in S3, then redirect to the index.

    NOTE(review): the bare `except:` treats any failure (lookup, delete,
    network) as "nothing to delete" -- consider narrowing it.
    """
    try:
        #delete from dynamodb
        s3 = boto3.client('s3')
        dynamodb = boto3.resource('dynamodb')
        table = dynamodb.Table("a3Textract")
        response = table.get_item(Key={'postID': postID})
        image = response['Item']['filename']
        table.delete_item(Key={'postID': postID})
        # delete from s3
        original = 'upload-' + postID + '-' + image
        text = 'textract-' + postID + '-' + image.split('.')[0] + '.txt'
        s3 = boto3.client('s3')
        s3.delete_object(Bucket="ece1779a3textract", Key=original)
        s3.delete_object(Bucket="ece1779a3textract", Key=text)
        print("detele orginal image",original)
        print("detele textract image",text)
    except:
        print("nothing to delete")
        return redirect(url_for('index'))
    else:
        return redirect(url_for('index'))
| [
"karenliu@Karens-MacBook-Pro.local"
] | karenliu@Karens-MacBook-Pro.local |
aae9e47e7b71195487d7d663057314cd18ed9106 | ada78b2ab024ea917bb7351f3ca867892f1bdb5d | /jobs/views.py | 2410c73b63af04619d7085c36b628101076d71c8 | [] | no_license | stephanebruckert/linkedin-company-jobs | 5260edfbd8c1aa7b11838f1cb6e43fa44d9e4357 | b8d56950b085018c2b5ef49363b9a815efcfa4fe | refs/heads/master | 2021-01-10T20:17:43.612137 | 2014-10-13T15:20:32 | 2014-10-13T15:20:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,768 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext, loader
from linkedin import linkedin
from oauthlib import *
from jobs.models import Employer
# SECURITY(review): API credentials are hard-coded in source; they should be
# moved to environment variables / settings and rotated, since they are now
# exposed in version control.
CONSUMER_KEY = '77cfcdlhhozb9o'
CONSUMER_SECRET = 'Wtr666Df8DDg1GMV'
USER_TOKEN = '65e9b8d1-23f7-4f6d-bd21-564b283f6b1d'
USER_SECRET = 'f0342bfa-ed92-4828-904b-f32c2dacd8b8'
RETURN_URL = 'http://localhost:8000/'
# Module-level LinkedIn client shared by all views.
authentication = linkedin.LinkedInDeveloperAuthentication(
    CONSUMER_KEY, CONSUMER_SECRET,
    USER_TOKEN, USER_SECRET,
    RETURN_URL, linkedin.PERMISSIONS.enums.values())
application = linkedin.LinkedInApplication(authentication)
def index(request):
    """List all employers, refreshing each one's description from LinkedIn.

    NOTE(review): this makes one LinkedIn API call per employer on every
    page load; the description is only set on the in-memory object, not
    saved to the database.
    """
    latest_employer_list = Employer.objects.all()
    for job in latest_employer_list:
        id=job.linkedin_id
        company_info = application.get_companies(company_ids=[id], selectors=['description'])
        job.description = company_info["values"][0]["description"]
    template = loader.get_template('jobs/index.html')
    context = RequestContext(request, {
        'latest_employer_list': latest_employer_list,
    })
    return HttpResponse(template.render(context))
def detail(request, id):
employer = Employer.objects.get(id=id)
jobs = application.get_company_updates(employer.linkedin_id, params={'count': 15})
latest_jobs_list = []
for i in range(0, 14):
try:
latest_jobs_list.append((jobs['values'][i]['updateContent']['companyJobUpdate']['job']).items)
continue
except KeyError:
print i
template = loader.get_template('jobs/jobs.html')
context = RequestContext(request, {
'latest_jobs_list': latest_jobs_list,
})
return HttpResponse(template.render(context)) | [
"contact@stephanebruckert.com"
] | contact@stephanebruckert.com |
52687dd3e226c00321eb0f1dc9a5a5abbfd6a575 | 65a26dca7787add30b542d7d9369f4851425b86f | /02_笨办法学python/ex14.py | 145a6e0b3541c22b408b1789d324b561b3423404 | [] | no_license | fuyong2gh/python | b688a6025332c8e40bcfd769025e50783a0a0dd6 | 13f15541fa45808b9dcc7434f81516c94cd6e16d | refs/heads/master | 2020-08-27T08:58:13.401505 | 2019-10-31T13:08:18 | 2019-10-31T13:08:18 | 217,309,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | from sys import argv
# Interactive exercise: read the script name and a user name from argv,
# then ask three questions and echo the answers back.
script, user_name = argv
prompt = '>>>'
print(f"Hi {user_name}, I'm the {script} script")
print("I'd like to ask you a few questions")
print(f"Do you like me {user_name}")
likes = input(prompt)
print(f"Where do you live {user_name}")
lives = input(prompt)
print("What kind of computer do you have? ")
computer = input(prompt)
# Multi-line f-string summarising all three answers.
print(f"""
Alright, so you said {likes} about liking me.
You live in {lives}. Not sure where that is.
And you have a {computer} computer. Nice.
""" )
| [
"yongfu_scnu@163.com"
] | yongfu_scnu@163.com |
1c57bba12ea1d28e3d22c8f069be2ea6fb0a8d9d | aca4f00c884e1d0e6b2978512e4e08e52eebd6e9 | /2021/atcoder.jp/abc/196/prob.py | 561d92c060025a984e9491c8ceafd39586a1b707 | [] | no_license | jki14/competitive-programming | 2d28f1ac8c7de62e5e82105ae1eac2b62434e2a4 | ba80bee7827521520eb16a2d151fc0c3ca1f7454 | refs/heads/master | 2023-08-07T19:07:22.894480 | 2023-07-30T12:18:36 | 2023-07-30T12:18:36 | 166,743,930 | 2 | 0 | null | 2021-09-04T09:25:40 | 2019-01-21T03:40:47 | C++ | UTF-8 | Python | false | false | 400 | py | from math import floor
from sys import stderr, stdout
def solution(s):
    """Print the integer part of the decimal string *s* (truncates at the dot)."""
    dot = s.find('.')
    if dot == -1:
        dot = len(s)
    stdout.write('%d\n' % int(s[:dot]))
def main():
    """Read lines until EOF; print the integer part of each numeric line.

    NOTE: uses raw_input, so this file targets Python 2.  Non-numeric
    lines raise ValueError inside solution() and are skipped.
    """
    while True:
        try:
            s = raw_input().strip()
            solution(s)
        except EOFError:
            break
        except ValueError:
            continue
if __name__ == '__main__':
    main()
| [
"jki14wz@gmail.com"
] | jki14wz@gmail.com |
eb7dacb2a889fc84f5836863fb625cb7d023ecd6 | 1e4b63cf99fa80423e3fe13c2e6e58d247c67f2b | /MessagesSystem/wsgi.py | 20d029b5b6d61ae3760d6484f0651aeaba35923d | [] | no_license | ZoeyA11/messages--system | 9980fa685d7ddb6c7a36bf95edf113d9da89bb08 | a4197226a332e4796e8f0621ff746327dcdc39ca | refs/heads/main | 2023-08-23T16:03:10.118737 | 2021-09-29T12:38:30 | 2021-09-29T12:38:30 | 410,680,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for MessagesSystem project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MessagesSystem.settings')
application = get_wsgi_application()
| [
"zoeyal77@gmail.com"
] | zoeyal77@gmail.com |
01d27e782e49665473091078916c1421e21a2327 | d73447e403ee9bf5549d822e5448fa794f5ab6fd | /src/room.py | 7245011682b03d7e7c597f14b30ef00179a5b7e8 | [] | no_license | wurde/Intro-Python-II | ee9c6beb18a7bc24a4648e9c3fa4e1a31ef5dfb7 | 2ef3de57fd6727a43916915ff254a01db8accc08 | refs/heads/master | 2020-07-19T05:55:29.477738 | 2019-09-06T17:50:24 | 2019-09-06T17:50:24 | 206,387,327 | 0 | 0 | null | 2019-09-06T15:56:12 | 2019-09-04T18:33:23 | null | UTF-8 | Python | false | false | 1,106 | py | #
# Define class
#
class Room:
    """A location in the game world, holding items and links to adjacent rooms.

    Required kwargs: name, description, items (list of objects with a
    ``name`` attribute).  Optional kwargs north/south/east/west link to
    neighbouring rooms and default to None.
    """

    def __init__(self, **kwargs):
        self.name = kwargs['name']
        self.description = kwargs['description']
        self.items = kwargs['items']
        # kwargs.get replaces the original four if/else chains; behavior is
        # identical (missing exits default to None).
        self.north = kwargs.get('north')
        self.south = kwargs.get('south')
        self.east = kwargs.get('east')
        self.west = kwargs.get('west')

    def __str__(self):
        return self.name

    @staticmethod
    def _key(name):
        """Canonical form for case/whitespace-insensitive item matching."""
        return name.lower().strip()

    def has_item(self, name):
        """Return True if an item whose name matches *name* is present."""
        key = self._key(name)
        return any(self._key(item.name) == key for item in self.items)

    def take(self, name):
        """Remove every item matching *name* from the room.

        Returns the last matching item, or None if nothing matched
        (preserves the original semantics, which removed all duplicates).
        """
        key = self._key(name)
        taken = None
        remaining = []
        for item in self.items:
            if self._key(item.name) == key:
                taken = item
            else:
                remaining.append(item)
        self.items = remaining
        return taken

    def drop(self, item):
        """Add *item* to the room's contents."""
        self.items.append(item)
| [
"andy@accreu.com"
] | andy@accreu.com |
079f29a1843d83c527184ce310f40e27594476bb | bfa3a5fc2790502ef6f22bffdb3d5553eabc78dd | /stack/StackException.py | be0c9639e6a4263df7efdbf9fbb127d73bec0cd5 | [] | no_license | huangsheng6668/my_leetcode | fa48adf10f8732dc2e3aa363db11274f0cdf9173 | 88d937f5e034038ad1e4251d206479a015b083bd | refs/heads/master | 2022-04-09T04:08:26.952558 | 2020-03-18T01:49:17 | 2020-03-18T01:49:17 | 228,052,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | """
@File : StackException.py
@Description : 栈的自定义异常类
@author : sheng
@date time : 2020/1/31 21:26
@version : v1.0
"""
class StackException(Exception):
    """Exception raised for invalid stack operations (e.g. pop on empty)."""

    def __init__(self, message):
        # Pass the message on to Exception so ``args``, repr and pickling
        # behave normally.  The original called super().__init__(self),
        # storing the exception instance inside its own args tuple.
        super().__init__(message)
        self.message = message

    def __str__(self):
        return self.message
| [
"huangsheng6668@163.com"
] | huangsheng6668@163.com |
42909c7bfec0b3018707dc8bdc3d52d8830251e4 | 0722301f27bb20ab42400fa90a8a5254d20a421f | /lesson20.py | 49f0b499093f47e3359a36130e0bce798239ce44 | [] | no_license | poliarus/Stepik_Python_course | c21ffa6a9e7736adf824f8839715f3e629f2f8ac | c270d0968028f98455353cbf923883aa5313f7c6 | refs/heads/master | 2020-09-16T13:20:12.406254 | 2019-11-24T17:46:31 | 2019-11-24T17:46:31 | 223,782,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | while True:
    # Read one integer per iteration: values below 10 are silently
    # skipped, a value above 100 terminates the loop, and anything from
    # 10 to 100 (inclusive) is echoed back.
    i = int(input("num: "))
    if i < 10:
        continue
    elif i > 100:
        break
    else:
        print(i)
| [
"polyarus13@gmail.com"
] | polyarus13@gmail.com |
5858b76c38aaf5d9afaeeb90732962404b497d58 | 5ed7570f75430435913a005a895150dff0c9bb63 | /old_py_yahtzee/yahtzee_probability.py | 5627fb077b6d18ffd9258ae79019fa0da4478e8f | [] | no_license | sudoddbe/yahtzee | 312c91590360c4e34d580309e967b32dd69fca80 | c9f6e3c9092ca11282a119b0f461796a2d76c6c1 | refs/heads/master | 2021-01-01T22:08:08.678245 | 2019-12-20T18:31:40 | 2019-12-20T18:31:40 | 239,363,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,017 | py | import numpy as np
from scipy.special import factorial
import itertools
def tuple_is_subset(big_set, small_set):
    """Check whether *small_set* is a sub-multiset of *big_set*.

    Returns ``(True, leftover)`` where *leftover* is *big_set* with one
    copy of each element of *small_set* removed (the original *big_set*
    itself when *small_set* is empty), or ``(False, None)`` when some
    element of *small_set* is not available in *big_set*.
    """
    if not small_set:
        return (True, big_set)
    leftover = list(big_set)
    for element in small_set:
        # EAFP: remove() raises when the element is absent, which is
        # exactly the "not a subset" case.
        try:
            leftover.remove(element)
        except ValueError:
            return (False, None)
    return (True, leftover)
def roll_probability(wanted_rolls):
    """Probability that ``len(wanted_rolls)`` fair six-sided dice show
    exactly the multiset of faces in *wanted_rolls* (in any order).

    Multinomial formula: n! / prod(count(face)!) * (1/6)**n.
    """
    rolls = len(wanted_rolls)
    # Tally how many times each face 1-6 is requested (values outside
    # 1-6 are ignored in the tally, mirroring the original loop).
    repetitions = {face: 0 for face in range(1, 7)}
    for face in wanted_rolls:
        if face in repetitions:
            repetitions[face] += 1
    probability = factorial(rolls) * (1.0 / 6.0) ** rolls
    # list() is required on Python 3: np.array() over a dict *view*
    # produces a useless 0-d object array.  The original relied on
    # Python 2, where .values() returned a list.
    probability /= np.prod(factorial(np.array(list(repetitions.values()))))
    return probability
def dice_probability_dict(input_sets=None, output_sets=None):
    """Build forward/reverse probability tables for rerolling dice.

    ``forward[kept][final]`` (== ``reverse[final][kept]``) is the
    probability of ending on the 5-die multiset *final* when the multiset
    *kept* is held and the remaining dice are rerolled.  When no sets are
    supplied, all kept-multisets of size 0-5 and all 5-die outcomes are
    enumerated.

    Relies on the sibling helpers tuple_is_subset() and roll_probability().
    """
    if input_sets is None:  # identity test: `is None`, not `== None`
        assert output_sets is None
        dice_values = [1, 2, 3, 4, 5, 6]
        input_sets = [p for n in range(6)
                      for p in itertools.combinations_with_replacement(dice_values, n)]
        output_sets = list(itertools.combinations_with_replacement(dice_values, 5))
    forward_probability_dict = {input_set: {} for input_set in input_sets}
    reverse_probability_dict = {output_set: {} for output_set in output_sets}
    for input_set in input_sets:
        for output_set in output_sets:
            is_subset, diff_set = tuple_is_subset(output_set, input_set)
            # Unreachable outcomes get probability 0.0; otherwise the
            # probability of rolling exactly the missing faces.
            probability = roll_probability(diff_set) if is_subset else 0.0
            forward_probability_dict[input_set][output_set] = probability
            reverse_probability_dict[output_set][input_set] = probability
    return forward_probability_dict, reverse_probability_dict
| [
"david.belfrage@hotmail.com"
] | david.belfrage@hotmail.com |
4b9a7c6bc95e18fb3d86671c9a1e32c78d444c0d | 8ff1b4925becaa68dd2bdf0bce3fa647d405f6ef | /problems/problem-23.py | ac3f156663029c72ada1f7e30c6690c1da776e73 | [] | no_license | chanchs/euler | 61dc078d54940f62961efe91cbf0befdd88843f9 | 32787ca9abb83d21d03b710136dc27d0159d7f6a | refs/heads/master | 2022-09-08T06:18:46.159434 | 2020-04-17T19:08:50 | 2020-04-17T19:08:50 | 75,235,991 | 0 | 0 | null | 2022-08-23T01:11:19 | 2016-11-30T23:20:23 | Python | UTF-8 | Python | false | false | 1,020 | py | import time
# Project Euler 23: sum every positive integer that cannot be written as
# the sum of two abundant numbers (all such integers lie below 28123).
if __name__=="__main__":
    start = time.time()
    # NOTE(review): `max` and `sum` shadow the builtins throughout this
    # script; renaming them (e.g. LIMIT, total) would be safer.
    max = 28123
    sum = 0
    n = []  # candidates 1..28123 not yet shown to be a sum of two abundants
    a = []  # abundant numbers found so far (ascending, by construction)
    # Classify each i: abundant when its proper-divisor sum exceeds i.
    for i in range(1, max + 1):
        sum = 0
        n.append(i)
        j = 1
        # Proper divisors of i are at most i/2.
        for j in range(1, int(i/2) + 1):
            if i % j == 0:
                sum += j
            j = j + 1  # no effect: the for statement rebinds j each pass
        if sum > i:
            #print("{} is abundant".format(i))
            a.append(i)
        #elif sum < i:
        #    print("{} is deficient".format(i))
        #elif sum == i:
        #    print("{} is perfect".format(i))
    # Remove every number expressible as a[i] + a[j] (i <= j).
    # NOTE(review): `sum in n` and `n.remove(sum)` are O(len(n)) list
    # scans; a set or boolean array indexed by value would be far faster.
    for i in range(len(a)):
        sum = 0
        for j in range(i, len(a)):
            sum = a[i] + a[j]
            if sum in n:
                n.remove(sum)
                print("{0} is sum of {1} + {2}".format(sum, a[i], a[j]))
            elif sum > max:
                break  # a is ascending, so larger j only overshoot further
    # Whatever remains in n is not a sum of two abundant numbers.
    sum = 0
    for i in range(len(n)):
        sum += n[i]
    print("non abundant sum = {}".format(sum))
    end = time.time()
    print("Completed in {0:.2}s".format(end - start))
"csharma@carbonite.com"
] | csharma@carbonite.com |
06e7fad981c91e48e6ee48c2b7664fff11a7c56c | 42ff916cebda453b0b6848cd1630f231922afa14 | /asgn0/asgn0_1155097582.py | 4b05932c2bd3cd22398503aa28d0042a725c44cf | [] | no_license | UncleYi2016/homework_IEMS5703 | 766c44b53e52eac29b39e018c1510fdd66bf7d98 | 41acce547e992bc69b72ab1560bd845598ee113b | refs/heads/master | 2021-05-09T13:10:57.725856 | 2018-04-27T15:50:00 | 2018-04-27T15:50:00 | 119,027,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,602 | py | import operator
def problem_1(a, b):
    """Count the integers in [a, b) that are multiples of 7 but not of 3."""
    return sum(1 for value in range(a, b)
               if value % 7 == 0 and value % 3 != 0)
def problem_2(n):
    """Return n + nnn + nnnnn for a single digit n.

    Returns 0 when n is outside 0..9.
    """
    if not 0 <= n <= 9:
        return 0
    # n + n*111 (three-digit repdigit) + n*11111 (five-digit repdigit)
    return n * 11223
def problem_3(nums):
    """Return the largest sum of three consecutive elements of *nums*.

    Mirrors the original contract: the result is never below 0 (empty,
    short, or all-negative input yields 0).
    """
    best = 0
    for left in range(len(nums) - 2):
        best = max(best, nums[left] + nums[left + 1] + nums[left + 2])
    return best
def problem_4(sentence):
    """Return the space-separated words of *sentence* in sorted order."""
    # join + strip reproduces the original's trailing-separator cleanup.
    return ' '.join(sorted(sentence.split(' '))).strip()
def problem_5(sentence):
    """Return up to five (word, count) pairs for *sentence*, most
    frequent first.

    Ties keep first-appearance order, guaranteed by sorted()'s stability.
    """
    counts = {}
    for word in sentence.lower().split(' '):
        counts[word] = counts.get(word, 0) + 1
    ranked = sorted(counts.items(), key=lambda pair: pair[1], reverse=True)
    return ranked[:5]
def problem_6(path):
    """Read a comma-separated file and return a list of row dicts.

    The first line supplies the keys; each following line becomes one
    dict mapping header names to that row's string values.
    """
    records = []
    # `with` closes the file even on error; the original leaked the handle.
    with open(path, 'r') as data_file:
        header = data_file.readline().strip().split(',')
        for line in data_file:
            values = line.strip().split(',')
            records.append({header[i]: values[i] for i in range(len(values))})
    return records
# if __name__ == '__main__':
# print('problem_1():\n\t' + str(problem_1(10,30)))
# print('problem_2():\n\t' + str(problem_2(100)))
# print('problem_3():\n\t' + str(problem_3([1, 3, -2, 4, 8, -9, 0, 5])))
# print('problem_4():\n\t' + str(problem_4('the chinese university of hong kong')))
# print('problem_5():\n\t' + str(problem_5('The Transmission Control Protocol (TCP) is one of the main protocols of the Internet')))
# print('problem_6():\n\t' + str(problem_6('data.csv')))
| [
"uncleyi@UncleYis-MacBook-Pro.local"
] | uncleyi@UncleYis-MacBook-Pro.local |
6736e9c75c62cd7579f7620f40f7d8c1608aac0d | e45c82beeaba1c3646928d9bab881cb011a26aed | /python/optparse_lab1.py | 4a89282bc39457ce174dd762bfb8473375146bb9 | [] | no_license | IvanChulichkov/configs | a807ff3cdbdc112a159964af8247b6344d4439f3 | a12605e51df0b2879a24bfabb56a427bf9898cb0 | refs/heads/master | 2023-02-09T16:40:00.351002 | 2020-08-05T06:53:32 | 2020-08-05T06:53:32 | 290,879,507 | 0 | 0 | null | 2020-08-27T20:53:34 | 2020-08-27T20:53:33 | null | UTF-8 | Python | false | false | 309 | py | import optparse
def main():
    """Parse the -s/--sysadmin option and print a greeting for it."""
    parser = optparse.OptionParser()
    parser.add_option("-s", "--sysadmin", dest="admin", default="Admin",
                      help="setting up admin name", metavar="<name>")
    opts, _args = parser.parse_args()
    print(f'Hello, {opts.admin}!')
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"revolman.eis@gmail.com"
] | revolman.eis@gmail.com |
4e4b7278b5d85aced09f29bfe8d49d79fc5fb567 | c1ee8f22ece4fc39cb94fe19832fcba8e45cf5bc | /프로그래머스/문자열 내 마음대로 정렬하기.py | a45fa146443052022e2644fb242635aa218465d9 | [] | no_license | JeongHanJun/BOJ | ae6b1c64c5b3226deef2708ae447aa1225333a92 | a865624fb0a9291b68f99af8535f708554fa0b41 | refs/heads/master | 2023-03-31T02:22:58.974437 | 2021-04-02T02:43:57 | 2021-04-02T02:43:57 | 258,809,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | # 문자열 내 마음대로 정렬하기
# 제목부터 sorted, key, lambda 가 떠오른다.
def solution(strings, n):
    """Sort *strings* by the character at index n; ties fall back to
    plain alphabetical order of the whole string."""
    return sorted(strings, key=lambda s: (s[n], s))
# Quick manual checks from the problem statement.
s1 = ["sun", "bed", "car"]
n1 = 1
s2 = ["abce", "abcd", "cdx"]
n2 = 2
print(solution(s1, n1))  # expected: ['car', 'bed', 'sun']
print(solution(s2, n2))  # expected: ['abcd', 'abce', 'cdx']
"noreply@github.com"
] | JeongHanJun.noreply@github.com |
7faf21b3d81b85edbd984555f7dd773edd9447b0 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/desktopvirtualization/workspace.py | 84777b1001b9f8903ed1e5875ed1dbd416496651 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,931 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['WorkspaceArgs', 'Workspace']
@pulumi.input_type
class WorkspaceArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
application_group_references: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input['ResourceModelWithAllowedPropertySetIdentityArgs']] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
managed_by: Optional[pulumi.Input[str]] = None,
plan: Optional[pulumi.Input['ResourceModelWithAllowedPropertySetPlanArgs']] = None,
sku: Optional[pulumi.Input['ResourceModelWithAllowedPropertySetSkuArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Workspace resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Sequence[pulumi.Input[str]]] application_group_references: List of applicationGroup resource Ids.
:param pulumi.Input[str] description: Description of Workspace.
:param pulumi.Input[str] friendly_name: Friendly name of Workspace.
:param pulumi.Input[str] kind: Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type; e.g. ApiApps are a kind of Microsoft.Web/sites type. If supported, the resource provider must validate and persist this value.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[str] managed_by: The fully qualified resource ID of the resource that manages this resource. Indicates if this resource is managed by another Azure resource. If this is present, complete mode deployment will not delete the resource if it is removed from the template since it is managed by another resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] workspace_name: The name of the workspace
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if application_group_references is not None:
pulumi.set(__self__, "application_group_references", application_group_references)
if description is not None:
pulumi.set(__self__, "description", description)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if location is not None:
pulumi.set(__self__, "location", location)
if managed_by is not None:
pulumi.set(__self__, "managed_by", managed_by)
if plan is not None:
pulumi.set(__self__, "plan", plan)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if workspace_name is not None:
pulumi.set(__self__, "workspace_name", workspace_name)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="applicationGroupReferences")
def application_group_references(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of applicationGroup resource Ids.
"""
return pulumi.get(self, "application_group_references")
@application_group_references.setter
def application_group_references(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "application_group_references", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of Workspace.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
Friendly name of Workspace.
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['ResourceModelWithAllowedPropertySetIdentityArgs']]:
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['ResourceModelWithAllowedPropertySetIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type; e.g. ApiApps are a kind of Microsoft.Web/sites type. If supported, the resource provider must validate and persist this value.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="managedBy")
def managed_by(self) -> Optional[pulumi.Input[str]]:
"""
The fully qualified resource ID of the resource that manages this resource. Indicates if this resource is managed by another Azure resource. If this is present, complete mode deployment will not delete the resource if it is removed from the template since it is managed by another resource.
"""
return pulumi.get(self, "managed_by")
@managed_by.setter
def managed_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "managed_by", value)
@property
@pulumi.getter
def plan(self) -> Optional[pulumi.Input['ResourceModelWithAllowedPropertySetPlanArgs']]:
return pulumi.get(self, "plan")
@plan.setter
def plan(self, value: Optional[pulumi.Input['ResourceModelWithAllowedPropertySetPlanArgs']]):
pulumi.set(self, "plan", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['ResourceModelWithAllowedPropertySetSkuArgs']]:
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['ResourceModelWithAllowedPropertySetSkuArgs']]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the workspace
"""
return pulumi.get(self, "workspace_name")
@workspace_name.setter
def workspace_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workspace_name", value)
class Workspace(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_group_references: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ResourceModelWithAllowedPropertySetIdentityArgs']]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
managed_by: Optional[pulumi.Input[str]] = None,
plan: Optional[pulumi.Input[pulumi.InputType['ResourceModelWithAllowedPropertySetPlanArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['ResourceModelWithAllowedPropertySetSkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Represents a Workspace definition.
API Version: 2021-02-01-preview.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] application_group_references: List of applicationGroup resource Ids.
:param pulumi.Input[str] description: Description of Workspace.
:param pulumi.Input[str] friendly_name: Friendly name of Workspace.
:param pulumi.Input[str] kind: Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type; e.g. ApiApps are a kind of Microsoft.Web/sites type. If supported, the resource provider must validate and persist this value.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[str] managed_by: The fully qualified resource ID of the resource that manages this resource. Indicates if this resource is managed by another Azure resource. If this is present, complete mode deployment will not delete the resource if it is removed from the template since it is managed by another resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] workspace_name: The name of the workspace
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: WorkspaceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Represents a Workspace definition.
API Version: 2021-02-01-preview.
:param str resource_name: The name of the resource.
:param WorkspaceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(WorkspaceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_group_references: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ResourceModelWithAllowedPropertySetIdentityArgs']]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
managed_by: Optional[pulumi.Input[str]] = None,
plan: Optional[pulumi.Input[pulumi.InputType['ResourceModelWithAllowedPropertySetPlanArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['ResourceModelWithAllowedPropertySetSkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = WorkspaceArgs.__new__(WorkspaceArgs)
__props__.__dict__["application_group_references"] = application_group_references
__props__.__dict__["description"] = description
__props__.__dict__["friendly_name"] = friendly_name
__props__.__dict__["identity"] = identity
__props__.__dict__["kind"] = kind
__props__.__dict__["location"] = location
__props__.__dict__["managed_by"] = managed_by
__props__.__dict__["plan"] = plan
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["sku"] = sku
__props__.__dict__["tags"] = tags
__props__.__dict__["workspace_name"] = workspace_name
__props__.__dict__["cloud_pc_resource"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["object_id"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:desktopvirtualization/v20190123preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20190924preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20191210preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20200921preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20201019preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20201102preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20201110preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210114preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210201preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210309preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210401preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210712:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210903preview:Workspace")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Workspace, __self__).__init__(
'azure-native:desktopvirtualization:Workspace',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Workspace':
"""
Get an existing Workspace resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = WorkspaceArgs.__new__(WorkspaceArgs)
__props__.__dict__["application_group_references"] = None
__props__.__dict__["cloud_pc_resource"] = None
__props__.__dict__["description"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["friendly_name"] = None
__props__.__dict__["identity"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["location"] = None
__props__.__dict__["managed_by"] = None
__props__.__dict__["name"] = None
__props__.__dict__["object_id"] = None
__props__.__dict__["plan"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return Workspace(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="applicationGroupReferences")
def application_group_references(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of applicationGroup resource Ids.
"""
return pulumi.get(self, "application_group_references")
@property
@pulumi.getter(name="cloudPcResource")
def cloud_pc_resource(self) -> pulumi.Output[bool]:
"""
Is cloud pc resource.
"""
return pulumi.get(self, "cloud_pc_resource")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of Workspace.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
The etag field is *not* required. If it is provided in the response body, it must also be provided as a header per the normal etag convention. Entity tags are used for comparing two or more entities from the same requested resource. HTTP/1.1 uses entity tags in the etag (section 14.19), If-Match (section 14.24), If-None-Match (section 14.26), and If-Range (section 14.27) header fields.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> pulumi.Output[Optional[str]]:
"""
Friendly name of Workspace.
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.ResourceModelWithAllowedPropertySetResponseIdentity']]:
return pulumi.get(self, "identity")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type; e.g. ApiApps are a kind of Microsoft.Web/sites type. If supported, the resource provider must validate and persist this value.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="managedBy")
def managed_by(self) -> pulumi.Output[Optional[str]]:
"""
The fully qualified resource ID of the resource that manages this resource. Indicates if this resource is managed by another Azure resource. If this is present, complete mode deployment will not delete the resource if it is removed from the template since it is managed by another resource.
"""
return pulumi.get(self, "managed_by")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="objectId")
def object_id(self) -> pulumi.Output[str]:
"""
ObjectId of Workspace. (internal use)
"""
return pulumi.get(self, "object_id")
@property
@pulumi.getter
def plan(self) -> pulumi.Output[Optional['outputs.ResourceModelWithAllowedPropertySetResponsePlan']]:
return pulumi.get(self, "plan")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.ResourceModelWithAllowedPropertySetResponseSku']]:
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
26d13e4f0ecb08e455798eadfe6fa1e6ec855e3a | cd847bb6162a44945e7882992be6a8e99cd475b2 | /venv/bin/venv/bin/wheel | a26c6118b433f85ea2d30bd0128814e1bbf1d383 | [] | no_license | jasvr/wags_to_wings | 60e04375e3273e9db23f16d7f7d18263e5b14a93 | d03edcdd0db27efadb5ec7e8321ae30f23f0216a | refs/heads/master | 2020-05-04T23:42:55.924620 | 2019-04-04T22:40:55 | 2019-04-04T22:40:55 | 179,553,036 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 260 | #!/Users/jasvrgs/wdi/projects/hackathon/venv/bin/venv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
    # Normalize argv[0]: strip a trailing "-script.py(w)" or ".exe"
    # suffix (as produced by Windows script launchers) before handing
    # off to wheel's CLI entry point; exit with its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"jas.vrgs@gmail.com"
] | jas.vrgs@gmail.com | |
f4e908c32eaa6944ee1d713c8343664e5f0c623e | 2ffe5f73cda92c6156d1ec6d488089c7b0c8eb5f | /newyear/urls.py | 70c1071eb9d367fbf25863cc8c64ebb810a93867 | [] | no_license | Himangshu1086/DjangoDemo | 53077865a4d0db72d3bd44473f63a53fbb9779dc | 52039dbff7b4406d8434de3511c76e0145dbb8cf | refs/heads/master | 2022-12-03T15:47:16.637358 | 2020-08-21T18:45:32 | 2020-08-21T18:45:32 | 289,163,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | from django.urls import path
from . import views
# Map the app's root URL ("") to views.index; reversible by the name "index".
urlpatterns = [
    path("",views.index, name="index")
]
| [
"Himangshu1086"
] | Himangshu1086 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.