blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e74b080d362653d77f8d000047b27bb8e024085e | 93abb9b7b992883120905bd90d80a307d19fe388 | /webDemo/screenPictures/failScreen/__init__.py | bc52c9a5533f94cca6acbf8a311859d2a497e8ff | [] | no_license | zll90021258/webdemo | 9a70dab8324a4ca81807989c6402033c36434952 | 8fd779989bf62b056c250782214f7d5f0d9456db | refs/heads/master | 2023-08-22T10:52:05.906887 | 2021-09-19T09:49:59 | 2021-09-19T09:49:59 | 400,932,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | # -*- coding: utf-8 -*-
# @Author : ZENGLINGLING
# @Email : 984355579@qq.com
# @Software: PyCharm
# @Time : 2021/9/3 10:29
# @File : __init__.py.py
| [
"984355579@qq.com"
] | 984355579@qq.com |
e4080117abce56c6b75ce3170626dd76bd0649eb | 61ed94c97aaa92e25ef48d075246da61b17261b8 | /notebooks/finding_my_triplets.py | 2362be024ce7da308d72cadf0bc10e9cbffa7e61 | [] | no_license | tadgeislamins/verbs_research | 68280bca5f24453f88bb508e83096ba63963e354 | 183e3a9d61e4f150d0dc24a1cca6b0ad3da2fc68 | refs/heads/master | 2021-07-01T17:38:08.263275 | 2021-03-01T17:52:43 | 2021-03-01T17:52:43 | 221,745,430 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,671 | py | from conllu import parse
from pymorphy2 import MorphAnalyzer
pymorphy2_analyzer = MorphAnalyzer()
import pandas as pd
def create_path_and_file_name(name):
    """Return the three artefact names for dataset *name*:
    [xlsx source path, raw-sentences txt name, parsed conllu name]."""
    return [
        '../data/{}.xlsx'.format(name),
        'sents_{}.txt'.format(name),
        'parsed_sents_{}.conllu'.format(name),
    ]
def read_preprocess_data(names):
    """Load the xlsx corpus at names[0] and drop the metadata columns
    that are irrelevant to triplet extraction."""
    unused_columns = [
        'Reversed left context', 'Reversed center', 'Title', 'Author',
        'Birthday', 'Header', 'Publ_year', 'Medium', 'Ambiguity',
        'Publication', 'Sphere', 'Type', 'Topic',
    ]
    frame = pd.read_excel(names[0])
    frame.drop(columns=unused_columns, inplace=True)
    return frame
def create_text_file(dataframe, names):
    """Write one sentence per line to names[1], cutting each 'Full context'
    value just before its first '[' (the bracketed annotation tail)."""
    with open(names[1], 'w', encoding="UTF-8") as out:
        for row in range(dataframe.shape[0]):
            sentence = dataframe['Full context'][row]
            out.write(sentence[:sentence.find('[') - 1] + '\n')
def read_conllu(conllu_fname):
    """Read a CoNLL-U file and return its parsed sentences."""
    with open(conllu_fname, 'r', encoding="UTF-8") as handle:
        return parse(handle.read())
def check_object(token, object_form):
    """Return True when *token* can serve as the object we search for:
    it has morphological features, is not a foreign word, and every
    feature requested in *object_form* (e.g. Case) matches exactly."""
    feats = token['feats']
    if not feats or 'Case' not in feats:
        return False
    # foreign-language tokens are never accepted as objects
    if feats.get('Foreign') == 'Yes':
        return False
    # every requested feature must agree with the token's features
    return all(feats[feature] == value for feature, value in object_form.items())
def find_triplet_in_sentence(sent, verb_lemmas, object_form, prep_in_var_of_constr=None, prep_in_constr=None):
    """Extract a (verb, object[, preposition]) triplet from one parsed sentence.

    Returns a dict which always gets 'verb'/'verb_id_for_sent' when a verb
    from *verb_lemmas* is found, plus 'object'/'object_id_for_sent' (and
    optionally 'preposition') when a matching dependent object is found.
    Positions are 0-based token indices into *sent*.
    """
    # finding triplet (verb_lemmas, object, prep_in_var_of_constr) for one sentence
    # maps SynTagRus case tags to their pymorphy2 equivalents
    syntagrus_pymorphy_dict = {'Acc': 'accs', 'Dat': 'datv', 'Gen': 'gent', 'Ins': 'ablt', 'Loc': 'loct'}
    triplet = {}
    verb_id = None
    object_id = None
    for token in sent:
        # look for the verb's id so that its dependents can be searched afterwards
        if token['lemma'] in verb_lemmas:
            verb_id = token['id']
            triplet['verb'] = token['form']
            triplet['verb_id_for_sent'] = verb_id - 1
        # look for dependents (NB: in our data dependents always follow the verb,
        # which is why a single left-to-right pass is enough)
        if token['head'] == verb_id:
            pymorphy_object_form = syntagrus_pymorphy_dict[object_form['Case']]
            pymorphy_token = pymorphy2_analyzer.parse(token['form'])
            # ..... # pymorphy parsing
            # if (token['upostag'] in ['NOUN', 'PROPN', 'PRON'] and check_object(token, object_form)):
            # accept the token if either the treebank features or the pymorphy
            # analysis assign it the required case
            if token['upostag'] in ['NOUN', 'PROPN', 'PRON'] and (check_object(token, object_form) or pymorphy_token[0].tag.case == pymorphy_object_form):
                object_id = token['id']
                break  # object found (TODO: bla bla)
    if prep_in_constr and object_id:  # looking for a preposition, if there is
        if prep_in_var_of_constr:
            # construction requires a preposition from the allowed variants
            for token in sent:
                if token['head'] == object_id and token['form'] in prep_in_var_of_constr:
                    triplet['object'] = sent[object_id - 1]['form']
                    triplet['object_id_for_sent'] = object_id - 1
                    # record the canonical variant of the preposition,
                    # whichever surface form was actually found
                    triplet['preposition'] = prep_in_var_of_constr[0]
        else:
            for token in sent:  # looking for a preposition, if there isn't
                if token['head'] == object_id:
                    if token['form'] in prep_in_constr:
                        break
                    elif token['upostag']:
                        if token['upostag'] == 'ADP':
                            break
            else:
                # for/else: no preposition depends on the object, so record it bare
                triplet['object'] = sent[object_id - 1]['form']
                triplet['object_id_for_sent'] = object_id - 1
    else:
        if object_id:
            triplet['object'] = sent[object_id - 1]['form']
            triplet['object_id_for_sent'] = object_id - 1
    return triplet
def get_all_triples(sentences, verb_lemmas, object_form, prep_in_var_of_constr=None, prep_in_constr=None):
    """Run find_triplet_in_sentence over every sentence, tagging each
    result with the index of its sentence, and return the list."""
    triples = []
    for index, sentence in enumerate(sentences):
        found = find_triplet_in_sentence(sentence, verb_lemmas, object_form,
                                         prep_in_var_of_constr, prep_in_constr)
        found['id'] = index
        triples.append(found)
    return triples
def count_necessary_triplets(necessary_triplets, sentences):
    """Print a verb..object preview for every triplet and return how many
    triplets there are."""
    total = len(necessary_triplets)
    for triplet in necessary_triplets:
        sent_id = triplet['id']
        sentence = sentences[sent_id]
        start = triplet['verb_id_for_sent']
        # clamp the slice end to the sentence length
        end = min(triplet['object_id_for_sent'] + 1, len(sentence))
        preview = [token['form'] for token in sentence[start:end]]
        print('id', sent_id, *preview)
    return total
def show_preview_for_sent(triplet, sentence):
    """Print a short context window: three tokens before the verb through
    three tokens after the object, clamped to the sentence bounds."""
    start = max(triplet['verb_id_for_sent'] - 3, 0)
    end = min(triplet['object_id_for_sent'] + 4, len(sentence))
    print(*[token['form'] for token in sentence[start:end]])
def get_indexes(triplets):
    """Return the sentence ids of the triplets in which an object was found."""
    return [triplet['id'] for triplet in triplets if 'object' in triplet]
def get_standart_date(date):
    """Normalise a creation-date string to a single integer year.

    A range such as '1456-1876' yields the arithmetic mean of its bounds
    (a work written over several years is dated by the midpoint); a dotted
    date such as '1987.05.12' yields its leading year.

    Bug fix: the dotted branch previously returned a *str* while the range
    branch returned an *int*; both branches now return an int.
    """
    if date.find('-') != -1:
        date_array = date.split('-')
        # midpoint of the composition period
        mean_date = (int(date_array[0]) + int(date_array[1])) // 2
        return mean_date
    else:
        date_array = date.split('.')
        return int(date_array[0])
if __name__ == '__main__':
    # Extract and report dative "писать к ..." triplets from the parsed corpus.
    sentences = read_conllu('parsed_sents_pisat-18-2.conllu')
    object_form = {'Case': 'Dat'}
    verb_lemmas = ['писать']
    prep_in_var_of_constr = ['к', 'ко', 'къ']
    prep_in_constr = ['к', 'ко', 'къ']
    triplets = get_all_triples(sentences, verb_lemmas, object_form, prep_in_var_of_constr, prep_in_constr)
    for tr in triplets:
        print(tr)
    # BUG FIX: this previously called the undefined name count_triplets(),
    # which raised NameError; the helper defined above is
    # count_necessary_triplets().
    print(count_necessary_triplets(triplets, sentences))
    print(sentences[370])
    print(sentences[223])
    # tokens = sentences[225][0:5]
    # preview_list = [token['form'] for token in tokens]
    # print(*preview_list)
# print(find_triplet_in_sentence(sentences1[83], verb_lemmas, object_form, prep_in_var_of_constr=None, prep_in_constr=None)) | [
"ttaisia2@gmail.com"
] | ttaisia2@gmail.com |
5c2750caeef0712f7efec8adc5693540966e40f7 | 44a8e18b67472896a4a160c05fc6c014d7cdf99d | /cloud/user/models.py | 8e95e6ec6aa64403140f37117cea23d17a1c94d1 | [] | no_license | skydreamever/dreamcloud | 74c212629bf068a1e76294180656e193116e307d | 7887fc471f80cb5151bbdeebb467f65a69a97483 | refs/heads/master | 2020-09-20T13:58:28.659235 | 2016-08-31T11:58:35 | 2016-08-31T11:58:35 | 66,642,463 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,440 | py | #!/usr/bin/env python
# encoding: utf-8
from sqlalchemy import Column
from ..extension import db
from .constants import USER_NORMAL, USER_ADMIN
from werkzeug import generate_password_hash, check_password_hash
from flask.ext.login import UserMixin
class User(db.Model, UserMixin):
    """Application user backed by the ``users`` table.

    The plaintext password is never stored: assigning to ``password``
    hashes it via werkzeug, and ``check_password`` compares against
    the stored hash.
    """
    __tablename__ = 'users'
    id = Column(db.Integer, primary_key=True)
    name = Column(db.String(128), nullable=False, unique=True)
    _password = Column('password', db.String(256), nullable=False)
    # Hide/encrypt the password by exposing it only through the
    # property-backed synonym below.
    def _get_password(self):
        return self._password
    def _set_password(self, password):
        # store a salted hash, never the plaintext
        self._password = generate_password_hash(password)
    password = db.synonym('_password',
                          descriptor=property(_get_password,
                                              _set_password))
    def check_password(self, password):
        """Return True when *password* matches the stored hash."""
        if self.password is None:
            return False
        return check_password_hash(self.password, password)
    type_code = Column(db.SmallInteger, default=USER_NORMAL)
    def is_admin(self):
        # True when this account carries the admin type code
        return self.type_code == USER_ADMIN
    @classmethod
    def authenticate(cls, login, password):
        """Look up *login* by name and verify *password*.

        Returns ``(user_or_None, authenticated_bool)``.
        """
        user = cls.query.filter(User.name == login).first()
        if user:
            authenticated = user.check_password(password)
        else:
            authenticated = False
        return user, authenticated
| [
"dxs9642@gmail.com"
] | dxs9642@gmail.com |
886ccfa0a2d69e87f65ff1728f0eeb6d851c77c0 | 6afe7df39c1d0841ab645bd41558ad3f62136c93 | /Number pairs with zero sum.py | e4b160055d10b81f0db5cc161edac4451e4f0652 | [] | no_license | utanasan/Python-bootcamp | e7a20eda4bdd2a4189050495e6c3f3f369a0425d | 89deb7423c8fb77d2b6d37e871fa4b53a11c2ed2 | refs/heads/main | 2023-07-08T06:41:04.053938 | 2021-08-03T20:45:55 | 2021-08-03T20:45:55 | 370,017,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | l1=[2,4,-5,6,8,-2]
l2 = [2, -6, 8, 3, 5, -2]

# Collect every (i, j) pair, i drawn from l1 and j from l2, that sums to zero.
l3 = []
for left in l1:
    l3.extend((left, right) for right in l2 if left + right == 0)
print(l3)
# OR: the same pairing written as a single list comprehension
l4 = [(left, right) for left in l1 for right in l2 if left + right == 0]
print(l4)
"noreply@github.com"
] | utanasan.noreply@github.com |
fae5997202e7057348feddcaaeb8259a9835ebb2 | db517eed5d464ff74d03167a3c8b5b6f76381a4f | /home/.IntelliJIdea2016.3/config/plugins/python/helpers/coverage_runner/run_coverage.py | 4403c99bdae3216185dc0b130b663fcb0a3665eb | [] | no_license | Tharyrok/dotfile | c989a2036b9a879d772f5cd50be5d61a8f228552 | ee93331789d27e8c2f3fedf07a5bad8b4704d804 | refs/heads/master | 2021-01-11T22:37:25.806928 | 2017-01-16T04:29:22 | 2017-01-16T04:29:22 | 79,003,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,569 | py | """Coverage.py's main entrypoint."""
import os
import sys

# If the IDE ships a bundled copy of coverage, drop it from sys.path first so
# that a user-installed coverage package wins the import; restore the path
# afterwards.
bundled_coverage_path = os.getenv('BUNDLED_COVERAGE_PATH')
if bundled_coverage_path:
    sys_path_backup = sys.path
    sys.path = [p for p in sys.path if p != bundled_coverage_path]
    from coverage.cmdline import main
    sys.path = sys_path_backup
else:
    from coverage.cmdline import main

# PyCharm passes the target coverage file via the environment; strip the
# ".coverage" suffix to get the base path for every derived artefact
# (.coverage, .syspath.txt, .xml).
coverage_file = os.getenv('PYCHARM_COVERAGE_FILE')
coverage_file = coverage_file[0:-len(".coverage")]
run_cov = os.getenv('PYCHARM_RUN_COVERAGE')
if os.getenv('CREATE_TEMP_COVERAGE_FILE'):
    # Redirect results into a fresh temp file and announce each old->new
    # mapping on stdout so the IDE can locate the artefacts.
    line = 'LOG: PyCharm: File mapping:%s\t%s\n'
    import tempfile
    (h, new_cov_file) = tempfile.mkstemp(prefix='pycharm-coverage')
    print(line%(coverage_file + ".coverage", new_cov_file + ".coverage"))
    print(line%(coverage_file + '.syspath.txt', new_cov_file + '.syspath.txt'))
    print(line%(coverage_file + '.xml', new_cov_file + '.xml'))
    coverage_file = new_cov_file
if coverage_file:
    os.environ['COVERAGE_FILE'] = coverage_file + ".coverage"
if run_cov:
    # Record cwd and sys.path so measured file paths can be mapped back to
    # the project sources.
    a_file = open(coverage_file + '.syspath.txt', mode='w')
    a_file.write(os.getcwd()+"\n")
    for path in sys.path: a_file.write(path + "\n")
    a_file.close()
# Split fused "-mmodule" arguments into "-m module" for coverage's CLI.
argv = []
for arg in sys.argv:
    if arg.startswith('-m'):
        argv.append('-m')
        argv.append(arg[2:])
    else:
        argv.append(arg)
sys.argv = argv
cwd = os.getcwd()
try:
    main()
finally:
    # Always emit the XML report (restoring cwd first, since the measured
    # program may have changed it).
    if run_cov:
        os.chdir(cwd)
        main(["xml", "-o", coverage_file + ".xml", "--ignore-errors"])
"dev@tharyrok.eu"
] | dev@tharyrok.eu |
2ea747e7a97063f59f0d0d4584ff5c12e534398b | 90deb98bd63bdc0f08d80954d3edb3a277e63cd1 | /arq/jobs.py | 1d4c756caae0842df1a7973d086f698534b73085 | [
"MIT"
] | permissive | filmor/arq | 93a97852eb2aa554ce2c6d548fcfa7dac35b74b4 | f0b4b8b4db2df0c950069f98d5d62c104912e48d | refs/heads/master | 2020-03-13T21:08:36.514480 | 2018-01-10T15:36:23 | 2018-01-10T15:36:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,365 | py | """
:mod:`jobs`
===========
Defines the ``Job`` class and descendants which deal with encoding and decoding job data.
"""
import base64
import os
from datetime import datetime
import msgpack
from .utils import DEFAULT_CURTAIL, from_unix_ms, timestamp, to_unix_ms_tz, truncate
__all__ = ['JobSerialisationError', 'Job', 'DatetimeJob']
class ArqError(Exception):
    """Base exception for arq errors."""
    pass
class JobSerialisationError(ArqError):
    """Raised when a job's payload cannot be serialised (see Job.encode)."""
    pass
def gen_random():
    """
    Generate a lowercase alpha-numeric random string of length 16.

    Base32-encoding 10 random bytes yields 16 characters with more
    randomness for its size than a uuid.
    """
    raw = base64.b32encode(os.urandom(10))
    return raw.decode().lower()[:16]
# "device control one" should be fairly unique as a dict key and only one byte
DEVICE_CONTROL_ONE = '\x11'  # marker key used to round-trip sets through msgpack
class Job:
    """
    Main Job class responsible for encoding and decoding jobs as they go
    into and come out of redis.
    """
    __slots__ = 'id', 'queue', 'queued_at', 'class_name', 'func_name', 'args', 'kwargs', 'raw_queue', 'raw_data'
    def __init__(self, raw_data: bytes, *, queue_name: str=None, raw_queue: bytes=None) -> None:
        """
        Create a job instance by decoding a job definition eg. from redis.

        :param raw_data: data to decode, as created by :meth:`arq.jobs.Job.encode`
        :param raw_queue: raw name of the queue the job was taken from
        :param queue_name: name of the queue the job was dequeued from
        """
        self.raw_data = raw_data
        if queue_name is None and raw_queue is None:
            raise ArqError('either queue_name or raw_queue are required')
        # keep both the str and bytes form of the queue name, deriving
        # whichever one was not supplied from the other
        self.queue = queue_name or raw_queue.decode()
        self.raw_queue = raw_queue or queue_name.encode()
        self.queued_at, self.class_name, self.func_name, self.args, self.kwargs, self.id = self.decode_raw(raw_data)
        # queued_at travels on the wire in milliseconds; expose seconds
        self.queued_at /= 1000
    @classmethod
    def encode(cls, *, job_id: str=None, queued_at: int=None, class_name: str, func_name: str,
               args: tuple, kwargs: dict) -> bytes:
        """
        Create a byte string suitable for pushing into redis which contains all
        required information about a job to be performed.

        :param job_id: id to use for the job, leave blank to generate a uuid
        :param queued_at: time in ms unix time when the job was queue, if None now is used
        :param class_name: name (see :attr:`arq.main.Actor.name`) of the actor class where the job is defined
        :param func_name: name of the function be called
        :param args: arguments to pass to the function
        :param kwargs: key word arguments to pass to the function
        """
        queued_at = queued_at or int(timestamp() * 1000)
        try:
            return cls.encode_raw([queued_at, class_name, func_name, args, kwargs, cls.generate_id(job_id)])
        except TypeError as e:
            # encode_raw raises TypeError for unserialisable payloads
            raise JobSerialisationError(str(e)) from e
    @classmethod
    def generate_id(cls, given_id):
        # honour a caller-supplied id, otherwise mint a random one
        return given_id or gen_random()
    @classmethod
    def msgpack_encoder(cls, obj):
        """
        The default msgpack encoder, adds support for encoding sets.
        """
        if isinstance(obj, set):
            # sets are not native to msgpack: wrap as {DEVICE_CONTROL_ONE: [...]}
            return {DEVICE_CONTROL_ONE: list(obj)}
        else:
            return obj
    @classmethod
    def msgpack_object_hook(cls, obj):
        # reverse of msgpack_encoder: unwrap the single-key set marker
        if len(obj) == 1 and DEVICE_CONTROL_ONE in obj:
            return set(obj[DEVICE_CONTROL_ONE])
        return obj
    @classmethod
    def encode_raw(cls, data) -> bytes:
        return msgpack.packb(data, default=cls.msgpack_encoder, use_bin_type=True)
    @classmethod
    def decode_raw(cls, data: bytes):
        return msgpack.unpackb(data, object_hook=cls.msgpack_object_hook, encoding='utf8')
    def to_string(self, args_curtail=DEFAULT_CURTAIL):
        """Render "id Class.func(args)" with the argument list truncated."""
        arguments = ''
        if self.args:
            arguments = ', '.join(map(str, self.args))
        if self.kwargs:
            if arguments:
                arguments += ', '
            arguments += ', '.join(f'{k}={v!r}' for k, v in sorted(self.kwargs.items()))
        return '{s.id:.6} {s.class_name}.{s.func_name}({args})'.format(s=self, args=truncate(arguments, args_curtail))
    def short_ref(self):
        # abbreviated identifier without the argument list
        return '{s.id:.6} {s.class_name}.{s.func_name}'.format(s=self)
    def __str__(self):
        return self.to_string()
    def __repr__(self):
        return f'<Job {self} on {self.queue}>'
DEVICE_CONTROL_TWO = '\x12'  # marker key used to round-trip datetimes through msgpack
TIMEZONE = 'O'  # companion key carrying the utcoffset returned by to_unix_ms_tz
class DatetimeJob(Job):
    """
    Alternative Job which copes with datetimes. None timezone naïve dates are supported but
    the returned datetimes will use a :mod:`datetime.timezone` class to define the timezone
    regardless of the timezone class originally used on the datetime object (eg. ``pytz``).
    """
    @classmethod
    def msgpack_encoder(cls, obj):
        # datetimes become {DEVICE_CONTROL_TWO: unix_ms[, TIMEZONE: offset]};
        # anything else falls back to the base-class encoder
        if isinstance(obj, datetime):
            ts, tz = to_unix_ms_tz(obj)
            result = {DEVICE_CONTROL_TWO: ts}
            if tz is not None:
                result[TIMEZONE] = tz
            return result
        else:
            return super().msgpack_encoder(obj)
    @classmethod
    def msgpack_object_hook(cls, obj):
        # unwrap the one- or two-key datetime marker produced above
        if len(obj) <= 2 and DEVICE_CONTROL_TWO in obj:
            return from_unix_ms(obj[DEVICE_CONTROL_TWO], utcoffset=obj.get(TIMEZONE))
        else:
            return super().msgpack_object_hook(obj)
| [
"s@muelcolvin.com"
] | s@muelcolvin.com |
dbca3b4496e1250884f1cf3fdcb788e0047251b9 | bf658e3ffb5651a625f06e4566112649d4fc2d6f | /apps/mascota/views.py | 78828abce1d2c2124f39f1862efba723b74e2205 | [] | no_license | jeissonmgz/adopcion_mascota | 8c2aae1722afd9478ed813928902e47969c3fa98 | 7b699996cccced2ee4fefaba62678e7593eafb30 | refs/heads/master | 2021-08-31T21:02:54.923230 | 2017-12-22T22:49:08 | 2017-12-22T22:49:08 | 115,155,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | from django.shortcuts import render, redirect
from django.http import HttpResponse
from apps.mascota.forms import MascotaForm
# Create your views here.
def index(request):
    """Render the mascota landing page."""
    return render(request, "mascota/index.html")
def mascota_view(request):
    """Create a Mascota from POSTed form data, or show the (possibly
    invalid/bound) form otherwise."""
    if request.method == 'POST':
        form = MascotaForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('mascota:index')
        # fall through: re-render the bound form with its validation errors
    else:
        form = MascotaForm()
    return render(request, 'mascota/mascota_form.html', {'form': form})
def mascota_list(request):
    """Render the list of every registered Mascota."""
    # BUG FIX: the module never imported the model, so this view raised
    # NameError on 'Mascota' at runtime; import it locally here.
    from apps.mascota.models import Mascota
    mascota = Mascota.objects.all()
    contexto = {'mascotas': mascota}
    return render(request, 'mascota_list.html', contexto)
"desarrolladorweb@biolodos.com"
] | desarrolladorweb@biolodos.com |
e63264b6ad8c9d0ef23cef42693ff74821bb8363 | 0155e132fcc42350e2d8d06fd783702799fed6d1 | /ideas/admin.py | dc8d2c0720ae80d156d8c9d42121b008c2f3e65b | [
"MIT"
] | permissive | asheahan/invigorate | f6eb82cace50be05ee4e537d78e4c005dd08bcfd | e6d53cdde5d03eef0d7009362063eb6682bc7f1c | refs/heads/master | 2021-01-20T06:15:28.865686 | 2017-09-07T03:55:56 | 2017-09-07T03:55:56 | 101,498,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
# Register your models here.
from .models import Idea, Inspiration, Concept, Profile
class IdeaAdmin(admin.ModelAdmin):
    """Admin options for Idea: ordered by creation date, searchable by text."""
    list_display = ('label', 'created_date')
    list_display_links = ('label',)
    ordering = ('created_date',)
    search_fields = ('label', 'description',)
class ConceptAdmin(admin.ModelAdmin):
    """Admin options for Concept: filterable by category, searchable by label."""
    list_display = ('category', 'label',)
    list_display_links = ('label',)
    list_filter = ('category',)
    ordering = ('category', 'label',)
    search_fields = ('label',)
class ProfileInline(admin.StackedInline):
    """Edit the related Profile inline on the User admin page."""
    model = Profile
    can_delete = False
class UserAdmin(UserAdmin):
    # NOTE(review): deliberately shadows the imported django UserAdmin; the
    # base-class expression is evaluated before this name is rebound, so the
    # class extends the stock admin while replacing it in this module.
    inlines = (ProfileInline,)
# Wire the models into the admin site; the stock User admin is swapped out
# for the profile-aware subclass defined above.
admin.site.register(Idea, IdeaAdmin)
admin.site.register(Concept, ConceptAdmin)
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| [
"asheahan@gmail.com"
] | asheahan@gmail.com |
d6b2ebd6e7410c347fbd9cca0ef660c405d57c19 | f889d26fec0c4da86c2b857191564e5ee57430a8 | /Python_basics/Old_Library.py | d48474542c5c5e35f110371bd00b37b1758518fe | [] | no_license | Grigorov999/SoftUni-Python | 4da6ecea760d13d7571723b8faa771b3be0199f6 | cb4f8f352fc48cb1ae8b2efd803265707a37227f | refs/heads/master | 2022-12-13T15:04:05.303204 | 2020-09-11T20:32:29 | 2020-09-11T20:32:29 | 294,784,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | book_name = input()
books_count = int(input())

# Scan the incoming titles one by one; the for/else `else` branch only runs
# when the loop finishes without finding the wanted book.
for checked in range(books_count):
    if input() == book_name:
        print(f'You checked {checked} books and found it.')
        break
else:
    print(f'The book you search is not here!')
    print(f'You checked {books_count} books.')
"emilgrigorov888@gmail.com"
] | emilgrigorov888@gmail.com |
534ceee3636879d0c7307ece1b5c9007fadb5026 | d414716af0d044fa966e79aa1f0b2d75bd0c11ce | /manage.py | e4f0301e6c3ebb0d1a32208231f3f65abf71b3ee | [] | no_license | unkill1987/Carinfo | a2131899436992f77e7ccb23d014efce518e3f09 | ef3256e509de3b44f9d992d79e4e351bad505db4 | refs/heads/master | 2020-04-10T20:24:22.692089 | 2018-12-20T09:24:40 | 2018-12-20T09:24:40 | 161,266,538 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Point Django at this project's settings before any framework import.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'carinfo.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
"unkill1987@naver.com"
] | unkill1987@naver.com |
44a11aef9399e554b020e9fc9fb961732626e3be | 065bd815eef1099541af983aeea8e65e1bfd5acc | /data/__init__.py | 0486537e57679e62c81d0cb5208a3da3ed31aa6d | [] | no_license | thomaskoppelaar/alutiiq | 085cc050b2ecf746b52dfebc3e77f922abb36b2c | d183d5cb5a3bf07414f4380bef9d31635dc7b513 | refs/heads/master | 2023-03-20T06:17:01.258178 | 2021-03-08T10:09:33 | 2021-03-08T10:09:33 | 334,695,378 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | from data.card import Card
from data.player import Player
from data.session_objects import s_store, s_turn_counter
| [
"36331365+thomaskoppelaar@users.noreply.github.com"
] | 36331365+thomaskoppelaar@users.noreply.github.com |
906b363396d9f81c162b3334ea3f8fc91469b5d7 | e94149632d0b8f4d21a20d91e6811f8683305b91 | /nj-sinc/src/cmd/ucsm/server/local_lun_remove.py | bb93b432be93b02335009c205fde8b50485860e3 | [] | no_license | huhe56/nj-snic | cc84f3b17459ccb7592076e40f1275f2d0e7bd3c | 6a33cd8ea610e8ee24f5748e1afe7b4da1a3f190 | refs/heads/master | 2021-01-10T10:09:45.218268 | 2015-05-14T01:33:09 | 2015-05-14T01:33:09 | 36,896,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | '''
Created on Aug 26, 2014
@author: huhe
'''
from main.define import Define
from lib.ucsm import UCSM
from cmd.ucsm.server import sp_define
if __name__ == '__main__':
    # Address of the target blade (chassis/cartridge/server) and the tag of
    # the local LUN to remove from its service profile.
    param = {}
    param['chassis_id'] = 1
    param['cartridge_id'] = 9
    param['server_id'] = 1
    param['tag_local_lun_name'] = 'lun141_1'
    # Open an SSH session to the UCS Manager, remove the LUN, and disconnect.
    ucsm = UCSM(Define.UCSM_HOSTNAME);
    sp_define.remove_local_lun(ucsm.get_ssh(), param)
    ucsm.exit()
"huan.he@gmail.com"
] | huan.he@gmail.com |
f7b3033abbffc59bb77ce0801784a595aa9821d1 | 4be5c172c84e04c35677f5a327ab0ba592849676 | /python/leetcode/unique_paths_ii/unique_paths_ii.py | 3cdf92f1c359c3b7d2a6b32488d8026d34b9638a | [] | no_license | niranjan-nagaraju/Development | 3a16b547b030182867b7a44ac96a878c14058016 | d193ae12863971ac48a5ec9c0b35bfdf53b473b5 | refs/heads/master | 2023-04-06T20:42:57.882882 | 2023-03-31T18:38:40 | 2023-03-31T18:38:40 | 889,620 | 9 | 2 | null | 2019-05-27T17:00:29 | 2010-09-05T15:58:46 | Python | UTF-8 | Python | false | false | 3,955 | py | '''
https://leetcode.com/problems/unique-paths-ii/
63. Unique Paths II
A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
The robot can only move either down or right at any point in time. The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).
Now consider if some obstacles are added to the grids. How many unique paths would there be?
An obstacle and empty space is marked as 1 and 0 respectively in the grid.
Note: m and n will be at most 100.
Example 1:
Input:
[
[0,0,0],
[0,1,0],
[0,0,0]
]
Output: 2
Explanation:
There is one obstacle in the middle of the 3x3 grid above.
There are two ways to reach the bottom-right corner:
1. Right -> Right -> Down -> Down
2. Down -> Down -> Right -> Right
'''
'''
Solution Outline:
0. Allowed directions are R, D
1. Consider moving to cell x,y from 0,0
If there were no obstacles, it would be (num_paths_to(x-1,y) + num_paths_to(x,y-1))
with num_paths_to(x,0) == 1, (only direction allowed is down)
and num_paths_to(0,y) == 1 (only direction allowed is right) {for any 0<=x<m,0<=y<n}
2. With obstacles,
if x,0 is an obstacle,
then the column looks like (x=2 in the example)
[[0
[0
[1
[0
[0 0 . . .
num_paths_to(0,0) = 1
num_paths_to(1,0) = 1
num_paths_to(2,0) = 0 (blockade)
num_paths_to(3,0) = 0 (can' get past blockade moving only D)
num_paths_to(4,0) = 0
Similarly, if (0,y) is an obstacle,
then the first row looks like (y=1 in the example)
[[0 1 0 0 0 0]
num_paths_to(0,0) = 1
num_paths_to(0,1) = 0 (blockade)
num_paths_to(0,y) = 0 (for all y > 1) (can't get past blockade moving only R)
For any random(x,y),
if x,y is an obstacle, then num_paths_to(x,y) = 0
otherwise,
num_paths_to(x,y) = sum(num_paths_to(x-1,y), num_paths_to(x,y-1))
Sample run 1:
A= [
[0,0,0],
[0,1,0],
[0,0,0]
]
DP: [
[0,0,0],
[0,0,0],
[0,0,0]
]
Fill DP row 0,
DP: [
[1,1,1],
[0,0,0],
[0,0,0]
]
Fill DP col 0,
DP: [
[1,1,1],
[1,0,0],
[1,0,0]
]
(x,y): (1,1) is a blockade
DP: [
[1,1,1],
[1,0,0],
[1,0,0]
]
(x,y): (1,2) == sum(left, up) == sum(DP[1,1], DP[0,2]) == 1
DP: [
[1,1,1],
[1,0,1],
[1,0,0]
]
(x,y): (2,1) == sum(left,up) == sum(DP[2,0], DP[1,1]) == 1
DP: [
[1,1,1],
[1,0,1],
[1,1,0]
]
(x,y): (2,2) == sum(left,up) == sum(DP[2,1], DP[1,2]) == 2
DP: [
[1,1,1],
[1,0,1],
[1,1,2]
]
'''
class Solution(object):
    def uniquePathsWithObstacles(self, obstacleGrid):
        """
        Count the distinct right/down paths from the top-left to the
        bottom-right cell; cells marked 1 are impassable obstacles.

        :type obstacleGrid: List[List[int]]
        :rtype: int

        Fixes: also rejects a grid with zero columns ([[]]), which
        previously crashed on obstacleGrid[-1][-1]; uses range instead of
        the Python-2-only xrange so the code runs on both major versions.
        """
        if not obstacleGrid or not obstacleGrid[0]:
            return 0

        m = len(obstacleGrid)
        n = len(obstacleGrid[0])

        # End cell is blocked: no path can finish
        if obstacleGrid[-1][-1] == 1:
            return 0

        # DP[i][j] == number of paths reaching cell (i, j)
        DP = [[0] * n for _ in range(m)]

        # First row: reachable only until the first obstacle (moves are R only)
        for j in range(n):
            if obstacleGrid[0][j] == 1:
                break
            DP[0][j] = 1

        # First column: reachable only until the first obstacle (moves are D only)
        for i in range(m):
            if obstacleGrid[i][0] == 1:
                break
            DP[i][0] = 1

        for i in range(1, m):
            for j in range(1, n):
                if obstacleGrid[i][j] == 0:
                    # paths arrive from above or from the left
                    DP[i][j] = DP[i-1][j] + DP[i][j-1]
                # if (i, j) is an obstacle, DP[i][j] stays 0

        return DP[-1][-1]
if __name__ == '__main__':
    # Smoke-test the solver against known grids and expected path counts.
    solver = Solution()
    test_cases = [
        ([[0, 0, 0],
          [0, 1, 0],
          [0, 0, 0]], 2),
        ([[0, 0, 0],
          [0, 1, 0],
          [0, 0, 1]], 0),
        ([[0, 0, 1, 0],
          [0, 1, 0, 0],
          [0, 0, 0, 0],
          [1, 0, 0, 0]], 3),
        ([[0, 0, 1, 0],
          [0, 1, 0, 0],
          [0, 0, 0, 0],
          [0, 0, 0, 0],
          [1, 0, 0, 0]], 9),
    ]
    for grid, expected in test_cases:
        assert solver.uniquePathsWithObstacles(grid) == expected
"vinithepooh@gmail.com"
] | vinithepooh@gmail.com |
c099a00e3607244ed563c090554ceef5b389d42c | 912f2c974e089dcc071f1bb6c4126ead82ddd971 | /django/main/migrations/0027_auto__chg_field_baseskill_aptitude__chg_field_baseitem_itemType__chg_f.py | 1bda4a3a7018e5aa4796eea0dde7af3a31f028b6 | [
"MIT"
] | permissive | Critical-Impact/ffrpg-gen | 639566f8e19101d5b7ad5fd03366fffbad5306ad | 022a5dde9fd9b073d550520fb6bc1041ba40e733 | refs/heads/master | 2021-01-19T13:25:08.708146 | 2014-05-19T06:08:20 | 2014-05-19T06:08:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,358 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Relax every listed foreign-key column to NULLable with
        on_delete=SET_NULL (South schema migration, apply direction)."""
        # Changing field 'BaseSkill.aptitude'
        db.alter_column('main_baseskill', 'aptitude_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Aptitude'], null=True, on_delete=models.SET_NULL))

        # Changing field 'BaseItem.itemType'
        db.alter_column('main_baseitem', 'itemType_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.ItemCategory'], null=True, on_delete=models.SET_NULL))

        # Changing field 'ItemCategory.baseSkill'
        db.alter_column('main_itemcategory', 'baseSkill_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.BaseSkill'], null=True, on_delete=models.SET_NULL))

        # Changing field 'Item.character'
        db.alter_column('main_item', 'character_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Character'], null=True, on_delete=models.SET_NULL))

        # Changing field 'Item.baseItem'
        db.alter_column('main_item', 'baseItem_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.BaseItem'], null=True, on_delete=models.SET_NULL))

        # Changing field 'Character.bodySlot'
        db.alter_column('main_character', 'bodySlot_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Item'], null=True, on_delete=models.SET_NULL))

        # Changing field 'Character.handSlot'
        db.alter_column('main_character', 'handSlot_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Item'], null=True, on_delete=models.SET_NULL))

        # Changing field 'Character.weaponSlot'
        db.alter_column('main_character', 'weaponSlot_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Item'], null=True, on_delete=models.SET_NULL))

        # Changing field 'Character.headSlot'
        db.alter_column('main_character', 'headSlot_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Item'], null=True, on_delete=models.SET_NULL))

        # Changing field 'Character.secondWeaponSlot'
        db.alter_column('main_character', 'secondWeaponSlot_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Item'], null=True, on_delete=models.SET_NULL))

        # Changing field 'Character.job'
        db.alter_column('main_character', 'job_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Job'], null=True, on_delete=models.SET_NULL))

        # Changing field 'Character.race'
        db.alter_column('main_character', 'race_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Race'], null=True, on_delete=models.SET_NULL))

        # Changing field 'Character.accessorySlot2'
        db.alter_column('main_character', 'accessorySlot2_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Item'], null=True, on_delete=models.SET_NULL))

        # Changing field 'Character.accessorySlot'
        db.alter_column('main_character', 'accessorySlot_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Item'], null=True, on_delete=models.SET_NULL))

        # Changing field 'UserProfile.currentCharacter'
        db.alter_column('main_userprofile', 'currentCharacter_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Character'], null=True, on_delete=models.SET_NULL))

        # Changing field 'Skill.character'
        db.alter_column('main_skill', 'character_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Character'], null=True, on_delete=models.SET_NULL))

        # Changing field 'Skill.baseSkill'
        db.alter_column('main_skill', 'baseSkill_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.BaseSkill'], null=True, on_delete=models.SET_NULL))

        # Changing field 'Job.expertiseSkill'
        db.alter_column('main_job', 'expertiseSkill_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.BaseSkill'], null=True, on_delete=models.SET_NULL))

        # Changing field 'Job.aptitude'
        db.alter_column('main_job', 'aptitude_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Aptitude'], null=True, on_delete=models.SET_NULL))
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'BaseSkill.aptitude'
raise RuntimeError("Cannot reverse this migration. 'BaseSkill.aptitude' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'BaseSkill.aptitude'
db.alter_column('main_baseskill', 'aptitude_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Aptitude']))
# User chose to not deal with backwards NULL issues for 'BaseItem.itemType'
raise RuntimeError("Cannot reverse this migration. 'BaseItem.itemType' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'BaseItem.itemType'
db.alter_column('main_baseitem', 'itemType_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.ItemCategory']))
# Changing field 'ItemCategory.baseSkill'
db.alter_column('main_itemcategory', 'baseSkill_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.BaseSkill'], null=True))
# User chose to not deal with backwards NULL issues for 'Item.character'
raise RuntimeError("Cannot reverse this migration. 'Item.character' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'Item.character'
db.alter_column('main_item', 'character_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Character']))
# User chose to not deal with backwards NULL issues for 'Item.baseItem'
raise RuntimeError("Cannot reverse this migration. 'Item.baseItem' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'Item.baseItem'
db.alter_column('main_item', 'baseItem_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.BaseItem']))
# Changing field 'Character.bodySlot'
db.alter_column('main_character', 'bodySlot_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Item'], null=True))
# Changing field 'Character.handSlot'
db.alter_column('main_character', 'handSlot_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Item'], null=True))
# Changing field 'Character.weaponSlot'
db.alter_column('main_character', 'weaponSlot_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Item'], null=True))
# Changing field 'Character.headSlot'
db.alter_column('main_character', 'headSlot_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Item'], null=True))
# Changing field 'Character.secondWeaponSlot'
db.alter_column('main_character', 'secondWeaponSlot_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Item'], null=True))
# User chose to not deal with backwards NULL issues for 'Character.job'
raise RuntimeError("Cannot reverse this migration. 'Character.job' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'Character.job'
db.alter_column('main_character', 'job_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Job']))
# User chose to not deal with backwards NULL issues for 'Character.race'
raise RuntimeError("Cannot reverse this migration. 'Character.race' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'Character.race'
db.alter_column('main_character', 'race_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Race']))
# Changing field 'Character.accessorySlot2'
db.alter_column('main_character', 'accessorySlot2_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Item'], null=True))
# Changing field 'Character.accessorySlot'
db.alter_column('main_character', 'accessorySlot_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Item'], null=True))
# Changing field 'UserProfile.currentCharacter'
db.alter_column('main_userprofile', 'currentCharacter_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Character'], null=True))
# User chose to not deal with backwards NULL issues for 'Skill.character'
raise RuntimeError("Cannot reverse this migration. 'Skill.character' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'Skill.character'
db.alter_column('main_skill', 'character_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Character']))
# User chose to not deal with backwards NULL issues for 'Skill.baseSkill'
raise RuntimeError("Cannot reverse this migration. 'Skill.baseSkill' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'Skill.baseSkill'
db.alter_column('main_skill', 'baseSkill_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.BaseSkill']))
# Changing field 'Job.expertiseSkill'
db.alter_column('main_job', 'expertiseSkill_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.BaseSkill'], null=True))
# Changing field 'Job.aptitude'
db.alter_column('main_job', 'aptitude_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Aptitude'], null=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True', 'symmetrical': 'False'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'blank': 'True', 'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '30'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True', 'related_name': "'user_set'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '30'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True', 'related_name': "'user_set'"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'db_table': "'django_content_type'", 'unique_together': "(('app_label', 'model'),)", 'ordering': "('name',)", 'object_name': 'ContentType'},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.aptitude': {
'Meta': {'object_name': 'Aptitude'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '20'})
},
'main.baseitem': {
'Meta': {'object_name': 'BaseItem'},
'armour': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'availability': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cost': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'damageAttribute': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '3'}),
'damageDieCount': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '10'}),
'damageDieSize': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '10'}),
'damageScale': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '100'}),
'evasion': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'itemSlot': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True', 'max_length': '2'}),
'itemType': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.ItemCategory']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'magicalArmour': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'magicalEvasion': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '100'}),
'target': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '100'}),
'tier': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'main.baseitemability': {
'Meta': {'object_name': 'BaseItemAbility'},
'baseItem': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.BaseItem']", 'symmetrical': 'False', 'related_name': "'abilities'"}),
'craftPoints': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'equippableTo': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tier': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'usedInCrafting': ('django.db.models.fields.BooleanField', [], {})
},
'main.baseskill': {
'Meta': {'object_name': 'BaseSkill'},
'aptitude': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Aptitude']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'attribute': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '20'}),
'halfRate': ('django.db.models.fields.BooleanField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '20'}),
'skillType': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '20'}),
'specialized': ('django.db.models.fields.BooleanField', [], {})
},
'main.character': {
'Meta': {'object_name': 'Character'},
'accessorySlot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Item']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'related_name': "'equippedAccessories'"}),
'accessorySlot2': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Item']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'related_name': "'equippedAccessories2'"}),
'age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True', 'max_length': '4'}),
'agility': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True', 'max_length': '3'}),
'baseHP': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True', 'max_length': '3'}),
'baseMP': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True', 'max_length': '3'}),
'blurb': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '1000'}),
'bodySlot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Item']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'related_name': "'equippedBodies'"}),
'bonusAptitudes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Aptitude']", 'symmetrical': 'False'}),
'characterImage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.ImageFile']", 'null': 'True', 'related_name': "'characterImages'"}),
'gil': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True', 'max_length': '10'}),
'handSlot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Item']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'related_name': "'equippedHands'"}),
'headSlot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Item']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'related_name': "'equippedHeads'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Job']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'related_name': "'characters'"}),
'level': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True', 'max_length': '3'}),
'magic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True', 'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '100'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Race']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'related_name': "'characters'"}),
'secondWeaponSlot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Item']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'related_name': "'equippedSecondaryWeapons'"}),
'speed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True', 'max_length': '3'}),
'spirit': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True', 'max_length': '3'}),
'strength': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True', 'max_length': '3'}),
'traits': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Trait']", 'symmetrical': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'related_name': "'characters'"}),
'vitality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True', 'max_length': '3'}),
'weaponSlot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Item']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'related_name': "'equippedWeapons'"}),
'xp': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True', 'max_length': '10'})
},
'main.imagefile': {
'Meta': {'object_name': 'ImageFile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '254'})
},
'main.item': {
'Meta': {'object_name': 'Item'},
'baseItem': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.BaseItem']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'character': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Character']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'related_name': "'items'"}),
'damageAttribute': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'main.itemcategory': {
'Meta': {'object_name': 'ItemCategory'},
'baseSkill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.BaseSkill']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'craftPoints': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'subCategory': ('django.db.models.fields.IntegerField', [], {'max_length': '2'})
},
'main.job': {
'Meta': {'object_name': 'Job'},
'accuracyBonus': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'aptitude': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Aptitude']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'expertiseAttribute': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '20'}),
'expertiseSkill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.BaseSkill']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hasMP': ('django.db.models.fields.BooleanField', [], {}),
'hpDie': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.ItemCategory']", 'symmetrical': 'False'}),
'maxAgility': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxMagic': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxSpeed': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxSpirit': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxStrength': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxVitality': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'mpDie': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '20'}),
'skillPoints': ('django.db.models.fields.IntegerField', [], {'max_length': '3'})
},
'main.overviewbox': {
'Meta': {'object_name': 'OverviewBox'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'viewName': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.overviewboxsetting': {
'Meta': {'object_name': 'OverviewBoxSetting'},
'character': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Character']", 'related_name': "'overviewBoxSettings'"}),
'enabled': ('django.db.models.fields.BooleanField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'overviewBox': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.OverviewBox']"}),
'sortOrder': ('django.db.models.fields.IntegerField', [], {}),
'spanFull': ('django.db.models.fields.BooleanField', [], {})
},
'main.race': {
'Meta': {'object_name': 'Race'},
'dayVision': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'hearing': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lifeSense': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'magicSense': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxAgility': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxMagic': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxSpeed': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxSpirit': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxStrength': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxVitality': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '20'}),
'nightVision': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'smell': ('django.db.models.fields.IntegerField', [], {'max_length': '3'})
},
'main.skill': {
'Meta': {'object_name': 'Skill'},
'baseSkill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.BaseSkill']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'character': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Character']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'related_name': "'skills'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {}),
'specialization': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '20'})
},
'main.trait': {
'Meta': {'object_name': 'Trait'},
'cost': ('django.db.models.fields.IntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'traitType': ('django.db.models.fields.IntegerField', [], {})
},
'main.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'currentCharacter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Character']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['main'] | [
"criticalimpact@gmail.com"
] | criticalimpact@gmail.com |
cf31ecc2b83b678a293a203e36bcc98175318eaa | 0a46d1fd4149efdac4b77273034ad9f347b6147e | /repair/apps/conclusions/urls.py | c92814bdd9fba654ee3f328f7be8383f2b7b60b6 | [] | no_license | MaxBo/REPAiR-Web | 176af4b740109c007b5fb205137f102d94f18b90 | a5ba34f085f0d5af5ea3ded24706ea54ab39e7cb | refs/heads/master | 2023-03-22T19:23:02.417872 | 2020-10-02T14:02:30 | 2020-10-02T14:02:30 | 103,665,316 | 9 | 8 | null | 2022-12-07T17:21:47 | 2017-09-15T14:18:10 | Jupyter Notebook | UTF-8 | Python | false | false | 163 | py | from django.conf.urls import url
from repair.apps.conclusions import views
urlpatterns = [
url(r'^$', views.ConclusionsIndexView.as_view(), name='index'),
]
| [
"christoph.franke@web.de"
] | christoph.franke@web.de |
85efa0fd9a8add204cb93978b9a3bd97be9f9aac | 283e8713290bef56629ae3ae1cdc3074127e40fc | /venv/Lib/site-packages/libtiff/lzw.py | 9e486b647d50931c1aa1a6e5c25a57c35a7838a9 | [] | no_license | cminmins/Pixel_processing | faf3cd4d84653ba2d5e47bf2a8eb2060f11b90eb | cb08f922342737f9dfc7b327170b93edd63eea7d | refs/heads/master | 2022-02-24T01:10:32.387232 | 2022-02-05T15:23:37 | 2022-02-05T15:23:37 | 144,678,441 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,082 | py | """ Encoder and decoder of Lempel-Ziv-Welch algorithm for TIFF.
This module is obsolete, use tif_lzw extension module instead.
"""
# Author: Pearu Peterson
# Created: May 2010
import numpy
default_backend='bittools'
#default_backend='bittools'
if default_backend=='bitarray':
from bitarray import bitarray
if default_backend=='bittools':
from bittools import setword, getword
CODECLEAR = 256
CODEEOI = 257
CODESTART = 258
def encode_bitarray(seq, max_bits=12):
""" Compress sequence using Lempel-Ziv-Welch algorithm for TIFF.
Parameters
----------
seq : {str, numpy.ndarray}
max_bits : int
Specify maximum bits for encoding table.
Returns
-------
bseq : bitarray
See also
--------
decode_bitarray
"""
if isinstance (seq, numpy.ndarray):
seq = seq.tostring()
r = bitarray(0, endian='little')
write = r.fromword
init_table = [(chr(code),code) for code in range (256)]
table = {}
table_get = table.get
table_clear = table.clear
table_update = table.update
sup_code2 = (1<<max_bits) - 2
next_code = CODESTART
bits = 9
max_code = (1<<bits)
s = ''
table_update(init_table)
index = 0
write(CODECLEAR, bits)
for c in seq:
s1 = s + c
if s1 in table:
s = s1
else:
write(table_get(s), bits)
table[s1] = next_code
next_code += 1
s = c
if next_code==sup_code2:
write(table_get(s), bits)
write(CODECLEAR, bits)
s = ''
table_clear()
table_update(init_table)
next_code = CODESTART
bits = 9
max_code = (1<<bits)
elif next_code==max_code:
bits += 1
max_code = (1<<bits)
if s:
write(table_get(s), bits)
write(CODEEOI, bits)
return r
def encode_bittools(seq, max_bits=12):
""" Compress sequence using Lempel-Ziv-Welch algorithm for TIFF.
Parameters
----------
seq : {str, numpy.ndarray}
max_bits : int
Specify maximum bits for encoding table.
Returns
-------
bseq : numpy.ndarray
See also
--------
decode_bittools
"""
if isinstance (seq, numpy.ndarray):
nbytes = seq.nbytes*2
seq = seq.tostring()
else:
nbytes = len(seq)*2
r = numpy.zeros((nbytes,), dtype=numpy.ubyte)
init_table = [(chr(code),code) for code in range (256)]
table = {}
table_get = table.get
table_clear = table.clear
table_update = table.update
sup_code2 = (1<<max_bits) - 2
next_code = CODESTART
bits = 9
max_code = (1<<bits)
s = ''
table_update(init_table)
index = setword(r, 0, bits, CODECLEAR, 1)
for c in seq:
s1 = s + c
if s1 in table:
s = s1
else:
index = setword(r, index, bits, table_get(s), 1)
table[s1] = next_code
next_code += 1
s = c
if next_code==sup_code2:
index = setword(r, index, bits, table_get(s), 1)
index = setword(r, index, bits, CODECLEAR, 1)
s = ''
table_clear()
table_update(init_table)
next_code = CODESTART
bits = 9
max_code = (1<<bits)
elif next_code==max_code:
bits += 1
max_code = (1<<bits)
if s:
index = setword(r, index, bits, table_get(s), 1)
index = setword(r, index, bits, CODEEOI)
bytes = index//8
if 8*bytes < index:
bytes += 1
return r[:bytes]
def decode_bitarray(bseq):
""" Decompress Lempel-Ziv-Welch encoded sequence.
Parameters
----------
bseq : {bitarray, numpy.ndarray}
Returns
-------
seq : str
See also
--------
encode_bitarray
"""
if isinstance(bseq, numpy.ndarray):
bseq = bitarray(bseq, endian='little')
assert bseq.endian ()=='little',repr(bseq.endian())
read = bseq.toword
init_invtable = [(code, chr(code)) for code in range (256)]
table = [chr(code) for code in range(256)] + ['CODECLEAR', 'CODEEOI']
table_append = table.append
table_len = table.__len__
bits = 9
max_code2 = (1<<bits) - 2
i = 0
seq = []
seq_append = seq.append
while True:
code = read(i, bits)
i += bits
if code==CODEEOI:
break
elif code==CODECLEAR:
del table[CODESTART:]
bits = 9
max_code2 = (1<<bits) - 2
code = read(i, bits)
i += bits
old_str = table[code]
seq_append(old_str)
old_code = code
else:
l = table_len()
if code < l:
s = table[code]
table_append(old_str + s[0])
old_str = s
else:
old_str = old_str + old_str[0]
table_append(old_str)
seq_append(old_str)
old_code = code
if l==max_code2:
bits += 1
max_code2 = (1<<bits) - 2
return ''.join(seq)
def decode_bittools(bseq):
""" Decompress Lempel-Ziv-Welch encoded sequence.
Parameters
----------
bseq : numpy.ndarray
Returns
-------
seq : str
See also
--------
encode_bittools
"""
init_invtable = [(code, chr(code)) for code in range (256)]
table = [chr(code) for code in range(256)] + ['CODECLEAR', 'CODEEOI']
table_append = table.append
table_len = table.__len__
bits = 9
max_code2 = (1<<bits) - 2
i = 0
seq = []
seq_append = seq.append
while True:
code, i = getword(bseq, i, bits)
if code==CODEEOI:
break
elif code==CODECLEAR:
del table[CODESTART:]
bits = 9
max_code2 = (1<<bits) - 2
code, i = getword(bseq, i, bits)
old_str = table[code]
seq_append(old_str)
old_code = code
else:
l = table_len()
if code < l:
s = table[code]
table_append(old_str + s[0])
old_str = s
else:
old_str = old_str + old_str[0]
table_append(old_str)
seq_append(old_str)
old_code = code
if l==max_code2:
bits += 1
max_code2 = (1<<bits) - 2
return ''.join(seq)
#print 'backend:', default_backend
if default_backend=='bitarray':
encode = encode_bitarray
decode = decode_bitarray
def encode_array(arr):
return encode_bitarray(arr).toarray ()
if default_backend=='bittools':
encode = encode_array = encode_bittools
decode = decode_bittools
def test_lzw():
for s in ['TOBEORNOTTOBEORTOBEORNOT', '/WED/WE/WEE/WEB/WET'][:0]:
r = encode (s)
a = decode (r)
assert a==s,repr((a,s))
if 1:
f = open(__file__)
s = f.read ()
f.close ()
r = encode (s)
a = decode (r)
assert a==s
print('ok')
import sys
import os
import time
for fn in sys.argv[1:]:
if not os.path.exists(fn):
continue
t0 = time.time()
f = open(fn, 'rb')
s = f.read()
f.close()
t = time.time()-t0
print('Reading %s took %.3f seconds, bytes = %s' % (fn, t, len(s)))
t0 = time.time()
r = encode(s)
t = time.time()-t0
sz = len(r)
if default_backend=='bitarray':
sz //= 8
print('Encoding took %.3f seconds, compress ratio = %.3f, Kbytes per second = %.3f' % (t, len (s)/sz, len(s)/t/1024))
t0 = time.time()
s1 = decode(r)
t = time.time()-t0
print('Decoding took %.3f seconds, Kbytes per second = %.3f' % (t, (sz/t)/1024))
assert s1==s
if __name__=='__main__':
test_lzw()
| [
"41716029+cminmins@users.noreply.github.com"
] | 41716029+cminmins@users.noreply.github.com |
ca1b80ebaa3f1cc2adf38ef25569233652dfc9f2 | 52fa10d6580ccabdefba2ae7576401bc7e9b437a | /TreeGen.py | bf0ad84c890c3b5ccef30132bb3aa0452e5048c5 | [] | no_license | Sinle4Cat/NaivePruningMethod | f7e68e1f7c1a6ccd33ea4b16cd50d572f2017f21 | aa0cd07d74646c0c461d8d0f45cf6ab89a3662a6 | refs/heads/main | 2023-02-14T18:43:50.547100 | 2021-01-12T13:23:35 | 2021-01-12T13:23:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,151 | py | import nltk.tree as tree
from stanfordcorenlp import StanfordCoreNLP
import nltk
import copy
import re
from treelib import Tree as Tr
Sub=[]#识别到的从句
Remain=[]#识别到的不可拆词
Trunk=[]#记录为token位置
RelationShip={}
TokenAll=[] #每个句子的全部token
RemoveTreedist={}
StrGen=[]
RemainLt=[] #需要保留的token 不能正常删除
nlpEN=None
class node:
def __init__(self, type, flag=0, word=""):
self.type = type
self.flag = flag
self.word = word
def ccPart(dependency,token):#################
dependencyParse = copy.deepcopy(dependency)
findSource=[]
findCC=[]
findDis={}
RemoveList=[]
DependList=[]
ccStr=[]
isRetoken=[i for i in range(len(token))]
for dependencyitem in dependency:
tag,begin,end=dependencyitem
if tag=="conj":
findSource.append(begin)
findCC.append(end)
if len(findCC) == 0:
return []
for item in findSource:
flag=False
for key in findDis.keys():
if(key==item):
flag=True
if flag==False:
findDis[item]=1
else:
count=findDis[item]
count+=1
findDis[item]=count
mains=[]
#判断前后是否有对匹配对依赖
for i in range(len(findCC)):
dependencyParseTemp = copy.deepcopy(dependency)
dependencyParseTemp2 = copy.deepcopy(dependency)
if(findDis[findSource[i]]==1):
stack=[]
stack.append(findSource[i])
removeItem1=[]
removeItem=[]
#判断是否为根节点
cutindex=-1
for relation in dependencyParseTemp:
tag, begin, end = relation
if (begin == findSource[i] and tag=='nsubj'):
cutindex=end
while(len(stack)>0):
popitem=stack.pop()
removeItem.append(popitem)
num = len(dependencyParseTemp)
j = 0
while j < num:
relation = dependencyParseTemp[j]
tag,begin,end=relation
if(begin==popitem and end>cutindex and end!=findCC[i]):
stack.append(end)
dependencyParseTemp.remove(relation)
j-=1
num-=1
j=j+1
num=len(dependencyParseTemp)
j=0
while j<num:
relation=dependencyParseTemp[j]
tag, begin, end = relation
if (tag == "cc" and begin == findCC[i]):
removeItem.append(end)
dependencyParseTemp.remove(relation)
j=j-1
num=num-1
if (end == findSource[i]):
end = findCC[i]
dependencyParseTemp.remove(relation)
relation = (tag, begin, end)
dependencyParseTemp.insert(j,relation)
if(begin == findSource[i]):
if(tag!='aux'):
begin=findCC[i]
dependencyParseTemp.remove(relation)
relation = (tag, begin, end)
dependencyParseTemp.insert(j, relation)
else:
dependencyParseTemp.remove(relation)
j = j - 1
num = num - 1
j+=1
stack.append(findCC[i])
while len(stack)>0:
popitem = stack.pop()
removeItem1.append(popitem)
num = len(dependencyParseTemp2)
j = 0
while j < num:
relation = dependencyParseTemp2[j]
tag, begin, end = relation
if (begin == popitem):
stack.append(end)
dependencyParseTemp2.remove(relation)
j -= 1
num -= 1
j = j + 1
isRt1 = copy.deepcopy(isRetoken)
isRt2 = copy.deepcopy(isRetoken)
for j in removeItem:
isRt1.remove(j-1)
for j in removeItem1:
isRt2.remove(j-1)
str1=replacenth(token,isRt1)
str2 = replacenth(token, isRt2)
if(str1[-1]==' '):
str1=str1[0:len(str1)-1]
if(str1[-1]!='.'):
str1+='.'
if(str2[-1] == ' '):
str2 = str2[0:len(str2) - 1]
if (str2[-1] != '.'):
str2 += '.'
ccStr.append(str2)
ccStr.append(str1)
else:
if findSource[i] not in mains:
mains.append(findSource[i])
stack = []
stack.append(findSource[i])
removeItem = []
# 判断是否为根节点
# 删除出去我们选择的conj以外的所有同级别内容
while (len(stack) > 0):
popitem = stack.pop()
removeItem.append(popitem)
num = len(dependencyParseTemp)
j = 0
while j < num:
relation = dependencyParseTemp[j]
tag, begin, end = relation
if (begin == popitem and end != findCC[i]) or ( begin == popitem and tag == "conj" and end != findCC[i]):
stack.append(end)
dependencyParseTemp.remove(relation)
j -= 1
num -= 1
j = j + 1
num = len(dependencyParseTemp)
j = 0
while j < num:
relation = dependencyParseTemp[j]
tag, begin, end = relation
if (tag == "cc" and begin == findCC[i]):
removeItem.append(end)
dependencyParseTemp.remove(relation)
j = j - 1
num = num - 1
if (end == findSource[i]):
end = findCC[i]
dependencyParseTemp.remove(relation)
relation = (tag, begin, end)
dependencyParseTemp.insert(j, relation)
j += 1
isRt1 = copy.deepcopy(isRetoken)
for j in removeItem:
isRt1.remove(j - 1)
str1 = replacenth(token, isRt1)
str1 = str1.replace(','," ")
if (str1[-1] == ' '):
str1 = str1[0:len(str1) - 1]
ccStr.append(str1)
for i in mains:
stack = []
dependencyParseTemp = copy.deepcopy(dependency)
for relation in dependencyParseTemp:
tag, begin, end = relation
if (begin == i and tag == "conj"):
stack.append(end)
dependencyParseTemp.remove(relation)
removeItem = []
# 判断是否为根节点
# 删除出去我们选择的conj以外的所有同级别内容
while (len(stack) > 0):
popitem = stack.pop()
removeItem.append(popitem)
for relation in dependencyParseTemp:
tag, begin, end = relation
if (begin == popitem ) or (begin == popitem and tag == "conj"):
stack.append(end)
dependencyParseTemp.remove(relation)
isRt1 = copy.deepcopy(isRetoken)
for i in removeItem:
isRt1.remove(i - 1)
str1 = replacenth(token, isRt1)
str1 = str1.replace(',', " ")
if (str1[-1] == ' '):
str1 = str1[0:len(str1) - 1]
ccStr.append(str1)
return ccStr
def dependencyTree(dependency,token):
tree=[]
dependencyParse=copy.deepcopy(dependency)
token = copy.deepcopy(token)
tree = Tr()
root = -1
#构建依存树
while (len(dependencyParse) != 0):
dependencyParseItem = dependencyParse.pop(0)
i, begin, end = dependencyParseItem
if begin == 0:
root = end
tree.create_node(token[end - 1], end, data=node(i, 0, token[end - 1]))
continue
elif tree.contains(begin):
tree.create_node(token[end - 1], end, parent=begin, data=node(i, 0, token[end - 1]))
elif len(dependencyParse)>=1:
dependencyParse.append(dependencyParseItem)
return tree,root
def Cons_Traversal(t):
queue= []
queue.append(t)
current = ""
while queue:
current = queue.pop(0)
if isinstance(current, tree.Tree):
flag=False
if current.label()=="SBAR":
Sub.append(current.leaves())
continue
for i in range(len(current)):
if isinstance(current[i], tree.Tree)and(current[i].label()=="HYPH"):
flag=True
if(flag==False):
for i in range(len(current)):
queue.append(current[i])
#print(current.label(),current)
else:
Remain.append(current.leaves())
elif isinstance(current, str):
#print(current)
pass
def traverse_tree(tree):
print("tree:", tree)
if(tree.label()=="SBAR"):
Sub.append(tree.leaves())
return
if(tree.label()=="NP" or tree.label()=="ADJP" or tree.label()=="ADVP"):
#判断是否有副词组合,形容词组合,名词组合。在判断某些词汇的时候遇到了一些问题。先去除了表语的识别。
print(tree.leaves())
if len(tree.leaves())>1:
Remain.append(tree.leaves())
return
# print("tree2:",tree[0])
for subtree in tree:
if type(subtree) == nltk.tree.Tree:
traverse_tree(subtree)
def Depd_Travesal(dependency_tree,token,Trunk):
# 保留主干成
dependencyParse=copy.deepcopy(dependency_tree)
token = copy.deepcopy(token)
root = -1
while (len(dependencyParse) != 0):
dependencyParseItem = dependencyParse.pop(0)
i, begin, end = dependencyParseItem
if RelationShip.get(i,-10)==3:
Trunk.append(end-1)
def readConf():
Path = "../RelateWork/Relationship"
f = open(Path)
line = f.readline()
while line:
line = line[0:len(line) - 1]
sarr = line.split(' ')
RelationShip[sarr[0]]=eval(sarr[1])
line = f.readline()
f.close()
def Prune(dependency_tree,token):
pass
def Pruning(Tree,root,dependency,token,isReToken,string):
global StrGen
if(len(Tree.children(root))>0):
temp=Tree.children(root)
temp.sort(key=lambda x:len(Tree.children(x.identifier)))
temp.reverse()
rember=0
for node in temp:
index = node.identifier
if (len(Tree.children(index)) == 0):
break
rember = rember + 1
tempmax=[]
if(rember>0):
tempmax=temp[:rember]
templow=temp[rember:]
templow.reverse()
temp=tempmax+templow
for node in temp:
index=node.identifier
tag=node.data.type
if RelationShip.get(tag,-10)==1:
if len(Tree.children(root)) > 0:
string, isReToken = Pruning(Tree, index, dependency, token, isReToken, string)
elif RelationShip.get(tag,-10)==-10 or RelationShip.get(tag,-10)==3:
if len(Tree.children(root)) > 0:
string, isReToken = Pruning(Tree, index, dependency, token, isReToken, string)
elif RelationShip.get(tag, -10) == 4:
return string,isReToken
elif RelationShip.get(tag,-10)==0 :
if len(Tree.children(index)) > 0:
string,isRetoken=Pruning(Tree,index,dependency,token,isReToken,string)
if len(Tree.children(index))>=0 :
remove = Tree.remove_subtree(index)
removeNode=remove.nodes
listRemove0=[]
#判断Remain是否可以整体删除
# for lll in RemainLt:
# flag=0
# for jj in lll:
# for iii in removeNode:
# if(iii==jj):
# flag=1
# break
#
# if flag==1:
# break
# test=1
# for ln in range(len(lll)):
# t1=0
# for iii in removeNode:
# if(lll[ln]==iii):
# t1=1
# break
# if(t1==0):
# test=0
# if test==0:
# return string,isReToken
dependencyParse = copy.deepcopy(dependency)
##进行删除
for i in removeNode:
listRemove0.append(i)
for i in removeNode:
isReToken.remove(i-1)
treeTemp=copy.deepcopy(Tree)
RemoveTreedist[index] = treeTemp
for tuple in dependency:
if tuple[2] == index:
dependency.remove(tuple)
for i in listRemove0:
string= replacenth(token, isReToken)
string = string.replace(" ", " ")
StrGen.append(string)
elif not JustReMain(index):
remove = Tree.remove_subtree(index)
removeNode = remove.nodes
listRemove0 = []
dependencyParse = copy.deepcopy(dependency)
##进行删除
for i in removeNode:
listRemove0.append(i)
for i in removeNode:
isReToken.remove(i-1)
for tuple in dependency:
if tuple[2] == index:
dependency.remove(tuple)
tempTree=copy.deepcopy(Tree)
RemoveTreedist[index]=tempTree
string= replacenth(token, isReToken)
string = string.replace(" ", " ")
StrGen.append(string)
else :
type=Tree.nodes[root].data.type
index = Tree.nodes[root].identifier
if RelationShip.get(type,-10)==0 and not JustReMain(index):
remove = Tree.remove_subtree(index)
removeNode = remove.nodes
listRemove0 = []
dependencyParse = copy.deepcopy(dependency)
##进行删除
for i in removeNode:
isReToken.remove(i-1)
for i in removeNode:
listRemove0.append(i)
for tuple in dependency:
if tuple[2] == index:
dependency.remove(tuple)
tempTree = copy.deepcopy(Tree)
RemoveTreedist[index] = tempTree
string= replacenth(token, isReToken)
string=string.replace(" "," ")
StrGen.append(string)
return string, isReToken
else:
pass
return string,isReToken
def JustReMain(index):
for i in RemainLt:
for j in i:
if(index==j):
return True
return False
def TokenToStr(string,Token,isRemaintoken):
pass
# isRemaintoken.sort()
# newstring=""
# for i in isRemaintoken:
# if len(token[i])==1 and not ('a'<=token[i]<='z' or 'A'<=token[i]<='Z' or '0'<=token[i]<='9'):
# if(len(newstring)>=1):
# charw=newstring[-1]
# if(charw==' '):
# newstring=newstring[:len(newstring)-1]
# if(token[i]==','):
# newstring+=token[i]
# else:
# newstring+=token[i]+" "
# else:
# newstring=newstring+token[i]+" "
def replacenth(token,isRetoken):
newstring=""
for i in range(len(isRetoken)):
if len(token[isRetoken[i]])==1 and not ('a'<=token[isRetoken[i]]<='z' or 'A'<=token[isRetoken[i]]<='Z' or '0'<=token[isRetoken[i]]<='9'):
if(len(newstring)>=1):
charw=newstring[-1]
if(charw==' '):
newstring=newstring[:len(newstring)-1]
if(i==','):
newstring+=token[isRetoken[i]]
else:
newstring+=token[isRetoken[i]]+" "
else:
newstring=newstring+token[isRetoken[i]]+" "
return newstring
def justTokenSame(token1,token2):
if(len(token1)+1!=len(token2)):
return False
else:
flag=False
index1=0
index2=0
while index1<len(token1):
if flag==False:
if(token1[index1]!=token2[index2]):
index2+=1
flag==True
else:
index1+=1
index2+=1
else:
if (token1[index1] != token2[index2]):
return False
return True
def replacethL(string ,sub,isReToken):
result=""
newtoken=[]
for y in range(20):
try:
new_sent = replacenth1(string, sub, y + 1).replace(" ", " ")
newtoken=nlpEN.word_tokenize(new_sent)
if(justTokenSame(newtoken,isReToken)):
result=new_sent
break
except:
break
return result,newtoken
def replacenth1(string, sub, n):
where = [m.start() for m in re.finditer(sub, string)][n - 1]
before = string[:where]
after = string[where:]
after = after.replace(sub,"", 1)
newString = before + after
return newString
def RemoveSub(string,sub,token):
subtoken=nlpEN.word_tokenize(sub)
Remainls=[]
for i in range(0, len(token)-len(subtoken)+1):
flag=0
index=0
while(index<len(subtoken)):
if(token[i+index]!=subtoken[index]):
break
index+=1
if(index ==len(subtoken)):
for j in range(len(subtoken)):
Remainls.append(j+i)
return Remainls
def FindRemain(token):
RemainList=[]
for i in range(len(token)):
for item in Remain:
index=0
itemNum=len(item)
while(index<itemNum):
if token[i+index]!=item[index]:
break
index+=1
if index==itemNum:
ll=[]
for j in range(itemNum):
ll.append(j+i)
RemainList.append(ll)
return RemainList
def Gen(sent):
source_tree = tree.Tree.fromstring(nlpEN.parse(sent))
sentSub = []
global Sub
global StrGen
Sub.clear()
StrGen.clear()
sentSubRemain = []
MainRemoveSub = []
token = nlpEN.word_tokenize(sent)
MainRemain = [i for i in range(len(token))]
# 对于从句的删除应该是连续的token
Cons_Traversal(source_tree) # 识别不可拆组合词 从句
if len(Sub) > 0:
str = copy.deepcopy(sent)
for i in range(len(Sub)):
sub = ""
for j in Sub[i]:
sub += j + " "
sub = sub[0:len(sub) - 1]
isRemain = RemoveSub(str, sub, token)
sub += "."
sentSubRemain.append(isRemain)
sentSub.append(sub)
for sentsubtoken in sentSubRemain:
for j in sentsubtoken:
MainRemain.remove(j)
# 对主句进行处理:
sentMain = replacenth(token, MainRemain)
if(sentMain[0]==',' or sentMain[0]=='.'):
sentMain= sentMain[1:len(sentMain)]
token_main = nlpEN.word_tokenize(sentMain)
dep_main = nlpEN.dependency_parse(sentMain)
global RemainLt
RemainLt.clear()
RemainLt = FindRemain(token_main)
Trunk.clear()
Depd_Travesal(dep_main, token_main, Trunk) # 识别主干内容
strlist = ccPart(dep_main, token_main)
if (len(strlist) == 0):
dp_tree_main, root = dependencyTree(dep_main, token_main)
re_token_main = [i for i in range(len(token_main))]
#dp_tree_main.show()
# Tree,root,dependency,token,isReToken,string)
string, re_token_main = Pruning(dp_tree_main, root, dep_main, token_main, re_token_main, sentMain)
else:
for sentCC in strlist:
StrGen.append(sentCC)
token_cc = nlpEN.word_tokenize(sentCC)
dep_cc = nlpEN.dependency_parse(sentCC)
RemainLt.clear()
RemainLt = FindRemain(token_cc)
Trunk.clear()
Depd_Travesal(dep_cc, token_cc, Trunk)
dp_tree_cc, root = dependencyTree(dep_cc, token_cc)
re_token_cc = [i for i in range(len(token_cc))]
#dp_tree_cc.show()
# Tree,root,dependency,token,isReToken,string)
string, re_token_cc = Pruning(dp_tree_cc, root, dep_cc, token_cc, re_token_cc, sentCC)
# 对从句进行处理
for sentitem in sentSub:
StrGen.append(sentitem)
Trunk.clear()
dependency_tree = nlpEN.dependency_parse(sentitem)
token_sub = nlpEN.word_tokenize(sentitem)
RemainLt = FindRemain(token_sub)
Depd_Travesal(dependency_tree, token_sub, Trunk) # 识别主干内容
strlist = ccPart(dependency_tree, token_sub)
if (len(strlist) == 0):
dp_tree_sub, root = dependencyTree(dependency_tree, token_sub)
re_token_sub = [i for i in range(len(token_sub))]
string, re_token_main = Pruning(dp_tree_sub, root, dependency_tree, token_sub, re_token_sub, sentitem)
strlist = ccPart(dep_main, token_main)
else:
for sentCC in strlist:
StrGen.append(sentCC)
token_cc = nlpEN.word_tokenize(sentCC)
dep_cc = nlpEN.dependency_parse(sentCC)
RemainLt.clear()
RemainLt = FindRemain(token_cc)
Trunk.clear()
Depd_Travesal(dep_cc, token_cc, Trunk)
dp_tree_cc, root = dependencyTree(dep_cc, token_cc)
re_token_cc = [i for i in range(len(token_cc))]
#dp_tree_cc.show()
# Tree,root,dependency,token,isReToken,string)
string, re_token_cc = Pruning(dp_tree_cc, root, dep_cc, token_cc, re_token_cc, sentCC)
StrResult=copy.deepcopy(StrGen)
return StrResult
def genAll(dataset,nlpEN):
file = open(dataset, "r", encoding="utf-8")
dic = {}
Strlist=[]
for line in file:
sent = line.split("\n")[0]
print(sent)
dependcy = nlpEN.dependency_parse(sent)
token = nlpEN.word_tokenize(sent)
strr=""
for i, begin, end in dependcy:
if begin - 1 < 0:
first = "NULL"
else:
first = token[begin - 1]
last = token[end - 1]
strr+=i+'-'.join([str(begin), first])+ '-'.join([str(end), last])+"\n"
Strlist.append(strr)
phrases = Gen(sent)
dic[sent] = []
for t in phrases:
dic[sent].append(t)
return dic,Strlist
def print1(dic,Strlist):
n=0
fo = open("Result.txt", "w",encoding='utf-8')
for sent in dic:
print("句子:"+sent)
fo.write("句子:"+sent+"\n")
print("依存关系:")
fo.write("依存关系:"+"\n")
print(Strlist[n])
fo.write(Strlist[n]+"\n")
print("派生句子:")
fo.write("派生句子:"+"\n")
n=n+1
for new in dic[sent]:
print(" "+new)
fo.write(" "+new + "\n")
fo.write("\n")
if __name__ == '__main__':
readConf()#读取Relationship
dataset = "../dataset/temp"
nlpEN = StanfordCoreNLP(r'D:\nlpenvironment\stanford-corenlp-4.1.0')
# sent = "We're going to Ferguson right now because the police killed an 18-year-old boy and it wasn't right."
# str=Gen(sent)
dist,strlist=genAll(dataset, nlpEN)
print1(dist,strlist)
nlpEN.close() | [
"35651345+Sinleasswecan@users.noreply.github.com"
] | 35651345+Sinleasswecan@users.noreply.github.com |
64247d29f239cd834930ca1147148a57905fa355 | 75f046e1a2ffb34ee9b066e41af4d32d1b45a3c4 | /preprocess/create_mask.py | 64e13eee3e6a4f03f28efcc079df008ae3919afb | [] | no_license | matthewr6/mri-contrast-synth | 25f01035bbe8f514871ba1b3fd166acd5cf7be96 | e8fdf4da58bfc209d6c3124938131f8d4855238c | refs/heads/master | 2023-05-31T11:23:40.498356 | 2021-06-09T01:48:33 | 2021-06-09T01:48:33 | 347,223,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | import os
import sys
import glob
import subprocess
import numpy as np
import nibabel as nib
paths = glob.glob('/data/mradovan/7T_WMn_3T_CSFn_pairs/*')
infiles = [
# 'CSFn.nii.gz',
# 'CSFnS.nii.gz',
'WMn.nii.gz',
]
for infile in infiles:
if 'WMn' in infile:
outfile = '{}B_direct.nii.gz'.format(infile.split('.')[0])
else:
outfile = '{}B.nii.gz'.format(infile.split('.')[0])
processes = []
running = 0
process_batch_size = 48
for subj_path in paths:
processes.append(
subprocess.Popen('cd {} && mri_watershed {} {}'.format(subj_path, infile, outfile), shell=True)
)
running += 1
if running >= process_batch_size:
running = 0
for p in processes:
p.wait()
processes = []
for p in processes:
p.wait()
| [
"mradovan@stanford.edu"
] | mradovan@stanford.edu |
9cf5ec04d45c55aa2d077d17107a6832fd095729 | f8166c72c0514f39ff4fc6bbb3d56ac2d8089fb0 | /whileInput.py | 46d75a3531d93d86855555c132488e21d76b1ddc | [] | no_license | KahlilMonteiro-lpsr/class-samples | 5f2a1dd68971b0c5ad342c709454493275f6138a | 6ed5e8fa37ca2bda53f4bc387a829a97d20b8f2c | refs/heads/master | 2021-01-18T22:08:13.251542 | 2016-06-08T22:07:53 | 2016-06-08T22:07:53 | 48,007,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | # purposely an infinite loop
myInput = "LPS"
while myInput != "leave":
myInput = raw_input()
print("You said: " + myInput)
| [
"lps@lps-1011PX.(none)"
] | lps@lps-1011PX.(none) |
928398d8189b6ca4fad6f24fb3459cbd6c5a60bb | 43c7deacc67e00981717e8b4c15a3e1db2577004 | /analysis_original.py | 23cdf56dc280a0b0270ea2b6a9c7b4e209c2499a | [] | no_license | MikeHathaway/Options-Analysis | 010a8721af22e57825616e16a3a91555b447915b | 3661999b5e102aab7a2f1dde3b4b0882751ebd77 | refs/heads/master | 2021-01-10T16:22:21.205359 | 2016-03-24T02:37:20 | 2016-03-24T02:37:20 | 54,335,923 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | import pandas as pd
import numpy as np
import csv
import time
from datetime import date
today = date.today()
#select stock ticker to be studied - work in progress
# raw_input('Enter a Stock ticker, ho: ') + '.csv'
#Open selected option chain
file = open('sune.csv')
csv_file = csv.reader(file)
file_headers = next(csv_file)
print file_headers
#initiatilze environment
rownum = 0
Strike = 0
Bid = 0
Ask = 0
spread = 0
#open_interest = row[11]
potential_options = []
#MVP Option = 0 - Identify a specific option in the chain based upon favorable characteristics
#Analyze key chain parameters - Add put call ratio later
#def bid_ask_spread(Call, Put, Strike, Bid, Ask):
for row in csv_file:
Bid = float(row[6])
Ask = float(row [7])
spread = Ask - Bid
row[13:13] = [str(spread)]
row.append(row[13:13])
print row
def spread_analysis(spread, open_interest):
for row in csv_file:
if float(spread) >= 0.2: # and Open_Interest > raw_input :
del row
else:
print row
return spread
print spread_analysis
my_writer = csv.DictWriter(file, fieldnames=file_headers)
#GOAL: screen the option chain for options that meet certain characteristics (certain bid ask spread, ratio, open interest)
#Good tutorial on opening csv: https://newcircle.com/s/post/1572/python_for_beginners_reading_and_manipulating_csv_files
#Tutorial on manipulating CSV https://courses.cs.washington.edu/courses/cse140/13wi/csv-parsing.html
| [
"mahathaway93@gmail.com"
] | mahathaway93@gmail.com |
23d6b88ce59595df9dd3ecdbf1b9eba12717da91 | db0d09febeaff8b5a34f19cd33ef537b9e8069a1 | /opentaxii/taxii/services/inbox.py | b972f6f97691ed789714ab91cd287e55dc9519c1 | [
"BSD-3-Clause"
] | permissive | binaryflesh/OpenTAXII | 64c8610bd6c71213f59bcecad0def06bf7df15cc | 971605a5a27c38cfe324bfc8588d2fc708a5d11e | refs/heads/master | 2020-09-14T07:01:54.677400 | 2019-10-08T10:57:44 | 2019-10-08T10:57:44 | 223,058,555 | 0 | 0 | NOASSERTION | 2019-11-21T01:06:46 | 2019-11-21T01:06:45 | null | UTF-8 | Python | false | false | 4,097 | py |
from libtaxii.constants import (
SVC_INBOX, MSG_INBOX_MESSAGE, SD_ACCEPTABLE_DESTINATION,
ST_DESTINATION_COLLECTION_ERROR, ST_NOT_FOUND, SD_ITEM
)
from opentaxii.local import context
from opentaxii.exceptions import UnauthorizedException
from ..utils import is_content_supported
from ..entities import ContentBindingEntity
from ..exceptions import StatusMessageException
from ..converters import (
content_binding_entities_to_content_bindings,
service_to_service_instances
)
from .abstract import TAXIIService
from .handlers import InboxMessageHandler
class InboxService(TAXIIService):
service_type = SVC_INBOX
handlers = {
MSG_INBOX_MESSAGE: InboxMessageHandler
}
destination_collection_required = False
accept_all_content = False
supported_content = []
def __init__(self, accept_all_content=False,
destination_collection_required=False,
supported_content=None, **kwargs):
super(InboxService, self).__init__(**kwargs)
self.accept_all_content = accept_all_content
supported_content = supported_content or []
self.supported_content = [
ContentBindingEntity(c) for c in supported_content]
self.destination_collection_required = destination_collection_required
def is_content_supported(self, content_binding, version=None):
if self.accept_all_content:
return True
return is_content_supported(
self.supported_content, content_binding, version=version)
def get_destination_collections(self):
return self.server.persistence.get_collections(self.id)
def validate_destination_collection_names(self, name_list, in_response_to):
name_list = name_list or []
if ((self.destination_collection_required and not name_list)
or (not self.destination_collection_required and name_list)):
if not name_list:
message = ('A Destination_Collection_Name is required '
'and none were specified')
else:
message = ('Destination_Collection_Names are prohibited '
'for this Inbox Service')
details = {
SD_ACCEPTABLE_DESTINATION: [
c.name for c in self.get_destination_collections()
if c.available]}
raise StatusMessageException(
ST_DESTINATION_COLLECTION_ERROR,
message=message,
in_response_to=in_response_to,
status_details=details)
if not name_list:
return []
collections = []
destinations_map = {
c.name: c for c in self.get_destination_collections()}
for name in name_list:
if name in destinations_map:
collection = destinations_map[name]
if context.account.can_modify(name):
collections.append(collection)
else:
raise UnauthorizedException(
message=('User can not write to collection {}'
.format(name)))
else:
raise StatusMessageException(
ST_NOT_FOUND,
message='Collection {} was not found'.format(name),
in_response_to=in_response_to,
extended_headers={SD_ITEM: name})
return collections
def to_service_instances(self, version):
service_instances = service_to_service_instances(self, version)
if self.accept_all_content:
return service_instances
for instance in service_instances:
instance.inbox_service_accepted_content = (
self.get_supported_content(version))
return service_instances
def get_supported_content(self, version):
if self.accept_all_content:
return []
return content_binding_entities_to_content_bindings(
self.supported_content, version)
| [
"sergey@polzunov.com"
] | sergey@polzunov.com |
0e8f0395bd3ee6ef3a7298a4834795ce12651d62 | 5380c405f082f95c3da3fcbb2e0936dddb27831e | /Py/unittests/__init__.py | 947b2920f3d23f8e3f1e8bd3f8d1862b486ba861 | [] | no_license | Steinecke/capstone-final | 873a9acea129e9dbf08bc4abe7872feecfd7b9e4 | 5ea705925b5a9eb744c15011f016efc976c34c6d | refs/heads/main | 2023-01-23T01:22:52.501550 | 2020-12-07T13:49:47 | 2020-12-07T13:49:47 | 319,331,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | import unittest
import getopt
import sys
import os
## parse inputs
from unittests.TestsApi import ApiTest
try:
optlist, args = getopt.getopt(sys.argv[1:],'v')
except getopt.GetoptError:
print(getopt.GetoptError)
print(sys.argv[0] + "-v")
print("... the verbose flag (-v) may be used")
sys.exit()
VERBOSE = False
RUNALL = False
sys.path.append(os.path.realpath(os.path.dirname(__file__)))
for o, a in optlist:
if o == '-v':
VERBOSE = True
## api tests
from unittests.TestsApi import ApiTest
ApiTestSuite = unittest.TestLoader().loadTestsFromTestCase(ApiTest)
## model tests
from unittests.TestsModel import *
ModelTestSuite = unittest.TestLoader().loadTestsFromTestCase(ModelTest)
## logger tests
from unittests.TestsLogger import *
LoggerTestSuite = unittest.TestLoader().loadTestsFromTestCase(LoggerTest)
MainSuite = unittest.TestSuite([LoggerTestSuite,ModelTestSuite,ApiTestSuite])
| [
"ute.kerstin.steinecke@de.ibm.com"
] | ute.kerstin.steinecke@de.ibm.com |
817fa6226c7e68f0e510fe35234e9688f28d0382 | 991a26c52d82fe5db840f7547f8c6abe4f3d07cc | /hackerrank/interview_preparation_kit/arrays/Medium_NewYearChaos_20190203.py | 56486ca5ffbbd1f187cfe63cc68734fe151b64b6 | [] | no_license | fleamon/algorithm_python | 4f715ee5e6474f19c033e87eeb7853d9865883a9 | 19289c27204031690174f6aa15a87de6413269e0 | refs/heads/master | 2023-04-02T11:32:06.433537 | 2021-04-15T05:55:20 | 2021-04-15T05:55:20 | 194,265,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | # -*- encoding: utf-8
import math
import os
import random
import re
import sys
"""
Sample Input
2
5
2 1 5 3 4
5
2 5 1 3 4
Sample Output
3
Too chaotic
input 첫줄 개수만큼의 짝수번째줄 input 크기의 3이상의 홀수 라인의 배열에 대해서 원래의 순차적 배열이 몇번의 원소 이동으로 만들어졌는가?
"""
def minimumBribes(q):
res = 0
# q : [2, 1, 5, 3, 4]
# p : [1, 0, 4, 2, 3]
# i : [0, 1, 2, 3, 4]
#################
# q : [2, 5, 1, 3, 4]
# p : [1, 4, 0, 2, 3]
# i : [0, 1, 2, 3, 4]
for i, p in enumerate(q):
p = p - 1
if p - i > 2:
print("Too chaotic")
return
for j in range(max(p - 1, 0), i):
if q[j] > p:
res = res + 1
print res
if __name__ == '__main__':
t = int(raw_input())
for t_itr in xrange(t):
n = int(raw_input())
q = map(int, raw_input().rstrip().split())
minimumBribes(q)
| [
"fleamon@nate.com"
] | fleamon@nate.com |
9c916129fe72fbdc76aaf2997d9bbdfa460fd235 | de54e5ddf4d350176d70c2bb1501b878285a18b8 | /fpn.py | 04a74603728490c73565dff2f7b4854aee3e9411 | [] | no_license | lizhe960118/find-star | e1d73b78b29087ca2e83990354b96b7406eaedf4 | e233dca4fe9a5faf6df9b6a4e0b2f29a7eb096b0 | refs/heads/master | 2020-05-18T10:55:52.008399 | 2019-05-01T03:49:32 | 2019-05-01T03:49:32 | 184,363,943 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,390 | py | '''RetinaFPN in PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.downsample = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.downsample = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.downsample(x)
out = F.relu(out)
return out
# 基础残差块
class ResNetBasicBlock(nn.Module):
expansion = 1
def __init__(self, in_channel, out_channel, stride=1, downsample=None):
super(ResNetBasicBlock, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, padding=1),
nn.BatchNorm2d(out_channel))
self.relu = nn.ReLU(inplace=True)
self.layer2 = nn.Sequential(
nn.Conv2d(out_channel, out_channel, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(out_channel))
self.downsample = downsample
self.stride = stride
def forward(self,x):
residual = x
out = self.layer1(x)
out = self.relu(out)
out = self.layer2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class FPN(nn.Module):
def __init__(self, block, num_blocks):
super(FPN, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) # 3*3 s1
self.bn1 = nn.BatchNorm2d(64)
self.conv2 = nn.Conv2d(64, 64, kernel_size=1, stride=1, bias=False) # 1*1 s1
self.bn2 = nn.BatchNorm2d(64)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False) # 3*3 s2
self.bn3 = nn.BatchNorm2d(64)
# Bottom-up layers
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.conv5 = nn.Conv2d(1024, 256, kernel_size=3, stride=2, padding=1)
# self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
# self.conv6 = nn.Conv2d(2048, 256, kernel_size=3, stride=2, padding=1)
# self.conv7 = nn.Conv2d( 256, 256, kernel_size=3, stride=2, padding=1)
# Lateral layers
# self.latlayer1 = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0)
# self.latlayer2 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
# self.latlayer3 = nn.Conv2d( 512, 256, kernel_size=1, stride=1, padding=0)
self.latlayer1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
self.latlayer2 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0)
self.latlayer3 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)
# Top-down layers
self.toplayer1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.toplayer2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def _upsample_add(self, x, y):
'''Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note in PyTorch, when input size is odd, the upsampled feature map
with `F.upsample(..., scale_factor=2, mode='nearest')`
maybe not equal to the lateral feature map size.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsample which supports arbitrary output sizes.
'''
_,_,H,W = y.size()
return F.upsample(x, size=(H,W), mode='bilinear') + y
def forward(self, x):
# Bottom-up
c1 = F.relu(self.bn1(self.conv1(x)))
c1 = F.relu(self.bn2(self.conv2(c1)))
c1 = F.relu(self.bn3(self.conv3(c1)))
# c1 = F.max_pool2d(c1, kernel_size=3, stride=2, padding=1)
c2 = self.layer1(c1) # 300 * 300
c3 = self.layer2(c2)
c4 = self.layer3(c3)
p5 = self.conv5(c4)
# c5 = self.layer4(c4)
# p6 = self.conv6(c5)
# p7 = self.conv7(F.relu(p6))
# Top-down
p4 = self.latlayer1(c4)
p3 = self._upsample_add(p4, self.latlayer2(c3))
p3 = self.toplayer1(p3)
p2 = self._upsample_add(p3, self.latlayer3(c2))
p2 = self.toplayer2(p2)
# p5 = self.latlayer1(c5)
# p4 = self._upsample_add(p5, self.latlayer2(c4))
# p4 = self.toplayer1(p4)
# p3 = self._upsample_add(p4, self.latlayer3(c3))
# p3 = self.toplayer2(p3)
return p2, p3, p4, p5
def FPN50():
# return FPN(Bottleneck, [3,4,6,3])
return FPN(Bottleneck, [3, 4, 6])
def FPN101():
return FPN(Bottleneck, [2,4,23,3])
def test():
net = FPN50()
# fms = net(Variable(torch.randn(1,3,600,300)))
fms = net(Variable(torch.randn(1, 3, 832, 832)))
for fm in fms:
print(fm.size())
# test() | [
"2957308424@qq.com"
] | 2957308424@qq.com |
eddfc651738e0287513e742aa45ead153d7d9ed2 | 3886762b3f57cfa13ae8a3dd2226e25cc293bde0 | /get-data.py | 5ab24d73a8ba926a18da600a0bd1000d28fe50e2 | [] | no_license | parkchaelin/CLI-development | 1d2d704d383d8113621339861b99382b9d2fb463 | fac3913f72bbdcf3e55d04ebd108be0133703503 | refs/heads/master | 2023-01-27T12:00:19.777793 | 2020-12-14T08:21:18 | 2020-12-14T08:21:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,929 | py | import pandas as pd
import pymysql
from urllib.request import urlopen
from bs4 import BeautifulSoup
import json
from datetime import datetime, date,timedelta
import os
import sys
import urllib.request
import re
## functions
# function that deal with date
"""
google: week를 sunday to saturday로 설정
--> 날짜를 하루씩 더함
"""
def plus_one_day(googletrends_df):
date2 = []
for i in range(len(googletrends_df)):
after_one_day = datetime.strptime(googletrends_df.iloc[i].date, '%Y-%m-%d').date() + timedelta(days=1)
after_one_day = after_one_day.strftime("%Y-%m-%d")
date2.append(after_one_day)
return date2
"""
db에 저장할 때,
date를 년, 월, 일로 저장해서 반환
"""
def getDate(df_row):
date = str(df_row)
year = date[0:4]
month = date[4:6]
day = date[6:9]
return int(year), int(month), int(day)
"""
날짜에서 year, month, day 추출
"""
def extract_year_and_month_day(period):
year = period.split('-')[0]
month = period.split('-')[1]
day = period.split('-')[2]
return year, month, day
def extract_year_and_month(period):
year = period[0:4]
month = period[4:6]
return year, month
# get files
def getPortalTrendsFiles ():
# kakao trend
kakaotrends_df = pd.read_csv('./data/kakaotrend.csv', header=7, sep=',', skip_blank_lines = True)
kakaotrends_df = kakaotrends_df.rename({'일': 'date', '경제':'kakao'}, axis = 'columns')
kakaotrends_df = kakaotrends_df[0:147]
# google trend
googletrends_df = pd.read_csv('./data/googletrend.csv', header=1, sep=',', skip_blank_lines = True)
googletrends_df = googletrends_df.rename({'주': 'date', '경제: (대한민국)':'google'}, axis = 'columns')
googletrends_df['date'] = plus_one_day(googletrends_df)
return kakaotrends_df, googletrends_df
def getNaverDatalabAPI(client_id, client_secret):
# naver datalab
start_date = '2016-01-01'
today = date.today().strftime('%Y-%m-%d')
url = "https://openapi.naver.com/v1/datalab/search";
body = "{\"startDate\":\""+start_date+"\",\"endDate\":\""+today+"\",\"timeUnit\":\"week\",\"keywordGroups\":[{\"groupName\":\"경제\",\"keywords\":[\"경제\"]}]}";
request = urllib.request.Request(url)
request.add_header("X-Naver-Client-Id",client_id)
request.add_header("X-Naver-Client-Secret",client_secret)
request.add_header("Content-Type","application/json")
response = urllib.request.urlopen(request, data=body.encode("utf-8"))
rescode = response.getcode()
if(rescode==200):
response_body = response.read()
scrapped = response_body.decode('utf-8')
else:
print("Error Code:" + rescode)
jsonResult = json.loads(scrapped)
navertrends_df = pd.DataFrame(jsonResult['results'][0]['data'])
navertrends_df = navertrends_df.rename({'period': 'date', 'ratio':'naver'}, axis = 'columns')
return navertrends_df
def get3TrendsTable(googletrends_df, kakaotrends_df, navertrends_df):
tmp = pd.merge(googletrends_df, kakaotrends_df, how='left')
portaltrends_df = pd.merge(tmp, navertrends_df, how='left')
portaltrends_df = portaltrends_df.fillna(0)
return portaltrends_df
def getNewsCountingFiles():
# news counting
news_df = pd.read_json('./data/bigkinds.json')
return news_df
def createTables():
# portal trends
create_table_query = """
CREATE TABLE IF NOT EXISTS portal_trends_ratio(
id BIGINT(7) NOT NULL AUTO_INCREMENT,
year bigint(4) NOT NULL,
month bigint(2) NOT NULL,
day bigint(2) NOT NULL,
google double,
kakao double,
naver double,
primary key(id) )
charset=utf8mb4;
"""
cur.execute(create_table_query)
# newscounting
create_table_query = """
CREATE TABLE IF NOT EXISTS news_counting(
id BIGINT(7) NOT NULL AUTO_INCREMENT,
year bigint(4) NOT NULL,
month bigint(2) NOT NULL,
day bigint(2) NOT NULL,
keyword1 bigint(100),
keyword2 bigint(100),
keyword3 bigint(100),
keyword4 bigint(100),
keyword5 bigint(100),
primary key(id) )
charset=utf8mb4;
"""
cur.execute(create_table_query)
# ccsi
create_table_query = """
CREATE TABLE IF NOT EXISTS ccsi(
id BIGINT(7) NOT NULL AUTO_INCREMENT,
year bigint(4) NOT NULL,
month bigint(2) NOT NULL,
ccsi double,
primary key(id) )
charset=utf8mb4;
"""
cur.execute(create_table_query)
# cci
create_table_query = """
CREATE TABLE IF NOT EXISTS coincident_composite_index(
id BIGINT(7) NOT NULL AUTO_INCREMENT,
year bigint(4) NOT NULL,
month bigint(2) NOT NULL,
cci double,
primary key(id) )
charset=utf8mb4;
"""
cur.execute(create_table_query)
def insertRecords(portaltrends_df, news_df, ccsi, cci):
# portal trends
for i in range(len(portaltrends_df)):
query = """
Insert into portal_trends_ratio (year, month, day, google, kakao, naver) values (%d, %d, %d, %f, %f, %f) ;
"""
year, month, day = extract_year_and_month_day(portaltrends_df.iloc[i].date)
google_cnt = portaltrends_df.iloc[i].google
kakao_cnt = portaltrends_df.iloc[i].kakao
naver_cnt = portaltrends_df.iloc[i].naver
mystring = (query % (int(year), int(month), int(day), float(google_cnt), float(kakao_cnt), float(naver_cnt)))
print(mystring)
cur.execute(mystring)
# news counting
for i in range(len(news_df)):
query = """
Insert into news_counting (year, month, day, keyword1, keyword2, keyword3, keyword4, keyword5 ) values (%d, %d, %d, %d, %d, %d, %d, %d) ;
"""
year, month, day = getDate(news_df.iloc[i]['date'])
keyword1_cnt = news_df.iloc[i]['침체']
keyword2_cnt = news_df.iloc[i]['금융위기']
keyword3_cnt = news_df.iloc[i]['불황']
keyword4_cnt = news_df.iloc[i]['폭락']
keyword5_cnt = news_df.iloc[i]['외환위기']
mystring = ( query % (year, month, day, keyword1_cnt, keyword2_cnt, keyword3_cnt, keyword4_cnt, keyword5_cnt) )
if (i % 10 == 0):
print(mystring)
cur.execute(mystring)
# ccsi
for i in range(len(ccsi)):
query = """
Insert into ccsi (year, month, ccsi) values (%d, %d, %f) ;
"""
year, month = extract_year_and_month(ccsi.iloc[i].TIME)
ccsi_value = ccsi.iloc[i].DATA_VALUE
mystring = (query % (int(year), int(month), float(ccsi_value)))
print(mystring)
cur.execute(mystring)
## cci
for i in range(len(cci)):
query = """
Insert into coincident_composite_index (year, month, cci) values (%d, %d, %f) ;
"""
year, month = extract_year_and_month(cci.iloc[i].TIME)
coincident_value = cci.iloc[i].DATA_VALUE
mystring = (query % (int(year), int(month), float(coincident_value)))
print(mystring)
cur.execute(mystring)
def getEcosAPI(API_KEY, code, max, start_month, end_month):
url = 'http://ecos.bok.or.kr/api/StatisticSearch/%s/json/kr/1/%s/%s/MM/%s/%s/?/?/?/' % (API_KEY, max, code, start_month, end_month)
result = urlopen(url)
html = result.read()
return json.loads(html)
def getCCSI(API_KEY):
data = getEcosAPI(API_KEY, code = '040Y002', max = 100000, start_month = '201509', end_month = '202011')["StatisticSearch"]["row"]
produce = pd.DataFrame(data)
ccsi = produce[produce['ITEM_CODE1'] == 'FME'] # CCSI는 item_code1 'FME'
ccsi = ccsi.loc[:, ['TIME', 'DATA_VALUE']].reset_index(drop=True)
return ccsi
def getCCI(API_KEY):
data = getEcosAPI(API_KEY, code = '085Y026', max = 100000, start_month = '201509', end_month = '202011')["StatisticSearch"]["row"]
produce = pd.DataFrame(data)
cci = produce[produce['ITEM_CODE1'] == 'I16D'] # CCI는 item_code1 'I16D'
cci = cci.loc[:, ['TIME', 'DATA_VALUE']].reset_index(drop=True)
return cci
## main
# DB connection
conn = pymysql.connect(host = "127.0.0.1", user = [USER], passwd = [PASSWORD], db = [DATABASE], cursorclass = pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("show databases")
print(cur.fetchall())
cur.execute("use CLI")
# load data
kakaotrends_df, googletrends_df = getPortalTrendsFiles()
navertrends_df = getNaverDatalabAPI(client_id, client_secret)
portaltrends_df = get3TrendsTable(googletrends_df, kakaotrends_df, navertrends_df)
news_df = getNewsCountingFiles()
# 소비자심리지수, 소비자 동행지수 df 생성
API_KEY = "YOUR_API_KEY"
ccsi = getCCSI(API_KEY)
cci = getCCI(API_KEY)
# create table
createTables()
# insert records to table
insertRecords(portaltrends_df, news_df, ccsi, cci)
# db connection close
conn.commit()
cur.close()
conn.close()
| [
"2hyes.com@gmail.com"
] | 2hyes.com@gmail.com |
b645e1547c414ecb417fa09a0ab91efdd675b570 | b02c8b5667b7e97af560d8a6aea4d9c7c4d97ac9 | /config.py | 96419eb7c75ee9e8e582c7bba061e651c4bc75a8 | [] | no_license | edgarlizarazo/circleci102 | 198301acf035132a78aadb2a3d365f33eaf97648 | b0c4e59610f8181dadf8286daab1aa9876c005c0 | refs/heads/master | 2023-02-07T18:17:45.553497 | 2021-03-17T22:15:58 | 2021-03-17T22:15:58 | 229,484,068 | 0 | 0 | null | 2023-02-02T06:42:15 | 2019-12-21T21:15:45 | Python | UTF-8 | Python | false | false | 3,489 | py | class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SSL_DISABLE = False
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_RECORD_QUERIES = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
# run mailhog for development emails
MAIL_SERVER = os.environ.get('SERVER') or 'localhost'
MAIL_PORT = os.environ.get('MAIL_PORT') or 1025
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') or False
MAIL_USERNAME = os.environ.get('MAIL_USERNAME') or 'mailhog'
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD') or 'mailhog'
CIRCULATE_MAIL_SUBJECT_PREFIX = '[Circulate]'
CIRCULATE_MAIL_SENDER = os.environ.get('CIRCULATE_MAIL_SENDER') or \
'Circulate Admin <circulate@example.com>'
CIRCULATE_ADMIN = os.environ.get('CIRCULATE_ADMIN') or 'circulate@example.com'
CIRCULATE_POSTS_PER_PAGE = 20
CIRCULATE_FOLLOWERS_PER_PAGE = 50
CIRCULATE_COMMENTS_PER_PAGE = 30
CIRCULATE_SLOW_DB_QUERY_TIME = 0.5
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'postgresql://localhost/circulate'
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'postgresql://localhost/circulate'
WTF_CSRF_ENABLED = False
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'postgresql://localhost/circulate'
@classmethod
def init_app(cls, app):
Config.init_app(app)
# email errors to the administrators
import logging
from logging.handlers import SMTPHandler
credentials = None
secure = None
if getattr(cls, 'MAIL_USERNAME', None) is not None:
credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)
if getattr(cls, 'MAIL_USE_TLS', None):
secure = ()
mail_handler = SMTPHandler(
mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
fromaddr=cls.CIRCULATE_MAIL_SENDER,
toaddrs=[cls.CIRCULATE_ADMIN],
subject=cls.CIRCULATE_MAIL_SUBJECT_PREFIX + ' Application Error',
credentials=credentials,
secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
class HerokuConfig(ProductionConfig):
SSL_DISABLE = bool(os.environ.get('SSL_DISABLE'))
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# handle proxy server headers
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
# log to stderr
import logging
from logging import StreamHandler
file_handler = StreamHandler()
file_handler.setLevel(logging.WARNING)
app.logger.addHandler(file_handler)
class UnixConfig(ProductionConfig):
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# log to syslog
import logging
from logging.handlers import SysLogHandler
syslog_handler = SysLogHandler()
syslog_handler.setLevel(logging.WARNING)
app.logger.addHandler(syslog_handler)
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'heroku': HerokuConfig,
'unix': UnixConfig,
'default': DevelopmentConfig
}
| [
"noreply@github.com"
] | edgarlizarazo.noreply@github.com |
58b80e368310044cbc32962e10ec2d19ebe7e790 | ccdc81165a6bfb65fd4d7956ed223dec94b3057d | /src/models/static/directions.py | dfe6b72841f5cc9b92ac3425d663f5c4dcd5e19b | [] | no_license | asimyildiz/webservice-for-xibo1.7-python | 1a1f76e161526061094b012f6446b83115fa21a9 | d6e27825a627d0f8b7f514c93c5f636a338b0b06 | refs/heads/master | 2020-10-01T17:42:35.248951 | 2019-12-13T05:27:03 | 2019-12-13T05:27:03 | 227,589,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | # To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
__author__ = "asim"
__date__ = "$19.Eki.2015 19:58:31$"
# TODO add to this class, directions when we need one
class directions():
LEFT="left"
RIGHT="right"
UP="up"
DOWN="down" | [
"asimyildiz@istanbulmd.com"
] | asimyildiz@istanbulmd.com |
e340f3cd9c8770e8f3bb5e2d9144515f2d746863 | b45b2e382ba748909bd909b667d67b3f94b7951e | /semana05_threading/DownloaddaImagem.py | 5fdd8cfaa65a4e170cc160b333edfde09d0d10cd | [] | no_license | PedroPauloPP/SEII-PedroPaulo | 8d9e718b702f40d6b4ca80700853614731144c51 | 0b40b67b0ecb4646e1ea66c09bbe5027f9c0a259 | refs/heads/main | 2023-02-01T04:39:26.784142 | 2020-12-08T21:04:49 | 2020-12-08T21:04:49 | 309,513,504 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,100 | py | #Thread 3
#Download da imagem
import requests
import time
import concurrent.futures
img_urls = [
'https://images.unsplash.com/photo-1516117172878-fd2c41f4a759',
'https://images.unsplash.com/photo-1532009324734-20a7a5813719',
'https://images.unsplash.com/photo-1524429656589-6633a470097c',
'https://images.unsplash.com/photo-1530224264768-7ff8c1789d79',
'https://images.unsplash.com/photo-1564135624576-c5c88640f235',
'https://images.unsplash.com/photo-1541698444083-023c97d3f4b6',
'https://images.unsplash.com/photo-1522364723953-452d3431c267',
'https://images.unsplash.com/photo-1513938709626-033611b8cc03',
'https://images.unsplash.com/photo-1507143550189-fed454f93097',
'https://images.unsplash.com/photo-1493976040374-85c8e12f0c0e',
'https://images.unsplash.com/photo-1504198453319-5ce911bafcde',
'https://images.unsplash.com/photo-1530122037265-a5f1f91d3b99',
'https://images.unsplash.com/photo-1516972810927-80185027ca84',
'https://images.unsplash.com/photo-1550439062-609e1531270e',
'https://images.unsplash.com/photo-1549692520-acc6669e2f0c'
]
t1 = time.perf_counter()
'''
O download é realizado pela função abaixo, uma a uma considerando o url como parâmetro passado.
Na linha 33, temos a requisição da imagem passada, dividindo depois a string da url e adicionando a extensão de arquivo
de imagem .jpg ao final do nome. Depois, cada arquivo é aberto em bytes e escrito.
'''
def download_image(img_url):
img_bytes = requests.get(img_url).content
img_name = img_url.split('/')[3]
img_name = f'{img_name}.jpg'
with open(img_name, 'wb') as img_file:
img_file.write(img_bytes)
print(f'{img_name} foi baixado!')
'''
ThreadPoolExecutor tem por função executar o código em um thread separado, desse modo, executa-se a função e processa-se os resultados
dessas execuções para cada arquivo.
'''
with concurrent.futures.ThreadPoolExecutor() as executor:
executor.map(download_image, img_urls)
t2 = time.perf_counter()
print(f'Completo em {t2-t1} segundos')
| [
"pedropaulobm3@gmail.com"
] | pedropaulobm3@gmail.com |
79d70ef8cdd409a874c3891be9cd5a8b172f5b9c | fd89a4a2e976c6c972e7b1759f04a2db2de1a198 | /week4_divide_and_conquer/3_improving_quicksort/sorting.py | bc11469cb0b890d02515cf5133886da6edd42fed | [] | no_license | skarensmoll/algorithms-tlbx | 2769d82411c9a6cb064a737b60ec595afc28c4d2 | b281f14feed1299610847ee6346da9e003d1748c | refs/heads/master | 2023-08-29T02:20:03.182874 | 2021-11-07T13:32:47 | 2021-11-07T13:32:47 | 374,073,146 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | # Uses python3
import sys
import random
def partition3(a, l, r):
#write your code here
pass
def partition2(a, l, r):
x = a[l]
j = l
for i in range(l + 1, r + 1):
if a[i] <= x:
j += 1
a[i], a[j] = a[j], a[i]
a[l], a[j] = a[j], a[l]
return j
def swap(a, l, r):
temp = a[l]
a[l] = a[r]
a[r] = temp
def partition3(a, lo, hi):
l = lo
r = lo
u = hi
pivot = a[hi]
while r <= u:
if a[r] < pivot:
swap(a, l, r)
l += 1
r += 1
elif a[r] > pivot:
swap(a, r, u)
u -=1
else:
r += 1
return l - 1, r
def randomized_quick_sort(a, l, r):
if l >= r:
return
k = random.randint(l, r)
a[r], a[k] = a[k], a[r]
m, n = partition3(a, l, r)
randomized_quick_sort(a, l, m);
randomized_quick_sort(a, n, r);
if __name__ == '__main__':
input = input()
n, a = 5, [2, 3, 9, 2, 2]
randomized_quick_sort(a, 0, n - 1)
for x in a:
print(x, end=' ')
| [
"karenpedraza@Karens-MacBook-Pro.local"
] | karenpedraza@Karens-MacBook-Pro.local |
6a9b1dff843c271968e2310de2eb10d4b19033db | 50b509abb8cf3e22a4dddf90ff998bfbaae01f71 | /custom/app/metrics/metrics_cpu_task_time.py | 1102f42fe51e3f0558891d35b1ac7eaacfdd78b2 | [] | no_license | ragnarkurm/docker-monitor | a69c4ce1ec4b0a89a9cf48491edeb56d998cffd2 | 6ccb271cef308f545c2516a6965ed5459a78b88e | refs/heads/master | 2021-01-21T14:07:44.325123 | 2016-06-12T07:21:33 | 2016-06-12T07:21:33 | 56,938,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | keys = [
"id",
"user",
"nice",
"system",
"idle",
"iowait",
"irq",
"softirq",
"steal",
"guest",
"guest_nice",
]
import re
def metrics():
f = open('/host/proc/stat', 'r')
line = f.readline()
f.close()
line = line.strip()
values = re.split('\s+', line)
data = dict(zip(keys, values))
del data["id"]
for k in data:
data[k] = {
'measurement': 'cpu_task_time',
'tags': {
'task': k,
},
'fields': {
'value': int(data[k]),
},
}
return data.values()
| [
"ragnar.kurm@gmail.com"
] | ragnar.kurm@gmail.com |
da99298304da102fff9fdc00df027f9acd57795c | b947f66258b9879919b45afbbcf011904558d2ba | /workSpace/hc_sr04.py | 886728910c246f5aaf9ad8a59cc5f26722fe11c5 | [] | no_license | Makerfabs/MakePython-ESP8266 | d3c005566f6c91eba2c8ee0fcc3cd984190e9265 | 4d02d5939a897b67b8de061b2680c88cae1ef79c | refs/heads/master | 2023-07-11T13:54:57.819790 | 2023-06-25T02:41:03 | 2023-06-25T02:41:03 | 252,667,396 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | from hcsr04 import HCSR04
from machine import Pin,I2C
import ssd1306
i2c = I2C(scl=Pin(5), sda=Pin(4), freq=100000) #Init i2c
lcd=ssd1306.SSD1306_I2C(128,64,i2c)
sensor = HCSR04(trigger_pin=13, echo_pin=12,echo_timeout_us=1000000)
try:
while True:
distance = sensor.distance_cm()
if(distance == sensor.distance_cm()):
lcd.fill(0)
lcd.text("Distance:",30,20)
lcd.text(str(distance),30,40)
lcd.show()
except KeyboardInterrupt:
pass
| [
"noreply@github.com"
] | Makerfabs.noreply@github.com |
b96bfac9435a26fb0dac083564d3a9020962e566 | e4d4149a717d08979953983fa78fea46df63d13d | /Week8/Day1/projects/animals_project/manage.py | a880c80321ea81b2c1b015e806c82f0971f83302 | [] | no_license | fayblash/DI_Bootcamp | 72fd75497a2484d19c779775c49e4306e602d10f | a4e8f62e338df5d5671fd088afa575ea2e290837 | refs/heads/main | 2023-05-05T20:55:31.513558 | 2021-05-27T06:48:40 | 2021-05-27T06:48:40 | 354,818,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'animals_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"fayblash@gmail.com"
] | fayblash@gmail.com |
95ad4fd20d715d2b592087416dd9db29358e23b9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02407/s580407099.py | 11041f9012ef0a39f8fbc696d88e6c36fe254b03 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | n = int(input())
a = list(map(int,input().split()))
a.reverse()
for i,elem in enumerate(a):
if i != 0:
print (" ", end='')
print (elem, end='')
print ('')
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
37416ae207a95ca269005d08f020dd3f0e703430 | 14de7abd1267122ad128c130f45ff86a087ed5cd | /nlp/match_blocks.py | 7e4efbe57c5b3b8a5ce86ca674e74a43cecd808f | [
"MIT"
] | permissive | saurabhXcode/tf-attentive-conv | 64124c470acdb26125680d903cc97ae1cc68a4b9 | 8dcc403575392c8e5c6c287432272a781410c49c | refs/heads/master | 2020-04-12T12:21:35.091291 | 2018-08-11T00:26:44 | 2018-08-11T00:26:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,308 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Han Xiao <artex.xh@gmail.com> <https://hanxiao.github.io>
import tensorflow as tf
from nlp.encode_blocks import CNN_encode
from nlp.nn import linear_logit, dropout_res_layernorm
def AttentiveCNN_match(context, query, context_mask, query_mask,
scope='AttentiveCNN_Block', reuse=None, **kwargs):
with tf.variable_scope(scope, reuse=reuse):
cnn_wo_att = CNN_encode(context, filter_size=3, direction='none', act_fn=None)
att_context, _ = Attentive_match(context, query, context_mask, query_mask)
cnn_att = CNN_encode(att_context, filter_size=1, direction='none', act_fn=None)
output = tf.nn.tanh(cnn_wo_att + cnn_att)
return dropout_res_layernorm(context, output, **kwargs)
def Attentive_match(context, query, context_mask, query_mask,
score_func='dot', causality=False,
scope='attention_match_block', reuse=None, **kwargs):
with tf.variable_scope(scope, reuse=reuse):
batch_size, context_length, num_units = context.get_shape().as_list()
_, query_length, _ = query.get_shape().as_list()
if score_func == 'dot':
score = tf.matmul(context, query, transpose_b=True)
elif score_func == 'bilinear':
score = tf.matmul(linear_logit(context, num_units, scope='context_x_We'), query, transpose_b=True)
elif score_func == 'scaled':
score = tf.matmul(linear_logit(context, num_units, scope='context_x_We'), query, transpose_b=True) / \
(num_units ** 0.5)
elif score_func == 'additive':
score = tf.squeeze(linear_logit(
tf.tanh(tf.tile(tf.expand_dims(linear_logit(context, num_units, scope='context_x_We'), axis=2),
[1, 1, query_length, 1]) +
tf.tile(tf.expand_dims(linear_logit(query, num_units, scope='query_x_We'), axis=1),
[1, context_length, 1, 1])), 1, scope='x_ve'), axis=3)
else:
raise NotImplementedError
mask = tf.matmul(tf.expand_dims(context_mask, -1), tf.expand_dims(query_mask, -1), transpose_b=True)
paddings = tf.ones_like(mask) * (-2 ** 32 + 1)
masked_score = tf.where(tf.equal(mask, 0), paddings, score) # B, Lc, Lq
# Causality = Future blinding
if causality:
diag_vals = tf.ones_like(masked_score[0, :, :]) # (Lc, Lq)
tril = tf.contrib.linalg.LinearOperatorLowerTriangular(diag_vals).to_dense() # (Lc, Lq)
masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(masked_score)[0], 1, 1]) # B, Lc, Lq
paddings = tf.ones_like(masks) * (-2 ** 32 + 1)
masked_score = tf.where(tf.equal(masks, 0), paddings, masked_score) # B, Lc, Lq
query2context_score = tf.nn.softmax(masked_score, axis=2) * mask # B, Lc, Lq
query2context_attention = tf.matmul(query2context_score, query) # B, Lc, D
context2query_score = tf.nn.softmax(masked_score, axis=1) * mask # B, Lc, Lq
context2query_attention = tf.matmul(context2query_score, context, transpose_a=True) # B, Lq, D
return (query2context_attention, # B, Lc, D
context2query_attention) # B, Lq, D
| [
"hanhxiao@tencent.com"
] | hanhxiao@tencent.com |
d32523bc329dea5f064b200616642b72d95259f4 | cc1f992cf04c71ebd8993b7ac2ffb566e5a60702 | /mysite/settings.py | 5ca20cc84b66cef46db819a0b9433fb8f832ccc4 | [] | no_license | pupadhyay1/DjangoFirstApp | f7a203b2a514c4fadb610ddba5bd2128b18433bd | d3055ce54f1825f07da8d34df5312be9e4d682d6 | refs/heads/master | 2016-09-05T14:59:33.499222 | 2015-06-28T06:26:16 | 2015-06-28T06:26:16 | 38,179,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,698 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '113l-!=$v7p-q(+le5_rm7@8xuhnx%-7@kanrbhhfsz18(xd%d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC+10'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"pankaj.m.upadhyay@gmail.com"
] | pankaj.m.upadhyay@gmail.com |
3da0b77fd7e77ed9d412ecfad98e0a08bd3e81ee | 1b24c146ed43853557a3bdc934d8283f5654bb9e | /Gold_Badge/Tree_Pre_order.py | b948934efb23a806c290f596376fe4e7e271f917 | [] | no_license | suyalmukesh/Hacker | 14186ca72d9a8d563ad8cd8e8890e4084489e78e | 76f6ab37405d603c6fdc078eba934bc0ac11f7db | refs/heads/master | 2022-11-28T02:59:12.802563 | 2020-08-04T08:28:55 | 2020-08-04T08:28:55 | 277,568,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,666 | py | class Node:
def __init__(self, info):
self.info = info
self.left = None
self.right = None
self.level = None
def __str__(self):
return str(self.info)
class BinarySearchTree:
def __init__(self):
self.root = None
def create(self, val):
if self.root == None:
self.root = Node(val)
else:
current = self.root
while True:
if val < current.info:
if current.left:
current = current.left
else:
current.left = Node(val)
break
elif val > current.info:
if current.right:
current = current.right
else:
current.right = Node(val)
break
else:
break
"""
Node is defined as
self.left (the left child of the node)
self.right (the right child of the node)
self.info (the value of the node)
"""
def pre(root):
# Write your code here
res = []
if root:
res.append(root.info)
res = res + pre(root.left)
res = res + pre(root.right)
return res
def preOrder(root):
a = pre(tree.root)
for i in range(len(a)):
print(a[i] , end = " ")
tree = BinarySearchTree()
t = int(input())
arr = list(map(int, input().split()))
for i in range(t):
tree.create(arr[i])
preOrder(tree.root)
# using two way for printing , but the driver code was not editable
# and was not correctly working , so I did a workaround and it worked
| [
"Mukesh.Suyal@experian.com"
] | Mukesh.Suyal@experian.com |
15188d314ca5c19012b753c109d9188603b486e4 | bceda6e6ba78ecbb146e1d99a7a563d6b9db46cf | /update_configs.py | f7edb2a7ea6675c2b37c4217c11bd16b700633ce | [] | no_license | mtao/config | c863383a6aaca14c1a0fd73e8caa286c41536200 | 4d315c1343a0372503cc7b4fa929c68db97e3ecb | refs/heads/master | 2018-07-28T14:17:46.245224 | 2018-06-02T14:50:56 | 2018-06-02T14:50:56 | 21,939,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,789 | py | #/usr/bin/env python3
import sys
import os
import shutil
import difflib
mypath = os.path.dirname(os.path.realpath(__file__))
default_filename = os.path.join(mypath,"settings.txt")
default_configdir = os.path.join(mypath,"configs")
class Command:
update_target = 0
update_source = 1
update_noop= 2
__update_readable__ = {update_target: "Target",
update_source: "Source",
update_noop: "NoOp"}
def __init__(self, target):
self.__target__ = target
def __type_str__(self):
raise NotImplementedError
def diff(self):
raise NotImplementedError
def __get_file_age__(filename):
st = os.stat(os.path.expanduser(filename))
return st.st_mtime
def target_age(self):
return Command.__get_file_age__(self.__target__)
def source_age(self):
raise NotImplementedError
def target(self):
return self.__target__
def source(self):
return NotImplementedError
def update_style(self):
if len(self.diff()) == 0:
return self.update_noop
ta = self.target_age()
na = self.source_age()
if ta == na:
return Command.update_noop
elif ta < na:
return Command.update_target
else:
return Command.update_source
def update_human_readable(self):
return Command.__update_readable__[self.update_style()]
def __repr__(self):
return "{0}:[{1}],{2}".format(self.__type_str__(),self.__target__,self.update_human_readable())
class FileCommand(Command):
def __type_str__(self):
return "File"
def __init__(self,target,args):
super().__init__(target)
self.__source__ = os.path.join(default_configdir,args[0])
#TODO: this only works for text files I think?
source_lines = open(os.path.expanduser(self.__source__),"r").readlines()
target_lines = open(os.path.expanduser(self.__target__),"r").readlines()
self.__diff__ = list(difflib.unified_diff(source_lines,target_lines))
def diff(self):
return self.__diff__
def source_age(self):
return Command.__get_file_age__(self.__source__)
class GitCommand(Command):
def __type_str__(self):
return "Git"
def diff(self):
return []
def __init__(self,target,args):
super().__init__(target)
def source_age(self):
return -1
def make_command(cmd,target,args):
if cmd == "=":
return FileCommand(target,args)
elif cmd == ":git:":
return GitCommand(target,args)
else:
raise Exception("Unknown Command",cmd)
class Settings:
def __init__(self, lines):
self.__name__ = lines[0][1:-1]
print(self.__name__)
#triplets of command, target, command args
self.__commands__ = [make_command(line[1],line[0],line[2:]) for line in map(lambda x: x.split(), lines[1:])]
def process(self):
for cmd in self.__commands__:
style = cmd.update_style()
print(cmd)
if style is not Command.update_noop:
print("".join(cmd.diff()))
def __main__():
filename = default_filename
if len(sys.argv) > 1:
filename = argv[1]
with open(filename,"r") as f:
lines = list(filter(lambda x: len(x) > 0,map(lambda x: x.strip(), f.readlines())))
print(lines)
indices = []
for i,line in enumerate(lines):
if line[0] == '[':
indices.append(i)
indices.append(len(lines))
settings = [Settings(lines[i:j]) for i,j in zip(indices[:-1],indices[1:])]
for s in settings:
s.process()
if __name__ == "__main__":
__main__()
| [
"mtao@dgp.toronto.edu"
] | mtao@dgp.toronto.edu |
cd4b07a3d02f99c573bec8833d8456984b0f6bdf | 5bd19a2c40a9b9b5a7565f18a2ef14d3d2d59026 | /helper.py | 758eb162884f642b0804862c57a74d7ea3b8cc94 | [] | no_license | iJohnMaged/afk-arena-stages-discord-bot | 283f003f786c3e38aa1d044c6ed7aae694f0b843 | 3f19fc4038830d801127290b2a3fe30e21d8bd3b | refs/heads/master | 2022-12-09T18:21:15.699866 | 2020-09-05T11:18:19 | 2020-09-05T11:18:19 | 293,060,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,507 | py |
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import MediaIoBaseUpload, MediaIoBaseDownload
from apiclient import errors
import os
import discord
import pickle
import os.path
import io
import requests
from consts import TOKEN, CHANNEL_NAME, SEARCH_CHANNEL, NOTTI_BANANA, PEPE, COOKIE, REGEX, LAST_UPLOADED_TABLE, UPLOADED_STAGES
from tinydb import Query
from discord import File
# OAuth scope: full Google Drive access (read, write, delete).
SCOPES = ['https://www.googleapis.com/auth/drive']
# TinyDB query object used below to look up stage records by field.
Stage = Query()
async def upload_search_towers(floor, ctx, tower, memo, service, DB=None):
    """Discord command handler: in the search channel, fetch and post stored stage
    screenshots for `floor` of `tower`; otherwise treat message attachments as new
    uploads and push them to Google Drive, reacting 👍/👎 on success/failure."""
    if ctx.message.channel.name == SEARCH_CHANNEL and not ctx.message.author.bot:
        return_message = ""
        search_folder = get_folder_id_by_name(
            tower, service, memo)
        stage_ids = search_file_in_folder(
            search_folder, floor, service)
        if stage_ids is not None:
            for stage_id, file_name in stage_ids:
                # return_message += f"Stage link: https://drive.google.com/file/d/{stage_id}\n"
                if DB is not None:
                    # Attach the caption that was recorded when this stage was uploaded.
                    stage_doc = DB.table(UPLOADED_STAGES).get(
                        Stage.file_id == stage_id)
                    if stage_doc:
                        return_message += f"Upload caption: {stage_doc['message']}\n"
                try:
                    stage_file = download_file(stage_id, service)
                    sending_file = File(stage_file, f"{file_name}.jpg")
                    await ctx.send(return_message, file=sending_file)
                except Exception as e:
                    # Best-effort: a failed send/download should not abort the remaining results.
                    print(e)
                    print(e.args)
        else:
            await ctx.send(f"Couldn't find it, sowwy {PEPE}")
        return
    # NOTE(review): this branch is a no-op -- `pass` does nothing, so non-upload-channel
    # messages still fall through to the upload loop below. Possibly meant to `return`.
    if not (ctx.message.channel.name == CHANNEL_NAME and not ctx.message.author.bot):
        pass
    for attachment in ctx.message.attachments:
        uploaded_file_id = upload_file(service, attachment.url, tower,
                                       ctx.message.author, floor, memo)
        if uploaded_file_id is not None:
            print(f"Uploaded floor {floor}")
            await ctx.message.add_reaction('👍')
            return uploaded_file_id
        else:
            await ctx.message.add_reaction('👎')
def download_file(file_id, service):
    """Download a Drive file's bytes and return them as a rewound io.BytesIO.

    `service` is a googleapiclient Drive v3 service instance.
    """
    # FIX: removed leftover debug output (`print(request)` / `print(dir(request))`).
    request = service.files().get_media(fileId=file_id)
    fh = io.BytesIO()
    downloader = MediaIoBaseDownload(fh, request)
    done = False
    while done is False:
        # next_chunk() returns (progress_status, done_flag).
        status, done = downloader.next_chunk()
    fh.seek(0)  # rewind so callers read from the beginning
    return fh
def delete_file(service, file_id):
    """Permanently delete a file, skipping the trash.

    Args:
      service: Drive API service instance.
      file_id: ID of the file to delete.
    """
    try:
        service.files().delete(fileId=file_id).execute()
    except errors.HttpError as error:
        # Best-effort delete: API failures are only reported, not re-raised.
        print(f'An error occurred: {error}')
def createRemoteFolder(folderName, drive_service, parentID=None):
    """Create a folder on Google Drive and return the newly created folder's id.

    If `parentID` is given, the folder is created inside that parent.
    """
    metadata = {
        'name': folderName,
        'mimeType': "application/vnd.google-apps.folder"
    }
    if parentID:
        metadata['parents'] = [parentID]
    created = drive_service.files().create(body=metadata).execute()
    return created['id']
def search_file_in_folder(folder_id, stage, drive_service):
    """Return [(file_id, file_name), ...] for files in `folder_id` whose name
    starts with "<stage>-"; returns None (implicitly) when nothing matches."""
    page_token = None
    solutions = []
    while True:
        # Page through every file in the folder.
        response = drive_service.files().list(q=f"'{folder_id}' in parents",
                                              spaces='drive',
                                              fields='nextPageToken, files(id, name)',
                                              pageToken=page_token).execute()
        for file in response.get('files', []):
            # Process change
            if file.get('name').startswith(f"{stage}-"):
                print('Found file: %s (%s)' %
                      (file.get('name'), file.get('id')))
                solutions.append((file.get('id'), file.get('name')))
        page_token = response.get('nextPageToken', None)
        if page_token is None:
            break
    if len(solutions) != 0:
        return solutions
def get_folder_id_by_name(folder_name, drive_service, memo):
    """Resolve a Drive folder name to its id, memoized in `memo`.

    Returns the first matching folder's id, or None (implicitly) when no
    folder of that name exists.
    """
    if folder_name in memo:
        return memo[folder_name]
    page_token = None
    while True:
        response = drive_service.files().list(q=f"mimeType='application/vnd.google-apps.folder' and name = '{folder_name}'",
                                              spaces='drive',
                                              fields='nextPageToken, files(id, name)',
                                              pageToken=page_token).execute()
        for file in response.get('files', []):
            # Process change
            # Cache and return the first match; later pages are never fetched.
            print('Found file: %s (%s)' % (file.get('name'), file.get('id')))
            memo[folder_name] = file.get('id')
            return memo[folder_name]
        page_token = response.get('nextPageToken', None)
        if page_token is None:
            break
def init_g_drive():
    """Build an authenticated Drive v3 service, caching OAuth credentials in token.pickle.

    Runs the local-server OAuth flow (credentials.json) when no valid cached
    credentials exist, refreshing expired ones when possible.
    """
    creds = None  # BUG FIX: `creds` was unbound (NameError) when token.pickle is absent
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    return build('drive', 'v3', credentials=creds)
def upload_file(service, url, folder, author, stage, memo):
    """Fetch the image at `url` and upload it to the Drive folder named `folder`
    as "<stage>-<author display name>"; return the new file id or None."""
    image = requests.get(url)
    # Get folder
    folder_id = get_folder_id_by_name(folder, service, memo)
    media = MediaIoBaseUpload(io.BytesIO(
        image.content), mimetype='image/jpeg')
    # Prefer the member's server nickname, fall back to their account name.
    author_name = None
    if author.nick is not None:
        author_name = author.nick
    else:
        author_name = author.name
    uploaded_file = service.files().create(
        media_body=media,
        body={"name": f'{stage}-{author_name}',
              'parents': [folder_id]}
    ).execute()
    if uploaded_file:
        return uploaded_file['id']
    else:
        return None
| [
"john.maged@swisodev.com"
] | john.maged@swisodev.com |
827370360a0c207ac6273117c06be4bf6b0b163e | 882be627c49870ae6f2f81963a3cfc9b719c0011 | /wscript | 601c17dd013a3c54bc088dbbc86fb37531affd98 | [] | no_license | brettviren/cogs | 794142a04c87ce148e939f8ded852dfa1f6df9bc | 681d1ed7e12cd2e7469a5ba3fd7261dc4f8f4c26 | refs/heads/master | 2022-11-30T12:26:19.624956 | 2020-08-10T12:34:44 | 2020-08-10T12:34:44 | 273,746,410 | 0 | 1 | null | 2020-08-10T12:34:45 | 2020-06-20T16:40:57 | JavaScript | UTF-8 | Python | false | false | 2,954 | #!/usr/bin/env waf
# Project identity consumed by waf's dist/build machinery.
VERSION='0.0.0'
APPNAME='cogs'
import os.path as osp
def options(opt):
    """waf `options` phase: register command-line flags for this build."""
    opt.load('compiler_cxx')
    opt.load('waf_unit_test')
    opt.add_option('--quell-tests', action='store_true', default=False,
                   help='Compile but do not run the tests (default=%default)')
    # Install-area pointers for the third-party dependencies.
    opt.add_option('--with-ers', default=None,
                   help='Set to ERS install area')
    opt.add_option('--with-nljs', default=None,
                   help='Point nlohmann json install area')
    opt.add_option('--with-boost', default=None,
                   help='Set to BOOST install area (needed by ERS)')
def configure(cfg):
    """waf `configure` phase: locate nlohmann::json, ERS and Boost; write config.hpp."""
    cfg.load('compiler_cxx')
    cfg.load('waf_unit_test')
    cfg.env.CXXFLAGS += ['-std=c++17', '-ggdb3', '-Wall', '-Werror']
    ## nlohmann::json
    nljs = getattr(cfg.options, 'with_nljs', None)
    if nljs:
        print("using " + nljs)
        setattr(cfg.env, 'INCLUDES_NLJS', [osp.join(nljs, "include")])
    cfg.check(features='cxx cxxprogram', define_name='HAVE_NLJS',
              header_name='nlohmann/json.hpp',
              use='NLJS', uselib_store='NLJS', mandatory=True)
    ## ERS
    ers = getattr(cfg.options, 'with_ers',None)
    if ers:
        # Record lib/include paths (and rpath) relative to the given install area.
        setattr(cfg.env, 'RPATH_ERS', [osp.join(ers, 'lib')]);
        setattr(cfg.env, 'LIBPATH_ERS', [osp.join(ers, 'lib')]);
        setattr(cfg.env, 'INCLUDES_ERS', [osp.join(ers, 'include')]);
    cfg.check(features='cxx cxxprogram', define_name='HAVE_ERS',
              header='ers/ers.h', lib=['ers','ErsBaseStreams'],
              use='ERS', uselib_store='ERS', mandatory=True)
    ## Boost is not needed directly by cogs but ERS needs it.
    boost = getattr(cfg.options, 'with_boost', None)
    if boost:
        setattr(cfg.env, 'RPATH_BOOST', [osp.join(boost, 'lib')]);
        setattr(cfg.env, 'LIBPATH_BOOST', [osp.join(boost, 'lib')]);
        setattr(cfg.env, 'INCLUDES_BOOST', [osp.join(boost, 'include')]);
    cfg.check(features='cxx cxxprogram', define_name='HAVE_BOOST',
              header=['boost/filesystem/filesystem.hpp',
                      'boost/preprocessor/preprocessor.hpp'],
              lib=['boost_filesystem'],
              use='BOOST', uselib_store='BOOST', mandatory=True)
    cfg.write_config_header('config.hpp')
def build(bld):
    """waf `build` phase: build the cogs shared library, headers, tests and demos."""
    bld.recurse("test")
    use=['ERS','BOOST','NLJS']
    sources = bld.path.ant_glob('src/*.cpp');
    bld.shlib(features='cxx', includes='inc',
              source = sources, target='cogs',
              uselib_store='COGS', use=use)
    # Install the public headers, preserving the inc/cogs/** directory layout.
    bld.install_files('${PREFIX}/include/cogs',
                      bld.path.ant_glob("inc/cogs/**/*.hpp"),
                      cwd=bld.path.find_dir('inc/cogs'),
                      install_path=bld.env.PREFIX + '/lib',
                      relative_trick=True)
    # Print a unit-test summary once everything has built.
    from waflib.Tools import waf_unit_test
    bld.add_post_fun(waf_unit_test.summary)
    bld.recurse("demo")
| [
"brett.viren@gmail.com"
] | brett.viren@gmail.com | |
cdc237084299675f5c218544154e89c2be810335 | 980434e03e722eaf3a5ff4ab4f1971c8d1cde4c5 | /宝石与石头.py | a2ae90f7262a28b814a440bfe3b1d2cf7a48bc01 | [] | no_license | arry-lee/arryleetcode | c9c548b0defc9771e4e488b3e760809364456c99 | b4b9b971ec81a921cca606dfa46ea4109d975dfb | refs/heads/master | 2020-07-26T14:11:27.645307 | 2019-09-15T23:31:09 | 2019-09-15T23:31:09 | 208,670,826 | 1 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 172 | py | #±¦Ê¯Óëʯͷ
#2019-08-17 06:20:13
# Jewels and Stones (LeetCode 771)
class Solution:
    def numJewelsInStones(self, J: str, S: str) -> int:
        """Count how many characters of S (stones) are jewels, i.e. appear in J.

        Uses a set for O(1) membership, giving O(len(J) + len(S)) instead of the
        original O(len(J) * len(S)) substring scan per stone.
        """
        jewels = set(J)
        return sum(stone in jewels for stone in S)
"arry_lee@qq.com"
] | arry_lee@qq.com |
a22b59fd7f6692be58027a707c4a7e46b8252802 | 4eeb732f66f890cc823d176bdff349232e057fff | /gnokhorOptique/wsgi.py | 68ef8808973950ec31469b79971734b9b42a6140 | [] | no_license | bassiroufaye/apiGnokhor | 4b5fa4446c99dd22021c605fe36c85a4eb079974 | 14b5d8e8f18c2d80ec10258fbb23977c70ad3fcb | refs/heads/master | 2020-06-04T08:05:05.858226 | 2019-06-14T14:14:21 | 2019-06-14T14:14:21 | 191,937,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for gnokhorOptique project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before building the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gnokhorOptique.settings')
# Module-level WSGI callable imported by application servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
| [
"bassiroufaye815@gmail.com"
] | bassiroufaye815@gmail.com |
838be619882db60b75ef9cb128d925a8d39a081f | 2884f3c1269d566487e3abd404fc521d93437964 | /safe_b64.py | f1d82ca5d20ba0c8cfb8a59e93ef5fda1b34a63d | [] | no_license | eterinfi/python | 4d25b52bfc2a852cc27823b3787f39d46ace90a7 | 5214af7d69c6ba2d2573f143b909182977115237 | refs/heads/master | 2020-12-04T13:49:32.647323 | 2016-12-05T07:50:53 | 2016-12-05T07:50:53 | 67,704,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | import base64
def safe_b64decode(s, altchars=None):
    """Base64-decode `s`, first restoring any stripped '=' padding.

    base64.b64decode rejects input whose length is not a multiple of 4; this
    pads in one arithmetic step instead of the original one-char-at-a-time loop.
    """
    s += '=' * (-len(s) % 4)
    return base64.b64decode(s, altchars)
# Demo (Python 2 syntax): plain b64decode raises on unpadded input,
# safe_b64decode repairs the padding first.
s = 'YWJjZA'
try:
    print 'Result: ', base64.b64decode(s)
except TypeError:
    print '(Safe-decoding)',
    print safe_b64decode(s)
"gaorui84@gmail.com"
] | gaorui84@gmail.com |
27464648ad0199b336598c09ba3e7ef7c44630da | 9c0b03f8070460afa43a9903b2956adff872bb69 | /guoxi_bridge.py | e2dd776c96f424ba9bde140a78efb3344484c28a | [
"MIT"
] | permissive | Hizudao/AbaqusPython | c4a4b54d3785aa8720a4d375f3a20ee5d6a97f23 | d9c72d15f8928f1938cee46a4b39e2c44b03b62f | refs/heads/master | 2021-09-01T14:30:16.772076 | 2017-11-07T23:59:14 | 2017-11-07T23:59:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,478 | py | # -*- coding: mbcs -*-
# Abaqus/CAE scripted model: a 1m truss pinned (MPC) to a 1m beam, fixed at both
# outer ends, loaded at the beam midspan, meshed (T3D2 truss / B32 beam) and
# submitted as job 'InteractionTest'. Runs inside Abaqus (mdb, Region, ON/OFF
# symbols come from the abaqus* star imports).
from abaqus import *
from abaqusConstants import *
from interaction import *
from optimization import *
from sketch import *
from visualization import *
from connectorBehavior import *
import regionToolset
#session.journalOptions.setValues(replayGeometry=COORDINATE,recoverGeometry=COORDINATE)
# Geometry and load scale parameters.
trussLength=1.0
beamLength=1.0
cLoad=1 #only refers to scale
#-----------------------------------------------------
# Create a model.
myModel = mdb.Model(name='InteractionTestModel')
#-----------------------------------------------------
from part import *
# Create a sketch for the base feature.
mySketch = myModel.ConstrainedSketch(name='trussSketch',sheetSize=trussLength*2)
# Create the line.
mySketch.Line(point1=(0.0, 0.0), point2=(trussLength, 0.0))
# Create a three-dimensional, deformable part.
myTrussPart = myModel.Part(name='trussPart', dimensionality=THREE_D, type=DEFORMABLE_BODY)
# Create the part's base feature
myTrussPart.BaseWire(sketch=mySketch)
# Create a sketch for the base feature.
mySketch = myModel.ConstrainedSketch(name='beamSketch',sheetSize=beamLength*2)
# Create the line.
mySketch.Line(point1=(trussLength, 0.0), point2=(trussLength+beamLength, 0.0))
# Create a three-dimensional, deformable part.
myBeamPart = myModel.Part(name='beamPart', dimensionality=THREE_D, type=DEFORMABLE_BODY)
# Create the part's base feature
#This method creates a first Feature object by creating a planar wire from the given ConstrainedSketch object.
myBeamPart.BaseWire(sketch=mySketch)
#-----------------------------------------------------
from material import *
# Create a material.
#mySteel = myModel.Material(name='Steel')
myTrussMaterial=myModel.Material(name='trussMaterial')
myModel.materials['trussMaterial'].Elastic(table=((1.0, 0.3), ))
# Create the elastic properties
#elasticProperties = (209.E9, 0.28)
#mySteel.Elastic(table=(elasticProperties, ) )
#-------------------------------------------------------
from section import *
myTrussSection=myModel.TrussSection(name='trussSection', material='trussMaterial',
    area=1.0)
#a:bottom;b:height
myModel.RectangularProfile(name='beamProfile', a=12.0, b=1.0)
myBeamSection=myModel.BeamSection(name='beamSection', profile='beamProfile',
    poissonRatio=0.28, integration=BEFORE_ANALYSIS,
    table=((1.0, 1.0), ), alphaDamping=0.0, beamShape=CONSTANT,
    betaDamping=0.0, centroid=(0.0, 0.0), compositeDamping=0.0,
    consistentMassMatrix=False, dependencies=0, shearCenter=(0.0, 0.0),
    temperatureDependency=OFF, thermalExpansion=OFF)
# Assign the section to the region. The region refers
# to the single cell in this model.
trussRegion=regionToolset.Region(edges=myTrussPart.edges)
myTrussPart.SectionAssignment(region=trussRegion, sectionName='trussSection',
    offset=0.0, offsetField='',offsetType=MIDDLE_SURFACE,
    thicknessAssignment=FROM_SECTION)
myModel.parts['trussPart'].assignBeamSectionOrientation(method=
    N1_COSINES, n1=(0.0, 0.0, 1.0), region=Region(
    edges=myTrussPart.edges.findAt(((trussLength/4, 0.0, 0.0),
    ), ((trussLength/2, 0.0, 0.0), ), )))
#beamRegion = (myBeamPart.cells,)
beamRegion=regionToolset.Region(edges=myBeamPart.edges)
myBeamPart.SectionAssignment(region=beamRegion, sectionName='beamSection',
    offset=0.0, offsetField='',offsetType=MIDDLE_SURFACE,
    thicknessAssignment=FROM_SECTION)
myModel.parts['beamPart'].assignBeamSectionOrientation(method=
    N1_COSINES, n1=(0.0, 0.0, 1.0), region=Region(
    edges=myBeamPart.edges.findAt(((trussLength+beamLength/4, 0.0, 0.0),
    ), ((trussLength+beamLength/2, 0.0, 0.0), ), )))
#-------------------------------------------------------
from assembly import *
# Create a part instance.
myAssembly = myModel.rootAssembly
myAssembly.DatumCsysByDefault(CARTESIAN)
myTrussInstance = myAssembly.Instance(name='trussInstance',
    part=myTrussPart, dependent=ON)
myBeamInstance = myAssembly.Instance(name='beamInstance',
    part=myBeamPart, dependent=ON)
# MPC constraint
# Pin truss end to beam start at the shared vertex (trussLength, 0, 0).
v1 = myAssembly.instances['trussInstance'].vertices
verts1 = v1.findAt(((trussLength, 0.0, 0.0), ))
region1=regionToolset.Region(vertices=verts1)
v1 = myAssembly.instances['beamInstance'].vertices
verts1 = v1.findAt(((trussLength, 0.0, 0.0), ))
region2=regionToolset.Region(vertices=verts1)
myModel.MultipointConstraint(name='Constraint-1',
    controlPoint=region1, surface=region2, mpcType=PIN_MPC,
    userMode=DOF_MODE_MPC, userType=0, csys=None)
#-------------------------------------------------------
from step import *
# Create a step. The time period of the static step is 1.0,
# and the initial incrementation is 0.1; the step is created
# after the initial step.
myModel.StaticStep(name='structStep', previous='Initial',
    nlgeom=OFF, description='Load of the struct.')
#-------------------------------------------------------
from load import *
v=myAssembly.instances['trussInstance'].vertices
verts=v.findAt(((0.0, 0.0, 0.0), ),)
myAssembly.Set(vertices=verts,name='Set-fix1')
region=myAssembly.sets['Set-fix1']
myModel.DisplacementBC(name='BC-1', createStepName='structStep',
    region=region, u1=0.0, u2=0.0, u3=0.0, ur1=0.0, ur2=0.0, ur3=UNSET,
    amplitude=UNSET, fixed=OFF, distributionType=UNIFORM,fieldName='',
    localCsys=None)
v=myAssembly.instances['beamInstance'].vertices
verts=v.findAt(((trussLength+beamLength, 0.0, 0.0), ),)
myAssembly.Set(vertices=verts, name='Set-fix2')
region=myAssembly.sets['Set-fix2']
myModel.DisplacementBC(name='BC-2', createStepName='structStep',
    region=region, u1=0.0, u2=0.0, u3=0.0, ur1=0.0, ur2=0.0, ur3=0.0,
    amplitude=UNSET, fixed=OFF, distributionType=UNIFORM, fieldName='',
    localCsys=None)
#mdb.models['Model-1'].rootAssembly.Set(name='Set-3', vertices=
#    mdb.models['Model-1'].rootAssembly.instances['Part-1-1'].vertices.findAt(((
#    2.0, 0.0, 0.0), )))
# Downward point load at the midpoint of the whole truss+beam span.
v=myAssembly.instances['beamInstance'].vertices
verts=v.findAt((((trussLength+beamLength)/2, 0.0, 0.0), ),)
myAssembly.Set(vertices=verts, name='Set-force')
region=myAssembly.sets['Set-force']
myModel.ConcentratedForce(name='centerLoad', createStepName='structStep',
    region=region, cf2=-1.0*cLoad, distributionType=UNIFORM, field='',
    localCsys=None)
#-------------------------------------------------------
#from mesh import *
import mesh
# Assign an element type to the part instance.
#region = (myInstance.cells,)
#elemType = mesh.ElemType(elemCode=B31, elemLibrary=STANDARD)
#myAssembly.setElementType(regions=region, elemTypes=(elemType,))
# Seed the part instance.
myTrussPart.seedPart(size=0.2,
    deviationFactor=0.1, minSizeFactor=0.1)
#need:
#from abaqus import *
#from abaqusConstants import *
elemType1=mesh.ElemType(elemCode=T3D2)
pR=(myTrussPart.edges,)
myTrussPart.setElementType(regions=pR, elemTypes=(elemType1,))
# Mesh the part instance.
myTrussPart.generateMesh()
myBeamPart.seedPart(size=0.2,
    deviationFactor=0.1, minSizeFactor=0.1)
elemType2=mesh.ElemType(elemCode=B32)
pR=(myBeamPart.edges,)
myBeamPart.setElementType(regions=pR, elemTypes=(elemType2,))
# Mesh the part instance.
myBeamPart.generateMesh()
#-------------------------------------------------------
myAssembly.regenerate()
#-------------------------------------------------------
from job import *
# Create an analysis job for the model and submit it.
jobName='InteractionTest'
myJob=mdb.Job(name=jobName, model='InteractionTestModel')
myJob.submit(consistencyChecking=OFF)
# Save by ldn
"eamdfan@126.com"
] | eamdfan@126.com |
7664bdbcd0f519eb8e0cab01b9522c568c1d58af | 0b36f42f9ab35d6171901b9c645c9b18105ac10e | /app_read_developer.py | 35b8b53989e8d01a6044a68f692ff0d6a4c0d2cf | [] | no_license | jhs2jhs/AndroidAppsCollector | aa53f13918772de49dad0f5e63d476ee397ecb7d | 23a58e190f6a0ede5e0dc8eeda38040aedd7b791 | refs/heads/master | 2021-05-26T20:01:20.107506 | 2012-11-19T01:04:47 | 2012-11-19T01:04:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,938 | py | import httplib
import json
from bs4 import BeautifulSoup
import bs4
import urlparse
import urllib
from datetime import datetime
import http
import db_app
import db_sql
import err
import time
import util
from cate_read_google_play import *
import db_developer
def db_init():
    # Open both database connections used by the merge/read routines below.
    db_developer.db_init()
    db_app.db_init()
def developer_merge():
    """Copy (developer_href, developer_website) rows from the app DB into the developer DB."""
    rows = db_app.db_get_g(db_sql.sql_developer_merge_get, ())
    i_t = len(rows)
    print '** start to merge developer test list %d from %s to %s **'%(i_t, db_app.db_path, db_developer.db_path)
    i = 0
    p = 0
    db = db_developer.db
    c = db.cursor()
    for row in rows:
        developer_href = row[0]
        developer_website = row[1]
        c.execute(db_sql.sql_developer_merge_insert, (developer_href, developer_website, ))
        # util.p_percent_copy presumably prints progress and commits periodically -- confirm in util
        p, i = util.p_percent_copy(p, i, i_t, 1, db)
    db.commit()
    c.close()
def developer_read_store_main():
finish = True
rows = db_developer.db_get_g(db_sql.sql_developer_read_store_get, ())
i_t = len(rows)
i = 0
for row in rows:
i = i + 1
print '%d of %d'%(i, i_t)
developer_href = row[0]
start_num = row[1]
developer_read_store_loop(developer_href, start_num)
#return
def developer_read_store_loop(developer_href, start_num):
    """Page through one developer's listing, 12 apps per page, until a page fails."""
    start_num = int(start_num)
    flag = True
    while flag == True:
        flag = developer_read_store(developer_href, start_num)
        start_num = start_num + 12
        # Throttle between requests to avoid hammering the store.
        util.sleep()
def developer_read_store(developer_href, start_num):
    """Fetch one 12-app page for a developer; record its apps and website.

    Returns True when the page was read and its offset recorded; False on a
    404 (developer marked fully read) or any other failure.
    """
    url = '%s&start=%d&num=12'%(developer_href, start_num)
    print '** developer %s **'%(url)
    try:
        status, body = android_https_get(url)
        if status == 404:
            # Paging ran past the last page: mark this developer as done.
            print '== 404'
            db_developer.db_execute_g(db_sql.sql_developer_store_read_status_update, (developer_href, ))
            return False
        if status != 200:
            raise Exception('app read https connection error: %s'%(str(status)))
        soup = BeautifulSoup(body)
        developer_read_store_website(developer_href, soup)
        developer_read_store_app(developer_href, soup)
        db_developer.db_execute_g(db_sql.sql_developer_store_start_num_update, (start_num, developer_href,)) ## record this page has been successfully read
        return True
    except Exception as e:
        err.except_p(e)
        return False
def developer_read_store_app(developer_href, soup):
    """Record every app id ("data-docid") found on the developer's page."""
    apps_fa = soup.find_all(name='li', attrs={'class':'goog-inline-block'})
    for li in apps_fa:
        if li.has_key('data-docid'):
            app_id = li['data-docid'].strip()
            db_developer.db_execute_g(db_sql.sql_developer_app_insert, (developer_href, app_id, ))
            print '\t%s'%(app_id)
def developer_read_store_website(developer_href, soup):
    """Capture the developer's external website link when the page shows one."""
    website_fa = soup.find_all(name='div', attrs={'class':'developer-website'})
    if len(website_fa) == 1:
        website_f = website_fa[0]
        if website_f.a != None:
            if website_f.a.has_key('href'):
                developer_website = website_f.a['href'].strip()
                db_developer.db_execute_g(db_sql.sql_developer_website_update, (developer_website, developer_href, ))
                #print developer_website
############ developer external web site check, it does not need to within google player page
## developer merge
def website_merge():
    """Seed the developer_social table with every distinct developer website URL."""
    rows = db_developer.db_get_g(db_sql.sql_developer_website_merge_get, ())
    i_t = len(rows)
    print '** start to merge developer social %d **'%(i_t)
    i = 0
    p = 0
    db = db_developer.db
    c = db.cursor()
    for row in rows:
        developer_website = row[0]
        c.execute(db_sql.sql_developer_website_merge_insert, (developer_website, ))
        p, i = util.p_percent_copy(p, i, i_t, 5, db)
    db.commit()
    c.close()
def website_read_main():
    """Resolve each pending developer website (a Google redirect URL), route known
    social domains straight into their columns, and crawl everything else."""
    print 'start'
    rows = db_developer.db_get_g(db_sql.sql_developer_website_read_get, ())
    i_t = len(rows)
    i = 0
    for row in rows:
        i = i + 1
        print '%d of %d'%(i, i_t),
        developer_website = row[0]
        # The stored URL is a Google redirect; the real target is its `q` parameter.
        website_qs = urlparse.urlparse(developer_website.strip()).query
        website_q = urlparse.parse_qs(website_qs)
        if website_q.has_key('q') and len(website_q['q'])>0:
            real_href = website_q['q'][0].strip()
            db_developer.db_execute_g(db_sql.sql_developer_website_real_href_update, (real_href, developer_website, ))
            if len(real_href) < 8:
                # Too short to be a usable URL (shorter than "http://x"); mark done.
                db_developer.db_execute_g(db_sql.sql_developer_website_read_status_update, (developer_website, ))
                continue
            print real_href
            # Sites that ARE a social profile are stored directly, no crawl needed.
            if 'facebook.com' in real_href:
                db_developer.db_execute_g(db_sql.sql_developer_website_facebook_update, (real_href, developer_website, ))
                db_developer.db_execute_g(db_sql.sql_developer_website_read_status_update, (developer_website, ))
                continue
            if 'twitter.com' in real_href:
                db_developer.db_execute_g(db_sql.sql_developer_website_twitter_update, (real_href, developer_website, ))
                db_developer.db_execute_g(db_sql.sql_developer_website_read_status_update, (developer_website, ))
                continue
            if 'plus.google.com' in real_href:
                db_developer.db_execute_g(db_sql.sql_developer_website_google_plus_update, (real_href, developer_website, ))
                db_developer.db_execute_g(db_sql.sql_developer_website_read_status_update, (developer_website, ))
                continue
            if 'youtube.com' in real_href:
                db_developer.db_execute_g(db_sql.sql_developer_website_youtube_update, (real_href, developer_website, ))
                db_developer.db_execute_g(db_sql.sql_developer_website_read_status_update, (developer_website, ))
                continue
            website_read(developer_website, real_href)
    #break
# Dead code: an alternative mechanize-based implementation of website_read,
# deliberately disabled inside a module-level string literal.
'''
import urllib2
import re
import mechanize
br = mechanize.Browser()
#br.set_proxies({"http": "joe:password@myproxy.example.com:3128","ftp": "proxy.example.com",}) # proxy example
#br.set_proxies({'http':''})
br.set_handle_refresh(True)
br.set_handle_robots(False)
#br.set_debug_redirects(True)
#br.set_debug_http(True)
def website_read(developer_website, real_href):
    url = real_href
    try:
        resp = br.open(url)
        #print '** redirect:', br.geturl()
        body = resp.read()
        soup = BeautifulSoup(body)
        website_twitter(developer_website, soup)
        website_facebook(developer_website, soup)
        website_youtube(developer_website, soup)
        website_google_plus(developer_website, soup)
        db_developer.db_execute_g(db_sql.sql_developer_website_read_status_update, (developer_website, ))
        br.clear_history()
    except urllib2.URLError as e: #### need to fingure out error handler
        err.except_p(e)
    except urllib2.HTTPError as e:
        err.except_p(e)
'''
# to be used later, but not now
def meta_redirect(content):
    """Return the target URL of a <meta http-equiv="refresh"> tag, or None."""
    soup = BeautifulSoup(content)
    result = soup.find('meta', attrs={'http-equiv':'refresh'})
    if result:
        #print result
        # content looks like "5; url=http://..." -> split off the wait time.
        wait, text = result['content'].split(';')
        text = text.strip()
        if text.lower().startswith('url='):
            url = text[4:]
            return url
    return None
import urllib2
import re
def website_read(developer_website, real_href):
    """Fetch a developer's site and scrape social profile links out of its HTML."""
    url = real_href
    try:
        opener = urllib2.build_opener()
        # NOTE: urllib2's attribute is `addheaders`; `addHeaders` is silently ignored,
        # so the custom User-Agent is likely never sent -- confirm before relying on it.
        opener.addHeaders = [('User-agent', 'Mozilla/5.1 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/9.0.1')]
        f = opener.open(url, timeout=20)
        body = f.read()
        '''
        print body
        refresh_url = meta_redirect(body)
        while refresh_url:
            f = opener.open(url, timeout=5)
            body = f.read()
            refresh_url = meta_redirect(body)
            print refresh_url
        '''
        soup = BeautifulSoup(body)
        website_twitter(developer_website, soup)
        website_facebook(developer_website, soup)
        website_youtube(developer_website, soup)
        website_google_plus(developer_website, soup)
        # Mark the site as processed only after a successful scrape.
        db_developer.db_execute_g(db_sql.sql_developer_website_read_status_update, (developer_website, ))
    except Exception as e:
        err.except_p(e)
def website_twitter(developer_website, soup):
hrefs = ''
href_fa = soup.find_all(href=re.compile('twitter.com'))
for href_f in href_fa:
if href_f.has_key('href'):
href = href_f['href']
hrefs = '%s:%s'%(hrefs, href)
print '\t%s'%href
#print hrefs
db_developer.db_execute_g(db_sql.sql_developer_website_twitter_update, (hrefs, developer_website, ))
def website_facebook(developer_website, soup):
hrefs = ''
href_fa = soup.find_all(href=re.compile('facebook.com'))
for href_f in href_fa:
if href_f.has_key('href'):
href = href_f['href']
hrefs = '%s:%s'%(hrefs, href)
print '\t%s'%href
#print hrefs
db_developer.db_execute_g(db_sql.sql_developer_website_facebook_update, (hrefs, developer_website, ))
def website_youtube(developer_website, soup):
hrefs = ''
href_fa = soup.find_all(href=re.compile('youtube.com'))
for href_f in href_fa:
if href_f.has_key('href'):
href = href_f['href']
hrefs = '%s:%s'%(hrefs, href)
print '\t%s'%href
#print hrefs
db_developer.db_execute_g(db_sql.sql_developer_website_youtube_update, (hrefs, developer_website, ))
def website_google_plus(developer_website, soup):
hrefs = ''
href_fa = soup.find_all(href=re.compile('plus.google.com'))
for href_f in href_fa:
if href_f.has_key('href'):
href = href_f['href']
hrefs = '%s:%s'%(hrefs, href)
print '\t%s'%href
#print hrefs
db_developer.db_execute_g(db_sql.sql_developer_website_google_plus_update, (hrefs, developer_website, ))
#### db merge
def db_merge_developer():
    """Copy developer rows (href, paging state, status, website, dates) into the app DB."""
    rows = db_developer.db_get_g(db_sql.sql_merge_developer_app_get_developer, ())
    i_t = len(rows)
    print '* merge developer from %s to %s %d *'%(db_developer.db_path, db_app.db_path, i_t)
    i = 0
    p = 0
    db = db_app.db
    c = db.cursor()
    for row in rows:
        developer_href = row[0]
        start_num = row[1]
        store_read_status = row[2]
        developer_website = row[3]
        scrape_create_date = row[4]
        scrape_update_date = row[5]
        c.execute(db_sql.sql_merge_developer_app_insert_developer, (developer_href, start_num, store_read_status, developer_website, scrape_create_date, scrape_update_date, ))
        p, i = util.p_percent_copy(p, i, i_t, 1, db)
    db.commit()
    c.close()
def db_merge_developer_app():
    """Copy developer->app relations into the app DB, seeding the app table too."""
    rows = db_developer.db_get_g(db_sql.sql_merge_developer_app_get_developer_app, ())
    i_t = len(rows)
    print '* merge developer_app from %s to %s %d *'%(db_developer.db_path, db_app.db_path, i_t)
    i = 0
    p = 0
    db = db_app.db
    c = db.cursor()
    for row in rows:
        developer_href = row[0]
        app_id = row[1]
        c.execute(db_sql.sql_merge_developer_app_insert_developer_app, (developer_href, app_id ))
        # Ensure the app itself exists in the app table as well.
        c.execute(db_sql.sql_app_insert, (app_id, ))
        p, i = util.p_percent_copy(p, i, i_t, 1, db)
    db.commit()
    c.close()
def db_merge_developer_social():
rows = db_developer.db_get_g(db_sql.sql_merge_developer_app_get_developer_social, ())
i_t = len(rows)
print '* merge developer_social from %s to %s %d *'%(db_developer.db_path, db_app.db_path, i_t)
i = 0
p = 0
db = db_app.db
c = db.cursor()
for row in rows:
developer_website = row[0]
real_href = row[1]
twitter_href = row[2]
facebook_href = row[3]
google_plus_href = row[4]
youtube_href = row[5]
website_read_status = row[6]
scrape_create_date = row[3]
scrape_update_date = row[4]
c.execute(db_sql.sql_merge_developer_app_insert_developer_social, (developer_website, real_href, twitter_href, facebook_href, google_plus_href, youtube_href, website_read_status, scrape_create_date, scrape_update_date, ))
p, i = util.p_percent_copy(p, i, i_t, 1, db)
db.commit()
c.close()
def from_developer_to_app_developer():
    """Stage 1a: pull developer list into the developer DB (store crawl disabled)."""
    db_init()
    developer_merge()
    #developer_read_store_main()
def from_app_to_developer_developer():
    """Stage 1b: push crawled developer + developer_app data back into the app DB."""
    db_init()
    db_merge_developer()
    db_merge_developer_app()
def from_developer_to_app_website():
    """Stage 2a: resolve and scrape developer websites (merge step disabled)."""
    db_init()
    #website_merge()
    website_read_main()
def from_app_to_developer_website():
    """Stage 2b: push the scraped social data back into the app DB."""
    db_init()
    db_merge_developer_social()
# Pipeline entry point: stages are toggled by (un)commenting the calls below.
if __name__ == '__main__':
    #from_developer_to_app_developer()
    #from_app_to_developer_developer()
    #
    from_developer_to_app_website()
    #from_app_to_developer_website()
| [
"jianhua.shao1986@gmail.com"
] | jianhua.shao1986@gmail.com |
9922859b218cc58faa49f6c1dd6a94382365aa2c | d82dbec429b293df4a48a1dad96be36aa4539149 | /model/model.py | 785420b04c0de71808f32b38659632574a481440 | [] | no_license | kimyeondu/image-classification-level1-23 | 6dcb309dd6601c4ad3473bfab058e29cdce58f5d | 8eb3a4dcb14f3952b202b986ba85d3ccb73539e0 | refs/heads/main | 2023-08-28T16:29:11.548701 | 2021-10-20T16:03:51 | 2021-10-20T16:03:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,298 | py | import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
import timm
from timm.models.vision_transformer import VisionTransformer, _cfg
from base import BaseModel
class BaseModule(nn.Module):
    """Compact CNN baseline: three conv layers, global average pooling, linear head.

    Input: (N, 3, H, W) with H, W large enough to survive the 7x7 conv and two
    2x max-pools (e.g. 32x32). Output: (N, num_classes) logits.
    """

    def __init__(self, num_classes=18):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, stride=1)
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.25)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(128, num_classes)
        self.init_param()

    def init_param(self):
        """Kaiming-init conv/linear weights; constant-init batchnorm parameters."""
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.Linear)):
                nn.init.kaiming_normal_(module.weight)
                nn.init.zeros_(module.bias)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        # Block 1: conv1 -> relu -> conv2 -> relu -> pool -> dropout
        x = self.dropout1(F.max_pool2d(F.relu(self.conv2(F.relu(self.conv1(x)))), 2))
        # Block 2: conv3 -> relu -> pool -> dropout
        x = self.dropout2(F.max_pool2d(F.relu(self.conv3(x)), 2))
        # Global pooling collapses the spatial dims to 1x1, then flatten to (N, 128).
        x = self.avgpool(x).view(-1, 128)
        return self.fc(x)
def conv_batch(in_num, out_num, kernel_size=3, padding=1, stride=1):
    """Bias-free Conv2d -> BatchNorm2d -> LeakyReLU building block."""
    conv = nn.Conv2d(in_num, out_num, kernel_size=kernel_size,
                     stride=stride, padding=padding, bias=False)
    return nn.Sequential(conv, nn.BatchNorm2d(out_num), nn.LeakyReLU())


# Residual block
class DarkResidualBlock(nn.Module):
    """Darknet residual unit: 1x1 channel bottleneck, 3x3 conv, identity skip."""

    def __init__(self, in_channels):
        super(DarkResidualBlock, self).__init__()
        bottleneck = int(in_channels / 2)
        self.layer1 = conv_batch(in_channels, bottleneck, kernel_size=1, padding=0)
        self.layer2 = conv_batch(bottleneck, in_channels)

    def forward(self, x):
        # Identity shortcut added to the two-conv branch.
        return x + self.layer2(self.layer1(x))
class Darknet53(nn.Module):
    """Darknet-53 backbone/classifier: alternating stride-2 conv downsamples and
    residual stages (1,2,8,8,4 blocks), global average pooling, linear head."""
    def __init__(self, num_classes):
        super(Darknet53, self).__init__()
        self.block = DarkResidualBlock
        self.num_classes = num_classes
        self.conv1 = conv_batch(3, 32)
        self.conv2 = conv_batch(32, 64, stride=2)
        self.residual_block1 = self.make_layer(self.block, in_channels=64, num_blocks=1)
        self.conv3 = conv_batch(64, 128, stride=2)
        self.residual_block2 = self.make_layer(self.block, in_channels=128, num_blocks=2)
        self.conv4 = conv_batch(128, 256, stride=2)
        self.residual_block3 = self.make_layer(self.block, in_channels=256, num_blocks=8)
        self.conv5 = conv_batch(256, 512, stride=2)
        self.residual_block4 = self.make_layer(self.block, in_channels=512, num_blocks=8)
        self.conv6 = conv_batch(512, 1024, stride=2)
        self.residual_block5 = self.make_layer(self.block, in_channels=1024, num_blocks=4)
        self.global_avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(1024, self.num_classes)
    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.residual_block1(out)
        out = self.conv3(out)
        out = self.residual_block2(out)
        out = self.conv4(out)
        out = self.residual_block3(out)
        out = self.conv5(out)
        out = self.residual_block4(out)
        out = self.conv6(out)
        out = self.residual_block5(out)
        out = self.global_avg_pool(out)
        # Flatten the pooled (N, 1024, 1, 1) features to (N, 1024).
        out = out.view(-1, 1024)
        out = self.fc(out)
        return out
    def make_layer(self, block, in_channels, num_blocks):
        """Stack `num_blocks` residual blocks operating at `in_channels` channels."""
        layers = []
        for i in range(0, num_blocks):
            layers.append(block(in_channels))
        return nn.Sequential(*layers)
class ReadTimmModule(BaseModel): #
def __init__(self, model_arch, num_classes, pretrained=True):
super().__init__()
self.model = timm.create_model(model_name=model_arch, num_classes=num_classes, pretrained=pretrained)
def forward(self, x):
x= self.model(x)
return x
class ReadTorchvisionModule(BaseModel): #
def __init__(self, model_arch, num_classes, pretrained=True, classifier = None):
super().__init__()
self.model = eval(f"models.{model_arch}(pretrained={pretrained})")
self.model.fc = eval(classifier)
def forward(self, x):
x= self.model(x)
return x
class CustomModel(nn.Module):
def __init__(self):
super(CustomModel, self).__init__()
self.model = timm.create_model('tf_efficientnet_b4', pretrained=True)
self.model.classifier = nn.Linear(1792, 1024)
self.fc1 = nn.Linear(1024, 3)
self.fc2 = nn.Linear(1024, 2)
self.fc3 = nn.Linear(1024, 3)
def forward(self, x):
fc_output = self.model(x)
mask = self.fc1(fc_output)
gender = self.fc2(fc_output)
age = self.fc3(fc_output)
return mask, gender, age
| [
"onefence1994@gmail.com"
] | onefence1994@gmail.com |
8e2d46cd15a261f4d03799358dd4012e984ea338 | 8b2ffe44a6a6867a469c2b705f077017a6350f6a | /shopping_basket/shopping_basket/PercentagePriceAdjuster.py | dd788937062fa2ef3d60af2807c739042142a803 | [] | no_license | andyhasit/june_workshop_solutions | bd0b32a582f2e16c9dcaf6a75011c696b42036c2 | d7145753fb8a16fbedb74c9378ac2e4872c23f3f | refs/heads/master | 2021-01-20T19:52:46.010287 | 2016-06-17T09:47:35 | 2016-06-17T09:47:35 | 60,838,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | """
PercentagePriceAdjuster applies a percentage discount above a certain value
"""
class PercentagePriceAdjuster(object):
"""
Applies a percentage discount above a certain value.
"""
def __init__(self, percentage_discount, min_value=0):
self._percentage_discount = percentage_discount
self._min_value = min_value
def get_adjusted_price(self, previous_total, items):
"""
Returns new total if previous is above minimum.
"""
if previous_total > self._min_value:
total = float(previous_total)
return total - total * self._percentage_discount/100
return previous_total
| [
"andyhasit@gmail.com"
] | andyhasit@gmail.com |
62a27b7c1408d5fea91f586d9a9593286ce5a880 | 49353bbb7afc5e7eae6b8b6ae4cb31e092a3f761 | /constants.py | 6a29f2dd53d6c1e7e8c165a644f997fa755baa6b | [] | no_license | magnus-ISU/RT-bot | 1335abdda42e0ce0f47438809222c1ee3fae5cfc | f462818c204054bc3e2d741cfb27eb106379c9e5 | refs/heads/master | 2023-08-12T06:21:18.517345 | 2021-09-26T06:27:45 | 2021-09-26T06:27:45 | 410,469,817 | 0 | 0 | null | 2021-09-26T06:28:35 | 2021-09-26T06:28:35 | null | UTF-8 | Python | false | false | 596 | py | CLASS_SPECIFIERS = ["SE", "COMS", "CPRE", "SD", "STAT", "EE"]
MAJORS = {
"SE": 746125179842854922,
"SYSE": 746125180702818486,
"COMSCI": 746131438977417237,
"COMPE": 746125166467088495,
"CYBERSECE": 746125169264689184,
"EE": 746125171206651964,
"INFOASSURE": 746125174947971163,
"DATASCI": 803318943049711687
}
OTHERS = {
"GAMERS": 761764627377029120,
"MOVIEWATCHER": 750880017390764062,
"PLUGGEDIN": 750913471259869254,
"ALUMNUS": 745267379897892914,
"HELPER": 745404656586326116
}
ADD = "add"
REMOVE = "remove"
COMMANDS = [ADD, REMOVE, "rm"]
| [
"zeldatwili1@gmail.com"
] | zeldatwili1@gmail.com |
d21ab71fd92fd043000de53f2cc733961ddbb79f | 2e2843ead0186fca5f124743395cf1f65f8e579f | /father/urls.py | 5cec4ad8f2d3b6e146074d7c02489928bf232b00 | [] | no_license | django-spain/django-father-rest-framework | 5ce003569d3bdc3d7873de791c25e4a98b6ae57c | 0dd1cd61dd1a624f0baa27320fc99388ca7ca620 | refs/heads/master | 2022-02-21T23:02:23.972257 | 2019-08-27T00:45:11 | 2019-08-27T00:45:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | """father URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/v1.0/', include('book.urls')),
]
| [
"falconsoft.3d@gmail.com"
] | falconsoft.3d@gmail.com |
14ad54db5c256cc3b75e342a59c96ef948a299bc | 0a5d6c06f58dd95de3a50e45c504d3d9b454163d | /djantaxishare/settings.py | 406fab047b689364b09cc8b358aa39ea0fac1a51 | [] | no_license | abeinstein/taxishare | 4c8f47a8538397a68a5579922c2b8acaeb1fca4f | 5497eb1a2f64ea300a7ca4db12aa10c7635dd073 | refs/heads/master | 2021-01-22T09:05:29.264788 | 2012-07-17T22:08:54 | 2012-07-17T22:08:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,595 | py | # Django settings for djantaxishare project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '/Users/abeinstein/Documents/Hacking/djantaxishare/db/taxishare.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/Users/abeinstein/Documents/Hacking/media/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
"/Users/abeinstein/Documents/Hacking/media"
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'o7)-^9vgnw6c(51&6j__8!%bg=78-h*#qf($e)6^mz*gx2vyz1'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'djantaxishare.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'djantaxishare.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'/Users/abeinstein/Documents/Hacking/djantaxishare/templates'
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'taxis',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"abeinstein@uchicago.edu"
] | abeinstein@uchicago.edu |
b004160023402e2925034e06e7c40a061e706784 | 772db1333476b3c1c166e78ccfbfaeb36930d4d4 | /webilex/settings.py | 29f2eb9131d8c43fcbafe979c3b6eb9790262232 | [] | no_license | developerpath/kb | b9677875af357ecd6bd068356a2223f95b700dbc | d5fa8bcba25001dddee921ab35c203ad9acf989b | refs/heads/master | 2020-07-23T21:18:14.792791 | 2019-09-11T04:16:27 | 2019-09-11T04:16:27 | 207,708,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,441 | py | import os
from django.urls import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'lwx12+#5&2cjczpbxpy)*-=4**jquwe+6im$pokr(svpg(keop'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
LOGOUT_REDIRECT_URL = reverse_lazy('login')
LOGIN_REDIRECT_URL = reverse_lazy('space_view')
# Application definition
INSTALLED_APPS = [
'knowledgebase',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'webilex.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'webilex.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
#TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
#Static files
STATIC_URL = '/static/'
| [
"graphs@mail.ru"
] | graphs@mail.ru |
dca1e5277177f0e2be3521b793ee9a7eb0f23e2a | bc96f513e45ac3d03c14b4db7e259d709fb115d3 | /ScanSeed.py | 7edb71fa95203d3c322d95401274aa502b2de8fd | [] | no_license | major-lab/pySeqTools | 075fb5de14dd6b7a3022d996b226afd549baa761 | 8231976c2d9524ed8f7b12df02a5bf5d0b92bd52 | refs/heads/master | 2020-05-07T10:16:23.091465 | 2014-07-24T19:57:54 | 2014-07-24T19:57:54 | 21,914,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,971 | py | from script.seqTools import *
from script.ppPrint import *
__author__ = 'nath'
def scan(mRNA,miR):
"""
scan a sequence of mRNA 5' -> 3' looking for a MRE. The seed is defined by the nt 2->7 of the miRNA
return a dictionary
position (int): type of seed (str)
"""
seed = miR.seq[1:7]
seed_rc = revCompl(seed)
listHits = re.finditer(seed_rc,mRNA.seq)
A1 = False
m8 = False
res = {}
for m in listHits:
if mRNA.seq[m.end()] == 'A':
A1 = True
if mRNA.seq[m.start()-1] == revCompl(miR.seq[7]):
m8=True
if A1 and m8:
seedtype = "8mer"
res[m.start()-1] = seedtype
#in the case of 8mer, position on m8
elif m8:
seedtype = "7mer-m8"
res[m.start()-1] = seedtype
#in the case of 7mer-m8, position on m8
elif A1:
seedtype = "7mer-A1"
res[m.start()] = seedtype
#in the case of 7mer-A1, position on m7
else:
seedtype = "6mer"
res[m.start()] = seedtype
return res
def getFPScan(seedtype):
res = [0,0,0,0]
if not seedtype:
return res
else:
res[0]=1
if seedtype == "7mer-A1":
res[1] = 1
elif seedtype == "7mer-m8":
res[2] = 1
elif seedtype == "8mer":
res = [1,1,1,1]
return res
def sumFPScan(fp,newFp):
res = fp.copy()
for i in range(0,len(newFp)):
res[i] += newFp[i]
return res
if __name__ == '__main__':
fc = fastaContainer("../seq_p21.fa")
mRNA = fc.findSeq("SM_000001")
fc = fastaContainer("../data/miR_wu.fa")
mir = fc.findSeq("MIMAT0000063","MIMAT")
print (mir.seq)
res = scan(mRNA,mir)
fp = [0,0,0,0]
for pos in res.keys():
doPPrint(mRNA.seq,mir.seq,pos,offsetmRNA=30)
newFp = getFPScan(res.get(pos))
fp = sumFPScan(fp,newFp)
print (fp)
| [
"naweill@gmail.com"
] | naweill@gmail.com |
1f91b53ab596c2f95e54959103b1003ab3398b37 | 79c9a9d52b7901210fe526981454641a50f17077 | /bbs/forum/urls.py | a3bd5b6cd17296426bbc1c4ff43377a992d683ec | [] | no_license | yfjelley/ptt_jf | 6c2f9af237945f2807d66f958a59139246c76118 | aa1bf56c5e42b533f4ed976054242082cba1f8c0 | refs/heads/master | 2023-04-02T01:18:22.922472 | 2023-03-29T11:24:30 | 2023-03-29T11:24:30 | 40,532,105 | 1 | 0 | null | 2022-12-26T19:50:06 | 2015-08-11T09:02:11 | HTML | UTF-8 | Python | false | false | 1,595 | py | from django.conf.urls import patterns, include, url
api_urlpatterns = patterns('bbs.forum.api',
url(r'^topic/(?P<topic_id>\d+)/$', 'topic_api', name='topic_api'),
url(r'^topics/$', 'topics_api', name='topics_api'),
url(r'^post/(?P<post_id>\d+)/$', 'post_api', name='post_api'),
url(r'^/simditor-upload/$', 'simditor_upload', name='simditor_upload'),
)
urlpatterns = patterns('bbs.forum.views', url(r'^$', 'index', name='bbsindex'),
url(r'^topic/(?P<topic_id>\d+)/$', 'topic_view', name='topic_view'),
url(r'^topic/(?P<topic_id>\d+)/reply/$', 'create_reply', name='create_reply'),
url(r'^topic/(?P<topic_id>\d+)/append/$', 'add_appendix', name='add_appendix'),
url(r'^topic/(?P<topic_id>\d+)/delete/$', 'del_topic', name='delete_topic'),
url(r'^topic/(?P<topic_id>\d+)/edit/$', 'edit_topic', name='edit_topic'),
url(r'^post/(?P<post_id>\d+)/delete/$', 'del_reply', name='delete_post'),
url(r'^node/$', 'node_all', name='node_all'),
url(r'^topic1/(?P<topic_id>\d+)/$', 'collect_topic', name='collect_topic'),
url(r'^node/(?P<node_id>\d+)/$', 'node_view', name='node_view'),
#url(r'^node-list/(?P<theme_id>\d+)/$', 'node_list', name='node_list'),
url(r'^node/(?P<node_id>\d+)/create/$', 'create_topic', name='create_topic'),
url(r'collected-view/$', 'collected_view', name='collected_view'),
url(r'^search/(?P<keyword>.*?)/$', 'search', name='search'),
url(r'^recent/$', 'recent', name='recent'),
url(r'^previewer/$', 'previewer', name='previewer'),
url(r'getverifycode/$', 'verifycode', name='verifycode'),
) | [
"root@iZ28vg01g91Z"
] | root@iZ28vg01g91Z |
ced152ee74e1836bdeb08bcfe32e146b988939d7 | 556db265723b0cc30ad2917442ed6dad92fd9044 | /tensorflow/python/training/experimental/mixed_precision_global_state.py | 6f0a179db65b1ebb31c2cbc1265eaf71b2a09fd6 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | graphcore/tensorflow | c1669b489be0e045b3ec856b311b3139858de196 | 085b20a4b6287eff8c0b792425d52422ab8cbab3 | refs/heads/r2.6/sdk-release-3.2 | 2023-07-06T06:23:53.857743 | 2023-03-14T13:04:04 | 2023-03-14T13:48:43 | 162,717,602 | 84 | 17 | Apache-2.0 | 2023-03-25T01:13:37 | 2018-12-21T13:30:38 | C++ | UTF-8 | Python | false | false | 2,635 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains global variables related to mixed precision.
This is not part of mixed_precision.py to avoid a circular dependency.
mixed_precision.py depends on Session, and Session depends on this file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.tf_export import tf_export
# Whether the mixed precision graph rewrite has been enabled or not with
# `enable_mixed_precision_graph_rewrite`. Used to turn on auto_mixed_precision
# in ConfigProtos passed to Sessions.
_mixed_precision_graph_rewrite_is_enabled = False
# True if a Session has been created without the mixed precision graph rewrite
# being enabled. Used to give a warning if mixed precision is enabled after a
# Session has already been created.
_non_mixed_precision_session_created = False
# Whether the global tf.keras.mixed_precision.Policy uses mixed precision. Used
# to raise an error message if both a mixed Policy and the graph rewrite are
# used at the same time.
_using_mixed_precision_policy = False
@tf_export('__internal__.train.is_mixed_precision_graph_rewrite_enabled', v1=[])
def is_mixed_precision_graph_rewrite_enabled():
return _mixed_precision_graph_rewrite_is_enabled
def set_mixed_precision_graph_rewrite_enabled(enabled):
global _mixed_precision_graph_rewrite_is_enabled
_mixed_precision_graph_rewrite_is_enabled = enabled
def non_mixed_precision_session_created():
return _non_mixed_precision_session_created
def set_non_mixed_precision_session_created(created):
global _non_mixed_precision_session_created
_non_mixed_precision_session_created = created
def is_using_mixed_precision_policy():
return _using_mixed_precision_policy
@tf_export('__internal__.train.set_using_mixed_precision_policy', v1=[])
def set_using_mixed_precision_policy(is_using):
global _using_mixed_precision_policy
_using_mixed_precision_policy = is_using
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
aaea5254ff7c5baa2b88167bc723ed9bfa96693a | cb8b4e8e89ff20994d37c99e0723b167be281369 | /prime.py | d111bbe4c6189a72867b0ac67cb87d6eec66025a | [] | no_license | astronaftis/playground-comp-phys-theory | 6bb110bea43e410802006be0d2a251de217c0b81 | 389b5fd3b7b240c17b4d9167e3c6e5715b7efbc5 | refs/heads/master | 2021-06-06T08:23:10.648120 | 2015-12-18T23:37:49 | 2015-12-18T23:37:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | '''
HW03
Ritesh Pakala
Partner: Ian Cone
This function finds all prime numbers between 2 given integers.
Given integers are entered via command line, and then all prime
numbers in between are printed
'''
import numpy as np
def primer(a, b):
global count;
for i in range(a, b):
count = 0
for j in range(1, i+1):
if(i%j==0):
count+=1
if(count<=2 & i != 1):
print i
primer(1, 101) | [
"Clarke@Clarkes-MacBook-Pro.local"
] | Clarke@Clarkes-MacBook-Pro.local |
612701ff796f856d1f580a50885f2372bb69e094 | a9f2d572a04c727c8339ce3bc15fa812fa992661 | /EU_GENERAL/derive_norm_parms_dsamp500.py | 7c38c116cdc810feefda14d9de336c71d8a9169d | [] | no_license | dmgroppe/SZR_ANT | adca8eeb4c3f8bad8bacf35ad56b3fbc2d5e19a7 | 1ebf8e07b7f713934f29b72a7d68fb06a0c1846d | refs/heads/master | 2020-04-04T22:50:23.664377 | 2018-10-04T14:42:06 | 2018-10-04T14:42:06 | 81,873,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,625 | py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import ieeg_funcs as ief
# Get file list & figure out max and min feature values
in_fnames=list() # List of kdownsampled*.npz files (1 per sub)
sub_list=list() # Corresponding list of subs
# find all subsamp files
path_dict=ief.get_path_dict()
in_dir=os.path.join(path_dict['szr_ant_root'],'EU_GENERAL','KDOWNSAMP')
#in_dir='/Users/davidgroppe/PycharmProjects/SZR_ANT/EU_GENERAL/KDOWNSAMP/'
f_ct=0
for f in os.listdir(in_dir):
if f.startswith('kdownsampled_'):
in_fnames.append(f)
tmp=f.split('_')[1]
sub_list.append(int(tmp.split('.')[0]))
mat=np.load(os.path.join(in_dir,f))
if f_ct==0:
min_ftrs=np.min(mat['ftrs_dsamp'],axis=0) # 30 dim vector
max_ftrs=np.max(mat['ftrs_dsamp'],axis=0) # 30 dim vector
else:
min_ftrs=np.minimum(min_ftrs,np.min(mat['ftrs_dsamp'],axis=0)) # 30 dim vector
max_ftrs=np.maximum(max_ftrs,np.max(mat['ftrs_dsamp'],axis=0)) # 30 dim vector
f_ct+=1
n_band=6
band_labels=['Delta','Theta','Alpha','Beta','Gamma','HGamma']
n_edm=5
n_ftr=n_band*n_edm
n_file=len(in_fnames)
print('%d files total' % n_file)
bin_edge_list=list()
n_bin_list=list()
n_targ_bin=500 #?? make bigger
ictal_p_list=list()
nonictal_p_list=list()
label_list=list()
bin_cntr_list=list()
acc_list=list()
for a in range(n_ftr):
bin_cntr_list.append([])
# Create the histogram edges. Edges may differ across feature but are the same across subs
for edm_ct in range(n_edm):
for b_ct in range(n_band):
bin_edge_list.append(np.linspace(min_ftrs[b_ct+edm_ct*n_band],max_ftrs[b_ct+edm_ct*n_band],n_targ_bin))
#bin_edge_list.append(np.logspace(min_ftrs[b_ct+edm_ct*n_band],max_ftrs[b_ct+edm_ct*n_band],n_targ_bin))
n_bin_list.append(len(bin_edge_list[-1]))
label_list.append(band_labels[b_ct]+'-EDM'+str(edm_ct))
acc_list.append(np.zeros((n_file,n_bin_list[-1])))
# Loop over features and find best threshold for class discrimination
print('Computing accuracy for each feature using a range of thresholds...')
n_wind=np.zeros(n_ftr)
for f_ct, f in enumerate(in_fnames):
print('Working on ftr %d/%d' % (f_ct+1,n_ftr))
mat=np.load(os.path.join(in_dir,f))
n_wind[f_ct]=mat['ftrs_dsamp'].shape[0]
# Loop over features
for edm_ct in range(n_edm):
#for edm_ct in range(1):
raw_ftrs=mat['ftrs_dsamp'].T
for b_ct in range(n_band):
sens=np.zeros(len(bin_edge_list[b_ct+edm_ct*n_band]))
spec=np.zeros(len(bin_edge_list[b_ct+edm_ct*n_band]))
for thresh_ct, thresh in enumerate(bin_edge_list[b_ct+edm_ct*n_band]):
y_hat=(raw_ftrs[b_ct+edm_ct*n_band,:]>=thresh)
sens[thresh_ct]=np.mean(y_hat[mat['szr_class_dsamp']==1])
spec[thresh_ct]=np.mean(y_hat[mat['szr_class_dsamp']==0]==0)
acc=np.abs(-.5+(sens+spec)/2)
acc_list[b_ct+edm_ct*n_band][f_ct,:]=acc
print('Done!')
# Import normalization factors
#in_fname='/Users/davidgroppe/PycharmProjects/SZR_ANT/EU_GENERAL/KDOWNSAMP/ftr_limits.csv'
in_fname=os.path.join(path_dict['szr_ant_root'],'EU_GENERAL','KDOWNSAMP','ftr_limits.csv')
print('Loading normalization parameters from %s' % in_fname)
ftr_lims=pd.read_csv(in_fname,sep=',')
#ftr_lims.head()
# Loop over each feature and figure out what translation and division factor should be
cntr=np.zeros(n_ftr)
div_fact=np.zeros(n_ftr)
plt.figure(10)
plt.clf()
for f_ct in range(n_ftr):
low_bnd=ftr_lims.iloc[f_ct,2]
up_bnd=ftr_lims.iloc[f_ct,1]
# Re-center bins
ftr_vals=bin_edge_list[f_ct]
ftr_vals[ftr_vals>up_bnd]=up_bnd
ftr_vals[ftr_vals<low_bnd]=low_bnd
rng=up_bnd-low_bnd;
div_fact[f_ct]=rng/(2*3.99)
ftr_vals=ftr_vals/div_fact[f_ct]
cntr[f_ct]=-3.99-ftr_vals[0] # add this to feature value to make min possible value=-3.99
ftr_vals=ftr_vals+cntr[f_ct]
plt.plot(ftr_vals,np.mean(acc_list[f_ct],axis=0))
plt.xlabel('Single Feature Threshold')
plt.ylabel('Accuracy')
plt.title('Mean Normalized Feature Accuracy')
plt.show()
# print(ftr_vals[0])
# print(ftr_vals[-1])
# Save normalization parameters to disk
#out_fname='/Users/davidgroppe/PycharmProjects/SZR_ANT/EU_GENERAL/KDOWNSAMP/norm_factors.npz'
out_fname=os.path.join(path_dict['szr_ant_root'],'EU_GENERAL','KDOWNSAMP','norm_factors.npz')
print('Saving normalization parameters to %s' % out_fname)
np.savez(out_fname,cntr=cntr,div_fact=div_fact,ftr_labels=label_list,in_fnames=in_fnames)
| [
"dgroppe@cogsci.ucsd.edu"
] | dgroppe@cogsci.ucsd.edu |
d13fb66958a5b18bff4f2185782788b69da08b6b | 2b9d26c31bb02c2c07aef31cdffb9d52ade7bd3a | /semestr7/subd/migrations/0002_auto_20161116_0833.py | 91c8723a329b73d31b06d6f9246de3d095bbc237 | [
"MIT"
] | permissive | FongeBeats/omgtu | 2471bc8b6a7a492085e87e4952505a2f2b4c822c | 0cad61a2ce6a7c9bb5c9cac72c16eccf33bb4b77 | refs/heads/master | 2023-03-16T08:30:59.926578 | 2017-10-04T06:49:36 | 2017-10-04T06:49:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-16 08:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('subd', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='client',
name='name',
),
migrations.AddField(
model_name='client',
name='fio',
field=models.CharField(blank=True, max_length=200, verbose_name=b'\xd0\xa4\xd0\x98\xd0\x9e'),
),
]
| [
"rfedorov@linkentools.com"
] | rfedorov@linkentools.com |
608c6950e3ea01cb315800f549a5a2fe49a6bc95 | bb9626ae673326043990b7a28fe5c8f6f4b3afbd | /Assignment2/seq_prediction.py | 6753948c5863adeccdfacef351eabe911fd38fec | [
"MIT"
] | permissive | alexzhwqc/deep_learning_practice_part | 932d81981c943b1229bb6b648da9f4713a316ad0 | abde1097f1b1904a62ba60addbfc3080b32f9510 | refs/heads/master | 2020-05-01T05:50:03.609041 | 2019-03-24T01:39:51 | 2019-03-24T01:39:51 | 177,313,435 | 0 | 0 | null | 2019-03-23T16:33:54 | 2019-03-23T16:33:54 | null | UTF-8 | Python | false | false | 3,276 | py | from models import GRU
import torch
import collections
import os
from models import RNN
"""
Notation of this program:
(1) Function: predict a sequence by using the function generate();
(2) initialize variables: PATH, emb_size, hidden_size, seq_len, batch_size,
vocab_size, num_layers, dp_keep_prob, model_type, train_path
(3) train_path represent the path of data file "data\\ptb.train.txt"
(4) PATH is the path of data file of model_state_dict "GRU_SGD_LR_SCHEDULE_0\\best_params.pt"
"""
PATH = os.path.join("GRU_SGD_LR_SCHEDULE_0", "best_params.pt")
# Change path if use RNN to load trained parameters
# Check with parameters used in problem 4.1 to be sure they are the same
emb_size = 200
hidden_size = 1500
seq_len = 35
batch_size = 20
vocab_size = 10000
num_layers = 2
dp_keep_prob = 0.35
model_type = 'RNN'
#load model from file 'GRU_SGD_LR_SCHEDULE_0\\best_params.pt'
def _load_model(emb_size, hidden_size, seq_len, batch_size, vocab_size, num_layers, dp_keep_prob, PATH, model_type):
# Load model (Change to RNN if you want RNN to predict)
if model_type=='RNN':
model = RNN(emb_size, hidden_size, seq_len, batch_size, vocab_size, num_layers, dp_keep_prob)
else:
model = GRU(emb_size, hidden_size, seq_len, batch_size, vocab_size, num_layers, dp_keep_prob)
if torch.cuda.is_available():
model.load_state_dict(torch.load(PATH)).cuda()
model.eval()
else:
model.load_state_dict(torch.load(PATH, map_location='cpu'))
model.eval()
return model
def _read_words(filename):
with open(filename, "r") as f:
return f.read().replace("\n", "<eos>").split()
def _build_vocab(filename):
data = _read_words(filename)
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(len(words))))
id_to_word = dict((v, k) for k, v in word_to_id.items())
return word_to_id, id_to_word
#assign the variables with values, data is in the file of data//ptb.train.txt.
def _prepare():
prefix = "ptb"
data_path = "data"
train_path = os.path.join(data_path, prefix + ".train.txt")
word_to_id, id_2_word = _build_vocab(train_path)
hidden = torch.Tensor(num_layers, batch_size, hidden_size)
hidden = torch.nn.init.zeros_(hidden)
generated_seq_len = seq_len # the generated length should be equal to seq_len and seq_len*2 (see requirements in 5.3)
word_id = torch.LongTensor(1, batch_size).random_(0, vocab_size) # Select the first word randomly
return id_2_word, word_to_id, generated_seq_len, hidden, word_id
#call function to load model and prepare the variables
model = _load_model(emb_size, hidden_size, seq_len, batch_size, vocab_size, num_layers, dp_keep_prob, PATH, model_type)
id_2_word, word_to_id, generated_seq_len, hidden, word_id=_prepare()
#call generate Function
samples = model.generate(word_id, hidden, generated_seq_len)
#show the result
print(id_2_word[word_id[0][0].item()])
#for i in range(generated_seq_len):
for i in range(batch_size):
print(id_2_word[samples[1][i].item()])
| [
"noreply@github.com"
] | alexzhwqc.noreply@github.com |
919212fcb94d69eca28a1659068ff07e436cbabe | a8165e09e4579d88ad8ed7b36232b45aeb0d0794 | /algorithms_methods/w1/2.3.5.py | d2f3681b2e637d1a1946e522dea8232440b5d8f1 | [] | no_license | 80aX/stepik | 280e9d8f90cfe286166067eed5ef422dd80dc8b0 | 7b3d9562f770cb60194c5845b8ce38e694d0f931 | refs/heads/master | 2021-09-09T16:00:43.998642 | 2018-03-17T16:45:25 | 2018-03-17T16:45:25 | 109,884,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | # Задача на программирование: наибольший общий делитель
# По данным двум числам 1≤a,b≤2⋅10^9 найдите их наибольший общий делитель.
# Sample Input 1:
# 18 35
# Sample Output 1:
# 1
# Sample Input 2:
# 14159572 63967072
# Sample Output 2:
# 4
def evklid_nod(a, b):
# вычисляем наибольший общий делитель a, b по алгоритму Евклида
if a == 0 or b == 0:
return a + b
elif a >= b:
return evklid_nod(a % b, b)
elif b >= a:
return evklid_nod(a, b % a)
def main():
a, b = map(int, input().split())
print(evklid_nod(a, b))
if __name__ == "__main__":
main()
| [
"avkids@yandex.ru"
] | avkids@yandex.ru |
b86ec928c6773bf06612918d4529cd6c0064d111 | fc4ed088c732116aca3065973ba5f29f13ff672e | /users/serializers.py | f0484d6a268557525fe9fcb0bec6dc1705d6a288 | [] | no_license | thagafi/ams-be | 17cbf625a35e6e7e4ef9ea5d9efb1faa90a66c67 | 7ac8b11722a7e248c0ee2ce1b1fd415cc62fb1d5 | refs/heads/main | 2023-03-18T21:04:45.181167 | 2021-03-13T22:49:22 | 2021-03-13T22:49:22 | 347,460,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,013 | py | from rest_framework import serializers
from .models import User, Role
from django.contrib.auth.models import Permission
class PermissionSerializer(serializers.ModelSerializer):
class Meta:
model = Permission
fields = '__all__'
class PermissionRelatedField(serializers.StringRelatedField):
def to_representation(self, value):
return PermissionSerializer(value).data
def to_internal_value(self, data):
return data
class RoleSerializer(serializers.ModelSerializer):
permissions = PermissionRelatedField(many=True)
class Meta:
model = Role
fields = '__all__'
def create(self, validated_data):
permissions = validated_data.pop('permissions', None)
instance = self.Meta.model(**validated_data)
instance.save()
instance.permissions.add(*permissions)
instance.save()
return instance
class RoleRelatedField(serializers.RelatedField):
def to_representation(self, instance):
return RoleSerializer(instance).data
def to_internal_value(self, data):
return self.queryset.get(pk=data)
class UserSerializer(serializers.ModelSerializer):
role = RoleRelatedField(many=False, queryset=Role.objects.all())
class Meta:
model = User
fields = ['id', 'first_name', 'second_name', 'third_name', 'last_name', 'username', 'password', 'rank', 'role']
extra_kwargs = {
'password': {'write_only': True}
}
def create(self, validated_data):
password = validated_data.pop('password', None)
instance = self.Meta.model(**validated_data)
if password is not None:
instance.set_password(password)
instance.save()
return instance
# def update(self, instance, validated_data):
# password = validated_data.pop('password', None)
# if password is not None:
# instance.set_password(password)
# instance.save()
# return instance | [
"thagafi1@icloud.com"
] | thagafi1@icloud.com |
3e22413ed013adf061d58432d6029792a3b64c2d | 127d0f1251e543ba271f532471e5153f1ce5597c | /sprint7/schedule.py | c1bab544401f07970c74ecfcd06b491baab76864 | [] | no_license | BugChef/yandex_alghoritms | 0e608d2df899a547cf03273cc4d2f95ca424d3e3 | afe41cbb10878dc1dcfddd1d37ce8d4590f47f68 | refs/heads/main | 2023-04-01T23:09:42.688374 | 2021-04-16T20:03:38 | 2021-04-16T20:03:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | n = int(input())
times = []
for _ in range(n):
time1, time2 = [float(x) for x in input().split()]
times.append((time2, time1))
times.sort()
result = []
for time2, time1 in times:
if not result:
result.append((time1, time2))
continue
last1, last2 = result[-1]
if time1 >= last2:
result.append((time1, time2))
print(len(result))
for r in result:
r1 = r[0] if not r[0].is_integer() else int(r[0])
r2 = r[1] if not r[1].is_integer() else int(r[1])
print(r1, r2)
| [
"kovalcuk@MacBook-Pro-Ila.local"
] | kovalcuk@MacBook-Pro-Ila.local |
09018e0be0d1189db97fad7103f982719fe99170 | e25b917f2f0ce28f6d046afaa9c0faddf0aeae34 | /Tutorials/split_and_merging.py | 0e4d372a63ff7e204d74bc9d502e062c6df0682b | [] | no_license | pmusau17/ComputerVision | d9344c22ed1fe1bf8a8c6166a060c307c08529a5 | 2fcdce0a967567c15232fe3c9f02982ca95e5796 | refs/heads/master | 2020-12-11T00:30:20.292943 | 2020-06-03T21:54:36 | 2020-06-03T21:54:36 | 233,751,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | import numpy as np
import argparse
import cv2
#create argument parser
ap=argparse.ArgumentParser()
ap.add_argument('-i','--image', required=True)
args=vars(ap.parse_args())
#load the image
image=cv2.imread(args['image'])
(B,G,R)=cv2.split(image)
#this will display each of the channels as grayscale
cv2.imshow("Red",R)
cv2.imshow("Green",G)
cv2.imshow("Blue",B)
cv2.waitKey(0)
#this is what I want because I want zeros in the other channels and I hope it gets the
#correct predition
zeros = np.zeros(image.shape[:2],dtype='uint8')
cv2.imshow("Red",cv2.merge([zeros,zeros,R]))
cv2.imshow("Green",cv2.merge([zeros,G,zeros]))
cv2.imshow("Blue",cv2.merge([B,zeros,zeros]))
cv2.waitKey(0)
merged=cv2.merge([B,G,R])
cv2.imshow("Merged",merged)
cv2.waitKey(0)
cv2.destroyAllWindows() | [
"pmusau13ster@gmail.com"
] | pmusau13ster@gmail.com |
96f6244b8bb0b5ffd08d1c5e77a1a92edea101a5 | 63d16a923475d93a6a609d1834c6794504b68299 | /Snake.py | d28465cb70951a0788f7e14b0b50cb6187c4da0e | [] | no_license | intertag/CodeCamp2021-Intro | e14adb2a91460779a06a1e9384af77e22573dfa8 | 56458858509ee6af0e8f4a8f7221032d0f98ffc1 | refs/heads/master | 2023-08-04T01:56:22.684050 | 2021-09-27T04:39:04 | 2021-09-27T04:39:04 | 410,253,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | import turtle
def zeichne_richtung(himmelsrichtung):
heading = hole_heading(himmelsrichtung)
turtle.setheading(heading)
turtle.forward(10)
def hole_heading(richtung):
if richtung == "N":
return 90
if richtung == "O":
return 0
if richtung == "S":
return 270
if richtung == "W":
return 180
richtungen = "N W S N O W S O W N W N"
richtungen_liste = richtungen.split()
for n in richtungen_liste:
zeichne_richtung(n)
turtle.done()
| [
"CBode@dspace.de"
] | CBode@dspace.de |
96cb046a55740f07cda14ed16ce067db9d35c21b | 0a031aa16adc8aa9aff3e2e389662986d8c11673 | /renew_token.py | 6e40e20b2069c814245d9cd4b36d219b5d7f27ba | [
"MIT"
] | permissive | Fireblossom/xiaozhu_ZohoForm_Teams | abb9d7e87d27f32c3db464c080e46060b670165d | 465c67d74f7083e180deccc3515eb31c86fbd2ae | refs/heads/master | 2022-12-28T12:22:45.815852 | 2020-10-06T20:32:46 | 2020-10-06T20:32:46 | 301,752,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,242 | py | import time
import pickle
import config
from adal import AuthenticationContext
import requests
def device_flow_session(ctx, client_id, renew_session=None, refresh_token=''):
"""Obtain an access token from Azure AD (via device flow) and create
a Requests session instance ready to make authenticated calls to
Microsoft Graph.
client_id = Application ID for registered "Azure AD only" V1-endpoint app
Returns Requests session object if user signed in successfully. The session
includes the access token in an Authorization header.
User identity must be an organizational account (ADAL does not support MSAs).
"""
if renew_session is None:
device_code = ctx.acquire_user_code(config.RESOURCE, client_id)
# display user instructions
print(device_code['message'])
token_response = ctx.acquire_token_with_device_code(config.RESOURCE,
device_code,
client_id)
else:
token_response = ctx.acquire_token_with_refresh_token(refresh_token,
client_id,
config.RESOURCE)
print(token_response['expiresOn'])
if not token_response.get('accessToken', None):
return None
session = requests.Session()
session.headers.update({'Authorization': f'Bearer {token_response["accessToken"]}',
'SdkVersion': 'sample-python-adal',
'x-client-SKU': 'sample-python-adal'})
return session, token_response
FILENAME = 'session.pkl'
def save_session(session):
with open(FILENAME, 'wb') as file:
pickle.dump(session, file)
print(time.asctime( time.localtime(time.time()) ), 'session saved.')
if __name__ == '__main__':
ctx = AuthenticationContext(config.AUTHORITY_URL, api_version=None)
GRAPH_SESSION = None
response = {'refreshToken': ''}
while True:
GRAPH_SESSION, response = device_flow_session(ctx, config.CLIENT_ID, GRAPH_SESSION, response['refreshToken'])
save_session(GRAPH_SESSION)
time.sleep(3000) | [
"duanxu961112@gmail.com"
] | duanxu961112@gmail.com |
3e409b314d86cd80a537041ca9269282f6335069 | edda0022158e069a0c04f73dc2b3ee4effc2aa13 | /SVS13py/windmodel_correction.py | 05343d6bd510237290db2702d95960f0df4ca31d | [] | no_license | IAA-CSIC/dragom | d2910959769e9875a9824eecad6d4f1989338af7 | 3cca3a5b32d6cfc3c0d9c769f1bc16f93344e45c | refs/heads/master | 2023-04-14T21:32:13.583010 | 2021-04-21T07:53:38 | 2021-04-21T07:53:38 | 271,499,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,883 | py | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
from scipy import optimize
from matplotlib.gridspec import GridSpec
from itertools import product
import SVS13py.main_functions as main_functions
import SVS13py.mf as mf
from SVS13py.vertslider import VertSlider
R_p = lambda z, s, a: (a - z)**(1/s)
z1_0 = lambda z, z1, s, a, i: np.tan(i) * R_p(z1,s,a) + z1 - z
z2_0 = lambda z, z2, s, a, i: np.tan(i) * R_p(z2,s,a) - z2 + z
z1 = lambda z, s, a, i: optimize.brentq(lambda z1: z1_0(z,z1,s,a,i), 0, z)
z2 = lambda z, s, a, i: optimize.brentq(lambda z2: z2_0(z,z2,s,a,i), z, a)
R1 = lambda z, s, a, i: R_p(z1(z,s,a,i),s,a)
R2 = lambda z, s, a, i: R_p(z2(z,s,a,i),s,a)
dx_zaxis = lambda z, s, a, i: 1/(2*np.cos(i)) * np.abs((R2(z,s,a,i) - R1(z,s,a,i)))
dz_zaxis = lambda z, s, a, i: dx_zaxis(z,s,a,i) / np.cos(i)
dz_zaxis_2 = lambda z, s, a, i: 1/np.sin(i) * np.abs((z1(z,s,a,i)+z2(z,s,a,i))/2 - z)
def dz_zaxis_try(z, s, a, i):
try:
return dz_zaxis(z, s, a, i)
except:
return 0
def dx_zaxis_try(z, s, a, i):
try:
return dx_zaxis(z, s, a, i)
except:
return 0
def proj_correction(a_p, s_p, i_p, z_point, fig=None, axs=None,):
#paraboloid
zs_r = np.linspace(0, a_p, 1000)
zs_p = np.array([z for z in zs_r] + [z for z in zs_r[::-1]])
rs_p = np.array([R_p(z,s_p,a_p,) for z in zs_r] + \
[-R_p(z,s_p,a_p,) for z in zs_r[::-1]])
#zaxis
zaxis_x, zaxis_z = np.array([0]*len(zs_r)), zs_r
#difference between zaxis and the center of the isovelocities
zs_dx = np.linspace(1.5,a_p-0.01, 1000)
dxs_zaxis = np.array([-dx_zaxis(z,s_p,a_p,i_p,) for z in zs_dx])
#z1 and z2 points, defines the proyected isovelocities
z1_point, z2_point = z1(z_point,s_p,a_p,i_p), z2(z_point,s_p,a_p,i_p)
R1_point, R2_point = R1(z_point,s_p,a_p,i_p), R2(z_point,s_p,a_p,i_p)
#we rotate everything
p_cart = {'x':rs_p, 'y':0, 'z':zs_p}
zaxis_cart = {'x':zaxis_x, 'y':0, 'z':zaxis_z}
dxs_zaxis_cart = {'x':dxs_zaxis, 'y':0, 'z':zs_dx}
points_cart = {'x':np.array([-R1_point, R2_point]),
'y':0,
'z':np.array([z1_point, z2_point])}
z_point_cart = {'x':0,'y':0,'z':z_point}
p_rot = main_functions.rot(p_cart, 'y', i_p)
zaxis_rot = main_functions.rot(zaxis_cart, 'y', i_p)
dxs_zaxis_rot = main_functions.rot(dxs_zaxis_cart, 'y', i_p)
points_rot = main_functions.rot(points_cart, 'y', i_p)
z_point_rot = main_functions.rot(z_point_cart, 'y', i_p)
#lets simulate the deprojection
x_obs = np.mean(points_rot['x'])
D_iso = np.abs(points_rot['x'][0]-points_rot['x'][1])
z_depr = x_obs / np.sin(i_p)
# we do assume that the radii is the mean obs.
r_edge = np.sqrt((D_iso/2.)**2 + z_depr**2)
theta_angle = np.arctan((D_iso/2.) / z_depr)
z_depr_cart = {'x':0, 'y':0, 'z':z_depr}
z_depr_rot = main_functions.rot(z_depr_cart, 'y', i_p)
if axs is None:
nrow = 1
ncol = 2
ngrid = 2
magical_factor = 15
wspace = 0
hspace = 0
font_size = 15
fig = plt.figure(figsize=(nrow*magical_factor,(ncol+1)*ngrid,))
gs1 = GridSpec(nrow, (ncol+1)*ngrid, )
gs1.update(wspace=wspace, hspace=hspace,)
axs = {}
n = 0
for i,j in product(range(nrow), [i for i in range(ncol*ngrid)][::ngrid]):
axs[n] = plt.subplot(gs1[i,j:j+ngrid])
n += 1
#non_rotated plots
axs[1].plot(0,0,'k*')
axs[1].plot(rs_p,zs_p, c='b')
axs[1].plot(dxs_zaxis, zs_dx, c='r')
axs[1].plot(zaxis_x, zaxis_z, 'k--')
#rotated plots
axs[0].plot(0,0,'k*')
axs[0].plot(p_rot['x'], p_rot['z'], c='b')
axs[0].plot(zaxis_rot['x'], zaxis_rot['z'], 'k--')
axs[0].plot(dxs_zaxis_rot['x'], dxs_zaxis_rot['z'], c='r')
axs[0].plot(points_rot['x'], points_rot['z'], c='k')
axs[0].plot(np.mean(points_rot['x']), points_rot['z'][1], 'xk')
axs[0].plot(z_point_rot['x'], z_point_rot['z'], 'xg')
axs[0].plot(np.mean(points_rot['x']), z_depr_rot['z'], 'xm')
#deprojected plots
axs[1].plot(0, z_depr, 'xm')
axs[1].plot(points_cart['x'], points_cart['z'], alpha=0.2, c='k')
axs[1].plot([-D_iso/2,D_iso/2], [z_depr,z_depr], 'k')
axs[1].plot(0,z_point,'gx')
#axs[1].plot()
for n in axs:
axs[n].set_aspect('equal')
axs[0].set_title('Projected')
axs[0].set_xlabel("x' (arcsec)")
axs[0].set_ylabel("line of sight (arcsec)")
axs[1].set_title('Deprojected')
axs[1].set_xlabel("x (arcsec)")
axs[1].set_ylabel("z (arcsec)")
class ProjCorrect(object):
init_params = {'a_p':8., 's_p':2.5, 'i_p':np.pi / 9, 'z_point':6.5}
limit_slider = {'a_p_l':1, 'a_p_u':10.,
's_p_l':2.1, 's_p_u':5,
'i_p_l':0, 'i_p_u':np.pi,
'z_point_l':2.5, 'z_point_u':8}
def __init__(self, a_p=None, s_p=None, i_p=None, z_point=None, **kwargs):
"""
Kwargs can be: init_chan
"""
self.params = {'a_p':a_p, 's_p':s_p, 'i_p':i_p, 'z_point':z_point}
for param in self.params:
self.params[param] = self.params[param] \
if self.params[param] is not None \
else self.init_params[param]
self.create_fig()
self.fig.subplots_adjust(left=0.25, bottom=0.35)
self.create_axes()
self.param_sliders = None
self.update_buttons()
self.proj_correction(axs=self.axs)
def create_fig(self):
nrow = 1
ncol = 2
ngrid = 2
magical_factor = 15
wspace = 0
hspace = 0
font_size = 15
self.fig = plt.figure(figsize=(nrow*magical_factor,(ncol+1)*ngrid,))
gs1 = GridSpec(nrow, (ncol+1)*ngrid, )
gs1.update(wspace=wspace, hspace=hspace,)
self.axs = {}
n = 0
for i,j in product(range(nrow), [i for i in range(ncol*ngrid)][::ngrid]):
self.axs[n] = plt.subplot(gs1[i,j:j+ngrid])
n += 1
def create_axes(self):
"""
CAUTION: axis must be created and removed in the same order!
"""
self.slider_ax = {param: self.fig.add_axes([0.25,
0.25-i*0.03,
0.65,
0.03])
for i,param in enumerate(self.params)}
def update_buttons(self,):
"""
Updates the state of the sliders and buttons (for example, after a fit)
"""
# plt.cla()
# self.remove_axes()
# for ax in self.axs:
#
# self.create_fig()
# self.create_axes()
self.param_sliders = {param: Slider(self.slider_ax[param],
param,
self.limit_slider[param+'_l'],
self.limit_slider[param+'_u'],
valinit=self.params[param])
for param in self.params}
for param in self.params:
self.param_sliders[param].on_changed(self.sliders_on_changed)
def sliders_on_changed(self, val):
self.update_params(*[self.param_sliders[param].val
for param in self.param_sliders])
self.fig.canvas.draw_idle()
def update_params(self, a_p, s_p, i_p, z_point):
self.params = {'a_p':a_p, 's_p':s_p, 'i_p':i_p, 'z_point':z_point}
self.axs[0].clear()
self.axs[1].clear()
self.proj_correction(axs=self.axs)
def proj_correction(self, axs=None):
proj_correction(**self.params, axs=axs)
self.fig.canvas.draw_idle()
| [
"gblazquez@iaa.es"
] | gblazquez@iaa.es |
c5f64cf12c0150157b017b2f87759ce2de2c677b | 22e0f3ba4009f45ea994d4438c71540ccaed8ba7 | /ex1.py | ed41a4c72268e01feaea9a5e818e27ee1aecf596 | [] | no_license | justien/lpthw | 7c6a3bba1852c45ff6a0168db402da781da7522c | 2095de718a130108189f0650baae8110e1af1958 | refs/heads/master | 2021-01-19T11:18:04.053727 | 2017-04-11T15:54:54 | 2017-04-11T15:54:54 | 87,952,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | print ""
print ""
print ""
# print "Hello World!"
# print "Hello Again"
# print "I like typing this."
# print "This is fun."
# print 'Yay! Printing.'
# print "I'd much rather you 'not'."
# print 'I "said" do not touch this.'
print "Another line -------------- "
print ""
print ""
print ""
| [
"justinelera@gmail.com"
] | justinelera@gmail.com |
2cb5f05ee9b859d5c5c0937a71ccbb6fe3d024af | 4a14ba192f8779e36945ec8e8f0849aab9bc921b | /blog/migrations/0001_initial.py | 1eef812f6f79c07afbfe8dd9f1ff90a006f05d46 | [] | no_license | ferrari5712/djang1 | 17cfbe6f5730109828c6d88c5caf5ca12b9280ca | 64c153510e34127d5828aebd39b635fd17097770 | refs/heads/master | 2020-04-10T20:20:11.400109 | 2018-12-11T02:01:08 | 2018-12-11T02:01:08 | 161,264,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | # Generated by Django 2.1.4 on 2018-12-11 00:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"ferrari5712@gmail.com"
] | ferrari5712@gmail.com |
f93105415c178127e75cccf7532f79b817f324ad | 526c67db75cd8b822de8c49f6d20f405b0e335b4 | /common/saxtree.py | 2be8f8a9d9f7526e0b1b35e0b13d5c27307d2b6d | [] | no_license | daym/scratchpost.org | 49cb1d79da23e14a2b6c69f78d2745ccc6652ed8 | 1404e4a832ca74ef83ec2b0b628a6f59504d841c | refs/heads/master | 2016-09-15T18:17:09.302497 | 2015-03-13T21:07:36 | 2015-03-13T21:07:36 | 32,099,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,400 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import xml.sax.saxutils as saxutils # escape (simple XML escape), unescape, quoteattr, XMLGenerator(out, encoding = "UTF-8")
import xml.sax.xmlreader as xmlreader
import xml.sax
attr0 = xmlreader.AttributesImpl({})
out = None
doc = None
#nodes = []
class XMLGenerator(saxutils.XMLGenerator):
def __init__(self, out, *args, **kwargs):
saxutils.XMLGenerator.__init__(self, out, encoding = "UTF-8") # *args, **kwargs)
self._out = out # do NOT mess with the damn encoding there
def init(out1):
global out
global doc
out = out1
doc = XMLGenerator(out)
doc.processingInstruction("xml", "version=\"1.0\" encoding=\"utf-8\"")
#<html><body><p>hello</p></body></html>
class Document(object):
@classmethod
def __enter__(klass):
doc.startDocument()
@classmethod
def __exit__(klass, ty, val, tb):
doc.endDocument()
class Node(object):
@classmethod
def __enter__(klass):
pass
@classmethod
def __exit__(klass, ty, val, tb):
pass
def enc(v):
if isinstance(v, unicode):
return(v.encode("utf-8"))
else:
return(v)
class Element(Node):
def __init__(self, **attrs):
self.attrs = attrs # dict([(enc(k),enc(v)) for k, v in attrs.items()])
#self.children = children
def __enter__(self):
attr1 = xmlreader.AttributesImpl(self.attrs)
doc.startElement(self.__class__.__name__.lower(), attr1)
def __exit__(self, ty, val, tb):
doc.endElement(self.__class__.__name__.lower())
class Text(Node):
def __init__(self, value):
self.value = value # enc(value)
def __enter__(self):
doc.characters(self.value) # .decode("utf-8"))
def __exit__(self, ty, val, tb):
pass
class HTML(Element):
pass
class HEAD(Element):
pass
class BODY(Element):
pass
class P(Element):
pass
class DIV(Element):
pass
class NAV(Element):
pass
class UL(Element):
pass
class LI(Element):
pass
class A(Element):
pass
class TABLE(Element):
pass
class TR(Element):
pass
class TD(Element):
pass
class TH(Element):
pass
class META(Element):
pass
class LINK(Element):
pass
class SCRIPT(Element):
pass
class IMG(Element):
pass
class BR(Element): # clear
pass
class OBJECT(Element):
pass
class EM(Element):
pass
class Deunicodizer(object):
def __init__(self, doc):
self.doc = doc
def processingInstruction(self, name, value):
return self.doc.processingInstruction(enc(name), enc(value))
def characters(self, text):
return self.doc.characters(enc(text))
def setDocumentLocator(self, *args, **kwargs):
self.doc.setDocumentLocator(*args, **kwargs)
def startDocument(self):
self.doc.startDocument()
def startElement(self, name, attrs):
attrs = dict([(enc(k),enc(v)) for k, v in attrs.items()])
name = enc(name)
self.doc.startElement(name, attrs)
def endElement(self, name):
self.doc.endElement(enc(name))
def endDocument(self):
self.doc.endDocument()
def copyFrom(f):
xml.sax.parse(f, Deunicodizer(doc))
if __name__ == "__main__":
import StringIO
io = StringIO.StringIO()
init(io)
with HTML():
with BODY():
with P():
with Text("hällo"):
pass
print(io.getvalue())
assert(io.getvalue() == """<?xml version="1.0" encoding="utf-8"?><html><body><p>hällo</p></body></html>""")
io.seek(0)
io2 = StringIO.StringIO()
doc = XMLGenerator(io2)
copyFrom(io)
assert(io2.getvalue() == """<?xml version="1.0" encoding="UTF-8"?>
<html><body><p>hällo</p></body></html>""")
| [
"dannym+a@scratchpost.org"
] | dannym+a@scratchpost.org |
898316ce540a51acc258e553cfa5eac4ed15984a | fdf3df666cea513771db0badf0b52edba4179f3d | /cloud_net/cloud_net 2/cloud_net/settings.py | 1b15c926f42e6c41fd1d08e217026b8f28778b4a | [] | no_license | ixxnafri/Django-Web-Application-on-AWS | affeeb010063a1801ac0e2af69ee5a56679abc3c | 42e9eb9611e521240a5318a1ef8303dd49f89486 | refs/heads/master | 2020-04-19T21:18:56.454262 | 2019-01-31T00:41:21 | 2019-01-31T00:41:21 | 168,438,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,985 | py | """
Django settings for cloud_net project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd*j@x24s9^qu#r(jcm48)t37&7i++(&h&#l^ytr&=y1@p3y2(&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['13.58.120.114', '.elasticbeanstalk.com']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'frinet.apps.FrinetConfig',
'rest_framework',
'webpack_loader',
'rest_framework.authtoken',
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'cloud_net.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['frinet/template'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cloud_net.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ['RDS_DB_NAME'],
'USER': os.environ['RDS_USERNAME'],
'PASSWORD': os.environ['RDS_PASSWORD'],
'HOST': os.environ['RDS_HOSTNAME'],
'PORT': os.environ['RDS_PORT'],
}
}
#DATABASES = {
# 'default': {
#'ENGINE': 'django.db.backends.sqlite3',
#'NAME': os.path.join(BASE_DIR, 'db.sqlite3')
# 'ENGINE': 'django.db.backends.mysql',
# 'OPTIONS': {
# 'read_default_file': os.path.join(BASE_DIR, 'my.cnf')
# }
#
# }
#}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "build"),
]
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'bundles/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
}
}
AUTH_USER_MODEL = 'frinet.User'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
# 'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.AllowAny',
)
}
CACHES = {
'default': {
'BACKEND': 'django_elasticache.memcached.ElastiCache',
'LOCATION': 'cs436-cache.42hh05.cfg.use2.cache.amazonaws.com:11211',
}
}
| [
"ixz@wirelessprv-10-193-58-164.near.illinois.edu"
] | ixz@wirelessprv-10-193-58-164.near.illinois.edu |
dec0ae4b5b56ad49518cde730f854a7359693d14 | e94ca0c4c264932555c4489329bec3ae499d8fc7 | /simple_linear_regression.py | 6f7a4b4429ee34cb07c01b3957d51d90c14c01f8 | [] | no_license | Amlanmukherjee/Machine-Learning-Basics | ba8d5b2d324a2540c1b4e5c442de508a0aa744e0 | 02cb2d0f289c219654fcac54f104eed3748b6669 | refs/heads/master | 2020-06-08T13:02:11.203653 | 2019-06-26T19:53:42 | 2019-06-26T19:53:42 | 193,232,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,115 | py | # -*- coding: utf-8 -*-
"""simple_linear_regression.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1iEtIJXiiP8mGBXmpGJdB3cpaOtDWfpWo
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-400]
diabetes_X_test = diabetes_X[-400:]
diabetes_y_train = diabetes.target[:-400]
diabetes_y_test = diabetes.target[-400:]
regr = linear_model.LinearRegression()
regr.fit(diabetes_X_train, diabetes_y_train)
diabetes_y_pred = regr.predict(diabetes_X_test)
print('Coefficients: \n', regr.coef_)
print("Mean squared error: %.6f"
# % mean_squared_error(diabetes_y_test, diabetes_y_pred))
print('Variance score: %.6f' % r2_score(diabetes_y_test, diabetes_y_pred))
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, diabetes_y_pred, color='red', linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show() | [
"noreply@github.com"
] | Amlanmukherjee.noreply@github.com |
17237a95039c4b7a5d68f70a91b7049b857dfa02 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/kmp_20200716201539.py | c0ccf11ea7faea0cd681296187ca576428e15267 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | '''
Given a text txt[0..n-1] and a pattern pat[0..m-1],
write a function search(char pat[], char txt[]) that prints all occurrences of pat[] in txt[]. You may assume that n > m.
'''
def pattern(txt,pat):
# Catepillar algorithm
# we have a left and right pointer
# then the length of the search string
# when searching for the string when they don't match move the right pointer
# to increase the window size
# if the match return poisition of left, store it in an array
# when the len(sub) > substring move the left pointer
if pat in txt:
left = 0
right = 1
while right < len(txt) and left < len(txt):
if txt[left:right] == pat
print('index',txt.index(pat))
pattern("AABAACAADAABAABA","AABA") | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
27e8803d674a0051577b0ea55fe47c0ff7bdfdd3 | 0013921ed8436fadcf4068481971009f89ad8f83 | /test_first.py | ffdbe1fffc294d89b132c239d0b392aa29c60791 | [] | no_license | abhinav-cmd/url-shortener-atom | 138499c10be7e7a9f05cc8adb1898887de212323 | 96cf85cda7517758a3f9f94d4ab9d50d00624cb6 | refs/heads/master | 2023-03-21T08:40:30.772192 | 2021-03-12T21:49:09 | 2021-03-12T21:49:09 | 347,084,698 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | from urlshort import create_app
def test_shorten(client):
response=client.get('/')
assert b'Shorten' in response.data
def test_shortenzW(client):
response=client.get('/')
assert b'Shortenz' in response.data
| [
"abhinavsvvv@gmail.com"
] | abhinavsvvv@gmail.com |
7e55793da633262b7668c3725a56fd2f7a268ef0 | 64ed5a49a3bf0ece83412ddf29035a67a5fa3da7 | /src/data/make_dataset.py | 249dd480633fcee89d3f0874c54bff2a977730bf | [] | no_license | neighdough/mftax | 1d02f0f75e158f126ee3455794f01f09c6c3f975 | 1461e0b93036548bbb7067abd4072b5c335fd8ea | refs/heads/master | 2020-03-31T14:37:42.955183 | 2018-12-07T21:47:51 | 2018-12-07T21:47:51 | 152,303,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,656 | py | # -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
from sqlalchemy import create_engine, text
import os
import numpy as np
import pandas as pd
import warnings
import itertools
warnings.filterwarnings("ignore")
# Land-use codes ("luc" column) that identify multi-family parcels in the
# assessor data.
MF_CODES = ["002", "003", "059", "061", "067"]
# Property tax rates keyed by jurisdiction code ("0": City of Memphis,
# "D": Shelby County). Presumably expressed as tax per dollar of assessed
# value (i.e. $3.195986 per $100) -- TODO confirm against assessor docs.
TAX_RATES = {"0": 0.03195986, "D": 0.0405}

# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
database_url = os.getenv("DATABASE_URL")
engine = create_engine(database_url)
try:
    # Full assessment table; the CLI commands below filter/aggregate this.
    df = pd.read_sql("select * from mftax.asmt", engine)
except Exception:
    # Best effort: let the module import without a reachable database
    # (e.g. when only showing --help); commands that read `df` will fail.
    pass
@click.group()
def main():
    # Click group: parent command for the table_* subcommands registered
    # below via @main.command(); body intentionally empty.
    pass
@main.command()
@click.argument("city", default="0")
@click.option("--num-units", default=list(range(2,21)),
        help="List containing the number of units to be used in analysis.")
@click.option("--rates", default=[np.round(i, 3) for i in list(np.arange(.25, .4, .025))],
        help="List containing the tax rates to be used in the analysis.")
def table_revenue_estimate(city, num_units, rates):
    """
    Creates table mftax.revenue_estimates containing the results of estimated
    tax generated by changing multi-family tax rates, and also writes the same
    results to ../../reports/estimated_tax_single.csv.

    For each (rate, unit) combination, parcels with exactly `unit` living
    units are assessed at the reduced `rate` of appraised value while all
    other parcels in the study range keep their current assessment.

    Args:
        city (str): City code derived from the first character of a parcelid. The analysis
            is developed primarily to evaluate Memphis and Shelby County, but additional
            values include:
            "0": City of Memphis, "D": Shelby County, "A": Arlington, "B": Bartlett,
            "M": Millington, "G": Germantown, "L": Lakeland
        num_units (list): Living-unit counts (livunit values) evaluated one
            at a time.
        rates (list): Assessment rates to test, as fractions of appraised
            value (e.g. 0.25).

    Returns:
        None

    NOTE(review): TAX_RATES only defines codes "0" and "D"; any other `city`
    value raises KeyError -- confirm whether other codes should be supported.
    """
    logger = logging.getLogger(__name__)
    logger.info("building table mftax.revenue_estimate")
    tot = current_revenue(city)              # current total revenue, city rate
    tot_co = current_revenue(city_rate="D")  # current total revenue, county rate
    max_units = max(num_units)
    min_units = min(num_units)
    # Multi-family commercial parcels within the selected city.
    is_mf = ((df.parcelid.str[0] == city)
             & (df["class"] == "C")
             & (df.luc.isin(MF_CODES)))
    # The appraised/assessed sums depend only on the unit count, not the
    # rate, so compute them once per unit instead of once per (rate, unit)
    # pair -- same arithmetic, far fewer DataFrame scans.
    unit_sums = {}
    for unit in num_units:
        # Appraised value of parcels with exactly `unit` living units:
        # these get the reduced, rate-scaled assessment.
        apr_sum = df[is_mf & (df.livunit == unit)].rtotapr.sum()
        # Assessed value of the remaining parcels in the study range:
        # these keep their current assessment.
        asmt_sum = df[is_mf
                      & (df.livunit != unit)
                      & (df.livunit.between(min_units, max_units))].rtotasmt.sum()
        unit_sums[unit] = (apr_sum, asmt_sum)
    p_rates = []
    p_units = []
    taxes = []
    taxes_co = []  # estimated taxes county
    # Iterate rate-major to preserve the original output row order (and
    # therefore the "fid" index values written to the database).
    for rate, unit in itertools.product(rates, num_units):
        apr_sum, asmt_sum = unit_sums[unit]
        # Projected (reduced-rate) revenue plus unchanged current revenue,
        # at the city rate and at the county rate.
        taxes.append(apr_sum * rate * TAX_RATES[city]
                     + asmt_sum * TAX_RATES[city])
        taxes_co.append(apr_sum * rate * TAX_RATES["D"]
                        + asmt_sum * TAX_RATES["D"])
        p_units.append(unit)
        p_rates.append(rate)
    data = {"tax_rate":p_rates, "num_units":p_units, "est_tax":taxes, "est_tax_co": taxes_co}
    tax = pd.DataFrame(data)
    # Symmetric percent difference between the estimate and current revenue.
    tax["pct_diff"] = (np.abs(tax.est_tax-tot)/((tax.est_tax+tot)/2))
    tax["pct_diff_co"] = (np.abs(tax.est_tax_co-tot_co)/((tax.est_tax_co+tot_co)/2))
    tax.to_sql("revenue_estimates", engine, schema="mftax",
        if_exists="replace", index_label="fid")
    tax.to_csv("../../reports/estimated_tax_single.csv", index=False)
# @click.command()
# @click.argument('input_filepath', type=click.Path(exists=True))
# @click.argument('output_filepath', type=click.Path())
@main.command()
def table_asmt():#(input_filepath, output_filepath):
    """
    pulls data from relevant sources (../raw, postgresql, etc.) and adds it to the schema
    for this analysis (i.e. mftax)

    Rebuilds mftax.asmt from scratch: parcel geometry joined with assessment
    and parcel attributes, plus the 2010 census tract containing each parcel's
    centroid. Adds indexes on parcelid and the geometry column.

    Args:
        None
    Returns:
        None
    """
    logger = logging.getLogger(__name__)
    logger.info('building schema and loading data')
    q_schema = ("create schema if not exists mftax")
    engine.execute(q_schema)
    # Full rebuild: drop any previous copy of the table first.
    q_drop = ("drop table if exists mftax.asmt")
    engine.execute(q_drop)
    q_load = ("create table mftax.asmt as "
                "select row_number() over() fid, parcelid, "
                    "concat(adrno, ' ', adrstr, ' ', adrsuf, ' ', zip1) addr, "
                    "a.class, a.luc, livunit, zoning, rtotasmt, rtotapr, "
                    "tractid, sca_parcels.wkb_geometry "
                "from sca_parcels "
                "left join sca_asmt a on parcelid = a.parid "
                "left join sca_pardat pd on parcelid = pd.parid "
                "left join (select wkb_geometry, geoid10 tractid from geography.tiger_tract_2010) t "
                    "on st_intersects(st_centroid(sca_parcels.wkb_geometry), t.wkb_geometry)"
            )
    engine.execute(q_load)
    q_idx = ("create index ix_asmt_parcelid on mftax.asmt (parcelid);"
             "create index gix_asmt_wkb_geometry on mftax.asmt using gist(wkb_geometry);"
             "alter table mftax.asmt add primary key (fid);"
            )
    engine.execute(q_idx)
@main.command()
@click.option("--unit-step", default=1, required=False,
        help="Set the step value to be used to increment the number of living units.")
@click.option("--rate-step", default=.025, required=False,
        help="Set the step value to be used to increment the tax rate.")
def table_tax_rates_livunits(unit_step, rate_step, **kwargs):
    """
    Create the mftax.tax_rates and mftax.livunits tables holding the tax
    rates and living-unit counts used throughout the analysis.

    Keyword Arguments:
        unit_min (int): minimum number of living units (default 2)
        unit_max (int): maximum number of living units, exclusive (default 21)
        rate_min (float): minimum tax rate to be considered (default .25)
        rate_max (float): maximum tax rate to be considered, exclusive (default .4)
    Returns:
        None
    """
    logger = logging.getLogger(__name__)
    logger.info('loading livunits and tax rate tables')
    # Fall back to the analysis defaults when a bound is not supplied.
    unit_min = kwargs.get("unit_min", 2)
    unit_max = kwargs.get("unit_max", 21)
    rate_min = kwargs.get("rate_min", .25)
    rate_max = kwargs.get("rate_max", .4)
    rate_values = [np.round(r, 3) for r in np.arange(rate_min, rate_max, rate_step)]
    unit_values = list(range(unit_min, unit_max, unit_step))
    pd.DataFrame({"tax_rate": rate_values}).to_sql(
        "tax_rates", engine, schema="mftax", if_exists="replace", index_label="fid")
    pd.DataFrame({"livunit": unit_values}).to_sql(
        "livunits", engine, schema="mftax", if_exists="replace", index_label="fid")
@main.command()
def update_zoning():
    """
    Adds a new column to the asmt table and populates it with the zoning type that the
    parcel centroid falls in. This is necessary because there is some discrepancy
    between the value in zoning column that the parcel has listed and the zone in which
    the parcel actually resides
    """
    logger = logging.getLogger(__name__)
    logger.info("adding and updating column zoning_actual to mftax.asmt")
    # Postgres arrays are 1-based, so element [1] of regexp_split_to_array is
    # the text of zone_type that precedes the first "(".
    update = ("alter table mftax.asmt "
                "drop column if exists zoning_actual;"
              "alter table mftax.asmt "
                "add column zoning_actual text;"
              "update mftax.asmt a "
                 "set zoning_actual = (regexp_split_to_array(zone_type, '\('))[1] "
              "from mftax.zoning z "
              "where st_intersects(st_centroid(a.wkb_geometry), z.wkb_geometry);"
            )
    engine.execute(update)
@main.command()
def table_mdn_apr_by_luc():
    """Build mftax.mdn_apr_by_luc: per census tract and land-use code, the
    median appraisal, median living-unit count, and that code's share of the
    tract's parcels. Includes luc '062' in addition to MF_CODES.
    """
    logger = logging.getLogger(__name__)
    logger.info("building table mftax.mdn_apr_by_luc")
    q = ("create table mftax.mdn_apr_by_luc as "
            "select row_number() over() fid, t.geoid10 tractid, median(rtotapr) mdn_apr, "
                "luc, median(livunit) mdn_livunit, "
                "count(luc)/sum(count(luc)) over (partition by geoid10) as pct_luc "
            "from mftax.asmt a, geography.tiger_tract_2010 t "
            "where st_intersects(a.wkb_geometry, t.wkb_geometry) "
            "and luc in ('{}') "
            "group by t.geoid10, luc "
            "order by geoid10, luc "
        )
    engine.execute("drop table if exists mftax.mdn_apr_by_luc")
    # The luc filter is interpolated into the IN (...) list; '062' is added to
    # the multi-family codes here.
    engine.execute(q.format("', '".join(MF_CODES+['062'])))
    q_idx = ("create index idx_tractid_mdn_apr_by_luc on mftax.mdn_apr_by_luc (tractid);"
             "alter table mftax.mdn_apr_by_luc add primary key (fid);"
            )
    engine.execute(q_idx)
@main.command()
def table_vacancy_by_zoning():
    """Build mftax.vacancy_by_zoning: count of vacant parcels (luc '000') per
    census tract and zoning class, restricted to the multi-family-capable
    zoning categories matched by the SIMILAR TO pattern.
    """
    logger = logging.getLogger(__name__)
    logger.info("building table mftax.vacancy_by_zoning")
    q = ("create table mftax.vacancy_by_zoning as "
            "select geoid10 tractid, zoning_actual, count(parcelid) num_vac "
            "from mftax.asmt a, geography.tiger_tract_2010 t "
            "where st_intersects(st_centroid(a.wkb_geometry), t.wkb_geometry) "
            "and zoning_actual similar to '%(RU|CMU|CMP|RW|OG|CBD)%' "
            "and luc = '000' "
            "group by geoid10, zoning_actual "
            "order by geoid10, zoning_actual "
        )
    engine.execute(text(q))
@main.command()
@click.argument("city", default="0")
@click.option("--rates", default=[np.round(i, 3) for i in list(np.arange(.25, .4, .025))],
        help="List containing the tax rates to be used in the analysis.")
def table_revenue_projections(city, rates):
    """
    Project tax revenue from building out vacant multi-family-zoned parcels.

    Writes ../../data/processed/current_projected_tax.csv with two columns per
    rate:
      * mf_projections  -- every vacant parcel is developed with the densest
        multi-family land use its zoning allows, valued at the tract median
        appraisal for that land use.
      * current_trends  -- vacant parcels are developed following each tract's
        existing land-use distribution.

    Args:
        city (str): jurisdiction code used to select the tax rate
            ("0" = City of Memphis, "D" = Shelby County). BUGFIX: this was
            previously read from an undefined global, raising NameError.
        rates (list of float): assessment rates to evaluate.
    Returns:
        None
    """
    vacancy = pd.read_sql("select * from mftax.vacancy_by_zoning", engine)
    luc_value = pd.read_sql("select * from mftax.mdn_apr_by_luc", engine)
    #uses lookup table to identify lucs that are compatible with each zone
    #luc values are in order by number of units with high rise apartments (059) first
    #and duplexes last so that land use with greatest potential for density is
    #evaluated first
    luc_mf_value = luc_value[luc_value.luc != '062']
    zoning = pd.read_csv("../../data/raw/luc_zoning_lookup.csv", dtype=str)
    taxes_mf_scenario = np.zeros(len(rates))
    for i in vacancy.index:
        tract, zone, vac = vacancy.loc[i]
        # Find the densest land use (lowest index in the lookup column) that is
        # both compatible with this zone and already present in the tract.
        compatible_lucs = zoning[zone].tolist()
        tract_lucs = set(luc_mf_value[luc_mf_value.tractid == tract].luc)
        luc_idx = [compatible_lucs.index(luc)
                   for luc in set(compatible_lucs).intersection(tract_lucs)]
        if luc_idx:  # compatible land use exists: use that tract's median appraisal
            best_luc = compatible_lucs[min(luc_idx)]
            mdn_val = luc_mf_value[(luc_mf_value.tractid == tract) &
                                   (luc_mf_value.luc == best_luc)].mdn_apr.values[0]
        else:  # none present in the tract: fall back to the city-wide median
            best_luc = zoning[zone][0]
            mdn_val = luc_mf_value[luc_mf_value.luc == best_luc].mdn_apr.median()
        # Accumulate this tract/zone's projected revenue for every rate.
        taxes_mf_scenario = [tax + vac*mdn_val*rate*TAX_RATES[city]
                             for tax, rate
                             in zip(taxes_mf_scenario, rates)
                             ]
    #calculate potential revenue using current land use distribution patterns
    vacancy_totals = vacancy.groupby("tractid").num_vac.sum()
    luc_value = luc_value.join(vacancy_totals, on="tractid")
    taxes_current_trends = np.zeros(len(rates))
    m = lambda row, rate: row.mdn_apr*row.pct_luc*row.num_vac*rate*TAX_RATES[city]
    taxes_current_trends = [tax + luc_value.apply(m, axis=1, args=(rate,)).sum()
                            for tax, rate
                            in zip(taxes_current_trends, rates)
                            ]
    df_comparison = pd.DataFrame(data={"current_trends": taxes_current_trends,
                                       "mf_projections": taxes_mf_scenario},
                                 index=rates)
    df_comparison.to_csv("../../data/processed/current_projected_tax.csv", index_label="rate")
def current_revenue(city="0", city_rate=None, max_units=20):
    """
    Estimate the property-tax revenue currently generated by multi-family
    parcels in a jurisdiction.

    Args:
        city (str): jurisdiction code (first character of a parcelid):
            "0": City of Memphis
            "D": Shelby County
            "A": Arlington
            "B": Bartlett
            "M": Millington
            "G": Germantown
            "L": Lakeland
    Optional:
        city_rate (str): apply this jurisdiction's tax rate instead of
            ``city``'s (e.g. the county tax owed on city parcels).
        max_units (int): inclusive upper bound on living units considered;
            the lower bound is always 2.
    Returns:
        float: total assessed value of the selected parcels multiplied by the
        applicable tax rate.
    """
    rate_code = city_rate if city_rate else city
    # df = pd.read_sql("select * from mftax.asmt", engine)
    in_city = df.parcelid.str[0] == city
    is_commercial = df["class"] == "C"
    is_multifamily = df.luc.isin(MF_CODES)
    in_unit_range = df.livunit.between(2, max_units)
    selected = df[in_city & is_commercial & is_multifamily & in_unit_range]
    return selected.rtotasmt.sum() * TAX_RATES[rate_code]
@main.command()
@click.argument("city", default="0")
@click.option("--num-units", default=list(range(2,21)),
        help="List containing the number of units to be used in analysis.")
@click.option("--rates", default=[np.round(i, 3) for i in list(np.arange(.25, .4, .025))],
        help="List containing the tax rates to be used in the analysis.")
@click.option("--unit-step", default=1, required=False,
        help="Set the step value to be used to increment the number of living units.")
@click.option("--rate-step", default=.025, required=False,
        help="Set the step value to be used to increment the tax rate.")
def rebuild_all(city, num_units, rates, unit_step, rate_step):
    """
    Rebuilds all tables in project schema.

    The sibling commands are click ``Command`` objects, so their underlying
    functions are reached via ``.callback``; calling a command object directly
    would re-enter click's CLI argument parsing. BUGFIX: the previous version
    called an undefined ``set_units_and_rates`` with an undefined ``kwargs``,
    which raised NameError before any table was rebuilt.
    """
    table_asmt.callback()
    table_revenue_estimate.callback(city, num_units, rates)
    table_tax_rates_livunits.callback(unit_step, rate_step)
    update_zoning.callback()
    table_mdn_apr_by_luc.callback()
    table_vacancy_by_zoning.callback()
if __name__ == '__main__':
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    # not used in this stub but often useful for finding various files
    project_dir = Path(__file__).resolve().parents[2]
    # Run from the script's own directory so the relative ../../ paths used by
    # the commands resolve regardless of the caller's cwd.
    os.chdir(os.path.dirname(__file__))
    main()
| [
"neighdough@gmail.com"
] | neighdough@gmail.com |
d6db78cbc8e88ec12c049d25fffbbe429655373c | c22b9c7c4a854ed985e777bcbecd18870439b334 | /byteofpy/file.py | b2c51954e6226494b8066a0e68daaff28ff6f548 | [
"BSD-3-Clause"
] | permissive | pezy/python_test | ceb35a8a63ca8ebe26ffa5c72ace664718c7b328 | b019a0d9f267b5071c37fc85c9acaf27e9146625 | refs/heads/master | 2021-01-19T01:09:20.820202 | 2016-07-30T08:35:15 | 2016-07-30T08:35:15 | 18,096,404 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | poem = '''\
Programming is fun
When the work is done
if you wanna make your work also fun:
use Python!
'''
# Open for writing
f = open('poem.txt', 'w')
# Write text to file
f.write(poem)
f.close()
# If no mode is specified
# Read mode is assumed by default
f = open('poem.txt')
while True:
line = f.readline()
# Zero length indicates EOF
if len(line) == 0:
break
print line,
f.close()
| [
"urbancpz@gmail.com"
] | urbancpz@gmail.com |
cc080838094fc53325f5e1ad83d05cd8d1a69c7f | 352109461d5c703f793e7db3c4d5a5ad163a216f | /server/data_processing/index_wrapper.py | 55c75665a6f43dcc95c603033e3284010002e46d | [
"Apache-2.0"
] | permissive | wrosko/exbert | 509c9d248d801e0a811cc00c79997435260c3e35 | 0ea81018649bb09b81b54c8bfa444d71e235ba48 | refs/heads/master | 2022-11-23T11:44:59.044637 | 2020-07-23T15:57:22 | 2020-07-23T15:57:22 | 281,986,569 | 0 | 0 | null | 2020-07-23T15:18:36 | 2020-07-23T15:18:35 | null | UTF-8 | Python | false | false | 3,368 | py | from functools import partial
import faiss
import numpy as np
from pathlib import Path
from typing import Iterable
from utils.f import memoize
from transformers import AutoConfig
@memoize
def get_config(model_name):
    # Cached lookup of the HuggingFace model config; memoized so repeated
    # wrapper construction does not re-fetch/parse the config.
    return AutoConfig.from_pretrained(model_name)
# Glob pattern / filename template for the per-layer faiss index files.
FAISS_LAYER_PATTERN = 'layer_*.faiss'
LAYER_TEMPLATE = 'layer_{:02d}.faiss'
def create_mask(head_size:int , n_heads:int, selected_heads:Iterable[int]):
    """Build a flat 0/1 mask of length ``head_size * n_heads``.

    Positions belonging to a head listed in ``selected_heads`` are 1 (keep
    that head's contribution); every other position is 0 (ignore it).

    Parameters:
    -----------
    head_size: Hidden dimension of a single head
    n_heads: Number of heads the model has
    selected_heads: Head indices (int-convertible) we don't want to zero out
    """
    per_head = np.zeros(n_heads)
    per_head[[int(h) for h in selected_heads]] = 1
    return np.repeat(per_head, head_size)
class Indexes:
    """Wrapper around the faiss indices to make searching for a vector simpler and faster.

    Assumes the folder contains one index file per layer matching `pattern`
    (e.g. layer_00.faiss ... layer_NN.faiss), including one for the input
    embeddings.
    """
    def __init__(self, folder, pattern=FAISS_LAYER_PATTERN):
        self.base_dir = Path(folder)
        self.n_layers = len(list(self.base_dir.glob(pattern))) - 1 # Subtract final output
        self.indexes = [None] * (self.n_layers + 1) # Initialize empty list, adding 1 for input
        self.pattern = pattern
        self.__init_indexes()
        # Extract model name from folder hierarchy (two levels above the index dir).
        self.model_name = self.base_dir.parent.parent.stem
        self.config = get_config(self.model_name)
        self.nheads = self.config.num_attention_heads
        self.hidden_size = self.config.hidden_size
        assert (self.hidden_size % self.nheads) == 0, "Number of heads does not divide cleanly into the hidden size. Aborting"
        # Per-head embedding width; used by subclasses to build head masks.
        self.head_size = int(self.config.hidden_size / self.nheads)
    def __getitem__(self, v):
        """Slices not allowed, but index only"""
        return self.indexes[v]
    def __init_indexes(self):
        # Slot each "layer_NN.faiss" file into position NN of self.indexes;
        # the numeric suffix of the filename determines the slot.
        for fname in self.base_dir.glob(self.pattern):
            print(fname)
            idx = fname.stem.split('_')[-1]
            self.indexes[int(idx)] = faiss.read_index(str(fname))
    def search(self, layer, query, k):
        """Search a given layer for the query vector. Return k results"""
        return self[layer].search(query, k)
class ContextIndexes(Indexes):
    """Index wrapper that zeroes out non-selected heads before searching."""
    def __init__(self, folder, pattern=FAISS_LAYER_PATTERN):
        super().__init__(folder, pattern)
        # Pre-bind the model geometry so only the selected heads vary per call.
        self.head_mask = partial(create_mask, self.head_size, self.nheads)
    # Int -> [Int] -> np.Array -> Int -> (np.Array(), )
    def search(self, layer:int, heads:list, query:np.ndarray, k:int):
        """Search layer ``layer`` for ``query`` after masking it to ``heads``."""
        assert max(heads) < self.nheads, "max of selected heads must be less than nheads. Are you indexing by 1 instead of 0?"
        assert min(heads) >= 0, "What is a negative head?"
        distinct_heads = list(set(heads))
        mask = self.head_mask(distinct_heads).reshape(query.shape)
        masked_query = (query * mask).astype(np.float32)
        return self[layer].search(masked_query, k)
| [
"benjamin.hoover@ibm.com"
] | benjamin.hoover@ibm.com |
6809085f9885e6f57126dab2ff54953c84d4801d | 77aa8c9213cfb5c44c2d62579b7e92f64a479800 | /player9.py | d5a6ef4b0c069ffa2b1b79aeed242fbdf9e6d372 | [] | no_license | aiswarya98/programs | 9761f0ab0cb5c7b93e34f7ed534af012a9bfffdb | 96ffb4c3b0c32ea54bd769bfa4c728ac9710bb5a | refs/heads/master | 2020-06-13T23:58:37.756727 | 2019-08-14T05:03:51 | 2019-08-14T05:03:51 | 194,829,076 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | x,y=map(str,input().split())
a=x.lower()
b=y.lower()
z=a[0].upper()+a[1:]
q=b[0].upper()+b[1:]
print(z,q,end=' ')
| [
"noreply@github.com"
] | aiswarya98.noreply@github.com |
20669ccdb3751b10784605a3c603bc93463b40f7 | ccdbb604d1caa2c8c59ac0a0476f17433055b09d | /WeChatXy/settings.py | d7c8ee577062602c2bf990f8deac3b71fa8ad23b | [] | no_license | MxyEI/WeChatXy | 169afcf37cf5d5d7d7f9058d0c1180ab61177541 | 4b3b8c39d0b6917435ddd13015ed0e1801ead26e | refs/heads/master | 2023-03-16T14:13:47.720384 | 2017-10-20T09:26:36 | 2017-10-20T09:26:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,249 | py | """
Django settings for WeChatXy project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b*#tcouy1gaq1jbab4-q#62_3p%n2_-)=a#x9%95wg*wqf)9co'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'WeChatXy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'WeChatXy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# STATICFILES_DIRS = (
# os.path.join(BASE_DIR, 'static'),
# )
STATIC_ROOT = os.path.join(BASE_DIR, "static")
| [
"627290749@qq.com"
] | 627290749@qq.com |
8c44fcf9ba2c5152b408c904304e659528c432ce | 7e321b98c5f13f07450f244ca2f57f02c5c0cf50 | /narcs-chat/narc_server.py | a47f0c49d8de8a67e412641fe6f377df20e220d2 | [] | no_license | wompersand/old-janky-projects | 0d234335ab0f0ab4fd9c16d461c1aea0e1a9170f | 9f1d70a0dc301302edf2e2851f3904c5f3bfc415 | refs/heads/master | 2020-06-30T20:37:01.343080 | 2019-08-07T01:25:38 | 2019-08-07T01:25:38 | 200,946,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,125 | py | from rsa import *
import socket
import select
from chat_client_polished import encrypt, decrypt, pad16, depad16
from Crypto.Cipher import AES
import base64
import sys
# for niceness ######################################################
import sys
from colorama import init
from termcolor import cprint
from pyfiglet import figlet_format
# colorama: strip ANSI color codes when stdout is not a terminal.
init(strip=not sys.stdout.isatty()) # strip colors if stdout is redir.
#####################################################################
'''
BROADCASTING FUNCTION
---------------------
(1) This chat server will work like a chat room.
Once a client sends us data to post, the server will send (forward)
the data to all active clients except for the sending client. This will be done with either CHEAP RSA encryption or AES depending on what the client has chosen. Each client comes up with their own AES shared secret and gives it to the server to be stored in a keyring bound to the client's identity.
AES shared secret exchange is done using CHEAP RSA. This means the client uses the server's public key to encrypt and send the shared secret to the server.
(2) Data section of TCP packet will be encrypted using either CHEAP RSA or AES.
'''
def broadcast_more_like_breadcast(sender_socket, message):
# the conditional in this FOR loop is so that we do not send this message
# to either the master socket or the sending client.
for sock in ACTIVE_CONNECTIONS_LIST:
if sock != sender_socket and sock != master_socket:
try:
message_1 = message
fd = sock.fileno()
# AES
if NO_ENCRYPTION:
sock.send(message_1)
elif fd in ACTIVE_CONNECTIONS_KEYRING_AES:
print "\n[*] broadcasting using AES on connection {}".format(fd)
message_1 = pad16(message)
sock.send('E'+base64.b64encode(ACTIVE_CONNECTIONS_KEYRING_AES[fd][0].encrypt(message_1)))
print "\n[*] encrypting using AES key for connection {}.".format(fd)
# RSA
elif fd in ACTIVE_CONNECTIONS_KEYRING:
current_pub_key = ACTIVE_CONNECTIONS_KEYRING[fd]
print "\n[*] encrypting using pubkey {} for connection {}.".format(current_pub_key, fd)
sock.send( 'E'+encrypt(message_1,current_pub_key) )
else:
sock.send(message_1)
except:
# this exception handles broken connections which
# are just assumed to be closed by client.
fd = sock.fileno()
sock.close()
ACTIVE_CONNECTIONS_LIST.remove(sock)
del ACTIVE_CONNECTIONS_KEYRING[fd]
del ACTIVE_CONNECTIONS_KEYRING_AES[fd]
if __name__=="__main__":
    # Debug tracing for the AES key-exchange path (0 = off).
    DEBUG = 0
    # DEFINITIONS (global := capitalization)
    NO_ENCRYPTION = 0
    ACTIVE_CONNECTIONS_LIST = []        # sockets watched by select(), incl. the listener
    ACTIVE_CONNECTIONS_KEYRING = {}     # fileno -> client RSA public key
    ACTIVE_CONNECTIONS_KEYRING_AES = {} # fileno -> (AES obj for encrypt, AES obj for decrypt)
    BUFFER_RECV = 16384
    PORT = 1337
    IP = '0.0.0.0'
    KEY_SIZE = 24
    # Passing "NO_ENCRYPTION" on the command line puts the whole room in plaintext mode.
    if len(sys.argv) > 1:
        if sys.argv[1] == 'NO_ENCRYPTION':
            NO_ENCRYPTION = 1
    server_public, server_private = generate_key_pair(KEY_SIZE)
    # Handshake line advertised to every new client (modulus n and exponent e).
    public_message = 'PUBKEY200 '+ str(server_public.n) + ' ' + str(server_public.e)
    master_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # needed?
    master_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    master_socket.bind((IP, PORT))
    master_socket.listen(10)
    # add master socket to active connections list A(readable sockets)
    ACTIVE_CONNECTIONS_LIST.append(master_socket)
    print "\n[*] started chat server on port {}.".format(PORT)
    cprint(figlet_format('NARCS\n SERVER', font='colossal'), 'yellow', attrs=['bold'])
    if NO_ENCRYPTION:
        print "*** WARNING: running in plaintext mode (no encryption used). ***"
    # start main loop
    while True:
        ###########################################################
        # retrieve list of sockets ready to be read using select()
        read_soccs, write_soccs, error_soccs = select.select(ACTIVE_CONNECTIONS_LIST, [], [])
        ###########################################################
        for socc in read_soccs:
            # case [1]: NEW CONNECTION
            if socc == master_socket:
                # here a new connection is received at the master (server) socket
                socketfd, addr = master_socket.accept()
                ACTIVE_CONNECTIONS_LIST.append(socketfd)
                print "\n[+] client <%s, %s> has connected. Added to active connections list." % addr
                # [1a] send server public key to new client:
                socketfd.send(public_message)
                # announce client entry to room.
                m = "\n{} HAS ENTERED THE ROOM.\n".format(addr)
                broadcast_more_like_breadcast(socketfd, m)
            # case [2]: MESSAGE received from existing client.
            else:
                # process data received.
                # try/catch block for robustness.
                try:
                    data = socc.recv(BUFFER_RECV)
                    if data:
                        data_tok = data.split(' ')
                        # HANDLE FORMATTED MESSAGES FOR KEY EXCHANGE
                        if data_tok[0] == 'PUBKEY200':
                            # proto for adding public key: client sends its RSA
                            # modulus n and exponent e as two integers.
                            n_client = int(data_tok[1])
                            e_client = int(data_tok[2])
                            fd = socc.fileno()
                            ACTIVE_CONNECTIONS_KEYRING[int(fd)] = publicKey(n = n_client, e = e_client)
                            print "\n[*] public key {} added to keyring for connection {}.".format(ACTIVE_CONNECTIONS_KEYRING[fd], fd)
                            socc.send("\n[**] server has received your public key.\n")
                            print "\n[*] GOT KEY."
                        elif data_tok[0] == 'AESKEY200':
                            # proto for adding AES shared key: payload is
                            # "key@!delim!@iv" RSA-encrypted with the server's
                            # public key; decrypted with the private key here.
                            if DEBUG:
                                print '1 - AESKEY200'
                            key_iv_client = decrypt(' '.join(data_tok[1:]), server_private)
                            if DEBUG:
                                print '1a - decrypted: {}'.format(key_iv_client)
                            key_iv_client = key_iv_client.split('@!delim!@')
                            if DEBUG:
                                print '1b - split: {}'.format(key_iv_client)
                            key_client = key_iv_client[0]
                            if DEBUG:
                                print '2 - key len: {}'.format(len(key_client))
                            iv_client = key_iv_client[1]
                            if DEBUG:
                                print '3 - iv len: {}'.format(len(iv_client))
                            fd = socc.fileno()
                            if DEBUG:
                                print fd
                            # tuple for encryption and decryption
                            # in stored tuple: first element is for encryption, second is for decryption.
                            # NOTE(review): CBC cipher objects are stateful; reusing the
                            # same pair for every message chains IVs across messages and
                            # requires the client to keep identical state.
                            ACTIVE_CONNECTIONS_KEYRING_AES[int(fd)] = (AES.new(key_client, AES.MODE_CBC, iv_client), AES.new(key_client, AES.MODE_CBC, iv_client) )
                            print "\n[*] shared key {} added to AES keyring for connection {}.".format(ACTIVE_CONNECTIONS_KEYRING_AES[fd], fd)
                            socc.send("\n[**] server has received your AES key.\n")
                            # SANITY CHECK
                            if DEBUG:
                                test = 'dog'
                                entest = ACTIVE_CONNECTIONS_KEYRING_AES[int(fd)][0].encrypt(pad16(test))
                                print 'ISSUE WITH ENCRYPTION?'
                                detest = ACTIVE_CONNECTIONS_KEYRING_AES[int(fd)][1].decrypt(entest)
                                print "detest type: {}".format(type(detest))
                                detestupad = depad16(detest)
                                print "SANITY CHECK: [{}] ENCRYPTS TO [{}]".format(test, entest)
                                print "which decrypts to [{}]".format(detestupad)
                            print "\n[*] GOT AES KEY."
                        else:
                            # Regular chat traffic; an 'E' prefix marks an encrypted payload.
                            if data[0] == 'E':
                                if DEBUG:
                                    print "received message [{}] of lenght: {}".format(data, len(data))
                                fd = socc.fileno()
                                if fd in ACTIVE_CONNECTIONS_KEYRING_AES:
                                    de_message = ACTIVE_CONNECTIONS_KEYRING_AES[fd][1].decrypt(base64.b64decode(data[1:]))
                                    if DEBUG:
                                        print "raw decrypted AES message is: [{}]".format(de_message)
                                    de_message = depad16( de_message )
                                    if DEBUG:
                                        print "broadcasting [{}] using AES keyring.".format(de_message)
                                    broadcast_more_like_breadcast(socc, "\r"+str(socc.getpeername())+" -> "+ de_message )
                                else:
                                    broadcast_more_like_breadcast(socc, "\r"+str(socc.getpeername())+" -> "+ decrypt(data[1:], server_private))
                            else:
                                broadcast_more_like_breadcast(socc, "\r"+str(socc.getpeername())+" -> "+ data)
                except:
                    # NOTE(review): `addr` here is whatever the most recent accept()
                    # set, not necessarily this socket's peer, so the departure
                    # message can name the wrong client. The two `del`s below also
                    # raise KeyError (propagating out of this handler) if the
                    # client never registered that key type.
                    en_message = "\n[-] client <{}> has LEFT the room.\n".format(addr)
                    broadcast_more_like_breadcast(socc,en_message)
                    print "\n[-]client <%s:%s> has gone offline.\n" % addr
                    fd = socc.fileno()
                    socc.close()
                    ACTIVE_CONNECTIONS_LIST.remove(socc)
                    del ACTIVE_CONNECTIONS_KEYRING[fd]
                    del ACTIVE_CONNECTIONS_KEYRING_AES[fd]
                    continue
    master_socket.close()
#if __name__ == "__main__": main()
| [
"fish@carp.lad"
] | fish@carp.lad |
c55a7f11171dec79d8d48c8833eb853080eb98b4 | 77a79db13c04c035e5f42b136bf37de2bedec52f | /LIB/build/lib/libraries/FGES/runner.py | 2f81a5a30d39b731ebe67a170092681f51b76cd2 | [
"MIT"
] | permissive | sarthak-chakraborty/CausIL | 6e71ea4091c1b5bcf7aff5469926e80fc99282a1 | 31e2ca0fbf9f2a6e9416020f1c8cb8a6d2371f97 | refs/heads/main | 2023-07-08T15:35:02.861313 | 2023-07-03T16:08:59 | 2023-07-03T16:08:59 | 597,629,481 | 8 | 1 | null | 2023-02-27T05:32:56 | 2023-02-05T05:50:42 | Python | UTF-8 | Python | false | false | 913 | py | from sklearn.preprocessing import KBinsDiscretizer
from libraries.FGES.fges_main import FGES
import time
import pandas as pd
def fges_runner(data, nodes, score, knowledge=None, disc=None, n_bins=5, file_name=None):
    """Run the FGES causal-structure search over ``data``.

    When ``disc`` names a KBinsDiscretizer strategy, every node whose attrs
    dict has ``type == 'cont'`` is first discretized into ``n_bins`` ordinal
    bins (in place, on ``data``). Returns whatever ``FGES.search()`` produces.
    """
    if disc is not None:
        discretizer = KBinsDiscretizer(n_bins, encode='ordinal', strategy=disc)
        # Columns tagged continuous in the node metadata get discretized.
        continuous_cols = [node[0] for node in nodes if node[1]['type'] == 'cont']
        data[continuous_cols] = discretizer.fit_transform(data[continuous_cols])
    variables = list(range(len(data.to_numpy()[0])))
    # print("Running FGES on graph with " + str(len(variables)) + " nodes.")
    start_time = time.time()  # retained for the (commented-out) timing printout
    fges = FGES(variables, nodes, data, knowledge=knowledge, filename=file_name, save_name=file_name, score=score)
    result = fges.search()
    # print("--- %s seconds ---" % (time.time() - start_time))
    return result
| [
"sarthak.chakraborty@gmail.com"
] | sarthak.chakraborty@gmail.com |
2d5ee23a8cba0fea02e4f205bafc24f5c98fc027 | 375e5bca82843647941068bd7634cf7adf2015ca | /tests/test_transforms_resize_modulo_pad_crop.py | f72a33b918735569e106f2221c7a10a6e1392d92 | [
"MIT"
] | permissive | civodlu/trw | cd57e7bded7fdb0a9d623ed9cd50645fab96583b | 11c59dea0072d940b036166be22b392bb9e3b066 | refs/heads/master | 2023-02-08T09:56:39.203340 | 2023-02-07T14:22:16 | 2023-02-07T14:22:16 | 195,147,670 | 12 | 2 | MIT | 2020-10-19T15:24:11 | 2019-07-04T01:19:31 | Python | UTF-8 | Python | false | false | 1,864 | py | import unittest
import trw
import torch
import numpy as np
class TestTransformsResizeModuloPadCrop(unittest.TestCase):
    """Tests for trw.transforms.TransformResizeModuloCropPad ('crop' and 'pad' modes)."""
    def test_crop_mode_torch(self):
        # Default mode: 64x64 spatial dims come out as 60x60; batch and
        # channel dims are untouched.
        batch = {
            'images': torch.rand([2, 3, 64, 64], dtype=torch.float32)
        }
        tfm = trw.transforms.TransformResizeModuloCropPad(60)
        transformed = tfm(batch)
        assert transformed['images'].shape == (2, 3, 60, 60)
    def test_crop_mode_torch_multiples(self):
        # test with multiple of `multiples_of` shape: 64x64 with multiple 10
        # yields 60x60 (nearest multiple below the input size).
        batch = {
            'images': torch.rand([2, 3, 64, 64], dtype=torch.float32)
        }
        tfm = trw.transforms.TransformResizeModuloCropPad(10)
        transformed = tfm(batch)
        assert transformed['images'].shape == (2, 3, 60, 60)
    def test_crop_mode_torch_different_shape(self):
        # Two keyed tensors with different channel counts; a 42.0 marker at
        # (32, 32) in both is used below to check the same spatial crop was
        # applied to each (indices from 'images' looked up in 'images2').
        batch = {
            'images': torch.rand([2, 3, 64, 64], dtype=torch.float32),
            'images2': torch.rand([2, 1, 64, 64], dtype=torch.float32)
        }
        batch['images'][0, 0, 32, 32] = 42.0
        batch['images2'][0, 0, 32, 32] = 42.0
        tfm = trw.transforms.TransformResizeModuloCropPad(60)
        transformed = tfm(batch)
        # make sure we can handle different shapes of the same dimension
        assert transformed['images'].shape == (2, 3, 60, 60)
        assert transformed['images2'].shape == (2, 1, 60, 60)
        # make sure the crop/pad are the same for the different images
        indices = np.where(batch['images'].numpy() == 42)
        assert (batch['images2'][indices] == 42.0).all()
    def test_pad_mode_torch(self):
        # Pad mode: 65x65 is padded up to 96x96, the next multiple of 32.
        batch = {
            'images': torch.rand([2, 3, 65, 65], dtype=torch.float32)
        }
        tfm = trw.transforms.TransformResizeModuloCropPad(32, mode='pad')
        transformed = tfm(batch)
        assert transformed['images'].shape == (2, 3, 96, 96)
| [
"civodlu@gmail.com"
] | civodlu@gmail.com |
aa2fafe5d101fa0bc9ddda95a62548b8f0521549 | 4112e360776ab2e7d122d246544514bdaf9ea8a9 | /nbp.py | b3b2f6408fe7044e50933d07476d0a8ca78aa029 | [] | no_license | pbeat8/SmallApi | 4517ceea44269363ce8026eaa6011f9d542a4349 | 4d4304a2f80997e7b510b863438e54bfd7e0f285 | refs/heads/master | 2023-04-08T03:38:04.110350 | 2021-04-19T11:01:39 | 2021-04-19T11:01:39 | 345,459,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,769 | py | import requests
import datetime as dtm
import matplotlib.pyplot as plt
def _url(path):
return 'http://api.nbp.pl/api/' + path
def get_avg_rates_date(code, start, end):
ret = []
if type(start) != type(dtm.date.today()):
fstdate = dtm.datetime.strptime(start, "%Y-%m-%d").date()
else:
fstdate = start
if type(end) != type(dtm.date.today()):
snddate = dtm.datetime.strptime(end, "%Y-%m-%d").date()
else:
snddate = end
days = snddate - fstdate
while days > dtm.timedelta(days=93):
get = requests.get(
_url('exchangerates/rates/A/{}/{}/{}/'.format(code, fstdate.strftime("%Y-%m-%d"),
(fstdate + dtm.timedelta(days=92)).strftime("%Y-%m-%d"))))
if get.status_code != 200:
print("Error : {}".format(get.status_code))
return []
ret += get.json()['rates']
fstdate += dtm.timedelta(days=93)
days = snddate - fstdate
get = requests.get(
_url(
'exchangerates/rates/A/{}/{}/{}/'.format(code, fstdate.strftime("%Y-%m-%d"), snddate.strftime("%Y-%m-%d"))))
if get.status_code != 200:
print("Error {} ".format(get.status_code))
return []
ret += get.json()['rates']
return ret
def get_avg_rates(code, days):
return get_avg_rates_date(code, (dtm.date.today() - dtm.timedelta(days=days)), dtm.date.today())
def get_one_day_rate(code, date):
get = requests.get(_url('exchangerates/rates/A/{}/{}/'.format(code, date)))
if get.status_code != 200:
print("Error: {}".format(get.status_code))
return ()
return get.json()['rates']
def get_closest_one_day_rate(code, date):
get = requests.get(_url('exchangerates/rates/A/{}/{}/'.format(code, date)))
if get.status_code != 200:
if get.status_code == 404:
datebefore = dtm.datetime.strptime(date, "%Y-%m-%d").date() - dtm.timedelta(days=1)
date = datebefore.strftime("%Y-%m-%d")
return get_closest_one_day_rate(code, date)
else:
print("Error: {} -->{}".format(get.status_code, _url('exchangerates/rates/A/{}/{}/'.format(code, date))))
return []
return get.json()['rates']
def plot(rates1, rates2, label1, label2, dates):
fig, ax = plt.subplots(figsize=(5, 3))
ax.plot(dates, rates1, label=label1)
ax.plot(dates, rates2, label=label2)
ax.set_title('Średni kurs walut z ostatnich 120 dni')
ax.legend(loc='upper left')
ax.set_xlabel('Daty')
ax.set_ylabel('Kurs w PLN')
ax.set_xlim(xmin=min(dates), xmax=max(dates))
ax.set_ylim(ymin=3.6*1.1, ymax=4.9*1.1)
plt.gcf().autofmt_xdate(rotation=25)
fig.tight_layout()
plt.show()
| [
"61192446+pbeat8@users.noreply.github.com"
] | 61192446+pbeat8@users.noreply.github.com |
f280b4134fbf765f438d41362606efd502c38e48 | 726fcbe1ea138a9fc7d4e555ed8a3c9916f67825 | /backup/modules/gmail.py | a7481a0f0f4f74d3f626c35753dca39775091729 | [] | no_license | apex2060/pangolin_pad | 2188b92a6fef0146f81d62a896c1c7153e396bb3 | fd4595ce53a2f6579782d8bdd5d0a683a9514507 | refs/heads/master | 2021-06-05T14:03:22.445949 | 2016-08-01T12:23:57 | 2016-08-01T12:23:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,335 | py | #!/usr/bin/python
# ----- GMAIL INTERFACE MODULE -----
# Allows the Pi to send emails via a pre-authorised gmail account
# v1.0:
# Secure log in
# Send email to any address with any subject & content
# Attach file to message
# List unread messages
# Not yet implemented:
# Read messages
import smtplib
import string
import poplib
from email import parser
import email
import mimetypes
import email.mime.application
def login(sender, password):
# Checks log-in credentials
server = smtplib.SMTP_SSL('smtp.gmail.com', 465) # NOTE: This is the GMAIL SSL port.
try:
server.login(sender, password)
server.quit()
return True
except:
return False
def message(sender, password, to, subject, message):
# Send message (text only)
note = " (via GM's rPi emailApp)"
SUBJECT = subject + note
TO = to
FROM = sender
text = message
BODY = string.join((
'From: %s' % FROM,
'To: %s' % TO,
'Subject: %s' % SUBJECT,
'',
text
), '\r\n')
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.login(sender, password)
server.sendmail(FROM, [TO], BODY)
server.quit()
def attach(sender, password, to, subject, message, filename):
# Create a text/plain message
note = " (via GM's rPi emailApp)"
msg = email.mime.Multipart.MIMEMultipart()
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = to
# The main body is just another attachment
body = email.mime.Text.MIMEText(message)
msg.attach(body)
# Attachment
fp=open(filename,'rb')
att = email.mime.application.MIMEApplication(fp.read(),_subtype="dat")
fp.close()
att.add_header('Content-Disposition','attachment',filename=filename)
msg.attach(att)
# send via gmail
s = smtplib.SMTP('smtp.gmail.com')
s.starttls()
s.login(sender, password)
s.sendmail(sender, [to], msg.as_string())
s.quit()
def read(username, password):
pop_conn = poplib.POP3_SSL('pop.gmail.com')
pop_conn.user(username)
pop_conn.pass_(password)
#Get messages from server:
messages = [pop_conn.retr(i) for i in range(1, len(pop_conn.list()[1]) + 1)]
# Concat message pieces:
messages = ["\n".join(mssg[1]) for mssg in messages]
#Parse message intom an email object:
messages = [parser.Parser().parsestr(mssg) for mssg in messages]
for message in messages:
print message['subject']
print pop_conn.list()[1]
pop_conn.quit()
| [
"pangolinpaw@hotmail.co.uk"
] | pangolinpaw@hotmail.co.uk |
c659e3b677690fd4e2c63b55424526db1582cdc8 | 2bf22ce5c2e4b42d524bab7fbafb9a955e9b3976 | /Tesseract/server.py | 30760690fcc6f3e7fd5e6a5c341f07095ce1f5c9 | [] | no_license | ferjad/doc_utils | e506706e0358a7583b047dc8e9c6c2d2abb13b56 | 7c705e1c9a3a3f953d56035400df85083e44d50b | refs/heads/master | 2020-03-18T18:55:15.828955 | 2019-05-22T14:43:47 | 2019-05-22T14:43:47 | 135,122,707 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | import os
from flask import Flask, request
from werkzeug import secure_filename
import requests
import subprocess
app = Flask(__name__)
# save files to this folder
UPLOAD_FOLDER = 'uploads'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
#save file on post request
@app.route('/', methods=['POST'])
def upload_file():
if request.method == 'POST':
#get the file
file = request.files['file']
#if file is present
if file:
#get filename
filename = secure_filename(file.filename)
#save file
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
#get tesseract output
result = subprocess.check_output(['tesseract', 'uploads/'+filename,'stdout'])
return result.decode('utf-8')
if __name__ == '__main__':
#run server on localhost and port 9200
app.run(host='0.0.0.0', port=9200)
| [
"14beemnaeem@seecs.edu.pk"
] | 14beemnaeem@seecs.edu.pk |
4458de82c3dd6bf952a20bbf03df9c82d222b510 | 75ff9ab8cbe39709610c900573e33a24cbdc6a23 | /app/blogengine/blog/migrations/0003_auto_20190601_1326.py | 25376afe7ecdc28e9ffc46f3c87067a8f4e1ce3c | [] | no_license | VItaliiMihda/blog_engine | 56332fe2074031f48b1b25e105d6d471d1c060cd | a762e7d916be51dee458a068babada55b36b58a5 | refs/heads/master | 2023-04-27T10:02:17.755477 | 2019-06-08T11:13:58 | 2019-06-08T11:13:58 | 186,293,033 | 0 | 0 | null | 2023-04-21T20:32:17 | 2019-05-12T19:01:20 | Python | UTF-8 | Python | false | false | 582 | py | # Generated by Django 2.2.1 on 2019-06-01 10:26
from django.conf import settings
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20190601_1252'),
]
operations = [
migrations.AlterField(
model_name='post',
name='author',
field=models.ForeignKey(default=django.contrib.auth.models.User, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"vitalik.migda@gmail.com"
] | vitalik.migda@gmail.com |
35eb793cf5c0da87028642fd214f456bcf09a337 | 722ea16bde740c3cf42166c9b0d7689ebaf5de87 | /wagtail_recycle_bin/views.py | 98ca57df4106b51e817208dce1ec29642924d06f | [
"MIT"
] | permissive | alvianorami/wagtail-trash | 0df127560088ef22f861a9eb38b8cae71a2c3cce | 992091c11fde2e50cfb212758b47b66ebc3aebc1 | refs/heads/master | 2023-01-25T04:23:05.042826 | 2020-10-27T10:03:39 | 2020-10-27T10:03:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,799 | py | import json
from django.utils.http import is_safe_url
from django.utils.translation import gettext as _
from django.shortcuts import redirect, render
from wagtail.core.models import Site, Page
from wagtail.core import hooks
from wagtail.admin import messages
from wagtail.admin.views.pages import delete
from .models import RecycleBinPage, RecycleBin
from .utils import recycle_bin_for_request, generate_page_data, restore_and_move_page
from .forms import MoveForm
def get_valid_next_url_from_request(request):
next_url = request.POST.get("next") or request.GET.get("next")
if not next_url or not is_safe_url(
url=next_url, allowed_hosts={request.get_host()}
):
return ""
return next_url
def recycle_delete(request, page):
if not request.method == 'POST':
return
recycle_bin = recycle_bin_for_request(request)
parent = page.get_parent()
if parent.id == recycle_bin.id:
page.delete(user=request.user)
messages.success(
request, _("Page '{0}' deleted.").format(page.get_admin_display_title())
)
else:
RecycleBin.objects.create(
page=page, parent=parent, user=request.user, data=generate_page_data(page)
)
page.get_descendants(inclusive=True).unpublish()
page.move(recycle_bin, pos="first-child", user=request.user)
messages.success(
request,
_("Page '{0}' moved to recycle bin.").format(
page.get_admin_display_title()
),
)
next_url = get_valid_next_url_from_request(request)
if next_url:
return redirect(next_url)
return redirect("wagtailadmin_explore", parent.id)
def recycle_move(request, page_id):
if request.method == "POST":
rb = RecycleBin.objects.get(page_id=page_id)
move_to_page = Page.objects.get(pk=request.POST.get("move_page"))
restore_and_move_page(rb, move_to_page, request)
messages.success(
request,
_("Page '{0}' successfully restored.").format(
rb.page.get_admin_display_title()
),
)
return redirect("wagtailadmin_explore", rb.page_id)
return render(
request,
"wagtail_recycle_bin/move.html",
{
"form": MoveForm(),
},
)
def recycle_restore(request, page_id, move_to_id=None):
rb = RecycleBin.objects.get(page_id=page_id)
page = rb.page
if not page.permissions_for_user(request.user).can_edit():
raise PermissionDenied
restore_and_move_page(rb, rb.parent, request)
messages.success(
request,
_("Page '{0}' successfully restored.").format(page.get_admin_display_title()),
)
return redirect("wagtailadmin_explore", page_id)
| [
"hello@rinti.se"
] | hello@rinti.se |
a6159c8300fb0b87abae3a18443aa2a0f5289589 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02909/s153186210.py | 30d874e36f07b14826fef70e4615890c76d5b77a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | s=input()
if s=="Sunny":
a="Cloudy"
elif s=="Cloudy":
a="Rainy"
else:
a="Sunny"
print(a) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
53d526a07ecc2ee317fa8c79e6da01dda2cb561a | 657755b9ce45eb1e1d13368bb1441e804eff8389 | /platform-modules/batch-models/src/test/resources/python/main/vidyavaani/content/get_concepts.py | 6c1bfa4bd6074b31ba89b777c6b3d51717ccad65 | [
"MIT"
] | permissive | Kaali09/sunbird-analytics | c894fb234b2ea97e2aacff1e373ca80871c5cd22 | eebb74a9b97bf9d19b61cca7ca8befc40207c3c7 | refs/heads/master | 2020-09-29T20:08:30.942369 | 2019-11-08T10:15:16 | 2019-11-08T10:15:16 | 163,248,192 | 0 | 0 | MIT | 2018-12-27T04:46:01 | 2018-12-27T04:46:00 | null | UTF-8 | Python | false | false | 24 | py | print "getting concepts" | [
"amitb@ilimi.in"
] | amitb@ilimi.in |
f494446b3502a7598d0dc1298d55887c13b58736 | 0d20bdccbcc8512d07637820d009c47f46c86ddf | /messages/migrations/0013_auto_20180429_1640.py | fe78b8d5465ee2b7de8f375a7b603a157574b8f2 | [] | no_license | Snuggle/flossy | edc78122c906e0427575557ed71f7eafa4c40728 | b8e51360ed074e19482ac63033b589e071623821 | refs/heads/main | 2022-05-02T11:07:02.247321 | 2022-02-10T23:05:42 | 2022-02-10T23:05:42 | 131,497,789 | 0 | 0 | null | 2022-04-22T23:35:58 | 2018-04-29T13:42:59 | Python | UTF-8 | Python | false | false | 419 | py | # Generated by Django 2.0.4 on 2018-04-29 16:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('messagesapp', '0012_auto_20180429_1629'),
]
operations = [
migrations.AlterField(
model_name='message',
name='datetime',
field=models.DateTimeField(blank=True, default=None, null=True),
),
]
| [
"snuggle@sprinkly.net"
] | snuggle@sprinkly.net |
4ea6aa9ce676c7bc47eda703741b9d2b39042d9a | b640e2566c4465aee6f961c5fd1640e7115e2ca0 | /test.py | 2a94f99cf8a9fd5c4df2f4c231732c7a7bbd72ab | [] | no_license | roonyi/myapp2 | 3ff5617709eb1d2ceab8339850e4d7fc18e0a127 | c3c864b433afedc0e45fbb78ef66d0e6e59cd1d3 | refs/heads/master | 2022-05-24T00:02:54.534005 | 2020-05-03T01:27:32 | 2020-05-03T01:27:32 | 260,783,697 | 0 | 0 | null | 2020-05-03T01:27:33 | 2020-05-02T22:06:58 | HTML | UTF-8 | Python | false | false | 209 | py | print("hello world")
print('hola mundo')
print('hello world')
print('hola mundo')
print('hello world')
print('hola mundo')
print('hello world')
X=0
print('hola mundo')
print('hello world')
print('hola mundo')
| [
"Roonyi@gmail.com"
] | Roonyi@gmail.com |
03a3ca057562ab99e7a7f7fa9b400e945b6c94f8 | 2293c76c3d18e2fcd44ded90bd40113d26285663 | /pyeccodes/defs/grib2/tables/14/4_2_0_1_table.py | f018e05360c37820a130d303e43efa4aad77138d | [
"Apache-2.0"
] | permissive | ecmwf/pyeccodes | b1f121dbddf68d176a03805ed5144ba0b37ac211 | dce2c72d3adcc0cb801731366be53327ce13a00b | refs/heads/master | 2022-04-23T10:37:40.524078 | 2020-04-18T06:30:29 | 2020-04-18T06:30:29 | 255,554,540 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 11,096 | py | def load(h):
return ({'abbr': 0, 'code': 0, 'title': 'Specific humidity', 'units': 'kg/kg'},
{'abbr': 1, 'code': 1, 'title': 'Relative humidity', 'units': '%'},
{'abbr': 2, 'code': 2, 'title': 'Humidity mixing ratio', 'units': 'kg/kg'},
{'abbr': 3, 'code': 3, 'title': 'Precipitable water', 'units': 'kg m-2'},
{'abbr': 4, 'code': 4, 'title': 'Vapour pressure', 'units': 'Pa'},
{'abbr': 5, 'code': 5, 'title': 'Saturation deficit', 'units': 'Pa'},
{'abbr': 6, 'code': 6, 'title': 'Evaporation', 'units': 'kg m-2'},
{'abbr': 7, 'code': 7, 'title': 'Precipitation rate', 'units': 'kg m-2 s-1'},
{'abbr': 8, 'code': 8, 'title': 'Total precipitation', 'units': 'kg m-2'},
{'abbr': 9,
'code': 9,
'title': 'Large-scale precipitation (non-convective)',
'units': 'kg m-2'},
{'abbr': 10,
'code': 10,
'title': 'Convective precipitation',
'units': 'kg m-2'},
{'abbr': 11, 'code': 11, 'title': 'Snow depth', 'units': 'm'},
{'abbr': 12,
'code': 12,
'title': 'Snowfall rate water equivalent',
'units': 'kg m-2 s-1'},
{'abbr': 13,
'code': 13,
'title': 'Water equivalent of accumulated snow depth',
'units': 'kg m-2'},
{'abbr': 14, 'code': 14, 'title': 'Convective snow', 'units': 'kg m-2'},
{'abbr': 15, 'code': 15, 'title': 'Large-scale snow', 'units': 'kg m-2'},
{'abbr': 16, 'code': 16, 'title': 'Snow melt', 'units': 'kg m-2'},
{'abbr': 17, 'code': 17, 'title': 'Snow age', 'units': 'd'},
{'abbr': 18, 'code': 18, 'title': 'Absolute humidity', 'units': 'kg m-3'},
{'abbr': 19,
'code': 19,
'title': 'Precipitation type',
'units': 'Code table 4.201'},
{'abbr': 20,
'code': 20,
'title': 'Integrated liquid water',
'units': 'kg m-2'},
{'abbr': 21, 'code': 21, 'title': 'Condensate', 'units': 'kg/kg'},
{'abbr': 22, 'code': 22, 'title': 'Cloud mixing ratio', 'units': 'kg/kg'},
{'abbr': 23, 'code': 23, 'title': 'Ice water mixing ratio', 'units': 'kg/kg'},
{'abbr': 24, 'code': 24, 'title': 'Rain mixing ratio', 'units': 'kg/kg'},
{'abbr': 25, 'code': 25, 'title': 'Snow mixing ratio', 'units': 'kg/kg'},
{'abbr': 26,
'code': 26,
'title': 'Horizontal moisture convergence',
'units': 'kg kg-1 s-1'},
{'abbr': 27, 'code': 27, 'title': 'Maximum relative humidity', 'units': '%'},
{'abbr': 28,
'code': 28,
'title': 'Maximum absolute humidity',
'units': 'kg m-3'},
{'abbr': 29, 'code': 29, 'title': 'Total snowfall', 'units': 'm'},
{'abbr': 30,
'code': 30,
'title': 'Precipitable water category',
'units': 'Code table 4.202'},
{'abbr': 31, 'code': 31, 'title': 'Hail', 'units': 'm'},
{'abbr': 32, 'code': 32, 'title': 'Graupel (snow pellets)', 'units': 'kg/kg'},
{'abbr': 33,
'code': 33,
'title': 'Categorical rain',
'units': 'Code table 4.222'},
{'abbr': 34,
'code': 34,
'title': 'Categorical freezing rain',
'units': 'Code table 4.222'},
{'abbr': 35,
'code': 35,
'title': 'Categorical ice pellets',
'units': 'Code table 4.222'},
{'abbr': 36,
'code': 36,
'title': 'Categorical snow',
'units': 'Code table 4.222'},
{'abbr': 37,
'code': 37,
'title': 'Convective precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 38,
'code': 38,
'title': 'Horizontal moisture divergence',
'units': 'kg kg-1 s-1'},
{'abbr': 39,
'code': 39,
'title': 'Per cent frozen precipitation',
'units': '%'},
{'abbr': 40, 'code': 40, 'title': 'Potential evaporation', 'units': 'kg m-2'},
{'abbr': 41,
'code': 41,
'title': 'Potential evaporation rate',
'units': 'W m-2'},
{'abbr': 42, 'code': 42, 'title': 'Snow cover', 'units': '%'},
{'abbr': 43,
'code': 43,
'title': 'Rain fraction of total cloud water',
'units': 'Proportion'},
{'abbr': 44, 'code': 44, 'title': 'Rime factor', 'units': 'Numeric'},
{'abbr': 45,
'code': 45,
'title': 'Total column integrated rain',
'units': 'kg m-2'},
{'abbr': 46,
'code': 46,
'title': 'Total column integrated snow',
'units': 'kg m-2'},
{'abbr': 47,
'code': 47,
'title': 'Large scale water precipitation (non-convective)',
'units': 'kg m-2'},
{'abbr': 48,
'code': 48,
'title': 'Convective water precipitation',
'units': 'kg m-2'},
{'abbr': 49,
'code': 49,
'title': 'Total water precipitation',
'units': 'kg m-2'},
{'abbr': 50,
'code': 50,
'title': 'Total snow precipitation',
'units': 'kg m-2'},
{'abbr': 51,
'code': 51,
'title': 'Total column water (Vertically integrated total water (vapour + '
'cloud water/ice))',
'units': 'kg m-2'},
{'abbr': 52,
'code': 52,
'title': 'Total precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 53,
'code': 53,
'title': 'Total snowfall rate water equivalent',
'units': 'kg m-2 s-1'},
{'abbr': 54,
'code': 54,
'title': 'Large scale precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 55,
'code': 55,
'title': 'Convective snowfall rate water equivalent',
'units': 'kg m-2 s-1'},
{'abbr': 56,
'code': 56,
'title': 'Large scale snowfall rate water equivalent',
'units': 'kg m-2 s-1'},
{'abbr': 57, 'code': 57, 'title': 'Total snowfall rate', 'units': 'm/s'},
{'abbr': 58, 'code': 58, 'title': 'Convective snowfall rate', 'units': 'm/s'},
{'abbr': 59, 'code': 59, 'title': 'Large scale snowfall rate', 'units': 'm/s'},
{'abbr': 60,
'code': 60,
'title': 'Snow depth water equivalent',
'units': 'kg m-2'},
{'abbr': 61, 'code': 61, 'title': 'Snow density', 'units': 'kg m-3'},
{'abbr': 62, 'code': 62, 'title': 'Snow evaporation', 'units': 'kg m-2'},
{'abbr': 63, 'code': 63, 'title': 'Reserved'},
{'abbr': 64,
'code': 64,
'title': 'Total column integrated water vapour',
'units': 'kg m-2'},
{'abbr': 65,
'code': 65,
'title': 'Rain precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 66,
'code': 66,
'title': 'Snow precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 67,
'code': 67,
'title': 'Freezing rain precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 68,
'code': 68,
'title': 'Ice pellets precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 69,
'code': 69,
'title': 'Total column integrated cloud water',
'units': 'kg m-2'},
{'abbr': 70,
'code': 70,
'title': 'Total column integrated cloud ice',
'units': 'kg m-2'},
{'abbr': 71, 'code': 71, 'title': 'Hail mixing ratio', 'units': 'kg/kg'},
{'abbr': 72,
'code': 72,
'title': 'Total column integrated hail',
'units': 'kg m-2'},
{'abbr': 73,
'code': 73,
'title': 'Hail precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 74,
'code': 74,
'title': 'Total column integrated graupel',
'units': 'kg m-2'},
{'abbr': 75,
'code': 75,
'title': 'Graupel (snow pellets) precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 76,
'code': 76,
'title': 'Convective rain rate',
'units': 'kg m-2 s-1'},
{'abbr': 77,
'code': 77,
'title': 'Large scale rain rate',
'units': 'kg m-2 s-1'},
{'abbr': 78,
'code': 78,
'title': 'Total column integrated water (all components including '
'precipitation)',
'units': 'kg m-2'},
{'abbr': 79, 'code': 79, 'title': 'Evaporation rate', 'units': 'kg m-2 s-1'},
{'abbr': 80, 'code': 80, 'title': 'Total condensate', 'units': 'kg/kg'},
{'abbr': 81,
'code': 81,
'title': 'Total column-integrated condensate',
'units': 'kg m-2'},
{'abbr': 82, 'code': 82, 'title': 'Cloud ice mixing-ratio', 'units': 'kg/kg'},
{'abbr': 83,
'code': 83,
'title': 'Specific cloud liquid water content',
'units': 'kg/kg'},
{'abbr': 84,
'code': 84,
'title': 'Specific cloud ice water content',
'units': 'kg/kg'},
{'abbr': 85,
'code': 85,
'title': 'Specific rainwater content',
'units': 'kg/kg'},
{'abbr': 86,
'code': 86,
'title': 'Specific snow water content',
'units': 'kg/kg'},
{'abbr': 90,
'code': 90,
'title': 'Total kinematic moisture flux',
'units': 'kg kg-1 m s-1'},
{'abbr': 91,
'code': 91,
'title': 'u-component (zonal) kinematic moisture flux',
'units': 'kg kg-1 m s-1'},
{'abbr': 92,
'code': 92,
'title': 'v-component (meridional) kinematic moisture flux',
'units': 'kg kg-1 m s-1'},
{'abbr': 93,
'code': 93,
'title': 'Relative humidity with respect to water',
'units': '%'},
{'abbr': 94,
'code': 94,
'title': 'Relative humidity with respect to ice',
'units': '%'},
{'abbr': None, 'code': 255, 'title': 'Missing'})
| [
"baudouin.raoult@ecmwf.int"
] | baudouin.raoult@ecmwf.int |
19da32c04cff4dbd7149d508b7d5296ef497a5e1 | 23998cdb444f98eccc61a8fe369f5201e3e2d9f6 | /ReverseGam.py | 29ad90ca22ea756b817f555f67a4d567df41877e | [] | no_license | anuta-tamelo/python | c948809dc393f14be5bb6e5eed8c95129f3ea284 | 385b944daba55039032f693ae6d2542ca2c15259 | refs/heads/master | 2020-04-06T16:16:18.405030 | 2018-12-16T01:57:53 | 2018-12-16T01:57:53 | 157,613,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,918 | py | # Reversegam: a clone of Othello/Reversi
import random
import sys
WIDTH = 8 # Board is 8 spaces wide
HEIGHT = 8 # Board is 8 spaces tall
def drawBoard(board):
# This function prints the board that it was passed. Returns None.
print(' 12345678')
print(' +--------+')
for y in range(HEIGHT):
print('%s|' % (y+1), end='')
for x in range(WIDTH):
print(board[x][y], end='')
print('|%s' % (y+1))
print(' +--------+')
print(' 12345678')
def getNewBoard():
# Creates a brand-new, blank board data structure.
board = [[' '] * HEIGHT for _ in range(8)]
return board
def isValidMove(board, tile, xstart, ystart):
# Returns False if the player's move on space xstart, ystart is invalid.
# If it is a valid move, returns a list of spaces that would become the player's if they made a move here.
if board[xstart][ystart] != ' ' or not isOnBoard(xstart, ystart):
return False
otherTile = 'O' if tile == 'X' else 'X'
tilesToFlip = []
for xdirection, ydirection in [[0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1], [-1, 0], [-1, 1]]:
x, y = xstart, ystart
x += xdirection # First step in the x direction
y += ydirection # First step in the y direction
while isOnBoard(x, y) and board[x][y] == otherTile:
# Keep moving in this x & y direction.
x += xdirection
y += ydirection
if isOnBoard(x, y) and board[x][y] == tile:
# There are pieces to flip over. Go in the reverse direction until we reach the original space, noting all the tiles along the way.
while True:
x -= xdirection
y -= ydirection
if x == xstart and y == ystart:
break
tilesToFlip.append([x, y])
if len(tilesToFlip) == 0: # If no tiles were flipped, this is not a valid move.
return False
return tilesToFlip
def isOnBoard(x, y):
# Returns True if the coordinates are located on the board.
return x >= 0 and x <= WIDTH - 1 and y >= 0 and y <= HEIGHT - 1
def getBoardWithValidMoves(board, tile):
# Returns a new board with periods marking the valid moves the player can make.
boardCopy = getBoardCopy(board)
for x, y in getValidMoves(boardCopy, tile):
boardCopy[x][y] = '.'
return boardCopy
def getValidMoves(board, tile):
# Returns a list of [x,y] lists of valid moves for the given player on the given board.
validMoves = []
for x in range(WIDTH):
for y in range(HEIGHT):
if isValidMove(board, tile, x, y):
validMoves.append([x, y])
return validMoves
def getScoreOfBoard(board):
# Determine the score by counting the tiles. Returns a dictionary with keys 'X' and 'O'.
xscore = 0
oscore = 0
for x in range(WIDTH):
for y in range(HEIGHT):
if board[x][y] == 'X':
xscore += 1
elif board[x][y] == 'O':
oscore += 1
return {'X':xscore, 'O':oscore}
def enterPlayerTile():
# Lets the player type which tile they want to be.
# Returns a list with the player's tile as the first item and the computer's tile as the second.
tile = ''
while not (tile == 'X' or tile == 'O'):
print('Do you want to be X or O?')
tile = input().upper()
# The first element in the list is the player's tile, and the second is the computer's tile.
return ['X', 'O'] if tile == 'X' else ['O', 'X']
def whoGoesFirst():
# Randomly choose who goes first.
return 'computer' if random.randint(0, 1) == 0 else 'player'
def makeMove(board, tile, move):
# Place the tile on the board at xstart, ystart, and flip any of the opponent's pieces.
# Returns False if this is an invalid move; True if it is valid.
xstart, ystart = move[0], move[1]
tilesToFlip = isValidMove(board, tile, xstart, ystart)
if not tilesToFlip:
return False
board[xstart][ystart] = tile
for x, y in tilesToFlip:
board[x][y] = tile
return True
def getBoardCopy(board):
# Make a duplicate of the board list and return it.
boardCopy = getNewBoard()
for x in range(WIDTH):
for y in range(HEIGHT):
boardCopy[x][y] = board[x][y]
return boardCopy
def isOnCorner(x, y):
# Returns True if the position is in one of the four corners.
return (x == 0 or x == WIDTH - 1) and (y == 0 or y == HEIGHT - 1)
def getPlayerMove(board, playerTile):
# Let the player enter their move.
# Returns the move as [x, y] (or returns the strings 'hints' or 'quit').
DIGITS1TO8 = '12345678'
while True:
print('Enter your move, "quit" to end the game, or "hints" to toggle hints.')
move = input().lower()
if move == 'quit' or move == 'hints':
return move
if len(move) == 2 and move[0] in DIGITS1TO8 and move[1] in DIGITS1TO8:
x = int(move[0]) - 1
y = int(move[1]) - 1
if isValidMove(board, playerTile, x, y):
return [x, y]
else:
print('A move should flip at least one tile.')
else:
print('That is not a valid move. Enter the column (1-8) and then the row (1-8).')
print('For example, 81 will move on the top-right corner.')
def getComputerMove(board, computerTile):
# Given a board and the computer's tile, determine where to
# move and return that move as a [x, y] list.
possibleMoves = getValidMoves(board, computerTile)
random.shuffle(possibleMoves) # randomize the order of the moves
# Always go for a corner if available.
for x, y in possibleMoves:
if isOnCorner(x, y):
return [x, y]
# Find the highest-scoring move possible.
bestScore = -1
for x, y in possibleMoves:
boardCopy = getBoardCopy(board)
makeMove(boardCopy, computerTile, [x, y])
score = getScoreOfBoard(boardCopy)[computerTile]
if score > bestScore:
bestMove = [x, y]
bestScore = score
return bestMove
def printScore(board, playerTile, computerTile):
scores = getScoreOfBoard(board)
print('You: %s points. Computer: %s points.' % (scores[playerTile], scores[computerTile]))
def playGame(playerTile, computerTile):
showHints = False
turn = whoGoesFirst()
print('The ' + turn + ' will go first.')
# Clear the board and place starting pieces.
board = getNewBoard()
board[3][3] = 'X'
board[3][4] = 'O'
board[4][3] = 'O'
board[4][4] = 'X'
while True:
playerValidMoves = getValidMoves(board, playerTile)
computerValidMoves = getValidMoves(board, computerTile)
if playerValidMoves == [] and computerValidMoves == []:
return board # No one can move, so end the game.
elif turn == 'player': # Player's turn
if playerValidMoves != []:
if showHints:
validMovesBoard = getBoardWithValidMoves(board, playerTile)
drawBoard(validMovesBoard)
else:
drawBoard(board)
printScore(board, playerTile, computerTile)
move = getPlayerMove(board, playerTile)
if move == 'quit':
print('Thanks for playing!')
sys.exit() # Terminate the program.
elif move == 'hints':
showHints = not showHints
continue
else:
makeMove(board, playerTile, move)
turn = 'computer'
elif turn == 'computer': # Computer's turn
if computerValidMoves != []:
drawBoard(board)
printScore(board, playerTile, computerTile)
input('Press Enter to see the computer\'s move.')
move = getComputerMove(board, computerTile)
makeMove(board, computerTile, move)
turn = 'player'
print('Welcome to Reversegam!')
playerTile, computerTile = enterPlayerTile()
while True:
finalBoard = playGame(playerTile, computerTile)
# Display the final score.
drawBoard(finalBoard)
scores = getScoreOfBoard(finalBoard)
print('X scored %s points. O scored %s points.' % (scores['X'], scores['O']))
if scores[playerTile] > scores[computerTile]:
print('You beat the computer by %s points! Congratulations!' % (scores[playerTile] - scores[computerTile]))
elif scores[playerTile] < scores[computerTile]:
print('You lost. The computer beat you by %s points.' % (scores[computerTile] - scores[playerTile]))
else:
print('The game was a tie!')
print('Do you want to play again? (yes or no)')
if not input().lower().startswith('y'):
break
| [
"anna.tamelo@gmail.com"
] | anna.tamelo@gmail.com |
2ebcefcec2c64fbf9f76368c9e52f2c4f6031297 | 1a80c38ea020a8b18bb2c61b55caff8a38f553b9 | /SWEA/sol/5356.py | ad8db3bcc47a8c35ae33aab7759fffd9c7fb8cff | [] | no_license | jiwookseo/problem-solving | 775a47825dc73f8a29616ef7011e8ee7be346f80 | eefbefb21608ae0a2b3c75c010ae14995b7fc646 | refs/heads/master | 2020-04-19T03:11:02.659816 | 2019-08-14T08:59:06 | 2019-08-14T08:59:06 | 167,926,883 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | for tc in range(1, int(input())+1):
s = [input() for _ in range(5)]
l = [len(i) for i in s]
ml = max(l)
temp = ""
for c in range(ml):
for r in range(5):
if l[r] > c:
temp += s[r][c]
print("#{} {}".format(tc, temp))
| [
"spjo21@naver.com"
] | spjo21@naver.com |
8aed56aca5803e0616dbfa61773b52b4be2e1572 | f266284f07c07b52e185feeefd3a6b86f791550f | /3 - CreateModels.py | 30b0a23c02582374bc8395dfdd9048334a0587dd | [] | no_license | kato1903/Sklearn-Siniflandirma-Sablonu | 51e22cfd5ba030714f348df8491e3cbef98177e3 | 4c4c6bcbc1cfcf0dba7e2b6df832c814c90bc762 | refs/heads/master | 2020-08-09T03:41:16.271147 | 2019-10-09T18:03:16 | 2019-10-09T18:03:16 | 213,988,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,796 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 6 23:07:02 2019
@author: Toprak
"""
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
from sklearn import tree
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.linear_model import LinearRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.svm import SVC, LinearSVC, NuSVC
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
LogisticRegression = LogisticRegression(multi_class='multinomial',solver='lbfgs')
KNeighbors = KNeighborsClassifier(n_neighbors = 100)
SupportVector = svm.SVC(gamma='scale',probability=True)
NaiveBayes = GaussianNB()
DecisionTree = tree.DecisionTreeClassifier()
RandomForest = RandomForestClassifier(n_estimators=10)
ExtreemRandom = ExtraTreesClassifier(n_estimators=10)
AdaBoost = AdaBoostClassifier()
LDA = LinearDiscriminantAnalysis()
MLPClassifier = MLPClassifier(alpha=1, max_iter=1000)
QDA = QuadraticDiscriminantAnalysis()
nuSVC = NuSVC(gamma='scale',probability=True)
GradientBoostingClassifier = GradientBoostingClassifier()
LogisticRegression.fit(x,y)
KNeighbors.fit(x,y)
SupportVector.fit(x,y)
NaiveBayes.fit(x,y)
DecisionTree.fit(x,y)
RandomForest.fit(x,y)
ExtreemRandom.fit(x,y)
AdaBoost.fit(x,y)
LDA.fit(x,y)
MLPClassifier.fit(x,y)
QDA.fit(x,y)
nuSVC.fit(x,y)
GradientBoostingClassifier.fit(x,y) | [
"noreply@github.com"
] | kato1903.noreply@github.com |
7ec774038e3b6422e1e9f052dcec519132adfd87 | 3cc578c4485f48a5ae5980c3179cd1be0ee95249 | /losses/dsm.py | 1593cf7c490b03a65ab7be0ab1f996d418865f5a | [
"MIT"
] | permissive | ksw0306/AdversarialConsistentScoreMatching | 4bfa19a204bff8747363c453ea958b82de9f747a | 7a6fd84fc46180b0c7ac5680a3610c0b27982561 | refs/heads/master | 2022-12-21T13:05:02.801787 | 2020-09-14T18:09:52 | 2020-09-14T18:09:52 | 299,557,205 | 1 | 0 | MIT | 2020-09-29T08:45:00 | 2020-09-29T08:44:59 | null | UTF-8 | Python | false | false | 2,860 | py | import torch
from losses.ssim import MSSSIMLoss
def anneal_dsm_score_estimation(args, scorenet, samples, sigmas, labels=None, hook=None):
""" Computes the loss
L = 0.5 MSE[ sθ(samples + σz; σ), -z ]
= 0.5 MSE[ sθ(samples + σz; σ), (samples - samples_perturbed) /σ] """
labels_ = torch.randint(0, len(sigmas), (samples.shape[0],), device=samples.device)
used_sigmas = sigmas[labels_].view(samples.shape[0], *([1] * len(samples.shape[1:])))
z = torch.randn_like(samples)
noise = z * used_sigmas
perturbed_samples = samples + noise
scores = scorenet(perturbed_samples, labels)
# Target
if args.target == 'dae':
target = samples
elif args.target == 'gaussian': # Default
target = -z
# target = - 1 / (used_sigmas ** 2) * noise
else:
raise NotImplementedError()
loss = _compute_loss(scores, target, args)
if hook is not None:
hook.write(loss, labels_)
# Adversarial: Returns the denoised sample [This is just to prevent having to resample from a noise,
# when training the discriminator when doing GAN]
fake_denoised_samples = None
if args.adversarial:
if args.target == 'dae':
fake_denoised_samples = scores
elif args.target == 'gaussian': # Default
fake_denoised_samples = scores * used_sigmas + samples
else:
raise NotImplementedError()
return loss.mean(dim=0), fake_denoised_samples, scores
def dsm_score_evaluation(args, scorenet, samples, sigmas):
labels = torch.randint(0, len(sigmas), (samples.shape[0],), device=samples.device)
used_sigmas = sigmas[labels].view(samples.shape[0], *([1] * len(samples.shape[1:])))
z = torch.randn_like(samples)
perturbed_samples = samples + z * used_sigmas
scores = scorenet(perturbed_samples, labels)
# Target
if args.target == 'gaussian':
target = -z
else:
raise NotImplementedError()
#loss = _compute_loss(scores, target, args)
# covariance = (2 - torch.var((scores - target).flatten(1), dim=-1)) / 2
variance = scores.flatten(1).var(dim=-1)
l2 = (scores - target).flatten(1).norm(dim=-1)
l2scaled = (scores.flatten(1) / variance.unsqueeze(1) - target.flatten(1)).norm(dim=-1)
return used_sigmas.flatten(), variance, l2, l2scaled
def _compute_loss(scores, target, args):
if args.loss == "l2":
loss = 0.5 * ((scores - target) ** 2)
elif args.loss == "l1":
loss = torch.abs(scores - target)
elif args.loss == "l1_msssim": # Hybrid loss which better correlates with high quality
msssim_loss = MSSSIMLoss()
loss = .16 * torch.abs(scores - target) + .84 * msssim_loss(scores, target)
else:
raise NotImplementedError
return loss.view(scores.shape[0], -1).sum(dim=-1)
| [
"alexia.jolicoeur-martineau@mail.mcgill.ca"
] | alexia.jolicoeur-martineau@mail.mcgill.ca |
1e96f55bbf95abd6065f6e005d3b2854bedf7cde | 609e8d0c7b9aa7748fafa54cfef9138bc76552ca | /manage.py | eca254d6c09b698aaffb6339afe299cb65cc20e6 | [] | no_license | zt19994/blog | cff8b42a23a916654b263e078ab936c2ea38e531 | b03bbc6d64b22cc9780b49854054edeb5239724f | refs/heads/master | 2021-06-29T22:01:27.250286 | 2017-09-11T11:32:23 | 2017-09-11T11:32:23 | 103,111,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"zt191610942@163.com"
] | zt191610942@163.com |
169bcab941592a4bae380420dabc0e7f61ef6762 | 5784e21246afc7f2899b1ce8f6a4e8233665d750 | /my_memory_card.py | f245af328dd026c65660036054214b166e6a3e6e | [
"CC0-1.0"
] | permissive | PSGPavel/Labirnt1.0 | 7ed1181b2957cd8a32dbd9a592251a52d99d85c7 | c584bb17e4d397fd61ff60abf63a59c95aa29d6a | refs/heads/main | 2023-05-09T07:56:15.731913 | 2021-06-06T08:24:37 | 2021-06-06T08:24:37 | 374,305,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,767 | py | #создай приложение для запоминания информации
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QApplication, QWidget, QHBoxLayout, QVBoxLayout, QGroupBox, QButtonGroup, QRadioButton, QPushButton, QLabel, QMessageBox)
from random import shuffle
app = QApplication([])
btn_OK = QPushButton('Ответить')
lb_Question = QLabel('Самый сложный вопрос в мире!')
RadioGroupBox = QGroupBox("Варианты ответов")
rbtn_1 = QRadioButton('Вариант 1')
rbtn_2 = QRadioButton('Вариант 2')
rbtn_3 = QRadioButton('Вариант 3')
rbtn_4 = QRadioButton('Вариант 4')
RadioGroup = QButtonGroup()
RadioGroup.addButton(rbtn_1)
RadioGroup.addButton(rbtn_2)
RadioGroup.addButton(rbtn_3)
RadioGroup.addButton(rbtn_4)
layout_ans1 = QHBoxLayout()
layout_ans2 = QVBoxLayout()
layout_ans3 = QVBoxLayout()
layout_ans2.addWidget(rbtn_1)
layout_ans2.addWidget(rbtn_2)
layout_ans3.addWidget(rbtn_3)
layout_ans3.addWidget(rbtn_4)
layout_ans1.addLayout(layout_ans2)
layout_ans1.addLayout(layout_ans3)
RadioGroupBox.setLayout(layout_ans1)
AnsGroupBox = QGroupBox("Результат теста")
lb_Result = QLabel('прав ты или нет?')
lb_Correct = QLabel('ответ будет тут!')
layout_res = QVBoxLayout()
layout_res.addWidget(lb_Result, alignment=(Qt.AlignLeft | Qt.AlignTop))
layout_res.addWidget(lb_Correct, alignment=Qt.AlignHCenter, stretch=2)
AnsGroupBox.setLayout(layout_res)
layout_line1 = QHBoxLayout()
layout_line2 = QHBoxLayout()
layout_line3 = QHBoxLayout()
layout_line1.addWidget(lb_Question, alignment=(Qt.AlignHCenter | Qt.AlignVCenter))
layout_line2.addWidget(RadioGroupBox)
layout_line2.addWidget(AnsGroupBox)
AnsGroupBox.hide()
layout_line3.addWidget(btn_OK, stretch=2)
layout_card = QVBoxLayout()
layout_card.addLayout(layout_line1, stretch=2)
layout_card.addLayout(layout_line2, stretch=8)
layout_card.addLayout(layout_line3, stretch=1)
class Gerund():
def __init__(self, question, right_answer, wrong1, wrong2, wrong3):
self.question = question
self.right_answer = right_answer
self.wrong1 = wrong1
self.wrong2 = wrong2
self.wrong3 = wrong3
question_list = []
question_list.append(Gerund('Куда на курортных пляжах просят не заплывать отдыхающих?', 'За буйки', 'За горизонт', 'В камыши', 'За границу'))
question_list.append(Gerund("При падении чего принято загадывать желание?", "Звезды", "Температуры", "Курса рубля", "Дисциплины" ))
question_list.append(Gerund("Что показывает судья футболисту, делая предупреждение?", "Желтую карточку", "Бюллетень", "Язык", "Паспорт" ))
question_list.append(Gerund("Какой рубрики в разделе объявлений не существует?", "Обую", "Продам", "Куплю", "Сниму" ))
question_list.append(Gerund("Какой запрет реже всего нарушают российские граждане?", "Не влезай, убьет!", "Не курить!", "Соблюдайте очередь!", "Вход - по пропускам!" ))
def show_question():
''' показать панель вопросов '''
RadioGroupBox.show()
AnsGroupBox.hide()
btn_OK.setText('Ответить')
RadioGroup.setExclusive(False)
rbtn_1.setChecked(False)
rbtn_2.setChecked(False)
rbtn_3.setChecked(False)
rbtn_4.setChecked(False)
RadioGroup.setExclusive(True)
answers = [rbtn_1, rbtn_2, rbtn_3, rbtn_4]
def ask(q: Gerund):
''' функция записывает значения вопроса и ответов в соответствующие виджеты,
при этом варианты ответов распределяются случайным образом'''
shuffle(answers)
answers[0].setText(q.right_answer)
answers[1].setText(q.wrong1)
answers[2].setText(q.wrong2)
answers[3].setText(q.wrong3)
lb_Question.setText(q.question)
lb_Correct.setText(q.right_answer)
show_question()
def check_answer():
''' если выбран какой-то вариант ответа, то надо проверить и показать панель ответов'''
if answers[0].isChecked():
window.count1 = window.count1 + 1
lb_Result.setText("Правильно")
RadioGroupBox.hide()
AnsGroupBox.show()
btn_OK.setText('Следующий вопрос')
else:
if answers[1].isChecked() or answers[2].isChecked() or answers[3].isChecked():
lb_Result.setText("Неверно")
RadioGroupBox.hide()
AnsGroupBox.show()
btn_OK.setText('Следующий вопрос')
def next_question():
window.count = window.count + 1
if window.count >= len(question_list):
victory_win = QMessageBox()
victory_win.setText("Ты прошёл тест на " + str(window.count1) + " баллов")
victory_win.exec_()
q = question_list[window.count]
ask(q)
def click_Ok():
if btn_OK.text() == "Ответить":
check_answer()
else:
next_question()
window = QWidget()
window.setLayout(layout_card)
window.setWindowTitle('Memo Card')
window.count = -1
window.count1 = 0
btn_OK.clicked.connect(click_Ok)
next_question()
window.show()
app.exec()
| [
"noreply@github.com"
] | PSGPavel.noreply@github.com |
6a741aff70337c012262ea259c09137ac5469616 | 6f0f25d3b579b065bc13db13885b322be2e8b78f | /ado4.py | 822df24cc1090ef042e8e7b35301a6960f050e98 | [] | no_license | wildgrok/Python | 881cb29f2055e63f34cc2d67237f7336c12805f8 | 58c5d5328bfae5aaa15a34694a38af5af4fe1f09 | refs/heads/master | 2021-06-23T18:48:47.561291 | 2020-11-16T01:54:40 | 2020-11-16T01:54:40 | 139,497,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,019 | py | import win32com.client
adoConn = win32com.client.Dispatch('ADODB.Connection')
#connect = "Provider=SQLOLEDB.1;Data Source=2ua2070dlj\sql2014;Initial Catalog=msdb;Integrated Security=SSPI;"
connect = 'Provider=SQLOLEDB.1;Data Source=ccltstecosqldb1\\tstecosql1;Initial Catalog=msdb;Integrated Security=SSPI;'
sql = '''
SELECT sysjobs.name,
hist.message + '(' + CAST(hist.run_date as varchar) + '-' + CAST(hist.run_time as varchar) + ')' as message
FROM sysjobs, sysjobhistory as hist
WHERE sysjobs.job_id = hist.job_id
AND hist.run_status = 0
AND hist.instance_id = (SELECT MAX(instance_id) FROM sysjobhistory WHERE sysjobhistory.job_id = sysjobs.job_id)
'''
adoConn.Open(connect)
alog = adoConn.Execute(sql)
while not alog[0].EOF:
task=alog[0].Fields(0).Value
entry=alog[0].Fields(1).Value
print('<TR>\n')
print('<TD VALIGN=top><FONT FACE="COURIER" SIZE=2>%s</FONT></TD>\n' % (task))
print('<TD VALIGN=top><FONT FACE="COURIER" SIZE=2>%s</FONT></TD>\n' % (entry))
print('</TR>\n')
alog[0].MoveNext()
| [
"jbesad@yahoo.com"
] | jbesad@yahoo.com |
952f0ccca47807b4540c47e2a8a72c32c763961a | a8a5772674e62beaa4f5b1f115d280103fd03749 | /persistence.py | 00a776c4f7564a2e7aae6994a1f3c1b497b94024 | [] | no_license | tahentx/pv_workbook | c6fb3309d9acde5302dd3ea06a34ad2aee0de4b7 | 08912b0ef36a5226d23fa0430216a3f277aca33b | refs/heads/master | 2022-12-12T20:39:35.688510 | 2021-03-30T03:20:54 | 2021-03-30T03:20:54 | 172,827,250 | 0 | 1 | null | 2022-12-08T16:47:39 | 2019-02-27T02:25:24 | Python | UTF-8 | Python | false | false | 188 | py | n = 38941
value = [int(x) for x in str(n)]
persist = value[0] * value[1]
next_value = [int(x) for x in str(persist)]
persist_again = next_value[0] * next_value[1]
print(str(persist_again)
| [
"hendricks.ta@gmail.com"
] | hendricks.ta@gmail.com |
370b26c3e96679ec48353c93b5ab6a07f4efd95d | a2c694dffd799b7cc6d952cf9dffd20b88d28bf8 | /days/10-12-pytest/interest_calculator/program.py | 1812e2f6c34263faac2dd217262fdc9a9a6ef364 | [
"MIT"
] | permissive | masaimahapa/100daysofcode-with-python-course | 97309bb7d4b27a68bccd20180d1a457bbc3e890a | 4e85631fcfd74d3afdfc6cb8db71ddacfefbcbc9 | refs/heads/master | 2020-06-30T13:31:28.795084 | 2019-10-25T08:40:59 | 2019-10-25T08:40:59 | 200,841,332 | 0 | 0 | null | 2019-08-06T11:52:21 | 2019-08-06T11:52:21 | null | UTF-8 | Python | false | false | 751 | py | def main():
try:
principal_amount= float(input('How much money are you investing?'))
except ValueError:
print('Must be amount in numbers')
main()
years= int(input('For how many years?'))
interest= float(input('What is the interest rate? (1% = 0.01)'))
end_amount= calculate_interest(principal_amount, years, interest)
print('At the end of your investment, you will have {} rands.'.format(end_amount))
def calculate_interest(principal_amount, years, interest):
return round(principal_amount*(1+ interest)** years, 2)
def show_header():
print('-------------------------')
print(' Interest Calculator')
print('-------------------------')
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | masaimahapa.noreply@github.com |
05f6da044977d12f49574500ccb24d84c43ab32d | 88ae8695987ada722184307301e221e1ba3cc2fa | /third_party/webdriver/pylib/selenium/webdriver/common/desired_capabilities.py | 0f97e7273aeda07105d9a8c34258dad8554e9e60 | [
"BSD-3-Clause",
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 2,994 | py | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The Desired Capabilities implementation.
"""
class DesiredCapabilities(object):
"""
Set of default supported desired capabilities.
Use this as a starting point for creating a desired capabilities object for
requesting remote webdrivers for connecting to selenium server or selenium grid.
Usage Example::
from selenium import webdriver
selenium_grid_url = "http://198.0.0.1:4444/wd/hub"
# Create a desired capabilities object as a starting point.
capabilities = DesiredCapabilities.FIREFOX.copy()
capabilities['platform'] = "WINDOWS"
capabilities['version'] = "10"
# Instantiate an instance of Remote WebDriver with the desired capabilities.
driver = webdriver.Remote(desired_capabilities=capabilities,
command_executor=selenium_grid_url)
Note: Always use '.copy()' on the DesiredCapabilities object to avoid the side
effects of altering the Global class instance.
"""
FIREFOX = {
"browserName": "firefox",
"acceptInsecureCerts": True,
"moz:debuggerAddress": True,
}
INTERNETEXPLORER = {
"browserName": "internet explorer",
"platformName": "windows",
}
EDGE = {
"browserName": "MicrosoftEdge",
}
CHROME = {
"browserName": "chrome",
}
OPERA = {
"browserName": "opera",
}
SAFARI = {
"browserName": "safari",
"platformName": "mac",
}
HTMLUNIT = {
"browserName": "htmlunit",
"version": "",
"platform": "ANY",
}
HTMLUNITWITHJS = {
"browserName": "htmlunit",
"version": "firefox",
"platform": "ANY",
"javascriptEnabled": True,
}
IPHONE = {
"browserName": "iPhone",
"version": "",
"platform": "mac",
}
IPAD = {
"browserName": "iPad",
"version": "",
"platform": "mac",
}
WEBKITGTK = {
"browserName": "MiniBrowser",
"version": "",
"platform": "ANY",
}
WPEWEBKIT = {
"browserName": "MiniBrowser",
"version": "",
"platform": "ANY",
}
| [
"jengelh@inai.de"
] | jengelh@inai.de |
4a7e30d16625191a7cfb309eb48458da9a984587 | 7be24432e7d2f5e4f015519b6e8c38e4ce518296 | /test~ | ce49e1021f3cf9f6872cbefac38393a6893cbf69 | [] | no_license | naiyucko/pythonPort | e43b41875d138d36f9ae3f5b0eb9599cd55172d9 | 9c7608988108394ee2132ca872eb5530e11ca957 | refs/heads/master | 2021-01-16T22:55:07.571885 | 2013-07-25T18:34:39 | 2013-07-25T18:34:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,830 | #!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
# generated by wxGlade 0.6.5 (standalone edition) on Thu Jan 31 15:48:57 2013
from wxPython.wx import *
# begin wxGlade: extracode
# end wxGlade
class MainWindow(wxFrame):
def __init__(self, *args, **kwds):
# begin wxGlade: MainWindow.__init__
kwds["style"] = wxDEFAULT_FRAME_STYLE
wxFrame.__init__(self, *args, **kwds)
self.label_1 = wxStaticText(self, -1, "AZ:")
self.text_ctrl_1 = wxTextCtrl(self, -1, "0.0\n", style=wxTE_READONLY)
self.label_2 = wxStaticText(self, -1, "EL")
self.text_ctrl_2 = wxTextCtrl(self, -1, "0.0", style=wxTE_READONLY)
self.label_2_copy = wxStaticText(self, -1, "RA\n")
self.text_ctrl_2_copy = wxTextCtrl(self, -1, "0.0", style=wxTE_READONLY)
self.label_2_copy_1 = wxStaticText(self, -1, "DEC")
self.text_ctrl_2_copy_1 = wxTextCtrl(self, -1, "0.0", style=wxTE_READONLY)
self.label_2_copy_2 = wxStaticText(self, -1, "UTC")
self.text_ctrl_2_copy_2 = wxTextCtrl(self, -1, "0.0", style=wxTE_READONLY)
self.label_2_copy_3 = wxStaticText(self, -1, "LST")
self.text_ctrl_2_copy_3 = wxTextCtrl(self, -1, "0.0", style=wxTE_READONLY)
self.label_2_copy_4 = wxStaticText(self, -1, "LOCAL")
self.text_ctrl_2_copy_4 = wxTextCtrl(self, -1, "0.0", style=wxTE_READONLY)
self.notebook_1 = wxNotebook(self, -1, style=0)
self.notebook_1_pane_1 = wxPanel(self.notebook_1, -1)
self.notebook_1_pane_2 = wxPanel(self.notebook_1, -1)
self.notebook_1_pane_3 = wxPanel(self.notebook_1, -1)
self.notebook_1_pane_4 = wxPanel(self.notebook_1, -1)
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: MainWindow.__set_properties
self.SetTitle("frame_2")
# end wxGlade
def __do_layout(self):
# begin wxGlade: MainWindow.__do_layout
sizer_2 = wxBoxSizer(wxVERTICAL)
sizer_3 = wxBoxSizer(wxVERTICAL)
sizer_4 = wxBoxSizer(wxHORIZONTAL)
grid_sizer_1 = wxGridSizer(7, 2, 0, 0)
grid_sizer_1.Add(self.label_1, 0, wxALL|wxEXPAND, 0)
grid_sizer_1.Add(self.text_ctrl_1, 0, wxEXPAND, 0)
grid_sizer_1.Add(self.label_2, 0, wxALL|wxEXPAND, 0)
grid_sizer_1.Add(self.text_ctrl_2, 0, wxEXPAND, 0)
grid_sizer_1.Add(self.label_2_copy, 0, wxALL|wxEXPAND, 0)
grid_sizer_1.Add(self.text_ctrl_2_copy, 0, wxEXPAND, 0)
grid_sizer_1.Add(self.label_2_copy_1, 0, wxALL|wxEXPAND, 0)
grid_sizer_1.Add(self.text_ctrl_2_copy_1, 0, wxEXPAND, 0)
grid_sizer_1.Add(self.label_2_copy_2, 0, wxALL|wxEXPAND, 0)
grid_sizer_1.Add(self.text_ctrl_2_copy_2, 0, wxEXPAND, 0)
grid_sizer_1.Add(self.label_2_copy_3, 0, wxALL|wxEXPAND, 0)
grid_sizer_1.Add(self.text_ctrl_2_copy_3, 0, wxEXPAND, 0)
grid_sizer_1.Add(self.label_2_copy_4, 0, wxALL|wxEXPAND, 0)
grid_sizer_1.Add(self.text_ctrl_2_copy_4, 0, wxEXPAND, 0)
sizer_4.Add(grid_sizer_1, 1, wxEXPAND, 0)
sizer_3.Add(sizer_4, 1, wxEXPAND, 0)
self.notebook_1.AddPage(self.notebook_1_pane_1, "Joy Stick")
self.notebook_1.AddPage(self.notebook_1_pane_2, "RA/DEC")
self.notebook_1.AddPage(self.notebook_1_pane_3, "Scanning ")
self.notebook_1.AddPage(self.notebook_1_pane_4, "Options")
sizer_3.Add(self.notebook_1, 1, wxEXPAND, 0)
sizer_2.Add(sizer_3, 1, wxEXPAND, 0)
self.SetSizer(sizer_2)
sizer_2.Fit(self)
self.Layout()
# end wxGlade
# end of class MainWindow
if __name__ == "__main__":
app = wxPySimpleApp(0)
wxInitAllImageHandlers()
frame_1 = (None, -1, "")
app.SetTopWindow(frame_1)
frame_1.Show()
app.MainLoop()
| [
"peterm@deepspace.ucsb.edu"
] | peterm@deepspace.ucsb.edu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.