blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
156869ad9e5674ab69c9b1b1d37b2d2d4946460c | 7ae374f11cc2e9673fb2c39d00e942253418b41a | /connect_H351/telnet_H351.py | 199d5322ff7cbbf4f64dcf01e540e2edbee3d854 | [] | no_license | zeewii/H351 | 44c05acf0f814558f1fa8d8e2a9c942fee707985 | 80231ff0434a15835d6b484cbc498b2f963d048c | refs/heads/master | 2021-01-10T00:57:18.501664 | 2015-09-28T10:39:24 | 2015-09-28T10:39:24 | 43,277,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,360 | py | #coding=utf-8
#描述:本模块为通过使用pexpect模块登录telnet输入命令,并取出输入结果
#作者:曾祥卫
import datetime
import pexpect
#输入:user-登录名,ip-登录ip,password1-登录密码1,password2-登录密码2,command-输入命令
#输出:输入命令返回的结果
def telnet_command(user,ip,password1,password2,command):
    """Log in to a device over telnet (two-stage password) and run one command.

    NOTE(review): Python 2 code (print statements, old-style except clause).

    user/ip         -- telnet login name and host address
    password1/2     -- first-stage and second-stage passwords
    command         -- shell command to execute after login
    Returns the raw output captured before the prompt, or None on failure.
    """
    try:
        # String that appears in the remote prompt once login succeeded
        finish = ":/#"
        # Spawn a child process running the telnet command
        child = pexpect.spawn('telnet %s'%ip)
        # Expected patterns: "Username", "Unknown host", EOF, timeout
        i = child.expect(["(?i)Username", "(?i)Unknown host", pexpect.EOF, pexpect.TIMEOUT])
        # Anything but index 0 (EOF, timeout or "Unknown host") means login failed
        if i !=0:
            print u"telnet登录失败,由于登录时超时或EOF或主机名无效"
            child.close(force=True)
        # "Username" matched: send the user name
        else:
            child.sendline(user)
            # Expected patterns: "Password", EOF, timeout
            i = child.expect(["(?i)Password", pexpect.EOF, pexpect.TIMEOUT])
            # On EOF or timeout report the failure and close the child
            if i != 0:
                print u"telnet登录失败,由于输入密码时超时或EOF"
                # Force the child process to terminate
                child.close(force=True)
            # NOTE(review): no return/else above, so execution still falls
            # through to sendline even after the failure branch closed child.
            # "Password" matched: send the first password
            child.sendline(password1)
            # Wait for the 'router>' prompt, then enter 'sh'
            child.expect('router>')
            child.sendline('sh')
            # Expected patterns: "Password", EOF, timeout
            i = child.expect(["(?i)Password", pexpect.EOF, pexpect.TIMEOUT])
            # On EOF or timeout report the failure and close the child
            if i != 0:
                print u"telnet登录失败,由于输入密码时超时或EOF"
                # Force the child process to terminate
                child.close(force=True)
            # "Password" matched: send the second password
            child.sendline(password2)
            # Wait for the remote shell prompt
            child.expect(finish)
            # Prompt matched: send the command to execute
            child.sendline(command)
            # Wait for the prompt again so the whole output is captured
            child.expect(finish)
            # Everything printed before the prompt is the command output
            result = child.before
            print result
            # Append a timestamped copy of the output to telnet_log.txt as backup
            f = open('telnet_log.txt','a')
            str1 = str(datetime.datetime.now())+' '
            f.writelines(str1+result)
            f.close()
            # Hand control of the telnet child over to the user (disabled)
            #child.interact()
            # Terminate the telnet child process
            child.close(force=True)
            # Return the captured command output
            return result
    # On any pexpect error, print the reason
    except pexpect.ExceptionPexpect, e:
        print 'telnet连接失败',str(e)
if __name__ == '__main__':
user = '100msh'
ip = '192.168.11.1'
password1 = '100msh'
password2 = '@w$r^y*i90'
command = 'ifconfig br-lan'
result = telnet_command(user,ip,password1,password2,command)
print result | [
"zeewii@sina.com"
] | zeewii@sina.com |
9902a282708246d8a194f92f9c74a17d677957e7 | 7f3faa642df02d48fd4dbf7c22223ca81c545d94 | /Payment/manage.py | 1c4554eae75d1b7c7c6e5d32aeb944d8689b1f7b | [
"Apache-2.0"
] | permissive | silop4all/payment-api | fbcc23572a216eabba998f52b45db5a664a8d172 | c331af421cd882d0b82d291251d1ce1c7f1a7223 | refs/heads/master | 2021-05-06T10:36:16.842595 | 2018-03-01T14:02:11 | 2018-03-01T14:02:11 | 114,152,868 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | #!/usr/bin/env python
"""
Command-line utility for administrative tasks.
"""
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless the caller already
    # exported a DJANGO_SETTINGS_MODULE of their own.
    os.environ.setdefault(
        "DJANGO_SETTINGS_MODULE",
        "Payment.settings"
    )
    # Imported here, after the settings module has been configured.
    from django.core.management import execute_from_command_line
    # Dispatch the CLI arguments (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| [
"pathanasoulis@ep.singularlogic.eu"
] | pathanasoulis@ep.singularlogic.eu |
5131a9c888902430b3d4a3d54233d26783ca9679 | bf99b1b14e9ca1ad40645a7423f23ef32f4a62e6 | /AtCoder/abc/065d.py | 9fc11d2c49550f58cf2f400ec67dd7e78aefe5b5 | [] | no_license | y-oksaku/Competitive-Programming | 3f9c1953956d1d1dfbf46d5a87b56550ff3ab3db | a3ff52f538329bed034d3008e051f30442aaadae | refs/heads/master | 2021-06-11T16:14:12.635947 | 2021-05-04T08:18:35 | 2021-05-04T08:18:35 | 188,639,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | import heapq
# Minimum spanning tree over N cities where the edge cost between two
# cities is min(|dx|, |dy|).  Only x-adjacent and y-adjacent pairs can
# ever be the cheapest link, so O(N) candidate edges per sort suffice
# instead of all O(N^2) pairs.
N = int(input())
# cities[i] = (original index, x, y)
cities = [0 for _ in range(N)]
for i in range(N):
    x, y = map(int, input().split())
    cities[i] = (i, x, y)
# Adjacency lists: edges[a] holds (cost, b) candidate edges.
edges = [[] for _ in range(N)]
# Candidate edges between neighbours in x-order.
cities.sort(key=lambda A : A[1])
for i in range(N - 1):
    a, xFrom, yFrom = cities[i]
    b, xTo, yTo = cities[i + 1]
    cost = min(abs(xFrom - xTo), abs(yFrom - yTo))
    edges[a].append((cost, b))
    edges[b].append((cost, a))
# Same construction for neighbours in y-order.
cities.sort(key=lambda A : A[2])
for i in range(N - 1):
    a, xFrom, yFrom = cities[i]
    b, xTo, yTo = cities[i + 1]
    cost = min(abs(xFrom - xTo), abs(yFrom - yTo))
    edges[a].append((cost, b))
    edges[b].append((cost, a))
# Prim's algorithm from city 0: repeatedly take the cheapest edge that
# reaches a vertex not yet in the tree.
vertex = set([0])
newEdge = []  # NOTE(review): unused leftover
que = []
for cost, to in edges[0]:
    heapq.heappush(que, (cost, to))
ans = 0
while len(vertex) < N:
    cost, now = heapq.heappop(que)
    if now in vertex:
        continue
    ans += cost
    vertex.add(now)
    for c, to in edges[now]:
        if not to in vertex:
            heapq.heappush(que, (c, to))
print(ans) | [
"y.oksaku@stu.kanazawa-u.ac.jp"
] | y.oksaku@stu.kanazawa-u.ac.jp |
9b2ff42f279cb4b23f9099a170ceb8081fe7fc7b | 6e1ec298b06055e4c3ff831ea011b0857e4a6735 | /breakout.py | a688b45d419f647b3d2508d0d883c19dd9acc658 | [] | no_license | tylorjilk/ball_game | 47ab13cdf485b5f92dbc4464a259b2c8821632ab | 8652dd1144a00e17c3fe1c0e137bbbce4a1eed00 | refs/heads/master | 2020-04-07T07:06:53.815537 | 2018-11-21T03:23:19 | 2018-11-21T03:23:19 | 158,164,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,792 | py | import pygame, sys, random
from pygame.locals import *
import breakout_constants as bc
"""
COLORS = {
0 : (255, 255, 255), # WHITE
1 : ( 0, 0,0), # BLACK
2 : ( 0, 255, 255), # AQUA
3 : ( 0, 0, 255), # BLUE
4 : (255, 0, 255), # FUCHSIA
5 : (128, 128, 128), # GRAY
6 : ( 0, 128, 0), # GREEN
7 : ( 0, 255, 0), # LIME
8 : (128, 0, 0), # MAROON
9 : ( 0, 0, 128), # NAVY_BLUE
10 : (128, 128, 0), # OLIVE
11 : (128, 0, 128), # PURPLE
12 : (255, 0, 0), # RED
13 : (192, 192, 192), # SILVER
14 : ( 0, 128, 128), # TEAL
15 : (255, 255, 0) # YELLOW
}
"""
class Ball:
    """Mutable state of the bouncing ball: position, velocity and looks."""

    def __init__(self, x, y, vx, vy, col, rad):
        """Store position (x, y), velocity (vx, vy), colour and radius."""
        self.x, self.y = x, y
        self.vx, self.vy = vx, vy
        self.color = col
        self.radius = rad
class Paddle:
    """Mutable state of the player's paddle: position, size and colour."""

    def __init__(self, x, y, wid, ht, col):
        """Store the top-left corner (x, y), dimensions and colour."""
        self.x, self.y = x, y
        self.width, self.height = wid, ht
        self.color = col
# Single shared ball/paddle instances, initialised from breakout_constants.
ball = Ball(bc.ballX, bc.ballY, bc.ballVX, bc.ballVY, bc.ballColor, bc.ballRadius)
paddle = Paddle(bc.paddleX, bc.paddleY, bc.paddleWidth, bc.paddleHeight, bc.paddleColor)
def main():
    """Initialise pygame and run the game loop until the window is closed."""
    pygame.init()
    fpsClock = pygame.time.Clock()
    # set up the window
    DISPLAYSURF = pygame.display.set_mode((bc.DISPLAY_WIDTH, bc.DISPLAY_HEIGHT), 0, 32)
    pygame.display.set_caption(bc.DISPLAY_CAPTION)
    initializeBallValues()
    drawBall(DISPLAYSURF)
    drawPaddle(DISPLAYSURF)
    while True:
        # Each frame: clear, resolve collisions, advance physics, redraw.
        DISPLAYSURF.fill(bc.WHITE)
        checkBallPath()
        moveBall()
        movePaddle()
        drawBall(DISPLAYSURF)
        drawPaddle(DISPLAYSURF)
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
        pygame.display.update()
        # Cap the frame rate at bc.FPS.
        fpsClock.tick(bc.FPS)
def initializeBallValues():
    """Give the shared ball a random starting speed and random sign per axis."""
    ball.vx = random.uniform(bc.ballVXMin, bc.ballVXMax)
    if bool(random.getrandbits(1)):
        ball.vx = -ball.vx
    ball.vy = random.uniform(bc.ballVYMin, bc.ballVYMax)
    if bool(random.getrandbits(1)):
        ball.vy = -ball.vy
def drawBall(DISPLAYSURF):
    """Draw the shared ball on the surface (coordinates truncated to ints)."""
    pygame.draw.circle(DISPLAYSURF, ball.color, (int(ball.x), int(ball.y)), ball.radius)
def drawPaddle(DISPLAYSURF):
    """Draw the shared paddle rectangle on the surface."""
    pygame.draw.rect(DISPLAYSURF, paddle.color, (paddle.x, paddle.y, paddle.width, paddle.height))
def checkBallPath():
    """Bounce the ball off the window edges and the paddle before it moves."""
    checkDisplayEdges()
    checkPaddleCollision()
def moveBall():
    """Advance the ball one frame along its current velocity vector."""
    ball.x += ball.vx
    ball.y += ball.vy
def movePaddle():
    """Centre the paddle under the mouse cursor, clamped to the window."""
    cursor_x = pygame.mouse.get_pos()[0]
    target = cursor_x - paddle.width / 2
    if target < 0:
        # Left wall.
        target = 0
    elif target + paddle.width > bc.DISPLAY_WIDTH:
        # Right wall.
        target = bc.DISPLAY_WIDTH - paddle.width
    paddle.x = target
def checkDisplayEdges():
    """Reverse the ball's velocity when its next step would leave the window."""
    # Check right edge
    if (ball.x + ball.radius + ball.vx >= bc.DISPLAY_WIDTH):
        ball.vx = -ball.vx
    # Check left edge
    if (ball.x - ball.radius + ball.vx <= 0):
        ball.vx = -ball.vx
    # Check top edge
    if (ball.y - ball.radius + ball.vy <= 0):
        ball.vy = -ball.vy
    # Check bottom edge (the ball simply bounces; there is no lose condition)
    if (ball.y + ball.radius + ball.vy >= bc.DISPLAY_HEIGHT):
        ball.vy = -ball.vy
def checkPaddleCollision():
    """Bounce the ball off whichever paddle edge its next step would cross.

    Each test checks that the ball is currently outside one paddle edge and
    that adding the velocity would carry it across that edge this frame.
    """
    # Check top edge
    if (ball.y + ball.radius <= paddle.y and ball.y + ball.radius + ball.vy >= paddle.y and ball.x + ball.vx <= paddle.x + paddle.width and ball.x + ball.vx >= paddle.x):
        ball.vy = -ball.vy
    # Check right edge
    if (ball.x - ball.radius >= paddle.x + paddle.width and ball.x + ball.vx - ball.radius <= paddle.x + paddle.width and ball.y + ball.vy >= paddle.y and ball.y + ball.vy <= paddle.y + paddle.height):
        ball.vx = -ball.vx
    # Check left edge
    if (ball.x + ball.radius <= paddle.x and ball.x + ball.vx + ball.radius >= paddle.x and ball.y + ball.vy >= paddle.y and ball.y + ball.vy <= paddle.y + paddle.height):
        ball.vx = -ball.vx
    # Check bottom edge
    if (ball.y - ball.radius >= paddle.y + paddle.height and ball.y - ball.radius + ball.vy <= paddle.y + paddle.height and ball.x + ball.vx >= paddle.x and ball.x + ball.vx <= paddle.x + paddle.width):
        ball.vy = -ball.vy
if __name__ == '__main__':
main() | [
"tjilk2663@gmail.com"
] | tjilk2663@gmail.com |
1e1e14496a4ff4181136795e1206bfc147a0a3b7 | 75dcb56e318688499bdab789262839e7f58bd4f6 | /_algorithms_challenges/leetcode/LeetcodePythonProject/leetcode_0751_0800/LeetCode759_EmployeeFreeTime.py | ff1eb9feb5d31dc20ea349c46e94b43ceaedee09 | [] | no_license | syurskyi/Algorithms_and_Data_Structure | 9a1f358577e51e89c862d0f93f373b7f20ddd261 | 929dde1723fb2f54870c8a9badc80fc23e8400d3 | refs/heads/master | 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 | Jupyter Notebook | UTF-8 | Python | false | false | 1,595 | py | '''
Created on Mar 30, 2018
@author: tongq
'''
# Definition for an interval.
class Interval(object):
    """Simple record holding an interval's start and end points."""

    def __init__(self, s=0, e=0):
        """Record the start (s) and end (e) of the interval."""
        self.start, self.end = s, e
class Solution(object):
    """LeetCode 759 -- Employee Free Time."""

    def employeeFreeTime(self, schedule):
        """
        Return the gaps common to every employee's working schedule.

        :type schedule: List[List[Interval]]
        :rtype: List[Interval]

        Flattens all busy intervals into a heap, sweeps them in ascending
        start order while merging overlaps, and reports every gap between
        consecutive merged busy blocks.

        BUGFIX: the previous merge loop never popped an interval that was
        fully contained in the current busy block (e.g. [1,10] followed by
        [5,6]), so the while-loop spun forever -- including on the first
        sample in test() below.  It also raised IndexError on an empty
        schedule; now an empty list is returned instead.
        """
        import heapq
        heap = []
        for employee in schedule:
            for inter in employee:
                heapq.heappush(heap, [inter.start, inter.end])
        if not heap:
            return []
        res = []
        # current = the busy block being merged, as a mutable [start, end].
        current = heapq.heappop(heap)
        while heap:
            nxt = heapq.heappop(heap)
            if nxt[0] > current[1]:
                # Disjoint: the space between the blocks is free time.
                res.append(Interval(current[1], nxt[0]))
                current = nxt
            else:
                # Overlapping or contained: extend the busy block.
                current[1] = max(current[1], nxt[1])
        return res

    def test(self):
        """Print the free time found for two sample schedules."""
        testCases = [
            [
                [[1,2],[5,6]],
                [[1,3]],[[4,10]],
            ],
            [
                [[1,3],[6,7]],[[2,4]],
                [[2,5],[9,12]],
            ],
        ]
        for schedule in testCases:
            print('schedule: %s' % schedule)
            # Convert the raw [start, end] pairs into Interval objects.
            schedule = [[Interval(lo, hi) for lo, hi in arr] for arr in schedule]
            result = self.employeeFreeTime(schedule)
            res = [[inter.start, inter.end] for inter in result]
            print('result: %s' % res)
            print('-='*30+'-')
if __name__ == '__main__':
    # Run the ad-hoc smoke test when executed directly.
    Solution().test()
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
4e96486fda291297b6b7b7b5830180b891f7de07 | 54df8336b50e8f2d7dbe353f0bc51a2b3489095f | /Django/Django_Old/disa-py/member/admin.py | f571a992fa1e04afacdcd4fa42521d6044b42e0d | [] | no_license | SurendraKumarAratikatla/MyLenovolapCodes1 | 42d5bb7a14bfdf8d773ee60719380ee28ff4947a | 12c56200fcfd3e5229bfeec209fd03b5fc35b823 | refs/heads/master | 2023-06-17T15:44:18.312398 | 2021-07-19T10:28:11 | 2021-07-19T10:28:11 | 387,358,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,415 | py | from django.contrib import admin
# Register your models here.
from models import *
from django.contrib.admin import ModelAdmin
from django.db.models.fields import Field
from django.contrib.admin import SimpleListFilter
'''#from assign.disa-py.disa.admin_site import custom_admin_site
class country(SimpleListFilter):
title = 'name' # or use _('country') for translated title
parameter_name = 'name'
def lookups(self, request, model_admin):
list_of_countries = []
queryset = Organisation.objects.all()
for countries in queryset:
list_of_countries.append(self.id)
return sorted(list_of_countries, key=lambda tp: tp[1])
def queryset(self, request, queryset):
if self.value():
return queryset.filter(organisations_id=self.value())
return str(queryset)
class CityAdmin(ModelAdmin):
list_filter = (country, )
@admin.register(Author, Reader, Editor, site=custom_admin_site)
class PersonAdmin(admin.ModelAdmin):
pass
'''
'''class AddressAdmin(admin.ModelAdmin):
list_display = ('mid','address','city','district','state','country','pin','phone')
#list_display = ('full_address', 'pin')
ordering = ['country']
actions = [ 'mark_seen']
def mark_seen(self, request, queryset):
queryset.update(status='p')
mark_seen.short_description = "Mark seen"
def my_view(request, *args, **kwargs):
user1 = Seva.objects.values_list('sevaday', flat=True)[0];
return u'%s' % (user1)
admin.site.register_view('somepath', view=my_view)'''
# Register every model with its ModelAdmin so it appears in the Django
# admin.  Models without a dedicated admin class use commonAdmin, and a
# few fall back to the default ModelAdmin (no second argument).
admin.site.register(Address, AddressAdmin)
admin.site.register(Awardee, AwardeeAdmin)
admin.site.register(LunarDate, LunarAdmin)
admin.site.register(Member, MembersAdmin)
admin.site.register(NakshatramRasiPadamData, NakshatramRasiPadamDataAdmin)
admin.site.register(Seva, SevasAdmin)
admin.site.register(DonationKind, DonationKindAdmin)
admin.site.register(DonationCash, DonationCashAdmin)
admin.site.register(DonationAsset, DonationAssetAdmin)
admin.site.register(DonationService, DonationServiceAdmin)
admin.site.register(MaasamType, commonAdmin)
admin.site.register(NakshatramType, commonAdmin)
# OAuth models deliberately left unregistered.
# admin.site.register(OauthAccesstoken, commonAdmin)
# admin.site.register(OauthAuthCode, commonAdmin)
# admin.site.register(OauthRefreshToken, commonAdmin)
admin.site.register(Organisation, commonAdmin)
admin.site.register(Profile, commonAdmin)
admin.site.register(SVExtra, commonAdmin)
admin.site.register(PadamType, commonAdmin)
admin.site.register(PakshamType, commonAdmin)
admin.site.register(RasiType, commonAdmin)
admin.site.register(SequenceNumber, commonAdmin)
admin.site.register(SevaAddress, commonAdmin)
admin.site.register(SevaCategory, commonAdmin)
admin.site.register(Tag, commonAdmin)
admin.site.register(TithiType, commonAdmin)
admin.site.register(MedicalProfile, MedicalProfileAdmin)
admin.site.register(StaffProfile, StaffProfileAdmin)
admin.site.register(User, commonAdmin)
admin.site.register(Transaction)
admin.site.register(SevasAddress, SevasAddressAdmin)
admin.site.register(AssetLand, AssetLandAdmin)
admin.site.register(AssetBuilding, AssetBuildingAdmin)
admin.site.register(AssetEquipment, AssetEquipmentAdmin)
admin.site.register(Trustee, TrusteeAdmin)
admin.site.register(Honorary, commonAdmin)
admin.site.register(Complimentary, commonAdmin)
admin.site.register(Relatives, RelativesAdmin)
admin.site.register(Duration)
| [
"suendra.aratikatla1608@gmail.com"
] | suendra.aratikatla1608@gmail.com |
39d359c0ace912c7ed11aeb496577e257dfdcd23 | d52413173437ba73ecdf822ca895e659f00a8ce7 | /kiwibackend/application/module/skill/admin.py | 5dd10402159c123e12d215cf4d186a1bd65287e3 | [] | no_license | whiteprism/mywork | 2329b3459c967c079d6185c5acabd6df80cab8ea | a8e568e89744ca7acbc59e4744aff2a0756d7252 | refs/heads/master | 2021-01-21T11:15:49.090408 | 2017-03-31T03:28:13 | 2017-03-31T03:28:13 | 83,540,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | from django.contrib import admin
from skill.models import Skill
# Expose the Skill model in the Django admin with the default ModelAdmin.
admin.site.register(Skill)
| [
"snoster@163.com"
] | snoster@163.com |
1d2db470220c93818fef669f95833c53bfc67818 | 0dc9968c34f74f6ff15435104806956169d6c82a | /algorithm/compare_tripet.py | 519515ce23471f85179b0b6a1b0551eacbbf7458 | [] | no_license | tasnuvaleeya/hackerRank | 5ac0e5b089e8da83980b37b1dea45def20fe02e0 | 7259324ea0692ce36c494d9b8913eef8e2211aa9 | refs/heads/master | 2021-04-09T17:37:41.769210 | 2018-04-12T13:55:56 | 2018-04-12T13:55:56 | 125,638,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | import sys
def solve(a0, a1, a2, b0, b1, b2):
    """Compare Alice's triplet (a0..a2) with Bob's (b0..b2).

    Each position where one player's value is strictly larger earns that
    player one point; ties score nothing.  Returns (alice, bob).
    """
    alice = bob = 0
    for mine, theirs in zip((a0, a1, a2), (b0, b1, b2)):
        if mine > theirs:
            alice += 1
        elif mine < theirs:
            bob += 1
    return (alice, bob)
# Read the two triplets from stdin and print the scores space-separated.
a0, a1, a2 = input().strip().split(' ')
a0, a1, a2 = [int(a0), int(a1), int(a2)]
b0, b1, b2 = input().strip().split(' ')
b0, b1, b2 = [int(b0), int(b1), int(b2)]
result = solve(a0, a1, a2, b0, b1, b2)
print(" ".join(map(str, result)))
| [
"tasnuva2606@gmail.com"
] | tasnuva2606@gmail.com |
4392fca4c3c0225f07480c0bda0f2da978c94667 | c6f192df8c4fe1b86eb15935d0c08e7fbca1e59c | /homeassistant/components/philips_js/__init__.py | 154df3ed21417e1d327e44ba284926c88ef3a873 | [
"Apache-2.0"
] | permissive | roblandry/home-assistant | aefc3a5f7806a39e5a1cf3a0b390c6364d2b8f5e | bac44cf473f84c1923a1181e4c5851257e873b83 | refs/heads/dev | 2023-03-11T07:25:47.611159 | 2022-08-13T17:33:57 | 2022-08-13T17:33:57 | 173,534,099 | 0 | 0 | Apache-2.0 | 2023-03-10T06:56:55 | 2019-03-03T05:18:54 | Python | UTF-8 | Python | false | false | 6,205 | py | """The Philips TV integration."""
from __future__ import annotations
import asyncio
from collections.abc import Callable, Coroutine, Mapping
from datetime import timedelta
import logging
from typing import Any
from haphilipsjs import ConnectionFailure, PhilipsTV
from haphilipsjs.typing import SystemType
from homeassistant.components.automation import AutomationActionType
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_API_VERSION,
CONF_HOST,
CONF_PASSWORD,
CONF_USERNAME,
Platform,
)
from homeassistant.core import Context, HassJob, HomeAssistant, callback
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import CONF_ALLOW_NOTIFY, CONF_SYSTEM, DOMAIN
PLATFORMS = [
Platform.MEDIA_PLAYER,
Platform.LIGHT,
Platform.REMOTE,
Platform.SWITCH,
]
LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Philips TV from a config entry.

    Builds the PhilipsTV API client from the stored connection data,
    performs an initial refresh, persists a newly detected system
    descriptor, and forwards setup to the entity platforms.
    """
    # System descriptor cached from a previous run, if any.
    system: SystemType | None = entry.data.get(CONF_SYSTEM)
    tvapi = PhilipsTV(
        entry.data[CONF_HOST],
        entry.data[CONF_API_VERSION],
        username=entry.data.get(CONF_USERNAME),
        password=entry.data.get(CONF_PASSWORD),
        system=system,
    )
    coordinator = PhilipsTVDataUpdateCoordinator(hass, tvapi, entry.options)
    await coordinator.async_refresh()
    # If the TV reported a system descriptor different from the cached one,
    # store the fresh value on the config entry.
    if (actual_system := tvapi.system) and actual_system != system:
        data = {**entry.data, CONF_SYSTEM: actual_system}
        hass.config_entries.async_update_entry(entry, data=data)
    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN][entry.entry_id] = coordinator
    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
    # Reload the entry whenever its options change.
    entry.async_on_unload(entry.add_update_listener(async_update_entry))
    return True
async def async_update_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Reload the config entry so changed options take effect."""
    await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry, discarding its stored coordinator on success."""
    if not await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
        return False
    hass.data[DOMAIN].pop(entry.entry_id)
    return True
class PluggableAction:
    """A pluggable action handler.

    Collects automation trigger actions (e.g. device "turn on" triggers)
    and runs them all on demand.  The ``update`` callback given to the
    constructor fires whenever an action is attached or detached so
    listeners can react to availability changes.
    """
    def __init__(self, update: Callable[[], None]) -> None:
        """Initialize."""
        self._update = update
        # Maps each remover callback to its (job, trigger variables) pair.
        self._actions: dict[
            Any, tuple[HassJob[..., Coroutine[Any, Any, None]], dict[str, Any]]
        ] = {}
    def __bool__(self):
        """Return if we have something attached."""
        return bool(self._actions)
    @callback
    def async_attach(self, action: AutomationActionType, variables: dict[str, Any]):
        """Attach a device trigger for turn on.

        Returns a callable that detaches the trigger again.
        """
        @callback
        def _remove():
            # Detach this trigger and notify listeners of the change.
            del self._actions[_remove]
            self._update()
        job = HassJob(action)
        self._actions[_remove] = (job, variables)
        self._update()
        return _remove
    async def async_run(self, hass: HomeAssistant, context: Context | None = None):
        """Run all turn on triggers."""
        for job, variables in self._actions.values():
            hass.async_run_hass_job(job, variables, context)
class PhilipsTVDataUpdateCoordinator(DataUpdateCoordinator[None]):
    """Coordinator to update data.

    Polls the TV every 30 seconds and, when the TV is fully on and the
    option allows it, supplements polling with a long-poll "notify" task
    that pushes state changes as they happen.
    """
    config_entry: ConfigEntry
    def __init__(self, hass, api: PhilipsTV, options: Mapping) -> None:
        """Set up the coordinator."""
        self.api = api
        self.options = options
        # Long-poll notify task; None while not running.
        self._notify_future: asyncio.Task | None = None
        # Turn-on triggers attached by automations; listeners are
        # re-notified whenever the set of triggers changes.
        self.turn_on = PluggableAction(self.async_update_listeners)
        super().__init__(
            hass,
            LOGGER,
            name=DOMAIN,
            update_interval=timedelta(seconds=30),
            request_refresh_debouncer=Debouncer(
                hass, LOGGER, cooldown=2.0, immediate=False
            ),
        )
    @property
    def system(self) -> SystemType:
        """Return the system descriptor."""
        # Prefer the live value from the API; fall back to the copy
        # stored on the config entry.
        if self.api.system:
            return self.api.system
        return self.config_entry.data[CONF_SYSTEM]
    @property
    def unique_id(self) -> str:
        """Return the system descriptor."""
        entry = self.config_entry
        if entry.unique_id:
            return entry.unique_id
        assert entry.entry_id
        return entry.entry_id
    @property
    def _notify_wanted(self):
        """Return if the notify feature should be active.
        We only run it when TV is considered fully on. When powerstate is in standby, the TV
        will go in low power states and seemingly break the http server in odd ways.
        """
        return (
            self.api.on
            and self.api.powerstate == "On"
            and self.api.notify_change_supported
            and self.options.get(CONF_ALLOW_NOTIFY, False)
        )
    async def _notify_task(self):
        # Long-poll loop: each notifyChange() call returns truthy on a
        # reported change, falsy on timeout, and None on an unexpected reply.
        while self._notify_wanted:
            res = await self.api.notifyChange(130)
            if res:
                self.async_set_updated_data(None)
            elif res is None:
                LOGGER.debug("Aborting notify due to unexpected return")
                break
    @callback
    def _async_notify_stop(self):
        # Cancel the long-poll task if it is running.
        if self._notify_future:
            self._notify_future.cancel()
            self._notify_future = None
    @callback
    def _async_notify_schedule(self):
        # (Re)start the long-poll task when wanted and not already running.
        if self._notify_future and not self._notify_future.done():
            return
        if self._notify_wanted:
            self._notify_future = asyncio.create_task(self._notify_task())
    @callback
    def _unschedule_refresh(self) -> None:
        """Remove data update."""
        super()._unschedule_refresh()
        self._async_notify_stop()
    async def _async_update_data(self):
        """Fetch the latest data from the source."""
        try:
            await self.api.update()
            self._async_notify_schedule()
        except ConnectionFailure:
            # Keep the last known state while the TV is unreachable.
            pass
| [
"noreply@github.com"
] | roblandry.noreply@github.com |
e550fac15b167c3188308efb9f1c170f2ad93141 | e474ee1a64b5c14b00fd701420847096a209105c | /min.py | 262f292f9148ffe439c561472f8d3e9f8cf7ccce | [] | no_license | cosarara/CoffeeMaker | 5bde0a2cc1a6561794a680d03b8e531524636f1c | 73ad455b731bce51ebce21c43cf571b8fadb9634 | refs/heads/master | 2021-06-04T13:37:47.609192 | 2016-08-31T13:28:35 | 2016-08-31T13:29:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | #!/usr/bin/env python3
import pigpio
import time
import sys
# GPIO pin driving the servo; may be overridden by the first CLI argument.
SERVO = 4
if len(sys.argv) > 1:
    SERVO = int(sys.argv[1])
# Servo pulse widths in microseconds (min/mid/max positions).
MIN_PW = 1100
MID_PW = 1450
MAX_PW = 2000
pi = pigpio.pi()
print("min", SERVO)
# Drive the servo to its minimum position.
pi.set_servo_pulsewidth(SERVO, MIN_PW)
| [
"cosarara97@gmail.com"
] | cosarara97@gmail.com |
b2b27bb05f6ee9fa684ab184aab98b2328e8fb80 | 16dcbf88ae9514109151fe5ff447b2b653ddf48b | /2016/012-polynom/polynom 2.py | 2847370b97cc567bdecda9cfbb8aa6c5054e1f08 | [] | no_license | ChristerNilsson/Lab | efa55ef5e79dff84b232dfcf94473eacdb263175 | b1f730f45ec6e901bd14c1e4196aa5e0f591ecd2 | refs/heads/master | 2023-07-06T04:35:09.458936 | 2023-06-24T21:40:54 | 2023-06-24T21:40:54 | 48,474,249 | 8 | 8 | null | 2022-12-10T07:03:31 | 2015-12-23T06:51:11 | JavaScript | UTF-8 | Python | false | false | 4,381 | py | # -*- coding: utf-8 -*-
from sympy import S
# Polynom 2: Lista 0,1,2,3,... Value, Add, Mul, Diff, Integrate, Prettyprint
# Objektorienterat
class Polynom(object):
    """Dense polynomial: ``polynom[k]`` is the coefficient of x**k.

    Fixes over the previous version:
    * ``__add__`` relied on the Python-2-only ``map(None, ...)`` zero-pad
      trick, which raises TypeError on Python 3; padding is now explicit.
    * ``__str__`` appended the exponent digits to every rendered term and
      emitted a stray ``*`` after a bare sign (``5`` printed as ``50``,
      ``-x`` as ``-*x1``), so the module's own string asserts could never
      pass; terms are now rendered exactly as those asserts document.
    """

    def __init__(self, polynom):
        """polynom -- list of coefficients in ascending exponent order."""
        self.polynom = polynom

    def __call__(self, x):
        """Evaluate the polynomial at x."""
        return sum([factor * x ** exponent for exponent, factor in enumerate(self.polynom)])

    def __eq__(self, other):
        # NOTE: trailing zero coefficients make mathematically equal
        # polynomials compare unequal; kept for backwards compatibility.
        return self.polynom == other.polynom

    def __str__(self):
        """Render as e.g. '5-7*x+3*x**2'; the zero polynomial is '0'."""
        terms = []
        for degree, factor in enumerate(self.polynom):
            if factor == 0:
                continue
            sign = '+' if factor > 0 else '-'
            magnitude = -factor if factor < 0 else factor
            if degree == 0:
                body = str(magnitude)
            else:
                power = 'x' if degree == 1 else 'x**' + str(degree)
                # Unit coefficients are implied: 'x', not '1*x'.
                body = power if magnitude == 1 else str(magnitude) + '*' + power
            terms.append(sign + body)
        if not terms:
            return '0'
        rendered = ''.join(terms)
        # Drop a leading '+' on the first term.
        return rendered[1:] if rendered.startswith('+') else rendered

    def __add__(self, other):
        """Coefficient-wise sum; the shorter list is zero-padded."""
        p1, p2 = self.polynom, other.polynom
        res = []
        for exponent in range(max(len(p1), len(p2))):
            f1 = p1[exponent] if exponent < len(p1) else 0
            f2 = p2[exponent] if exponent < len(p2) else 0
            res.append(f1 + f2)
        return Polynom(res)

    def __sub__(self, other):
        """Coefficient-wise difference."""
        return self + Polynom([-factor for factor in other.polynom])

    def __mul__(self, other):
        """Convolution product; trailing zero coefficients are trimmed."""
        p1 = self.polynom
        p2 = other.polynom
        res = [0] * (len(p1) + len(p2))
        for exp1, f1 in enumerate(p1):
            for exp2, f2 in enumerate(p2):
                res[exp1 + exp2] += f1 * f2
        while res and res[-1] == 0:
            res.pop()
        return Polynom(res)

    def diff(self):
        """Return the derivative as a new Polynom."""
        res = []
        for degree, factor in enumerate(self.polynom):
            if degree != 0:
                res.append(factor * degree)
        return Polynom(res)

    def integrate(self):
        """Return the antiderivative with integration constant 0."""
        res = [0]
        for degree, factor in enumerate(self.polynom):
            # 1.0 * keeps the division exact-float on Python 2 as well.
            res.append(1.0 * factor / (degree + 1))
        return Polynom(res)
# Inline regression tests for Polynom.
# NOTE(review): several of the string asserts below fail against __str__
# as originally written (it appends the exponent to every term and emits
# '*' after a bare sign), and the '0.166666666667' repr matches Python 2
# only -- Python 3 prints '0.16666666666666669'.
a = Polynom([5,-7,3]) # f(x) = 5 -7*x + 3*x**2
assert a(0) == 5
assert a(1) == 1
assert a(2) == 3
assert Polynom([]) + Polynom([]) == Polynom([])
assert Polynom([1]) + Polynom([]) == Polynom([1])
assert Polynom([]) + Polynom([1]) == Polynom([1])
assert Polynom([1]) + Polynom([1]) == Polynom([2])
assert Polynom([1]) + Polynom([2]) == Polynom([3])
assert Polynom([1,0,1]) + Polynom([2,3]) == Polynom([3,3,1])
assert Polynom([]) * Polynom([]) == Polynom([])
assert Polynom([1]) * Polynom([]) == Polynom([])
assert Polynom([]) * Polynom([1]) == Polynom([])
assert Polynom([1]) * Polynom([1]) == Polynom([1])
assert Polynom([1]) * Polynom([2]) == Polynom([2])
assert Polynom([1,0,1]) * Polynom([2,3]) == Polynom([2,3,2,3])
assert Polynom([]).diff() == Polynom([])
assert Polynom([1]).diff() == Polynom([])
assert Polynom([1,2]).diff() == Polynom([2])
assert Polynom([1,2,3]).diff() == Polynom([2,6])
assert Polynom([5,-7,3]).diff() == Polynom([-7,6])
assert Polynom([]).integrate() == Polynom([0])
assert Polynom([1]).integrate() == Polynom([0,1])
assert Polynom([1,2]).integrate() == Polynom([0,1,1])
assert Polynom([1,2,3]).integrate() == Polynom([0,1,1,1])
assert Polynom([5,-7,3]).integrate() == Polynom([0,5,-3.5,1])
# Compute the area between the polynomials y=x and y=x*x for x in [0, 1]
a = Polynom([0,1])
b = Polynom([0,0,1])
c = a - b
f = c.integrate()
assert str(f(1) - f(0)) == '0.166666666667'
assert str(Polynom([])) == '0'
assert str(Polynom([0])) == '0'
assert str(Polynom([1])) == '1'
assert str(Polynom([0,0])) == '0'
assert str(Polynom([0,1])) == 'x'
assert str(Polynom([0,-1])) == '-x'
assert str(Polynom([0,2])) == '2*x'
assert str(Polynom([0,-2])) == '-2*x'
a = [5, -7, 3]
assert str(Polynom(a)) == '5-7*x+3*x**2'
assert str(Polynom(a).diff()) == '-7+6*x'
assert str(Polynom(a).diff().diff()) == '6'
assert str(Polynom(a).diff().diff().diff()) == '0'
assert str(Polynom([0,-7,-3])) == '-7*x-3*x**2'
| [
"janchrister.nilsson@gmail.com"
] | janchrister.nilsson@gmail.com |
615c765924cdcc3f3b06987b647bfc202354395b | 3018a139a3403ae1cc6319d50635db66155f00a8 | /experiments/experiments/migrations/0004_corpus_pkl.py | c788fc9a1669dc5447bbc73536b1fed53c37dbee | [] | no_license | umd-huang-lab/private-topic-model-tensor-methods | ad9222b77cffc68f55d7c53bd25627d85b699713 | 3397cd1c44a62e1da41ffb3516bab5089d04ff55 | refs/heads/master | 2022-12-20T12:17:35.285464 | 2020-07-01T18:34:25 | 2020-07-01T18:34:25 | 228,098,370 | 3 | 0 | null | 2022-12-08T10:14:20 | 2019-12-14T22:35:46 | Python | UTF-8 | Python | false | false | 416 | py | # Generated by Django 3.0.7 on 2020-06-26 01:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add the optional ``pkl`` path field to Corpus."""
    dependencies = [
        ('experiments', '0003_parentexperiment_n_topics'),
    ]
    operations = [
        migrations.AddField(
            model_name='corpus',
            name='pkl',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
    ]
| [
"furongh@cs.umd.edu"
] | furongh@cs.umd.edu |
456d1b7dcc9770fbbd73c74764f549079b035733 | 4fd56b22ba00072817904c45f6b18844034f58f0 | /projectapi/urls.py | 4bc4445e2366ca58c269085b94fa45c39e920dd6 | [
"MIT"
] | permissive | kelvinrono/projectApi | 0bf7a2766a5279ca4b27e8b3d55e352f7661f083 | 873ea90bff9ec1004d1f936d4fdcec47f95759c3 | refs/heads/master | 2023-06-19T16:04:26.886938 | 2021-07-20T20:47:40 | 2021-07-20T20:47:40 | 386,591,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | from django.contrib import admin
from django.urls import path,include
from django.contrib.auth import views
from django.conf import settings
from django.conf.urls.static import static
from django_registration.backends.one_step.views import RegistrationView
# URL routing: admin site, the api app's routes, and auth/registration views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',include('api.urls')),
    # One-step registration; redirect to the site root on success.
    path('accounts/register/', RegistrationView.as_view(success_url='/'),name='django_registration_register'),
    path('accounts/', include('django.contrib.auth.urls')),
    path('accounts/', include('django_registration.backends.one_step.urls')),
]
| [
"ronohkelvin99@gmail.com"
] | ronohkelvin99@gmail.com |
5bdcab451f18491b6f98b4efe73e1d9d6a108e54 | 6aad57d3e189aded2552fdf0d11bae71dd226872 | /distance_field/build/devel/_setup_util.py | 0886c3b20c0a9a4d65aa47f2f26831a7b8a4cfdf | [] | no_license | sid24ss/mha-stuff | bedecfdd75ed9562040422ceade0138b29c3ca10 | 28d10ca50ed9a71e661106a44a1c083f6cfeac6c | refs/heads/master | 2021-01-19T14:59:47.938089 | 2013-11-04T23:34:38 | 2013-11-04T23:34:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,370 | py | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import os
import platform
import sys
# environment at generation time
CMAKE_PREFIX_PATH = '/opt/ros/groovy'.split(';')
setup_dir = '/usr0/home/venkatrn/groovy_workspace/sandbox/distance_field/build/devel'
if setup_dir and setup_dir not in CMAKE_PREFIX_PATH:
    CMAKE_PREFIX_PATH.insert(0, setup_dir)
# collapse the prefix list into a single os.pathsep separated string
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)

# marker file whose presence identifies a directory as a catkin workspace
CATKIN_MARKER_FILE = '.catkin'

system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')

# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
    'CMAKE_PREFIX_PATH': '',
    'CPATH': 'include',
    'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': 'lib',
    'PATH': 'bin',
    'PKG_CONFIG_PATH': 'lib/pkgconfig',
    'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
    '''
    Generate shell code to reset environment variables
    by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
    This does not cover modifications performed by environment hooks.
    '''
    # Work from a pristine snapshot so every rollback sees the original state.
    snapshot = copy.copy(environ)
    statements = []
    for var_name in sorted(env_var_subfolders):
        rolled_back = _rollback_env_variable(snapshot, var_name, env_var_subfolders[var_name])
        if rolled_back is None:
            continue
        environ[var_name] = rolled_back
        statements.append(assignment(var_name, rolled_back))
    if statements:
        statements.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
    return statements
def _rollback_env_variable(environ, name, subfolder):
    '''
    For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
    :param subfolder: str '' or subfoldername that may start with '/'
    :returns: the updated value of the environment variable.
    '''
    current = environ[name] if name in environ else ''
    remaining = [p for p in current.split(os.pathsep) if p]
    changed = False
    if subfolder:
        # normalize: strip at most one leading and one trailing path separator
        separators = [s for s in (os.path.sep, os.path.altsep) if s]
        if subfolder and subfolder[0] in separators:
            subfolder = subfolder[1:]
        if subfolder and subfolder[-1] in separators:
            subfolder = subfolder[:-1]
    for workspace in _get_workspaces(environ, include_fuerte=True):
        target = os.path.join(workspace, subfolder) if subfolder else workspace
        for candidate in remaining:
            # compare ignoring a single trailing separator on the env entry
            trimmed = candidate[:-1] if candidate and candidate[-1] in [os.path.sep, os.path.altsep] else candidate
            if trimmed == target:
                remaining.remove(candidate)
                changed = True
                break
    return os.pathsep.join(remaining) if changed else None
def _get_workspaces(environ, include_fuerte=False):
    '''
    Based on CMAKE_PREFIX_PATH return all catkin workspaces.
    :param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
    '''
    prefix_value = environ['CMAKE_PREFIX_PATH'] if 'CMAKE_PREFIX_PATH' in environ else ''
    prefix_paths = [p for p in prefix_value.split(os.pathsep) if p]

    def _is_workspace(path):
        # fuerte installs predate the marker file, hence the special case
        if include_fuerte and path.startswith('/opt/ros/fuerte'):
            return True
        return os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))

    return [p for p in prefix_paths if _is_workspace(p)]
def prepend_env_variables(environ, env_var_subfolders, workspaces):
    '''
    Generate shell code to prepend environment variables
    for the all workspaces.
    '''
    statements = [comment('prepend folders of workspaces to environment variables')]
    workspace_paths = [p for p in workspaces.split(os.pathsep) if p]
    # CMAKE_PREFIX_PATH comes first, the remaining variables alphabetically.
    cmake_prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', workspace_paths, '')
    statements.append(prepend(environ, 'CMAKE_PREFIX_PATH', cmake_prefix))
    for var_name in sorted(k for k in env_var_subfolders.keys() if k != 'CMAKE_PREFIX_PATH'):
        var_prefix = _prefix_env_variable(environ, var_name, workspace_paths, env_var_subfolders[var_name])
        statements.append(prepend(environ, var_name, var_prefix))
    return statements
def _prefix_env_variable(environ, name, paths, subfolder):
'''
Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if subfolder:
path = os.path.join(path, subfolder)
# exclude any path already in env and any path we already added
if path not in environ_paths and path not in checked_paths:
checked_paths.append(path)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
    """Return a shell statement assigning *value* to *key* (sh or bat syntax)."""
    if IS_WINDOWS:
        return 'set %s=%s' % (key, value)
    return 'export %s="%s"' % (key, value)
def comment(msg):
    """Wrap *msg* in the comment syntax of the target shell."""
    return 'REM %s' % msg if IS_WINDOWS else '# %s' % msg
def prepend(environ, key, prefix):
    """Return a shell statement that puts *prefix* in front of the variable.

    Falls back to a plain assignment when the variable is unset or empty.
    """
    if key not in environ or not environ[key]:
        return assignment(key, prefix)
    if IS_WINDOWS:
        return 'set %s=%s%%%s%%' % (key, prefix, key)
    return 'export %s="%s$%s"' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
    '''
    Generate shell code with found environment hooks
    for the all workspaces.
    '''
    lines = []
    lines.append(comment('found environment hooks in workspaces'))
    generic_env_hooks = []
    specific_env_hooks = []
    # filename -> full path, used to drop older hooks with the same name
    generic_env_hooks_by_filename = {}
    specific_env_hooks_by_filename = {}
    generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
    # shell-specific hooks (e.g. .bash/.zsh) only apply on non-Windows
    specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
    # remove non-workspace paths
    workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
    # iterate lowest-priority workspace first so that a same-named hook from a
    # higher-priority workspace replaces it below
    for workspace in reversed(workspaces):
        env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
        if os.path.isdir(env_hook_dir):
            for filename in sorted(os.listdir(env_hook_dir)):
                if filename.endswith('.%s' % generic_env_hook_ext):
                    generic_env_hooks.append(os.path.join(env_hook_dir, filename))
                    # remove previous env hook with same name if present
                    if filename in generic_env_hooks_by_filename:
                        generic_env_hooks.remove(generic_env_hooks_by_filename[filename])
                    generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
                elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
                    specific_env_hooks.append(os.path.join(env_hook_dir, filename))
                    # remove previous env hook with same name if present
                    if filename in specific_env_hooks_by_filename:
                        specific_env_hooks.remove(specific_env_hooks_by_filename[filename])
                    specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
    # generic hooks are sourced before shell-specific ones
    lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS', os.pathsep.join(generic_env_hooks + specific_env_hooks)))
    return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
    try:
        args = _parse_arguments()
    except Exception as e:
        print(e, file=sys.stderr)
        exit(1)
    # start from a snapshot of the current process environment
    environ = dict(os.environ)
    lines = []
    if not args.extend:
        lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
    lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
    lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
    # the emitted shell code is evaluated by the sourcing setup.SHELL script
    print('\n'.join(lines))
    sys.exit(0)
| [
"venkatrn@andrew.cmu.edu"
] | venkatrn@andrew.cmu.edu |
bfbefb717a8b22bbac49cdef813b4deb72871a51 | 173f10f1791afcf982097d5bb7fd0cd78bdbecfc | /idlesporklib/ScrolledList.py | bc7f1194a7268473cae3a8f94f4719b090d16c68 | [] | no_license | vexlerneil/idlespork | b0c0f17d66c3573e110db131655eddef22906798 | 2ccb427937183edc08d805f9eb68ef3a9ac0df3b | refs/heads/master | 2020-03-21T12:56:07.319469 | 2018-04-25T20:18:27 | 2018-04-25T20:18:27 | 138,578,929 | 0 | 0 | null | 2018-06-25T10:20:51 | 2018-06-25T10:20:50 | null | UTF-8 | Python | false | false | 4,428 | py | from Tkinter import Frame, Scrollbar, Listbox, Menu, Tk
import re
from idlesporklib import macosxSupport
class ScrolledList:
    """A Listbox with an attached vertical scrollbar plus mouse/key bindings.

    Subclasses customize behaviour by overriding fill_menu, on_select and
    on_double (see the stubs at the bottom of the class).
    """

    # Placeholder entry shown while the list contains no real items.
    default = "(None)"

    def __init__(self, master, **options):
        # Create top frame, with scrollbar and listbox
        self.master = master
        self.frame = frame = Frame(master)
        self.frame.pack(fill="both", expand=1)
        self.vbar = vbar = Scrollbar(frame, name="vbar")
        self.vbar.pack(side="right", fill="y")
        self.listbox = listbox = Listbox(frame, exportselection=0,
                                         background="white")
        if options:
            listbox.configure(options)
        listbox.pack(expand=1, fill="both")
        # Tie listbox and scrollbar together
        vbar["command"] = listbox.yview
        listbox["yscrollcommand"] = vbar.set
        # Bind events to the list box
        listbox.bind("<ButtonRelease-1>", self.click_event)
        listbox.bind("<Double-ButtonRelease-1>", self.double_click_event)
        if macosxSupport.isAquaTk():
            # Aqua uses button 2 / Control-click for context menus.
            listbox.bind("<ButtonPress-2>", self.popup_event)
            listbox.bind("<Control-Button-1>", self.popup_event)
        else:
            listbox.bind("<ButtonPress-3>", self.popup_event)
        listbox.bind("<Key-Up>", self.up_event)
        listbox.bind("<Key-Down>", self.down_event)
        # Mark as empty
        self.clear()

    def close(self):
        # Destroy the whole widget tree.
        self.frame.destroy()

    def clear(self):
        # Remove all items and show the placeholder entry.
        self.listbox.delete(0, "end")
        self.empty = 1
        self.listbox.insert("end", self.default)

    def append(self, item):
        # The first real item replaces the placeholder.
        if self.empty:
            self.listbox.delete(0, "end")
            self.empty = 0
        self.listbox.insert("end", str(item))

    def get(self, index):
        # Return the text of the item at *index*.
        return self.listbox.get(index)

    def click_event(self, event):
        # Select the row under the mouse and fire the on_select callback.
        self.listbox.activate("@%d,%d" % (event.x, event.y))
        index = self.listbox.index("active")
        self.select(index)
        self.on_select(index)
        return "break"

    def double_click_event(self, event):
        index = self.listbox.index("active")
        self.select(index)
        self.on_double(index)
        return "break"

    # Context menu, built lazily on first right-click (see popup_event).
    menu = None

    def popup_event(self, event):
        if not self.menu:
            self.make_menu()
        menu = self.menu
        self.listbox.activate("@%d,%d" % (event.x, event.y))
        index = self.listbox.index("active")
        self.select(index)
        menu.tk_popup(event.x_root, event.y_root)

    def make_menu(self):
        # Create the empty menu and let the subclass populate it.
        menu = Menu(self.listbox, tearoff=0)
        self.menu = menu
        self.fill_menu()

    def up_event(self, event):
        # Move the selection one row up; ring the bell at the top.
        index = self.listbox.index("active")
        if self.listbox.selection_includes(index):
            index = index - 1
        else:
            index = self.listbox.size() - 1
        if index < 0:
            self.listbox.bell()
        else:
            self.select(index)
            self.on_select(index)
        return "break"

    def down_event(self, event):
        # Move the selection one row down; ring the bell at the bottom.
        index = self.listbox.index("active")
        if self.listbox.selection_includes(index):
            index = index + 1
        else:
            index = 0
        if index >= self.listbox.size():
            self.listbox.bell()
        else:
            self.select(index)
            self.on_select(index)
        return "break"

    def select(self, index):
        # Focus, activate and scroll the given row into view.
        self.listbox.focus_set()
        self.listbox.activate(index)
        self.listbox.selection_clear(0, "end")
        self.listbox.selection_set(index)
        self.listbox.see(index)

    # Methods to override for specific actions

    def fill_menu(self):
        pass

    def on_select(self, index):
        pass

    def on_double(self, index):
        pass
def _scrolled_list(parent):
    # Manual htest: opens a window with 30 items; selections print to stdout.
    # NOTE: the print statements below are Python 2 syntax, matching the
    # capitalized Tkinter import at the top of this module.
    root = Tk()
    root.title("Test ScrolledList")
    width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
    root.geometry("+%d+%d"%(x, y + 150))
    class MyScrolledList(ScrolledList):
        def fill_menu(self): self.menu.add_command(label="right click")
        def on_select(self, index): print "select", self.get(index)
        def on_double(self, index): print "double", self.get(index)
    scrolled_list = MyScrolledList(root)
    for i in range(30):
        scrolled_list.append("Item %02d" % i)
    root.mainloop()

if __name__ == '__main__':
    from idlesporklib.idle_test.htest import run
    run(_scrolled_list)
| [
"goldberg.lior@gmail.com"
] | goldberg.lior@gmail.com |
671694a895191b6a38dd5e8c5edb352183f00584 | ed424d11d3403f5f9c6e01a49bdf069b34301baf | /products/migrations/0007_auto_20180509_2244.py | 835ebd0a13f0b605ed04fa7a1e1bf1eb71a03f8e | [] | no_license | Madanshrestha/ecommerce | ed41b392939bca22d22e577eb967dc9f58faff39 | 017d499cde94c29a53d8e154f8d16109270377bf | refs/heads/master | 2020-03-18T16:10:59.859738 | 2018-06-30T12:25:54 | 2018-06-30T12:25:54 | 134,950,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-09 16:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11: allows Product.slug to be left blank.

    dependencies = [
        ('products', '0006_auto_20180509_2243'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='slug',
            field=models.SlugField(blank=True),
        ),
    ]
| [
"madan.stha3@gmail.com"
] | madan.stha3@gmail.com |
ebfcbce8b481ec4712de8f2e087f414c1f682900 | 4978566caeb9a47474eb25bfa5546fa2140bd9d7 | /main.py | 309d89cfac49a31091a4192e2f8eb08d720892d1 | [] | no_license | OlehMaistrenko/Lab1_SRP | 983ffefef6508d610b88833b06afc1026d6d487c | f87d36776dec2f600346c35bdfe6f03d1e7c0fa6 | refs/heads/master | 2021-01-01T05:03:06.092647 | 2016-05-11T20:14:01 | 2016-05-11T20:14:01 | 58,576,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,517 | py | # -*- coding: utf-8 -*-
import urllib2
import datetime
import pandas as pd
import os
import matplotlib.pyplot as plt
def download(index):
    # Zero-pad single-digit region indices to two characters.
    if index < 10:
        index = "0"+str(index)
    else:
        index = str(index)
    # UI region index -> human readable name (used only for the file name).
    regions = {1: "Vinnytsya", 2: "Volyn", 3:"Dnipropetrovs'k", 4:"Donets'k", 5:"Zhytomyr", 6:"Zacarpathia", 7:"Zaporizhzhya",
               8:"Ivano-Frankivs'k", 9:"Kiev", 10:"Kirovohrad", 11:"Luhans'k", 12:"L'viv", 13:"Mykolayiv", 14:"Odessa", 15:"Poltava",
               16:"Rivne", 17:"Sumy", 18:"Ternopil'", 19:"Kharkiv", 20:"Kherson", 21:"Khmel'nits'kyy", 22:"Cherkasy", 23:"Chernivtsi",
               24:"Chernihiv", 25:"Crimea"}
    # NOAA uses its own province numbering - map the UI index onto it.
    index1 = Reindex(int(index))
    url = "http://www.star.nesdis.noaa.gov/smcd/emb/vci/gvix/G04/ts_L1/ByProvince/Mean/L1_Mean_UKR.R"+index1+".txt"
    vhi_url = urllib2.urlopen(url)
    # Time-stamped file name so repeated downloads never overwrite each other.
    name = "vhi_id_%s %s %s.csv" % (index1, regions[int(index)], datetime.datetime.now().strftime('%d %b %Y %H-%M-%S'))
    out = open(name,'wb')
    out.write(vhi_url.read())
    out.close()
    print ("VHI is successfully downloaded...")
def RegionSelect():
    """Print the list of regions and interactively read a region index.

    Returns the chosen index as an int in the range 1..25.

    Bug fixes: region 25 (Crimea) was missing from the printed list (the
    loop stopped at 24), and indices 26/27 were accepted because the upper
    bound check used 27 instead of 25.
    """
    print("You can download data for some region:")
    regions = {1: "Vinnytsya", 2: "Volyn", 3:"Dnipropetrovs'k", 4:"Donets'k", 5:"Zhytomyr", 6:"Zacarpathia", 7:"Zaporizhzhya",
               8:"Ivano-Frankivs'k", 9:"Kiev", 10:"Kirovohrad", 11:"Luhans'k", 12:"L'viv", 13:"Mykolayiv", 14:"Odessa", 15:"Poltava",
               16:"Rivne", 17:"Sumy", 18:"Ternopil'", 19:"Kharkiv", 20:"Kherson", 21:"Khmel'nits'kyy", 22:"Cherkasy", 23:"Chernivtsi",
               24:"Chernihiv", 25:"Crimea"}
    # list all 25 regions (the old loop missed region 25)
    for i in range(1, 26):
        print(i, regions[i])
    print("\nPlease enter the index of the region.")
    index = 0
    flag = True
    while flag:
        try:
            index = int(input())
        except ValueError:
            print("Please enter the number in range from 1 to 25.")
        else:
            # upper bound fixed: only 25 regions exist (was 27)
            if index < 1 or index > 25:
                print("Please enter the number in range from 1 to 25.")
            else:
                flag = False
    return index
def FileSelect():
    # List every CSV in the working directory and let the user pick one;
    # returns the chosen file parsed into a pandas DataFrame.
    # NOTE: the print statement below is Python 2 syntax.
    files = []
    i = 0
    for file in os.listdir(os.getcwd()):
        if file.endswith(".csv"):
            i += 1
            files.append(file)
            print i,")",file
    choise = int(input("Select the file from the list above: "))
    # header=1 skips the raw NOAA header line kept in the downloaded file
    df = pd.read_csv(files[choise-1],index_col=False, header=1)
    return df
def Reindex(i):
    """Map a UI region index (1-25) to the NOAA province code used in URLs."""
    noaa_codes = {
        1: "24", 2: "25", 3: "05", 4: "06", 5: "27", 6: "23", 7: "26",
        8: "07", 9: "11", 10: "13", 11: "14", 12: "15", 13: "16", 14: "17",
        15: "18", 16: "19", 17: "21", 18: "22", 19: "08", 20: "09",
        21: "10", 22: "01", 23: "03", 24: "02", 25: "04",
    }
    return noaa_codes[i]
def read():
    # Dump the chosen CSV to stdout without the pandas index column
    # (Python 2 print statement).
    print FileSelect().to_string(index=False)
def minMax():
    # Report the VHI extremes for a user-chosen year of a user-chosen file.
    df = FileSelect()
    year =input("Enter the year to determine Max и Min values VHI: ")
    print "Max: ", df[df["year"] == year]["VHI"].max()
    print "Min: ", df[df["year"] == year]["VHI"].min()
def extreme():
    # Extreme drought: VHI below 15 (-1 marks missing data and is excluded).
    df = FileSelect()
    print "All extreme drought by year:"
    print df[df["VHI"] < 15][df["VHI"] != -1][["year","VHI"]]
    proc = input("Enter the percentage of the territory: ")
    # NOTE(review): chained boolean indexing like df[mask1][mask2] aligns the
    # second mask by label, not position - pandas may warn; confirm intent.
    print df[df["VHI"] < 15][df["%Area_VHI_LESS_15"] > proc][["year","week","VHI","%Area_VHI_LESS_15"]].to_string(index=False)
def moderate():
    # Moderate drought: VHI below 35, optionally filtered by affected area.
    df = FileSelect()
    print "Moderate drought for years:"
    print df[df["VHI"] < 35][df["VHI"] != -1][["year","VHI"]].to_string(index=False)
    proc = input("Enter the percentage of the territory: ")
    # NOTE(review): same chained-mask caveat as in extreme() above.
    print df[df["VHI"] < 35][df["%Area_VHI_LESS_35"] > proc][["year","week","VHI","%Area_VHI_LESS_35"]].to_string(index=False)
def plot():
    # Draw the weekly VHI curve for one year of the chosen file.
    df = FileSelect()
    year =input("Enter year: ")
    plt.figure(1)
    plt.plot(df[df["year"] == year]["week"], df[df["year"] == year]["VHI"] , label = year)
    plt.legend()
    plt.title("Plot for %s year" % (str(year)))
    plt.grid(True)
    plt.show()
# Simple text menu dispatching to the functions above; option 0 exits.
while (True):
    print ("1. Download CSV")
    print ("2. View CSV")
    print ("3. Max & Min VHI")
    print ("4. Extreme drought")
    print ("5. Moderate drought")
    print ("6. Plot")
    print ("0. Exit")
    choice = int(input('Choose 0 of 6 : '))
    if choice == 1:
        download(RegionSelect())
    elif choice == 2:
        read()
    elif choice == 3:
        minMax()
    elif choice == 4:
        extreme()
    elif choice == 5:
        moderate()
    elif choice == 6:
        plot()
    elif choice == 0:
        break
    else:
        print ("Try again!(0-6)")
| [
"oleh.maistrenko@gmail.com"
] | oleh.maistrenko@gmail.com |
8a2cb23ac6b69c233096f89a4ec76d97cb4c38e8 | 8852d7d0ef1e1aceeefdd30581fed7c34c369bf4 | /MeanShiftfromScratchDynamicBandwidth.py | 57f214b4cb51ff3641c0ec9ef7daaa505477eb51 | [] | no_license | iamycee/PythonML_Basics | 7bbf644beda576f97dcaa3b544a16680e3c89995 | 5bd281cd33ff9e0cd912b4488d0babfdd45beb06 | refs/heads/master | 2021-04-12T01:57:42.847091 | 2018-03-18T19:27:35 | 2018-03-18T19:27:35 | 125,759,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,105 | py | import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
import numpy as np
# Toy 2-D data set used by the demo at the bottom of the file.
X= np.array([[1,2], [1.5,1.8], [5,8], [8,8], [1,0.6], [9,11], [8,2], [10,2], [9,3]])
#from sklearn.datasets.samples_generator import make_blobs
#X,y= make_blobs(n_samples=15, centers=3, n_features=2)
colors= 10*["g","r","c","b","k"]
#___MEAN SHIFT___#
# 1. Assign every single data point as a cluster center
# 2. Take data points within each cluster center's radius(bandwidth),
# take the mean of all these datapoints and get a new cluster center
# 3. Repeat step 2 until you get convergence.
class MeanShift:
    """Mean shift clustering with a dynamically estimated radius.

    Every sample starts as its own centroid; each iteration shifts every
    centroid to a distance-weighted mean of all samples, merges centroids
    that end up within one radius step of each other, and repeats until no
    centroid moves.

    Bug fixes vs. the original:
      * predict() referenced the undefined names ``featureset`` and
        ``distance`` and raised NameError; it now classifies its argument.
      * the convergence check terminated the loop when a centroid *had*
        moved instead of when none moved, stopping after a single shift.
    """

    def __init__(self, radius=None, radius_norm_step=100):
        # When radius is None a bandwidth is derived from the data in fit().
        self.radius = radius
        self.radius_norm_step = radius_norm_step

    def fit(self, data):
        """Cluster *data*, an array of shape (n_samples, n_features).

        Sets ``self.centroids`` (index -> centroid array) and
        ``self.classifications`` (centroid index -> list of samples).
        """
        if self.radius is None:
            # dynamic bandwidth: norm of the overall data centroid divided
            # into radius_norm_step rings
            all_data_centroid = np.average(data, axis=0)
            all_data_norm = np.linalg.norm(all_data_centroid)
            self.radius = all_data_norm / self.radius_norm_step

        centroids = {}
        for i in range(len(data)):
            centroids[i] = data[i]

        # weights[0] is the largest: the closer a sample, the more copies of
        # it enter the average below
        weights = [i for i in range(self.radius_norm_step)][::-1]
        while True:
            new_centroids = []
            for i in centroids:
                in_bandwidth = []
                centroid = centroids[i]
                for featureset in data:
                    distance = np.linalg.norm(featureset - centroid)
                    if distance == 0:
                        distance = 0.000000001
                    # number of radius steps the sample is away
                    weight_index = int(distance / self.radius)
                    if weight_index > self.radius_norm_step - 1:
                        weight_index = self.radius_norm_step - 1
                    # crude weighting: replicate the sample weight**2 times
                    in_bandwidth += (weights[weight_index] ** 2) * [featureset]
                new_centroid = np.average(in_bandwidth, axis=0)
                new_centroids.append(tuple(new_centroid))

            uniques = sorted(list(set(new_centroids)))
            # merge centroids that converged to within one radius of another
            to_pop = []
            for i in uniques:
                for ii in uniques:
                    if i == ii:
                        pass
                    elif np.linalg.norm(np.array(i) - np.array(ii)) <= self.radius:
                        to_pop.append(ii)
                        break
            for i in to_pop:
                try:
                    uniques.remove(i)
                except ValueError:
                    # already removed as a near-duplicate of another centroid
                    pass

            prev_centroids = dict(centroids)
            centroids = {}
            for i in range(len(uniques)):
                centroids[i] = np.array(uniques[i])

            optimized = True
            for i in centroids:
                if not np.array_equal(centroids[i], prev_centroids[i]):
                    optimized = False
                if not optimized:
                    break
            # stop once no centroid moved (the original broke on movement)
            if optimized:
                break

        self.centroids = centroids

        self.classifications = {}
        for i in range(len(self.centroids)):
            self.classifications[i] = []
        for featureset in data:
            distances = [np.linalg.norm(featureset - self.centroids[centroid]) for centroid in self.centroids]
            classification = distances.index(min(distances))
            self.classifications[classification].append(featureset)

    def predict(self, data):
        """Return the index of the fitted centroid closest to *data*."""
        distances = [np.linalg.norm(data - self.centroids[centroid])
                     for centroid in self.centroids]
        return distances.index(min(distances))
# Demo: fit the toy data, plot each cluster in its own colour and the
# fitted centroids as black stars.
clf= MeanShift()
clf.fit(X)
centroids=clf.centroids
for classification in clf.classifications:
    color= colors[classification]
    for featureset in clf.classifications[classification]:
        plt.scatter(featureset[0], featureset[1], marker='x', color=color, s=150, linewidth=5)
#plt.scatter(X[:,0], X[:,1], s=150)
for c in centroids:
    plt.scatter(centroids[c][0], centroids[c][1], color='k', marker='*', s=150)
plt.show()
| [
"thisisycee@gmail.com"
] | thisisycee@gmail.com |
6c377ddf31afb64d168fac43c34d5a8f8b3d6389 | b562fa1cef5f47d59a5fd07aee716468f645cc4c | /MENU/menu04.py | 72b8e9126017217bf68c7930e8a8a972c872458f | [] | no_license | heagueron/gistpy | 9d8495ec41846361f864526c516a026317189f55 | 3190842f8e925e563db51f2be2f6c16ebf3be70c | refs/heads/master | 2020-09-10T08:13:21.056939 | 2019-11-28T15:41:19 | 2019-11-28T15:41:19 | 221,697,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,990 | py | # Simple enough, just import everything from tkinter.
from tkinter import *
# Here, we are creating our class, Window, and inheriting from the Frame
# class. Frame is a class from the tkinter module. (see Lib/tkinter/__init__)
class Window(Frame):
    """Main application frame; builds the window title and menu bar."""

    def __init__(self, master=None):
        Frame.__init__(self, master)
        # keep a handle on the enclosing Tk root window
        self.master = master
        self.init_window()

    def init_window(self):
        """Set the title, fill the root window and build the menu bar."""
        self.master.title("GUI")
        self.pack(fill=BOTH, expand=1)

        # top-level menu bar attached to the root window
        menubar = Menu(self.master)
        self.master.config(menu=menubar)

        # "Calculo de Sueldo" menu with a single Exit entry
        salary_menu = Menu(menubar)
        salary_menu.add_command(label="Exit", command=self.client_exit)
        menubar.add_cascade(label="Calculo de Sueldo", menu=salary_menu)

        # "Edit" menu with a (not yet wired) Undo entry
        edit_menu = Menu(menubar)
        edit_menu.add_command(label="Undo")
        menubar.add_cascade(label="Edit", menu=edit_menu)

    def client_exit(self):
        exit()
# root window created. Here, that would be the only window, but
# you can later have windows within windows.
root = Tk()
# fixed initial window size
root.geometry("400x300")
#creation of an instance
app = Window(root)
#mainloop
root.mainloop()
"luisenaguero@gmail.com"
] | luisenaguero@gmail.com |
0cf65d19171af3499e2f681ef98e3bf479285522 | 1a4c66a4310dd58df3d5958e8f2981b05e1fca03 | /cube.py | 70e6dea3df0d9f7e2bff7624db802cb5de5f423e | [] | no_license | Szewy/VirtualCamera | 5817565e6fde37944edab2383b13b03558c47669 | 041adc6f2d72d2799709e68a58751751974c3faa | refs/heads/master | 2020-04-08T04:40:54.022842 | 2018-11-25T12:06:04 | 2018-11-25T12:06:04 | 159,027,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,914 | py | import line3D
import point3D
class Cube:
    """Axis-aligned wireframe cube centred on *middle* with edge *sideLength*.

    Bug fix: ``lines`` used to be a *class* attribute, so every Cube
    instance appended its 12 edges to one list shared by all cubes; it is
    now created per instance in ``__init__``.
    """

    def __init__(self, middle, sideLength):
        # per-instance edge list (previously a shared class attribute)
        self.lines = []
        self.lowerBase(middle, sideLength)
        self.upperBase(middle, sideLength)
        self.sides(middle, sideLength)

    def _add_edge(self, start, end):
        # One wireframe edge between two corner points.
        self.lines.append(line3D.Line3D(start, end))

    def move(self, direction):
        """Translate every edge by *direction*."""
        for line in self.lines:
            line.move(direction)

    def rotate(self, direction):
        """Rotate every edge by *direction*."""
        for line in self.lines:
            line.rotate(direction)

    def lowerBase(self, middle, sideLength):
        """Append the four edges of the bottom face (y = middle.y - half)."""
        half = sideLength / 2.0
        self._add_edge(point3D.Point3D(middle.x - half, middle.y - half, middle.z - half),
                       point3D.Point3D(middle.x - half, middle.y - half, middle.z + half))
        self._add_edge(point3D.Point3D(middle.x - half, middle.y - half, middle.z - half),
                       point3D.Point3D(middle.x + half, middle.y - half, middle.z - half))
        self._add_edge(point3D.Point3D(middle.x + half, middle.y - half, middle.z + half),
                       point3D.Point3D(middle.x - half, middle.y - half, middle.z + half))
        self._add_edge(point3D.Point3D(middle.x + half, middle.y - half, middle.z + half),
                       point3D.Point3D(middle.x + half, middle.y - half, middle.z - half))

    def upperBase(self, middle, sideLength):
        """Append the four edges of the top face (y = middle.y + half)."""
        half = sideLength / 2.0
        self._add_edge(point3D.Point3D(middle.x - half, middle.y + half, middle.z - half),
                       point3D.Point3D(middle.x - half, middle.y + half, middle.z + half))
        self._add_edge(point3D.Point3D(middle.x - half, middle.y + half, middle.z - half),
                       point3D.Point3D(middle.x + half, middle.y + half, middle.z - half))
        self._add_edge(point3D.Point3D(middle.x + half, middle.y + half, middle.z + half),
                       point3D.Point3D(middle.x - half, middle.y + half, middle.z + half))
        self._add_edge(point3D.Point3D(middle.x + half, middle.y + half, middle.z + half),
                       point3D.Point3D(middle.x + half, middle.y + half, middle.z - half))

    def sides(self, middle, sideLength):
        """Append the four vertical edges connecting the two faces."""
        half = sideLength / 2.0
        self._add_edge(point3D.Point3D(middle.x - half, middle.y + half, middle.z - half),
                       point3D.Point3D(middle.x - half, middle.y - half, middle.z - half))
        self._add_edge(point3D.Point3D(middle.x - half, middle.y + half, middle.z + half),
                       point3D.Point3D(middle.x - half, middle.y - half, middle.z + half))
        self._add_edge(point3D.Point3D(middle.x + half, middle.y + half, middle.z - half),
                       point3D.Point3D(middle.x + half, middle.y - half, middle.z - half))
        self._add_edge(point3D.Point3D(middle.x + half, middle.y + half, middle.z + half),
                       point3D.Point3D(middle.x + half, middle.y - half, middle.z + half))

    def getLines(self):
        """Return the list of the cube's 12 Line3D edges."""
        return self.lines
"noreply@github.com"
] | Szewy.noreply@github.com |
4ca085cb307ab32919455ea5d2cdbc1ea7d1c3fe | 13affff9ce87e688f759a57da27947b23ab93dce | /pse/EEGModels_torch.py | 98d672e2e7119a9e22b5343d1c2a99528240ab0a | [] | no_license | KNU-BrainAI/TSA | 09bfe7b6ea14b26098296d0c2206c893f37056a6 | 1e553765c2651485c47285abc2f614c0adac851f | refs/heads/main | 2023-09-04T07:09:47.719560 | 2021-10-01T04:47:49 | 2021-10-01T04:47:49 | 333,003,458 | 1 | 0 | null | 2021-03-04T12:42:29 | 2021-01-26T07:19:28 | Python | UTF-8 | Python | false | false | 3,070 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class EEGNet(nn.Module):
    """EEGNet-style classifier for 64-channel EEG, input (batch, 1, 64, T).

    Bug fix: the original called ``F.dropout(x, 0.5)``, which defaults to
    ``training=True`` and therefore kept dropout active even after
    ``model.eval()``; dropout is now tied to the module's training flag.
    """

    def __init__(self):
        super(EEGNet, self).__init__()
        # Temporal convolution; kernel length = sampling rate / 2
        # NOTE(review): the second positional arg of BatchNorm2d is *eps*,
        # so BatchNorm2d(16, False) sets eps=0 - probably affine=False was
        # intended; kept as-is to preserve trained-weight compatibility.
        self.layer1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=(1, 256)),
            nn.BatchNorm2d(16, False)
        )
        # Depthwise spatial convolution across the 64 electrodes
        self.layer2 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(64, 1), groups=16),
            nn.BatchNorm2d(32, False),
            nn.AvgPool2d(1, 16)
        )
        # Separable convolution: depthwise temporal conv + pointwise mixing
        self.layer3 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(1, 32), groups=32),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(1, 1)),
            nn.BatchNorm2d(32, False),
            nn.AvgPool2d(1, 32)
        )
        self.flatten = nn.Flatten()
        # 32 channels x 4 remaining temporal positions -> 2 classes
        self.linear1 = nn.Linear(32 * 4, 2)

    def forward(self, x):
        # "same" padding for the length-256 temporal kernel
        x = F.pad(x, (127, 128, 0, 0))
        x = self.layer1(x)
        x = F.elu(self.layer2(x))
        # bug fix: respect train/eval mode instead of always dropping
        x = F.dropout(x, 0.5, training=self.training)
        # "same" padding for the length-32 separable kernel
        x = F.pad(x, (15, 16, 0, 0))
        x = F.elu(self.layer3(x))
        x = F.dropout(x, 0.5, training=self.training)
        x = self.flatten(x)
        return self.linear1(x)
class ConstrainedLinear(nn.Linear):
    """Linear layer whose weights are clamped to [-0.25, 0.25] at forward time."""

    def forward(self, input):
        bounded_weight = self.weight.clamp(min=-0.25, max=0.25)
        return F.linear(input, bounded_weight, self.bias)
class Deep_ConvNet(nn.Module):
    """Deep convolutional network for 64-channel EEG classification.

    NOTE(review): the *bias* constructor argument is currently unused, and
    the final layer assumes the input length leaves exactly 14 temporal
    positions after the four pooling stages (200*14 features) - confirm
    against the caller's window size.
    """

    def __init__(self, bias=False, num_class=2):
        super(Deep_ConvNet, self).__init__()
        # temporal conv followed by a spatial conv across all 64 electrodes
        self.conv_split = nn.Sequential(
            nn.Conv2d(1, 25, (1,10), 1),
            nn.Conv2d(25, 25, (64,1), 1, bias=False),
        )
        self.post_conv = nn.Sequential(
            nn.BatchNorm2d(25),
            nn.ELU(),
            nn.MaxPool2d((1,3), 3),
            nn.Dropout(0.3)
        )
        # three conv-pool stages, doubling the channel count each time
        self.conv_pool1 = nn.Sequential(
            nn.Conv2d(25, 50, (1,10), 1, bias=False),
            nn.BatchNorm2d(50),
            nn.MaxPool2d((1,3), 3),
            nn.Dropout(0.3)
        )
        self.conv_pool2 = nn.Sequential(
            nn.Conv2d(50, 100, (1,10), 1, bias=False),
            nn.BatchNorm2d(100),
            nn.MaxPool2d((1,3), 3),
            nn.Dropout(0.3)
        )
        self.conv_pool3 = nn.Sequential(
            nn.Conv2d(100, 200, (1,10), 1, bias=False),
            nn.BatchNorm2d(200),
            nn.MaxPool2d((1,3), 3),
            nn.Dropout(0.3)
        )
        # classifier whose weights are clamped by ConstrainedLinear above
        self.conv_fc = nn.Sequential(
            ConstrainedLinear(200*14*1, num_class)
        )

    def forward(self, x):
        out = self.conv_split(x)
        out = self.post_conv(out)
        out = self.conv_pool1(out)
        out = self.conv_pool2(out)
        out = self.conv_pool3(out)
        # flatten everything but the batch dimension
        out = out.view(-1, np.prod(out.shape[1:]))
        out = self.conv_fc(out)
        return out
"ssissi@knu.ac.kr"
] | ssissi@knu.ac.kr |
783d2e364fd6b142dfe2a0d74ebc6c547bb4c2d6 | 9ca455d13ed883dffcf01c425e54c0301821069b | /migotki/common.py | 88eb7a751691db82f0f228598e5a9a1ce08f0043 | [] | no_license | spc16670/migotka | 705f7c1ce4043b4ba32b2ce32077211884a205f6 | df27ddde0650027ee4bcd7a5660cef5c4413a483 | refs/heads/master | 2023-07-22T23:24:06.484008 | 2020-07-04T14:07:39 | 2020-07-04T14:07:39 | 221,889,764 | 0 | 1 | null | 2023-07-06T21:53:40 | 2019-11-15T09:26:31 | Python | UTF-8 | Python | false | false | 3,082 | py | import numpy as np
from dao import PATIENTS
def first_last_and_independent_data(key, indicator, data, s_label='Training_sessions'):
    """Collect *key* values from first/last training and independent sessions.

    For every case in *data* the first and last training session values are
    gathered, plus the first and last values of the sessions that follow the
    training phase ("independent" use).  NaN entries are skipped.

    Returns (training_firsts, training_lasts,
             independent_firsts, independent_lasts).
    """
    training_firsts, training_lasts = [], []
    independent_firsts, independent_lasts = [], []

    def _append_if_valid(bucket, session):
        value = session[key]
        if not np.isnan(value):
            bucket.append(value)

    for case in data:
        # supervised training phase
        trainings = case.get_training_sessions(indicator)
        _append_if_valid(training_firsts, trainings[0])
        _append_if_valid(training_lasts, trainings[-1])
        # sessions after the training phase count as independent use
        independent = case.data[indicator][case.data[s_label]:]
        if not independent:
            continue
        _append_if_valid(independent_firsts, independent[0])
        _append_if_valid(independent_lasts, independent[-1])
    return training_firsts, training_lasts, independent_firsts, independent_lasts
def first_and_last_data(key, indicator, data=None):
    """First/last training-session values plus a Wilcoxon signed-rank p-value.

    Falls back to the module-level PATIENTS when *data* is not given.
    The patient named 'p9' is excluded from the lasts/paired statistics
    (its first value is still counted).

    Returns (firsts, lasts, common, p_rounded) where *common* holds the
    [first, last] pairs used for the Wilcoxon test.
    """
    if not data:
        data = PATIENTS
    firsts, lasts, common = [], [], []
    for patient in data:
        sessions = patient.get_training_sessions(key)
        first_value = sessions[0][indicator]
        if not np.isnan(first_value):
            firsts.append(first_value)
        last_value = sessions[-1][indicator]
        if patient.name == 'p9':
            # excluded from the last/paired statistics
            continue
        if not np.isnan(last_value):
            lasts.append(last_value)
        if not np.isnan(first_value) and not np.isnan(last_value):
            common.append([first_value, last_value])
    paired_firsts = [pair[0] for pair in common]
    paired_lasts = [pair[1] for pair in common]
    from scipy.stats import wilcoxon
    stat, p = wilcoxon(paired_firsts, paired_lasts)
    return firsts, lasts, common, round(p, 3)
def last_and_sessions(data, indicator='NASA_TLX', key='total', count_key='Training_sessions'):
    """Collect each patient's last training-session value and bucket the
    following (independent) sessions by their 1-based position.

    Returns (lasts, session) where session maps positions 1..10 to lists of
    non-NaN values pooled across patients.
    """
    lasts = []
    session = {sid: [] for sid in range(1, 11)}
    for patient in data:
        scores = patient.data[indicator]
        n_training = patient.data[count_key]
        final_training = scores[n_training - 1][key]
        if not np.isnan(final_training):
            lasts.append(final_training)
        # Sessions after the training phase, numbered from 1.
        for offset, entry in enumerate(scores[n_training:], start=1):
            value = entry[key]
            if not np.isnan(value):
                session[offset].append(value)
    return lasts, session
"simon@ionas.co.uk"
] | simon@ionas.co.uk |
f45c2b76f00e66c11ab699be1106a10ec6d3de56 | bf57320b19d102f9c34e902d714e0af293c34725 | /tests/bundle/test_syntax_error.py | 7c922f904509aef608740ac868bcb5f9cc854454 | [
"Apache-2.0"
] | permissive | RichDijk/ontology-toolkit | 819b6cfb47330accc6763f0f2049ab53e796fc34 | 9f589bcd33952de5f5b1cfc6bd9f9cca09222123 | refs/heads/master | 2023-06-09T08:21:25.705838 | 2020-12-09T17:53:24 | 2020-12-09T17:53:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | from onto_tool import onto_tool
from pytest import raises
import re
def test_syntax_export(caplog):
    """Bundling a config that references malformed RDF must exit(1) and log a parse error."""
    args = ['bundle', 'tests/bundle/syntax_error.yaml']
    with raises(SystemExit) as wrapped_exit:
        onto_tool.main(args)
    assert wrapped_exit.type == SystemExit
    assert wrapped_exit.value.code == 1
    assert re.search(r'Error parsing .*malformed_rdf.ttl at 3', caplog.text)
| [
"boris.pelakh@semanticarts.com"
] | boris.pelakh@semanticarts.com |
df436c894732830af4907c083fb42f9652a62306 | d773d7a415a298ef89c9ecae4cb8589508331ced | /mathoperation.py | f5296b693814e2db67e3ccdaf1781242d02a7bed | [] | no_license | vikasiitb/basic-math-functions | 727c8fe1a67a8f9180739d5aabfcf32f03f3966c | 0b9e991976fff909e865d6b6f3d39341748ddc28 | refs/heads/master | 2020-05-03T08:31:46.347382 | 2019-03-30T08:03:57 | 2019-03-30T08:03:57 | 178,528,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py |
# Simple interactive calculator: reads two integers and applies the chosen
# arithmetic operation.
x = int(input("Enter first number"))
y = int(input("Enter second number"))
a = input("What kind of operation do you wish to perform?\n\
A) Addition\n\
B) Subtraction\n\
C) Multiplication\n\
D) Division\n\
E) Remainder\n\
")
# addition
if a in ['A', 'A)', '(A)', 'A) Addition']:
    z = x + y
    print(z)
# subtraction
elif a in ['B', 'B)', '(B)', 'B) Subtraction']:
    z = x - y
    print(z)
# multiplication
elif a in ['C', 'C)', '(C)', 'C) Multiplication']:
    z = x * y
    print(z)
# division
elif a in ['D', 'D)', '(D)', 'D) Division']:
    # BUG FIX: this branch previously computed x % y (the remainder);
    # division must use true division.
    z = x / y
    print(z)
# remainder
elif a in ['E', 'E)', '(E)', 'E) Remainder']:
    # BUG FIX: the option literal was '(E' (missing the closing parenthesis),
    # so typing "(E)" never matched this branch.
    z = x % y
    print(z)
| [
"noreply@github.com"
] | vikasiitb.noreply@github.com |
2304d55764d05234b86f033d8dcebf7cbc47df6c | 1d49c8ed557f11a46abe63661059ff6f9539e8a0 | /2017/day5/maze.py | b3ae833a5e9e5f4cb588a841bd7be3e77f7e05a2 | [] | no_license | geirtul/advent_of_code | fc56026ffe4084ac77b4f69e13d723a1133aebcb | 41aa150426aeee0614a3f3f5205e1d0eacc5dfd3 | refs/heads/master | 2021-12-20T13:59:25.855594 | 2021-12-18T00:19:46 | 2021-12-18T00:19:46 | 159,924,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | import numpy as np
# Advent of Code 2017, day 5 (part 2): follow the jump offsets until the
# position escapes the list, counting the steps taken.
jumps = np.loadtxt("input.txt", dtype=int)
steps = 0
pos = 0
while 0 <= pos < len(jumps):
    offset = jumps[pos]
    # Part 2 rule: offsets of three or more decrease by one, smaller ones
    # increase by one. (For part 1, replace the next line with: jumps[pos] += 1)
    jumps[pos] += -1 if offset >= 3 else 1
    pos += offset
    steps += 1
print(steps)
| [
"ulvik@luring.uio.no"
] | ulvik@luring.uio.no |
19eb1182906a25bd4be329e444b55aae2bf59e9c | 4cdea8c9d4f71e6459ac1a0d66856199eb4e8d46 | /experimental/lane_change/explore.py | 1d6f3467bdae70b0e25b14894dd8ef2dcfba53f1 | [] | no_license | chrisngan24/fydp | 0a8c94c0667a42a8f618f9cb6f4a07f768ea74fc | 9e51bc6c49c0e5427a0f93fdc14e90a1cbf94cc8 | refs/heads/master | 2021-01-09T06:57:45.129755 | 2016-03-31T01:16:17 | 2016-03-31T01:16:17 | 43,266,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,384 | py |
import pandas as pd
import numpy as np
import datetime
import math
import os
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import gridspec
from ggplot import *
import json
from IPython.display import Image
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.cluster import MeanShift
import sklearn.cluster as cluster
import sklearn.cross_validation as cross_validation
import sys
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.externals import joblib
from scipy import signal
# Directory layout for recordings, plots and serialized models.
data_direc = os.path.join( "data")
plot_direc = os.path.join("plots")
model_direc = os.path.join("models")
lane_change_models_direc = os.path.join(model_direc, "lane_changes")
# Sensor / face-tracking columns excluded from feature generation.
ignore_columns = ["Unnamed: 0", "az", "gx", "gz_raw", "gy","ax", "ay", "theta", "time_diff", "faceBottom", "faceLeft", "faceRight", "faceTop", "isFrontFace", "noseX", "noseY", "time", "timestamp_y", "frameIndex", "timestamp_x"]
# relevant_columns = ['gz', 'gz_0', 'gz_1', 'gz_2', 'gz_3', 'gz_4', 'gz_5', 'gz_6', 'gz_7', 'gz_8', 'gz_9']
# Features actually fed to the models: gz (gyro z — presumably yaw rate,
# TODO confirm against the data schema) and its 4-steps-ahead window value.
relevant_columns = ['gz', 'gz_4']
# Windowing / clustering / smoothing hyper-parameters.
window_size=5
step=10
n_clusters=3
moving_average_size = 20
# NOTE(review): the scaler is currently unused — every fit_transform call
# below is commented out.
scaler = preprocessing.MinMaxScaler(feature_range=(-1,1))
def normalize(arr):
    """Linearly rescale `arr` into the [0, 1] range.

    Returns a list of floats. Robustness fix: a constant input (zero range)
    now maps to all 0.0 instead of raising ZeroDivisionError.
    """
    min_x = min(arr)
    range_x = max(arr) - min_x
    if range_x == 0:
        # Degenerate case: every element is identical.
        return [0.0 for _ in arr]
    return [ float(x-min_x) / float(range_x) for x in arr ]
def subtract_from_prev_val(df, col, step=1):
    """Return df[col] minus the value `step` rows earlier.

    The first `step` positions have no predecessor and come back as NaN.
    """
    shifted = df.shift(periods=step)
    return df[col] - shifted[col]
def generate_features(df, suffix = '_diff_', step=1, relevant_features=[], ignore_columns=[]):
    """Build a frame of step-differenced feature columns (same length as `df`).

    :param df: input data frame
    :param suffix: appended to each source column name in the output
    :param step: how many rows back to difference against
    :param relevant_features: columns to transform
    :param ignore_columns: unused here; kept for call-site compatibility
    """
    shifted = df.shift(periods=step)
    deltas = {'%s%s'% (name, suffix): df[name] - shifted[name]
              for name in relevant_features}
    return pd.DataFrame(deltas)
def generate_windowed_df(df):
    """Difference the module-level `relevant_columns` of `df` (step rows
    apart), zero-fill the leading NaNs, and attach a detrended copy of each
    raw column.
    """
    windowed = generate_features(
        df,
        relevant_features=relevant_columns,
        step=step,
        ignore_columns=ignore_columns,
    ).fillna(0)
    for name in relevant_columns:
        # NOTE: detrend is applied to the raw df column, not the differenced one.
        windowed[name] = signal.detrend(df[name])
    return windowed
def generate_windows(df, window=10, ignore_columns=ignore_columns):
    """For every row, flatten the next `window` rows into extra columns.

    Each future value becomes a '<col>_<j>' column (j = 0..window-1);
    positions past the end of the frame are None and are then filled with 0.
    Columns listed in `ignore_columns` are not windowed.
    """
    keep = df.columns.values.tolist()
    for unwanted in ignore_columns:
        if unwanted in keep:
            keep.remove(unwanted)
    points = []
    for i, current in df.iterrows():
        row = current.to_dict()
        # Look-ahead slice, re-indexed from 0 (loc is inclusive).
        lookahead = df.loc[i:min(i + 100, len(df) - 1)].reset_index(drop=True)
        for j in xrange(0, window):
            future = lookahead.loc[j].to_dict() if j < len(lookahead) else None
            for name in keep:
                row['%s_%s' % (name, j)] = future[name] if future != None else None
        points.append(row)
    return pd.DataFrame(points).fillna(0)
def cluster_using_kmeans(df, filename, n_components=2, n_clusters=3):
    """Cluster the rows of `df` with k-means and return the label array.

    :param df: feature matrix (DataFrame)
    :param filename: unused; kept for call-site compatibility
    :param n_components: unused; kept for call-site compatibility (a PCA
        projection used to be computed here but its result was never fed
        to k-means)
    :param n_clusters: number of k-means clusters
    """
    # Dead-code fix: the previous version also ran
    # PCA(n_components).fit_transform(df) and discarded the result.
    kmean = KMeans(n_clusters=n_clusters)
    return kmean.fit_predict(df)
def movingaverage(interval, window_size):
    """Centered moving average: convolve with a uniform kernel, 'same'-length output."""
    kernel = np.full(int(window_size), 1.0 / float(window_size))
    return np.convolve(interval, kernel, "same")
def generate_training_and_test_data(df, cluster_labels, train_percentage):
    """Label-encode `cluster_labels` and split `df` into ordered train/test parts.

    The split is sequential (no shuffling): the first `train_percentage`
    fraction of rows is the training set, the remainder the test set.

    NOTE(review): `df.Labels = ...` sets an *attribute* on the DataFrame
    rather than adding a column when 'Labels' is not already a column (as in
    the call from train(), where df holds only relevant_columns). X therefore
    contains only the original feature columns, and the input frame is
    mutated as a side effect.
    """
    le = preprocessing.LabelEncoder()
    df.Labels = le.fit(cluster_labels).transform(cluster_labels)
    y = df.Labels
    X = df
    # Despite the name, this is the boundary index between train and test.
    test_index = int(len(df) * train_percentage)
    X_train = X[:test_index]
    y_train = y[:test_index]
    X_test = X[test_index:]
    y_test = y[test_index:]
    return X_train, y_train, X_test, y_test
def random_forest(x_train, y_train, x_test, y_test):
    """Fit a 10-tree random forest, pickle it under models/, return test accuracy."""
    model = RandomForestClassifier(n_estimators=10, n_jobs=-1)
    model.fit(x_train, y_train)
    held_out_accuracy = model.score(x_test, y_test)
    joblib.dump(model, "%s/random_forest.pkl" % model_direc)
    return held_out_accuracy
def knn(x_train, y_train, x_test, y_test):
    """Fit a k-nearest-neighbours classifier, pickle it under models/, return test accuracy."""
    model = KNeighborsClassifier()
    model.fit(x_train, y_train)
    held_out_accuracy = model.score(x_test, y_test)
    joblib.dump(model, "%s/knn.pkl" % model_direc)
    return held_out_accuracy
def svm(x_train, y_train, x_test, y_test):
    """Fit a support-vector classifier, pickle it under models/, return test accuracy."""
    model = SVC()
    model.fit(x_train, y_train)
    held_out_accuracy = model.score(x_test, y_test)
    joblib.dump(model, "%s/svm.pkl" % model_direc)
    return held_out_accuracy
def logistic_regression(x_train, y_train, x_test, y_test):
    """Fit a logistic-regression classifier, pickle it under models/, return test accuracy."""
    model = LogisticRegression()
    model.fit(x_train, y_train)
    held_out_accuracy = model.score(x_test, y_test)
    joblib.dump(model, "%s/logistic_regression.pkl" % model_direc)
    return held_out_accuracy
def train_all_models(x_train, y_train, x_test, y_test):
print "Random forest: ", random_forest(x_train, y_train, x_test, y_test)
print "KNN: ", knn(x_train, y_train, x_test, y_test)
# print "SVM: ", svm(x_train, y_train, x_test, y_test)
# print "Logistic Regression: ", logistic_regression(x_train, y_train, x_test, y_test)
def get_data(filename):
    """Load a CSV file and replace every missing value with 0."""
    frame = pd.read_csv(filename)
    return frame.fillna(0)
def train():
    """Train the lane-change classifiers from the recorded driving sessions.

    Walks data/, loading each session's fused.csv: left_10*/right_10*
    directories hold left/right lane changes; neg_*, left_turn* and
    right_turn* are negative examples. gz is smoothed, the frames are
    windowed, rows are labelled by k-means clustering, and the classifiers
    are fitted and pickled by train_all_models().
    """
    left_dfs = []
    right_dfs = []
    neg_dfs = []
    for subdir, dirs, files in os.walk(data_direc):
        for d in dirs:
            if d.startswith("left_10") and not d.startswith("left_turn"):
                df = pd.read_csv("%s/fused.csv" %os.path.join(data_direc, d))
                df['gz'] = movingaverage(df['gz'], moving_average_size)
                # df['gz'] = scaler.fit_transform(df['gz'])
                left_dfs.append(df)
            elif d.startswith("right_10") and not d.startswith("right_turn"):
                df = pd.read_csv("%s/fused.csv" %os.path.join(data_direc, d))
                df['gz'] = movingaverage(df['gz'], moving_average_size)
                # df['gz'] = scaler.fit_transform(df['gz'])
                right_dfs.append(df)
            elif d.startswith("neg_") or d.startswith("right_turn") or d.startswith("left_turn"):
                df = pd.read_csv("%s/fused.csv" %os.path.join(data_direc, d))
                df['gz'] = movingaverage(df['gz'], moving_average_size)
                # df['gz'] = scaler.fit_transform(df['gz'])
                neg_dfs.append(df)
    df_left = pd.concat(left_dfs, axis=0, join="outer", join_axes=None, ignore_index=True, keys=None, levels=None, names=None, verify_integrity=False)
    df_right = pd.concat(right_dfs, axis=0, join="outer", join_axes=None, ignore_index=True, keys=None, levels=None, names=None, verify_integrity=False)
    # NOTE(review): df_neg / windowed_df_neg are computed but never used below.
    df_neg = pd.concat(neg_dfs, axis=0, join="outer", join_axes=None, ignore_index=True, keys=None, levels=None, names=None, verify_integrity=False)
    windowed_df_left = generate_windows(df_left, window=window_size)
    windowed_df_right = generate_windows(df_right, window=window_size)
    windowed_df_neg = generate_windows(df_neg, window=window_size)
    # Older manual relabelling of the k-means clusters, kept for reference:
    # left_clusters = cluster_using_kmeans(windowed_df_left, "", n_clusters=n_clusters)
    # right_clusters = cluster_using_kmeans(windowed_df_right, "", n_clusters=n_clusters)
    # c1_left = left_clusters[np.where(left_clusters!=left_clusters[0])[0][0]]
    # c1_right = right_clusters[np.where(right_clusters!=right_clusters[0])[0][0]]
    # left_clusters = np.array(map(lambda x: 0 if x == left_clusters[0] else 2 if x == c1_left else 1, left_clusters))
    # right_clusters = np.array(map(lambda x: 0 if x == right_clusters[0] else 2 if x == c1_right else 1, right_clusters))
    # neg_clusters = np.array([left_clusters[0]]*len(windowed_df_neg))
    df_train = pd.concat([windowed_df_left, windowed_df_right], join="outer", ignore_index=True)
    df_train = df_train[relevant_columns]
    # Unsupervised labels: cluster the windowed features, then fit supervised
    # models on those labels (sequential 99%/1% train/test split).
    cluster_labels = cluster_using_kmeans(df_train, "", n_clusters=n_clusters)
    x_train, y_train, x_test, y_test = generate_training_and_test_data(df_train, np.array(cluster_labels), 0.99)
    train_all_models(x_train, y_train, x_test, y_test)
    print x_train.columns
def predict(direc):
    """Run the saved classifier over one recording and report lane-change events.

    Loads <direc>/model.csv, smooths gz, windows it into the model's feature
    layout, predicts per-row labels with the pickled knn model, saves two
    diagnostic scatter plots into `direc`, and writes the detected event list
    to <direc>/events.txt.
    """
    df = get_data("%s/model.csv" %direc)
    # Same smoothing as applied at training time.
    df['gz'] = movingaverage(df['gz'], moving_average_size)
    # df['gz'] = scaler.fit_transform(df['gz'])
    windowed_df_test = generate_windows(df, window=window_size)
    windowed_df_test = windowed_df_test[relevant_columns]
    model_name = "knn"
    clf = joblib.load("%s/%s.pkl" %(model_direc, model_name))
    predicted_labels_test = clf.predict(windowed_df_test)
    windowed_df_test['theta'] = df['theta']
    # Diagnostic plots: angle and angular velocity coloured by predicted label.
    plt.figure()
    plt.scatter(df.index, df["theta"], c=predicted_labels_test, cmap='jet')
    plt.title("Test Windowed Kmeans Clustering Angle (K = %s)" %str(n_clusters))
    plt.savefig("%s/%s_%s.png" %(direc, "windowed_kmeans_test_theta", str(n_clusters)))
    plt.figure()
    plt.scatter(df.index, df["gz"], c=predicted_labels_test, cmap='jet')
    plt.title("Test Windowed Kmeans Clustering Angular Velocity (K = %s) " %str(n_clusters))
    plt.savefig("%s/%s_%s.png" %(direc, "windowed_kmeans_test_gz", str(n_clusters)))
    with open("%s/events.txt" %direc, 'w') as f:
        f.write(str(detect_events(df, predicted_labels_test)))
def detect_events(df, predicted_labels_test):
    """Scan the per-row predicted label sequence for lane-change signatures.

    A left lane change is the label run 0 -> 2 -> 1 -> 2 -> 0 (null, pos,
    neg, pos, null); a right change is the mirrored 0 -> 1 -> 2 -> 1 -> 0.
    Only matches whose start and end are more than 30 rows apart are kept.
    Returns a list of (start_index, end_index, event_name) tuples.

    NOTE(review): `df` and `state` are unused, and the initial
    `null_label = predicted_labels_test[0]` is overwritten by the constant
    0 a few lines below.
    """
    null_label = predicted_labels_test[0]
    state = 0
    events = {
        "left_lc_start": set(),
        "left_lc_end" : set(),
        "right_lc_start": set(),
        "right_lc_end" : set(),
    }
    left_lc_start = 0
    right_lc_start = 0
    left_lc_end = 0
    right_lc_end = 0
    pos_label = 2
    neg_label = 1
    null_label = 0
    left_lane_sequence = [null_label, pos_label, neg_label, pos_label, null_label]
    right_lane_sequence = [null_label, neg_label, pos_label, neg_label, null_label]
    left_index = 0
    right_index = 0
    for i in xrange(len(predicted_labels_test)):
        # Advance each pattern matcher when the current label is the one it expects.
        if predicted_labels_test[i] == left_lane_sequence[left_index]:
            left_index = (left_index + 1) % len(left_lane_sequence)
            print 'left', i, left_index, left_lane_sequence[left_index]
        if predicted_labels_test[i] == right_lane_sequence[right_index]:
            right_index = (right_index + 1) % len(right_lane_sequence)
            print 'right', i, right_index, right_lane_sequence[right_index]
        # Remember where a candidate event started.
        if left_index == 1:
            left_lc_start = i
        if right_index == 1:
            right_lc_start = i
        # A full pattern match resets both matchers and marks the event end.
        if left_index == len(left_lane_sequence) - 1:
            left_index = 0
            right_index = 0
            left_lc_end = i
        elif right_index == len(right_lane_sequence) - 1:
            left_index = 0
            right_index = 0
            right_lc_end = i
        # Record only events longer than 30 rows.
        if left_lc_start > 0 and left_lc_end > 0 and left_lc_end - left_lc_start > 30:
            events["left_lc_start"].add(left_lc_start)
            events["left_lc_end"].add(left_lc_end)
        if right_lc_start > 0 and right_lc_end > 0 and right_lc_end - right_lc_start > 30:
            events["right_lc_start"].add(right_lc_start)
            events["right_lc_end"].add(right_lc_end)
    for k, v in events.iteritems():
        events[k] = sorted(list(v))
    # Pair the sorted start/end indices into (start, end, name) tuples.
    events_indices = []
    for i in xrange(len(events['left_lc_start'])):
        t = (events['left_lc_start'][i], events['left_lc_end'][i], 'left_lane_change')
        events_indices.append(t)
    for i in xrange(len(events['right_lc_start'])):
        t = (events['right_lc_start'][i], events['right_lc_end'][i], 'right_lane_change')
        events_indices.append(t)
    return events_indices
if __name__ == "__main__":
    # train()  # uncomment to retrain the models before predicting
    # NOTE(review): these three lists are never used in this branch.
    left_dfs = []
    right_dfs = []
    neg_dfs = []
    # Run event detection over every recording directory, then the root dir.
    for subdir, dirs, files in os.walk(data_direc):
        for d in dirs:
            predict(os.path.join(data_direc, d))
    predict(os.path.join(data_direc))
| [
"angelagu93@gmail.com"
] | angelagu93@gmail.com |
0181f8f8efef82d05e9575408e76a583138cd4df | c4e3ea5b1fc68c0669228cfd38eef8e390f5665a | /zo_table/transposition_table.py | 6aa629015e90deade6a6ad8ff638fb3e94e9d0b1 | [] | no_license | theshevon/A2-COMP30024 | 3ce1910c3f42fe0b9b0a248a50ad7f951c6554ad | 9f69ea0c523067886d0fe8a13e90728afd8b6a7b | refs/heads/master | 2020-05-18T18:35:01.411496 | 2019-05-21T12:22:27 | 2019-05-21T12:22:27 | 184,589,956 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,021 | py |
import numpy as np
import array
class T_table():
    """Fixed-size, direct-mapped transposition table for game-tree search.

    Entries live in parallel numpy class arrays indexed by key % num_entries;
    a colliding key simply overwrites the old slot (replace_entry is
    unconditional). Keys are expected to be numpy uint64 hashes so that the
    modulo stays an integer type usable as an index.
    """
    match_count = 0
    num_entries = np.uint64(100000)
    # Parallel storage arrays, one slot per bucket. t_key starts zeroed so an
    # untouched slot never matches a (non-zero) probe key.
    t_key = np.zeros([1, num_entries], dtype="uint64")
    t_move = np.ndarray([1, num_entries],
                        dtype=[('start_move', np.int16, (1, 2)),
                               ("end_move", np.int16, (1, 2))])
    t_score = np.ndarray([1, num_entries], dtype="int16")
    t_depth = np.ndarray([1, num_entries], dtype="int8")
    t_flag = np.ndarray([1, num_entries], dtype="int8")

    def lookup_score(self, key, depth):
        """Return (score, flag, move) when `key` is stored with depth >= `depth`,
        otherwise (None, None, None)."""
        slot = self.get_index(key)
        if self.t_key[0, slot] == key and self.t_depth[0, slot] >= depth:
            return self.t_score[0, slot], self.t_flag[0, slot], self.return_tuple(slot)
        return None, None, None

    def replace_entry(self, key, score, depth, action, flag):
        """Unconditionally store an entry; `action` is ((r0, c0), (r1, c1))."""
        slot = self.get_index(key)
        self.t_key[0, slot] = key
        self.t_score[0, slot] = score
        self.t_depth[0, slot] = depth
        self.t_flag[0, slot] = flag
        (start_r, start_c), (end_r, end_c) = action
        self.t_move[0, slot]["start_move"][0, 0] = start_r
        self.t_move[0, slot]["start_move"][0, 1] = start_c
        self.t_move[0, slot]["end_move"][0, 0] = end_r
        self.t_move[0, slot]["end_move"][0, 1] = end_c

    def get_index(self, key):
        """Map a hash key to its bucket index."""
        return key % self.num_entries

    def return_tuple(self, index):
        """Rebuild the stored move as ((r0, c0), (r1, c1))."""
        move = self.t_move[0, index]
        return ((move["start_move"][0, 0], move["start_move"][0, 1]),
                (move["end_move"][0, 0], move["end_move"][0, 1]))
| [
"crowed@student.unimelb.edu.au"
] | crowed@student.unimelb.edu.au |
a77af95f91af2688c10e3aed7e6317a7aef168d9 | 6fdc0486e22d1d6908b9c1cf8d8361c44c1b01c0 | /multitask.py | 693a9e873442b2a561695fab1f6fdd54be920924 | [] | no_license | whiplash003/srmj_multitask | 885bab4491b377d3c6052a3742ffc54dae3c3cf2 | e55f8a6f71374d07c72537faf86ef2ca81cff51c | refs/heads/master | 2022-09-17T08:09:31.113792 | 2020-05-27T14:26:51 | 2020-05-27T14:26:51 | 264,629,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,379 | py |
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import math
import time
import os
import copy
from torch.utils.data import Dataset, DataLoader
# from PIL import Image
from random import randrange
import torch.nn.functional as F
from sklearn.metrics import f1_score
from prefetch_generator import BackgroundGenerator
import pandas as pd
from sklearn.model_selection import train_test_split
from visdom import Visdom
class DataLoaderX(DataLoader):
    """DataLoader that wraps iteration in a BackgroundGenerator so upcoming
    batches are prefetched on a background thread while the main thread
    runs the training step."""
    def __iter__(self):
        return BackgroundGenerator(super().__iter__())
# num_data = 40000  # number of mahjong games * 4 (only the last 4 moves per game, train + test)
batch_size = 256
# # loss/acc history lists (superseded by the Visdom plots below)
# train_loss = [[], [], [], []]
# val_loss = [[], [], [], []]
# train_acc = [[], [], []]
# val_acc = [[], [], []]
# Initialise the Visdom line plots used for the loss and accuracy curves.
viz = Visdom(env='srmj')
x, y = 0, 0
win1 = viz.line(X=np.array([x]), Y=np.array([[y,y,y,y]]),
                opts=dict(title='train_Loss',legend=['epoch_loss','opp1_loss','opp2_loss','opp3_loss']))
win2 = viz.line(X=np.array([x]), Y=np.array([[y,y,y,y]]),
                opts=dict(title='val_Loss',legend=['epoch_loss','opp1_loss','opp2_loss','opp3_loss']))
win3 = viz.line(X=np.array([x]), Y=np.array([[y,y,y]]),
                opts=dict(title='train_Acc',legend=['opp1_acc','opp2_acc','opp3_acc']))
win4 = viz.line(X=np.array([x]), Y=np.array([[y,y,y]]),
                opts=dict(title='val_Acc',legend=['opp1_acc','opp2_acc','opp3_acc']))
# Use the GPU when available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
# Paths to the on-disk per-game tensors and labels (used only by the
# commented-out __getitem__ variant in srmj_dataset below).
data_path = os.getcwd() + '/data/data/'
labels_path = os.getcwd() + '/data/labels/'
# Custom Dataset serving pre-loaded (features, per-opponent labels) samples.
class srmj_dataset(Dataset):
    """Mahjong dataset.

    `king_of_lists` is [X, y]: X[index] is one board-state numpy array and
    y[index] holds the three opponents' label vectors (presumably 34-dim to
    match the model heads — TODO confirm). __getitem__ returns
    (input_tensor, opp1_labels, opp2_labels, opp3_labels).
    """
    def __init__(self, king_of_lists, transform=None):
        self.king_of_lists = king_of_lists
        self.transform = transform
    def __getitem__(self, index):
        # Old variant: load only the last four moves of each game from disk.
        # x_numpy = np.load(
        #     data_path+'train' + str(math.floor(index / 4)) + '.npy')
        # x_numpy = x_numpy[-(index % 4 + 1)]
        #
        # y_label = np.load(labels_path+'effect_tile'+str(math.floor(index / 4)) + '.npy')
        # opp1_waiting = y_label[-(index % 4 + 1)][0] # opp1_waiting
        # opp2_waiting = y_label[-(index % 4 + 1)][1] # opp2_waiting
        # opp3_waiting = y_label[-(index % 4 + 1)][2] # opp3_waiting
        # Current variant: every move of every game, already held in memory.
        x_numpy = torch.from_numpy(self.king_of_lists[0][index])
        if self.transform is not None:
            x_numpy = self.transform(x_numpy)
        # list_of_labels = [torch.from_numpy(np.array(opp1_waiting)),
        #                   torch.from_numpy(np.array(opp2_waiting)),
        #                   torch.from_numpy(np.array(opp3_waiting))]
        list_of_labels = [torch.from_numpy(self.king_of_lists[1][index][0]),
                          torch.from_numpy(self.king_of_lists[1][index][1]),
                          torch.from_numpy(self.king_of_lists[1][index][2])]
        # list_of_labels = torch.FloatTensor(list_of_labels)
        # print(list_of_labels)
        return x_numpy, list_of_labels[0], list_of_labels[1], list_of_labels[2]
    def __len__(self):
        return len(self.king_of_lists[0])
# Data augmentation and normalization for training
# Just normalization for validation
# (Normalize uses the standard ImageNet channel mean/std.)
# NOTE(review): this dict is not referenced anywhere below — the datasets
# are built without a transform; presumably a leftover from an
# image-classification template.
data_transforms = {
    'train': transforms.Compose([
        transforms.Resize((256,256)),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize((224,224)),
        #transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
# Multi-task model: shared backbone plus three per-opponent output heads.
class multi_output_model(torch.nn.Module):
    """Shared trunk (`model_core`, a ResNet whose fc outputs 512 features)
    followed by 512->256->128->64 linear layers with BatchNorm + ReLU, then
    three sigmoid heads of 34 units each (one per opponent).

    `dd` was the dropout rate for the commented-out Dropout layer and is
    currently unused.
    """
    def __init__(self, model_core, dd):
        super(multi_output_model, self).__init__()
        self.resnet_model = model_core
        self.x1 = nn.Linear(512, 256)
        nn.init.xavier_normal_(self.x1.weight)
        self.bn1 = nn.BatchNorm1d(256, eps=1e-2)
        self.x2 = nn.Linear(256,128)
        nn.init.xavier_normal_(self.x2.weight)
        self.bn2 = nn.BatchNorm1d(128, eps=1e-2)
        self.x3 = nn.Linear(128,64)
        nn.init.xavier_normal_(self.x3.weight)
        self.bn3 = nn.BatchNorm1d(64, eps=1e-2)
        # self.x3 = nn.Linear(64,32)
        # nn.init.xavier_normal_(self.x3.weight)
        # comp head 1
        # Output heads, one 34-way sigmoid layer per opponent.
        self.y1o = nn.Linear(64, 34)
        nn.init.xavier_normal_(self.y1o.weight)
        self.y2o = nn.Linear(64, 34)
        nn.init.xavier_normal_(self.y2o.weight)
        self.y3o = nn.Linear(64, 34)
        nn.init.xavier_normal_(self.y3o.weight)
        # self.d_out = nn.Dropout(dd)
    def forward(self, x):
        x = self.resnet_model(x)
        # x1 = F.relu(self.x1(x))
        x1 = self.bn1(F.relu(self.x1(x)))
        # x = F.relu(self.x2(x))
        # x1 = F.relu(self.x3(x))
        x2 = self.bn2(F.relu(self.x2(x1)))
        x3 = self.bn3(F.relu(self.x3(x2)))
        # Heads: sigmoid activations to pair with the BCELoss criteria.
        y1o = torch.sigmoid(self.y1o(x3)) # should be sigmoid
        y2o = torch.sigmoid(self.y2o(x3)) # should be sigmoid
        y3o = torch.sigmoid(self.y3o(x3)) # should be sigmoid
        # y1o = self.y1o(x1)
        # y2o = self.y2o(x1)
        # y3o = self.y3o(x1)
        # y4o = self.y4o(x1)
        # y5o = self.y5o(x1) #should be sigmoid|
        return y1o, y2o, y3o
# Training-loop function for the multi-task model.
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train `model`, tracking per-head BCE losses and element-wise accuracies.

    `criterion` is a list of three loss functions (one per opponent head).
    The Visdom windows win1..win4 are updated after every epoch, and the
    weights from the epoch with the lowest validation loss are restored and
    returned at the end.

    NOTE(review): in the 'val' phase `loss` is never recomputed (it is only
    assigned inside the phase == 'train' branch), so the validation
    running_loss re-uses the last *training* batch's total loss; the
    per-head epoch_loss0..2 values are still correct.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 100  # despite the name, this tracks the lowest validation epoch loss
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                scheduler.step()
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_loss0 = 0.0
            running_loss1 = 0.0
            running_loss2 = 0.0
            running_corrects = 0
            opp1_corrects = []
            opp2_corrects = []
            opp3_corrects = []
            total_opp1 = []
            total_opp2 = []
            total_opp3 = []
            # Iterate over data.
            for inputs, opp1_waiting, opp2_waiting, opp3_waiting in dataloaders_dict[phase]:
                inputs = torch.tensor(inputs, dtype=torch.float32)
                inputs = inputs.to(device)
                # print(inputs.size())
                opp1_waiting = opp1_waiting.to(device)
                opp2_waiting = opp2_waiting.to(device)
                opp3_waiting = opp3_waiting.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    # print(inputs)
                    outputs = model(inputs)
                    loss0 = criterion[0](outputs[0], opp1_waiting.float())
                    loss1 = criterion[1](outputs[1], opp2_waiting.float())
                    loss2 = criterion[2](outputs[2], opp3_waiting.float())
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss = loss0 + loss1 + loss2
                        # print(loss, loss0,loss1, loss2, loss3,loss4)
                        loss.backward()
                        optimizer.step()
                # Batch statistics (loss sums weighted by batch size, and the
                # count of correctly rounded label elements per head).
                running_loss += loss.item() * inputs.size(0)
                running_loss0 += loss0.item() * inputs.size(0)
                running_loss1 += loss1.item() * inputs.size(0)
                running_loss2 += loss2.item() * inputs.size(0)
                opp1_corrects.append(
                    float((np.rint(outputs[0].cpu().detach().numpy()) == opp1_waiting.cpu().detach().numpy()).sum()))
                total_opp1.append(float((opp1_waiting.size()[0] * opp1_waiting.size(1))))
                opp2_corrects.append(
                    float((np.rint(outputs[1].cpu().detach().numpy()) == opp2_waiting.cpu().detach().numpy()).sum()))
                total_opp2.append(float((opp2_waiting.size()[0] * opp2_waiting.size(1))))
                opp3_corrects.append(
                    float((np.rint(outputs[2].cpu().detach().numpy()) == opp3_waiting.cpu().detach().numpy()).sum()))
                total_opp3.append(float((opp3_waiting.size()[0] * opp3_waiting.size(1))))
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_loss0 = running_loss0 / dataset_sizes[phase]
            epoch_loss1 = running_loss1 / dataset_sizes[phase]
            epoch_loss2 = running_loss2 / dataset_sizes[phase]
            opp1_acc = float(sum(opp1_corrects)) / sum(total_opp1)
            opp2_acc = float(sum(opp2_corrects)) / sum(total_opp2)
            opp3_acc = float(sum(opp3_corrects)) / sum(total_opp3)
            # Alternative F1-based metrics, kept for reference:
            # opp1_corrects_array = np.rint(outputs[0].cpu().detach().numpy())
            # opp1_acc = f1_score(opp1_waiting.cpu().float(), opp1_corrects_array,
            #                     average='macro')
            # opp2_corrects_array = np.rint(outputs[1].cpu().detach().numpy())
            # opp2_acc = f1_score(opp2_waiting.cpu().float(), opp2_corrects_array,
            #                     average='macro')
            # opp3_corrects_array = np.rint(outputs[2].cpu().detach().numpy())
            # opp3_acc = f1_score(opp3_waiting.cpu().float(), opp3_corrects_array,
            #                     average='macro')
            print('{} epoch loss: {:.4f} opp1_waiting loss: {:.4f} '
                  'opp2_waiting loss: {:.4f} opp3_waiting loss: {:.4f} '.format(
                phase, epoch_loss, epoch_loss0,epoch_loss1, epoch_loss2,))
            print('{} opp1_corrects: {:.4f} '
                  'opp2_corrects: {:.4f} opp3_corrects: {:.4f} '.format(
                phase, opp1_acc, opp2_acc, opp3_acc))
            # Push this epoch's losses / accuracies to the Visdom curves.
            if phase == 'train':
                # train_loss[0].append(loss)
                # train_loss[1].append(loss0)
                # train_loss[2].append(loss1)
                # train_loss[3].append(loss2)
                # train_acc[0].append(opp1_waiting_corrects)
                # train_acc[1].append(opp2_waiting_corrects)
                # train_acc[2].append(opp3_waiting_corrects)
                # Update the training loss/acc curves.
                viz.line(X=np.array([epoch]), Y=np.array([[epoch_loss,epoch_loss0,epoch_loss1,epoch_loss2]]),
                         win=win1, update='append')
                viz.line(X=np.array([epoch]), Y=np.array([[opp1_acc,opp2_acc,opp3_acc]]),
                         win=win3, update='append')
                # time.sleep(0.5)
            if phase == 'val':
                # val_loss[0].append(loss)
                # val_loss[1].append(loss0)
                # val_loss[2].append(loss1)
                # val_loss[3].append(loss2)
                # val_acc[0].append(opp1_waiting_corrects)
                # val_acc[1].append(opp2_waiting_corrects)
                # val_acc[2].append(opp3_waiting_corrects)
                # Update the validation loss/acc curves.
                viz.line(X=np.array([epoch]), Y=np.array([[epoch_loss, epoch_loss0, epoch_loss1, epoch_loss2]]),
                         win=win2, update='append')
                viz.line(X=np.array([epoch]), Y=np.array([[opp1_acc, opp2_acc, opp3_acc]]),
                         win=win4, update='append')
                # time.sleep(0.5)
            # deep copy the model
            if phase == 'val' and epoch_loss < best_acc:
                print('saving with loss of {}'.format(epoch_loss),
                      'improved over previous {}'.format(best_acc))
                best_acc = epoch_loss
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(float(best_acc)))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
# Old approach: sequential (in-order) train/test split, kept for reference.
# X = [i for i in range(num_data)]
# y = [i for i in range(num_data)]
# X_train, X_test = X[:math.floor(num_data*0.8)],X[math.floor(num_data*0.8):]
# y_train, y_test = y[:math.floor(num_data*0.8)],y[math.floor(num_data*0.8):]
# train_lists = [X_train, y_train]
# test_lists = [X_test, y_test]
# Current approach: random train/test split (fixed seed for reproducibility).
X = np.load(os.getcwd()+'/data/data_sum.npy')[:100000]
y = np.load(os.getcwd()+'/data/label_sum.npy')[:100000]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)
train_lists = [X_train, y_train]
test_lists = [X_test, y_test]
# Build the train and test datasets.
training_dataset = srmj_dataset(king_of_lists = train_lists)
test_dataset = srmj_dataset(king_of_lists = test_lists )
print(len(X_train))
# Data loading: prefetching loaders with pinned memory and 8 workers.
dataloaders_dict = {'train': DataLoaderX(training_dataset, batch_size=batch_size, shuffle=True, num_workers=8,pin_memory=True),
                    'val':DataLoaderX(test_dataset, batch_size=batch_size, shuffle=True, num_workers=8,pin_memory=True)
                    }
dataset_sizes = {'train':len(train_lists[0]),
                 'val':len(test_lists[0])}
# Load a pretrained ResNet backbone (resnet101, despite the original
# comment saying resnet50).
model_ft = models.resnet101(pretrained=True)
# Widen the first conv from 3 to 8 input channels: the pretrained RGB
# kernels fill channels 0-2, the remaining 5 channels start at zero.
w = model_ft.conv1.weight.clone()
model_ft.conv1 = nn.Conv2d(8, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
model_ft.conv1.weight = torch.nn.Parameter(torch.cat((w, torch.zeros(64, 5, 7, 7)), dim=1))
model_ft.avgpool = nn.AdaptiveAvgPool2d(1)
# for param in model_ft.parameters():
#     param.requires_grad = False
# print(model_ft)
# num_ftrs = model_ft.classifier[6].in_features
# Replace the classifier head with a 512-d projection that feeds the
# multi-task heads.
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 512)
# Build the multi-task effective-tile model.
dd = .1
model_1 = multi_output_model(model_ft, dd)
model_1 = model_1.to(device)
# print(model_1)
# print(model_1.parameters())
# Loss functions: one binary cross-entropy per head.
criterion = [nn.BCELoss(), nn.BCELoss(), nn.BCELoss()]
# Learning rates: smaller base rate for the pretrained trunk, larger for
# the freshly initialised layers.
lrlast = .001
lrmain = .0001
# NOTE(review): this assignment shadows the imported `optim` module.
optim = optim.Adam(
    [
        {"params": model_1.resnet_model.parameters()},
        {"params": model_1.x1.parameters(), "lr": lrlast},
        {"params": model_1.y1o.parameters(), "lr": lrlast},
        {"params": model_1.y2o.parameters(), "lr": lrlast},
        {"params": model_1.y3o.parameters(), "lr": lrlast},
    ],
    lr=lrmain)
# optim = optim.Adam(model_1.parameters(),lr=lrmain)#, momentum=.9)
# Observe that all parameters are being optimized
optimizer_ft = optim
# Decay LR by a factor of 0.1 every 10 epochs (step_size=10).
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=10, gamma=0.1)
# Start training.
model_ft1 = train_model(model_1, criterion, optimizer_ft, exp_lr_scheduler,
                        num_epochs=200)
# Save loss/acc histories to text files (see array2File below).
def array2File(name, array, type):
    """Dump a 2-D list of metrics to result/V2_epoch200/<name>.txt.

    Each inner list becomes one space-separated line. When `type` is 'loss'
    the elements are torch scalars and are unwrapped with .item(); when it
    is 'acc' they are written as plain numbers. (`type` shadows the builtin,
    but the parameter name is kept for call-site compatibility.)
    """
    out_dir = os.getcwd() + '/result/V2_epoch200/'
    # Robustness fix: create the output directory instead of crashing with
    # FileNotFoundError when it does not exist yet.
    os.makedirs(out_dir, exist_ok=True)
    # Idiom fix: a context manager guarantees the file is closed on error.
    with open(out_dir + name + '.txt', 'w') as output:
        for row in array:
            for value in row:
                if type == 'loss':
                    output.write(str(value.item()))
                elif type == 'acc':
                    output.write(str(value))
                output.write(' ')
            output.write('\n')
# Writing the loss/acc histories to text files is disabled for now.
# array2File('train_loss',train_loss,'loss')
# array2File('train_acc',train_acc,'acc')
# array2File('val_loss',val_loss,'loss')
# array2File('val_acc',val_acc,'acc')
# Persist the best (lowest validation loss) model weights.
torch.save(model_ft1.state_dict(), os.getcwd()+'/model/V2/resnet_split_lr_1-0001.pth')
| [
"2594195172@qq.com"
] | 2594195172@qq.com |
a89548e0c620831876e4403baf731815438a0186 | 9242182e3a8dca24ea3125b09f438aadc9b9d63a | /MC_8TeV/officialMC/files_60000/runMuMuPiKPAT_MC_29Jan_60000_10.py | 9053282a3c3db881406a0deb76a450b3f6ae89da | [] | no_license | surnairit/Z4430_analysis | a43b84f29b261465e00f329806115acc410c8f3a | 1a2c7a049369385885280c2461da7041dfd770ff | refs/heads/master | 2021-01-18T16:05:16.521358 | 2017-03-30T14:28:08 | 2017-03-30T14:28:08 | 86,694,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,824 | py | import FWCore.ParameterSet.Config as cms
import FWCore.Utilities.FileUtils as FileUtils

process = cms.Process('NTUPLE')

process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool(True)
    #,SkipEvent = cms.untracked.vstring('ProductNotFound')
)

# import of standard configurations
process.load('FWCore/MessageService/MessageLogger_cfi')
# Silence the ntuplizer module's own chatter; report progress every 1000 events.
process.MessageLogger.suppressInfo = cms.untracked.vstring( "mkcands" )
process.MessageLogger.suppressWarning = cms.untracked.vstring( "mkcands" )
process.MessageLogger.cerr.FwkReport.reportEvery = 1000 #1

# Top-level switches: data vs MC, private vs official production, and which
# B meson the MC sample was generated with. They steer the input file lists,
# the ntuplizer's MC settings and the output file name below.
#MC = False
MC = True
if MC :
    #official = False
    official = True
    MCMotherId = 511 # 511 B0 (=anti-B0), 531 Bs0
    #MCMotherId = 531
    if MCMotherId == 511 :
        MCExclusiveDecay = True
    elif MCMotherId == 531 :
        MCExclusiveDecay = False
# Input source
process.source = cms.Source("PoolSource",
    skipEvents = cms.untracked.uint32( 0 ), #with 11976 Processing run: 201707 lumi: 281 event: 383901681
    fileNames = cms.untracked.vstring()
)

# Pick the input dataset according to the data/MC switches above; the chosen
# list is assigned to process.PoolSource.fileNames after this if/elif chain.
if (not MC) :
    sourceFiles = cms.untracked.vstring( # 'root://cms-xrd-global.cern.ch/' prefix could help sometimes
        'file:/lustre/cms/store/data/Run2012D/MuOnia/RECO/16Jan2013-v1/10000/2A2AF16E-516B-E211-AC81-0025905964A6.root'
    )
elif MC :
    if MCMotherId == 511 :
        if (not official) :
            # mylist = FileUtils.loadListFromFile ('filenames_format_0000.txt')
            # mylist.extend ( FileUtils.loadListFromFile ('filenames_format_0001.txt') )
            # mylist = FileUtils.loadListFromFile ('filenames_format_0001.txt')
            # mylist.extend ( FileUtils.loadListFromFile ('filenames_format_0002.txt') )
            # mylist.extend ( FileUtils.loadListFromFile ('filenames_format_0003.txt') )
            # mylist.extend ( FileUtils.loadListFromFile ('filenames_format_0004.txt') )
            # mylist.extend ( FileUtils.loadListFromFile ('filenames_format_0005.txt') )
            # mylist.extend ( FileUtils.loadListFromFile ('filenames_set2_0000.txt') )
            mylist = FileUtils.loadListFromFile ('filenames_set2_0000.txt')
            mylist.extend ( FileUtils.loadListFromFile ('filenames_set2_0001.txt') )
            mylist.extend ( FileUtils.loadListFromFile ('filenames_set2_0002.txt') )
            mylist.extend ( FileUtils.loadListFromFile ('filenames_set2_0003.txt') )
            mylist.extend ( FileUtils.loadListFromFile ('filenames_set2_0004.txt') )
            mylist.extend ( FileUtils.loadListFromFile ('filenames_set2_0005.txt') )
            sourceFiles = cms.untracked.vstring(
                *mylist
            )
        else :
            # official MC
            mylist = FileUtils.loadListFromFile ('filenames_official_formatted_60000_10.txt')
            #mylist.extend ( FileUtils.loadListFromFile ('filenames_official_formatted_10000.txt') )
            #mylist.extend ( FileUtils.loadListFromFile ('filenames_official_formatted_60000_10.txt') )
            #mylist.extend ( FileUtils.loadListFromFile ('filenames_official_formatted_80000.txt') )
            sourceFiles = cms.untracked.vstring(
                *mylist
            )
    elif MCMotherId == 531 :
        # NOTE(review): several of these Bs entries are duplicated verbatim
        # (the 00A0B7F6... file appears six times) -- confirm the repetition
        # is intentional, otherwise those events are processed repeatedly.
        sourceFiles = cms.untracked.vstring(
            # Bs
            '/store/mc/Summer12_DR53X/BsToPsiMuMu_2MuPtEtaFilter_8TeV-pythia6-evtgen/AODSIM/PU_S10_START53_V7A-v1/0000/005DE3B0-FDDC-E111-9812-00266CFFC198.root',
            '/store/mc/Summer12_DR53X/BsToPsiMuMu_2MuPtEtaFilter_8TeV-pythia6-evtgen/AODSIM/PU_S10_START53_V7A-v1/0000/0090EB21-15DD-E111-9BE2-0017A4770800.root',
            '/store/mc/Summer12_DR53X/BsToPsiMuMu_2MuPtEtaFilter_8TeV-pythia6-evtgen/AODSIM/PU_S10_START53_V7A-v1/0000/00A0B7F6-98DF-E111-866D-00266CFFC13C.root',
            '/store/mc/Summer12_DR53X/BsToPsiMuMu_2MuPtEtaFilter_8TeV-pythia6-evtgen/AODSIM/PU_S10_START53_V7A-v1/0000/00A0B7F6-98DF-E111-866D-00266CFFC13C.root',
            '/store/mc/Summer12_DR53X/BsToPsiMuMu_2MuPtEtaFilter_8TeV-pythia6-evtgen/AODSIM/PU_S10_START53_V7A-v1/0000/00A0B7F6-98DF-E111-866D-00266CFFC13C.root',
            '/store/mc/Summer12_DR53X/BsToPsiMuMu_2MuPtEtaFilter_8TeV-pythia6-evtgen/AODSIM/PU_S10_START53_V7A-v1/0000/00A0B7F6-98DF-E111-866D-00266CFFC13C.root',
            '/store/mc/Summer12_DR53X/BsToPsiMuMu_2MuPtEtaFilter_8TeV-pythia6-evtgen/AODSIM/PU_S10_START53_V7A-v1/0000/00A0B7F6-98DF-E111-866D-00266CFFC13C.root',
            '/store/mc/Summer12_DR53X/BsToPsiMuMu_2MuPtEtaFilter_8TeV-pythia6-evtgen/AODSIM/PU_S10_START53_V7A-v1/0000/00A0B7F6-98DF-E111-866D-00266CFFC13C.root',
            '/store/mc/Summer12_DR53X/BsToPsiMuMu_2MuPtEtaFilter_8TeV-pythia6-evtgen/AODSIM/PU_S10_START53_V7A-v1/0000/0213D14A-4CE0-E111-AC17-1CC1DE056080.root',
            '/store/mc/Summer12_DR53X/BsToPsiMuMu_2MuPtEtaFilter_8TeV-pythia6-evtgen/AODSIM/PU_S10_START53_V7A-v1/0000/0239D053-E7DF-E111-96FB-00266CFFBF90.root'
        )

process.PoolSource.fileNames = sourceFiles ;

# Drop products that are not needed downstream.
process.source.inputCommands = cms.untracked.vstring(
    "keep *",
    "drop L1GlobalTriggerObjectMapRecord_hltL1GtObjectMap__RECO",
    "drop *_MEtoEDMConverter_*_*"
)
# Number of events to process; the commented lines record past runs and
# their output sizes / running times for bookkeeping.
process.maxEvents = cms.untracked.PSet(
    # input = cms.untracked.int32( -1 ) # 256Kb in 2' for 100 events, 1Mb in 7' for 1k events, 6Mb in 50' for 8650 events, 11Mb in 66' for 10k events, 100Mb in 14h for 150k events, 1.4Gb in 4 days for 1.2M events of official MC
    #input = cms.untracked.int32( 1000 ) # 310Kb in 3' for 1k events of private MC
    #input = cms.untracked.int32( 100 ) # = 20Mb in 2h for 15k events, 2Mb in 10' for 1k events of Run2012C/MuOniaParked/AOD/22Jan2013-v1
    #input = cms.untracked.int32( 1000 ) # = 3Mb for 6546 events, 85Kb for 100, 800kb for 1k events of BsToPsiMuMu
    #input = cms.untracked.int32( 24000 ) # = 870Kb # timeout after 24500 for Run2012A/MuOnia
    input = cms.untracked.int32( -1 ) # = 5718Kb # timeout after 3700 for Run2012A/MuOnia
)
#Output size of CRAB jobs ~200MB usually works well. (max 300-500 Mb according to Cesare)

# Geometry, magnetic field and detector conditions (53X MC global tag).
process.load('Configuration.Geometry.GeometryIdeal_cff') # 53x
process.load("Configuration.StandardSequences.GeometryExtended_cff") # from Lucia
process.load("Configuration.StandardSequences.Reconstruction_cff") # from Lucia
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
#process.GlobalTag.globaltag = 'FT_53_V6_AN3::All'
process.GlobalTag.globaltag = 'START53_V19F::All'
#process.GlobalTag.globaltag = 'START53_V7C::All'
process.load('Configuration/EventContent/EventContent_cff')

#
# Load common sequences
#
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskAlgoTrigConfig_cff')
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff')
process.load('HLTrigger/HLTfilters/hltLevel1GTSeed_cfi')
####################################################################################
##################################good collisions############################################
#### 44x
#process.primaryVertexFilter = cms.EDFilter("GoodVertexFilter",
#                                           vertexCollection = cms.InputTag('offlinePrimaryVertices'),
#                                           minimumNDOF = cms.uint32(4) ,
#                                           maxAbsZ = cms.double(24),
#                                           maxd0 = cms.double(2)
#                                           )
## 53x
# Primary-vertex quality requirements (53X style).
pvSelection = cms.PSet(
    minNdof = cms.double( 4. )
    , maxZ = cms.double( 24. )
    , maxRho = cms.double( 2. )
)

process.goodOfflinePrimaryVertices = cms.EDFilter("PrimaryVertexObjectFilter", # checks for fake PVs automatically
    filterParams = pvSelection,
    filter = cms.bool( False ), # use only as producer
    src = cms.InputTag( 'offlinePrimaryVertices' )
)

# Same selection used as an actual event filter.
process.primaryVertexFilter = process.goodOfflinePrimaryVertices.clone( filter = True )

# Reject beam-scraping events: with >= 10 tracks, require a 25% fraction of
# high-purity tracks.
process.noscraping = cms.EDFilter("FilterOutScraping",
    applyfilter = cms.untracked.bool(True),
    debugOn = cms.untracked.bool(False),
    #debugOn = cms.untracked.bool(True),
    numtrack = cms.untracked.uint32(10),
    thresh = cms.untracked.double(0.25)
)
# PAT Layer 0+1
process.load("PhysicsTools.PatAlgos.patSequences_cff")
process.load("PhysicsTools.PatAlgos.cleaningLayer1.genericTrackCleaner_cfi")
# Keep tracks even when they overlap with muons/electrons.
process.cleanPatTracks.checkOverlaps.muons.requireNoOverlaps = cms.bool(False)
process.cleanPatTracks.checkOverlaps.electrons.requireNoOverlaps = cms.bool(False)

from PhysicsTools.PatAlgos.producersLayer1.muonProducer_cfi import *
patMuons.embedTrack = cms.bool(True)
patMuons.embedPickyMuon = cms.bool(False)
patMuons.embedTpfmsMuon = cms.bool(False)

# Prune generated particles to muons and their parents
process.genMuons = cms.EDProducer("GenParticlePruner",
    src = cms.InputTag("genParticles"),
    select = cms.vstring(
        "drop * ", # this is the default
        "++keep abs(pdgId) = 13", # keep muons and their parents
        "drop pdgId == 21 && status = 2" # remove intermediate qcd spam carrying no flavour info
    )
)

process.load("MuonAnalysis.MuonAssociators.patMuonsWithTrigger_cff")
from MuonAnalysis.MuonAssociators.patMuonsWithTrigger_cff import addMCinfo, useExistingPATMuons, useL1MatchingWindowForSinglets, changeTriggerProcessName, switchOffAmbiguityResolution, addDiMuonTriggers
# with some customization
if MC:
    addMCinfo(process)
    # since we match inner tracks, keep the matching tight and make it one-to-one
    process.muonMatch.maxDeltaR = 0.05
    process.muonMatch.resolveByMatchQuality = True
addDiMuonTriggers(process)
useExistingPATMuons(process,'cleanPatMuons',addL1Info=False)
changeTriggerProcessName(process, 'HLT')
switchOffAmbiguityResolution(process) # Switch off ambiguity resolution: allow multiple reco muons to match to the same trigger muon
useL1MatchingWindowForSinglets(process)

# Trigger-object matching windows per HLT level.
process.muonL1Info.maxDeltaR = 0.3
process.muonL1Info.fallbackToME1 = True
process.muonMatchHLTL1.maxDeltaR = 0.3
process.muonMatchHLTL1.fallbackToME1 = True
process.muonMatchHLTL2.maxDeltaR = 0.3
process.muonMatchHLTL2.maxDPtRel = 10.0
process.muonMatchHLTL3.maxDeltaR = 0.1
process.muonMatchHLTL3.maxDPtRel = 10.0
process.muonMatchHLTCtfTrack.maxDeltaR = 0.1
process.muonMatchHLTCtfTrack.maxDPtRel = 10.0
process.muonMatchHLTTrackMu.maxDeltaR = 0.1
process.muonMatchHLTTrackMu.maxDPtRel = 10.0
from PhysicsTools.PatAlgos.tools.trackTools import *

######## adding tracks refitted with different mass
from RecoTracker.TrackProducer.TrackRefitters_cff import *
from TrackingTools.MaterialEffects.RungeKuttaTrackerPropagator_cfi import *
#process.RungeKuttaTrackerPropagatorForMuons = TrackingTools.MaterialEffects.RungeKuttaTrackerPropagator_cfi.RungeKuttaTrackerPropagator.clone( Mass = cms.double(0.10565837), ComponentName = cms.string('RungeKuttaTrackerPropagatorForMuons') )
#process.refittedGeneralTracksMuon = RecoTracker.TrackProducer.TrackRefitter_cfi.TrackRefitter.clone( Propagator = "RungeKuttaTrackerPropagatorForMuons" )
# Propagator/refit with the pion mass hypothesis (not used in the final path).
process.RungeKuttaTrackerPropagatorForPions = TrackingTools.MaterialEffects.RungeKuttaTrackerPropagator_cfi.RungeKuttaTrackerPropagator.clone( Mass = cms.double(0.13957), ComponentName = cms.string('RungeKuttaTrackerPropagatorForPions') )
process.refittedGeneralTracksPion = RecoTracker.TrackProducer.TrackRefitter_cfi.TrackRefitter.clone( Propagator = "RungeKuttaTrackerPropagatorForPions" )

# PAT track candidates with the pion mass hypothesis.
makeTrackCandidates( process,                 # patAODTrackCands
    label = 'TrackCands',                     # output collection will be 'allLayer0TrackCands', 'allLayer1TrackCands', 'selectedLayer1TrackCands'
    tracks = cms.InputTag('generalTracks'),   # input track collection
    #tracks = cms.InputTag('refittedGeneralTracksMuon'), # input track collection // AP changed from generalTracks
    #tracks = cms.InputTag('refittedGeneralTracksPion'), # input track collection // AP changed from generalTracks
    #particleType = 'mu+',                    # particle type (for assigning a mass) # not working, everything is a pion
    particleType = 'pi+',                     # particle type (for assigning a mass) # not working, everything is a pion
    preselection = 'pt > 0.35',               # preselection cut on candidates. Only methods of 'reco::Candidate' are available
    #selection = 'pt > 0.35',                 # Selection on PAT Layer 1 objects ('selectedLayer1TrackCands')
    #selection = 'p > 0.5',                   # Selection on PAT Layer 1 objects ('selectedLayer1TrackCands')
    selection = 'pt > 0.35 && p > 0.5',       # Selection on PAT Layer 1 objects ('selectedLayer1TrackCands')
    isolation = {},                           # Isolations to use ('source':deltaR; set to {} for None)
    isoDeposits = [],
    mcAs = None                               # Replicate MC match as the one used for Muons
);                                            # you can specify more than one collection for this

l1cands = getattr(process, 'patTrackCands')
l1cands.addGenMatch = False

######## adding tracks refitted with Kaon mass
#process.RungeKuttaTrackerPropagator.Mass = cms.double(0.493677)
process.RungeKuttaTrackerPropagatorForKaons = TrackingTools.MaterialEffects.RungeKuttaTrackerPropagator_cfi.RungeKuttaTrackerPropagator.clone(
    Mass = cms.double(0.493677), ComponentName = cms.string('RungeKuttaTrackerPropagatorForKaons') )
process.refittedGeneralTracksKaon = RecoTracker.TrackProducer.TrackRefitter_cfi.TrackRefitter.clone( Propagator = "RungeKuttaTrackerPropagatorForKaons" )
###################################################
# PAT track candidates with the kaon mass hypothesis.
makeTrackCandidates( process,                 # patAODTrackCands
    label = 'TrackKaonCands',                 # output collection will be 'allLayer0TrackCands', 'allLayer1TrackCands', 'selectedLayer1TrackCands'
    #tracks = cms.InputTag('refittedGeneralTracksKaon'), # input track collection // AP changed from generalTracks
    tracks = cms.InputTag('generalTracks'),   # input track collection // AP changed from generalTracks
    particleType = 'K+',                      # particle type (for assigning a mass) // AP changed from pi to K # not working, everything is a pion
    #particleType = 'pi+',                    # particle type (for assigning a mass) // AP changed from pi to K # not working, everything is a pion
    #particleType = 'mu+',                    # particle type (for assigning a mass) // AP changed from pi to K # not working, everything is a pion
    preselection = 'pt > 0.35',               # preselection cut on candidates. Only methods of 'reco::Candidate' are available
    #selection = 'pt > 0.35',                 # Selection on PAT Layer 1 objects ('selectedLayer1TrackCands')
    #selection = 'p > 0.5',                   # Selection on PAT Layer 1 objects ('selectedLayer1TrackCands')
    selection = 'pt > 0.35 && p > 0.5',       # Selection on PAT Layer 1 objects ('selectedLayer1TrackCands')
    isolation = {},                           # Isolations to use ('source':deltaR; set to {} for None)
    isoDeposits = [],
    #mcAs = 'muon'                            # Replicate MC match as the one used for Muons # AP "=None" ??
    mcAs = None                               # Replicate MC match as the one used for Muons
);                                            # you can specify more than one collection for this

l1cands = getattr(process, 'patTrackKaonCands')
l1cands.addGenMatch = False
process.load("RecoTracker.DeDx.dedxHarmonic2_cfi")
process.dedxHarmonic2Kaon = RecoTracker.DeDx.dedxHarmonic2_cfi.dedxHarmonic2.clone (
tracks = 'refittedGeneralTracksKaon',
trajectoryTrackAssociation = 'refittedGeneralTracksKaon'
)
# dE/dx hits
process.load("RecoVertex.BeamSpotProducer.BeamSpot_cff")
#process.load("RecoTracker.TrackProducer.TrackRefitters_cff") #already imported above
#process.TrackRefitter.src = 'generalTracks'
#process.TrackRefitter.src = 'refittedGeneralTracksPion'
#process.dedxHitInfo = cms.EDProducer("HSCPDeDxInfoProducer",
# #tracks = cms.InputTag("TrackRefitter"),
# #trajectoryTrackAssociation = cms.InputTag("TrackRefitter"),
# tracks = cms.InputTag("refittedGeneralTracksPion"),
# trajectoryTrackAssociation = cms.InputTag("refittedGeneralTracksPion"),
#
# UseStrip = cms.bool(True),
# UsePixel = cms.bool(True),
# MeVperADCStrip = cms.double(3.61e-06*265),
# MeVperADCPixel = cms.double(3.61e-06),
#
# UseCalibration = cms.bool(False),
# calibrationPath = cms.string("/afs/cern.ch/user/q/querten/workspace/public/dEdx/CMSSW_5_2_4/src/dEdx/ppGridProject/Gains.root"),
# ShapeTest = cms.bool(True),
# )
#
#process.dedxHitInfoKaon = cms.EDProducer("HSCPDeDxInfoProducer",
# tracks = cms.InputTag("refittedGeneralTracksKaon"),
# trajectoryTrackAssociation = cms.InputTag("refittedGeneralTracksKaon"),
#
# UseStrip = cms.bool(True),
# UsePixel = cms.bool(True),
# MeVperADCStrip = cms.double(3.61e-06*265),
# MeVperADCPixel = cms.double(3.61e-06),
#
# UseCalibration = cms.bool(False),
# calibrationPath = cms.string("/afs/cern.ch/user/q/querten/workspace/public/dEdx/CMSSW_5_2_4/src/dEdx/ppGridProject/Gains.root"),
# ShapeTest = cms.bool(True),
# )
#process.PATfilter = cms.EDFilter("X3872FilterPAT")
process.PATfilter = cms.EDFilter("Z4430FilterPAT")

# The ntuplizer: builds mu+mu- + pi K candidates (B0 -> psi(') K pi) and
# fills the output TTree. Cuts below define the muon/track/candidate quality
# and the dimuon and B0 mass windows.
process.mkcands = cms.EDAnalyzer("MuMuPiKPAT",
    HLTriggerResults = cms.untracked.InputTag("TriggerResults","","HLT"),
    inputGEN = cms.untracked.InputTag("genParticles"),
    VtxSample = cms.untracked.string('offlinePrimaryVertices'),
    SameSign = cms.untracked.bool(False),
    DoMonteCarloTree = cms.untracked.bool( MC ),
    MonteCarloParticleId = cms.untracked.int32(443), #original 20443
    MonteCarloExclusiveDecay = cms.untracked.bool( MCExclusiveDecay ),
    MonteCarloMotherId = cms.untracked.int32( MCMotherId ),
    MonteCarloDaughtersN = cms.untracked.int32( 3 ), # 3 for exclusive B0->psi'KPi
    # muon and dimuon selection
    DoMuMuMassConstraint = cms.untracked.bool(True),
    #SkipJPsi = cms.untracked.bool(True),
    SkipJPsi = cms.untracked.bool(False),
    SkipPsi2S = cms.untracked.bool(False),
    MinNumMuPixHits = cms.untracked.int32(1),
    MinNumMuSiHits = cms.untracked.int32(8),
    MaxMuNormChi2 = cms.untracked.double(7),
    MaxMuD0 = cms.untracked.double(10.0),
    sharedFraction = cms.untracked.double(0.5),
    MinJPsiMass = cms.untracked.double(2.9),
    MaxJPsiMass = cms.untracked.double(3.3),
    MinPsiPrimeMass = cms.untracked.double(3.55),
    MaxPsiPrimeMass = cms.untracked.double(3.8),
    MinNumTrSiHits = cms.untracked.int32(4),
    MinTrPt = cms.untracked.double(0.350),
    Chi2NDF_Track = cms.untracked.double(7.0),
    # Delta R
    MaxMuMuTrackDR = cms.untracked.double(1.5),
    MaxB0CandTrackDR = cms.untracked.double(1.5),
    UseB0Dr = cms.untracked.bool(True),
    MinMuMuPiKMass = cms.untracked.double(5.1),
    MaxMuMuPiKMass = cms.untracked.double(5.45),
    resolvePileUpAmbiguity = cms.untracked.bool(True),
    addMuMulessPrimaryVertex = cms.untracked.bool(True),
    #addMuMulessPrimaryVertex = cms.untracked.bool(False),
    addB0lessPrimaryVertex = cms.untracked.bool(True),
    Debug_Output = cms.untracked.bool(True),
    ##
    ## use the correct trigger path
    ##
    TriggersForMatching = cms.untracked.vstring(
        # 2012 displaced J/psi = Alessandra
        #"HLT_DoubleMu4_Jpsi_Displaced_v9", "HLT_DoubleMu4_Jpsi_Displaced_v10", "HLT_DoubleMu4_Jpsi_Displaced_v11", "HLT_DoubleMu4_Jpsi_Displaced_v12",
        # Lucia
        # 2010
        #"HLT_DoubleMu3_Quarkonium_v1", "HLT_DoubleMu3_Quarkonium_v2",
        #"HLT_Dimuon6p5_Barrel_PsiPrime_v1",
        # 2011
        #"HLT_Dimuon7_PsiPrime_v1", "HLT_Dimuon7_PsiPrime_v2", "HLT_Dimuon7_PsiPrime_v3", "HLT_Dimuon7_PsiPrime_v4", "HLT_Dimuon7_PsiPrime_v5",
        #"HLT_Dimuon9_PsiPrime_v1", "HLT_Dimuon9_PsiPrime_v4", "HLT_Dimuon9_PsiPrime_v5",
        #"HLT_Dimuon11_PsiPrime_v1", "HLT_Dimuon11_PsiPrime_v4", "HLT_Dimuon11_PsiPrime_v5",
        # inclusive psi(2S)
        #"HLT_Dimuon0_PsiPrime_v3", "HLT_Dimuon0_PsiPrime_v4", "HLT_Dimuon0_PsiPrime_v5", "HLT_Dimuon0_PsiPrime_v6",
        "HLT_Dimuon5_PsiPrime_v3", "HLT_Dimuon5_PsiPrime_v4", "HLT_Dimuon5_PsiPrime_v5", "HLT_Dimuon5_PsiPrime_v6",
        #"HLT_Dimuon7_PsiPrime_v1", "HLT_Dimuon7_PsiPrime_v2", "HLT_Dimuon7_PsiPrime_v3", "HLT_Dimuon9_PsiPrime_v9",
        #"HLT_DoubleMu3p5_LowMass_Displaced_v3", "HLT_DoubleMu3p5_LowMass_Displaced_v4", "HLT_DoubleMu3p5_LowMass_Displaced_v5", "HLT_DoubleMu3p5_LowMass_Displaced_v6"
        # inclusive J/psi
        "HLT_Dimuon8_Jpsi_v3", "HLT_Dimuon8_Jpsi_v4", "HLT_Dimuon8_Jpsi_v5", "HLT_Dimuon8_Jpsi_v6", "HLT_Dimuon8_Jpsi_v7",
    ),
    # One last-filter name per trigger path above (kept in the same order).
    FiltersForMatching = cms.untracked.vstring(
        # Alessandra
        #"hltDisplacedmumuFilterDoubleMu4Jpsi", "hltDisplacedmumuFilterDoubleMu4Jpsi", "hltDisplacedmumuFilterDoubleMu4Jpsi", "hltDisplacedmumuFilterDoubleMu4Jpsi"
        # Kay
        "hltVertexmumuFilterDimuon5PsiPrime", "hltVertexmumuFilterDimuon5PsiPrime", "hltVertexmumuFilterDimuon5PsiPrime", "hltVertexmumuFilterDimuon5PsiPrime", #"hltVertexmumuFilterDimuon7PsiPrime", "hltVertexmumuFilterDimuon7PsiPrime", "hltVertexmumuFilterDimuon7PsiPrime", "hltVertexmumuFilterDimuon7PsiPrime"
        #hltDoubleMu4JpsiDisplacedL3Filtered
        # inclusive J/psi (https://espace.cern.ch/cms-quarkonia/trigger-bph/SitePages/2012-InclusiveJPsi.aspx)
        "hltVertexmumuFilterDimuon8Jpsi", "hltVertexmumuFilterDimuon8Jpsi", "hltVertexmumuFilterDimuon8Jpsi", "hltVertexmumuFilterDimuon8Jpsi", "hltVertexmumuFilterDimuon8Jpsi",
    )
)
# Output ntuple file; the real name is chosen below from the data/MC switches.
process.TFileService = cms.Service("TFileService",
    fileName = cms.string('set_below.root')
)
if (not MC) :
    process.TFileService.fileName = cms.string('MuOniaRun2012C_07Oct_MuMuKPiPAT_ntpl.root')
elif MC :
    if MCMotherId == 511 :
        if (not official) :
            #process.TFileService.fileName = cms.string('BdToPsiKpi_18Mar_MuMuPiKPAT_ntpl.root')
            process.TFileService.fileName = cms.string('/lustre/cms/store/user/nsur/8TeV_MC_Private/output_ntpls/Bd2JpsiKpi_PHSP_8TeV_noPtEtaCuts_MuMuPiKPAT_small_ntpl_3.root')
        else :
            # process.TFileService.fileName = cms.string('officialBdToPsiKpi_18Mar_MuMuPiKPAT_ntpl.root')
            process.TFileService.fileName = cms.string('/lustre/cms/store/user/nsur/Jpsi_8TeV_OfficialMC_small_ntuples/officialBdToJpsiKpi_MuMuPiKPAT_60000_10_small_ntpls.root')
    elif MCMotherId == 531 :
        process.TFileService.fileName = cms.string('BsToPsiMuMu_03Mar_MuMuPiKPAT_ntpl.root')
# turn off MC matching for the process
from PhysicsTools.PatAlgos.tools.coreTools import *
# old: removeMCMatching(process, ['All'], outputInProcess = False)
# NOTE(review): this call runs unconditionally, i.e. also when MC = True,
# which strips the PAT MC matching configured above (addMCinfo). Confirm it
# should not be guarded by "if (not MC):".
removeMCMatching(process,['All'],"",None,[])

# Strip jet/MET modules from the PAT default sequence -- not needed for
# this dimuon+hadrons ntuple.
process.patDefaultSequence.remove(process.patJetCorrFactors)
process.patDefaultSequence.remove(process.patJetCharge)
process.patDefaultSequence.remove(process.patJetPartonMatch)
process.patDefaultSequence.remove(process.patJetGenJetMatch)
process.patDefaultSequence.remove(process.patJetPartons)
## error in 5_3_22, so removing it
#process.patDefaultSequence.remove(process.patJetPartonAssociation)
process.patDefaultSequence.remove(process.patJetFlavourAssociation)
process.patDefaultSequence.remove(process.patJets)
## error in 53x, so removing it
#process.patDefaultSequence.remove(process.metJESCorAK5CaloJet)
#process.patDefaultSequence.remove(process.metJESCorAK5CaloJetMuons)
process.patDefaultSequence.remove(process.patMETs)
process.patDefaultSequence.remove(process.selectedPatJets)
process.patDefaultSequence.remove(process.cleanPatJets)
process.patDefaultSequence.remove(process.countPatJets)

# EDM output module (the ntuple itself is written by TFileService above).
process.out = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string('onia2MuMuPAT.root'),
    outputCommands = cms.untracked.vstring('drop *',
        #'keep *_genMuons_*_Onia2MuMuPAT', # generated muons and parents
        'keep patMuons_patMuonsWithTrigger_*_NTUPLE', # All PAT muons including general tracks and matches to triggers
    )
)
# Event preselection: good PV + anti-scraping.
process.filter = cms.Sequence(
    process.goodOfflinePrimaryVertices
    + process.primaryVertexFilter
    + process.noscraping
)
#44x process.filter = cms.Sequence(process.primaryVertexFilter+process.noscraping)

# Main path: beam spot -> event filters -> PAT -> trigger-matched muons ->
# candidate prefilter -> ntuplizer.
process.ntup = cms.Path(
    #process.refittedGeneralTracksPion *
    #process.refittedGeneralTracksMuon *
    #process.refittedGeneralTracksKaon *
    #process.offlineBeamSpot * process.TrackRefitter * process.dedxHitInfo
    #process.dedxHarmonic2Kaon *
    process.offlineBeamSpot #* process.dedxHitInfo
    * process.filter
    * process.patDefaultSequence
    * process.patMuonsWithTriggerSequence
    * process.PATfilter
    * process.mkcands
)

process.schedule = cms.Schedule(process.ntup)

# rsync -vut --existing test/crab/runMuMuPiKPAT_dataOrMC_03Mar.py cristella@cmssusy.ba.infn.it:/cmshome/cristella/work/Z_analysis/exclusive/clean_14ott/CMSSW_5_3_22/src/UserCode/MuMuPiKPAT/test/crab/runMuMuPiKPAT_dataOrMC_03Mar.py
| [
"nairit.sur@cern.ch"
] | nairit.sur@cern.ch |
e8f86a4d03c30ac5e818061723126bff0b4b3d23 | 5334a3c5c58b7fe3503872f522a5fb04cf889d93 | /root/models.py | 4332ea9924fce29189506b0f9ea81a889008afda | [] | no_license | cyrillkin/DL_project | b6f6a40052abcf991512d1675815a3f3caafb0b7 | 4931034af5f19c06e6399becea804c1567eb3dc9 | refs/heads/master | 2023-07-16T14:00:08.221386 | 2021-08-23T12:23:10 | 2021-08-23T12:23:10 | 383,744,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,733 | py | from django.db import models
from django.contrib.auth.models import User
from django.db.models.fields import DateField, TextField
from django.db.models.fields.files import ImageField
from .validators import validate_num
def user_avatar_path(instance, filename):
    """Storage path for a profile avatar: ``user_<id>/avatar/<filename>``.

    Used as ``ImageField(upload_to=...)``; Django calls it with the model
    instance (a ``Prof``) and the uploaded file's original name.
    """
    # Bug fix: the uploaded file's name was dropped from the returned path
    # (the f-string ended in a literal placeholder), so every avatar upload
    # for a user mapped to the same degenerate name. Append the filename.
    return f'user_{instance.user.id}/avatar/{filename}'
def user_directory_path(instance, filename):
    """Storage path for an advert photo: ``user_<author id>/photo_adverts/<filename>``.

    Used as ``ImageField(upload_to=...)``; Django calls it with the model
    instance (an ``Adv``) and the uploaded file's original name.
    """
    # Bug fix: the uploaded file's name was dropped from the returned path,
    # so all photos of one author collided on a single degenerate name.
    return f'user_{instance.author.id}/photo_adverts/{filename}'
class Cat(models.Model):
    """Advert category ("Категории объявлений")."""

    # NOTE: max_length on a TextField is enforced only by Django forms, not
    # at the database level; a CharField would enforce it in the DB as well.
    name = models.TextField(max_length=50)

    class Meta:
        verbose_name = 'Раздел'
        verbose_name_plural = 'Разделы'

    def __str__(self):
        # Human-readable label for the admin and the shell (the model
        # previously rendered as "Cat object (n)").
        return self.name
class Adv(models.Model):
    """A user's classified advert ("Обьявление пользователя")."""

    author = models.ForeignKey(User, on_delete=models.CASCADE)
    header = models.TextField(max_length=50)
    name_cat = models.ForeignKey(Cat, on_delete=models.CASCADE)
    description = models.TextField(max_length=500)
    # Stored under user_<author id>/photo_adverts/ (see user_directory_path).
    photo = models.ImageField(upload_to=user_directory_path)
    # Set once, when the advert is first saved.
    date_pub = models.DateTimeField(auto_now_add=True)

    class Meta:
        verbose_name = 'Объявление'
        verbose_name_plural = 'Объявления'

    def __str__(self):
        # Human-readable label for the admin and the shell.
        return self.header
class Prof(models.Model):
    """Extended user profile ("Модель пользователя"), 1:1 with auth.User."""

    user = models.OneToOneField(
        User, on_delete=models.CASCADE, related_name='user_profile'
    )
    # Stored under user_<id>/avatar/ (see user_avatar_path).
    avatar = models.ImageField(upload_to=user_avatar_path)
    birth_date = models.DateField(blank=True, null=True)
    # NOTE(review): 'city' is a text field validated with validate_num, which
    # by its name looks like a numeric check -- confirm this is intentional.
    city = models.TextField(blank=True, null=True, validators=[validate_num])
    description = models.TextField(blank=True, null=True)

    class Meta:
        verbose_name = 'Профиль'
        verbose_name_plural = 'Профили'

    def __str__(self):
        # Human-readable label for the admin and the shell.
        return str(self.user)
| [
"vision21@yandex.ru"
] | vision21@yandex.ru |
57d4560a43aef2d5d6a28cf6a9081b60926353ed | 6a7e9e0e9c08132166f566bd88ae1c46ff8f9c0a | /azure-mgmt-sql/azure/mgmt/sql/operations/job_target_groups_operations.py | 0bf98a502cb80ea748c22f25a5de17d8b4d37e78 | [
"MIT"
] | permissive | ashirey-msft/azure-sdk-for-python | d92381d11c48f194ec9f989f5f803db614fb73f2 | e04778e13306dad2e8fb044970215bad6296afb6 | refs/heads/master | 2020-03-23T06:05:39.283442 | 2018-09-15T00:18:26 | 2018-09-15T00:18:26 | 141,188,192 | 0 | 1 | MIT | 2018-07-16T20:02:52 | 2018-07-16T20:02:52 | null | UTF-8 | Python | false | false | 15,765 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
# NOTE: this class is generated by AutoRest (see the file header); edits here
# will be lost on regeneration.
class JobTargetGroupsOperations(object):
    """JobTargetGroupsOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: The API version to use for the request. Constant value: "2017-03-01-preview".
    """

    models = models
def __init__(self, client, config, serializer, deserializer):
    """Wire the shared service-client plumbing onto this operation group."""
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    # Fixed by the generator; sent as the "api-version" query parameter below.
    self.api_version = "2017-03-01-preview"

    self.config = config
def list_by_agent(
        self, resource_group_name, server_name, job_agent_name, custom_headers=None, raw=False, **operation_config):
    """Gets all target groups in an agent.

    :param resource_group_name: The name of the resource group that
     contains the resource. You can obtain this value from the Azure
     Resource Manager API or the portal.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :param job_agent_name: The name of the job agent.
    :type job_agent_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of JobTargetGroup
    :rtype:
     ~azure.mgmt.sql.models.JobTargetGroupPaged[~azure.mgmt.sql.models.JobTargetGroup]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Fetches one page; the paged deserializer calls it repeatedly with the
    # service-provided next_link until the collection is exhausted.
    def internal_paging(next_link=None, raw=False):

        if not next_link:
            # Construct URL
            url = self.list_by_agent.metadata['url']
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'serverName': self._serialize.url("server_name", server_name, 'str'),
                'jobAgentName': self._serialize.url("job_agent_name", job_agent_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        else:
            # Follow-up pages: next_link is a complete URL from the service.
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response
    deserialized = models.JobTargetGroupPaged(internal_paging, self._deserialize.dependencies)

    if raw:
        header_dict = {}
        client_raw_response = models.JobTargetGroupPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
list_by_agent.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/jobAgents/{jobAgentName}/targetGroups'}
def get(
        self, resource_group_name, server_name, job_agent_name, target_group_name, custom_headers=None, raw=False, **operation_config):
    """Gets a target group.

    :param resource_group_name: The name of the resource group that
     contains the resource. You can obtain this value from the Azure
     Resource Manager API or the portal.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :param job_agent_name: The name of the job agent.
    :type job_agent_name: str
    :param target_group_name: The name of the target group.
    :type target_group_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: JobTargetGroup or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.sql.models.JobTargetGroup or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = self.get.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
        'jobAgentName': self._serialize.url("job_agent_name", job_agent_name, 'str'),
        'targetGroupName': self._serialize.url("target_group_name", target_group_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    # Anything but 200 is surfaced as a CloudError carrying the request id.
    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('JobTargetGroup', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/jobAgents/{jobAgentName}/targetGroups/{targetGroupName}'}
def create_or_update(
self, resource_group_name, server_name, job_agent_name, target_group_name, members, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a target group.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param job_agent_name: The name of the job agent.
:type job_agent_name: str
:param target_group_name: The name of the target group.
:type target_group_name: str
:param members: Members of the target group.
:type members: list[~azure.mgmt.sql.models.JobTarget]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: JobTargetGroup or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.sql.models.JobTargetGroup or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.JobTargetGroup(members=members)
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'jobAgentName': self._serialize.url("job_agent_name", job_agent_name, 'str'),
'targetGroupName': self._serialize.url("target_group_name", target_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'JobTargetGroup')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('JobTargetGroup', response)
if response.status_code == 201:
deserialized = self._deserialize('JobTargetGroup', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/jobAgents/{jobAgentName}/targetGroups/{targetGroupName}'}
def delete(
self, resource_group_name, server_name, job_agent_name, target_group_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a target group.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param job_agent_name: The name of the job agent.
:type job_agent_name: str
:param target_group_name: The name of the target group.
:type target_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'jobAgentName': self._serialize.url("job_agent_name", job_agent_name, 'str'),
'targetGroupName': self._serialize.url("target_group_name", target_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/jobAgents/{jobAgentName}/targetGroups/{targetGroupName}'}
| [
"noreply@github.com"
] | ashirey-msft.noreply@github.com |
ce661ca3c3bd7a475604613478dc86fcd5621c01 | 539a67b7d7e65e0f65aed3ca086cf00b1a94582c | /appengine/standard/hello_world/env/bin/virtualenv | 5db5290281e161726d80fb489cc265f660f0a213 | [
"Apache-2.0"
] | permissive | HJEGeorge/wit-hackathon | af0379e4cba7f351623387e6796ab4b99b8a5904 | 18631d59af839f5c68ded3484fecebb2e72b607f | refs/heads/master | 2021-03-22T00:59:49.815162 | 2017-10-13T00:50:53 | 2017-10-13T00:50:53 | 106,671,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | #!/Users/annieliu/Github/wit-hackathon/appengine/standard/hello_world/env/bin/python3.5
# -*- coding: utf-8 -*-
import re
import sys
from virtualenv import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"annieliu@Annies-MacBook-Pro.local"
] | annieliu@Annies-MacBook-Pro.local | |
6d40822302c4a4c51b73501431899b2091f0a95c | dd9439f4b7b8591e4ad20e5feed8f3efc8a28587 | /writer/video_producer.py | c2be0ca6e8951151a00f99a679e8da9921f18fa8 | [
"Apache-2.0"
] | permissive | ulen2000/SINETStream-videostreaming | b09ee851819bd178da1c85814fe01db07de3b922 | d6d97a40ce6edbc124e4bc273689f30bfda6a9fe | refs/heads/main | 2023-02-17T02:33:28.550759 | 2021-01-21T04:56:54 | 2021-01-21T04:56:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,007 | py | #!/usr/bin/env python3
# Copyright (C) 2020 National Institute of Informatics
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import argparse
import sys
import cv2
from sinetstream import MessageWriter
logging.basicConfig(level=logging.INFO)
def producer(service, video, preview=False):
with MessageWriter(service, value_type='image') as writer:
image = next_frame(video)
print(image.shape)
while image is not None:
writer.publish(image)
if preview and show_preview(image):
break
image = next_frame(video)
def next_frame(video):
global n_frame
if not video.isOpened():
return None
success, frame = video.read()
n_frame += 1
return frame if success else None
def show_preview(image):
cv2.imshow(args.input_video, image)
# Hit 'q' to stop
return cv2.waitKey(25) & 0xFF == ord("q")
def gstreamer_pipeline(
capture_width=1920,
capture_height=1080,
display_width=1280,
display_height=720,
framerate=30,
flip_method=0,
):
return (
"nvarguscamerasrc ! "
"video/x-raw(memory:NVMM), "
"width=(int)%d, height=(int)%d, "
"format=(string)NV12, framerate=(fraction)%d/1 ! "
"nvvidconv flip-method=%d ! "
"video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
"videoconvert ! "
"video/x-raw, format=(string)BGR ! appsink"
% (
capture_width,
capture_height,
framerate,
flip_method,
display_width,
display_height,
)
)
def main(service, video, width, height, preview=False):
global n_frame
if not video.isOpened():
print("ERROR: cannot open the file")
sys.exit(1)
n_frame = 0
try:
producer(service, video, width, height, preview)
finally:
video.release()
print("Fin video (#frame="+str(n_frame)+")")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="SINETStream Producer")
parser.add_argument("-s", "--service", metavar="SERVICE_NAME", required=True)
parser.add_argument("-f", "--input-video", metavar="FILE")
parser.add_argument("-c", "--camera", type=int, default=0)
parser.add_argument("-p", "--preview", action="store_true", help="show on local too")
parser.add_argument("--width", type=int, default=320, help="resize width")
parser.add_argument("--height", type=int, default=240, help="resize height")
parser.add_argument("--fps", type=int, default=30, help="set video fps")
args = parser.parse_args()
print(": service="+ args.service)
pipeline = None
if args.input_video != None:
print(": input-video="+ args.input_video)
else:
pipeline = gstreamer_pipeline(capture_width=args.width, capture_height=args.height, framerate=args.fps,
flip_method=0, display_width=args.width, display_height=args.height)
print(pipeline)
if args.preview:
print("Hit 'q' to stop")
cap = cv2.VideoCapture(args.input_video) if args.input_video!=None else cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
main(args.service, cap, args.preview)
| [
"noreply@github.com"
] | ulen2000.noreply@github.com |
73b2dcd3138de2a974eab3de115d5eabf6b877dd | b0bce1d55b1883303e577c8664f72e4133361acc | /ann_utils/self_attention.py | ea9712199322c8d91dcf20ad6a685c6a8ebfa6f2 | [] | no_license | snownz/ai_work_banch | 23fd3c49ef1987e64263445a8f1628ffad93330c | d2c79ce5745b914de75a2f4ea7ead66b11c7affc | refs/heads/master | 2020-07-31T01:12:33.474057 | 2019-11-06T14:05:08 | 2019-11-06T14:05:08 | 210,429,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,646 | py | import tensorflow as tf
import numpy as np
from ann_utils.helper import flatten, maxpool2d,\
hw_flatten, hw_flatten_multi_head,\
softmax, upsampling2d, get_median,\
to_float, norm,\
shape_list, upsampling1d
from ann_utils.conv_layer import Conv2DLayer, SNConv2DLayer, Conv1DLayer
from ann_utils.fully_layer import FullyLayer as key
"""
Self Attention for Image Inputs
matmul( [ m1, n1 ], [ m2, n2 ] ) = [ m1, n2 ]
matmul( [ m1, n1, c1 ], [ m2, n2, c1 ] ) = [ m1, n2 ]
"""
class Self_Attention_Multi_Head_3D_GB(object):
def __init__(self, ch, key_size=1, heads=8, dp=0.0, bn=False, act=None, out_act=None):
self.key = [ Conv2DLayer( key_size, 1, 1, "attn_head_f_conv_layer_{}".format( x ), act = act ) for x in range( heads ) ]
self.query = [ Conv2DLayer( key_size, 1, 1, "attn_head_g_conv_layer_{}".format( x ), act = act ) for x in range( heads ) ]
self.value = [ Conv2DLayer( key_size, 1, 1, "attn_head_h_conv_layer_{}".format( x ), act = act ) for x in range( heads ) ]
self.oc = Conv2DLayer( ch, 1, 1, "attn_head_o_conv_layer", act = out_act )
def _qkv_(self, x, q_op, k_op, v_op, summary, is_training, reduction):
k = k_op( x, is_training = is_training ) # [bs, h, w, c'] Key
q = q_op( x, is_training = is_training ) # [bs, h, w, c'] Query
v = v_op( x, is_training = is_training ) # [bs, h, w, c] Value
k = maxpool2d( k, 2, 2 )
v = maxpool2d( v, 2, 2 )
if summary:
tf.summary.image( '1_query', q, max_outputs = 1, family = "self_attention" )
tf.summary.image( '2_key', k, max_outputs = 1, family = "self_attention" )
tf.summary.image( '3_value', v, max_outputs = 1, family = "self_attention" )
return q, k ,v
def __call__( self, x, is_training=False, summary=False, reduction=1):
with tf.variable_scope('attn'):
with tf.variable_scope('reduction_dim'):
x = maxpool2d( x, reduction, reduction )
batch_size = tf.shape(x)[0]
height = x.shape[1]
width = x.shape[2]
ch = x.shape[3]
with tf.variable_scope('q_k_v'):
# [ [ batch, h, w, c ] ]
qkv = [ self._qkv_( x, q, k, v, summary, is_training, reduction )
for q, k, v in zip( self.key, self.query, self.value ) ]
with tf.variable_scope('join_heads'):
# [ batch, heads, h, w, c ]
qs = tf.concat( [ tf.expand_dims( vl[0], axis = 1 ) for vl in qkv ], axis = 1 )
ks = tf.concat( [ tf.expand_dims( vl[1], axis = 1 ) for vl in qkv ], axis = 1 )
vs = tf.concat( [ tf.expand_dims( vl[2], axis = 1 ) for vl in qkv ], axis = 1 )
with tf.variable_scope('scaled_dop_product'):
# [ batch, heads, h * w, c ]
w, s, a = multihead_attn( qs, ks, vs )
with tf.variable_scope('merge_heads'):
# [ batch, h * w, c * heads ]
merged = merge_heads( tf.transpose( a, [0, 2, 3, 1] ) )
# a = tf.reshape( a, [ batch_size, a.shape[1], height, width, ch ] )
# merged_image = tf.reduce_mean( a, axis = 1 )
# [ batch, h, w, c * heads ]
merged_image = tf.reshape( merged, [ batch_size, height, width, merged.shape[-1] ] )
with tf.variable_scope('output_attention'):
# [ batch, h, w, c ]
o = self.oc( merged_image, is_training = is_training )
with tf.variable_scope('restore_dim'):
# [ batch, h, w, c ]
attn = upsampling2d( o, reduction )
return attn
"""
Self Attention for Sequences Inputs
"""
class Self_Attention_Multi_Head_2D_GB(object):
def __init__(self, n_state, name, heads=8, dp=0.0, act=None, out_act=None):
self.name = name
self.out_act = out_act
self.heads = heads
self.n_state = n_state
self.c = [ Conv1DLayer( n_state * 3, 1, 1, '{}_c_attn_{}'.format(name, x) ) for x in range( heads ) ]
self.o = Conv1DLayer( n_state, 1, 1, '{}_o_attn'.format(name) )
def _qkv_(self, x, op, summary, is_training):
c = op( x, is_training )
q, k, v = tf.split( c, 3, axis = 2 )
return q, k ,v
def __call__( self, x, use_mask=False, past=None, is_training=False, summary=False):
with tf.compat.v1.variable_scope('_attn_'):
qkv = [ self._qkv_( x, c, summary, is_training ) for c in self.c ]
qs = tf.concat( [ tf.expand_dims( vl[0], axis = 1 ) for vl in qkv ], axis = 1 )
ks = tf.concat( [ tf.expand_dims( vl[1], axis = 1 ) for vl in qkv ], axis = 1 )
vs = tf.concat( [ tf.expand_dims( vl[2], axis = 1 ) for vl in qkv ], axis = 1 )
present = tf.stack( [ ks, vs ], axis = 1 )
if past is not None:
pk, pv = tf.unstack( past, axis = 1 )
ks = tf.concat( [ pk, ks ], axis =- 2 )
vs = tf.concat( [ pv, vs ], axis =- 2 )
if use_mask:
w, s, a = masked_multihead_attn( qs, ks, vs )
else:
w, s, a = multihead_attn( qs, ks, vs )
a = merge_heads( tf.transpose( a, [0, 2, 3, 1] ) )
o = self.o( a, is_training )
# [ batch, h, w, c ]
attn = o
vars = [ x for x in tf.compat.v1.trainable_variables() if "{}_attn_".format( self.name ) in x.name ]
if summary:
for w in vars:
tf.summary.histogram( family = 'self_attention', name = w.name, values = w )
return attn, present, vars
def multihead_attn(q, k, v):
if len( q.shape ) == 5 and len( k.shape ) == 5 and len( v.shape ) == 5:
# N = h * w
q = hw_flatten_multi_head( q ) # [ bs, N, c ]
k = hw_flatten_multi_head( k ) # [ bs, N, c ]
v = hw_flatten_multi_head( v ) # [ bs, N, c ]
# q, k, v have shape [ batch, heads, ... ]
w = tf.matmul( q, k, transpose_b = True )
# divide by sqrt to keep stable gradients
w = w * tf.rsqrt( tf.cast( v.shape[-1].value, w.dtype ) )
w = ( w - tf.reduce_min( w ) ) / ( tf.reduce_max( w ) - tf.reduce_min( w ) )
s = softmax( w )
a = tf.matmul( s, v )
return w, s, a
"""
From OpenAI
"""
def masked_multihead_attn(q, k, v):
# q, k, v have shape [ batch, heads, sequence, features ]
w = tf.matmul( q, k, transpose_b = True )
# divide by sqrt to keep stable gradients
w = w * tf.rsqrt( tf.cast( v.shape[-1].value, w.dtype ) )
w = mask_attn_weights( w )
s = softmax( w )
a = tf.matmul( s, v )
return w, s, a
"""
From OpenAI
"""
def mask_attn_weights(w):
# w [ batch, heads, dst_sequence, src_sequence ], where information flows from src to dst.
_, _, nd, ns = shape_list( w )
b = attention_mask( nd, ns, dtype = w.dtype )
b = tf.reshape( b, [ 1, 1, nd, ns ] )
w = w * b - tf.cast( 1e10, w.dtype ) * ( 1 - b )
return w
"""
From OpenAI
"""
def attention_mask(nd, ns, *, dtype):
"""1's in the lower triangle, counting from the lower right corner.
Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.
"""
i = tf.range( nd )[:,None]
j = tf.range( ns )
m = i >= j - ns + nd
return tf.cast( m, dtype )
"""
From OpenAI
"""
def merge_heads(x):
"""Smash the last two dimensions of x into a single dimension."""
*start, a, b = shape_list(x)
return tf.reshape( x, start + [ a * b ] )
"""
From OpenAI
"""
def split_heads(x, n_head):
# From [batch, sequence, features] to [batch, heads, sequence, features]
return tf.transpose( split_states( x, n_head ), [ 0, 2, 1, 3 ] )
"""
From OpenAI
"""
def split_states(x, n):
"""Reshape the last dimension of x into [n, x.shape[-1]/n]."""
*start, m = shape_list(x)
return tf.reshape(x, start + [n, m//n])
# class Self_Attention_Multi_Head_3D_GB(object):
# def __init__(self, ch, name, heads = 8, dp=0.0, bn=False, act=None, out_act=None):
# self.name = name
# self.out_act = out_act
# self.key = [ Conv2DLayer( ch, 1, 1, "{}_attn_head_{}_f_conv".format( name, x ), dropout = dp, bn = bn, act = act ) for x in range( heads ) ]
# self.query = [ Conv2DLayer( ch, 1, 1, "{}_attn_head_{}_g_conv".format( name, x ), dropout = dp, bn = bn, act = act ) for x in range( heads ) ]
# self.value = [ Conv2DLayer( ch, 1, 1, "{}_attn_head_{}_h_conv".format( name, x ), dropout = dp, bn = bn, act = act ) for x in range( heads ) ]
# def __create_net(self, x, q_op, k_op, v_op, summary, is_training, reduction):
# feat = maxpool2d( x, reduction, reduction )
# batch_size = tf.shape(feat)[0]
# height = feat.shape[1]
# width = feat.shape[2]
# num_channels = feat.shape[3]
# k = k_op( feat, is_training = is_training ) # [bs, h, w, c'] Key
# q = q_op( feat, is_training = is_training ) # [bs, h, w, c'] Query
# v = v_op( feat, is_training = is_training ) # [bs, h, w, c] Value
# k = maxpool2d( k, 2, 2 )
# v = maxpool2d( v, 2, 2 )
# # N = h * w
# qf = hw_flatten( q ) # [ bs, N, c ]
# kf = hw_flatten( k ) # [ bs, N, c ]
# s = tf.matmul( qf, kf, transpose_b = True ) # [ bs, Ng, Nf ]
# # sf = flatten( s )
# # beta = softmax( tf.reshape( sf, tf.shape( s ) ), 1 ) # attention map
# beta = softmax( s , 2 ) # attention map
# vf = hw_flatten( v ) # [ bs, N, c ]
# o = tf.matmul( beta, vf ) # [ bs, N, C ]
# mask = tf.reshape( o, [ batch_size, height, width, num_channels ] )
# if summary:
# tf.summary.image( '0_input', x, max_outputs = 1, family = "self_attention" )
# tf.summary.image( '2_query', q, max_outputs = 1, family = "self_attention" )
# tf.summary.image( '3_key', k, max_outputs = 1, family = "self_attention" )
# tf.summary.image( '4_value', v, max_outputs = 1, family = "self_attention" )
# tf.summary.image( '5_mask', mask, max_outputs = 1, family = "self_attention" )
# tf.summary.image( '6_scores', tf.image.resize( tf.expand_dims( s, axis = 3 ), [ 32, 32 ] ), max_outputs = 1, family = "self_attention" )
# tf.summary.image( '7_probs', tf.image.resize( tf.expand_dims( beta, axis = 3 ), [ 32, 32 ] ), max_outputs = 1, family = "self_attention" )
# return mask
# def __call__( self, x, is_training=False, summary=False, reduction=1):
# masks = [ self.__create_net( x, q, k, v, summary, is_training, reduction ) for q, k, v in zip( self.key, self.query, self.value ) ]
# msks = tf.concat( masks, axis = 3 )
# o = self.oc( msks, is_training = is_training )
# gamma = tf.get_variable( "{}_attn_gamma".format( self.name ), [1],
# initializer = tf.constant_initializer(0.0),
# trainable = is_training )
# attn = upsampling2d( gamma * o, reduction ) + x
# # attn = norm( attn, "{}_attn_".format( self.name ), is_training = is_training )
# attn = ln( ln )
# vars = [ x for x in tf.compat.v1.trainable_variables() if "{}_attn_".format( self.name ) in x.name ]
# if summary:
# tf.summary.image( '8_mask_c', o, max_outputs = 1, family = "self_attention" )
# tf.summary.image( '9_attn', attn, max_outputs = 1, family = "self_attention" )
# for w in vars:
# tf.summary.histogram( family = 'self_attention', name = w.name, values = w )
# return attn, vars
# def split_heads_2D(x, n_head):
# # From [batch, sequence, features] to [batch, heads, sequence, features]
# return tf.transpose( split_states( x, n_head ), [ 0, 2, 1, 3 ] )
# def split_heads_3D(x):
# # From [ batch, h, w, c ] to [ batch, heads , h, w, c ]
# return tf.transpose( split_states( x, n_head ), [ 0, 2, 1, 3 ] )
# def split_states(x, n):
# """Reshape the last dimension of x into [n, x.shape[-1]/n]."""
# *start, m = shape_list(x)
# return tf.reshape( x, start + [ n, m // n ] ) | [
"lucas.fernandes@softplan.com.br"
] | lucas.fernandes@softplan.com.br |
e914e0103967f2c816969579649b3bbeb1a6ac5c | 405966bfce2ff474af8d2cc6a96daf845f27ad3f | /pyraid/__init__.py | 123f1e67bd6f5dac8c80fe9b4e776ebb601ee135 | [] | no_license | nwithan8/pyraid | 59b1c2f87e334fa37c1d77df452d77df28cbd9ad | 9692287866d565d7959d9fc6e9e79c615c1cc6a9 | refs/heads/master | 2023-03-21T04:05:46.496924 | 2021-03-21T01:31:44 | 2021-03-21T01:31:44 | 349,871,318 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | from pyraid.api import API
| [
"n8gr8gbln@gmail.com"
] | n8gr8gbln@gmail.com |
d8cf3d84ed5cdc4f874eee364f2494032af887d3 | 393b101eeffb7db36248324bacac80316a9571d3 | /jkelle-TestCollatz.py | 5ab0d2e526b40cc50747f8fc91515df76b1029e2 | [] | no_license | lenako/CollatzTests | 476b576cd61810f47b4f96976baf5e949a720eaa | a96ab0a859ac68b7ca7be98b6e52212db0aae572 | refs/heads/master | 2020-05-30T21:13:28.646971 | 2014-01-29T01:14:45 | 2014-01-29T01:14:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,202 | py | #!/usr/bin/env python3
# -------------------------------
# projects/collatz/TestCollatz.py
# Copyright (C) 2014
# Glenn P. Downing
# -------------------------------
"""
To test the program:
% python TestCollatz.py > TestCollatz.out
% chmod ugo+x TestCollatz.py
% TestCollatz.py > TestCollatz.out
"""
# -------
# imports
# -------
import io
import unittest
from Collatz import *
# -----------
# TestCollatz
# -----------
class TestCollatz (unittest.TestCase) :
# ----
# read
# ----
def test_read_1 (self) :
r = io.StringIO("1 10\n")
a = [0, 0]
b = collatz_read(r, a)
i, j = a
self.assertTrue(b == True)
self.assertTrue(i == 1)
self.assertTrue(j == 10)
def test_read_2(self):
r = io.StringIO("-1 0\n")
a = [99, 99]
b = collatz_read(r, a)
i, j = a
self.assertTrue(b == True)
self.assertTrue(i == -1)
self.assertTrue(j == 0)
def test_read_3(self):
start = 3478765
end = start-1
r = io.StringIO("%s %s\n" % (start, end))
a = [None, "hello"]
b = collatz_read(r, a)
i, j = a
self.assertTrue(b == True)
self.assertTrue(i == start)
self.assertTrue(j == end)
# ----
# eval
# ----
def test_eval_1 (self) :
v = collatz_eval(1, 10)
self.assertTrue(v == 20)
def test_eval_2 (self) :
v = collatz_eval(100, 200)
self.assertTrue(v == 125)
def test_eval_3 (self) :
v = collatz_eval(201, 210)
self.assertTrue(v == 89)
def test_eval_4 (self) :
v = collatz_eval(900, 1000)
self.assertTrue(v == 174)
# -----
# print
# -----
def test_print_1(self) :
w = io.StringIO()
collatz_print(w, 1, 10, 20)
self.assertTrue(w.getvalue() == "1 10 20\n")
def test_print_2(self):
w = io.StringIO()
collatz_print(w, -1, 0, -1000)
self.assertTrue(w.getvalue() == "-1 0 -1000\n")
def test_print_3(self):
w = io.StringIO()
collatz_print(w, 0, 0, 0)
self.assertTrue(w.getvalue() == "0 0 0\n")
# -----
# solve
# -----
def test_solve_1(self):
r = io.StringIO("1 10\n100 200\n201 210\n900 1000\n")
w = io.StringIO()
collatz_solve(r, w)
self.assertTrue(w.getvalue() == "1 10 20\n100 200 125\n201 210 89\n900 1000 174\n")
def test_solve_2(self):
r = io.StringIO("1 1\n")
w = io.StringIO()
collatz_solve(r, w)
self.assertTrue(w.getvalue() == "1 1 1\n")
def test_solve_3(self):
r = io.StringIO("1 10\n179 1790\n1 1000\n")
w = io.StringIO()
collatz_solve(r, w)
self.assertTrue(w.getvalue() == "1 10 20\n179 1790 182\n1 1000 179\n")
# ---------
# cycle_len
# ---------
def test_cycle_len_1(self):
self.assertTrue(cycle_len(1,{}) == 1)
def test_cycle_len_2(self):
self.assertTrue(cycle_len(2,{}) == 2)
def test_cycle_len_3(self):
self.assertTrue(cycle_len(9,{}) == 20)
# ----
# main
# ----
print("TestCollatz.py")
unittest.main()
print("Done.")
| [
"ko.lena92@gmail.com"
] | ko.lena92@gmail.com |
8056de6c1be863831a25c5e86dc18fa6524392c1 | f75fd831eeaafb3b11a661e890dc4da89081f092 | /blogproject/blog/feeds.py | 95cf747bfbf2b2f817ceabf0d2a3b629ae1b705d | [] | no_license | peleccom/Pdjcode | 7121571199635be8b628f31afdfcc80fa082c6ce | 25ee8cc246dea324d306d9fb076245e44ce47242 | refs/heads/master | 2021-03-12T23:27:24.141631 | 2012-07-20T17:48:02 | 2012-07-20T17:48:02 | 4,851,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | from django.contrib.syndication.views import Feed
from blog.models import BlogPost
class RSSFeed(Feed):
title = "My awesome blog feed"
description = "The latest from my awesome blog"
link = "/blog/"
item_link = link
def items(self):
return BlogPost.objects.all()[:10]
def item_description(self, item):
return item.body
def item_title(self, item):
return item.title | [
"C:\\Documents and Settings\\Alexander\\Application Data\\The Bat!"
] | C:\Documents and Settings\Alexander\Application Data\The Bat! |
42773f023ad2c1b9a3b01b4a47249b9929055aa9 | 44667e7c1917a6ad930ed1fdcf47ee2d71336953 | /second/views.py | 1a53e7190ad36e50583013bd283a0ade2ed3e4e2 | [] | no_license | drhtka/react_django_hak | c5c703e634d69cc4f4d7b0ed8fe802d81d981951 | 62617aced709d98613ea314c487d9e76f0695ca7 | refs/heads/master | 2023-06-17T16:25:28.432894 | 2021-07-11T12:02:04 | 2021-07-11T12:02:04 | 384,898,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,545 | py | # -*- coding: utf-8 -*-
from django.http import JsonResponse
from django.shortcuts import render, redirect
from django.template.response import TemplateResponse, HttpResponse
from second.models import WorksModel
def SecondDef(request):
# print('SecondDef')
# print(request.GET)
# print(request.GET.get('selectuser'))
# print(request.GET.get('inputuser'))
# works_add = WorksModel.objects.create(username=request.GET.get('inputuser'), datauser=request.GET.get('selectuser'))
# print(works_add)
return TemplateResponse(request, 'second.html')
def SecondDefFetch(request):
"""
# по айди города получаем районы
:param request:
:return:
"""
print('secondfetch')
print('43536')
dict1 = {}
print(request.GET)
print(request.GET.get('selectuser'))
print(request.GET.get('inputuser'))
works_add = WorksModel.objects.create(username=request.GET.get('inputuser'), datauser=request.GET.get('selectuser'))
all_works_distinct = WorksModel.objects.values('datauser').distinct()
all_works = WorksModel.objects.filter().values()
# all_child = WorksModel.objects.filter(parent_id=city_id).values('name', 'id')
# print('all_child')
# print(all_child)
#dict1['data']=list(works_add)# словарь в словаре для передачи json
# print('dict1')
# print(dict1)
# return JsonResponse(works_add)
return render(request, 'second_new.html', {'all_works_distinct':all_works_distinct, 'all_works': all_works}) | [
"drhtka@gmil.com"
] | drhtka@gmil.com |
7d24cd93a7fba526abe473e1a5d4a570cd1114e6 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2741/60760/298464.py | 353a99580a2dc99629300bfe0b7b2f83c5ddb862 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | def func(arr: list):
length=len(arr)
l=length
while l>0:
for j in range(length+1-l):
temp=arr[j:j+l]
temp2=sorted(set(temp))
if temp==temp2:
return l
l=l-1
return 0
b = input()
arr = list(map(int, b[1:len(b)-1].split(',')))
print(func(arr)) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
2308b0c04994fcc9e120f82195135253102e7f8a | eaa284e89ce848e7500d08cc16b40b6c465e6b5c | /cthaeh/app.py | 7efe62c7464f73d3379b74d3b80e4182bd7ca3a7 | [
"MIT"
] | permissive | pipermerriam/cthaeh | bfac951546977eeb078df9bffb5a07536f6772ee | a3f63b0522d940af37f485ccbeed07666adb465b | refs/heads/master | 2023-08-28T08:49:23.966610 | 2020-04-28T18:17:02 | 2020-04-28T18:17:02 | 259,418,354 | 0 | 0 | MIT | 2020-04-27T18:30:54 | 2020-04-27T18:30:53 | null | UTF-8 | Python | false | false | 2,077 | py | import logging
import pathlib
from typing import Optional
from async_service import Service
from eth_typing import BlockNumber
from sqlalchemy import orm
import trio
from web3 import Web3
from cthaeh.exfiltration import Exfiltrator
from cthaeh.ir import Block as BlockIR
from cthaeh.loader import BlockLoader
from cthaeh.models import Header
from cthaeh.rpc import RPCServer
def determine_start_block(session: orm.Session) -> BlockNumber:
head = (
session.query(Header) # type: ignore
.order_by(Header.block_number.desc())
.filter(Header.is_canonical == True) # noqa: E712
.first()
)
if head is None:
return BlockNumber(0)
else:
return BlockNumber(head.block_number + 1)
class Application(Service):
logger = logging.getLogger("cthaeh.Cthaeh")
rpc_server: Optional[RPCServer] = None
def __init__(
self,
w3: Web3,
session: orm.Session,
start_block: Optional[BlockNumber],
end_block: Optional[BlockNumber],
concurrency: int,
ipc_path: Optional[pathlib.Path],
) -> None:
block_send_channel, block_receive_channel = trio.open_memory_channel[BlockIR](
128
)
if start_block is None:
start_block = determine_start_block(session)
self.exfiltrator = Exfiltrator(
w3=w3,
block_send_channel=block_send_channel,
start_at=start_block,
end_at=end_block,
concurrency_factor=concurrency,
)
self.loader = BlockLoader(
session=session, block_receive_channel=block_receive_channel
)
if ipc_path is not None:
self.rpc_server = RPCServer(ipc_path=ipc_path, session=session)
async def run(self) -> None:
self.manager.run_daemon_child_service(self.exfiltrator)
self.manager.run_daemon_child_service(self.loader)
if self.rpc_server is not None:
self.manager.run_daemon_child_service(self.rpc_server)
await self.manager.wait_finished()
| [
"pipermerriam@gmail.com"
] | pipermerriam@gmail.com |
d420e3d4e43ae9880031ddd7c272d20b74392922 | 07fbbfd9c5eb4a9a94902e011bc3f072322ede51 | /week03/example of plotting vectors.py | e263a9196ff811040d1670f8b6009e8d14642d5e | [] | no_license | dmart030/example | e6a415e422ce4404710746f334eab403672e4c91 | 0c0c2b34bfe9ccc86d8e57dc32e8dd78c10cc12c | refs/heads/master | 2021-01-22T02:48:54.782501 | 2015-06-03T05:01:07 | 2015-06-03T05:01:07 | 33,695,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 26 01:20:57 2015
@author: Lalecia
"""
'''
import plotly.plotly as py
from plotly.graph_objs import *
from pylab import *
from visual import vector, norm, mag
'''
from pylab import imshow,show
import numpy as np
#electric constant
k=9e9
a = aray()
#x,y are the axis values. X,Y are the mesh
x=np.arange(1,11,1)
y=np.arange(1,11,1)
X,Y = np.meshgrid(x, y)
print x
print y
z = x*y
imshow(z)
show() | [
"dmart030@ucr.edu"
] | dmart030@ucr.edu |
01b7f851c67a794d832964f77b7ae412e5c463f4 | 046d96c5076bdafbbbdde9ca8fbe311ecf413a89 | /.ipynb_checkpoints/yoda_simulator-checkpoint.py | 5dd727f65172add7845d29c0248ba7769e5fc968 | [] | no_license | junweiluo/Retirement_Planner | 16ce749849d0f387de0be0f465b6189774d9653f | e1df74a6b77251859182aa68375379886e3ec9a3 | refs/heads/master | 2021-01-06T00:56:52.441420 | 2020-02-24T17:50:21 | 2020-02-24T17:50:21 | 241,183,237 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,839 | py | import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
np.random.seed(42)
def portfolio_by_retirement(portfolio, initial_investment, withdraw_type, withdraw_number, years_to_retirement):
    """Monte-Carlo simulate 500 daily portfolio paths over up to 30 years.

    portfolio: DataFrame-like with one column per asset; row 0 is the daily
    mean return, row 1 the daily std-dev, row 2 the portfolio weight (as
    read via .iloc[0..2, stock] below).
    withdraw_type: 'fixed amount' subtracts withdraw_number dollars once a
    year; any other value treats withdraw_number as a fixed annual rate.
    Returns a DataFrame of daily balances (252 trading days/year, 21 per
    month), truncated to years_to_retirement years and scaled back to
    dollars by initial_investment (paths are simulated with balance 1.0).
    """
    portfolio_dimension = portfolio.shape
    for stock in range(portfolio_dimension[1]):
        # NOTE(review): stores each asset's 30y x 500-path daily-return
        # matrix in module globals ('stock_0', ...) -- works, but leaks
        # state between calls; a local dict would be safer.
        globals()['stock_%s' % stock]= np.random.normal(portfolio.iloc[0,stock],
                                                  portfolio.iloc[1,stock],
                                                  252*30*500).reshape(252*30,500)
    #initialize variables
    next_beginning_balance = np.ones((1,500))
    Portfolio_30_year = np.ones((1,500))
    # Balances are normalized to 1.0, so a dollar withdrawal must also be
    # expressed in normalized units (dollars / initial_investment).
    withdraw_amount = withdraw_number/initial_investment
    withdraw_rate = withdraw_number
    for year in range(30):
        #initialize for each year
        Portfolio_1_year = np.ones((1,500))
        for month in range(12):
            #initialize for each month
            portfolio_monthly_return = np.zeros((22,500))
            for stock in range(portfolio_dimension[1]):
                # Row 0 carries last month's ending balance; rows 1-21 the
                # month's daily gross returns for this asset.
                stock_month_daily_return = np.concatenate((next_beginning_balance,
                                                  (globals()['stock_%s' % stock][year*12*21+month*21:year*12*21+(month+1)*21])+1),
                                                  axis = 0)
                # Compound daily and weight by the asset's allocation (row 2).
                portfolio_monthly_return += np.cumprod(stock_month_daily_return, axis = 0)*portfolio.iloc[2,stock]
            #get balance for rebalancing in next loop.
            next_beginning_balance = (portfolio_monthly_return[-1,:]).reshape(1,500)
            Portfolio_1_year = np.concatenate((Portfolio_1_year,portfolio_monthly_return[1:,:]), axis = 0)
        # Withdrawal is applied once per year, after the 12th month.
        if withdraw_type != 'fixed amount':
            next_beginning_balance = (portfolio_monthly_return[-1,:]).reshape(1,500)*(1-withdraw_rate)
        else:
            next_beginning_balance =(portfolio_monthly_return[-1,:]).reshape(1,500)-withdraw_amount
        Portfolio_30_year = np.concatenate((Portfolio_30_year,Portfolio_1_year[1:,:]), axis = 0)
    Portfolio_30_year_simulation = pd.DataFrame(Portfolio_30_year[1:])
    return Portfolio_30_year_simulation.iloc[:years_to_retirement*252]*initial_investment
def quantile_chart(portfolio, initial_investment, withdraw_type, withdraw_number, years_to_retirement):
    """Plot the 10th/50th/90th percentile daily balance paths of the simulation."""
    simulation = portfolio_by_retirement(
        portfolio, initial_investment, withdraw_type, withdraw_number, years_to_retirement
    )
    daily_quantiles = simulation.quantile(q=(0.10, 0.5, 0.9), axis=1).T
    chart_title = f"Investment of ${initial_investment}, withdraw {withdraw_type} by {withdraw_number} in {years_to_retirement} years."
    return daily_quantiles.plot(title=chart_title, figsize=(10, 5))
def simulation_chart(portfolio, initial_investment, withdraw_type, withdraw_number, years_to_retirement):
    """Plot every simulated balance path on a single (legend-free) chart."""
    simulation = portfolio_by_retirement(
        portfolio, initial_investment, withdraw_type, withdraw_number, years_to_retirement
    )
    return simulation.plot(legend=False, title="Portfolio simulation", figsize=(15, 10))
def confidence_interval(portfolio, initial_investment, withdraw_type, withdraw_number, years_to_retirement):
    """Histogram the ending balances and mark the 5%/95% quantiles in red."""
    # Top-level container for all plot elements; close it when no longer needed.
    plt.figure()
    ending_balances = portfolio_by_retirement(
        portfolio, initial_investment, withdraw_type, withdraw_number, years_to_retirement
    ).iloc[-1]
    bounds = ending_balances.quantile(q=[0.05, 0.95])
    ending_balances.plot(kind='hist', title="90% confidence interval for tails")
    plt.axvline(bounds.iloc[0], color='r')
    plt.axvline(bounds.iloc[1], color='r')
    return plt
def search_withdraw_amount(portfolio, initial_investment, years_to_retirement, target_amount):
    """Grid-search a fixed annual dollar withdrawal whose simulated
    10th-percentile ending balance still meets target_amount, then print
    the finding and plot the quantile chart for it.

    NOTE(review): after the loop breaks, ``change`` holds the first value
    that FAILED the target, and the reported balance comes from that
    failing step -- confirm this off-by-one is intended.
    NOTE(review): if the very first candidate fails (or the bare ``except``
    fires), ``desired_withdraw_amount`` is never bound and the final
    ``return`` raises NameError; the bare ``except`` also hides real errors.
    """
    try:
        min_withdraw = round(-initial_investment) #round(-initial_investment)
        max_withdraw = round(initial_investment)
        # Search step: 1% of the initial investment.
        learning_rate = round(initial_investment/100)
        for change in range(min_withdraw, max_withdraw, learning_rate):
            # 10th-percentile ending balance when withdrawing `change`/year.
            investment_ending_price = portfolio_by_retirement(portfolio,initial_investment,'fixed amount', change, years_to_retirement).iloc[-1]
            quantile_result = investment_ending_price.quantile(q=[0.10]).astype(int)
            #print(f"If withdrawing ${change} annually, the 10% percentile return will be ${quantile_result.iloc[0]}.")
            if quantile_result.iloc[0]<target_amount:
                break
        desired_withdraw_amount = change
        ending_10_percentile_balance = quantile_result.iloc[0]
        # A negative "withdrawal" means deposits are required to hit the target.
        if desired_withdraw_amount < 0:
            to_print = (f"Rather than withdrawing, you should deposit ${-desired_withdraw_amount} annually, and ending 10% percentile balance after {years_to_retirement} years would be ${ending_10_percentile_balance}.")
        else:
            to_print = (f"The desired withdraw amount is ${desired_withdraw_amount} annually, and ending 10% percentile balance after {years_to_retirement} years would be ${ending_10_percentile_balance}.")
    except:
        to_print = "Your target return is out of bound. Please input reasonable numbers!"
    return print(to_print), quantile_chart(portfolio,initial_investment, 'fixed amount', desired_withdraw_amount, years_to_retirement)
def search_withdraw_rate(portfolio, initial_investment, years_to_retirement, target_amount):
    """Grid-search a fixed annual withdrawal *rate* (in 0.5% steps over
    -100%..+100%) whose simulated 10th-percentile ending balance still
    meets target_amount, then print the finding and plot the chart.

    NOTE(review): shares the defects of search_withdraw_amount -- after the
    break, ``change`` is the first failing value; on total failure the
    final ``return`` raises NameError because ``desired_withdraw_rate`` is
    unbound, and the bare ``except`` hides genuine errors.
    """
    try:
        # Rates are searched in integer thousandths: -1000..1000 step 5
        # corresponds to -100%..100% in 0.5% increments.
        min_withdraw = -1000
        max_withdraw = 1000
        learning_rate = 5
        for change in range(min_withdraw, max_withdraw, learning_rate):
            investment_ending_price = portfolio_by_retirement(portfolio,initial_investment,'fixed rate', change/1000, years_to_retirement).iloc[-1]
            quantile_result = investment_ending_price.quantile(q=[0.10]).astype(int)
            #print(f"If withdrawing ${change} annually, the 10% percentile return will be ${quantile_result.iloc[0]}.")
            if quantile_result.iloc[0]<target_amount:
                break
        desired_withdraw_rate = change/1000
        ending_10_percentile_balance = quantile_result.iloc[0]
        # A negative rate means deposits are required to hit the target.
        if desired_withdraw_rate < 0:
            to_print = (f"Rather than withdrawing, you should deposit {-desired_withdraw_rate*100}% annually, and ending 10% percentile balance after {years_to_retirement} years would be ${ending_10_percentile_balance}.")
        else:
            to_print = (f"The desired withdraw rate is {desired_withdraw_rate*100}% annually, and ending 10% percentile balance after {years_to_retirement} years would be ${ending_10_percentile_balance}.")
    except:
        to_print = "Your target return is out of bound. Please input reasonable numbers!"
    return print(to_print), quantile_chart(portfolio,initial_investment, 'fixed rate', desired_withdraw_rate, years_to_retirement)
"junwei.luo9777@gmail.com"
] | junwei.luo9777@gmail.com |
e511daa839d5f5ec938a1828c6f4e1d08361e541 | 3f7c27ccd0ab1fcbd2583cf4b764b81bd27dd718 | /apps/members/migrations/0003_auto__add_field_member_address__add_field_member_city__add_field_membe.py | bfad4ac11b208f53dc018a2f15b4d2636362d119 | [] | no_license | adamtlord/foreverland | 001ca1a91a3cc468405efb80fe7981e75b82021c | 8206ddeeb8cfbd2752ef6fa9839424718cb96e07 | refs/heads/master | 2020-04-16T00:50:51.582008 | 2016-09-21T03:27:39 | 2016-09-21T03:27:39 | 11,668,672 | 0 | 0 | null | 2016-09-04T03:46:51 | 2013-07-25T19:05:55 | Python | UTF-8 | Python | false | false | 7,747 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds optional contact/address columns
    (address, city, state, zip_code, phone, ssn) to both the Member and
    Sub models.  Auto-generated by South; edit with care."""

    def forwards(self, orm):
        """Apply the migration: add the six new nullable columns to each table."""
        # Adding field 'Member.address'
        db.add_column(u'members_member', 'address',
                      self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
                      keep_default=False)
        # Adding field 'Member.city'
        db.add_column(u'members_member', 'city',
                      self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
                      keep_default=False)
        # Adding field 'Member.state'
        db.add_column(u'members_member', 'state',
                      self.gf('django.contrib.localflavor.us.models.USStateField')(max_length=2, null=True, blank=True),
                      keep_default=False)
        # Adding field 'Member.zip_code'
        db.add_column(u'members_member', 'zip_code',
                      self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True),
                      keep_default=False)
        # Adding field 'Member.phone'
        db.add_column(u'members_member', 'phone',
                      self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, null=True, blank=True),
                      keep_default=False)
        # Adding field 'Member.ssn'
        db.add_column(u'members_member', 'ssn',
                      self.gf('django.db.models.fields.CharField')(max_length=16, null=True, blank=True),
                      keep_default=False)
        # Adding field 'Sub.address'
        db.add_column(u'members_sub', 'address',
                      self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
                      keep_default=False)
        # Adding field 'Sub.city'
        db.add_column(u'members_sub', 'city',
                      self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
                      keep_default=False)
        # Adding field 'Sub.state'
        db.add_column(u'members_sub', 'state',
                      self.gf('django.contrib.localflavor.us.models.USStateField')(max_length=2, null=True, blank=True),
                      keep_default=False)
        # Adding field 'Sub.zip_code'
        db.add_column(u'members_sub', 'zip_code',
                      self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True),
                      keep_default=False)
        # Adding field 'Sub.phone'
        db.add_column(u'members_sub', 'phone',
                      self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, null=True, blank=True),
                      keep_default=False)
        # Adding field 'Sub.ssn'
        db.add_column(u'members_sub', 'ssn',
                      self.gf('django.db.models.fields.CharField')(max_length=16, null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Reverse the migration: drop the columns added by forwards()."""
        # Deleting field 'Member.address'
        db.delete_column(u'members_member', 'address')
        # Deleting field 'Member.city'
        db.delete_column(u'members_member', 'city')
        # Deleting field 'Member.state'
        db.delete_column(u'members_member', 'state')
        # Deleting field 'Member.zip_code'
        db.delete_column(u'members_member', 'zip_code')
        # Deleting field 'Member.phone'
        db.delete_column(u'members_member', 'phone')
        # Deleting field 'Member.ssn'
        db.delete_column(u'members_member', 'ssn')
        # Deleting field 'Sub.address'
        db.delete_column(u'members_sub', 'address')
        # Deleting field 'Sub.city'
        db.delete_column(u'members_sub', 'city')
        # Deleting field 'Sub.state'
        db.delete_column(u'members_sub', 'state')
        # Deleting field 'Sub.zip_code'
        db.delete_column(u'members_sub', 'zip_code')
        # Deleting field 'Sub.phone'
        db.delete_column(u'members_sub', 'phone')
        # Deleting field 'Sub.ssn'
        db.delete_column(u'members_sub', 'ssn')

    # Frozen model snapshot South uses to build the `orm` object above.
    models = {
        u'members.member': {
            'Meta': {'object_name': 'Member'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'display_first': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'display_last': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'dob': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instrument': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'join_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'section': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'ssn': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'state': ('django.contrib.localflavor.us.models.USStateField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
        },
        u'members.sub': {
            'Meta': {'object_name': 'Sub'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instrument': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'ssn': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'state': ('django.contrib.localflavor.us.models.USStateField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
        }
    }
    complete_apps = ['members']
"adam.lord@gmail.com"
] | adam.lord@gmail.com |
dd7b3751dac42303218c555346b4dc3e265685c4 | 9cd180fc7594eb018c41f0bf0b54548741fd33ba | /sdk/python/pulumi_azure_nextgen/network/v20170801/network_watcher.py | 6baeb361ea8cee421d8b1a6957351992333fc362 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | MisinformedDNA/pulumi-azure-nextgen | c71971359450d03f13a53645171f621e200fe82d | f0022686b655c2b0744a9f47915aadaa183eed3b | refs/heads/master | 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,727 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['NetworkWatcher']
class NetworkWatcher(pulumi.CustomResource):
    """Network watcher in a resource group (generated Pulumi resource for
    the 2017-08-01 Azure network API version)."""
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 network_watcher_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Network watcher in a resource group.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] network_watcher_name: The name of the network watcher.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        # Legacy keyword-argument forms kept for backwards compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate inputs and declare outputs.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['etag'] = etag
            __props__['id'] = id
            __props__['location'] = location
            if network_watcher_name is None:
                raise TypeError("Missing required property 'network_watcher_name'")
            __props__['network_watcher_name'] = network_watcher_name
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['tags'] = tags
            # Output-only properties start unresolved (None).
            __props__['name'] = None
            __props__['provisioning_state'] = None
            __props__['type'] = None
        # Aliases so the same resource can be addressed via other API versions.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20160901:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20161201:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20170301:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20170601:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20170901:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20171001:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20171101:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20180101:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20180201:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20180401:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20180601:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20180701:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20180801:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20181001:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20181101:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20181201:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20190201:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20190401:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20190601:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20190701:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20190801:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20190901:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20191101:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20191201:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20200301:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20200401:NetworkWatcher"),
pulumi.Alias(type_="azure-nextgen:network/v20200501:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20200601:NetworkWatcher")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(NetworkWatcher, __self__).__init__(
            'azure-nextgen:network/v20170801:NetworkWatcher',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'NetworkWatcher':
        """
        Get an existing NetworkWatcher resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # NOTE: generated lookup stub -- __props__ stays empty; outputs are
        # presumably resolved by the engine from the existing resource state.
        __props__ = dict()
        return NetworkWatcher(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[Optional[str]]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    # Map provider camelCase property names to the snake_case Python API.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
e6ab16e5c4f7263faa44e191c9a30ba99fd3777d | 62fc25baa271eec064f3e8d26e578f44af259289 | /pic_topface_all.py | 4edd638bab667925007e8b1e5ca8a37946db625f | [] | no_license | lituan/Topface | 8b56f5063d2f3d9613721e106a548a8eb8c3142d | be7cb55fe7eac8ffd2ea01e7041195521805a9b9 | refs/heads/master | 2021-01-12T12:27:43.652857 | 2017-05-04T13:22:02 | 2017-05-04T13:22:02 | 72,499,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,533 | py | import sys
import os
import itertools
import operator
import numpy as np
import lt
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from collections import defaultdict
def polar_to_rect(theta, r):
    """Convert polar coordinates to rectangular, shifted to the (0.5, 0.5) axes centre."""
    x = r * np.cos(theta) + 0.5
    y = r * np.sin(theta) + 0.5
    return (x, y)
def plot_hotspot(ax,blade_num,patch,title,ft=6,title_posi=-0.06,line_width=1.0):
    """Draw one WD40 top-face diagram on `ax`: an inner ring of `blade_num`
    circles, an outer ring of 2*blade_num circles, residue letters in the
    circles coloured by residue class, and dotted triangles linking each
    inner circle to its two outer neighbours.

    patch format (((blade_index, position), 'R'), ...) where position 0 is
    the inner ring and positions 1/2 are the two outer-ring slots.
    """
    #patch format (((0,0),'R'),...)
    ax.axis('off')
    fontdict = {'fontsize':ft}
    ax.set_title(title,fontdict,position=(0.5,title_posi))
    # Residue groupings kept for reference; only res_hash is used below.
    basic = ['K','R','H']
    acid = ['D','E']
    aromatic = ['F','W','Y']
    polar =['S','T','N','Q']
    branch_phobic = ['V','L','I','M','A']
    special_branch = ['P','G']
    sulf = ['C']
    # NOTE(review): res_hash is assigned three times in a row -- only the
    # last mapping takes effect; the first two are dead experimentation.
    res_hash = {'K':0,'R':0,'H':0,'D':1,'E':1,'F':2,'W':2,'Y':2,'S':3,'T':3,\
                'N':3,'Q':3,'V':4,'L':4,'I':4,'M':4,'A':4,'C':5,'P':6,'G':7,'*':8}
    res_hash = {'K':0,'R':0,'H':0,'D':8,'E':8,'F':8,'W':8,'Y':0,'S':0,'T':0,\
                'N':8,'Q':8,'V':8,'L':8,'I':8,'M':8,'A':8,'C':8,'P':8,'G':8,'*':8}
    res_hash = {'K':0,'R':0,'H':0,'D':8,'E':8,'F':1,'W':1,'Y':1,'S':2,'T':2,\
                'N':8,'Q':8,'V':8,'L':8,'I':8,'M':8,'A':8,'C':8,'P':8,'G':8,'*':8}
    # Class index -> face colour (8/'none' means "not highlighted").
    colors = {0:'blue',1:'red',2:'green',3:'white',4:'purple',5:'brown',6:'yellow',7:'cyan',8:'none'}
    color_in = {}
    color_out = {}
    for i in range(blade_num):
        color_in[i] = 'none'
    for i in range(blade_num*2):
        color_out[i] = 'none'
    text_in_num = []
    text_out_num = []
    text_in = []
    text_out = []
    # Split patch entries into inner (position 0) and outer (1/2) rings.
    for i,p in enumerate(patch):
        b = p[0][0]
        r = p[0][1]
        if r == 0:
            text_in_num.append(b)
            text_in.append(p[1])
            color_in[b] = colors.get(res_hash.get(p[1],8),'none')
        else:
            text_out.append(p[1])
            text_out_num.append(b*2+r-1)
            color_out[b*2+r-1] = colors.get(res_hash.get(p[1],8),'none')
    # Inner ring: blade_num circles at radius 0.2 around the axes centre.
    num_in = blade_num
    blade_bet = np.pi*2/num_in
    theta_in = [blade_bet*i for i in range(num_in)]
    r_in = 0.2
    area_in = 0.064
    center_in = []
    for i in range(num_in):
        center_in.append(polar_to_rect(theta_in[i],r_in))
        circ = patches.Circle(center_in[i],area_in,alpha=0.6,color=color_in[i],transform=ax.transAxes)
        ax.add_patch(circ)
    # Outer ring: 2*blade_num circles at radius 0.4, offset by half a step.
    num_out = blade_num*2
    blade_bet = np.pi*2/num_out
    theta_out = [blade_bet*(i-0.50) for i in range(num_out)]
    r_out = 0.4
    area_out = 0.064
    center_out = []
    # NOTE(review): this rebinding of `colors` to a list is dead code -- the
    # colour lookups above already happened and `colors` is not used again.
    colors = ['blue','purple']
    for i in range(num_out):
        center_out.append(polar_to_rect(theta_out[i],r_out))
        circ = patches.Circle(center_out[i],area_out,alpha=0.6,color=color_out[i],transform=ax.transAxes)
        ax.add_patch(circ)
    # Draw the residue letters centred in their circles.
    for i,n in enumerate(text_in_num):
        ax.text(center_in[n][0],center_in[n][1],text_in[i],transform=ax.transAxes,horizontalalignment='center',verticalalignment='center',**fontdict)
    for i,n in enumerate(text_out_num):
        ax.text(center_out[n][0],center_out[n][1],text_out[i],transform=ax.transAxes,horizontalalignment='center',verticalalignment='center',**fontdict)
    # Dotted triangle per blade: inner circle + its two outer circles.
    for i in range(num_in):
        a = center_in[i]
        b = center_out[i*2]
        c = center_out[i*2+1]
        vx=[(a[0],a[1]),(b[0],b[1]),(c[0],c[1])]
        trip=patches.Polygon(vx,alpha=0.9,ls='dotted',lw=line_width,fill=False,facecolor='none',transform=ax.transAxes)
        ax.add_patch(trip)
        #ax.triplot([a[0],b[0],c[0]],[a[1],b[1],c[1]],transform=ax.transAxes)
def plot_top_face(pro_hots, dirsuffix=''):
    """Draw one top-face figure per protein and save it under dirsuffix.

    pro_hots format: {pro_name: ['RRR', 'KKK', 'YYY', ...], ...} -- one
    three-character hotspot string per blade.
    """
    for pro_name, blades in pro_hots.iteritems():
        fig = plt.figure()
        ax = fig.add_subplot(111, aspect='equal')
        n_blades = len(blades)
        title = str(pro_name) + ' ' + 'bn:' + str(n_blades)
        # Expand each blade's 3-char string into ((blade, slot), residue) pairs.
        patch = [((idx, slot), blade[slot])
                 for idx, blade in enumerate(blades)
                 for slot in range(3)]
        plot_hotspot(ax, n_blades, patch, title, ft=12)
        out_path = os.path.join(dirsuffix, str(pro_name))
        fig.savefig(out_path, transparent=True, bbox_inches='tight', dpi=1000)
        plt.close('all')
def plot_top_faces(pro_hots, dirsuffix=''):
    """Draw top-face charts nine to a page (3x3 grid) and save each page.

    pro_hots format: {pro_name: ['RRR', 'KKK', 'YYY', ...], ...}
    Pages are saved under dirsuffix as '1', '2', ...; the last page may be
    only partially filled.

    The previous implementation popped names inside a bare ``except`` and
    used the resulting exception as loop-exit control flow, which silently
    swallowed genuine plotting errors; this version chunks explicitly.
    """
    # The old code popped from the end of the key list, so keep the same
    # (reversed) ordering to produce identical page contents.
    pro_names = list(reversed(pro_hots.keys()))
    c_num = 3
    r_num = 3
    per_page = c_num * r_num
    page_count = (len(pro_names) + per_page - 1) // per_page
    for page in range(page_count):
        fig = plt.figure()
        page_names = pro_names[page * per_page:(page + 1) * per_page]
        for slot, pro_name in enumerate(page_names):
            ax = fig.add_subplot(r_num, c_num, slot + 1, aspect='equal')
            pro_blade = pro_hots[pro_name]
            blade_num = len(pro_blade)
            title = str(pro_name) + ' ' + 'bn:' + str(blade_num)
            patch = []
            for i, vi in enumerate(pro_blade):
                patch.append(((i, 0), vi[0]))
                patch.append(((i, 1), vi[1]))
                patch.append(((i, 2), vi[2]))
            plot_hotspot(ax, blade_num, patch, title, title_posi=-0.10, line_width=0.6)
        ofile = os.path.join(dirsuffix, str(page + 1))
        fig.savefig(ofile, transparent=True, bbox_inches='tight', dpi=1000)
        plt.close('all')
    return
def get_hotspot(wdsp_f):
    """Parse a WDSP file object and extract one 3-char hotspot per blade.

    Header lines start with '>' followed by the protein name; data lines
    with more than four whitespace-separated columns carry one blade, whose
    segments are columns 3..-2.  Each hotspot is R1 (2nd char of segment 2)
    + R1-2 (last char of segment 1) + the D-1 residue inferred from
    segment 5 ('*' when it cannot be located).

    Returns {protein_name: [hotspot, ...]}.

    Fixes: the len==5 / tail[4]=='D' branch assigned a mistyped variable
    (``Di_1``), leaving ``D_1`` stale from the previous blade (or unbound
    on the first one).  Also uses .items() so the code runs on both
    Python 2 and 3, and ignores data lines that precede any header instead
    of raising NameError.
    """
    wdsp_hotspot = {}
    blade = {}
    pro_name = None
    for line in wdsp_f:
        words = line.split()
        if len(words) >= 2 and words[0] == '>':
            pro_name = words[1]
            blade[pro_name] = []
        elif pro_name is not None and len(words) > 4:
            # Columns 3..-2 are the blade segments for this repeat.
            blade[pro_name].append(words[3:-1])
    for pro_name, pro_blades in blade.items():
        hotspot = []
        for seg in pro_blades:
            R1 = seg[2][1]
            R1_2 = seg[1][-1]
            tail = seg[5]
            # Walk the possible positions of the conserved 'D' and take the
            # residue just before it; fall back to heuristics by length.
            if len(tail) <= 5 and tail[1] == 'D':
                D_1 = tail[0]
            elif len(tail) == 3 or len(tail) == 2:
                D_1 = tail[0]
            elif 3 <= len(tail) <= 5 and tail[2] == 'D':
                D_1 = tail[1]
            elif 4 <= len(tail) <= 5 and tail[3] == 'D':
                D_1 = tail[2]
            elif len(tail) == 5 and tail[4] == 'D':
                D_1 = tail[3]  # was mistyped as 'Di_1'
            elif len(tail) <= 5:
                D_1 = tail[1]
            elif len(tail) <= 7:
                D_1 = tail[0]
            else:
                D_1 = '*'
            hotspot.append(R1 + R1_2 + D_1)
        wdsp_hotspot[pro_name] = hotspot
    return wdsp_hotspot
@lt.run_time
def main():
    """Parse the WDSP file named on the command line and render hotspot pages
    into a '<input>_<script>_result' directory next to the input file."""
    wdsp_path = sys.argv[-1]
    hotspots = get_hotspot(open(wdsp_path))
    in_dir, in_name = os.path.split(wdsp_path)
    script_base = os.path.splitext(sys.argv[0])[0]
    file_base = os.path.splitext(in_name)[0]
    result_path = os.path.join(in_dir, file_base + '_' + script_base + '_' + 'result')
    if not os.path.exists(result_path):
        os.makedirs(result_path)
    plot_top_faces(hotspots, result_path)
main()
| [
"lituantuan@foxmail.com"
] | lituantuan@foxmail.com |
2b757c4e4e70bb2d2a7db63873ae87db2e97235d | 10ab6d4974aa2459b9b7e25f554834e1e4d6ef04 | /producthunt/settings.py | 9fe3c26137974a2c1c478944f8a8397a86c78b6a | [] | no_license | The-A-Team-D/producthunt-project | 37656909caa8213bbdb44826fa7e404437f70357 | 5b47d062996cdabec742639f6bce11d7210194a2 | refs/heads/master | 2023-02-16T15:30:20.544016 | 2021-01-15T10:20:38 | 2021-01-15T10:20:38 | 275,213,256 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,379 | py | """
"""
Django settings for producthunt project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xb#5ev1g*e3=zx-z_+yk%9@s!3q+k*6_qb&wk_ig3cj+%0a45+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'products.apps.ProductsConfig',
    'accounts.apps.AccountsConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'producthunt.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['producthunt/templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'producthunt.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# Fixed: the outer DATABASES dict was missing its closing brace, which made
# this module a SyntaxError.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'producthunt/static/')
]
STATIC_ROOT = os.path.join(BASE_DIR,'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
"ajaditya0987@gmail.com"
] | ajaditya0987@gmail.com |
6424a4ae89fb74869d8da7f2fdd774f4180ec2f2 | 9ff9f8c4066b6bb6f023e082c2ef3a6336d12a26 | /input_output.py | 4c577ec912da1ad737795b4798b5564ffca2e012 | [] | no_license | hyj97/py_learning | caf8a7d5d4ab8185449b0bfb9b708678dc0bd771 | b623834c104e210de9eedc423a38c5fb6e14e16b | refs/heads/master | 2022-12-04T15:30:40.518498 | 2020-08-24T09:12:33 | 2020-08-24T09:12:33 | 286,935,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | name = input('your name:')
gender = input('you are a boy?(y/n)')
welcome_str = 'Welcome to the matrix {prefix} {name}.'
welcome_dic = {
'prefix': 'Mr.' if gender == 'y' else 'Mrs',
'name': name
}
print('authorizing....')
print(welcome_str.format(**welcome_dic))
| [
"heyj@zetyun.com"
] | heyj@zetyun.com |
557626a838a5516f574530c6f9b0f8abecfe9102 | d438590c032484c00674def5d5949ff2d33dcc07 | /io2_portal/urls.py | 73d87a3bda881992df1e612b5720ec749307d100 | [] | no_license | map34/io2_portal | 52503504334a355e2bfcbcd23b07206e543768b4 | 1890313104218ad1f6c9baa259341e3a689afc04 | refs/heads/master | 2020-03-25T01:00:43.877008 | 2018-08-02T17:22:41 | 2018-08-02T17:22:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | """io2_portal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
import apps.home.urls
urlpatterns = [
path('', include(apps.home.urls)),
path('admin/', admin.site.urls),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
"landungs@uw.edu"
] | landungs@uw.edu |
8aa5e91a514ac1dd4d2442848090f59534c2a96d | 89c9ccddca3e2bfd331dce422f6351fc307ddcdb | /CF/util/reader.py | f50f957d4dfb10b8455fcb9a47d1e63ab2175495 | [] | no_license | FlyGreyWolf/personal_recommendation | 675f825f299c3c743ea2c9c22b902f57053c9224 | a24823302216d45a8b89a0561d769e10c5244275 | refs/heads/master | 2020-05-17T18:01:35.554047 | 2019-05-04T08:08:11 | 2019-05-04T08:08:11 | 183,872,448 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,255 | py | #-*-coding:utf8-*-
"""
author:xujian
date:2019****
"""
import os
#get all user favorite movies_id
#user_like -> user_id:movie_id
#user_rate_time -> user_id _movie_id : rate_time
def get_user_like(rating_file):
if not os.path.exists(rating_file):
return {},{}
read_row = 0
from_row = 0
user_like = {}
user_rate_time = {}
with open(rating_file, 'r') as f:
for line in f:
if(read_row == from_row):
read_row += 1
continue
user_info = line.strip().split(',')
if (len(user_info) < 3):
continue
[user_id, movie_id, rating, timestamp] = user_info
if user_id + "_" + movie_id not in user_rate_time:
user_rate_time[user_id + "_" + movie_id] = int(timestamp)
if float(rating) < 3.0:
continue
if user_id not in user_like:
user_like[user_id] = []
user_like[user_id].append(movie_id)
return user_like, user_rate_time
#get all movie_info
#movie_info_map -> movie_name:movie_genres
def get_movie_info(movie_info_file):
if not os.path.exists(movie_info_file):
return {}
read_row = 0
from_row = 0
movie_info_map = {}
with open(movie_info_file, 'r') as f:
for line in f:
if (read_row == from_row):
read_row += 1
continue
movie_info = line.strip().split(',')
if (len(movie_info) < 3):
continue
movie_id, genres = movie_info[0], movie_info[-1] #-1 means the last value of the array
if(len(movie_info) == 3):
movie_name = movie_info[1]
else:
movie_name = ",".join(movie_info[1:-1]) #if movie name includes the ","
if movie_id not in movie_info_map:
movie_info_map[movie_id] = [movie_name, genres]
return movie_info_map
if __name__ == "__main__":
user_like, user_rate_time = get_user_like("../data/ratings.txt")
# print(len(user_like))
# print(user_like["1"])
# #print user_click["1"]
# item_info= get_movie_info("../data/movies.txt")
# print(item_info["11"])
| [
"504574519@qq.com"
] | 504574519@qq.com |
749d528f75d3d3d8ccf3e23107b257c9136e06d8 | 44f0729433ac9bdbd67ab316f7002df087b0166c | /avatar/sources/local.py | 66542f29b312b364b8c216866b2a54d7f7d0cc58 | [
"WTFPL"
] | permissive | goneri/notmuch-avatar | 89e55c6c6ca2b6c8fc5000e16f560cac6118d01b | 77a14596e953c9974c62cb9682204896c50ef25c | refs/heads/master | 2016-09-05T13:31:04.347403 | 2015-07-15T08:47:19 | 2015-07-15T08:47:19 | 22,701,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Quick and Ugly script to fetch sender avatar.
# Copyright 2014 Gonéri Le Bouder <goneri@lebouder.net>
#
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# Version 2, December 2004
##
# Everyone is permitted to copy and distribute verbatim or modified
# copies of this license document, and changing it is allowed as long
# as the name is changed.
#
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
#
# 0. You just DO WHAT THE FUCK YOU WANT TO.
import os
import shutil
import avatar
class Local(object):
def __init__(self):
self.name = "local"
def fetch(self, email, target_image):
domain = avatar.EmailTools.get_domain_from_email(email)
icon_file = "./icons/%s.png" % domain
try:
shutil.copyfile(
icon_file,
target_image)
print("Using local icon for %s" % email)
return True
except IOError:
return False
| [
"goneri.lebouder@enovance.com"
] | goneri.lebouder@enovance.com |
15e1735e55b11c46890a34dc1baf16436b74651b | 241b5abe9863a8ef82c14186031e4e2cb94474da | /crawler_test.py | 0d524b0a6f82a06a1eca0e4879f218420b4a38cd | [] | no_license | MarieLeBris/C4-Crawler-api | 008540abdb6c3987f933c9306a56f93503d6463d | b0a28c363602a37fe105c9a246922a7984f95fed | refs/heads/main | 2023-08-15T16:18:18.183930 | 2021-10-13T08:55:23 | 2021-10-13T08:55:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,730 | py | import os
import sys
import config
from motor_test import MOTOR
from compass_test import COMPASS
class CRAWLER():
def __init__(self):
self.MR = MOTOR(config.motor_right_IO2,config.motor_right_DIR,0)
self.ML = MOTOR(config.motor_left_IO2,config.motor_left_DIR,1)
self.CP = COMPASS(config.I2C_adresse)
def init_IO2_DIR(self):
self.MR.init_GPIO(config.motor_right_IO2)
self.ML.init_GPIO(config.motor_left_IO2)
self.MR.init_GPIO(config.motor_right_DIR)
self.ML.init_GPIO(config.motor_left_DIR)
def init_PWM(self): #ne sert a rien
self.MR.init_PWM_2()
def PWM(self,on_off):
self.MR.enable_PWM(on_off,config.motor_right_PWM)
self.ML.enable_PWM(on_off,config.motor_left_PWM)
def on_off(self, motor_on_off):
self.MR.IO2(config.motor_left_IO2, motor_on_off)
self.ML.IO2(config.motor_right_IO2, motor_on_off)
def forward(self, duty_cycle):
self.MR.DIR(config.motor_right_DIR, 0)
self.ML.DIR(config.motor_left_DIR, 1)
self.MR.duty_cycle(duty_cycle, config.motor_right_PWM)
self.ML.duty_cycle(duty_cycle, config.motor_left_PWM)
def backward(self, duty_cycle):
self.MR.DIR(config.motor_right_DIR, 1)
self.ML.DIR(config.motor_left_DIR, 0)
self.MR.duty_cycle( duty_cycle, config.motor_right_PWM)
self.ML.duty_cycle( duty_cycle, config.motor_left_PWM)
def right(self, duty_cycle):
self.MR.DIR(config.motor_right_DIR, 0)
self.ML.DIR(config.motor_left_DIR, 0)
self.MR.duty_cycle( duty_cycle, config.motor_right_PWM)
self.ML.duty_cycle(duty_cycle, config.motor_left_PWM)
def left(self, duty_cycle):
self.MR.DIR(config.motor_right_DIR, 1)
self.ML.DIR(config.motor_left_DIR, 1)
self.MR.duty_cycle( duty_cycle,config.motor_right_PWM)
self.ML.duty_cycle(duty_cycle, config.motor_left_PWM)
def init_light(self):
self.MR.init_GPIO(config.light1)
self.MR.init_GPIO(config.light2)
self.MR.init_GPIO(config.light3)
def light_on_off(self, on_off):
if motor_on_off == 1:
dir = "ON"
elif motor_on_off== 0:
dir = "OFF"
else :
dir = "error"
fichier = open("data.txt", "a")
fichier.write("Light is "+dir)
fichier.close()
def cmd_direction(self, value_direction):
fichier = open("data.txt", "a")
fichier.write("Mise en place du robot")
fichier.close()
def read_data(self):
with open("data.txt", "r") as fs:
lignes = [ligne.rstrip() for ligne in fs.readlines()]
lignes = lignes[-10:]
return str(lignes)
| [
"course@SPCOURSB-DESK2.ni.corp.natinst.com"
] | course@SPCOURSB-DESK2.ni.corp.natinst.com |
f57105204640fd248b7e897d491ac7ed2ad00954 | b7b71f325c055f70b36dd5991b63ddfbc8d18be7 | /main/urls.py | bd5e37cdeb8d0b61bb7f840edf8183a59a3022ad | [] | no_license | riyadzaigirdar/django-auth-signals | d7814284089777773dbe0964a1b73fe0ee5f5a8e | e4ebccc1e310ec6aa182c7ab30f63eb472772af4 | refs/heads/master | 2023-01-20T06:04:05.814741 | 2020-11-18T20:34:01 | 2020-11-18T20:34:01 | 314,137,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | from django.conf import settings
from django.contrib import admin
from django.urls import path,include
from django.conf.urls.static import static
from rest_framework.authtoken import views
from blog import api
urlpatterns = [
path('admin/', admin.site.urls),
path('blog/', include("blog.urls")),
path('api/accounts/', api.CustomAuthToken.as_view())
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"riyadzaigir280@gmail.com"
] | riyadzaigir280@gmail.com |
4ae3ec394244e37ef2be92cdf594a1a1326e74d3 | 26008108b42f096f9a5b6008655812b66ac01250 | /object_detection/voc_dataset.py | 3f77bd0c90f963e586903f458aefa886826a7bbf | [] | no_license | nithinsubbiah/computer_vision | 717a398ad79c06bb453f5b40f6d5c3db6ed4581e | 2a60d51bb4f0c1fc782a20c9b9f0fba9866351da | refs/heads/master | 2021-01-06T06:06:58.521055 | 2020-04-27T06:50:15 | 2020-04-27T06:50:15 | 241,230,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,456 | py | from __future__ import print_function
import numpy as np
import os
import xml.etree.ElementTree as ET
import torch
import torch.nn
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
class VOCDataset(Dataset):
CLASS_NAMES = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
INV_CLASS = {}
for i in range(len(CLASS_NAMES)):
INV_CLASS[CLASS_NAMES[i]] = i
def __init__(self, split, size, data_dir='VOCdevkit/VOC2007/'):
super().__init__()
self.split = split
self.data_dir = data_dir
self.size = size
self.img_dir = os.path.join(data_dir, 'JPEGImages')
self.ann_dir = os.path.join(data_dir, 'Annotations')
split_file = os.path.join(data_dir, 'ImageSets/Main', split + '.txt')
with open(split_file) as fp:
self.index_list = [line.strip() for line in fp]
self.anno_list = self.preload_anno()
self.size = 227
# self.train_transform = transforms.Compose([transforms.Resize((self.size,self.size)), transforms.RandomHorizontalFlip(p=0.5), transforms.ToTensor()])
# self.test_transform = transforms.Compose([transforms.CenterCrop((self.size,self.size)), transforms.Resize((self.size,self.size)), transforms.ToTensor()])
self.train_transform = transforms.Compose([transforms.RandomHorizontalFlip(p=0.5)])
self.test_transform = transforms.Compose([transforms.CenterCrop((self.size,self.size))])
@classmethod
def get_class_name(cls, index):
return cls.CLASS_NAMES[index]
@classmethod
def get_class_index(cls, name):
return cls.INV_CLASS[name]
def __len__(self):
return len(self.index_list)
def preload_anno(self):
"""
:return: a list of lables. each element is in the form of [class, weight],
where both class and weight are a numpy array in shape of [20],
"""
label_list = []
for index in self.index_list:
class_names = set()
occurence_dict = {}
difficult_dict = {}
class_labels = np.zeros(20)
weights = np.ones(20)
fpath = os.path.join(self.ann_dir, index + '.xml')
tree = ET.parse(fpath)
root = tree.getroot()
for obj in root.findall('object'):
obj_name = obj.find('name').text
class_names.add(obj_name)
if not obj_name in occurence_dict:
occurence_dict[obj_name] = 1
difficult_dict[obj_name] = 0
else:
occurence_dict[obj_name] += 1
difficulty = int(obj.find("difficult").text)
occurence_dict[obj_name] += difficulty
class_idx = self.get_class_index(obj_name)
class_labels[class_idx] = 1
for c_name in class_names:
if occurence_dict[c_name] == difficult_dict[c_name]:
class_idx = self.get_class_index(obj_name)
weights[class_idx] = 0
label_list.append([class_labels, weights])
return label_list
def __getitem__(self, index):
"""
:param index: a int generated by Dataloader in range [0, __len__()]
:return: index-th element
image: FloatTensor in shape of (C, H, W) in scale [-1, 1].
label: LongTensor in shape of (Nc, ) binary label
weight: FloatTensor in shape of (Nc, ) difficult or not.
"""
findex = self.index_list[index]
fpath = os.path.join(self.img_dir, findex + '.jpg')
lab_vec, wgt_vec = self.anno_list[index]
img = Image.open(fpath)
if(self.split == 'trainval'):
img = self.train_transform(img)
# if(self.split == 'test'):
# img = self.test_transform(img)
img = transforms.functional.resize(img, size=(self.size,self.size))
img = transforms.functional.to_tensor(img)
image = torch.FloatTensor(img)
img = transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
label = torch.FloatTensor(lab_vec)
wgt = torch.FloatTensor(wgt_vec)
return image, label, wgt
| [
"nithinsubbiah@gmail.com"
] | nithinsubbiah@gmail.com |
3bf4072aecfd1b2b39b010f975de1fe77edc12eb | 9798202e54117c84d98794f964a3d12546759068 | /ivanov/statistics/treewidth/__init__.py | 97100e7b389857132c7d6640a5879f2db13ef330 | [] | no_license | idanivanov/master_thesis | 2055e8e2ada8bd98cff8bca8e5bfc0dd453dcdea | a71432b64012e6d77b9cf9aa1f19edddc052c149 | refs/heads/master | 2021-01-13T13:01:05.684302 | 2016-07-12T07:46:59 | 2016-07-12T07:46:59 | 46,619,590 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | '''
Created on Nov 24, 2015
@author: Ivan Ivanov
'''
import codecs
import itertools
def aggregate(tw_file_path):
'''From a file containing treewidths of graphs, get the number of graphs grouped by treewidth.
:param tw_file: CSV file containing treewidths in the following format: each line represents a tuple "graph_id,treewidth"
:return: Dictionary of the format { treewidth: graphs_count }
'''
lines = read_tw_file(tw_file_path)
res = {}
for k, g in itertools.groupby(lines, lambda tup: tup[1]):
group_count = sum(1 for _ in g)
if res.has_key(k):
res[k] += group_count
else:
res[k] = group_count
res["total"] = sum(res.values())
return res
def read_tw_file(tw_file_path):
tw_file = codecs.open(tw_file_path, "r", "utf8")
line = tw_file.readline()
while line:
items = line[:-1].split(u",")
if len(items) == 2:
yield (items[0], items[1])
line = tw_file.readline()
tw_file.close()
| [
"sanfan@abv.bg"
] | sanfan@abv.bg |
10065d1a3acada2593becaa51331bd03313778a6 | 379ecfb23434af43017d475fbdb0531ee1eb86e3 | /subscribeapp/models.py | 2cc4c4791dfe74022c8ca0009141e2f656ab7410 | [] | no_license | CHANWOO97/gis_2ban_2 | 8382d1bf68f8d22d26890ee19ee1acf347a35844 | 004956c8fe13c65dd269bf17a68c3753091ca961 | refs/heads/master | 2023-07-30T22:16:05.707818 | 2021-09-29T03:18:55 | 2021-09-29T03:19:01 | 381,960,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | from django.contrib.auth.models import User
from django.db import models
# Create your models here.
from projectapp.models import Project
class Subscription(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE,
related_name='subscription', null=False)
project = models.ForeignKey(Project, on_delete=models.CASCADE,
related_name='subscription', null=False)
class Meta: # 외부정보 장고용어 meta
unique_together = ['user', 'project'] # unique 지정해줌 | [
"rkaehd127@gmail.com"
] | rkaehd127@gmail.com |
6b558a36cb43d336a9e316491f4c555276fb0d1a | 45ecfa57791e1c0a613436e987713b955d6ff4e3 | /eve_db/migrations/0009_auto__del_field_invtype_graphic.py | 17f86ee4de7d6653fd85ebb813812f6b7f2c86c4 | [
"BSD-3-Clause"
] | permissive | caot/django-eve-db | f3e6148a640a907c8eac7a9845446bdc698ca776 | 425a84de4fde2b14ab17cfb81c2c2609fa427381 | refs/heads/master | 2020-06-01T11:36:36.524220 | 2019-06-07T15:27:40 | 2019-06-07T15:27:40 | 190,765,718 | 0 | 0 | BSD-3-Clause | 2019-06-07T15:19:37 | 2019-06-07T15:19:37 | null | UTF-8 | Python | false | false | 72,507 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'InvType.graphic'
db.delete_column('eve_db_invtype', 'graphic_id')
def backwards(self, orm):
# Adding field 'InvType.graphic'
db.add_column('eve_db_invtype', 'graphic',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['eve_db.EveGraphic'], null=True, blank=True),
keep_default=False)
models = {
'eve_db.agtagent': {
'Meta': {'ordering': "['id']", 'object_name': 'AgtAgent'},
'corporation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.CrpNPCCorporation']", 'null': 'True', 'blank': 'True'}),
'division': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.CrpNPCDivision']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.MapDenormalize']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'quality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.AgtAgentType']", 'null': 'True', 'blank': 'True'})
},
'eve_db.agtagenttype': {
'Meta': {'ordering': "['id']", 'object_name': 'AgtAgentType'},
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'eve_db.agtconfig': {
'Meta': {'ordering': "['id']", 'unique_together': "(('agent', 'key'),)", 'object_name': 'AgtConfig'},
'agent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.AgtAgent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'eve_db.chrancestry': {
'Meta': {'ordering': "['id']", 'object_name': 'ChrAncestry'},
'bloodline': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.ChrBloodline']", 'null': 'True', 'blank': 'True'}),
'charisma_bonus': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'icon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.EveIcon']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'intelligence_bonus': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'memory_bonus': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'perception_bonus': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'willpower_bonus': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'eve_db.chrattribute': {
'Meta': {'ordering': "['id']", 'object_name': 'ChrAttribute'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'icon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.EveIcon']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'eve_db.chrbloodline': {
'Meta': {'ordering': "['id']", 'object_name': 'ChrBloodline'},
'corporation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.CrpNPCCorporation']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'female_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'icon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.EveIcon']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'male_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bloodline_set'", 'null': 'True', 'to': "orm['eve_db.ChrRace']"}),
'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_female_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_male_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'starter_ship_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bloodline_starter_ship_set'", 'null': 'True', 'to': "orm['eve_db.InvType']"}),
'starting_charisma': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'starting_intelligence': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'starting_memory': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'starting_perception': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'starting_willpower': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'eve_db.chrfaction': {
'Meta': {'ordering': "['id']", 'object_name': 'ChrFaction'},
'corporation': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'faction_set'", 'null': 'True', 'to': "orm['eve_db.CrpNPCCorporation']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'icon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.EveIcon']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'size_factor': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'solar_system': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'faction_set'", 'null': 'True', 'to': "orm['eve_db.MapSolarSystem']"}),
'station_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'station_system_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'eve_db.chrrace': {
'Meta': {'ordering': "['id']", 'object_name': 'ChrRace'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'icon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.EveIcon']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'eve_db.crpactivity': {
'Meta': {'ordering': "['id']", 'object_name': 'CrpActivity'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'eve_db.crpnpccorporation': {
'Meta': {'ordering': "['id']", 'object_name': 'CrpNPCCorporation'},
'border_systems': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'corridor_systems': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'enemy_corp': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'enemy_of_set'", 'null': 'True', 'to': "orm['eve_db.CrpNPCCorporation']"}),
'extent': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'faction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.ChrFaction']", 'null': 'True', 'blank': 'True'}),
'friendly_corp': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'friendly_with_set'", 'null': 'True', 'to': "orm['eve_db.CrpNPCCorporation']"}),
'fringe_systems': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'hub_systems': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'icon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.EveIcon']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'initial_share_price': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'investor1': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'invested1_set'", 'null': 'True', 'to': "orm['eve_db.CrpNPCCorporation']"}),
'investor1_shares': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'investor2': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'invested2_set'", 'null': 'True', 'to': "orm['eve_db.CrpNPCCorporation']"}),
'investor2_shares': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'investor3': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'invested3_set'", 'null': 'True', 'to': "orm['eve_db.CrpNPCCorporation']"}),
'investor3_shares': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'investor4': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'invested4_set'", 'null': 'True', 'to': "orm['eve_db.CrpNPCCorporation']"}),
'investor4_shares': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'min_security': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'public_share_percent': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'size': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'size_factor': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'solar_system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.MapSolarSystem']", 'null': 'True', 'blank': 'True'}),
'station_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'station_system_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'stations_are_scattered': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'eve_db.crpnpccorporationdivision': {
'Meta': {'ordering': "['id']", 'unique_together': "(('corporation', 'division'),)", 'object_name': 'CrpNPCCorporationDivision'},
'corporation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.CrpNPCCorporation']"}),
'division': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.CrpNPCDivision']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'eve_db.crpnpccorporationresearchfield': {
'Meta': {'ordering': "['id']", 'unique_together': "(('skill', 'corporation'),)", 'object_name': 'CrpNPCCorporationResearchField'},
'corporation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.CrpNPCCorporation']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvType']", 'null': 'True', 'blank': 'True'})
},
'eve_db.crpnpccorporationtrade': {
'Meta': {'ordering': "['id']", 'unique_together': "(('corporation', 'type'),)", 'object_name': 'CrpNPCCorporationTrade'},
'corporation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.CrpNPCCorporation']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvType']", 'null': 'True', 'blank': 'True'})
},
'eve_db.crpnpcdivision': {
'Meta': {'ordering': "['id']", 'object_name': 'CrpNPCDivision'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'leader_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'eve_db.crtcategory': {
'Meta': {'ordering': "['id']", 'object_name': 'CrtCategory'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'eve_db.crtcertificate': {
'Meta': {'ordering': "['id']", 'object_name': 'CrtCertificate'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.CrtCategory']", 'null': 'True', 'blank': 'True'}),
'cert_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.CrtClass']", 'null': 'True', 'blank': 'True'}),
'corporation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.CrpNPCCorporation']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'grade': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'icon_num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'})
},
'eve_db.crtclass': {
'Meta': {'ordering': "['id']", 'object_name': 'CrtClass'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'eve_db.crtrecommendation': {
'Meta': {'ordering': "['id']", 'object_name': 'CrtRecommendation'},
'certificate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.CrtCertificate']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'recommendation_level': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ship_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvType']", 'null': 'True', 'blank': 'True'})
},
'eve_db.crtrelationship': {
'Meta': {'ordering': "['id']", 'object_name': 'CrtRelationship'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_crtrelationship_set'", 'null': 'True', 'to': "orm['eve_db.CrtCertificate']"}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_crtrelationship_set'", 'null': 'True', 'to': "orm['eve_db.CrtCertificate']"}),
'parent_level': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvType']", 'null': 'True', 'blank': 'True'})
},
'eve_db.dgmattributecategory': {
'Meta': {'ordering': "['id']", 'object_name': 'DgmAttributeCategory'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'eve_db.dgmattributetype': {
'Meta': {'ordering': "['id']", 'object_name': 'DgmAttributeType'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.DgmAttributeCategory']", 'null': 'True', 'blank': 'True'}),
'default_value': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'high_is_good': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'icon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.EveIcon']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_stackable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.EveUnit']", 'null': 'True', 'blank': 'True'})
},
'eve_db.dgmeffect': {
'Meta': {'ordering': "['id']", 'object_name': 'DgmEffect'},
'category': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'disallow_auto_repeat': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'discharge_attribute': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'inventoryeffectdischargeattribute'", 'null': 'True', 'to': "orm['eve_db.DgmAttributeType']"}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'distribution': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'duration_attribute': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'inventoryeffectdurationeattribute'", 'null': 'True', 'to': "orm['eve_db.DgmAttributeType']"}),
'falloff_attribute': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'inventoryeffectfalloffattribute'", 'null': 'True', 'to': "orm['eve_db.DgmAttributeType']"}),
'fitting_usage_chance_attribute': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'inventoryeffectfittingusagechanceattribute'", 'null': 'True', 'to': "orm['eve_db.DgmAttributeType']"}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'has_electronic_chance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_propulsion_chance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_range_chance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'icon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.EveIcon']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'is_assistance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_offensive': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_warp_safe': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'npc_activation_chance_attribute': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'inventoryeffectnpcactivationchanceattribute'", 'null': 'True', 'to': "orm['eve_db.DgmAttributeType']"}),
'npc_usage_chance_attribute': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'inventoryeffectnpcusagechanceattribute'", 'null': 'True', 'to': "orm['eve_db.DgmAttributeType']"}),
'post_expression': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'pre_expression': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'range_attribute': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'inventoryeffectrangeattribute'", 'null': 'True', 'to': "orm['eve_db.DgmAttributeType']"}),
'sfx_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'tracking_speed_attribute': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'inventoryeffecttrackingspeedattribute'", 'null': 'True', 'to': "orm['eve_db.DgmAttributeType']"})
},
'eve_db.dgmtypeattribute': {
'Meta': {'ordering': "['id']", 'unique_together': "(('inventory_type', 'attribute'),)", 'object_name': 'DgmTypeAttribute'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.DgmAttributeType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvType']"}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_int': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'eve_db.dgmtypeeffect': {
'Meta': {'ordering': "['id']", 'unique_together': "(('type', 'effect'),)", 'object_name': 'DgmTypeEffect'},
'effect': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.DgmEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvType']"})
},
'eve_db.evegraphic': {
'Meta': {'ordering': "['id']", 'object_name': 'EveGraphic'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'file': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'is_obsolete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
'eve_db.eveicon': {
'Meta': {'ordering': "['id']", 'object_name': 'EveIcon'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'file': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'})
},
'eve_db.evename': {
'Meta': {'ordering': "['id']", 'object_name': 'EveName'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvCategory']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvGroup']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvType']", 'null': 'True', 'blank': 'True'})
},
'eve_db.eveunit': {
'Meta': {'ordering': "['id']", 'object_name': 'EveUnit'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '75'})
},
'eve_db.invblueprinttype': {
'Meta': {'ordering': "['blueprint_type']", 'object_name': 'InvBlueprintType'},
'blueprint_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'blueprint_type_set'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['eve_db.InvType']"}),
'material_modifier': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_production_limit': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent_blueprint_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_blueprint_type_set'", 'null': 'True', 'to': "orm['eve_db.InvType']"}),
'product_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'blueprint_product_type_set'", 'to': "orm['eve_db.InvType']"}),
'productivity_modifier': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'research_copy_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'research_material_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'research_productivity_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'research_tech_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'tech_level': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'waste_factor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'eve_db.invcategory': {
'Meta': {'ordering': "['id']", 'object_name': 'InvCategory'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'icon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.EveIcon']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'eve_db.invcontrabandtype': {
'Meta': {'ordering': "['id']", 'unique_together': "(('faction', 'type'),)", 'object_name': 'InvContrabandType'},
'attack_min_sec': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'confiscate_min_sec': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'faction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.ChrFaction']"}),
'fine_by_value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standing_loss': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvType']"})
},
'eve_db.invflag': {
'Meta': {'ordering': "['id']", 'object_name': 'InvFlag'},
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'type_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'eve_db.invgroup': {
'Meta': {'ordering': "['id']", 'object_name': 'InvGroup'},
'allow_anchoring': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_manufacture': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allow_recycle': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvCategory']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'icon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.EveIcon']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'is_anchored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_fittable_non_singleton': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'use_base_price': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'eve_db.invmarketgroup': {
'Meta': {'ordering': "['id']", 'object_name': 'InvMarketGroup'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'has_items': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'icon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.EveIcon']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvMarketGroup']", 'null': 'True', 'blank': 'True'})
},
'eve_db.invmetagroup': {
'Meta': {'ordering': "['id']", 'object_name': 'InvMetaGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'icon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.EveIcon']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'eve_db.invmetatype': {
'Meta': {'ordering': "['type']", 'object_name': 'InvMetaType'},
'meta_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvMetaGroup']"}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventorymetatype_parent_type_set'", 'to': "orm['eve_db.InvType']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventorymetatype_type_set'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['eve_db.InvType']"})
},
'eve_db.invposresource': {
'Meta': {'ordering': "['id']", 'unique_together': "(('control_tower_type', 'resource_type'),)", 'object_name': 'InvPOSResource'},
'control_tower_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tower_resource_set'", 'to': "orm['eve_db.InvType']"}),
'faction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.ChrFaction']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_security_level': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'purpose': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvPOSResourcePurpose']", 'null': 'True', 'blank': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'resource_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pos_resource_set'", 'to': "orm['eve_db.InvType']"})
},
'eve_db.invposresourcepurpose': {
'Meta': {'ordering': "['id']", 'object_name': 'InvPOSResourcePurpose'},
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'purpose': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'})
},
'eve_db.invtype': {
'Meta': {'ordering': "['id']", 'object_name': 'InvType'},
'base_price': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'capacity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'chance_of_duplicating': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvGroup']", 'null': 'True', 'blank': 'True'}),
'icon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.EveIcon']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'market_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvMarketGroup']", 'null': 'True', 'blank': 'True'}),
'mass': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'portion_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.ChrRace']", 'null': 'True', 'blank': 'True'}),
'radius': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'volume': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'eve_db.invtypematerial': {
'Meta': {'ordering': "['id']", 'unique_together': "(('type', 'material_type'),)", 'object_name': 'InvTypeMaterial'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'material_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'itemtype_set'", 'to': "orm['eve_db.InvType']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'material_set'", 'to': "orm['eve_db.InvType']"})
},
'eve_db.invtypereaction': {
'Meta': {'ordering': "['id']", 'unique_together': "(('reaction_type', 'input', 'type'),)", 'object_name': 'InvTypeReaction'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'reaction_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventorytypereactions_reaction_type_set'", 'to': "orm['eve_db.InvType']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventorytypereactions_type_set'", 'to': "orm['eve_db.InvType']"})
},
'eve_db.mapcelestialstatistic': {
'Meta': {'ordering': "['celestial']", 'object_name': 'MapCelestialStatistic'},
'age': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'celestial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.MapDenormalize']", 'unique': 'True', 'primary_key': 'True'}),
'density': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'eccentricity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'escape_velocity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'is_fragmented': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'life': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'luminosity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mass': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mass_dust': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mass_gas': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'orbit_period': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'orbit_radius': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pressure': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'radius': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rotation_rate': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'spectral_class': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'surface_gravity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'temperature': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'eve_db.mapconstellation': {
'Meta': {'ordering': "['id']", 'object_name': 'MapConstellation'},
'faction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.ChrFaction']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'radius': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.MapRegion']", 'null': 'True', 'blank': 'True'}),
'sovereignty_grace_start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sovereignty_start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'x': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'x_max': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'x_min': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'y_max': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'y_min': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'z': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'z_max': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'z_min': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'eve_db.mapconstellationjump': {
'Meta': {'ordering': "['id']", 'unique_together': "(('from_constellation', 'to_constellation'),)", 'object_name': 'MapConstellationJump'},
'from_constellation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'constellation_jumps_from_constellation_set'", 'to': "orm['eve_db.MapConstellation']"}),
'from_region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'constellation_jumps_from_region_set'", 'to': "orm['eve_db.MapRegion']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'to_constellation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'constellation_jumps_to_constellation_set'", 'to': "orm['eve_db.MapConstellation']"}),
'to_region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'constellation_jumps_to_region_set'", 'to': "orm['eve_db.MapRegion']"})
},
'eve_db.mapdenormalize': {
'Meta': {'ordering': "['id']", 'object_name': 'MapDenormalize'},
'celestial_index': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'constellation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.MapConstellation']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvGroup']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'orbit_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'orbit_index': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'radius': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.MapRegion']", 'null': 'True', 'blank': 'True'}),
'security': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'solar_system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.MapSolarSystem']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvType']", 'null': 'True', 'blank': 'True'}),
'x': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'z': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'eve_db.mapjump': {
'Meta': {'ordering': "['origin_gate']", 'object_name': 'MapJump'},
'destination_gate': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stargate_jump_destination_set'", 'to': "orm['eve_db.MapDenormalize']"}),
'origin_gate': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stargate_jump_origin_set'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['eve_db.MapDenormalize']"})
},
'eve_db.maplandmark': {
'Meta': {'ordering': "['id']", 'object_name': 'MapLandmark'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'icon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.EveIcon']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'importance': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'radius': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'solar_system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.MapSolarSystem']", 'null': 'True', 'blank': 'True'}),
'x': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'z': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'eve_db.mapregion': {
'Meta': {'ordering': "['id']", 'object_name': 'MapRegion'},
'faction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.ChrFaction']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'radius': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'x': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'x_max': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'x_min': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'y_max': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'y_min': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'z': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'z_max': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'z_min': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'eve_db.mapregionjump': {
'Meta': {'ordering': "['id']", 'unique_together': "(('from_region', 'to_region'),)", 'object_name': 'MapRegionJump'},
'from_region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'region_jumps_from_region_set'", 'to': "orm['eve_db.MapRegion']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'to_region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'region_jumps_to_region_set'", 'to': "orm['eve_db.MapRegion']"})
},
'eve_db.mapsolarsystem': {
'Meta': {'ordering': "['id']", 'object_name': 'MapSolarSystem'},
'constellation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.MapConstellation']", 'null': 'True', 'blank': 'True'}),
'faction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'solarsystem_set'", 'null': 'True', 'to': "orm['eve_db.ChrFaction']"}),
'has_interconstellational_link': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_interregional_link': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'is_border_system': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_corridor_system': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_fringe_system': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_hub_system': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_international': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'luminosity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'radius': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.MapRegion']", 'null': 'True', 'blank': 'True'}),
'security_class': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'security_level': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sovereignty_level': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'sovereignty_start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sun_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvType']", 'null': 'True', 'blank': 'True'}),
'x': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'x_max': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'x_min': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'y_max': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'y_min': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'z': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'z_max': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'z_min': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'eve_db.mapsolarsystemjump': {
'Meta': {'ordering': "['id']", 'unique_together': "(('from_solar_system', 'to_solar_system'),)", 'object_name': 'MapSolarSystemJump'},
'from_constellation': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'solar_system_jumps_from_constellation_set'", 'null': 'True', 'to': "orm['eve_db.MapConstellation']"}),
'from_region': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'solar_system_jumps_from_region_set'", 'null': 'True', 'to': "orm['eve_db.MapRegion']"}),
'from_solar_system': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'solar_system_jumps_from_solar_system_set'", 'to': "orm['eve_db.MapSolarSystem']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'to_constellation': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'solar_system_jumps_to_constellation_set'", 'null': 'True', 'to': "orm['eve_db.MapConstellation']"}),
'to_region': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'solar_system_jumps_to_region_set'", 'null': 'True', 'to': "orm['eve_db.MapRegion']"}),
'to_solar_system': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'solar_system_jumps_to_solar_system_set'", 'to': "orm['eve_db.MapSolarSystem']"})
},
'eve_db.mapuniverse': {
'Meta': {'ordering': "['id']", 'object_name': 'MapUniverse'},
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'radius': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'x': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'x_max': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'x_min': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'y_max': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'y_min': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'z': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'z_max': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'z_min': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'eve_db.planetschematic': {
'Meta': {'ordering': "['id']", 'object_name': 'PlanetSchematic'},
'cycle_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pin_map': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'usable_schematics'", 'symmetrical': 'False', 'through': "orm['eve_db.PlanetSchematicsPinMap']", 'to': "orm['eve_db.InvType']"}),
'type_map': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'used_with_schematic'", 'symmetrical': 'False', 'through': "orm['eve_db.PlanetSchematicsTypeMap']", 'to': "orm['eve_db.InvType']"})
},
'eve_db.planetschematicspinmap': {
'Meta': {'ordering': "['schematic', 'type']", 'unique_together': "(('schematic', 'type'),)", 'object_name': 'PlanetSchematicsPinMap'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'schematic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.PlanetSchematic']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvType']"})
},
'eve_db.planetschematicstypemap': {
'Meta': {'ordering': "['schematic', 'is_input', 'type']", 'unique_together': "(('schematic', 'type'),)", 'object_name': 'PlanetSchematicsTypeMap'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_input': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'schematic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.PlanetSchematic']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvType']"})
},
'eve_db.ramactivity': {
'Meta': {'ordering': "['id']", 'object_name': 'RamActivity'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'icon_filename': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'})
},
'eve_db.ramassemblyline': {
'Meta': {'ordering': "['id']", 'object_name': 'RamAssemblyLine'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.RamActivity']", 'null': 'True', 'blank': 'True'}),
'assembly_line_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.RamAssemblyLineType']", 'null': 'True', 'blank': 'True'}),
'cost_install': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cost_per_hour': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'discount_per_good_standing_point': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'maximum_char_security': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'maximum_corp_security': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'minimum_char_security': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'minimum_corp_security': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'minimum_standing': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.CrpNPCCorporation']", 'null': 'True', 'blank': 'True'}),
'station': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.StaStation']", 'null': 'True', 'blank': 'True'}),
'surcharge_per_bad_standing_point': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'ui_grouping_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'eve_db.ramassemblylinestations': {
'Meta': {'ordering': "['id']", 'unique_together': "(('station', 'assembly_line_type'),)", 'object_name': 'RamAssemblyLineStations'},
'assembly_line_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.RamAssemblyLineType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.CrpNPCCorporation']", 'null': 'True', 'blank': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.MapRegion']", 'null': 'True', 'blank': 'True'}),
'solar_system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.MapSolarSystem']", 'null': 'True', 'blank': 'True'}),
'station': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.StaStation']"}),
'station_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.StaStationType']", 'null': 'True', 'blank': 'True'})
},
'eve_db.ramassemblylinetype': {
'Meta': {'ordering': "['id']", 'object_name': 'RamAssemblyLineType'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.RamActivity']", 'null': 'True', 'blank': 'True'}),
'base_material_multiplier': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'base_time_multiplier': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'min_cost_per_hour': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'volume': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'eve_db.ramassemblylinetypedetailpercategory': {
'Meta': {'ordering': "['id']", 'unique_together': "(('assembly_line_type', 'category'),)", 'object_name': 'RamAssemblyLineTypeDetailPerCategory'},
'assembly_line_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.RamAssemblyLineType']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvCategory']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'material_multiplier': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time_multiplier': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'eve_db.ramassemblylinetypedetailpergroup': {
'Meta': {'ordering': "['id']", 'unique_together': "(('assembly_line_type', 'group'),)", 'object_name': 'RamAssemblyLineTypeDetailPerGroup'},
'assembly_line_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.RamAssemblyLineType']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.InvGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'material_multiplier': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time_multiplier': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'eve_db.ramtyperequirement': {
'Meta': {'ordering': "['id']", 'unique_together': "(('type', 'activity_type', 'required_type'),)", 'object_name': 'RamTypeRequirement'},
'activity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.RamActivity']"}),
'damage_per_job': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'recycle': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'required_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_type'", 'to': "orm['eve_db.InvType']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'type_requirement'", 'to': "orm['eve_db.InvType']"})
},
'eve_db.staoperation': {
'Meta': {'ordering': "['id']", 'object_name': 'StaOperation'},
'activity_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'amarr_station_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'amarr_station_operation_set'", 'null': 'True', 'to': "orm['eve_db.StaStationType']"}),
'border': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'caldari_station_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'caldari_station_operation_set'", 'null': 'True', 'to': "orm['eve_db.StaStationType']"}),
'corridor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'fringe': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gallente_station_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'gallente_station_operation_set'", 'null': 'True', 'to': "orm['eve_db.StaStationType']"}),
'hub': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'jove_station_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'jove_station_operation_set'", 'null': 'True', 'to': "orm['eve_db.StaStationType']"}),
'minmatar_station_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'minmatar_station_operation_set'", 'null': 'True', 'to': "orm['eve_db.StaStationType']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'ratio': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'eve_db.staoperationservices': {
'Meta': {'ordering': "['id']", 'unique_together': "(('operation', 'service'),)", 'object_name': 'StaOperationServices'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'operation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.StaOperation']"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.StaService']"})
},
'eve_db.staservice': {
'Meta': {'ordering': "['id']", 'object_name': 'StaService'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'eve_db.stastation': {
'Meta': {'ordering': "['id']", 'object_name': 'StaStation'},
'constellation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.MapConstellation']", 'null': 'True', 'blank': 'True'}),
'corporation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.CrpNPCCorporation']", 'null': 'True', 'blank': 'True'}),
'docking_cost_per_volume': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'max_ship_volume_dockable': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'office_rental_cost': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'operation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.StaOperation']", 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.MapRegion']", 'null': 'True', 'blank': 'True'}),
'reprocessing_efficiency': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'reprocessing_hangar_flag': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'reprocessing_stations_take': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'security': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'solar_system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.MapSolarSystem']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.StaStationType']", 'null': 'True', 'blank': 'True'}),
'x': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'z': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'eve_db.stastationtype': {
'Meta': {'ordering': "['id']", 'object_name': 'StaStationType'},
'dock_entry_x': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dock_entry_y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dock_entry_z': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dock_orientation_x': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dock_orientation_y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dock_orientation_z': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'is_conquerable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'office_slots': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'operation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_db.StaOperation']", 'null': 'True', 'blank': 'True'}),
'reprocessing_efficiency': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['eve_db'] | [
"gtaylor@l11solutions.com"
] | gtaylor@l11solutions.com |
ea6cd599ec95d225c07da83b638c9d5863a7acf0 | 20cd5b32b0b0e1deccc8f74661efe3c2d17f3c82 | /user.py | 4310a3b79b342b9160e51445808c37d2b10365d3 | [] | no_license | sahanaprasad/flaskproject | 5396536d0cc2cf3739e7223be2f58c8e7bb050af | 5d50ab5f0860a48a6d67dd11989b1ef584770bc2 | refs/heads/master | 2020-04-20T23:46:00.945007 | 2019-02-05T01:46:15 | 2019-02-05T01:46:15 | 169,176,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,837 | py | from flask import Flask,render_template,request
import sqlite3 as sql
import os
app=Flask(__name__)
@app.route('/') #homepage
def home():
return render_template('friends.html')
@app.route('/adminback') #homepage
def homeadmin():
return render_template('admin.html')
@app.route('/logout') #homepage
def logout():
msg="Logged out sucessfully"
return render_template('result2.html',msg=msg)
@app.route('/visitor') #homepage
def visitor():
con=sql.connect('database2.db')
con.row_factory = sql.Row
cur=con.cursor()
cur.execute('select *from genrestable4')
rows=cur.fetchall();
return render_template('homepage.html',rows=rows)
@app.route('/loggeduser/<username>') #homepage
def loggeduser(username):
return render_template('aftersignin.html',username=username)
@app.route('/login') #homepage
def login():
return render_template('login.html')
@app.route('/userlogin/<username>') #homepage
def userlogin(username):
return render_template('aftersignin.html',username=username)
@app.route('/signin',methods=['POST','GET']) #homepage
def signin():
if request.method=='POST':
username=request.form['username']
password=request.form['password']
con=sql.connect('database.db')
con.row_factory = sql.Row
cur=con.cursor()
cur.execute('select username,password from users1')
rows=cur.fetchall();
for row in rows:
if(row["username"]==username and row["password"] ==password):
return render_template('aftersignin.html',username=username)
return render_template('login.html')
@app.route('/forgetpassword') #homepage
def forgetpassword():
return render_template('updatepassword.html')
@app.route('/updatepassword',methods=['POST','GET']) #homepage
def updatepassword():
if request.method=='POST':
username=request.form['username']
con=sql.connect('database.db')
con.row_factory = sql.Row
cur=con.cursor()
cur.execute('select username from users1')
rows=cur.fetchall();
for row in rows:
if(row["username"]==username):
return render_template('upadateform.html',username=username)
return render_template('updatepassword.html')
@app.route('/updatepassword2',methods=['POST','GET']) #homepage
def updatepassword2():
if request.method=='POST':
username=request.form['username']
password=request.form['password']
con=sql.connect('database.db')
con.row_factory = sql.Row
cur=con.cursor()
cur.execute('Update users1 set password= ? where username= ? ',(password,username,))
msg="Password sucessfully Updated"
return render_template('updatesucessfull.html',msg=msg)
@app.route('/adminbackhome') #homepage
def homeadminback():
con=sql.connect('database2.db')
con.row_factory = sql.Row
cur=con.cursor()
cur.execute('select *from genrestable4')
rows=cur.fetchall();
return render_template('list2.html',rows=rows)
@app.route('/homepage') # for visitor show genere list
def homepage():
con=sql.connect('database2.db')
con.row_factory = sql.Row
cur=con.cursor()
cur.execute('select *from genrestable4')
rows=cur.fetchall();
return render_template('homepage.html',rows=rows)
@app.route('/newgenre') # Add new genere
def new_genre():
return render_template('genres.html')
@app.route('/addshow',methods=['POST','GET']) #On submit store it to the database
def addshow():
if request.method=='POST':
#try:
genre=request.form['genre']
ID=request.form['ID']
with sql.connect('database2.db')as con:
cur=con.cursor()
#cur.execute('INSERT INTO genres(genre) VALUES(?)',(genre))
cur.execute('INSERT INTO genrestable4(ID,genre) VALUES(?,?)',(ID,genre))
con.commit()
msg="New genre added sucessfully"
return render_template('result.html',msg = msg)
con.close()
@app.route('/newshow') #Add a new show
def newshow():
return render_template('shows.html')
@app.route('/showlistnow/<msg>') #Add a new show
def showlist(msg):
if(msg=="Big_Bang_Theory"):
return render_template('bbt.html')
elif(msg=="Games_of_thrones"):
return render_template('got.html')
elif(msg=="Friends"):
return render_template('friends2.html')
elif(msg=="How_I_met_your_mother"):
return render_template('himym.html')
elif(msg=="13_Reasons_why"):
return render_template('13reasons.html')
elif(msg=="Lost"):
return render_template('lost.html')
elif(msg=="Breaking_Bad"):
return render_template('bb.html')
elif(msg=="Flash"):
return render_template('flash.html')
elif(msg=="Sherlock_Homes"):
return render_template('sher.html')
elif(msg=="Supernatural"):
return render_template('supernatural.html')
@app.route('/addnewshow',methods=['POST','GET']) # On submit store it the database
def addnewshow():
if request.method=='POST':
#try:
gid=request.form['g_id']
sid=request.form['s_id']
sname=request.form['showname']
rating=request.form['rating']
suggestions=request.form['suggestions']
with sql.connect('database2.db')as con:
cur=con.cursor()
#cur.execute('INSERT INTO genres(genre) VALUES(?)',(genre))
cur.execute('INSERT INTO showlist2(g_id,s_id,sname,rating,suggestions) VALUES(?,?,?,?,?)',(gid,sid,sname,rating,suggestions))
con.commit()
msg="New show added sucessfully"
return render_template('result.html',msg = msg)
@app.route('/enternew') # open new registration page
def new_student():
return render_template('user.html')
@app.route('/addrec',methods=['POST','GET']) # on submit store it to the databse
def addrec():
if request.method=='POST':
#try:
username=request.form['username']
email=request.form['email']
first_name=request.form['first_name']
last_name=request.form['last_name']
dob=request.form['dob']
pwd=request.form['pwd']
activity=request.form['activity']
with sql.connect('database.db')as con:
cur=con.cursor()
cur.execute('INSERT INTO users1(username,email,firstname,lastname,dob,password,activity) VALUES(?,?,?,?,?,?,?)',(username,email,first_name,last_name,dob,pwd,activity))
con.commit()
msg="User account created sucessfully"
return render_template('result2.html',msg = msg)
con.close()
@app.route('/newpage/<msg>') # Show all the list of the show to the user without delete button
def new_page(msg):
con=sql.connect('database2.db')
con.row_factory = sql.Row
cur=con.cursor()
cur.execute('select * from showlist2 where g_id= ?',(msg,))
rows=cur.fetchall();
return render_template('newpage.html',rows=rows)
@app.route('/showlist/<msg>') # Show all the list of the show to the user with delete button
def new_page2(msg):
con=sql.connect('database2.db')
con.row_factory = sql.Row
cur=con.cursor()
cur.execute('select *from showlist2 where g_id= ?',(msg,))
rows=cur.fetchall();
return render_template('newpage2.html',rows=rows)
@app.route('/admin') #homepage login
def admin():
return render_template('adminlogin.html')
@app.route('/adminlogin',methods=['GET','POST']) #homepage admin
def adminlogin():
if request.method=='POST':
password=request.form['password']
if(password=='password'):
return render_template('admin.html')
@app.route('/list') # List all the generes to the admin
def listgenere():
con=sql.connect('database2.db')
con.row_factory = sql.Row
cur=con.cursor()
cur.execute('select *from genrestable4')
rows=cur.fetchall();
return render_template('list2.html',rows=rows)
@app.route('/listgenereuser/<msg>') # List all the generes to the user
def listgenereuser(msg):
con=sql.connect('database2.db')
con.row_factory = sql.Row
cur=con.cursor()
cur.execute('select ID,genre from genrestable4')
rows=cur.fetchall();
return render_template('list3.html',rows=rows,msg1=msg)
@app.route('/showlistuser/<msg>/<msg1>') # Show all the list of the show to the user with delete button
def new_page3(msg,msg1):
con=sql.connect('database2.db')
con.row_factory = sql.Row
cur=con.cursor()
cur.execute('select s_id,sname from showlist2 where g_id= ?',(msg,))
rows=cur.fetchall();
return render_template('newpage3.html',rows=rows,msg1=msg1)
@app.route('/rateshow/<msg>/<msg1>/<msg2>') # List all the generes to the admin
def rateshow(msg,msg1,msg2):
return render_template('rateshow.html',msg=msg,msg1=msg1,msg2=msg2)
@app.route('/rateshowsubmit/<msg1>',methods=['GET','POST'])
def rateshowsubmit(msg1):
if request.method=='POST':
sid=request.form['s_id']
sname=request.form['sname']
uname=request.form['uname']
ratings=request.form['sratings']
comments=request.form['comments']
with sql.connect('database2.db')as con:
cur=con.cursor()
#cur.execute('INSERT INTO genres(genre) VALUES(?)',(genre))
cur.execute('INSERT INTO usercomment3(sid,sname,uname,rating,comments) VALUES(?,?,?,?,?)', (sid,sname,uname,ratings,comments))
msg="Your Ratings has been recorded sucesfully !!"
con.commit()
return render_template('commentadded.html',msg = msg,msg1=msg1)
@app.route('/deleteshow/<ID>') # Delete the selected genere (for admin)
def deleteshow(ID):
try:
with sql.connect('database2.db')as con:
cur=con.cursor()
cur.execute('DELETE from genrestable4 where ID= ?',(ID,))
con.commit()
msg="Record sucessfully deleted sucessfully"
finally:
con.row_factory=sql.Row
cur=con.cursor()
cur.execute('select * from genrestable4')
rows=cur.fetchall();
return render_template('list2.html',rows=rows)
con.close()
@app.route('/deleteshowlist/<ID>/<ID2>') # Delete the selected show (for admin)
def deleteshowlist(ID,ID2):
try:
with sql.connect('database2.db')as con:
cur=con.cursor()
cur.execute('DELETE from showlist2 where s_id= ? and g_id= ?',(ID,ID2,))
con.commit()
msg="record sucessfully deleted"
finally:
con.row_factory=sql.Row
cur=con.cursor()
cur.execute('select * from showlist2')
rows=cur.fetchall();
return render_template('newpage2.html',rows=rows)
con.close()
@app.route('/deleteuser/<ID>') # Delete the selected user (for admin)
def deleteuserlist(ID):
try:
with sql.connect('database.db')as con:
cur=con.cursor()
cur.execute('DELETE from users1 where username= ?',(ID,))
con.commit()
msg="record sucessfully deleted"
finally:
con.row_factory=sql.Row
cur=con.cursor()
cur.execute('select * from users1')
rows=cur.fetchall();
return render_template('list.html',rows=rows)
con.close()
@app.route('/deletecomment/<ID>/<msg>') # Delete the selected user (for admin)
def deletecomment(ID,msg):
try:
with sql.connect('database2.db')as con:
cur=con.cursor()
cur.execute('DELETE from usercomment3 where sname= ?',(ID,))
con.commit()
finally:
con.row_factory=sql.Row
cur=con.cursor()
cur.execute('select * from usercomment3')
rows=cur.fetchall();
return render_template('usercommenttableshow.html',rows=rows,msg=msg)
con.close()
@app.route('/userdetails') # show all the user details
def list():
con=sql.connect('database.db')
con.row_factory = sql.Row
cur=con.cursor()
cur.execute('select *from users1')
rows=cur.fetchall();
return render_template('list.html',rows=rows)
@app.route('/listusercomments/<msg>') # show all the user details
def listcomments(msg):
con=sql.connect('database2.db')
con.row_factory = sql.Row
cur=con.cursor()
cur.execute('select *from usercomment3 where uname= ?',(msg,))
rows=cur.fetchall();
return render_template('usercommenttableshow.html',rows=rows,msg=msg)
@app.route('/got') #homepage
def got():
return render_template('got.html')
@app.route('/hiym') #homepage
def friends():
return render_template('himym.html')
@app.route('/sh') #homepage
def sh():
return render_template('sher.html')
@app.route('/thr') #homepage
def thr():
return render_template('13reasons.html')
@app.route('/bb') #homepage
def bb():
return render_template('bb.html')
if __name__=='__main__':
app.run(debug=True)
| [
"sahanaprasad11@gmail.com"
] | sahanaprasad11@gmail.com |
f16cfe90b3ff3b55379e55b102b27d39f13ce000 | 47c541b186b9494688fd37e508e99e5fefbcfa11 | /pygame_pong.py | 9514cd599488e7a56324e3fc859dbb513c59d5a9 | [] | no_license | vikyskapin/pygames | a974f3f38791770d629d67560b6a56b24a388eb4 | bff4d269e0fccfcff41c06b3735ab36a7fe3adad | refs/heads/main | 2023-06-21T16:32:22.134792 | 2021-07-24T13:52:47 | 2021-07-24T13:52:47 | 389,110,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,462 | py | import turtle
wn = turtle.Screen() #crea una ventana
wn.title("pong by viky")#Le pone nombre a la ventana
wn.bgcolor("black")#cambia el color de fondo
wn.setup(width=800,height=600)
wn.tracer(0)#no updatea la ventana (hace que el juego sea mas rapido porque la setapea una vez y listo)
#SCORE creo las variables que guardan los tantos
score_a = 0
score_b = 0
#Paleta A
paddle_a = turtle.Turtle()#class name
paddle_a.speed(0)#speed of animation (0) la setea a lo mas rapido
paddle_a.shape("square")#le da forma
paddle_a.color("white")#le da color
paddle_a.shapesize(stretch_wid=5,stretch_len=1)#cambia el cuadrado original ensanchandolo asi queda un rectangulo
paddle_a.penup()#hace que no dibuje una linea por donde se mueve
paddle_a.goto(-350,0)#inicializa mi objeto en un lugar en la pantalla
#Paleta B
paddle_b = turtle.Turtle()#class name
paddle_b.speed(0)#speed of animation (0) la setea a lo mas rapido
paddle_b.shape("square")#le da forma
paddle_b.color("white")#le da color
paddle_b.shapesize(stretch_wid=5,stretch_len=1)#cambia el cuadrado original ensanchandolo asi queda un rectangulo
paddle_b.penup()#hace que no dibuje una linea por donde se mueve
paddle_b.goto(350,0)#inicializa mi objeto en un lugar en la pantalla
#Pelota
ball = turtle.Turtle()#class name
ball.speed(0)#speed of animation (0) la setea a lo mas rapido
ball.shape("square")#le da forma
ball.color("white")#le da color
ball.penup()#hace que no dibuje una linea por donde se mueve
ball.goto(0,0)
ball.dx = 0.5
ball.dy = 0.5 #cada vez que se mueve se mueve por 0.5 pixeles
#ANOTADOR
pen = turtle.Turtle()
pen.speed(0)
pen.color("white")
pen.penup()
pen.hideturtle()#no queremos que se vea solo que se vean los numeros que escribe
pen.goto(0,260)#la ubica al centro arriba
pen.write("Player A: 0 Player B: 0",align ="center", font=("Courier",24,"normal"))#le agrega el score inicial
#FUNCIONES
def paddle_a_up():
y = paddle_a.ycor() #esta funcion de turtle obtiene la coordenada y del objeto en la ventana
if(y + 20 < 260):
y += 20
paddle_a.sety(y)#le asigna a la coord y el valor "y"
def paddle_a_down():
y = paddle_a.ycor() #esta funcion de turtle obtiene la coordenada y del objeto en la ventana
if(y - 20 > -260):
y -= 20
paddle_a.sety(y)#le asigna a la coord y el valor "y"
def paddle_b_up():
y = paddle_b.ycor() #esta funcion de turtle obtiene la coordenada y del objeto en la ventana
if(y + 20 < 260):
y += 20
paddle_b.sety(y)#le asigna a la coord y el valor "y"
def paddle_b_down():
y = paddle_b.ycor() #esta funcion de turtle obtiene la coordenada y del objeto en la ventana
if(y - 20 > -260):
y -= 20
paddle_b.sety(y)#le asigna a la coord y el valor "y"
#KEYBOARD BINDING
wn.listen()#le dice a la ventana que "escuche" teclado
wn.onkeypress(paddle_a_up,"w")#cuando el usuario use la tecla "w" llama a la funcion paadle_a_up
wn.onkeypress(paddle_a_down,"s")
wn.onkeypress(paddle_b_up,"Up")
wn.onkeypress(paddle_b_down,"Down")
#Main game loop
while True:
wn.update()
#Moving the ball
ball.setx(ball.xcor() + ball.dx)
ball.sety(ball.ycor() + ball.dy)
#border check
#coordenada y
if ball.ycor() > 290:
ball.sety(290)
ball.dy *= -1
if ball.ycor() < -290:
ball.sety(-290)
ball.dy *= -1
#coordenada x
if ball.xcor() > 390:
ball.goto(0,0)
ball.dx *= -1 #que arranque para el lado contrario al que se fue
score_a += 1
pen.clear()#primero limpia la pantalla de lo que escribio antes si no lo escribe arriba
pen.write("Player A: {} Player B: {}".format(score_a,score_b),align ="center", font=("Courier",24,"normal"))#le agrega el score inicial
if ball.xcor() < -390:
ball.goto(0,0)
ball.dx *= -1
score_b += 1
pen.clear()
pen.write("Player A: {} Player B: {}".format(score_a,score_b),align ="center", font=("Courier",24,"normal"))#le agrega el score inicial
#COLISIONES
if (ball.xcor() > 340 and ball.xcor() < 350) and (ball.ycor() < paddle_b.ycor() + 40 and ball.ycor() > paddle_b.ycor() - 40):
ball.setx(340)#la dibuja un poco mas atras
ball.dx *= -1 #cambia la direccion en el eje x
if (ball.xcor() < -340 and ball.xcor() > -350) and (ball.ycor() < paddle_a.ycor() + 40 and ball.ycor() > paddle_a.ycor() - 40):
ball.setx(-340)
ball.dx *= -1
| [
"noreply@github.com"
] | vikyskapin.noreply@github.com |
441563450f276c9506c35dbf554426efb2961d56 | d80f24e1839b40f62baf9f4e24baea8e3b4db105 | /类/对象的创建.py | 9f248a82b3e6c199670569f567e809de3f5116bb | [] | no_license | cutejiejie/PythonFirstPro | 88222f832d7ed487c9066f651fbf1101c48f45c8 | 489edcadd76d84cc65927c2541d4e7e7604e8654 | refs/heads/master | 2023-07-04T01:12:09.723640 | 2021-07-29T09:16:22 | 2021-07-29T09:16:22 | 389,234,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | class Student:
native_pace='吉林' #直接写在类里的变量,称为类属性
def __init__(self,name,age): #name,age为实例属性
self.name=name #self.name称为实体属性,进行了一个赋值操作,将局部变量的name的值赋给实体属性
self.age=age
# 实例方法
def eat(self):
print('学生在吃饭...')
# Static method: no implicit first argument; lives in the class namespace.
@staticmethod
def method():
    """Print a message demonstrating a @staticmethod call."""
    print('我使用了staticmethod进行修饰,所以我是静态方法')
# Class method: receives the class itself (cls) as its first argument.
@classmethod
def cl(cls):
    """Print a message demonstrating a @classmethod call."""
    print('我是类方法,因为我使用了classmethod进行修饰')
# A def outside any class is a plain function (inside a class it would be a method).
def drink():
    """Print a drinking message; demonstrates a module-level function."""
    message = '喝水'
    print(message)
# Create a Student object (this calls __init__ with the given name and age).
stu1=Student('张三',20)
# print(id(stu1))
# print(type(stu1))
# print(stu1)
stu1.eat() # call via the instance: object.method()
print(stu1.name)
print(stu1.age)
print('-----------------------')
Student.eat(stu1) # same effect as stu1.eat(): calling through the class and passing the instance explicitly
# class.method(instance) --> the instance you pass is exactly the method's `self`
| [
"2267258221@qq.com"
] | 2267258221@qq.com |
9e4cd3a3f0ad3d1faccfb25385c969f2a114977b | 977889dd940ed9b3a3b24b5b998ff53c07165449 | /spider/spider.py | 7eae5e0c7c52aa6ddeceda0f1642a44244d858ff | [] | no_license | lluckydog/dayan | b7d3afdb86c612d247f61e880fe893408f5915dd | 1b00d007b1f4e7d162cfca8cfd2bc666dfbe25ba | refs/heads/master | 2023-08-03T05:28:02.838180 | 2020-12-01T13:08:55 | 2020-12-01T13:08:55 | 277,517,773 | 1 | 1 | null | 2020-10-13T23:20:58 | 2020-07-06T10:58:48 | Java | UTF-8 | Python | false | false | 29,729 | py | import re
import requests
import traceback
import sys
import random
import time
import js2xml
import json
import urllib
import os
from os import path
from bs4 import BeautifulSoup
from datetime import datetime
from datetime import timedelta
from lxml import etree
# from src.SnowNLPAPI.snownlp import SnowNLP
# from src.SnowNLPAPI.snownlp import sentiment
from .models import UserInfo, TweetsInfo, CommentWeiboInfo, CommentInfo
from .agents import getAgent
from .utils import time_fix, extract_weibo_content
def parse_all_content(response):
    """Extract the full weibo text from a "view all" (全文) page.

    response: raw HTML of the detail page. The post body is the first
    <div> under the element with id "M_"; it is serialized back to markup
    and cleaned up by extract_weibo_content().
    """
    document = etree.HTML(response)
    body_node = document.xpath('//*[@id="M_"]/div[1]')[0]
    markup = etree.tostring(body_node, encoding='unicode')
    return extract_weibo_content(markup)
class Weibo:
def __init__(self, keyword, cookie, page=5):
    """Create a spider for one search keyword.

    keyword: search keyword used by get_weibo_keyword().
    cookie:  dict of weibo.cn session cookies sent with every request.
    page:    maximum number of result pages to crawl (default 5).
    """
    self.keyword = keyword
    self.cookie = cookie
    self.agent = getAgent()
    self.page = page
    # FIX: get_weibo_info() reads self.filter (0 = all tweets, 1 = original
    # only) but it was never set, raising AttributeError on first use.
    self.filter = 0
    self.tweets_list_to_insert = list()
    # FIX: get_comment_info() appends to this buffer, but it was never
    # initialized, also raising AttributeError.
    self.comment_list_to_insert = list()
def getTest(self):
    """Print and return the request headers (User-Agent) chosen for this spider."""
    agent = self.agent
    print(agent)
    return agent
def get_userInfo(self,user_id):
    """Scrape the weibo.cn profile page of `user_id` into a UserInfo record.

    Parses https://weibo.cn/<uid>/info for the profile fields, then the
    follow-group page for tweet/follow/fan counts. Returns the populated
    (unsaved) UserInfo instance, or None when the request/parsing fails.

    FIXES vs. the original:
    * both `try` blocks were missing their `except` clauses (a
      SyntaxError); they are restored in the print-and-traceback style
      used by the rest of this class.
    * `datetime.datetime.strptime` / `datetime.timedelta` always raised
      AttributeError under this file's `from datetime import ...`
      imports, so valid birthdays were misfiled as constellations.
    """
    print('get user info')
    try:
        url = "https://weibo.cn/%d/info" % (user_id)
        html = requests.get(url, cookies=self.cookie, headers=self.agent).content
        selector = etree.HTML(html)
        # Join every text node of the profile <div class="c"> so the
        # individual fields can be pulled out with regexes below.
        info = ";".join(selector.xpath('body/div[@class="c"]//text()'))
        # Each findall yields a list; an empty list means the field is absent.
        nickname = re.findall('昵称[::]?(.*?);', info)
        image = selector.xpath('body/div[@class="c"]//img/@src')
        gender = re.findall('性别[::]?(.*?);', info)
        place = re.findall('地区[::]?(.*?);', info)
        briefIntroduction = re.findall('简介[::]?(.*?);', info)
        birthday = re.findall('生日[::]?(.*?);', info)
        sexOrientation = re.findall('性取向[::]?(.*?);', info)
        sentiment = re.findall('感情状况[::]?(.*?);', info)
        vipLevel = re.findall('会员等级[::]?(.*?);', info)
        authentication = re.findall('认证[::]?(.*?);', info)
        url = re.findall('互联网[::]?(.*?);', info)
        # Build the model instance field by field.
        user_info = UserInfo()
        user_info._id = user_id
        if image:
            user_info.Image = image
        if nickname and nickname[0]:
            user_info.NickName = nickname[0].replace(u"\xa0", "")
        if gender and gender[0]:
            user_info.Gender = gender[0].replace(u"\xa0", "")
        if place and place[0]:
            # "地区" is "Province[ City]" separated by a space.
            place = place[0].replace(u"\xa0", "").split(" ")
            user_info.Province = place[0]
            if len(place) > 1:
                user_info.City = place[1]
        if briefIntroduction and briefIntroduction[0]:
            user_info.BriefIntroduction = briefIntroduction[0].replace(u"\xa0", "")
        if birthday and birthday[0]:
            try:
                # FIX: was datetime.datetime.strptime / datetime.timedelta.
                born = datetime.strptime(birthday[0], "%Y-%m-%d")
                user_info.Birthday = born - timedelta(hours=8)
            except Exception:
                # The "生日" field may hold a zodiac sign instead of a date.
                user_info.Constellation = birthday[0]
        if sexOrientation and sexOrientation[0]:
            if sexOrientation[0].replace(u"\xa0", "") == gender[0]:
                user_info.SexOrientation = "同性恋"
            else:
                user_info.SexOrientation = "异性恋"
        if sentiment and sentiment[0]:
            user_info.Sentiment = sentiment[0].replace(u"\xa0", "")
        if vipLevel and vipLevel[0]:
            user_info.VIPlevel = vipLevel[0].replace(u"\xa0", "")
        if authentication and authentication[0]:
            user_info.Authentication = authentication[0].replace(u"\xa0", "")
        if url:
            user_info.URL = url[0]
        try:
            urlothers = "https://weibo.cn/attgroup/opening?uid=%d" % (user_id)
            r = requests.get(urlothers, headers=self.agent, cookies=self.cookie)
            if r.status_code == 200:
                selector = etree.HTML(r.content)
                texts = ";".join(selector.xpath('//body//div[@class="tip2"]/a//text()'))
                if texts:
                    num_tweets = re.findall('微博\[(\d+)\]', texts)
                    num_follows = re.findall('关注\[(\d+)\]', texts)
                    num_fans = re.findall('粉丝\[(\d+)\]', texts)
                    if num_tweets:
                        user_info.Num_Tweets = int(num_tweets[0])
                    if num_follows:
                        user_info.Num_Follows = int(num_follows[0])
                    if num_fans:
                        user_info.Num_Fans = int(num_fans[0])
        except Exception as e:  # RESTORED: counts are best-effort; log and continue
            print("Error: ", e)
            traceback.print_exc()
        # NOTE(review): nothing persists this record here; callers may want
        # user_info.save() -- confirm the intended behavior.
        return user_info
    except Exception as e:  # RESTORED: match the error style of the other methods
        print("Error: ", e)
        traceback.print_exc()
def get_long_weibo(self, weibo_link):
    """Fetch the full text of a truncated ("全文") weibo.

    weibo_link: URL of the comment/detail page of the tweet.
    Returns the cleaned post text, or None when anything goes wrong.
    """
    try:
        page = requests.get(weibo_link, headers=self.agent, cookies=self.cookie).content
        doc = etree.HTML(page)
        # Indexing [1] raises IndexError when the page lacks a second
        # <div class="c"> (e.g. a deleted tweet); the except swallows it.
        doc.xpath("//div[@class='c']")[1]
        raw = doc.xpath('//div[@id="M_"]//span[@class="ctt"]')[0].xpath("string(.)")
        cleaned = raw.replace(u"\u200b", "")
        # Round-trip through the console encoding to drop unencodable chars.
        return cleaned.encode(sys.stdout.encoding, "ignore").decode(sys.stdout.encoding)
    except Exception as e:
        print("Error: ", e)
        traceback.print_exc()
def get_retweet(self, is_retweet, info, wb_content):
    """Prepend retweet metadata (reason + original author) to a tweet's text.

    is_retweet: xpath result for span[@class='cmt']; non-empty marks a retweet.
    info:       the tweet's <div> node.
    wb_content: text of the retweeted (original) weibo.
    Returns the combined string, or None when parsing fails.
    """
    try:
        original_user = is_retweet[0].xpath("a/text()")
        # The last child <div> holds the retweeter's comment plus the
        # "赞/转发/评论" footer; everything from the last "赞" on is cut off.
        retweet_reason = info.xpath("div")[-1].xpath("string(.)").replace(u"\u200b", "").encode(
            sys.stdout.encoding, "ignore").decode(
            sys.stdout.encoding)
        retweet_reason = retweet_reason[:retweet_reason.rindex(u"赞")]
        if not original_user:
            # No author link => the original tweet was deleted.
            wb_content = u"转发微博已被删除"
            if retweet_reason:
                wb_content = (retweet_reason + "\n" + wb_content)
            return wb_content
        else:
            original_user = original_user[0]
            wb_content = (retweet_reason + "\n" + u"原始用户:" +
                          original_user + "\n" + u"转发内容:" + wb_content)
            return wb_content
    except Exception as e:
        print("Error: ", e)
        traceback.print_exc()
def get_weibo_content(self, info):
    """Extract the (possibly long/retweeted) text of one tweet <div>.

    info: lxml node of the tweet. Follows the "全文" link for truncated
    tweets via get_long_weibo(), prepends retweet metadata via
    get_retweet(), and pipes the result through extract_weibo_content().
    Returns None when parsing fails.
    """
    try:
        str_t = info.xpath("div/span[@class='ctt']")
        # Round-trip through the console encoding to drop unencodable chars.
        weibo_content = str_t[0].xpath("string(.)").replace(u"\u200b", "").encode(
            sys.stdout.encoding, "ignore").decode(
            sys.stdout.encoding)
        weibo_id = info.xpath("@id")[0][2:]  # element id is "M_<tweet id>"
        a_link = info.xpath("div/span[@class='ctt']/a")
        is_retweet = info.xpath("div/span[@class='cmt']")
        if a_link:
            # A trailing "全文" link means the text is truncated; fetch the
            # full version from the comment page.
            if a_link[-1].xpath("text()")[0] == u"全文":
                weibo_link = "https://weibo.cn/comment/" + weibo_id
                wb_content = self.get_long_weibo(weibo_link)
                if wb_content:
                    if not is_retweet:
                        wb_content = wb_content[1:]  # drop the leading char of non-retweets
                    weibo_content = wb_content
        if is_retweet:
            weibo_content = self.get_retweet(
                is_retweet, info, weibo_content)
        # Alternative implementation kept from the original author:
        # all_content_link = tweet_node.xpath('.//a[text()="全文" and contains(@href,"ckAll=1")]')
        # if all_content_link:
        # print('all content link')
        # all_content_url = "https://weibo.cn" + all_content_link[0].xpath('./@href')[0]
        # tweet_html = requests.get(all_content_url, cookies=self.cookie, headers=self.agent)
        # weibo_content = parse_all_content(tweet_html)
        # else:
        # print('tweet content')
        # tweet_html = etree.tostring(tweet_node, encoding='unicode')
        # weibo_content = extract_weibo_content(tweet_html)
        return extract_weibo_content(weibo_content)
    except Exception as e:
        print("Error: ", e)
        traceback.print_exc()
# Extract the publish location of a tweet.
def get_weibo_place(self, info):
    """Return the location attached to a tweet, or u"无" when absent.

    info: lxml node of the tweet. A location exists when the first child
    <div> contains a "显示地图" link pointing at place.weibo.com; the
    place name is then the last (or second-to-last, for 秒拍 videos)
    anchor inside span[@class='ctt']. Returns None on parse errors.
    (NOTE(review): nesting below reconstructed -- original indentation
    was lost; confirm against upstream.)
    """
    try:
        div_first = info.xpath("div")[0]
        a_list = div_first.xpath("a")
        weibo_place = u"无"
        for a in a_list:
            if ("place.weibo.com" in a.xpath("@href")[0] and
                    a.xpath("text()")[0] == u"显示地图"):
                weibo_a = div_first.xpath("span[@class='ctt']/a")
                if len(weibo_a) >= 1:
                    weibo_place = weibo_a[-1]
                    # For "...的秒拍视频" tweets the last anchor is the video,
                    # so the place is the one before it.
                    if u"的秒拍视频" in div_first.xpath("span[@class='ctt']/a/text()")[-1]:
                        if len(weibo_a) >= 2:
                            weibo_place = weibo_a[-2]
                        else:
                            weibo_place = u"无"
                    weibo_place = weibo_place.xpath("string(.)").encode(
                        sys.stdout.encoding, "ignore").decode(sys.stdout.encoding)
                break
        return weibo_place
    except Exception as e:
        print("Error: ", e)
        traceback.print_exc()
# Extract the publish time of a tweet.
def get_publish_time(self, info):
    """Return the tweet's publish time as an absolute date string.

    info: lxml node of the tweet. The span[@class='ct'] footer looks like
    "<time> 来自<client>"; the relative formats "刚刚", "N分钟",
    "今天 HH:MM" and "MM月DD日 HH:MM" are converted to absolute times,
    anything else is truncated to the first 16 chars. Returns None on
    parse errors. NOTE(review): the "刚刚"/"分钟" branches omit seconds
    while other call sites use seconds -- confirm this is intended.
    """
    try:
        str_time = info.xpath("div/span[@class='ct']")
        str_time = str_time[0].xpath("string(.)").encode(
            sys.stdout.encoding, "ignore").decode(sys.stdout.encoding)
        publish_time = str_time.split(u'来自')[0].strip()
        if u"刚刚" in publish_time:
            publish_time = datetime.now().strftime(
                '%Y-%m-%d %H:%M')
        elif u"分钟" in publish_time:
            # "N分钟" => N minutes ago.
            minute = publish_time[:publish_time.find(u"分钟")]
            minute = timedelta(minutes=int(minute))
            publish_time = (datetime.now() - minute).strftime(
                "%Y-%m-%d %H:%M")
        elif u"今天" in publish_time:
            today = datetime.now().strftime("%Y-%m-%d")
            time = publish_time[3:]  # NOTE: shadows the imported `time` module locally
            publish_time = today + " " + time
            # now_time = datetime.now()
            # publish_time = publish_time.replace('今天', now_time.strftime('%Y-%m-%d'))
        elif u"月" in publish_time:
            # Assumes the fixed-width layout "MM月DD日 HH:MM".
            year = datetime.now().strftime("%Y")
            month = publish_time[0:2]
            day = publish_time[3:5]
            time = publish_time[7:12]
            publish_time = (year + "-" + month + "-" + day + " " + time)
            # now_time = datetime.now()
            # time_string = publish_time.replace('月', '-').replace('日', '')
            # time_string = str(now_time.year) + '-' + time_string
            # publish_time = time_string
        else:
            publish_time = publish_time[:16]
        return publish_time
    except Exception as e:
        print("Error: ", e)
        traceback.print_exc()
# Extract the client a tweet was posted from.
def get_publish_tool(self, info):
    """Return the client name after "来自" in the tweet footer, or u"无"."""
    try:
        footer = info.xpath("div/span[@class='ct']")[0].xpath("string(.)")
        footer = footer.encode(sys.stdout.encoding, "ignore").decode(sys.stdout.encoding)
        parts = footer.split(u'来自')
        return parts[1] if len(parts) > 1 else u"无"
    except Exception as e:
        print("Error: ", e)
        traceback.print_exc()
# Crawl all tweets of one user.
def get_weibo_info(self,user_id):
    """Crawl every tweet page of `user_id` and save each tweet as TweetsInfo.

    Reads the page count from the paging <input name="mp">, then walks the
    pages, extracting content/place/time/tool plus the like/repost/comment
    counts from the "赞[..] 转发[..] 评论[..]" footer. Tweets already in the
    database (same _id) are skipped.
    NOTE(review): relies on self.filter being set before this is called --
    it is not assigned anywhere in the visible code.
    """
    try:
        url = "https://weibo.cn/u/%d?filter=%d&page=1" % (
            user_id, self.filter)
        html = requests.get(url, cookies=self.cookie, headers=self.agent).content
        selector = etree.HTML(html)
        # No paging input => the user has a single page of tweets.
        if selector.xpath("//input[@name='mp']") == []:
            page_num = 1
        else:
            page_num = (int)(selector.xpath(
                "//input[@name='mp']")[0].attrib["value"])
        pattern = r"\d+\.?\d*"  # numbers inside the 赞/转发/评论 footer
        # for page in range(1, 3):
        for page in range(1, page_num + 1):
            print('spider on page')
            print(page)
            url2 = "https://weibo.cn/u/%d?filter=%d&page=%d" % (
                user_id, self.filter, page)
            html2 = requests.get(url2, headers=self.agent, cookies=self.cookie).content
            selector2 = etree.HTML(html2)
            infos = selector2.xpath("//div[@class='c' and @id]")
            info_s = selector2.xpath("//div[@class='c']")
            print(infos)
            is_empty = info_s[0].xpath("div/span[@class='ctt']")
            if is_empty:
                for info in infos:
                    tweetsItems = TweetsInfo()
                    tweetsItems.UserInfo_id = user_id  # owner of the tweet
                    wb_id = info.xpath("@id")
                    # Tweet text
                    content = self.get_weibo_content(info)
                    # Tweet location
                    cooridinates = self.get_weibo_place(info)
                    # Publish time
                    pubtime = self.get_publish_time(info)
                    # Publishing client
                    tools = self.get_publish_tool(info)
                    str_footer = info.xpath("div")[-1]
                    str_footer = str_footer.xpath("string(.)").encode(
                        sys.stdout.encoding, "ignore").decode(sys.stdout.encoding)
                    # Keep only the footer from the last "赞" onwards.
                    str_footer = str_footer[str_footer.rfind(u'赞'):]
                    guid = re.findall(pattern, str_footer, re.M)
                    # Like count
                    like = int(guid[0])
                    # Repost count
                    transfer = int(guid[1])
                    # Comment count
                    comment = int(guid[2])
                    if wb_id:
                        tweetsItems._id = wb_id[0]
                    if content:
                        tweetsItems.Content = content
                        # s = SnowNLP(content.replace('转发理由','').replace('转发内容', '').replace('原始用户', '').replace('转发微博已被删除', ''))
                        # mm = ()
                        # for i in s.tags:
                        # mm += i
                        # tweetsItems.tags= s.keywords(5)
                        # tweetsItems.pinyin = mm
                        # tweetsItems.sentiments=str(s.sentiments)
                        # print(s.keywords(5))
                    if cooridinates:
                        tweetsItems.Co_oridinates = cooridinates
                    if like:
                        tweetsItems.Like = like
                    if transfer:
                        tweetsItems.Transfer = transfer
                    if comment:
                        tweetsItems.Comment = comment
                    if pubtime:
                        tweetsItems.PubTime = pubtime
                    if tools:
                        tweetsItems.Tools = tools
                    # Insert only tweets that are not already stored.
                    try:
                        print('id')
                        print(tweetsItems._id)
                        TweetsInfo.objects.get(_id = tweetsItems._id)
                    except TweetsInfo.DoesNotExist:
                        print(tweetsItems)
                        try:
                            tweetsItems.save()
                        except Exception as e:
                            print("Error: ", e)
                            traceback.print_exc()
        # try:
        # print("数据抓取完毕,开始写入数据库")
        # TweetsInfo.objects.bulk_create(self.tweets_list_to_insert)
        # print("写入数据库成功")
        # return "数据抓取完毕"
        # except Exception as e:
        # TweetsInfo.objects.bulk_create(self.tweets_list_to_insert)
        # print("部分数据抓取失败,已抓取写入数据库成功")
        # return "e:",e
    except Exception as e:
        print("Error微博文本: ", e)
        traceback.print_exc()
def fix_time(self, publish_time):
    """Convert weibo's 'Sun Sep 16 10:30:00 +0800 2018' timestamps
    into the 'YYYY-MM-DD HH:MM:SS' form used in the database."""
    parsed = datetime.strptime(publish_time, '%a %b %d %H:%M:%S %z %Y')
    return parsed.strftime('%Y-%m-%d %H:%M:%S')
def time_fix(self, time_string):
    """Normalize m.weibo.cn relative timestamps into absolute date strings.

    Handles "刚刚", "N分钟前", "N小时前", "今天 HH:MM", "昨天 HH:MM",
    "MM月DD日 HH:MM" and "MM-DD HH:MM"; anything else is returned as-is.
    """
    now = datetime.now()
    if '刚刚' in time_string:
        return datetime.now().strftime('%Y-%m-%d %H:%M')
    if '分钟前' in time_string:
        n = int(re.search(r'^(\d+)分钟', time_string).group(1))
        return (now - timedelta(minutes=n)).strftime('%Y-%m-%d %H:%M:%S')
    if '小时前' in time_string:
        n = int(re.search(r'^(\d+)小时', time_string).group(1))
        return (now - timedelta(hours=n)).strftime('%Y-%m-%d %H:%M:%S')
    if '今天' in time_string:
        return time_string.replace('今天', now.strftime('%Y-%m-%d'))
    if '昨天' in time_string:
        yesterday = now + timedelta(days=int(-1))
        return time_string.replace('昨天', yesterday.strftime('%Y-%m-%d'))
    if '月' in time_string:
        # "MM月DD日 HH:MM" -> "<year>-MM-DD HH:MM"
        normalized = time_string.replace('月', '-').replace('日', '')
        return str(now.year) + '-' + normalized
    if '-' in time_string:
        # "MM-DD HH:MM" is missing its year -> prepend the current one.
        return str(now.year) + '-' + time_string
    return time_string
# Crawl one tweet's detail page and all of its comments.
def get_comment_info(self, id):
    """Scrape the m.weibo.cn detail page + comment API for tweet `id`.

    Steps: (1) fetch /detail/<id> and parse the embedded JS config via
    js2xml to fill a CommentWeiboInfo row (text, author, counts, picture
    ids -- pictures are downloaded into webview/static); (2) page through
    /api/comments/show, buffering unseen comments; (3) bulk-insert the
    buffered CommentInfo rows. Returns a status string.
    NOTE(review): the parameter `id` shadows the builtin; and
    self.comment_list_to_insert is assumed to be initialized elsewhere --
    it is not set in the visible __init__.
    """
    c_urls ='https://m.weibo.cn/api/comments/show?id='+ id +'&page={}'
    wb_url = 'https://m.weibo.cn/detail/' + id
    wb_r = requests.get(wb_url, headers=self.agent, cookies=self.cookie).content
    soup = BeautifulSoup(wb_r, 'lxml')
    # The tweet data lives in the first inline <script>; js2xml turns the
    # JS object literal into an XML tree we can xpath into.
    src = soup.select('body script')[0].string
    src_text = js2xml.parse(src, debug=False)
    src_tree = js2xml.pretty_print(src_text)
    selector2 = etree.HTML(src_tree)
    wb_id = selector2.xpath("//property[@name='id']//text()")[1]
    wb_userName = selector2.xpath("//property[@name='screen_name']/string//text()")[0]
    wb_userId = selector2.xpath("//property[@name='profile_url']//text()")[1].split('uid=')[1]
    wb_user_profile_image_url = selector2.xpath("//property[@name='profile_image_url']//text()")[1]
    wb_created_at = selector2.xpath("//property[@name='created_at']//text()")[1]
    wb_source = selector2.xpath("//property[@name='source']//text()")[1]
    wb_text = selector2.xpath("//property[@name='text']//text()")[1]
    # Image URL prefixes (append the pic id):
    # https://wx2.sinaimg.cn/large/+字符串(大图)
    # http://wx2.sinaimg.cn/bmiddle/+字符串(中图)
    # https://wx2.sinaimg.cn/thumbnail/+字符串(小图)
    wb_pic_ids = selector2.xpath("//property[@name='pic_ids']/array/string//text()")
    wb_reposts = selector2.xpath("//property[@name='reposts_count']//@value")[0]
    wb_comments = selector2.xpath("//property[@name='comments_count']//@value")[0]
    wb_like = selector2.xpath("//property[@name='attitudes_count']//@value")[0]
    # print(src_tree)
    # print(wb_userName)
    # print(wb_like)
    commentWeiboInfo = CommentWeiboInfo()
    if wb_id:
        commentWeiboInfo.wb_id = wb_id
    if wb_userName:
        commentWeiboInfo.wb_userName = wb_userName
    if wb_userId:
        commentWeiboInfo.wb_userId = wb_userId
    if wb_user_profile_image_url:
        commentWeiboInfo.wb_user_profile_image_url = wb_user_profile_image_url
    if wb_created_at:
        commentWeiboInfo.wb_created_at = self.fix_time(wb_created_at)
    if wb_source:
        commentWeiboInfo.wb_source = wb_source
    if wb_text:
        commentWeiboInfo.wb_text = wb_text
    if wb_pic_ids:
        commentWeiboInfo.wb_pic_ids = wb_pic_ids
        # Download every attached picture (large size) into webview/static.
        # NOTE(review): the "\\" path separator is Windows-specific.
        filepath = path.abspath(path.join(os.getcwd(), "webview/static"))
        print(filepath)
        for wb_pic_id in wb_pic_ids:
            with urllib.request.urlopen("https://wx2.sinaimg.cn/large/" + wb_pic_id, timeout=30) as response, open(filepath +"\\"+ wb_pic_id+".jpg", 'wb') as f_save:
                print("下载图片%s" % wb_pic_id)
                f_save.write(response.read())
                f_save.flush()
                f_save.close()
    if wb_reposts:
        commentWeiboInfo.wb_reposts = int(wb_reposts)
    if wb_comments:
        commentWeiboInfo.wb_comments = int(wb_comments)
    if wb_like:
        commentWeiboInfo.wb_like = int(wb_like)
    # Save the tweet itself unless it is already stored.
    try:
        CommentWeiboInfo.objects.get(wb_id = commentWeiboInfo.wb_id)
        print("微博内容已存在数据库")
    except CommentWeiboInfo.DoesNotExist:
        print("微博内容抓取完毕,开始写入数据库")
        commentWeiboInfo.save()
        print("微博内容写入数据库成功,开始抓取评论")
    except Exception as e:
        return "e:",e
    # Page through the comment API until it reports ok != 1.
    i = 1
    comment_num = 1
    while True:
        r = requests.get(url = c_urls.format(i), headers=self.agent, cookies=self.cookie)
        if int(r.json()['ok']) == 1:
            comment_data = r.json()['data']['data']
            print('正在读取第 %s 页评论:' % i)
            for j in range(0,len(comment_data)):
                commentInfo = CommentInfo()
                print('第 %s 条评论' % comment_num)
                user = comment_data[j]
                wb_id = id
                c_id = user['id']
                c_created_at = user['created_at']
                # Strip emoji / astral-plane characters from the source name.
                c_source = re.sub('[\U00010000-\U0010ffff]|[\uD800-\uDBFF][\uDC00-\uDFFF]','',user['source'])
                c_user_id = user['user']['id']
                c_user_name = user['user']['screen_name']
                c_user_img = user['user']['profile_image_url']
                c_user_url = user['user']['profile_url']
                # Strip HTML tags, reply prefixes and emoji from the text.
                c_text = re.sub('<.*?>|回复<.*?>:|[\U00010000-\U0010ffff]|[\uD800-\uDBFF][\uDC00-\uDFFF]','',user['text'])
                c_likenum = user['like_counts']
                if wb_id:
                    commentInfo.CommentWeiboInfo_id = wb_id
                if c_id:
                    commentInfo.c_id = c_id
                if c_created_at:
                    commentInfo.c_created_at = self.time_fix(c_created_at)
                if c_source:
                    commentInfo.c_source = c_source
                if c_user_id:
                    commentInfo.c_userId = c_user_id
                if c_user_name:
                    commentInfo.c_user_name = c_user_name
                if c_user_img:
                    commentInfo.C_profile_image_url = c_user_img
                if c_user_url:
                    commentInfo.C_profile_url = c_user_url
                if c_text:
                    commentInfo.c_text = c_text
                if c_likenum:
                    commentInfo.c_like_num = int(c_likenum)
                comment_num += 1
                # Buffer only comments that are not already stored.
                try:
                    CommentInfo.objects.get(c_id = commentInfo.c_id)
                    print("评论已存在数据库")
                except CommentInfo.DoesNotExist:
                    self.comment_list_to_insert.append(commentInfo)
                    print(len(self.comment_list_to_insert))
            i+=1
            time.sleep(2)  # throttle to avoid being rate-limited
        else:
            print("跳出while=======================")
            break
    # Flush the buffered comments in one bulk insert.
    try:
        print("评论抓取完毕,开始写入数据库")
        CommentInfo.objects.bulk_create(self.comment_list_to_insert)
        print("评论写入数据库成功")
        return "数据抓取完毕"
    except Exception as e:
        return "e:",e
def get_weibo_keyword(self):
    """Crawl weibo.cn search results for self.keyword (capped at 5 pages).

    NOTE(review): this method appears TRUNCATED in the visible source --
    the `try:` below has no matching `except`, and the body stops
    mid-statement. It also looks unfinished: `datetime.date.today()` /
    `datetime.timedelta` would raise AttributeError under this file's
    `from datetime import ...` imports; `tweet_time` (lines below) is
    presumably a typo for `tweet_item`; and `response.body` is not a
    requests attribute (should be .content/.text). Confirm against the
    upstream repository before relying on it.
    """
    print('get keyword tweets')
    try:
        date_start = datetime.date.today()- datetime.timedelta(5)
        date_end = datetime.date.today()
        time_spread = datetime.timedelta(days=1)
        url_format = "https://weibo.cn/search/mblog?hideSearchFrame=&keyword=" + self.keyword + "&page={}"
        response = requests.get(url = url_format.format(1), headers=self.agent, cookies=self.cookie)
        all_page = re.search(r'/> 1/(\d+)页</div>', response.text)
        if all_page:
            all_page = all_page.group(1)
            all_page = int(all_page)
            if all_page>5:
                all_page = 5  # hard cap on crawled pages
            for page_num in range(2, all_page + 1):
                page_url = response.url.replace('page=1', 'page={}'.format(page_num))
            for i in range(1,all_page+1):
                tree_node = etree.HTML(response.body)
                tweet_nodes = tree_node.xpath('//div[@class="c" and @id]')
                for tweet_node in tweet_nodes:
                    try:
                        tweet_item = TweetsInfo()
                        # The repost link encodes both the tweet id and the user id.
                        tweet_repost_url = tweet_node.xpath('.//a[contains(text(),"转发[")]/@href')[0]
                        user_tweet_id = re.search(r'/repost/(.*?)\?uid=(\d+)', tweet_repost_url)
                        tweet_item.UserInfo_id = user_tweet_id.group(2)
                        tweet_item._id = user_tweet_id.group(1)
                        create_time_info_node = tweet_node.xpath('.//span[@class="ct"]')[-1]
                        create_time_info = create_time_info_node.xpath('string(.)')
                        if "来自" in create_time_info:
                            tweet_item.PubTime = time_fix(create_time_info.split('来自')[0].strip())
                            tweet_time.Tools = create_time_info.split('来自')[1].strip()
                        else:
                            tweet_time.PubTime = time_fix(create_time_info.strip())
                        like_num = tweet_node.xpath('.//a[contains(text(),"赞[")]/text()')[-1]
                        tweet_item.Like = int(re.search('\d+', like_num).group())
                        repost_num = tweet_node.xpath('.//a[contains(text(),"转发[")]/text()')[-1]
                        tweet_item.Transfer = int(re.search('\d+', repost_num).group())
                        comment_num = tweet_node.xpath(
                            './/a[contains(text(),"评论[") and not(contains(text(),"原文"))]/text()')[-1]
                        tweet_item.Comment = int(re.search('\d+', comment_num).group())
                        # Optional geo coordinates from the "显示地图" link.
                        map_node = tweet_node.xpath('.//a[contains(text(),"显示地图")]')
                        if map_node:
                            map_node = map_node[0]
                            map_node_url = map_node.xpath('./@href')[0]
                            map_info = re.search(r'xy=(.*?)&', map_node_url).group(1)
                            tweet_item.Co_oridinates = map_info
                        else:
                            tweet_item.Co_oridinates = u"无"
                        # Follow the "全文" link for truncated tweets.
                        all_content_link = tweet_node.xpath('.//a[text()="全文" and contains(@href,"ckAll=1")]')
                        if all_content_link:
                            all_content_url = "https://weibo.cn" + all_content_link[0].xpath('./@href')[0]
                            r = requests.get(url = all_content_url.format(1), headers=self.agent, cookies=self.cookie)
                            temp_node = etree.HTML(r.body)
                            content_node = temp_node.xpath('//*[@id="M_"]/div[1]')[0]
                            tweet_item.content = extract_weibo_content(etree.tostring(content_node, encoding='unicode'))
                        else:
                            tweet_html = etree.tostring(tweet_node, encoding='unicode')
                            tweet_item.content = extract_weibo_content(tweet_html)
                            # NOTE(review): source file is cut off here.
| [
"1036837280@qq.com"
] | 1036837280@qq.com |
e7b34f07ea94d1df4cea66d2f53e2e7381c5673b | 56a08e581557f300276ddb2c92ebcffbf8e989fa | /movieman/wsgi.py | 160c7cef4030b4d0ba8a40da82ed9ced45cb4c52 | [] | no_license | rj425/Movieman | b2e09b9b1fc8f22310c7816e8b2ab3b9f476be30 | 1de273cc98486f12d3bf5e8f0f6c7df5fc373f82 | refs/heads/master | 2022-12-06T14:27:00.062502 | 2020-08-24T19:07:40 | 2020-08-24T19:07:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
WSGI config for movieman project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at the project settings before building the WSGI app
# (setdefault keeps any value already present in the environment).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'movieman.settings')

# WSGI callable used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"rj81309050@gmail.com"
] | rj81309050@gmail.com |
f54a508e835a08dc6245a63422f33b1a0598bca0 | 5252efd0922ea5be93dfc63db6de282184505346 | /ds/main/strings_arrays/reverse_strings.py | 22677aedb3ff5aa6644285d6d53535d189d40c03 | [] | no_license | faddy/ds-with-python | 157b35a5f22107f6dfba7604ed3ca87d33df6c5e | 6fba0eeb4552fa03fcbfb2f84ce747a2dc2c3e79 | refs/heads/master | 2016-09-11T05:02:18.879067 | 2013-08-18T21:47:46 | 2013-08-18T21:47:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | from data_structures.stacks import Stack
def reverse_using_stack(s):
    """Reverse string `s` by pushing every char onto a Stack and popping.

    Returns `s` unchanged when it is empty/falsy.
    """
    if not s: return s
    stack = Stack()
    for c in s:
        stack.push(c)
    # Collect the popped characters in a list and join once: the original
    # repeated string concatenation (`new_s += ...`) is O(n^2).
    chars = []
    while not stack.is_empty():
        chars.append(stack.pop())
    return ''.join(chars)
def reverse_using_swapping(s):
    """Reverse string `s` by swapping chars from both ends toward the middle.

    Returns `s` unchanged when it is empty/falsy.
    """
    if not s: return s
    arr = list(s)
    # FIX: use floor division -- `len(arr)/2` is a float on Python 3 and
    # range() rejects it; `//` behaves identically on Python 2 and 3.
    for i in range(len(arr) // 2):
        # Tuple swap replaces the separate swap() helper call.
        arr[i], arr[len(arr) - 1 - i] = arr[len(arr) - 1 - i], arr[i]
    return ''.join(arr)
def swap(arr, i, j):
temp = arr[i]
arr[i] = arr[j]
arr[j] = temp
def test():
    # Smoke-test the swapping-based reversal (Python 2 print statements).
    print reverse_using_swapping('')
    print reverse_using_swapping('abcdef')
    print reverse_using_swapping('!@#$%^&*()')

# Run the smoke test only when executed as a script.
if __name__ == '__main__':
    test()
| [
"fahadghanidgp@gmail.com"
] | fahadghanidgp@gmail.com |
8a69b3abdbe989e9632031a056e21efcc892c649 | c15a28ae62eb94dbf3ed13e2065195e572a9988e | /Cook book/src/9/preserving_function_metadata_when_writing_decorators/example.py | e5e1850554e8f722b7368d301f04da5a8473d8a1 | [] | no_license | xuyuchends1/python | 10798c92840a1a59d50f5dc5738b2881e65f7865 | 545d950a3d2fee799902658e8133e3692939496b | refs/heads/master | 2021-01-25T07:07:04.812140 | 2020-02-28T09:25:15 | 2020-02-28T09:25:15 | 93,647,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | import time
from functools import wraps
def timethis(func):
    '''
    Decorator that reports the execution time.
    '''
    @wraps(func)  # preserve func's __name__, __doc__, __annotations__, ...
    def wrapper(*args, **kwargs):
        # perf_counter is monotonic and high-resolution, unlike time.time,
        # so the measured interval cannot be skewed by clock adjustments.
        start = time.perf_counter()
        result = func(*args, **kwargs)
        end = time.perf_counter()
        print(func.__name__, end - start)
        return result
    return wrapper
if __name__ == '__main__':
    # Demo: time a busy-loop countdown and show that @wraps preserved the
    # decorated function's metadata.
    @timethis
    def countdown(n: int):
        '''
        Counts down
        '''
        while n > 0:
            n -= 1

    countdown(100000)
    # Thanks to functools.wraps, these report countdown's own metadata,
    # not the wrapper's.
    print('Name:', countdown.__name__)
    print('Docstring:', repr(countdown.__doc__))
    print('Annotations:', countdown.__annotations__)
| [
"xuyuchends@163.com"
] | xuyuchends@163.com |
478ecc8f67f27b697874e6fda41b3eaa6577c0ec | f3728b34bf785ef4d832475810971ae1d3dc0641 | /review.py | 85f90e5c4ba8ce11af41fb1281f2257edd1ce9cf | [] | no_license | CTHS-20-21/SW4_Week2_Review1 | d406476090e227325291fdb6786edd9047eb181c | 2e63b252ba79daab798de66db5409c68156774c3 | refs/heads/main | 2023-02-12T15:53:29.061398 | 2021-01-13T14:32:27 | 2021-01-13T14:32:27 | 328,696,818 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 960 | py | # A Program to determine employee eligability for advancement
# Created by: <your name here>
# Copyright CTHS Engineering, Inc., 2021
# This code, or any portion of this code, cannot be reused without
# previous written approval from the company CIO or CEO.

# Employee under review.
empName = "Sam"

# Project 1 (P1) - New school wing
# Abbreviations used for every project below:
#   TA     - Task Accuracy (percent)
#   EstBud - Estimated Budget
#   ActBud - Actual Budget
#   EstMP  - Estimated Manpower
#   ActMP  - Actual Manpower
empP1TA = 92
empP1EstBud = 1285000
empP1ActBud = 1301346
empP1EstMP = 1625
# BUG FIX: this line previously re-assigned empP1EstMP (clobbering the
# estimate and leaving empP1ActMP undefined); every other project defines
# both Est and Act manpower.
empP1ActMP = 1650

# Project 2 - Custom motorcycle company warehouse
empP2TA = 98
empP2EstBud = 650000
empP2ActBud = 624000
empP2EstMP = 525
empP2ActMP = 515

# Project 3 - Minor Nascar training track
empP3TA = 96
empP3EstBud = 2500000
empP3ActBud = 3231325
empP3EstMP = 1050
empP3ActMP = 1250

# Project 4 - Man cave warehouse and house
empP4TA = 92
empP4EstBud = 825000
empP4ActBud = 830000
empP4EstMP = 400
empP4ActMP = 375

# your code goes below
| [
"noreply@github.com"
] | CTHS-20-21.noreply@github.com |
b1b37aea147f4eae935359ca21d61807d97cf417 | bbb8d941d0aa439ca435e0f00ddbd7330ad2db79 | /cpp/cc1.py | ee3d587fa2a2f8a3948cee50ae517e7285bcf118 | [] | no_license | dimritium/Code | 7ca940124074d7f7bca28559e0fe2f3cba24f846 | e6678b3dabe21fcd05e362bb8115f7812ad9abb8 | refs/heads/master | 2021-07-25T06:35:22.755474 | 2017-11-04T15:07:50 | 2017-11-04T15:07:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | t = int(input())
# Per test case: read a string and index every character's occurrences.
for i in range(t):
    s = str(input())
    # Map each character to the list of indices where it occurs.
    dic = {}
    for i in range(len(s)):  # NOTE(review): reuses loop variable `i` from the outer test-case loop
        try:
            dic[s[i]].append(i)
        except:
            dic[s[i]] = [i]
    for k,v in dic.items():
        flag = 0
        if len(dic[k])>1:
            if dic[k][-1]!=len(s)-1:
                dic[k].append(len(s)-1)
            for j in range(len(v)-2):
                # NOTE(review): `re` is used here but never imported in the
                # visible code -- this would raise NameError at runtime.
                new_s = re.compile(r"["+s[dic[k][j]:dic[k][j+1]]+"]")
                # NOTE(review): the file ends mid-statement below (missing
                # ':' and loop body) -- the source is truncated/unfinished.
                for l in range(j+1,len(v))
| [
"dimrishubhi@gmail.com"
] | dimrishubhi@gmail.com |
e6e8a0a6b723a3cd58723e3b49be2ffaf6364fac | 4f7d52e2c8ca632eb38c9f31774464df54582d2d | /atriaapp/atriacalendar/migrations/0001_initial.py | 863c163761d671d7d50607876fd2b388a747d3b0 | [
"MIT"
] | permissive | ansel-rangers11/atria-calendar | f847e9f9053ed4339702dca74f6620a7c1d7e240 | 4a6456d16742e95838d8fe1018547bd540b19726 | refs/heads/master | 2020-03-29T12:42:12.028498 | 2018-09-22T20:55:37 | 2018-09-22T20:55:37 | 149,913,467 | 0 | 0 | null | 2018-09-22T20:08:00 | 2018-09-22T20:08:00 | null | UTF-8 | Python | false | false | 1,782 | py | # Generated by Django 2.1.1 on 2018-09-17 01:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CalendarItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item_name', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.CreateModel(
name='ItemContent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_cd', models.CharField(max_length=2)),
('field_name', models.CharField(max_length=200)),
('field_value', models.CharField(max_length=200)),
('calendar_item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='atriacalendar.CalendarItem')),
],
),
migrations.CreateModel(
name='ItemSchedule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('schedule_description', models.CharField(max_length=200)),
('item_start_date', models.DateTimeField(verbose_name='start date/time')),
('item_end_date', models.DateTimeField(verbose_name='end date/time')),
('calendar_item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='atriacalendar.CalendarItem')),
],
),
]
| [
"ian@anon-solutions.ca"
] | ian@anon-solutions.ca |
52cbe4b332c585b6271b3ad11537757fe52d6c8c | 4ce7411e7a9ca278d3197a3b375443c159e17b78 | /weather/question4.py | 39f72c3bbf698644fb3a2fdc42c4c58820246ebf | [] | no_license | intelburn/mu-python | 91fc7fee5ba7ccc7780fedfd891f19489e251cca | c69e531f45d897dbb15f7daac47470972e3afa07 | refs/heads/master | 2021-08-23T02:33:37.079943 | 2017-12-02T15:14:26 | 2017-12-02T15:14:26 | 110,743,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | import CSVHandler
Raininess=CSVHandler.GetAvg(csv='weather_data.csv', Type='actual_precipitation', Default=0.0, Period='month')
print(CSVHandler.GetGreatest(Raininess))
| [
"nerdacs@gmail.com"
] | nerdacs@gmail.com |
fdf6d15105190f97c727f802372c840e0e2128cb | e54bf59bb6bd8d4a8f47611e44fc3e701063e0a4 | /Advent 3-1_2.py | 9db9c2d777dacfcd322c330d0433e0b566236327 | [] | no_license | DarkTobio/Advent-of-Code | 3e6d0e7b3d8ff5293cfe0a30b770ea33ec80f141 | 5764701b668794172851a73daad2e1b0c88287fb | refs/heads/master | 2023-03-14T22:43:40.621693 | 2021-03-24T13:57:04 | 2021-03-24T13:57:04 | 328,881,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,745 | py | #ADVENT OF CODE [3 - 1_2]
# PROBLEMA (3-1)
forest = ['...#...###......##.#..#.....##.',
'..#.#.#....#.##.#......#.#....#',
'......#.....#......#....#...##.',
'...#.....##.#..#........##.....',
'...##...##...#...#....###....#.',
'...##...##.......#....#...#.#..',
'..............##..#..#........#',
'#.#....#.........#...##.#.#.#.#',
'.#..##......#.#......#...#....#',
'#....#..#.#.....#..#...#...#...',
'#.#.#.....##.....#.........#...',
'......###..#....#..#..#.#....#.',
'##.####...#.............#.##..#',
'....#....#..#......#.......#...',
'...#.......#.#..#.........##.#.',
'......#.#.....###.###..###..#..',
'##..##.......#.#.....#..#....#.',
'..##.#..#....#.............##.#',
'....#.#.#..#..#........##....#.',
'.....####..#..#.###..#....##..#',
'#.#.......#...##.##.##..#....#.',
'.#..#..##...####.#......#..#...',
'#...##.......#...####......##..',
'...#.####....#.#...###.#.#...#.',
'....#...........#.##.##.#......',
'.....##...#.######.#..#....#..#',
'.#....#...##....#..######....#.',
'...#.....#...#####.##...#..#.#.',
'.....#...##........##.##.##.###',
'#.#..#....##....#......#....#.#',
'......##...#.........#....#.#..',
'###..#..##......##.#####.###.##',
'#.....#..##.##....#...........#',
'##..#.#..##..#.#.....#......#..',
'.#.##.#..#.#....##..#..#....#..',
'.#......##..##.#...#..#.......#',
'#....##.##..###..###......##.#.',
'....###..##.......#.###.#....#.',
'..##........#........##.....#..',
'.#..#.....#...####.##...##.....',
'....#.#.#.....#.##..##.....#..#',
'..............#.....#...#.....#',
'.#.....#......###...........#.#',
'.....#.#......#.##..#..........',
'.#......###............#.#.##..',
'.#.#....##.#..###.....#.##..#.#',
'.......#.#.#..#..#..#...##..#.#',
'.#.###...##.#.#.####.#.#...#...',
'...#.#....#......##.##.#.......',
'#...#.....##....#........##....',
'.....###...#.##.#......##.#..#.',
'..#...##.##.###..#..#......####',
'.#.##.#..#.##..##..........##..',
'..#.#.#..#.#.....#...###.....#.',
'..#..#.#....#.##.............##',
'.......#..###..#.#...#.....##.#',
'####.#.#......#..#.##.........#',
'..........#.....#..##......###.',
'..#..............#...#..##.....',
'......#.#.#..#.##.....####..##.',
'.##.#..#.#....#.......#..#.....',
'..#..#..#.##.#....###.#.#.#.#.#',
'.....#....#......###..#........',
'#.#.#..#...###.....#......#.##.',
'...#.#....#.#......#........#..',
'..#...###.#...#..#....##...#..#',
'.###.##..#..#...###.#..#.####..',
'#....#..##..##..#......#...##..',
'#.#..#...#..#...###..#.#.##....',
'##....#....##.####...#.#.###...',
'##.#...#.......#.##.##....#...#',
'..#.#..........#..#.#.#....#..#',
'..#........#...#....#....#....#',
'..#..#....#.......#........#...',
'......#....#....##.#....#.#.##.',
'.##...###.##.##....##.#...###..',
'.....##..#.#.....###..#.....###',
'....##.#.##...##..##........#..',
'#...#..##.#.#....#......#...#..',
'.###.##.#........#.####....#...',
'#.##.....#..#...#..##.##..#.#..',
'.....#.#..#....#..#...##.##.#..',
'.#......#####...##...#.#.###.#.',
'#......##....#.....#......##.#.',
'#.#.##.###.#......#####..#.....',
'........###.#...#..#.#........#',
'....#....###..#.##.#...#....#..',
'..........#..#.#....#...#.#...#',
'#.##......###.#.#.#....####...#',
'...#.#....#........##.#.....##.',
'.....##..#.#.#..###...##...#...',
'#...#...#....#....##........#..',
'.....#.........##.#......#..#..',
'#.....##..#.###.....#....##.##.',
'...#..#..#.#........##...##.#.#',
'..#..##.###.....#.#.....###.##.',
'..###.........#...##.....###...',
'...###...##.#...##....##.......',
'.#.#..#...###..#.#....#....#...',
'##..#.......#....#.#...#..#..#.',
'..#......#....####..##..#.###.#',
'..#.......##........#.#.#..#...',
'.#.......#.##.#.##.#.......#..#',
'###...#...#...#...#..#...#...##',
'..#..........#..###........##..',
'.##..#....##......##........#.#',
'......#.##......#......##...#.#',
'.#.#....#.#.#........#......#..',
'.#.#..#....####..##...##....#..',
'.#...##..#..#..#####....##.#...',
'.##.#.#...#...#.#...#.##.#...#.',
'###.#...##..#.###.#.....#.##.#.',
'#.....#.###.#.#...#..#....#.#..',
'..##..#....#......#.........###',
'.#...#...##......#...#.####....',
'..#.##...##..............#.#..#',
'..#......#..##...........#..#.#',
'..#####...#..#.......##...###..',
'..##..#....#.#...###.#...#.....',
'..#....#..#.#.......#..#.#.#...',
'.##..#.#.....##....#.......#...',
'...#.#..###...##....#....##..#.',
'.....##..#...##.####....##...#.',
'.......#.........#...#.##..####',
'........###..#..#.##.###..#....',
'.#.#..#.##.##.........#...#....',
'.###......#.....#....##....####',
'.##..##...........#.....#.....#',
'#.#.#.#.#.#.##..#.#.#..#.##....',
'.##.##...##..#....##..###..####',
'#..##.#......#...###.........#.',
'#..#...#..##..#..##.....##...#.',
'#...##..#...##.#.###.#...#.....',
'.###.....#.......#...#.##.###.#',
'..........#.#......#...###...##',
'..##....#.#..#....#.###...#..##',
'#.#..#....##.##..##.........##.',
'#.....#.##.###.#..#....##...#..',
'...#........##...#..###..######',
'#..#.#.#.#...#....#....###.#..#',
'...##.##.##.....##..#........#.',
'..#.#....#..#.......#...##.##.#',
'###.##.......##..#.####...#.#..',
'....#.#.....##.....#.#.##...#..',
'.#..##..#.....#.#..#...#..#..#.',
'.###....##...#......#.....#....',
'##.##.###......#...#...###.#...',
'#...#.##...#.#....##.....####..',
'#.#.#.#...###...##.............',
'..#....#.....##.#...#......#...',
'....#...#......#...#..#...#.#..',
'.###..#.....#..#...#....#...#..',
'..#...#.#..###.......#..#.#...#',
'#...###.##.....#....#..#.#..##.',
'...#.##.#.##......#.#.#.##.....',
'........####...##...##..#....#.',
'.#.#....#....#.#...##.###...##.',
'#.#...###.#.#.#....#....#.#....',
'.####.#..#.#....#..#.#..#..#...',
'#####...#...#...#....#.#.#..##.',
'..###.##.###...#..........#.##.',
'##.....#...#....###..###.#.#.#.',
'#..##.#..#..#..#...#.#...###..#',
'##....#.#...##......#.#...#...#',
'.##..........#.#....#...#.##..#',
'....#....####.#.####......#.###',
'..##.#.....####.#.####.......#.',
'#.##.##.#.....#.##......##...#.',
'......###..#.....#.....##......',
'..#..#....#.#...#.....#......##',
'##..#..#..###.#.#..#..##.....#.',
'#....#.#.....#####...#.#.......',
'.....#..#....#.#.#..#...#...#..',
'............#.##......##.##.#.#',
'....#...#.#........###....#....',
'..#..#...###.#....##..#..###.##',
'#.##....#...#.....##..#.##.#...',
'...##..###...#.#.....##.#......',
'.#..#.##.#####..#.......#..###.',
'...#.##...###.....####.#.#.....',
'.#......##.#.#.#.#.##.#.....#..',
'..#.##.#..##.......#.#.....##..',
'..................#....#...#...',
'.##.#..#.#.#..#.......#.#..##.#',
'....#........#......#..####..#.',
'#...#...##..##.#..#.......##...',
'#..#..#..#..#........####..#.#.',
'..#.#......#..#.##.##.#.#...#.#',
'...#..#......#.#.###.#.##..##..',
'..#...##.....#..#...##..#.#....',
'#.........#....#..#....##.##..#',
'..#..#.#....#..#...#.##.....#..',
'...#..#...#.#.....#..##..#.#...',
'....#........#.#....##..##..#..',
'#.....#.#..#.......#.##.....#..',
'.#.###.###...##...##..###...#..',
'.##.##.......#.#......#.....#.#',
'...#....##....#..#..........#.#',
'..#.##.........#.#.....#.....#.',
'...#.##..##.......##..##...#...',
'#.##......##.##..#.....##...##.',
'#.#.#..##...#.#............#.#.',
'....#.....#......##...#.#.....#',
'...#.#......#.#...###.......#..',
'......##..###....#.#...#.#####.',
'..#..#.#.#...##.#...###..##..#.',
'##.##.#.#.##.#..#....#...#...#.',
'#..#....######.##.#...#...#.#..',
'.#..#.##....#..#.#.......#....#',
'....#....#......##....##.#.#...',
'.###......#..#..#.......####..#',
'.#.#.....#..###...........##...',
'.##..##.##.....####..#..#..##..',
'..#..##.#......#...###.##..#.#.',
'....##..#.....###..#.##....##.#',
'#..#......#....#.........#.....',
'.#...#.....#.#..#..##....#.....',
'.##..#...#..##.#..#...........#',
'..#..##........##.......#..#...',
'#.....#....#....#.#.#...##.#...',
'...#...#.#.#..#.##.#.#...#.....',
'..#..#.#....#....###....#.#.#..',
'...###..#...#..#....#.....#....',
'...#...#.#..#.....#...###......',
'##......#..........#.#.#..#.#.#',
'....#.....#.....#..#..#.#.#.#..',
'...####...#.##...#.#..#....#.#.',
'#.##........##.............#.##',
'#.#.#.#.#.....................#',
'.#.###....#..##.##.##....#.....',
'#.#...#.####.###...#..#..#.#...',
'.##...#..###.......##..#.#.....',
'#.#.#.#...#...#.##.....#.......',
'.##.#.#.#......####..#.#.......',
'###..........#......#...##...#.',
'.........##...#.##...#.#.......',
'...#.#.....#...#..#...#..##..#.',
'.#..###...#.#.........###....#.',
'##..#...#........#.........##..',
'.....#...#.#...#.#.#...........',
'..#....##...#.#..#..#.##....##.',
'.##....#.#.....##.#..#..#...##.',
'..##......#.#...#.#.......##.#.',
'##...#..#...##.#........#.##...',
'#......#.##..#.#..#.###.......#',
'#.#...#.....#.#......#.#.#.....',
'#.....#..#.......#....##.#.#..#',
'###.#....#..##.#.##....#....#..',
'#.##.##....#.#..#.#...#....#...',
'####...#####...#.....#....##.#.',
'....#.#...#.#.##.#.#.##.#.#.###',
'#.....##.#.....#.#......#.#..#.',
'.#....##.#..#........#...##.#..',
'......#...#....##....##....##..',
'..###.....#....##.#...#..#.....',
'#....##...##.##..##.#...#...#..',
'#..#...#...#.#....##..#.#....#.',
'......................#.....#..',
'.#..#...#.........#....##...###',
'.##.#.#...##...#...#...#..#....',
'.#.###....#.#............##..#.',
'###..##.#.#.#.#....##...#......',
'.##................####...##.##',
'.#.#.........##....#.#.##.##.#.',
'....#...#...#...##...#.##.#..#.',
'.#.#........#..##.....#..#....#',
'##.#..#.#....#.....#...#...#...',
'.#...##....#.....##....#..#.#.#',
'####.....#..#....#......###.##.',
'##..##.#....###.....#...#......',
'.##.#...#.....#.#..#.#..#.#...#',
'.....#.#..#..#..###.#...###.#..',
'.#.#.##.#..#.#..#...#..#.......',
'..#.....#....#.##.##.##.......#',
'.#..##....###...#..............',
'#....#...#.#.......#....##.#...',
'....#.#..##.#....#..#.#....#.#.',
'#.........##...#.#.............',
'#.#.......##.....#...##..##.#.#',
'.......#....#..##...#..#######.',
'.#.#...##........#..#.....#.#..',
'.#.......#..#......#.##.##...##',
'.........#............#....#..#',
'.#......#...##...##...#....###.',
'.........#...#.#.###.......#...',
'###.#..#.#.#.#......##...#.#...',
'.#.........##.#....###....#.#..',
'#.#....#..#.##.#..#....##...#..',
'###.#...#..#..##........#.###..',
'.....#....#..#........#..#.##.#',
'..#.....##......#....#..#.#.#..',
'.#.........#.....#.....#.......',
'......#....#.###..#.#....#....#',
'..#.#..#.#.###.........#..#..#.',
'..#..#.#.#.........#....##.#.#.',
'#.......#........##...##....#..',
'##..#..#...###...#..##..#..#.#.',
'##..#..#....#.#..##..#..#.#..#.',
'..##.....##.#..#.#........###..',
'..#.#..#..##........#...#....##',
'.##..#....##..#..#..#..#.#....#',
'#....#.....##........#.....#.##',
'......#....#.#..#......#.##....',
'.......#..#..#......##.........',
'......#...#..##.....#......#..#',
'#..#..#....##....#........##..#',
'##....#...#.#.....#####........',
'...#.#..#.#.##.#.##..##...#....',
'..#..#..#..#..#....#..#..##...#',
'.#.....#....##.##....##.....#..',
'#...#.....#.....#.#...#.#....#.',
'.###...#..##....#..#...#.###...',
'....#..##..#.......#.##.##..###',
'#.......##.....#.......#.#...##',
'#.....#.#.#....#.#......#.#.#..',
'..##.....#..###......##........',
'.....#...#..##....#......#.....',
'#..#..#....#.#...#..###.......#',
'.....#.....#....#..#...#.#..##.',
'#####........#...#..#..##..#.#.',
'.#..#...#.##....#.#..#......###',
'#.###.#..#.....##..##....#...#.',
'.#...#.#####....##..........##.']
encounter = []
tree_lines = 0
tree_pos = 0
while tree_lines < len(forest): # RECORRE FILAS
while tree_pos < len(forest[tree_lines]): # RECORRE COLUMNAS
encounter.append(forest[tree_lines][tree_pos])
tree_pos += 3
break
if tree_pos >= len(forest[tree_lines]):
tree_pos = tree_pos - len(forest[tree_lines])
tree_lines += 1
print("Total de árboles P3-1:", encounter.count('#')) # TOTAL DE ÁRBOLES ENCONTRADOS
# PROBLEMA (3-2)
# Right 1, down 1.
# Right 3, down 1. (This is the slope you already checked.)
# Right 5, down 1.
# Right 7, down 1.
# Right 1, down 2.
import numpy as np
# tuples for moves
right = (1,3,5,7,1)
down = (1,1,1,1,2)
def conteo_arboles_ruta(right_x, down_x):
#encounter = []
tree_lines = 0
tree_pos = 0
contador_arboles = 0
while tree_lines < len(forest): # RECORRE FILAS
while tree_pos < len(forest[tree_lines]): # RECORRE COLUMNAS
if forest[tree_lines][tree_pos] == '#':
contador_arboles += 1
tree_pos += right_x
if tree_pos >= len(forest[tree_lines]):
tree_pos = tree_pos - len(forest[tree_lines])
tree_lines += down_x
break
return(contador_arboles)
rutas = []
for i in range(0, len(right)):
rutas.append(conteo_arboles_ruta(right[i],down[i]))
print("Total árboles según ID ruta",rutas)
print("Multip arboles rutas", np.prod(rutas)) | [
"64618828+DarkTobio@users.noreply.github.com"
] | 64618828+DarkTobio@users.noreply.github.com |
929f1314f711be52f0b4ecec90351d5a87451a4b | bd501f2fec27ec62b5e43a59d09bdce45a1992a4 | /syncthingmanager/migrations/0003_auto_20150205_1542.py | c1373133c44484ccf338655a30cbea82a28a88e5 | [] | no_license | rectory-school/rectory-technology-manager | 2aa315993d67bfb86a858cad77b5f1de4d8db9b5 | 19f073762412b5fea6218dcd3a2f079d2337aba6 | refs/heads/master | 2021-01-10T05:05:02.334298 | 2016-02-08T16:45:52 | 2016-02-08T16:45:52 | 51,310,993 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('syncthingmanager', '0002_auto_20150204_2117'),
]
operations = [
migrations.AlterModelOptions(
name='folder',
options={'ordering': ('name',)},
),
migrations.AlterField(
model_name='folder',
name='name',
field=models.CharField(unique=True, max_length=50),
preserve_default=True,
),
]
| [
"adam@thepeacock.net"
] | adam@thepeacock.net |
19449a8c3d7391986351f441cf5c2b743a3dbcb2 | 2c143ba64032f65c7f7bf1cbd567a1dcf13d5bb1 | /腾讯/回溯算法/022括号生成.py | c5dd0dd441e7531fdd68cfbbe845ec6452796fcd | [] | no_license | tx991020/MyLeetcode | 5b6121d32260fb30b12cc8146e44e6c6da03ad89 | cfe4f087dfeb258caebbc29fc366570ac170a68c | refs/heads/master | 2020-04-09T21:43:41.403553 | 2019-03-27T18:54:35 | 2019-03-27T18:54:35 | 160,611,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,960 | py | '''
给出 n 代表生成括号的对数,请你写出一个函数,使其能够生成所有可能的并且有效的括号组合。
例如,给出 n = 3,生成结果为:
[
"((()))",
"(()())",
"(())()",
"()(())",
"()()()"
]
'''
'''
class Solution:
def generateParenthesis(self, n):
"""
:type n: int
:rtype: List[str]
"""
self.res = []
self.singleStr('', 0, 0, n)
return self.res
def singleStr(self, s, left, right, n):
if left == n and right == n:
self.res.append(s)
if left < n:
self.singleStr(s + '(',left + 1, right, n)
if right < left:
self.singleStr(s + ')',left, right + 1, n)
非常牛逼的讲解,需要这样的人来给我们讲算法
####以Generate Parentheses为例,backtrack的题到底该怎么去思考?
所谓Backtracking都是这样的思路:在当前局面下,你有若干种选择。那么尝试每一种选择。如果已经发现某种选择肯定不行(因为违反了某些限定条件),就返回;如果某种选择试到最后发现是正确解,就将其加入解集
所以你思考递归题时,只要明确三点就行:选择 (Options),限制 (Restraints),结束条件 (Termination)。即“ORT原则”(这个是我自己编的)
对于这道题,在任何时刻,你都有两种选择:
加左括号。
加右括号。
同时有以下限制:
如果左括号已经用完了,则不能再加左括号了。
如果已经出现的右括号和左括号一样多,则不能再加右括号了。因为那样的话新加入的右括号一定无法匹配。
结束条件是: 左右括号都已经用完。
结束后的正确性: 左右括号用完以后,一定是正确解。因为1. 左右括号一样多,2. 每个右括号都一定有与之配对的左括号。因此一旦结束就可以加入解集(有时也可能出现结束以后不一定是正确解的情况,这时要多一步判断)。
递归函数传入参数: 限制和结束条件中有“用完”和“一样多”字样,因此你需要知道左右括号的数目。 当然你还需要知道当前局面sublist和解集res。
因此,把上面的思路拼起来就是代码:
if (左右括号都已用完) {
加入解集,返回
}
//否则开始试各种选择
if (还有左括号可以用) {
加一个左括号,继续递归
}
if (右括号小于左括号) {
加一个右括号,继续递归
}
你帖的那段代码逻辑中加了一条限制:“3. 是否还有右括号剩余。如有才加右括号”。这是合理的。不过对于这道题,如果满足限制1、2时,3一定自动满足,所以可以不判断3。
这题其实是最好的backtracking初学练习之一,因为ORT三者都非常简单明显。你不妨按上述思路再梳理一遍,还有问题的话再说。
以上文字来自 1point3arces的牛人解答
''' | [
"wudi@hetao101.com"
] | wudi@hetao101.com |
5997d5115bdfe2680462395d7811d99f9b4410f1 | 9d695277e1423df136d6381ab4a18c46bcb54de4 | /Data-Formatter/weather/w_kaggle_script.py | d8090b3244ca42263de1e094becff0924cc3694f | [] | no_license | mcSchwarzer/web_db | 7fe7b2fbfc586149feffe7fc7e7bad47a1d2795e | 8f32471632e4f8a8eb488f45895063dfec3a4ef9 | refs/heads/master | 2020-03-09T19:01:02.674156 | 2018-05-11T09:54:41 | 2018-05-11T09:54:41 | 128,947,134 | 1 | 1 | null | 2018-04-10T14:47:50 | 2018-04-10T14:33:12 | Python | UTF-8 | Python | false | false | 2,505 | py | import numpy as np
import pandas as pd
import os
import bq_helper
print(os.listdir("../input"))
# create a helper object for our bigquery dataset
DatabaseHelper = bq_helper.BigQueryHelper(active_project= "bigquery-public-data", dataset_name= "noaa_gsod")
query_List = []
for x in range(1970, 2017): query_List.append("SELECT stn AS stationenNummer,year AS jahr,mo AS monat,da AS tag,temp AS temperatur,fog AS nebel,rain_drizzle AS nieselRegen,snow_ice_pellets AS schneeEis,hail AS hagel,thunder AS donner,tornado_funnel_cloud as tornadoWolke FROM `bigquery-public-data.noaa_gsod.gsod%d`" % (x)) # select whatever you want here ...
print (query_List)
sum_of_query_sizes = 0.0
for query in query_List: sum_of_query_sizes += DatabaseHelper.estimate_query_size(query)
print (sum_of_query_sizes) #cmplx of all queries in query_list[]
year = 1970
#for every query in the query list execute the sql statement and save the resulting csv file in the output dir
for exQuery in query_List:
dataframe = DatabaseHelper.query_to_pandas_safe(exQuery, max_gb_scanned=5)
dataframe.to_csv('wetter_US_%d.csv' % (year), index = False)
print ("saved wetter_US_%d" % (year))
year += 1;
#stations:
stationsQuery = """SELECT usaf AS stationenNummer, lat As latitude, lon AS longitude FROM `bigquery-public-data.noaa_gsod.stations` WHERE country = 'US' AND lat IS NOT NULL AND lon IS NOT NULL AND NOT (lat = 0.0 AND lon = 0.0) ORDER BY usaf"""
stationsComplx = DatabaseHelper.estimate_query_size(stationsQuery)
print("querySize for stations = %d" % (stationsComplx))
stations = DatabaseHelper.query_to_pandas_safe(stationsQuery, max_gb_scanned=0.1) #cmplx is not too big ...
stations.to_csv('WetterStationen_US.csv', index = False) #saving stationsFile as output
#problems: 1. you could just save all files from 1970 - 2017 but you get an error ath like : "memory space not enough" --> you have to do it almost one at a time and save it locally
# 2. you could perform a join with dataframes (stationsNummer to lat, lon) but for that the memory from the kernel is not enough ... atleast not if you have files that big
# --> do it manually with for examplw java: HashMap with station key = stationNumber object = string --> you can check with .contains()
# 3. the stationsNumber "999999" is there wy too often ... so you can maybe just remove it completly
# 4. lat & lon is to 4% null ... also remove the stations where both lat and lon are 0.0
| [
"noreply@github.com"
] | mcSchwarzer.noreply@github.com |
65f530e1aea7c5cfdd4b278371c7f530aeeee0f2 | 00bc824ae4a262ceeaff0f31fd18bea5bc4fb26d | /Level 1/3진법.py | 7b5abc4f92cfc3f9ae146d2fea486edce45f1bd8 | [] | no_license | jeongbinboo/Programmers | e8d39efa0ff900f4823c7ef26b7888bd8831c36f | 3c86c79dab8193b5c4e528f89320bdb3e4999a13 | refs/heads/master | 2023-05-31T07:42:12.597340 | 2021-06-25T02:41:02 | 2021-06-25T02:41:02 | 379,181,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | def solution(n):
res=[]
answer=0
while n != 0:
answer=answer*3+n%3
n=n//3
return answer
| [
"jrkasey3461@naver.com"
] | jrkasey3461@naver.com |
b26cacf2a0feb88315ad9cb6258cf8b5227cf171 | 32b96f47e534ba2f7e1c7da662cf754e34418656 | /src/polls/tests.py | a12e1b08cea8c1e81804d477d6edc8f581b5269d | [] | no_license | ouyangqiong/python_django | 391e0a94ff4873c4f9ebc7e3637456ad4c17dd34 | 0f7a01c9c5f361721ed1dd2f531836669f3cc3de | refs/heads/master | 2021-01-23T18:11:35.561926 | 2014-10-14T07:14:08 | 2014-10-14T07:14:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,183 | py | from django.test import TestCase
from django.utils import timezone
import datetime
from django.core.urlresolvers import reverse
from polls.models import Question
# Create your tests here.
class QuestionMethodTests(TestCase):
def test_was_published_recently_with_future_question(self):
"""
was_published_recently() should return False for questions whose
pub_date is in the future
"""
time =timezone.now() - datetime.timedelta(days=30)
old_question=Question(pub_date=time)
self.assertEqual(old_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""
was_published_recently() should return False for questions whose
pub_date is older than 1 day
"""
time =timezone.now() + datetime.timedelta(days=30)
future_question=Question(pub_date=time)
self.assertEqual(future_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""
was_published_recently() should return True for questions whose
pub_date is within the last day
"""
time =timezone.now() - datetime.timedelta(hours=1)
recent_question=Question(pub_date=time)
self.assertEqual(recent_question.was_published_recently(), True)
def create_question(question_text,days):
"""
Create a question with the given "question_text" published the given
number of 'days' offset to now (negative for questions published
in the past ,positive for questions that have yet to be published).
"""
time = timezone.now() +datetime.timedelta(days=days)
return Question.objects.create(question_text=question_text,pub_date=time)
class QuestionViewTests(TestCase):
def test_index_view_with_no_questions(self):
"""
If no questions exists,an appropriate message should be displayed.
"""
response=self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response,"No polls are available")
self.assertQuerysetEqual(response.context['latest_question_list'],[])
def test_index_view_with_a_past_question(self):
"""
Questions with a pub_date in the past should be displayed on the
index page
"""
create_question(question_text="Past question.",days=-30)
response=self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_index_view_with_a_future_question(self):
"""
Questions with a pub_date in the future should not be displayed on the index page.
"""
create_question(question_text="Future question.",days=30)
response=self.client.get(reverse('polls:index'))
self.assertContains(response,"No polls are available.",status_code=200)
self.assertQuerysetEqual(response.context['latest_question_list'],[])
def test_index_view_with_future_question_and_past_question(self):
"""
Even if both past and future questions exist,only past questions
should be displayed
"""
create_question(question_text="Past question.",days=-30)
create_question(question_text="Future question.",days=30)
response=self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(response.context['latest_question_list'],['<Question: Past question.>'])
def test_index_view_with_two_past_questions(self):
"""
The questions index page may displayed multiple questions.
"""
create_question(question_text="Past question 1.",days=-30)
create_question(question_text="Past question 2.",days=-5)
response=self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question 2.>','<Question: Past question 1.>']
)
| [
"qiong.ouyang@ericsson.com"
] | qiong.ouyang@ericsson.com |
cff0d5ef5aea5438b5e981b1b9ec006260ee3ea4 | 827af5b7c9f934a92503ea616f772cd01383b175 | /blog/views/public/index.py | b5c48e32fb55bf9509a6d1af51e7047007eca2f6 | [] | no_license | semyon72/pylog.7myon.com | 00bb6c90adc08914c86c81e0f783913462ed31ca | 4750e71ac9786339bac46f891fb0b44307e6ba35 | refs/heads/main | 2023-04-03T20:09:40.908114 | 2021-04-16T14:29:11 | 2021-04-16T14:29:11 | 358,614,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,975 | py | # Project: blog_7myon_com
# Package:
# Filename: index.py
# Generated: 2021 Mar 10 at 16:03
# Description of <index>
#
# @author Semyon Mamonov <semyon.mamonov@gmail.com>
from django.core.paginator import PageNotAnInteger, EmptyPage
from django.db.models import fields, Case, When, F, Value, Subquery, OuterRef, Sum, functions
from django.shortcuts import get_object_or_404
from django.utils.html import mark_safe
from django.views.generic import ListView, DetailView
from .author import PublicMostPopularAuthorView
from .blog import PublicMostPopularBlogView
from .entry import PublicEntryDetailView
from ...models import Entry, Author, Blog, EntryText
from ...models_tools import Regexp, IContains, StripTags
class PublicIndexAsideContentMixin:
def _get_aside_content(self, view_class, daydelta=None):
kwargs = {
view_class.daydelta_kwargs: daydelta
}
template_response = view_class.as_view()(self.request, **kwargs)
return mark_safe(
template_response.render().content.decode(encoding=template_response.charset)
)
def _get_aside_context_data(self, view_class, context_key='aside', daydeltas=None):
"""
:param context_key: something like - 'blog_aside'
:param daydeltas: something like (None, 7, 30, 365)
:return:
"""
if not daydeltas:
daydeltas = (None,)
context_kwargs = (
(context_key+'_'+('full' if not daydelta else (str(daydelta)+'days')), daydelta)
for daydelta in daydeltas
)
aside_context_data = {}
for ckey, daydelta in context_kwargs:
aside_context_data[ckey] = self._get_aside_content(view_class, daydelta)
return aside_context_data
def get_aside_context(self):
view_settings = (
# (PublicMostPopularBlogView, (None, 7, 30, 365)), # (...7, 30, 365) 'redundant information'
(PublicMostPopularBlogView, None),
(PublicMostPopularAuthorView, None)
)
result = {}
for view_class, daydeltas in view_settings:
context_key = view_class.model.__name__.lower()+'_aside'
result.update(self._get_aside_context_data(view_class, context_key, daydeltas))
return result
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['aside_content'] = self.get_aside_context()
return context
class PublicIndexView(PublicIndexAsideContentMixin, ListView):
template_name = 'blog/public/index.html'
paginate_by = 10
model = Entry
ordering = ['-pub_date']
def get_paginator(self, queryset, per_page, orphans=0, allow_empty_first_page=True, **kwargs):
paginator = super().get_paginator(queryset, per_page, orphans, allow_empty_first_page, **kwargs)
paginator._page = paginator.page
# copied from Paginator.get_page(self, number):
def _get_page(page_number=1):
try:
number = paginator.validate_number(page_number)
except PageNotAnInteger:
number = 1
except EmptyPage:
number = paginator.num_pages
return paginator._page(number)
paginator.page = _get_page
return paginator
def get_entry_detail_content(self, entry, truncate_text_to_length=None):
view_names = {
'author': {PublicEntryDetailView._VIEW_NAME_KEY: 'blog:public_index_author'},
'blog': {PublicEntryDetailView._VIEW_NAME_KEY: 'blog:public_index_blog'},
'entry': {PublicEntryDetailView._VIEW_NAME_KEY: 'blog:public_index_entry'},
}
template_response = PublicEntryDetailView.as_view(
truncate_text_to_length=truncate_text_to_length,
view_names=view_names,
)(self.request, **{PublicEntryDetailView.object_kwarg: entry})
return mark_safe(template_response.render().content.decode(encoding=template_response.charset))
def entry_detail_contents_to_entries(self, entries, truncate_text_to_length=256):
if len(entries) > 0:
orig_mutable = self.request.GET._mutable
if not orig_mutable:
self.request.GET._mutable = True
page_kwarg = PublicEntryDetailView.page_kwarg
orig_page = self.request.GET.pop(page_kwarg, None)
for entry in entries:
entry.entry_detail_content = self.get_entry_detail_content(entry, truncate_text_to_length)
if orig_page is not None:
self.request.GET.setlist(page_kwarg, orig_page)
self.request.GET._mutable = orig_mutable
def get_context_data(self, *, object_list=None, **kwargs):
context = super().get_context_data(object_list=object_list, **kwargs)
entries = context.get('object_list', [])
self.entry_detail_contents_to_entries(entries)
return context
class PublicIndexByAuthorView(PublicIndexView):
object_kwarg = 'id'
object_model = Author
def get_object(self):
obj = self.kwargs.get(self.object_kwarg)
if obj is not None and isinstance(obj, self.model):
return obj
return get_object_or_404(self.object_model, pk=obj)
def get_queryset(self):
queryset = self.get_object().entry_set.all().filter(inactive=False)
ordering = self.get_ordering()
if ordering:
if isinstance(ordering, str):
ordering = (ordering,)
queryset = queryset.order_by(*ordering)
return queryset
class PublicIndexByBlogView(PublicIndexByAuthorView):
object_model = Blog
class PublicIndexEntryView(PublicIndexAsideContentMixin, DetailView):
template_name = PublicIndexView.template_name
model = Entry
pk_url_kwarg = 'id'
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
entry = context.get('object')
entry.entry_detail_content = PublicIndexView.get_entry_detail_content(self, entry) # None or 0 means the all content
return context
class PublicIndexSearchView(PublicIndexView):
    """ This view needs in stored function 'strip_tags'
    Implementation for MySQL
    /* https://stackoverflow.com/a/13346684 */
    DROP FUNCTION IF EXISTS strip_tags;
    DELIMITER |
    CREATE FUNCTION strip_tags($str text) RETURNS text
    BEGIN
        DECLARE $start, $end INT DEFAULT 1;
        LOOP
            SET $start = LOCATE("<", $str, $start);
            IF (!$start) THEN RETURN $str; END IF;
            SET $end = LOCATE(">", $str, $start);
            IF (!$end) THEN SET $end = $start; END IF;
            SET $str = INSERT($str, $start, $end - $start + 1, "");
        END LOOP;
    END; |
    DELIMITER ;
    Test
    SELECT STRIP_TAGS('<span>hel<b>lo <a href="world">wo<>rld</a> <<x>again<.') REGEXP '.*Hello.+wo.+ag.*' as `clean_text`;
    """
    # URL query parameter holding the search text, and the per-hit weights
    # used to rank matches in the headline vs. the body text.
    search_kwarg = 'q'
    headline_cost = 50
    body_text_cost = 35
    def _get_value_expression(self):
        # Split the free-text query into non-empty tokens; None when absent.
        q = self.request.GET.get(self.search_kwarg, None)
        if q:
            return [v for v in q.split() if v]
    def get_queryset(self):
        """Rank entries by weighted headline/body matches of the search terms."""
        # Need to implement - This more optimized query -
        # duration of execution on 100.000 rows of entries and 160.713 rows of entrytext is 10.593 sec - 11.109 sec
        # if to make use LIKE '%on_%world%' instead of REGEXP '.*on.+world.*' then will be faster, in practice
        # SELECT *
        # FROM (
        #     SELECT `blog_entry`.`id`, `blog_entry`.`blog_id`, `blog_entry`.`author_id`,
        #         `blog_entry`.`headline`, `blog_entry`.`create_date`, `blog_entry`.`pub_date`,
        #         `blog_entry`.`mod_date`, `blog_entry`.`inactive`,
        #         COALESCE((
        #             SELECT SUM(0.35) AS `rank`
        #             FROM `blog_entrytext` U0
        #             WHERE (
        #                 (STRIP_TAGS(U0.`body_text`) REGEXP '.*on.+world.*')
        #                 AND U0.`entry_id` = `blog_entry`.`id`
        #             ) GROUP BY U0.`entry_id` ORDER BY NULL
        #         ), 0.0) AS `text_rank`,
        #         CASE WHEN (`blog_entry`.`headline` REGEXP '.*on.+world.*') THEN 0.5 ELSE 0.0 END AS `entry_rank`
        #     FROM `blog_entry`
        #     WHERE NOT `blog_entry`.`inactive`
        # ) as r
        # WHERE r.text_rank + r.entry_rank > 0
        # ORDER BY r.text_rank + r.entry_rank DESC, r.pub_date DESC
        val = self._get_value_expression()
        qs = super().get_queryset()
        if val is not None:
            # Subquery: weighted count of EntryText rows whose stripped body
            # matches the search terms, correlated on the outer entry pk.
            sq_bt_rank = EntryText.objects.filter(
                # body_text__striptags__iregex=val,
                # Regexp(StripTags(F('body_text')), val),
                IContains(StripTags(F('body_text')), val),
                entry=OuterRef('pk')
            ).values('entry').annotate(rank=Sum(self.body_text_cost, output_field=fields.IntegerField())).values('rank')
            qs = qs.annotate(
                text_rank=functions.Coalesce(Subquery(sq_bt_rank), 0, output_field=fields.IntegerField()),
                # entry_rank=Case(When(Regexp(F('headline'), val), then=Value(50)), default=Value(0), output_field=fields.IntegerField()),
                entry_rank=Case(When(IContains(F('headline'), val), then=Value(self.headline_cost)), default=Value(0), output_field=fields.IntegerField()),
                total_rank=F('text_rank')+F('entry_rank')
            ).filter(inactive=False, total_rank__gt=0).order_by('-total_rank', '-pub_date')
        # Now query is - But this query is less optimized than above "Need to implement" -
        # duration of execution on 100.000 rows of entries and 160.713 rows of entrytext is 24.719 sec - 29.219 sec
        # SELECT `blog_entry`.`id`, `blog_entry`.`blog_id`, `blog_entry`.`author_id`,
        #     `blog_entry`.`headline`, `blog_entry`.`create_date`, `blog_entry`.`pub_date`,
        #     `blog_entry`.`mod_date`, `blog_entry`.`inactive`,
        #     COALESCE((SELECT SUM(0.35e0) AS `rank` FROM `blog_entrytext` U0 WHERE ((STRIP_TAGS(U0.`body_text`) REGEXP '.*ce.+pl.*') AND U0.`entry_id` = `blog_entry`.`id`) GROUP BY U0.`entry_id` ORDER BY NULL), 0.0e0) AS `text_rank`,
        #     CASE WHEN (`blog_entry`.`headline` REGEXP '.*ce.+pl.*') THEN 0.5e0 ELSE 0.0e0 END AS `entry_rank`,
        #     (COALESCE((SELECT SUM(0.35e0) AS `rank` FROM `blog_entrytext` U0 WHERE ((STRIP_TAGS(U0.`body_text`) REGEXP '.*ce.+pl.*') AND U0.`entry_id` = `blog_entry`.`id`) GROUP BY U0.`entry_id` ORDER BY NULL), 0.0e0) + CASE WHEN (`blog_entry`.`headline` REGEXP '.*ce.+pl.*') THEN 0.5e0 ELSE 0.0e0 END) AS `total_rank`
        # FROM `blog_entry`
        # WHERE (
        #     NOT `blog_entry`.`inactive`
        #     AND (COALESCE((SELECT SUM(0.35e0) AS `rank` FROM `blog_entrytext` U0 WHERE ((STRIP_TAGS(U0.`body_text`) REGEXP '.*ce.+pl.*') AND U0.`entry_id` = `blog_entry`.`id`) GROUP BY U0.`entry_id` ORDER BY NULL), 0.0e0)
        #     + CASE WHEN (`blog_entry`.`headline` REGEXP '.*ce.+pl.*') THEN 0.5e0 ELSE 0.0e0 END) > 0.0e0
        # )
        # ORDER BY `total_rank` DESC, `blog_entry`.`pub_date` DESC
        return qs.prefetch_related('author', 'blog', 'coauthors')
    def add_found_info(self, entry):
        """Derive hit counts from the rank annotations and store them on the entry."""
        # Integer-divide the accumulated rank by the per-hit cost to recover
        # how many headline / body-text matches contributed to it.
        found_entries = entry.fields.get('entry_rank', {}).get('value', 0) // self.headline_cost
        found_entrytexts = entry.fields.get('text_rank', {}).get('value', 0) // self.body_text_cost
        if found_entries + found_entrytexts > 0:
            fi = {'found_entries': found_entries, 'found_entrytexts': found_entrytexts}
            k = 'rank_info'
            entry.fields[k] = entry.create_fields_item(None, k.replace('_', ' '), fi)
    def get_entry_detail_content(self, entry, truncate_text_to_length=None):
        # Enrich the entry with search-hit info before the normal rendering.
        self.add_found_info(entry)
        return super().get_entry_detail_content(entry, truncate_text_to_length)
| [
"semyon.mamonov@gmail.com"
] | semyon.mamonov@gmail.com |
cfc84ef51959fd4c5bd1da10cd8cb2adca5aa115 | 19924ac187843f4131281c2a3376bf446c1be510 | /mid-term/test.py | 85fdfdef444028cfbbdf7a90a667e2e6922646b2 | [] | no_license | mcfaddja/InfoTheory-MidTerm-py | f3768c9e27c6a734f9d4f850cb9e0553689ad407 | a180abce6039b03d3e750c62371aa9ccc6aa2da2 | refs/heads/master | 2021-03-27T11:46:02.760514 | 2018-05-20T02:38:10 | 2018-05-20T02:38:10 | 120,137,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | class MyTest(object):
    def __init__(self, val):
        # Comparison key used by __lt__.
        self.val = val
    def __lt__(self, other):
        # Order instances by their wrapped values; Python derives ``a > b``
        # from the reflected call ``b.__lt__(a)``, so ``>`` also works.
        return self.val < other.val
a = MyTest(2)
b = MyTest(3)
print(a.val)  # -> 2
print(a > b)  # -> False, resolved via the reflected __lt__ (b < a)
| [
"mcfaddja@uw.edu"
] | mcfaddja@uw.edu |
7b67aee50ceb658a23adcd771866a044b3dde4c2 | def03a1f8a1cb537b58288097b1cceb20f971466 | /chapter_1/variable_more.py | 552bf464cc2c69443ebc2764f0b1b2bba885f25e | [] | no_license | Pratham-vaish/Harshit-Vashisth-Python-Begginer-Course-Notes | b6cba74a3bf2e65d2a31b4cae840d78324f8276b | b3f553818fd40c11e8f333779d6c4c0bfa8c2ed9 | refs/heads/main | 2023-04-05T19:54:14.067613 | 2021-05-08T05:42:09 | 2021-05-08T05:42:09 | 365,426,324 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | #How to asign more than one variable in one line
name, age = "pratham", "14"
# NOTE(review): there is no space before "your", so this prints
# "hello prathamyour age is 14" -- probably unintended, left as-is.
print('hello ' + name + 'your age is '+ age)
# give one value to multiple variable
x=z=d=7
print(x+z+d)
"noreply@github.com"
] | Pratham-vaish.noreply@github.com |
f7357494094720abf9346a4b74e8665a42bf7763 | 92510524c697e3f47ae5b23c202b37183d8e7025 | /Project 5/backend.py | 45fee3131d514b60daaac4d9121e384d24afed04 | [] | no_license | tcloud1105/python_projects_1 | 88ee4ea31ed5cdb43c35f58806d89fdf78128959 | d66848d4ea6cc32cef5b624fb973a314d4ecb516 | refs/heads/main | 2022-12-31T16:30:00.600408 | 2020-10-17T09:08:40 | 2020-10-17T09:08:40 | 304,830,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,418 | py | import sqlite3
def connect():
    """Create books.db (if needed) and ensure the ``book`` table exists.

    Returns None.  The connection is always closed, even when the DDL
    statement raises (the original leaked the connection in that case).
    """
    conn = sqlite3.connect("books.db")
    try:
        cur = conn.cursor()
        cur.execute("CREATE TABLE IF NOT EXISTS book (id INTEGER PRIMARY KEY, title TEXT, author TEXT, year INTEGER, isbn INTEGER)")
        conn.commit()
    finally:
        conn.close()
def insert(title, author, year, isbn):
    """Add one book row; the ``id`` column is assigned by SQLite."""
    conn = sqlite3.connect("books.db")
    try:
        cur = conn.cursor()
        cur.execute("INSERT INTO book VALUES(NULL,?,?,?,?)", (title, author, year, isbn))
        conn.commit()
    finally:
        # Close even on failure (the original leaked the connection here).
        conn.close()
def view():
    """Return every row of the ``book`` table as a list of tuples."""
    conn = sqlite3.connect("books.db")
    try:
        cur = conn.cursor()
        cur.execute("SELECT * FROM book")
        return cur.fetchall()
    finally:
        # Close even on failure (the original leaked the connection here).
        conn.close()
def search(title="", author="", year="", isbn=""):
    """Return rows where ANY given column equals the supplied value (OR match).

    Note the defaulted "" values also participate in the OR comparison, so a
    row with an empty title/author would match too (original behaviour kept).
    """
    conn = sqlite3.connect("books.db")
    try:
        cur = conn.cursor()
        cur.execute("SELECT * FROM book WHERE title=? OR author=? OR year=? OR isbn=?",
                    (title, author, year, isbn))
        return cur.fetchall()
    finally:
        # Close even on failure (the original leaked the connection here).
        conn.close()
def delete(id):
    """Remove the row with the given primary key (no-op if absent)."""
    conn = sqlite3.connect("books.db")
    try:
        cur = conn.cursor()
        cur.execute("DELETE FROM book WHERE id=?", (id,))
        conn.commit()
    finally:
        # Close even on failure (the original leaked the connection here).
        conn.close()
def update(id, title, author, year, isbn):
    """Overwrite every field of the row with the given primary key."""
    conn = sqlite3.connect("books.db")
    try:
        cur = conn.cursor()
        # SQL string kept byte-identical to the original statement.
        cur.execute("UPDATE book SET title=? ,author=?, year=?, isbn=? WHERE id=? ", (title, author, year, isbn,id))
        conn.commit()
    finally:
        # Close even on failure (the original leaked the connection here).
        conn.close()
# Ensure the table exists as soon as this module is imported.
connect()
| [
"noreply@github.com"
] | tcloud1105.noreply@github.com |
c25a6ac1be0ce69ed589150a312a429403511622 | 7273b88684fbf71d7bd9cbe1d8a43ae66f10e3e2 | /Goruntu Isleme/Beginning/ornk26.py | 09e90f627629022344bec37d5d42a3b25f587737 | [
"MIT"
] | permissive | fazlikeles/RACLAB | 8b59c8af7c5f692ae2814b1510068beafa710420 | f5ce3b8205b11d072b9dadd305c11c278f184388 | refs/heads/master | 2023-08-16T13:25:13.089773 | 2021-10-02T10:19:09 | 2021-10-02T10:19:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | #-*-coding: utf-8 -*-
# Image filters demo (original comment: "Image Filtreleri")
import numpy as np
import cv2
from matplotlib import pyplot as plt
img=cv2.imread('resimler/opencv.png')
# blur: replaces each pixel with the mean of its 5x5 neighbourhood
blur=cv2.blur(img,(5,5))
gaus=cv2.GaussianBlur(img,(5,5),0)
median=cv2.medianBlur(img,5) # takes the median of the given pixel neighbourhood
bilateral=cv2.bilateralFilter(img,9,75,75)
plt.subplot(231),plt.imshow(img),plt.title('Original')
plt.xticks([]),plt.yticks([])
plt.subplot(232),plt.imshow(blur),plt.title('Blurred')
plt.xticks([]),plt.yticks([])
plt.subplot(233),plt.imshow(gaus),plt.title('Gaussian')
plt.xticks([]),plt.yticks([])
plt.subplot(234),plt.imshow(median),plt.title('Median')
plt.xticks([]),plt.yticks([])
plt.subplot(235),plt.imshow(bilateral),plt.title('Bilateral')
plt.xticks([]),plt.yticks([])
plt.show()
| [
"nevzatbol06@gmail.com"
] | nevzatbol06@gmail.com |
62cd735bf3bd66bfa41f89911b1a16366b1a19e7 | 72f5adc4b6f79dd40e975c86abcdbd3d0ccada86 | /venv/bin/gunicorn_paster | 43cf72ceadbb1d9db6993812ddd5ce212904fb7e | [] | no_license | katrek/flask_vacancy_parser | 77101604ec5bfeb47c009b9d8329b42d9d30bf4a | bbea4ae860bb78f7264b05e92c6664f8e4c4b3cf | refs/heads/master | 2023-01-11T11:58:09.275448 | 2019-08-29T06:36:53 | 2019-08-29T06:36:53 | 204,666,913 | 1 | 1 | null | 2023-01-03T12:19:03 | 2019-08-27T09:22:35 | Python | UTF-8 | Python | false | false | 271 | #!/Users/artemtkachev/PycharmProjects/flask_parser2/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.pasterapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"akatrek@gmail.com"
] | akatrek@gmail.com | |
dfb054b4925fc8a2dcc7a62bd45d8be4ede2d4d9 | 2525a51ad517cfe689fb7a86a9be1392e321367c | /particle.py | d074a91b9d2d3a7a63c6d2b4544a87af84882805 | [
"MIT"
] | permissive | dpearson1983/simpleSim | 8a665fae51e8406e97d04ee4a924cba414a39df3 | db64c037ca07eddb43b05eafba41cff5b3da0029 | refs/heads/master | 2021-05-21T00:37:21.905968 | 2020-04-09T14:01:10 | 2020-04-09T14:01:10 | 252,471,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | import numpy as np
class particle:
    """A point mass advanced in time with the velocity-Verlet scheme.

    r, v and a may be scalars or numpy arrays of matching shape.
    """
    def __init__(self, mass, r, v, a):
        self.r = r      # position
        self.v = v      # velocity
        self.m = mass   # mass (not used by the integrator itself)
        self.a = a      # acceleration (assumed constant between steps)

    def updatePos(self, dt):
        """Drift: advance the position using the current velocity.

        BUG FIX: the original added an extra ``0.5*self.a*dt`` term, which
        has velocity units (not length) and double-counts the acceleration
        when combined with the half-kicks performed in velVerlet().
        """
        self.r = self.r + self.v*dt

    def updateVel(self, dt):
        """Kick: advance the velocity using the current acceleration."""
        self.v = self.v + self.a*dt

    def velVerlet(self, dt):
        """One kick-drift-kick velocity-Verlet step of size ``dt``."""
        self.updateVel(0.5*dt)
        self.updatePos(dt)
        self.updateVel(0.5*dt)
| [
"dpearson@localhost.localdomain"
] | dpearson@localhost.localdomain |
ff8a17e763d29cd20ded16dc22fef3608a1b6c3a | 889f559f87be069c768c80471eb890c8b1b91e48 | /BASICS/ITERATIONS.py | a6b4dccd8550ff16048086574d0bc54105a8a650 | [] | no_license | samratkar/PYTHON | 7db36508308b096cfe7ee07757c1ac61154a2a3d | fd51a522e1275caa5e8aa34aee121fbc77127281 | refs/heads/master | 2021-07-12T12:02:09.028926 | 2020-06-25T02:09:05 | 2020-06-25T02:09:05 | 145,820,653 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | for x in [1,2,3,4] : print (x**2, end = ' ')
# Multiple positional arguments are separated by the default sep=' '.
print (1,2,3)
| [
"samratk@gmail.com"
] | samratk@gmail.com |
583d7923f42d8bbe499465c22bf5cb51f27e66a4 | d1f5322360175ef6b9f5a8962bc4ce5112cdd71a | /home/views.py | c07f03f07d5fd9384943507757ee815dfacdc2da | [] | no_license | martokk/django-tutorials-examples | 4a9b63412f38c00f314a5a56a5df6ebe11a26063 | 23d68817d808378adeea775ac3282cca7461f495 | refs/heads/master | 2020-05-31T18:02:09.252503 | 2019-06-06T23:03:47 | 2019-06-06T23:03:47 | 190,423,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | from django.shortcuts import render
from django.conf import settings
from django.urls import URLPattern, URLResolver
urlconf = __import__(settings.ROOT_URLCONF, {}, {}, [''])
def list_urls(lis, acc=None):
    """Recursively yield the flattened pattern paths of a Django urlpatterns tree."""
    acc = [] if acc is None else acc
    if not lis:
        return
    head = lis[0]
    if isinstance(head, URLPattern):
        # Leaf route: emit the accumulated prefix plus this pattern.
        yield acc + [str(head.pattern)]
    elif isinstance(head, URLResolver):
        # Included urlconf: descend with the resolver's prefix appended.
        yield from list_urls(head.url_patterns, acc + [str(head.pattern)])
    # Continue with the remaining siblings at this level.
    yield from list_urls(lis[1:], acc)
def home(request):
    """Render the home page with the project's url patterns and app lists."""
    from django.conf import settings
    # NOTE(review): this name is immediately shadowed by the assignment
    # below; the import is kept only for its side effect of loading
    # tutorials.urls -- confirm whether that side effect is needed.
    from tutorials.urls import urlpatterns
    from django.urls import get_resolver
    urlpatterns = set(v[1] for k,v in get_resolver(None).reverse_dict.items())
    # Apps enabled by the default "django-admin startproject" template.
    default_apps = [
        'django.contrib.admin',
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.messages',
        'django.contrib.staticfiles',
    ]
    user_apps = [app for app in settings.INSTALLED_APPS if app not in default_apps]
    # NOTE(review): "vars" shadows the builtin of the same name here.
    vars = {
        'urlpatterns': urlpatterns,
        'default_apps': default_apps,
        'user_apps': user_apps,
    }
    return render(request, 'home/home.html', vars)
| [
"techv76@gmail.com"
] | techv76@gmail.com |
611c6e6ae2a42d38f70e492aef54f8cf6ba89c50 | 6a37c82d14ca864389de13dca7e71b32cf98b861 | /clss/statsClass.py | 891926fd2d4f442a89cff17a1b5d79c33723c045 | [] | no_license | andrewnwebster/sportsStats_py | 39fdcaefe0380468fcba12727c4d28d66069427b | f87001155055bf79b1c0dda5e5a6a4988286e1a2 | refs/heads/master | 2022-07-08T00:42:29.494490 | 2022-07-04T21:27:48 | 2022-07-04T21:27:48 | 96,925,697 | 0 | 0 | null | 2022-07-04T21:27:49 | 2017-07-11T18:47:46 | Python | UTF-8 | Python | false | false | 43 | py | statsClass={
0:'Batting',
1:'Pitching',
} | [
"andrewnwebster@gmail.com"
] | andrewnwebster@gmail.com |
bf5cc25038b36bbd8db9b85a2521712b8946591a | 2775947a01c2b10671737eae47725435957890a5 | /to-be-implemented/vstruct/win32.py | 908eb8ab7618a154c12ecf06dbb1a4dddd3235df | [] | no_license | albertz/pydbattach | 7b4bd4b7b22ec3c0aa82b45ba29674d3c852a9a4 | bbcc187627fc80ae4bd6fc98eefe41316f722a91 | refs/heads/master | 2022-10-25T12:34:05.045449 | 2022-10-11T14:53:18 | 2022-10-11T14:53:18 | 1,798,590 | 79 | 10 | null | 2022-02-09T12:43:01 | 2011-05-25T11:52:27 | Python | UTF-8 | Python | false | false | 7,453 | py |
from vstruct.primitives import *
from vstruct import VStruct,VArray
DWORD = v_uint32
class NT_TIB(VStruct):
    # Win32 Thread Information Block layout; embedded at the start of TEB.
    _fields_ = [
        ("ExceptionList", v_ptr), # ExceptionRegistration structures.
        ("StackBase", v_ptr),
        ("StackLimit", v_ptr),
        ("SubSystemTib", v_ptr),
        ("FiberData", v_ptr),
        ("Version", v_ptr),
        ("ArbitraryUserPtr", v_ptr),
        ("Self", v_ptr)
    ]
class SEH3_SCOPETABLE(VStruct):
    # One scope-table entry of the MSVC SEH3 exception-handling layout.
    _fields_ = [
        ("EnclosingLevel", v_int32),
        ("FilterFunction", v_ptr),
        ("HandlerFunction", v_ptr),
    ]
class SEH4_SCOPETABLE(VStruct):
    """
    Much like the SEH3 scopetable with the stack cookie additions
    (the four *CookieOffset fields preceding the SEH3-style triple).
    """
    _fields_ = [
        ("GSCookieOffset", v_int32),
        ("GSCookieXOROffset", v_int32),
        ("EHCookieOffset", v_int32),
        ("EHCookieXOROffset", v_int32),
        ("EnclosingLevel", v_int32),
        ("FilterFunction", v_ptr),
        ("HandlerFunction", v_ptr),
    ]
class CLIENT_ID(VStruct):
    # Process/thread identifier pair (used by TEB.ClientId below).
    _fields_ = [
        ("UniqueProcess", v_ptr),
        ("UniqueThread", v_ptr)
    ]
class TebReserved32Array(VArray):
    # 26 x uint32 filler used for TEB.User32Reserved.
    _field_type_ = v_uint32
    _field_count_ = 26
class TebReservedArray(VArray):
    # 5 x uint32 filler used for TEB.UserReserved.
    _field_type_ = v_uint32
    _field_count_ = 5
class TEB(VStruct):
    # Thread Environment Block (partial definition -- see FIXME below).
    _fields_ = [
        ("TIB", NT_TIB),
        ("EnvironmentPointer", v_ptr),
        ("ClientId", CLIENT_ID),
        ("ActiveRpcHandle", v_ptr),
        ("ThreadLocalStorage", v_ptr),
        ("ProcessEnvironmentBlock", v_ptr),
        ("LastErrorValue", v_uint32),
        ("CountOfOwnedCriticalSections", v_uint32),
        ("CsrClientThread", v_ptr),
        ("Win32ThreadInfo", v_ptr),
        ("User32Reserved", TebReserved32Array),
        ("UserReserved", TebReservedArray),
        ("WOW32Reserved", v_ptr),
        ("CurrentLocale", v_uint32),
        ("FpSoftwareStatusRegister", v_uint32)
        #FIXME not done!
    ]
# Some necessary arrays for the PEB
class TlsExpansionBitsArray(VArray):
    # 32 x uint32 used for PEB.TlsExpansionBitmapBits.
    _field_type_ = v_uint32
    _field_count_ = 32
class GdiHandleBufferArray(VArray):
    # 34 pointers used for PEB.GdiHandleBuffer.
    _field_type_ = v_ptr
    _field_count_ = 34
class TlsBitMapArray(VArray):
    # 2 x uint32 used for PEB.TlsBitmapBits.
    _field_type_ = v_uint32
    _field_count_ = 2
class PEB(VStruct):
    # Process Environment Block layout as parsed by this vstruct library.
    _fields_ = [
        ("InheritedAddressSpace", v_uint8),
        ("ReadImageFileExecOptions", v_uint8),
        ("BeingDebugged", v_uint8),
        ("SpareBool", v_uint8),
        ("Mutant", v_ptr),
        ("ImageBaseAddress", v_ptr),
        ("Ldr", v_ptr),
        ("ProcessParameters", v_ptr),
        ("SubSystemData", v_ptr),
        ("ProcessHeap", v_ptr),
        ("FastPebLock", v_ptr),
        ("FastPebLockRoutine", v_ptr),
        ("FastPebUnlockRoutine", v_ptr),
        ("EnvironmentUpdateCount", v_uint32),
        ("KernelCallbackTable", v_ptr),
        ("SystemReserved", v_uint32),
        ("AtlThunkSListPtr32", v_ptr),
        ("FreeList", v_ptr),
        ("TlsExpansionCounter", v_uint32),
        ("TlsBitmap", v_ptr),
        ("TlsBitmapBits", TlsBitMapArray),
        ("ReadOnlySharedMemoryBase", v_ptr),
        ("ReadOnlySharedMemoryHeap", v_ptr),
        ("ReadOnlyStaticServerData", v_ptr),
        ("AnsiCodePageData", v_ptr),
        ("OemCodePageData", v_ptr),
        ("UnicodeCaseTableData", v_ptr),
        ("NumberOfProcessors", v_uint32),
        ("NtGlobalFlag", v_uint64),
        ("CriticalSectionTimeout",v_uint64),
        ("HeapSegmentReserve", v_uint32),
        ("HeapSegmentCommit", v_uint32),
        ("HeapDeCommitTotalFreeThreshold", v_uint32),
        ("HeapDeCommitFreeBlockThreshold", v_uint32),
        ("NumberOfHeaps", v_uint32),
        ("MaximumNumberOfHeaps", v_uint32),
        ("ProcessHeaps", v_ptr),
        ("GdiSharedHandleTable", v_ptr),
        ("ProcessStarterHelper", v_ptr),
        ("GdiDCAttributeList", v_uint32),
        ("LoaderLock", v_ptr),
        ("OSMajorVersion", v_uint32),
        ("OSMinorVersion", v_uint32),
        ("OSBuildNumber", v_uint16),
        ("OSCSDVersion", v_uint16),
        ("OSPlatformId", v_uint32),
        ("ImageSubsystem", v_uint32),
        ("ImageSubsystemMajorVersion", v_uint32),
        ("ImageSubsystemMinorVersion", v_uint32),
        ("ImageProcessAffinityMask", v_uint32),
        ("GdiHandleBuffer", GdiHandleBufferArray),
        ("PostProcessInitRoutine", v_ptr),
        ("TlsExpansionBitmap", v_ptr),
        ("TlsExpansionBitmapBits", TlsExpansionBitsArray),
        ("SessionId", v_uint32),
        ("AppCompatFlags", v_uint64),
        ("AppCompatFlagsUser", v_uint64),
        ("pShimData", v_ptr),
        ("AppCompatInfo", v_ptr),
        ("CSDVersion", v_ptr), # FIXME make wide char reader?
        ("UNKNOWN", v_uint32),
        ("ActivationContextData", v_ptr),
        ("ProcessAssemblyStorageMap", v_ptr),
        ("SystemDefaultActivationContextData", v_ptr),
        ("SystemAssemblyStorageMap", v_ptr),
        ("MinimumStackCommit", v_uint32),
    ]
class HEAP_ENTRY(VStruct):
    # Header that precedes each heap allocation; embedded at HEAP.Entry.
    _fields_ = [
        ("Size", v_uint16),
        ("PrevSize", v_uint16),
        ("SegmentIndex", v_uint8),
        ("Flags", v_uint8),
        ("Unused", v_uint8),
        ("TagIndex", v_uint8)
    ]
class ListEntry(VStruct):
    # Doubly-linked list node (forward/backward links).
    _fields_ = [
        ("Flink", v_ptr),
        ("Blink", v_ptr)
    ]
class HeapSegmentArray(VArray):
    # 64 x uint32 used for HEAP.Segments.
    _field_type_ = v_uint32
    _field_count_ = 64
class HeapUnArray(VArray):
    # 16 bytes used for HEAP.u.
    _field_type_ = v_uint8
    _field_count_ = 16
class HeapUn2Array(VArray):
    # 2 bytes used for HEAP.u2.
    _field_type_ = v_uint8
    _field_count_ = 2
class HeapFreeListArray(VArray):
    # 128 list heads used for HEAP.FreeLists.
    _field_type_ = ListEntry
    _field_count_ = 128
class HEAP(VStruct):
    # Win32 process heap header layout.
    _fields_ = [
        ("Entry", HEAP_ENTRY),
        ("Signature", v_uint32),
        ("Flags", v_uint32),
        ("ForceFlags", v_uint32),
        ("VirtualMemoryThreshold", v_uint32),
        ("SegmentReserve", v_uint32),
        ("SegmentCommit", v_uint32),
        ("DeCommitFreeBlockThreshold", v_uint32),
        ("DeCommitTotalFreeThreshold", v_uint32),
        ("TotalFreeSize", v_uint32),
        ("MaximumAllocationSize", v_uint32),
        ("ProcessHeapsListIndex", v_uint16),
        ("HeaderValidateLength", v_uint16),
        ("HeaderValidateCopy", v_ptr),
        ("NextAvailableTagIndex", v_uint16),
        ("MaximumTagIndex", v_uint16),
        ("TagEntries", v_ptr),
        ("UCRSegments", v_ptr),
        ("UnusedUnCommittedRanges", v_ptr),
        ("AlignRound", v_uint32),
        ("AlignMask", v_uint32),
        ("VirtualAllocBlocks", ListEntry),
        ("Segments", HeapSegmentArray),
        ("u", HeapUnArray),
        ("u2", HeapUn2Array),
        ("AllocatorBackTraceIndex",v_uint16),
        ("NonDedicatedListLength", v_uint32),
        ("LargeBlocksIndex", v_ptr),
        ("PseudoTagEntries", v_ptr),
        ("FreeLists", HeapFreeListArray),
        ("LockVariable", v_uint32),
        ("CommitRoutine", v_ptr),
        ("FrontEndHeap", v_ptr),
        ("FrontEndHeapLockCount", v_uint16),
        ("FrontEndHeapType", v_uint8),
        ("LastSegmentIndex", v_uint8)
    ]
class EXCEPTION_RECORD(VStruct):
    # Exception description; the variable-length trailing array is not
    # modeled (see the commented field below).
    _fields_ = [
        ("ExceptionCode", DWORD),
        ("ExceptionFlags", DWORD),
        ("ExceptionRecord", v_ptr), # Pointer to the next
        ("ExceptionAddress", v_ptr),
        ("NumberParameters", DWORD),
        #("ExceptionInformation", DWORD[NumberParameters])
    ]
class EXCEPTION_REGISTRATION(VStruct):
    # SEH registration frame: previous frame pointer + handler address.
    _fields_ = [
        ("prev", v_ptr),
        ("handler", v_ptr),
    ]
| [
"albert.zeyer@rwth-aachen.de"
] | albert.zeyer@rwth-aachen.de |
02a1828ac90ebe65e8530a9ceed5e2644a1b3e55 | ae4ee9f774cec305787a6f00fc9fb2fd628d3c22 | /connection.py | 78a6ce18ab603226ca7428064a5479ca458750e5 | [] | no_license | stevengritz/python_collection | 9c08579e36a9b04551fe1a4dc73070578104ea43 | b04b00b13c990c866f85221398e94f4cc5ad5074 | refs/heads/master | 2020-03-15T03:03:29.404301 | 2018-05-03T03:03:51 | 2018-05-03T03:03:51 | 131,933,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,518 | py | import json
import os
import getpass
import psycopg2
# Container for data base profile info
class DbConnectionHandler:
    """Loads a named Redshift/Postgres profile from db_profile.json and
    manages a psycopg2 connection/cursor for it.  (Python 2 code:
    ``print`` statements and ``raw_input``.)"""
    user = str()
    db_name = str()
    host = str()
    port = str()
    schema = str()
    table_prefix_ids = list()
    filter_out = dict()
    # Maps the raw_input answer to a boolean; anything else is treated as No.
    user_choice = {'Y': True, 'N': False}
    con = None
    cur = None
    def __init__(self):
        pass
    def load_profile(self, profile_name, directory, **optional):
        """Populate this handler from the profile, or interactively create
        a new one and recurse; exits with code 10 if the user declines.
        An already-open file object may be passed via ``fp=``."""
        if 'fp' in optional:
            j_file = optional['fp']
        else:
            j_path = directory + '/' + "db_profile.json"
            if os.path.isfile(j_path):
                j_file = open(directory + '/' + "db_profile.json", "r")
            else:
                # Seed an empty profile store so json.load succeeds below.
                j_file = open(directory + '/' + "db_profile.json", "w+")
                j_file.write('{\"profiles\":{}}')
                j_file.seek(0)
        profiles = json.load(j_file)
        if profile_name in profiles['profiles']:
            profile = profiles['profiles'][profile_name]
            self.user = profile['user']
            self.db_name = profile['db_name']
            self.host = profile['host']
            self.port = profile['port']
        else:
            create_new_profile = raw_input('Profile not found. '
                                           'Create new Redshift connection profile called %s? (Y/[N]) ' % profile_name)
            if self.user_choice.get(create_new_profile, False):
                new_profile = dict()
                new_profile['user'] = raw_input('Enter the username: ')
                new_profile['db_name'] = raw_input('Enter the database name (e.g. cidw): ')
                new_profile['host'] = raw_input('Enter the host/IP address: ')
                new_profile['port'] = raw_input('Enter the port number: ')
                new_profile['schema'] = raw_input('Enter the schema name: ')
                prefix_string = raw_input('Enter the filter prefixes, single space'
                                          ' between multiple (leave blank for none): ')
                new_profile['table_prefix_ids'] = prefix_string.split()
                # filter_string = raw_input('Enter the key/value pairs to be filtered out'
                #                           ' (leave blank for none): ')
                new_profile['filter_out'] = ''  # filter_string.split()
                profiles['profiles'][profile_name] = new_profile
                # Rewrite the whole store, then re-load the new profile.
                j_file.close()
                j_file = open(directory + '/' + "db_profile.json", "w+")
                json.dump(profiles, j_file)
                j_file.close()
                self.load_profile(profile_name, directory)
            else:
                quit(10)
    def establish_connection(self, user=None, passw=None):
        """Open an autocommit psycopg2 connection; prompts for the password
        when none is given."""
        if passw is None:
            print 'Enter password for user: %s on db: %s ...' % (self.user, self.db_name)
            pw = getpass.getpass()
        else:
            pw = passw
        if user is None:
            uname = self.user
        else:
            uname = user
        self.con = psycopg2.connect(dbname=self.db_name, host=self.host,
                                    port=self.port, user=uname, password=str(pw))
        self.con.set_session(autocommit=True)
        self.cur = self.con.cursor()
        print 'Connected to database name ', self.db_name
    def close_connection(self):
        # Close cursor first, then the connection.
        self.cur.close()
        self.con.close()
    def execute_query(self, query, query_params):
        # Runs a parameterized query on the open cursor (autocommit session).
        self.cur.execute(query, query_params)
| [
""
] | |
e2a55bfe430a7621292f4f904a9296bf6600504e | 204330274a2c5cd093028d72cec6890a8f6530c0 | /snake.py | 6cda71e3663a428d68184a7687dc12382f6366f7 | [] | no_license | Abhi51999/Snake-Game | de40feb82896feca6765600c2afa6f2579ddf5fc | 6ea16d0e1fc5eae6e1ba2e24782d4c87fb92f65b | refs/heads/main | 2023-09-03T13:44:33.215394 | 2021-11-20T12:45:27 | 2021-11-20T12:45:27 | 430,096,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,065 | py | # import required modules
import turtle
import time
import random
delay = 0.1        # seconds per frame; shrinks as the snake eats
score = 0
high_score = 0
# Creating a window screen
wn = turtle.Screen()
wn.title("Snake Game")
wn.bgcolor("blue")
# the width and height can be put as user's choice
wn.setup(width=600, height=600)
wn.tracer(0)  # manual redraws via wn.update() in the game loop
# head of the snake
head = turtle.Turtle()
head.shape("square")
head.color("white")
head.penup()
head.goto(0, 0)
head.direction = "Stop"  # custom attribute read by move()
# food in the game
food = turtle.Turtle()
colors = random.choice(['red', 'green', 'black'])
shapes = random.choice(['square', 'triangle', 'circle'])
food.speed(0)
food.shape(shapes)
food.color(colors)
food.penup()
food.goto(0, 100)
# hidden turtle used only to write the score text
pen = turtle.Turtle()
pen.speed(0)
pen.shape("square")
pen.color("white")
pen.penup()
pen.hideturtle()
pen.goto(0, 250)
pen.write("Score : 0 High Score : 0", align="center",
          font=("candara", 24, "bold"))
# assigning key directions
def goup():
    # Ignore the keypress if it would reverse the snake onto itself.
    if head.direction != "down":
        head.direction = "up"
def godown():
    # Ignore the keypress if it would reverse the snake onto itself.
    if head.direction != "up":
        head.direction = "down"
def goleft():
    # Ignore the keypress if it would reverse the snake onto itself.
    if head.direction != "right":
        head.direction = "left"
def goright():
    # Ignore the keypress if it would reverse the snake onto itself.
    if head.direction != "left":
        head.direction = "right"
def move():
    """Advance the head one 20 px step in its current direction."""
    direction = head.direction
    if direction == "up":
        head.sety(head.ycor() + 20)
    elif direction == "down":
        head.sety(head.ycor() - 20)
    elif direction == "left":
        head.setx(head.xcor() - 20)
    elif direction == "right":
        head.setx(head.xcor() + 20)
# Bind the WASD keys to the direction handlers.
wn.listen()
wn.onkeypress(goup, "w")
wn.onkeypress(godown, "s")
wn.onkeypress(goleft, "a")
wn.onkeypress(goright, "d")
segments = []  # tail pieces, the head-adjacent piece first
# Main Gameplay
while True:
    wn.update()
    # Border collision: pause, recentre the head and reset the score.
    if head.xcor() > 290 or head.xcor() < -290 or head.ycor() > 290 or head.ycor() < -290:
        time.sleep(1)
        head.goto(0, 0)
        head.direction = "Stop"
        # NOTE(review): colors/shapes are reassigned here but never applied
        # to the food turtle -- kept for fidelity with the original.
        colors = random.choice(['red', 'blue', 'green'])
        shapes = random.choice(['square', 'circle'])
        for segment in segments:
            segment.goto(1000, 1000)  # park dead tail pieces off-screen
        segments.clear()
        score = 0
        delay = 0.1
        pen.clear()
        pen.write("Score : {} High Score : {} ".format(
            score, high_score), align="center", font=("candara", 24, "bold"))
    # Food eaten: respawn food, grow the tail, speed up, update the score.
    if head.distance(food) < 20:
        x = random.randint(-270, 270)
        y = random.randint(-270, 270)
        food.goto(x, y)
        # Adding segment
        new_segment = turtle.Turtle()
        new_segment.speed(0)
        new_segment.shape("square")
        new_segment.color("orange")  # tail colour
        new_segment.penup()
        segments.append(new_segment)
        delay -= 0.001
        score += 10
        if score > high_score:
            high_score = score
        pen.clear()
        pen.write("Score : {} High Score : {} ".format(
            score, high_score), align="center", font=("candara", 24, "bold"))
    # Shift tail segments forward (each takes the position of the one ahead).
    for index in range(len(segments)-1, 0, -1):
        x = segments[index-1].xcor()
        y = segments[index-1].ycor()
        segments[index].goto(x, y)
    if len(segments) > 0:
        x = head.xcor()
        y = head.ycor()
        segments[0].goto(x, y)
    move()
    # Self collision: same reset as the border case.
    for segment in segments:
        if segment.distance(head) < 20:
            time.sleep(1)
            head.goto(0, 0)
            # BUG FIX: was the lowercase "stop"; use the same sentinel value
            # as everywhere else in the file.
            head.direction = "Stop"
            colors = random.choice(['red', 'blue', 'green'])
            shapes = random.choice(['square', 'circle'])
            for seg in segments:
                seg.goto(1000, 1000)
            # BUG FIX: the original called segment.clear() (a turtle drawing
            # method) inside the loop and never emptied the list, so dead
            # tail pieces accumulated after every self-collision.
            segments.clear()
            score = 0
            delay = 0.1
            pen.clear()
            pen.write("Score : {} High Score : {} ".format(
                score, high_score), align="center", font=("candara", 24, "bold"))
            break  # the tail list is now empty; stop scanning it
    time.sleep(delay)
# Unreachable after the infinite loop; kept from the original script.
wn.mainloop()
| [
"noreply@github.com"
] | Abhi51999.noreply@github.com |
222cf635654899e2f70d56d71f7342f179ea4fdd | a4753147801dbabfec45f6f9f47572cda77efb81 | /debugging-constructs/ibmfl/aggregator/fusion/dt_fusion_handler.py | 6888cde3062ef7e198ffce36cd05995513cf56c0 | [
"MIT"
] | permissive | SEED-VT/FedDebug | e1ec1f798dab603bd208b286c4c094614bb8c71d | 64ffa2ee2e906b1bd6b3dd6aabcf6fc3de862608 | refs/heads/main | 2023-05-23T09:40:51.881998 | 2023-02-13T21:52:25 | 2023-02-13T21:52:25 | 584,879,212 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,110 | py | """
Licensed Materials - Property of IBM
Restricted Materials of IBM
20221069
© Copyright IBM Corp. 2022 All Rights Reserved.
"""
import numpy as np
import logging
from ibmfl.model.dt_fl_model import DTFLModel
from ibmfl.model.model_update import ModelUpdate
from ibmfl.aggregator.fusion.fusion_handler import FusionHandler
from ibmfl.exceptions import HyperparamsException
logger = logging.getLogger(__name__)
class ID3FusionHandler(FusionHandler):
"""
Class for training decision tree type model in aggregator side
"""
    def __init__(self,
                 hyperparams,
                 proto_handler,
                 data_handler,
                 fl_model=None,
                 **kwargs):
        """
        Initializes an ID3FusionHandler object with provided
        hyperparams, data_handler and fl_model.

        :param hyperparams: Hyperparameters used for training
        :type hyperparams: `dict`
        :param proto_handler: Proto_handler that will be used to send message
        :type proto_handler: `ProtoHandler`
        :param data_handler: data handler that will be used to obtain data
        :type data_handler: `DataHandler`
        :param fl_model: (optional) model to be trained
        :type fl_model: `model.FLModel`
        :param kwargs: Additional arguments to initialize a fusion handler.
        :type kwargs: `Dict`
        :raises HyperparamsException: if the global hyperparameters are
            malformed (e.g. not subscriptable).
        """
        if fl_model is None:
            # Build an empty decision-tree model from the dataset spec.
            spec = data_handler.get_dataset_info()
            fl_model = DTFLModel(None, spec)
        super().__init__(hyperparams, proto_handler, data_handler, fl_model,
                         **kwargs)
        self.name = "ID3DecisionTreeFusion"
        try:
            if hyperparams['global'] is not None and \
                    'max_depth' in hyperparams['global']:
                self.max_depth = hyperparams['global']['max_depth']
            else:
                # Fall back to a conservative default depth.
                self.max_depth = 3
                logger.info('No maximum depth of the tree was provided, '
                            'max_depth is set to the default value ' +
                            str(self.max_depth))
        except Exception as e:
            logger.exception(str(e))
            raise HyperparamsException('Global hyperparameters are badly formed. '+str(e))
def reach_termination_criteria(self, root=None):
"""
Return True when termination criteria has been reached, otherwise
returns False.
Termination criteria is reached when the tree grows to its leaves and
there is nothing to be split.
:return: boolean
:rtype: 'boolean'
"""
if root is not None and root['leaf']:
return True
return False
def build_branch(self, node, current_list_of_features=None,
current_feature_values=None, splits=[]):
"""
Create a decision tree branch on a given node.
:param node: A given node to start building the tree
:type node: `dict`
:param current_list_of_features: (Optional) A list stores current \
list of features that waiting to be split.
:type current_list_of_features: `list`
:param current_feature_values: (Optional) A list stores the \
corresponding feature value range.
:type current_feature_values: `list`
:param splits: A list containing the tree split information, \
e.g. {[feature, feature_value]}
:type splits: `list`
:return: None
"""
if self.reach_termination_criteria(node):
logger.info('Reach leaf.')
return
if current_list_of_features is None:
current_list_of_features = self.fl_model.list_of_features[:]
if current_feature_values is None:
current_feature_values = self.fl_model.feature_values[:]
split_value = node['split']
split_index = current_list_of_features.index(split_value)
current_list_of_features.remove(
current_list_of_features[split_index])
logger.info('Deleting feature ' + str(split_index) +
' from list of features')
remove_feature_values = current_feature_values[split_index]
current_feature_values = \
current_feature_values[0:split_index] + \
current_feature_values[split_index + 1:]
logger.info('Deleting feature value ' + str(remove_feature_values)
+ ' from feature value list')
for feature_value in remove_feature_values:
curr_splits = splits[:]
curr_splits.append([split_value, feature_value])
self.fl_model.update_model(
new_list_of_features=current_list_of_features[:],
new_feature_values=current_feature_values[:])
node[feature_value] = self.build_node(curr_splits)
self.build_branch(node[feature_value],
current_list_of_features[:],
current_feature_values[:],
splits=curr_splits)
def build_node(self, splits=None):
    """
    Create a tree node based on parties information, splits and max_depth.

    Queries all parties for label counts under the current split path and
    either creates a leaf (no feature values left, or max_depth reached)
    or an internal node split on the best-scoring feature.

    :param splits: A list containing the tree split information, \
        e.g. {[feature_index, feature_value]}. \
        (Bugfix: was a mutable default argument ``splits=[]``.)
    :type splits: `list`
    :return: A decision tree node
    :rtype: `dict`
    """
    if splits is None:
        splits = []
    model = self.fl_model
    if len(model.feature_values) == 0 or len(splits) >= self.max_depth:
        # Leaf node: outcome is the majority label over aggregated counts.
        fit_params = {'split': splits,
                      'list_of_labels': model.list_of_labels
                      }
        lst_model_updates = self.query_all_parties(fit_params)
        model_updates = self.fusion_collected_responses(lst_model_updates)
        label_counts = model_updates.get("counts_info")
        return {'leaf': True,
                'counts': label_counts,
                'outcome': model.list_of_labels[
                    label_counts.index(max(label_counts))],
                'split': None}
    fit_params = {'split': splits[:],
                  'list_of_labels': model.list_of_labels,
                  'feature_values': model.feature_values,
                  'list_of_features': model.list_of_features
                  }
    lst_model_updates = self.query_all_parties(fit_params)
    model_updates = self.fusion_collected_responses(lst_model_updates)
    scores = []
    # Counts arrive flattened as (feature_value, label) pairs; reshape so
    # rows are labels and columns are feature values.
    all_label_counts = np.array(model_updates.get("counts_info"))
    all_label_counts = np.transpose(
        np.reshape(all_label_counts, [-1, len(model.list_of_labels)]))
    all_counts = np.sum(all_label_counts, axis=0)
    # Entropy-style score n * log2(n / N); the out=/where= pairs make both
    # 0/0 and 0*log(0) contribute 0 instead of NaN.
    all_scores = all_label_counts * np.log2(
        np.divide(all_label_counts, all_counts,
                  out=np.zeros_like(all_label_counts, dtype=float),
                  where=all_counts != 0),
        out=np.zeros_like(all_label_counts, dtype=float),
        where=all_label_counts != 0)
    score_per_feature_value = np.sum(all_scores, axis=0)
    # Sum the per-value scores belonging to each candidate feature.
    for feature_value in model.feature_values:
        score = np.sum(score_per_feature_value[0:len(feature_value)],
                       axis=0)
        score_per_feature_value = score_per_feature_value[
            len(feature_value):]
        scores.append(score)
    return {'leaf': False,
            'counts': None,
            'outcome': None,
            'split': model.list_of_features[scores.index(max(scores))]}
def start_global_training(self, root=None):
    """
    Create (or continue building) the decision tree model.

    :param root: (Optional) the root of the decision tree
    :type root: `dict`
    :return: None
    """
    # Bugfix: the original tested `root is None and ...` but its else-branch
    # always overwrote `root` with self.fl_model.tree_model, so an explicitly
    # passed root was silently ignored.  Only derive a root when the caller
    # did not supply one.
    if root is None:
        if len(self.fl_model.tree_model) == 0:
            root = self.build_node()
        else:
            # Resume from the tree stored on the model.
            root = self.fl_model.tree_model
    logger.info('Root of the tree is built :)')
    self.build_branch(root)
    self.fl_model.tree_model = root
def get_global_model(self):
    """
    Wrap the tree currently stored on the fl_model object.

    :return: A model update containing the tree structure
    :rtype: `ModelUpdate`
    """
    # Return directly instead of going through a temporary variable.
    return ModelUpdate(tree_model=self.fl_model.tree_model)
def fusion_collected_responses(self, lst_model_updates):
    """
    Sum the label counts carried by a list of `ModelUpdate` objects.

    :param lst_model_updates: model updates whose counts are summed up.
    :type lst_model_updates: `list`
    :return: Model update with the element-wise sum of all counts
    :rtype: `ModelUpdate`
    """
    # Collect each party's counts, then reduce them in one vectorised sum.
    gathered = [update.get('counts_info') for update in lst_model_updates]
    totals = np.sum(np.array(gathered), axis=0)
    return ModelUpdate(counts_info=totals.tolist())
| [
"waris@vt.edu"
] | waris@vt.edu |
1ea56982e06ea570f07156dad35b21be2ac0992e | 2f24ac1364f5aa644dbf56383079d23be938c2db | /configs/litehrnet_320k/fcn_litehr18-with-head_512x1024_8x2_320k_cityscapes.py | 7ff7db386f0242a7fce3a2175c46bb600d64fa3b | [
"Apache-2.0"
] | permissive | kingloo2014/LiteHRNet | 05a94b07446d4141af5c818a01b22a3cb5ec027b | e2b13de52e970215be566067cab7bd880010f062 | refs/heads/master | 2023-07-19T15:10:34.808470 | 2021-09-08T14:59:24 | 2021-09-08T14:59:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | _base_ = [
'../_base_/models/fcn_litehr18-with-head.py',
'../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py',
'../_base_/schedules/schedule_320k.py'
]
| [
"hejunjun@sjtu.edu.cn"
] | hejunjun@sjtu.edu.cn |
ff5c3284055fb811959033ba46debd329c10e859 | 8f16a24e48af449759bc96d1db60c6636d1f2500 | /rewatch.py | e1244fc72365f1df863bdf263c3d6e88f3984477 | [] | no_license | popcorncolonel/JakeandAmirBot | e751bd0d0fa8488fbeb1841aa474ce572524d0fd | 54a53d9438fca4abd0a7f066326261fb3e064844 | refs/heads/master | 2023-05-13T20:04:49.365077 | 2023-05-07T02:34:00 | 2023-05-07T02:34:00 | 33,703,419 | 8 | 1 | null | 2020-12-30T13:30:58 | 2015-04-10T02:15:35 | Python | UTF-8 | Python | false | false | 936 | py | import random
with open('episodes.txt', 'r') as f:
episodes = list(f)[1:]
episodes = [x.split('|')[1:-1] for x in episodes]
class Episode(object):
    """One episode record parsed from the pipe-delimited episodes file."""

    def __init__(self, date_str, title, url, duration, bonus_footage):
        # Store the raw string fields unchanged.
        self.date_str = date_str
        self.title = title
        self.url = url
        self.duration = duration
        self.bonus_footage = bonus_footage

    def __str__(self):
        # Human-readable form: "<title> - <date>".
        return '%s - %s' % (self.title, self.date_str)

    def __repr__(self):
        return str(self)
# converts from list form to object form
def transform(episode):
    """Convert one raw field list (from the split episodes file) to an Episode.

    Field layout: 0=date, 1=title, 2=url, 4=duration, 5=bonus footage
    (empty string becomes None); index 3 is unused.
    """
    date_str, title, url = episode[0], episode[1], episode[2]
    bonus = episode[5] or None
    return Episode(date_str, title, url, episode[4], bonus)
episodes = [transform(episode) for episode in episodes]
# https://gdata.youtube.com/feeds/api/videos?q=jake+and+amir+notified&max-results=2&v=2&alt=json
if __name__ == '__main__':
    # Demo: pick one random episode and print it plus its parsed fields.
    episode = random.choice(episodes)
    print(episode)
    print(episode.__dict__)
| [
"popcorncolonel@gmail.com"
] | popcorncolonel@gmail.com |
91f430f50755bd8fd1704a565306dd0b4a6b47e6 | ae552521df76c6e3d35d1f136b2e667363808421 | /aomp/aomp/cmdb/server_template_dict.py | 28566df557160fb61b7cee61f85265b60e361d40 | [] | no_license | BiYiTuan/devops-1 | dfc3be249f8c4972c58bb5e1b44f55186fe50deb | 58c0dfe420e2367dff1820e78d73d97342e3ed9e | refs/heads/master | 2021-01-22T13:31:33.672519 | 2014-03-31T08:48:16 | 2014-03-31T08:48:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | #!/usr/bin/python
# encoding: utf-8
__authors__ = ['left']
__version__ = 1.0
__date__ = '2014-01-12 15:34:38'
__licence__ = 'GPL licence'
from aomp.cmdb.models import Server_template
def server_template_dict():
    """Build {template_id: [name, model, cpu, mem, disk]} for all templates.

    Also rebinds the module-level global ``template_all_dict`` (kept for
    compatibility with existing callers that read the global).
    """
    global template_all_dict
    template_all_dict = {}
    for tpl in Server_template.objects.all():
        key = int(tpl.id)
        # First occurrence of an id wins, as before.
        if key not in template_all_dict:
            template_all_dict[key] = [
                tpl.template_name,
                tpl.template_model,
                tpl.template_cpu,
                tpl.template_mem,
                tpl.template_disk,
            ]
    return template_all_dict
def server_template_price():
    """Build {template_id: [name, price]} for all templates.

    Also rebinds the module-level global ``server_template_id_price``
    (kept for compatibility with existing callers).
    """
    global server_template_id_price
    server_template_id_price = {}
    for tpl in Server_template.objects.all():
        # First occurrence of an id wins, as before.
        if tpl.id not in server_template_id_price:
            server_template_id_price[tpl.id] = [tpl.template_name,
                                                tpl.template_money]
    return server_template_id_price
| [
"chenlijun@hoolai.com"
] | chenlijun@hoolai.com |
87a95868383862865aaa76c7f00e32695dc68955 | bb373604f1b17f3ea4030dfd98eebad6d03da2ff | /utils/tokenizer.py | a3ba38049444c5b8f82630370a8ddc6e9b0329f7 | [] | no_license | dystudio/classic_chinese_punctuate | 43d348625e55e8ecb24ef469fd1dfe516135e23b | 9a657690fdd203e370957ecf6574a1a5d63a5062 | refs/heads/master | 2023-03-26T22:30:09.366543 | 2018-11-27T10:44:03 | 2018-11-27T10:44:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,602 | py | # encoding: utf-8
"""
@author: BrikerMan
@contact: eliyar917@gmail.com
@blog: https://eliyar.biz
@version: 1.0
@license: Apache Licence
@file: tokenizer
@time: 2018/11/24
"""
import os
import random
import json
import h5py
import numpy as np
import tqdm
from typing import List, Dict
from keras.preprocessing import sequence
from utils.embedding import Word2Vec
from utils.macros import PAD, BOS, EOS, UNK, NO_TAG
from utils import helper
class Tokenizer(object):
    """Character-level tokenizer for the punctuation task.

    Maps characters and punctuation labels to integer ids, persists the
    vocabularies as JSON, and can pre-tokenize a whole corpus into an
    HDF5 dataset of padded (x, y) id sequences.
    """

    # Special-token strings (re-exported from utils.macros).
    PAD = PAD
    BOS = BOS
    EOS = EOS
    UNK = UNK
    NO_TAG = NO_TAG
    # Reserved ids.  NO_TAG_ID deliberately shares the value 1 with BOS_ID:
    # they belong to different vocabularies (labels vs. words).
    PAD_ID = 0
    BOS_ID = 1
    EOS_ID = 2
    UNK_ID = 3
    NO_TAG_ID = 1

    def __init__(self):
        # Vocabularies stay empty until build()/load()/load_gensim().
        self.url = ''
        self.word2idx = {}
        self.idx2word = {}
        self.labels2idx = {}
        self.idx2labels = {}
        # Fixed sequence length used for padding in tokenize_files().
        self.max_length = 100
        # Optional gensim word2vec wrapper, set by load_gensim().
        self.w2v = None

    def class_weights(self):
        # Per-class loss weights: padding almost ignored, 'no tag' down-weighted.
        # NOTE(review): the weight order follows labels2idx key insertion order;
        # confirm this matches the model's label axis.
        base_weight = {
            helper.macros.PAD: 0.01,
            helper.macros.NO_TAG: 0.7
        }
        weights = [base_weight.get(i, 1) for i in self.labels2idx]
        return np.asarray(weights)

    def build(self,
              corpus_path: str,
              tokenizer_path: str,
              label_only=False,
              min_accor=3):
        """Scan the corpus, build word/label vocabularies, and dump them as
        JSON under tokenizer_path.  Words seen fewer than min_accor times are
        dropped (and will map to UNK at tokenize time)."""
        if not label_only:
            file_list = helper.get_all_files(corpus_path)
            word2count = {}
            for file in tqdm.tqdm(file_list, 'building tokens'):
                lines = open(file, 'r', encoding='utf-8').read().splitlines()
                for line in lines:
                    x, _ = helper.format_line(line)
                    # NOTE(review): counts characters of the raw line, not of
                    # the cleaned `x` above (which is unused here), so
                    # punctuation also enters the vocabulary — confirm intended.
                    for word in line:
                        word2count[word] = word2count.get(word, 0) + 1
            # Special tokens claim the first four ids.
            self.word2idx = {
                Tokenizer.PAD: Tokenizer.PAD_ID,
                Tokenizer.BOS: Tokenizer.BOS_ID,
                Tokenizer.EOS: Tokenizer.EOS_ID,
                Tokenizer.UNK: Tokenizer.UNK_ID,
            }
            # Most frequent words get the smallest ids.
            sorted_word2count = [(k, word2count[k]) for k in sorted(word2count, key=word2count.get, reverse=True)]
            for word, count in sorted_word2count:
                if count >= min_accor:
                    self.word2idx[word] = len(self.word2idx)
        # Label vocabulary: PAD, NO_TAG, then one id per punctuation mark.
        label2count = {
            helper.macros.PAD: 0,
            helper.macros.NO_TAG: 1
        }
        for mark in helper.TARGET_CHARS:
            label2count[mark] = len(label2count)
        self.labels2idx = {
            Tokenizer.PAD: Tokenizer.PAD_ID,
            Tokenizer.NO_TAG: Tokenizer.NO_TAG_ID
        }
        for k, v in label2count.items():
            if k not in self.labels2idx:
                self.labels2idx[k] = len(self.labels2idx)
        helper.make_dir_if_needs(os.path.join(tokenizer_path, 'word2idx.json'))
        if not label_only:
            with open(os.path.join(tokenizer_path, 'word2idx.json'), 'w', encoding='utf-8') as w2idx:
                w2idx.write(json.dumps(self.word2idx, indent=2, ensure_ascii=False))
        with open(os.path.join(tokenizer_path, 'labels2idx.json'), 'w', encoding='utf-8') as l2idx:
            l2idx.write(json.dumps(self.labels2idx, indent=2, ensure_ascii=False))
        print('-------- tokenize finished ----------')
        print('word count : {}'.format(len(self.word2idx)))
        print('label count: {}'.format(len(self.labels2idx)))
        print('use tokenizer by `tokenizer.load(\'{}\')`'.format(tokenizer_path))
        print('-------- tokenize finished ----------')

    def load(self, tokenizer_path):
        """Load word/label vocabularies previously written by build()."""
        self.word2idx = json.load(open(os.path.join(tokenizer_path, 'word2idx.json'), 'r', encoding='utf-8'))
        self.labels2idx = json.load(open(os.path.join(tokenizer_path, 'labels2idx.json'), 'r', encoding='utf-8'))
        # Inverse mappings for de-tokenization.
        self.idx2word = dict([(v, k) for (k, v) in self.word2idx.items()])
        self.idx2labels = dict([(v, k) for (k, v) in self.labels2idx.items()])

    def load_gensim(self, w2v_path):
        """Take the word vocabulary from a gensim word2vec model, and the
        label vocabulary from labels2idx.json in the same directory."""
        self.w2v = Word2Vec()
        self.w2v.load_gensim(w2v_path)
        self.word2idx = self.w2v.word2idx
        self.idx2word = self.w2v.idx2word
        self.labels2idx = json.load(open(os.path.join(w2v_path, 'labels2idx.json'), 'r', encoding='utf-8'))
        self.idx2labels = dict([(v, k) for (k, v) in self.labels2idx.items()])

    def tokenize(self, text, padding=True) -> List[int]:
        """Map each character to its id (UNK for out-of-vocabulary);
        optionally wrap the sequence in BOS/EOS ids."""
        tokens = []
        for char in text:
            tokens.append(self.word2idx.get(char, Tokenizer.UNK_ID))
        if padding:
            tokens = [Tokenizer.BOS_ID] + tokens + [Tokenizer.EOS_ID]
        return tokens

    def de_tokenize(self, tokens: List[int], remove_padding=True) -> List[str]:
        """Map ids back to characters, optionally stripping BOS/EOS."""
        text = []
        for token in tokens:
            text.append(self.idx2word[token])
        if remove_padding:
            if text[-1] == Tokenizer.EOS:
                text = text[:-1]
            if text[0] == Tokenizer.BOS:
                text = text[1:]
        return text

    def label_tokenize(self, labels, padding=True) -> List[int]:
        """Map each label string to its id; optionally pad both ends with
        NO_TAG, mirroring the BOS/EOS added by tokenize()."""
        tokens = []
        for char in labels:
            tokens.append(self.labels2idx[char])
        if padding:
            tokens = [Tokenizer.NO_TAG_ID] + tokens + [Tokenizer.NO_TAG_ID]
        return tokens

    def label_de_tokenize(self,
                          tokens: List[int],
                          remove_padding: bool=True,
                          length: int=None) -> List[str]:
        """Map label ids back to strings.

        :param length: if given, truncate to the original (unpadded)
            sequence length + 2 padding positions before decoding.
        """
        text = []
        if length:
            tokens = tokens[:length+2]
        for token in tokens:
            text.append(self.idx2labels[token])
        if remove_padding:
            # Drop the two NO_TAG positions added by label_tokenize().
            text = text[1:-1]
        return text

    def tokenize_files(self, files_path, data_path) -> Dict:
        """Tokenize every corpus file into an HDF5 dataset of padded (x, y)
        id sequences; returns per-sample length information."""
        h5_path = os.path.join(data_path, 'dataset.h5')
        h5 = h5py.File(h5_path, 'a')
        data_info = {
            'length': []
        }
        try:
            h5.create_dataset('x',
                              shape=(500, self.max_length),
                              maxshape=(None, self.max_length),
                              dtype=np.int32,
                              chunks=True)
            h5.create_dataset('y',
                              shape=(500, self.max_length),
                              maxshape=(None, self.max_length),
                              dtype=np.int32,
                              chunks=True)
        except:
            # Datasets already exist (file opened in append mode).
            pass
        current_index = 0
        for file in tqdm.tqdm(helper.get_all_files(files_path),
                              desc='processing files'):
            x_padded, y_padded, x_list, y_list = self.process_by_file(file)
            for item in x_list:
                data_info['length'].append(len(item))
            new_index = current_index + len(x_padded)
            # NOTE(review): datasets start with 500 pre-allocated rows and are
            # only resized once new_index exceeds 500 — a smaller corpus leaves
            # zero-filled tail rows in the file; confirm this is intended.
            if new_index > 500:
                h5['x'].resize((new_index, self.max_length))
                h5['y'].resize((new_index, self.max_length))
            h5['x'][current_index:new_index] = x_padded
            h5['y'][current_index:new_index] = y_padded
            current_index = new_index
        # Print one random sample as a sanity check.
        sample_index = random.randint(0, len(h5['x']))
        print('-------- tokenize data finished --------')
        print('dataset path : {}'.format(os.path.abspath(h5_path)))
        print('sample x : {}'.format(h5['x'][sample_index]))
        print('sample y : {}'.format(h5['y'][sample_index]))
        print('----------------------------------------')
        h5.close()
        return data_info

    def process_by_file(self, file_path, min_lengh=8):
        """Read one file; return (padded x, padded y, raw x lists, raw y lists).

        NOTE(review): the `min_lengh` parameter is never used — the length
        filter below hard-codes 8; confirm whether it should be min_lengh.
        """
        lines = open(file_path, 'r', encoding='utf-8').read().splitlines()
        x_list = []
        y_list = []
        for line in lines:
            line = line.strip()
            if line:
                x, y = format_line(line)
                # Keep only consistent, sufficiently long samples.
                if len(x) == len(y) and len(x) > 8:
                    x_list.append(self.tokenize(x))
                    y_list.append(self.label_tokenize(y))
        x_padded = sequence.pad_sequences(x_list, maxlen=self.max_length, padding='post')
        y_padded = sequence.pad_sequences(y_list, maxlen=self.max_length, padding='post')
        return x_padded, y_padded, x_list, y_list
def format_line(text):
    """Split a raw line into characters and per-character punctuation labels.

    Each Chinese character becomes one entry of the input sequence with the
    default label 'O'; when a punctuation mark from helper.TARGET_CHARS is
    encountered, it replaces the label of the preceding character.

    :param text: raw text line
    :return: (chars, labels) — two parallel lists of equal length
    """
    # Removed the original no-op statement `text = text`.
    target_x = []
    target_label = []
    for char in text:
        if helper.chinese_regex.match(char):
            target_x.append(char)
            target_label.append('O')
        elif char in helper.TARGET_CHARS and len(target_label) > 0:
            # Punctuation labels the character that precedes it.
            target_label[-1] = char
    return target_x, target_label
if __name__ == '__main__':
print("hello, world") | [
"eliyar917@gmail.com"
] | eliyar917@gmail.com |
4d4d51aa814dfd29d50290261d6d9ce681a302e8 | d2c4151eff768af64946ababc2e41c13d8973cd3 | /ARC105/a.py | f9f21d8dd457a05c96bd9eb45c5f8bcd344e63e9 | [] | no_license | Intel-out-side/AtCoder | 2de19b71981247135432aed2d6d9c2a16c3ab7f0 | 0c419d2df15fff02032432cb1b1323612484e16e | refs/heads/master | 2022-06-23T04:21:12.886072 | 2022-06-13T14:39:07 | 2022-06-13T14:39:07 | 235,240,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | import math
N = int(input())
# Search for positive integers a, b with 3**a + 5**b == N.
for a in range(1, 100):
    # Remainder that must equal some power of 5.
    tmp = N - 3**a
    if tmp < 5:
        # 3**a already too large; larger a only shrinks tmp, so no solution.
        print(-1)
        exit()
    for b in range(1, 100):
        if 5**b == tmp:
            print(a, b)
            exit()
print(-1)
| [
"so.eng.eng.1rou@gmail.com"
] | so.eng.eng.1rou@gmail.com |
6c0587343bdec26863a772ac204092f93b6649a7 | f5accbce7661c1682e4a0b5983c3fc491c9f7b2d | /copy_messages.py | 4e864f0e1d4167a15cf0f0fea10da7f3a5426b6c | [] | no_license | Kasden45/wordcloud-from-messenger | c5f3ec19b3d6a1683b09b8300586eb510408d611 | a052c79aa0b2e1f7a3fc7a2199f89130a5ee38ae | refs/heads/main | 2023-04-01T08:50:17.291550 | 2021-03-27T20:48:19 | 2021-03-27T20:48:19 | 351,871,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | import fnmatch
import os
from shutil import copyfile, copy
from tkinter import filedialog, Tk
if __name__ == '__main__':
    window = Tk()
    # Ask the user where the collected message files should be copied.
    targetPath = filedialog.askdirectory(parent=window,
                                         initialdir=os.getcwd(),
                                         title="Choose destination")
    try:
        # Mirror each sub-directory under targetPath and copy every file
        # whose name starts with "message" into the mirrored directory.
        # NOTE(review): os.listdir(sub_dir) is resolved relative to the
        # current directory, so this only works for top-level directories
        # yielded by os.walk — confirm the intended depth.
        for _, dirs, _ in os.walk(os.curdir):
            for sub_dir in dirs:  # renamed from `dir` (shadowed a builtin)
                print(sub_dir)
                if not os.path.isdir(targetPath + '/' + sub_dir):
                    os.mkdir(targetPath + '/' + sub_dir)
                print("listdir:", os.listdir(sub_dir))
                for filename in os.listdir(sub_dir):
                    if fnmatch.fnmatch(filename, 'message*'):
                        print(os.curdir + '/' + sub_dir + '/' + filename)
                        copy(os.curdir + '/' + sub_dir + '/' + filename,
                             targetPath + '/' + sub_dir)
    # Bugfix: the original read `catch Exception as e:` which is not valid
    # Python syntax — the keyword is `except`.
    except Exception as e:
        print(e)
"noreply@github.com"
] | Kasden45.noreply@github.com |
bb5260b71015d345a88ae42ed0488418d428fac1 | 1f3bed0bb480a7d163dab73f1d315741ecbc1072 | /vtkplotter_examples/other/trimesh/section.py | 5dbcbdb40fe1f58a43833bc7fec62a29fa4cd2b8 | [
"MIT"
] | permissive | ismarou/vtkplotter-examples | 1ce78197182da7496b016b27f1d5eb524c49cac6 | 1eefcc026be169ab7a77a5bce6dec8044c33b554 | refs/heads/master | 2021-03-11T18:43:22.313457 | 2020-03-03T22:11:25 | 2020-03-03T22:11:25 | 246,551,341 | 4 | 0 | null | 2020-03-11T11:18:48 | 2020-03-11T11:18:47 | null | UTF-8 | Python | false | false | 1,721 | py | import trimesh
import numpy as np
from vtkplotter import show, Plane, Text2D, printc, download
# load the mesh from filename, file objects are also supported
# (download fetches the sample STL over the network and caches it locally)
f = download('https://github.com/mikedh/trimesh/raw/master/models/featuretype.STL')
mesh = trimesh.load_mesh(f)
# get a single cross section of the mesh
txt = Text2D('cross section of the mesh', c='k')
mslice = mesh.section(plane_origin=mesh.centroid, plane_normal=[0,0,1])
# semi-transparent plane used only to visualise where the cut happens
pl = Plane(mesh.centroid, normal=[0,0,1], sx=6, sy=4, alpha=0.3)
slice_2D, to_3D = mslice.to_planar()
# show objects on N=2 non-synced renderers:
show([(mesh, pl), (slice_2D, txt)], N=2, sharecam=False, axes=True)
# if we wanted to take a bunch of parallel slices, like for a 3D printer
# we can do that easily with the section_multiplane method
# we're going to slice the mesh into evenly spaced chunks along z
# this takes the (2,3) bounding box and slices it into [minz, maxz]
z_extents = mesh.bounds[:,2]
# slice every .125 model units (eg, inches)
z_levels = np.arange(*z_extents, step=0.125)
# find a bunch of parallel cross sections
sections = mesh.section_multiplane(plane_origin=mesh.bounds[0],
                                   plane_normal=[0,0,1],
                                   heights=z_levels)
N = len(sections)
printc("nr. of sections:", N, c='green')
# summing the array of Path2D objects will put all of the curves
# into one Path2D object, which we can plot easily
combined = np.sum(sections)
sections.append([combined, Text2D('combined')])
# show objects in N synced renderers:
show(sections, N=N, axes=True, newPlotter=True)
# the medial axis is available for closed Path2D objects
show(slice_2D + slice_2D.medial_axis(), axes=True, newPlotter=True)
| [
"marco.musy@gmail.com"
] | marco.musy@gmail.com |
3dc9519fbdd363764163d3eefaebd7907a2214a1 | ac0957824d2730170603b6af26e38177965208a6 | /build/beginner_tutorials/catkin_generated/pkg.develspace.context.pc.py | fc1d02a638249646cd9aab97cb2ef6c8c924b854 | [] | no_license | JelenaKiblik/jekibl-rtech | 6c9c0ee78e4a2bf539ecac9f050110e96551171f | a3b4ef8bdfaba64a1209d695db78b6b7d7074c19 | refs/heads/master | 2020-08-01T06:29:20.727647 | 2019-11-17T10:38:14 | 2019-11-17T10:38:14 | 210,897,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context: pkg-config-style variables for the
# devel space of the beginner_tutorials package.  Do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
# Semicolon-separated include dirs, split into a list (empty string -> []).
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ubuntu/jekibl-rtech/devel/include".split(';') if "/home/ubuntu/jekibl-rtech/devel/include" != "" else []
# Catkin dependencies, space-separated (none for this package).
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "beginner_tutorials"
PROJECT_SPACE_DIR = "/home/ubuntu/jekibl-rtech/devel"
PROJECT_VERSION = "0.0.0"
| [
"jekibl@ttu.ee"
] | jekibl@ttu.ee |
0784e2992c7df25988e3d41856480772a95484ca | a59b09aa933c649718d694a90dd05350aa0b0d76 | /kwiklib/dataio/tests/test_tools.py | 9f98e1efad8cedae46bfb7c572a6dd5b8a7342dc | [
"BSD-3-Clause"
] | permissive | Davidjtitus/kwiklib | d1dce69586bda264886726c2590827635cde58b8 | 617a6ceff55957728c3dc94109b64e4c427429c2 | refs/heads/master | 2021-04-04T21:21:41.352478 | 2017-02-10T18:30:38 | 2017-02-10T18:30:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,034 | py | """Unit tests for dataio.tools module."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import tempfile
import numpy as np
from kwiklib.dataio import (normalize, find_filename, save_text,
MemMappedText, load_text, save_binary, read_dat)
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
def test_normalize():
    """normalize() should rescale its input to the requested range."""
    values = np.array([.5, .75, 1.])
    # default: map [min, max] onto [-1, 1]
    assert np.array_equal(normalize(values), [-1, 0, 1])
    # explicit target range
    assert np.array_equal(normalize(values, range=(0, 1)), [0, 0.5, 1])
    # symmetric normalization keeps 0 fixed, so this data is unchanged
    assert np.array_equal(normalize(values, symmetric=True), values)
def test_memmap_text():
    """Round-trip: save_text() then MemMappedText yields rows back in order."""
    folder = tempfile.gettempdir()
    filename = os.path.join(folder, 'memmap')
    # Larger than one buffer so the memmap has to refill at least once.
    x = np.random.randint(size=(MemMappedText.BUFFER_SIZE + 1000, 10),
                          low=0, high=100)
    save_text(filename, x)
    m = MemMappedText(filename, np.int32)
    # next() returns one row at a time and None when exhausted.
    l = m.next()
    i = 0
    while l is not None:
        assert np.array_equal(l, x[i, :])
        i += 1
        l = m.next()
def test_memmap_numpy():
    """Round-trip: save_binary() then read_dat() memmaps the same data."""
    folder = tempfile.gettempdir()
    filename = os.path.join(folder, 'memmapb')
    dtype = np.int16
    # Simulated recording: 20 kHz for 10 s across 32 channels.
    freq = 20000.
    duration = 10.
    nchannels = 32
    nsamples = int(freq * duration)
    x = np.random.randint(size=(nsamples, nchannels),
                          low=0, high=1000).astype(dtype)
    save_binary(filename, x)
    m = read_dat(filename, nchannels=nchannels, dtype=dtype)
    # Strided slices in both dimensions exercise the memmap indexing.
    slices = (slice(1000, 10000, 4), slice(2, 30, 3))
    assert m.shape == x.shape
    np.testing.assert_equal(x[slices], m[slices])
| [
"cyrille@cyrille"
] | cyrille@cyrille |
ee3ba80bf26c3679f11eb08d20cba0a400f60a59 | cf47fe0bf264ed24fc2966f37c20e0e63de7d336 | /bin/ignore-lines | 94e635aa6d1cf821e2d84a649ff2071356ee0b6b | [] | no_license | amandoon/datakit | e763fdd2cd5c158071ac14a807ee1ed46df25e90 | 2c3bc2351b501502353a8cf1e33bfcf2e4438a78 | refs/heads/master | 2021-01-10T14:30:22.483857 | 2016-03-10T05:57:20 | 2016-03-10T05:57:20 | 49,613,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | #!/usr/bin/python
from __future__ import print_function
import sys
import argparse
from argparse import RawTextHelpFormatter
import re
def process(lines_to_ignore):
    """Copy stdin to stdout, dropping the 1-based line numbers given in the
    comma-separated string *lines_to_ignore* (e.g. "1,10")."""
    skip = set(lines_to_ignore.split(","))
    for number, line in enumerate(sys.stdin, start=1):
        if str(number) not in skip:
            # Lines keep their own newline, so suppress print's.
            print(line, end="")
def process_command_line_args():
    """Parse sys.argv into the module-level `args` namespace (side effect)."""
    global args
    epilog = """
Notes:
Example
To ignore lines 1 and 10 run command
ignore_lines 1,10
"""
    # RawTextHelpFormatter preserves the epilog's line breaks in --help output.
    parser = argparse.ArgumentParser(description='This script ignores '
                                     'lines by number',
                                     formatter_class=RawTextHelpFormatter,
                                     epilog=epilog)
    parser.add_argument('lines_to_ignore',
                        help='Enter comma delimited list of lines to ignore.')
    args = parser.parse_args()
if __name__ == '__main__':
    # Parse CLI args (sets the global `args`), then filter stdin to stdout.
    process_command_line_args()
    process(args.lines_to_ignore)
| [
"MBansal@W7E210081.inspinc.ad"
] | MBansal@W7E210081.inspinc.ad | |
7e25132634dff914017eb55bff8d94d0909d5b28 | 796b3e24d197689b065e076be5d18a0f631340f6 | /submissionFolder/double_dqn_bot.py | b88df084dedfec68bf5167c585af519c1f17fb03 | [] | no_license | mattthelee/Marlo-Double-DQN | 51758bef48e17181a68ea223f77b771148c0c0be | 86ad7a09f030a9c83dbd8f1b89573fe899ad74c5 | refs/heads/master | 2020-04-05T03:23:53.105178 | 2018-12-13T23:05:18 | 2018-12-13T23:05:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,233 | py | import marlo
import numpy as np
import random
from keras.models import Sequential, load_model
from keras.layers import Dense
from keras.layers import Conv2D
from keras.layers import MaxPooling2D,Flatten, AveragePooling2D
from collections import deque
from keras.models import model_from_yaml
from matplotlib import pyplot as plt
from past.utils import old_div # tutorial 5
import MalmoPython
import sys
import utils
import csv
from time import sleep
import pdb
from keras.backend import manual_variable_initialization
def trainAgent(env, agent):
    """Train `agent` on the Marlo `env` with per-step experience replay.

    Runs up to `initial_games` episodes of at most `goal_steps` steps,
    appending per-game stats to a CSV and periodically syncing the
    secondary (target) network.  Returns the deque of recent game scores.
    """
    # Train the agent given
    # Maximum steps to take before telling agent to give up
    goal_steps = 100
    # How many games to train over
    initial_games = 10000
    # Batch for back-propagation
    batch_size = 16
    scores = deque(maxlen=50)
    results = []
    # Loop over the games initialised
    for i in range(initial_games):
        reward = 0
        game_score = 0
        # Short wait required to prevent loss of connection to marlo
        sleep(2)
        env.reset()
        state = env.last_image
        # For each step take an action and perform exprience replay
        for j in range(goal_steps):
            print("Starting goal step: ", j + 1, " of game: ", i + 1, " avg score: ", np.mean(scores))
            # Choose action
            action = agent.act(state)
            # Run action and get response from env
            new_state, reward, done, info = env.step(action)
            # Useful debug line: print(f"Taking action {action}, got reward: {reward}")
            # Adds this state, action, new state to memory
            agent.memory.append((state,action, reward, new_state, done))
            # Record gamescore for analysis
            game_score += reward
            # If game is done we break from loop and store score
            if done:
                # Score is the scores for finished games
                print("Game: ",i ," complete, score: " , game_score," last 50 scores avg: ", np.mean(scores), " epsilon ", agent.epsilon)
                scores.append(game_score)
                break
            state = new_state
            # NOTE(review): oldInfo is only assigned on non-terminal steps; if
            # the very first step of the first game is terminal, the
            # results.append below would raise NameError — confirm.
            oldInfo = info
            # If we don't have enough memory for a batch, don't run experience replay
            if len(agent.memory) > batch_size:
                # Find a random batch from the memory
                randomBatch = random.sample(agent.memory, batch_size)
                # Perform experience replay
                agent.replay(randomBatch)
        # Record the stats about this game, for analysis and save to csv
        # (the CSV is rewritten from scratch each game with all results so far)
        results.append([game_score,j,oldInfo['observation']['TotalTime'], agent.epsilon])
        with open(agent.CSVName,"w") as f:
            wr = csv.writer(f)
            wr.writerows(results)
        # Decay the epsilon until the minimum
        if agent.epsilon > agent.epsilon_min:
            agent.epsilon *= agent.epsilon_decay
        else:
            agent.epsilon = 0
        # Save the model
        agent.saveModelToFile(agent.model,'model')
        # every 10 games update the secondary model, starting from the 3rd
        # This way the secondary model will always be at least 10 games behind the primary model
        if i == 2:
            agent.saveModelToFile(agent.model,'secondary')
            agent.secondaryDQN = agent.loadModelFromFile('secondary')
        if i % 10 == 3:
            agent.secondaryDQN = agent.loadModelFromFile('secondary')
            agent.saveModelToFile(agent.model,'secondary')
    return scores
class agent:
    """Double-DQN agent: `model` selects actions and is trained online;
    `secondaryDQN` is a periodically-synced copy used to compute the
    bootstrap targets during experience replay."""

    def __init__(self, observation_shape, action_size, load_model_file = False, epsilon = 1.0):
        # Initialise parameters for the agent
        self.observation_shape = observation_shape
        self.action_size = action_size
        # NOTE(review): block_list appears unused by this class (only the
        # dead module-level blockEncoder refers to a similar name).
        self.block_list = ['air','cobblestone','stone','gold_block']
        # Replay buffer of (state, action, reward, new_state, done) tuples.
        self.memory = deque(maxlen=2000)
        self.gamma = 0.95 # discount rate
        self.epsilon_min = 0.01
        self.epsilon = epsilon
        self.epsilon_decay = 0.99
        self.CSVName = 'dqn_bot_results.csv'
        if load_model_file:
            # Resume from models previously saved by saveModelToFile().
            self.model = self.loadModelFromFile('model')
            self.secondaryDQN = self.loadModelFromFile('secondary')
        else:
            # Start from scratch
            self.model = self.create_model()
            self.secondaryDQN = self.create_model()

    def create_model(self):
        # Create DQN using keras Sequential api
        model = Sequential()
        # This average pooling layer is quite extreme because of memory limits on machine
        model.add(AveragePooling2D(pool_size=(8, 8), input_shape=(self.observation_shape)))
        model.add(Conv2D(32, 8, 4))
        model.add(Conv2D(16, 4, 2))
        model.add(MaxPooling2D(pool_size=(4,4)))
        # Flatten needed to get a single vector as output otherwise get a matrix
        model.add(Flatten())
        model.add(Dense(64,activation='relu'))
        model.add(Dense(64,activation='relu'))
        # Linear output head: one Q-value per action.
        model.add(Dense(self.action_size,activation='linear'))
        # Other optimisers are available, such as adam
        model.compile(loss='mse', optimizer='rmsprop')
        return model

    def loadModelFromFile(self,file):
        # Loads a previous model
        # Load strucutre and weights separately to prevent tensorflow intialising and deleting weights
        yaml_file = open(file + '.yaml', 'r')
        loaded_model_yaml = yaml_file.read()
        yaml_file.close()
        model = model_from_yaml(loaded_model_yaml)
        model.load_weights(file + '_weights.h5')
        model.compile(loss='mse', optimizer='rmsprop')
        return model

    def saveModelToFile(self,model,file):
        # Saves model structure and weights to file
        model_yaml = model.to_yaml()
        with open(file + ".yaml", "w") as yaml_file:
            yaml_file.write(model_yaml)
        model.save_weights(file+'_weights.h5')
        return

    def act(self, state):
        # Return the epsilon-greedy action for this state
        if np.random.rand() <= self.epsilon:
            print("Random Action")
            return random.randrange(self.action_size)
        # Reshape required because of a quirk in the Keras API
        # NOTE(review): the reshape hard-codes 600x800x3 frames — assumes
        # observation_shape is (600, 800, 3); confirm against env setup.
        act_values = self.model.predict(state.reshape([-1, 600, 800, 3]))
        return np.argmax(act_values[0])

    def replay(self, batch):
        # Perform experience replay using the mbatch of memories supplied
        x_train = []
        y_train = []
        for state, action, reward, newState, done in batch:
            if done or len(self.memory) < 300:
                # If finished or network has not had time to learn reasonable values
                # Set target_q to be reward
                target_q = reward
            else:
                # Use Bellman equation to calculate the q we should haves
                # N.b. This is where the double DQN differs by using the secondaryDQN not the primary
                target_q = reward + self.gamma * np.amax(self.secondaryDQN.predict(newState.reshape([-1, 600, 800, 3])))
            # prediction is prediction_q
            # prediction has the 5 actions and predicted q-values
            prediction = self.model.predict(state.reshape([-1, 600, 800, 3]))
            # Useful debug line: print(f"action: {action}, reward:{reward}, qval:{target_q}, predq:{prediction[0][action]}")
            # update the action that we did take with a better target, from above. Keep others the same to not influence the network
            prediction[0][action] = target_q
            # Create the training data for X and Y that we use to fit the DQN on
            x_train.append(state)
            y_train.append(prediction[0])
        # Use the training data to fit the model, via the batch
        self.model.fit(np.asarray(x_train),np.asarray(y_train),epochs=1,verbose=0)
        return
def main():
    """Build the Marlo environment, create an agent, train it, save scores.

    Command line: argv[1] = mission/env name (optional),
                  argv[2] = port (optional).
    """
    # Bugfix: the original tested `len(sys.argv) > 1` first, which made the
    # two-argument `elif len(sys.argv) > 2` branch unreachable.  Test the
    # more specific case first.
    if len(sys.argv) > 2:
        env = utils.setupEnv(sys.argv[1], port=sys.argv[2])
    elif len(sys.argv) > 1:
        env = utils.setupEnv(sys.argv[1])
    else:
        env = utils.setupEnv()
    # Get the number of available states and actions - generates the output of CNN
    observation_shape = env.observation_space.shape
    action_size = env.action_space.n
    # Initialise agent and then run it.
    myagent = agent(observation_shape, action_size, False, 1.0)
    scores = trainAgent(env, myagent)
    np.savetxt('dqn_botscores', np.array(scores))
    return
if __name__ == "__main__":
main()
def blockEncoder(floorList):
    # ***This function no longer used as was planned for intepreting map data for DQN ***
    # We need to convert the block names from strings to vectors as they are categorical data
    # takes in a i-length list of the blocks with j different block types and returns an i*j length list indicating the encoded version.
    # NOTE(review): dead code — this is a module-level function, so `self` on
    # the next line is undefined and calling it would raise NameError (the
    # agent class also names the attribute block_list, not blockList).
    blockList = self.blockList
    # TODO need to simplfy the classes to classify these under a type of: air, goal, solid, danger (lava)
    # Build a one-hot vector for each known block type.
    blockDict = {}
    for i,block in enumerate(blockList):
        blockDict[block] = np.zeros(len(blockList))
        blockDict[block][i] = 1
    vectorisedList = []
    for i in floorList:
        # Adds content of list to other list. N.B. we might want to use append here depending on how we handle the data
        vectorisedList.extend(blockDict[i])
    return vectorisedList
| [
"matt_lee92@hotmail.co.uk"
] | matt_lee92@hotmail.co.uk |
c4313700b9cde248df3a272e2d6ebff106e5bc18 | e5a535b9ee7d954db80115a33266580a49c89527 | /vt_domain.py | 86f26d8c2f205eb9074702d80e1ff7074d1a05eb | [] | no_license | cmlh/MaltegoVTPublic | 5298200c328cb04c14faa4ec57361569ba2d8a0b | 4e7583e8b67eb219223949eda8e108dd0f504857 | refs/heads/master | 2021-01-17T21:51:15.448404 | 2015-03-10T22:11:22 | 2015-03-10T22:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,784 | py | #############################################
# VirusTotal Public API v2.0 domain lookup.
#
# Author: @michael_yip
# Email: jiachongzhi@gmail.com
# Date: 08/03/2015
#############################################
import json
import urllib
import datetime
from vt_miscellaneous import API_KEY, load_cache, dump_cache
domain_query_url = 'https://www.virustotal.com/vtapi/v2/domain/report'
def domain_lookup(domain):
    ''' Lookup domain information on VirusTotal.

    Results are cached via vt_miscellaneous.load_cache/dump_cache so
    repeated lookups for the same domain do not consume API quota.

    Returns the decoded JSON response as a dict.
    Raises whatever the cache layer, HTTP request, or JSON decoding raises.
    (Bug fix: the old blanket ``except Exception: exit(e)`` terminated the
    whole process on ANY error -- including cache problems -- and hid the
    real traceback; library code should let the error propagate instead.)
    '''
    # Serve from cache first to avoid burning a VT API request.
    cache = load_cache(domain)
    if cache:
        return cache
    # Query the VT public API (Python 2 urllib -- presumably this project
    # targets Python 2; confirm before porting).
    domain_parameters = {'domain': domain, 'apikey': API_KEY}
    response = urllib.urlopen('%s?%s' % (domain_query_url, urllib.urlencode(domain_parameters))).read()
    response_dict = json.loads(response)
    # Cache the parsed result for subsequent calls.
    dump_cache(domain, response_dict)
    return response_dict
def whois(domain):
    ''' Parse the WHOIS record VirusTotal returns for the domain.

    Returns (whois_dict, vt_response): the WHOIS fields as a dict of
    field-name -> list of values, plus the original VT JSON response.
    NOTE: the raw response is returned as well so callers can reuse it
    without issuing a second query.
    '''
    # Get VT response
    vt_response = domain_lookup(domain)
    whois_dict = {}
    for line in vt_response['whois'].split("\n"):
        if ":" not in line:
            continue
        # Bug fix: split on the FIRST colon only.  The old split(":") broke
        # values that themselves contain colons, e.g.
        # "Referral URL: http://example.com" used to parse as just "http".
        key, value = line.split(":", 1)
        whois_dict.setdefault(key.strip(), []).append(value.strip())
    return whois_dict, vt_response
def get_registrant_email(domain):
    ''' Get the WHOIS registrant email address for the domain. '''
    # Parsed WHOIS fields plus the raw VT response (for the timestamp).
    whois_dict, vt_response = whois(domain)
    registrant_email = ""
    for key, values in whois_dict.items():
        normalized = key.lower().strip()
        # Match any field whose name mentions both "registrant" and "email".
        if "registrant" in normalized and "email" in normalized:
            registrant_email = values[0]
            break
    timestamp_seconds = vt_response['whois_timestamp']
    # NOTE(review): the not-found case returns a bare "" while the found
    # case returns an (email, timestamp) tuple -- callers must handle both.
    if not registrant_email:
        return ""
    return registrant_email, __get_timestamp(timestamp_seconds)
def get_name_servers(domain):
    ''' Get the name servers listed in the domain's WHOIS record. '''
    # Parsed WHOIS fields plus the raw VT response (for the timestamp).
    whois_dict, vt_response = whois(domain)
    servers = []
    for key, values in whois_dict.items():
        # First field whose name mentions "name server" wins.
        if "name server" in key.lower().strip():
            servers = values
            break
    timestamp_seconds = vt_response['whois_timestamp']
    # NOTE(review): the not-found case returns a bare [] while the found
    # case returns a (servers, timestamp) tuple -- callers must handle both.
    if not servers:
        return []
    return servers, __get_timestamp(timestamp_seconds)
def get_registrar(domain):
    ''' Get the WHOIS registrar name (upper-cased). '''
    # Parsed WHOIS fields plus the raw VT response (for the timestamp).
    whois_dict, vt_response = whois(domain)
    registrar = ""
    for key, values in whois_dict.items():
        # Exact (case-insensitive) match on the "Registrar" field.
        if key.lower().strip() == 'registrar':
            registrar = values[0].upper()
            break
    timestamp_seconds = vt_response['whois_timestamp']
    # NOTE(review): the not-found case returns a bare "" while the found
    # case returns a (registrar, timestamp) tuple -- callers must handle both.
    if not registrar:
        return ""
    return registrar, __get_timestamp(timestamp_seconds)
def get_subdomains(domain):
    ''' Get the subdomains VirusTotal knows for this domain. '''
    # Raises KeyError if VT's response has no 'subdomains' field.
    return domain_lookup(domain)['subdomains']
def get_ip_resolutions(domain):
    ''' Get passive DNS data as (ip_address, last_resolved) pairs. '''
    vt_response = domain_lookup(domain)
    return [(entry['ip_address'], entry['last_resolved'])
            for entry in vt_response['resolutions']]
def get_detected_urls_domain(domain):
    ''' Get URLs on this domain that VirusTotal scanners flagged.

    Returns a list of (url, scan_date, positives) tuples; an empty list
    when VirusTotal reports no detected URLs for the domain.
    '''
    # Get VT response
    vt_response = domain_lookup(domain)
    detected_url_list = []
    # 'detected_urls' is simply absent for clean/unknown domains; treat
    # that as an empty result.  (Fix: the old blanket ``except Exception:
    # pass`` also silently hid malformed-entry bugs, not just the missing
    # key it was meant to cover.)
    for detected_url in vt_response.get('detected_urls', []):
        detected_url_list.append(
            (detected_url['url'], detected_url['scan_date'], detected_url['positives']))
    return detected_url_list
def __get_timestamp(seconds):
    ''' Convert a POSIX timestamp in seconds to a "YYYY-MM-DD HH:MM:SS" string. '''
    # NOTE(review): fromtimestamp renders in the LOCAL timezone --
    # presumably acceptable here; confirm if UTC was intended.
    moment = datetime.datetime.fromtimestamp(seconds)
    return moment.strftime('%Y-%m-%d %H:%M:%S')
| [
"jiachongzhi@gmail.com"
] | jiachongzhi@gmail.com |
6111ce77e28347be2330629259aea70e28d41558 | c801e211b905cba7499146b26b6c37d0f746a61c | /perm/perm.py | 61f7c3457d5dbb19dc716bdd27b4b3a131fb8a4c | [] | no_license | danebou/decomp-permuter | f2d7f04e7c8ca2353f72246fabd87eaf5eab8a49 | d45c932a148743a0ff2e21d9893729558476a77a | refs/heads/master | 2020-04-25T13:28:34.148072 | 2019-03-02T01:29:37 | 2019-03-02T01:44:33 | 172,810,481 | 0 | 0 | null | 2019-02-27T00:04:56 | 2019-02-27T00:04:56 | null | UTF-8 | Python | false | false | 1,171 | py | import math
class Perm():
    """Base node in a linked chain of permutation pieces.

    Each node contributes a fragment of output text.  ``evaluate`` walks
    the chain; a node with more than one candidate (perm_count > 1)
    consumes the first entry of ``seed`` to pick its fragment, while a
    single-candidate node consumes nothing.
    """

    def __init__(self):
        # Number of candidate renderings this node has; 1 = fixed output.
        self.perm_count = 1
        # Next node in the chain, or None at the tail.
        self.next_perm = None

    def evaluate(self, seed):
        """Render this node plus the rest of the chain for ``seed``."""
        if self.perm_count == 1:
            # Fixed node: nothing to choose, pass the seed along untouched.
            own = self._evaluate_self(None)
            remaining = seed
        else:
            # Choice node: the first seed entry selects this node's output.
            own = self._evaluate_self(seed[0])
            remaining = seed[1:]
        rest = self.next_perm.evaluate(remaining) if self.next_perm else ''
        return own + rest

    def _evaluate_self(self, seed):
        """Fragment contributed by this node alone; overridden by subclasses."""
        return ''

    def get_counts(self):
        # NOTE(review): this reports only the immediate next node's count
        # rather than recursing down the whole chain -- confirm intended.
        head = [self.perm_count] if self.perm_count > 1 else []
        tail = [self.next_perm.perm_count] if self.next_perm else []
        return head + tail
class TextPerm(Perm):
    """Chain node that always emits one fixed text fragment."""

    def __init__(self, text):
        super().__init__()
        # The literal fragment this node contributes (perm_count stays 1).
        self.text = text

    def _evaluate_self(self, seed):
        # Fixed output: the seed is irrelevant for this node.
        return self.text
class GeneralPerm(Perm):
    """Chain node that selects one of several candidate strings by seed index.

    NOTE(review): ``candiates`` (sic) keeps the original misspelling because
    both the constructor parameter and the attribute are part of the public
    interface; renaming would break keyword callers.
    """

    def __init__(self, candiates):
        super().__init__()
        # One permutation per candidate string.
        self.perm_count = len(candiates)
        self.candiates = candiates

    def _evaluate_self(self, seed):
        # The seed entry is an index into the candidate list.
        return self.candiates[seed]
| [
"danebouchie@gmail.com"
] | danebouchie@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.