text stringlengths 0 1.05M | meta dict |
|---|---|
# 212. Word Search II
#
# Given a 2D board and a list of words from the dictionary, find all words in the board.
#
# Each word must be constructed from letters of sequentially adjacent cell,
# where "adjacent" cells are those horizontally or vertically neighboring.
# The same letter cell may not be used more than once in a word.
#
# For example,
# Given words = ["oath","pea","eat","rain"] and board =
#
# [
# ['o','a','a','n'],
# ['e','t','a','e'],
# ['i','h','k','r'],
# ['i','f','l','v']
# ]
# Return ["eat","oath"].
#
# Note:
# You may assume that all inputs consist of lowercase letters a-z.
#
# click to show hint.
#
# You would need to optimize your backtracking to pass the larger test. Could you stop backtracking earlier?
#
# If the current candidate does not exist in all words' prefix, you could stop backtracking immediately.
# What kind of data structure could answer such query efficiently?
# Does a hash table work? Why or why not? How about a Trie?
# If you would like to learn how to implement a basic trie, please work on this problem:
# Implement Trie (Prefix Tree) first.
#
class TrieNode:
    """One node of a trie: per-letter child links plus an end-of-word flag."""

    def __init__(self):
        # letter -> child TrieNode; a fresh node has no children.
        self.childs = {}
        # True when the path from the root to this node spells a complete word.
        self.isWord = False
class Trie:
    """Prefix tree over words, supporting insertion and deletion."""

    def __init__(self):
        self.root = TrieNode()

    # @param {string} word
    # @return {void}
    def insert(self, word):
        """Insert *word*, creating any missing nodes along its path."""
        cur = self.root
        for ch in word:
            nxt = cur.childs.get(ch)
            if nxt is None:
                nxt = TrieNode()
                cur.childs[ch] = nxt
            cur = nxt
        cur.isWord = True

    def delete(self, word):
        """Remove *word* and prune now-useless nodes.

        Returns True when the word was present and removed, False otherwise.
        """
        cur = self.root
        path = []  # (letter, parent) pairs from the root down to the word's end
        for ch in word:
            path.append((ch, cur))
            cur = cur.childs.get(ch)
            if cur is None:
                return False  # path breaks: word never inserted
        if not cur.isWord:
            return False  # word is only a prefix of longer words
        if len(cur.childs):
            # Node still leads to longer words: just clear the flag.
            cur.isWord = False
        else:
            # Walk back up, deleting childless links until we reach a node
            # that is still needed (has other children or ends another word).
            for ch, parent in reversed(path):
                del parent.childs[ch]
                if len(parent.childs) or parent.isWord:
                    break
        return True
class Solution(object):
    def findWords(self, board, words):
        """
        :type board: List[List[str]]
        :type words: List[str]
        :rtype: List[str]
        """
        # Build a trie of all target words so the DFS can abandon a path as
        # soon as it stops being a prefix of any remaining word.
        w, h = len(board[0]), len(board)
        trie = Trie()
        for word in words:
            trie.insert(word)
        visited = [[False] * w for x in range(h)]
        # The four axis-aligned neighbour offsets. NOTE(review): this file is
        # Python 2 (zip returns a list); under Python 3 this would be a
        # one-shot iterator and only the first dfs call would see offsets.
        dz = zip([1, 0, -1, 0], [0, 1, 0, -1])
        ans = []
        def dfs(word, node, x, y):
            # Step into the trie with the letter at (x, y); a miss means no
            # target word starts with the current path.
            node = node.childs.get(board[x][y])
            if node is None:
                return
            visited[x][y] = True
            for z in dz:
                nx, ny = x + z[0], y + z[1]
                if nx >= 0 and nx < h and ny >= 0 and ny < w and not visited[nx][ny]:
                    dfs(word + board[nx][ny], node, nx, ny)
            if node.isWord:
                ans.append(word)
                # Delete the found word so it is reported only once and the
                # trie keeps shrinking, pruning later searches.
                trie.delete(word)
            visited[x][y] = False
        for x in range(h):
            for y in range(w):
                dfs(board[x][y], trie.root, x, y)
        return sorted(ans)
if __name__ == '__main__':
    # Demo from the problem statement; expected output: ['eat', 'oath'].
    # (Python 2 print statement, as in the rest of this file.)
    words = ["oath", "pea", "eat", "rain"]
    board = [
        ['o', 'a', 'a', 'n'],
        ['e', 't', 'a', 'e'],
        ['i', 'h', 'k', 'r'],
        ['i', 'f', 'l', 'v']
    ]
    print Solution().findWords(board, words)
| {
"repo_name": "gengwg/leetcode",
"path": "212_word_search_ii.py",
"copies": "1",
"size": "3601",
"license": "apache-2.0",
"hash": -9122605751815904000,
"line_mean": 27.5793650794,
"line_max": 108,
"alpha_frac": 0.5220772008,
"autogenerated": false,
"ratio": 3.533856722276742,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9553838766785492,
"avg_score": 0.0004190312582501489,
"num_lines": 126
} |
# 2/13/14
# Charles O. Goddard
import pylab
import numpy
from matplotlib import cm
from matplotlib import pyplot
from matplotlib.colors import rgb2hex
from matplotlib.patches import Polygon
from matplotlib.collections import LineCollection
from mpl_toolkits.basemap import Basemap as Basemap
import apidata
# Lambert conformal basemap covering the contiguous United States.
m = Basemap(llcrnrlon=-119, llcrnrlat=22, urcrnrlon=-64, urcrnrlat=49,
            projection='lcc',lat_1=33,lat_2=45,lon_0=-95, resolution=None)
# m.drawlsmask(land_color='coral',ocean_color='aqua',lakes=True)
# Load state boundary polygons into m.states / m.states_info.
shp_info = m.readshapefile('st99_d00', 'states', drawbounds=True)
# 2013 population estimates, keyed by lower-case state name.
state_populations = {'california': 38332521, 'texas': 26448193,
    'new york': 19651127, 'florida': 19552860, 'illinois': 12882135,
    'pennsylvania': 12773801, 'ohio': 11570808, 'georgia': 9992167,
    'michigan': 9895622, 'north carolina': 9848060, 'new jersey': 8899339,
    'virginia': 8260405, 'washington': 6971406, 'massachusetts': 6692824,
    'arizona': 6626624, 'indiana': 6570902, 'tennessee': 6495978,
    'missouri': 6044171, 'maryland': 5928814, 'wisconsin': 5742713,
    'minnesota': 5420380, 'colorado': 5268367, 'alabama': 4833722,
    'south carolina': 4774839, 'louisiana': 4625470, 'kentucky': 4395295,
    'oregon': 3930065, 'oklahoma': 3850568, 'puerto rico': 3615086,
    'connecticut': 3596080, 'iowa': 3090416, 'mississippi': 2991207,
    'arkansas': 2959373, 'utah': 2900872, 'kansas': 2893957,
    'nevada': 2790136, 'new mexico': 2085287, 'nebraska': 1868516,
    'west virginia': 1854304, 'idaho': 1612136, 'hawaii': 1404054,
    'maine': 1328302, 'new hampshire': 1323459, 'rhode island': 1051511,
    'montana': 1015165, 'delaware': 925749, 'south dakota': 844877,
    'alaska': 735132, 'north dakota': 723393, 'district of columbia': 646449,
    'vermont': 626630, 'wyoming': 582658}
# Index registrants by their registration token for O(1) lookups below.
with open('../data/CY2013Registrants.csv', 'r') as fd:
    registrant_list = list(apidata.read_csv(fd, 'Registrant'))
registrants = dict((r.registration_token, r) for r in registrant_list)
print('%d registrants known' % (len(registrants),))
unknown = set()        # tokens with no matching registrant
unknown_state = set()  # state names not present in the shapefile
u_pulls = 0            # pulls attributed to an unknown state
total_pulls = 0
# One pull counter per shapefile state, keyed by lower-case name.
state_pulls = dict((sd['NAME'].lower(), 0) for sd in m.states_info)
with open('../data/CY2013CodePulls.csv', 'r') as fd:
    pulls = apidata.read_csv(fd, 'CodePull')
    for pull in pulls:
        token = pull.registration_token
        if not token in registrants:
            unknown.add(token)
        else:
            state = registrants[token].state.lower()
            if not state in state_pulls:
                unknown_state.add(state)
                u_pulls += 1
            else:
                state_pulls[state] += 1
        total_pulls += 1
# Normalise counts to per-capita values.
# NOTE(review): raises KeyError for any shapefile state absent from
# state_populations -- presumably st99_d00 only contains the names above.
for state in state_pulls:
    state_pulls[state] /= float(state_populations[state])
minp, maxp = min(state_pulls.values()), max(state_pulls.values())
print('%d total pulls' % (total_pulls, ))
print('%d unknown registrants' % (len(unknown),))
print('%d unknown state names (%d pulls)' % (len(unknown_state), u_pulls))
print('(min, max) = (%d, %d)' % (minp, maxp))
# Choropleth: colour each state polygon by its per-capita pull rate.
cmap = pyplot.get_cmap('Greens')
ax = pyplot.gca()
for i, sd in enumerate(m.states_info):
    state = sd['NAME'].lower()
    pulls = state_pulls[state]
    color = rgb2hex(cmap((pulls - minp) / (1.0 * maxp - minp))[:3])
    poly = Polygon(m.states[i], facecolor=color)
    ax.add_patch(poly)
m.drawparallels(numpy.arange(25,65,20),labels=[1,0,0,0])
m.drawmeridians(numpy.arange(-120,-40,20),labels=[0,0,0,1])
# Colour bar keyed to the same colormap and data range.
cax = cm.ScalarMappable(cmap=cmap)
cax.set_array(state_pulls.values())
pyplot.colorbar(cax)
pyplot.title('Code Pulls per Capita for CY2013')
pyplot.show()
| {
"repo_name": "thomasnat1/DataScience2014CDC",
"path": "pulls_per_state_percapita.py",
"copies": "1",
"size": "3409",
"license": "mit",
"hash": 6159638940638318000,
"line_mean": 35.6559139785,
"line_max": 74,
"alpha_frac": 0.7066588442,
"autogenerated": false,
"ratio": 2.4667149059334297,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36733737501334296,
"avg_score": null,
"num_lines": null
} |
# 215. Kth Largest Element in an Array
#
# Find the kth largest element in an unsorted array.
# Note that it is the kth largest element in the sorted order, not the kth distinct element.
#
# For example,
# Given [3,2,1,5,6,4] and k = 2, return 5.
#
# Note:
# You may assume k is always valid, 1 <= k <= array's length.
#
# http://bookshadow.com/weblog/2015/05/23/leetcode-kth-largest-element-array/
import random
import heapq
from random import randint
class Solution(object):
    # O(n log n): sort descending and pick the (k-1)-th entry.
    def findKthLargest(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        ordered = sorted(nums, reverse=True)
        return ordered[k - 1]

    # Heap based: take the smallest of the k largest values.
    def findKthLargest(self, nums, k):
        return heapq.nlargest(k, nums)[-1]

    # Iterative quickselect around a random pivot.
    def findKthLargest(self, nums, k):
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            pos = self.PartitionAroundPivot(lo, hi, randint(lo, hi), nums)
            if pos == k - 1:
                return nums[pos]
            if pos > k - 1:
                hi = pos - 1
            else:
                lo = pos + 1

    def PartitionAroundPivot(self, left, right, pivot_idx, nums):
        """Partition nums[left..right] so values greater than the pivot come
        first; return the pivot's final resting index."""
        pivot_value = nums[pivot_idx]
        store = left
        nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
        for i in xrange(left, right):  # Python 2 xrange, as elsewhere in this file
            if nums[i] > pivot_value:
                nums[i], nums[store] = nums[store], nums[i]
                store += 1
        nums[right], nums[store] = nums[store], nums[right]
        return store

    # Recursive three-way quickselect, average O(n).
    # https://gengwg.blogspot.com/2017/09/quickselect.html
    def findKthLargest(self, nums, k):
        if not nums:
            return 0
        # Random pivot keeps the expected depth logarithmic.
        pivot = random.choice(nums)
        bigger = [v for v in nums if v > pivot]
        smaller = [v for v in nums if v < pivot]
        if k <= len(bigger):
            # The answer lives among the strictly larger values.
            return self.findKthLargest(bigger, k)
        if k > len(nums) - len(smaller):
            # Skip past the larger-or-equal values into the smaller pile.
            return self.findKthLargest(smaller, k - (len(nums) - len(smaller)))
        # k falls inside the run of values equal to the pivot.
        return pivot
if __name__ == '__main__':
    # Expected: 5, the 2nd largest of [3, 2, 1, 5, 6, 4] (Python 2 print).
    print Solution().findKthLargest([3, 2, 1, 5, 6, 4], 2)
| {
"repo_name": "gengwg/leetcode",
"path": "215_kth_largest_element_in_an_array.py",
"copies": "1",
"size": "2908",
"license": "apache-2.0",
"hash": 8564448273041790000,
"line_mean": 30.6086956522,
"line_max": 92,
"alpha_frac": 0.5649931224,
"autogenerated": false,
"ratio": 3.3932322053675614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4458225327767561,
"avg_score": null,
"num_lines": null
} |
# 216. Combination Sum III
#
# Find all possible combinations of k numbers that add up to a number n,
# given that only numbers from 1 to 9 can be used and each combination should be a unique set of numbers.
#
#
# Example 1:
#
# Input: k = 3, n = 7
#
# Output:
#
# [[1,2,4]]
#
# Example 2:
#
# Input: k = 3, n = 9
#
# Output:
#
# [[1,2,6], [1,3,5], [2,3,4]]
#
# see prob 40. Just add extra constraint len==k.
class Solution(object):
    def dfs(self, candidates, target, start, k, valuelist):
        """Backtrack over candidates[start:], growing valuelist toward target.

        Appends every combination of exactly k values summing to target to
        self.ret. Candidates must be sorted ascending and distinct, so each
        combination is generated at most once and the old O(n) membership
        check against the result list is unnecessary.
        """
        if target == 0 and len(valuelist) == k:
            self.ret.append(valuelist)
            return
        if len(valuelist) >= k:
            return  # already k numbers but wrong sum; deeper can't succeed
        for i in range(start, len(candidates)):
            if target < candidates[i]:
                return  # sorted candidates: everything after is too large too
            self.dfs(candidates, target - candidates[i], i + 1, k,
                     valuelist + [candidates[i]])

    def combinationSum3(self, k, n):
        """Return all unique combinations of k numbers from 1..9 summing to n.

        State is kept on the instance (self.ret) rather than on the class:
        the original Solution.ret leaked results between instances.
        """
        self.ret = []
        self.dfs(list(range(1, 10)), n, 0, k, [])
        return self.ret
if __name__ == '__main__':
    # Expected: [[1, 2, 6], [1, 3, 5], [2, 3, 4]] (Python 2 print).
    print Solution().combinationSum3(3, 9)
| {
"repo_name": "gengwg/leetcode",
"path": "216_combination_sum_iii.py",
"copies": "1",
"size": "1101",
"license": "apache-2.0",
"hash": -9199431798715370000,
"line_mean": 24.0227272727,
"line_max": 105,
"alpha_frac": 0.5821980018,
"autogenerated": false,
"ratio": 3.1820809248554913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42642789266554915,
"avg_score": null,
"num_lines": null
} |
# 217. Contains Duplicate
#
# Given an array of integers, find if the array contains any duplicates.
# Your function should return true if any value appears at least twice in the array,
# and it should return false if every element is distinct.
class Solution(object):
    # Hash map: remember each value; seeing one again means a duplicate.
    # (Renamed the local from `dict` -- it shadowed the builtin.)
    def containsDuplicate(self, nums):
        """
        :type nums: List[int]
        :rtype: bool
        """
        seen = {}
        for num in nums:
            if num in seen:
                return True
            seen[num] = 1
        return False

    # Same idea, using a set.
    def containsDuplicate(self, nums):
        """
        :type nums: List[int]
        :rtype: bool
        """
        s = set()
        for n in nums:
            if n in s:
                return True
            s.add(n)
        return False

    # One-liner: a set drops duplicates, shrinking the length.
    def containsDuplicate(self, nums):
        return len(nums) != len(set(nums))

    # Sort a copy, then compare each element with its neighbour.
    # Uses sorted() rather than nums.sort() so the caller's list is
    # no longer mutated as a side effect.
    def containsDuplicate(self, nums):
        ordered = sorted(nums)
        for i in range(len(ordered) - 1):
            if ordered[i] == ordered[i + 1]:
                return True
        return False
if __name__ == '__main__':
    # Expected: True -- [2, 3, 2, 1, 2] repeats the value 2 (Python 2 print).
    print Solution().containsDuplicate([2, 3, 2, 1, 2])
| {
"repo_name": "gengwg/leetcode",
"path": "217_contains_duplicate.py",
"copies": "1",
"size": "1267",
"license": "apache-2.0",
"hash": -2830687561886935600,
"line_mean": 25.9574468085,
"line_max": 84,
"alpha_frac": 0.543804262,
"autogenerated": false,
"ratio": 4.195364238410596,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000795866628156991,
"num_lines": 47
} |
# 2/18/17 (better)
class Solution(object):
    def findMaxLength(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Length of the longest contiguous subarray with equally many
        0s and 1s: track the running (#ones - #zeros) balance and,
        whenever a balance repeats, the slice in between is balanced.
        """
        best = 0
        balance = 0
        seen = {0: -1}  # first index at which each balance value occurred
        for idx in range(len(nums)):
            balance += 1 if nums[idx] else -1
            if balance in seen:
                best = max(best, idx - seen[balance])
            else:
                seen[balance] = idx
        return best
# 4/13/19
class Solution:
    def findMaxLength(self, nums: 'List[int]') -> int:
        """Longest contiguous subarray with an equal count of 0s and 1s.

        Tracks the running (#ones - #zeros) difference; when the same
        difference reappears, the slice between the two indices is balanced.
        The annotations are quoted because `typing.List` is never imported
        in this file -- unquoted they raised NameError at definition time.
        """
        diff = 0
        mapping = {0: -1}  # diff -> earliest index it occurred at
        res = 0
        for i, num in enumerate(nums):
            diff += 2 * num - 1  # +1 for a one, -1 for a zero
            if diff in mapping:
                res = max(res, i - mapping[diff])
            else:
                mapping[diff] = i
        return res
## TLE
class Solution:
    def findMaxLength(self, nums: 'List[int]') -> int:
        """Brute force, longest window first (times out on large inputs).

        prefix[i] holds [#zeros, #ones] in nums[:i]; a window [i, i+ln) is
        balanced when both counts grew by the same amount. Assumes nums
        contains only 0s and 1s, since values are used as list indices.
        The annotation is quoted: `typing.List` is never imported here.
        """
        prefix = [[0, 0]]
        for num in nums:
            prefix.append(prefix[-1][:])
            prefix[-1][num] += 1
        for half in range(len(nums) // 2, -1, -1):
            ln = 2 * half
            for i in range(len(prefix) - ln):
                if prefix[i + ln][0] - prefix[i][0] == prefix[i + ln][1] - prefix[i][1]:
                    return ln
        return 0
"repo_name": "cc13ny/algo",
"path": "leetcode/525-Contiguous-Array/one_pass.py",
"copies": "4",
"size": "1231",
"license": "mit",
"hash": 1671432206566151200,
"line_mean": 25.7826086957,
"line_max": 72,
"alpha_frac": 0.4354183591,
"autogenerated": false,
"ratio": 3.3091397849462365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007616459627329193,
"num_lines": 46
} |
# 218. The Skyline Problem
# A city's skyline is the outer contour of the silhouette formed by all the buildings
# in that city when viewed from a distance.
# Now suppose you are given the locations and height of all the buildings
# as shown on a cityscape photo (Figure A),
# write a program to output the skyline formed by these buildings collectively (Figure B).
#
# Buildings Skyline Contour
#
# The geometric information of each building is represented by a triplet of integers [Li, Ri, Hi],
# where Li and Ri are the x coordinates of the left and right edge of the ith building,
# respectively, and Hi is its height.
# It is guaranteed that 0 ≤ Li, Ri ≤ INT_MAX, 0 < Hi ≤ INT_MAX, and Ri - Li > 0.
# You may assume all buildings are perfect rectangles grounded on an absolutely flat surface
# at height 0.
#
# For instance, the dimensions of all buildings in Figure A are recorded as:
# [ [2 9 10], [3 7 15], [5 12 12], [15 20 10], [19 24 8] ] .
#
# The output is a list of "key points" (red dots in Figure B) in the format of
# [ [x1,y1], [x2, y2], [x3, y3], ... ] that uniquely defines a skyline.
# A key point is the left endpoint of a horizontal line segment.
# Note that the last key point, where the rightmost building ends,
# is merely used to mark the termination of the skyline,
# and always has zero height.
# Also, the ground in between any two adjacent buildings
# should be considered part of the skyline contour.
#
# For instance, the skyline in Figure B should be represented as:
# [ [2 10], [3 15], [7 12], [12 0], [15 10], [20 8], [24, 0] ].
#
# Notes:
#
# The number of buildings in any input list is guaranteed to be in the range [0, 10000].
# The input list is already sorted in ascending order by the left x position Li.
# The output list must be sorted by the x position.
# There must be no consecutive horizontal lines of equal height in the output skyline.
# For instance, [...[2 3], [4 5], [7 5], [11 5], [12 7]...] is not acceptable;
# the three lines of height 5 should be merged into one in the final output as such:
# [...[2 3], [4 5], [12 7], ...]
import heapq
class Solution(object):
    def getSkyline(self, buildings):
        """
        :type buildings: List[List[int]]
        :rtype: List[List[int]]
        https://leetcode.com/problems/the-skyline-problem/discuss/61261/10-line-Python-solution-104-ms
        Use an infinite vertical line x to scan from left to right.
        If max height changes, record [x, height] in res.
        Online judge is using Python 2.7.9 and there's no max heap's push and pop method,
        so we can use a min heap hp storing -H as "max heap".
        set comprehension is faster and shorter than list(set((R, 0, None) for L, R, H in buildings)).
        """
        # Events: building starts (negated height sorts taller first at a
        # tie in x) plus de-duplicated building ends.
        events = sorted([(L, -H, R) for L, R, H in buildings] +
                        list({(R, 0, None) for _, R, __ in buildings}))
        res = [[0, 0]]
        hp = [(0, float("inf"))]  # sentinel: ground level never expires
        for x, negH, R in events:
            # Discard buildings that ended at or before the sweep line.
            while x >= hp[0][1]:
                heapq.heappop(hp)
            if negH:
                heapq.heappush(hp, (negH, R))
            # Sum is non-zero exactly when the max height changed.
            if res[-1][1] + hp[0][0]:
                # res += [x, -hp[0][0]],
                res.append([x, -hp[0][0]])
        return res[1:]
if __name__ == '__main__':
    # Expected: [[2, 10], [3, 15], [7, 12], [12, 0], [15, 10], [20, 8], [24, 0]]
    print Solution().getSkyline([[2, 9, 10], [3, 7, 15], [5, 12, 12], [15, 20, 10], [19, 24, 8]])
| {
"repo_name": "gengwg/leetcode",
"path": "218_skyline_problem.py",
"copies": "1",
"size": "3401",
"license": "apache-2.0",
"hash": -726914847247832600,
"line_mean": 43.0909090909,
"line_max": 102,
"alpha_frac": 0.627982327,
"autogenerated": false,
"ratio": 3.2738669238187077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4401849250818708,
"avg_score": null,
"num_lines": null
} |
# 219. Contains Duplicate II
#
# Given an array of integers and an integer k,
# find out whether there are two distinct indices i and j in the array
# such that nums[i] = nums[j] and the absolute difference between i and j is at most k.
class Solution(object):
    def containsNearbyDuplicate(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: bool
        """
        seen = {}  # value -> most recent index (renamed from builtin-shadowing `dict`)
        for i in range(len(nums)):
            if nums[i] not in seen:
                seen[nums[i]] = i
            else:
                if i - seen[nums[i]] <= k:
                    return True
                else:  # distance larger than k. replace with current position
                    seen[nums[i]] = i
        return False

    def containsNearbyDuplicate(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: bool
        """
        seen = {}
        for idx, num in enumerate(nums):
            if num not in seen:
                seen[num] = idx
            else:  # num already recorded
                # idx diff smaller than k
                if idx - seen[num] <= k:
                    return True
                else:  # idx diff larger than k
                    # replace with current position, so a later diff may fit
                    # BUGFIX: was `dict[nums[i]] = i` -- `i` is undefined in
                    # this enumerate-based version and raised NameError.
                    seen[num] = idx
        return False

    # shorter version. test in map first.
    def containsNearbyDuplicate(self, nums, k):
        num_map = {}
        for i in range(len(nums)):
            if nums[i] in num_map and i - num_map[nums[i]] <= k:
                return True
            num_map[nums[i]] = i
        return False
| {
"repo_name": "gengwg/leetcode",
"path": "219_contains_duplicate_ii.py",
"copies": "1",
"size": "1674",
"license": "apache-2.0",
"hash": 3758234018225934000,
"line_mean": 30.5849056604,
"line_max": 93,
"alpha_frac": 0.4970131422,
"autogenerated": false,
"ratio": 4.164179104477612,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5161192246677612,
"avg_score": null,
"num_lines": null
} |
# 21. Merge Two Sorted Lists - LeetCode
# https://leetcode.com/problems/merge-two-sorted-lists/description/
# Definition for singly-linked list.
class ListNode(object):
    # Singly-linked list node: a payload value plus a next pointer.
    def __init__(self, x):
        self.val = x
        self.next = None  # tail nodes keep None
# Fixture list a: 0 -> 1 -> 3 -> 4 -> 5 -> 7 -> 10
a = ListNode(0)
p = a
for i in [1,3,4,5,7,10]:
    p.next = ListNode(i)
    p = p.next
# Fixture list b: -1 -> 2 -> 8 -> 15
b = ListNode(-1)
p = b
for i in [2,8,15]:
    p.next = ListNode(i)
    p = p.next
# Need to implement a linked list first
class Solution(object):
    def mergeTwoLists(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode

        Recursively merge two sorted lists into a brand-new list; fresh
        nodes are allocated, so the inputs are left untouched.
        """
        # Base cases: one side exhausted -> hand back the other side as-is.
        if l1 is None:
            return l2
        if l2 is None:
            return l1
        # Copy the smaller head and recurse on what remains.
        if l1.val < l2.val:
            merged = ListNode(l1.val)
            merged.next = self.mergeTwoLists(l1.next, l2)
        else:
            merged = ListNode(l2.val)
            merged.next = self.mergeTwoLists(l1, l2.next)
        return merged
s = Solution()
def traversal(node):
    # Print every value in the list; "<" marks the end (Python 2 prints).
    while True :
        print node.val,
        node = node.next
        if node is None:
            print "<"
            break
traversal(a)
traversal(b)
traversal(s.mergeTwoLists(a,b))
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/021_merge-two-sorted-lists.py",
"copies": "1",
"size": "1230",
"license": "mit",
"hash": -7099723993260826000,
"line_mean": 20.9821428571,
"line_max": 67,
"alpha_frac": 0.5300813008,
"autogenerated": false,
"ratio": 3.106060606060606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4136141906860606,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Libao Jin'
__date__ = 'December 15, 2015'
# Definition for singly-linked list.
class ListNode(object):
    def __init__(self, x):
        # Node payload and pointer to the next node (None at the tail).
        self.val = x
        self.next = None
class Solution(object):
    def mergeTwoLists(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode

        Merge by flattening both linked lists into Python lists, sorting
        the combined values, and rebuilding a fresh linked list.
        """
        values = self.to_list(l1) + self.to_list(l2)
        values.sort()
        if not values:
            return None
        head = ListNode(values[0])
        tail = head
        for v in values[1:]:
            tail.next = ListNode(v)
            tail = tail.next
        return head

    def to_list(self, sorted_list):
        """Collect a linked list's node values into a Python list."""
        values = []
        node = sorted_list
        while node is not None:
            values.append(node.val)
            node = node.next
        return values

    def show_list_nodes(self, head):
        """Print a banner and then every node value, one per line."""
        print("New List Node:")
        while head.next is not None:
            print(head.val)
            head = head.next
        print(head.val)
if __name__ == '__main__':
    s = Solution()
    # Build -1 -> 1 -> 2 -> 2 -> 2 -> 3 -> 4 -> 5, one node at a time.
    a = ListNode(-1)
    b = ListNode(1)
    c = ListNode(2)
    d = ListNode(2)
    e = ListNode(2)
    f = ListNode(3)
    g = ListNode(4)
    h = ListNode(5)
    a.next = b
    b.next = c
    c.next = d
    d.next = e
    e.next = f
    f.next = g
    g.next = h
    # print(s.deleteDuplicates(i))
    # NOTE(review): b is a's tail starting at 1, so the two merge inputs
    # share nodes -- fine here because mergeTwoLists copies values.
    s.show_list_nodes(a)
    s.show_list_nodes(b)
    t = s.mergeTwoLists(a, b)
    s.show_list_nodes(t)
| {
"repo_name": "imthomasking/LeetCode-Solutions",
"path": "solutions/021_MergeTwoSortedLists.py",
"copies": "2",
"size": "1997",
"license": "mit",
"hash": -7671783036222810000,
"line_mean": 22.7738095238,
"line_max": 52,
"alpha_frac": 0.4917376064,
"autogenerated": false,
"ratio": 3.3847457627118644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9841761146889643,
"avg_score": 0.006944444444444443,
"num_lines": 84
} |
# 21. Merge Two Sorted Lists
#
# Merge two sorted linked lists and return it as a new list.
# The new list should be made by splicing together the nodes of the first two lists.
#
# https://github.com/gengwg
# Definition for singly-linked list.
class ListNode:
    def __init__(self, x):
        # Node payload and next pointer (None at the tail).
        self.val = x
        self.next = None
    def __repr__(self):
        # NOTE: an object instance is always truthy, so this guard always
        # passes; repr of the next node recurses, e.g. "1 -> 5 -> None".
        if self:
            return "{} -> {}".format(self.val, self.next)
class Solution:
    def mergeTwoLists(self, l1, l2):
        """Splice two sorted linked lists together in O(1) extra space.

        Existing nodes are re-linked rather than copied, so the result
        shares nodes with the inputs.
        """
        dummy = ListNode(0)  # placeholder head; the answer starts at dummy.next
        tail = dummy
        while l1 and l2:
            # Attach whichever head is smaller and advance that input list.
            if l1.val < l2.val:
                tail.next = l1
                l1 = l1.next
            else:
                tail.next = l2
                l2 = l2.next
            tail = tail.next
        # One list ran out: append the remainder of the other (may be None).
        tail.next = l1 or l2
        return dummy.next
if __name__ == "__main__":
l1 = ListNode(1)
l1.next = ListNode(5)
l2 = ListNode(2)
l2.next = ListNode(3)
l2.next.next = ListNode(4)
print l1
print l2
print Solution().mergeTwoLists(l1, l2)
| {
"repo_name": "gengwg/leetcode",
"path": "021_merge_two_sorted_lists.py",
"copies": "1",
"size": "1478",
"license": "apache-2.0",
"hash": 7258764197087888000,
"line_mean": 27.4230769231,
"line_max": 84,
"alpha_frac": 0.550744249,
"autogenerated": false,
"ratio": 3.622549019607843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4673293268607843,
"avg_score": null,
"num_lines": null
} |
''' 21-plot_Sky-coverage.py
===============================================
AIM: Plots the difference in Stray Light coverage between the outputs of two 21-plot-Sky-coverage.py runs, in terms of period of observation and accumulated observation time.
INPUT: files: - <orbit_id>_misc/ : files from 21-plot-Sky-coverage.py
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_<SL_angle>figures/ : <orbit_id>_<threshold_obs_time>_<max_mag><_SAA?>_SL_coverage-other-<ID1-ID2>.png/pdf/eps
CMD: 21b-plot_Sky-coverage.py
ISSUES: <NONE KNOWN>
REQUIRES:- standard python libraries, specific libraries in resources/ (+ SciPy)
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/ --> figures
* <orbit_id>_misc/ --> storages of data
REMARKS: <NONE>
'''
###########################################################################
### INCLUDES
import numpy as np
import pylab as plt
import os
import matplotlib.cm as cm
import time
from resources.routines import *
from resources.TimeStepping import *
import parameters as param
import resources.constants as const
import resources.figures as figures
from resources.targets import *
from matplotlib.ticker import MaxNLocator, MultipleLocator, FormatStrFormatter
###########################################################################
### PARAMETERS
alt_other = 650  # altitude [km] of the comparison orbit
alt_ref = 700    # altitude [km] of the reference orbit
orbit_id_other='6am_%d_5_conf4e' % alt_other
orbit_id_ref='6am_%d_5_conf4e' % alt_ref
# Give fname without the extension !
# NOTE(review): the two mag9 file names below are immediately overwritten by
# the mag12 ones -- presumably leftovers from an earlier run; confirm intent.
sky_coverage_map_fname_ref = '6am_%d_5_conf4e-sky_map-49-mag9_SAA_accumulated' % alt_ref
sky_coverage_map_fname_other = '6am_%d_5_conf4e-sky_map-49-mag9_SAA_accumulated' % alt_other
sky_coverage_map_fname_ref = '6am_%d_5_conf4e-sky_map-79-mag12_SAA_accumulated' % alt_ref
sky_coverage_map_fname_other = '6am_%d_5_conf4e-sky_map-78-mag12_SAA_accumulated' % alt_other
# Nice plots?
fancy=True
# Save plots?
save = True
# Show figures ?
show = True
# min of scale
min_val=-14
# max of scale
max_val=14
#
step_scale=2
###########################################################################
# Formatted folders definitions
_, folder_figures_ref, folder_misc_ref = init_folders(orbit_id_ref)
_, _, folder_misc_other = init_folders(orbit_id_other)
# Load the two accumulated sky-coverage maps.
data_ref = np.load(folder_misc_ref+sky_coverage_map_fname_ref+'.npz')
data_other = np.load(folder_misc_other+sky_coverage_map_fname_other+'.npz')
ra_grid = data_ref['ra_grid']
dec_grid = data_ref['dec_grid']
ticks = data_ref['dec_grid']
ref = data_ref['data_grid']
other = data_other['data_grid']
# Zero out NaNs so the subtraction below is well defined everywhere.
whereAreNaNs = np.isnan(ref);
ref[whereAreNaNs] = 0;
whereAreNaNs = np.isnan(other);
other[whereAreNaNs] = 0;
delta = ref-other
#delta[delta == 0]=np.nan
### Plotting
# transform 0 into no plotting in the data matrix
if fancy: figures.set_fancy()
fig = plt.figure()
ax=plt.subplot(111)
#ax.set_aspect(2.)
plt.grid()
print np.amax(delta)
# Contour levels for the difference map (in days).
v = np.arange(min_val,max_val+step_scale, step_scale)
vl = np.arange(min_val,max_val+step_scale, 2)
CS = plt.contour( ra_grid,dec_grid,np.fliplr(delta),colors='k',levels=vl)
plt.clabel(CS, inline=1,fmt='%+d',colors='k', fontsize=12, ticks=v)
CS = plt.contourf( ra_grid ,dec_grid,np.fliplr(delta),200,cmap=plt.cm.RdBu_r,levels=v)
plt.yticks(np.arange(-80, 100, 20.))
#v = np.linspace(min_val,max_val, 9, endpoint=True)
cbar = plt.colorbar(CS, ticks=v)
cbar.set_ticklabels([r"%+1.1f" % l for l in v])
cbar.set_label(r'$\mathrm{Days}$')
plt.xlabel('RA [hours]')
plt.ylabel('Dec [deg]')
stepra = 3
xticks = np.arange(0, 24+stepra, stepra)
print "min delta: %+02.1f" % np.amin(delta)
print "max delta: %+02.1f" % np.amax(delta)
plt.xticks(xticks)
# RA axis is labelled right-to-left in hours (sky-chart convention).
ax.set_xticklabels([r"$%d\mathrm{h}$" % h for h in [12,9,6,3,0,21,18,15]])
if show: plt.show()
if save:
    fname = sky_coverage_map_fname_ref+'---'+sky_coverage_map_fname_other
    figures.savefig(folder_figures_ref+fname, fig, fancy)
    print 'saved as %s' % folder_figures_ref+fname
| {
"repo_name": "kuntzer/SALSA-public",
"path": "21b_plot_delta_Sky_corverage.py",
"copies": "1",
"size": "3917",
"license": "bsd-3-clause",
"hash": 7518852237313606000,
"line_mean": 27.8014705882,
"line_max": 150,
"alpha_frac": 0.6594332397,
"autogenerated": false,
"ratio": 2.7565095003518647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3915942740051865,
"avg_score": null,
"num_lines": null
} |
''' 21-plot_Sky-coverage.py
===============================================
AIM: Plots the Stray Light coverage in % in terms of period of observation and accumulated observation time.
INPUT: files: - <orbit_id>_misc/ : files from 17-<...>.py
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_<SL_angle>figures/ : <orbit_id>_<threshold_obs_time>_<max_mag><_SAA?>_SL_coverage.png/pdf/eps
CMD: 21-plot_Sky-coverage.py
ISSUES: <NONE KNOWN>
REQUIRES:- standard python libraries, specific libraries in resources/ (+ SciPy)
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/ --> figures
* <orbit_id>_misc/ --> storages of data
REMARKS: <NONE>
'''
###########################################################################
### INCLUDES
import numpy as np
import pylab as plt
import os
import matplotlib.cm as cm
import time
from resources.routines import *
from resources.TimeStepping import *
import parameters as param
import resources.constants as const
import resources.figures as figures
from resources.targets import *
from resources.coordinates import ecliptic2equatorial
import resources.constellations as constel
from matplotlib.ticker import MaxNLocator, MultipleLocator, FormatStrFormatter
###########################################################################
### PARAMETERS
# orbit_id
orbit_id='700_25_conf4'
apogee=700
perigee=700
# First minute in data set !
minute_ini = 0
# Last minute to look for
minute_end = 1440*365/12
# File name for the list of orbit file
orbits_file = 'orbits.dat'
# Maximum visible magnitude
mag_max = 9
# Min nb_obs_day
nb_obs_day = 50
# min of scale
min_val=0
# max of scale
max_val=90
#
step_scale=10
# Take SAA into account?
SAA = True
# Minimal minutes to be observed per orbit (if consecutive = False)
min_t_obs_per_orbit = 49
# Print much information ?
verbose = False
# Factor in the SL post treatment correction ?
SL_post_treat = True
# This is a way to vary the results by multiplying the whole pst by a number.
# This is very easy as if the pst is multiplied by a constant, it can be taken out of the
# integral and only multplying the flux is equivalent to re-running all the simulations
pst_factor = 1.
# If set to True, then it will be observations of at least (period - max_interruptions)
# If set to False, then it is minimum (period - max_interruptions) minutes per orbit,
# not necesseraly consecutive.
consecutive = False
# Nice plots?
fancy= True
# Save plots?
save = True
# Save data in ASCII form ?
savetxt = False
# Show figures ?
show = True
# Show ecliptic ?
show_ecliptic=True
# Show constellations ?
include_constellation = True
# Show Kepler field?
include_kepler = True
# File name for the input file (in a compressed binary Python format)
if SAA: note = '_SAA'
else: note = ''
if not pst_factor == 1.: note += '_%1.1fpst' % pst_factor
if SL_post_treat: note+= '_%4.3fSLreduction' % param.SL_post_treat_reduction
if not consecutive: note += '_cumul_'
#####################################################################################################################
# for every region in the sky/worthy target:
# >> Find when you can look with transit_duration [h] with maximal max_interruptions [min]
# >>>> return start and end time of observations with duration of interruptions [min]
# >> can we observe a transit ?
# >>>> Vary the start of transit time by transit_duration [h] until exoplanet_period [h]
#####################################################################################################################
# CONSTANTS AND PHYSICAL PARAMETERS
period = altitude2period(apogee, perigee)
###########################################################################
### INITIALISATION
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
## Prepare grid
n_alpha = param.resx
n_delta = param.resy
ra_i = -np.pi
ra_f = np.pi
rah_i = 0.
rah_f = 24.
dec_i = -np.pi/2.
dec_f = np.pi/2.
ra_step = (ra_f-ra_i)/n_alpha
dec_step = (dec_f-dec_i)/n_delta
rah_step = (rah_f-rah_i)/n_alpha
iterable = (ra_i + ra_step/2+ i*ra_step for i in range(n_alpha))
ras = np.fromiter(iterable, np.float)
iterable = (rah_i + rah_step/2+ i*rah_step for i in range(n_alpha))
rahs = np.fromiter(iterable, np.float)
iterable = (dec_i + dec_step/2+ i*dec_step for i in range(n_delta))
decs = np.fromiter(iterable, np.float)
ra_grid, dec_grid = np.meshgrid(ras, decs)
rah_grid, dec_grid = np.meshgrid(rahs, decs)
data_grid = np.zeros(np.shape(ra_grid))
data_grid_days = np.zeros(np.shape(ra_grid))
# File name for the input file (in a compressed binary Python format)
input_fname = 'ephemerids_%ddays_%dmin_V%3.1f%s.npz' % (nb_obs_day,min_t_obs_per_orbit,mag_max,note)
# loading data
print 'loading %s' % input_fname
sys.stdout.write("Loading worthy targets...\t")
sys.stdout.flush()
data = np.load(folder_misc+input_fname)
worthy_targets = data['worthy_targets']
obs_tot=data['obs_tot']
print 'Done, %d targets loaded for nb_obs_day %3.1f' % (len(worthy_targets), nb_obs_day)
###########################################################################
# cycling through the targets:
obs_time = np.zeros(len(worthy_targets))
for index_target, target in enumerate(worthy_targets):
# tar_start = start_obs[index_target,:]
# tar_stop = stop_obs[index_target,:]
#print target.Coordinates()[0]*180./np.pi, target.Coordinates()[1]*180./np.pi
if verbose: print index_target, target.Coordinates()[0]*180./np.pi, target.Coordinates()[1]*180./np.pi
if obs_tot[index_target]>0.:
obs_time[index_target]=obs_tot[index_target]/60./24.
# Associate the density to a grid point
if target.Coordinates()[0] < np.pi:
id_ra = np.where(np.abs(ras-target.Coordinates()[0]) < 0.05)[0]
else:
id_ra = np.where(np.abs(ras-(target.Coordinates()[0]-2.*np.pi)) < 0.05)[0]
id_dec= np.where(np.abs(decs-target.Coordinates()[1]) < 0.05)[0]
# Transform density in prob of transit:
if data_grid[id_dec, id_ra] == 0 and obs_tot[index_target]>0.:
data_grid_days[id_dec, id_ra] = nb_obs_day
data_grid[id_dec, id_ra] = obs_tot[index_target]/60./24.
if verbose: print target.Coordinates()[0]*180./np.pi,'\t',target.Coordinates()[1]*180./np.pi,'\t', obs_tot[index_target]/24./60.
if verbose: print 'obs start | obs end | hours of obs'
print np.amin(data_grid), np.amax(data_grid)
co = np.size(data_grid[np.where(data_grid>0)])
print 'coverage', float(co)/float(np.size(data_grid))*100, '%'
#plt.figure()
#for index_target, target in enumerate(worthy_targets):
# c = density[index_target]
# plt.scatter(target.Coordinates()[0]*180./np.pi,target.Coordinates()[1]*180./np.pi,c=c, cmap=cm.jet, vmin=np.amin(density), vmax=np.amax(density), edgecolor='none', s=50)
#plt.xlim([0,360])
#plt.ylim([-90,90])
#plt.grid()
#cb=plt.colorbar()
#cb.set_label('Probabilty of transit of min. %d hours' % transit_duration)
###########################################################################
### Plotting
# transform 0 into no plotting in the data matrix
mag_min= np.amin(data_grid[data_grid>0])
data_grid[data_grid < mag_min] = np.nan
mag_min= np.amin(data_grid_days[data_grid_days>0])
data_grid_days[data_grid_days < mag_min] = np.nan
if fancy: figures.set_fancy()
fig = plt.figure()
ax=plt.subplot(111)
#ax.set_aspect(2.)
min_nb_obs_day = np.nanmin(data_grid)
max_nb_obs_day = np.nanmax(data_grid)
plt.grid()
ra_grid *= const.RAD
dec_grid *= const.RAD
data_grid[data_grid<min_nb_obs_day]=0
if savetxt:
np.savetxt("%s_V%d_%dd_%dm.dat" % (orbit_id, mag_max, nb_obs_day, min_t_obs_per_orbit), data_grid)
np.savetxt("ra_grid.dat", ra_grid)
np.savetxt("dec_grid.dat", dec_grid)
data_grid = np.fliplr(data_grid)
ra_grid = rah_grid
v = np.arange(min_val,max_val+step_scale, step_scale)
CS = plt.contour(ra_grid,dec_grid,data_grid,colors='k',levels=v)
plt.clabel(CS, inline=1,fmt='%d',colors='k', fontsize=12)
CS = plt.contourf(ra_grid ,dec_grid,data_grid,levels=v,cmap=plt.cm.winter)
plt.yticks(np.arange(-80, 100, 20.))
if include_constellation:
for co in constel.constellations:
co = np.asarray(co, dtype=np.float)
co[:,1] = co[:,1] / 1800.
co[:,2] = co[:,2] / 60.
idc = np.where(co[:,1] < 12.)
co[idc,1] = 12.-co[idc,1]
idc = np.where(co[:,1] > 12.)
co[idc,1] = 36.-co[idc,1]
for ii, star in enumerate(co):
if star[0] > 0:
plt.plot([co[ii-1, 1], star[1]], [co[ii-1, 2], star[2]], '.-', lw=0.7, c='grey', ms=2)
else:
plt.plot(star[1], star[2], '.', c='grey', ms=2)
if include_kepler:
kc = np.loadtxt("resources/kepler_coord.dat")
kcr = kc[:,0] / 360. * 24.
idc = np.where(kcr < 12.)
kcr[idc] = 12.-kcr[idc]
idc = np.where(kcr > 12.)
kcr = 36.-kcr
plt.plot(kcr, kc[:,1], c='r', lw=2)
stepra = 3
xticks = np.arange(0, 24+stepra, stepra)
plt.xticks(xticks)
ax.set_xticklabels([r"$%d\mathrm{h}$" % h for h in [12,9,6,3,0,21,18,15]])
#v = np.arange(np.nanmin(data_grid),np.nanmax(data_grid), 10)
cbar = plt.colorbar(CS, ticks=v)
#cbar.set_ticklabels(v)
cbar.set_label(r'$\mathrm{Accumulated\ time\ [days]}$')
plt.xlabel('RA [hours]')
plt.ylabel('Dec [deg]')
if show_ecliptic:
a=np.linspace(-np.pi, np.pi)
b=np.zeros_like(a)
res=np.rad2deg(ecliptic2equatorial(a,b))
cra = np.linspace(24,0)
plt.plot(cra,res[:,1],lw=1.2,color="k")
# Sun in june
#plt.plot([90.],[23.433],'o',color="yellow", markersize=8, zorder=5)
plt.text(18,80., r"$\mathrm{Summer\ sky}$", color='k', size='small', ha="center",weight='black')
# Sun in december
#plt.plot([-90.],[-23.433],'o',color="yellow", markersize=8, zorder=5)
plt.text(6,-80., r"$\mathrm{Winter\ sky}$", color='k', size='small', ha="center",weight='black')
###########################################################################
if not SAA: note = '_noSAA'
else: note = '_SAA'
if not pst_factor == 1.: note += '_%1.1fpst' % pst_factor
# Save plot
if save:
fname = '%s-sky_map-%d-mag%d%s_accumulated' % (orbit_id,min_t_obs_per_orbit,mag_max,note)
figures.savefig(folder_figures+fname, fig, fancy)
np.savez_compressed(folder_misc+fname, ra_grid=ra_grid, dec_grid=dec_grid, data_grid=data_grid, ticks=v)
print 'saved as %s' % fname
if show: plt.show()
| {
"repo_name": "kuntzer/SALSA-public",
"path": "21a_plot_Sky_corverage.py",
"copies": "1",
"size": "10180",
"license": "bsd-3-clause",
"hash": -3419558329013920300,
"line_mean": 28.4219653179,
"line_max": 171,
"alpha_frac": 0.6353634578,
"autogenerated": false,
"ratio": 2.7146666666666666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3850030124466666,
"avg_score": null,
"num_lines": null
} |
# 21 septembre 2017
# astro_v2.py
from pylab import *
import os
def B3V_eq(x):
    """Ordinate of the B3V reference line at abscissa *x*.

    :param x: g-r abscissa (scalar or numpy array) in a u-g vs g-r diagram
    :return: u-g value of the B3V line at that abscissa
    """
    slope = 0.9909
    intercept = -0.8901
    return slope * x + intercept
def lignes(filename, n_g_r, n_u_g):
    """Yield (g_r, u_g) pairs read from a '|'-separated catalogue file.

    :param filename: file holding one star per line, columns separated by '|'
    :param n_g_r: 1-based column number of the g-r colour
    :param n_u_g: 1-based column number of the u-g colour
    :return: generator of (g_r, u_g) tuples; an empty or missing column
        yields None for that value
    """
    def _field(fields, col):
        # 1-based column extraction; spaces are removed wholesale, matching
        # the original character-by-character parser. A missing column (short
        # line) is treated as empty instead of crashing.
        raw = fields[col - 1] if col - 1 < len(fields) else ""
        raw = raw.replace(" ", "").rstrip("\n")
        return float(raw) if raw else None

    # `with` guarantees the handle is closed even if the consumer stops early
    # (the original leaked the handle on any exception).
    with open(filename, 'r') as data:
        for line in data:
            fields = line.split("|")
            yield _field(fields, n_g_r), _field(fields, n_u_g)
def recupere_magnitudes(filename, n_g_r, n_u_g):
    """Collect the g-r and u-g columns of *filename* into two parallel lists.

    :param filename: '|'-separated catalogue file, one star per line
    :param n_g_r: 1-based column number of g-r in the input file
    :param n_u_g: 1-based column number of u-g in the input file
    :return: (list of g-r values, list of u-g values); missing entries are None
    """
    # Materialise the generator, then transpose the pairs into two columns.
    pairs = list(lignes(filename, n_g_r, n_u_g))
    if not pairs:
        return [], []
    colonne_g_r, colonne_u_g = (list(col) for col in zip(*pairs))
    return colonne_g_r, colonne_u_g
def find_hot_stars(input_file, output_file, n_g_r=6, n_u_g=5):
    """Copy to *output_file* the lines of *input_file* that describe hot stars.

    A star is kept when its u-g colour lies on or below the B3V line at its
    g-r abscissa, i.e. u-g <= B3V_eq(g-r).

    :param input_file: '|'-separated catalogue, one star per line
    :param output_file: destination file (overwritten)
    :param n_g_r: 1-based column number of g-r in the input file
    :param n_u_g: 1-based column number of u-g in the input file
    :return: None; the filtered catalogue is written to *output_file*
    """
    def _field(fields, col):
        # 1-based column with spaces removed, mirroring the original
        # character-level parser; short lines yield "" instead of crashing.
        raw = fields[col - 1] if col - 1 < len(fields) else ""
        return raw.replace(" ", "").rstrip("\n")

    # `with` closes both handles even if a malformed line raises.
    with open(input_file, 'r') as data, open(output_file, "w") as nfile:
        for i, line in enumerate(data, start=1):
            if i % 10000 == 0:
                print("avancement : ", i)  # progress heartbeat
            fields = line.split("|")
            u_g = _field(fields, n_u_g)
            g_r = _field(fields, n_g_r)
            if u_g != "" and g_r != "" and float(u_g) <= B3V_eq(float(g_r)):
                nfile.write(line)
def find_hot_stars_v2(input_file, output_file, n_g_r=7, n_u_g=6):
    """Same filtering as find_hot_stars, with column defaults for the v2 layout.

    :param input_file: '|'-separated catalogue, one star per line
    :param output_file: destination file (overwritten)
    :param n_g_r: 1-based column number of g-r in the input file
    :param n_u_g: 1-based column number of u-g in the input file
    :return: None; the filtered catalogue is written to *output_file*
    """
    # The original duplicated find_hot_stars line for line; only the default
    # column numbers differed, so delegate instead of repeating the logic.
    find_hot_stars(input_file, output_file, n_g_r, n_u_g)
def fichier_reg(input_file, output_file, n_alpha, n_delta):
    """Write a DS9 .reg region file with one 5-arcsecond circle per input star.

    :param input_file: '|'-separated file of (hot) stars, one per line
    :param output_file: .reg destination file (overwritten)
    :param n_alpha: 1-based column number of the star's alpha coordinate
    :param n_delta: 1-based column number of the star's delta coordinate
    :return: None
    """
    def _field(fields, col):
        # 1-based column with spaces removed; short lines yield "".
        raw = fields[col - 1] if col - 1 < len(fields) else ""
        return raw.replace(" ", "").rstrip("\n")

    header = (
        '# Region file format: DS9 version 4.1\n'
        'global color=green dashlist=8 3 width=1 font="helvetica 10 normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\n'
        'fk5\n'
    )
    # `with` closes both handles even if a malformed line raises.
    with open(input_file, 'r') as data, open(output_file, "w") as nfile:
        nfile.write(header)
        for line in data:
            fields = line.split("|")
            alpha = _field(fields, n_alpha)
            delta = _field(fields, n_delta)
            nfile.write('circle(' + alpha + ',' + delta + ',5")\n')
def trace_graphique(titre, data_filename, SP_filename, n_g_r_data=6, n_u_g_data=5, n_g_r_SP=4, n_u_g_SP=3,
                    hot_stars_filename=None):
    """Plot a u-g vs g-r colour-colour diagram with the main sequence and B3V line.

    :param titre: title to put on the plot
    :param data_filename: '|'-separated file with the stars to plot
    :param SP_filename: file with main-sequence reference points
    :param n_g_r_data: 1-based column number of g-r in data_filename
    :param n_u_g_data: 1-based column number of u-g in data_filename
    :param n_g_r_SP: 1-based column number of g-r in SP_filename
    :param n_u_g_SP: 1-based column number of u-g in SP_filename
    :param hot_stars_filename: optional file containing only the hot stars of
        data_filename, over-plotted in a different colour
    :return: None; shows the plot window (blocking) via plt.show()
    """
    # Fetch the colour columns from both files.
    g_r_data, u_g_data = recupere_magnitudes(data_filename, n_g_r_data, n_u_g_data)
    g_r_SP, u_g_SP = recupere_magnitudes(SP_filename, n_g_r_SP, n_u_g_SP)
    # Configure the axes; u-g is conventionally plotted increasing downwards.
    plt.xlabel('g-r')
    plt.ylabel('u-g')
    plt.gca().invert_yaxis()
    # Plot u-g vs g-r for our stars.
    plt.plot(g_r_data, u_g_data, '.', c='red', label='Étoiles')
    if hot_stars_filename != None:
        g_r_hot_stars, u_g_hot_stars = recupere_magnitudes(hot_stars_filename, n_g_r_data, n_u_g_data)
        plt.plot(g_r_hot_stars, u_g_hot_stars, '.', c='blue', label='Étoiles chaudes')
    # Draw the B3V line across the observed g-r range (None = missing value).
    m = min([x for x in g_r_data if x != None])
    M = max([y for y in g_r_data if y != None])
    x = np.linspace(m, M, 100)
    plt.plot(x, B3V_eq(x), c='orange', label='Ligne B3V')
    # Draw the main sequence.
    plt.plot(g_r_SP, u_g_SP, c='black', label='Séquence principale')
    # Title, legend, and display.
    title(titre)
    plt.legend()
    plt.show()
def traiter_data(input_file, output_file_hot_stars, output_file_reg, output_folder=None , n_g_r=6, n_u_g=5, n_alpha=3, n_delta=4):
    """Run the hot-star pipeline: filter *input_file*, then emit a DS9 .reg file.

    :param input_file: '|'-separated star catalogue to process
    :param output_file_hot_stars: destination for the hot-star subset
    :param output_file_reg: destination for the DS9 region file
    :param output_folder: optional folder prefixed to all three paths
        (created if missing)
    :param n_g_r: 1-based g-r column in input_file
    :param n_u_g: 1-based u-g column in input_file
    :param n_alpha: 1-based alpha-coordinate column in input_file
    :param n_delta: 1-based delta-coordinate column in input_file
    :return: None
    """
    if output_folder is not None:
        if not os.path.exists(output_folder):
            # Bug fix: os.system("mkdir " + folder) broke on folder names
            # containing spaces or shell metacharacters.
            os.mkdir(output_folder)
        input_file = output_folder + "/" + input_file
        output_file_hot_stars = output_folder + "/" + output_file_hot_stars
        output_file_reg = output_folder + "/" + output_file_reg
    if not os.path.exists(input_file):
        print("le fichier ", input_file, " n'existe pas")
        # Robustness fix: the original fell through and crashed later when
        # opening the missing file; bail out after warning instead.
        return
    # Never clobber an existing output unless the user agrees; otherwise
    # prepend "new_" until the name is free.
    if os.path.exists(output_file_hot_stars):
        reponse = input("Le fichier " + output_file_hot_stars + " existe deja : voulez vous l'ecraser ? (o/n) ")
        if reponse == "n":
            while os.path.exists(output_file_hot_stars):
                output_file_hot_stars = "new_" + output_file_hot_stars
    if os.path.exists(output_file_reg):
        reponse = input("Le fichier " + output_file_reg + " existe deja : voulez vous l'ecraser ? (o/n) ")
        if reponse == "n":
            while os.path.exists(output_file_reg):
                output_file_reg = "new_" + output_file_reg
    print("\noutput_file_hot_stars = ", output_file_hot_stars)
    print("output_file_reg = ", output_file_reg)
    find_hot_stars(input_file, output_file_hot_stars, n_g_r, n_u_g)
    print("\ncatalogue d'etoiles chaudes ecrit")
    fichier_reg(output_file_hot_stars, output_file_reg, n_alpha, n_delta)
    print("\nfichier .reg ecrit")
def get_picture(output_file, region_name, x_size, y_size, output_folder=None, coordinate_system="J2000", survey="DSS2-red", ra="", dec=""):
    """Download a FITS image of *region_name* from the ESO DSS archive via wget.

    :param output_file: file name for the downloaded FITS image
    :param region_name: sky region to query (spaces become '+' in the URL)
    :param x_size: image width passed to the archive
    :param y_size: image height passed to the archive
    :param output_folder: optional folder for the output (created if missing)
    :param coordinate_system: equinox parameter of the query
    :param survey: Sky-Survey parameter of the query
    :param ra: optional right ascension for the query
    :param dec: optional declination for the query
    :return: None; the image is written to disk by wget
    """
    # Local import: only this download helper needs subprocess.
    import subprocess
    if output_folder is not None:
        print(output_folder)
        if not os.path.exists(output_folder):
            # os.mkdir handles spaces/metacharacters that the original
            # os.system("mkdir ...") shell escaping only partially covered.
            os.mkdir(output_folder)
        output_file = output_folder + "/" + output_file
    # The archive expects '+' instead of spaces in the region name.
    region_name_for_link = region_name.replace(" ", "+")
    url = ("archive.eso.org/dss/dss/image?ra=" + ra + "&dec=" + dec
           + "&equinox=" + coordinate_system + "&name=" + region_name_for_link
           + "&x=" + str(x_size) + "&y=" + str(y_size) + "&Sky-Survey=" + survey
           + "&mime-type=download-fits&statsmode=WEBFORM")
    # List-form subprocess.run avoids the shell entirely, replacing the
    # hand-rolled backslash escaping of the original os.system call.
    subprocess.run(["wget", url, "-O", output_file])
def recup_catalogue(region_name, output_file, cone_size, output_folder=None, size_unit='arcmin'):
    """Download a VPHAS+ (VizieR II/341) catalogue cone search via wget.

    :param region_name: sky region to query (spaces become '+' in the URL)
    :param output_file: file name for the '|'-separated catalogue
    :param cone_size: cone-search radius
    :param output_folder: optional folder for the output (created if missing)
    :param size_unit: unit of cone_size (default arcmin)
    :return: None; the catalogue is written to disk by wget
    """
    # Local import: only this download helper needs subprocess.
    import subprocess
    if output_folder is not None:
        if not os.path.exists(output_folder):
            # os.mkdir handles spaces/metacharacters that the original
            # os.system("mkdir ...") shell escaping only partially covered.
            os.mkdir(output_folder)
        output_file = output_folder + "/" + output_file
    # VizieR expects '+' instead of spaces in the target name.
    region_name_for_link = region_name.replace(" ", "+")
    url = ('http://vizier.cfa.harvard.edu/viz-bin/asu-tsv/VizieR?-source=II/341/&-oc.form=dec&-out.max=unlimited&-c='
           + region_name_for_link + '&-c.eq=J2000&-c.r=' + str(cone_size) + '&-c.u=' + size_unit
           + '&-c.geom=r&-out=RAJ2000&-out=DEJ2000&-out=u-g&-out=g-r2&-out=umag&-out=e_umag&-out=gmag&-out=e_gmag&-out=r2mag&-out=e_r2mag&-out=Hamag&-out=e_Hamag&-out=rmag&-out=e_rmag&-out=imag&-out=e_imag&-out.add=_Glon,_Glat&-oc.form=dec&-out.form=|+-Separated-Values')
    # List-form subprocess.run avoids the shell entirely, replacing the
    # hand-rolled backslash escaping of the original os.system call.
    subprocess.run(["wget", url, "-O", output_file])
def save_plot(output_file, titre, data_filename, SP_filename, n_g_r_data=6, n_u_g_data=5, n_g_r_SP=4, n_u_g_SP=3,
              hot_stars_filename=None):
    """Same diagram as trace_graphique, but saved to *output_file* instead of shown.

    :param output_file: path the figure is written to (plt.savefig)
    :param titre: title to put on the plot
    :param data_filename: '|'-separated file with the stars to plot
    :param SP_filename: file with main-sequence reference points
    :param n_g_r_data: 1-based column number of g-r in data_filename
    :param n_u_g_data: 1-based column number of u-g in data_filename
    :param n_g_r_SP: 1-based column number of g-r in SP_filename
    :param n_u_g_SP: 1-based column number of u-g in SP_filename
    :param hot_stars_filename: optional file containing only the hot stars of
        data_filename, over-plotted in a different colour
    :return: None; writes the figure to disk
    """
    # Fetch the colour columns from both files.
    g_r_data, u_g_data = recupere_magnitudes(data_filename, n_g_r_data, n_u_g_data)
    g_r_SP, u_g_SP = recupere_magnitudes(SP_filename, n_g_r_SP, n_u_g_SP)
    # Configure the axes; u-g is conventionally plotted increasing downwards.
    plt.xlabel('g-r')
    plt.ylabel('u-g')
    plt.gca().invert_yaxis()
    # Plot u-g vs g-r for our stars.
    plt.plot(g_r_data, u_g_data, '.', c='red', label='Étoiles')
    if hot_stars_filename != None:
        g_r_hot_stars, u_g_hot_stars = recupere_magnitudes(hot_stars_filename, n_g_r_data, n_u_g_data)
        plt.plot(g_r_hot_stars, u_g_hot_stars, '.', c='blue', label='Étoiles chaudes')
    # Draw the B3V line across the observed g-r range (None = missing value).
    m = min([x for x in g_r_data if x != None])
    M = max([y for y in g_r_data if y != None])
    x = np.linspace(m, M, 100)
    plt.plot(x, B3V_eq(x), c='orange', label='Ligne B3V')
    # Draw the main sequence.
    plt.plot(g_r_SP, u_g_SP, c='black', label='Séquence principale')
    # Title, legend, and save to disk.
    title(titre)
    plt.legend()
    plt.savefig(output_file)
"""
def traiter_data(region_name, output_file_picture, output_file_data, output_file_data_reg, xsize_picture, ysize_picture ,size, output_folder=None, coordinate_system="J2000", survey="DSS2-red", ra="", dec="", n_g_r=7, n_u_g=6, n_alpha=1, n_delta=2):
if output_folder is not None:
if not os.path.exists(output_folder):
os.system("mkdir " + output_folder)
#ici il faut changer les noms des fichiers
output_file_picture = output_folder + "/" + output_file_picture
output_file_data = output_folder + "/" + output_file_data
output_file_data_reg = output_folder + "/" + output_file_data_reg
recup_catalogue(region_name, 'data', str(size), output_folder)
get_picture(output_file_picture, region_name, str(xsize_picture), str(ysize_picture), output_folder, coordinate_system, survey, ra, dec)
traiter_data('data', output_file_data, output_file_data_reg, output_folder, n_g_r, n_u_g, n_alpha, n_delta)
"""
def analyser_region(region_name, cone_size):
    """Prepare the output-file names for a full analysis of *region_name*.

    The actual pipeline steps (download, filter, .reg, plots) are still TODO
    below; for now this only derives the file/folder names.

    :param region_name: sky region, e.g. "RCW 49"
    :param cone_size: cone-search radius in arcmin, as a string
    :return: None
    """
    # Bug fix: the original condition was inverted (`if char != " "` appended
    # "_"), which turned every NON-space character into an underscore and
    # kept the spaces. Intended behaviour: spaces -> underscores.
    region_name_for_filenames = region_name.replace(" ", "_")
    output_folder = "data/" + region_name_for_filenames + " (" + cone_size + "arcmin)"
    output_file_data = region_name_for_filenames + ".data.txt"
    output_file_data_hot_stars = region_name_for_filenames + ".data_hot_stars.txt"
    output_file_reg = region_name_for_filenames + ".reg"
    output_file_fits = region_name_for_filenames + ".fits"
    output_file_plot = region_name_for_filenames
    # TODO: download the catalogue and the FITS image
    # TODO: filter the data -> hot stars
    # TODO: create the .reg file
    # TODO: save the colour-colour plot
    # TODO: save the image with the hot stars highlighted
    """save_plot("graph", "u-g vs g-r, région HII RCW 49, cone search : 3\'", "data_modifie.txt", "SP.txt", 7, 6, 4, 3,
                 "etoiles_chaudes_et_massives.txt")
    """
#get_picture("image.fits", "RCW 49", 10, 10, output_folder = "dossier test")
#traiter_data("data_modifie.txt", "etoiles_chaudes_et_massives.txt", "catalogue.reg")
#trace_graphique("u-g vs g-r, région HII RCW 49, cone search : 3\'", "data_modifie.txt", "SP.txt", 7, 6, 4, 3,
#"etoiles_chaudes_et_massives.txt")
#os.system("wget 'archive.eso.org/dss/dss/image?ra=&dec=&equinox=J2000&name=RCW+49&x=10&y=10&Sky-Survey=DSS2-red&mime-type=download-fits&statsmode=WEBFORM' -O image.fits")
#os.system("wget 'http://vizier.cfa.harvard.edu/viz-bin/asu-tsv/VizieR?-source=II/341/&-oc.form=dec&-out.max=unlimited&-c=RCW+49&-c.eq=J2000&-c.r=3&-c.u=arcmin&-c.geom=r&-out=RAJ2000&-out=DEJ2000&-out=u-g&-out=g-r2&-out=umag&-out=e_umag&-out=gmag&-out=e_gmag&-out=r2mag&-out=e_r2mag&-out=Hamag&-out=e_Hamag&-out=rmag&-out=e_rmag&-out=imag&-out=e_imag&-out.add=_Glon,_Glat&-oc.form=dec&-out.form=|+-Separated-Values' -O test.txt")
#os.system("ds9 image.fits -regions catalogue.reg")
#fonction("RCW 49", "image.fits", "etoiles_chaudes_et_massives.txt", "catalogue.reg", 10, 10, 3, output_folder="nouveau_dossier_test_3_42")
| {
"repo_name": "anthonygi13/Recherche_etoiles_chaudes",
"path": "astro_v2.py",
"copies": "1",
"size": "18168",
"license": "apache-2.0",
"hash": 5559698047031839000,
"line_mean": 39.2638580931,
"line_max": 429,
"alpha_frac": 0.5966187565,
"autogenerated": false,
"ratio": 2.877812995245642,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8956484238733954,
"avg_score": 0.0035895026023375414,
"num_lines": 451
} |
# 222. Count Complete Tree Nodes
#
# Given a complete binary tree, count the number of nodes.
#
# Definition of a complete binary tree from Wikipedia:
# In a complete binary tree every level, except possibly the last, is completely filled,
# and all nodes in the last level are as far left as possible.
# It can have between 1 and 2h nodes inclusive at the last level h.
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def countNodes(self, root):
        """
        :type root: TreeNode
        :rtype: int
        Compare the depths of the leftmost and rightmost paths: when they
        are equal the tree is perfect and holds 2**depth - 1 nodes;
        otherwise count the root plus both subtrees recursively.  Each
        level costs an O(log n) depth probe, O(log^2 n) overall.
        http://www.tangjikai.com/algorithms/leetcode-222-complete-tree-nodes
        """
        left_depth = self.getDepth(root, True)
        right_depth = self.getDepth(root, False)
        if left_depth == right_depth:
            # Perfect tree of this depth (handles root is None: 2**0-1 == 0).
            return int(pow(2, left_depth)) - 1
        # Bug fix: the original returned only self.countNodes(root.left),
        # dropping the root itself and the entire right subtree.
        return 1 + self.countNodes(root.left) + self.countNodes(root.right)

    def getDepth(self, root, isLeft):
        """Length of the path that always goes left (or right) from root."""
        level = 0
        while root:
            if isLeft:
                root = root.left
            else:
                root = root.right
            level += 1
        return level
| {
"repo_name": "gengwg/leetcode",
"path": "222_count_complete_tree_nodes.py",
"copies": "1",
"size": "1236",
"license": "apache-2.0",
"hash": -7309834225454632000,
"line_mean": 28.4285714286,
"line_max": 88,
"alpha_frac": 0.5995145631,
"autogenerated": false,
"ratio": 3.8990536277602525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49985681908602525,
"avg_score": null,
"num_lines": null
} |
# 2.2.4 http.client : https://docs.python.org/3.5/library/http.client.html
from http import client
from urllib.parse import urlencode
def get():
    """Issue two sequential GET requests over one HTTP connection and print the statuses."""
    print("get method")
    conn = client.HTTPConnection("www.example.com")
    conn.request("GET", "/index.html")
    r1 = conn.getresponse()
    print(r1.status, r1.reason)
    # The body must be fully read before reusing the connection for the
    # next request.
    data1 = r1.read()
    # Second request on the same (keep-alive) connection; this path is
    # expected to return a 404-style status on example.com.
    conn.request("GET", "/parrot.spam")
    r2 = conn.getresponse()
    print(r2.status, r2.reason)
    data2 = r2.read()
    conn.close()
def head():
    """Issue a HEAD request: headers only, the response carries no body."""
    print("\nhead method")
    conn = client.HTTPConnection("www.example.com")
    conn.request("HEAD", "/index.html")
    res = conn.getresponse()
    print(res.status, res.reason)
    data = res.read()
    # A HEAD response has no body, so read() returns b'' (length 0).
    print(len(data))
    print(data == b'')
def post():
    """POST a form-encoded query to bugs.python.org and print the decoded reply."""
    print("\npost method")
    # Form fields are URL-encoded into the request body.
    params = urlencode({
        '@number': 12524,
        '@type': 'issue',
        '@action': 'show',
    })
    headers = {
        'Content-type': 'application/x-www-form-urlencoded',
        'Accept': 'text/plain'
    }
    conn = client.HTTPConnection("bugs.python.org")
    conn.request("POST", "", params, headers)
    response = conn.getresponse()
    print(response.status, response.reason)
    print(response.read().decode("utf-8"))
# Example code only; does not run as-is (needs a server listening on localhost:8888)
def put():
    """Send a PUT request to localhost:8888 (demo only; requires a local server)."""
    conn = client.HTTPConnection("localhost", 8888)
    conn.request("PUT", "/file", "***filecontents***")
    response = conn.getresponse()
    print(response.status, response.reason)
# Run the demos sequentially when executed as a script (put() is not
# invoked here).
if __name__ == '__main__':
    get()
    head()
    post()
| {
"repo_name": "gnidoc327/django_web_dev_chater_2",
"path": "src/client/httplib_ex/httplib35.py",
"copies": "1",
"size": "1520",
"license": "mit",
"hash": 9042315069743869000,
"line_mean": 21.1764705882,
"line_max": 74,
"alpha_frac": 0.6021220159,
"autogenerated": false,
"ratio": 3.3585746102449887,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9387167214380283,
"avg_score": 0.014705882352941176,
"num_lines": 68
} |
# 225. Implement Stack using Queues - LeetCode
# https://leetcode.com/problems/implement-stack-using-queues/description/
class MyStack(object):
    """LIFO stack for LeetCode 225.

    NOTE: despite the problem title ("using Queues"), this implementation is
    backed by a Python list plus an explicit top pointer; slots above the
    pointer are stale and get reused by later pushes.
    """

    def __init__(self):
        """Initialize an empty stack."""
        self.lst = list()    # backing storage; entries above top_ptr are stale
        self.top_ptr = -1    # index of the current top element; -1 means empty

    def push(self, x):
        """
        Push element x onto stack.
        :type x: int
        :rtype: void
        """
        if self.top_ptr == len(self.lst) - 1:
            # No stale slot available: grow the backing list.
            self.lst.append(x)
            self.top_ptr += 1
        else:
            # Reuse a slot left behind by an earlier pop.
            self.top_ptr += 1
            self.lst[self.top_ptr] = x
        return

    def pop(self):
        """
        Removes the element on top of the stack and returns that element.
        Returns False when the stack is empty.
        :rtype: int
        """
        if self.top_ptr == -1:
            return False
        ret = self.top()
        self.top_ptr -= 1
        return ret

    def top(self):
        """
        Get the top element without removing it; False when the stack is empty.
        :rtype: int
        """
        # Bug fix: the original tested `self.top == -1`, comparing the bound
        # method object to -1 (always False), so top() raised IndexError on
        # an empty stack instead of returning False.
        if self.top_ptr == -1:
            return False
        return self.lst[self.top_ptr]

    def empty(self):
        """
        Returns whether the stack is empty.
        :rtype: bool
        """
        return self.top_ptr == -1
# Expected-behaviour table: (method names, argument lists, expected returns).
ans = [
    (["MyStack","push","pop","push","top"],
    [[],[1],[],[2],[]],
    [None,None,1,None,2]),
]
# Your MyStack object will be instantiated and called as such:
# Minimal ad-hoc test driver (Python 2: note the `print` statement below).
for i in ans:
    r = []
    for j in range(len(i[0])):
        if i[0][j] == "MyStack":
            obj = MyStack()
            r.append(None)
        if i[0][j] == "push":
            r.append(obj.push(i[1][j][0]))
        if i[0][j] == "pop":
            r.append(obj.pop())
        if i[0][j] == "top":
            r.append(obj.top())
    print r, r == i[2]
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/225_implement-stack-using-queues.py",
"copies": "1",
"size": "1762",
"license": "mit",
"hash": 4488783732710804500,
"line_mean": 23.1506849315,
"line_max": 73,
"alpha_frac": 0.4608399546,
"autogenerated": false,
"ratio": 3.5813008130081303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45421407676081305,
"avg_score": null,
"num_lines": null
} |
# 229. Majority Element II
#
# Given an integer array of size n,
# find all elements that appear more than n/3 times.
# The algorithm should run in linear time and in O(1) space.
#
class Solution(object):
    def majorityElement(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]
        http://bookshadow.com/weblog/2015/06/29/leetcode-majority-element-ii/
        https://leetcode.com/problems/majority-element-ii/discuss/63520/Boyer-Moore-Majority-Vote-algorithm-and-my-elaboration
        Extended Boyer-Moore majority vote:
        at most two distinct values can occur more than n/3 times, so keep
        two candidates n1, n2 with vote counters c1, c2.
        For each num: if it matches a candidate, increment that counter;
        otherwise, if a counter is 0, adopt num as that candidate (count 1);
        otherwise decrement both counters.
        Finally re-count each surviving candidate to verify it really occurs
        more than n/3 times before returning it.
        """
        n1 = n2 = None
        c1 = c2 = 0
        for num in nums:
            if num == n1:
                c1 += 1
            elif num == n2:
                c2 += 1
            elif c1 == 0:
                n1 = num
                c1 = 1
            elif c2 == 0:
                n2 = num
                c2 = 1
            else:
                # num matches neither candidate and both counters are
                # positive: cancel one vote from each.
                c1 -= 1
                c2 -= 1
        size = len(nums)
        return [n for n in (n1, n2) if n is not None and nums.count(n) > size/3]

    # O(n) space
    # NOTE: this redefinition (and the next) shadows the method above;
    # only the last definition is bound on the class at runtime.
    def majorityElement(self, nums):
        res = []
        if not nums:
            return []
        for num in set(nums):
            if nums.count(num) > len(nums)/3:
                res.append(num)
        return res

    # 1 liner. O(n) space.
    def majorityElement(self, nums):
        return [n for n in set(nums) if nums.count(n) > len(nums)/3]
| {
"repo_name": "gengwg/leetcode",
"path": "229_majority_element_ii.py",
"copies": "1",
"size": "1979",
"license": "apache-2.0",
"hash": 7174184351880481000,
"line_mean": 24.4393939394,
"line_max": 126,
"alpha_frac": 0.5175699821,
"autogenerated": false,
"ratio": 2.4298118668596236,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.34473818489596236,
"avg_score": null,
"num_lines": null
} |
# 2-2 Implement an algorithm to find the kth to last element of a
# a singly linked list
# Pseudo:
# Iterate through all to find the length
# Iterate again to get the kth last
class Node:
    """A node in a singly linked list."""

    def __init__(self, data):
        # Payload stored in this node.
        self.data = data
        # Successor node; None marks the end of the list.
        self.next = None

    def __str__(self):
        # Delegate to the payload's own string form.
        return str(self.data)
class LinkedList:
    '''A linked list comprised of Nodes.

    Keeps three pointers: `start` (head), `current` (traversal cursor) and
    `last` (set at construction only; push() does not update it).
    '''
    def __init__(self, start=None):
        self.start = start      # head of the list
        self.current = start    # traversal cursor
        self.last = start       # NOTE(review): never updated by push()
    def __str__(self):
        # Show only the cursor position, not the whole list.
        returnString = 'LINKED LIST\n'
        returnString += 'Current node data: '
        returnString += str(self.current)
        return returnString
    def next(self):
        # Advance the cursor; returns the new current node, or False when the
        # cursor is unset or already on the tail (cursor stays on the tail).
        if self.current == None:
            return False
        elif self.current.next == None:
            return False
        else:
            self.current = self.current.next
            return self.current
    def reset(self):
        # Rewind the cursor to the head.
        self.current = self.start
    # Pushes to the END; sets the current pointer to the
    # node pushed, or the last node
    def push(self, n):
        if self.start == None:
            # Empty list: the pushed node becomes the head.
            print('start')
            self.start = n
            return
        if (self.current == None):
            print('resetting')
            self.reset()
        # Walk to the tail, then append.
        while (self.current.next != None):
            self.next()
        self.current.next = n
        self.next()
    def printAll(self):
        # Print every node after the head with a 1-based index.
        # NOTE(review): the head itself is never printed -- the first next()
        # call moves the cursor off it before the first print.
        self.reset()
        print('LINKED LIST')
        currentNodeIndex = 1
        while self.next() != False:
            print(str(currentNodeIndex) + ' ' + str(self.current))
            currentNodeIndex += 1
        # self.next()
        self.reset()
    def findKLast(self, n):
        # Return [index, data] for the kth-to-last element, or False when the
        # list is empty or shorter than n.
        # NOTE(review): because next() returns False on the tail instead of
        # advancing past it, listLength ends up one less than the node count,
        # so findKLast(1) yields the second-to-last node, not the last --
        # confirm whether this off-by-one is intended.
        if (self.start == None):
            return False
        listLength = 0
        self.reset()
        # Advance the cursor n nodes; bail out if the list is too short.
        for i in range(0, n):
            listLength += 1
            if (self.next() == False):
                return False
        # Two-pointer walk: nodeToReturn trails the cursor by n nodes.
        nodeToReturn = self.start
        while(self.next() != False):
            listLength += 1
            nodeToReturn = nodeToReturn.next
        nodeIndexToReturn = listLength - int(n)
        return [nodeIndexToReturn, str(nodeToReturn)]
# Ad-hoc manual exercise: build a one-node list, poke the cursor, then push
# several nodes and query the kth-to-last element.
myLinkedList = LinkedList(Node('Dave'))
print(myLinkedList)
# next() on a single-node list returns False and leaves the cursor in place.
myLinkedList.next()
myLinkedList.next()
myLinkedList.next()
print(myLinkedList)
myLinkedList.reset()
print(myLinkedList)
myLinkedList.push(Node('Nora'))
myLinkedList.push(Node('Luca'))
myLinkedList.push(Node('Riley'))
myLinkedList.push(Node('lol'))
myLinkedList.push(Node('k'))
myLinkedList.printAll()
print(myLinkedList.findKLast(1))
| {
"repo_name": "dmart914/CTCI",
"path": "02-linked-lists/2-2-kth-last.py",
"copies": "1",
"size": "2647",
"license": "apache-2.0",
"hash": -3433641861091806000,
"line_mean": 23.0636363636,
"line_max": 66,
"alpha_frac": 0.5746127692,
"autogenerated": false,
"ratio": 3.814121037463977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9872755348164803,
"avg_score": 0.003195691699834781,
"num_lines": 110
} |
# 23/05/2017
# BFS implementation with Manhattan distance heuristic
from queue import Queue
class Node(object):
    """A search node: a board square plus a link back to where it came from."""

    def __init__(self, value, parent):
        self.value = value      # (x, y) coordinates of this square
        self.parent = parent    # predecessor Node, or None for the start
def knight_move(x, y):
    """BFS for a shortest knight path from (0, 0) to (x, y) on an infinite board.

    Prints the path found and returns its length in moves.  A Manhattan
    heuristic prunes squares whose distance to the target exceeds the best
    distance seen so far by more than 3.

    :param x: target column
    :param y: target row
    :return: number of knight moves in the printed path
    """
    possible_moves = [(-1, -2), (1, -2), (-1, 2), (1, 2),
                      (-2, -1), (2, -1), (-2, 1), (2, 1)]

    def manhattan(tile):
        return abs(x - tile[0]) + abs(y - tile[1])

    best_distance = manhattan((0, 0))
    # Queue entries are (square, parent_entry) pairs; the parent links are
    # used to rebuild the path once the target is found.
    frontier = Queue()
    frontier.put(((0, 0), None))
    visited = {(0, 0)}
    curr = None
    found = False
    # NOTE: as in the original, this would block on get() if the target were
    # unreachable; every square is knight-reachable, so the loop terminates.
    while not found:
        curr = frontier.get()
        for dx, dy in possible_moves:
            tile = (curr[0][0] + dx, curr[0][1] + dy)
            if tile == (x, y):
                curr = (tile, curr)
                found = True
                break
            dist = manhattan(tile)
            if dist > best_distance + 3:
                continue  # strayed too far from the target: prune
            if dist < best_distance:
                best_distance = dist
            if tile not in visited:
                # Bug fix: the original did `visited |= set(tile)`, which
                # inserts the two coordinate integers instead of the tuple,
                # so squares were re-enqueued many times over.
                visited.add(tile)
                frontier.put((tile, curr))

    # Walk the parent links back to the start and print the path forwards.
    path = []
    while curr is not None:
        path.append(str(curr[0]))
        curr = curr[1]
    print('{}'.format(" -> ".join(path[::-1])))
    return len(path) - 1
| {
"repo_name": "tlgs/dailyprogrammer",
"path": "Python/easy/e316.py",
"copies": "2",
"size": "1343",
"license": "unlicense",
"hash": 5111320444954970000,
"line_mean": 26.9791666667,
"line_max": 64,
"alpha_frac": 0.487714073,
"autogenerated": false,
"ratio": 3.4973958333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4985109906333333,
"avg_score": null,
"num_lines": null
} |
# 230. Kth Smallest Element in a BST
#
# Given a binary search tree, write a function kthSmallest to find the kth smallest element in it.
#
# Note:
# You may assume k is always valid, 1 ≤ k ≤ BST's total elements.
#
# Follow up:
# What if the BST is modified (insert/delete operations) often and you need to find the kth smallest frequently?
# How would you optimize the kthSmallest routine?
class Solution(object):
    # iterative
    def kthSmallest(self, root, k):
        # In-order traversal (left -> root -> right) with an explicit stack.
        # http://bookshadow.com/weblog/2015/07/02/leetcode-kth-smallest-element-bst/
        pending = []
        cur = root
        while cur:
            pending.append(cur)
            cur = cur.left
        seen = 0
        while pending and seen < k:
            cur = pending.pop()
            seen += 1
            branch = cur.right
            while branch:
                pending.append(branch)
                branch = branch.left
        return cur.val

    # https://www.youtube.com/watch?v=CfNRc82ighw
    def kthSmallest(self, root, k):
        # Shared counters on self drive the nested recursion.
        self.res = 0
        self.k = k

        def visit(node):
            # In-order walk; stop decrementing once the k-th value is found.
            if node is None:
                return
            visit(node.left)
            self.k -= 1
            if self.k == 0:
                self.res = node.val
                return
            visit(node.right)

        visit(root)
        return self.res

    # same as above, but use separate method
    def kthSmallest(self, root, k):
        """
        :type root: TreeNode
        :type k: int
        :rtype: int
        """
        self.k = k
        self.res = 0
        self.inorder(root)
        return self.res

    def inorder(self, node):
        if node is None:
            return
        self.inorder(node.left)
        self.k -= 1
        if self.k == 0:
            self.res = node.val
            return
        self.inorder(node.right)

    # inorder traversal and put vals into a list
    def kthSmallest(self, root, k):
        """
        :type root: TreeNode
        :type k: int
        :rtype: int
        """
        ordered = []
        self.inorder(root, ordered)
        return ordered[k - 1]

    def inorder(self, node, valuelist):
        # Appends every value in sorted (in-order) sequence to valuelist.
        if node is None:
            return
        self.inorder(node.left, valuelist)
        valuelist.append(node.val)
        self.inorder(node.right, valuelist)
| {
"repo_name": "gengwg/leetcode",
"path": "230_kth_smallest_element_bst.py",
"copies": "1",
"size": "2437",
"license": "apache-2.0",
"hash": 6398647803666822000,
"line_mean": 25.7362637363,
"line_max": 112,
"alpha_frac": 0.5277435265,
"autogenerated": false,
"ratio": 3.73159509202454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.475933861852454,
"avg_score": null,
"num_lines": null
} |
# 231. Power of Two
# Given an integer, write a function to determine if it is a power of two.
# http://www.cnblogs.com/grandyang/p/4623394.html
class Solution(object):
    def isPowerOfTwo(self, n):
        """Count set bits: a positive power of two has exactly one.

        :type n: int
        :rtype: bool
        """
        cnt = 0
        while n > 0:
            cnt += (n & 1)
            n >>= 1
        return cnt == 1

    def isPowerOfTwo(self, n):
        """
        Check if n can be divided by 2. If yes, divide n by 2 and check it
        repeatedly, until it can not be divided by 2, then check if it equals 1.
        """
        if n == 0:
            return False
        while n % 2 == 0:
            # Floor division keeps n an int (the original ``/=`` would drift
            # into floats under Python 3 semantics).
            n //= 2
        return n == 1

    # bit operation
    # http://bookshadow.com/weblog/2015/07/06/leetcode-power-of-two/
    def isPowerOfTwo(self, n):
        """
        A power of two has only its highest bit set and all other bits zero.
        If an integer is power of 2, there is a single bit in the binary representation of n.
        n & n - 1 removes the left most bit of n.
        e.g. 16 = b10000, 16 - 1 = b01111, and 16 & 16 - 1 = b10000 & b01111 = 0, also 16 != 0,
        based on these facts there is only one bit in b10000, so 16 is power of 2.
        """
        return n > 0 and n & (n - 1) == 0
        # return n > 0 and not (n & n-1)
# Ad-hoc smoke test (Python 2 print statement): expect True.
print Solution().isPowerOfTwo(1)
| {
"repo_name": "gengwg/leetcode",
"path": "231_power_of_two.py",
"copies": "1",
"size": "1382",
"license": "apache-2.0",
"hash": -5602216100257937000,
"line_mean": 30.5238095238,
"line_max": 95,
"alpha_frac": 0.5377643505,
"autogenerated": false,
"ratio": 3.1226415094339623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4160405859933962,
"avg_score": null,
"num_lines": null
} |
# 232. Implement Queue using Stacks - LeetCode
# https://leetcode.com/problems/implement-queue-using-stacks/description/
class MyStack(object):
    """Stack over a Python list with an explicit top-of-stack index.

    Popped slots stay in ``lst`` and are overwritten by later pushes, so the
    backing list only grows when the stack reaches a new maximum depth.
    ``pop``/``top`` return False (not an exception) when the stack is empty.
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.lst = list()   # backing storage; may hold stale values past top_ptr
        self.top_ptr = -1   # index of the current top element; -1 means empty

    def push(self, x):
        """
        Push element x onto stack.
        :type x: int
        :rtype: void
        """
        if self.top_ptr == len(self.lst) - 1:
            # No reusable slot: grow the backing list.
            self.lst.append(x)
            self.top_ptr += 1
        else:
            # Reuse a slot left behind by an earlier pop.
            self.top_ptr += 1
            self.lst[self.top_ptr] = x
        return

    def pop(self):
        """
        Removes the element on top of the stack and returns that element.
        :rtype: int
        """
        if self.top_ptr == -1:
            return False
        ret = self.top()
        self.top_ptr -= 1
        return ret

    def top(self):
        """
        Get the top element.
        :rtype: int
        """
        # BUG FIX: the original compared the bound *method* ``self.top`` to -1
        # (always False), so top() on an empty stack raised IndexError instead
        # of returning False like pop() does.
        if self.top_ptr == -1:
            return False
        return self.lst[self.top_ptr]

    def empty(self):
        """
        Returns whether the stack is empty.
        :rtype: bool
        """
        return self.top_ptr == -1
class MyQueue_SingleStack(object):
    """FIFO queue built on one MyStack plus a scratch stack per operation."""

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.stack = MyStack()

    def push(self, x):
        """
        Push element x to the back of queue.
        :type x: int
        :rtype: void
        """
        self.stack.push(x)

    def pop(self):
        """
        Removes the element from in front of queue and returns that element.
        :rtype: int
        """
        if self.stack.empty():
            return False
        scratch = MyStack()
        # Reverse into scratch so the oldest element ends up on top.
        while not self.stack.empty():
            scratch.push(self.stack.pop())
        front = scratch.pop()
        # Restore the survivors to their original order.
        while not scratch.empty():
            self.stack.push(scratch.pop())
        return front

    def peek(self):
        """
        Get the front element.
        :rtype: int
        """
        if self.stack.empty():
            return False
        scratch = MyStack()
        while not self.stack.empty():
            scratch.push(self.stack.pop())
        front = scratch.top()
        # Put everything back; nothing was consumed.
        while not scratch.empty():
            self.stack.push(scratch.pop())
        return front

    def empty(self):
        """
        Returns whether the queue is empty.
        :rtype: bool
        """
        return self.stack.empty()
class MyQueue_DoubleStack(object):
    """Amortised O(1) queue: one stack for arrivals, one for departures."""

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.in_stack = MyStack()
        self.out_stack = MyStack()

    def push(self, x):
        """
        Push element x to the back of queue.
        :type x: int
        :rtype: void
        """
        self.in_stack.push(x)

    def pop(self):
        """
        Removes the element from in front of queue and returns that element.
        :rtype: int
        """
        # peek() guarantees the front element sits on top of out_stack.
        self.peek()
        return self.out_stack.pop()

    def peek(self):
        """
        Get the front element.
        :rtype: int
        """
        if self.out_stack.empty():
            # Refill: reversing in_stack puts the oldest element on top.
            while not self.in_stack.empty():
                self.out_stack.push(self.in_stack.pop())
        return self.out_stack.top()

    def empty(self):
        """
        Returns whether the queue is empty.
        :rtype: bool
        """
        return self.out_stack.empty() and self.in_stack.empty()
# Tiny self-test harness (Python 2): replays a LeetCode-style op/arg script
# against MyQueue and compares the collected results with the expected output.
MyQueue = MyQueue_DoubleStack
ans = [
    (["MyQueue","empty","push","pop","push","push","peek","empty"],
     [[],[],[1],[],[2],[3],[],[]],
     [None,True,None,1,None,None,2,False]),
]
for i in ans:
    r = []
    for j in range(len(i[0])):
        # Dispatch each operation name to the matching queue method.
        if i[0][j] == "MyQueue":
            obj = MyQueue()
            r.append(None)
        if i[0][j] == "push":
            r.append(obj.push(i[1][j][0]))
        if i[0][j] == "pop":
            r.append(obj.pop())
        if i[0][j] == "peek":
            r.append(obj.peek())
        if i[0][j] == "empty":
            r.append(obj.empty())
    print r, r == i[2]
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/232_implement-queue-using-stacks.py",
"copies": "1",
"size": "4269",
"license": "mit",
"hash": 7400754366323708000,
"line_mean": 23.2613636364,
"line_max": 76,
"alpha_frac": 0.4734129773,
"autogenerated": false,
"ratio": 3.84941388638413,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9793629113647476,
"avg_score": 0.0058395500073307564,
"num_lines": 176
} |
# 234. Palindrome Linked List - LeetCode
# https://leetcode.com/problems/palindrome-linked-list/description/
from helper.linked_list import LinkedList, traversal
class Solution(object):
    def isPalindrome(self, head):
        """
        Check whether a singly linked list reads the same in both directions.

        Finds the end of the first half with slow/fast pointers, reverses the
        second half in place, then walks both halves in lockstep.

        :type head: ListNode
        :rtype: bool
        """
        def reverse(node):
            # In-place list reversal; returns the new head.
            if node is None or node.next is None:
                return node
            prev = None
            while node is not None:
                following = node.next
                node.next = prev
                prev = node
                node = following
            return prev

        # Empty and single-node lists are trivially palindromic.
        if head is None or head.next is None:
            return True

        # slow stops at the last node of the first half.
        slow, fast = head, head.next
        while fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next

        left = head
        right = reverse(slow.next)
        # Compare the (reversed) second half against the first half.
        while right:
            if left.val != right.val:
                return False
            left = left.next
            right = right.next
        return True
# Quick regression harness (Python 2): each case pairs the list contents with
# the expected isPalindrome answer; prints O on a match, X on a mismatch.
ans = [
    ([],True),
    ([1],True),
    ([1,2],False),
    ([1,2,3],False),
    ([1,2,1],True),
    ([1,2,3,3,2,1],True),
    ([1,2,3,4,5],False),
    ([1,2,3,4,3,2,1],True),
    ([2,2,2,2],True),
    (range(10),False),
]
s = Solution()
for i in ans:
    l = LinkedList(i[0])
    r = s.isPalindrome(l.head)
    print r, "O" if r == i[1] else "X"
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/234_palindrome-linked-list.py",
"copies": "1",
"size": "1611",
"license": "mit",
"hash": -8592622712887277000,
"line_mean": 24.1875,
"line_max": 67,
"alpha_frac": 0.4680322781,
"autogenerated": false,
"ratio": 3.464516129032258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4432548407132258,
"avg_score": null,
"num_lines": null
} |
# 234. Palindrome Linked List
#
# Given a singly linked list, determine if it is a palindrome.
#
# Follow up:
# Could you do it in O(n) time and O(1) space?
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
# http://blog.csdn.net/coder_orz/article/details/51306985
class Solution(object):
    # put linked list val into an array
    # then test if array is palindrome
    def isPalindrome(self, head):
        """
        :type head: ListNode
        :rtype: bool
        """
        if not head or not head.next:
            return True
        li = []
        while head:
            li.append(head.val)
            head = head.next
        # or: return li == li[::-1]
        length = len(li)
        # Floor division: exact on ints under both Python 2 and 3 (the
        # original ``length/2`` becomes a float under Python 3 and breaks range()).
        for i in range(length // 2):
            if li[i] != li[length - i - 1]:
                return False
        return True

    def isPalindrome2(self, head):
        """Stack of the first half compared against the second half; O(n/2) space."""
        if not head or not head.next:
            return True
        li = []
        slow = fast = head
        # use slow/fast to find middle of list
        # push first half into stack (insert(0) keeps it reversed)
        # must have fast.next
        while fast and fast.next:
            li.insert(0, slow.val)
            slow = slow.next
            fast = fast.next.next
        # skip testing middle element if odd number
        if fast:
            slow = slow.next
        # pop stack and compare with 2nd half
        for x in li:
            if x != slow.val:
                return False
            slow = slow.next
        return True

    # reverse 2nd half of linked list
    # Time O(n), space O(1)
    def isPalindrome3(self, head):
        """Constant-space check: reverses the second half in place, then compares."""
        if not head or not head.next:
            return True
        slow = fast = head
        while fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
        slow = slow.next
        # slow points to the 2nd half; reverse it, then walk both halves.
        slow = self.reverseList(slow)
        while slow:
            if head.val != slow.val:
                return False
            slow = slow.next
            head = head.next
        return True

    def reverseList(self, head):
        """Reverse a singly linked list in place and return the new head."""
        new_head = None
        while head:
            p = head
            head = head.next
            p.next = new_head
            new_head = p
        return new_head
| {
"repo_name": "gengwg/leetcode",
"path": "234_palindrome_linked_list.py",
"copies": "1",
"size": "2359",
"license": "apache-2.0",
"hash": -310475316620664060,
"line_mean": 23.0714285714,
"line_max": 62,
"alpha_frac": 0.5150487495,
"autogenerated": false,
"ratio": 3.9382303839732886,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9902258725310022,
"avg_score": 0.01020408163265306,
"num_lines": 98
} |
# 234. Palindrome Linked List
#
# Given a singly linked list, determine if its items form a palindrome.
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
def is_palindrome_rec(head):
    """Recursively check whether a linked list's values form a palindrome.

    Builds a reversed view of the values as nested pairs (a functional
    "stack"), then compares the first half of the list against it.
    """
    def reversed_pairs(node, acc, count):
        # Cons each value onto acc so the last value ends up outermost.
        if node is None:
            return acc, count
        return reversed_pairs(node.next, (node.val, acc), count + 1)

    def first_half_matches(node, pairs, remaining):
        # Compare ``remaining`` values walking forwards vs. the reversed view.
        if remaining == 0:
            return True
        value, rest = pairs
        if node.val != value:
            return False
        return first_half_matches(node.next, rest, remaining - 1)

    rev, size = reversed_pairs(head, (), 0)
    return first_half_matches(head, rev, size // 2)
def is_palindrome_iter(head):
    """Iteratively check whether a linked list's values form a palindrome.

    Copies the values into a list, then compares the first half (walked via
    node pointers) against values popped off the end of the list.
    """
    # Collect all values front-to-back.
    values = []
    node = head
    while node is not None:  # idiomatic ``is not`` (was ``not node is None``)
        values.append(node.val)
        node = node.next
    # Compare the first half against the mirrored back half.
    node = head
    for _ in range(len(values) // 2):
        # pop() combines the original ``stack[-1]`` read and ``del stack[-1]``.
        if node.val != values.pop():
            return False
        node = node.next
    return True
class Solution:
    """LeetCode adapter exposing both implementations; ``isPalindrome`` is
    aliased to the iterative version."""
    # NOTE(review): the ListNode parameter annotations are evaluated at class
    # definition time, and ListNode only appears commented-out above --
    # presumably the judge environment defines it; verify before importing
    # this module standalone.
    def isPalindrome_rec(self, head: ListNode) -> bool:
        return is_palindrome_rec(head)
    def isPalindrome_iter(self, head: ListNode) -> bool:
        return is_palindrome_iter(head)
    # Default entry point used by the judge.
    isPalindrome = isPalindrome_iter
| {
"repo_name": "afbarnard/glowing-broccoli",
"path": "lc/000234.py",
"copies": "1",
"size": "1459",
"license": "mit",
"hash": 8963296934136820000,
"line_mean": 25.5272727273,
"line_max": 71,
"alpha_frac": 0.5867032214,
"autogenerated": false,
"ratio": 3.377314814814815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9449602159298938,
"avg_score": 0.002883175383175383,
"num_lines": 55
} |
# 237. Delete Node in a Linked List
#
# Write a function to delete a node (except the tail) in a singly linked list, given only access to that node.
#
# Supposed the linked list is 1 -> 2 -> 3 -> 4 and you are given the third node with value 3,
# the linked list should become 1 -> 2 -> 4 after calling your function.
#
# http://www.tangjikai.com/algorithms/leetcode-237-delete-node-in-a-linked-list
# replace current node's value with next node's.
# cur.next = cur.next.next in order to skip the next node.
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def deleteNode(self, node):
        """
        :type node: ListNode
        :rtype: void Do not return anything, modify node in-place instead.

        The given node cannot be unlinked (there is no access to its
        predecessor), so copy the successor's value into it and splice the
        successor out of the list instead.
        """
        successor = node.next
        node.val = successor.val
        node.next = successor.next
| {
"repo_name": "gengwg/leetcode",
"path": "237_delete_node_in_linked_list.py",
"copies": "1",
"size": "1106",
"license": "apache-2.0",
"hash": -89155202194167120,
"line_mean": 34.6774193548,
"line_max": 110,
"alpha_frac": 0.6618444846,
"autogenerated": false,
"ratio": 3.6143790849673203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9609821069623379,
"avg_score": 0.03328049998878818,
"num_lines": 31
} |
"""238. Product of Array Except Self
https://leetcode.com/problems/product-of-array-except-self/
Given an array nums of n integers where n > 1, return an array output
such that output[i] is equal to the product of all the elements of
nums except nums[i].
Example:
Input: [1,2,3,4]
Output: [24,12,8,6]
Note: Please solve it without division and in O(n).
Follow up:
Could you solve it with constant space complexity?
(The output array does not count as extra space for the purpose of space
complexity analysis.)
"""
from typing import List
class Solution:
    def product_except_self(self, nums: List[int]) -> List[int]:
        """Return out where out[i] is the product of all of nums except nums[i].

        One pass records the total product and the zero count; each answer
        then follows from how many zeros the array contains.
        """
        size = len(nums)
        ans = [1] * size
        total, zero_count = 1, 0
        for value in nums:
            total *= value
            if value == 0:
                zero_count += 1
        for i, value in enumerate(nums):
            if value != 0:
                # total is already 0 here whenever the array holds any zero.
                ans[i] = total // value
            elif zero_count == 1:
                # The lone zero: its slot gets the product of all non-zeros.
                prod = 1
                for other in nums:
                    if other != 0:
                        prod *= other
                ans[i] = prod
            else:
                # Two or more zeros force every product-except-self to 0.
                ans[i] = 0
        return ans
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/product_of_array_except_self.py",
"copies": "1",
"size": "1279",
"license": "mit",
"hash": -491254210865534600,
"line_mean": 27.4222222222,
"line_max": 72,
"alpha_frac": 0.5332290852,
"autogenerated": false,
"ratio": 3.7507331378299122,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4783962223029912,
"avg_score": null,
"num_lines": null
} |
"""23andme genotyping data extraction."""
from datetime import date, datetime
import json
import os
import re
from subprocess import check_output
from tempfile import TemporaryFile
import requests
# Tab-separated table of the 23andme array SNPs (index, id, chromosome,
# position, reference allele), sorted by chromosome and position.
SNP_DATA_23ANDME_FILE = os.path.join(
    os.path.dirname(__file__),
    '23andme_API_snps_data_with_ref_sorted.txt')

# Was used to generate reference genotypes in the previous file.
REFERENCE_GENOME_URL = ("http://hgdownload-test.cse.ucsc.edu/" +
                        "goldenPath/hg19/bigZips/hg19.2bit")

# Regions of known Y-chromosome calls, used for sex inference below.
API23ANDME_Y_REGIONS_JSON = os.path.join(os.path.dirname(__file__),
                                         '23andme_y_chrom_regions.json')

# Column order for emitted VCF rows; the last column holds the sample data.
VCF_FIELDS = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER',
              'INFO', 'FORMAT', '23ANDME_DATA']
def snp_data_23andme():
    """Generator, returns SNP info sorted by chrom and position.

    Yields [index, snp_id, chromosome, chromosome_position, reference_allele]
    lists parsed from the tab-separated data file, after skipping '#' comment
    lines and validating the header row.
    """
    snp_data_file = open(SNP_DATA_23ANDME_FILE)
    # Python 2 file-iterator protocol; skip leading comment lines.
    next_line = snp_data_file.next()
    while next_line.startswith('#'):
        next_line = snp_data_file.next()
    # Fail loudly if the file layout ever changes.
    expected_header = ['index', 'snp', 'chromosome',
                       'chromosome_position', 'reference_allele']
    assert next_line == '\t'.join(expected_header) + '\n'
    for line in snp_data_file:
        data = line.rstrip('\n').split('\t')
        yield data
def api23andme_full_gen_data(access_token, profile_id):
    """Get full genotype data from 23andme API."""
    url = "http://api.23andme.com/1/genomes/%s" % profile_id
    auth_headers = {'Authorization': 'Bearer %s' % access_token}
    response = requests.get(url, headers=auth_headers)
    # The payload nests the genome string under the 'genome' key.
    return response.json()['genome']
def api23andme_full_gen_infer_sex(genetic_data):
    """Check some known Y genotype calls to infer sex.

    Returns "Male" if any called (A/C/G/T) base appears in the known
    Y-chromosome regions of the genome string, otherwise "Female".
    """
    # Close the regions file deterministically; the original leaked the
    # handle via ``json.load(open(...))``.
    with open(API23ANDME_Y_REGIONS_JSON) as regions_file:
        y_regions = json.load(regions_file)
    # Each region is (start index, length) in SNP units; the genome string
    # stores two characters per SNP, hence the *2 scaling.
    y_seqs = ''.join([genetic_data[x[0]*2:x[0]*2+x[1]*2] for x in y_regions])
    if re.search(r'[ACGT]', y_seqs):
        return "Male"
    else:
        return "Female"
def vcf_header(source=None, reference=None, format_info=None):
    """Generate a VCF header.

    :param source: optional value for the ##source meta line
    :param reference: optional value for the ##reference meta line
    :param format_info: optional iterable of FORMAT definition strings
    :return: list of header lines, ending with the #CHROM column row
    """
    header = []
    header.append("##fileformat=VCFv4.1")
    # File date as zero-padded YYYYMMDD, per VCF convention.
    header.append("##fileDate=%s" % date.today().strftime("%Y%m%d"))
    if source:
        header.append("##source=" + source)
    if reference:
        header.append("##reference=%s" % reference)
    # BUG FIX: the original iterated format_info unconditionally, raising
    # TypeError whenever it was left at its None default.
    for item in (format_info or []):
        header.append("##FORMAT=" + item)
    header.append('#' + '\t'.join(VCF_FIELDS))
    return header
def get_genotype(genetic_data, snp_info, sex):
    """Get genotype, collapsing hemizygous locations.

    :param genetic_data: full genome string, two characters per SNP index
    :param snp_info: [index, snp_id, chromosome, position, reference_allele]
    :param sex: "Male" or "Female", as inferred from Y-chromosome calls
    :return: one- or two-character genotype string
    """
    # Each SNP occupies two characters of the genome string.
    raw_genotype = genetic_data[int(snp_info[0])*2:int(snp_info[0])*2+2]
    # Mitochondrial and Y positions are haploid: both reported characters
    # must agree, and only one is returned.
    if snp_info[2] in ['MT', 'M', 'Y', 'chrM', 'chrMT', 'chrY']:
        try:
            assert raw_genotype[0] == raw_genotype[1]
        except AssertionError:
            print raw_genotype
            print snp_info
            print sex
            raise SystemError
        return raw_genotype[0]
    # Male X is hemizygous outside the pseudoautosomal regions (PAR).
    if sex == 'Male' and snp_info[2] in ['X', 'chrX']:
        # PAR X coordinates for hg19 according to UCSC are:
        # chrX:60001-2699520 and chrX:154931044-155260560
        if (60001 <= int(snp_info[3]) <= 2699520 or
                154931044 <= int(snp_info[3]) <= 155260560):
            return raw_genotype
        else:
            try:
                assert raw_genotype[0] == raw_genotype[1]
            except AssertionError:
                print raw_genotype
                print snp_info
                print sex
                raise SystemError
            return raw_genotype[0]
    return raw_genotype
def api23andme_to_vcf_rows(genetic_data, sex):
    """Convert 23andme locations to unsorted VCF lines."""
    for snp_info in snp_data_23andme():
        genotype = get_genotype(genetic_data, snp_info, sex)
        # Skip positions without a usable reference allele and no-calls.
        if snp_info[4] == '_' or genotype == '__' or genotype == '--':
            continue
        # Only plain one- or two-base calls are representable.
        if not re.match(r'^[ACGT]{1,2}$', genotype):
            continue
        row = dict.fromkeys(VCF_FIELDS, '.')
        row['CHROM'] = snp_info[2]
        row['POS'] = snp_info[3]
        if snp_info[1].startswith('rs'):
            row['ID'] = snp_info[1]
        row['REF'] = snp_info[4]
        # Distinct non-reference alleles, preserving call order.
        alt_alleles = []
        for allele in genotype:
            if allele != row['REF'] and allele not in alt_alleles:
                alt_alleles.append(allele)
        if alt_alleles:
            row['ALT'] = ','.join(alt_alleles)
        row['FORMAT'] = 'GT'
        # Genotype indices are positions into REF + ALT alleles (e.g. "0/1").
        all_alleles = [row['REF']] + alt_alleles
        row['23ANDME_DATA'] = '/'.join(str(all_alleles.index(allele))
                                       for allele in genotype)
        yield '\t'.join(row[field] for field in VCF_FIELDS)
def api23andme_to_vcf(genetic_data, sex):
    """Create VCF file from 23andmeAPI full genotyping data.

    Generator yielding the file line by line: header first, then data rows.
    """
    print "Creating VCF data"
    # Record the exact code revision in the header for provenance.
    commit = check_output(["git", "rev-parse", "HEAD"]).rstrip('\n')
    source = ("open_humans_data_extraction.twenty_three_and_me," +
              "commit:%s" % commit)
    reference = REFERENCE_GENOME_URL
    format_info = ['<ID=GT,Number=1,Type=String,Description="Genotype">']
    vcf_header_lines = vcf_header(source=source,
                                  reference=reference,
                                  format_info=format_info)
    for line in vcf_header_lines:
        yield line + '\n'
    vcf_rows = api23andme_to_vcf_rows(genetic_data, sex)
    for line in vcf_rows:
        yield line + '\n'
| {
"repo_name": "abramconnelly/genevieve",
"path": "file_process/utils/twentythree_and_me.py",
"copies": "4",
"size": "5716",
"license": "mit",
"hash": 5104655284128371000,
"line_mean": 36.6052631579,
"line_max": 77,
"alpha_frac": 0.5803009097,
"autogenerated": false,
"ratio": 3.244040862656073,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 152
} |
"""2/3 compatibility module for Hyde."""
# This module is for cross-version compatibility. As such, several
# assignments and import will look invalid to checkers like flake8.
# These lines are being marked with ``# NOQA`` to allow flake8 checking
# to pass.
import sys

# True when running under Python 3; selects which compatibility shims load below.
PY3 = sys.version_info.major == 3
if PY3:
    # Imports that have moved.
    from collections import UserDict  # NOQA
    import configparser  # NOQA
    from functools import reduce  # NOQA
    from http.client import HTTPConnection, HTTPSConnection  # NOQA
    from http.server import HTTPServer, SimpleHTTPRequestHandler  # NOQA
    from io import StringIO  # NOQA
    from urllib import parse  # NOQA
    from urllib.parse import quote, unquote  # NOQA

    # Types that have changed name.  Self-assignments re-export the builtins
    # so both branches expose an identical set of module attributes.
    filter = filter  # NOQA
    input = input  # NOQA
    basestring = str  # NOQA
    str = str  # NOQA
    zip = zip  # NOQA

    def execfile(filename, globals, locals):
        """Python 3 replacement for ``execfile``."""
        # Credit: 2to3 and this StackOverflow answer
        # (http://stackoverflow.com/a/437857/841994) take similar
        # approaches.
        with open(filename) as f:
            code = compile(f.read(), filename, 'exec')
            exec(code, globals, locals)

    def reraise(tp, value, tb=None):
        """Reraise exceptions."""
        if getattr(value, '__traceback__', tb) is not tb:
            raise value.with_traceback(tb)
        raise value
else:
    # Imports that have moved.
    from itertools import ifilter as filter, izip as zip  # NOQA
    import ConfigParser as configparser  # NOQA
    reduce = reduce
    from httplib import HTTPConnection, HTTPSConnection  # NOQA
    from BaseHTTPServer import HTTPServer  # NOQA
    from SimpleHTTPServer import SimpleHTTPRequestHandler  # NOQA
    from cStringIO import StringIO  # NOQA
    from UserDict import IterableUserDict as UserDict  # NOQA
    import urlparse as parse  # NOQA
    from urllib import quote, unquote  # NOQA

    # Types that have changed name.
    input = raw_input  # NOQA
    basestring = basestring  # NOQA
    str = unicode  # NOQA
    execfile = execfile  # NOQA

    # ``raise tp, value, tb`` is a SyntaxError under Python 3, so it is
    # hidden inside exec() to keep this module importable on both versions.
    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
def iteritems(d):
    """Return an iterator over (key, value) pairs for both dict flavours."""
    if hasattr(d, 'iteritems'):
        # Python 2 style mapping.
        return d.iteritems()
    # Python 3: wrap the items() view in a plain iterator.
    return iter(d.items())
def with_metaclass(meta, *bases):
    """Assign a metaclass in a 2/3 compatible fashion.

    :param meta: the metaclass to apply
    :param bases: base classes of the eventual class
    :return: a temporary class to inherit from; subclassing it builds the
        real class with ``meta`` as its metaclass
    """
    # Note: borrowed from https://github.com/dirn/Simon/
    # This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instantiation that replaces
    # itself with the actual metaclass. Because of internal type checks
    # we also need to make sure that we downgrade the custom metaclass
    # for one level to something closer to type (that's why __call__ and
    # __init__ comes back from type etc.).
    #
    # This has the advantage over six.with_metaclass in that it does not
    # introduce dummy classes into the final MRO.
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__

        def __new__(cls, name, this_bases, d):
            # this_bases is None only for the dummy class created below;
            # real subclasses go through ``meta`` with the captured bases.
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            return meta(name, bases, d)
    return metaclass('DummyMetaClass', None, {})
| {
"repo_name": "hyde/hyde",
"path": "hyde/_compat.py",
"copies": "1",
"size": "3394",
"license": "mit",
"hash": 4376309064723226600,
"line_mean": 34.7263157895,
"line_max": 72,
"alpha_frac": 0.6540954626,
"autogenerated": false,
"ratio": 4.205700123915737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5359795586515738,
"avg_score": null,
"num_lines": null
} |
# 2.3 compatibility: fall back to the ``sets`` module where ``set`` is not
# yet a builtin.
try:
    set
except NameError:
    import sets
    set = sets.Set

import numpy as sp
from _delaunay import delaunay
from interpolate import LinearInterpolator, NNInterpolator

__all__ = ['Triangulation']
class Triangulation(object):
    """A Delaunay triangulation of points in a plane.

    Triangulation(x, y)
    x, y -- the coordinates of the points as 1-D arrays of floats

    Let us make the following definitions:
        npoints = number of points input
        nedges = number of edges in the triangulation
        ntriangles = number of triangles in the triangulation

        point_id = an integer identifying a particular point (specifically, an
            index into x and y), range(0, npoints)
        edge_id = an integer identifying a particular edge, range(0, nedges)
        triangle_id = an integer identifying a particular triangle
            range(0, ntriangles)

    Attributes: (all should be treated as read-only to maintain consistency)
      x, y -- the coordinates of the points as 1-D arrays of floats.

      circumcenters -- (ntriangles, 2) array of floats giving the (x,y)
        coordinates of the circumcenters of each triangle (indexed by a
        triangle_id).

      edge_db -- (nedges, 2) array of point_id's giving the points forming
        each edge in no particular order; indexed by an edge_id.

      triangle_nodes -- (ntriangles, 3) array of point_id's giving the points
        forming each triangle in counter-clockwise order; indexed by a
        triangle_id.

      triangle_neighbors -- (ntriangles, 3) array of triangle_id's giving the
        neighboring triangle; indexed by a triangle_id.
        The value can also be -1 meaning that that edge is on the convex hull of
        the points and there is no neighbor on that edge. The values are ordered
        such that triangle_neighbors[tri, i] corresponds with the edge
        *opposite* triangle_nodes[tri, i]. As such, these neighbors are also in
        counter-clockwise order.

      hull -- list of point_id's giving the nodes which form the convex hull
        of the point set. This list is sorted in counter-clockwise order.
    """

    def __init__(self, x, y):
        self.x = sp.asarray(x, dtype=sp.float64)
        self.y = sp.asarray(y, dtype=sp.float64)
        if self.x.shape != self.y.shape or len(self.x.shape) != 1:
            raise ValueError("x,y must be equal-length 1-D arrays")
        # delaunay() is the compiled extension that performs the actual
        # triangulation.
        self.circumcenters, self.edge_db, self.triangle_nodes, \
            self.triangle_neighbors = delaunay(self.x, self.y)
        self.hull = self._compute_convex_hull()

    def _compute_convex_hull(self):
        """Extract the convex hull from the triangulation information.

        The output will be a list of point_id's in counter-clockwise order
        forming the convex hull of the data set.
        """
        # A triangle edge lies on the hull iff there is no neighbor (-1)
        # across it.
        border = (self.triangle_neighbors == -1)
        # Map each hull node to the next hull node.  Neighbor slot i lies
        # opposite node i, so the border edge joins the triangle's other two
        # nodes (taken here in counter-clockwise order).
        edges = {}
        edges.update(dict(zip(self.triangle_nodes[border[:,0]][:,1],
                              self.triangle_nodes[border[:,0]][:,2])))
        edges.update(dict(zip(self.triangle_nodes[border[:,1]][:,2],
                              self.triangle_nodes[border[:,1]][:,0])))
        edges.update(dict(zip(self.triangle_nodes[border[:,2]][:,0],
                              self.triangle_nodes[border[:,2]][:,1])))
        # Take an arbitrary starting point and its subsequent node
        hull = list(edges.popitem())
        while edges:
            hull.append(edges.pop(hull[-1]))
        # hull[-1] == hull[0], so remove hull[-1]
        hull.pop()
        return hull

    def linear_interpolator(self, z, default_value=sp.nan):
        """Get an object which can interpolate within the convex hull by
        assigning a plane to each triangle.

        z -- an array of floats giving the known function values at each point
          in the triangulation.
        """
        z = sp.asarray(z, dtype=sp.float64)
        if z.shape != self.x.shape:
            raise ValueError("z must be the same shape as x and y")
        return LinearInterpolator(self, z, default_value)

    def nn_interpolator(self, z, default_value=sp.nan):
        """Get an object which can interpolate within the convex hull by
        the natural neighbors method.

        z -- an array of floats giving the known function values at each point
          in the triangulation.
        """
        z = sp.asarray(z, dtype=sp.float64)
        if z.shape != self.x.shape:
            raise ValueError("z must be the same shape as x and y")
        return NNInterpolator(self, z, default_value)

    def prep_extrapolator(self, z, bbox=None):
        """Build a larger triangulation suitable for extrapolation.

        Adds three far-away pseudo-points enclosing the bounding box and
        assigns them values from a least-squares plane fit, so interpolators
        built on the returned triangulation extend beyond the original hull.
        Returns (new_triangulation, extended_z).
        """
        if bbox is None:
            bbox = (self.x[0], self.x[0], self.y[0], self.y[0])
        minx, maxx, miny, maxy = sp.asarray(bbox, sp.float64)
        minx = min(minx, sp.minimum.reduce(self.x))
        miny = min(miny, sp.minimum.reduce(self.y))
        maxx = max(maxx, sp.maximum.reduce(self.x))
        maxy = max(maxy, sp.maximum.reduce(self.y))
        M = max((maxx-minx)/2, (maxy-miny)/2)
        midx = (minx + maxx)/2.0
        midy = (miny + maxy)/2.0
        # Three pseudo-points placed 3*M away from the data's midpoint.
        xp, yp = sp.array([[midx+3*M, midx, midx-3*M],
                           [midy, midy+3*M, midy-3*M]])
        x1 = sp.hstack((self.x, xp))
        y1 = sp.hstack((self.y, yp))
        newtri = self.__class__(x1, y1)
        # do a least-squares fit to a plane to make pseudo-data
        xy1 = sp.ones((len(self.x), 3), sp.float64)
        xy1[:,0] = self.x
        xy1[:,1] = self.y
        from scipy import linalg
        c, res, rank, s = linalg.lstsq(xy1, z)
        zp = sp.hstack((z, xp*c[0] + yp*c[1] + c[2]))
        return newtri, zp

    def nn_extrapolator(self, z, bbox=None, default_value=sp.nan):
        # Natural-neighbor interpolator over the extended triangulation.
        newtri, zp = self.prep_extrapolator(z, bbox)
        return newtri.nn_interpolator(zp, default_value)

    def linear_extrapolator(self, z, bbox=None, default_value=sp.nan):
        # Planar interpolator over the extended triangulation.
        newtri, zp = self.prep_extrapolator(z, bbox)
        return newtri.linear_interpolator(zp, default_value)

    def node_graph(self):
        """Return a graph of node_id's pointing to node_id's.

        The arcs of the graph correspond to the edges in the triangulation.

        {node_id: set([node_id, ...]), ...}
        """
        g = {}
        for i, j in self.edge_db:
            # Undirected edge: record it in both directions.
            s = g.setdefault(i, set())
            s.add(j)
            s = g.setdefault(j, set())
            s.add(i)
        return g
| {
"repo_name": "simion1232006/pyroms",
"path": "pyroms/delaunay/triangulate.py",
"copies": "1",
"size": "6466",
"license": "bsd-3-clause",
"hash": -1408435110932744000,
"line_mean": 36.8128654971,
"line_max": 80,
"alpha_frac": 0.6050108259,
"autogenerated": false,
"ratio": 3.5527472527472526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9648223596890538,
"avg_score": 0.0019068963513428538,
"num_lines": 171
} |
##2|3
##_|_ ##1|4
##
###use cartesian plane for dictionary
###
from myro import *
import draw
# Corner ids map to the cartesian centers of the four quadrants:
#   2 | 3
#   --+--
#   1 | 4
grid_position = {1: [-0.5, -0.5], 2: [-0.5, 0.5], 3: [0.5, 0.5], 4: [0.5, -0.5]}
# use c=1, d=-1 or vice versa
# Calibrated spin durations for threesixty() below:
# 45 degree turn = 0.438
# 90 degree turn = 0.835
# 180 degree turn = 1.6478
# 360 degree turn = 3.2835
def threesixty(c, d, t):
    """Run the two motors at speeds ``c`` and ``d`` until timer ``t`` expires.

    With opposite-sign speeds this spins the robot in place; the timing
    constants commented above map durations to turn angles.
    """
    while timeRemaining(t):
        motors(c, d)
    stop()
def vertical_movement(ud):
    """Move the robot one grid cell vertically.

    :param ud: "up" drives forward one cell, "down" reverses one cell;
        any other value does nothing.
    """
    # Timings calibrated so one motion covers exactly one grid cell.
    # (Removed the unused motor_speed_turn local from the original.)
    time_box_fwd = 1.57
    time_box_back = 1.6
    motor_speed_lateral = 0.5
    if ud == "up":
        forward(motor_speed_lateral, time_box_fwd)
    if ud == "down":
        backward(motor_speed_lateral, time_box_back)
# def getPos(curPos): #returns current position of the robot
# return grid_position[curPos]
def horizontal_movement(lr):  # moves left or right by 1 cell
    """Move the robot one grid cell horizontally.

    Turns 90 degrees, drives one cell forward, then turns back so the robot
    keeps its original heading.

    :param lr: "right" or "left"; any other value does nothing.
    """
    # NOTE(review): for lr=="right" the first turn uses time_90_left and the
    # return turn uses time_90_right (and vice versa for "left"); the names
    # look swapped relative to the inline comments -- confirm against the
    # robot's calibration before changing anything.
    time_90_right = 2.95  # time for motors to turn right
    time_90_left = 3.04  # time for motors to turn left
    time_box_fwd = 1.57
    time_box_back = 1.6
    motor_speed_turn = 0.3
    motor_speed_lateral = 0.5
    if(lr=="right"):
        threesixty(motor_speed_turn, -1*motor_speed_turn, time_90_left)#right turn
        wait(.2)
        forward(motor_speed_lateral, time_box_fwd)
        wait(.2)
        threesixty(-1*motor_speed_turn, motor_speed_turn, time_90_right)#left turn
        wait(.2)
    if(lr=="left"):
        threesixty(-1*motor_speed_turn, motor_speed_turn, time_90_right)#left turn
        wait(.2)
        forward(motor_speed_lateral, time_box_fwd)
        wait(.2)
        threesixty(motor_speed_turn, -1*motor_speed_turn, time_90_left)#right turn
        wait(.2)
def safeRemove(a, x):
    """Remove the first occurrence of x from list a if present.

    Mutates a in place and always returns it, so calls can be chained.
    """
    try:
        a.remove(x)
    except ValueError:
        # x was not present; leave a untouched.
        pass
    return a
def play(curPos, destination, element):  # move to right corner and play, destination is in cart form
    """Drive to a corner suitable for ``destination`` and draw ``element`` there.

    :param curPos: current corner id (1-4, a key of grid_position)
    :param destination: target cell in cartesian form (x, y)
    :param element: "O" draws a circle; anything else draws a line
    :return: the corner id the robot ends up at
    """
    print "curPos",curPos
    cartPos = grid_position[curPos]#of robot
    # Start with all four corners and eliminate those on the wrong side of
    # the destination along each axis.
    valid = [1,2,3,4]
    if destination[0] == 1:
        valid = safeRemove(valid,1)
        valid = safeRemove(valid,2)
    elif destination[0] == -1:
        valid = safeRemove(valid,3)
        valid = safeRemove(valid,4)
    if destination[1] == 1:
        valid = safeRemove(valid,1)
        valid = safeRemove(valid,4)
    elif destination[1] == -1:
        valid = safeRemove(valid,2)
        valid = safeRemove(valid,3)
    # Prefer staying put when the current corner is already acceptable.
    if curPos in valid:
        valid = [curPos]
    print "cartPos: ",cartPos,"valid",valid,"curpos",curPos,"destination",destination
    navCorner(curPos,valid[0])#both in arr form
    curPos = valid[0] #valid
    if (not curPos):
        print "NOT CUR POS!cartPos: ",cartPos,"valid",valid,"curpos",curPos,"destination",destination
    cartPos = grid_position[curPos]
    print "cartPos: ",cartPos, "destination:",destination,"curpos",curPos
    # Work out which quadrant of the current cell the drawing lands in.
    ud = "up"
    lr = "left"
    if destination[0] < cartPos[0]:
        lr = "left"
    else:
        lr = "right"
    if destination[1] < cartPos[1]:
        ud = "down"
    else:
        ud = "up"
    if element == "O":
        print "O",ud,lr
        draw.draw_O_at(ud,lr)
    else:
        print "l",ud,lr
        draw.draw_l_at(ud,lr)
    return curPos
def navCorner(position, destination):#moves from position to destination
    """Navigate the robot between grid corners 1..4.

    Adjacent corners need one move; diagonal corners need two.
    Corner layout (inferred from the moves below): 2-3 on top,
    1-4 on the bottom, 1/2 on the left, 4/3 on the right.
    """
    print "move from ",position," to ",destination
    #straight lines between positions
    if (position == 1 and destination == 2):
        print "move up"
        vertical_movement("up")
    if (position == 4 and destination == 3):
        print "move up"
        vertical_movement("up")
    if (position == 2 and destination == 1):
        print "move down"
        vertical_movement("down")
    if (position == 3 and destination == 4):
        print "move down"
        vertical_movement("down")
    if (position == 1 and destination == 4):
        print "move right"
        horizontal_movement("right")
    if (position == 2 and destination == 3):
        print "move right"
        horizontal_movement("right")
    if (position == 4 and destination == 1):
        print "move left"
        horizontal_movement("left")
    if (position == 3 and destination == 2):
        print "move left"
        horizontal_movement("left")
    #diagonals between positions
    if (position == 1 and destination == 3):
        print "move up"
        vertical_movement("up")
        print "move right"
        horizontal_movement("right")
    if (position == 2 and destination == 4):
        print "move right"
        horizontal_movement("right")
        print "move down"
        vertical_movement("down")
    if (position == 3 and destination == 1):
        print "move down"
        vertical_movement("down")
        print "move left"
        horizontal_movement("left")
    if (position == 4 and destination == 2):
        print "move left"
        horizontal_movement("left")
        print "move up"
        vertical_movement("up")
| {
"repo_name": "pbardea/scribbler",
"path": "finalPrj/movement.py",
"copies": "1",
"size": "4711",
"license": "mit",
"hash": 6043197156356143000,
"line_mean": 28.8164556962,
"line_max": 98,
"alpha_frac": 0.6094247506,
"autogenerated": false,
"ratio": 3.262465373961219,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4371890124561219,
"avg_score": null,
"num_lines": null
} |
# 24.05.2007, c
# last revision: 25.02.2008
from sfepy import data_dir
from sfepy.fem.periodic import *

filename_mesh = data_dir + '/meshes/2d/special/channels_symm944t.mesh'

# BUG FIX: the original guard was ``if filename_mesh.find( 'symm' ):``.
# str.find() returns -1 (truthy!) when the substring is absent and 0
# (falsy!) when it is found at index 0, so it is the wrong containment
# test.  Use the ``in`` operator instead.
if 'symm' in filename_mesh:

    # Region definitions specific to the symmetric channels mesh.
    region_1 = {
        'name' : 'Y1',
        'select' : """elements of group 3""",
    }

    region_2 = {
        'name' : 'Y2',
        'select' : """elements of group 4 +e elements of group 6
                      +e elements of group 8""",
    }

    region_4 = {
        'name' : 'Y1Y2',
        'select' : """r.Y1 +e r.Y2""",
    }

    region_5 = {
        'name' : 'Walls',
        'select' : """r.EBCGamma1 +n r.EBCGamma2""",
    }

    region_310 = {
        'name' : 'EBCGamma1',
        'select' : """(elements of group 1 *n elements of group 3)
                      +n
                      (elements of group 2 *n elements of group 3)
                      """,
    }

    region_320 = {
        'name' : 'EBCGamma2',
        'select' : """(elements of group 5 *n elements of group 4)
                      +n
                      (elements of group 1 *n elements of group 4)
                      +n
                      (elements of group 7 *n elements of group 6)
                      +n
                      (elements of group 2 *n elements of group 6)
                      +n
                      (elements of group 9 *n elements of group 8)
                      +n
                      (elements of group 2 *n elements of group 8)
                      """,
    }
w2 = 0.499
# Sides.
region_20 = {
'name' : 'Left',
'select' : 'nodes in (x < %.3f)' % -w2,
}
region_21 = {
'name' : 'Right',
'select' : 'nodes in (x > %.3f)' % w2,
}
region_22 = {
'name' : 'Bottom',
'select' : 'nodes in (y < %.3f)' % -w2,
}
region_23 = {
'name' : 'Top',
'select' : 'nodes in (y > %.3f)' % w2,
}
field_1 = {
'name' : '2_velocity',
'dtype' : 'real',
'shape' : (2,),
'region' : 'Y1Y2',
'approx_order' : 2,
}
field_2 = {
'name' : 'pressure',
'dtype' : 'real',
'shape' : (1,),
'region' : 'Y1Y2',
'approx_order' : 1,
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '2_velocity',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '2_velocity',
'dual' : 'u',
}
variable_3 = {
'name' : 'p',
'kind' : 'unknown field',
'field' : 'pressure',
'order' : 1,
}
variable_4 = {
'name' : 'q',
'kind' : 'test field',
'field' : 'pressure',
'dual' : 'p',
}
integral_1 = {
'name' : 'i1',
'kind' : 'v',
'quadrature' : 'gauss_o2_d2',
}
equations = {
'balance' :
"""dw_div_grad.i1.Y1Y2( fluid.viscosity, v, u )
- dw_stokes.i1.Y1Y2( v, p ) = 0""",
'incompressibility' :
"""dw_stokes.i1.Y1Y2( u, q ) = 0""",
}
material_1 = {
'name' : 'fluid',
'values' : {
'viscosity' : 1.0,
'density' : 1e0,
},
}
ebc_1 = {
'name' : 'walls',
'region' : 'Walls',
'dofs' : {'u.all' : 0.0},
}
ebc_2 = {
'name' : 'top_velocity',
'region' : 'Top',
'dofs' : {'u.1' : -1.0, 'u.0' : 0.0},
}
ebc_10 = {
'name' : 'bottom_pressure',
'region' : 'Bottom',
'dofs' : {'p.0' : 0.0},
}
epbc_1 = {
'name' : 'u_rl',
'region' : ['Left', 'Right'],
'dofs' : {'u.all' : 'u.all', 'p.0' : 'p.0'},
'match' : 'match_y_line',
}
functions = {
'match_y_line' : (match_y_line,),
}
##
# FE assembling parameters.
fe = {
'chunk_size' : 100,
'cache_override' : True,
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 2,
'eps_a' : 1e-8,
'eps_r' : 1e-2,
'macheps' : 1e-16,
'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red).
'ls_red' : 0.1,
'ls_red_warp' : 0.001,
'ls_on' : 1.1,
'ls_min' : 1e-5,
'check' : 0,
'delta' : 1e-6,
'is_plot' : False,
'problem' : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max)
}
save_format = 'hdf5' # 'hdf5' or 'vtk'
| {
"repo_name": "olivierverdier/sfepy",
"path": "examples/navier_stokes/stokes.py",
"copies": "1",
"size": "4109",
"license": "bsd-3-clause",
"hash": -5551021767705433000,
"line_mean": 20.6263157895,
"line_max": 71,
"alpha_frac": 0.4366025797,
"autogenerated": false,
"ratio": 2.6821148825065273,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.3618717462206527,
"avg_score": null,
"num_lines": null
} |
# 24.09.2007, c
import sympy as s
##
# 25.09.2007, c
def create_scalar( name, n_ep ):
    """Build an (n_ep x 1) sympy column of named scalar DOF symbols."""
    column = s.matrices.zeronm( n_ep, 1 )
    for row in range( n_ep ):
        column[row,0] = '%s%d' % (name, row)
    return column
##
# 24.09.2007, c
def create_vector( name, n_ep, dim ):
    """Build a (dim*n_ep x 1) column of vector DOF symbols.

    Ordering is DOF-by-DOF: all node DOFs of component 0 first, then
    component 1, etc.
    """
    vec = s.matrices.zeronm( dim * n_ep, 1 )
    for flat in range( dim * n_ep ):
        comp, node = divmod( flat, n_ep )
        vec[flat,0] = '%s%d%d' % (name, comp, node)
    return vec
##
# 24.09.2007, c
def create_scalar_base( name, n_ep ):
    """Build a (1 x n_ep) row of scalar base function symbols."""
    base_row = s.matrices.zeronm( 1, n_ep )
    for col in range( n_ep ):
        base_row[0,col] = '%s%d' % (name, col)
    return base_row
##
# 24.09.2007, c
# 25.09.2007
def create_vector_base( name, phic, dim ):
    """Expand the scalar base row ``phic`` into a block-diagonal vector
    base of shape (dim x dim*n_ep).

    Returns (phi, indx) where indx lists the component index of each row.
    ``name`` is unused; kept for signature symmetry with the other builders.
    """
    n_ep = phic.shape[1]
    phi = s.matrices.zeronm( dim, dim * n_ep )
    indx = list( range( dim ) )
    for comp in indx:
        phi[comp,n_ep*comp:n_ep*(comp+1)] = phic
    return phi, indx
##
# 24.09.2007, c
def create_scalar_base_grad( name, phic, dim ):
    """Build the (dim x n_ep) matrix of gradient symbols of a scalar base."""
    n_ep = phic.shape[1]
    grad = s.matrices.zeronm( dim, n_ep )
    for comp in range( dim ):
        for node in range( n_ep ):
            grad[comp,node] = '%s%d%d' % (name, comp, node)
    return grad
##
# 24.09.2007, c
# 25.09.2007
def create_vector_base_grad( name, gc, transpose = False ):
    # Expand the scalar base gradient ``gc`` (dim x n_ep) into the vector
    # base gradient (dim*dim x dim*n_ep).  ``indx`` records the component
    # pair each block row corresponds to; with transpose=True the pairs
    # are recorded (and the blocks placed) column-first.
    # ``name`` is unused; kept for signature symmetry with the other builders.
    dim, n_ep = gc.shape
    g = s.matrices.zeronm( dim * dim, dim * n_ep )
    indx = []
    if transpose:
        for ir in range( dim ):
            for ic in range( dim ):
                g[dim*ir+ic,n_ep*ic:n_ep*(ic+1)] = gc[ir,:]
                indx.append( (ic, ir) )
    else:
        for ir in range( dim ):
            for ic in range( dim ):
                g[dim*ir+ic,n_ep*ir:n_ep*(ir+1)] = gc[ic,:]
                indx.append( (ir, ic) )
    return g, indx
##
# 25.09.2007, c
def create_u_operator( u, transpose = False ):
    """Build the (dim*dim x dim) operator matrix of the vector ``u`` used
    to express convective terms; ``transpose`` selects the transposed
    placement of the components.
    """
    dim = u.shape[0]
    op_u = s.matrices.zeronm( dim * dim, dim )
    if transpose:
        # Element at (dim*ir + ic, ic) equals u[ir].
        for row in range( dim * dim ):
            op_u[row,row % dim] = u[row // dim]
    else:
        # Stack the whole vector ``u`` in block column ``col``.
        for col in range( dim ):
            op_u[dim*col:dim*(col+1),col] = u
    return op_u
##
# 24.09.2007, c
# 25.09.2007
def grad_vector_to_matrix( name, gv ):
    """Reshape a (dim*dim x 1) gradient column ``gv`` into a (dim x dim)
    matrix, row-major.  ``name`` is unused; kept for signature symmetry.
    """
    dim = int( s.sqrt( gv.shape[0] ) )
    gm = s.matrices.zeronm( dim, dim )
    for row in range( dim ):
        for col in range( dim ):
            gm[row,col] = gv[dim*row+col,0]
    return gm
##
# 24.09.2007, c
# 25.09.2007
def substitute_continuous( expr, names, u, phi ):
    # Replace each discrete product (phi*u)[ii] appearing in ``expr`` by
    # the continuous-field symbol names[ii].
    # NOTE(review): relies on the old sympy Matrix attribute ``.lines``
    # (number of rows); modern sympy uses ``.rows`` -- confirm the sympy
    # version this script targets.
    pu = phi * u
    for ii in range( phi.lines ):
        expr = expr.subs( pu[ii,0], names[ii] )
    return expr
##
# 25.09.2007, c
def create_vector_var_data( name, phi, vindx, g, gt, vgindx, u ):
    """Build all symbolic data (gradients, continuous substitutions and
    ``u``-operators) for the vector variable ``u``, printing the
    intermediate results along the way.  Returns a dict of named pieces.
    """
    gu = g * u
    gum = grad_vector_to_matrix( 'gum', gu )
    print 'g %s:\n' % name, gum
    gut = gt * u
    gutm = grad_vector_to_matrix( 'gutm', gut )
    print 'gt %s:\n' % name, gutm
    pu = phi * u
    # Continuous (non-discretized) counterparts of the products above.
    names = ['c%s%d' % (name, indx) for indx in vindx]
    cu = substitute_continuous( pu, names, u, phi )
    print 'continuous %s:\n' % name, cu
    gnames = ['cg%s%d_%d' % (name, indx[0], indx[1]) for indx in vgindx]
    cgu = substitute_continuous( gu, gnames, u, g )
    cgum = grad_vector_to_matrix( 'gum', cgu )
    print 'continuous g %s:\n' % name, cgum
    cgut = substitute_continuous( gut, gnames, u, g )
    cgutm = grad_vector_to_matrix( 'gutm', cgut )
    print 'continuous gt %s:\n' % name, cgutm
    op_u = create_u_operator( cu )
    print op_u
    op_ut = create_u_operator( cu, transpose = True )
    print op_ut
    out = {
        'g' : gu,
        'g_m' : gum,
        'q' : pu,
        'c' : cu,
        'cg' : cgu,
        'cg_m' : cgum,
        'cgt' : cgut,
        'cgt_m' : cgutm,
        'op' : op_u,
        'opt' : op_ut,
        'names' : names,
        'gnames' : gnames,
    }
    return out
##
# 25.09.2007, c
def create_scalar_var_data( name, phi, g, u ):
    """Build the symbolic data (gradient, continuous substitutions and
    gradient operator) for the scalar variable ``u``; prints intermediate
    results and returns a dict of named pieces.
    """
    gu = g * u
    pu = phi * u
    names = ['c%s' % name]
    cu = substitute_continuous( pu, names, u, phi )
    print 'continuous %s:\n' % name, cu
    gnames = ['cg%s_%d' % (name, ii) for ii in range( g.shape[0] )]
    cgu = substitute_continuous( gu, gnames, u, g )
    print 'continuous g %s:\n' % name, cgu
    op_gu = create_u_operator( cgu )
    print op_gu
    out = {
        'g' : gu,
        'q' : pu,
        'c' : cu,
        'cg' : cgu,
        'gop' : op_gu,
        'names' : names,
        'gnames' : gnames,
    }
    return out
##
# 25.09.2007, c
def main():
    """Symbolically verify equivalences between different weak-form
    expressions of convective terms for a small P2/P1-like setting
    (n_ep nodes per element, 2D).
    """
    n_ep = 3
    dim = 2
    # Discrete DOF vectors for the vector (u, v, b) and scalar (p, q, r)
    # variables.
    u = create_vector( 'u', n_ep, dim )
    v = create_vector( 'v', n_ep, dim )
    b = create_vector( 'b', n_ep, dim )
    p = create_scalar( 'p', n_ep )
    q = create_scalar( 'q', n_ep )
    r = create_scalar( 'r', n_ep )
    ## print u
    ## print v
    # Base functions and their gradients.
    phic = create_scalar_base( 'phic', n_ep )
    phi, vindx = create_vector_base( 'phi', phic, dim )
    gc = create_scalar_base_grad( 'gc', phic, dim )
    g, vgindx = create_vector_base_grad( 'g', gc )
    gt, aux = create_vector_base_grad( 'gt', gc, transpose = True )
    ## print phi
    ## print phic
    ## print gc
    print g
    print gt
    ud = create_vector_var_data( 'u', phi, vindx, g, gt, vgindx, u )
    vd = create_vector_var_data( 'v', phi, vindx, g, gt, vgindx, v )
    bd = create_vector_var_data( 'b', phi, vindx, g, gt, vgindx, b )
    pd = create_scalar_var_data( 'p', phic, gc, p )
    qd = create_scalar_var_data( 'q', phic, gc, q )
    rd = create_scalar_var_data( 'r', phic, gc, r )
    print ud.keys()
    # Sanity checks relating plain and transposed gradient operators.
    assert bool( bd['op'].T * g == bd['opt'].T * gt )
    assert bool( bd['opt'].T * g == bd['op'].T * gt )
    assert bool( bd['cgt_m'] == bd['cg_m'].T )
    # Compare alternative symbolic forms of each convective term.
    print '((b * grad) u), v)'
    form1 = vd['c'].T * bd['op'].T * ud['cg']
    form2 = vd['c'].T * bd['opt'].T * ud['cgt']
    print form1
    print form2
    print bool( form1 == form2 )
    print '((v * grad) u), b)'
    form1 = vd['c'].T * bd['op'].T * ud['cgt']
    form2 = vd['c'].T * bd['opt'].T * ud['cg']
    print form1
    print form2
    print bool( form1 == form2 )
    print '((u * grad) v), b)'
    form1 = vd['cgt'].T * bd['op'] * ud['c']
    form2 = vd['cg'].T * bd['opt'] * ud['c']
    print form1
    print form2
    print bool( form1 == form2 )
    print '((b * grad) v), u)'
    form1 = vd['cg'].T * bd['op'] * ud['c']
    form2 = vd['cgt'].T * bd['opt'] * ud['c']
    print form1
    print form2
    print bool( form1 == form2 )
    print '((v * grad) b), u)'
    form1 = vd['c'].T * bd['cgt_m'] * ud['c']
    form2 = vd['c'].T * bd['cg_m'].T * ud['c']
    print form1
    print form2
    print bool( form1 == form2 )
    print '((b * grad) u), (b * grad) v)'
    form1 = vd['cg'].T * bd['op'] * bd['op'].T * ud['cg']
    print form1
    print '((u * grad) b), (b * grad) v)'
    form1 = vd['cg'].T * bd['op'] * bd['cg_m'] * ud['c']
    print form1
    print '(grad p, (b * grad) v)'
    form1 = vd['cg'].T * bd['op'] * pd['cg']
    print form1
    print '(grad q, (b * grad) u)'
    form1 = qd['cg'].T * bd['op'].T * ud['cg']
    print form1
    print '(grad q, (u * grad) b)'
    form1 = qd['cg'].T * bd['cg_m'] * ud['c']
    print form1
    print '(grad r, (u * grad) v)'
    form1 = vd['cgt'].T * rd['gop'] * ud['c']
    print form1
    return ud, vd, bd, pd, qd, rd
# Script entry point: run the symbolic checks when executed directly.
if __name__ == '__main__':
    ud, vd, bd, pd, qd, rd = main()
| {
"repo_name": "olivierverdier/sfepy",
"path": "script/evalForms.py",
"copies": "2",
"size": "7350",
"license": "bsd-3-clause",
"hash": 4083073378172863500,
"line_mean": 24.6993006993,
"line_max": 72,
"alpha_frac": 0.4968707483,
"autogenerated": false,
"ratio": 2.6231263383297643,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9001150542892176,
"avg_score": 0.023769308747517707,
"num_lines": 286
} |
# 240. Search a 2D Matrix II
#
# Write an efficient algorithm that searches for a value in an m x n matrix. This matrix has the following properties:
#
# Integers in each row are sorted in ascending from left to right.
# Integers in each column are sorted in ascending from top to bottom.
# For example,
#
# Consider the following matrix:
#
# [
# [1, 4, 7, 11, 15],
# [2, 5, 8, 12, 19],
# [3, 6, 9, 16, 22],
# [10, 13, 14, 17, 24],
# [18, 21, 23, 26, 30]
# ]
# Given target = 5, return true.
#
# Given target = 20, return false.
# http://www.tangjikai.com/algorithms/leetcode-240-search-a-2d-matrix-ii
# http://www.cnblogs.com/yrbbest/p/5005947.html
#
# For each number, its below number is larger and left is smaller.
# The top-left number is the smallest and the bottom-right is the largest in the matrix,
# so the top-right number is the middle number, which the starting point to search.
# If matrix[i][j] < target: i ++; if matrix[i][j] > target: j--
#
# Complexity:
# O(m + n) time
# O(1) space
class Solution(object):
    def searchMatrix(self, matrix, target):
        """
        :type matrix: List[List[int]]
        :type target: int
        :rtype: bool

        Staircase search from the top-right corner: everything below the
        current cell is larger, everything to its left is smaller, so each
        comparison eliminates a whole row or a whole column.  O(m + n)
        time, O(1) space.
        """
        if not matrix:
            return False
        row, col = 0, len(matrix[0]) - 1
        rows = len(matrix)
        while row < rows and col >= 0:
            current = matrix[row][col]
            if current == target:
                return True
            if current < target:
                row += 1     # target is larger: discard this row
            else:
                col -= 1     # target is smaller: discard this column
        return False
# Ad-hoc smoke tests: expect True then True.
print(Solution().searchMatrix([[1,4,7,11,15],[2,5,8,12,19],[3,6,9,16,22],[10,13,14,17,24],[18,21,23,26,30]], 5))
print(Solution().searchMatrix([[5]], 5))
| {
"repo_name": "gengwg/leetcode",
"path": "240_search_2d_matrix_ii.py",
"copies": "1",
"size": "1854",
"license": "apache-2.0",
"hash": 4798852232135010000,
"line_mean": 28.9032258065,
"line_max": 118,
"alpha_frac": 0.5685005394,
"autogenerated": false,
"ratio": 3.074626865671642,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9119635596803816,
"avg_score": 0.0046983616535651905,
"num_lines": 62
} |
""" 24.10.2016, fd control added to VCO. VCO does not output aything if input is zero.
Char to binary copied to be template for Binary Data Source copied """
from pyqtgraph.flowchart import Node
from pyqtgraph.Qt import QtGui, QtCore
from pyqtgraph.flowchart.library.common import CtrlNode
import pyqtgraph.metaarray as metaarray
import scipy.signal
import global_data
import numpy as np
from oscilloscope import Oscilloscope
from pyqtgraph.dockarea import *
class BinDataSourceNode(CtrlNode):
    """Node that outputs binary selected data sequence over the whole output array."""
    """ This node, CharToBin and SigGen give error message when inserted and run Scipy with IDLE: Warning (from warnings module):
  File "C:\Python27\lib\site-packages\pyqtgraph\metaarray\MetaArray.py", line 346
    c = getattr(a, op)(b)
FutureWarning: comparison to `None` will result in an elementwise object comparison in the future."""
    nodeName = 'BinDataSource'
    uiTemplate = [
        ('Data Type','combo', {'values': ['000...', '111...', '1010...'], 'index': 0}),
    ]

    def __init__(self, name):
        # Single output terminal; data is regenerated periodically by a timer.
        terminals = {
            'Out': dict(io='out'),
        }
        CtrlNode.__init__(self, name, terminals=terminals)
        self.data_size = global_data.BUFFER_SIZE #10000
        self.batch_size = self.data_size #/ 10 # Update the whole signal buffer at once
        # NOTE: Python 2 integer division -- always 1 here.
        self.block_count = self.data_size / self.batch_size # =1
        self.data = [0] * self.data_size # Signal data buffer
        # Create and init update timer
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.updateSignal)
        self.timer.start(global_data.UPDATE_INTERVAL)
        # Pointer points to the value that will be updated next
        #self.pointer = 0

    def updateSignal(self): # this function is called in regular periods of UPDATE_INTERVAL
        # Refill the whole buffer according to the selected pattern.
        data_type = self.ctrls['Data Type'].currentText()
        # Update the signal buffer array depending on the data type
        for i in range(self.data_size/self.block_count): # i = 0...9999
            #index = self.pointer * self.data_size/self.block_count + i # index = 0*10000/1+0, 1*10000/1+1, 2*10000+2 ???
            #index = i
            if data_type == '000...':
                self.data[i] = 0
            elif data_type == '111...':
                self.data[i] = 1
            elif data_type == '1010...':
                self.data[i] = (i % 2 == 0)# Place 0 binary 1 and 10000 binary 0. Why not 0...9999?
        # move pointer
        #self.pointer += 1
        # Hop to start if we have reached the end
        #if self.pointer >= self.block_count: # i >= 1 ???
        #if self.pointer >= self.data_size: #This does not work
        #    self.pointer = 0
        #Force the update of output values
        self.update(signal=True)

    def process(self, display=True):
        #Create meta array from updated data
        out_data = metaarray.MetaArray(self.data, info=[{'name': 'Time', 'values': np.linspace(0, len(self.data), len(self.data))}, {}])
        #Set outputs
        return {'Out': out_data}
class VCONode(CtrlNode):
    """Node Voltage Controlled Oscillator VCO that generates carrier waveform,
    frequency of which is controlled by input voltage.
    ToDo: If there is nothing connected to input, should generate nominal frequency carrier (presently nothing)"""
    nodeName = 'VCO'
    uiTemplate = [
        ('fd kHz/V', 'spin', {'values': 100, 'step': 1, 'range': [0, 200]})
    ]

    def __init__(self, name):
        terminals = {
            'In': dict(io='in', optional=False), #optional=True: operates although this terminal is not connected, however no output?.
            'Out': dict(io='out'),
        }
        CtrlNode.__init__(self, name, terminals=terminals)

    def process(self, In, display=True):
        # Integrate the input voltage (running sum) and phase-modulate a
        # nominal 1 kHz carrier:
        #   out[i] = cos((2*pi*1000*i + 2*pi*fd*sum(in[0..i])) / BUFFER_SIZE)
        indata = In.asarray()
        fd = self.ctrls['fd kHz/V'].value()
        output = []
        summ = 0.0
        for i in range (0,global_data.BUFFER_SIZE):
            summ += indata[i]
            output.append( np.cos((2*1000*np.pi*i + 2*fd*np.pi*summ) / global_data.BUFFER_SIZE) )
        # Generate meta array for output
        out_data = metaarray.MetaArray(output, info=[{'name': 'Time', 'values': np.linspace(0, len(output), len(output))}, {}])
        return {'Out': out_data}
class RectifierNode(CtrlNode):
    """Rectifier Node: half-wave (clip negatives) or full-wave (absolute value)."""
    nodeName = 'Rectifier'
    uiTemplate = [
        ('wave mode', 'combo', {'values': ['half', 'full'], 'index': 0})
    ]

    def __init__(self, name):
        terminals = {
            'In': dict(io='in'),
            'Out': dict(io='out'),
        }
        CtrlNode.__init__(self, name, terminals=terminals)

    def process(self, In, display=True):
        # Read GUI parameter values
        mode = self.ctrls['wave mode'].currentText()
        data = In.asarray()
        if mode == "half":
            # Clamp negative values to zero (upper bound 100000 is just "large").
            output = np.clip(data, 0, 100000)
        else:
            output = np.absolute(data)
        # Generate meta array for output
        out_data = metaarray.MetaArray(output, info=[{'name': 'Time', 'values': np.linspace(0, len(output), len(output))}, {}])
        return {'Out': out_data}
class DCBlockNode(CtrlNode):
    """Node that calculates and subtracts the average of the data,
    removing the DC component of the signal."""
    nodeName = 'DCBlock'

    def __init__(self, name):
        terminals = {
            'In': dict(io='in'),
            'Out': dict(io='out'),
        }
        CtrlNode.__init__(self, name, terminals=terminals)

    def process(self, In, display=True):
        data = In.asarray()
        mean = np.mean(data)
        output = np.subtract(data, mean)
        # Generate meta array for output
        out_data = metaarray.MetaArray(output, info=[{'name': 'Time', 'values': np.linspace(0, len(output), len(output))}, {}])
        return {'Out': out_data}
class OscilloscopeNode(Node):
    """Node that creates a new Oscilloscope widget and outputs data to it"""
    """ Bugs: Display remains when input is disconnected"""
    nodeName = 'Oscilloscope'

    def __init__(self, name):
        Node.__init__(self, name, terminals={'In1': {'io':'in'}, 'In2': {'io':'in'}})
        #Create an oscilloscope
        self.osc_widget = Oscilloscope()
        # Create and add a new dock on top of tools and add oscilloscope to it
        self.dock = Dock(name)
        global_data.area.addDock(self.dock, 'above', global_data.tool_dock)
        self.dock.addWidget(self.osc_widget)
        # Create plots for both inputs
        self.plot_item_1 = self.osc_widget.pwidget.plot()
        self.plot_item_2 = self.osc_widget.pwidget2.plot()

    # Update oscilloscope data signals based on input
    def process(self, In1, In2, display=True):
        if In1 is not None:
            self.plot_item_1.setData(In1)
        if In2 is not None:
            self.plot_item_2.setData(In2)
class CharToBinaryNode(Node):
    # adi edit: description of node changed:
    """Node that outputs the parameter character as 7-bit ASCII + eighth zero bit characters"""
    nodeName = 'CharToBinary'
    # Character shown in the control widget at start-up.
    init_character = 'A'

    def __init__(self, name):
        ## Initialize node with only a single output terminal
        Node.__init__(self, name, terminals={'Out': {'io':'out'}})#,'In': {'io': 'in'}
        self.char = self.init_character
        self.update(signal=True)

    def set_character(self, char):
        # Slot connected to the QLineEdit; re-emits the output on change.
        # print("Changed character to " + char)
        self.char = char
        self.update(signal=True)

    #def process(self, In, display=True):
    def process(self, display=True):
        #if text is empty set bits to zero
        if self.char == "":
            result = [0, 0, 0, 0, 0, 0, 0, 0]
            data = metaarray.MetaArray(result, info=[{'name': 'Time', 'values': np.linspace(0, 7, len(result))}, {}])
        else:
            #Convert character to binary
            bin_value = bin(ord(str(self.char)))
            bin_list = list(bin_value[2:]) # remove '0b' from start and convert to array
            bin_array = [int(i) for i in bin_list] #Convert string list to int list
            if len (bin_array) == 6: #adi edit to account for characters with 6 bit binary code
                bin_array.insert(0, 0)#adi edit
            bin_array.insert(7, 0) #adi edit this eighth bit is actually kept for parity
            # Repeat the 8-bit pattern over as much of the buffer as fits.
            # NOTE: Python 2 integer division below.
            wholeoutput = [] # adi edit ins START
            for j in range(0, (global_data.BUFFER_SIZE-global_data.BUFFER_SIZE%8)/8): #(global_data.BUFFER_SIZE-global_data.BUFFER_SIZE%8)/8:
                for k in range(0,8):
                    wholeoutput.append(bin_array[k])
            data = metaarray.MetaArray(wholeoutput, info=[{'name': 'Time', 'values': np.linspace(0, len(wholeoutput), len(wholeoutput))}, {}])
            # adi edit ins END
        return {'Out': data}

    def ctrlWidget(self): # this method is optional
        # One-character text box used instead of the default CtrlNode UI.
        text_field = QtGui.QLineEdit(self.init_character)
        text_field.setMaxLength(1)
        text_field.textEdited.connect(self.set_character)
        return text_field
class ParityNode(CtrlNode):
    """Node that appends a parity bit to binary sequence given as input"""
    nodeName = 'Parity'
    uiTemplate = [
        ('Parity', 'combo', {'values': ['even', 'odd'], 'index': 0})
    ]

    def __init__(self, name):
        terminals = {
            'In': dict(io='in'),
            'Out': dict(io='out'),
        }
        CtrlNode.__init__(self, name, terminals=terminals)

    def process(self, In, display=True):
        # Read GUI parameter values
        parity = self.ctrls['Parity'].currentText()
        #data = In.view(np.ndarray)
        data = In.asarray()
        #adi edit - comments:
        #Desired changes in the 'Parity Bit Node':
        #Earlier version assumes that the input is 7-bit or 6-bit
        #(basically less than 8-bit) and appends an eighth bit
        #New version has "8-bit repeat sequence" input of which eighth bit (zero) has no meaning.
        #first seven bits of the sequence have to be retained, parity bit added, sequence repeated
        output = []
        # copy received data to output array
        for i in range(0,7):
            output.append(data.item(i)) #copying seven bits from charToBinary node
        # append parity bit
        if parity == "odd":
            #if sum(data) % 2 == 0: earlier version
            if sum(output) % 2 == 0:
                output.append(1)
            else:
                output.append(0)
        else:
            if sum(output) % 2 == 0:
                output.append(0)
            else:
                output.append(1)
        # Generate meta array for output: repeat the 8-bit word over the
        # whole buffer (Python 2 integer division).
        wholeoutput = []
        for j in range(0,(global_data.BUFFER_SIZE-global_data.BUFFER_SIZE%8)/8):
            for k in range(0,8):
                wholeoutput.append(output[k])
        out_data = metaarray.MetaArray(wholeoutput, info=[{'name': 'Time', 'values': np.linspace(0, len(wholeoutput), len(wholeoutput))}, {}])
        return {'Out': out_data}
class CheckParityNode(CtrlNode):
    """Node that checks the integrity of data using the last bit as parity bit"""
    nodeName = 'CheckParity'
    uiTemplate = [
        ('Parity', 'combo', {'values': ['even', 'odd'], 'index': 0})
    ]

    def __init__(self, name):
        terminals = {
            'In': dict(io='in'),
            'Out': dict(io='out'),
        }
        CtrlNode.__init__(self, name, terminals=terminals)
        # Create a display widget
        self.widget = QtGui.QWidget()
        layout = QtGui.QGridLayout()
        self.widget.setLayout(layout)
        self.status_widget = QtGui.QLabel("Status")
        layout.addWidget(self.status_widget)
        # Create and add a new dock on top of tools and add the display widget to it
        self.dock = Dock(name)
        global_data.area.addDock(self.dock, 'above', global_data.tool_dock)
        self.dock.addWidget(self.widget)

    def process(self, In, display=True):
        # Read GUI parameter values
        parity = self.ctrls['Parity'].currentText()
        data = In.asarray()
        # Even parity: total number of 1-bits must be even; odd parity: odd.
        # The input is passed through unchanged.
        if parity == "even":
            if sum(data) % 2 == 0:
                self.status_widget.setText("Message ok")
            else:
                self.status_widget.setText("Message incorrect")
        else:
            if sum(data) % 2 == 1:
                self.status_widget.setText("Message ok")
            else:
                self.status_widget.setText("Message incorrect")
        return {'Out': In}
class FFTNode(CtrlNode):
    """Node that performs FFT for 1 dimensional data array and outputs
    the magnitude spectrum."""
    nodeName = 'FFT'

    def __init__(self, name):
        terminals = {
            'In': dict(io='in'),
            'Out': dict(io='out'),
        }
        CtrlNode.__init__(self, name, terminals=terminals)

    def process(self, In, display=True):
        # Calculate FFT from input and calculate absolute values from complex values
        output = np.absolute(np.fft.fft(In.asarray()))
        # Generate MetaArray
        out_data = metaarray.MetaArray(output, info=[{'name': 'Time', 'values': np.linspace(0, len(output), len(output))}, {}])
        return {'Out': out_data}
class SigGenNode(CtrlNode):
    """Node that generates signals of different types.  Waveform,
    amplitude and frequency can each optionally be driven by an external
    controller (ROT/POT sources read from global_data)."""
    nodeName = 'SigGen'
    uiTemplate = [
        ('waveform source', 'combo', {'values': ['None', 'ROT 0', 'ROT 1'], 'index': 0}),
        ('waveform', 'combo', {'values': ['sine', 'cosine', 'rectangular', 'sawtooth', 'noise', 'DC', 'Carrier', 'Carrier 90deg'], 'index': 0}),
        ('amplitude source', 'combo', {'values': ['None', 'POT 0', 'POT 1', 'POT 2', 'POT 3'], 'index': 0}),
        ('amplitude V', 'spin', {'value': 1.0, 'step': 0.01, 'range': [0.01, 10.0]}),
        ('frequency source', 'combo', {'values': ['None', 'POT 0', 'POT 1', 'POT 2', 'POT 3'], 'index': 0}),
        ('frequency kHz', 'spin', {'value': 10.0, 'step': 0.01, 'range': [0.01, 10.0]}),
        ('sawtooth shape', 'spin', {'value': 1.0, 'step': 0.01, 'range': [0.01, 1.0]})
    ]

    def __init__(self, name):
        terminals = {
            'Out': dict(io='out')
        }
        CtrlNode.__init__(self, name, terminals=terminals)
        self.data_size = global_data.BUFFER_SIZE
        self.batch_size = self.data_size #/ 10 # Update the whole signal buffer at once
        # NOTE: Python 2 integer division -- 1 when batch == full buffer.
        self.block_count = self.data_size / self.batch_size
        self.data = [0] * self.data_size # Signal data buffer
        # Create and init update timer
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.updateSignal)
        self.timer.start(global_data.UPDATE_INTERVAL)
        # Pointer points to the value that will be updated next
        self.pointer = 0

    def updateSignal(self):
        # Timer slot: refresh one batch of the signal buffer, then force
        # the flowchart to re-read the output.
        # Get parameter values from GUI
        waveform_source = self.ctrls['waveform source'].currentText()
        amplitude_source = self.ctrls['amplitude source'].currentText()
        frequency_source = self.ctrls['frequency source'].currentText()
        sawtooth_shape = self.ctrls['sawtooth shape'].value()
        # Read amplitude
        if amplitude_source == "None":
            amplitude = self.ctrls['amplitude V'].value()
        else:
            knob_index = int(amplitude_source[len(amplitude_source)-1]) # Read the last character
            amplitude = global_data.potentiometer_values[knob_index] / 25.0 # Scale from [0,100] to [0,4]
            self.ctrls['amplitude V'].setValue(amplitude)
        # Read frequency in kHz
        if frequency_source == "None":
            frequency = self.ctrls['frequency kHz'].value()
        else:
            knob_index = int(frequency_source[len(frequency_source)-1]) # Read the last character
            frequency = global_data.potentiometer_values[knob_index] / 25.0 # Scale from [0,100] to [0,4]
            self.ctrls['frequency kHz'].setValue(frequency)
        # Change selected waveform if waveform source is enabled
        if waveform_source != "None":
            rot_index = int(waveform_source[len(waveform_source)-1])
            rot_value = global_data.rotatory_values[rot_index]
            self.ctrls['waveform'].setCurrentIndex(rot_value)
        waveform = self.ctrls['waveform'].currentText()
        #Rename the node to correspond the wave type
        #self.rename(waveform + " generator")
        # Update the signal buffer array depending on the wave type
        for i in range(self.data_size/self.block_count):
            index = self.pointer * self.data_size/self.block_count + i
            if waveform == 'sine':
                self.data[index] = amplitude * np.sin(index*2*np.pi*frequency/self.data_size)
            elif waveform == 'cosine':
                self.data[index] = amplitude * np.cos(index*2*np.pi*frequency/self.data_size)
            elif waveform == 'noise':
                self.data[index] = np.random.normal(scale=amplitude)
            elif waveform == 'rectangular':
                self.data[index] = amplitude * scipy.signal.square(2*np.pi*frequency*index/self.data_size)
            elif waveform == 'sawtooth':
                self.data[index] = amplitude * scipy.signal.sawtooth(2*np.pi*frequency*index/self.data_size,width=sawtooth_shape)
            elif waveform == 'DC':
                self.data[index] = amplitude * np.cos(0)
            elif waveform == 'Carrier':
                # Fixed 1 kHz carrier, unit amplitude.
                self.data[index] = np.cos(2*1000*np.pi*index/self.data_size)
            elif waveform == 'Carrier 90deg':
                self.data[index] = np.cos(2*1000*np.pi*index/self.data_size+np.pi/2)
        # move pointer
        self.pointer += 1
        # Hop to start if we have reached the end
        if self.pointer >= self.block_count:
            self.pointer = 0
        #Force the update of output values
        self.update(signal=True)

    def process(self, display=True):
        #Create meta array from updated data
        out_data = metaarray.MetaArray(self.data, info=[{'name': 'Time', 'values': np.linspace(0, len(self.data), len(self.data))}, {}])
        #Set outputs
        return {'Out': out_data}
class LineEncoderNode(CtrlNode):
    """Node that repeats symbol value many times over symbol duration or decodes it using specific coding"""
    #When the whole signal buffer represents 1 ms period, rate defines symbol rate in kBauds.
    nodeName = 'LineEncoder'
    uiTemplate = [
        ('code', 'combo', {'values': ['NRZ', 'Bipolar NRZ'], 'index': 0}),
        ('rate, kBaud', 'spin', {'value': 10, 'step': 1, 'range': [2, 100]}),
        ('decode mode', 'check', {'checked': False})
    ]

    def __init__(self, name):
        terminals = {
            'In': dict(io='in'),
            'Out': dict(io='out'),
        }
        CtrlNode.__init__(self, name, terminals=terminals)

    def process(self, In, display=True):
        # Encode: repeat each input symbol ``symbolduration`` times.
        # Decode: sample the middle of each symbol period.
        # Read parameter values (Python 2 integer division throughout).
        code = self.ctrls['code'].currentText()
        symbolduration = global_data.BUFFER_SIZE / int(self.ctrls['rate, kBaud'].value())
        decode = self.ctrls['decode mode'].isChecked()
        indata = In.asarray()
        data = []
        # How many symbols fit into the array or buffer; take as many from indata.
        for i in range (0,(global_data.BUFFER_SIZE-global_data.BUFFER_SIZE%symbolduration)/symbolduration):
            data.append(indata[i])
        output = []
        if not decode:
            # duplicate each bit symbolduration times
            if code == 'NRZ':
                for i in range(0, len(data)):
                    for j in range(0,symbolduration):
                        output.append(data[i])
            elif code == 'Bipolar NRZ':
                # BUG FIX: ``data`` is a plain Python list, which has no
                # ``.item()`` method -- the original ``data.item(i)`` raised
                # AttributeError here.  Plain indexing works for both lists
                # and numpy arrays.  The level decision is also hoisted out
                # of the inner loop (it does not depend on j).
                for i in range(0, len(data)):
                    if data[i] == 1:
                        signal_value = 0.5
                    else:
                        signal_value = -0.5
                    for j in range(0,symbolduration):
                        output.append(signal_value)
        # Decode with symbolduration, take mid value (if symbolduration=10, read values 5+15+25 etc.)
        else:
            # NOTE(review): ``data`` holds only len(data) entries (one per
            # symbol slot above), yet indices i*symbolduration + symbolduration/2
            # are read from it -- this assumes ``data`` spans the raw
            # buffer; confirm the intended input format of decode mode.
            if code == 'NRZ':
                for i in range(0, len(data)/symbolduration):
                    index = i * symbolduration + symbolduration / 2
                    output.append(data[index])   # BUG FIX: was data.item(index)
            elif code == 'Bipolar NRZ':
                for i in range(0, len(data)/symbolduration):
                    index = i * symbolduration + symbolduration / 2
                    value = data[index]          # BUG FIX: was data.item(index)
                    if value > 0:
                        output.append(1)
                    else:
                        output.append(0)
        #Generate MetaArray
        out_data = metaarray.MetaArray(output, info=[{'name': 'Time', 'values': np.linspace(0, len(output), len(output))}, {}])
        return {'Out': out_data}
class AmplifierNode(CtrlNode):
    """Node that amplifies or attenuates the signal by a constant factor,
    optionally read from a controller knob."""
    nodeName = 'Amplifier'
    uiTemplate = [
        ('factor', 'spin', {'value': 1.0, 'step': 0.001, 'range': [0.0, 10.0]}),
        ('factor source', 'combo', {'values': ['None', 'Knob 0', 'Knob 1', 'Knob 2', 'Knob 3'], 'index': 0})
    ]

    def __init__(self, name):
        terminals = {
            'In': dict(io='in'),
            'Out': dict(io='out'),
        }
        CtrlNode.__init__(self, name, terminals=terminals)

    def process(self, In, display=True):
        #Read parameter values
        factor = self.ctrls['factor'].value()
        factor_source = self.ctrls['factor source'].currentText()
        # Read value from controller knob
        if factor_source != "None":
            knob_index = int(factor_source[len(factor_source)-1]) # Read the last character
            factor = global_data.potentiometer_values[knob_index] / 25.0 # Scale from [0,100] to [0,4]
            self.ctrls['factor'].setValue(factor)
        # Amplify / attenuate
        output = factor * In.asarray()
        #Generate MetaArray
        out_data = metaarray.MetaArray(output, info=[{'name': 'Time', 'values': np.linspace(0, len(output), len(output))}, {}])
        return {'Out': out_data}
class FilterNode(CtrlNode):
    """Node that filters the signal in the frequency domain.

    Builds a first-order complex filter response (LPF, HPF or BPF) sample
    by sample, multiplies it with the FFT of the input, and inverse-FFTs
    back to the time domain.

    NOTE(review): the loops use ``global_data.BUFFER_SIZE/2`` with plain
    '/', which is integer division only on Python 2 — on Python 3 this
    would pass a float to range() and fail. Also, ``range(0, N/2-1)`` and
    ``range(N/2, N-1)`` leave bins N/2-1 and N-1 at zero — presumably an
    off-by-one; confirm against the intended response before changing.
    """
    nodeName = 'Filter'
    uiTemplate = [
        ('type', 'combo', {'values': ['LPF', 'HPF', 'BPF'], 'index': 0}),
        ('fh-3dB', 'spin', {'value': 10, 'step': 0.1, 'range': [0.0, 1000.0]}),
        ('fl-3dB', 'spin', {'value': 10, 'step': 0.1, 'range': [0.0, 1000.0]})
    ]
    def __init__(self, name):
        # One input and one output terminal.
        terminals = {
            'In': dict(io='in'),
            'Out': dict(io='out'),
        }
        CtrlNode.__init__(self, name, terminals=terminals)
    def process(self, In, display=True):
        """Filter the input signal and return it under the 'Out' terminal."""
        # Read parameter values (high/low -3dB cutoff frequencies, filter type).
        f_high_cutoff = self.ctrls['fh-3dB'].value()
        f_low_cutoff = self.ctrls['fl-3dB'].value()
        # NOTE(review): 'type' shadows the builtin; harmless locally.
        type = self.ctrls['type'].currentText()
        # Compute complex spectrum by fft for input signal of the filter
        fft_data = np.fft.fft(In.asarray())
        # Generate complex filter function, initially all are set to zero
        filter_func_complex = np.zeros(global_data.BUFFER_SIZE, dtype=np.complex_)
        # Needed variables are defined in each loop to make formulas shorter and computing faster
        if type == "LPF":
            # First-order low-pass: H = 1/(1 + j*f/fc), written out as real + imag parts.
            for i in range(0, global_data.BUFFER_SIZE/2-1):
                i_fh = i/f_high_cutoff
                filter_func_complex[i] = 1.0 / (1.0 + np.power(i_fh, 2)) + ((-i_fh)/(1.0 + np.power(i_fh, 2))) * 1j
            for i in range(global_data.BUFFER_SIZE/2, global_data.BUFFER_SIZE-1):
                # Note that in this section the frequency index (i-global_data.BUFFER_SIZE) is negative, which changes the complex filter function to the conjugate of the positive portion above.
                # This transfers frequencies close to global_data.BUFFER_SIZE to negative frequencies and the same filtering function can be used
                ig_fh = (i-global_data.BUFFER_SIZE) / f_high_cutoff
                filter_func_complex[i] = 1.0 / (1.0 + np.power(ig_fh, 2)) - ig_fh/(1.0 + np.power(ig_fh, 2)) * 1j
        elif type == "HPF":
            # First-order high-pass: H = (jf/fl)/(1 + jf/fl), expanded to real + imag parts.
            for i in range(0, global_data.BUFFER_SIZE/2-1):
                i_fl = i/f_low_cutoff
                filter_func_complex[i] = (np.power(i_fl, 2)/(1.0 + np.power(i_fl, 2))) + ((i_fl)/(1.0 + np.power(i_fl, 2))) * 1j
            for i in range(global_data.BUFFER_SIZE/2, global_data.BUFFER_SIZE-1):
                # Negative-frequency half, same conjugate trick as for the LPF above.
                ig_fl = (i-global_data.BUFFER_SIZE)/f_low_cutoff
                filter_func_complex[i] = (np.power(ig_fl, 2))/(1.0 + np.power(ig_fl, 2)) + (ig_fl/(1.0 + np.power(ig_fl, 2))) * 1j
        elif type == "BPF":
            # Band-pass: cascade of the HPF (fl) and LPF (fh) responses, multiplied out.
            for i in range(0, global_data.BUFFER_SIZE/2-1):
                i_fl = i/f_low_cutoff
                i_fh = i/f_high_cutoff
                filter_func_complex[i] = ((np.power(i_fl, 2))+(i_fh)*(i_fl))/((1.0 + np.power(i_fh, 2))*(1.0 + np.power(i_fl, 2))) + ((i_fl)-(i_fh)* np.power(i_fl, 2))/((1.0 + np.power(i_fh, 2))*(1.0 + np.power(i_fl, 2))) * 1j
            for i in range(global_data.BUFFER_SIZE/2, global_data.BUFFER_SIZE-1):
                # Negative-frequency half of the band-pass response.
                ig_fl = (i-global_data.BUFFER_SIZE)/f_low_cutoff
                ig_fh = (i-global_data.BUFFER_SIZE) / f_high_cutoff
                filter_func_complex[i] = ((np.power(ig_fl, 2))+ig_fh * ig_fl)/((1 + np.power(ig_fh, 2))*(1.0 + np.power(ig_fl, 2))) + (ig_fl - ig_fh* np.power(ig_fl, 2))/((1.0 + np.power(ig_fh, 2))*(1.0 + np.power(ig_fl, 2))) * 1j
        # Filter data with filter function (in frequency domain)
        filtered_data = filter_func_complex * fft_data
        # Compute inverse fft to reconstruct time domain signal for filter output
        output = np.real(np.fft.ifft(filtered_data)) # Only real part to remove zero valued imaginary part
        # Generate MetaArray
        out_data = metaarray.MetaArray(output, info=[{'name': 'Time', 'values': np.linspace(0, len(output), len(output))}, {}])
        return {'Out': out_data}
# TODO
class BinaryToCharNode(Node):
    """Node that converts a single character to its 7-bit ASCII bit pattern.

    The character is entered through a one-character text field; the node
    outputs the bits as a MetaArray so they can feed the rest of the chain.
    """
    nodeName = 'BinaryToChar'
    init_character = 'A'

    def __init__(self, name):
        ## Initialize node with only a single output terminal
        Node.__init__(self, name, terminals={'Out': {'io':'out'}})#,'In': {'io': 'in'}
        self.char = self.init_character
        self.update(signal=True)

    def set_character(self, char):
        """Slot for the text field: store the new character and recompute."""
        self.char = char
        self.update(signal=True)

    def process(self, display=True):
        """Return the 7-bit binary representation of the current character.

        Bug fix: bin() produces no leading zeros, so characters below 0x40
        used to yield fewer than 7 bits (and the empty case produced 8).
        The pattern is now zero-padded to a fixed width of 7 bits so the
        output length is stable.
        """
        if self.char == "":
            # Empty text field: emit an all-zero 7-bit pattern.
            result = [0] * 7
            data = metaarray.MetaArray(result, info=[{'name': 'Time', 'values': np.linspace(0, 7, len(result))}, {}])
        else:
            # Convert character to binary: strip the '0b' prefix and
            # left-pad to a fixed 7-bit width.
            bin_value = bin(ord(str(self.char)))
            bin_array = [int(b) for b in bin_value[2:].zfill(7)]
            data = metaarray.MetaArray(bin_array, info=[{'name': 'Time', 'values': np.linspace(0, 7, len(bin_array))}, {}])
        return {'Out': data}

    def ctrlWidget(self):  # this method is optional
        """Build the one-character QLineEdit used to enter the character."""
        text_field = QtGui.QLineEdit(self.init_character)
        text_field.setMaxLength(1)
        text_field.textEdited.connect(self.set_character)
        return text_field
| {
"repo_name": "Tatsi/SciEdu",
"path": "nodes.py",
"copies": "1",
"size": "28307",
"license": "mit",
"hash": 3525282142127447000,
"line_mean": 40.6310240964,
"line_max": 233,
"alpha_frac": 0.560144134,
"autogenerated": false,
"ratio": 3.6478092783505156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47079534123505157,
"avg_score": null,
"num_lines": null
} |
# 241. Different Ways to Add Parentheses
#
# Given a string of numbers and operators, return all possible results
# from computing all the different possible ways to group numbers and operators.
# The valid operators are +, - and *.
#
# Example 1
# Input: "2-1-1".
#
# ((2-1)-1) = 0
# (2-(1-1)) = 2
# Output: [0, 2]
#
#
# Example 2
# Input: "2*3-4*5"
#
# (2*(3-(4*5))) = -34
# ((2*3)-(4*5)) = -14
# ((2*(3-4))*5) = -10
# (2*((3-4)*5)) = -10
# (((2*3)-4)*5) = 10
# Output: [-34, -14, -10, -10, 10]
#
# http://www.tangjikai.com/algorithms/leetcode-241-different-ways-to-add-parentheses
# Divide and Conquer.
# For each sign, divide into two parts for before and after it.
class Solution(object):
    def diffWaysToCompute(self, input):
        """
        :type input: str
        :rtype: List[int]

        Divide and conquer: split the expression at every operator and
        combine every value of the left half with every value of the right
        half. Memoization on the substring avoids recomputing identical
        sub-expressions, which would otherwise make the recursion
        exponential.
        """
        return self._compute(input, {})

    def _compute(self, expr, memo):
        # Return (and cache) the list of all values `expr` can evaluate to.
        if expr in memo:
            return memo[expr]
        ans = []
        for i, c in enumerate(expr):
            if c in '+-*':
                left = self._compute(expr[:i], memo)
                right = self._compute(expr[i+1:], memo)
                for m in left:
                    for n in right:
                        if c == '+':
                            ans.append(m + n)
                        elif c == '*':
                            ans.append(m * n)
                        else:  # c == '-'
                            ans.append(m - n)
        # expr contains no operator: it is a single number
        if not ans:
            ans.append(int(expr))
        memo[expr] = ans
        return ans
if __name__ == '__main__':
    # Demo (Python 2 print-statement syntax); expected: [0, 2] then [2].
    print Solution().diffWaysToCompute("2-1-1")
    print Solution().diffWaysToCompute("2")
| {
"repo_name": "gengwg/leetcode",
"path": "241_different_ways_to_add_parentheses.py",
"copies": "1",
"size": "1588",
"license": "apache-2.0",
"hash": 5000750482336240000,
"line_mean": 27.3571428571,
"line_max": 84,
"alpha_frac": 0.483627204,
"autogenerated": false,
"ratio": 3.260780287474333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4244407491474333,
"avg_score": null,
"num_lines": null
} |
""" 24.1 to 24.8 microns is Q
Hi-5:
snr.maglim([3.5,4.1], [290], diam=8, eta_c=0.4, eta_w=0.25, t_int=3600, snr=5, n_tel=4)
"""
#bp=[3.3,4.2]
#"{0:5.1f} & {1:5.1f} & {2:5.1f}".format(maglim(bp,280),maglim(bp,270),maglim(bp,220))
from __future__ import division, print_function
import numpy as np
import scipy.optimize as op
import matplotlib.pyplot as plt
#import astropy.units as ap_u
import astropy.constants as ap_c
import pdb
#Choose one of the following lines for 10 or 1mm PWV
#trans = np.concatenate((np.loadtxt('cptrans_zm_100_10.dat'),np.loadtxt('cptrans_nq_100_10.dat')))
trans = np.concatenate((np.loadtxt('mktrans_zm_10_10.dat'),np.loadtxt('mktrans_nq_10_10.dat')))
lmin = 10.0 #In microns
lmax = 11.0 #In microns
def vega_photonrate(wave_in):
    """Return the photon rate (photons/s/m^2/micron) of a Vega-magnitude-zero
    source at the given wavelength(s).

    Calibrated from Tokunga and Vacca (2005, PASP, 117, 421), Hewett et al. 2006,
    Cohen, Walker, Barlow, and Deacon, 1992, Astronomical Journal, vol. 104,
    1650-1657 and a long-wavelength extrapolation:
    m_AB(lambda1)-m_AB(lambda0) = 5 log_10 (lambda1/lambda0), from the
    Rayleigh-Jeans tail.

    Parameters
    ----------
    wave_in : float or sequence of one or two floats
        Either a central wavelength in microns, or [min, max] band edges.

    Raises
    ------
    ValueError
        If more than two wavelengths are supplied. (Bug fix: this case
        previously only printed an error and then crashed with NameError
        on an undefined variable.)
    """
    dwave = 1.0
    try:
        n_waves = len(wave_in)
    except TypeError:
        # Scalar input: treat it as the central wavelength.
        wave = wave_in
    else:
        if n_waves > 2:
            raise ValueError("min and max wavelengths or central wavelength only")
        elif n_waves == 2:
            dwave = np.abs(0.5*(wave_in[0] - wave_in[1]))
            wave = 0.5*(wave_in[0] + wave_in[1])
        else:
            wave = wave_in[0]
    # Vega's AB magnitude at reference filter wavelengths (microns).
    filter_waves = [0.355,0.467,0.5466,0.616,0.747,0.892,1.031,1.248,1.644,2.121,2.149,2.198,3.754,4.702,10.2,30]
    filter_ab = [0.927,-0.103,0.026,0.146,0.366,0.533,0.634,0.940,1.38,1.84,1.86,1.90,2.94,3.40,4.98,7.32]
    # First, convert the vega magnitude into AB magnitude (log-linear interpolation).
    vega_ab = np.interp(np.log10(wave), np.log10(filter_waves), filter_ab)
    # Now, convert to Jansky.
    flux_jy = 3631.0*10**(-0.4*vega_ab)
    # Now convert to photons/s/m^2/micron.
    wave_m = wave/1e6
    photon_rate = flux_jy*1e-26*ap_c.c.value/wave_m**2/(ap_c.h.value*ap_c.c.value/wave_m)*1e-6
    return photon_rate
def atm_throughput(wave_in):
    """Mean atmospheric transmission between wave_in[0] and wave_in[1] microns.

    NB: doesn't work between 5.6 and 7 microns (no data in that gap).
    """
    in_band = (trans[:, 0] > wave_in[0]) & (trans[:, 0] < wave_in[1])
    return np.mean(trans[in_band, 1])
def snr_heterodyne_constant(wave_in, printit=True):
    """Constant for the heterodyne-detection SNR equation (per polarisation)."""
    mean_wave_m = np.mean(wave_in) * 1e-6
    # Signal per unit fractional bandwidth, divided by the bandwidth in Hz
    # and by 2 for a single polarisation.
    photon_signal = atm_throughput(wave_in) * vega_photonrate(wave_in) * np.mean(wave_in)
    return photon_signal / (ap_c.c.value / mean_wave_m) / 2
def nulling_requirement(wave_in, twarm, diam):
    """Figure out the magnitude equivalent of the thermal background.

    Returns one magnitude per warm-optics temperature in `twarm` (a scalar
    temperature is promoted to a one-element list).
    """
    wave = np.mean(wave_in) * 1e-6
    try:
        len(twarm)
    except:
        twarm = [twarm]
    # Stellar signal per unit fractional bandwidth through the full aperture
    # (loop-invariant, so computed once).
    signal = atm_throughput(wave_in) * vega_photonrate(wave_in) * np.mean(wave_in) * np.pi * (diam/2.0)**2
    mags = []
    for t in twarm:
        # Thermal background in photons/mode/Hz of bandwidth/s ...
        bose = 2.0 / (np.exp(ap_c.h.value*ap_c.c.value/ap_c.k_B.value/t/wave) - 1)
        # ... then per unit fractional bandwidth.
        background = bose * ap_c.c.value / wave
        mags.append(2.5 * np.log10(signal / background))
    return mags
def snr_direct_constant(wave_in, twarm, printit=True):
    """Find the constant for the direct detection SNR equation as a function of
    wavelength and warm optics temperature.

    Note that the warm optics throughput has to include loss in coupling to the beam
    combiner 'modes', which could be e.g. just a limited square field of view. A
    complete calculation would have to include the effects of cold pupil stops etc,
    which may make such a coupling loss not as extreme as a warm optics efficiency loss.
    Similarly, atmospheric throughput loss is a 'warm' optics loss.

    Example usage:
    snr_direct_constant([10,11],270)*np.pi*2**2*np.sqrt(0.44/(1-0.48))*0.48*np.sqrt(0.1/10.5)*10**(-0.4*13.3)*sqrt(100*3600.)*sqrt(20)

    Returns a scalar for a single temperature, or a list for several.
    """
    wave = np.mean(wave_in)*1e-6
    # Accept a scalar temperature by promoting it to a one-element list.
    try:
        n_temps=len(twarm)
    except:
        twarm=[twarm]
    snrs=[]
    for t in twarm:
        #Background in photons/mode/Hz of bandwidth/s
        background = 2.0/(np.exp(ap_c.h.value*ap_c.c.value/ap_c.k_B.value/t/wave) - 1)
        #Background in photons/mode/unit fractional bandwidth
        background *= ap_c.c.value/wave
        #Signal per unit fractional bandwidth
        signal = atm_throughput(wave_in)*vega_photonrate(wave_in)*np.mean(wave_in)
        #Background limited SNR
        snrs.append(signal/np.sqrt(background))
    if printit:
        # Build a LaTeX-style table row: "{0:8.2e} & {1:8.2e} ... \\"
        fmt = "{" + ":8.2e} & {".join([str(r) for r in range(len(snrs))]) + ":8.2e} \\\\"
        print(fmt)
        print(fmt.format(*snrs))
    if len(snrs)==1:
        return snrs[0]
    else:
        return snrs
def eta_func(eta_w, eta_c):
    """Efficiency component of the SNR equation: eta_w * sqrt(eta_c / (1 - eta_w))."""
    cold_to_warm = eta_c / (1 - eta_w)
    return eta_w * np.sqrt(cold_to_warm)
def maglim(wave_in, twarm, diam=2.5, A=None, eta_c=0.44, eta_w=0.39, t_int=1e4, snr=5.0, n_tel=12, printit=True):
    """Find the point-source detection magnitude limit for PFI based on a
    series of assumptions.

    Bug fix: previously this fell off the end and returned None when more
    than one temperature was supplied; it now returns the full list,
    consistent with snr_direct_constant and surface_brightness.
    """
    if not A:
        A = np.pi*(diam/2)**2
    # Fractional bandwidth of the observing band.
    frac_wave = np.abs(wave_in[1]-wave_in[0])/0.5/(wave_in[0] + wave_in[1])
    maglims = []
    for t in twarm:
        # Flux (relative to Vega) reaching the target SNR for this temperature.
        magfunc = snr/(snr_direct_constant(wave_in, t, printit=False)*A*eta_func(eta_w, eta_c) *
                       np.sqrt(t_int)*np.sqrt(frac_wave)*np.sqrt(n_tel-1))
        maglims.append(-2.5*np.log10(magfunc))
    if printit:
        # Build a LaTeX-style table row: "{0:5.1f} & {1:5.1f} ... \\"
        fmt = "{" + ":5.1f} & {".join([str(r) for r in range(len(maglims))]) + ":5.1f} \\\\"
        print(fmt)
        print(fmt.format(*maglims))
    if len(maglims) == 1:
        return maglims[0]
    else:
        return maglims
def maglim_heterodyne(wave_in, diam=2.5, A=None, eta_w=0.35, t_int=1e4, snr=5.0, n_tel=12,
                      printit=True, polz='dual', const='new'):
    """Point-source detection magnitude limit for PFI with heterodyne detection."""
    if not A:
        A = np.pi*(diam/2)**2
    frac_wave = np.abs(wave_in[1]-wave_in[0])/0.5/(wave_in[0] + wave_in[1])
    # Bandwidth in Hz corresponding to the fractional bandwidth.
    delta_nu = frac_wave * ap_c.c.value / (np.mean(wave_in)*1e-6)
    # SNR constant: either computed from first principles, or the fixed
    # 9.8e-6 value from the older formula.
    if const == 'new':
        snr_const = snr_heterodyne_constant(wave_in)
    else:
        snr_const = 9.8e-6
    magfunc = snr / (snr_const * A * eta_w * np.sqrt(2*t_int*delta_nu) * (n_tel-1))
    if polz == 'dual':
        # Two polarisations double the signal; sqrt(2) gain in SNR.
        magfunc /= np.sqrt(2)
    return -2.5*np.log10(magfunc)
def planck_photons_beam(temperature, wave=10.5e-6, omega=1e-16, target=0):
    """Planck photon rate into a beam of solid angle omega, minus `target`.

    Returning (rate - target) lets this be used directly as a root-finding
    objective (see surface_brightness).
    """
    occupancy = np.exp(ap_c.h.value*ap_c.c.value/ap_c.k_B.value/temperature/wave) - 1
    rate = 2.0*ap_c.c.value/wave**4/occupancy*1e-6*omega
    return rate - target
def surface_brightness(wave_in, baselines=[1e3,2e3,4e3],twarm=280, diam=2.5, A=None, \
        eta_c=0.44, eta_w=0.39, t_int=1e4, snr=3.0, n_tel=12,printit=True, type='direct'):
    """Find the limiting surface-brightness temperature (K) at each baseline,
    by inverting the Planck photon rate at the per-baseline beam solid angle.

    NOTE(review): `baselines` is a mutable default argument — safe here
    because it is only read, but worth changing to a tuple eventually.
    """
    if not A:
        A = np.pi*(diam/2)**2
    frac_wave = np.abs(wave_in[1]-wave_in[0])/0.5/(wave_in[0] + wave_in[1])
    mean_wave = np.mean(wave_in)*1e-6
    delta_nu = frac_wave * ap_c.c.value/mean_wave
    t_surf = []
    for bl in baselines:
        # Limiting flux (relative to Vega) for this configuration.
        if type=='direct':
            magfunc = snr/(snr_direct_constant(wave_in, twarm,printit=False)*A*eta_w*np.sqrt(eta_c/(1-eta_w))*\
                np.sqrt(t_int)*np.sqrt(frac_wave)*np.sqrt(n_tel-1))
        else:
            #Just use the formula from Ireland (2014)
            magfunc = snr/(9.8e-6*A*eta_w*np.sqrt(2*t_int*delta_nu)*(n_tel-1))
        photons_per_beam = vega_photonrate(wave_in)*magfunc
        # Solid angle of the interferometric beam at this baseline.
        omega = np.pi*(mean_wave/bl/2)**2
        # Solve for the blackbody temperature that yields this photon rate.
        t_surf.append(op.bisect(planck_photons_beam, 20, 5000, args=(mean_wave,omega,photons_per_beam)))
    if printit:
        # Build a LaTeX-style table row: "{0:5.0f} & {1:5.0f} ... \\"
        fmt = "{" + ":5.0f} & {".join([str(r) for r in range(len(t_surf))]) + ":5.0f} \\\\"
        print(fmt)
        print(fmt.format(*t_surf))
    if len(t_surf)==1:
        return t_surf[0]
    else:
        return t_surf
if __name__=="__main__":
    # Hi-5 case with a 2-hour integration.
    # NOTE(review): the trailing -2.5*np.log10(2) presumably rescales the
    # result back to a 1-hour-equivalent limit — confirm the intent.
    maglim_1hr = maglim([3.4,4.0], [285], diam=8.0, eta_c=0.4, eta_w=0.25, t_int=3600*2, snr=5, n_tel=4)-2.5*np.log10(2)
print("Magnitude limit for Hi-5 for 1 hour integration: {:5.2f}".format(maglim_1hr)) | {
"repo_name": "mikeireland/pfi",
"path": "pfi/snr_back_of_envelope.py",
"copies": "1",
"size": "8587",
"license": "mit",
"hash": -4445615882068034600,
"line_mean": 38.3944954128,
"line_max": 134,
"alpha_frac": 0.6061488296,
"autogenerated": false,
"ratio": 2.629210042865891,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3735358872465891,
"avg_score": null,
"num_lines": null
} |
# 242. Valid Anagram
#
# Given two strings s and t, write a function to determine if t is an anagram of s.
#
# For example,
# s = "anagram", t = "nagaram", return true.
# s = "rat", t = "car", return false.
#
# Note:
# You may assume the string contains only lowercase alphabets.
#
# Follow up:
# What if the inputs contain unicode characters? How would you adapt your solution to such case?
#
# http://blog.csdn.net/coder_orz/article/details/51406015
class Solution(object):
    """Three alternative implementations of isAnagram; the last definition
    (sort-and-compare) is the one actually bound to the class."""

    def isAnagram(self, s, t):
        """Count characters of each string into dicts and compare.

        :type s: str
        :type t: str
        :rtype: bool
        """
        ds = {}
        dt = {}
        for c in s:
            ds[c] = ds.get(c, 0) + 1
        for c in t:
            dt[c] = dt.get(c, 0) + 1
        return ds == dt

    def isAnagram(self, s, t):
        # Early exit on mismatched lengths, then count with dict.get.
        # (Bug fix: original had the syntax error 'len(s) ! = len(t)'.)
        if len(s) != len(t):
            return False
        alpha = {}
        beta = {}
        for c in s:
            alpha[c] = alpha.get(c, 0) + 1
        for c in t:
            beta[c] = beta.get(c, 0) + 1
        return alpha == beta

    # sort and compare if they are equal
    def isAnagram(self, s, t):
        return sorted(s) == sorted(t)
# Demo (Python 2 print statement); expected output: False.
s = "rat"
t = "car"
print Solution().isAnagram(s, t)
| {
"repo_name": "gengwg/leetcode",
"path": "242_valid_anagram.py",
"copies": "1",
"size": "1332",
"license": "apache-2.0",
"hash": 105323501592310270,
"line_mean": 21.2,
"line_max": 96,
"alpha_frac": 0.4894894895,
"autogenerated": false,
"ratio": 3.338345864661654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43278353541616543,
"avg_score": null,
"num_lines": null
} |
# 247 Strobogrammatic Number II
# A strobogrammatic number is a number that looks the same when rotated 180 degrees (looked at upside down).
# Find all strobogrammatic numbers that are of length = n.
# For example,
# Given n = 2, return ["11","69","88","96"].
class Solution:
    def findStrobogrammatic(self, n):
        """Return all strobogrammatic numbers of length n.

        Builds numbers from the inside out: start with a valid centre
        (odd length: '0'/'1'/'8'; even length: empty string) and repeatedly
        wrap with the digit pairs (0,0), (1,1), (6,9), (9,6), (8,8).

        :type n: int
        :rtype: List[str]
        """
        res = []
        if n == 0:
            return res
        if n % 2 == 1:
            # Odd length: the middle digit must read the same upside down.
            res.append('0')
            res.append('1')
            res.append('8')
        else:
            res.append('')
        # Number of wrapping layers still to add. Bug fix: use floor
        # division so this stays an int on Python 3 as well (plain '/'
        # returns a float there, breaking the loop arithmetic).
        n //= 2
        nums1 = ['0', '1', '6', '9', '8']
        nums2 = ['0', '1', '9', '6', '8']
        while n > 0:
            tmp = []
            # do not prepend '0' on the outermost layer (no leading zeros)
            start = 1 if n == 1 else 0
            for s in res:
                i = start
                while i < len(nums1):
                    tmp.append(nums1[i] + s + nums2[i])
                    i += 1
            res = tmp  # continue building on top of tmp
            n -= 1
        return res
# Demo (Python 2 print statements) for lengths 1 through 4.
print Solution().findStrobogrammatic(1)
print Solution().findStrobogrammatic(2)
print Solution().findStrobogrammatic(3)
print Solution().findStrobogrammatic(4)
| {
"repo_name": "gengwg/leetcode",
"path": "247_strobogrammatic_number_ii.py",
"copies": "1",
"size": "1188",
"license": "apache-2.0",
"hash": 6452298081975875000,
"line_mean": 27.9756097561,
"line_max": 108,
"alpha_frac": 0.5008417508,
"autogenerated": false,
"ratio": 3.514792899408284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9506893880461138,
"avg_score": 0.0017481539494294026,
"num_lines": 41
} |
# 249. Group Shifted Strings
# Given a string, we can "shift" each of its letter to its successive letter,
# for example: "abc" -> "bcd". We can keep "shifting" which forms the sequence:
# "abc" -> "bcd" -> ... -> "xyz"
# Given a list of strings which contains only lowercase alphabets, group all strings that belong to the same shifting sequence.
# For example, given: ["abc", "bcd", "acef", "xyz", "az", "ba", "a", "z"],
# A solution is:
# [
# ["abc","bcd","xyz"],
# ["az","ba"],
# ["acef"],
# ["a","z"]
# ]
#
from collections import defaultdict
class Solution:
    def groupStrings(self, strings):
        """Group strings belonging to the same shifting sequence.

        Two strings group together when their sequences of circular
        letter-to-letter differences (mod 26) are identical.
        """
        groups = defaultdict(list)
        for word in strings:
            groups[self.encode(word)].append(word)
        return groups.values()

    def encode(self, s):
        """Shift-invariant key: circular differences of adjacent letters."""
        diffs = [(ord(b) - ord(a)) % 26 for a, b in zip(s, s[1:])]
        return ''.join(chr(d) for d in diffs)
# Demo: prints the groups (dict ordering, so group order may vary).
sol = Solution()
print(sol.groupStrings(["abc", "bcd", "acef", "xyz", "az", "ba", "a", "z"]))
| {
"repo_name": "gengwg/leetcode",
"path": "249_group_shifted_strings.py",
"copies": "1",
"size": "1034",
"license": "apache-2.0",
"hash": 4033961030151673300,
"line_mean": 26.9459459459,
"line_max": 127,
"alpha_frac": 0.5512572534,
"autogenerated": false,
"ratio": 3.1144578313253013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9148264269939927,
"avg_score": 0.003490162957074722,
"num_lines": 37
} |
""" 24 August 2017. Author: Sasha Safonova.
Script that directly calls the calcPupilMask method in DonutEngine.cc, created for development purposes only.
Be sure to compile donutengine first by:
cd ../src
make clean
make swig
make
"""
from donutlib.donutengine import donutengine
import numpy as np
from matplotlib import pyplot as plt
from skimage.transform import resize
import pandas as pd
def readimg(filename, nbin):
    """Reads an example wavefront stamp in the "trim" format and turns it into a flat pupil image.

    Parameters
    ----------
    filename : string
        full path to the text file containing the wavefront stamp
    nbin : int
        the number of pixels that the output stamp should have per side

    Returns
    -------
    img : array
        the final pupil stamp with the number of pixels per side defined by nbin
    """
    img = pd.read_csv(filename, sep='\s+', header=None)
    # Bug fix: use .values instead of the deprecated DataFrame.as_matrix(),
    # which was removed entirely in pandas 1.0.
    img = img.values
    # Binarise: any non-zero value marks an illuminated pupil pixel.
    img[img != 0] = 1
    img = resize(img, (nbin, nbin), mode='reflect')
    return img
# Set the number of pixels per side in the stamp, e.g. 512, 256, 128, etc.
binns = 512
# Set up a donutengine object with the DESI test configuration.
paramDict = {"inputFile":"",
             "wfmFile":"",
             "wfmArray":None,
             "writeToFits":False,
             "outputPrefix":"testone",
             "xDECam":0,
             "yDECam":0,
             "debugFlag":False,
             "rootFlag":False,
             "waveLength":700.0e-9,
             "nZernikeTerms":37,
             "nbin":binns,
             "nPixels":64,
             "gridCalcMode":True,
             "pixelOverSample":8,
             "scaleFactor":1.,
             "rzero":0.125,
             "nEle":1.0e6,
             "background":4000.,
             "randomFlag":False,
             "randomSeed":209823, # if this is an invalid integer, crazy errors will ensue
             "gain":1.0,
             "flipFlag":False,
             "iTelescope": 5, # 5 stands for the DESI configuration
             "ZernikeArray":[]}
gFitFunc = donutengine(**paramDict)
# Fit parameter vector: background, electron count, r0, and a defocus term.
parArr = np.zeros(gFitFunc.npar)
parArr[gFitFunc.ipar_bkgd] = 4000.
parArr[gFitFunc.ipar_nEle] = 1.e6
parArr[gFitFunc.ipar_rzero] = 0.15
parArr[gFitFunc.ipar_ZernikeFirst+2] = 7.4
# Set the angle (in degrees, Zemax Echo 22 coordinate system)
# See angles for the test fields inside 00README in the testpupils directory
gFitFunc.setXYDESI(1.325, 0.89)
#calculate and get the pupil mask
gFitFunc.calcAll(parArr)
data = gFitFunc.getvImage()  # NOTE(review): fetched but unused below
pupil = gFitFunc.getvPupilMask()
# Change the test wavefront file to be used for comparison here
filename = 'field11_trim.txt'
comparisondonut=readimg(filename, binns)
plt.figure()
#plot the donutengine output in color and the ray-tracing donut over it in grey
plt.imshow(pupil, alpha=0.5)
plt.imshow(comparisondonut, alpha=0.5, cmap='Greys')
plt.ylim(0, binns)
plt.savefig("seetheplot.png")
plt.show()
| {
"repo_name": "aaronroodman/Donut",
"path": "test/testDESIpupils/checkpupil.py",
"copies": "1",
"size": "3237",
"license": "mit",
"hash": 1429703020305031400,
"line_mean": 32.3711340206,
"line_max": 109,
"alpha_frac": 0.5730614767,
"autogenerated": false,
"ratio": 3.6370786516853935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9613479316223648,
"avg_score": 0.01933216243234921,
"num_lines": 97
} |
# 251. Flatten 2D Vector
# Implement an iterator to flatten a 2d vector.
# For example,
# Given 2d vector =
# [
# [1,2],
# [3],
# [4,5,6]
# ]
# By calling next repeatedly until hasNext returns false,
# the order of elements returned by next should be: [1,2,3,4,5,6].
# Hint:
# How many variables do you need to keep track?
# Two variables is all you need. Try with x and y.
# Beware of empty rows. It could be the first few rows.
# To write correct code, think about the invariant to maintain. What is it?
# The invariant is x and y must always point to a valid point in the 2d vector.
# Should you maintain your invariant ahead of time or right when you need it?
# Not sure? Think about how you would implement hasNext(). Which is more complex?
# Common logic in two different places should be refactored into a common method.
# Follow up:
# As an added challenge, try to code it using only iterators in C++ or iterators in Java.
class Vector2D(object):
    """Iterator over the elements of a 2d vector, flattened row by row.

    Invariant: (self.row, self.col) always addresses a valid element, or
    self.row equals len(self.vec2d) once the iteration is exhausted.
    """

    def __init__(self, vec2d):
        """
        Initialize your data structure here.
        :type vec2d: List[List[int]]
        """
        self.vec2d = vec2d
        self.row, self.col = 0, 0
        self._moveToValid()

    def _moveToValid(self):
        """Advance the cursor past empty rows so it points at a real element.

        This also handles empty leading rows and empty input at construction.
        """
        while self.row < len(self.vec2d):
            if self.col < len(self.vec2d[self.row]):
                break
            self.row += 1
            self.col = 0

    def next(self):
        """
        :rtype: int
        """
        value = self.vec2d[self.row][self.col]
        self.col += 1
        self._moveToValid()
        return value

    def hasNext(self):
        """
        :rtype: bool
        """
        return self.row != len(self.vec2d)
# Your Vector2D object will be instantiated and called as such:
if __name__ == '__main__':
    # Exercise the iterator on input with empty rows (including a trailing
    # one); expected output: [1, 2, 3, 4, 5, 6].
    vec2d = [
        [1,2],
        [3],
        [],
        [4,5,6],
        []
    ]
    # vec2d = [[3]]
    i, v = Vector2D(vec2d), []
    while i.hasNext(): v.append(i.next())
    print(v)
| {
"repo_name": "gengwg/leetcode",
"path": "251_flatten_2d_vector.py",
"copies": "1",
"size": "2157",
"license": "apache-2.0",
"hash": 7700662942209077000,
"line_mean": 26.3037974684,
"line_max": 89,
"alpha_frac": 0.5781177561,
"autogenerated": false,
"ratio": 3.4022082018927446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9462744062356833,
"avg_score": 0.00351637912718236,
"num_lines": 79
} |
# 2520 is the smallest number that can be divided by each
# of the numbers from 1 to 10 without any remainder.
#
# What is the smallest positive number that is evenly
# divisible by all of the numbers from 1 to 20?
#
# Naive solution would be brute-force dividing numbers by
# 1-20 and checking for remainder, but that is inefficient.
#
# Can mathematically solve this by finding the largest
# prime factors and exponents for 1-20 and finding a
# product, which is automated below.
from math import sqrt, ceil, log
MAX_DIVISOR = 20
# Generates a set of prime numbers less than or equal to max_num
# using Eratosthenes method. Could be optimized a bit more, but
# fast enough for current purposes.
def prime_sieve(sieve_up_to):
    """Return the set of primes <= sieve_up_to using the sieve of Eratosthenes.

    Bug fix: the previous version collected primes in two loops stitched
    together by a leftover loop variable; when the sieving loop body never
    executed (small inputs), 2 was never added — e.g. prime_sieve(3)
    returned {3} and prime_sieve(2) returned the empty set. It also left a
    dead `i = 2` assignment behind. This version sieves in place and then
    collects all surviving indices, which is correct for every input.
    """
    if sieve_up_to < 2:
        return set()
    # Make sieve inclusive
    max_num = sieve_up_to + 1
    sieve = [True] * max_num
    sieve[0] = sieve[1] = False
    # Only need to cross off multiples of i for i <= sqrt(sieve_up_to);
    # start marking at i*i since smaller multiples are already handled.
    for i in range(2, int(sqrt(sieve_up_to)) + 1):
        if sieve[i]:
            for multiple in range(i * i, max_num, i):
                sieve[multiple] = False
    # Result is an unordered set since order doesn't matter
    return {n for n in range(2, max_num) if sieve[n]}
# Replaces all sieve members with their largest exponent
# that is still less than or equal to max_value.
#
# Useful in the context of this problem to obtain
# the maximum exponents of all prime divisors
# less than or equal to MAX_DIVISOR
def adjust_sieve(sieve, max_value):
    """Map each prime p in `sieve` to the largest power p**k <= max_value.

    In the context of this problem, this yields the maximal prime-power
    factors of every divisor up to max_value.
    """
    return {p ** int(log(max_value, p)) for p in sieve}
def run():
    """Compute the LCM of 1..MAX_DIVISOR as the product of maximal prime powers."""
    max_prime_powers = adjust_sieve(prime_sieve(MAX_DIVISOR), MAX_DIVISOR)
    product = 1
    for power in max_prime_powers:
        product *= power
    print("Product of all maximum prime factors under {0} is {1}".format(MAX_DIVISOR, product))
# Sample Output:
# Product of all maximum prime factors under 20 is 232792560
#
# Total running time for Problem5.py is 0.0001831962354969647 seconds
#
# Sample Output with all debugging messages:
# Prime sieve {2, 3, 5, 7, 11, 13, 17, 19}
# Adjusted factors {5, 7, 9, 11, 13, 16, 17, 19}
# Product of all maximum prime factors under 20 is 232792560
# 232792560 / 1 = 232792560 with remainder 0
# 232792560 / 2 = 116396280 with remainder 0
# 232792560 / 3 = 77597520 with remainder 0
# 232792560 / 4 = 58198140 with remainder 0
# 232792560 / 5 = 46558512 with remainder 0
# 232792560 / 6 = 38798760 with remainder 0
# 232792560 / 7 = 33256080 with remainder 0
# 232792560 / 8 = 29099070 with remainder 0
# 232792560 / 9 = 25865840 with remainder 0
# 232792560 / 10 = 23279256 with remainder 0
# 232792560 / 11 = 21162960 with remainder 0
# 232792560 / 12 = 19399380 with remainder 0
# 232792560 / 13 = 17907120 with remainder 0
# 232792560 / 14 = 16628040 with remainder 0
# 232792560 / 15 = 15519504 with remainder 0
# 232792560 / 16 = 14549535 with remainder 0
# 232792560 / 17 = 13693680 with remainder 0
# 232792560 / 18 = 12932920 with remainder 0
# 232792560 / 19 = 12252240 with remainder 0
# 232792560 / 20 = 11639628 with remainder 0
| {
"repo_name": "YangLuGitHub/Euler",
"path": "src/scripts/Problem5.py",
"copies": "1",
"size": "3564",
"license": "mit",
"hash": -5973860079645849000,
"line_mean": 33.9411764706,
"line_max": 102,
"alpha_frac": 0.6840628507,
"autogenerated": false,
"ratio": 3.173642030276046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4357704880976046,
"avg_score": null,
"num_lines": null
} |
# 2520 is the smallest number that can be divided by each of the numbers 1:10
# What is the smallest positive number that is evenly divisible (divisible with
# no remainder) by all of the digits 1:20
# using the concept that all numbers are "built" by primes then we can
# investigate the sum of all the prime numbers, then check if that term
# is divisible by all terms 1:20
# we can check this for 1:10 where the primes are (1,2,3,5,7)
# 1*2*3*5*7 = 210, then checking to see if this is evenly divisble we see that
# it does not work for 4,8,9. All of these numbers can be decomposed into the
# sum of primes i.e. 2*2, 2*4, 3*3. We can add this to the current total
# 1*(2*(2*2))*(3*3)*5*7 = 2520
primes=(2,3,5,7,11,13,17,19)
x = 1
# find the product of all primes < 20
for i in primes:
    x *= i
print x
# added in 3 and 8 (2*2*2) after performing the analysis below,
# since 4, 8, 9, 12, 16, 18, 20 need repeated prime factors
#x=x*3*8
# check whether this product is evenly divisible by every value in 1:20
# (Python 2 print statements below)
for i in range(1,21):
    if x%i == 0:
        print 'working as intended ,',i
    else:
        print 'fucked ,',i
# 4, 8, 9, 12, 16, 18, 20 are not divisible
# if the above is correct we could solve this by finding the prime factors
# in the above numbers
# breaking these terms into their prime factors
# 2*2, 2*2*2, 3*3, 3*(2*2), (2*2)*(2*2), (2*3*3), (2*2*5)
# removing overlaps from the prime set i.e. (2,3,5,7,11,13,17,19)
# 2*2*2=8, 3 (note all other terms are already found in the prime set)
# thus the new prime set is (2,2,2,2,3,3,5,7,11,13,17,19) which is also
# the prime factors of the number we are looking to find... thus multiplying them
# together provides the solution
# 3, 8 these are the missing multiples based on the numbers above
# to generalize this we would need to find the set all prime factors of an
# individual number to we want to divide by (e.g. 1:20)
# then remove overlaps between between the sets (of numbers we want to divide by)
# the remaining values would make up the final set of multiples to generate
# the smallest evenly divisible number
| {
"repo_name": "Faraday1221/project_euler",
"path": "problem_5.py",
"copies": "1",
"size": "2017",
"license": "mit",
"hash": 3978744635409639400,
"line_mean": 37.7884615385,
"line_max": 81,
"alpha_frac": 0.6995537928,
"autogenerated": false,
"ratio": 2.9970282317979198,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41965820245979196,
"avg_score": null,
"num_lines": null
} |
#2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
#What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
# 232792560
#need to check if divisible by all primes smaller than 20, so:
# 2, 3, 5, 7, 11, 13, 17, 19
from itertools import count
#find first element that is divisible by all nums up to 20
#naive method, very slow
# for i in count(20000, 20):
# if i % 20 == 0:
# if i % 19 == 0:
# if i % 18 == 0:
# if i % 17 == 0:
# if i % 16 == 0:
# if i % 15 == 0:
# if i % 14 == 0:
# if i % 13 == 0:
# if i % 12 == 0:
# if i % 11 == 0:
# if i % 10 == 0:
# if i % 9 == 0:
# if i % 8 == 0:
# if i % 7 == 0:
# if i % 6 == 0:
# if i % 5 == 0:
# if i % 4 == 0:
# if i % 3 == 0:
# if i % 2 == 0:
# num = i
# break
# print(num)
i = 1
#find smallest number that is divisible by each of 1-20
for k in (range(1,21)):
    if i % k > 0:
        for j in range(1, 21):
            # if i is not divisible by k, then multiply it by each number until it is divisible
            # (the smallest such j supplies exactly the factor of k missing from i)
            if (i*j) % k == 0:
                # set i to that value and go to next value
                i *= j
                break
print(i) | {
"repo_name": "ledbutter/ProjectEulerPython",
"path": "Problem5.py",
"copies": "1",
"size": "1424",
"license": "mit",
"hash": 2862918763973180400,
"line_mean": 27.7083333333,
"line_max": 107,
"alpha_frac": 0.4599719101,
"autogenerated": false,
"ratio": 2.94824016563147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8629370254928088,
"avg_score": 0.055768364160676265,
"num_lines": 48
} |
# 253. Meeting Rooms II
#
# Given an array of meeting time intervals consisting of start and end times
# [[s1,e1],[s2,e2],...] (si < ei),
# find the minimum number of conference rooms required.
#
# For example,
# Given [[0, 30],[5, 10],[15, 20]],
# return 2.
class Interval(object):
    """A meeting time interval [start, end)."""

    def __init__(self, s=0, e=0):
        # Defaults give the degenerate interval [0, 0].
        self.start = s
        self.end = e
class Solution(object):
    def minMeetingRooms(self, intervals):
        """
        :type intervals: List[Interval]
        :rtype: int

        Sort start times and end times independently, then sweep the
        starts in order: a meeting that starts before the earliest
        unmatched end needs a brand-new room; otherwise it reuses the
        room freed by that end.
        """
        start_times = sorted(interval.start for interval in intervals)
        end_times = sorted(interval.end for interval in intervals)
        rooms = 0
        free_ptr = 0  # index of the earliest end time not yet matched to a start
        for pos in range(len(intervals)):
            if start_times[pos] < end_times[free_ptr]:
                rooms += 1
            else:
                free_ptr += 1
        return rooms
if __name__ == '__main__':
    # Smoke test on the example from the problem statement; expected
    # output is 2.  (Python 2 print statement.)
    intervals = [Interval(0, 30), Interval(5, 10), Interval(15, 20)]
    print Solution().minMeetingRooms(intervals)
| {
"repo_name": "gengwg/leetcode",
"path": "253_meeting_rooms_ii.py",
"copies": "1",
"size": "1053",
"license": "apache-2.0",
"hash": -8847656098003055000,
"line_mean": 23.488372093,
"line_max": 76,
"alpha_frac": 0.5289648623,
"autogenerated": false,
"ratio": 3.643598615916955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9672563478216956,
"avg_score": 0,
"num_lines": 43
} |
# 254: 11111110 254: 11111110
# 255: 11111111 1: 1
# 256: 100000000 257: 100000001
# 257: 100000001 0: 0 <- line_length = 257 - 254 + 1 = 4 % 4 = 0
# 258: 100000010 258: 100000010 <- line_length = 258 - 254 + 1 = 4 % 4 = 1
# 259: 100000011 1: 1 <- line_length = 259 - 254 + 1 = 6 % 4 = 2
# 260: 100000100 261: 100000101 <- line_length = 260 - 254 + 1 = 7 % 4 = 3
# 261: 100000101 0: 0 <- line_length = 261 - 254 + 1 = 8 % 4 = 0
def checksum_single_line(line, start_id, end_id):
    """XOR-fold every id in [start_id, end_id] in O(1).

    Uses the identity that any four consecutive integers starting at an
    even number XOR to zero, so only a leading odd id plus the (at most
    three) ids after the last complete group of four can contribute.
    `line` is unused here; it is kept for the caller's debug-logging
    signature.
    """
    if start_id & 1:
        # Peel off a leading odd id so the remaining run starts even.
        acc = start_id
        first_even = start_id + 1
    else:
        acc = 0
        first_even = start_id
    # Only the remainder of the even-started run survives the XOR.
    remainder = (end_id - first_even + 1) % 4
    if remainder == 1:
        acc ^= end_id          # one leftover id: itself
    elif remainder == 2:
        acc ^= 1               # even ^ (even+1) == 1
    elif remainder == 3:
        acc ^= end_id ^ 1      # combine both effects above
    return acc
def solution(start, length):
    """XOR checksum of worker ids arranged in a shrinking triangular queue.

    Line `line` (0-based) begins at id start + line*length and holds
    length - line consecutive ids.  Debug-instrumented variant; see the
    NOTE(review) comments for known Python-3 and undefined-name issues.
    """
    # wrap_or_end will be "End" or "Wrap"
    # End - employees will stop lining up after id 2,000,000,000
    # Wrap - employee with id 0 will dutifly line up after the employee with id 2,000,000,000
    wrap_or_end = "End"
    if wrap_or_end == "End":
        print("%s: employees will stop lining up after id 2,000,000,000" % (wrap_or_end))
    elif wrap_or_end == "Wrap":
        print("%s: employee with id 0 will line up after employee with id 2,000,000,000" % (wrap_or_end))
    else:
        print("%s: effects of reaching employee with id 2,000,000,000 will simply be ignored" % (wrap_or_end))
    LINE_TOO_LONG = 100  # NOTE(review): only referenced from commented-out code below
    NUMBER_OF_EMPLOYEES = 2000000001
    line_handling_threshold = 10001
    # done is flag to terminate outer "line" loop when a condition in inner loop triggers
    done = False
    checksum = 0
    counted = 0
    if length == 1:
        # NOTE(review): long() exists only on Python 2 -- this raises
        # NameError on Python 3 (a plain int would do).
        checksum = long(start)
    # elif length == 10000 and start == 0: # test 3 short-cut, remove when 5, 6 and 9 are working
    #     checksum = 82460672
    # elif length >= LINE_TOO_LONG:
    #     return None # short circut really large lengths
    elif length < line_handling_threshold:
        # checksum every id that is considered
        if wrap_or_end == "End":
            for line in range(length):
                # check to see if we have counted all employees 0 .. 2,000,000,000 inclusive
                # also break if termination condition triggers in inner 'id' loop
                if done:
                    break
                for position in range(length-line):
                    # NOTE(review): `id` shadows the builtin of the same name.
                    id = start + int(line * length) + position
                    if id >= NUMBER_OF_EMPLOYEES:
                        done = True
                        break
                    checksum ^= id
                if counted >= NUMBER_OF_EMPLOYEES:
                    done = True
                    break
        elif wrap_or_end == "Wrap":
            for line in range(length):
                # check to see if we have counted all employees 0 .. 2,000,000,000 inclusive
                # also break if termination condition triggers in inner 'id' loop
                if done or counted >= NUMBER_OF_EMPLOYEES:
                    break
                for position in range(length-line):
                    id = start + int(line * length) + position
                    if id >= NUMBER_OF_EMPLOYEES:
                        id -= NUMBER_OF_EMPLOYEES
                    checksum ^= id
                    counted += 1
                    if counted >= NUMBER_OF_EMPLOYEES:
                        done = True
                        break
                counted += line
        else:
            for line in range(length):
                for position in range(length-line):
                    id = start + int(line * length) + position
                    checksum ^= id
    elif length >= line_handling_threshold:
        print("length: {0} is greater than line_handling_threshold: {1} will use checksum_single_line()".format(length, line_handling_threshold) )
        # take advantage that any four consecuative integers starting with an even number XOR to 0
        if wrap_or_end == "End":
            for line in range(length): # 0 .. length-1
                # check to see if we have counted all employees 0 .. 2,000,000,000 inclusive
                # counted >= NUMBER_OF_EMPLOYEES:
                start_id = min(start + int( line * length ), 2000000000 )
                if start_id >= NUMBER_OF_EMPLOYEES:
                    break
                end_id = min(start_id + ( length - line ) - 1, 2000000000 )
                checksum ^= checksum_single_line(line, start_id, end_id)
                print("line: %6d: Checksum is now: %10d" % (line, checksum) )
                if end_id >= NUMBER_OF_EMPLOYEES:
                    break
        elif wrap_or_end == "Wrap":
            for line in range(length):
                # check to see if we have counted all employees 0 .. 2,000,000,000 inclusive
                # also break if termination condition triggers in inner 'id' loop
                if done or counted >= NUMBER_OF_EMPLOYEES:
                    break
                # NOTE(review): start_id and end_id are never assigned in this
                # branch -- this line raises NameError if it ever executes.
                checksum ^= checksum_single_line(line, start_id, end_id)
                counted += line
        else:
            for line in range(length):
                for position in range(length-line):
                    id = start + int(line * length) + position
                    checksum ^= id
                print("line: %6d: Checksum is now: %10d" % (line, checksum) )
    return checksum
# Demo runs; the expected checksum follows each call as a comment.
print(solution(0,3))
# 2
print(solution(17,4))
# 14
print(solution(999,10000))
# 129179904
# NOTE(review): length == 1 reaches the Python-2-only `long()` call inside
# solution(), so the next line raises NameError under Python 3.
print(solution(170000001,1))
# print(solution(0,2000000001))
| {
"repo_name": "perlygatekeeper/glowing-robot",
"path": "google_test/queue_to_do/solution_debug_statements.py",
"copies": "1",
"size": "6011",
"license": "artistic-2.0",
"hash": 1276958955709478100,
"line_mean": 42.2446043165,
"line_max": 146,
"alpha_frac": 0.53684911,
"autogenerated": false,
"ratio": 3.831102613129382,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4867951723129382,
"avg_score": null,
"num_lines": null
} |
# 25/4/2017
# This piece of code will run frames through vgg-19
# 'video_dset' = (112392,224,224,3)
# 'audio_dset' = (112392,18)
from extract_image_features.video_utils import *
import numpy as np
from extract_image_features.keras_pretrained_models.imagenet_utils import preprocess_input
from keras.models import Model
from extract_image_features.keras_pretrained_models.vgg19 import VGG19

### SET TO TRUE IF USING TITANX LINUX MACHINE
USE_TITANX = True

### Define video processing dimensions
frame_h = 224 # (60,60) maybe too small
frame_w = frame_h  # square frames

### DEFINE OUTPUT DIRECTORY ###
if USE_TITANX:
    data_extern_dest = '/home/zanoi/ZANOI/auditory_hallucinations_data/'
else: # Working on MacBook Pro
    data_extern_dest = '/Volumes/SAMSUNG_SSD_256GB/ADV_CV/data/'
# Both the train and the test datasets accumulate in this one HDF5 file.
data_file_name = data_extern_dest + 'TopAngleFC1_dataX_dataY.h5'

### LOADING VIDEOS ###
print ("--- Loading video and audio filenames...")
if USE_TITANX:
    video_dir = '/home/zanoi/ZANOI/auditory_hallucinations_videos'
else: # Working on MacBook Pro
    video_dir = "/Volumes/SAMSUNG_SSD_256GB/ADV_CV/4-24_VIDAUD/EXPORTS"
# NOTE(review): os, h5py, sio and tqdm are presumably re-exported by the
# star import from video_utils -- confirm.
video_files = [os.path.join(video_dir, file_i)
               for file_i in os.listdir(video_dir)
               if file_i.endswith('.mp4')]
num_videos = len(video_files)
print("num_videos: ", num_videos)

### LOADING AUDIO ###
audio_feature_dir = "../audio_vectors"
audio_f_files = [os.path.join(audio_feature_dir, file_i)
                 for file_i in os.listdir(audio_feature_dir)
                 if file_i.endswith('.mat')]
num_audio_f = len(audio_f_files)
print("num_audio_f: ", num_audio_f)
### MAIN FUNCTION LOOP FOR PROCESSING TRAINING SET ###
for i in range(num_audio_f): # Loop over all audio files
    audio_prefix, audio_vector_length, audio_features = returnAudioVectors(i, audio_f_files)
    print ("--------------------{ " + str(audio_prefix) + " }-----------------------")
    # Find all the linked videos for the given audio vector
    linked_video_f = findMatchingVideos(audio_prefix, video_files)
    print(audio_f_files[i])
    print(linked_video_f)
    for j, video_filename in enumerate(linked_video_f):
        # Process the videos linked to a particular audio vector
        ######## PROCESS VIDEO TO BLACK AND WHITE
        print("--- Processing video to greyscale...")
        output_dimensions = (frame_h,frame_w)
        processed_video = processOneVideo(audio_vector_length, video_filename, output_dimensions=output_dimensions, normalize=False)
        print("processed_video.shape:", processed_video.shape)
        ######### CONCATENATE INTO SPACETIME IMAGE
        # Stack `window` consecutive grey frames into the 3 channels VGG expects.
        print ("--- Concatenating into Spacetime image...")
        window = 3
        space_time_images = createSpaceTimeImagesforOneVideo(processed_video,window) # (8377, 224, 224, 3)
        print ("space_time_image.shape:", space_time_images.shape)
        # NOTE(review): this unpack rebinds the module-level frame_h/frame_w;
        # harmless only while the shapes stay 224x224.
        (num_frames, frame_h, frame_w, channels) = space_time_images.shape
        ########## RUN THE SPACETIME IMAGES THROUGH VGG19
        print("--- Running through VGG19 FC1 layer...")
        # Build the model
        # NOTE(review): the model is rebuilt for every video; hoisting these
        # two lines above the loops would avoid reloading the weights.
        base_model = VGG19(weights='imagenet')
        model = Model(input=base_model.input,
                      output=base_model.get_layer('fc1').output) # Only take the FC2 layer output
        # Preallocate matrix output
        CNN_FC_output = np.zeros((num_frames, 1, 4096)) # (1,8377,1,4096) -> FC2 outputs dimensions (1,4096)
        for frame_num in tqdm(range(num_frames)):
            img = space_time_images[frame_num]
            x = np.expand_dims(img, axis=0)
            x = preprocess_input(x)
            fc1_features = model.predict(x) # Predict the FC2 features from VGG19, output shape is (1,4096)
            CNN_FC_output[frame_num] = fc1_features # Save the FC2 features to a matrix
        print("CNN_FC_output.shape:", CNN_FC_output.shape) # (8377,1,4096)
        ########### CREATE FINAL DATASET, concatenate FC output with audio vectors
        # To avoid memory problems, we incrementally add to h5py file. A single video is processed and dumped to the h5py
        if i == 0 and j == 0:
            # If this is the first video file, you need to create the first entry matrix
            with h5py.File(data_file_name, 'w') as f:
                # Create the dataset in the h5py file
                video_dset = f.create_dataset("dataX_train", CNN_FC_output.shape, maxshape=(None, 1, 4096)) # maxshape = (None, 224,224,3)
                # Normalization of the audio_vectors occurs in this function -> Hanoi forgot to normalize in MATLAB!!!!
                final_audio_vector = createAudioVectorDatasetForOneVid(audio_features,space_time_images.shape) # (8377, 18)
                print("final_audio_vector.shape:", final_audio_vector.shape)
                audio_dset = f.create_dataset("dataY_train", final_audio_vector.shape, maxshape=(None, 18))
                print("Writing data to file...")
                video_dset[:] = CNN_FC_output
                audio_dset[:] = final_audio_vector
                print("video_dset.shape:", video_dset.shape)
                print("audio_dset.shape:", audio_dset.shape)
        else:
            with h5py.File(data_file_name, 'a') as hf:
                # Normalization of the audio_vectors occurs in this function -> Hanoi forgot to normalize in MATLAB!!!!
                final_audio_vector = createAudioVectorDatasetForOneVid(audio_features,
                                                                      space_time_images.shape) # (8377, 18)
                print("final_audio_vector.shape:", final_audio_vector.shape)
                print("Writing data to file...")
                # Grow both datasets along axis 0, then write the new rows.
                video_dset = hf['dataX_train']
                video_dset.resize(video_dset.len() + num_frames, axis=0)
                video_dset[-num_frames:] = CNN_FC_output
                audio_dset = hf['dataY_train']
                audio_dset.resize(audio_dset.len() + num_frames, axis=0)
                audio_dset[-num_frames:] = final_audio_vector
                print("video_dset.shape:", video_dset.shape)
                print("audio_dset.shape:", audio_dset.shape)
        print ("Current video complete!")
####### PROCESSING THE TEST SET
# There is only one video for test set
if USE_TITANX:
    test_video_filename = '/home/zanoi/ZANOI/auditory_hallucinations_videos/TEST_SET/seq7TEST_angle1.mp4'
else: # Working on MacBook Pro
    test_video_filename = '/Volumes/SAMSUNG_SSD_256GB/ADV_CV/4-24_VIDAUD/EXPORTS/TEST_SET/seq7TEST_angle1.mp4'
test_audio_vector_filename = '../audio_vectors/TEST_SET/seq7TEST_audio_vectors.mat'
print ("--------------------{ PROCESSING TEST SET }-----------------------")
# Load the matching audio features straight from the .mat file.
mat_contents = sio.loadmat(test_audio_vector_filename) # 18 x n-2
audio_features = mat_contents['audio_vectors']
audio_vector_length = audio_features.shape[1]
print("--- Processing video to greyscale...")
output_dimensions = (frame_h,frame_w)
processed_video = processOneVideo(audio_vector_length, test_video_filename, output_dimensions=output_dimensions, normalize=False)
print("processed_video.shape:", processed_video.shape)
######### CONCATENATE INTO SPACETIME IMAGE
print ("--- Concatenating into Spacetime image...")
window = 3
space_time_images = createSpaceTimeImagesforOneVideo(processed_video,window) # (8377, 224, 224, 3)
print ("space_time_image.shape:", space_time_images.shape)
(num_frames, frame_h, frame_w, channels) = space_time_images.shape
########## RUN THE SPACETIME IMAGES THROUGH VGG19
print("--- Running through VGG19 FC2 layer...")
# Build the model (same FC1-truncated VGG19 as in the training loop above)
base_model = VGG19(weights='imagenet')
model = Model(input=base_model.input,
              output=base_model.get_layer('fc1').output) # Only take the FC2 layer output
# Preallocate matrix output
CNN_FC_output = np.zeros((num_frames, 1, 4096)) # (1,8377,1,4096) -> FC2 outputs dimensions (1,4096)
for frame_num in tqdm(range(num_frames)):
    img = space_time_images[frame_num]
    x = np.expand_dims(img, axis=0)
    x = preprocess_input(x)
    fc1_features = model.predict(x) # Predict the FC2 features from VGG19, output shape is (1,4096)
    CNN_FC_output[frame_num] = fc1_features # Save the FC2 features to a matrix
print("CNN_FC_output.shape:", CNN_FC_output.shape) # (1,8377,1,4096)
# Need to create a new dataset in the original h5py file for test set that is separate from training set
with h5py.File(data_file_name, 'a') as f:
    video_dset = f.create_dataset("dataX_test", CNN_FC_output.shape,
                                  maxshape=(None, 1, 4096)) # maxshape = (None, 224,224,3)
    # Normalization of the audio_vectors occurs in this function -> Hanoi forgot to normalize in MATLAB!!!!
    final_audio_vector = createAudioVectorDatasetForOneVid(audio_features, space_time_images.shape) # (8377, 18)
    print("final_audio_vector.shape:", final_audio_vector.shape)
    audio_dset = f.create_dataset("dataY_test", final_audio_vector.shape, maxshape=(None, 18))
    print("Writing data to file...")
    video_dset[:] = CNN_FC_output
    audio_dset[:] = final_audio_vector
    print("video_dset.shape:", video_dset.shape)
    print("audio_dset.shape:", audio_dset.shape)
print ("Current video complete!")
print ("--- {EVERYTHING COMPLETE HOMIEEEEEEEEE} ---") | {
"repo_name": "schen496/auditory-hallucinations",
"path": "extract_image_features/processVideosTopAngleFC1.py",
"copies": "1",
"size": "9227",
"license": "apache-2.0",
"hash": -3527483487873842000,
"line_mean": 44.9104477612,
"line_max": 139,
"alpha_frac": 0.6479895958,
"autogenerated": false,
"ratio": 3.2918301819479128,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4439819777747913,
"avg_score": null,
"num_lines": null
} |
# 254 Factor Combinations
# Numbers can be regarded as product of its factors. For example,
#
# 8 = 2 x 2 x 2;
# = 2 x 4.
#
# Write a function that takes an integer n and return all possible combinations of its factors.
#
# Note:
#
# You may assume that n is always positive.
# Factors should be greater than 1 and less than n.
#
# Examples:
# input: 1
# output:
#
# []
#
# input: 37
# output:
#
# []
#
# input: 12
# output:
#
# [
# [2, 6],
# [2, 2, 3],
# [3, 4]
# ]
#
# input: 32
# output:
#
# [
# [2, 16],
# [2, 2, 8],
# [2, 2, 2, 4],
# [2, 2, 2, 2, 2],
# [2, 4, 4],
# [4, 8]
# ]
#
import math
class Solution(object):
    def getFactors(self, n):
        """
        :type n: int
        :rtype: List[List[int]]

        Enumerate every combination of factors of n (each factor in
        [2, n), listed in nondecreasing order).  n by itself does not
        count, so primes and n <= 1 yield [].
        """
        if n <= 1:
            return []
        combos = []
        self.helper(n, 2, [], combos)
        return combos

    def helper(self, target, start_num, factors, res):
        """DFS: the factors chosen so far sit in `factors`; those still to
        be chosen must multiply to `target` and be >= `start_num` (which
        keeps each combination sorted and free of duplicates)."""
        if target == 1:
            # Keep only genuine factorizations (more than the number itself).
            if len(factors) > 1:
                res.append(list(factors))
            return
        for candidate in range(start_num, target + 1):
            if target % candidate:
                continue
            factors.append(candidate)
            self.helper(target // candidate, candidate, factors, res)
            factors.pop()
# Sanity runs: a prime power, a small composite, and 2**5.
sol = Solution()
print(sol.getFactors(8))
print(sol.getFactors(12))
print(sol.getFactors(32))
| {
"repo_name": "gengwg/leetcode",
"path": "254_factor_combinations.py",
"copies": "1",
"size": "1723",
"license": "apache-2.0",
"hash": -6619772164491840000,
"line_mean": 19.2705882353,
"line_max": 95,
"alpha_frac": 0.5322112594,
"autogenerated": false,
"ratio": 3.0281195079086114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9052149350380714,
"avg_score": 0.0016362833855793754,
"num_lines": 85
} |
# 256. Paint House
# There are a row of n houses, each house can be painted with one of the three colors: red, blue or green.
# The cost of painting each house with a certain color is different.
# You have to paint all the houses such that no two adjacent houses have the same color.
#
# The cost of painting each house with a certain color is represented by a n x 3 cost matrix.
# For example, costs[0][0] is the cost of painting house 0 with color red;
# costs[1][2] is the cost of painting house 1 with color green, and so on…
# Find the minimum cost to paint all houses.
#
# Note:
# All costs are positive integers.
class Solution:
    def minCost(self, costs):
        """Minimum total cost to paint all houses so that no two adjacent
        houses share a color.

        :type costs: List[List[int]]  # costs[i][c], c in {0: red, 1: blue, 2: green}
        :rtype: int

        DP: dp[i][c] is the cheapest way to paint houses 0..i with house i
        painted color c; house i may follow either of the other two colors.
        """
        if costs is None or len(costs) == 0:
            return 0
        dp = [[0 for i in range(3)] for j in range(len(costs))]
        dp[0][0] = costs[0][0]
        dp[0][1] = costs[0][1]
        dp[0][2] = costs[0][2]
        # BUG FIX: start the recurrence at 1, not 0.  Starting at 0 made
        # dp[0] depend on dp[-1] (the *last* row), which corrupted the
        # answer for a single house and only worked by accident otherwise.
        for i in range(1, len(costs)):
            dp[i][0] = costs[i][0] + min(dp[i-1][1], dp[i-1][2])
            dp[i][1] = costs[i][1] + min(dp[i-1][0], dp[i-1][2])
            dp[i][2] = costs[i][2] + min(dp[i-1][0], dp[i-1][1])
        return min(dp[-1][0], dp[-1][1], dp[-1][2])
| {
"repo_name": "gengwg/leetcode",
"path": "256_paint_house.py",
"copies": "1",
"size": "1187",
"license": "apache-2.0",
"hash": -5890696462133975000,
"line_mean": 33.8529411765,
"line_max": 106,
"alpha_frac": 0.5915611814,
"autogenerated": false,
"ratio": 2.925925925925926,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4017487107325926,
"avg_score": null,
"num_lines": null
} |
# 257. Binary Tree Paths
#
# Given a binary tree, return all root-to-leaf paths.
#
# For example, given the following binary tree:
#
# 1
# / \
# 2 3
# \
# 5
#
# All root-to-leaf paths are:
#
# ["1->2->5", "1->3"]
class TreeNode:
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x
        # Children start out absent; callers attach them directly.
        self.left = None
        self.right = None
class Solution:
    # @param {TreeNode} root
    # @return {string[]}
    def binaryTreePaths(self, root):
        """Return every root-to-leaf path, e.g. ["1->2->5", "1->3"]."""
        result, path = [], []
        self.binaryTreePathsRecu(root, path, result)
        return result

    def binaryTreePathsRecu(self, node, path, result):
        """Depth-first walk; `path` holds the ancestors of `node`."""
        if node is None:
            return
        if node.left is None and node.right is None:
            # Leaf reached: join the ancestors plus this node into a string.
            pieces = [str(ancestor.val) for ancestor in path]
            pieces.append(str(node.val))
            result.append("->".join(pieces))
        for child in (node.left, node.right):
            if child is not None:
                path.append(node)
                self.binaryTreePathsRecu(child, path, result)
                path.pop()
# https://gengwg.blogspot.com/2018/03/leetcode-257-binary-tree-paths.html
class Solution:
    # @param {TreeNode} root
    # @return {string[]}
    def binaryTreePaths(self, root):
        """Variant that threads the partial path along as a string."""
        collected = []
        if root is not None:
            self.helper(root, '', collected)
        return collected

    def helper(self, node, path, result):
        """`path` is the '->'-terminated prefix built from the ancestors."""
        label = str(node.val)
        if node.left is None and node.right is None:
            # Leaf: the finished path is the prefix plus this node's value.
            result.append(path + label)
        if node.left:
            self.helper(node.left, path + label + '->', result)
        if node.right:
            self.helper(node.right, path + label + '->', result)
if __name__ == "__main__":
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
root.left.right = TreeNode(5)
root.left.left = TreeNode(4)
print Solution().binaryTreePaths(root)
| {
"repo_name": "gengwg/leetcode",
"path": "257_binary_tree_paths.py",
"copies": "1",
"size": "2079",
"license": "apache-2.0",
"hash": 1593984083958108200,
"line_mean": 24.3536585366,
"line_max": 75,
"alpha_frac": 0.5512265512,
"autogenerated": false,
"ratio": 3.465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45162265512,
"avg_score": null,
"num_lines": null
} |
# 258. Add Digits
#
# Given a non-negative integer num, repeatedly add all its digits until the result has only one digit.
#
# For example:
#
# Given num = 38, the process is like: 3 + 8 = 11, 1 + 1 = 2. Since 2 has only one digit, return it.
#
# Follow up:
# Could you do it without any loop/recursion in O(1) runtime?
#
class Solution(object):
    # NOTE: addDigits is (re)defined three times below; only the final
    # definition is bound when the class body finishes executing.

    # recursive digit-sum
    def addDigits(self, num):
        """
        :type num: int
        :rtype: int
        """
        if num < 10:
            return num
        return self.addDigits(sum(int(d) for d in str(num)))

    # iterative digit-sum
    def addDigits2(self, num):
        while num >= 10:  # loop until a single digit remains
            num = num // 10 + num % 10
        return num

    # O(1) digital root:
    # http://bookshadow.com/weblog/2015/08/16/leetcode-add-digits/
    def addDigits(self, num):
        if num == 0:
            return 0
        return (num - 1) % 9 + 1

    # easier to understand (this is the definition that wins)
    def addDigits(self, num):
        total = 0  # renamed from `sum`, which shadowed the builtin
        while num > 0:
            total += num % 10
            # BUG FIX: was `num /= 10`, which is float division on
            # Python 3 and looped forever on fractional values; floor
            # division is correct on both Python 2 and 3.
            num //= 10
        if total >= 10:
            return self.addDigits(total)
        else:
            return total
# Smoke tests (Python 2 print statements); expected: 2, 8, 0, 3.
print Solution().addDigits(38)
print Solution().addDigits(8)
print Solution().addDigits(0)
print Solution().addDigits(138)
| {
"repo_name": "gengwg/leetcode",
"path": "258_add_digits.py",
"copies": "1",
"size": "1415",
"license": "apache-2.0",
"hash": -5389234857446795000,
"line_mean": 24.2678571429,
"line_max": 105,
"alpha_frac": 0.5533568905,
"autogenerated": false,
"ratio": 3.5463659147869673,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45997228052869676,
"avg_score": null,
"num_lines": null
} |
# 259. 3Sum Smaller
# Given an array of n integers nums and a target,
# find the number of index triplets i, j, k with 0 <= i < j < k < n
# that satisfy the condition nums[i] + nums[j] + nums[k] < target.
# For example, given nums = [-2, 0, 1, 3], and target = 2.
# Return 2. Because there are two triplets which sums are less than 2:
# [-2, 0, 1]
# [-2, 0, 3]
# Follow up:
# Could you solve it in O(n2) runtime?
class Solution(object):
    def threeSumSmaller(self, nums, target):
        """Count index triplets i < j < k with nums[i]+nums[j]+nums[k] < target.

        Sort first, then fix the smallest element and run two pointers
        from both ends of the remainder.  When the three-sum is below
        target, every index between the pointers also works with the left
        pointer, contributing (right - mid) triplets at once; otherwise
        shrink from the right.  O(n^2) overall (sorting mutates nums, as
        in the original).
        """
        count = 0
        nums.sort()
        for left in range(len(nums) - 2):
            mid, right = left + 1, len(nums) - 1
            while mid < right:
                if nums[left] + nums[mid] + nums[right] < target:
                    count += right - mid
                    mid += 1
                else:
                    right -= 1
        return count
if __name__ == '__main__':
    # Expected output: 2, then 0 for the empty list.
    print(Solution().threeSumSmaller([-2, 0, 1, 3], 2))
    print(Solution().threeSumSmaller([], 2))
| {
"repo_name": "gengwg/leetcode",
"path": "259_3sum_smaller.py",
"copies": "1",
"size": "1475",
"license": "apache-2.0",
"hash": 2982647800105050000,
"line_mean": 28.575,
"line_max": 70,
"alpha_frac": 0.5553677092,
"autogenerated": false,
"ratio": 2.097517730496454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.31528854396964534,
"avg_score": null,
"num_lines": null
} |
# 26.02.2007, c
# last revision: 25.02.2008
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/3d/elbow2.mesh'
options = {
'nls' : 'newton',
'ls' : 'ls',
'post_process_hook' : 'verify_incompressibility',
}
field_1 = {
'name' : '3_velocity',
'dtype' : 'real',
'shape' : (3,),
'region' : 'Omega',
'approx_order' : '1B',
}
field_2 = {
'name' : 'pressure',
'dtype' : 'real',
'shape' : (1,),
'region' : 'Omega',
'approx_order' : 1,
}
# Can use logical operations '&' (and), '|' (or).
region_1000 = {
'name' : 'Omega',
'select' : 'elements of group 6',
}
region_0 = {
'name' : 'Walls',
'select' : 'nodes of surface -n (r.Outlet +n r.Inlet)',
'can_cells' : False,
}
region_1 = {
'name' : 'Inlet',
'select' : 'nodes by cinc0', # In
'can_cells' : False,
}
region_2 = {
'name' : 'Outlet',
'select' : 'nodes by cinc1', # Out
'can_cells' : False,
}
ebc_1 = {
'name' : 'Walls',
'region' : 'Walls',
'dofs' : {'u.all' : 0.0},
}
ebc_2 = {
'name' : 'Inlet',
'region' : 'Inlet',
'dofs' : {'u.1' : 1.0, 'u.[0,2]' : 0.0},
}
material_1 = {
'name' : 'fluid',
'values' : {
'viscosity' : 1.25e-3,
'density' : 1e0,
},
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '3_velocity',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '3_velocity',
'dual' : 'u',
}
variable_3 = {
'name' : 'p',
'kind' : 'unknown field',
'field' : 'pressure',
'order' : 1,
}
variable_4 = {
'name' : 'q',
'kind' : 'test field',
'field' : 'pressure',
'dual' : 'p',
}
variable_5 = {
'name' : 'pp',
'kind' : 'parameter field',
'field' : 'pressure',
'like' : 'p',
}
integral_1 = {
'name' : 'i1',
'kind' : 'v',
'quadrature' : 'gauss_o2_d3',
}
integral_2 = {
'name' : 'i2',
'kind' : 'v',
'quadrature' : 'gauss_o3_d3',
}
##
# Stationary Navier-Stokes equations.
equations = {
'balance' :
"""+ dw_div_grad.i2.Omega( fluid.viscosity, v, u )
+ dw_convect.i2.Omega( v, u )
- dw_stokes.i1.Omega( v, p ) = 0""",
'incompressibility' :
"""dw_stokes.i1.Omega( u, q ) = 0""",
}
##
# FE assembling parameters.
fe = {
'chunk_size' : 1000
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 5,
'eps_a' : 1e-8,
'eps_r' : 1.0,
'macheps' : 1e-16,
'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red).
'ls_red' : 0.1,
'ls_red_warp' : 0.001,
'ls_on' : 0.99999,
'ls_min' : 1e-5,
'check' : 0,
'delta' : 1e-6,
'is_plot' : False,
'problem' : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max)
}
def verify_incompressibility(out, problem, state, extend=False):
    """Post-process hook: check the weak incompressibility condition.

    This hook is normally used to insert additional results into `out`;
    here it only evaluates div(u) against a constant-one pressure test
    function and asserts the result is numerically zero.
    """
    from sfepy.base.base import Struct, debug, nm, output, assert_

    variables = problem.get_variables()
    ones = nm.ones((variables['p'].field.n_nod,), dtype=nm.float64)
    variables['p'].data_from_any(ones)
    div_u = problem.evaluate('dw_stokes.i1.Omega( u, p )', p=ones,
                             u=variables['u'](), call_mode='d_eval')
    output('div( u ) = %.3e' % div_u)
    assert_(abs(div_u) < 1e-14)
    return out
##
# Functions.
import os.path as op
import utils
# Pick the cinc_* helper from utils that matches the mesh basename,
# e.g. 'cinc_elbow2' for elbow2.mesh.
cinc_name = 'cinc_' + op.splitext(op.basename(filename_mesh))[0]
cinc = getattr(utils, cinc_name)
# Region-selection callables referenced by name in region_1/region_2.
functions = {
    'cinc0' : (lambda coors, domain=None: cinc(coors, 0),),
    'cinc1' : (lambda coors, domain=None: cinc(coors, 1),),
}
| {
"repo_name": "olivierverdier/sfepy",
"path": "examples/navier_stokes/navier_stokes.py",
"copies": "1",
"size": "3826",
"license": "bsd-3-clause",
"hash": -6328635134330447000,
"line_mean": 20.138121547,
"line_max": 77,
"alpha_frac": 0.5083638265,
"autogenerated": false,
"ratio": 2.554072096128171,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8454939671121837,
"avg_score": 0.021499250301266783,
"num_lines": 181
} |
# 26.02.2007, c
# last revision: 25.02.2008
filename_mesh = 'database/pul_klikatak2.mesh'
options = {
'nls' : 'newton',
'ls' : 'ls',
'post_process_hook' : 'verify_incompressibility',
}
field_1 = {
'name' : '3_velocity',
'dim' : (3,1),
'domain' : 'Omega',
'bases' : {'Omega' : '3_4_P1B'}
}
field_2 = {
'name' : 'pressure',
'dim' : (1,1),
'domain' : 'Omega',
'bases' : {'Omega' : '3_4_P1'}
}
# Can use logical operations '&' (and), '|' (or).
region_1000 = {
'name' : 'Omega',
'select' : 'elements of group 6',
}
region_0 = {
'name' : 'Walls',
'select' : 'nodes of surface -n (r.Outlet +n r.Inlet)',
'can_cells' : False,
}
region_1 = {
'name' : 'Inlet',
'select' : 'nodes by cinc( x, y, z, 0 )', # In
'can_cells' : False,
}
region_2 = {
'name' : 'Outlet',
'select' : 'nodes by cinc( x, y, z, 1 )', # Out
'can_cells' : False,
}
ebc_1 = {
'name' : 'Walls',
'region' : 'Walls',
'dofs' : {'u.all' : 0.0},
}
ebc_2 = {
'name' : 'Inlet',
'region' : 'Inlet',
'dofs' : {'u.1' : 1.0, 'u.[0,2]' : 0.0},
}
material_1 = {
'name' : 'fluid',
'mode' : 'here',
'region' : 'Omega',
'viscosity' : 1.25e-3,
'density' : 1e0,
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '3_velocity',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '3_velocity',
'dual' : 'u',
}
variable_3 = {
'name' : 'p',
'kind' : 'unknown field',
'field' : 'pressure',
'order' : 1,
}
variable_4 = {
'name' : 'q',
'kind' : 'test field',
'field' : 'pressure',
'dual' : 'p',
}
variable_5 = {
'name' : 'pp',
'kind' : 'parameter field',
'field' : 'pressure',
'like' : 'p',
}
integral_1 = {
'name' : 'i1',
'kind' : 'v',
'quadrature' : 'gauss_o2_d3',
}
integral_2 = {
'name' : 'i2',
'kind' : 'v',
'quadrature' : 'gauss_o3_d3',
}
##
# Stationary Navier-Stokes equations.
equations = {
'namespaces' : {
'dw_' : ('stokes', 'div_grad', 'convect'),
},
'balance' :
"""+ div_grad.i2.Omega( fluid.viscosity, v, u ) + convect.i2.Omega( v, u )
- stokes.i1.Omega( v, p ) = 0""",
'incompressibility' :
"""stokes.i1.Omega( u, q ) = 0""",
}
##
# Stokes equations.
## equations = {
## 'namespaces' : {
## 'dw_' : ('div', 'grad', 'divgrad'),
## },
## 'balance' :
## """+ div_grad.i1.Omega( fluid, v, u ) - grad.i1.Omega( v, p ) = 0""",
## 'incompressibility' :
## """div.i1.Omega( q, u ) = 0""",
## }
##
# FE assembling parameters.
fe = {
'chunk_size' : 1000
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.umfpack',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 5,
'eps_a' : 1e-8,
'eps_r' : 1.0,
'macheps' : 1e-16,
'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red).
'ls_red' : 0.1,
'ls_red_warp' : 0.001,
'ls_on' : 0.99999,
'ls_min' : 1e-5,
'check' : 0,
'delta' : 1e-6,
'is_plot' : False,
'matrix' : 'internal', # 'external' or 'internal'
'problem' : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max)
}
def verify_incompressibility( out, problem, state, extend = False ):
    """This hook is normally used for post-processing (additional results can
    be inserted into `out` dictionary), but here we just verify the weak
    incompressibility condition."""
    from sfepy.base.base import Struct, debug
    from sfepy.fem import eval_term_op
    vv = problem.variables
    # NOTE(review): `nm` (numpy) is not imported in this scope -- the next
    # line raises NameError when the hook actually runs; the newer version
    # of this file imports nm from sfepy.base.base.
    one = nm.ones( (vv['pp'].field.n_nod,), dtype = nm.float64 )
    vv['pp'].data_from_data( one )
    zero = eval_term_op( state,
                         'dw_stokes.i1.Omega( u, pp )',
                         problem, pp = one, call_mode = 'd_eval' )
    # Python 2 print statement.
    print 'div( u ) = %.3e' % zero
    return out
##
# Functions.
from valec import *
##
# Make 'cinc' refer to a cinc_* function according to the mesh file name.
import os.path as op
trunk = op.splitext( op.basename( filename_mesh ) )[0]
# Debug prints (Python 2 syntax).
print trunk
# NOTE(review): eval() on a constructed name; getattr on a module (as the
# newer file does) would be safer.
cinc = eval( 'cinc_' + trunk )
print cinc
del op, trunk
| {
"repo_name": "certik/sfepy",
"path": "input/navier_stokes.py",
"copies": "1",
"size": "4183",
"license": "bsd-3-clause",
"hash": -3314064456168601000,
"line_mean": 20.6735751295,
"line_max": 78,
"alpha_frac": 0.5001195314,
"autogenerated": false,
"ratio": 2.550609756097561,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8425782254149696,
"avg_score": 0.024989406669573028,
"num_lines": 193
} |
# 260. Single Number III
#
# Given an array of numbers nums, in which exactly two
# elements appear only once and all the other elements
# appear exactly twice. Find the two elements that appear only once.
#
# For example:
#
# Given nums = [1, 2, 1, 3, 2, 5], return [3, 5].
#
# Note:
# The order of the result is not important. So in the
# above example, [5, 3] is also correct.
class Solution:
    # NOTE: singleNumber is defined three times; only the last definition
    # is bound on the class.

    # Dict as a toggle: seeing an element a second time removes it again,
    # so the surviving keys are exactly the elements that appeared once.
    def singleNumber(self, nums):
        survivors = {}
        for value in nums:
            if value in survivors:
                del survivors[value]
            else:
                survivors[value] = 1
        return list(survivors.keys())

    # Count occurrences, then keep the keys whose count is exactly one.
    def singleNumber(self, nums):
        counts = {}
        for value in nums:
            if value in counts:
                counts[value] += 1
            else:
                counts[value] = 1
        return [key for key, cnt in counts.items() if cnt == 1]

    # Two sets: everything seen at least once vs. seen at least twice.
    # https://www.jianshu.com/p/c31bd59d7877
    def singleNumber(self, nums):
        seen_once = set()
        seen_again = set()
        for value in nums:
            if value in seen_once:
                seen_again.add(value)
            else:
                seen_once.add(value)
        return list(seen_once - seen_again)
if __name__ == "__main__":
    # fix: the print-statement form was Python-2-only; the call form below
    # is valid under both Python 2 and Python 3
    print(Solution().singleNumber([1, 2, 1, 3, 2, 5]))
| {
"repo_name": "gengwg/leetcode",
"path": "260_single_number_iii.py",
"copies": "1",
"size": "1455",
"license": "apache-2.0",
"hash": -8047346649812437000,
"line_mean": 25.9444444444,
"line_max": 68,
"alpha_frac": 0.5374570447,
"autogenerated": false,
"ratio": 3.6375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46749570447,
"avg_score": null,
"num_lines": null
} |
# 26/11/2019
import argparse
import asyncio
import multiprocessing
import sys
import threading
import time
def sleep_print(x):
    """Sleep for ``x`` seconds, then print ``x`` (thread/process worker)."""
    time.sleep(x)
    print(x)
async def asleep_print(x):
    """Asynchronously sleep ``x`` seconds, then print ``x``."""
    await asyncio.sleep(x)
    print(x)
async def sleep_sort(numbers):
    """Race one sleeper per value; output appears in ascending order."""
    sleepers = [asleep_print(x) for x in numbers]
    await asyncio.gather(*sleepers)
if __name__ == "__main__":
    # Numbers are read one per line from a file argument, or stdin by default.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "file", nargs="?", type=argparse.FileType("r"), default=sys.stdin
    )
    # At most one backend may be chosen; coroutines (asyncio) is the default.
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-c", "--coroutines", action="store_true")
    group.add_argument("-t", "--threads", action="store_true")
    group.add_argument("-p", "--processes", action="store_true")
    args = parser.parse_args()
    numbers = []
    for line in args.file:
        x = int(line.strip())
        numbers.append(x)
    # "Sleep sort": each value waits its own number of seconds before
    # printing, so values appear in ascending order.
    if args.threads:
        for x in numbers:
            threading.Timer(x, print, args=[x]).start()
    elif args.processes:
        for x in numbers:
            multiprocessing.Process(target=sleep_print, args=[x]).start()
    else:
        asyncio.run(sleep_sort(numbers))
| {
"repo_name": "tlseabra/dailyprogrammer",
"path": "Python/easy/e091.py",
"copies": "2",
"size": "1203",
"license": "mit",
"hash": -1405074483794074000,
"line_mean": 22.1346153846,
"line_max": 73,
"alpha_frac": 0.6259351621,
"autogenerated": false,
"ratio": 3.6565349544072947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5282470116507294,
"avg_score": null,
"num_lines": null
} |
# 261 Graph Valid Tree
# Given n nodes labeled from 0 to n - 1 and a list of undirected edges (each edge is a pair of nodes),
# write a function to check whether these edges make up a valid tree.
#
# For example:
#
# Given n = 5 and edges = [[0, 1], [0, 2], [0, 3], [1, 4]], return true.
#
# Given n = 5 and edges = [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]], return false.
#
# Hint:
#
# Given n = 5 and edges = [[0, 1], [1, 2], [3, 4]], what should your return? Is this case a valid tree? Show More Hint
#
# Note: you can assume that no duplicate edges will appear in edges.
# Since all edges are undirected, [0, 1] is the same as [1, 0] and thus will not appear together in edges.
#
# https://nb4799.neu.edu/wordpress/?p=1143
# We need to check two properties to determine whether a set of edges form a valid tree:
#
# it has n-1 edges
# it is acyclic
class Solution(object):
    # NOTE: the three definitions below intentionally share the name
    # `validTree`; each is an alternative solution and only the last one
    # (BFS) is bound when the class body finishes executing.

    # Union-find: merge the endpoint sets of every edge.  An edge whose two
    # endpoints already share a set closes a cycle.  Finally, a tree must
    # have exactly n - 1 edges (this also rules out disconnected forests).
    def validTree(self, n, edges):
        """
        :type n: int
        :type edges: List[List[int]]
        :rtype: bool
        """
        # fix: list(range(n)) -- a bare range() is immutable on Python 3,
        # so the `union_arr[p1_set] = p2_set` below would raise TypeError
        union_arr = list(range(n))

        def find_union(p):
            # follow parent pointers up to the set representative
            if union_arr[p] == p:
                return p
            return find_union(union_arr[p])

        for p1, p2 in edges:
            p1_set = find_union(p1)
            p2_set = find_union(p2)
            if p1_set == p2_set:
                # endpoints already connected -> this edge forms a cycle
                return False
            union_arr[p1_set] = p2_set
        return len(edges) == n - 1

    # DFS from node 0, popping visited nodes out of the adjacency map.
    # Anything left in the map afterwards means the graph is disconnected.
    def validTree(self, n, edges):
        adj = {s: [] for s in range(n)}
        for p1, p2 in edges:
            adj[p1].append(p2)
            adj[p2].append(p1)
        stk = [0]
        while stk:
            cur = stk.pop()
            # pop() with a default: revisiting a node contributes nothing
            stk.extend(adj.pop(cur, []))
        return len(edges) == n - 1 and not adj

    # BFS: same bookkeeping as the DFS version, but with a FIFO queue.
    def validTree(self, n, edges):
        from collections import deque
        adj = {s: [] for s in range(n)}
        for p1, p2 in edges:
            adj[p1].append(p2)
            adj[p2].append(p1)
        q = deque()
        q.append(0)
        while q:
            cur = q.popleft()
            q.extend(adj.pop(cur, []))
        return len(edges) == n - 1 and not adj
# self-test: True for the tree case, False for the case containing a cycle
print(Solution().validTree(5, [[0, 1], [0, 2], [0, 3], [1, 4]]))
print(Solution().validTree(5, [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]]))
| {
"repo_name": "gengwg/leetcode",
"path": "261_graph_valid_tree.py",
"copies": "1",
"size": "3466",
"license": "apache-2.0",
"hash": -5779305111753292000,
"line_mean": 39.7764705882,
"line_max": 142,
"alpha_frac": 0.5931909983,
"autogenerated": false,
"ratio": 3.418145956607495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4511336954907495,
"avg_score": null,
"num_lines": null
} |
# 2/6
import csv
# Read the whole CSV up front; close the handle once it is fully consumed.
file = open('nfl-suspensions-data.csv', 'r')
nfl_suspensions = list(csv.reader(file))
file.close()  # fix: the original left the file handle open
header = nfl_suspensions[0]          # first row holds the column names
nfl_suspensions = nfl_suspensions[1:]
# Count suspensions per year (column 5).
years = {}
for record in nfl_suspensions:
    years[record[5]] = years.get(record[5], 0) + 1
print(years, '\n')
# 3/6
# Distinct team names (column 1) and game counts (column 2).
teams = (record[1] for record in nfl_suspensions)
unique_teams = set(teams)
games = (record[2] for record in nfl_suspensions)
unique_games = set(games)
print(unique_teams, '\n', unique_games, '\n')
# 4
class Suspension:
    """One suspension record; columns: 0=name, 1=team, 2=games, 5=year."""

    def __init__(self, row):
        self.name, self.team, self.games = row[0], row[1], row[2]
        self.year = row[5]
# NOTE(review): Suspension defines no __repr__, so this prints the default
# object representation rather than the record's fields
third_suspension = Suspension(nfl_suspensions[2])
print(third_suspension, '\n')
# 5
class Suspension:
    """Suspension record whose year is parsed lazily via get_year()."""

    def __init__(self, row):
        self.name = row[0]
        self.team = row[1]
        self.games = row[2]

    def get_year(self, row):
        """Parse row[5] as an int and store it as self.year.

        Any failure (non-numeric value, wrong type, short sequence) falls
        back to 0.  Returns the stored year.
        """
        try:
            self.year = int(row[5])
        except Exception:
            # broad on purpose: covers ValueError, TypeError, IndexError ...
            self.year = 0
        return self.year
missing_year = Suspension(nfl_suspensions[22])
# NOTE(review): get_year is handed the int 22, not a row; indexing an int
# raises TypeError, which the broad except converts into the fallback year 0
twenty_third_year = missing_year.get_year(22)
print(twenty_third_year, '\n')
| {
"repo_name": "my30/NFL-Suspension-DQ_Python_Intermediate-",
"path": "Analyses.py",
"copies": "1",
"size": "1206",
"license": "mit",
"hash": -683366799757733600,
"line_mean": 20.9272727273,
"line_max": 49,
"alpha_frac": 0.6053067993,
"autogenerated": false,
"ratio": 2.759725400457666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8865032199757665,
"avg_score": 0,
"num_lines": 55
} |
__author__ = 'Libao Jin'
__date__ = 'December 15, 2015'
class Solution(object):
    def removeDuplicates(self, nums):
        """Collapse runs of adjacent duplicates in nums, in place, and
        return the resulting length.

        Replaces the original remove()-in-a-loop approach (O(n^2), since
        each remove shifts the tail) with a single O(n) two-pointer pass.
        The list is truncated so its final contents match the old behavior.

        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 0
        # `write` is the index of the last element kept so far
        write = 0
        for read in range(1, len(nums)):
            if nums[read] != nums[write]:
                write += 1
                nums[write] = nums[read]
        del nums[write + 1:]  # drop the leftover tail in place
        return write + 1
if __name__ == '__main__':
    s = Solution()
    # edge case: empty input
    print(s.removeDuplicates([]))
    # the list is modified in place; the return value is the new length
    print(s.removeDuplicates([1, 1, 2, 3]))
    print(s.removeDuplicates([1, 2, 3]))
    # note: input is unsorted here -- only *adjacent* duplicates collapse
    print(s.removeDuplicates([1, 3, 2, 3]))
    print(s.removeDuplicates([1, 1, 2, 3, 4, 4]))
| {
"repo_name": "imthomasking/LeetCode-Solutions",
"path": "solutions/026_Remove_Duplicates_from_Sorted_Array.py",
"copies": "2",
"size": "1127",
"license": "mit",
"hash": 2132634260219477800,
"line_mean": 25.8333333333,
"line_max": 49,
"alpha_frac": 0.4480922804,
"autogenerated": false,
"ratio": 3.2761627906976742,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4724255071097674,
"avg_score": null,
"num_lines": null
} |
# 27/03/2017
grid = ["########=####/#",
"# | #",
"# # #",
"# # #",
"####### #",
"# _ #",
"###############"]
# expand each row string into a mutable list of single characters
grid = [[c for c in grid[y]] for y in range(0, len(grid))]
# cells to "poke", one per simulation step (see update_grid below)
coords = [(1, 1), (1, 2), (1, 3), (5, 6), (4, 2), (1, 1),
          (1, 2), (5, 5), (5, 5), (9, 1), (7, 5), (2, 2)]
def update_grid(x, y):
    """Advance the cell at (x, y) one state: ' ' -> 'S', then 'S' -> 'F'.

    Cells in "F#|/=_" (already burning, walls, fixtures, origin marker)
    are left untouched.
    """
    rules = {'S': 'F', ' ': 'S'}
    cell = grid[y][x]
    if cell not in "F#|/=_":
        grid[y][x] = rules[cell]
# One spreading pass: each fire cell 'F' ignites neighbouring 'S' cells;
# the origin marker '_' only participates once a fire is already adjacent.
for y in range(0, len(grid)):
    for x in range(0, len(grid[0])):
        if grid[y][x] not in 'F_':
            continue
        try:
            # '_' stays inert until at least one of its 4 neighbours burns
            if grid[y][x] == '_' and not any(['F' in [grid[y][x-1], grid[y][x+1], grid[y-1][x], grid[y+1][x]]]):
                continue
            else:
                grid[y][x-1] = 'F' if grid[y][x-1] == 'S' else grid[y][x-1]
                grid[y][x+1] = 'F' if grid[y][x+1] == 'S' else grid[y][x+1]
                grid[y-1][x] = 'F' if grid[y-1][x] == 'S' else grid[y-1][x]
                grid[y+1][x] = 'F' if grid[y+1][x] == 'S' else grid[y+1][x]
        except:
            # NOTE(review): bare except relies on IndexError at the bottom /
            # right edges; negative indices wrap around silently instead of
            # raising, so the left/top edges touch the opposite side
            continue
# poke each coordinate in turn, spreading the fire one step per poke
for c in coords:
    update_grid(*c)
print('\n'.join([''.join(l) for l in grid])) | {
"repo_name": "tlseabra/dailyprogrammer",
"path": "Python/easy/e308.py",
"copies": "2",
"size": "1296",
"license": "mit",
"hash": -3613015422583168500,
"line_mean": 31.425,
"line_max": 116,
"alpha_frac": 0.3418209877,
"autogenerated": false,
"ratio": 2.6557377049180326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3997558692618033,
"avg_score": null,
"num_lines": null
} |
# 270. Closest Binary Search Tree Value
# Given a non-empty binary search tree and a target value, find the value in the BST that is closest to the target.
# Note:
# Given target value is a floating point.
# You are guaranteed to have only one unique value in the BST that is closest to the target.
class Solution:
    def closestValue(self, root, target):
        """Return the value in the BST rooted at `root` closest to `target`.

        Walks toward the target: when root.val < target the answer can only
        be root.val or something in the right subtree (symmetrically for the
        left), so a single subtree is explored per level.
        """
        best = root.val
        if root.val < target:
            # a closer value, if any, lies in the right subtree
            if root.right:
                candidate = self.closestValue(root.right, target)
                if abs(candidate - target) < abs(best - target):
                    best = candidate
        elif root.left:
            # root.val >= target: a closer value, if any, lies to the left
            candidate = self.closestValue(root.left, target)
            if abs(candidate - target) < abs(best - target):
                best = candidate
        return best
| {
"repo_name": "gengwg/leetcode",
"path": "270_Closest_Binary_Search_Tree_Value.py",
"copies": "1",
"size": "1161",
"license": "apache-2.0",
"hash": 4346468767855701500,
"line_mean": 39.0344827586,
"line_max": 115,
"alpha_frac": 0.6106804479,
"autogenerated": false,
"ratio": 4.102473498233215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006680475112832905,
"num_lines": 29
} |
# 271. Encode and Decode String
# Design an algorithm to encode a list of strings to a string.
# The encoded string is then sent over the network
# and is decoded back to the original list of strings.
# Machine 1 (sender) has the function:
# string encode(vector strs) {
# // ... your code
# return encoded_string;
# }
# Machine 2 (receiver) has the function:
# vector decode(string s) {
# //... your code
# return strs;
# }
# So Machine 1 does:
# string encoded_string = encode(strs);
# and Machine 2 does:
# vector strs2 = decode(encoded_string);
# strs2 in Machine 2 should be the same as strs in Machine 1.
# Implement the encode and decode methods.
# Note:
# The string may contain any possible characters out of 256 valid ascii characters.
# Your algorithm should be generalized enough to work on any possible characters.
# Do not use class member/global/static variables to store states.
# Your encode and decode algorithms should be stateless.
# Do not rely on any library method such as eval or serialize methods.
# You should implement your own encode/decode algorithm.
# https://gengwg.blogspot.com/2018/06/leetcode-271-encode-and-decode-strings.html
# Encode each string as <decimal length>/<payload>; the length prefix makes
# the format unambiguous for arbitrary payload characters.
class Codec(object):

    def encode(self, strs):
        """
        Encodes a list of strings to a single string.

        Each string becomes "<len>/<payload>".  The '/' terminating the
        decimal length keeps the format parseable even when the payload
        starts with digits (e.g. "123abc" -> "6/123abc").

        :type strs: List[str]
        :rtype: str
        """
        # join once instead of += in a loop (avoids quadratic re-copying)
        return ''.join(str(len(s)) + '/' + s for s in strs)

    def decode(self, s):
        """
        Decodes a single string to a list of strings.

        Scans for the '/' that ends each length prefix, then slices out
        exactly that many payload characters.

        :type s: str
        :rtype: List[str]
        """
        decoded = []
        j = 0
        for i, c in enumerate(s):
            # fix: only a '/' at or past position j is a delimiter; the
            # original treated '/' characters *inside* payloads as
            # delimiters too, corrupting (or crashing on) such inputs
            if c == '/' and i >= j:
                offset = int(s[j:i])
                decoded.append(s[i+1:i+offset+1])
                j = i + offset + 1
        return decoded
if __name__ == '__main__':
    # round-trip demo: decode(encode(x)) should reproduce x
    encoded = Codec().encode(['123abc', 'xyz'])
    print(encoded)
    decoded = Codec().decode(encoded)
print(decoded) | {
"repo_name": "gengwg/leetcode",
"path": "271_encode_decode_string.py",
"copies": "1",
"size": "2688",
"license": "apache-2.0",
"hash": 6516824468351025000,
"line_mean": 29.4712643678,
"line_max": 88,
"alpha_frac": 0.6252830189,
"autogenerated": false,
"ratio": 3.7062937062937062,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48315767251937064,
"avg_score": null,
"num_lines": null
} |
# 276 Paint Fence
# There is a fence with n posts, each post can be painted with one of the k colors.
#
# You have to paint all the posts such that no more than two adjacent fence posts have the same color.
#
# Return the total number of ways you can paint the fence.
#
# Note:
# n and k are non-negative integers.
class Solution():
    # https://www.cnblogs.com/airwindow/p/4796688.html
    # DP over two running totals:
    #   same = paintings where the last two posts share a color
    #   diff = paintings where the last two posts differ
    # Transitions for post i with k colors:
    #   diff(i) = (same(i-1) + diff(i-1)) * (k-1)
    #   same(i) = diff(i-1)   # at most two alike, so the prior pair differed
    def numWays(self, n , k ):
        """Count ways to paint n fence posts with k colors so that no more
        than two adjacent posts share a color."""
        if n <= 0:
            return 0
        if n == 1:
            return k
        # base case: n == 2 (any pair may match; ordered distinct pairs differ)
        same, diff = k, k * (k - 1)
        for _ in range(3, n + 1):
            same, diff = diff, (same + diff) * (k - 1)
        return same + diff
test = Solution()
# fix: print-statement form was Python-2-only; call form works in 2 and 3
print(test.numWays(5, 2))
| {
"repo_name": "gengwg/leetcode",
"path": "276_paint_fence.py",
"copies": "1",
"size": "1857",
"license": "apache-2.0",
"hash": 9178814039828115000,
"line_mean": 32.1607142857,
"line_max": 111,
"alpha_frac": 0.6322024771,
"autogenerated": false,
"ratio": 3.523719165085389,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46559216421853894,
"avg_score": null,
"num_lines": null
} |
# 277. Find the Celebrity
# Suppose you are at a party with n people (labeled from 0 to n - 1) and among them, there may exist one celebrity.
# The definition of a celebrity is that all the other n - 1 people know him/her but he/she does not know any of them.
# Now you want to find out who the celebrity is or verify that there is not one.
# The only thing you are allowed to do is to ask questions like: "Hi, A. Do you know B?" to get information of whether A knows B.
# You need to find out the celebrity (or verify there is not one) by asking as few questions as possible (in the asymptotic sense).
# You are given a helper function bool knows(a, b) which tells you whether A knows B.
# Implement a function int findCelebrity(n), your function should minimize the number of calls to knows.
# Note: There will be exactly one celebrity if he/she is in the party.
# Return the celebrity's label if there is a celebrity in the party.
# If there is no celebrity, return -1.
#
# https://zhuhan0.blogspot.com/2017/07/leetcode-277-find-celebrity.html
# Thought process:
# Two pass. First pass find out the celebrity candidate. Second pass verify if candidate is truly a celebrity.
#
# First pass: set candidate = 0. For each person on the right, check if they know the candidate.
# If the person knows the candidate: continue.
# Otherwise, we know the candidate cannot be the celebrity.
# We also know the people before the current person cannot be the celebrity because they know the candidate.
# So update the candidate to be the current person.
# After the first pass, we know that only the candidate can be the celebrity,
# because the people before him/her cannot, and the people after can't be either because they know the candidate.
# Second pass: check if the candidate knows any of the other people.
#
# Time complexity: O(n)
import random
def knows(a, b):
    """Randomized stand-in for the judge-provided knows(a, b) API."""
    bit = random.getrandbits(1)
    return bit == 1
class Solution:
    def findCelebrity(self, n):
        """Return the celebrity's label among n people, or -1 if none.

        Pass 1 keeps the only surviving candidate: anyone who fails to know
        the current candidate replaces it.  Pass 2 verifies the candidate is
        known by everyone and knows nobody.  O(n) calls to knows().
        """
        candidate = 0
        # pass 1: eliminate everyone who cannot be the celebrity
        for person in range(1, n):
            if not knows(person, candidate):
                candidate = person
        # pass 2: the celebrity must know no one else and be known by all
        for person in range(n):
            if (person != candidate and knows(candidate, person)) \
                    or not knows(person, candidate):
                return -1
        return candidate
# demo; with the randomized knows() stub the result is nondeterministic
s = Solution()
print(s.findCelebrity(1))
| {
"repo_name": "gengwg/leetcode",
"path": "277_find_the_celebrity.py",
"copies": "1",
"size": "2359",
"license": "apache-2.0",
"hash": -2170157446604873500,
"line_mean": 43.5094339623,
"line_max": 131,
"alpha_frac": 0.7066553624,
"autogenerated": false,
"ratio": 3.5688350983358545,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47754904607358545,
"avg_score": null,
"num_lines": null
} |
# 278. First Bad Version
#
# You are a product manager and currently leading a team to develop a new product.
# Unfortunately, the latest version of your product fails the quality check.
# Since each version is developed based on the previous version,
# all the versions after a bad version are also bad.
# Suppose you have n versions [1, 2, ..., n]
# and you want to find out the first bad one,
# which causes all the following ones to be bad.
# You are given an API bool isBadVersion(version) which will return whether version is bad.
# Implement a function to find the first bad version.
# You should minimize the number of calls to the API.
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return a bool
# def isBadVersion(version):
class Solution(object):
    # The isBadVersion API is assumed to be provided by the judge:
    #   def isBadVersion(version) -> bool
    # NOTE: the two definitions below intentionally share a name; only the
    # second one is bound at runtime.

    def firstBadVersion(self, n):
        """Binary search over the half-open range [1, n + 1).

        :type n: int
        :rtype: int
        """
        lo, hi = 1, n + 1
        while lo < hi:
            mid = lo + ((hi - lo) >> 1)   # overflow-safe midpoint form
            if isBadVersion(mid):
                hi = mid                  # first bad version is mid or earlier
            else:
                lo = mid + 1              # first bad version is after mid
        return lo

    def firstBadVersion(self, n):
        """Variant that narrows down to two candidates, then tests them."""
        lo, hi = 1, n + 1
        while lo + 1 < hi:                # loop until only lo and hi remain
            mid = lo + ((hi - lo) >> 1)
            if isBadVersion(mid):
                hi = mid
            else:
                lo = mid
        # exactly one of the two remaining candidates is the first bad one
        return lo if isBadVersion(lo) else hi
"repo_name": "gengwg/leetcode",
"path": "278_first_bad_version.py",
"copies": "1",
"size": "1572",
"license": "apache-2.0",
"hash": -5380928610539544000,
"line_mean": 31.7708333333,
"line_max": 92,
"alpha_frac": 0.5693384224,
"autogenerated": false,
"ratio": 3.7517899761336517,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9777323874208039,
"avg_score": 0.00876090486512251,
"num_lines": 48
} |
# 279. Perfect Squares
# Given a positive integer n, find the least number of perfect square numbers
# (for example, 1, 4, 9, 16, ...) which sum to n.
# For example, given n = 12, return 3 because 12 = 4 + 4 + 4;
# given n = 13, return 2 because 13 = 4 + 9.
class Solution(object):
    # NOTE: the two definitions below intentionally share the name
    # `numSquares`; only the second (number-theoretic) one is bound.

    # https://gengwg.blogspot.com/2018/02/leetcode-279-perfect-squares.html
    def numSquares(self, n):
        """Least number of perfect squares summing to n (DP, O(n*sqrt(n))).

        :type n: int
        :rtype: int
        """
        import sys
        # dp[i] = minimal count of perfect squares summing to i
        # fix: sys.maxsize exists on both Python 2.7 and 3 (sys.maxint
        # was Python-2-only)
        dp = [sys.maxsize] * (n+1)
        dp[0] = 0
        # dp[i] = 1 + min(dp[i - 1], dp[i - 4], dp[i - 9], ...)
        for i in range(1, n+1):
            j = 1
            while j*j <= i:
                dp[i] = min(dp[i], dp[i - j*j] + 1)
                j += 1
        return dp[n]

    # https://www.jiuzhang.com/solution/perfect-squares/#tag-highlight-lang-python
    # Lagrange's four-square theorem: the answer is always 1, 2, 3 or 4.
    # n requires 4 squares iff n == 4^a * (8b + 7); otherwise probe for 1 or 2.
    def numSquares(self, n):
        # fix: //= keeps n an int on Python 3 (the old /= yielded a float)
        while n % 4 == 0:
            n //= 4
        if n % 8 == 7:
            return 4
        # fix: range() instead of the Python-2-only xrange()
        for i in range(n+1):
            temp = i * i
            if temp <= n:
                # can n be written as temp + (a perfect square)?
                if int((n - temp) ** 0.5) ** 2 + temp == n:
                    return 1 + (0 if temp == 0 else 1)
            else:
                break
        return 3
# fix: print-statement form was Python-2-only; call form works in 2 and 3
print(Solution().numSquares(12))
print(Solution().numSquares(13))
print(Solution().numSquares(6993))
| {
"repo_name": "gengwg/leetcode",
"path": "279_perfect_squares.py",
"copies": "1",
"size": "1692",
"license": "apache-2.0",
"hash": -3661270329295339000,
"line_mean": 29.7636363636,
"line_max": 82,
"alpha_frac": 0.4645390071,
"autogenerated": false,
"ratio": 3.253846153846154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42183851609461537,
"avg_score": null,
"num_lines": null
} |
# 280. Wiggle Sort
# Given an unsorted array nums, reorder it in-place such that
# nums[0] <= nums[1] >= nums[2] <= nums[3]....
# For example, given nums = [3, 5, 2, 1, 6, 4],
# one possible answer is [1, 6, 2, 5, 3, 4].
# the pattern is number in odd position is peak.
# First try to solve it without in-place:
# sort the array in increasing order.
# create a result array of the same size.
# keep 2 pointers, one from the beginning, one from the middle(notice odd/even of array).
# put beginning first, then the middle pointer, into the result array.
# Solve it in-place.
class Solution(object):
    # NOTE: the three definitions below intentionally share the name
    # `wiggleSort`; only the last one is bound at runtime.

    # Approach 1: sort, then swap each odd-indexed element with its
    # right neighbour -- every odd position becomes a local peak.
    def wiggleSort(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        nums.sort()
        for idx in range(1, len(nums) - 1, 2):
            nums[idx], nums[idx + 1] = nums[idx + 1], nums[idx]

    # Approach 2 (O(n log n)): sort a copy, then fill even slots walking
    # down from the middle and odd slots walking down from the top.
    # Walking downward matters for inputs with ties, e.g. [4, 5, 5, 6].
    def wiggleSort(self, nums):
        ordered = sorted(nums)
        low = (len(nums) + 1) >> 1   # just past the lower half
        high = len(nums)             # just past the upper half
        for i in range(len(nums)):
            if i & 1 == 0:
                low -= 1
                nums[i] = ordered[low]
            else:
                high -= 1
                nums[i] = ordered[high]

    # Approach 3 (O(n)): enforce nums[i] >= nums[i-1] at odd i and
    # nums[i] <= nums[i-1] at even i by swapping violators with their
    # predecessor; the swap never breaks the (i-2, i-1) relationship.
    def wiggleSort(self, nums):
        for i in range(1, len(nums)):
            if i % 2 == 1:
                violated = nums[i] < nums[i - 1]
            else:
                violated = nums[i] > nums[i - 1]
            if violated:
                nums[i], nums[i - 1] = nums[i - 1], nums[i]
        print(nums)
# demo input; the bound (third) wiggleSort variant prints its own result
nums = [3, 5, 2, 1, 6, 4]
Solution().wiggleSort(nums) | {
"repo_name": "gengwg/leetcode",
"path": "280_wiggle_sort.py",
"copies": "1",
"size": "2247",
"license": "apache-2.0",
"hash": -2238701915889132800,
"line_mean": 35.3333333333,
"line_max": 94,
"alpha_frac": 0.5479577788,
"autogenerated": false,
"ratio": 2.87467018469657,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.392262796349657,
"avg_score": null,
"num_lines": null
} |
# 283. Move Zeroes
# Given an array nums, write a function to move all 0's to the end of it
# while maintaining the relative order of the non-zero elements.
# For example, given nums = [0, 1, 0, 3, 12],
# after calling your function, nums should be [1, 3, 12, 0, 0].
class Solution(object):
    # http://bookshadow.com/weblog/2015/09/19/leetcode-move-zeroes/
    # Two pointers: `write` marks the first slot that may hold a zero.
    # Every non-zero element found by the scanning pointer is swapped down
    # to `write`, so the region [write, read) only ever contains zeros and
    # the relative order of the non-zero elements is preserved.
    def moveZeroes(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        write = 0
        for read in range(len(nums)):
            if nums[read]:
                nums[read], nums[write] = nums[write], nums[read]
                write += 1
if __name__ == '__main__':
    # zeros drift to the end; non-zero order is preserved
    nums1 = [0, 1, 0, 3, 12]
    Solution().moveZeroes(nums1)
    print(nums1)
    nums2 = [1, 0]
    Solution().moveZeroes(nums2)
    print(nums2)
| {
"repo_name": "gengwg/leetcode",
"path": "283_move_zeros.py",
"copies": "1",
"size": "1115",
"license": "apache-2.0",
"hash": -7556605429947354000,
"line_mean": 27.3714285714,
"line_max": 74,
"alpha_frac": 0.5760322256,
"autogenerated": false,
"ratio": 2.4101941747572817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3486226400357282,
"avg_score": null,
"num_lines": null
} |
# 284. Peeking Iterator
# Given an Iterator class interface with methods: next() and hasNext(),
# design and implement a PeekingIterator that support the peek() operation --
# it essentially peek() at the element that will be returned by the next call to next().
# Here is an example.
# Assume that the iterator is initialized to the beginning of the list: [1, 2, 3].
# Call next() gets you 1, the first element in the list.
# Now you call peek() and it returns 2, the next element. Calling next() after that still return 2.
# You call next() the final time and it returns 3, the last element.
# Calling hasNext() after that should return false.
# Hint:
# Think of "looking ahead". You want to cache the next element.
# Is one variable sufficient? Why or why not?
# Test your design with call order of peek() before next() vs next() before peek().
# For a clean implementation, check out Google's guava library source code.
# Follow up: How would you extend your design to be generic and work with all types, not just integer?
# Below is the interface for Iterator, which is already defined for you.
class Iterator(object):
    def __init__(self, nums):
        """
        Initializes an iterator object to the beginning of a list.
        :type nums: List[int]
        """
        self.it = iter(nums)
        self.nums = nums   # kept so hasNext() can be answered
        self.pos = 0       # number of elements already consumed via next()

    def hasNext(self):
        """
        Returns true if the iteration has more elements.
        :rtype: bool
        """
        # fix: the original was an unfinished stub that returned None
        # unconditionally, making PeekingIterator.hasNext() useless
        return self.pos < len(self.nums)

    def next(self):
        """
        Returns the next element in the iteration.
        :rtype: int
        """
        self.pos += 1
        return next(self.it)
# http://bookshadow.com/weblog/2015/09/21/leetcode-peeking-iterator/
# Two extra attributes drive the lookahead:
#   nextElement - the element fetched ahead of time by peek()
#   peekFlag    - whether a fetched-ahead element is pending
class PeekingIterator(object):
    def __init__(self, iterator):
        """
        Initialize your data structure here.
        :type iterator: Iterator
        """
        self.iter = iterator
        self.peekFlag = False
        self.nextElement = None

    def peek(self):
        """
        Returns the next element in the iteration without advancing the iterator.
        :rtype: int
        """
        if not self.peekFlag:
            # fetch ahead once and remember the element
            self.nextElement = self.iter.next()
            self.peekFlag = True
        return self.nextElement

    def next(self):
        """
        :rtype: int
        """
        if self.peekFlag:
            # hand back the element fetched by a previous peek()
            value = self.nextElement
            self.nextElement = None
            self.peekFlag = False
            return value
        return self.iter.next()

    def hasNext(self):
        """
        :rtype: bool
        """
        # a pending peeked element counts as a remaining element
        return self.peekFlag or self.iter.hasNext()
# Your PeekingIterator object will be instantiated and called as such:
nums = [1, 2, 3]
it = PeekingIterator(Iterator(nums))
# peek() does not advance; only next() consumes an element
val = it.peek()
print(val)
it.next()
val = it.peek()
print(val)
# while it.hasNext():
#     val = it.peek() # Get the next element but not advance the iterator.
#     it.next() # Should return the same value as [val].
| {
"repo_name": "gengwg/leetcode",
"path": "284_peeking_iterator.py",
"copies": "1",
"size": "3302",
"license": "apache-2.0",
"hash": 7717044264119823000,
"line_mean": 29.1495327103,
"line_max": 102,
"alpha_frac": 0.6301921885,
"autogenerated": false,
"ratio": 3.7730994152046784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9876269818876593,
"avg_score": 0.005404356965617051,
"num_lines": 107
} |
# 28684
# Project Euler 61 setup: candidates are P-digit numbers in [MA, MB)
P = 4
MB = 10 ** P   # exclusive upper bound: 10000
MA = MB / 10   # inclusive lower bound: 1000 (Python 2 integer division)
def poly3(n):
    """n-th triangular number."""
    return n * (n + 1) // 2

def poly4(n):
    """n-th square number."""
    return n * n

def poly5(n):
    """n-th pentagonal number."""
    return n * (3 * n - 1) // 2

def poly6(n):
    """n-th hexagonal number."""
    return n * (2 * n - 1)

def poly7(n):
    """n-th heptagonal number."""
    return n * (5 * n - 3) // 2

def poly8(n):
    """n-th octagonal number."""
    return n * (3 * n - 2)
# Project Euler 61: find a cyclic set of six 4-digit figurate numbers, one
# of each type (triangular .. octagonal); print the sum of each such set.
polyf = [poly3, poly4, poly5, poly6, poly7, poly8]
# polyn[i]: 4-digit values of type i whose last two digits are >= 10
# (a successor needs a valid two-digit prefix, so tails below 10 are out)
polyn = [list() for n in range(len(polyf))]
n = 1
while True:
    p = [f(n) for f in polyf]
    if p[0] >= MB:
        # stop once the triangular value (smallest of the six) reaches 5 digits
        break
    for i in range(len(p)):
        if MA <= p[i] < MB and p[i] % 100 >= 10:
            polyn[i].append(p[i])
    n += 1
# grow chains starting from triangular numbers; a chain is a pair of
# (values tuple, polygon-type-index tuple) so each type is used once
n = 2
pchain = [((v,), (0,)) for v in polyn[0]]
while n <= 6:
    cchain = []
    for c in pchain:
        for i in range(1,len(polyf)):
            if i in c[1]:
                continue
            for v in polyn[i]:
                # cyclic link: last two digits of the tail must equal the
                # first two digits of v (Python 2 integer division)
                if v not in c[0] and c[0][-1] % 100 == v / 100:
                    cchain.append((c[0] + (v,), c[1] + (i,)))
    pchain = cchain
    n += 1
for c in pchain:
    # close the cycle: head's first two digits match the tail's last two
    if c[0][0] / 100 == c[0][-1] % 100:
        print sum(c[0])
| {
"repo_name": "higgsd/euler",
"path": "py/61.py",
"copies": "1",
"size": "1043",
"license": "bsd-2-clause",
"hash": 835883291153181600,
"line_mean": 21.1914893617,
"line_max": 63,
"alpha_frac": 0.4458293384,
"autogenerated": false,
"ratio": 2.5254237288135593,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.34712530672135594,
"avg_score": null,
"num_lines": null
} |
# 286. WALLS AND GATES
# You are given a m x n 2D grid initialized with these three possible values.
#
# -1 – A wall or an obstacle.
# 0 – A gate.
# INF – Infinity means an empty room.
# We use the value 2^31 - 1 = 2147483647 to represent INF
# as you may assume that the distance to a gate is less than 2147483647.
# Fill each empty room with the distance to its nearest gate.
# If it is impossible to reach a gate, it should be filled with INF.
#
# For example, given the 2D grid:
#
# INF -1 0 INF
# INF INF INF -1
# INF -1 INF -1
# 0 -1 INF INF
# After running your function, the 2D grid should be:
#
# 3 -1 0 1
# 2 2 1 -1
# 1 -1 2 -1
# 0 -1 3 4
#
class Solution:
    def wallsAndGates(self, rooms):
        """Fill every empty room (INF) with the distance to its nearest gate.

        Runs a DFS from each gate (value 0).  A cell is overwritten only
        when the current path reaches it with a distance not exceeding its
        stored value, so shorter paths win and walls (-1) are never entered.
        See https://dyang2016.wordpress.com/2016/11/02/286-walls-and-gates/
        for the complexity discussion.
        """
        if not rooms or not rooms[0]:
            return
        for r, row in enumerate(rooms):
            for c, val in enumerate(row):
                if val == 0:            # every gate seeds its own DFS
                    self.dfs(rooms, r, c, 0)

    def dfs(self, rooms, x, y, distance):
        # stop outside the grid, or when an equal-or-shorter path already
        # stamped this cell (walls are -1, so they always reject the write)
        if x < 0 or x >= len(rooms) or y < 0 or y >= len(rooms[0]) or rooms[x][y] < distance:
            return
        rooms[x][y] = distance
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            self.dfs(rooms, x + dx, y + dy, distance + 1)
if __name__ == '__main__':
    # sample grid from the problem statement; expected result:
    # [[3,-1,0,1], [2,2,1,-1], [1,-1,2,-1], [0,-1,3,4]]
    rooms = [
        [float("Inf"), -1 , 0 , float("Inf")],
        [float("Inf"), float("Inf") ,float("Inf"), -1],
        [float("Inf"), -1 ,float("Inf"), -1],
        [0, -1, float("Inf"), float("Inf")]
    ]
    test = Solution()
    test.wallsAndGates(rooms)
    print(rooms)
| {
"repo_name": "gengwg/leetcode",
"path": "286_walls_and_gates.py",
"copies": "1",
"size": "2059",
"license": "apache-2.0",
"hash": -3065197513937188400,
"line_mean": 28.1194029851,
"line_max": 93,
"alpha_frac": 0.5381855459,
"autogenerated": false,
"ratio": 2.661664392905866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3699849938805866,
"avg_score": null,
"num_lines": null
} |
# 287. Find the Duplicate Number
# Given an array nums containing n + 1 integers
# where each integer is between 1 and n (inclusive),
# prove that at least one duplicate number must exist.
# Assume that there is only one duplicate number, find the duplicate one.
# Note:
# You must not modify the array (assume the array is read only).
# You must use only constant, O(1) extra space.
# Your runtime complexity should be less than O(n2).
# There is only one duplicate number in the array, but it could be repeated more than once.
class Solution(object):
    # Variant 1: sort, then scan for an adjacent equal pair.
    # NOTE: mutates the input, which the problem statement forbids.
    def findDuplicate(self, nums):
        nums.sort()
        for prev, cur in zip(nums, nums[1:]):
            if prev == cur:
                return cur

    # Variant 2: remember every value seen; the first repeat is the answer.
    # O(n) time but O(n) extra space, so it also violates the constraints.
    def findDuplicate(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        seen = set()
        for value in nums:
            if value in seen:
                return value
            seen.add(value)

    # Variant 3 (active definition): Floyd cycle detection, O(1) space.
    # Treat the array as a function i -> nums[i].  Because values lie in
    # 1..n over n+1 slots, two indices map to the same "node", so the
    # implicit linked list has a cycle whose entry point is the duplicated
    # number (same idea as LeetCode 142, Linked List Cycle II).
    def findDuplicate(self, nums):
        # Phase 1: slow moves one step, fast moves two, until they meet
        # somewhere inside the cycle.
        slow = nums[0]
        fast = nums[nums[0]]
        while slow != fast:
            slow = nums[slow]
            fast = nums[nums[fast]]
        # Phase 2: restart slow from index 0; advancing both one step at a
        # time, they meet exactly at the cycle entry = the duplicate.
        slow = 0
        while slow != fast:
            slow = nums[slow]
            fast = nums[fast]
        return slow
| {
"repo_name": "gengwg/leetcode",
"path": "287_find_duplicate_number.py",
"copies": "1",
"size": "2769",
"license": "apache-2.0",
"hash": 917608630373290500,
"line_mean": 26.5063291139,
"line_max": 95,
"alpha_frac": 0.5605154165,
"autogenerated": false,
"ratio": 2.1600397614314115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3220555177931411,
"avg_score": null,
"num_lines": null
} |
# 288 Unique Word Abbreviation
# An abbreviation of a word follows the form <first letter><number><last letter>.
# Below are some examples of word abbreviations:
# a) it --> it (no abbreviation)
# 1
# b) d|o|g --> d1g
# 1 1 1
# 1---5----0----5--8
# c) i|nternationalizatio|n --> i18n
# 1
# 1---5----0
# d) l|ocalizatio|n --> l10n
# Assume you have a dictionary and given a word,
# find whether its abbreviation is unique in the dictionary.
# A word’s abbreviation is unique if no other word from the dictionary has the same abbreviation.
# Example:
# Given dictionary = [ "deer", "door", "cake", "card" ]
# isUnique("dear") -> false
# isUnique("cart") -> true
# isUnique("cane") -> false
# isUnique("make") -> true
class ValidWordAbbr(object):
    def __init__(self, dictionary):
        """
        initialize your data structure here.
        :type dictionary: List[str]

        Groups the dictionary words by their abbreviation
        <first letter><middle length><last letter>.  A query word is
        unique iff no dictionary word shares its abbreviation, or the
        only word sharing it is the query word itself.
        """
        self.func = lambda s: s[0] + str(len(s) - 2) + s[-1]
        buckets = {}
        for word in dictionary:
            buckets.setdefault(self.func(word), set()).add(word)
        self.d = buckets

    def isUnique(self, word):
        """
        check if a word is unique.
        :type word: str
        :rtype: bool
        """
        bucket = self.d.get(self.func(word))
        if not bucket:
            return True
        # Unique only if the bucket is exactly {word}.
        return len(bucket) == 1 and word in bucket
if __name__ == '__main__':
    # Your ValidWordAbbr object will be instantiated and called as such:
    # vwa = ValidWordAbbr(dictionary)
    # vwa.isUnique("word")
    # vwa.isUnique("anotherWord")
    vwa = ValidWordAbbr(["deer", "door", "cake", "card"])
    for query in ('dear', 'cart', 'cane'):
        print(vwa.isUnique(query))
print(vwa.isUnique('make')) | {
"repo_name": "gengwg/leetcode",
"path": "288_unique_word_abbreviation.py",
"copies": "1",
"size": "2499",
"license": "apache-2.0",
"hash": -1585880891068679400,
"line_mean": 31,
"line_max": 102,
"alpha_frac": 0.5715430862,
"autogenerated": false,
"ratio": 3.519040902679831,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45905839888798305,
"avg_score": null,
"num_lines": null
} |
# 289. Game of Life
# According to the Wikipedia's article:
# "The Game of Life, also known simply as Life, is a cellular automaton
# devised by the British mathematician John Horton Conway in 1970."
# Given a board with m by n cells, each cell has an initial state live (1) or dead (0).
# Each cell interacts with its eight neighbors (horizontal, vertical, diagonal)
# using the following four rules (taken from the above Wikipedia article):
# Any live cell with fewer than two live neighbors dies, as if caused by under-population.
# Any live cell with two or three live neighbors lives on to the next generation.
# Any live cell with more than three live neighbors dies, as if by over-population..
# Any dead cell with exactly three live neighbors becomes a live cell, as if by reproduction.
# Write a function to compute the next state (after one update) of the board given its current state.
# Follow up:
# Could you solve it in-place? Remember that the board needs to be updated at the same time:
# You cannot update some cells first and then use their updated values to update other cells.
# In this question, we represent the board using a 2D array.
# In principle, the board is infinite, which would cause problems
# when the active area encroaches the border of the array.
# How would you address these problems?
class Solution(object):
    def gameOfLife(self, board):
        """
        :type board: List[List[int]]
        :rtype: void Do not return anything, modify board in-place instead.

        In-place update using two bits per cell:
        bit 0 = current state, bit 1 = next state.
        Pass 1 computes every next state from the (still untouched) low
        bits; pass 2 shifts each cell right so next becomes current.
        Transitions written: 01 -> 11 (survives), 00 -> 10 (birth);
        everything else defaults to a dead next state.
        """
        if not board or len(board) == 0:
            return
        rows, cols = len(board), len(board[0])
        for r in range(rows):
            for c in range(cols):
                lives = self.liveNeighbors(board, rows, cols, r, c)
                if board[r][c] == 1 and lives in (2, 3):
                    board[r][c] = 3   # live cell with 2-3 neighbours survives
                if board[r][c] == 0 and lives == 3:
                    board[r][c] = 2   # dead cell with exactly 3 is born
        for r in range(rows):
            for c in range(cols):
                board[r][c] >>= 1

    def liveNeighbors(self, board, m, n, i, j):
        """Count live neighbours of (i, j) using the low (current) bit."""
        total = 0
        for x in range(max(i - 1, 0), min(i + 2, m)):
            for y in range(max(j - 1, 0), min(j + 2, n)):
                total += board[x][y] & 1
        # the 3x3 window included the cell itself; remove its contribution
        return total - (board[i][j] & 1)
| {
"repo_name": "gengwg/leetcode",
"path": "289_game_of_life.py",
"copies": "1",
"size": "3627",
"license": "apache-2.0",
"hash": 6601479205694009000,
"line_mean": 40.2159090909,
"line_max": 103,
"alpha_frac": 0.6076647367,
"autogenerated": false,
"ratio": 3.7546583850931676,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48623231217931673,
"avg_score": null,
"num_lines": null
} |
# 28. Implement `strstr()`.
#
# Return the index of the first occurrence of needle in haystack, or -1
# if needle is not part of haystack. If `needle` is empty, return 0
# (consistent with `strstr` in C).
# My original, basic solution exceeded the time limit, so I had to
# implement some more complicated solutions.
def strstr_basic(haystack, needle):
    """Naive O(n*m) substring search.

    Returns the index of the first occurrence of needle in haystack,
    -1 when absent, and 0 for an empty needle (like C's strstr).
    """
    hay_len = len(haystack)
    ndl_len = len(needle)
    if ndl_len == 0:
        return 0
    # Try every window start; the range is empty when needle is longer.
    for start in range(hay_len - ndl_len + 1):
        offset = 0
        while (offset < ndl_len
               and haystack[start + offset] == needle[offset]):
            offset += 1
        if offset == ndl_len:
            return start
    return -1
def strstr_fingerprint(haystack, needle):
    """Substring search that compares needle characters rarest-first.

    Preprocesses the needle into a "fingerprint": its positions reordered
    so the rarest characters (ties broken by latest first occurrence) are
    checked first, which tends to reject non-matching windows sooner.
    Returns the index of the first occurrence of needle in haystack,
    -1 when absent, and 0 for an empty needle.
    """
    hay_len = len(haystack)
    ndl_len = len(needle)
    # Exit early if needle is empty or larger than haystack
    if ndl_len == 0:
        return 0
    elif ndl_len > hay_len:
        return -1
    # Analyze the needle to make it easier to search for
    # char_idxs maps each needle character to the list of its positions.
    char_idxs = {}
    for idx, c in enumerate(needle):
        idxs = char_idxs.get(c)
        if idxs is None:
            char_idxs[c] = [idx]
        else:
            idxs.append(idx)
    # Rarest characters with latest first occurrences
    rarest = sorted(char_idxs.items(),
                    key=lambda kv: (len(kv[1]), -kv[1][0], kv[0]))
    # fgrprt_chars[i] is the needle character expected at window offset
    # fgrprt_idxs[i]; both sequences have exactly ndl_len entries, so a
    # full fingerprint pass checks every needle position once.
    fgrprt_chars = ''.join(k * len(v) for k, v in rarest)
    fgrprt_idxs = [i for k, v in rarest for i in v]
    # Deallocate
    char_idxs = None
    # Start checking the haystack from the beginning
    hay_idx = 0
    ndl0 = needle[0]
    while hay_idx <= hay_len - ndl_len:
        # Can the current character be the start of a match?
        if haystack[hay_idx] == ndl0:
            # Check the haystack for the fingerprint
            idx = 0
            while (idx < ndl_len and
                   (haystack[hay_idx + fgrprt_idxs[idx]]
                    == fgrprt_chars[idx])):
                idx += 1
            if idx == ndl_len:
                return hay_idx
        # Increment
        hay_idx += 1
    return -1
def strstr_shell(haystack, needle):
    """Substring search that compares from both ends of the needle inward.

    After a partial match of prefix length idx1, the window can safely
    jump ahead by min(idx1, skip), where skip is the distance from
    needle[0] to its next occurrence inside the needle: any earlier
    start would misalign the known first character.
    Returns the index of the first occurrence, -1 when absent, 0 for an
    empty needle.
    """
    hay_len = len(haystack)
    ndl_len = len(needle)
    # Exit early if needle is empty or larger than haystack
    if ndl_len == 0:
        return 0
    elif ndl_len > hay_len:
        return -1
    # If the initial character of needle repeats in needle, then one can
    # skip forward on a partial match
    ndl0 = needle[0]
    skip = needle.find(ndl0, 1)
    if skip == -1:
        # needle[0] never repeats: a failed window cannot restart inside it.
        skip = ndl_len
    # Start checking the haystack from the beginning
    hay_idx = 0
    while hay_idx <= hay_len - ndl_len:
        # Compare characters pairwise from both ends toward the middle.
        idx1 = 0
        idx2 = ndl_len - 1
        while (idx1 <= idx2 and
               haystack[hay_idx + idx1] == needle[idx1] and
               haystack[hay_idx + idx2] == needle[idx2]):
            idx1 += 1
            idx2 -= 1
        if idx1 > idx2:
            return hay_idx
        # On a partial prefix match, shift by the largest provably safe step.
        hay_idx += min(idx1, skip) if idx1 > 0 else 1
    return -1
class Solution:
    """LeetCode entry points delegating to the module-level searchers."""
    def strStr1(self, haystack: str, needle: str) -> int:
        """Naive quadratic scan (strstr_basic)."""
        return strstr_basic(haystack, needle)
    def strStr2(self, haystack: str, needle: str) -> int:
        """Rarest-character-first fingerprint search (strstr_fingerprint)."""
        return strstr_fingerprint(haystack, needle)
    def strStr3(self, haystack: str, needle: str) -> int:
        """Two-ended comparison with safe-shift precomputation (strstr_shell)."""
        return strstr_shell(haystack, needle)
    # Alias chosen as the default implementation seen by the judge.
    strStr = strStr1
| {
"repo_name": "afbarnard/glowing-broccoli",
"path": "lc/000028.py",
"copies": "1",
"size": "3390",
"license": "mit",
"hash": -2198578655039251500,
"line_mean": 29.8181818182,
"line_max": 72,
"alpha_frac": 0.5666666667,
"autogenerated": false,
"ratio": 3.162313432835821,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9226635222190943,
"avg_score": 0.000468975468975469,
"num_lines": 110
} |
#29-04-04
# v1.0.1
# E-mail fuzzyman AT atlantibots DOT org DOT uk (or michael AT foord DOT me DOT uk )
# Maintained at www.voidspace.org.uk/atlantibots/pythonutils.html
# Used by COnfigObj for storing config files with lists of values.
def listparse(inline, recursive = 1, comment = 1, retain = 0, lpstack = None, **keywargs):
    """Parses a line (a string) as a representation of a list. Can recursively parse nested lists. (List members can themselves be lists).
    List elements are stripped - and are returned as either lists or strings.
    This is useful for storing lists of information as text - for example in config files
    Listparse returns the list and trailing comments or None if the list is badly built.
    A valid comments exists after the end of the list (and any whitespace) and starts with a '#' or a ';'.
    Returned comment will include the initial '#' or a ';'.
    Commas delimit list elements.
    If the first non whitespace character in a list element is '[' then that element is treated as a list.
    Inside the list '[', ']', '"', "\" or '\' can be escaped with '\'
    (or indeed any other character - a single '\' will always be treated as escaping the character that follows)
    The leading '\' of escaped characters is *not* retained.....
    Any unquoted list elements must not have an unescaped ']' in them - except to terminate the current list.
    Escaping can be switched off by passing in a keyword argument 'escapechar' set to None.
    If you want to use literal '\' without escaping them - then you must switch escaping off.
    If you make sure every element of a list is contained within quotes - using the quot_elem function - this shouldn't be a problem).
    If retain is set to 1 (default is 0) any quotes around elements will be retained.
    This could be used to specify element types - e.g. if it has quotes it is a string.
    So the function unquote can be used recursively to check if a list element is validly quoted.
    (and here you could implement other methods for unquoted elements - e.g. check for None or integer values etc...)
    *However* if an element is quoted - it must be correctly quoted, or the element will be invalid.
    The default is for quotes to be removed.
    If recursive is set to 0 (default is 1)
    then list elements will not be recursively parsed - an element containing another list will just
    be returned as a string.
    (meaning an unescaped and unquoted ']' will close the current list... and listparse will say you have a bad list).
    lpstack is used for recursion. Effectively it parses the current table and returns the rest of the line as well.
    If comment is set to 0 (default is 1)
    It causes listparse to return None if there is anything other than whitespace after a valid list.
    (I.e. comments are not allowed). In this case it will only return the list.
    """
    # NOTE(review): dict.has_key is Python 2 only - this module predates Python 3.
    if keywargs.has_key('escapechar'):
        escapechar = keywargs['escapechar'] # either True or False
    else:
        escapechar = True
    outlist = []
    inline = inline.strip()
    if inline[0] != '[':
        return None
    inline = inline[1:].lstrip()
    found_end = 0
    # thiselement is None while between elements; it becomes a string (or a
    # nested list) as soon as the current element starts accumulating.
    thiselement = None
    escape = 0
    while inline:
        if thiselement == None: # start of the element
            output = unquote(inline, 0, retain, escapechar=escapechar) # partquote mode, retain quotes.......
            if output == None:
                return None
            if output != -1: # element is quoted
                thiselement, inline = output
                inline = inline.lstrip()
                if not inline:
                    return None
                if inline[0] not in [',', ']']: # only two valid ways to terminate an element
                    return None
                continue
        # Consume one character at a time from the front of the line.
        thischar = inline[0]
        inline = inline[1:]
        if escape: # the current character is escaped... whatever it may be
            thiselement =__add(thiselement, thischar)
            escape = 0
            continue
        elif thischar == '\\' and escapechar:
            escape = 1
            # thiselement = __add(thiselement, thischar) # commenting this out means we no longer retain the initial '\' if quoting is on
            continue
        if recursive and not thiselement and thischar == '[':
            output = listparse('[' + inline, True, comment, retain, True, escapechar=escapechar) # we have found a list element, herewith lies recursion...
            if not output:
                return None # which is badly formed
            thiselement, inline = output
            inline = inline.lstrip()
            if not inline:
                return None
            if inline[0] not in [',', ']']: # only two valid ways to terminate an element
                return None
            continue
        if thischar == ',': # element terminated
            outlist.append(thiselement)
            thiselement = None
            inline = inline.lstrip()
            continue
        if thischar == ']':
            if thiselement != None: # trap empty lists
                outlist.append(thiselement)
            found_end = 1
            if lpstack:
                # Recursive call: hand the unconsumed remainder back to the caller.
                return outlist, inline
            break
        thiselement = __add(thiselement, thischar)
    if not found_end:
        return None
    # Anything left after the closing ']' must be whitespace or a comment.
    inline = inline.strip()
    if inline and not comment:
        return None
    elif not comment:
        return outlist
    if inline and inline[0] not in ['#',';']:
        return None
    return outlist, inline
def __add(thiselement, char):
    """Append *char* to a possibly-unstarted element (None means empty)."""
    if thiselement is None:
        return char
    return thiselement + char
def unquote(inline, fullquote = 1, retain = 0, **keywargs):
    """Given a line - if it's correctly quoted - it reurns the 'unquoted' value.
    If not quoted at all, it returns -1.
    If badly quoted, it returns None.
    line is stripped before starting.
    Any instances of '&mjf-quot;' found (from elem_quot) are turned back into '"'
    Any instances of '&mjf-lf;' found (from elem_quot) are turned back into '\n'
    Quotes can be escaped with a '\'.
    '\' (or any other character) can also be escaped with a '\'.
    No triple quotes though :-)
    (Escaping can be switched off by passing in the keyword argument 'escapechar' set to None
    If you want to use literal '\' without escaping them then you must turn escaping off).
    If fullquote is set to 0 (default is 1)
    then unquote will return the first correctly quoted part of the line *and* the rest of the line.
    If retain is set to 1 (default is 0)
    then unquote will retain the quote characters in the returned value."""
    # NOTE(review): dict.has_key is Python 2 only - this module predates Python 3.
    if keywargs.has_key('escapechar'):
        escapechar = keywargs['escapechar']
    else:
        escapechar = True
    outline = ''
    quotes = ["'",'"']
    escape = 0
    index = 0
    # quotechar remembers which quote opened the string; None until the
    # first character has been examined.
    quotechar = None
    inline = inline.strip()
    while index < len(inline):
        thischar = inline[index]
        index += 1
        if not quotechar and thischar not in quotes:
            # First character is not a quote at all: "not quoted" sentinel.
            return -1
        elif not quotechar:
            quotechar = thischar
            if retain:
                outline += thischar
            continue
        if escape:
            # Previous character was '\': take this one literally.
            outline += thischar
            escape = 0
            continue
        if thischar in quotes:
            if thischar == quotechar:
                # Matching close quote: decide what to return based on mode.
                if retain:
                    outline += thischar
                if not fullquote:
                    return outline.replace('&mjf-quot;','\"').replace('&mjf-lf;','\n'), inline[index:]
                elif index == len(inline):
                    return outline.replace('&mjf-quot;','\"').replace('&mjf-lf;','\n')
                else:
                    # fullquote mode forbids trailing text after the close quote.
                    return None
            else:
                # The other quote character is just an ordinary character here.
                outline += thischar
                continue
        if thischar == '\\' and escapechar: # a continue here to *not* retain the escape character
            escape = 1
            continue
        outline += thischar
    # Ran off the end without seeing the closing quote: badly quoted.
    return None
def list_stringify(inlist):
    """Recursively rebuilds a list - making all the members strings...

    Useful before writing out lists.  Used by makelist.
    """
    result = []
    for member in inlist:
        if isinstance(member, list):
            result.append(list_stringify(member))
        elif isinstance(member, str):
            result.append(member)
        else:
            result.append(str(member))
    return result
def makelist(inlist):
    """Given a list - will turn it into a string... suitable for writing out.

    (and then reparsing with listparse.)
    Uses list_stringify to make sure all elements are strings and
    elem_quote to decide the most appropriate quoting.
    (This means it adds quoting to every element and, where necessary,
    escapes '"' as '&mjf-quot;' and '\n' as '&mjf-lf;'.)
    """
    inlist = list_stringify(inlist)
    rendered = []
    for member in inlist:
        # Nested lists are serialized recursively; everything else is a
        # string by now and only needs quoting.
        if isinstance(member, list):
            rendered.append(makelist(member))
        else:
            rendered.append(elem_quote(member))
    return '[' + ', '.join(rendered) + ']'
def elem_quote(member):
    """Wrap *member* in whichever quote character it does not contain.

    Prefers single quotes; falls back to double quotes.  If the member
    contains both, every '"' is escaped as '&mjf-quot;'.  Newlines are
    always escaped as '&mjf-lf;'.  Both escapes are restored by unquote.
    If you only have literal strings at this stage and will be parsing
    with escaping on - you might want to do a replace('\\', '\\\\') on
    the member too...
    """
    if "'" not in member:
        quoted = "'" + member + "'"
    elif '"' not in member:
        quoted = '"' + member + '"'
    else:
        quoted = '"' + member.replace('"', '&mjf-quot;') + '"'
    return quoted.replace('\n', '&mjf-lf;')
# brief test stuff
if __name__ == '__main__':
    # Python 2 smoke tests exercising listparse/unquote edge cases:
    # empty lists, nested lists, trailing comments, and quote retention.
    test ='["hello", \'hello2\']'
    test1 = """['hello',"hello again", and again,['hello',"hello again", and again,], and last of all]"""
    print listparse('[]')
    print test1
    print unquote('"hello baby", hello again', 0, 1)
    print listparse(test1)
    print listparse(test1,1,1,1)
    print listparse(test)
    test1 = test1 +' # hello'
    print listparse(test1)
    print listparse(test1, 0) # no recursion - without recursion the list is very badly formed, so returns None
    print listparse(test1, 1, 0) # the comment at the end causes listparse to return None here
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/281056_Listparse/recipe-281056.py",
"copies": "1",
"size": "11198",
"license": "mit",
"hash": 5740477960296902000,
"line_mean": 41.4166666667,
"line_max": 166,
"alpha_frac": 0.595106269,
"autogenerated": false,
"ratio": 4.37421875,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5469325019,
"avg_score": null,
"num_lines": null
} |
# 290. Word Pattern
# Given a pattern and a string str, find if str follows the same pattern.
# Here follow means a full match, such that there is a bijection
# between a letter in pattern and a non-empty word in str.
# Examples:
# pattern = "abba", str = "dog cat cat dog" should return true.
# pattern = "abba", str = "dog cat cat fish" should return false.
# pattern = "aaaa", str = "dog cat cat dog" should return false.
# pattern = "abba", str = "dog dog dog dog" should return false.
# Notes:
# You may assume pattern contains only lowercase letters,
# and str contains lowercase letters separated by a single space.
class Solution(object):
    # Variant 1: map both sequences onto first-seen integer labels and
    # compare the labels pairwise.
    def wordPattern(self, pattern, str):
        """
        :type pattern: str
        :type str: str
        :rtype: bool
        """
        if not pattern:
            return False
        a = [c for c in pattern]
        b = str.split()
        if len(a) != len(b):
            return False
        d1 = {}
        d2 = {}
        # d1 = d2 = {} would alias both names to ONE dict - keep them separate.
        i = 0
        for x in a:
            if x not in d1:
                d1[x] = i
                i += 1
        i = 0
        for x in b:
            if x not in d2:
                d2[x] = i
                i += 1
        for x, y in zip(a, b):
            if d1[x] != d2[y]:
                return False
        return True

    # Variant 2: record the letter->word and word->letter mappings and
    # verify the bijection in both directions.
    def wordPattern(self, pattern, str):
        """
        :type pattern: str
        :type str: str
        :rtype: bool
        """
        if not pattern:
            return False
        a = [c for c in pattern]
        b = str.split()
        if len(a) != len(b):
            return False
        dic = dict(zip(a, b))
        dic2 = dict(zip(b, a))
        return [dic[x] for x in a] == b and [dic2[x] for x in b] == a

    # Variant 3: a bijection exists iff the number of distinct
    # (letter, word) pairs equals both the number of distinct letters
    # and the number of distinct words.
    def wordPattern(self, pattern, str):
        s = pattern
        t = str.split()
        return len(set(zip(s, t))) == len(set(s)) == len(set(t)) and len(s) == len(t)

    # Variant 4 (active definition): compare canonical first-occurrence
    # index sequences: s.find(c) / t.index(w) give the index of the first
    # occurrence, so equal sequences <=> same pattern (length mismatch
    # yields unequal lists automatically).
    def wordPattern(self, pattern, str):
        s = pattern
        t = str.split()
        # FIX: in Python 3, map() returns lazy iterators, and `==` between
        # two map objects is identity comparison (always False here).
        # Materialize both sides as lists before comparing.
        return list(map(s.find, s)) == list(map(t.index, t))
for pattern, str in (
        ("abba", "dog cat cat dog"),
        ("abba", "dog cat cat fish"),
):
    print(Solution().wordPattern(pattern, str))
| {
"repo_name": "gengwg/leetcode",
"path": "290_word_pattern.py",
"copies": "1",
"size": "2495",
"license": "apache-2.0",
"hash": -4263764307193054000,
"line_mean": 25.8279569892,
"line_max": 85,
"alpha_frac": 0.5190380762,
"autogenerated": false,
"ratio": 3.4846368715083798,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.450367494770838,
"avg_score": null,
"num_lines": null
} |
# 292. Nim Game
# You are playing the following Nim Game with your friend:
# There is a heap of stones on the table, each time one of you take turns to remove 1 to 3 stones.
# The one who removes the last stone will be the winner.
# You will take the first turn to remove the stones.
# Both of you are very clever and have optimal strategies for the game.
# Write a function to determine whether you can win the game
# given the number of stones in the heap.
# For example, if there are 4 stones in the heap, then you will never win the game:
# no matter 1, 2, or 3 stones you remove, the last stone will always be removed by your friend.
# Nim游戏的解题关键是寻找“必胜态”。
# 根据题设条件:
# 当n∈[1,3]时,先手必胜。
# 当n == 4时,无论先手第一轮如何选取,下一轮都会转化为n∈[1,3]的情形,此时先手必负。
# 当n∈[5,7]时,先手必胜,先手分别通过取走[1,3]颗石头,可将状态转化为n == 4时的情形,此时后手必负。
# 当n == 8时,无论先手第一轮如何选取,下一轮都会转化为n∈[5,7]的情形,此时先手必负。
# ......
# 以此类推,可以得出结论:
# 当n % 4 != 0时,先手必胜;否则先手必负。
class Solution(object):
    def canWinNim(self, n):
        """
        :type n: int
        :rtype: bool

        The first player wins iff n is not a multiple of 4: from any
        non-multiple of 4 one can always remove 1-3 stones to leave a
        multiple of 4, putting the opponent in a losing position.
        """
        return bool(n % 4)
for stones in (4, 5):
    print(Solution().canWinNim(stones))
| {
"repo_name": "gengwg/leetcode",
"path": "292_nim_game.py",
"copies": "1",
"size": "1475",
"license": "apache-2.0",
"hash": 8900999323890607000,
"line_mean": 25.4418604651,
"line_max": 99,
"alpha_frac": 0.6640281442,
"autogenerated": false,
"ratio": 1.8700657894736843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8002552904317284,
"avg_score": 0.006308205871279983,
"num_lines": 43
} |
# 294. Flip Game II
# You are playing the following Flip Game with your friend:
# Given a string that contains only these two characters: + and -,
# you and your friend take turns to flip twoconsecutive "++" into "--".
# The game ends when a person can no longer make a move and therefore the other person will be the winner.
#
# Write a function to determine if the starting player can guarantee a win.
#
# For example, given s = "++++", return true.
# The starting player can guarantee a win by flipping the middle "++" to become "+--+".
#
# Follow up:
# Derive your algorithm's runtime complexity.
class Solution:
    # https://gengwg.blogspot.com/2018/06/leetcode-294-flip-game-ii.html
    # Variant 1: mutate a char-list copy in place, undo after each trial move.
    def canWin(self, s):
        board = list(s)  # strings are immutable, so work on a list of chars
        for i in range(len(board) - 1):
            if board[i] == board[i + 1] == '+':
                board[i] = board[i + 1] = '-'
                opponentWin = self.canWin(board)
                # restore the board before trying the next move
                board[i] = board[i + 1] = '+'
                if not opponentWin:
                    return True
        return False

    # https://github.com/criszhou/LeetCode-Python/blob/master/294.%20Flip%20Game%20II.py
    # Shared memo mapping a board string to whether the player to move wins.
    canWinCache = dict()

    # Variant 2: memoized search using for/else to detect "no winning move".
    def canWin(self, s):
        """
        :type s: str
        :rtype: bool
        """
        if len(s) < 2:
            return False
        if s not in self.canWinCache:
            for i in range(len(s) - 1):
                if s[i:i + 2] == '++' and not self.canWin(s[:i] + '--' + s[i + 2:]):
                    outcome = True
                    break
            else:
                outcome = False
            self.canWinCache[s] = outcome
        return self.canWinCache[s]

    # Variant 3 (active definition): same memoized search written with an
    # early cache hit and an explicit opponent evaluation.
    def canWin(self, s):
        """
        :type s: str
        :rtype: bool
        """
        cached = self.canWinCache.get(s)
        if cached is not None:  # cache only ever stores True/False
            return cached
        for i in range(len(s) - 1):
            if s[i:i + 2] == '++':
                opponentWin = self.canWin(s[:i] + '--' + s[i + 2:])
                if not opponentWin:
                    self.canWinCache[s] = True
                    return True
        self.canWinCache[s] = False
        return False
for state in ('++++', '+++++'):
    print(Solution().canWin(state))
| {
"repo_name": "gengwg/leetcode",
"path": "294_flip_game_ii.py",
"copies": "1",
"size": "2261",
"license": "apache-2.0",
"hash": 6868286891973164000,
"line_mean": 29.9726027397,
"line_max": 106,
"alpha_frac": 0.5095090668,
"autogenerated": false,
"ratio": 3.583201267828843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4592710334628843,
"avg_score": null,
"num_lines": null
} |
# 295 - Find Median From Data Stream (Hard)
# https://leetcode.com/problems/find-median-from-data-stream/
import heapq
class MedianFinder:
    """Maintain a running median with two balanced heaps.

    `lower` is a max-heap (stored as negated values) holding the smaller
    half of the numbers; `higher` is a min-heap holding the larger half.
    Invariants: max(lower) <= min(higher) and
    len(lower) == len(higher) or len(lower) == len(higher) + 1.

    Improvement over the original: addNum routes each number through one
    heap instead of four hand-written pop/push branches, and findMedian
    peeks at the heap tops instead of popping and re-pushing them.
    Observable behavior is unchanged, including the -0.0 returned for an
    empty stream.
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.lower = []   # max-heap via negation: smaller half
        self.higher = []  # min-heap: larger half

    def addNum(self, num):
        """
        Adds a num into the data structure.
        :type num: int
        :rtype: void
        """
        if len(self.lower) == len(self.higher):
            # Route num through `higher` so `lower` receives the smallest
            # of (num, current min of higher), preserving the ordering
            # invariant while giving `lower` the extra element.
            heapq.heappush(self.higher, num)
            heapq.heappush(self.lower, -heapq.heappop(self.higher))
        else:
            # `lower` currently holds the extra element; rebalance by
            # pushing num through `lower` into `higher`.
            heapq.heappush(self.lower, -num)
            heapq.heappush(self.higher, -heapq.heappop(self.lower))

    def findMedian(self):
        """
        Returns the median of current data stream
        :rtype: float

        Returns -0.0 when no number has been added yet (kept for
        backward compatibility with the original implementation).
        """
        if not self.lower:
            return -0.0
        if len(self.lower) > len(self.higher):
            # Odd count: the median is the max of the smaller half.
            return float(-self.lower[0])
        # Even count: average the two middle values.
        return (-self.lower[0] + self.higher[0]) / float(2)
# Your MedianFinder object will be instantiated and called as such:
# mf = MedianFinder()
# mf.addNum(1)
# mf.findMedian() | {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/03_Hard/lc_295.py",
"copies": "1",
"size": "3481",
"license": "mit",
"hash": -748537989282124800,
"line_mean": 38.1235955056,
"line_max": 79,
"alpha_frac": 0.5564492962,
"autogenerated": false,
"ratio": 3.6565126050420167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47129619012420165,
"avg_score": null,
"num_lines": null
} |
# 297. Serialize and Deserialize Binary Tree
# Serialization is the process of converting a data structure or object into a sequence of bits
# so that it can be stored in a file or memory buffer, or transmitted across a network connection link
# to be reconstructed later in the same or another computer environment.
# Design an algorithm to serialize and deserialize a binary tree.
# There is no restriction on how your serialization/deserialization algorithm should work.
# You just need to ensure that a binary tree can be serialized to a string
# and this string can be deserialized to the original tree structure.
# For example, you may serialize the following tree
# 1
# / \
# 2 3
# / \
# 4 5
# as "[1,2,3,null,null,4,5]", just the same as how LeetCode OJ serializes a binary tree.
# You do not necessarily need to follow this format, so please be creative
# and come up with different approaches yourself.
# Note: Do not use class member/global/static variables to store states.
# Your serialize and deserialize algorithms should be stateless.
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# https://gengwg.blogspot.com/2018/06/leetcode-297-serialize-and-deserialize.html
# https://leetcode.com/problems/serialize-and-deserialize-binary-tree/discuss/74259/Recursive-preorder-Python-and-C++-O(n)
class Codec:
    def serialize(self, root):
        """Encodes a tree to a single string.

        :type root: TreeNode
        :rtype: str

        Preorder walk; '#' marks an absent child and tokens are
        space-separated.
        """
        tokens = []
        def walk(node):
            if node is None:
                tokens.append('#')
            else:
                tokens.append(str(node.val))
                walk(node.left)
                walk(node.right)
        walk(root)
        return ' '.join(tokens)

    def deserialize(self, data):
        """Decodes your encoded data to tree.

        :type data: str
        :rtype: TreeNode

        Consumes the preorder token stream, rebuilding each subtree
        recursively; '#' yields None.
        """
        tokens = iter(data.split())
        def build():
            token = next(tokens)
            if token == '#':
                return None
            node = TreeNode(int(token))
            node.left = build()
            node.right = build()
            return node
        return build()
if __name__ == '__main__':
    # Your Codec object will be instantiated and called as such:
    # codec = Codec()
    # codec.deserialize(codec.serialize(root))
    # Build the example tree from the problem statement:
    #     1
    #    / \
    #   2   3
    #      / \
    #     4   5
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.right.left = TreeNode(4)
    root.right.right = TreeNode(5)
    # Round-trips the tree; prints the default repr of the rebuilt root.
    print(Codec().deserialize((Codec().serialize(root))))
| {
"repo_name": "gengwg/leetcode",
"path": "297_serialize_deserialize_binary_tree.py",
"copies": "1",
"size": "2698",
"license": "apache-2.0",
"hash": -5026139774030140000,
"line_mean": 31.5060240964,
"line_max": 122,
"alpha_frac": 0.6282431431,
"autogenerated": false,
"ratio": 3.9101449275362317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0024697317254614186,
"num_lines": 83
} |
# 298: Binary Tree Longest Consecutive Sequence
# Given a binary tree, find the length of the longest consecutive sequence path.
# The path refers to any sequence of nodes from some starting node to any node in the tree along the parent-child connections.
# The longest consecutive path need to be from parent to child (cannot be the reverse).
# For example,
#
# 1
# \
# 3
# / \
# 2 4
# \
# 5
# Longest consecutive sequence path is 3-4-5, so return 3.
# 2
# \
# 3
# /
# 2
# /
# 1
# Longest consecutive sequence path is 2-3,not 3-2-1, so return 2.
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def longestConsecutive(self, root):
        """
        :type root: TreeNode
        :rtype: int

        Depth-first walk that carries the length of the consecutive
        (parent.val + 1 == child.val) run ending at the parent node; the
        best run seen anywhere is accumulated in self.res.
        Worst-case time is O(N) for N tree nodes.
        """
        if not root:
            return 0
        self.res = 0  # best run length found so far (global accumulator)
        self.helper(root, root.left, 1)
        self.helper(root, root.right, 1)
        return self.res

    def helper(self, prev, cur, length):
        # `length` counts the consecutive run that ends at `prev`.
        if cur is None:
            # The run ends at a leaf edge; record it.
            if length > self.res:
                self.res = length
            return
        extends = (cur.val == prev.val + 1)
        if not extends and length > self.res:
            # The run is broken here; record it before restarting.
            self.res = length
        next_length = length + 1 if extends else 1
        self.helper(cur, cur.left, next_length)
        self.helper(cur, cur.right, next_length)
| {
"repo_name": "gengwg/leetcode",
"path": "298_binary_tree_longest_consecutive_sequence.py",
"copies": "1",
"size": "1969",
"license": "apache-2.0",
"hash": -2621443725964650000,
"line_mean": 26.3194444444,
"line_max": 127,
"alpha_frac": 0.5851550585,
"autogenerated": false,
"ratio": 3.6158088235294117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4700963882029412,
"avg_score": null,
"num_lines": null
} |
"""2. Add Two Numbers
You are given two linked lists representing two non-negative numbers. The digits are stored in reverse order
and each of their nodes contain a single digit. Add the two numbers and return it as a linked list.
Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
Output: 7 -> 0 -> 8
"""
# Definition for singly-linked list.
class ListNode(object):
    # Singly-linked list node holding one decimal digit.
    def __init__(self, x):
        self.val = x      # digit value
        self.next = None  # next node, or None at the tail
class Solution(object):
    def addTwoNumbers(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode

        Sums the two digit lists (least-significant digit first) in
        place: l1's nodes are reused for the result, l2's tail is
        spliced in when l2 is longer, and l1 is returned.
        """
        a, b = l1, l2
        carry = 0
        # Walk both lists in lockstep while each still has a successor.
        while True:
            carry, a.val = divmod(a.val + b.val + carry, 10)
            if a.next is None or b.next is None:
                break
            a = a.next
            b = b.next
        # If l2 still has digits left, splice its remainder onto l1.
        if b.next:
            a.next = b.next
        # Propagate any leftover carry, growing the list if needed.
        while carry:
            if not a.next:
                a.next = ListNode(0)
            a = a.next
            carry, a.val = divmod(a.val + carry, 10)
        return l1
| {
"repo_name": "nadesico19/nadepy",
"path": "leetcode/algo_2_add_two_numbers.py",
"copies": "1",
"size": "1229",
"license": "mit",
"hash": 3944211339882085400,
"line_mean": 24.7173913043,
"line_max": 108,
"alpha_frac": 0.5077298617,
"autogenerated": false,
"ratio": 3.404432132963989,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4412161994663989,
"avg_score": null,
"num_lines": null
} |
""" 2: Algorithms
thomas moll 2015
"""
import time, random
def find_sequentially(arr, item):
    """ Sequential Search
    Complexity: O(n)
    Returns True if `item` occurs in `arr`, else False.
    """
    # Check each item in the list; runs N times in the worst case.
    # (Bug fix: the original unpacked enumerate() as `value, i`, which
    # swapped index and element and compared `item` to the *index*.)
    for value in arr:
        if item == value:
            return True
    return False
def binary_search(arr, item):
    """ Binary Search
    Complexity: O(log(n))
    Only works on sorted arrays
    """
    lo, hi = 0, len(arr) - 1
    # lo and hi close in on the item from both ends.
    while lo <= hi:
        # Divide the remaining problem set in half.
        mid = (lo + hi) // 2
        if arr[mid] == item:
            return True
        # Decide which half to search next.
        if item < arr[mid]:
            hi = mid - 1
        else:
            lo = mid + 1
    return False
def array_equals(a, b):
    """ Checks to see that two arrays
    are completely equal, regardless of order
    Complexity: O(n log n)

    Bug fix: the original only verified that every element of `a`
    appears somewhere in `b` (ignoring lengths and duplicate counts),
    so it reported unequal arrays such as [1] vs [1, 2] or
    [1, 1, 2] vs [1, 2, 2] as equal.
    """
    if len(a) != len(b):
        return False
    # Equal multisets sort to identical sequences.
    return sorted(a) == sorted(b)
# Below are some speed tests comparing sequential to binary search
if __name__ == '__main__':
    # NOTE: Python 2 syntax (print statements, xrange) — this demo block
    # will not run under Python 3.
    print 'Given an array of a million ordered ints...'
    big_o_list = list(xrange(1000000))
    item = random.randint(0, 1000000)
    print 'Finding',item,'using sequential search'
    t0 = time.time()
    find_sequentially(big_o_list, item)
    t1 = time.time()
    total = t1-t0
    # NOTE(review): time.time() differences are in seconds, not the 'MS'
    # the message claims.
    print 'Found',item,'in',total,'MS'
    item = random.randint(0, 1000000)
    print 'Finding',item,'using binary search'
    t2 = time.time()
    binary_search(big_o_list, item)
    t3 = time.time()
    total = t3-t2
    print 'Found',item,'in',total,'MS'
| {
"repo_name": "huiyi1990/Data-Structure-Zoo",
"path": "1-Algorithm Analysis/algorithms.py",
"copies": "9",
"size": "2193",
"license": "mit",
"hash": -6634006161716739000,
"line_mean": 23.8,
"line_max": 66,
"alpha_frac": 0.5125398997,
"autogenerated": false,
"ratio": 3.9584837545126352,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007137173810798099,
"num_lines": 85
} |
# NOTE(review): this exercise script assumes `requests` was imported by
# earlier course boilerplate, and it embeds a GitHub OAuth token in
# source — such tokens should be revoked and loaded from configuration,
# never committed.
# Create a dictionary of headers containing our Authorization header.
headers = {"Authorization": "token 1f36137fbbe1602f779300dad26e4c1b7fbab631"}
# Make a GET request to the GitHub API with our headers.
# This API endpoint will give us details about Vik Paruchuri.
response = requests.get("https://api.github.com/users/VikParuchuri", headers=headers)
# Print the content of the response. As you can see, this token corresponds to the account of Vik Paruchuri.
print(response.json())
# Organizations the authenticated-for user belongs to.
response = requests.get("https://api.github.com/users/VikParuchuri/orgs", headers=headers)
orgs = response.json()
## 3. Endpoints and Objects ##
# We've loaded headers in.
response = requests.get("https://api.github.com/users/torvalds", headers=headers)
torvalds = response.json()
## 4. Other Objects ##
# Enter your answer here.
response = requests.get("https://api.github.com/repos/octocat/Hello-World", headers=headers)
hello_world = response.json()
## 5. Pagination ##
# Request 50 starred repos per page; `page` selects which page to fetch.
params = {"per_page": 50, "page": 1}
response = requests.get("https://api.github.com/users/VikParuchuri/starred", headers=headers, params=params)
page1_repos = response.json()
response = requests.get("https://api.github.com/users/VikParuchuri/starred", headers=headers, params={"per_page": 50, "page": 2})
page2_repos = response.json()
## 6. User-Level Endpoints ##
# Enter your code here.
# /user describes the account that owns the token in `headers`.
response = requests.get("https://api.github.com/user", headers=headers)
user = response.json()
## 7. POST Requests ##
# Create the data we'll pass into the API endpoint. While this endpoint only requires the "name" key, there are other optional keys.
payload = {"name": "test"}
# We need to pass in our authentication headers!
response = requests.post("https://api.github.com/user/repos", json=payload, headers=headers)
print(response.status_code)
payload = {"name": "learning-about-apis"}
response = requests.post("https://api.github.com/user/repos", json=payload, headers=headers)
status = response.status_code
## 8. PUT/PATCH Requests ##
payload = {"description": "The best repository ever!", "name": "test"}
response = requests.patch("https://api.github.com/repos/VikParuchuri/test", json=payload, headers=headers)
print(response.status_code)
payload = {"description": "Learning about requests!", "name": "learning-about-apis"}
response = requests.patch("https://api.github.com/repos/VikParuchuri/learning-about-apis", json=payload, headers=headers)
status = response.status_code
## 9. DELETE Requests ##
# Clean up the repositories created above.
response = requests.delete("https://api.github.com/repos/VikParuchuri/test", headers=headers)
print(response.status_code)
response = requests.delete("https://api.github.com/repos/VikParuchuri/learning-about-apis", headers=headers)
status = response.status_code | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Apis and Scraping/Intermediate APIs-118.py",
"copies": "1",
"size": "2765",
"license": "mit",
"hash": 2526848869270532000,
"line_mean": 40.2835820896,
"line_max": 133,
"alpha_frac": 0.7446654611,
"autogenerated": false,
"ratio": 3.343409915356711,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9550906018193036,
"avg_score": 0.007433871652734951,
"num_lines": 67
} |
## 2. Array Comparisons ##
# NOTE(review): assumes `world_alcohol` (a NumPy string array whose
# columns are used below as: 0=year, 2=country, 3=beverage, 4=litres)
# was loaded by earlier course code — confirm against the loader.
countries_canada = (world_alcohol[:,2] == 'Canada')
years_1984 = (world_alcohol[:,0] == '1984')
## 3. Selecting Elements ##
country_is_algeria = (world_alcohol[:,2] == 'Algeria')
country_algeria = world_alcohol[country_is_algeria,:]
## 4. Comparisons with Multiple Conditions ##
is_algeria_and_1986 = (world_alcohol[:,0] == '1986') & (world_alcohol[:,2] == 'Algeria')
rows_with_algeria_and_1986 = world_alcohol[is_algeria_and_1986,:]
## 5. Replacing Values ##
world_alcohol[world_alcohol[:,0]=='1986',0] = '2014'
world_alcohol[world_alcohol[:,3]=='Wine',3] = 'Grog'
## 6. Replacing Empty Strings ##
# Empty consumption cells become '0' so the column can be cast to float.
is_value_empty = world_alcohol[:,4]==''
world_alcohol[is_value_empty,4]='0'
## 7. Converting Data Types ##
alcohol_consumption = world_alcohol[:,4]
alcohol_consumption = alcohol_consumption.astype(float)
## 8. Computing with NumPy ##
total_alcohol = alcohol_consumption.sum()
average_alcohol = alcohol_consumption.mean()
## 9. Total Annual Alcohol Consumption ##
canada_1986 = world_alcohol[(world_alcohol[:,0] == '1986') & (world_alcohol[:,2] == 'Canada'),:]
canada_1986[canada_1986[:,4] == '',4] = '0'
canada_alcohol = canada_1986[:,4].astype(float)
total_canadian_drinking = canada_alcohol.sum()
## 10. Calculating Consumption for Each Country ##
years = world_alcohol[:,0]
years = set(years)
countries = set(world_alcohol[:,2])
def alcohol_consumption_country_year(year):
    # Returns {country: total consumption} for the given year string,
    # treating empty consumption cells as zero.
    totals = {}
    for country in countries:
        country_consumption = world_alcohol[(world_alcohol[:,0] == year) & (world_alcohol[:,2]== country),:]
        country_consumption[country_consumption[:,4] == '',4] = '0'
        country_alcohol = country_consumption[:,4].astype(float)
        total_country_drinking = country_alcohol.sum()
        totals[country] = total_country_drinking
    return(totals)
totals = alcohol_consumption_country_year('1989')
## 11. Finding the Country that Drinks the Most ##
# Linear scan of the totals dict for the maximum value.
highest_value = 0
highest_key = None
for item,val in totals.items():
    if val > highest_value:
        highest_value = val
highest_key = item | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Data Analysis with Pandas Intermediate/Computation with NumPy-169.py",
"copies": "1",
"size": "2088",
"license": "mit",
"hash": -2203936551186162700,
"line_mean": 28.8428571429,
"line_max": 108,
"alpha_frac": 0.6685823755,
"autogenerated": false,
"ratio": 2.7509881422924902,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.391957051779249,
"avg_score": null,
"num_lines": null
} |
# NOTE(review): assumes `requests` was imported by earlier course code;
# the bearer token below is an OAuth credential and should not live in
# source control.
headers = {"Authorization": "bearer 13426216-4U1ckno9J5AiK72VRbpEeBaMSKk", "User-Agent": "Dataquest/1.0"}
params = {"t": "day"}
response = requests.get("https://oauth.reddit.com/r/python/top", headers=headers, params=params)
python_top = response.json()
## 3. Getting the Most Upvoted Post ##
python_top_articles = python_top["data"]["children"]
most_upvoted = ""
most_upvotes = 0
# Linear scan for the article with the most upvotes ("ups").
for article in python_top_articles:
    ar = article["data"]
    if ar["ups"] >= most_upvotes:
        most_upvoted = ar["id"]
        most_upvotes = ar["ups"]
## 4. Getting Post Comments ##
url = "https://oauth.reddit.com/r/python/comments/4b7w9u"
headers = {"Authorization": "bearer 13426216-4U1ckno9J5AiK72VRbpEeBaMSKk", "User-Agent": "Dataquest/1.0"}
response = requests.get(url, headers = headers)
comments = response.json()
## 5. Getting the Most Upvoted Comment ##
comments_list = comments[1]["data"]["children"]
most_upvoted_comment = ""
most_upvotes_comment = 0
# Same linear scan, this time over the post's comments.
for comment in comments_list:
    co = comment["data"]
    if co["ups"] >= most_upvotes_comment:
        most_upvoted_comment = co["id"]
        most_upvotes_comment = co["ups"]
## 6. Upvoting a Comment ##
# Per the section title, dir=1 casts an upvote on the given comment id.
payload = {"dir": 1, "id": "d16y4ry"}
headers = {"Authorization": "bearer 13426216-4U1ckno9J5AiK72VRbpEeBaMSKk", "User-Agent": "Dataquest/1.0"}
response = requests.post("https://oauth.reddit.com/api/vote", json = payload, headers = headers)
status = response.status_code | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Apis and Scraping/Challenge_ Working with the reddit API-183.py",
"copies": "1",
"size": "1477",
"license": "mit",
"hash": -6792270547959599000,
"line_mean": 31.8444444444,
"line_max": 105,
"alpha_frac": 0.682464455,
"autogenerated": false,
"ratio": 2.8735408560311284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9021476765598806,
"avg_score": 0.006905709086464292,
"num_lines": 45
} |
#2boom (c) 2013
# v.0.4 03.04.13
from Poll import Poll
from Components.Converter.Converter import Converter
from enigma import iServiceInformation, iPlayableService
from Components.Element import cached
class ServiceInfoEX(Poll, Converter, object):
    """Enigma2 skin converter exposing extended service information
    (PIDs, CAIDs, video resolution, codec types, frame rate, transfer
    bitrate) for the currently playing service.

    The converter `type` argument selects either one named field (e.g.
    "apid") or, for any other string, a free-form template in which
    "%field" tokens are substituted with the corresponding values.
    Polls once per second (see poll_interval in __init__).
    """
    # Numeric ids for each supported field; `format` marks template mode.
    apid = 0
    vpid = 1
    sid = 2
    onid = 3
    tsid = 4
    prcpid = 5
    caids = 6
    pmtpid = 7
    txtpid = 8
    xres = 9
    yres = 10
    atype = 11
    vtype = 12
    avtype = 13
    fps = 14
    tbps = 15
    format = 16
    def __init__(self, type):
        # `type` is the string from the skin XML; map it to a field id,
        # defaulting to template (`format`) mode for any other string.
        Converter.__init__(self, type)
        Poll.__init__(self)
        if type == "apid":
            self.type = self.apid
        elif type == "vpid":
            self.type = self.vpid
        elif type == "sid":
            self.type = self.sid
        elif type == "onid":
            self.type = self.onid
        elif type == "tsid":
            self.type = self.tsid
        elif type == "prcpid":
            self.type = self.prcpid
        elif type == "caids":
            self.type = self.caids
        elif type == "pmtpid":
            self.type = self.pmtpid
        elif type == "txtpid":
            self.type = self.txtpid
        elif type == "tsid":
            # NOTE(review): duplicate of the earlier "tsid" branch —
            # this arm is unreachable.
            self.type = self.tsid
        elif type == "xres":
            self.type = self.xres
        elif type == "yres":
            self.type = self.yres
        elif type == "atype":
            self.type = self.atype
        elif type == "vtype":
            self.type = self.vtype
        elif type == "avtype":
            self.type = self.avtype
        elif type == "fps":
            self.type = self.fps
        elif type == "tbps":
            self.type = self.tbps
        else:
            self.type = self.format
            self.sfmt = type[:]  # keep a copy of the raw template string
        self.poll_interval = 1000  # refresh every second
        self.poll_enabled = True
    def getServiceInfoString(self, info, what, convert = lambda x: "%d" % x):
        # Wrapper around iServiceInformation lookups. Sentinel returns:
        #   -1 -> value unavailable ("N/A")
        #   -2 -> value is a string; fetch via getInfoString
        #   -3 -> value is an object list; format entries as 4-digit hex
        # otherwise the integer is rendered via `convert`.
        v = info.getInfo(what)
        if v == -1:
            return "N/A"
        if v == -2:
            return info.getInfoString(what)
        if v == -3:
            t_objs = info.getInfoObject(what)
            if t_objs and (len(t_objs) > 0):
                ret_val=""
                for t_obj in t_objs:
                    ret_val += "%.4X " % t_obj
                return ret_val[:-1]  # drop the trailing space
            else:
                return ""
        return convert(v)
    @cached
    def getText(self):
        # Gather all fields into self.stream, then return either the one
        # selected field or the expanded "%field" template.
        self.stream = { 'apid':"N/A", 'vpid':"N/A", 'sid':"N/A", 'onid':"N/A", 'tsid':"N/A", 'prcpid':"N/A", 'caids':"FTA", 'pmtpid':"N/A", 'txtpid':"N/A", 'xres':"", 'yres':"", 'atype':"", 'vtype':"", 'avtype':"", 'fps':"", 'tbps':"",}
        streaminfo = ""
        service = self.source.service
        info = service and service.info()
        if not info:
            return ""
        # PIDs are re-rendered as 4-digit uppercase hex when available.
        if self.getServiceInfoString(info, iServiceInformation.sAudioPID) != "N/A":
            self.stream['apid'] = "%0.4X" % int(self.getServiceInfoString(info, iServiceInformation.sAudioPID))
        if self.getServiceInfoString(info, iServiceInformation.sVideoPID) != "N/A":
            self.stream['vpid'] = "%0.4X" % int(self.getServiceInfoString(info, iServiceInformation.sVideoPID))
        if self.getServiceInfoString(info, iServiceInformation.sSID) != "N/A":
            self.stream['sid'] = "%0.4X" % int(self.getServiceInfoString(info, iServiceInformation.sSID))
        if self.getServiceInfoString(info, iServiceInformation.sONID) != "N/A":
            self.stream['onid'] = "%0.4X" % int(self.getServiceInfoString(info, iServiceInformation.sONID))
        if self.getServiceInfoString(info, iServiceInformation.sTSID) != "N/A":
            self.stream['tsid'] = "%0.4X" % int(self.getServiceInfoString(info, iServiceInformation.sTSID))
        if self.getServiceInfoString(info, iServiceInformation.sPCRPID) != "N/A":
            self.stream['prcpid'] = "%0.4X" % int(self.getServiceInfoString(info, iServiceInformation.sPCRPID))
        if self.getServiceInfoString(info, iServiceInformation.sPMTPID) != "N/A":
            self.stream['pmtpid'] = "%0.4X" % int(self.getServiceInfoString(info, iServiceInformation.sPMTPID))
        if self.getServiceInfoString(info, iServiceInformation.sTXTPID) != "N/A":
            self.stream['txtpid'] = "%0.4X" % int(self.getServiceInfoString(info, iServiceInformation.sTXTPID))
        self.stream['caids'] = self.getServiceInfoString(info, iServiceInformation.sCAIDs)
        # Height gets an "i"/"p" suffix from the progressive flag.
        if self.getServiceInfoString(info, iServiceInformation.sVideoHeight) != "N/A":
            self.stream['yres'] = self.getServiceInfoString(info, iServiceInformation.sVideoHeight) + ("i", "p", "")[info.getInfo(iServiceInformation.sProgressive)]
        if self.getServiceInfoString(info, iServiceInformation.sVideoWidth) != "N/A":
            self.stream['xres'] = self.getServiceInfoString(info, iServiceInformation.sVideoWidth)
        audio = service.audioTracks()
        if audio:
            if audio.getCurrentTrack() > -1:
                self.stream['atype'] = str(audio.getTrackInfo(audio.getCurrentTrack()).getDescription())
        # Video codec name indexed by sVideoType; avtype combines both.
        self.stream['vtype'] = ("MPEG2", "MPEG4", "MPEG1", "MPEG4-II", "VC1", "VC1-SM", "")[info.getInfo(iServiceInformation.sVideoType)]
        self.stream['avtype'] = ("MPEG2/", "MPEG4/", "MPEG1/", "MPEG4-II/", "VC1/", "VC1-SM/", "")[info.getInfo(iServiceInformation.sVideoType)] + self.stream['atype']
        # Frame rate arrives in millihertz; round to whole fps.
        if self.getServiceInfoString(info, iServiceInformation.sFrameRate, lambda x: "%d" % ((x+500)/1000)) != "N/A":
            self.stream['fps'] = self.getServiceInfoString(info, iServiceInformation.sFrameRate, lambda x: "%d" % ((x+500)/1000))
        if self.getServiceInfoString(info, iServiceInformation.sTransferBPS, lambda x: "%d kB/s" % (x/1024)) != "N/A":
            self.stream['tbps'] = self.getServiceInfoString(info, iServiceInformation.sTransferBPS, lambda x: "%d kB/s" % (x/1024))
        # Single-field modes return the one requested value.
        if self.type == self.apid:
            streaminfo = self.stream['apid']
        elif self.type == self.vpid:
            streaminfo = self.stream['vpid']
        elif self.type == self.sid:
            streaminfo = self.stream['sid']
        elif self.type == self.onid:
            streaminfo = self.stream['onid']
        elif self.type == self.tsid:
            streaminfo = self.stream['tsid']
        elif self.type == self.prcpid:
            streaminfo = self.stream['prcpid']
        elif self.type == self.caids:
            streaminfo = self.stream['caids']
        elif self.type == self.pmtpid:
            streaminfo = self.stream['pmtpid']
        elif self.type == self.txtpid:
            streaminfo = self.stream['txtpid']
        elif self.type == self.tsid:
            streaminfo = self.stream['tsid']
        elif self.type == self.xres:
            streaminfo = self.stream['xres']
        elif self.type == self.yres:
            streaminfo = self.stream['yres']
        elif self.type == self.atype:
            streaminfo = self.stream['atype']
        elif self.type == self.vtype:
            streaminfo = self.stream['vtype']
        elif self.type == self.avtype:
            streaminfo = self.stream['avtype']
        elif self.type == self.fps:
            streaminfo = self.stream['fps']
        elif self.type == self.tbps:
            streaminfo = self.stream['tbps']
        elif self.type == self.format:
            # Template mode: substitute each "%field" token, copy
            # everything else through.
            tmp = self.sfmt[:]
            for param in tmp.split():
                if param != '':
                    if param[0] != '%':
                        streaminfo += param
                    else:
                        streaminfo += ' ' + self.stream[param.strip('%')] + ' '
        return streaminfo
    text = property(getText)
    def changed(self, what):
        # Re-render on service start, video size change, or info update;
        # otherwise defer to the base Converter behavior.
        if what[0] == self.CHANGED_SPECIFIC:
            if what[1] == iPlayableService.evStart or what[1] == iPlayableService.evVideoSizeChanged or what[1] == iPlayableService.evUpdatedInfo:
                Converter.changed(self, what)
        elif what[0] != self.CHANGED_SPECIFIC or what[1] in self.interesting_events:
            Converter.changed(self, what)
        elif what[0] == self.CHANGED_POLL:
            # NOTE(review): unreachable — a CHANGED_POLL event is not
            # CHANGED_SPECIFIC, so the branch above already catches it.
            self.downstream_elements.changed(what)
| {
"repo_name": "Franc1/Enigma2-Skin-MetropolisHD",
"path": "usr/lib/enigma2/python/Components/Converter/ServiceInfoEX.py",
"copies": "1",
"size": "6968",
"license": "mit",
"hash": -5584527890525623000,
"line_mean": 37.4972375691,
"line_max": 230,
"alpha_frac": 0.6671928817,
"autogenerated": false,
"ratio": 2.7738853503184715,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3941078232018471,
"avg_score": null,
"num_lines": null
} |
"""2-by-2 contingency tables and scores"""
# Copyright (c) 2017 Aubrey Barnard. This is free software released
# under the MIT license. See LICENSE for details.
import math
class TwoByTwoTable(object):
    """Traditional 2-by-2 table as used in epidemiology, etc. to compare
    exposures and outcomes

    Only four independent counts are stored (exposed-with-outcome, the
    exposed row total, the outcome column total, and the grand total);
    every other cell and margin is derived from them on demand.
    """

    # Numeric types accepted for constructor count arguments.
    _num_arg_types = (int, float)

    def __init__(
        self,
        exp_out=None, exp_no_out=None,
        out_no_exp=None, no_exp_out=None,
        exp_tot=None, out_tot=None, total=None,
    ):
        # Handle construction from 3-by-3 table corners
        # (one cell plus its row total, column total, and grand total)
        if (isinstance(exp_out, self._num_arg_types)
            and isinstance(exp_tot, self._num_arg_types)
            and isinstance(out_tot, self._num_arg_types)
            and isinstance(total, self._num_arg_types)
        ):
            self._exp_out = exp_out
            self._exp_tot = exp_tot
            self._out_tot = out_tot
            self._total = total
        # Handle construction from 2-by-2 table (all four cells given;
        # margins are computed here)
        elif (isinstance(exp_out, self._num_arg_types)
            and isinstance(exp_no_out, self._num_arg_types)
            and isinstance(out_no_exp, self._num_arg_types)
            and isinstance(no_exp_out, self._num_arg_types)
        ):
            self._exp_out = exp_out
            self._exp_tot = exp_out + exp_no_out
            self._out_tot = exp_out + out_no_exp
            self._total = (
                exp_out + exp_no_out + out_no_exp + no_exp_out)
        # Construction modes are only the above, bad arguments otherwise
        else:
            raise ValueError(
                'Insufficient {0} constructor arguments.'
                .format(self.__class__.__name__))

    @property
    def exp_out(self):
        """Cell count: exposed and had the outcome."""
        return self._exp_out

    @property
    def exp_no_out(self):
        """Cell count: exposed, no outcome (row total minus exp_out)."""
        return self._exp_tot - self._exp_out

    @property
    def out_no_exp(self):
        """Cell count: outcome without exposure (column total minus exp_out)."""
        return self._out_tot - self._exp_out

    @property
    def no_exp_out(self):
        """Cell count: neither exposure nor outcome (inclusion-exclusion)."""
        return self._total - self._exp_tot - self._out_tot + self._exp_out

    @property
    def exp_tot(self):
        """Margin: total exposed."""
        return self._exp_tot

    @property
    def no_exp_tot(self):
        """Margin: total unexposed."""
        return self._total - self._exp_tot

    @property
    def out_tot(self):
        """Margin: total with the outcome."""
        return self._out_tot

    @property
    def no_out_tot(self):
        """Margin: total without the outcome."""
        return self._total - self._out_tot

    @property
    def total(self):
        """Grand total of all units."""
        return self._total

    def smoothed(self, pseudocount=1):
        """Return a smoothed version of this table by adding the given
        pseudocount to each of the four cells.
        """
        return TwoByTwoTable(
            exp_out=self.exp_out + pseudocount,
            exp_no_out=self.exp_no_out + pseudocount,
            out_no_exp=self.out_no_exp + pseudocount,
            no_exp_out=self.no_exp_out + pseudocount,
        )

    def table_2x2(self, pseudocount=None):
        """Return the four cells as a 2-tuple of 2-tuples, optionally
        adding a numeric `pseudocount` to every cell."""
        if isinstance(pseudocount, self._num_arg_types):
            return ((self.exp_out + pseudocount,
                     self.exp_no_out + pseudocount),
                    (self.out_no_exp + pseudocount,
                     self.no_exp_out + pseudocount))
        else:
            return ((self.exp_out, self.exp_no_out),
                    (self.out_no_exp, self.no_exp_out))

    def table_3x3(self, pseudocount=None):
        """Return cells plus margins and grand total as a 3-tuple of
        3-tuples. Margins receive 2x the pseudocount and the grand total
        4x, so the smoothed table's sums stay consistent."""
        if isinstance(pseudocount, self._num_arg_types):
            return ((self.exp_out + pseudocount,
                     self.exp_no_out + pseudocount,
                     self.exp_tot + 2 * pseudocount),
                    (self.out_no_exp + pseudocount,
                     self.no_exp_out + pseudocount,
                     self.no_exp_tot + 2 * pseudocount),
                    (self.out_tot + 2 * pseudocount,
                     self.no_out_tot + 2 * pseudocount,
                     self.total + 4 * pseudocount))
        else:
            return ((self.exp_out, self.exp_no_out, self.exp_tot),
                    (self.out_no_exp, self.no_exp_out, self.no_exp_tot),
                    (self.out_tot, self.no_out_tot, self.total))
class TemporalTwoByTwoTable(TwoByTwoTable):
    """A temporal 2-by-2 table splits the first cell (exposure and outcome)
    into two counts: exposure before outcome and exposure after outcome.

    Construction requires either both `exp_bef_out` and `exp_aft_out`,
    or `exp_out` plus exactly one of them (the other is derived by
    subtraction); the remaining arguments are as in TwoByTwoTable.
    """

    def __init__(
        self,
        exp_bef_out=None, exp_aft_out=None, exp_out=None,
        exp_no_out=None, out_no_exp=None, no_exp_out=None,
        exp_tot=None, out_tot=None, total=None,
    ):
        # Construct this class
        if ((isinstance(exp_bef_out, self._num_arg_types)
            and isinstance(exp_aft_out, self._num_arg_types))
            or (isinstance(exp_out, self._num_arg_types)
                and (isinstance(exp_bef_out, self._num_arg_types)
                     or isinstance(exp_aft_out, self._num_arg_types)))
        ):
            # Whichever of before/after is missing is derived from the
            # combined exp_out count.
            self._exp_bef_out = (exp_bef_out
                                 if exp_bef_out is not None
                                 else exp_out - exp_aft_out)
            self._exp_aft_out = (exp_aft_out
                                 if exp_aft_out is not None
                                 else exp_out - exp_bef_out)
            exp_out = (exp_out
                       if exp_out is not None
                       else exp_bef_out + exp_aft_out)
            # Construct superclass
            super().__init__(
                exp_out, exp_no_out,
                out_no_exp, no_exp_out,
                exp_tot, out_tot, total,
            )
        # Otherwise arguments don't completely define a 2-by-2 table
        else:
            raise ValueError(
                'Insufficient {0} constructor arguments.'
                .format(self.__class__.__name__))

    @property
    def exp_bef_out(self):
        """Count of units exposed before the outcome occurred."""
        return self._exp_bef_out

    @property
    def exp_aft_out(self):
        """Count of units exposed after the outcome occurred."""
        return self._exp_aft_out

    def smoothed(self, pseudocount=1):
        """Return a smoothed version of this table by adding the given
        pseudocount to each of the four cells.

        The exp_out pseudocount is split evenly between the before and
        after sub-counts.
        """
        half_count = pseudocount / 2
        return TemporalTwoByTwoTable(
            exp_bef_out=self.exp_bef_out + half_count,
            exp_aft_out=self.exp_aft_out + half_count,
            exp_out=self.exp_out + pseudocount,
            exp_no_out=self.exp_no_out + pseudocount,
            out_no_exp=self.out_no_exp + pseudocount,
            no_exp_out=self.no_exp_out + pseudocount,
        )

    def cohort_table(self):
        """Creates a copy of this table that treats the counts as in a cohort
        study. That is, 'exp_aft_out' is counted as part of
        'out_no_exp' rather than as part of 'exp_out'. These are the
        counts a cohort study would use having followed subjects over
        time: the outcome happened first, so the exposure does not
        matter.
        """
        return TwoByTwoTable(
            exp_out=self.exp_bef_out,
            exp_tot=(self.exp_tot - self.exp_aft_out),
            out_tot=self.out_tot,
            total=self.total,
        )
def binary_mutual_information(table):
    """Return the mutual information between the two binary variables in the
    given 2-by-2 table
    """
    # Shortcut / Special-Case the zero distribution
    if table.total == 0:
        return 0.0
    n = table.total
    # Each entry pairs a joint cell count with its row and column margins.
    cells = (
        (table.exp_out, table.exp_tot, table.out_tot),
        (table.exp_no_out, table.exp_tot, table.no_out_tot),
        (table.out_no_exp, table.no_exp_tot, table.out_tot),
        (table.no_exp_out, table.no_exp_tot, table.no_out_tot),
    )
    # Accumulate joint * log(joint * n / (row * col)), skipping empty
    # cells: a positive joint count guarantees nonzero margins, so the
    # denominator inside the log is safe.
    mi_sum = 0.0
    for joint, row_margin, col_margin in cells:
        if joint > 0:
            mi_sum += joint * math.log(
                (joint * n) / (row_margin * col_margin))
    return mi_sum / n
def relative_risk(table):
    """Return the relative risk for the given 2-by-2 table"""
    # When all the cells are zero or both numerators are zero the rates
    # are "equal" so the relative risk is one
    if ((table.exp_tot == 0 and table.no_exp_tot == 0)
        or (table.exp_out == 0 and table.out_no_exp == 0)
    ):
        return 1.0
    # If the numerator rate is zero, then the relative risk cannot get
    # any less, so it is also zero
    elif table.exp_tot == 0:
        return 0.0
    # If the denominator rate is zero, then the relative risk cannot get
    # any larger, so it is infinity. (Bug fix: out_no_exp == 0 with
    # exp_out > 0 previously fell through to the division below and
    # raised ZeroDivisionError; a zero unexposed risk against a positive
    # exposed risk is likewise an infinite relative risk.)
    elif table.no_exp_tot == 0 or table.out_no_exp == 0:
        return float('inf')
    # (eo/et)/(one/net) -> (eo*net)/(one*et)
    return ((table.exp_out * table.no_exp_tot)
            / (table.out_no_exp * table.exp_tot))
def odds_ratio(table):
    """Return the odds ratio for the given 2-by-2 table"""
    # When all the cells are zero or both numerators are zero the odds
    # are "equal" so the ratio is one
    if ((table.exp_tot == 0 and table.no_exp_tot == 0)
        or (table.exp_out == 0 and table.out_no_exp == 0)
    ):
        return 1.0
    # If the numerator odds are zero, then the ratio cannot get any
    # less, so it is zero
    elif table.exp_tot == 0:
        return 0.0
    # If the denominator odds are zero, then the ratio cannot get any
    # larger, so it is infinity
    elif table.no_exp_tot == 0:
        return float('inf')
    # Bug fix: exp_no_out == 0 (infinite exposed odds) or
    # out_no_exp == 0 (zero unexposed odds, with exp_out > 0 here)
    # previously reached the division below and raised
    # ZeroDivisionError; both cases mean an infinite ratio.
    elif table.exp_no_out == 0 or table.out_no_exp == 0:
        return float('inf')
    # (eo/eno)/(one/neo) -> (eo*neo)/(eno*one)
    return ((table.exp_out * table.no_exp_out)
            / (table.exp_no_out * table.out_no_exp))
def absolute_risk_difference(table):
    """Return the absolute risk difference for the given 2-by-2 table"""
    # Absolute risk difference: (eo/et)-(one/net)
    def _rate(numerator, denominator):
        # Define the risk as zero when the row total is zero to avoid
        # division by zero.
        if denominator > 0:
            return numerator / denominator
        return 0.0
    # Difference of the outcome risks in the exposed and the unexposed.
    return (_rate(table.exp_out, table.exp_tot)
            - _rate(table.out_no_exp, table.no_exp_tot))
| {
"repo_name": "afbarnard/barnapy",
"path": "barnapy/contingency_table.py",
"copies": "1",
"size": "10557",
"license": "mit",
"hash": -7399744215904303000,
"line_mean": 35.2783505155,
"line_max": 77,
"alpha_frac": 0.548924884,
"autogenerated": false,
"ratio": 3.6216123499142365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46705372339142365,
"avg_score": null,
"num_lines": null
} |
## 2. Calculating differences ##
# NOTE(review): 16280.5 is half of the 32561 rows used below, i.e. the
# expected count per gender under a 50/50 split.
female_diff = (10771 - 16280.5)/16280.5
male_diff = (21790 - 16280.5)/16280.5
## 3. Updating the formula ##
# Chi-squared terms: squared deviation from expected, over expected.
female_diff = ((10771 - 16280.5)**2)/16280.5
male_diff = ((21790 - 16280.5)**2)/16280.5
gender_chisq = male_diff + female_diff
## 4. Generating a distribution ##
chi_squared_values = []
from numpy.random import random
import matplotlib.pyplot as plt
# Simulate 1000 chi-squared statistics under a fair 50/50 split of
# 32561 draws, then plot their distribution.
for item in range(1000):
    a = random((32561,))
    a[a < 0.5] = 0
    a[a>=0.5] = 1
    male = len([v for v in a if v==0])
    female = len([v for v in a if v==1])
    male_diff = (male - 16280.5) ** 2 / 16280.5
    female_diff = (female - 16280.5) ** 2 / 16280.5
    gender_diff = male_diff + female_diff
    chi_squared_values.append(gender_diff)
plt.hist(chi_squared_values)
plt.show()
## 6. Smaller samples ##
# Same observed proportions at 1/100th the sample size.
female_diff = (107.71 - 162.805) ** 2 / 162.805
male_diff = (217.90 - 162.805) ** 2 / 162.805
gender_chisq = female_diff + male_diff
## 7. Sampling distribution equality ##
chi_squared_values = []
from numpy.random import random
import matplotlib.pyplot as plt
# Repeat the simulation with only 300 draws per statistic.
for item in range(1000):
    a = random((300,))
    a[a < 0.5] = 0
    a[a>=0.5] = 1
    male = len([v for v in a if v==0])
    female = len([v for v in a if v==1])
    male_diff = (male - 150) ** 2 / 150
    female_diff = (female - 150) ** 2 / 150
    gender_diff = male_diff + female_diff
    chi_squared_values.append(gender_diff)
plt.hist(chi_squared_values)
plt.show()
## 9. Increasing degrees of freedom ##
# Sum per-category chi-squared terms across five race categories.
diffs = []
observed = [27816, 3124, 1039, 311, 271]
expected = [26146.5, 3939.9, 944.3, 260.5, 1269.8]
for i, obs in enumerate(observed):
    exp = expected[i]
    diff = (obs - exp) ** 2 / exp
    diffs.append(diff)
race_chisq = sum(diffs)
## 10. Using SciPy ##
from scipy.stats import chisquare
import numpy as np
observed = np.array([27816, 3124, 1039, 311, 271])
expected = np.array([26146.5, 3939.9, 944.3, 260.5, 1269.8])
chisquare_value, race_pvalue = chisquare(observed, expected) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Probability Statistics Intermediate/Chi-squared tests-172.py",
"copies": "1",
"size": "1994",
"license": "mit",
"hash": -8926437852918459000,
"line_mean": 24.5769230769,
"line_max": 60,
"alpha_frac": 0.6293881645,
"autogenerated": false,
"ratio": 2.6306068601583115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37599950246583114,
"avg_score": null,
"num_lines": null
} |
## 2. Calculating expected values ##
males_over50k = .241 * .669 * 32561
males_under50k = .759 * .669 * 32561
females_over50k = .241 * .331 * 32561
females_under50k = .759 * .331 * 32561
## 3. Calculating chi-squared ##
observed = [6662, 1179, 15128, 9592]
expected = [5249.8, 2597.4, 16533.5, 8180.3]
values = []
for i, obs in enumerate(observed):
exp = expected[i]
value = (obs - exp) ** 2 / exp
values.append(value)
chisq_gender_income = sum(values)
## 4. Finding statistical significance ##
# Same statistic via scipy; chisquare returns (statistic, p-value).
import numpy as np
from scipy.stats import chisquare

observed = [6662, 1179, 15128, 9592]
expected = [5249.8, 2597.4, 16533.5, 8180.3]
chisq_value, pvalue_gender_income = chisquare(observed, expected)
## 5. Cross tables ##
# NOTE(review): `income` is a DataFrame loaded by an earlier course step;
# it is not defined anywhere in this file — confirm before running.
import pandas
table = pandas.crosstab(income["sex"], [income["race"]])
print(table)
## 6. Finding expected values ##
import pandas
from scipy.stats import chi2_contingency
# chi2_contingency returns (statistic, p-value, degrees of freedom,
# expected-frequency table) for the contingency table.
table = pandas.crosstab(income["sex"], [income["race"]])
chisq_value, pvalue_gender_race, df, expected = chi2_contingency(table) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Probability Statistics Intermediate/Multi category chi-squared tests-173.py",
"copies": "1",
"size": "1045",
"license": "mit",
"hash": -7487479239693211000,
"line_mean": 23.3255813953,
"line_max": 71,
"alpha_frac": 0.6880382775,
"autogenerated": false,
"ratio": 2.7072538860103625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3895292163510362,
"avg_score": null,
"num_lines": null
} |
"""2ch bbs response decoder.
Copyright (c) 2011-2014 mei raka
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL mei raka BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import re
# Write-result status codes for the bbs.cgi response decoders below.
WRITE_ERROR = 0
WRITE_OK = 1
WRITE_COOKIE = 2
def menu(body):
    """Decode the 2ch board-menu HTML into board tuples.

    :param body: 2ch menu html string
    :rtype: generator of (board url, category, board title) tuples
    """
    category_pat = re.compile(u'<B>([^<]+)</B><BR>')
    board_pat = re.compile(u'<A HREF=(http://[^/]+/[^/]+/)>([^<]+)<')
    category = None
    for line in body.split(u'\n'):
        found_category = category_pat.search(line)
        if found_category:
            category = found_category.groups()[0]
        # Boards listed before the first category heading are skipped.
        if category:
            found_board = board_pat.match(line)
            if found_board:
                url, title = found_board.groups()
                yield (url, category, title)
def board_subject(body):
    """Decode 2ch subject.txt content into thread tuples.

    :param body: 2ch board subject string
    :rtype: generator of (thread dat id, title, rescount) tuples
    """
    thread_pat = re.compile(u'(\\d+).dat<>(.+)\\s\\((\\d+)\\)')
    for row in body.split(u'\n'):
        found = thread_pat.match(row)
        if found:
            dat_id, title, rescount = found.groups()
            yield (dat_id, title, int(rescount))
def thread_dat(body):
    """Decode a 2ch thread dat string into response tuples.

    :param body: thread dat content, one response per line
    :rtype: generator of (name, mail, date_id, message) tuples
    """
    for line in body.split(u'\n'):
        splitted = line.split(u'<>')
        if len(splitted) == 5:
            name, mail, date_id, message, title = splitted
            yield (name, mail, date_id, message)
        elif len(splitted) == 6:
            # An extra deleted-marker field sits between message and title.
            name, mail, date_id, message, deleted, title = splitted
            yield (name, mail, date_id, message)
        elif len(splitted) >= 4:
            # Unrecognised layout: surface the raw line in the message slot.
            # BUG FIX: the original used a bare '%' with no conversion type
            # ('... %' % line), which raises ValueError at runtime; '%s' is
            # what was intended.
            name = u''
            mail = u''
            date_id = u''
            # NOTE(review): this replace is a no-op ('<>' -> '<>'); it
            # probably meant to escape as '&lt;&gt;' — confirm intent.
            message = u'can not understand this line:</br> %s' % (
                line.replace(u'<>', u'<>'))
            yield (name, mail, date_id, message)
def thread_write(body):
    """Extract the write status embedded in a bbs.cgi response.

    :param body: 2ch /test/bbs.cgi response body string
    :rtype: lower-cased status string, or None when no marker is found
    """
    status_pat = re.compile(u'<\\!--\\s2ch_X:([^\\s]+)\\s-->')
    found = status_pat.search(body)
    if found is None:
        return None
    return found.group(1).lower()
def thread_write_form(body):
    """Return the last hidden-input (name, value) pair found in *body*.

    :param body: HTML string of the write-confirmation form
    :rtype: one-element list [(name, value)], or [] when none is found
    """
    hidden_pat = re.compile(
        u'input\\stype=hidden\\s+name="([^"]+)"\\svalue="([^"]+)"')
    last_pair = None
    for fragment in body.split(u'<'):
        found = hidden_pat.search(fragment)
        if found:
            last_pair = found.groups()
    if last_pair is None:
        return []
    return [(last_pair[0], last_pair[1])]
| {
"repo_name": "meiraka/python-bbs2ch",
"path": "bbs2ch/decode.py",
"copies": "1",
"size": "4316",
"license": "bsd-3-clause",
"hash": 5503994917018954000,
"line_mean": 35.5762711864,
"line_max": 79,
"alpha_frac": 0.6320667285,
"autogenerated": false,
"ratio": 3.9560036663611364,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00013451708366962604,
"num_lines": 118
} |
#2C_LCD_driver.py is needed https://gist.github.com/vay3t/8b0577acfdb27a78101ed16dd78ecba1
#put it in the same folder
# add your ethereum address to eth_address
#donate to 0x9c64Fd2804730683F3c5401aBA7285b2f33F3eDF or not , I'll live
# NOTE(review): Python 2 script (bare `print e` statements). The original
# indentation was lost in extraction; the try/except/while bodies below must
# be re-indented before this will run. Depends on the third-party `requests`
# package and the local I2C_LCD_driver module.
import I2C_LCD_driver
from time import *
import time
import requests
import json
from requests.exceptions import ConnectionError
eth_address = "" # your ethereum address goes here
site = "https://etherchain.org/api/account/"
decimals = 2
final_site = site + eth_address
hdr = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
lastreported = "https://api.nanopool.org/v1/eth/reportedhashrate/" + eth_address
balance_nano = "https://api.nanopool.org/v1/eth/balance/" + eth_address
priceusd = "https://api.coinmarketcap.com/v1/ticker/ethereum/"
mylcd = I2C_LCD_driver.lcd()
# One warm-up fetch of each endpoint before the polling loop.
req = requests.get(final_site, headers=hdr)
reqbal = requests.get(balance_nano, headers=hdr)
reqhashrate = requests.get(lastreported, headers=hdr)
reqprice = requests.get(priceusd, headers=hdr)
iteration = 0
# Poll forever: each endpoint is fetched with a crude retry scheme — on
# ConnectionError the response is replaced by the string "No response" and
# re-fetched until a non-string comes back. NOTE(review): the retry
# requests.get calls are NOT wrapped in try/except, so a second connection
# failure escapes the loop uncaught.
while True:
try:
req = requests.get(final_site, headers=hdr)
except requests.exceptions.ConnectionError as e:
print e
req = "No response"
while type(req) == str:
req = requests.get(final_site, headers=hdr)
if type(req) != str:
break
try:
reqbal = requests.get(balance_nano, headers=hdr)
except requests.exceptions.ConnectionError as e: #nanopool stuff
print e
reqbal = "No response"
while type(reqbal) == str:
reqbal = requests.get(balance_nano, headers=hdr)
if type(reqbal) != str:
break
try:
reqhashrate = requests.get(lastreported, headers=hdr)
except requests.exceptions.ConnectionError as e: #hashrate for nanopool leave it and if you don't mine it will show nothing on the second row of the LCD or show 0's
print e
reqhashrate = "No response"
while type(reqhashrate) == str:
reqhashrate = requests.get(lastreported, headers=hdr)
if type(reqhashrate) != str:
break
try:
reqprice = requests.get(priceusd, headers=hdr)
except requests.exceptions.ConnectionError as e:
print e
reqprice = "No response"
while type(reqprice) == str:
reqprice = requests.get(priceusd, headers=hdr)
if type(reqprice) != str:
break
# NOTE(review): the names are swapped here — jsondatahash holds the
# balance response and jsondatabal holds the hashrate response; the later
# round(float(...)) lines rely on this swap, so it cancels out.
jsondata = req.json()
jsondatahash = reqbal.json()
jsondatabal = reqhashrate.json()
jsondatapriceusd = reqprice.json()
price = round((float(jsondatapriceusd[0]['price_usd'])),1)
# NOTE(review): with indentation lost it is ambiguous whether the next two
# assignments belong inside this `if`; as written flat, "LAMBO ALERT" is
# immediately overwritten by the nanostats assignment below — confirm.
if price >= 1000:
nanostats = "LAMBO ALERT"
final_price = str(price) + " " + str(round((float(jsondata['data'][0]['balance']) / 1000000000000000000), decimals)) + " " + str(round((float(jsondata['data'][0]['balance']) / 1000000000000000000), decimals) * float(price))
nanostats = str(round(float(jsondatahash['data']),2)) + " " + str(round(float(jsondatabal['data']), 2))
iteration = iteration + 1
mylcd.lcd_display_string(final_price, 1)
mylcd.lcd_display_string(nanostats, 2)
| {
"repo_name": "Ilyab99/HodlBot90000",
"path": "text-requests.py",
"copies": "1",
"size": "3510",
"license": "bsd-2-clause",
"hash": -8823703559330940000,
"line_mean": 38.8863636364,
"line_max": 227,
"alpha_frac": 0.6512820513,
"autogenerated": false,
"ratio": 3.243992606284658,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4395274657584658,
"avg_score": null,
"num_lines": null
} |
## 2. Condensing class size ##
# NOTE(review): `data` (dict of DataFrames) and `pd` are created by earlier
# course steps — neither is defined in this file. Indentation of the `for`
# loop below was lost in extraction.
class_size = data['class_size']
class_size = class_size[class_size['GRADE ']=='09-12']
class_size = class_size[class_size['PROGRAM TYPE']=='GEN ED']
print(class_size.head(5))
## 3. Computing average class sizes ##
import numpy
class_size = class_size.groupby("DBN").agg(numpy.mean)
class_size.reset_index(inplace=True)
data['class_size'] = class_size
print(data['class_size'].head(5))
## 4. Condensing demographics ##
# Keep only the 2011-2012 school year rows.
data['demographics'] = data['demographics'][data['demographics']['schoolyear']==20112012]
print(data['demographics'].head(5))
## 5. Condensing graduation ##
data['graduation']= data['graduation'][(data['graduation']['Cohort']=='2006') & (data['graduation']['Demographic']=='Total Cohort')]
print(data['graduation'].head(5))
## 6. Converting AP test scores ##
# errors='coerce' turns unparseable entries into NaN rather than raising.
cols = ['AP Test Takers ', 'Total Exams Taken', 'Number of Exams with scores 3 4 or 5']
for col in cols:
data['ap_2010'][col] = pd.to_numeric(data['ap_2010'][col], errors='coerce')
print(data['ap_2010'].head(5))
## 8. Performing the left joins ##
combined = data["sat_results"]
combined = combined.merge(data['ap_2010'],how = 'left',on="DBN")
combined = combined.merge(data['graduation'],how = 'left',on="DBN")
print(combined.head(2))
print(combined.shape)
## 9. Performing the inner joins ##
combined = combined.merge(data['class_size'],how = 'inner',on="DBN")
combined = combined.merge(data['demographics'],how = 'inner',on="DBN")
combined = combined.merge(data['survey'],how = 'inner',on="DBN")
combined = combined.merge(data['hs_directory'],how = 'inner',on="DBN")
print(combined.head(2))
print(combined.shape)
## 10. Filling in missing values ##
# Column means first, then 0 for non-numeric columns the mean cannot fill.
combined = combined.fillna(combined.mean())
combined = combined.fillna(0)
print(combined.head(2))
## 11. Adding a school district column ##
def extractstring(string):
    """Return the first two characters of *string* (the district code of a DBN)."""
    return string[:2]
# Derive the two-character district code from each DBN (relies on the
# `combined` DataFrame built in the steps above).
combined['school_dist']=combined['DBN'].apply(extractstring)
print(combined['school_dist'].head(5))
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Data Exploration/Data Cleaning Walkthrough_ Combining The Data-209.py",
"copies": "1",
"size": "1975",
"license": "mit",
"hash": -6883235805608522000,
"line_mean": 31.393442623,
"line_max": 132,
"alpha_frac": 0.6901265823,
"autogenerated": false,
"ratio": 3.0384615384615383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9153036743624873,
"avg_score": 0.015110275427333135,
"num_lines": 61
} |
# 2 conditional execution: if
# 2 alternative execution: if else
# 2 chained conditonals: if else elif
# raw input
# 4 def 1 bool 1 int 1 str 1 whatever you want
# main
# comparison conditionals
# or / not conditionals
# random.random = 0 to 1
# random.randint()
# str.format()
# """
def dying_age(age):
    """Predict an age of death by adding a random 5-65 years to *age*.

    BUG FIX: the original assigned the undefined global ``current_age`` to
    the ``age`` parameter and then read ``current_age`` again, raising
    NameError on every call; it now uses the ``age`` argument it was given.

    :param age: current age, as an int or a numeric string
    :returns: predicted age of death as an int
    """
    import random  # imported locally; this script has no import block
    new_age = int(age) + random.randint(5, 65)
    return new_age
# NOTE(review): broken as written — `return` on the tuple line exits before
# newer_age is computed (the next line is unreachable); the tuple contains
# the random.randint function object, not a call; `new_age` is undefined in
# this scope; `random` is never imported; `raw_input` is Python 2 only; the
# original if/else indentation was lost in extraction.
def accidents_proba():
days_inHos = raw_input("Were you in any accidents that required multiple days of stay in the hospital? How many days were you in the hospital?: ")
if int(days_inHos) >= 5:
return ("Your life will be shortened by: ", random.randint, "JK!: ")
newer_age = int(new_age) - random.random()
else :
random.randint(0, 100)
newer_age = int(new_age) - random.random()
return newer_age
def normal_life():
    """Ask for a monthly accident count and return a verdict string.

    BUG FIX: every branch of the original used assignment (``=``) where a
    comparison (``==``) was intended — a SyntaxError — and compared the raw
    input string against ints; the answer is now converted with int() first.
    Returns None for negative counts (no branch matches), as before.
    """
    daily_life = int(raw_input("On avergae how many times a month do you get into accidents: "))
    if daily_life == 0:
        return "You're quite a normal person"
    elif daily_life == 1:
        return "You should stay away from certain things"
    elif daily_life == 2:
        return "You should be home for a few days"
    elif daily_life >= 3:
        return "You should be home for a long long time"
# Prompt for future plans and echo them back (Python 2 `print` statement;
# returns None). Indentation lost in extraction.
def future_thoughts():
future_plans = raw_input("Do you plan to participate in extreme activities in the future? What are they: ")
print future_plans
# NOTE(review): builds a tuple and discards it (no return); `random` is
# never imported in this file, so calling this raises NameError.
def other_things():
stuffs = ("I like to swim ", random.randint(32, 450), "a day.: ")
# NOTE(review): irreparably broken as written — random.random() takes no
# arguments; `time` shadows the time module name; the actual_velocity line
# is missing its closing parenthesis (SyntaxError) and divides function
# objects, not numbers; `if velocity = 1000/0.1` uses assignment in a
# condition and lacks a colon; the trailing returns sit outside any clear
# function body once indentation was stripped. Intent too unclear to fix.
def velocity():
distace = random.random(0, 1000)
time = random.randint
actual_velocity = float(random.random) / float(random.randint
return actual_velocity
if velocity = 1000/0.1
return "Winning on the way"
else:
return " "
# NOTE(review): `acceleration` has no body — unless `main` was meant to be
# nested inside it, this is a SyntaxError. `main` passes names that are not
# defined at its scope (age, daily_life, future_plans, stuffs,
# actual_velocity) to functions that take zero or one argument, and all
# return values are discarded.
def acceleration():
def main(): # main
current_age = raw_input("How old are you?: ")
dying_age(age)
normal_life(daily_life)
future_thoughts(future_plans)
other_things(stuffs)
velocity(actual_velocity)
main()
| {
"repo_name": "suay1936/suay1936-cmis-cs2",
"path": "conditionals1.py",
"copies": "1",
"size": "2143",
"license": "cc0-1.0",
"hash": -3933921182750078000,
"line_mean": 29.1830985915,
"line_max": 150,
"alpha_frac": 0.615025665,
"autogenerated": false,
"ratio": 3.5776293823038396,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.469265504730384,
"avg_score": null,
"num_lines": null
} |
# 2-corner diag cvx quads
import numpy
import fractions
# upper half z
# Build a 2-D table of cumulative gcd sums over lattice points below the
# (n, m) diagonal, via inclusion-exclusion on the two predecessor cells.
# NOTE(review): Python 2 code (xrange, print statement); fractions.gcd was
# removed in Python 3.9 (use math.gcd); loop-body indentation was lost in
# extraction and must be restored before running.
n = 2
m = 2
nn = max(n,m)
diag_gcd_sums = []
for i in xrange(0, n + 1):
diag_gcd_sums.append([]) # j = 0
for j in xrange(0, m + 1):
# if (j, i) == (5, 3):
# pdb.set_trace()
if i == 0 or j == 0 or j == m or ((n - i) <= ((-1.0 * n * j) / m) + n):
diag_gcd_sums[i].append(0)
else:
# Inclusion-exclusion: left cell + upper cell - diagonal overlap.
recJ = diag_gcd_sums[i][j-1] if j != 0 else 0
recI = diag_gcd_sums[i-1][j] if i != 0 else 0
intersection = diag_gcd_sums[i-1][j-1] if i != 0 and j != 0 else 0
if recJ == 0:
print 'HERE: ', (j, i)
recJ = intersection
diag_gcd_sum = recJ + recI - intersection + fractions.gcd(i, j)
diag_gcd_sums[i].append(diag_gcd_sum)
# Pretty-print the diag_gcd_sums table with row/column indices (Python 2
# print statements with trailing commas suppress newlines; indentation was
# lost in extraction).
def print_diag_gcd_sums():
print 'diag gcd sums:'
print ' ',
for i in xrange(0, len(diag_gcd_sums[0])):
print i,
print ' ',
print ''
print ' ',
print '-' * (len(diag_gcd_sums[0]) * 5)
for i in xrange(0, len(diag_gcd_sums)):
print ' ' + str(i) + ' | ',
for j in xrange(0, len(diag_gcd_sums[i])):
print diag_gcd_sums[i][j],
print ' ',
print ''
print_diag_gcd_sums()
# Closed-form counts for the quad-enumeration problem; each formula carries
# the author's own verification notes for small (n, m). They read the
# module-level n, m and diag_gcd_sums built above.
# round up (even if is int; int() rounds down)
diag_n = int(((-1.0 * n * (m - 1)) / m) + n) + 1
# NOT VERIFIED FOR (3,3)
# verified for (2, 2) and (3, 3) and (4, 4)!!! THIS IS INTERIOR Z ONLY
sum_oa = (1.0 / 48.0) * (n - 1) * (6 * m**2 * (n + 2) + 6 * m * (n - 2) - n * (3 * n**2 + 13 * n + 10)) - 0.5 * diag_gcd_sums[diag_n][m - 1]
sum_oc = sum_oa
# verified for (2, 2) and (3, 3) and (4, 4)!!! THIS IS INTERIOR Z ONLY
sum_d = (1.0 / 24.0) * (n - 1) * (6 * m**2 * n - 2 * m * (4 * n**2 + n + 12) + 3 * n**3 + n**2 + 10 * n + 24)
# verified for (2, 2) and (3, 3) and (4, 4)!!! THIS IS INTERIOR Z ONLY
sum_b_i = 0.25 * (n - 1) * (-4 + 2 * m**2 * (n - 1) + n**2 - m * (-6 + 3 * n + n**2) - (2 * m - n - 2) * fractions.gcd(n,m))
# verified for (2, 2) and (3, 3) and (4, 4)!!! THIS IS INTERIOR Z ONLY
sum_bo = 0.5 * (n - 1) * (4 + 2 * m**2 + m * (n - 6) - n**2)
# verified for (2, 2)
border_z = (n - 1) * (m - 1) * 2 * 3 * 2
##############################
# Single-point (zm, zn) variants of the same counts.
(n, m) = (3, 3)
(zm, zn) = (2, 1)
# verified!!!1!!1!11!!!!!
oa = 0.5 * (zn * zm + zn + zm - fractions.gcd(zn, zm))
# verified!!!1!!1!11!!!!!
oc = 0.5 * (zn * zm + zn + zm - fractions.gcd(zn, zm))
# verified!!!1!!1!11!!!!!
b_i = 0.5 * (m * n - n - m - fractions.gcd(n,m)) + 1
# verified!!!1!!1!11!!!!!
b_o = n + m - 2
# verified!!!1!!1!11!!!!!
d = (m - zm + 1) * (zn + 1) - (m - zm + 1) - zn - 1
##############################
# (m, n) = (2, 2)
# (zm, zn) = (m, 1)
# (m, n) = (3, 3)
# (zm, zn) = (1, 0)
# 0.5 * (zn * zm + zn + zm - fractions.gcd(zn, zm))
# 0.5 * m * (m + 1) - 1
# 'a_c'
# (m - zm + 1) * (zn + 1) - (m - zm + 1) - zn - 1
# '-------------------------------------'
# (zm, zn) = (2, 0)
# 0.5 * (zn * zm + zn + zm - fractions.gcd(zn, zm))
# 0.5 * m * (m + 1) - 1
# 'a_c'
# (m - zm + 1) * (zn + 1) - (m - zm + 1) - zn - 1
# '-------------------------------------'
# (zm, zn) = (2, 1)
# 0.5 * (zn * zm + zn + zm - fractions.gcd(zn, zm))
# 0.5 * m * (m + 1) - 1
# 'a_c'
# (m - zm + 1) * (zn + 1) - (m - zm + 1) - zn - 1
# '-------------------------------------'
# (zm, zn) = (3, 1)
# 0.5 * (zn * zm + zn + zm - fractions.gcd(zn, zm))
# 0.5 * m * (m + 1) - 1
# 'a_c'
# (m - zm + 1) * (zn + 1) - (m - zm + 1) - zn - 1
# '-------------------------------------'
# (zm, zn) = (3, 2)
# 0.5 * (zn * zm + zn + zm - fractions.gcd(zn, zm))
# 0.5 * m * (m + 1) - 1
# 'a_c'
# (m - zm + 1) * (zn + 1) - (m - zm + 1) - zn - 1
# '-------------------------------------'
# (m, n) = (4, 4)
# (zm, zn) = (2, 1)
# (m, n) = (5, 5)
# (zm, zn) = (4, 2)
| {
"repo_name": "bgwines/project-euler",
"path": "src/in progress/1-corner-quads.py",
"copies": "1",
"size": "3812",
"license": "bsd-3-clause",
"hash": -7078288630280940000,
"line_mean": 27.447761194,
"line_max": 140,
"alpha_frac": 0.3911332634,
"autogenerated": false,
"ratio": 2.1782857142857144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3069418977685714,
"avg_score": null,
"num_lines": null
} |
# 2. Create build/pyglet.wxs from pyglet.wxs, add all file components
# 3. Run candle and light on build/pyglet.wxs to generate
# ../../dist/pyglet.msi
import os
import re
import shutil
import subprocess
from uuid import uuid1
from xml.dom.minidom import parse
import pkg_resources
class PythonVersion:
    """Registry and WiX-property metadata for one installable Python version."""

    def __init__(self, version, key_root, display_version):
        self.version = version
        self.display_version = display_version
        # e.g. version '2.5' + root 'HKLM' -> unique id 'PY25HKLM'
        self.id = 'PY{}{}'.format(version.replace('.', ''), key_root)
        self.key_root = key_root
        self.key = r'SOFTWARE\Python\PythonCore\%s\InstallPath' % version
        self.dir_prop = 'PYTHONHOME%s' % self.id
        self.exe_prop = 'PYTHONEXE%s' % self.id
        self.components = []
# Python installs we generate components/features for; each version appears
# twice so per-user (HKCU) installs are detected as well as machine-wide.
PYTHON_VERSIONS = (
PythonVersion('2.4', 'HKLM', 'Python 2.4'),
PythonVersion('2.5', 'HKLM', 'Python 2.5'),
PythonVersion('2.6', 'HKLM', 'Python 2.6'),
PythonVersion('2.4', 'HKCU', 'Python 2.4 (current user only)'),
PythonVersion('2.5', 'HKCU', 'Python 2.5 (current user only)'),
PythonVersion('2.6', 'HKCU', 'Python 2.6 (current user only)'),
)
# Shown (and install aborted) when no supported Python registry key exists.
MISSING_PYTHON_MESSAGE = 'pyglet requires Python 2.4 or later. The ' \
'installation will be aborted.'
# Package names that add_package() should skip entirely.
exclude_packages = list()
# All element ids handed out so far (WiX ids must be unique per document).
ids = set()


def id(name):
    """Return *name*, suffixed with a counter if it was already handed out."""
    candidate = name
    suffix = 1
    while candidate in ids:
        suffix += 1
        candidate = '%s%d' % (name, suffix)
    ids.add(candidate)
    return candidate
# All 8.3-style short filenames handed out so far.
shortnames = set()


def shortname(name, ext):
    """Return a unique 8.3-style short filename for *name*.*ext*."""
    candidate = '%s.%s' % (name[:8], ext)
    counter = 1
    while candidate in shortnames:
        counter += 1
        # Collisions get a two-digit counter after a 6-char stem.
        candidate = '%s%02d.%s' % (name[:6], counter, ext)
    shortnames.add(candidate)
    return candidate
def node(doc, node_name, **kwargs):
    """Create an element named *node_name* on *doc* with the given attributes."""
    element = doc.createElement(node_name)
    for attr, value in kwargs.items():
        element.setAttribute(attr, value)
    return element
def add_package(name, src_dir, doc, dest_node, pyver):
    """Recursively add package *name* (found under *src_dir*) to the wxs tree."""
    if name in exclude_packages:
        return
    package_path = os.path.join(src_dir, name)
    package_dir = node(doc, 'Directory',
                       Id=id('%sDir' % name),
                       Name=name)
    # Blank text nodes keep the emitted XML readable.
    dest_node.appendChild(doc.createTextNode('\n\n'))
    dest_node.appendChild(package_dir)
    dest_node.appendChild(doc.createTextNode('\n\n'))
    package_dir.appendChild(doc.createTextNode('\n'))
    for entry in os.listdir(package_path):
        entry_path = os.path.join(package_path, entry)
        if os.path.isdir(entry_path):
            # Only descend into real packages (those with an __init__.py).
            if os.path.exists(os.path.join(entry_path, '__init__.py')):
                add_package(entry, package_path, doc, package_dir, pyver)
        elif entry.endswith('.py'):
            add_module(entry, package_path, doc, package_dir, pyver)
def component_id(name, pyver):
    """Allocate a unique component id and register it on *pyver*."""
    unique = id(name)
    pyver.components.append(unique)
    return unique
# Monotonic clock_seq so successive uuid1 calls can never collide.
guid_seq = 0


def guid():
    """Return a fresh 32-char uppercase hex GUID."""
    global guid_seq
    guid_seq += 1
    return uuid1(clock_seq=guid_seq).hex.upper()
def add_module(name, src_dir, doc, dest_node, pyver):
    """Add one .py module as a wxs Component (the file plus .pyc/.pyo removal)."""
    module_path = os.path.join(src_dir, name)
    stem = os.path.splitext(name)[0]
    component = node(doc, 'Component',
                     Id=component_id('%sComponent' % stem, pyver),
                     Guid=guid())
    component.appendChild(
        node(doc, 'File',
             Id=id('%sPy' % stem),
             Name=name,
             DiskId='1',
             Source=module_path))
    # Byte-compiled artifacts are created post-install, so their removal on
    # uninstall must be declared explicitly.
    for suffix, tag in (('pyc', 'Pyc'), ('pyo', 'Pyo')):
        component.appendChild(
            node(doc, 'RemoveFile',
                 Id=id('%s%s' % (stem, tag)),
                 Name='%s.%s' % (stem, suffix),
                 On='uninstall'))
    dest_node.appendChild(component)
    # Trailing newline keeps the emitted XML readable.
    dest_node.appendChild(doc.createTextNode('\n'))
def call(cmd):
    """Echo *cmd*, run it through the shell, and return its exit status."""
    print(cmd)
    status = subprocess.call(cmd, shell=True)
    return status
# Top-level build driver: locate the tree, parse the release version out of
# setup.py, fill in the WiX template, then candle/light it into an MSI.
# NOTE(review): indentation of the compound statements below was lost in
# extraction and must be restored before this will run.
script_dir = os.path.dirname(__file__)
root_dir = os.path.join(script_dir, '../..')
dist_dir = os.path.join(root_dir, 'dist')
try:
os.makedirs(dist_dir)
except OSError:
pass
# Copy current avbin into res
# NOTE(review): hard-coded Windows system path — fails anywhere else.
shutil.copyfile('c:/windows/system32/avbin.dll',
os.path.join(script_dir, 'res', 'avbin.dll'))
# Determine release version from setup.py
# (the loop keeps the LAST matching VERSION line)
version_re = re.compile("VERSION = '([^']*)'")
for line in open(os.path.join(root_dir, 'setup.py')):
match = version_re.match(line)
if match:
version = match.groups()[0]
# Create a Windows-friendly dotted number for the version
# Version string must not have any letters, so use:
# alpha = x.x.x.(0 + alpha num)
# beta = x.x.x.(16 + beta num)
# rc = x.x.x.(32 + rc num)
# release = x.x.x.128 -->
parts = list(pkg_resources.parse_version(version))
major = int(parts.pop(0))
minor = patch = tagnum = 0
if parts[0][0] != '*':
minor = int(parts.pop(0))
if parts[0][0] != '*':
patch = int(parts.pop(0))
tag = parts.pop(0)
if tag == '*alpha':
base = 0
elif tag == '*beta':
base = 16
elif tag == '*rc':
base = 32
elif tag == '*final':
base = 128
else:
assert False, 'Unrecognised version tag "%s"' % tag
if parts and parts[0][0] != '*':
tagnum = int(parts.pop(0))
assert not parts or parts[0] == '*final'
version_windows = '%d.%d.%d.%d' % (major, minor, patch, base + tagnum)
print('Version %s is Windows version %s' % (version, version_windows))
print('Writing pyglet.wxs')
# Open template wxs and find Product element
wxs = parse(os.path.join(script_dir, 'pyglet.in.wxs'))
Product = wxs.getElementsByTagName('Product')[0]
Product.setAttribute('Version', version_windows)
# Add Python discovery
for pyver in PYTHON_VERSIONS:
Property = node(wxs, 'Property',
Id=pyver.dir_prop)
Property.appendChild(
node(wxs, 'RegistrySearch',
Id='%sRegSearch' % pyver.dir_prop,
Root=pyver.key_root,
Key=pyver.key,
Type='directory'))
Product.appendChild(Property)
# Add install conditional on at least one Python version present.
Condition = node(wxs, 'Condition',
Message=MISSING_PYTHON_MESSAGE)
Condition.appendChild(wxs.createTextNode(
' or '.join([pyver.dir_prop for pyver in PYTHON_VERSIONS])))
Product.appendChild(Condition)
# Get TARGETDIR Directory element
for elem in wxs.getElementsByTagName('Directory'):
if elem.getAttribute('Id') == 'TARGETDIR':
target_dir = elem
break
# Create entire set of components for each python version (WiX 3 will
# ensure only one copy of the source file is in the archive)
for pyver in PYTHON_VERSIONS:
python_home = node(wxs, 'Directory',
Id=pyver.dir_prop)
target_dir.appendChild(python_home)
lib_dir = node(wxs, 'Directory',
Id='%sLibDir' % pyver.dir_prop,
Name='Lib')
python_home.appendChild(lib_dir)
site_packages = node(wxs, 'Directory',
Id='%sSitePackages' % pyver.dir_prop,
Name='site-packages')
lib_dir.appendChild(site_packages)
add_package('pyglet', root_dir, wxs, site_packages, pyver)
# Add all components to features
RuntimeFeature = wxs.getElementsByTagName('Feature')[0]
for pyver in PYTHON_VERSIONS:
feature = node(wxs, 'Feature',
Id='RuntimeFeature%s' % pyver.id,
Title='pyglet runtime for %s' % pyver.display_version,
Level='1',
AllowAdvertise='no')
condition = node(wxs, 'Condition',
Level='0')
condition.appendChild(wxs.createTextNode('NOT ' + pyver.dir_prop))
feature.appendChild(condition)
for component in pyver.components:
feature.appendChild(node(wxs, 'ComponentRef',
Id=component))
feature.appendChild(wxs.createTextNode('\n'))
RuntimeFeature.appendChild(feature)
# Add byte compilation custom actions
last_action = 'InstallFinalize'
InstallExecuteSequence = \
wxs.getElementsByTagName('InstallExecuteSequence')[0]
UI = wxs.getElementsByTagName('UI')[0]
for pyver in PYTHON_VERSIONS:
# Actions are conditional on the feature being installed
def cond(node):
node.appendChild(wxs.createTextNode(
'(&RuntimeFeature%s=3) AND NOT(!RuntimeFeature%s=3)' % (
pyver.id, pyver.id)))
return node
# Define the actions
Product.appendChild(node(wxs, 'CustomAction',
Id='SetPythonExe%s' % pyver.id,
Property=pyver.exe_prop,
Value=r'[%s]\pythonw.exe' % pyver.dir_prop))
Product.appendChild(node(wxs, 'CustomAction',
Id='ByteCompile%s' % pyver.id,
Property=pyver.exe_prop,
ExeCommand=r'-c "import compileall; compileall.compile_dir(\"[%s]\Lib\site-packages\pyglet\", force=1)"' % pyver.dir_prop,
Return='ignore'))
Product.appendChild(node(wxs, 'CustomAction',
Id='ByteOptimize%s' % pyver.id,
Property=pyver.exe_prop,
ExeCommand=r'-OO -c "import compileall; compileall.compile_dir(\"[%s]\Lib\site-packages\pyglet\", force=1)"' % pyver.dir_prop,
Return='ignore'))
# Schedule execution of these actions
InstallExecuteSequence.appendChild(cond(
node(wxs, 'Custom',
Action='SetPythonExe%s' % pyver.id,
After=last_action)))
InstallExecuteSequence.appendChild(cond(
node(wxs, 'Custom',
Action='ByteCompile%s' % pyver.id,
After='SetPythonExe%s' % pyver.id)))
InstallExecuteSequence.appendChild(cond(
node(wxs, 'Custom',
Action='ByteOptimize%s' % pyver.id,
After='ByteCompile%s' % pyver.id)))
last_action = 'ByteOptimize%s' % pyver.id
# Set progress text for the actions
progress = node(wxs, 'ProgressText',
Action='ByteCompile%s' % pyver.id)
progress.appendChild(wxs.createTextNode(
'Byte-compiling modules for Python %s' % pyver.version))
UI.appendChild(progress)
progress = node(wxs, 'ProgressText',
Action='ByteOptimize%s' % pyver.id)
progress.appendChild(wxs.createTextNode(
'Byte-optimizing modules for Python %s' % pyver.version))
UI.appendChild(progress)
# Write wxs file
wxs.writexml(open(os.path.join(script_dir, 'pyglet.wxs'), 'w'))
# Compile
call('candle -out %s %s' % (os.path.join(script_dir, 'pyglet.wixobj'),
os.path.join(script_dir, 'pyglet.wxs')))
# Link
call('light -sval -out %s %s' %
(os.path.join(dist_dir, 'pyglet-%s.msi' % version),
os.path.join(script_dir, 'pyglet.wixobj')))
| {
"repo_name": "bitcraft/pyglet",
"path": "tools/genmsi/genmsi.py",
"copies": "1",
"size": "11530",
"license": "bsd-3-clause",
"hash": 7561299394476157000,
"line_mean": 33.833836858,
"line_max": 159,
"alpha_frac": 0.5662619254,
"autogenerated": false,
"ratio": 3.732599546778893,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4798861472178893,
"avg_score": null,
"num_lines": null
} |
# 2. Create build/pyglet.wxs from pyglet.wxs, add all file components
# 3. Run candle and light on build/pyglet.wxs to generate
# ../../dist/pyglet.msi
import os
import re
import shutil
import subprocess
from uuid import uuid1
from xml.dom.minidom import parse
import pkg_resources
class PythonVersion(object):
    """Registry and WiX-property metadata for one installable Python version."""

    def __init__(self, version, key_root, display_version):
        self.version = version
        self.display_version = display_version
        # e.g. version '2.5' + root 'HKLM' -> unique id 'PY25HKLM'
        self.id = 'PY{}{}'.format(version.replace('.', ''), key_root)
        self.key_root = key_root
        self.key = r'SOFTWARE\Python\PythonCore\%s\InstallPath' % version
        self.dir_prop = 'PYTHONHOME%s' % self.id
        self.exe_prop = 'PYTHONEXE%s' % self.id
        self.components = []
# Python installs we generate components/features for; each version appears
# twice so per-user (HKCU) installs are detected as well as machine-wide.
PYTHON_VERSIONS = (
PythonVersion('2.4', 'HKLM', 'Python 2.4'),
PythonVersion('2.5', 'HKLM', 'Python 2.5'),
PythonVersion('2.6', 'HKLM', 'Python 2.6'),
PythonVersion('2.4', 'HKCU', 'Python 2.4 (current user only)'),
PythonVersion('2.5', 'HKCU', 'Python 2.5 (current user only)'),
PythonVersion('2.6', 'HKCU', 'Python 2.6 (current user only)'),
)
# Shown (and install aborted) when no supported Python registry key exists.
MISSING_PYTHON_MESSAGE = 'pyglet requires Python 2.4 or later. The ' \
'installation will be aborted.'
# Package names that add_package() should skip entirely.
exclude_packages = []
# All element ids handed out so far (WiX ids must be unique per document).
ids = set()


def id(name):
    """Return *name*, suffixed with a counter if it was already handed out."""
    candidate = name
    suffix = 1
    while candidate in ids:
        suffix += 1
        candidate = '%s%d' % (name, suffix)
    ids.add(candidate)
    return candidate
# All 8.3-style short filenames handed out so far.
shortnames = set()


def shortname(name, ext):
    """Return a unique 8.3-style short filename for *name*.*ext*."""
    candidate = '%s.%s' % (name[:8], ext)
    counter = 1
    while candidate in shortnames:
        counter += 1
        # Collisions get a two-digit counter after a 6-char stem.
        candidate = '%s%02d.%s' % (name[:6], counter, ext)
    shortnames.add(candidate)
    return candidate
def node(doc, node_name, **kwargs):
    """Create an element named *node_name* on *doc* with the given attributes."""
    element = doc.createElement(node_name)
    for attr, value in kwargs.items():
        element.setAttribute(attr, value)
    return element
def add_package(name, src_dir, doc, dest_node, pyver):
    """Recursively add package *name* (found under *src_dir*) to the wxs tree."""
    if name in exclude_packages:
        return
    package_path = os.path.join(src_dir, name)
    package_dir = node(doc, 'Directory',
                       Id=id('%sDir' % name),
                       Name=name)
    # Blank text nodes keep the emitted XML readable.
    dest_node.appendChild(doc.createTextNode('\n\n'))
    dest_node.appendChild(package_dir)
    dest_node.appendChild(doc.createTextNode('\n\n'))
    package_dir.appendChild(doc.createTextNode('\n'))
    for entry in os.listdir(package_path):
        entry_path = os.path.join(package_path, entry)
        if os.path.isdir(entry_path):
            # Only descend into real packages (those with an __init__.py).
            if os.path.exists(os.path.join(entry_path, '__init__.py')):
                add_package(entry, package_path, doc, package_dir, pyver)
        elif entry.endswith('.py'):
            add_module(entry, package_path, doc, package_dir, pyver)
def component_id(name, pyver):
    """Allocate a unique component id and register it on *pyver*."""
    unique = id(name)
    pyver.components.append(unique)
    return unique
# Monotonic clock_seq so successive uuid1 calls can never collide.
guid_seq = 0


def guid():
    """Return a fresh 32-char uppercase hex GUID."""
    global guid_seq
    guid_seq += 1
    return uuid1(clock_seq=guid_seq).hex.upper()
def add_module(name, src_dir, doc, dest_node, pyver):
    """Add one .py module as a wxs Component (the file plus .pyc/.pyo removal)."""
    module_path = os.path.join(src_dir, name)
    stem = os.path.splitext(name)[0]
    component = node(doc, 'Component',
                     Id=component_id('%sComponent' % stem, pyver),
                     Guid=guid())
    component.appendChild(
        node(doc, 'File',
             Id=id('%sPy' % stem),
             Name=name,
             DiskId='1',
             Source=module_path))
    # Byte-compiled artifacts are created post-install, so their removal on
    # uninstall must be declared explicitly.
    for suffix, tag in (('pyc', 'Pyc'), ('pyo', 'Pyo')):
        component.appendChild(
            node(doc, 'RemoveFile',
                 Id=id('%s%s' % (stem, tag)),
                 Name='%s.%s' % (stem, suffix),
                 On='uninstall'))
    dest_node.appendChild(component)
    # Trailing newline keeps the emitted XML readable.
    dest_node.appendChild(doc.createTextNode('\n'))
def call(cmd):
    """Echo *cmd*, run it through the shell, and return its exit status."""
    # print(x) with a single argument behaves identically under Python 2.
    print(cmd)
    return subprocess.call(cmd, shell=True)
if __name__ == '__main__':
script_dir = os.path.dirname(__file__)
root_dir = os.path.join(script_dir, '../..')
dist_dir = os.path.join(root_dir, 'dist')
try:
os.makedirs(dist_dir)
except OSError:
pass
# Copy current avbin into res
shutil.copyfile('c:/windows/system32/avbin.dll',
os.path.join(script_dir, 'res', 'avbin.dll'))
# Determine release version from setup.py
version_re = re.compile("VERSION = '([^']*)'")
for line in open(os.path.join(root_dir, 'setup.py')):
match = version_re.match(line)
if match:
version = match.groups()[0]
# Create a Windows-friendly dotted number for the version
# Version string must not have any letters, so use:
# alpha = x.x.x.(0 + alpha num)
# beta = x.x.x.(16 + beta num)
# rc = x.x.x.(32 + rc num)
# release = x.x.x.128 -->
parts = list(pkg_resources.parse_version(version))
major = int(parts.pop(0))
minor = patch = tagnum = 0
if parts[0][0] != '*':
minor = int(parts.pop(0))
if parts[0][0] != '*':
patch = int(parts.pop(0))
tag = parts.pop(0)
if tag == '*alpha':
base = 0
elif tag == '*beta':
base = 16
elif tag == '*rc':
base = 32
elif tag == '*final':
base = 128
else:
assert False, 'Unrecognised version tag "%s"' % tag
if parts and parts[0][0] != '*':
tagnum = int(parts.pop(0))
assert not parts or parts[0] == '*final'
version_windows = '%d.%d.%d.%d' % (major, minor, patch, base + tagnum)
print 'Version %s is Windows version %s' % (version, version_windows)
print 'Writing pyglet.wxs'
# Open template wxs and find Product element
wxs = parse(os.path.join(script_dir, 'pyglet.in.wxs'))
Product = wxs.getElementsByTagName('Product')[0]
Product.setAttribute('Version', version_windows)
# Add Python discovery
for pyver in PYTHON_VERSIONS:
Property = node(wxs, 'Property',
Id=pyver.dir_prop)
Property.appendChild(
node(wxs, 'RegistrySearch',
Id='%sRegSearch' % pyver.dir_prop,
Root=pyver.key_root,
Key=pyver.key,
Type='directory'))
Product.appendChild(Property)
# Add install conditional on at least one Python version present.
Condition = node(wxs, 'Condition',
Message=MISSING_PYTHON_MESSAGE)
Condition.appendChild(wxs.createTextNode(
' or '.join([pyver.dir_prop for pyver in PYTHON_VERSIONS])))
Product.appendChild(Condition)
# Get TARGETDIR Directory element
for elem in wxs.getElementsByTagName('Directory'):
if elem.getAttribute('Id') == 'TARGETDIR':
target_dir = elem
break
# Create entire set of components for each python version (WiX 3 will
# ensure only one copy of the source file is in the archive)
for pyver in PYTHON_VERSIONS:
python_home = node(wxs, 'Directory',
Id=pyver.dir_prop)
target_dir.appendChild(python_home)
lib_dir = node(wxs, 'Directory',
Id='%sLibDir' % pyver.dir_prop,
Name='Lib')
python_home.appendChild(lib_dir)
site_packages = node(wxs, 'Directory',
Id='%sSitePackages' % pyver.dir_prop,
Name='site-packages')
lib_dir.appendChild(site_packages)
add_package('pyglet', root_dir, wxs, site_packages, pyver)
# Add all components to features
RuntimeFeature = wxs.getElementsByTagName('Feature')[0]
for pyver in PYTHON_VERSIONS:
feature = node(wxs, 'Feature',
Id='RuntimeFeature%s' % pyver.id,
Title='pyglet runtime for %s' % pyver.display_version,
Level='1',
AllowAdvertise='no')
condition = node(wxs, 'Condition',
Level='0')
condition.appendChild(wxs.createTextNode('NOT ' + pyver.dir_prop))
feature.appendChild(condition)
for component in pyver.components:
feature.appendChild(node(wxs, 'ComponentRef',
Id=component))
feature.appendChild(wxs.createTextNode('\n'))
RuntimeFeature.appendChild(feature)
# Add byte-compilation custom actions: after InstallFinalize, resolve
# each installed Python's pythonw.exe and run compileall over the
# installed pyglet package (once plain, once with -OO).
last_action = 'InstallFinalize'
InstallExecuteSequence = \
    wxs.getElementsByTagName('InstallExecuteSequence')[0]
UI = wxs.getElementsByTagName('UI')[0]
for pyver in PYTHON_VERSIONS:
    # Actions are conditional on the feature being installed now
    # (&Feature=3) and not already installed (!Feature=3).
    def cond(elem, pyver=pyver):
        # Parameter renamed from `node` to avoid shadowing the outer
        # `node` helper; `pyver=pyver` binds the loop variable at
        # definition time (defensive against late-binding closures).
        elem.appendChild(wxs.createTextNode(
            '(&RuntimeFeature%s=3) AND NOT(!RuntimeFeature%s=3)' % (
                pyver.id, pyver.id)))
        return elem
    # Define the actions: first set a property holding the interpreter
    # path, then two exe actions that byte-compile the installed package.
    Product.appendChild(node(wxs, 'CustomAction',
        Id='SetPythonExe%s' % pyver.id,
        Property=pyver.exe_prop,
        Value=r'[%s]\pythonw.exe' % pyver.dir_prop))
    Product.appendChild(node(wxs, 'CustomAction',
        Id='ByteCompile%s' % pyver.id,
        Property=pyver.exe_prop,
        ExeCommand=r'-c "import compileall; compileall.compile_dir(\"[%s]\Lib\site-packages\pyglet\", force=1)"' % pyver.dir_prop,
        Return='ignore'))
    Product.appendChild(node(wxs, 'CustomAction',
        Id='ByteOptimize%s' % pyver.id,
        Property=pyver.exe_prop,
        ExeCommand=r'-OO -c "import compileall; compileall.compile_dir(\"[%s]\Lib\site-packages\pyglet\", force=1)"' % pyver.dir_prop,
        Return='ignore'))
    # Schedule SetPythonExe -> ByteCompile -> ByteOptimize, chained after
    # the previous version's actions (or InstallFinalize for the first).
    InstallExecuteSequence.appendChild(cond(
        node(wxs, 'Custom',
             Action='SetPythonExe%s' % pyver.id,
             After=last_action)))
    InstallExecuteSequence.appendChild(cond(
        node(wxs, 'Custom',
             Action='ByteCompile%s' % pyver.id,
             After='SetPythonExe%s' % pyver.id)))
    InstallExecuteSequence.appendChild(cond(
        node(wxs, 'Custom',
             Action='ByteOptimize%s' % pyver.id,
             After='ByteCompile%s' % pyver.id)))
    last_action = 'ByteOptimize%s' % pyver.id
    # Progress text shown in the installer UI while the actions run.
    progress = node(wxs, 'ProgressText',
                    Action='ByteCompile%s' % pyver.id)
    progress.appendChild(wxs.createTextNode(
        'Byte-compiling modules for Python %s' % pyver.version))
    UI.appendChild(progress)
    progress = node(wxs, 'ProgressText',
                    Action='ByteOptimize%s' % pyver.id)
    progress.appendChild(wxs.createTextNode(
        'Byte-optimizing modules for Python %s' % pyver.version))
    UI.appendChild(progress)
# Serialize the completed WiX document, then compile and link it into
# the final MSI with the WiX toolchain.
wxs_path = os.path.join(script_dir, 'pyglet.wxs')
wixobj_path = os.path.join(script_dir, 'pyglet.wixobj')
# Use a context manager so the file handle is closed (and the document
# fully flushed) before candle reads it.
with open(wxs_path, 'w') as wxs_file:
    wxs.writexml(wxs_file)
# Compile the .wxs source into a .wixobj object file.
call('candle -out %s %s' % (wixobj_path, wxs_path))
# Link the object into the distributable MSI; -sval suppresses MSI
# validation during linking.
call('light -sval -out %s %s' % \
    (os.path.join(dist_dir, 'pyglet-%s.msi' % version),
     wixobj_path))
| {
"repo_name": "google-code-export/pyglet",
"path": "tools/genmsi/genmsi.py",
"copies": "26",
"size": "11619",
"license": "bsd-3-clause",
"hash": 9195530746191608000,
"line_mean": 34.7689873418,
"line_max": 136,
"alpha_frac": 0.5622686978,
"autogenerated": false,
"ratio": 3.71808,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.