| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
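The example rows below pair repository metadata with the file `content` and the `qsc_*` quality signals. As a minimal sketch of how a split with this schema might be loaded and sanity-checked, the snippet below assumes the data ships as Parquet readable with pandas; the file name `data.parquet` is a placeholder, and the two recomputed signals are illustrative approximations, not the dataset's reference implementation.

```python
# Minimal sketch (assumptions noted above): load one Parquet shard with the
# schema listed in the table and compare two simple quality signals against
# values recomputed from the `content` column.
import pandas as pd

df = pd.read_parquet("data.parquet")  # placeholder file name

# Repository metadata plus a few scalar columns.
print(df[["hexsha", "size", "lang", "max_stars_count", "alphanum_fraction"]].head())

def alphanum_fraction(text: str) -> float:
    # Share of characters that are alphanumeric.
    return sum(c.isalnum() for c in text) / max(len(text), 1)

def frac_lines_print(text: str) -> float:
    # Share of non-empty lines that start with a print call/statement.
    lines = [ln.strip() for ln in text.splitlines() if ln.strip()]
    return sum(ln.startswith("print") for ln in lines) / max(len(lines), 1)

row = df.iloc[0]
print(alphanum_fraction(row["content"]), row["alphanum_fraction"])
print(frac_lines_print(row["content"]),
      row["qsc_codepython_frac_lines_print_quality_signal"])
```

Filtering on signals such as `qsc_code_frac_chars_dupe_10grams_quality_signal` or `qsc_codepython_score_lines_no_logic_quality_signal` then reduces to ordinary boolean indexing on the DataFrame.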
hexsha: e4f3f5b2d09517330825637d500809f9259e85f8
size: 14,722
ext: py
lang: Python
max_stars_repo_path: eval/vqa_data_provider_layer.py
max_stars_repo_name: minsuu/vqa_mcb
max_stars_repo_head_hexsha: cb04e7cd1ceba4e7508024e5dbdfe08d1575b007
max_stars_repo_licenses: ["BSD-2-Clause"]
max_stars_count: 214
max_stars_repo_stars_event_min_datetime: 2016-07-13T00:17:44.000Z
max_stars_repo_stars_event_max_datetime: 2022-01-28T07:06:07.000Z
max_issues_repo_path: eval/vqa_data_provider_layer.py
max_issues_repo_name: afcarl/vqa-mcb
max_issues_repo_head_hexsha: 172775b2ec927456eecbe1aa5878b673482f2a54
max_issues_repo_licenses: ["BSD-2-Clause"]
max_issues_count: 23
max_issues_repo_issues_event_min_datetime: 2016-07-14T07:45:59.000Z
max_issues_repo_issues_event_max_datetime: 2019-08-15T02:56:34.000Z
max_forks_repo_path: eval/vqa_data_provider_layer.py
max_forks_repo_name: afcarl/vqa-mcb
max_forks_repo_head_hexsha: 172775b2ec927456eecbe1aa5878b673482f2a54
max_forks_repo_licenses: ["BSD-2-Clause"]
max_forks_count: 87
max_forks_repo_forks_event_min_datetime: 2016-07-12T18:20:32.000Z
max_forks_repo_forks_event_max_datetime: 2021-10-30T16:44:33.000Z
content:
import caffe
import numpy as np
import random
import os
import sys
import re
import json
import spacy
from operator import mul
GLOVE_EMBEDDING_SIZE = 300
CURRENT_DATA_SHAPE = None
SPATIAL_COORD = None
GLOVE = None
class LoadVQADataProvider:
def __init__(self, ques_file_path, img_file_pre, vdict_path, adict_path, \
batchsize=128, max_length=15, n_ans_vocabulary=1000, mode='train', data_shape=(2048)):
self.batchsize = batchsize
self.d_vocabulary = None
self.batch_index = None
self.batch_len = None
self.rev_adict = None
self.max_length = max_length
self.n_ans_vocabulary = n_ans_vocabulary
self.mode = mode
self.data_shape = data_shape
assert self.mode == 'test'
# spatial coordinates
normalized_coords = np.linspace(0, 2, num=14, endpoint=True, dtype=np.float32) / 200
self.x_coords = np.tile(normalized_coords, (14, 1)).reshape(1, 14, 14)
normalized_coords = normalized_coords.reshape((14, 1))
self.y_coords = np.tile(normalized_coords, (1, 14)).reshape(1, 14, 14)
self.coords = np.concatenate([self.x_coords, self.y_coords])
self.quesFile = ques_file_path
self.img_file_pre = img_file_pre
# load ques file
with open(self.quesFile,'r') as f:
print 'reading : ', self.quesFile
qdata = json.load(f)
qdic = {}
for q in qdata['questions']:
qdic[q['question_id']] = { 'qstr':q['question'], 'iid':q['image_id']}
self.qdic = qdic
# load vocabulary
with open(vdict_path,'r') as f:
vdict = json.load(f)
with open(adict_path,'r') as f:
adict = json.load(f)
self.n_vocabulary, self.vdict = len(vdict), vdict
self.n_ans_vocabulary, self.adict = len(adict), adict
self.nlp = spacy.load('en', vectors='en_glove_cc_300_1m_vectors')
self.glove_dict = {} # word -> glove vector
def getQuesIds(self):
return self.qdic.keys()
def getImgId(self,qid):
return self.qdic[qid]['iid']
def getQuesStr(self,qid):
return self.qdic[qid]['qstr']
def getAnsObj(self,qid):
if self.mode == 'test-dev' or self.mode == 'test':
return -1
return self.adic[qid]
def seq_to_list(self, s):
t_str = s.lower()
for i in [r'\?',r'\!',r'\'',r'\"',r'\$',r'\:',r'\@',r'\(',r'\)',r'\,',r'\.',r'\;']:
t_str = re.sub( i, '', t_str)
for i in [r'\-',r'\/']:
t_str = re.sub( i, ' ', t_str)
q_list = re.sub(r'\?','',t_str.lower()).split(' ')
q_list = filter(lambda x: len(x) > 0, q_list)
return q_list
def extract_answer(self,answer_obj):
""" Return the most popular answer in string."""
if self.mode == 'test-dev' or self.mode == 'test':
return -1
answer_list = [ answer_obj[i]['answer'] for i in xrange(10)]
dic = {}
for ans in answer_list:
if dic.has_key(ans):
dic[ans] +=1
else:
dic[ans] = 1
max_key = max((v,k) for (k,v) in dic.items())[1]
return max_key
def extract_answer_prob(self,answer_obj):
""" Return the most popular answer in string."""
if self.mode == 'test-dev' or self.mode == 'test':
return -1
answer_list = [ ans['answer'] for ans in answer_obj]
prob_answer_list = []
for ans in answer_list:
if self.adict.has_key(ans):
prob_answer_list.append(ans)
if len(prob_answer_list) == 0:
if self.mode == 'val' or self.mode == 'test-dev' or self.mode == 'test':
return 'hoge'
else:
raise Exception("This should not happen.")
else:
return random.choice(prob_answer_list)
def create_answer_vocabulary_dict(self, genome=False):
n_ans_vocabulary=self.n_ans_vocabulary
qid_list = self.getQuesIds()
adict = {'':0}
nadict = {'':1000000}
vid = 1
for qid in qid_list:
if genome and qid[0] == 'g':
continue
answer_obj = self.getAnsObj(qid)
answer_list = [ans['answer'] for ans in answer_obj]
for q_ans in answer_list:
# create dict
if adict.has_key(q_ans):
nadict[q_ans] += 1
else:
nadict[q_ans] = 1
adict[q_ans] = vid
vid +=1
# debug
klist = []
for k,v in sorted(nadict.items()):
klist.append((k,v))
nalist = []
for k,v in sorted(nadict.items(), key=lambda x:x[1]):
nalist.append((k,v))
alist = []
for k,v in sorted(adict.items(), key=lambda x:x[1]):
alist.append((k,v))
# remove words that appear less than once
n_del_ans = 0
n_valid_ans = 0
adict_nid = {}
for i, w in enumerate(nalist[:-n_ans_vocabulary]):
del adict[w[0]]
n_del_ans += w[1]
for i, w in enumerate(nalist[-n_ans_vocabulary:]):
n_valid_ans += w[1]
adict_nid[w[0]] = i
print 'Valid answers are : ', n_valid_ans
print 'Invalid answers are : ', n_del_ans
return n_ans_vocabulary, adict_nid
def create_vocabulary_dict(self):
#qid_list = self.vqa.getQuesIds()
qid_list = self.getQuesIds()
vdict = {'':0}
ndict = {'':0}
vid = 1
for qid in qid_list:
# sequence to list
q_str = self.getQuesStr(qid)
q_list = self.seq_to_list(q_str)
# create dict
for w in q_list:
if vdict.has_key(w):
ndict[w] += 1
else:
ndict[w] = 1
vdict[w] = vid
vid +=1
# debug
klist = []
for k,v in sorted(ndict.items()):
klist.append((k,v))
nlist = []
for k,v in sorted(ndict.items(), key=lambda x:x[1]):
nlist.append((k,v))
vlist = []
for k,v in sorted(vdict.items(), key=lambda x:x[1]):
vlist.append((k,v))
n_vocabulary = len(vlist)
#from IPython import embed; embed(); sys.exit()
return n_vocabulary, vdict
def qlist_to_vec(self, max_length, q_list):
"""
Converts a list of words into a format suitable for the embedding layer.
Arguments:
max_length -- the maximum length of a question sequence
q_list -- a list of words which are the tokens in the question
Returns:
qvec -- A max_length length vector containing one-hot indices for each word
cvec -- A max_length length sequence continuation indicator vector
glove_matrix -- A max_length x GLOVE_EMBEDDING_SIZE matrix containing the glove embedding for
each word
"""
qvec = np.zeros(max_length)
cvec = np.zeros(max_length)
glove_matrix = np.zeros(max_length * GLOVE_EMBEDDING_SIZE).reshape(max_length, GLOVE_EMBEDDING_SIZE)
for i in xrange(max_length):
if i < max_length - len(q_list):
cvec[i] = 0
else:
w = q_list[i-(max_length-len(q_list))]
if w not in self.glove_dict:
self.glove_dict[w] = self.nlp(u'%s' % w).vector
glove_matrix[i] = self.glove_dict[w]
# is the word in the vocabulary?
if self.vdict.has_key(w) is False:
w = ''
qvec[i] = self.vdict[w]
cvec[i] = 0 if i == max_length - len(q_list) else 1
return qvec, cvec, glove_matrix
def answer_to_vec(self, ans_str):
""" Return answer id if the answer is included in vocaburary otherwise '' """
if self.mode =='test-dev' or self.mode == 'test':
return -1
if self.adict.has_key(ans_str):
ans = self.adict[ans_str]
else:
ans = self.adict['']
return ans
def vec_to_answer(self, ans_symbol):
""" Return answer id if the answer is included in vocaburary otherwise '' """
if self.rev_adict is None:
rev_adict = {}
for k,v in self.adict.items():
rev_adict[v] = k
self.rev_adict = rev_adict
return self.rev_adict[ans_symbol]
def create_batch(self,qid_list):
qvec = (np.zeros(self.batchsize*self.max_length)).reshape(self.batchsize,self.max_length)
cvec = (np.zeros(self.batchsize*self.max_length)).reshape(self.batchsize,self.max_length)
ivec = (np.zeros(self.batchsize*reduce(mul, self.data_shape))).reshape(self.batchsize,*self.data_shape)
avec = (np.zeros(self.batchsize)).reshape(self.batchsize)
glove_matrix = np.zeros(self.batchsize * self.max_length * GLOVE_EMBEDDING_SIZE).reshape(\
self.batchsize, self.max_length, GLOVE_EMBEDDING_SIZE)
for i,qid in enumerate(qid_list):
# load raw question information
q_str = self.getQuesStr(qid)
q_ans = self.getAnsObj(qid)
q_iid = self.getImgId(qid)
# convert question to vec
q_list = self.seq_to_list(q_str)
t_qvec, t_cvec, t_glove_matrix = self.qlist_to_vec(self.max_length, q_list)
# convert answer to vec
try:
if type(qid) == int:
t_ivec = np.load(self.img_file_pre + str(q_iid).zfill(12) + '.jpg.npz')['x']
t_ivec = ( t_ivec / np.sqrt((t_ivec**2).sum()) )
elif qid[0] == 't':
t_ivec = np.load(self.img_file_pre_t + str(q_iid).zfill(12) + '.jpg.npz')['x']
t_ivec = ( t_ivec / np.sqrt((t_ivec**2).sum()) )
elif qid[0] =='v':
t_ivec = np.load(self.img_file_pre_v + str(q_iid).zfill(12) + '.jpg.npz')['x']
t_ivec = ( t_ivec / np.sqrt((t_ivec**2).sum()) )
elif qid[0] == 'g':
t_ivec = np.load(self.img_file_pre_g + str(q_iid) + '.jpg.npz')['x']
t_ivec = ( t_ivec / np.sqrt((t_ivec**2).sum()) )
else:
raise Exception('Error occured here')
t_ivec = np.load(self.img_file_pre + str(q_iid).zfill(12) + '.jpg.npz')['x']
t_ivec = ( t_ivec / np.sqrt((t_ivec**2).sum()) )
if SPATIAL_COORD:
t_ivec = np.concatenate([t_ivec, self.coords.copy()])
except:
t_ivec = 0.
print 'data not found for qid : ', q_iid, self.mode
# convert answer to vec
if self.mode == 'val' or self.mode == 'test-dev' or self.mode == 'test':
q_ans_str = self.extract_answer(q_ans)
else:
q_ans_str = self.extract_answer_prob(q_ans)
t_avec = self.answer_to_vec(q_ans_str)
qvec[i,...] = t_qvec
cvec[i,...] = t_cvec
ivec[i,...] = t_ivec
avec[i,...] = t_avec
glove_matrix[i,...] = t_glove_matrix
return qvec, cvec, ivec, avec, glove_matrix
def get_batch_vec(self):
if self.batch_len is None:
#qid_list = self.vqa.getQuesIds()
self.n_skipped = 0
qid_list = self.getQuesIds()
# random.shuffle(qid_list)
self.qid_list = qid_list
self.batch_len = len(qid_list)
self.batch_index = 0
self.epoch_counter = 0
def has_at_least_one_valid_answer(t_qid):
#answer_obj = self.vqa.qa[t_qid]['answers']
answer_obj = self.getAnsObj(t_qid)
answer_list = [ans['answer'] for ans in answer_obj]
for ans in answer_list:
if self.adict.has_key(ans):
return True
counter = 0
t_qid_list = []
t_iid_list = []
while counter < self.batchsize:
# get qid
t_qid = self.qid_list[self.batch_index]
# get answer
#t_ans = self.extract_answer(self.vqa.qa[t_qid]['answers'])
# get image id
#t_ann = self.vqa.loadQA([t_qid])[0]
#t_iid = t_ann['image_id']
t_iid = self.getImgId(t_qid)
if self.mode == 'val' or self.mode == 'test-dev' or self.mode == 'test':
t_qid_list.append(t_qid)
t_iid_list.append(t_iid)
counter += 1
elif has_at_least_one_valid_answer(t_qid):
t_qid_list.append(t_qid)
t_iid_list.append(t_iid)
counter += 1
else:
self.n_skipped += 1
if self.batch_index < self.batch_len-1:
self.batch_index += 1
else:
self.epoch_counter += 1
#qid_list = self.vqa.getQuesIds()
qid_list = self.getQuesIds()
# random.shuffle(qid_list)
self.qid_list = qid_list
self.batch_index = 0
print("%d questions were skipped in a single epoch" % self.n_skipped)
self.n_skipped = 0
t_batch = self.create_batch(t_qid_list)
return t_batch + (t_qid_list, t_iid_list, self.epoch_counter)
class VQADataProviderLayer(caffe.Layer):
"""
Provide input data for VQA.
"""
def setup(self, bottom, top):
self.batchsize = json.loads(self.param_str)['batchsize']
names = ['data','cont','feature','label']
if GLOVE:
names.append('glove')
self.top_names = names
top[0].reshape(15,self.batchsize)
top[1].reshape(15,self.batchsize)
top[2].reshape(self.batchsize, *CURRENT_DATA_SHAPE)
top[3].reshape(self.batchsize)
if GLOVE:
top[4].reshape(15,self.batchsize,GLOVE_EMBEDDING_SIZE)
self.mode = json.loads(self.param_str)['mode']
if self.mode == 'val' or self.mode == 'test-dev' or self.mode == 'test':
pass
else:
raise NotImplementedError
def reshape(self, bottom, top):
pass
def forward(self, bottom, top):
if self.mode == 'val' or self.mode == 'test-dev' or self.mode == 'test':
pass
else:
raise NotImplementedError
def backward(self, top, propagate_down, bottom):
pass
| 35.73301
| 111
| 0.53831
| 1,964
| 14,722
| 3.837576
| 0.139511
| 0.028659
| 0.030251
| 0.026005
| 0.392331
| 0.352262
| 0.299721
| 0.274778
| 0.232453
| 0.205519
| 0
| 0.013631
| 0.342209
| 14,722
| 411
| 112
| 35.819951
| 0.764663
| 0.046054
| 0
| 0.265574
| 0
| 0.039344
| 0.039639
| 0.001974
| 0
| 0
| 0
| 0
| 0.003279
| 0
| null | null | 0.013115
| 0.029508
| null | null | 0.016393
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
hexsha: 900fb0840db60165c43f29441d88fb00f6f55c09
size: 718
ext: py
lang: Python
max_stars_repo_path: fastapi_crud/fastapicrud.py
max_stars_repo_name: miikapo/fastapi-crud
max_stars_repo_head_hexsha: d8517d93068b0e71fb114a695a41f48570387b9a
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: 5
max_stars_repo_stars_event_min_datetime: 2021-11-02T20:13:41.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-19T00:01:53.000Z
max_issues_repo_path: fastapi_crud/fastapicrud.py
max_issues_repo_name: miikapo/fastapi-crud
max_issues_repo_head_hexsha: d8517d93068b0e71fb114a695a41f48570387b9a
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: fastapi_crud/fastapicrud.py
max_forks_repo_name: miikapo/fastapi-crud
max_forks_repo_head_hexsha: d8517d93068b0e71fb114a695a41f48570387b9a
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
from fastapi import Depends
from sqlalchemy.ext.asyncio import AsyncEngine
from sqlalchemy.orm import sessionmaker
from typing import AsyncGenerator
from fastapi_crud.session import Session
from fastapi_crud.router import ModelRouter
from fastapi_crud.types import Model
class FastapiCRUD:
def __init__(self, engine: AsyncEngine) -> None:
self._session_maker = sessionmaker(engine, class_=Session)
self.session: Session = Depends(self._session)
async def _session(self) -> AsyncGenerator[Session, None]:
async with self._session_maker() as session:
yield session
def create_router(self, model: Model) -> ModelRouter:
return ModelRouter(model, self.session)
| 32.636364
| 66
| 0.754875
| 86
| 718
| 6.127907
| 0.372093
| 0.104364
| 0.085389
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17688
| 718
| 21
| 67
| 34.190476
| 0.891709
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.4375
| 0.0625
| 0.6875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
hexsha: 9012c72f255c969953043607b7f84aeb3ccb4764
size: 9,807
ext: py
lang: Python
max_stars_repo_path: octopus_deploy_swagger_client/models/artifact_resource.py
max_stars_repo_name: cvent/octopus-deploy-api-client
max_stars_repo_head_hexsha: 0e03e842e1beb29b132776aee077df570b88366a
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: octopus_deploy_swagger_client/models/artifact_resource.py
max_issues_repo_name: cvent/octopus-deploy-api-client
max_issues_repo_head_hexsha: 0e03e842e1beb29b132776aee077df570b88366a
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: octopus_deploy_swagger_client/models/artifact_resource.py
max_forks_repo_name: cvent/octopus-deploy-api-client
max_forks_repo_head_hexsha: 0e03e842e1beb29b132776aee077df570b88366a
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ArtifactResource(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'space_id': 'str',
'filename': 'str',
'source': 'str',
'server_task_id': 'str',
'created': 'datetime',
'log_correlation_id': 'str',
'last_modified_on': 'datetime',
'last_modified_by': 'str',
'links': 'dict(str, str)'
}
attribute_map = {
'id': 'Id',
'space_id': 'SpaceId',
'filename': 'Filename',
'source': 'Source',
'server_task_id': 'ServerTaskId',
'created': 'Created',
'log_correlation_id': 'LogCorrelationId',
'last_modified_on': 'LastModifiedOn',
'last_modified_by': 'LastModifiedBy',
'links': 'Links'
}
def __init__(self, id=None, space_id=None, filename=None, source=None, server_task_id=None, created=None, log_correlation_id=None, last_modified_on=None, last_modified_by=None, links=None): # noqa: E501
"""ArtifactResource - a model defined in Swagger""" # noqa: E501
self._id = None
self._space_id = None
self._filename = None
self._source = None
self._server_task_id = None
self._created = None
self._log_correlation_id = None
self._last_modified_on = None
self._last_modified_by = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if space_id is not None:
self.space_id = space_id
self.filename = filename
if source is not None:
self.source = source
if server_task_id is not None:
self.server_task_id = server_task_id
if created is not None:
self.created = created
if log_correlation_id is not None:
self.log_correlation_id = log_correlation_id
if last_modified_on is not None:
self.last_modified_on = last_modified_on
if last_modified_by is not None:
self.last_modified_by = last_modified_by
if links is not None:
self.links = links
@property
def id(self):
"""Gets the id of this ArtifactResource. # noqa: E501
:return: The id of this ArtifactResource. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ArtifactResource.
:param id: The id of this ArtifactResource. # noqa: E501
:type: str
"""
self._id = id
@property
def space_id(self):
"""Gets the space_id of this ArtifactResource. # noqa: E501
:return: The space_id of this ArtifactResource. # noqa: E501
:rtype: str
"""
return self._space_id
@space_id.setter
def space_id(self, space_id):
"""Sets the space_id of this ArtifactResource.
:param space_id: The space_id of this ArtifactResource. # noqa: E501
:type: str
"""
self._space_id = space_id
@property
def filename(self):
"""Gets the filename of this ArtifactResource. # noqa: E501
:return: The filename of this ArtifactResource. # noqa: E501
:rtype: str
"""
return self._filename
@filename.setter
def filename(self, filename):
"""Sets the filename of this ArtifactResource.
:param filename: The filename of this ArtifactResource. # noqa: E501
:type: str
"""
if filename is None:
raise ValueError("Invalid value for `filename`, must not be `None`") # noqa: E501
self._filename = filename
@property
def source(self):
"""Gets the source of this ArtifactResource. # noqa: E501
:return: The source of this ArtifactResource. # noqa: E501
:rtype: str
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this ArtifactResource.
:param source: The source of this ArtifactResource. # noqa: E501
:type: str
"""
self._source = source
@property
def server_task_id(self):
"""Gets the server_task_id of this ArtifactResource. # noqa: E501
:return: The server_task_id of this ArtifactResource. # noqa: E501
:rtype: str
"""
return self._server_task_id
@server_task_id.setter
def server_task_id(self, server_task_id):
"""Sets the server_task_id of this ArtifactResource.
:param server_task_id: The server_task_id of this ArtifactResource. # noqa: E501
:type: str
"""
self._server_task_id = server_task_id
@property
def created(self):
"""Gets the created of this ArtifactResource. # noqa: E501
:return: The created of this ArtifactResource. # noqa: E501
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this ArtifactResource.
:param created: The created of this ArtifactResource. # noqa: E501
:type: datetime
"""
self._created = created
@property
def log_correlation_id(self):
"""Gets the log_correlation_id of this ArtifactResource. # noqa: E501
:return: The log_correlation_id of this ArtifactResource. # noqa: E501
:rtype: str
"""
return self._log_correlation_id
@log_correlation_id.setter
def log_correlation_id(self, log_correlation_id):
"""Sets the log_correlation_id of this ArtifactResource.
:param log_correlation_id: The log_correlation_id of this ArtifactResource. # noqa: E501
:type: str
"""
self._log_correlation_id = log_correlation_id
@property
def last_modified_on(self):
"""Gets the last_modified_on of this ArtifactResource. # noqa: E501
:return: The last_modified_on of this ArtifactResource. # noqa: E501
:rtype: datetime
"""
return self._last_modified_on
@last_modified_on.setter
def last_modified_on(self, last_modified_on):
"""Sets the last_modified_on of this ArtifactResource.
:param last_modified_on: The last_modified_on of this ArtifactResource. # noqa: E501
:type: datetime
"""
self._last_modified_on = last_modified_on
@property
def last_modified_by(self):
"""Gets the last_modified_by of this ArtifactResource. # noqa: E501
:return: The last_modified_by of this ArtifactResource. # noqa: E501
:rtype: str
"""
return self._last_modified_by
@last_modified_by.setter
def last_modified_by(self, last_modified_by):
"""Sets the last_modified_by of this ArtifactResource.
:param last_modified_by: The last_modified_by of this ArtifactResource. # noqa: E501
:type: str
"""
self._last_modified_by = last_modified_by
@property
def links(self):
"""Gets the links of this ArtifactResource. # noqa: E501
:return: The links of this ArtifactResource. # noqa: E501
:rtype: dict(str, str)
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this ArtifactResource.
:param links: The links of this ArtifactResource. # noqa: E501
:type: dict(str, str)
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ArtifactResource, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ArtifactResource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.940171
| 207
| 0.597634
| 1,160
| 9,807
| 4.844828
| 0.118966
| 0.042705
| 0.156584
| 0.13879
| 0.525979
| 0.431495
| 0.414947
| 0.246797
| 0.193238
| 0.04911
| 0
| 0.021758
| 0.311104
| 9,807
| 350
| 208
| 28.02
| 0.810095
| 0.352401
| 0
| 0.077922
| 1
| 0
| 0.07597
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.168831
| false
| 0
| 0.019481
| 0
| 0.311688
| 0.012987
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
hexsha: 901370bb81cfb113acd130dc5b682b50dbdb76b2
size: 1,133
ext: py
lang: Python
max_stars_repo_path: NLP/DuSQL-Baseline/text2sql/models/grammar/__init__.py
max_stars_repo_name: pkulzb/Research
max_stars_repo_head_hexsha: 88da4910a356f1e95e1e1e05316500055533683d
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: 53
max_stars_repo_stars_event_min_datetime: 2020-03-31T16:20:53.000Z
max_stars_repo_stars_event_max_datetime: 2021-11-16T11:48:38.000Z
max_issues_repo_path: NLP/DuSQL-Baseline/text2sql/models/grammar/__init__.py
max_issues_repo_name: pkulzb/Research
max_issues_repo_head_hexsha: 88da4910a356f1e95e1e1e05316500055533683d
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: 1
max_issues_repo_issues_event_min_datetime: 2020-04-06T08:10:12.000Z
max_issues_repo_issues_event_max_datetime: 2020-04-06T08:10:12.000Z
max_forks_repo_path: NLP/DuSQL-Baseline/text2sql/models/grammar/__init__.py
max_forks_repo_name: pkulzb/Research
max_forks_repo_head_hexsha: 88da4910a356f1e95e1e1e05316500055533683d
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: 53
max_forks_repo_forks_event_min_datetime: 2020-04-01T01:59:08.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-14T07:59:58.000Z
content:
# -*- coding:utf8 -*-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""grammar model"""
from collections import namedtuple
DecoderInputsWrapper = namedtuple("DecoderInputsWrapper", "input action gmr_mask")
DecoderDynamicVocab = namedtuple("DecoderDynamicVocab",
"table table_len column column_len value value_len column2table_mask")
from text2sql.models.grammar.nets import grammar_output
from text2sql.models.grammar.infer_decoder import GrammarInferDecoder
from text2sql.models.grammar.dynamic_decode import decode_with_grammar
| 41.962963
| 103
| 0.770521
| 149
| 1,133
| 5.791946
| 0.637584
| 0.069525
| 0.062572
| 0.086906
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013641
| 0.15887
| 1,133
| 26
| 104
| 43.576923
| 0.89192
| 0.54722
| 0
| 0
| 0
| 0
| 0.257606
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.571429
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
hexsha: 901749f4d47b3e2fe3385b589ae5e10eae1bbab0
size: 13,064
ext: py
lang: Python
max_stars_repo_path: mcd2c/cfile/__init__.py
max_stars_repo_name: Asdew95/mcd2c
max_stars_repo_head_hexsha: bcec95276a45b4bfd90ece5569246bd5a247368b
max_stars_repo_licenses: ["Zlib"]
max_stars_count: 3
max_stars_repo_stars_event_min_datetime: 2020-08-18T19:11:39.000Z
max_stars_repo_stars_event_max_datetime: 2021-06-13T12:16:41.000Z
max_issues_repo_path: mcd2c/cfile/__init__.py
max_issues_repo_name: Asdew95/mcd2c
max_issues_repo_head_hexsha: bcec95276a45b4bfd90ece5569246bd5a247368b
max_issues_repo_licenses: ["Zlib"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: mcd2c/cfile/__init__.py
max_forks_repo_name: Asdew95/mcd2c
max_forks_repo_head_hexsha: bcec95276a45b4bfd90ece5569246bd5a247368b
max_forks_repo_licenses: ["Zlib"]
max_forks_count: 2
max_forks_repo_forks_event_min_datetime: 2021-06-13T12:16:47.000Z
max_forks_repo_forks_event_max_datetime: 2021-12-07T18:52:43.000Z
content:
# Inspired by https://github.com/cogu/cfile
c_indent_char = ' '
def set_indent_char(char):
global c_indent_char
c_indent_char = char
class blank:
def __init__(self, num=1):
self.indent = 0 #Irrelevant, kept because it simplifies sequences
self.num = num
def __str__(self):
# Sequences automatically insert one line break for each element, so
# we substract one line break to account for that
return (self.num - 1) * '\n'
# line and its subclasses can be used as container classes for sequences, which
# can span multiple lines. When used on its own though it's a single line
class line:
def __init__(self, elem, indent=0):
self.elem = elem
self._indent = 0
self.indent = indent
@property
def indent(self):
return self._indent
@indent.setter
def indent(self, val):
if hasattr(self.elem, 'indent'):
self.elem.indent = val
self._indent = val
def __str__(self):
return f'{c_indent_char * self.indent}{self.elem}'
class statement(line):
def __str__(self):
return super().__str__() + ';'
class returnval(line):
def __str__(self):
return f'{c_indent_char * self.indent}return {self.elem};'
class typedef(line):
def __init__(self, elem, name, indent=0):
super().__init__(elem, indent)
self.name = name
def __str__(self):
return f'{c_indent_char * self.indent}typedef {self.elem} {self.name};'
class linecomment(line):
def __str__(self):
return f'{c_indent_char * self.indent}// {self.elem}'
class include(line):
def __init__(self, path, sys=False, indent=0):
super().__init__(
f'#include <{path}>' if sys else f'#include "{path}"', indent
)
class preprocessor(line):
directive = ''
def __init__(self, val, indent=0):
super().__init__(f'#{self.directive} {val}', indent)
class define(preprocessor):
directive = 'define'
class indef(preprocessor):
directive = 'ifndef'
class endif(line):
def __init__(self, indent=0):
super().__init__('#endif', indent)
from collections.abc import MutableSequence
# Group of elements at the same indentation level
class sequence(MutableSequence):
def __init__(self, elems=None, indent=0):
self.elems = [] if elems is None else elems
self._indent = indent
self.indent = indent
def __getitem__(self, key):
return self.elems[key]
def __setitem__(self, key, item):
if(isinstance(item, str)):
item = line(item)
item.indent = self.indent
self.elems[key] = item
def __delitem__(self, key):
del self.elems[key]
def __len__(self):
return len(self.elems)
def insert(self, key, item):
if(isinstance(item, str)):
item = line(item)
item.indent = self.indent
self.elems.insert(key, item)
@property
def indent(self):
return self._indent
@indent.setter
def indent(self, val):
for elem in self.elems:
elem.indent = val
self._indent = val
def __str__(self):
return '\n'.join([str(elem) for elem in self.elems])
#Like sequence, but joins on space instead of newline
class linesequence(sequence):
def __setitem__(self, key, item):
if(isinstance(item, str)):
item = line(item)
item.indent = self.indent if(isinstance(item, sequence)) else 0
self.elems[key] = item
def insert(self, key, item):
if(isinstance(item, str)):
item = line(item)
item.indent = self.indent if(isinstance(item, sequence)) else 0
self.elems.insert(key, item)
@property
def indent(self):
return self._indent
@indent.setter
def indent(self, val):
for elem in self.elems:
elem.indent = val if(isinstance(elem, sequence)) else 0
self._indent = val
def __str__(self):
i = c_indent_char * self.indent
return i + ' '.join([str(elem) for elem in self.elems])
# Common for block comments and block scope items
class _block(sequence):
def __init__(self, elems=None, inner_indent=1, indent=1):
self._inner_indent = inner_indent
super().__init__(elems, indent)
@property
def inner_indent(self):
return self._inner_indent
@inner_indent.setter
def inner_indent(self, val):
for elem in self.elems:
elem.indent = self._indent + val
self._inner_indent = val
@property
def indent(self):
return self._indent
@indent.setter
def indent(self, val):
for elem in self.elems:
elem.indent = val + self._inner_indent
self._indent = val
# Curly bracket {} grouped elements, optionally at different indentation level
# Does not indent first line, that's expected to be done by a wrapping class
# such as line, statement, or typedef
class block(_block):
def __str__(self):
return f'{{\n{super().__str__()}\n{self.indent * c_indent_char}}}'
# Similar to block but with block comment /* */ delimiters instead of {}
# Doesn't need to be wrapped in anything to get indentation correct
class blockcomment(_block):
def __str__(self):
i = self.indent * c_indent_char
return f'{i}/*\n{super().__str__()}\n{i}*/'
class blocktype(block):
keyword = ''
def __init__(self, name=None, elems=None, inner_indent=1, indent=0):
super().__init__(indent=indent, inner_indent=inner_indent, elems=elems)
self.name = name
def __str__(self):
if self.name:
return f'{self.keyword} {self.name} {super().__str__()}'
return f'{self.keyword} {super().__str__()}'
class struct(blocktype):
keyword = 'struct'
class union(blocktype):
keyword = 'union'
class enum(blocktype):
keyword = 'enum'
def __str__(self):
inner = ',\n'.join([str(elem) for elem in self.elems])
i = self.indent * c_indent_char
if self.name:
return f'{self.keyword} {self.name} {{\n{inner}\n{i}}}'
return f'{self.keyword} {{\n{inner}\n{i}}}'
class commablock(blocktype):
def __str__(self):
for elem in self.elems:
elem.indent = self.indent + self._inner_indent
inner = ',\n'.join([str(elem) for elem in self.elems])
return f'{{\n{inner}\n{self.indent * c_indent_char}}}'
class conditional(block):
keyword = ''
def __init__(self, condition, elems=None, inner_indent=1, indent=0):
super().__init__(indent=indent, inner_indent=inner_indent, elems=elems)
self.condition = condition
def __str__(self):
i = self.indent * c_indent_char
return f'{i}{self.keyword}({self.condition}) {super().__str__()}'
class _unspacedconditional(block):
keyword = ''
def __init__(self, condition, elems=None, inner_indent=1, indent=0):
super().__init__(indent=indent, inner_indent=inner_indent, elems=elems)
self.condition = condition
def __str__(self):
return f'{self.keyword}({self.condition}) {super().__str__()}'
class ifcond(conditional):
keyword = 'if'
class nospace_ifcond(_unspacedconditional):
keyword = 'if'
class elifcond(_unspacedconditional):
keyword = 'else if'
class elsecond(block):
keyword = 'else'
def __str__(self):
return f'{self.keyword} {super().__str__()}'
class switch(conditional):
keyword = 'switch'
def __str__(self):
s = ''
for elem in self.elems[:-1]:
s += str(elem) if elem.fall and not len(elem) else f'{elem}\n'
s += str(self.elems[-1])
i = self.indent * c_indent_char
return f'{i}{self.keyword}({self.condition}) {{\n{s}\n{i}}}'
class case(_block):
def __init__(self, val, elems=None, fall=False, inner_indent=1, indent=0):
super().__init__(elems, inner_indent, indent)
self.val = val
self.fall = fall
def __str__(self):
o = self.indent * c_indent_char
i = (self.indent + self.inner_indent) * c_indent_char
if self.fall:
return f'{o}case {self.val}:\n{super().__str__()}'
return f'{o}case {self.val}:\n{super().__str__()}\n{i}break;'
class defaultcase(_block):
def __init__(self, elems=None, fall=True, inner_indent=1, indent=0):
super().__init__(elems, inner_indent, indent)
self.fall = fall
def __str__(self):
o = self.indent * c_indent_char
i = (self.indent + self.inner_indent) * c_indent_char
if self.fall:
return f'{o}default:\n{super().__str__()}'
return f'{o}default:\n{super().__str__()}\n{i}break;'
class inlineif(statement):
keyword = 'if'
def __init__(self, condition, elem, indent=0):
super().__init__(elem, indent)
self.condition = condition
def __str__(self):
i = c_indent_char * self.indent
return i + f'{self.keyword}({self.condition}) {self.elem}'
@property
def indent(self):
return self._indent
@indent.setter
def indent(self, val):
self._indent = val
class forloop(block):
keyword = 'for'
def __init__(self, vars=None, cond=None, post=None, elems=None,
inner_indent=1, indent=0):
super().__init__(elems, inner_indent, indent)
self.vars = '' if vars is None else vars
self.cond = '' if cond is None else cond
self.post = '' if post is None else post
def __str__(self):
l1 = f'{self.vars}; {self.cond}' if self.cond else self.vars + ';'
l2 = f'{l1}; {self.post}' if self.post else l1 + ';'
i = self.indent * c_indent_char
return f'{i}{self.keyword}({l2}) {super().__str__()}'
class variable:
def __init__(self, name, typename=None, array=0):
self.name = name
self.typename = typename
self.array = array
@property
def decl(self):
return variabledecl(self.name, self.typename, self.array)
def __str__(self):
return str(self.name)
class variabledecl(variable):
def __str__(self):
if self.array:
return f'{self.typename} {self.name}[{self.array}]'
return f'{self.typename} {self.name}'
class monop:
op = ''
def __init__(self, val, preop = True):
self.val = val
self.preop = preop
def __str__(self):
if self.preop:
return f'{self.op}{self.val}'
return f'{self.op}{self.val}'
class defop(monop):
op = '*'
class refop(monop):
op = '&'
class incop(monop):
op = '++'
class decop(monop):
op = '--'
class operator:
op = ''
def __init__(self, lvalue, rvalue):
self.lvalue = lvalue
self.rvalue = rvalue
def __str__(self):
return f'{self.lvalue} {self.op} {self.rvalue}'
class assign(operator):
op = '='
class addop(operator):
op = '+'
class subop(operator):
op = '-'
class mulop(operator):
op = '*'
class addeq(operator):
op = '+='
class subeq(operator):
op = '-='
class noteq(operator):
op = '!='
class eqeq(operator):
op = '=='
class lth(operator):
op = '<'
class ltheq(operator):
op = '<='
class gth(operator):
op = '>'
class gtheq(operator):
op = '>='
class wrap:
def __init__(self, val, invert=False):
self.val = val
self.invert = invert
def __str__(self):
if self.invert:
return f'!({self.val})'
return f'({self.val})'
class fcall(MutableSequence):
def __init__(self, name, typename, args=None):
self.name = name
self.typename = typename
self.args = [] if args is None else list(args)
def __getitem__(self, key):
return self.args[key]
def __setitem__(self, key, item):
self.args[key] = item
def __delitem__(self, key):
del self.args[key]
def __len__(self):
return len(self.args)
def insert(self, key, item):
self.args.insert(key, item)
@property
def decl(self):
return fdecl(name, typename, [a.decl for a in self.args])
def __str__(self):
a = ', '.join([str(arg) for arg in self.args])
return f'{self.name}({a})'
class fdecl(fcall):
def __str__(self):
a = ', '.join([str(arg) for arg in self.args])
return f'{self.typename} {self.name}({a})'
class _file(sequence):
def __init__(self, path, elems=None):
self.path = path
super().__init__(elems)
class cfile(_file):
pass
import os
class hfile(_file):
def __init__(self, path, elems=None, guard=None):
super().__init__(path, elems)
if guard is None:
bn = os.path.basename(path)
self.guard = f'{os.path.splitext(bn)[0].upper()}_H'
else:
self.guard = guard
def __str__(self):
t = sequence([indef(self.guard), define(self.guard), blank(2)])
t.extend(self)
t.append(endif())
t.append(blank())
return str(t)
| 26.770492
| 79
| 0.606782
| 1,714
| 13,064
| 4.363477
| 0.132439
| 0.052146
| 0.038775
| 0.023533
| 0.526541
| 0.453403
| 0.407407
| 0.354593
| 0.332799
| 0.306324
| 0
| 0.003921
| 0.25819
| 13,064
| 487
| 80
| 26.825462
| 0.767826
| 0.063304
| 0
| 0.429752
| 0
| 0.002755
| 0.113975
| 0.039683
| 0
| 0
| 0
| 0
| 0
| 1
| 0.214876
| false
| 0.002755
| 0.00551
| 0.066116
| 0.606061
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
hexsha: 90179942b479c02a62c0f83dc133fc47dd16d363
size: 906
ext: py
lang: Python
max_stars_repo_path: typed_python/compiler/tests/subclass_of_test.py
max_stars_repo_name: APrioriInvestments/nativepython
max_stars_repo_head_hexsha: 94e6b09d788e49cbe34b9b0d3c948218d7a8dcc5
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: 52
max_stars_repo_stars_event_min_datetime: 2019-04-12T18:07:56.000Z
max_stars_repo_stars_event_max_datetime: 2019-10-07T10:26:19.000Z
max_issues_repo_path: typed_python/compiler/tests/subclass_of_test.py
max_issues_repo_name: APrioriInvestments/nativepython
max_issues_repo_head_hexsha: 94e6b09d788e49cbe34b9b0d3c948218d7a8dcc5
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: 135
max_issues_repo_issues_event_min_datetime: 2019-04-15T12:52:56.000Z
max_issues_repo_issues_event_max_datetime: 2019-10-08T18:39:58.000Z
max_forks_repo_path: typed_python/compiler/tests/subclass_of_test.py
max_forks_repo_name: APrioriInvestments/nativepython
max_forks_repo_head_hexsha: 94e6b09d788e49cbe34b9b0d3c948218d7a8dcc5
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2019-04-12T13:03:38.000Z
max_forks_repo_forks_event_max_datetime: 2019-04-12T13:03:38.000Z
content:
from typed_python import Entrypoint, SubclassOf, Class, Final, Function, ListOf
class A(Class):
pass
class B(A):
pass
class C(B, Final):
pass
def test_can_cast_subclass_of_correctly():
@Function
def f(c: SubclassOf(C)):
return "C"
@f.overload
def f(c: SubclassOf(B)):
return "B"
@f.overload
def f(c: SubclassOf(A)):
return "A"
def checkIt():
assert f(C) == "C", f(C)
assert f(B) == "B", f(B)
assert f(A) == "A", f(A)
checkIt()
Entrypoint(checkIt)()
@Entrypoint
def checkItList(x):
res = ListOf(str)()
for cls in x:
res.append(f(cls))
return res
assert checkItList(ListOf(SubclassOf(A))([A, B, C])) == ["A", "B", "C"]
assert checkItList(ListOf(SubclassOf(B))([B, C])) == ["B", "C"]
assert checkItList(ListOf(SubclassOf(C))([C])) == ["C"]
| 18.12
| 79
| 0.540839
| 123
| 906
| 3.934959
| 0.276423
| 0.020661
| 0.030992
| 0.092975
| 0.243802
| 0.243802
| 0
| 0
| 0
| 0
| 0
| 0
| 0.284768
| 906
| 49
| 80
| 18.489796
| 0.746914
| 0
| 0
| 0.15625
| 0
| 0
| 0.013245
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0.1875
| false
| 0.09375
| 0.03125
| 0.09375
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
hexsha: 9017a7257730a81fd99b9ead002708bc4ceba13e
size: 1,442
ext: py
lang: Python
max_stars_repo_path: experiments/toy.py
max_stars_repo_name: jcrickmer/pyvision
max_stars_repo_head_hexsha: 5aea7cd9a85d7d26196c375275e7bf00c27a8ac8
max_stars_repo_licenses: ["MIT"]
max_stars_count: 53
max_stars_repo_stars_event_min_datetime: 2015-03-10T06:20:50.000Z
max_stars_repo_stars_event_max_datetime: 2021-06-07T07:34:02.000Z
max_issues_repo_path: experiments/toy.py
max_issues_repo_name: jcrickmer/pyvision
max_issues_repo_head_hexsha: 5aea7cd9a85d7d26196c375275e7bf00c27a8ac8
max_issues_repo_licenses: ["MIT"]
max_issues_count: 1
max_issues_repo_issues_event_min_datetime: 2016-11-20T14:28:38.000Z
max_issues_repo_issues_event_max_datetime: 2016-11-20T14:28:38.000Z
max_forks_repo_path: experiments/toy.py
max_forks_repo_name: jcrickmer/pyvision
max_forks_repo_head_hexsha: 5aea7cd9a85d7d26196c375275e7bf00c27a8ac8
max_forks_repo_licenses: ["MIT"]
max_forks_count: 56
max_forks_repo_forks_event_min_datetime: 2015-02-10T20:49:42.000Z
max_forks_repo_forks_event_max_datetime: 2021-04-03T05:41:09.000Z
content:
from vision import *
from vision.track import alearn, interpolation
from vision import visualize
from vision.toymaker import *
import os
import multiprocessing
g = Geppetto()
b = Rectangle()
b = b.linear((300,300), 100)
b = b.linear((0,300), 200)
b = b.linear((300,0), 300)
g.add(b)
path = b.groundtruth()
pathdict = dict((x.frame, x) for x in path)
start = 0
stop = 299
given = [pathdict[start], pathdict[stop]]
id = "toy"
pool = multiprocessing.Pool(24)
root = os.path.dirname(os.path.abspath(__file__))
for _ in range(1):
print "Given frames are:", ", ".join(str(x.frame) for x in given)
print "Simulating with {0} clicks".format(len(given))
askingfor = alearn.pick(g, given, pool = pool, skip = 1,
bgskip = 10, bgsize = 5e3, plot = "tmp/",
errortube = 100000)
print "Requested frame {0}".format(askingfor)
print "Visualizing path with {0} clicks".format(len(given))
vit = visualize.highlight_path(g, interpolation.LinearFill(given))
base = "{0}/visualize/{1}/clicks{2}/wants{3}".format(root, id,
len(given),
askingfor)
try:
os.makedirs(base)
except:
pass
visualize.save(vit, lambda x: "{0}/{1}.jpg".format(base, x))
given.append(pathdict[askingfor])
given.sort(key = lambda x: x.frame)
| 30.041667
| 71
| 0.585298
| 187
| 1,442
| 4.481283
| 0.438503
| 0.047733
| 0.02864
| 0.026253
| 0.059666
| 0.059666
| 0
| 0
| 0
| 0
| 0
| 0.047893
| 0.276006
| 1,442
| 47
| 72
| 30.680851
| 0.754789
| 0
| 0
| 0
| 0
| 0
| 0.104022
| 0.024965
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.025641
| 0.153846
| null | null | 0.102564
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
hexsha: 901d3f9a5542e6bed0daf35f7574ccf1740c36b8
size: 1,046
ext: py
lang: Python
max_stars_repo_path: game-watch-api/games/admin.py
max_stars_repo_name: fouadsan/game_watch
max_stars_repo_head_hexsha: ca38d283ef8f55499ea520eb52a78ebfac8a77a4
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: game-watch-api/games/admin.py
max_issues_repo_name: fouadsan/game_watch
max_issues_repo_head_hexsha: ca38d283ef8f55499ea520eb52a78ebfac8a77a4
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: game-watch-api/games/admin.py
max_forks_repo_name: fouadsan/game_watch
max_forks_repo_head_hexsha: ca38d283ef8f55499ea520eb52a78ebfac8a77a4
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
from django.contrib import admin
from admin_interface.models import Theme as Th
from .models import Genre, Platform, Screenshot, Artwork, Mode, PlayerPerspective, Engine, Theme, Game
admin.site.unregister(Th)
admin.site.register(Genre)
admin.site.register(Platform)
admin.site.register(Mode)
admin.site.register(PlayerPerspective)
admin.site.register(Engine)
admin.site.register(Theme)
admin.site.register(Screenshot)
admin.site.register(Artwork)
class GameAdmin(admin.ModelAdmin):
fields = ('name', 'genre', 'poster',
'platforms', 'release_date', 'is_popular', 'description', 'rating', 'developer', 'publisher', 'game_modes', 'game_engines', 'player_perspective', 'themes', 'storyline', 'screenshots', 'artworks')
list_display = ('name', 'id', 'genre', 'poster_tag',
'release_date', 'is_released', 'get_users')
def get_users(self, obj):
return "\n".join([f'#{u.id}' for u in obj.users.all()])
get_users.short_description = 'users favorite'
admin.site.register(Game, GameAdmin)
| 32.6875
| 209
| 0.711281
| 129
| 1,046
| 5.658915
| 0.496124
| 0.123288
| 0.209589
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137667
| 1,046
| 31
| 210
| 33.741935
| 0.809313
| 0
| 0
| 0
| 0
| 0
| 0.220841
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.142857
| 0.047619
| 0.380952
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
hexsha: 9021ca74c4cfd208803fde68aec8f4729d95dd36
size: 1,156
ext: py
lang: Python
max_stars_repo_path: setup.py
max_stars_repo_name: loic-simon/asyncode
max_stars_repo_head_hexsha: 5f9873acf93f1a3ae6d4ca0b3dfc55acc7598969
max_stars_repo_licenses: ["MIT"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2021-12-22T16:09:52.000Z
max_stars_repo_stars_event_max_datetime: 2021-12-22T16:09:52.000Z
max_issues_repo_path: setup.py
max_issues_repo_name: loic-simon/asyncode
max_issues_repo_head_hexsha: 5f9873acf93f1a3ae6d4ca0b3dfc55acc7598969
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: setup.py
max_forks_repo_name: loic-simon/asyncode
max_forks_repo_head_hexsha: 5f9873acf93f1a3ae6d4ca0b3dfc55acc7598969
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
import setuptools
version = "1.0.0"
with open("README.md", "r", encoding="utf-8") as fh:
readme = fh.read()
setuptools.setup(
name="asyncode",
version=version,
author="Loïc Simon",
author_email="loic.simon@espci.org",
description="Emulating Python's interactive interpreter in asynchronous contexts",
long_description=readme,
long_description_content_type="text/markdown",
url="https://github.com/loic-simon/asyncode",
py_modules=["asyncode"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: AsyncIO",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Interpreters",
],
install_requires=[],
python_requires='>=3.5',
)
# python3 setup.py sdist bdist_wheel
# twine upload dist/*
| 31.243243
| 86
| 0.634948
| 126
| 1,156
| 5.753968
| 0.65873
| 0.131034
| 0.172414
| 0.17931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019956
| 0.219723
| 1,156
| 36
| 87
| 32.111111
| 0.783814
| 0.046713
| 0
| 0
| 0
| 0
| 0.532302
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.033333
| 0
| 0.033333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
hexsha: 9021dafddba10ec136860876afbe1b58c1dcc7f4
size: 1,926
ext: py
lang: Python
max_stars_repo_path: test_image.py
max_stars_repo_name: GSM-Festival-2021/Prettier-photo-plus-Server
max_stars_repo_head_hexsha: 98c30d5c8379491d12a4cfd4ed880fd800c40a7c
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: test_image.py
max_issues_repo_name: GSM-Festival-2021/Prettier-photo-plus-Server
max_issues_repo_head_hexsha: 98c30d5c8379491d12a4cfd4ed880fd800c40a7c
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: test_image.py
max_forks_repo_name: GSM-Festival-2021/Prettier-photo-plus-Server
max_forks_repo_head_hexsha: 98c30d5c8379491d12a4cfd4ed880fd800c40a7c
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
function solution(x1, y1, x2, y2) {
const x1 = 8;
const y1 = 4;
const x2 = 8;
const y2 = 10;
let soundSum = 0;
// 두 스피커 사이가 가까워 음량이 5를 넘는 경우
if (Math.abs(x1 - x2) + Math.abs(y1 - y2) < 4) return -1;
if (3 < x1 && x1 < 13 && 3 < x2 && x2 < 13 && 3 < y1 && y1 < 13 && 3 < y2 && y2 < 13) {
// 벽에 닿지 않는다면 한 스피커당 80 음량을 차지한다.
soundSum += checkWall(x1, y1)
soundSum += checkWall(x2, y2)
soundSum += 160;
return soundSum;
} else {function solution() {
const x1 = 8;
const y1 = 4;
const x2 = 8;
const y2 = 10;
let room = Array.from(Array(15), () => new Array(15).fill(0));
let roomSize = 15;
let xLocation = 0;
let yLocation = 0;
let soundSum = 0;
soundCounting(x1, x2);
console.log(room);
if (Math.abs(x1 - x2) + Math.abs(y1 - y2) < 4) return -1;
if (3 < x1 < 13 && 3 < x2 < 13 && 3 < y1 < 13 && 3 < y2 < 13) {
soundCounting(x1, x2);
console.log(room);
} else {
return -1;
}
function wallSoundCounting() {
// 아 몰루
}
function soundCounting(x, y) {
// 만약에 벽에 닿는다면
// 스피커 영역이 겹친다면
//겹치는 영역이 하나도 없다면
soundSum += 80;
for (let i = 0; i < 9; i++) {
for (let j = 0; j < 9; j++) {
room[x - 4 + i][y - 4 + j]++;
}
}
}
}
// 벽에 닿아 음량이 5를 넘는 경우
return -1;
}
function checkWall(x, y) {
let cnt = 0;
// 만약에 벽에 소리가 닿는다면
if (6 > x || x > 10 ) {
cnt += wallSoundCounting(x);
}
if (6 > y || y > 10) {
cnt += wallSoundCounting(y);
}
return cnt;
}
function wallSoundCounting(wallLocation) {
let cnt = 0;
switch (wallLocation) {
case 4:
case 12:
cnt += 16;
break;
case 5:
case 11:
cnt += 9;
break;
}
return cnt;
}
}
| 21.164835
| 91
| 0.450156
| 258
| 1,926
| 3.360465
| 0.306202
| 0.020761
| 0.018454
| 0.029988
| 0.239908
| 0.239908
| 0.168397
| 0.168397
| 0.168397
| 0.168397
| 0
| 0.107143
| 0.403946
| 1,926
| 91
| 92
| 21.164835
| 0.648084
| 0
| 0
| 0.311688
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
hexsha: 902524521dfdcde30b78e79a7e608392d647a998
size: 6,127
ext: py
lang: Python
max_stars_repo_path: modules/msa/msa/contrib/uniqauth/views.py
max_stars_repo_name: haoyutan/MSA-Framework
max_stars_repo_head_hexsha: 7c5553b244347f26029729161e15e60b0cc805f5
max_stars_repo_licenses: ["MIT"]
max_stars_count: 2
max_stars_repo_stars_event_min_datetime: 2016-11-22T11:44:52.000Z
max_stars_repo_stars_event_max_datetime: 2017-08-29T02:38:01.000Z
max_issues_repo_path: modules/msa/msa/contrib/uniqauth/views.py
max_issues_repo_name: haoyutan/MSA-Framework
max_issues_repo_head_hexsha: 7c5553b244347f26029729161e15e60b0cc805f5
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: modules/msa/msa/contrib/uniqauth/views.py
max_forks_repo_name: haoyutan/MSA-Framework
max_forks_repo_head_hexsha: 7c5553b244347f26029729161e15e60b0cc805f5
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
from django.contrib.auth import authenticate
from msa.utils.ipware import get_ip
from msa.views import LoggedAPIView
from rest_framework import status
from rest_framework.authentication import TokenAuthentication, BasicAuthentication
from rest_framework.authtoken.models import Token
from rest_framework.permissions import AllowAny, IsAuthenticated, IsAdminUser
from rest_framework.response import Response
from .serializers import *
class Register(LoggedAPIView):
authentication_classes = ()
permission_classes = (AllowAny,)
serializer_class = RegisterSerializer
def post(self, request):
pp = self.serializer_class(data=request.data)
if pp.is_valid():
username = pp.validated_data['username']
password = pp.validated_data['password']
if Account.objects.count() <= 0:
user = User.objects.create_superuser(username=username, password=password, email=None)
else:
user = User.objects.create_user(username=username, password=password, email=None)
account = Account(user=user)
#account.save()
password_history = PasswordHistory(account=account, ip=get_ip(request), password=password)
password_history.save()
return Response(status=status.HTTP_201_CREATED)
else:
raise BadRequest(pp.errors)
class LogIn(LoggedAPIView):
authentication_classes = ()
permission_classes = (AllowAny,)
serializer_class = LogInSerializer
def get(self, request):
pp = self.serializer_class(data=request.GET)
if pp.is_valid():
username = pp.validated_data['username']
password = pp.validated_data['password']
user = authenticate(username=username, password=password)
if user is not None:
token, created = Token.objects.get_or_create(user=user)
'''
if not created:
token.created = timezone.now()
token.save()
'''
account = Account.objects.get(user=user)
access_log = AccessLog(account=account, ip=get_ip(request), token=token)
access_log.save()
return Response({'token': token.key})
else:
raise Unauthorized()
else:
raise BadRequest(pp.errors)
class Verify(LoggedAPIView):
authentication_classes = (TokenAuthentication, BasicAuthentication)
permission_classes = (IsAuthenticated,)
serializer_class = None
def get(self, request):
return Response(status=status.HTTP_200_OK)
class Password(LoggedAPIView):
authentication_classes = (TokenAuthentication, BasicAuthentication)
permission_classes = (IsAuthenticated,)
serializer_class = PasswordSerializer
def post(self, request):
pp = self.serializer_class(data=request.data)
if pp.is_valid():
username = pp.validated_data['username']
password_old = pp.validated_data['password_old']
password_new = pp.validated_data['password_new']
user = authenticate(username=username, password=password_old)
if user is not None:
user.set_password(password_new)
user.save()
account = Account.objects.get(user=user)
#account.update = timezone.now()
account.save()
password_history = PasswordHistory(account=account, ip=get_ip(request), password=password_new)
password_history.save()
user.auth_token.delete()
return Response(status=status.HTTP_202_ACCEPTED)
else:
raise Unauthorized()
else:
raise BadRequest(pp.errors)
class Detail(LoggedAPIView):
authentication_classes = (TokenAuthentication, BasicAuthentication)
permission_classes = (IsAuthenticated,)
serializer_class = AccountSerializer
def get(self, request):
return Response(self.serializer_class(Account.objects.get(user=request.user)).data)
class Misc(LoggedAPIView):
authentication_classes = (TokenAuthentication, BasicAuthentication)
permission_classes = (IsAuthenticated,)
serializer_class = MiscSerializer
def post(self, request):
pp = self.serializer_class(data=request.data)
if pp.is_valid():
account = Account.objects.get(user=request.user)
if account.misc:
misc = json.loads(account.misc)
else:
misc = dict()
misc[pp.validated_data['field']] = pp.validated_data['value']
account.misc = json.dumps(misc)
account.save()
return Response(status=status.HTTP_201_CREATED)
else:
raise BadRequest(pp.errors)
class AdminList(LoggedAPIView):
authentication_classes = (TokenAuthentication, BasicAuthentication)
permission_classes = (IsAdminUser,)
serializer_class = AccountSerializer
def get(self, request):
return Response(self.serializer_class(Account.objects.all(), many=True).data)
class AdminReset(LoggedAPIView):
authentication_classes = (TokenAuthentication, BasicAuthentication)
permission_classes = (IsAdminUser,)
serializer_class = AdminResetSerializer
def put(self, request):
pp = self.serializer_class(data=request.data)
if pp.is_valid():
username = pp.validated_data['username']
password = pp.validated_data['password']
user = User.objects.get(username=username)
user.set_password(password)
user.save()
account = Account.objects.get(user=user)
#account.update = timezone.now()
account.save()
password_history = PasswordHistory(account=account, ip=get_ip(request), password=password)
password_history.save()
user.auth_token.delete()
return Response(status=status.HTTP_202_ACCEPTED)
else:
raise BadRequest(pp.errors)
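# Illustrative usage sketch (not part of the original module). It assumes that
# LoggedAPIView ultimately derives from DRF's APIView and that the serializers
# only require plain ``username``/``password`` fields; the '/register/' and
# '/login/' paths are placeholders, since no urls.py is shown here.
from rest_framework.test import APIRequestFactory

factory = APIRequestFactory()

# The very first registration takes the create_superuser branch above.
response = Register.as_view()(factory.post('/register/', {'username': 'alice', 'password': 's3cret'}))
assert response.status_code == 201

# A successful login responds with {'token': <key>} for use with TokenAuthentication.
response = LogIn.as_view()(factory.get('/login/', {'username': 'alice', 'password': 's3cret'}))
token = response.data['token']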
| 37.588957
| 110
| 0.647299
| 607
| 6,127
| 6.385502
| 0.172982
| 0.05805
| 0.04257
| 0.082043
| 0.714396
| 0.696078
| 0.623065
| 0.613777
| 0.564499
| 0.539474
| 0
| 0.003545
| 0.263261
| 6,127
| 162
| 111
| 37.820988
| 0.855117
| 0.012404
| 0
| 0.609375
| 0
| 0
| 0.016077
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0.148438
| 0.070313
| 0.023438
| 0.445313
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
902fb2aef8b4515b1af62bd380980dd14457df65
| 502
|
py
|
Python
|
apps/workspaces/migrations/0007_workspacegeneralsettings_auto_map_employees.py
|
fylein/fyle-xero-api
|
ba81af058dc413fc801d4cf7d1a8961bd42df469
|
[
"MIT"
] | null | null | null |
apps/workspaces/migrations/0007_workspacegeneralsettings_auto_map_employees.py
|
fylein/fyle-xero-api
|
ba81af058dc413fc801d4cf7d1a8961bd42df469
|
[
"MIT"
] | 6
|
2020-12-24T10:24:02.000Z
|
2021-11-30T05:04:53.000Z
|
apps/workspaces/migrations/0007_workspacegeneralsettings_auto_map_employees.py
|
fylein/fyle-xero-api
|
ba81af058dc413fc801d4cf7d1a8961bd42df469
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.3 on 2021-02-19 18:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workspaces', '0006_workspacegeneralsettings_import_categories'),
]
operations = [
migrations.AddField(
model_name='workspacegeneralsettings',
name='auto_map_employees',
field=models.CharField(help_text='Auto Map Employees from Xero to Fyle', max_length=50, null=True),
),
]
| 26.421053
| 111
| 0.663347
| 55
| 502
| 5.909091
| 0.763636
| 0.043077
| 0.098462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05483
| 0.237052
| 502
| 18
| 112
| 27.888889
| 0.793734
| 0.089641
| 0
| 0
| 1
| 0
| 0.296703
| 0.156044
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
903b18672626ab4ad3a5e3aded8aea7688abd5d5
| 757
|
py
|
Python
|
standards/tests/models.py
|
GROCCAD/groccad
|
1d461043030bebe277d74b1c9df9877436baa270
|
[
"MIT"
] | 1
|
2022-03-05T03:11:51.000Z
|
2022-03-05T03:11:51.000Z
|
standards/tests/models.py
|
rocdata/rocserver
|
1d461043030bebe277d74b1c9df9877436baa270
|
[
"MIT"
] | null | null | null |
standards/tests/models.py
|
rocdata/rocserver
|
1d461043030bebe277d74b1c9df9877436baa270
|
[
"MIT"
] | null | null | null |
from functools import partial
from django.db import models
from standards.fields import CharIdField
# MODEL FIXTURES
################################################################################
class CharIdModel(models.Model):
field = CharIdField()
class CharIdModelWithPrefix(models.Model):
field = CharIdField(prefix='WP', length=10)
class NullableCharIdModel(models.Model):
field = CharIdField(blank=True, null=True)
class PrimaryKeyCharIdModel(models.Model):
id = CharIdField(primary_key=True)
class RelatedToCharIdModel(models.Model):
char_fk = models.ForeignKey('PrimaryKeyCharIdModel', models.CASCADE)
class CharIdChildModel(PrimaryKeyCharIdModel):
pass
class CharIdGrandchildModel(CharIdChildModel):
pass
| 23.65625
| 80
| 0.698811
| 70
| 757
| 7.528571
| 0.514286
| 0.104364
| 0.091082
| 0.1537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002999
| 0.11889
| 757
| 31
| 81
| 24.419355
| 0.787106
| 0.018494
| 0
| 0.117647
| 0
| 0
| 0.034796
| 0.03177
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.117647
| 0.176471
| 0
| 0.882353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
5f3bba72b50ee67716dbeda71e53db5b079da28f
| 2,435
|
py
|
Python
|
Code/Python/pract_fund1_sol.py
|
kunal-mulki/Materials
|
b76bba123002972e4063b9b24cd5dc3d980e16e9
|
[
"MIT"
] | 27
|
2016-12-07T17:38:41.000Z
|
2021-06-28T06:19:49.000Z
|
Code/Python/pract_fund1_sol.py
|
kunal-mulki/Materials
|
b76bba123002972e4063b9b24cd5dc3d980e16e9
|
[
"MIT"
] | 27
|
2016-05-28T21:32:24.000Z
|
2016-12-08T16:47:09.000Z
|
Code/Python/pract_fund1_sol.py
|
NYUDataBootcamp/Materials
|
b76bba123002972e4063b9b24cd5dc3d980e16e9
|
[
"MIT"
] | 50
|
2016-10-12T11:04:50.000Z
|
2021-06-01T23:24:45.000Z
|
"""
Practice problems, Python fundamentals 1 -- Solutions
@authors: Balint Szoke, Daniel Csaba
@date: 06/02/2017
"""
#-------------------------------------------------------
# 1) Solution
good_string = "Sarah's code"
#or
good_string = """Sarah's code"""
#-------------------------------------------------------
# 2) Solution
i = 1234
list(str(i))
#-------------------------------------------------------
# 3) Solution
year = '2016'
next_year = str(int(year) + 1)
#-------------------------------------------------------
# 4) Solution
x, y = 3, 'hello'
print(x, y)
z = x
x = y
y = z
print(x, y)
#-------------------------------------------------------
# 5) Solution
name = 'Jones'
print(name.upper())
#-------------------------------------------------------
# 6) Solution
name = 'Ulysses'
print(name.count('s'))
#-------------------------------------------------------
# 7) Solution
long_string = 'salamandroid'
long_string = long_string.replace('a', '*')
print(long_string)
#-------------------------------------------------------
# 8) Solution
ll = [1, 2, 3, 4, 5]
ll.reverse()
print(ll)
#ll.pop(1)
# or better
ll.pop(ll.index(4))
print(ll)
ll.append(1.5)
print(ll)
ll.sort()
print(ll)
#%% #-------------------------------------------------------
# 9) Solution
number = "32,054.23"
number_no_comma = number.replace(',', '')
number_float = float(number_no_comma)
print(number_float)
#or
print(float(number.replace(',', '')))
#-------------------------------------------------------
# 10) Solution
firstname_lastname = 'john_doe'
firstname, lastname = firstname_lastname.split('_')
Firstname = firstname.capitalize()
Lastname = lastname.capitalize()
print(Firstname, Lastname)
#-------------------------------------------------------
# 11-12) Solution
l = [0, 1, 2, 4, 5]
index = l.index(4)
l.insert(index, 3)
print(l)
#-------------------------------------------------------
# 13) Solution
s = 'www.example.com'
s = s.lstrip('w.')
s = s.rstrip('.com')
# or in a single line
(s.lstrip('w.')).rstrip('.com')
#-------------------------------------------------------
# 14) Solution
link = 'https://play.spotify.com/collection/albums'
splitted_link = link.rsplit('/', 1)
print(splitted_link[0])
#or
link.rsplit('/', 1)[0]
#-------------------------------------------------------
# 15) Solution
amount = "32.054,23"
ms = amount.maketrans(',.', '.,')
amount = amount.translate(ms)
print(amount)
| 21.936937
| 62
| 0.433265
| 252
| 2,435
| 4.111111
| 0.40873
| 0.007722
| 0.026062
| 0.030888
| 0.03861
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034275
| 0.113347
| 2,435
| 110
| 63
| 22.136364
| 0.445577
| 0.465708
| 0
| 0.111111
| 0
| 0
| 0.128674
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.277778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
5f42caff296a8e9070523febb1d633e533ecbfff
| 950
|
py
|
Python
|
tools.py
|
chougousui/keyboard_layout_for_mobile
|
3bb59169f10ac56fb82cb62be07f821f1ecac22e
|
[
"MIT"
] | 5
|
2019-06-12T09:29:06.000Z
|
2020-12-31T08:53:19.000Z
|
tools.py
|
chougousui/keyboard_layout_for_mobile
|
3bb59169f10ac56fb82cb62be07f821f1ecac22e
|
[
"MIT"
] | null | null | null |
tools.py
|
chougousui/keyboard_layout_for_mobile
|
3bb59169f10ac56fb82cb62be07f821f1ecac22e
|
[
"MIT"
] | null | null | null |
import numpy as np
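# Rough summary (inferred from the code below, not original documentation):
# generate_cost_dict builds a pairwise movement-cost matrix for keys laid out on
# a 10-column grid. The cost between two key indices is their pixel distance
# (47 px horizontal pitch, 77 px vertical pitch), weighted by the mean of two
# exponential "reach difficulty" terms derived from each key's offset relative
# to an ellipse centred at (5.5, 1) with radii 7.5 and 4.5. Row and column 20 of
# the 28x28 matrix are dropped, presumably because that slot holds no key in the
# target layout.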
def generate_cost_dict():
def inner_func(i, j):
x1 = i % 10
y1 = i // 10
x2 = j % 10
y2 = j // 10
alpha = 5
x_center = 5.5
x_radius = 7.5
y_center = 1
y_radius = 4.5
dist = np.sqrt(47 * 47 * np.square(x1 - x2) + 77 * 77 * np.square(y1 - y2))
force1 = np.exp(-1 * alpha * (
1 / (np.sqrt(np.square(x1 - x_center) + np.square(x_radius / y_radius * (y1 - y_center))) - x_radius) +
1 / x_radius))
force2 = np.exp(-1 * alpha * (
1 / (np.sqrt(np.square(x2 - x_center) + np.square(x_radius / y_radius * (y2 - y_center))) - x_radius) +
1 / x_radius))
res = (force1 + force2) / 2 * dist
return res
cost_dict = np.delete(np.delete(np.fromfunction(
lambda i, j: inner_func(i, j),
(28, 28)), 20, axis=0), 20, axis=1)
return cost_dict
| 29.6875
| 119
| 0.489474
| 144
| 950
| 3.076389
| 0.305556
| 0.110609
| 0.045147
| 0.049661
| 0.34763
| 0.34763
| 0.34763
| 0.248307
| 0.117381
| 0
| 0
| 0.095
| 0.368421
| 950
| 31
| 120
| 30.645161
| 0.643333
| 0
| 0
| 0.08
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.04
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
5f47bfe261a0653163329656400b45e38dc2e334
| 2,103
|
py
|
Python
|
tests/functional_tests/authors/test_authors_login.py
|
Kaique425/recipes
|
ab188dbe1ca3891160f65a7858613b8750faa721
|
[
"MIT"
] | null | null | null |
tests/functional_tests/authors/test_authors_login.py
|
Kaique425/recipes
|
ab188dbe1ca3891160f65a7858613b8750faa721
|
[
"MIT"
] | null | null | null |
tests/functional_tests/authors/test_authors_login.py
|
Kaique425/recipes
|
ab188dbe1ca3891160f65a7858613b8750faa721
|
[
"MIT"
] | null | null | null |
import pytest
from django.contrib.auth.models import User
from django.urls import reverse
from selenium.webdriver.common.by import By
from .base import AuthorBaseFunctionalTest
@pytest.mark.functional_test
class AuthorLoginTest(AuthorBaseFunctionalTest):
def test_user_valid_data_can_login_successfully(self):
password = "testpassword"
user = User.objects.create_user(username="teste123", password=password)
self.browser.get(self.live_server_url + reverse("author:login"))
form = self.get_form()
username_field = self.get_by_id("id_username", form)
password_field = self.get_by_id("id_password", form)
username_field.send_keys(user.username)
password_field.send_keys(password)
form.submit()
body = self.browser.find_element(By.TAG_NAME, "body")
self.assertIn(f"Your logged as {user.username}", body.text)
def test_if_login_form_is_invalid(self):
self.browser.get(self.live_server_url + reverse("author:login"))
form = self.browser.find_element(
By.XPATH, "/html/body/main/div[1]/div/div[2]/form"
)
form.click()
username = self.get_by_id("id_username", form)
password = self.get_by_id("id_password", form)
username.send_keys(" ")
password.send_keys(" ")
form.submit()
self.assertIn(
"Invalid form data.", self.browser.find_element(By.TAG_NAME, "body").text
)
def test_if_login_credentials_is_invalid(self):
self.browser.get(self.live_server_url + reverse("author:login"))
form = self.browser.find_element(
By.XPATH, "/html/body/main/div[1]/div/div[2]/form"
)
form.click()
username = self.get_by_id("id_username", form)
password = self.get_by_id("id_password", form)
username.send_keys("invalid_username")
password.send_keys("invalid_password")
form.submit()
self.assertIn(
"Invalid password or username.",
self.browser.find_element(By.TAG_NAME, "body").text,
)
| 37.553571
| 85
| 0.661912
| 269
| 2,103
| 4.94052
| 0.256506
| 0.066215
| 0.040632
| 0.049661
| 0.576373
| 0.532731
| 0.498119
| 0.498119
| 0.422122
| 0.363431
| 0
| 0.004266
| 0.219686
| 2,103
| 55
| 86
| 38.236364
| 0.805606
| 0
| 0
| 0.375
| 0
| 0
| 0.152639
| 0.036139
| 0
| 0
| 0
| 0
| 0.0625
| 1
| 0.0625
| false
| 0.1875
| 0.104167
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
5f548523f9dcf1f62a0e2fe0f345f22d699939d1
| 1,728
|
py
|
Python
|
codejam/2020-qualification/d.py
|
Ashindustry007/competitive-programming
|
2eabd3975c029d235abb7854569593d334acae2f
|
[
"WTFPL"
] | 506
|
2018-08-22T10:30:38.000Z
|
2022-03-31T10:01:49.000Z
|
codejam/2020-qualification/d.py
|
Ashindustry007/competitive-programming
|
2eabd3975c029d235abb7854569593d334acae2f
|
[
"WTFPL"
] | 13
|
2019-08-07T18:31:18.000Z
|
2020-12-15T21:54:41.000Z
|
codejam/2020-qualification/d.py
|
Ashindustry007/competitive-programming
|
2eabd3975c029d235abb7854569593d334acae2f
|
[
"WTFPL"
] | 234
|
2018-08-06T17:11:41.000Z
|
2022-03-26T10:56:42.000Z
|
#!/usr/bin/env python3
# https://codingcompetitions.withgoogle.com/codejam/round/000000000019fd27/0000000000209a9e
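# Outline of the approach (reconstructed, not the original author's notes): the
# hidden B-bit array may be complemented, reversed, both, or left unchanged
# after every 10th query. Bits are therefore read in symmetric pairs (position
# k+1 from the front, b-k from the back); one index whose pair is equal (k1) and
# one whose pair differs (k2) are remembered, because re-querying just those two
# positions is enough to detect which of the four transformations occurred.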
t, b = map(int, input().split())
for _ in range(t):
xs = [None] * b
q, k, k1, k2 = 0, 0, None, None
def query(k):
global q
q += 1
print(k)
r = int(input())
return r
def complement():
global xs
for i in range(b):
if xs[i] == 0:
xs[i] = 1
elif xs[i] == 1:
xs[i] = 0
def solve():
print(''.join(str(x) for x in xs))
assert(input() == 'Y')
while True:
if q > 0 and q % 10 == 0:
if k1 is not None and k2 is not None:
v1 = query(k1+1)
v2 = query(k2+1)
if xs[k1] == v1 and xs[k2] == v2:
pass
elif xs[k1] != v1 and xs[k2] != v2:
complement()
elif xs[k1] != v1:
xs = xs[::-1]
complement()
else:
xs = xs[::-1]
elif k1 is not None:
v1 = query(k1+1)
v1 = query(k1+1)
if xs[k1] != v1:
complement()
else:
v2 = query(k2+1)
v2 = query(k2+1)
if xs[k2] != v2:
xs = xs[::-1]
else:
v1 = query(k+1)
v2 = query(b-k)
xs[k] = v1
xs[b-k-1] = v2
if v1 == v2 and k1 is None:
k1 = k
elif v1 != v2 and k2 is None:
k2 = k
k += 1
if k*2 == b:
solve()
break
| 27
| 91
| 0.358218
| 217
| 1,728
| 2.847926
| 0.262673
| 0.02589
| 0.038835
| 0.048544
| 0.168285
| 0.153722
| 0.153722
| 0
| 0
| 0
| 0
| 0.114833
| 0.516204
| 1,728
| 63
| 92
| 27.428571
| 0.624402
| 0.064236
| 0
| 0.263158
| 0
| 0
| 0.000619
| 0
| 0
| 0
| 0
| 0
| 0.017544
| 1
| 0.052632
| false
| 0.017544
| 0
| 0
| 0.070175
| 0.035088
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
5f71554b9254c1a62eba83f18f61c6f664cfe709
| 2,485
|
py
|
Python
|
bdd/contact_stepts.py
|
LukinVV/python_training
|
9e6eb57fe9527fd591d563b4219c19e49188c4de
|
[
"Apache-2.0"
] | null | null | null |
bdd/contact_stepts.py
|
LukinVV/python_training
|
9e6eb57fe9527fd591d563b4219c19e49188c4de
|
[
"Apache-2.0"
] | null | null | null |
bdd/contact_stepts.py
|
LukinVV/python_training
|
9e6eb57fe9527fd591d563b4219c19e49188c4de
|
[
"Apache-2.0"
] | null | null | null |
from pytest_bdd import given, when, then
from model.contact import Contact
import random
@given('a contact list')
def contact_list(orm):
return orm.get_contact_list()
@given('a contact with <firstname>, <lastname> and <address>')
def new_contact(firstname, lastname, address):
return Contact(firstname=firstname, lastname=lastname, address=address)
@when('I add the contact to the list')
def add_new_contact(app, new_contact):
app.contact.create_new(new_contact)
@then('the new contact list is equal to the old contact list with the added contact')
def verify_contact_added(orm, contact_list, new_contact):
old_contacts = contact_list
new_contacts = orm.get_contact_list()
old_contacts.append(new_contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
@given('a non-empty contact list')
def non_empty_contact_list(app, orm):
if len(orm.get_contact_list()) == 0:
app.contact.create_new(Contact(firstname='some firstname'))
return orm.get_contact_list()
@given('a random contact from the list')
def random_contact(non_empty_contact_list):
return random.choice(non_empty_contact_list)
@when('I delete the contact from the list')
def delete_contact(app, random_contact):
app.contact.del_contact_by_id(random_contact.id)
@then('the new contact list is equal to the old contact list without the contact')
def verify_contact_del(orm, non_empty_contact_list, random_contact):
old_contacts = non_empty_contact_list
new_contacts = orm.get_contact_list()
old_contacts.remove(random_contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
@when('I modify the contact from the list')
def modify_contact(app, new_contact, random_contact):
new_contact.id = random_contact.id
app.contact.mod_contact_by_id(new_contact)
@then('the new contact list is equal to the old contact list with the modified contact')
def verify_contact_mod(orm, non_empty_contact_list, random_contact, new_contact):
old_contacts = non_empty_contact_list
non_empty_contact_list.remove(random_contact)
random_contact.firstname = new_contact.firstname
random_contact.lastname = new_contact.lastname
random_contact.address = new_contact.address
old_contacts.append(new_contact)
new_contacts = orm.get_contact_list()
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
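# Sketch of a matching scenario outline (the feature-file wording is
# illustrative, but the step strings must match the @given/@when/@then
# decorators above exactly):
#
#   Scenario Outline: Add a new contact
#     Given a contact list
#     And a contact with <firstname>, <lastname> and <address>
#     When I add the contact to the list
#     Then the new contact list is equal to the old contact list with the added contact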
| 42.118644
| 101
| 0.781087
| 382
| 2,485
| 4.793194
| 0.149215
| 0.150191
| 0.07373
| 0.093392
| 0.480612
| 0.449481
| 0.407974
| 0.345713
| 0.345713
| 0.345713
| 0
| 0.000462
| 0.12837
| 2,485
| 59
| 102
| 42.118644
| 0.844875
| 0
| 0
| 0.244898
| 0
| 0
| 0.184634
| 0
| 0
| 0
| 0
| 0
| 0.061224
| 1
| 0.204082
| false
| 0
| 0.061224
| 0.061224
| 0.346939
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
5f72286dd657c066d24e11dfe7993aa6f68aabbc
| 769
|
py
|
Python
|
FigureMaker.py
|
space-physics/histfeas
|
caa0100087d8c2b8711c1c3cb60c322379ef5431
|
[
"MIT"
] | null | null | null |
FigureMaker.py
|
space-physics/histfeas
|
caa0100087d8c2b8711c1c3cb60c322379ef5431
|
[
"MIT"
] | null | null | null |
FigureMaker.py
|
space-physics/histfeas
|
caa0100087d8c2b8711c1c3cb60c322379ef5431
|
[
"MIT"
] | 1
|
2015-05-22T23:51:58.000Z
|
2015-05-22T23:51:58.000Z
|
#!/usr/bin/env python
"""
Figures generated by HiST program
intended for use with in/ files including:
*_flame.ini
*_impulse.ini
*_trans.ini
Flaming Aurora 2 cameras:
./FigureMaker.py in/2cam_flame.ini
Translating Aurora 2 cameras:
./FigureMaker.py in/2cam_trans.ini
Impulse Aurora (for testing):
./FigureMaker.py in/2cam_impulse.ini
Table of results for 2 and 3 cam:
./FigureMaker.py in/table_flame{2,3}.ini
REAL actual camera data (just dump synchronized frames):
./FigureMaker.py -m realvid in/apr14T085454
-m optim reconstruct only
"""
from histfeas import userinput, hist_figure
from histfeas.loadAnalyze import readresults, findxlsh5
P = userinput()
#%% compute
if not P["load"]:
hist_figure(P)
#%% load
flist, P = findxlsh5(P)
readresults(flist, P)
| 20.783784
| 55
| 0.758127
| 116
| 769
| 4.948276
| 0.543103
| 0.11324
| 0.10453
| 0.099303
| 0.114983
| 0.114983
| 0.114983
| 0
| 0
| 0
| 0
| 0.028571
| 0.135241
| 769
| 36
| 56
| 21.361111
| 0.834586
| 0.721717
| 0
| 0
| 1
| 0
| 0.019608
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
5f72dad431a7abe4ecae9aa703b14fc2183ff13a
| 2,998
|
py
|
Python
|
pyv6m/ha/v6m.py
|
dubnom/pyv6m
|
d56bf3f3d39b7c2f747b08bc1974dc3dbe6ccff8
|
[
"MIT"
] | 1
|
2020-02-16T00:42:17.000Z
|
2020-02-16T00:42:17.000Z
|
pyv6m/ha/v6m.py
|
dubnom/pyv6m
|
d56bf3f3d39b7c2f747b08bc1974dc3dbe6ccff8
|
[
"MIT"
] | null | null | null |
pyv6m/ha/v6m.py
|
dubnom/pyv6m
|
d56bf3f3d39b7c2f747b08bc1974dc3dbe6ccff8
|
[
"MIT"
] | null | null | null |
"""Component to control v6m relays and sensors.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/v6m/
"""
import logging
import voluptuous as vol
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP, CONF_HOST, CONF_PORT, CONF_NAME)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pyv6m==0.0.1']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'v6m'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Optional(CONF_NAME, default=DOMAIN): cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, base_config):
"""Start V6M controller."""
from pyv6m.pyv6m import V6M
class V6MController(V6M):
"""Interface between HASS and V6M controller."""
def __init__(self, host, port):
"""Host and port of the controller."""
V6M.__init__(self, host, port, self.relay_callback,
self.sensor_callback)
self._relay_subs = {}
self._sensor_subs = {}
def register_relay(self, device):
"""Add a device to subscribe to events."""
self._register(self._relay_subs, device)
def relay_callback(self, num, old_state, new_state):
"""Process relay states."""
self._dispatch(self._relay_subs, num, new_state)
def register_sensor(self, device):
"""Add a device to subscribe to events."""
self._register(self._sensor_subs, device)
def sensor_callback(self, num, old_state, new_state):
"""Process sensor states."""
self._dispatch(self._sensor_subs, num, new_state)
def _register(self, subs, device):
if device.num not in subs:
subs[device.num] = []
subs[device.num].append(device)
def _dispatch(self, subs, num, new_state):
if num in subs:
for sub in subs[num]:
if sub.callback(new_state):
sub.schedule_update_ha_state()
config = base_config.get(DOMAIN)
host = config[CONF_HOST]
port = config[CONF_PORT]
controller = V6MController(host, port)
hass.data[config[CONF_NAME]] = controller
def cleanup(event):
controller.close()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, cleanup)
return True
class V6MDevice():
"""Base class of a V6M device."""
def __init__(self, controller, num, name):
"""Controller, address, and name of the device."""
self._num = num
self._name = name
self._controller = controller
@property
def num(self):
"""Device number."""
return self._num
@property
def name(self):
"""Device name."""
return self._name
@property
def should_poll(self):
"""No need to poll."""
return False
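# Illustrative configuration.yaml entry matching CONFIG_SCHEMA above (host and
# port values are placeholders):
#
# v6m:
#   host: 192.168.1.10
#   port: 8023
#   name: v6m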
| 28.552381
| 75
| 0.615744
| 361
| 2,998
| 4.900277
| 0.304709
| 0.027134
| 0.022046
| 0.025438
| 0.134539
| 0.134539
| 0.105144
| 0.105144
| 0.062182
| 0.062182
| 0
| 0.008272
| 0.274183
| 2,998
| 104
| 76
| 28.826923
| 0.804688
| 0.166111
| 0
| 0.047619
| 0
| 0
| 0.006165
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.206349
| false
| 0
| 0.079365
| 0
| 0.380952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
5f92da5358e075a34f655feb29ca353ec1f92807
| 2,833
|
py
|
Python
|
src/jenova/components/common.py
|
inova-tecnologias/jenova
|
c975f0894b8663c6a9c9fdc7fa33590a219a6ad3
|
[
"Apache-2.0"
] | 2
|
2016-08-10T15:08:47.000Z
|
2016-10-25T14:27:51.000Z
|
src/jenova/components/common.py
|
inova-tecnologias/jenova
|
c975f0894b8663c6a9c9fdc7fa33590a219a6ad3
|
[
"Apache-2.0"
] | 41
|
2016-08-04T20:19:49.000Z
|
2017-03-07T20:05:53.000Z
|
src/jenova/components/common.py
|
inova-tecnologias/jenova
|
c975f0894b8663c6a9c9fdc7fa33590a219a6ad3
|
[
"Apache-2.0"
] | 3
|
2016-09-26T19:04:51.000Z
|
2017-10-26T22:13:45.000Z
|
import uuid, hashlib, os, yaml, logging.config, json, requests, re
from bcrypt import hashpw, gensalt
from collections import namedtuple
from sqlalchemy import create_engine
from datetime import datetime
CONFIG_FILE = os.environ.get('CONFIG_PATH_FILE')
ZimbraGrant = namedtuple(
'ZimbraGrant', [
'target_name',
'target_type',
'grantee_name',
'grantee_type',
'right',
'deny'
]
)
class CallLogger(object):
@classmethod
def logger(cls):
with open(CONFIG_FILE) as f:
logger_config = yaml.load(f)
logging.config.dictConfig(logger_config['logger'])
return logging.getLogger(os.environ.get('HOSTNAME'))
logger = CallLogger.logger()
class Config(object):
@classmethod
def load(cls):
with open(CONFIG_FILE) as f:
main_config = yaml.load(f)
return main_config
@classmethod
def gen_zimbra_grants(cls, zgrants, target_name, target_dlist, grantee_type='grp'):
"""
:param grantee_type: usr|grp|egp|all|dom|edom|gst|key|pub|email
"""
result_grants = []
for zgrant in zgrants:
result_grants.append(
ZimbraGrant(
target_name = target_name,
target_type = 'domain',
grantee_name = target_dlist,
grantee_type = grantee_type,
right = zgrant,
deny = 0
)
)
return result_grants
class InvalidCredentials(Exception):
status_code = 400
def __init__(self, message, status_code=None):
Exception.__init__(self)
self.msg = message
self.status_code = status_code
class Security(object):
def __init__(self, auth, authtoken, apikey, secretkey):
self.auth = auth
self.authtoken = authtoken
self.apikey = apikey
self.secretkey = secretkey
def is_valid_credentials(self):
if self.authtoken and self.is_valid_token():
return True
elif self.apikey and self.secretkey:
if not self.is_valid_secret_key():
raise InvalidCredentials('Wrong credentials!', 401)
return True
else:
return False
def is_valid_token(self):
return False
def is_valid_secret_key(self):
return self.check_password(self.auth.secret_key, self.secretkey)
@classmethod
def gen_secret_key(cls, password):
plain_secretkey = hashpw(password, gensalt(log_rounds=13)).split('13$')[1]
hashed_secretkey = hashpw(plain_secretkey, gensalt(log_rounds=13))
return plain_secretkey, hashed_secretkey
@classmethod
def hash_password(cls, password):
return hashpw(password, gensalt(log_rounds=13))
@classmethod
def check_password(cls, hashed_password, user_password):
return hashpw(user_password, hashed_password) == hashed_password
@classmethod
def get_jwt_skey(self):
if os.environ.get('NODE_ENV') == 'development':
return 'changeme'
return os.environ.get('JWT_SECRET_KEY')
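# Usage sketch (illustrative, not part of the module): password hashing and
# verification round-trip. Note that gensalt(log_rounds=...) is the old
# py-bcrypt keyword; the modern ``bcrypt`` package names it ``rounds`` and
# expects bytes, so whether this runs as-is depends on the installed library.
hashed = Security.hash_password(b'hunter2')
assert Security.check_password(hashed, b'hunter2')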
| 26.726415
| 85
| 0.693611
| 352
| 2,833
| 5.349432
| 0.335227
| 0.052045
| 0.025491
| 0.028678
| 0.1094
| 0.05948
| 0.025491
| 0
| 0
| 0
| 0
| 0.007117
| 0.206495
| 2,833
| 106
| 86
| 26.726415
| 0.830516
| 0.022238
| 0
| 0.129412
| 0
| 0
| 0.060617
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.141176
| false
| 0.082353
| 0.058824
| 0.047059
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
5f9463815346a08c07f5a3a2ec02e760f4e9de1f
| 3,569
|
py
|
Python
|
hbutils/binary/base.py
|
HansBug/hbutils
|
6872311c8a441c5955572e0093b10189a2b90708
|
[
"Apache-2.0"
] | null | null | null |
hbutils/binary/base.py
|
HansBug/hbutils
|
6872311c8a441c5955572e0093b10189a2b90708
|
[
"Apache-2.0"
] | 25
|
2021-10-03T06:19:05.000Z
|
2022-03-27T12:48:57.000Z
|
hbutils/binary/base.py
|
HansBug/hbutils
|
6872311c8a441c5955572e0093b10189a2b90708
|
[
"Apache-2.0"
] | null | null | null |
import struct
from typing import BinaryIO
class CIOType:
"""
Overview:
Basic IO type.
Used as base class of all the IO types.
"""
def read(self, file: BinaryIO):
"""
Read from binary IO object.
:param file: Binary file, ``io.BytesIO`` is supported as well.
:return: Reading result.
.. warning::
Need to be implemented.
"""
raise NotImplementedError # pragma: no cover
def write(self, file: BinaryIO, val):
"""
Write object to binary IO object.
:param file: Binary file, ``io.BytesIO`` is supported as well.
:param val: Object to write.
.. warning::
Need to be implemented.
"""
raise NotImplementedError # pragma: no cover
class CFixedType(CIOType):
"""
Overview:
Type with fixed size (such as ``int``, ``uint`` and ``float``).
"""
def __init__(self, size: int):
"""
Constructor of :class:`CFixedType`.
:param size: Size of the type.
"""
self.__size = size
@property
def size(self) -> int:
"""
Size of the given type.
"""
return self.__size
def read(self, file: BinaryIO):
raise NotImplementedError # pragma: no cover
def write(self, file: BinaryIO, val):
raise NotImplementedError # pragma: no cover
class CRangedIntType(CFixedType):
"""
Overview:
Type with fixed size and range (such as ``int`` and ``uint``).
"""
def __init__(self, size: int, minimum: int, maximum: int):
"""
Constructor of :class:`CRangedIntType`.
:param size: Size of the type.
:param minimum: Min value of the type.
:param maximum: Max value of the type.
"""
CFixedType.__init__(self, size)
self.__size = size
self.__minimum = minimum
self.__maximum = maximum
@property
def minimum(self) -> int:
"""
Min value of the type.
"""
return self.__minimum
@property
def maximum(self) -> int:
"""
Max value of the type.
"""
return self.__maximum
def read(self, file: BinaryIO):
raise NotImplementedError # pragma: no cover
def write(self, file: BinaryIO, val):
raise NotImplementedError # pragma: no cover
class CMarkedType(CFixedType):
"""
Overview:
Type with struct mark, which can be directly read by ``struct`` module.
"""
def __init__(self, mark: str, size: int):
"""
Constructor of :class:`CMarkedType`.
:param mark: Mark of the type.
:param size: Size of the type.
"""
CFixedType.__init__(self, size)
self.__mark = mark
@property
def mark(self) -> str:
"""
Mark of the type, will be used to read from binary data with ``struct`` module.
"""
return self.__mark
def read(self, file: BinaryIO):
"""
Read from binary with ``struct`` module.
:param file: Binary file, ``io.BytesIO`` is supported as well.
:return: Result value.
"""
r, = struct.unpack(self.mark, file.read(self.size))
return r
def write(self, file: BinaryIO, val):
"""
Write value to binary IO with ``struct`` module.
:param file: Binary file, ``io.BytesIO`` is supported as well.
:param val: Writing value.
"""
file.write(struct.pack(self.mark, float(val)))
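# Illustrative round-trip (not part of the module): CMarkedType simply delegates
# to ``struct``, so a little-endian 4-byte float type can be modelled with the
# '<f' mark. The mark and size below are example values, not library constants.
import io

c_float_like = CMarkedType('<f', 4)
stream = io.BytesIO()
c_float_like.write(stream, 1.5)
stream.seek(0)
assert c_float_like.read(stream) == 1.5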
| 24.445205
| 87
| 0.55842
| 405
| 3,569
| 4.82716
| 0.192593
| 0.025575
| 0.041432
| 0.09821
| 0.574936
| 0.493095
| 0.436829
| 0.417903
| 0.347315
| 0.347315
| 0
| 0
| 0.330905
| 3,569
| 145
| 88
| 24.613793
| 0.818677
| 0.414682
| 0
| 0.488889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.044444
| 0
| 0.577778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
5f97f0b8c3e75f1f6f491e876381487088f22f49
| 771
|
py
|
Python
|
batch_run.py
|
hrishioa/Oyente
|
76c8943426727c93ab161a4e196dc6abdf636fe2
|
[
"MIT"
] | 4
|
2017-01-25T05:25:52.000Z
|
2021-02-18T08:48:51.000Z
|
batch_run.py
|
hrishioa/Oyente
|
76c8943426727c93ab161a4e196dc6abdf636fe2
|
[
"MIT"
] | null | null | null |
batch_run.py
|
hrishioa/Oyente
|
76c8943426727c93ab161a4e196dc6abdf636fe2
|
[
"MIT"
] | 1
|
2018-08-09T20:57:31.000Z
|
2018-08-09T20:57:31.000Z
|
import json
import glob
from tqdm import tqdm
import os
contract_dir = 'contract_data'
cfiles = glob.glob(contract_dir+'/contract*.json')
cjson = {}
print "Loading contracts..."
for cfile in tqdm(cfiles):
cjson.update(json.loads(open(cfile).read()))
results = {}
missed = []
print "Running analysis..."
for c in tqdm(cjson):
with open('tmp.evm','w') as of:
# print "Out: "+cjson[c][1][2:]
of.write(cjson[c][1][2:]+"\0")
os.system('python oyente.py tmp.evm -j -b')
try:
results[c] = json.loads(open('tmp.evm.json').read())
except:
missed.append(c)
print "Writing results..."
with open('results.json', 'w') as of:
of.write(json.dumps(results,indent=1))
with open('missed.json', 'w') as of:
of.write(json.dumps(missed,indent=1))
print "Completed."
| 19.769231
| 54
| 0.66537
| 123
| 771
| 4.146341
| 0.406504
| 0.047059
| 0.029412
| 0.031373
| 0.098039
| 0.098039
| 0.098039
| 0.098039
| 0
| 0
| 0
| 0.010479
| 0.133593
| 771
| 39
| 55
| 19.769231
| 0.752994
| 0.037613
| 0
| 0
| 0
| 0
| 0.232119
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.148148
| null | null | 0.148148
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
5f981f7b480688c0f261ed48cbccc55b236c176c
| 2,266
|
py
|
Python
|
tests/test_statistics.py
|
BENR0/textory
|
0f81b8b6726298b9181be27da7aaac2dd25bd763
|
[
"MIT"
] | 1
|
2020-07-01T14:40:10.000Z
|
2020-07-01T14:40:10.000Z
|
tests/test_statistics.py
|
BENR0/textory
|
0f81b8b6726298b9181be27da7aaac2dd25bd763
|
[
"MIT"
] | 9
|
2020-02-07T11:58:51.000Z
|
2021-09-07T16:23:38.000Z
|
tests/test_statistics.py
|
BENR0/textory
|
0f81b8b6726298b9181be27da7aaac2dd25bd763
|
[
"MIT"
] | 1
|
2019-11-20T05:53:13.000Z
|
2019-11-20T05:53:13.000Z
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from textory.util import neighbour_diff_squared, num_neighbours, neighbour_count, create_kernel
from textory.statistics import variogram, pseudo_cross_variogram
@pytest.fixture
def init_np_arrays():
"""Inits two random np arrays"""
np.random.seed(42)
n = 50
a1 = np.random.random((n,n)) * 157
a2 = np.random.random((n,n)) * 237
return a1.astype(np.float32), a2.astype(np.float32)
def test_variogram(init_np_arrays):
"""THIS TEST ONLY COVERS THE VERSION WITH INEXACT NEIGHBOUR COUNT ON THE EDGES
This test needs improvement in calculation and what is tested.
Much code is shared with the "neighbour_diff_squared" test in test_util.
"""
a, _ = init_np_arrays
tmp = np.zeros_like(a)
lag = 1
lags = range(-lag, lag + 1)
rows, cols = a.shape
#calculate variogram difference
for i in range(0, cols):
for j in range(0, rows):
for l in lags:
for k in lags:
if (i+l < 0) | (i+l >= cols) | (j+k < 0) | (j+k >= rows) | ((l == 0) & (k == 0)):
continue
else:
tmp[i,j] += np.square((a[i, j] - a[i+l, j+k]))
tmp = np.nansum(tmp)
res = tmp / 40000
assert variogram(a, lag=1) == res
def test_pseudo_cross_variogram(init_np_arrays):
"""THIS TEST ONLY COVERS THE VERSION WITH INEXACT NEIGHBOUR COUNT ON THE EDGES
This test needs improvement in calculation and what is tested.
Much code is shared with the "neighbour_diff_squared" test in test_util.
"""
a, b = init_np_arrays
tmp = np.zeros_like(a)
lag = 1
lags = range(-lag, lag + 1)
rows, cols = a.shape
#calculate variogram difference
for i in range(0, cols):
for j in range(0, rows):
for l in lags:
for k in lags:
if (i+l < 0) | (i+l >= cols) | (j+k < 0) | (j+k >= rows) | ((l == 0) & (k == 0)):
continue
else:
tmp[i,j] += np.square((a[i, j] - b[i+l, j+k]))
tmp = np.nansum(tmp)
res = tmp / 40000
assert pseudo_cross_variogram(a, b, lag=1) == res
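# For reference (not part of the original tests): the brute-force loops above
# approximate the classical (pseudo cross-)variogram estimator
#   gamma(h) = 1 / (2 * N(h)) * sum_i (z(x_i) - z(x_i + h))^2
# with all eight lag-1 neighbour directions pooled. N(h) is taken as
# 50 * 50 * 8 = 20000 ordered cell/neighbour pairs, hence the division by 40000,
# which slightly over-counts at the array edges where cells have fewer than
# eight neighbours (the "inexact neighbour count" caveat in the docstrings).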
| 27.634146
| 101
| 0.566637
| 339
| 2,266
| 3.693215
| 0.274336
| 0.038339
| 0.047923
| 0.023962
| 0.699681
| 0.674121
| 0.674121
| 0.674121
| 0.674121
| 0.674121
| 0
| 0.030128
| 0.311562
| 2,266
| 81
| 102
| 27.975309
| 0.772436
| 0.242718
| 0
| 0.577778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044444
| 1
| 0.066667
| false
| 0
| 0.088889
| 0
| 0.177778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
5f9a91b6b4cb83726c16979ae7cd27a95c8fd08d
| 12,235
|
py
|
Python
|
ultracart/models/apply_library_item_response.py
|
UltraCart/rest_api_v2_sdk_python
|
d734ea13fabc7a57872ff68bac06861edb8fd882
|
[
"Apache-2.0"
] | 1
|
2018-03-15T16:56:23.000Z
|
2018-03-15T16:56:23.000Z
|
ultracart/models/apply_library_item_response.py
|
UltraCart/rest_api_v2_sdk_python
|
d734ea13fabc7a57872ff68bac06861edb8fd882
|
[
"Apache-2.0"
] | null | null | null |
ultracart/models/apply_library_item_response.py
|
UltraCart/rest_api_v2_sdk_python
|
d734ea13fabc7a57872ff68bac06861edb8fd882
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2 # noqa: E501
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ApplyLibraryItemResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'attributes': 'list[LibraryItemAttribute]',
'cjson': 'str',
'content_type': 'str',
'email_template_vm_path': 'str',
'error': 'Error',
'metadata': 'ResponseMetadata',
'storefront_oid': 'int',
'success': 'bool',
'title': 'str',
'uuid': 'str',
'warning': 'Warning'
}
attribute_map = {
'attributes': 'attributes',
'cjson': 'cjson',
'content_type': 'content_type',
'email_template_vm_path': 'email_template_vm_path',
'error': 'error',
'metadata': 'metadata',
'storefront_oid': 'storefront_oid',
'success': 'success',
'title': 'title',
'uuid': 'uuid',
'warning': 'warning'
}
def __init__(self, attributes=None, cjson=None, content_type=None, email_template_vm_path=None, error=None, metadata=None, storefront_oid=None, success=None, title=None, uuid=None, warning=None): # noqa: E501
"""ApplyLibraryItemResponse - a model defined in Swagger""" # noqa: E501
self._attributes = None
self._cjson = None
self._content_type = None
self._email_template_vm_path = None
self._error = None
self._metadata = None
self._storefront_oid = None
self._success = None
self._title = None
self._uuid = None
self._warning = None
self.discriminator = None
if attributes is not None:
self.attributes = attributes
if cjson is not None:
self.cjson = cjson
if content_type is not None:
self.content_type = content_type
if email_template_vm_path is not None:
self.email_template_vm_path = email_template_vm_path
if error is not None:
self.error = error
if metadata is not None:
self.metadata = metadata
if storefront_oid is not None:
self.storefront_oid = storefront_oid
if success is not None:
self.success = success
if title is not None:
self.title = title
if uuid is not None:
self.uuid = uuid
if warning is not None:
self.warning = warning
@property
def attributes(self):
"""Gets the attributes of this ApplyLibraryItemResponse. # noqa: E501
Attributes from the library item # noqa: E501
:return: The attributes of this ApplyLibraryItemResponse. # noqa: E501
:rtype: list[LibraryItemAttribute]
"""
return self._attributes
@attributes.setter
def attributes(self, attributes):
"""Sets the attributes of this ApplyLibraryItemResponse.
Attributes from the library item # noqa: E501
:param attributes: The attributes of this ApplyLibraryItemResponse. # noqa: E501
:type: list[LibraryItemAttribute]
"""
self._attributes = attributes
@property
def cjson(self):
"""Gets the cjson of this ApplyLibraryItemResponse. # noqa: E501
Cjson from library item, only populated if this library item was a cjson snippet or marketing email (not transactional) # noqa: E501
:return: The cjson of this ApplyLibraryItemResponse. # noqa: E501
:rtype: str
"""
return self._cjson
@cjson.setter
def cjson(self, cjson):
"""Sets the cjson of this ApplyLibraryItemResponse.
Cjson from library item, only populated if this library item was a cjson snippet or marketing email (not transactional) # noqa: E501
:param cjson: The cjson of this ApplyLibraryItemResponse. # noqa: E501
:type: str
"""
self._cjson = cjson
@property
def content_type(self):
"""Gets the content_type of this ApplyLibraryItemResponse. # noqa: E501
flow, campaign, cjson, upsell, transactional_email or email # noqa: E501
:return: The content_type of this ApplyLibraryItemResponse. # noqa: E501
:rtype: str
"""
return self._content_type
@content_type.setter
def content_type(self, content_type):
"""Sets the content_type of this ApplyLibraryItemResponse.
flow, campaign, cjson, upsell, transactional_email or email # noqa: E501
:param content_type: The content_type of this ApplyLibraryItemResponse. # noqa: E501
:type: str
"""
self._content_type = content_type
@property
def email_template_vm_path(self):
"""Gets the email_template_vm_path of this ApplyLibraryItemResponse. # noqa: E501
If a marketing email was applied, this is the path to the template encapsulating the cjson. This is needed for the UltraCart UI. # noqa: E501
:return: The email_template_vm_path of this ApplyLibraryItemResponse. # noqa: E501
:rtype: str
"""
return self._email_template_vm_path
@email_template_vm_path.setter
def email_template_vm_path(self, email_template_vm_path):
"""Sets the email_template_vm_path of this ApplyLibraryItemResponse.
If a marketing email was applied, this is the path to the template encapsulating the cjson. This is needed for the UltraCart UI. # noqa: E501
:param email_template_vm_path: The email_template_vm_path of this ApplyLibraryItemResponse. # noqa: E501
:type: str
"""
self._email_template_vm_path = email_template_vm_path
@property
def error(self):
"""Gets the error of this ApplyLibraryItemResponse. # noqa: E501
:return: The error of this ApplyLibraryItemResponse. # noqa: E501
:rtype: Error
"""
return self._error
@error.setter
def error(self, error):
"""Sets the error of this ApplyLibraryItemResponse.
:param error: The error of this ApplyLibraryItemResponse. # noqa: E501
:type: Error
"""
self._error = error
@property
def metadata(self):
"""Gets the metadata of this ApplyLibraryItemResponse. # noqa: E501
:return: The metadata of this ApplyLibraryItemResponse. # noqa: E501
:rtype: ResponseMetadata
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this ApplyLibraryItemResponse.
:param metadata: The metadata of this ApplyLibraryItemResponse. # noqa: E501
:type: ResponseMetadata
"""
self._metadata = metadata
@property
def storefront_oid(self):
"""Gets the storefront_oid of this ApplyLibraryItemResponse. # noqa: E501
StoreFront oid where content originates necessary for tracking down relative assets # noqa: E501
:return: The storefront_oid of this ApplyLibraryItemResponse. # noqa: E501
:rtype: int
"""
return self._storefront_oid
@storefront_oid.setter
def storefront_oid(self, storefront_oid):
"""Sets the storefront_oid of this ApplyLibraryItemResponse.
StoreFront oid where content originates necessary for tracking down relative assets # noqa: E501
:param storefront_oid: The storefront_oid of this ApplyLibraryItemResponse. # noqa: E501
:type: int
"""
self._storefront_oid = storefront_oid
@property
def success(self):
"""Gets the success of this ApplyLibraryItemResponse. # noqa: E501
Indicates if API call was successful # noqa: E501
:return: The success of this ApplyLibraryItemResponse. # noqa: E501
:rtype: bool
"""
return self._success
@success.setter
def success(self, success):
"""Sets the success of this ApplyLibraryItemResponse.
Indicates if API call was successful # noqa: E501
:param success: The success of this ApplyLibraryItemResponse. # noqa: E501
:type: bool
"""
self._success = success
@property
def title(self):
"""Gets the title of this ApplyLibraryItemResponse. # noqa: E501
title of library item, usually the name of the flow or campaign, or description of cjson # noqa: E501
:return: The title of this ApplyLibraryItemResponse. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this ApplyLibraryItemResponse.
title of library item, usually the name of the flow or campaign, or description of cjson # noqa: E501
:param title: The title of this ApplyLibraryItemResponse. # noqa: E501
:type: str
"""
self._title = title
@property
def uuid(self):
"""Gets the uuid of this ApplyLibraryItemResponse. # noqa: E501
UUID of marketing email or communication flow/campaign if this library item was an email, campaign or flow # noqa: E501
:return: The uuid of this ApplyLibraryItemResponse. # noqa: E501
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this ApplyLibraryItemResponse.
UUID of marketing email or communication flow/campaign if this library item was an email, campaign or flow # noqa: E501
:param uuid: The uuid of this ApplyLibraryItemResponse. # noqa: E501
:type: str
"""
self._uuid = uuid
@property
def warning(self):
"""Gets the warning of this ApplyLibraryItemResponse. # noqa: E501
:return: The warning of this ApplyLibraryItemResponse. # noqa: E501
:rtype: Warning
"""
return self._warning
@warning.setter
def warning(self, warning):
"""Sets the warning of this ApplyLibraryItemResponse.
:param warning: The warning of this ApplyLibraryItemResponse. # noqa: E501
:type: Warning
"""
self._warning = warning
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ApplyLibraryItemResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApplyLibraryItemResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 31.211735
| 213
| 0.621087
| 1,376
| 12,235
| 5.395349
| 0.114099
| 0.056034
| 0.177802
| 0.151131
| 0.55361
| 0.483432
| 0.452856
| 0.318292
| 0.226024
| 0.176994
| 0
| 0.019562
| 0.298079
| 12,235
| 391
| 214
| 31.29156
| 0.8449
| 0.44953
| 0
| 0.079268
| 1
| 0
| 0.070269
| 0.016408
| 0
| 0
| 0
| 0
| 0
| 1
| 0.170732
| false
| 0
| 0.018293
| 0
| 0.310976
| 0.012195
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
5f9c54619428b0b6d3296e3c0080e9ec17335d9c
| 2,807
|
py
|
Python
|
elecalc.py
|
shka86/py_calc
|
780167bc10e2a74741ac9620dbc859c0d310e299
|
[
"MIT"
] | null | null | null |
elecalc.py
|
shka86/py_calc
|
780167bc10e2a74741ac9620dbc859c0d310e299
|
[
"MIT"
] | null | null | null |
elecalc.py
|
shka86/py_calc
|
780167bc10e2a74741ac9620dbc859c0d310e299
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# calculation tool for a bridge circuit with two input current sources
# two current sources can supply from both of top of the bridge and middle of the bridge
# define the voltage name as follows:
# Vp: voltage at the top of the bridge
# Vn: voltage at the middle of the bridge
def paraR(R1, R2):
return R1*R2/(R1+R2)
def unbalanced_bridge( I = 1, Ra = 1, Rb = 1, Rc = 1, Rd = 1, Re = 1, Rf = 1):
print("# --- calc unbalanced bridge ---------------")
# params
print("I=", I, "A")
print("Ra=", Ra, "ohm")
print("Rb=", Rb, "ohm")
print("Rc=", Rc, "ohm")
print("Rd=", Rd, "ohm")
print("Re=", Re, "ohm")
print("Rf=", Rf, "ohm")
# delta-Y transformation
denom = Ra + Rb + (Rc + Rd)
Ralpha = Ra * Rb / denom
Rbeta = (Rc + Rd) * Ra / denom
Rgamma = Rb * (Rc + Rd) / denom
print("denom=", denom, "ohm")
print("Ralpha=", Ralpha, "ohm")
print("Rbeta=", Rbeta, "ohm")
print("Rgamma=", Rgamma, "ohm")
# split I between the two bridge branches
Il = (Rgamma + Rf) / ((Rbeta + Re) + (Rgamma + Rf)) * I
Ir = (Rbeta + Re) / ((Rbeta + Re) + (Rgamma + Rf)) * I
print("Il=", Il, "A")
print("Ir=", Ir, "A")
# calc Vtop and Vmid
Vl = Re * Il
Vr = Rf * Ir
print("Vl=", Vl, "V")
print("Vr=", Vr, "V")
Vtop = (Ralpha + (paraR((Rbeta + Re), (Rgamma + Rf)))) * I
Vmid = (Rd * Vl + Rc * Vr) / (Rc + Rd)
print("Vtop=", Vtop, "V")
print("Vmid=", Vmid, "V")
return Vtop, Vmid
def main():
# current of two input sources
current1 = 2.5e-3
current2 = 1.25e-3
# unbalanced bridge params
# branch on input side
Ra = 100
Rb = 100
# bridge part (series resistor)
Rc = 100
Rd = 100
# branch on ground side
Re = 50
Rf = 50
current1 = 2
current2 = 1
Vtop1, Vmid1 = unbalanced_bridge(current1, Ra, Rb, Rc, Rd, Re, Rf)
Vtop2, Vmid2 = unbalanced_bridge(current2, Ra, Rb, Rc, Rd, Re, Rf)
print("# --- sum based on superposition theorem ---------------")
print("# when two current sources supply from top")
Vp = Vtop1 + Vtop2
Vn = Vmid1 + Vmid2
print("Vp=", Vp, "V")
print("Vn=", Vn, "V")
# same meaning
# unbalanced_bridge(current1+current2, Ra, Rb, Rc, Rd, Re, Rf)
print("# when current1 from the top, current2 from the middle")
Vp = Vtop1 + Vmid2
Vn = Vmid1 + Vtop2
print("Vp=", Vp, "V")
print("Vn=", Vn, "V")
print("# when current2 from the top, current1 from the middle")
Vp = Vmid1 + Vtop2
Vn = Vtop1 + Vmid2
print("Vp=", Vp, "V")
print("Vn=", Vn, "V")
print("# when two current sources from middle")
Vp = Vmid1 + Vmid2
Vn = Vtop1 + Vtop2
print("Vp=", Vp, "V")
print("Vn=", Vn, "V")
if __name__ == '__main__':
main()
| 25.990741
| 88
| 0.540791
| 404
| 2,807
| 3.727723
| 0.240099
| 0.042497
| 0.01992
| 0.021248
| 0.179947
| 0.119522
| 0.111554
| 0.111554
| 0.078353
| 0.038513
| 0
| 0.035591
| 0.279302
| 2,807
| 107
| 89
| 26.233645
| 0.708848
| 0.199858
| 0
| 0.117647
| 0
| 0
| 0.194345
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044118
| false
| 0
| 0
| 0.014706
| 0.073529
| 0.455882
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 1
|
5f9d943e1c5e5e036c07d0eb1ed8c96b9fd06019
| 4,038
|
py
|
Python
|
sixx/plugins/images.py
|
TildeBeta/6X
|
1814eb8f394b7c25b49decdd7d7249567c85f30f
|
[
"MIT"
] | 2
|
2018-03-06T20:39:49.000Z
|
2018-03-17T04:28:57.000Z
|
sixx/plugins/images.py
|
TildeBeta/TwitterImages
|
1814eb8f394b7c25b49decdd7d7249567c85f30f
|
[
"MIT"
] | 2
|
2018-03-06T20:39:46.000Z
|
2018-03-15T17:03:03.000Z
|
sixx/plugins/images.py
|
TildeBeta/TwitterImages
|
1814eb8f394b7c25b49decdd7d7249567c85f30f
|
[
"MIT"
] | 1
|
2018-04-25T22:24:40.000Z
|
2018-04-25T22:24:40.000Z
|
from math import sqrt
import asks
import datetime
import numpy as np
import random
from PIL import Image
from PIL.ImageDraw import Draw
from PIL.ImageEnhance import Brightness
from PIL.ImageFont import truetype
from curio import spawn_thread
from curious.commands import Context, Plugin, command
from io import BytesIO
from sixx.plugins.utils.pillow import add_noise, add_scanlines, antialiased_text, save_image
SCANLINES, NOISE, BOTH = range(3)
class Images(Plugin):
"""
Commands for image manipulation stuffs.
"""
@command()
async def vcr(self, ctx: Context, *, url: str):
# TODO support attachments
buffer = BytesIO()
resp = await asks.get(url, stream=True)
async for chunk in resp.body:
buffer.write(chunk)
async with ctx.channel.typing:
async with spawn_thread():
with Image.open(buffer) as image:
filter = np.random.choice(range(3), p=[0.7, 0.2, 0.1])
if filter == SCANLINES:
image = add_scanlines(image)
elif filter == NOISE:
image = add_noise(image)
else:
image = add_scanlines(image)
image = add_noise(image)
Brightness(image).enhance(2.5)
# hoo boy
text = np.random.choice(['PLAY', ' PAUSE'], p=[0.8, 0.2])
font = truetype('VCR_OSD_MONO.ttf', size=int(min(image.size) / 10))
start = datetime.datetime(1980, 1, 1, 0, 0)
now = datetime.datetime.utcnow()
# https://stackoverflow.com/a/8170651/7581432
random_date = start + datetime.timedelta(seconds=random.randint(0, int((now - start).total_seconds())))
topleft_text = antialiased_text(text, font, image.width, image.height, offset_x=1 / 30, offset_y=1 / 15)
image.paste(topleft_text, (0, 0), mask=topleft_text)
draw = Draw(image)
if text == 'PLAY':
width, height = font.getsize(text)
offset_x = width + image.width * (1 / 30) * 1.5
offset_y = image.height * (1 / 15)
draw.polygon(
[
(offset_x, offset_y),
(offset_x, offset_y + height),
(offset_x + sqrt(height ** 2 - (height / 2) ** 2), offset_y + height / 2)
],
fill=(255, 255, 255)
)
else:
_, height = font.getsize(' ')
offset_x = image.width * (1 / 35)
offset_y = image.height * (1 / 15)
part = (height - offset_x / 2) / 8
draw.rectangle(
[(offset_x, offset_y + part), (offset_x + 3 * part, offset_y - part + height)],
fill=(255, 255, 255))
draw.rectangle(
[(offset_x + 5 * part, offset_y + part), (offset_x + 8 * part, offset_y - part + height)],
fill=(255, 255, 255))
# This is a nasty hack but oh well
time, date = random_date.strftime('%H:%M|%b. %d %Y').split('|')
wrap_width = len(date)
botleft_text = antialiased_text(time.ljust(wrap_width + 1) + date, font, image.width, image.height,
offset_x=1 / 35, offset_y=13 / 15, wrap_width=wrap_width)
image.paste(botleft_text, (0, 0), mask=botleft_text)
buffer = save_image(image, format=image.format)
await ctx.channel.messages.upload(buffer, filename='shoutouts.' + image.format)
| 40.38
| 124
| 0.488856
| 437
| 4,038
| 4.398169
| 0.345538
| 0.043704
| 0.027055
| 0.021852
| 0.110302
| 0.091571
| 0.069719
| 0.069719
| 0.03538
| 0
| 0
| 0.044845
| 0.409113
| 4,038
| 99
| 125
| 40.787879
| 0.760687
| 0.037147
| 0
| 0.169014
| 0
| 0
| 0.015249
| 0
| 0
| 0
| 0
| 0.010101
| 0
| 1
| 0
| false
| 0
| 0.183099
| 0
| 0.197183
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
5fa27ee2e5dad2743d90292ecca26ad61a23a586
| 615
|
py
|
Python
|
inbound/admin.py
|
nilesh-kr-dubey/django-inbound-rules
|
5ca122bf915d17c04a63b1464048bba91006e854
|
[
"MIT"
] | 1
|
2020-07-31T06:34:27.000Z
|
2020-07-31T06:34:27.000Z
|
inbound/admin.py
|
nilesh-kr-dubey/django-inbound-rules
|
5ca122bf915d17c04a63b1464048bba91006e854
|
[
"MIT"
] | null | null | null |
inbound/admin.py
|
nilesh-kr-dubey/django-inbound-rules
|
5ca122bf915d17c04a63b1464048bba91006e854
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from inbound.models import Rule, InboundIP
# Register your models here.
class InboundIPInline(admin.TabularInline):
''' Inline of Inbound Rule '''
model = InboundIP
readonly_fields = ['cidr']
extra = 1
class RuleAdmin(admin.ModelAdmin):
model = Rule
list_display = ['name', 'namespace', 'url_name', 'group', 'allow_all', 'is_active', 'created']
exclude = ['alias', 'slug', 'extra']
list_filter = ['is_active', 'group', 'namespace', 'url_name']
raw_id_fields = ['group']
inlines = [InboundIPInline]
admin.site.register(Rule, RuleAdmin)
| 25.625
| 98
| 0.676423
| 71
| 615
| 5.71831
| 0.619718
| 0.098522
| 0.078818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001984
| 0.180488
| 615
| 23
| 99
| 26.73913
| 0.803571
| 0.082927
| 0
| 0
| 0
| 0
| 0.18851
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.928571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
5fa6b75aa0e33eeec7402b44584c8450dcb054c7
| 1,226
|
py
|
Python
|
gssClients/gssPythonClients/download_gss.py
|
SemWES/client_libs
|
48c3af519ceaf80b3f33cf509c72376b9b3d9582
|
[
"Zlib"
] | null | null | null |
gssClients/gssPythonClients/download_gss.py
|
SemWES/client_libs
|
48c3af519ceaf80b3f33cf509c72376b9b3d9582
|
[
"Zlib"
] | null | null | null |
gssClients/gssPythonClients/download_gss.py
|
SemWES/client_libs
|
48c3af519ceaf80b3f33cf509c72376b9b3d9582
|
[
"Zlib"
] | null | null | null |
#!/bin/env python
# Copyright STIFTELSEN SINTEF 2016
import suds
import urllib2
import sys
if len(sys.argv) < 4:
print ("Usage:")
print ("\t %s gss-url outputfilename token" % sys.argv[0])
exit()
# get url:
url = sys.argv[1]
outputfileName = sys.argv[2]
sessionToken = sys.argv[3]
wsdlLocation = "https://api.caxman.eu/sintef/infrastructure/gss-0.1/FileUtilities?wsdl"
client = suds.client.Client(wsdlLocation)
resourceInformation = client.service.getResourceInformation(url, sessionToken)
readDescription = resourceInformation.readDescription
if readDescription.supported:
headers = {}
headers[readDescription.sessionTokenField] = sessionToken
if hasattr(readDescription, "headers"):
for headerField in readDescription.headers:
headers[headerField.key] = headerField.value
with open(outputfileName, "wb") as outputFile:
request = urllib2.Request(url = readDescription.url, headers=headers)
result = urllib2.urlopen(request)
while True:
buffer = result.read()
if not buffer:
break
outputFile.write(buffer)
else:
print "The given gss_url does not support read/download."
| 29.190476
| 88
| 0.686786
| 134
| 1,226
| 6.276119
| 0.544776
| 0.041617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014493
| 0.212072
| 1,226
| 41
| 89
| 29.902439
| 0.856108
| 0.047308
| 0
| 0
| 0
| 0.033333
| 0.14433
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.1
| null | null | 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
5fb1b34629d1b25a94935e87aa37911d21e8edb9
| 704
|
py
|
Python
|
estoque/admin.py
|
Felipebros/mini_curso_django
|
965dd5e8837db9dea4485e889c2b8703fb5e902d
|
[
"MIT"
] | 8
|
2019-06-18T20:20:39.000Z
|
2019-11-09T20:21:06.000Z
|
estoque/admin.py
|
Felipebros/mini_curso_django
|
965dd5e8837db9dea4485e889c2b8703fb5e902d
|
[
"MIT"
] | 8
|
2019-12-04T23:26:42.000Z
|
2022-02-10T12:02:19.000Z
|
estoque/admin.py
|
Felipebros/mini_curso_django
|
965dd5e8837db9dea4485e889c2b8703fb5e902d
|
[
"MIT"
] | 3
|
2019-06-21T22:37:32.000Z
|
2019-10-31T00:38:45.000Z
|
from django.contrib import admin
from .models import Produto, TipoProduto, Estoque
# Register your models here.
class TipoProdutoAdmin(admin.ModelAdmin):
search_fields = ['descricao',]
admin.site.register(TipoProduto, TipoProdutoAdmin)
class EstoqueAdmin(admin.ModelAdmin):
search_fields = ['produto__nome']
list_display = ('produto', 'quantidade', 'tipo_movimentacao', 'data', 'observacao')
admin.site.register(Estoque, EstoqueAdmin)
class ProdutoAdmin(admin.ModelAdmin):
search_fields = ['nome']
list_filter = ['tipo_produto', ]
list_display = ('nome', 'preco', 'tipo_produto', 'quantidade_em_estoque', 'data_ultima_atualizacao')
admin.site.register(Produto, ProdutoAdmin)
| 35.2
| 105
| 0.755682
| 76
| 704
| 6.802632
| 0.447368
| 0.087041
| 0.121857
| 0.156673
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117898
| 704
| 20
| 106
| 35.2
| 0.832528
| 0.036932
| 0
| 0
| 0
| 0
| 0.223043
| 0.064993
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.785714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
5fb1ba21e31a7c2b9e588c895f10ae57243ce651
| 3,137
|
py
|
Python
|
star/star.py
|
gd-star-pp/star-pp
|
24c7289199215961fe5462b99ec600907b305d3f
|
[
"MIT"
] | 2
|
2021-10-10T23:42:30.000Z
|
2022-03-31T19:43:13.000Z
|
star/star.py
|
lotus-gd/azalea
|
24c7289199215961fe5462b99ec600907b305d3f
|
[
"MIT"
] | null | null | null |
star/star.py
|
lotus-gd/azalea
|
24c7289199215961fe5462b99ec600907b305d3f
|
[
"MIT"
] | null | null | null |
import gd, itertools
from cube import calculate_cube
from ball import calculate_ball
from helpers import average
client = gd.Client()
def calculate_ship(editor: gd.api.Editor, level: gd.Level, portal: gd.api.Object, speed, portal_count: int):
pass
def calculate_ufo(editor: gd.api.Editor, level: gd.Level, portal: gd.api.Object, speed, portal_count: int):
pass
def calculate_wave(editor: gd.api.Editor, level: gd.Level, portal: gd.api.Object, speed, portal_count: int):
pass
def calculate_robot(editor: gd.api.Editor, level: gd.Level, portal: gd.api.Object, speed, portal_count: int):
pass
def calculate_spider(editor: gd.api.Editor, level: gd.Level, portal: gd.api.Object, speed, portal_count: int):
pass
modes = {gd.PortalType.CUBE: calculate_cube,
gd.PortalType.SHIP: calculate_ship,
gd.PortalType.BALL: calculate_ball,
gd.PortalType.UFO: calculate_ufo,
gd.PortalType.WAVE: calculate_wave,
gd.PortalType.ROBOT: calculate_robot,
gd.PortalType.SPIDER: calculate_spider,
gd.Gamemode.CUBE: calculate_cube,
gd.Gamemode.SHIP: calculate_ship,
gd.Gamemode.BALL: calculate_ball,
gd.Gamemode.UFO: calculate_ufo,
gd.Gamemode.WAVE: calculate_wave,
gd.Gamemode.ROBOT: calculate_robot,
gd.Gamemode.SPIDER: calculate_spider}
def main():
totalstar = []
database = gd.api.save.load()
levels = database.load_my_levels()
#level = levels.get_by_name("star test")
level = client.run(client.get_level(3884458)) # id
editor = level.open_editor()
startspeed = editor.get_start_speed()
mode = modes.get(editor.header.gamemode)
star = mode(editor, level, gd.api.Object(x=0), startspeed, -1)
totalstar.append(star)
portal_count = 0
for portal, speed in itertools.zip_longest(editor.get_portals(), editor.get_speeds()):
try:
speed = gd.Speed.from_name(gd.SpeedChange(speed.id).name)
except AttributeError: # fix speed later
pass
if portal.id == 10 or portal.id == 11 or portal.id == 45 or portal.id == 46 or portal.id == 101 or portal.id == 99 or portal.id == 286 or portal.id == 287 or portal.id == 747 or portal.id == 749:
# speed portals and other extra portals
continue
mode = modes.get(gd.PortalType(portal.id))
if mode:
star = mode(editor, level, portal, speed, portal_count)
if star is not None:
totalstar.append(star)
portal_count += 1
totalstar.sort(reverse=True)
weights = [1.25, 1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0]
i = 0
for star, weight in itertools.zip_longest(totalstar, weights):
if weight is None:
weight = 0
if star is None:
break
print(star, weight)
totalstar[i] = round(star*weight, 2)
i += 1
print(totalstar)
return round(average(totalstar), 2)
if __name__ == "__main__":
star = main()
print(star)
| 36.057471
| 203
| 0.646159
| 435
| 3,137
| 4.54023
| 0.234483
| 0.03038
| 0.04557
| 0.043038
| 0.229367
| 0.198987
| 0.198987
| 0.198987
| 0.198987
| 0.198987
| 0
| 0.026823
| 0.239401
| 3,137
| 87
| 204
| 36.057471
| 0.800922
| 0.030602
| 0
| 0.112676
| 0
| 0
| 0.002633
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084507
| false
| 0.084507
| 0.056338
| 0
| 0.15493
| 0.042254
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
5fba9266d157d784d487f4f6d96c252ab58bc927
| 221
|
py
|
Python
|
modules/module0/02_datastructures_and_geometry/datastructures_0b.py
|
tetov/ITA19
|
1af68a8885caf83acd98f4136d0286539ccbe63b
|
[
"MIT"
] | 7
|
2019-11-13T20:29:54.000Z
|
2020-02-26T14:30:54.000Z
|
modules/module0/02_datastructures_and_geometry/datastructures_0b.py
|
GeneKao/ITA19
|
c4b10dc183599eed4ed60d922b6ef5922d173bdb
|
[
"MIT"
] | 4
|
2019-11-07T20:57:51.000Z
|
2020-03-04T11:43:18.000Z
|
modules/module0/02_datastructures_and_geometry/datastructures_0b.py
|
GeneKao/ITA19
|
c4b10dc183599eed4ed60d922b6ef5922d173bdb
|
[
"MIT"
] | 6
|
2019-10-30T13:25:54.000Z
|
2020-02-14T14:06:09.000Z
|
import os
import compas
from compas.datastructures import Mesh
HERE = os.path.dirname(__file__)
DATA = os.path.join(HERE, 'data')
FILE = os.path.join(DATA, 'faces.obj')
mesh = Mesh.from_obj(FILE)
print(mesh.summary())
| 18.416667
| 38
| 0.737557
| 35
| 221
| 4.514286
| 0.457143
| 0.113924
| 0.126582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 221
| 11
| 39
| 20.090909
| 0.810256
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.375
| 0
| 0.375
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
5fbebd443ba2cc788cd34ccb4de7f2967a894072
| 3,957
|
py
|
Python
|
vis_utils/animation/group_animation_controller.py
|
eherr/vis_utils
|
b757b01f42e6da02ad62130c3b0e61e9eaa3886f
|
[
"MIT"
] | 4
|
2020-05-20T03:55:19.000Z
|
2020-12-24T06:33:40.000Z
|
vis_utils/animation/group_animation_controller.py
|
eherr/vis_utils
|
b757b01f42e6da02ad62130c3b0e61e9eaa3886f
|
[
"MIT"
] | 1
|
2020-05-18T11:21:35.000Z
|
2020-07-07T21:25:57.000Z
|
vis_utils/animation/group_animation_controller.py
|
eherr/vis_utils
|
b757b01f42e6da02ad62130c3b0e61e9eaa3886f
|
[
"MIT"
] | 1
|
2020-07-20T06:57:13.000Z
|
2020-07-20T06:57:13.000Z
|
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
from PySignal import Signal
from .animation_controller import AnimationController
from ..scene.components import ComponentBase
class GroupAnimationController(ComponentBase, AnimationController):
updated_animation_frame = Signal()
reached_end_of_animation = Signal()
def __init__(self, scene_object):
ComponentBase.__init__(self, scene_object)
self.mainContext = 0
AnimationController.__init__(self)
self._animation_controllers = []
def add_animation_controller(self, animation_controller):
self._animation_controllers.append(animation_controller)
self.frameTime = animation_controller.frameTime
def get_animation_controllers(self):
return self._animation_controllers
def update(self, dt):
""" update current frame and global joint transformation matrices
"""
dt *= self.animationSpeed
if self.isLoadedCorrectly():
if self.playAnimation:
# frame and transformation matrices
self.animationTime += dt
self.currentFrameNumber = int(self.animationTime / self.getFrameTime())
self.updateTransformation(self.currentFrameNumber)
# update gui
if self.currentFrameNumber > self.getNumberOfFrames():
self.resetAnimationTime()
self.reached_end_of_animation.emit(self.loopAnimation)
else:
self.updated_animation_frame.emit(self.currentFrameNumber)
def draw(self, modelMatrix, viewMatrix, projectionMatrix, lightSources):
return
def updateTransformation(self, frameNumber=None):
for controller in self._animation_controllers:
if frameNumber is not None:
controller.setCurrentFrameNumber(frameNumber)
controller.updateTransformation()
def resetAnimationTime(self):
AnimationController.resetAnimationTime(self)
self.currentFrameNumber = 0
self.updateTransformation(self.currentFrameNumber)
def setCurrentFrameNumber(self, frameNumber):
self.currentFrameNumber = frameNumber
self.updateTransformation(self.currentFrameNumber)
self.animationTime = self.getFrameTime() * self.currentFrameNumber
def getNumberOfFrames(self):
n_frames = [0]
n_frames += [controller.getNumberOfFrames() for controller in self._animation_controllers]
return max(n_frames)
def isLoadedCorrectly(self):
return len(self._animation_controllers) > 0
def getFrameTime(self):
if self.isLoadedCorrectly():
# print self.frameTime
return self.frameTime
else:
return 0
def toggle_animation_loop(self):
self.loopAnimation = not self.loopAnimation
| 39.57
| 98
| 0.706849
| 421
| 3,957
| 6.527316
| 0.384798
| 0.072052
| 0.052402
| 0.050218
| 0.055313
| 0.028384
| 0
| 0
| 0
| 0
| 0
| 0.002951
| 0.229214
| 3,957
| 100
| 99
| 39.57
| 0.898033
| 0.304271
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.214286
| false
| 0
| 0.053571
| 0.053571
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
5fcaa9f085f2d78ed188a66c5c69d0728b2a6373
| 2,640
|
py
|
Python
|
tools/common.py
|
JamzumSum/yNet
|
78506738e64321cfd26f0af70a62dd2119948e39
|
[
"MIT"
] | 5
|
2021-06-09T02:11:19.000Z
|
2021-10-04T09:00:31.000Z
|
tools/common.py
|
JamzumSum/yNet
|
78506738e64321cfd26f0af70a62dd2119948e39
|
[
"MIT"
] | null | null | null |
tools/common.py
|
JamzumSum/yNet
|
78506738e64321cfd26f0af70a62dd2119948e39
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from typing import Iterable
import torch
from torchmetrics import ConfusionMatrix
from collections import defaultdict
argmax = lambda l: l.index(max(l))
BIRAD_MAP = ['2', '3', '4', '5']
def _lbm():
global BIRAD_MAP
BIRAD_MAP = torch.load("./data/BIRADs/meta.pt")['classname']['Yb']
@dataclass(frozen=True)
class DiagBag:
pid: str
pm: float
pb: list
ym: int
yb: int
@staticmethod
def header():
return [
'pid', 'malignant prob', 'BIRADs prob distrib', 'malignant anno',
'BIRADs anno'
]
def __iter__(self):
yield self.pid
yield f"{self.pm:.4f}"
yield '-' if self.pb is None else f"{BIRAD_MAP[argmax(self.pb)]}类 ({', '.join('%.4f' % i for i in self.pb)})"
yield str(self.ym)
yield '-' if self.yb is None else f"{BIRAD_MAP[self.yb]}类"
class Counter:
def __init__(self, diags: Iterable[DiagBag], thresh: float) -> None:
self.raw = tuple(diags)
self.K = len(BIRAD_MAP)
assert self.K >= 2
self.allInOne(thresh)
def allInOne(self, thresh):
cm = ConfusionMatrix(2, threshold=thresh)
cb = ConfusionMatrix(self.K)
cbm = ConfusionMatrix(self.K)
for d in self.raw:
cm.update(preds=torch.Tensor([d.pm]), target=torch.LongTensor([d.ym]))
cbm.update(preds=torch.Tensor([d.pb]), target=torch.LongTensor([int(d.pm > thresh)]))
if d.yb is not None:
cb.update(preds=torch.Tensor([d.pb]), target=torch.LongTensor([[d.yb]]))
self.cm = cm.compute()
self.cb = cb.compute()
self.cbm = cbm.compute()
@staticmethod
def _acc(cf):
return float(cf.diag().sum() / cf.sum())
@staticmethod
def _prec(cf: torch.Tensor):
return (cf.diag() / cf.sum(dim=1).clamp_min_(1e-5)).tolist()
@staticmethod
def _recall(cf: torch.Tensor):
return (cf.diag() / cf.sum(dim=0).clamp_min_(1e-5)).tolist()
@property
def pb_acc(self):
return self._acc(self.cb)
@property
def pm_acc(self):
return self._acc(self.cm)
@property
def pb_precision(self):
return self._prec(self.cb)
@property
def pb_recall(self):
return self._recall(self.cb)
@property
def pm_precision(self):
return self._prec(self.cm)
@property
def pm_recall(self):
return self._recall(self.cm)
@property
def m_birad(self):
return self.cbm[1].int().tolist()
@property
def b_birad(self):
return self.cbm[0].int().tolist()
_lbm()
| 24.220183
| 117
| 0.591667
| 360
| 2,640
| 4.236111
| 0.288889
| 0.057705
| 0.073443
| 0.043279
| 0.325902
| 0.24
| 0.103607
| 0.103607
| 0.103607
| 0
| 0
| 0.008214
| 0.262121
| 2,640
| 108
| 118
| 24.444444
| 0.774641
| 0
| 0
| 0.148148
| 0
| 0.012346
| 0.077652
| 0.026894
| 0
| 0
| 0
| 0
| 0.012346
| 1
| 0.197531
| false
| 0
| 0.061728
| 0.148148
| 0.493827
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 1
|
5fcddc4097a230efd88262807f43401aaaeff2ab
| 257
|
py
|
Python
|
p5.py
|
kmark1625/Project-Euler
|
e80c4f2044fdbff93331117b8f02aa0becbb0706
|
[
"MIT"
] | null | null | null |
p5.py
|
kmark1625/Project-Euler
|
e80c4f2044fdbff93331117b8f02aa0becbb0706
|
[
"MIT"
] | null | null | null |
p5.py
|
kmark1625/Project-Euler
|
e80c4f2044fdbff93331117b8f02aa0becbb0706
|
[
"MIT"
] | null | null | null |
from fractions import gcd
def smallestDiv():
"""Finds smallest number that is evenly divisible from 1 through 20"""
return reduce(lambda x,y: lcm(x,y), range(1,21))
def lcm(a,b):
return (a*b) / gcd(a,b)
if __name__ == '__main__':
print smallestDiv()
| 21.416667
| 71
| 0.692607
| 43
| 257
| 3.953488
| 0.697674
| 0.035294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 0.159533
| 257
| 11
| 72
| 23.363636
| 0.759259
| 0
| 0
| 0
| 0
| 0
| 0.042781
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.142857
| null | null | 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
39563b416a76edc246cc669718217ec4a6dc8d69
| 199
|
py
|
Python
|
tools/stress_test.py
|
chouette254/quo
|
8979afd118e77d3d0f93f9fbe8711efada7158c5
|
[
"MIT"
] | 5
|
2021-06-17T21:06:39.000Z
|
2022-03-11T06:45:51.000Z
|
tools/stress_test.py
|
chouette254/quo
|
8979afd118e77d3d0f93f9fbe8711efada7158c5
|
[
"MIT"
] | 39
|
2021-07-19T19:36:18.000Z
|
2022-02-23T14:55:08.000Z
|
tools/stress_test.py
|
secretuminc/quo
|
c4f77d52f015c612d32ed0fc2fc79545af598f10
|
[
"MIT"
] | 1
|
2021-05-31T17:19:15.000Z
|
2021-05-31T17:19:15.000Z
|
from quo import Console
from quo.pretty import Pretty
from quo.panel import Panel
DATA = "My name is Quo"
console = Console()
for w in range(130):
console.echo(Panel(Pretty(DATA), width=w))
| 15.307692
| 46
| 0.718593
| 33
| 199
| 4.333333
| 0.515152
| 0.146853
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018405
| 0.180905
| 199
| 12
| 47
| 16.583333
| 0.858896
| 0
| 0
| 0
| 0
| 0
| 0.070352
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.428571
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
395bc11ce97e1bb26dff3ffa2dd8e88c133704f6
| 2,403
|
py
|
Python
|
ietf/ipr/migrations/0007_create_ipr_doc_events.py
|
hassanakbar4/ietfdb
|
cabee059092ae776015410640226064331c293b7
|
[
"BSD-3-Clause"
] | 25
|
2022-03-05T08:26:52.000Z
|
2022-03-30T15:45:42.000Z
|
ietf/ipr/migrations/0007_create_ipr_doc_events.py
|
hassanakbar4/ietfdb
|
cabee059092ae776015410640226064331c293b7
|
[
"BSD-3-Clause"
] | 219
|
2022-03-04T17:29:12.000Z
|
2022-03-31T21:16:14.000Z
|
ietf/ipr/migrations/0007_create_ipr_doc_events.py
|
hassanakbar4/ietfdb
|
cabee059092ae776015410640226064331c293b7
|
[
"BSD-3-Clause"
] | 22
|
2022-03-04T15:34:34.000Z
|
2022-03-28T13:30:59.000Z
|
# Copyright The IETF Trust 2020, All Rights Reserved
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-01-17 12:32
from django.db import migrations
def create_or_delete_ipr_doc_events(apps, delete=False):
"""Create or delete DocEvents for IprEvents
Mostly duplicates IprEvent.create_doc_events(). This is necessary
because model methods, including custom save() methods, are not
available to migrations.
"""
IprEvent = apps.get_model('ipr', 'IprEvent')
DocEvent = apps.get_model('doc', 'DocEvent')
# Map from self.type_id to DocEvent.EVENT_TYPES for types that
# should be logged as DocEvents
event_type_map = {
'posted': 'posted_related_ipr',
'removed': 'removed_related_ipr',
}
for ipr_event in IprEvent.objects.filter(type_id__in=event_type_map):
related_docs = set() # related docs, no duplicates
for alias in ipr_event.disclosure.docs.all():
related_docs.update(alias.docs.all())
for doc in related_docs:
kwargs = dict(
type=event_type_map[ipr_event.type_id],
time=ipr_event.time,
by=ipr_event.by,
doc=doc,
rev='',
desc='%s related IPR disclosure: <b>%s</b>' % (ipr_event.type.name,
ipr_event.disclosure.title),
)
events = DocEvent.objects.filter(**kwargs) # get existing events
if delete:
events.delete()
elif len(events) == 0:
DocEvent.objects.create(**kwargs) # create if did not exist
def forward(apps, schema_editor):
"""Create a DocEvent for each 'posted' or 'removed' IprEvent"""
create_or_delete_ipr_doc_events(apps, delete=False)
def reverse(apps, schema_editor):
"""Delete DocEvents that would be created by the forward migration
This removes data, but only data that can be regenerated by running
the forward migration.
"""
create_or_delete_ipr_doc_events(apps, delete=True)
class Migration(migrations.Migration):
dependencies = [
('ipr', '0006_document_primary_key_cleanup'),
# Ensure the DocEvent types we need exist
('doc', '0029_add_ipr_event_types'),
]
operations = [
migrations.RunPython(forward, reverse),
]
| 34.826087
| 91
| 0.62422
| 297
| 2,403
| 4.86532
| 0.424242
| 0.044291
| 0.038754
| 0.035294
| 0.081661
| 0.081661
| 0.081661
| 0.081661
| 0.056747
| 0
| 0
| 0.017919
| 0.280067
| 2,403
| 68
| 92
| 35.338235
| 0.817341
| 0.306284
| 0
| 0
| 1
| 0
| 0.106542
| 0.035514
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.025641
| 0
| 0.179487
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
395f4cf60fb9e63158d7823964bdae4a063e3899
| 665
|
py
|
Python
|
zk_shell/tests/test_acl_reader.py
|
sellers/zk_shell
|
5f5972c4362212f97de91a75e44d2a551c7bcd51
|
[
"Apache-2.0"
] | 163
|
2015-01-24T06:17:34.000Z
|
2021-12-17T22:58:46.000Z
|
zk_shell/tests/test_acl_reader.py
|
sellers/zk_shell
|
5f5972c4362212f97de91a75e44d2a551c7bcd51
|
[
"Apache-2.0"
] | 86
|
2015-01-01T00:22:57.000Z
|
2022-03-02T14:50:59.000Z
|
zk_shell/tests/test_acl_reader.py
|
sellers/zk_shell
|
5f5972c4362212f97de91a75e44d2a551c7bcd51
|
[
"Apache-2.0"
] | 32
|
2015-02-18T17:33:16.000Z
|
2021-12-28T03:43:45.000Z
|
# -*- coding: utf-8 -*-
""" ACLReader test cases """
import unittest
from kazoo.security import ACL, Id
from zk_shell.acl import ACLReader
class ACLReaderTestCase(unittest.TestCase):
""" test watcher """
def test_extract_acl(self):
acl = ACLReader.extract_acl('world:anyone:cdrwa')
expected = ACL(perms=31, id=Id(scheme='world', id='anyone'))
self.assertEqual(expected, acl)
def test_username_password(self):
acl = ACLReader.extract_acl('username_password:user:secret:cdrwa')
expected = ACL(perms=31, id=Id(scheme='digest', id=u'user:5w9W4eL3797Y4Wq8AcKUPPk8ha4='))
self.assertEqual(expected, acl)
| 28.913043
| 97
| 0.685714
| 82
| 665
| 5.463415
| 0.45122
| 0.098214
| 0.071429
| 0.102679
| 0.263393
| 0.147321
| 0.147321
| 0.147321
| 0
| 0
| 0
| 0.02925
| 0.177444
| 665
| 22
| 98
| 30.227273
| 0.789762
| 0.087218
| 0
| 0.166667
| 0
| 0
| 0.173401
| 0.114478
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.166667
| false
| 0.166667
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
396297e39e5a9bcc3e2b8459e2edf7a1785fe3e7
| 1,575
|
py
|
Python
|
models/networks/recurrent/encoder.py
|
jamesoneill12/LayerFusion
|
99cba1030ed8c012a453bc7715830fc99fb980dc
|
[
"Apache-2.0"
] | null | null | null |
models/networks/recurrent/encoder.py
|
jamesoneill12/LayerFusion
|
99cba1030ed8c012a453bc7715830fc99fb980dc
|
[
"Apache-2.0"
] | null | null | null |
models/networks/recurrent/encoder.py
|
jamesoneill12/LayerFusion
|
99cba1030ed8c012a453bc7715830fc99fb980dc
|
[
"Apache-2.0"
] | null | null | null |
import torch.nn as nn
import torch
class EncoderRNN(nn.Module):
def __init__(self, vocab_size, hidden_size, nlayers=2):
super(EncoderRNN, self).__init__()
self.nlayers = nlayers
self.hidden_size = hidden_size
self.embedding = nn.Embedding(vocab_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size, num_layers=nlayers)
def forward(self, input, hidden):
input = self.embedding(input)
output, hidden = self.gru(input, hidden)
return output, hidden
def initHidden(self, bsz):
#weight = next(self.parameters())
#return weight.new_zeros(self.nlayers, bsz, self.hidden_size)
#return Variable(torch.randn(self.nlayers, bsz, self.hidden_size, device='cuda'), requires_grad=True)
return torch.zeros(self.nlayers, bsz, self.hidden_size, device='cuda')
"""
# use this one when not doing multi-task learning as a baseline
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size, nlayers=2):
super(EncoderRNN, self).__init__()
self.nlayers = nlayers
self.hidden_size = hidden_size
self.embedding = nn.Embedding(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size, nlayers)
def forward(self, input, hidden):
embedded = self.embedding(input).view(1, 1, -1)
output = embedded
output, hidden = self.gru(output, hidden)
return output, hidden
def initHidden(self, bsz):
return torch.zeros(self.nlayers, bsz, self.hidden_size, device='gpu')
"""
| 35
| 109
| 0.670476
| 206
| 1,575
| 4.936893
| 0.257282
| 0.157325
| 0.110128
| 0.078663
| 0.697148
| 0.697148
| 0.634218
| 0.534907
| 0.40708
| 0.40708
| 0
| 0.004052
| 0.216508
| 1,575
| 44
| 110
| 35.795455
| 0.820097
| 0.121905
| 0
| 0
| 0
| 0
| 0.005952
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.133333
| 0.066667
| 0.533333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
39637ce1898c8dbfd20a89d25579fc15ae6c2bcd
| 432
|
py
|
Python
|
events_calendar/urls.py
|
mkbeh/Site-Nordic-Walking-
|
ba98f41db09ed448ecc4db175f65ef4fa2d64979
|
[
"MIT"
] | null | null | null |
events_calendar/urls.py
|
mkbeh/Site-Nordic-Walking-
|
ba98f41db09ed448ecc4db175f65ef4fa2d64979
|
[
"MIT"
] | 8
|
2021-04-08T21:57:55.000Z
|
2022-03-12T00:50:38.000Z
|
events_calendar/urls.py
|
mkbeh/Site-Nordic-Walking-
|
ba98f41db09ed448ecc4db175f65ef4fa2d64979
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import events_calendar, calendar_event_detail, past_competitions
app_name = 'events_calendar'
urlpatterns = [
path('past_competitions/', past_competitions, name='past_competitions'),
path('<int:year>/<int:month>/<int:day>/<int:hour>/<slug:event>/',
calendar_event_detail, name='calendar_event_detail'),
path('<int:days>', events_calendar, name='events_calendar'),
]
| 30.857143
| 76
| 0.733796
| 54
| 432
| 5.592593
| 0.407407
| 0.18543
| 0.188742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118056
| 432
| 13
| 77
| 33.230769
| 0.792651
| 0
| 0
| 0
| 0
| 0.111111
| 0.354167
| 0.180556
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
3965e8f70ee4cbba8c4a1ffa659f82e9962bbdcf
| 619
|
py
|
Python
|
migrations/versions/6f98e24760d_session_speaker.py
|
jace/goafunnel
|
5ff25f0e6a247ff1f6e87fce2a793d1775476cc0
|
[
"BSD-2-Clause"
] | null | null | null |
migrations/versions/6f98e24760d_session_speaker.py
|
jace/goafunnel
|
5ff25f0e6a247ff1f6e87fce2a793d1775476cc0
|
[
"BSD-2-Clause"
] | null | null | null |
migrations/versions/6f98e24760d_session_speaker.py
|
jace/goafunnel
|
5ff25f0e6a247ff1f6e87fce2a793d1775476cc0
|
[
"BSD-2-Clause"
] | null | null | null |
"""session speaker
Revision ID: 6f98e24760d
Revises: 58588eba8cb8
Create Date: 2013-11-22 17:28:47.751025
"""
# revision identifiers, used by Alembic.
revision = '6f98e24760d'
down_revision = '58588eba8cb8'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('session', sa.Column('speaker', sa.Unicode(length=200), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('session', 'speaker')
### end Alembic commands ###
| 22.925926
| 89
| 0.6979
| 76
| 619
| 5.644737
| 0.578947
| 0.062937
| 0.097902
| 0.107226
| 0.205128
| 0.205128
| 0.205128
| 0.205128
| 0
| 0
| 0
| 0.102913
| 0.168013
| 619
| 26
| 90
| 23.807692
| 0.730097
| 0.471729
| 0
| 0
| 0
| 0
| 0.173469
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
396b128eaea90d279b0b41fb297fa2fa82ed6d87
| 1,930
|
py
|
Python
|
nanome/api/user/presenter_info.py
|
nanome-ai/nanome-plugin-api
|
f2ce6a5e3123ee7449a90c2659f3891124289f4a
|
[
"MIT"
] | 3
|
2020-07-02T13:08:27.000Z
|
2021-11-24T14:32:53.000Z
|
nanome/api/user/presenter_info.py
|
nanome-ai/nanome-plugin-api
|
f2ce6a5e3123ee7449a90c2659f3891124289f4a
|
[
"MIT"
] | 11
|
2020-09-14T17:01:47.000Z
|
2022-02-18T04:00:52.000Z
|
nanome/api/user/presenter_info.py
|
nanome-ai/nanome-plugin-api
|
f2ce6a5e3123ee7449a90c2659f3891124289f4a
|
[
"MIT"
] | 5
|
2020-08-12T16:30:03.000Z
|
2021-12-06T18:04:23.000Z
|
class PresenterInfo():
"""
| Class to fetch information about the current nanome session's presenter.
"""
def __init__(self):
self._account_id = ""
self._account_name = ""
self._account_email = ""
self._has_org = False
self._org_id = 0
self._org_name = ""
@property
def account_id(self):
"""
| The Nanome account ID of the presenter
:type: :class:`str`
"""
return self._account_id
@account_id.setter
def account_id(self, value):
self._account_id = value
@property
def account_name(self):
"""
| The Nanome account name of the presenter
:type: :class:`str`
"""
return self._account_name
@account_name.setter
def account_name(self, value):
self._account_name = value
@property
def account_email(self):
"""
| The Nanome account email of the presenter
:type: :class:`str`
"""
return self._account_email
@account_email.setter
def account_email(self, value):
self._account_email = value
@property
def has_org(self):
"""
| If the presenter belongs to an organization
:type: :class:`bool`
"""
return self._has_org
@has_org.setter
def has_org(self, value):
self._has_org = value
@property
def org_id(self):
"""
| The ID of the organization the presenter belongs to
:type: :class:`int`
"""
return self._org_id
@org_id.setter
def org_id(self, value):
self._org_id = value
@property
def org_name(self):
"""
| The name of the organization the presenter belongs to
:type: :class:`str`
"""
return self._org_name
@org_name.setter
def org_name(self, value):
self._org_name = value
| 21.208791
| 78
| 0.564249
| 226
| 1,930
| 4.561947
| 0.159292
| 0.096023
| 0.075655
| 0.069835
| 0.228904
| 0.216295
| 0.216295
| 0.216295
| 0.216295
| 0
| 0
| 0.000781
| 0.336788
| 1,930
| 90
| 79
| 21.444444
| 0.804688
| 0.251295
| 0
| 0.136364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.295455
| false
| 0
| 0
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
396d4f672042b6ba26b0ebbbfccf8610a433735a
| 2,976
|
py
|
Python
|
scripts/staging/sklearn/mappers/supervised.py
|
mgd-hin/systemds
|
08944a7305cbc4f4d9cbbd4565efa8bcc93b82e3
|
[
"Apache-2.0"
] | 372
|
2017-06-09T01:02:53.000Z
|
2020-06-24T05:45:00.000Z
|
scripts/staging/sklearn/mappers/supervised.py
|
ywcb00/systemds
|
5cc523971854cdf4f22e6199987a86e213fae4e2
|
[
"Apache-2.0"
] | 418
|
2017-06-08T16:27:44.000Z
|
2020-06-25T12:15:54.000Z
|
scripts/staging/sklearn/mappers/supervised.py
|
ywcb00/systemds
|
5cc523971854cdf4f22e6199987a86e213fae4e2
|
[
"Apache-2.0"
] | 190
|
2017-06-08T19:32:54.000Z
|
2020-06-15T12:26:12.000Z
|
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
from .mapper import Mapper
class LinearSVMMapper(Mapper):
name = 'l2svm'
sklearn_name = 'linearsvc'
is_supervised = True
mapped_output = [
'model'
]
def map_params(self):
self.mapped_params = [
'TRUE' if self.params.get('fit_intercept', False) else 'FALSE',
self.params.get('tol', 0.001),
self.params.get('C', 1.0),
self.params.get('max_iter', 100),
20, # maxii parameter is unkown in sklearn and not documented in dml
'TRUE' if self.params.get('verbose', False) else 'FALSE',
-1 # column_id is unkown in sklearn
]
class TweedieRegressorMapper(Mapper):
name = 'glm'
sklearn_name = 'tweedieregressor'
is_supervised = True
mapped_output = [
'beta'
]
def map_params(self):
# TODO: many parameters cannot be mapped directly:
# how to handle defaults for dml?
self.mapped_params = [
1, # sklearn impl supports power only, dfam
self.params.get('power', 0.0), # vpow
0, # link
1.0, # lpow
0.0, # yneg
# sklearn does not know last case
0 if self.params.get('fit_intercept', 1) else 1, # icpt
0.0, # disp
0.0, # reg
self.params.get('tol', 0.000001), # tol
200, # moi
0, # mii,
'TRUE' if self.params.get('verbose', False) else 'FALSE'
]
class LogisticRegressionMapper(Mapper):
name = 'multiLogReg'
sklearn_name = 'logisticregression'
is_supervised = True
mapped_output = [
'beta'
]
def map_params(self):
self.mapped_params = [
# sklearn does not know last case
0 if self.params.get('fit_intercept', 1) else 1,
self.params.get('tol', 0.000001), # tol
self.params.get('C', 0.0), # reg
100, # maxi
0, # maxii
'TRUE' if self.params.get('verbose', False) else 'FALSE'
]
| 33.438202
| 80
| 0.576277
| 364
| 2,976
| 4.656593
| 0.409341
| 0.076696
| 0.099705
| 0.053097
| 0.302065
| 0.273156
| 0.257227
| 0.19823
| 0.19823
| 0.127434
| 0
| 0.027856
| 0.288306
| 2,976
| 88
| 81
| 33.818182
| 0.772427
| 0.409946
| 0
| 0.464286
| 0
| 0
| 0.113769
| 0
| 0
| 0
| 0
| 0.011364
| 0
| 1
| 0.053571
| false
| 0
| 0.017857
| 0
| 0.339286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
397c6d5c141c7b6d17cf9a8f120d47ea7101ea9f
| 587
|
py
|
Python
|
tasks/migrations/0002_auto_20201008_2236.py
|
milenakowalska/todolist
|
5b5208b952e88334453935652424f8168ecf9113
|
[
"MIT"
] | null | null | null |
tasks/migrations/0002_auto_20201008_2236.py
|
milenakowalska/todolist
|
5b5208b952e88334453935652424f8168ecf9113
|
[
"MIT"
] | null | null | null |
tasks/migrations/0002_auto_20201008_2236.py
|
milenakowalska/todolist
|
5b5208b952e88334453935652424f8168ecf9113
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.2 on 2020-10-08 22:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tasks', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='background',
field=models.CharField(default='linear-gradient(to top, #48c6ef 0%, #6f86d6 100%)', max_length=300),
),
migrations.AlterField(
model_name='task',
name='done',
field=models.BooleanField(default=False),
),
]
| 24.458333
| 112
| 0.575809
| 61
| 587
| 5.47541
| 0.770492
| 0.053892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079903
| 0.296422
| 587
| 23
| 113
| 25.521739
| 0.728814
| 0.076661
| 0
| 0.117647
| 1
| 0
| 0.162963
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
3980310409feb9f0ac71dbf46448b126022d5366
| 1,258
|
py
|
Python
|
support.py
|
ipascual1/spootnik_bot
|
ad7658f49705b1ce57bcc5ed84006ef658f63fa3
|
[
"Unlicense"
] | null | null | null |
support.py
|
ipascual1/spootnik_bot
|
ad7658f49705b1ce57bcc5ed84006ef658f63fa3
|
[
"Unlicense"
] | null | null | null |
support.py
|
ipascual1/spootnik_bot
|
ad7658f49705b1ce57bcc5ed84006ef658f63fa3
|
[
"Unlicense"
] | null | null | null |
import re
import os
def extract(regularE : str, init : str, stop : str, string : str):
"""
regularE: RE to catch string
init: First string to replace
stop: Last string to replace
string: String to apply the RE
With a regular expression and init and stop to replace, gets a
substring from string argument and returns it.
"""
return re.findall(regularE, string)[0]\
.replace(init, "")\
.replace(stop, "")
def get_term_clock_pid():
"""
return: int with the PID of term_clock;
-1 if process doesn't exist.
Extracts the PID of term_clock process with systemctl.
"""
# sputnikDriver prints in their own console all the PIDs of its subprocesses
ret = os.popen("systemctl status sputnikDriver.service").read()
if ret == "":
return -1
return int(extract(r"term_clock .+ PID", "term_clock ", " PID", ret))
def check_alive():
"""
return: True if java process is running;
False otherwise
Check if a java process in sputnikDriver (i.e. the Minecraft Server) is running
"""
ret = os.popen("systemctl status sputnikDriver.service").read()
return "java" in ret
| 29.255814
| 83
| 0.612878
| 165
| 1,258
| 4.624242
| 0.442424
| 0.058978
| 0.047182
| 0.031455
| 0.173001
| 0.12844
| 0.12844
| 0.12844
| 0
| 0
| 0
| 0.003382
| 0.294913
| 1,258
| 42
| 84
| 29.952381
| 0.856821
| 0.500795
| 0
| 0.142857
| 0
| 0
| 0.208178
| 0.078067
| 0
| 0
| 0
| 0
| 0
| 1
| 0.214286
| false
| 0
| 0.142857
| 0
| 0.642857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
39806196aae9564f8e399df05393bb7226dec4f7
| 1,054
|
py
|
Python
|
steam.py
|
iganeshk/alfred-totp
|
f9c17fe83025c99cbfaf5413d20212aa63d7e0d5
|
[
"MIT"
] | 7
|
2020-04-12T21:16:41.000Z
|
2022-01-09T08:55:22.000Z
|
steam.py
|
iganeshk/alfred-totp
|
f9c17fe83025c99cbfaf5413d20212aa63d7e0d5
|
[
"MIT"
] | null | null | null |
steam.py
|
iganeshk/alfred-totp
|
f9c17fe83025c99cbfaf5413d20212aa63d7e0d5
|
[
"MIT"
] | 1
|
2022-03-26T16:04:53.000Z
|
2022-03-26T16:04:53.000Z
|
#!/usr/bin/env python3
# coding=utf-8
#
# Generate Steamguard OTP with the shared secret passed as an argument
# Ganesh Velu
import hmac
import base64
import hashlib
import codecs
import time
import sys
STEAM_DECODE_CHARS = ['2', '3', '4', '5', '6', '7', '8', '9',
'B', 'C', 'D', 'F', 'G', 'H', 'J', 'K',
'M', 'N', 'P', 'Q', 'R', 'T', 'V', 'W',
'X', 'Y']
def get_authentication_code(secret):
msg = bytes.fromhex(('%016x' % int(time.time() // 30)))
key = base64.b64decode(secret)
auth = hmac.new(key, msg, hashlib.sha1)
digest = auth.digest()
start = digest[19] & 0xF
code = digest[start:start + 4]
auth_code_raw = int(codecs.encode(code, 'hex'), 16) & 0x7FFFFFFF
auth_code = []
for i in range(5):
auth_code.append(STEAM_DECODE_CHARS[int(auth_code_raw % len(STEAM_DECODE_CHARS))])
auth_code_raw /= len(STEAM_DECODE_CHARS)
return ''.join(auth_code)
if __name__ == '__main__':
print(get_authentication_code(sys.argv[1]), end='')
| 29.277778
| 90
| 0.586338
| 149
| 1,054
| 3.95302
| 0.624161
| 0.081494
| 0.108659
| 0.047538
| 0.101868
| 0.101868
| 0.101868
| 0
| 0
| 0
| 0
| 0.039752
| 0.236243
| 1,054
| 36
| 91
| 29.277778
| 0.691925
| 0.105313
| 0
| 0
| 1
| 0
| 0.044776
| 0
| 0
| 0
| 0.013859
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.24
| 0
| 0.32
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
39812282916a91f854eceaec095dab9dd29955a6
| 1,783
|
py
|
Python
|
igvc_ws/src/igvc_nav/src/path_planner/node.py
|
SoonerRobotics/igvc_software_2022
|
906e6a4fca22d2b0c06ef1b8a4a3a9df7f1d17dd
|
[
"MIT"
] | 4
|
2020-07-07T14:56:56.000Z
|
2021-08-13T23:31:07.000Z
|
igvc_ws/src/igvc_nav/src/path_planner/node.py
|
pradumn203/igvc-winners-2021
|
658233609054eafac59603a77b2a092dc002e145
|
[
"MIT"
] | 13
|
2019-11-12T02:57:54.000Z
|
2020-03-17T17:04:22.000Z
|
igvc_ws/src/igvc_nav/src/path_planner/node.py
|
pradumn203/igvc-winners-2021
|
658233609054eafac59603a77b2a092dc002e145
|
[
"MIT"
] | 3
|
2021-06-29T05:21:18.000Z
|
2021-08-23T05:03:27.000Z
|
"""
"""
class Node:
INFINITY = 1000000000
def __init__(self, row, col, cost = 0):
self.row = row
self.col = col
# Default node value
self.G = self.INFINITY
self.rhs = self.INFINITY
self.par = None
self.key = (None, None)
# Cost (for obstacles and such)
self.cost = cost
def set_g(self, G):
if G > self.INFINITY:
G = self.INFINITY
self.G = G
def set_rhs(self, rhs):
if rhs > self.INFINITY:
rhs = self.INFINITY
self.rhs = rhs
def set_par(self, par):
self.par = par
def set_key(self, key):
self.key = key
def set_cost(self, cost):
self.cost = cost
def __cmp__(self, other):
""" Sort keys with lowest priority to the top of the list"""
# Sort by the first key
comp_val = cmp(self.key[0], other.key[0])
if comp_val != 0:
return comp_val
# If there was a tie, use the second key as a tiebreaker
return cmp(self.key[1], other.key[1])
def __lt__(self, other):
comp_val = (self.key[0] < other.key[0])
if comp_val is True:
return True
elif self.key[0] == other.key[0]:
return self.key[1] < other.key[1]
return False
def __gt__(self, other):
comp_val = (self.key[0] > other.key[0])
if comp_val is True:
return True
elif self.key[0] == other.key[0]:
return self.key[1] > other.key[1]
return False
def __eq__(self, other):
if other == None:
return False
return (self.row == other.row) and (self.col == other.col)
def __hash__(self):
return hash((self.row, self.col))
| 22.858974
| 68
| 0.528884
| 251
| 1,783
| 3.613546
| 0.231076
| 0.084895
| 0.044101
| 0.071665
| 0.303197
| 0.303197
| 0.284454
| 0.284454
| 0.284454
| 0.255788
| 0
| 0.024412
| 0.356702
| 1,783
| 77
| 69
| 23.155844
| 0.766347
| 0.100953
| 0
| 0.22449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.22449
| false
| 0
| 0
| 0.020408
| 0.489796
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
3982edd57b175c1d224315f35831e37d04e0c726
| 1,408
|
py
|
Python
|
tools/generatekeypair.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 2,027
|
2019-11-12T18:05:48.000Z
|
2022-03-31T22:25:04.000Z
|
tools/generatekeypair.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 496
|
2019-11-12T18:13:37.000Z
|
2022-03-31T10:43:45.000Z
|
tools/generatekeypair.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 249
|
2019-11-12T18:02:27.000Z
|
2022-03-22T12:19:19.000Z
|
import argparse
import json
from authlib.jose import JsonWebKey
from cryptography.hazmat.primitives import serialization
def generate_key_pair(filename, kid=None):
"""
'kid' will default to the jwk thumbprint if not set explicitly.
Reference: https://tools.ietf.org/html/rfc7638
"""
options = {}
if kid:
options["kid"] = kid
jwk = JsonWebKey.generate_key("RSA", 2048, is_private=True, options=options)
print(("Writing public key to %s.jwk" % filename))
with open("%s.jwk" % filename, mode="w") as f:
f.truncate(0)
f.write(jwk.as_json())
print(("Writing key ID to %s.kid" % filename))
with open("%s.kid" % filename, mode="w") as f:
f.truncate(0)
f.write(jwk.as_dict()["kid"])
print(("Writing private key to %s.pem" % filename))
with open("%s.pem" % filename, mode="wb") as f:
f.truncate(0)
f.write(
jwk.get_private_key().private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
)
parser = argparse.ArgumentParser(description="Generates a key pair into files")
parser.add_argument("filename", help="The filename prefix for the generated key files")
args = parser.parse_args()
generate_key_pair(args.filename)
| 30.608696
| 87
| 0.648438
| 180
| 1,408
| 4.994444
| 0.455556
| 0.036707
| 0.053393
| 0.05673
| 0.106785
| 0.106785
| 0.106785
| 0.106785
| 0.082314
| 0.082314
| 0
| 0.010082
| 0.225142
| 1,408
| 45
| 88
| 31.288889
| 0.813932
| 0.078835
| 0
| 0.096774
| 1
| 0
| 0.155051
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.129032
| 0
| 0.16129
| 0.096774
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
398533491570a42901637e1afb785d157af6a86a
| 809
|
py
|
Python
|
accounts/forms.py
|
mohsenamoon1160417237/Social_app
|
79fa0871f7b83648894941f9010f1d99f1b27ab3
|
[
"MIT"
] | null | null | null |
accounts/forms.py
|
mohsenamoon1160417237/Social_app
|
79fa0871f7b83648894941f9010f1d99f1b27ab3
|
[
"MIT"
] | null | null | null |
accounts/forms.py
|
mohsenamoon1160417237/Social_app
|
79fa0871f7b83648894941f9010f1d99f1b27ab3
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django import forms
from .models import UserProfile
class UserRegistrationForm(forms.ModelForm):
password = forms.CharField(max_length=20 , widget=forms.PasswordInput , label='Password')
password2 = forms.CharField(max_length=20 , widget=forms.PasswordInput , label="Repeat password")
class Meta:
model = User
fields = ['username' , 'email']
def clean_password2(self):
cd = self.cleaned_data
if cd['password'] != cd['password2']:
raise forms.ValidationError("Passwords must match")
return cd['password2']
class UserEditProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ['image' , 'age']
class UserEditForm(forms.ModelForm):
class Meta:
model = User
fields = ['first_name' , 'last_name']
| 17.212766
| 98
| 0.721879
| 95
| 809
| 6.084211
| 0.494737
| 0.072664
| 0.072664
| 0.079585
| 0.342561
| 0.186851
| 0.186851
| 0.186851
| 0.186851
| 0
| 0
| 0.011782
| 0.160692
| 809
| 47
| 99
| 17.212766
| 0.83947
| 0
| 0
| 0.227273
| 0
| 0
| 0.134568
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0.272727
| 0.136364
| 0
| 0.590909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
3986c0e0bd792870f8eee7d99d0e2fa5761fa22e
| 1,429
|
py
|
Python
|
blueprints/accounts/manage/config.py
|
GetmeUK/h51
|
17d4003336857514765a42a0853995fbe3da6525
|
[
"MIT"
] | null | null | null |
blueprints/accounts/manage/config.py
|
GetmeUK/h51
|
17d4003336857514765a42a0853995fbe3da6525
|
[
"MIT"
] | 4
|
2021-06-08T22:58:13.000Z
|
2022-03-12T00:53:18.000Z
|
blueprints/accounts/manage/config.py
|
GetmeUK/h51
|
17d4003336857514765a42a0853995fbe3da6525
|
[
"MIT"
] | null | null | null |
from manhattan.manage import config
from manhattan.nav import Nav, NavItem
from blueprints.accounts.manage import blueprint
from blueprints.accounts.models import Account
__all__ = ['AccountConfig']
class AccountConfig(config.ManageConfig):
frame_cls = Account
blueprint = blueprint
@classmethod
def tabs(cls, view_type, document=None):
tabs = Nav.local_menu()
if view_type in ['api_log', 'change_log', 'activity', 'view']:
tabs.add(
NavItem(
'Details',
endpoint=AccountConfig.get_endpoint('view'),
view_args={'account': document._id}
)
)
tabs.add(
NavItem(
'Activity',
endpoint=AccountConfig.get_endpoint('activity'),
view_args={'account': document._id}
)
)
tabs.add(
NavItem(
'API log',
endpoint=AccountConfig.get_endpoint('api_log'),
view_args={'account': document._id}
)
)
tabs.add(
NavItem(
'Change log',
endpoint=AccountConfig.get_endpoint('change_log'),
view_args={'account': document._id}
)
)
return tabs
| 26.462963
| 70
| 0.491952
| 118
| 1,429
| 5.754237
| 0.338983
| 0.041237
| 0.082474
| 0.188513
| 0.32106
| 0.217968
| 0.172312
| 0.172312
| 0
| 0
| 0
| 0
| 0.416375
| 1,429
| 53
| 71
| 26.962264
| 0.814149
| 0
| 0
| 0.292683
| 0
| 0
| 0.091673
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.097561
| 0
| 0.219512
| 0.073171
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
398adc2cec18c8f88eebd57e5b5cd30a4eaccd31
| 5,280
|
py
|
Python
|
basket/BasketGlobals.py
|
Hartman-/Basket
|
7b9c174b031c9ffac2de886f5e149adcd5f7c83f
|
[
"BSD-3-Clause"
] | 2
|
2017-02-07T11:28:58.000Z
|
2017-12-01T05:41:36.000Z
|
basket/BasketGlobals.py
|
Hartman-/Basket
|
7b9c174b031c9ffac2de886f5e149adcd5f7c83f
|
[
"BSD-3-Clause"
] | 25
|
2016-08-18T01:16:59.000Z
|
2017-02-11T03:57:20.000Z
|
basket/BasketGlobals.py
|
Hartman-/Basket
|
7b9c174b031c9ffac2de886f5e149adcd5f7c83f
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import os
import platform
from glob import glob
import utils.appconfig as appconfig
# GLOBAL CONSTANTS
# --- File Structure Constants ---
BASE_DIRS = {
'delivery': [
'CritiqueArchive'
],
'docs': [],
'frames': [],
'library': [
'models',
'templates',
'sound',
'texture'
],
'publish': [],
'source': [
'plates',
'reference'
],
'working': [
'scenes',
'assets'
]}
PROD_DIRS = [
'scenes',
'publish'
]
STAGE_DIRS = appconfig.get_config_value('law', 'stages')
FRAME_DIRS = [
'cg',
'comp',
'edit',
'elements',
'plates'
]
# GLOBAL FUNCTIONS
def curOS():
currentOS = platform.system()
return currentOS
def rootDir():
curDir = os.path.expanduser('~') + '\\Desktop\\LAW_local\\'
# MAYA LOVES TO MAKE MY LIFE DIFFICULT
# THROWING \DOCUMENTS INTO SHIT
if 'Documents' in curDir:
curDir = curDir.replace('/', '\\').replace('\\Documents', '')
return curDir
def serverDir():
# \\awexpress.westphal.drexel.edu\digm_anfx\SRPJ_LAW\ALAW\renderman\HeroShipTurntable_v002_imh29_0\images
# curDir = os.path.expanduser('~') + '\\Desktop\\LAW_s\\'
curDir = '%s' % appconfig.get_config_value('project', 'projdir')
# MAYA LOVES TO MAKE MY LIFE DIFFICULT
# THROWING \DOCUMENTS INTO SHIT
if 'Documents' in curDir:
curDir = curDir.replace('/', '\\').replace('\\Documents', '')
return curDir
def getNukeScripts():
nkFiles = glob(os.path.join(nukeDir(), '*.nk'))
return nkFiles
def nukeDir():
curDir = os.path.join(rootDir(), 'working', 'scenes', os.getenv('SEQ'), os.getenv('SHOT'), '07. Comp')
if not os.path.isdir(curDir):
raise ValueError, '%s NUKE Directory does not exist' % curDir
return curDir
def serverStageDir(stage):
curDir = os.path.join(serverDir(), 'working', 'scenes', os.getenv('SEQ'), os.getenv('SHOT'), STAGE_DIRS[stage])
if not os.path.isdir(curDir):
raise ValueError, 'Stage Directory does not exist'
return curDir
def localFramesDir():
curDir = os.path.join(rootDir(), 'frames', os.getenv('SEQ'), os.getenv('SHOT'), 'plates')
if not os.path.isdir(curDir):
raise ValueError, 'Frames Directory does not exist'
return curDir
def stageDir(stage):
baseDir = os.path.join(serverDir(), 'working', 'scenes', os.getenv('SEQ'), os.getenv('SHOT'))
# Thanks for starting at Zero lists!
curDir = os.path.join(baseDir, STAGE_DIRS[stage])
if not os.path.isdir(curDir):
raise ValueError, 'File Directory does not exist: ' + curDir
return curDir
def publishDir(stage):
baseDir = os.path.join(serverDir(), 'publish', os.getenv('SEQ'), os.getenv('SHOT'))
# Thanks for starting at Zero lists!
curDir = os.path.join(baseDir, STAGE_DIRS[stage])
if not os.path.isdir(curDir):
raise ValueError, 'File Directory does not exist: ' + curDir
return curDir
def seqDir():
curDir = os.path.join(serverDir(), 'Frames', os.getenv('SEQ'), os.getenv('SHOT'), 'plates')
if not os.path.isdir(curDir):
raise ValueError, 'Frames Directory does not exist'
return curDir
def libraryDir(sub):
curDir = os.path.join(serverDir(), 'library', str(sub))
if not os.path.isdir(curDir):
raise ValueError, 'Library Directory does not exist'
return curDir
def framesDir():
curDir = os.path.join(serverDir(), 'Frames')
print curDir
if not os.path.isdir(curDir):
raise ValueError, 'Frames Directory does not exist'
return curDir
# SET SHOW ENV VARIABLE
def setShow(show):
os.environ['SHOW'] = str(show)
# SET SEQ ENV VARIABLE
def setSeq(seq):
os.environ['SEQ'] = str(seq)
# SET SHOT ENV VARIABLE
def setShot(shot):
os.environ['SHOT'] = str(shot)
def setStage(stage):
os.environ['LAWSTAGE'] = str(stage)
def stageNum():
return int(os.getenv('LAWSTAGE'))
def applicationPath(ext):
if type(ext) is not int:
paths = {
'.ma': appconfig.get_config_value('app', 'mayaexe'),
'.mb': appconfig.get_config_value('app', 'mayaexe'),
'.nk': appconfig.get_config_value('app', 'nukeexe'),
'.hip': appconfig.get_config_value('app', 'houdiniexe'),
'.hipnc': appconfig.get_config_value('app', 'houdiniexe'),
'.hiplc': appconfig.get_config_value('app', 'houdiniexe')
}
return paths[ext]
else:
paths = {
0: appconfig.get_config_value('app', 'mayaexe'),
1: appconfig.get_config_value('app', 'mayaexe'),
2: appconfig.get_config_value('app', 'mayaexe'),
3: appconfig.get_config_value('app', 'houdiniexe'),
4: appconfig.get_config_value('app', 'mayaexe'),
5: appconfig.get_config_value('app', 'mayaexe'),
6: appconfig.get_config_value('app', 'nukeexe'),
7: appconfig.get_config_value('app', 'premiereexe')
}
return paths[ext]
if __name__ == '__main__':
print serverDir()
| 27.076923
| 115
| 0.595455
| 606
| 5,280
| 5.09901
| 0.264026
| 0.040777
| 0.093204
| 0.119094
| 0.635275
| 0.601942
| 0.406472
| 0.383172
| 0.333981
| 0.333981
| 0
| 0.004067
| 0.254924
| 5,280
| 194
| 116
| 27.216495
| 0.781393
| 0.097727
| 0
| 0.290076
| 0
| 0
| 0.178353
| 0.004633
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.030534
| null | null | 0.015267
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
39a0dad5efbaf0ea7f66987d69ed3575a2e7b7d0
| 1,068
|
py
|
Python
|
python/easy/1342_Number_of_Steps_to_Reduce_a_Number_to_Zero.py
|
JackWang0107/leetcode
|
c02932190b639ef87a8d0fcd07d9cd6ec7344a67
|
[
"MIT"
] | 1
|
2021-05-22T03:27:33.000Z
|
2021-05-22T03:27:33.000Z
|
python/easy/1342_Number_of_Steps_to_Reduce_a_Number_to_Zero.py
|
JackWang0107/leetcode
|
c02932190b639ef87a8d0fcd07d9cd6ec7344a67
|
[
"MIT"
] | null | null | null |
python/easy/1342_Number_of_Steps_to_Reduce_a_Number_to_Zero.py
|
JackWang0107/leetcode
|
c02932190b639ef87a8d0fcd07d9cd6ec7344a67
|
[
"MIT"
] | null | null | null |
from typing import *
class Solution:
# 32 ms, faster than 53.97% of Python3 online submissions for Number of Steps to Reduce a Number to Zero.
# 14.2 MB, less than 35.20% of Python3 online submissions for Number of Steps to Reduce a Number to Zero.
def numberOfSteps(self, num: int) -> int:
ans = 0
while num != 0:
if num & 1:
num -= 1
else:
num = int(num/2 )
ans += 1
return ans
# 20 ms, faster than 98.79% of Python3 online submissions for Number of Steps to Reduce a Number to Zero.
# 14.3 MB, less than 35.20% of Python3 online submissions for Number of Steps to Reduce a Number to Zer
def numberOfSteps(self, num: int) -> int:
def rec(num):
if num == 0 or num == 1:
return num
elif num % 2:
return 2 + rec(num // 2)
else:
return 1 + rec(num // 2)
return rec(num)
if __name__ == "__main__":
so = Solution()
print(so.numberOfSteps(123))
| 34.451613
| 110
| 0.553371
| 155
| 1,068
| 3.76129
| 0.335484
| 0.06175
| 0.102916
| 0.178388
| 0.57976
| 0.57976
| 0.480274
| 0.480274
| 0.480274
| 0.480274
| 0
| 0.067747
| 0.364232
| 1,068
| 31
| 111
| 34.451613
| 0.790869
| 0.38764
| 0
| 0.173913
| 0
| 0
| 0.012308
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.043478
| 0
| 0.434783
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
39a92e95003cf25b12c9d62aa465b8c0ddd75afb
| 5,510
|
py
|
Python
|
HyperGui.py
|
MIC-Surgery-Heidelberg/HyperGUI_1.0
|
0ee8e0da85049076bb22a542d15d6c3adf6ea106
|
[
"MIT"
] | null | null | null |
HyperGui.py
|
MIC-Surgery-Heidelberg/HyperGUI_1.0
|
0ee8e0da85049076bb22a542d15d6c3adf6ea106
|
[
"MIT"
] | null | null | null |
HyperGui.py
|
MIC-Surgery-Heidelberg/HyperGUI_1.0
|
0ee8e0da85049076bb22a542d15d6c3adf6ea106
|
[
"MIT"
] | null | null | null |
"""
@author: Alexander Studier-Fischer, Jan Odenthal, Berkin Oezdemir, Isabella Camplisson, University of Heidelberg
"""
from HyperGuiModules import *
import logging
import os
#logging.basicConfig(level=logging.DEBUG)
xSize=None
ySize=None
def main():
(window, introduction, input_output, image_diagram, hist_calculation, spec_calculation, bp, circles, rectangles, crop, bs, measure, mda, spec_invest) = init()
listener = ModuleListener()
# introduction
Introduction(introduction)
# histogram calculation
HistCalculation(hist_calculation, listener)
# histogram calculation
SpecCalculation(spec_calculation, listener)
# analysis and form
analysis_and_form_frame = frame(input_output, BACKGROUND, 1, 0, 4, 2)
analysis_and_form_module = AnalysisAndForm(analysis_and_form_frame, listener)
listener.attach_module(ANALYSIS_AND_FORM, analysis_and_form_module)
# source and output
source_and_output_frame = frame(input_output, BACKGROUND, 0, 0, 1, 2)
source_and_output_module = SourceAndOutput(source_and_output_frame, listener)
listener.attach_module(SOURCE_AND_OUTPUT, source_and_output_module)
# save
save_frame = frame(input_output, BACKGROUND, 5, 0, 1, 1)
save_module = Save(save_frame, listener)
listener.attach_module(SAVE, save_module)
# save csvs
csv_frame = frame(input_output, BACKGROUND, 0, 2, 6, 1)
csv_module = CSVSaver(csv_frame, listener)
listener.attach_module(CSV, csv_module)
# info
info_frame = frame(input_output, BACKGROUND, 5, 1, 1, 1)
info_module = Info(info_frame, listener)
listener.attach_module(INFO, info_module)
# parameter specification
#parameter_frame = frame(input_output, BACKGROUND, 0, 3, 2, 1)
#parameter_module = Parameter(parameter_frame, listener)
#listener.attach_module(PARAMETER, parameter_module)
# original colour
og_color_frame = frame(image_diagram, BACKGROUND, 0, 0, 7, 6)
og_color_module = OGColour(og_color_frame, listener)
listener.attach_module(ORIGINAL_COLOUR, og_color_module)
# original colour data
og_color_data_frame = frame(image_diagram, BACKGROUND, 2, 12, 3, 2)
og_color_data_module = OGColourData(og_color_data_frame, listener)
listener.attach_module(ORIGINAL_COLOUR_DATA, og_color_data_module)
# recreated colour
recreated_color_frame = frame(image_diagram, BACKGROUND, 7, 0, 7, 3)
recreated_color_module = RecColour(recreated_color_frame, listener)
listener.attach_module(RECREATED_COLOUR, recreated_color_module)
# recreated colour data
rec_color_data_frame = frame(image_diagram, BACKGROUND, 5, 12, 4, 2)
rec_color_data_module = RecreatedColourData(rec_color_data_frame, listener)
listener.attach_module(RECREATED_COLOUR_DATA, rec_color_data_module)
# new colour
new_color_frame = frame(image_diagram, BACKGROUND, 7, 3, 7, 3)
new_color_module = NewColour(new_color_frame, listener)
listener.attach_module(NEW_COLOUR, new_color_module)
# new colour data
new_color_data_frame = frame(image_diagram, BACKGROUND, 9, 12, 3, 2)
new_color_data_module = NewColourData(new_color_data_frame, listener)
listener.attach_module(NEW_COLOUR_DATA, new_color_data_module)
# diagram
diagram_frame = frame(image_diagram, BACKGROUND, 0, 12, 2, 2)
diagram_module = Diagram(diagram_frame, listener)
listener.attach_module(DIAGRAM, diagram_module)
# histogram
histogram_frame = frame(image_diagram, BACKGROUND, 0, 6, 8, 6)
histogram_module = Histogram(histogram_frame, listener)
listener.attach_module(HISTOGRAM, histogram_module)
# absorption
absorption_spec_frame = frame(image_diagram, BACKGROUND, 8, 6, 6, 6)
absorption_module = AbsorptionSpec(absorption_spec_frame, listener)
listener.attach_module(ABSORPTION_SPEC, absorption_module)
# Batch Processing
BP_frame = frame(bp, BACKGROUND, 0, 0, 16, 16)
BP_module = BP(BP_frame, listener)
listener.attach_module(BP, BP_module)
rectangles_frame = frame(rectangles, BACKGROUND, 0, 0, 16, 16)
rectangles_module = Rectangle(rectangles_frame, listener)
listener.attach_module(rectangles, rectangles_module)
circles_frame = frame(circles, BACKGROUND, 0, 0, 16, 16)
circles_module = Circle(circles_frame, listener)
listener.attach_module(circles, circles_module)
BS_frame = frame(bs, BACKGROUND, 0, 0, 16, 16)
BS_module = BS(BS_frame, listener)
listener.attach_module(BS, BS_module)
measure_frame = frame(measure, BACKGROUND, 0, 0, 16, 16)
measure_module = Measure(measure_frame, listener)
listener.attach_module(MEASURE, measure_module)
crops_frame = frame(crop, BACKGROUND, 0, 0, 16, 16)
crops_module = crops(crops_frame, listener)
listener.attach_module(crop, crops_module)
mda_frame = frame(mda, BACKGROUND, 0, 0, 16, 16)
mda_module = MDA(mda_frame, listener)
listener.attach_module(mda, mda_module)
spec_invest_frame = frame(spec_invest, BACKGROUND, 0, 0, 16, 16)
spec_invest_module = SpecInvest(spec_invest_frame, listener)
listener.attach_module(spec_invest, spec_invest_module)
# colourbar
colour_frame = frame(image_diagram, BACKGROUND, 12, 12, 2, 2)
colour_module = Colour(colour_frame, listener)
if xSize is not None and ySize is not None:
window.geometry(str(xSize) + "x" + str(ySize))
window.mainloop()
if __name__ == '__main__':
main()
| 36.979866
| 162
| 0.741561
| 712
| 5,510
| 5.425562
| 0.15309
| 0.062128
| 0.125032
| 0.160756
| 0.494693
| 0.267668
| 0.179912
| 0
| 0
| 0
| 0
| 0.025849
| 0.171506
| 5,510
| 148
| 163
| 37.22973
| 0.820372
| 0.111797
| 0
| 0
| 0
| 0
| 0.001851
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011905
| false
| 0
| 0.035714
| 0
| 0.047619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
39ab88cab3f3527e44f2aa4992feac019e41f3f0
| 2,120
|
py
|
Python
|
PA2_Optical_Flow.py
|
tianzixie/CAP5415PA2
|
6a7f4b1f178f10b37d588e698eddd013ce193544
|
[
"MIT"
] | null | null | null |
PA2_Optical_Flow.py
|
tianzixie/CAP5415PA2
|
6a7f4b1f178f10b37d588e698eddd013ce193544
|
[
"MIT"
] | null | null | null |
PA2_Optical_Flow.py
|
tianzixie/CAP5415PA2
|
6a7f4b1f178f10b37d588e698eddd013ce193544
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 26 08:19:16 2017
@author: 0
"""
from scipy.misc import imresize
from scipy.signal import convolve,convolve2d
import scipy
from PIL import Image
import cv2
import numpy as np
img = cv2.imread("C://Users/0/Downloads/basketball1.png",0)
img2 = cv2.imread("C://Users/0/Downloads/basketball2.png",0)
#cv2.imshow('img',img)
#cv2.imshow('img2',img2)
k=(3,3)
print img
img = cv2.GaussianBlur(img, k, 1.5)
img2 = cv2.GaussianBlur(img2, k, 1.5)
cv2.imshow('img3',img)
#cv2.waitKey(10000)
cv2.destroyAllWindows()
imga=np.matrix(img)
imga2=np.matrix(img2)
#print imga
#img=Image.fromarray(imga)
#img.show()
height,width = imga.shape
#for x in range img(x,0):
print imga.shape
print height ,width
# print x
#for y in height:
# for x in width:
# print '0'
#for y in range(height):
print imga
#imga[0,1]=imga[0,1]+1
#print imga
def fx(y,x):
fx=(int(imga[y,x+1])-int(imga[y,x]))/1
return fx
def fy(y,x):
fy=(int(imga[y+1,x])-int(imga[y,x]))/1
return fy
print fx(1,0),fy(0,4)
imga=imresize(imga,(240,320))
imga2=imresize(imga2,(240,320))
print imga,imga.shape,imga2,imga2.shape
u=np.zeros([240,320])
v=np.zeros([240,320])
w2=30
w=15
#for i in range(w2):
# for y in range(w2):
#
#
# print matrix
#matrix=np.zeros([w2,w2])
#
#for x in range(w,240-w):
#
# for y in range(w,320-w):
# c=0
## matrix[w,w]=x
# print x,y
#print matrix
#def conv2(x, y, mode='same'):
# return np.rot90(convolve2d(np.rot90(x, 2), np.rot90(y, 2), mode=mode), 2)
#print convolve2d(imga2,matrix,'valid')
'''
ft = scipy.signal.convolve2d(imga, 0.25 * np.ones((2,2))) + \
scipy.signal.convolve2d(imga2, -0.25 * np.ones((2,2)))
#print ft
fx,fy=np.gradient(cv2.GaussianBlur(img, k, 1.5))
fx = fx[0:478, 0:638]
fy = fy[0:478, 0:638]
ft = ft[0:478, 0:638]
#print fx,fy,ft
'''
'''
for i in range(w+1,480-w):
for j in range(w+1,640-w):
Ix = fx[i-w:i+w, j-w:j+w]
Iy = fy[i-w:i+w, j-w:j+w]
It = ft[i-w:i+w, j-w:j+w]
A = [Ix,Iy]
print fx,fy,ft
'''
#C=A.T*-It
#print C
#print curFx,curFy,curFt,U[0],U[1]
| 20.784314
| 78
| 0.618868
| 416
| 2,120
| 3.153846
| 0.240385
| 0.042683
| 0.01372
| 0.025152
| 0.137195
| 0.129573
| 0.018293
| 0.018293
| 0
| 0
| 0
| 0.098006
| 0.17217
| 2,120
| 101
| 79
| 20.990099
| 0.649573
| 0.315566
| 0
| 0
| 0
| 0
| 0.08764
| 0.083146
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.176471
| null | null | 0.176471
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
39ab9e369da24d4871a1bbc5c6f073cf0d4fed1f
| 743
|
py
|
Python
|
Test_data/database.py
|
mayowak/SQLite_test
|
a1185650dffe360d033e0691567ec2b2e075cae5
|
[
"MIT"
] | null | null | null |
Test_data/database.py
|
mayowak/SQLite_test
|
a1185650dffe360d033e0691567ec2b2e075cae5
|
[
"MIT"
] | null | null | null |
Test_data/database.py
|
mayowak/SQLite_test
|
a1185650dffe360d033e0691567ec2b2e075cae5
|
[
"MIT"
] | null | null | null |
#!usr/bin/env python3
#import dependecies
import sqlite3
import csv
#connect to test_data
conn = sqlite3.connect('test_data.db')
#create a cursor
c = conn.cursor()
c.execute("DROP TABLE test_data")
#create a test_data table
c.execute("""CREATE TABLE test_data(age integer,
sex text,
bmi real,
children integer,
smoker text,
region text)""")
#get test_data file
get_file = open('test_data.csv')
#read test_data file
read_file = csv.reader(get_file)
c.executemany("INSERT INTO test_data VALUES (?, ?, ?, ?, ?, ?)", read_file)  # one placeholder per column defined above
conn.commit()
conn.close()
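# A hedged verification sketch, assuming the test_data.db file created above: re-open
# the database and count the loaded rows.
check = sqlite3.connect('test_data.db')
print(check.execute("SELECT COUNT(*) FROM test_data").fetchone()[0], "rows loaded")
check.close()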
| 22.515152
| 78
| 0.549125
| 88
| 743
| 4.488636
| 0.465909
| 0.182278
| 0.065823
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00616
| 0.344549
| 743
| 33
| 79
| 22.515152
| 0.804928
| 0.18035
| 0
| 0
| 0
| 0
| 0.620232
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
39b39323fb50875fc0c540df3d833adc6f094d24
| 2,583
|
py
|
Python
|
definition.example.py
|
JoshData/represent-boundaries
|
0a77bad99758bc77140c6c6def4f8d5e68810367
|
[
"MIT"
] | 2
|
2016-07-05T06:10:21.000Z
|
2016-10-20T17:55:13.000Z
|
definition.example.py
|
JoshData/represent-boundaries
|
0a77bad99758bc77140c6c6def4f8d5e68810367
|
[
"MIT"
] | null | null | null |
definition.example.py
|
JoshData/represent-boundaries
|
0a77bad99758bc77140c6c6def4f8d5e68810367
|
[
"MIT"
] | 2
|
2016-07-05T06:10:25.000Z
|
2020-03-04T02:22:24.000Z
|
from datetime import date
import boundaries
boundaries.register('federal-electoral-districts', # The slug of the boundary set
# The name of the boundary set for display.
name='Federal electoral districts',
# Generic singular name for a boundary from this set. Optional if the
# boundary set's name ends in "s".
singular='Federal electoral district', # If this were omitted, the same value would be generated
# Geographic extents which the boundary set encompasses
domain='Canada',
# Path to the shapefile directory. Relative to the current file, so if this file
# is in the same directory as the shapefile -- usually the case -- you can omit
# this parameter.
file='',
# Last time the source was updated or checked for new data
last_updated=date(1970, 1, 1),
# A function that's passed the feature and should return a name string
# The boundaries model provides some simple function factories for this.
name_func=boundaries.clean_attr('FEDENAME'),
# Function to extract a feature's "external_id" property
id_func=boundaries.attr('FEDUID'),
# Function to provide the slug (URL component) of the boundary
# If not provided, uses the name to generate the slug; this is usually
# what you want.
#slug_func=boundaries.attr('FEDUID'),
# Function that returns true/false to determine whether a given feature should be included
# By default, all features are included.
#is_valid_func=lambda f: True,
# Authority that is responsible for the accuracy of this data
authority='H.R.M. Queen Elizabeth II',
# A URL to the source of this data
source_url='http://www12.statcan.gc.ca/census-recensement/2011/geo/bound-limit/bound-limit-eng.cfm',
# A URL to the license for this data
licence_url='http://www12.statcan.gc.ca/census-recensement/2011/geo/bound-limit/license-eng.cfm?lang=_e&year=11&type=fed000a&format=a',
# A URL to the data file, e.g. a ZIP archive
data_url='http://www12.statcan.gc.ca/census-recensement/2011/geo/bound-limit/files-fichiers/gfed000a11a_e.zip',
# Notes identifying any pecularities about the data, such as columns that
# were deleted or files which were merged
notes='',
# Encoding of the text fields in the shapefile, e.g. 'utf-8'. Default: 'ascii'
encoding='iso-8859-1',
# Used only by the represent-maps app -- if you're not using that, ignore label_point_func.
# A function from a feature object to a Point where to display a label for feature on a map.
#label_point_func = lambda feature: None,
)
| 52.714286
| 139
| 0.722416
| 401
| 2,583
| 4.610973
| 0.453865
| 0.029746
| 0.030287
| 0.014602
| 0.127096
| 0.092482
| 0.092482
| 0.092482
| 0.092482
| 0.092482
| 0
| 0.019185
| 0.192799
| 2,583
| 48
| 140
| 53.8125
| 0.867626
| 0.629113
| 0
| 0
| 0
| 0.176471
| 0.474649
| 0.029126
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.117647
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
39b549fc5da98ce81d958623dcf67a57d0a50eec
| 2,962
|
py
|
Python
|
tyo_mq_client/publisher.py
|
e-tang/tyo-mq-client-python
|
82ea47bf8cf8a924b515149456eaecb5557a0f3e
|
[
"MIT"
] | null | null | null |
tyo_mq_client/publisher.py
|
e-tang/tyo-mq-client-python
|
82ea47bf8cf8a924b515149456eaecb5557a0f3e
|
[
"MIT"
] | 1
|
2018-06-19T23:42:27.000Z
|
2018-06-20T07:06:25.000Z
|
tyo_mq_client/publisher.py
|
e-tang/tyo-mq-client-python
|
82ea47bf8cf8a924b515149456eaecb5557a0f3e
|
[
"MIT"
] | null | null | null |
#
#
from .subscriber import Subscriber
from .logger import Logger
from .constants import Constants
from .events import Events
#
import json
class Publisher(Subscriber):
def __init__(self, name, eventDefault=None, host=None, port=None, protocol=None):
super(Publisher, self).__init__(name, host, port, protocol)
self.type = 'PRODUCER'
self.eventDefault = eventDefault if eventDefault is not None else Constants.EVENT_DEFAULT
self.on_subscription_listener = None
self.subscribers = {}
# // Initialisation
futureFunc = lambda : self.set_on_subscription_listener()
self.add_on_connect_listener(futureFunc)
#
Logger.debug("creating producer: " + self.name)
def broadcast (self, data, event=None):
self.produce(data, event, Constants.METHOD_BROADCAST)
def produce (self, data, event=None, method=None) :
if (data is None):
raise Exception("data can't be null")
if (event is None):
if (self.eventDefault is None):
raise Exception("please specifiy event")
else:
event = self.eventDefault
message = {"event":event, "message":data, "from":self.name, "method":method}
self.send_message('PRODUCE', message)
# /**
# * On Subscribe
# */
def __on_subscription (self, data) :
Logger.log("Received subscription information: " + json.dumps(data))
self.subscribers[data["id"]] = data
# // further listener
if (self.on_subscription_listener is not None):
self.on_subscription_listener(data)
def set_on_subscription_listener (self) :
event = Events.to_onsubscribe_event(self.get_id())
self.on(event, self.__on_subscription)
# /**
# * On Lost connections with subscriber(s)
# */
def __on_lost_subscriber (self, callback, data) :
Logger.log("Lost subscriber's connection")
if (callback is not None):
callback(data)
def set_on_subscriber_lost_listener (self, callback) :
event = Events.to_ondisconnect_event(self.get_id())
futureFunc = lambda data : (lambda data, cb=callback : self.__on_lost_subscriber(cb, data))(data)
self.on(event, futureFunc)
def on_subscriber_lost (self, callback) :
self.set_on_subscriber_lost_listener(callback)
# /**
# * On Unsubsribe
# */
def __on_unsubscribed (self, callback, data) :
if callback is not None:
callback(data)
def set_on_unsubscribed_listener (self, event, callback) :
event = Events.to_onunsubscribe_event(event, self.get_id())
futureFunc = lambda data : (lambda data, cb=callback: self.__on_unsubscribed(cb, data))(data)
self.on(event, futureFunc)
def on_unsubscribed (self, event, callback) :
self.set_on_unsubscribed_listener(event, callback)
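# A hedged usage sketch, assuming a reachable tyo-mq server; the producer name, event
# and payloads below are illustrative only:
#
#     pub = Publisher('sensor-feed', eventDefault='TEMPERATURE')
#     pub.produce({'celsius': 21.5})              # goes to subscribers of the default event
#     pub.broadcast({'celsius': 21.5}, 'ALERTS')  # sent with Constants.METHOD_BROADCAST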
| 33.280899
| 105
| 0.641458
| 338
| 2,962
| 5.408284
| 0.218935
| 0.02954
| 0.060175
| 0.04267
| 0.206236
| 0.147702
| 0.147702
| 0.147702
| 0.147702
| 0.108315
| 0
| 0
| 0.255571
| 2,962
| 88
| 106
| 33.659091
| 0.829025
| 0.045915
| 0
| 0.074074
| 0
| 0
| 0.05694
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.203704
| false
| 0
| 0.092593
| 0
| 0.314815
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
39b57868be76cc021f5f1127464558d697a138df
| 3,560
|
py
|
Python
|
app/authenticate.py
|
directedbyshawn/Secure-Login
|
15f2a6168986b11ffbde318333415671fb62578f
|
[
"MIT"
] | null | null | null |
app/authenticate.py
|
directedbyshawn/Secure-Login
|
15f2a6168986b11ffbde318333415671fb62578f
|
[
"MIT"
] | null | null | null |
app/authenticate.py
|
directedbyshawn/Secure-Login
|
15f2a6168986b11ffbde318333415671fb62578f
|
[
"MIT"
] | null | null | null |
'''
Authentication methods for cs166 final project.
'''
import random, hashlib
from .db import retrieve_accounts
lower_case = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
upper_case = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
nums = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']
special = ['!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '?', '[', ']', '{', '}', ':', ';', '"', '/', '.', ',', '<', '>']
def authenticate(username, password):
''' Authenticates user upon login '''
# retrieves users from database
users = retrieve_accounts()
stored_username = ''
stored_password = ''
# finds user in records
for user in users:
if user[0] == username:
stored_username = user[0]
stored_password = user[1]
# if user is not found, false
if (stored_username == '' or stored_password == ''):
return False
# retrieves salt and stored password from pw string
salt_length = 40
salt = stored_password[:salt_length]
stored_hash = stored_password[salt_length:]
# compares inputted password with hash and returns result
hashable = salt + password
hashable = hashable.encode('utf-8')
this_hash = hashlib.sha1(hashable).hexdigest()
return this_hash == stored_hash
def verify_new_account(username, password):
'''
Method used to determine if new account credentials are valid
Parameters:
username (str) : username entered by user
password (str) : password entered by user
Returns:
status (bool) : status of if the new credentials are good or not
'''
global lower_case, upper_case, nums, special
# retrieves all users from db and makes a list of all usernames
users = retrieve_accounts()
taken_usernames = []
for accounts in users:
taken_usernames.append(accounts[0])
# status of whether or not password contains the requirements
requirement_one = len(password) >= 8
requirement_two = len(password) <= 25
requirement_three = username not in taken_usernames
requirement_lower = False
requirement_upper = False
requirement_nums = False
requirement_special = False
for char in password:
if char in lower_case:
requirement_lower = True
if char in upper_case:
requirement_upper = True
if char in nums:
requirement_nums = True
if char in special:
requirement_special = True
# SQL injection prevention
for char in username:
if char in special:
return False
status = False
if (requirement_one and requirement_two and requirement_three and requirement_lower and requirement_upper and requirement_nums and requirement_special):
status = True
return status
def random_password():
'''
Function to return randomly generated password
Returns:
password (str) : randomly generated password
'''
global lower_case, upper_case, nums, special
chars = [lower_case, upper_case, nums, special]
password_length = random.randint(12, 16)
password = ''
for i in range(password_length):
lib = chars[random.randint(0, 3)]
char = lib[random.randint(0, len(lib) - 1)]
password += char
return password
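# A hedged companion sketch showing how a stored credential compatible with
# authenticate() above could be built: a 40-character salt prefix followed by
# sha1(salt + password). The helper name make_stored_password is illustrative only,
# and sha1 is used purely to mirror the parsing logic above.
def make_stored_password(password):
    salt = ''.join(random.choice(lower_case + upper_case + nums) for _ in range(40))
    hashable = (salt + password).encode('utf-8')
    return salt + hashlib.sha1(hashable).hexdigest()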
| 28.709677
| 156
| 0.589045
| 431
| 3,560
| 4.740139
| 0.334107
| 0.020558
| 0.019579
| 0.026432
| 0.077827
| 0.077827
| 0.063632
| 0.029369
| 0.029369
| 0.029369
| 0
| 0.01236
| 0.272753
| 3,560
| 124
| 157
| 28.709677
| 0.776748
| 0.214607
| 0
| 0.129032
| 0
| 0
| 0.033784
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048387
| false
| 0.274194
| 0.032258
| 0
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
39c13236092aa20981aa814b36bf7e898a69daef
| 343
|
py
|
Python
|
app.py
|
victorathanasio/KPI-test
|
cbc24ebc9b6e9304c7ff0428458c827d09bd99aa
|
[
"MIT"
] | null | null | null |
app.py
|
victorathanasio/KPI-test
|
cbc24ebc9b6e9304c7ff0428458c827d09bd99aa
|
[
"MIT"
] | null | null | null |
app.py
|
victorathanasio/KPI-test
|
cbc24ebc9b6e9304c7ff0428458c827d09bd99aa
|
[
"MIT"
] | null | null | null |
from WebApp.mainapp import app
import dash_html_components as html
import flask
from REST_API.rest_api import API
from WebApp.Layout import Layout
app.layout = Layout()
app.server.register_blueprint(API)
server = app.server
if __name__ == '__main__':
# app.run_server(debug=False, host='0.0.0.0', port=90)
app.run_server(debug=True)
| 24.5
| 58
| 0.766764
| 56
| 343
| 4.428571
| 0.482143
| 0.024194
| 0.096774
| 0.137097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020067
| 0.12828
| 343
| 13
| 59
| 26.384615
| 0.809365
| 0.151604
| 0
| 0
| 0
| 0
| 0.027682
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
39cd57d3e96930bf2512f61084f0ec5dbd909936
| 2,129
|
py
|
Python
|
django_project/apps/qfauth/forms.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | null | null | null |
django_project/apps/qfauth/forms.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | 27
|
2020-02-12T07:55:58.000Z
|
2022-03-12T00:19:09.000Z
|
django_project/apps/qfauth/forms.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | 2
|
2020-02-18T01:54:55.000Z
|
2020-02-21T11:36:28.000Z
|
from django import forms
from apps.forms import FormMixin
from django.core import validators
from .models import User
from django.core.cache import cache
class LoginForm(forms.Form,FormMixin):
telephone = forms.CharField(max_length=11,min_length=11)
password = forms.CharField(max_length=30,min_length=6,error_messages={"max_length":"密码最多不能超过30个字符","min_length":"密码最少不能少于6个字符"})
remember = forms.IntegerField(required=False)
class RegisterForm(forms.Form,FormMixin):
telephone = forms.CharField(max_length=11, min_length=11,validators=[validators.RegexValidator(r'1[3-9]\d{9}',message="请输入正确的手机号")])
username = forms.CharField(max_length=30)
password1 = forms.CharField(max_length=30,min_length=6,error_messages={"max_length":"密码最多不能超过30个字符","min_length":"密码最少不能少于6个字符"})
password2 = forms.CharField(max_length=30,min_length=6,error_messages={"max_length":"密码最多不能超过30个字符","min_length":"密码最少不能少于6个字符"})
img_captcha = forms.CharField(max_length=4,min_length=4)
sms_captcha = forms.CharField(max_length=4,min_length=4)
def clean(self):
cleaned_data = super(RegisterForm, self).clean()
password1 = cleaned_data.get('password1')
password2 = cleaned_data.get('password2')
if password1 != password2:
raise forms.ValidationError('两次密码输入不一致')
# validate the image captcha
img_captcha = cleaned_data.get('img_captcha')  # value entered by the user
cache_img_captcha = cache.get(img_captcha.lower())  # value stored in the cache
print(cache_img_captcha)
if not cache_img_captcha or img_captcha.lower() != cache_img_captcha.lower():
raise forms.ValidationError('图形验证码输入错误')
# validate the SMS captcha
telephone = cleaned_data.get('telephone')
sms_captcha = cleaned_data.get('sms_captcha')  # value entered by the user
cache_sms_captcha = cache.get(telephone)  # value stored in the cache
if not cache_sms_captcha or sms_captcha.lower() != cache_sms_captcha.lower():
raise forms.ValidationError('短信验证码输入错误')
exists = User.objects.filter(telephone=telephone).exists()
if exists:
raise forms.ValidationError('该手机号已经被注册')
return cleaned_data
| 43.44898
| 136
| 0.716768
| 263
| 2,129
| 5.596958
| 0.277567
| 0.067255
| 0.092391
| 0.125
| 0.394022
| 0.326766
| 0.326766
| 0.326766
| 0.326766
| 0.27106
| 0
| 0.024803
| 0.166745
| 2,129
| 48
| 137
| 44.354167
| 0.804961
| 0.01597
| 0
| 0
| 0
| 0
| 0.115053
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0.166667
| 0.138889
| 0
| 0.5
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
39d5975250cb33441f80fb188d15a624f07f6415
| 4,216
|
py
|
Python
|
GraphOfDocs.py
|
NC0DER/GraphOfDocs
|
16603de9d8695ae8205117aa7123707d1dcbe0e0
|
[
"Apache-2.0"
] | 12
|
2020-01-27T20:26:08.000Z
|
2022-03-10T14:45:09.000Z
|
GraphOfDocs.py
|
NC0DER/GraphOfDocs
|
16603de9d8695ae8205117aa7123707d1dcbe0e0
|
[
"Apache-2.0"
] | 1
|
2021-11-17T11:45:55.000Z
|
2021-11-17T11:45:55.000Z
|
GraphOfDocs.py
|
NC0DER/GraphOfDocs
|
16603de9d8695ae8205117aa7123707d1dcbe0e0
|
[
"Apache-2.0"
] | 2
|
2020-01-27T13:17:11.000Z
|
2020-01-29T09:35:22.000Z
|
import sys
import platform
from neo4j import ServiceUnavailable
from GraphOfDocs.neo4j_wrapper import Neo4jDatabase
from GraphOfDocs.utils import generate_words, read_dataset, clear_screen
from GraphOfDocs.parse_args import parser
from GraphOfDocs.create import *
def graphofdocs(create, initialize, dirpath, window_size,
extend_window, remove_stopwords, lemmatize, stem):
# List that retains the skipped filenames.
skipped = []
current_system = platform.system()
# Open the database.
try:
database = Neo4jDatabase('bolt://localhost:7687', 'neo4j', '123')
# Neo4j server is unavailable.
# This client app cannot open a connection.
except ServiceUnavailable as error:
print('\t* Neo4j database is unavailable.')
print('\t* Please check the database connection before running this app.')
input('\t* Press any key to exit the app...')
sys.exit(1)
if create:
# Delete nodes from previous iterations.
database.execute('MATCH (n) DETACH DELETE n', 'w')
# Create uniqueness constraint on key to avoid duplicate word nodes.
database.execute('CREATE CONSTRAINT ON (word:Word) ASSERT word.key IS UNIQUE', 'w')
# Read text from files, which becomes a string in a list called dataset.
dataset = read_dataset(dirpath)
count = 1
total_count = len(dataset)
# Iterate all file records of the dataset.
for filename, file in dataset:
# Print the number of the currently processed file.
print(f'Processing {count} out of {total_count} files...' )
# Generate the terms from the text of each file.
words = generate_words(file, extend_window, remove_stopwords, lemmatize, stem)
# Create the graph of words in the database.
value = create_graph_of_words(words, database, filename, window_size)
if value is not None:
skipped.append(value)
# Update the progress counter.
count = count + 1
# Clear the screen to output the update the progress counter.
clear_screen(current_system)
# Count all skipped files and write their filenames in skipped.log
skip_count = len(skipped)
print(f'Created {total_count - skip_count}, skipped {skip_count} files.')
print('Check skipped.log for info.')
with open('skipped.log', 'w') as log:
for item in skipped:
log.write(item + '\n')
if initialize:
# Run initialization functions.
run_initial_algorithms(database)
create_similarity_graph(database)
create_clustering_tags(database)
database.close()
return
if __name__ == '__main__':
# If only one argument is specified,
# Then it's the script name.
# Print help for using the script and exit.
if len(sys.argv) == 1:
parser.print_help()
parser.exit()
# Parse all arguments from terminal.
args = parser.parse_args()
# If create flag is set but no dirpath is specified, print error.
if args.create and args.dirpath is None:
parser.error('Please set the dirpath flag and specify a valid filepath!')
# Else if create flag is specified along with a valid dirpath.
elif args.create:
print(args)
# Run the graphofdocs function with create and initialize set to True.
# The first argument (0th index) after the dirpath flag is the actual directory path.
graphofdocs(True, True, args.dirpath[0], args.window_size[0],
args.extend_window, args.insert_stopwords, args.lemmatize, args.stem)
# Else if reinitialize flag is specified, unset the create flag.
elif args.reinitialize:
print(args)
# Run the graphofdocs function with create set to False and initialize set to True.
# We also set the directory path to None, since its not needed.
graphofdocs(False, True, None, args.window_size[0],
args.extend_window, args.insert_stopwords, args.lemmatize, args.stem)
| 43.916667
| 95
| 0.64777
| 530
| 4,216
| 5.064151
| 0.349057
| 0.022355
| 0.013413
| 0.020119
| 0.131893
| 0.115499
| 0.085693
| 0.085693
| 0.052906
| 0.052906
| 0
| 0.007234
| 0.2787
| 4,216
| 95
| 96
| 44.378947
| 0.87537
| 0.307638
| 0
| 0.067797
| 1
| 0
| 0.166607
| 0.007508
| 0
| 0
| 0
| 0
| 0.016949
| 1
| 0.016949
| false
| 0
| 0.118644
| 0
| 0.152542
| 0.135593
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
39dce94f390b2bc845f4a4548517b2bf61e50466
| 5,711
|
py
|
Python
|
CiscoWebAuthManager.py
|
darizotas/ciscowebauth
|
aaac65b5e78fe3246f0d4dedaf44eea4d8d293cb
|
[
"BSD-3-Clause"
] | 1
|
2018-01-22T04:43:39.000Z
|
2018-01-22T04:43:39.000Z
|
CiscoWebAuthManager.py
|
darizotas/ciscowebauth
|
aaac65b5e78fe3246f0d4dedaf44eea4d8d293cb
|
[
"BSD-3-Clause"
] | null | null | null |
CiscoWebAuthManager.py
|
darizotas/ciscowebauth
|
aaac65b5e78fe3246f0d4dedaf44eea4d8d293cb
|
[
"BSD-3-Clause"
] | null | null | null |
"""Script that establishes a session in a wireless network managed by Cisco Web Authentication.
This script requests for re-establishing a session in a wireless network managed by Cisco Web
Authentication.
Copyright 2013 Dario B. darizotas at gmail dot com
This software is licensed under a new BSD License.
Unported License. http://opensource.org/licenses/BSD-3-Clause
"""
from wlanapi.wlanapiwrapper import *
from wlanapi.wlanconninfo import *
from webauth.CiscoWebAuth import *
import sys
import argparse
import ssl
class CiscoWebAuthManager:
"""Class responsible for loging-in/out from wireless networks managed by Cisco Web Authentication.
"""
def __init__(self):
"""Initialises the class."""
self.crawler = CiscoWebAuthCrawler()
def isConnected(self, ssid):
"""Returns true whether it is currently connected to the Wlan identified by the given
ssid.
"""
try:
info = WlanConnInfo()
connected = info.isConnected(ssid)
del info
return connected
except WlanConnError as err:
del info
print err
return False
def _parseError(self, body):
"""Checks for an error or informative message"""
msg = self.crawler.getMessage(body, 'err')
if msg:
print msg
else:
# Check whether for an informative message.
msg = self.crawler.getMessage(body, 'info')
if msg:
print msg
else:
print 'I don\'t know how we arrived here. Check the Web:'
print body
def login(self, host, username, password):
"""Logs in to the wireless network"""
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
connection = httplib.HTTPSConnection(host, context=context)
url = "/login.html"
params = urllib.urlencode({\
'buttonClicked': 4, \
'err_flag': 0, 'err_msg': '', 'info_flag': 0, 'info_msg': '', \
'redirect_url': '', 'username': username, 'password': password \
})
headers = {\
'Content-Type': 'application/x-www-form-urlencoded', \
}
print "Connecting Cisco Web Authentication..."
try:
connection.request("POST", url, params, headers)
response = connection.getresponse()
except (httplib.HTTPException, socket.error) as ex:
print ex
return False
# 100 Continue.
if response.status == 200:
body = response.read()
if self.crawler.isConnected(body):
print 'Session re-established!'
else:
self._parseError(body)
else:
print response.status, response.reason
connection.close()
return True
def logout(self, host):
"""Logs out from the wireless network"""
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
connection = httplib.HTTPSConnection(host, context=context)
url = "/logout.html"
params = urllib.urlencode({\
# 'Logout': 'Logout', \
'err_flag': 0, 'err_msg': '', 'userStatus': 1 \
})
headers = {\
'Content-Type': 'application/x-www-form-urlencoded', \
}
print "Connecting Cisco Web Authentication..."
try:
connection.request("POST", url, params, headers)
response = connection.getresponse()
except (httplib.HTTPException, socket.error) as ex:
print ex
return False
# 100 Continue.
if response.status == 200:
body = response.read()
if self.crawler.isDisconnected(body):
print 'Session ended!'
else:
self._parseError(body)
else:
print response.status, response.reason
connection.close()
return True
# Main
def login(args):
"""Wrapper function to use through argparse to login to the wireless network"""
manager = CiscoWebAuthManager()
if manager.isConnected(args.ssid):
if not manager.login(args.host, args.user, args.pwd):
sys.exit(1)
else:
print "Not associated to %s. There is nothing to do." % args.ssid
def logout(args):
"""Wrapper function to use through argparse to logout to the wireless network"""
manager = CiscoWebAuthManager()
if manager.isConnected(args.ssid):
if not manager.logout(args.host):
sys.exit(1)
else:
print "Not associated to %s. There is nothing to do." % args.ssid
# Top-level argument parser
parser = argparse.ArgumentParser(description='Establishes a session in a wireless network managed ' \
'by Cisco Web Authentication.')
# SSID wireless network param
parser.add_argument('ssid', help='SSID name of the wireless network')
parser.add_argument('host', help='Cisco Web Authentication hostname or IP')
subparser = parser.add_subparsers(title='sub-commands', help='Available sub-commands')
# Login sub-command
parserCmdLogin = subparser.add_parser('login', help='Login request')
parserCmdLogin.add_argument('-u', '--user', required=True, help='User name')
parserCmdLogin.add_argument('-p', '--pwd', required=True, help='Password')
parserCmdLogin.set_defaults(func=login)
# Logout sub-command
parserCmdLogout = subparser.add_parser('logout', help='Logout request')
parserCmdLogout.set_defaults(func=logout)
args = parser.parse_args()
args.func(args)
sys.exit(0)
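# A hedged usage note derived from the argparse definitions above; the placeholder
# values are illustrative:
#   python CiscoWebAuthManager.py <ssid> <host> login -u <username> -p <password>
#   python CiscoWebAuthManager.py <ssid> <host> logout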
| 34.823171
| 102
| 0.615654
| 633
| 5,711
| 5.507109
| 0.327014
| 0.038726
| 0.044177
| 0.019507
| 0.496845
| 0.470166
| 0.470166
| 0.443775
| 0.420252
| 0.420252
| 0
| 0.007089
| 0.283663
| 5,711
| 164
| 103
| 34.823171
| 0.845026
| 0.03502
| 0
| 0.551724
| 0
| 0
| 0.152089
| 0.01436
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.025862
| 0.051724
| null | null | 0.12931
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
39e14dad20bbe0a515df5d2bbdc11d428ec81e56
| 1,799
|
py
|
Python
|
yacht/data/transforms.py
|
IusztinPaul/yacht
|
c68ab7c66bde860bb91534c29e97772ba328adb5
|
[
"Apache-2.0"
] | 5
|
2021-09-03T10:16:50.000Z
|
2022-02-28T07:32:43.000Z
|
yacht/data/transforms.py
|
IusztinPaul/yacht
|
c68ab7c66bde860bb91534c29e97772ba328adb5
|
[
"Apache-2.0"
] | null | null | null |
yacht/data/transforms.py
|
IusztinPaul/yacht
|
c68ab7c66bde860bb91534c29e97772ba328adb5
|
[
"Apache-2.0"
] | 1
|
2022-03-05T16:06:46.000Z
|
2022-03-05T16:06:46.000Z
|
from abc import ABC, abstractmethod
from typing import Any, List, Optional
import pandas as pd
from yacht.config import Config
class Transform(ABC):
@abstractmethod
def __call__(self, sample: Any) -> Any:
pass
class Compose(Transform):
def __init__(self, transforms: List[Transform]):
self.transforms = transforms
def __call__(self, sample: Any) -> Any:
for transform in self.transforms:
sample = transform(sample)
return sample
class RelativeClosePriceScaling:
PRICE_COLUMNS = ['Close', 'Open', 'High', 'Low']
def __call__(self, data: pd.DataFrame) -> pd.DataFrame:
data[self.PRICE_COLUMNS] = data[self.PRICE_COLUMNS] / (data['Close'].iloc[-1] + 1e-7)
data['Volume'] = data['Volume'] / (data['Volume'].iloc[-1] + 1e-7)
return data
class AverageValueDiff:
PRICE_COLUMNS = ['Close', 'Open', 'High', 'Low']
def __call__(self, data: pd.DataFrame) -> pd.DataFrame:
close_price_average = data['Close'].mean()
volume_average = data['Volume'].mean()
data[self.PRICE_COLUMNS] = data[self.PRICE_COLUMNS] / (close_price_average + 1e-7)
data['Volume'] = data['Volume'] / (volume_average + 1e-7)
return data
#######################################################################################################################
transforms_registry = {
'RelativeClosePriceScaling': RelativeClosePriceScaling,
'AverageValueDiff': AverageValueDiff
}
def build_transforms(config: Config) -> Optional[Compose]:
input_config = config.input
if len(input_config.window_transforms) == 0:
return None
transforms = [transforms_registry[name]() for name in input_config.window_transforms]
return Compose(transforms=transforms)
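# A hedged usage sketch: applies both registered transforms to a tiny OHLCV frame.
# The column names follow PRICE_COLUMNS above; the sample values are arbitrary.
if __name__ == '__main__':
    frame = pd.DataFrame({
        'Close': [10.0, 11.0], 'Open': [9.5, 10.5], 'High': [10.5, 11.5],
        'Low': [9.0, 10.0], 'Volume': [100.0, 120.0],
    })
    pipeline = Compose([RelativeClosePriceScaling(), AverageValueDiff()])
    print(pipeline(frame))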
| 27.676923
| 119
| 0.625347
| 192
| 1,799
| 5.651042
| 0.270833
| 0.066359
| 0.040553
| 0.073733
| 0.282028
| 0.278341
| 0.193548
| 0.193548
| 0.119816
| 0.119816
| 0
| 0.007576
| 0.192885
| 1,799
| 64
| 120
| 28.109375
| 0.739669
| 0
| 0
| 0.205128
| 0
| 0
| 0.070833
| 0.014881
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0.025641
| 0.102564
| 0
| 0.538462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
39ec9a70f64ddc65a70eb731b8421b2083d1e79f
| 410
|
py
|
Python
|
src/aerocloud/packages.py
|
Aerometrex/aerocloud-python-client
|
0bd15432bb0f81fc5e9ca03c48b9b15c8e8ed438
|
[
"MIT"
] | null | null | null |
src/aerocloud/packages.py
|
Aerometrex/aerocloud-python-client
|
0bd15432bb0f81fc5e9ca03c48b9b15c8e8ed438
|
[
"MIT"
] | null | null | null |
src/aerocloud/packages.py
|
Aerometrex/aerocloud-python-client
|
0bd15432bb0f81fc5e9ca03c48b9b15c8e8ed438
|
[
"MIT"
] | null | null | null |
import os
from enum import Enum
class AppPackage(Enum):
# Add packages here as required.
LASTOOLS = "lastools"
def getPackageDirectory(package: AppPackage, version: str = None):
"Gets the directory where the specified package is installed."
varName = f'AZ_BATCH_APP_PACKAGE_{package.value}'
if version is not None:
varName = f'{varName}#{version}'
return os.environ[varName]
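# A hedged usage sketch; the version string is illustrative and the environment
# variable is only present on an Azure Batch compute node.
if __name__ == '__main__':
    try:
        print(getPackageDirectory(AppPackage.LASTOOLS, version='2.0'))
    except KeyError:
        print('AZ_BATCH_APP_PACKAGE_lastools#2.0 is not set on this machine')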
| 21.578947
| 66
| 0.702439
| 51
| 410
| 5.568627
| 0.666667
| 0.056338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204878
| 410
| 18
| 67
| 22.777778
| 0.871166
| 0.22439
| 0
| 0
| 0
| 0
| 0.325397
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
39f26329f53e08a2340c221abdf702988c619417
| 12,075
|
py
|
Python
|
hashvis.py
|
boredzo/hashvis
|
74a017c7fa9b6d48e43172ffd15fc19ccfb060e1
|
[
"BSD-3-Clause"
] | 15
|
2015-12-02T14:26:52.000Z
|
2018-01-21T15:18:59.000Z
|
hashvis.py
|
boredzo/hashvis
|
74a017c7fa9b6d48e43172ffd15fc19ccfb060e1
|
[
"BSD-3-Clause"
] | 10
|
2015-12-04T06:00:42.000Z
|
2016-07-09T21:40:53.000Z
|
hashvis.py
|
boredzo/hashvis
|
74a017c7fa9b6d48e43172ffd15fc19ccfb060e1
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""hashvis by Peter Hosey
Reads from standard input or files, and prints what it reads, along with colorized versions of any hashes or signatures found in each line.
The goal here is visual comparability. You should be able to tell whether two hashes are the same at a glance, rather than having to closely compare digits (or, more probably, not bother and just assume the hashes match!).
The more obvious of the two methods used is shaping the output: Each hash will be represented as a rectangle of an aspect ratio determined by the hash. You may thus end up with one that's tall and one that's wide, or one that's square (if the hash length is a square number) and one that isn't.
If two hashes are the same shape (or if you passed --oneline), another difference is that each byte is represented by a different pair of foreground and background colors. You should thus be able to compare the color-patterns rather than having to look at individual digits.
"""
# #mark - Imports and utilities
import sys
import os
import re
import base64
import binascii
import cmath as math
range = xrange
def factors(n):
"Yield every pair of factors of n (x,y where n/x == y and n/y == x), except for (1,n) and (n,1)."
limit = math.sqrt(n).real
if n == 1:
yield (1, 1)
return
for i in range(1, int(limit + 1)):
if n % i == 0:
pair = (i, n/i)
yield pair
opposite_pair = (pair[1], pair[0])
#If n is square, one of the pairs will be (sqrt, sqrt). We want to yield that only once. All other pairs, we want to yield both ways round.
if pair != opposite_pair:
yield opposite_pair
def except_one(pairs):
"Given a sequence of pairs (x, y), yield every pair where neither x nor y is 1."
for pair in pairs:
if 1 not in pair:
yield pair
# #mark - Parsing
MD5_exp = re.compile(r'^MD5 \(.*\) = ([0-9a-fA-F]+)')
fingerprint_exp = re.compile(r'^(?:R|ECD)SA key fingerprint is (?:(?:MD5:)?(?P<hex>[:0-9a-fA-F]+)|SHA256:(?P<base64>[+/0-9a-zA-Z]+))\.')
commit_exp = re.compile(r'^commit ([0-9a-fA-F]+)')
more_base64_padding_than_anybody_should_ever_need = '=' * 64
def extract_hash_from_line(input_line):
"Returns a tuple of the extracted hash as hex, and whether it was originally hex (vs, say, base64). The hash may be None if none was found in the input."
if input_line[:1] == 'M':
match = MD5_exp.match(input_line)
if match:
return match.group(1), True
else:
return '', False
elif input_line[:1] in 'RE':
match = fingerprint_exp.match(input_line)
if match:
hex = match.group('hex')
if hex:
return hex, True
b64str = match.group('base64')
if b64str:
# Pacify the base64 module, which wants *some* padding (at least sometimes) but doesn't care how much.
b64str += more_base64_padding_than_anybody_should_ever_need
# Re-encode to hex for processing downstream. Arguably a refactoring opportunity…
return binascii.b2a_hex(base64.b64decode(b64str)), False
return '', False
elif input_line[:7] == 'commit ':
match = commit_exp.match(input_line)
if match:
return match.group(1), True
if input_line:
try:
hash, not_the_hash = input_line.split(None, 1)
except ValueError:
# Insufficient fields. This line doesn't contain any whitespace. Use the entire line.
hash = input_line
hash = hash.strip().replace('-', '')
try:
int(hash, 16)
except ValueError:
# Not a hex number.
return None, False
else:
return hash, True
def parse_hex(hex):
hex = hex.lstrip(':-')
while hex:
byte_hex, hex = hex[:2], hex[2:].lstrip(':-')
yield int(byte_hex, 16)
# #mark - Representation
def fgcolor(idx, deep_color=False):
if deep_color:
return '\x1b[38;5;{0}m'.format(idx)
idx = ((idx >> 4) & 0xf)
# 90 is bright foreground; 30 is dull foreground.
if idx < 0x8:
base = 30
else:
base = 90
idx = idx - 0x8
return '\x1b[{0}m'.format(base + idx)
def bgcolor(idx, deep_color=False):
if deep_color:
idx = ((idx & 0xf) << 4) | ((idx & 0xf0) >> 4)
# This add 128 and mod 256 is important, because it ensures double-digits such as 00 remain different colors.
return '\x1b[48;5;{0}m'.format((idx + 128) % 256)
else:
idx = (idx & 0xf)
# 100 is bright background; 40 is dull background.
if idx < 0x8:
base = 40
else:
base = 100
idx = idx - 0x8
return '\x1b[{0}m'.format(base + idx)
BOLD = '\x1b[1m'
RESET = '\x1b[0m'
def hash_to_pic(hash, only_ever_one_line=False, represent_as_hex=False, deep_color=False, _underlying_fgcolor=fgcolor, _underlying_bgcolor=bgcolor):
def fgcolor(idx):
return _underlying_fgcolor(idx, deep_color)
def bgcolor(idx):
return _underlying_bgcolor(idx, deep_color)
bytes = parse_hex(hash)
characters = list('0123456789abcdef') if represent_as_hex else [
'▚',
'▞',
'▀',
'▌',
]
if not only_ever_one_line:
pairs = list((w, h) for (w, h) in except_one(factors(len(hash) / 2)) if w >= h)
if not pairs:
# Prefer (w, 1) over (1, h) if we have that choice.
pairs = list((w, h) for (w, h) in factors(len(hash) / 2) if w >= h)
output_chunks = []
last_byte = 0
character_idx = None
for b in bytes:
def find_character(b):
character_idx = b % len(characters)
return characters[character_idx]
if not represent_as_hex:
output_chunks.append(fgcolor(b) + bgcolor(b) + find_character(b))
else:
output_chunks.append(fgcolor(b) + bgcolor(b) + find_character(b >> 4) + find_character(b & 0xf))
last_byte = b
if only_ever_one_line:
pixels_per_row, num_rows = len(hash) / 2, 1
else:
pixels_per_row, num_rows = pairs[last_byte % len(pairs)]
while output_chunks:
yield BOLD + ''.join(output_chunks[:pixels_per_row]) + RESET
del output_chunks[:pixels_per_row]
if __name__ == '__main__':
# #mark - Self-tests
run_tests = False
if run_tests:
# A square number. Should contain a diagonal pair (in this case, (16,16)).
factors_of_256 = set(factors(256))
assert factors_of_256 == set([(256, 1), (16, 16), (8, 32), (2, 128), (64, 4), (1, 256), (32, 8), (128, 2), (4, 64)])
# A rectangular number: not square, but still composite. No diagonal pair here.
factors_of_12 = set(factors(12))
assert factors_of_12 == set([(2, 6), (12, 1), (1, 12), (6, 2), (4, 3), (3, 4)])
assert (1, 256) in factors_of_256
assert (256, 1) in factors_of_256
assert (1, 256) not in except_one(factors_of_256)
assert (256, 1) not in except_one(factors_of_256)
# A prime number. Should have exactly one pair of factors.
factors_of_5 = set(factors(5))
assert factors_of_5 == set([(1, 5), (5, 1)])
assert list(parse_hex('ab15e')) == [0xab, 0x15, 0x0e]
assert list(parse_hex(':::ab:15:e')) == [0xab, 0x15, 0x0e]
assert extract_hash_from_line('RSA key fingerprint is b8:79:03:7d:00:44:98:6e:67:a0:59:1a:01:21:36:38.\n') == ('b8:79:03:7d:00:44:98:6e:67:a0:59:1a:01:21:36:38', True)
assert extract_hash_from_line('RSA key fingerprint is b8:79:03:7d:00:44:98:6e:67:a0:59:1a:01:21:36:38.') == ('b8:79:03:7d:00:44:98:6e:67:a0:59:1a:01:21:36:38', True)
#Alternate output example from https://en.wikibooks.org/wiki/OpenSSH/Cookbook/Authentication_Keys :
assert extract_hash_from_line('RSA key fingerprint is MD5:10:4a:ec:d2:f1:38:f7:ea:0a:a0:0f:17:57:ea:a6:16.') == ('10:4a:ec:d2:f1:38:f7:ea:0a:a0:0f:17:57:ea:a6:16', True)
# Also from https://en.wikibooks.org/wiki/OpenSSH/Cookbook/Authentication_Keys :
assert extract_hash_from_line('ECDSA key fingerprint is SHA256:LPFiMYrrCYQVsVUPzjOHv+ZjyxCHlVYJMBVFerVCP7k.\n') == ('2cf162318aeb098415b1550fce3387bfe663cb10879556093015457ab5423fb9', False), extract_hash_from_line('ECDSA key fingerprint is SHA256:LPFiMYrrCYQVsVUPzjOHv+ZjyxCHlVYJMBVFerVCP7k.\n')
assert extract_hash_from_line('ECDSA key fingerprint is SHA256:LPFiMYrrCYQVsVUPzjOHv+ZjyxCHlVYJMBVFerVCP7k.') == ('2cf162318aeb098415b1550fce3387bfe663cb10879556093015457ab5423fb9', False), extract_hash_from_line('ECDSA key fingerprint is SHA256:LPFiMYrrCYQVsVUPzjOHv+ZjyxCHlVYJMBVFerVCP7k.')
# Mix and match RSA and ECDSA with MD5 and SHA256:
assert extract_hash_from_line('ECDSA key fingerprint is MD5:10:4a:ec:d2:f1:38:f7:ea:0a:a0:0f:17:57:ea:a6:16.') == ('10:4a:ec:d2:f1:38:f7:ea:0a:a0:0f:17:57:ea:a6:16', True)
assert extract_hash_from_line('RSA key fingerprint is SHA256:LPFiMYrrCYQVsVUPzjOHv+ZjyxCHlVYJMBVFerVCP7k.\n') == ('2cf162318aeb098415b1550fce3387bfe663cb10879556093015457ab5423fb9', False), extract_hash_from_line('RSA key fingerprint is SHA256:LPFiMYrrCYQVsVUPzjOHv+ZjyxCHlVYJMBVFerVCP7k.\n')
#UUID
assert extract_hash_from_line('E6CD379E-12CD-4E00-A83A-B06E74CF03B8') == ('E6CD379E12CD4E00A83AB06E74CF03B8', True), extract_hash_from_line('E6CD379E-12CD-4E00-A83A-B06E74CF03B8')
assert extract_hash_from_line('e6cd379e-12cd-4e00-a83a-b06e74cf03b8') == ('e6cd379e12cd4e00a83ab06e74cf03b8', True), extract_hash_from_line('e6cd379e-12cd-4e00-a83a-b06e74cf03b8')
assert extract_hash_from_line('MD5 (hashvis.py) = e21c7b846f76826d52a0ade79ef9cb49\n') == ('e21c7b846f76826d52a0ade79ef9cb49', True)
assert extract_hash_from_line('MD5 (hashvis.py) = e21c7b846f76826d52a0ade79ef9cb49') == ('e21c7b846f76826d52a0ade79ef9cb49', True)
assert extract_hash_from_line('8b948e9c85fdf68f872017d7064e839c hashvis.py\n') == ('8b948e9c85fdf68f872017d7064e839c', True)
assert extract_hash_from_line('8b948e9c85fdf68f872017d7064e839c hashvis.py') == ('8b948e9c85fdf68f872017d7064e839c', True)
assert extract_hash_from_line('2c9997ce32cb35823b2772912e221b350717fcb2d782c667b8f808be44ae77ba1a7b94b4111e386c64a2e87d15c64a2fc2177cd826b9a0fba6b348b4352ed924 hashvis.py\n') == ('2c9997ce32cb35823b2772912e221b350717fcb2d782c667b8f808be44ae77ba1a7b94b4111e386c64a2e87d15c64a2fc2177cd826b9a0fba6b348b4352ed924', True)
assert extract_hash_from_line('2c9997ce32cb35823b2772912e221b350717fcb2d782c667b8f808be44ae77ba1a7b94b4111e386c64a2e87d15c64a2fc2177cd826b9a0fba6b348b4352ed924 hashvis.py') == ('2c9997ce32cb35823b2772912e221b350717fcb2d782c667b8f808be44ae77ba1a7b94b4111e386c64a2e87d15c64a2fc2177cd826b9a0fba6b348b4352ed924', True)
assert extract_hash_from_line('#!/usr/bin/python\n')[0] is None
# Protip: Use vis -co to generate these.
(line,) = hash_to_pic('78', represent_as_hex=True, deep_color=False)
assert line == '\033[1m\033[37m\033[100m78\033[0m', repr(line)
(line,) = hash_to_pic('7f', represent_as_hex=True, deep_color=False)
assert line == '\033[1m\033[37m\033[107m7f\033[0m', repr(line)
assert list(hash_to_pic('aebece', deep_color=False)) != list(hash_to_pic('deeefe', deep_color=False)), (list(hash_to_pic('aebece', deep_color=False)), list(hash_to_pic('deeefe', deep_color=False)))
assert list(hash_to_pic('eaebec', deep_color=False)) != list(hash_to_pic('edeeef', deep_color=False)), (list(hash_to_pic('eaebec', deep_color=False)), list(hash_to_pic('edeeef', deep_color=False)))
sys.exit(0)
# #mark - Main
use_256color = os.getenv('TERM') == 'xterm-256color'
import argparse
parser = argparse.ArgumentParser(description="Visualize hexadecimal input (hashes, UUIDs, etc.) as an arrangement of color blocks.")
parser.add_argument('--one-line', '--oneline', action='store_true', help="Unconditionally produce a rectangle 1 character tall. The default is to choose a pair of width and height based upon one of the bytes of the input.")
parser.add_argument('--color-test', '--colortest', action='store_true', help="Print the 16-color, 256-color foreground, and 256-color background color palettes, then exit.")
options, args = parser.parse_known_args()
if options.color_test:
for x in range(16):
print fgcolor(x, deep_color=False),
print bgcolor(x, deep_color=False),
else:
print
for x in range(256):
sys.stdout.write(fgcolor(x, deep_color=True) + bgcolor(x, deep_color=True) + '%02x' % (x,))
else:
print RESET
import sys
sys.exit(0)
import fileinput
for input_line in fileinput.input(args):
print input_line.rstrip('\n')
hash, is_hex = extract_hash_from_line(input_line)
if hash:
for output_line in hash_to_pic(hash, only_ever_one_line=options.one_line, represent_as_hex=is_hex, deep_color=use_256color):
print output_line
| 46.087786
| 319
| 0.729441
| 1,848
| 12,075
| 4.625
| 0.235931
| 0.029601
| 0.040365
| 0.051129
| 0.460512
| 0.433602
| 0.420498
| 0.354627
| 0.334035
| 0.261729
| 0
| 0.125386
| 0.141366
| 12,075
| 261
| 320
| 46.264368
| 0.698302
| 0.106584
| 0
| 0.17801
| 0
| 0.073298
| 0.342694
| 0.205351
| 0
| 0
| 0.00531
| 0
| 0.151832
| 0
| null | null | 0
| 0.04712
| null | null | 0.078534
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2d302744caca38acace037f6391b1ffee2c8630
| 1,432
|
py
|
Python
|
src/minescrubber/controller.py
|
alok1974/minescrubber
|
0c18d960b385a4a59ac0cf38bc69271a23c667e7
|
[
"MIT"
] | 1
|
2020-08-11T23:08:34.000Z
|
2020-08-11T23:08:34.000Z
|
src/minescrubber/controller.py
|
alok1974/minescrubber
|
0c18d960b385a4a59ac0cf38bc69271a23c667e7
|
[
"MIT"
] | null | null | null |
src/minescrubber/controller.py
|
alok1974/minescrubber
|
0c18d960b385a4a59ac0cf38bc69271a23c667e7
|
[
"MIT"
] | null | null | null |
from minescrubber_core import abstract
from . import mainwindow
class UI(abstract.UI):
def __init__(self):
self.main_window = mainwindow.MainWidget()
def init_board(self, board):
self.main_window.init_board(board)
def refresh(self, board, init_image=True):
self.main_window.refresh(board=board, init_image=init_image)
def game_over(self, board):
self.main_window.game_over(board=board)
def game_solved(self, board):
self.main_window.game_solved(board=board)
def run(self):
self.main_window.show()
@property
def new_game_signal(self):
return self.main_window.NEW_GAME_SIGNAL
@property
def cell_selected_signal(self):
return self.main_window.CELL_SELECTED_SIGNAL
@property
def cell_flagged_signal(self):
return self.main_window.CELL_FLAGGED_SIGNAL
@property
def wiring_method_name(self):
return 'connect'
class Controller(abstract.Controller):
def pre_callback(self):
import sys
from PySide2 import QtWidgets
QtWidgets.QApplication(sys.argv)
def post_callback(self):
import sys
from PySide2 import QtWidgets
app = (
QtWidgets.QApplication.instance() or
QtWidgets.QApplication(sys.argv)
)
sys.exit(app.exec_())
def run():
controller = Controller()
controller.run(ui_class=UI)
| 22.730159
| 68
| 0.670391
| 176
| 1,432
| 5.221591
| 0.272727
| 0.078346
| 0.137106
| 0.055495
| 0.292709
| 0.267682
| 0.176279
| 0.102285
| 0
| 0
| 0
| 0.001845
| 0.243017
| 1,432
| 62
| 69
| 23.096774
| 0.845941
| 0
| 0
| 0.232558
| 0
| 0
| 0.004888
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.302326
| false
| 0
| 0.139535
| 0.093023
| 0.581395
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2d563db44644c1403a6f057432f77eaa66bdff6
| 1,517
|
py
|
Python
|
Chapter04/chapter4.py
|
Kushalshingote/Hands-On-Generative-Adversarial-Networks-with-Keras
|
fccada4810ba1fe8b79c5a74420a590c95623b52
|
[
"MIT"
] | 76
|
2019-05-27T23:38:53.000Z
|
2021-12-19T00:31:13.000Z
|
Chapter04/chapter4.py
|
Kushalshingote/Hands-On-Generative-Adversarial-Networks-with-Keras
|
fccada4810ba1fe8b79c5a74420a590c95623b52
|
[
"MIT"
] | 9
|
2019-05-29T21:01:32.000Z
|
2020-07-30T12:00:02.000Z
|
Chapter04/chapter4.py
|
Kushalshingote/Hands-On-Generative-Adversarial-Networks-with-Keras
|
fccada4810ba1fe8b79c5a74420a590c95623b52
|
[
"MIT"
] | 35
|
2019-05-12T04:20:54.000Z
|
2022-03-03T19:46:06.000Z
|
# imports assumed by this excerpt; the generator G, training data X_train, neighbour
# count k, latent size z_dim and inception_model are defined elsewhere in the chapter
import numpy as np
import scipy.linalg
# get the training data N, sample the Generator with random z to produce r
N = X_train
z = np.random.uniform(-1, 1, (1, z_dim))
r = G.predict_on_batch(z)
# define our distance measure S to be L1
S = lambda n, r: np.sum(np.abs(n - r))
# compute the distances between the reference r and the samples in N using the measure S
distances = [S(n, r) for n in N]
# find the indices of the most similar samples and select them from N
nearest_neighbors_index = np.argpartition(distances, k)[:k]  # keep only the k most similar
nearest_neighbors_images = N[nearest_neighbors_index]
# generate fake images from the discriminator
n_fake_images = 5000
z = np.random.uniform(-1, 1, (n_fake_images, z_dim))
x = G.predict_on_batch(z)
def compute_inception_score(x, inception_model, n_fake_images, z_dim):
# probability of y given x
p_y_given_x = inception_model.predict_on_batch(x)
# marginal probability of y
q_y = np.mean(p_y_given_x, axis=0)
# per-sample KL divergence between p(y|x) and the marginal q(y), summed over classes
inception_scores = np.sum(p_y_given_x * (np.log(p_y_given_x) - np.log(q_y)), axis=1)
inception_score = np.exp(np.mean(inception_scores))
return inception_score
def get_mean_and_covariance(data):
mean = np.mean(data, axis=0)
covariance = np.cov(data, rowvar=False)  # rowvar=False: rows are samples, columns are features
return mean, covariance
def compute_frechet_inception_distance(mean_r, mean_f, cov_r, cov_f):
l2_mean = np.sum((mean_r - mean_f)**2)
# matrix square root of the covariance product; disp=False also returns an error estimate
cov_mean, _ = scipy.linalg.sqrtm(np.dot(cov_r, cov_f), disp=False)
return l2_mean + np.trace(cov_r) + np.trace(cov_f) - 2 * np.real(np.trace(cov_mean))
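# A hedged smoke test of the FID helpers above on random activation matrices; the
# shapes are arbitrary and the result is only meaningful as a sanity check.
real_feats = np.random.randn(256, 64)
fake_feats = np.random.randn(256, 64)
mean_r, cov_r = get_mean_and_covariance(real_feats)
mean_f, cov_f = get_mean_and_covariance(fake_feats)
print(compute_frechet_inception_distance(mean_r, mean_f, cov_r, cov_f))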
| 35.27907
| 87
| 0.712591
| 268
| 1,517
| 3.791045
| 0.343284
| 0.029528
| 0.034449
| 0.031496
| 0.122047
| 0.061024
| 0
| 0
| 0
| 0
| 0
| 0.012935
| 0.184575
| 1,517
| 42
| 88
| 36.119048
| 0.808407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2d93f0a50f1963382d3895bbaf47dcf3e2de6e0
| 1,124
|
py
|
Python
|
routes/class_incoming.py
|
fingerecho/proms-4.0
|
6c3a1fd62c9394761664e100fc1dde50fd79dc11
|
[
"CC-BY-4.0"
] | 2
|
2019-11-23T03:56:28.000Z
|
2019-12-03T15:48:34.000Z
|
routes/class_incoming.py
|
fingerecho/proms-4.0
|
6c3a1fd62c9394761664e100fc1dde50fd79dc11
|
[
"CC-BY-4.0"
] | null | null | null |
routes/class_incoming.py
|
fingerecho/proms-4.0
|
6c3a1fd62c9394761664e100fc1dde50fd79dc11
|
[
"CC-BY-4.0"
] | 3
|
2019-04-12T18:09:35.000Z
|
2020-03-14T14:38:45.000Z
|
from abc import ABCMeta, abstractmethod
import database
from . import w_l
class IncomingClass(metaclass=ABCMeta):
@abstractmethod
def __init__(self, request):
self.request = request
self.graph = None
self.uri = None
self.named_graph_uri = None
self.error_messages = None
@abstractmethod
def valid(self):
pass
@abstractmethod
def determine_uri(self):
pass
def stored(self):
""" Add an item to PROMS"""
if self.graph is None or self.named_graph_uri is None:
msg = 'The graph and the named_graph_uri properties of this class instance must not be None when trying ' \
'to store this instance in the provenance DB.'
self.error_messages = msg
return False
try:
w_l(str(self.graph))
w_l(str(self.named_graph_uri))
database.insert(self.graph, self.named_graph_uri)
return True
except Exception as e:
self.error_messages = ['Could not connect to the provenance database']
return False
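# A hedged sketch of a minimal concrete subclass illustrating the contract defined by
# IncomingClass; the class name and the hard-coded URI are illustrative only.
class ExampleIncoming(IncomingClass):
    def __init__(self, request):
        super().__init__(request)

    def valid(self):
        return self.graph is not None

    def determine_uri(self):
        self.uri = 'http://example.org/report/1'
        return self.uri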
| 29.578947
| 120
| 0.615658
| 141
| 1,124
| 4.758865
| 0.439716
| 0.053651
| 0.083458
| 0.101341
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.318505
| 1,124
| 37
| 121
| 30.378378
| 0.875979
| 0.017794
| 0
| 0.225806
| 0
| 0
| 0.169553
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129032
| false
| 0.064516
| 0.096774
| 0
| 0.354839
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
f2db2b20dcde6fe54280e2d0105ffc23c0015da0
| 404
|
py
|
Python
|
setup.py
|
TDGerve/ramCOH
|
328f27891906e7207344fb3c5a685648a0924dd2
|
[
"MIT"
] | 2
|
2022-03-08T12:30:55.000Z
|
2022-03-29T19:46:59.000Z
|
setup.py
|
TDGerve/ramCOH
|
328f27891906e7207344fb3c5a685648a0924dd2
|
[
"MIT"
] | null | null | null |
setup.py
|
TDGerve/ramCOH
|
328f27891906e7207344fb3c5a685648a0924dd2
|
[
"MIT"
] | null | null | null |
import setuptools
setuptools.setup(
name= 'ramCOH',
version= '0.1',
description= '...',
author= 'Thomas van Gerve',
packages= setuptools.find_packages(
exclude= ['examples']
),
# package_dir= {'' : 'petroPy'},
package_data= {'ramCOH': ['static/*']},
install_requires= [
'pandas',
'matplotlib',
'numpy',
'scipy',
'csaps'
]
)
| 17.565217
| 44
| 0.534653
| 34
| 404
| 6.235294
| 0.852941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00692
| 0.284653
| 404
| 23
| 45
| 17.565217
| 0.726644
| 0.074257
| 0
| 0
| 0
| 0
| 0.217158
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.055556
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2dda34548b86bf17367a72a0ef32f5325649770
| 576
|
py
|
Python
|
python/binary_tree/104.maximum-depth-of-binary-tree.py
|
Nobodylesszb/LeetCode
|
0e902f6bff4834a93ce64cf9c57fd64297e63523
|
[
"MIT"
] | null | null | null |
python/binary_tree/104.maximum-depth-of-binary-tree.py
|
Nobodylesszb/LeetCode
|
0e902f6bff4834a93ce64cf9c57fd64297e63523
|
[
"MIT"
] | null | null | null |
python/binary_tree/104.maximum-depth-of-binary-tree.py
|
Nobodylesszb/LeetCode
|
0e902f6bff4834a93ce64cf9c57fd64297e63523
|
[
"MIT"
] | null | null | null |
"""
Given a binary tree, find its maximum depth.
The maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.
Note: A leaf is a node with no children.
Example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its depth = 3.
"""
class Solution:
def findDeep(self, root):
if not root:
return 0
if not root.left and not root.right:
return 1
return 1 + max(self.findDeep(root.left), self.findDeep(root.right))
| 20.571429
| 114
| 0.625
| 93
| 576
| 3.870968
| 0.537634
| 0.055556
| 0.022222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04401
| 0.289931
| 576
| 28
| 115
| 20.571429
| 0.836186
| 0.546875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
f2e49a7f41a62f84a3de746b66ce03eb20e0b955
| 1,395
|
py
|
Python
|
ipython/data/parseSource/input.py
|
cainja/RMG-Py
|
f9ad0f4244e476a28768c8a4a37410ad55bcd556
|
[
"MIT"
] | 1
|
2020-01-14T09:12:22.000Z
|
2020-01-14T09:12:22.000Z
|
ipython/data/parseSource/input.py
|
speth/RMG-Py
|
1d2c2b684580396e984459d9347628a5ceb80e2e
|
[
"MIT"
] | 72
|
2016-06-06T18:18:49.000Z
|
2019-11-17T03:21:10.000Z
|
ipython/data/parseSource/input.py
|
speth/RMG-Py
|
1d2c2b684580396e984459d9347628a5ceb80e2e
|
[
"MIT"
] | 3
|
2017-09-22T15:47:37.000Z
|
2021-12-30T23:51:47.000Z
|
# Data sources
database(
thermoLibraries = ['primaryThermoLibrary'],
reactionLibraries = [('C3', False)],
seedMechanisms = ['GRI-Mech3.0'],
kineticsDepositories = ['training'],
kineticsFamilies = 'default',
kineticsEstimator = 'rate rules',
)
# List of species
species(
label='ethane',
reactive=True,
structure=SMILES("CC"),
)
species(
label='N2',
reactive=False,
structure=adjacencyList("""
1 N u0 p1 c0 {2,T}
2 N u0 p1 c0 {1,T}
"""),
)
# Reaction systems
simpleReactor(
temperature=(1350,'K'),
pressure=(1.0,'bar'),
initialMoleFractions={
"ethane": 0.1,
"N2": 0.9
},
terminationConversion={
'ethane': 0.9,
},
terminationTime=(1e6,'s'),
)
simulator(
atol=1e-16,
rtol=1e-8,
)
model(
toleranceKeepInEdge=0.0,
toleranceMoveToCore=0.1,
toleranceInterruptSimulation=0.1,
maximumEdgeSpecies=100000,
)
options(
units='si',
saveRestartPeriod=None,
generateOutputHTML=True,
generatePlots=False,
saveEdgeSpecies=True,
saveSimulationProfiles=True,
verboseComments=True,
)
pressureDependence(
method='modified strong collision',
maximumGrainSize=(0.5,'kcal/mol'),
minimumNumberOfGrains=250,
temperatures=(300,2200,'K',2),
pressures=(0.01,100,'bar',3),
interpolation=('Chebyshev', 6, 4),
maximumAtoms=15,
)
| 19.375
| 47
| 0.632258
| 138
| 1,395
| 6.391304
| 0.710145
| 0.006803
| 0.011338
| 0.015873
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064044
| 0.216487
| 1,395
| 71
| 48
| 19.647887
| 0.742909
| 0.032258
| 0
| 0.032787
| 0
| 0
| 0.138187
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2e593a65e27e8bb4c6dbcd20c5d00538ad0aa1c
| 438
|
py
|
Python
|
simbench/__init__.py
|
BaraaUniKassel/simbench
|
eca679bbef2b7c61d4a42dd9d9716ad969ff6f77
|
[
"BSD-3-Clause"
] | null | null | null |
simbench/__init__.py
|
BaraaUniKassel/simbench
|
eca679bbef2b7c61d4a42dd9d9716ad969ff6f77
|
[
"BSD-3-Clause"
] | null | null | null |
simbench/__init__.py
|
BaraaUniKassel/simbench
|
eca679bbef2b7c61d4a42dd9d9716ad969ff6f77
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2019-2021 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
# contributors (see AUTHORS file for details). All rights reserved.
__version__ = "1.3.0"
__author__ = "smeinecke"
import os
sb_dir = os.path.dirname(os.path.realpath(__file__))
from simbench.converter import *
from simbench.networks import *
| 33.692308
| 101
| 0.783105
| 61
| 438
| 5.409836
| 0.770492
| 0.036364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029101
| 0.136986
| 438
| 12
| 102
| 36.5
| 0.843915
| 0.577626
| 0
| 0
| 0
| 0
| 0.077348
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
f2efb530b1ef641d5c0b78f798aa8a3ec91dbadc
| 3,184
|
py
|
Python
|
functions/constants.py
|
Katolus/functions
|
c4aff37231432ce6ef4ed6b37c8b5baaede5975a
|
[
"MIT"
] | 4
|
2022-03-08T08:46:44.000Z
|
2022-03-19T07:52:11.000Z
|
functions/constants.py
|
Katolus/functions
|
c4aff37231432ce6ef4ed6b37c8b5baaede5975a
|
[
"MIT"
] | 114
|
2021-10-30T05:48:54.000Z
|
2022-03-06T10:57:00.000Z
|
functions/constants.py
|
Katolus/functions
|
c4aff37231432ce6ef4ed6b37c8b5baaede5975a
|
[
"MIT"
] | null | null | null |
import os
import sys
from enum import Enum
from enum import unique
from typing import List
# Set system constants based on the current platform
if sys.platform.startswith("win32"):
DEFAULT_SYSTEM_CONFIG_PATH = os.path.join(os.environ["APPDATA"], "config")
elif sys.platform.startswith("linux"):
DEFAULT_SYSTEM_CONFIG_PATH = os.path.join(os.environ["HOME"], ".config")
elif sys.platform.startswith("darwin"):
DEFAULT_SYSTEM_CONFIG_PATH = os.path.join(
os.environ["HOME"], "Library", "Application Support"
)
else:
DEFAULT_SYSTEM_CONFIG_PATH = os.path.join(os.environ["HOME"], "config")
# System configuration
PACKAGE_BASE_CONFIG_FOLDER = "ventress-functions"
PACKAGE_CONFIG_DIR_PATH = os.path.join(
DEFAULT_SYSTEM_CONFIG_PATH, PACKAGE_BASE_CONFIG_FOLDER
)
DEFAULT_LOG_FILENAME = "functions.log"
DEFAULT_LOG_FILEPATH = os.path.join(PACKAGE_CONFIG_DIR_PATH, DEFAULT_LOG_FILENAME)
# Project constants
PROJECT_VENDOR = "ventress"
PROJECT_MARK = "ventress-functions"
class ConfigName(str, Enum):
"""Represents various availabel names for a config file"""
BASE = "config.json"
class RequiredFile(str, Enum):
"""Enum for required file names in a function's directory"""
CONFIG = "config.json"
DOCKERFILE = "Dockerfile"
DOCKERIGNORE = ".dockerignore"
ENTRY_POINT = "main.py"
REQUIREMENTS = "requirements.txt"
class LoggingLevel(str, Enum):
DEBUG = "debug"
ERROR = "error"
INFO = "info"
WARNING = "warning"
class FunctionType(str, Enum):
"""Represents the various types of functions that can be run"""
HTTP = "http"
PUBSUB = "pubsub"
@classmethod
def options(cls) -> List[str]:
"""Returns a list of all the function types"""
return [enum.value for enum in cls]
class LocalStatus(str, Enum):
"""Represents the status of a function locally"""
ADDED = "added"
BUILT = "new build"
INVALID = "invalid"
NEW = "new"
REMOVED = "removed"
RUNNING = "running"
STOPPED = "stopped"
UNKNOWN = "unknown"
@classmethod
def build_statuses(cls) -> List[str]:
"""Returns a list of statuses which mean that the image is built"""
return [
cls.BUILT,
cls.RUNNING,
cls.STOPPED,
]
class CloudStatus(str, Enum):
"""Represents the status of a function on the cloud"""
DELETED = "deleted"
DEPLOYED = "deployed"
UNKNOWN = "unknown"
@property
def is_deployed(self) -> bool:
return self == CloudStatus.DEPLOYED
@unique
class CloudProvider(str, Enum):
"""Represents the various cloud providers supported by the functions package"""
# AWS = "aws"
# AZURE = "azure"
GCP = "gcp"
# LOCAL = "local"
# OPENFASS = "openfass"
# OPENSTACK = "openstack"
@classmethod
def all(cls) -> List[str]:
"""Returns all the available service types"""
return [enum.value for enum in cls]
@unique
class CloudServiceType(str, Enum):
CLOUD_FUNCTION = "cloud_function"
@classmethod
def all(cls) -> List[str]:
"""Returns all the available service types"""
return [enum.value for enum in cls]
| 25.269841
| 83
| 0.666143
| 387
| 3,184
| 5.377261
| 0.335917
| 0.02691
| 0.028832
| 0.055262
| 0.3037
| 0.250841
| 0.250841
| 0.227775
| 0.176838
| 0.156655
| 0
| 0.000806
| 0.220477
| 3,184
| 125
| 84
| 25.472
| 0.837631
| 0.218593
| 0
| 0.168831
| 0
| 0
| 0.134568
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064935
| false
| 0
| 0.064935
| 0.012987
| 0.623377
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
8401c1577e1e3475bf83b16d801193d6422761d2
| 2,735
|
py
|
Python
|
dashboard/urls.py
|
playfulMIT/kimchi
|
66802cc333770932a8c8b1a44ea5d235d916a8f1
|
[
"MIT"
] | null | null | null |
dashboard/urls.py
|
playfulMIT/kimchi
|
66802cc333770932a8c8b1a44ea5d235d916a8f1
|
[
"MIT"
] | 16
|
2019-12-10T19:40:27.000Z
|
2022-02-10T11:51:06.000Z
|
dashboard/urls.py
|
playfulMIT/kimchi
|
66802cc333770932a8c8b1a44ea5d235d916a8f1
|
[
"MIT"
] | null | null | null |
from django.conf.urls import include, url, re_path
from rest_framework import routers
from . import views
urlpatterns = [
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/versiontime", views.get_last_processed_time),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/players", views.get_player_list),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/sessions", views.get_player_to_session_map),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/puzzles", views.get_puzzles),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/puzzlekeys", views.get_puzzle_keys),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/snapshotsperpuzzle", views.get_snapshot_metrics),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/attempted", views.get_attempted_puzzles),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/completed", views.get_completed_puzzles),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/timeperpuzzle", views.get_time_per_puzzle),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/funnelperpuzzle", views.get_funnel_per_puzzle),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/shapesperpuzzle", views.get_shapes_per_puzzle),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/modesperpuzzle", views.get_modes_per_puzzle),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/levelsofactivity", views.get_levels_of_activity),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/sequencebetweenpuzzles", views.get_sequence_between_puzzles),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/mloutliers", views.get_machine_learning_outliers),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/persistence", views.get_persistence_by_attempt_data),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/puzzlepersistence", views.get_persistence_by_puzzle_data),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/insights", views.get_insights),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/difficulty", views.get_puzzle_difficulty_mapping),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/misconceptions", views.get_misconceptions_data),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/competency", views.get_competency_data),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/report/(?P<start>[0-9]+)/(?P<end>[0-9]+)", views.get_report_summary),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/report", views.get_report_summary),
re_path(r"^api/dashboard/(?P<slug>[a-zA-Z0-9-_]+)/(?P<player>[a-zA-Z0-9-_.]+)/(?P<level>[a-zA-Z0-9-_.]+)/replayurls", views.get_replay_urls),
re_path(r"^(?P<slug>[a-zA-Z0-9-_]+)/dashboard/", views.dashboard),
re_path(r"^(?P<slug>[a-zA-Z0-9-_]+)/thesisdashboard/", views.thesis_dashboard)
]
| 80.441176
| 145
| 0.697623
| 460
| 2,735
| 3.891304
| 0.180435
| 0.046927
| 0.078212
| 0.093855
| 0.501676
| 0.497207
| 0.497207
| 0.497207
| 0.497207
| 0.477095
| 0
| 0.023447
| 0.064351
| 2,735
| 33
| 146
| 82.878788
| 0.676045
| 0
| 0
| 0
| 0
| 0.064516
| 0.51298
| 0.51298
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.096774
| 0
| 0.096774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8406877949c3d33a1b17a8c7fd596cba40c180cf
| 3,542
|
py
|
Python
|
Restaurant_Finder_App/restaurant_finder_app/restaurant_finder_app/restaurant/migrations/0001_initial.py
|
midhun3112/restaurant_locator
|
6ab5e906f26476352176059a8952c2c3f5b127bf
|
[
"Apache-2.0"
] | null | null | null |
Restaurant_Finder_App/restaurant_finder_app/restaurant_finder_app/restaurant/migrations/0001_initial.py
|
midhun3112/restaurant_locator
|
6ab5e906f26476352176059a8952c2c3f5b127bf
|
[
"Apache-2.0"
] | null | null | null |
Restaurant_Finder_App/restaurant_finder_app/restaurant_finder_app/restaurant/migrations/0001_initial.py
|
midhun3112/restaurant_locator
|
6ab5e906f26476352176059a8952c2c3f5b127bf
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-01 13:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
options={
'verbose_name': 'category',
'verbose_name_plural': 'categories',
'default_related_name': 'categories',
},
),
migrations.CreateModel(
name='Collection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('collection_name', models.CharField(max_length=255)),
],
options={
'default_related_name': 'collections',
},
),
migrations.CreateModel(
name='Restaurant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('restaurant_name', models.CharField(max_length=255)),
('restaurant_image', models.ImageField(default='restaurant_pic/images/no-name.jpg', upload_to='images/restaurant_pic/')),
],
options={
'default_related_name': 'restaurant',
},
),
migrations.CreateModel(
name='RestaurantTiming',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_time', models.TimeField()),
('end_time', models.TimeField()),
('restaurant', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='restaurant_timing', to='restaurant.Restaurant')),
],
options={
'verbose_name': 'Restaurant Timing',
'verbose_name_plural': 'Restaurant Timings',
'default_related_name': 'restaurant_timing',
},
),
migrations.CreateModel(
name='WeekDay',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('day', models.CharField(max_length=255)),
],
options={
'verbose_name': 'WeekDay',
'verbose_name_plural': 'WeekDays',
'default_related_name': 'week_day',
},
),
migrations.AddField(
model_name='restauranttiming',
name='working_days',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='restaurant_timing', to='restaurant.WeekDay'),
),
migrations.AddField(
model_name='collection',
name='restaurant',
field=models.ManyToManyField(related_name='collections', to='restaurant.Restaurant'),
),
migrations.AddField(
model_name='category',
name='restaurant',
field=models.ManyToManyField(related_name='categories', to='restaurant.Restaurant'),
),
]
| 38.5
| 168
| 0.559006
| 312
| 3,542
| 6.137821
| 0.269231
| 0.063185
| 0.065274
| 0.060052
| 0.449086
| 0.449086
| 0.432898
| 0.357702
| 0.310705
| 0.310705
| 0
| 0.01191
| 0.312535
| 3,542
| 91
| 169
| 38.923077
| 0.774538
| 0.019198
| 0
| 0.457831
| 1
| 0
| 0.209162
| 0.033996
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.036145
| 0
| 0.084337
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
840d053d29d25ef335ed6bf8148849bf05df3d8b
| 596
|
py
|
Python
|
guitar-package/guitar/guitar/fetcher/__init__.py
|
django-stars/guitar
|
9bddfd2d7b555c97dd9470b458a5f43bd805b026
|
[
"MIT"
] | null | null | null |
guitar-package/guitar/guitar/fetcher/__init__.py
|
django-stars/guitar
|
9bddfd2d7b555c97dd9470b458a5f43bd805b026
|
[
"MIT"
] | null | null | null |
guitar-package/guitar/guitar/fetcher/__init__.py
|
django-stars/guitar
|
9bddfd2d7b555c97dd9470b458a5f43bd805b026
|
[
"MIT"
] | null | null | null |
import urllib2
import json
FAKE_PACKAGES = (
'south',
'django-debug-toolbar',
'django-extensions',
'django-social-auth',
)
class GuitarWebAPI(object):
def __init__(self, url):
self.url = url
def search(self, q):
url = self.url + 'search/' + q + '/'
res = urllib2.urlopen(url)
return json.loads(res.read())
def get_config(self, package, version=None):
url = self.url + 'search/' + package + '/'
print url
res = urllib2.urlopen(url)
print res
fetcher = GuitarWebAPI('http://localhost:8000/api/v1/')
| 20.551724
| 55
| 0.587248
| 70
| 596
| 4.914286
| 0.542857
| 0.081395
| 0.087209
| 0.093023
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018307
| 0.266779
| 596
| 28
| 56
| 21.285714
| 0.768879
| 0
| 0
| 0.095238
| 0
| 0
| 0.176175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.095238
| null | null | 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
841b950a79e8d2aae01b030de733c8d1017b6718
| 3,649
|
py
|
Python
|
libs/token.py
|
yareally/twitter-clone-python
|
1323c3fa4bf66f479a3092c09fb165a323eb1c85
|
[
"MIT"
] | 1
|
2020-05-22T22:13:48.000Z
|
2020-05-22T22:13:48.000Z
|
libs/token.py
|
yareally/twitter-clone-python
|
1323c3fa4bf66f479a3092c09fb165a323eb1c85
|
[
"MIT"
] | null | null | null |
libs/token.py
|
yareally/twitter-clone-python
|
1323c3fa4bf66f479a3092c09fb165a323eb1c85
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from operator import xor
import os
import scrypt
import time
from libs.rediswrapper import UserHelper
try:
xrange
except NameError:
xrange = range
class Token(object):
"""
@param user_id:
@type user_id:
@param password:
@type password:
"""
__BLOCK_SIZE = 256
__TRANS_5C = "".join(chr(x ^ 0x5c) for x in xrange(256))
__TRANS_36 = "".join(chr(x ^ 0x36) for x in xrange(256))
__I_SALT = os.urandom(16).encode('base_64')
__O_SALT = os.urandom(16).encode('base_64')
def __init__(self, user_id, password=None):
self.user_id = user_id
# get or create some password to encrypt the user verification token
self.password = password #if password else self.redis.get('token_pass')
if not self.password:
salt = os.urandom(16).encode('base_64')
self.password = scrypt.hash(os.urandom(24).encode('base_64'), salt)
def generate_token(self):
"""
Generates an encrypted token for validating a user
@return: the encrypted token (a random value and the date as a timestamp
@rtype: str
"""
# random value, user_id, timestamp
values = '%s,%s,%s' % (os.urandom(16).encode('base_64'), self.user_id, time.time())
return scrypt.encrypt(values, self.password)
def generate_hmac(self, key, message):
"""
@param key: The user's generated password
@type key: str
@param message: message to hash for client-server authentication
@type message: str
@return: the hash based message auth code (to verify against the client sent one)
@rtype: str
@see: http://en.wikipedia.org/wiki/Hash-based_message_authentication_code
"""
if len(key) > self.__BLOCK_SIZE:
salt = os.urandom(16).encode('base_64')
key = scrypt.hash(key, salt)
key += chr(0) * (self.__BLOCK_SIZE - len(key))
o_key_pad = xor(self.__TRANS_5C, key)
i_key_pad = xor(self.__TRANS_36, key)
return scrypt.hash(o_key_pad + scrypt.hash(i_key_pad + message, self.__I_SALT), self.__O_SALT)
def validate_token(self, client_token, server_token, expire_time=15):
"""
@param client_token:
@type client_token: str
@param server_token:
@type server_token: str
@param expire_time:
@type expire_time: int
@return: True if still valid
@rtype: bool
"""
if client_token != server_token:
return False
tokens = scrypt.decrypt(client_token, self.password).split(',')
if len(tokens) != 3:
return False
expired = ((time.time() - int(tokens[1])) / 3600) >= expire_time
if expired:
return False
return True
class RedisToken(Token):
"""
@param user_id:
@type user_id: int
@param redis_connection:
@type redis_connection: StrictRedis
@param password:
@type password: str
"""
def __init__(self, user_id, redis_connection, password=None):
"""
@param user_id:
@type user_id: int
@param redis_connection
@type redis_connection: StrictRedis
@param password:
@type password: str
@return:
@rtype:
"""
# get or create some password to encrypt the user verification token
self.redis = UserHelper(redis_connection, user_id)
self.password = password if password else self.redis.get('token_pass')
super(RedisToken, self).__init__(user_id, password)
| 28.960317
| 102
| 0.609482
| 465
| 3,649
| 4.567742
| 0.270968
| 0.039548
| 0.033898
| 0.040019
| 0.324859
| 0.277778
| 0.267891
| 0.202448
| 0.202448
| 0.202448
| 0
| 0.020737
| 0.28638
| 3,649
| 125
| 103
| 29.192
| 0.794931
| 0.326391
| 0
| 0.106383
| 0
| 0
| 0.029285
| 0
| 0
| 0
| 0.003841
| 0
| 0
| 1
| 0.106383
| false
| 0.191489
| 0.106383
| 0
| 0.489362
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
842064b9ee1d937a6d9bb100474bd7dafa3c5859
| 3,766
|
py
|
Python
|
applications/plugins/SofaPython/python/SofaPython/DAGValidation.py
|
sofa-framework/issofa
|
94855f488465bc3ed41223cbde987581dfca5389
|
[
"OML"
] | null | null | null |
applications/plugins/SofaPython/python/SofaPython/DAGValidation.py
|
sofa-framework/issofa
|
94855f488465bc3ed41223cbde987581dfca5389
|
[
"OML"
] | null | null | null |
applications/plugins/SofaPython/python/SofaPython/DAGValidation.py
|
sofa-framework/issofa
|
94855f488465bc3ed41223cbde987581dfca5389
|
[
"OML"
] | null | null | null |
import sys
import Sofa
import Tools
def MechanicalObjectVisitor(node):
## listing mechanical states, bottom-up from node
ancestors = []
visited = []
for p in node.getParents():
path = p.getPathName()
if not path in visited:
state = p.getMechanicalState()
if not state is None:
ancestors.append( path+"/"+state.name )
ancestors += MechanicalObjectVisitor( p )
return ancestors
class Visitor(object):
## checking that mapping graph is equivalent to node graph
## checking that independent dofs are not under other dofs in the scene graph
def __init__(self):
#print "DAGValidationVisitor"
self.error = []
def treeTraversal(self):
#print 'ValidationVisitor treeTraversal'
return -1 # dag
def processNodeTopDown(self,node):
#print node.name
state = node.getMechanicalState()
if state is None:
return True
mapping = node.getMechanicalMapping()
if mapping is None: #independent dofs
ancestors = MechanicalObjectVisitor(node)
if not len(ancestors) is 0: # an independent dof is under other dofs in the scene graph
err = "ERROR "
err += "mechanical state '"+state.getContext().getPathName()+"/"+state.name+"' is independent (no mapping)"
err += " and should not be in the child node of other mechanical states ("+Tools.listToStr(ancestors)+")"
self.error.append(err)
else: # mapped dofs
#print mapping.getName()
from_dof = mapping.getFrom()
parent_node = mapping.getContext().getParents()
parent_node_path = []
for p in parent_node:
parent_node_path.append( p.getPathName() )
from_node_path = []
for f in from_dof:
from_node_path.append( f.getContext().getPathName() )
#print parent_node_path
for f in from_node_path:
#print f
if not f in parent_node_path:
err = "ERROR "
err += "'"+mapping.getContext().getPathName()+"/"+mapping.name+"': "
err += "'"+ f + "' should be a parent node"
self.error.append(err)
#print err
for p in parent_node_path:
#print p
if not p in from_node_path:
err = "ERROR "
err += "'"+mapping.getContext().getPathName()+"/"+mapping.name+"': "
err += "'"+p + "' should NOT be a parent node"
self.error.append(err)
#print err
#print "==================="
return True
def processNodeBottomUp(self,node):
return True
def test( node, silent=False ):
## checking that mapping graph is equivalent to node graph
## checking that independent dofs are not under other dofs in the scene graph
## return a list of errors
if not silent:
print ""
print "====== SofaPython.DAGValidation.test ======================="
print ""
print "Validating scene from node '/" + node.getPathName() + "'..."
vis = Visitor()
node.executeVisitor(vis)
if not silent:
if len(vis.error) is 0:
print "... VALIDATED"
else:
print "... NOT VALID"
print ""
for e in vis.error:
print e
print ""
print "=============================================================="
sys.stdout.flush()
return vis.error
| 30.128
| 123
| 0.521774
| 383
| 3,766
| 5.062663
| 0.245431
| 0.046416
| 0.036101
| 0.024755
| 0.265601
| 0.249097
| 0.230531
| 0.215575
| 0.215575
| 0.215575
| 0
| 0.001243
| 0.359002
| 3,766
| 124
| 124
| 30.370968
| 0.801988
| 0.161445
| 0
| 0.253333
| 0
| 0
| 0.121367
| 0.03641
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.04
| null | null | 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
84226726736f353bcbde4bab4581da03be81116f
| 878
|
py
|
Python
|
Newsfeed/Newsfeed/app.py
|
akshayseth7/Intership_Snapshot
|
e262ec4939e2e5c5e2037333b7fa37f7c57d5425
|
[
"MIT"
] | null | null | null |
Newsfeed/Newsfeed/app.py
|
akshayseth7/Intership_Snapshot
|
e262ec4939e2e5c5e2037333b7fa37f7c57d5425
|
[
"MIT"
] | null | null | null |
Newsfeed/Newsfeed/app.py
|
akshayseth7/Intership_Snapshot
|
e262ec4939e2e5c5e2037333b7fa37f7c57d5425
|
[
"MIT"
] | null | null | null |
from flask import Flask , render_template, request
import google_news
app = Flask(__name__)
outputFile = ''
@app.route("/")
def main():
print "Welcome!"
return render_template('index.html')
@app.route('/uploadFile', methods=['POST'])
def upload():
global outputFile
filedata = request.files['upload']
filename = filedata.filename
print 'filename:' + filename
inputFile = 'input/' + filename
outputFile = 'output/' + filename + '_output'
outputPath = 'templates/' + outputFile
filedata.save(inputFile)
print "Input Saved"
print "processing starts"
google_news.news(inputFile,outputPath)
print "processing success"
#processing
return "success"
@app.route('/download')
def download():
print 'download'
print outputFile
return render_template(outputFile)
if __name__ == "__main__":
app.run()
| 20.904762
| 50
| 0.67426
| 92
| 878
| 6.23913
| 0.445652
| 0.073171
| 0.069686
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.200456
| 878
| 41
| 51
| 21.414634
| 0.817664
| 0.01139
| 0
| 0
| 0
| 0
| 0.181293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.066667
| null | null | 0.233333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
84272a9b78bd142a71da410927baa64f753039be
| 1,069
|
py
|
Python
|
TeamX/TeamXapp/migrations/0040_auto_20190712_1351.py
|
rootfinlay/SageTeamX
|
cf4cde3360c8cccb8a727ba64d66345805d7a0ed
|
[
"Unlicense"
] | null | null | null |
TeamX/TeamXapp/migrations/0040_auto_20190712_1351.py
|
rootfinlay/SageTeamX
|
cf4cde3360c8cccb8a727ba64d66345805d7a0ed
|
[
"Unlicense"
] | null | null | null |
TeamX/TeamXapp/migrations/0040_auto_20190712_1351.py
|
rootfinlay/SageTeamX
|
cf4cde3360c8cccb8a727ba64d66345805d7a0ed
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 2.2.3 on 2019-07-12 12:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('TeamXapp', '0039_auto_20190712_1348'),
]
operations = [
migrations.AddField(
model_name='leavecalendar',
name='leave_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='TeamXapp.LeaveStatus'),
),
migrations.AlterField(
model_name='allmembers',
name='scrum_team_name',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='TeamXapp.ScrumTeam', verbose_name='Scrum team: '),
),
migrations.AlterField(
model_name='allmembers',
name='scrum_team_roles',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='TeamXapp.ScrumTeamRole', verbose_name='Scrum Team Roles: '),
),
]
| 35.633333
| 168
| 0.649205
| 120
| 1,069
| 5.65
| 0.4
| 0.058997
| 0.082596
| 0.129794
| 0.529499
| 0.529499
| 0.529499
| 0.529499
| 0.376106
| 0.376106
| 0
| 0.03753
| 0.227315
| 1,069
| 29
| 169
| 36.862069
| 0.783293
| 0.042095
| 0
| 0.304348
| 1
| 0
| 0.190802
| 0.044031
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.086957
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8438a4e8ec614cde523653248e7af3039519099a
| 463
|
py
|
Python
|
mqtt_sender.py
|
kehtolaulu/iot-ccs811
|
611ca30ffaec067d730ac95c59b6800fda2cf148
|
[
"MIT"
] | null | null | null |
mqtt_sender.py
|
kehtolaulu/iot-ccs811
|
611ca30ffaec067d730ac95c59b6800fda2cf148
|
[
"MIT"
] | null | null | null |
mqtt_sender.py
|
kehtolaulu/iot-ccs811
|
611ca30ffaec067d730ac95c59b6800fda2cf148
|
[
"MIT"
] | null | null | null |
import json
from paho.mqtt.client import Client
from subscriber import Subscriber
from datetime import datetime
class MqttSender(Subscriber):
def __init__(self, client: Client, topic: str):
self.client = client
self.topic = topic
def on_next(self, message: dict):
json_message = json.dumps(message)
print(f'[{datetime.now().isoformat()}] Sending: {json_message}')
self.client.publish(self.topic, json_message)
| 25.722222
| 72
| 0.695464
| 58
| 463
| 5.413793
| 0.448276
| 0.095541
| 0.101911
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.200864
| 463
| 17
| 73
| 27.235294
| 0.848649
| 0
| 0
| 0
| 0
| 0
| 0.116631
| 0.064795
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 0.583333
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
84416b0aa44ff310962bcf2724c753d72fba9519
| 476
|
py
|
Python
|
main/schemas/location_lat.py
|
ohioh/ohioh_Framework_Cluster_3_Flask
|
69e50b9d697b5e8818305328335d26314b625732
|
[
"Apache-2.0"
] | 1
|
2020-08-11T18:37:36.000Z
|
2020-08-11T18:37:36.000Z
|
main/schemas/location_lat.py
|
ohioh/ohioh_Framework_Cluster_3_Flask
|
69e50b9d697b5e8818305328335d26314b625732
|
[
"Apache-2.0"
] | null | null | null |
main/schemas/location_lat.py
|
ohioh/ohioh_Framework_Cluster_3_Flask
|
69e50b9d697b5e8818305328335d26314b625732
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
from marshmallow import Schema, EXCLUDE
import marshmallow.fields as ms_fields
class LocationLatSchema(Schema):
user_id = ms_fields.Str(required=True)
user_timestamp = ms_fields.DateTime(default=datetime.now())
location_id = ms_fields.Str(default="")
latitude = ms_fields.Float(default=0.0)
departure = ms_fields.Bool(default=False)
accuracy = ms_fields.Float(default=0.0)
class Meta:
unknown = EXCLUDE
| 23.8
| 63
| 0.737395
| 63
| 476
| 5.412698
| 0.460317
| 0.164223
| 0.058651
| 0.076246
| 0.129032
| 0.129032
| 0
| 0
| 0
| 0
| 0
| 0.010127
| 0.170168
| 476
| 19
| 64
| 25.052632
| 0.853165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.916667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
84418df14873be48f72ce565d6b9bb740aefa623
| 411
|
py
|
Python
|
Python/34-match.py
|
strawman2511/Learning
|
21ee7bdad376060503fdc0a739fed2d7bd40f9b9
|
[
"MIT"
] | 1
|
2022-03-16T23:25:54.000Z
|
2022-03-16T23:25:54.000Z
|
Python/34-match.py
|
strawman2511/Learning
|
21ee7bdad376060503fdc0a739fed2d7bd40f9b9
|
[
"MIT"
] | null | null | null |
Python/34-match.py
|
strawman2511/Learning
|
21ee7bdad376060503fdc0a739fed2d7bd40f9b9
|
[
"MIT"
] | null | null | null |
# Till now only Python 3.10 can run match statement
def check_point(point):
match point:
case (0, 0):
print("Origin")
case (0, y):
print(f"Y = {y}")
case (x, 0):
print(f"X = {x}")
case (x, y):
print(f"X = {x}, Y = {y}")
case _:
raise ValueError("Not a point")
x = 1
y = 2
point = (x, y)
check_point(point)
| 20.55
| 51
| 0.452555
| 60
| 411
| 3.05
| 0.433333
| 0.098361
| 0.163934
| 0.087432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036145
| 0.394161
| 411
| 19
| 52
| 21.631579
| 0.698795
| 0.119221
| 0
| 0
| 0
| 0
| 0.130556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
844a39e610cb54a65514ca7f805b41f45b503518
| 3,021
|
py
|
Python
|
jarvis/resume/tests/test_utils.py
|
Anubhav722/blahblah
|
160698e06a02e671ac40de3113cd37d642e72e96
|
[
"MIT"
] | 1
|
2019-01-03T06:10:04.000Z
|
2019-01-03T06:10:04.000Z
|
jarvis/resume/tests/test_utils.py
|
Anubhav722/blahblah
|
160698e06a02e671ac40de3113cd37d642e72e96
|
[
"MIT"
] | 1
|
2021-03-31T19:11:52.000Z
|
2021-03-31T19:11:52.000Z
|
jarvis/resume/tests/test_utils.py
|
Anubhav722/blahblah
|
160698e06a02e671ac40de3113cd37d642e72e96
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from jarvis.resume.utils.extractor import get_text
from jarvis.resume.utils.parser_helper import get_urls, get_url_response, url_categories, get_github_username, get_stackoverflow_userid, get_stackoverflow_username, get_name, get_id_from_linkedin_url, get_email
from unidecode import unidecode
path_to_test_data = 'resume/tests/test_data/1.pdf'
urls = ['https://github.com/imnithin', 'http://imnithin.github.io', 'https://gist.github.com/imnithin',
'http://stackoverflow.com/users/2231236/nithin', 'https://www.linkedin.com/in/imnithink']
categories = {'blog': ['http://imnithin.github.io'], 'coding': [],
'contributions': ['https://github.com/imnithin', 'https://gist.github.com/imnithin'],
'forums': ['http://stackoverflow.com/users/2231236/nithin'], 'others': [],
'social': ['https://www.linkedin.com/in/imnithink']}
url_response = [{'name': 'https://github.com/imnithin', 'type': 'contributions'},
{'name': 'https://gist.github.com/imnithin', 'type': 'contributions'},
{'name': 'https://www.linkedin.com/in/imnithink', 'type': 'social'},
{'name': 'http://imnithin.github.io', 'type': 'blog'},
{'name': 'http://stackoverflow.com/users/2231236/nithin', 'type': 'forums'}]
class ParserHelperUtilsTest(TestCase):
"""Unit tests for Parser Helper Functions"""
def setUp(self):
self.text = get_text(path_to_test_data)
def test_get_name(self):
"""Test User Name Obtained from jarvis.resume"""
name = 'nithin'
self.assertEqual(get_name(self.text)[0], name)
def test_github_username(self):
"""Test GitHub Username"""
github_user_name = 'imnithin'
self.assertEqual(get_github_username(self.text), github_user_name)
def test_stackoverflow_user_id(self):
"""Test StackOverflow user id"""
stackoverflow_user_id = '2231236'
self.assertEqual(get_stackoverflow_userid(self.text), stackoverflow_user_id)
def test_stackoverflow_user_name(self):
"""Test StackOverflow User Name"""
stackoverflow_user_name = 'nithin'
self.assertEqual(get_stackoverflow_username(self.text), stackoverflow_user_name)
def test_get_urls(self):
self.assertEqual(get_urls(self.text), urls)
def test_url_categories(self):
values = list(categories.values()).sort()
self.assertEqual(list(url_categories(urls).values()).sort(), values)
def test_get_url_response(self):
sorted_url_response = url_response.sort()
self.assertEqual(get_url_response(categories).sort(), sorted_url_response)
def test_get_id_from_linkedin_url(self):
linkedin_id = 'imnithink'
self.assertEqual(unidecode(get_id_from_linkedin_url(self.text)).strip(), linkedin_id)
def test_get_email(self):
email = 'nithinkool14@gmail.com'
self.assertEqual(get_email(self.text)[0], email)
| 43.782609
| 210
| 0.676266
| 365
| 3,021
| 5.361644
| 0.191781
| 0.032192
| 0.064384
| 0.02606
| 0.237609
| 0.170158
| 0.043945
| 0
| 0
| 0
| 0
| 0.013333
| 0.180735
| 3,021
| 68
| 211
| 44.426471
| 0.777374
| 0.052301
| 0
| 0
| 0
| 0
| 0.249295
| 0.01763
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.222222
| false
| 0
| 0.088889
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
844bd667c2563dc8f5e9e83fc9eaf8e0c1857eb6
| 560
|
py
|
Python
|
news/admin.py
|
trojsten/news
|
aa1dfb4ee31a7f810dcd484eecafd49659292d76
|
[
"BSD-3-Clause"
] | null | null | null |
news/admin.py
|
trojsten/news
|
aa1dfb4ee31a7f810dcd484eecafd49659292d76
|
[
"BSD-3-Clause"
] | 6
|
2016-07-10T00:22:02.000Z
|
2021-12-23T22:43:41.000Z
|
news/admin.py
|
trojsten/news
|
aa1dfb4ee31a7f810dcd484eecafd49659292d76
|
[
"BSD-3-Clause"
] | 2
|
2019-04-30T20:20:38.000Z
|
2021-02-16T18:41:01.000Z
|
from django.contrib import admin
from django.db import models
from easy_select2.widgets import Select2Multiple
from news.models import Entry
class EntryAdmin(admin.ModelAdmin):
list_display = ('title', 'pub_date', 'author')
readonly_fields = ('slug',)
exclude = ('author',)
formfield_overrides = {
models.ManyToManyField: {'widget': Select2Multiple()}
}
def save_model(self, request, obj, form, change):
if not change:
obj.author = request.user
obj.save()
admin.site.register(Entry, EntryAdmin)
| 25.454545
| 61
| 0.682143
| 64
| 560
| 5.875
| 0.671875
| 0.053191
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006757
| 0.207143
| 560
| 21
| 62
| 26.666667
| 0.84009
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.25
| 0
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
845c29a7df8a071ea4d00366b934a8a0a5899a8f
| 2,832
|
py
|
Python
|
vbb_backend/session/migrations/0002_auto_20210215_1509.py
|
patrickb42/backend-vbb-portal
|
88362bc5b4d5cab95aa67e12694f98371604b65a
|
[
"MIT"
] | 3
|
2021-04-14T02:59:09.000Z
|
2021-06-08T00:17:27.000Z
|
vbb_backend/session/migrations/0002_auto_20210215_1509.py
|
patrickb42/backend-vbb-portal
|
88362bc5b4d5cab95aa67e12694f98371604b65a
|
[
"MIT"
] | 81
|
2020-12-08T00:11:52.000Z
|
2021-08-09T18:13:32.000Z
|
vbb_backend/session/migrations/0002_auto_20210215_1509.py
|
patrickb42/backend-vbb-portal
|
88362bc5b4d5cab95aa67e12694f98371604b65a
|
[
"MIT"
] | 5
|
2021-01-12T04:50:26.000Z
|
2021-06-04T02:00:03.000Z
|
# Generated by Django 3.0.10 on 2021-02-15 15:09
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('users', '0006_auto_20210209_0849'),
('session', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='MentorSessionAssociation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True, null=True)),
('modified_date', models.DateTimeField(auto_now=True, null=True)),
('deleted', models.BooleanField(default=False)),
('external_id', models.UUIDField(db_index=True, default=uuid.uuid4, unique=True)),
('attended', models.BooleanField(default=False)),
('mentor', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='mentor_session', to='users.Mentor')),
],
options={
'abstract': False,
},
),
migrations.RemoveField(
model_name='session',
name='derived_from',
),
migrations.RemoveField(
model_name='session',
name='is_mentor_confirmed',
),
migrations.RemoveField(
model_name='studentsessionassociation',
name='is_absent',
),
migrations.RemoveField(
model_name='studentsessionassociation',
name='notes',
),
migrations.RemoveField(
model_name='studentsessionassociation',
name='wont_attend',
),
migrations.AddField(
model_name='studentsessionassociation',
name='attended',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='studentsessionassociation',
name='session',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='session_student', to='session.Session'),
),
migrations.AlterField(
model_name='studentsessionassociation',
name='student',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='student_session', to='users.Student'),
),
migrations.DeleteModel(
name='SessionRule',
),
migrations.AddField(
model_name='mentorsessionassociation',
name='session',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='session_mentor', to='session.Session'),
),
]
| 37.76
| 153
| 0.598517
| 259
| 2,832
| 6.374517
| 0.316602
| 0.049061
| 0.123561
| 0.138098
| 0.466384
| 0.430042
| 0.20533
| 0.20533
| 0.20533
| 0.20533
| 0
| 0.018146
| 0.280014
| 2,832
| 74
| 154
| 38.27027
| 0.791564
| 0.016243
| 0
| 0.441176
| 1
| 0
| 0.192888
| 0.079382
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.044118
| 0
| 0.088235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8463673ccc7d5d8251d46b1bed4eb08caa70dd68
| 1,054
|
py
|
Python
|
src/the_tale/the_tale/game/pvp/objects.py
|
al-arz/the-tale
|
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
|
[
"BSD-3-Clause"
] | 85
|
2017-11-21T12:22:02.000Z
|
2022-03-27T23:07:17.000Z
|
src/the_tale/the_tale/game/pvp/objects.py
|
al-arz/the-tale
|
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
|
[
"BSD-3-Clause"
] | 545
|
2017-11-04T14:15:04.000Z
|
2022-03-27T14:19:27.000Z
|
src/the_tale/the_tale/game/pvp/objects.py
|
al-arz/the-tale
|
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
|
[
"BSD-3-Clause"
] | 45
|
2017-11-11T12:36:30.000Z
|
2022-02-25T06:10:44.000Z
|
import smart_imports
smart_imports.all()
class BattleRequest:
__slots__ = ('id', 'initiator_id', 'matchmaker_type', 'created_at', 'updated_at')
def __init__(self, id, initiator_id, matchmaker_type, created_at, updated_at):
self.id = id
self.initiator_id = initiator_id
self.matchmaker_type = matchmaker_type
self.created_at = created_at
self.updated_at = updated_at
def ui_info(self):
return {'id': self.id,
'initiator_id': self.initiator_id,
'matchmaker_type': self.matchmaker_type.value,
'created_at': time.mktime(self.created_at.timetuple()),
'updated_at': time.mktime(self.updated_at.timetuple())}
class Battle:
__slots__ = ('id', 'matchmaker_type', 'participants_ids', 'created_at')
def __init__(self, id, matchmaker_type, participants_ids, created_at):
self.id = id
self.matchmaker_type = matchmaker_type
self.participants_ids = participants_ids
self.created_at = created_at
| 31.939394
| 85
| 0.66129
| 126
| 1,054
| 5.095238
| 0.214286
| 0.218069
| 0.124611
| 0.116822
| 0.507788
| 0.383178
| 0.383178
| 0.140187
| 0.140187
| 0
| 0
| 0
| 0.233397
| 1,054
| 32
| 86
| 32.9375
| 0.794554
| 0
| 0
| 0.26087
| 0
| 0
| 0.133903
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.086957
| 0.043478
| 0.434783
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8465fe705e2203a309cb2f80aab7f362306bc341
| 1,111
|
py
|
Python
|
testesDuranteAulas/aula019.py
|
Igor3550/Exercicios-de-python
|
e0f6e043df4f0770ac15968485fbb19698b4ac6b
|
[
"MIT"
] | null | null | null |
testesDuranteAulas/aula019.py
|
Igor3550/Exercicios-de-python
|
e0f6e043df4f0770ac15968485fbb19698b4ac6b
|
[
"MIT"
] | null | null | null |
testesDuranteAulas/aula019.py
|
Igor3550/Exercicios-de-python
|
e0f6e043df4f0770ac15968485fbb19698b4ac6b
|
[
"MIT"
] | null | null | null |
# Dictionaries
pessoas = {'nome': 'Igor', 'sexo': 'M', 'idade': 20}
print(f'O {pessoas["nome"]} tem {pessoas["idade"]} anos.')
print(pessoas.keys())  # dictionary keys
print(pessoas.values())  # values of the keys
print(pessoas.items())  # shows the dictionary items
print()
for k in pessoas.keys():
print(k)
for v in pessoas.values():
print(v)
for k, v in pessoas.items():
print(k, v)
print()
for k, v in pessoas.items():
print(f'{k} = {v}')
print()
del pessoas['sexo']  # deletes a key
pessoas['peso'] = 72  # adds a new key
for k, v in pessoas.items():
print(f'{k} = {v}')
print()
# Dictionary inside a list
brasil = []
estado1 = {'uf': 'Rio de Janeiro', 'sigla': 'RJ'}
estado2 = {'uf': 'São Paulo', 'sigla': 'SP'}
brasil.append(estado1)
brasil.append(estado2)
print(brasil[0]['uf'])
print()
brasil = list()
estado = dict()
for c in range(0, 3):
estado['uf'] = str(input('Unidade federativa: '))
estado['sigla'] = str(input('Sigla: '))
brasil.append(estado.copy())  # copy of a dictionary
for e in brasil:
for k, v in e.items():
print(f'{k} = {v}')
| 26.452381
| 58
| 0.629163
| 170
| 1,111
| 4.111765
| 0.388235
| 0.02289
| 0.057225
| 0.040057
| 0.144492
| 0.125894
| 0.125894
| 0.091559
| 0.091559
| 0.091559
| 0
| 0.011905
| 0.168317
| 1,111
| 41
| 59
| 27.097561
| 0.744589
| 0.155716
| 0
| 0.297297
| 0
| 0
| 0.191604
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.432432
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 1
|
ffbcc28e993823f93d8f7e3809b6abd49a5cc187
| 1,998
|
py
|
Python
|
froide/publicbody/admin.py
|
rufuspollock/froide
|
8ef4dbdd54a74f8c986d59e90348dfdbd85c5da4
|
[
"MIT"
] | null | null | null |
froide/publicbody/admin.py
|
rufuspollock/froide
|
8ef4dbdd54a74f8c986d59e90348dfdbd85c5da4
|
[
"MIT"
] | null | null | null |
froide/publicbody/admin.py
|
rufuspollock/froide
|
8ef4dbdd54a74f8c986d59e90348dfdbd85c5da4
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from froide.publicbody.models import (PublicBody, FoiLaw, PublicBodyTopic,
Jurisdiction)
class PublicBodyAdmin(admin.ModelAdmin):
prepopulated_fields = {
"slug": ("name",),
'classification_slug': ('classification',)
}
list_display = ('name', 'email', 'url', 'classification', 'topic', 'jurisdiction',)
list_filter = ('topic', 'jurisdiction', 'classification')
list_max_show_all = 5000
search_fields = ['name', "description", 'classification']
exclude = ('confirmed',)
raw_id_fields = ('parent', 'root', '_created_by', '_updated_by')
actions = ['export_csv', 'remove_from_index']
def export_csv(self, request, queryset):
return HttpResponse(PublicBody.export_csv(queryset),
content_type='text/csv')
export_csv.short_description = _("Export to CSV")
def remove_from_index(self, request, queryset):
from haystack import connections as haystack_connections
for obj in queryset:
for using in haystack_connections.connections_info.keys():
backend = haystack_connections[using].get_backend()
backend.remove(obj)
self.message_user(request, _("Removed from search index"))
remove_from_index.short_description = _("Remove from search index")
class FoiLawAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
list_display = ('name', 'meta', 'jurisdiction',)
raw_id_fields = ('mediator',)
class JurisdictionAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
class PublicBodyTopicAdmin(admin.ModelAdmin):
prepopulated_fields = {
"slug": ("name",)
}
admin.site.register(PublicBody, PublicBodyAdmin)
admin.site.register(FoiLaw, FoiLawAdmin)
admin.site.register(Jurisdiction, JurisdictionAdmin)
admin.site.register(PublicBodyTopic, PublicBodyTopicAdmin)
| 34.448276
| 87
| 0.699199
| 205
| 1,998
| 6.604878
| 0.395122
| 0.044313
| 0.079764
| 0.097489
| 0.121123
| 0.121123
| 0
| 0
| 0
| 0
| 0
| 0.002436
| 0.178178
| 1,998
| 57
| 88
| 35.052632
| 0.822168
| 0
| 0
| 0.093023
| 0
| 0
| 0.167167
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.116279
| 0.023256
| 0.581395
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
ffc1d0bbd0644054a0b22502249482b17c06c941
| 2,532
|
py
|
Python
|
tests/utils/test_commons.py
|
jajomi/flow
|
c984be6f7de1a34192601c129dbc19f2ce45f135
|
[
"Apache-2.0"
] | null | null | null |
tests/utils/test_commons.py
|
jajomi/flow
|
c984be6f7de1a34192601c129dbc19f2ce45f135
|
[
"Apache-2.0"
] | 6
|
2021-03-05T16:39:42.000Z
|
2021-06-11T01:04:57.000Z
|
tests/utils/test_commons.py
|
jajomi/flow
|
c984be6f7de1a34192601c129dbc19f2ce45f135
|
[
"Apache-2.0"
] | null | null | null |
from unittest.mock import mock_open
from unittest.mock import patch
import flow.utils.commons as commons
def test_extract_story_id_with_empty_list():
story_list = commons.extract_story_id_from_commit_messages([])
assert len(story_list) == 0
commit_example = [
"223342f Adding ability to specify artifactory user [#134082057]",
"4326d00 Adding slack channel option for errors [#130798449]",
"09c1983 Merge pull request #25 from ci-cd/revert-18-github-version-fix",
"445fd02 Revert \"GitHub version fix\""
]
def test_extract_story_id_with_two_stories():
story_list = commons.extract_story_id_from_commit_messages(commit_example)
assert len(story_list) == 2
commit_example_nested_brackets = [
"223342f Adding ability to specify artifactory user [#134082057, [bubba]]",
"4326d00 Adding slack channel option for errors [#130798449]",
"09c1983 Merge pull request #25 from ci-cd/revert-18-github-version-fix",
"445fd02 Revert \"GitHub version fix\""
]
def test_extract_story_id_with_nested_brackets():
story_list = commons.extract_story_id_from_commit_messages(commit_example_nested_brackets)
print(str(story_list))
assert len(story_list) == 1
commit_example_multiple_per_brackets = [
"223342f Adding ability to specify artifactory user [#134082057,#134082058]",
"4326d00 Adding slack channel option for errors [#130798449,123456]",
"09c1983 Merge pull request #25 from ci-cd/revert-18-github-version-fix",
"445fd02 Revert \"GitHub version fix\""
]
def test_extract_story_id_with_multiple_per_brackets():
story_list = commons.extract_story_id_from_commit_messages(commit_example_multiple_per_brackets)
print(str(story_list))
assert len(story_list) == 4
commit_example_dedup = [
"223342f Adding ability to specify artifactory user [#134082057,#134082057]",
"4326d00 Adding slack channel option for errors [#134082057,134082057]",
"09c1983 Merge pull request #25 from ci-cd/revert-18-github-version-fix",
"445fd02 Revert \"GitHub version fix\""
]
def test_extract_story_id_with_dedup():
story_list = commons.extract_story_id_from_commit_messages(commit_example_dedup)
print(str(story_list))
assert len(story_list) == 1
def test_write_to_file():
open_mock = mock_open()
with patch('__main__.open', open_mock, create=True):
commons.write_to_file("somefilepath", "test_write_to_file", open_func=open_mock)
open_mock.assert_called_once_with("somefilepath", "a")
file_mock = open_mock()
file_mock.write.assert_called_once_with("test_write_to_file")
| 36.171429
| 100
| 0.781991
| 361
| 2,532
| 5.155125
| 0.213296
| 0.062869
| 0.075228
| 0.051048
| 0.750672
| 0.711445
| 0.698012
| 0.698012
| 0.588393
| 0.430951
| 0
| 0.097846
| 0.120063
| 2,532
| 69
| 101
| 36.695652
| 0.737433
| 0
| 0
| 0.288462
| 0
| 0
| 0.376777
| 0.080569
| 0
| 0
| 0
| 0
| 0.134615
| 1
| 0.115385
| false
| 0
| 0.057692
| 0
| 0.173077
| 0.057692
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ffc40ad7630c4587dcf4487c052a523769c15b4a
| 1,254
|
py
|
Python
|
packages/M2Crypto-0.21.1/demo/smime/unsmime.py
|
RaphaelPrevost/Back2Shops
|
5f2d369e82fe2a7b9b3a6c55782319b23d142dfd
|
[
"CECILL-B"
] | null | null | null |
packages/M2Crypto-0.21.1/demo/smime/unsmime.py
|
RaphaelPrevost/Back2Shops
|
5f2d369e82fe2a7b9b3a6c55782319b23d142dfd
|
[
"CECILL-B"
] | 6
|
2021-03-31T19:21:50.000Z
|
2022-01-13T01:46:09.000Z
|
packages/M2Crypto-0.21.1/demo/smime/unsmime.py
|
RaphaelPrevost/Back2Shops
|
5f2d369e82fe2a7b9b3a6c55782319b23d142dfd
|
[
"CECILL-B"
] | null | null | null |
#!/usr/bin/env python
"""S/MIME demo.
Copyright (c) 2000 Ng Pheng Siong. All rights reserved."""
from M2Crypto import BIO, Rand, SMIME, X509
import sys
def decrypt_verify(p7file, recip_key, signer_cert, ca_cert):
s = SMIME.SMIME()
# Load decryption private key.
s.load_key(recip_key)
# Extract PKCS#7 blob from input.
p7, bio = SMIME.smime_load_pkcs7_bio(p7file)
# Decrypt.
data = s.decrypt(p7)
# Because we passed in a SignAndEnveloped blob, the output
# of our decryption is a Signed blob. We now verify it.
# Load the signer's cert.
sk = X509.X509_Stack()
s.set_x509_stack(sk)
# Load the CA cert.
st = X509.X509_Store()
st.load_info(ca_cert)
s.set_x509_store(st)
# Verify.
p7, bio = SMIME.smime_load_pkcs7_bio(BIO.MemoryBuffer(data))
if bio is not None:
# Netscape Messenger clear-signs, when also encrypting.
data = s.verify(p7, bio)
else:
# M2Crypto's sendsmime.py opaque-signs, when also encrypting.
data = s.verify(p7)
print data
if __name__ == '__main__':
Rand.load_file('../randpool.dat', -1)
decrypt_verify(BIO.File(sys.stdin), 'client.pem', 'client2.pem','ca.pem')
Rand.save_file('../randpool.dat')
| 24.588235
| 77
| 0.6563
| 188
| 1,254
| 4.212766
| 0.484043
| 0.022727
| 0.05303
| 0.037879
| 0.159091
| 0.159091
| 0.159091
| 0.090909
| 0
| 0
| 0
| 0.040082
| 0.224083
| 1,254
| 50
| 78
| 25.08
| 0.773895
| 0.290271
| 0
| 0
| 0
| 0
| 0.081047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.090909
| null | null | 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ffc57756064cdbfdff55d925646e8ab713a50ba6
| 1,675
|
py
|
Python
|
timeseries/test.py
|
zoobree/MachineLearning
|
67fd35e67469d9f03afd5c090f2ca23f514bebfd
|
[
"Apache-2.0"
] | null | null | null |
timeseries/test.py
|
zoobree/MachineLearning
|
67fd35e67469d9f03afd5c090f2ca23f514bebfd
|
[
"Apache-2.0"
] | 1
|
2018-04-07T05:24:40.000Z
|
2018-04-07T05:24:40.000Z
|
timeseries/test.py
|
joybree/MachineLearning
|
69a381efa35436a6d211005c320576db966eea11
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
import arima
import os
import pandas as pd
class Arima_Test(unittest.TestCase):
def set_data_dir(self):
print("set_data_dir")
self.dir = "E:/code/python/MachineLearning/data/test_data/"
self.error = 0.001
self.num_percent = 0.9
def test_result_one_point(self):
true_num = 0
false_num = 0
print("****test_result_compare****")
self.set_data_dir()
filelist = os.listdir(self.dir)
list_ts_data = []
for file_name in filelist:
df_data = pd.read_csv(self.dir+file_name, encoding='utf-8', index_col='date')
df_data.index = pd.to_datetime(df_data.index)
ts_data = df_data['value']
list_ts_data.append(ts_data)
prediction_value, prediction_var, prediction_con = arima.prediction(ts_data, pre_num=1)
print(prediction_value[0])
print(ts_data[-1])
if abs(prediction_value[0] - ts_data[-1])/ts_data[-1] <= self.error:
true_num = true_num + 1
else:
false_num = false_num + 1
print(true_num)
print(false_num)
self.assertGreaterEqual(true_num / (true_num + false_num), self.num_percent)
def test_result_two_point(self):
pass
def test_result_three_point(self):
pass
def test_trend(self):
"""
increase or decrease
"""
pass
def test_obj_number(self):
pass
def test_run_time(self):
pass
def test_write_result(self):
pass
if __name__ == "__main__":
unittest.main()
| 25.769231
| 99
| 0.587463
| 220
| 1,675
| 4.145455
| 0.345455
| 0.052632
| 0.060307
| 0.065789
| 0.04386
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015491
| 0.306269
| 1,675
| 64
| 100
| 26.171875
| 0.769363
| 0.025672
| 0
| 0.130435
| 0
| 0
| 0.066542
| 0.045398
| 0
| 0
| 0
| 0
| 0.021739
| 1
| 0.173913
| false
| 0.130435
| 0.086957
| 0
| 0.282609
| 0.130435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
ffc7fe1be16dc65b683b9d6a05ef9740a31e195b
| 42,563
|
py
|
Python
|
ion/simulators/SBE37_SMP_simulator.py
|
ooici/coi-services
|
43246f46a82e597345507afd7dfc7373cb346afa
|
[
"BSD-2-Clause"
] | 3
|
2016-09-20T09:50:06.000Z
|
2018-08-10T01:41:38.000Z
|
ion/simulators/SBE37_SMP_simulator.py
|
ooici/coi-services
|
43246f46a82e597345507afd7dfc7373cb346afa
|
[
"BSD-2-Clause"
] | null | null | null |
ion/simulators/SBE37_SMP_simulator.py
|
ooici/coi-services
|
43246f46a82e597345507afd7dfc7373cb346afa
|
[
"BSD-2-Clause"
] | 2
|
2016-03-16T22:25:49.000Z
|
2016-11-26T14:54:21.000Z
|
#!/usr/bin/env python
__author__ = 'Roger Unwin'
import socket
import time
from time import gmtime, strftime
import datetime
import string
import sys
import random
import asyncore
import thread
import getopt
import select
import os
### default values defined below (b/c class is not yet defined)
#default_port = 4001 # TCP port to run on.
#default_message_rate = 5 # 5 sec between messages when streaming
#default_sim=SBE37_random
########### BASE class here handles SBE37 behaviors
########### see below for subclasses that provide different data values
class SBE37(asyncore.dispatcher_with_send):
buf = ""
next_send = None
time_set_at = time.time()
out_buffer = ""
allowable_baud_rates = ['600', '1200', '2400', '4800', '9600', '19200', '38400']
baud_rate = '9600'
date = "010201" # MMDDYY
time = "010100" # HHMMSS
output_salinity = False
output_sound_velocity = False
format = 1
reference_preassure = 0.0
pump_installed = True
sample_number = 0
sleep_state = True
interval = random.randrange(5, 32767)
navg = 0
store_time = False
tx_real_time = True
start_mmddyy = "010201"
start_time = "010101"
sync_wait = 0
serial_sync_mode = False
logging = False
locked = False
start_later = False
tcaldate = "08-nov-05"
ta0 = -2.572242e-04
ta1 = 3.138936e-04
ta2 = -9.717158e-06
ta3 = 2.138735e-07
ccaldate = "08-nov-05"
cg = -9.870930e-01
ch = 1.417895e-01
ci = 1.334915e-04
cj = 3.339261e-05
wbotc = 1.202400e-05
ctcor = 3.250000e-06
cpcor = 9.570000e-08
pcaldate = "12-aug-05"
pa0 = 5.916199e+00
pa1 = 4.851819e-01
pa2 = 4.596432e-07
ptca0 = 2.762492e+02
ptca1 = 6.603433e-01
ptca2 = 5.756490e-03
ptcb0 = 2.461450e+01
ptcb1 = -9.000000e-04
ptcb2 = 0.000000e+00
poffset = 0.000000e+00
rcaldate = "08-nov-05"
rtca0 = 9.999862e-01
rtca1 = 1.686132e-06
rtca2 = -3.022745e-08
knock_count = 0
months = ['BAD PROGRAMMER MONTH', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
save = ""
def __init__(self, socket, thread, streaming_rate, connection_id):
self.socket = socket
self.socket.settimeout(0.0)
self.thread = thread
self.streaming_rate = streaming_rate
# causes error in ion.agents.instrument.test.test_gateway_to_instrument_agent:TestInstrumentAgentViaGateway.test_autosample
#self.max_sleep = streaming_rate/2. if streaming_rate<4 else 2.
self.max_sleep = 0.1
self.connection_id = connection_id
self.handle_read()
def handle_error(self, request, client_address):
print "%3d *** dispatcher reports error: %s %s" % (self.connection_id,client_address,request)
def get_current_time_startlater(self):
#current_time = datetime.datetime.strptime(self.date + " " + self.time, "%m%d%y %H%M%S") + datetime.timedelta( seconds=( int(time.time()) - self.time_set_at) )
format = "%d %b %Y, %H:%M:%S"
return strftime(format, gmtime())
#return current_time.strftime(format)
def get_current_time_startnow(self):
current_time = datetime.datetime.strptime(self.date + " " + self.time, "%m%d%y %H%M%S") + datetime.timedelta( seconds=( int(time.time()) - self.time_set_at) )
format = "%m-%d-%Y, %H:%M:%S"
return strftime(format, gmtime())
#return current_time.strftime(format)
def read_a_char(self):
c = None
if len(self.buf) > 0:
c = self.buf[0:1]
self.buf = self.buf[1:]
else:
self.buf = self.recv(8192)
for x in self.buf:
self.socket.send(x + '\0')
return c
def get_data(self):
data = ""
ret = self.save
try:
while True:
c = self.read_a_char()
if c == None:
break
if c == '\n' or c == '':
self.save = ""
ret += c
data = ret
break
else:
ret += c
except AttributeError:
print "%3d *** closing connection" % self.connection_id
# log_file.close()
self.socket.close()
self.thread.exit()
except:
self.save = ret
data = ""
if data:
data = data.lower()
print "%3d <-- %s"%(self.connection_id,data.strip())
# if log_file.closed == False:
# log_file.write("IN [" + repr(data) + "]\n")
return data
def send_data(self, data, debug):
try:
print "%3d --> %s"%(self.connection_id,data.strip())
self.socket.send(data)
# if log_file.closed == False:
# log_file.write("OUT [" + repr(data) + "]\n")
except Exception,e:
print "%3d *** send_data FAILED [%s] had an exception sending [%s]: %s" % (self.connection_id,debug,data,e)
def handle_read(self):
while True:
self.date = strftime("%m%d%y", gmtime())
self.time = strftime("%H%M%S", gmtime())
time.sleep(0.01)
start_time = datetime.datetime.strptime(self.start_mmddyy + " " + self.start_time, "%m%d%y %H%M%S")
current_time = datetime.datetime.strptime(self.date + " " + self.time, "%m%d%y %H%M%S") + \
datetime.timedelta( seconds=( int(time.time()) - self.time_set_at) )
if self.start_later == True:
if current_time > start_time:
self.start_later = False # only trigger once
self.logging = True
#------------------------------------------------------------------#
data = self.get_data()
if self.logging == True:
if not self.next_send:
time.sleep(0.1)
else:
# sleep longer to use less CPU time when multiple simulators are running until it is about time to transmit
remaining = self.next_send - time.time()
send_now = False
if remaining>self.max_sleep:
time.sleep(self.max_sleep) # worst case: 2sec latency handling command while in streaming mode
elif remaining>0.1:
time.sleep(remaining - 0.1) # sleep off most of remaining time (< max_sleep)
else:
if remaining>0:
time.sleep(remaining)
self.next_send += self.streaming_rate
send_now = True
if send_now and self.tx_real_time:
a,b,c,d,e = self.generate_data_values()
t = self.get_current_time_startlater()
msg = '\r\n#{:.4f},{:.5f}, {:.3f}, {:.4f}, {:.3f}, {}\r\n'.format(a,b,c,d,e,t)
self.send_data(msg, 'MAIN LOGGING LOOP')
# Need to handle commands that are not in the blessed list #
if data:
command_args = string.splitfields(data.rstrip('\r\n'), "=")
if data[0] == '\r' or data[0] == '\n':
locked = False
self.knock_count += 1
if self.knock_count >= 5:
self.send_data('\r\nS>\r\n', 'NEW')
if self.knock_count == 4:
self.send_data('\r\nS>\r\n', 'NEW')
if self.knock_count == 3:
self.send_data('\x00SBE 37-SM\r\n', 'NEW')
self.send_data('S>', 'NEW')
elif command_args[0] not in ['ds', 'dc', 'ts', 'tsr', 'slt', 'sltr', 'qs', 'stop', '\r\n', '\n\r']:
self.send_data('cmd not allowed while logging\n', 'non-permitted command')
data = None
if data:
handled = True
command_args = string.splitfields(data.rstrip('\r\n'), "=")
if command_args[0] == 'baud':
if command_args[1] in self.allowable_baud_rates:
self.baud_rate = command_args[1]
else:
self.send_data("***BAUD ERROR MESSAGE***", 'BAUD ERROR MESSAGE')
elif command_args[0] == 'ds':
self.send_data("SBE37-SMP V 2.6 SERIAL NO. 2165 " + self.date[2:4] + ' ' + self.months[int(self.date[0:2])] + ' 20' + self.date[4:6] + ' ' + self.time[0:2] + ':' + self.time[2:4] + ':' + self.time[4:6] + '\r\n', 'DS line 1')
if self.logging:
self.send_data("logging data\r\n", 'DS line 2')
else:
self.send_data("not logging: received stop command\r\n", 'DS line 2')
self.send_data("sample interval = " + str(self.interval) + " seconds\r\n", 'DS line 3')
self.send_data("samplenumber = " + str(self.sample_number) + ", free = " + str(200000 - self.sample_number * 8) + "\r\n", 'DS line 4') # likely more complex than i have...
if self.tx_real_time:
self.send_data("transmit real-time data\r\n", 'DS line 5')
else:
self.send_data("do not transmit real-time data\r\n", 'DS line 5')
if self.output_salinity:
self.send_data("output salinity with each sample\r\n", 'DS line 6')
else:
self.send_data("do not output salinity with each sample\r\n", 'DS line 6')
if self.output_sound_velocity:
self.send_data("output sound velocity with each sample\r\n", 'DS line 7')
else:
self.send_data("do not output sound velocity with each sample\r\n", 'DS line 7')
if self.store_time:
self.send_data("store time with each sample\r\n", 'DS line 8')
else:
self.send_data("do not store time with each sample\r\n", 'DS line 8')
self.send_data("number of samples to average = " + str(self.navg) + "\r\n", 'DS line 9')
self.send_data("reference pressure = " + str(self.reference_preassure) + " db\r\n", 'DS line 10')
if self.serial_sync_mode:
self.send_data("serial sync mode enabled\r\n", 'DS line 11')
else:
self.send_data("serial sync mode disabled\r\n", 'DS line 11')
self.send_data("wait time after serial sync sampling = " + str(self.sync_wait) + " seconds\r\n", 'DS line 12')
if self.pump_installed:
self.send_data("internal pump is installed\r\n", 'DS line 13')
else:
self.send_data("internal pump is not installed\r\n", 'DS line 13')
self.send_data("temperature = " + str(7.54) + " deg C\r\n", 'DS line 14')
self.send_data("WARNING: LOW BATTERY VOLTAGE!!\r\n", 'DS line 15')
elif command_args[0] == 'mmddyy':
try:
if ((int(command_args[1][0:2]) > 0) and
(int(command_args[1][0:2]) < 13) and
(int(command_args[1][2:4]) > 0) and
(int(command_args[1][2:4]) < 32)):
self.date=command_args[1][0:6]
else:
self.send_data("***DATE RANGE ERROR***" + command_args[1] + "\r\n", 'mmddyy line 1')
except ValueError:
self.send_data("ERROR expected NUMERIC INPUT", 'mmddyy line 2')
elif command_args[0] == 'ddmmyy':
try:
if ((int(command_args[1][2:4]) > 0) and
(int(command_args[1][2:4]) < 13) and
(int(command_args[1][0:2]) > 0) and
(int(command_args[1][0:2]) < 32)):
self.date=command_args[1][2:4] + command_args[1][0:2] + command_args[1][4:6]
else:
self.send_data("***DATE RANGE ERROR***" + command_args[1] + "\r\n", 'ddmmyy line 1')
except ValueError:
self.send_data("ERROR expected NUMERIC INPUT", 'ddmmyy line 2')
elif command_args[0] == 'hhmmss':
try:
if ((int(command_args[1][0:2]) >= 0) and
(int(command_args[1][0:2]) < 24) and
(int(command_args[1][2:4]) >= 0) and
(int(command_args[1][2:4]) < 60) and
(int(command_args[1][4:6]) >= 0) and
(int(command_args[1][4:6]) < 60)):
self.time=command_args[1][0:6]
self.time_set_at = int(time.time())
else:
self.send_data("***TIME RANGE ERROR***" + command_args[1] + "\r\n", 'hhmmss line 1')
except ValueError:
self.send_data("ERROR expected NUMERIC INPUT", 'hhmmss line 2')
elif command_args[0] == 'outputsal':
if command_args[1] == 'y':
self.output_salinity = True
elif command_args[1] == 'n':
self.output_salinity = False
else:
self.send_data("***ERROR IT WAS A Y/N QUESTION*** " + command_args[1] + "\r\n", 'outputsal line 1')
elif command_args[0] == 'outputsv':
if command_args[1] == 'y':
self.output_sound_velocity = True
elif command_args[1] == 'n':
self.output_sound_velocity = False
else:
self.send_data("***ERROR IT WAS A Y/N QUESTION*** " + command_args[1] + "\r\n", 'outputsv line 1')
elif command_args[0] == 'format':
if command_args[1] == '0':
self.format = 0;
elif command_args[1] == '1':
self.format = 1;
elif command_args[1] == '2':
self.format = 2;
else:
self.send_data("***ERROR VALID SETTINGS ARE 0,1,2*** " + command_args[1] + "\r\n", 'format line 1')
elif command_args[0] == 'refpress':
self.reference_preassure = command_args[1]
elif command_args[0] == 'pumpinstalled':
if command_args[1] == 'y':
self.pump_installed = True
elif command_args[1] == 'n':
self.pump_installed = False
else:
self.send_data("***ERROR IT WAS A Y/N QUESTION*** " + command_args[1] + "\r\n", 'pumpinstalled line 1')
elif command_args[0] == 'samplenum':
try:
self.sample_number = int(command_args[1])
except ValueError:
self.send_data("ERROR expected INTEGER", 'samplenum line 1')
elif command_args[0] == 'qs':
self.sleep_state = True # will need to work out how to get out of sleep state later.
elif command_args[0] == 'interval':
try:
self.interval = int(command_args[1])
except ValueError:
self.send_data("ERROR expected INTEGER", 'interval line 1')
elif command_args[0] == 'navg':
try:
self.navg = int(command_args[1])
except ValueError:
self.send_data("ERROR expected INTEGER", 'navg line 1')
elif command_args[0] == 'storetime':
if command_args[1] == 'y':
self.store_time = True
elif command_args[1] == 'n':
self.store_time = False
else:
self.send_data("***ERROR IT WAS A Y/N QUESTION*** " + command_args[1] + "\r\n", 'storetime line 1')
elif command_args[0] == 'txrealtime':
if command_args[1] == 'y':
self.tx_real_time = True
# self.next_send = time.time() + self.streaming_rate
elif command_args[1] == 'n':
self.tx_real_time = False
self.next_send = None
else:
self.send_data("***ERROR IT WAS A Y/N QUESTION*** " + command_args[1] + "\r\n", 'txrealtime line 1')
elif command_args[0] == 'startnow':
self.send_data('start now\r\n', 'startnow line 1')
self.logging = True
self.locked = True
self.knock_count = 0
handled = False
self.next_send = time.time() + self.streaming_rate
elif data[0] == '\r':
#self.send_data('SBE 37-SMP\r\n', '\\ x1b line 1')
handled = False
if self.logging == False:
self.send_data('\r\nS>', '\\ r line 1')
self.locked = False
data = ""
elif data[:1] == '\r\n':
#self.send_data('SBE 37-SMP\r\n', '\\ x1b line 1')
handled = False
self.send_data('S> ', '\\ r \\ n line 1')
self.locked = False
elif command_args[0] == '\x1b':
#self.send_data('SBE 37-SMP\r\n', '\\ x1b line 1')
handled = False
self.send_data('S> ', '\\ x1b line 1')
self.locked = False
elif command_args[0] == 'startmmddyy':
try:
if ((int(command_args[1][0:2]) > 0) and
(int(command_args[1][0:2]) < 13) and
(int(command_args[1][2:4]) > 0) and
(int(command_args[1][2:4]) < 32)):
self.start_mmddyy=command_args[1][0:6]
else:
self.send_data("***DATE RANGE ERROR***" + command_args[1] + "\r\n", 'startmmddyy line 1')
except ValueError:
self.send_data("ERROR expected NUMERIC INPUT", 'startmmddyy line 2')
elif command_args[0] == 'startddmmyy':
try:
if ((int(command_args[1][2:4]) > 0) and
(int(command_args[1][2:4]) < 13) and
(int(command_args[1][0:2]) > 0) and
(int(command_args[1][0:2]) < 32)):
self.start_mmddyy=command_args[1][2:4] + command_args[1][0:2] + command_args[1][4:6]
else:
self.send_data("***DATE RANGE ERROR***" + command_args[1] + "\r\n", 'startddmmyy line 1')
except ValueError:
self.send_data("ERROR expected NUMERIC INPUT", 'startddmmyy line 2')
elif command_args[0] == 'starthhmmss':
try:
if ((int(command_args[1][0:2]) > 0) and
(int(command_args[1][0:2]) < 24) and
(int(command_args[1][2:4]) > 0) and
(int(command_args[1][2:4]) < 60) and
(int(command_args[1][4:6]) >= 0) and
(int(command_args[1][4:6]) < 60)):
self.start_time=command_args[1][0:6]
else:
self.send_data("***START TIME RANGE ERROR***" + command_args[1] + "\r\n", 'starthhmmss line 1')
except ValueError:
self.send_data("ERROR expected NUMERIC INPUT", 'starthhmmss line 2')
elif command_args[0] == 'startlater':
self.start_later = True
self.send_data('start time = ' + self.date[0:2] + ' ' + self.months[int(self.date[2:4])] + ' 20' + self.date[4:6] + ', ' + self.time[0:2] + ':' + self.time[2:4] + ':' + self.time[4:6] + '\r\n', 'startlater line 1')
elif command_args[0] == 'stop':
self.start_later = False
self.logging = False
self.send_data('S>\r\n', 'SPECIAL STOP PROMPT')
handled = False
elif command_args[0] in ('ts', 'tss', 'tsson', 'slt', 'sl'):
a,b,c,d,e = self.generate_data_values()
t = self.date[2:4] + ' ' + self.months[int(self.date[0:2])] + ' 20' + self.date[4:6] + ', ' + self.time[0:2] + ':' + self.time[2:4] + ':' + self.time[4:6]
self.send_data('\r\n{:.4f},{:.5f}, {:.3f}, {:.4f}, {:.3f}, {}\r\n'.format(a,b,c,d,e,t), command_args[0] + ' line 1')
elif command_args[0] in ('tsr','stlr'):
self.send_data('{:9.1f}, {:9.3f}, {:7.1f}\r\n'.format(random.uniform(200000, 500000), random.uniform(2000, 3000), random.uniform(-200, -300)), command_args[0] + ' line 1')
elif command_args[0] == 'syncmode':
if command_args[1] == 'y':
self.serial_sync_mode = True
elif command_args[1] == 'n':
self.serial_sync_mode = False
else:
self.send_data("***ERROR IT WAS A Y/N QUESTION*** " + command_args[1] + "\r\n", 'syncmode line 1')
elif command_args[0] == 'syncwait':
try:
if int(command_args[1]) >= 0 and int(command_args[1]) < 121:
self.sync_wait = int(command_args[1])
else:
self.send_data("*** ERROR INTEGER OUT OF RANGE (0 - 120)", 'syncwait line 1')
except ValueError:
self.send_data("*** ERROR expected INTEGER", 'syncwait line 2')
elif data[0:2] == "dd":
data = data[2:].rstrip('\r\n\r')
command_args = string.splitfields(data, ",")
try:
begin = int(command_args[0])
except ValueError:
self.send_data("*** begin ERROR expected INTEGER", 'dd line 1')
try:
end = int(command_args[1])
except ValueError:
self.send_data("*** end ERROR expected INTEGER", 'dd line 2')
self.send_data('start time = ' + self.date[0:2] + ' ' + self.months[int(self.date[2:4])] + ' 20' + self.date[4:6] + ' ' + self.time[0:2] + ':' + self.time[2:4] + ':' + self.time[4:6] + '\r\n', 'dd line 3')
self.send_data('sample interval = ' + str(self.interval) + ' seconds\r\n', 'dd line 4')
self.send_data('start sample number = ' + str(self.sample_number) + '\r\n\r\n', 'dd line 5')
for sample in range(begin, end):
self.send_data('{:8.4f},{:8.5f},{:9.3f},{:9.4f},{:9.3f}'.format(random.uniform(15, 25), random.uniform(0.001, 0.01), random.uniform(0.2, 0.9), random.uniform(0.01, 0.02), random.uniform(1000, 2000)) + ', ' + self.date[0:2] + ' ' + self.months[int(self.date[2:4])] + ' 20' + self.date[4:6] + ', ' + self.time[0:2] + ':' + self.time[2:4] + ':' + self.time[4:6] + '\r\n', 'dd line 6')
elif command_args[0] == "tt":
count = 100
while count > 0:
count -= 1
self.send_data('{:8.4f}\r\n'.format(random.uniform(15, 25)), 'tt line 1')
time.sleep(1)
data = self.get_data()
if data:
if data[0] == '\x1b':
count = 0
elif command_args[0] == "tc":
count = 100
while count > 0:
count -= 1
self.send_data('{:8.5f}\r\n'.format(random.uniform(0.001, 0.1)), 'tc line 1')
time.sleep(1)
data = self.get_data()
if data:
if data[0] == '\x1b':
count = 0
elif command_args[0] == "tp":
count = 100
while count > 0:
count -= 1
self.send_data('{:8.3f}\r\n'.format(random.uniform(-6.5, -8.2)), 'tp line 1')
time.sleep(1)
data = self.get_data()
if data:
if data[0] == '\x1b':
count = 0
elif command_args[0] == "ttr":
count = 100
while count > 0:
count -= 1
self.send_data('{:9.1f}\r\n'.format(random.uniform(361215, 361219)), 'ttr line 1')
time.sleep(1)
data = self.get_data()
if data:
if data[0] == '\x1b':
count = 0
elif command_args[0] == "tcr":
count = 100
while count > 0:
count -= 1
self.send_data('{:9.3f}\r\n'.format(random.uniform(2600, 2700)), 'tcr line 1')
time.sleep(1)
data = self.get_data()
if data:
if data[0] == '\x1b':
count = 0
elif command_args[0] == "tpr":
count = 100
while count > 0:
count -= 1
self.send_data('{:7.1f},{:6.1f}\r\n'.format(random.uniform(-250, -290),random.uniform(18.1,20.2)), 'tpr line 1')
time.sleep(1)
data = self.get_data()
if data:
if data[0] == '\x1b':
count = 0
elif command_args[0] == "tr":
count = 30
while count > 0:
count -= 1
self.send_data('rtcf = {:9.7f}\r\n'.format(random.uniform(1.0, 1.1)), 'tr line 1')
time.sleep(1)
data = self.get_data()
if data:
if data[0] == '\x1b':
count = 0
elif command_args[0] == "pumpon":
"""
NOP
"""
elif command_args[0] == "pumpoff":
"""
NOP
"""
elif command_args[0] == 'dc':
self.send_data("SBE37-SM V 2.6b 3464\r\n", 'dc line 1')
self.send_data("temperature: " + self.tcaldate + "\r\n", 'dc line 2')
self.send_data(" TA0 = " + '{0:.6e}'.format(self.ta0) + "\r\n", 'dc line 3')
self.send_data(" TA1 = " + '{0:.6e}'.format(self.ta1) + "\r\n", 'dc line 4')
self.send_data(" TA2 = " + '{0:.6e}'.format(self.ta2) + "\r\n", 'dc line 5')
self.send_data(" TA3 = " + '{0:.6e}'.format(self.ta3) + "\r\n", 'dc line 6')
self.send_data("conductivity: " + self.ccaldate + "\r\n", 'dc line 7')
self.send_data(" G = " + '{0:.6e}'.format(self.cg) + "\r\n", 'dc line 8')
self.send_data(" H = " + '{0:.6e}'.format(self.ch) + "\r\n", 'dc line 9')
self.send_data(" I = " + '{0:.6e}'.format(self.ci) + "\r\n", 'dc line 10')
self.send_data(" J = " + '{0:.6e}'.format(self.cj) + "\r\n", 'dc line 11')
self.send_data(" CPCOR = " + '{0:.6e}'.format(self.cpcor) + "\r\n", 'dc line 12')
self.send_data(" CTCOR = " + '{0:.6e}'.format(self.ctcor) + "\r\n", 'dc line 13')
self.send_data(" WBOTC = " + '{0:.6e}'.format(self.wbotc) + "\r\n", 'dc line 14')
self.send_data("pressure S/N 4955, range = " + str(random.uniform(10000, 11000)) + " psia: " + self.pcaldate + "\r\n", 'dc line 15')
self.send_data(" PA0 = " + '{0:.6e}'.format(self.pa0) + "\r\n", 'dc line 16')
self.send_data(" PA1 = " + '{0:.6e}'.format(self.pa1) + "\r\n", 'dc line 17')
self.send_data(" PA2 = " + '{0:.6e}'.format(self.pa2) + "\r\n", 'dc line 18')
self.send_data(" PTCA0 = " + '{0:.6e}'.format(self.ptca0) + "\r\n", 'dc line 19')
self.send_data(" PTCA1 = " + '{0:.6e}'.format(self.ptca1) + "\r\n", 'dc line 20')
self.send_data(" PTCA2 = " + '{0:.6e}'.format(self.ptca2) + "\r\n", 'dc line 21')
self.send_data(" PTCSB0 = " + '{0:.6e}'.format(self.ptcb0) + "\r\n", 'dc line 22')
self.send_data(" PTCSB1 = " + '{0:.6e}'.format(self.ptcb1) + "\r\n", 'dc line 23')
self.send_data(" PTCSB2 = " + '{0:.6e}'.format(self.ptcb2) + "\r\n", 'dc line 24')
self.send_data(" POFFSET = " + '{0:.6e}'.format(self.poffset) + "\r\n", 'dc line 25')
self.send_data("rtc: " + self.rcaldate + "\r\n", 'dc line 26')
self.send_data(" RTCA0 = " + '{0:.6e}'.format(self.rtca0) + "\r\n", 'dc line 27')
self.send_data(" RTCA1 = " + '{0:.6e}'.format(self.rtca1) + "\r\n", 'dc line 28')
self.send_data(" RTCA2 = " + '{0:.6e}'.format(self.rtca2) + "\r\n", 'dc line 29')
################################
# now the coefficient Commands #
################################
elif command_args[0] == 'tcaldate':
self.tcaldate=command_args[1] #take it on faith
elif command_args[0] == 'ta0':
try:
self.ta0 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ta0 line 1')
elif command_args[0] == 'ta1':
try:
self.ta1 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ta1 line 1')
elif command_args[0] == 'ta2':
try:
self.ta2 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ta2 line 1')
elif command_args[0] == 'ta3':
try:
self.ta3 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ta3 line 1')
elif command_args[0] == 'ccaldate':
self.ccaldate=command_args[1] #take it on faith
elif command_args[0] == 'cg':
try:
self.cg = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'cg line 1')
elif command_args[0] == 'ch':
try:
self.ch = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ch line 1')
elif command_args[0] == 'ci':
try:
self.ci = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ci line 1')
elif command_args[0] == 'cj':
try:
self.cj = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'cj line 1')
elif command_args[0] == 'wbotc':
try:
self.wbotc = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'wbotc line 1')
elif command_args[0] == 'ctcor':
try:
self.ctcor = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ctcor line 1')
elif command_args[0] == 'cpcor':
try:
self.cpcor = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'cpcor line 1')
elif command_args[0] == 'pcaldate':
self.pcaldate=command_args[1] #take it on faith
elif command_args[0] == 'pa0':
try:
self.pa0 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'pa0 line 1')
elif command_args[0] == 'pa1':
try:
self.pa1 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'pa1 line 1')
elif command_args[0] == 'pa2':
try:
self.pa2 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'pa2 line 1')
elif command_args[0] == 'ptca0':
try:
self.ptca0 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ptca0 line 1')
elif command_args[0] == 'ptca1':
try:
self.ptca1 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ptca1 line 1')
elif command_args[0] == 'ptca2':
try:
self.ptca2 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ptca2 line 1')
elif command_args[0] == 'ptcb0':
try:
self.ptcb0 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ptcb0 line 1')
elif command_args[0] == 'ptcb1':
try:
self.ptcb1 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ptcb1 line 1')
elif command_args[0] == 'ptcb2':
try:
self.ptcb2 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ptcb2 line 1')
elif command_args[0] == 'poffset':
try:
self.poffset = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'poffset line 1')
elif command_args[0] == 'rcaldate':
self.rcaldate=command_args[1] #take it on faith
elif command_args[0] == 'rtca0':
try:
self.rtca0 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'rtca0 line 1')
elif command_args[0] == 'rtca1':
try:
self.rtca1 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'rtca1 line 1')
elif command_args[0] == 'rtca2':
try:
self.rtca2 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'rtca2 line 1')
else:
handled = False
self.send_data("? CMD\r\n", 'else line 1 RESPONSE TO ' + data)
if handled == True:
self.send_data("\r\nS>", 'default command prompt')
#------------------------------------------------------------------#
class SBE37_server(asyncore.dispatcher):
def __init__(self, sim_class, host, port, rate):
asyncore.dispatcher.__init__(self)
self.connection_count = 0
self.sim_class = sim_class
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.listen(5)
self.message_rate = rate
def handle_accept(self):
pair = self.accept()
if pair is None:
pass
else:
sock, addr = pair
self.connection_count += 1 # not threadsafe -- could wind up with two threads and same count value
print '%3d *** new connection from %r' % (self.connection_count,addr)
try:
thread.start_new_thread(self.sim_class, (sock, thread, self.message_rate, self.connection_count))
except Exception, e:
print "%3d *** exception starting thread: %s"%(self.connection_count,e)
def usage():
print "SBE37-SMP Simulator:\n"
print "This program simulates a SBE37-SMP sensor deployed by \nbeing connected to a MOXA NPort 5410 Serial Device Server."
print "Available options are:"
print " -h, --help : Displays this message"
print " -p, --port= : Sets the port to listen on (>1024, default = %s)." % default_port
def get_opts():
opts, args = getopt.getopt(sys.argv[1:], "c:p:r:h", ["class=", "port=", "rate="])
out={'rate':default_message_rate,'port':default_port,'simulator':SBE37_random}
for o, a in opts:
if o in ("-c", "--class"):
out['simulator'] = getattr(sys.modules[__name__],a)
if o in ("-r", "--rate"):
out['message_rate'] = int(a)
elif o in ("-p", "--port"):
out['port'] = int(a)
else:
print 'unknown option: '+o
return out
def main():
try:
args = get_opts()
except Exception as e:
print 'Exception: %s'%e
usage()
sys.exit()
print 'using args: %r'%args
SBE37_server(sim_class=args['simulator'], host='', port=args['port'], rate=args['rate'])
try:
asyncore.loop()
except:
sys.exit() # Be silent when ^c pressed
################################################################################################
##
## THESE CLASSES generate different sample values for the simulator
#
# return tuple of: temperature, conductivity, pressure, salinity, sound velocity
class SBE37_random(SBE37):
def generate_data_values(self):
return ( random.uniform(-10.0, 100.0), random.uniform(0.0, 100.0), random.uniform(0.0, 1000.0),
random.uniform(0.1, 40.0), random.uniform(1505, 1507))
class SBE37_High(SBE37):
def generate_data_values(self):
return ( random.uniform(45.0, 100.0), random.uniform(50.0, 100.0), random.uniform(500.0, 1000.0), random.uniform(20.05, 40.0), random.uniform(1506.0, 1507.0))
class SBE37_Low(SBE37):
def generate_data_values(self):
return ( random.uniform(-10.0, 45.0), random.uniform(0.0, 50.0), random.uniform(0.0, 500.0), random.uniform(0.1, 20.05), random.uniform(1505.0, 1506.0))
import math
def my_sin(time, Amin, Amax):
sin_val = math.sin(time)
range = Amax - Amin
adj_sin = (sin_val + 1.0) * range/2.0 + Amin
return adj_sin
# vary as sine wave over time
class SBE37_sine(SBE37):
sinwave_time = 0.0
def generate_data_values(self):
self.sinwave_time += 0.2
return ( my_sin(self.sinwave_time, -10.0, 100.0), my_sin(self.sinwave_time, 0.0, 100.0), my_sin(self.sinwave_time, 0.0, 1000.0), my_sin(self.sinwave_time, 0.1, 40.0), my_sin(self.sinwave_time, 1505, 1507))
# narrower, valid range to help ensure density can be calculated
class SBE37_midrange(SBE37):
sinwave_time = 0.0
def generate_data_values(self):
self.sinwave_time += 0.2
return ( my_sin(self.sinwave_time, 5.0, 15.0), my_sin(self.sinwave_time, 2.5, 4.5), my_sin(self.sinwave_time, 2000.0, 4000.0), my_sin(self.sinwave_time, 0.1, 40.0), my_sin(self.sinwave_time, 1505, 1507))
#> Valid ranges for conductivity are 0-7 S/m. Typical values we've seen off the Oregon coast are ~35 mS/cm, which converts to ~3.5 S/m.
#>
#> Valid ranges for temperature are -2-40 deg_C. Typical values we've seen off the Oregon coast are between 5 and 20 deg_C. 12 deg_C would be absolutely reasonable.
#>
#> Valid ranges for pressure are 0-7000 dbar. Really, just choose a depth.
#>
#> I would recommend the simulator produce at C of 3.5 S/m, a T of 12 deg_C and a depth of 10 dbar. Apply sine wave functions with some small fraction of random white noise and let it rip.
#>
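# A minimal sketch of the recommendation above, not part of the original set of
# simulator classes: conductivity near 3.5 S/m, temperature near 12 deg_C and
# pressure near 10 dbar, each a slow sine wave plus a small amount of white
# noise. The class name, the salinity/sound-velocity ranges and the noise
# amplitudes are assumptions chosen for illustration only.
class SBE37_recommended(SBE37):
    sinwave_time = 0.0
    def generate_data_values(self):
        self.sinwave_time += 0.2
        temperature  = my_sin(self.sinwave_time, 11.0, 13.0) + random.gauss(0.0, 0.05)   # deg_C
        conductivity = my_sin(self.sinwave_time,  3.4,  3.6) + random.gauss(0.0, 0.01)   # S/m
        pressure     = my_sin(self.sinwave_time,  9.0, 11.0) + random.gauss(0.0, 0.05)   # dbar
        salinity     = my_sin(self.sinwave_time, 33.0, 35.0) + random.gauss(0.0, 0.05)
        sound_vel    = my_sin(self.sinwave_time, 1505, 1507) + random.gauss(0.0, 0.1)
        return (temperature, conductivity, pressure, salinity, sound_vel)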
################################################################################################
default_port = 4001 # TCP port to run on.
default_message_rate = 5 # 5 sec between messages when streaming
default_sim=SBE37_random
if __name__ == '__main__':
main()
| 44.94509
| 405
| 0.453398
| 4,983
| 42,563
| 3.753562
| 0.120409
| 0.103507
| 0.084688
| 0.055603
| 0.505667
| 0.449797
| 0.376925
| 0.357678
| 0.342868
| 0.31266
| 0
| 0.06122
| 0.403238
| 42,563
| 946
| 406
| 44.9926
| 0.675157
| 0.064798
| 0
| 0.336022
| 0
| 0.00672
| 0.146193
| 0.000991
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.001344
| 0.017473
| null | null | 0.020161
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ffc9b886976a36f8168389759472ba04ff485037
| 10,360
|
py
|
Python
|
Case_Study_1.py
|
Amritha29/Stout_DDA_FULL_STACK_21.github.io
|
89be2324468dfba2ba9afb378881c6e9e460696b
|
[
"CC-BY-4.0"
] | null | null | null |
Case_Study_1.py
|
Amritha29/Stout_DDA_FULL_STACK_21.github.io
|
89be2324468dfba2ba9afb378881c6e9e460696b
|
[
"CC-BY-4.0"
] | null | null | null |
Case_Study_1.py
|
Amritha29/Stout_DDA_FULL_STACK_21.github.io
|
89be2324468dfba2ba9afb378881c6e9e460696b
|
[
"CC-BY-4.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Spyder Editor
Amritha Subburayan code for STOUT DDA FULL STACK CASE STUDIES
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline  (IPython magic; only valid in a notebook/IPython console)
from sklearn import preprocessing
from sklearn.metrics import r2_score
import sklearn.metrics as sm
data = pd.read_csv(r'//Users//amrithasubburayan//Downloads//loans_full_schema.csv')
data.info()
data.describe()
#Checking missing values
data.isna().sum()
#removing emp_title, state , num_accounts_120d_past_due , num_accounts_30d_past_due, tax_liens, public_record_bankrupt,
# paid_late_fees , total_collection_amount_ever , current_accounts_delinq , num_historical_failed_to_pay
# num_collections_last_12m, delinq_2y
# check corr and remove this num_mort_accounts
#storing data to other temp
data2 = data
# DATA DESCRIPTION AND ISSUES :
#There are two issues in this dataset :
#1) Missing values 2) Multi-collinearity
#Missing values can be found in the following rows:
#1) emp_title 2) emp_length 3) annual_income_joint 4) verification_income_joint
# 5) debt_to_income_joint 6) months_since_last_delinq 7) months_since_90d_late
#8) months_since_last_credit_inquiry 9) num_accounts_120d_past_due
#Multicollinearity can be found between these columns :
#1) installment and loan amount - 0.94 2) balance and loan amount - 0.93
# 3) annula income joint and total credit limit - 0.54
#4) Inquires last 12 m and months since last credit inq - 0.51
#5) total credit lines and open credit lines - 0.76 6)
#num satisfactory acc and total credit lines - 0.75
#7) total credit lines and num total cc accounts - 0.77 8)
#total credit lines and num open cc accounts - 0.62
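# A minimal sketch, not part of the original analysis, showing how the
# correlated pairs listed above can be surfaced programmatically; the 0.5
# cutoff is an assumption chosen to match the pairs cited.
corr_abs = data2.corr().abs()
upper = corr_abs.where(np.triu(np.ones(corr_abs.shape, dtype=bool), k=1))
high_corr_pairs = upper.stack().loc[lambda s: s > 0.5].sort_values(ascending=False)
print(high_corr_pairs)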
#Visualizations
plt.figure(figsize=(40,35))
sns.heatmap(data2.corr(), annot = True, cmap = "RdYlGn")
plt.show()
data2['loan_purpose'].value_counts().plot(kind='bar',color=['gray','red','blue','green','purple','yellow','black']).set_title('Loan Purpose')
data2.groupby('homeownership').verified_income.value_counts().unstack(0).plot.bar()
data2.groupby('homeownership').application_type.value_counts().unstack(0).plot(kind="pie",subplots=True, shadow = True,startangle=90,figsize=(15,10),autopct='%1.1f%%')
plt.scatter(data2['installment'],data2['loan_amount'])
d = data2.groupby('emp_length')
s=[]
for key,item in d:
if(key!=7.0):
s.append(d.get_group(key)['interest_rate'].mean())
dict1={"emp_length":[0,1,2,3,4,5,6,8,9,10],"int_rate":s}
plt.plot(dict1['emp_length'],s)
df= data2['application_type']
data2.groupby('application_type').loan_purpose.value_counts()
data2.groupby('application_type').loan_purpose.value_counts().unstack(0).plot(kind="pie",subplots=True, shadow = True,startangle=90,figsize=(25,20),autopct='%1.1f%%')
#Replacing missing rows
d = data2.groupby('application_type').loan_purpose.value_counts()
#data2["verification_income_joint"] = data2['verification_income_joint'].fillna('Not Verified')
for i in range(0, len(data2["verification_income_joint"])):
if pd.isna(data2['verification_income_joint'][i]):
data2['verification_income_joint'][i] = data2['verified_income'][i]
data2["debt_to_income"] = data2['debt_to_income'].fillna(0)
#combining annual income with joint annual income
for i in range(0, len(data2["annual_income_joint"])):
if pd.isna(data2['annual_income_joint'][i]):
data2['annual_income_joint'][i] = data2['annual_income'][i]
#combining debt income with joint debt income
for i in range(0, len(data2["debt_to_income_joint"])):
if pd.isna(data2['debt_to_income_joint'][i]):
data2['debt_to_income_joint'][i] = data2['debt_to_income'][i]
## Replacing with mean values
data2["months_since_last_credit_inquiry"] = data2['months_since_last_credit_inquiry'].fillna(np.mean(data2["months_since_last_credit_inquiry"]))
data2["emp_length"] = data2['emp_length'].fillna(np.mean(data2["emp_length"]))
#Removing unwanted columns because it has more 0 values which will not impact on building a model
data2.drop("emp_title", axis = 1, inplace=True)
data2.drop("state", axis = 1, inplace=True)
data2.drop("num_accounts_120d_past_due", axis = 1, inplace=True)
data2.drop("num_accounts_30d_past_due", axis = 1, inplace=True)
data2.drop("tax_liens", axis = 1, inplace=True)
data2.drop("public_record_bankrupt", axis = 1, inplace=True)
data2.drop("paid_late_fees", axis = 1, inplace=True)
data2.drop("total_collection_amount_ever", axis = 1, inplace=True)
data2.drop("current_accounts_delinq", axis = 1, inplace=True)
data2.drop("num_historical_failed_to_pay", axis = 1, inplace=True)
data2.drop("num_collections_last_12m", axis = 1, inplace=True)
data2.drop("delinq_2y", axis = 1, inplace=True)
data2.drop("verified_income", axis = 1, inplace=True)
data2.drop("annual_income", axis = 1, inplace=True)
data2.drop("debt_to_income", axis = 1, inplace=True)
data2.drop("months_since_90d_late", axis = 1, inplace=True)
data2.drop("months_since_last_delinq", axis = 1, inplace=True)
data2.drop("issue_month", axis = 1, inplace=True)
data2.drop("initial_listing_status", axis = 1, inplace=True)
data2.drop("disbursement_method", axis = 1, inplace=True)
data2.drop("grade", axis = 1, inplace=True)
#removing columns based on correlation
data2.drop("total_credit_limit", axis = 1, inplace=True)
data2.drop("current_installment_accounts", axis = 1, inplace=True)
data2.drop("accounts_opened_24m", axis = 1, inplace=True)
data2.drop("open_credit_lines", axis = 1, inplace=True)
data2.drop("loan_amount", axis = 1, inplace=True)
data2.drop("balance", axis = 1, inplace=True)
data2.drop("paid_principal", axis = 1, inplace=True)
data2.drop("num_satisfactory_accounts", axis = 1, inplace=True)
data2.drop("total_credit_lines", axis = 1, inplace=True)
data2.drop("num_active_debit_accounts", axis = 1, inplace=True)
data2.drop("num_open_cc_accounts", axis = 1, inplace=True)
data2.drop("installment", axis = 1, inplace=True)
data2.drop("num_total_cc_accounts", axis = 1, inplace=True)
#Removing Outliers based on its Quartile and Max Value
data5 = data2
sns.boxplot(data5['paid_interest'])
data5 = data5.loc[data5["inquiries_last_12m"] < 15]
data5 = data5.loc[data5["total_credit_utilized"] < 400000]
data5 = data5.loc[data5["months_since_last_credit_inquiry"] < 20]
data5 = data5.loc[data5["total_debit_limit"] < 220000]
data5 = data5.loc[data5["num_cc_carrying_balance"] < 20]
data5 = data5.loc[data5["num_mort_accounts"] < 10]
data5 = data5.loc[data5["paid_total"] < 35000]
data5 = data5.loc[data5["paid_interest"] < 3000]
# Encoding Categorical Data using LabelEncoder
le = preprocessing.LabelEncoder()
data5['sub_grade'] = le.fit_transform(data5['sub_grade'].values)
data5['verification_income_joint'] = le.fit_transform(data5['verification_income_joint'].values)
data5['loan_status'] = le.fit_transform(data5['loan_status'].values)
data5['loan_purpose'] = le.fit_transform(data5['loan_purpose'].values)
data5['application_type'] = le.fit_transform(data5['application_type'].values)
data5['homeownership'] = le.fit_transform(data5['homeownership'].values)
data5 = data5.reindex(columns=['emp_length', 'homeownership', 'annual_income_joint',
'verification_income_joint', 'debt_to_income_joint',
'earliest_credit_line', 'inquiries_last_12m', 'total_credit_utilized',
'months_since_last_credit_inquiry', 'total_debit_limit',
'num_cc_carrying_balance', 'num_mort_accounts',
'account_never_delinq_percent', 'loan_purpose', 'application_type',
'term', 'sub_grade', 'loan_status', 'paid_total',
'paid_interest', 'interest_rate'])
X = data5.iloc[:, :-1].values
y = data5.iloc[:, -1].values
y = y.reshape(len(y),1)
#Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X)
y = sc_y.fit_transform(y)
#Train Test Split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
#Modelling the Data
#Support Vector Regression
from sklearn.svm import SVR
regressor_SVM = SVR(kernel = 'rbf')
regressor_SVM.fit(X_train, y_train)
#For Training Data
SVR_train_pred = regressor_SVM.predict(X_train)
score2=r2_score(y_train,SVR_train_pred)
score2
print("Mean absolute error =", round(sm.mean_absolute_error(y_train, SVR_train_pred), 2))
print("Mean squared error =", round(sm.mean_squared_error(y_train, SVR_train_pred), 2))
print("Median absolute error =", round(sm.median_absolute_error(y_train, SVR_train_pred), 2))
print("Explain variance score =", round(sm.explained_variance_score(y_train, SVR_train_pred), 2))
#For Testing data
SVR_test_pred = regressor_SVM.predict(X_test)
score3=r2_score(y_test,SVR_test_pred)
score3
print("Mean absolute error =", round(sm.mean_absolute_error(y_test, SVR_test_pred), 2))
print("Mean squared error =", round(sm.mean_squared_error(y_test, SVR_test_pred), 2))
print("Median absolute error =", round(sm.median_absolute_error(y_test, SVR_test_pred), 2))
print("Explain variance score =", round(sm.explained_variance_score(y_test, SVR_test_pred), 2))
#Random Forest Model
from sklearn.ensemble import RandomForestRegressor
regressor1 = RandomForestRegressor(n_estimators = 10, random_state = 0)
regressor1.fit(X_train, y_train)
#For Training Data
random_train_pred = regressor1.predict(X_train)
score1=r2_score(y_train,random_train_pred)
score1
print("Mean absolute error =", round(sm.mean_absolute_error(y_train, random_train_pred), 2))
print("Mean squared error =", round(sm.mean_squared_error(y_train, random_train_pred), 2))
print("Median absolute error =", round(sm.median_absolute_error(y_train, random_train_pred), 2))
print("Explain variance score =", round(sm.explained_variance_score(y_train, random_train_pred), 2))
#For Testing Data
random_test_pred = regressor1.predict(X_test)
score=r2_score(y_test,random_test_pred)
score
print("Mean absolute error =", round(sm.mean_absolute_error(y_test, random_test_pred), 2))
print("Mean squared error =", round(sm.mean_squared_error(y_test, random_test_pred), 2))
print("Median absolute error =", round(sm.median_absolute_error(y_test, random_test_pred), 2))
print("Explain variance score =", round(sm.explained_variance_score(y_test, random_test_pred), 2))
| 32.888889
| 167
| 0.754826
| 1,575
| 10,360
| 4.713016
| 0.205079
| 0.041223
| 0.054964
| 0.073286
| 0.482689
| 0.403476
| 0.325475
| 0.243567
| 0.178769
| 0.15937
| 0
| 0.0359
| 0.110039
| 10,360
| 314
| 168
| 32.993631
| 0.769197
| 0.178185
| 0
| 0.013699
| 0
| 0
| 0.279484
| 0.098937
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.075342
| null | null | 0.109589
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ffcf7b955b11391d80d86773ca0338d0d81e1b2c
| 709
|
py
|
Python
|
Dataset/Leetcode/test/56/463.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/test/56/463.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/test/56/463.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
class Solution:
def XXX(self, intervals: List[List[int]]) -> List[List[int]]:
if len(intervals) == 1:
return intervals
intervals.sort()
result = [intervals[0]]
for i in range(1, len(intervals)):
# Three cases to consider when comparing two intervals; call the previous interval's endpoints a,b and the current one's c,d.
temp = result.pop()
cur = intervals[i]
# If b >= c and b <= d, merge the two into [a, d]
if temp[1] >= cur[0] and temp[1] <= cur[1]:
result.append([temp[0], cur[1]])
# If b > d, the current interval is contained in the previous one, so keep [a, b]
elif temp[1] > cur[1]:
result.append(temp)
# Otherwise the two intervals do not overlap
else:
result.extend([temp, cur])
return result
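# Example, as a hypothetical driver that is not part of the submitted solution:
#   Solution().XXX([[1, 3], [2, 6], [8, 10], [15, 18]])  ->  [[1, 6], [8, 10], [15, 18]]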
| 32.227273
| 65
| 0.472496
| 85
| 709
| 3.941176
| 0.435294
| 0.044776
| 0.071642
| 0.053731
| 0.149254
| 0.149254
| 0.149254
| 0
| 0
| 0
| 0
| 0.025287
| 0.38646
| 709
| 21
| 66
| 33.761905
| 0.744828
| 0.119887
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ffd73066eb937a59b32d4daec9ba6f8807fa09da
| 5,551
|
py
|
Python
|
utils/StartMOOS.py
|
ianfixes/MOOS-python-utils
|
1c34f3b8cde4fdcee48a8ee128a3c160eb17d722
|
[
"WTFPL"
] | 3
|
2015-07-09T17:51:20.000Z
|
2016-04-14T23:06:04.000Z
|
utils/StartMOOS.py
|
ifreecarve/MOOS-python-utils
|
1c34f3b8cde4fdcee48a8ee128a3c160eb17d722
|
[
"WTFPL"
] | null | null | null |
utils/StartMOOS.py
|
ifreecarve/MOOS-python-utils
|
1c34f3b8cde4fdcee48a8ee128a3c160eb17d722
|
[
"WTFPL"
] | 3
|
2015-03-31T04:18:21.000Z
|
2016-10-22T04:55:16.000Z
|
#!/usr/bin/env python
###########################################################################
#
# Written in 2009 by Ian Katz <ijk5@mit.edu>
# Terms: WTFPL (http://sam.zoy.org/wtfpl/)
# See COPYING and WARRANTY files included in this distribution
#
###########################################################################
# this program launches MOOS processes and verifies that they're up.
# this sequential launch method is gentler to low-horsepower CPUs.
#
# It takes 2 command line arguments:
# 1. the MOOS config file to be used
# 2. OPTIONALLY the working directory that all apps should launch from
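#
# Example invocation (file and directory names here are hypothetical, shown
# only for illustration):
#   python StartMOOS.py mission.moos /home/auv/missions
# This starts MOOSDB plus every app listed in desired_MOOS_procs(), all reading
# mission.moos and launched from /home/auv/missions.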
import os
import sys
import time
#MAKE ANY CHANGES HERE
def desired_MOOS_procs():
#The app name, and -- optionally -- its ID string.
# use a trailing comma in the tuple either way
return [
("pMOOSBridge",),
("iBatterySG", "Battery"),
("iDepth",),
("pSystemHealth", "pSystemHealth[oiv]"),
("iDVL_SG","iDVL"),
("iINS_SG","iINS",),
("iGPS_SG", "iGPS"),
("iRange",),
("iMultisonde", "CTD"),
("iActuationSG", "Thrusters"),
("iMotor", "RTU"),
# ("pLogMySQL",),
("pNav",),
("pHelmSG","pHelm"),
]
def tick():
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(0.2)
def start_MOOS_process_in_new_screen(app_name, config_file, app_id_string=None):
#start in "detatched mode" using a string identifier
command_line = "screen -dmS "
if(app_id_string is None):
command_line += app_name
else:
command_line += app_id_string
command_line += " " + app_name + " " + config_file
if(app_id_string is not None):
command_line += " " + app_id_string
#print command_line
return os.system(command_line)
def start_all_MOOSProcesses(process_list, config_file, time_between_starts=2.0):
import time
for p in process_list:
appname = p[0]
args = (appname, config_file)
if len(p) > 1:
appname = p[1]
args = args + (p[1],)
print "Starting", appname.ljust(20), "in new screen...",
start_MOOS_process_in_new_screen(*args)
print "OK"
time.sleep(time_between_starts)
def start_MOOS_processes_sequentially(process_list, config_file, moosComms):
#get mail from the server manually
def FetchClients():
inbox = pyMOOS.MOOSMSG_LIST()
if not moosComms.Fetch(inbox):
return None
#go through all messages and put them in the local cache
iter = inbox.iterator()
try:
while 1:
msg = iter.next()
varname = msg.GetKey()
if varname == "DB_CLIENTS":
return msg.GetString()
except StopIteration:
return 0
#find out if we successfully fetched
def FetchSuccess(result):
if result == None: #fetch error
return False
if result == 0: #message DNE
return False
return True
print "Registering for DB_CLIENTS...",
moosComms.Register("DB_CLIENTS", 0.2)
#wait for registration confirmation
while not FetchSuccess(FetchClients()):
tick()
print "Done!"
for p in process_list:
appname = p[0]
args = (appname, config_file)
if len(p) > 1:
appname = p[1]
args = args + (p[1],)
print "Starting", appname.ljust(20, "."),
start_MOOS_process_in_new_screen(*args)
while True:
tick()
clientstring = FetchClients()
if FetchSuccess(clientstring):
clientset = set(clientstring.split(","))
if appname in clientset:
break
print "Done!"
print "Unregistering...",
moosComms.UnRegister("DB_CLIENTS")
print "Done!"
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage: " + sys.argv[0] + "<MOOS config file name> [working directory]"
exit(1)
#The app name, and -- optionally -- its ID string
moosProcList = desired_MOOS_procs()
moosConfigFile = sys.argv[1]
if len(sys.argv) == 3:
#we want to run all processes in this directory
os.chdir(sys.argv[2])
print "Starting MOOSDB...",
start_MOOS_process_in_new_screen("MOOSDB", moosConfigFile)
#see if we can use pyMOOS to intelligently launch processes
try:
import pyMOOS
pi = pyMOOS.PI # force an error
except:
#fall back on basic implementation
print "Done"
print "\nNo pyMOOS detected... falling back on timed launch sequence\n"
start_all_MOOSProcesses(moosProcList, moosConfigFile, 5.0)
exit(0)
#wait for connect
myComms = pyMOOS.CMOOSCommClient()
if myComms.Run("localhost", 9000, "StartMOOS.py[" + os.uname()[1] + "]"):
print "Done!"
print "\n\nStarting MOOS processes the SCHMANCY way!\n"
else:
print "Failed to connect to local MOOSDB."
print "You may want to 'killall screen' and try again."
exit(1)
print "Connecting to MOOSDB...",
while not myComms.IsConnected():
tick()
print "Done!"
#start each process and wait for it to connect
start_MOOS_processes_sequentially(moosProcList, moosConfigFile, myComms)
print "\nAll MOOS processes successfully launched!"
| 26.816425
| 85
| 0.572149
| 639
| 5,551
| 4.838811
| 0.383412
| 0.028461
| 0.017788
| 0.023286
| 0.148124
| 0.124191
| 0.106727
| 0.086675
| 0.064683
| 0.064683
| 0
| 0.011285
| 0.297604
| 5,551
| 206
| 86
| 26.946602
| 0.781739
| 0.200324
| 0
| 0.233333
| 0
| 0
| 0.160169
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.041667
| null | null | 0.158333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ffde4e382f893654ea15768c8c27165eee09e3a4
| 3,720
|
py
|
Python
|
src/Control/Sign.py
|
hieuhdh/Multi-tasking-program
|
2f064a554f647247c84979b7a27f0797d1e1b5af
|
[
"MIT"
] | null | null | null |
src/Control/Sign.py
|
hieuhdh/Multi-tasking-program
|
2f064a554f647247c84979b7a27f0797d1e1b5af
|
[
"MIT"
] | null | null | null |
src/Control/Sign.py
|
hieuhdh/Multi-tasking-program
|
2f064a554f647247c84979b7a27f0797d1e1b5af
|
[
"MIT"
] | null | null | null |
from tkinter.font import BOLD
from PIL import ImageTk
from tkinter import*
from PIL import Image
from tkinter import messagebox
from Tools.log_db import*
class Sign:
def __init__(self, root):
self.root = root
## Init frame and button
Frame_sign = Frame(self.root, bg="#120b26")
Frame_sign.place(x = 300,y = 0, height = 540, width=660)
global image_default_signup
image_default_signup = ImageTk.PhotoImage(file = 'images/interfaces/signup.png')
logo_default = Label(Frame_sign, image = image_default_signup )
logo_default.place( x = 0, y = 0, relheight = 1, relwidth = 1 )
self.txt_name = Entry(Frame_sign, font=("Times New Roman",15), fg = "#8078c4", bg = "#120b26", cursor="hand2", bd = 0, width = 10)
self.txt_name.place(x = 180, y = 175, height= 34, width= 326)
self.txt_username = Entry(Frame_sign, font=("Times New Roman", 15), fg = "#8078c4", bg = "#120b26", cursor = "hand2", bd = 0)
self.txt_username.place(x = 180, y = 248, height= 34, width= 326)
self.txt_password = Entry(Frame_sign, font=("Times New Roman",15), fg = "#8078c4",bg = "#120b26", cursor = "hand2", show = "*", bd = 0, highlightbackground = "#b0bde0")
self.txt_password.place(x = 180, y = 321, height= 34, width= 326)
self.txt_password_comfirm = Entry(Frame_sign, font = ("Times New Roman",15), fg = "#8078c4",bg = "#120b26", cursor = "hand2", show = "*", bd = 0)
self.txt_password_comfirm.place(x = 180, y = 394, height= 34, width= 326)
## Make sign in button
self.sign_btn = Button(Frame_sign, activebackground="#823af7", activeforeground="white",command=self.sign, text = "Submit", font = ("Times New Roman",12,"bold"), fg = "#211c49", bg = "#823af7", relief = "flat", cursor = "hand2", borderwidth = 0, width = 38)
self.sign_btn.place(x = 156, y = 470)
## Action for Sign in
def sign(self):
if self.txt_name.get() != "" and self.txt_username.get() != "" and self.txt_password.get() != "" and self.txt_password_comfirm !="":
if self.txt_password.get() != self.txt_password_comfirm.get():
messagebox.showerror("Error","Your password didn't get match!", parent = self.root)
else:
## Append the name, username and password to the sign-up log file; only add them if the account is not already in the database
username = self.txt_username.get()
password = encode(self.txt_password.get())
arr = [username, password]
if checkDB_Sign(arr) == False:
file = open("src/Documents/log_sign.txt","a", encoding= "utf-8")
file.writelines(f"name-username-password: {self.txt_name.get()}; {username}; {password}\n")
messagebox.showinfo("Welcome","You are registered successfully!", parent = self.root)
file.close()
else:
messagebox.showerror("Error","Account already exists!", parent = self.root)
else:
if self.txt_name.get() == "":
messagebox.showerror("Error","Please, enter your full name!", parent = self.root)
elif self.txt_username.get() == "":
messagebox.showerror("Error","Please, enter your username!", parent = self.root)
elif self.txt_password.get() == "":
messagebox.showerror("Error","Please, enter your password!", parent = self.root)
elif self.txt_password_comfirm.get() == "":
messagebox.showerror("Error","Please, enter your password comfirm!", parent = self.root)
| 53.913043
| 265
| 0.596774
| 466
| 3,720
| 4.654506
| 0.291845
| 0.067773
| 0.076072
| 0.039189
| 0.344398
| 0.311664
| 0.289534
| 0.198709
| 0.119871
| 0.119871
| 0
| 0.054241
| 0.261559
| 3,720
| 69
| 266
| 53.913043
| 0.735348
| 0.045968
| 0
| 0.061224
| 0
| 0
| 0.165772
| 0.027958
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040816
| false
| 0.285714
| 0.122449
| 0
| 0.183673
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
ffe63e2dda8d22501b711fdd07b98a6cfff2ea5a
| 2,484
|
py
|
Python
|
bot/PythonProject/Commands.py
|
RamaDev09/CrateBot
|
34b9f50b88da42cc1c449466402897340ec142df
|
[
"MIT"
] | null | null | null |
bot/PythonProject/Commands.py
|
RamaDev09/CrateBot
|
34b9f50b88da42cc1c449466402897340ec142df
|
[
"MIT"
] | null | null | null |
bot/PythonProject/Commands.py
|
RamaDev09/CrateBot
|
34b9f50b88da42cc1c449466402897340ec142df
|
[
"MIT"
] | null | null | null |
import os
from bot.TextInput import TextInput
from bot.prompt import color_msg
def PythonCommands(file, name, category, description, slash):
here = os.getcwd()
# Writing a new import line
cogs = file['config']['commands'] = []
cogs.append(name)
with open(here + "/main.py", "r") as f :
lines = f.readlines()
line = 0
for i in lines :
line += 1
if lines[line - 1] == "\n" : break
lines[line - 1] = f"from cogs.commands.{category}.{name} import {category}\n"
with open(here + "/main.py", "w") as f :
f.writelines(lines)
f.close()
if not slash['slash-command'] :
try :
dir = os.path.join(here + "/cogs/commands", category)
os.mkdir(dir)
try :
with open(here + "/cogs/commands/" + category + "/" + name + ".py", "x") as f :
f.write(
TextInput.CommandPy(self=TextInput(), name=name, category=category, description=description))
color_msg("#00FF00", "Command Created")
except FileExistsError :
color_msg("#ff0000", "Command Already Exits")
except FileNotFoundError :
color_msg("#ff0000", "Make sure you are in CrateBot Project")
except FileExistsError :
try :
with open(here + "/cogs/commands/" + category + "/" + name + ".py", "x") as f :
f.write(
TextInput.CommandPy(self=TextInput(), name=name, category=category, description=description))
color_msg("#00FF00", "Command Created")
except FileExistsError :
color_msg("#ff0000", "Command Already Exits")
else :
try :
dir = os.path.join(here + "/cogs/commands", category)
os.mkdir(dir)
try :
with open(here + "/cogs/commands/" + category + "/" + name + ".py", "x") as f :
f.write(
TextInput.CommandSlashPy(self=TextInput(), name=name, category=category,
description=description))
color_msg("#00FF00", "Command Created")
except FileExistsError :
color_msg("#ff0000", "Command Already Exits")
except FileNotFoundError :
color_msg("#ff0000", "Make sure you are in CrateBot Project")
| 45.163636
| 118
| 0.517311
| 252
| 2,484
| 5.063492
| 0.285714
| 0.056426
| 0.094044
| 0.094044
| 0.699843
| 0.67163
| 0.67163
| 0.67163
| 0.67163
| 0.67163
| 0
| 0.022556
| 0.357488
| 2,484
| 55
| 119
| 45.163636
| 0.776942
| 0.010064
| 0
| 0.596154
| 0
| 0
| 0.178453
| 0.012895
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019231
| false
| 0
| 0.076923
| 0
| 0.096154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ffe7fe43c53e89a050ea85e42fd101e3306b2423
| 9,139
|
py
|
Python
|
vision_proc/proc_frame.py
|
SMS-Raiders/First2016
|
a08eb1fa195bd869f8e7de7761d791e3fcf23d22
|
[
"BSD-3-Clause"
] | 1
|
2016-03-08T14:39:52.000Z
|
2016-03-08T14:39:52.000Z
|
vision_proc/proc_frame.py
|
SMS-Raiders/First2016
|
a08eb1fa195bd869f8e7de7761d791e3fcf23d22
|
[
"BSD-3-Clause"
] | null | null | null |
vision_proc/proc_frame.py
|
SMS-Raiders/First2016
|
a08eb1fa195bd869f8e7de7761d791e3fcf23d22
|
[
"BSD-3-Clause"
] | null | null | null |
#!/bin/python
#Frame processing and distance estimation for
#goal
#-------------------------------------------------------------------------------
# IMPORTS
#-------------------------------------------------------------------------------
from __future__ import print_function   # keep print() working on Python 2 as well
import cv2
import math
import numpy
import sys
#-------------------------------------------------------------------------------
# VARIABLES
#-------------------------------------------------------------------------------
def cvClr( R, G, B ):
"""
Color array macro
"""
return( numpy.array( [R,G,B], numpy.uint8 ) )
#=====================================================================
# Approx. The green color range
#=====================================================================
MASK_LOW = cvClr( 0, 0, 245 )
MASK_HIGH = cvClr( 255, 70, 255 )
#=====================================================================
# Approximate Areas for the goal (Pixels)
#=====================================================================
#MIN_AREA = 250
MIN_AREA = 1600
#MAX_AREA = 4000
MAX_AREA = 5000
#=================================================================
# Numbers Determined from experiment apart from 0 and 20
# Straight on to Goal
# width and height and area are in pixel area
# THIS IS THE CONTOUR AREA, NOT THE CONVEX HULL AREA!
#=================================================================
goal_lkup = [
{ 'dist ft' : 0, 'width' : 200, 'height' : 90, 'area' : 9000, 'ratio w_h' : 1.80 }, #0ft not tested needs to be large
{ 'dist ft' : 7, 'width' : 151, 'height' : 88, 'area' : 4828, 'ratio w_h' : 1.71 },
{ 'dist ft' : 8, 'width' : 141, 'height' : 85, 'area' : 4700, 'ratio w_h' : 1.65 },
{ 'dist ft' : 9, 'width' : 132, 'height' : 81, 'area' : 4300, 'ratio w_h' : 1.62 },
{ 'dist ft' : 10, 'width' : 123, 'height' : 78, 'area' : 3860, 'ratio w_h' : 1.57 },
{ 'dist ft' : 11, 'width' : 114, 'height' : 75, 'area' : 3420, 'ratio w_h' : 1.52 },
{ 'dist ft' : 12, 'width' : 108, 'height' : 73, 'area' : 3120, 'ratio w_h' : 1.47 },
{ 'dist ft' : 13, 'width' : 102, 'height' : 70, 'area' : 2770, 'ratio w_h' : 1.45 },
{ 'dist ft' : 14, 'width' : 96 , 'height' : 68, 'area' : 2357, 'ratio w_h' : 1.41 },
{ 'dist ft' : 20, 'width' : 60 , 'height' : 35, 'area' : 1000, 'ratio w_h' : 1.30 } ] #20 ft not tested needs to be small
#-------------------------------------------------------------------------------
# CLASSES
#-------------------------------------------------------------------------------
class Point:
"""Simple Class for XY point"""
x = 0
y = 0
#-------------------------------------------------------------------------------
# PROCEDURES
#-------------------------------------------------------------------------------
def find_squares( contours, debug=False ):
"""
Find square shaped objects
"""
#=================================================================
    # The minimum and maximum ratios of width vs. height for the goal,
    # based on experimental results; the goal is approx. 1.5:1
#=================================================================
MIN_RATIO = 1.3
MAX_RATIO = 1.8
ret = []
for shape in contours:
x, y, w, h = cv2.boundingRect( shape )
w_h_ratio = float( w ) / float( h )
        if debug:
            print( "Area", w * h )
            print( "Width ", w )
            print( "Height", h )
if MIN_RATIO < w_h_ratio and w_h_ratio < MAX_RATIO:
ret.append( shape )
return( ret )
def filter_area( contours, debug=False ):
"""
Filter out contours based on area
"""
ret = []
for x in contours:
area = cv2.contourArea( x )
if area > MIN_AREA and area < MAX_AREA:
            if debug:
                print( "Area", area )
ret.append( x )
return( ret )
def find_center( contours ):
"""
Find the center of a contour based on moments
"""
ret = []
for x in contours:
M = cv2.moments( x )
pt = Point()
pt.x = int( M['m10']/M['m00'] )
pt.y = int( M['m01']/M['m00'] )
ret.append( pt )
    return( ret )
def convex_hull_area( contours, debug= False ):
"""
Find the Area of convex Hulls
"""
ret_areas = []
ret_hulls = []
for c in contours:
hull = cv2.convexHull( c )
area = cv2.contourArea( hull )
ret_areas.append( area )
ret_hulls.append( hull )
if( debug ):
print( "Hull area: {0}".format( area ) )
return ( ret_areas, ret_hulls )
def angle_from_point( x, img_width=640, fov_angle=44 ):
"""
Calculate the angle from a point
"""
return( -( ( img_width / 2 ) - x ) * fov_angle )
def lin_scale( val, x1, y1, x2, y2 ):
"""
Linearly scale Val to y1 and y2 from x1 and x2 range
x1 and y1 are low values
"""
x_range = (x2 - x1)
new_val = 0
    if x_range == 0:
new_val = y1
else:
y_range = ( y2 - y1 )
new_val = ( ( ( val - x1 ) * y_range ) / x_range ) + y1
return new_val
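# Quick sanity check of lin_scale (illustrative, not in the original file):
# lin_scale( 5.0, 0, 0, 10, 100 ) maps 5.0 from the range [0, 10] to [0, 100],
# giving ((5.0 - 0) * 100) / 10 + 0 = 50.0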
def dist_from_goal( area ):
"""
Calculates the distance to the Goal based on area, x, y
Args:
area: the area in pixels of the target
Returns:
Feet from goal
"""
dist = 99
prev = goal_lkup[ 0 ]
for cur in goal_lkup:
#=============================================================
        # If the area is less than the currently selected area, but
        # greater than the previous area, the distance is somewhere in
        # between, so interpolate linearly (see the worked example below)
#=============================================================
if area > cur[ 'area' ] and area < prev[ 'area' ]:
dist = lin_scale( area, cur[ 'area' ], cur[ 'dist ft' ], prev[ 'area' ], prev[ 'dist ft' ] )
return dist
prev = cur
return dist
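# Worked example for dist_from_goal (illustrative numbers, not from the original
# file): a measured area of 4000 px falls between the 10 ft entry (3860 px) and
# the 9 ft entry (4300 px) in goal_lkup, so the distance is interpolated as
#   lin_scale( 4000.0, 3860, 10, 4300, 9 )
#     = ((4000.0 - 3860) * (9 - 10)) / (4300 - 3860) + 10
# which is about 9.68 ft. Areas larger than every entry or smaller than every
# entry fall through the loop and return the sentinel value 99.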
def proc_frame( frame, debug=False ):
"""
Process a frame
"""
#=================================================================
# Convert to HSV so we can mask more easily
#=================================================================
hsv_frame = cv2.cvtColor( frame, cv2.COLOR_BGR2HSV )
#=================================================================
# Apply the color mask defined at the top of file
#=================================================================
if( debug ):
hlo = cv2.getTrackbarPos( "H low", "Mask" )
hhi = cv2.getTrackbarPos( "H hi", "Mask" )
slo = cv2.getTrackbarPos( "S low", "Mask" )
shi = cv2.getTrackbarPos( "S hi", "Mask" )
vlo = cv2.getTrackbarPos( "V low", "Mask" )
vhi = cv2.getTrackbarPos( "V hi", "Mask" )
lo = numpy.array( [ hlo, slo, vlo ], numpy.uint8 )
hi = numpy.array( [ hhi, shi, vhi ], numpy.uint8 )
color_mask = cv2.inRange( hsv_frame, lo, hi )
else:
color_mask = cv2.inRange( hsv_frame, MASK_LOW, MASK_HIGH )
#=================================================================
# Apply our color mask
#=================================================================
masked_frame = cv2.bitwise_and( hsv_frame, hsv_frame, mask = color_mask )
#=================================================================
# Contours stuff
# First convert to Gray and find the contours
#=================================================================
bw_frame = cv2.cvtColor( masked_frame, cv2.COLOR_BGR2GRAY )
contours, hierarchy = cv2.findContours( bw_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE )
#=================================================================
# Filter the contours based on area, convex hull area etc...
#=================================================================
draw = filter_area( contours )
hull_areas, hulls = convex_hull_area( draw )
squares = find_squares( hulls )
centers = find_center( squares )
#=================================================================
# If debug mode, show the result of the line finding in a GUI
#=================================================================
if( debug ):
#contours
cv2.drawContours( frame, draw, -1, ( 0, 255, 0 ), 3 )
cv2.drawContours( frame, squares, -1, ( 255, 255, 0 ), 3 )
for i in centers:
cv2.circle( frame, ( i.x, i.y ), 3, ( 0, 255, 255 ), )
#print "X = {0} Y = {1}".format( i.x, i.y )
cv2.imshow( "Goal", frame )
#cv2.imshow( "Mask", masked_frame )
    #=================================================================
    # Distance estimate: dist_from_goal() expects a pixel area, not a
    # list of contours, so pass the area of the first detected goal
    # (the hull area is used here as an approximation of the contour
    # area the lookup table was built from). Bail out if nothing
    # goal-shaped was found.
    #=================================================================
    if not squares:
        return None, None
    return dist_from_goal( cv2.contourArea( squares[0] ) ), angle_from_point( centers[0].x, len( frame[0] ) )
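# Minimal usage sketch (assumes a camera at index 0 and a working OpenCV
# install; not part of the original file):
#
#   cap = cv2.VideoCapture( 0 )
#   ok, frame = cap.read()
#   if ok:
#       dist, angle = proc_frame( frame )
#       print( "dist:", dist, "angle:", angle )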
| 37.454918
| 135
| 0.402889
| 938
| 9,139
| 3.826226
| 0.289979
| 0.008359
| 0.021454
| 0.02229
| 0.03455
| 0.015046
| 0
| 0
| 0
| 0
| 0
| 0.041327
| 0.274538
| 9,139
| 243
| 136
| 37.609054
| 0.5
| 0.360105
| 0
| 0.13913
| 0
| 0
| 0.089235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.034783
| null | null | 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
fff197ae68beb5dbb26583494df00c1fc7732948 | 1,285 | py | Python | tools/gen_bbox_ac.py | vincentzhang/faster-rcnn-fcn | 7118d715a430f0ec2697e5f7a9a39c9752b466da | ["BSD-2-Clause"] | 7 | 2019-07-19T21:30:26.000Z | 2021-06-17T03:57:22.000Z | tools/gen_bbox_ac.py | vincentzhang/faster-rcnn-fcn | 7118d715a430f0ec2697e5f7a9a39c9752b466da | ["BSD-2-Clause"] | null | null | null | tools/gen_bbox_ac.py | vincentzhang/faster-rcnn-fcn | 7118d715a430f0ec2697e5f7a9a39c9752b466da | ["BSD-2-Clause"] | 1 | 2021-06-17T03:57:23.000Z | 2021-06-17T03:57:23.000Z |
# Generate bbox ground truth from pixel-wise segmentation masks.
# It currently only generates one bbox per slice.
from __future__ import print_function
import numpy as np
import h5py
import os
import pdb
mask_path = '../data/acce'
f = h5py.File(os.path.join(mask_path, "resized_label_ac_2d.h5"), 'r')
bbox_path = '../data/acce/bbox'
if not os.path.exists(bbox_path):
os.mkdir(bbox_path)
# dim: shape (256, 367, 342), slices, height, width
count = 0
for k in f.keys():
#pdb.set_trace()
count += 1
print("processing {}-th vol".format(count))
data = f[k][...] # convert to numpy
k = k.rsplit('_',1)[0] # strip the '_label' from the vol name
with open( os.path.join(bbox_path, k)+'_bbox.txt', 'w') as bbox_file:
# iterate through each slice
for idx in range(data.shape[0]):
mask = data[idx, :, :] # get the mask
i,j = np.where(mask) # find positive mask
if not i.size: # no positive mask
print("{}_{},{}".format(k, idx, 0), file=bbox_file)
else:
                # bbox corners are the min/max of the positive pixel coordinates
                # (equivalent to np.min/np.max over zip(i, j), but also valid on Python 3)
                h_min, w_min = i.min(), j.min()
                h_max, w_max = i.max(), j.max()
print("{}_{},{},{},{},{},{}".format(k, idx, 1, w_min, h_min, w_max,
h_max), file=bbox_file)
f.close()
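# Each line of the emitted *_bbox.txt is "<vol>_<slice>,<flag>[,w_min,h_min,w_max,h_max]".
# Illustrative example (values are made up, not from the real data):
#   case01_0,0                      slice 0 has no positive mask
#   case01_1,1,120,80,200,150       slice 1: bbox from (120, 80) to (200, 150)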
| 34.72973
| 83
| 0.585214
| 202
| 1,285
| 3.569307
| 0.455446
| 0.044383
| 0.033287
| 0.041609
| 0.027739
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022941
| 0.253697
| 1,285
| 36
| 84
| 35.694444
| 0.728884
| 0.224125
| 0
| 0
| 1
| 0
| 0.112576
| 0.022312
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.178571
| 0
| 0.178571
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|