hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fff3557fd7e005babefb16e3b6b117ef8a3354ec
| 918
|
py
|
Python
|
file_automation.py
|
FlightDev/YSPA
|
5226712ebf305e7a3c686c43c996517a617f748b
|
[
"MIT"
] | null | null | null |
file_automation.py
|
FlightDev/YSPA
|
5226712ebf305e7a3c686c43c996517a617f748b
|
[
"MIT"
] | null | null | null |
file_automation.py
|
FlightDev/YSPA
|
5226712ebf305e7a3c686c43c996517a617f748b
|
[
"MIT"
] | null | null | null |
import os
import glob
from astropy.io import fits
#/home/student/Desktop/Images/iTelescope/20180716-California-T24-GOOD
# Yo Neal. When you use this program, you have to change a few things between iTelescope and LFOP
# FIRST, remember to change the file path or you'll be a dummy. Also for LFOP -13 and -12 while
# for iTelescope it should be -9 and -8. Hopefully you know what to do with those numbers...
#/home/student/Desktop/Images/LFOP
dir = '20180726-LFOP-GOOD'
path = '/home/student/Desktop/Images/LFOP/' + dir + '/'
dict = {}
date = ""
for filename in os.listdir(path):
if filename.endswith(".fit"):
file = path + str(filename)
image = fits.open(file)
s = image[0].header.get("DATE-OBS")
date = s[:len(s) - 13]
dict.update({s[len(s) - 12:]: filename})
for key, value in sorted(dict.items()):
print value + "\t\t" + str(key)
print date
print len(dict)
| 32.785714
| 99
| 0.667756
| 146
| 918
| 4.19863
| 0.568493
| 0.053834
| 0.088091
| 0.117455
| 0.101142
| 0.101142
| 0
| 0
| 0
| 0
| 0
| 0.039402
| 0.198257
| 918
| 27
| 100
| 34
| 0.793478
| 0.422658
| 0
| 0
| 0
| 0
| 0.131429
| 0.064762
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.166667
| null | null | 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
fff46233cd9fc6a4821a3755e7bb2b8fd09e058e
| 1,030
|
py
|
Python
|
read_trials.py
|
Volkarl/P10-ExoskeletonTransferLearning
|
311daf3791c65838ff9c496eeb6526b096b41d4c
|
[
"MIT"
] | null | null | null |
read_trials.py
|
Volkarl/P10-ExoskeletonTransferLearning
|
311daf3791c65838ff9c496eeb6526b096b41d4c
|
[
"MIT"
] | 2
|
2020-11-13T18:39:27.000Z
|
2021-08-25T15:59:36.000Z
|
read_trials.py
|
Volkarl/P10-ExoskeletonTransferLearning
|
311daf3791c65838ff9c496eeb6526b096b41d4c
|
[
"MIT"
] | null | null | null |
import pickle
import matplotlib.pyplot as plt
import pandas as pd
trials = pickle.load(open("trials.p", "rb"))
print("Set breakpoint here")
#for item in trials.trials:
# args = item["vals"]
# res = item["result"]["loss"]
#itemtuples = [(item["misc"]["vals"]["dilation_group"], item["misc"]["vals"]["use_ref_points"], item["result"]["loss"]) for item in trials.trials]
#(dil, ref, loss) = zip(*itemtuples)
#plt.figure()
#plt.plot(dil, 'ro')
#plt.title('Use_dilation (1 is true, 0 is false)')
#plt.plot(loss)
#plt.plot(pd.DataFrame(loss).ewm(span=1).mean())
#plt.title('MAE')
#plt.plot(ref, 'g^')
#plt.legend()
#plt.show()
print("Set breakpoint here")
print("PRINT BEST TRIALS")
myitems = [(trial["result"]["loss"], str(trial["misc"]["vals"])) for trial in trials.trials if trial["result"]["status"] == "ok"]
myitems.sort(key=lambda tup: tup[0])
for item in myitems[:10]:
print("--------------------------\n")
print(item)
print("\n\n")
# If you want to print training times use attemptid["book_time"]
| 24.52381
| 146
| 0.635922
| 154
| 1,030
| 4.220779
| 0.467532
| 0.043077
| 0.041538
| 0.067692
| 0.064615
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006659
| 0.125243
| 1,030
| 41
| 147
| 25.121951
| 0.714761
| 0.506796
| 0
| 0.153846
| 0
| 0
| 0.262195
| 0.056911
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.230769
| 0
| 0.230769
| 0.461538
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 1
|
fff5fae09ca4ba6758cfde4e7471355a0e7af098
| 3,506
|
py
|
Python
|
RecRoomAnimatedProfilePicture.py
|
zigzatuzoo/Rec-Room-Animated-Profile-Picture
|
b8eeabf478613f47d3bdb9195ad2f5051e7aaaad
|
[
"Apache-2.0"
] | 4
|
2021-08-17T01:13:03.000Z
|
2022-03-19T04:03:01.000Z
|
RecRoomAnimatedProfilePicture.py
|
zigzatuzoo/Rec-Room-Animated-Profile-Picture
|
b8eeabf478613f47d3bdb9195ad2f5051e7aaaad
|
[
"Apache-2.0"
] | null | null | null |
RecRoomAnimatedProfilePicture.py
|
zigzatuzoo/Rec-Room-Animated-Profile-Picture
|
b8eeabf478613f47d3bdb9195ad2f5051e7aaaad
|
[
"Apache-2.0"
] | null | null | null |
''' Stuff you need to update for this to work '''
'Enter your username here'
user = ''
'Enter your password here'
passwd = ''
image1 = '2d83af05944d49c69fa9565fb238a91b.jpg'
image2 = '49b2788b672e4088a25eb0a9eff35c17.jpg'
image3 = '355c2c7e87f0489bb5f0308cdec108f6.jpg'
" ^ You need to change EACH of these to whatever you want the 3 pics to be (Currently set to a waving red zigzag)"
''' Stuff that will change how the program works '''
speed = 0.2
"^ As you can probably guess, this changes how long the PFP stays on each image"
import time
try:
import requests
except:
print('''You do not have the requests library installed, you need to install it via the following command:
pip install requests
Thank you!''')
try:
import recnetlogin as rnl
except:
print('''You do not have the RecNetLogin package installed, you need to install it via the following command:
python -m pip install git+https://github.com/Jegarde/RecNet-Login.git#egg=recnetlogin
Thank you!''')
''' Just Initializing some values '''
login = rnl.login_to_recnet(username=user,password=passwd)
x = 0
BToken = ''
''' Making the strings into the format read by the rec.net image api '''
imageName1 = 'imageName=' + image1
imageName2 = 'imageName=' + image2
imageName3 = 'imageName=' + image3
''' Initial token request '''
BToken = login.access_token
print(BToken)
''' The loop program that actually makes the picure move '''
while 1 == 1:
''' The HTTP header for changing your In-Game pfp '''
Headers = {'sec-ch-ua':'";Not A Brand";v="99", "Chromium";v="88"',
'Accept' : '*/*',
'sec-ch-ua-mobile' : '?0',
'Authorization' : BToken,
'Content-Type' : 'application/x-www-form-urlencoded; charset=UTF-8',
'Origin' : 'https://rec.net',
'Sec-Fetch-Site' : 'same-site',
'Sec-Fetch-Mode' : 'cors',
'Sec-Fetch-Dest' : 'empty',
'Referer' : 'https://rec.net/',
'Accept-Encoding' : 'gzip, deflate',
'Accept-Language' : 'en-US,en;q=0.9',
}
''' The easy way to edit what pfp plays after what '''
def i1():
r = requests.put('https://accounts.rec.net/account/me/profileImage', headers = Headers, data = imageName1)
print(str(r) + " num of requests: " + str(x))
time.sleep(speed)
def i2():
r = requests.put('https://accounts.rec.net/account/me/profileImage', headers = Headers, data = imageName2)
print(str(r) + " num of requests: " + str(x))
time.sleep(speed)
def i3():
r = requests.put('https://accounts.rec.net/account/me/profileImage', headers = Headers, data = imageName3)
print(str(r) + " num of requests: " + str(x))
time.sleep(speed)
''' In this default format, it will show image 1 first, then image 2, then image 3, then image 2 again and will LOOP this. The x value in the function calls is to make the counter function, if you don't add it to your function calls or you delete them, THE COUNTER WILL NOT WORK. '''
x = x + 1
i1()
x = x + 1
i2()
x = x + 1
i3()
x = x + 1
i2()
''' Requests a new auth token when that one is no longer valid '''
r = requests.put('https://accounts.rec.net/account/me/profileImage', headers = Headers)
if r.status_code == 401:
print('Invalid Token')
login = rnl.login_to_recnet(username=user,password=passwd)
BToken = login.access_token
print(BToken)
| 35.77551
| 287
| 0.634341
| 487
| 3,506
| 4.552361
| 0.431212
| 0.018945
| 0.016238
| 0.030672
| 0.321155
| 0.321155
| 0.291385
| 0.26793
| 0.26793
| 0.22553
| 0
| 0.039508
| 0.23474
| 3,506
| 97
| 288
| 36.14433
| 0.786806
| 0.011694
| 0
| 0.338028
| 0
| 0.028169
| 0.470738
| 0.051618
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042254
| false
| 0.056338
| 0.042254
| 0
| 0.084507
| 0.112676
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
fff7d77cd5951c966e8c3d645997399fd6e953c2
| 14,121
|
py
|
Python
|
rcommander/src/rcommander/graph_view.py
|
rummanwaqar/rcommander-core
|
7106d5868db76c47dea6ad11118a54351a8bd390
|
[
"BSD-3-Clause"
] | 4
|
2015-04-08T09:57:43.000Z
|
2021-08-12T01:44:37.000Z
|
rcommander/src/rcommander/graph_view.py
|
jhu-lcsr-forks/rcommander-core
|
1a0350e9b93687eff6a4407f72b5250be5f56919
|
[
"BSD-3-Clause"
] | 1
|
2015-03-12T09:10:27.000Z
|
2015-03-12T09:10:27.000Z
|
rcommander/src/rcommander/graph_view.py
|
jhu-lcsr-forks/rcommander-core
|
1a0350e9b93687eff6a4407f72b5250be5f56919
|
[
"BSD-3-Clause"
] | 3
|
2015-03-12T10:59:17.000Z
|
2021-06-21T02:13:57.000Z
|
#import roslib; roslib.load_manifest('rcommander_core')
import graph.style as gs
import graph
import graph.layout as gl
import tool_utils as tu
import graph_model as gm
import numpy as np
import time
import copy
def copy_style(astyle, bstyle):
bstyle.background = astyle.background
bstyle.fill = astyle.fill
bstyle.stroke = astyle.stroke
bstyle.strokewidth = astyle.strokewidth
bstyle.text = astyle.text
bstyle.font = astyle.font
bstyle.fontsize = astyle.fontsize
bstyle.textwidth = astyle.textwidth
bstyle.align = astyle.align
bstyle.depth = astyle.depth
class GraphView:
def __init__(self, context, graph_model):
self.graph_model = graph_model
g = self.graph_model.gve
self.gve = g
self.context = context
node_outlines = self.context.color(0.4, 0.4, 0.4, 1.)
text_color = self.context.color(0.3, 0.3, 0.3, 1.)
node_font_size = 14
#Customizations
g.styles.default.depth = True
g.styles.default.background = self.context.color(1., 1., 1., 1.)
g.styles.default.stroke = node_outlines
g.styles.default.text = text_color
g.styles.default.fontsize = node_font_size
g.styles.root.text = self.context.color(255/255., 153/255., 51/255., 1.)
g.styles.important.fontsize = node_font_size
g.styles.important.text = text_color
g.styles.important.stroke = node_outlines
g.styles.marked.fontsize = node_font_size
g.styles.marked.text = text_color
g.styles.marked.stroke = node_outlines
#g.styles.default.fontsize = 12
#g.styles.light.fontsize = 12
#g.styles.back.fontsize = 12
#g.styles.marked.fontsize = 12
#g.styles.dark.fontsize = 12
#g.styles.highlight.fontsize = 12
#g.styles.root.fontsize = 12
self.refresh = self.gve.layout.refresh
old_outcome_style = g.styles.create('old_outcome')
active_node_style = g.styles.create('active_node')
selected_style = g.styles.create('selected')
normal_style = g.styles.create('normal')
normal_edge_style = g.styles.create('normal_edge')
selected_edge_style = g.styles.create('selected_edge')
graph_circle = g.styles.create('graph_circle')
container = g.styles.create('container')
container_selected = g.styles.create('container_selected')
copy_style(g.styles.important, old_outcome_style)
copy_style(g.styles.important, active_node_style)
copy_style(g.styles.important, selected_style)
copy_style(g.styles.default, normal_style)
copy_style(g.styles.default, normal_edge_style)
copy_style(g.styles.default, selected_edge_style)
copy_style(g.styles.default, graph_circle)
copy_style(g.styles.default, container)
copy_style(g.styles.default, container_selected)
graph_circle.fill = self.context.color(.96, .96, .96, .96)
graph_circle.stroke = self.context.color(.8, .8, .8, 1.)
graph_circle.strokewidth = 3
graph_circle.fontsize = 24
graph_circle.textwidth = 800
graph_circle.text = self.context.color(.5, .5, .5, 1.)
container.fill = self.context.color(255./255, 204./255, 102./255., .4)
container.node = g.styles.important.node
container_selected.fill = self.context.color(255./255, 204./255, 102./255., 1.)
container_selected.node = g.styles.important.node
selected_style.text = text_color
selected_edge_style.stroke = self.context.color(0.80, 0.00, 0.00, 0.75)
selected_edge_style.strokewidth = 1.0
active_node_style.text = text_color
active_node_style.fill = self.context.color(153./255, 255./255, 51/255, .75)
active_node_style.strokewidth = 3
old_outcome_style.text = text_color
old_outcome_style.fill = self.context.color(153./255, 255./255, 51/255, .4)
self.radii_increment = 150
self.fsm_start_color = 1.
self.fsm_end_color = .96
self.fsm_stroke_color = .85
self.fsm_current_context_node = None
self.fsm_dclick_cb = None
self.right_clicked = None
self.dx = 0.
self.dy = 0.
self.tx = 0.
self.ty = 0.
#g.node('start').style = 'marked'
def set_node_style(self, node_name, style):
self.gve.node(node_name).style = style
self.gve.layout.refresh()
def get_node_style(self, node_name):
return self.gve.node(node_name).style
#def drag_background_cb(self, s, e):
# #print start_click.x, start_click.y
# #print curr_pos.x, curr_pos.y
# #transform.scale(self.zoom, self.zoom)
# self.dx = e.x - s.x
# self.dy = e.y - s.y
# #print dx, dy
# #transform = QTransform()
# ##transform.scale(abs(dx), abs(dy))
# #transform.translate(dx, dy)
# #self.graphicsView.superView.setTransform(transform)
def _background_drag(self, properties_dict):
mouse_pose = properties_dict['MOUSEX'], properties_dict['MOUSEY']
if properties_dict['rightdown']:
if not self.right_clicked:
self.right_clicked = mouse_pose
else:
self.tx = mouse_pose[0] - self.right_clicked[0]
self.ty = mouse_pose[1] - self.right_clicked[1]
else:
#Commit transform
self.right_clicked = None
self.dx += self.tx
self.dy += self.ty
self.ty = 0.
self.tx = 0.
#if self._ctx._ns["rightdown"]:
# #Make sure we're not in any nodes
# in_nodes = False
# for n in self.graph.nodes:
# if self.mouse in n:
# in_nodes = True
# break
# #Set pose first time
# if not in_nodes and not self.right_clicked:
# self.right_clicked = self.mouse
# else:
# self.right_drag(self.right_clicked, self.mouse)
#else:
# self.right_clicked = None
def setup(self):
self.times = {}
self.times['draw'] = 0.
self.times['check'] = 0.
self.times['iter'] = 0
def draw(self, properties_dict):
START_TIME = time.time()
self.context.size(properties_dict['width'], properties_dict['height'])
cx = self.context
g = self.gve
for n in g.nodes:
if properties_dict['selected_node'] == n.id:
self.set_node_style(n.id, 'selected')
else:
self.set_node_style(n.id, 'normal')
if self.graph_model.get_start_state() == n.id:
if self.get_node_style(n.id) == 'selected':
self.set_node_style(n.id, 'important')
else:
self.set_node_style(n.id, 'marked')
if hasattr(self.graph_model.get_state(n.id), 'get_child'):
if self.get_node_style(n.id) == 'selected':
self.set_node_style(n.id, 'container_selected')
else:
self.set_node_style(n.id, 'container')
if self.graph_model.is_running():
if not self.graph_model.sm_thread.has_key('current_states'):
print 'KEYS!', self.graph_model.sm_thread.keys()
if self.graph_model.sm_thread['current_states'] != None and \
(len(set(self.graph_model.sm_thread['current_states']).intersection(set([n.id]))) > 0):
self.set_node_style(n.id, 'active_node')
if self.graph_model.get_last_outcome() != None:
outcome, t = self.graph_model.get_last_outcome()
if outcome == n.id:
if time.time() - t < 10.:
self.set_node_style(n.id, 'old_outcome')
#self.set_node_style(tu.InfoStateBase.GLOBAL_NAME, 'root')
draw_func = None
#if properties_dict['selected_edge'] != None:
def draw_selected():
if properties_dict['selected_edge'] == None:
return
cx = self.context
g = self.gve
#edge = self.selected_edge
edge = properties_dict['selected_edge']
x0, y0 = edge.node1.x, edge.node1.y
x1, y1 = edge.node2.x, edge.node2.y
coordinates = lambda x, y, d, a: (x+math.cos(math.radians(a))*d, y+math.sin(math.radians(a))*d)
# Find the edge's angle based on node1 and node2 position.
a = math.degrees(math.atan2(y1-y0, x1-x0))
# draw line from node's edge instead of it's center.
r = edge.node2.r
d = math.sqrt(pow(x1-x0, 2) + pow(y1-y0, 2))
x00, y00 = coordinates(x0, y0, r+1, a)
x01, y01 = coordinates(x0, y0, d-r-1, a)
# draw
p1 = [x00, y00]
p2 = [x01, y01]
cx.fill()
cx.strokewidth(1.0)
cx.stroke(1., 153./255., 0, .75)
cx.beginpath(p1[0], p1[1])
cx.lineto(p2[0], p2[1])
path = cx.endpath(False)
gs.edge_arrow(g.styles[edge.node1.style], path, edge, radius=10)
cx.drawpath(path)
def draw_fsm_circles():
g = self.gve
#figure out where centroids should be
coords = []
[coords.append([n.x, n.y]) for n in g.nodes]
coords = np.matrix(coords).T
centroid = np.median(coords, 1)
if len(coords) == 0:
return
#calculate where radii should be
radius = np.max(np.power(np.sum(np.power((coords - centroid), 2), 0), .5)) + gm.GraphModel.NODE_RADIUS*2
radius = max(radius, 200.)
container_style = g.styles.graph_circle
container_stroke = container_style.stroke
##
#Draw fsm_stack
stack = copy.copy(properties_dict['fsm_stack'])
#stack.reverse()
#smallest_radii = radius
largest_radii = radius + len(stack) * self.radii_increment
color = self.fsm_start_color
if len(stack) > 0:
color_incre = (self.fsm_start_color - self.fsm_end_color) / len(stack)
#draw stack
for el in stack:
#smallest_radii = smallest_radii + self.radii_increment
name = el.model.document.get_name()#el.document.get_name()
#Draw node
stack_node = graph.node(g, radius = largest_radii, id = name)
stack_node.x, stack_node.y = centroid[0,0], centroid[1,0]
el.graph_node = stack_node
container_style.fill = self.context.color(color, color, color, 1.)
container_style.stroke = self.context.color(self.fsm_stroke_color, self.fsm_stroke_color, 1.)
gs.node(container_style, stack_node, g.alpha)
#Draw label
node_label_node_ = graph.node(g, radius = largest_radii, id = name)
node_label_node_.x, node_label_node_.y = centroid[0,0], centroid[1,0] - largest_radii
gs.node_label(container_style, node_label_node_, g.alpha)
color -= color_incre
largest_radii -= self.radii_increment
##
#Draw node
#Draw node circle
graph_name_node = graph.node(g, radius=radius, id = properties_dict['name'])
graph_name_node.x, graph_name_node.y = centroid[0,0], centroid[1,0]
self.fsm_current_context_node = graph_name_node
container_style.fill = self.context.color(self.fsm_end_color, self.fsm_end_color, self.fsm_end_color, 1.)
container_style.stroke = container_stroke
gs.node(container_style, graph_name_node, g.alpha)
#draw node label
node_label_node = graph.node(g, radius=radius, id = properties_dict['name'])
node_label_node.x, node_label_node.y = centroid[0,0], centroid[1,0] - radius
gs.node_label(container_style, node_label_node, g.alpha)
def detect_fsm_click():
def in_node(x, y, n):
return (abs(x - n.x) < n.r) and (abs(y - n.y) < n.r)
mousex_g = self.context._ns['MOUSEX'] - self.gve.x
mousey_g = self.context._ns['MOUSEY'] - self.gve.y
if self.context._ns['mousedoubleclick'] and len(properties_dict['fsm_stack']) > 0:
if not in_node(mousex_g, mousey_g, self.fsm_current_context_node):
stack = copy.copy(properties_dict['fsm_stack'])
stack.reverse()
selected_el = None
for el in stack:
if in_node(mousex_g, mousey_g, el.graph_node):
#if p in el.graph_node:
selected_el = el
break
#selected something so load it
if selected_el != None and self.fsm_dclick_cb != None:
self.fsm_dclick_cb(selected_el)
def final_func():
draw_selected()
detect_fsm_click()
CHECK_TIME = time.time()
self._background_drag(properties_dict)
properties_dict['MOUSEX'] -= self.dx+self.tx
properties_dict['MOUSEY'] -= self.dy+self.ty
g.draw(dx=self.dx+self.tx, dy=self.dy+self.ty, directed=True, traffic=False, user_draw_start=draw_fsm_circles, user_draw_final=final_func)
DRAW_TIME = time.time()
total_draw = DRAW_TIME - CHECK_TIME
total_check = CHECK_TIME - START_TIME
self.times['draw'] += total_draw
self.times['check'] +- total_check
self.times['iter'] += 1
#print 'draw', (1000.* self.times['draw'] / self.times['iter']), 'check', (1000.* self.times['check'] / self.times['iter'])
| 39.116343
| 146
| 0.578217
| 1,831
| 14,121
| 4.260513
| 0.142545
| 0.03679
| 0.024612
| 0.015383
| 0.34111
| 0.257147
| 0.17126
| 0.124087
| 0.096398
| 0.068709
| 0
| 0.030275
| 0.307627
| 14,121
| 360
| 147
| 39.225
| 0.767618
| 0.123646
| 0
| 0.101266
| 0
| 0
| 0.034222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.07173
| null | null | 0.004219
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ffff1e4cd8bc9bad42ca402b2c639f4b45a16abe
| 791
|
py
|
Python
|
pirates/quest/QuestHolderBase.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 3
|
2021-02-25T06:38:13.000Z
|
2022-03-22T07:00:15.000Z
|
pirates/quest/QuestHolderBase.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | null | null | null |
pirates/quest/QuestHolderBase.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 1
|
2021-02-25T06:38:17.000Z
|
2021-02-25T06:38:17.000Z
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.quest.QuestHolderBase
class QuestHolderBase:
__module__ = __name__
def __init__(self):
self._rewardCollectors = {}
def getQuests(self):
raise 'derived must implement'
def _addQuestRewardCollector(self, collector):
cId = collector._serialNum
self._rewardCollectors[cId] = collector
def _removeQuestRewardCollector(self, collector):
cId = collector._serialNum
del self._rewardCollectors[cId]
def _trackRewards(self, trade):
for collector in self._rewardCollectors.itervalues():
collector.collect(trade)
| 30.423077
| 104
| 0.694058
| 89
| 791
| 5.932584
| 0.629213
| 0.151515
| 0.060606
| 0.094697
| 0.128788
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072231
| 0.212389
| 791
| 26
| 105
| 30.423077
| 0.775281
| 0.259166
| 0
| 0.133333
| 0
| 0
| 0.037801
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
08058658e2bf102d2ac28a2a02f1701e1eb02d65
| 937
|
py
|
Python
|
container/base/src/cache.py
|
hmrc/devops-tooling-build
|
03d62df3a45d5dcce306cd6cad6c95a24a4b34ab
|
[
"Apache-2.0"
] | 1
|
2021-11-10T16:09:43.000Z
|
2021-11-10T16:09:43.000Z
|
container/base/src/cache.py
|
hmrc/devops-tooling-build
|
03d62df3a45d5dcce306cd6cad6c95a24a4b34ab
|
[
"Apache-2.0"
] | 6
|
2021-07-02T14:15:25.000Z
|
2022-02-03T12:57:36.000Z
|
container/base/src/cache.py
|
hmrc/devops-tooling-build
|
03d62df3a45d5dcce306cd6cad6c95a24a4b34ab
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import hashlib
import os
import pathlib
from typing import Optional
import yaml
def key(name):
return hashlib.sha1(name.encode()).hexdigest()
def path(name):
return pathlib.Path(os.environ['CACHE_AWS_MNT']) / key(name)
def aws_get(name) -> Optional[dict]:
try:
with path(name).open('r') as fp:
response = yaml.safe_load(fp.read())
now = datetime.datetime.now(tz=datetime.timezone.utc)
min_session_time = datetime.timedelta(hours=1)
if now + min_session_time < response['Credentials']['Expiration']:
return response
except FileNotFoundError:
pass
except (yaml.scanner.ScannerError, TypeError, KeyError):
# we somehow ended up with bad yaml, ≈ no cache; invalidate
path(name).unlink()
def aws_set(name, response) -> None:
with path(name).open('w') as fp:
fp.write(yaml.dump(response))
| 25.324324
| 78
| 0.649947
| 122
| 937
| 4.92623
| 0.54918
| 0.053245
| 0.039933
| 0.053245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002782
| 0.232657
| 937
| 36
| 79
| 26.027778
| 0.831711
| 0.060832
| 0
| 0
| 0
| 0
| 0.041002
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16
| false
| 0.04
| 0.24
| 0.08
| 0.52
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
080b1f9b578c418d65d4a8c4119d27d86ab70fa5
| 2,451
|
py
|
Python
|
aldryn_redirects/migrations/0003_auto_20171206_1150.py
|
compoundpartners/aldryn-redirects
|
ed1b1e90a7774a4bead771e158e30d5846e64e60
|
[
"BSD-3-Clause"
] | 1
|
2020-05-14T06:41:50.000Z
|
2020-05-14T06:41:50.000Z
|
aldryn_redirects/migrations/0003_auto_20171206_1150.py
|
compoundpartners/aldryn-redirects
|
ed1b1e90a7774a4bead771e158e30d5846e64e60
|
[
"BSD-3-Clause"
] | 11
|
2016-01-11T11:42:58.000Z
|
2018-11-05T16:13:27.000Z
|
aldryn_redirects/migrations/0003_auto_20171206_1150.py
|
compoundpartners/aldryn-redirects
|
ed1b1e90a7774a4bead771e158e30d5846e64e60
|
[
"BSD-3-Clause"
] | 6
|
2016-11-22T04:53:37.000Z
|
2018-11-15T13:56:39.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2017-12-06 13:50
from __future__ import unicode_literals
import aldryn_redirects.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the StaticRedirect models and retitle the legacy ones.

    Auto-generated by Django 1.11.8 -- edit with care, the migration
    graph depends on these operations staying consistent.
    """

    # Must run after the sites framework and this app's previous migration.
    dependencies = [
        ('sites', '0001_initial'),
        ('aldryn_redirects', '0002_on_delete_and_verbose_names'),
    ]

    operations = [
        # Path-based redirect that can be attached to multiple sites.
        migrations.CreateModel(
            name='StaticRedirect',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('inbound_route', models.CharField(db_index=True, help_text='Redirect origin. Do not provide the domain. Always add a leading slash here.', max_length=255, validators=[aldryn_redirects.validators.validate_inbound_route], verbose_name='Redirect from')),
                ('outbound_route', models.CharField(help_text='Redirect destination. Domain is not required (defaults to inbound route domain).', max_length=255, validators=[aldryn_redirects.validators.validate_outbound_route], verbose_name='Redirect to')),
                ('sites', models.ManyToManyField(related_name='_staticredirect_sites_+', to='sites.Site')),
            ],
            options={
                'verbose_name_plural': 'Static Redirects',
                'verbose_name': 'Static Redirect',
            },
        ),
        # Optional query-string constraints attached to a StaticRedirect.
        migrations.CreateModel(
            name='StaticRedirectInboundRouteQueryParam',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('key', models.CharField(max_length=255, verbose_name='Key')),
                ('value', models.CharField(blank=True, max_length=255, verbose_name='Value')),
                ('static_redirect', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='query_params', to='aldryn_redirects.StaticRedirect')),
            ],
        ),
        # Rename existing models in the admin to "Multilanguage ...".
        migrations.AlterModelOptions(
            name='redirect',
            options={'ordering': ('old_path',), 'verbose_name': 'Multilanguage Redirect', 'verbose_name_plural': 'Multilanguage Redirects'},
        ),
        migrations.AlterModelOptions(
            name='redirecttranslation',
            options={'default_permissions': (), 'managed': True, 'verbose_name': 'Multilanguage Redirect Translation'},
        ),
    ]
| 50.020408
| 268
| 0.651571
| 247
| 2,451
| 6.238866
| 0.417004
| 0.07852
| 0.031149
| 0.028553
| 0.203764
| 0.173913
| 0.173913
| 0.173913
| 0.102531
| 0.102531
| 0
| 0.019402
| 0.22195
| 2,451
| 48
| 269
| 51.0625
| 0.788673
| 0.027744
| 0
| 0.341463
| 1
| 0
| 0.292017
| 0.051261
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.097561
| 0
| 0.170732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0811b7481588bc53cfde102ac50bffe1f9e0e41c
| 161
|
py
|
Python
|
velocity/constants.py
|
aisthesis/mfstockmkt
|
d442ec4cb3b379f6984397926b4466420236c032
|
[
"MIT"
] | null | null | null |
velocity/constants.py
|
aisthesis/mfstockmkt
|
d442ec4cb3b379f6984397926b4466420236c032
|
[
"MIT"
] | 1
|
2015-12-27T17:37:54.000Z
|
2015-12-31T05:06:06.000Z
|
velocity/constants.py
|
aisthesis/mfstockmkt
|
d442ec4cb3b379f6984397926b4466420236c032
|
[
"MIT"
] | 1
|
2020-05-02T08:25:35.000Z
|
2020-05-02T08:25:35.000Z
|
"""
.. Copyright (c) 2015 Marshall Farrier
license http://opensource.org/licenses/MIT
Constants
=========
"""
# Column labels for the upward / downward velocity series.
UPVEL_COL = 'Up Vel'
DOWNVEL_COL = 'Down Vel'
| 14.636364
| 45
| 0.652174
| 20
| 161
| 5.15
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029412
| 0.15528
| 161
| 10
| 46
| 16.1
| 0.727941
| 0.652174
| 0
| 0
| 0
| 0
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
081745d5d369bb388f32e0870139795f7243852c
| 213
|
py
|
Python
|
tuples-and-sets/3_unique_names.py
|
Minkov/python-advanced-2020-01
|
f2ee26f1325d943529673457a1cbba5657ae5905
|
[
"MIT"
] | 5
|
2020-01-16T18:17:08.000Z
|
2020-04-12T06:42:47.000Z
|
tuples-and-sets/3_unique_names.py
|
Minkov/python-advanced-2020-01
|
f2ee26f1325d943529673457a1cbba5657ae5905
|
[
"MIT"
] | null | null | null |
tuples-and-sets/3_unique_names.py
|
Minkov/python-advanced-2020-01
|
f2ee26f1325d943529673457a1cbba5657ae5905
|
[
"MIT"
] | null | null | null |
# Read n names from stdin and print each distinct name once, in order
# of first appearance.
n = int(input())
names = [input() for _ in range(n)]
# dict preserves insertion order, so dict.fromkeys yields the unique
# names in first-seen order in O(n); the original
# sorted(set(names), key=names.index) was O(n^2) and abused a list
# comprehension for its print side effect.
for name in dict.fromkeys(names):
    print(name)
| 15.214286
| 42
| 0.643192
| 33
| 213
| 4.030303
| 0.484848
| 0.075188
| 0.150376
| 0.165414
| 0.240602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178404
| 213
| 13
| 43
| 16.384615
| 0.76
| 0.164319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
081da69448bb7e8d65c5e3d690d670101f274a22
| 587
|
py
|
Python
|
posts/migrations/0003_auto_20200522_0446.py
|
NotBlizzard/lark
|
b348f8d2b532ce20581030397cbba7f6565d1c56
|
[
"MIT"
] | 2
|
2020-12-10T06:13:36.000Z
|
2021-01-15T09:32:41.000Z
|
posts/migrations/0003_auto_20200522_0446.py
|
NotBlizzard/lark
|
b348f8d2b532ce20581030397cbba7f6565d1c56
|
[
"MIT"
] | 7
|
2021-03-10T21:21:55.000Z
|
2021-09-22T19:20:03.000Z
|
posts/migrations/0003_auto_20200522_0446.py
|
NotBlizzard/lark
|
b348f8d2b532ce20581030397cbba7f6565d1c56
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.6 on 2020-05-22 04:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Post.subtitle and redeclare Post.title_slug as a SlugField.

    Auto-generated by Django 3.0.6.
    """

    dependencies = [
        ('posts', '0002_auto_20200520_0536'),
    ]

    operations = [
        # New non-null column; '' is used only to backfill existing rows
        # (preserve_default=False drops it from the model afterwards).
        migrations.AddField(
            model_name='post',
            name='subtitle',
            field=models.CharField(default='', max_length=200),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='post',
            name='title_slug',
            field=models.SlugField(max_length=200),
        ),
    ]
| 23.48
| 63
| 0.574106
| 60
| 587
| 5.466667
| 0.716667
| 0.054878
| 0.079268
| 0.103659
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091358
| 0.310051
| 587
| 24
| 64
| 24.458333
| 0.718519
| 0.076661
| 0
| 0.222222
| 1
| 0
| 0.1
| 0.042593
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
08217e660e94837e28763173bb72fbc25fe9ee5e
| 216
|
py
|
Python
|
locale/pot/api/plotting/_autosummary/pyvista-Plotter-enable_lightkit-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 4
|
2020-08-07T08:19:19.000Z
|
2020-12-04T09:51:11.000Z
|
locale/pot/api/plotting/_autosummary/pyvista-Plotter-enable_lightkit-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 19
|
2020-08-06T00:24:30.000Z
|
2022-03-30T19:22:24.000Z
|
locale/pot/api/plotting/_autosummary/pyvista-Plotter-enable_lightkit-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 1
|
2021-03-09T07:50:40.000Z
|
2021-03-09T07:50:40.000Z
|
# Start from a plotter with lighting disabled, then switch the
# default light kit back on and render a cube to see its effect.
import pyvista

plotter = pyvista.Plotter(lighting=None)
plotter.enable_lightkit()
cube_actor = plotter.add_mesh(pyvista.Cube(), show_edges=True)
plotter.show()
| 24
| 57
| 0.763889
| 34
| 216
| 4.764706
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 216
| 8
| 58
| 27
| 0.857143
| 0.342593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0822f1091f07394bee07ab3fa63b7142aa217e7c
| 1,353
|
py
|
Python
|
sphinx/environment/managers/__init__.py
|
rweickelt/sphinx
|
1a4c41a7691e8f78d42e2db221192962c53b27df
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/environment/managers/__init__.py
|
rweickelt/sphinx
|
1a4c41a7691e8f78d42e2db221192962c53b27df
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/environment/managers/__init__.py
|
rweickelt/sphinx
|
1a4c41a7691e8f78d42e2db221192962c53b27df
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
sphinx.environment.managers
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Manager components for sphinx.environment.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
if False:
    # For type annotation only: never executed at runtime, but keeps
    # the names available for the old-style "# type:" comments below.
    from typing import Any  # NOQA
    from docutils import nodes  # NOQA
    from sphinx.environment import BuildEnvironment  # NOQA
class EnvironmentManager(object):
    """Base class for sphinx.environment managers.

    When ``name`` is set, attach()/detach() also mirror the manager
    onto the environment object under that attribute name.
    """

    name = None  # type: unicode
    env = None  # type: BuildEnvironment

    def __init__(self, env):
        # type: (BuildEnvironment) -> None
        self.env = env

    def attach(self, env):
        # type: (BuildEnvironment) -> None
        """Bind this manager to *env* (and expose it as env.<name>)."""
        self.env = env
        if not self.name:
            return
        setattr(env, self.name, self)

    def detach(self, env):
        # type: (BuildEnvironment) -> None
        """Unbind this manager from *env*, removing env.<name> if set."""
        self.env = None
        if not self.name:
            return
        delattr(env, self.name)

    def clear_doc(self, docname):
        # type: (unicode) -> None
        raise NotImplementedError

    def merge_other(self, docnames, other):
        # type: (List[unicode], Any) -> None
        raise NotImplementedError

    def process_doc(self, docname, doctree):
        # type: (unicode, nodes.Node) -> None
        raise NotImplementedError
| 26.529412
| 68
| 0.604582
| 146
| 1,353
| 5.554795
| 0.40411
| 0.051788
| 0.040691
| 0.099877
| 0.147965
| 0.147965
| 0.147965
| 0.10111
| 0
| 0
| 0
| 0.009165
| 0.274205
| 1,353
| 50
| 69
| 27.06
| 0.816701
| 0.397635
| 0
| 0.304348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.26087
| false
| 0
| 0.130435
| 0
| 0.521739
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
08235f7faf3865296eebd91470431d320d7b228e
| 370
|
py
|
Python
|
createGlobalMap.py
|
abhi20sc/autoClim
|
b131a19e935e8ba7778a2c73107a183df37e92da
|
[
"MIT"
] | 2
|
2021-07-28T05:58:20.000Z
|
2021-08-16T18:27:27.000Z
|
createGlobalMap.py
|
abhi20sc/autoClim
|
b131a19e935e8ba7778a2c73107a183df37e92da
|
[
"MIT"
] | null | null | null |
createGlobalMap.py
|
abhi20sc/autoClim
|
b131a19e935e8ba7778a2c73107a183df37e92da
|
[
"MIT"
] | 3
|
2021-08-05T15:21:05.000Z
|
2021-10-04T03:42:16.000Z
|
import cartopy.crs as ccrs
import cartopy.feature as cf
from matplotlib import pyplot as plt
from matplotlib import image as img
def createMap():
    """Render a world map (coastlines + country borders) to
    'globalMap.png' in the current working directory.

    Returns 0. unchanged, for backward compatibility with callers.
    """
    fig = plt.figure()
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.coastlines(linewidth=1)
    ax.add_feature(cf.BORDERS, linestyle='-', linewidth=1)
    fig.savefig('globalMap.png', bbox_inches='tight', pad_inches=0)
    # Close the figure so repeated calls do not accumulate open
    # matplotlib figures (and their memory).
    plt.close(fig)
    return 0.
| 30.833333
| 64
| 0.767568
| 57
| 370
| 4.929825
| 0.631579
| 0.092527
| 0.142349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012158
| 0.110811
| 370
| 12
| 65
| 30.833333
| 0.841945
| 0
| 0
| 0
| 0
| 0
| 0.051213
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.363636
| 0
| 0.545455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
08248ac0b1e2686f247d443d0208fc7018480282
| 1,327
|
py
|
Python
|
test/test_merge.py
|
tawiesn/sclblonnx
|
0cf73112db5df13009cd2ddb5d49744689096209
|
[
"MIT"
] | null | null | null |
test/test_merge.py
|
tawiesn/sclblonnx
|
0cf73112db5df13009cd2ddb5d49744689096209
|
[
"MIT"
] | null | null | null |
test/test_merge.py
|
tawiesn/sclblonnx
|
0cf73112db5df13009cd2ddb5d49744689096209
|
[
"MIT"
] | null | null | null |
from sclblonnx import add_output, add_input, add_node, node, empty_graph, add_constant, display, merge, run
import numpy as np
def test_merge():
    """Merge two tiny graphs and verify the combined graph end-to-end."""
    # First subgraph computes: sum = x1 + x2
    graph_a = empty_graph("Graph 1")
    add_op = node('Add', inputs=['x1', 'x2'], outputs=['sum'])
    graph_a = add_node(graph_a, add_op)
    graph_a = add_input(graph_a, 'x1', "FLOAT", [1])
    graph_a = add_input(graph_a, 'x2', "FLOAT", [1])
    graph_a = add_output(graph_a, 'sum', "FLOAT", [1])
    # Second subgraph computes: equal = (sum == 7)
    graph_b = empty_graph("Graph 2")
    graph_b = add_constant(graph_b, "const", np.array([7]), "FLOAT")
    eq_op = node("Equal", inputs=['sum', 'const'], outputs=['equal'])
    graph_b = add_node(graph_b, eq_op)
    graph_b = add_input(graph_b, 'sum', "FLOAT", [1])
    graph_b = add_output(graph_b, 'equal', "BOOL", [1])
    merged = merge(graph_a, graph_b, outputs=["sum"], inputs=["sum"])
    feed = {"x1": np.array([2]).astype(np.float32), "x2": np.array([5]).astype(np.float32)}
    outcome = run(merged, inputs=feed, outputs=["equal"])
    assert outcome[0], "Sum of 2 and 5 should be equal to constant 7."
    feed = {"x1": np.array([4]).astype(np.float32), "x2": np.array([5]).astype(np.float32)}
    outcome = run(merged, inputs=feed, outputs=["equal"])
    assert not outcome[0], "Sum of 4 and 5 should not be equal to constant 7."
    # todo(McK): Add tests for multiple inputs-outputs
    # todo(McK): Add tests for graphs containing If
| 34.025641
| 107
| 0.605124
| 206
| 1,327
| 3.820388
| 0.291262
| 0.044473
| 0.076239
| 0.035578
| 0.233799
| 0.142313
| 0.142313
| 0.142313
| 0.142313
| 0.142313
| 0
| 0.06203
| 0.198191
| 1,327
| 38
| 108
| 34.921053
| 0.677632
| 0.087415
| 0
| 0
| 0
| 0
| 0.173732
| 0
| 0
| 0
| 0
| 0.026316
| 0.090909
| 1
| 0.045455
| false
| 0
| 0.090909
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0836babd9f72c506519d713c961b9257fd759c19
| 447
|
py
|
Python
|
tests/my_select_group.py
|
oldjun/PyMyORM
|
ac49910f21d3f3d3d4b3d75a0f998526963f0a2a
|
[
"MIT"
] | 1
|
2021-12-01T23:47:24.000Z
|
2021-12-01T23:47:24.000Z
|
tests/my_select_group.py
|
oldjun/PyMyORM
|
ac49910f21d3f3d3d4b3d75a0f998526963f0a2a
|
[
"MIT"
] | null | null | null |
tests/my_select_group.py
|
oldjun/PyMyORM
|
ac49910f21d3f3d3d4b3d75a0f998526963f0a2a
|
[
"MIT"
] | 2
|
2022-01-03T15:03:37.000Z
|
2022-02-16T09:00:58.000Z
|
from pymyorm.database import Database
from config import db
from models.user import User
if __name__ == '__main__':
    # Demo: group users by gender with count/avg aggregates.
    Database.connect(**db)
    # # case 1
    # all = User.find().select('count(*) as count', 'money').group('money').order('count asc').all()
    # for one in all:
    #     print(one)
    # Renamed from `all` -- that name shadowed the builtin all().
    rows = User.find().select('gender', 'count(*) as count', 'avg(money) as avg').group('gender').all()
    for one in rows:
        print(one)
| 27.9375
| 102
| 0.615213
| 63
| 447
| 4.238095
| 0.460317
| 0.052434
| 0.082397
| 0.127341
| 0.164794
| 0.164794
| 0.164794
| 0
| 0
| 0
| 0
| 0.002825
| 0.208054
| 447
| 15
| 103
| 29.8
| 0.751412
| 0.295302
| 0
| 0
| 0
| 0
| 0.175325
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.375
| 0
| 0.375
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
083f30db4f011f2e287409fe5ae43ef0e966b47a
| 3,943
|
py
|
Python
|
tests/test_step.py
|
arup-group/mc
|
50b8faa8b9d40dece88e0a27f911edd427ebc064
|
[
"MIT"
] | null | null | null |
tests/test_step.py
|
arup-group/mc
|
50b8faa8b9d40dece88e0a27f911edd427ebc064
|
[
"MIT"
] | 12
|
2021-12-14T15:10:43.000Z
|
2022-03-31T13:39:25.000Z
|
tests/test_step.py
|
arup-group/mc
|
50b8faa8b9d40dece88e0a27f911edd427ebc064
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import pytest
from copy import deepcopy
import os
from mc.base import BaseConfig
from mc import step
@pytest.fixture()
def config():
    """Fresh BaseConfig loaded from the checked-in test MATSim config."""
    in_file = Path("tests/test_data/test_config.xml")
    return BaseConfig(in_file)
def test_set_write_path(config):
    """set_write_path writes outputDirectory into the controler block."""
    overrides = {'outputDirectory': 'testing'}
    step.set_write_path(config, overrides)
    assert config['controler']['outputDirectory'] == 'testing'
def test_set_input_paths(config):
    """set_input_paths rewrites every MATSim input file location to sit
    under the given matsim_source directory; the transit lines
    attributes file is pinned to the literal string 'null'."""
    step.set_input_paths(config, {'matsim_source': 'test/ing'})
    assert config['network']['inputNetworkFile'] == 'test/ing/network.xml'
    assert config['plans']['inputPlansFile'] == 'test/ing/population.xml.gz'
    assert config['plans']['inputPersonAttributesFile'] == 'test/ing/population_attributes.xml.gz'
    assert config['transit']['transitScheduleFile'] == 'test/ing/schedule-merged.xml'
    assert config['transit']['vehiclesFile'] == 'test/ing/vehicles.xml'
    assert config['transit']['transitLinesAttributesFile'] == 'null'
def test_set_step(config):
    """set_last_iteration copies the step override into controler."""
    step.set_last_iteration(config, {'step': '999'})
    expected = '999'
    assert config['controler']['lastIteration'] == expected
def test_find_and_set_param(config):
    """A single 'modeParams:car/constant' override hits the car mode in
    every scoringParameters set; other modes keep their original value."""
    step.find_and_set_overrides(
        config,
        {"modeParams:car/constant": "-1.0"}
    )
    assert config['planCalcScore']['scoringParameters:default']['modeParams:car']["constant"] == "-1.0"
    assert config['planCalcScore']['scoringParameters:unknown']['modeParams:car']["constant"] == "-1.0"
    # bus was not overridden, so it keeps its default
    assert config['planCalcScore']['scoringParameters:unknown']['modeParams:bus']["constant"] == "0.0"
def test_find_and_set_params(config):
    """Multiple overrides apply together: a broad modeParams:car match
    plus a fully-scoped bus override within one scoringParameters set."""
    step.find_and_set_overrides(
        config,
        {
            "modeParams:car/constant": "-1.0",
            "scoringParameters:unknown/modeParams:bus/constant": "-1.0"
        }
    )
    assert config['planCalcScore']['scoringParameters:default']['modeParams:car']["constant"] == "-1.0"
    assert config['planCalcScore']['scoringParameters:unknown']['modeParams:car']["constant"] == "-1.0"
    assert config['planCalcScore']['scoringParameters:unknown']['modeParams:bus']["constant"] == "-1.0"
def test_find_and_set_bad_param(config):
    """Overrides that match nothing must leave the config untouched."""
    before = deepcopy(config)
    step.find_and_set_overrides(
        config,
        {"modeParams:*/horseback": "-1.0"}
    )
    assert before == config
def test_construct_overrides_map_from_tuple():
    """Flat (k, v, k, v) tuples become a {k: v} mapping."""
    mapping = step.construct_override_map_from_tuple(('a', 'b', 'c', 'd'))
    assert mapping == {'a': 'b', 'c': 'd'}
def test_step_config(tmp_path):
    """End-to-end: step_config applies a flat override tuple and writes
    the stepped config to tmp_path; reload it and check every field."""
    in_file = "tests/test_data/test_config.xml"
    out_file = os.path.join(tmp_path, "test_config.xml")
    step.step_config(
        input_file=in_file,
        output_file=out_file,
        # flat (key, value, key, value, ...) override tuple
        overrides=(
            'matsim_source', 'test/ing',
            'outputDirectory', 'testing',
            'step', '999',
            "modeParams:car/constant", "-1.0",
            "scoringParameters:unknown/modeParams:bus/constant", "-1.0"
        )
    )
    assert os.path.exists(out_file)
    config = BaseConfig(out_file)
    assert config['controler']['lastIteration'] == '999'
    assert config['controler']['outputDirectory'] == 'testing'
    assert config['network']['inputNetworkFile'] == 'test/ing/network.xml'
    assert config['plans']['inputPlansFile'] == 'test/ing/population.xml.gz'
    assert config['plans']['inputPersonAttributesFile'] == 'test/ing/population_attributes.xml.gz'
    assert config['transit']['transitScheduleFile'] == 'test/ing/schedule-merged.xml'
    assert config['transit']['vehiclesFile'] == 'test/ing/vehicles.xml'
    assert config['transit']['transitLinesAttributesFile'] == 'null'
    assert config['planCalcScore']['scoringParameters:default']['modeParams:car']["constant"] == "-1.0"
    assert config['planCalcScore']['scoringParameters:unknown']['modeParams:car']["constant"] == "-1.0"
    assert config['planCalcScore']['scoringParameters:unknown']['modeParams:bus']["constant"] == "-1.0"
| 39.43
| 103
| 0.672331
| 441
| 3,943
| 5.854875
| 0.190476
| 0.116189
| 0.050349
| 0.076685
| 0.725019
| 0.656468
| 0.622773
| 0.622773
| 0.605345
| 0.605345
| 0
| 0.01259
| 0.153944
| 3,943
| 99
| 104
| 39.828283
| 0.761391
| 0
| 0
| 0.37037
| 0
| 0
| 0.420238
| 0.203398
| 0
| 0
| 0
| 0
| 0.345679
| 1
| 0.111111
| false
| 0
| 0.074074
| 0
| 0.197531
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0840e1f2cca91c8f40fea1035d91f9ed0ea2c8f1
| 15,552
|
py
|
Python
|
lambda_functions.py
|
intirix/serverless-secrets-manager
|
2c89b2c497f7078c38885125dfa79db944a214db
|
[
"Apache-2.0"
] | 2
|
2018-05-23T06:04:13.000Z
|
2020-11-04T23:16:09.000Z
|
lambda_functions.py
|
intirix/serverless-secrets-manager
|
2c89b2c497f7078c38885125dfa79db944a214db
|
[
"Apache-2.0"
] | null | null | null |
lambda_functions.py
|
intirix/serverless-secrets-manager
|
2c89b2c497f7078c38885125dfa79db944a214db
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import system
import db
import client
import server
import logging
import json
import base64
import os
from aws_xray_sdk.core import patch_all
# Instrument with AWS X-Ray only when actually running inside AWS
# (the Lambda runtime sets AWS_REGION).
if "AWS_REGION" in os.environ:
    patch_all()
class LambdaCommon:
    """Shared wiring for every Lambda handler.

    Builds the system/DB/client/server objects once per process and
    performs request authentication.

    Environment variables:
        USERS_TABLE / SECRETS_TABLE -- DynamoDB table names (defaulted
            to "secrets-users" / "secrets-secrets").
        MOCK_USER -- when set and non-empty, skips real authentication
            and acts as that user (for local testing).
    """

    def __init__(self, ddb_client=None):
        # ddb_client: optional pre-built DynamoDB client (used by tests);
        # None lets db.DynamoDB create its own.
        self.log = logging.getLogger("Lambda")
        self.system = system.System()
        userTable = "secrets-users"
        if "USERS_TABLE" in os.environ:
            userTable = os.environ["USERS_TABLE"]
        secretsTable = "secrets-secrets"
        if "SECRETS_TABLE" in os.environ:
            secretsTable = os.environ["SECRETS_TABLE"]
        # Cache layer wraps the DynamoDB-backed store.
        self.db = db.CacheDB(db.DynamoDB(userTable, secretsTable, ddb_client))
        self.system.setDB(self.db)
        self.system.init()
        self.client = client.Client(client.ClientSystemInterface(self.system))
        self.server = server.Server(self.system)
        # resp holds an early error response (e.g. 401); ctx holds the
        # authenticated request context set by authenticate().
        self.resp = None
        self.ctx = None
        self.mockUser = None
        if "MOCK_USER" in os.environ and len(os.environ["MOCK_USER"]) > 0:
            self.mockUser = os.environ["MOCK_USER"]

    def _response401(self):
        # Record an "unauthenticated" response for getResponse().
        self.resp = {"statusCode": 401}

    def authenticate(self, event):
        """Populate self.ctx from the event's Authorization header, or
        record a 401 response when the header is missing or invalid."""
        if self.mockUser != None:
            self.ctx = self.server.mockAuthentication(self.mockUser)
            return
        # Guard every level: event, headers, and the header itself may
        # be absent or None in API Gateway test invocations.
        if (
            event == None
            or not "headers" in event
            or event["headers"] == None
            or not "Authorization" in event["headers"]
        ):
            self._response401()
            return
        self.ctx = self.server.validateAuthenticationHeader(
            event["headers"]["Authorization"]
        )
        if self.ctx == None:
            self._response401()
            return

    def getResponse(self):
        # None means authentication succeeded and the handler may proceed.
        return self.resp
def get_body(event):
    """Return the request body from a Lambda proxy integration *event*.

    Returns decoded bytes when the event flags the body as base64,
    the raw body string otherwise, and None when no body is present.
    """
    body = event.get("body")
    if body is None:
        return None
    # Keep the original strict `== True` comparison: API Gateway sends a
    # real bool, and truthiness would wrongly accept e.g. "false".
    if event.get("isBase64Encoded") == True:
        return base64.b64decode(body)
    return body
def matches(event, meth, path):
    """Return True when *event* is an API Gateway request for HTTP
    method *meth* on resource *path*; False otherwise."""
    log = logging.getLogger("Lambda")
    if event is None:
        return False
    if event.get("httpMethod") != meth:
        return False
    ctx = event.get("requestContext")
    # `ctx and` also tolerates an explicit None requestContext, which
    # the original `"resourcePath" in event["requestContext"]` did not.
    if ctx and "resourcePath" in ctx and path == ctx["resourcePath"]:
        # lazy %-args: the string is only built if the record is emitted
        log.info("Matched %s to %s", meth, path)
        return True
    return False
# Process-wide LambdaCommon instance, created lazily on first request.
_singleton = None
def get_lambda_common():
    """Return the shared LambdaCommon, creating it on first use."""
    global _singleton
    if _singleton is not None:
        return _singleton
    _singleton = LambdaCommon()
    return _singleton
def single_func(event, context):
    """Single Lambda entry point: dispatch *event* to its handler.

    Routes are tried in order; the first (method, path) match wins.
    Unmatched events get a 404.
    """
    routes = (
        ("GET", "/v1/users", list_users),
        ("GET", "/v1/users/{username}", get_user),
        ("PUT", "/v1/users/{username}", update_user),
        ("POST", "/v1/users/{username}", create_user),
        ("GET", "/v1/users/{username}/keys/public", get_user_public_key),
        ("PUT", "/v1/users/{username}/keys/public", set_user_public_key),
        ("POST", "/v1/users/{username}/keys/public", set_user_public_key),
        ("POST", "/v1/users/{username}/keys", generate_user_keys),
        ("GET", "/v1/users/{username}/keys/private/encrypted", get_user_private_key_encrypted),
        ("PUT", "/v1/users/{username}/keys/private/encrypted", set_user_private_key_encrypted),
        ("POST", "/v1/users/{username}/keys/private/encrypted", set_user_private_key_encrypted),
        ("GET", "/v1/users/{username}/secrets", get_user_secrets),
        ("GET", "/v1/secrets/{sid}", get_secret),
        ("PUT", "/v1/secrets/{sid}", update_secret),
        ("POST", "/v1/secrets", add_secret),
        ("PUT", "/v1/secrets/{sid}/users/{username}", share_secret),
        ("DELETE", "/v1/secrets/{sid}/users/{username}", unshare_secret),
    )
    for method, route_path, handler in routes:
        if matches(event, method, route_path):
            return handler(event, context)
    print("Did not match the event")
    return {"statusCode": 404}
def list_users(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
return {
"statusCode": 200,
"body": json.dumps(obj.server.listUsers(obj.ctx), indent=2),
}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def get_user(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
user = event["pathParameters"]["username"]
return {
"statusCode": 200,
"body": json.dumps(obj.server.getUser(obj.ctx, user), indent=2),
}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def update_user(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
user = event["pathParameters"]["username"]
body = get_body(event)
if obj.server.updateUser(obj.ctx, user, body):
return {
"statusCode": 200,
"body": json.dumps(obj.server.getUser(obj.ctx, user), indent=2),
}
return {"statusCode": 404}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def set_user_public_key(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
user = event["pathParameters"]["username"]
body = get_body(event)
keyType = obj.server.getPublicKeyType(body)
if obj.server.setUserPublicKey(obj.ctx, user, body, keyType):
return {
"statusCode": 200,
"body": json.dumps(obj.server.getUser(obj.ctx, user), indent=2),
}
return {"statusCode": 404}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def create_user(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
user = event["pathParameters"]["username"]
body = get_body(event)
if obj.server.addUser(obj.ctx, user, body):
if obj.server.addUser(obj.ctx, user, body):
return {
"statusCode": 201,
"body": json.dumps(obj.server.getUser(obj.ctx, user), indent=2),
}
return {"statusCode": 404}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def get_user_public_key(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
user = event["pathParameters"]["username"]
pem = obj.server.getUserPublicKey(obj.ctx, user)
if pem == None:
return {"statusCode": 404}
return {
"statusCode": 200,
"body": pem,
"headers": {"Content-Type": "application/x-pem-file"},
}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def get_user_private_key_encrypted(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
user = event["pathParameters"]["username"]
data = obj.server.getUserEncryptedPrivateKey(obj.ctx, user)
if isinstance(data, str):
data = data.encode("UTF-8")
b64 = base64.b64encode(data).decode("UTF-8")
return {
"statusCode": 200,
"body": b64,
"headers": {"Content-Type": "application/octet-stream"},
"isBase64Encoded": True,
}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def generate_user_keys(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
user = event["pathParameters"]["username"]
body = get_body(event)
if body is None:
obj.log.exception("Password not provided in body")
return {"statusCode": 400}
body = body.strip()
generate = False
if (
"queryStringParameters" in event
and "generate" in event["queryStringParameters"]
):
generate = "true" == event["queryStringParameters"]["generate"]
if generate:
pem = obj.server.generateKeysForUser(obj.ctx, user, body)
return {
"statusCode": 200,
"body": pem,
"headers": {"Content-Type": "application/x-pem-file"},
}
return {"statusCode": 404}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def set_user_private_key_encrypted(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
user = event["pathParameters"]["username"]
body = get_body(event)
if obj.server.setUserEncryptedPrivateKey(obj.ctx, user, body):
return {
"statusCode": 200,
"body": json.dumps(obj.server.getUser(obj.ctx, user), indent=2),
}
return {"statusCode": 404}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def get_user_secrets(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
user = event["pathParameters"]["username"]
return {
"statusCode": 200,
"body": json.dumps(obj.server.getMySecrets(obj.ctx, user), indent=2),
}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def get_secret(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
sid = event["pathParameters"]["sid"]
return {
"statusCode": 200,
"body": json.dumps(obj.server.getSecret(obj.ctx, sid), indent=2),
}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def update_secret(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
sid = event["pathParameters"]["sid"]
body = get_body(event)
if obj.server.updateSecret(obj.ctx, sid, body):
return {
"statusCode": 200,
"body": json.dumps(obj.server.getSecret(obj.ctx, sid), indent=2),
}
return {"statusCode": 404}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def add_secret(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
body = get_body(event)
sid = obj.server.addSecret(obj.ctx, body)
return {
"statusCode": 201,
"body": json.dumps(obj.server.getSecret(obj.ctx, sid), indent=2),
}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def share_secret(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
sid = event["pathParameters"]["sid"]
user = event["pathParameters"]["username"]
body = get_body(event)
ret = obj.server.shareSecret(obj.ctx, sid, user, body)
return {"statusCode": 200, "body": json.dumps(ret, indent=2)}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def unshare_secret(event, context):
    """Lambda handler: revoke ``username``'s access to secret ``sid``.

    Responses: 200 with the unshare result, 403 on access denial,
    500 on any other error.
    """
    obj = get_lambda_common()
    obj.authenticate(event)
    if obj.getResponse() is not None:
        # Authentication already produced a response (e.g. 401) — return it.
        return obj.getResponse()
    try:
        sid = event["pathParameters"]["sid"]
        user = event["pathParameters"]["username"]
        ret = obj.server.unshareSecret(obj.ctx, sid, user)
        return {"statusCode": 200, "body": json.dumps(ret, indent=2)}
    except server.AccessDeniedException:
        obj.log.exception("Access Denied")
        return {"statusCode": 403}
    except Exception:
        # Was a bare `except:` that also swallowed SystemExit/KeyboardInterrupt.
        obj.log.exception("Error")
        return {"statusCode": 500}
# Root-logger setup: log lines as "<timestamp, left-padded to 15> <message>".
FORMAT = "%(asctime)-15s %(message)s"
logging.basicConfig(format=FORMAT)
| 29.622857
| 84
| 0.600244
| 1,676
| 15,552
| 5.49284
| 0.102625
| 0.119922
| 0.050511
| 0.036498
| 0.703237
| 0.69748
| 0.682381
| 0.669889
| 0.654573
| 0.630241
| 0
| 0.024065
| 0.267876
| 15,552
| 524
| 85
| 29.679389
| 0.784472
| 0.003215
| 0
| 0.633178
| 0
| 0
| 0.160839
| 0.030774
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053738
| false
| 0.002336
| 0.021028
| 0.002336
| 0.343458
| 0.002336
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0845d2588d5c55abf24f9ab405009bd284d758d8
| 833
|
py
|
Python
|
tests/test_composition.py
|
gregorynicholas/proto-pigeon
|
65a5d961e7a8506f3a968b21aaf68f625fd13190
|
[
"Apache-2.0"
] | null | null | null |
tests/test_composition.py
|
gregorynicholas/proto-pigeon
|
65a5d961e7a8506f3a968b21aaf68f625fd13190
|
[
"Apache-2.0"
] | null | null | null |
tests/test_composition.py
|
gregorynicholas/proto-pigeon
|
65a5d961e7a8506f3a968b21aaf68f625fd13190
|
[
"Apache-2.0"
] | null | null | null |
from protorpc.messages import Message, IntegerField, StringField
import protopigeon
class MessageOne(Message):
    # Two integer fields with protorpc field numbers 1 and 2; the test below
    # checks these numbers survive composition unchanged.
    one = IntegerField(1)
    two = IntegerField(2)
class MessageTwo(Message):
    # Two string fields with protorpc field numbers 1 and 2 — deliberately the
    # same numbers as MessageOne, so compose() must renumber without mutating.
    three = StringField(1)
    four = StringField(2)
def test():
    """compose() must merge all fields of both messages without mutating them."""
    composed = protopigeon.compose(MessageOne, MessageTwo)

    for field_name in ('one', 'two', 'three', 'four'):
        assert hasattr(composed, field_name)

    # The source messages keep their original field numbers.
    assert MessageOne.one.number == 1
    assert MessageOne.two.number == 2
    assert MessageTwo.three.number == 1
    assert MessageTwo.four.number == 2

    # The composed message is instantiable with every merged field.
    instance = composed(one=1, two=2, three='three', four='four')
    assert instance
| 23.138889
| 65
| 0.686675
| 89
| 833
| 6.426966
| 0.359551
| 0.090909
| 0.195804
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015314
| 0.216086
| 833
| 35
| 66
| 23.8
| 0.860643
| 0.046819
| 0
| 0
| 0
| 0
| 0.030303
| 0
| 0
| 0
| 0
| 0
| 0.375
| 1
| 0.041667
| false
| 0
| 0.083333
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
084d1fd01b6f648a85848dd0310b96b0d8966a0e
| 651
|
py
|
Python
|
yotta/options/registry.py
|
microbit-foundation/yotta
|
82d854b43d391abb5a006b05e7beffe7d0d6ffbf
|
[
"Apache-2.0"
] | 176
|
2015-01-02T07:31:59.000Z
|
2022-03-21T12:40:02.000Z
|
yotta/options/registry.py
|
microbit-foundation/yotta
|
82d854b43d391abb5a006b05e7beffe7d0d6ffbf
|
[
"Apache-2.0"
] | 549
|
2015-01-05T16:19:54.000Z
|
2021-01-15T13:46:42.000Z
|
yotta/options/registry.py
|
microbit-foundation/yotta
|
82d854b43d391abb5a006b05e7beffe7d0d6ffbf
|
[
"Apache-2.0"
] | 84
|
2015-01-10T21:01:00.000Z
|
2022-03-24T16:04:42.000Z
|
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library options
from argparse import Action, SUPPRESS
class RegistryAction(Action):
    """argparse action for ``--registry``: stores its single value as a string.

    Forces ``nargs=1`` so argparse always hands us a one-element list, then
    unwraps that list when storing the value on the namespace.
    """

    def __init__(self, *args, **kwargs):
        self.dest = kwargs['dest']
        # Consume exactly one value regardless of what the caller passed.
        kwargs['nargs'] = 1
        super(RegistryAction, self).__init__(*args, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # `values` is a one-element list because nargs == 1.
        setattr(namespace, self.dest, values[0])
def addTo(parser):
    """Register the hidden ``--registry`` override option on *parser*."""
    parser.add_argument(
        '--registry',
        default=None,
        dest='registry',
        help=SUPPRESS,
        action=RegistryAction,
    )
| 27.125
| 70
| 0.680492
| 77
| 651
| 5.571429
| 0.649351
| 0.04662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023077
| 0.201229
| 651
| 23
| 71
| 28.304348
| 0.801923
| 0.204301
| 0
| 0
| 0
| 0
| 0.052734
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.230769
| false
| 0
| 0.076923
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
084edafd90972abf12ce9cf828ac494b0afdd467
| 4,453
|
py
|
Python
|
src/pybraingym/environment.py
|
anetczuk/pybraingym
|
4f930021d7802e88c75a1a0aed135dd4de66cc1b
|
[
"MIT"
] | null | null | null |
src/pybraingym/environment.py
|
anetczuk/pybraingym
|
4f930021d7802e88c75a1a0aed135dd4de66cc1b
|
[
"MIT"
] | null | null | null |
src/pybraingym/environment.py
|
anetczuk/pybraingym
|
4f930021d7802e88c75a1a0aed135dd4de66cc1b
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2019 Arkadiusz Netczuk <dev.arnet@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from pybrain.rl.environments.environment import Environment
from gym.spaces.discrete import Discrete
class GymEnvironment(Environment):
    """PyBrain Environment adapter around a raw OpenAI Gym environment.

    Caches the latest (observation, reward, done, info) tuple from the
    wrapped Gym env and optionally routes values through a Transformation
    object that converts between Gym and PyBrain representations.
    """

    def __init__(self, gymRawEnv):
        Environment.__init__(self)

        observationSpace = gymRawEnv.observation_space
        # isinstance() instead of type(...) == Discrete, so subclasses of
        # Discrete are recognised as well.
        if isinstance(observationSpace, Discrete):
            self.outdim = 1
            self.discreteStates = True
            self.numStates = observationSpace.n

        actionSpace = gymRawEnv.action_space
        if isinstance(actionSpace, Discrete):
            self.indim = 1
            self.discreteActions = True
            self.numActions = actionSpace.n

        self.env = gymRawEnv
        self.observation = None
        self.reward = 0
        self.cumReward = 0
        self.done = True
        self.info = None
        self.transform = None       # optional Transformation instance
        self.doCumulative = False   # report cumulative instead of last reward
        self.doRender = False

    def setRendering(self, render=True):
        self.doRender = render

    def getCumulativeRewardMode(self):
        return self.doCumulative

    def setCumulativeRewardMode(self, cumulativeReward=True):
        self.doCumulative = cumulativeReward

    def setTransformation(self, transformation):
        """Attach a Transformation and point it back at this environment."""
        self.transform = transformation
        self.transform.env = self

    # ==========================================================================

    def getSensors(self):
        return self.observation

    def performAction(self, action):
        """Step the Gym env once, applying the transform on both directions."""
        if self.transform is not None:
            action = self.transform.action(action)
        self.observation, self.reward, self.done, self.info = self.env.step(action)
        if self.transform is not None:
            self.observation = self.transform.observation(self.observation)
            self.reward = self.transform.reward(self.reward)
        self.cumReward += self.reward

    def reset(self):
        """Reset the Gym env and clear per-episode bookkeeping."""
        self.done = False
        self.reward = 0
        self.cumReward = 0
        self.info = None
        self.observation = self.env.reset()
        if self.transform is not None:
            self.observation = self.transform.observation(self.observation)

    # ==========================================================================

    def getReward(self):
        if self.doCumulative:
            return self.cumReward
        return self.reward

    def sampleAction(self):
        return self.env.action_space.sample()

    def render(self):
        self.env.render()

    def close(self):
        self.env.close()
class Transformation:
    """Identity value-converter hooked between PyBrain and OpenAI Gym.

    Subclasses override ``observation``/``action``/``reward`` to adapt
    values; ``env`` is assigned by ``GymEnvironment.setTransformation``.
    """

    def __init__(self):
        self._env = None

    @property
    def env(self):
        # Environment this transformation is attached to (None until attached).
        return self._env

    @env.setter
    def env(self, value):
        self._env = value

    def observation(self, observationValue):
        """Transform observation value received from OpenAi Gym. Transformed value is passed to PyBrain.
           For discrete observations Gym often returns single value, but PyBrain always requires array.
        """
        return observationValue

    def action(self, actionValue):
        """Transform action value received from PyBrain and pass result to OpenAi Gym."""
        return actionValue

    def reward(self, rewardValue):
        """Transform reward value received from OpenAi Gym and pass result to PyBrain."""
        return rewardValue
| 32.50365
| 104
| 0.651246
| 511
| 4,453
| 5.636008
| 0.348337
| 0.045139
| 0.032986
| 0.017708
| 0.119792
| 0.086806
| 0.086806
| 0.054167
| 0.054167
| 0.054167
| 0
| 0.002978
| 0.245902
| 4,453
| 136
| 105
| 32.742647
| 0.854675
| 0.355042
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.233766
| false
| 0
| 0.025974
| 0.051948
| 0.402597
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
085a588c3443a2133c8229f5612a92a5ee522cad
| 335
|
py
|
Python
|
src/videos/migrations/0009_rename_updated_timestamp_video_updated.py
|
imsubhamsingh/vibeon
|
5ea67bb8dae0a0c28d36f81374eb4f046d842cf5
|
[
"Apache-2.0"
] | null | null | null |
src/videos/migrations/0009_rename_updated_timestamp_video_updated.py
|
imsubhamsingh/vibeon
|
5ea67bb8dae0a0c28d36f81374eb4f046d842cf5
|
[
"Apache-2.0"
] | 2
|
2021-07-19T18:41:46.000Z
|
2022-02-10T11:43:07.000Z
|
src/videos/migrations/0009_rename_updated_timestamp_video_updated.py
|
imsubhamsingh/vibeon
|
5ea67bb8dae0a0c28d36f81374eb4f046d842cf5
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.2 on 2021-04-20 19:34
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the field Video.updated_timestamp to Video.updated; data and
    # column type are unchanged (RenameField only alters the column name).
    dependencies = [("videos", "0008_video_updated_timestamp")]
    operations = [
        migrations.RenameField(
            model_name="video", old_name="updated_timestamp", new_name="updated"
        )
    ]
| 22.333333
| 80
| 0.674627
| 39
| 335
| 5.615385
| 0.74359
| 0.146119
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068441
| 0.214925
| 335
| 14
| 81
| 23.928571
| 0.764259
| 0.128358
| 0
| 0
| 1
| 0
| 0.217241
| 0.096552
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f23153ff9da39e77238d222d2874c0c47b3effe7
| 1,765
|
py
|
Python
|
tests/test_copies.py
|
mschmidtkorth/shallow-backup
|
6629fed7d5a3a13eb068c7ef0168cfa8ffbd3bbf
|
[
"MIT"
] | 1
|
2021-07-25T19:26:47.000Z
|
2021-07-25T19:26:47.000Z
|
tests/test_copies.py
|
mschmidtkorth/shallow-backup
|
6629fed7d5a3a13eb068c7ef0168cfa8ffbd3bbf
|
[
"MIT"
] | null | null | null |
tests/test_copies.py
|
mschmidtkorth/shallow-backup
|
6629fed7d5a3a13eb068c7ef0168cfa8ffbd3bbf
|
[
"MIT"
] | null | null | null |
import os
import sys
import pytest
import shutil
from .test_utils import setup_env_vars, unset_env_vars, BACKUP_DEST_DIR, FAKE_HOME_DIR, DIRS
sys.path.insert(0, "../shallow_backup")
from shallow_backup.utils import copy_dir_if_valid
TEST_TEXT_FILE = os.path.join(FAKE_HOME_DIR, 'test-file.txt')
class TestCopyMethods:
    """
    Test the functionality of copying
    """

    @staticmethod
    def setup_method():
        # Fresh fake home directory per test: recreate it from scratch and
        # seed it with one empty text file (TEST_TEXT_FILE).
        setup_env_vars()
        try:
            os.mkdir(FAKE_HOME_DIR)
        except FileExistsError:
            # Left over from a previous run — wipe and recreate.
            shutil.rmtree(FAKE_HOME_DIR)
            os.mkdir(FAKE_HOME_DIR)
        print(f"Created {TEST_TEXT_FILE}")
        open(TEST_TEXT_FILE, "w+").close()

    @staticmethod
    def teardown_method():
        # Remove every scratch directory created by the test, then clear env.
        for directory in DIRS:
            if os.path.isdir(directory):
                shutil.rmtree(directory)
        unset_env_vars()

    def test_copy_dir(self):
        """
        Test that copying a directory works as expected
        """
        # TODO: Test that all subfiles and folders are copied.
        test_dir = 'subdir-to-copy'
        test_path = os.path.join(FAKE_HOME_DIR, test_dir)
        os.mkdir(test_path)
        copy_dir_if_valid(FAKE_HOME_DIR, BACKUP_DEST_DIR)
        # Source left intact; both the seeded file and the subdir must land
        # in the backup destination.
        assert os.path.isdir(test_path)
        assert os.path.isfile(os.path.join(BACKUP_DEST_DIR, os.path.split(TEST_TEXT_FILE)[1]))
        assert os.path.isdir(os.path.join(BACKUP_DEST_DIR, test_dir))

    @pytest.mark.parametrize('invalid', {".Trash", ".npm", ".cache", ".rvm"})
    def test_copy_dir_invalid(self, invalid):
        """
        Test that attempting to copy an invalid directory fails
        """
        copy_dir_if_valid(invalid, FAKE_HOME_DIR)
        assert not os.path.isdir(os.path.join(BACKUP_DEST_DIR, invalid))
| 32.090909
| 94
| 0.65779
| 245
| 1,765
| 4.465306
| 0.342857
| 0.060329
| 0.080439
| 0.038391
| 0.161792
| 0.128885
| 0.107861
| 0.062157
| 0.062157
| 0
| 0
| 0.001487
| 0.23796
| 1,765
| 54
| 95
| 32.685185
| 0.811896
| 0.108215
| 0
| 0.108108
| 0
| 0
| 0.064153
| 0
| 0
| 0
| 0
| 0.018519
| 0.108108
| 1
| 0.108108
| false
| 0
| 0.162162
| 0
| 0.297297
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2320f768e412bebfaa5c2e31eeb4a3c480eacaf
| 1,395
|
py
|
Python
|
loan/killeragent.py
|
Casper-Smet/LOAN
|
3aabf80cf4314bcba33779329fc6e4971b85e742
|
[
"MIT"
] | null | null | null |
loan/killeragent.py
|
Casper-Smet/LOAN
|
3aabf80cf4314bcba33779329fc6e4971b85e742
|
[
"MIT"
] | null | null | null |
loan/killeragent.py
|
Casper-Smet/LOAN
|
3aabf80cf4314bcba33779329fc6e4971b85e742
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
import networkx as nx
from mesa import Agent, Model
class KillerAgent(Agent):
    """Mesa agent that walks to a target vertex and removes itself after
    restoring that vertex (if it is ill).

    `perceive`/`act`/`update` follow the model's three-phase step cycle.
    """

    def __init__(self, unique_id: int, model: Model, creator, pos: int, target_location: int, target_disease: str) -> None:
        super().__init__(unique_id, model)
        self.creator = creator
        self.pos = pos
        self.target_disease = target_disease
        self.target_location = target_location
        # Filled in by perceive() on every step.
        self.arrived_on_location = False
        self.shortest_path_to_target_node = []

    def perceive(self) -> None:
        """Refresh arrival flag and the shortest path to the target vertex."""
        self.arrived_on_location = self.pos == self.target_location
        self.shortest_path_to_target_node = nx.shortest_path(G=self.model.network, source=self.pos, target=self.target_location)

    def act(self) -> None:
        # This agent does all its work in update().
        ...

    def update(self) -> None:
        """Either step one vertex along the path, or finish up on arrival."""
        if not self.arrived_on_location:
            # Index 1 is the next vertex (index 0 is the current position).
            self.model.grid.move_agent(self, self.shortest_path_to_target_node[1])
            return
        if self.pos in self.model.ill_vertices:
            self.model.restore_vertex(self.pos)
        # Done: leave the grid and the scheduler.
        self.model.grid._remove_agent(self, self.pos)
        self.model.schedule.remove(self)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__} {self.model}/{self.unique_id}: Position {self.pos}"

    def __str__(self) -> str:
        return repr(self)

    def emojify(self):
        return " 💉"
| 34.875
| 128
| 0.658065
| 182
| 1,395
| 4.703297
| 0.318681
| 0.057243
| 0.063084
| 0.073598
| 0.098131
| 0.098131
| 0
| 0
| 0
| 0
| 0
| 0.000939
| 0.236559
| 1,395
| 40
| 129
| 34.875
| 0.801878
| 0
| 0
| 0
| 0
| 0.032258
| 0.055874
| 0.039398
| 0
| 0
| 0
| 0
| 0
| 1
| 0.225806
| false
| 0
| 0.096774
| 0.096774
| 0.451613
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2377bda1f457053d6b4f86097a8d1ba0041422b
| 260
|
py
|
Python
|
src/arm/src/iksolverservicetester.py
|
Busboombot/ros_idx6dof
|
63b3a49393ab2c619b6b56c634cd440ab9b464ef
|
[
"MIT"
] | 1
|
2020-03-15T15:30:43.000Z
|
2020-03-15T15:30:43.000Z
|
src/arm/src/iksolverservicetester.py
|
Busboombot/ros_idx6dof
|
63b3a49393ab2c619b6b56c634cd440ab9b464ef
|
[
"MIT"
] | null | null | null |
src/arm/src/iksolverservicetester.py
|
Busboombot/ros_idx6dof
|
63b3a49393ab2c619b6b56c634cd440ab9b464ef
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import rospy
from arm.srv import IKService, IKServiceResponse
# Smoke-test for the IKService: send one joint-angle request and print the reply.
rospy.init_node("asdf", anonymous=True)
rospy.wait_for_service('IKService')
srv = rospy.ServiceProxy('IKService', IKService)
resp = srv([5, 16, 8, 0, 0, 0], None)
# print() call form behaves identically for a single argument on Python 2 and 3;
# the original `print resp` statement is a SyntaxError on Python 3.
print(resp)
| 17.333333
| 48
| 0.734615
| 38
| 260
| 4.947368
| 0.684211
| 0.021277
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030702
| 0.123077
| 260
| 14
| 49
| 18.571429
| 0.79386
| 0.061538
| 0
| 0
| 0
| 0
| 0.090535
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.285714
| null | null | 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f23e9e3046350977154c8ce79c350de302fd2dce
| 197
|
py
|
Python
|
04_While/Step03/gamjapark.py
|
StudyForCoding/BEAKJOON
|
84e1c5e463255e919ccf6b6a782978c205420dbf
|
[
"MIT"
] | null | null | null |
04_While/Step03/gamjapark.py
|
StudyForCoding/BEAKJOON
|
84e1c5e463255e919ccf6b6a782978c205420dbf
|
[
"MIT"
] | 3
|
2020-11-04T05:38:53.000Z
|
2021-03-02T02:15:19.000Z
|
04_While/Step03/gamjapark.py
|
StudyForCoding/BEAKJOON
|
84e1c5e463255e919ccf6b6a782978c205420dbf
|
[
"MIT"
] | null | null | null |
n = int(input())
temp_n = n
k=0
while True:
a = int(temp_n / 10)
b = temp_n % 10
c = (a + b) % 10
new = b*10 + c
k += 1
if new == n:
break
temp_n = new
print(k)
| 14.071429
| 24
| 0.446701
| 38
| 197
| 2.210526
| 0.447368
| 0.238095
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084746
| 0.401015
| 197
| 13
| 25
| 15.153846
| 0.627119
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.076923
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f23ec9a0fbd46e6d9b5f8659349c47ab52aec354
| 333
|
py
|
Python
|
bbtest/steps/appliance_steps.py
|
jancajthaml-openbank/e2e
|
a2ef84b6564022e95de76438fc795e2ef927aa2b
|
[
"Apache-2.0"
] | null | null | null |
bbtest/steps/appliance_steps.py
|
jancajthaml-openbank/e2e
|
a2ef84b6564022e95de76438fc795e2ef927aa2b
|
[
"Apache-2.0"
] | 30
|
2018-03-18T05:58:32.000Z
|
2022-01-19T23:21:31.000Z
|
bbtest/steps/appliance_steps.py
|
jancajthaml-openbank/e2e
|
a2ef84b6564022e95de76438fc795e2ef927aa2b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from behave import *
from helpers.eventually import eventually
@given('appliance is running')
def appliance_running(context):
    """Behave step: block until the appliance reports running (max 5 s)."""
    @eventually(5)
    def check_running():
        # eventually() retries this assertion until it passes or times out.
        assert context.appliance.running(), 'appliance did not start within 5 seconds'
    check_running()
| 23.785714
| 82
| 0.744745
| 45
| 333
| 5.355556
| 0.622222
| 0.13278
| 0.13278
| 0.149378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013937
| 0.138138
| 333
| 13
| 83
| 25.615385
| 0.825784
| 0.129129
| 0
| 0
| 0
| 0
| 0.208333
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f23fb929e898694417f38446747b98726264f0e7
| 1,211
|
py
|
Python
|
irkshop/urls.py
|
Beomi/irkshop
|
c109a62216cb6550add64fbf402883debc5011d1
|
[
"Apache-2.0"
] | 19
|
2016-11-06T10:28:14.000Z
|
2020-11-01T02:04:51.000Z
|
irkshop/urls.py
|
Beomi/irkshop
|
c109a62216cb6550add64fbf402883debc5011d1
|
[
"Apache-2.0"
] | 17
|
2016-10-19T11:58:48.000Z
|
2022-01-13T00:32:34.000Z
|
irkshop/urls.py
|
Beomi/irkshop
|
c109a62216cb6550add64fbf402883debc5011d1
|
[
"Apache-2.0"
] | 4
|
2016-11-06T10:54:26.000Z
|
2019-08-31T16:08:56.000Z
|
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth.views import login, logout
from django.conf import settings
from django.views.static import serve
from django.views.generic import TemplateView
# URL routing: admin, auth (template-overridden login), social auth, editor
# uploads, health checks, PayPal IPN, the shop app, and a static index page.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^accounts/login/$', login,
        {'template_name': 'login_page/login.html'}, name='login'),
    url(r'^accounts/logout/$', logout, name='logout'),
    url('', include('social_django.urls', namespace='social')),  # registering this line enables social auth
    url(r'^ckeditor/', include('ckeditor_uploader.urls')),
    url(r'^ht/', include('health_check.urls')),
    url(r'^paypal/', include('paypal.standard.ipn.urls')),
    url(r'^shop/', include('goods.urls', namespace='shop')),
    url(r'^$', TemplateView.as_view(template_name='index.html')),
]
# Development-only routes: debug toolbar, user uploads, and static files.
if settings.DEBUG:
    import debug_toolbar
    from django.conf.urls.static import static
    urlpatterns += [
        url(r'^__debug__/', include(debug_toolbar.urls)),
        url(r'^uploads/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}),
    ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 41.758621
| 101
| 0.663088
| 155
| 1,211
| 5.064516
| 0.348387
| 0.050955
| 0.050955
| 0.04586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168456
| 1,211
| 28
| 102
| 43.25
| 0.779543
| 0.012386
| 0
| 0
| 0
| 0
| 0.229481
| 0.074539
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.32
| 0
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
f2414f0188cf0460b22148b0732eea50d4b58390
| 5,142
|
py
|
Python
|
plag/urls.py
|
neetu6860/plagiarism-detection-software
|
7f05210aafdccf33a2bc732a40348eff43f46fba
|
[
"MIT"
] | 19
|
2018-09-03T09:10:20.000Z
|
2021-12-24T13:52:18.000Z
|
plag/urls.py
|
neetu6860/plagiarism-detection-software
|
7f05210aafdccf33a2bc732a40348eff43f46fba
|
[
"MIT"
] | 3
|
2019-10-31T18:42:38.000Z
|
2021-06-10T21:37:23.000Z
|
plag/urls.py
|
neetu6860/plagiarism-detection-software
|
7f05210aafdccf33a2bc732a40348eff43f46fba
|
[
"MIT"
] | 16
|
2018-06-06T15:04:59.000Z
|
2022-03-29T04:53:07.000Z
|
from django.conf.urls import patterns, url
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView
admin.autodiscover()
from plag import views, const
# NOTE(review): patterns() and string view references (e.g.
# 'django.contrib.auth.views.login') were deprecated in Django 1.8 and removed
# in 1.10 — this module targets an older Django; confirm the project's Django
# version before modernising.
urlpatterns = patterns('',
                       url(r'^$', views.IndexView.as_view(), name='index'),
                       url(r'^index-trial/$', views.IndexTrialView.as_view(), name='index_trial'),
                       url(r'^download/(?P<prot_res_id>\d+)$', views.download_file, name='download'),
                       # Static marketing/informational pages rendered straight from templates.
                       url(r'^products/$', TemplateView.as_view(template_name='plag/static/products.html'),
                           name='products'),
                       url(r'^features-screenshots/$',
                           TemplateView.as_view(template_name='plag/static/features_and_screenshots.html'),
                           name='features'),
                       url(r'^url-protection/$', TemplateView.as_view(template_name='plag/static/url_protection.html'),
                           name='url_prot'),
                       url(r'^document-protection/$',
                           TemplateView.as_view(template_name='plag/static/doc_protection.html'), name='doc_prot'),
                       url(r'^pricing/$', TemplateView.as_view(template_name='plag/static/pricing.html'),
                           name='pricing'),
                       url(r'^risks-of-plagiarism/$',
                           TemplateView.as_view(template_name='plag/static/risks_of_plagiarism.html'),
                           name='risks_plag'),
                       url(r'^about-us/$', TemplateView.as_view(template_name='plag/static/about.html'), name='about'),
                       url(r'^our-customers/$', TemplateView.as_view(template_name='plag/static/our_customers.html'),
                           name='our_customers'),
                       url(r'^contact-us/$', TemplateView.as_view(template_name='plag/static/contact_us.html'),
                           name='contact'),
                       # Ordering, accounts, invoicing and PayPal IPN endpoint.
                       url(r'^order/$', views.OrderView.as_view(), name='order'),
                       url(r'^ajax/username-check/$', views.username_unique, name='ajax_username_unique'),
                       url(r'^account/$', views.account, name='account'),
                       url(r'^account/profile/$', login_required(views.ProfileView.as_view()), name='profile'),
                       url(r'^account/invoice/(?P<pk>\d+)$', views.invoice, name='invoice'),
                       url(r'^account/invoice/pay/(?P<pk>\d+)$', views.pay_invoice, name='pay_invoice'),
                       url(r'^account/invoice/subscribe/(?P<pk>\d+)$', views.subscribe_invoice,
                           name='subscribe_invoice'),
                       url(r'^ipn-endpoint/$', views.ipn, name='ipn'),
                       # Scan history and plagiarism-result AJAX endpoints.
                       url(r'^account/recent-scans/$', views.recent_scans, name='recent_scans_default'),
                       url(r'^account/recent-scans/(?P<num_days>\d+)$', views.recent_scans,
                           name='recent_scans'),
                       url(r'^account/recent-scans/(?P<num_days>\d+)/(?P<hide_zero>hide-zero)$',
                           views.recent_scans, name='recent_scans_hide_zero'),
                       url(r'^account/scan-history/$', views.scan_history, name='scan_history'),
                       url(r'^account/scan-history/(?P<hide_zero>hide-zero)$', views.scan_history,
                           name='scan_history_hide_zero'),
                       url(r'^ajax/plag-results/$', views.plagiarism_results,
                           name='ajax_plag_results_default'),
                       url(r'^ajax/plag-results/(?P<scan_id>\d+)$', views.plagiarism_results,
                           name='plag_results'),
                       url(r'^ajax/sitemap/$', views.sitemap_to_urls, name='ajax_urls'),
                       url(r'^account/protected-resources/$',
                           login_required(views.ProtectedResources.as_view()), name='protected_resources'),
                       url(r'^sitemap/$', TemplateView.as_view(template_name='plag/static/sitemap.html'),
                           name='sitemap'),
                       url(r'^terms-of-service/$',
                           TemplateView.as_view(template_name='plag/static/terms_of_service.html'),
                           name='terms_of_service'),
                       url(r'^privacy-policy/$', TemplateView.as_view(template_name='plag/static/privacy_policy.html'),
                           name='privacy_policy'),
                       # TODO Remove
                       url(r'^data-cleanse/$', views.data_cleanse, name='data_cleanse'),
                       url(r'^copyright/$', TemplateView.as_view(template_name='plag/static/copyright.html'),
                           name='copyright'),
                       # String view paths below only work on pre-1.10 Django.
                       url(r'^login/$', 'django.contrib.auth.views.login',
                           {'template_name': 'plag/static/login_error.html'}),
                       url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': 'index'}, name='logout'),
                       )
| 57.775281
| 119
| 0.532283
| 536
| 5,142
| 4.923507
| 0.18097
| 0.054566
| 0.084881
| 0.116711
| 0.359606
| 0.30125
| 0.229632
| 0.093217
| 0.023494
| 0
| 0
| 0
| 0.317581
| 5,142
| 88
| 120
| 58.431818
| 0.752066
| 0.002139
| 0
| 0
| 0
| 0.014925
| 0.32092
| 0.200039
| 0
| 0
| 0
| 0.011364
| 0
| 1
| 0
| false
| 0
| 0.074627
| 0
| 0.074627
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2470b57f1baf4a7e69d418b396753a2d81c5b04
| 752
|
py
|
Python
|
authentik/sources/saml/migrations/0011_auto_20210324_0736.py
|
BeryJu/passbook
|
350f0d836580f4411524614f361a76c4f27b8a2d
|
[
"MIT"
] | 15
|
2020-01-05T09:09:57.000Z
|
2020-11-28T05:27:39.000Z
|
authentik/sources/saml/migrations/0011_auto_20210324_0736.py
|
BeryJu/passbook
|
350f0d836580f4411524614f361a76c4f27b8a2d
|
[
"MIT"
] | 302
|
2020-01-21T08:03:59.000Z
|
2020-12-04T05:04:57.000Z
|
authentik/sources/saml/migrations/0011_auto_20210324_0736.py
|
BeryJu/passbook
|
350f0d836580f4411524614f361a76c4f27b8a2d
|
[
"MIT"
] | 3
|
2020-03-04T08:21:59.000Z
|
2020-08-01T20:37:18.000Z
|
# Generated by Django 3.1.7 on 2021-03-24 07:36
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Re-points SamlSource.pre_authentication_flow at the renamed
    # authentik_flows.flow model; CASCADE delete keeps sources from
    # outliving their pre-authentication flow.
    dependencies = [
        ("authentik_flows", "0016_auto_20201202_1307"),
        ("authentik_sources_saml", "0010_samlsource_pre_authentication_flow"),
    ]

    operations = [
        migrations.AlterField(
            model_name="samlsource",
            name="pre_authentication_flow",
            field=models.ForeignKey(
                help_text="Flow used before authentication.",
                on_delete=django.db.models.deletion.CASCADE,
                related_name="source_pre_authentication",
                to="authentik_flows.flow",
            ),
        ),
    ]
| 28.923077
| 78
| 0.62633
| 77
| 752
| 5.87013
| 0.636364
| 0.053097
| 0.061947
| 0.097345
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06422
| 0.275266
| 752
| 25
| 79
| 30.08
| 0.765138
| 0.05984
| 0
| 0.105263
| 1
| 0
| 0.296454
| 0.187234
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.105263
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f249ee34b1745d4a243c396362c75f872d9af531
| 687
|
py
|
Python
|
Software_Carpentry/Conway/test_conway.py
|
dgasmith/SICM2-Software-Summer-School-2014
|
af97770cbade3bf4a246f21e607e8be66c9df7da
|
[
"MIT"
] | 2
|
2015-07-16T14:00:27.000Z
|
2016-01-10T20:21:48.000Z
|
Software_Carpentry/Conway/test_conway.py
|
dgasmith/SICM2-Software-Summer-School-2014
|
af97770cbade3bf4a246f21e607e8be66c9df7da
|
[
"MIT"
] | null | null | null |
Software_Carpentry/Conway/test_conway.py
|
dgasmith/SICM2-Software-Summer-School-2014
|
af97770cbade3bf4a246f21e607e8be66c9df7da
|
[
"MIT"
] | null | null | null |
from conway import *
def test_neighbors_at_origin():
    """neighbors((0, 0)) yields exactly the eight adjacent cells."""
    expected = {(1, 1), (-1, -1), (0, 1), (1, 0), (-1, 1), (1, -1), (-1, 0), (0, -1)}
    assert expected == set(neighbors((0, 0)))
def test_neighbors_at_negative_quadrant():
    """neighbors() works for cells with negative coordinates too."""
    expected = {(0, -1), (-2, -1), (-1, 0), (-1, -2), (0, 0), (0, -2), (-2, 0), (-2, -2)}
    assert expected == set(neighbors((-1, -1)))
def test_blinker():
    """A blinker oscillates with period 2, so two generations restore it."""
    cells = [(-1, 0), (0, 0), (1, 0)]
    evolved = conway(cells, generations=2)
    assert set(evolved) == set(cells)
def test_block():
    """A 2x2 block is a still life: unchanged after any number of generations."""
    cells = [(0, 0), (0, 1), (1, 0), (1, 1)]
    evolved = conway(cells, generations=2)
    assert set(evolved) == set(cells)
| 28.625
| 87
| 0.513828
| 108
| 687
| 3.185185
| 0.185185
| 0.069767
| 0.043605
| 0.046512
| 0.389535
| 0.366279
| 0.156977
| 0
| 0
| 0
| 0
| 0.096475
| 0.215429
| 687
| 23
| 88
| 29.869565
| 0.541744
| 0
| 0
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 1
| 0.235294
| false
| 0
| 0.058824
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f24a5dc578f63a0c2e113a798ce9969cd7ed080c
| 5,426
|
py
|
Python
|
app_backend/__init__.py
|
zhanghe06/bearing_project
|
78a20fc321f72d3ae05c7ab7e52e01d02904e3fc
|
[
"MIT"
] | 1
|
2020-06-21T04:08:26.000Z
|
2020-06-21T04:08:26.000Z
|
app_backend/__init__.py
|
zhanghe06/bearing_project
|
78a20fc321f72d3ae05c7ab7e52e01d02904e3fc
|
[
"MIT"
] | 13
|
2019-10-18T17:19:32.000Z
|
2022-01-13T00:44:43.000Z
|
app_backend/__init__.py
|
zhanghe06/bearing_project
|
78a20fc321f72d3ae05c7ab7e52e01d02904e3fc
|
[
"MIT"
] | 5
|
2019-02-07T03:15:16.000Z
|
2021-09-04T14:06:28.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: __init__.py
@time: 2018-03-06 00:00
"""
from __future__ import unicode_literals
import eventlet
eventlet.monkey_patch()
from logging.config import dictConfig
from config import current_config
from flask import Flask
from flask_wtf.csrf import CSRFProtect
from flask_login import LoginManager
from flask_moment import Moment
from flask_oauthlib.client import OAuth
from flask_mail import Mail
from flask_principal import Principal
import flask_excel as excel
# from flask_socketio import SocketIO
from flask_sqlalchemy import SQLAlchemy
from flask_babel import Babel, gettext as _
from app_common.libs.redis_session import RedisSessionInterface
from app_backend.clients.client_redis import redis_client
app = Flask(__name__)
app.config.from_object(current_config)
app.config['REMEMBER_COOKIE_NAME'] = app.config['REMEMBER_COOKIE_NAME_BACKEND']
app.session_cookie_name = app.config['SESSION_COOKIE_NAME_BACKEND']
app.session_interface = RedisSessionInterface(
redis=redis_client,
prefix=app.config['REDIS_SESSION_PREFIX_BACKEND'],
)
# CSRF Protection AJAX requests
csrf = CSRFProtect(app)
login_manager = LoginManager()
login_manager.init_app(app) # setup_app 方法已淘汰
login_manager.login_view = 'auth.index'
login_manager.login_message = _('Please log in to access this page.')
login_manager.login_message_category = 'warning' # 设置消息分类
login_manager.localize_callback = _ # 设置翻译回调
login_manager.session_protection = 'basic' # 设置安全等级(basic、strong、None)
# 用户电脑的标识(基本上是 IP 地址和 User Agent 的 MD5 hash 值)
# basic 模式下,如果该标识未匹配,会话会简单地被标记为非活 跃的,且任何需要活跃登入的东西会强制用户重新验证。
# strong模式下,如果该标识未匹配,整个会话(记住的令牌如果存在,则同样)被删除。
# Moment 时间插件
moment = Moment(app)
# 权限管理插件
principals = Principal(app, skip_static=True)
# 国际化 本地化
babel = Babel(app)
excel.init_excel(app)
# SocketIO
# socketio = SocketIO()
# socketio.init_app(app, async_mode='eventlet', message_queue=app.config['REDIS_URL'])
# 第三方开放授权登录
oauth = OAuth(app)
# 邮件
mail = Mail(app)
# GitHub
oauth_github = oauth.remote_app(
'github',
**app.config['GITHUB_OAUTH']
)
# QQ
oauth_qq = oauth.remote_app(
'qq',
**app.config['QQ_OAUTH']
)
# WeiBo
oauth_weibo = oauth.remote_app(
'weibo',
**app.config['WEIBO_OAUTH']
)
# Google
# 要银子,妹的
# 配置日志
dictConfig(app.config['LOG_CONFIG'])
# These imports live here (after ``app`` is created) on purpose, to avoid
# circular imports between views and models.
from app_backend import views
from app_backend.views.permissions import bp_permissions
from app_backend.views.captcha import bp_captcha
from app_backend.views.customer import bp_customer
from app_backend.views.customer_contact import bp_customer_contact
from app_backend.views.customer_invoice import bp_customer_invoice
from app_backend.views.supplier import bp_supplier
from app_backend.views.supplier_contact import bp_supplier_contact
from app_backend.views.supplier_invoice import bp_supplier_invoice
from app_backend.views.user import bp_user
from app_backend.views.user_auth import bp_auth
from app_backend.views.production import bp_production
from app_backend.views.production_sensitive import bp_production_sensitive
from app_backend.views.quotation import bp_quotation
from app_backend.views.quotation_items import bp_quotation_items
from app_backend.views.enquiry import bp_enquiry
from app_backend.views.enquiry_items import bp_enquiry_items
from app_backend.views.buyer_order import bp_buyer_order
from app_backend.views.purchase import bp_purchase
from app_backend.views.sales_order import bp_sales_order
from app_backend.views.delivery import bp_delivery
from app_backend.views.warehouse import bp_warehouse
from app_backend.views.rack import bp_rack
from app_backend.views.inventory import bp_inventory
from app_backend.views.futures import bp_futures
from app_backend.views.system import bp_system
# from app_backend.views.socket_io import bp_socket_io
from app_backend.views.price import bp_price
from app_backend.views.bank import bp_bank
from app_backend.views.cash import bp_cash
from app_backend.views.bank_account import bp_bank_account
# Register blueprints.
# FIX: bp_purchase and bp_delivery used to be imported AND registered twice;
# re-registering the same blueprint is redundant and raises an error on
# Flask >= 2.1, so the duplicates were removed.
app.register_blueprint(bp_permissions)
app.register_blueprint(bp_captcha)
app.register_blueprint(bp_customer)
app.register_blueprint(bp_customer_contact)
app.register_blueprint(bp_customer_invoice)
app.register_blueprint(bp_supplier)
app.register_blueprint(bp_supplier_contact)
app.register_blueprint(bp_supplier_invoice)
app.register_blueprint(bp_user)
app.register_blueprint(bp_auth)
app.register_blueprint(bp_production)
app.register_blueprint(bp_production_sensitive)
app.register_blueprint(bp_quotation)
app.register_blueprint(bp_quotation_items)
app.register_blueprint(bp_enquiry)
app.register_blueprint(bp_enquiry_items)
app.register_blueprint(bp_buyer_order)
app.register_blueprint(bp_purchase)
app.register_blueprint(bp_sales_order)
app.register_blueprint(bp_delivery)
app.register_blueprint(bp_warehouse)
app.register_blueprint(bp_rack)
app.register_blueprint(bp_inventory)
app.register_blueprint(bp_futures)
app.register_blueprint(bp_system)
# app.register_blueprint(bp_socket_io)
app.register_blueprint(bp_price)
app.register_blueprint(bp_bank)
app.register_blueprint(bp_cash)
app.register_blueprint(bp_bank_account)
# Import custom template filters (the import itself registers them).
from app_backend import filters
| 30.483146
| 86
| 0.838555
| 792
| 5,426
| 5.435606
| 0.215909
| 0.058537
| 0.113821
| 0.141231
| 0.35029
| 0.0964
| 0.0964
| 0.0964
| 0.0964
| 0.053194
| 0
| 0.002829
| 0.08791
| 5,426
| 177
| 87
| 30.655367
| 0.867044
| 0.134353
| 0
| 0.068966
| 0
| 0
| 0.045708
| 0.017811
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.422414
| 0
| 0.422414
| 0.267241
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
f24b88cb32a898b91b261cd705b2ad3fcd5d1287
| 2,950
|
py
|
Python
|
extension/visualizer/generate_visualizer_header.py
|
AldoMyrtaj/duckdb
|
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
|
[
"MIT"
] | 2,816
|
2018-06-26T18:52:52.000Z
|
2021-04-06T10:39:15.000Z
|
extension/visualizer/generate_visualizer_header.py
|
AldoMyrtaj/duckdb
|
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
|
[
"MIT"
] | 1,310
|
2021-04-06T16:04:52.000Z
|
2022-03-31T13:52:53.000Z
|
extension/visualizer/generate_visualizer_header.py
|
AldoMyrtaj/duckdb
|
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
|
[
"MIT"
] | 270
|
2021-04-09T06:18:28.000Z
|
2022-03-31T11:55:37.000Z
|
# this script generates visualizer header
import os
# All paths are relative to the repository root (the script is expected to be
# run from there).
visualizer_dir = 'extension/visualizer'
visualizer_css = os.path.join(visualizer_dir, 'visualizer.css')
visualizer_d3 = os.path.join(visualizer_dir, 'd3.js')
visualizer_script = os.path.join(visualizer_dir, 'script.js')
# Output: generated C++ header embedding the three assets above.
visualizer_header = os.path.join(visualizer_dir, 'include', 'visualizer_constants.hpp')
def open_utf8(fpath, flags):
    """Open *fpath*, forcing UTF-8 text encoding on Python 3.

    Python 2's built-in open() has no ``encoding`` parameter, so the
    argument is only passed when running under Python 3.
    """
    import sys
    if sys.version_info[0] >= 3:
        return open(fpath, flags, encoding="utf8")
    return open(fpath, flags)
def get_byte_array(fpath, add_null_terminator=True):
    """Return the contents of *fpath* as a comma-separated decimal byte list.

    :param fpath: path of the (binary) file to read.
    :param add_null_terminator: when True, append a trailing 0 byte so the
        generated C array can be used as a NUL-terminated string.
    :return: e.g. ``"104, 105, 0"`` for a two-byte file containing "hi".
    """
    with open(fpath, 'rb') as f:
        data = bytearray(f.read())  # bytearray: yields ints on py2 and py3
    # str.join replaces the old quadratic += concatenation loop.
    parts = [str(byte) for byte in data]
    if add_null_terminator:
        parts.append("0")
    # FIX: an empty file used to produce ", 0" (a leading comma, which is an
    # invalid C initializer); it now produces just "0".
    return ", ".join(parts)
def write_file(fname, varname):
    """Return a C declaration of *fname*'s bytes as a const uint8_t array."""
    byte_list = get_byte_array(fname)
    return "const uint8_t %s[] = {%s};\n" % (varname, byte_list)
def create_visualizer_header():
    """Write visualizer_constants.hpp: license banner plus the embedded assets."""
    banner = """/* THIS FILE WAS AUTOMATICALLY GENERATED BY generate_visualizer_header.py */
/*
Copyright 2010-2020 Mike Bostock
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the author nor the names of contributors may be used to
endorse or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
"""
    # Emit the three assets in a fixed order: css, d3, script.
    pieces = [banner]
    for path, var in ((visualizer_css, "css"),
                      (visualizer_d3, "d3"),
                      (visualizer_script, "script")):
        pieces.append(write_file(path, var))
    with open_utf8(visualizer_header, 'w+') as f:
        f.write("".join(pieces))


# Running the script generates the header as a side effect.
create_visualizer_header()
| 36.419753
| 92
| 0.737627
| 410
| 2,950
| 5.207317
| 0.453659
| 0.044965
| 0.018735
| 0.037471
| 0.129274
| 0.0637
| 0.0637
| 0.0637
| 0.0637
| 0.0637
| 0
| 0.007913
| 0.186102
| 2,950
| 81
| 93
| 36.419753
| 0.881299
| 0.01322
| 0
| 0.031746
| 1
| 0
| 0.586598
| 0.018213
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063492
| false
| 0
| 0.031746
| 0
| 0.15873
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f251f3a1ac391e245be08c921c85c8b349b00732
| 1,924
|
py
|
Python
|
fineDinner.py
|
SMartQi/whose-treat
|
85f1d27dfb2b728a33cf8b6fcd73213ca24edb0b
|
[
"MIT"
] | 1
|
2020-01-30T11:09:31.000Z
|
2020-01-30T11:09:31.000Z
|
fineDinner.py
|
SMartQi/whose-treat
|
85f1d27dfb2b728a33cf8b6fcd73213ca24edb0b
|
[
"MIT"
] | null | null | null |
fineDinner.py
|
SMartQi/whose-treat
|
85f1d27dfb2b728a33cf8b6fcd73213ca24edb0b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#-*- encoding:UTF-8 -*-
"""
Background:
JJ and MM want to have a fine dinner, celebrating their annual bonuses. They make this rule:
This dinner is on the person who gets more annual bonus. And the cost of the dinner is the diff of money they make mod 300, per capita.
Requirement:
Decide the money amount and the money provider, without letting one know how much the other's annual bonus is.
Method:
Hide the input.
Use the method "Best two out of three" in case of any typo, since the input strings are hidden.
"""
import getpass
def cal():
    """
    Decide the money amount and the money provider.
    """
    jj = validInput("JJ: ")
    mm = validInput("MM: ")
    difference = jj - mm
    # The person with the larger bonus pays.
    payer = "MM" if difference < 0 else "JJ"
    amount = int(round(abs(difference) % 300))
    return amount, payer
def validInput(prompt):
    """
    Get a valid input and convert it to a float number.

    Input is read with getpass so the amount is not echoed to the screen;
    re-prompts until the string parses as a float.
    """
    while True:
        raw = getpass.getpass(prompt)
        try:
            return float(raw)
        except ValueError:
            print("Invalid input. Try again.")
if __name__ == "__main__":
    """
    Use the method "Best two out of three" in case of any typo, since the input strings are hidden.
    """
    # Round 1 and 2: both rounds must agree on amount and payer.
    (result1, onWhom1) = cal()
    print("Let's double check.")
    (result2, onWhom2) = cal()
    if result1 == result2 and onWhom1 == onWhom2:
        if result1 == 0:
            print("No dinner at all. But go to buy some lottery~")
        else :
            print("OK. Let's have dinner. " + str(result1) + " yuan per person on " + onWhom1 + ".")
    else :
        print("Something's wrong. Let's triple check.")
        # Tie-breaking round 3: it must match one of the first two rounds.
        (result3, onWhom3) = cal()
        if (result1 == result3 and onWhom1 == onWhom3) or (result2 == result3 and onWhom2 == onWhom3):
            if result3 == 0:
                print("No dinner at all. But go to buy some lottery~")
            else :
                print("OK. " + str(result3) + " it is. It's on " + onWhom3 + ".")
        else:
            print("Are you kidding me? I quit!")
| 29.6
| 135
| 0.670478
| 292
| 1,924
| 4.390411
| 0.458904
| 0.024961
| 0.021841
| 0.031201
| 0.25429
| 0.25429
| 0.25429
| 0.25429
| 0.193448
| 0.193448
| 0
| 0.022981
| 0.20842
| 1,924
| 64
| 136
| 30.0625
| 0.818779
| 0.327963
| 0
| 0.157895
| 0
| 0
| 0.244617
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0.078947
| 0.026316
| 0
| 0.131579
| 0.210526
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
f263dc6e6df0ca9888bd8e9bcfdb5d8ed564b445
| 507
|
py
|
Python
|
yaga_ga/evolutionary_algorithm/operators/base.py
|
alessandrolenzi/yaga
|
872503ad04a2831135143750bc309188e5685284
|
[
"MIT"
] | null | null | null |
yaga_ga/evolutionary_algorithm/operators/base.py
|
alessandrolenzi/yaga
|
872503ad04a2831135143750bc309188e5685284
|
[
"MIT"
] | null | null | null |
yaga_ga/evolutionary_algorithm/operators/base.py
|
alessandrolenzi/yaga
|
872503ad04a2831135143750bc309188e5685284
|
[
"MIT"
] | null | null | null |
from typing import Generic, TypeVar
from typing_extensions import Final
from yaga_ga.evolutionary_algorithm.individuals import IndividualStructure
class InvalidOperatorError(ValueError):
    """Signals that a genetic operator was built with invalid arguments."""
IndividualType = TypeVar("IndividualType")
GeneType = TypeVar("GeneType")
class GeneticOperator(Generic[IndividualType, GeneType]):
    """Common base for genetic operators.

    Holds the IndividualStructure describing the individuals the operator
    works on; subclasses implement the actual operation.
    """

    def __init__(self, individual_structure: IndividualStructure[IndividualType, GeneType]):
        # Final: the structure is fixed for the operator's lifetime.
        self.individual_structure: Final = individual_structure
| 24.142857
| 81
| 0.792899
| 47
| 507
| 8.340426
| 0.531915
| 0.168367
| 0.117347
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142012
| 507
| 20
| 82
| 25.35
| 0.901149
| 0
| 0
| 0
| 0
| 0
| 0.043393
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0.083333
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
f26a5c6b870f2f9eb67aa2735878c21021be7143
| 324
|
py
|
Python
|
leetcode/easy/plus-one.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 8
|
2019-05-14T12:50:29.000Z
|
2022-03-01T09:08:27.000Z
|
leetcode/easy/plus-one.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 46
|
2019-03-24T20:59:29.000Z
|
2019-04-09T16:28:43.000Z
|
leetcode/easy/plus-one.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 1
|
2022-01-28T12:46:29.000Z
|
2022-01-28T12:46:29.000Z
|
class Solution:
    def plusOne(self, digits: list[int]) -> list[int]:
        """Return the decimal digits of the number *digits* plus one.

        FIX: the original annotated with ``List`` without importing it from
        ``typing`` (NameError at import time); PEP 585 builtin generics
        (``list[int]``) need no import.

        :param digits: most-significant-first decimal digits, e.g. [1, 2, 3].
        :return: digits of the incremented number, e.g. [1, 2, 4].
        """
        carry = 1
        result = []
        # Walk least-significant-first, propagating the carry.
        for digit in digits[::-1]:
            digit += carry
            result.append(digit % 10)
            carry = digit // 10
        # A leftover carry means an extra leading digit (e.g. 99 -> 100).
        if carry:
            result.append(carry)
        return result[::-1]
| 20.25
| 54
| 0.475309
| 35
| 324
| 4.4
| 0.514286
| 0.090909
| 0.220779
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036082
| 0.401235
| 324
| 15
| 55
| 21.6
| 0.757732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f26cee0b9842c7bd2fa3f00e76d7e1a08850c951
| 450
|
py
|
Python
|
coloredterm/__init__.py
|
hostedposted/coloredterm
|
72d07a0bd12eb797e4b2772dfe45aca5234d27b6
|
[
"MIT"
] | 1
|
2021-02-12T01:21:44.000Z
|
2021-02-12T01:21:44.000Z
|
coloredterm/__init__.py
|
hostedposted/coloredterm
|
72d07a0bd12eb797e4b2772dfe45aca5234d27b6
|
[
"MIT"
] | 4
|
2021-07-07T04:09:58.000Z
|
2022-02-03T04:05:30.000Z
|
coloredterm/__init__.py
|
hostedposted/coloredterm
|
72d07a0bd12eb797e4b2772dfe45aca5234d27b6
|
[
"MIT"
] | 1
|
2021-02-20T22:58:31.000Z
|
2021-02-20T22:58:31.000Z
|
"""Collection of tools for changing the text of your terminal."""
from coloredterm.coloredterm import (
Back,
bg,
colored,
colors,
cprint,
fg,
Fore,
names,
pattern_input,
pattern_print,
rand,
Style
)
# Package version string.
__version__ = "0.1.9"
# Names re-exported by "from coloredterm import *" (mirrors the import above).
__all__ = [
    'Back',
    'bg',
    'colored',
    'colors',
    'cprint',
    'fg',
    'Fore',
    'names',
    'pattern_input',
    'pattern_print',
    'rand',
    'Style'
]
| 14.516129
| 65
| 0.542222
| 47
| 450
| 4.93617
| 0.638298
| 0.051724
| 0.112069
| 0.163793
| 0.594828
| 0.594828
| 0.594828
| 0.594828
| 0.594828
| 0.594828
| 0
| 0.00974
| 0.315556
| 450
| 31
| 66
| 14.516129
| 0.743506
| 0.131111
| 0
| 0
| 0
| 0
| 0.196891
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.034483
| 0
| 0.034483
| 0.137931
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f26e13939dbd7efae31817537aae9cd55a260550
| 1,706
|
py
|
Python
|
src/export_as_csv.py
|
mustilica/tt-history
|
1bb60cb81e97ef1abecf657cfa078798bb29cace
|
[
"MIT"
] | 26
|
2015-02-12T20:33:01.000Z
|
2018-04-25T05:29:52.000Z
|
src/export_as_csv.py
|
mustilica/tt-history
|
1bb60cb81e97ef1abecf657cfa078798bb29cace
|
[
"MIT"
] | 3
|
2019-11-27T18:19:23.000Z
|
2020-11-26T08:53:13.000Z
|
src/export_as_csv.py
|
mustilica/tt-history
|
1bb60cb81e97ef1abecf657cfa078798bb29cace
|
[
"MIT"
] | 8
|
2015-01-11T00:12:40.000Z
|
2018-04-01T22:34:45.000Z
|
# Run from GAE remote API:
# {GAE Path}\remote_api_shell.py -s {YourAPPName}.appspot.com
# import export_as_csv
import csv
from google.appengine.ext import db
from google.appengine.ext.db import GqlQuery
def exportToCsv(query, csvFileName, delimiter):
    """Export every row returned by an App Engine datastore *query* to CSV.

    Rows are fetched in pages of 1000 using datastore cursors, so arbitrarily
    large result sets can be exported without holding them all in memory.
    NOTE: Python 2 code (print statements, csv opened in 'wb' mode).

    :param query: a db/GqlQuery supporting fetch()/cursor()/with_cursor().
    :param csvFileName: output file path.
    :param delimiter: CSV field delimiter character.
    """
    with open(csvFileName, 'wb') as csvFile:
        csvWriter = csv.writer(csvFile, delimiter=delimiter,
                               quotechar='|', quoting=csv.QUOTE_MINIMAL)
        writeHeader(csvWriter)
        rowsPerQuery = 1000
        totalRowsSaved = 0
        cursor = None
        areMoreRows = True
        while areMoreRows:
            # Resume from the previous page's cursor (None on the first pass).
            if cursor is not None:
                query.with_cursor(cursor)
            items = query.fetch(rowsPerQuery)
            cursor = query.cursor()
            currentRows = 0
            for item in items:
                saveItem(csvWriter, item)
                currentRows += 1
            totalRowsSaved += currentRows
            # A short page means the query is exhausted.
            areMoreRows = currentRows >= rowsPerQuery
            print 'Saved ' + str(totalRowsSaved) + ' rows'
    print 'Finished saving all rows.'
def writeHeader(csvWriter):
    """Write the CSV column header row (hashtag, region, timestamp, duration)."""
    header = ['hashtag', 'region', 'timestamp', 'duration (in minutes)']
    csvWriter.writerow(header)
def saveItem(csvWriter, item):
    """Append one item as a CSV row: name, woeid, timestamp, time."""
    row = [item.name, item.woeid, item.timestamp, item.time]
    csvWriter.writerow(row)
class Trend(db.Model):
    """Datastore model of one trending-topic observation."""
    name = db.StringProperty()        # hashtag / topic name
    woeid = db.IntegerProperty()      # region id (exported under the 'region' header)
    timestamp = db.IntegerProperty()  # observation time — presumably epoch seconds; confirm
    time = db.IntegerProperty()       # duration — header labels it "in minutes"
# Query for items
# Export all observations of the #JeSuisCharlie hashtag to a local CSV.
query = GqlQuery("SELECT * FROM Trend WHERE name = '#JeSuisCharlie'")
exportToCsv(query, '/home/mustilica/remote.csv', ',')
| 28.433333
| 74
| 0.622509
| 176
| 1,706
| 6
| 0.5
| 0.048295
| 0.035985
| 0.041667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005691
| 0.279015
| 1,706
| 59
| 75
| 28.915254
| 0.852846
| 0.100821
| 0
| 0
| 0
| 0
| 0.103471
| 0.017027
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.081081
| null | null | 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f27341117d08bd618bf3ac5014feb6d7ff7d069e
| 801
|
py
|
Python
|
kafka_client_decorators/util/logging_helper.py
|
cdsedson/kafka-decorator
|
f2c958df88c5698148aae4c5314dd39e31e995c3
|
[
"MIT"
] | null | null | null |
kafka_client_decorators/util/logging_helper.py
|
cdsedson/kafka-decorator
|
f2c958df88c5698148aae4c5314dd39e31e995c3
|
[
"MIT"
] | null | null | null |
kafka_client_decorators/util/logging_helper.py
|
cdsedson/kafka-decorator
|
f2c958df88c5698148aae4c5314dd39e31e995c3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Define function used on logging."""
import logging
__KAFKA_DECORATOR_DEBUG__ = None
def set_debug_level(level):
    """Set the logging level applied to all loggers created by get_logger().

    Parameters
    ----------
    level:
        A log level as defined in the ``logging`` module
        (e.g. ``logging.DEBUG``). Affects only loggers created after
        this call.
    """
    global __KAFKA_DECORATOR_DEBUG__
    __KAFKA_DECORATOR_DEBUG__ = level
def get_logger(name):
    """Create and return a standard python logger.

    Parameters
    ----------
    name: str
        Logger name

    Returns
    -------
    logging.Logger
        A logger whose level is set to the value previously chosen via
        set_debug_level(), if any.
    """
    logger = logging.getLogger(name)
    level = __KAFKA_DECORATOR_DEBUG__
    if level is not None:
        logger.setLevel(level)
    return logger
| 19.536585
| 67
| 0.636704
| 93
| 801
| 5.11828
| 0.494624
| 0.147059
| 0.19958
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003384
| 0.262172
| 801
| 40
| 68
| 20.025
| 0.80203
| 0.475655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.1
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f27e08d8b8e21a50f9f19aef584ea000ba47242e
| 6,070
|
py
|
Python
|
app/loader.py
|
DFilyushin/librusec
|
fd6d7a99037aac4c1112f648397830284f4165f9
|
[
"Apache-2.0"
] | 2
|
2017-12-14T11:50:16.000Z
|
2021-12-27T13:42:16.000Z
|
app/loader.py
|
DFilyushin/librusec
|
fd6d7a99037aac4c1112f648397830284f4165f9
|
[
"Apache-2.0"
] | null | null | null |
app/loader.py
|
DFilyushin/librusec
|
fd6d7a99037aac4c1112f648397830284f4165f9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import datetime
import time
import MySQLdb as mdb
# Directory holding the librusec .inp index files to import.
LIB_INDEXES = 'D:\\TEMP\\librusec'
# MySQL connection settings.
# NOTE(review): credentials are hard-coded; consider environment variables.
MYSQL_HOST = '127.0.0.1'
MYSQL_BASE = 'books100'
MYSQL_LOGIN = 'root'
MYSQL_PASSW = 'qwerty'
# Database bootstrap statements (checked/created in create_schema()).
SQL_CHECK_BASE = "SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = '%s'"
SQL_CREATE_BASE = "CREATE DATABASE `%s` DEFAULT CHARACTER SET utf8 COLLATE utf8_unicode_ci;"
SQL_USE_BASE = 'USE `%s`;'
class BookDatabase(object):
    """
    Database class for store books
    """
    # NOTE(review): SQL is assembled with str.format instead of parameterized
    # queries — vulnerable to SQL injection and broken quoting for values
    # containing quotes. Prefer cursor.execute(sql, params).
    SQL_NEW_BOOK = u"INSERT INTO books VALUES ({0}, '{1}', {2}, '{3}', '{4}', '{5}')"
    SQL_CHECK_AUTHOR = u"select id from authors where last_name='{0}' and first_name = '{1}' and middle_name='{2}'"
    SQL_NEW_AUTHOR = u"INSERT INTO authors (last_name, first_name, middle_name) VALUES ('{0}', '{1}', '{2}')"
    SQL_NEW_LINK = u"INSERT INTO link_ab VALUES ({0}, {1})"

    def __init__(self):
        # One shared connection/cursor used for all operations.
        self._conn = mdb.connect(MYSQL_HOST, MYSQL_LOGIN, MYSQL_PASSW, MYSQL_BASE, charset='utf8')
        self._cur = self._conn.cursor()

    def get_last_row_id(self):
        # Auto-increment id of the most recently inserted row.
        return self._cur.lastrowid

    def exec_sql(self, sql):
        # Execute a raw SQL statement on the shared cursor.
        return self._cur.execute(sql)

    def get_row_count(self):
        # Rows matched/affected by the last statement.
        return self._cur.rowcount

    def get_value(self, index):
        # Fetch one row of the pending result set and return column *index*.
        data = self._cur.fetchone()
        return data[index]

    def store_author(self, last_name, first_name, middle_name):
        """
        Store new author with check existing record
        :param last_name: last name author
        :param first_name: first name author
        :param middle_name: middle name author
        :return: Id new record
        """
        sql = self.SQL_CHECK_AUTHOR.format(last_name, first_name, middle_name)
        self.exec_sql(sql)
        if self.get_row_count() == 0:
            # Unknown author: insert and return the new id.
            sql = self.SQL_NEW_AUTHOR.format(last_name, first_name, middle_name)
            self.exec_sql(sql)
            id_author = self.get_last_row_id()
        else:
            # Known author: return the existing id.
            id_author = self.get_value(0)
        return id_author

    def store_book(self, id_book, name, book_size, book_type, lang, genre):
        """
        Store new book
        :param id_book: Id book
        :param name: Name of book
        :param book_size: Size book in bytes
        :param book_type: Type book
        :param lang: Language book
        :param genre: Genres
        :return: Id new record
        """
        # Replace single quotes so the formatted SQL does not break.
        book_name = name.replace("'", '`')
        sql = self.SQL_NEW_BOOK.format(id_book, book_name, book_size, book_type, lang, genre)
        self.exec_sql(sql)
        return id_book

    def store_author_in_book(self, id_book, id_author):
        """
        Store links for book+author
        :param id_book: Id book
        :param id_author: Id author
        :return: nothing
        """
        sql = self.SQL_NEW_LINK.format(id_book, id_author)
        self.exec_sql(sql)
def create_schema(filename):
    """
    Create database schema from sql-file (Python 2: uses print statements).
    :param filename: Input schema sql-file for MySql
    :return:
    """
    start = time.time()
    # NOTE(review): the file handle is never closed; a with-block would be safer.
    f = open(filename, 'r')
    sql = " ".join(f.readlines())
    print "Start executing: " + filename + " at " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M")) + "\n" + sql
    conn = mdb.connect(MYSQL_HOST, MYSQL_LOGIN, MYSQL_PASSW)
    cur = conn.cursor()
    # Only create and populate the database when it does not exist yet.
    sql_check = SQL_CHECK_BASE % MYSQL_BASE
    cur.execute(sql_check)
    if cur.rowcount == 0:
        cur.execute(SQL_CREATE_BASE % MYSQL_BASE)
        cur.execute(SQL_USE_BASE % MYSQL_BASE)
        cur.execute(sql)
    else:
        print "Database exist. Stop!"
    end = time.time()
    print "Time elapsed to run the query:"
    print str((end - start)*1000) + ' ms'
def process_file(inp_file, book_db):
    """Parse one librusec .inp index file and store its books/authors.

    Each line is a chr(4)-separated record; fields used: 0=authors,
    1=genre, 2=title, 6=size, 7=book id, 9=type, 11=language.
    Python 2 code (print statements, str.decode on file lines).
    NOTE(review): if store_book raises, id_book (and likewise id_author)
    may be unbound or stale when linking below — errors are only printed.
    """
    with open(inp_file) as f:
        row_counter = 0
        for line in f:
            row_counter += 1
            line = line.decode('utf-8').strip()
            book_item = line.split(chr(4))
            bid = book_item[7]
            bname = book_item[2]
            bsize = book_item[6]
            btype = book_item[9]
            blang = book_item[11]
            bgenre = book_item[1]
            try:
                id_book = book_db.store_book(int(bid), bname, int(bsize), btype, blang, bgenre)
            except IndexError:
                print 'Index error in %s file (%d line)' % (inp_file, row_counter)
            except Exception as e:
                print 'Error message: %s (%s)' % (e.args, e.message)
            # Field 0 holds authors as "last,first,middle" triples joined by ':'.
            author_line = line.split(chr(4))[0]
            author_line = author_line.replace("'", '`')
            authors = author_line.split(':')
            for author in authors:
                item = author.split(',')
                if len(item) > 1:
                    try:
                        id_author = book_db.store_author(item[0], item[1], item[2])
                    except Exception as e:
                        print 'Error message author: %s (%s). Error in %s file (%d line)' % (e.args, e.message, inp_file, row_counter)
                    try:
                        book_db.store_author_in_book(id_book, id_author)
                    except Exception as e:
                        print 'Error message link: %s (%s). Error in %s file (%d line)' % (e.args, e.message, inp_file, row_counter)
def process_index_files(path_to_index):
    """
    Processing all files in path LIB_INDEXES
    :param path_to_index: path to LIB_ARCHIVE
    :return:
    """
    book_db = BookDatabase()
    index = 0
    # Python 2: filter() returns a list here, so len() below is valid.
    indexes = filter(lambda x: x.endswith('.inp'), os.listdir(path_to_index))
    cnt_files = len(indexes)
    # chdir so process_file can open index files by bare name.
    os.chdir(path_to_index)
    for index_file in indexes:
        index += 1
        print 'Process file %s. File %d from %d' % (index_file, index, cnt_files)
        start_time = time.time()
        process_file(index_file, book_db)
        elapsed = (time.time() - start_time)
        print "Ok. Processing in {:10.4f} s.".format(elapsed)
def main():
    # Build the schema first, then import all .inp index files.
    create_schema('schema.sql')
    process_index_files(LIB_INDEXES)


if __name__ == "__main__":
    main()
| 33.351648
| 134
| 0.594728
| 830
| 6,070
| 4.116867
| 0.222892
| 0.019315
| 0.019023
| 0.019901
| 0.191396
| 0.191396
| 0.134914
| 0.104185
| 0.08487
| 0.059701
| 0
| 0.01293
| 0.286491
| 6,070
| 181
| 135
| 33.535912
| 0.776033
| 0.00346
| 0
| 0.103448
| 0
| 0.008621
| 0.162982
| 0.005283
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.025862
| 0.034483
| null | null | 0.086207
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f27f8a655e82f556df2399b3f99f4848f377c47b
| 375
|
py
|
Python
|
app/models/word.py
|
shiniao/soul-api
|
1438281c2dce237d735f7309c2ddb606c8d01e1e
|
[
"Apache-2.0"
] | 1
|
2021-02-27T09:05:40.000Z
|
2021-02-27T09:05:40.000Z
|
app/models/word.py
|
shiniao/soulapi
|
1438281c2dce237d735f7309c2ddb606c8d01e1e
|
[
"Apache-2.0"
] | null | null | null |
app/models/word.py
|
shiniao/soulapi
|
1438281c2dce237d735f7309c2ddb606c8d01e1e
|
[
"Apache-2.0"
] | null | null | null |
from sqlalchemy import Column, Integer, String
from app.database import Base
class Word(Base):
    """SQLAlchemy model of one vocabulary word and its translation."""
    # Surrogate primary key.
    id = Column(Integer, primary_key=True, index=True, autoincrement=True)
    # The word itself; unique and required.
    origin = Column(String, index=True, unique=True, nullable=False)
    pronunciation = Column(String)
    translation = Column(String)
    # Stored as String — presumably ISO-formatted timestamps; confirm with callers.
    created_at = Column(String)
    updated_at = Column(String)
| 26.785714
| 74
| 0.733333
| 47
| 375
| 5.787234
| 0.553191
| 0.220588
| 0.102941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170667
| 375
| 13
| 75
| 28.846154
| 0.874598
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f280236c60f310af1d18ad0b782faeb404b108be
| 912
|
py
|
Python
|
anomaly/Read_img.py
|
Jun-CEN/Open-World-Semantic-Segmentation
|
a95bac374e573055c23220e299789f34292988bc
|
[
"MIT"
] | 19
|
2021-08-09T15:34:10.000Z
|
2022-03-14T09:20:58.000Z
|
anomaly/Read_img.py
|
Jun-CEN/Open-World-Semantic-Segmentation
|
a95bac374e573055c23220e299789f34292988bc
|
[
"MIT"
] | 4
|
2021-11-08T07:10:35.000Z
|
2022-01-16T01:53:06.000Z
|
anomaly/Read_img.py
|
Jun-CEN/Open-World-Semantic-Segmentation
|
a95bac374e573055c23220e299789f34292988bc
|
[
"MIT"
] | 4
|
2021-10-06T09:28:16.000Z
|
2022-01-14T08:26:54.000Z
|
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import bdlb
import torch
import json
# path_img = './data/test_result/t5/'
# path_img = './results_18_ce_noshuffle/2_'
#
# image = Image.open(path_img + '100.png')
# plt.imshow(image)
# plt.show()
#
# overlay = Image.open(path_img + 'overlay.png')
# plt.imshow(overlay)
# plt.show()
#
# pred = Image.open(path_img + 'pred.png')
# plt.imshow(pred)
# plt.show()
#
# target = Image.open(path_img + 'target.png')
# plt.imshow(target)
# plt.show()
#
# scores = Image.open(path_img + 'scores.png')
# scores = np.array(scores) / 255
# plt.imshow(scores)
# plt.show()
#
# dis_sum = np.load(path_img + 'dis_sum.npy')
# plt.imshow(dis_sum)
# plt.show()
# Load the per-class logit lists and plot a histogram for each of the
# 13 classes (the class count is hard-coded to match the dataset).
with open('logit_dict.json','r',encoding='utf8')as fp:
    json_data = json.load(fp)
    for i in range(13):
        # Print how many logit samples class i has, then show its histogram.
        print(len(json_data[i]))
        plt.figure()
        plt.hist(json_data[i])
        plt.show()
| 20.727273
| 54
| 0.667763
| 146
| 912
| 4.034247
| 0.376712
| 0.095076
| 0.110357
| 0.135823
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016731
| 0.148026
| 912
| 43
| 55
| 21.209302
| 0.741313
| 0.604167
| 0
| 0
| 0
| 0
| 0.06006
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.461538
| 0
| 0.461538
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
f2854a477097d46506783a017f1b2352a0421334
| 570
|
py
|
Python
|
school/migrations/0018_listemplois.py
|
Belaid-RWW/PFAEspaceParent
|
8fd0000d4ee1427599bcb7da5aa301050469e7a8
|
[
"MIT"
] | null | null | null |
school/migrations/0018_listemplois.py
|
Belaid-RWW/PFAEspaceParent
|
8fd0000d4ee1427599bcb7da5aa301050469e7a8
|
[
"MIT"
] | null | null | null |
school/migrations/0018_listemplois.py
|
Belaid-RWW/PFAEspaceParent
|
8fd0000d4ee1427599bcb7da5aa301050469e7a8
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-05-07 03:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration creating the ListEmplois model."""

    dependencies = [
        ('school', '0017_emplois_emp'),
    ]

    operations = [
        migrations.CreateModel(
            name='ListEmplois',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Classe', models.CharField(max_length=100)),
                # Empty upload_to stores files at MEDIA_ROOT directly.
                ('pdf', models.FileField(upload_to='')),
            ],
        ),
    ]
| 25.909091
| 114
| 0.568421
| 58
| 570
| 5.465517
| 0.827586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054726
| 0.294737
| 570
| 21
| 115
| 27.142857
| 0.733831
| 0.078947
| 0
| 0
| 1
| 0
| 0.087954
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f28637ac36ec4e4cf9bd05dd4661f26ee82946dd
| 900
|
py
|
Python
|
ejercicios_resueltos/t04/t04ejer03.py
|
workready/pythonbasic
|
59bd82caf99244f5e711124e1f6f4dec8de22141
|
[
"MIT"
] | null | null | null |
ejercicios_resueltos/t04/t04ejer03.py
|
workready/pythonbasic
|
59bd82caf99244f5e711124e1f6f4dec8de22141
|
[
"MIT"
] | null | null | null |
ejercicios_resueltos/t04/t04ejer03.py
|
workready/pythonbasic
|
59bd82caf99244f5e711124e1f6f4dec8de22141
|
[
"MIT"
] | null | null | null |
import os
def gcat(filenames):
    """Lazily yield every line of every file in *filenames*, in order."""
    for name in filenames:
        with open(name) as handle:
            yield from handle
def ggrep(pattern, filenames):
    """Lazily yield lines from *filenames* containing *pattern* (plain substring match)."""
    for name in filenames:
        with open(name) as handle:
            yield from (line for line in handle if pattern in line)
# Test code for gcat
print("Fichero linea a linea")
print("-----------------------------")
# quijote.txt lives next to this script; build an absolute path to it.
for line in gcat([os.path.join(os.path.dirname(os.path.realpath(__file__)), 'quijote.txt')]):
    print(line)
print("-----------------------------")
print()
print()
# Test code for ggrep
print("Lineas del fichero que contienen la palabra 'los'")
print("-----------------------------")
for l in list(ggrep("los", [os.path.join(os.path.dirname(os.path.realpath(__file__)), 'quijote.txt')])):
    print(l)
print("-----------------------------")
| 25
| 104
| 0.537778
| 111
| 900
| 4.288288
| 0.369369
| 0.07563
| 0.056723
| 0.092437
| 0.487395
| 0.487395
| 0.487395
| 0.487395
| 0.487395
| 0.487395
| 0
| 0
| 0.221111
| 900
| 35
| 105
| 25.714286
| 0.67903
| 0.062222
| 0
| 0.583333
| 0
| 0
| 0.25119
| 0.138095
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.041667
| 0
| 0.125
| 0.416667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 1
|
f2864bce946124a8b9383d4c53008de00cff4e49
| 2,460
|
py
|
Python
|
swot_item_vote/views.py
|
imranariffin/liveswot-api
|
a2acc05fd2c51adc30e8e1785b857a94af81677d
|
[
"MIT"
] | null | null | null |
swot_item_vote/views.py
|
imranariffin/liveswot-api
|
a2acc05fd2c51adc30e8e1785b857a94af81677d
|
[
"MIT"
] | 25
|
2018-03-25T05:25:22.000Z
|
2021-06-10T19:51:12.000Z
|
swot_item_vote/views.py
|
imranariffin/liveswot-api
|
a2acc05fd2c51adc30e8e1785b857a94af81677d
|
[
"MIT"
] | 2
|
2018-07-02T02:59:24.000Z
|
2018-08-21T02:58:21.000Z
|
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError
from rest_framework.decorators import api_view
from rest_framework import status
from swot_item_vote.models import Vote
from swot_item.models import SwotItem
from .serializers import serialize, get_item_confidence
from swot.models import Swot
from core.decorators import authenticate
from core.serializers import deserialize
@api_view(['GET'])
@authenticate
@deserialize
@serialize
def vote_list(request, swot_id):
    """Return all votes belonging to the swot identified by *swot_id*.

    Responds 404 with an error message when the swot does not exist;
    otherwise a list of Vote objects with HTTP 200. Serialization of the
    (data, status, errors) tuple is handled by the decorators.
    """
    try:
        Swot.objects.get(id=swot_id)
    except ObjectDoesNotExist:
        return (
            None,
            status.HTTP_404_NOT_FOUND,
            ['Swot {} does not exist'.format(swot_id)]
        )
    votes = Vote.objects.filter(swot_id=swot_id)
    return (
        [vote for vote in votes],
        status.HTTP_200_OK,
        None,
    )
@api_view(['POST'])
@authenticate
@deserialize
@serialize
def vote(request, swot_item_id):
    """Record, switch, or retract a vote on a SWOT item.

    Toggle semantics: an existing vote of the same type is deleted (un-vote,
    200); an existing vote of the other type is replaced; otherwise a new
    vote is created (201). Returns a (payload, status, errors) tuple
    consumed by the decorators.
    """
    swot_item_id = int(swot_item_id)
    vote_type = request.body['voteType']
    user_id = request.user.id
    try:
        swot_item = SwotItem.objects.get(id=swot_item_id)
    except ObjectDoesNotExist:
        # Fixed garbled word order of the original message
        # ("Swot Item does {} not exist").
        return (
            None,
            status.HTTP_404_NOT_FOUND,
            ['Swot Item {} does not exist'.format(swot_item_id)]
        )
    try:
        existing_vote = Vote.objects.get(
            swot_item_id=swot_item_id,
            created_by_id=user_id
        )
        existing_vote_type = existing_vote.voteType
        existing_vote.delete()
        if existing_vote_type == vote_type:
            # Same vote twice -> un-vote; the deletion above is the whole effect.
            return (
                None,
                status.HTTP_200_OK,
                None
            )
    except ObjectDoesNotExist:
        pass
    try:
        vote = Vote(
            created_by_id=user_id,
            swot_item_id=swot_item_id,
            swot_id=swot_item.swot_id,
            voteType=vote_type
        )
    except IntegrityError as ie:
        # 'except E, e' (Python-2-only syntax) replaced with the portable
        # 'as' form, valid on Python 2.6+ and Python 3.
        return (
            None,
            status.HTTP_400_BAD_REQUEST,
            [ie]
        )
    try:
        vote.save()
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # still propagate; the best-effort error response is preserved.
        return (
            None,
            status.HTTP_400_BAD_REQUEST,
            ['Error saving vote']
        )
    # Keep the denormalized confidence score in sync with the new vote set.
    SwotItem.objects\
        .filter(pk=swot_item_id)\
        .update(score=get_item_confidence(swot_item_id))
    return (
        vote,
        status.HTTP_201_CREATED,
        None,
    )
| 21.964286
| 64
| 0.605285
| 286
| 2,460
| 4.926573
| 0.241259
| 0.096522
| 0.07807
| 0.070972
| 0.316537
| 0.176011
| 0.133428
| 0.086586
| 0.086586
| 0.086586
| 0
| 0.01253
| 0.318699
| 2,460
| 111
| 65
| 22.162162
| 0.828162
| 0
| 0
| 0.395604
| 0
| 0
| 0.032927
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.010989
| 0.10989
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2895989ed18fa1ea8643af23dca6836bad3cec9
| 30,553
|
py
|
Python
|
car2dc-kiran/Scripts/StartTraffic.py
|
kirannCS/MasterThesis
|
a12771dc40efe77ae7d6e1631ed66c4b9992afd8
|
[
"Unlicense"
] | null | null | null |
car2dc-kiran/Scripts/StartTraffic.py
|
kirannCS/MasterThesis
|
a12771dc40efe77ae7d6e1631ed66c4b9992afd8
|
[
"Unlicense"
] | null | null | null |
car2dc-kiran/Scripts/StartTraffic.py
|
kirannCS/MasterThesis
|
a12771dc40efe77ae7d6e1631ed66c4b9992afd8
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
#################################################################################
################# Helper Module #################################################
################# Provides abstraction to car sensors and PHY layer #############
#################################################################################
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
sys.path.append('../src/packets/header/')
# Import proto modules
import CARRequestToMT_pb2
import MTGPSResponse_pb2
import MTSpeedResponse_pb2
import MsgFromNodeToUDM_pb2
import MessageForwardFromUDM_pb2
import BigData_pb2
import DistributeProcesses_pb2
# Import other libraries
import optparse
import subprocess
import random
import time
import zmq
import thread
import json
import xml.etree.ElementTree as ET
import netifaces as ni
import math
import sys
import linecache
import datetime
import threading
import base64
from threading import Lock, Thread
import time
from xmlr import xmliter
# Uncomment when required debugging
# debugger proc
"""def traceit(frame, event, arg):
if event == "line":
lineno = frame.f_lineno
filename = frame.f_globals["__file__"]
if (filename.endswith(".pyc") or
filename.endswith(".pyo")):
filename = filename[:-1]
name = frame.f_globals["__name__"]
line = linecache.getline(filename, lineno)
print(name, lineno, line.rstrip())
return traceit"""
# Global variables (populated from the config files in ReadConfigfile /
# updateLoggingUtilConfigParams unless noted otherwise).
ParserIP = ""                 # IP the mobility/UDM servers bind to
ParserPort = ""               # port of the SUMO-data (mobility) REP server
UDMPort = ""                  # port of the UDM message-forwarding PULL server
Keys = []                     # scratch list of hashmap keys (cleanup paths)
SumoFloatingDataPath = ""     # path to the SUMO floating-car-data XML trace
VehInfoHashMap = {}           # vehicle id -> JSON string {X, Y, SPE, DIR, LANE}
APInfoHashMap = {}            # AP id -> JSON string {X, Y}
StartPort = 12000             # first port handed out to spawned car processes
Incrementor = 0               # port offset; start_carProc advances it by 5 per car
AllAddrTable = {}             # node id -> JSON string {IP, PORT, TYPE}
CommRange = 0.0               # communication range (set to a string by config parse)
LogInfoEnable = ""            # "TRUE"/"FALSE" flags read from the logging config:
LogInfoFile = ""
LogInfoStdOutput = ""
LogDebugEnable = ""
LogDebugFile = ""
LogDebugStdOutput = ""
LogStatsEnable = ""
LogStatsFILE = ""
LogStatsStdOutput = ""
LogErrorEnable = ""
LogErrorFILE = ""
LogErrorStdOutput = ""
LogFilePath = ""
ExperimentNumber = ""         # tag used in log lines and the log file name
RunInForeGround = ""          # "TRUE" runs every spawned module in the foreground
RunInForeGroundList = ""      # ids that individually run in the foreground
LogFile = ""                  # file handle opened in updateLoggingUtilConfigParams
UDMPublisher = "NULL"         # PUB socket for ordinary message forwards
UDMExitPublisher = "NULL"     # PUB socket for exit/termination messages
SystemsIP = []                # machines that may host spawned processes
SystemsIPSubscript = 0        # round-robin index into SystemsIP
DistributedSystemsPublisher = ""  # PUB socket to remote process launchers
NOTFinished = True            # flips to False once the mobility trace is exhausted
lock = Lock()                 # serializes sends on the exit publisher
# Splits a configuration string into its space-separated tokens.
def ConvertStringToList(string):
    """Return the space-separated tokens of *string* as a list.

    Used to turn the config ``IPList`` attribute into individual IPs.
    Note: despite the original comment, tokens are separated by single
    spaces, not commas. ``str.split`` already returns a list, so the
    redundant ``list(...)`` wrapper was dropped.
    """
    return string.split(" ")
# Starts a car process on a remote launcher; the starting port number lets the
# car spawn its own in-car processes (Incrementor reserves 5 ports per car).
def start_carProc(vid):
    """Ask a launcher machine (round-robin over SystemsIP) to spawn car *vid*.

    Each car is handed a base port (StartPort + Incrementor); Incrementor
    then advances by 5 so every car owns a block of 5 ports.
    """
    global StartPort, Incrementor, RunInForeGround, RunInForeGroundList, SystemsIPSubscript, SystemsIP, DistributedSystemsPublisher
    Message = DistributeProcesses_pb2.PROCESS_DISTRIBUTION()
    # Foreground either globally ("TRUE") or for individually listed ids.
    if RunInForeGround == "TRUE" or vid in RunInForeGroundList:
        Message.RUN_IN_FOREGROUND = True
    else:
        Message.RUN_IN_FOREGROUND = False
    Message.ID = vid
    Message.START_PORT_NO = str(StartPort + Incrementor)
    Message.MODULE_TYPE = "CAR"
    # NOTE(review): this skips subscript positions that land on index 0 —
    # presumably reserving SystemsIP[0] (the machine APs are sent to in
    # update_APInfoHashMap); confirm intent.
    if SystemsIPSubscript % len(SystemsIP) == 0:
        SystemsIPSubscript += 1
    # Publish the spawn request to the chosen launcher machine.
    DistributedSystemsPublisher.send_multipart([SystemsIP[SystemsIPSubscript % len(SystemsIP)], Message.SerializeToString()])
    SystemsIPSubscript = SystemsIPSubscript % len(SystemsIP) + 1
    Incrementor += 5
    LogsDebug("A Car process " + vid + " is started")
    LogsInfo("A Car process " + vid + " is started")
# Sends a message with ID="-1" to the Command Receiver on each remote machine,
# indicating that one experiment run is completed.
def SendKillSigToTerminals():
    """Broadcast the sentinel ID "-1" to every launcher machine."""
    Message = DistributeProcesses_pb2.PROCESS_DISTRIBUTION()
    Message.ID = "-1"
    for each in SystemsIP:
        DistributedSystemsPublisher.send_multipart([each, Message.SerializeToString()])
# Server that answers car-process requests for position and speed.
def MobilityServer():
    """REP server answering per-vehicle position ("POS") / speed ("SPE") requests.

    Replies come from VehInfoHashMap (kept current by run()/update_hashmap);
    INFO_EXISTS=0 signals that the vehicle has left the network. Loops until
    NOTFinished is cleared when the trace ends.
    """
    global ParserIP, ParserPort, VehInfoHashMap, NOTFinished
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    LogsDebug("Sumo Data Parser Server binds at the address "+ParserIP+":" + ParserPort)
    socket.bind("tcp://"+ParserIP+":"+ParserPort)
    # Protobuf messages are created once and reused for every request.
    Request = CARRequestToMT_pb2.CARREQUESTTOMT()
    GPSResponse = MTGPSResponse_pb2.MTGPSRESPONSE()
    SpeedResponse = MTSpeedResponse_pb2.MTSPEEDRESPONSE()
    while NOTFinished:
        # Wait for the next request from a client (blocking).
        vehInfoReq = socket.recv()
        Request.ParseFromString(vehInfoReq)
        # Parse the request: vehicle id and request type.
        VehID = Request.VID
        ReqType = Request.REQ
        # Build the matching response depending on the request type.
        if ReqType == "POS":
            LogsDebug("A request for Position is arrived for Vehicle ID " + VehID)
            if VehID in VehInfoHashMap:
                LogsDebug("Vehicle ID " + VehID + " exists and a response with updated Position data will be sent")
                GPSResponse.INFO_EXISTS = 1
                # The hashmap stores a JSON string; decode it per field.
                GPSResponse.X = float(json.loads(VehInfoHashMap[VehID])["X"])
                GPSResponse.Y = float(json.loads(VehInfoHashMap[VehID])["Y"])
                GPSResponse.DIR = float(json.loads(VehInfoHashMap[VehID])["DIR"])
                GPSResponse.LANE = json.loads(VehInfoHashMap[VehID])["LANE"]
            else:
                LogsDebug("Vehicle ID " + VehID + " do not exist in the network and response is sent")
                GPSResponse.INFO_EXISTS = 0
            DataToSend = GPSResponse.SerializeToString()
        elif ReqType == "SPE":
            LogsDebug("A request for Speed is arrived for Vehicle ID " + VehID)
            if VehID in VehInfoHashMap:
                LogsDebug("Vehicle ID " + VehID + " exists and a response with updated Speed data will be sent")
                SpeedResponse.INFO_EXISTS = 1
                SpeedResponse.SPEED = float(json.loads(VehInfoHashMap[VehID])["SPE"])
            else:
                LogsDebug("Vehicle ID " + VehID + " do not exist in the network")
                SpeedResponse.INFO_EXISTS = 0
            DataToSend = SpeedResponse.SerializeToString()
        socket.send(DataToSend)
# Refreshes the cached mobility sample for one vehicle.
def update_hashmap(vid, x, y, speed, angle, lane):
    """Store the latest position/speed/heading/lane of *vid* as a JSON blob."""
    global VehInfoHashMap
    sample = {
        "X": x,
        "Y": y,
        "SPE": speed,
        "DIR": angle,
        "LANE": lane
    }
    VehInfoHashMap[vid] = json.dumps(sample)
# Shared protobuf instance reused by udm_server and the forwarding helpers.
Message = MsgFromNodeToUDM_pb2.MSGFROMNODETOUDM()
# UDM module: server awaiting message-forward requests from clients (car or AP).
def udm_server():
    """PULL server handling INIT registrations and UNI/BROAD/MUL forwards.

    INIT messages register a node's address (store_ip_port); everything else
    is handed to forward_msg for delivery.
    """
    global ParserIP, UDMPort, Message
    context = zmq.Context()
    socket = context.socket(zmq.PULL)
    LogsDebug("UDM Server binds at the address "+ParserIP+":" + UDMPort)
    LogsInfo("UDM Server binds at the address "+ParserIP+":" + UDMPort)
    socket.bind("tcp://"+ParserIP+":"+UDMPort)
    while True:
        # Block until the next request from a client arrives.
        msgForwardReq = socket.recv()
        Message.ParseFromString(msgForwardReq)
        # mtype - message type (INIT, UNI, BROAD or MUL).
        # Only cars send INIT — once, when their process starts.
        try:
            mtype = Message.MTYPE
        except ValueError:
            # Original author saw this rarely and never root-caused it;
            # fall back to an empty type so the message is dropped.
            mtype = ""
            LogsError("This occur very very rarely and yet to investigate on why this error!! ...")
        if mtype == "INIT":
            LogsInfo("INIT message recieved from " + Message.SRC_ID)
            LogsDebug("INIT message recieved from " + Message.SRC_ID)
            store_ip_port(msgForwardReq)
        elif mtype == "UNI" or mtype == "BROAD" or mtype == "MUL":
            forward_msg(msgForwardReq)
# Called when the UDM receives an INIT message.
# Stores the IP and port details of the sending node.
def store_ip_port(msgForwardReq):
    """Record the sender's IP/PORT/TYPE in AllAddrTable, keyed by node id."""
    global AllAddrTable, Message
    Message.ParseFromString(msgForwardReq)
    _id = Message.SRC_ID
    newip = Message.IP
    newport = Message.PORT
    _type = Message.SRC_TYPE
    LogsDebug("From the INIT Message Vehicle " + _id + " IP PORT and TYPE are extracted " + newip + " " + newport + " " + _type
    + " and are stored")
    # Stored as a JSON string, matching the other *HashMap/*Table globals.
    new_data = {
        'IP' : newip,
        'PORT' : newport,
        'TYPE' : _type
    }
    AllAddrTable[_id] = json.dumps(new_data)
# Dispatches a forward request to the proper sender based on its MTYPE.
def forward_msg(msgForwardReq):
    """Route *msgForwardReq* to the unicast/broadcast/multicast sender.

    The message is silently dropped when the source id is no longer present
    in either the vehicle or the AP hashmap (the node left the network).
    """
    global VehInfoHashMap, APInfoHashMap, Message
    Message.ParseFromString(msgForwardReq)
    mtype = Message.MTYPE
    src_id = Message.SRC_ID
    # Both maps must be non-empty and the source must still be registered.
    if not (VehInfoHashMap and APInfoHashMap):
        return
    if src_id not in VehInfoHashMap and src_id not in APInfoHashMap:
        return
    if mtype == "UNI":
        send_unicast_msg(msgForwardReq)
    elif mtype == "BROAD":
        send_broadcast_msg(msgForwardReq)
    elif mtype == "MUL":
        send_multicast_msg(msgForwardReq)
# Forwards unicast messages.
def send_unicast_msg(msgForwardReq):
    """Forward a unicast message to dest_id when it is reachable.

    BS sources (and BS destinations) skip the range check; CAR/RSU pairs
    must be within CommRange of each other.
    NOTE(review): if src_id were in neither hashmap, src_details would be
    unbound below — forward_msg's membership check appears to guarantee
    presence, but confirm no other caller exists.
    """
    global APInfoHashMap, VehInfoHashMap, AllAddrTable, Message
    Message.ParseFromString(msgForwardReq)
    src_id = Message.SRC_ID
    dest_id = Message.DEST_ID
    LogsDebug("UNICAST message is recieved from " + src_id + " to forawrd it to " + dest_id)
    LogsInfo("UNICAST message is recieved from " + src_id + " to forawrd it to " + dest_id)
    # The source may be a vehicle or an AP; take coordinates from whichever map has it.
    if src_id in VehInfoHashMap:
        src_details = json.loads(VehInfoHashMap[src_id])
    elif src_id in APInfoHashMap:
        src_details = json.loads(APInfoHashMap[src_id])
    src_X = src_details['X']
    src_Y = src_details['Y']
    SrcExistsInAddrTable = True
    try:
        srcType_json = json.loads(AllAddrTable[src_id])
    except KeyError:
        SrcExistsInAddrTable = False
    # Destination must still be in the network and the source's address known.
    if (dest_id in VehInfoHashMap or dest_id in APInfoHashMap) and SrcExistsInAddrTable:
        # Destination address must be known to the UDM.
        if dest_id in AllAddrTable:
            # If the source type is BS do not check the communication range.
            if srcType_json['TYPE'] == "BS":
                PublishMsg(msgForwardReq, dest_id)
                LogsDebug("Received UNICAST message is forwarded to " + dest_id)
            else:
                Dest_json = json.loads(AllAddrTable[dest_id])
                # CAR/RSU destinations require a range check;
                # a BS destination skips it and always gets the message.
                if Dest_json['TYPE'] == "CAR":
                    car_json = json.loads(VehInfoHashMap[dest_id])
                    LogsDebug("range check between " + src_id + " -----------> " + dest_id)
                    LogsDebug(src_id+"("+src_X+","+src_Y+") "+dest_id+"("+ car_json["X"]+ ","+ car_json["Y"]+ ")")
                    if within_range(src_X, src_Y, car_json["X"], car_json["Y"]):
                        PublishMsg(msgForwardReq, dest_id)
                        LogsDebug("Received UNICAST message is forwarded to " + dest_id)
                elif Dest_json['TYPE'] == "RSU":
                    RSU_json = json.loads(APInfoHashMap[dest_id])
                    LogsDebug("range check between " + src_id + " -----------> " + dest_id)
                    LogsDebug(src_id+"("+src_X+","+src_Y+") "+dest_id+"("+ RSU_json["X"]+ ","+ RSU_json["Y"]+ ")")
                    if within_range(src_X, src_Y, RSU_json["X"], RSU_json["Y"]):
                        PublishMsg(msgForwardReq, dest_id)
                        LogsDebug("Received UNICAST message is forwarded to " + dest_id)
                elif Dest_json['TYPE'] == "BS":
                    PublishMsg(msgForwardReq, dest_id)
                    LogsDebug("Received UNICAST message is forwarded to " + dest_id)
    else:
        LogsError("Unicast message to VID " + dest_id + "failed, since vehicle doesn't exist or Address is still Unknown")
def PublishMsg(msgForwardReq, dest_id):
    """Copy the incoming message into a MESSAGEFROMUDM and publish it.

    The PUB topic is dest_id plus a suffix derived from SUB_DEST_ID so the
    correct in-node module (DCA / task distributor / DC / main) receives it.
    """
    global UDMPublisher, Message
    Message.ParseFromString(msgForwardReq)
    src_id = Message.SRC_ID
    dest_json = json.loads(AllAddrTable[dest_id])
    src_json = json.loads(AllAddrTable[src_id])
    DataToForward = MessageForwardFromUDM_pb2.MESSAGEFROMUDM()
    DataToForward.SRC_ID = src_id
    # Copy the payload of the incoming message field-by-field into the
    # outgoing message that is forwarded to the destination node.
    DataToForward.DATA.DATALINE1 = Message.DATA.DATALINE1
    DataToForward.DATA.DATALINE2 = Message.DATA.DATALINE2
    DataToForward.DATA.ID = Message.DATA.ID
    DataToForward.DATA.X = Message.DATA.X
    DataToForward.DATA.DIR = Message.DATA.DIR
    DataToForward.DATA.SPEED = Message.DATA.SPEED
    DataToForward.DATA.DATATYPE = Message.DATA.DATATYPE
    DataToForward.DATA.TIMESTAMP = Message.DATA.TIMESTAMP
    DataToForward.DATA.Y = Message.DATA.Y
    DataToForward.DATA.CH = Message.DATA.CH
    DataToForward.DATA.CMLIST = Message.DATA.CMLIST
    DataToForward.DATA.CLUSTERID = Message.DATA.CLUSTERID
    DataToForward.DATA.CLUSTER_SEQ_NUM = Message.DATA.CLUSTER_SEQ_NUM;
    DataToForward.DATA.FILENAME = Message.DATA.FILENAME
    DataToForward.DATA.DATA = Message.DATA.DATA
    DataToForward.DATA.CHUNKNUM = Message.DATA.CHUNKNUM
    DataToForward.DATA.LASTPKT = Message.DATA.LASTPKT
    DataToForward.DATA.START_TIME = Message.DATA.START_TIME
    DataToForward.DATA.TASKSEQNUM = Message.DATA.TASKSEQNUM
    DataToForward.DATA.TASKMAXTIME = Message.DATA.TASKMAXTIME
    DataToForward.DATA.FINISHTIME = Message.DATA.FINISHTIME
    DataToForward.DATA.RESULT = Message.DATA.RESULT
    # Extra routing fields; EXIT=False marks this as an ordinary forward.
    DataToForward.SRC_TYPE = src_json['TYPE']
    DataToForward.DEST_TYPE = dest_json['TYPE']
    DataToForward.EXIT = False
    DataToSend = DataToForward.SerializeToString()
    # "DCA" must be tested before "DC": any string containing "DCA" also
    # contains "DC", so the order of these branches is significant.
    if "DCA" in Message.SUB_DEST_ID:
        DestID = dest_id + "DCAID"
    elif "TASK" in Message.SUB_DEST_ID:
        DestID = dest_id + "TASKDID"
    elif "DC" in Message.SUB_DEST_ID:
        DestID = dest_id + "DCID"
    else:
        DestID = dest_id + "ID"
    UDMPublisher.send_multipart([str(DestID), DataToSend])
# Forwards broadcast messages.
def send_broadcast_msg(msgForwardReq):
    """Forward a broadcast to every registered node within CommRange.

    BS sources skip the range check entirely; CAR/RSU recipients must be
    within range, BS recipients always get the message. The source itself
    is excluded to avoid echoing the broadcast back.
    """
    global APInfoHashMap, VehInfoHashMap, AllAddrTable, Message
    Message.ParseFromString(msgForwardReq)
    src_id = Message.SRC_ID
    LogsDebug("Broadcast message is recieved from " + src_id)
    LogsInfo("Broadcast message is recieved from "+ src_id)
    # Source coordinates come from whichever map (vehicle or AP) has the id.
    if src_id in VehInfoHashMap:
        src_details = json.loads(VehInfoHashMap[src_id])
    elif src_id in APInfoHashMap:
        src_details = json.loads(APInfoHashMap[src_id])
    src_X = src_details['X']
    src_Y = src_details['Y']
    SrcExistsInAddrTable = True
    try:
        srcType_json = json.loads(AllAddrTable[src_id])
    except KeyError:
        SrcExistsInAddrTable = False
    # Iterate over a snapshot: other threads may mutate AllAddrTable while
    # we walk it (cars joining/leaving).
    CopyOfAllAddrTable = dict(AllAddrTable)
    for _id in CopyOfAllAddrTable:
        # The candidate must still exist in the network and must not be the
        # source id (avoid broadcasting back to the sender).
        if (_id in VehInfoHashMap or _id in APInfoHashMap) and _id != src_id and SrcExistsInAddrTable:
            # If the source type is BS do not check the communication range.
            if srcType_json['TYPE'] == "BS":
                PublishMsg(msgForwardReq, _id)
                LogsDebug("Received BROADCAST message is forwarded to" + _id)
            # CAR/RSU recipients require a range check; BS recipients skip it.
            else:
                broad_json = json.loads(CopyOfAllAddrTable[_id])
                if broad_json['TYPE'] == 'CAR':
                    car_json = json.loads(VehInfoHashMap[_id])
                    LogsDebug("range check between "+src_id+" -----------> "+_id)
                    if within_range(src_X, src_Y, car_json["X"], car_json["Y"]):
                        PublishMsg(msgForwardReq, _id)
                        LogsDebug("Received BROADCAST message is forwarded to" + _id)
                elif broad_json['TYPE'] == "RSU":
                    RSU_json = json.loads(APInfoHashMap[_id])
                    LogsDebug("range check between "+src_id+" -----------> "+_id)
                    if within_range(src_X, src_Y, RSU_json["X"], RSU_json["Y"]):
                        PublishMsg(msgForwardReq, _id)
                        LogsDebug("Received BROADCAST message is forwarded to" + _id)
                elif broad_json['TYPE'] == "BS":
                    PublishMsg(msgForwardReq, _id)
                    LogsDebug("Received BROADCAST message is forwarded to" + _id)
# Detects whether two points are within communication range (obstacles ignored).
def within_range(x1, y1, x2, y2):
    """Return True when the two points are within CommRange of each other.

    Straight-line Euclidean distance; obstacles are deliberately ignored.
    Coordinates arrive as strings (parsed from SUMO/config XML), and
    CommRange is also a string after config parsing — hence the float()
    conversions. Uses math.hypot instead of the hand-rolled sqrt-of-squares,
    and returns the comparison directly instead of if/return True/False.
    """
    global CommRange
    distance = math.hypot(float(x1) - float(x2), float(y1) - float(y2))
    LogsDebug("Comparison " + str(distance) +" < " + CommRange + " ??")
    return distance < float(CommRange)
# Cars already scheduled for exit (so each gets only one exit thread).
VehiclesExitList = []
# NOTE(review): redundant — `random` is already imported at the top of the file.
import random
# When a vehicle leaves the network this sends it an exit message.
def DeleteFromNetwork(each):
    """Send EXIT messages to every module of car *each*, then drop its entry.

    Runs on its own thread, one per exiting car.
    """
    # Sleep a random 0.01–1.0 s: at the last timestep all cars exit at once,
    # so this staggers the race to acquire the lock.
    time.sleep(float(random.randint(1,100))/100.0)
    DataToForward = MessageForwardFromUDM_pb2.MESSAGEFROMUDM()
    # EXIT=True tells the receiving modules to terminate.
    DataToForward.EXIT = True
    DataToSend = DataToForward.SerializeToString()
    print(each)
    lock.acquire()
    # Send the exit message to every module of the car (each has its own topic).
    UDMExitPublisher.send_multipart([str(each) + "DCAEXITID", DataToSend])
    UDMExitPublisher.send_multipart([str(each) + "TASKDEXITID", DataToSend])
    UDMExitPublisher.send_multipart([str(each) + "EXITID", DataToSend])
    UDMExitPublisher.send_multipart([str(each) + "EXITGPSID", DataToSend])
    UDMExitPublisher.send_multipart([str(each) + "EXITSPEEDID", DataToSend])
    lock.release()
    # Give the car processes time to terminate before removing the entry.
    time.sleep(12.0)
    del VehInfoHashMap[each]
# Deletes car data once the car no longer exists in the network.
def cleanup_VehInfoHashMap(VIDList):
    """Schedule an exit thread for every known car not present in *VIDList*.

    Called after each timestep (and with an empty list at shutdown to
    terminate all remaining cars).
    """
    global VehInfoHashMap, Keys, VehiclesExitList, UDMExitPublisher
    Keys = VehInfoHashMap.keys()
    for each in Keys:
        if each not in VIDList and each not in VehiclesExitList:
            VehiclesExitList.append(each)
            # One thread per exited car sends the exit message asynchronously.
            thread.start_new_thread(DeleteFromNetwork,(each,))
# A new PUB/SUB subscriber misses the first published message; FirstMsg makes
# update_APInfoHashMap send the first message twice to compensate.
FirstMsg = True
i = 0          # scratch counter (unused in the visible code)
_list = []     # type sentinels used by run() to distinguish one vehicle
_dict = {}     # (dict) from several vehicles (list) in a timestep
# First procedure called from the main guard.
def run():
    """Drive the simulation from the SUMO floating-car-data trace.

    Starts the UDM and mobility servers, spawns per-car processes as
    vehicles appear, refreshes positions each 0.1 s timestep, retires
    vehicles that leave, and finally sends termination messages to all
    cars, APs and remote launcher terminals.
    """
    # Real-time pacing state: each trace timestep is 0.1 s of wall time;
    # WaitPeriod/WaitDue compensate for processing overhead.
    start = 0.0
    end = 0.0
    FirstTime = 1
    WaitPeriod = 0.0
    WaitDue = 0.0
    # Background server answering message-forward requests.
    thread.start_new_thread(udm_server, ())
    # Background server answering vehicles' speed and position requests.
    thread.start_new_thread(MobilityServer, ())
    global SumoFloatingDataPath, VehInfoHashMap, APInfoHashMap, NOTFinished
    car_proc_list = []
    # Read AP info from the config XML and spawn AP processes remotely.
    update_APInfoHashMap()
    # Iterate through floating-car-data timestamps (0.1 s intervals);
    # xmliter streams the XML instead of loading it whole.
    #for iteration in root.iter('timestep'):
    for d in xmliter(SumoFloatingDataPath, 'timestep'):
        # Print progress once every 20 simulated seconds.
        if float(d['@time']) % 20.0 == 0:
            print("Current timestep = " , d['@time'])
        # Vehicles seen in THIS timestep; global maps keep the full picture.
        VIDList = []
        # A timestep with no vehicles has no 'vehicle' key and raises here.
        try:
            type(d['vehicle'])
        except:
            time.sleep(0.1)
            continue
        # One vehicle parses as a dict, several as a list of dicts.
        if type(d['vehicle']) == type(_dict):
            vid = d['vehicle']['@id']
            if vid not in VIDList:
                # New vehicle this timestep: record it.
                VIDList.append(vid)
                if vid not in car_proc_list:
                    # First time ever seen: spawn its car processes.
                    car_proc_list.append(vid)
                    start_carProc(vid)
            update_hashmap(vid, d['vehicle']['@x'], d['vehicle']['@y'], d['vehicle']['@speed'], d['vehicle']['@angle'],d['vehicle']['@lane'])
        elif type(d['vehicle']) == type(_list):
            for each in d['vehicle']:
                vid = each['@id']
                if vid not in VIDList:
                    # New vehicle this timestep: record it.
                    VIDList.append(vid)
                    if vid not in car_proc_list:
                        # First time ever seen: spawn its car processes.
                        car_proc_list.append(vid)
                        start_carProc(vid)
                update_hashmap(vid, each['@x'], each['@y'], each['@speed'], each['@angle'],each['@lane'])
        # Retire vehicles that disappeared from the trace this timestep.
        cleanup_VehInfoHashMap(VIDList)
        #LogsDebug("Currently the following vehicles exist in the network")
        #for key, value in VehInfoHashMap.iteritems():
        #LogsDebug("Vehicle ID " + key)
        # Print the pacing slack once every 20 simulated seconds.
        if float(d['@time']) % 20.0 == 0:
            print(0.1 - ((end - WaitPeriod) - start))
        end = time.time() # Record end time of the previous iteration
        if FirstTime == 1:
            # No previous iteration to measure on the first pass.
            end = 0.0
            FirstTime = 0
        WaitPeriod = 0.1 - ((end - WaitPeriod) - start) - WaitDue # Remaining slack in the 0.1 s budget
        start = time.time() # Record start time of this iteration
        try:
            # time.sleep raises on a negative argument.
            time.sleep(WaitPeriod + 0.03)
            WaitDue = 0.0
        except:
            print("exception raised",WaitPeriod)
            # Negative WaitPeriod: carry the deficit into the next step so a
            # persistent overrun does not accumulate unbounded delay.
            WaitDue = -1 * WaitPeriod
            WaitPeriod = 0.0
            time.sleep(WaitPeriod)
    print("Finished timesteps")
    # Empty list => every remaining vehicle is scheduled for termination.
    VIDList = []
    cleanup_VehInfoHashMap(VIDList)
    print("Sent Termination message to all active cars")
    time.sleep(18.0)
    Keys = APInfoHashMap.keys()
    DataToSend = ""
    # Send a termination message to every existing AP.
    for each in Keys:
        print(each)
        DataToForward = MessageForwardFromUDM_pb2.MESSAGEFROMUDM()
        DataToForward.EXIT = True
        DataToSend = DataToForward.SerializeToString()
        lock.acquire()
        UDMPublisher.send_multipart([str(each) + "ID", DataToSend])
        UDMPublisher.send_multipart([str(each) + "DCID", DataToSend])
        lock.release()
    # Tell MobilityServer (and in-car sensors) that the trace is finished.
    NOTFinished = False
    time.sleep(5.0)
    print("Sent Termination message to all active APs")
    LogsInfo("FINISHED")
    LogsDebug("FINISHED")
    # Give all vehicles and APs time to write their stats before teardown.
    time.sleep(60)
    # Tell the remote launcher terminals that one experiment run completed.
    SendKillSigToTerminals()
    sys.stdout.flush()
# Parses the AP config file to extract base-station and RSU details, storing
# coordinates in APInfoHashMap and addresses in AllAddrTable.
def update_APInfoHashMap():
    """Register every BS and RSU from ../config/ap/config.xml and spawn them.

    AP processes are always launched on SystemsIP[0]. The first spawn
    message is sent twice because a fresh PUB/SUB subscriber misses the
    first published message (see FirstMsg).
    """
    global APInfoHashMap, RunInForeGround, RunInForeGroundList, DistributedSystemsPublisher, FirstMsg
    LogsDebug("Parsing config file ../config/ap/config.xml")
    tree = ET.parse('../config/ap/config.xml')
    root = tree.getroot()
    Message = DistributeProcesses_pb2.PROCESS_DISTRIBUTION()
    for iteration in root.iter('config'):
        for APDetails in iteration.iter('BS'):
            # Coordinates for range checks.
            BS_data = {
                'X' : APDetails.get('x'),
                'Y' : APDetails.get('y')
            }
            APInfoHashMap[APDetails.get('id')] = json.dumps(BS_data)
            # Address/type for message routing.
            BS_IP_data = {
                'IP' : APDetails.get('ip'),
                'PORT' : APDetails.get('port'),
                'TYPE' : "BS"
            }
            AllAddrTable[APDetails.get('id')] = json.dumps(BS_IP_data)
            LogsDebug("BASESTATION module " + APDetails.get('id') + " is started in a separate terminal")
            LogsInfo("BASESTATION module " + APDetails.get('id') + " is started in a separate terminal")
            if RunInForeGround == "TRUE" or APDetails.get('id') in RunInForeGroundList:
                Message.RUN_IN_FOREGROUND = True
            else:
                Message.RUN_IN_FOREGROUND = False
            Message.ID = APDetails.get('id')
            Message.MODULE_TYPE = "BS"
            # Very first spawn message is duplicated (subscriber misses one).
            if FirstMsg:
                DistributedSystemsPublisher.send_multipart([SystemsIP[0], Message.SerializeToString()])
                FirstMsg = False
                time.sleep(2)
            DistributedSystemsPublisher.send_multipart([SystemsIP[0], Message.SerializeToString()])
        for APDetails in iteration.iter('RSU'):
            # Coordinates for range checks.
            RSU_data = {
                'X' : APDetails.get('x'),
                'Y' : APDetails.get('y')
            }
            APInfoHashMap[APDetails.get('id')] = json.dumps(RSU_data)
            # Address/type for message routing.
            RSU_IP_data = {
                'IP' : APDetails.get('ip'),
                'PORT' : APDetails.get('port'),
                'TYPE' : "RSU"
            }
            AllAddrTable[APDetails.get('id')] = json.dumps(RSU_IP_data)
            LogsDebug("ROADSIDE module " + APDetails.get('id') + " is started")
            LogsInfo("ROADSIDE module " + APDetails.get('id') + " is started")
            if RunInForeGround == "TRUE" or APDetails.get('id') in RunInForeGroundList:
                Message.RUN_IN_FOREGROUND = True
            else:
                Message.RUN_IN_FOREGROUND = False
            Message.ID = APDetails.get('id')
            Message.MODULE_TYPE = "RSU"
            # Very first spawn message is duplicated (subscriber misses one).
            if FirstMsg:
                DistributedSystemsPublisher.send_multipart([SystemsIP[0], Message.SerializeToString()])
                FirstMsg = False
                time.sleep(2)
            DistributedSystemsPublisher.send_multipart([SystemsIP[0], Message.SerializeToString()])
    LogsDebug("Parsed details from ../config/ap/config.xml and stored. Details::")
    # NOTE: iteritems() is Python-2-only, consistent with the rest of the file.
    for key, value in AllAddrTable.iteritems():
        LogsDebug(str(key) + " " + str(json.loads(value)))
def updateLoggingUtilConfigParams():
    """Load logging flags from ../config/logging_utility/config.xml and open the log file.

    Each <Tool name="..."> element carries the enable/file/stdout flags for
    one log level; "path" and "ExperimentNum" are file-level settings.
    """
    global LogInfoEnable, LogInfoFile, LogInfoStdOutput, LogDebugEnable, LogDebugFile, LogDebugStdOutput, LogStatsEnable, LogStatsFILE, LogStatsStdOutput, LogErrorEnable, LogErrorFILE, LogErrorStdOutput, LogFilePath, ExperimentNumber, LogFile
    tree = ET.parse('../config/logging_utility/config.xml')
    root = tree.getroot()
    for iteration in root.iter('config'):
        for LogDetails in iteration.iter('Tool'):
            name = LogDetails.get("name")
            if name == "LogInfo":
                LogInfoEnable = LogDetails.get('LogInfoEnable')
                LogInfoFile = LogDetails.get('LogInfoFile')
                LogInfoStdOutput = LogDetails.get('LogInfoStdOutput')
            elif name == "LogDebug":
                LogDebugEnable = LogDetails.get('LogDebugEnable')
                LogDebugFile = LogDetails.get('LogDebugFile')
                LogDebugStdOutput = LogDetails.get('LogDebugStdOutput')
            elif name == "LogStats":
                LogStatsEnable = LogDetails.get('LogStatsEnable')
                LogStatsFILE = LogDetails.get('LogStatsFILE')
                LogStatsStdOutput = LogDetails.get('LogStatsStdOutput')
            elif name == "LogError":
                LogErrorEnable = LogDetails.get('LogErrorEnable')
                LogErrorFILE = LogDetails.get('LogErrorFILE')
                LogErrorStdOutput = LogDetails.get('LogErrorStdOutput')
            elif name == "path":
                LogFilePath = LogDetails.get('LogFilePath')
            elif name == "ExperimentNum":
                ExperimentNumber = LogDetails.get('ExperimentNumber')
    print(LogInfoEnable, LogInfoFile, LogInfoStdOutput, LogDebugEnable, LogDebugFile, LogDebugStdOutput, LogStatsEnable, LogStatsFILE, LogStatsStdOutput, LogErrorEnable, LogErrorFILE, LogErrorStdOutput, LogFilePath, ExperimentNumber)
    filename = "../results/logs/" + "["+ExperimentNumber+"][MOBILITYANDUDM].txt"
    # Append mode: repeated runs of the same experiment accumulate in one file.
    LogFile = open(filename, "a")
# Creates the socket the UDM uses to publish forwarded messages.
def InitiatePubSocket():
    """Bind the UDM message-forwarding PUB socket on UDMPort + 1."""
    global UDMPublisher
    context = zmq.Context()
    UDMPublisher = context.socket(zmq.PUB)
    UDMPublisher.bind("tcp://"+str(ParserIP)+":"+str(int(UDMPort) + 1))
# Creates the socket the UDM uses to send exit/termination messages.
def InitiateExitPubSocket():
    """Bind the UDM exit-message PUB socket on UDMPort + 3."""
    global UDMExitPublisher
    context = zmq.Context()
    UDMExitPublisher = context.socket(zmq.PUB)
    UDMExitPublisher.bind("tcp://"+str(ParserIP)+":"+str(int(UDMPort) + 3))
# Parses the common config file for parser- and UDM-related settings.
def ReadConfigfile():
    """Load ../config/common/config.xml into the module globals, then set up sockets.

    After parsing it also binds the publisher sockets and loads the logging
    configuration, so calling this is the full initialization step.
    """
    global ParserIP, ParserPort, UDMPort, SumoFloatingDataPath, CommRange, RunInForeGround, RunInForeGroundList, SystemsIP
    print("Parsing config file ../config/common/config.xml")
    tree = ET.parse('../config/common/config.xml')
    root = tree.getroot()
    # Each setting lives in its own element whose attribute bears the same name.
    for neighbor in root.iter('ParserIP'):
        ParserIP = neighbor.get('ParserIP')
    for neighbor in root.iter('ParserPort'):
        ParserPort = neighbor.get('ParserPort')
    for neighbor in root.iter('UDMPort'):
        UDMPort = neighbor.get('UDMPort')
    for neighbor in root.iter('SumoFloatingDataPath'):
        SumoFloatingDataPath = neighbor.get('SumoFloatingDataPath')
    for neighbor in root.iter('CommRange'):
        CommRange = neighbor.get('CommRange')
    for neighbor in root.iter('RunInForeGround'):
        RunInForeGround = neighbor.get('RunInForeGround')
    for neighbor in root.iter('RunInForeGroundList'):
        RunInForeGroundList = neighbor.get('RunInForeGroundList')
    for neighbor in root.iter('IPList'):
        SystemsIP = ConvertStringToList(neighbor.get('IPList'))
    print("Extracted values ParserIP = " + ParserIP + " ParserPort = " + ParserPort + " UDMPort = " + UDMPort
    + " SumoFloatingDataPath = " + SumoFloatingDataPath + " CommRange = " + CommRange + "RunInForeGround = " +
    RunInForeGround + "RunInForeGroundList = " + RunInForeGroundList)
    InitiatePubSocket()
    InitiateExitPubSocket()
    PrepareSystemResources()
    updateLoggingUtilConfigParams()
def GetCurrentTime():
    """Return the wall-clock time formatted as "H:M:S.ms" (no zero padding).

    NOTE(review): under Python 2 ``microsecond / 1000`` is integer division
    (milliseconds); under Python 3 it would yield a float — the file's other
    Python-2-only constructs suggest Python 2 is intended.
    """
    currentDT = datetime.datetime.now()
    CurrentTime = str(currentDT.hour) + ":" + str(currentDT.minute) + ":" + str(currentDT.second) + "." + str(currentDT.microsecond / 1000)
    return CurrentTime
def GetLogString(message):
    """Format *message* with a timestamp, experiment number and module tag."""
    parts = ("[", GetCurrentTime(), "][", ExperimentNumber, "][MOBILITYANDUDM][", message, "]\n")
    return "".join(parts)
def LogsFilewrite(message):
    """Append the formatted *message* to the experiment log file."""
    global LogFile
    LogFile.write(GetLogString(message))
def LogsInfo(message):
    """Emit *message* at INFO level to the log file and/or stdout per config flags."""
    global LogInfoEnable, LogInfoFile, LogInfoStdOutput
    if LogInfoEnable != "TRUE":
        return
    if LogInfoFile == "TRUE":
        LogsFilewrite(message)
    if LogInfoStdOutput == "TRUE":
        print(GetLogString(message))
def LogsDebug(message):
    """Emit *message* at DEBUG level to the log file and/or stdout per config flags."""
    global LogDebugEnable, LogDebugFile, LogDebugStdOutput
    if LogDebugEnable == "TRUE":
        if LogDebugFile == "TRUE":
            LogsFilewrite(message)
        if LogDebugStdOutput == "TRUE":
            print(GetLogString(message))
def LogsStats(message):
    """Emit *message* at STATS level to the log file and/or stdout per config flags."""
    global LogStatsEnable, LogStatsFILE, LogStatsStdOutput
    if LogStatsEnable == "TRUE":
        if LogStatsFILE == "TRUE":
            LogsFilewrite(message)
        if LogStatsStdOutput == "TRUE":
            print(GetLogString(message))
def LogsError(message):
    """Emit *message* at ERROR level to the log file and/or stdout per config flags."""
    global LogErrorEnable, LogErrorFILE, LogErrorStdOutput
    if LogErrorEnable == "TRUE":
        if LogErrorFILE == "TRUE":
            LogsFilewrite(message)
        if LogErrorStdOutput == "TRUE":
            print(GetLogString(message))
def PrepareSystemResources():
    """Bind the PUB socket (UDMPort + 2) used to distribute processes to launcher machines."""
    global DistributedSystemsPublisher
    context = zmq.Context()
    DistributedSystemsPublisher = context.socket(zmq.PUB)
    DistributedSystemsPublisher.bind("tcp://"+str(ParserIP)+":"+str(int(UDMPort) + 2))
# Main entry point of this script: load configuration, then drive the trace.
if __name__ == "__main__":
    print("here1")
    # NOTE(review): `global` at module scope is a no-op, and LogString is
    # never defined in this file — this line has no effect.
    global LogString, SystemsIP
    # Uncomment when debugging is required:
    #sys.settrace(traceit)
    ReadConfigfile()
    print(SystemsIP)
    run()
| 37.1691
| 239
| 0.700717
| 3,585
| 30,553
| 5.883961
| 0.160948
| 0.00877
| 0.007964
| 0.007585
| 0.358538
| 0.300796
| 0.268844
| 0.238741
| 0.218356
| 0.217882
| 0
| 0.005138
| 0.184565
| 30,553
| 821
| 240
| 37.214373
| 0.841535
| 0.161654
| 0
| 0.283828
| 0
| 0
| 0.130914
| 0.008127
| 0.00165
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.049505
| null | null | 0.029703
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2904abee88ac63551da7aa60f4599002d25cdcf
| 2,757
|
py
|
Python
|
side_scroller/game.py
|
pecjas/Sidescroller-PyGame
|
dfcaf4ff95a1733714eaaeb00dc00cd876ab1468
|
[
"MIT"
] | null | null | null |
side_scroller/game.py
|
pecjas/Sidescroller-PyGame
|
dfcaf4ff95a1733714eaaeb00dc00cd876ab1468
|
[
"MIT"
] | null | null | null |
side_scroller/game.py
|
pecjas/Sidescroller-PyGame
|
dfcaf4ff95a1733714eaaeb00dc00cd876ab1468
|
[
"MIT"
] | null | null | null |
import pygame
from side_scroller.constants import BLACK
from side_scroller.settings import GameSettings, Fonts
from side_scroller.player import Player, Hitbox
from side_scroller.constants import GAME_NAME
class Game():
    """Top-level game state: player, screen, obstacles and FPS bookkeeping."""

    def __init__(self):
        # Player starts at x=0 on the bottom movement barrier.
        self.player = Player(0, Player.y_bottom_barrier)
        self.screen = pygame.display.set_mode((GameSettings.width, GameSettings.height))
        self.game_fps = GameSettings.minFps
        self.fps_clock = pygame.time.Clock()
        # Ratio of current fps to the minimum fps (scales frame-count limits).
        self.fps_over_min = 1
        # Per-frame multiplier that normalizes tick counters to the minimum fps.
        self.per_loop_adjustment = 1
        # Consecutive frames the player has hovered (no vertical movement).
        self.neutral_count = 0
        self.obstacles = list()
        self.initialize_game()

    def initialize_game(self):
        """Initialize pygame, set the window caption and draw the background."""
        pygame.init()
        pygame.display.set_caption(GAME_NAME)
        self.initialize_background()

    def initialize_background(self):
        """Blit the full background image onto the screen."""
        self.screen.blit(GameSettings.background.image, GameSettings.background.rect)

    def refresh_player_location_background(self):
        """Redraw the background patch under the player's rect (erases the old sprite)."""
        self.screen.blit(GameSettings.background.image, self.player.rect, self.player.rect)

    def update_score_hud(self):
        """Render the current score, erasing the previous HUD text first."""
        score_text = Fonts.hud_font.render(
            f"Score: {int(self.player.score.score)}", True, BLACK
        )
        # Blit the background over the HUD area before drawing the new text.
        self.screen.blit(
            GameSettings.background.image,
            score_text.get_rect(),
            score_text.get_rect())
        self.screen.blit(score_text, score_text.get_rect())

    def update_high_score(self):
        """Delegate high-score bookkeeping to the player."""
        self.player.adjust_high_scores()

    def prepare_new_game(self):
        """Reset player, obstacles, background and hover counter for a fresh round."""
        self.player.prepare_new_game()
        self.obstacles = list()
        self.initialize_background()
        self.neutral_count = 0

    def set_current_fps_over_min_fps(self):
        """Recompute the current-fps / minimum-fps ratio."""
        self.fps_over_min = self.game_fps / GameSettings.minFps

    def set_per_loop_adjustment(self):
        """Recompute the per-frame normalization factor (minFps / current fps)."""
        self.per_loop_adjustment = GameSettings.minFps / self.game_fps

    def is_hover_limit_reached(self):
        """Return True once the hover counter exceeds the fps-scaled hover limit."""
        return self.neutral_count > GameSettings.hoverLimit * self.fps_over_min

    def increase_count_to_obstacle_tick(self):
        """Advance the obstacle-spawn counter by the fps-normalized step."""
        self.player.score.countToObstacleTick += self.per_loop_adjustment

    def increase_count_to_level_tick(self):
        """Advance the level-up counter by the fps-normalized step."""
        self.player.score.countToLevelTick += self.per_loop_adjustment

    def tick_game_fps_clock(self):
        """Limit the frame rate to the current game fps."""
        self.fps_clock.tick(self.game_fps)

    def get_obstacles_in_player_path_y(self) -> list:
        """
        Returns a list of obstacles that could be hit if the player moved along y axis.
        """
        # A tall (10000 px) player-wide hitbox sweeping the whole vertical path.
        player_path_y = Hitbox(
            pygame.Rect(
                0,
                0,
                self.player.width,
                10000
            ),
            "neutral"
        )
        return pygame.sprite.spritecollide(player_path_y, self.obstacles, False)
| 31.329545
| 91
| 0.66848
| 341
| 2,757
| 5.1261
| 0.284457
| 0.045767
| 0.040046
| 0.048055
| 0.244279
| 0.086384
| 0.062929
| 0.062929
| 0
| 0
| 0
| 0.005764
| 0.244831
| 2,757
| 87
| 92
| 31.689655
| 0.833814
| 0.028654
| 0
| 0.126984
| 0
| 0
| 0.016579
| 0.011304
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.079365
| 0.015873
| 0.349206
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2909580065a2556ae0c58be271bee9537858bf1
| 366
|
py
|
Python
|
solutions/problem_230.py
|
ksvr444/daily-coding-problem
|
5d9f488f81c616847ee4e9e48974523ec2d598d7
|
[
"MIT"
] | 1,921
|
2018-11-13T18:19:56.000Z
|
2021-11-15T14:25:41.000Z
|
solutions/problem_230.py
|
MohitIndian/daily-coding-problem
|
5d9f488f81c616847ee4e9e48974523ec2d598d7
|
[
"MIT"
] | 2
|
2019-07-19T01:06:16.000Z
|
2019-08-01T22:21:36.000Z
|
solutions/problem_230.py
|
MohitIndian/daily-coding-problem
|
5d9f488f81c616847ee4e9e48974523ec2d598d7
|
[
"MIT"
] | 1,066
|
2018-11-19T19:06:55.000Z
|
2021-11-13T12:33:56.000Z
|
def get_min_drops(N, k):
    """Return the minimum number of drops needed, in the worst case, to find
    the critical floor of a building with `N` floors using `k` eggs.

    Classic "egg drop" recurrence. The original naive recursion re-solved the
    same (floors, eggs) states exponentially many times; this version memoizes
    each state so it is solved once.
    """
    memo = {}

    def solve(floors, eggs):
        # Base cases: 0/1 floors need 0/1 drops; with a single egg we must
        # scan linearly from the bottom, costing `floors` drops.
        if floors == 0 or floors == 1 or eggs == 1:
            return floors
        key = (floors, eggs)
        if key not in memo:
            # Drop from every candidate floor i: the egg either breaks
            # (search below with one fewer egg) or survives (search above).
            memo[key] = 1 + min(
                max(solve(i - 1, eggs - 1), solve(floors - i, eggs))
                for i in range(1, floors + 1)
            )
        return memo[key]

    return solve(N, k)


# Tests
assert get_min_drops(20, 2) == 6
assert get_min_drops(15, 3) == 5
| 20.333333
| 40
| 0.538251
| 61
| 366
| 3.065574
| 0.442623
| 0.160428
| 0.294118
| 0.128342
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065306
| 0.330601
| 366
| 17
| 41
| 21.529412
| 0.697959
| 0.013661
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f290ef8b6c3eb1ab597e06f8dc82e1806488e974
| 3,525
|
py
|
Python
|
src/advanceoperate/uploadthread.py
|
zengrx/S.M.A.R.T
|
47a9abe89008e9b34f9b9d057656dbf3fb286456
|
[
"MIT"
] | 10
|
2017-07-11T01:08:28.000Z
|
2021-05-07T01:49:00.000Z
|
src/advanceoperate/uploadthread.py
|
YanqiangHuang/S.M.A.R.T
|
47a9abe89008e9b34f9b9d057656dbf3fb286456
|
[
"MIT"
] | null | null | null |
src/advanceoperate/uploadthread.py
|
YanqiangHuang/S.M.A.R.T
|
47a9abe89008e9b34f9b9d057656dbf3fb286456
|
[
"MIT"
] | 6
|
2017-05-02T14:27:15.000Z
|
2017-05-15T05:56:40.000Z
|
#coding=utf-8
import sys, os
import socket
import hashlib
import virus_total_apis
from PyQt4 import QtCore
sys.path.append("..")
from publicfunc.fileanalyze import PEFileAnalize, getFileInfo
class UploadFile(QtCore.QThread):
    """Qt worker thread that checks a file against VirusTotal (Python 2 code)."""
    # Emitted with (message-type code, payload tuple) when a step finishes.
    finishSignal = QtCore.pyqtSignal(int, tuple)

    '''
    @filename: path of the file to analyze
    @apikey: the user's public API key
    '''
    def __init__(self, filename, apikey, parent=None):
        super(UploadFile, self).__init__(parent)
        self.filename = str(filename)#.encode('cp936')
        self.apikey = apikey
        print self.filename

    '''
    Check network connectivity; to be moved into common utilities later.
    '''
    def checkInternet(self):
        try:
            host = socket.gethostbyname("www.virustotal.com")
            s = socket.create_connection((host, 80), 2)
            print "internet ok"
            return True
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt etc.
            print "internet err"
            return False

    '''
    virustotal api function
    Parses the json response content.
    @apikey: the user's public API key
    Returns the response code and the detection results.
    '''
    def virustotalApi(self, apikey):
        key = apikey
        result = []
        result1 = [] # engines that detected the file
        result2 = [] # engines that did not detect anything
        vt = virus_total_apis.PublicApi(key)
        # Look up by MD5 first to avoid uploading files VirusTotal already knows.
        md5 = hashlib.md5(open(self.filename, 'rb').read()).hexdigest()
        response = vt.get_file_report(md5)
        # print response # full result
        print response["response_code"] # HTTP response code
        if 204 == response["response_code"]: # request rate limit exceeded
            print "204"
            return ("http_code", "", response["response_code"], "")
        response_code_ = response["results"]["response_code"]
        # print response_code_ # response code of the returned payload
        if 1 == response_code_:
            # parse the returned json content
            # show the engines that flagged the file first
            for n in response["results"]["scans"]:
                if response["results"]["scans"][n]["detected"]:
                    result1.append("{} ^ {}".format(n, response["results"]["scans"][n]["result"]))
                else:
                    result2.append("{} ^ {}".format(n, response["results"]["scans"][n]["result"]))
            result = sorted(result1, key=str.lower) + sorted(result2, key=str.lower)
        elif -2 == response_code_:
            pass
        else:
            # Unknown file: upload it for scanning instead.
            response = vt.scan_file(self.filename) # 32M limit
            if response["results"]["verbose_msg"]:
                result.append(response["results"]["verbose_msg"])
            else:
                result.append(response["results"]["permalink"])
        if 1 == response_code_:
            return ("scan_result", result, response["response_code"], response_code_)
        else:
            return ("permalink", result, response["response_code"], response_code_)
        # return ("scan_result", result, "http", response["response_code"], "code", response_code_)
        # if response_code_ is 1 else ("permalink", result, "http", response["response_code"], "code", response_code_)
        # print ("scan_result", result) if response_code_ is 1 else ("permalink", result)

    def run(self):
        """Thread entry: emit base file info, check the network, then query VirusTotal."""
        print "run"
        useless, baseinfo = getFileInfo(self.filename)
        infos = ("baseinfo", baseinfo)
        self.finishSignal.emit(2, infos)
        ret = self.checkInternet()
        if not ret:
            # Payload string means "network connection failed..." (user-facing, kept as-is).
            self.finishSignal.emit(3, tuple(['网络连接失败...']))
            return
        msg = self.virustotalApi(self.apikey)
        self.finishSignal.emit(1, msg)
class AddFileToQqueu(QtCore.QThread):
    """Placeholder thread for queueing a file; run() is not implemented yet."""
    def __init__(self, filename, parent=None):
        super(AddFileToQqueu, self).__init__(parent)
        self.filename = filename

    def run(self):
        # TODO: queueing logic not implemented yet.
        pass
| 34.558824
| 119
| 0.584681
| 364
| 3,525
| 5.497253
| 0.337912
| 0.113943
| 0.069965
| 0.041979
| 0.22089
| 0.178911
| 0.118941
| 0.118941
| 0
| 0
| 0
| 0.013064
| 0.283404
| 3,525
| 102
| 120
| 34.558824
| 0.779097
| 0.118582
| 0
| 0.138889
| 0
| 0
| 0.107191
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.027778
| 0.083333
| null | null | 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f291aa8b92b2b817f77cb42f08e1e15a9557dcfe
| 240
|
py
|
Python
|
JaroEliCall/src/functionality/sending_activation_key.py
|
jaroslaw-wieczorek/Project_IP_Telephony_Python_Voip
|
05143356fe91f745c286db8c3e2432714ab122e7
|
[
"MIT"
] | null | null | null |
JaroEliCall/src/functionality/sending_activation_key.py
|
jaroslaw-wieczorek/Project_IP_Telephony_Python_Voip
|
05143356fe91f745c286db8c3e2432714ab122e7
|
[
"MIT"
] | null | null | null |
JaroEliCall/src/functionality/sending_activation_key.py
|
jaroslaw-wieczorek/Project_IP_Telephony_Python_Voip
|
05143356fe91f745c286db8c3e2432714ab122e7
|
[
"MIT"
] | 1
|
2018-03-20T21:22:40.000Z
|
2018-03-20T21:22:40.000Z
|
import smtplib

# Send a single notification mail via Gmail's SMTP relay.
# NOTE(review): credentials are hard-coded in source — move them to environment
# variables or a secret store and rotate the exposed password.
server = smtplib.SMTP('smtp.gmail.com', 587)
try:
    server.starttls()
    server.login("tt0815550@gmail.com", "AureliaK1609")
    msg = "YOUR MESSAGE!"
    server.sendmail("e.kaczmarek01@gmail.com", "tt0815550@gmail.com", msg)
finally:
    # Always close the SMTP session, even if login/sendmail raises
    # (the original leaked the connection on any failure).
    server.quit()
| 21.818182
| 70
| 0.7375
| 32
| 240
| 5.53125
| 0.5625
| 0.180791
| 0.19209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104545
| 0.083333
| 240
| 10
| 71
| 24
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0.41841
| 0.096234
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f293d5631b8815a984d95fcfd9fd7e627ddefdd5
| 484
|
py
|
Python
|
tests/conftest.py
|
12rambau/commitizen
|
4309813974b6be72a246d47fc77f4c7f8ef64be1
|
[
"MIT"
] | 866
|
2020-03-18T06:09:07.000Z
|
2022-03-30T15:46:17.000Z
|
tests/conftest.py
|
12rambau/commitizen
|
4309813974b6be72a246d47fc77f4c7f8ef64be1
|
[
"MIT"
] | 364
|
2020-03-18T02:13:09.000Z
|
2022-03-31T01:57:12.000Z
|
tests/conftest.py
|
12rambau/commitizen
|
4309813974b6be72a246d47fc77f4c7f8ef64be1
|
[
"MIT"
] | 136
|
2020-03-20T18:06:32.000Z
|
2022-03-31T00:02:34.000Z
|
import pytest
from commitizen import cmd
@pytest.fixture(scope="function")
def tmp_git_project(tmpdir):
    """Yield a temporary directory initialized as an empty git repository.

    The cwd is switched to the tmpdir for the duration of the test
    (yield happens inside the `as_cwd` context).
    """
    with tmpdir.as_cwd():
        cmd.run("git init")
        yield tmpdir
@pytest.fixture(scope="function")
def tmp_commitizen_project(tmp_git_project):
    """Yield a temporary git project that already carries a commitizen config.

    Writes a minimal pyproject.toml with a [tool.commitizen] section; the cwd
    stays switched to the project directory while the test runs.
    """
    with tmp_git_project.as_cwd():
        cfg = tmp_git_project.join("pyproject.toml")
        cfg.write('[tool.commitizen]\nversion="0.1.0"\n')
        yield tmp_git_project
| 23.047619
| 80
| 0.71281
| 69
| 484
| 4.710145
| 0.434783
| 0.092308
| 0.2
| 0.16
| 0.196923
| 0.196923
| 0
| 0
| 0
| 0
| 0
| 0.007463
| 0.169421
| 484
| 20
| 81
| 24.2
| 0.800995
| 0
| 0
| 0.153846
| 0
| 0
| 0.152893
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.153846
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f296a031d5f0c54dcf0daafc3b2597cd41d7d8ee
| 524
|
py
|
Python
|
sharedData.py
|
vidalmatheus/DS.com
|
47b8d3cbb6d9ecd30178c4ba76408191c0715866
|
[
"MIT"
] | null | null | null |
sharedData.py
|
vidalmatheus/DS.com
|
47b8d3cbb6d9ecd30178c4ba76408191c0715866
|
[
"MIT"
] | null | null | null |
sharedData.py
|
vidalmatheus/DS.com
|
47b8d3cbb6d9ecd30178c4ba76408191c0715866
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, request, redirect,Blueprint, json, url_for, session
from modules import dataBase,usuario
import psycopg2, os, subprocess, bcrypt
#
#def getData():
#    DATABASE_URL = os.environ['DATABASE_URL']
#    con = psycopg2.connect(DATABASE_URL, sslmode='require')
#    return con

### connect to the dataBase
# NOTE(review): raises KeyError at import time if DATABASE_URL is unset.
DATABASE_URL = os.environ['DATABASE_URL']
connectionData = dataBase.dataAccess()
####
### Usuario — shared user/session access objects for the app
usersDataOnline = usuario.acessManager()
#userData = usuario.acessoUser()
###
| 20.96
| 93
| 0.740458
| 60
| 524
| 6.35
| 0.583333
| 0.144357
| 0.068241
| 0.104987
| 0.16273
| 0.16273
| 0
| 0
| 0
| 0
| 0
| 0.004415
| 0.135496
| 524
| 24
| 94
| 21.833333
| 0.836645
| 0.370229
| 0
| 0
| 0
| 0
| 0.038961
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
f2995fcdd8762cd23c69c1f140cd16f1c0b58140
| 6,183
|
py
|
Python
|
merlin/analysis/sequential.py
|
greentea1079/MERlin
|
f4c50cb15722263ee9397561b9ce4b2eddc3d559
|
[
"MIT"
] | 14
|
2019-08-19T15:26:44.000Z
|
2022-01-12T16:38:42.000Z
|
merlin/analysis/sequential.py
|
greentea1079/MERlin
|
f4c50cb15722263ee9397561b9ce4b2eddc3d559
|
[
"MIT"
] | 60
|
2019-08-19T15:48:37.000Z
|
2021-11-11T19:19:18.000Z
|
merlin/analysis/sequential.py
|
epigen-UCSD/MERlin
|
3aa784fb28a2a4ebae92cfaf3a72f30a459daab9
|
[
"MIT"
] | 13
|
2019-08-16T06:03:23.000Z
|
2021-08-02T15:52:46.000Z
|
import pandas
import rtree
import networkx
import numpy as np
import cv2
from skimage.measure import regionprops
from merlin.core import analysistask
from merlin.util import imagefilters
class SumSignal(analysistask.ParallelAnalysisTask):

    """
    An analysis task that calculates the signal intensity within the boundaries
    of a cell for all rounds not used in the codebook, useful for measuring
    RNA species that were stained individually.
    """

    def __init__(self, dataSet, parameters=None, analysisName=None):
        super().__init__(dataSet, parameters, analysisName)

        # Fill in defaults for optional parameters.
        if 'apply_highpass' not in self.parameters:
            self.parameters['apply_highpass'] = False
        if 'highpass_sigma' not in self.parameters:
            self.parameters['highpass_sigma'] = 5
        if 'z_index' not in self.parameters:
            self.parameters['z_index'] = 0

        if self.parameters['z_index'] >= len(self.dataSet.get_z_positions()):
            raise analysistask.InvalidParameterException(
                'Invalid z_index specified for %s. (%i > %i)'
                % (self.analysisName, self.parameters['z_index'],
                   len(self.dataSet.get_z_positions())))

        # 'apply_highpass' may arrive as a bool or a string; normalize to bool.
        self.highpass = str(self.parameters['apply_highpass']).upper() == 'TRUE'

        self.alignTask = self.dataSet.load_analysis_task(
            self.parameters['global_align_task'])

    def fragment_count(self):
        # One parallel fragment per field of view.
        return len(self.dataSet.get_fovs())

    def get_estimated_memory(self):
        # NOTE(review): presumably megabytes — confirm against analysistask.
        return 2048

    def get_estimated_time(self):
        return 1

    def get_dependencies(self):
        return [self.parameters['warp_task'],
                self.parameters['segment_task'],
                self.parameters['global_align_task']]

    def _extract_signal(self, cells, inputImage, zIndex) -> pandas.DataFrame:
        """Sum image intensity within each cell boundary at one z plane.

        Returns a DataFrame indexed by feature id with 'Intensity' and
        'Pixels' columns; a cell with no boundary at this z gets (0, 0).
        """
        cellCoords = []
        for cell in cells:
            regions = cell.get_boundaries()[zIndex]
            if len(regions) == 0:
                cellCoords.append([])
            else:
                pixels = []
                for region in regions:
                    # Convert boundary polygons from global to fov coordinates.
                    coords = region.exterior.coords.xy
                    xyZip = list(zip(coords[0].tolist(), coords[1].tolist()))
                    pixels.append(np.array(
                        self.alignTask.global_coordinates_to_fov(
                            cell.get_fov(), xyZip)))
                cellCoords.append(pixels)

        cellIDs = [str(cells[x].get_feature_id()) for x in range(len(cells))]

        # Rasterize each cell into a label mask (label i+1 marks cell i).
        mask = np.zeros(inputImage.shape, np.uint8)
        for i, cell in enumerate(cellCoords):
            cv2.drawContours(mask, cell, -1, i+1, -1)

        propsDict = {x.label: x for x in regionprops(mask, inputImage)}
        propsOut = pandas.DataFrame(
            data=[(propsDict[k].intensity_image.sum(),
                   propsDict[k].filled_area)
                  if k in propsDict else (0, 0)
                  for k in range(1, len(cellCoords) + 1)],
            index=cellIDs,
            columns=['Intensity', 'Pixels'])
        return propsOut

    def _get_sum_signal(self, fov, channels, zIndex):
        """Compute the per-cell summed signal for every channel in one fov."""
        fTask = self.dataSet.load_analysis_task(self.parameters['warp_task'])
        sTask = self.dataSet.load_analysis_task(self.parameters['segment_task'])

        cells = sTask.get_feature_database().read_features(fov)

        signals = []
        extracted = None
        for ch in channels:
            img = fTask.get_aligned_image(fov, ch, zIndex)
            if self.highpass:
                highPassSigma = self.parameters['highpass_sigma']
                # Odd kernel size covering +/- 3 sigma.
                highPassFilterSize = int(2 * np.ceil(3 * highPassSigma) + 1)
                img = imagefilters.high_pass_filter(img,
                                                    highPassFilterSize,
                                                    highPassSigma)
            extracted = self._extract_signal(cells, img, zIndex)
            signals.append(extracted.iloc[:, [0]])

        # adding num of pixels: the pixel count depends only on the cell masks,
        # so the last extraction is reused instead of recomputing it (the
        # original called _extract_signal a second time for the same result).
        signals.append(extracted.iloc[:, [1]])

        # Keyword axis: the positional axis argument to concat was deprecated
        # in pandas 1.x and removed in pandas 2.0.
        compiledSignal = pandas.concat(signals, axis=1)
        compiledSignal.columns = channels+['Pixels']

        return compiledSignal

    def get_sum_signals(self, fov: int = None) -> pandas.DataFrame:
        """Retrieve the sum signals calculated from this analysis task.

        Args:
            fov: the fov to get the sum signals for. If not specified, the
                sum signals for all fovs are returned.

        Returns:
            A pandas data frame containing the sum signal information.
        """
        if fov is None:
            return pandas.concat(
                [self.get_sum_signals(fov) for fov in self.dataSet.get_fovs()]
            )

        return self.dataSet.load_dataframe_from_csv(
            'sequential_signal', self.get_analysis_name(),
            fov, 'signals', index_col=0)

    def _run_analysis(self, fragmentIndex):
        """Normalize per-cell signal by its pixel area and save it for one fov."""
        zIndex = int(self.parameters['z_index'])
        channels, geneNames = self.dataSet.get_data_organization()\
            .get_sequential_rounds()

        fovSignal = self._get_sum_signal(fragmentIndex, channels, zIndex)
        # Divide every intensity column by the cell's pixel count (row-wise).
        normSignal = fovSignal.iloc[:, :-1].div(fovSignal.loc[:, 'Pixels'],
                                                axis=0)
        normSignal.columns = geneNames

        self.dataSet.save_dataframe_to_csv(
            normSignal, 'sequential_signal', self.get_analysis_name(),
            fragmentIndex, 'signals')
class ExportSumSignals(analysistask.AnalysisTask):
    """Analysis task that exports the computed sequential sum signals to csv."""
    def __init__(self, dataSet, parameters=None, analysisName=None):
        super().__init__(dataSet, parameters, analysisName)

    def get_estimated_memory(self):
        # NOTE(review): presumably megabytes — confirm against analysistask.
        return 2048

    def get_estimated_time(self):
        return 5

    def get_dependencies(self):
        # Depends only on the SumSignal task named by 'sequential_task'.
        return [self.parameters['sequential_task']]

    def _run_analysis(self):
        sTask = self.dataSet.load_analysis_task(
            self.parameters['sequential_task'])
        signals = sTask.get_sum_signals()

        self.dataSet.save_dataframe_to_csv(
            signals, 'sequential_sum_signals',
            self.get_analysis_name())
| 37.472727
| 80
| 0.603105
| 673
| 6,183
| 5.355126
| 0.273403
| 0.073807
| 0.029967
| 0.022198
| 0.295505
| 0.285516
| 0.222531
| 0.161487
| 0.13596
| 0.109323
| 0
| 0.00784
| 0.298561
| 6,183
| 164
| 81
| 37.70122
| 0.82315
| 0.075691
| 0
| 0.12069
| 0
| 0
| 0.06546
| 0.003903
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12069
| false
| 0.094828
| 0.068966
| 0.060345
| 0.301724
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
f2a53c9d24ff35deb138d84a030fd47b3eb06aa1
| 3,214
|
py
|
Python
|
Proj/2048/test_with_f/tpybrain.py
|
PiscesDream/Ideas
|
9ba710e62472f183ae4525f35659cd265c71392e
|
[
"Apache-2.0"
] | null | null | null |
Proj/2048/test_with_f/tpybrain.py
|
PiscesDream/Ideas
|
9ba710e62472f183ae4525f35659cd265c71392e
|
[
"Apache-2.0"
] | null | null | null |
Proj/2048/test_with_f/tpybrain.py
|
PiscesDream/Ideas
|
9ba710e62472f183ae4525f35659cd265c71392e
|
[
"Apache-2.0"
] | null | null | null |
from load import *
from _2048 import _2048
from numpy import *
from pybrain.datasets import ClassificationDataSet
from pybrain.utilities import percentError
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer, SigmoidLayer
def convolution(x, flat = False):
    """Append neighbor-match features to each flattened 4x4 board (Python 2).

    x: 2-D array whose columns are flattened 4x4 boards (16 rows each).
    For every column, emits the original 16 cells plus, for each adjacent
    cell pair, their common value when equal (0 otherwise). Returns one
    flat vector when `flat` is True, else a 2-D array.
    """
#    return x
    def dist(x, y):
        # Equal neighbors contribute their shared value; unequal ones 0.
        if x==y:
            return x
        else:
            return 0
    ans = []
    for ind in xrange(x.shape[1]):
        # Reshape one column back into a square board.
        ele = x[:, ind].reshape(int(x.shape[0] ** .5), -1)
        addition = []
        for i in xrange(4):
            for j in xrange(4):
                if i+1 < 4:
                    addition.append( dist(ele[i, j], ele[i+1, j]))
                if j+1 < 4:
                    addition.append( dist(ele[i, j], ele[i, j+1]))
        ans.append ( list(ele.flatten())+(addition) )
#        ans.append(addition)
    if flat:
        return array(ans).flatten()
    else:
        return array(ans)
def con1(x, flat = False, ori = False):
    # Convolve the boards with four fixed 1x2 / 2x1 edge kernels
    # (each cell minus half of one neighbor, in both orders and axes).
    return convolute(x, [array([[1, -0.5]]), array([[-0.5, 1]]), array([[1], [-0.5]]), array([[-0.5], [1]])], flat, ori)
def convolute(x, con_mats, flat = False, ori = True):
    """Apply each kernel in `con_mats` (valid-mode 2-D convolution, Python 2).

    x: 2-D array whose columns are flattened square boards.
    ori: when True, prepend the original board cells to the kernel responses.
    flat: when True, flatten the whole result into one vector.
    """
    ans = []
    n, m = x.shape
    a = int(n ** 0.5)  # side length of the square board
    for ind in xrange(m):
        ele = x[:, ind].reshape(a, -1)
        addition = []
        for con_mat in con_mats:
            cn, cm = con_mat.shape
            # Slide the kernel over every valid position (no padding).
            for i in xrange(a - cn + 1):
                for j in xrange( a - cm + 1):
                    acc = 0
                    for i_ in xrange(cn):
                        for j_ in xrange(cm):
                            acc += ele[i + i_, j + j_] * con_mat[i_, j_];
                    addition.append( acc )
        if ori:
            ans.append( list(ele.flatten()) + (addition) )
        else:
            ans.append(addition)
    if flat:
        return array(ans).flatten()
    else:
        return array(ans)
def softmax_dec(board, u, d, l, r, f):
    """Pick a move (0-3) from network `f`, vetoing moves that leave the board unchanged.

    u/d/l/r: (board, ...) results of moving up/down/left/right; a move whose
    resulting board equals the current one is given probability 0.
    """
    p = f(con1(board.reshape(-1,1), flat = True))
#    print p
    if all(board == u[0]):
        p[0] = 0
    if all(board == d[0]):
        p[1] = 0
    if all(board == l[0]):
        p[2] = 0
    if all(board == r[0]):
        p[3] = 0
#    p /= p.sum()
#    return random.choice(arange(4), p = p)
    # Greedy choice over the (masked) network output.
    return p.argmax()
# Train a small pybrain softmax classifier on recorded 2048 moves,
# then evaluate it by playing 100 games (Python 2 script).
if __name__ == '__main__':
    tr_x = load('rec_board.npy')   # recorded boards
    tr_y = load('rec_move.npy')    # recorded moves (labels 0-3)
    tr_x = con1(tr_x.T)            # add convolution features
    print tr_x.shape
    print tr_y.shape
    data = ClassificationDataSet(tr_x.shape[1], 1, nb_classes = 4)
    for ind, ele in enumerate(tr_x):
        data.addSample(ele, tr_y[ind])
    data._convertToOneOfMany()     # one-hot encode the targets
    print data.outdim
    fnn = buildNetwork(data.indim, 10, 10, data.outdim, hiddenclass=SigmoidLayer, outclass=SoftmaxLayer )
    trainer = BackpropTrainer( fnn, dataset=data)#, momentum=0.1, verbose=True, weightdecay=0.01)
    for i in xrange(3):
        print trainer.train()
    #trainer.trainUntilConvergence()
    game = _2048(length = 4)
    game.mul_test(100, lambda a, b, c, d, e: softmax_dec(a, b, c, d, e, f = fnn.activate), addition_arg = True)
| 30.903846
| 120
| 0.528002
| 449
| 3,214
| 3.69265
| 0.265033
| 0.043426
| 0.014475
| 0.028951
| 0.176116
| 0.170084
| 0.13269
| 0.13269
| 0.11339
| 0.11339
| 0
| 0.036229
| 0.330118
| 3,214
| 103
| 121
| 31.203884
| 0.73386
| 0.056627
| 0
| 0.192771
| 0
| 0
| 0.010924
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.096386
| null | null | 0.048193
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2a5d347767b990fa97d063da0ee6a2aa890bd9d
| 2,757
|
py
|
Python
|
run.py
|
mishel254/py-password-locker
|
c14dd314251f078125df39104b99384c8cbd292b
|
[
"MIT"
] | null | null | null |
run.py
|
mishel254/py-password-locker
|
c14dd314251f078125df39104b99384c8cbd292b
|
[
"MIT"
] | null | null | null |
run.py
|
mishel254/py-password-locker
|
c14dd314251f078125df39104b99384c8cbd292b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.8
from passwords import Credentials
from login import accounts
import random
# create credentials func
def create(fname,lname,user,passwords,email):
    """Build and return a new Credentials object from the given fields."""
    return Credentials(fname,lname,user,passwords,email)
#delete
'''
function to delete credentials & accounts
'''
def delete(credentials):
    """Delete the given credentials object via its own delete() method."""
    credentials.delete()
def deleteAccount(accounts):
    """Delete the given account object via its own deleteAccount() method."""
    accounts.deleteAccount()
'''
save credentials & accounts
'''
def saveCredentials(Credentials):
    """Persist the given credentials object (shadows the imported Credentials class)."""
    Credentials.saveCredentials()
def saveAccounts(accounts):
    """Persist the given accounts object via its own saveAccounts() method."""
    accounts.saveAccounts()
'''
search credentials
'''
def auth_user(email):
    # NOTE(review): returns the bound method itself (never called) and
    # ignores `email` — likely meant Credentials.auth_by_email(email).
    return Credentials.auth_by_email
'''
check if contact exists
'''
def account_exists(email):
    # Delegates to Credentials.accounts_display — TODO confirm it returns a bool.
    return Credentials.accounts_display(email)
'''
display
'''
def display_all_users():
    # NOTE(review): returns the method object without calling it; main()
    # compensates by invoking the returned value.
    return accounts.display_all_users
def main():
    """Interactive console loop for creating, listing, finding and deleting credentials."""
    print('Your name?')
    username = input()
    code = input(f'Press Enter {username}')
    while True:
        print("Use these short codes :")
        print("cc - create a new contact")
        print("dc - display contacts")
        print("fc -find a contact")
        print("ex -exit the contact list")
        print("del-to delete ")
        short_code = input()
        if short_code == 'cc':
            print('First name:')
            fname = input()
            print('last name:')
            lname = input()
            print('username')
            username = input()
            print('email:')
            email = input()
            print('password:')
            # NOTE(review): the random float is used as the input *prompt*;
            # the stored password is whatever the user types.
            passwords = input(round(random.random()))
            saveCredentials(create(fname,lname,username,passwords,email))
            print(f'Your data has been taken d{fname}')
        elif short_code == 'dc':
            if display_all_users():
                print('users:')
                for account in display_all_users():
                    print(f'{account.username} {account.email}')
        elif short_code == 'fc':
            print('enter email address to search')
            search = input()
            # NOTE(review): `accounts_exists` and `find_contact` are undefined
            # (the defined helper is `account_exists`) — this branch raises
            # NameError at runtime; also `email` may be unbound here.
            if accounts_exists(email):
                auth_by_email = find_contact(search)
                print(f'{search.first}')
            else:
                print('NO credentials found!')
        elif short_code == 'del':
            print('input Y confirm')
            confirm = input()
            # NOTE(review): delete() requires an argument and `credentials`
            # is undefined — this branch raises at runtime.
            if delete():
                credentials.delete()
                print('credential deleted')
            else:
                print('credentials not found')
        elif short_code == 'ex':
            print('happy coding!')
            break
        else:
            print('Not an existing shortcut')

if __name__ == '__main__':
    main()
| 22.056
| 73
| 0.573087
| 283
| 2,757
| 5.473498
| 0.335689
| 0.034861
| 0.038735
| 0.029697
| 0.036152
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001057
| 0.313384
| 2,757
| 124
| 74
| 22.233871
| 0.817221
| 0.018861
| 0
| 0.09589
| 0
| 0
| 0.176817
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.123288
| false
| 0.082192
| 0.041096
| 0.041096
| 0.219178
| 0.315068
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
f2ab54aefe1c397702c020ba41c25aedb91b9d9b
| 555
|
py
|
Python
|
setup.py
|
akvatol/CosmOrc
|
6ee1e1f3521a6d2b4c8eec104fa4e93db32d9352
|
[
"MIT"
] | 1
|
2018-12-07T17:21:39.000Z
|
2018-12-07T17:21:39.000Z
|
setup.py
|
akvatol/CosmOrc
|
6ee1e1f3521a6d2b4c8eec104fa4e93db32d9352
|
[
"MIT"
] | 8
|
2018-11-23T10:05:01.000Z
|
2019-04-09T19:17:43.000Z
|
setup.py
|
akvatol/CosmOrc
|
6ee1e1f3521a6d2b4c8eec104fa4e93db32d9352
|
[
"MIT"
] | 1
|
2018-12-07T17:21:40.000Z
|
2018-12-07T17:21:40.000Z
|
from setuptools import setup, find_packages
setup(
name='CosmOrc',
version='0.1',
include_package_data=True,
packages=find_packages(),
python_requires='>=3.6',
install_requires=[
'Click==7.0',
'numpy==1.16.2',
'pandas==0.24.2',
'pyaml==19.4.1',
'PySnooper==0.2.8',
'python-dateutil==2.8.0',
'pytz==2019.3',
'PyYAML==5.1.2',
'six==1.12.0',
'typing==3.7.4.1',
],
entry_points='''
[console_scripts]
CosmOrc = main:cli
''',
)
| 21.346154
| 43
| 0.506306
| 72
| 555
| 3.791667
| 0.638889
| 0.087912
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 0.293694
| 555
| 25
| 44
| 22.2
| 0.594388
| 0
| 0
| 0
| 0
| 0
| 0.381982
| 0.03964
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.041667
| 0
| 0.041667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2ada1e33fb51298d8dea6d25a8d7c5459098cce
| 3,976
|
py
|
Python
|
sweetie_bot_flexbe_behaviors/src/sweetie_bot_flexbe_behaviors/rbc18part2_sm.py
|
sweetie-bot-project/sweetie_bot_flexbe_behaviors
|
d8511564bb9d6125838b4373263fb68a8b858d70
|
[
"BSD-3-Clause"
] | null | null | null |
sweetie_bot_flexbe_behaviors/src/sweetie_bot_flexbe_behaviors/rbc18part2_sm.py
|
sweetie-bot-project/sweetie_bot_flexbe_behaviors
|
d8511564bb9d6125838b4373263fb68a8b858d70
|
[
"BSD-3-Clause"
] | null | null | null |
sweetie_bot_flexbe_behaviors/src/sweetie_bot_flexbe_behaviors/rbc18part2_sm.py
|
sweetie-bot-project/sweetie_bot_flexbe_behaviors
|
d8511564bb9d6125838b4373263fb68a8b858d70
|
[
"BSD-3-Clause"
] | 1
|
2019-12-23T05:06:26.000Z
|
2019-12-23T05:06:26.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sweetie_bot_flexbe_states.wait_for_message_state import WaitForMessageState
from sweetie_bot_flexbe_states.compound_action_state import CompoundAction
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Wed Nov 21 2018
@author: mutronics
'''
# NOTE: FlexBE-generated behavior — edits outside the [MANUAL] tags are lost
# when the behavior is regenerated.
class RBC18Part2SM(Behavior):
    '''
    RBC Presentation Part2
    '''

    def __init__(self):
        super(RBC18Part2SM, self).__init__()
        self.name = 'RBC18Part2'

        # parameters of this behavior

        # references to used behaviors

        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]

        # [/MANUAL_INIT]

        # Behavior comments:

    def create(self):
        # Joystick topic polled by every WaitKey state below.
        joy_topic = '/hmi/joystick'
        # x:20 y:231, x:260 y:311
        _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])

        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]

        # [/MANUAL_CREATE]

        # Flow: WaitKey1 -> SpasiboMut -> WaitKey2 -> SpasiboStefanShiron ->
        # WaitKey3 -> SpasiboZuviel -> finished. Each WaitKey waits for
        # joystick button 12 before the next voice sample is played.
        with _state_machine:
            # x:57 y:34
            OperatableStateMachine.add('WaitKey1',
                                        WaitForMessageState(topic=joy_topic, condition=lambda x: x.buttons[12], buffered=False, clear=False),
                                        transitions={'received': 'SpasiboMut', 'unavailable': 'failed'},
                                        autonomy={'received': Autonomy.Off, 'unavailable': Autonomy.Off},
                                        remapping={'message': 'message'})

            # x:179 y:106
            OperatableStateMachine.add('SpasiboMut',
                                        CompoundAction(t1=[0,0.0], type1='voice/play_wav', cmd1='spasibo_mut_tvoyo_uporstvo_vsegda_radovalo_menya', t2=[0,0.0], type2=None, cmd2='', t3=[0,0.0], type3=None, cmd3='', t4=[0,0.0], type4=None, cmd4=''),
                                        transitions={'success': 'WaitKey2', 'failure': 'failed'},
                                        autonomy={'success': Autonomy.Off, 'failure': Autonomy.Off})

            # x:442 y:360
            OperatableStateMachine.add('WaitKey3',
                                        WaitForMessageState(topic=joy_topic, condition=lambda x: x.buttons[12], buffered=False, clear=False),
                                        transitions={'received': 'SpasiboZuviel', 'unavailable': 'failed'},
                                        autonomy={'received': Autonomy.Off, 'unavailable': Autonomy.Off},
                                        remapping={'message': 'message'})

            # x:123 y:441
            OperatableStateMachine.add('SpasiboZuviel',
                                        CompoundAction(t1=[0,0.0], type1='voice/play_wav', cmd1='prekrasno_chuvstvuete_kakaya_skrita_vo_mne_mosch', t2=[0,0.0], type2=None, cmd2='', t3=[0,0.0], type3=None, cmd3='', t4=[0,0.0], type4=None, cmd4=''),
                                        transitions={'success': 'finished', 'failure': 'failed'},
                                        autonomy={'success': Autonomy.Off, 'failure': Autonomy.Off})

            # x:428 y:236
            OperatableStateMachine.add('SpasiboStefanShiron',
                                        CompoundAction(t1=[0,0.0], type1='voice/play_wav', cmd1='spasibo_ya_uslishala_vse_chto_hotela', t2=[0,0.0], type2=None, cmd2='', t3=[0,0.0], type3=None, cmd3='', t4=[0,0.0], type4=None, cmd4=''),
                                        transitions={'success': 'WaitKey3', 'failure': 'failed'},
                                        autonomy={'success': Autonomy.Off, 'failure': Autonomy.Off})

            # x:370 y:44
            OperatableStateMachine.add('WaitKey2',
                                        WaitForMessageState(topic=joy_topic, condition=lambda x: x.buttons[12], buffered=False, clear=False),
                                        transitions={'received': 'SpasiboStefanShiron', 'unavailable': 'failed'},
                                        autonomy={'received': Autonomy.Off, 'unavailable': Autonomy.Off},
                                        remapping={'message': 'message'})

        return _state_machine

    # Private functions can be added inside the following tags
    # [MANUAL_FUNC]

    # [/MANUAL_FUNC]
| 37.866667
| 217
| 0.647133
| 453
| 3,976
| 5.551876
| 0.375276
| 0.019085
| 0.014314
| 0.025447
| 0.518887
| 0.498211
| 0.498211
| 0.498211
| 0.467992
| 0.434592
| 0
| 0.043796
| 0.173038
| 3,976
| 104
| 218
| 38.230769
| 0.721107
| 0.21001
| 0
| 0.307692
| 1
| 0
| 0.211335
| 0.045067
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.076923
| 0
| 0.179487
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2b258bd5e08c7cfd6f403dd7e2e5de3a6cb8a04
| 9,512
|
py
|
Python
|
steel_segmentation/utils.py
|
marcomatteo/steel-segmentation-nbdev
|
dde19b0b3bf7657ab575e691bca1751592aecc67
|
[
"Apache-2.0"
] | 1
|
2021-08-20T14:56:26.000Z
|
2021-08-20T14:56:26.000Z
|
steel_segmentation/utils.py
|
marcomatteo/steel-segmentation-nbdev
|
dde19b0b3bf7657ab575e691bca1751592aecc67
|
[
"Apache-2.0"
] | 1
|
2021-05-03T16:42:34.000Z
|
2021-05-03T16:42:34.000Z
|
steel_segmentation/utils.py
|
marcomatteo/steel_segmentation
|
dde19b0b3bf7657ab575e691bca1751592aecc67
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_eda.ipynb (unless otherwise specified).
__all__ = ['palet', 'seed_everything', 'print_competition_data', 'get_train_pivot', 'get_train_df', 'count_pct',
'get_classification_df', 'rle2mask', 'make_mask', 'mask2rle', 'plot_mask_image', 'plot_defected_image',
'get_random_idx', 'show_defects']
# Cell
from fastai.vision.all import *
import numpy as np
import pandas as pd
import cv2
from matplotlib import pyplot as plt
# Cell
# RGB palette: one color per defect ClassId (used when drawing masks).
palet = [
    (249, 192, 12), # ClassId 1
    (0, 185, 241), # ClassId 2
    (114, 0, 218), # ClassId 3
    (249,50,12) # ClassId 4
]
# Cell
def seed_everything(seed=69):
    """
    Seeds `random`, `os.environ["PYTHONHASHSEED"]`,
    `numpy`, `torch` (CPU and CUDA) and `torch.backends`.
    """
    warnings.filterwarnings("ignore")
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    # Seed the CPU RNG too — the original only seeded CUDA, so CPU-side
    # torch sampling (and CPU-only runs) was not reproducible.
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
# Cell
def print_competition_data(p: Path):
    """Print every entry directly inside `p` (uses fastai's `Path.ls`)."""
    for elem in p.ls():
        print(elem)
# Cell
def get_train_pivot(df):
    """
    Summarize the training csv with ClassId as columns and values EncodedPixels
    """
    def row_to_classids(row: pd.Series):
        # Collect every ClassId column (skipping the helper "n" column)
        # whose EncodedPixels entry is present for this image.
        present = [
            str(class_id)
            for class_id in row.index
            if class_id != "n" and row[class_id] is not np.nan
        ]
        return " ".join(present)

    train_pivot = df.pivot(
        index="ImageId", columns="ClassId", values="EncodedPixels")
    # "n": how many defect classes each image carries.
    train_pivot["n"] = train_pivot.notnull().sum(axis=1)
    train_pivot["ClassIds"] = train_pivot.apply(row_to_classids, axis=1)
    return train_pivot
def get_train_df(path, only_faulty=False, pivot=False, hard_negatives=False):
    """
    Build the training DataFrame from all the images in `path/"train_images"`.

    Args:
        path (Path): competition data folder containing `train.csv` and the
            `train_images` directory.
        only_faulty (bool): keep only images that have at least one defect.
        pivot (bool): return the `get_train_pivot` summary instead.
        hard_negatives (bool): also keep defect-free images listed in
            `hard_negatives_patterns.txt`.

    Returns:
        pd.DataFrame: one row per (ImageId, ClassId), with a `status`
        column ("faulty"/"no_faulty") and an `ImageId_ClassId` key column.
    """
    img_path = path/"train_images"
    csv_file_name = path/"train.csv"
    train = pd.read_csv(csv_file_name)
    img_names = [img.name for img in get_image_files(img_path)]
    df_all = pd.DataFrame({'ImageId': img_names})
    # Outer merge + indicator marks images with no csv rows ("left_only",
    # i.e. no defects) and csv rows with no image on disk ("right_only").
    train_all = pd.merge(df_all, train, on="ImageId", how="outer", indicator=True)
    train_all.rename(columns={'_merge': 'status'}, inplace=True)
    rename_dict = {"both": "faulty", "left_only": "no_faulty", "right_only": "missing"}
    train_all["status"] = train_all["status"].cat.rename_categories(rename_dict)
    # Drop csv rows without an image, then take an explicit copy so the
    # assignments below never hit pandas chained-assignment warnings.
    train_all = train_all[train_all["status"] != "missing"].copy()
    # Column-level `inplace=True` fillna is a chained-assignment hazard
    # (deprecated in pandas 2.x); assign the filled column back instead.
    train_all["ClassId"] = train_all["ClassId"].fillna(0).astype('int64')
    train_all["EncodedPixels"] = train_all["EncodedPixels"].fillna(-1)
    train_all["ImageId_ClassId"] = train_all["ImageId"] + "_" + train_all["ClassId"].astype('str')
    if hard_negatives:
        hard_neg_patterns = pd.read_csv(
            path/"hard_negatives_patterns.txt", header=None, names=["ImageId"])
        cond = train_all["status"]=="faulty"
        cond_hn = train_all["ImageId"].isin(hard_neg_patterns["ImageId"].tolist())
        train_all = train_all.loc[cond | cond_hn]
    if only_faulty:
        train_all = train_all[train_all["status"]=="faulty"]
    if pivot:
        return get_train_pivot(train_all)
    return train_all
# Cell
def count_pct(df, column="ClassId"):
    """
    Return count and frequency stats for `column`.

    Args:
        df (pd.DataFrame): input data.
        column (str): column whose value counts are computed.

    Returns:
        pd.DataFrame: indexed by the sorted distinct values of `column`,
        with columns `num` (count) and `freq` (count / total).
    """
    class_count = df[column].value_counts().sort_index()
    class_count.index.set_names(column, inplace=True)
    class_count = class_count.to_frame()
    # The single column's name differs across pandas versions (the source
    # column name before 2.0, "count" from 2.0 on), so the old
    # `rename(columns={column: "num"})` silently did nothing on pandas 2.x;
    # set the name positionally instead.
    class_count.columns = ["num"]
    return class_count.assign(freq=lambda d: d["num"] / d["num"].sum())
# Cell
def get_classification_df(df: pd.DataFrame):
    """
    Get the DataFrame for the multiclass classification model.

    Expects `df` in the long `get_train_df` format (one row per
    ImageId/ClassId); returns one row per ImageId with a space-separated
    `ClassId_multi` label string (e.g. "1 3").
    """
    def assign_multi_ClassId(x):
        """Returns a string with multi ClassId sep with a blank space (' ')"""
        def fill_cols(c):
            # 5 is a sentinel for NaN; it is stripped out again below.
            return c.fillna(5).astype('int64').astype(str)
        # Assumes the pivot has exactly the ClassId columns 0..4 — TODO
        # confirm against `get_train_df` (ClassId is filled with 0 there).
        cols = [fill_cols(x[i]) for i in range(5)]
        # Series.replace: cells equal to '5' (the sentinel) become ''.
        cols = [col.replace('5', '') for col in cols]
        ClassId_multi = cols[0] + " " + cols[1] + " " + \
            cols[2] + " " + cols[3] + " " + cols[4]
        ClassId_multi = ClassId_multi.str.strip()
        # NOTE(review): as written this replace looks like a no-op; it was
        # presumably meant to collapse double spaces left by removed
        # sentinels — confirm against the notebook source.
        ClassId_multi = ClassId_multi.str.replace(' ', ' ')
        return ClassId_multi.str.strip()
    # Pivot ClassId onto the columns so each image is one row.
    train_multi = df.pivot(
        index="ImageId", columns="ClassId", values="ClassId")
    train_multi = train_multi.assign(
        ClassId_multi=lambda x: assign_multi_ClassId(x))
    return train_multi.reset_index()[["ImageId", "ClassId_multi"]]
# Cell
def rle2mask(rle, value=1, shape=(256,1600)):
    """
    Decode a run-length-encoded string into a 2D mask.

    Args:
        rle (str): space-separated "start length" pairs, 1-based starts,
            in column-major pixel order.
        value (int): value written into masked pixels.
        shape (tuple): (height, width) of the returned array.

    Returns:
        np.ndarray of shape `shape`: `value` on mask, 0 on background.

    Source: https://www.kaggle.com/paulorzp/rle-functions-run-lenght-encode-decode
    """
    nums = np.asarray(rle.split(), dtype=int)
    starts = nums[0::2] - 1          # convert 1-based starts to 0-based
    ends = starts + nums[1::2]
    flat = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for begin, end in zip(starts, ends):
        flat[begin:end] = value
    # Runs are column-major, so rebuild the 2D mask in Fortran order.
    return flat.reshape(shape, order="F")
# Cell
def make_mask(item, df, flatten=False):
    '''
    Given an item as:
        - row index [int] or
        - ImageId [str] or
        - file [Path] or
        - query [pd.Series],
    returns the image_item and mask with two types of shapes:
        - (256, 1600) if `flatten`,
        - (256, 1600, 4) if not `flatten`,

    `df` must be indexed by ImageId (the `get_train_pivot` layout).
    '''
    # Resolve `item` to the pivot row for the image.
    if isinstance(item, str): cond = df.loc[item]
    elif isinstance(item, int): cond = df.iloc[item]
    elif isinstance(item, Path): cond = df.loc[item.name]
    elif isinstance(item, pd.Series): cond = df.loc[item["ImageId"]]
    else:
        print(item, type(item))
        raise KeyError("invalid item")
    fname = cond.name
    # without 0 ClassId, only 1,2,3,4 ClassId
    # NOTE(review): positional slice assumes the row layout is
    # [ClassId 0, 1, 2, 3, 4, "n", "ClassIds"] — confirm against the pivot.
    labels = cond[1:-2]
    h, w = (256, 1600)
    masks = np.zeros((h, w, 4), dtype=np.float32) # 4:class 1~4 (ch:0~3)
    for itemx, label in enumerate(labels.values):
        if label is not np.nan:
            masks[:, :, itemx] = rle2mask(rle=label, value=1, shape=(h,w))
    if flatten:
        # Collapse the 4 channels to one plane whose pixel value is the
        # class id (assumes channels do not overlap).
        classes = np.array([1, 2, 3, 4])
        masks = (masks * classes).sum(-1)
    return fname, masks
# Cell
def mask2rle(mask):
    """
    Encode a binary mask as a run-length string.

    Args:
        mask (np.ndarray): 1 on mask, 0 on background.

    Returns:
        str: space-separated "start length" pairs (1-based, column-major).

    Source: https://www.kaggle.com/xhlulu/efficient-mask2rle
    """
    flat = mask.T.flatten()
    # A zero on each side makes transitions at the borders register too.
    padded = np.concatenate([[0], flat, [0]])
    runs = np.where(padded[1:] != padded[:-1])[0] + 1
    # Turn the even positions (run ends) into run lengths.
    runs[1::2] -= runs[::2]
    return ' '.join(str(v) for v in runs)
# Cell
def plot_mask_image(name: str, img: np.array, mask: np.array):
    """Plot a np.array image and mask with contours.

    Note: draws the contours directly onto `img` (mutates it in place).
    """
    fig, ax = plt.subplots(figsize=(15, 5))
    mask = mask.astype(np.uint8)
    for ch in range(4):
        # Two-value unpacking requires OpenCV 4.x (3.x returned 3 values).
        contours, _ = cv2.findContours(mask[:, :, ch], cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        for i in range(len(contours)):
            # One contour color per defect class (see `palet`).
            cv2.polylines(img, contours[i], True, palet[ch], 2)
    ax.set_title(name, fontsize=13)
    ax.imshow(img)
    plt.xticks([])
    plt.yticks([])
    plt.show()
# Cell
def plot_defected_image(img_path: Path, df: pd.DataFrame, class_id=None):
    """Plot a `img_path` Path image from the training folder with contours.

    Args:
        img_path (Path): path to the image in the training folder.
        df (pd.DataFrame): pivot DataFrame indexed by ImageId
            (see `get_train_pivot`).
        class_id (int, optional): show only this defect type (1-4);
            when None all defect types found are listed in the title.
    """
    img_name = img_path.name
    img = cv2.imread(str(img_path))
    _, mask = make_mask(img_path, df)  # (256, 1600, 4), one channel per class
    class_ids = np.arange(1, 5)
    # A class is present when its channel has any non-zero pixel.
    # (The previous `argmax(...).argmax(...) > 0` test missed defects whose
    # first occurrence sat in row 0 of every column.)
    cond = mask.any(axis=(0, 1))
    classid = class_ids[cond]
    if class_id is None:
        title = f"Original: Image {img_name} with defect type: {list(classid)}"
        plot_mask_image(title, img, mask)
    else:
        title = f"Original: Image {img_name} with defect type {class_id}"
        idx = class_id-1
        # Zero out every channel except the requested one.
        filter_mask = np.zeros((256, 1600, 4), dtype=np.float32)
        filter_mask[:, :, idx] = mask[:, :, idx]
        plot_mask_image(title, img, filter_mask)
# Cell
def get_random_idx(n: int) -> np.ndarray:
    """Return the integers 0..n-1 in a random order."""
    return np.random.default_rng().permutation(n)
# Cell
def show_defects(path, df, class_id=None, n=20, only_defects=True, multi_defects=False):
    """
    Plot multiple images.
    Attributes:
        `path`: [Path]
        `df`: [pd.DataFrame] only train_pivot
        `class_id`: [str or int] select a type of defect otherwise plot all kinds;
        `n`: select the number of images to plot;
        `only_defects` [bool, default True]: if False it shows even the no faulty images;
        `multi_defects` [bool, default False]: if True it shows imgs with multi defects.
    """
    # Assumes the pivot has a ClassId 0 column holding -1 for defect-free
    # images — TODO confirm (see the fillna values in `get_train_df`).
    cond_no_defects = df[0] == -1
    cond_multi_defects = df["n"] > 1
    df = df.loc[cond_no_defects] if not only_defects else df.loc[~cond_no_defects]
    df = df.loc[cond_multi_defects] if multi_defects else df.loc[~cond_multi_defects]
    if class_id is not None:
        cond_classId = df[class_id].notna()
        df = df.loc[cond_classId]
    imgid_from_df = df.index.tolist()
    # fastai's `L` list supports fancy indexing with an index array below.
    pfiles_list = L([path / "train_images" / imgid for imgid in imgid_from_df])
    perm_paths = pfiles_list[get_random_idx(len(pfiles_list))]
    for img_path in perm_paths[:n]:
        plot_defected_image(img_path, df)
| 34.589091
| 114
| 0.637931
| 1,362
| 9,512
| 4.299559
| 0.232012
| 0.031421
| 0.011954
| 0.013661
| 0.111339
| 0.053962
| 0.037227
| 0.013661
| 0.013661
| 0
| 0
| 0.023018
| 0.218987
| 9,512
| 275
| 115
| 34.589091
| 0.765244
| 0.201009
| 0
| 0.012195
| 1
| 0
| 0.094193
| 0.009542
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0
| 0.030488
| 0.006098
| 0.20122
| 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2b72d00fd0e6778383cb9c2b7f0e084dcbc51b2
| 5,798
|
py
|
Python
|
gui/wndRecipeProcedure.py
|
ralic/gnu_brewtools
|
ba09dc11e23d93e623f497286f3f2c3e9aaa41c2
|
[
"BSD-3-Clause"
] | null | null | null |
gui/wndRecipeProcedure.py
|
ralic/gnu_brewtools
|
ba09dc11e23d93e623f497286f3f2c3e9aaa41c2
|
[
"BSD-3-Clause"
] | null | null | null |
gui/wndRecipeProcedure.py
|
ralic/gnu_brewtools
|
ba09dc11e23d93e623f497286f3f2c3e9aaa41c2
|
[
"BSD-3-Clause"
] | null | null | null |
"""
* Copyright (c) 2008, Flagon Slayer Brewery
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Flagon Slayer Brewery nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Flagon Slayer Brewery ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Flagon Slayer Brewery BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import pygtk
pygtk.require("2.0")
import gtk, gtk.glade
from recipe import*
from obj_manager import*
from util import*
from wndRecipeIngredients import*
class ProcedureWindow(object):
    """
    #########################################################################
    # Sub-Class: ProcedureWindow                                            #
    #   Opens the GUI window to create and store a new                      #
    #   Procedures into the new Recipe Object. Creates the button           #
    #   handlers and has the functions to support them.                     #
    #-----------------------------------------------------------------------#
    """
    def __init__(self):
        # Path of the glade UI definition shared by the recipe windows.
        self.gladefile = 'gui/recipe.glade'

    def run(self, TempNewRecipe, Edit=None):
        # Open the "Add Procedures" window for the given recipe object.
        # `Edit` is truthy when an existing recipe is being edited.
        self.wTree = gtk.glade.XML(self.gladefile, 'wndAddProcedures')
        #store the new recipe object passed to here
        self.procedures = TempNewRecipe
        #store editing
        self.Editing = Edit
        #dictionary of Handlers for events in procedure GUI window
        #including the method function to support that handle
        # dic = {"Button Handler Name": sef.CorresondingFunctionTitle}
        # NOTE(review): "destory" below is misspelled, but the key must match
        # the signal name declared in the glade file — confirm before fixing.
        dic = {
            "btnAddProcedures_clicked_cb" : self.AddProcedures,
            "btnDelete_clicked_cb" : self.DeleteProcedures,
            "wndAddProcedures_destory_cb" : self.exit,
            "btnFinish_clicked_cb" : self.exit,
            "btnClearForm_clicked_cb" : self.clearFields,
        }
        self.wTree.signal_autoconnect(dic)
        #send dictionary of hanles to GUI
        #set window GUI for Recipe and open
        self.wind = self.wTree.get_widget('wndAddProcedures')
        self.setTrees()
        self.PopulateTrees()
        # NOTE(review): `run()` suggests the widget is a gtk.Dialog — confirm.
        self.wind.run()

    def setTrees(self, callback=None):
        """
        creates a tree list to hold all New Recipe Procedures
        """
        self.tre_Procedure = self.wTree.get_widget('tre_Procedure')
        # setupList (from util) binds a 3 string-column model to the view.
        self.tre_Procedures_List = setupList(self.tre_Procedure, ['Name', 'Time', 'Description'], (str,str,str))

    def PopulateTrees(self, callback=None):
        """
        Populate the procedures if any stored
        """
        for i in self.procedures.Procedures:
            self.tre_Procedures_List.append([i.name, "%s %s" % (i.timing_delta, i.time_unit), i.description])

    def AddProcedures(self, callback):
        """
        Function that supports the Add Procedure button.
        Function stores each single procedure created into
        a procedure object that is stored in a list of procedeures
        stored in the new Recipe object
        """
        #create variable to hold text input fro GUI text boxes
        ProcedureName = self.wTree.get_widget('Name_text')
        Time = self.wTree.get_widget('Time_txt')
        Description = self.wTree.get_widget('Description_txt')
        Unit = self.wTree.get_widget('TimeUnit').get_active_text()
        #call the Add_procedure function in recipe.py
        #creates an object for each single procedure
        #then stores them in a procedures list
        self.procedures.Add_procedure(ProcedureName.get_text(), Time.get_text(), Description.get_text(), Unit)
        self.tre_Procedures_List.insert(0,[ProcedureName.get_text(), "%s %s" % (Time.get_text(),Unit), Description.get_text()])
        #test purposes to see if inputs are correct and stored
        print 'reach look at fields procedure'
        print ProcedureName.get_text(), Description.get_text(), Time.get_text(), Unit
        self.clearFields(self)

    def DeleteProcedures(self, callback):
        """
        Function that supports the delete procedure button.
        Deletes the selected procedure in the Recipe procedure tree.
        """
        # NOTE(review): fails with TypeError/IndexError when nothing is
        # selected — the commented-out try/except below used to hide that.
        #try:
        Model, selected = self.tre_Procedure.get_selection().get_selected()
        selection = Model[selected]
        #retreive object to delete from the ingredient list
        name = selection[0]
        description = selection[2]
        self.procedures.Delete_Procedure(name, description)
        self.tre_Procedures_List.remove(selected)
        print 'Reach Delete Procedures'
        print "%s" % (selected)
        #except:
        #print ' no selection try again'

    def clearFields(self, callback):
        """
        Function that supports the Clear All Procedures Form button.
        Clears all text fields in the Procedures Form
        """
        print 'Reach Clear All Fields for Procedure'
        self.wTree.get_widget('Name_text').set_text('')
        self.wTree.get_widget('Description_txt').set_text('')
        self.wTree.get_widget('Time_txt').set_text('')
        # -1 deselects the combo box entry.
        self.wTree.get_widget('TimeUnit').set_active(-1)

    def exit(self, callback):
        """
        Function to handle quit
        """
        self.wind.destroy()
        print 'exit procedure'
| 35.570552
| 121
| 0.723698
| 772
| 5,798
| 5.349741
| 0.339378
| 0.02615
| 0.029056
| 0.043584
| 0.153995
| 0.10799
| 0.046489
| 0.03293
| 0.03293
| 0.03293
| 0
| 0.002062
| 0.163505
| 5,798
| 162
| 122
| 35.790123
| 0.849485
| 0.106071
| 0
| 0
| 0
| 0
| 0.149282
| 0.029101
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.1
| null | null | 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2b7bb0de76b2e0ba5ce5495b4efc9822958361d
| 1,018
|
py
|
Python
|
oidc_provider/migrations/0028_change_response_types_field_1_of_3.py
|
avallbona/django-oidc-provider
|
93b41e9ada42ca7c4bd6c860de83793ba3701d68
|
[
"MIT"
] | null | null | null |
oidc_provider/migrations/0028_change_response_types_field_1_of_3.py
|
avallbona/django-oidc-provider
|
93b41e9ada42ca7c4bd6c860de83793ba3701d68
|
[
"MIT"
] | null | null | null |
oidc_provider/migrations/0028_change_response_types_field_1_of_3.py
|
avallbona/django-oidc-provider
|
93b41e9ada42ca7c4bd6c860de83793ba3701d68
|
[
"MIT"
] | 1
|
2021-02-17T16:23:41.000Z
|
2021-02-17T16:23:41.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-12-16 02:43
from __future__ import unicode_literals
from django.db import migrations, models
import oidc_provider.fields
class Migration(migrations.Migration):
    """Step 1 of 3 of the `response_types` change: keep the existing data
    under `old_response_types` and add the new JSON multi-select
    `response_types` field."""

    dependencies = [
        ('oidc_provider', '0027_swappable_client_model'),
    ]

    operations = [
        migrations.RenameField(
            model_name='client',
            old_name='response_types',
            new_name='old_response_types',
        ),
        migrations.AddField(
            model_name='client',
            name='response_types',
            field=oidc_provider.fields.JsonMultiSelectModelField(choices=[('code', 'code (Authorization Code Flow)'), ('id_token', 'id_token (Implicit Flow)'), ('id_token token', 'id_token token (Implicit Flow)'), ('code token', 'code token (Hybrid Flow)'), ('code id_token', 'code id_token (Hybrid Flow)'), ('code id_token token', 'code id_token token (Hybrid Flow)')], default=set, verbose_name='Response Types'),
        ),
    ]
| 37.703704
| 415
| 0.652259
| 119
| 1,018
| 5.344538
| 0.453782
| 0.08805
| 0.075472
| 0.059748
| 0.081761
| 0.081761
| 0
| 0
| 0
| 0
| 0
| 0.026185
| 0.212181
| 1,018
| 26
| 416
| 39.153846
| 0.766833
| 0.066798
| 0
| 0.210526
| 1
| 0
| 0.367476
| 0.028511
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.157895
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2b89b6b2b0dc41d3a0e5d1ce5504c256753035d
| 926
|
py
|
Python
|
reservations/migrations/0001_initial.py
|
danielmicaletti/ride_cell
|
910be09ebc714b8c744edaf81559c8a9266473e3
|
[
"MIT"
] | null | null | null |
reservations/migrations/0001_initial.py
|
danielmicaletti/ride_cell
|
910be09ebc714b8c744edaf81559c8a9266473e3
|
[
"MIT"
] | null | null | null |
reservations/migrations/0001_initial.py
|
danielmicaletti/ride_cell
|
910be09ebc714b8c744edaf81559c8a9266473e3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-26 00:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: create the SpotReservation model."""

    initial = True

    dependencies = [
        ('parking', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='SpotReservation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('spot_reservation_date', models.DateField()),
                ('spot_reservation_start_time', models.TimeField()),
                ('spot_reservation_end_time', models.TimeField()),
                ('spot_location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reservation_spot', to='parking.SpotLocation')),
            ],
        ),
    ]
| 31.931034
| 158
| 0.62959
| 95
| 926
| 5.915789
| 0.610526
| 0.042705
| 0.049822
| 0.078292
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028531
| 0.242981
| 926
| 28
| 159
| 33.071429
| 0.773181
| 0.071274
| 0
| 0
| 1
| 0
| 0.186698
| 0.085181
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.15
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2bbbde7ac14cbda28bc8fe761c19a1e71889708
| 2,808
|
py
|
Python
|
pfrock/cli/config_parser.py
|
knightliao/pfrock
|
33587f11caeeccc11d0b8219b4e02df153905486
|
[
"Apache-2.0"
] | 62
|
2016-02-24T10:47:17.000Z
|
2019-04-27T01:36:56.000Z
|
pfrock/cli/config_parser.py
|
knightliao/pfrock
|
33587f11caeeccc11d0b8219b4e02df153905486
|
[
"Apache-2.0"
] | 1
|
2019-04-19T12:13:21.000Z
|
2021-08-10T09:16:09.000Z
|
pfrock/cli/config_parser.py
|
knightliao/pfrock
|
33587f11caeeccc11d0b8219b4e02df153905486
|
[
"Apache-2.0"
] | 24
|
2016-03-01T14:59:29.000Z
|
2019-09-02T08:12:00.000Z
|
# !/usr/bin/env python
# coding=utf8
import json
import traceback
from tornado.web import RequestHandler
from pfrock.cli import logger
from pfrock.core.constants import PFROCK_CONFIG_SERVER, PFROCK_CONFIG_ROUTER, PFROCK_CONFIG_PORT, ROUTER_METHOD, \
ROUTER_PATH, ROUTER_OPTIONS, ROUTER_HANDLER
from pfrock.core.lib import auto_str
@auto_str
class PfrockConfigRouter(object):
    """One router entry of the pfrock config: url path, allowed HTTP
    methods, handler name and handler options.

    An empty `methods` list means any method is allowed.
    """

    # HTTP methods tornado accepts; anything else in the config is dropped.
    SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS

    def __init__(self, path, methods, handler, options=None):
        """
        Args:
            path: url path pattern for this router.
            methods: list of method names, or the string "any".
            handler: handler identifier from the config.
            options: optional dict of handler options (None -> empty dict).
        """
        self.path = path
        self.handler = handler
        # The previous default was a mutable `{}` shared by every instance
        # created without options; use None as the sentinel instead.
        self.options = {} if options is None else options
        self.methods = []
        if methods == "any":
            self.methods = []
        else:
            for method in methods:
                method = method.upper()
                if method in self.SUPPORTED_METHODS:
                    self.methods.append(method)
@auto_str
class PfrockConfigServer(object):
    """A server config: the port to listen on plus its list of routers."""
    def __init__(self, routes, port):
        # routes: list of PfrockConfigRouter; port: TCP port from the config.
        self.routes = routes
        self.port = port
class PfrockConfigParser(object):
    """Parses the pfrock JSON config file into config objects."""

    @classmethod
    def _parse_router(cls, router):
        # Extract the router fields; path and handler are mandatory,
        # methods default to [] (any) and options to None.
        path = router[ROUTER_PATH] if ROUTER_PATH in router else None
        methods = router[ROUTER_METHOD] if ROUTER_METHOD in router else []
        handler = router[ROUTER_HANDLER] if ROUTER_HANDLER in router else None
        options = router[ROUTER_OPTIONS] if ROUTER_OPTIONS in router else None
        if path and handler:
            return PfrockConfigRouter(path, methods, handler, options)
        return None

    @classmethod
    def _parse_routers(cls, routers):
        # Keep only the entries that parsed successfully.
        router_list = []
        for router in routers:
            router = cls._parse_router(router)
            if router:
                router_list.append(router)
        return router_list

    @classmethod
    def _parse_servers(cls, server):
        # Returns a PfrockConfigServer, or None when port/routers missing.
        port = server[PFROCK_CONFIG_PORT] if PFROCK_CONFIG_PORT in server else None
        routers = cls._parse_routers(server[PFROCK_CONFIG_ROUTER]) if PFROCK_CONFIG_ROUTER in server else None
        if port and routers:
            return PfrockConfigServer(routers, port)
        return None

    @classmethod
    def do(cls, config_file_path):
        """Parse `config_file_path`; return the first server config or None."""
        with open(config_file_path, 'r') as fin:
            try:
                config_data = json.load(fin)
            except ValueError:
                # json.JSONDecodeError subclasses ValueError; the previous
                # bare `except:` also hid unrelated errors (even SystemExit).
                logger.error("%s not well formed \n%s" % (config_file_path, traceback.format_exc()))
                return None
        config_servers = config_data[PFROCK_CONFIG_SERVER] if PFROCK_CONFIG_SERVER in config_data else None
        if config_servers:
            for config_server in config_servers:
                config_server = cls._parse_servers(config_server)
                # todo: dev version just support one server
                return config_server
        return None
| 33.428571
| 114
| 0.649217
| 331
| 2,808
| 5.277946
| 0.244713
| 0.06182
| 0.027476
| 0.027476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000498
| 0.284544
| 2,808
| 83
| 115
| 33.831325
| 0.869089
| 0.026353
| 0
| 0.169231
| 0
| 0
| 0.00989
| 0
| 0
| 0
| 0
| 0.012048
| 0
| 1
| 0.092308
| false
| 0
| 0.092308
| 0
| 0.353846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
f2ca33e35faaa3a6ab066c758e3c492f242feea7
| 633
|
py
|
Python
|
lesson_3_set.py
|
pis2pis2/pis2pis2
|
a8ab83d89bbeaa2b4a6a2be684ae5b7513472a7f
|
[
"MIT"
] | null | null | null |
lesson_3_set.py
|
pis2pis2/pis2pis2
|
a8ab83d89bbeaa2b4a6a2be684ae5b7513472a7f
|
[
"MIT"
] | null | null | null |
lesson_3_set.py
|
pis2pis2/pis2pis2
|
a8ab83d89bbeaa2b4a6a2be684ae5b7513472a7f
|
[
"MIT"
] | 4
|
2019-11-12T06:59:35.000Z
|
2021-01-29T21:34:15.000Z
|
# The SET data type (set) ---------------------------
#------------------------------------------
# Initialisation
temp_set = {1,2,3}
print(type(temp_set), temp_set)
temp_list = [1,2,1,2,2,3,4,12,32]
# Building a set from a list removes the duplicates.
temp_set = set(temp_list)
print(type(temp_set), temp_set)
# Accessing set elements
print(100 in temp_set)
for element in temp_set:
    print(element)
# Functions over sets
#----------
# Operations on sets
# Methods
my_set_1 = set([1, 2, 3, 4, 5])
my_set_2 = set([5, 6, 7, 8, 9])
my_set_3 = my_set_1.union(my_set_2)        # all elements from both sets
print(my_set_3)
my_set_4 = my_set_1.difference(my_set_2)   # elements of set 1 not in set 2
print(my_set_4)
| 20.419355
| 52
| 0.598736
| 107
| 633
| 3.261682
| 0.336449
| 0.143266
| 0.094556
| 0.034384
| 0.272206
| 0.223496
| 0
| 0
| 0
| 0
| 0
| 0.069943
| 0.164297
| 633
| 31
| 53
| 20.419355
| 0.589792
| 0.315956
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.428571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 1
|
4b37de6730f4bf33d3ca155f712cb0a661d6d553
| 951
|
py
|
Python
|
docker-app/qfieldcloud/core/migrations/0045_auto_20211012_2234.py
|
livelihoods-and-landscapes/qfieldcloud-tcs
|
3075e19d89caa3090a0d2027a376336526572764
|
[
"MIT"
] | 34
|
2021-06-08T12:06:24.000Z
|
2022-03-07T11:45:10.000Z
|
docker-app/qfieldcloud/core/migrations/0045_auto_20211012_2234.py
|
livelihoods-and-landscapes/qfieldcloud-tcs
|
3075e19d89caa3090a0d2027a376336526572764
|
[
"MIT"
] | 139
|
2021-06-08T00:24:51.000Z
|
2022-03-28T09:59:54.000Z
|
docker-app/qfieldcloud/core/migrations/0045_auto_20211012_2234.py
|
livelihoods-and-landscapes/qfieldcloud-tcs
|
3075e19d89caa3090a0d2027a376336526572764
|
[
"MIT"
] | 8
|
2021-06-11T04:18:36.000Z
|
2022-02-15T20:52:58.000Z
|
# Generated by Django 3.2.8 on 2021-10-12 22:34
from django.db import migrations, models
def fill_in_datetime_fields(apps, schema_editor):
    # Backfill the new Job.started_at / finished_at columns from the
    # existing created_at / updated_at timestamps. (The previous comment
    # here was copy-pasted from an unrelated delta-table migration.)
    Job = apps.get_model("core", "Job")
    Job.objects.update(
        started_at=models.F("created_at"), finished_at=models.F("updated_at")
    )
class Migration(migrations.Migration):
    """Add Job.started_at / finished_at and backfill them from the
    created_at / updated_at timestamps."""

    dependencies = [
        ("core", "0044_alter_user_username"),
    ]

    operations = [
        migrations.AddField(
            model_name="job",
            name="finished_at",
            field=models.DateTimeField(blank=True, null=True, editable=False),
        ),
        migrations.AddField(
            model_name="job",
            name="started_at",
            field=models.DateTimeField(blank=True, null=True, editable=False),
        ),
        # Data migration; no reverse needed beyond dropping the columns.
        migrations.RunPython(fill_in_datetime_fields, migrations.RunPython.noop),
    ]
| 28.818182
| 81
| 0.64143
| 114
| 951
| 5.184211
| 0.578947
| 0.020305
| 0.047377
| 0.067682
| 0.321489
| 0.321489
| 0.22335
| 0.22335
| 0.22335
| 0.22335
| 0
| 0.026462
| 0.245005
| 951
| 32
| 82
| 29.71875
| 0.796657
| 0.121977
| 0
| 0.347826
| 1
| 0
| 0.098558
| 0.028846
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4b3d9db6b58f7211471a5f7c96ec4eb5f14b1e04
| 958
|
py
|
Python
|
backend/app/exceptions/exceptions.py
|
Michal-Miko/competitive-teams
|
6bb55542e06121f413248ddf0b75285296b610bb
|
[
"MIT"
] | null | null | null |
backend/app/exceptions/exceptions.py
|
Michal-Miko/competitive-teams
|
6bb55542e06121f413248ddf0b75285296b610bb
|
[
"MIT"
] | null | null | null |
backend/app/exceptions/exceptions.py
|
Michal-Miko/competitive-teams
|
6bb55542e06121f413248ddf0b75285296b610bb
|
[
"MIT"
] | null | null | null |
from app.database import crud
from fastapi import HTTPException, status
def check_for_team_existence(db, team_id):
    """Raise a 404 HTTPException if no team with `team_id` exists."""
    team = crud.get_team(db, team_id=team_id)
    if team is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Team {} not found".format(team_id),
        )
def check_for_player_existence(db, player_id):
    """Raise a 404 HTTPException if no player with `player_id` exists."""
    player = crud.get_player(db, player_id=player_id)
    if player is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Player {} not found".format(player_id),
        )
def check_for_match_existence(db, match_id):
    """Raise a 404 HTTPException if no match with `match_id` exists."""
    match = crud.get_match(db, match_id=match_id)
    if match is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Match {} not found".format(match_id),
        )
def check_for_tournament_existence(db, tournament_id):
    """Raise a 404 HTTPException if no tournament with `tournament_id` exists."""
    if crud.get_tournament(db, tournament_id=tournament_id) is None:
        # Fixed copy-paste bug: the detail message said "Match" here.
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Tournament {} not found".format(tournament_id))
| 41.652174
| 117
| 0.775574
| 150
| 958
| 4.633333
| 0.2
| 0.092086
| 0.063309
| 0.063309
| 0.417266
| 0.417266
| 0.417266
| 0.417266
| 0.417266
| 0.417266
| 0
| 0.014337
| 0.126305
| 958
| 22
| 118
| 43.545455
| 0.81601
| 0
| 0
| 0
| 0
| 0
| 0.075157
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4b453f03ad86e35ca9832f88f32cb3f426b3e7ef
| 1,191
|
py
|
Python
|
api/migrations/0002_element_pokemon.py
|
danielchikara/pokemon_in_hom
|
5da9baa3f87e012ae0d4278668409e1668bf87a6
|
[
"MIT"
] | null | null | null |
api/migrations/0002_element_pokemon.py
|
danielchikara/pokemon_in_hom
|
5da9baa3f87e012ae0d4278668409e1668bf87a6
|
[
"MIT"
] | null | null | null |
api/migrations/0002_element_pokemon.py
|
danielchikara/pokemon_in_hom
|
5da9baa3f87e012ae0d4278668409e1668bf87a6
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.11 on 2022-01-08 03:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Element and Pokemon models."""

    dependencies = [
        ('api', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Element',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Pokemon',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('image', models.CharField(max_length=700)),
                ('id_pokedex', models.IntegerField()),
                ('description', models.TextField()),
                ('id_element', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='element_pokemons', to='api.element')),
            ],
        ),
    ]
| 35.029412
| 146
| 0.572628
| 116
| 1,191
| 5.75
| 0.482759
| 0.035982
| 0.08096
| 0.107946
| 0.338831
| 0.338831
| 0.338831
| 0.338831
| 0.338831
| 0.338831
| 0
| 0.034078
| 0.285474
| 1,191
| 33
| 147
| 36.090909
| 0.749706
| 0.038623
| 0
| 0.518519
| 1
| 0
| 0.104112
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.074074
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4b45b1a0f99a331cd3db5bcbd3c80d4d359c59e4
| 5,468
|
py
|
Python
|
src/CLI/actioner/sonic-cli-ip-prefix.py
|
project-arlo/sonic-mgmt-framework
|
562cd84ff3fec9ca705c7df621742f2daa61ce71
|
[
"Apache-2.0"
] | 7
|
2019-10-17T06:12:02.000Z
|
2021-09-08T11:16:19.000Z
|
src/CLI/actioner/sonic-cli-ip-prefix.py
|
noolex/sonic-mgmt-framework
|
5493889adc47fc584b04dca1a0cc0a2007211df4
|
[
"Apache-2.0"
] | 207
|
2019-06-24T04:48:11.000Z
|
2020-05-06T05:51:37.000Z
|
src/CLI/actioner/sonic-cli-ip-prefix.py
|
noolex/sonic-mgmt-framework
|
5493889adc47fc584b04dca1a0cc0a2007211df4
|
[
"Apache-2.0"
] | 20
|
2019-06-27T19:24:45.000Z
|
2021-07-15T21:12:30.000Z
|
#!/usr/bin/python
###########################################################################
#
# Copyright 2019 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
import sys
import time
import json
import ast
import cli_client as cc
from rpipe_utils import pipestr
from scripts.render_cli import show_cli_output
import urllib3
urllib3.disable_warnings()
def generate_ipprefix_uri(args, delete):
    """Build the REST keypath (and request body) from the CLI tokens.

    args: flat CLI token list, e.g. ["ip", "prefix-list", NAME, ...,
          PREFIX, "ge", N, "le", M] — layout inferred from the parsing
          below; confirm against the CLI command model.
    delete: truthy when building the keypath for a delete operation.

    Returns a (keypath, body) tuple; body is None except for create.
    """
    _action = "PERMIT"
    _mode = set_name = ge = le = _maskrange_length = _ip_prefix = ''
    ge_val = le_val = prefix_exits = le_exits = ge_exits = is_error = i = 0
    # Single pass over the tokens; `i` tracks the current position so
    # keyword tokens can look ahead at their argument.
    for arg in args:
        if "permit" == arg:
            _action = "PERMIT"
        elif "deny" == arg:
            _action = "DENY"
        elif "prefix-list" == arg:
            set_name = args[i+1]
            if len(args) > 4:
                _ip_prefix = args[i+3]
                prefix_exits = 1
        elif "ge" == arg:
            ge_exits = 1
            ge_val = int(args[i+1])
            ge = args[i+1]
        elif "le" == arg:
            le_exits = 1
            le_val = int(args[i+1])
            le = args[i+1]
        elif "ip" == arg:
            _mode = "IPV4"
            # NOTE(review): `max` shadows the builtin; left unchanged here.
            max = "32"
        elif "ipv6" == arg:
            _mode = "IPV6"
            max = "128"
        else:
            temp = 1
        i = i + 1
    if prefix_exits:
        _prefix, _mask = _ip_prefix.split("/")
        mask_val = int(_mask)
        # Build the masklength-range string and validate len < ge <= le.
        if (ge_exits == 0 and le_exits == 0):
            _maskrange_length = "exact"
        elif (ge_exits == 1 and le_exits == 0):
            if (ge_val <= mask_val):
                is_error = 1
            _maskrange_length = ge + ".." + max
        elif (ge_exits == 0 and le_exits == 1):
            if (mask_val > le_val):
                is_error = 1
            _maskrange_length = _mask+".."+le
        else:
            if ((ge_val <= mask_val) or (mask_val > le_val) or (ge_val > le_val)):
                is_error = 1
            _maskrange_length = ge+".."+le
        if is_error:
            # NOTE(review): exit() in library code kills the whole CLI
            # process; that is the existing behavior, left as is.
            print ("%Error: Invalid prefix range, make sure: len < ge <= le")
            exit(1)
        if delete:
            # Delete targets the single prefix entry ("/" urlencoded as %2F).
            keypath = cc.Path('/restconf/data/openconfig-routing-policy:routing-policy/defined-sets/prefix-sets/prefix-set={prefix_list_name}/prefixes/prefix={prefix}%2F{mask},{masklength_range}', prefix_list_name=set_name, prefix=_prefix, mask=_mask, masklength_range=_maskrange_length)
            body = None
        else:
            # Create patches the whole prefix-sets container with one entry.
            keypath = cc.Path('/restconf/data/openconfig-routing-policy:routing-policy/defined-sets/prefix-sets')
            body = {"openconfig-routing-policy:prefix-sets":{"prefix-set":[{"name": set_name,"config":{"name": set_name,
                "mode": _mode},"prefixes":{"prefix":[{"ip-prefix": _ip_prefix,"masklength-range": _maskrange_length,"config": {
                "ip-prefix": _ip_prefix,"masklength-range": _maskrange_length,"openconfig-routing-policy-ext:action": _action}}]}}]}}
    else:
        # No prefix given: operate on the whole named prefix-set.
        keypath = cc.Path('/restconf/data/openconfig-routing-policy:routing-policy/defined-sets/prefix-sets/prefix-set={prefix_list_name}',
            prefix_list_name=set_name)
        body = None
    return keypath, body
def invoke(func, args):
    """Dispatch a prefix-list CLI command to the matching REST operation.

    :param func: command name selecting the REST call to perform
    :param args: positional CLI arguments; args[1] carries the prefix-set
        name for the "show specific" commands
    :return: the REST response object, or a cli_not_implemented response
        for unknown commands
    """
    body = None
    aa = cc.ApiClient()
    if func == 'ip_prefix_create':
        keypath, body = generate_ipprefix_uri(args, 0)
        return aa.patch(keypath, body)
    elif func == 'ip_prefix_delete':
        keypath, body = generate_ipprefix_uri(args, 1)
        return aa.delete(keypath)
    elif func in ('ip_prefix_show_all', 'ipv6_prefix_show_all'):
        # IPv4 and IPv6 sets live under the same prefix-sets container, so
        # the two "show all" commands share one request (previously two
        # duplicated branches).
        keypath = cc.Path('/restconf/data/openconfig-routing-policy:routing-policy/defined-sets/prefix-sets')
        return aa.get(keypath)
    elif func in ('ip_prefix_show_specific', 'ipv6_prefix_show_specific'):
        keypath = cc.Path('/restconf/data/openconfig-routing-policy:routing-policy/defined-sets/prefix-sets/prefix-set={name}',name=args[1])
        return aa.get(keypath)
    else:
        return aa.cli_not_implemented(func)
def run(func, args):
    """Execute a CLI handler and render its REST response.

    :param func: command name forwarded to invoke()
    :param args: CLI arguments; args[0] names the render template used by
        show_cli_output
    """
    try:
        response = invoke(func, args)
        if response.ok():
            # Render the payload (if any) through the CLI template engine.
            # The old "api_response is None -> Failed" branch was
            # unreachable (content had just been checked non-None).
            if response.content is not None:
                show_cli_output(args[0], response.content)
        else:
            # print() call form keeps this module parseable under both
            # Python 2 and Python 3 (the old statement form was py2-only).
            print(response.error_message())
        return
    except Exception as e:
        print("%Error: " + str(e))
        return
if __name__ == '__main__':
    # argv[1] selects the handler function; the rest are its arguments.
    # pipestr() records the CLI pipe/display options before dispatch.
    pipestr().write(sys.argv)
    run(sys.argv[1], sys.argv[2:])
| 34.389937
| 285
| 0.599854
| 702
| 5,468
| 4.481481
| 0.246439
| 0.066116
| 0.065798
| 0.046726
| 0.400191
| 0.363954
| 0.324857
| 0.315321
| 0.26637
| 0.261284
| 0
| 0.012494
| 0.253475
| 5,468
| 158
| 286
| 34.607595
| 0.758207
| 0.109912
| 0
| 0.226087
| 0
| 0.06087
| 0.244146
| 0.176671
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.069565
| null | null | 0.034783
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4b48a1ce648ccd7eddf1077ee9304a100d815be4
| 581
|
py
|
Python
|
day6_10/day6b.py
|
invincible-akshay/advent_of_code2020
|
81f207c6f7218ff235c31d67e1b4659cc482297c
|
[
"MIT"
] | null | null | null |
day6_10/day6b.py
|
invincible-akshay/advent_of_code2020
|
81f207c6f7218ff235c31d67e1b4659cc482297c
|
[
"MIT"
] | null | null | null |
day6_10/day6b.py
|
invincible-akshay/advent_of_code2020
|
81f207c6f7218ff235c31d67e1b4659cc482297c
|
[
"MIT"
] | null | null | null |
import utils.fileutils as futils


def _count_unanimous(answer_counts, group_size):
    """Return how many questions every member of the group answered yes to.

    :param answer_counts: mapping question-letter -> number of members who
        answered yes to it
    :param group_size: number of members in the group
    """
    return sum(1 for count in answer_counts.values() if count == group_size)


inp = futils.read_list("../data/day6.txt")
nums_dict = dict()
group_size, res_count = 0, 0
for line in inp:
    if line == "":
        # Blank line ends a group: tally its unanimous answers and reset.
        # (The tally was previously duplicated here and after the loop.)
        res_count += _count_unanimous(nums_dict, group_size)
        nums_dict = dict()
        group_size = 0
        continue
    group_size += 1
    for ch in line:
        nums_dict[ch] = 1 + nums_dict.get(ch, 0)
# The input may not end with a blank line, so flush the final group too.
res_count += _count_unanimous(nums_dict, group_size)
print("Sum of counts: {0}".format(res_count))
| 25.26087
| 48
| 0.567986
| 91
| 581
| 3.428571
| 0.395604
| 0.153846
| 0.115385
| 0.163462
| 0.397436
| 0.262821
| 0.262821
| 0.262821
| 0.262821
| 0.262821
| 0
| 0.024752
| 0.304647
| 581
| 22
| 49
| 26.409091
| 0.747525
| 0.04475
| 0
| 0.421053
| 0
| 0
| 0.061483
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.052632
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4b4dceb98d231438a803f497f8f31de32f299051
| 241
|
py
|
Python
|
sps_demo/accounts/api/serializers.py
|
JuanDM93/sps_django
|
df47c7ee63a1e99468644a6f428a6cdabc7ac6ae
|
[
"MIT"
] | null | null | null |
sps_demo/accounts/api/serializers.py
|
JuanDM93/sps_django
|
df47c7ee63a1e99468644a6f428a6cdabc7ac6ae
|
[
"MIT"
] | 1
|
2021-07-27T06:46:05.000Z
|
2021-07-27T06:46:05.000Z
|
sps_demo/accounts/api/serializers.py
|
JuanDM93/sps_django
|
df47c7ee63a1e99468644a6f428a6cdabc7ac6ae
|
[
"MIT"
] | null | null | null |
from rest_framework.serializers import ModelSerializer
from accounts.models import Account
class AccountSerializer(ModelSerializer):
    """Serialize Account instances for the REST API (account_id and limit only)."""

    class Meta:
        model = Account
        fields = [
            'account_id', 'limit',
        ]
| 21.909091
| 54
| 0.6639
| 22
| 241
| 7.181818
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.26971
| 241
| 10
| 55
| 24.1
| 0.897727
| 0
| 0
| 0
| 0
| 0
| 0.062241
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4b4e8e6fb685efd1c0bdeed695e0d638ddd30af1
| 1,376
|
py
|
Python
|
backend/framework/qlf/dashboard/migrations/0012_auto_20180921_1717.py
|
desihub/qlf
|
a9c455f7aee41d7901c89ae90dd821c617340a86
|
[
"BSD-3-Clause"
] | 8
|
2017-09-08T00:24:20.000Z
|
2019-02-03T07:31:03.000Z
|
backend/framework/qlf/dashboard/migrations/0012_auto_20180921_1717.py
|
desihub/qlf
|
a9c455f7aee41d7901c89ae90dd821c617340a86
|
[
"BSD-3-Clause"
] | 77
|
2017-06-15T21:39:09.000Z
|
2019-07-13T19:41:27.000Z
|
backend/framework/qlf/dashboard/migrations/0012_auto_20180921_1717.py
|
desihub/qlf
|
a9c455f7aee41d7901c89ae90dd821c617340a86
|
[
"BSD-3-Clause"
] | 5
|
2017-09-10T02:25:03.000Z
|
2019-02-06T20:55:59.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-09-21 17:17
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Fibermap model and drop the obsolete QA model."""

    dependencies = [
        ('dashboard', '0011_auto_20180727_1800'),
    ]

    operations = [
        # Fibermap stores per-fiber pointing/metadata arrays for one exposure.
        migrations.CreateModel(
            name='Fibermap',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ra_obs', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)),
                ('dec_obs', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)),
                ('fiber', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)),
                ('objtype', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=15), size=None)),
                ('exposure', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fibermap_exposure', to='dashboard.Exposure')),
            ],
        ),
        # QA removal: the FK to Job must be dropped before the model itself.
        migrations.RemoveField(
            model_name='qa',
            name='job',
        ),
        migrations.DeleteModel(
            name='QA',
        ),
    ]
| 38.222222
| 152
| 0.635174
| 146
| 1,376
| 5.835616
| 0.493151
| 0.076291
| 0.123239
| 0.158451
| 0.314554
| 0.314554
| 0.314554
| 0.314554
| 0.253521
| 0.253521
| 0
| 0.032075
| 0.229651
| 1,376
| 35
| 153
| 39.314286
| 0.771698
| 0.047965
| 0
| 0.107143
| 1
| 0
| 0.091048
| 0.017598
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4b53328e075db009dbb8d21c3c121da0a2ce955a
| 476
|
py
|
Python
|
qcodes/instrument_drivers/rohde_schwarz/HMC8042.py
|
LGruenhaupt/Qcodes
|
ffb74dae53c13c4885e61b5a2df3f833d524de04
|
[
"MIT"
] | 1
|
2019-12-07T01:25:49.000Z
|
2019-12-07T01:25:49.000Z
|
qcodes/instrument_drivers/rohde_schwarz/HMC8042.py
|
Dominik-Vogel/Qcodes
|
b4cf7d58bc1bf3be97af6bf48f57cb6b87d588bb
|
[
"MIT"
] | 12
|
2020-10-13T16:53:37.000Z
|
2020-10-14T17:16:22.000Z
|
qcodes/instrument_drivers/rohde_schwarz/HMC8042.py
|
Dominik-Vogel/Qcodes
|
b4cf7d58bc1bf3be97af6bf48f57cb6b87d588bb
|
[
"MIT"
] | 1
|
2020-05-03T22:47:40.000Z
|
2020-05-03T22:47:40.000Z
|
from .private.HMC804x import _RohdeSchwarzHMC804x
from qcodes.utils.deprecate import deprecate_moved_to_qcd
@deprecate_moved_to_qcd(alternative="qcodes_contrib_drivers.drivers.RohdeSchwarz.HMC8042.RohdeSchwarzHMC8042")
class RohdeSchwarzHMC8042(_RohdeSchwarzHMC804x):
    """
    This is the qcodes driver for the Rohde & Schwarz HMC8042 Power Supply

    Deprecated: the driver has moved to qcodes_contrib_drivers (see the
    decorator's ``alternative``); this shim is kept for compatibility.
    """
    def __init__(self, name, address, **kwargs):
        # The HMC8042 is the two-channel member of the HMC804x family.
        super().__init__(name, address, num_channels=2, **kwargs)
| 39.666667
| 110
| 0.781513
| 55
| 476
| 6.418182
| 0.672727
| 0.07932
| 0.090652
| 0.107649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.062651
| 0.128151
| 476
| 11
| 111
| 43.272727
| 0.787952
| 0.147059
| 0
| 0
| 0
| 0
| 0.182051
| 0.182051
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
4b5a4dc0a470a6b6a0219e69281685d307bd50e5
| 465
|
py
|
Python
|
ltc/analyzer/migrations/0006_graphitevariable_function.py
|
v0devil/jltom
|
b302a39a187b8e1154c6deda636a4db8b30bb40b
|
[
"MIT"
] | 4
|
2016-12-30T13:26:59.000Z
|
2017-04-26T12:07:36.000Z
|
ltc/analyzer/migrations/0006_graphitevariable_function.py
|
v0devil/jltom
|
b302a39a187b8e1154c6deda636a4db8b30bb40b
|
[
"MIT"
] | null | null | null |
ltc/analyzer/migrations/0006_graphitevariable_function.py
|
v0devil/jltom
|
b302a39a187b8e1154c6deda636a4db8b30bb40b
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.20 on 2021-05-27 14:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('analyzer', '0005_auto_20210526_1755'),
]
operations = [
migrations.AddField(
model_name='graphitevariable',
name='function',
field=models.CharField(choices=[('A', 'avg'), ('MA', 'max'), ('MI', 'min')], default='A', max_length=12),
),
]
| 24.473684
| 117
| 0.584946
| 51
| 465
| 5.235294
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097983
| 0.253763
| 465
| 18
| 118
| 25.833333
| 0.67147
| 0.098925
| 0
| 0
| 1
| 0
| 0.167866
| 0.055156
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4b5c74257ca507c7289c021413a4bdff6ed7d6a6
| 2,865
|
py
|
Python
|
Python/061.py
|
jaimeliew1/Project_Euler_Solutions
|
963c9c6d6571cade8f87341f97a6a2cd1af202bb
|
[
"MIT"
] | null | null | null |
Python/061.py
|
jaimeliew1/Project_Euler_Solutions
|
963c9c6d6571cade8f87341f97a6a2cd1af202bb
|
[
"MIT"
] | 1
|
2018-04-16T21:01:50.000Z
|
2018-04-16T21:01:50.000Z
|
Python/061.py
|
jaimeliew1/Project_Euler_Solutions
|
963c9c6d6571cade8f87341f97a6a2cd1af202bb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Solution to Project Euler problem X
Author: Jaime Liew
https://github.com/jaimeliew1/Project_Euler_Solutions
"""
import math
def isTri(n):
    """True when n is triangular, n = k(k+1)/2 for integer k."""
    k = (math.sqrt(1 + 8 * n) - 1) / 2
    return k.is_integer()


def isSqr(n):
    """True when n is a perfect square."""
    root = math.sqrt(n)
    return root.is_integer()


def isPent(n):
    """True when n is pentagonal, n = k(3k-1)/2."""
    k = (1 + math.sqrt(1 + 24 * n)) / 6
    return k.is_integer()


def isHex(n):
    """True when n is hexagonal, n = k(2k-1)."""
    k = (1 + math.sqrt(1 + 8 * n)) / 4
    return k.is_integer()


def isHept(n):
    """True when n is heptagonal, n = k(5k-3)/2."""
    k = (3 + math.sqrt(9 + 40 * n)) / 10
    return k.is_integer()


def isOct(n):
    """True when n is octagonal, n = k(3k-2)."""
    k = (2 + math.sqrt(4 + 12 * n)) / 6
    return k.is_integer()


# Predicate order matters: isTri ends up as the highest bit in Jnum.id.
isPoly = [isTri, isSqr, isPent, isHex, isHept, isOct]
class Jnum:
    """A candidate number tagged with the polygonal families it belongs to."""

    id = 0  # each nth bit is 1 if it is an nGon number
    n = 0
    isMultiPoly = False

    def __init__(self, num):
        self.n = num
        # Fold each predicate result into the mask, most significant first
        # (isTri becomes the highest bit, isOct bit 0).
        bits = 0
        for test in isPoly:
            bits = (bits << 1) | test(num)
        self.id = bits
        if bin(bits).count('1') > 1:
            self.isMultiPoly = True

    def __eq__(self, other):
        return self.n == other.n

    def __ne__(self, other):
        return self.n != other.n
def checkThisSet(thisSet, depth, maxDepth, numSet):
    """Depth-first search for a cyclic chain of polygonal numbers.

    Extends `thisSet` with candidates from `numSet` whose first two digits
    match the last chain member's final two digits, requiring every member
    to contribute a distinct polygonal kind (disjoint `id` bits).  Returns
    the completed chain of length `maxDepth`, or [Jnum(0)] as the
    "not found" sentinel (Jnum equality compares .n only).
    """
    for q in (q for q in numSet if q not in thisSet):
        workingBit = 0
        qIsCandidate = True
        if str(thisSet[-1].n)[2:] == str(q.n)[:2]:  # if cyclical
            workingBit = 0
            # Each member must supply an id bit no earlier member used, so
            # the chain covers maxDepth distinct polygonal families.
            for i in (thisSet + [q]):
                if workingBit & (i.id) == 0:
                    workingBit |= (i.id)
                else:
                    qIsCandidate = False
                    break
        else:
            qIsCandidate = False
        if qIsCandidate:
            if depth == maxDepth - 1:
                # Last slot: the chain must also wrap around to the start.
                if str(thisSet[0].n)[:2] == str(q.n)[2:]:  # if cyclical back to start
                    return list(thisSet + [q])
                else:
                    # NOTE(review): returns the sentinel for the whole level
                    # here instead of trying other q — matches original flow.
                    return [Jnum(0)]
            furtherTesting = checkThisSet(list(thisSet + [q]), depth + 1, maxDepth, numSet)
            if furtherTesting != [Jnum(0)]:
                return furtherTesting
    return [Jnum(0)]
def run():
    """Solve the cyclic polygonal-set problem; return the sum of the chain."""
    ### generate set of possible candidates
    numSet = []
    for i in range(1000, 10000):
        a = Jnum(i)
        if a.id != 0:
            if a.isMultiPoly:
                # Split a multi-family number into one single-bit Jnum per
                # family so each copy can claim exactly one chain slot.
                # NOTE(review): temp aliases a, so temp.id = 1<<k also
                # mutates a; harmless because fresh Jnum copies are appended.
                temp = a
                for k, bit in enumerate(bin(a.id)[2:].zfill(6)[::-1]):
                    if bit == '1':
                        temp.id = 1 << k
                        numSet.append(Jnum(a.n))
                        numSet[-1].id = 1 << k
            else:
                numSet.append(a)
    #print("there are ",len(numSet)," candidate numbers.\n")
    ### Recursive search loop
    for i in numSet:
        currentSet = checkThisSet(list([i]), 1, 6, numSet)
        if currentSet != [Jnum(0)]:
            break
    # Sum the members of the first (unique) cyclic set found.
    Sum = 0
    for i in currentSet:
        #print(i.n, bin(i.id)[2:].zfill(6))
        Sum += i.n
    return Sum


if __name__ == "__main__":
    print(run())
| 25.131579
| 89
| 0.506806
| 386
| 2,865
| 3.689119
| 0.295337
| 0.03441
| 0.042135
| 0.021067
| 0.099017
| 0.089888
| 0.066011
| 0.066011
| 0
| 0
| 0
| 0.036938
| 0.347993
| 2,865
| 113
| 90
| 25.353982
| 0.725375
| 0.124258
| 0
| 0.157895
| 0
| 0
| 0.004018
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.144737
| false
| 0
| 0.013158
| 0.105263
| 0.381579
| 0.013158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 1
|
4b6108a9e3aec6079aa6d738f5836676b16bd14f
| 4,195
|
py
|
Python
|
examples/zoomed.py
|
ColmTalbot/psd-covariance-matrices
|
59631dd1860e9cf28658df1ce28b10f6f8d59868
|
[
"MIT"
] | null | null | null |
examples/zoomed.py
|
ColmTalbot/psd-covariance-matrices
|
59631dd1860e9cf28658df1ce28b10f6f8d59868
|
[
"MIT"
] | null | null | null |
examples/zoomed.py
|
ColmTalbot/psd-covariance-matrices
|
59631dd1860e9cf28658df1ce28b10f6f8d59868
|
[
"MIT"
] | 2
|
2021-07-01T02:00:10.000Z
|
2021-08-02T07:29:03.000Z
|
#!/usr/bin/env python
"""
Compute the comparison of the analytic and experimental PSD matrices.
This will generate Figure 1.
This is probably the only example that will run in a reasonable time without
a GPU.
For more details on the method see https://arxiv.org/abs/2106.13785.
"""
import numpy as np
import matplotlib.pyplot as plt
from bilby.core.utils import create_white_noise, create_frequency_series
from scipy.signal.windows import tukey
from scipy.interpolate import interp1d
from tqdm.auto import trange
from coarse_psd_matrix.utils import (
compute_psd_matrix,
create_parser,
fetch_psd_data,
)
from coarse_psd_matrix.plotting import plot_psd_matrix
from matplotlib import rcParams
# Global plot styling (LaTeX text, serif fonts); repeated inside __main__.
rcParams["font.family"] = "serif"
rcParams["font.serif"] = "Computer Modern Roman"
rcParams["font.size"] = 20
rcParams["text.usetex"] = True
rcParams["grid.alpha"] = 0

if __name__ == "__main__":
    # Command-line configuration for the analysed event/detector segment.
    parser = create_parser()
    args = parser.parse_args()
    interferometer = args.interferometer
    outdir = args.outdir
    duration = args.duration
    medium_duration = args.medium_duration
    sampling_frequency = args.sampling_frequency
    low_frequency = args.low_frequency
    tukey_alpha = args.tukey_alpha
    # Zoomed frequency band (Hz) for the PSD-matrix comparison.
    minimum_frequency = 480
    maximum_frequency = 530
    event = args.event

    # Fetch/cache the strain data and medium-resolution PSD for the event.
    data = fetch_psd_data(
        interferometer_name=interferometer,
        event=event,
        duration=duration,
        sampling_frequency=sampling_frequency,
        low_frequency=low_frequency,
        tukey_alpha=tukey_alpha,
        medium_duration=medium_duration,
        outdir=outdir,
    )
    # Analytic coarse PSD matrix, returned as an SVD triple (U, s, V).
    svd = compute_psd_matrix(
        interferometer_name=interferometer,
        event=event,
        duration=duration,
        sampling_frequency=sampling_frequency,
        low_frequency=low_frequency,
        tukey_alpha=tukey_alpha,
        minimum_frequency=minimum_frequency,
        maximum_frequency=maximum_frequency,
        medium_duration=medium_duration,
        outdir=outdir,
    )
    # Interpolate the PSD onto the frequency grid of a 256 s noise stream.
    psd = data["medium_psd"][: sampling_frequency // 2 * medium_duration + 1]
    original_frequencies = create_frequency_series(
        duration=medium_duration, sampling_frequency=sampling_frequency
    )
    new_frequencies = create_frequency_series(
        duration=256, sampling_frequency=sampling_frequency
    )
    psd = interp1d(original_frequencies, psd)(new_frequencies)
    # Window normalised so windowing preserves average power.
    short_window = tukey(duration * sampling_frequency, tukey_alpha)
    short_window /= np.mean(short_window ** 2) ** 0.5
    # Reconstruct the analytic matrix from its SVD factors.
    analytic_psd_matrix = (svd[0] * svd[1]) @ svd[2]
    estimated_psd_matrix = np.zeros_like(analytic_psd_matrix)
    nfft = duration * sampling_frequency
    start_idx = minimum_frequency * duration
    stop_idx = maximum_frequency * duration
    n_average = 1024 * 1024 // 64
    # Monte-Carlo estimate: average outer products of windowed noise
    # realisations coloured by the interpolated PSD.
    for _ in trange(n_average):
        white_noise, frequencies = create_white_noise(
            sampling_frequency=2048, duration=256
        )
        coloured_noise = white_noise * psd ** 0.5
        td_noise = np.fft.irfft(coloured_noise).reshape((-1, nfft))
        fd_noise = np.fft.rfft(td_noise * short_window)
        reduced_noise = fd_noise[:, start_idx : stop_idx + 1]
        estimated_psd_matrix += np.einsum(
            "ki,kj->ij", reduced_noise, reduced_noise.conjugate()
        ) / 2
    total_averages = n_average * len(reduced_noise)
    estimated_psd_matrix /= total_averages
    rcParams["font.family"] = "serif"
    rcParams["font.serif"] = "Computer Modern Roman"
    rcParams["font.size"] = 20
    rcParams["text.usetex"] = True
    rcParams["grid.alpha"] = 0
    # Plot measured (a) above analytic (b) with a shared colour scale.
    fig, axes = plt.subplots(nrows=2, figsize=(10, 16))
    kwargs = dict(
        minimum_frequency=minimum_frequency,
        maximum_frequency=maximum_frequency,
        duration=duration,
        vmin=-53,
        vmax=-41.8,
        tick_step=10,
    )
    plot_psd_matrix(estimated_psd_matrix, axes[0], **kwargs)
    plot_psd_matrix(analytic_psd_matrix, axes[1], **kwargs)
    axes[0].text(-25, 190, "(a)")
    axes[1].text(-25, 190, "(b)")
    plt.tight_layout()
    plt.savefig(f"{outdir}/zoom_{tukey_alpha}.pdf")
    if tukey_alpha == 0.1:
        plt.savefig("figure_1.pdf")
    plt.close()
| 32.269231
| 77
| 0.695828
| 522
| 4,195
| 5.323755
| 0.323755
| 0.04534
| 0.053976
| 0.048938
| 0.300828
| 0.256927
| 0.22814
| 0.22814
| 0.18208
| 0.18208
| 0
| 0.025872
| 0.207628
| 4,195
| 129
| 78
| 32.51938
| 0.810168
| 0.065316
| 0
| 0.295238
| 1
| 0
| 0.058778
| 0.007922
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.085714
| 0
| 0.085714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4b624ab13f54c8cfd7032b48000491920f6d9a27
| 5,581
|
py
|
Python
|
web_spider/web_spider/pipelines.py
|
syun0216/simple_ci
|
83d31cb04357fe0bd428ab8f09c2db81a06eb723
|
[
"MIT"
] | null | null | null |
web_spider/web_spider/pipelines.py
|
syun0216/simple_ci
|
83d31cb04357fe0bd428ab8f09c2db81a06eb723
|
[
"MIT"
] | null | null | null |
web_spider/web_spider/pipelines.py
|
syun0216/simple_ci
|
83d31cb04357fe0bd428ab8f09c2db81a06eb723
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
class WebSpiderPipeline(object):
    """Persist scraped items into MySQL.

    Each item carries a 'type' key selecting the destination table; rows are
    upserted with INSERT ... ON DUPLICATE KEY UPDATE.
    """

    def init_insert_db(self, key, table_name):
        # Kept for interface compatibility; no per-table setup is required.
        pass

    def _upsert_player_rank(self, cursor, table, item):
        """Upsert one player-ranking row; 'goal' and 'assist' share a schema,
        differing only in the destination table."""
        insert_sql = ("INSERT INTO `" + table + "` "
                      "(`rank`,`data`,`player_avatar`,`player_name`,`team_avatar`,`team_name`,`rel`,`rel_name`) "
                      "VALUES (%s,%s,%s,%s,%s,%s,%s,%s) "
                      "ON DUPLICATE KEY UPDATE "
                      "rank=VALUES (rank), "
                      "data=VALUES (data), "
                      "player_avatar=VALUES (player_avatar), "
                      "player_name=VALUES (player_name), "
                      "team_avatar=VALUES (team_avatar), "
                      "team_name=VALUES (team_name), "
                      "rel=VALUES (rel), "
                      "rel_name=VALUES (rel_name)")
        cursor.execute(insert_sql,
                       (item['rank'], item['data'], item['player_avatar'], item['player_name'],
                        item['team_avatar'], item['team_name'], item['rel'], item['rel_name']))

    def process_item(self, item, spider):
        """Route `item` to the table matching item['type'] and upsert it.

        Returns the item so later pipeline stages keep receiving it (the
        original implementation returned None, which drops the item).
        """
        # NOTE(review): credentials are hard-coded; consider spider settings.
        connection = pymysql.connect(host='127.0.0.1',
                                     user='root',
                                     password='123456',
                                     db='mydb',
                                     charset='utf8mb4',
                                     cursorclass=pymysql.cursors.DictCursor)
        try:
            with connection.cursor() as cursor:
                if item['type'] == 'toutiao':
                    insert_sql = """INSERT INTO `dongqiudi` (`id`, `name`,`url`,`time`,`comment`,`image`)
                        VALUES (%s, %s,%s,%s,%s,%s)
                        ON DUPLICATE KEY UPDATE
                        id=VALUES(id),
                        name=VALUES(name),
                        url=VALUES (url),
                        time=VALUES (time),
                        comment=VALUES (comment),
                        image=VALUES (image)"""
                    cursor.execute(insert_sql, (item['id'], item['name'], item['url'],
                                                item['time'], item['comment'], item['image']))
                elif item['type'] == 'rank':
                    insert_sql = """INSERT INTO `rank` (`rank`,`team_avatar`,`team_name`,`round`,`win`,`draw`,`lost`,`goal`,`fumble`,`GD`,`integral`,`rel`,`rel_name`)
                        VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
                        ON DUPLICATE KEY UPDATE
                        rank=VALUES (rank),
                        team_avatar=VALUES (team_avatar),
                        team_name=VALUES (team_name),
                        round=VALUES (round),
                        win=VALUES (win),
                        draw=VALUES (draw),
                        lost=VALUES (lost),
                        goal=VALUES (goal),
                        fumble=VALUES (fumble),
                        GD=VALUES (GD),
                        integral=VALUES (integral),
                        rel=VALUES (rel),
                        rel_name=VALUES (rel_name)
                        """
                    cursor.execute(insert_sql,
                                   (item['rank'], item['team_avatar'], item['team_name'], item['round'],
                                    item['win'], item['draw'], item['lost'], item['goal'], item['fumble'],
                                    item['GD'], item['integral'], item['rel'], item['rel_name']))
                elif item['type'] in ('goal', 'assist'):
                    # The two branches were byte-identical except for the table.
                    table = 'player_goal_rank' if item['type'] == 'goal' else 'player_assist_rank'
                    self._upsert_player_rank(cursor, table, item)
            # connection is not autocommit by default, so commit explicitly.
            connection.commit()
        finally:
            connection.close()
        return item
| 52.650943
| 226
| 0.392403
| 477
| 5,581
| 4.446541
| 0.234801
| 0.029231
| 0.03819
| 0.043376
| 0.517209
| 0.481377
| 0.481377
| 0.466289
| 0.446488
| 0.446488
| 0
| 0.005228
| 0.485934
| 5,581
| 105
| 227
| 53.152381
| 0.734054
| 0.054291
| 0
| 0.474359
| 0
| 0.051282
| 0.688686
| 0.078778
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0.038462
| 0.012821
| 0
| 0.051282
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4b6b2b2466b7f50264d915b0b9ab9925c879719e
| 587
|
py
|
Python
|
kora/install/blender.py
|
wannaphong/kora
|
8a9034097d07b14094e077769c02a0b4857d179b
|
[
"MIT"
] | 91
|
2020-05-26T05:54:51.000Z
|
2022-03-09T07:33:44.000Z
|
kora/install/blender.py
|
wannaphong/kora
|
8a9034097d07b14094e077769c02a0b4857d179b
|
[
"MIT"
] | 12
|
2020-10-03T10:09:11.000Z
|
2021-03-06T23:12:21.000Z
|
kora/install/blender.py
|
wannaphong/kora
|
8a9034097d07b14094e077769c02a0b4857d179b
|
[
"MIT"
] | 16
|
2020-07-07T18:39:29.000Z
|
2021-03-06T03:46:49.000Z
|
import os
from IPython import get_ipython

# need this fix first
# Reinstall libtcmalloc-minimal4 and point LD_PRELOAD at the fresh copy so
# Blender's allocator does not clash with the notebook environment's preload
# (presumably Google Colab — this is a kora helper; confirm target platform).
# NOTE(review): os.system return codes are ignored, so failures are silent.
os.environ["LD_PRELOAD"] = ""
os.system("apt remove libtcmalloc-minimal4")
os.system("apt install libtcmalloc-minimal4")
os.environ["LD_PRELOAD"] = "/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4.3.0"
os.system("dpkg -L libtcmalloc-minimal4")

# then install blender
# Download Blender 2.83 and symlink the binary onto PATH.
url = "https://download.blender.org/release/Blender2.83/blender-2.83.0-linux64.tar.xz"
os.system(f"curl {url} | tar xJ")
os.system("ln -s /content/blender-2.83.0-linux64/blender /usr/local/bin/blender")

# show result
get_ipython().system("blender -v")
| 36.6875
| 86
| 0.749574
| 96
| 587
| 4.520833
| 0.572917
| 0.092166
| 0.050691
| 0.082949
| 0.082949
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046382
| 0.081772
| 587
| 16
| 87
| 36.6875
| 0.758813
| 0.088586
| 0
| 0
| 0
| 0.181818
| 0.639098
| 0.216165
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4b6cb29835a30f52c4bb14e8a53ca4a8d4a5cdb7
| 2,352
|
py
|
Python
|
parks/models.py
|
ExpertOfNone/expert_of_none
|
9ff4e4279a570712766546122c014c754f753485
|
[
"MIT"
] | null | null | null |
parks/models.py
|
ExpertOfNone/expert_of_none
|
9ff4e4279a570712766546122c014c754f753485
|
[
"MIT"
] | null | null | null |
parks/models.py
|
ExpertOfNone/expert_of_none
|
9ff4e4279a570712766546122c014c754f753485
|
[
"MIT"
] | null | null | null |
from django.db import models
from django_countries.fields import CountryField
from localflavor.us.models import USStateField
from base.models import EONBaseModel
class Amenity(EONBaseModel):
    """Common amenities, i.e. Pool, Lake, Hiking Trail, Primitive Camping, etc."""

    name = models.CharField(max_length=50)
    # Short code for the amenity (5 chars max; uniqueness is not enforced here).
    code = models.CharField(max_length=5)
    description = models.TextField(null=True, blank=True)

    def __str__(self):
        return self.name
class Park(EONBaseModel):
    """General Park Information Utilized for Reference"""

    # Closed set of classifications stored in park_type.
    PARK_TYPE_STATE = 'state'
    PARK_TYPE_NATIONAL = 'national'
    PARK_TYPE_CITY = 'city'
    PARK_TYPE_OTHER = 'other'

    PARK_TYPE_CHOICES = (
        (PARK_TYPE_STATE, 'State'),
        (PARK_TYPE_NATIONAL, 'National'),
        (PARK_TYPE_CITY, 'City'),
        (PARK_TYPE_OTHER, 'Other')
    )

    park_type = models.CharField(max_length=20, choices=PARK_TYPE_CHOICES)
    name = models.CharField(max_length=100)
    description = models.TextField(null=True, blank=True)
    # Through-models let each link carry extra data (see ParkPhoto/ParkAmenity).
    photos = models.ManyToManyField('base.Photo', through='ParkPhoto')
    address_one = models.CharField(max_length=50)
    address_two = models.CharField(max_length=50, null=True, blank=True)
    city = models.CharField(max_length=50)
    state = USStateField(blank=True, null=True)  # US-only; optional for other countries
    country = CountryField()
    postal_code = models.CharField(blank=True, null=True, max_length=20)
    amenities = models.ManyToManyField(Amenity, through='ParkAmenity')
    # SET_NULL keeps the park record if its reference topic is deleted.
    topic = models.ForeignKey('base.Topic', on_delete=models.SET_NULL, null=True)

    def __str__(self):
        return '{name} - {park_type}'.format(
            name=self.name,
            park_type=self.get_park_type_display(),
        )
class ParkAmenity(EONBaseModel):
    """Through-model linking a Park to an Amenity, with optional free-text notes."""

    park = models.ForeignKey(Park, on_delete=models.CASCADE)
    amenity = models.ForeignKey(Amenity, on_delete=models.CASCADE)
    additional_info = models.TextField(blank=True)
class ParkPhoto(EONBaseModel):
    """Through-model linking a Park to a Photo."""

    photo = models.ForeignKey('base.Photo', on_delete=models.CASCADE, related_name='park_photos')
    # NOTE(review): DO_NOTHING means deleting a Park can leave dangling rows /
    # raise at the DB layer unless handled elsewhere — confirm this is intended.
    park = models.ForeignKey(Park, on_delete=models.DO_NOTHING, related_name='park_photos')

    def __str__(self):
        return '{park_name} - Photo:{photo_name}'.format(
            park_name=self.park.name,
            photo_name=self.photo.name,
        )
| 31.36
| 97
| 0.702381
| 289
| 2,352
| 5.487889
| 0.280277
| 0.070618
| 0.079445
| 0.105927
| 0.31652
| 0.208071
| 0.208071
| 0.105927
| 0.105927
| 0.105927
| 0
| 0.008325
| 0.182823
| 2,352
| 74
| 98
| 31.783784
| 0.816857
| 0.05102
| 0
| 0.1
| 0
| 0
| 0.075642
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06
| false
| 0
| 0.08
| 0.06
| 0.78
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
4b6d6ef8de7836397b32a70984d9c9488bd0f64f
| 3,891
|
py
|
Python
|
roomfinder_dispo/roomfinder_dispo/dispo.py
|
GuillaumeMorini/roomfinder
|
d756bba6e50a7361ecf9cf529af4a1775a0e836b
|
[
"Apache-2.0"
] | 14
|
2017-01-23T02:58:53.000Z
|
2020-12-21T14:05:07.000Z
|
roomfinder_dispo/roomfinder_dispo/dispo.py
|
GuillaumeMorini/roomfinder
|
d756bba6e50a7361ecf9cf529af4a1775a0e836b
|
[
"Apache-2.0"
] | 2
|
2017-01-23T09:46:54.000Z
|
2017-09-11T10:15:07.000Z
|
roomfinder_dispo/roomfinder_dispo/dispo.py
|
GuillaumeMorini/roomfinder
|
d756bba6e50a7361ecf9cf529af4a1775a0e836b
|
[
"Apache-2.0"
] | 9
|
2017-01-23T02:55:27.000Z
|
2020-05-20T18:38:18.000Z
|
#!/usr/bin/env python2.7
import sys
# Python 2-only hack: reload sys to re-expose setdefaultencoding, then force
# UTF-8 as the process-wide default string encoding.
reload(sys)
sys.setdefaultencoding("utf-8")

from flask import Flask, render_template, request, jsonify
import argparse
import datetime
import os, sys
import requests
from socket import error as SocketError
import errno
import json
import pika
import uuid

# Flask application serving the /book and /dispo endpoints below.
app = Flask(__name__)
@app.route("/book", methods=["GET"])
def book():
    """Queue a room-booking request built from the query parameters.

    Returns the worker's reply body, or an error string when any required
    parameter is missing or empty.
    """
    starttime = request.args.get('starttime', '')
    endtime = request.args.get('endtime', '')
    user_name = request.args.get('user_name', '')
    user_email = request.args.get('user_email', '')
    room_name = request.args.get('room_name', '')
    # request.args.get(..., '') never returns None, so the previous
    # "is None" validation could never trigger; reject empty values instead.
    if not starttime or not endtime or not user_name or not user_email or not room_name:
        return "no parameter provided to book request\n"
    data = {
        "cmd": "book",
        "data": {"starttime": starttime, "endtime": endtime, "user_name": user_name, "user_email": user_email, "room_name": room_name}
    }
    message = json.dumps(data)
    return send_message_to_queue(message)
@app.route("/dispo", methods=["GET"])
def dispo():
    """Queue an availability ("dispo") request for the given room key."""
    key = request.args.get('key', '')
    sys.stderr.write("key: " + str(key) + '\r\n')
    # The old guard used `str(key) is not ""`, which compares object
    # identity rather than equality and only worked by interning accident;
    # a truthiness check expresses the intent reliably.
    if key:
        data = {
            "cmd": "dispo",
            "data": {"key": key}
        }
        message = json.dumps(data)
        return send_message_to_queue(message)
    return "no parameter provided to dispo request\n"
def on_response(ch, method, props, body):
    """RabbitMQ consumer callback: capture the reply for our request.

    Stores the reply body in the module-global `response` only when the
    message's correlation id matches the one published with the request.
    """
    global corr_id
    global response
    if corr_id == props.correlation_id:
        response = body
def send_message_to_queue(message):
    """Publish an RPC message to 'rpc_queue' and block until the reply arrives.

    :param message: JSON string to publish
    :return: the raw reply body delivered via on_response
    """
    global corr_id
    global response
    global connection
    global channel
    global callback_queue
    response = None
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=rabbitmq, port=int(rabbitmq_port), heartbeat_interval=30))
    channel = connection.channel()
    # Exclusive, broker-named queue that receives only our reply.
    result = channel.queue_declare(exclusive=True)
    callback_queue = result.method.queue
    channel.basic_consume(on_response, no_ack=True,
                          queue=callback_queue)
    # One fresh correlation id ties the eventual reply to this request
    # (the original assigned corr_id and response twice; once suffices).
    corr_id = str(uuid.uuid4())
    channel.basic_publish(exchange='',
                          routing_key="rpc_queue",
                          properties=pika.BasicProperties(
                              reply_to=callback_queue,
                              correlation_id=corr_id),
                          body=message)
    print(" [x] Sent data to RabbitMQ")
    # Busy-wait on the connection until on_response fills in `response`.
    while response is None:
        connection.process_data_events()
    print(" [x] Get response from RabbitMQ")
    # print() call form replaces the Python 2-only print statement.
    print("response: " + str(response))
    return response
if __name__ == '__main__':
    from argparse import ArgumentParser
    parser = ArgumentParser("Room Finder Dispo Service")
    parser.add_argument("-r","--rabbitmq", help="IP or hostname for rabbitmq server, e.g. 'rabbit.domain.com'.")
    parser.add_argument("-p","--port", help="tcp port for rabitmq server, e.g. '2765'.")
    args = parser.parse_args()
    # Resolve the rabbitmq host: CLI flag, then environment, then prompt.
    rabbitmq = args.rabbitmq
    if (rabbitmq == None):
        rabbitmq = os.getenv("roomfinder_rabbitmq_server")
        if (rabbitmq == None):
            get_rabbitmq_server = raw_input("What is the rabbitmq server IP or hostname? ")
            rabbitmq = get_rabbitmq_server
    # Same precedence chain for the port.
    rabbitmq_port = args.port
    if (rabbitmq_port == None):
        rabbitmq_port = os.getenv("roomfinder_rabbitmq_port")
        if (rabbitmq_port == None):
            get_rabbitmq_port = raw_input("What is the rabbitmq TCP port? ")
            rabbitmq_port = get_rabbitmq_port
    # Start Flask; retry once on failure, then give up with a message.
    # (raw_input and the print statement below are Python 2-only.)
    try:
        app.run(host='0.0.0.0', port=int("5000"))
    except:
        try:
            app.run(host='0.0.0.0', port=int("5000"))
        except:
            print "Dispo web services error"
| 31.893443
| 138
| 0.627602
| 482
| 3,891
| 4.892116
| 0.298755
| 0.050891
| 0.035623
| 0.022901
| 0.182782
| 0.093299
| 0.072095
| 0.072095
| 0.072095
| 0.072095
| 0
| 0.009339
| 0.257003
| 3,891
| 121
| 139
| 32.157025
| 0.806295
| 0.005911
| 0
| 0.242424
| 0
| 0
| 0.162658
| 0.01293
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.121212
| null | null | 0.040404
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4b7d6c918015930582e1fb1d514d24f1d777be05
| 1,411
|
py
|
Python
|
molecool_test/tests/test_molecule.py
|
radifar/molecool_test
|
9e0027656d6f68d2efd9cdf8f24872b4bcea6cb9
|
[
"BSD-3-Clause"
] | null | null | null |
molecool_test/tests/test_molecule.py
|
radifar/molecool_test
|
9e0027656d6f68d2efd9cdf8f24872b4bcea6cb9
|
[
"BSD-3-Clause"
] | null | null | null |
molecool_test/tests/test_molecule.py
|
radifar/molecool_test
|
9e0027656d6f68d2efd9cdf8f24872b4bcea6cb9
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pytest
import molecool_test
@pytest.fixture
def methane_molecule():
    """Symbols and coordinates for methane; every C-H distance is 1.4 units."""
    symbols = np.array(['C', 'H', 'H', 'H', 'H'])
    # Carbon at (1,1,1); hydrogens offset by +/-1.4 along x and z.
    coordinates = np.array([
        [1, 1, 1],
        [2.4, 1, 1],
        [-0.4, 1, 1],
        [1, 1, 2.4],
        [1, 1, -0.4],
    ])
    return symbols, coordinates
def test_move_methane(methane_molecule):
    """Translate the carbon atom by 5 along every axis.

    NOTE(review): this test has no assertion — it only verifies the
    mutation does not raise; confirm whether a geometry check was intended.
    """
    symbols, coordinates = methane_molecule
    coordinates[0] += 5
def test_build_bond_list(methane_molecule):
    """All four C-H bonds are detected, each 1.4 units long."""
    _, coordinates = methane_molecule
    bonds = molecool_test.build_bond_list(coordinates)
    assert len(bonds) == 4
    assert all(length == 1.4 for length in bonds.values())
def test_build_bond_failure(methane_molecule):
    """A negative minimum bond length must be rejected with ValueError."""
    _, coordinates = methane_molecule
    with pytest.raises(ValueError):
        molecool_test.build_bond_list(coordinates, min_bond=-1)
def test_molecular_mass(methane_molecule):
    """Computed CH4 mass matches the known value of 16.04 amu."""
    symbols, _ = methane_molecule
    computed = molecool_test.calculate_molecular_mass(symbols)
    assert pytest.approx(16.04, abs=1e-2) == computed
def test_center_of_mass(methane_molecule):
    """Symmetric hydrogen placement puts the center of mass on the carbon."""
    symbols, coordinates = methane_molecule
    com = molecool_test.calculate_center_of_mass(symbols, coordinates)
    assert np.array_equal(com, np.array([1, 1, 1]))
| 21.707692
| 81
| 0.697378
| 188
| 1,411
| 4.957447
| 0.276596
| 0.177039
| 0.141631
| 0.177039
| 0.391631
| 0.373391
| 0.218884
| 0.019313
| 0.019313
| 0
| 0
| 0.030009
| 0.197023
| 1,411
| 64
| 82
| 22.046875
| 0.792586
| 0
| 0
| 0.135135
| 0
| 0
| 0.003544
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 1
| 0.162162
| false
| 0
| 0.081081
| 0
| 0.27027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4b7fc93c2e30ca54b02519e2a781a191d7e736a1
| 6,705
|
py
|
Python
|
pochta/tracking.py
|
john-phonk/fs-pochta-api
|
c3b7df4ecdbfc45fb482cedd8ab6c2927e0a1c9d
|
[
"MIT"
] | 16
|
2019-05-13T01:12:10.000Z
|
2022-01-17T06:21:35.000Z
|
pochta/tracking.py
|
john-phonk/fs-pochta-api
|
c3b7df4ecdbfc45fb482cedd8ab6c2927e0a1c9d
|
[
"MIT"
] | 4
|
2020-03-06T06:46:35.000Z
|
2020-11-22T04:24:34.000Z
|
pochta/tracking.py
|
john-phonk/fs-pochta-api
|
c3b7df4ecdbfc45fb482cedd8ab6c2927e0a1c9d
|
[
"MIT"
] | 6
|
2019-08-10T13:18:21.000Z
|
2021-11-25T08:57:30.000Z
|
from abc import ABC
from typing import List
from zeep import CachingClient, Client, Settings
from .exceptions import APIError
class _BaseClient(ABC):
    """Base API client for the Russian Post parcel-tracking service.

    https://tracking.pochta.ru/specification
    """
    # Subclasses point this at the WSDL endpoint they talk to.
    WSDL = ''
    def __init__(self, login: str, password: str, caching=True):
        """Initialize the tracking-service API client.

        :param login: Tracking-system login
        :param password: Tracking-system password
        :param caching: Flag allowing zeep's WSDL caching to be disabled
        """
        self._login = login
        self._password = password
        # CachingClient caches the fetched WSDL; plain Client re-fetches it each time.
        zeep_client = CachingClient if caching else Client
        self._client = zeep_client(
            self.WSDL,
            settings=Settings(strict=False),
        )
class SingleTracker(_BaseClient):
    """Client for the single-shipment (one request at a time) tracking API."""
    WSDL = 'https://tracking.russianpost.ru/rtm34?wsdl'
    def get_history(self, barcode: str) -> dict:
        """
        Operation history of a shipment.

        The getOperationHistory method returns detailed information about
        every operation performed on one specific shipment.
        https://tracking.pochta.ru/specification#getOperationHistory
        :param barcode: Tracking number of the registered postal item, in one
            of two formats:
            - domestic (Russian): 14 characters, digits only
            - international: 13 alphanumeric characters in S10 format.
        :return: The getOperationHistory response contains a list of
            historyRecord elements, one per operation performed on the
            shipment. If no operation has been registered yet, the
            historyRecord list is empty.
        """
        return self._client.service.getOperationHistory(
            OperationHistoryRequest={
                'Barcode': barcode,
                'MessageType': '0'
            },
            AuthorizationHeader={
                'login': self._login,
                'password': self._password,
            },
        )
    def get_order_events_for_mail(self, barcode: str) -> dict:
        """
        History of cash-on-delivery operations.

        The PostalOrderEventsForMail method returns information about
        cash-on-delivery operations linked to a specific postal shipment.
        https://tracking.pochta.ru/specification#PostalOrderEventsForMail
        :param barcode: Tracking number of the registered postal item, in one
            of two formats:
            - domestic (Russian): 14 characters, digits only;
            - international: 13 alphanumeric characters in S10 format.
        :return: List of events
        """
        return self._client.service.PostalOrderEventsForMail(
            PostalOrderEventsForMailInput={
                'Barcode': barcode,
            },
            AuthorizationHeader={
                'login': self._login,
                'password': self._password,
            },
        )
class BatchTracker(_BaseClient):
    """Client for the batch-processing tracking API."""
    WSDL = 'https://tracking.russianpost.ru/fc?wsdl'
    def get_ticket(self, barcodes: List[str]) -> str:
        """Request a ticket for preparing data on a list of shipment barcodes.

        The getTicket method asks the service to prepare information for a
        list of shipment identifiers; on success it returns a ticket id.
        Service limits and recommendations:
        - at most 3000 shipment identifiers per request;
        - first poll for the result no earlier than 15 minutes after the
          ticket is issued;
        - if the result is not ready, poll the same ticket at most once per
          15 minutes;
        - the prepared response is kept by the tracking service for 32 hours
          and deleted afterwards.
        https://tracking.pochta.ru/specification section "Batch processing", item 3
        :param barcodes: Tracking numbers of registered postal items, each in
            one of two formats:
            - domestic (Russian): 14 characters, digits only
            - international: 13 alphanumeric characters in S10 format.
        :return: The ticket number taken from ticketResponse.value of a
            successful getTicket response.
        """
        # By default zeep generates the legacy Request shape where the query is
        # sent as a file with metadata, so we build the Request object by hand
        # and strip the file-related attributes.
        request = self._client.get_type('{http://fclient.russianpost.org}file')
        request.attributes.clear()
        items = [{'Barcode': barcode} for barcode in barcodes]
        response = self._client.service.getTicket(
            request=request(Item=items),
            login=self._login,
            password=self._password,
            language='RUS',
        )
        if response['error'] is not None:
            raise APIError(f'Response body contains error: {response["error"]}')
        return response['value']
    def get_response_by_ticket(self, ticket: str) -> List[dict]:
        """Fetch shipment information prepared for a previously issued ticket.

        Calls answerByTicketRequest, which returns the shipment information
        prepared for a ticket obtained earlier via get_ticket.
        https://tracking.pochta.ru/specification section "Batch processing", item 4
        :param ticket: Ticket number previously returned by getTicket
        :return: Batch-processing results as a list of dicts, one per
            processed request item
        """
        response = self._client.service.getResponseByTicket(
            ticket=ticket,
            login=self._login,
            password=self._password,
        )
        if response['error'] is not None:
            raise APIError(f'Response body contains error: {response["error"]}')
        return response['value']['Item']
| 40.149701
| 99
| 0.670097
| 676
| 6,705
| 6.594675
| 0.409763
| 0.020413
| 0.02131
| 0.023553
| 0.365635
| 0.358008
| 0.290265
| 0.245402
| 0.245402
| 0.16061
| 0
| 0.006885
| 0.263535
| 6,705
| 166
| 100
| 40.391566
| 0.895909
| 0.560477
| 0
| 0.262295
| 0
| 0
| 0.122159
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081967
| false
| 0.098361
| 0.065574
| 0
| 0.311475
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
4b91ba97fda9b2ee93796afb30a9ecc697c21159
| 1,205
|
py
|
Python
|
script.module.placenta/lib/resources/lib/modules/thexem.py
|
parser4life/tantrumrepo
|
3b37145f4772409e538cbddb0b7aa23be525772a
|
[
"Beerware"
] | 1
|
2021-05-09T19:55:51.000Z
|
2021-05-09T19:55:51.000Z
|
script.module.placenta/lib/resources/lib/modules/thexem.py
|
parser4life/tantrumrepo
|
3b37145f4772409e538cbddb0b7aa23be525772a
|
[
"Beerware"
] | null | null | null |
script.module.placenta/lib/resources/lib/modules/thexem.py
|
parser4life/tantrumrepo
|
3b37145f4772409e538cbddb0b7aa23be525772a
|
[
"Beerware"
] | 2
|
2020-04-01T22:11:12.000Z
|
2020-05-07T23:54:52.000Z
|
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: MuadDib
import json
from resources.lib.modules import client
URL_PATTERN = 'http://thexem.de/map/single?id=%s&origin=tvdb&season=%s&episode=%s&destination=scene'
def get_scene_episode_number(tvdbid, season, episode):
    """Map a TVDB season/episode pair to "scene" numbering via thexem.de.

    :param tvdbid: TVDB series id
    :param season: season number in TVDB numbering
    :param episode: episode number in TVDB numbering
    :return: (season, episode) in scene numbering, or the original pair
        unchanged when the lookup fails or yields no mapping.
    """
    try:
        url = URL_PATTERN % (tvdbid, season, episode)
        r = json.loads(client.request(url))
        if r['result'] == 'success':
            data = r['data']['scene']
            return data['season'], data['episode']
    except Exception:
        # Best-effort lookup: any network/JSON/key failure falls back to the
        # input numbering. Narrowed from a bare except so SystemExit and
        # KeyboardInterrupt propagate.
        pass
    return season, episode
| 36.515152
| 100
| 0.480498
| 129
| 1,205
| 4.449612
| 0.643411
| 0.067944
| 0.066202
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003052
| 0.184232
| 1,205
| 32
| 101
| 37.65625
| 0.580875
| 0.418257
| 0
| 0
| 0
| 0.071429
| 0.217153
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0.071429
| 0.142857
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
4b9490ebcc233667c0f331f949a3dfce27be8b1f
| 8,723
|
py
|
Python
|
hirebob/portal/forms.py
|
shantanub0/hirebob
|
5a55e97c6e220059964fbb55439b0189abae1307
|
[
"MIT"
] | null | null | null |
hirebob/portal/forms.py
|
shantanub0/hirebob
|
5a55e97c6e220059964fbb55439b0189abae1307
|
[
"MIT"
] | 1
|
2018-06-23T01:20:26.000Z
|
2018-06-25T21:49:17.000Z
|
hirebob/portal/forms.py
|
shantanub0/hirebob
|
5a55e97c6e220059964fbb55439b0189abae1307
|
[
"MIT"
] | 1
|
2018-06-14T12:11:59.000Z
|
2018-06-14T12:11:59.000Z
|
from django import forms
from .models import UserAccount, JobPost, JobPostActivity, UserProfile
class FormUserCreation(forms.ModelForm):
    """Sign-up form: account type, full name, email, and double password entry.

    confirm_password is rendered but not persisted (it is absent from
    Meta.fields); the 'onkeyup': 'check();' hooks a client-side match check.
    """
    UserTypes = ((1, 'Applicants'), (2, 'Organisations'))
    user_type = forms.ChoiceField(choices=UserTypes,
                                  widget=forms.Select(attrs={'class': "form-control"}))
    user_full_name = forms.CharField(max_length=100,
                                     widget=forms.TextInput(attrs={'class': "form-control",
                                                                   'placeholder': 'Enter Full Name'}))
    email = forms.EmailField(max_length=250,
                             help_text="Required. Invalid format",
                             widget=forms.TextInput(attrs={'class': "form-control",
                                                           'placeholder': 'Enter Email ID'}))
    # NOTE(review): password inputs use TextInput with a manual type='password'
    # attr; forms.PasswordInput is the conventional widget — confirm templates
    # before switching.
    password = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control",
                                                             'type': 'password',
                                                             'placeholder': 'Enter Password',
                                                             'minlength': '6',
                                                             'onkeyup': 'check();'}))
    confirm_password = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control",
                                                                     'type': 'password',
                                                                     'placeholder': 'Re-enter Password',
                                                                     'minlength': '6',
                                                                     'onkeyup': 'check();'}))
    class Meta:
        model = UserAccount
        fields = ('user_type', 'user_full_name', 'email', 'password')
class FormLogin(forms.ModelForm):
    """Login form: email plus password (min length 6, enforced client-side)."""
    email = forms.EmailField(max_length=250,
                             help_text="Required. Invalid format",
                             widget=forms.TextInput(attrs={'class': "form-control",
                                                           'placeholder': 'Enter Email ID'}))
    password = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control",
                                                             'type': 'password',
                                                             'placeholder': 'Enter Password',
                                                             'minlength': '6'}))
    class Meta:
        model = UserAccount
        fields = ('email', 'password')
class FormJobPost(forms.ModelForm):
    """Job-posting form; the poster's email is pre-filled and read-only.

    Location, job type and skills are closed choice lists defined below.
    """
    Locations = (('Mumbai', 'Mumbai'), ('Navi Mumbai', 'Navi Mumbai'), ('Pune', 'Pune'))
    job_types = (('Software Engineer', 'Software Engineer'), ('Database Admin', 'Database Admin'), ('DevOps', 'DevOps'))
    jobs_skill = (('Java', 'Java'), ('Python', 'Python'), ('C', 'C'), ('C++', 'C++'))
    job_location = forms.ChoiceField(choices=Locations,
                                     widget=forms.Select(attrs={'class': "form-control"}))
    job_type = forms.ChoiceField(choices=job_types,
                                 widget=forms.Select(attrs={'class': "form-control"}))
    job_skills = forms.ChoiceField(choices=jobs_skill,
                                   widget=forms.Select(attrs={'class': "form-control"}))
    job_title = forms.CharField(max_length=100,
                                required=True,
                                widget=forms.TextInput(attrs={'class': "form-control",
                                                              'placeholder': 'Enter job title'}))
    posted_by_email = forms.EmailField(max_length=250,
                                       help_text="Required. Invalid format",
                                       widget=forms.TextInput(attrs={'class': "form-control",
                                                                     'placeholder': 'Enter Email ID',
                                                                     'readonly': True}))
    job_description = forms.CharField(widget=forms.Textarea(attrs={'class': "form-control",
                                                                   'placeholder': 'Enter Job Description'}))
    class Meta:
        model = JobPost
        fields = ('job_type', 'job_skills', 'job_location', 'posted_by_email', 'job_description', 'job_title')
class FormApply(forms.ModelForm):
    """Job-application form; identifying fields arrive pre-filled read-only.

    Only email and post_id are persisted (see Meta.fields); the remaining
    fields are carried for display/mail composition.
    """
    email = forms.EmailField(required=True,
                             max_length=250,
                             help_text="Required. Invalid format",
                             widget=forms.TextInput(attrs={'class': "form-control",
                                                           'placeholder': 'Enter Email ID',
                                                           'readonly': True}))
    to_email = forms.EmailField(required=True,
                                max_length=250,
                                help_text="Required. Invalid format",
                                widget=forms.TextInput(attrs={'class': "form-control",
                                                              'placeholder': 'Enter Email ID',
                                                              'readonly': True}))
    cover_letter = forms.CharField(required=True,
                                   widget=forms.Textarea(attrs={'class': "form-control",
                                                                'placeholder': 'Cover Letter'}))
    post_id = forms.IntegerField(required=True,
                                 widget=forms.TextInput(attrs={'class': "form-control",
                                                               'placeholder': 'Post ID',
                                                               'readonly': True}))
    job_title = forms.CharField(required=True,
                                widget=forms.TextInput(attrs={'class': "form-control",
                                                              'placeholder': 'Enter Job Title'}))
    class Meta:
        model = JobPostActivity
        fields = ('email', 'post_id')
class FormUploadImage(forms.Form):
    """Profile-image upload form."""
    user_image = forms.ImageField(widget=forms.FileInput())
    # NOTE(review): Meta is ignored on a plain forms.Form (only ModelForm reads
    # model/fields) — confirm whether ModelForm was intended here.
    class Meta:
        model = UserAccount
        fields = ('user_image', )
class FormUploadResume(forms.Form):
    """Resume (file) upload form."""
    resume = forms.FileField()
    # NOTE(review): Meta is ignored on a plain forms.Form (only ModelForm reads
    # model/fields) — confirm whether ModelForm was intended here.
    class Meta:
        model = UserAccount
        fields = ('resume', )
class FormApplicantsInfo(forms.Form):
    """Applicant profile form: contact details, links, and biography.

    The primary email is read-only (account identity); the rest is editable.
    """
    Gender = (('Male', 'Male'), ('Female', 'Female'), ('None', 'None'))
    gender = forms.ChoiceField(choices=Gender,
                               widget=forms.Select(attrs={'class': "form-control"}))
    email = forms.EmailField(max_length=250,
                             help_text="Required. Invalid format",
                             widget=forms.TextInput(attrs={'class': "form-control",
                                                           'placeholder': 'Enter Email ID',
                                                           'readonly': True}))
    gmail = forms.EmailField(max_length=250,
                             help_text="Required. Invalid format",
                             widget=forms.TextInput(attrs={'class': "form-control",
                                                           'placeholder': 'Enter gmail id'}))
    linkedin = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control",
                                                             'placeholder': 'Enter Linkedin profile'}))
    skype_id = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control",
                                                             'placeholder': 'Enter skype id'}))
    about_me = forms.CharField(widget=forms.Textarea(attrs={'class': "form-control", 'placeholder': 'Enter About you'}))
    address = forms.CharField(widget=forms.Textarea(attrs={'class': "form-control", 'placeholder': 'Enter your address'}))
    birthday = forms.DateField(widget=forms.TextInput(attrs={'class': "form-control", 'placeholder': 'Enter DOB in DD-MM-YYYY'}))
    job_title = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control",
                                                              'placeholder': 'Enter Job Title'}))
    location = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control",
                                                             'placeholder': 'Enter Your location'}))
    class Meta:
        model = UserProfile
        fields = ('email', 'gmail', 'linkedin', 'skype_id', 'about_me', 'address', 'birthday', 'job_title',
                  'location', 'gender')
| 56.642857
| 129
| 0.458558
| 669
| 8,723
| 5.898356
| 0.171898
| 0.080841
| 0.099341
| 0.149012
| 0.667258
| 0.631272
| 0.591485
| 0.569437
| 0.525342
| 0.496452
| 0
| 0.006339
| 0.4213
| 8,723
| 153
| 130
| 57.013072
| 0.775357
| 0
| 0
| 0.5
| 0
| 0
| 0.206351
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.087302
| 0.015873
| 0
| 0.404762
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
4b996a561c6739777af3fa1902cca7e146f0eeaf
| 687
|
py
|
Python
|
TianJiPlanBackend/authentication/migrations/0002_auto_20210912_0929.py
|
weridolin/tianji-plan
|
b98a49d92ee2a365095f9e15f4231f5178aca1c0
|
[
"Apache-2.0"
] | null | null | null |
TianJiPlanBackend/authentication/migrations/0002_auto_20210912_0929.py
|
weridolin/tianji-plan
|
b98a49d92ee2a365095f9e15f4231f5178aca1c0
|
[
"Apache-2.0"
] | null | null | null |
TianJiPlanBackend/authentication/migrations/0002_auto_20210912_0929.py
|
weridolin/tianji-plan
|
b98a49d92ee2a365095f9e15f4231f5178aca1c0
|
[
"Apache-2.0"
] | 1
|
2021-12-07T11:45:13.000Z
|
2021-12-07T11:45:13.000Z
|
# Generated by Django 3.2.7 on 2021-09-12 01:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add UserProfile.mail and redefine UserProfile.telephone with the same
    constraints (blank/null allowed, indexed, unique, max 127 chars).
    """
    dependencies = [
        ('authentication', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='mail',
            # NOTE(review): verbose_name='电话' means "telephone" — on an email
            # field this looks copy-pasted from the telephone field below.
            # Confirm; fixing it requires a follow-up migration, not an edit here.
            field=models.EmailField(blank=True, db_index=True, max_length=127, null=True, unique=True, verbose_name='电话'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='telephone',
            field=models.CharField(blank=True, db_index=True, max_length=127, null=True, unique=True, verbose_name='电话'),
        ),
    ]
| 28.625
| 122
| 0.615721
| 77
| 687
| 5.376623
| 0.571429
| 0.043478
| 0.096618
| 0.115942
| 0.304348
| 0.304348
| 0.304348
| 0.304348
| 0.304348
| 0.304348
| 0
| 0.049116
| 0.259098
| 687
| 23
| 123
| 29.869565
| 0.764244
| 0.065502
| 0
| 0.235294
| 1
| 0
| 0.101563
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4b9af91c0efeb81facf6d27474553a4bb9a6505d
| 2,025
|
py
|
Python
|
tests/unit_tests/tasks/fortran/test_fortran_compiler.py
|
bblay/fab
|
bbdac7bae20c5b8695a2d56945c9593b4fda9c74
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit_tests/tasks/fortran/test_fortran_compiler.py
|
bblay/fab
|
bbdac7bae20c5b8695a2d56945c9593b4fda9c74
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit_tests/tasks/fortran/test_fortran_compiler.py
|
bblay/fab
|
bbdac7bae20c5b8695a2d56945c9593b4fda9c74
|
[
"BSD-3-Clause"
] | null | null | null |
from pathlib import Path
from unittest import mock
import pytest
from fab.build_config import AddFlags
from fab.dep_tree import AnalysedFile
from fab.steps.compile_fortran import CompileFortran
# todo: we might have liked to reuse this from test_dep_tree
from fab.util import CompiledFile
@pytest.fixture
def src_tree():
    """Analysed-file build tree: root depends on a and b, both depend on c;
    foo is an independent leaf with no recorded dependencies."""
    foo, root, a, b, c = (Path('src/%s.f90' % stem) for stem in ('foo', 'root', 'a', 'b', 'c'))
    return {
        foo: AnalysedFile(fpath=foo, file_hash=None),
        root: AnalysedFile(fpath=root, file_deps={a, b}, file_hash=None),
        a: AnalysedFile(fpath=a, file_deps={c}, file_hash=None),
        b: AnalysedFile(fpath=b, file_deps={c}, file_hash=None),
        c: AnalysedFile(fpath=c, file_deps=set(), file_hash=None),
    }
class Test_run(object):
    # todo: almost identical to the c compiler test
    def test_vanilla(self, src_tree):
        """Compilation should proceed in three dependency-ordered passes."""
        # ensure the compile passes match the build tree
        config = mock.Mock(workspace=Path('foo/src'), multiprocessing=False)
        c_compiler = CompileFortran(
            compiler='gcc', common_flags=['-c'], path_flags=[AddFlags(match='foo/src/*', flags=['-Dhello'])])
        # Stub for Step.run_mp: no real compilation, just wrap each analysed
        # file in a CompiledFile the way the real worker would.
        def foo(items, func):
            return [CompiledFile(af, output_fpath=None) for af in items]
        with mock.patch('fab.steps.Step.run_mp', side_effect=foo) as mock_run_mp:
            c_compiler.run(artefact_store={'build_tree': src_tree}, config=config)
        # 1st pass: files with no outstanding dependencies (foo, c)
        mock_run_mp.assert_any_call(
            items={src_tree[Path('src/foo.f90')], src_tree[Path('src/c.f90')]}, func=mock.ANY)
        # 2nd pass: a and b, which only depended on c
        mock_run_mp.assert_any_call(
            items={src_tree[Path('src/a.f90')], src_tree[Path('src/b.f90')]}, func=mock.ANY)
        # last pass: root, once a and b are done
        mock_run_mp.assert_called_with(items={src_tree[Path('src/root.f90')]}, func=mock.ANY)
| 38.942308
| 112
| 0.651852
| 299
| 2,025
| 4.257525
| 0.29097
| 0.104478
| 0.078555
| 0.094266
| 0.35978
| 0.169678
| 0.135114
| 0.135114
| 0.135114
| 0.135114
| 0
| 0.02457
| 0.196049
| 2,025
| 51
| 113
| 39.705882
| 0.757371
| 0.088395
| 0
| 0.064516
| 0
| 0
| 0.133225
| 0.011419
| 0
| 0
| 0
| 0.019608
| 0.096774
| 1
| 0.096774
| false
| 0
| 0.225806
| 0.064516
| 0.419355
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4ba4c531fc5b73ca047fb0191f3bbb5ca13cf62d
| 209
|
py
|
Python
|
udacity/cloud-native-application-architecture/3-message-passing/lesson-3-implementing-message-passing/kafka-python-demo/producer.py
|
thomasrobertz/mooc
|
cb87365bfcbe8ccf972f36d70a251c73b3c15a7b
|
[
"MIT"
] | null | null | null |
udacity/cloud-native-application-architecture/3-message-passing/lesson-3-implementing-message-passing/kafka-python-demo/producer.py
|
thomasrobertz/mooc
|
cb87365bfcbe8ccf972f36d70a251c73b3c15a7b
|
[
"MIT"
] | 13
|
2021-12-14T20:59:34.000Z
|
2022-03-02T11:09:34.000Z
|
udacity/cloud-native-application-architecture/3-message-passing/lesson-3-implementing-message-passing/kafka-python-demo/producer.py
|
thomasrobertz/mooc
|
cb87365bfcbe8ccf972f36d70a251c73b3c15a7b
|
[
"MIT"
] | 1
|
2020-08-20T12:53:43.000Z
|
2020-08-20T12:53:43.000Z
|
from kafka import KafkaProducer
# Topic and broker address this demo producer publishes to.
TOPIC_NAME = 'items'
KAFKA_SERVER = 'localhost:9092'
producer = KafkaProducer(bootstrap_servers=KAFKA_SERVER)
# send() enqueues asynchronously; flush() blocks until the message has
# actually been handed to the broker.
producer.send(TOPIC_NAME, b'Test Message!!!')
producer.flush()
| 19
| 56
| 0.789474
| 26
| 209
| 6.153846
| 0.692308
| 0.1125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 0.100478
| 209
| 10
| 57
| 20.9
| 0.829787
| 0
| 0
| 0
| 0
| 0
| 0.162679
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4ba61c47eb12a3d8f57c257b4b752059384399df
| 6,948
|
py
|
Python
|
plot_curve.py
|
wenhuchen/Hierarchical-DSA
|
2dbdacde25ee82c9d42fe980694673d285b1f7f3
|
[
"MIT"
] | 45
|
2019-02-27T02:04:08.000Z
|
2022-03-21T04:49:22.000Z
|
plot_curve.py
|
wenhuchen/Hierarchical-DSA
|
2dbdacde25ee82c9d42fe980694673d285b1f7f3
|
[
"MIT"
] | 2
|
2019-08-18T03:05:11.000Z
|
2020-07-26T13:45:05.000Z
|
plot_curve.py
|
wenhuchen/Hierarchical-DSA
|
2dbdacde25ee82c9d42fe980694673d285b1f7f3
|
[
"MIT"
] | 4
|
2019-03-12T17:40:12.000Z
|
2021-06-10T07:59:39.000Z
|
import matplotlib.pyplot as plt
import numpy as np
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import json
from scipy.interpolate import interp1d
from data_utils import *
def integral(y, x):
    """Approximate the integral of y over x using the trapezoidal rule.

    Consecutive (x, y) pairs define trapezoids; abs() on the width makes the
    result independent of x ordering. Empty/single-point input yields 0.
    """
    segments = zip(x[:-1], x[1:], y[:-1], y[1:])
    return sum((y_lo + y_hi) / 2 * abs(x_hi - x_lo)
               for x_lo, x_hi, y_lo, y_hi in segments)
def preprocess(x):
    """Scale the values in x to [0, 1] by dividing by the maximum value."""
    # Earlier experiment kept for reference:
    # x = [math.log10(_) for _ in x]
    peak = float(max(x))
    return [value / peak for value in x]
def func(y, x, y_int):
    """Linearly interpolate the x position where curve (x, y) reaches y_int.

    Scans consecutive segments; exact endpoint hits return the endpoint's x.
    Assumes y increases along the curve — TODO confirm with callers. Returns
    None (implicit) when y_int lies outside every segment.
    """
    for y_lo, y_hi, x_lo, x_hi in zip(y[:-1], y[1:], x[:-1], x[1:]):
        if y_int == y_lo:
            return x_lo
        if y_int == y_hi:
            return x_hi
        if y_lo < y_int < y_hi:
            return (y_int - y_lo) / (y_hi - y_lo) * (x_hi - x_lo) + x_lo
def draw_curve():
    """Plot accuracy vs. vocabulary size (log x-axis) and save metrics.eps.

    NOTE(review): a second draw_curve() defined later in this file shadows
    this one, so this version is dead code unless called before that point.
    """
    x = [1, 100, 200, 500, 1000, 2000, 5000, 10000, 20000, 40000]
    y_mean = [13.3, 29.6, 33.9, 43.8, 50.81, 67.7, 75.6, 81.5, 91.4, 95.6]
    plt.plot(x, y_mean, 'black')
    #plt.fill(x + x[::-1], y_mean + [95.6] * len(y_min), '#f4df42', alpha=.5, ec='None')
    # Shade the region under the mean-accuracy curve.
    plt.fill(x + x[::-1], [0] * len(y_mean) + y_mean[::-1], '#0099ff', alpha=.5, ec='None')
    plt.xlabel("Vocab")
    plt.xscale('log')
    plt.xlim(left=0)
    plt.ylim(ymin=0)
    plt.ylabel("Accuracy")
    #plt.show()
    plt.savefig("metrics.eps", dpi=1000, format='eps')
#draw_curve()
def draw_uncerntainy_curve():
    """Plot mean accuracy with an upper/lower uncertainty band versus vocab
    size and save the figure as accuracy_curve.eps.
    """
    x = [0, 100, 200, 500, 1000, 2000, 5000, 10000]
    y_max = [13.3, 51.2, 67.5, 80.4, 85.1, 87.5, 90.5, 91.4]
    y_mean = [13.3, 29.6, 33.9, 43.9, 50.81, 67.7, 81.5, 91.4]
    y_min = [13.3, 25.6, 27, 35.1, 42.4, 56, 74.1, 91.4]
    plt.plot(x, y_mean, 'black', label="Mean Accuracy Curve")
    plt.plot(x, y_min, 'black', label="Lower Accuracy Curve")
    plt.plot(x, y_max, 'black', label="Upper Accuracy Curve")
    #plt.plot(X, y, 'r.', markersize=10, label=u'Observations')
    #plt.plot(x, y_pred, 'b-', label=u'Prediction')
    # Shade the band between the lower and upper accuracy curves.
    plt.fill(x + x[::-1], y_min + y_max[::-1], '#0099ff', alpha=.5, ec='None', label='Accuracy Range')
    plt.legend(loc='lower right', prop={'size':14})
    plt.xlim(left=0)
    plt.xlabel("Vocab")
    plt.ylabel("Accuracy")
    plt.savefig("accuracy_curve.eps", dpi=1000, format='eps')
#plt.show()
#plt.fill(np.concatenate([x, x[::-1]]),
# np.concatenate([y_pred - 1.9600 * sigma,
# (y_pred + 1.9600 * sigma)[::-1]]),
# alpha=.5, fc='b', ec='None', label='95% confidence interval')
#draw_uncerntainy_curve()
def draw_SLU_uncerntainy_curve():
    """SLU variant of the uncertainty plot; also saves accuracy_curve.eps.

    NOTE(review): x is converted to strings, which presumably makes matplotlib
    treat the x-axis as categorical (evenly spaced ticks) — confirm intent.
    """
    x = [0, 7, 27, 77, 100, 1778, 5134, 10000]
    x = [str(_) for _ in x]
    y_max = [13.3, 48.8, 81.3, 92.0, 94.0, 95.3, 95.8, 96.1]
    y_mean = [13.3, 33.4, 54.3, 77.4, 88.9, 93.5, 94.2, 96.1]
    y_min = [13.3, 14.2, 33.2, 46.8, 72.8, 88.4, 92.3, 96.1]
    plt.plot(x, y_mean, color='black', label="Mean Accuracy Curve")
    plt.plot(x, y_min, color='black', label="Lower Accuracy Curve")
    plt.plot(x, y_max, color='black', label="Upper Accuracy Curve")
    #plt.plot(X, y, 'r.', markersize=10, label=u'Observations')
    #plt.plot(x, y_pred, 'b-', label=u'Prediction')
    # Shade the band between the lower and upper accuracy curves.
    plt.fill(x + x[::-1], y_min + y_max[::-1], color='#0099ff', alpha=.5, ec='None', label='Accuracy Range')
    plt.xlim(left=0)
    plt.ylim(bottom=0)
    plt.legend(loc='lower right', prop={'size':14})
    plt.xlabel("Vocab")
    plt.ylabel("Accuracy")
    plt.savefig("accuracy_curve.eps", dpi=1000, format='eps')
#plt.show()
#plt.fill(np.concatenate([x, x[::-1]]),
# np.concatenate([y_pred - 1.9600 * sigma,
# (y_pred + 1.9600 * sigma)[::-1]]),
# alpha=.5, fc='b', ec='None', label='95% confidence interval')
#draw_SLU_uncerntainy_curve()
def read(string, use_str=False):
    """Parse one results-file line (a Python list literal) into a list.

    :param string: text containing a list literal, e.g. "[1, 2, 3]"
    :param use_str: when True return items as strings, otherwise as floats
    :return: list of str or list of float
    """
    import ast  # local import keeps the file's top-level imports untouched
    # literal_eval only accepts literals, unlike the previous eval() which
    # would execute arbitrary code embedded in a results file.
    result = ast.literal_eval(string.strip())
    if use_str:
        return [str(item) for item in result]
    return [float(item) for item in result]
def draw_curve():
    """Plot Frequency / TF-IDF / Variational accuracy curves for three
    datasets (ag_news, dbpedia, yelp_review), saving one .eps per dataset.

    NOTE(review): this redefinition shadows the earlier draw_curve() above.
    """
    def savefig(f1, f2, f3, name):
        # Each f* holds a results file's lines: line 0 = accuracies,
        # line 1 = vocab sizes.
        x, y = enhanced(read(f1[1]), read(f1[0]))
        plt.plot(x, y, 'y*', label="Frequency")
        x, y = enhanced(read(f2[1]), read(f2[0]))
        plt.plot(x, y, 'b--', label="TF-IDF")
        x, y = enhanced(read(f3[1]), read(f3[0]))
        plt.plot(x, y, 'r', label="Variational")
        #plt.title("{} dataset".format(name))
        plt.xlabel('Vocab')
        plt.ylabel('Accuracy')
        plt.legend(loc='best')
        plt.xscale('log')
        #plt.xlim(left=0.001)
        #plt.show()
        plt.savefig("{}.eps".format(name), format="eps", dpi=1000)
        # Clear the figure so the next dataset starts from a blank canvas.
        plt.clf()
    plt.rcParams.update({'font.size': 14})
    file = 'results/ag_news'
    f1 = open(file + ".txt").readlines()
    f2 = open(file + "_tf_idf.txt").readlines()
    f3 = open(file + "_var.txt").readlines()
    savefig(f1, f2, f3, 'images/ag_news')
    file = 'results/dbpedia'
    f1 = open(file + ".txt").readlines()
    f2 = open(file + "_tf_idf.txt").readlines()
    f3 = open(file + "_var.txt").readlines()
    savefig(f1, f2, f3, 'images/dbpedia')
    file = 'results/yelp_review'
    f1 = open(file + ".txt").readlines()
    f2 = open(file + "_tf_idf.txt").readlines()
    f3 = open(file + "_var.txt").readlines()
    savefig(f1, f2, f3, 'images/yelp_review')
#draw_curve()
def compute_score():
    """Print ROC and CR scores for each dataset/vocab-selection-method pair.

    Each results file stores accuracy values on line 0 and vocabulary sizes
    on line 1; the third argument to ROC is the dataset's full vocabulary
    size (ag_news 61673, dbpedia 563355, yelp_review 252712, sogou 254495,
    snli 42391).
    """
    from data_utils import *
    # NOTE(review): Python 2 print statements; the bare `print()` separators
    # below print "()" (an empty tuple), not a blank line, under Python 2.
    f = 'results/ag_news.txt'
    y = read(open(f).readlines()[0])
    x = read(open(f).readlines()[1])
    print ROC(y, x, 61673)
    print CR(y, x)
    f = 'results/ag_news_tf_idf.txt'
    y = read(open(f).readlines()[0])
    x = read(open(f).readlines()[1])
    print ROC(y, x, 61673)
    print CR(y, x)
    f = 'results/ag_news_var.txt'
    y = read(open(f).readlines()[0])
    x = read(open(f).readlines()[1])
    print ROC(y, x, 61673)
    print CR(y, x)
    print()
    f = 'results/dbpedia.txt'
    y = read(open(f).readlines()[0])
    x = read(open(f).readlines()[1])
    print ROC(y, x, 563355)
    print CR(y, x)
    f = 'results/dbpedia_tf_idf.txt'
    y = read(open(f).readlines()[0])
    x = read(open(f).readlines()[1])
    print ROC(y, x, 563355)
    print CR(y, x)
    f = 'results/dbpedia_var.txt'
    y = read(open(f).readlines()[0])
    x = read(open(f).readlines()[1])
    print ROC(y, x, 563355)
    print CR(y, x)
    print()
    f = 'results/yelp_review.txt'
    y = read(open(f).readlines()[0])
    x = read(open(f).readlines()[1])
    print ROC(y, x, 252712)
    print CR(y, x)
    f = 'results/yelp_review_tf_idf.txt'
    y = read(open(f).readlines()[0])
    x = read(open(f).readlines()[1])
    print ROC(y, x, 252712)
    print CR(y, x)
    f = 'results/yelp_review_var.txt'
    y = read(open(f).readlines()[0])
    x = read(open(f).readlines()[1])
    print ROC(y, x, 252712)
    print CR(y, x)
    print()
    f = 'results/sogou_news.txt'
    y = read(open(f).readlines()[0])
    x = read(open(f).readlines()[1])
    print ROC(y, x, 254495)
    print CR(y, x)
    f = 'results/sogou_news_tf_idf.txt'
    y = read(open(f).readlines()[0])
    x = read(open(f).readlines()[1])
    print ROC(y, x, 254495)
    print CR(y, x)
    f = 'results/snli.txt'
    y = read(open(f).readlines()[0])
    x = read(open(f).readlines()[1])
    print ROC(y, x, 42391)
    print CR(y, x)
    f = 'results/snli_var.txt'
    y = read(open(f).readlines()[0])
    x = read(open(f).readlines()[1])
    print ROC(y, x, 42391)
    print CR(y, x)
# Module-level invocation: scores are printed whenever this file is run.
compute_score()
| 29.692308
| 105
| 0.598877
| 1,221
| 6,948
| 3.324324
| 0.167076
| 0.013797
| 0.05765
| 0.115299
| 0.670116
| 0.624045
| 0.603351
| 0.556787
| 0.556787
| 0.51392
| 0
| 0.090749
| 0.181635
| 6,948
| 233
| 106
| 29.819742
| 0.623109
| 0.128382
| 0
| 0.494318
| 0
| 0
| 0.145747
| 0.03797
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.056818
| null | null | 0.164773
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4baef5968ecd4571dc42ca2e3a144059ebfa9562
| 1,471
|
py
|
Python
|
Calamous.py
|
Studio-Pasteque/Pokemon
|
6b9f457eef8a2dc28cb8b9b69527404b47c9825a
|
[
"MIT"
] | 2
|
2020-05-27T08:27:58.000Z
|
2020-05-27T09:31:45.000Z
|
Calamous.py
|
Studio-Pasteque/Pokemon
|
6b9f457eef8a2dc28cb8b9b69527404b47c9825a
|
[
"MIT"
] | null | null | null |
Calamous.py
|
Studio-Pasteque/Pokemon
|
6b9f457eef8a2dc28cb8b9b69527404b47c9825a
|
[
"MIT"
] | null | null | null |
import learnables.py
# création d'un objet Calamous
class Calamous:
    """Water-type pokémon data holder; all state lives in class attributes.

    NOTE(review): the class body references `adverse`, which is not defined
    anywhere in this file — presumably expected from `learnables`. As written,
    executing the class statement would raise NameError. Confirm.
    """
    # its type, and whether it is a pokémon
    pokemon = True
    type_ = "Water"
    # abilities and gender (the comment said these should be random, but
    # `a or b or c` always yields the first truthy operand, so abilities is
    # always "Multicule" and gender is always "M")
    abilities = "Multicule" or "Glissade" or "Protéen"
    gender = "M" or "F"
    # levelling counters
    levels = 1
    exp = 0
    needexp = 30
    lootexp = 10
    difupgexp = 3
    # level-up logic (runs once, at class-creation time — not per battle)
    if adverse == 0:
        exp = exp + lootexp * difupgexp
        if levels == 1 and exp == needexp:
            levels = 2
            needexp = 70
            exp = 0
    if adverse == 0:
        exp = exp + lootexp * difupgexp
        if exp == needexp:
            levels = levels + 1
            needexp = needexp * 2
            exp = 0
    # base stats
    hp = 50
    atq = 35
    df = 40
    spa = 60
    spd = 30
    spe = 60
    pre = 1
    esc = 1
    # sprite path and learnable moves
    appearence = ('assets/personnages/Poké-avant/Calamous')
    # more hidden values
    happiness = 0
    # possibility of raising some of them
    if adverse == 0:
        happiness = happiness + 0.1
    # pokédex description
    desc = "Ce pokémon ne peut pas vivre hors de l'eau : sa peau sèche dès qu'elle est déshydratée trop longtemps"
| 27.754717
| 114
| 0.56968
| 190
| 1,471
| 4.405263
| 0.563158
| 0.071685
| 0.035842
| 0.054958
| 0.155317
| 0.155317
| 0.081243
| 0.081243
| 0
| 0
| 0
| 0.037393
| 0.363698
| 1,471
| 53
| 114
| 27.754717
| 0.856838
| 0.28416
| 0
| 0.222222
| 0
| 0.027778
| 0.171717
| 0.038384
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.027778
| 0
| 0.611111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
4bb35fc82ab5a2d2bc09de6a0496b0c17ea21b52
| 2,469
|
py
|
Python
|
Grove_Base_Hat_for_RPI/grove.py-master/grove/button/button.py
|
tcmoore/RPI-Environmental-Controller
|
7f28dcdf08c51db8400ccc0369eb049fdce5e901
|
[
"Unlicense",
"MIT"
] | 5
|
2019-11-18T02:26:18.000Z
|
2021-02-06T20:31:37.000Z
|
Grove_Base_Hat_for_RPI/grove.py-master/grove/button/button.py
|
tcmoore/RPI-Environmental-Controller
|
7f28dcdf08c51db8400ccc0369eb049fdce5e901
|
[
"Unlicense",
"MIT"
] | null | null | null |
Grove_Base_Hat_for_RPI/grove.py-master/grove/button/button.py
|
tcmoore/RPI-Environmental-Controller
|
7f28dcdf08c51db8400ccc0369eb049fdce5e901
|
[
"Unlicense",
"MIT"
] | 1
|
2020-08-26T10:22:37.000Z
|
2020-08-26T10:22:37.000Z
|
#!/usr/bin/env python
#
# This is the library for Grove Base Hat.
#
# Button Base Class
#
'''
## License
The MIT License (MIT)
Grove Base Hat for the Raspberry Pi, used to connect grove sensors.
Copyright (C) 2018 Seeed Technology Co.,Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
class Button(object):
    """Base class for Grove button drivers.

    Keeps a class-wide registry of pins (so several buttons can be used
    as an array) and forwards events to a single registered callback.
    """
    # Event bit flags (combinable with bitwise OR).
    EV_RAW_STATUS = 1 << 0
    EV_SINGLE_CLICK = 1 << 1
    EV_DOUBLE_CLICK = 1 << 2
    EV_LONG_PRESS = 1 << 3
    EV_LEVEL_CHANGED = 1 << 4
    # Shared across all instances: registration order defines each
    # button's index within the array.
    pins = []

    def __init__(self, pin):
        """Register *pin* and remember its position in the shared array."""
        self.__on_obj = None
        self.__on_event = None
        self.__event = 0
        self.pins.append(pin)
        self.__index = self.pins.index(pin)

    def get_on_event(self):
        """Return the (owner, callback) pair registered via on_event()."""
        return (self.__on_obj, self.__on_event)

    def on_event(self, obj, callback):
        """Register *callback*, invoked as callback(obj, event_dict).

        Silently ignored unless *obj* is truthy and *callback* callable.
        """
        if obj and callable(callback):
            self.__on_obj = obj
            self.__on_event = callback

    def is_pressed(self):
        """Default state query; concrete drivers override this."""
        return False

    def _send_event(self, event, pressed, tm):
        """Deliver one event to the registered callback, if any.

        Called by derived classes. The event dict carries the button's
        array index, the event code, the pressed state and a timestamp.
        """
        if not callable(self.__on_event):
            return
        self.__on_event(self.__on_obj, {
            'index': self.__index,
            'code': event,
            'pressed': pressed,
            'time': tm,
        })
| 31.253165
| 78
| 0.64439
| 343
| 2,469
| 4.48105
| 0.463557
| 0.035133
| 0.035784
| 0.019519
| 0.033832
| 0.033832
| 0.033832
| 0
| 0
| 0
| 0
| 0.010239
| 0.287971
| 2,469
| 78
| 79
| 31.653846
| 0.86405
| 0.54192
| 0
| 0.090909
| 0
| 0
| 0.019305
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.151515
| false
| 0
| 0
| 0.060606
| 0.515152
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
4bba68abed889d99f735d0534602287dd744310e
| 3,794
|
py
|
Python
|
hemlock/load_scripts/doc_to_mongo.py
|
Lab41/Hemlock
|
2c53cfc11bfbe1e4f901b519db578090fe7a17dd
|
[
"Apache-2.0"
] | 4
|
2015-05-14T18:59:44.000Z
|
2017-03-09T12:49:36.000Z
|
hemlock/load_scripts/doc_to_mongo.py
|
Lab41/Hemlock
|
2c53cfc11bfbe1e4f901b519db578090fe7a17dd
|
[
"Apache-2.0"
] | null | null | null |
hemlock/load_scripts/doc_to_mongo.py
|
Lab41/Hemlock
|
2c53cfc11bfbe1e4f901b519db578090fe7a17dd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2013 In-Q-Tel, Inc/Lab41, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch, os, sys, time, uuid
from pymongo import MongoClient
def mongo_server(server, port, database, collection):
    """Connect to MongoDB and return (client, database, collection).

    Exits the process on any connection failure.
    (Python 2 code: `print` statement.)
    """
    # connect to the mongo server (the original comment said "redis",
    # which appears to be a copy/paste leftover)
    try:
        m_server = MongoClient(server, port)
        m_database = m_server[database]
        m_collection = m_database[collection]
    except:
        # NOTE(review): bare except hides the real cause; consider
        # catching pymongo.errors.PyMongoError and logging it.
        print "Mongo server failure"
        sys.exit(0)
    return m_server, m_database, m_collection
def process_doc(input, m_server, m_database, m_collection):
    """Walk *input* for *.txt files and bulk-insert their text into Mongo.

    Documents are inserted in batches of 100 as {"doc": <utf-8 text>};
    files whose bytes are not valid UTF-8 are counted as failures and
    skipped. Progress is printed per batch.
    (Python 2 code: `print` statement, `unicode` builtin.)
    """
    matches = []
    docs = []
    # collect every *.txt file below the input directory
    for root, dirnames, filenames in os.walk(input):
        for filename in fnmatch.filter(filenames, '*.txt'):
            matches.append(os.path.join(root, filename))
    j = 0  # successfully decoded docs
    k = 0  # failed docs
    for file in matches:
        # flush a full batch of 100 pending docs before reading more
        if len(docs) % 100 == 0 and len(docs) > 0:
            m_collection.insert(docs)
            print str(j), "total docs."
            print str(k), "docs failed."
            docs = []
        # NOTE(review): the file handle is never closed explicitly
        doc = open(file, 'r').read()
        try:
            doc = unicode(doc, "utf-8")
            doc = {"doc": doc}
            docs.append(doc)
            j += 1
        except:
            k += 1
    # insert the final partial batch
    if len(docs) > 0:
        m_collection.insert(docs)
        print str(j), "total docs."
        print str(k), "docs failed."
def print_help():
    """Print usage for the command-line flags and exit the process."""
    print "-i \t<input path to files> (default is /mnt/)"
    print "-s \t<mongo server> (default is localhost)"
    print "-p \t<mongo port> (default is 27017)"
    print "-d \t<mongo database> (default is local)"
    print "-c \t<mongo collection> (default is collection)"
    print "-h \thelp\n"
    sys.exit(0)
def process_args(args):
    """Parse command-line flags into connection settings.

    Recognized flags, each expecting a following value: -i input path,
    -s server, -p port (integer), -d database, -c collection. An unknown
    flag, a missing value, or a non-integer port prints usage and exits
    via print_help().

    Returns (input, server, port, database, collection).
    """
    # defaults, keyed by flag
    opts = {
        "-i": "/mnt/",
        "-s": "localhost",
        "-p": 27017,
        "-d": "local",
        "-c": "collection",
    }
    pos = 0
    while pos < len(args):
        flag = args[pos]
        if flag not in opts:
            print_help()
        try:
            raw = args[pos + 1]
            opts[flag] = int(raw) if flag == "-p" else raw
            pos += 1
        except (IndexError, ValueError):
            print_help()
        pos += 1
    return opts["-i"], opts["-s"], opts["-p"], opts["-d"], opts["-c"]
def get_args():
    """Return a fresh list of the command-line arguments, program name excluded."""
    return list(sys.argv)[1:]
if __name__ == "__main__":
    # time the whole run, parse flags, connect, then load all documents
    start_time = time.time()
    args = get_args()
    input, server, port, database, collection = process_args(args)
    m_server, m_database, m_collection = mongo_server(server, port, database, collection)
    process_doc(input, m_server, m_database, m_collection)
    print "Took",time.time() - start_time,"seconds to complete."
| 29.640625
| 89
| 0.552978
| 481
| 3,794
| 4.27027
| 0.326403
| 0.026777
| 0.024343
| 0.01704
| 0.278481
| 0.242941
| 0.178676
| 0.178676
| 0.167478
| 0.126582
| 0
| 0.018182
| 0.333158
| 3,794
| 127
| 90
| 29.874016
| 0.793676
| 0.181075
| 0
| 0.363636
| 0
| 0
| 0.120466
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.020202
| null | null | 0.191919
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4bbd48777ac0547cad592918b3cc57a1efffc54a
| 677
|
py
|
Python
|
project euler/q50.py
|
milkmeat/thomas
|
fbc72af34267488d931a4885d4e19fce22fea582
|
[
"MIT"
] | null | null | null |
project euler/q50.py
|
milkmeat/thomas
|
fbc72af34267488d931a4885d4e19fce22fea582
|
[
"MIT"
] | null | null | null |
project euler/q50.py
|
milkmeat/thomas
|
fbc72af34267488d931a4885d4e19fce22fea582
|
[
"MIT"
] | null | null | null |
def listprime(max):
    """Sieve of Eratosthenes.

    Return a list of *max* booleans where the value at index i is True
    iff i is prime. *max* must be at least 2 (indices 0 and 1 are set
    explicitly).

    Bug fix: the original used range(max/2), which works on Python 2 but
    fails on Python 3 because / yields a float there and range() rejects
    it; floor division (//) is identical for ints on both versions.
    NOTE: the parameter name shadows the builtin max(); kept for
    backward compatibility with existing callers.
    """
    prime = [True] * max
    prime[0] = False
    prime[1] = False
    # Every composite below max has a divisor no larger than max // 2,
    # so sieving bases up to that bound is sufficient.
    for x in range(max // 2):
        if prime[x]:
            # cross off 2x, 3x, 4x, ... as composite
            bei = x + x
            while bei < max:
                prime[bei] = False
                bei += x
    return prime
def listprimenumber(lpn):
    """Return all primes below *lpn*, in increasing order."""
    sieve = listprime(lpn)
    return [candidate for candidate, is_prime in enumerate(sieve) if is_prime]
# Project Euler q50 exploration (Python 2 `print` statements):
# look for primes below 100 expressible as the sum of `count`
# consecutive primes.
b=listprimenumber(100)
print b
count=3
for x in range(len(b)-count):
    # NOTE(review): `sum` shadows the builtin sum()
    sum=0
    for y in range(count):
        sum+=b[x+y]
    # `sum in b` is an O(n) list scan per candidate
    if sum in b:
        print sum
#if b[x+0]+b[x+1]+b[x+2] in b:
#    print b[x],b[x+1],b[x+2]
| 22.566667
| 39
| 0.499261
| 107
| 677
| 3.158879
| 0.280374
| 0.04142
| 0.053254
| 0.097633
| 0.118343
| 0.035503
| 0
| 0
| 0
| 0
| 0
| 0.030162
| 0.363368
| 677
| 30
| 40
| 22.566667
| 0.75406
| 0.084195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4bbf1d6eb8af61adb06a84718e97dce8dddb1ac0
| 6,094
|
py
|
Python
|
spartify/settings.py
|
cl1ckname/Spartify
|
3c45236e3f8803af9d01ac638e3d10a834ab7b7d
|
[
"Apache-2.0"
] | 3
|
2021-07-26T15:43:20.000Z
|
2022-02-11T17:22:31.000Z
|
spartify/settings.py
|
cl1ckname/Spartify
|
3c45236e3f8803af9d01ac638e3d10a834ab7b7d
|
[
"Apache-2.0"
] | 2
|
2021-07-08T14:25:22.000Z
|
2021-08-19T18:17:14.000Z
|
spartify/settings.py
|
cl1ckname/Spartify
|
3c45236e3f8803af9d01ac638e3d10a834ab7b7d
|
[
"Apache-2.0"
] | 1
|
2021-08-19T18:17:48.000Z
|
2021-08-19T18:17:48.000Z
|
"""
Django settings for spartify project.
Generated by 'django-admin startproject' using Django 2.2.12.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from dotenv import load_dotenv

# Load variables from a local .env file into the process environment.
load_dotenv()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG defaults to 1 (on) when the env var is absent —
# confirm production deployments set DEBUG=0.
DEBUG = int(os.environ.get("DEBUG", default=1))
# NOTE(review): the '*' wildcard accepts any Host header, which defeats
# the protection ALLOWED_HOSTS provides — verify this is intended
# outside development.
ALLOWED_HOSTS = os.environ.get("DJANGO_ALLOWED_HOSTS",'').split(" ") + ['*', '192.168.43.72', '192.168.0.53', '0.0.0.0']
# Login via Django's model backend plus Spotify OAuth2.
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'social_core.backends.spotify.SpotifyOAuth2',
)
AUTH_USER_MODEL = 'backend.User'
SOCIAL_AUTH_USER_MODEL = 'backend.User'
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'channels',
    'social_django',
    'backend',
    'lobby'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # 'backend.middlewares.ApiMiddleware',
]
ROOT_URLCONF = 'spartify.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'spartify.wsgi.application'
# Channels (websocket) entry point.
ASGI_APPLICATION = 'spartify.routing.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Engine and credentials come from the environment; falls back to a
# local SQLite file for development.
DATABASES = {
    'default': {
        "ENGINE": os.environ.get("SQL_ENGINE", "django.db.backends.sqlite3"),
        "NAME": os.environ.get("SQL_DATABASE", os.path.join(BASE_DIR, "db.sqlite3")),
        "USER": os.environ.get("SQL_USER", "user"),
        "PASSWORD": os.environ.get("SQL_PASSWORD", "password"),
        "HOST": os.environ.get("SQL_HOST", "localhost"),
        "PORT": os.environ.get("SQL_PORT", "5432"),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
# NOTE(review): BASE_DIR is recomputed here with the identical
# expression as at the top of the file — redundant but harmless.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
STATIC_URL = 'staticfiles/'
STATIC_ROOT = os.path.join(BASE_DIR, STATIC_URL)
# Spotify OAuth credentials; os.environ[...] (no default) raises
# KeyError at import time when unset.
SOCIAL_AUTH_SPOTIFY_KEY = os.environ['SOCIAL_AUTH_SPOTIFY_KEY']
SOCIAL_AUTH_SPOTIFY_SECRET = os.environ['SOCIAL_AUTH_SPOTIFY_SECRET']
SOCIAL_AUTH_URL_NAMESPACE = 'social'
SOCIAL_AUTH_SPOTIFY_SCOPE = ['user-read-email','user-read-private', 'user-read-playback-state', 'user-modify-playback-state']
# SOCIAL_AUTH_LOGIN_REDIRECT_URL = 'http://{}/complete/spotify/' % os.getenv('HOST')
LOGIN_REDIRECT_URL = 'dashboard'
LOGIN_URL = 'login'
DEFAULT_AUTO_FIELD='django.db.models.AutoField'
SOCIAL_AUTH_PIPELINE = (
    'social_core.pipeline.social_auth.social_details',
    'social_core.pipeline.social_auth.social_uid',
    'social_core.pipeline.social_auth.auth_allowed',
    'social_core.pipeline.social_auth.social_user',
    'social_core.pipeline.user.get_username',
    'social_core.pipeline.user.create_user',
    'social_core.pipeline.social_auth.associate_user',
    'social_core.pipeline.social_auth.load_extra_data',
    'social_core.pipeline.user.user_details',
    'backend.pipeline.save_access_token', #save token on login,
)
QUEUE_SESSION_ID = 'queue'
# NOTE(review): Django documents SESSION_EXPIRE_AT_BROWSER_CLOSE as a
# boolean; 15 is truthy, so this behaves as True — confirm the intent.
SESSION_EXPIRE_AT_BROWSER_CLOSE = 15
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'api_formatter': {
            'format': '{username} -- {endpoint} -- {status_code:d}: {message}',
            'style': '{',
        },
        'lobby_formatter': {
            'format': '{id}--{username}: {message} -- {asctime}',
            'style': '{',
        },
    },
    'handlers': {
        'api_errors': {
            'class': 'logging.FileHandler',
            'filename': 'logs/api_errors.log',
            'formatter': 'api_formatter',
            'level': 'ERROR',
        },
    },
    'loggers':{
        'backend': {
            'handlers': ['api_errors'],
        },
    },
}
REDIS_HOST = os.environ.get("REDIS_HOST", '127.0.0.1')
REDIS_PORT = 6379
REDIS_DB = 0
CHANNEL_LAYERS = {
    'default': {
        'BACKEND': 'channels_redis.core.RedisChannelLayer',
        "CONFIG": {
            "hosts": [(REDIS_HOST, REDIS_PORT)],
        },
    }
}
| 29.019048
| 125
| 0.675583
| 696
| 6,094
| 5.718391
| 0.340517
| 0.052261
| 0.030151
| 0.04397
| 0.21809
| 0.173116
| 0.092211
| 0.092211
| 0.055276
| 0.025126
| 0
| 0.013723
| 0.174926
| 6,094
| 210
| 126
| 29.019048
| 0.777844
| 0.185264
| 0
| 0.041958
| 1
| 0
| 0.501417
| 0.34251
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.041958
| 0.013986
| 0
| 0.013986
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4bc28189f37d50450206554fd6ab1753bd171778
| 7,386
|
py
|
Python
|
getters/haproxy_haproxylogs.py
|
gunny26/datalogger
|
7bd29ab88f2e2749284d80a6a834c94c0955a7e0
|
[
"Apache-2.0"
] | null | null | null |
getters/haproxy_haproxylogs.py
|
gunny26/datalogger
|
7bd29ab88f2e2749284d80a6a834c94c0955a7e0
|
[
"Apache-2.0"
] | null | null | null |
getters/haproxy_haproxylogs.py
|
gunny26/datalogger
|
7bd29ab88f2e2749284d80a6a834c94c0955a7e0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
"""
Program to read and parse haproxylogs to put them in shape to upload to DataLogger
The input date schould be sorted by date, and finished
the uploaded data will immediately split into TimeseriesArray, so no further data
of this day could be appended
"""
import os
import sys
import gzip
import logging
logging.basicConfig(level=logging.DEBUG)
import datetime
import zlib
import requests
import StringIO
import argparse
# own modules
from datalogger import DataLoggerWeb as DataLoggerWeb
import tilak_haproxylog
def aggregator(index_keynames, value_keynames, ts_keyname, func, interval = 60 * 5):
    """
    Aggregate parsed log records into fixed-interval buckets.

    *func* is a zero-argument callable returning an iterator of
    (timestamp, fields) tuples (see parser_generator). Records are
    grouped by the tuple of *index_keynames* values and the
    *value_keynames* values are summed as integers. Once a record's
    timestamp exceeds the bucket start by more than *interval* seconds,
    one tab-separated line per group is yielded:
    "<ts>\t<index values>\t<summed values>".

    NOTE(review): whatever remains in *data* when the input is exhausted
    is never flushed, so the final partial interval is silently dropped —
    confirm this is intended.
    """
    data = {}
    ts = None
    #print ts_keyname
    for parsts, parsdata in func():
        #print parsdata
        #print parsdata["log_timestamp"]
        # the first record after a flush starts a new bucket
        if ts is None:
            ts = parsts
        key = tuple((parsdata[key] for key in index_keynames))
        values = tuple((int(parsdata[key]) for key in value_keynames))
        if key not in data:
            data[key] = values
        else:
            # element-wise sum of accumulated and new values
            data[key] = tuple((data[key][index] + int(values[index]) for index in range(len(values))))
        if parsts > (ts + interval):
            for keys, values in data.items():
                yield "%s\t%s\t%s" % (ts, "\t".join((str(index_key) for index_key in keys)), "\t".join((str(value_key) for value_key in values)))
            ts = None
            data = {}
def parser_generator(index_keynames, value_keynames, file_obj):
    """
    Build a parser for this set of parameters.

    Returns a zero-argument generator function that reads haproxy log
    lines from *file_obj* and yields (timestamp, fields) tuples, where
    fields maps every index and value key to its value for that line.
    """
    def inner():
        """Yield (ts, row) for every parseable line of file_obj."""
        for raw_line in file_obj:
            fields = tilak_haproxylog.parse_line(raw_line)
            if fields is None:
                continue
            fields["hits"] = 1
            # make sure every requested value column is present
            for value_key in value_keynames:
                fields.setdefault(value_key, 0)
            # bucket the HTTP status code into rsp_1xx..rsp_5xx / rsp_other
            status_code = int(fields["status_code"])
            hundreds = status_code // 100
            if 1 <= hundreds <= 5:
                fields["rsp_%dxx" % hundreds] = 1
            else:
                fields["rsp_other"] = 1
            row = {index_key: fields[index_key] for index_key in index_keynames}
            row.update({value_key: fields[value_key] for value_key in value_keynames})
            yield (fields["ts"], row)
    return inner
def generate_datalogger_csv(logdir, datestring, keys, values, ts_keyname):
    """
    create CSV like file with StringIO

    Parses haproxylog_<datestring>.gz under *logdir* and returns a
    StringIO holding a tab-separated body: a header row followed by the
    aggregated rows. Returns None when *datestring* is today (that log
    is still being written); on IOError the exception is logged and a
    buffer containing only the header row is returned.
    (Python 2 code: str-based StringIO module.)
    """
    if datestring == datetime.date.today().isoformat():
        logging.error("todays Logs are actually written and cannot used in datalogger")
        return
    headers = [ts_keyname, ] + list(keys) + list(values)
    linebuffer = []
    linebuffer.append("\t".join(headers))
    filename = os.path.join(logdir, "haproxylog_%s.gz" % datestring)
    logging.info("parsing file %s", filename)
    try:
        parser = parser_generator(keys, values, gzip.open(filename, "rb"))
        for line in aggregator(keys, values, ts_keyname, parser):
            linebuffer.append(line)
    except IOError as exc:
        logging.exception(exc)
    # NOTE(review): the gzip handle above is never closed explicitly
    return StringIO.StringIO("\n".join(linebuffer))
def datestring_to_date(datestring):
    """Parse an ISO 'YYYY-MM-DD' string into a datetime.date object."""
    year, month, day = (int(part) for part in datestring.split("-"))
    return datetime.date(year, month, day)
def datewalk(datestring1, datestring2):
    """Yield isoformat date strings from *datestring1* (inclusive) up to
    *datestring2* (exclusive), one day at a time.

    Asserts that the second date lies strictly after the first.
    """
    current = datestring_to_date(datestring1)
    stop = datestring_to_date(datestring2)
    assert stop > current
    one_day = datetime.timedelta(1)
    while current < stop:
        yield current.isoformat()
        current += one_day
def main():
    """
    Command-line entry point: upload day-sorted haproxy logs to DataLogger.

    For every day in the requested range (--startdate/-s, or --back/-b
    days before today, up to --enddate/-e which defaults to yesterday)
    the day's log is aggregated to TSV and POSTed to the DataLogger web
    application, unless raw data for that day is already cached.
    (Python 2 code: `print` statement, StandardError.)
    """
    yesterday_datestring = (datetime.date.today() - datetime.timedelta(1)).isoformat()
    parser = argparse.ArgumentParser(description='generate TimeseriesArrays on local backend')
    parser.add_argument('--url', default="https://datalogger-api.tirol-kliniken.cc/DataLogger", help="url of DataLogger Webapplication")
    parser.add_argument('--logdir', default="/data1/haproxy_daily/", help="directory where to find day sorted haproxylogs")
    parser.add_argument("-b", '--back', help="how many days back from now")
    parser.add_argument("-s", '--startdate', help="start date in isoformat YYY-MM-DD")
    parser.add_argument("-e", '--enddate', default=yesterday_datestring, help="stop date in isoformat YYY-MM-DD")
    parser.add_argument("-q", '--quiet', action='store_true', help="set to loglevel ERROR")
    parser.add_argument("-v", '--verbose', action='store_true', help="set to loglevel DEBUG")
    args = parser.parse_args()
    if args.quiet is True:
        logging.getLogger("").setLevel(logging.ERROR)
    if args.verbose is True:
        logging.getLogger("").setLevel(logging.DEBUG)
    # exactly one of -b / -s must be given (XOR via == of the two tests)
    if (args.back is not None) == (args.startdate is not None):
        logging.error("option -b and -e are mutual exclusive, use only one")
        sys.exit(1)
    startdate = None
    if args.back is not None:
        startdate = (datetime.date.today() - datetime.timedelta(int(args.back))).isoformat()
    elif args.startdate is not None:
        startdate = args.startdate
    else:
        logging.error("you have to provide either -b or -s")
        sys.exit(1)
    # lets get started
    datalogger = DataLoggerWeb(args.url)
    project = "haproxy"
    tablename = "http_host"
    baseurl = "%s/upload_raw_file/" % args.url
    logdir = args.logdir # where to find haproxy logs
    keys = ("http_host", )
    values = ("bytes_read", "rsp_1xx", "rsp_2xx", "rsp_3xx", "rsp_4xx", "rsp_5xx", "rsp_other", "srv_queue", "backend_queue", "actconn", "feconn", "beconn", "srv_conn", "retries", "tq", "tw", "tc", "tr", "tt", "hits")
    ts_keyname = "ts"
    for datestring in datewalk(startdate, args.enddate):
        # skip days whose raw data is already available server-side
        caches = datalogger.get_caches(project, tablename, datestring)
        if caches["tsa"]["raw"] is not None:
            logging.info("Skipping this datestring, raw data is already available")
            continue
        try:
            stringio = generate_datalogger_csv(logdir, datestring, keys, values, ts_keyname)
            #upload data
            files = {'myfile': stringio}
            url = "/".join((baseurl, project, tablename, datestring))
            logging.info("calling %s", url)
            response = requests.post(url, files=files)
            print response.content
        except StandardError as exc:
            # NOTE(review): "%si" looks like a typo for "%s,"; the caught
            # exception `exc` is also not included in the log message.
            logging.error("Exception on file datestring %si, skipping this date", datestring)
        except zlib.error as exc:
            logging.error(exc)
if __name__ == "__main__":
    main()
| 40.80663
| 217
| 0.627268
| 922
| 7,386
| 4.916486
| 0.321041
| 0.013898
| 0.026252
| 0.011913
| 0.144717
| 0.107876
| 0.056034
| 0.041915
| 0.041915
| 0
| 0
| 0.012748
| 0.256566
| 7,386
| 180
| 218
| 41.033333
| 0.812785
| 0.019496
| 0
| 0.081481
| 0
| 0
| 0.152127
| 0.00326
| 0
| 0
| 0
| 0
| 0.007407
| 0
| null | null | 0
| 0.081481
| null | null | 0.007407
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4bc441d80eb90948270b0c67a69acd09e054bf96
| 793
|
py
|
Python
|
packages/jobs/statuses.py
|
NHSDigital/list-reconciliation
|
37b1ebe99a64275e23b0e7fb6a89415b92d14306
|
[
"MIT"
] | 4
|
2021-06-25T08:28:54.000Z
|
2021-12-16T11:03:42.000Z
|
packages/jobs/statuses.py
|
NHSDigital/list-reconciliation
|
37b1ebe99a64275e23b0e7fb6a89415b92d14306
|
[
"MIT"
] | 184
|
2021-06-24T15:27:08.000Z
|
2022-03-17T12:44:28.000Z
|
packages/jobs/statuses.py
|
NHSDigital/list-reconciliation
|
37b1ebe99a64275e23b0e7fb6a89415b92d14306
|
[
"MIT"
] | 3
|
2021-11-05T10:21:44.000Z
|
2022-03-04T14:29:24.000Z
|
from enum import Enum
class JobStatus(Enum):
    """Lifecycle states a job moves through, stored as their own string values."""
    PENDING = "PENDING"
    REJECTED = "REJECTED"
    SENT_TO_DPS = "SENT_TO_DPS"
    PROCESSED_BY_DPS = "PROCESSED_BY_DPS"
    COMPLETE = "COMPLETE"
    NOTIFIED_VALIDATION_FAILED = "NOTIFIED_VALIDATION_FAILED"
    CLEANED_UP = "CLEANED_UP"
class ParseStatus(Enum):
    """States for input that has not (yet) been parsed successfully."""
    NOT_PARSED = "NOT_PARSED"
    PARSE_FAILED = "PARSE_FAILED"
class InvalidErrorType(Enum):
    """Categories of validation failure (records, structure, filename)."""
    RECORDS = "INVALID_RECORDS"
    STRUCTURE = "INVALID_STRUCTURE"
    FILENAME = "INVALID_FILENAME"
class InputFolderType(Enum):
    """Folder prefixes files are routed to (note the trailing slashes)."""
    IN = "inbound/"
    PASS = "pass/"
    FAIL = "fail/"
    RETRY = "retry/"
    REJECTED = "rejected/"
class RegistrationType(Enum):
    """Which single system a registration record exists in (GP vs PDS)."""
    GP = "OnlyOnGP"
    PDS = "OnlyOnPDS"
class JobNotFound(Exception):
    """Raised when the requested job does not exist."""
| 18.880952
| 61
| 0.675914
| 85
| 793
| 6.058824
| 0.494118
| 0.062136
| 0.034951
| 0.066019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.215637
| 793
| 41
| 62
| 19.341463
| 0.827974
| 0.029004
| 0
| 0
| 0
| 0
| 0.269634
| 0.034031
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.038462
| 0.038462
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
4bc544909f30548d56d19ceee6f586966f0cd714
| 843
|
py
|
Python
|
DevOps/Data_Science_in_Production/cap02/15_scikit-learn.py
|
unimauro/Courses
|
81e5b9c4cbc9b875eff82f96bda7d21ec4f258b2
|
[
"Apache-2.0"
] | 1
|
2020-07-25T04:56:55.000Z
|
2020-07-25T04:56:55.000Z
|
DevOps/Data_Science_in_Production/cap02/15_scikit-learn.py
|
unimauro/Courses
|
81e5b9c4cbc9b875eff82f96bda7d21ec4f258b2
|
[
"Apache-2.0"
] | 2
|
2020-06-15T04:42:00.000Z
|
2021-08-29T03:48:28.000Z
|
DevOps/Data_Science_in_Production/cap02/15_scikit-learn.py
|
unimauro/Courses
|
81e5b9c4cbc9b875eff82f96bda7d21ec4f258b2
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
from sklearn.linear_model import LogisticRegression
import mlflow
import mlflow.sklearn
import flask
# Path of the persisted scikit-learn model tracked by MLflow.
model_path = "models/logit_games_v1"
# Load the model once at startup so every request reuses it.
# Fix: this assignment was split across two lines ("model" / "= ...")
# without a line continuation, which is a SyntaxError; rejoined into a
# single statement.
model = mlflow.sklearn.load_model(model_path)
# Flask application serving the prediction endpoint.
app = flask.Flask(__name__)
@app.route("/", methods=["GET","POST"])
def predict():
    """Prediction endpoint.

    Reads query parameters G1..G10, feeds them to the model as a single
    DataFrame row and returns JSON with "response" (string probability
    of the positive class) and "success". "success" stays False when G1
    is absent.
    """
    data = {"success": False}
    params = flask.request.args
    if "G1" in params.keys():
        # assemble one feature row from the ten query parameters
        new_row = {}
        for i in range(1, 11):
            key = "G{0}".format(i)
            new_row[key] = params.get(key)
        frame = pd.DataFrame.from_dict(new_row,
            orient = "index").transpose()
        data["response"] = str(model.predict_proba(frame)[0][1])
        data["success"] = True
    return flask.jsonify(data)
if __name__ == '__main__':
    # Bind on all interfaces (Flask's default port).
    app.run(host='0.0.0.0')
| 31.222222
| 58
| 0.688019
| 133
| 843
| 4.180451
| 0.473684
| 0.161871
| 0.010791
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039012
| 0.087782
| 843
| 26
| 59
| 32.423077
| 0.684005
| 0
| 0
| 0
| 0
| 0
| 0.136418
| 0.024911
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.192308
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
2991579a0641f47ea260ec96e0a53c12f4df3dbf
| 342
|
py
|
Python
|
authors/apps/author_follows/urls.py
|
andela/ah-backend-dojo
|
f2b14f15c4af906da846cafe722f13868d58371f
|
[
"BSD-3-Clause"
] | 3
|
2019-05-01T10:41:09.000Z
|
2021-04-25T22:17:20.000Z
|
authors/apps/author_follows/urls.py
|
andela/ah-backend-dojo
|
f2b14f15c4af906da846cafe722f13868d58371f
|
[
"BSD-3-Clause"
] | 24
|
2019-04-23T14:56:21.000Z
|
2021-12-13T19:58:37.000Z
|
authors/apps/author_follows/urls.py
|
andela/ah-backend-dojo
|
f2b14f15c4af906da846cafe722f13868d58371f
|
[
"BSD-3-Clause"
] | 4
|
2019-06-29T10:40:32.000Z
|
2022-01-04T11:44:53.000Z
|
from django.urls import path
from .views import FollowStatsViews, AuthorFollowViews
# Route table for the author follow/follower endpoints.
urlpatterns = [
    # /authors/<followers|following>/ — follow statistics listing
    path("<str:follow_state>/", FollowStatsViews.as_view(), name="follows"),
    # /authors/<username>/follow/ — follow action on the given author
    path("<str:username>/follow/", AuthorFollowViews.as_view(), name="follow")
]
| 34.2
| 78
| 0.719298
| 37
| 342
| 6.540541
| 0.594595
| 0.057851
| 0.082645
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122807
| 342
| 10
| 79
| 34.2
| 0.806667
| 0.204678
| 0
| 0
| 0
| 0
| 0.2
| 0.081481
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
29978909888062a7973e1bdbe5b82311fd8d9b27
| 6,173
|
py
|
Python
|
main.py
|
ml4design/text-processing-module
|
f1bfe1a49d58156e9e48e5ef69b980f89a5981ea
|
[
"MIT"
] | null | null | null |
main.py
|
ml4design/text-processing-module
|
f1bfe1a49d58156e9e48e5ef69b980f89a5981ea
|
[
"MIT"
] | null | null | null |
main.py
|
ml4design/text-processing-module
|
f1bfe1a49d58156e9e48e5ef69b980f89a5981ea
|
[
"MIT"
] | null | null | null |
import pandas as pd
from preprocessing import preprocess
from wordclouds import wordcloud, find_by_word
from sentiment_analysis import calculate_sentiment, find_by_sentiment
import nltk
import os
import tempfile
from topic_modelling import lda_topic_model, show_topics, show_example_sentences_by_topic
os.environ["MPLCONFIGDIR"] = tempfile.gettempdir()
nltk.download('punkt')
nltk.download('stopwords')
pd.set_option('display.max_columns', None)
#####################################################
# READING THE DATA #
#####################################################
# In this tutorial we will mostly deal with comma separated files (CSV) (similar to the structure of Excel files). Each line of the file is a data record. Each record consists of one or more fields, separated by commas. Check here for more information https://en.wikipedia.org/wiki/Comma-separated_values
# reads the file named "students_eng.csv".
# If you want to read a different file you need to (1) upload it in replit and (2) change "students_eng.csv" to the name of the newly uploaded file. Here we use the Pandas library ("pd") to read our file and in return we get a Pandas Dataframe. For faster processing and experimentation you can also select different subsets of the file's content through the nrows parameter -> number of lines to read.
students_data = pd.read_csv("data/students_eng.csv")
# With the next line you can print the data you just read and see how a Pandas Dataframe looks like (seems quite similar to Excel)
print(students_data.head(3))
# As you can see the data is separated in columns. Let's see how we can get the data from a specific column. The following line allows us to get only the data inside the column named "students_needs". Other options are: study_programme, degree_programme, planned_grad_year, today_feeling, physical_health, student_needs, students_sugg_to_improve_wellbeing
students_data = students_data['student_needs']
#################################################
# TEXT PREPROCESSING #
#################################################
# Here we will pre-process our entire text collection.
# First, we need to merge all the different lines of the "comments" into one big corpus, so that we can later analyze it.
corpus = students_data.to_list()
print(corpus[0:5])
# Then we need to "preprocess" our text. To do so we use the following line of code (more details on what happens under the hood could be found in the "preprocessing.py" file - feel free to take a look at it).
# The following code: makes all words lowercase, create word tokens, removes stopwords, punctuations, and digits, and reduces inflected words to their word stem (stemming).Feel free to experiment by turning any of the following values from True to False. In addition, you can add extra words which you do not want to include in your analysis by adding them within the extra_stopwords brackets e.g. extra_stopwords=["people"] would remove the word people from everywhere in the document. Hint: don't forget to use the quotes!
# tokens = [preprocess(sentence, lower=True, rem_punc=True, word_tokenization=True, rem_numb=True, rem_stopwords=True, stem=True, extra_stopwords = []) for sentence in students_data.to_list()]
# print(tokens)
#############################################
# WORD FREQUENCIES #
#############################################
# Word frequencies calculation is the most basic tool in text processing yet it gives a comprehensive picture of the content in your text collection. One the most ways to visualize word frequencies is WordCloud (which you've already seen if you opened Voyant)
# This function needs two things from you:
# 1. tokens -- the result of our preprocessing step
# 2. the name of the picture it will generate and save to your directory
# 3. Number of words to show
# wordcloud(words = tokens, name_of_output = 'wordcloud', num = 10)
# Text processing often requires working with examples, because words are often contextual and it is difficult to understand what is happening in your text collection. For this purpose, you can find documents by pieces of texts.
# This function needs two things from you:
# 1. tokens -- the result of our preprocessing step (it will look for examples in this collection)
# 2. a word or a phrase the text should include
# test = find_by_word(tokens, 'studi')
#print(test)
#############################################
# Sentiment analysis #
#############################################
# The aim of sentiment analysis is to calculate how emotional your texts are and what the valence of these texts is. In our example we use VADER (Valence Aware Dictionary and sEntiment Reasoner), but you can find various other sentiment analysis tools on the internet.
# VADER calculates how positive, neutral, and negative a text is. It also calculates a compound score which considers all three metrics to give you a precise measurement of the sentiment.
# This function requires only the preprocessed collection of texts
# sent_result = calculate_sentiment(tokens)
# print(sent_result)
# Now, when the sentiment scores are calculated, you can find the most interesting texts by looking at the documents with highest scores (in this example, we look at the 5 most positive documents).
# This function requires three things:
# 1. The result of sentiment calculation
# 2. What score you're interested in
# 3. Number of examples you want to get
# res = find_by_sentiment(df_with_scores = sent_result, score_type = 'pos', num_of_examples = 5)
# print(res)
#############################################
# TOPIC MODELING #
#############################################
# num_of_topics = 4
# word_num_per_topic = 5
# lda_model = lda_topic_model(tokens, topic_num=num_of_topics)
# show_topics(lda_model, word_num_per_topic )
# Check examples assigned to a particular topic ####
# num_of_examples = 5
# show_example_sentences_by_topic(corpus, tokens, lda_model, word_num_per_topic,topic_to_check=1, num_of_examp_to_show = num_of_examples)
| 55.116071
| 524
| 0.706302
| 907
| 6,173
| 4.699008
| 0.363837
| 0.009385
| 0.009855
| 0.010558
| 0.067574
| 0.044111
| 0.033318
| 0.033318
| 0.033318
| 0.033318
| 0
| 0.004071
| 0.164426
| 6,173
| 112
| 525
| 55.116071
| 0.822218
| 0.794751
| 0
| 0
| 0
| 0
| 0.109267
| 0.029046
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.470588
| 0
| 0.470588
| 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
299d93368abb2922353eb3246c80b4d0b6d61d48
| 390
|
py
|
Python
|
awx/main/migrations/0112_deployhistory_date.py
|
Pavloid21/awx
|
224827f6060013b996eb8210597bca68cda65d40
|
[
"Apache-2.0"
] | null | null | null |
awx/main/migrations/0112_deployhistory_date.py
|
Pavloid21/awx
|
224827f6060013b996eb8210597bca68cda65d40
|
[
"Apache-2.0"
] | null | null | null |
awx/main/migrations/0112_deployhistory_date.py
|
Pavloid21/awx
|
224827f6060013b996eb8210597bca68cda65d40
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.8 on 2020-03-25 13:50
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a timestamp column to the DeployHistory model.

    dependencies = [
        ('main', '0111_auto_20200325_1311'),
    ]

    operations = [
        migrations.AddField(
            model_name='deployhistory',
            name='date',
            # auto_now=True: Django sets the field to the current time on
            # every save(), so this records the last-modified time.
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| 20.526316
| 54
| 0.602564
| 42
| 390
| 5.47619
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0.284615
| 390
| 18
| 55
| 21.666667
| 0.713262
| 0.115385
| 0
| 0
| 1
| 0
| 0.12828
| 0.067055
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
299f9a135fb0ac674c3200f9214021f3cf9fd561
| 920
|
py
|
Python
|
policy.py
|
shantanusingh16/Pytorch-DQN
|
b7d3270e9e345e85e5c5a5216109529879ab77bd
|
[
"MIT"
] | 4
|
2018-09-23T19:58:24.000Z
|
2022-03-22T20:32:36.000Z
|
policy.py
|
shantanusingh16/Pytorch-DQN
|
b7d3270e9e345e85e5c5a5216109529879ab77bd
|
[
"MIT"
] | null | null | null |
policy.py
|
shantanusingh16/Pytorch-DQN
|
b7d3270e9e345e85e5c5a5216109529879ab77bd
|
[
"MIT"
] | 2
|
2019-05-22T06:02:38.000Z
|
2019-10-18T17:08:24.000Z
|
import numpy as np
import torch
from utils.helpers import process_state, device
def make_epsilon_greedy_policy(estimator, nA):
    """Build an epsilon-greedy policy around a Q-value estimator.

    :param estimator: model that returns q values for a given state/action pair
    :param nA: number of actions in the environment
    :return: A function that takes in a state and an epsilon and returns probs for each
             action in the form of a numpy array of length nA
    """
    def policy_fn(state, epsilon):
        """
        :param state: tensor of b x 1 x 84 x 84
        :param epsilon: exploration probability in [0, 1]
        :return: action probabilities, of size b x nA
        """
        # Start with the exploration mass: epsilon spread uniformly over actions.
        A = torch.ones(nA) * epsilon / nA
        # Add a batch dimension and scale raw pixel values into [0, 1].
        state = torch.from_numpy(state).float().to(device).unsqueeze(0) / 255.0
        q_vals = estimator.forward(state)  # shape: b x nA
        # BUG FIX: argmax must reduce over the action dimension (dim=1), not the
        # batch dimension (dim=0). With dim=0 this returned one index per action
        # column (all zeros for b=1), crediting the wrong entries of A.
        best_action = torch.argmax(q_vals, dim=1).unsqueeze(-1)  # b
        # Give the remaining probability mass (1 - epsilon) to the greedy action.
        A[best_action] += (1.0 - epsilon)
        return A
    return policy_fn
| 36.8
| 87
| 0.646739
| 139
| 920
| 4.201439
| 0.467626
| 0.030822
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020741
| 0.266304
| 920
| 25
| 88
| 36.8
| 0.844444
| 0.397826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.25
| 0
| 0.583333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
29a7a6484cb6277e0cdd34fa9a54d64187a477f7
| 1,082
|
py
|
Python
|
matrix_multiplication_evolution_example.py
|
bobbywlindsey/stokepy
|
e7f4ad0c27d8fc22129558db6ae0dbbab0627ace
|
[
"MIT"
] | 9
|
2017-05-09T20:00:10.000Z
|
2020-07-02T18:00:22.000Z
|
matrix_multiplication_evolution_example.py
|
bobbywlindsey/stokepy
|
e7f4ad0c27d8fc22129558db6ae0dbbab0627ace
|
[
"MIT"
] | null | null | null |
matrix_multiplication_evolution_example.py
|
bobbywlindsey/stokepy
|
e7f4ad0c27d8fc22129558db6ae0dbbab0627ace
|
[
"MIT"
] | 2
|
2017-08-10T14:47:07.000Z
|
2019-01-25T02:37:34.000Z
|
# Example: solve a 1-D finite Markov chain with stokepy's
# Matrix Multiplication Evolution method and inspect the results.
import stokepy as sp
import numpy as np

# instantiate class
fmc = sp.FiniteMarkovChain()

# create initial distribution vector (all mass starts in the middle state)
phi = np.array([0, 0, 1, 0, 0])

# generate Markov chain with no boundary conditions
fmc.gen_from_params(phi, p = 0.6, num_states = 5, dim = 1)

# apply boundary condition: absorbing, reflecting, semi-reflecting
# only works for 1 dimension Markov chains at the moment
fmc.apply_boundary_condition(condition='absorbing')

# choose solution method like Matrix Multiplication Evolution
matrx_mult_evo = sp.MatrixMultiplicationEvolution(fmc, phi, steps = 2000,\
                                                  rec_class_states = [])

# run the solution
matrx_mult_evo.run()

# get data from the run
average_distribution   = matrx_mult_evo.pi
tpdf                   = matrx_mult_evo.tpdf
absorption_proportions = matrx_mult_evo.absorption_proportions
apbrc                  = matrx_mult_evo.recurrent_class_absorbed_proportions
mean_absorption_time   = matrx_mult_evo.mean_absorption_time

# plot absorption times for recurrent classes
matrx_mult_evo.plot_absorption()
| 33.8125
| 76
| 0.750462
| 145
| 1,082
| 5.365517
| 0.531034
| 0.092545
| 0.123393
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015855
| 0.183919
| 1,082
| 31
| 77
| 34.903226
| 0.865232
| 0.337338
| 0
| 0
| 1
| 0
| 0.01273
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
29ac9c03bbaa51b34d7d739bc8607fc9dd0af610
| 309
|
py
|
Python
|
main.py
|
yaojenkuo/stockflow
|
946609c2fcc1d602032672b57ae7119b4cadae8d
|
[
"MIT"
] | 33
|
2015-03-08T00:43:37.000Z
|
2021-02-18T23:40:05.000Z
|
main.py
|
Asoul/stockflow
|
946609c2fcc1d602032672b57ae7119b4cadae8d
|
[
"MIT"
] | null | null | null |
main.py
|
Asoul/stockflow
|
946609c2fcc1d602032672b57ae7119b4cadae8d
|
[
"MIT"
] | 25
|
2015-03-07T15:57:23.000Z
|
2021-07-05T01:32:32.000Z
|
#!/bin/python
# -*- coding: utf-8 -*-
'''Basic example template.'''
import sys
from ctrls.Tester import Tester
from models.exampleModel import exampleModel


def main():
    # Run the example model against one stock through the Tester harness.
    numbers = ['1314']  # stock ticker numbers
    tester = Tester(numbers, exampleModel)  # use the Tester component
    tester.run()  # run the simulation


if __name__ == '__main__':
    sys.exit(main())
| 17.166667
| 50
| 0.647249
| 37
| 309
| 5.189189
| 0.648649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02008
| 0.194175
| 309
| 17
| 51
| 18.176471
| 0.751004
| 0.18123
| 0
| 0
| 0
| 0
| 0.049383
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.333333
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
29b119e99bde0832d57541650801a62ec77c42f6
| 1,017
|
py
|
Python
|
jisho_api/word/cfg.py
|
finia2NA/jisho-api
|
c80beb44a7b70f24e799cd2a7d579356c58f8625
|
[
"Apache-2.0"
] | 26
|
2021-10-05T03:54:33.000Z
|
2022-03-26T10:46:31.000Z
|
jisho_api/word/cfg.py
|
finia2NA/jisho-api
|
c80beb44a7b70f24e799cd2a7d579356c58f8625
|
[
"Apache-2.0"
] | 7
|
2021-11-22T00:43:30.000Z
|
2022-01-12T00:34:22.000Z
|
jisho_api/word/cfg.py
|
finia2NA/jisho-api
|
c80beb44a7b70f24e799cd2a7d579356c58f8625
|
[
"Apache-2.0"
] | 4
|
2021-12-08T13:41:07.000Z
|
2022-03-25T20:54:07.000Z
|
from enum import Enum
from typing import List, Optional
from pydantic import BaseModel, HttpUrl
class Sense(BaseModel):
    """One sense (meaning) of a dictionary word entry."""

    class Link(BaseModel):
        # A hyperlink attached to a sense.
        text: str
        url: HttpUrl

    class Source(BaseModel):
        # Source-language information for a sense (e.g. for loanwords) —
        # only the language name is modeled here.
        language: str

    english_definitions: List[str]
    parts_of_speech: List[Optional[str]]
    links: List[Link]
    tags: List[str]
    restrictions: List[str]
    see_also: List[str]
    antonyms: List[str]
    source: List[Source]
    info: List[str]
class Japanese(BaseModel):
    """One written form / reading pair for a word entry."""

    # Japanese Word - full fledged kanji.
    # Is optional because there are words that are just kana.
    word: Optional[str]
    # Kana reading.
    reading: Optional[str]

    @property
    def name(self):
        """Return the kanji form when present, otherwise the kana reading.

        BUG FIX: the original getter was declared as ``def name():`` with no
        ``self`` parameter while its body referenced ``self.word`` — any access
        to the property raised a TypeError.
        """
        if self.word:
            return self.word
        return self.reading
class WordConfig(BaseModel):
    """Top-level model for a single word result."""

    slug: str
    is_common: Optional[bool]
    tags: List[str]
    jlpt: List[str]
    japanese: List[Japanese]
    senses: List[Sense]

    def __iter__(self):
        # Iterating over a word yields its senses.
        yield from self.senses
| 19.941176
| 60
| 0.647984
| 127
| 1,017
| 5.11811
| 0.448819
| 0.086154
| 0.033846
| 0.055385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.26647
| 1,017
| 50
| 61
| 20.34
| 0.871314
| 0.100295
| 0
| 0.057143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.085714
| 0
| 0.828571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.