Dataset Viewer (First 5GB)
Auto-converted to Parquet Duplicate
query
stringlengths
9
9.05k
document
stringlengths
10
222k
metadata
dict
negatives
sequencelengths
30
30
negative_scores
sequencelengths
30
30
document_score
stringlengths
4
10
document_rank
stringclasses
2 values
Write the concordance entries to the output file(filename) See sample output files for format.
def write_concordance(self, filename): all_keys = self.concordance_table.get_all_keys() lines = [] for i in all_keys: a = "" a += i + ":" f = self.concordance_table.get_value(i) if f != None: for s in f: a += " " + str(s) a += "\n" lines.append(a) a = open(filename, "w+") for i in lines: a.write(i) a.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_concordance(self, filename):\n out = ''\n values = [x for x in self.concordance_table.hash_table if x is not None]\n values.sort(key=lambda x: x[0])\n for v in values:\n out += f'{v[0]}: {\" \".join(str(x) for x in sorted(set(v[1])))}\\n' \n with open(filenam...
[ "0.7794726", "0.66742295", "0.64932483", "0.64526165", "0.6379942", "0.63655496", "0.63634735", "0.62910575", "0.6240714", "0.6233921", "0.6233921", "0.6233921", "0.61785156", "0.61412483", "0.61257005", "0.610843", "0.6082861", "0.60720426", "0.6064205", "0.60603034", "0.598...
0.7876976
0
Builds a kfactor circulant matrix (A matrix with the structure of circulant matrices, but with the entries above the diagonal multiplied by the same factor.) The matrix is store in memory.
def factor_circulant_matrix(x, k): n=len(x) return circulant(x) * (tri(n,n, 0) + k*np.transpose(tri(n,n, -1)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_k_matrix(self):\r\n K = self.uv_vol + self.Epsilon * self.guv_vol + \\\r\n (self.Epsilon / self.Beta) * self.uv_bound\r\n return K", "def _K(m):\n M = m*(m - 1)/2\n K = np.zeros((M, m**2), dtype=np.int64)\n row = 0\n for j in range(1, m):\n col = (j - 1)*m + j...
[ "0.6495986", "0.6089255", "0.6045119", "0.59890914", "0.5949488", "0.59035623", "0.5859298", "0.58462423", "0.57634705", "0.574443", "0.5730508", "0.5717386", "0.56819576", "0.566873", "0.5568253", "0.55545205", "0.5523086", "0.55172205", "0.5492196", "0.5491694", "0.5478032"...
0.78092545
0
Compute the matrixvector product y = Cu where C is a kfactor circulant matrix All matrices are real
def factor_circulant_multiplication(u, x, k=1): n = len(u) D_k = (k**(1/n))**np.arange(0,n) Lambda = fft(D_k*x) return (1/D_k)*real(ifft(Lambda*fft(D_k*u))) # y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateC(A, U, B):\n \n m_dim = A.shape[1] \n q_dim = B.shape[0]\n \n C_tensor = np.zeros((m_dim, m_dim, q_dim), dtype=np.complex)\n \n for k in range(q_dim):\n A_k = A[:, :, k]\n b_k = B[k]\n \n x_hat = U @ b_k\n y_hat = A_k.conj().T @ x_hat\n \...
[ "0.6325033", "0.6273725", "0.6251581", "0.62479377", "0.6177961", "0.6087597", "0.6022537", "0.60215706", "0.6020421", "0.60090333", "0.6000697", "0.5998053", "0.59429264", "0.59204763", "0.58713275", "0.5850264", "0.5813686", "0.57964927", "0.57901424", "0.57262236", "0.5726...
0.693636
0
Solves Tx=b using the Levinson algorithm where T is apositivedefinite symmetric Toeplitz matrix b is a real vector
def levinson(r, b): n = len(b) y = zeros((n,)) x = zeros((n,)) # normalize the system so that the T matrix has diagonal of ones r_0 = r/r[0] b_0 = b/r[0] if n == 1: return b_0 y[0] = -r_0[1] x[0] = b_0[0] beta = 1 alpha = -r_0[1] for k in range(0,n-1): beta = (1 - alpha*alpha)*beta mu = (b_0[k+1] - dot(r_0[1:k+2], x[k::-1])) /beta x[0:k+1] = x[0:k+1] + mu*y[k::-1] x[k+1] = mu if k < n-2: alpha = -(r_0[k+2] + dot(r_0[1:k+2], y[k::-1]))/beta y[0:k+1] = y[0:k+1] + alpha * y[k::-1] y[k+1] = alpha return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _tridisolve(d, e, b, overwrite_b=True):\n\t\tN = len(b)\n\t\t# work vectors\n\t\tdw = d.copy()\n\t\tew = e.copy()\n\t\tif overwrite_b:\n\t\t\tx = b\n\t\telse:\n\t\t\tx = b.copy()\n\t\tfor k in range(1, N):\n\t\t\t# e^(k-1) = e(k-1) / d(k-1)\n\t\t\t# d(k) = d(k) - e^(k-1)e(k-1) / d(k-1)\n\t\t\tt = ew[ k - 1 ]\n...
[ "0.63466734", "0.61827254", "0.61033237", "0.6093494", "0.60769826", "0.5885008", "0.58844715", "0.5877297", "0.58737326", "0.58588946", "0.5838278", "0.5794063", "0.57753825", "0.5773156", "0.5763559", "0.57562786", "0.574674", "0.57452273", "0.57390094", "0.57179475", "0.56...
0.7257071
0
Compute the log determinant of a positivedefinite symmetric toeplitz matrix. The determinant is computed recursively. The intermediate solutions of the Levinson recursion are expolited.
def toeplitz_slogdet(r): n = len(r) r_0 = r[0] r = np.concatenate((r, np.array([r_0]))) r /= r_0 # normalize the system so that the T matrix has diagonal of ones logdet = n*np.log(np.abs(r_0)) sign = np.sign(r_0)**n if n == 1: return (sign, logdet) # now on is a modification of Levinson algorithm y = zeros((n,)) x = zeros((n,)) b = -r[1:n+1] r = r[:n] y[0] = -r[1] x[0] = b[0] beta = 1 alpha = -r[1] d = 1 + dot(-b[0], x[0]) sign *= np.sign(d) logdet += np.log(np.abs(d)) for k in range(0,n-2): beta = (1 - alpha*alpha)*beta mu = (b[k+1] - dot(r[1:k+2], x[k::-1])) /beta x[0:k+1] = x[0:k+1] + mu*y[k::-1] x[k+1] = mu d = 1 + dot(-b[0:k+2], x[0:k+2]) sign *= np.sign(d) logdet += np.log(np.abs(d)) if k < n-2: alpha = -(r[k+2] + dot(r[1:k+2], y[k::-1]))/beta y[0:k+1] = y[0:k+1] + alpha * y[k::-1] y[k+1] = alpha return(sign, logdet)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fast_logdet(matrix):\n sign, ld = np.linalg.slogdet(matrix)\n if not sign > 0:\n return -np.inf\n return ld", "def pddet(A):\r\n L = jitchol(A)\r\n logdetA = 2*sum(np.log(np.diag(L)))\r\n return logdetA", "def log_abs_det_jacobian(self, z):\n pre_u = self.u_ + self.u\n ...
[ "0.7205463", "0.69225436", "0.6803772", "0.6577487", "0.65662503", "0.6258033", "0.6235449", "0.6192166", "0.61640286", "0.60718197", "0.602648", "0.5906651", "0.5904567", "0.58784807", "0.58522433", "0.5850299", "0.58452636", "0.5838441", "0.5796368", "0.57808894", "0.577887...
0.6977162
1
Preprocessing needed for toeplitz_inverse_multiplication()
def toeplitz_inverse_multiplication_prep(T_column): phi=1 psi=2 assert phi != 0 assert psi != 0 assert phi != psi n = len(T_column) x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) ) y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) ) x_0 = x[0] D_phi = (phi**(1/n))**np.arange(0,n) D_psi = (psi**(1/n))**np.arange(0,n) Lambda_1 = fft(D_psi*x) Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1]))) Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1]))) Lambda_4 = fft(D_phi*x) return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bd_toeplitz_inverse_multiplication_prep(*arrs):\n \n t = []\n for c in arrs: # loop over each block\n t.append(toeplitz_inverse_multiplication_prep(c))\n return tuple(t)", "def toeplitz_inverse_multiplication(u, x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4):\n\n y...
[ "0.65743506", "0.63173485", "0.60780877", "0.60345995", "0.5920918", "0.5710167", "0.5684219", "0.56176597", "0.56087387", "0.5590726", "0.5568226", "0.556281", "0.5558012", "0.5548983", "0.5540906", "0.5426001", "0.5426001", "0.5406237", "0.53970987", "0.5395093", "0.5389461...
0.65871215
0
matrix multiplication with the inverse of a blockdiagonal matrix having Toeplitz blocks. y = T u Analogous to toeplitz_inverse_multiplication()
def bd_toeplitz_inverse_multiplication(u, *arrs): y = zeros(shape(u)) n_start = 0 n_end = 0 for t in arrs: n_start = n_end n_end += len(t[3]) # len(t[3]) is the length of the block y[n_start:n_end] = toeplitz_inverse_multiplication(u[n_start:n_end], *t) assert len(y) == n_end return y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def toeplitz_inverse_multiplication_prep(T_column):\n \n phi=1\n psi=2\n assert phi != 0\n assert psi != 0\n assert phi != psi\n \n n = len(T_column)\n \n x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )\n y = levinson(T_column, np.concatenate( (np.zeros...
[ "0.65371925", "0.6473114", "0.639856", "0.6361315", "0.6302969", "0.6292023", "0.6192051", "0.61344135", "0.61059606", "0.60929507", "0.6069136", "0.6021487", "0.60205114", "0.6011188", "0.5997013", "0.5966648", "0.5926399", "0.5926365", "0.5916658", "0.5888663", "0.5883227",...
0.7164876
0
Parse a single line of csvtoarrow output. Raise RuntimeError if a line cannot be parsed. (We can't recover from that because we don't know what's happening.)
def _parse_csv_to_arrow_warning(line: str) -> I18nMessage: for pattern, builder in _ERROR_PATTERNS: match = pattern.match(line) if match: return builder(**match.groupdict()) raise RuntimeError("Could not parse csv-to-arrow output line: %r" % line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_line(self, line):\n raise NotImplementedError", "def test_parseLine2(mocker):\n \n # given: setup test framework\n worker = Worker()\n testString = \"11/11/19,Brighter Futures,12000\"\n \n # when:\n result = worker.parseLineCSV(testString)\n \n # then: (Using PyTruth a...
[ "0.6595832", "0.6529445", "0.62704617", "0.61401874", "0.61335003", "0.61316746", "0.61252147", "0.61061907", "0.5982218", "0.5961737", "0.5809438", "0.5809438", "0.5809438", "0.5809438", "0.5806658", "0.5806658", "0.5729117", "0.5704075", "0.5667828", "0.56519485", "0.562726...
0.7278119
0
Return true if we should fastskip converting a pa.Array. The _true_ reason for this function is to test whether an Array contains "Inf" or "NaN". A numberconversion library will parse those. But _this_ library is for Workbench, and Workbench doesn't support NaN/Inf. So this function helps us decide _not_ to autoconvert a column when the intent isn't perfectly clear. Assume `arr` is of type `utf8` or a dictionary of `utf8`. Assume there are no gaps hidden in null values in the buffer. (It's up to the caller to prove this.)
def _utf8_chunk_may_contain_inf_or_nan(chunk: pyarrow.Array) -> bool: _, offsets_buf, data_buf = chunk.buffers() offsets = array.array("i") assert offsets.itemsize == 4 offsets.frombytes(offsets_buf) if sys.byteorder != "little": offsets.byteswap() # pyarrow is little-endian offset0 = offsets[chunk.offset] offsetN = offsets[chunk.offset + len(chunk)] # len(offsets) == 1 + len(chunk) b = data_buf[offset0:offsetN].to_pybytes() return SCARY_BYTE_REGEX.search(b) is not None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def asarray_chkfinite(a):\n a = asarray(a)\n if (a.dtype.char in typecodes['AllFloat']) \\\n and (_nx.isnan(a).any() or _nx.isinf(a).any()):\n raise ValueError, \"array must not contain infs or NaNs\"\n return a", "def is_array(self, arr):\n return isinstance(arr, np.ndarray)", ...
[ "0.63142204", "0.59511065", "0.59251046", "0.5863669", "0.5700599", "0.5661153", "0.5581066", "0.54970616", "0.54685277", "0.54147017", "0.53897524", "0.5384138", "0.53668594", "0.5293467", "0.52856606", "0.527953", "0.5257239", "0.5248469", "0.5248469", "0.5215622", "0.52145...
0.60420185
1
Update the config information with new dropout values.
def update_dropout(info, dropout, dropout_type, prop_name): if dropout_type == "schnet_dropout": info["model_params"]["schnet_dropout"] = dropout elif dropout_type == "chemprop_dropout": info["model_params"]["cp_dropout"] = dropout elif dropout_type == "readout_dropout": # if it's in the readout layers, find the dropout # layers in the readout dictionary and update them readout = info["model_params"]["readoutdict"] layer_dics = readout[prop_name] for layer_dic in layer_dics: if layer_dic["name"] == "Dropout": layer_dic["param"]["p"] = dropout info["model_params"]["readoutdict"] = {prop_name: layer_dics} elif dropout_type == "attention_dropout": info["model_params"]["boltzmann_dict"]["dropout_rate"] = dropout else: info["model_params"][dropout_type] = dropout
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conf_update(self):\n pass", "def update(self):\n self.save_config_file()", "def updateConfig(self):\n # Make sure to keep the default values in place.\n if self.newConfig['sensor'] == 0:\n self.newConfig['sensor'] = self.config['sensor']\n if self.newConfig['ca...
[ "0.6544299", "0.63342535", "0.60116196", "0.59151256", "0.5909534", "0.57759255", "0.57704425", "0.5765275", "0.5730661", "0.56408286", "0.5635697", "0.558882", "0.55770063", "0.5571904", "0.5553866", "0.5534613", "0.5478377", "0.546527", "0.5463798", "0.5436312", "0.5427711"...
0.63966775
1
Update the config information with the number of attention heads.
def update_heads(info, heads): info["model_params"]["boltzmann_dict"]["num_heads"] = heads # Concatenate the fingerprints produced by the different heads info["model_params"]["boltzmann_dict"]["head_pool"] = "concatenate" readoutdict = info["model_params"]["readoutdict"] feat_dim = info["model_params"]["mol_basis"] for key, lst in readoutdict.items(): for i, dic in enumerate(lst): if "param" in dic and "in_features" in dic.get("param", {}): # make sure that the input dimension to the readout is equal to # `heads * feat_dim`, where `feat_dim` is the feature dimension # produced by each head readoutdict[key][i]["param"]["in_features"] = feat_dim * heads break info["model_params"]["readoutdict"] = readoutdict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_config(self):\n self.channel_count = self.config_global['channel_count']\n self.pixel_count = self.config_global['pixel_count']\n self.pixel_index_max = self.pixel_count - 1\n self.repeat_count = self.config_global['repeat_count']\n self.repeat_snake = self.config_glob...
[ "0.5661511", "0.5599164", "0.54210174", "0.53882116", "0.5338775", "0.5247799", "0.5247248", "0.5225227", "0.51431704", "0.5058479", "0.49841285", "0.49445143", "0.49379683", "0.48532596", "0.4848556", "0.48481622", "0.4835506", "0.48258802", "0.48030823", "0.48024145", "0.47...
0.5935313
0
Update a general parameter that's in the main info dictionary.
def update_general(info, key, val): info["model_params"][key] = val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_general_param(self, param, val):\n assert param in self.params, '%s is not recognized as a valid parameter' % param\n self.params[param].change_value(val)", "def _paramUpdate(self):\n\n # Update the database attributes accordingly.\n dt.utilities.DB_attrs_save(self.Database...
[ "0.7279819", "0.71316004", "0.70896465", "0.68731415", "0.6845889", "0.68180555", "0.6810109", "0.67108864", "0.6680052", "0.6631445", "0.6597182", "0.6568276", "0.65336627", "0.65146816", "0.64628476", "0.64187586", "0.64153326", "0.63640064", "0.63570213", "0.63570213", "0....
0.7829526
0
End of preview. Expand in Data Studio

No dataset card yet

Downloads last month
41