query stringlengths 1 46.9k | pos stringlengths 75 104k | neg listlengths 12 12 | scores listlengths 12 12 |
|---|---|---|---|
Estimate discontinuity in basis of low resolution image segmentation.
:return: discontinuity in low resolution | def __msgc_step3_discontinuity_localization(self):
"""
Estimate discontinuity in basis of low resolution image segmentation.
:return: discontinuity in low resolution
"""
import scipy
start = self._start_time
seg = 1 - self.segmentation.astype(np.int8)
self.stats["low level object voxels"] = np.sum(seg)
self.stats["low level image voxels"] = np.prod(seg.shape)
# in seg is now stored low resolution segmentation
# back to normal parameters
# step 2: discontinuity localization
# self.segparams = sparams_hi
seg_border = scipy.ndimage.filters.laplace(seg, mode="constant")
logger.debug("seg_border: %s", scipy.stats.describe(seg_border, axis=None))
# logger.debug(str(np.max(seg_border)))
# logger.debug(str(np.min(seg_border)))
seg_border[seg_border != 0] = 1
logger.debug("seg_border: %s", scipy.stats.describe(seg_border, axis=None))
# scipy.ndimage.morphology.distance_transform_edt
boundary_dilatation_distance = self.segparams["boundary_dilatation_distance"]
seg = scipy.ndimage.morphology.binary_dilation(
seg_border,
# seg,
np.ones(
[
(boundary_dilatation_distance * 2) + 1,
(boundary_dilatation_distance * 2) + 1,
(boundary_dilatation_distance * 2) + 1,
]
),
)
if self.keep_temp_properties:
self.temp_msgc_lowres_discontinuity = seg
else:
self.temp_msgc_lowres_discontinuity = None
if self.debug_images:
import sed3
pd = sed3.sed3(seg_border) # ), contour=seg)
pd.show()
pd = sed3.sed3(seg) # ), contour=seg)
pd.show()
# segzoom = scipy.ndimage.interpolation.zoom(seg.astype('float'), zoom,
# order=0).astype('int8')
self.stats["t3"] = time.time() - start
return seg | [
"def branchScale(self):\n \"\"\"See docs for `Model` abstract base class.\"\"\"\n bs = -(self.prx * scipy.diagonal(self.Prxy, axis1=1, axis2=2)\n ).sum() * self.mu / float(self.nsites)\n assert bs > 0\n return bs",
"def branchScale(self):\n \"\"\"See docs for `Mod... | [
0.6861903071403503,
0.6760287284851074,
0.6727304458618164,
0.6622427105903625,
0.6478081941604614,
0.6416714191436768,
0.6381795406341553,
0.6336018443107605,
0.633543848991394,
0.6316496729850769,
0.6313363313674927,
0.6291797757148743
] |
Run Graph-Cut segmentation with refinement of low resolution multiscale graph.
In first step is performed normal GC on low resolution data
Second step construct finer grid on edges of segmentation from first
step.
There is no option for use without `use_boundary_penalties` | def __multiscale_gc_lo2hi_run(self): # , pyed):
"""
Run Graph-Cut segmentation with refinement of low resolution multiscale graph.
In first step is performed normal GC on low resolution data
Second step construct finer grid on edges of segmentation from first
step.
There is no option for use without `use_boundary_penalties`
"""
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
self._msgc_lo2hi_resize_init()
self.__msgc_step0_init()
hard_constraints = self.__msgc_step12_low_resolution_segmentation()
# ===== high resolution data processing
seg = self.__msgc_step3_discontinuity_localization()
self.stats["t3.1"] = (time.time() - self._start_time)
graph = Graph(
seg,
voxelsize=self.voxelsize,
nsplit=self.segparams["block_size"],
edge_weight_table=self._msgc_npenalty_table,
compute_low_nodes_index=True,
)
# graph.run() = graph.generate_base_grid() + graph.split_voxels()
# graph.run()
graph.generate_base_grid()
self.stats["t3.2"] = (time.time() - self._start_time)
graph.split_voxels()
self.stats["t3.3"] = (time.time() - self._start_time)
self.stats.update(graph.stats)
self.stats["t4"] = (time.time() - self._start_time)
mul_mask, mul_val = self.__msgc_tlinks_area_weight_from_low_segmentation(seg)
area_weight = 1
unariesalt = self.__create_tlinks(
self.img,
self.voxelsize,
self.seeds,
area_weight=area_weight,
hard_constraints=hard_constraints,
mul_mask=None,
mul_val=None,
)
# N-links prepared
self.stats["t5"] = (time.time() - self._start_time)
un, ind = np.unique(graph.msinds, return_index=True)
self.stats["t6"] = (time.time() - self._start_time)
self.stats["t7"] = (time.time() - self._start_time)
unariesalt2_lo2hi = np.hstack(
[unariesalt[ind, 0, 0].reshape(-1, 1), unariesalt[ind, 0, 1].reshape(-1, 1)]
)
nlinks_lo2hi = np.hstack([graph.edges, graph.edges_weights.reshape(-1, 1)])
if self.debug_images:
import sed3
ed = sed3.sed3(unariesalt[:, :, 0].reshape(self.img.shape))
ed.show()
import sed3
ed = sed3.sed3(unariesalt[:, :, 1].reshape(self.img.shape))
ed.show()
# ed = sed3.sed3(seg)
# ed.show()
# import sed3
# ed = sed3.sed3(graph.data)
# ed.show()
# import sed3
# ed = sed3.sed3(graph.msinds)
# ed.show()
# nlinks, unariesalt2, msinds = self.__msgc_step45678_construct_graph(area_weight, hard_constraints, seg)
# self.__msgc_step9_finish_perform_gc_and_reshape(nlinks, unariesalt2, msinds)
self.__msgc_step9_finish_perform_gc_and_reshape(
nlinks_lo2hi, unariesalt2_lo2hi, graph.msinds
)
self._msgc_lo2hi_resize_clean_finish() | [
"def __multiscale_gc_hi2lo_run(self): # , pyed):\n \"\"\"\n Run Graph-Cut segmentation with simplifiyng of high resolution multiscale graph.\n In first step is performed normal GC on low resolution data\n Second step construct finer grid on edges of segmentation from first\n step... | [
0.8354851007461548,
0.717715322971344,
0.7040823698043823,
0.6870102882385254,
0.6714046597480774,
0.6607586145401001,
0.6603902578353882,
0.6547603011131287,
0.6508672833442688,
0.6498038172721863,
0.6489167213439941,
0.641808032989502
] |
Run Graph-Cut segmentation with simplifiyng of high resolution multiscale graph.
In first step is performed normal GC on low resolution data
Second step construct finer grid on edges of segmentation from first
step.
There is no option for use without `use_boundary_penalties` | def __multiscale_gc_hi2lo_run(self): # , pyed):
"""
Run Graph-Cut segmentation with simplifiyng of high resolution multiscale graph.
In first step is performed normal GC on low resolution data
Second step construct finer grid on edges of segmentation from first
step.
There is no option for use without `use_boundary_penalties`
"""
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
self.__msgc_step0_init()
hard_constraints = self.__msgc_step12_low_resolution_segmentation()
# ===== high resolution data processing
seg = self.__msgc_step3_discontinuity_localization()
nlinks, unariesalt2, msinds = self.__msgc_step45678_hi2lo_construct_graph(
hard_constraints, seg
)
self.__msgc_step9_finish_perform_gc_and_reshape(nlinks, unariesalt2, msinds) | [
"def __multiscale_gc_lo2hi_run(self): # , pyed):\n \"\"\"\n Run Graph-Cut segmentation with refinement of low resolution multiscale graph.\n In first step is performed normal GC on low resolution data\n Second step construct finer grid on edges of segmentation from first\n step.\... | [
0.846430778503418,
0.7215781211853027,
0.7195752263069153,
0.685760498046875,
0.6824662089347839,
0.6763722896575928,
0.6726657152175903,
0.6619572043418884,
0.6599618792533875,
0.6594784259796143,
0.6542990803718567,
0.6521185040473938
] |
Return values (intensities) by indexes.
Used for multiscale graph cut.
data = [[0 1 1],
[0 2 2],
[0 2 2]]
inds = [[0 1 2],
[3 4 4],
[5 4 4]]
return: [0, 1, 1, 0, 2, 0]
If the data are not consistent, it will take the maximal value | def __ordered_values_by_indexes(self, data, inds):
"""
Return values (intensities) by indexes.
Used for multiscale graph cut.
data = [[0 1 1],
[0 2 2],
[0 2 2]]
inds = [[0 1 2],
[3 4 4],
[5 4 4]]
return: [0, 1, 1, 0, 2, 0]
If the data are not consistent, it will take the maximal value
"""
# get unique labels and their first indexes
# lab, linds = np.unique(inds, return_index=True)
# compute values by indexes
# values = data.reshape(-1)[linds]
# alternative slow implementation
# if there are different data on same index, it will take
# maximal value
# lab = np.unique(inds)
# values = [0]*len(lab)
# for label in lab:
# values[label] = np.max(data[inds == label])
#
# values = np.asarray(values)
# yet another implementation
values = [None] * (np.max(inds) + 1)
linear_inds = inds.ravel()
linear_data = data.ravel()
for i in range(0, len(linear_inds)):
# going over all data pixels
if values[linear_inds[i]] is None:
# this index is found for first
values[linear_inds[i]] = linear_data[i]
elif values[linear_inds[i]] < linear_data[i]:
# here can be changed maximal or minimal value
values[linear_inds[i]] = linear_data[i]
values = np.asarray(values)
return values | [
"def get_maximum_index(indices):\n \"\"\"Internally used.\"\"\"\n def _maximum_idx_single(idx):\n if isinstance(idx, slice):\n start = -1\n stop = 0\n if idx.start is not None:\n start = idx.start.__index__()\n if idx.stop is not None:\n ... | [
0.7258252501487732,
0.7097887396812439,
0.7016910314559937,
0.7010431885719299,
0.6889966130256653,
0.6820541024208069,
0.678980827331543,
0.6788231134414673,
0.676472544670105,
0.6743552684783936,
0.6736085414886475,
0.672756016254425
] |
Function computes multiscale indexes of ndarray.
mask: Says where is original resolution (0) and where is small
resolution (1). Mask is in small resolution.
orig_shape: Original shape of input data.
zoom: Usually number greater then 1
result = [[0 1 2],
[3 4 4],
[5 4 4]] | def __hi2lo_multiscale_indexes(self, mask, orig_shape): # , zoom):
"""
Function computes multiscale indexes of ndarray.
mask: Says where is original resolution (0) and where is small
resolution (1). Mask is in small resolution.
orig_shape: Original shape of input data.
zoom: Usually number greater then 1
result = [[0 1 2],
[3 4 4],
[5 4 4]]
"""
mask_orig = zoom_to_shape(mask, orig_shape, dtype=np.int8)
inds_small = np.arange(mask.size).reshape(mask.shape)
inds_small_in_orig = zoom_to_shape(inds_small, orig_shape, dtype=np.int8)
inds_orig = np.arange(np.prod(orig_shape)).reshape(orig_shape)
# inds_orig = inds_orig * mask_orig
inds_orig += np.max(inds_small_in_orig) + 1
# print 'indexes'
# import py3DSeedEditor as ped
# import pdb; pdb.set_trace() # BREAKPOINT
# '==' is not the same as 'is' for numpy.array
inds_small_in_orig[mask_orig == True] = inds_orig[mask_orig == True] # noqa
inds = inds_small_in_orig
# print np.max(inds)
# print np.min(inds)
inds = relabel_squeeze(inds)
logger.debug(
"Index after relabeling: %s", scipy.stats.describe(inds, axis=None)
)
# logger.debug("Minimal index after relabeling: " + str(np.min(inds)))
# inds_orig[mask_orig==True] = 0
# inds_small_in_orig[mask_orig==False] = 0
# inds = (inds_orig + np.max(inds_small_in_orig) + 1) + inds_small_in_orig
return inds, mask_orig | [
"def construct_zernike_polynomials(x, y, zernike_indexes, mask=None, weight=None):\n \"\"\"Return the zerike polynomials for all objects in an image\n \n x - the X distance of a point from the center of its object\n y - the Y distance of a point from the center of its object\n zernike_indexes - an Nx... | [
0.6771979928016663,
0.6765623092651367,
0.6748027205467224,
0.6736266016960144,
0.6680997014045715,
0.6649146676063538,
0.6602632999420166,
0.6561344265937805,
0.65461266040802,
0.6545136570930481,
0.6541271209716797,
0.6522963047027588
] |
Interactive seed setting with 3d seed editor | def interactivity(self, min_val=None, max_val=None, qt_app=None):
"""
Interactive seed setting with 3d seed editor
"""
from .seed_editor_qt import QTSeedEditor
from PyQt4.QtGui import QApplication
if min_val is None:
min_val = np.min(self.img)
if max_val is None:
max_val = np.max(self.img)
window_c = (max_val + min_val) / 2 # .astype(np.int16)
window_w = max_val - min_val # .astype(np.int16)
if qt_app is None:
qt_app = QApplication(sys.argv)
pyed = QTSeedEditor(
self.img,
modeFun=self.interactivity_loop,
voxelSize=self.voxelsize,
seeds=self.seeds,
volume_unit=self.volume_unit,
)
pyed.changeC(window_c)
pyed.changeW(window_w)
qt_app.exec_() | [
"def set_seed(seed: int):\n \"\"\" Set random seed for python, numpy and pytorch RNGs \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.random.manual_seed(seed)",
"def set_seeds(self, seeds):\n \"\"\"\n Function for manual seed setting. Sets variable seeds and prepares\n v... | [
0.7103196382522583,
0.7084529399871826,
0.7084075212478638,
0.7056494355201721,
0.6945380568504333,
0.6907649636268616,
0.6894176006317139,
0.6885712146759033,
0.6882632374763489,
0.6879781484603882,
0.6860505938529968,
0.6801682710647583
] |
Function for manual seed setting. Sets variable seeds and prepares
voxels for density model.
:param seeds: ndarray (0 - nothing, 1 - object, 2 - background,
3 - object just hard constraints, no model training, 4 - background
just hard constraints, no model training) | def set_seeds(self, seeds):
"""
Function for manual seed setting. Sets variable seeds and prepares
voxels for density model.
:param seeds: ndarray (0 - nothing, 1 - object, 2 - background,
3 - object just hard constraints, no model training, 4 - background
just hard constraints, no model training)
"""
if self.img.shape != seeds.shape:
raise Exception("Seeds must be same size as input image")
self.seeds = seeds.astype("int8")
self.voxels1 = self.img[self.seeds == 1]
self.voxels2 = self.img[self.seeds == 2] | [
"def __set_hard_hard_constraints(self, tdata1, tdata2, seeds):\n \"\"\"\n it works with seed labels:\n 0: nothing\n 1: object 1 - full seeds\n 2: object 2 - full seeds\n 3: object 1 - not a training seeds\n 4: object 2 - not a training seeds\n \"\"\"\n ... | [
0.724362850189209,
0.7048511505126953,
0.6971395015716553,
0.6954063177108765,
0.6898720860481262,
0.6891217231750488,
0.6812682151794434,
0.6777561902999878,
0.6749814748764038,
0.6689315438270569,
0.6673877835273743,
0.6638182401657104
] |
Run the Graph Cut segmentation according to preset parameters.
:param run_fit_model: Allow to skip model fit when the model is prepared before
:return: | def run(self, run_fit_model=True):
"""
Run the Graph Cut segmentation according to preset parameters.
:param run_fit_model: Allow to skip model fit when the model is prepared before
:return:
"""
if run_fit_model:
self.fit_model(self.img, self.voxelsize, self.seeds)
self._start_time = time.time()
if self.segparams["method"].lower() in ("graphcut", "gc"):
self.__single_scale_gc_run()
elif self.segparams["method"].lower() in (
"multiscale_graphcut",
"multiscale_gc",
"msgc",
"msgc_lo2hi",
"lo2hi",
"multiscale_graphcut_lo2hi",
):
logger.debug("performing multiscale Graph-Cut lo2hi")
self.__multiscale_gc_lo2hi_run()
elif self.segparams["method"].lower() in (
"msgc_hi2lo",
"hi2lo",
"multiscale_graphcut_hi2lo",
):
logger.debug("performing multiscale Graph-Cut hi2lo")
self.__multiscale_gc_hi2lo_run()
else:
logger.error("Unknown segmentation method: " + self.segparams["method"]) | [
"def model_segments(copy_file, work_dir, paired):\n \"\"\"Perform segmentation on input copy number log2 ratio file.\n \"\"\"\n out_file = os.path.join(work_dir, \"%s.cr.seg\" % dd.get_sample_name(paired.tumor_data))\n tumor_counts, normal_counts = heterogzygote_counts(paired)\n if not utils.file_exi... | [
0.6835083961486816,
0.6589675545692444,
0.6549115777015686,
0.6541705131530762,
0.6504920125007629,
0.6443780660629272,
0.6398264765739441,
0.6389582753181458,
0.6380501985549927,
0.6336901187896729,
0.6321241855621338,
0.6307634711265564
] |
it works with seed labels:
0: nothing
1: object 1 - full seeds
2: object 2 - full seeds
3: object 1 - not a training seeds
4: object 2 - not a training seeds | def __set_hard_hard_constraints(self, tdata1, tdata2, seeds):
"""
it works with seed labels:
0: nothing
1: object 1 - full seeds
2: object 2 - full seeds
3: object 1 - not a training seeds
4: object 2 - not a training seeds
"""
seeds_mask = (seeds == 1) | (seeds == 3)
tdata2[seeds_mask] = np.max(tdata2) + 1
tdata1[seeds_mask] = 0
seeds_mask = (seeds == 2) | (seeds == 4)
tdata1[seeds_mask] = np.max(tdata1) + 1
tdata2[seeds_mask] = 0
return tdata1, tdata2 | [
"def get_seed_sub(self, label):\r\n \"\"\" Return list of all seeds with specific label\r\n \"\"\"\r\n sx, sy, sz = np.nonzero(self.seeds == label)\r\n\r\n return sx, sy, sz",
"def identify(label, column_type = :integer)\n if column_type == :uuid\n NamedSeeds.uuid_v5(label)... | [
0.7497216463088989,
0.70253586769104,
0.6890901327133179,
0.6859575510025024,
0.6842992901802063,
0.6806407570838928,
0.6781275868415833,
0.6768274307250977,
0.674925684928894,
0.6744183897972107,
0.6728851795196533,
0.6726276278495789
] |
Compute edge values for graph cut tlinks based on image intensity
and texture. | def __similarity_for_tlinks_obj_bgr(
self,
data,
voxelsize,
# voxels1, voxels2,
# seeds, otherfeatures=None
):
"""
Compute edge values for graph cut tlinks based on image intensity
and texture.
"""
# self.fit_model(data, voxelsize, seeds)
# There is a need to have small vaues for good fit
# R(obj) = -ln( Pr (Ip | O) )
# R(bck) = -ln( Pr (Ip | B) )
# Boykov2001b
# ln is computed in likelihood
tdata1 = (-(self.mdl.likelihood_from_image(data, voxelsize, 1))) * 10
tdata2 = (-(self.mdl.likelihood_from_image(data, voxelsize, 2))) * 10
# to spare some memory
dtype = np.int16
if np.any(tdata1 > 32760):
dtype = np.float32
if np.any(tdata2 > 32760):
dtype = np.float32
if self.segparams["use_apriori_if_available"] and self.apriori is not None:
logger.debug("using apriori information")
gamma = self.segparams["apriori_gamma"]
a1 = (-np.log(self.apriori * 0.998 + 0.001)) * 10
a2 = (-np.log(0.999 - (self.apriori * 0.998))) * 10
# logger.debug('max ' + str(np.max(tdata1)) + ' min ' + str(np.min(tdata1)))
# logger.debug('max ' + str(np.max(tdata2)) + ' min ' + str(np.min(tdata2)))
# logger.debug('max ' + str(np.max(a1)) + ' min ' + str(np.min(a1)))
# logger.debug('max ' + str(np.max(a2)) + ' min ' + str(np.min(a2)))
tdata1u = (((1 - gamma) * tdata1) + (gamma * a1)).astype(dtype)
tdata2u = (((1 - gamma) * tdata2) + (gamma * a2)).astype(dtype)
tdata1 = tdata1u
tdata2 = tdata2u
# logger.debug(' max ' + str(np.max(tdata1)) + ' min ' + str(np.min(tdata1)))
# logger.debug(' max ' + str(np.max(tdata2)) + ' min ' + str(np.min(tdata2)))
# logger.debug('gamma ' + str(gamma))
# import sed3
# ed = sed3.show_slices(tdata1)
# ed = sed3.show_slices(tdata2)
del tdata1u
del tdata2u
del a1
del a2
# if np.any(tdata1 < 0) or np.any(tdata2 <0):
# logger.error("Problem with tlinks. Likelihood is < 0")
# if self.debug_images:
# self.__show_debug_tdata_images(tdata1, tdata2, suptitle="likelihood")
return tdata1, tdata2 | [
"def __ordered_values_by_indexes(self, data, inds):\n \"\"\"\n Return values (intensities) by indexes.\n\n Used for multiscale graph cut.\n data = [[0 1 1],\n [0 2 2],\n [0 2 2]]\n\n inds = [[0 1 2],\n [3 4 4],\n [5 4 4]]... | [
0.6722548007965088,
0.6709698438644409,
0.6678282618522644,
0.6651525497436523,
0.6602543592453003,
0.6581569910049438,
0.6538912057876587,
0.6536920666694641,
0.6516335606575012,
0.6476464867591858,
0.6469390988349915,
0.6459125280380249
] |
CodeSearchNet Hard Negatives (Filtered) by Lumees AI
Dataset Summary
This dataset is a processed version of the CodeSearchNet dataset, enhanced with Hard Negative Mining to facilitate the training of state-of-the-art code retrieval models.
It was created by Lumees AI to improve the ability of embedding models to distinguish between syntactically similar but functionally different code snippets.
- Developer: Lumees AI
- Authors: Hasan Kurşun, Kerem Berkay Yanık
- Contact: hello@lumees.io
- Source Data: CodeSearchNet (Train split)
- Total Samples: ~1.88M triplets/tuples
Dataset Structure
The dataset is provided in .jsonl format. Each line represents a training sample containing a natural language query, the positive ground truth code, and a list of mined hard negatives.
Data Fields
- `query` (string): The natural language docstring/description of the function.
- `pos` (string): The positive (ground truth) code snippet.
- `neg` (list of strings): A list of hard negative code snippets (semantically similar to the query but incorrect).
- `scores` (list of floats): The cosine similarity scores of the negative candidates against the query (computed by the mining model).
Example Instance
{
"query": "CommentsView sub-view (will be used recursively)",
"pos": "function ThreadBranchView(vm) { ... }",
"neg": [
"function CommentReplyView(vm, comment) { ... }",
"public function viewAction() { ... }"
],
"scores": [0.7502, 0.7481]
}
Methodology & Creation
Source Model
The mining process utilized Alibaba-NLP/gte-multilingual-base, a high-performance embedding model, to generate vector representations for both queries and code.
Mining Process
The dataset was constructed using a dense retrieval approach on the entire CodeSearchNet training corpus across 6 languages (Python, Java, Go, PHP, Ruby, JavaScript).
- Embedding: All code snippets in the corpus were encoded into dense vectors.
- Retrieval: For every query, we retrieved the top 50 semantic candidates from the corpus using GPU-accelerated Matrix Multiplication.
- Filtration:
- Self-Exclusion: The positive ground truth was removed from results.
- Duplicate Removal: Exact string duplicates of the positive code were removed.
- Score Thresholding:
- Max Similarity (0.95): Candidates with scores above 0.95 were discarded to avoid False Negatives (valid code that is too similar to the ground truth).
- Min Similarity (0.35): Candidates with scores below 0.35 were discarded to ensure the negatives are "hard" enough to be useful for training (avoiding easy negatives).
- Selection: Up to the top 12 valid hard negatives were selected for each query.
Intended Use
This dataset is optimized for:
- Contrastive Learning: Fine-tuning embedding models using losses like `MultipleNegativesRankingLoss` or `TripletLoss`.
- Code Retrieval: Improving search relevance in IDEs or code search engines.
- Cross-Lingual Alignment: The dataset includes cross-lingual negatives (e.g., a Python query retrieving similar PHP code), helping models learn language-agnostic semantic features.
Licensing
This dataset adheres to the licensing terms of the original CodeSearchNet dataset (MIT/Permissive). Users should verify specific licensing requirements for individual code snippets if used for commercial code generation.
Citation
If you use this dataset, please cite Lumees AI and the original CodeSearchNet paper:
@misc{lumees2025hardnegatives,
  author = {Hasan Kurşun and Kerem Berkay Yanık},
title = {CodeSearchNet Hard Negatives (Filtered)},
year = {2025},
publisher = {Lumees AI},
  howpublished = {\url{https://lumees.io}},
email = {hello@lumees.io}
}
@article{husain2019codesearchnet,
title={CodeSearchNet Challenge: Evaluating the State of Semantic Code Search},
author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},
journal={arXiv preprint arXiv:1909.09436},
year={2019}
}
- Downloads last month
- 27