buggy_function (string, lengths 1–391k) | fixed_function (string, lengths 0–392k)
---|---|
public static synchronized void printException(String where, Exception e) {
if (e instanceof SQLException) {
SQLException se = (SQLException) e;
if (se.getSQLState() != null) { // SQLSTATE is NULL for a
if (se.getSQLState().equals("40001"))
System.out.println("deadlocked detected");
if (se.getSQLState().equals("40XL1"))
System.out.println(" lock timeout exception");
if (se.getSQLState().equals("23500"))
System.out.println(" duplicate key violation");
}
if (se.getNextException() != null) {
String m = se.getNextException().getSQLState();
System.out.println(se.getNextException().getMessage()
+ " SQLSTATE: " + m);
}
}
if (e.getMessage().equals(null)) {
System.out.println("NULL error message detected");
System.out.println("Here is the NULL exection - " + e.toString());
System.out.println("Stack trace of the NULL exception - ");
e.printStackTrace(System.out);
}
System.out.println("At this point - " + where
+ ", exception thrown was : " + e.getMessage());
}
| public static synchronized void printException(String where, Exception e) {
if (e instanceof SQLException) {
SQLException se = (SQLException) e;
if (se.getSQLState() != null) { // SQLSTATE is NULL for a
if (se.getSQLState().equals("40001"))
System.out.println("deadlocked detected");
if (se.getSQLState().equals("40XL1"))
System.out.println(" lock timeout exception");
if (se.getSQLState().equals("23500"))
System.out.println(" duplicate key violation");
}
if (se.getNextException() != null) {
String m = se.getNextException().getSQLState();
System.out.println(se.getNextException().getMessage()
+ " SQLSTATE: " + m);
}
}
if (e.getMessage() == null) {
System.out.println("NULL error message detected");
System.out.println("Here is the NULL exection - " + e.toString());
System.out.println("Stack trace of the NULL exception - ");
e.printStackTrace(System.out);
}
System.out.println("At this point - " + where
+ ", exception thrown was : " + e.getMessage());
}
|
public MonotonicAppendingLongBuffer(int initialPageCount, int pageSize) {
super(initialPageCount, pageSize);
averages = new float[pageSize];
}
| public MonotonicAppendingLongBuffer(int initialPageCount, int pageSize) {
super(initialPageCount, pageSize);
averages = new float[initialPageCount];
}
|
public void testBuild() throws IOException {
final String LF = System.getProperty("line.separator");
String input = "oneword" + LF + "twoword" + LF + "threeword";
PlainTextDictionary ptd = new PlainTextDictionary(new StringReader(input));
Directory ramDir = newDirectory();
SpellChecker spellChecker = new SpellChecker(ramDir);
spellChecker.indexDictionary(ptd);
String[] similar = spellChecker.suggestSimilar("treeword", 2);
assertEquals(2, similar.length);
assertEquals(similar[0], "threeword");
assertEquals(similar[1], "twoword");
spellChecker.close();
ramDir.close();
}
| public void testBuild() throws IOException {
final String LF = System.getProperty("line.separator");
String input = "oneword" + LF + "twoword" + LF + "threeword";
PlainTextDictionary ptd = new PlainTextDictionary(new StringReader(input));
Directory ramDir = newDirectory();
SpellChecker spellChecker = new SpellChecker(ramDir);
spellChecker.indexDictionary(ptd);
String[] similar = spellChecker.suggestSimilar("treeword", 2);
assertEquals(2, similar.length);
assertEquals(similar[0], "threeword");
assertEquals(similar[1], "oneword");
spellChecker.close();
ramDir.close();
}
|
public void testExtendedResultsCount() throws Exception {
assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", SpellCheckComponent.SPELLCHECK_BUILD, "true", "q","bluo", SpellCheckComponent.SPELLCHECK_COUNT,"5", SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS,"false")
,"/spellcheck/suggestions/[0]=='bluo'"
,"/spellcheck/suggestions/[1]/numFound==5"
);
assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","bluo", SpellCheckComponent.SPELLCHECK_COUNT,"3", SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS,"true")
,"/spellcheck/suggestions/[1]/suggestion==[{'word':'blue','freq':1}, {'word':'blud','freq':1}, {'word':'boue','freq':1}]"
);
}
| public void testExtendedResultsCount() throws Exception {
assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", SpellCheckComponent.SPELLCHECK_BUILD, "true", "q","bluo", SpellCheckComponent.SPELLCHECK_COUNT,"5", SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS,"false")
,"/spellcheck/suggestions/[0]=='bluo'"
,"/spellcheck/suggestions/[1]/numFound==5"
);
assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","bluo", SpellCheckComponent.SPELLCHECK_COUNT,"3", SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS,"true")
,"/spellcheck/suggestions/[1]/suggestion==[{'word':'blud','freq':1}, {'word':'blue','freq':1}, {'word':'blee','freq':1}]"
);
}
|
public String[] listAll() {
ensureOpen();
String[] res;
if (writer != null) {
res = writer.listAll();
} else {
res = entries.keySet().toArray(new String[entries.size()]);
// Add the segment name
String seg = fileName.substring(0, fileName.indexOf('.'));
for (int i = 0; i < res.length; i++) {
res[i] = seg + res[i];
}
}
return res;
}
| public String[] listAll() {
ensureOpen();
String[] res;
if (writer != null) {
res = writer.listAll();
} else {
res = entries.keySet().toArray(new String[entries.size()]);
// Add the segment name
String seg = IndexFileNames.parseSegmentName(fileName);
for (int i = 0; i < res.length; i++) {
res[i] = seg + res[i];
}
}
return res;
}
|
public static void prepareClass() throws Exception
{
LOCAL = FBUtilities.getLocalAddress();
tablename = "Keyspace4";
StorageService.instance.initServer();
// generate a fake endpoint for which we can spoof receiving/sending trees
REMOTE = InetAddress.getByName("127.0.0.2");
store = Table.open(tablename).getColumnFamilyStores().iterator().next();
cfname = store.columnFamily_;
}
| public static void prepareClass() throws Exception
{
LOCAL = FBUtilities.getLocalAddress();
tablename = "Keyspace5";
StorageService.instance.initServer();
// generate a fake endpoint for which we can spoof receiving/sending trees
REMOTE = InetAddress.getByName("127.0.0.2");
store = Table.open(tablename).getColumnFamilyStores().iterator().next();
cfname = store.columnFamily_;
}
|
public TestOrdValues(String name) {
super(name);
}
| public TestOrdValues(String name) {
super(name, false);
}
|
public StorageService()
{
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
try
{
mbs.registerMBean(this, new ObjectName("org.apache.cassandra.service:type=StorageService"));
}
catch (Exception e)
{
throw new RuntimeException(e);
}
bootstrapSet = Multimaps.synchronizedSetMultimap(HashMultimap.<InetAddress, String>create());
/* register the verb handlers */
MessagingService.instance.registerVerbHandlers(Verb.BINARY, new BinaryVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.MUTATION, new RowMutationVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.READ_REPAIR, new ReadRepairVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.READ, new ReadVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.RANGE_SLICE, new RangeSliceVerbHandler());
// see BootStrapper for a summary of how the bootstrap verbs interact
MessagingService.instance.registerVerbHandlers(Verb.BOOTSTRAP_TOKEN, new BootStrapper.BootstrapTokenVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.STREAM_REQUEST, new StreamRequestVerbHandler() );
MessagingService.instance.registerVerbHandlers(Verb.STREAM_INITIATE, new StreamInitiateVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.STREAM_INITIATE_DONE, new StreamInitiateDoneVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.STREAM_FINISHED, new StreamFinishedVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.READ_RESPONSE, new ResponseVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.TREE_REQUEST, new TreeRequestVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.TREE_RESPONSE, new AntiEntropyService.TreeResponseVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.JOIN, new GossiperJoinVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.GOSSIP_DIGEST_SYN, new GossipDigestSynVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.GOSSIP_DIGEST_ACK, new GossipDigestAckVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.GOSSIP_DIGEST_ACK2, new GossipDigestAck2VerbHandler());
replicationStrategies = new HashMap<String, AbstractReplicationStrategy>();
for (String table : DatabaseDescriptor.getTables())
{
AbstractReplicationStrategy strat = getReplicationStrategy(tokenMetadata_, table);
replicationStrategies.put(table, strat);
}
replicationStrategies = Collections.unmodifiableMap(replicationStrategies);
// spin up the streaming serivice so it is available for jmx tools.
if (StreamingService.instance == null)
throw new RuntimeException("Streaming service is unavailable.");
}
| public StorageService()
{
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
try
{
mbs.registerMBean(this, new ObjectName("org.apache.cassandra.service:type=StorageService"));
}
catch (Exception e)
{
throw new RuntimeException(e);
}
bootstrapSet = Multimaps.synchronizedSetMultimap(HashMultimap.<InetAddress, String>create());
/* register the verb handlers */
MessagingService.instance.registerVerbHandlers(Verb.BINARY, new BinaryVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.MUTATION, new RowMutationVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.READ_REPAIR, new ReadRepairVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.READ, new ReadVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.RANGE_SLICE, new RangeSliceVerbHandler());
// see BootStrapper for a summary of how the bootstrap verbs interact
MessagingService.instance.registerVerbHandlers(Verb.BOOTSTRAP_TOKEN, new BootStrapper.BootstrapTokenVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.STREAM_REQUEST, new StreamRequestVerbHandler() );
MessagingService.instance.registerVerbHandlers(Verb.STREAM_INITIATE, new StreamInitiateVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.STREAM_INITIATE_DONE, new StreamInitiateDoneVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.STREAM_FINISHED, new StreamFinishedVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.READ_RESPONSE, new ResponseVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.TREE_REQUEST, new TreeRequestVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.TREE_RESPONSE, new AntiEntropyService.TreeResponseVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.JOIN, new GossiperJoinVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.GOSSIP_DIGEST_SYN, new GossipDigestSynVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.GOSSIP_DIGEST_ACK, new GossipDigestAckVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.GOSSIP_DIGEST_ACK2, new GossipDigestAck2VerbHandler());
replicationStrategies = new HashMap<String, AbstractReplicationStrategy>();
for (String table : DatabaseDescriptor.getNonSystemTables())
{
AbstractReplicationStrategy strat = getReplicationStrategy(tokenMetadata_, table);
replicationStrategies.put(table, strat);
}
replicationStrategies = Collections.unmodifiableMap(replicationStrategies);
// spin up the streaming serivice so it is available for jmx tools.
if (StreamingService.instance == null)
throw new RuntimeException("Streaming service is unavailable.");
}
|
public void testSearch() throws Exception {
Query query = QueryParser.parse("test", "contents", analyzer);
Hits hits = searcher.search(query);
assertEquals("Find document(s)", 2, hits.length());
}
| public void testSearch() throws Exception {
Query query = new QueryParser("contents",analyzer).parse("test");
Hits hits = searcher.search(query);
assertEquals("Find document(s)", 2, hits.length());
}
|
public void setScorer(Scorer scorer) {
super.setScorer(scorer);
// TODO: might be cleaner to lazy-init 'source' and set scorer after?
assert readerContext != null;
try {
Map<String,Object> context = new HashMap<String,Object>();
assert scorer != null;
context.put("scorer", new ScoreFunctionValues(scorer));
scores = source.getValues(context, readerContext);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
| public void setScorer(Scorer scorer) {
super.setScorer(scorer);
// TODO: might be cleaner to lazy-init 'source' and set scorer after?
assert readerContext != null;
try {
Map<String,Object> context = new HashMap<String,Object>();
assert scorer != null;
context.put("scorer", scorer);
scores = source.getValues(context, readerContext);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
|
public ConcurrentUpdateSolrServer(String solrServerUrl,
HttpClient client, int queueSize, int threadCount) {
this(solrServerUrl, null, queueSize, threadCount, Executors.newCachedThreadPool(
new SolrjNamedThreadFactory("concurrentUpdateScheduler")));
shutdownExecutor = true;
}
| public ConcurrentUpdateSolrServer(String solrServerUrl,
HttpClient client, int queueSize, int threadCount) {
this(solrServerUrl, client, queueSize, threadCount, Executors.newCachedThreadPool(
new SolrjNamedThreadFactory("concurrentUpdateScheduler")));
shutdownExecutor = true;
}
|
public Token getBootstrapToken()
{
Range range = getLocalPrimaryRange();
List<DecoratedKey> keys = new ArrayList<DecoratedKey>();
for (ColumnFamilyStore cfs : ColumnFamilyStore.all())
{
for (IndexSummary.KeyPosition info: cfs.allIndexPositions())
{
if (range.contains(info.key.token))
keys.add(info.key);
}
}
FBUtilities.sortSampledKeys(keys, range);
if (keys.size() < 3)
return partitioner_.getRandomToken();
else
return keys.get(keys.size() / 2).token;
}
| public Token getBootstrapToken()
{
Range range = getLocalPrimaryRange();
List<DecoratedKey> keys = new ArrayList<DecoratedKey>();
for (ColumnFamilyStore cfs : ColumnFamilyStore.all())
{
for (IndexSummary.KeyPosition info: cfs.allIndexPositions())
{
if (range.contains(info.key.token))
keys.add(info.key);
}
}
FBUtilities.sortSampledKeys(keys, range);
if (keys.size() < 3)
return partitioner_.midpoint(range.left, range.right);
else
return keys.get(keys.size() / 2).token;
}
|
public void setText(CharacterIterator newText) {
start = newText.getBeginIndex();
end = newText.getEndIndex();
text = newText;
current = newText.getIndex();
}
| public void setText(CharacterIterator newText) {
start = newText.getBeginIndex();
end = newText.getEndIndex();
text = newText;
current = start;
}
|
public void testUpdateDelteSlices() {
DocumentsWriterDeleteQueue queue = new DocumentsWriterDeleteQueue();
final int size = 200 + random().nextInt(500) * RANDOM_MULTIPLIER;
Integer[] ids = new Integer[size];
for (int i = 0; i < ids.length; i++) {
ids[i] = random().nextInt();
}
DeleteSlice slice1 = queue.newSlice();
DeleteSlice slice2 = queue.newSlice();
BufferedDeletes bd1 = new BufferedDeletes();
BufferedDeletes bd2 = new BufferedDeletes();
int last1 = 0;
int last2 = 0;
Set<Term> uniqueValues = new HashSet<Term>();
for (int j = 0; j < ids.length; j++) {
Integer i = ids[j];
// create an array here since we compare identity below against tailItem
Term[] term = new Term[] {new Term("id", i.toString())};
uniqueValues.add(term[0]);
queue.addDelete(term);
if (random().nextInt(20) == 0 || j == ids.length - 1) {
queue.updateSlice(slice1);
assertTrue(slice1.isTailItem(term));
slice1.apply(bd1, j);
assertAllBetween(last1, j, bd1, ids);
last1 = j + 1;
}
if (random().nextInt(10) == 5 || j == ids.length - 1) {
queue.updateSlice(slice2);
assertTrue(slice2.isTailItem(term));
slice2.apply(bd2, j);
assertAllBetween(last2, j, bd2, ids);
last2 = j + 1;
}
assertEquals(uniqueValues.size(), queue.numGlobalTermDeletes());
}
assertEquals(uniqueValues, bd1.terms.keySet());
assertEquals(uniqueValues, bd2.terms.keySet());
HashSet<Term> frozenSet = new HashSet<Term>();
for (Term t : queue.freezeGlobalBuffer(null).termsIterable()) {
BytesRef bytesRef = new BytesRef();
bytesRef.copyBytes(t.bytes);
frozenSet.add(new Term(t.field, bytesRef));
}
assertEquals(uniqueValues, frozenSet);
assertEquals("num deletes must be 0 after freeze", 0, queue
.numGlobalTermDeletes());
}
| public void testUpdateDelteSlices() {
DocumentsWriterDeleteQueue queue = new DocumentsWriterDeleteQueue();
final int size = 200 + random().nextInt(500) * RANDOM_MULTIPLIER;
Integer[] ids = new Integer[size];
for (int i = 0; i < ids.length; i++) {
ids[i] = random().nextInt();
}
DeleteSlice slice1 = queue.newSlice();
DeleteSlice slice2 = queue.newSlice();
BufferedDeletes bd1 = new BufferedDeletes();
BufferedDeletes bd2 = new BufferedDeletes();
int last1 = 0;
int last2 = 0;
Set<Term> uniqueValues = new HashSet<Term>();
for (int j = 0; j < ids.length; j++) {
Integer i = ids[j];
// create an array here since we compare identity below against tailItem
Term[] term = new Term[] {new Term("id", i.toString())};
uniqueValues.add(term[0]);
queue.addDelete(term);
if (random().nextInt(20) == 0 || j == ids.length - 1) {
queue.updateSlice(slice1);
assertTrue(slice1.isTailItem(term));
slice1.apply(bd1, j);
assertAllBetween(last1, j, bd1, ids);
last1 = j + 1;
}
if (random().nextInt(10) == 5 || j == ids.length - 1) {
queue.updateSlice(slice2);
assertTrue(slice2.isTailItem(term));
slice2.apply(bd2, j);
assertAllBetween(last2, j, bd2, ids);
last2 = j + 1;
}
assertEquals(j+1, queue.numGlobalTermDeletes());
}
assertEquals(uniqueValues, bd1.terms.keySet());
assertEquals(uniqueValues, bd2.terms.keySet());
HashSet<Term> frozenSet = new HashSet<Term>();
for (Term t : queue.freezeGlobalBuffer(null).termsIterable()) {
BytesRef bytesRef = new BytesRef();
bytesRef.copyBytes(t.bytes);
frozenSet.add(new Term(t.field, bytesRef));
}
assertEquals(uniqueValues, frozenSet);
assertEquals("num deletes must be 0 after freeze", 0, queue
.numGlobalTermDeletes());
}
|
public String toString()
{
return getFilename() + "/" + StringUtils.join(sections, ",") + "\n\t progress=" + progress + "/" + size + " - " + progress*100/size + "%";
}
| public String toString()
{
return getFilename() + " sections=" + sections.size() + " progress=" + progress + "/" + size + " - " + progress*100/size + "%";
}
|
public int run(String[] args) throws Exception {
/**
Option seqOpt = obuilder.withLongName("seqFile").withRequired(false).withArgument(
abuilder.withName("seqFile").withMinimum(1).withMaximum(1).create()).withDescription(
"The Sequence File containing the Vectors").withShortName("s").create();
Option dirOpt = obuilder.withLongName("seqDirectory").withRequired(false).withArgument(
abuilder.withName("seqDirectory").withMinimum(1).withMaximum(1).create())
.withDescription("The directory containing Sequence File of Vectors")
.withShortName("d").create();
*/
addInputOption();
addOutputOption();
addOption("useKey", "u", "If the Key is a vector than dump that instead", false);
addOption("printKey", "p", "Print out the key as well, delimited by tab (or the value if useKey is true", false);
addOption("dictionary", "d", "The dictionary file.", false);
addOption("dictionaryType", "dt", "The dictionary file type (text|seqfile)", false);
addOption("csv", "c", "Output the Vector as CSV. Otherwise it substitutes in the terms for vector cell entries",
false);
addOption("namesAsComments", "n", "If using CSV output, optionally add a comment line for each NamedVector "
+ "(if the vector is one) printing out the name", false);
addOption("nameOnly", "N", "Use the name as the value for each NamedVector (skip other vectors)", false);
addOption("sortVectors", "sort", "Sort output key/value pairs of the vector entries in abs magnitude "
+ "descending order", false);
addOption("quiet", "q", "Print only file contents", false);
addOption("sizeOnly", "sz", "Dump only the size of the vector", false);
addOption("numItems", "ni", "Output at most <n> vecors", false);
addOption("vectorSize", "vs", "Truncate vectors to <vs> length when dumping (most useful when in"
+ " conjunction with -sort", false);
addOption(buildOption("filter", "fi", "Only dump out those vectors whose name matches the filter."
+ " Multiple items may be specified by repeating the argument.", true, 1, Integer.MAX_VALUE, false, null));
if (parseArguments(args, false, true) == null) {
return -1;
}
Path[] pathArr;
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path input = getInputPath();
FileStatus fileStatus = fs.getFileStatus(input);
if (fileStatus.isDir()) {
pathArr = FileUtil.stat2Paths(fs.listStatus(input, new OutputFilesFilter()));
} else {
FileStatus[] inputPaths = fs.globStatus(input);
pathArr = new Path[inputPaths.length];
int i = 0;
for (FileStatus fstatus : inputPaths) {
pathArr[i++] = fstatus.getPath();
}
}
String dictionaryType = getOption("dictionaryType", "text");
boolean sortVectors = hasOption("sortVectors");
boolean quiet = hasOption("quiet");
if (!quiet) {
log.info("Sort? {}", sortVectors);
}
String[] dictionary = null;
if (hasOption("dictionary")) {
String dictFile = getOption("dictionary");
if ("text".equals(dictionaryType)) {
dictionary = VectorHelper.loadTermDictionary(new File(dictFile));
} else if ("sequencefile".equals(dictionaryType)) {
dictionary = VectorHelper.loadTermDictionary(conf, dictFile);
} else {
//TODO: support Lucene's FST as a dictionary type
throw new IOException("Invalid dictionary type: " + dictionaryType);
}
}
Set<String> filters;
if (hasOption("filter")) {
filters = Sets.newHashSet(getOptions("filter"));
} else {
filters = null;
}
boolean useCSV = hasOption("csv");
boolean sizeOnly = hasOption("sizeOnly");
boolean nameOnly = hasOption("nameOnly");
boolean namesAsComments = hasOption("namesAsComments");
boolean transposeKeyValue = hasOption("vectorAsKey");
Writer writer;
boolean shouldClose;
File output = getOutputFile();
if (output != null) {
shouldClose = true;
writer = Files.newWriter(output, Charsets.UTF_8);
} else {
shouldClose = false;
writer = new OutputStreamWriter(System.out, Charsets.UTF_8);
}
try {
boolean printKey = hasOption("printKey");
if (useCSV && dictionary != null) {
writer.write("#");
for (int j = 0; j < dictionary.length; j++) {
writer.write(dictionary[j]);
if (j < dictionary.length - 1) {
writer.write(',');
}
}
writer.write('\n');
}
Long numItems = null;
if (hasOption("numItems")) {
numItems = Long.parseLong(getOption("numItems"));
if (quiet) {
writer.append("#Max Items to dump: ").append(String.valueOf(numItems)).append('\n');
}
}
int maxIndexesPerVector = hasOption("vectorSize")
? Integer.parseInt(getOption("vectorSize"))
: Integer.MAX_VALUE;
long itemCount = 0;
int fileCount = 0;
for (Path path : pathArr) {
if (numItems != null && numItems <= itemCount) {
break;
}
if (quiet) {
log.info("Processing file '{}' ({}/{})", path, ++fileCount, pathArr.length);
}
SequenceFileIterable<Writable, Writable> iterable =
new SequenceFileIterable<Writable, Writable>(path, true, conf);
Iterator<Pair<Writable, Writable>> iterator = iterable.iterator();
long i = 0;
while (iterator.hasNext() && (numItems == null || itemCount < numItems)) {
Pair<Writable, Writable> record = iterator.next();
Writable keyWritable = record.getFirst();
Writable valueWritable = record.getSecond();
if (printKey) {
Writable notTheVectorWritable = transposeKeyValue ? valueWritable : keyWritable;
writer.write(notTheVectorWritable.toString());
writer.write('\t');
}
Vector vector;
try {
vector = ((VectorWritable)
(transposeKeyValue ? keyWritable : valueWritable)).get();
} catch (ClassCastException e) {
if ((transposeKeyValue ? keyWritable : valueWritable)
instanceof WeightedPropertyVectorWritable) {
vector =
((WeightedPropertyVectorWritable)
(transposeKeyValue ? keyWritable : valueWritable)).getVector();
} else {
throw e;
}
}
if (filters != null
&& vector instanceof NamedVector
&& !filters.contains(((NamedVector) vector).getName())) {
//we are filtering out this item, skip
continue;
}
if (sizeOnly) {
if (vector instanceof NamedVector) {
writer.write(((NamedVector) vector).getName());
writer.write(":");
} else {
writer.write(String.valueOf(i++));
writer.write(":");
}
writer.write(String.valueOf(vector.size()));
writer.write('\n');
} else if (nameOnly) {
if (vector instanceof NamedVector) {
writer.write(((NamedVector) vector).getName());
writer.write('\n');
}
} else {
String fmtStr;
if (useCSV) {
fmtStr = VectorHelper.vectorToCSVString(vector, namesAsComments);
} else {
fmtStr = VectorHelper.vectorToJson(vector, dictionary, maxIndexesPerVector,
sortVectors);
}
writer.write(fmtStr);
writer.write('\n');
}
itemCount++;
}
}
writer.flush();
} finally {
if (shouldClose) {
Closeables.closeQuietly(writer);
}
}
return 0;
}
| public int run(String[] args) throws Exception {
/**
Option seqOpt = obuilder.withLongName("seqFile").withRequired(false).withArgument(
abuilder.withName("seqFile").withMinimum(1).withMaximum(1).create()).withDescription(
"The Sequence File containing the Vectors").withShortName("s").create();
Option dirOpt = obuilder.withLongName("seqDirectory").withRequired(false).withArgument(
abuilder.withName("seqDirectory").withMinimum(1).withMaximum(1).create())
.withDescription("The directory containing Sequence File of Vectors")
.withShortName("d").create();
*/
addInputOption();
addOutputOption();
addOption("useKey", "u", "If the Key is a vector than dump that instead", false);
addOption("printKey", "p", "Print out the key as well, delimited by tab (or the value if useKey is true", false);
addOption("dictionary", "d", "The dictionary file.", false);
addOption("dictionaryType", "dt", "The dictionary file type (text|seqfile)", false);
addOption("csv", "c", "Output the Vector as CSV. Otherwise it substitutes in the terms for vector cell entries",
false);
addOption("namesAsComments", "n", "If using CSV output, optionally add a comment line for each NamedVector "
+ "(if the vector is one) printing out the name", false);
addOption("nameOnly", "N", "Use the name as the value for each NamedVector (skip other vectors)", false);
addOption("sortVectors", "sort", "Sort output key/value pairs of the vector entries in abs magnitude "
+ "descending order", false);
addOption("quiet", "q", "Print only file contents", false);
addOption("sizeOnly", "sz", "Dump only the size of the vector", false);
addOption("numItems", "ni", "Output at most <n> vecors", false);
addOption("vectorSize", "vs", "Truncate vectors to <vs> length when dumping (most useful when in"
+ " conjunction with -sort", false);
addOption(buildOption("filter", "fi", "Only dump out those vectors whose name matches the filter."
+ " Multiple items may be specified by repeating the argument.", true, 1, Integer.MAX_VALUE, false, null));
if (parseArguments(args, false, true) == null) {
return -1;
}
Path[] pathArr;
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path input = getInputPath();
FileStatus fileStatus = fs.getFileStatus(input);
if (fileStatus.isDir()) {
pathArr = FileUtil.stat2Paths(fs.listStatus(input, new OutputFilesFilter()));
} else {
FileStatus[] inputPaths = fs.globStatus(input);
pathArr = new Path[inputPaths.length];
int i = 0;
for (FileStatus fstatus : inputPaths) {
pathArr[i++] = fstatus.getPath();
}
}
String dictionaryType = getOption("dictionaryType", "text");
boolean sortVectors = hasOption("sortVectors");
boolean quiet = hasOption("quiet");
if (!quiet) {
log.info("Sort? {}", sortVectors);
}
String[] dictionary = null;
if (hasOption("dictionary")) {
String dictFile = getOption("dictionary");
if ("text".equals(dictionaryType)) {
dictionary = VectorHelper.loadTermDictionary(new File(dictFile));
} else if ("sequencefile".equals(dictionaryType)) {
dictionary = VectorHelper.loadTermDictionary(conf, dictFile);
} else {
//TODO: support Lucene's FST as a dictionary type
throw new IOException("Invalid dictionary type: " + dictionaryType);
}
}
Set<String> filters;
if (hasOption("filter")) {
filters = Sets.newHashSet(getOptions("filter"));
} else {
filters = null;
}
boolean useCSV = hasOption("csv");
boolean sizeOnly = hasOption("sizeOnly");
boolean nameOnly = hasOption("nameOnly");
boolean namesAsComments = hasOption("namesAsComments");
boolean transposeKeyValue = hasOption("vectorAsKey");
Writer writer;
boolean shouldClose;
File output = getOutputFile();
if (output != null) {
shouldClose = true;
writer = Files.newWriter(output, Charsets.UTF_8);
} else {
shouldClose = false;
writer = new OutputStreamWriter(System.out, Charsets.UTF_8);
}
try {
boolean printKey = hasOption("printKey");
if (useCSV && dictionary != null) {
writer.write("#");
for (int j = 0; j < dictionary.length; j++) {
writer.write(dictionary[j]);
if (j < dictionary.length - 1) {
writer.write(',');
}
}
writer.write('\n');
}
Long numItems = null;
if (hasOption("numItems")) {
numItems = Long.parseLong(getOption("numItems"));
if (quiet) {
writer.append("#Max Items to dump: ").append(String.valueOf(numItems)).append('\n');
}
}
int maxIndexesPerVector = hasOption("vectorSize")
? Integer.parseInt(getOption("vectorSize"))
: Integer.MAX_VALUE;
long itemCount = 0;
int fileCount = 0;
for (Path path : pathArr) {
if (numItems != null && numItems <= itemCount) {
break;
}
if (quiet) {
log.info("Processing file '{}' ({}/{})", path, ++fileCount, pathArr.length);
}
SequenceFileIterable<Writable, Writable> iterable =
new SequenceFileIterable<Writable, Writable>(path, true, conf);
Iterator<Pair<Writable, Writable>> iterator = iterable.iterator();
long i = 0;
while (iterator.hasNext() && (numItems == null || itemCount < numItems)) {
Pair<Writable, Writable> record = iterator.next();
Writable keyWritable = record.getFirst();
Writable valueWritable = record.getSecond();
if (printKey) {
Writable notTheVectorWritable = transposeKeyValue ? valueWritable : keyWritable;
writer.write(notTheVectorWritable.toString());
writer.write('\t');
}
Vector vector;
try {
vector = ((VectorWritable)
(transposeKeyValue ? keyWritable : valueWritable)).get();
} catch (ClassCastException e) {
if ((transposeKeyValue ? keyWritable : valueWritable)
instanceof WeightedPropertyVectorWritable) {
vector =
((WeightedPropertyVectorWritable)
(transposeKeyValue ? keyWritable : valueWritable)).getVector();
} else {
throw e;
}
}
if (filters != null
&& vector instanceof NamedVector
&& !filters.contains(((NamedVector) vector).getName())) {
//we are filtering out this item, skip
continue;
}
if (sizeOnly) {
if (vector instanceof NamedVector) {
writer.write(((NamedVector) vector).getName());
writer.write(":");
} else {
writer.write(String.valueOf(i++));
writer.write(":");
}
writer.write(String.valueOf(vector.size()));
writer.write('\n');
} else if (nameOnly) {
if (vector instanceof NamedVector) {
writer.write(((NamedVector) vector).getName());
writer.write('\n');
}
} else {
String fmtStr;
if (useCSV) {
fmtStr = VectorHelper.vectorToCSVString(vector, namesAsComments);
} else {
fmtStr = VectorHelper.vectorToJson(vector, dictionary, maxIndexesPerVector,
sortVectors);
}
writer.write(fmtStr);
writer.write('\n');
}
itemCount++;
}
}
writer.flush();
} finally {
if (shouldClose) {
Closeables.close(writer, true);
}
}
return 0;
}
|
private void initParents(IndexReader reader, int first) throws IOException {
if (reader.maxDoc() == first) {
return;
}
// it's ok to use MultiFields because we only iterate on one posting list.
// breaking it to loop over the leaves() only complicates code for no
// apparent gain.
DocsAndPositionsEnum positions = MultiFields.getTermPositionsEnum(reader, null,
Consts.FIELD_PAYLOADS, Consts.PAYLOAD_PARENT_BYTES_REF,
DocsAndPositionsEnum.FLAG_PAYLOADS);
// shouldn't really happen, if it does, something's wrong
if (positions == null || positions.advance(first) == DocIdSetIterator.NO_MORE_DOCS) {
throw new CorruptIndexException("Missing parent data for category " + first);
}
int num = reader.maxDoc();
for (int i = first; i < num; i++) {
if (positions.docID() == i) {
if (positions.freq() == 0) { // shouldn't happen
throw new CorruptIndexException("Missing parent data for category " + i);
}
parents[i] = positions.nextPosition();
if (positions.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
if (i + 1 < num) {
throw new CorruptIndexException("Missing parent data for category "+ (i + 1));
}
break;
}
} else { // this shouldn't happen
throw new CorruptIndexException("Missing parent data for category " + i);
}
}
}
/**
* Adds the given ordinal/parent info and returns either a new instance if the
* underlying array had to grow, or this instance otherwise.
* <p>
* <b>NOTE:</b> you should call this method from a thread-safe code.
*/
ParallelTaxonomyArrays add(int ordinal, int parentOrdinal) {
if (ordinal >= parents.length) {
int[] newarray = ArrayUtil.grow(parents);
newarray[ordinal] = parentOrdinal;
return new ParallelTaxonomyArrays(newarray);
}
parents[ordinal] = parentOrdinal;
return this;
}
| private void initParents(IndexReader reader, int first) throws IOException {
if (reader.maxDoc() == first) {
return;
}
// it's ok to use MultiFields because we only iterate on one posting list.
// breaking it to loop over the leaves() only complicates code for no
// apparent gain.
DocsAndPositionsEnum positions = MultiFields.getTermPositionsEnum(reader, null,
Consts.FIELD_PAYLOADS, Consts.PAYLOAD_PARENT_BYTES_REF,
DocsAndPositionsEnum.FLAG_PAYLOADS);
// shouldn't really happen, if it does, something's wrong
if (positions == null || positions.advance(first) == DocIdSetIterator.NO_MORE_DOCS) {
throw new CorruptIndexException("Missing parent data for category " + first);
}
int num = reader.maxDoc();
for (int i = first; i < num; i++) {
if (positions.docID() == i) {
if (positions.freq() == 0) { // shouldn't happen
throw new CorruptIndexException("Missing parent data for category " + i);
}
parents[i] = positions.nextPosition();
if (positions.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
if (i + 1 < num) {
throw new CorruptIndexException("Missing parent data for category "+ (i + 1));
}
break;
}
} else { // this shouldn't happen
throw new CorruptIndexException("Missing parent data for category " + i);
}
}
}
/**
* Adds the given ordinal/parent info and returns either a new instance if the
* underlying array had to grow, or this instance otherwise.
* <p>
* <b>NOTE:</b> you should call this method from a thread-safe code.
*/
ParallelTaxonomyArrays add(int ordinal, int parentOrdinal) {
if (ordinal >= parents.length) {
int[] newarray = ArrayUtil.grow(parents, ordinal + 1);
newarray[ordinal] = parentOrdinal;
return new ParallelTaxonomyArrays(newarray);
}
parents[ordinal] = parentOrdinal;
return this;
}
|
private int getConnFromDatabaseName() throws DRDAProtocolException
{
Properties p = new Properties();
databaseAccessException = null;
//if we haven't got the correlation token yet, use session number for drdaID
if (session.drdaID == null)
session.drdaID = leftBrace + session.connNum + rightBrace;
p.put(Attribute.DRDAID_ATTR, session.drdaID);
try {
database.makeConnection(p);
} catch (SQLException se) {
String sqlState = se.getSQLState();
// need to set the security check code based on the reason the connection
// was denied, Cloudscape doesn't say whether the userid or password caused
// the problem, so we will just return userid invalid
databaseAccessException = se;
for (; se != null; se = se.getNextException())
{
if (SanityManager.DEBUG)
trace(se.getMessage());
println2Log(database.dbName, session.drdaID, se.getMessage());
}
if (sqlState.regionMatches(0,SQLState.LOGIN_FAILED,0,5))
return CodePoint.SECCHKCD_USERIDINVALID;
return 0;
}
catch (Exception e)
{
// If cloudscape has shut down for some reason,
// we will send an agent error and then try to
// get the driver loaded again. We have to get
// rid of the client first in case they are holding
// the DriverManager lock.
println2Log(database.dbName, session.drdaID,
"Driver not loaded"
+ e.getMessage());
try {
agentError("Driver not loaded");
}
catch (DRDAProtocolException dpe)
{
// Retry starting the server before rethrowing
// the protocol exception. Then hopfully all
// will be well when they try again.
try {
server.startNetworkServer();
} catch (Exception re) {
println2Log(database.dbName, session.drdaID, "Failed attempt to reload driver " +re.getMessage() );
}
throw dpe;
}
}
// Everything worked so log connection to the database.
if (getLogConnections())
println2Log(database.dbName, session.drdaID,
"Cloudscape Network Server connected to database " +
database.dbName);
return 0;
}
| private int getConnFromDatabaseName() throws DRDAProtocolException
{
Properties p = new Properties();
databaseAccessException = null;
//if we haven't got the correlation token yet, use session number for drdaID
if (session.drdaID == null)
session.drdaID = leftBrace + session.connNum + rightBrace;
p.put(Attribute.DRDAID_ATTR, session.drdaID);
try {
database.makeConnection(p);
} catch (SQLException se) {
String sqlState = se.getSQLState();
// need to set the security check code based on the reason the connection
// was denied, Cloudscape doesn't say whether the userid or password caused
// the problem, so we will just return userid invalid
databaseAccessException = se;
for (; se != null; se = se.getNextException())
{
if (SanityManager.DEBUG)
trace(se.getMessage());
println2Log(database.dbName, session.drdaID, se.getMessage());
}
if (sqlState.regionMatches(0,SQLState.LOGIN_FAILED,0,5))
return CodePoint.SECCHKCD_USERIDINVALID;
return 0;
}
catch (Exception e)
{
// If cloudscape has shut down for some reason,
// we will send an agent error and then try to
// get the driver loaded again. We have to get
// rid of the client first in case they are holding
// the DriverManager lock.
println2Log(database.dbName, session.drdaID,
"Driver not loaded"
+ e.getMessage());
try {
agentError("Driver not loaded");
}
catch (DRDAProtocolException dpe)
{
// Retry starting the server before rethrowing
// the protocol exception. Then hopfully all
// will be well when they try again.
try {
server.startNetworkServer();
} catch (Exception re) {
println2Log(database.dbName, session.drdaID, "Failed attempt to reload driver " +re.getMessage() );
}
throw dpe;
}
}
// Everything worked so log connection to the database.
if (getLogConnections())
println2Log(database.dbName, session.drdaID,
"Apache Derby Network Server connected to database " +
database.dbName);
return 0;
}
|
private void showFromZooKeeper(SolrQueryRequest req, SolrQueryResponse rsp,
CoreContainer coreContainer) throws KeeperException,
InterruptedException, UnsupportedEncodingException {
SolrZkClient zkClient = coreContainer.getZkController().getZkClient();
String adminFile = getAdminFileFromZooKeeper(req, rsp, zkClient);
if (adminFile == null) {
return;
}
// Show a directory listing
List<String> children = zkClient.getChildren(adminFile, null, true);
if (children.size() > 0) {
NamedList<SimpleOrderedMap<Object>> files = new SimpleOrderedMap<SimpleOrderedMap<Object>>();
for (String f : children) {
if (isHiddenFile(rsp, f)) {
continue;
}
SimpleOrderedMap<Object> fileInfo = new SimpleOrderedMap<Object>();
files.add(f, fileInfo);
List<String> fchildren = zkClient.getChildren(adminFile, null, true);
if (fchildren.size() > 0) {
fileInfo.add("directory", true);
} else {
// TODO? content type
fileInfo.add("size", f.length());
}
// TODO: ?
// fileInfo.add( "modified", new Date( f.lastModified() ) );
}
rsp.add("files", files);
} else {
// Include the file contents
// The file logic depends on RawResponseWriter, so force its use.
ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
params.set(CommonParams.WT, "raw");
req.setParams(params);
ContentStreamBase content = new ContentStreamBase.ByteArrayStream(zkClient.getData(adminFile, null, null, true), adminFile);
content.setContentType(req.getParams().get(USE_CONTENT_TYPE));
rsp.add(RawResponseWriter.CONTENT, content);
}
rsp.setHttpCaching(false);
}
| private void showFromZooKeeper(SolrQueryRequest req, SolrQueryResponse rsp,
CoreContainer coreContainer) throws KeeperException,
InterruptedException, UnsupportedEncodingException {
SolrZkClient zkClient = coreContainer.getZkController().getZkClient();
String adminFile = getAdminFileFromZooKeeper(req, rsp, zkClient);
if (adminFile == null) {
return;
}
// Show a directory listing
List<String> children = zkClient.getChildren(adminFile, null, true);
if (children.size() > 0) {
NamedList<SimpleOrderedMap<Object>> files = new SimpleOrderedMap<SimpleOrderedMap<Object>>();
for (String f : children) {
if (isHiddenFile(rsp, f)) {
continue;
}
SimpleOrderedMap<Object> fileInfo = new SimpleOrderedMap<Object>();
files.add(f, fileInfo);
List<String> fchildren = zkClient.getChildren(adminFile + "/" + f, null, true);
if (fchildren.size() > 0) {
fileInfo.add("directory", true);
} else {
// TODO? content type
fileInfo.add("size", f.length());
}
// TODO: ?
// fileInfo.add( "modified", new Date( f.lastModified() ) );
}
rsp.add("files", files);
} else {
// Include the file contents
// The file logic depends on RawResponseWriter, so force its use.
ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
params.set(CommonParams.WT, "raw");
req.setParams(params);
ContentStreamBase content = new ContentStreamBase.ByteArrayStream(zkClient.getData(adminFile, null, null, true), adminFile);
content.setContentType(req.getParams().get(USE_CONTENT_TYPE));
rsp.add(RawResponseWriter.CONTENT, content);
}
rsp.setHttpCaching(false);
}
|
private void parseSQLDTA(DRDAStatement stmt) throws DRDAProtocolException,SQLException
{
try {
parseSQLDTA_work(stmt);
}
catch (SQLException se)
{
skipRemainder(false);
throw se;
}
}
| private void parseSQLDTA(DRDAStatement stmt) throws DRDAProtocolException,SQLException
{
try {
parseSQLDTA_work(stmt);
}
catch (SQLException se)
{
skipRemainder(true);
throw se;
}
}
|
public int compare(ColumnFamilyStore o1, ColumnFamilyStore o2)
{
long size1 = o1.getTotalMemtableLiveSize();
long size2 = o2.getTotalMemtableLiveSize();
if (size1 < size2)
return -1;
if (size1 > size2)
return 1;
return 0;
}
});
// flush largest first until we get below our threshold.
// although it looks like liveBytes + flushingBytes will stay a constant, it will not if flushes finish
// while we loop, which is especially likely to happen if the flush queue fills up (so further forceFlush calls block)
while (true)
{
flushingBytes = countFlushingBytes();
if (liveBytes + flushingBytes <= DatabaseDescriptor.getTotalMemtableSpaceInMB() * 1048576L || sorted.isEmpty())
break;
ColumnFamilyStore cfs = sorted.remove(sorted.size() - 1);
long size = cfs.getTotalMemtableLiveSize();
logger.info("flushing {} to free up {} bytes", cfs, size);
liveBytes -= size;
cfs.forceFlush();
}
}
finally
{
logger.debug("memtable memory usage is {} bytes with {} live", liveBytes + flushingBytes, liveBytes);
}
}
| public int compare(ColumnFamilyStore o1, ColumnFamilyStore o2)
{
long size1 = o1.getTotalMemtableLiveSize();
long size2 = o2.getTotalMemtableLiveSize();
if (size1 < size2)
return -1;
if (size1 > size2)
return 1;
return 0;
}
});
// flush largest first until we get below our threshold.
// although it looks like liveBytes + flushingBytes will stay a constant, it will not if flushes finish
// while we loop, which is especially likely to happen if the flush queue fills up (so further forceFlush calls block)
while (true)
{
flushingBytes = countFlushingBytes();
if (liveBytes + flushingBytes <= DatabaseDescriptor.getTotalMemtableSpaceInMB() * 1048576L || sorted.isEmpty())
break;
ColumnFamilyStore cfs = sorted.remove(sorted.size() - 1);
long size = cfs.getTotalMemtableLiveSize();
logger.info("flushing {} to free up {} bytes", cfs, size);
liveBytes -= size;
cfs.forceFlush();
}
}
finally
{
logger.trace("memtable memory usage is {} bytes with {} live", liveBytes + flushingBytes, liveBytes);
}
}
|
public void addSSTable(SSTableReader sstable)
{
ssTables_.add(sstable);
CompactionManager.instance.submitMinor(this);
}
| public void addSSTable(SSTableReader sstable)
{
ssTables_.add(sstable);
CompactionManager.instance.submitMinorIfNeeded(this);
}
|
public void testCompactions() throws IOException, ExecutionException, InterruptedException
{
CompactionManager.instance.disableAutoCompaction();
// this test does enough rows to force multiple block indexes to be used
Table table = Table.open(TABLE1);
ColumnFamilyStore store = table.getColumnFamilyStore("Standard1");
final int ROWS_PER_SSTABLE = 10;
Set<String> inserted = new HashSet<String>();
for (int j = 0; j < (SSTableReader.indexInterval() * 3) / ROWS_PER_SSTABLE; j++) {
for (int i = 0; i < ROWS_PER_SSTABLE; i++) {
String key = String.valueOf(i % 2);
RowMutation rm = new RowMutation(TABLE1, key);
rm.add(new QueryPath("Standard1", null, String.valueOf(i / 2).getBytes()), new byte[0], j * ROWS_PER_SSTABLE + i);
rm.apply();
inserted.add(key);
}
store.forceBlockingFlush();
assertEquals(inserted.size(), table.getColumnFamilyStore("Standard1").getKeyRange("", "", 10000).keys.size());
}
while (true)
{
Future<Integer> ft = CompactionManager.instance.submitMinor(store);
if (ft.get() == 0)
break;
}
if (store.getSSTables().size() > 1)
{
CompactionManager.instance.submitMajor(store).get();
}
assertEquals(inserted.size(), table.getColumnFamilyStore("Standard1").getKeyRange("", "", 10000).keys.size());
}
| public void testCompactions() throws IOException, ExecutionException, InterruptedException
{
CompactionManager.instance.disableAutoCompaction();
// this test does enough rows to force multiple block indexes to be used
Table table = Table.open(TABLE1);
ColumnFamilyStore store = table.getColumnFamilyStore("Standard1");
final int ROWS_PER_SSTABLE = 10;
Set<String> inserted = new HashSet<String>();
for (int j = 0; j < (SSTableReader.indexInterval() * 3) / ROWS_PER_SSTABLE; j++) {
for (int i = 0; i < ROWS_PER_SSTABLE; i++) {
String key = String.valueOf(i % 2);
RowMutation rm = new RowMutation(TABLE1, key);
rm.add(new QueryPath("Standard1", null, String.valueOf(i / 2).getBytes()), new byte[0], j * ROWS_PER_SSTABLE + i);
rm.apply();
inserted.add(key);
}
store.forceBlockingFlush();
assertEquals(inserted.size(), table.getColumnFamilyStore("Standard1").getKeyRange("", "", 10000).keys.size());
}
while (true)
{
Future<Integer> ft = CompactionManager.instance.submitMinorIfNeeded(store);
if (ft.get() == 0)
break;
}
if (store.getSSTables().size() > 1)
{
CompactionManager.instance.submitMajor(store).get();
}
assertEquals(inserted.size(), table.getColumnFamilyStore("Standard1").getKeyRange("", "", 10000).keys.size());
}
|
private final SimpleDocValuesFormat defaultDVFormat = SimpleDocValuesFormat.forName("Memory");
// nocommit need simpleNormsFormat
} | private final SimpleDocValuesFormat defaultDVFormat = SimpleDocValuesFormat.forName("Lucene41");
// nocommit need simpleNormsFormat
} |
public List<String> getIncomingFiles(String host) throws IOException
{
List<String> files = new ArrayList<String>();
for (PendingFile pf : StreamInManager.getIncomingFiles(InetAddress.getByName(host)))
{
files.add(String.format("%s: %s", pf.getDescriptor().ksname, pf.toString()));
}
return files;
}
| public List<String> getIncomingFiles(String host) throws IOException
{
List<String> files = new ArrayList<String>();
for (PendingFile pf : StreamInManager.getIncomingFiles(InetAddress.getByName(host)))
{
files.add(String.format("%s: %s", pf.desc.ksname, pf.toString()));
}
return files;
}
|
public LinkedHashMap<PendingFile, PendingFile> getContextMapping(PendingFile[] remoteFiles) throws IOException
{
/* Create a local sstable for each remote sstable */
LinkedHashMap<PendingFile, PendingFile> mapping = new LinkedHashMap<PendingFile, PendingFile>();
for (PendingFile remote : remoteFiles)
{
Descriptor remotedesc = remote.getDescriptor();
// new local sstable
Table table = Table.open(remotedesc.ksname);
ColumnFamilyStore cfStore = table.getColumnFamilyStore(remotedesc.cfname);
Descriptor localdesc = Descriptor.fromFilename(cfStore.getFlushPath());
// add a local file for this component
mapping.put(remote, new PendingFile(localdesc, remote));
}
return mapping;
}
| public LinkedHashMap<PendingFile, PendingFile> getContextMapping(PendingFile[] remoteFiles) throws IOException
{
/* Create a local sstable for each remote sstable */
LinkedHashMap<PendingFile, PendingFile> mapping = new LinkedHashMap<PendingFile, PendingFile>();
for (PendingFile remote : remoteFiles)
{
Descriptor remotedesc = remote.desc;
// new local sstable
Table table = Table.open(remotedesc.ksname);
ColumnFamilyStore cfStore = table.getColumnFamilyStore(remotedesc.cfname);
Descriptor localdesc = Descriptor.fromFilename(cfStore.getFlushPath());
// add a local file for this component
mapping.put(remote, new PendingFile(localdesc, remote));
}
return mapping;
}
|
public void geohashRecursiveRandom() throws IOException {
init(12);
//1. Iterate test with the cluster at some worldly point of interest
Point[] clusterCenters = new Point[]{ctx.makePoint(-180,0), ctx.makePoint(0,90), ctx.makePoint(0,-90)};
for (Point clusterCenter : clusterCenters) {
//2. Iterate on size of cluster (a really small one and a large one)
String hashCenter = GeohashUtils.encodeLatLon(clusterCenter.getY(), clusterCenter.getX(), maxLength);
//calculate the number of degrees in the smallest grid box size (use for both lat & lon)
String smallBox = hashCenter.substring(0,hashCenter.length()-1);//chop off leaf precision
Rectangle clusterDims = GeohashUtils.decodeBoundary(smallBox,ctx);
double smallRadius = Math.max(clusterDims.getMaxX()-clusterDims.getMinX(),clusterDims.getMaxY()-clusterDims.getMinY());
assert smallRadius < 1;
double largeRadius = 20d;//good large size; don't use >=45 for this test code to work
double[] radiusDegs = {largeRadius,smallRadius};
for (double radiusDeg : radiusDegs) {
//3. Index random points in this cluster circle
deleteAll();
List<Point> points = new ArrayList<Point>();
for(int i = 0; i < 20; i++) {
//Note that this will not result in randomly distributed points in the
// circle, they will be concentrated towards the center a little. But
// it's good enough.
Point pt = ctx.getDistCalc().pointOnBearing(clusterCenter,
random().nextDouble() * radiusDeg, random().nextInt() * 360, ctx, null);
pt = alignGeohash(pt);
points.add(pt);
addDocument(newDoc("" + i, pt));
}
commit();
//3. Use some query centers. Each is twice the cluster's radius away.
for(int ri = 0; ri < 4; ri++) {
Point queryCenter = ctx.getDistCalc().pointOnBearing(clusterCenter,
radiusDeg*2, random().nextInt(360), ctx, null);
queryCenter = alignGeohash(queryCenter);
//4.1 Query a small box getting nothing
checkHits(q(queryCenter, radiusDeg - smallRadius/2), 0, null);
//4.2 Query a large box enclosing the cluster, getting everything
checkHits(q(queryCenter, radiusDeg*3*1.01), points.size(), null);
//4.3 Query a medium box getting some (calculate the correct solution and verify)
double queryDist = radiusDeg * 2;
//Find matching points. Put into int[] of doc ids which is the same thing as the index into points list.
int[] ids = new int[points.size()];
int ids_sz = 0;
for (int i = 0; i < points.size(); i++) {
Point point = points.get(i);
if (ctx.getDistCalc().distance(queryCenter, point) <= queryDist)
ids[ids_sz++] = i;
}
ids = Arrays.copyOf(ids, ids_sz);
//assert ids_sz > 0 (can't because randomness keeps us from being able to)
checkHits(q(queryCenter, queryDist), ids.length, ids);
}
}//for radiusDeg
}//for clusterCenter
}//randomTest()
| public void geohashRecursiveRandom() throws IOException {
init(12);
//1. Iterate test with the cluster at some worldly point of interest
Point[] clusterCenters = new Point[]{ctx.makePoint(-180,0), ctx.makePoint(0,90), ctx.makePoint(0,-90)};
for (Point clusterCenter : clusterCenters) {
//2. Iterate on size of cluster (a really small one and a large one)
String hashCenter = GeohashUtils.encodeLatLon(clusterCenter.getY(), clusterCenter.getX(), maxLength);
//calculate the number of degrees in the smallest grid box size (use for both lat & lon)
String smallBox = hashCenter.substring(0,hashCenter.length()-1);//chop off leaf precision
Rectangle clusterDims = GeohashUtils.decodeBoundary(smallBox,ctx);
double smallRadius = Math.max(clusterDims.getMaxX()-clusterDims.getMinX(),clusterDims.getMaxY()-clusterDims.getMinY());
assert smallRadius < 1;
double largeRadius = 20d;//good large size; don't use >=45 for this test code to work
double[] radiusDegs = {largeRadius,smallRadius};
for (double radiusDeg : radiusDegs) {
//3. Index random points in this cluster circle
deleteAll();
List<Point> points = new ArrayList<Point>();
for(int i = 0; i < 20; i++) {
//Note that this will not result in randomly distributed points in the
// circle, they will be concentrated towards the center a little. But
// it's good enough.
Point pt = ctx.getDistCalc().pointOnBearing(clusterCenter,
random().nextDouble() * radiusDeg, random().nextInt() * 360, ctx, null);
pt = alignGeohash(pt);
points.add(pt);
addDocument(newDoc("" + i, pt));
}
commit();
//3. Use some query centers. Each is twice the cluster's radius away.
for(int ri = 0; ri < 4; ri++) {
Point queryCenter = ctx.getDistCalc().pointOnBearing(clusterCenter,
radiusDeg*2, random().nextInt(360), ctx, null);
queryCenter = alignGeohash(queryCenter);
//4.1 Query a small box getting nothing
checkHits(q(queryCenter, radiusDeg - smallRadius/2), 0, null);
//4.2 Query a large box enclosing the cluster, getting everything
checkHits(q(queryCenter, radiusDeg*3 + smallRadius/2), points.size(), null);
//4.3 Query a medium box getting some (calculate the correct solution and verify)
double queryDist = radiusDeg * 2;
//Find matching points. Put into int[] of doc ids which is the same thing as the index into points list.
int[] ids = new int[points.size()];
int ids_sz = 0;
for (int i = 0; i < points.size(); i++) {
Point point = points.get(i);
if (ctx.getDistCalc().distance(queryCenter, point) <= queryDist)
ids[ids_sz++] = i;
}
ids = Arrays.copyOf(ids, ids_sz);
//assert ids_sz > 0 (can't because randomness keeps us from being able to)
checkHits(q(queryCenter, queryDist), ids.length, ids);
}
}//for radiusDeg
}//for clusterCenter
}//randomTest()
|
public CoreContainer initialize() throws IOException, ParserConfigurationException, SAXException {
CoreContainer cores = null;
String instanceDir = SolrResourceLoader.locateInstanceDir();
File fconf = new File(instanceDir, solrConfigFilename == null? "solr.xml": solrConfigFilename);
log.info("looking for solr.xml: " + fconf.getAbsolutePath());
if (fconf.exists()) {
cores = new CoreContainer();
cores.load(instanceDir, fconf);
abortOnConfigurationError = false;
// if any core aborts on startup, then abort
for (SolrCore c : cores.getCores()) {
if (c.getSolrConfig().getBool("abortOnConfigurationError", false)) {
abortOnConfigurationError = true;
break;
}
}
solrConfigFilename = cores.getConfigFile().getName();
} else {
// perform compatibility init
cores = new CoreContainer(new SolrResourceLoader(instanceDir));
SolrConfig cfg = solrConfigFilename == null ? new SolrConfig() : new SolrConfig(solrConfigFilename);
CoreDescriptor dcore = new CoreDescriptor(cores, "", cfg.getResourceLoader().getInstanceDir());
SolrCore singlecore = new SolrCore(null, null, cfg, null, dcore);
abortOnConfigurationError = cfg.getBool(
"abortOnConfigurationError", abortOnConfigurationError);
cores.register("", singlecore, false);
cores.setPersistent(false);
solrConfigFilename = cfg.getName();
}
return cores;
}
}
| public CoreContainer initialize() throws IOException, ParserConfigurationException, SAXException {
CoreContainer cores = null;
String instanceDir = SolrResourceLoader.locateInstanceDir();
File fconf = new File(instanceDir, solrConfigFilename == null? "solr.xml": solrConfigFilename);
log.info("looking for solr.xml: " + fconf.getAbsolutePath());
if (fconf.exists()) {
cores = new CoreContainer();
cores.load(instanceDir, fconf);
abortOnConfigurationError = false;
// if any core aborts on startup, then abort
for (SolrCore c : cores.getCores()) {
if (c.getSolrConfig().getBool("abortOnConfigurationError", false)) {
abortOnConfigurationError = true;
break;
}
}
solrConfigFilename = cores.getConfigFile().getName();
} else {
// perform compatibility init
cores = new CoreContainer(new SolrResourceLoader(instanceDir));
SolrConfig cfg = solrConfigFilename == null ? new SolrConfig() : new SolrConfig(solrConfigFilename);
CoreDescriptor dcore = new CoreDescriptor(cores, "", ".");
SolrCore singlecore = new SolrCore(null, null, cfg, null, dcore);
abortOnConfigurationError = cfg.getBool(
"abortOnConfigurationError", abortOnConfigurationError);
cores.register("", singlecore, false);
cores.setPersistent(false);
solrConfigFilename = cfg.getName();
}
return cores;
}
}
|
public static HashFunction[] createHashFunctions(HashType type, int numFunctions) {
HashFunction[] hashFunction = new HashFunction[numFunctions];
Random seed = new Random(11);
switch (type) {
case LINEAR:
for (int i = 0; i < numFunctions; i++) {
hashFunction[i] = new LinearHash(seed.nextInt(), seed.nextInt());
}
break;
case POLYNOMIAL:
for (int i = 0; i < numFunctions; i++) {
hashFunction[i] = new PolynomialHash(seed.nextInt(), seed.nextInt(), seed.nextInt());
}
break;
case MURMUR:
for (int i = 0; i < numFunctions; i++) {
hashFunction[i] = new MurmurHashWrapper(seed.nextInt());
}
break;
}
return hashFunction;
}
| public static HashFunction[] createHashFunctions(HashType type, int numFunctions) {
HashFunction[] hashFunction = new HashFunction[numFunctions];
Random seed = RandomUtils.getRandom(11);
switch (type) {
case LINEAR:
for (int i = 0; i < numFunctions; i++) {
hashFunction[i] = new LinearHash(seed.nextInt(), seed.nextInt());
}
break;
case POLYNOMIAL:
for (int i = 0; i < numFunctions; i++) {
hashFunction[i] = new PolynomialHash(seed.nextInt(), seed.nextInt(), seed.nextInt());
}
break;
case MURMUR:
for (int i = 0; i < numFunctions; i++) {
hashFunction[i] = new MurmurHashWrapper(seed.nextInt());
}
break;
}
return hashFunction;
}
|
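
The only change in the row above is where the seeded random generator comes from; the pattern itself is building a reproducible family of hash functions from a single seed. A library-free sketch under that reading (this LinearHash is a stand-in, not Mahout's class):

import java.util.Random;

public class SeededHashFamily {

    // y = a*x + b style hash; parameters are drawn from a seeded Random for reproducibility.
    static final class LinearHash {
        private final int a, b;
        LinearHash(int a, int b) { this.a = a; this.b = b; }
        int hash(int x) { return a * x + b; }
    }

    static LinearHash[] createHashFunctions(int numFunctions, long seed) {
        Random random = new Random(seed);          // same seed => same family on every run
        LinearHash[] family = new LinearHash[numFunctions];
        for (int i = 0; i < numFunctions; i++) {
            family[i] = new LinearHash(random.nextInt(), random.nextInt());
        }
        return family;
    }

    public static void main(String[] args) {
        LinearHash[] family = createHashFunctions(3, 11);
        System.out.println(family[0].hash(42));
    }
}
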
public List<TokenRange> describe_ring(String keyspace)throws InvalidRequestException
{
if (!DatabaseDescriptor.getNonSystemTables().contains(keyspace))
throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
List<TokenRange> ranges = new ArrayList<TokenRange>();
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
{
Range range = entry.getKey();
List<String> endpoints = entry.getValue();
ranges.add(new TokenRange(tf.toString(range.left), tf.toString(range.right), endpoints));
}
return ranges;
}
| public List<TokenRange> describe_ring(String keyspace)throws InvalidRequestException
{
if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
List<TokenRange> ranges = new ArrayList<TokenRange>();
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
{
Range range = entry.getKey();
List<String> endpoints = entry.getValue();
ranges.add(new TokenRange(tf.toString(range.left), tf.toString(range.right), endpoints));
}
return ranges;
}
|
public void testSortedBytes() throws IOException {
DocValuesType type = DocValuesType.SORTED;
final Directory d = newDirectory();
IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(d, cfg);
int numDocs = atLeast(100);
BytesRefHash hash = new BytesRefHash();
Map<String, String> docToString = new HashMap<String, String>();
int len = 1 + random().nextInt(50);
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
doc.add(newTextField("id", "" + i, Field.Store.YES));
String string = _TestUtil.randomRealisticUnicodeString(random(), 1, len);
BytesRef br = new BytesRef(string);
doc.add(new SortedBytesDocValuesField("field", br));
hash.add(br);
docToString.put("" + i, string);
w.addDocument(doc);
}
if (rarely()) {
w.commit();
}
int numDocsNoValue = atLeast(10);
for (int i = 0; i < numDocsNoValue; i++) {
Document doc = new Document();
doc.add(newTextField("id", "noValue", Field.Store.YES));
w.addDocument(doc);
}
BytesRef bytesRef = new BytesRef();
hash.add(bytesRef); // add empty value for the gaps
if (rarely()) {
w.commit();
}
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
String id = "" + i + numDocs;
doc.add(newTextField("id", id, Field.Store.YES));
String string = _TestUtil.randomRealisticUnicodeString(random(), 1, len);
BytesRef br = new BytesRef(string);
hash.add(br);
docToString.put(id, string);
doc.add(new SortedBytesDocValuesField("field", br));
w.addDocument(doc);
}
w.commit();
IndexReader reader = w.getReader();
SortedDocValues docValues = MultiSimpleDocValues.simpleSortedValues(reader, "field");
int[] sort = hash.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
BytesRef expected = new BytesRef();
BytesRef actual = new BytesRef();
assertEquals(hash.size(), docValues.getValueCount());
for (int i = 0; i < hash.size(); i++) {
hash.get(sort[i], expected);
docValues.lookupOrd(i, actual);
assertEquals(expected.utf8ToString(), actual.utf8ToString());
int ord = docValues.lookupTerm(expected, actual);
assertEquals(i, ord);
}
AtomicReader slowR = SlowCompositeReaderWrapper.wrap(reader);
Set<Entry<String, String>> entrySet = docToString.entrySet();
for (Entry<String, String> entry : entrySet) {
int docId = docId(slowR, new Term("id", entry.getKey()));
expected = new BytesRef(entry.getValue());
docValues.get(docId, actual);
assertEquals(expected, actual);
}
reader.close();
w.close();
d.close();
}
| public void testSortedBytes() throws IOException {
DocValuesType type = DocValuesType.SORTED;
final Directory d = newDirectory();
IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(d, cfg);
int numDocs = atLeast(100);
BytesRefHash hash = new BytesRefHash();
Map<String, String> docToString = new HashMap<String, String>();
int len = 1 + random().nextInt(50);
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
doc.add(newTextField("id", "" + i, Field.Store.YES));
String string = _TestUtil.randomRealisticUnicodeString(random(), 1, len);
BytesRef br = new BytesRef(string);
doc.add(new SortedBytesDocValuesField("field", br));
hash.add(br);
docToString.put("" + i, string);
w.addDocument(doc);
}
if (rarely()) {
w.commit();
}
int numDocsNoValue = atLeast(10);
for (int i = 0; i < numDocsNoValue; i++) {
Document doc = new Document();
doc.add(newTextField("id", "noValue", Field.Store.YES));
w.addDocument(doc);
}
BytesRef bytesRef = new BytesRef();
hash.add(bytesRef); // add empty value for the gaps
if (rarely()) {
w.commit();
}
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
String id = "" + i + numDocs;
doc.add(newTextField("id", id, Field.Store.YES));
String string = _TestUtil.randomRealisticUnicodeString(random(), 1, len);
BytesRef br = new BytesRef(string);
hash.add(br);
docToString.put(id, string);
doc.add(new SortedBytesDocValuesField("field", br));
w.addDocument(doc);
}
w.commit();
IndexReader reader = w.getReader();
SortedDocValues docValues = MultiDocValues.getSortedValues(reader, "field");
int[] sort = hash.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
BytesRef expected = new BytesRef();
BytesRef actual = new BytesRef();
assertEquals(hash.size(), docValues.getValueCount());
for (int i = 0; i < hash.size(); i++) {
hash.get(sort[i], expected);
docValues.lookupOrd(i, actual);
assertEquals(expected.utf8ToString(), actual.utf8ToString());
int ord = docValues.lookupTerm(expected, actual);
assertEquals(i, ord);
}
AtomicReader slowR = SlowCompositeReaderWrapper.wrap(reader);
Set<Entry<String, String>> entrySet = docToString.entrySet();
for (Entry<String, String> entry : entrySet) {
int docId = docId(slowR, new Term("id", entry.getKey()));
expected = new BytesRef(entry.getValue());
docValues.get(docId, actual);
assertEquals(expected, actual);
}
reader.close();
w.close();
d.close();
}
|
public void testAddDocument() throws Exception {
Document testDoc = new Document();
DocHelper.setupDoc(testDoc);
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
writer.addDocument(testDoc);
writer.commit();
SegmentInfoPerCommit info = writer.newestSegment();
writer.close();
//After adding the document, we should be able to read it back in
SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random()));
assertTrue(reader != null);
StoredDocument doc = reader.document(0);
assertTrue(doc != null);
//System.out.println("Document: " + doc);
StorableField[] fields = doc.getFields("textField2");
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_2_TEXT));
assertTrue(fields[0].fieldType().storeTermVectors());
fields = doc.getFields("textField1");
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_1_TEXT));
assertFalse(fields[0].fieldType().storeTermVectors());
fields = doc.getFields("keyField");
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.KEYWORD_TEXT));
fields = doc.getFields(DocHelper.NO_NORMS_KEY);
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.NO_NORMS_TEXT));
fields = doc.getFields(DocHelper.TEXT_FIELD_3_KEY);
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_3_TEXT));
// test that the norms are not present in the segment if
// omitNorms is true
for (FieldInfo fi : reader.getFieldInfos()) {
if (fi.isIndexed()) {
assertTrue(fi.omitsNorms() == (reader.simpleNormValues(fi.name) == null));
}
}
reader.close();
}
| public void testAddDocument() throws Exception {
Document testDoc = new Document();
DocHelper.setupDoc(testDoc);
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
writer.addDocument(testDoc);
writer.commit();
SegmentInfoPerCommit info = writer.newestSegment();
writer.close();
//After adding the document, we should be able to read it back in
SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random()));
assertTrue(reader != null);
StoredDocument doc = reader.document(0);
assertTrue(doc != null);
//System.out.println("Document: " + doc);
StorableField[] fields = doc.getFields("textField2");
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_2_TEXT));
assertTrue(fields[0].fieldType().storeTermVectors());
fields = doc.getFields("textField1");
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_1_TEXT));
assertFalse(fields[0].fieldType().storeTermVectors());
fields = doc.getFields("keyField");
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.KEYWORD_TEXT));
fields = doc.getFields(DocHelper.NO_NORMS_KEY);
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.NO_NORMS_TEXT));
fields = doc.getFields(DocHelper.TEXT_FIELD_3_KEY);
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_3_TEXT));
// test that the norms are not present in the segment if
// omitNorms is true
for (FieldInfo fi : reader.getFieldInfos()) {
if (fi.isIndexed()) {
assertTrue(fi.omitsNorms() == (reader.getNormValues(fi.name) == null));
}
}
reader.close();
}
|
public void testFloatNorms() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random()));
Similarity provider = new MySimProvider();
config.setSimilarity(provider);
RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
final LineFileDocs docs = new LineFileDocs(random());
int num = atLeast(100);
for (int i = 0; i < num; i++) {
Document doc = docs.nextDoc();
float nextFloat = random().nextFloat();
Field f = new TextField(floatTestField, "" + nextFloat, Field.Store.YES);
f.setBoost(nextFloat);
doc.add(f);
writer.addDocument(doc);
doc.removeField(floatTestField);
if (rarely()) {
writer.commit();
}
}
writer.commit();
writer.close();
AtomicReader open = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
NumericDocValues norms = open.simpleNormValues(floatTestField);
assertNotNull(norms);
for (int i = 0; i < open.maxDoc(); i++) {
StoredDocument document = open.document(i);
float expected = Float.parseFloat(document.get(floatTestField));
assertEquals(expected, Float.intBitsToFloat((int)norms.get(i)), 0.0f);
}
open.close();
dir.close();
docs.close();
}
| public void testFloatNorms() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random()));
Similarity provider = new MySimProvider();
config.setSimilarity(provider);
RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
final LineFileDocs docs = new LineFileDocs(random());
int num = atLeast(100);
for (int i = 0; i < num; i++) {
Document doc = docs.nextDoc();
float nextFloat = random().nextFloat();
Field f = new TextField(floatTestField, "" + nextFloat, Field.Store.YES);
f.setBoost(nextFloat);
doc.add(f);
writer.addDocument(doc);
doc.removeField(floatTestField);
if (rarely()) {
writer.commit();
}
}
writer.commit();
writer.close();
AtomicReader open = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
NumericDocValues norms = open.getNormValues(floatTestField);
assertNotNull(norms);
for (int i = 0; i < open.maxDoc(); i++) {
StoredDocument document = open.document(i);
float expected = Float.parseFloat(document.get(floatTestField));
assertEquals(expected, Float.intBitsToFloat((int)norms.get(i)), 0.0f);
}
open.close();
dir.close();
docs.close();
}
|
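
The norms test above stores a float boost as an integer bit pattern and reads it back with Float.intBitsToFloat. That encoding round trip is plain JDK and worth seeing in isolation:

public class FloatBitsRoundTrip {
    public static void main(String[] args) {
        float boost = 0.375f;
        // Store the float as its raw IEEE-754 bit pattern (what a numeric norm can hold)...
        long stored = Float.floatToIntBits(boost);
        // ...and recover it losslessly on the way back out.
        float recovered = Float.intBitsToFloat((int) stored);
        if (recovered != boost) {
            throw new AssertionError("round trip lost precision");
        }
        System.out.println("stored=" + stored + " recovered=" + recovered);
    }
}
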
public void test() throws Exception {
NumericDocValues fooNorms = MultiSimpleDocValues.simpleNormValues(reader, "foo");
assertNotNull(fooNorms);
for (int i = 0; i < reader.maxDoc(); i++) {
assertEquals(expected.get(i).longValue(), fooNorms.get(i));
}
}
| public void test() throws Exception {
NumericDocValues fooNorms = MultiDocValues.getNormValues(reader, "foo");
assertNotNull(fooNorms);
for (int i = 0; i < reader.maxDoc(); i++) {
assertEquals(expected.get(i).longValue(), fooNorms.get(i));
}
}
|
public void test() throws Exception {
NumericDocValues fooNorms = MultiSimpleDocValues.simpleNormValues(reader, "foo");
for (int i = 0; i < reader.maxDoc(); i++) {
assertEquals(expected.get(i).intValue(), fooNorms.get(i) & 0xff);
}
}
| public void test() throws Exception {
NumericDocValues fooNorms = MultiDocValues.getNormValues(reader, "foo");
for (int i = 0; i < reader.maxDoc(); i++) {
assertEquals(expected.get(i).intValue(), fooNorms.get(i) & 0xff);
}
}
|
public void test1PhraseShortMV() throws Exception {
makeIndexShortMV();
FieldQuery fq = new FieldQuery( tq( "d" ), true, true );
FieldTermStack stack = new FieldTermStack( reader, 0, F, fq );
FieldPhraseList fpl = new FieldPhraseList( stack, fq );
SimpleFragListBuilder sflb = new SimpleFragListBuilder();
FieldFragList ffl = sflb.createFieldFragList( fpl, 100 );
assertEquals( 1, ffl.fragInfos.size() );
assertEquals( "subInfos=(d((6,7)))/1.0(0,100)", ffl.fragInfos.get( 0 ).toString() );
}
| public void test1PhraseShortMV() throws Exception {
makeIndexShortMV();
FieldQuery fq = new FieldQuery( tq( "d" ), true, true );
FieldTermStack stack = new FieldTermStack( reader, 0, F, fq );
FieldPhraseList fpl = new FieldPhraseList( stack, fq );
SimpleFragListBuilder sflb = new SimpleFragListBuilder();
FieldFragList ffl = sflb.createFieldFragList( fpl, 100 );
assertEquals( 1, ffl.fragInfos.size() );
assertEquals( "subInfos=(d((9,10)))/1.0(3,103)", ffl.fragInfos.get( 0 ).toString() );
}
|
public void test1PhraseShortMV() throws Exception {
makeIndexShortMV();
FieldQuery fq = new FieldQuery( tq( "d" ), true, true );
FieldTermStack stack = new FieldTermStack( reader, 0, F, fq );
assertEquals( 1, stack.termList.size() );
assertEquals( "d(6,7,3)", stack.pop().toString() );
}
| public void test1PhraseShortMV() throws Exception {
makeIndexShortMV();
FieldQuery fq = new FieldQuery( tq( "d" ), true, true );
FieldTermStack stack = new FieldTermStack( reader, 0, F, fq );
assertEquals( 1, stack.termList.size() );
assertEquals( "d(9,10,3)", stack.pop().toString() );
}
|
public void test1PhraseShortMV() throws Exception {
makeIndexShortMV();
FieldQuery fq = new FieldQuery( tq( "d" ), true, true );
FieldTermStack stack = new FieldTermStack( reader, 0, F, fq );
FieldPhraseList fpl = new FieldPhraseList( stack, fq );
assertEquals( 1, fpl.phraseList.size() );
assertEquals( "d(1.0)((6,7))", fpl.phraseList.get( 0 ).toString() );
}
| public void test1PhraseShortMV() throws Exception {
makeIndexShortMV();
FieldQuery fq = new FieldQuery( tq( "d" ), true, true );
FieldTermStack stack = new FieldTermStack( reader, 0, F, fq );
FieldPhraseList fpl = new FieldPhraseList( stack, fq );
assertEquals( 1, fpl.phraseList.size() );
assertEquals( "d(1.0)((9,10))", fpl.phraseList.get( 0 ).toString() );
}
|
private void unCache(String fileName) throws IOException {
// Only let one thread uncache at a time; this only
// happens during commit() or close():
synchronized(uncacheLock) {
if (VERBOSE) {
System.out.println("nrtdir.unCache name=" + fileName);
}
if (!cache.fileExists(fileName)) {
// Another thread beat us...
return;
}
if (delegate.fileExists(fileName)) {
throw new IOException("cannot uncache file=\"" + fileName + "\": it was separately also created in the delegate directory");
}
final IOContext context = IOContext.DEFAULT;
final IndexOutput out = delegate.createOutput(fileName, context);
IndexInput in = null;
try {
in = cache.openInput(fileName, context);
in.copyBytes(out, in.length());
} finally {
IOUtils.close(in, out);
}
// Lock order: uncacheLock -> this
synchronized(this) {
// Must sync here because other sync methods have
// if (cache.fileExists(name)) { ... } else { ... }:
cache.deleteFile(fileName);
}
}
}
| private void unCache(String fileName) throws IOException {
// Only let one thread uncache at a time; this only
// happens during commit() or close():
synchronized(uncacheLock) {
if (VERBOSE) {
System.out.println("nrtdir.unCache name=" + fileName);
}
if (!cache.fileExists(fileName)) {
// Another thread beat us...
return;
}
if (delegate.fileExists(fileName)) {
throw new IOException("cannot uncache file=\"" + fileName + "\": it was separately also created in the delegate directory");
}
final IOContext context = IOContext.DEFAULT;
final IndexOutput out = delegate.createOutput(fileName, context);
IndexInput in = null;
try {
in = cache.openInput(fileName, context);
out.copyBytes(in, in.length());
} finally {
IOUtils.close(in, out);
}
// Lock order: uncacheLock -> this
synchronized(this) {
// Must sync here because other sync methods have
// if (cache.fileExists(name)) { ... } else { ... }:
cache.deleteFile(fileName);
}
}
}
|
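
The entire fix in the pair above is the direction of the byte copy: read from the cached input and write to the delegate's output, never the reverse. The same shape in plain java.io, with throwaway file names:

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

public class UncacheCopy {

    // Copy every byte of the cached file into the destination file.
    static void copy(String cachedPath, String destPath) throws IOException {
        try (InputStream in = new FileInputStream(cachedPath);      // source: the cache
             OutputStream out = new FileOutputStream(destPath)) {   // destination: the delegate dir
            byte[] buffer = new byte[8192];
            int n;
            while ((n = in.read(buffer)) != -1) {
                out.write(buffer, 0, n);   // bytes flow source -> destination
            }
        }
    }

    public static void main(String[] args) throws IOException {
        Files.write(Paths.get("cached.tmp"), "hello".getBytes());
        copy("cached.tmp", "delegate.tmp");
        System.out.println(new String(Files.readAllBytes(Paths.get("delegate.tmp"))));
    }
}
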
public void testDerby3000() throws SQLException, IOException {
ResultSet rs;
// Derby-3000 make sure we process only valid TableType values and
// process them correctly.
DatabaseMetaData dmd = getConnection().getMetaData();
Statement s = createStatement();
s.executeUpdate("CREATE TABLE APP.TAB (i int)");
s.executeUpdate("CREATE VIEW APP.V as SELECT * FROM TAB");
s.executeUpdate("CREATE SYNONYM TSYN FOR APP.TAB");
String[] withInvalidTableTypes = {"SYNONYM","TABLE","VIEW",
"GLOBAL TEMPORARY"};
// just ignore invalid types
rs = dmd.getTables( "%", "%", "%", withInvalidTableTypes);
JDBC.assertFullResultSet(rs,
new String[][] {{"","APP","TSYN","SYNONYM","",null,null,null,null,null},
{"","APP","TAB","TABLE","",null,null,null,null,null},
{"","APP","V","VIEW","",null,null,null,null,null}});
rs = dmd.getTables("%", "%", "%", new String[] {"GLOBAL TEMPORARY"});
JDBC.assertEmpty(rs);
rs = dmd.getTables("%", "%", "%", new String[] {"VIEW"});
JDBC.assertUnorderedResultSet(rs, new String[][]
{{"","APP","V","VIEW","",null,null,null,null,null}});
rs = dmd.getTables("%", "%", "%", new String[] {"TABLE"});
JDBC.assertUnorderedResultSet(rs,new String[][]
{{"","APP","TAB","TABLE","",null,null,null,null,null}} );
rs = dmd.getTables("%", "%", "%", new String[] {"SYNONYM"});
JDBC.assertUnorderedResultSet(rs, new String[][]
{{"","APP","TSYN","SYNONYM","",null,null,null,null,null}});
rs = dmd.getTables( "%", "%", "%", new String[] {"SYSTEM TABLE"});
assertEquals(19, JDBC.assertDrainResults(rs));
s.executeUpdate("DROP VIEW APP.V");
s.executeUpdate("DROP TABLE APP.TAB");
s.executeUpdate("DROP SYNONYM APP.TSYN");
}
| public void testDerby3000() throws SQLException, IOException {
ResultSet rs;
// Derby-3000 make sure we process only valid TableType values and
// process them correctly.
DatabaseMetaData dmd = getConnection().getMetaData();
Statement s = createStatement();
s.executeUpdate("CREATE TABLE APP.TAB (i int)");
s.executeUpdate("CREATE VIEW APP.V as SELECT * FROM TAB");
s.executeUpdate("CREATE SYNONYM TSYN FOR APP.TAB");
String[] withInvalidTableTypes = {"SYNONYM","TABLE","VIEW",
"GLOBAL TEMPORARY"};
// just ignore invalid types
rs = dmd.getTables( "%", "%", "%", withInvalidTableTypes);
JDBC.assertFullResultSet(rs,
new String[][] {{"","APP","TSYN","SYNONYM","",null,null,null,null,null},
{"","APP","TAB","TABLE","",null,null,null,null,null},
{"","APP","V","VIEW","",null,null,null,null,null}});
rs = dmd.getTables("%", "%", "%", new String[] {"GLOBAL TEMPORARY"});
JDBC.assertEmpty(rs);
rs = dmd.getTables("%", "%", "%", new String[] {"VIEW"});
JDBC.assertUnorderedResultSet(rs, new String[][]
{{"","APP","V","VIEW","",null,null,null,null,null}});
rs = dmd.getTables("%", "%", "%", new String[] {"TABLE"});
JDBC.assertUnorderedResultSet(rs,new String[][]
{{"","APP","TAB","TABLE","",null,null,null,null,null}} );
rs = dmd.getTables("%", "%", "%", new String[] {"SYNONYM"});
JDBC.assertUnorderedResultSet(rs, new String[][]
{{"","APP","TSYN","SYNONYM","",null,null,null,null,null}});
rs = dmd.getTables( "%", "%", "%", new String[] {"SYSTEM TABLE"});
assertEquals(20, JDBC.assertDrainResults(rs));
s.executeUpdate("DROP VIEW APP.V");
s.executeUpdate("DROP TABLE APP.TAB");
s.executeUpdate("DROP SYNONYM APP.TSYN");
}
|
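
The assertions above hinge on how DatabaseMetaData.getTables treats its table-type filter. A generic JDBC sketch of the same call, assuming an embedded Derby driver on the classpath (the URL is a placeholder):

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;

public class ListTables {
    public static void main(String[] args) throws SQLException {
        // Placeholder in-memory Derby URL; any JDBC driver on the classpath would do.
        try (Connection conn = DriverManager.getConnection("jdbc:derby:memory:demo;create=true")) {
            DatabaseMetaData dmd = conn.getMetaData();
            // The Derby test above expects unrecognized type strings in this filter to be ignored.
            String[] types = {"TABLE", "VIEW", "SYNONYM", "NOT A REAL TYPE"};
            try (ResultSet rs = dmd.getTables(null, "%", "%", types)) {
                while (rs.next()) {
                    System.out.println(rs.getString("TABLE_SCHEM") + "."
                            + rs.getString("TABLE_NAME") + " : " + rs.getString("TABLE_TYPE"));
                }
            }
        }
    }
}
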
public void testClobCreateLocatorSP() throws SQLException {
//initialize the locator to a default value.
int locator = -1;
//call the stored procedure to return the created locator.
CallableStatement cs = prepareCall
("? = CALL SYSIBM.CLOBCREATELOCATOR()");
cs.registerOutParameter(1, java.sql.Types.INTEGER);
cs.executeUpdate();
locator = cs.getInt(1);
        //verify that the locator returned and the expected value are equal.
        //verify that the locator returned and the expected value are equal.
//remember in setup a locator is already created
//hence expected value is 2
assertEquals("The locator values returned by " +
"SYSIBM.CLOBCREATELOCATOR() are incorrect", 4, locator);
cs.close();
}
| public void testClobCreateLocatorSP() throws SQLException {
//initialize the locator to a default value.
int locator = -1;
//call the stored procedure to return the created locator.
CallableStatement cs = prepareCall
("? = CALL SYSIBM.CLOBCREATELOCATOR()");
cs.registerOutParameter(1, java.sql.Types.INTEGER);
cs.executeUpdate();
locator = cs.getInt(1);
        //verify that the locator returned and the expected value are equal.
//remember in setup a locator is already created
//hence expected value is 2
assertEquals("The locator values returned by " +
"SYSIBM.CLOBCREATELOCATOR() are incorrect", 2, locator);
cs.close();
}
|
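
The locator test above is standard JDBC stored-procedure plumbing: register the OUT parameter, execute, read the value back (the asserted number is just bookkeeping about how many locators the fixture already created). A compilable sketch of that plumbing, given an open Connection; the procedure name is taken from the test itself:

import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Types;

public class CallWithReturn {

    // Call a procedure that returns a value and read the INTEGER result back.
    // "{? = call ...}" is the portable JDBC escape for this shape of call.
    static int createLocator(Connection conn) throws SQLException {
        try (CallableStatement cs = conn.prepareCall("{? = call SYSIBM.CLOBCREATELOCATOR()}")) {
            cs.registerOutParameter(1, Types.INTEGER);
            cs.execute();
            return cs.getInt(1);
        }
    }
}
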
public void testFragmentCreation() throws Exception {
Bundle exportBundle = makeBundleWithExports("export.bundle", "1.2.3",
"export.package;version=\"1.0.0\";singleton:=true");
Dictionary fragmentHeaders = makeFragmentFromExportBundle(exportBundle)
.getHeaders();
assertNotNull("No headers in the fragment", fragmentHeaders);
assertEquals("Wrong symbolicName", "scooby.doo.test.fragment",
fragmentHeaders.get(Constants.BUNDLE_SYMBOLICNAME));
assertEquals("Wrong version", "0.0.0", fragmentHeaders
.get(Constants.BUNDLE_VERSION));
assertEquals("Wrong Bundle manifest version", "2", fragmentHeaders
.get(Constants.BUNDLE_MANIFESTVERSION));
assertEquals("Wrong Fragment host",
"scooby.doo;bundle-version=\"0.0.0\"", fragmentHeaders
.get(Constants.FRAGMENT_HOST));
assertEquals(
"Wrong Imports",
"export.package;version=\"1.0.0\";bundle-symbolic-name=\"export.bundle\";bundle-version=\"[1.2.3,1.2.3]\"",
fragmentHeaders.get(Constants.IMPORT_PACKAGE));
}
| public void testFragmentCreation() throws Exception {
Bundle exportBundle = makeBundleWithExports("export.bundle", "1.2.3",
"export.package;version=\"1.0.0\";uses:=\"foo.jar,bar.jar\";singleton:=true");
Dictionary fragmentHeaders = makeFragmentFromExportBundle(exportBundle)
.getHeaders();
assertNotNull("No headers in the fragment", fragmentHeaders);
assertEquals("Wrong symbolicName", "scooby.doo.test.fragment",
fragmentHeaders.get(Constants.BUNDLE_SYMBOLICNAME));
assertEquals("Wrong version", "0.0.0", fragmentHeaders
.get(Constants.BUNDLE_VERSION));
assertEquals("Wrong Bundle manifest version", "2", fragmentHeaders
.get(Constants.BUNDLE_MANIFESTVERSION));
assertEquals("Wrong Fragment host",
"scooby.doo;bundle-version=\"0.0.0\"", fragmentHeaders
.get(Constants.FRAGMENT_HOST));
assertEquals(
"Wrong Imports",
"export.package;version=\"1.0.0\";bundle-symbolic-name=\"export.bundle\";bundle-version=\"[1.2.3,1.2.3]\"",
fragmentHeaders.get(Constants.IMPORT_PACKAGE));
}
|
public static String docValuesId(String segmentsName, int fieldId) {
return segmentsName + "-" + fieldId;
}
| public static String docValuesId(String segmentsName, int fieldId) {
return segmentsName + "_" + fieldId;
}
|
private boolean[] expandBooleanArray(boolean[] array, int newLength) {
if (array == null) {
boolean[] newArray = new boolean[newLength];
return newArray;
}
if (array.length < newLength) {
boolean[] newArray = new boolean[newLength];
System.arraycopy(array, 0, newArray, 0, array.length);
return newArray;
}
return array;
}
void flowPrepareForSelectFromInsert() throws SqlException {
agent_.beginWriteChain(this);
writePrepareDescribeInputOutput(constructSelectFromInsertSQL(sql_), section_);
agent_.flow(this);
readPrepareDescribeInputOutput();
agent_.endReadChain();
}
void writePrepareDescribeInputOutput(String sql,
Section section) throws SqlException {
// Notice that sql_ is passed in since in general ad hoc sql must be passed in for unprepared statements
writePrepareDescribeOutput(sql, section);
writeDescribeInput(section);
}
void flowPrepareDescribeInputOutput() throws SqlException {
agent_.beginWriteChain(this);
if (sqlMode_ == isCall__) {
writePrepareDescribeInput();
agent_.flow(this);
readPrepareDescribeInput();
agent_.endReadChain();
} else {
writePrepareDescribeInputOutput();
agent_.flow(this);
readPrepareDescribeInputOutput();
agent_.endReadChain();
}
}
void flowExecute(int executeType) throws SqlException {
super.checkForClosedStatement();
super.clearWarningsX();
super.checkForAppropriateSqlMode(executeType, sqlMode_);
checkThatAllParametersAreSet();
if (sqlMode_ == isUpdate__) {
updateCount_ = 0;
} else {
updateCount_ = -1;
}
java.util.Timer queryTimer = null;
QueryTimerTask queryTimerTask = null;
if (timeout_ != 0) {
queryTimer = new java.util.Timer(); // A thread that ticks the seconds
queryTimerTask = new QueryTimerTask(this, queryTimer);
queryTimer.schedule(queryTimerTask, 1000 * timeout_);
}
try {
agent_.beginWriteChain(this);
boolean piggybackedAutocommit = super.writeCloseResultSets(true); // true means permit auto-commits
int numInputColumns = (parameterMetaData_ != null) ? parameterMetaData_.getColumnCount() : 0;
boolean outputExpected = (resultSetMetaData_ != null && resultSetMetaData_.getColumnCount() > 0);
boolean chainAutoCommit = false;
boolean commitSubstituted = false;
boolean repositionedCursor = false;
ResultSet scrollableRS = null;
switch (sqlMode_) {
case isUpdate__:
if (positionedUpdateCursorName_ != null) {
scrollableRS = agent_.sectionManager_.getPositionedUpdateResultSet(positionedUpdateCursorName_);
}
if (scrollableRS != null && !scrollableRS.isRowsetCursor_) {
repositionedCursor =
scrollableRS.repositionScrollableResultSetBeforeJDBC1PositionedUpdateDelete();
if (!repositionedCursor) {
scrollableRS = null;
}
}
chainAutoCommit = connection_.willAutoCommitGenerateFlow() && isAutoCommittableStatement_;
if (sqlUpdateMode_ == isInsertSql__ && generatedKeysColumnNames_ != null) {
writeOpenQuery(section_,
fetchSize_,
resultSetType_,
numInputColumns,
parameterMetaData_,
parameters_);
} else {
boolean chainOpenQueryForAutoGeneratedKeys = (sqlUpdateMode_ == isInsertSql__ && autoGeneratedKeys_ == RETURN_GENERATED_KEYS);
writeExecute(section_,
parameterMetaData_,
parameters_,
numInputColumns,
outputExpected,
(chainAutoCommit || chainOpenQueryForAutoGeneratedKeys)// chain flag
); // chain flag
if (chainOpenQueryForAutoGeneratedKeys) {
prepareAutoGeneratedKeysStatement();
writeOpenQuery(preparedStatementForAutoGeneratedKeys_.section_,
preparedStatementForAutoGeneratedKeys_.fetchSize_,
preparedStatementForAutoGeneratedKeys_.resultSetType_);
}
}
if (chainAutoCommit) {
// we have encountered an error in writing the execute, so do not
// flow an autocommit
if (agent_.accumulatedReadExceptions_ != null) {
// currently, the only write exception we encounter is for
// data truncation: SQLSTATE 01004, so we don't bother checking for this
connection_.writeCommitSubstitute_();
commitSubstituted = true;
} else {
// there is no write error, so flow the commit
connection_.writeCommit();
}
}
break;
case isQuery__:
writeOpenQuery(section_,
fetchSize_,
resultSetType_,
numInputColumns,
parameterMetaData_,
parameters_);
break;
case isCall__:
writeExecuteCall(outputRegistered_, // if no out/inout parameter, outputExpected = false
null,
section_,
fetchSize_,
false, // do not suppress ResultSets for regular CALLs
resultSetType_,
parameterMetaData_,
parameters_); // cross conversion
break;
}
agent_.flow(this);
super.readCloseResultSets(true); // true means permit auto-commits
// turn inUnitOfWork_ flag back on and add statement
// back on commitListeners_ list if they were off
// by an autocommit chained to a close cursor.
if (piggybackedAutocommit) {
connection_.completeTransactionStart();
}
super.markResultSetsClosed();
switch (sqlMode_) {
case isUpdate__:
// do not need to reposition for a rowset cursor
if (scrollableRS != null && !scrollableRS.isRowsetCursor_) {
scrollableRS.readPositioningFetch_();
}
if (sqlUpdateMode_ == isInsertSql__ && generatedKeysColumnNames_ != null) {
readOpenQuery();
if (resultSet_ != null) {
generatedKeysResultSet_ = resultSet_;
resultSet_ = null;
updateCount_ = 1;
}
} else {
readExecute();
if (sqlUpdateMode_ == isInsertSql__ && autoGeneratedKeys_ == RETURN_GENERATED_KEYS) {
readPrepareAutoGeneratedKeysStatement();
preparedStatementForAutoGeneratedKeys_.readOpenQuery();
generatedKeysResultSet_ = preparedStatementForAutoGeneratedKeys_.resultSet_;
preparedStatementForAutoGeneratedKeys_.resultSet_ = null;
}
}
if (chainAutoCommit) {
if (commitSubstituted) {
connection_.readCommitSubstitute_();
} else {
connection_.readCommit();
}
}
break;
case isQuery__:
try {
readOpenQuery();
} catch (DisconnectException dise) {
throw dise;
} catch (SqlException e) {
throw e;
}
// resultSet_ is null if open query failed.
// check for null resultSet_ before using it.
if (resultSet_ != null) {
resultSet_.parseScrollableRowset();
//if (resultSet_.scrollable_) resultSet_.getRowCount();
// If client's cursor name is set, map the client's cursor name to the ResultSet
// Else map the server's cursor name to the ResultSet
mapCursorNameToResultSet();
}
break;
case isCall__:
readExecuteCall();
break;
}
try {
agent_.endReadChain();
} catch (SqlException e) {
throw e;
}
if (sqlMode_ == isCall__) {
parseStorProcReturnedScrollableRowset();
// When there are no result sets back, we will commit immediately when autocommit is true.
// make sure a commit is not performed when making the call to the sqlca message procedure
if (connection_.autoCommit_ && resultSet_ == null && resultSetList_ == null && isAutoCommittableStatement_) {
connection_.flowAutoCommit();
}
}
// Throw an exception if holdability returned by the server is different from requested.
if (resultSet_ != null && resultSet_.resultSetHoldability_ != resultSetHoldability_ && sqlMode_ != isCall__) {
throw new SqlException(agent_.logWriter_, "Unable to open resultSet with requested " +
"holdability " + resultSetHoldability_ + ".");
}
} finally {
if (timeout_ != 0) { // query timers need to be cancelled.
queryTimer.cancel();
queryTimerTask.cancel();
}
}
}
| private boolean[] expandBooleanArray(boolean[] array, int newLength) {
if (array == null) {
boolean[] newArray = new boolean[newLength];
return newArray;
}
if (array.length < newLength) {
boolean[] newArray = new boolean[newLength];
System.arraycopy(array, 0, newArray, 0, array.length);
return newArray;
}
return array;
}
void flowPrepareForSelectFromInsert() throws SqlException {
agent_.beginWriteChain(this);
writePrepareDescribeInputOutput(constructSelectFromInsertSQL(sql_), section_);
agent_.flow(this);
readPrepareDescribeInputOutput();
agent_.endReadChain();
}
void writePrepareDescribeInputOutput(String sql,
Section section) throws SqlException {
// Notice that sql_ is passed in since in general ad hoc sql must be passed in for unprepared statements
writePrepareDescribeOutput(sql, section);
writeDescribeInput(section);
}
void flowPrepareDescribeInputOutput() throws SqlException {
agent_.beginWriteChain(this);
if (sqlMode_ == isCall__) {
writePrepareDescribeInput();
agent_.flow(this);
readPrepareDescribeInput();
agent_.endReadChain();
} else {
writePrepareDescribeInputOutput();
agent_.flow(this);
readPrepareDescribeInputOutput();
agent_.endReadChain();
}
}
void flowExecute(int executeType) throws SqlException {
super.checkForClosedStatement();
super.clearWarningsX();
super.checkForAppropriateSqlMode(executeType, sqlMode_);
checkThatAllParametersAreSet();
if (sqlMode_ == isUpdate__) {
updateCount_ = 0;
} else {
updateCount_ = -1;
}
java.util.Timer queryTimer = null;
QueryTimerTask queryTimerTask = null;
if (timeout_ != 0) {
queryTimer = new java.util.Timer(); // A thread that ticks the seconds
queryTimerTask = new QueryTimerTask(this, queryTimer);
queryTimer.schedule(queryTimerTask, 1000 * timeout_);
}
try {
agent_.beginWriteChain(this);
boolean piggybackedAutocommit = super.writeCloseResultSets(true); // true means permit auto-commits
int numInputColumns = (parameterMetaData_ != null) ? parameterMetaData_.getColumnCount() : 0;
boolean outputExpected = (resultSetMetaData_ != null && resultSetMetaData_.getColumnCount() > 0);
boolean chainAutoCommit = false;
boolean commitSubstituted = false;
boolean repositionedCursor = false;
ResultSet scrollableRS = null;
switch (sqlMode_) {
case isUpdate__:
if (positionedUpdateCursorName_ != null) {
scrollableRS = agent_.sectionManager_.getPositionedUpdateResultSet(positionedUpdateCursorName_);
}
if (scrollableRS != null && !scrollableRS.isRowsetCursor_) {
repositionedCursor =
scrollableRS.repositionScrollableResultSetBeforeJDBC1PositionedUpdateDelete();
if (!repositionedCursor) {
scrollableRS = null;
}
}
chainAutoCommit = connection_.willAutoCommitGenerateFlow() && isAutoCommittableStatement_;
if (sqlUpdateMode_ == isInsertSql__ && generatedKeysColumnNames_ != null) {
writeOpenQuery(section_,
fetchSize_,
resultSetType_,
numInputColumns,
parameterMetaData_,
parameters_);
} else {
boolean chainOpenQueryForAutoGeneratedKeys = (sqlUpdateMode_ == isInsertSql__ && autoGeneratedKeys_ == RETURN_GENERATED_KEYS);
writeExecute(section_,
parameterMetaData_,
parameters_,
numInputColumns,
outputExpected,
(chainAutoCommit || chainOpenQueryForAutoGeneratedKeys)// chain flag
); // chain flag
if (chainOpenQueryForAutoGeneratedKeys) {
prepareAutoGeneratedKeysStatement();
writeOpenQuery(preparedStatementForAutoGeneratedKeys_.section_,
preparedStatementForAutoGeneratedKeys_.fetchSize_,
preparedStatementForAutoGeneratedKeys_.resultSetType_);
}
}
if (chainAutoCommit) {
// we have encountered an error in writing the execute, so do not
// flow an autocommit
if (agent_.accumulatedReadExceptions_ != null) {
// currently, the only write exception we encounter is for
// data truncation: SQLSTATE 01004, so we don't bother checking for this
connection_.writeCommitSubstitute_();
commitSubstituted = true;
} else {
// there is no write error, so flow the commit
connection_.writeCommit();
}
}
break;
case isQuery__:
writeOpenQuery(section_,
fetchSize_,
resultSetType_,
numInputColumns,
parameterMetaData_,
parameters_);
break;
case isCall__:
writeExecuteCall(outputRegistered_, // if no out/inout parameter, outputExpected = false
null,
section_,
fetchSize_,
false, // do not suppress ResultSets for regular CALLs
resultSetType_,
parameterMetaData_,
parameters_); // cross conversion
break;
}
agent_.flow(this);
super.readCloseResultSets(true); // true means permit auto-commits
// turn inUnitOfWork_ flag back on and add statement
// back on commitListeners_ list if they were off
// by an autocommit chained to a close cursor.
if (piggybackedAutocommit) {
connection_.completeTransactionStart();
}
super.markResultSetsClosed(true); // true means remove from list of commit and rollback listeners
switch (sqlMode_) {
case isUpdate__:
// do not need to reposition for a rowset cursor
if (scrollableRS != null && !scrollableRS.isRowsetCursor_) {
scrollableRS.readPositioningFetch_();
}
if (sqlUpdateMode_ == isInsertSql__ && generatedKeysColumnNames_ != null) {
readOpenQuery();
if (resultSet_ != null) {
generatedKeysResultSet_ = resultSet_;
resultSet_ = null;
updateCount_ = 1;
}
} else {
readExecute();
if (sqlUpdateMode_ == isInsertSql__ && autoGeneratedKeys_ == RETURN_GENERATED_KEYS) {
readPrepareAutoGeneratedKeysStatement();
preparedStatementForAutoGeneratedKeys_.readOpenQuery();
generatedKeysResultSet_ = preparedStatementForAutoGeneratedKeys_.resultSet_;
preparedStatementForAutoGeneratedKeys_.resultSet_ = null;
}
}
if (chainAutoCommit) {
if (commitSubstituted) {
connection_.readCommitSubstitute_();
} else {
connection_.readCommit();
}
}
break;
case isQuery__:
try {
readOpenQuery();
} catch (DisconnectException dise) {
throw dise;
} catch (SqlException e) {
throw e;
}
// resultSet_ is null if open query failed.
// check for null resultSet_ before using it.
if (resultSet_ != null) {
resultSet_.parseScrollableRowset();
//if (resultSet_.scrollable_) resultSet_.getRowCount();
// If client's cursor name is set, map the client's cursor name to the ResultSet
// Else map the server's cursor name to the ResultSet
mapCursorNameToResultSet();
}
break;
case isCall__:
readExecuteCall();
break;
}
try {
agent_.endReadChain();
} catch (SqlException e) {
throw e;
}
if (sqlMode_ == isCall__) {
parseStorProcReturnedScrollableRowset();
// When there are no result sets back, we will commit immediately when autocommit is true.
// make sure a commit is not performed when making the call to the sqlca message procedure
if (connection_.autoCommit_ && resultSet_ == null && resultSetList_ == null && isAutoCommittableStatement_) {
connection_.flowAutoCommit();
}
}
// Throw an exception if holdability returned by the server is different from requested.
if (resultSet_ != null && resultSet_.resultSetHoldability_ != resultSetHoldability_ && sqlMode_ != isCall__) {
throw new SqlException(agent_.logWriter_, "Unable to open resultSet with requested " +
"holdability " + resultSetHoldability_ + ".");
}
} finally {
if (timeout_ != 0) { // query timers need to be cancelled.
queryTimer.cancel();
queryTimerTask.cancel();
}
}
}
|
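
Buried in the long pair above is a query-timeout pattern worth isolating: schedule a java.util.Timer before flowing the statement, and always cancel it in a finally block whether the flow succeeds or throws. A stripped-down sketch of just that scaffolding (the work body is a placeholder):

import java.util.Timer;
import java.util.TimerTask;

public class QueryTimeoutSketch {

    static void runWithTimeout(Runnable work, int timeoutSeconds) {
        Timer timer = null;
        TimerTask task = null;
        if (timeoutSeconds != 0) {
            timer = new Timer();                       // background thread that ticks the timeout
            task = new TimerTask() {
                @Override public void run() {
                    System.out.println("timeout fired; would cancel the statement here");
                }
            };
            timer.schedule(task, 1000L * timeoutSeconds);
        }
        try {
            work.run();
        } finally {
            if (timeoutSeconds != 0) {                 // always tear the timer down, success or failure
                task.cancel();
                timer.cancel();
            }
        }
    }

    public static void main(String[] args) {
        runWithTimeout(() -> System.out.println("doing work"), 5);
    }
}
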
public void distribCommit(CommitUpdateCommand cmd, List<Node> nodes,
ModifiableSolrParams params) throws IOException {
// we need to do any retries before commit...
servers.blockUntilFinished();
doRetriesIfNeeded();
UpdateRequest uReq = new UpdateRequest();
uReq.setParams(params);
addCommit(uReq, cmd);
log.debug("Distrib commit to:" + nodes + " params:" + params);
for (Node node : nodes) {
submit(new Req(cmd.toString(), node, uReq, false));
}
}
| public void distribCommit(CommitUpdateCommand cmd, List<Node> nodes,
ModifiableSolrParams params) throws IOException {
// we need to do any retries before commit...
servers.blockUntilFinished();
doRetriesIfNeeded();
UpdateRequest uReq = new UpdateRequest();
uReq.setParams(params);
addCommit(uReq, cmd);
log.debug("Distrib commit to: {} params: {}", nodes, params);
for (Node node : nodes) {
submit(new Req(cmd.toString(), node, uReq, false));
}
}
|
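
The fix in this row replaces string concatenation with SLF4J's parameterized logging, so the message is only assembled when the debug level is enabled. A minimal sketch of the idiom, assuming slf4j-api (plus a binding) on the classpath; the class here is illustrative:

import java.util.Arrays;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CommitLogging {

    private static final Logger log = LoggerFactory.getLogger(CommitLogging.class);

    void distribCommit(Object nodes, Object params) {
        // {} placeholders defer toString() and concatenation until debug is actually enabled.
        log.debug("Distrib commit to: {} params: {}", nodes, params);
    }

    public static void main(String[] args) {
        new CommitLogging().distribCommit(Arrays.asList("node1", "node2"), "commit=true");
    }
}
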
public static void validateKeyspaceNotYetExisting(String newKsName) throws InvalidRequestException
{
// keyspace names must be unique case-insensitively because the keyspace name becomes the directory
// where we store CF sstables. Names that differ only in case would thus cause problems on
// case-insensitive filesystems (NTFS, most installations of HFS+).
for (String ksName : DatabaseDescriptor.getTables())
{
if (ksName.equalsIgnoreCase(newKsName))
throw new InvalidRequestException("Keyspace names must be case-insensitively unique");
}
}
| public static void validateKeyspaceNotYetExisting(String newKsName) throws InvalidRequestException
{
// keyspace names must be unique case-insensitively because the keyspace name becomes the directory
// where we store CF sstables. Names that differ only in case would thus cause problems on
// case-insensitive filesystems (NTFS, most installations of HFS+).
for (String ksName : Schema.instance.getTables())
{
if (ksName.equalsIgnoreCase(newKsName))
throw new InvalidRequestException("Keyspace names must be case-insensitively unique");
}
}
|
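
The comment in the pair above carries the real reasoning: keyspace names become directory names, and case-insensitive filesystems would collide on names that differ only in case. The check itself is plain Java; a sketch with a stand-in list of existing names:

import java.util.Arrays;
import java.util.List;

public class KeyspaceNameCheck {

    // Reject a new name that collides with an existing one when case is ignored.
    static void validateNotYetExisting(String newName, List<String> existing) {
        for (String name : existing) {
            if (name.equalsIgnoreCase(newName)) {
                throw new IllegalArgumentException(
                        "Keyspace names must be case-insensitively unique: " + newName);
            }
        }
    }

    public static void main(String[] args) {
        List<String> existing = Arrays.asList("Users", "Events");
        validateNotYetExisting("metrics", existing);           // fine
        try {
            validateNotYetExisting("USERS", existing);         // differs only in case
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage());
        }
    }
}
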
public void testTriggersWithClobColumn() throws Exception {
insertDefaultData();
Statement stmt = createStatement();
stmt.executeUpdate(
"CREATE TABLE testClobTriggerA (a CLOB(400k), b int)");
stmt.executeUpdate(
"CREATE TABLE testClobTriggerB (a CLOB(400k), b int)");
stmt.executeUpdate(
"create trigger T13A after update on testClob " +
"referencing new as n old as o " +
"for each row "+
"insert into testClobTriggerA(a, b) values (n.a, n.b)");
stmt.executeUpdate(
"create trigger T13B after INSERT on testClobTriggerA " +
"referencing new table as n " +
"for each statement "+
"insert into testClobTriggerB(a, b) select n.a, n.b from n");
commit();
// Fire the triggers
stmt.executeUpdate("UPDATE testClob SET b = b + 0");
commit();
// Verify the results
Statement origSt = createStatement();
Statement trigASt = createStatement();
Statement trigBSt = createStatement();
ResultSet origRS = origSt.executeQuery(
"select a, length(a), b from testClob order by b");
ResultSet trigARS = trigASt.executeQuery(
"select a, length(a), b from testClobTriggerA order by b");
ResultSet trigBRS = trigBSt.executeQuery(
"select a, length(a), b from testClobTriggerA order by b");
int count = 0;
while (origRS.next()) {
count ++;
assertTrue("row trigger produced less rows " +
count, trigARS.next());
assertTrue("statement trigger produced less rows " +
count, trigBRS.next());
if (origRS.getClob(1) != null) {
assertEquals("FAIL - Invalid checksum for row trigger",
getStreamCheckSum(origRS.getClob(1).getAsciiStream()),
getStreamCheckSum(trigARS.getClob(1).getAsciiStream()));
assertEquals("FAIL - Invalid checksum for statement trigger",
getStreamCheckSum(origRS.getClob(1).getAsciiStream()),
getStreamCheckSum(trigBRS.getClob(1).getAsciiStream()));
}
assertEquals("FAIL - Invalid length in row trigger",
origRS.getInt(2), trigARS.getInt(2));
assertEquals("FAIL - Invalid length in statement trigger",
origRS.getInt(2), trigBRS.getInt(2));
assertEquals("FAIL - Length not updated on row trigger",
origRS.getInt(3), trigARS.getInt(3));
assertEquals("FAIL - Length not updated on statement trigger",
origRS.getInt(3), trigBRS.getInt(3));
}
origRS.close();
trigARS.close();
trigBRS.close();
origSt.close();
trigASt.close();
trigBSt.close();
stmt.executeUpdate("DROP TRIGGER T13A");
stmt.executeUpdate("DROP TRIGGER T13B");
stmt.executeUpdate("DROP TABLE testClobTriggerB");
stmt.executeUpdate("DROP TABLE testClobTriggerA");
stmt.close();
commit();
}
| public void testTriggersWithClobColumn() throws Exception {
insertDefaultData();
Statement stmt = createStatement();
stmt.executeUpdate(
"CREATE TABLE testClobTriggerA (a CLOB(400k), b int)");
stmt.executeUpdate(
"CREATE TABLE testClobTriggerB (a CLOB(400k), b int)");
stmt.executeUpdate(
"create trigger T13A after update on testClob " +
"referencing new as n old as o " +
"for each row "+
"insert into testClobTriggerA(a, b) values (n.a, n.b)");
stmt.executeUpdate(
"create trigger T13B after INSERT on testClobTriggerA " +
"referencing new table as n " +
"for each statement "+
"insert into testClobTriggerB(a, b) select n.a, n.b from n");
commit();
// Fire the triggers
stmt.executeUpdate("UPDATE testClob SET b = b + 0");
commit();
// Verify the results
Statement origSt = createStatement();
Statement trigASt = createStatement();
Statement trigBSt = createStatement();
ResultSet origRS = origSt.executeQuery(
"select a, length(a), b from testClob order by b");
ResultSet trigARS = trigASt.executeQuery(
"select a, length(a), b from testClobTriggerA order by b");
ResultSet trigBRS = trigBSt.executeQuery(
"select a, length(a), b from testClobTriggerB order by b");
int count = 0;
while (origRS.next()) {
count ++;
assertTrue("row trigger produced less rows " +
count, trigARS.next());
assertTrue("statement trigger produced less rows " +
count, trigBRS.next());
if (origRS.getClob(1) != null) {
assertEquals("FAIL - Invalid checksum for row trigger",
getStreamCheckSum(origRS.getClob(1).getAsciiStream()),
getStreamCheckSum(trigARS.getClob(1).getAsciiStream()));
assertEquals("FAIL - Invalid checksum for statement trigger",
getStreamCheckSum(origRS.getClob(1).getAsciiStream()),
getStreamCheckSum(trigBRS.getClob(1).getAsciiStream()));
}
assertEquals("FAIL - Invalid length in row trigger",
origRS.getInt(2), trigARS.getInt(2));
assertEquals("FAIL - Invalid length in statement trigger",
origRS.getInt(2), trigBRS.getInt(2));
assertEquals("FAIL - Length not updated on row trigger",
origRS.getInt(3), trigARS.getInt(3));
assertEquals("FAIL - Length not updated on statement trigger",
origRS.getInt(3), trigBRS.getInt(3));
}
origRS.close();
trigARS.close();
trigBRS.close();
origSt.close();
trigASt.close();
trigBSt.close();
stmt.executeUpdate("DROP TRIGGER T13A");
stmt.executeUpdate("DROP TRIGGER T13B");
stmt.executeUpdate("DROP TABLE testClobTriggerB");
stmt.executeUpdate("DROP TABLE testClobTriggerA");
stmt.close();
commit();
}
|
public static Test suite()
{
String testName = "InterruptResilienceTest";
if (! isSunJVM()) {
// DERBY-4463 test fails on IBM VM 1.5.
// It's fixed in IBM VM 1.6 SR9 and above.
// Remove this condition when that issue is solved in IBM VM 1.5 SR13.
if (getSystemProperty("java.version").startsWith("1.5.0"))
{
println("Test skipped for this VM, cf. DERBY-4463");
return new TestSuite(testName);
}
}
if (!JDBC.vmSupportsJDBC3()) {
println("Test skipped for this VM, " +
"DriverManager is not supported with JSR169");
return new TestSuite(testName);
}
if (hasInterruptibleIO()) {
println("Test skipped due to interruptible IO.");
println("This is default on Solaris/Sun Java <= 1.6, use " +
"-XX:-UseVMInterruptibleIO if available.");
return new TestSuite(testName);
}
return makeSuite(testName);
}
| public static Test suite()
{
String testName = "InterruptResilienceTest";
if (isIBMJVM()) {
// DERBY-4463 test fails on IBM VM 1.5.
// It's fixed in IBM VM 1.6 SR9 and above.
// Remove this condition when that issue is solved in IBM VM 1.5 SR13.
if (getSystemProperty("java.version").startsWith("1.5.0"))
{
println("Test skipped for this VM, cf. DERBY-4463");
return new TestSuite(testName);
}
}
if (!JDBC.vmSupportsJDBC3()) {
println("Test skipped for this VM, " +
"DriverManager is not supported with JSR169");
return new TestSuite(testName);
}
if (hasInterruptibleIO()) {
println("Test skipped due to interruptible IO.");
println("This is default on Solaris/Sun Java <= 1.6, use " +
"-XX:-UseVMInterruptibleIO if available.");
return new TestSuite(testName);
}
return makeSuite(testName);
}
|
public static Test suite() {
if (! isSunJVM()) {
// DERBY-4463 test fails on IBM VM 1.5.
// It's fixed in IBM VM 1.6 SR9 and above.
// Remove this condition when that issue is solved in IBM VM 1.5 SR13.
if (getSystemProperty("java.version").startsWith("1.5.0"))
{
println("Test skipped for this VM, cf. DERBY-4463");
return new TestSuite("empty LockInterruptTest");
}
}
// Only run in embedded mode since we cannot interrupt the engine
// thread from the network client.
Test test = TestConfiguration.embeddedSuite(LockInterruptTest.class);
// Set the lock timeout to a known value so that we know what to
// expect for timeouts.
test = DatabasePropertyTestSetup.setLockTimeouts(
test, DEADLOCK_TIMEOUT, LOCK_TIMEOUT);
Properties syspros = new Properties();
        //DERBY-4856: interrupt errors create a thread dump and diagnostic
        //info. Add this property to suppress that output.
syspros.put("derby.stream.error.extendedDiagSeverityLevel", "50000");
test = new SystemPropertyTestSetup(test, syspros, true);
return new CleanDatabaseTestSetup(test);
}
| public static Test suite() {
if (isIBMJVM()) {
// DERBY-4463 test fails on IBM VM 1.5.
// It's fixed in IBM VM 1.6 SR9 and above.
// Remove this condition when that issue is solved in IBM VM 1.5 SR13.
if (getSystemProperty("java.version").startsWith("1.5.0"))
{
println("Test skipped for this VM, cf. DERBY-4463");
return new TestSuite("empty LockInterruptTest");
}
}
// Only run in embedded mode since we cannot interrupt the engine
// thread from the network client.
Test test = TestConfiguration.embeddedSuite(LockInterruptTest.class);
// Set the lock timeout to a known value so that we know what to
// expect for timeouts.
test = DatabasePropertyTestSetup.setLockTimeouts(
test, DEADLOCK_TIMEOUT, LOCK_TIMEOUT);
Properties syspros = new Properties();
        //DERBY-4856: interrupt errors create a thread dump and diagnostic
        //info. Add this property to suppress that output.
syspros.put("derby.stream.error.extendedDiagSeverityLevel", "50000");
test = new SystemPropertyTestSetup(test, syspros, true);
return new CleanDatabaseTestSetup(test);
}
|
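
Both suite() pairs above swap isSunJVM() for isIBMJVM() when deciding whether to skip under DERBY-4463. The signals behind such helpers are ordinary system properties; a small sketch of one plausible way to read them (the helper names and the vendor-string check are my assumptions, not Derby's utility code):

public class JvmGate {

    static boolean isIbmJvm() {
        String vendor = System.getProperty("java.vendor", "");
        return vendor.toLowerCase().contains("ibm");
    }

    static boolean shouldSkipForDerby4463() {
        // Skip only on IBM 1.5.0 VMs, matching the guard in the suites above.
        return isIbmJvm() && System.getProperty("java.version", "").startsWith("1.5.0");
    }

    public static void main(String[] args) {
        System.out.println("vendor=" + System.getProperty("java.vendor"));
        System.out.println("skip?  " + shouldSkipForDerby4463());
    }
}
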
public void map(LongWritable userID,
VectorWritable vectorWritable,
OutputCollector<LongWritable, RecommendedItemsWritable> output,
Reporter reporter) throws IOException {
if (usersToRecommendFor != null && !usersToRecommendFor.contains(userID.get())) {
return;
}
Vector userVector = vectorWritable.get();
Iterator<Vector.Element> userVectorIterator = userVector.iterateNonZero();
Vector recommendationVector = new RandomAccessSparseVector(Integer.MAX_VALUE, 1000);
while (userVectorIterator.hasNext()) {
Vector.Element element = userVectorIterator.next();
int index = element.index();
double value = element.get();
Vector columnVector;
try {
columnVector = cooccurrenceColumnCache.get(new IntWritable(index));
} catch (TasteException te) {
if (te.getCause() instanceof IOException) {
throw (IOException) te.getCause();
} else {
throw new IOException(te.getCause());
}
}
columnVector.times(value).addTo(recommendationVector);
}
Queue<RecommendedItem> topItems =
new PriorityQueue<RecommendedItem>(recommendationsPerUser + 1, Collections.reverseOrder());
Iterator<Vector.Element> recommendationVectorIterator = recommendationVector.iterateNonZero();
LongWritable itemID = new LongWritable();
while (recommendationVectorIterator.hasNext()) {
Vector.Element element = recommendationVectorIterator.next();
int index = element.index();
if (userVector.get(index) != 0.0) {
if (topItems.size() < recommendationsPerUser) {
indexItemIDMap.get(new IntWritable(index), itemID);
topItems.add(new GenericRecommendedItem(itemID.get(), (float) element.get()));
} else if (element.get() > topItems.peek().getValue()) {
indexItemIDMap.get(new IntWritable(index), itemID);
topItems.add(new GenericRecommendedItem(itemID.get(), (float) element.get()));
topItems.poll();
}
}
}
List<RecommendedItem> recommendations = new ArrayList<RecommendedItem>(topItems.size());
recommendations.addAll(topItems);
Collections.sort(recommendations);
output.collect(userID, new RecommendedItemsWritable(recommendations));
}
| public void map(LongWritable userID,
VectorWritable vectorWritable,
OutputCollector<LongWritable, RecommendedItemsWritable> output,
Reporter reporter) throws IOException {
if (usersToRecommendFor != null && !usersToRecommendFor.contains(userID.get())) {
return;
}
Vector userVector = vectorWritable.get();
Iterator<Vector.Element> userVectorIterator = userVector.iterateNonZero();
Vector recommendationVector = new RandomAccessSparseVector(Integer.MAX_VALUE, 1000);
while (userVectorIterator.hasNext()) {
Vector.Element element = userVectorIterator.next();
int index = element.index();
double value = element.get();
Vector columnVector;
try {
columnVector = cooccurrenceColumnCache.get(new IntWritable(index));
} catch (TasteException te) {
if (te.getCause() instanceof IOException) {
throw (IOException) te.getCause();
} else {
throw new IOException(te.getCause());
}
}
columnVector.times(value).addTo(recommendationVector);
}
Queue<RecommendedItem> topItems =
new PriorityQueue<RecommendedItem>(recommendationsPerUser + 1, Collections.reverseOrder());
Iterator<Vector.Element> recommendationVectorIterator = recommendationVector.iterateNonZero();
LongWritable itemID = new LongWritable();
while (recommendationVectorIterator.hasNext()) {
Vector.Element element = recommendationVectorIterator.next();
int index = element.index();
if (userVector.get(index) == 0.0) {
if (topItems.size() < recommendationsPerUser) {
indexItemIDMap.get(new IntWritable(index), itemID);
topItems.add(new GenericRecommendedItem(itemID.get(), (float) element.get()));
} else if (element.get() > topItems.peek().getValue()) {
indexItemIDMap.get(new IntWritable(index), itemID);
topItems.add(new GenericRecommendedItem(itemID.get(), (float) element.get()));
topItems.poll();
}
}
}
List<RecommendedItem> recommendations = new ArrayList<RecommendedItem>(topItems.size());
recommendations.addAll(topItems);
Collections.sort(recommendations);
output.collect(userID, new RecommendedItemsWritable(recommendations));
}
|
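
Besides the flipped filter (only items the user has not already rated are candidates), the mapper above keeps a bounded top-N in a priority queue whose head is treated as the weakest retained entry. With plain doubles the same bookkeeping needs only the JDK min-heap; a self-contained sketch:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.PriorityQueue;
import java.util.Queue;

public class BoundedTopN {

    // Keep only the n largest scores seen so far.
    static List<Double> topN(double[] scores, int n) {
        // Default min-heap: peek() is always the weakest score currently retained.
        Queue<Double> queue = new PriorityQueue<>(n + 1);
        for (double score : scores) {
            if (queue.size() < n) {
                queue.add(score);
            } else if (score > queue.peek()) {
                queue.add(score);
                queue.poll();          // evict the previous weakest entry
            }
        }
        List<Double> result = new ArrayList<>(queue);
        result.sort(Collections.reverseOrder());   // best first
        return result;
    }

    public static void main(String[] args) {
        System.out.println(topN(new double[] {0.2, 0.9, 0.4, 0.7, 0.1}, 3)); // [0.9, 0.7, 0.4]
    }
}
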
public int docID() {
return docIt >= upto ? NO_MORE_DOCS : docs[docIt];
}
| public int docID() {
return docIt < 0 ? -1 : docIt >= upto ? NO_MORE_DOCS : docs[docIt];
}
|
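
The one-line fix above adds the "not yet positioned" state to docID(): -1 before the first advance, the current doc while in range, and a sentinel once exhausted. A plain-Java sketch of that three-state contract (the sentinel constant is local to the sketch; it just mirrors the convention):

public class DocIdCursor {
    static final int NO_MORE_DOCS = Integer.MAX_VALUE;

    private final int[] docs;
    private int docIt = -1;          // -1 means nextDoc() has not been called yet

    DocIdCursor(int[] docs) { this.docs = docs; }

    int docID() {
        if (docIt < 0) return -1;                    // before the first advance
        return docIt >= docs.length ? NO_MORE_DOCS   // after exhaustion
                                    : docs[docIt];   // currently positioned
    }

    int nextDoc() {
        docIt++;
        return docID();
    }

    public static void main(String[] args) {
        DocIdCursor c = new DocIdCursor(new int[] {3, 8, 11});
        System.out.println(c.docID());                  // -1
        while (c.nextDoc() != NO_MORE_DOCS) {
            System.out.println(c.docID());              // 3, 8, 11
        }
        System.out.println(c.docID());                  // NO_MORE_DOCS
    }
}
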
private boolean mergeClosestClusters(int numUsers, List<FastIDSet> clusters, boolean done) throws TasteException {
// We find a certain number of closest clusters...
List<ClusterClusterPair> queue = findClosestClusters(numUsers, clusters);
// The first one is definitely the closest pair in existence so we can cluster
// the two together, put it back into the set of clusters, and start again. Instead
// we assume everything else in our list of closest cluster pairs is still pretty good,
// and we cluster them too.
while (!queue.isEmpty()) {
if (!clusteringByThreshold && clusters.size() <= numClusters) {
done = true;
break;
}
ClusterClusterPair top = queue.remove(0);
if (clusteringByThreshold && top.getSimilarity() < clusteringThreshold) {
done = true;
break;
}
FastIDSet cluster1 = top.getCluster1();
FastIDSet cluster2 = top.getCluster2();
// Pull out current two clusters from clusters
Iterator<FastIDSet> clusterIterator = clusters.iterator();
boolean removed1 = false;
boolean removed2 = false;
while (clusterIterator.hasNext() && !(removed1 && removed2)) {
FastIDSet current = clusterIterator.next();
// Yes, use == here
if (!removed1 && cluster1 == current) {
clusterIterator.remove();
removed1 = true;
} else if (!removed2 && cluster2 == current) {
clusterIterator.remove();
removed2 = true;
}
}
// The only catch is if a cluster showed it twice in the list of best cluster pairs;
// have to remove the others. Pull out anything referencing these clusters from queue
for (Iterator<ClusterClusterPair> queueIterator = queue.iterator(); queueIterator.hasNext();) {
ClusterClusterPair pair = queueIterator.next();
FastIDSet pair1 = pair.getCluster1();
FastIDSet pair2 = pair.getCluster2();
if (pair1 == cluster1 || pair1 == cluster2 || pair2 == cluster1 || pair2 == cluster2) {
queueIterator.remove();
}
}
// Make new merged cluster
FastIDSet merged = new FastIDSet(cluster1.size() + cluster2.size());
merged.addAll(cluster1);
merged.addAll(cluster2);
// Compare against other clusters; update queue if needed
// That new pair we're just adding might be pretty close to something else, so
// catch that case here and put it back into our queue
for (FastIDSet cluster : clusters) {
double similarity = clusterSimilarity.getSimilarity(merged, cluster);
if (similarity > queue.get(queue.size() - 1).getSimilarity()) {
ListIterator<ClusterClusterPair> queueIterator = queue.listIterator();
while (queueIterator.hasNext()) {
if (similarity > queueIterator.next().getSimilarity()) {
queueIterator.previous();
break;
}
}
queueIterator.add(new ClusterClusterPair(merged, cluster, similarity));
}
}
// Finally add new cluster to list
clusters.add(merged);
}
return done;
}
| private boolean mergeClosestClusters(int numUsers, List<FastIDSet> clusters, boolean done) throws TasteException {
// We find a certain number of closest clusters...
List<ClusterClusterPair> queue = findClosestClusters(numUsers, clusters);
// The first one is definitely the closest pair in existence so we can cluster
// the two together, put it back into the set of clusters, and start again. Instead
// we assume everything else in our list of closest cluster pairs is still pretty good,
// and we cluster them too.
while (!queue.isEmpty()) {
if (!clusteringByThreshold && clusters.size() <= numClusters) {
done = true;
break;
}
ClusterClusterPair top = queue.remove(0);
if (clusteringByThreshold && top.getSimilarity() < clusteringThreshold) {
done = true;
break;
}
FastIDSet cluster1 = top.getCluster1();
FastIDSet cluster2 = top.getCluster2();
// Pull out current two clusters from clusters
Iterator<FastIDSet> clusterIterator = clusters.iterator();
boolean removed1 = false;
boolean removed2 = false;
while (clusterIterator.hasNext() && !(removed1 && removed2)) {
FastIDSet current = clusterIterator.next();
// Yes, use == here
if (!removed1 && cluster1 == current) {
clusterIterator.remove();
removed1 = true;
} else if (!removed2 && cluster2 == current) {
clusterIterator.remove();
removed2 = true;
}
}
// The only catch is if a cluster showed it twice in the list of best cluster pairs;
// have to remove the others. Pull out anything referencing these clusters from queue
for (Iterator<ClusterClusterPair> queueIterator = queue.iterator(); queueIterator.hasNext();) {
ClusterClusterPair pair = queueIterator.next();
FastIDSet pair1 = pair.getCluster1();
FastIDSet pair2 = pair.getCluster2();
if (pair1 == cluster1 || pair1 == cluster2 || pair2 == cluster1 || pair2 == cluster2) {
queueIterator.remove();
}
}
// Make new merged cluster
FastIDSet merged = new FastIDSet(cluster1.size() + cluster2.size());
merged.addAll(cluster1);
merged.addAll(cluster2);
// Compare against other clusters; update queue if needed
// That new pair we're just adding might be pretty close to something else, so
// catch that case here and put it back into our queue
for (FastIDSet cluster : clusters) {
double similarity = clusterSimilarity.getSimilarity(merged, cluster);
if (queue.size() > 0 && similarity > queue.get(queue.size() - 1).getSimilarity()) {
ListIterator<ClusterClusterPair> queueIterator = queue.listIterator();
while (queueIterator.hasNext()) {
if (similarity > queueIterator.next().getSimilarity()) {
queueIterator.previous();
break;
}
}
queueIterator.add(new ClusterClusterPair(merged, cluster, similarity));
}
}
// Finally add new cluster to list
clusters.add(merged);
}
return done;
}
|
public static long getTotalBytes(Iterable<SSTableReader> sstables)
{
long sum = 0;
for (SSTableReader sstable : sstables)
{
sum += sstable.length();
}
return sum;
}
| public static long getTotalBytes(Iterable<SSTableReader> sstables)
{
long sum = 0;
for (SSTableReader sstable : sstables)
{
sum += sstable.onDiskLength();
}
return sum;
}
|
public CompressedSegmentedFile(String path, CompressionMetadata metadata)
{
super(path, metadata.dataLength);
this.metadata = metadata;
}
| public CompressedSegmentedFile(String path, CompressionMetadata metadata)
{
super(path, metadata.dataLength, metadata.compressedFileLength);
this.metadata = metadata;
}
|
private static List<Pair<SSTableReader, Long>> createSSTableAndLengthPairs(Collection<SSTableReader> collection)
{
List<Pair<SSTableReader, Long>> tableLengthPairs = new ArrayList<Pair<SSTableReader, Long>>();
for(SSTableReader table: collection)
tableLengthPairs.add(new Pair<SSTableReader, Long>(table, table.length()));
return tableLengthPairs;
}
| private static List<Pair<SSTableReader, Long>> createSSTableAndLengthPairs(Collection<SSTableReader> collection)
{
List<Pair<SSTableReader, Long>> tableLengthPairs = new ArrayList<Pair<SSTableReader, Long>>();
for(SSTableReader table: collection)
tableLengthPairs.add(new Pair<SSTableReader, Long>(table, table.onDiskLength()));
return tableLengthPairs;
}
|
public final void maybeRefreshBlocking() throws IOException, InterruptedException {
ensureOpen();
// Ensure only 1 thread does reopen at once
refreshLock.lock();
try {
doMaybeRefresh();
} finally {
refreshLock.lock();
}
}
| public final void maybeRefreshBlocking() throws IOException, InterruptedException {
ensureOpen();
// Ensure only 1 thread does reopen at once
refreshLock.lock();
try {
doMaybeRefresh();
} finally {
refreshLock.unlock();
}
}
|
public Sorter newSorter(Entry[] arr) {
return new ArrayTimSorter<Entry>(arr, ArrayUtil.<Entry>naturalComparator(), random().nextInt(arr.length));
}
| public Sorter newSorter(Entry[] arr) {
return new ArrayTimSorter<Entry>(arr, ArrayUtil.<Entry>naturalComparator(), _TestUtil.nextInt(random(), 0, arr.length));
}
|
protected synchronized int addCategoryDocument(CategoryPath categoryPath,
int length, int parent)
throws CorruptIndexException, IOException {
// Before Lucene 2.9, position increments >=0 were supported, so we
// added 1 to parent to allow the parent -1 (the parent of the root).
// Unfortunately, starting with Lucene 2.9, after LUCENE-1542, this is
// no longer enough, since 0 is not encoded consistently either (see
// comment in SinglePositionTokenStream). But because we must be
// backward-compatible with existing indexes, we can't just fix what
// we write here (e.g., to write parent+2), and need to do a workaround
// in the reader (which knows that anyway only category 0 has a parent
// -1).
parentStream.set(parent+1);
Document d = new Document();
d.add(parentStreamField);
fullPathField.setValue(categoryPath.toString(delimiter, length));
d.add(fullPathField);
// Note that we do no pass an Analyzer here because the fields that are
// added to the Document are untokenized or contains their own TokenStream.
// Therefore the IndexWriter's Analyzer has no effect.
indexWriter.addDocument(d);
int id = nextID++;
addToCache(categoryPath, length, id);
// also add to the parent array
getParentArray().add(id, parent);
return id;
}
| protected synchronized int addCategoryDocument(CategoryPath categoryPath,
int length, int parent)
throws CorruptIndexException, IOException {
// Before Lucene 2.9, position increments >=0 were supported, so we
// added 1 to parent to allow the parent -1 (the parent of the root).
// Unfortunately, starting with Lucene 2.9, after LUCENE-1542, this is
// no longer enough, since 0 is not encoded consistently either (see
// comment in SinglePositionTokenStream). But because we must be
// backward-compatible with existing indexes, we can't just fix what
// we write here (e.g., to write parent+2), and need to do a workaround
// in the reader (which knows that anyway only category 0 has a parent
// -1).
parentStream.set(parent+1);
Document d = new Document();
d.add(parentStreamField);
fullPathField.setStringValue(categoryPath.toString(delimiter, length));
d.add(fullPathField);
// Note that we do no pass an Analyzer here because the fields that are
// added to the Document are untokenized or contains their own TokenStream.
// Therefore the IndexWriter's Analyzer has no effect.
indexWriter.addDocument(d);
int id = nextID++;
addToCache(categoryPath, length, id);
// also add to the parent array
getParentArray().add(id, parent);
return id;
}
|
public void testPerFieldCodec() throws Exception {
final int NUM_DOCS = atLeast(173);
if (VERBOSE) {
System.out.println("TEST: NUM_DOCS=" + NUM_DOCS);
}
MockDirectoryWrapper dir = newDirectory();
dir.setCheckIndexOnClose(false); // we use a custom codec provider
IndexWriter w = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setCodec(new CustomPerFieldCodec()).
setMergePolicy(newLogMergePolicy(3))
);
Document doc = new Document();
// uses default codec:
doc.add(newField("field1", "this field uses the standard codec as the test", TextField.TYPE_UNSTORED));
// uses pulsing codec:
Field field2 = newField("field2", "this field uses the pulsing codec as the test", TextField.TYPE_UNSTORED);
doc.add(field2);
Field idField = newField("id", "", StringField.TYPE_UNSTORED);
doc.add(idField);
for(int i=0;i<NUM_DOCS;i++) {
idField.setValue(""+i);
w.addDocument(doc);
if ((i+1)%10 == 0) {
w.commit();
}
}
| public void testPerFieldCodec() throws Exception {
final int NUM_DOCS = atLeast(173);
if (VERBOSE) {
System.out.println("TEST: NUM_DOCS=" + NUM_DOCS);
}
MockDirectoryWrapper dir = newDirectory();
dir.setCheckIndexOnClose(false); // we use a custom codec provider
IndexWriter w = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setCodec(new CustomPerFieldCodec()).
setMergePolicy(newLogMergePolicy(3))
);
Document doc = new Document();
// uses default codec:
doc.add(newField("field1", "this field uses the standard codec as the test", TextField.TYPE_UNSTORED));
// uses pulsing codec:
Field field2 = newField("field2", "this field uses the pulsing codec as the test", TextField.TYPE_UNSTORED);
doc.add(field2);
Field idField = newField("id", "", StringField.TYPE_UNSTORED);
doc.add(idField);
for(int i=0;i<NUM_DOCS;i++) {
idField.setStringValue(""+i);
w.addDocument(doc);
if ((i+1)%10 == 0) {
w.commit();
}
}
|
public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField("field", "", StringField.TYPE_UNSTORED);
doc.add(field);
NumberFormat df = new DecimalFormat("000", new DecimalFormatSymbols(Locale.ENGLISH));
for (int i = 0; i < 1000; i++) {
field.setValue(df.format(i));
writer.addDocument(doc);
}
reader = writer.getReader();
searcher = newSearcher(reader);
writer.close();
if (VERBOSE) {
System.out.println("TEST: setUp searcher=" + searcher);
}
}
| public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField("field", "", StringField.TYPE_UNSTORED);
doc.add(field);
NumberFormat df = new DecimalFormat("000", new DecimalFormatSymbols(Locale.ENGLISH));
for (int i = 0; i < 1000; i++) {
field.setStringValue(df.format(i));
writer.addDocument(doc);
}
reader = writer.getReader();
searcher = newSearcher(reader);
writer.close();
if (VERBOSE) {
System.out.println("TEST: setUp searcher=" + searcher);
}
}
|
public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
FieldType customType = new FieldType(TextField.TYPE_STORED);
customType.setOmitNorms(true);
Field field = newField("field", "", customType);
doc.add(field);
NumberFormat df = new DecimalFormat("000", new DecimalFormatSymbols(Locale.ENGLISH));
for (int i = 0; i < 1000; i++) {
field.setValue(df.format(i));
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
searcher = newSearcher(reader);
}
| public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
FieldType customType = new FieldType(TextField.TYPE_STORED);
customType.setOmitNorms(true);
Field field = newField("field", "", customType);
doc.add(field);
NumberFormat df = new DecimalFormat("000", new DecimalFormatSymbols(Locale.ENGLISH));
for (int i = 0; i < 1000; i++) {
field.setStringValue(df.format(i));
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
searcher = newSearcher(reader);
}
|
public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField("field", "", StringField.TYPE_UNSTORED);
doc.add(field);
// we generate aweful prefixes: good for testing.
// but for preflex codec, the test can be very slow, so use less iterations.
final String codec = Codec.getDefault().getName();
int num = codec.equals("Lucene3x") ? 200 * RANDOM_MULTIPLIER : atLeast(1000);
for (int i = 0; i < num; i++) {
field.setValue(_TestUtil.randomUnicodeString(random, 10));
writer.addDocument(doc);
}
reader = writer.getReader();
searcher = newSearcher(reader);
writer.close();
}
| public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField("field", "", StringField.TYPE_UNSTORED);
doc.add(field);
// we generate aweful prefixes: good for testing.
// but for preflex codec, the test can be very slow, so use less iterations.
final String codec = Codec.getDefault().getName();
int num = codec.equals("Lucene3x") ? 200 * RANDOM_MULTIPLIER : atLeast(1000);
for (int i = 0; i < num; i++) {
field.setStringValue(_TestUtil.randomUnicodeString(random, 10));
writer.addDocument(doc);
}
reader = writer.getReader();
searcher = newSearcher(reader);
writer.close();
}
|
public void assertFromTestData(int codePointTable[]) throws Exception {
if (VERBOSE) {
System.out.println("TEST: codePointTable=" + codePointTable);
}
InputStream stream = getClass().getResourceAsStream("fuzzyTestData.txt");
BufferedReader reader = new BufferedReader(new InputStreamReader(stream, "UTF-8"));
int bits = Integer.parseInt(reader.readLine());
int terms = (int) Math.pow(2, bits);
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy()));
Document doc = new Document();
Field field = newField("field", "", TextField.TYPE_UNSTORED);
doc.add(field);
for (int i = 0; i < terms; i++) {
field.setValue(mapInt(codePointTable, i));
writer.addDocument(doc);
}
IndexReader r = writer.getReader();
IndexSearcher searcher = newSearcher(r);
if (VERBOSE) {
System.out.println("TEST: searcher=" + searcher);
}
// even though this uses a boost-only rewrite, this test relies upon queryNorm being the default implementation,
// otherwise scores are different!
searcher.setSimilarity(new DefaultSimilarity());
writer.close();
String line;
while ((line = reader.readLine()) != null) {
String params[] = line.split(",");
String query = mapInt(codePointTable, Integer.parseInt(params[0]));
int prefix = Integer.parseInt(params[1]);
int pqSize = Integer.parseInt(params[2]);
float minScore = Float.parseFloat(params[3]);
FuzzyQuery q = new FuzzyQuery(new Term("field", query), minScore, prefix);
q.setRewriteMethod(new MultiTermQuery.TopTermsBoostOnlyBooleanQueryRewrite(pqSize));
int expectedResults = Integer.parseInt(reader.readLine());
TopDocs docs = searcher.search(q, expectedResults);
assertEquals(expectedResults, docs.totalHits);
for (int i = 0; i < expectedResults; i++) {
String scoreDoc[] = reader.readLine().split(",");
assertEquals(Integer.parseInt(scoreDoc[0]), docs.scoreDocs[i].doc);
assertEquals(Float.parseFloat(scoreDoc[1]), docs.scoreDocs[i].score, epsilon);
}
}
r.close();
dir.close();
}
| public void assertFromTestData(int codePointTable[]) throws Exception {
if (VERBOSE) {
System.out.println("TEST: codePointTable=" + codePointTable);
}
InputStream stream = getClass().getResourceAsStream("fuzzyTestData.txt");
BufferedReader reader = new BufferedReader(new InputStreamReader(stream, "UTF-8"));
int bits = Integer.parseInt(reader.readLine());
int terms = (int) Math.pow(2, bits);
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy()));
Document doc = new Document();
Field field = newField("field", "", TextField.TYPE_UNSTORED);
doc.add(field);
for (int i = 0; i < terms; i++) {
field.setStringValue(mapInt(codePointTable, i));
writer.addDocument(doc);
}
IndexReader r = writer.getReader();
IndexSearcher searcher = newSearcher(r);
if (VERBOSE) {
System.out.println("TEST: searcher=" + searcher);
}
// even though this uses a boost-only rewrite, this test relies upon queryNorm being the default implementation,
// otherwise scores are different!
searcher.setSimilarity(new DefaultSimilarity());
writer.close();
String line;
while ((line = reader.readLine()) != null) {
String params[] = line.split(",");
String query = mapInt(codePointTable, Integer.parseInt(params[0]));
int prefix = Integer.parseInt(params[1]);
int pqSize = Integer.parseInt(params[2]);
float minScore = Float.parseFloat(params[3]);
FuzzyQuery q = new FuzzyQuery(new Term("field", query), minScore, prefix);
q.setRewriteMethod(new MultiTermQuery.TopTermsBoostOnlyBooleanQueryRewrite(pqSize));
int expectedResults = Integer.parseInt(reader.readLine());
TopDocs docs = searcher.search(q, expectedResults);
assertEquals(expectedResults, docs.totalHits);
for (int i = 0; i < expectedResults; i++) {
String scoreDoc[] = reader.readLine().split(",");
assertEquals(Integer.parseInt(scoreDoc[0]), docs.scoreDocs[i].doc);
assertEquals(Float.parseFloat(scoreDoc[1]), docs.scoreDocs[i].score, epsilon);
}
}
r.close();
dir.close();
}
|
public void testRandomPhrases() throws Exception {
Directory dir = newDirectory();
Analyzer analyzer = new MockAnalyzer(random);
RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).setMergePolicy(newLogMergePolicy()));
List<List<String>> docs = new ArrayList<List<String>>();
Document d = new Document();
Field f = newField("f", "", TextField.TYPE_UNSTORED);
d.add(f);
Random r = random;
int NUM_DOCS = atLeast(10);
for (int i = 0; i < NUM_DOCS; i++) {
// must be > 4096 so it spans multiple chunks
int termCount = _TestUtil.nextInt(random, 4097, 8200);
List<String> doc = new ArrayList<String>();
StringBuilder sb = new StringBuilder();
while(doc.size() < termCount) {
if (r.nextInt(5) == 1 || docs.size() == 0) {
// make new non-empty-string term
String term;
while(true) {
term = _TestUtil.randomUnicodeString(r);
if (term.length() > 0) {
break;
}
}
TokenStream ts = analyzer.tokenStream("ignore", new StringReader(term));
CharTermAttribute termAttr = ts.addAttribute(CharTermAttribute.class);
ts.reset();
while(ts.incrementToken()) {
String text = termAttr.toString();
doc.add(text);
sb.append(text).append(' ');
}
ts.end();
ts.close();
} else {
// pick existing sub-phrase
List<String> lastDoc = docs.get(r.nextInt(docs.size()));
int len = _TestUtil.nextInt(r, 1, 10);
int start = r.nextInt(lastDoc.size()-len);
for(int k=start;k<start+len;k++) {
String t = lastDoc.get(k);
doc.add(t);
sb.append(t).append(' ');
}
}
}
docs.add(doc);
f.setValue(sb.toString());
w.addDocument(d);
}
IndexReader reader = w.getReader();
IndexSearcher s = newSearcher(reader);
w.close();
// now search
int num = atLeast(10);
for(int i=0;i<num;i++) {
int docID = r.nextInt(docs.size());
List<String> doc = docs.get(docID);
final int numTerm = _TestUtil.nextInt(r, 2, 20);
final int start = r.nextInt(doc.size()-numTerm);
PhraseQuery pq = new PhraseQuery();
StringBuilder sb = new StringBuilder();
for(int t=start;t<start+numTerm;t++) {
pq.add(new Term("f", doc.get(t)));
sb.append(doc.get(t)).append(' ');
}
TopDocs hits = s.search(pq, NUM_DOCS);
boolean found = false;
for(int j=0;j<hits.scoreDocs.length;j++) {
if (hits.scoreDocs[j].doc == docID) {
found = true;
break;
}
}
assertTrue("phrase '" + sb + "' not found; start=" + start, found);
}
reader.close();
dir.close();
}
| public void testRandomPhrases() throws Exception {
Directory dir = newDirectory();
Analyzer analyzer = new MockAnalyzer(random);
RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).setMergePolicy(newLogMergePolicy()));
List<List<String>> docs = new ArrayList<List<String>>();
Document d = new Document();
Field f = newField("f", "", TextField.TYPE_UNSTORED);
d.add(f);
Random r = random;
int NUM_DOCS = atLeast(10);
for (int i = 0; i < NUM_DOCS; i++) {
// must be > 4096 so it spans multiple chunks
int termCount = _TestUtil.nextInt(random, 4097, 8200);
List<String> doc = new ArrayList<String>();
StringBuilder sb = new StringBuilder();
while(doc.size() < termCount) {
if (r.nextInt(5) == 1 || docs.size() == 0) {
// make new non-empty-string term
String term;
while(true) {
term = _TestUtil.randomUnicodeString(r);
if (term.length() > 0) {
break;
}
}
TokenStream ts = analyzer.tokenStream("ignore", new StringReader(term));
CharTermAttribute termAttr = ts.addAttribute(CharTermAttribute.class);
ts.reset();
while(ts.incrementToken()) {
String text = termAttr.toString();
doc.add(text);
sb.append(text).append(' ');
}
ts.end();
ts.close();
} else {
// pick existing sub-phrase
List<String> lastDoc = docs.get(r.nextInt(docs.size()));
int len = _TestUtil.nextInt(r, 1, 10);
int start = r.nextInt(lastDoc.size()-len);
for(int k=start;k<start+len;k++) {
String t = lastDoc.get(k);
doc.add(t);
sb.append(t).append(' ');
}
}
}
docs.add(doc);
f.setStringValue(sb.toString());
w.addDocument(d);
}
IndexReader reader = w.getReader();
IndexSearcher s = newSearcher(reader);
w.close();
// now search
int num = atLeast(10);
for(int i=0;i<num;i++) {
int docID = r.nextInt(docs.size());
List<String> doc = docs.get(docID);
final int numTerm = _TestUtil.nextInt(r, 2, 20);
final int start = r.nextInt(doc.size()-numTerm);
PhraseQuery pq = new PhraseQuery();
StringBuilder sb = new StringBuilder();
for(int t=start;t<start+numTerm;t++) {
pq.add(new Term("f", doc.get(t)));
sb.append(doc.get(t)).append(' ');
}
TopDocs hits = s.search(pq, NUM_DOCS);
boolean found = false;
for(int j=0;j<hits.scoreDocs.length;j++) {
if (hits.scoreDocs[j].doc == docID) {
found = true;
break;
}
}
assertTrue("phrase '" + sb + "' not found; start=" + start, found);
}
reader.close();
dir.close();
}
|
public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
fieldName = random.nextBoolean() ? "field" : ""; // sometimes use an empty string as field name
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField(fieldName, "", StringField.TYPE_UNSTORED);
doc.add(field);
List<String> terms = new ArrayList<String>();
int num = atLeast(200);
for (int i = 0; i < num; i++) {
String s = _TestUtil.randomUnicodeString(random);
field.setValue(s);
terms.add(s);
writer.addDocument(doc);
}
if (VERBOSE) {
// utf16 order
Collections.sort(terms);
System.out.println("UTF16 order:");
for(String s : terms) {
System.out.println(" " + UnicodeUtil.toHexString(s));
}
}
reader = writer.getReader();
searcher1 = newSearcher(reader);
searcher2 = newSearcher(reader);
writer.close();
}
| public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
fieldName = random.nextBoolean() ? "field" : ""; // sometimes use an empty string as field name
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField(fieldName, "", StringField.TYPE_UNSTORED);
doc.add(field);
List<String> terms = new ArrayList<String>();
int num = atLeast(200);
for (int i = 0; i < num; i++) {
String s = _TestUtil.randomUnicodeString(random);
field.setStringValue(s);
terms.add(s);
writer.addDocument(doc);
}
if (VERBOSE) {
// utf16 order
Collections.sort(terms);
System.out.println("UTF16 order:");
for(String s : terms) {
System.out.println(" " + UnicodeUtil.toHexString(s));
}
}
reader = writer.getReader();
searcher1 = newSearcher(reader);
searcher2 = newSearcher(reader);
writer.close();
}
|
public void setUp() throws Exception {
super.setUp();
// we generate aweful regexps: good for testing.
// but for preflex codec, the test can be very slow, so use less iterations.
numIterations = Codec.getDefault().getName().equals("Lucene3x") ? 10 * RANDOM_MULTIPLIER : atLeast(50);
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random, MockTokenizer.KEYWORD, false))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField("field", "", StringField.TYPE_STORED);
doc.add(field);
terms = new TreeSet<BytesRef>();
int num = atLeast(200);
for (int i = 0; i < num; i++) {
String s = _TestUtil.randomUnicodeString(random);
field.setValue(s);
terms.add(new BytesRef(s));
writer.addDocument(doc);
}
termsAutomaton = DaciukMihovAutomatonBuilder.build(terms);
reader = writer.getReader();
searcher = newSearcher(reader);
writer.close();
}
| public void setUp() throws Exception {
super.setUp();
// we generate aweful regexps: good for testing.
// but for preflex codec, the test can be very slow, so use less iterations.
numIterations = Codec.getDefault().getName().equals("Lucene3x") ? 10 * RANDOM_MULTIPLIER : atLeast(50);
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random, MockTokenizer.KEYWORD, false))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField("field", "", StringField.TYPE_STORED);
doc.add(field);
terms = new TreeSet<BytesRef>();
int num = atLeast(200);
for (int i = 0; i < num; i++) {
String s = _TestUtil.randomUnicodeString(random);
field.setStringValue(s);
terms.add(new BytesRef(s));
writer.addDocument(doc);
}
termsAutomaton = DaciukMihovAutomatonBuilder.build(terms);
reader = writer.getReader();
searcher = newSearcher(reader);
writer.close();
}
|
public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField("field", "", StringField.TYPE_UNSTORED);
doc.add(field);
// we generate aweful prefixes: good for testing.
// but for preflex codec, the test can be very slow, so use less iterations.
int num = atLeast(10);
for (int i = 0; i < num; i++) {
field.setValue(_TestUtil.randomUnicodeString(random, 10));
writer.addDocument(doc);
}
reader = writer.getReader();
searcher = newSearcher(reader);
writer.close();
}
| public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField("field", "", StringField.TYPE_UNSTORED);
doc.add(field);
// we generate aweful prefixes: good for testing.
// but for preflex codec, the test can be very slow, so use less iterations.
int num = atLeast(10);
for (int i = 0; i < num; i++) {
field.setStringValue(_TestUtil.randomUnicodeString(random, 10));
writer.addDocument(doc);
}
reader = writer.getReader();
searcher = newSearcher(reader);
writer.close();
}
|
public void testCustomEncoder() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
config.setSimilarity(new CustomNormEncodingSimilarity());
RandomIndexWriter writer = new RandomIndexWriter(random, dir, config);
Document doc = new Document();
Field foo = newField("foo", "", TextField.TYPE_UNSTORED);
Field bar = newField("bar", "", TextField.TYPE_UNSTORED);
doc.add(foo);
doc.add(bar);
for (int i = 0; i < 100; i++) {
bar.setValue("singleton");
writer.addDocument(doc);
}
| public void testCustomEncoder() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
config.setSimilarity(new CustomNormEncodingSimilarity());
RandomIndexWriter writer = new RandomIndexWriter(random, dir, config);
Document doc = new Document();
Field foo = newField("foo", "", TextField.TYPE_UNSTORED);
Field bar = newField("bar", "", TextField.TYPE_UNSTORED);
doc.add(foo);
doc.add(bar);
for (int i = 0; i < 100; i++) {
bar.setStringValue("singleton");
writer.addDocument(doc);
}
|
public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random, MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy());
config.setSimilarity(new TestSimilarity());
RandomIndexWriter writer = new RandomIndexWriter(random, dir, config);
Document doc = new Document();
Field foo = newField("foo", "", TextField.TYPE_UNSTORED);
doc.add(foo);
for (int i = 0; i < 100; i++) {
foo.setValue(addValue());
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
}
| public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random, MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy());
config.setSimilarity(new TestSimilarity());
RandomIndexWriter writer = new RandomIndexWriter(random, dir, config);
Document doc = new Document();
Field foo = newField("foo", "", TextField.TYPE_UNSTORED);
doc.add(foo);
for (int i = 0; i < 100; i++) {
foo.setStringValue(addValue());
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
}
|
public void testRollingUpdates() throws Exception {
final MockDirectoryWrapper dir = newDirectory();
dir.setCheckIndexOnClose(false); // we use a custom codec provider
final LineFileDocs docs = new LineFileDocs(random);
//provider.register(new MemoryCodec());
if ( (!"Lucene3x".equals(Codec.getDefault().getName())) && random.nextBoolean()) {
Codec.setDefault(_TestUtil.alwaysPostingsFormat(new MemoryPostingsFormat(random.nextBoolean())));
}
final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
final int SIZE = atLeast(20);
int id = 0;
IndexReader r = null;
final int numUpdates = (int) (SIZE * (2+(TEST_NIGHTLY ? 200*random.nextDouble() : 5*random.nextDouble())));
if (VERBOSE) {
System.out.println("TEST: numUpdates=" + numUpdates);
}
for(int docIter=0;docIter<numUpdates;docIter++) {
final Document doc = docs.nextDoc();
final String myID = ""+id;
if (id == SIZE-1) {
id = 0;
} else {
id++;
}
((Field) doc.getField("docid")).setValue(myID);
w.updateDocument(new Term("docid", myID), doc);
if (docIter >= SIZE && random.nextInt(50) == 17) {
if (r != null) {
r.close();
}
final boolean applyDeletions = random.nextBoolean();
r = w.getReader(applyDeletions);
assertTrue("applyDeletions=" + applyDeletions + " r.numDocs()=" + r.numDocs() + " vs SIZE=" + SIZE, !applyDeletions || r.numDocs() == SIZE);
}
}
if (r != null) {
r.close();
}
w.commit();
assertEquals(SIZE, w.numDocs());
w.close();
docs.close();
_TestUtil.checkIndex(dir);
dir.close();
}
| public void testRollingUpdates() throws Exception {
final MockDirectoryWrapper dir = newDirectory();
dir.setCheckIndexOnClose(false); // we use a custom codec provider
final LineFileDocs docs = new LineFileDocs(random);
//provider.register(new MemoryCodec());
if ( (!"Lucene3x".equals(Codec.getDefault().getName())) && random.nextBoolean()) {
Codec.setDefault(_TestUtil.alwaysPostingsFormat(new MemoryPostingsFormat(random.nextBoolean())));
}
final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
final int SIZE = atLeast(20);
int id = 0;
IndexReader r = null;
final int numUpdates = (int) (SIZE * (2+(TEST_NIGHTLY ? 200*random.nextDouble() : 5*random.nextDouble())));
if (VERBOSE) {
System.out.println("TEST: numUpdates=" + numUpdates);
}
for(int docIter=0;docIter<numUpdates;docIter++) {
final Document doc = docs.nextDoc();
final String myID = ""+id;
if (id == SIZE-1) {
id = 0;
} else {
id++;
}
((Field) doc.getField("docid")).setStringValue(myID);
w.updateDocument(new Term("docid", myID), doc);
if (docIter >= SIZE && random.nextInt(50) == 17) {
if (r != null) {
r.close();
}
final boolean applyDeletions = random.nextBoolean();
r = w.getReader(applyDeletions);
assertTrue("applyDeletions=" + applyDeletions + " r.numDocs()=" + r.numDocs() + " vs SIZE=" + SIZE, !applyDeletions || r.numDocs() == SIZE);
}
}
if (r != null) {
r.close();
}
w.commit();
assertEquals(SIZE, w.numDocs());
w.close();
docs.close();
_TestUtil.checkIndex(dir);
dir.close();
}
|
public void testRandomStoredFields() throws IOException {
Directory dir = newDirectory();
Random rand = random;
RandomIndexWriter w = new RandomIndexWriter(rand, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(_TestUtil.nextInt(rand, 5, 20)));
//w.w.setUseCompoundFile(false);
final int docCount = atLeast(200);
final int fieldCount = _TestUtil.nextInt(rand, 1, 5);
final List<Integer> fieldIDs = new ArrayList<Integer>();
FieldType customType = new FieldType(TextField.TYPE_STORED);
customType.setTokenized(false);
Field idField = newField("id", "", customType);
for(int i=0;i<fieldCount;i++) {
fieldIDs.add(i);
}
final Map<String,Document> docs = new HashMap<String,Document>();
if (VERBOSE) {
System.out.println("TEST: build index docCount=" + docCount);
}
FieldType customType2 = new FieldType();
customType2.setStored(true);
for(int i=0;i<docCount;i++) {
Document doc = new Document();
doc.add(idField);
final String id = ""+i;
idField.setValue(id);
docs.put(id, doc);
if (VERBOSE) {
System.out.println("TEST: add doc id=" + id);
}
for(int field: fieldIDs) {
final String s;
if (rand.nextInt(4) != 3) {
s = _TestUtil.randomUnicodeString(rand, 1000);
doc.add(newField("f"+field, s, customType2));
} else {
s = null;
}
}
w.addDocument(doc);
if (rand.nextInt(50) == 17) {
// mixup binding of field name -> Number every so often
Collections.shuffle(fieldIDs);
}
if (rand.nextInt(5) == 3 && i > 0) {
final String delID = ""+rand.nextInt(i);
if (VERBOSE) {
System.out.println("TEST: delete doc id=" + delID);
}
w.deleteDocuments(new Term("id", delID));
docs.remove(delID);
}
}
if (VERBOSE) {
System.out.println("TEST: " + docs.size() + " docs in index; now load fields");
}
if (docs.size() > 0) {
String[] idsList = docs.keySet().toArray(new String[docs.size()]);
for(int x=0;x<2;x++) {
IndexReader r = w.getReader();
IndexSearcher s = newSearcher(r);
if (VERBOSE) {
System.out.println("TEST: cycle x=" + x + " r=" + r);
}
int num = atLeast(1000);
for(int iter=0;iter<num;iter++) {
String testID = idsList[rand.nextInt(idsList.length)];
if (VERBOSE) {
System.out.println("TEST: test id=" + testID);
}
TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
assertEquals(1, hits.totalHits);
Document doc = r.document(hits.scoreDocs[0].doc);
Document docExp = docs.get(testID);
for(int i=0;i<fieldCount;i++) {
assertEquals("doc " + testID + ", field f" + fieldCount + " is wrong", docExp.get("f"+i), doc.get("f"+i));
}
}
r.close();
w.forceMerge(1);
}
}
w.close();
dir.close();
}
| public void testRandomStoredFields() throws IOException {
Directory dir = newDirectory();
Random rand = random;
RandomIndexWriter w = new RandomIndexWriter(rand, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(_TestUtil.nextInt(rand, 5, 20)));
//w.w.setUseCompoundFile(false);
final int docCount = atLeast(200);
final int fieldCount = _TestUtil.nextInt(rand, 1, 5);
final List<Integer> fieldIDs = new ArrayList<Integer>();
FieldType customType = new FieldType(TextField.TYPE_STORED);
customType.setTokenized(false);
Field idField = newField("id", "", customType);
for(int i=0;i<fieldCount;i++) {
fieldIDs.add(i);
}
final Map<String,Document> docs = new HashMap<String,Document>();
if (VERBOSE) {
System.out.println("TEST: build index docCount=" + docCount);
}
FieldType customType2 = new FieldType();
customType2.setStored(true);
for(int i=0;i<docCount;i++) {
Document doc = new Document();
doc.add(idField);
final String id = ""+i;
idField.setStringValue(id);
docs.put(id, doc);
if (VERBOSE) {
System.out.println("TEST: add doc id=" + id);
}
for(int field: fieldIDs) {
final String s;
if (rand.nextInt(4) != 3) {
s = _TestUtil.randomUnicodeString(rand, 1000);
doc.add(newField("f"+field, s, customType2));
} else {
s = null;
}
}
w.addDocument(doc);
if (rand.nextInt(50) == 17) {
// mixup binding of field name -> Number every so often
Collections.shuffle(fieldIDs);
}
if (rand.nextInt(5) == 3 && i > 0) {
final String delID = ""+rand.nextInt(i);
if (VERBOSE) {
System.out.println("TEST: delete doc id=" + delID);
}
w.deleteDocuments(new Term("id", delID));
docs.remove(delID);
}
}
if (VERBOSE) {
System.out.println("TEST: " + docs.size() + " docs in index; now load fields");
}
if (docs.size() > 0) {
String[] idsList = docs.keySet().toArray(new String[docs.size()]);
for(int x=0;x<2;x++) {
IndexReader r = w.getReader();
IndexSearcher s = newSearcher(r);
if (VERBOSE) {
System.out.println("TEST: cycle x=" + x + " r=" + r);
}
int num = atLeast(1000);
for(int iter=0;iter<num;iter++) {
String testID = idsList[rand.nextInt(idsList.length)];
if (VERBOSE) {
System.out.println("TEST: test id=" + testID);
}
TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
assertEquals(1, hits.totalHits);
Document doc = r.document(hits.scoreDocs[0].doc);
Document docExp = docs.get(testID);
for(int i=0;i<fieldCount;i++) {
assertEquals("doc " + testID + ", field f" + fieldCount + " is wrong", docExp.get("f"+i), doc.get("f"+i));
}
}
r.close();
w.forceMerge(1);
}
}
w.close();
dir.close();
}
|
This is the retrieval corpus of buggy/fixed function pairs used in the paper "ReAPR: Automatic Program Repair via Retrieval-Augmented Large Language Models".
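For context, here is a minimal sketch of how a corpus like this might be loaded and queried for repair exemplars. The repository id (`user/reapr-retrieval`) and the field names (`buggy_function`, `fixed_function`) are assumptions used only for illustration, and the toy lexical retriever below is not the retrieval method described in the paper:

```python
# Sketch only: the repo id and field names are assumptions, not confirmed by this page.
from datasets import load_dataset

# Hypothetical repository id; substitute the actual dataset location.
ds = load_dataset("user/reapr-retrieval", split="train")

def naive_retrieve(buggy_snippet: str, k: int = 3):
    """Return the k rows whose assumed `buggy_function` field shares the most
    whitespace-separated tokens with the query (a toy lexical retriever)."""
    query_tokens = set(buggy_snippet.split())
    scored = []
    for row in ds:
        overlap = len(query_tokens & set(row["buggy_function"].split()))
        scored.append((overlap, row))
    scored.sort(key=lambda pair: pair[0], reverse=True)
    return [row for _, row in scored[:k]]

# Example query: the lock-release bug shown above (refreshLock.lock() in a finally block).
for row in naive_retrieve("try { doMaybeRefresh(); } finally { refreshLock.lock(); }"):
    print(row["fixed_function"][:120])
```

In a retrieval-augmented repair setup, the retrieved buggy/fixed pairs can then be placed in the prompt as in-context examples alongside the new buggy function before asking the model for a patch.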