public class TableRecordReader extends org.apache.hadoop.mapreduce.RecordReader<org.apache.hadoop.io.BytesWritable,Tuple>
| Constructor and Description |
| --- |
| TableRecordReader(org.apache.hadoop.zebra.mapreduce.TableExpr expr, java.lang.String projection, org.apache.hadoop.mapreduce.InputSplit split, org.apache.hadoop.mapreduce.JobContext jobContext) |
| Modifier and Type | Method and Description |
| --- | --- |
| boolean | atEnd() - Check if the end of the input has been reached |
| void | close() |
| org.apache.hadoop.io.BytesWritable | getCurrentKey() |
| Tuple | getCurrentValue() |
| long | getPos() |
| float | getProgress() |
| void | initialize(org.apache.hadoop.mapreduce.InputSplit arg0, org.apache.hadoop.mapreduce.TaskAttemptContext arg1) |
| boolean | nextKeyValue() |
| boolean | seekTo(org.apache.hadoop.io.BytesWritable key) - Seek to the position at the first row which has the key, or just after the key; only applicable to a sorted Zebra table |
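A minimal usage sketch of the read loop, assuming the reader instance, its InputSplit, and TaskAttemptContext are supplied by the MapReduce framework (or constructed directly as shown under the constructor below); Tuple is assumed here to be org.apache.pig.data.Tuple, as elsewhere in the Zebra mapreduce API:

```java
import java.io.IOException;

import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.zebra.mapreduce.TableRecordReader;
import org.apache.pig.data.Tuple;

public class TableRecordReaderReadLoop {
    // Standard RecordReader contract: initialize once, then alternate
    // nextKeyValue() with getCurrentKey()/getCurrentValue(), closing at the end.
    static void readAll(TableRecordReader reader,
                        InputSplit split,
                        TaskAttemptContext context)
            throws IOException, InterruptedException {
        reader.initialize(split, context);
        try {
            while (reader.nextKeyValue()) {
                BytesWritable key = reader.getCurrentKey();
                Tuple row = reader.getCurrentValue();
                // process key/row here
            }
        } finally {
            reader.close();
        }
    }
}
```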
public TableRecordReader(org.apache.hadoop.zebra.mapreduce.TableExpr expr, java.lang.String projection, org.apache.hadoop.mapreduce.InputSplit split, org.apache.hadoop.mapreduce.JobContext jobContext) throws java.io.IOException, org.apache.hadoop.zebra.parser.ParseException
Parameters:
expr - Table expression
projection - projection schema. Should never be null.
split - the split to work on
jobContext - JobContext object
Throws:
java.io.IOException
org.apache.hadoop.zebra.parser.ParseException
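A construction sketch, assuming a TableExpr and an InputSplit have already been obtained elsewhere (for example from the Zebra input format); the projection string "c1, c2" is a hypothetical comma-separated column list:

```java
import java.io.IOException;

import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.zebra.mapreduce.TableExpr;
import org.apache.hadoop.zebra.mapreduce.TableRecordReader;
import org.apache.hadoop.zebra.parser.ParseException;

public class TableRecordReaderConstruction {
    // expr, split, and jobContext are assumed to be supplied by the caller.
    // The projection must never be null; "c1, c2" is a placeholder column list.
    static TableRecordReader open(TableExpr expr,
                                  InputSplit split,
                                  JobContext jobContext)
            throws IOException, ParseException {
        String projection = "c1, c2";
        return new TableRecordReader(expr, projection, split, jobContext);
    }
}
```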
public void close() throws java.io.IOException
Specified by:
close in interface java.io.Closeable
close in interface java.lang.AutoCloseable
close in class org.apache.hadoop.mapreduce.RecordReader<org.apache.hadoop.io.BytesWritable,Tuple>
Throws:
java.io.IOException
public long getPos() throws java.io.IOException
Throws:
java.io.IOException
public float getProgress() throws java.io.IOException
Specified by:
getProgress in class org.apache.hadoop.mapreduce.RecordReader<org.apache.hadoop.io.BytesWritable,Tuple>
Throws:
java.io.IOException
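A small sketch that reports position and progress while iterating, assuming an already-initialized reader; the 10000-row reporting interval is arbitrary:

```java
import java.io.IOException;

import org.apache.hadoop.zebra.mapreduce.TableRecordReader;

public class TableRecordReaderProgress {
    // Prints the current position and fractional progress every 10000 rows.
    static void readWithProgress(TableRecordReader reader)
            throws IOException, InterruptedException {
        long rows = 0;
        while (reader.nextKeyValue()) {
            rows++;
            if (rows % 10000 == 0) {
                System.out.printf("rows=%d pos=%d progress=%.1f%%%n",
                        rows, reader.getPos(), reader.getProgress() * 100f);
            }
        }
    }
}
```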
public boolean seekTo(org.apache.hadoop.io.BytesWritable key) throws java.io.IOException
Parameters:
key - the key to seek on
Throws:
java.io.IOException
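A keyed-lookup sketch for a sorted Zebra table, assuming the reader has already been initialized over a sorted table; how seekTo() interacts with the first subsequent nextKeyValue() call is not spelled out here, so the loop below simply reads forward from the seeked position:

```java
import java.io.IOException;

import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.zebra.mapreduce.TableRecordReader;
import org.apache.pig.data.Tuple;

public class TableRecordReaderSeek {
    // Positions the reader at the first row with the given key (or just after it)
    // and reads forward; only applicable to a sorted Zebra table.
    static void readFrom(TableRecordReader reader, byte[] keyBytes)
            throws IOException, InterruptedException {
        BytesWritable key = new BytesWritable(keyBytes);
        reader.seekTo(key);
        if (reader.atEnd()) {
            return; // the key lies beyond the last row of this reader's input
        }
        while (reader.nextKeyValue()) {
            Tuple row = reader.getCurrentValue();
            // process row here
        }
    }
}
```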
public boolean atEnd() throws java.io.IOException
Throws:
java.io.IOException
public org.apache.hadoop.io.BytesWritable getCurrentKey() throws java.io.IOException, java.lang.InterruptedException
Specified by:
getCurrentKey in class org.apache.hadoop.mapreduce.RecordReader<org.apache.hadoop.io.BytesWritable,Tuple>
Throws:
java.io.IOException
java.lang.InterruptedException
public Tuple getCurrentValue() throws java.io.IOException, java.lang.InterruptedException
Specified by:
getCurrentValue in class org.apache.hadoop.mapreduce.RecordReader<org.apache.hadoop.io.BytesWritable,Tuple>
Throws:
java.io.IOException
java.lang.InterruptedException
public void initialize(org.apache.hadoop.mapreduce.InputSplit arg0, org.apache.hadoop.mapreduce.TaskAttemptContext arg1) throws java.io.IOException, java.lang.InterruptedException
Specified by:
initialize in class org.apache.hadoop.mapreduce.RecordReader<org.apache.hadoop.io.BytesWritable,Tuple>
Throws:
java.io.IOException
java.lang.InterruptedException
public boolean nextKeyValue() throws java.io.IOException, java.lang.InterruptedException
Specified by:
nextKeyValue in class org.apache.hadoop.mapreduce.RecordReader<org.apache.hadoop.io.BytesWritable,Tuple>
Throws:
java.io.IOException
java.lang.InterruptedException