Use Memcache in Dataflow: NullPointerException at NamespaceManager.get - google-app-engine

I am trying to access GAE Memcache and Datastore APIs from Dataflow.
I have followed How to use memcache in dataflow? and set up the Remote API (https://cloud.google.com/appengine/docs/java/tools/remoteapi).
In my pipeline I have written:
public static void main(String[] args) throws IOException {
RemoteApiOptions remApiOpts = new RemoteApiOptions()
.server("xxx.appspot.com", 443)
.useApplicationDefaultCredential();
RemoteApiInstaller installer = new RemoteApiInstaller();
installer.install(remApiOpts);
try {
DatastoreConfigManager2.registerConfig("myconfig");
final String topic = DatastoreConfigManager2.getString("pubsub.topic");
final String stagingDir = DatastoreConfigManager2.getString("dataflow.staging");
...
bqRows.apply(BigQueryIO.Write
.named("Insert row")
.to(new SerializableFunction<BoundedWindow, String>() {
@Override
public String apply(BoundedWindow window) {
// The cast below is safe because CalendarWindows.days(1) produces IntervalWindows.
IntervalWindow day = (IntervalWindow) window;
String dataset = DatastoreConfigManager2.getString("dataflow.bigquery.dataset");
String tablePrefix = DatastoreConfigManager2.getString("dataflow.bigquery.tablenametemplate");
String dayString = DateTimeFormat.forPattern("yyyyMMdd")
.print(day.start());
String tableName = dataset + "." + tablePrefix + dayString;
LOG.info("Writing to BigQuery " + tableName);
return tableName;
}
})
where DatastoreConfigManager2 is
public class DatastoreConfigManager2 {
private static final DatastoreService DATASTORE = DatastoreServiceFactory.getDatastoreService();
private static final MemcacheService MEMCACHE = MemcacheServiceFactory.getMemcacheService();
static {
MEMCACHE.setErrorHandler(ErrorHandlers.getConsistentLogAndContinue(Level.INFO));
}
private static Set<String> configs = Sets.newConcurrentHashSet();
public static void registerConfig(String name) {
configs.add(name);
}
private static class DatastoreCallbacks {
// https://cloud.google.com/appengine/docs/java/datastore/callbacks
@PostPut
public void updateCacheOnPut(PutContext context) {
Entity entity = context.getCurrentElement();
if (configs.contains(entity.getKind())) {
String id = (String) entity.getProperty("id");
String value = (String) entity.getProperty("value");
MEMCACHE.put(id, value);
}
}
}
private static String lookup(String id) {
String value = (String) MEMCACHE.get(id);
if (value != null) return value;
else {
for (String config : configs) {
try {
PreparedQuery pq = DATASTORE.prepare(new Query(config)
.setFilter(new FilterPredicate("id", FilterOperator.EQUAL, id)));
for (Entity entity : pq.asIterable()) {
value = (String) entity.getProperty("value"); // use last
}
if (value != null) MEMCACHE.put(id, value);
} catch (Exception e) {
e.printStackTrace();
}
}
}
return value;
}
public static String getString(String id) {
return lookup(id);
}
}
When my pipeline runs on Dataflow I get the exception
Caused by: java.lang.NullPointerException
at com.google.appengine.api.NamespaceManager.get(NamespaceManager.java:101)
at com.google.appengine.api.memcache.BaseMemcacheServiceImpl.getEffectiveNamespace(BaseMemcacheServiceImpl.java:65)
at com.google.appengine.api.memcache.AsyncMemcacheServiceImpl.doGet(AsyncMemcacheServiceImpl.java:401)
at com.google.appengine.api.memcache.AsyncMemcacheServiceImpl.get(AsyncMemcacheServiceImpl.java:412)
at com.google.appengine.api.memcache.MemcacheServiceImpl.get(MemcacheServiceImpl.java:49)
at my.training.google.common.config.DatastoreConfigManager2.lookup(DatastoreConfigManager2.java:80)
at my.training.google.common.config.DatastoreConfigManager2.getString(DatastoreConfigManager2.java:117)
at my.training.google.mss.pipeline.InsertIntoBqWithCalendarWindow$1.apply(InsertIntoBqWithCalendarWindow.java:101)
at my.training.google.mss.pipeline.InsertIntoBqWithCalendarWindow$1.apply(InsertIntoBqWithCalendarWindow.java:95)
at com.google.cloud.dataflow.sdk.io.BigQueryIO$Write$Bound$TranslateTableSpecFunction.apply(BigQueryIO.java:1496)
at com.google.cloud.dataflow.sdk.io.BigQueryIO$Write$Bound$TranslateTableSpecFunction.apply(BigQueryIO.java:1486)
at com.google.cloud.dataflow.sdk.io.BigQueryIO$TagWithUniqueIdsAndTable.tableSpecFromWindow(BigQueryIO.java:2641)
at com.google.cloud.dataflow.sdk.io.BigQueryIO$TagWithUniqueIdsAndTable.processElement(BigQueryIO.java:2618)
Any suggestions? Thanks in advance.
EDIT: My functional requirement is building a pipeline with some configurable steps based on Datastore entries.
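The stack trace shows the Memcache lookup executing inside BigQueryIO's table function on a Dataflow worker, where the Remote API environment installed in main() is not present, which would explain the null inside NamespaceManager.get. One sketch that fits this requirement (an assumption, not a verified fix) is to resolve the Datastore-backed values once in main(), where the Remote API is installed, and let the table function capture only plain serializable strings:

final String dataset = DatastoreConfigManager2.getString("dataflow.bigquery.dataset");
final String tablePrefix = DatastoreConfigManager2.getString("dataflow.bigquery.tablenametemplate");

bqRows.apply(BigQueryIO.Write
    .named("Insert row")
    .to(new SerializableFunction<BoundedWindow, String>() {
        @Override
        public String apply(BoundedWindow window) {
            // Only the captured strings are used here; no App Engine API call runs on the worker.
            IntervalWindow day = (IntervalWindow) window;
            String dayString = DateTimeFormat.forPattern("yyyyMMdd").print(day.start());
            return dataset + "." + tablePrefix + dayString;
        }
    }));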

Related

Flink AggregateFunction find sum by multiple keys (validation process and testing)

I am using Apache Flink on Kinesis Data Analytics.
Flink version: 1.13.2
Java: 1.11
I am consuming JSON messages from Kafka. Sample input records look as below:
null {"plateNumber":"506b9910-74a7-4c3e-a885-b5e9717efe3a","vignetteStickerId":"9e69df3f-d728-4fc8-9b09-42104588f772","currentTimestamp":"2022/04/07 16:19:55","timestamp":1649362795.444459000,"vehicleType":"TRUCK","vehicleModelType":"TOYOTA"}
null {"plateNumber":"5ffe0326-571e-4b97-8f7b-4f49aebb6993","vignetteStickerId":"6c2e1342-b096-4cc9-a92c-df61571c2c7d","currentTimestamp":"2022/04/07 16:20:00","timestamp":1649362800.638060000,"vehicleType":"CAR","vehicleModelType":"HONDA"}
null {"plateNumber":"d15f49f9-5550-4780-b260-83f3116ba64a","vignetteStickerId":"1366fbfe-7d0a-475f-9249-261ef1dd6de2","currentTimestamp":"2022/04/07 16:20:05","timestamp":1649362805.643749000,"vehicleType":"TRUCK","vehicleModelType":"TOYOTA"}
null {"plateNumber":"803508fb-9701-438e-9028-01bb8d96a804","vignetteStickerId":"b534369f-533e-4c15-ac3f-fc28cf0f3aba","currentTimestamp":"2022/04/07 16:20:10","timestamp":1649362810.648813000,"vehicleType":"CAR","vehicleModelType":"FORD"}
I want to sum these records over a 20-second window, grouped by vehicleType (CAR or TRUCK) and vehicleModelType (TOYOTA, HONDA or FORD). The SQL analogy would be sum(...) GROUP BY vehicleType, vehicleModelType.
I am using an AggregateFunction to achieve this.
import static java.util.Objects.isNull;
import org.apache.flink.api.common.functions.AggregateFunction;
import org.springframework.stereotype.Component;
import com.helecloud.streams.demo.model.Vehicle;
import com.helecloud.streams.demo.model.VehicleStatistics;
@Component
public class VehicleStatisticsAggregator implements AggregateFunction<Vehicle, VehicleStatistics, VehicleStatistics> {
/**
*
*/
private static final long serialVersionUID = 1L;
@Override
public VehicleStatistics createAccumulator() {
System.out.println("Creating Accumulator!!");
return new VehicleStatistics();
}
@Override
public VehicleStatistics add(Vehicle vehicle, VehicleStatistics vehicleStatistics) {
System.out.println("vehicle in add method : " + vehicle);
if (isNull(vehicleStatistics.getVehicleType())) {
vehicleStatistics.setVehicleType(vehicle.getVehicleType());
}
if (isNull(vehicleStatistics.getVehicleModelType())) {
vehicleStatistics.setVehicleModelType(vehicle.getVehicleModelType());
}
// if(isNull(vehicleStatistics.getStart())) {
//
// vehicleStatistics.setStart(vehicle.getTimestamp());
// }
// if(isNull(vehicleStatistics.getCurrentTimestamp())) {
//
// vehicleStatistics.setCurrentTimestamp(vehicle.getCurrentTimestamp());
// }
if (isNull(vehicleStatistics.getCount())) {
vehicleStatistics.setCount(1);
} else {
System.out.println("incrementing count for : vehicleStatistics : " + vehicleStatistics);
vehicleStatistics.setCount(vehicleStatistics.getCount() + 1);
}
vehicleStatistics.setEnd(vehicle.getTimestamp());
System.out.println("vehicleStatistics in add : " + vehicleStatistics);
return vehicleStatistics;
}
@Override
public VehicleStatistics getResult(VehicleStatistics vehicleStatistics) {
System.out.println("vehicleStatistics in getResult : " + vehicleStatistics);
return vehicleStatistics;
}
@Override
public VehicleStatistics merge(VehicleStatistics vehicleStatistics, VehicleStatistics accumulator) {
System.out.println("Coming to merge!!");
VehicleStatistics vs = new VehicleStatistics(
// vehicleStatistics.getStart(),
accumulator.getEnd(),
// vehicleStatistics.getCurrentTimestamp(),
vehicleStatistics.getVehicleType(), vehicleStatistics.getVehicleModelType(),
vehicleStatistics.getCount() + accumulator.getCount());
System.out.println("VehicleStatistics in Merge :" + vs);
return vs;
}
}
In the above code, I also do not see the merge method being called.
Below is the main processing code
@Service
public class ProcessingService {
@Value("${kafka.bootstrap-servers}")
private String kafkaAddress;
@Value("${kafka.group-id}")
private String kafkaGroupId;
public static final String TOPIC = "flink_input";
public static final String VEHICLE_STATISTICS_TOPIC = "flink_output";
@Autowired
private VehicleDeserializationSchema vehicleDeserializationSchema;
@Autowired
private VehicleStatisticsSerializationSchema vehicleStatisticsSerializationSchema;
@PostConstruct
public void startFlinkStreamProcessing() {
try {
processVehicleStatistic();
} catch (Exception e) {
// log.error("Cannot process", e);
e.printStackTrace();
}
}
public void processVehicleStatistic() {
try {
StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
FlinkKafkaConsumer<Vehicle> consumer = createVehicleConsumerForTopic(TOPIC, kafkaAddress, kafkaGroupId);
consumer.setStartFromLatest();
System.out.println("Starting to consume!!");
consumer.assignTimestampsAndWatermarks(WatermarkStrategy.forMonotonousTimestamps());
FlinkKafkaProducer<VehicleStatistics> producer = createVehicleStatisticsProducer(VEHICLE_STATISTICS_TOPIC, kafkaAddress);
DataStream<Vehicle> inputMessagesStream = environment.addSource(consumer);
inputMessagesStream
.keyBy((vehicle -> vehicle.getVehicleType().ordinal()))
// .keyBy(vehicle -> vehicle.getVehicleModelType().ordinal())
// .keyBy(new KeySelector<Vehicle, Tuple2<VehicleType, VehicleModelType>>() {
//
// /**
// *
// */
// private static final long serialVersionUID = 1L;
//
// @Override
// public Tuple2<VehicleType, VehicleModelType> getKey(Vehicle vehicle) throws Exception {
// return Tuple2.of(vehicle.getVehicleType(), vehicle.getVehicleModelType());
// }
// })
// .filter(v -> CAR.equals(v.getVehicleType()))
.window(TumblingEventTimeWindows.of(Time.seconds(20)))
// .windowAll(TumblingEventTimeWindows.of(Time.seconds(10)))
.aggregate(new VehicleStatisticsAggregator())
.addSink(producer);
System.out.println("Adding to Sink!!");
environment.execute("Car Truck Counts By Model");
} catch(Exception e) {
e.printStackTrace();
}
}
private FlinkKafkaConsumer<Vehicle> createVehicleConsumerForTopic(String topic, String kafkaAddress, String kafkaGroup ) {
Properties properties = new Properties();
properties.setProperty("bootstrap.servers", kafkaAddress);
properties.setProperty("group.id", kafkaGroup);
return new FlinkKafkaConsumer<>(topic, vehicleDeserializationSchema, properties);
}
private FlinkKafkaProducer<VehicleStatistics> createVehicleStatisticsProducer(String topic, String kafkaAddress){
return new FlinkKafkaProducer<>(kafkaAddress, topic, vehicleStatisticsSerializationSchema);
}
}
I am getting the result as below.
null {"end":1649362835.665466000,"vehicleType":"TRUCK","vehicleModelType":"HONDA","count":3}
null {"end":1649362825.656024000,"vehicleType":"CAR","vehicleModelType":"TOYOTA","count":1}
null {"end":1649362850.675786000,"vehicleType":"CAR","vehicleModelType":"TOYOTA","count":3}
null {"end":1649362855.677596000,"vehicleType":"TRUCK","vehicleModelType":"TOYOTA","count":1}
But is there a way to validate this?
The other question is: since I am trying to aggregate the result based on multiple keys, is AggregateFunction the correct way to do this?
I am asking because I saw How can I sum multiple fields in Flink?
So if I have to sum over multiple fields, can an AggregateFunction accomplish that (the way I wrote the code)?
Kindly let me know. Thanks in advance.
Merge will only be called if you are using windows that merge -- in other words, if you are using session windows, or a custom merging window.
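For instance, merge would come into play with a session assigner such as EventTimeSessionWindows.withGap(Time.seconds(20)) in place of the tumbling windows used here.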
The correct way to aggregate based on multiple keys is to use keyBy with a composite type, such as Tuple2<VehicleType, VehicleModelType>. Each time you call keyBy the stream is repartitioned from scratch (and not in addition to any previous partitioning).
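For example, a sketch of that composite keyBy, reusing the KeySelector that is commented out in the question (Vehicle, VehicleType, VehicleModelType and VehicleStatisticsAggregator are the question's own classes):

inputMessagesStream
    .keyBy(new KeySelector<Vehicle, Tuple2<VehicleType, VehicleModelType>>() {
        @Override
        public Tuple2<VehicleType, VehicleModelType> getKey(Vehicle vehicle) {
            // key by the (vehicleType, vehicleModelType) pair so sums are per model within each type
            return Tuple2.of(vehicle.getVehicleType(), vehicle.getVehicleModelType());
        }
    })
    .window(TumblingEventTimeWindows.of(Time.seconds(20)))
    .aggregate(new VehicleStatisticsAggregator())
    .addSink(producer);

As for validating the sums, one simple option is to exercise the AggregateFunction directly in a unit test, outside Flink (the Vehicle instances below are hypothetical fixtures):

VehicleStatisticsAggregator aggregator = new VehicleStatisticsAggregator();
VehicleStatistics acc = aggregator.createAccumulator();
acc = aggregator.add(toyotaTruck1, acc); // hypothetical Vehicle fixtures
acc = aggregator.add(toyotaTruck2, acc);
VehicleStatistics result = aggregator.getResult(acc);
// result.getCount() should now be 2, with vehicleType/vehicleModelType matching the fixtures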

Apache Flink: Job throws stack overflow error

I'm trying to execute this simple job in Apache Flink.
public class StreamingJob {
public static void main(String[] args) throws Exception {
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
Properties inputProperties = new Properties();
ObjectMapper mapper = new ObjectMapper();
DataStream<String> eventStream = env
.addSource(new FileSourceFunction("/path/to/file"));
DataStream<ObjectNode> eventStreamObject = eventStream
.map(x -> mapper.readValue(x, ObjectNode.class));
DataStream<ObjectNode> eventStreamWithTime = eventStreamObject
.assignTimestampsAndWatermarks(new AscendingTimestampExtractor<ObjectNode>() {
@Override
public long extractAscendingTimestamp(ObjectNode element) {
String data = element.get("ts").asText();
if(data.endsWith("Z")) {
data = data.substring(0, data.length() -1);
}
return LocalDateTime.parse(data).toEpochSecond(ZoneOffset.UTC);
}
});
eventStreamObject.print();
env.execute("Local job");
}
}
FileSourceFunction is a custom SourceFunction
public class FileSourceFunction implements SourceFunction<String> {
/**
*
*/
private static final long serialVersionUID = 1L;
private String fileName;
private volatile boolean isRunning = true;
public FileSourceFunction(String fileName) {
this.fileName = fileName;
}
@Override
public void run(SourceContext<String> ctx) throws Exception {
// TODO Auto-generated method stub
try (BufferedReader br = new BufferedReader(new FileReader(fileName))) {
try (Stream<String> stream = br.lines()) {
Iterator<String> it = stream.iterator();
while (isRunning && it.hasNext()) {
synchronized (ctx.getCheckpointLock()) {
ctx.collect(it.next());
}
}
}
}
}
@Override
public void cancel() {
isRunning = false;
}
}
When I run the job it throws a StackOverflowError. I'm using Apache Flink 1.8.1.
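One thing worth checking (an assumption, not a confirmed cause of the StackOverflowError): the lambda passed to map() captures the ObjectMapper created in main(), so the mapper has to be serialized with the user function. A RichMapFunction that builds its own mapper in open() avoids that; this sketch assumes org.apache.flink.api.common.functions.RichMapFunction and org.apache.flink.configuration.Configuration are imported alongside the job's existing imports:

DataStream<ObjectNode> eventStreamObject = eventStream
    .map(new RichMapFunction<String, ObjectNode>() {
        private transient ObjectMapper mapper;

        @Override
        public void open(Configuration parameters) {
            // created on the task manager instead of being shipped inside the closure
            mapper = new ObjectMapper();
        }

        @Override
        public ObjectNode map(String value) throws Exception {
            return mapper.readValue(value, ObjectNode.class);
        }
    });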

Test Actors in Play Framework but Database is shutdown

I am using Play 2.0.4 and I'm writing a unit test for actors that make use of the database.
The test begins well, but at a given moment the connection to the database is closed and the running actor fails.
Code:
public class ActorTest extends Helpers {
private FakeApplication app;
private ActorSystem actorSystem;
private ActorRef actorRef;
private BankAccount account;
@Before
public void initTest() {
Map<String, String> params = new HashMap<String, String>();
params.put("db.default.driver", "com.mysql.jdbc.Driver");
params.put("db.default.url", "mysql://root:XXXX#localhost/YYY");
params.put("ebean.default", "models.*");
app = fakeApplication(params);
actorSystem = play.api.libs.concurrent.Akka.system(app.getWrappedApplication());
}
@Test
public void updateAccountTransaction() {
running(app, new Runnable() {
@Override
public void run() {
account = BankAccount.find.byId(new Long(1));
actorRef = actorSystem.actorOf(new Props(new UntypedActorFactory() {
@Override
public UntypedActor create() {
return new AccountTaskActor(account);
}
}));
Calendar fromDate = Calendar.getInstance();
....
....
Calendar toDate = Calendar.getInstance();
final InputRangeDateMessage param = new InputRangeDateMessage(fromDate, toDate);
junit.framework.Assert.assertNotNull(account);
Future<Object> future = Patterns.ask(actorRef, param, 1000000);
Promise<Object> sdf = Akka.asPromise(future);
Promise<Result> r2 = sdf.map(new Function<Object, Result>() {
@Override
public Result apply(Object response) throws Throwable {
if (response instanceof ErrorMessage) {
ErrorMessage e = (ErrorMessage) response;
System.out.println("Error Message " + e.getErrorText());
junit.framework.Assert.assertEquals(e.getErrorCode(), -1);
} else if (response instanceof BankAccountMessage) {
BankAccount a = ((BankAccountMessage) response).getAccount();
System.out.println("BankAccount " + a.accountsLastUpdate);
}
return ok();
}
});
Result test2;
test2 = async(r2);
}
});
}
}
AFAIK, you have to wait for the end of your Promise:
...
Result test2 = r2.get();
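In context, the end of the test would then block on the promise instead of calling async (the assertion is only illustrative):

// block the test until the actor reply has been mapped to a Result
Result test2 = r2.get();
junit.framework.Assert.assertNotNull(test2);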

Increasing heap by excessive use of Java ScriptEngine (Jython)

We have a JavaEE application that uses Jython to execute some Python scripts. Over time the used heap space gets bigger and bigger until there is no heap space left. In a heap dump I can see that there are a lot of Py* classes.
So I wrote a small test program:
TestApp
public class TestApp {
private final ScriptEngineManager scriptEngineManager = new ScriptEngineManager();
private HashMap<String, ScriptEngine> scriptEngines = new HashMap<String, ScriptEngine>();
private final String scriptContainerPath = "";
public static void main(String[] args) throws InterruptedException {
int counter = 1;
while(true) {
System.out.println("iteration: " + counter);
TestApp testApp = new TestApp();
testApp.execute();
counter++;
Thread.sleep(100);
}
}
void execute() {
File scriptContainer = new File(scriptContainerPath);
File[] scripts = scriptContainer.listFiles();
if (scripts != null && scripts.length > 0) {
Arrays.sort(scripts, new Comparator<File>() {
@Override
public int compare(File file1, File file2) {
return file1.getName().compareTo(file2.getName());
}
});
for (File script : scripts) {
String engineName = ScriptExecutor.getEngineNameByExtension(script.getName());
if(!scriptEngines.containsKey(engineName)) {
scriptEngines.put(engineName, scriptEngineManager.getEngineByName(engineName));
}
ScriptEngine scriptEngine = scriptEngines.get(engineName);
try {
ScriptExecutor scriptExecutor = new ScriptExecutor(scriptEngine, script, null);
Boolean disqualify = scriptExecutor.getBooleanScriptValue("disqualify");
String reason = scriptExecutor.getStringScriptValue("reason");
System.out.println("disqualify: " + disqualify);
System.out.println("reason: " + reason);
} catch (Exception e) {
e.printStackTrace();
}
}
// cleanup
for(Map.Entry<String, ScriptEngine> entry : scriptEngines.entrySet()) {
ScriptEngine engine = entry.getValue();
engine.getContext().setErrorWriter(null);
engine.getContext().setReader(null);
engine.getContext().setWriter(null);
}
}
}
}
ScriptExecutor
public class ScriptExecutor {
private final static String pythonExtension = "py";
private final static String pythonEngine = "python";
private final ScriptEngine scriptEngine;
public ScriptExecutor(ScriptEngine se, File file, Map<String, Object> keyValues) throws FileNotFoundException, ScriptException {
scriptEngine = se;
if (keyValues != null) {
for (Map.Entry<String, Object> entry : keyValues.entrySet()) {
scriptEngine.put(entry.getKey(), entry.getValue());
}
}
// execute script
Reader reader = null;
try {
reader = new FileReader(file);
scriptEngine.eval(reader);
} finally {
if (reader != null) {
try {
reader.close();
} catch (IOException e) {
// nothing to do
}
}
}
}
public Boolean getBooleanScriptValue(String key) {
// convert Object to Boolean
}
public String getStringScriptValue(String key) {
// convert Object to String
}
public static String getEngineNameByExtension(String fileName) {
String extension = fileName.substring(fileName.lastIndexOf(".") + 1);
if (pythonExtension.equalsIgnoreCase(extension)) {
System.out.println("Found engine " + pythonEngine + " for extension " + extension + ".");
return pythonEngine;
}
throw new RuntimeException("No suitable engine found for extension " + extension);
}
}
In the specified directory there are 14 Python scripts that all look like this:
disqualify = True
reason = "reason"
I start this program with the following VM-arguments:
-Xrs -Xms16M -Xmx16M -XX:MaxPermSize=32M -XX:NewRatio=3 -Dsun.rmi.dgc.client.gcInterval=300000 -Dsun.rmi.dgc.server.gcInterval=300000 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -server
These are the arguments our AppServer is running with. Only Xms, Xmx and MaxPermSize are smaller in my testcase.
When I run this application I can see that the CMS Old Gen pool increases to its max size. After that the Par Eden Space pool increases. In addition, at some point the ParNewGC no longer runs. The cleanup part improved the situation but didn't resolve the problem. Does anybody have an idea why my heap isn't completely cleaned?
I think I have found a solution for my problem: I removed the JSR223 stuff and now use the PythonInterpreter directly.
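For reference, a minimal sketch of what using the PythonInterpreter directly could look like, assuming Jython's org.python.util.PythonInterpreter API; the class name and the "disqualify" variable mirror the test scripts above:

import java.io.File;
import org.python.core.PyObject;
import org.python.util.PythonInterpreter;

public class DirectJythonRunner {
    // Run one script and read back the "disqualify" variable it sets.
    public Boolean readDisqualify(File script) {
        PythonInterpreter interpreter = new PythonInterpreter();
        try {
            interpreter.execfile(script.getAbsolutePath());
            PyObject disqualify = interpreter.get("disqualify");
            return disqualify == null ? null : (Boolean) disqualify.__tojava__(Boolean.class);
        } finally {
            // release interpreter state instead of caching engines indefinitely
            interpreter.cleanup();
        }
    }
}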

FileInputFormat where filename is KEY and text contents are VALUE

I'd like to use an entire file as a single record for MAP processing, with the filename as the key.
I've read the following post: How to get Filename/File Contents as key/value input for MAP when running a Hadoop MapReduce Job?
and while the theory of the top answer is solid, no code or "how-to" is actually provided.
Here is my custom FileInputFormat and the corresponding RecordReader, which compile, yet do not produce ANY record data.
Thanks for any help.
public class CommentsInput
extends FileInputFormat<Text,Text> {
protected boolean isSplitable(FileSystem fs, Path filename)
{
return false;
}
@Override
public RecordReader<Text, Text> createRecordReader(InputSplit split, TaskAttemptContext ctx)
throws IOException, InterruptedException {
return new CommentFileRecordReader((FileSplit) split, ctx.getConfiguration());
}
/////////////////////////
public class CommentFileRecordReader
extends RecordReader<Text,Text> {
private InputStream in;
private long start;
private long length;
private long position;
private Text key;
private Text value;
private boolean processed;
private FileSplit fileSplit;
private Configuration conf;
public CommentFileRecordReader(FileSplit fileSplit, Configuration conf) throws IOException
{
this.fileSplit = fileSplit;
this.conf=conf;
}
/** Boilerplate initialization code for file input streams. */
@Override
public void initialize(InputSplit split,
TaskAttemptContext context)
throws IOException, InterruptedException {
Configuration conf = context.getConfiguration();
fileSplit = (FileSplit) split;
this.start = fileSplit.getStart();
this.length = fileSplit.getLength();
this.position = 0;
this.processed = false;
Path path = fileSplit.getPath();
FileSystem fs = path.getFileSystem(conf);
FSDataInputStream in = fs.open(path);
CompressionCodecFactory codecs = new CompressionCodecFactory(conf);
CompressionCodec codec = codecs.getCodec(path);
if (codec != null)
this.in = codec.createInputStream(in);
else
this.in = in;
// If using Writables:
// key = new Text();
// value = new Text();
}
public boolean next(Text key, Text value) throws IOException
{
if(!processed)
{
key = new Text(fileSplit.getPath().toString());
Path file = fileSplit.getPath();
FileSystem fs = file.getFileSystem(conf);
FSDataInputStream in = null;
byte[] contents = new byte[(int) fileSplit.getLength()];
try
{
in = fs.open(file);
IOUtils.readFully(in, contents, 0, contents.length);
value.set(contents.toString());
}
finally
{
IOUtils.closeStream(in);
}
processed = true;
return true;
}
return false;
}
@Override
public boolean nextKeyValue() throws IOException {
// TODO parse the next key value, update position and return true.
return false;
}
@Override
public Text getCurrentKey() {
return key;
}
@Override
public Text getCurrentValue() {
return value;
}
/** Returns our progress within the split, as a float between 0 and 1. */
@Override
public float getProgress() {
if (length == 0)
return 0.0f;
return Math.min(1.0f, position / (float)length);
}
@Override
public void close() throws IOException {
if (in != null)
in.close();
}
}
You need to find a way to define your own key class and make sure your classes use it. You can look up how to define your own key class, and you can get the file name by calling the getName() method on its path, then use it to make your key.
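For example, a sketch of a nextKeyValue() that emits a single (file path, file contents) record per split, reusing the fields already declared in CommentFileRecordReader (an illustration rather than a tested drop-in):

@Override
public boolean nextKeyValue() throws IOException {
    if (processed) {
        return false; // only one record per (non-splittable) file
    }
    byte[] contents = new byte[(int) fileSplit.getLength()];
    IOUtils.readFully(in, contents, 0, contents.length); // read the whole file body
    key = new Text(fileSplit.getPath().toString()); // the file path/name is the key
    value = new Text(new String(contents, "UTF-8")); // the file contents are the value
    position = length;
    processed = true;
    return true;
}

Note that the new-API framework only calls nextKeyValue()/getCurrentKey()/getCurrentValue(), not the old-style next(Text, Text) method in the question, which appears to be why the existing code never produces any records.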
