I'd like to use an entire file as a single record for MAP processing, with the filename as the key.
I've read the following post: How to get Filename/File Contents as key/value input for MAP when running a Hadoop MapReduce Job?
and while the theory of the top answer is solid, no code or "how-to" is actually provided.
Here is my custom FileInputFormat and the corresponding RecordReader, which compile, yet do not produce ANY record data.
Thanks for any help.
public class CommentsInput
extends FileInputFormat<Text,Text> {
protected boolean isSplitable(FileSystem fs, Path filename)
{
return false;
}
@Override
public RecordReader<Text, Text> createRecordReader(InputSplit split, TaskAttemptContext ctx)
throws IOException, InterruptedException {
return new CommentFileRecordReader((FileSplit) split, ctx.getConfiguration());
}
}
/////////////////////////
public class CommentFileRecordReader
extends RecordReader<Text,Text> {
private InputStream in;
private long start;
private long length;
private long position;
private Text key;
private Text value;
private boolean processed;
private FileSplit fileSplit;
private Configuration conf;
public CommentFileRecordReader(FileSplit fileSplit, Configuration conf) throws IOException
{
this.fileSplit = fileSplit;
this.conf=conf;
}
/** Boilerplate initialization code for file input streams. */
@Override
public void initialize(InputSplit split,
TaskAttemptContext context)
throws IOException, InterruptedException {
Configuration conf = context.getConfiguration();
fileSplit = (FileSplit) split;
this.start = fileSplit.getStart();
this.length = fileSplit.getLength();
this.position = 0;
this.processed = false;
Path path = fileSplit.getPath();
FileSystem fs = path.getFileSystem(conf);
FSDataInputStream in = fs.open(path);
CompressionCodecFactory codecs = new CompressionCodecFactory(conf);
CompressionCodec codec = codecs.getCodec(path);
if (codec != null)
this.in = codec.createInputStream(in);
else
this.in = in;
// If using Writables:
// key = new Text();
// value = new Text();
}
public boolean next(Text key, Text value) throws IOException
{
if(!processed)
{
key = new Text(fileSplit.getPath().toString());
Path file = fileSplit.getPath();
FileSystem fs = file.getFileSystem(conf);
FSDataInputStream in = null;
byte[] contents = new byte[(int) fileSplit.getLength()];
try
{
in = fs.open(file);
IOUtils.readFully(in, contents, 0, contents.length);
value.set(contents.toString());
}
finally
{
IOUtils.closeStream(in);
}
processed = true;
return true;
}
return false;
}
@Override
public boolean nextKeyValue() throws IOException {
// TODO parse the next key value, update position and return true.
return false;
}
@Override
public Text getCurrentKey() {
return key;
}
@Override
public Text getCurrentValue() {
return value;
}
/** Returns our progress within the split, as a float between 0 and 1. */
@Override
public float getProgress() {
if (length == 0)
return 0.0f;
return Math.min(1.0f, position / (float)length);
}
@Override
public void close() throws IOException {
if (in != null)
in.close();
}
}
You need to define your own key and make sure your classes actually use it. You can get the file name by calling the getName() method on the split's path and then use it to build your key.
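To make the failure concrete: with the new mapreduce API the framework drives the reader through nextKeyValue()/getCurrentKey()/getCurrentValue(), so the custom next(Text, Text) above is never called, and the stubbed nextKeyValue() returns false immediately, which is why no records appear. A minimal sketch of a nextKeyValue() that emits the whole file once, keyed by its path, reusing the fields from the reader above:
@Override
public boolean nextKeyValue() throws IOException {
    if (processed) {
        return false;
    }
    Path file = fileSplit.getPath();
    // Full path as the key; use file.getName() if only the file name is wanted.
    key = new Text(file.toString());
    byte[] contents = new byte[(int) fileSplit.getLength()];
    FSDataInputStream fsin = null;
    try {
        FileSystem fs = file.getFileSystem(conf);
        fsin = fs.open(file);
        IOUtils.readFully(fsin, contents, 0, contents.length);
        // Text(byte[]) decodes the bytes as UTF-8; contents.toString() would
        // only yield the array's object reference, not the file contents.
        value = new Text(contents);
    } finally {
        IOUtils.closeStream(fsin);
    }
    position = length;
    processed = true;
    return true;
}
Note also that org.apache.hadoop.mapreduce.lib.input.FileInputFormat declares isSplitable(JobContext, Path); the isSplitable(FileSystem, Path) overload above never overrides it, so files may still be split.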
Related
I am trying to put and read files on a remote file system using WinSCP over an SFTP connection. The leaf node of the file system is an S3 object store which contains the files (e.g. xyz.txt).
Below is the overridden method of the FileChannel class.
XYZFileSystemProvider
public class XYZFileSystemProvider extends FileSystemProvider {
@Override
public FileChannel newFileChannel(Path path, Set<? extends OpenOption> options, FileAttribute<?>... attrs)
throws IOException {
// TODO Auto-generated method stub
Collection<XYZOptions.OpenMode> modes = XYZOptions.OpenMode.fromOpenOptions(options);
if (modes.isEmpty()) {
modes = EnumSet.of(XYZOptions.OpenMode.Read, XYZOptions.OpenMode.Write);
}
// TODO: process file attributes
return new XYZFileSystemChannel(path, modes);
}
}
XYZFileSystemChannel
public class XYZFileSystemChannel extends XYZRemotePathChannel{
public XYZFileSystemChannel(XYZPath p, Collection<XYZOptions.OpenMode> modes) throws IOException {
this(Objects.requireNonNull(p, "No target path").toString(), p.getFileSystem(), modes);
}
public XYZFileSystemChannel(String remotePath, XYZFileSystem fs, Collection<XYZOptions.OpenMode> modes) throws IOException {
super(remotePath, fs, true, modes);
}
}
XYZRemotePathChannel
public class XYZRemotePathChannel extends FileChannel {
private AmazonS3Component getAmazonS3Instance() {
return SpringContext.getBean(AmazonS3Component.class);
}
private final String path;
private final Collection<XYZOptions.OpenMode> modes;
private final boolean closeOnExit;
private XYZFileSystem fileSystem;
private final AtomicLong posTracker = new AtomicLong(0L);
public static final Set<XYZOptions.OpenMode> READ_MODES =
Collections.unmodifiableSet(EnumSet.of(XYZOptions.OpenMode.Read));
private final Object lock = new Object();
private final AtomicReference<Thread> blockingThreadHolder = new AtomicReference<>(null);
public XYZRemotePathChannel(String path, XYZFileSystem fileSystem, boolean closeOnExit,
Collection<XYZOptions.OpenMode> modes) throws IOException {
this.path = ValidateUtils.checkNotNullAndNotEmpty(path, "No remote file path specified");
this.modes = Objects.requireNonNull(modes, "No channel modes specified");
this.closeOnExit = closeOnExit;
this.fileSystem = fileSystem;
}
@Override
public int read(ByteBuffer dst) throws IOException {
// TODO Auto-generated method stub
log.debug("Position of dst is : {}",dst.position());
log.debug("Reading the bytes of the file : {}", dst);
//Some code to be done here in order to read dst and send bytes of the file received from the s3 store
return (int) doRead(Collections.singletonList(dst), -1);
}
protected long doRead(List<ByteBuffer> buffers, long position) throws IOException {
log.debug("Do Reading the bytes of the file of list of buffer : {} and position :{}", buffers , position);
ensureOpen(READ_MODES);
synchronized (lock) {
boolean completed = false;
boolean eof = false;
long curPos = (position >= 0L) ? position : posTracker.get();
byte[] bytes = new byte[(int) curPos];
try {
long totalRead = 0;
beginBlocking();
String [] parts = this.path.toString().replaceFirst("^/", "").split("/");
String bucket = parts[parts.length-2];
String fileName = parts[parts.length-1];
InputStream fileContent = getAmazonS3Instance().getFileFromBucket(bucket, fileName);
log.debug("Contens of the file: {} from bucket: {} are : {}", fileName , bucket, fileContent);
//Some code to be done here to return the content byte length??
int fileLength = fileContent.read(bytes, 1, (int) curPos);
log.debug("After reading the file content the file length is : {}", fileLength);
return fileLength;
} finally {
if (position < 0L) {
posTracker.set(curPos);
}
endBlocking(completed);
}
}
}
private void endBlocking(boolean completed) throws AsynchronousCloseException {
blockingThreadHolder.set(null);
end(completed);
}
private void beginBlocking() {
begin();
blockingThreadHolder.set(Thread.currentThread());
}
@Override
public FileChannel position(long newPosition) throws IOException {
// TODO Auto-generated method stub
log.debug("Setting the position of the file : {}", newPosition);
if (newPosition < 0L) {
throw new IllegalArgumentException("position(" + this.path + ") illegal file channel position: " + newPosition);
}
ensureOpen(Collections.emptySet());
posTracker.set(newPosition);
return this;
}
private void ensureOpen(Collection<XYZOptions.OpenMode> reqModes) throws IOException {
if (!isOpen()) {
throw new ClosedChannelException();
}
if (GenericUtils.size(reqModes) > 0) {
for (XYZOptions.OpenMode m : reqModes) {
if (this.modes.contains(m)) {
return;
}
}
throw new IOException("ensureOpen(" + this.path + ") current channel modes (" + this.modes
+ ") do contain any of the required: " + reqModes);
}
}
}
XYZOptions
public class XYZOptions {
enum OpenMode {
Read, Write, Append, Create, Truncate, Exclusive;
public static final Set<OpenOption> SUPPORTED_OPTIONS = Collections
.unmodifiableSet(EnumSet.of(StandardOpenOption.READ, StandardOpenOption.APPEND,
StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.WRITE,
StandardOpenOption.CREATE_NEW, StandardOpenOption.SPARSE));
public static Set<OpenMode> fromOpenOptions(Collection<? extends OpenOption> options) {
if (GenericUtils.isEmpty(options)) {
return Collections.emptySet();
}
Set<OpenMode> modes = EnumSet.noneOf(OpenMode.class);
for (OpenOption option : options) {
if (option == StandardOpenOption.READ) {
modes.add(Read);
} else if (option == StandardOpenOption.APPEND) {
modes.add(Append);
} else if (option == StandardOpenOption.CREATE) {
modes.add(Create);
} else if (option == StandardOpenOption.TRUNCATE_EXISTING) {
modes.add(Truncate);
} else if (option == StandardOpenOption.WRITE) {
modes.add(Write);
} else if (option == StandardOpenOption.CREATE_NEW) {
modes.add(Create);
modes.add(Exclusive);
} else if (option == StandardOpenOption.SPARSE) {
continue;
} else {
throw new IllegalArgumentException("Unsupported open option: " + option);
}
}
return modes;
}
}
}
I am able to fetch the file from the S3 store but am not sure how to read and pass all of its contents when someone drags and drops it from the remote file location to their own system using WinSCP. I know I am missing some code at the marked place but am not sure how to achieve it.
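What I believe is missing at the marked place in doRead is a plain stream-to-buffer copy, something like the sketch below (getFileFromBucket is my own helper; I am assuming its stream starts at the beginning of the object, so I skip to the channel position first and then drain into the destination buffers):
long totalRead = 0;
long skipped = 0;
while (skipped < curPos) {
    long n = fileContent.skip(curPos - skipped);
    if (n <= 0) { eof = true; break; }
    skipped += n;
}
byte[] chunk = new byte[8192];
for (ByteBuffer buffer : buffers) {
    while (buffer.hasRemaining()) {
        int read = fileContent.read(chunk, 0, Math.min(chunk.length, buffer.remaining()));
        if (read < 0) { eof = true; break; }
        buffer.put(chunk, 0, read);
        totalRead += read;
    }
    if (eof) break;
}
fileContent.close();
curPos += totalRead;
completed = true;
return (totalRead == 0 && eof) ? -1 : totalRead;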
While ingesting JSON data from Kafka and saving it as Parquet files to be loaded into Hive, I ran into the same issue mentioned in Flink BucketingSink with Custom AvroParquetWriter create empty file. Does anyone know how to resolve it? Thank you. I am using Apache Flink 1.4.0 + HDFS 2.7.3.
You can directly implement the Writer interface. It could look like this:
import org.apache.flink.streaming.connectors.fs.Writer;
import org.apache.flink.util.Preconditions;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.avro.AvroParquetWriter;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import java.io.IOException;
/**
* Parquet writer.
*
* @param <T>
*/
public class ParquetSinkWriter<T extends GenericRecord> implements Writer<T> {
private static final long serialVersionUID = -975302556515811398L;
private final CompressionCodecName compressionCodecName = CompressionCodecName.SNAPPY;
private final int pageSize = 64 * 1024;
private final String schemaRepresentation;
private transient Schema schema;
private transient ParquetWriter<GenericRecord> writer;
private transient Path path;
private int position;
public ParquetSinkWriter(String schemaRepresentation) {
this.schemaRepresentation = Preconditions.checkNotNull(schemaRepresentation);
}
@Override
public void open(FileSystem fs, Path path) throws IOException {
this.position = 0;
this.path = path;
if (writer != null) {
writer.close();
}
writer = createWriter();
}
@Override
public long flush() throws IOException {
Preconditions.checkNotNull(writer);
position += writer.getDataSize();
writer.close();
writer = createWriter();
return position;
}
@Override
public long getPos() throws IOException {
Preconditions.checkNotNull(writer);
return position + writer.getDataSize();
}
@Override
public void close() throws IOException {
if (writer != null) {
writer.close();
writer = null;
}
}
@Override
public void write(T element) throws IOException {
Preconditions.checkNotNull(writer);
writer.write(element);
}
@Override
public Writer<T> duplicate() {
return new ParquetSinkWriter<>(schemaRepresentation);
}
private ParquetWriter<GenericRecord> createWriter() throws IOException {
if (schema == null) {
schema = new Schema.Parser().parse(schemaRepresentation);
}
return AvroParquetWriter.<GenericRecord>builder(path)
.withSchema(schema)
.withDataModel(new GenericData())
.withCompressionCodec(compressionCodecName)
.withPageSize(pageSize)
.build();
}
}
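Wiring this writer into the BucketingSink from the linked question could then look roughly like this (the output path, schema string and stream variable are placeholders):
BucketingSink<GenericRecord> sink = new BucketingSink<>("hdfs:///tmp/parquet-out");
sink.setWriter(new ParquetSinkWriter<GenericRecord>(schemaString));
stream.addSink(sink);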
I am trying to access GAE Memcache and Datastore APIs from Dataflow.
I have followed How to use memcache in dataflow? and set up the Remote API https://cloud.google.com/appengine/docs/java/tools/remoteapi
In my pipeline I have written
public static void main(String[] args) throws IOException {
RemoteApiOptions remApiOpts = new RemoteApiOptions()
.server("xxx.appspot.com", 443)
.useApplicationDefaultCredential();
RemoteApiInstaller installer = new RemoteApiInstaller();
installer.install(remApiOpts);
try {
DatastoreConfigManager2.registerConfig("myconfig");
final String topic = DatastoreConfigManager2.getString("pubsub.topic");
final String stagingDir = DatastoreConfigManager2.getString("dataflow.staging");
...
bqRows.apply(BigQueryIO.Write
.named("Insert row")
.to(new SerializableFunction<BoundedWindow, String>() {
@Override
public String apply(BoundedWindow window) {
// The cast below is safe because CalendarWindows.days(1) produces IntervalWindows.
IntervalWindow day = (IntervalWindow) window;
String dataset = DatastoreConfigManager2.getString("dataflow.bigquery.dataset");
String tablePrefix = DatastoreConfigManager2.getString("dataflow.bigquery.tablenametemplate");
String dayString = DateTimeFormat.forPattern("yyyyMMdd")
.print(day.start());
String tableName = dataset + "." + tablePrefix + dayString;
LOG.info("Writing to BigQuery " + tableName);
return tableName;
}
})
where DatastoreConfigManager2 is
public class DatastoreConfigManager2 {
private static final DatastoreService DATASTORE = DatastoreServiceFactory.getDatastoreService();
private static final MemcacheService MEMCACHE = MemcacheServiceFactory.getMemcacheService();
static {
MEMCACHE.setErrorHandler(ErrorHandlers.getConsistentLogAndContinue(Level.INFO));
}
private static Set<String> configs = Sets.newConcurrentHashSet();
public static void registerConfig(String name) {
configs.add(name);
}
private static class DatastoreCallbacks {
// https://cloud.google.com/appengine/docs/java/datastore/callbacks
@PostPut
public void updateCacheOnPut(PutContext context) {
Entity entity = context.getCurrentElement();
if (configs.contains(entity.getKind())) {
String id = (String) entity.getProperty("id");
String value = (String) entity.getProperty("value");
MEMCACHE.put(id, value);
}
}
}
private static String lookup(String id) {
String value = (String) MEMCACHE.get(id);
if (value != null) return value;
else {
for (String config : configs) {
try {
PreparedQuery pq = DATASTORE.prepare(new Query(config)
.setFilter(new FilterPredicate("id", FilterOperator.EQUAL, id)));
for (Entity entity : pq.asIterable()) {
value = (String) entity.getProperty("value"); // use last
}
if (value != null) MEMCACHE.put(id, value);
} catch (Exception e) {
e.printStackTrace();
}
}
}
return value;
}
public static String getString(String id) {
return lookup(id);
}
}
When my pipeline runs on Dataflow I get the exception
Caused by: java.lang.NullPointerException
at com.google.appengine.api.NamespaceManager.get(NamespaceManager.java:101)
at com.google.appengine.api.memcache.BaseMemcacheServiceImpl.getEffectiveNamespace(BaseMemcacheServiceImpl.java:65)
at com.google.appengine.api.memcache.AsyncMemcacheServiceImpl.doGet(AsyncMemcacheServiceImpl.java:401)
at com.google.appengine.api.memcache.AsyncMemcacheServiceImpl.get(AsyncMemcacheServiceImpl.java:412)
at com.google.appengine.api.memcache.MemcacheServiceImpl.get(MemcacheServiceImpl.java:49)
at my.training.google.common.config.DatastoreConfigManager2.lookup(DatastoreConfigManager2.java:80)
at my.training.google.common.config.DatastoreConfigManager2.getString(DatastoreConfigManager2.java:117)
at my.training.google.mss.pipeline.InsertIntoBqWithCalendarWindow$1.apply(InsertIntoBqWithCalendarWindow.java:101)
at my.training.google.mss.pipeline.InsertIntoBqWithCalendarWindow$1.apply(InsertIntoBqWithCalendarWindow.java:95)
at com.google.cloud.dataflow.sdk.io.BigQueryIO$Write$Bound$TranslateTableSpecFunction.apply(BigQueryIO.java:1496)
at com.google.cloud.dataflow.sdk.io.BigQueryIO$Write$Bound$TranslateTableSpecFunction.apply(BigQueryIO.java:1486)
at com.google.cloud.dataflow.sdk.io.BigQueryIO$TagWithUniqueIdsAndTable.tableSpecFromWindow(BigQueryIO.java:2641)
at com.google.cloud.dataflow.sdk.io.BigQueryIO$TagWithUniqueIdsAndTable.processElement(BigQueryIO.java:2618)
Any suggestions? Thanks in advance.
EDIT: my functional requirement is building a pipeline with some configurable steps based on Datastore entries.
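A plausible cause, given the stack trace: installer.install(remApiOpts) in main() runs on the JVM that constructs the pipeline, while the SerializableFunction executes in separate JVMs on the Dataflow workers, where no App Engine API environment is installed, so the Memcache call dereferences a null environment. A sketch of installing the Remote API inside the function itself (server name copied from the question):
.to(new SerializableFunction<BoundedWindow, String>() {
    @Override
    public String apply(BoundedWindow window) {
        RemoteApiOptions opts = new RemoteApiOptions()
            .server("xxx.appspot.com", 443)
            .useApplicationDefaultCredential();
        RemoteApiInstaller installer = new RemoteApiInstaller();
        try {
            installer.install(opts);
            try {
                IntervalWindow day = (IntervalWindow) window;
                String dataset = DatastoreConfigManager2.getString("dataflow.bigquery.dataset");
                String tablePrefix = DatastoreConfigManager2.getString("dataflow.bigquery.tablenametemplate");
                return dataset + "." + tablePrefix
                        + DateTimeFormat.forPattern("yyyyMMdd").print(day.start());
            } finally {
                installer.uninstall();
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
})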
So I have this base class:
public abstract class WiresharkFile : IDisposable
{
private string _fileName;
private int _packets;
private int _packetsSent;
private string _duration;
private double _speed;
private int _progress;
protected abstract WiresharkFilePacket ReadPacket();
public abstract IEnumerator<WiresharkFilePacket> GetEnumerator();
public abstract void Rewind();
public string FileName
{
get { return _fileName; }
set { _fileName = value; }
}
public int Packets
{
get { return _packets; }
set { _packets = value; }
}
public void Dispose()
{
// implemented inside sub class.
}
}
And a specific Wireshark format (libpcap):
public class Libpcap : WiresharkFile, IDisposable, IEnumerable<WiresharkFilePacket>
{
private BinaryReader binaryReader;
private Version version;
private uint snaplen;
private int thiszone;
private uint sigfigs;
private LibpcapLinkType linktype;
private long basePos;
private bool byteSwap;
private static uint MAGIC = 0xa1b2c3d4;
private static uint MAGIC_ENDIAN = 0xd4c3b2a1;
public Libpcap(string path)
: this(new FileStream(path, FileMode.Open, FileAccess.Read))
{
FileName = path;
}
private Libpcap(Stream fileStream)
{
...
}
public override void Rewind()
{
binaryReader = new BinaryReader(new FileStream(FileName, FileMode.Open, FileAccess.Read));
binaryReader.BaseStream.Position = basePos;
}
public void Dispose()
{
if (binaryReader != null)
binaryReader.Close();
}
}
I removed almost all the parts showing how I read this file.
Adding files into my application
I have this object list:
public ObservableCollection<WiresharkFile> wiresharkFiles { get; set; }
This list is bound to my ListView.
When the user chooses files to add to my application:
string[] files = openFileDialog.FileNames;
I check these files via another class:
public class FileValidation
{
public static void DoWork(IEnumerable<string> files)
{
CancellationTokenSource tokenSource = new CancellationTokenSource();
CancellationToken token = tokenSource.Token;
Task task = Task.Factory.StartNew(() =>
{
try
{
Parallel.ForEach(files,
new ParallelOptions
{
MaxDegreeOfParallelism = 3
},
file =>
{
ProcessFile(file);
});
}
catch (Exception)
{ }
}, tokenSource.Token,
TaskCreationOptions.None,
TaskScheduler.Default).ContinueWith
(t =>
{
if (FinishValidationEventHandler != null)
FinishValidationEventHandler();
}
, TaskScheduler.FromCurrentSynchronizationContext()
);
}
private static void ProcessFile(string file)
{
ReadWiresharkFormat(file);
using (WiresharkFile wiresharkFile = new Libpcap(file))
{
WiresharkFileInfo.ReadInfo(wiresharkFile);
// Add file into my list.
}
}
private static WiresharkFileFormat ReadWiresharkFormat(string file)
{
using (BinaryReader binaryReader = new BinaryReader(File.Open(file, FileMode.Open, FileAccess.Read)))
{
// Open file and read first 4 bytes in order to verify file type.
}
}
private static void ReadInfo(WiresharkFile wiresharkFile)
{
foreach (WiresharkFilePacket packet in wiresharkFile)
{
// Collect file information (number of packets...)
}
}
}
OK, so far everything is good.
Now, when I add many files, say around 1000, I can see my memory usage grow by about 200MB, but after clearing this list the memory usage does not change.
Any idea what could cause this?
I made an Android dictionary application. I created a database named "kamusJawa.sqlite" and copied it to the assets folder. I tried the code in this link: Own Database in Assets Folder on Android Eclipse Project
This is my database manager class:
package com.kamusJI;
public class DBHelper extends SQLiteOpenHelper{
private static String DBPATH = "/data/data/com.kamusJI/databases/";
private static String DBNAME = "kamusJawa.sqlite";
private SQLiteDatabase DBSQ;
private final Context KJICtx;
public DBHelper(Context context) throws IOException {
super(context, DBNAME, null, 1);
this.KJICtx = context;
// TODO Auto-generated constructor stub
boolean dbexist = cekDB();
if (dbexist) {
//System.out.println("Database exists");
openDB();
} else {
System.out.println("Database doesn't exist");
createDB();
}
}
public void createDB() throws IOException{
boolean dbExist = cekDB();
if(!dbExist){
this.getReadableDatabase();
try{
salinDB();
}catch (IOException e){
throw new Error("Gagal menyalin database");
}
}
}
boolean cekDB() {
//SQLiteDatabase cekDatabase = null;
boolean cekdb = false;
try{
String path = DBPATH + DBNAME;
File dbfile = new File(path);
//cekDatabase = SQLiteDatabase.openDatabase(path, null, SQLiteDatabase.OPEN_READONLY);
cekdb = dbfile.exists();
}catch(SQLException e){
System.out.println("Database tidak ada");
}
return cekdb;
//return cekDatabase !=null ? true : false;
}
private void salinDB() throws IOException{
AssetManager AM = KJICtx.getAssets();
File DbFile = new File(DBPATH+DBNAME);
InputStream in = KJICtx.getAssets().open(DBNAME);
//OutputStream out = new FileOutputStream(DbFile);
OutputStream out = new FileOutputStream("/data/data/com.kamusJI/databases/kamusJawa.sqlite");
DbFile.createNewFile();
byte[] b = new byte[1024];
int i, r;
String[] Files = AM.list("");
Arrays.sort(Files);
i= 1;
String fdb = String.format("kamusJawa.db.00%d", i);
while(Arrays.binarySearch(Files, fdb)>=0){
//InputStream in = AM.open(fdb);
while(( r = in.read(b))>0)
out.write(b,0,r);
in.close();
i++;
fdb = String.format("kamusJawa.db.00%d", i);
}
out.flush();
out.close();
}
public void openDB() throws SQLException{
String path = DBPATH+DBNAME;
DBSQ = SQLiteDatabase.openDatabase(path, null, SQLiteDatabase.OPEN_READONLY);
}
public synchronized void close(){
if(DBSQ !=null)
DBSQ.close();
super.close();
}
@Override
public void onCreate(SQLiteDatabase arg0) {
// TODO Auto-generated method stub
}
@Override
public void onUpgrade(SQLiteDatabase arg0, int arg1, int arg2) {
// TODO Auto-generated method stub
}
}
and this is my main class:
package com.kamusJI;
public class KJI extends ListActivity {
private KJI this_class = this;
String[] Menu = {"Basa Jawa", "Bahasa Indonesia", "Tambah Data"};
/** Called when the activity is first created. */
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.main);
setListAdapter(new ArrayAdapter<String>(this, R.layout.row, R.id.Cari, Menu));
ListView lv = getListView();
lv.setTextFilterEnabled(false);
/* Defines On Item Click callback method */
lv.setOnItemClickListener(new OnItemClickListener() {
@Override
public void onItemClick(AdapterView<?> parent, View view, int position,
long id) {
Intent action = null;
switch(position) {
case 0:
case 1:
action = new Intent(getApplicationContext(), Cari.class);
action.putExtra("MODE", position);
break;
case 2:
action = new Intent(getApplicationContext(), Tambah.class);
action.putExtra("MODE", position);
break;
case 3:
finish();
return;
}
startActivity(action);
Toast.makeText(getApplicationContext(), ((TextView) view).getText(), Toast.LENGTH_SHORT).show();
}
});
}
public void InitDatabase() {
AsyncTask<String, Void, String> InitDB = new AsyncTask<String, Void, String>() {
Dialog progress = null;
String msg;
DBHelper dbhelper;
@Override
protected void onPreExecute() {
try {
dbhelper = new DBHelper(this_class);
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
if (!dbhelper.cekDB())
progress = ProgressDialog.show(this_class, "", "Installing Database.\nPlease wait.");
super.onPreExecute();
}
@Override
protected String doInBackground(String... params) {
try {
dbhelper.createDB();
msg = "Database successfully installed.";
} catch (IOException ioe) {
msg = "Database installation failed.";
}
return msg;
}
@Override
protected void onPostExecute(String result) {
super.onPostExecute(result);
if (progress!=null) {
progress.dismiss();
Toast.makeText(getApplicationContext(), result, Toast.LENGTH_SHORT).show();
}
}
};
InitDB.execute(new String());
}
}
When I run my application and then go to the file explorer, I can't find data/data/com.kamusJI/databases. How can that be?
Change your database name extension to .db.
You need special permissions like root access to read the path:
/data/data/com.package/databases
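On a non-rooted device that directory is simply not visible to a file explorer, which does not mean the copy failed. Independently of that, here is a sketch of resolving the path through the Context instead of hardcoding it (DBNAME and KJICtx as in the question):
File dbFile = KJICtx.getDatabasePath(DBNAME);
File dbDir = dbFile.getParentFile();
if (dbDir != null && !dbDir.exists()) {
    dbDir.mkdirs(); // the databases/ directory may not exist on a fresh install
}
boolean dbExists = dbFile.exists();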