JDBI: I want to bulk update, using something like bulk insert, without creating an object

I have to do a batch update using JDBI, in the same way as a batch insert, without creating an object. Does anyone know how to do this? Please let me know. Remember: no objects, i.e. no mapping of columns to an object's attributes.

Use argument binding.
Perhaps this is what you're looking for?
PreparedBatch insertBatch = handle.prepareBatch("INSERT INTO foo.bar (baz) VALUES (:bazArgument)");
// assume what you want to insert is stored in a List<String> bazes
for (String st : bazes) {
    insertBatch.bind("bazArgument", st).add();
}
int[] countArray = insertBatch.execute();
You can extend it for more variables etc.
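The same pattern covers the bulk update case from the question; here is a minimal sketch of a batched UPDATE (the table, columns and list names are made up for illustration, and everything is still bound as plain values rather than mapped from an object):
PreparedBatch updateBatch = handle.prepareBatch("UPDATE foo.bar SET baz = :baz WHERE id = :id");
// assume the new values are in a List<String> newBazes and the matching keys in a List<Long> ids
for (int i = 0; i < ids.size(); i++) {
    updateBatch.bind("id", ids.get(i))
               .bind("baz", newBazes.get(i))
               .add();
}
int[] updateCounts = updateBatch.execute();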

Here is a simple example of a batch operation with JDBI and a MySQL database. The table is of InnoDB type.
package com.zetcode;

import org.skife.jdbi.v2.Batch;
import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;

public class JDBIEx6 {

    public static void main(String[] args) {

        DBI dbi = new DBI("jdbc:mysql://localhost:3306/testdb",
                "testuser", "test623");
        Handle handle = dbi.open();
        Batch batch = handle.createBatch();

        batch.add("DROP TABLE IF EXISTS Friends");
        batch.add("CREATE TABLE Friends(Id INT AUTO_INCREMENT PRIMARY KEY, Name TEXT)");
        batch.add("INSERT INTO Friends(Name) VALUES ('Monika')");
        batch.add("INSERT INTO Friends(Name) VALUES ('Tom')");
        batch.add("INSERT INTO Friends(Name) VALUES ('Jane')");
        batch.add("INSERT INTO Friends(Name) VALUES ('Robert')");

        batch.execute();
    }
}
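One detail the example leaves out for brevity: the handle holds a database connection, so in a real application you would close it once the batch has run, for example:
try {
    batch.execute();
} finally {
    handle.close();
}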
The following is a Maven POM file for the project.
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.zetcode</groupId>
<artifactId>JDBIEx6</artifactId>
<version>1.0-SNAPSHOT</version>
<packaging>jar</packaging>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target>
</properties>
<dependencies>
<dependency>
<groupId>org.jdbi</groupId>
<artifactId>jdbi</artifactId>
<version>2.73</version>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.39</version>
</dependency>
</dependencies>
</project>
You can learn more about JDBI from my tutorial.


Where is the problem, in my code (YML file) or the Azure database? Spring Boot and Azure

First I started the project and created the Azure database.
After that I linked the DB to my project, and it ran.
But now it does not, and it reports this run error:
22:40:19.125 [main] ERROR org.springframework.boot.SpringApplication - Application run failed
org.yaml.snakeyaml.scanner.ScannerException: mapping values are not allowed here
in 'reader', line 4, column 13:
username: javatechi
Where is the problem, in my code (YML file) or the Azure database?
application.yml
spring:
  datasource:
    url:jdbc:jdbc:sqlserver://xxxx.database.windows.net:1433;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;
    username: xxxx
    password: xxxxxxxx
  jpa:
    show-sql: true
    hibernate:
      ddl-auto: update
    dialect: org.hibernate.dialect.SQLServer2012Dialect
server:
  port: 9191
pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>2.7.1</version>
<relativePath/> <!-- lookup parent from repository -->
</parent>
<groupId>com.javatechie</groupId>
<artifactId>springboot-azuresql</artifactId>
<version>0.0.1-SNAPSHOT</version>
<name>springboot-azure-sql</name>
<description>Demo project for Spring Boot</description>
<properties>
<java.version>17</java.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.springframework.data</groupId>
<artifactId>spring-data-jpa</artifactId>
<version>2.7.0</version>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.webjars.npm</groupId>
<artifactId>table</artifactId>
<version>5.4.6</version>
</dependency>
<dependency>
<groupId>javax.persistence</groupId>
<artifactId>persistence-api</artifactId>
<version>1.0.2</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
</project>
Employee.java
package com.javatechie.azuresql;

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.Table;

@Entity
@Table
@Data
@AllArgsConstructor
@NoArgsConstructor
public class Employee {

    @Id
    @GeneratedValue
    private int id;
    private String name;
    private String dept;
    private long salary;
}
EmployeeRepository.java
package com.javatechie.azuresql;

import org.springframework.data.jpa.repository.JpaRepository;

public interface EmployeeRepository extends JpaRepository<Employee, Integer> {
}
SpringbootAzuresqlApplication.java (Main Class)
package com.javatechie.azuresql;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;
import java.util.List;

@SpringBootApplication
@RestController
public class SpringbootAzuresqlApplication {

    @Autowired
    private EmployeeRepository repository;

    @PostMapping("/product")
    public Employee addEmployee(@RequestBody Employee employee) {
        return repository.save(employee);
    }

    @GetMapping("/products")
    public List<Employee> getEmployees() {
        return repository.findAll();
    }

    public static void main(String[] args) {
        SpringApplication.run(SpringbootAzuresqlApplication.class, args);
    }
}
The error indicated:
21:55:28.055 [main] ERROR org.springframework.boot.SpringApplication - Application run failed
org.yaml.snakeyaml.scanner.ScannerException: mapping values are not allowed here
in 'reader', line 4, column 13:
username: javatechi
^
The error coming from the YAML parser is misleading - it is not actually username: javatechi that is incorrect.
Please have a read through the YAML spec, 2.1 Collections and its examples, which is introduced with:
2.1. Collections
YAML’s block collections use indentation for scope and begin each entry on its own line. Block sequences indicate each entry with a dash and space (“- ”). Mappings use a colon and space (“: ”) to mark each key/value pair. Comments begin with an octothorpe (also called a “hash”, “sharp”, “pound” or “number sign” - “#”).
In other words it is actually the previous line that is incorrect:
url:jdbc:jdbc:sqlserver://xxxx.database.windows.net:1433;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;
You can fix it by adding a space after the url: key to separate it from its value:
url: jdbc:jdbc:sqlserver://xxxx.database.windows.net:1433;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;

Undocumented Constraint? publishing to topic *from* pubsub trigger

I don't know if I'm going crazy, or if this is a limitation that just isn't documented (I've scoured the GCP API docs):
Is it possible to have a cloud function with a pubsub trigger on 'topic A', and inside that cloud function, publish a message to 'topic B'?
I've tried all the other triggers with identical code running (cloud functions as HTTP triggers, Cloud Storage Triggers, Firebase Triggers), and they all successfully publish to topics.
But the moment I (almost literally) copy-paste my code into a pubsub trigger, after consuming the message, when it attempts to publish its own message to the next topic, it just hangs. The function just times out when attempting to publish.
So to recap, is the following possible in GCP?
PubSub Topic A --> Cloud Function --> Pubsub Topic B
Thanks in advance for any clarifications! This is all in Java 11. Here's the code:
...<bunch of imports>
public class SignedURLGenerator implements BackgroundFunction<PubSubMessage> {
private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT");
private static final Logger logger = Logger.getLogger(SignedURLGenerator.class.getName());
/**
* Handle the incoming PubsubMessage
**/
@Override
public void accept(PubSubMessage message, Context context) throws IOException, InterruptedException {
String data = new String(Base64.getDecoder().decode(message.data));
System.out.println("The input message is: " + data.toString());
//Do a bunch of other stuff not relevant to the issue at hand...
publishSignedURL(url.toString());
}
//Here's the interesting part
public static void publishSignedURL(String message) throws IOException, InterruptedException {
String topicName = "url-ready-notifier";
String responseMessage;
Publisher publisher = null;
try {
// Create the PubsubMessage object
ByteString byteStr = ByteString.copyFrom(message, StandardCharsets.UTF_8);
PubsubMessage pubsubApiMessage = PubsubMessage.newBuilder().setData(byteStr).build();
System.out.println("Message Constructed:" + message);
//This part works fine, the message gets constructed
publisher = Publisher.newBuilder(ProjectTopicName.of(PROJECT_ID, topicName)).build();
System.out.println("Publisher Created.");
//This part also works fine, the publisher gets created
publisher.publish(pubsubApiMessage).get();
responseMessage = "Message published.";
//The code NEVER GETS HERE. The message is never published. And eventually the cloud function times out :(
} catch (InterruptedException | ExecutionException e) {
System.out.println("Something went wrong with publishing: " + e.getMessage());
}
System.out.println("Everything wrapped up.");
}
}
Edit
As requested, this is my current POM
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>cloudfunctions</groupId>
<artifactId>pubsub-function</artifactId>
<version>1.0-SNAPSHOT</version>
<properties>
<maven.compiler.target>11</maven.compiler.target>
<maven.compiler.source>11</maven.compiler.source>
</properties>
<dependencies>
<dependency>
<groupId>com.google.cloud</groupId>
<artifactId>libraries-bom</artifactId>
<version>20.6.0</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>com.google.cloud.functions</groupId>
<artifactId>functions-framework-api</artifactId>
<version>1.0.1</version>
<type>jar</type>
</dependency>
<dependency>
<groupId>com.google.cloud</groupId>
<artifactId>google-cloud-storage</artifactId>
<version>1.117.1</version>
</dependency>
<dependency>
<groupId>com.google.cloud</groupId>
<artifactId>google-cloud-pubsub</artifactId>
<version>1.113.4</version>
</dependency>
<dependency>
<groupId>com.google.api</groupId>
<artifactId>gax</artifactId>
<version>1.66.0</version>
</dependency>
<dependency>
<groupId>com.google.api</groupId>
<artifactId>gax-grpc</artifactId>
<version>1.66.0</version>
</dependency>
<dependency>
<groupId>org.threeten</groupId>
<artifactId>threetenbp</artifactId>
<version>0.7.2</version>
</dependency>
</dependencies>
</project>
Can you try to explicitly set the batching settings in your publisher client? Like this:
publisher = Publisher.newBuilder(ProjectTopicName.of(PROJECT_ID, topicName)).setBatchingSettings(BatchingSettings.newBuilder()
.setDelayThreshold(Duration.of(10, ChronoUnit.SECONDS))
.setElementCountThreshold(1L)
.setIsEnabled(true)
.build()).build();
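For reference, that snippet assumes roughly the following imports; the Duration and ChronoUnit here are the ThreeTen backport types used by this generation of the client libraries (the threetenbp artifact is already in the POM above), not java.time:
import com.google.api.gax.batching.BatchingSettings;
import com.google.cloud.pubsub.v1.Publisher;
import com.google.pubsub.v1.ProjectTopicName;
import org.threeten.bp.Duration;
import org.threeten.bp.temporal.ChronoUnit;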
I don't know what is happening; maybe it is a default, global configuration of PubSub. If it's not that, I will delete this answer.
EDIT 1
Here is a screen capture of the builder class in the Publisher's parent class (image not reproduced here).
It shows all the default values of the library. However, the behavior that you observe isn't normal; the defaults should stay the defaults even in a PubSub trigger. I will open an issue and forward it to the team directly.

How to insert two docs into Solr as one document

I have two documents. One contains the name of the person, the corresponding rank and the doc id; it is in CSV format (screenshot not reproduced here).
The other set of documents contains paragraphs of text; these files are named by doc id and are in text format (screenshot not reproduced here).
I need to insert these two as one doc in Solr, so that in Solr I have a doc of the format:
Person: arthur w cabot
KDE Rank: 5.98+108
Text: Text from the other set of documents
How can I achieve this? Also, I would like to know if there is another approach that I can follow.
In your case you can build the Solr document and commit it to Solr.
Something like below:
SolrInputDocument document = new SolrInputDocument();
document.addField("id", "123456");
document.addField("title", fileName);
document.addField("text", contentBuilder.toString());
solr.add(document);
solr.commit();
In your case the fields are personName, personRank and documentContent.
I assume that reading the CSV file is done on your end, that you retrieve the document file name from it, and that you already know where the document is located.
As mentioned, when you read the CSV file you get the data for personName and personRank directly.
The third field is the document content. As you only get the document file name from the CSV, you read the content of that file and pass it to the Solr document as the third field.
I have put together one option for you. Something like below:
String urlString = "http://localhost:8983/solr/TestCore";
SolrClient solr = new HttpSolrClient.Builder(urlString).build();
StringBuilder contentBuilder = new StringBuilder();
try (Stream<String> stream = Files.lines(Paths.get("D:/LogFolder/IB4_buildViewSchema.txt"),
StandardCharsets.UTF_8)) {
stream.forEach(s -> contentBuilder.append(s).append("\n"));
} catch (IOException e) {
e.printStackTrace();
}
try {
File file = new File("D:/LogFolder/IB4_buildViewSchema.txt");
String fileName = file.getName();
SolrInputDocument document = new SolrInputDocument();
document.addField("id", "123456");
document.addField("title", fileName);
document.addField("text", contentBuilder.toString());
solr.add(document);
solr.commit();
} catch (SolrServerException | IOException e) {
e.printStackTrace();
}
You would run this iteratively for every row of the CSV.
Check whether you can do it in batches, and look at optimizing the code as well.
This code is not a foolproof solution to your problem.
I verified that the data was indexed in Solr by querying it from the Solr admin page (screenshot not reproduced here).
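As a rough illustration of that CSV-driven loop, a sketch could look like the following (the CSV layout, the Solr field names and the folder locations are assumptions based on the question, not tested code, and the enclosing method is assumed to declare throws SolrServerException, IOException):
String urlString = "http://localhost:8983/solr/TestCore";
SolrClient solr = new HttpSolrClient.Builder(urlString).build();
// assumed CSV layout: personName,kdeRank,docId
List<String> rows = Files.readAllLines(Paths.get("D:/data/persons.csv"), StandardCharsets.UTF_8);
for (String row : rows) {
    String[] cols = row.split(",");
    String personName = cols[0];
    String kdeRank = cols[1];
    String docId = cols[2];
    // the text documents are assumed to be named <docId>.txt and kept in one folder
    String text = new String(Files.readAllBytes(Paths.get("D:/data/docs", docId + ".txt")),
            StandardCharsets.UTF_8);
    SolrInputDocument document = new SolrInputDocument();
    document.addField("id", docId);
    document.addField("personName", personName);
    document.addField("personRank", kdeRank);
    document.addField("text", text);
    solr.add(document);
}
solr.commit();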
Note: I built a Maven project and wrote the above piece of code. If you want, you can use the below pom.xml for your reference.
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>solr</groupId>
<artifactId>TestSolr2</artifactId>
<version>0.0.1-SNAPSHOT</version>
<packaging>jar</packaging>
<name>TestSolr2</name>
<url>http://maven.apache.org</url>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<maven.compiler.target>1.8</maven.compiler.target>
<maven.compiler.source>1.8</maven.compiler.source>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.solr</groupId>
<artifactId>solr-solrj</artifactId>
<version>7.6.0</version>
</dependency>
<dependency>
<groupId>org.apache.solr</groupId>
<artifactId>solr-cell</artifactId>
<version>7.6.0</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>3.8.1</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>

How can I query any field with MongoRepository

Assume the domain object (MyDomain) has many fields (f1, f2, f3 ... f100). I define a MyDomainRepository from MongoRepository, and I want to take the field name and value as parameters instead of hard-coding the field name as part of the query method, like below:
List<MyDomain> findByNameAndValue(String name, String value);
If the name and value are "f1" and "foo", the method will find all documents whose field "f1" equals "foo".
I have googled for hours and had no luck.
Any help from anybody, thanks!
You need to use QueryDSL predicates.
First, add the following dependencies to your pom.xml (assuming you're using maven to build your project):
<dependencies>
...
<dependency>
<groupId>com.querydsl</groupId>
<artifactId>querydsl-apt</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.querydsl</groupId>
<artifactId>querydsl-mongodb</artifactId>
</dependency>
...
</dependencies>
Also add this to your build plugins:
<build>
<plugins>
...
<plugin>
<groupId>com.mysema.maven</groupId>
<artifactId>apt-maven-plugin</artifactId>
<version>1.1.3</version>
<executions>
<execution>
<goals>
<goal>process</goal>
</goals>
<configuration>
<outputDirectory>target/generated-sources/java</outputDirectory>
<processor>org.springframework.data.mongodb.repository.support.MongoAnnotationProcessor</processor>
</configuration>
</execution>
</executions>
</plugin>
...
</plugins>
</build>
Your repository must extend QueryDslPredicateExecutor:
public interface MyDomainRepository extends MongoRepository<MyDomain, String>,
QueryDslPredicateExecutor<MyDomain> { }
Your repository will then inherit
public Iterable<MyDomain> findAll(Predicate predicate)
and a few other methods.
When you build your project, QueryDSL will generate Q-classes for you, that you can use to programmatically build predicates and query documents matching your predicates:
QMyDomain q = QMyDomain.mydomain;
Predicate p = q.f1.eq(value);
Iterable<MyDomain> i = repository.findAll(p);
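Predicates also compose, so you can combine several fields in one query; a small sketch, assuming f1 and f2 are String fields of MyDomain:
QMyDomain q = QMyDomain.mydomain;
Predicate p = q.f1.eq("foo").and(q.f2.eq("bar"));
Iterable<MyDomain> results = repository.findAll(p);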
To query your resources using a REST controller, you'll need something similar to:
@RestController
@RequestMapping("/mydomain")
public class MyDomainController {

    @Autowired
    private MyDomainRepository repository;

    @GetMapping("/search/query")
    public List<MyDomain> query(@QuerydslPredicate(root = MyDomain.class) Predicate predicate) {
        return repository.findAll(predicate);
    }
}
This last piece of code is quick and dirty; it probably won't work as is (at the very least it should return some kind of List), but you get the idea.
pvpkiran is right, there is no such thing out of the box. You need to build your own using an injected MongoTemplate, for instance:
List<MyDomain> findByNameAndValue(String name, String value) {
    Document document = new Document(name, value);
    Query query = new BasicQuery(document.toJson());
    return mongoTemplate.find(query, MyDomain.class);
}
The interesting thing is that you can go a little further and pass several name/value pairs using a Map:
List<MyDomain> findByNamesAndValues(Map<String, String> parameters) {
    Document document = new Document(parameters);
    Query query = new BasicQuery(document.toJson());
    return mongoTemplate.find(query, MyDomain.class);
}
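For example, a caller could then filter on any fields without a dedicated query method; a small usage sketch, where customRepository stands for whatever repository implementation holds the injected MongoTemplate and the field names are the ones from the question:
Map<String, String> parameters = new HashMap<>();
parameters.put("f1", "foo");
parameters.put("f2", "bar");
List<MyDomain> matches = customRepository.findByNamesAndValues(parameters);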
Just in case, that works with a QueryDSL predicate too:
List<MyDomain> findByNamesAndValues(Predicate predicate) {
    AbstractMongodbQuery mongoQuery = new SpringDataMongodbQuery(mongoTemplate, MyDomain.class)
            .where(predicate);
    Query query = new BasicQuery(mongoQuery.toString());
    return mongoTemplate.find(query, MyDomain.class);
}
These methods can be further improved to handle pagination, and other cool features such as field inclusion/exclusion.

Is it possible to do a java cron job in order to export a BigQuery table?

I want to upload a Java cron job that runs some queries and exports a table from BigQuery to Google Storage once a week. To do this, I've used the Google Plugin for Eclipse to upload the cron job to App Engine.
The problem is that my cron job calls a Java class that has Google Maven dependencies to access BigQuery, but when the cron job is uploaded to App Engine, the error below appears:
Error for /cron/gaejcronjob
java.lang.NoClassDefFoundError: com/google/api/client/json/JsonFactory
I've read the question java.lang.ClassNotFoundException: com.google.api.client.json.JsonFactory, but its answer doesn't solve the problem.
Edit:
(added pom.xml, GAEJCronServlet.java and BigQuery.java code)
pom.xml:
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>XTG.Cron.Jobs</groupId>
<artifactId>BigQuery</artifactId>
<version>0.0.1-SNAPSHOT</version>
<build>
<sourceDirectory>src</sourceDirectory>
<resources>
<resource>
<directory>src</directory>
<excludes>
<exclude>**/*.java</exclude>
</excludes>
</resource>
</resources>
<plugins>
<plugin>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.3</version>
<configuration>
<source>1.7</source>
<target>1.7</target>
</configuration>
</plugin>
</plugins>
</build>
<dependencies>
<dependency>
<groupId>com.google.cloud.dataflow</groupId>
<artifactId>google-cloud-dataflow-java-sdk-all</artifactId>
<version>LATEST</version>
</dependency>
</dependencies>
</project>
GAEJCronServlet.java:
package com.gaejexperiments.cron;
import java.io.IOException;
import java.util.logging.Logger;
import javax.servlet.ServletException;
import javax.servlet.http.*;
import com.gaejexperiments.cron.BigQuery;
@SuppressWarnings("serial")
public class GAEJCronServlet extends HttpServlet {
private static final Logger _logger = Logger.getLogger(GAEJCronServlet.class.getName());
public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
try {
_logger.info("Cron Job has been executed");
BigQuery bigquery = new BigQuery();
bigquery.exportTable();
} catch (Exception ex) {
//_logger.info(ex);
}
}
@Override
public void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
doGet(req, resp);
}
}
BigQuery.java:
package com.gaejexperiments.cron;
import com.google.api.client.googleapis.auth.oauth2.GoogleCredential;
import com.google.api.client.http.HttpTransport;
import com.google.api.client.http.javanet.NetHttpTransport;
import com.google.api.client.json.JsonFactory;
import com.google.api.client.json.jackson.JacksonFactory;
import com.google.api.services.bigquery.Bigquery;
import com.google.api.services.bigquery.model.ErrorProto;
import com.google.api.services.bigquery.model.Job;
import com.google.api.services.bigquery.model.JobConfiguration;
import com.google.api.services.bigquery.model.JobConfigurationExtract;
import com.google.api.services.bigquery.model.JobReference;
import com.google.api.services.bigquery.model.TableReference;
public class BigQuery {
private final String PROJECT_ID = projectId;
private final String DATASET_ID = "bigquerytest";
private final String TABLE_ID = "test";
private Bigquery service = null;
public void main(String[] args) {
try {
HttpTransport httpTransport = new NetHttpTransport();
JsonFactory jsonFactory = new JacksonFactory();
GoogleCredential credential = GoogleCredential.getApplicationDefault(httpTransport, jsonFactory);
Bigquery.Builder serviceBuilder =
new Bigquery.Builder(httpTransport, jsonFactory, credential)
.setApplicationName("Bigquery ");
service = serviceBuilder.build();
if (service == null || service.jobs() == null) {
throw new Exception("Service is null");
}
}
catch (Exception ex) {
System.out.println("Caught exception: " + ex + "\n");
ex.printStackTrace();
System.exit(1);
}
System.exit(0);
}
public void exportTable() throws Exception{
//Export
TableReference sourceTable = new TableReference();
sourceTable.setProjectId(PROJECT_ID);
sourceTable.setDatasetId(DATASET_ID);
sourceTable.setTableId(TABLE_ID);
JobConfigurationExtract jobExtract = new JobConfigurationExtract();
jobExtract.setDestinationFormat("CSV");
jobExtract.setDestinationUri("gs://xtg-bigquery/test1.csv");
jobExtract.setSourceTable(sourceTable);
JobConfiguration jobConfig = new JobConfiguration();
jobConfig.setExtract(jobExtract);
JobReference jobRef = new JobReference();
jobRef.setProjectId(PROJECT_ID);
Job outputJob = new Job();
outputJob.setConfiguration(jobConfig);
outputJob.setJobReference(jobRef);
Job job = service.jobs().insert(PROJECT_ID,
outputJob).execute();
if (job == null) {
throw new Exception("Job is null");
}
while (true) {
String status = job.getStatus().getState();
if (status != null && ("DONE").equalsIgnoreCase(status)) {
break;
}
Thread.sleep(1000);
}
ErrorProto errorResult = job.getStatus().getErrorResult();
if (errorResult != null) {
throw new Exception("Error running job: " + errorResult);
}
}
}
You're missing a couple of appengine specific pom settings. The recommended approach is to create the pom.xml from the app engine archetype like this (as described here):
mvn archetype:generate -Dappengine-version=1.9.30 -Dapplication-id=your-app-id -Dfilter=com.google.appengine.archetypes:appengine-skeleton-archetype
Alternatively, you can add the build plugins into your existing pom.xml; the build section should then look something like this (that's basically what the archetype will create for you):
<build>
<!-- for hot reload of the web application-->
<outputDirectory>${project.build.directory}/${project.build.finalName}/WEB-INF/classes</outputDirectory>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<version>3.1</version>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>1.7</source>
<target>1.7</target>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-war-plugin</artifactId>
<version>2.4</version>
<configuration>
<archiveClasses>true</archiveClasses>
<webResources>
<!-- in order to interpolate version from pom into appengine-web.xml -->
<resource>
<directory>${basedir}/src/main/webapp/WEB-INF</directory>
<filtering>true</filtering>
<targetPath>WEB-INF</targetPath>
</resource>
</webResources>
</configuration>
</plugin>
<plugin>
<groupId>com.google.appengine</groupId>
<artifactId>appengine-maven-plugin</artifactId>
<version>${appengine.version}</version>
<configuration>
<enableJarClasses>false</enableJarClasses>
<version>${app.version}</version>
<!-- Comment in the below snippet to bind to all IPs instead of just localhost -->
<address>0.0.0.0</address>
<port>8080</port>
<!-- Comment in the below snippet to enable local debugging with a remote debugger
like those included with Eclipse or IntelliJ -->
<jvmFlags>
<jvmFlag>-agentlib:jdwp=transport=dt_socket,address=8000,server=y,suspend=n</jvmFlag>
</jvmFlags>
</configuration>
</plugin>
<plugin>
<groupId>com.google.appengine</groupId>
<artifactId>gcloud-maven-plugin</artifactId>
<version>${gcloud.plugin.version}</version>
<configuration>
<set_default>true</set_default>
</configuration>
</plugin>
</plugins>
</build>
You should also add the appengine sdk to your dependencies, mine usually looks like this:
<!-- Compile/runtime dependencies -->
<dependency>
<groupId>com.google.appengine</groupId>
<artifactId>appengine-api-1.0-sdk</artifactId>
<version>${appengine.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<version>2.5</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>jstl</groupId>
<artifactId>jstl</artifactId>
<version>1.2</version>
</dependency>
Last but not least, the packaging of appengine projects is usually set to WAR
<packaging>war</packaging>
Having set up all that (and having an appengine-web.xml present in WEB-INF), you can deploy your App Engine application with
mvn appengine:update
I recommend you create a project with the archetype and copy your content in the new project. That's much easier than adding all those configurations to your existing project.
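Separate from the classpath problem: the weekly schedule itself is declared in WEB-INF/cron.xml. A sketch that reuses the servlet path from the question (the day and time are only an assumption):
<?xml version="1.0" encoding="UTF-8"?>
<cronentries>
  <cron>
    <url>/cron/gaejcronjob</url>
    <description>Weekly BigQuery export to Google Storage</description>
    <schedule>every monday 09:00</schedule>
  </cron>
</cronentries>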
