I am trying to send log records from a Raspberry Pi C application to the back-end Kaa server.
Here is the log schema:
{
"type" : "record",
"name" : "RemoteSensorLog",
"namespace" : "org.kaa.iot.log.sensor",
"fields" : [ {
"name" : "deviceId",
"type" : {
"type" : "string",
"avro.java.string" : "String"
}
}, {
"name" : "temperature",
"type" : [ "double", "null" ]
}, {
"name" : "humidity",
"type" : [ "long", "null" ]
}, {
"name" : "batteryLevel",
"type" : [ "int", "null" ]
} ],
"version" : 1,
"dependencies" : [ ],
"displayName" : "RemoteSensorLog",
"description" : "This is the log sent by remote sensors"
}
Some of the fields in the log schema are optional. Here is the initialization function:
void kaaLogInitializing(void *context)
{
void *log_storage_context = NULL;
void *log_upload_strategy_context = NULL;
printf("Initializing the Kaa log\n");
kaa_client_t * kaa_client_context = context;
if (context == NULL) {
return;
}
/* Log delivery listener callbacks. Each callback is called whenever something happens with a log bucket. */
kaa_log_delivery_listener_t log_listener = {
.on_success = success_log_delivery_callback, /* Called if log delivered successfully */
.on_failed = failed_log_delivery_callback, /* Called if delivery failed */
.on_timeout = timeout_log_delivery_callback, /* Called if timeout occurs */
.ctx = kaa_client_context, /* Optional context */
};
/* The internal memory log storage distributed with Kaa SDK */
kaa_error_t error_code = ext_unlimited_log_storage_create(&log_storage_context,
kaa_client_get_context(
kaa_client_context
)->logger
);
if (error_code) {
printf("Failed to create Kaa log storage %d\r\n", error_code);
return;
}
error_code = ext_log_upload_strategy_create(kaa_client_get_context(
kaa_client_context),
&log_upload_strategy_context, KAA_LOG_UPLOAD_VOLUME_STRATEGY);
if (error_code) {
printf("Failed to create log upload strategy, error code %d\r\n", error_code);
return;
}
error_code = ext_log_upload_strategy_set_threshold_count(log_upload_strategy_context,
LOG_UPLOAD_THRESHOLD);
if (error_code) {
printf("Failed to set threshold log record count, error code %d\r\n", error_code);
return;
}
error_code = kaa_logging_set_strategy(kaa_client_get_context(kaa_client_context)->log_collector,
log_upload_strategy_context);
if (error_code) {
printf("Failed to set log upload strategy, error code %d\r\n", error_code);
return;
}
/* Specify log bucket size constraints */
kaa_log_bucket_constraints_t bucket_sizes = {
.max_bucket_size = MAX_LOG_BUCKET_SIZE, /* Bucket size in bytes */
.max_bucket_log_count = MAX_LOG_COUNT, /* Maximum log count in one bucket */
};
/* Initialize the log storage and strategy (by default it is not set) */
error_code = kaa_logging_init(kaa_client_get_context(
kaa_client_context)->log_collector
, log_storage_context
, log_upload_strategy_context
, &bucket_sizes);
if (error_code) {
printf("Failed to initialize Kaa log %d\r\n", error_code);
return;
}
/* Add listeners to a log collector */
kaa_logging_set_listeners(kaa_client_get_context(
kaa_client_context)->log_collector,
&log_listener);
}
Here is the function I use to send a log record:
void sendLog(void *context)
{
kaa_client_t * kaa_client_context = context;
float temperature = 25.5;
if (context == NULL) {
return;
}
logDelivered = LOG_DELIVERY_DELIVERING;
printf("Start attempt to send Log\n");
kaa_logging_remote_sensor_log_t *log_record = kaa_logging_remote_sensor_log_create();
log_record->device_id = kaa_string_copy_create("Dev1");
log_record->temperature = kaa_logging_union_double_or_null_branch_0_create();
log_record->temperature->data = &temperature; /* create subobject */
log_record->humidity = kaa_logging_union_long_or_null_branch_1_create();
log_record->battery_level = kaa_logging_union_int_or_null_branch_1_create();
printf("Log record created\n");
/* Log information. Populated when log is added via kaa_logging_add_record() */
kaa_log_record_info_t log_info;
kaa_error_t error = kaa_logging_add_record(
kaa_client_get_context(kaa_client_context)->log_collector,
log_record, &log_info);
if (error) {
printf("Failed to add log record, error code\r\n");
kaa_client_stop(kaa_client_context);
return;
}
//log_record->destroy(log_record);
}
I have two problems:
Problem 1: if I uncomment the last line in the sendLog function, log_record->destroy(log_record);, I get this error: double free or corruption (out): 0x7efe05
Problem 2: after commenting out that line and running the application, nothing seems to happen: I never get any error, the server never receives the log, and none of the success, failure, or timeout callbacks fires.
You need to manually allocate memory to store the temperature value, because it will be freed by the log_record->destroy(log_record) call. In your code, log_record->temperature->data points at the stack variable temperature, so destroy() tries to free memory that was never heap-allocated, which is exactly what produces the double free or corruption error.
So you need to do something like this:
double *p_temperature = KAA_MALLOC(sizeof(double));
if (!p_temperature) {
// error handling
}
*p_temperature = 25.5;
log_record->temperature->data = p_temperature;
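With the value on the heap, the destroy call can be re-enabled. Here is a minimal sketch of the corrected fragment of sendLog, using the generated constructors from the question:
/* corrected temperature handling inside sendLog() */
double *p_temperature = KAA_MALLOC(sizeof(double));
if (!p_temperature) {
    return; /* allocation failed */
}
*p_temperature = 25.5;
log_record->temperature = kaa_logging_union_double_or_null_branch_0_create();
log_record->temperature->data = p_temperature; /* heap memory, owned by the record */
/* ... kaa_logging_add_record() as before ... */
log_record->destroy(log_record); /* now also frees p_temperature, no double free */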
I want to import data from Excel files into SQL Server. The file is 22 MB and contains approximately 1 million rows, but I get a timeout error.
This is the code of my controller:
[System.Web.Http.Route("UploadExcel")]
[System.Web.Http.HttpPost]
[RequestFormLimits(MultipartBodyLengthLimit = 409715200)]
[RequestSizeLimit(409715200)]
public string ExcelUpload()
{
string message = "";
HttpResponseMessage result = null;
var httpRequest = HttpContext.Current.Request;
using (AngularDBEntities objEntity = new AngularDBEntities())
{
if (httpRequest.Files.Count > 0)
{
HttpPostedFile file = httpRequest.Files[0];
Stream stream = file.InputStream;
IExcelDataReader reader = null;
if (file.FileName.EndsWith(".xls"))
{
reader = ExcelReaderFactory.CreateBinaryReader(stream);
}
else if (file.FileName.EndsWith(".xlsx"))
{
reader = ExcelReaderFactory.CreateOpenXmlReader(stream);
}
else
{
message = "This file format is not supported";
}
DataSet excelRecords = reader.AsDataSet();
reader.Close();
var finalRecords = excelRecords.Tables[0];
for (int i = 0; i < finalRecords.Rows.Count; i++)
{
UserDetail objUser = new UserDetail();
objUser.UserName = finalRecords.Rows[i][0].ToString();
objUser.EmailId = finalRecords.Rows[i][1].ToString();
objUser.Gender = finalRecords.Rows[i][2].ToString();
objUser.Address = finalRecords.Rows[i][3].ToString();
objUser.MobileNo = finalRecords.Rows[i][4].ToString();
objUser.PinCode = finalRecords.Rows[i][5].ToString();
objEntity.UserDetails.Add(objUser);
}
int output = objEntity.SaveChanges();
if (output > 0)
{
message = "Excel file has been successfully uploaded";
}
else
{
message = "Excel file uploaded failed";
}
}
else
{
result = Request.CreateResponse(HttpStatusCode.BadRequest);
}
}
return message;
}
I added maxRequestLength="1048576" executionTimeout="999999" to the web.config file in the system.web section, and maxAllowedContentLength="1073741824" to the security tag, but I am still facing this problem.
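The settings described above correspond to something like this in web.config (a sketch of the standard placement for these limits; adjust to your actual file):
<!-- sketch: standard ASP.NET placement for the limits mentioned above -->
<system.web>
  <httpRuntime maxRequestLength="1048576" executionTimeout="999999" />
</system.web>
<system.webServer>
  <security>
    <requestFiltering>
      <requestLimits maxAllowedContentLength="1073741824" />
    </requestFiltering>
  </security>
</system.webServer>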
Note that when I upload small files, the data is added to the table successfully.
You can add all the items to a list and then do a single bulk insert at the end. You can use Entity Framework Extensions for this.
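A minimal sketch of the reworked row loop, assuming the Entity Framework Extensions NuGet package (Z.EntityFramework.Extensions), which adds a BulkInsert extension method to the context:
// assumes the Z.EntityFramework.Extensions package is installed
var users = new List<UserDetail>();
for (int i = 0; i < finalRecords.Rows.Count; i++)
{
    users.Add(new UserDetail
    {
        UserName = finalRecords.Rows[i][0].ToString(),
        EmailId = finalRecords.Rows[i][1].ToString(),
        Gender = finalRecords.Rows[i][2].ToString(),
        Address = finalRecords.Rows[i][3].ToString(),
        MobileNo = finalRecords.Rows[i][4].ToString(),
        PinCode = finalRecords.Rows[i][5].ToString()
    });
}
// One bulk operation instead of ~1 million individually tracked inserts.
objEntity.BulkInsert(users);
This avoids the per-entity change-tracking overhead of Add plus one huge SaveChanges, which is what makes the request run long enough to time out.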
I have a process that must check the INBOX on Gmail for a failure message. It works, except that connecting and checking the message takes about 1 minute, which is too much time.
My code:
public static SendResult sendingSuccess(final String email) {
SendResult result = new SendResult();
try {
Properties props = new Properties();
props.setProperty("mail.store.protocol", "imaps");
props.setProperty("mail.imap.com", "993");
props.setProperty("mail.imap.connectiontimeout", "5000");
props.setProperty("mail.imap.timeout", "5000");
Session session = Session.getDefaultInstance(props);
Store store = session.getStore("imaps");
store.connect("imap.googlemail.com", 993, GMAIL_USER, GMAIL_PASSWORD);
// Select and open folder
Folder inbox = store.getFolder("INBOX");
inbox.open(Folder.READ_WRITE);
// What to search for
SearchTerm searchTerm = new SearchTerm() {
private static final long serialVersionUID = -7187666524976851520L;
public boolean match(Message message) {
try {
String content = getContent(message);
boolean daemon = (message.getFrom()[0].toString()).contains("mailer-daemon@googlemail.com");
boolean failure = message.getSubject().contains("Failure");
boolean foundWarning = content.contains(email);
if (daemon && failure && foundWarning) {
return true;
}
} catch (Exception ex) {
ex.printStackTrace();
}
return false;
}
};
// Fetch unseen messages from inbox folder
Message[] messages = inbox.search(searchTerm);
// If there is no message then it's OK
result.setStatus(messages.length == 0);
result.setMessage(result.isStatus() ? "No failure message found for " + email : "Failure message found for " + email);
// Flag message as DELETED
for (Message message : messages) {
message.setFlag(Flags.Flag.DELETED, true);
}
// disconnect and close
inbox.close(false);
store.close();
} catch (Exception ex) {
result.setMessage(ex.getMessage());
ex.printStackTrace();
}
return result;
}
When I run this code to query the failure message, it takes more than 1 minute to return the result.
======= Checking Gmail account for message failure! =====
Start...: 09:00:33
Finish..: 09:01:01
Result..: SendResult [status=true, message=No failure found for wrong.user@gmxexexex.net]
Is there any way to reduce this time?
The problem is most likely because you've written your own search term. JavaMail doesn't know how to translate your search term into an IMAP SEARCH request so it executes the search on the client, which requires downloading all the messages to the client to search there. Try this instead:
SearchTerm searchTerm = new AndTerm(new SearchTerm[] {
new FromStringTerm("mailer-daemon@googlemail.com"),
new SubjectTerm("Failure"),
new BodyTerm(email)
});
That will allow the search to be done by the IMAP server.
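If you also want to limit the search to unseen messages, as the comment in the original code suggests, a FlagTerm (from javax.mail.search, together with javax.mail.Flags) can be combined into the same server-side search:
SearchTerm searchTerm = new AndTerm(new SearchTerm[] {
    new FlagTerm(new Flags(Flags.Flag.SEEN), false), // unseen messages only
    new FromStringTerm("mailer-daemon@googlemail.com"),
    new SubjectTerm("Failure"),
    new BodyTerm(email)
});
Message[] messages = inbox.search(searchTerm);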
I am indexing text documents using Flume. I do not see any error or warning messages, but the data is not getting ingested into Solr. The log level for both Solr and Flume is set to TRACE, ALL.
Flume version : 1.5.2.2.3
Solr Version : 5.5
**Config files are as below:**
**Flume Config:**
agent.sources = SpoolDirSrc
agent.channels = FileChannel
agent.sinks = SolrSink
# Configure Source
agent.sources.SpoolDirSrc.channels = fileChannel
agent.sources.SpoolDirSrc.type = spooldir
agent.sources.SpoolDirSrc.spoolDir = /home/flume/source_emails
agent.sources.SpoolDirSrc.basenameHeader = true
agent.sources.SpoolDirSrc.fileHeader = true
agent.sources.SpoolDirSrc.deserializer = org.apache.flume.sink.solr.morphline.BlobDeserializer$Builder
agent.channels.FileChannel.type = file
agent.channels.FileChannel.capacity = 10000
agent.sinks.SolrSink.type = org.apache.flume.sink.solr.morphline.MorphlineSolrSink
agent.sinks.SolrSink.morphlineFile = /etc/flume/conf/morphline.conf
agent.sinks.SolrSink.batchsize = 1000
agent.sinks.SolrSink.batchDurationMillis = 2500
agent.sinks.SolrSink.channel = fileChannel
agent.sinks.SolrSink.morphlineId = morphline1
agent.sources.SpoolDirSrc.channels = FileChannel
agent.sinks.SolrSink.channel = FileChannel
"
**Morphline Config:**
solrLocator: {
collection : gsearch
zkHost : "codesolr-as-r3p:21810,codesolr-as-r3p:21811,codesolr-as-r3p:21812"
}
morphlines :
[
{
id : morphline1
importCommands : ["org.kitesdk.**", "org.apache.solr.**"]
commands :
[
{ detectMimeType { includeDefaultMimeTypes : true } }
{
solrCell {
solrLocator : ${solrLocator}
captureAttr : true
lowernames : true
capture : [_attachment_body, _attachment_mimetype, basename, content, content_encoding, content_type, file, meta]
parsers : [ { parser : org.apache.tika.parser.txt.TXTParser } ]
}
}
{ generateUUID { field : id } }
{ sanitizeUnknownSolrFields { solrLocator : ${solrLocator} } }
{ logDebug { format : "output record: {}", args : ["@{}"] } }
{ loadSolr: { solrLocator : ${solrLocator} } }
]
}
]
Please help me figure out what the issue could be.
Normally, in the Flume logs you can see more detail about your error; can you paste the trace?
Maybe morphlines doesn't find your Solr configuration. You can add this property in your morphlines.conf:
solrHomeDir : "/your_solr_config_files"
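In context, the solrLocator block from the question would then look like this (the path is a placeholder for your own Solr configuration directory):
solrLocator: {
  collection : gsearch
  zkHost : "codesolr-as-r3p:21810,codesolr-as-r3p:21811,codesolr-as-r3p:21812"
  # assumed placement: solrHomeDir sits alongside collection/zkHost
  solrHomeDir : "/your_solr_config_files"
}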
I hope this helps.
I am attempting to use the gRPC client for the Stackdriver/Google Cloud Trace definition at https://github.com/googleapis/googleapis/blob/master/google/devtools/cloudtrace/v1/trace.proto
I am sending the following protobuf:
traces {
traces {
project_id: "brightcove-rna-master"
trace_id: "A096D4956A424EEB98AE7863505B1E1F"
spans {
span_id: 1
kind: RPC_CLIENT
name: "/bigtableAapiGrpcstaging_aggregated/queryDataSetInternal/buildScan"
start_time {
seconds: 1459230665
nanos: 705000000
}
end_time {
seconds: 1459230666
nanos: 416000000
}
labels {
key: "videocloud/account"
value: "4523394965001"
}
labels {
key: "videocloud/protocol"
value: "2"
}
labels {
key: "videocloud/dimensions"
value: "protocol,account"
}
}
spans {
span_id: 2
kind: RPC_SERVER
name: "/bigtableAapiGrpcstaging_aggregated/queryDataSetInternal/aggregateScan"
start_time {
seconds: 1459230666
nanos: 420000000
}
end_time {
seconds: 1459230667
nanos: 753000000
}
labels {
key: "videocloud/account"
value: "4523394965001"
}
labels {
key: "videocloud/protocol"
value: "2"
}
labels {
key: "videocloud/dimensions"
value: "protocol,account"
}
labels {
key: "bigtable/rowCount"
value: "339"
}
labels {
key: "bigtable/responseCount"
value: "136"
}
}
spans {
kind: RPC_SERVER
name: "/bigtableAapiGrpcstaging_aggregated/queryDataSetInternal"
start_time {
seconds: 1459230665
nanos: 556000000
}
end_time {
seconds: 1459230667
nanos: 754000000
}
labels {
key: "account"
value: "4523394965001"
}
}
}
}
But the only thing I get in return is this exception:
[WARN ] [2016-03-28 22:51:09,330] [grpc-default-executor-0] rna.api.server.ServerImpl Unable to send trace to google
io.grpc.StatusRuntimeException: CANCELLED
at io.grpc.Status.asRuntimeException(Status.java:431)
at io.grpc.stub.ClientCalls$UnaryStreamToFuture.onClose(ClientCalls.java:358)
at io.grpc.internal.ClientCallImpl$ClientStreamListenerImpl$3.runInContext(ClientCallImpl.java:462)
at io.grpc.internal.ContextRunnable.run(ContextRunnable.java:54)
at io.grpc.internal.SerializingExecutor$TaskRunner.run(SerializingExecutor.java:154)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
Similarly, I tried the following ListTraces request
traceClient.listTraces(ListTracesRequest.newBuilder()
.setProjectId(projectId)
.setView(ListTracesRequest.ViewType.COMPLETE)
.setStartTime(getEpoch())
.setEndTime(getCurrentTime())
.build());
And got:
java.util.concurrent.ExecutionException: io.grpc.StatusRuntimeException: UNIMPLEMENTED: GRPC target method is not implemented.
at com.google.common.util.concurrent.AbstractFuture.getDoneValue(AbstractFuture.java:476)
at com.google.common.util.concurrent.AbstractFuture.get(AbstractFuture.java:455)
at com.brightcove.rna.api.server.ServerImpl.sendTraceAsync(ServerImpl.java:143)
at com.brightcove.rna.api.server.ServerImpl.queryDataset(ServerImpl.java:116)
at com.brightcove.rna.api.AnalyticsAPIGrpc$1.invoke(AnalyticsAPIGrpc.java:152)
at com.brightcove.rna.api.AnalyticsAPIGrpc$1.invoke(AnalyticsAPIGrpc.java:147)
at io.grpc.stub.ServerCalls$1$1.onHalfClose(ServerCalls.java:147)
at io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:255)
at io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$2.runInContext(ServerImpl.java:458)
at io.grpc.internal.ContextRunnable.run(ContextRunnable.java:54)
at io.grpc.internal.SerializingExecutor$TaskRunner.run(SerializingExecutor.java:154)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
Caused by: io.grpc.StatusRuntimeException: UNIMPLEMENTED: GRPC target method is not implemented.
at io.grpc.Status.asRuntimeException(Status.java:431)
at io.grpc.stub.ClientCalls$UnaryStreamToFuture.onClose(ClientCalls.java:358)
at io.grpc.internal.ClientCallImpl$ClientStreamListenerImpl$3.runInContext(ClientCallImpl.java:462)
... 5 more
I am pretty new to the API, so I don't know what I am doing wrong here. Is there a configuration value or something similar that I missed?
Update: Code that generates the spans/trace. The code represents a gRPC service that gets requests and queries a Cloud Bigtable table. My intention is to trace different aspects of the request:
public class ServerImpl implements AnalyticsAPIGrpc.AnalyticsAPI {
private static final Logger logger = Logger.getLogger(ServerImpl.class);
private Connection _connection = null;
private TraceServiceFutureClient traceClient;
private String projectId;
@Override
public void queryDataset(APIRequest request, StreamObserver<APIResponse> responseObserver) {
APIResponse.Builder response = APIResponse.newBuilder();
List<TraceSpan> spans = Lists.newArrayList();
if (request.getTraceToken() != null) {
response.setTraceToken(request.getTraceToken());
}
try {
spans = queryDataSetInternal(request, response);
responseObserver.onNext(response.build());
responseObserver.onCompleted();
} catch (Exception ex) {
responseObserver.onError(ex);
} finally {
// send trace
sendTraceAsync(request.getTraceToken(), spans);
}
}
private ListenableFuture<Empty> sendTraceAsync(String traceId, List<TraceSpan> spans) {
if (spans == null || spans.isEmpty()) {
return Futures.immediateFuture(Empty.getDefaultInstance());
}
PatchTracesRequest patchTracesRequest = PatchTracesRequest.newBuilder()
.setProjectId(projectId)
.setTraces(Traces.newBuilder().addTraces(
Trace.newBuilder()
.setProjectId(projectId)
.setTraceId(traceId.replaceAll("-", "").toUpperCase())
.addAllSpans(spans)))
.build();
if (logger.isTraceEnabled()) {
logger.trace("Sending trace: " + patchTracesRequest.toString());
}
ListenableFuture<Empty> future = traceClient.patchTraces(patchTracesRequest);
// add callback for logging result
Futures.addCallback(future, new FutureCallback<Empty>() {
@Override
public void onSuccess(@Nullable Empty result) {
logger.trace("Trace successfully sent to google");
}
@Override
public void onFailure(Throwable t) {
logger.warn("Unable to send trace to google", t);
}
});
return future;
}
private Connection getConnection() throws IOException {
return this._connection;
}
private Scan createScan(APIRequest request, String resourceName) {
return ScanBuilder.of(
request.getAccount(),
resourceName,
request.getStartTime(), request.getEndTime())
.build();
}
private List<TraceSpan> queryDataSetInternal(APIRequest request, APIResponse.Builder response) throws IOException {
AtomicLong spanIdCounter = new AtomicLong(0L);
String parentTraceName = "/api-qa/queryDataSetInternal";
TraceSpan.Builder parentSpan =
TraceSpan.newBuilder()
.setSpanId(spanIdCounter.getAndIncrement())
.setStartTime(getCurrentTime())
.setKind(TraceSpan.SpanKind.RPC_SERVER)
.setName(parentTraceName)
.putAllLabels(ImmutableMap.of("account", request.getAccount()));
Connection connection = this.getConnection();
List<TraceSpan> traceSpanList = Lists.newArrayList();
try (Table table = connection.getTable("tableName")) {
/// create scan ///
TraceSpan.Builder traceSpan = TraceSpan.newBuilder()
.setSpanId(spanIdCounter.getAndIncrement())
.setKind(TraceSpan.SpanKind.RPC_CLIENT)
.setName(parentTraceName + "/buildScan")
.setParentSpanId(parentSpan.getParentSpanId())
.setStartTime(getCurrentTime());
AtomicInteger count = new AtomicInteger(0);
// add trace span
String dimensionStr = Joiner.on(',').join(request.getDimensionsList());
traceSpan.putAllLabels(ImmutableMap.of(
"videocloud/account", request.getAccount(),
"videocloud/protocol", request.getProtocol(),
"videocloud/dimensions", dimensionStr));
// scan the response and send a stream of rows back
Scan scan = createScan(request, getResourceName(request));
logger.debug("Using bigtable scan: " + scan.toJSON());
ResultScanner scanner = table.getScanner(scan);
// record trace
traceSpanList.add(traceSpan.setEndTime(getCurrentTime()).build());
/// perform aggregation ///
Timestamp startTS = getCurrentTime();
List<Result> results = StreamSupport.stream(scanner.spliterator(), false)
.collect(Collectors.toList());
response.addAllRows(results);
// record trace
traceSpan = TraceSpan.newBuilder()
.setSpanId(spanIdCounter.getAndIncrement())
.setKind(TraceSpan.SpanKind.RPC_SERVER)
.setName(parentTraceName + "/aggregateScan")
.setParentSpanId(parentSpan.getParentSpanId())
.setStartTime(startTS)
.setEndTime(getCurrentTime())
.putAllLabels(ImmutableMap.of(
"videocloud/account", request.getAccount(),
"videocloud/protocol", request.getProtocol(),
"videocloud/dimensions", dimensionStr,
"bigtable/rowCount", String.valueOf(count.get()),
"bigtable/responseCount", String.valueOf(response.getRowsCount())));
traceSpanList.add(traceSpan.build());
response.setStatus(APIResponse.Status.OK)
.setDetails(String.format("Processed %d results from BigTable", count.get()));
} finally {
parentSpan.setEndTime(getCurrentTime());
traceSpanList.add(parentSpan.build());
}
return traceSpanList;
}
}
Ankur, I added an issue in the Cloud Bigtable GitHub.
I have a client that retrieves a certificate (.pfx), including a private key, from a server and I add this to the local keychain with the following code: -
void AddCertToKeyChain(const QByteArray& cert, const QString& password)
{
SecKeychainRef keyChain = nil;
OSStatus err = SecKeychainCopyDomainDefault(kSecPreferencesDomainUser, &keyChain);
if (err != errSecSuccess)
{
emit Log("Failed to access system keychain: " + LogMessageForStatus(err));
return;
}
SecExternalFormat format = kSecFormatPKCS12;
SecExternalItemType itemType = kSecItemTypeAggregate;
SecItemImportExportFlags flags = 0;
SecItemImportExportKeyParameters params;
memset(&params, 0, sizeof(params));
params.version = SEC_KEY_IMPORT_EXPORT_PARAMS_VERSION;
params.flags = 0;
params.passphrase = password.toCFString();
params.alertTitle = NULL;
params.alertPrompt = NULL;
params.accessRef = NULL;
// create and populate the key usage array
CFMutableArrayRef keyUsage = CFArrayCreateMutable(
kCFAllocatorDefault,
0,
&kCFTypeArrayCallBacks
);
CFArrayAppendValue(keyUsage, kSecAttrCanEncrypt);
CFArrayAppendValue(keyUsage, kSecAttrCanDecrypt);
CFArrayAppendValue(keyUsage, kSecAttrCanDerive);
CFArrayAppendValue(keyUsage, kSecAttrCanSign);
CFArrayAppendValue(keyUsage, kSecAttrCanVerify);
CFArrayAppendValue(keyUsage, kSecAttrCanWrap);
CFArrayAppendValue(keyUsage, kSecAttrCanUnwrap);
keyUsage = NULL; // Error without this - Failed to import certificate: The key usage mask is not supported.
// create and populate the key attributes array
CFMutableArrayRef keyAttributes = CFArrayCreateMutable(
kCFAllocatorDefault, 0, &kCFTypeArrayCallBacks
);
// required for import
params.keyUsage = keyUsage;
params.keyAttributes = keyAttributes;
OSStatus status = SecItemImport(cert.toCFData(), CFSTR(".p12"), &format, &itemType, flags, &params, keyChain, NULL);
if(status == errSecSuccess)
emit Log("Certificate successfully imported");
else
{
emit Log("Failed to import certificate: " + LogMessageForStatus(status));
}
}
The certificate and private key appear in the keychain, as expected.
However, trying to retrieve the certificate is a problem, either programmatically or using the Keychain application.
If I select to export the private key from the keychain, I'm provided with the following error in a dialog: -
"An error has occurred. Unable to export an item. The contents of this item cannot be retrieved"
However, if the certificate and key are added to the keychain by double-clicking on the pfx, exporting the key works as expected.
So, why would the code above cause the problem of not being able to export the key?
With the assistance of Quinn at Apple, it seems that the method described in the question should work, but doesn't.
Using an old CDSA-style flag instead does in fact work, doing something like this: -
OSStatus err;
SecExternalFormat format;
SecItemImportExportKeyParameters params;
params.version = SEC_KEY_IMPORT_EXPORT_PARAMS_VERSION;
params.flags = 0;
params.passphrase = (__bridge CFStringRef) pkcs12Password;
params.alertTitle = NULL;
params.alertPrompt = NULL;
params.accessRef = NULL;
params.keyUsage = NULL;
params.keyAttributes = (__bridge CFArrayRef) @[ @(CSSM_KEYATTR_EXTRACTABLE) ];
format = kSecFormatPKCS12;
err = SecItemImport(
(__bridge CFDataRef) pkcs12Data,
CFSTR("p12"),
&format,
NULL,
0,
&params,
keychain,
NULL
);
Note the setting of params.keyAttributes, which defines the key to be extractable.
Alternatively, the older (deprecated) SecKeychainItemImport API may be used: -
BOOL success;
OSStatus err;
NSArray * result;
SecExternalFormat format;
SecKeyImportExportParameters params;
CFArrayRef importedItems;
result = nil;
importedItems = NULL;
format = kSecFormatPKCS12;
memset(&params, 0, sizeof(params));
params.passphrase = password;
params.keyAttributes = CSSM_KEYATTR_EXTRACTABLE;
err = SecKeychainItemImport(
(CFDataRef) pkcs12Blob, // importedData
NULL, // fileNameOrExtension
&format, // inputFormat
NULL, // itemType
0, // flags
&params, // keyParams
self->keychain, // importKeychain
&importedItems // outItems
);
success = (err == noErr);
While the function SecKeychainItemImport is defined as deprecated in Apple's documentation, I have been informed that it's unlikely to be removed any time soon.