Password to key function compatible with OpenSSL commands?

For example, the command:
openssl enc -aes-256-cbc -a -in test.txt -k pinkrhino -nosalt -p -out openssl_output.txt
outputs something like:
key = 33D890D33F91D52FC9B405A0DDA65336C3C4B557A3D79FE69AB674BE82C5C3D2
iv = 677C95C475C0E057B739750748608A49
How is that key generated? (C code as an answer would be too awesome to ask for :) )
Also, how is the iv generated?
Looks like some kind of hex to me.

OpenSSL uses the function EVP_BytesToKey. You can find the call to it in apps/enc.c. The enc utility used to apply the MD5 digest by default in this key derivation function (KDF) if you didn't specify a different digest with the -md argument; newer versions use SHA-256 by default. Here's a working example using MD5:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <openssl/evp.h>

int main(int argc, char *argv[])
{
    const EVP_CIPHER *cipher;
    const EVP_MD *dgst = NULL;
    unsigned char key[EVP_MAX_KEY_LENGTH], iv[EVP_MAX_IV_LENGTH];
    const char *password = "password";
    const unsigned char *salt = NULL;
    int i;

    OpenSSL_add_all_algorithms();

    cipher = EVP_get_cipherbyname("aes-256-cbc");
    if (!cipher) { fprintf(stderr, "no such cipher\n"); return 1; }

    dgst = EVP_get_digestbyname("md5");
    if (!dgst) { fprintf(stderr, "no such digest\n"); return 1; }

    if (!EVP_BytesToKey(cipher, dgst, salt,
                        (unsigned char *) password,
                        strlen(password), 1, key, iv))
    {
        fprintf(stderr, "EVP_BytesToKey failed\n");
        return 1;
    }

    /* Use the EVP_CIPHER_* accessors rather than poking at the struct, so this
       also builds against OpenSSL 1.1+, where EVP_CIPHER is opaque. */
    printf("Key: "); for (i = 0; i < EVP_CIPHER_key_length(cipher); ++i) { printf("%02x", key[i]); } printf("\n");
    printf("IV: "); for (i = 0; i < EVP_CIPHER_iv_length(cipher); ++i) { printf("%02x", iv[i]); } printf("\n");

    return 0;
}
Example usage:
gcc b2k.c -o b2k -lcrypto -g
./b2k
Key: 5f4dcc3b5aa765d61d8327deb882cf992b95990a9151374abd8ff8c5a7a0fe08
IV: b7b4372cdfbcb3d16a2631b59b509e94
Which generates the same key as this OpenSSL command line:
openssl enc -aes-256-cbc -k password -nosalt -p < /dev/null
key=5F4DCC3B5AA765D61D8327DEB882CF992B95990A9151374ABD8FF8C5A7A0FE08
iv =B7B4372CDFBCB3D16A2631B59B509E94
OpenSSL 1.1.0c changed the digest algorithm used in some internal components: formerly MD5 was used, and 1.1.0 switched to SHA-256. Be careful that this change does not bite you, both in your own calls to EVP_BytesToKey and in commands like openssl enc.
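If you need old and new OpenSSL versions to agree, you can pin the digest explicitly on both sides (this assumes your openssl build accepts the -md option, which current releases do):
openssl enc -aes-256-cbc -k password -nosalt -md md5 -p < /dev/null
and pass the matching digest name ("md5" or "sha256") to EVP_get_digestbyname() in the C code above, so both derive the same key and IV.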

If anyone is looking to implement the same thing in Swift, here is EVP_BytesToKey converted to Swift:
/*
- parameter keyLen: keyLen
- parameter ivLen: ivLen
- parameter digest: digest e.g "md5" or "sha1"
- parameter salt: salt
- parameter data: data
- parameter count: count
- returns: key and IV respectively
*/
public static func evpBytesToKey(_ keyLen: Int, ivLen: Int, digest: String, salt: [UInt8], data: Data, count: Int) -> [[UInt8]] {
    let saltData = Data(bytes: salt)
    var both = [[UInt8]](repeating: [UInt8](), count: 2)
    var key = [UInt8](repeating: 0, count: keyLen)
    var key_ix = 0
    var iv = [UInt8](repeating: 0, count: ivLen)
    var iv_ix = 0
    var nkey = keyLen
    var niv = ivLen
    var i = 0
    var md = Data()
    var md_buf: [UInt8]
    while true {
        // D_n = H(D_(n-1) || data || salt); on the first pass md is still empty.
        md.append(data)
        md.append(saltData)
        if digest == "md5" {
            md = md.md5()
        } else if digest == "sha1" {
            md = md.sha1()
        }
        // Extra hashing rounds for count > 1 ("openssl enc" uses count == 1).
        for _ in 1..<count {
            if digest == "md5" {
                md = md.md5()
            } else if digest == "sha1" {
                md = md.sha1()
            }
        }
        md_buf = [UInt8](md)
        // Smear the digest over the key first, then the IV, looping for more digest if needed.
        i = 0
        if nkey > 0 {
            while true {
                if nkey == 0 { break }
                if i == md_buf.count { break }
                key[key_ix] = md_buf[i]
                key_ix += 1
                nkey -= 1
                i += 1
            }
        }
        if niv > 0 && i != md_buf.count {
            while true {
                if niv == 0 { break }
                if i == md_buf.count { break }
                iv[iv_ix] = md_buf[i]
                iv_ix += 1
                niv -= 1
                i += 1
            }
        }
        if nkey == 0 && niv == 0 { break }
    }
    both[0] = key
    both[1] = iv
    return both
}
I use CryptoSwift for the hashes (md5()/sha1() above).
This is a much cleaner approach, as Apple does not recommend linking OpenSSL on iOS.
UPDATE: the code above is Swift 3.

Here is a version for mbedTLS / Polar SSL - tested and working.
typedef int bool;
#define false 0
#define true (!false)
//------------------------------------------------------------------------------
static bool EVP_BytesToKey( const unsigned int nDesiredKeyLen, const unsigned char* salt,
const unsigned char* password, const unsigned int nPwdLen,
unsigned char* pOutKey, unsigned char* pOutIV )
{
// This is a re-implementation of OpenSSL's password-to-key-and-IV routine for mbedTLS.
// (See openssl apps/enc.c and crypto/evp/evp_key.c.) It is not any kind of
// standard (e.g. PBKDF2), and it only uses an iteration count of 1, so it's
// pretty crappy. MD5 is used as the digest in OpenSSL 1.0.2; 1.1 and later
// use SHA-256. Since this is for an embedded system, I figure you know what you've
// got, so I made it compile-time configurable.
//
// The signature has been re-jiggered to make it less general.
//
// See: https://wiki.openssl.org/index.php/Manual:EVP_BytesToKey(3)
// And: https://www.cryptopp.com/wiki/OPENSSL_EVP_BytesToKey
#define IV_BYTE_COUNT 16
#if BTK_USE_MD5
# define DIGEST_BYTE_COUNT 16 // MD5
#else
# define DIGEST_BYTE_COUNT 32 // SHA
#endif
bool bRet;
unsigned char md_buf[ DIGEST_BYTE_COUNT ];
mbedtls_md_context_t md_ctx;
bool bAddLastMD = false;
unsigned int nKeyToGo = nDesiredKeyLen; // 32, typical
unsigned int nIVToGo = IV_BYTE_COUNT;
mbedtls_md_init( &md_ctx );
#if BTK_USE_MD5
int rc = mbedtls_md_setup( &md_ctx, mbedtls_md_info_from_type( MBEDTLS_MD_MD5 ), 0 );
#else
int rc = mbedtls_md_setup( &md_ctx, mbedtls_md_info_from_type( MBEDTLS_MD_SHA256 ), 0 );
#endif
if (rc != 0 )
{
fprintf( stderr, "mbedutils_md_setup() failed -0x%04x\n", -rc );
bRet = false;
goto exit;
}
while( 1 )
{
mbedtls_md_starts( &md_ctx ); // start digest
if ( bAddLastMD == false ) // first time
{
bAddLastMD = true; // do it next time
}
else
{
mbedtls_md_update( &md_ctx, &md_buf[0], DIGEST_BYTE_COUNT );
}
mbedtls_md_update( &md_ctx, &password[0], nPwdLen );
mbedtls_md_update( &md_ctx, &salt[0], 8 );
mbedtls_md_finish( &md_ctx, &md_buf[0] );
//
// Iteration loop here in original removed as unused by "openssl enc"
//
// Following code treats the output key and iv as one long, concatenated buffer
// and smears as much digest across it as is available. If not enough, it takes the
// big, enclosing loop, makes more digest, and continues where it left off on
// the last iteration.
unsigned int ii = 0; // index into mb_buf
if ( nKeyToGo != 0 ) // still have key to fill in?
{
while( 1 )
{
if ( nKeyToGo == 0 ) // key part is full/done
break;
if ( ii == DIGEST_BYTE_COUNT ) // ran out of digest, so loop
break;
*pOutKey++ = md_buf[ ii ]; // stick byte in output key
nKeyToGo--;
ii++;
}
}
if ( nIVToGo != 0 // still have IV to fill in
&& // and
ii != DIGEST_BYTE_COUNT // have some digest available
)
{
while( 1 )
{
if ( nIVToGo == 0 ) // iv is full/done
break;
if ( ii == DIGEST_BYTE_COUNT ) // ran out of digest, so loop
break;
*pOutIV++ = md_buf[ ii ]; // stick byte in output IV
nIVToGo--;
ii++;
}
}
if ( nKeyToGo == 0 && nIVToGo == 0 ) // output full, break main loop and exit
break;
} // outermost while loop
bRet = true;
exit:
mbedtls_md_free( &md_ctx );
return bRet;
}
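A hedged usage sketch for the routine above (the salt value, key size, and hex printing are my own illustrative assumptions; the salt buffer must be 8 bytes because the routine always hashes exactly 8 salt bytes, and since the function is static it has to sit in the same file):
#include <stdio.h>
#include <string.h>
#include "mbedtls/md.h"

// ... EVP_BytesToKey() re-implementation from above goes here ...

int main( void )
{
    const char* password = "password";
    unsigned char salt[ 8 ] = { 0 };   // illustrative only; "openssl enc" stores 8 salt bytes in the file header
    unsigned char key[ 32 ];           // e.g. an AES-256 key
    unsigned char iv[ 16 ];
    unsigned int i;

    if ( !EVP_BytesToKey( sizeof( key ), salt, (const unsigned char*)password,
                          (unsigned int)strlen( password ), key, iv ) )
        return 1;

    printf( "key=" ); for ( i = 0; i < sizeof( key ); i++ ) printf( "%02X", key[ i ] ); printf( "\n" );
    printf( "iv =" ); for ( i = 0; i < sizeof( iv ); i++ )  printf( "%02X", iv[ i ] );  printf( "\n" );
    return 0;
}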

If anyone passing through here is looking for a working, performant reference implementation in Haskell, here it is:
import Crypto.Hash
import qualified Data.ByteString as B
import Data.ByteArray (convert)
import Data.Monoid ((<>))
evpBytesToKey :: HashAlgorithm alg =>
                 Int -> Int -> alg -> Maybe B.ByteString -> B.ByteString -> (B.ByteString, B.ByteString)
evpBytesToKey keyLen ivLen alg mSalt password =
    let bytes = B.concat . take required . iterate go $ hash' passAndSalt
        (key, rest) = B.splitAt keyLen bytes
    in (key, B.take ivLen rest)
  where
    hash' = convert . hashWith alg
    required = 1 + ((keyLen + ivLen - 1) `div` hashDigestSize alg)
    passAndSalt = maybe password (password <>) mSalt
    go = hash' . (<> passAndSalt)
It uses hash algorithms provided by the cryptonite package. The arguments are the desired key and IV sizes in bytes, the hash algorithm to use (e.g. (undefined :: MD5)), an optional salt, and the password. The result is a tuple of key and IV.


Working with byte array I get an "invalid conversion from char* to byte"

I am reading and writing to an RFID tag using MFRC522.h
I can currently read the UID of a card and dump it to "UIDChar"
The UID of a card typically is 8 characters.
UID Example: 467EE9A9
I can use the mfrc522.MIFARE_SetUid function to write this UID to a new card. In order to do this I have to set the newUID to:
0x46,0x7E,0xE9,0xA9
I have written this into my code.
What I am wanting to do is convert the UID string into a byte array so that I can use that in place of my manually written 0x46,0x7E,0xE9,0xA9.
I use the convert function to convert the UID into that format.
It can then be displayed with "buf":
Serial.println(buf);
Now my problem. If I replace the
byte newUid[] = {0x46,0x7E,0xE9,0xA9};
with
byte newUid[] = {buf};
I get the error
invalid conversion from 'char*' to 'byte {aka unsigned char}'
How can I set my "newUid" as "buf"?
#define SS_PIN 0 //D2
#define RST_PIN 2 //D1
#include <SPI.h>
#include <MFRC522.h>
/* For RFID */
MFRC522 mfrc522(SS_PIN, RST_PIN); // Create MFRC522 instance.
char buf[40]; // For string to byte array convertor
void convert(char *s)
{
int i, j, k;
buf[0] = 0x0;
for (j = 0, i = 0, k = 0; j < strlen(s); j++)
{
if (i++ == 0) {
buf[k++] = '0';
buf[k++] = 'x';
}
buf[k++] = s[j];
if (i == 2) {
if(j != strlen(s) -1) buf[k++] = ',';
i = 0;
}
}
buf[k] = 0x0;
}
void clone() {
/* RFID Read */
// Look for new cards
if ( ! mfrc522.PICC_IsNewCardPresent())
{
return;
}
// Select one of the cards
if ( ! mfrc522.PICC_ReadCardSerial())
{
return;
}
//Show UID on serial monitor
Serial.println();
Serial.print(" UID tag :");
// Very basic UID dump
unsigned int hex_num;
hex_num = mfrc522.uid.uidByte[0] << 24;
hex_num += mfrc522.uid.uidByte[1] << 16;
hex_num += mfrc522.uid.uidByte[2] << 8;
hex_num += mfrc522.uid.uidByte[3];
// Get UID
int NFC_id = (int)hex_num;
Serial.print(NFC_id, HEX);
// Convert UID to string using an int and a base (hexadecimal)
String stringUID = String(NFC_id, HEX);
char UIDChar[10];
stringUID.toCharArray(UIDChar,10);
delay(1000);
Serial.println();
// Convert to uppercase
for (int i = 0; i < strlen(UIDChar); i++ )
{
if ( UIDChar[i] == NULL ) break;
UIDChar[i] = toupper(UIDChar[i]);
}
//Serial.print( &UIDChar[0] );
Serial.println();
convert(UIDChar);
Serial.println(buf);
/* RFID Write */
// Set new UID
// Change your UID hex string to 4 byte array
// I get error if I use byte newUid[] = {buf};
/* ERROR HERE */
byte newUid[] = {0x46,0x7E,0xE9,0xA9};
if ( mfrc522.MIFARE_SetUid(newUid, (byte)4, true) ) {
Serial.println( "Wrote new UID to card." );
}
// Halt PICC and re-select it so DumpToSerial doesn't get confused
mfrc522.PICC_HaltA();
if ( ! mfrc522.PICC_IsNewCardPresent() || ! mfrc522.PICC_ReadCardSerial() ) {
return;
}
// Dump the new memory contents
Serial.println( "New UID and contents:" );
mfrc522.PICC_DumpToSerial(&(mfrc522.uid));
}
void setup() {
Serial.begin ( 115200 );
/* RFID */
SPI.begin(); // Initiate SPI bus
mfrc522.PCD_Init(); // Initiate MFRC522
clone();
}
void loop() {
}
When you write
byte newUid[] = {buf};
you are trying to initialise newUid with a single element (there's only one item inside your {}), and that element is buf, which is a char* (or a char[]). That's why you get the error - you are trying to assign an array with one char* to a variable whose elements are bytes.
Without reading your full code in detail, I don't know why you are trying to do this assignment, rather than just use your buf array as it is. But to fix the problem, you probably just want to use
byte* newUid = buf;
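If the underlying goal is to turn the UID text (e.g. "467EE9A9") into actual byte values rather than into the text "0x46,0x7E,...", a small helper along these lines may be closer to what you want (a sketch; the function name and buffer sizes are illustrative, not from the original code):
#include <string.h>
#include <stdlib.h>

// Hypothetical helper: parse a hex string such as "467EE9A9" into raw bytes.
// Returns the number of bytes written to out (at most maxOut).
int hexStringToBytes(const char *s, byte *out, int maxOut)
{
    int n = 0;
    int len = strlen(s);
    for (int i = 0; i + 1 < len && n < maxOut; i += 2) {
        char pair[3] = { s[i], s[i + 1], '\0' };
        out[n++] = (byte)strtol(pair, NULL, 16);
    }
    return n;
}

// Usage inside clone(), instead of the hard-coded array:
//   byte newUid[10];
//   int uidLen = hexStringToBytes(UIDChar, newUid, sizeof(newUid));
//   // newUid now holds {0x46, 0x7E, 0xE9, 0xA9} and uidLen == 4
//   mfrc522.MIFARE_SetUid(newUid, (byte)uidLen, true);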

Missing events on ReadConsoleInput in windows shell?

As I proceed in my (possibly vain) attempt to reimplement a curses-style library that supports both *nix and Windows under an MIT license, I've stumbled onto a problem reading terminal input using the Windows API.
Basically, I don't get all the events I expect to, and I don't know why.
First I setup the terminal to be in non-buffering mode:
DWORD mode;
HANDLE hstdin = GetStdHandle( STD_INPUT_HANDLE );
// Save old mode
GetConsoleMode(hstdin, &mode);
// Set to no line-buffering, no echo, no special-key-processing
SetConsoleMode(hstdin, 0);
Then I use PeekConsoleInput and ReadConsoleInput in a loop to get non-blocking key-press input; the equivalent of using termios.h and select on stdin in Linux:
__EXPORT int sterm_read(void *state) {
DWORD dwRead;
INPUT_RECORD inRecords[1];
PeekConsoleInput(GetStdHandle(STD_INPUT_HANDLE), &inRecords[0], 1, &dwRead);
if (dwRead > 0) {
ReadConsoleInput(GetStdHandle(STD_INPUT_HANDLE), &inRecords[0], 1, &dwRead);
if (inRecords[0].EventType == KEY_EVENT) {
if (inRecords[0].Event.KeyEvent.bKeyDown) {
return inRecords[0].Event.KeyEvent.wVirtualKeyCode;
}
}
}
return -1;
}
Ignore the state variable; that's so the api can accept an arbitrary state struct on various platforms.
Now if I try to use this code:
#include <sterm.h>
#include <stdio.h>
#define assert(v, msg) if (!v) { printf("FAILED! %s", msg); return 1; }
int main(void) {
void *state = sterm_init();
int i;
char c;
for (;;) {
if ((c = sterm_read(state)) == 81) { // ie. press q to exit
break;
}
if (c != -1) {
sterm_write(state, &c, 1); // This is a thin wrapper around _write(1, ...)
}
}
sterm_shutdown(state);
return 0;
}
It almost works. I get the input character I press pushed out to the terminal... mostly.
Only about every 10th key press is recorded. If I type quickly, the API 'loses' events, and I get "HEO WLD" instead of "HELLO WORLD".
What's going on? Does ReadConsoleInput somehow clear the input buffer?
Am I doing something wrong? It seems almost like I'm only getting events based on a race condition which is 'is key pressed when PeekConsoleInput is called'.
...but surely that shouldn't be the case? The point of using these buffered I/O interfaces (instead of GetAsyncKeyState) is that the events should be buffered right?
Help!
I also have discovered that events are not guaranteed to stay around to be read.
This makes sense because otherwise the OS would need to provide lots and lots of buffering space.
The best I can do to deal with this is the following code, which does my own buffering,
but clearly pastes of more than 128 characters will often fail:
static int g_eaten_ct = 0; /* Re-eaten char */
static int g_eaten_ix = -1;
static int g_eaten[128];
void reeat(int c)
{ g_eaten_ct += 1;
g_eaten[g_eaten_ix + g_eaten_ct] = c; /* save the char for later */
}
void flush_typah()
{
g_eaten_ct = 0;
g_eaten_ix = -1;
while (_kbhit())
(void)ttgetc();
}
int ttgetc()
{ if (g_eaten_ct > 0)
{ g_eaten_ct -= 1;
return g_eaten[++g_eaten_ix];
}
{ int totalwait = g_timeout_secs;
int oix = -1;
while (1)
{ int got,need;
const DWORD lim = 1000;
INPUT_RECORD rec[32];
int cc = WaitForSingleObject(g_ConsIn, lim);
switch(cc)
{ case WAIT_OBJECT_0:
need = sizeof(g_eaten)/sizeof(g_eaten[0]) - oix;
if (need > 32)
need = 32;
cc = ReadConsoleInput(g_ConsIn,&rec[0],need,(DWORD*)&got);
if (cc && got > 0)
break;
#if _DEBUG
{ DWORD errn = GetLastError();
if (errn != 6)
mlwrite("%pError %d %d ", cc, errn);
}
#endif
continue;
case WAIT_TIMEOUT:
#if _DEBUG
if (g_got_ctrl)
{ g_got_ctrl = false;
return (int)(CTRL | 'C');
}
#endif
if (--totalwait == 0) // -w opt
exit(2);
// drop through
default:continue;
}
{ int ix = -1;
while (++ix < got)
{ INPUT_RECORD * r = &rec[ix];
if (r->EventType == KEY_EVENT && r->Event.KeyEvent.bKeyDown)
{ int ctrl = 0;
int keystate = r->Event.KeyEvent.dwControlKeyState;
if (keystate & (RIGHT_CTRL_PRESSED | LEFT_CTRL_PRESSED))
{ ctrl |= CTRL;
g_chars_since_ctrl = 0;
}
{ int chr = r->Event.KeyEvent.wVirtualKeyCode;
if (in_range(chr, 0x10, 0x12))
continue; /* shifting key only */
if (keystate & (RIGHT_ALT_PRESSED | LEFT_ALT_PRESSED))
ctrl |= ALTD;
else
chr = r->Event.KeyEvent.uChar.AsciiChar & 0xff;
if (/*chr != 0x7c && */ (chr | 0x60) != 0x7c) // | BSL < or ^ BSL
{ int vsc = r->Event.KeyEvent.wVirtualScanCode;
if (in_range(vsc, SCANK_STT, 0x58))
{ ctrl |= SPEC;
chr = scantokey[vsc - SCANK_STT];
}
// else if (in_range(vsc, 2, 10) && chr == 0)
// chr = '0' - 1 + vsc;
}
if ((keystate & SHIFT_PRESSED) && ctrl) // exclude e.g. SHIFT 7
ctrl |= SHFT;
g_eaten[++oix] = ctrl | (chr == 0xdd ? 0x7c : chr);
++g_chars_since_ctrl;
}}
else if (r->EventType == MENU_EVENT)
{ /*loglog1("Menu %x", r->Event.MenuEvent.dwCommandId);*/
}
}
if (got == need && oix < sizeof(g_eaten) / sizeof(int))
{ PeekConsoleInput(g_ConsIn, &rec[0], 1, (DWORD*)&got);
if (got > 0)
continue;
}
if (oix >= 0)
{ g_eaten_ct = oix;
g_eaten_ix = 0;
return g_eaten[0];
}
}}
}}
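As a smaller illustration of the same batching idea (a sketch, not the poster's library code; names and buffer sizes are illustrative): block on the input handle, then drain a batch of records with one ReadConsoleInput call instead of peeking a single record per poll:
#include <windows.h>
#include <stdio.h>

/* Minimal sketch: wait until console input is signaled, then read a batch of
   records and report the virtual-key codes of key-down events. Press Q to quit. */
int main(void)
{
    HANDLE hIn = GetStdHandle(STD_INPUT_HANDLE);
    DWORD oldMode = 0;
    GetConsoleMode(hIn, &oldMode);
    SetConsoleMode(hIn, 0);                 /* no line buffering, no echo */
    for (;;) {
        INPUT_RECORD recs[32];
        DWORD got = 0;
        if (WaitForSingleObject(hIn, INFINITE) != WAIT_OBJECT_0)
            break;
        if (!ReadConsoleInput(hIn, recs, 32, &got))
            break;
        for (DWORD i = 0; i < got; ++i) {
            if (recs[i].EventType == KEY_EVENT && recs[i].Event.KeyEvent.bKeyDown) {
                WORD vk = recs[i].Event.KeyEvent.wVirtualKeyCode;
                if (vk == 'Q') { SetConsoleMode(hIn, oldMode); return 0; }
                printf("key down: vk=0x%02X\n", (unsigned)vk);
            }
        }
    }
    SetConsoleMode(hIn, oldMode);
    return 0;
}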

Decryption of MS Office

I'm working on decrypting an encrypted MS Excel file (RC4 encryption with SHA-1); the password is already known. In VS2010 I could decrypt it correctly; however, my program has to work under both Windows and Linux, and I have no idea how to get the encryption key under Linux right now. Under Windows it looks something like this:
int getEncrypKey(HCRYPTKEY *hKey, int blocknum)
{
//------------------------H0 = H(salt, password)-----
BYTE *pbSaltandPwdHash = NULL;
DWORD dwSaltandPwdLen = 0;
pbSaltandPwdHash = SHA1_2(psalt, 16, ppwd, strlen(pwd)/2, &dwSaltandPwdLen);
printf("SHA1 of SaltandPwd:\n");
for(DWORD i = 0 ; i < dwSaltandPwdLen ; i++) {
printf("%2.2x ",pbSaltandPwdHash[i]);
}
printf("\n");
//------------------------H0 = H(salt, password)-----
//------------------------Hfinal = H(H0, block)-----
HCRYPTHASH hHash1 = 0;
CryptCreateHash( hCryptProv, CALG_SHA1, 0, 0, &hHash1) ;
CryptHashData( hHash1, pbSaltandPwdHash, dwSaltandPwdLen, 0) ;
CryptHashData( hHash1, (unsigned char*)&blocknum, sizeof(blocknum), 0) ;
//------------------------Hfinal = H(H0, block)-----
CryptDeriveKey(hCryptProv, CALG_RC4, hHash1, 0x00280000, hKey);
if(hHash1 != 0) CryptDestroyHash(hHash1);
if(pbSaltandPwdHash != NULL) free(pbSaltandPwdHash);
return 0;
}
I know how to get H0 under Linux, but I don't know how to get hHash1 and hKey.
This post sounds like it does the same thing: Implement Windows CryptoAPI CryptDeriveKey Using OpenSSL APIs
A more general way of generating hashes in openssl is below:
Before you do anything:
#include <openssl/evp.h>
int main(int argc, char *argv[]) // or in an "initialise" type function
{
OpenSSL_add_all_digests();
...
}
Then to generate the hash (error checking omitted):
const EVP_MD *digest;
EVP_MD_CTX context;
unsigned char hash[EVP_MAX_MD_SIZE];
unsigned int hash_len;
digest = EVP_get_digestbyname("sha1"); /* choose the hash type here */
EVP_MD_CTX_init(&context);
EVP_DigestInit_ex(&context, digest, NULL);
EVP_DigestUpdate(&context, pbSaltandPwdHash, dwSaltandPwdLen);
EVP_DigestUpdate(&context, &blocknum, sizeof(blocknum));
EVP_DigestFinal_ex(&context, hash, &hash_len);
EVP_MD_CTX_cleanup(&context);
/* Now use hash and hash_len as required */
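To then turn that hash into an RC4 decryption key with OpenSSL, something along these lines should work. This is a hedged sketch, not verified against real files: it assumes (for the 40-bit CryptoAPI mode implied by the 0x00280000 flag above, as described in MS-OFFCRYPTO) that the effective RC4 key is the first 5 bytes of the final hash zero-padded to 16 bytes, and the ciphertext buffers are placeholders:
/* ASSUMPTION: 40-bit CryptoAPI RC4 key = first 5 bytes of hash, zero-padded to
   16 bytes. Verify this against MS-OFFCRYPTO before relying on it. */
unsigned char rc4_key[16] = { 0 };
memcpy(rc4_key, hash, 5);               /* needs <string.h> */
unsigned char ciphertext[1024];         /* placeholder: the encrypted block from the file */
unsigned char plaintext[1024];
int ciphertext_len = 0, outlen = 0;
EVP_CIPHER_CTX rc4_ctx;
EVP_CIPHER_CTX_init(&rc4_ctx);
EVP_DecryptInit_ex(&rc4_ctx, EVP_rc4(), NULL, NULL, NULL);
EVP_CIPHER_CTX_set_key_length(&rc4_ctx, sizeof(rc4_key));
EVP_DecryptInit_ex(&rc4_ctx, NULL, NULL, rc4_key, NULL);  /* RC4 takes no IV */
EVP_DecryptUpdate(&rc4_ctx, plaintext, &outlen, ciphertext, ciphertext_len);
EVP_CIPHER_CTX_cleanup(&rc4_ctx);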

OSX FSEventStreamEventFlags not working correctly

I am watching a directory for file system events. Everything seems to work fine with one exception. When I create a file the first time, it spits out that it was created. Then I can remove it and it says it was removed. When I go to create the same file again, I get both a created and removed flag at the same time. I obviously am misunderstanding how the flags are being set when the callback is being called. What is happening here?
//
// main.c
// GoFSEvents
//
// Created by Kyle Cook on 8/22/13.
// Copyright (c) 2013 Kyle Cook. All rights reserved.
//
#include <CoreServices/CoreServices.h>
#include <stdio.h>
#include <string.h>
void eventCallback(FSEventStreamRef stream, void* callbackInfo, size_t numEvents, void* paths, const FSEventStreamEventFlags eventFlags[], const FSEventStreamEventId eventIds[]) {
char **pathsList = paths;
for(int i = 0; i<numEvents; i++) {
uint32 flag = eventFlags[i];
uint32 created = kFSEventStreamEventFlagItemCreated;
uint32 removed = kFSEventStreamEventFlagItemRemoved;
if(flag & removed) {
printf("Item Removed: %s\n", pathsList[i]);
}
else if(flag & created) {
printf("Item Created: %s\n", pathsList[i]);
}
}
}
int main(int argc, const char * argv[])
{
CFStringRef mypath = CFSTR("/path/to/dir");
CFArrayRef paths = CFArrayCreate(NULL, (const void **)&mypath, 1, NULL);
CFRunLoopRef loop = CFRunLoopGetMain();
FSEventStreamRef stream = FSEventStreamCreate(NULL, (FSEventStreamCallback)eventCallback, NULL, paths, kFSEventStreamEventIdSinceNow, 1.0, kFSEventStreamCreateFlagFileEvents | kFSEventStreamCreateFlagNoDefer);
FSEventStreamScheduleWithRunLoop(stream, loop, kCFRunLoopDefaultMode);
FSEventStreamStart(stream);
CFRunLoopRun();
FSEventStreamStop(stream);
FSEventStreamInvalidate(stream);
FSEventStreamRelease(stream);
return 0;
}
As far as I can tell, you will have to look for either kFSEventStreamEventFlagItemRemoved or kFSEventStreamEventFlagItemCreated, and then use stat() or similar to check whether the file was in fact added or deleted. The FSEvents documentation seems to hint at this.
It looks like the API is OR'ing the event bits together... so really it's an OR of all the changes made since the FSEventStream was created. Since that seems to be the case, another option might be to create a new FSEventStream each time (and use the coalesce timer option).
I did some Googling, but didn't find other examples of this problem or even apple sample code, but I didn't spend too long on it.
I have previously used the kqueue API: https://gist.github.com/nielsbot/5155671 (This gist is an obj-c wrapper around kqueue)
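As a concrete illustration of the stat() idea above (a minimal sketch, not from the original post; the helper name is mine):
#include <CoreServices/CoreServices.h>
#include <sys/stat.h>
#include <stdio.h>

// Hypothetical helper: when an event carries both the Created and Removed bits,
// check whether the path currently exists to decide which change "won".
static void reportEvent(const char *path, FSEventStreamEventFlags flag)
{
    int created = (flag & kFSEventStreamEventFlagItemCreated) != 0;
    int removed = (flag & kFSEventStreamEventFlagItemRemoved) != 0;
    if (created && removed) {
        struct stat sb;
        if (stat(path, &sb) == 0)
            printf("Item Created (file exists now): %s\n", path);
        else
            printf("Item Removed (file is gone now): %s\n", path);
    }
    else if (removed) printf("Item Removed: %s\n", path);
    else if (created) printf("Item Created: %s\n", path);
}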
I changed your sample code to show all flags set for each FSEvent:
#include <CoreServices/CoreServices.h>
#include <stdio.h>
#include <string.h>
static int __count = 0 ;
void eventCallback(FSEventStreamRef stream, void* callbackInfo, size_t numEvents, void* paths, const FSEventStreamEventFlags eventFlags[], const FSEventStreamEventId eventIds[]) {
char **pathsList = paths;
printf("callback #%u\n", ++__count ) ;
const char * flags[] = {
"MustScanSubDirs",
"UserDropped",
"KernelDropped",
"EventIdsWrapped",
"HistoryDone",
"RootChanged",
"Mount",
"Unmount",
"ItemCreated",
"ItemRemoved",
"ItemInodeMetaMod",
"ItemRenamed",
"ItemModified",
"ItemFinderInfoMod",
"ItemChangeOwner",
"ItemXattrMod",
"ItemIsFile",
"ItemIsDir",
"ItemIsSymlink",
"OwnEvent"
} ;
for(int i = 0; i<numEvents; i++)
{
printf("%u\n", i ) ;
printf("\tpath %s\n", pathsList[i]) ;
printf("\tflags: ") ;
long bit = 1 ;
for( int index=0, count = sizeof( flags ) / sizeof( flags[0]); index < count; ++index )
{
if ( ( eventFlags[i] & bit ) != 0 )
{
printf("%s ", flags[ index ] ) ;
}
bit <<= 1 ;
}
printf("\n") ;
}
FSEventStreamFlushSync( stream ) ;
}
int main(int argc, const char * argv[])
{
CFStringRef path = CFStringCreateWithCString( kCFAllocatorDefault, argv[1], kCFStringEncodingUTF8 ) ;
CFArrayRef paths = CFArrayCreate(NULL, (const void **)&path, 1, &kCFTypeArrayCallBacks );
if ( path ) { CFRelease( path ) ; }
CFRunLoopRef loop = CFRunLoopGetCurrent() ;
FSEventStreamRef stream = FSEventStreamCreate(NULL, (FSEventStreamCallback)eventCallback, NULL, paths, kFSEventStreamEventIdSinceNow, 0, kFSEventStreamCreateFlagFileEvents );
if ( paths ) { CFRelease( paths ) ; }
FSEventStreamScheduleWithRunLoop(stream, loop, kCFRunLoopDefaultMode);
FSEventStreamStart(stream);
CFRunLoopRun() ;
FSEventStreamStop(stream);
FSEventStreamInvalidate(stream);
FSEventStreamRelease(stream);
return 0;
}

Getting the OS version in Mac OS X using standard C

I'm trying to get the version of Mac OS X programmatically in C. After searching for a while I tried this code:
#include <CoreServices/CoreServices.h>
int GetOS()
{
SInt32 majorVersion,minorVersion,bugFixVersion;
Gestalt(gestaltSystemVersionMajor, &majorVersion);
Gestalt(gestaltSystemVersionMinor, &minorVersion);
Gestalt(gestaltSystemVersionBugFix, &bugFixVersion);
printf("Running on Mac OS X %d.%d.%d\n",majorVersion,minorVersion,bugFixVersion);
return 0;
}
Xcode returns an ld (linker) error:
Undefined symbols for architecture x86_64:
"_Gestalt", referenced from:
_GetOS in main.o
What am I missing? How do you do this?
I also found this snippet
[[NSProcessInfo processInfo] operatingSystemVersionString]
But I have no idea how to write that in C.
Did you pass the appropriate framework to GCC in order to enable CoreServices?
% gcc -framework CoreServices -o getos main.c
The code below should work for the foreseeable future for figuring out the current version of Mac OS X.
/* McUsr put this together, and into the public domain,
   without any guarantees about anything,
   but the statement that it works for me.
*/
#if 1 == 1
#define TESTING
#endif
#include <sys/param.h>
#include <sys/sysctl.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
struct osver {
int minor;
int sub;
} ;
typedef struct osver osxver ;
void macosx_ver(char *darwinversion, osxver *osxversion ) ;
char *osversionString(void) ;
#ifdef TESTING
int main( int argc, char *argv[] )
{
osxver foundver;
char *osverstr= NULL ;
osverstr=osversionString() ;
macosx_ver(osverstr, &foundver ) ;
printf("Mac os x version = 10.%d.%d\n",foundver.minor,foundver.sub );
free(osverstr);
return 0;
}
#endif
char *osversionString(void) {
int mib[2];
size_t len;
char *kernelVersion=NULL;
mib[0] = CTL_KERN;
mib[1] = KERN_OSRELEASE;
if (sysctl(mib, 2, NULL, &len, NULL, 0) < 0 ) {
fprintf(stderr,"%s: Error during sysctl probe call!\n",__PRETTY_FUNCTION__ );
fflush(stdout);
exit(4) ;
}
kernelVersion = malloc(len );
if (kernelVersion == NULL ) {
fprintf(stderr,"%s: Error during malloc!\n",__PRETTY_FUNCTION__ );
fflush(stdout);
exit(4) ;
}
if (sysctl(mib, 2, kernelVersion, &len, NULL, 0) < 0 ) {
fprintf(stderr,"%s: Error during sysctl get verstring call!\n",__PRETTY_FUNCTION__ );
fflush(stdout);
exit(4) ;
}
return kernelVersion ;
}
void macosx_ver(char *darwinversion, osxver *osxversion ) {
/*
From the book Mac OS X and iOS Internals:
In version 10.1.1, Darwin (the core OS) was renumbered from v1.4.1 to 5.1,
and since then has followed the OS X numbers consistently by being four
numbers ahead of the minor version, and aligning its own minor with the
sub-version.
*/
char firstelm[8] = {0}, secElm[8] = {0}; /* room for multi-digit components plus the terminating NUL */
if (strlen(darwinversion) < 5 ) {
fprintf(stderr,"%s: %s Can't possibly be a version string. Exiting\n",__PRETTY_FUNCTION__,darwinversion);
fflush(stdout);
exit(2);
}
char *s=darwinversion,*t=firstelm,*curdot=strchr(darwinversion,'.' );
while ( s != curdot )
*t++ = *s++;
t=secElm ;
curdot=strchr(++s,'.' );
while ( s != curdot )
*t++ = *s++;
int maj=0, min=0;
maj= (int)strtol(firstelm, (char **)NULL, 10);
if ( maj == 0 && errno == EINVAL ) {
fprintf(stderr,"%s Error during conversion of version string\n",__PRETTY_FUNCTION__);
fflush(stdout);
exit(4);
}
min=(int)strtol(secElm, (char **)NULL, 10);
if ( min == 0 && errno == EINVAL ) {
fprintf(stderr,"%s: Error during conversion of version string\n",__PRETTY_FUNCTION__);
fflush(stdout);
exit(4);
}
osxversion->minor=maj-4;
osxversion->sub=min;
}
Here is one with "less work", good enough for home projects (statically allocated buffers, ignoring errors). Works for me in OS X 10.11.1.
#include <stdio.h>
/*!
 @brief Returns one component of the OS version
 @param component 1=major, 2=minor, 3=bugfix
 */
int GetOSVersionComponent(int component) {
char cmd[64] ;
sprintf(
cmd,
"sw_vers -productVersion | awk -F '.' '{print $%d}'",
component
) ;
FILE* stdoutFile = popen(cmd, "r") ;
int answer = 0 ;
if (stdoutFile) {
char buff[16] ;
/* Don't shadow stdio's stdout, and guard against fgets() returning NULL. */
if (fgets(buff, sizeof(buff), stdoutFile)) {
sscanf(buff, "%d", &answer) ;
}
pclose(stdoutFile) ;
}
return answer ;
}
int main(int argc, const char * argv[]) {
printf(
"Your OS version is: %d.%d.%d\n",
GetOSVersionComponent(1),
GetOSVersionComponent(2),
GetOSVersionComponent(3)
) ;
return 0 ;
}
Using the hint from @uchuugaka in the comment on the answer by @McUsr, I wrote a function that seems to work. I'm not saying it's better than any other answer.
/*
* Structure for MacOS version number
*/
typedef struct macos_version_str
{
ushort major;
ushort minor;
ushort point;
} macos_type;
/****************************************************************************
*
* Determine the MacOS version.
*
* Parameters:
* version_struct: (pointer to) macos_version structure to be filled in.
*
* Return value:
* 0: no error.
*
****************************************************************************/
static int get_macos_version ( macos_type *version_struct )
{
char os_temp [20] = "";
char *os_temp_ptr = os_temp;
size_t os_temp_len = sizeof(os_temp);
size_t os_temp_left = 0;
int rslt = 0;
version_struct->major = 0;
version_struct->minor = 0;
version_struct->point = 0;
rslt = sysctlbyname ( "kern.osproductversion", os_temp, &os_temp_len, NULL, 0 );
if ( rslt != 0 )
{
fprintf ( stderr,
"sysctlbyname() returned %d error (%d): %s",
rslt, errno, strerror(errno));
return ( rslt );
}
os_temp_left = os_temp_len; /* length of string returned */
version_struct->major = atoi ( os_temp_ptr );
while ( os_temp_left > 0 && *os_temp_ptr != '.' )
{
os_temp_left--;
os_temp_ptr++;
}
os_temp_left--;
os_temp_ptr++;
version_struct->minor = atoi ( os_temp_ptr );
while ( os_temp_left > 0 && *os_temp_ptr != '.' )
{
os_temp_left--;
os_temp_ptr++;
}
os_temp_left--;
os_temp_ptr++;
version_struct->point = atoi ( os_temp_ptr );
fprintf ( stderr, "Calculated OS Version: %d.%d.%d", version_struct->major, version_struct->minor, version_struct->point );
if ( version_struct->major == 0 ||
version_struct->minor == 0 ||
version_struct->point == 0 )
{
fprintf ( stderr, "Unable to parse MacOS version string %s", os_temp );
return ( -2 );
}
return 0;
}
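A hedged usage sketch (the includes and main() are assumptions added for illustration; get_macos_version() is static, so this has to sit in the same file, and kern.osproductversion is only available on reasonably recent macOS releases):
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/sysctl.h>

/* ... macos_type and get_macos_version() from above go here ... */

int main ( void )
{
    macos_type ver;
    if ( get_macos_version ( &ver ) == 0 )
    {
        printf ( "macOS %d.%d.%d\n", ver.major, ver.minor, ver.point );
        return 0;
    }
    return 1;
}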
If for whatever reason you want to avoid the Gestalt API (which still works fine, but is deprecated), the macosx_deployment_target.c in cctools contains a code snippet that uses the CTL_KERN + KERN_OSRELEASE sysctl(), similar to other answers here.
Here's a small program adapted from that code and taking macOS 11 and newer (tested and verified with up to macOS 12.6, which was at time of updating this post the latest stable release) into account:
#include <stdio.h>
#include <sys/sysctl.h>
int main()
{
char osversion[32];
size_t osversion_len = sizeof(osversion) - 1;
int osversion_name[] = { CTL_KERN, KERN_OSRELEASE };
if (sysctl(osversion_name, 2, osversion, &osversion_len, NULL, 0) == -1) {
printf("sysctl() failed\n");
return 1;
}
uint32_t major, minor;
if (sscanf(osversion, "%u.%u", &major, &minor) != 2) {
printf("sscanf() failed\n");
return 1;
}
if (major >= 20) {
major -= 9;
// macOS 11 and newer
printf("%u.%u\n", major, minor);
} else {
major -= 4;
// macOS 10.1.1 and newer
printf("10.%u.%u\n", major, minor);
}
return 0;
}
