How to use ldap_sasl_bind in WinLDAP? - c

I currently use ldap_bind_s to bind to the server in my C application with a SEC_WINNT_AUTH_IDENTITY struct, but the function is marked as deprecated. For this reason I would like to change it to the ldap_sasl_bind_s function.
#include <stdio.h>
#include <windows.h>
#include <winldap.h>

int main(void) {
    LDAP *ld;
    int rc = 0;
    char *binddn = "cn=admin,dc=local";
    const int version = LDAP_VERSION3;
    SEC_WINNT_AUTH_IDENTITY wincreds;
    struct berval saslcred;

    wincreds.User = (unsigned short *)"admin";
    wincreds.UserLength = 5;
    wincreds.Password = (unsigned short *)"secret";
    wincreds.PasswordLength = 6;
    wincreds.Domain = NULL;
    wincreds.DomainLength = 0;
    wincreds.Flags = SEC_WINNT_AUTH_IDENTITY_ANSI;

    ld = ldap_initA("localhost", LDAP_PORT);
    ldap_set_optionA(ld, LDAP_OPT_PROTOCOL_VERSION, (void *)&version);

    rc = ldap_bind_sA(ld, binddn, (PCHAR)&wincreds, LDAP_AUTH_DIGEST);
    printf("0x%x\n", rc); // It's OK (0x0)
    ldap_unbind(ld);

    saslcred.bv_val = "secret";
    saslcred.bv_len = 6;

    rc = ldap_sasl_bind_sA(ld, binddn, "DIGEST-MD5", &saslcred, NULL, NULL, NULL);
    printf("0x%x\n", rc); // Returns with 0x59
    ldap_unbind(ld);
    return 0;
}
ldap_sasl_bind_s returns with the LDAP_PARAM_ERROR code. Clearly, the function parameters above are wrong, but I can't find a working sample of SASL binding with WinLDAP.
I would be grateful for some guidance on how to make this code work.

The last parameter of ldap_sasl_bind_sA cannot be NULL. It has to point to a location where the function can store the server's response (a struct berval *).
...
struct berval* serverResponse = NULL;
rc = ldap_sasl_bind_sA(ld, binddn, "DIGEST-MD5", &saslcred, NULL, NULL, &serverResponse);
...

So finally, after some research and debugging over the past two weeks, I've managed to write a working example that uses DIGEST-MD5 authentication with WinLDAP's ldap_sasl_bind_s function. The corresponding RFC, this answer and the official SSPI documentation helped me a lot.
Some gotchas that I ran into:
Regardless of what the documentation says about the ldap_connect function: if you would like to use ldap_sasl_bind_s, calling ldap_connect first is not just "good programming practice", it is necessary. Without it, ldap_sasl_bind_s returns with the LDAP_SERVER_DOWN (0x51) error code.
A valid pszTargetName (digest-uri) parameter is crucial for the InitializeSecurityContext function to avoid an invalid token error; see the short sketch below.
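To illustrate just these two points, here is a minimal sketch; the host name and the "ldap/localhost" target (SPN) are placeholder assumptions, and the complete, working example follows below:

ld = ldap_init(L"localhost", LDAP_PORT);
ldap_set_option(ld, LDAP_OPT_PROTOCOL_VERSION, (void *)&version);
rc = ldap_connect(ld, NULL); // skipping this makes ldap_sasl_bind_s fail with 0x51
/* The token passed to ldap_sasl_bind_s must be built with a valid target,
   e.g. InitializeSecurityContext(..., L"ldap/localhost", ...); otherwise
   it fails with SEC_E_INVALID_TOKEN. */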
I hope this will help others spend less time figuring out how to use SASL bind mechanisms with WinLDAP.
#include <stdio.h>
#include <tchar.h>
#include <windows.h>
#include <winldap.h>
#define SECURITY_WIN32 1
#include <security.h>
#include <sspi.h>
int _tmain(int argc, _TCHAR* argv[]) {
    LDAP *ld;
    int rc = 0;
    const int version = LDAP_VERSION3;
    SEC_WINNT_AUTH_IDENTITY wincreds;
    struct berval *servresp = NULL;
    SECURITY_STATUS res;
    CredHandle credhandle;
    CtxtHandle newhandle;
    SecBufferDesc OutBuffDesc;
    SecBuffer OutSecBuff;
    SecBufferDesc InBuffDesc;
    SecBuffer InSecBuff;
    unsigned long contextattr;

    ZeroMemory(&wincreds, sizeof(wincreds));

    // Set credential information
    wincreds.User = (unsigned short *)L"root";
    wincreds.UserLength = 4;
    wincreds.Password = (unsigned short *)L"p#ssword";
    wincreds.PasswordLength = 8;
    wincreds.Domain = NULL;
    wincreds.DomainLength = 0;
    wincreds.Flags = SEC_WINNT_AUTH_IDENTITY_UNICODE;

    res = AcquireCredentialsHandle(NULL, L"WDigest", SECPKG_CRED_OUTBOUND,
                                   NULL, &wincreds, NULL, NULL, &credhandle, NULL);

    // Buffer for the output token.
    OutBuffDesc.ulVersion = 0;
    OutBuffDesc.cBuffers = 1;
    OutBuffDesc.pBuffers = &OutSecBuff;
    OutSecBuff.cbBuffer = 0;
    OutSecBuff.BufferType = SECBUFFER_TOKEN;
    OutSecBuff.pvBuffer = NULL;

    ld = ldap_init(L"localhost", LDAP_PORT);
    rc = ldap_set_option(ld, LDAP_OPT_PROTOCOL_VERSION, (void *)&version);
    rc = ldap_connect(ld, NULL); // Need to connect before SASL bind!

    do {
        if (servresp != NULL) {
            InBuffDesc.ulVersion = 0;
            InBuffDesc.cBuffers = 1;
            InBuffDesc.pBuffers = &InSecBuff;
            /* The digest-challenge is passed as an input buffer to
               the InitializeSecurityContext function */
            InSecBuff.cbBuffer = servresp->bv_len;
            InSecBuff.BufferType = SECBUFFER_TOKEN;
            InSecBuff.pvBuffer = servresp->bv_val;
            /* OutBuffDesc will contain the digest-response. */
            res = InitializeSecurityContext(&credhandle, &newhandle, L"ldap/localhost",
                                            ISC_REQ_MUTUAL_AUTH | ISC_REQ_ALLOCATE_MEMORY,
                                            0, 0, &InBuffDesc, 0, &newhandle, &OutBuffDesc,
                                            &contextattr, NULL);
        } else {
            /* ISC_REQ_ALLOCATE_MEMORY lets SSPI allocate the output token,
               since OutSecBuff.pvBuffer is NULL. */
            res = InitializeSecurityContext(&credhandle, NULL, L"ldap/localhost",
                                            ISC_REQ_MUTUAL_AUTH | ISC_REQ_ALLOCATE_MEMORY,
                                            0, 0, NULL, 0, &newhandle, &OutBuffDesc,
                                            &contextattr, NULL);
        }

        switch (res) {
        case SEC_I_COMPLETE_NEEDED:
        case SEC_I_COMPLETE_AND_CONTINUE:
        case SEC_E_OK:
        case SEC_I_CONTINUE_NEEDED:
            break;
        case SEC_E_INVALID_HANDLE:
            return -2;
        case SEC_E_INVALID_TOKEN:
            return -1;
        default:
            break;
        }

        struct berval cred;
        cred.bv_len = OutSecBuff.cbBuffer;
        /* The digest-response is passed to the server as the
           credential on the second (loop) run. */
        cred.bv_val = (char *)OutSecBuff.pvBuffer;

        // servresp will contain the digest-challenge after the first call.
        rc = ldap_sasl_bind_s(ld, L"", L"DIGEST-MD5", &cred, NULL, NULL, &servresp);
        ldap_get_option(ld, LDAP_OPT_ERROR_NUMBER, &res);
    } while (res == LDAP_SASL_BIND_IN_PROGRESS);

    if (rc != LDAP_SUCCESS) {
        printf("Bind failed with 0x%x\n", rc);
    } else {
        printf("Bind succeeded\n");
    }
    return 0;
}

Related

Why isn't the kernel receiving my generic netlink messages?

I'm trying to send nested attributes from user space to the kernel using generic netlink. The function nl_send_auto() returns 52 (which is supposed to be the number of bytes sent to the kernel), but the kernel isn't receiving the messages. Is there some problem with my approach? Here is the code that I wrote for user space:
int err = -1;
struct nl_msg *msg;
struct nlattr *attr;
struct nl_sock *sock;
int family;
int send = 0;

if ((sock = nl_socket_alloc()) == NULL)
    return err;
if ((err = genl_connect(sock)))
    return err;
if ((family = genl_ctrl_resolve(sock, FAMILY)) < 0)
    return family;
if ((msg = nlmsg_alloc()) == NULL)
    return err;
if ((genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, FAMILY, 0,
                 NLM_F_REQUEST, CREATE_STATE, 1)) == NULL)
    return err;
if (!(attr = nla_nest_start(msg, KLUA_NL_STATE))) {
    nla_nest_cancel(msg, attr);
    return err;
}
if ((ret = nla_put_string(msg, STATE_NAME, cmd->name)) ||
    (ret = nla_put_u32(msg, MAX_ALLOC, cmd->maxalloc)) ||
    (ret = nla_put_u32(msg, CURR_ALLOC, cmd->curralloc)))
    return err;
nla_nest_end(msg, attr);
if ((send = nl_send_auto(ctrl->sock, msg)) < 0)
    return send;
printf("All done sended %d bytes\n", send);
nlmsg_free(msg);
This code prints 52, which is the number of bytes sent to the kernel.
The FAMILY macro is defined as (both in kernel and user space):
#define FAMILY "family"
My netlink attributes are (both for kernel and user space):
enum {
    KLUA_NL_STATE,
    STATE_NAME,
    MAX_ALLOC,
    CURR_ALLOC,
    ATTR_COUNT,
#define ATTR_MAX (ATTR_COUNT - 1)
};
My enum for operation is:
enum {
    CREATE_STATE = 16,
};
And my kernel code is:
struct nla_policy lunatik_policy[ATTR_MAX] = {
    [KLUA_NL_STATE] = { .type = NLA_NESTED },
};

static int klua_create_state(struct sk_buff *buff, struct genl_info *info);

static const struct genl_ops l_ops[] = {
    {
        .cmd = CREATE_STATE,
        .doit = klua_create_state,
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)
        /* Before kernel 5.2.0, each operation has its own policy */
        .policy = lunatik_policy
#endif
    },
};

#define KLUA_NL_STATE_ATTRS_COUNT 3

struct genl_family lunatik_family = {
    .name = FAMILY,
    .version = 1,
    .maxattr = ATTR_MAX,
    .netnsok = true,
    .policy = lunatik_policy,
    .module = THIS_MODULE,
    .ops = l_ops,
    .n_ops = ARRAY_SIZE(l_ops),
};

static int klua_create_state(struct sk_buff *buff, struct genl_info *info)
{
    pr_info("I received the message\n");
    return 0;
}
This code doesn't print anything on dmesg, and I would like to know why.
Your actual problems
During Linux 5.2 refactors, the semantics of the NLA_F_NESTED flag changed somewhat. It appears you now need to always include it when you call nla_nest_start():
if (!(attr = nla_nest_start(msg, KLUA_NL_STATE))){
...
}
Should be
if (!(attr = nla_nest_start(msg, NLA_F_NESTED | KLUA_NL_STATE))){
...
}
Yes, I'm well aware the libnl library should obviously do this for you, and will perhaps do so in the future, but unfortunately this is where we are now.
Also:
enum {
KLUA_NL_STATE,
...
};
Attribute zero is always reserved. You need to change that to
enum {
KLUA_NL_STATE = 1,
...
};
Just FYI: operation zero is also reserved, so it's fortunate that you chose 16. But do keep it in mind in the future.
Syntactic issues
These are probably just copy-paste errors, but I'm including them anyway for the benefit of other people landing on this page looking for examples.
if ((genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, FAMILY, 0,
NLM_F_REQUEST, CREATE_STATE, 1)) == NULL)
return err;
Should be
if ((genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
NLM_F_REQUEST, CREATE_STATE, 1)) == NULL)
return err;
Also:
if ((ret = nla_put_string(msg, STATE_NAME, cmd->name)) ||
(ret = nla_put_u32(msg, MAX_ALLOC, cmd->maxalloc)) ||
(ret = nla_put_u32(msg, CURR_ALLOC, cmd->curralloc))
)
return err;
Should be
if ((err = nla_put_string(msg, STATE_NAME, cmd->name)) ||
(err = nla_put_u32(msg, MAX_ALLOC, cmd->maxalloc)) ||
(err = nla_put_u32(msg, CURR_ALLOC, cmd->curralloc))
)
return err;
Also:
if ((send = nl_send_auto(ctrl->sock, msg)) < 0)
return send;
Should be
if ((send = nl_send_auto(sock, msg)) < 0)
return send;

How to "CreateChild()".UEFI

I wonder how I can access the TCP4 protocol to use it in my DXE driver.
I already asked a similar question: myFirstQestion
Now I can't find a solution to the error: I get my debug string "Can't create child" when trying to load the driver. My code is below.
Or maybe somebody knows how to complete the task from the link with a DXE driver.
Thank you.
#include "Uefi.h"
#include <Protocol/Tcp4.h>
#include <Library/UefiDriverEntryPoint.h>
#include <Library/UefiBootServicesTableLib.h>
#include <Protocol/ServiceBinding.h>
#include <Protocol/SimpleNetwork.h>
#include <Library/DebugLib.h>
EFI_STATUS
EFIAPI
Net1DriverDxeEntry(IN EFI_HANDLE ImageHandle, IN EFI_SYSTEM_TABLE *SystemTable)
{
    EFI_TCP4_PROTOCOL *TCP4protocol = NULL;
    EFI_SERVICE_BINDING_PROTOCOL *TCP4ServiceBinding = NULL;
    EFI_SIMPLE_NETWORK_PROTOCOL *SimpleNetworkProtocol = NULL;
    EFI_HANDLE *HandleBuffer = NULL;
    EFI_HANDLE *TCP4Handle = NULL;
    UINTN HandleCount;
    UINTN i;
    CHAR16 *Deb1 = L"Simple network protocol not found\r\n";
    CHAR16 *Deb2 = L"TCP4 protocol not found\r\n";
    CHAR16 *Deb3 = L"Can't create child\r\n";
    CHAR16 *Deb4 = L"Can't handle protocol\n\r\n";

    EFI_STATUS Status = gBS->LocateProtocol(&gEfiSimpleNetworkProtocolGuid, NULL, (VOID **)&SimpleNetworkProtocol);
    if (EFI_ERROR(Status))
    {
        DEBUG((-1, "ShowStatus: Simple network protocol not found\n"));
        gST->ConOut->OutputString(gST->ConOut, Deb1);
        return EFI_UNSUPPORTED;
    }

    Status = gBS->LocateHandleBuffer(ByProtocol, &gEfiTcp4ProtocolGuid, NULL, &HandleCount, &HandleBuffer);
    if (EFI_ERROR(Status))
    {
        gST->ConOut->OutputString(gST->ConOut, Deb2);
        DEBUG((-2, "ShowStatus: TCP4 protocol not found\n"));
        return EFI_UNSUPPORTED;
    }

    for (i = 0; i < HandleCount; i++)
    {
        Status = gBS->HandleProtocol(HandleBuffer[i], &gEfiTcp4ServiceBindingProtocolGuid, (VOID **)&TCP4ServiceBinding);
        if (EFI_ERROR(Status))
        {
            DEBUG((-3, "\n"));
            gST->ConOut->OutputString(gST->ConOut, Deb3);
            return EFI_UNSUPPORTED;
        }
        TCP4ServiceBinding->CreateChild(TCP4ServiceBinding, TCP4Handle);
        Status = gBS->HandleProtocol(TCP4Handle, &gEfiTcp4ProtocolGuid, (VOID **)&TCP4protocol);
        if (EFI_ERROR(Status))
        {
            gST->ConOut->OutputString(gST->ConOut, Deb4);
            DEBUG((-4, "ShowStatus: Can't handle protocol\n"));
            return EFI_UNSUPPORTED;
        }
    }
    return EFI_SUCCESS;
}
TCP4Handle should be of type EFI_HANDLE not EFI_HANDLE*.
EFI_HANDLE TCP4Handle = NULL;
//....
TCP4ServiceBinding->CreateChild(TCP4ServiceBinding, &TCP4Handle);
And you must look for gEfiTcp4ServiceBindingProtocolGuid not gEfiTcp4ProtocolGuid.
Status = gBS->LocateHandleBuffer(ByProtocol,&gEfiTcp4ServiceBindingProtocolGuid, NULL, &HandleCount,&HandleBuffer);
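Putting the two fixes together, the relevant part of the loop might look like this (a sketch based on the question's code; checking CreateChild's return status is an extra precaution, not something the original code did):

EFI_HANDLE TCP4Handle = NULL; // a value, not a pointer
//....
Status = gBS->LocateHandleBuffer(ByProtocol, &gEfiTcp4ServiceBindingProtocolGuid,
                                 NULL, &HandleCount, &HandleBuffer);
//....
for (i = 0; i < HandleCount; i++)
{
    // Ask each handle for the service binding protocol, not the TCP4 protocol.
    Status = gBS->HandleProtocol(HandleBuffer[i], &gEfiTcp4ServiceBindingProtocolGuid,
                                 (VOID **)&TCP4ServiceBinding);
    if (EFI_ERROR(Status))
    {
        return EFI_UNSUPPORTED;
    }
    // CreateChild fills in the handle value, so pass its address.
    Status = TCP4ServiceBinding->CreateChild(TCP4ServiceBinding, &TCP4Handle);
    if (EFI_ERROR(Status))
    {
        return EFI_UNSUPPORTED;
    }
    Status = gBS->HandleProtocol(TCP4Handle, &gEfiTcp4ProtocolGuid, (VOID **)&TCP4protocol);
}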

Libwebsocket Client + Server all in one

I'm currently building a C Tasker. The goal of the program is to connect to a Main Server via WebSockets and keep the connection alive (client watchdog) for as long as possible.
In the meantime, the Tasker should be able to receive JSON from clients (server behaviour), process it accordingly and answer.
To perform this I use GLib's multithreading functions and the Libwebsockets library for the network part.
The C client/server Tasker is working well, so the next step is: when the connection with the Main Server is lost, try to reconnect "forever" until the Main Server is up again, or fall back to another Main Server.
The problem is that when I cut the connection with the server, the watchdog thread seems to ignore the reconnection, as the thread is completely isolated from the libwebsockets callbacks.
Here are the code snippets :
Call of Tasker Client in Tasker Server init
int janus_websockets_init(janus_transport_callbacks *callback, const char *config_path) {
    if (g_atomic_int_get(&stopping)) {
        return -1;
    }
    if (callback == NULL || config_path == NULL)
        return -1;
    gateway = callback;
    Client_start();
Tasker Server configuration
struct lws_context_creation_info info;
memset(&info, 0, sizeof info);
info.port = wsport;
info.iface = ip ? ip : interface;
info.protocols = wss_protocols;
info.extensions = NULL;
info.ssl_cert_filepath = NULL;
info.ssl_private_key_filepath = NULL;
info.gid = -1;
info.uid = -1;
info.options = 0;
/* Create the WebSocket context */
wss = lws_create_context(&info);
if (wss == NULL) {
    ZETA_LOG(LOG_FATAL, "Error initializing libwebsockets...\n");
} else {
    ZETA_LOG(LOG_INFO, "WebSockets server started (port %d)...\n", wsport);
}
Tasker Client Configuration
int
Client_start()
{
    /* register the signal SIGINT handler */
    struct lws_context_creation_info info;
    memset(&info, 0, sizeof info);
    info.port = CONTEXT_PORT_NO_LISTEN;
    info.iface = NULL;
    info.protocols = wss_protocols;
    info.ssl_cert_filepath = NULL;
    info.ssl_private_key_filepath = NULL;
    info.extensions = lws_get_internal_extensions();
    info.gid = -1;
    info.uid = -1;
    info.options = 0;

    protocol.name = "janus-protocol";
    protocol.callback = &ws_client_service_callback;
    protocol.per_session_data_size = sizeof(struct session_data);
    protocol.rx_buffer_size = 0;
    protocol.id = 0;
    protocol.user = NULL;

    context = lws_create_context(&info);
Tasker Client Watchdog Thread Call
    if (context == NULL) {
        printf(KRED"[Main Service] context is NULL.\n"RESET);
        return -1;
    }
    wsz = lws_client_connect(context, "xxx.xxx.xxx.xxx", 5000, 0,
                             "/", "xxx.xxx.xxx.xxx:5000", NULL,
                             protocol.name, -1);
    if (wsz == NULL) {
        printf(KRED"[Main] wsi create error.\n"RESET);
        return -1;
    }
    reco = false;
    printf(KGRN"[Main] wsi create success.\n"RESET);
    printf(KRED"[Main Service] %d\n"RESET, ConnOk);
    keepAlive = g_thread_try_new("KeepAliveLoop", keep_alive_routine, context, NULL);
    while (!dead)
        lws_service(context, 50);
    lws_context_destroy(context);
    return 0;
}
Finally, the Tasker client thread routine loop and the thread loop execution code:
static void keep_alive_loop()
{
    json_t *hb = NULL;
    char *toto = NULL;

    if (reco == true) {
        wsz = lws_client_connect(context, "xxx.xxx.xxx.xxx", 5000, 0,
                                 "/", "xxx.xxx.xxx.xxx:5000", NULL,
                                 protocol.name, -1);
        printf(KRED"[Main Service] UNDER LOOOOOOPING .\n"RESET);
        reco = false;
        return;
    }
    hb = json_object();
    json_object_set_new(hb, "zetapush", json_string("keepalive"));
    json_object_set_new(hb, "timeout", json_integer(8000));
    toto = json_dumps(hb, 0);
    websocket_client_keep_alive_write_back(hb);
    lws_callback_on_writable(wsz);
}

static void *keep_alive_routine(void *data)
{
    printf(KBRN"[pthread_routine] WATCHDOG READY AND ANGRY .\n"RESET);
    g_timeout_add_seconds(8, keep_alive_loop, NULL);
    alive_loop = g_main_loop_new(NULL, 0);
    g_main_loop_run(alive_loop);
}
If any further detail is needed, let me know; I can also upload the code to GitHub for better reading. Any help or hint would be much appreciated :).
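For reference, the usual way to drive a reconnect flag like reco is from inside the libwebsockets client callback itself, since only the service loop learns about disconnects. A minimal sketch (the wiring to reco is an assumption for illustration, not code from the question):

static int ws_client_service_callback(struct lws *wsi, enum lws_callback_reasons reason,
                                      void *user, void *in, size_t len)
{
    switch (reason) {
    case LWS_CALLBACK_CLIENT_CONNECTION_ERROR:
    case LWS_CALLBACK_CLOSED:
        reco = true; /* the watchdog loop will then attempt the reconnect */
        break;
    default:
        break;
    }
    return 0;
}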

event_new() function fails on hpux itanium

I'm trying to debug code that uses the libevent library. In that library, there is a function event_new that is supposed to create an event_cb. Somehow, after I dispatch the event base, the event_cb cannot be called or accessed. This problem only happens on HP-UX Itanium; the code works on HP-UX PA-RISC, Red Hat, AIX, and Solaris. Is there anything specific that needs to be set?
This is part of the code
int ttypread (int fd, Header *h, char **buf)
{
    int c, k;
    struct user_data user_data;
    struct bufferevent *in_buffer;
    struct event_config *evconfig;

    log_debug("inside ttypread");
    in_buffer = NULL;
    user_data.fd = fd;
    user_data.h = h;
    user_data.buf = buf;
    log_debug("from user_data, fd = %d", user_data.fd); // log_debug is a debugging function I use to check the values passed by the system and compare between platforms
    log_debug("from user_data, buf = %s", user_data.buf);
    log_debug("from user_data, h.len = %d", user_data.h->len);
    log_debug("from user_data, h.type = %d", user_data.h->type);

    evconfig = event_config_new();
    if (evconfig == NULL) {
        log_error("event_config_new failed");
        return -1;
    }
    if (event_config_require_features(evconfig, EV_FEATURE_FDS) != 0) {
        log_error("event_config_require_features failed");
        return -1;
    }
    base = event_base_new_with_config(evconfig);
    if (!base) {
        log_error("ttypread:event_base_new failed");
        return -1;
    }

    const char *method; // these 3 lines were added in the edit below
    method = event_base_get_method(base);
    log_debug("ttyread is using method = %s", method);

    ev = event_new(base, fd, EV_READ|EV_PERSIST, ttypread_event_cb, &user_data);
    c = event_add(ev, NULL);
    log_debug("ttypread passed event_add with c value is %d", c);

    in_buffer = bufferevent_socket_new(base, STDIN_FILENO, BEV_OPT_CLOSE_ON_FREE);
    log_debug("ttypread passed bufferevent_socket_new");
    if (in_buffer == NULL) {
        log_debug("problem with bufferevent_socket_new");
    }
    bufferevent_setcb(in_buffer, in_read_cb, NULL, in_event_cb, NULL);
    bufferevent_disable(in_buffer, EV_WRITE);
    bufferevent_enable(in_buffer, EV_READ);

    k = event_base_dispatch(base);
    log_debug("event_base have been dispatched"); // in the debug log, the other platforms go into the ttypread_event_cb function here; HP-UX Itanium stays here

    if (k == 0) {
        log_debug("event_base_dispatch returned 0");
    } else if (k == -1) {
        log_debug("event_base_dispatch returned -1");
    } else {
        log_debug("event_base_dispatch returned 1");
    }
    event_base_free(base);
    event_free(ev);
    log_debug("finish ttypread");
    log_debug("ttypread_ret will return [%d]", ttypread_ret);
    return ttypread_ret;
}
void ttypread_event_cb(evutil_socket_t fd, short events, void *arg)
{
    int nread;
    struct timeval t;
    struct user_data *user_data;

    user_data = (struct user_data *)arg;
    nread = 0;
    log_debug("inside ttypread_event_cb");

    if (events & EV_READ) {
        log_debug("got events & EV_READ");
        nread = ttyread(fd, user_data->h, user_data->buf);
        if (nread == -1) {
            ttypread_ret = -1;
            event_del(ev);
            event_base_loopexit(base, NULL);
        } else if (nread == 0) {
            if (access(input_filename, F_OK) != 0) {
                log_debug("cannot access [%s]", input_filename);
                tcsetattr(0, TCSANOW, &old); /* Return terminal state */
                exit(EXIT_SUCCESS);
            }
            t.tv_sec = 0;
            t.tv_usec = 250000;
            select(0, 0, 0, 0, &t);
        } else {
            ttypread_ret = 1;
            event_del(ev);
            event_base_loopexit(base, NULL);
        }
    } else if (events & EV_WRITE) {
        log_debug("got events & EV_WRITE");
    }
}
Not sure if this helps, but here is some info on the HP-UX Itanium box:
uname -a = HP-UX hpux-ita B.11.23 U ia64
If you need any additional info or other declaration on function, just leave a comment and I will edit the question.
EDIT: I've added a function inside ttypread. Somehow on HP-UX Itanium it returns devpoll while the other platforms return poll. I'm not sure if this is the problem, but if it is, is there any way for me to change it?
After checking the result from event_base_get_method, I found out that only my HP-UX Itanium box used the devpoll method. This is how I solved it.
char string[8] = "devpoll";
struct user_data user_data;
struct bufferevent *in_buffer;
struct event_config *evconfig;
const char *method;
const char *devpoll;

devpoll = string;
in_buffer = NULL;
user_data.fd = fd;
user_data.h = h;
user_data.buf = buf;

evconfig = event_config_new();
if (evconfig == NULL) {
    log_error("event_config_new failed");
    return -1;
}
if (event_config_require_features(evconfig, EV_FEATURE_FDS) != 0) {
    log_error("event_config_require_features failed");
    return -1;
}
if (event_config_avoid_method(evconfig, devpoll) != 0) {
    log_error("Failed to ignore devpoll method");
}
This forces libevent to avoid the devpoll method and use poll instead.
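To verify the workaround, one can create the base from this config and log the backend again, reusing the question's event_base_get_method call (a small sketch, not part of the original fix):

base = event_base_new_with_config(evconfig);
if (!base) {
    log_error("event_base_new_with_config failed");
    return -1;
}
/* Should now report "poll" instead of "devpoll" */
log_debug("now using method = %s", event_base_get_method(base));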

Trying to get socket from wininet HTTP connection

We have a legacy application that implements an SSL tunnel over Web proxies. It uses the wininet API and it worked fine for years on XP, but now it fails on Windows 7. I've tried to isolate the code and made a small program to reproduce the problem. It's a small C program compiled with MSVC 9. See below.
On Windows 7, once connected to the proxy (status code 200), I just cannot get the socket descriptor from the API. All I get is an INVALID_SOCKET, even though all wininet functions returned successfully and GetLastError() returned 0.
On the XP machine, all works fine and the returned socket is valid.
Does anyone have any idea?
Thank you very much in advance.
#include <windows.h>
#include <wininet.h>
#include <stdio.h>
const char *_connect()
{
    HINTERNET hOpen = 0;
    HINTERNET hConnect = 0;
    HINTERNET hRequest = 0;
    int remotePort = 443;
    const char *remoteHost = "a.b.c.d"; // Cannot disclose

    hOpen = InternetOpen("wininet-test", INTERNET_OPEN_TYPE_PRECONFIG, 0, 0, 0);
    if (!hOpen) return "InternetOpen";

    hConnect = InternetConnect(hOpen, remoteHost, remotePort, 0, 0, INTERNET_SERVICE_HTTP, 0, 0);
    if (!hConnect) return "InternetConnect";

    {
        DWORD flags =
            INTERNET_FLAG_CACHE_IF_NET_FAIL |
            INTERNET_FLAG_IGNORE_CERT_CN_INVALID |
            INTERNET_FLAG_IGNORE_CERT_DATE_INVALID |
            INTERNET_FLAG_KEEP_CONNECTION |
            INTERNET_FLAG_NO_CACHE_WRITE |
            INTERNET_FLAG_PRAGMA_NOCACHE |
            INTERNET_FLAG_RELOAD |
            INTERNET_FLAG_RESYNCHRONIZE |
            INTERNET_FLAG_SECURE;
        char url[100];
        sprintf(url, "http://%s:%d/", remoteHost, remotePort);
        hRequest = HttpOpenRequest(hConnect, "GET", "connect.html", "HTTP/1.0", url, 0, flags, 0);
        if (!hRequest) return "HttpOpenRequest";
    }
    {
        DWORD flags = 0;
        DWORD bufferLength = sizeof(flags);
        if (!InternetQueryOption(hRequest, INTERNET_OPTION_SECURITY_FLAGS, &flags, &bufferLength)) {
            return "InternetQueryOption";
        }
        flags |= (SECURITY_FLAG_IGNORE_UNKNOWN_CA | SECURITY_FLAG_IGNORE_REVOCATION);
        if (!InternetSetOption(hRequest, INTERNET_OPTION_SECURITY_FLAGS, &flags, sizeof(flags))) {
            return "InternetSetOption";
        }
    }
    if (!HttpSendRequest(hRequest, 0, 0, 0, 0)) {
        return "HttpSendRequest";
    } else {
        char buffer[4];
        DWORD bufferSize = sizeof(buffer);
        if (!HttpQueryInfo(hRequest, HTTP_QUERY_STATUS_CODE, &buffer, &bufferSize, NULL)) {
            return "HttpQueryInfo";
        } else if (atoi(buffer) != 200) {
            return "status code";
        }
    }
    {
        INTERNET_DIAGNOSTIC_SOCKET_INFO idsi;
        DWORD bufferSize = sizeof(idsi);
        if (!InternetQueryOption(hRequest, INTERNET_OPTION_DIAGNOSTIC_SOCKET_INFO, &idsi, &bufferSize)) {
            return "InternetQueryOption";
        } else if (idsi.Socket == INVALID_SOCKET) {
            /* This is always the case on our Windows 7 platform, why? */
            return "invalid socket";
        }
    }
    return 0;
}

int main(int argc, const char **argv)
{
    const char *error = _connect();
    if (error) {
        printf("ERROR: %s (%d)\n", error, GetLastError());
    } else {
        printf("SUCCESS\n");
    }
    return 0;
}
Is the HTTP request using keep-alives? If not, then my guess would be that WinInet under Win7 is invalidating the socket handle after closing it when receiving the server's response, whereas XP does not invalidate the socket.
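If it is not, a quick way to test the keep-alive theory is to send the request with an explicit header (a sketch; the header string is an illustration, and INTERNET_FLAG_KEEP_CONNECTION in the question's flags should normally have the same effect):

/* Ask WinInet for a persistent connection before querying the socket.
   -1L tells HttpSendRequest the header string is null-terminated. */
const char *headers = "Connection: Keep-Alive\r\n";
if (!HttpSendRequest(hRequest, headers, (DWORD)-1L, 0, 0)) {
    return "HttpSendRequest";
}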
From MSDN
INTERNET_OPTION_DIAGNOSTIC_SOCKET_INFO 67
Retrieves an INTERNET_DIAGNOSTIC_SOCKET_INFO structure that contains data about a specified HTTP Request. This flag is used by InternetQueryOption.
Windows 7: This option is no longer supported.
Are you sure your project is not compiling for Unicode? If it is, you need to change your char declarations to wchar_t and prefix your string constants with L, like:
const wchar_t * pszTmp = L"hello world";
