I am adding the PAGE_GUARD protection to pages of the current thread's stack via VirtualProtect. However, on accessing the stack, the exception handler I have installed is never executed.
I assume the OS just silently swallows the Guard-Page-Violation Exceptions because it uses guard pages to manage stack growth and stack overflows.
Is it in this case possible to catch these exceptions at all ?
Here is my C-code
#include <stdio.h>
#include <Windows.h>
#include <assert.h>
// Last-chance exception filter: log the raw exception code and let the
// system's default processing continue (EXCEPTION_CONTINUE_SEARCH).
LONG WINAPI BlanketExceptionHandler(struct _EXCEPTION_POINTERS *ExceptionInfo) {
    DWORD code = ExceptionInfo->ExceptionRecord->ExceptionCode;
    printf("exception code: %lx\n", code);
    return EXCEPTION_CONTINUE_SEARCH;
}
int main(void) {
    ULONG_PTR stackBot = 0, stackTop = 0;
    DWORD savedProtect = 0;

    // Register the last-chance exception filter for this process.
    SetUnhandledExceptionFilter(BlanketExceptionHandler);

    // Query the committed/reserved range of the current thread's stack.
    GetCurrentThreadStackLimits(&stackBot, &stackTop);
    assert(stackBot < stackTop);

    // Re-protect the entire stack range as guard pages.
    if (!VirtualProtect((LPVOID) stackBot, stackTop - stackBot, PAGE_READWRITE | PAGE_GUARD, &savedProtect)) {
        fprintf(stderr, "[Error]: Could not add guard pages: %ld\n", GetLastError());
        return 1;
    }

    // Touch a page inside the stack. NOTE: for faults inside the thread's own
    // stack range the kernel consumes the guard-page exception itself (stack
    // growth handling), so the installed filter is not expected to run.
    *(DWORD*) (stackTop - 0x1500) = 0xdeadbeef;
    return 0;
}
Code needs to be compiled with
cl /nologo main.c /link /STACK:0x100000,0x100000
Running the code gives no output from the BlanketExceptionHandler. But, using VirtualQuery, I have observed that the stack pages have the correct protections.
I have also tried installing the exception handler through AddVectoredExceptionHandler but that did not work either.
Is it in this case possible to catch these exceptions at all ?
No. If you access a guard page, the exception (like any exception) is first handled by kernel code. If the exception occurred within the current thread's stack range, the kernel handler removes PAGE_GUARD from the faulting page, ensures that several PAGE_GUARD pages exist below it, and adjusts StackLimit := PAGE_ALIGN(MemoryAccessAddress) in the NT_TIB (you can check this). It does not call user-mode exception handlers (unless this was the last page in the stack).
code example:
// Map a MEMORY_BASIC_INFORMATION.State value to a printable name.
PCSTR GetStateSz(ULONG State)
{
    if (State == MEM_FREE)    return "FREE";
    if (State == MEM_COMMIT)  return "COMMIT";
    if (State == MEM_RESERVE) return "RESERVE";
    return "?";
}
// Map a MEMORY_BASIC_INFORMATION.Type value to a printable name.
PCSTR GetTypeSz(ULONG Type)
{
    if (Type == MEM_IMAGE)   return "IMAGE";
    if (Type == MEM_MAPPED)  return "MAPPED";
    if (Type == MEM_PRIVATE) return "PRIVATE";
    return "?";
}
// Render a page-protection bitmask as " | NAME | NAME ..." into psz.
// psz: output buffer, cch: its capacity in characters.
void FormatProtect(ULONG Protect, PSTR psz, ULONG cch)
{
    static const ULONG pp[] = {
        PAGE_NOACCESS,
        PAGE_READONLY,
        PAGE_READWRITE,
        PAGE_WRITECOPY,
        PAGE_EXECUTE,
        PAGE_EXECUTE_READ,
        PAGE_EXECUTE_READWRITE,
        PAGE_EXECUTE_WRITECOPY,
        PAGE_GUARD,
        PAGE_NOCACHE,
        PAGE_WRITECOMBINE,
    };
    static const PCSTR ss[] = {
        "NOACCESS",
        "READONLY",
        "READWRITE",
        "WRITECOPY",
        "EXECUTE",
        "EXECUTE_READ",
        "EXECUTE_READWRITE",
        "EXECUTE_WRITECOPY",
        "GUARD",
        "NOCACHE",
        "WRITECOMBINE",
    };
    ULONG n = _countof(pp);
    // FIX: if Protect has none of the known bits set (e.g. Protect == 0 for
    // MEM_FREE / MEM_RESERVE regions) the loop below never writes anything,
    // and the caller would print an uninitialized buffer. Always start with
    // a valid empty string.
    if (cch) *psz = 0;
    do
    {
        if (Protect & pp[--n])
        {
            int len = sprintf_s(psz, cch, " | %s", ss[n]);
            if (0 >= len)
            {
                break; // buffer exhausted or formatting error: stop appending
            }
            psz += len, cch -= len;
        }
    } while (n);
}
// Thread entry: walk all regions of the allocation containing BaseAddress
// and print [start, end) page-count state/type/protection for each.
ULONG WINAPI PrintProtect_I(PVOID BaseAddress)
{
    PVOID AllocationBase = BaseAddress;
    ::MEMORY_BASIC_INFORMATION mbi;
    while (VirtualQuery(BaseAddress, &mbi, sizeof(mbi)) &&
           mbi.AllocationBase == AllocationBase)
    {
        PVOID RegionEnd = (PBYTE)mbi.BaseAddress + mbi.RegionSize;
        CHAR szProtect[0x100];
        FormatProtect(mbi.Protect, szProtect, _countof(szProtect));
        DbgPrint("[%p, %p) [%X] %s %s { %s }\n", mbi.BaseAddress,
                 RegionEnd,
                 mbi.RegionSize >> PAGE_SHIFT, GetStateSz(mbi.State), GetTypeSz(mbi.Type), szProtect);
        BaseAddress = RegionEnd; // advance to the next region
    }
    return 0;
}
// Dump the region map from a *separate* thread (so the dump itself does not
// disturb the current thread's stack), then print the current thread's
// StackLimit/StackBase from the TEB.
void PrintProtect(PVOID BaseAddress)
{
    HANDLE hThread = CreateThread(0, 0, PrintProtect_I, BaseAddress, 0, 0);
    if (hThread)
    {
        WaitForSingleObject(hThread, INFINITE);
        CloseHandle(hThread);
    }
    PNT_TIB tib = reinterpret_cast<PNT_TIB>(NtCurrentTeb());
    DbgPrint("[%p, %p) << Current Stack\n\n", tib->StackLimit, tib->StackBase);
}
// Thread entry: mark one page at pv as a guard page (from another thread so
// the current thread's stack machinery is not involved).
ULONG WINAPI SetGuard(PVOID pv)
{
    ULONG previousProtect;
    BOOL ok = VirtualProtect(pv, PAGE_SIZE, PAGE_EXECUTE_READWRITE | PAGE_GUARD, &previousProtect);
    return ok;
}
// Demonstration: set PAGE_GUARD on a page inside the current thread's stack
// (from a helper thread), touch it, and show how the kernel silently
// consumes the guard fault and moves StackLimit — no user handler runs.
void GDemo()
{
// Reserve 64 KiB of the current stack so we have a page safely below the
// live stack frames to experiment on.
PVOID stack = alloca(0x10000);
PBYTE pb = (PBYTE)PAGE_ALIGN((PBYTE)stack + 0x8000);
ULONG_PTR a, b;
GetCurrentThreadStackLimits(&a, &b);
DbgPrint("[%p, %p) << Stack Region\n\n", a, b);
// Baseline region map.
PrintProtect((PVOID)a);
// Set the guard bit from ANOTHER thread: VirtualProtect on one's own live
// stack pages is what the question attempted; a helper thread avoids
// touching them while changing protection.
if (HANDLE hThread = CreateThread(0, 0, SetGuard, pb, 0, 0))
{
WaitForSingleObject(hThread, INFINITE);
CloseHandle(hThread);
// Map after the guard bit is set on page pb.
PrintProtect((PVOID)a);
// Touch the guard page: the kernel clears PAGE_GUARD and adjusts
// StackLimit (visible in the third dump) instead of calling user handlers.
*pb = 0;
PrintProtect((PVOID)a);
}
}
and output:
[000000EAAE4C0000, 000000EAAE5C0000) << Stack Region
[000000EAAE4C0000, 000000EAAE5AB000) [EB] RESERVE PRIVATE { }
[000000EAAE5AB000, 000000EAAE5AE000) [3] COMMIT PRIVATE { | GUARD | READWRITE }
[000000EAAE5AE000, 000000EAAE5C0000) [12] COMMIT PRIVATE { | READWRITE }
[000000EAAE5AE000, 000000EAAE5C0000) << Current Stack
[000000EAAE4C0000, 000000EAAE5AB000) [EB] RESERVE PRIVATE { }
[000000EAAE5AB000, 000000EAAE5AE000) [3] COMMIT PRIVATE { | GUARD | READWRITE }
[000000EAAE5AE000, 000000EAAE5B7000) [9] COMMIT PRIVATE { | READWRITE }
[000000EAAE5B7000, 000000EAAE5B8000) [1] COMMIT PRIVATE { | GUARD | EXECUTE_READWRITE }
[000000EAAE5B8000, 000000EAAE5C0000) [8] COMMIT PRIVATE { | READWRITE }
[000000EAAE5AE000, 000000EAAE5C0000) << Current Stack
[000000EAAE4C0000, 000000EAAE5AB000) [EB] RESERVE PRIVATE { }
[000000EAAE5AB000, 000000EAAE5AE000) [3] COMMIT PRIVATE { | GUARD | READWRITE }
[000000EAAE5AE000, 000000EAAE5B4000) [6] COMMIT PRIVATE { | READWRITE }
[000000EAAE5B4000, 000000EAAE5B7000) [3] COMMIT PRIVATE { | GUARD | READWRITE }
[000000EAAE5B7000, 000000EAAE5B8000) [1] COMMIT PRIVATE { | EXECUTE_READWRITE }
[000000EAAE5B8000, 000000EAAE5C0000) [8] COMMIT PRIVATE { | READWRITE }
[000000EAAE5B7000, 000000EAAE5C0000) << Current Stack
Related
I have been working on a project in which the analog values are sampled at a particular frequency and stored in an array. Then the value will be sent to user application ESP32 using BLE. But I got stuck in this error.
/home/runner/work/esp32-arduino-lib-builder/esp32-arduino-lib-builder/esp-idf/components/freertos/queue.c:1443
(xQueueGenericReceive)- assert failed! abort() was called at PC
0x4008e1d5 on core 1
Backtrace: 0x40091b38:0x3ffe0b20 0x40091d69:0x3ffe0b40
0x4008e1d5:0x3ffe0b60 0x400d1a2d:0x3ffe0ba0 0x4008e525:0x3ffe0be0
Rebooting... ets Jun 8 2016 00:22:57
rst:0xc (SW_CPU_RESET),boot:0x13 (SPI_FAST_FLASH_BOOT) configsip: 0,
SPIWP:0xee
clk_drv:0x00,q_drv:0x00,d_drv:0x00,cs0_drv:0x00,hd_drv:0x00,wp_drv:0x00
mode:DIO, clock div:1 load:0x3fff0018,len:4 load:0x3fff001c,len:1044
load:0x40078000,len:8896 load:0x40080400,len:5816 entry 0x400806ac
I am using ESP32 Arduino and FreeRTOS. The error comes from the semaphore given in the interrupt handler, but I couldn't find the exact cause. Any help would be appreciated.
#include <ArduinoJson.h>
#include <BLEDevice.h>
#include <BLEServer.h>
#include <BLEUtils.h>
#include <BLE2902.h>
// Pin application tasks to core 1 on dual-core ESP32, core 0 on unicore.
#if CONFIG_FREERTOS_UNICORE
static const BaseType_t app_cpu = 0;
#else
static const BaseType_t app_cpu = 1;
#endif
//ADC Related Global Variables
// Timer prescaler: 80 MHz APB clock / 80 = 1 MHz tick — TODO confirm clock source.
static const uint16_t timer_divider = 80;
// Alarm fires every 1000 ticks (1 ms at 1 MHz) — TODO confirm intended rate.
static const uint64_t timer_max_count = 1000;
static const int adc_pin = A0;
static const int BUF_SIZE = 1000;
// Sample ring buffer shared between the sampling path and the tasks.
static int buf[BUF_SIZE];
int Buff_Len = 0;      // current fill level of buf
static int Read = 0;   // ring-buffer read index
static int Write = 0;  // ring-buffer write index
static int count = 0;
static float avg = 0;
int i = 0;
int BLE_flag = 0;
String cmd;
static hw_timer_t *timer = NULL;
static uint16_t val;
static int count1 = 0;
// bin_sem:  signals BLE_Task that a full buffer is ready.
// bin_sem2: given from the timer ISR to wake move_to_Queue per sample.
static SemaphoreHandle_t bin_sem = NULL;
static SemaphoreHandle_t bin_sem2 = NULL;
static portMUX_TYPE spinlock = portMUX_INITIALIZER_UNLOCKED;
//ADC Related Global Variables
//BLE Global Variable
char Reading[4];
BLEServer *pServer = NULL;
BLECharacteristic *pTxCharacteristic;
bool deviceConnected = false;     // set/cleared by MyServerCallbacks
bool oldDeviceConnected = false;
//Declaration BLE necessary Classes
#define SERVICE_UUID "6E400001-B5A3-F393-E0A9-E50E24DCCA9E" // UART service UUID
#define CHARACTERISTIC_UUID_TX "6E400003-B5A3-F393-E0A9-E50E24DCCA9E"
// BLE server callbacks: track client connection state in deviceConnected.
class MyServerCallbacks : public BLEServerCallbacks
{
  void onConnect (BLEServer * pServer)
  {
    deviceConnected = true;
  }
  void onDisconnect (BLEServer * pServer)
  {
    deviceConnected = false;
  }
};
//BLE Global Variables
//Task Section
// Hardware-timer ISR: wake the sampling task once per alarm period.
void IRAM_ATTR onTimer ()
{
  //sampling
  // FIX: task_woken was used without being declared in the posted snippet.
  // It must be a local BaseType_t initialized to pdFALSE; the FromISR call
  // sets it if a higher-priority task was unblocked.
  BaseType_t task_woken = pdFALSE;
  xSemaphoreGiveFromISR (bin_sem2, &task_woken);
  if (task_woken)
  {
    portYIELD_FROM_ISR ();
  }
}
// Sampling task: blocks on the ISR's semaphore, then either stores the
// sample or, when the buffer is full, signals BLE_Task.
void move_to_Queue (void *parameters)
{
  for (;;)
  {
    xSemaphoreTake (bin_sem2, portMAX_DELAY);
    if (Buff_Len == BUF_SIZE || count1 > 2000)
    {
      Serial.println ("Buffer is full");
      xSemaphoreGive (bin_sem);
    }
    else
    {
      // storing the instantaneous sample value to buffer
    }
  }
}
// BLE task: waits for a full buffer, then notifies the client.
// FIX: the posted snippet had unbalanced braces and a stray
// "Serial.println (); } }" fragment after the function body (lines lost in
// the paste); the function is reconstructed with balanced braces.
void BLE_Task (void *parameters)
{
  while (1) {
    xSemaphoreTake (bin_sem, portMAX_DELAY);
    Serial.println ("BLE");
    // sending the data
    delay (10); // bluetooth stack will go into congestion, if too many packets are sent
  }
}
// One-time initialization: serial, BLE UART service, semaphores, tasks,
// and the 1 kHz-ish sampling timer. setup() runs in its own FreeRTOS task,
// which is deleted at the end.
void setup ()
{
// put your setup code here, to run once:
Serial.begin (115200);
vTaskDelay (1000 / portTICK_PERIOD_MS);
//BLE Declarations
BLEDevice::init ("UART Service");
pServer = BLEDevice::createServer ();
pServer->setCallbacks (new MyServerCallbacks ());
BLEService *pService = pServer->createService (SERVICE_UUID);
pTxCharacteristic = pService->createCharacteristic (CHARACTERISTIC_UUID_TX,
BLECharacteristic::
PROPERTY_NOTIFY);
pTxCharacteristic->addDescriptor (new BLE2902 ());
pService->start ();
pServer->getAdvertising ()->start ();
Serial.println ("Waiting a client connection to notify...");
//BLE Declaration
//ADC Semaphore and Timer Declarations
// Semaphores are created BEFORE the tasks and timer that use them, which
// is the correct order. NOTE(review): the reported xQueueGenericReceive
// assert typically means a semaphore handle was NULL (or given from an ISR
// before creation) — verify the full pastebin code keeps this ordering.
bin_sem = xSemaphoreCreateBinary ();
bin_sem2 = xSemaphoreCreateBinary ();
if (bin_sem == NULL || bin_sem2 == NULL)
{
Serial.println ("Could not create semaphore");
ESP.restart ();
}
// NOTE(review): 1024 words of stack for move_to_Queue is tight for a task
// that calls Serial.println — consider 2048, confirm with
// uxTaskGetStackHighWaterMark.
xTaskCreatePinnedToCore (move_to_Queue,
"move_to_Queue", 1024, NULL, 2, NULL, app_cpu);
xTaskCreatePinnedToCore (BLE_Task,
"BLE_Task", 2048, NULL, 2, NULL, app_cpu);
timer = timerBegin (0, timer_divider, true);
// Provide ISR to timer (timer, function, edge)
timerAttachInterrupt (timer, &onTimer, true);
// At what count should ISR trigger (timer, count, autoreload)
timerAlarmWrite (timer, timer_max_count, true);
// Allow ISR to trigger
timerAlarmEnable (timer);
// setup() runs in a task of its own; delete it now that everything is started.
vTaskDelete (NULL);
}
// Unused: all work happens in the FreeRTOS tasks created in setup().
void loop ()
{
}
Whole code: https://pastebin.com/K8ppkG28
Thanks in advance, guys.
I'm trying to build my own hypervisor for Linux (5.0.x kernel) on an Intel chip and I'm running into an odd problem.
Whenever I try to execute VMXON on more than one processor, it fails.
I made sure that VMX is enabled, that I allocated an aligned page and wrote VMCS REV ID into the VMXON region, but I'm not sure where the issue is.
This is my code:
vmx.c:
#include "vmx.h"
/* Per-CPU VMX bookkeeping. One instance is allocated per possible CPU and
 * stored in the cpu_vms per-CPU variable below. */
typedef struct vmstate {
bool vmx_enabled;            /* true once VMXON succeeded for this CPU */
void *vmxon_region;          /* 4 KiB VMXON region (kernel virtual address) */
phys_addr_t vmxon_physical;  /* physical address handed to VMXON */
void *vmcs_region;           /* VMCS region -- unused in this excerpt */
phys_addr_t vmcs_physical;
} vmstate;
/* Per-CPU pointer to that CPU's vmstate. */
static DEFINE_PER_CPU(vmstate*, cpu_vms);
/*
 * Execute VMXON with the physical address of a VMXON region.
 * Returns 0 on success, non-zero on failure: VMXON reports failure via
 * CF (VMfailInvalid) or ZF (VMfailValid), and "setna" sets the result
 * byte when CF or ZF is set ("not above" == CF|ZF).
 * Must run on the CPU that is to enter VMX operation.
 */
static inline int VMXON(phys_addr_t phys){
uint8_t ret;
// TODO: Signal VMX to PT, to avoid PT crashes (Processor Trace)
__asm__ __volatile__ (
"vmxon %[pa]; setna %[ret]"
: [ret]"=rm"(ret)
: [pa]"m"(phys)
: "cc", "memory"
);
return ret;
}
/* Leave VMX operation on the CURRENT CPU only. Faults (#UD) if this CPU
 * is not in VMX operation. */
static inline void VMXOFF(void){
__asm__ __volatile__("vmxoff" : : : "cc");
}
/* Enable VMX operation on the current CPU by setting CR4.VMXE (bit 13).
 * The original used the magic constant 0x2000 with a comment calling it
 * the "14th bit"; spell the bit out instead. */
static void enable_vmx_operation_cr(void){
	__write_cr4(__read_cr4() | (1UL << 13)); /* CR4.VMXE */
}
/* Clear CR4.VMXE (bit 13) on the current CPU, disabling VMX operation. */
static void disable_vmx_operation_cr(void){
	__write_cr4(__read_cr4() & ~(1UL << 13)); /* CR4.VMXE */
}
/* Allocate a zeroed vmstate. Returns NULL on allocation failure.
 * FIX: the original dereferenced the kzalloc() result without a NULL
 * check; kzalloc already zero-fills, so vmx_enabled == false needs no
 * explicit store. */
static vmstate *create_vmstate(void){
	return kzalloc(sizeof(vmstate), GFP_KERNEL);
}
/* Allocate and initialize this CPU's VMXON region: a 4 KiB, 4 KiB-aligned
 * block whose first 31 bits hold the VMCS revision identifier from
 * IA32_VMX_BASIC (bit 31 must be 0 in a VMXON region).
 * On failure vms->vmxon_region is left NULL. */
static void alloc_vmxon_region(vmstate *vms){
	// TODO: respect physical width as set by the IA32_VMX_BASIC[48] bit for 32bit support
	uint32_t vmcs_revid = 0;
	uint32_t hi = 0;
	/* FIX: use kzalloc (the SDM does not guarantee the region's contents
	 * beyond the revision id, but handing VMXON uninitialized memory is
	 * asking for trouble) and check the result before dereferencing. */
	void *vmxon_region = kzalloc(4096, GFP_KERNEL);
	if (!vmxon_region) {
		vms->vmxon_region = NULL;
		return;
	}
	rdmsr_safe(MSR_IA32_VMX_BASIC, &vmcs_revid, &hi);
	memcpy(vmxon_region, &vmcs_revid, 4);
	vms->vmxon_region = vmxon_region;
	vms->vmxon_physical = virt_to_phys(vmxon_region);
}
/* Free the VMXON region owned by vms. kfree(NULL) is a no-op, so the
 * original's NULL guard was redundant. */
static void teardown_vmstate(vmstate *vms){
	kfree(vms->vmxon_region);
}
/*
 * Disable VMX and free all per-CPU state.
 *
 * NOTE(review): this loop runs entirely on whichever CPU called it —
 * for_each_possible_cpu() only iterates indices, it does not migrate
 * execution. VMXOFF therefore executes repeatedly on the CURRENT CPU;
 * any other CPU that entered VMX operation stays in it (and VMXOFF on a
 * CPU not in VMX operation raises #UD). The per-CPU work should be
 * dispatched with on_each_cpu() — confirm against kernel SMP docs.
 */
void vmx_teardown(void){
int i;
vmstate* vms;
for_each_possible_cpu(i){
vms = per_cpu(cpu_vms, i);
if(vms->vmx_enabled == true) {
VMXOFF();
vms->vmx_enabled = false;
}
disable_vmx_operation_cr();
teardown_vmstate(vms);
kfree(vms);
}
}
int vmx_setup(void){
int i;
vmstate* vms;
printk(KERN_INFO "NUM CPUS: %d", num_possible_cpus());
for_each_possible_cpu(i) {
// Allocate vmstate for every processor
per_cpu(cpu_vms, i) = create_vmstate();
vms = per_cpu(cpu_vms, i);
alloc_vmxon_region(vms);
enable_vmx_operation_cr();
if(VMXON(vms->vmxon_physical)){
printk(KERN_ALERT "VMXON operation failed!");
vms->vmx_enabled = false;
}
else
vms->vmx_enabled = true;
}
for_each_possible_cpu(i){
vms = per_cpu(cpu_vms, i);
if(vms->vmx_enabled == false) {
printk(KERN_ALERT "Tearing down after VMXON fail!");
vmx_teardown();
return -1;
}
}
return 0;
}
vmx_setup is called by a device open file operation:
/* Device-open hook: entering VMX operation on all CPUs happens here.
 * Returns 0 on success or vmx_setup()'s error code. */
static int hyper_dev_open(struct inode* inode, struct file *filep){
	printk(KERN_INFO "Enabling VMX operation!\n");
	return vmx_setup();
}
When I execute VMXON on another processor, the carry flag is set to 1, zero flag is 0.
The driver works however if I add a VMXOFF() right after VMXON() so two VMX operations aren't enabled in parallel.
Any suggestions would help :)
for_each_possible_cpu simply iterates over the available CPUs; it doesn’t change execution to run on each CPU in turn. The entire loop runs on a single CPU.
Because of this, you are trying to execute vmxon repeatedly on the same CPU, which is why it is failing.
I'm trying to see how I can get a loaded module image name from an arbitrary address from the kernel code.
In user mode I would do this:
// User-mode reference implementation: resolve an arbitrary code address to
// the module that contains it.
void* pAddr;
VOID* pBase;
WCHAR buff[MAX_PATH] = {0};
//Get address of some function in some module (just to test it)
pAddr = GetProcAddress(GetModuleHandle(L"kernel32.dll"), "GetCurrentProcess");
//Get module base address
// RtlPcToFileHeader walks the loaded-module list and returns the image base
// containing pAddr.
RtlPcToFileHeader(pAddr, &pBase);
//Get module image file name
// NOTE(review): SIZEOF is not a standard macro — GetModuleFileNameEx takes
// the buffer size in WCHARs, so this should be _countof(buff)/MAX_PATH, not
// a byte count. TODO confirm the macro's definition.
GetModuleFileNameEx(GetCurrentProcess(), (HMODULE)pBase, buff, SIZEOF(buff));
Is there a way to do the same in kernel mode if I have pAddr that can point to some address in kernel or user space?
EDIT: While waiting for the answer I came up with my own code (using undocumented way of traversing PEB):
#ifdef CALLING_FROM_KERNEL_MODE
//Kernel mode
TEB* pTEB = (TEB*)PsGetCurrentThreadTeb();
#else
//User mode
#if defined(_M_X64)
//64-bit
TEB* pTEB = reinterpret_cast<TEB*>(__readgsqword(reinterpret_cast<DWORD_PTR>(&static_cast<NT_TIB*>(nullptr)->Self)));
#else
//32-bit
TEB* pTEB = reinterpret_cast<TEB*>(__readfsdword(reinterpret_cast<DWORD_PTR>(&static_cast<NT_TIB*>(nullptr)->Self)));
#endif
#endif
PEB* p_PEB = pTEB->ProcessEnvironmentBlock;
PEB_LDR_DATA* pPLD = p_PEB->Ldr;
const WCHAR* pModName = NULL;
LIST_ENTRY* pLE = &pPLD->InMemoryOrderModuleList;
LIST_ENTRY* pLE_Head = pLE;
while(pLE_Head != pLE->Flink)
{
PLDR_DATA_TABLE_ENTRY pLDTE = CONTAINING_RECORD(pLE, LDR_DATA_TABLE_ENTRY, InMemoryOrderLinks);
size_t szcbSizeOfImg = (size_t)pLDTE->Reserved3[1];
if((size_t)pAddr - (size_t)pLDTE->DllBase < szcbSizeOfImg)
{
pModName = pLDTE->FullDllName.Buffer;
break;
}
pLE = pLE->Flink;
}
The problem is that although it works from a user-mode, from a kernel mode PsGetCurrentThreadTeb() seems to return NULL. Does this mean kernel threads do not have a TEB?
This can be done by building a list of all loaded modules via ZwQuerySystemInformation with the SystemModuleInformation class.
/*
 * Kernel-mode: resolve each address in Callers[0..Count) to the loaded
 * driver image containing it, via NtQuerySystemInformation /
 * SystemModuleInformation, and print "address> path".
 *
 * Buffer sizing: starts at 64 KiB; if the call returns
 * STATUS_INFO_LENGTH_MISMATCH, cb has been updated to the required size
 * and the outer do/while retries with a fresh allocation. The inner work
 * (which consumes Callers/Count) only runs on success, so the retry path
 * never re-consumes them.
 *
 * NOTE(review): ExAllocatePool is deprecated in newer WDKs
 * (ExAllocatePool2); PagedPool access requires IRQL < DISPATCH_LEVEL —
 * confirm the callers' context.
 */
void fgt(PVOID *Callers, ULONG Count)
{
NTSTATUS status;
ULONG cb = 0x10000;
do
{
status = STATUS_INSUFFICIENT_RESOURCES;
if (PRTL_PROCESS_MODULES prpm = (PRTL_PROCESS_MODULES)ExAllocatePool(PagedPool, cb))
{
if (0 <= (status = NtQuerySystemInformation(SystemModuleInformation, prpm, cb, &cb)))
{
do
{
PVOID Caller = *Callers++;
if (ULONG NumberOfModules = prpm->NumberOfModules)
{
PRTL_PROCESS_MODULE_INFORMATION Modules = prpm->Modules;
do
{
// Unsigned subtraction is a one-shot range check:
// Caller in [ImageBase, ImageBase + ImageSize)
if ((SIZE_T)Caller - (SIZE_T)Modules->ImageBase < Modules->ImageSize)
{
DbgPrint("%p> %s\n", Caller, Modules->FullPathName);
break;
}
} while (Modules++, --NumberOfModules);
}
} while (--Count);
}
ExFreePool(prpm);
}
} while (status == STATUS_INFO_LENGTH_MISMATCH);
}
I am trying to get the well-known SID for the built-in administrator account using CreateWellKnownSid so I can use it in other functions, but I get a "The parameter is incorrect" error when passing WinAccountAdministratorSid as the first parameter. However, if I use WinBuiltinAdministratorsSid or WinBuiltinUsersSid, it works. No idea what's going on.
Code:
#include <Windows.h>
#include <wchar.h>
#include <LM.h>
#include <locale.h>
#pragma comment(lib, "Netapi32.lib")
#define MAX_NAME 256
/*
 * Print the system's message text for a Win32 error code to stdout.
 * Exits the process if the message itself cannot be formatted.
 */
VOID ShowError(DWORD errorCode)
{
    //FormatMessageW
    DWORD flags = FORMAT_MESSAGE_ALLOCATE_BUFFER |
                  FORMAT_MESSAGE_FROM_SYSTEM |
                  FORMAT_MESSAGE_IGNORE_INSERTS;
    LPWSTR errorMessage;
    DWORD size = 0; /* ALLOCATE_BUFFER: system allocates, no minimum size */
    if (!FormatMessageW(flags, NULL, errorCode, 0, (LPWSTR)&errorMessage, size, NULL))
    {
        /* FIX: DWORD is unsigned long on Windows, so the matching
         * wide-printf specifier is %lu, not %u. */
        fwprintf(stderr, L"Could not get the format message, error code: %lu\n", GetLastError());
        exit(1);
    }
    wprintf(L"\n%s", errorMessage);
    LocalFree(errorMessage); /* buffer came from FORMAT_MESSAGE_ALLOCATE_BUFFER */
}
int wmain(int argc, WCHAR **argv)
{
    _wsetlocale(LC_ALL, L"English");

    //LocalAlloc
    UINT memFlags = LMEM_FIXED; //Allocates fixed memory
    DWORD numOfBytes = SECURITY_MAX_SID_SIZE;
    PSID builtInAdminSid;
    /*Allocating memory to hold the SID for the
    built-in administrator user*/
    if (!(builtInAdminSid = LocalAlloc(memFlags, numOfBytes)))
    {
        ShowError(GetLastError());
        return 1;
    }

    //CreateWellKnownSid
    /* NOTE: WinAccountAdministratorSid is a DOMAIN-RELATIVE well-known SID,
     * so CreateWellKnownSid needs a non-NULL DomainSid and fails with
     * ERROR_INVALID_PARAMETER otherwise. (WinBuiltinAdministratorsSid and
     * WinBuiltinUsersSid work with NULL because the BUILTIN domain is
     * implied.) See the accepted answer for obtaining the domain SID via
     * LsaQueryInformationPolicy. */
    WELL_KNOWN_SID_TYPE accountAdminSid = WinAccountAdministratorSid;
    PSID domainSid = NULL;
    /*We will ask Windows for the well known Admin SID.
    If this function fails, we cannot continue*/
    if (!CreateWellKnownSid(accountAdminSid, NULL,
                            builtInAdminSid, &numOfBytes))
    {
        ShowError(GetLastError());
        LocalFree(builtInAdminSid); //Do not forget to free memory!
        return 1;
    }
    LocalFree(builtInAdminSid); /* FIX: was leaked on the success path */
    return 0;
}
Am I doing something wrong?
EDIT:
Seems like I have to specify the DomainSid parameter, but how do I retrieve it for the local computer?
Sometimes CreateWellKnownSid requires the DomainSid parameter for a very simple reason: it concatenates the DomainSid with the well-known RID (appending one SubAuthority to the SID).
To get the DomainSid we can use LsaQueryInformationPolicy with PolicyAccountDomainInformation, which retrieves the name and SID of the system's account domain. The call returns a POLICY_ACCOUNT_DOMAIN_INFO structure containing the DomainSid.
#include <Ntsecapi.h>
/*
 * Build the well-known account-administrator SID for the local machine:
 * fetch the account domain's SID from LSA, then let CreateWellKnownSid
 * append the Administrator RID to it.
 * Returns 0 on success, an NTSTATUS or Win32 error code on failure.
 */
ULONG CreateSid()
{
LSA_HANDLE PolicyHandle;
// LSA object attributes are reserved; only Length need be set.
static LSA_OBJECT_ATTRIBUTES oa = { sizeof(oa) };
NTSTATUS status = LsaOpenPolicy(0, &oa, POLICY_VIEW_LOCAL_INFORMATION, &PolicyHandle);
if (0 <= status)
{
PPOLICY_ACCOUNT_DOMAIN_INFO ppadi;
// PolicyAccountDomainInformation yields the local account domain's
// name and SID; ppadi must be released with LsaFreeMemory.
if (0 <= (status = LsaQueryInformationPolicy(PolicyHandle, PolicyAccountDomainInformation, (void**)&ppadi)))
{
// NOTE(review): the SDK macro for the maximum SID size is
// SECURITY_MAX_SID_SIZE; confirm MAX_SID_SIZE is defined somewhere
// in this project.
PSID sid = alloca(MAX_SID_SIZE);
ULONG cbSid = MAX_SID_SIZE;
// Concatenates ppadi->DomainSid with the Administrator RID (500).
if (!CreateWellKnownSid(::WinAccountAdministratorSid, ppadi->DomainSid, sid, &cbSid))
{
status = GetLastError();
}
LsaFreeMemory(ppadi);
}
LsaClose(PolicyHandle);
}
return status;
}
For those who wonder how I set the RbMm's answer to my code, here it is:
// LsaOpenPolicy: open the local policy with rights sufficient to read the
// account-domain information.
NTSTATUS nOpenPolicy;
LSA_OBJECT_ATTRIBUTES objectAttributes;
LSA_HANDLE policyHandle;
// Object attributes are reserved; they must be zero-initialized.
ZeroMemory(&objectAttributes, sizeof(objectAttributes));
nOpenPolicy = LsaOpenPolicy(NULL, &objectAttributes,
    POLICY_VIEW_LOCAL_INFORMATION, &policyHandle);
if (nOpenPolicy != STATUS_SUCCESS)
{
    ShowError(LsaNtStatusToWinError(nOpenPolicy));
    LocalFree(builtInAdminSid);
    return 1;
}
// LsaQueryInformationPolicy: fetch the local account domain's SID.
// pDomainInfo must later be released with LsaFreeMemory.
NTSTATUS nQueryInfo;
POLICY_INFORMATION_CLASS policyInformation = PolicyAccountDomainInformation;
PPOLICY_ACCOUNT_DOMAIN_INFO pDomainInfo;
nQueryInfo = LsaQueryInformationPolicy(policyHandle, policyInformation, (PVOID *)&pDomainInfo);
if (nQueryInfo != STATUS_SUCCESS)
{
    ShowError(LsaNtStatusToWinError(nQueryInfo));
    LocalFree(builtInAdminSid);
    LsaClose(policyHandle);
    return 1;
}
// CreateWellKnownSid: WinAccountAdministratorSid is domain-relative, so the
// domain SID retrieved above is required here.
WELL_KNOWN_SID_TYPE accountAdminSid = WinAccountAdministratorSid;
/* We will ask Windows for the well known Admin SID.
If this function fails, we cannot continue */
if (!CreateWellKnownSid(accountAdminSid, pDomainInfo->DomainSid,
    builtInAdminSid, &numOfBytes))
{
    ShowError(GetLastError());
    LocalFree(builtInAdminSid); // Do not forget to free memory!
    LsaFreeMemory(pDomainInfo); // FIX: was leaked on this error path
    LsaClose(policyHandle);
    return 1;
}
LsaClose(policyHandle);
LsaFreeMemory(pDomainInfo);
I've a function I wrote in order to run a given function on all processors. It works perfectly well in all cases except the following case:
When I try to use it within a kprobe that I registered.
Here's some code:
/* Serializes entries into process_entry_callback.
 * NOTE(review): a mutex may sleep; kretprobe handlers run in atomic
 * context — see the note on process_entry_callback. */
static DEFINE_MUTEX(entryMutex);
/* kretprobe on sys_execve; address filled in by driver_init.
 * NOTE(review): .handler's type is kretprobe_handler_t — the casts through
 * kprobe_opcode_t* defeat type checking; confirm the signatures match. */
static struct kretprobe my_kprobe = {
.entry_handler = (kprobe_opcode_t *) NULL,
.handler = (kprobe_opcode_t *) process_entry_callback,
.maxactive = 1000,
.data_size = 0
};
/* Module init: attach the kretprobe to sys_execve.
 * Returns 0 on success, -1 on failure. */
static int driver_init(void)
{
	int ret; /* FIX: 'ret' was used without being declared in the snippet */
	my_kprobe.kp.addr = (kprobe_opcode_t*)kallsyms_lookup_name("sys_execve");
	if (!my_kprobe.kp.addr)
		return -1; /* symbol not found on this kernel */
	if ((ret = register_kretprobe(&my_kprobe)) < 0)
		return -1;
	return 0;
}
void foo(void* nothing)
{
printk("In foo\n");
}
/*
 * kretprobe return handler for sys_execve: runs foo on each processor.
 *
 * NOTE(review): kprobe/kretprobe handlers execute with preemption disabled
 * (atomic context). mutex_lock() can sleep, and run_func() blocks waiting
 * for a kthread — sleeping here can wedge the CPU, which matches the
 * reported "processor lockup" and hung new processes. The work should be
 * deferred (e.g. to a workqueue) instead of done in the handler — TODO
 * confirm against Documentation/kprobes.txt.
 */
static int process_entry_callback(struct kretprobe_instance* instance, struct pt_regs* regs)
{
mutex_lock(&entryMutex);
for(int i = 0; i < 4; ++i) // assumes there are 4 processors
run_func(foo, NULL, i);
mutex_unlock(&entryMutex);
return 0;
}
/* Kthread body: run the requested function, then release the waiter.
 * FIX: the original woke the waiter BEFORE setting the condition. Since
 * wait_event() re-checks its condition after every wakeup, a waiter woken
 * while the flag was still FALSE would go back to sleep and never be woken
 * again (lost wakeup). Publish the condition first, then wake. */
void run_func_wrap(struct function_data* data)
{
	data->func(data->context);
	*(data->condition) = TRUE;
	wake_up_process(data->waiting_task);
}
/*
 * Run func(context) on the given processor via a bound kthread and block
 * until it completes.
 *
 * NOTE(review): kthread_create is passed 'sched_func_wrap', which is not
 * defined in this excerpt — presumably this should be run_func_wrap (whose
 * signature also doesn't match kthread's int (*)(void *)); confirm.
 * NOTE(review): nothing ever calls wake_up(&queue); the waiter relies on
 * wake_up_process() plus wait_event()'s condition re-check, which is racy
 * with the wake-before-set ordering in run_func_wrap.
 * 'data' lives on this stack — safe only because we block until the
 * kthread finishes. Must not be called from atomic context (it sleeps).
 */
void run_func(SCHEDULED_FUNC func, void *context, int processor)
{
struct function_data data;
struct task_struct* th;
BOOLEAN condition = FALSE;
wait_queue_head_t queue;
init_waitqueue_head(&queue);
data.func = func;
data.waiting_task = current;
data.context = context;
data.condition = &condition;
th = kthread_create(sched_func_wrap, &data, "th");
// Pin the kthread to the requested processor before first wakeup.
kthread_bind(th, processor);
wake_up_process(th);
// Sleeps until run_func_wrap sets 'condition'.
wait_event(queue, condition);
}
After the call to 'run_func' in process_entry_callback I can no longer run any programs. Every time I start a new program it just gets stuck, and after a while I get a 'processor lockup' warning in the system log.
I suspect that it has something to do with the IRQ levels.
Any suggestions ?
EDIT:
It also happens when using the following function:
smp_call_function_single
which can be found in smp.c # the Linux kernel source code.
instead of my function:
run_func